diff --git a/.clang-format b/.clang-format
index e58d518b3b8cacdd1e13dd965805fa364a996eb2..56ca83e724ad0b804a10b9be0dd42aa7a05eeaf7 100644
--- a/.clang-format
+++ b/.clang-format
@@ -88,4 +88,3 @@ Standard: Auto
TabWidth: 8
UseTab: Never
...
-
diff --git a/.gitattributes b/.gitattributes
new file mode 100644
index 0000000000000000000000000000000000000000..912b302ad23d47c46708d672175a908f2dbc74e8
--- /dev/null
+++ b/.gitattributes
@@ -0,0 +1 @@
+*.py linguist-detectable=false
diff --git a/.gitignore b/.gitignore
index 76b581b18224c4036c59573900943804aeabe905..5f1e24109df622591bdd31536d0316d2bb2c5450 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,5 +1,6 @@
build/
compile_commands.json
+CMakeSettings.json
.cache
.ycm_extra_conf.py
.tasks
diff --git a/CMakeLists.txt b/CMakeLists.txt
index 0c7e31bfbb891cc23c2cd5e788772d6f33bc329e..90e841d5e04fd72338f38ca11f1dd5a522b61918 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -34,7 +34,7 @@ endif(${BUILD_TEST})
add_subdirectory(source)
add_subdirectory(tools)
-add_subdirectory(tests)
+add_subdirectory(utils)
add_subdirectory(examples/c)
# docs
diff --git a/CMakeSettings.json b/CMakeSettings.json
deleted file mode 100644
index d3f2c27bf6ecb5bb433e468c34577408c7ba6603..0000000000000000000000000000000000000000
--- a/CMakeSettings.json
+++ /dev/null
@@ -1,25 +0,0 @@
-{
- "configurations": [
- {
- "name": "WSL-GCC-Debug",
- "generator": "Unix Makefiles",
- "configurationType": "Debug",
- "buildRoot": "${projectDir}\\build\\",
- "installRoot": "${projectDir}\\build\\",
- "cmakeExecutable": "/usr/bin/cmake",
- "cmakeCommandArgs": "",
- "buildCommandArgs": "",
- "ctestCommandArgs": "",
- "inheritEnvironments": [ "linux_x64" ],
- "wslPath": "${defaultWSLPath}",
- "addressSanitizerRuntimeFlags": "detect_leaks=0",
- "variables": [
- {
- "name": "CMAKE_INSTALL_PREFIX",
- "value": "/mnt/d/TDengine/TDengine/build",
- "type": "PATH"
- }
- ]
- }
- ]
-}
\ No newline at end of file
diff --git a/README-CN.md b/README-CN.md
index e30e38ae7875251df6fe1b5ab487f508ad6eeaa7..0b7e42d4fa19045e94f004ab61159a8d79e4bb82 100644
--- a/README-CN.md
+++ b/README-CN.md
@@ -303,14 +303,14 @@ Query OK, 2 row(s) in set (0.001700s)
TDengine 提供了丰富的应用程序开发接口,其中包括 C/C++、Java、Python、Go、Node.js、C# 、RESTful 等,便于用户快速开发应用:
-- [Java](https://docs.taosdata.com/reference/connector/java/)
-- [C/C++](https://www.taosdata.com/cn/documentation/connector#c-cpp)
-- [Python](https://docs.taosdata.com/reference/connector/python/)
-- [Go](https://docs.taosdata.com/reference/connector/go/)
-- [Node.js](https://docs.taosdata.com/reference/connector/node/)
-- [Rust](https://docs.taosdata.com/reference/connector/rust/)
-- [C#](https://docs.taosdata.com/reference/connector/csharp/)
-- [RESTful API](https://docs.taosdata.com/reference/rest-api/)
+- [Java](https://docs.taosdata.com/connector/java/)
+- [C/C++](https://docs.taosdata.com/connector/cpp/)
+- [Python](https://docs.taosdata.com/connector/python/)
+- [Go](https://docs.taosdata.com/connector/go/)
+- [Node.js](https://docs.taosdata.com/connector/node/)
+- [Rust](https://docs.taosdata.com/connector/rust/)
+- [C#](https://docs.taosdata.com/connector/csharp/)
+- [RESTful API](https://docs.taosdata.com/connector/rest-api/)
# 成为社区贡献者
diff --git a/README.md b/README.md
index 02dd9984e81d7e4b0c8c6ed6ff4a57da4eba1de3..611d97aac9436bdcc732efcf98822f2dd11d74ab 100644
--- a/README.md
+++ b/README.md
@@ -19,29 +19,29 @@ English | [简体中文](README-CN.md) | We are hiring, check [here](https://tde
# What is TDengine?
-TDengine is an open source, high-performance, cloud native time-series database optimized for Internet of Things (IoT), Connected Cars, and Industrial IoT. It enables efficient, real-time data ingestion, processing, and monitoring of TB and even PB scale data per day, generated by billions of sensors and data collectors. TDengine differentiates itself from other time-seires databases with the following advantages:
+TDengine is an open source, high-performance, cloud native [time-series database](https://tdengine.com/tsdb/what-is-a-time-series-database/) optimized for Internet of Things (IoT), Connected Cars, and Industrial IoT. It enables efficient, real-time data ingestion, processing, and monitoring of TB and even PB scale data per day, generated by billions of sensors and data collectors. TDengine differentiates itself from other time-series databases with the following advantages:
-- **High-Performance**: TDengine is the only time-series database to solve the high cardinality issue to support billions of data collection points while out performing other time-series databases for data ingestion, querying and data compression.
+- **[High-Performance](https://tdengine.com/tdengine/high-performance-time-series-database/)**: TDengine is the only time-series database to solve the high cardinality issue to support billions of data collection points while out performing other time-series databases for data ingestion, querying and data compression.
-- **Simplified Solution**: Through built-in caching, stream processing and data subscription features, TDengine provides a simplified solution for time-series data processing. It reduces system design complexity and operation costs significantly.
+- **[Simplified Solution](https://tdengine.com/tdengine/simplified-time-series-data-solution/)**: Through built-in caching, stream processing and data subscription features, TDengine provides a simplified solution for time-series data processing. It reduces system design complexity and operation costs significantly.
-- **Cloud Native**: Through native distributed design, sharding and partitioning, separation of compute and storage, RAFT, support for kubernetes deployment and full observability, TDengine is a cloud native Time-Series Database and can be deployed on public, private or hybrid clouds.
+- **[Cloud Native](https://tdengine.com/tdengine/cloud-native-time-series-database/)**: Through native distributed design, sharding and partitioning, separation of compute and storage, RAFT, support for kubernetes deployment and full observability, TDengine is a cloud native Time-Series Database and can be deployed on public, private or hybrid clouds.
-- **Ease of Use**: For administrators, TDengine significantly reduces the effort to deploy and maintain. For developers, it provides a simple interface, simplified solution and seamless integrations for third party tools. For data users, it gives easy data access.
+- **[Ease of Use](https://docs.tdengine.com/get-started/docker/)**: For administrators, TDengine significantly reduces the effort to deploy and maintain. For developers, it provides a simple interface, simplified solution and seamless integrations for third party tools. For data users, it gives easy data access.
-- **Easy Data Analytics**: Through super tables, storage and compute separation, data partitioning by time interval, pre-computation and other means, TDengine makes it easy to explore, format, and get access to data in a highly efficient way.
+- **[Easy Data Analytics](https://tdengine.com/tdengine/time-series-data-analytics-made-easy/)**: Through super tables, storage and compute separation, data partitioning by time interval, pre-computation and other means, TDengine makes it easy to explore, format, and get access to data in a highly efficient way.
-- **Open Source**: TDengine’s core modules, including cluster feature, are all available under open source licenses. It has gathered 18.8k stars on GitHub. There is an active developer community, and over 139k running instances worldwide.
+- **[Open Source](https://tdengine.com/tdengine/open-source-time-series-database/)**: TDengine’s core modules, including cluster feature, are all available under open source licenses. It has gathered 18.8k stars on GitHub. There is an active developer community, and over 139k running instances worldwide.
# Documentation
-For user manual, system design and architecture, please refer to [TDengine Documentation](https://docs.taosdata.com) ([TDengine 文档](https://docs.taosdata.com))
+For user manual, system design and architecture, please refer to [TDengine Documentation](https://docs.tdengine.com) ([TDengine 文档](https://docs.taosdata.com))
# Building
-At the moment, TDengine server supports running on Linux, Windows systems.Any OS application can also choose the RESTful interface of taosAdapter to connect the taosd service . TDengine supports X64/ARM64 CPU , and it will support MIPS64, Alpha64, ARM32, RISC-V and other CPU architectures in the future.
+At the moment, TDengine server supports running on Linux and Windows systems. Any application can also choose the RESTful interface provided by taosAdapter to connect to the taosd service. TDengine supports X64/ARM64 CPU, and it will support MIPS64, Alpha64, ARM32, RISC-V and other CPU architectures in the future.
-You can choose to install through source code according to your needs, [container](https://docs.taosdata.com/get-started/docker/), [installation package](https://docs.taosdata.com/get-started/package/) or [Kubernetes](https://docs.taosdata.com/deployment/k8s/) to install. This quick guide only applies to installing from source.
+You can choose to install through source code, [container](https://docs.tdengine.com/get-started/docker/), [installation package](https://docs.tdengine.com/get-started/package/) or [Kubernetes](https://docs.tdengine.com/deployment/k8s/). This quick guide only applies to installing from source.
TDengine provide a few useful tools such as taosBenchmark (was named taosdemo) and taosdump. They were part of TDengine. By default, TDengine compiling does not include taosTools. You can use `cmake .. -DBUILD_TOOLS=true` to make them be compiled with TDengine.
@@ -256,6 +256,7 @@ After building successfully, TDengine can be installed by:
nmake install
```
+
## Quick Run
@@ -304,14 +306,14 @@ Query OK, 2 row(s) in set (0.001700s)
TDengine provides abundant developing tools for users to develop on TDengine. Follow the links below to find your desired connectors and relevant documentation.
-- [Java](https://docs.taosdata.com/reference/connector/java/)
-- [C/C++](https://docs.taosdata.com/reference/connector/cpp/)
-- [Python](https://docs.taosdata.com/reference/connector/python/)
-- [Go](https://docs.taosdata.com/reference/connector/go/)
-- [Node.js](https://docs.taosdata.com/reference/connector/node/)
-- [Rust](https://docs.taosdata.com/reference/connector/rust/)
-- [C#](https://docs.taosdata.com/reference/connector/csharp/)
-- [RESTful API](https://docs.taosdata.com/reference/rest-api/)
+- [Java](https://docs.tdengine.com/reference/connector/java/)
+- [C/C++](https://docs.tdengine.com/reference/connector/cpp/)
+- [Python](https://docs.tdengine.com/reference/connector/python/)
+- [Go](https://docs.tdengine.com/reference/connector/go/)
+- [Node.js](https://docs.tdengine.com/reference/connector/node/)
+- [Rust](https://docs.tdengine.com/reference/connector/rust/)
+- [C#](https://docs.tdengine.com/reference/connector/csharp/)
+- [RESTful API](https://docs.tdengine.com/reference/rest-api/)
# Contribute to TDengine
diff --git a/TDenginelogo.png b/TDenginelogo.png
deleted file mode 100644
index 19a92592d7e8871778f5f3a6edd6314260d62551..0000000000000000000000000000000000000000
Binary files a/TDenginelogo.png and /dev/null differ
diff --git a/cmake/cmake.define b/cmake/cmake.define
index 989b69a89b31b43b1caab28c7fbf50515845465e..5d64815a9aa90741a0d6aca7e51518d2263932a2 100644
--- a/cmake/cmake.define
+++ b/cmake/cmake.define
@@ -2,8 +2,6 @@ cmake_minimum_required(VERSION 3.0)
set(CMAKE_VERBOSE_MAKEFILE OFF)
-SET(BUILD_SHARED_LIBS "OFF")
-
#set output directory
SET(LIBRARY_OUTPUT_PATH ${PROJECT_BINARY_DIR}/build/lib)
SET(EXECUTABLE_OUTPUT_PATH ${PROJECT_BINARY_DIR}/build/bin)
@@ -103,6 +101,9 @@ IF (TD_WINDOWS)
SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${COMMON_FLAGS}")
ELSE ()
+ IF (${TD_DARWIN})
+ set(CMAKE_MACOSX_RPATH 0)
+ ENDIF ()
IF (${COVER} MATCHES "true")
MESSAGE(STATUS "Test coverage mode, add extra flags")
SET(GCC_COVERAGE_COMPILE_FLAGS "-fprofile-arcs -ftest-coverage")
diff --git a/cmake/cmake.install b/cmake/cmake.install
index 6dc6864975c0d36a024500d8a09fe3b6f9a6a850..fd1e080ddab1478f73689e7cced405ae8404fbc2 100644
--- a/cmake/cmake.install
+++ b/cmake/cmake.install
@@ -1,3 +1,19 @@
+SET(PREPARE_ENV_CMD "prepare_env_cmd")
+SET(PREPARE_ENV_TARGET "prepare_env_target")
+ADD_CUSTOM_COMMAND(OUTPUT ${PREPARE_ENV_CMD}
+ POST_BUILD
+ COMMAND echo "make test directory"
+ DEPENDS taosd
+ COMMAND ${CMAKE_COMMAND} -E make_directory ${TD_TESTS_OUTPUT_DIR}/cfg/
+ COMMAND ${CMAKE_COMMAND} -E make_directory ${TD_TESTS_OUTPUT_DIR}/log/
+ COMMAND ${CMAKE_COMMAND} -E make_directory ${TD_TESTS_OUTPUT_DIR}/data/
+ COMMAND ${CMAKE_COMMAND} -E echo dataDir ${TD_TESTS_OUTPUT_DIR}/data > ${TD_TESTS_OUTPUT_DIR}/cfg/taos.cfg
+ COMMAND ${CMAKE_COMMAND} -E echo logDir ${TD_TESTS_OUTPUT_DIR}/log >> ${TD_TESTS_OUTPUT_DIR}/cfg/taos.cfg
+ COMMAND ${CMAKE_COMMAND} -E echo charset UTF-8 >> ${TD_TESTS_OUTPUT_DIR}/cfg/taos.cfg
+ COMMAND ${CMAKE_COMMAND} -E echo monitor 0 >> ${TD_TESTS_OUTPUT_DIR}/cfg/taos.cfg
+ COMMENT "prepare taosd environment")
+ADD_CUSTOM_TARGET(${PREPARE_ENV_TARGET} ALL WORKING_DIRECTORY ${TD_EXECUTABLE_OUTPUT_PATH} DEPENDS ${PREPARE_ENV_CMD})
+
IF (TD_LINUX)
SET(TD_MAKE_INSTALL_SH "${TD_SOURCE_DIR}/packaging/tools/make_install.sh")
INSTALL(CODE "MESSAGE(\"make install script: ${TD_MAKE_INSTALL_SH}\")")
diff --git a/cmake/cmake.options b/cmake/cmake.options
index bec64f7bf00cdb0c6fddc713af0801eae08d45ea..3baccde4d711e7c7a535829c95a0ee8cdff3fae6 100644
--- a/cmake/cmake.options
+++ b/cmake/cmake.options
@@ -90,6 +90,12 @@ ELSE ()
ENDIF ()
ENDIF ()
+option(
+ BUILD_SHARED_LIBS
+ ""
+ OFF
+ )
+
option(
RUST_BINDINGS
"If build with rust-bindings"
diff --git a/cmake/cmake.platform b/cmake/cmake.platform
index 887fbd86d55d782cdf3c1d7c95dfee2dc2ec446d..e4d440d76edbfcf0d1d6f932cfa598fe0a0f43d2 100644
--- a/cmake/cmake.platform
+++ b/cmake/cmake.platform
@@ -46,7 +46,7 @@ IF (${CMAKE_SYSTEM_NAME} MATCHES "Linux" OR ${CMAKE_SYSTEM_NAME} MATCHES "Darwin
MESSAGE("Current system processor is ${CMAKE_SYSTEM_PROCESSOR}.")
IF (${CMAKE_SYSTEM_PROCESSOR} MATCHES "arm64" OR ${CMAKE_SYSTEM_PROCESSOR} MATCHES "x86_64")
- MESSAGE("Current system arch is arm64")
+ MESSAGE("Current system arch is 64")
SET(TD_DARWIN_64 TRUE)
ADD_DEFINITIONS("-D_TD_DARWIN_64")
ENDIF ()
@@ -87,7 +87,7 @@ IF ("${CPUTYPE}" STREQUAL "")
SET(TD_ARM_32 TRUE)
ADD_DEFINITIONS("-D_TD_ARM_")
ADD_DEFINITIONS("-D_TD_ARM_32")
- ELSEIF (CMAKE_SYSTEM_PROCESSOR MATCHES "aarch64")
+ ELSEIF (CMAKE_SYSTEM_PROCESSOR MATCHES "(aarch64)|(arm64)")
MESSAGE(STATUS "The current platform is aarch64")
SET(PLATFORM_ARCH_STR "arm64")
SET(TD_ARM_64 TRUE)
diff --git a/cmake/cmake.version b/cmake/cmake.version
index db29644b387306ce8f3ee473921dab4c7d05b10a..7c895b12ff3397a221be5d2cadd95f4e0e64897d 100644
--- a/cmake/cmake.version
+++ b/cmake/cmake.version
@@ -2,7 +2,7 @@
IF (DEFINED VERNUMBER)
SET(TD_VER_NUMBER ${VERNUMBER})
ELSE ()
- SET(TD_VER_NUMBER "3.0.0.1")
+ SET(TD_VER_NUMBER "3.0.1.0")
ENDIF ()
IF (DEFINED VERCOMPATIBLE)
diff --git a/cmake/taosadapter_CMakeLists.txt.in b/cmake/taosadapter_CMakeLists.txt.in
index f182beed33c76200649f93d96b68c153ec452b9a..eb0faf6d5d8072fdfeffae9e1732337fb7440aaf 100644
--- a/cmake/taosadapter_CMakeLists.txt.in
+++ b/cmake/taosadapter_CMakeLists.txt.in
@@ -2,7 +2,7 @@
# taosadapter
ExternalProject_Add(taosadapter
GIT_REPOSITORY https://github.com/taosdata/taosadapter.git
- GIT_TAG abed566
+ GIT_TAG 71e7ccf
SOURCE_DIR "${TD_SOURCE_DIR}/tools/taosadapter"
BINARY_DIR ""
#BUILD_IN_SOURCE TRUE
diff --git a/cmake/taostools_CMakeLists.txt.in b/cmake/taostools_CMakeLists.txt.in
index e593e6d62b31ad8c4aa73f102a20bee32a24fee7..c273e9889f0ea7b166a90b7131594564a2b57b25 100644
--- a/cmake/taostools_CMakeLists.txt.in
+++ b/cmake/taostools_CMakeLists.txt.in
@@ -2,7 +2,7 @@
# taos-tools
ExternalProject_Add(taos-tools
GIT_REPOSITORY https://github.com/taosdata/taos-tools.git
- GIT_TAG 2af2222
+ GIT_TAG 2dba49c
SOURCE_DIR "${TD_SOURCE_DIR}/tools/taos-tools"
BINARY_DIR ""
#BUILD_IN_SOURCE TRUE
diff --git a/cmake/taosws_CMakeLists.txt.in b/cmake/taosws_CMakeLists.txt.in
index 506559a245ee7a3e506e8481a12a3fe7f01dd5ac..04b1262cafd6f1dd984f568b847454c409d301ed 100644
--- a/cmake/taosws_CMakeLists.txt.in
+++ b/cmake/taosws_CMakeLists.txt.in
@@ -2,7 +2,7 @@
# taosws-rs
ExternalProject_Add(taosws-rs
GIT_REPOSITORY https://github.com/taosdata/taos-connector-rust.git
- GIT_TAG 7a54d21
+ GIT_TAG e771403
SOURCE_DIR "${TD_SOURCE_DIR}/tools/taosws-rs"
BINARY_DIR ""
#BUILD_IN_SOURCE TRUE
diff --git a/docs/en/01-index.md b/docs/en/01-index.md
index 22e62bc5e05fe83065b0e101644b01dbba2b5874..5265be42f81c4f43fa73e5b7d603d8989c2a5671 100644
--- a/docs/en/01-index.md
+++ b/docs/en/01-index.md
@@ -4,25 +4,24 @@ sidebar_label: Documentation Home
slug: /
---
-
-TDengine is an [open source](https://tdengine.com/tdengine/open-source-time-series-database/), [cloud native](https://tdengine.com/tdengine/cloud-native-time-series-database/) time-series database optimized for Internet of Things (IoT), Connected Cars, and Industrial IoT. It enables efficient, real-time data ingestion, processing, and monitoring of TB and even PB scale data per day, generated by billions of sensors and data collectors. This document is the TDengine user manual. It introduces the basic, as well as novel concepts, in TDengine, and also talks in detail about installation, features, SQL, APIs, operation, maintenance, kernel design and other topics. It’s written mainly for architects, developers and system administrators.
+TDengine is an [open-source](https://tdengine.com/tdengine/open-source-time-series-database/), [cloud-native](https://tdengine.com/tdengine/cloud-native-time-series-database/) time-series database optimized for the Internet of Things (IoT), Connected Cars, and Industrial IoT. It enables efficient, real-time data ingestion, processing, and monitoring of TB and even PB scale data per day, generated by billions of sensors and data collectors. This document is the TDengine user manual. It introduces the basic, as well as novel concepts, in TDengine, and also talks in detail about installation, features, SQL, APIs, operation, maintenance, kernel design, and other topics. It’s written mainly for architects, developers, and system administrators.
To get an overview of TDengine, such as a feature list, benchmarks, and competitive advantages, please browse through the [Introduction](./intro) section.
-TDengine greatly improves the efficiency of data ingestion, querying and storage by exploiting the characteristics of time series data, introducing the novel concepts of "one table for one data collection point" and "super table", and designing an innovative storage engine. To understand the new concepts in TDengine and make full use of the features and capabilities of TDengine, please read [“Concepts”](./concept) thoroughly.
+TDengine greatly improves the efficiency of data ingestion, querying, and storage by exploiting the characteristics of time series data, introducing the novel concepts of "one table for one data collection point" and "super table", and designing an innovative storage engine. To understand the new concepts in TDengine and make full use of the features and capabilities of TDengine, please read [Concepts](./concept) thoroughly.
-If you are a developer, please read the [“Developer Guide”](./develop) carefully. This section introduces the database connection, data modeling, data ingestion, query, continuous query, cache, data subscription, user-defined functions, and other functionality in detail. Sample code is provided for a variety of programming languages. In most cases, you can just copy and paste the sample code, make a few changes to accommodate your application, and it will work.
+If you are a developer, please read the [Developer Guide](./develop) carefully. This section introduces the database connection, data modeling, data ingestion, query, continuous query, cache, data subscription, user-defined functions, and other functionality in detail. Sample code is provided for a variety of programming languages. In most cases, you can just copy and paste the sample code, and make a few changes to accommodate your application, and it will work.
-We live in the era of big data, and scale-up is unable to meet the growing needs of business. Any modern data system must have the ability to scale out, and clustering has become an indispensable feature of big data systems. Not only did the TDengine team develop the cluster feature, but also decided to open source this important feature. To learn how to deploy, manage and maintain a TDengine cluster please refer to ["cluster deployment"](../deployment).
+We live in the era of big data, and scale-up is unable to meet the growing needs of the business. Any modern data system must have the ability to scale out, and clustering has become an indispensable feature of big data systems. Not only did the TDengine team develop the cluster feature, but also decided to open source this important feature. To learn how to deploy, manage and maintain a TDengine cluster please refer to [Cluster Deployment](../deployment).
-TDengine uses ubiquitious SQL as its query language, which greatly reduces learning costs and migration costs. In addition to the standard SQL, TDengine has extensions to better support time series data analysis. These extensions include functions such as roll up, interpolation and time weighted average, among many others. The ["SQL Reference"](./taos-sql) chapter describes the SQL syntax in detail, and lists the various supported commands and functions.
+TDengine uses ubiquitous SQL as its query language, which greatly reduces learning costs and migration costs. In addition to the standard SQL, TDengine has extensions to better support time series data analysis. These extensions include functions such as roll-up, interpolation, and time-weighted average, among many others. The [SQL Reference](./taos-sql) chapter describes the SQL syntax in detail and lists the various supported commands and functions.
-If you are a system administrator who cares about installation, upgrade, fault tolerance, disaster recovery, data import, data export, system configuration, how to monitor whether TDengine is running healthily, and how to improve system performance, please refer to, and thoroughly read the ["Administration"](./operation) section.
+If you are a system administrator who cares about installation, upgrade, fault tolerance, disaster recovery, data import, data export, system configuration, how to monitor whether TDengine is running healthily, and how to improve system performance, please refer to, and thoroughly read the [Administration](./operation) section.
-If you want to know more about TDengine tools, the REST API, and connectors for various programming languages, please see the ["Reference"](./reference) chapter.
+If you want to know more about TDengine tools, the REST API, and connectors for various programming languages, please see the [Reference](./reference) chapter.
-If you are very interested in the internal design of TDengine, please read the chapter ["Inside TDengine”](./tdinternal), which introduces the cluster design, data partitioning, sharding, writing, and reading processes in detail. If you want to study TDengine code or even contribute code, please read this chapter carefully.
+If you are very interested in the internal design of TDengine, please read the chapter [Inside TDengine](./tdinternal), which introduces the cluster design, data partitioning, sharding, writing, and reading processes in detail. If you want to study TDengine code or even contribute code, please read this chapter carefully.
-TDengine is an open source database, and we would love for you to be a part of TDengine. If you find any errors in the documentation, or see parts where more clarity or elaboration is needed, please click "Edit this page" at the bottom of each page to edit it directly.
+TDengine is an open-source database, and we would love for you to be a part of TDengine. If you find any errors in the documentation or see parts where more clarity or elaboration is needed, please click "Edit this page" at the bottom of each page to edit it directly.
-Together, we make a difference.
+Together, we make a difference!
diff --git a/docs/en/02-intro/index.md b/docs/en/02-intro/index.md
index b4636e54a676598d0fc513034d972c4b365a620c..d385845d7c57203d6e1cc8ddb8d53307f2655914 100644
--- a/docs/en/02-intro/index.md
+++ b/docs/en/02-intro/index.md
@@ -11,23 +11,35 @@ This section introduces the major features, competitive advantages, typical use-
The major features are listed below:
-1. While TDengine supports [using SQL to insert](/develop/insert-data/sql-writing), it also supports [Schemaless writing](/reference/schemaless/) just like NoSQL databases. TDengine also supports standard protocols like [InfluxDB LINE](/develop/insert-data/influxdb-line),[OpenTSDB Telnet](/develop/insert-data/opentsdb-telnet), [OpenTSDB JSON ](/develop/insert-data/opentsdb-json) among others.
-2. TDengine supports seamless integration with third-party data collection agents like [Telegraf](/third-party/telegraf),[Prometheus](/third-party/prometheus),[StatsD](/third-party/statsd),[collectd](/third-party/collectd),[icinga2](/third-party/icinga2), [TCollector](/third-party/tcollector), [EMQX](/third-party/emq-broker), [HiveMQ](/third-party/hive-mq-broker). These agents can write data into TDengine with simple configuration and without a single line of code.
-3. Support for [all kinds of queries](/develop/query-data), including aggregation, nested query, downsampling, interpolation and others.
-4. Support for [user defined functions](/develop/udf).
-5. Support for [caching](/develop/cache). TDengine always saves the last data point in cache, so Redis is not needed in some scenarios.
-6. Support for [continuous query](../develop/stream).
-7. Support for [data subscription](../develop/tmq) with the capability to specify filter conditions.
-8. Support for [cluster](../deployment/), with the capability of increasing processing power by adding more nodes. High availability is supported by replication.
-9. Provides an interactive [command-line interface](/reference/taos-shell) for management, maintenance and ad-hoc queries.
-10. Provides many ways to [import](/operation/import) and [export](/operation/export) data.
-11. Provides [monitoring](/operation/monitor) on running instances of TDengine.
-12. Provides [connectors](/reference/connector/) for [C/C++](/reference/connector/cpp), [Java](/reference/connector/java), [Python](/reference/connector/python), [Go](/reference/connector/go), [Rust](/reference/connector/rust), [Node.js](/reference/connector/node) and other programming languages.
-13. Provides a [REST API](/reference/rest-api/).
-14. Supports seamless integration with [Grafana](/third-party/grafana) for visualization.
-15. Supports seamless integration with Google Data Studio.
-
-For more details on features, please read through the entire documentation.
+1. Insert data
+ - Supports [using SQL to insert](../develop/insert-data/sql-writing).
+   - Supports [schemaless writing](../reference/schemaless/) just like NoSQL databases. It also supports standard protocols like [InfluxDB Line](../develop/insert-data/influxdb-line), [OpenTSDB Telnet](../develop/insert-data/opentsdb-telnet), [OpenTSDB JSON](../develop/insert-data/opentsdb-json) among others.
+   - Supports seamless integration with third-party tools like [Telegraf](../third-party/telegraf/), [Prometheus](../third-party/prometheus/), [collectd](../third-party/collectd/), [StatsD](../third-party/statsd/), [TCollector](../third-party/tcollector/), [EMQX](../third-party/emq-broker), [HiveMQ](../third-party/hive-mq-broker), and [Icinga2](../third-party/icinga2/). These tools can write data into TDengine with simple configuration and without a single line of code.
+2. Query data
+ - Supports standard [SQL](../taos-sql/), including nested query.
+ - Supports [time series specific functions](../taos-sql/function/#time-series-extensions) and [time series specific queries](../taos-sql/distinguished), like downsampling, interpolation, cumulated sum, time weighted average, state window, session window and many others.
+ - Supports [User Defined Functions (UDF)](../taos-sql/udf).
+3. [Caching](../develop/cache/): TDengine always saves the last data point in cache, so Redis is not needed for time-series data processing.
+4. [Stream Processing](../develop/stream/): Not only is continuous query supported, but TDengine also supports event-driven stream processing, so Flink or Spark is not needed for time-series data processing.
+5. [Data Subscription](../develop/tmq/): Applications can subscribe to a table or a set of tables. The API is the same as Kafka's, but you can specify filter conditions.
+6. Visualization
+ - Supports seamless integration with [Grafana](../third-party/grafana/) for visualization.
+ - Supports seamless integration with Google Data Studio.
+7. Cluster
+ - Supports [cluster](../deployment/) with the capability of increasing processing power by adding more nodes.
+ - Supports [deployment on Kubernetes](../deployment/k8s/).
+ - Supports high availability via data replication.
+8. Administration
+ - Provides [monitoring](../operation/monitor) on running instances of TDengine.
+ - Provides many ways to [import](../operation/import) and [export](../operation/export) data.
+9. Tools
+ - Provides an interactive [Command-line Interface (CLI)](../reference/taos-shell) for management, maintenance and ad-hoc queries.
+ - Provides a tool [taosBenchmark](../reference/taosbenchmark/) for testing the performance of TDengine.
+10. Programming
+ - Provides [connectors](../reference/connector/) for [C/C++](../reference/connector/cpp), [Java](../reference/connector/java), [Python](../reference/connector/python), [Go](../reference/connector/go), [Rust](../reference/connector/rust), [Node.js](../reference/connector/node) and other programming languages.
+ - Provides a [REST API](../reference/rest-api/).
+
+For more details on features, please read through the entire documentation.
## Competitive Advantages
@@ -37,23 +49,31 @@ By making full use of [characteristics of time series data](https://tdengine.com
- **[Simplified Solution](https://tdengine.com/tdengine/simplified-time-series-data-solution/)**: Through built-in caching, stream processing and data subscription features, TDengine provides a simplified solution for time-series data processing. It reduces system design complexity and operation costs significantly.
-- **[Cloud Native](https://tdengine.com/tdengine/cloud-native-time-series-database/)**: Through native distributed design, sharding and partitioning, separation of compute and storage, RAFT, support for kubernetes deployment and full observability, TDengine is a cloud native Time-Series Database and can be deployed on public, private or hybrid clouds.
+- **[Cloud Native](https://tdengine.com/tdengine/cloud-native-time-series-database/)**: Through native distributed design, sharding and partitioning, separation of compute and storage, RAFT, support for Kubernetes deployment and full observability, TDengine is a cloud native Time-series Database and can be deployed on public, private or hybrid clouds.
- **[Ease of Use](https://tdengine.com/tdengine/easy-time-series-data-platform/)**: For administrators, TDengine significantly reduces the effort to[
-](https://tdengine.com/tdengine/easy-time-series-data-platform/) deploy and maintain. For developers, it provides a simple interface, simplified solution and seamless integrations for third party tools. For data users, it gives easy data access.
+ ](https://tdengine.com/tdengine/easy-time-series-data-platform/) deploy and maintain. For developers, it provides a simple interface, simplified solution and seamless integrations for third party tools. For data users, it gives easy data access.
-- **[Easy Data Analytics](https://tdengine.com/tdengine/time-series-data-analytics-made-easy/)**: Through super tables, storage and compute separation, data partitioning by time interval, pre-computation and other means, TDengine makes it easy to explore, format, and get access to data in a highly efficient way.
+- **[Easy Data Analytics](https://tdengine.com/tdengine/time-series-data-analytics-made-easy/)**: Through super tables, storage and compute separation, data partitioning by time interval, pre-computation and other means, TDengine makes it easy to explore, format, and get access to data in a highly efficient way.
- **[Open Source](https://tdengine.com/tdengine/open-source-time-series-database/)**: TDengine’s core modules, including cluster feature, are all available under open source licenses. It has gathered over 19k stars on GitHub. There is an active developer community, and over 140k running instances worldwide.
-With TDengine, the total cost of ownership of your time-series data platform can be greatly reduced. 1: With its superior performance, the computing and storage resources are reduced significantly;2: With SQL support, it can be seamlessly integrated with many third party tools, and learning costs/migration costs are reduced significantly;3: With its simplified solution and nearly zero management, the operation and maintenance costs are reduced significantly.
+With TDengine, the total cost of ownership of your time-series data platform can be greatly reduced.
+
+1. With its superior performance, the computing and storage resources are reduced significantly.
+2. With SQL support, it can be seamlessly integrated with many third party tools, and learning costs/migration costs are reduced significantly.
+3. With its simplified solution and nearly zero management, the operation and maintenance costs are reduced significantly.
## Technical Ecosystem
+
This is how TDengine would be situated, in a typical time-series data processing platform:
+
+

-Figure 1. TDengine Technical Ecosystem
+Figure 1. TDengine Technical Ecosystem
+
On the left-hand side, there are data collection agents like OPC-UA, MQTT, Telegraf and Kafka. On the right-hand side, visualization/BI tools, HMI, Python/R, and IoT Apps can be connected. TDengine itself provides an interactive command-line interface and a web interface for management and maintenance.
@@ -63,42 +83,42 @@ As a high-performance, scalable and SQL supported time-series database, TDengine
### Characteristics and Requirements of Data Sources
-| **Data Source Characteristics and Requirements** | **Not Applicable** | **Might Be Applicable** | **Very Applicable** | **Description** |
-| -------------------------------------------------------- | ------------------ | ----------------------- | ------------------- | :----------------------------------------------------------- |
-| A massive amount of total data | | | √ | TDengine provides excellent scale-out functions in terms of capacity, and has a storage structure with matching high compression ratio to achieve the best storage efficiency in the industry.|
-| Data input velocity is extremely high | | | √ | TDengine's performance is much higher than that of other similar products. It can continuously process larger amounts of input data in the same hardware environment, and provides a performance evaluation tool that can easily run in the user environment. |
-| A huge number of data sources | | | √ | TDengine is optimized specifically for a huge number of data sources. It is especially suitable for efficiently ingesting, writing and querying data from billions of data sources. |
+| **Data Source Characteristics and Requirements** | **Not Applicable** | **Might Be Applicable** | **Very Applicable** | **Description** |
+| ------------------------------------------------ | ------------------ | ----------------------- | ------------------- | :------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ |
+| A massive amount of total data | | | √ | TDengine provides excellent scale-out functions in terms of capacity, and has a storage structure with matching high compression ratio to achieve the best storage efficiency in the industry. |
+| Data input velocity is extremely high | | | √ | TDengine's performance is much higher than that of other similar products. It can continuously process larger amounts of input data in the same hardware environment, and provides a performance evaluation tool that can easily run in the user environment. |
+| A huge number of data sources | | | √ | TDengine is optimized specifically for a huge number of data sources. It is especially suitable for efficiently ingesting, writing and querying data from billions of data sources. |
### System Architecture Requirements
-| **System Architecture Requirements** | **Not Applicable** | **Might Be Applicable** | **Very Applicable** | **Description** |
-| ------------------------------------------------- | ------------------ | ----------------------- | ------------------- | ------------------------------------------------------------ |
+| **System Architecture Requirements** | **Not Applicable** | **Might Be Applicable** | **Very Applicable** | **Description** |
+| ----------------------------------------- | ------------------ | ----------------------- | ------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
| A simple and reliable system architecture | | | √ | TDengine's system architecture is very simple and reliable, with its own message queue, cache, stream computing, monitoring and other functions. There is no need to integrate any additional third-party products. |
-| Fault-tolerance and high-reliability | | | √ | TDengine has cluster functions to automatically provide high-reliability and high-availability functions such as fault tolerance and disaster recovery. |
-| Standardization support | | | √ | TDengine supports standard SQL and provides SQL extensions for time-series data analysis. |
+| Fault-tolerance and high-reliability | | | √ | TDengine has cluster functions to automatically provide high-reliability and high-availability functions such as fault tolerance and disaster recovery. |
+| Standardization support | | | √ | TDengine supports standard SQL and provides SQL extensions for time-series data analysis. |
### System Function Requirements
-| **System Function Requirements** | **Not Applicable** | **Might Be Applicable** | **Very Applicable** | **Description** |
-| ------------------------------------------------- | ------------------ | ----------------------- | ------------------- | ------------------------------------------------------------ |
-| Complete data processing algorithms built-in | | √ | | While TDengine implements various general data processing algorithms, industry specific algorithms and special types of processing will need to be implemented at the application level.|
-| A large number of crosstab queries | | √ | | This type of processing is better handled by general purpose relational database systems but TDengine can work in concert with relational database systems to provide more complete solutions. |
+| **System Function Requirements** | **Not Applicable** | **Might Be Applicable** | **Very Applicable** | **Description** |
+| -------------------------------------------- | ------------------ | ----------------------- | ------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
+| Complete data processing algorithms built-in | | √ | | While TDengine implements various general data processing algorithms, industry specific algorithms and special types of processing will need to be implemented at the application level. |
+| A large number of crosstab queries | | √ | | This type of processing is better handled by general purpose relational database systems but TDengine can work in concert with relational database systems to provide more complete solutions. |
### System Performance Requirements
-| **System Performance Requirements** | **Not Applicable** | **Might Be Applicable** | **Very Applicable** | **Description** |
-| ------------------------------------------------- | ------------------ | ----------------------- | ------------------- | ------------------------------------------------------------ |
-| Very large total processing capacity | | | √ | TDengine’s cluster functions can easily improve processing capacity via multi-server coordination. |
-| Extremely high-speed data processing | | | √ | TDengine’s storage and data processing are optimized for IoT, and can process data many times faster than similar products.|
-| Extremely fast processing of high resolution data | | | √ | TDengine has achieved the same or better performance than other relational and NoSQL data processing systems. |
+| **System Performance Requirements** | **Not Applicable** | **Might Be Applicable** | **Very Applicable** | **Description** |
+| ------------------------------------------------- | ------------------ | ----------------------- | ------------------- | --------------------------------------------------------------------------------------------------------------------------- |
+| Very large total processing capacity | | | √ | TDengine’s cluster functions can easily improve processing capacity via multi-server coordination. |
+| Extremely high-speed data processing | | | √ | TDengine’s storage and data processing are optimized for IoT, and can process data many times faster than similar products. |
+| Extremely fast processing of high resolution data | | | √ | TDengine has achieved the same or better performance than other relational and NoSQL data processing systems. |
### System Maintenance Requirements
-| **System Maintenance Requirements** | **Not Applicable** | **Might Be Applicable** | **Very Applicable** | **Description** |
-| ------------------------------------------------- | ------------------ | ----------------------- | ------------------- | ------------------------------------------------------------ |
-| Native high-reliability | | | √ | TDengine has a very robust, reliable and easily configurable system architecture to simplify routine operation. Human errors and accidents are eliminated to the greatest extent, with a streamlined experience for operators. |
-| Minimize learning and maintenance costs | | | √ | In addition to being easily configurable, standard SQL support and the TDengine CLI for ad hoc queries makes maintenance simpler, allows reuse and reduces learning costs.|
-| Abundant talent supply | √ | | | Given the above, and given the extensive training and professional services provided by TDengine, it is easy to migrate from existing solutions or create a new and lasting solution based on TDengine.|
+| **System Maintenance Requirements** | **Not Applicable** | **Might Be Applicable** | **Very Applicable** | **Description** |
+| --------------------------------------- | ------------------ | ----------------------- | ------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ |
+| Native high-reliability | | | √ | TDengine has a very robust, reliable and easily configurable system architecture to simplify routine operation. Human errors and accidents are eliminated to the greatest extent, with a streamlined experience for operators. |
+| Minimize learning and maintenance costs | | | √ | In addition to being easily configurable, standard SQL support and the TDengine CLI for ad hoc queries makes maintenance simpler, allows reuse and reduces learning costs. |
+| Abundant talent supply | √ | | | Given the above, and given the extensive training and professional services provided by TDengine, it is easy to migrate from existing solutions or create a new and lasting solution based on TDengine. |
## Comparison with other databases
diff --git a/docs/en/04-concept/index.md b/docs/en/04-concept/index.md
index 44dcad82fc8e77bdb63ed3f8d5a36b9542c72aea..0b1b226c17100d56313b5480e26f437841afe8c7 100644
--- a/docs/en/04-concept/index.md
+++ b/docs/en/04-concept/index.md
@@ -6,128 +6,127 @@ In order to explain the basic concepts and provide some sample code, the TDengin
-
- Device ID
- Time Stamp
- Collected Metrics
- Tags
+
+
+ Device ID
+ Timestamp
+ Collected Metrics
+ Tags
-
-Device ID
-Time Stamp
-current
-voltage
-phase
-location
-groupId
-
-
-
-
-d1001
-1538548685000
-10.3
-219
-0.31
-California.SanFrancisco
-2
-
-
-d1002
-1538548684000
-10.2
-220
-0.23
-California.SanFrancisco
-3
-
-
-d1003
-1538548686500
-11.5
-221
-0.35
-California.LosAngeles
-3
-
-
-d1004
-1538548685500
-13.4
-223
-0.29
-California.LosAngeles
-2
-
-
-d1001
-1538548695000
-12.6
-218
-0.33
-California.SanFrancisco
-2
-
-
-d1004
-1538548696600
-11.8
-221
-0.28
-California.LosAngeles
-2
-
-
-d1002
-1538548696650
-10.3
-218
-0.25
-California.SanFrancisco
-3
-
-
-d1001
-1538548696800
-12.3
-221
-0.31
-California.SanFrancisco
-2
-
-
+
+ current
+ voltage
+ phase
+ location
+ groupid
+
+
+
+
+ d1001
+ 1538548685000
+ 10.3
+ 219
+ 0.31
+ California.SanFrancisco
+ 2
+
+
+ d1002
+ 1538548684000
+ 10.2
+ 220
+ 0.23
+ California.SanFrancisco
+ 3
+
+
+ d1003
+ 1538548686500
+ 11.5
+ 221
+ 0.35
+ California.LosAngeles
+ 3
+
+
+ d1004
+ 1538548685500
+ 13.4
+ 223
+ 0.29
+ California.LosAngeles
+ 2
+
+
+ d1001
+ 1538548695000
+ 12.6
+ 218
+ 0.33
+ California.SanFrancisco
+ 2
+
+
+ d1004
+ 1538548696600
+ 11.8
+ 221
+ 0.28
+ California.LosAngeles
+ 2
+
+
+ d1002
+ 1538548696650
+ 10.3
+ 218
+ 0.25
+ California.SanFrancisco
+ 3
+
+
+ d1001
+ 1538548696800
+ 12.3
+ 221
+ 0.31
+ California.SanFrancisco
+ 2
+
+
Table 1: Smart meter example data
-Each row contains the device ID, time stamp, collected metrics (current, voltage, phase as above), and static tags (location and groupId in Table 1) associated with the devices. Each smart meter generates a row (measurement) in a pre-defined time interval or triggered by an external event. The device produces a sequence of measurements with associated time stamps.
+Each row contains the device ID, timestamp, collected metrics (`current`, `voltage`, `phase` as above), and static tags (`location` and `groupid` in Table 1) associated with the devices. Each smart meter generates a row (measurement) in a pre-defined time interval or triggered by an external event. The device produces a sequence of measurements with associated timestamps.
## Metric
-Metric refers to the physical quantity collected by sensors, equipment or other types of data collection devices, such as current, voltage, temperature, pressure, GPS position, etc., which change with time, and the data type can be integer, float, Boolean, or strings. As time goes by, the amount of collected metric data stored increases.
+Metric refers to the physical quantity collected by sensors, equipment or other types of data collection devices, such as current, voltage, temperature, pressure, GPS position, etc., which change with time, and the data type can be integer, float, Boolean, or string. As time goes by, the amount of collected metric data stored increases. In the smart meters example, current, voltage and phase are the metrics.
## Label/Tag
-Label/Tag refers to the static properties of sensors, equipment or other types of data collection devices, which do not change with time, such as device model, color, fixed location of the device, etc. The data type can be any type. Although static, TDengine allows users to add, delete or update tag values at any time. Unlike the collected metric data, the amount of tag data stored does not change over time.
+Label/Tag refers to the static properties of sensors, equipment or other types of data collection devices, which do not change with time, such as device model, color, fixed location of the device, etc. The data type can be any type. Although static, TDengine allows users to add, delete or update tag values at any time. Unlike the collected metric data, the amount of tag data stored does not change over time. In the meters example, `location` and `groupid` are the tags.
## Data Collection Point
-Data Collection Point (DCP) refers to hardware or software that collects metrics based on preset time periods or triggered by events. A data collection point can collect one or multiple metrics, but these metrics are collected at the same time and have the same time stamp. For some complex equipment, there are often multiple data collection points, and the sampling rate of each collection point may be different, and fully independent. For example, for a car, there could be a data collection point to collect GPS position metrics, a data collection point to collect engine status metrics, and a data collection point to collect the environment metrics inside the car. So in this example the car would have three data collection points.
+Data Collection Point (DCP) refers to hardware or software that collects metrics based on preset time periods or triggered by events. A data collection point can collect one or multiple metrics, but these metrics are collected at the same time and have the same timestamp. For some complex equipment, there are often multiple data collection points, and the sampling rate of each collection point may be different, and fully independent. For example, for a car, there could be a data collection point to collect GPS position metrics, a data collection point to collect engine status metrics, and a data collection point to collect the environment metrics inside the car. So in this example the car would have three data collection points. In the smart meters example, d1001, d1002, d1003, and d1004 are the data collection points.
## Table
Since time-series data is most likely to be structured data, TDengine adopts the traditional relational database model to process them with a short learning curve. You need to create a database, create tables, then insert data points and execute queries to explore the data.
-To make full use of time-series data characteristics, TDengine adopts a strategy of "**One Table for One Data Collection Point**". TDengine requires the user to create a table for each data collection point (DCP) to store collected time-series data. For example, if there are over 10 million smart meters, it means 10 million tables should be created. For the table above, 4 tables should be created for devices D1001, D1002, D1003, and D1004 to store the data collected. This design has several benefits:
+To make full use of time-series data characteristics, TDengine adopts a strategy of "**One Table for One Data Collection Point**". TDengine requires the user to create a table for each data collection point (DCP) to store collected time-series data. For example, if there are over 10 million smart meters, it means 10 million tables should be created. For the table above, 4 tables should be created for devices d1001, d1002, d1003, and d1004 to store the data collected. This design has several benefits:
1. Since the metric data from different DCP are fully independent, the data source of each DCP is unique, and a table has only one writer. In this way, data points can be written in a lock-free manner, and the writing speed can be greatly improved.
2. For a DCP, the metric data generated by DCP is ordered by timestamp, so the write operation can be implemented by simple appending, which further greatly improves the data writing speed.
3. The metric data from a DCP is continuously stored, block by block. If you read data for a period of time, it can greatly reduce random read operations and improve read and query performance by orders of magnitude.
4. Inside a data block for a DCP, columnar storage is used, and different compression algorithms are used for different data types. Metrics generally don't vary as significantly between themselves over a time range as compared to other metrics, which allows for a higher compression rate.
-If the metric data of multiple DCPs are traditionally written into a single table, due to uncontrollable network delays, the timing of the data from different DCPs arriving at the server cannot be guaranteed, write operations must be protected by locks, and metric data from one DCP cannot be guaranteed to be continuously stored together. ** One table for one data collection point can ensure the best performance of insert and query of a single data collection point to the greatest possible extent.**
+If the metric data of multiple DCPs are traditionally written into a single table, due to uncontrollable network delays, the timing of the data from different DCPs arriving at the server cannot be guaranteed, write operations must be protected by locks, and metric data from one DCP cannot be guaranteed to be continuously stored together. **One table for one data collection point can ensure the best performance of insert and query of a single data collection point to the greatest possible extent.**
-TDengine suggests using DCP ID as the table name (like D1001 in the above table). Each DCP may collect one or multiple metrics (like the current, voltage, phase as above). Each metric has a corresponding column in the table. The data type for a column can be int, float, string and others. In addition, the first column in the table must be a timestamp. TDengine uses the time stamp as the index, and won’t build the index on any metrics stored. Column wise storage is used.
+TDengine suggests using DCP ID as the table name (like d1001 in the above table). Each DCP may collect one or multiple metrics (like the `current`, `voltage`, `phase` as above). Each metric has a corresponding column in the table. The data type for a column can be int, float, string and others. In addition, the first column in the table must be a timestamp. TDengine uses the timestamp as the index, and won’t build the index on any metrics stored. Column wise storage is used.
Complex devices, such as connected cars, may have multiple DCPs. In this case, multiple tables are created for a single device, one table per DCP.
@@ -137,7 +136,7 @@ The design of one table for one data collection point will require a huge number
STable is a template for a type of data collection point. A STable contains a set of data collection points (tables) that have the same schema or data structure, but with different static attributes (tags). To describe a STable, in addition to defining the table structure of the metrics, it is also necessary to define the schema of its tags. The data type of tags can be int, float, string, and there can be multiple tags, which can be added, deleted, or modified afterward. If the whole system has N different types of data collection points, N STables need to be established.
-In the design of TDengine, **a table is used to represent a specific data collection point, and STable is used to represent a set of data collection points of the same type**.
+In the design of TDengine, **a table is used to represent a specific data collection point, and STable is used to represent a set of data collection points of the same type**. In the smart meters example, we can create a super table named `meters`.
## Subtable
@@ -156,11 +155,20 @@ The relationship between a STable and the subtables created based on this STable
Queries can be executed on both a table (subtable) and a STable. For a query on a STable, TDengine will treat the data in all its subtables as a whole data set for processing. TDengine will first find the subtables that meet the tag filter conditions, then scan the time-series data of these subtables to perform aggregation operation, which reduces the number of data sets to be scanned which in turn greatly improves the performance of data aggregation across multiple DCPs. In essence, querying a supertable is a very efficient aggregate query on multiple DCPs of the same type.
-In TDengine, it is recommended to use a subtable instead of a regular table for a DCP.
+In TDengine, it is recommended to use a subtable instead of a regular table for a DCP. In the smart meters example, we can create subtables like d1001, d1002, d1003, and d1004 under super table `meters`.
+
+To better understand the data model using metrics, tags, super table and subtable, please refer to the diagram below which demonstrates the data model of the smart meters example.
+
+
+
+
+
+Figure 1. Meters Data Model Diagram
+
## Database
-A database is a collection of tables. TDengine allows a running instance to have multiple databases, and each database can be configured with different storage policies. The [characteristics of time-series data](https://www.taosdata.com/blog/2019/07/09/86.html) from different data collection points may be different. Characteristics include collection frequency, retention policy and others which determine how you create and configure the database. For e.g. days to keep, number of replicas, data block size, whether data updates are allowed and other configurable parameters would be determined by the characteristics of your data and your business requirements. In order for TDengine to work with maximum efficiency in various scenarios, TDengine recommends that STables with different data characteristics be created in different databases.
+A database is a collection of tables. TDengine allows a running instance to have multiple databases, and each database can be configured with different storage policies. The [characteristics of time-series data](https://tdengine.com/tsdb/characteristics-of-time-series-data/) from different data collection points may be different. Characteristics include collection frequency, retention policy and others which determine how you create and configure the database. For e.g. days to keep, number of replicas, data block size, whether data updates are allowed and other configurable parameters would be determined by the characteristics of your data and your business requirements. In order for TDengine to work with maximum efficiency in various scenarios, TDengine recommends that STables with different data characteristics be created in different databases.
In a database, there can be one or more STables, but a STable belongs to only one database. All tables owned by a STable are stored in only one database.
@@ -170,4 +178,4 @@ FQDN (Fully Qualified Domain Name) is the full domain name of a specific compute
Each node of a TDengine cluster is uniquely identified by an End Point, which consists of an FQDN and a Port, such as h1.tdengine.com:6030. In this way, when the IP changes, we can still use the FQDN to dynamically find the node without changing any configuration of the cluster. In addition, FQDN is used to facilitate unified access to the same cluster from the Intranet and the Internet.
-TDengine does not recommend using an IP address to access the cluster. FQDN is recommended for cluster management.
+TDengine does not recommend using an IP address to access the cluster. FQDN is recommended for cluster management.
diff --git a/docs/en/04-concept/supertable.webp b/docs/en/04-concept/supertable.webp
new file mode 100644
index 0000000000000000000000000000000000000000..764b8f3de7ee92a103b2fcd0e75c03773af5ee37
Binary files /dev/null and b/docs/en/04-concept/supertable.webp differ
diff --git a/docs/en/05-get-started/01-docker.md b/docs/en/05-get-started/01-docker.md
index 32eee6b942e9fe315b7fdc868678254398fb4dad..66f7d5d5941866c803f05cd825ca8cc2347f9a52 100644
--- a/docs/en/05-get-started/01-docker.md
+++ b/docs/en/05-get-started/01-docker.md
@@ -13,7 +13,7 @@ If Docker is already installed on your computer, run the following command:
docker run -d -p 6030:6030 -p 6041:6041 -p 6043-6049:6043-6049 -p 6043-6049:6043-6049/udp tdengine/tdengine
```
-Note that TDengine Server uses TCP port 6030. Port 6041 is used by taosAdapter for the REST API service. Ports 6043 through 6049 are used by taosAdapter for other connectors. You can open these ports as needed.
+Note that TDengine Server 3.0 uses TCP port 6030. Port 6041 is used by taosAdapter for the REST API service. Ports 6043 through 6049 are used by taosAdapter for other connectors. You can open these ports as needed.
Run the following command to ensure that your container is running:
@@ -21,7 +21,7 @@ Run the following command to ensure that your container is running:
docker ps
```
-Enter the container and open the bash shell:
+Enter the container and open the `bash` shell:
```shell
docker exec -it bash
@@ -31,68 +31,68 @@ You can now access TDengine or run other Linux commands.
Note: For information about installing docker, see the [official documentation](https://docs.docker.com/get-docker/).
-## Insert Data into TDengine
-
-You can use the `taosBenchmark` tool included with TDengine to write test data into your deployment.
+## Open the TDengine CLI
-To do so, run the following command:
+On the container, run the following command to open the TDengine CLI:
- ```bash
- $ taosBenchmark
-
- ```
+```
+$ taos
- This command creates the `meters` supertable in the `test` database. In the `meters` supertable, it then creates 10,000 subtables named `d0` to `d9999`. Each table has 10,000 rows and each row has four columns: `ts`, `current`, `voltage`, and `phase`. The timestamps of the data in these columns range from 2017-07-14 10:40:00 000 to 2017-07-14 10:40:09 999. Each table is randomly assigned a `groupId` tag from 1 to ten and a `location` tag of either `California.SanFrancisco` or `California.SanDiego`.
+taos>
- The `taosBenchmark` command creates a deployment with 100 million data points that you can use for testing purposes. The time required depends on the hardware specifications of the local system.
+```
- You can customize the test deployment that taosBenchmark creates by specifying command-line parameters. For information about command-line parameters, run the `taosBenchmark --help` command. For more information about taosBenchmark, see [taosBenchmark](/reference/taosbenchmark).
+## Test data insert performance
-## Open the TDengine CLI
+After your TDengine Server is running normally, you can run the taosBenchmark utility to test its performance:
-On the container, run the following command to open the TDengine CLI:
+Start TDengine service and execute `taosBenchmark` (formerly named `taosdemo`) in a Linux or Windows terminal.
+```bash
+taosBenchmark
```
-$ taos
-taos>
+This command creates the `meters` supertable in the `test` database. In the `meters` supertable, it then creates 10,000 subtables named `d0` to `d9999`. Each table has 10,000 rows and each row has four columns: `ts`, `current`, `voltage`, and `phase`. The timestamps of the data in these columns range from 2017-07-14 10:40:00 000 to 2017-07-14 10:40:09 999. Each table is randomly assigned a `groupId` tag from 1 to 10 and a `location` tag of either `Campbell`, `Cupertino`, `Los Angeles`, `Mountain View`, `Palo Alto`, `San Diego`, `San Francisco`, `San Jose`, `Santa Clara` or `Sunnyvale`.
-```
+The `taosBenchmark` command creates a deployment with 100 million data points that you can use for testing purposes. The time required to create the deployment depends on your hardware. On most modern servers, the deployment is created in ten to twenty seconds.
-## Query Data in TDengine
+You can customize the test deployment that taosBenchmark creates by specifying command-line parameters. For information about command-line parameters, run the `taosBenchmark --help` command. For more information about taosBenchmark, see [taosBenchmark](../../reference/taosbenchmark).
-After using taosBenchmark to create your test deployment, you can run queries in the TDengine CLI to test its performance. For example:
+## Test data query performance
-From the TDengine CLI query the number of rows in the `meters` supertable:
+After using `taosBenchmark` to create your test deployment, you can run queries in the TDengine CLI to test its performance:
+
+From the TDengine CLI (taos) query the number of rows in the `meters` supertable:
```sql
-select count(*) from test.meters;
+SELECT COUNT(*) FROM test.meters;
```
Query the average, maximum, and minimum values of all 100 million rows of data:
```sql
-select avg(current), max(voltage), min(phase) from test.meters;
+SELECT AVG(current), MAX(voltage), MIN(phase) FROM test.meters;
```
Query the number of rows whose `location` tag is `San Francisco`:
```sql
-select count(*) from test.meters where location="San Francisco";
+SELECT COUNT(*) FROM test.meters WHERE location = "San Francisco";
```
Query the average, maximum, and minimum values of all rows whose `groupId` tag is `10`:
```sql
-select avg(current), max(voltage), min(phase) from test.meters where groupId=10;
+SELECT AVG(current), MAX(voltage), MIN(phase) FROM test.meters WHERE groupId = 10;
```
-Query the average, maximum, and minimum values for table `d10` in 1 second intervals:
+Query the average, maximum, and minimum values for table `d10` in 10 second intervals:
```sql
-select first(ts), avg(current), max(voltage), min(phase) from test.d10 interval(1s);
+SELECT FIRST(ts), AVG(current), MAX(voltage), MIN(phase) FROM test.d10 INTERVAL(10s);
```
-In the query above you are selecting the first timestamp (ts) in the interval, another way of selecting this would be _wstart which will give the start of the time window. For more information about windowed queries, see [Time-Series Extensions](../../taos-sql/distinguished/).
+
+In the query above you are selecting the first timestamp (ts) in the interval, another way of selecting this would be `_wstart` which will give the start of the time window. For more information about windowed queries, see [Time-Series Extensions](../../taos-sql/distinguished/).
## Additional Information
diff --git a/docs/en/05-get-started/03-package.md b/docs/en/05-get-started/03-package.md
index 88096a759c58529d4150c0a750a4354a88da988f..7257fccc80ea8df05449e7e29842d89794d702bf 100644
--- a/docs/en/05-get-started/03-package.md
+++ b/docs/en/05-get-started/03-package.md
@@ -9,23 +9,24 @@ import PkgListV3 from "/components/PkgListV3";
For information about installing TDengine on Docker, see [Quick Install on Docker](../../get-started/docker). If you want to view the source code, build TDengine yourself, or contribute to the project, see the [TDengine GitHub repository](https://github.com/taosdata/TDengine).
-The full package of TDengine includes the TDengine Server (`taosd`), TDengine Client (`taosc`), taosAdapter for connecting with third-party systems and providing a RESTful interface, a command-line interface, and some tools. Note that taosAdapter supports Linux only. In addition to connectors for multiple languages, TDengine also provides a [REST API](../../reference/rest-api) through [taosAdapter](../../reference/taosadapter).
+The full package of TDengine includes the TDengine Server (`taosd`), TDengine Client (`taosc`), taosAdapter for connecting with third-party systems and providing a RESTful interface, a command-line interface (CLI, taos), and some tools. Note that taosAdapter supports Linux only. In addition to connectors for multiple languages, TDengine also provides a [REST API](../../reference/rest-api) through [taosAdapter](../../reference/taosadapter).
-The standard server installation package includes `taos`, `taosd`, `taosAdapter`, `taosBenchmark`, and sample code. You can also download a lite package that includes only `taosd` and the C/C++ connector.
+The standard server installation package includes `taos`, `taosd`, `taosAdapter`, `taosBenchmark`, and sample code. You can also download the Lite package that includes only `taosd` and the C/C++ connector.
-The TDengine Community Edition is released as .deb and .rpm packages. The .deb package can be installed on Debian, Ubuntu, and derivative systems. The .rpm package can be installed on CentOS, RHEL, SUSE, and derivative systems. A .tar.gz package is also provided for enterprise customers, and you can install TDengine over `apt-get` as well. The .tar.tz package includes `taosdump` and the TDinsight installation script. If you want to use these utilities with the .deb or .rpm package, download and install taosTools separately. TDengine can also be installed on 64-bit Windows servers.
+The TDengine Community Edition is released as Deb and RPM packages. The Deb package can be installed on Debian, Ubuntu, and derivative systems. The RPM package can be installed on CentOS, RHEL, SUSE, and derivative systems. A .tar.gz package is also provided for enterprise customers, and you can install TDengine over `apt-get` as well. The .tar.gz package includes `taosdump` and the TDinsight installation script. If you want to use these utilities with the Deb or RPM package, download and install taosTools separately. TDengine can also be installed on 64-bit Windows.
## Installation
-1. Download the .deb installation package.
-
+1. Download the Deb installation package.
+
2. In the directory where the package is located, use `dpkg` to install the package:
+> Please replace `<version>` with the corresponding version of the package downloaded
+
```bash
-# Enter the name of the package that you downloaded.
sudo dpkg -i TDengine-server--Linux-x64.deb
```
@@ -34,11 +35,12 @@ sudo dpkg -i TDengine-server--Linux-x64.deb
1. Download the .rpm installation package.
-
+
2. In the directory where the package is located, use rpm to install the package:
+> Please replace `<version>` with the corresponding version of the package downloaded
+
```bash
-# Enter the name of the package that you downloaded.
sudo rpm -ivh TDengine-server--Linux-x64.rpm
```
@@ -47,11 +49,12 @@ sudo rpm -ivh TDengine-server--Linux-x64.rpm
1. Download the .tar.gz installation package.
-
+
2. In the directory where the package is located, use `tar` to decompress the package:
+> Please replace `<version>` with the corresponding version of the package downloaded
+
```bash
-# Enter the name of the package that you downloaded.
tar -zxvf TDengine-server--Linux-x64.tar.gz
```
@@ -96,23 +99,23 @@ sudo apt-get install tdengine
This installation method is supported only for Debian and Ubuntu.
::::
-
+
-Note: TDengine only supports Windows Server 2016/2019 and windows 10/11 system versions on the windows platform.
+Note: TDengine only supports Windows Server 2016/2019 and Windows 10/11 on the Windows platform.
1. Download the Windows installation package.
-
+
2. Run the downloaded package to install TDengine.
:::info
-For information about TDengine releases, see [Release History](../../releases).
+For information about TDengine releases, see [Release History](../../releases).
:::
:::note
-On the first node in your TDengine cluster, leave the `Enter FQDN:` prompt blank and press **Enter**. On subsequent nodes, you can enter the end point of the first dnode in the cluster. You can also configure this setting after you have finished installing TDengine.
+On the first node in your TDengine cluster, leave the `Enter FQDN:` prompt blank and press **Enter**. On subsequent nodes, you can enter the endpoint of the first dnode in the cluster. You can also configure this setting after you have finished installing TDengine.
:::
@@ -147,7 +150,7 @@ Active: inactive (dead)
After confirming that TDengine is running, run the `taos` command to access the TDengine CLI.
-The following `systemctl` commands can help you manage TDengine:
+The following `systemctl` commands can help you manage TDengine service:
- Start TDengine Server: `systemctl start taosd`
@@ -159,7 +162,7 @@ The following `systemctl` commands can help you manage TDengine:
:::info
-- The `systemctl` command requires _root_ privileges. If you are not logged in as the `root` user, use the `sudo` command.
+- The `systemctl` command requires _root_ privileges. If you are not logged in as the _root_ user, use the `sudo` command.
- The `systemctl stop taosd` command does not instantly stop TDengine Server. The server is stopped only after all data in memory is flushed to disk. The time required depends on the cache size.
- If your system does not include `systemd`, you can run `/usr/local/taos/bin/taosd` to start TDengine manually.
@@ -174,23 +177,9 @@ After the installation is complete, run `C:\TDengine\taosd.exe` to start TDengin
-## Test data insert performance
-
-After your TDengine Server is running normally, you can run the taosBenchmark utility to test its performance:
-
-```bash
-taosBenchmark
-```
-
-This command creates the `meters` supertable in the `test` database. In the `meters` supertable, it then creates 10,000 subtables named `d0` to `d9999`. Each table has 10,000 rows and each row has four columns: `ts`, `current`, `voltage`, and `phase`. The timestamps of the data in these columns range from 2017-07-14 10:40:00 000 to 2017-07-14 10:40:09 999. Each table is randomly assigned a `groupId` tag from 1 to 10 and a `location` tag of either `Campbell`, `Cupertino`, `Los Angeles`, `Mountain View`, `Palo Alto`, `San Diego`, `San Francisco`, `San Jose`, `Santa Clara` or `Sunnyvale`.
+## Command Line Interface (CLI)
-The `taosBenchmark` command creates a deployment with 100 million data points that you can use for testing purposes. The time required to create the deployment depends on your hardware. On most modern servers, the deployment is created in less than a minute.
-
-You can customize the test deployment that taosBenchmark creates by specifying command-line parameters. For information about command-line parameters, run the `taosBenchmark --help` command. For more information about taosBenchmark, see [taosBenchmark](../../reference/taosbenchmark).
-
-## Command Line Interface
-
-You can use the TDengine CLI to monitor your TDengine deployment and execute ad hoc queries. To open the CLI, run the following command:
+You can use the TDengine CLI to monitor your TDengine deployment and execute ad hoc queries. To open the CLI, you can execute `taos` in the Linux terminal where TDengine is installed, or you can run `taos.exe` in the `C:\TDengine` directory of the Windows terminal where TDengine is installed to start the TDengine command line.
```bash
taos
@@ -205,52 +194,71 @@ taos>
For example, you can create and delete databases and tables and run all types of queries. Each SQL command must be end with a semicolon (;). For example:
```sql
-create database demo;
-use demo;
-create table t (ts timestamp, speed int);
-insert into t values ('2019-07-15 00:00:00', 10);
-insert into t values ('2019-07-15 01:00:00', 20);
-select * from t;
+CREATE DATABASE demo;
+USE demo;
+CREATE TABLE t (ts TIMESTAMP, speed INT);
+INSERT INTO t VALUES ('2019-07-15 00:00:00', 10);
+INSERT INTO t VALUES ('2019-07-15 01:00:00', 20);
+SELECT * FROM t;
+
ts | speed |
========================================
2019-07-15 00:00:00.000 | 10 |
2019-07-15 01:00:00.000 | 20 |
+
Query OK, 2 row(s) in set (0.003128s)
```
You can also can monitor the deployment status, add and remove user accounts, and manage running instances. You can run the TDengine CLI on either Linux or Windows machines. For more information, see [TDengine CLI](../../reference/taos-shell/).
-
+
+## Test data insert performance
+
+After your TDengine Server is running normally, you can run the taosBenchmark utility to test its performance:
+
+Start TDengine service and execute `taosBenchmark` (formerly named `taosdemo`) in a Linux or Windows terminal.
+
+```bash
+taosBenchmark
+```
+
+This command creates the `meters` supertable in the `test` database. In the `meters` supertable, it then creates 10,000 subtables named `d0` to `d9999`. Each table has 10,000 rows and each row has four columns: `ts`, `current`, `voltage`, and `phase`. The timestamps of the data in these columns range from 2017-07-14 10:40:00 000 to 2017-07-14 10:40:09 999. Each table is randomly assigned a `groupId` tag from 1 to 10 and a `location` tag of either `Campbell`, `Cupertino`, `Los Angeles`, `Mountain View`, `Palo Alto`, `San Diego`, `San Francisco`, `San Jose`, `Santa Clara` or `Sunnyvale`.
+
+The `taosBenchmark` command creates a deployment with 100 million data points that you can use for testing purposes. The time required to create the deployment depends on your hardware. On most modern servers, the deployment is created in ten to twenty seconds.
+
+You can customize the test deployment that taosBenchmark creates by specifying command-line parameters. For information about command-line parameters, run the `taosBenchmark --help` command. For more information about taosBenchmark, see [taosBenchmark](../../reference/taosbenchmark).
+
## Test data query performance
-After using taosBenchmark to create your test deployment, you can run queries in the TDengine CLI to test its performance:
+After using `taosBenchmark` to create your test deployment, you can run queries in the TDengine CLI to test its performance:
-From the TDengine CLI query the number of rows in the `meters` supertable:
+From the TDengine CLI (taos) query the number of rows in the `meters` supertable:
```sql
-select count(*) from test.meters;
+SELECT COUNT(*) FROM test.meters;
```
Query the average, maximum, and minimum values of all 100 million rows of data:
```sql
-select avg(current), max(voltage), min(phase) from test.meters;
+SELECT AVG(current), MAX(voltage), MIN(phase) FROM test.meters;
```
Query the number of rows whose `location` tag is `San Francisco`:
```sql
-select count(*) from test.meters where location="San Francisco";
+SELECT COUNT(*) FROM test.meters WHERE location = "San Francisco";
```
Query the average, maximum, and minimum values of all rows whose `groupId` tag is `10`:
```sql
-select avg(current), max(voltage), min(phase) from test.meters where groupId=10;
+SELECT AVG(current), MAX(voltage), MIN(phase) FROM test.meters WHERE groupId = 10;
```
-Query the average, maximum, and minimum values for table `d10` in 1 second intervals:
+Query the average, maximum, and minimum values for table `d10` in 10 second intervals:
```sql
-select first(ts), avg(current), max(voltage), min(phase) from test.d10 interval(1s);
+SELECT FIRST(ts), AVG(current), MAX(voltage), MIN(phase) FROM test.d10 INTERVAL(10s);
```
-In the query above you are selecting the first timestamp (ts) in the interval, another way of selecting this would be _wstart which will give the start of the time window. For more information about windowed queries, see [Time-Series Extensions](../../taos-sql/distinguished/).
+
+In the query above you are selecting the first timestamp (ts) in the interval, another way of selecting this would be `_wstart` which will give the start of the time window. For more information about windowed queries, see [Time-Series Extensions](../../taos-sql/distinguished/).
diff --git a/docs/en/07-develop/01-connect/index.md b/docs/en/07-develop/01-connect/index.md
index 20537064216f812990414ffd7260dbda64c56251..61eb8f04eb3fb8cea5096b321105fa9e88722bda 100644
--- a/docs/en/07-develop/01-connect/index.md
+++ b/docs/en/07-develop/01-connect/index.md
@@ -1,6 +1,7 @@
---
-title: Connect
-description: "This document explains how to establish connections to TDengine and how to install and use TDengine connectors."
+sidebar_label: Connect
+title: Connect to TDengine
+description: "How to establish connections to TDengine and how to install and use TDengine connectors."
---
import Tabs from "@theme/Tabs";
@@ -279,6 +280,6 @@ Prior to establishing connection, please make sure TDengine is already running a
:::tip
-If the connection fails, in most cases it's caused by improper configuration for FQDN or firewall. Please refer to the section "Unable to establish connection" in [FAQ](https://docs.taosdata.com/train-faq/faq).
+If the connection fails, in most cases it's caused by improper configuration for FQDN or firewall. Please refer to the section "Unable to establish connection" in [FAQ](https://docs.tdengine.com/train-faq/faq).
:::
diff --git a/docs/en/07-develop/03-insert-data/05-high-volume.md b/docs/en/07-develop/03-insert-data/05-high-volume.md
new file mode 100644
index 0000000000000000000000000000000000000000..9ea0c884473e670d0624cb3be737830f46bedc38
--- /dev/null
+++ b/docs/en/07-develop/03-insert-data/05-high-volume.md
@@ -0,0 +1,441 @@
+---
+sidebar_label: High Performance Writing
+title: High Performance Writing
+---
+
+import Tabs from "@theme/Tabs";
+import TabItem from "@theme/TabItem";
+
+This chapter introduces how to write data into TDengine with high throughput.
+
+## How to achieve high performance data writing
+
+To achieve high performance writing, there are a few aspects to consider. In the following sections we will describe these important factors in achieving high performance writing.
+
+### Application Program
+
+From the perspective of application program, you need to consider:
+
+1. The data size of each single write, also known as batch size. Generally speaking, higher batch size generates better writing performance. However, once the batch size is over a specific value, you will not get any additional benefit anymore. When using SQL to write into TDengine, it's better to put as much data as possible in a single SQL statement. The maximum SQL length supported by TDengine is 1,048,576 bytes, i.e. 1 MB.
+
+2. The number of concurrent connections. Normally more connections can get better result. However, once the number of connections exceeds the processing ability of the server side, the performance may downgrade.
+
+3. The distribution of data to be written across tables or sub-tables. Writing to single table in one batch is more efficient than writing to multiple tables in one batch.
+
+4. Data Writing Protocol.
+   - Parameter binding mode is more efficient than SQL because it doesn't have the cost of parsing SQL.
+   - Writing to known existing tables is more efficient than writing to uncertain tables in automatic creating mode because the latter needs to check whether the table exists or not before actually writing data into it.
+   - Writing in SQL is more efficient than writing in schemaless mode because schemaless writing creates tables automatically and may alter table schema.
+
+Application programs need to take care of the above factors and try to take advantage of them. The application program should write to a single table in each write batch. The batch size needs to be tuned to a proper value on a specific system. The number of concurrent connections needs to be tuned to a proper value too to achieve the best writing throughput.
+
+### Data Source
+
+Application programs need to read data from data source then write into TDengine. If you meet one or more of below situations, you need to setup message queues between the threads for reading from data source and the threads for writing into TDengine.
+
+1. There are multiple data sources, the data generation speed of each data source is much slower than the speed of single writing thread. In this case, the purpose of message queues is to consolidate the data from multiple data sources together to increase the batch size of single write.
+2. The speed of data generation from single data source is much higher than the speed of single writing thread. The purpose of message queue in this case is to provide buffer so that data is not lost and multiple writing threads can get data from the buffer.
+3. The data for single table are from multiple data source. In this case the purpose of message queues is to combine the data for single table together to improve the write efficiency.
+
+If the data source is Kafka, then the application program is a consumer of Kafka, and you can benefit from some Kafka features to achieve high performance writing:
+
+1. Put the data for a table in single partition of single topic so that it's easier to put the data for each table together and write in batch
+2. Subscribe multiple topics to accumulate data together.
+3. Add more consumers to gain more concurrency and throughput.
+4. Increase the size of single fetch to increase the size of write batch.
+
+### Tune TDengine
+
+On the server side, database configuration parameter `vgroups` needs to be set carefully to maximize the system performance. If it's set too low, the system capability can't be utilized fully; if it's set too big, unnecessary resource competition may be produced. A normal recommendation for the `vgroups` parameter is twice the number of CPU cores. However, depending on the actual system resources, it may still need to be tuned.
+
+For more configuration parameters, please refer to [Database Configuration](../../../taos-sql/database) and [Server Configuration](../../../reference/config).
+
+## Sample Programs
+
+This section will introduce the sample programs to demonstrate how to write into TDengine with high performance.
+
+### Scenario
+
+Below is the scenario for the sample programs of high performance writing.
+
+- Application program reads data from data source, the sample program simulates a data source by generating data
+- The speed of single writing thread is much slower than the speed of generating data, so the program starts multiple writing threads while each thread establish a connection to TDengine and each thread has a message queue of fixed size.
+- Application program maps the received data to different writing threads based on table name to make sure all the data for each table is always processed by a specific writing thread.
+- Each writing thread writes the received data into TDengine once the message queue becomes empty or the read data meets a threshold.
+
+
+
+### Sample Programs
+
+The sample programs listed in this section are based on the scenario described previously. If your scenario is different, please try to adjust the code based on the principles described in this chapter.
+
+The sample programs assume the source data is for all the different sub tables in the same super table (meters). The super table has been created before the sample program starts writing data. Sub tables are created automatically according to received data. If there are multiple super tables in your case, please try to adjust the part of creating tables automatically.
+
+
+
+
+**Program Inventory**
+
+| Class | Description |
+| ---------------- | ----------------------------------------------------------------------------------------------------- |
+| FastWriteExample | Main Program |
+| ReadTask | Read data from simulated data source and put into a queue according to the hash value of table name |
+| WriteTask        | Read data from queue, compose a write batch and write into TDengine                                     |
+| MockDataSource | Generate data for some sub tables of super table meters |
+| SQLWriter | WriteTask uses this class to compose SQL, create table automatically, check SQL length and write data |
+| StmtWriter | Write in Parameter binding mode (Not finished yet) |
+| DataBaseMonitor | Calculate the writing speed and output on console every 10 seconds |
+
+Below is the list of complete code of the classes in above table and more detailed description.
+
+
+FastWriteExample
+The main Program is responsible for:
+
+1. Create message queues
+2. Start writing threads
+3. Start reading threads
+4. Output writing speed every 10 seconds
+
+The main program provides 4 parameters for tuning:
+
+1. The number of reading threads, default value is 1
+2. The number of writing threads, default value is 2
+3. The total number of tables in the generated data, default value is 1000. These tables are distributed evenly across all writing threads. If the number of tables is very big, it will cost much time to firstly create these tables.
+4. The batch size of single write, default value is 3,000
+
+The capacity of the message queue also impacts performance and can be tuned by modifying the program. Normally it's always better to have a larger message queue. A larger message queue means lower possibility of being blocked when enqueueing and higher throughput. But a larger message queue consumes more memory space. The default value used in the sample programs is already big enough.
+
+```java
+{{#include docs/examples/java/src/main/java/com/taos/example/highvolume/FastWriteExample.java}}
+```
+
+
+
+
+ReadTask
+
+ReadTask reads data from data source. Each ReadTask is associated with a simulated data source, each data source generates data for a group of specific tables, and the data of any table is only generated from a single specific data source.
+
+ReadTask puts data in message queue in blocking mode. That means, the putting operation is blocked if the message queue is full.
+
+```java
+{{#include docs/examples/java/src/main/java/com/taos/example/highvolume/ReadTask.java}}
+```
+
+
+
+
+WriteTask
+
+```java
+{{#include docs/examples/java/src/main/java/com/taos/example/highvolume/WriteTask.java}}
+```
+
+
+
+
+
+MockDataSource
+
+```java
+{{#include docs/examples/java/src/main/java/com/taos/example/highvolume/MockDataSource.java}}
+```
+
+
+
+
+
+SQLWriter
+
+SQLWriter class encapsulates the logic of composing SQL and writing data. Please be noted that the tables have not been created before writing, but are created automatically when catching the exception of table doesn't exist. For other exceptions caught, the SQL which caused the exception are logged for you to debug.
+
+```java
+{{#include docs/examples/java/src/main/java/com/taos/example/highvolume/SQLWriter.java}}
+```
+
+
+
+
+
+DataBaseMonitor
+
+```java
+{{#include docs/examples/java/src/main/java/com/taos/example/highvolume/DataBaseMonitor.java}}
+```
+
+
+
+**Steps to Launch**
+
+
+Launch Java Sample Program
+
+You need to set environment variable `TDENGINE_JDBC_URL` before launching the program. If TDengine Server is setup on localhost, then the default value for user name, password and port can be used, like below:
+
+```
+TDENGINE_JDBC_URL="jdbc:TAOS://localhost:6030?user=root&password=taosdata"
+```
+
+**Launch in IDE**
+
+1. Clone TDengine repository
+ ```
+ git clone git@github.com:taosdata/TDengine.git --depth 1
+ ```
+2. Use IDE to open `docs/examples/java` directory
+3. Configure environment variable `TDENGINE_JDBC_URL`, you can also configure it before launching the IDE, if so you can skip this step.
+4. Run class `com.taos.example.highvolume.FastWriteExample`
+
+**Launch on server**
+
+If you want to launch the sample program on a remote server, please follow below steps:
+
+1. Package the sample programs. Execute below command under directory `TDengine/docs/examples/java` :
+ ```
+ mvn package
+ ```
+2. Create `examples/java` directory on the server
+ ```
+ mkdir -p examples/java
+ ```
+3. Copy dependencies (below commands assume you are working on a local Windows host and try to launch on a remote Linux host)
+ - Copy dependent packages
+ ```
    scp -r .\target\lib <user>@<host>:~/examples/java
+ ```
+ - Copy the jar of sample programs
+ ```
    scp -r .\target\javaexample-1.0.jar <user>@<host>:~/examples/java
+ ```
+4. Configure environment variable
+ Edit `~/.bash_profile` or `~/.bashrc` and add below:
+
+ ```
+ export TDENGINE_JDBC_URL="jdbc:TAOS://localhost:6030?user=root&password=taosdata"
+ ```
+
+ If your TDengine server is not deployed on localhost or doesn't use default port, you need to change the above URL to correct value in your environment.
+
+5. Launch the sample program
+
+ ```
+ java -classpath lib/*:javaexample-1.0.jar com.taos.example.highvolume.FastWriteExample
+ ```
+
+6. The sample program doesn't exit unless you press CTRL + C to terminate it.
+ Below is the output of running on a server of 16 cores, 64GB memory and SSD hard disk.
+
+ ```
+ root@vm85$ java -classpath lib/*:javaexample-1.0.jar com.taos.example.highvolume.FastWriteExample 2 12
+ 18:56:35.896 [main] INFO c.t.e.highvolume.FastWriteExample - readTaskCount=2, writeTaskCount=12 tableCount=1000 maxBatchSize=3000
+ 18:56:36.011 [WriteThread-0] INFO c.taos.example.highvolume.WriteTask - started
+ 18:56:36.015 [WriteThread-0] INFO c.taos.example.highvolume.SQLWriter - maxSQLLength=1048576
+ 18:56:36.021 [WriteThread-1] INFO c.taos.example.highvolume.WriteTask - started
+ 18:56:36.022 [WriteThread-1] INFO c.taos.example.highvolume.SQLWriter - maxSQLLength=1048576
+ 18:56:36.031 [WriteThread-2] INFO c.taos.example.highvolume.WriteTask - started
+ 18:56:36.032 [WriteThread-2] INFO c.taos.example.highvolume.SQLWriter - maxSQLLength=1048576
+ 18:56:36.041 [WriteThread-3] INFO c.taos.example.highvolume.WriteTask - started
+ 18:56:36.042 [WriteThread-3] INFO c.taos.example.highvolume.SQLWriter - maxSQLLength=1048576
+ 18:56:36.093 [WriteThread-4] INFO c.taos.example.highvolume.WriteTask - started
+ 18:56:36.094 [WriteThread-4] INFO c.taos.example.highvolume.SQLWriter - maxSQLLength=1048576
+ 18:56:36.099 [WriteThread-5] INFO c.taos.example.highvolume.WriteTask - started
+ 18:56:36.100 [WriteThread-5] INFO c.taos.example.highvolume.SQLWriter - maxSQLLength=1048576
+ 18:56:36.100 [WriteThread-6] INFO c.taos.example.highvolume.WriteTask - started
+ 18:56:36.101 [WriteThread-6] INFO c.taos.example.highvolume.SQLWriter - maxSQLLength=1048576
+ 18:56:36.103 [WriteThread-7] INFO c.taos.example.highvolume.WriteTask - started
+ 18:56:36.104 [WriteThread-7] INFO c.taos.example.highvolume.SQLWriter - maxSQLLength=1048576
+ 18:56:36.105 [WriteThread-8] INFO c.taos.example.highvolume.WriteTask - started
+ 18:56:36.107 [WriteThread-8] INFO c.taos.example.highvolume.SQLWriter - maxSQLLength=1048576
+ 18:56:36.108 [WriteThread-9] INFO c.taos.example.highvolume.WriteTask - started
+ 18:56:36.109 [WriteThread-9] INFO c.taos.example.highvolume.SQLWriter - maxSQLLength=1048576
+ 18:56:36.156 [WriteThread-10] INFO c.taos.example.highvolume.WriteTask - started
+ 18:56:36.157 [WriteThread-11] INFO c.taos.example.highvolume.WriteTask - started
+ 18:56:36.158 [WriteThread-10] INFO c.taos.example.highvolume.SQLWriter - maxSQLLength=1048576
+ 18:56:36.158 [ReadThread-0] INFO com.taos.example.highvolume.ReadTask - started
+ 18:56:36.158 [ReadThread-1] INFO com.taos.example.highvolume.ReadTask - started
+ 18:56:36.158 [WriteThread-11] INFO c.taos.example.highvolume.SQLWriter - maxSQLLength=1048576
+ 18:56:46.369 [main] INFO c.t.e.highvolume.FastWriteExample - count=18554448 speed=1855444
+ 18:56:56.946 [main] INFO c.t.e.highvolume.FastWriteExample - count=39059660 speed=2050521
+ 18:57:07.322 [main] INFO c.t.e.highvolume.FastWriteExample - count=59403604 speed=2034394
+ 18:57:18.032 [main] INFO c.t.e.highvolume.FastWriteExample - count=80262938 speed=2085933
+ 18:57:28.432 [main] INFO c.t.e.highvolume.FastWriteExample - count=101139906 speed=2087696
+ 18:57:38.921 [main] INFO c.t.e.highvolume.FastWriteExample - count=121807202 speed=2066729
+ 18:57:49.375 [main] INFO c.t.e.highvolume.FastWriteExample - count=142952417 speed=2114521
+ 18:58:00.689 [main] INFO c.t.e.highvolume.FastWriteExample - count=163650306 speed=2069788
+ 18:58:11.646 [main] INFO c.t.e.highvolume.FastWriteExample - count=185019808 speed=2136950
+ ```
+
+
+
+
+
+
+**Program Inventory**
+
+The sample program in Python uses multiple processes and cross-process message queues.
+
+| Function/Class | Description |
+| ---------------------------- | --------------------------------------------------------------------------- |
+| main Function | Program entry point, create child processes and message queues |
+| run_monitor_process Function | Create database, super table, calculate writing speed and output to console |
+| run_read_task Function | Read data and distribute to message queues |
+| MockDataSource Class | Simulate data source, return next 1,000 rows of each table |
+| run_write_task Function | Read as much as possible data from message queue and write in batch |
+| SQLWriter Class | Write in SQL and create table automatically |
+| StmtWriter Class | Write in parameter binding mode (not finished yet) |
+
+
+main function
+
+`main` function is responsible for creating message queues and fork child processes, there are 3 kinds of child processes:
+
+1. Monitoring process, which initializes the database and calculates the writing speed
+2. Reading process (n), which reads data from the data source
+3. Writing process (m), which writes data into TDengine
+
+`main` function provides 5 parameters:
+
+1. The number of reading tasks, default value is 1
+2. The number of writing tasks, default value is 1
+3. The number of tables, default value is 1,000
+4. The capacity of message queue, default value is 1,000,000 bytes
+5. The batch size in single write, default value is 3000
+
+```python
+{{#include docs/examples/python/fast_write_example.py:main}}
+```
+
+
+
+
+run_monitor_process
+
+The monitoring process initializes the database and monitors the writing speed.
+
+```python
+{{#include docs/examples/python/fast_write_example.py:monitor}}
+```
+
+
+
+
+
+run_read_task function
+
+Reading process reads data from other data system and distributes to the message queue allocated for it.
+
+```python
+{{#include docs/examples/python/fast_write_example.py:read}}
+```
+
+
+
+
+
+MockDataSource
+
+Below is the simulated data source, we assume table name exists in each generated data.
+
+```python
+{{#include docs/examples/python/mockdatasource.py}}
+```
+
+
+
+
+run_write_task function
+
+Writing process tries to read as much as possible data from message queue and writes in batch.
+
+```python
+{{#include docs/examples/python/fast_write_example.py:write}}
+```
+
+
+
+
+
+SQLWriter class encapsulates the logic of composing SQL and writing data. Please note that the tables are not created in advance; they are created automatically when the "table does not exist" exception is caught. For any other exception caught, the SQL which caused the exception is logged for you to debug. This class also checks the SQL length, and passes the maximum SQL length by parameter maxSQLLength according to the actual TDengine limit.
+
+SQLWriter
+
+```python
+{{#include docs/examples/python/sql_writer.py}}
+```
+
+
+
+**Steps to Launch**
+
+
+
+Launch Sample Program in Python
+
+1. Prerequisites
+
+ - TDengine client driver has been installed
+ - Python3 has been installed, and the version must be >= 3.8
+ - TDengine Python connector `taospy` has been installed
+
+2. Install faster-fifo to replace python builtin multiprocessing.Queue
+
+ ```
+ pip3 install faster-fifo
+ ```
+
+3. Click the "Copy" in the above sample programs to copy `fast_write_example.py`, `sql_writer.py` and `mockdatasource.py`.
+
+4. Execute the program
+
+ ```
+ python3 fast_write_example.py
+ ```
+
+ Below is the output of running on a server of 16 cores, 64GB memory and SSD hard disk.
+
+ ```
+ root@vm85$ python3 fast_write_example.py 8 8
+ 2022-07-14 19:13:45,869 [root] - READ_TASK_COUNT=8, WRITE_TASK_COUNT=8, TABLE_COUNT=1000, QUEUE_SIZE=1000000, MAX_BATCH_SIZE=3000
+ 2022-07-14 19:13:48,882 [root] - WriteTask-0 started with pid 718347
+ 2022-07-14 19:13:48,883 [root] - WriteTask-1 started with pid 718348
+ 2022-07-14 19:13:48,884 [root] - WriteTask-2 started with pid 718349
+ 2022-07-14 19:13:48,884 [root] - WriteTask-3 started with pid 718350
+ 2022-07-14 19:13:48,885 [root] - WriteTask-4 started with pid 718351
+ 2022-07-14 19:13:48,885 [root] - WriteTask-5 started with pid 718352
+ 2022-07-14 19:13:48,886 [root] - WriteTask-6 started with pid 718353
+ 2022-07-14 19:13:48,886 [root] - WriteTask-7 started with pid 718354
+ 2022-07-14 19:13:48,887 [root] - ReadTask-0 started with pid 718355
+ 2022-07-14 19:13:48,888 [root] - ReadTask-1 started with pid 718356
+ 2022-07-14 19:13:48,889 [root] - ReadTask-2 started with pid 718357
+ 2022-07-14 19:13:48,889 [root] - ReadTask-3 started with pid 718358
+ 2022-07-14 19:13:48,890 [root] - ReadTask-4 started with pid 718359
+ 2022-07-14 19:13:48,891 [root] - ReadTask-5 started with pid 718361
+ 2022-07-14 19:13:48,892 [root] - ReadTask-6 started with pid 718364
+ 2022-07-14 19:13:48,893 [root] - ReadTask-7 started with pid 718365
+ 2022-07-14 19:13:56,042 [DataBaseMonitor] - count=6676310 speed=667631.0
+ 2022-07-14 19:14:06,196 [DataBaseMonitor] - count=20004310 speed=1332800.0
+ 2022-07-14 19:14:16,366 [DataBaseMonitor] - count=32290310 speed=1228600.0
+ 2022-07-14 19:14:26,527 [DataBaseMonitor] - count=44438310 speed=1214800.0
+ 2022-07-14 19:14:36,673 [DataBaseMonitor] - count=56608310 speed=1217000.0
+ 2022-07-14 19:14:46,834 [DataBaseMonitor] - count=68757310 speed=1214900.0
+ 2022-07-14 19:14:57,280 [DataBaseMonitor] - count=80992310 speed=1223500.0
+ 2022-07-14 19:15:07,689 [DataBaseMonitor] - count=93805310 speed=1281300.0
+ 2022-07-14 19:15:18,020 [DataBaseMonitor] - count=106111310 speed=1230600.0
+ 2022-07-14 19:15:28,356 [DataBaseMonitor] - count=118394310 speed=1228300.0
+ 2022-07-14 19:15:38,690 [DataBaseMonitor] - count=130742310 speed=1234800.0
+ 2022-07-14 19:15:49,000 [DataBaseMonitor] - count=143051310 speed=1230900.0
+ 2022-07-14 19:15:59,323 [DataBaseMonitor] - count=155276310 speed=1222500.0
+ 2022-07-14 19:16:09,649 [DataBaseMonitor] - count=167603310 speed=1232700.0
+ 2022-07-14 19:16:19,995 [DataBaseMonitor] - count=179976310 speed=1237300.0
+ ```
+
+
+
+:::note
+Don't establish a connection to TDengine in the parent process if using the Python connector in multi-process mode, otherwise all connections in the child processes will be permanently blocked. This is a known issue.
+
+:::
+
+
+
diff --git a/docs/en/07-develop/03-insert-data/highvolume.webp b/docs/en/07-develop/03-insert-data/highvolume.webp
new file mode 100644
index 0000000000000000000000000000000000000000..46dfc74ae3b0043c591ff930c62251da49cae7ad
Binary files /dev/null and b/docs/en/07-develop/03-insert-data/highvolume.webp differ
diff --git a/docs/en/07-develop/07-tmq.mdx b/docs/en/07-develop/07-tmq.mdx
index ceeea64fca91473ea62de404fb9e92c179f7e6d4..17b3f5caa062eaacb4216b7153e899040e702cc1 100644
--- a/docs/en/07-develop/07-tmq.mdx
+++ b/docs/en/07-develop/07-tmq.mdx
@@ -16,7 +16,7 @@ import CDemo from "./_sub_c.mdx";
TDengine provides data subscription and consumption interfaces similar to message queue products. These interfaces make it easier for applications to obtain data written to TDengine either in real time and to process data in the order that events occurred. This simplifies your time-series data processing systems and reduces your costs because it is no longer necessary to deploy a message queue product such as Kafka.
-To use TDengine data subscription, you define topics like in Kafka. However, a topic in TDengine is based on query conditions for an existing supertable, standard table, or subtable - in other words, a SELECT statement. You can use SQL to filter data by tag, table name, column, or expression and then perform a scalar function or user-defined function on the data. Aggregate functions are not supported. This gives TDengine data subscription more flexibility than similar products. The granularity of data can be controlled on demand by applications, while filtering and preprocessing are handled by TDengine instead of the application layer. This implementation reduces the amount of data transmitted and the complexity of applications.
+To use TDengine data subscription, you define topics like in Kafka. However, a topic in TDengine is based on query conditions for an existing supertable, table, or subtable - in other words, a SELECT statement. You can use SQL to filter data by tag, table name, column, or expression and then perform a scalar function or user-defined function on the data. Aggregate functions are not supported. This gives TDengine data subscription more flexibility than similar products. The granularity of data can be controlled on demand by applications, while filtering and preprocessing are handled by TDengine instead of the application layer. This implementation reduces the amount of data transmitted and the complexity of applications.
By subscribing to a topic, a consumer can obtain the latest data in that topic in real time. Multiple consumers can be formed into a consumer group that consumes messages together. Consumer groups enable faster speed through multi-threaded, distributed data consumption. Note that consumers in different groups that are subscribed to the same topic do not consume messages together. A single consumer can subscribe to multiple topics. If the data in a supertable is sharded across multiple vnodes, consumer groups can consume it much more efficiently than single consumers. TDengine also includes an acknowledgement mechanism that ensures at-least-once delivery in complicated environments where machines may crash or restart.
diff --git a/docs/en/07-develop/08-cache.md b/docs/en/07-develop/08-cache.md
index 4892c21c9ddb97b3f967053ee64be24f8cb78c85..82a4787016f608f8e32e89b1747443b7cd164551 100644
--- a/docs/en/07-develop/08-cache.md
+++ b/docs/en/07-develop/08-cache.md
@@ -20,11 +20,11 @@ In theory, larger cache sizes are always better. However, at a certain point, it
## Read Cache
-When you create a database, you can configure whether the latest data from every subtable is cached. To do so, set the *cachelast* parameter as follows:
-- 0: Caching is disabled.
-- 1: The latest row of data in each subtable is cached. This option significantly improves the performance of the `LAST_ROW` function
-- 2: The latest non-null value in each column of each subtable is cached. This option significantly improves the performance of the `LAST` function in normal situations, such as WHERE, ORDER BY, GROUP BY, and INTERVAL statements.
-- 3: Rows and columns are both cached. This option is equivalent to simultaneously enabling options 1 and 2.
+When you create a database, you can configure whether the latest data from every subtable is cached. To do so, set the *cachemodel* parameter as follows:
+- none: Caching is disabled.
+- last_row: The latest row of data in each subtable is cached. This option significantly improves the performance of the `LAST_ROW` function
+- last_value: The latest non-null value in each column of each subtable is cached. This option significantly improves the performance of the `LAST` function in normal situations, such as WHERE, ORDER BY, GROUP BY, and INTERVAL statements.
+- both: Rows and columns are both cached. This option is equivalent to simultaneously enabling option last_row and last_value.
## Metadata Cache
diff --git a/docs/en/10-deployment/01-deploy.md b/docs/en/10-deployment/01-deploy.md
index bfbb547bd4177cba369ec9d3d2541bceed853ef0..5dfcd3108d8b10cf24cdd5c852c4225ced0f16b2 100644
--- a/docs/en/10-deployment/01-deploy.md
+++ b/docs/en/10-deployment/01-deploy.md
@@ -39,18 +39,18 @@ To get the hostname on any host, the command `hostname -f` can be executed.
On the physical machine running the application, ping the dnode that is running taosd. If the dnode is not accessible, the application cannot connect to taosd. In this case, verify the DNS and hosts settings on the physical node running the application.
-The end point of each dnode is the output hostname and port, such as h1.taosdata.com:6030.
+The end point of each dnode is the output hostname and port, such as h1.tdengine.com:6030.
### Step 5
-Modify the TDengine configuration file `/etc/taos/taos.cfg` on each node. Assuming the first dnode of TDengine cluster is "h1.taosdata.com:6030", its `taos.cfg` is configured as following.
+Modify the TDengine configuration file `/etc/taos/taos.cfg` on each node. Assuming the first dnode of TDengine cluster is "h1.tdengine.com:6030", its `taos.cfg` is configured as following.
```c
// firstEp is the end point to connect to when any dnode starts
-firstEp h1.taosdata.com:6030
+firstEp h1.tdengine.com:6030
// must be configured to the FQDN of the host where the dnode is launched
-fqdn h1.taosdata.com
+fqdn h1.tdengine.com
// the port used by the dnode, default is 6030
serverPort 6030
@@ -76,13 +76,13 @@ The first dnode can be started following the instructions in [Get Started](/get-
taos> show dnodes;
id | endpoint | vnodes | support_vnodes | status | create_time | note |
============================================================================================================================================
-1 | h1.taosdata.com:6030 | 0 | 1024 | ready | 2022-07-16 10:50:42.673 | |
+1 | h1.tdengine.com:6030 | 0 | 1024 | ready | 2022-07-16 10:50:42.673 | |
Query OK, 1 rows affected (0.007984s)
```
-From the above output, it is shown that the end point of the started dnode is "h1.taosdata.com:6030", which is the `firstEp` of the cluster.
+From the above output, it is shown that the end point of the started dnode is "h1.tdengine.com:6030", which is the `firstEp` of the cluster.
## Add DNODE
@@ -90,7 +90,7 @@ There are a few steps necessary to add other dnodes in the cluster.
Second, we can start `taosd` as instructed in [Get Started](/get-started/).
-Then, on the first dnode i.e. h1.taosdata.com in our example, use TDengine CLI `taos` to execute the following command:
+Then, on the first dnode i.e. h1.tdengine.com in our example, use TDengine CLI `taos` to execute the following command:
```sql
CREATE DNODE "h2.taos.com:6030";
@@ -98,7 +98,7 @@ CREATE DNODE "h2.taos.com:6030";
This adds the end point of the new dnode (from Step 4) into the end point list of the cluster. In the command "fqdn:port" should be quoted using double quotes. Change `"h2.taos.com:6030"` to the end point of your new dnode.
-Then on the first dnode h1.taosdata.com, execute `show dnodes` in `taos`
+Then on the first dnode h1.tdengine.com, execute `show dnodes` in `taos`
```sql
SHOW DNODES;
@@ -114,7 +114,9 @@ The above process can be repeated to add more dnodes in the cluster.
Any node that is in the cluster and online can be the firstEp of new nodes.
Nodes use the firstEp parameter only when joining a cluster for the first time. After a node has joined the cluster, it stores the latest mnode in its end point list and no longer makes use of firstEp.
-However, firstEp is used by clients that connect to the cluster. For example, if you run `taos shell` without arguments, it connects to the firstEp by default.
+
+However, firstEp is used by clients that connect to the cluster. For example, if you run TDengine CLI `taos` without arguments, it connects to the firstEp by default.
+
Two dnodes that are launched without a firstEp value operate independently of each other. It is not possible to add one dnode to the other dnode and form a cluster. It is also not possible to form two independent clusters into a new cluster.
:::
diff --git a/docs/en/10-deployment/03-k8s.md b/docs/en/10-deployment/03-k8s.md
index b3f71ed5bd0e0dbaf3108cc40be6b18bdf5fb7e8..b0aa6777130864404e97dc332cf0e5ce830bf8ed 100644
--- a/docs/en/10-deployment/03-k8s.md
+++ b/docs/en/10-deployment/03-k8s.md
@@ -9,6 +9,7 @@ TDengine is a cloud-native time-series database that can be deployed on Kubernet
Before deploying TDengine on Kubernetes, perform the following:
+* Current steps are compatible with Kubernetes v1.5 and later version.
* Install and configure minikube, kubectl, and helm.
* Install and deploy Kubernetes and ensure that it can be accessed and used normally. Update any container registries or other services as necessary.
@@ -100,7 +101,7 @@ spec:
# Must set if you want a cluster.
- name: TAOS_FIRST_EP
value: "$(STS_NAME)-0.$(SERVICE_NAME).$(STS_NAMESPACE).svc.cluster.local:$(TAOS_SERVER_PORT)"
- # TAOS_FQND should always be setted in k8s env.
+ # TAOS_FQDN should always be set in k8s env.
- name: TAOS_FQDN
value: "$(POD_NAME).$(SERVICE_NAME).$(STS_NAMESPACE).svc.cluster.local"
volumeMounts:
diff --git a/docs/en/10-deployment/05-helm.md b/docs/en/10-deployment/05-helm.md
index 48cd9df32c16d346ceece01f01ee3880231427e7..a4fa68100078efe85fff5e1b078ebd07e5337d5a 100644
--- a/docs/en/10-deployment/05-helm.md
+++ b/docs/en/10-deployment/05-helm.md
@@ -152,7 +152,7 @@ clusterDomainSuffix: ""
# converting an upper-snake-cased variable like `TAOS_DEBUG_FLAG`,
# to a camelCase taos config variable `debugFlag`.
#
-# See the variable list at https://www.taosdata.com/cn/documentation/administrator .
+# See the [Configuration Variables](../../reference/config)
#
# Note:
# 1. firstEp/secondEp: should not be setted here, it's auto generated at scale-up.
@@ -170,71 +170,21 @@ taoscfg:
# number of replications, for cluster only
TAOS_REPLICA: "1"
-
- # number of days per DB file
- # TAOS_DAYS: "10"
-
- # number of days to keep DB file, default is 10 years.
- #TAOS_KEEP: "3650"
-
- # cache block size (Mbyte)
- #TAOS_CACHE: "16"
-
- # number of cache blocks per vnode
- #TAOS_BLOCKS: "6"
-
- # minimum rows of records in file block
- #TAOS_MIN_ROWS: "100"
-
- # maximum rows of records in file block
- #TAOS_MAX_ROWS: "4096"
-
#
- # TAOS_NUM_OF_THREADS_PER_CORE: number of threads per CPU core
- #TAOS_NUM_OF_THREADS_PER_CORE: "1.0"
+ # TAOS_NUM_OF_RPC_THREADS: number of threads for RPC
+ #TAOS_NUM_OF_RPC_THREADS: "2"
+
#
# TAOS_NUM_OF_COMMIT_THREADS: number of threads to commit cache data
#TAOS_NUM_OF_COMMIT_THREADS: "4"
- #
- # TAOS_RATIO_OF_QUERY_CORES:
- # the proportion of total CPU cores available for query processing
- # 2.0: the query threads will be set to double of the CPU cores.
- # 1.0: all CPU cores are available for query processing [default].
- # 0.5: only half of the CPU cores are available for query.
- # 0.0: only one core available.
- #TAOS_RATIO_OF_QUERY_CORES: "1.0"
-
- #
- # TAOS_KEEP_COLUMN_NAME:
- # the last_row/first/last aggregator will not change the original column name in the result fields
- #TAOS_KEEP_COLUMN_NAME: "0"
-
- # enable/disable backuping vnode directory when removing vnode
- #TAOS_VNODE_BAK: "1"
-
# enable/disable installation / usage report
#TAOS_TELEMETRY_REPORTING: "1"
- # enable/disable load balancing
- #TAOS_BALANCE: "1"
-
- # max timer control blocks
- #TAOS_MAX_TMR_CTRL: "512"
-
# time interval of system monitor, seconds
#TAOS_MONITOR_INTERVAL: "30"
- # number of seconds allowed for a dnode to be offline, for cluster only
- #TAOS_OFFLINE_THRESHOLD: "8640000"
-
- # RPC re-try timer, millisecond
- #TAOS_RPC_TIMER: "1000"
-
- # RPC maximum time for ack, seconds.
- #TAOS_RPC_MAX_TIME: "600"
-
# time interval of dnode status reporting to mnode, seconds, for cluster only
#TAOS_STATUS_INTERVAL: "1"
@@ -245,37 +195,7 @@ taoscfg:
#TAOS_MIN_SLIDING_TIME: "10"
# minimum time window, milli-second
- #TAOS_MIN_INTERVAL_TIME: "10"
-
- # maximum delay before launching a stream computation, milli-second
- #TAOS_MAX_STREAM_COMP_DELAY: "20000"
-
- # maximum delay before launching a stream computation for the first time, milli-second
- #TAOS_MAX_FIRST_STREAM_COMP_DELAY: "10000"
-
- # retry delay when a stream computation fails, milli-second
- #TAOS_RETRY_STREAM_COMP_DELAY: "10"
-
- # the delayed time for launching a stream computation, from 0.1(default, 10% of whole computing time window) to 0.9
- #TAOS_STREAM_COMP_DELAY_RATIO: "0.1"
-
- # max number of vgroups per db, 0 means configured automatically
- #TAOS_MAX_VGROUPS_PER_DB: "0"
-
- # max number of tables per vnode
- #TAOS_MAX_TABLES_PER_VNODE: "1000000"
-
- # the number of acknowledgments required for successful data writing
- #TAOS_QUORUM: "1"
-
- # enable/disable compression
- #TAOS_COMP: "2"
-
- # write ahead log (WAL) level, 0: no wal; 1: write wal, but no fysnc; 2: write wal, and call fsync
- #TAOS_WAL_LEVEL: "1"
-
- # if walLevel is set to 2, the cycle of fsync being executed, if set to 0, fsync is called right away
- #TAOS_FSYNC: "3000"
+ #TAOS_MIN_INTERVAL_TIME: "1"
# the compressed rpc message, option:
# -1 (no compression)
@@ -283,17 +203,8 @@ taoscfg:
# > 0 (rpc message body which larger than this value will be compressed)
#TAOS_COMPRESS_MSG_SIZE: "-1"
- # max length of an SQL
- #TAOS_MAX_SQL_LENGTH: "1048576"
-
- # the maximum number of records allowed for super table time sorting
- #TAOS_MAX_NUM_OF_ORDERED_RES: "100000"
-
# max number of connections allowed in dnode
- #TAOS_MAX_SHELL_CONNS: "5000"
-
- # max number of connections allowed in client
- #TAOS_MAX_CONNECTIONS: "5000"
+ #TAOS_MAX_SHELL_CONNS: "50000"
# stop writing logs when the disk size of the log folder is less than this value
#TAOS_MINIMAL_LOG_DIR_G_B: "0.1"
@@ -313,21 +224,8 @@ taoscfg:
# enable/disable system monitor
#TAOS_MONITOR: "1"
- # enable/disable recording the SQL statements via restful interface
- #TAOS_HTTP_ENABLE_RECORD_SQL: "0"
-
- # number of threads used to process http requests
- #TAOS_HTTP_MAX_THREADS: "2"
-
- # maximum number of rows returned by the restful interface
- #TAOS_RESTFUL_ROW_LIMIT: "10240"
-
- # The following parameter is used to limit the maximum number of lines in log files.
- # max number of lines per log filters
- # numOfLogLines 10000000
-
# enable/disable async log
- #TAOS_ASYNC_LOG: "0"
+ #TAOS_ASYNC_LOG: "1"
#
# time of keeping log files, days
@@ -344,25 +242,8 @@ taoscfg:
# debug flag for all log type, take effect when non-zero value\
#TAOS_DEBUG_FLAG: "143"
- # enable/disable recording the SQL in taos client
- #TAOS_ENABLE_RECORD_SQL: "0"
-
# generate core file when service crash
#TAOS_ENABLE_CORE_FILE: "1"
-
- # maximum display width of binary and nchar fields in the shell. The parts exceeding this limit will be hidden
- #TAOS_MAX_BINARY_DISPLAY_WIDTH: "30"
-
- # enable/disable stream (continuous query)
- #TAOS_STREAM: "1"
-
- # in retrieve blocking model, only in 50% query threads will be used in query processing in dnode
- #TAOS_RETRIEVE_BLOCKING_MODEL: "0"
-
- # the maximum allowed query buffer size in MB during query processing for each data node
- # -1 no limit (default)
- # 0 no query allowed, queries are disabled
- #TAOS_QUERY_BUFFER_SIZE: "-1"
```
## Scaling Out
diff --git a/docs/en/12-taos-sql/01-data-type.md b/docs/en/12-taos-sql/01-data-type.md
index b830994ac9323f85d9ca68a40366edd9f2da1432..876de50f35ee3ba533bd7d5916632de853a84c0e 100644
--- a/docs/en/12-taos-sql/01-data-type.md
+++ b/docs/en/12-taos-sql/01-data-type.md
@@ -11,7 +11,7 @@ When using TDengine to store and query data, the most important part of the data
- The format must be `YYYY-MM-DD HH:mm:ss.MS`, the default time precision is millisecond (ms), for example `2017-08-12 18:25:58.128`
- Internal function `now` can be used to get the current timestamp on the client side
- The current timestamp of the client side is applied when `now` is used to insert data
-- Epoch Time:timestamp can also be a long integer number, which means the number of seconds, milliseconds or nanoseconds, depending on the time precision, from 1970-01-01 00:00:00.000 (UTC/GMT)
+- Epoch Time:timestamp can also be a long integer number, which means the number of seconds, milliseconds or nanoseconds, depending on the time precision, from UTC 1970-01-01 00:00:00.
- Add/subtract operations can be carried out on timestamps. For example `now-2h` means 2 hours prior to the time at which query is executed. The units of time in operations can be b(nanosecond), u(microsecond), a(millisecond), s(second), m(minute), h(hour), d(day), or w(week). So `select * from t1 where ts > now-2w and ts <= now-1w` means the data between two weeks ago and one week ago. The time unit can also be n (calendar month) or y (calendar year) when specifying the time window for down sampling operations.
Time precision in TDengine can be set by the `PRECISION` parameter when executing `CREATE DATABASE`. The default time precision is millisecond. In the statement below, the precision is set to nanonseconds.
diff --git a/docs/en/12-taos-sql/02-database.md b/docs/en/12-taos-sql/02-database.md
index d9dadae976bf07bbf6cfb49401d55bb0bf18da49..5a84bbf3709ff2355157409ae11d5f85191a8271 100644
--- a/docs/en/12-taos-sql/02-database.md
+++ b/docs/en/12-taos-sql/02-database.md
@@ -71,9 +71,9 @@ database_option: {
- SINGLE_STABLE: specifies whether the database can contain more than one supertable.
- 0: The database can contain multiple supertables.
- 1: The database can contain only one supertable.
-- WAL_RETENTION_PERIOD: specifies the time after which WAL files are deleted. This parameter is used for data subscription. Enter a time in seconds. The default value is 0. A value of 0 indicates that each WAL file is deleted immediately after its contents are written to disk. -1: WAL files are never deleted.
-- WAL_RETENTION_SIZE: specifies the size at which WAL files are deleted. This parameter is used for data subscription. Enter a size in KB. The default value is 0. A value of 0 indicates that each WAL file is deleted immediately after its contents are written to disk. -1: WAL files are never deleted.
-- WAL_ROLL_PERIOD: specifies the time after which WAL files are rotated. After this period elapses, a new WAL file is created. The default value is 0. A value of 0 indicates that a new WAL file is created only after the previous WAL file was written to disk.
+- WAL_RETENTION_PERIOD: specifies the time after which WAL files are deleted. This parameter is used for data subscription. Enter a time in seconds. The default value of single copy is 0. A value of 0 indicates that each WAL file is deleted immediately after its contents are written to disk. -1: WAL files are never deleted. The default value of multiple copy is 4 days.
+- WAL_RETENTION_SIZE: specifies the size at which WAL files are deleted. This parameter is used for data subscription. Enter a size in KB. The default value of single copy is 0. A value of 0 indicates that each WAL file is deleted immediately after its contents are written to disk. -1: WAL files are never deleted. The default value of multiple copy is -1.
+- WAL_ROLL_PERIOD: specifies the time after which WAL files are rotated. After this period elapses, a new WAL file is created. The default value of single copy is 0. A value of 0 indicates that a new WAL file is created only after the previous WAL file was written to disk. The default values of multiple copy is 1 day.
- WAL_SEGMENT_SIZE: specifies the maximum size of a WAL file. After the current WAL file reaches this size, a new WAL file is created. The default value is 0. A value of 0 indicates that a new WAL file is created only after the previous WAL file was written to disk.
### Example Statement
diff --git a/docs/en/12-taos-sql/03-table.md b/docs/en/12-taos-sql/03-table.md
index bf32cf171bbeea23ada946d5011a73dd70ddd6ca..5a2c8ed6ee4a5ea129023fec68fa97d577832f60 100644
--- a/docs/en/12-taos-sql/03-table.md
+++ b/docs/en/12-taos-sql/03-table.md
@@ -57,7 +57,7 @@ table_option: {
3. MAX_DELAY: specifies the maximum latency for pushing computation results. The default value is 15 minutes or the value of the INTERVAL parameter, whichever is smaller. Enter a value between 0 and 15 minutes in milliseconds, seconds, or minutes. You can enter multiple values separated by commas (,). Note: Retain the default value if possible. Configuring a small MAX_DELAY may cause results to be frequently pushed, affecting storage and query performance. This parameter applies only to supertables and takes effect only when the RETENTIONS parameter has been specified for the database.
4. ROLLUP: specifies aggregate functions to roll up. Rolling up a function provides downsampled results based on multiple axes. This parameter applies only to supertables and takes effect only when the RETENTIONS parameter has been specified for the database. You can specify only one function to roll up. The rollup takes effect on all columns except TS. Enter one of the following values: avg, sum, min, max, last, or first.
5. SMA: specifies functions on which to enable small materialized aggregates (SMA). SMA is user-defined precomputation of aggregates based on data blocks. Enter one of the following values: max, min, or sum This parameter can be used with supertables and standard tables.
-6. TTL: specifies the time to live (TTL) for the table. If the period specified by the TTL parameter elapses without any data being written to the table, TDengine will automatically delete the table. Note: The system may not delete the table at the exact moment that the TTL expires. Enter a value in days. The default value is 0. Note: The TTL parameter has a higher priority than the KEEP parameter. If a table is marked for deletion because the TTL has expired, it will be deleted even if the time specified by the KEEP parameter has not elapsed. This parameter can be used with standard tables and subtables.
+6. TTL: specifies the time to live (TTL) for the table. If TTL is specified when creatinga table, after the time period for which the table has been existing is over TTL, TDengine will automatically delete the table. Please be noted that the system may not delete the table at the exact moment that the TTL expires but guarantee there is such a system and finally the table will be deleted. The unit of TTL is in days. The default value is 0, i.e. never expire.
## Create Subtables
diff --git a/docs/en/12-taos-sql/06-select.md b/docs/en/12-taos-sql/06-select.md
index 1dd0caed38235d3d10813b2cd74fec6446c5ec24..1f861119250e9e32fcac3344a5bdfb760e6191a7 100644
--- a/docs/en/12-taos-sql/06-select.md
+++ b/docs/en/12-taos-sql/06-select.md
@@ -52,11 +52,6 @@ window_clause: {
| STATE_WINDOW(col)
| INTERVAL(interval_val [, interval_offset]) [SLIDING (sliding_val)] [WATERMARK(watermark_val)] [FILL(fill_mod_and_val)]
-changes_option: {
- DURATION duration_val
- | ROWS rows_val
-}
-
group_by_clause:
GROUP BY expr [, expr] ... HAVING condition
@@ -126,7 +121,6 @@ SELECT DISTINCT col_name [, col_name ...] FROM tb_name;
1. Configuration parameter `maxNumOfDistinctRes` in `taos.cfg` is used to control the number of rows to output. The minimum configurable value is 100,000, the maximum configurable value is 100,000,000, the default value is 1,000,000. If the actual number of rows exceeds the value of this parameter, only the number of rows specified by this parameter will be output.
2. It can't be guaranteed that the results selected by using `DISTINCT` on columns of `FLOAT` or `DOUBLE` are exactly unique because of the precision errors in floating point numbers.
-3. `DISTINCT` can't be used in the sub-query of a nested query statement, and can't be used together with aggregate functions, `GROUP BY` or `JOIN` in the same SQL statement.
:::
diff --git a/docs/en/12-taos-sql/10-function.md b/docs/en/12-taos-sql/10-function.md
index d35fd3109998608475e4e0429265c8ac7274f57d..f74d0dbe5c6ae2019b266df4c55a13a49630bf71 100644
--- a/docs/en/12-taos-sql/10-function.md
+++ b/docs/en/12-taos-sql/10-function.md
@@ -613,6 +613,7 @@ SELECT APERCENTILE(field_name, P[, algo_type]) FROM { tb_name | stb_name } [WHER
**Explanations**:
- _P_ is in range [0,100], when _P_ is 0, the result is same as using function MIN; when _P_ is 100, the result is same as function MAX.
- `algo_type` can only be input as `default` or `t-digest` Enter `default` to use a histogram-based algorithm. Enter `t-digest` to use the t-digest algorithm to calculate the approximation of the quantile. `default` is used by default.
+- The approximation result of `t-digest` algorithm is sensitive to input data order. For example, when querying STable with different input data order there might be minor differences in calculated results.
### AVG
@@ -916,7 +917,7 @@ SELECT MAX(field_name) FROM { tb_name | stb_name } [WHERE clause];
**Return value type**:Same as the data type of the column being operated upon
-**Applicable data types**: Numeric
+**Applicable data types**: Numeric, Timestamp
**Applicable table types**: standard tables and supertables
@@ -931,7 +932,7 @@ SELECT MIN(field_name) FROM {tb_name | stb_name} [WHERE clause];
**Return value type**:Same as the data type of the column being operated upon
-**Applicable data types**: Numeric
+**Applicable data types**: Numeric, Timestamp
**Applicable table types**: standard tables and supertables
@@ -1139,7 +1140,7 @@ SELECT STATECOUNT(field_name, oper, val) FROM { tb_name | stb_name } [WHERE clau
**Applicable parameter values**:
-- oper : Can be one of `LT` (lower than), `GT` (greater than), `LE` (lower than or equal to), `GE` (greater than or equal to), `NE` (not equal to), `EQ` (equal to), the value is case insensitive
+- oper : Can be one of `'LT'` (lower than), `'GT'` (greater than), `'LE'` (lower than or equal to), `'GE'` (greater than or equal to), `'NE'` (not equal to), `'EQ'` (equal to), the value is case insensitive, the value must be in quotes.
- val : Numeric types
**Return value type**: Integer
@@ -1166,7 +1167,7 @@ SELECT stateDuration(field_name, oper, val, unit) FROM { tb_name | stb_name } [W
**Applicable parameter values**:
-- oper : Can be one of `LT` (lower than), `GT` (greater than), `LE` (lower than or equal to), `GE` (greater than or equal to), `NE` (not equal to), `EQ` (equal to), the value is case insensitive
+- oper : Can be one of `'LT'` (lower than), `'GT'` (greater than), `'LE'` (lower than or equal to), `'GE'` (greater than or equal to), `'NE'` (not equal to), `'EQ'` (equal to), the value is case insensitive, the value must be in quotes.
- val : Numeric types
- unit: The unit of time interval. Enter one of the following options: 1b (nanoseconds), 1u (microseconds), 1a (milliseconds), 1s (seconds), 1m (minutes), 1h (hours), 1d (days), or 1w (weeks) If you do not enter a unit of time, the precision of the current database is used by default.
diff --git a/docs/en/12-taos-sql/14-stream.md b/docs/en/12-taos-sql/14-stream.md
index fcd78765104af17285b43749969821ceb98da33b..17e4e4d1b0da6d0461c9ab478a9430855379fb12 100644
--- a/docs/en/12-taos-sql/14-stream.md
+++ b/docs/en/12-taos-sql/14-stream.md
@@ -44,13 +44,13 @@ For example, the following SQL statement creates a stream and automatically crea
```sql
CREATE STREAM avg_vol_s INTO avg_vol AS
-SELECT _wstartts, count(*), avg(voltage) FROM meters PARTITION BY tbname INTERVAL(1m) SLIDING(30s);
+SELECT _wstart, count(*), avg(voltage) FROM meters PARTITION BY tbname INTERVAL(1m) SLIDING(30s);
```
## Delete a Stream
```sql
-DROP STREAM [IF NOT EXISTS] stream_name
+DROP STREAM [IF EXISTS] stream_name
```
This statement deletes the stream processing service only. The data generated by the stream is retained.
diff --git a/docs/en/12-taos-sql/19-limit.md b/docs/en/12-taos-sql/19-limit.md
index 0486ea30940cdcb5d034bb730d12c0c120a59cd1..678c38a22ea763187cd0c87dceae3bf6ca03957c 100644
--- a/docs/en/12-taos-sql/19-limit.md
+++ b/docs/en/12-taos-sql/19-limit.md
@@ -30,7 +30,7 @@ The following characters cannot occur in a password: single quotation marks ('),
- Maximum number of columns is 4096. There must be at least 2 columns, and the first column must be timestamp.
- The maximum length of a tag name is 64 bytes
- Maximum number of tags is 128. There must be at least 1 tag. The total length of tag values cannot exceed 16 KB.
-- Maximum length of single SQL statement is 1 MB (1048576 bytes). It can be configured in the parameter `maxSQLLength` in the client side, the applicable range is [65480, 1048576].
+- Maximum length of single SQL statement is 1 MB (1048576 bytes).
- At most 4096 columns can be returned by `SELECT`. Functions in the query statement constitute columns. An error is returned if the limit is exceeded.
- Maximum numbers of databases, STables, tables are dependent only on the system resources.
- The number of replicas can only be 1 or 3.
diff --git a/docs/en/12-taos-sql/22-meta.md b/docs/en/12-taos-sql/22-meta.md
index 796b25dcb0a425aa0ffd76a6e9b8de45ba069357..9bda5a0a1027243ea5f50c55e303fdb7155c853b 100644
--- a/docs/en/12-taos-sql/22-meta.md
+++ b/docs/en/12-taos-sql/22-meta.md
@@ -245,3 +245,35 @@ Provides dnode configuration information.
| 1 | dnode_id | INT | Dnode ID |
| 2 | name | BINARY(32) | Parameter |
| 3 | value | BINARY(64) | Value |
+
+## INS_TOPICS
+
+| # | **Column** | **Data Type** | **Description** |
+| --- | :---------: | ------------ | ------------------------------ |
+| 1 | topic_name | BINARY(192) | Topic name |
+| 2 | db_name | BINARY(64) | Database for the topic |
+| 3 | create_time | TIMESTAMP | Creation time |
+| 4 | sql | BINARY(1024) | SQL statement used to create the topic |
+
+## INS_SUBSCRIPTIONS
+
+| # | **Column** | **Data Type** | **Description** |
+| --- | :------------: | ------------ | ------------------------ |
+| 1 | topic_name | BINARY(204) | Subscribed topic |
+| 2 | consumer_group | BINARY(193) | Subscribed consumer group |
+| 3 | vgroup_id | INT | Vgroup ID for the consumer |
+| 4 | consumer_id | BIGINT | Consumer ID |
+
+## INS_STREAMS
+
+| # | **Column** | **Data Type** | **Description** |
+| --- | :----------: | ------------ | --------------------------------------- |
+| 1 | stream_name | BINARY(64) | Stream name |
+| 2 | create_time | TIMESTAMP | Creation time |
+| 3 | sql | BINARY(1024) | SQL statement used to create the stream |
+| 4 | status | BINARY(20) | Current status |
+| 5 | source_db | BINARY(64) | Source database |
+| 6 | target_db | BINARY(64) | Target database |
+| 7 | target_table | BINARY(192) | Target table |
+| 8 | watermark | BIGINT | Watermark (see stream processing documentation) |
+| 9 | trigger | INT | Method of triggering the result push (see stream processing documentation) |
diff --git a/docs/en/12-taos-sql/23-perf.md b/docs/en/12-taos-sql/23-perf.md
index 10a93380220d357261914066d2fe036b8470e224..29cf3af6abfbbc06e42ae99c78f35f33a3c7c30a 100644
--- a/docs/en/12-taos-sql/23-perf.md
+++ b/docs/en/12-taos-sql/23-perf.md
@@ -61,15 +61,6 @@ Provides information about SQL queries currently running. Similar to SHOW QUERIE
| 12 | sub_status | BINARY(1000) | Subquery status |
| 13 | sql | BINARY(1024) | SQL statement |
-## PERF_TOPICS
-
-| # | **Column** | **Data Type** | **Description** |
-| --- | :---------: | ------------ | ------------------------------ |
-| 1 | topic_name | BINARY(192) | Topic name |
-| 2 | db_name | BINARY(64) | Database for the topic |
-| 3 | create_time | TIMESTAMP | Creation time |
-| 4 | sql | BINARY(1024) | SQL statement used to create the topic |
-
## PERF_CONSUMERS
| # | **Column** | **Data Type** | **Description** |
@@ -83,15 +74,6 @@ Provides information about SQL queries currently running. Similar to SHOW QUERIE
| 7 | subscribe_time | TIMESTAMP | Time of first subscription |
| 8 | rebalance_time | TIMESTAMP | Time of first rebalance triggering |
-## PERF_SUBSCRIPTIONS
-
-| # | **Column** | **Data Type** | **Description** |
-| --- | :------------: | ------------ | ------------------------ |
-| 1 | topic_name | BINARY(204) | Subscribed topic |
-| 2 | consumer_group | BINARY(193) | Subscribed consumer group |
-| 3 | vgroup_id | INT | Vgroup ID for the consumer |
-| 4 | consumer_id | BIGINT | Consumer ID |
-
## PERF_TRANS
| # | **Column** | **Data Type** | **Description** |
@@ -113,17 +95,3 @@ Provides information about SQL queries currently running. Similar to SHOW QUERIE
| 2 | create_time | TIMESTAMP | Creation time |
| 3 | stable_name | BINARY(192) | Supertable name |
| 4 | vgroup_id | INT | Dedicated vgroup name |
-
-## PERF_STREAMS
-
-| # | **Column** | **Data Type** | **Description** |
-| --- | :----------: | ------------ | --------------------------------------- |
-| 1 | stream_name | BINARY(64) | Stream name |
-| 2 | create_time | TIMESTAMP | Creation time |
-| 3 | sql | BINARY(1024) | SQL statement used to create the stream |
-| 4 | status | BIANRY(20) | Current status |
-| 5 | source_db | BINARY(64) | Source database |
-| 6 | target_db | BIANRY(64) | Target database |
-| 7 | target_table | BINARY(192) | Target table |
-| 8 | watermark | BIGINT | Watermark (see stream processing documentation) |
-| 9 | trigger | INT | Method of triggering the result push (see stream processing documentation) |
diff --git a/docs/en/12-taos-sql/24-show.md b/docs/en/12-taos-sql/24-show.md
index 96503c95989b4ae2e99fa0c38181a74232e6dc23..5f3bef3546ea05745070268e1f6add25add4773b 100644
--- a/docs/en/12-taos-sql/24-show.md
+++ b/docs/en/12-taos-sql/24-show.md
@@ -3,17 +3,7 @@ sidebar_label: SHOW Statement
title: SHOW Statement for Metadata
---
-In addition to running SELECT statements on INFORMATION_SCHEMA, you can also use SHOW to obtain system metadata, information, and status.
-
-## SHOW ACCOUNTS
-
-```sql
-SHOW ACCOUNTS;
-```
-
-Shows information about tenants on the system.
-
-Note: TDengine Enterprise Edition only.
+`SHOW` command can be used to get brief system information. To get details about metadata, information, and status in the system, please use `select` to query the tables in database `INFORMATION_SCHEMA`.
## SHOW APPS
@@ -194,7 +184,7 @@ Shows information about streams in the system.
SHOW SUBSCRIPTIONS;
```
-Shows all subscriptions in the current database.
+Shows all subscriptions in the system.
## SHOW TABLES
diff --git a/docs/en/12-taos-sql/25-grant.md b/docs/en/12-taos-sql/25-grant.md
index 37438ee780cac17b463e0dbb1b5385d0f3965de7..b9a3fa2321c8d073845d0cf9157ce335c930e06f 100644
--- a/docs/en/12-taos-sql/25-grant.md
+++ b/docs/en/12-taos-sql/25-grant.md
@@ -1,6 +1,7 @@
---
-sidebar_label: Permissions Management
-title: Permissions Management
+sidebar_label: Access Control
+title: User and Access Control
+description: Manage user and user's permission
---
This document describes how to manage permissions in TDengine.
diff --git a/docs/en/12-taos-sql/index.md b/docs/en/12-taos-sql/index.md
index e243cd23186a6b9286d3297e467567c26c316112..a5ffc9dc8dce158eccc0fa0519f09ba346710c31 100644
--- a/docs/en/12-taos-sql/index.md
+++ b/docs/en/12-taos-sql/index.md
@@ -1,6 +1,6 @@
---
title: TDengine SQL
-description: "The syntax supported by TDengine SQL "
+description: 'The syntax supported by TDengine SQL '
---
This section explains the syntax of SQL to perform operations on databases, tables and STables, insert data, select data and use functions. We also provide some tips that can be used in TDengine SQL. If you have previous experience with SQL this section will be fairly easy to understand. If you do not have previous experience with SQL, you'll come to appreciate the simplicity and power of SQL. TDengine SQL has been enhanced in version 3.0, and the query engine has been rearchitected. For information about how TDengine SQL has changed, see [Changes in TDengine 3.0](../taos-sql/changes).
@@ -15,7 +15,7 @@ Syntax Specifications used in this chapter:
- | means one of a few options, excluding | itself.
- … means the item prior to it can be repeated multiple times.
-To better demonstrate the syntax, usage and rules of TAOS SQL, hereinafter it's assumed that there is a data set of data from electric meters. Each meter collects 3 data measurements: current, voltage, phase. The data model is shown below:
+To better demonstrate the syntax, usage and rules of TDengine SQL, hereinafter it's assumed that there is a data set of data from electric meters. Each meter collects 3 data measurements: current, voltage, phase. The data model is shown below:
```
taos> DESCRIBE meters;
diff --git a/docs/en/13-operation/01-pkg-install.md b/docs/en/13-operation/01-pkg-install.md
index a8d8d7b4744d0e99157dc0bc1cd34bf93f0001a1..d7713b943f5fe8fbd5e685b8ba03ff8cc8ed4e53 100644
--- a/docs/en/13-operation/01-pkg-install.md
+++ b/docs/en/13-operation/01-pkg-install.md
@@ -1,12 +1,12 @@
---
-title: Install & Uninstall
+title: Install and Uninstall
description: Install, Uninstall, Start, Stop and Upgrade
---
import Tabs from "@theme/Tabs";
import TabItem from "@theme/TabItem";
-TDengine community version provides deb and rpm packages for users to choose from, based on their system environment. The deb package supports Debian, Ubuntu and derivative systems. The rpm package supports CentOS, RHEL, SUSE and derivative systems. Furthermore, a tar.gz package is provided for TDengine Enterprise customers.
+This document gives more information about installing, uninstalling, and upgrading TDengine.
## Install
@@ -15,11 +15,48 @@ About details of installing TDenine, please refer to [Installation Guide](../../
## Uninstall
+
+
+Apt-get package of TDengine can be uninstalled as below:
+
+```bash
+$ sudo apt-get remove tdengine
+Reading package lists... Done
+Building dependency tree
+Reading state information... Done
+The following packages will be REMOVED:
+ tdengine
+0 upgraded, 0 newly installed, 1 to remove and 18 not upgraded.
+After this operation, 68.3 MB disk space will be freed.
+Do you want to continue? [Y/n] y
+(Reading database ... 135625 files and directories currently installed.)
+Removing tdengine (3.0.0.0) ...
+TDengine is removed successfully!
+
+```
+
+Apt-get package of taosTools can be uninstalled as below:
+
+```
+$ sudo apt remove taostools
+Reading package lists... Done
+Building dependency tree
+Reading state information... Done
+The following packages will be REMOVED:
+ taostools
+0 upgraded, 0 newly installed, 1 to remove and 0 not upgraded.
+After this operation, 68.3 MB disk space will be freed.
+Do you want to continue? [Y/n]
+(Reading database ... 147973 files and directories currently installed.)
+Removing taostools (2.1.2) ...
+```
+
+
Deb package of TDengine can be uninstalled as below:
-```bash
+```
$ sudo dpkg -r tdengine
(Reading database ... 137504 files and directories currently installed.)
Removing tdengine (3.0.0.0) ...
@@ -27,6 +64,14 @@ TDengine is removed successfully!
```
+Deb package of taosTools can be uninstalled as below:
+
+```
+$ sudo dpkg -r taostools
+(Reading database ... 147973 files and directories currently installed.)
+Removing taostools (2.1.2) ...
+```
+
@@ -38,6 +83,13 @@ $ sudo rpm -e tdengine
TDengine is removed successfully!
```
+RPM package of taosTools can be uninstalled as below:
+
+```
+sudo rpm -e taostools
+taosTools is removed successfully!
+```
+
@@ -46,115 +98,69 @@ tar.gz package of TDengine can be uninstalled as below:
```
$ rmtaos
-Nginx for TDengine is running, stopping it...
TDengine is removed successfully!
-
-taosKeeper is removed successfully!
```
-
-
-
-:::note
-
-- We strongly recommend not to use multiple kinds of installation packages on a single host TDengine.
-- After deb package is installed, if the installation directory is removed manually, uninstall or reinstall will not work. This issue can be resolved by using the command below which cleans up TDengine package information. You can then reinstall if needed.
+tar.gz package of taosTools can be uninstalled as below:
-```bash
- $ sudo rm -f /var/lib/dpkg/info/tdengine*
```
+$ rmtaostools
+Start to uninstall taos tools ...
-- After rpm package is installed, if the installation directory is removed manually, uninstall or reinstall will not work. This issue can be resolved by using the command below which cleans up TDengine package information. You can then reinstall if needed.
-
-```bash
- $ sudo rpm -e --noscripts tdengine
-```
-
-:::
-
-## Installation Directory
-
-TDengine is installed at /usr/local/taos if successful.
-
-```bash
-$ cd /usr/local/taos
-$ ll
-$ ll
-total 28
-drwxr-xr-x 7 root root 4096 Feb 22 09:34 ./
-drwxr-xr-x 12 root root 4096 Feb 22 09:34 ../
-drwxr-xr-x 2 root root 4096 Feb 22 09:34 bin/
-drwxr-xr-x 2 root root 4096 Feb 22 09:34 cfg/
-lrwxrwxrwx 1 root root 13 Feb 22 09:34 data -> /var/lib/taos/
-drwxr-xr-x 2 root root 4096 Feb 22 09:34 driver/
-drwxr-xr-x 10 root root 4096 Feb 22 09:34 examples/
-drwxr-xr-x 2 root root 4096 Feb 22 09:34 include/
-lrwxrwxrwx 1 root root 13 Feb 22 09:34 log -> /var/log/taos/
+taos tools is uninstalled successfully!
```
-During the installation process:
-
-- Configuration directory, data directory, and log directory are created automatically if they don't exist
-- The default configuration file is located at /etc/taos/taos.cfg, which is a copy of /usr/local/taos/cfg/taos.cfg
-- The default data directory is /var/lib/taos, which is a soft link to /usr/local/taos/data
-- The default log directory is /var/log/taos, which is a soft link to /usr/local/taos/log
-- The executables at /usr/local/taos/bin are linked to /usr/bin
-- The DLL files at /usr/local/taos/driver are linked to /usr/lib
-- The header files at /usr/local/taos/include are linked to /usr/include
-
-:::note
+
+
+Run C:\TDengine\unins000.exe to uninstall TDengine on a Windows system.
+
+
-- When TDengine is uninstalled, the configuration /etc/taos/taos.cfg, data directory /var/lib/taos, log directory /var/log/taos are kept. They can be deleted manually with caution, because data can't be recovered. Please follow data integrity, security, backup or relevant SOPs before deleting any data.
-- When reinstalling TDengine, if the default configuration file /etc/taos/taos.cfg exists, it will be kept and the configuration file in the installation package will be renamed to taos.cfg.orig and stored at /usr/local/taos/cfg to be used as configuration sample. Otherwise the configuration file in the installation package will be installed to /etc/taos/taos.cfg and used.
+:::info
-## Start and Stop
+- We strongly recommend not to use multiple kinds of TDengine installation packages on a single host. The packages may affect each other and cause errors.
-Linux system services `systemd`, `systemctl` or `service` are used to start, stop and restart TDengine. The server process of TDengine is `taosd`, which is started automatically after the Linux system is started. System operators can use `systemd`, `systemctl` or `service` to start, stop or restart TDengine server.
+- After deb package is installed, if the installation directory is removed manually, uninstall or reinstall will not work. This issue can be resolved by using the command below which cleans up TDengine package information.
-For example, if using `systemctl` , the commands to start, stop, restart and check TDengine server are below:
+ ```
+ $ sudo rm -f /var/lib/dpkg/info/tdengine*
+ ```
-- Start server:`systemctl start taosd`
+You can then reinstall if needed.
-- Stop server:`systemctl stop taosd`
+- After rpm package is installed, if the installation directory is removed manually, uninstall or reinstall will not work. This issue can be resolved by using the command below which cleans up TDengine package information.
-- Restart server:`systemctl restart taosd`
+ ```
+ $ sudo rpm -e --noscripts tdengine
+ ```
-- Check server status:`systemctl status taosd`
+You can then reinstall if needed.
-Another component named as `taosAdapter` is to provide HTTP service for TDengine, it should be started and stopped using `systemctl`.
+:::
-If the server process is OK, the output of `systemctl status` is like below:
+Uninstalling and Modifying Files
-```
-Active: active (running)
-```
+- When TDengine is uninstalled, the configuration /etc/taos/taos.cfg, data directory /var/lib/taos, log directory /var/log/taos are kept. They can be deleted manually with caution, because data can't be recovered. Please follow data integrity, security, backup or relevant SOPs before deleting any data.
-Otherwise, the output is as below:
+- When reinstalling TDengine, if the default configuration file /etc/taos/taos.cfg exists, it will be kept and the configuration file in the installation package will be renamed to taos.cfg.orig and stored at /usr/local/taos/cfg to be used as configuration sample. Otherwise the configuration file in the installation package will be installed to /etc/taos/taos.cfg and used.
-```
-Active: inactive (dead)
-```
## Upgrade
-
There are two aspects in upgrade operation: upgrade installation package and upgrade a running server.
To upgrade a package, follow the steps mentioned previously to first uninstall the old version then install the new version.
Upgrading a running server is much more complex. First please check the version number of the old version and the new version. The version number of TDengine consists of 4 sections, only if the first 3 sections match can the old version be upgraded to the new version. The steps of upgrading a running server are as below:
-
- Stop inserting data
- Make sure all data is persisted to disk
-- Make some simple queries (Such as total rows in stables, tables and so on. Note down the values. Follow best practices and relevant SOPs.)
- Stop the cluster of TDengine
- Uninstall old version and install new version
- Start the cluster of TDengine
-- Execute simple queries, such as the ones executed prior to installing the new package, to make sure there is no data loss
+- Execute simple queries, such as the ones executed prior to installing the new package, to make sure there is no data loss
- Run some simple data insertion statements to make sure the cluster works well
- Restore business services
:::warning
-
TDengine doesn't guarantee any lower version is compatible with the data generated by a higher version, so it's never recommended to downgrade the version.
:::
diff --git a/docs/en/13-operation/02-planning.mdx b/docs/en/13-operation/02-planning.mdx
index c1baf92dbfa8d93f83174c05c2ea631d1a469739..2dffa7bb8747e21e4754740208eafed65d341217 100644
--- a/docs/en/13-operation/02-planning.mdx
+++ b/docs/en/13-operation/02-planning.mdx
@@ -1,40 +1,32 @@
---
+sidebar_label: Resource Planning
title: Resource Planning
---
It is important to plan computing and storage resources if using TDengine to build an IoT, time-series or Big Data platform. How to plan the CPU, memory and disk resources required, will be described in this chapter.
-## Memory Requirement of Server Side
+## Server Memory Requirements
-By default, the number of vgroups created for each database is the same as the number of CPU cores. This can be configured by the parameter `maxVgroupsPerDb`. Each vnode in a vgroup stores one replica. Each vnode consumes a fixed amount of memory, i.e. `blocks` \* `cache`. In addition, some memory is required for tag values associated with each table. A fixed amount of memory is required for each cluster. So, the memory required for each DB can be calculated using the formula below:
+Each database creates a fixed number of vgroups. This number is 2 by default and can be configured with the `vgroups` parameter. The number of replicas can be controlled with the `replica` parameter. Each replica requires one vnode per vgroup. Altogether, the memory required by each database depends on the following configuration options:
-```
-Database Memory Size = maxVgroupsPerDb * replica * (blocks * cache + 10MB) + numOfTables * (tagSizePerTable + 0.5KB)
-```
+- vgroups
+- replica
+- buffer
+- pages
+- pagesize
+- cachesize
-For example, assuming the default value of `maxVgroupPerDB` is 64, the default value of `cache` is 16M, the default value of `blocks` is 6, there are 100,000 tables in a DB, the replica number is 1, total length of tag values is 256 bytes, the total memory required for this DB is: 64 \* 1 \* (16 \* 6 + 10) + 100000 \* (0.25 + 0.5) / 1000 = 6792M.
+For more information, see [Database](../../taos-sql/database).
-In the real operation of TDengine, we are more concerned about the memory used by each TDengine server process `taosd`.
+The memory required by a database is therefore greater than or equal to:
```
- taosd_memory = vnode_memory + mnode_memory + query_memory
+vgroups * replica * (buffer + pages * pagesize + cachesize)
```
-In the above formula:
-
-1. "vnode_memory" of a `taosd` process is the memory used by all vnodes hosted by this `taosd` process. It can be roughly calculated by firstly adding up the total memory of all DBs whose memory usage can be derived according to the formula for Database Memory Size, mentioned above, then dividing by number of dnodes and multiplying the number of replicas.
-
-```
- vnode_memory = (sum(Database Memory Size) / number_of_dnodes) * replica
-```
-
-2. "mnode_memory" of a `taosd` process is the memory consumed by a mnode. If there is one (and only one) mnode hosted in a `taosd` process, the memory consumed by "mnode" is "0.2KB \* the total number of tables in the cluster".
-
-3. "query_memory" is the memory used when processing query requests. Each ongoing query consumes at least "0.2 KB \* total number of involved tables".
-
-Please note that the above formulas can only be used to estimate the minimum memory requirement, instead of maximum memory usage. In a real production environment, it's better to reserve some redundance beyond the estimated minimum memory requirement. If memory is abundant, it's suggested to increase the value of parameter `blocks` to speed up data insertion and data query.
+However, note that this requirement is spread over all dnodes in the cluster, not on a single physical machine. The physical servers that run dnodes meet the requirement together. If a cluster has multiple databases, the memory required increases accordingly. In complex environments where dnodes were added after initial deployment in response to increasing resource requirements, load may not be balanced among the original dnodes and newer dnodes. In this situation, the actual status of your dnodes is more important than theoretical calculations.
-## Memory Requirement of Client Side
+## Client Memory Requirements
For the client programs using TDengine client driver `taosc` to connect to the server side there is a memory requirement as well.
@@ -56,10 +48,10 @@ So, at least 3GB needs to be reserved for such a client.
The CPU resources required depend on two aspects:
-- **Data Insertion** Each dnode of TDengine can process at least 10,000 insertion requests in one second, while each insertion request can have multiple rows. The difference in computing resource consumed, between inserting 1 row at a time, and inserting 10 rows at a time is very small. So, the more the number of rows that can be inserted one time, the higher the efficiency. Inserting in batch also imposes requirements on the client side which needs to cache rows to insert in batch once the number of cached rows reaches a threshold.
+- **Data Insertion** Each dnode of TDengine can process at least 10,000 insertion requests in one second, while each insertion request can have multiple rows. The difference in computing resource consumed, between inserting 1 row at a time, and inserting 10 rows at a time is very small. So, the more the number of rows that can be inserted one time, the higher the efficiency. If each insert request contains more than 200 records, a single core can process more than 1 million records per second. Inserting in batch also imposes requirements on the client side which needs to cache rows to insert in batch once the number of cached rows reaches a threshold.
- **Data Query** High efficiency query is provided in TDengine, but it's hard to estimate the CPU resource required because the queries used in different use cases and the frequency of queries vary significantly. It can only be verified with the query statements, query frequency, data size to be queried, and other requirements provided by users.
-In short, the CPU resource required for data insertion can be estimated but it's hard to do so for query use cases. In real operation, it's suggested to control CPU usage below 50%. If this threshold is exceeded, it's a reminder for system operator to add more nodes in the cluster to expand resources.
+In short, the CPU resource required for data insertion can be estimated but it's hard to do so for query use cases. If possible, ensure that CPU usage remains below 50%. If this threshold is exceeded, it's a reminder for system operator to add more nodes in the cluster to expand resources.
## Disk Requirement
@@ -77,6 +69,6 @@ To increase performance, multiple disks can be setup for parallel data reading o
## Number of Hosts
-A host can be either physical or virtual. The total memory, total CPU, total disk required can be estimated according to the formulae mentioned previously. Then, according to the system resources that a single host can provide, assuming all hosts have the same resources, the number of hosts can be derived easily.
+A host can be either physical or virtual. The total memory, total CPU, total disk required can be estimated according to the formulae mentioned previously. If the number of data replicas is not 1, the required resources are multiplied by the number of replicas.
-**Quick Estimation for CPU, Memory and Disk** Please refer to [Resource Estimate](https://www.taosdata.com/config/config.html).
+Then, according to the system resources that a single host can provide, assuming all hosts have the same resources, the number of hosts can be derived easily.
diff --git a/docs/en/13-operation/03-tolerance.md b/docs/en/13-operation/03-tolerance.md
index d4d48d7fcdc2c990b6ea0821e2347c70a809ed79..21a5a902822d7b85f555114a112686d4e35c64aa 100644
--- a/docs/en/13-operation/03-tolerance.md
+++ b/docs/en/13-operation/03-tolerance.md
@@ -1,6 +1,5 @@
---
-sidebar_label: Fault Tolerance
-title: Fault Tolerance & Disaster Recovery
+title: Fault Tolerance and Disaster Recovery
---
## Fault Tolerance
@@ -11,22 +10,21 @@ When a data block is received by TDengine, the original data block is first writ
There are 2 configuration parameters related to WAL:
-- walLevel:
- - 0:wal is disabled
- - 1:wal is enabled without fsync
- - 2:wal is enabled with fsync
-- fsync:This parameter is only valid when walLevel is set to 2. It specifies the interval, in milliseconds, of invoking fsync. If set to 0, it means fsync is invoked immediately once WAL is written.
+- wal_level: Specifies the WAL level. 1 indicates that WAL is enabled but fsync is disabled. 2 indicates that WAL and fsync are both enabled. The default value is 1.
+- wal_fsync_period: This parameter is only valid when wal_level is set to 2. It specifies the interval, in milliseconds, of invoking fsync. If set to 0, it means fsync is invoked immediately once WAL is written.
-To achieve absolutely no data loss, walLevel should be set to 2 and fsync should be set to 1. There is a performance penalty to the data ingestion rate. However, if the concurrent data insertion threads on the client side can reach a big enough number, for example 50, the data ingestion performance will be still good enough. Our verification shows that the drop is only 30% when fsync is set to 3,000 milliseconds.
+To achieve absolutely no data loss, set wal_level to 2 and wal_fsync_period to 0. There is a performance penalty to the data ingestion rate. However, if the concurrent data insertion threads on the client side can reach a big enough number, for example 50, the data ingestion performance will be still good enough. Our verification shows that the drop is only 30% when wal_fsync_period is set to 3000 milliseconds.
## Disaster Recovery
-TDengine uses replication to provide high availability and disaster recovery capability.
+TDengine uses replication to provide high availability.
-A TDengine cluster is managed by mnode. To ensure the high availability of mnode, multiple replicas can be configured by the system parameter `numOfMnodes`. The data replication between mnode replicas is performed in a synchronous way to guarantee metadata consistency.
+A TDengine cluster is managed by mnodes. You can configure up to three mnodes to ensure high availability. The data replication between mnode replicas is performed in a synchronous way to guarantee metadata consistency.
-The number of replicas for time series data in TDengine is associated with each database. There can be many databases in a cluster and each database can be configured with a different number of replicas. When creating a database, parameter `replica` is used to configure the number of replications. To achieve high availability, `replica` needs to be higher than 1.
+The number of replicas for time series data in TDengine is associated with each database. There can be many databases in a cluster and each database can be configured with a different number of replicas. When creating a database, the parameter `replica` is used to specify the number of replicas. To achieve high availability, set `replica` to 3.
The number of dnodes in a TDengine cluster must NOT be lower than the number of replicas for any database, otherwise it would fail when trying to create a table.
As long as the dnodes of a TDengine cluster are deployed on different physical machines and the replica number is higher than 1, high availability can be achieved without any other assistance. For disaster recovery, dnodes of a TDengine cluster should be deployed in geographically different data centers.
+
+Alternatively, you can use taosX to synchronize the data from one TDengine cluster to another cluster in a remote location. However, taosX is only available in TDengine enterprise version, for more information please contact tdengine.com.
diff --git a/docs/en/13-operation/17-diagnose.md b/docs/en/13-operation/17-diagnose.md
index 2b474fddba4af5ba0c29103cd8ab1249d10d055b..d01d12e831956e6a6db654e1f6dbf5072ac6b243 100644
--- a/docs/en/13-operation/17-diagnose.md
+++ b/docs/en/13-operation/17-diagnose.md
@@ -13,110 +13,59 @@ Diagnostic steps:
1. If the port range to be diagnosed is being occupied by a `taosd` server process, please first stop `taosd`.
2. On the server side, execute command `taos -n server -P -l ` to monitor the port range starting from the port specified by `-P` parameter with the role of "server".
3. On the client side, execute command `taos -n client -h -P -l ` to send a testing package to the specified server and port.
-
--l : The size of the testing package, in bytes. The value range is [11, 64,000] and default value is 1,000. Please note that the package length must be same in the above 2 commands executed on server side and client side respectively.
+
+-l : The size of the testing package, in bytes. The value range is [11, 64,000] and default value is 1,000.
+Please note that the package length must be same in the above 2 commands executed on server side and client side respectively.
Output of the server side for the example is below:
```bash
-# taos -n server -P 6000
-12/21 14:50:13.522509 0x7f536f455200 UTL work as server, host:172.27.0.7 startPort:6000 endPort:6011 pkgLen:1000
-
-12/21 14:50:13.522659 0x7f5352242700 UTL TCP server at port:6000 is listening
-12/21 14:50:13.522727 0x7f5351240700 UTL TCP server at port:6001 is listening
-...
-...
+# taos -n server -P 6030 -l 1000
+network test server is initialized, port:6030
+request is received, size:1000
+request is received, size:1000
...
-12/21 14:50:13.523954 0x7f5342fed700 UTL TCP server at port:6011 is listening
-12/21 14:50:13.523989 0x7f53437ee700 UTL UDP server at port:6010 is listening
-12/21 14:50:13.524019 0x7f53427ec700 UTL UDP server at port:6011 is listening
-12/21 14:50:22.192849 0x7f5352242700 UTL TCP: read:1000 bytes from 172.27.0.8 at 6000
-12/21 14:50:22.192993 0x7f5352242700 UTL TCP: write:1000 bytes to 172.27.0.8 at 6000
-12/21 14:50:22.237082 0x7f5351a41700 UTL UDP: recv:1000 bytes from 172.27.0.8 at 6000
-12/21 14:50:22.237203 0x7f5351a41700 UTL UDP: send:1000 bytes to 172.27.0.8 at 6000
-12/21 14:50:22.237450 0x7f5351240700 UTL TCP: read:1000 bytes from 172.27.0.8 at 6001
-12/21 14:50:22.237576 0x7f5351240700 UTL TCP: write:1000 bytes to 172.27.0.8 at 6001
-12/21 14:50:22.281038 0x7f5350a3f700 UTL UDP: recv:1000 bytes from 172.27.0.8 at 6001
-12/21 14:50:22.281141 0x7f5350a3f700 UTL UDP: send:1000 bytes to 172.27.0.8 at 6001
...
...
-...
-12/21 14:50:22.677443 0x7f5342fed700 UTL TCP: read:1000 bytes from 172.27.0.8 at 6011
-12/21 14:50:22.677576 0x7f5342fed700 UTL TCP: write:1000 bytes to 172.27.0.8 at 6011
-12/21 14:50:22.721144 0x7f53427ec700 UTL UDP: recv:1000 bytes from 172.27.0.8 at 6011
-12/21 14:50:22.721261 0x7f53427ec700 UTL UDP: send:1000 bytes to 172.27.0.8 at 6011
+request is received, size:1000
+request is received, size:1000
```
Output of the client side for the example is below:
```bash
# taos -n client -h 172.27.0.7 -P 6000
-12/21 14:50:22.192434 0x7fc95d859200 UTL work as client, host:172.27.0.7 startPort:6000 endPort:6011 pkgLen:1000
-
-12/21 14:50:22.192472 0x7fc95d859200 UTL server ip:172.27.0.7 is resolved from host:172.27.0.7
-12/21 14:50:22.236869 0x7fc95d859200 UTL successed to test TCP port:6000
-12/21 14:50:22.237215 0x7fc95d859200 UTL successed to test UDP port:6000
+taos -n client -h v3s2 -P 6030 -l 1000
+network test client is initialized, the server is v3s2:6030
+request is sent, size:1000
+response is received, size:1000
+request is sent, size:1000
+response is received, size:1000
...
...
...
-12/21 14:50:22.676891 0x7fc95d859200 UTL successed to test TCP port:6010
-12/21 14:50:22.677240 0x7fc95d859200 UTL successed to test UDP port:6010
-12/21 14:50:22.720893 0x7fc95d859200 UTL successed to test TCP port:6011
-12/21 14:50:22.721274 0x7fc95d859200 UTL successed to test UDP port:6011
-```
-
-The output needs to be checked carefully for the system operator to find the root cause and resolve the problem.
-
-## Startup Status and RPC Diagnostic
-
-`taos -n startup -h ` can be used to check the startup status of a `taosd` process. This is a common task which should be performed by a system operator, especially in the case of a cluster, to determine whether `taosd` has been started successfully.
-
-`taos -n rpc -h ` can be used to check whether the port of a started `taosd` can be accessed or not. If `taosd` process doesn't respond or is working abnormally, this command can be used to initiate a rpc communication with the specified fqdn to determine whether it's a network problem or whether `taosd` is abnormal.
-
-## Sync and Arbitrator Diagnostic
+request is sent, size:1000
+response is received, size:1000
+request is sent, size:1000
+response is received, size:1000
-```bash
-taos -n sync -P 6040 -h
-taos -n sync -P 6042 -h
+total succ: 100/100 cost: 16.23 ms speed: 5.87 MB/s
```
-The above commands can be executed in a Linux shell to check whether the port for sync is working well and whether the sync module on the server side is working well. Additionally, `-P 6042` is used to check whether the arbitrator is configured properly and is working well.
-
-## Network Speed Diagnostic
-
-`taos -n speed -h -P 6030 -N 10 -l 10000000 -S TCP`
-
-From version 2.2.0.0 onwards, the above command can be executed in a Linux shell to test network speed. The command sends uncompressed packages to a running `taosd` server process or a simulated server process started by `taos -n server` to test the network speed. Parameters can be used when testing network speed are as below:
-
--n:When set to "speed", it means testing network speed.
--h:The FQDN or IP of the server process to be connected to; if not set, the FQDN configured in `taos.cfg` is used.
--P:The port of the server process to connect to, the default value is 6030.
--N:The number of packages that will be sent in the test, range is [1,10000], default value is 100.
--l:The size of each package in bytes, range is [1024, 1024 \* 1024 \* 1024], default value is 1024.
--S:The type of network packages to send, can be either TCP or UDP, default value is TCP.
-
-## FQDN Resolution Diagnostic
-
-`taos -n fqdn -h `
-
-From version 2.2.0.0 onward, the above command can be executed in a Linux shell to test the resolution speed of FQDN. It can be used to try to resolve a FQDN to an IP address and record the time spent in this process. The parameters that can be used for this purpose are as below:
-
--n:When set to "fqdn", it means testing the speed of resolving FQDN.
--h:The FQDN to be resolved. If not set, the `FQDN` parameter in `taos.cfg` is used by default.
+The output needs to be checked carefully for the system operator to find the root cause and resolve the problem.
## Server Log
-The parameter `debugFlag` is used to control the log level of the `taosd` server process. The default value is 131. For debugging and tracing, it needs to be set to either 135 or 143 respectively.
-
-Once this parameter is set to 135 or 143, the log file grows very quickly especially when there is a huge volume of data insertion and data query requests. If all the logs are stored together, some important information may be missed very easily and so on the server side, important information is stored in a different place from other logs.
+The parameter `debugFlag` is used to control the log level of the `taosd` server process. The default value is 131. For debugging and tracing, it needs to be set to either 135 or 143 respectively.
-- The log at level of INFO, WARNING and ERROR is stored in `taosinfo` so that it is easy to find important information
-- The log at level of DEBUG (135) and TRACE (143) and other information not handled by `taosinfo` are stored in `taosdlog`
+Once this parameter is set to 135 or 143, the log file grows very quickly especially when there is a huge volume of data insertion and data query requests. Ensure that the disk drive on which logs are stored has sufficient space.
## Client Log
-An independent log file, named as "taoslog+" is generated for each client program, i.e. a client process. The default value of `debugFlag` is also 131 and only logs at level of INFO/ERROR/WARNING are recorded. As stated above, for debugging and tracing, it needs to be changed to 135 or 143 respectively, so that logs at DEBUG or TRACE level can be recorded.
+An independent log file, named as "taoslog+" is generated for each client program, i.e. a client process. The parameter `debugFlag` is used to control the log level. The default value is 131. For debugging and tracing, it needs to be set to either 135 or 143 respectively.
+
+The default value of `debugFlag` is also 131 and only logs at level of INFO/ERROR/WARNING are recorded. As stated above, for debugging and tracing, it needs to be changed to 135 or 143 respectively, so that logs at DEBUG or TRACE level can be recorded.
The maximum length of a single log file is controlled by parameter `numOfLogLines` and only 2 log files are kept for each `taosd` server process.
-Log files are written in an async way to minimize the workload on disk, but the trade off for performance is that a few log lines may be lost in some extreme conditions.
+Log files are written in an async way to minimize the workload on disk, but the trade off for performance is that a few log lines may be lost in some extreme conditions. You can configure asynclog to 0 when needed for troubleshooting purposes to ensure that no log information is lost.
diff --git a/docs/en/14-reference/02-rest-api/02-rest-api.mdx b/docs/en/14-reference/02-rest-api/02-rest-api.mdx
index 8d4186a36bb983e688ae2824f13c71f4461bebf2..ce28ee87d9317487d5c610d23287775be6b753ec 100644
--- a/docs/en/14-reference/02-rest-api/02-rest-api.mdx
+++ b/docs/en/14-reference/02-rest-api/02-rest-api.mdx
@@ -10,7 +10,7 @@ One difference from the native connector is that the REST interface is stateless
## Installation
-The REST interface does not rely on any TDengine native library, so the client application does not need to install any TDengine libraries. The client application's development language only needs to support the HTTP protocol.
+The REST interface does not rely on any TDengine native library, so the client application does not need to install any TDengine libraries. The client application's development language only needs to support the HTTP protocol. The REST interface is provided by [taosAdapter](../taosadapter); to use the REST interface, you need to ensure that `taosAdapter` is running properly.
## Verification
@@ -18,12 +18,12 @@ If the TDengine server is already installed, it can be verified as follows:
The following example is in an Ubuntu environment and uses the `curl` tool to verify that the REST interface is working. Note that the `curl` tool may need to be installed in your environment.
-The following example lists all databases on the host h1.taosdata.com. To use it in your environment, replace `h1.taosdata.com` and `6041` (the default port) with the actual running TDengine service FQDN and port number.
+The following example lists all databases on the host h1.tdengine.com. To use it in your environment, replace `h1.tdengine.com` and `6041` (the default port) with the actual running TDengine service FQDN and port number.
```bash
curl -L -H "Authorization: Basic cm9vdDp0YW9zZGF0YQ==" \
-d "select name, ntables, status from information_schema.ins_databases;" \
- h1.taosdata.com:6041/rest/sql
+ h1.tdengine.com:6041/rest/sql
```
The following return value results indicate that the verification passed.
diff --git a/docs/en/14-reference/03-connector/cpp.mdx b/docs/en/14-reference/03-connector/03-cpp.mdx
similarity index 99%
rename from docs/en/14-reference/03-connector/cpp.mdx
rename to docs/en/14-reference/03-connector/03-cpp.mdx
index 5839ed4af89723dcee5e80c186af25a90ae59972..02d7df48db540a3eb44379ada7332b2838924212 100644
--- a/docs/en/14-reference/03-connector/cpp.mdx
+++ b/docs/en/14-reference/03-connector/03-cpp.mdx
@@ -1,5 +1,4 @@
---
-sidebar_position: 1
sidebar_label: C/C++
title: C/C++ Connector
---
diff --git a/docs/en/14-reference/03-connector/java.mdx b/docs/en/14-reference/03-connector/04-java.mdx
similarity index 99%
rename from docs/en/14-reference/03-connector/java.mdx
rename to docs/en/14-reference/03-connector/04-java.mdx
index 39514c37ebf45974ad90b1b7b1e548c8cd4ea672..129d90ea85d9455c1ae460b3799b5253dd3a49fc 100644
--- a/docs/en/14-reference/03-connector/java.mdx
+++ b/docs/en/14-reference/03-connector/04-java.mdx
@@ -1,6 +1,5 @@
---
toc_max_heading_level: 4
-sidebar_position: 2
sidebar_label: Java
title: TDengine Java Connector
description: The TDengine Java Connector is implemented on the standard JDBC API and provides native and REST connectors.
@@ -134,8 +133,6 @@ The configuration parameters in the URL are as follows:
- batchfetch: true: pulls result sets in batches when executing queries; false: pulls result sets row by row. The default value is true. Enabling batch pulling and obtaining a batch of data can improve query performance when the query data volume is large.
- batchErrorIgnore:true: When executing statement executeBatch, if there is a SQL execution failure in the middle, the following SQL will continue to be executed. false: No more statements after the failed SQL are executed. The default value is: false.
-For more information about JDBC native connections, see [Video Tutorial](https://www.taosdata.com/blog/2020/11/11/1955.html).
-
**Connect using the TDengine client-driven configuration file **
When you use a JDBC native connection to connect to a TDengine cluster, you can use the TDengine client driver configuration file to specify parameters such as `firstEp` and `secondEp` of the cluster in the configuration file as below:
diff --git a/docs/en/14-reference/03-connector/go.mdx b/docs/en/14-reference/03-connector/05-go.mdx
similarity index 99%
rename from docs/en/14-reference/03-connector/go.mdx
rename to docs/en/14-reference/03-connector/05-go.mdx
index 29263550403e71614296e52285c956040b04387f..518d3625d54492c2b6ec209302ac91ca32d03ad2 100644
--- a/docs/en/14-reference/03-connector/go.mdx
+++ b/docs/en/14-reference/03-connector/05-go.mdx
@@ -1,6 +1,5 @@
---
toc_max_heading_level: 4
-sidebar_position: 4
sidebar_label: Go
title: TDengine Go Connector
---
@@ -8,7 +7,7 @@ title: TDengine Go Connector
import Tabs from '@theme/Tabs';
import TabItem from '@theme/TabItem';
-import Preparition from "./_preparition.mdx"
+import Preparition from "./_preparation.mdx"
import GoInsert from "../../07-develop/03-insert-data/_go_sql.mdx"
import GoInfluxLine from "../../07-develop/03-insert-data/_go_line.mdx"
import GoOpenTSDBTelnet from "../../07-develop/03-insert-data/_go_opts_telnet.mdx"
diff --git a/docs/en/14-reference/03-connector/rust.mdx b/docs/en/14-reference/03-connector/06-rust.mdx
similarity index 99%
rename from docs/en/14-reference/03-connector/rust.mdx
rename to docs/en/14-reference/03-connector/06-rust.mdx
index e9b16ba94d1db27d0571aad24d04492aeea32fb8..0d391c6ac308c5e9e998e2e7e3423cc5a809905e 100644
--- a/docs/en/14-reference/03-connector/rust.mdx
+++ b/docs/en/14-reference/03-connector/06-rust.mdx
@@ -1,6 +1,5 @@
---
toc_max_heading_level: 4
-sidebar_position: 5
sidebar_label: Rust
title: TDengine Rust Connector
---
@@ -8,7 +7,7 @@ title: TDengine Rust Connector
import Tabs from '@theme/Tabs';
import TabItem from '@theme/TabItem';
-import Preparition from "./_preparition.mdx"
+import Preparition from "./_preparation.mdx"
import RustInsert from "../../07-develop/03-insert-data/_rust_sql.mdx"
import RustBind from "../../07-develop/03-insert-data/_rust_stmt.mdx"
import RustQuery from "../../07-develop/04-query-data/_rust.mdx"
diff --git a/docs/en/14-reference/03-connector/python.mdx b/docs/en/14-reference/03-connector/07-python.mdx
similarity index 97%
rename from docs/en/14-reference/03-connector/python.mdx
rename to docs/en/14-reference/03-connector/07-python.mdx
index e183bbee2258392dafb8102393ec045fe0dae2e9..d92a93fd4fd79bfa449249a16e87268b924c8475 100644
--- a/docs/en/14-reference/03-connector/python.mdx
+++ b/docs/en/14-reference/03-connector/07-python.mdx
@@ -1,5 +1,4 @@
---
-sidebar_position: 3
sidebar_label: Python
title: TDengine Python Connector
description: "taospy is the official Python connector for TDengine. taospy provides a rich API that makes it easy for Python applications to use TDengine. tasopy wraps both the native and REST interfaces of TDengine, corresponding to the two submodules of tasopy: taos and taosrest. In addition to wrapping the native and REST interfaces, taospy also provides a programming interface that conforms to the Python Data Access Specification (PEP 249), making it easy to integrate taospy with many third-party tools, such as SQLAlchemy and pandas."
@@ -8,7 +7,7 @@ description: "taospy is the official Python connector for TDengine. taospy provi
import Tabs from "@theme/Tabs";
import TabItem from "@theme/TabItem";
-`taospy is the official Python connector for TDengine. taospy provides a rich API that makes it easy for Python applications to use TDengine. `taospy` wraps both the [native interface](/reference/connector/cpp) and [REST interface](/reference/rest-api) of TDengine, which correspond to the `taos` and `taosrest` modules of the `taospy` package, respectively.
+`taospy` is the official Python connector for TDengine. taospy provides a rich API that makes it easy for Python applications to use TDengine. `taospy` wraps both the [native interface](/reference/connector/cpp) and [REST interface](/reference/rest-api) of TDengine, which correspond to the `taos` and `taosrest` modules of the `taospy` package, respectively.
In addition to wrapping the native and REST interfaces, `taospy` also provides a set of programming interfaces that conforms to the [Python Data Access Specification (PEP 249)](https://peps.python.org/pep-0249/). It is easy to integrate `taospy` with many third-party tools, such as [SQLAlchemy](https://www.sqlalchemy.org/) and [pandas](https://pandas.pydata.org/).
The direct connection to the server using the native interface provided by the client driver is referred to hereinafter as a "native connection"; the connection to the server using the REST interface provided by taosAdapter is referred to hereinafter as a "REST connection".
diff --git a/docs/en/14-reference/03-connector/node.mdx b/docs/en/14-reference/03-connector/08-node.mdx
similarity index 98%
rename from docs/en/14-reference/03-connector/node.mdx
rename to docs/en/14-reference/03-connector/08-node.mdx
index d1700444351d6f54f799a1c84674735800959c3c..bf7c6b95ea67dc8bf8fa1277591b549a2fd6322d 100644
--- a/docs/en/14-reference/03-connector/node.mdx
+++ b/docs/en/14-reference/03-connector/08-node.mdx
@@ -1,6 +1,5 @@
---
toc_max_heading_level: 4
-sidebar_position: 6
sidebar_label: Node.js
title: TDengine Node.js Connector
---
@@ -8,7 +7,7 @@ title: TDengine Node.js Connector
import Tabs from "@theme/Tabs";
import TabItem from "@theme/TabItem";
-import Preparition from "./_preparition.mdx";
+import Preparition from "./_preparation.mdx";
import NodeInsert from "../../07-develop/03-insert-data/_js_sql.mdx";
import NodeInfluxLine from "../../07-develop/03-insert-data/_js_line.mdx";
import NodeOpenTSDBTelnet from "../../07-develop/03-insert-data/_js_opts_telnet.mdx";
diff --git a/docs/en/14-reference/03-connector/csharp.mdx b/docs/en/14-reference/03-connector/09-csharp.mdx
similarity index 98%
rename from docs/en/14-reference/03-connector/csharp.mdx
rename to docs/en/14-reference/03-connector/09-csharp.mdx
index 388ae49d09e1ee8a7e0f012432d9bbb98da3fc45..bc16cd086bdbef4b594df6e866a019a02ae54fd8 100644
--- a/docs/en/14-reference/03-connector/csharp.mdx
+++ b/docs/en/14-reference/03-connector/09-csharp.mdx
@@ -1,6 +1,5 @@
---
toc_max_heading_level: 4
-sidebar_position: 7
sidebar_label: C#
title: C# Connector
---
@@ -8,7 +7,7 @@ title: C# Connector
import Tabs from '@theme/Tabs';
import TabItem from '@theme/TabItem';
-import Preparition from "./_preparition.mdx"
+import Preparition from "./_preparation.mdx"
import CSInsert from "../../07-develop/03-insert-data/_cs_sql.mdx"
import CSInfluxLine from "../../07-develop/03-insert-data/_cs_line.mdx"
import CSOpenTSDBTelnet from "../../07-develop/03-insert-data/_cs_opts_telnet.mdx"
@@ -173,7 +172,6 @@ namespace TDengineExample
`Taos` is an ADO.NET connector for TDengine, supporting Linux and Windows platforms. Community contributor `Maikebing@@maikebing contributes the connector`. Please refer to:
* Interface download:
-* Usage notes:
## Frequently Asked Questions
diff --git a/docs/en/14-reference/03-connector/php.mdx b/docs/en/14-reference/03-connector/10-php.mdx
similarity index 98%
rename from docs/en/14-reference/03-connector/php.mdx
rename to docs/en/14-reference/03-connector/10-php.mdx
index 08cf34495f53099ce32a2cb921e06fb7fd631e23..820f70375982eb54cdd87602b891e5f04756c0e5 100644
--- a/docs/en/14-reference/03-connector/php.mdx
+++ b/docs/en/14-reference/03-connector/10-php.mdx
@@ -1,6 +1,5 @@
---
-sidebar_position: 1
-sidebar_label: PHP (community contribution)
+sidebar_label: PHP
title: PHP Connector
---
diff --git a/docs/en/14-reference/03-connector/_preparation.mdx b/docs/en/14-reference/03-connector/_preparation.mdx
index 07ebdbca3d891ff51a254bc1b83016f1404bb47e..c6e42ce02348595da0fdd75847d6442c285dc10a 100644
--- a/docs/en/14-reference/03-connector/_preparation.mdx
+++ b/docs/en/14-reference/03-connector/_preparation.mdx
@@ -2,7 +2,7 @@
:::info
-Since the TDengine client driver is written in C, using the native connection requires loading the client driver shared library file, which is usually included in the TDengine installer. You can install either standard TDengine server installation package or [TDengine client installation package](/get-started/). For Windows development, you need to install the corresponding [Windows client](https://www.taosdata.com/cn/all-downloads/#TDengine-Windows-Client) for TDengine.
+Since the TDengine client driver is written in C, using the native connection requires loading the client driver shared library file, which is usually included in the TDengine installer. You can install either standard TDengine server installation package or [TDengine client installation package](/get-started/). For Windows development, you need to install the corresponding Windows client, please refer to [Install TDengine](../../get-started/package).
- libtaos.so: After successful installation of TDengine on a Linux system, the dependent Linux version of the client driver `libtaos.so` file will be automatically linked to `/usr/lib/libtaos.so`, which is included in the Linux scannable path and does not need to be specified separately.
- taos.dll: After installing the client on Windows, the dependent Windows version of the client driver taos.dll file will be automatically copied to the system default search path C:/Windows/System32, again without the need to specify it separately.
diff --git a/docs/en/14-reference/03-connector/_preparition.mdx b/docs/en/14-reference/03-connector/_preparition.mdx
deleted file mode 100644
index 87538ebfd8c60507aec90ee86e427d85979dbc4a..0000000000000000000000000000000000000000
--- a/docs/en/14-reference/03-connector/_preparition.mdx
+++ /dev/null
@@ -1,10 +0,0 @@
-- 已安装客户端驱动(使用原生连接必须安装,使用 REST 连接无需安装)
-
-:::info
-
-由于 TDengine 的客户端驱动使用 C 语言编写,使用原生连接时需要加载系统对应安装在本地的客户端驱动共享库文件,通常包含在 TDengine 安装包。TDengine Linux 服务端安装包附带了 TDengine 客户端,也可以单独安装 [Linux 客户端](/get-started/) 。在 Windows 环境开发时需要安装 TDengine 对应的 [Windows 客户端](https://www.taosdata.com/cn/all-downloads/#TDengine-Windows-Client) 。
-
-- libtaos.so: 在 Linux 系统中成功安装 TDengine 后,依赖的 Linux 版客户端驱动 libtaos.so 文件会被自动拷贝至 /usr/lib/libtaos.so,该目录包含在 Linux 自动扫描路径上,无需单独指定。
-- taos.dll: 在 Windows 系统中安装完客户端之后,依赖的 Windows 版客户端驱动 taos.dll 文件会自动拷贝到系统默认搜索路径 C:/Windows/System32 下,同样无需要单独指定。
-
-:::
diff --git a/docs/en/14-reference/03-connector/03-connector.mdx b/docs/en/14-reference/03-connector/index.mdx
similarity index 100%
rename from docs/en/14-reference/03-connector/03-connector.mdx
rename to docs/en/14-reference/03-connector/index.mdx
diff --git a/docs/en/14-reference/06-taosdump.md b/docs/en/14-reference/06-taosdump.md
index 2105ba83fad9700674e28609016b07ef6de66833..e73441a96b087062b2e3912ed73010fc3e761bb9 100644
--- a/docs/en/14-reference/06-taosdump.md
+++ b/docs/en/14-reference/06-taosdump.md
@@ -116,5 +116,4 @@ Usage: taosdump [OPTION...] dbname [tbname ...]
Mandatory or optional arguments to long options are also mandatory or optional
for any corresponding short options.
-Report bugs to .
```
diff --git a/docs/en/14-reference/07-tdinsight/index.md b/docs/en/14-reference/07-tdinsight/index.md
index e74c9de7b2aa71278a99d45f250e0dcaf86d4704..2e562035254311f2caa0b6d4512842080aab64d5 100644
--- a/docs/en/14-reference/07-tdinsight/index.md
+++ b/docs/en/14-reference/07-tdinsight/index.md
@@ -263,7 +263,7 @@ Once the import is complete, the full page view of TDinsight is shown below.
## TDinsight dashboard details
-The TDinsight dashboard is designed to provide the usage and status of TDengine-related resources [dnodes, mnodes, vnodes](https://www.taosdata.com/cn/documentation/architecture#cluster) or databases.
+The TDinsight dashboard is designed to provide the usage and status of TDengine-related resources [dnodes, mnodes, vnodes](../../taos-sql/node/) or databases.
Details of the metrics are as follows.
diff --git a/docs/en/14-reference/11-docker/index.md b/docs/en/14-reference/11-docker/index.md
index b3c3cddd9a9958dcb0bab477128c0339da1f0aa3..7cd1e810dca010d16b0f2e257d47e012c6ef06cc 100644
--- a/docs/en/14-reference/11-docker/index.md
+++ b/docs/en/14-reference/11-docker/index.md
@@ -72,7 +72,7 @@ Next, ensure the hostname "tdengine" is resolvable in `/etc/hosts`.
echo 127.0.0.1 tdengine |sudo tee -a /etc/hosts
```
-Finally, the TDengine service can be accessed from the taos shell or any connector with "tdengine" as the server address.
+Finally, the TDengine service can be accessed from the TDengine CLI or any connector with "tdengine" as the server address.
```shell
taos -h tdengine -P 6030
@@ -116,7 +116,7 @@ If you want to start your application in a container, you need to add the corres
FROM ubuntu:20.04
RUN apt-get update && apt-get install -y wget
ENV TDENGINE_VERSION=3.0.0.0
-RUN wget -c https://www.taosdata.com/assets-download/TDengine-client-${TDENGINE_VERSION}-Linux-x64.tar.gz \
+RUN wget -c https://www.taosdata.com/assets-download/3.0/TDengine-client-${TDENGINE_VERSION}-Linux-x64.tar.gz \
&& tar xvf TDengine-client-${TDENGINE_VERSION}-Linux-x64.tar.gz \
&& cd TDengine-client-${TDENGINE_VERSION} \
&& ./install_client.sh \
@@ -217,7 +217,7 @@ Here is the full Dockerfile:
```docker
FROM golang:1.17.6-buster as builder
ENV TDENGINE_VERSION=3.0.0.0
-RUN wget -c https://www.taosdata.com/assets-download/TDengine-client-${TDENGINE_VERSION}-Linux-x64.tar.gz \
+RUN wget -c https://www.taosdata.com/assets-download/3.0/TDengine-client-${TDENGINE_VERSION}-Linux-x64.tar.gz \
&& tar xvf TDengine-client-${TDENGINE_VERSION}-Linux-x64.tar.gz \
&& cd TDengine-client-${TDENGINE_VERSION} \
&& ./install_client.sh \
@@ -233,7 +233,7 @@ RUN go build
FROM ubuntu:20.04
RUN apt-get update && apt-get install -y wget
ENV TDENGINE_VERSION=3.0.0.0
-RUN wget -c https://www.taosdata.com/assets-download/TDengine-client-${TDENGINE_VERSION}-Linux-x64.tar.gz \
+RUN wget -c https://www.taosdata.com/assets-download/3.0/TDengine-client-${TDENGINE_VERSION}-Linux-x64.tar.gz \
&& tar xvf TDengine-client-${TDENGINE_VERSION}-Linux-x64.tar.gz \
&& cd TDengine-client-${TDENGINE_VERSION} \
&& ./install_client.sh \
diff --git a/docs/en/14-reference/12-config/index.md b/docs/en/14-reference/12-config/index.md
index cb7daf3c476b2117b5de53c683e76ce07de97bc5..02921c3f6a4ce21175504c3c07bd51bb4a3dcf60 100644
--- a/docs/en/14-reference/12-config/index.md
+++ b/docs/en/14-reference/12-config/index.md
@@ -1,16 +1,13 @@
---
-sidebar_label: Configuration
title: Configuration Parameters
description: "Configuration parameters for client and server in TDengine"
---
-In this chapter, all the configuration parameters on both server and client side are described thoroughly.
-
## Configuration File on Server Side
On the server side, the actual service of TDengine is provided by an executable `taosd` whose parameters can be configured in file `taos.cfg` to meet the requirements of different use cases. The default location of `taos.cfg` is `/etc/taos`, but can be changed by using `-c` parameter on the CLI of `taosd`. For example, the configuration file can be put under `/home/user` and used like below
-```bash
+```
taosd -c /home/user
```
@@ -24,8 +21,6 @@ taosd -C
TDengine CLI `taos` is the tool for users to interact with TDengine. It can share same configuration file as `taosd` or use a separate configuration file. When launching `taos`, parameter `-c` can be used to specify the location where its configuration file is. For example `taos -c /home/cfg` means `/home/cfg/taos.cfg` will be used. If `-c` is not used, the default location of the configuration file is `/etc/taos`. For more details please use `taos --help` to get.
-From version 2.0.10.0 below commands can be used to show the configuration parameters of the client side.
-
```bash
taos -C
```
@@ -36,6 +31,11 @@ taos --dump-config
# Configuration Parameters
+:::note
+The parameters described in this document are organized by the effect that they have on the system.
+
+:::
+
:::note
`taosd` needs to be restarted for the parameters changed in the configuration file to take effect.
@@ -45,19 +45,19 @@ taos --dump-config
### firstEp
-| Attribute | Description |
-| ------------- | ---------------------------------------------------------------------------------------------------- |
-| Applicable | Server and Client |
+| Attribute | Description |
+| -------- | -------------------------------------------------------------- |
+| Applicable | Server and Client |
| Meaning | The end point of the first dnode in the cluster to be connected to when `taosd` or `taos` is started |
-| Default Value | localhost:6030 |
+| Default | localhost:6030 |
### secondEp
-| Attribute | Description |
-| ------------- | ---------------------------------------------------------------------------------------------------------------------- |
-| Applicable | Server and Client |
+| Attribute | Description |
+| -------- | ------------------------------------------------------------------------------------- |
+| Applicable | Server and Client |
| Meaning | The end point of the second dnode to be connected to if the firstEp is not available when `taosd` or `taos` is started |
-| Default Value | None |
+| Default | None |
### fqdn
@@ -65,35 +65,28 @@ taos --dump-config
| ------------- | ------------------------------------------------------------------------ |
| Applicable | Server Only |
| Meaning | The FQDN of the host where `taosd` will be started. It can be IP address |
-| Default Value | The first hostname configured for the host |
-| Note | It should be within 96 bytes |
+| Default Value | The first hostname configured for the host |
+| Note | It should be within 96 bytes |
### serverPort
-| Attribute | Description |
-| ------------- | ------------------------------------------------------------------------------------------------------------------------------- |
-| Applicable | Server Only |
+| Attribute | Description |
+| -------- | ----------------------------------------------------------------------------------------------------------------------- |
+| Applicable | Server Only |
| Meaning | The port for external access after `taosd` is started |
| Default Value | 6030 |
:::note
-TDengine uses 13 continuous ports, both TCP and UDP, starting with the port specified by `serverPort`. You should ensure, in your firewall rules, that these ports are kept open. Below table describes the ports used by TDengine in details.
-
+- Ensure that your firewall rules do not block TCP port 6042 on any host in the cluster. Below table describes the ports used by TDengine in details.
:::
-
| Protocol | Default Port | Description | How to configure |
| :------- | :----------- | :----------------------------------------------- | :--------------------------------------------------------------------------------------------- |
-| TCP | 6030 | Communication between client and server | serverPort |
-| TCP | 6035 | Communication among server nodes in cluster | serverPort+5 |
-| TCP | 6040 | Data syncup among server nodes in cluster | serverPort+10 |
-| TCP | 6041 | REST connection between client and server | Please refer to [taosAdapter](../taosadapter/) |
-| TCP | 6042 | Service Port of Arbitrator | The parameter of Arbitrator |
-| TCP | 6043 | Service Port of TaosKeeper | The parameter of TaosKeeper |
-| TCP | 6044 | Data access port for StatsD | refer to [taosAdapter](../taosadapter/) |
-| UDP | 6045 | Data access for statsd | refer to [taosAdapter](../taosadapter/) |
-| TCP | 6060 | Port of Monitoring Service in Enterprise version | |
-| UDP | 6030-6034 | Communication between client and server | serverPort |
-| UDP | 6035-6039 | Communication among server nodes in cluster | serverPort |
+| TCP | 6030 | Communication between client and server; in a multi-node cluster, communication between nodes | serverPort |
+| TCP | 6041 | REST connection between client and server | Prior to 2.4.0.0: serverPort+11; After 2.4.0.0 refer to [taosAdapter](/reference/taosadapter/) |
+| TCP | 6043 | Service Port of TaosKeeper | The parameter of TaosKeeper |
+| TCP | 6044 | Data access port for StatsD | Configurable through taosAdapter parameters. |
+| UDP | 6045 | Data access for statsd | Configurable through taosAdapter parameters. |
+| TCP | 6060 | Port of Monitoring Service in Enterprise version | |
### maxShellConns
@@ -104,104 +97,109 @@ TDengine uses 13 continuous ports, both TCP and UDP, starting with the port spec
| Value Range | 10-50000000 |
| Default Value | 5000 |
-### maxConnections
+## Monitoring Parameters
-| Attribute | Description |
-| ------------- | ----------------------------------------------------------------------------- |
-| Applicable | Server Only |
-| Meaning | The maximum number of connections allowed by a database |
-| Value Range | 1-100000 |
-| Default Value | 5000 |
-| Note | The maximum number of worker threads on the client side is maxConnections/100 |
+### monitor
-### rpcForceTcp
+| Attribute | Description |
+| -------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
+| Applicable | Server only |
+| Meaning | The switch for monitoring inside server. The main object of monitoring is to collect information about load on physical nodes, including CPU usage, memory usage, disk usage, and network bandwidth. Monitoring information is sent over HTTP to the taosKeeper service specified by `monitorFqdn` and `monitorPort`. |
+| Value Range | 0: monitoring disabled, 1: monitoring enabled |
+| Default | 1 |
-| Attribute | Description |
-| ------------- | ------------------------------------------------------------------- |
-| Applicable | Server and Client |
-| Meaning | TCP is used by force |
-| Value Range | 0: disabled 1: enabled |
-| Default Value | 0 |
-| Note | It's suggested to configure to enable if network is not good enough |
+### monitorFqdn
-## Monitoring Parameters
+| Attribute | Description |
+| -------- | -------------------------- |
+| Applicable | Server Only |
+| Meaning | FQDN of taosKeeper monitoring service |
+| Default | None |
-### monitor
+### monitorPort
-| Attribute | Description |
-| ------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
-| Applicable | Server Only |
-| Meaning | The switch for monitoring inside server. The workload of the hosts, including CPU, memory, disk, network, TTP requests, are collected and stored in a system builtin database `LOG` |
-| Value Range | 0: monitoring disabled, 1: monitoring enabled |
-| Default Value | 1 |
+| Attribute | Description |
+| -------- | --------------------------- |
+| Applicable | Server Only |
+| Meaning | Port of taosKeeper monitoring service |
+| Default Value | 6043 |
### monitorInterval
-| Attribute | Description |
-| ------------- | ------------------------------------------ |
-| Applicable | Server Only |
+| Attribute | Description |
+| -------- | -------------------------------------------- |
+| Applicable | Server Only |
| Meaning | The interval of collecting system workload |
| Unit | second |
-| Value Range | 1-600 |
-| Default Value | 30 |
+| Value Range | 1-200000 |
+| Default Value | 30 |
### telemetryReporting
-| Attribute | Description |
-| ------------- | ---------------------------------------------------------------------------- |
-| Applicable | Server Only |
+| Attribute | Description |
+| -------- | ---------------------------------------- |
+| Applicable | Server Only |
| Meaning | Switch for allowing TDengine to collect and report service usage information |
| Value Range | 0: Not allowed; 1: Allowed |
-| Default Value | 1 |
+| Default Value | 1 |
## Query Parameters
-### queryBufferSize
+### queryPolicy
+
+| Attribute | Description |
+| -------- | ----------------------------- |
+| Applicable | Client only |
+| Meaning | Execution policy for query statements |
+| Unit | None |
+| Default | 1 |
+| Notes | 1: Run queries on vnodes and not on qnodes |
+
+2: Run subtasks without scan operators on qnodes and subtasks with scan operators on vnodes.
+
+3: Only run scan operators on vnodes; run all other operators on qnodes.
+
+### querySmaOptimize
-| Attribute | Description |
-| ------------- | ---------------------------------------------------------------------------------------- |
-| Applicable | Server Only |
-| Meaning | The total memory size reserved for all queries |
-| Unit | MB |
-| Default Value | None |
-| Note | It can be estimated by "maximum number of concurrent queries" _ "number of tables" _ 170 |
+| Attribute | Description |
+| -------- | -------------------- |
+| Applicable | Client only |
+| Meaning | SMA index optimization policy |
+| Unit | None |
+| Default Value | 0 |
+| Notes |
+
+0: Disable SMA indexing and perform all queries on non-indexed data.
-### ratioOfQueryCores
+1: Enable SMA indexing and perform queries from suitable statements on precomputation results.
-| Attribute | Description |
-| ------------- | ----------------------------------------------------------------------------------------------------- |
-| Applicable | Server Only |
-| Meaning | Maximum number of query threads |
-| Default Value | 1 |
-| Note | value range: float number between [0, 2] 0: only 1 query thread; >0: the times of the number of cores |
### maxNumOfDistinctRes
-| Attribute | Description |
-| ------------- | -------------------------------------------- |
-| Applicable | Server Only |
+| Attribute | Description |
+| -------- | -------------------------------- |
+| Applicable | Server Only |
| Meaning | The maximum number of distinct rows returned |
| Value Range | [100,000 - 100,000,000] |
| Default Value | 100,000 |
-| Note | After version 2.3.0.0 |
## Locale Parameters
### timezone
-| Attribute | Description |
-| ------------- | ------------------------------- |
-| Applicable | Server and Client |
+| Attribute | Description |
+| -------- | ------------------------------ |
+| Applicable | Server and Client |
| Meaning | TimeZone |
| Default Value | TimeZone configured in the host |
:::info
-To handle the data insertion and data query from multiple timezones, Unix Timestamp is used and stored in TDengine. The timestamp generated from any timezones at same time is same in Unix timestamp. To make sure the time on client side can be converted to Unix timestamp correctly, the timezone must be set properly.
+To handle the data insertion and data query from multiple timezones, Unix Timestamp is used and stored in TDengine. The timestamp generated from any timezones at same time is same in Unix timestamp. Note that Unix timestamps are converted and recorded on the client side. To make sure the time on client side can be converted to Unix timestamp correctly, the timezone must be set properly.
-On Linux system, TDengine clients automatically obtain timezone from the host. Alternatively, the timezone can be configured explicitly in configuration file `taos.cfg` like below.
+On Linux system, TDengine clients automatically obtain timezone from the host. Alternatively, the timezone can be configured explicitly in configuration file `taos.cfg` like below. For example:
```
-timezone UTC-7
+timezone UTC-8
timezone GMT-8
timezone Asia/Shanghai
```
@@ -239,11 +237,11 @@ To avoid the problems of using time strings, Unix timestamp can be used directly
| Default Value | Locale configured in host |
:::info
-A specific type "nchar" is provided in TDengine to store non-ASCII characters such as Chinese, Japanese, and Korean. The characters to be stored in nchar type are firstly encoded in UCS4-LE before sending to server side. To store non-ASCII characters correctly, the encoding format of the client side needs to be set properly.
+A specific type "nchar" is provided in TDengine to store non-ASCII characters such as Chinese, Japanese, and Korean. The characters to be stored in nchar type are firstly encoded in UCS4-LE before sending to server side. Note that the correct encoding is determined by the user. To store non-ASCII characters correctly, the encoding format of the client side needs to be set properly.
The characters input on the client side are encoded using the default system encoding, which is UTF-8 on Linux, or GB18030 or GBK on some systems in Chinese, POSIX in docker, CP936 on Windows in Chinese. The encoding of the operating system in use must be set correctly so that the characters in nchar type can be converted to UCS4-LE.
-The locale definition standard on Linux is: \_., for example, in "zh_CN.UTF-8", "zh" means Chinese, "CN" means China mainland, "UTF-8" means charset. On Linux and Mac OSX, the charset can be set by locale in the system. On Windows system another configuration parameter `charset` must be used to configure charset because the locale used on Windows is not POSIX standard. Of course, `charset` can also be used on Linux to specify the charset.
+The locale definition standard on Linux is: \_., for example, in "zh_CN.UTF-8", "zh" means Chinese, "CN" means China mainland, "UTF-8" means charset. The charset indicates how to display the characters. On Linux and Mac OSX, the charset can be set by locale in the system. On Windows system another configuration parameter `charset` must be used to configure charset because the locale used on Windows is not POSIX standard. Of course, `charset` can also be used on Linux to specify the charset.
:::
@@ -256,29 +254,37 @@ The locale definition standard on Linux is: \_., f
| Default Value | charset set in the system |
:::info
-On Linux, if `charset` is not set in `taos.cfg`, when `taos` is started, the charset is obtained from system locale. If obtaining charset from system locale fails, `taos` would fail to start. So on Linux system, if system locale is set properly, it's not necessary to set `charset` in `taos.cfg`. For example:
+On Linux, if `charset` is not set in `taos.cfg`, when `taos` is started, the charset is obtained from system locale. If obtaining charset from system locale fails, `taos` would fail to start.
+
+So on Linux system, if system locale is set properly, it's not necessary to set `charset` in `taos.cfg`. For example:
```
locale zh_CN.UTF-8
```
+On Windows system, it's not possible to obtain charset from system locale. If it's not set in configuration file `taos.cfg`, it would be default to CP936, same as set as below in `taos.cfg`. For example
+
+```
+charset CP936
+```
+
+Refer to the documentation for your operating system before changing the charset.
+
On a Linux system, if the charset contained in `locale` is not consistent with that set by `charset`, the later setting in the configuration file takes precedence.
-```title="Effective charset is GBK"
+```
locale zh_CN.UTF-8
charset GBK
```
-```title="Effective charset is UTF-8"
+The charset that takes effect is GBK.
+
+```
charset GBK
locale zh_CN.UTF-8
```
-On Windows system, it's not possible to obtain charset from system locale. If it's not set in configuration file `taos.cfg`, it would be default to CP936, same as set as below in `taos.cfg`. For example
-
-```
-charset CP936
-```
+The charset that takes effect is UTF-8.
:::
@@ -286,429 +292,98 @@ charset CP936
### dataDir
-| Attribute | Description |
-| ------------- | ------------------------------------------- |
+| Attribute | Description |
+| -------- | ------------------------------------------ |
| Applicable | Server Only |
| Meaning | All data files are stored in this directory |
| Default Value | /var/lib/taos |
-### cache
-
-| Attribute | Description |
-| ------------- | ----------------------------- |
-| Applicable | Server Only |
-| Meaning | The size of each memory block |
-| Unit | MB |
-| Default Value | 16 |
-
-### blocks
-
-| Attribute | Description |
-| ------------- | -------------------------------------------------------------- |
-| Applicable | Server Only |
-| Meaning | The number of memory blocks of size `cache` used by each vnode |
-| Default Value | 6 |
-
-### days
-
-| Attribute | Description |
-| ------------- | ----------------------------------------------------- |
-| Applicable | Server Only |
-| Meaning | The time range of the data stored in single data file |
-| Unit | day |
-| Default Value | 10 |
-
-### keep
-
-| Attribute | Description |
-| ------------- | -------------------------------------- |
-| Applicable | Server Only |
-| Meaning | The number of days for data to be kept |
-| Unit | day |
-| Default Value | 3650 |
-
-### minRows
-
-| Attribute | Description |
-| ------------- | ------------------------------------------ |
-| Applicable | Server Only |
-| Meaning | minimum number of rows in single data file |
-| Default Value | 100 |
-
-### maxRows
-
-| Attribute | Description |
-| ------------- | ------------------------------------------ |
-| Applicable | Server Only |
-| Meaning | maximum number of rows in single data file |
-| Default Value | 4096 |
-
-### walLevel
-
-| Attribute | Description |
-| ------------- | ---------------------------------------------------------------------------------- |
-| Applicable | Server Only |
-| Meaning | WAL level |
-| Value Range | 0: wal disabled 1: wal enabled without fsync 2: wal enabled with fsync |
-| Default Value | 1 |
-
-### fsync
-
-| Attribute | Description |
-| ------------- | --------------------------------------------------------------------------------------------------------------------- |
-| Applicable | Server Only |
-| Meaning | The waiting time for invoking fsync when walLevel is 2 |
-| Unit | millisecond |
-| Value Range | 0: no waiting time, fsync is performed immediately once WAL is written; maximum value is 180000, i.e. 3 minutes |
-| Default Value | 3000 |
-
-### update
-
-| Attribute | Description |
-| ------------- | ------------------------------------------------------------------------------------------------------ |
-| Applicable | Server Only |
-| Meaning | If it's allowed to update existing data |
-| Value Range | 0: not allowed 1: a row can only be updated as a whole 2: a part of columns can be updated |
-| Default Value | 0 |
-| Note | Not available from version 2.0.8.0 |
-
-### cacheLast
-
-| Attribute | Description |
-| ------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
-| Applicable | Server Only |
-| Meaning | Whether to cache the latest rows of each sub table in memory |
-| Value Range | 0: not cached 1: the last row of each sub table is cached 2: the last non-null value of each column is cached 3: identical to both 1 and 2 are set |
-| Default Value | 0 |
-
### minimalTmpDirGB
-| Attribute | Description |
-| ------------- | ----------------------------------------------------------------------------------------------- |
-| Applicable | Server and Client |
+| Attribute | Description |
+| -------- | ------------------------------------------------ |
+| Applicable | Server and Client |
| Meaning | When the available disk space in tmpDir is below this threshold, writing to tmpDir is suspended |
-| Unit | GB |
-| Default Value | 1.0 |
+| Unit | GB |
+| Default Value | 1.0 |
### minimalDataDirGB
-| Attribute | Description |
-| ------------- | ------------------------------------------------------------------------------------------------ |
-| Applicable | Server Only |
-| Meaning | hen the available disk space in dataDir is below this threshold, writing to dataDir is suspended |
-| Unit | GB |
-| Default Value | 2.0 |
-
-### vnodeBak
-
-| Attribute | Description |
-| ------------- | --------------------------------------------------------------------------- |
-| Applicable | Server Only |
-| Meaning | Whether to backup the corresponding vnode directory when a vnode is deleted |
-| Value Range | 0: not backed up, 1: backup |
-| Default Value | 1 |
+| Attribute | Description |
+| -------- | ------------------------------------------------ |
+| Applicable | Server Only |
+| Meaning | When the available disk space in dataDir is below this threshold, writing to dataDir is suspended |
+| Unit | GB |
+| Default Value | 2.0 |
## Cluster Parameters
-### numOfMnodes
-
-| Attribute | Description |
-| ------------- | ------------------------------ |
-| Applicable | Server Only |
-| Meaning | The number of management nodes |
-| Default Value | 3 |
-
-### replica
-
-| Attribute | Description |
-| ------------- | -------------------------- |
-| Applicable | Server Only |
-| Meaning | The number of replications |
-| Value Range | 1-3 |
-| Default Value | 1 |
-
-### quorum
-
-| Attribute | Description |
-| ------------- | ------------------------------------------------------------------------------------------ |
-| Applicable | Server Only |
-| Meaning | The number of required confirmations for data replication in case of multiple replications |
-| Value Range | 1,2 |
-| Default Value | 1 |
-
-### role
-
-| Attribute | Description |
-| ------------- | --------------------------------------------------------------- |
-| Applicable | Server Only |
-| Meaning | The role of the dnode |
-| Value Range | 0: both mnode and vnode 1: mnode only 2: dnode only |
-| Default Value | 0 |
-
-### balance
-
-| Attribute | Description |
-| ------------- | ------------------------ |
-| Applicable | Server Only |
-| Meaning | Automatic load balancing |
-| Value Range | 0: disabled, 1: enabled |
-| Default Value | 1 |
-
-### balanceInterval
+### supportVnodes
-| Attribute | Description |
-| ------------- | ----------------------------------------------- |
-| Applicable | Server Only |
-| Meaning | The interval for checking load balance by mnode |
-| Unit | second |
-| Value Range | 1-30000 |
-| Default Value | 300 |
-
-### arbitrator
-
-| Attribute | Description |
-| ------------- | -------------------------------------------------- |
-| Applicable | Server Only |
-| Meaning | End point of arbitrator, format is same as firstEp |
-| Default Value | None |
+| Attribute | Description |
+| -------- | --------------------------- |
+| Applicable | Server Only |
+| Meaning | Maximum number of vnodes per dnode |
+| Value Range | 0-4096 |
+| Default Value | 256 |
## Time Parameters
-### precision
-
-| Attribute | Description |
-| ------------- | ------------------------------------------------- |
-| Applicable | Server only |
-| Meaning | Time precision used for each database |
-| Value Range | ms: millisecond; us: microsecond ; ns: nanosecond |
-| Default Value | ms |
-
-### rpcTimer
-
-| Attribute | Description |
-| ------------- | ------------------ |
-| Applicable | Server and Client |
-| Meaning | rpc retry interval |
-| Unit | milliseconds |
-| Value Range | 100-3000 |
-| Default Value | 300 |
-
-### rpcMaxTime
-
-| Attribute | Description |
-| ------------- | ---------------------------------- |
-| Applicable | Server and Client |
-| Meaning | maximum wait time for rpc response |
-| Unit | second |
-| Value Range | 100-7200 |
-| Default Value | 600 |
-
### statusInterval
-| Attribute | Description |
-| ------------- | ----------------------------------------------- |
-| Applicable | Server Only |
+| Attribute | Description |
+| -------- | --------------------------- |
+| Applicable | Server Only |
| Meaning | the interval of dnode reporting status to mnode |
| Unit | second |
-| Value Range | 1-10 |
-| Default Value | 1 |
+| Value Range | 1-10 |
+| Default Value | 1 |
### shellActivityTimer
-| Attribute | Description |
-| ------------- | ------------------------------------------------------ |
-| Applicable | Server and Client |
-| Meaning | The interval for taos shell to send heartbeat to mnode |
-| Unit | second |
-| Value Range | 1-120 |
-| Default Value | 3 |
-
-### tableMetaKeepTimer
-
-| Attribute | Description |
-| ------------- | -------------------------------------------------------------------------------------------------- |
-| Applicable | Server Only |
-| Meaning | The expiration time for metadata in cache, once it's reached the client would refresh the metadata |
-| Unit | second |
-| Value Range | 1-8640000 |
-| Default Value | 7200 |
-
-### maxTmrCtrl
-
-| Attribute | Description |
-| ------------- | ------------------------ |
-| Applicable | Server and Client |
-| Meaning | Maximum number of timers |
-| Unit | None |
-| Value Range | 8-2048 |
-| Default Value | 512 |
-
-### offlineThreshold
-
-| Attribute | Description |
-| ------------- | ----------------------------------------------------------------------------------------------------------------------------- |
-| Applicable | Server Only |
-| Meaning | The expiration time for dnode online status, once it's reached before receiving status from a node, the dnode becomes offline |
-| Unit | second |
-| Value Range | 5-7200000 |
-| Default Value | 86400\*10 (i.e. 10 days) |
-
-## Performance Optimization Parameters
-
-### numOfThreadsPerCore
-
-| Attribute | Description |
-| ------------- | ------------------------------------------- |
-| Applicable | Server and Client |
-| Meaning | The number of consumer threads per CPU core |
-| Default Value | 1.0 |
-
-### ratioOfQueryThreads
-
-| Attribute | Description |
-| ------------- | --------------------------------------------------------------------------------------------- |
-| Applicable | Server Only |
-| Meaning | Maximum number of query threads |
-| Value Range | 0: Only one query thread 1: Same as number of CPU cores 2: two times of CPU cores |
-| Default Value | 1 |
-| Note | This value can be a float number, 0.5 means half of the CPU cores |
-
-### maxVgroupsPerDb
-
-| Attribute | Description |
-| ------------- | ------------------------------------ |
-| Applicable | Server Only |
-| Meaning | Maximum number of vnodes for each DB |
-| Value Range | 0-8192 |
-| Default Value | |
-
-### maxTablesPerVnode
-
-| Attribute | Description |
-| ------------- | -------------------------------------- |
-| Applicable | Server Only |
-| Meaning | Maximum number of tables in each vnode |
-| Default Value | 1000000 |
-
-### minTablesPerVnode
-
| Attribute | Description |
-| ------------- | -------------------------------------- |
-| Applicable | Server Only |
-| Meaning | Minimum number of tables in each vnode |
-| Default Value | 1000 |
-
-### tableIncStepPerVnode
-
-| Attribute | Description |
-| ------------- | ------------------------------------------------------------------------------------------- |
-| Applicable | Server Only |
-| Meaning | When minTablesPerVnode is reached, the number of tables are allocated for a vnode each time |
-| Default Value | 1000 |
-
-### maxNumOfOrderedRes
-
-| Attribute | Description |
-| ------------- | ------------------------------------------- |
-| Applicable | Server and Client |
-| Meaning | Maximum number of rows ordered for a STable |
-| Default Value | 100,000 |
-
-### mnodeEqualVnodeNum
+| -------- | --------------------------------- |
+| Applicable | Server and Client |
+| Meaning | The interval for TDengine CLI to send heartbeat to mnode |
+| Unit | second |
+| Value Range | 1-120 |
+| Default Value | 3 |
-| Attribute | Description |
-| ------------- | ----------------------------------------------------------------------------------------------- |
-| Applicable | Server Only |
-| Meaning | The number of vnodes whose system resources consumption are considered as equal to single mnode |
-| Default Value | 4 |
+## Performance Optimization Parameters
### numOfCommitThreads
-| Attribute | Description |
-| ------------- | ----------------------------------------- |
-| Applicable | Server Only |
+| Attribute | Description |
+| -------- | ---------------------- |
+| Applicable | Server Only |
| Meaning | Maximum of threads for committing to disk |
-| Default Value | |
+| Default Value | |
## Compression Parameters
-### comp
-
-| Attribute | Description |
-| ------------- | ------------------------------------------------------------------- |
-| Applicable | Server Only |
-| Meaning | Whether data is compressed |
-| Value Range | 0: uncompressed, 1: One phase compression, 2: Two phase compression |
-| Default Value | 2 |
-
-### tsdbMetaCompactRatio
-
-| Attribute | Description |
-| ------------- | ------------------------------------------------------------------------------------------- |
-| Meaning | The threshold for percentage of redundant in meta file to trigger compression for meta file |
-| Value Range | 0: no compression forever, [1-100]: The threshold percentage |
-| Default Value | 0 |
-
### compressMsgSize
-| Attribute | Description |
-| ------------- | -------------------------------------------------------------------------------- |
-| Applicable | Server Only |
-| Meaning | The threshold for message size to compress the message.. |
+| Attribute | Description |
+| ------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
+| Applicable | Server Only |
+| Meaning | The threshold for message size to compress the message. Set the value to 64330 bytes for good message compression. |
| Unit | bytes |
| Value Range | 0: already compress; >0: compress when message exceeds it; -1: always uncompress |
-| Default Value | -1 |
+| Default Value | -1 |
### compressColData
-| Attribute | Description |
-| ------------- | ------------------------------------------------------------------------------------------------------------------- |
-| Applicable | Server Only |
-| Meaning | The threshold for size of column data to trigger compression for the query result |
+| Attribute | Description |
+| -------- | --------------------------------------------------------------------------------------- |
+| Applicable | Server Only |
+| Meaning | The threshold for size of column data to trigger compression for the query result |
| Unit | bytes |
| Value Range | 0: always compress; >0: only compress when the size of any column data exceeds the threshold; -1: always uncompress |
+| Default Value | -1 |
| Default Value | -1 |
-| Note | available from version 2.3.0.0 |
-
-### lossyColumns
-
-| Attribute | Description |
-| ------------- | ------------------------------------------------------------------------------------------------------------------------------------------- |
-| Applicable | Server Only |
-| Meaning | The floating number types for lossy compression |
-| Value Range | "": lossy compression is disabled float: only for float double: only for double float \| double: for both float and double |
-| Default Value | "" , i.e. disabled |
-
-### fPrecision
-
-| Attribute | Description |
-| ------------- | ----------------------------------------------------------- |
-| Applicable | Server Only |
-| Meaning | Compression precision for float type |
-| Value Range | 0.1 ~ 0.00000001 |
-| Default Value | 0.00000001 |
-| Note | The fractional part lower than this value will be discarded |
-
-### dPrecision
-
-| Attribute | Description |
-| ------------- | ----------------------------------------------------------- |
-| Applicable | Server Only |
-| Meaning | Compression precision for double type |
-| Value Range | 0.1 ~ 0.0000000000000001 |
-| Default Value | 0.0000000000000001 |
-| Note | The fractional part lower than this value will be discarded |
+| Note | available from version 2.3.0.0 |
-## Continuous Query Parameters
-
-### stream
-
-| Attribute | Description |
-| ------------- | ---------------------------------- |
-| Applicable | Server Only |
-| Meaning | Whether to enable continuous query |
-| Value Range | 0: disabled 1: enabled |
-| Default Value | 1 |
+## Continuous Query Parameters
### minSlidingTime
@@ -730,375 +405,444 @@ charset CP936
| Value Range | 1-1000000 |
| Default Value | 10 |
-### maxStreamCompDelay
-
-| Attribute | Description |
-| ------------- | ------------------------------------------------ |
-| Applicable | Server Only |
-| Meaning | Maximum delay before starting a continuous query |
-| Unit | millisecond |
-| Value Range | 10-1000000000 |
-| Default Value | 20000 |
-
-### maxFirstStreamCompDelay
-
-| Attribute | Description |
-| ------------- | -------------------------------------------------------------------- |
-| Applicable | Server Only |
-| Meaning | Maximum delay time before starting a continuous query the first time |
-| Unit | millisecond |
-| Value Range | 10-1000000000 |
-| Default Value | 10000 |
-
-### retryStreamCompDelay
-
-| Attribute | Description |
-| ------------- | --------------------------------------------- |
-| Applicable | Server Only |
-| Meaning | Delay time before retrying a continuous query |
-| Unit | millisecond |
-| Value Range | 10-1000000000 |
-| Default Value | 10 |
-
-### streamCompDelayRatio
-
-| Attribute | Description |
-| ------------- | ------------------------------------------------------------------------ |
-| Applicable | Server Only |
-| Meaning | The delay ratio, with time window size as the base, for continuous query |
-| Value Range | 0.1-0.9 |
-| Default Value | 0.1 |
-
:::info
To prevent system resource from being exhausted by multiple concurrent streams, a random delay is applied on each stream automatically. `maxFirstStreamCompDelay` is the maximum delay time before a continuous query is started the first time. `streamCompDelayRatio` is the ratio for calculating delay time, with the size of the time window as base. `maxStreamCompDelay` is the maximum delay time. The actual delay time is a random time not bigger than `maxStreamCompDelay`. If a continuous query fails, `retryStreamCompDelay` is the delay time before retrying it, also not bigger than `maxStreamCompDelay`.
:::
-## HTTP Parameters
-
-### http
-
-| Attribute | Description |
-| ------------- | ------------------------------ |
-| Applicable | Server Only |
-| Meaning | Whether to enable http service |
-| Value Range | 0: disabled, 1: enabled |
-| Default Value | 1 |
-
-### httpEnableRecordSql
-
-| Attribute | Description |
-| ------------- | ------------------------------------------------------------------------- |
-| Applicable | Server Only |
-| Meaning | Whether to record the SQL invocation through REST interface |
-| Default Value | 0: false; 1: true |
-| Note | The resulting files, i.e. httpnote.0/httpnote.1, are located under logDir |
-
-### httpMaxThreads
-
-| Attribute | Description |
-| ------------- | -------------------------------------------- |
-| Applicable | Server Only |
-| Meaning | The number of threads for RESTFul interface. |
-| Default Value | 2 |
-
-### restfulRowLimit
-
-| Attribute | Description |
-| ------------- | ------------------------------------------------------------ |
-| Applicable | Server Only |
-| Meaning | Maximum number of rows returned each time by REST interface. |
-| Default Value | 10240 |
-| Note | Maximum value is 10,000,000 |
-
-### httpDBNameMandatory
-
-| Attribute | Description |
-| ------------- | ---------------------------------------- |
-| Applicable | Server Only |
-| Meaning | Whether database name is required in URL |
-| Value Range | 0:not required, 1: required |
-| Default Value | 0 |
-| Note | From version 2.3.0.0 |
-
## Log Parameters
### logDir
-| Attribute | Description |
-| ------------- | ----------------------------------- |
-| Applicable | Server and Client |
+| Attribute | Description |
+| -------- | -------------------------------------------------- |
+| Applicable | Server and Client |
| Meaning | The directory for writing log files |
| Default Value | /var/log/taos |
### minimalLogDirGB
-| Attribute | Description |
-| ------------- | -------------------------------------------------------------------------------------------------- |
-| Applicable | Server and Client |
+| Attribute | Description |
+| -------- | -------------------------------------------- |
+| Applicable | Server and Client |
| Meaning | When the available disk space in logDir is below this threshold, writing to log files is suspended |
-| Unit | GB |
-| Default Value | 1.0 |
+| Unit | GB |
+| Default Value | 1.0 |
### numOfLogLines
-| Attribute | Description |
-| ------------- | ------------------------------------------ |
-| Applicable | Server and Client |
+| Attribute | Description |
+| -------- | ---------------------------- |
+| Applicable | Server and Client |
| Meaning | Maximum number of lines in single log file |
-| Default Value | 10,000,000 |
+| Default Value | 10000000 |
### asyncLog
-| Attribute | Description |
-| ------------- | ---------------------------- |
-| Applicable | Server and Client |
+| Attribute | Description |
+| -------- | -------------------- |
+| Applicable | Server and Client |
| Meaning | The mode of writing log file |
| Value Range | 0: sync way; 1: async way |
-| Default Value | 1 |
+| Default Value | 1 |
### logKeepDays
-| Attribute | Description |
-| ------------- | ------------------------------------------------------------------------------------------------------------------------------------------- |
-| Applicable | Server and Client |
+| Attribute | Description |
+| -------- | ----------------------------------------------------------------------------------- |
+| Applicable | Server and Client |
| Meaning | The number of days for log files to be kept |
-| Unit | day |
-| Default Value | 0 |
+| Unit | day |
+| Default Value | 0 |
| Note | When it's bigger than 0, the log file would be renamed to "taosdlog.xxx" in which "xxx" is the timestamp when the file is changed last time |
### debugFlag
-| Attribute | Description |
-| ------------- | --------------------------------------------------------- |
-| Applicable | Server and Client |
+| Attribute | Description |
+| -------- | ------------------------------------------------------------------------------------------------- |
+| Applicable | Server and Client |
| Meaning | Log level |
| Value Range | 131: INFO/WARNING/ERROR; 135: plus DEBUG; 143: plus TRACE |
| Default Value | 131 or 135, depending on the module |
-### mDebugFlag
+### tmrDebugFlag
-| Attribute | Description |
-| ------------- | ------------------ |
-| Applicable | Server Only |
-| Meaning | Log level of mnode |
+| Attribute | Description |
+| -------- | -------------------- |
+| Applicable | Server and Client |
+| Meaning | Log level of timer module |
| Value Range | same as debugFlag |
-| Default Value | 135 |
+| Default Value | |
-### dDebugFlag
+### uDebugFlag
-| Attribute | Description |
-| ------------- | ------------------ |
-| Applicable | Server and Client |
-| Meaning | Log level of dnode |
+| Attribute | Description |
+| -------- | ---------------------- |
+| Applicable | Server and Client |
+| Meaning | Log level of common module |
| Value Range | same as debugFlag |
-| Default Value | 135 |
-
-### sDebugFlag
-
-| Attribute | Description |
-| ------------- | ------------------------ |
-| Applicable | Server and Client |
-| Meaning | Log level of sync module |
-| Value Range | same as debugFlag |
-| Default Value | 135 |
-
-### wDebugFlag
-
-| Attribute | Description |
-| ------------- | ----------------------- |
-| Applicable | Server and Client |
-| Meaning | Log level of WAL module |
-| Value Range | same as debugFlag |
-| Default Value | 135 |
-
-### sdbDebugFlag
-
-| Attribute | Description |
-| ------------- | ---------------------- |
-| Applicable | Server and Client |
-| Meaning | logLevel of sdb module |
-| Value Range | same as debugFlag |
-| Default Value | 135 |
+| Default Value | |
### rpcDebugFlag
-| Attribute | Description |
-| ------------- | ----------------------- |
-| Applicable | Server and Client |
+| Attribute | Description |
+| -------- | -------------------- |
+| Applicable | Server and Client |
| Meaning | Log level of rpc module |
-| Value Range | Same as debugFlag |
-| Default Value | |
+| Value Range | same as debugFlag |
+| Default Value | |
-### tmrDebugFlag
+### jniDebugFlag
-| Attribute | Description |
-| ------------- | ------------------------- |
+| Attribute | Description |
+| -------- | ------------------ |
+| Applicable | Client Only |
+| Meaning | Log level of jni module |
+| Value Range | same as debugFlag |
+| Default Value | |
+
+### qDebugFlag
+
+| Attribute | Description |
+| -------- | -------------------- |
| Applicable | Server and Client |
-| Meaning | Log level of timer module |
-| Value Range | Same as debugFlag |
-| Default Value | |
+| Meaning | Log level of query module |
+| Value Range | same as debugFlag |
+| Default Value | |
### cDebugFlag
-| Attribute | Description |
-| ------------- | ------------------- |
+| Attribute | Description |
+| -------- | --------------------- |
| Applicable | Client Only |
| Meaning | Log level of Client |
-| Value Range | Same as debugFlag |
-| Default Value | |
-
-### jniDebugFlag
-
-| Attribute | Description |
-| ------------- | ----------------------- |
-| Applicable | Client Only |
-| Meaning | Log level of jni module |
-| Value Range | Same as debugFlag |
-| Default Value | |
+| Value Range | same as debugFlag |
+| Default Value | |
-### odbcDebugFlag
+### dDebugFlag
-| Attribute | Description |
-| ------------- | ------------------------ |
-| Applicable | Client Only |
-| Meaning | Log level of odbc module |
-| Value Range | Same as debugFlag |
-| Default Value | |
+| Attribute | Description |
+| -------- | -------------------- |
+| Applicable | Server Only |
+| Meaning | Log level of dnode |
+| Value Range | same as debugFlag |
+| Default Value | 135 |
-### uDebugFlag
+### vDebugFlag
-| Attribute | Description |
-| ------------- | -------------------------- |
-| Applicable | Server and Client |
-| Meaning | Log level of common module |
-| Value Range | Same as debugFlag |
-| Default Value | | |
+| Attribute | Description |
+| -------- | -------------------- |
+| Applicable | Server Only |
+| Meaning | Log level of vnode |
+| Value Range | same as debugFlag |
+| Default Value | |
-### mqttDebugFlag
+### mDebugFlag
-| Attribute | Description |
-| ------------- | ------------------------ |
-| Applicable | Server Only |
-| Meaning | Log level of mqtt module |
-| Value Range | Same as debugFlag |
-| Default Value | |
+| Attribute | Description |
+| -------- | -------------------- |
+| Applicable | Server Only |
+| Meaning | Log level of mnode module |
+| Value Range | same as debugFlag |
+| Default Value | 135 |
-### monitorDebugFlag
+### wDebugFlag
-| Attribute | Description |
-| ------------- | ------------------------------ |
-| Applicable | Server Only |
-| Meaning | Log level of monitoring module |
-| Value Range | Same as debugFlag |
-| Default Value | |
+| Attribute | Description |
+| -------- | ------------------ |
+| Applicable | Server Only |
+| Meaning | Log level of WAL module |
+| Value Range | same as debugFlag |
+| Default Value | 135 |
-### qDebugFlag
+### sDebugFlag
-| Attribute | Description |
-| ------------- | ------------------------- |
+| Attribute | Description |
+| -------- | -------------------- |
| Applicable | Server and Client |
-| Meaning | Log level of query module |
-| Value Range | Same as debugFlag |
-| Default Value | |
-
-### vDebugFlag
-
-| Attribute | Description |
-| ------------- | ------------------ |
-| Applicable | Server and Client |
-| Meaning | Log level of vnode |
-| Value Range | Same as debugFlag |
-| Default Value | |
+| Meaning | Log level of sync module |
+| Value Range | same as debugFlag |
+| Default Value | 135 |
### tsdbDebugFlag
-| Attribute | Description |
-| ------------- | ------------------------ |
-| Applicable | Server Only |
-| Meaning | Log level of TSDB module |
-| Value Range | Same as debugFlag |
-| Default Value | |
+| Attribute | Description |
+| -------- | ------------------- |
+| Applicable | Server Only |
+| Meaning | Log level of TSDB module |
+| Value Range | same as debugFlag |
+| Default Value | |
-### cqDebugFlag
+### tqDebugFlag
| Attribute | Description |
-| ------------- | ------------------------------------ |
-| Applicable | Server and Client |
-| Meaning | Log level of continuous query module |
-| Value Range | Same as debugFlag |
-| Default Value | |
+| -------- | ----------------- |
+| Applicable | Server Only |
+| Meaning | Log level of TQ module |
+| Value Range | same as debugFlag |
+| Default Value | |
-## Client Only
+### fsDebugFlag
-### maxSQLLength
+| Attribute | Description |
+| -------- | ----------------- |
+| Applicable | Server Only |
+| Meaning | Log level of FS module |
+| Value Range | same as debugFlag |
+| Default Value | |
+
+### udfDebugFlag
| Attribute | Description |
-| ------------- | -------------------------------------- |
-| Applicable | Client Only |
-| Meaning | Maximum length of single SQL statement |
-| Unit | bytes |
-| Value Range | 65480-1048576 |
-| Default Value | 1048576 |
+| -------- | ------------------ |
+| Applicable | Server Only |
+| Meaning | Log level of UDF module |
+| Value Range | same as debugFlag |
+| Default Value | |
-### tscEnableRecordSql
+### smaDebugFlag
-| Attribute | Description |
-| ------------- | ------------------------------------------------------------------------------------------------------------------------------------------------- |
-| Meaning | Whether to record SQL statements in file |
-| Value Range | 0: false, 1: true |
-| Default Value | 0 |
-| Note | The generated files are named as "tscnote-xxxx.0/tscnote-xxx.1" in which "xxxx" is the pid of the client, and located at same place as client log |
+| Attribute | Description |
+| -------- | ------------------ |
+| Applicable | Server Only |
+| Meaning | Log level of SMA module |
+| Value Range | same as debugFlag |
+| Default Value | |
-### maxBinaryDisplayWidth
+### idxDebugFlag
-| Attribute | Description |
-| ------------- | --------------------------------------------------------------------------------------------------- |
-| Meaning | Maximum display width of binary and nchar in taos shell. Anything beyond this limit would be hidden |
-| Value Range | 5 - |
-| Default Value | 30 |
+| Attribute | Description |
+| -------- | -------------------- |
+| Applicable | Server Only |
+| Meaning | Log level of index module |
+| Value Range | same as debugFlag |
+| Default Value | |
-:::info
-If the length of value exceeds `maxBinaryDisplayWidth`, then the actual display width is max(column name, maxBinaryDisplayLength); otherwise the actual display width is max(length of column name, length of column value). This parameter can also be changed dynamically using `set max_binary_display_width ` in TDengine CLI `taos`.
+### tdbDebugFlag
-:::
+| Attribute | Description |
+| -------- | ------------------ |
+| Applicable | Server Only |
+| Meaning | Log level of TDB module |
+| Value Range | same as debugFlag |
+| Default Value | |
-### maxWildCardsLength
+## Schemaless Parameters
-| Attribute | Description |
-| ------------- | ----------------------------------------------------- |
-| Meaning | The maximum length for wildcard string used with LIKE |
-| Unit | bytes |
-| Value Range | 0-16384 |
-| Default Value | 100 |
-| Note | From version 2.1.6.1 |
+### smlChildTableName
-### clientMerge
+| Attribute | Description |
+| -------- | ------------------------- |
+| Applicable | Client Only |
+| Meaning | Custom subtable name for schemaless writes |
+| Type | String |
+| Default Value | None |
-| Attribute | Description |
-| ------------- | --------------------------------------------------- |
-| Meaning | Whether to filter out duplicate data on client side |
-| Value Range | 0: false; 1: true |
-| Default Value | 0 |
-| Note | From version 2.3.0.0 |
+### smlTagName
-### maxRegexStringLen
+| Attribute | Description |
+| -------- | ------------------------------------ |
+| Applicable | Client Only |
+| Meaning | Default tag for schemaless writes without tag value specified |
+| Type | String |
+| Default Value | _tag_null |
+
+### smlDataFormat
| Attribute | Description |
-| ------------- | ------------------------------------ |
-| Meaning | Maximum length of regular expression |
-| Value Range | [128, 16384] |
-| Default Value | 128 |
-| Note | From version 2.3.0.0 |
+| -------- | ----------------------------- |
+| Applicable | Client Only |
+| Meaning | Whether schemaless columns are consistently ordered |
+| Value Range | 0: not consistent; 1: consistent. |
+| Default Value | 1 |
## Other Parameters
### enableCoreFile
-| Attribute | Description |
-| ------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
+| Attribute | Description |
+| -------- | ------------------------------------------------------------------------------------------------------------------------------------------ |
| Applicable | Server and Client |
| Meaning | Whether to generate core file when server crashes |
| Value Range | 0: false, 1: true |
| Default Value | 1 |
| Note | The core file is generated under root directory when `systemctl start taosd` is used to start taosd, or under the working directory if `taosd` is started directly on Linux Shell. |
+
+### udf
+
+| Attribute | Description |
+| -------- | ------------------ |
+| Applicable | Server Only |
+| Meaning | Whether the UDF service is enabled |
+| Value Range | 0: disable UDF; 1: enable UDF |
+| Default Value | 1 |
+
+## Parameter Comparison of TDengine 2.x and 3.0
+| # | **Parameter** | **In 2.x** | **In 3.0** |
+| --- | :-----------------: | --------------- | --------------- |
+| 1 | firstEp | Yes | Yes |
+| 2 | secondEp | Yes | Yes |
+| 3 | fqdn | Yes | Yes |
+| 4 | serverPort | Yes | Yes |
+| 5 | maxShellConns | Yes | Yes |
+| 6 | monitor | Yes | Yes |
+| 7 | monitorFqdn | No | Yes |
+| 8 | monitorPort | No | Yes |
+| 9 | monitorInterval | Yes | Yes |
+| 10 | monitorMaxLogs | No | Yes |
+| 11 | monitorComp | No | Yes |
+| 12 | telemetryReporting | Yes | Yes |
+| 13 | telemetryInterval | No | Yes |
+| 14 | telemetryServer | No | Yes |
+| 15 | telemetryPort | No | Yes |
+| 16 | queryPolicy | No | Yes |
+| 17 | querySmaOptimize | No | Yes |
+| 18 | queryBufferSize | Yes | Yes |
+| 19 | maxNumOfDistinctRes | Yes | Yes |
+| 20 | minSlidingTime | Yes | Yes |
+| 21 | minIntervalTime | Yes | Yes |
+| 22 | countAlwaysReturnValue | Yes | Yes |
+| 23 | dataDir | Yes | Yes |
+| 24 | minimalDataDirGB | Yes | Yes |
+| 25 | supportVnodes | No | Yes |
+| 26 | tempDir | Yes | Yes |
+| 27 | minimalTmpDirGB | Yes | Yes |
+| 28 | compressMsgSize | Yes | Yes |
+| 29 | compressColData | Yes | Yes |
+| 30 | smlChildTableName | Yes | Yes |
+| 31 | smlTagName | Yes | Yes |
+| 32 | smlDataFormat | No | Yes |
+| 33 | statusInterval | Yes | Yes |
+| 34 | shellActivityTimer | Yes | Yes |
+| 35 | transPullupInterval | No | Yes |
+| 36 | mqRebalanceInterval | No | Yes |
+| 37 | ttlUnit | No | Yes |
+| 38 | ttlPushInterval | No | Yes |
+| 39 | numOfTaskQueueThreads | No | Yes |
+| 40 | numOfRpcThreads | No | Yes |
+| 41 | numOfCommitThreads | Yes | Yes |
+| 42 | numOfMnodeReadThreads | No | Yes |
+| 43 | numOfVnodeQueryThreads | No | Yes |
+| 44 | numOfVnodeStreamThreads | No | Yes |
+| 45 | numOfVnodeFetchThreads | No | Yes |
+| 46 | numOfVnodeWriteThreads | No | Yes |
+| 47 | numOfVnodeSyncThreads | No | Yes |
+| 48 | numOfQnodeQueryThreads | No | Yes |
+| 49 | numOfQnodeFetchThreads | No | Yes |
+| 50 | numOfSnodeSharedThreads | No | Yes |
+| 51 | numOfSnodeUniqueThreads | No | Yes |
+| 52 | rpcQueueMemoryAllowed | No | Yes |
+| 53 | logDir | Yes | Yes |
+| 54 | minimalLogDirGB | Yes | Yes |
+| 55 | numOfLogLines | Yes | Yes |
+| 56 | asyncLog | Yes | Yes |
+| 57 | logKeepDays | Yes | Yes |
+| 58 | debugFlag | Yes | Yes |
+| 59 | tmrDebugFlag | Yes | Yes |
+| 60 | uDebugFlag | Yes | Yes |
+| 61 | rpcDebugFlag | Yes | Yes |
+| 62 | jniDebugFlag | Yes | Yes |
+| 63 | qDebugFlag | Yes | Yes |
+| 64 | cDebugFlag | Yes | Yes |
+| 65 | dDebugFlag | Yes | Yes |
+| 66 | vDebugFlag | Yes | Yes |
+| 67 | mDebugFlag | Yes | Yes |
+| 68 | wDebugFlag | Yes | Yes |
+| 69 | sDebugFlag | Yes | Yes |
+| 70 | tsdbDebugFlag | Yes | Yes |
+| 71 | tqDebugFlag | No | Yes |
+| 72 | fsDebugFlag | Yes | Yes |
+| 73 | udfDebugFlag | No | Yes |
+| 74 | smaDebugFlag | No | Yes |
+| 75 | idxDebugFlag | No | Yes |
+| 76 | tdbDebugFlag | No | Yes |
+| 77 | metaDebugFlag | No | Yes |
+| 78 | timezone | Yes | Yes |
+| 79 | locale | Yes | Yes |
+| 80 | charset | Yes | Yes |
+| 81 | udf | Yes | Yes |
+| 82 | enableCoreFile | Yes | Yes |
+| 83 | arbitrator | Yes | No |
+| 84 | numOfThreadsPerCore | Yes | No |
+| 85 | numOfMnodes | Yes | No |
+| 86 | vnodeBak | Yes | No |
+| 87 | balance | Yes | No |
+| 88 | balanceInterval | Yes | No |
+| 89 | offlineThreshold | Yes | No |
+| 90 | role | Yes | No |
+| 91 | dnodeNopLoop | Yes | No |
+| 92 | keepTimeOffset | Yes | No |
+| 93 | rpcTimer | Yes | No |
+| 94 | rpcMaxTime | Yes | No |
+| 95 | rpcForceTcp | Yes | No |
+| 96 | tcpConnTimeout | Yes | No |
+| 97 | syncCheckInterval | Yes | No |
+| 98 | maxTmrCtrl | Yes | No |
+| 99 | monitorReplica | Yes | No |
+| 100 | smlTagNullName | Yes | No |
+| 101 | keepColumnName | Yes | No |
+| 102 | ratioOfQueryCores | Yes | No |
+| 103 | maxStreamCompDelay | Yes | No |
+| 104 | maxFirstStreamCompDelay | Yes | No |
+| 105 | retryStreamCompDelay | Yes | No |
+| 106 | streamCompDelayRatio | Yes | No |
+| 107 | maxVgroupsPerDb | Yes | No |
+| 108 | maxTablesPerVnode | Yes | No |
+| 109 | minTablesPerVnode | Yes | No |
+| 110 | tableIncStepPerVnode | Yes | No |
+| 111 | cache | Yes | No |
+| 112 | blocks | Yes | No |
+| 113 | days | Yes | No |
+| 114 | keep | Yes | No |
+| 115 | minRows | Yes | No |
+| 116 | maxRows | Yes | No |
+| 117 | quorum | Yes | No |
+| 118 | comp | Yes | No |
+| 119 | walLevel | Yes | No |
+| 120 | fsync | Yes | No |
+| 121 | replica | Yes | No |
+| 122 | partitions | Yes | No |
+| 123 | quorum | Yes | No |
+| 124 | update | Yes | No |
+| 125 | cachelast | Yes | No |
+| 126 | maxSQLLength | Yes | No |
+| 127 | maxWildCardsLength | Yes | No |
+| 128 | maxRegexStringLen | Yes | No |
+| 129 | maxNumOfOrderedRes | Yes | No |
+| 130 | maxConnections | Yes | No |
+| 131 | mnodeEqualVnodeNum | Yes | No |
+| 132 | http | Yes | No |
+| 133 | httpEnableRecordSql | Yes | No |
+| 134 | httpMaxThreads | Yes | No |
+| 135 | restfulRowLimit | Yes | No |
+| 136 | httpDbNameMandatory | Yes | No |
+| 137 | httpKeepAlive | Yes | No |
+| 138 | enableRecordSql | Yes | No |
+| 139 | maxBinaryDisplayWidth | Yes | No |
+| 140 | stream | Yes | No |
+| 141 | retrieveBlockingModel | Yes | No |
+| 142 | tsdbMetaCompactRatio | Yes | No |
+| 143 | defaultJSONStrType | Yes | No |
+| 144 | walFlushSize | Yes | No |
+| 145 | keepTimeOffset | Yes | No |
+| 146 | flowctrl | Yes | No |
+| 147 | slaveQuery | Yes | No |
+| 148 | adjustMaster | Yes | No |
+| 149 | topicBinaryLen | Yes | No |
+| 150 | telegrafUseFieldNum | Yes | No |
+| 151 | deadLockKillQuery | Yes | No |
+| 152 | clientMerge | Yes | No |
+| 153 | sdbDebugFlag | Yes | No |
+| 154 | odbcDebugFlag | Yes | No |
+| 155 | httpDebugFlag | Yes | No |
+| 156 | monDebugFlag | Yes | No |
+| 157 | cqDebugFlag | Yes | No |
+| 158 | shortcutFlag | Yes | No |
+| 159 | probeSeconds | Yes | No |
+| 160 | probeKillSeconds | Yes | No |
+| 161 | probeInterval | Yes | No |
+| 162 | lossyColumns | Yes | No |
+| 163 | fPrecision | Yes | No |
+| 164 | dPrecision | Yes | No |
+| 165 | maxRange | Yes | No |
+| 166 | range | Yes | No |
diff --git a/docs/en/14-reference/13-schemaless/13-schemaless.md b/docs/en/14-reference/13-schemaless/13-schemaless.md
index 8b6a26ae52af42e339e2f5a8d0824a9e1be3f386..4f50c38cbbfda9d8d8567517f9109f18e2007988 100644
--- a/docs/en/14-reference/13-schemaless/13-schemaless.md
+++ b/docs/en/14-reference/13-schemaless/13-schemaless.md
@@ -1,9 +1,10 @@
---
title: Schemaless Writing
-description: "The Schemaless write method eliminates the need to create super tables/sub tables in advance and automatically creates the storage structure corresponding to the data, as it is written to the interface."
+description: 'The Schemaless write method eliminates the need to create super tables/sub tables in advance and automatically creates the storage structure corresponding to the data, as it is written to the interface.'
---
-In IoT applications, data is collected for many purposes such as intelligent control, business analysis, device monitoring and so on. Due to changes in business or functional requirements or changes in device hardware, the application logic and even the data collected may change. To provide the flexibility needed in such cases and in a rapidly changing IoT landscape, TDengine provides a series of interfaces for the schemaless writing method. These interfaces eliminate the need to create super tables and subtables in advance by automatically creating the storage structure corresponding to the data as the data is written to the interface. When necessary, schemaless writing will automatically add the required columns to ensure that the data written by the user is stored correctly.
+In IoT applications, data is collected for many purposes such as intelligent control, business analysis, device monitoring and so on. Due to changes in business or functional requirements or changes in device hardware, the application logic and even the data collected may change. Schemaless writing automatically creates storage structures for your data as it is being written to TDengine, so that you do not need to create supertables in advance. When necessary, schemaless writing
+will automatically add the required columns to ensure that the data written by the user is stored correctly.
The schemaless writing method creates super tables and their corresponding subtables. These are completely indistinguishable from the super tables and subtables created directly via SQL. You can write data directly to them via SQL statements. Note that the names of tables created by schemaless writing are based on fixed mapping rules for tag values, so they are not explicitly ideographic and they lack readability.
@@ -19,12 +20,12 @@ With the following formatting conventions, schemaless writing uses a single stri
measurement,tag_set field_set timestamp
```
-where :
+where:
- measurement will be used as the data table name. It will be separated from tag_set by a comma.
-- tag_set will be used as tag data in the format `=,=`, i.e. multiple tags' data can be separated by a comma. It is separated from field_set by space.
-- field_set will be used as normal column data in the format of `=,=`, again using a comma to separate multiple normal columns of data. It is separated from the timestamp by a space.
-- The timestamp is the primary key corresponding to the data in this row.
+- `tag_set` will be used as tags, with format like `=,=`. Enter a space between `tag_set` and `field_set`.
+- `field_set` will be used as data columns, with format like `=,=`. Enter a space between `field_set` and `timestamp`.
+- `timestamp` is the primary key timestamp corresponding to this row of data.
All data in tag_set is automatically converted to the NCHAR data type and does not require double quotes (").
@@ -35,18 +36,20 @@ In the schemaless writing data line protocol, each data item in the field_set ne
- Spaces, equal signs (=), commas (,), and double quotes (") need to be escaped with a backslash (\\) in front. (All refer to the ASCII character)
- Numeric types will be distinguished from data types by the suffix.
-| **Serial number** | **Postfix** | **Mapping type** | **Size (bytes)** |
-| -------- | -------- | ------------ | -------------- |
-| 1 | none or f64 | double | 8 |
-| 2 | f32 | float | 4 |
-| 3 | i8/u8 | TinyInt/UTinyInt | 1 |
-| 4 | i16/u16 | SmallInt/USmallInt | 2 |
-| 5 | i32/u32 | Int/UInt | 4 |
-| 6 | i64/i/u64/u | Bigint/Bigint/UBigint/UBigint | 8 |
+| **Serial number** | **Postfix** | **Mapping type** | **Size (bytes)** |
+| ----------------- | ----------- | ----------------------------- | ---------------- |
+| 1 | None or f64 | double | 8 |
+| 2 | f32 | float | 4 |
+| 3 | i8/u8 | TinyInt/UTinyInt | 1 |
+| 4 | i16/u16 | SmallInt/USmallInt | 2 |
+| 5 | i32/u32 | Int/UInt | 4 |
+| 6 | i64/i/u64/u | BigInt/BigInt/UBigInt/UBigInt | 8 |
- `t`, `T`, `true`, `True`, `TRUE`, `f`, `F`, `false`, and `False` will be handled directly as BOOL types.
-For example, the following data rows indicate that the t1 label is "3" (NCHAR), the t2 label is "4" (NCHAR), and the t3 label is "t3" to the super table named `st` labeled "t3" (NCHAR), write c1 column as 3 (BIGINT), c2 column as false (BOOL), c3 column is "passit" (BINARY), c4 column is 4 (DOUBLE), and the primary key timestamp is 1626006833639000000 in one row.
+For example, the following data rows indicate that the t1 label is "3" (NCHAR), the t2 label is "4" (NCHAR), and the t3 label
+is "t3" to the super table named `st` labeled "t3" (NCHAR), write c1 column as 3 (BIGINT), c2 column as false (BOOL), c3 column
+is "passit" (BINARY), c4 column is 4 (DOUBLE), and the primary key timestamp is 1626006833639000000 in one row.
```json
st,t1=3,t2=4,t3=t3 c1=3i64,c3="passit",c2=false,c4=4f64 1626006833639000000
@@ -58,102 +61,105 @@ Note that if the wrong case is used when describing the data type suffix, or if
Schemaless writes process row data according to the following principles.
-1. You can use the following rules to generate the subtable names: first, combine the measurement name and the key and value of the label into the next string:
+1. You can use the following rules to generate the subtable names: first, combine the measurement name and the key and value of the label into the next string:
```json
"measurement,tag_key1=tag_value1,tag_key2=tag_value2"
```
Note that tag_key1, tag_key2 are not the original order of the tags entered by the user but the result of using the tag names in ascending order of the strings. Therefore, tag_key1 is not the first tag entered in the line protocol.
-The string's MD5 hash value "md5_val" is calculated after the ranking is completed. The calculation result is then combined with the string to generate the table name: "t_md5_val". "t*" is a fixed prefix that every table generated by this mapping relationship has.
+The string's MD5 hash value "md5_val" is calculated after the ranking is completed. The calculation result is then combined with the string to generate the table name: "t_md5_val". "t_" is a fixed prefix that every table generated by this mapping relationship has.
+You can configure smlChildTableName to specify table names, for example, `smlChildTableName=tname`. You can insert `st,tname=cpu1,t1=4 c1=3 1626006833639000000` and the cpu1 table will be automatically created. Note that if multiple rows have the same tname but different tag_set values, the tag_set of the first row is used to create the table and the others are ignored.
2. If the super table obtained by parsing the line protocol does not exist, this super table is created.
-If the subtable obtained by the parse line protocol does not exist, Schemaless creates the sub-table according to the subtable name determined in steps 1 or 2.
+3. If the subtable obtained by the parse line protocol does not exist, Schemaless creates the sub-table according to the subtable name determined in steps 1 or 2.
4. If the specified tag or regular column in the data row does not exist, the corresponding tag or regular column is added to the super table (only incremental).
-5. If there are some tag columns or regular columns in the super table that are not specified to take values in a data row, then the values of these columns are set to NULL.
+5. If there are some tag columns or regular columns in the super table that are not specified to take values in a data row, then the values of these columns are set to
+ NULL.
6. For BINARY or NCHAR columns, if the length of the value provided in a data row exceeds the column type limit, the maximum length of characters allowed to be stored in the column is automatically increased (only incremented and not decremented) to ensure complete preservation of the data.
7. Errors encountered throughout the processing will interrupt the writing process and return an error code.
-8. In order to improve the efficiency of writing, it is assumed by default that the order of the fields in the same Super is the same (the first data contains all fields, and the following data is in this order). If the order is different, the parameter smlDataFormat needs to be configured to be false. Otherwise, the data is written in the same order, and the data in the library will be abnormal.
+8. It is assumed that the order of field_set in a supertable is consistent, meaning that the first record contains all fields and subsequent records store fields in the same order. If the order is not consistent, set smlDataFormat to false. Otherwise, data will be written out of order and a database error will occur.
:::tip
-All processing logic of schemaless will still follow TDengine's underlying restrictions on data structures, such as the total length of each row of data cannot exceed 16k bytes. See [TAOS SQL Boundary Limits](/taos-sql/limit) for specific constraints in this area.
+All processing logic of schemaless will still follow TDengine's underlying restrictions on data structures, such as the total length of each row of data cannot exceed
+16KB. See [TDengine SQL Boundary Limits](/taos-sql/limit) for specific constraints in this area.
+
:::
## Time resolution recognition
Three specified modes are supported in the schemaless writing process, as follows:
-| **Serial** | **Value** | **Description** |
-| -------- | ------------------- | ------------------------------- |
-| 1 | SML_LINE_PROTOCOL | InfluxDB Line Protocol |
-| 2 | SML_TELNET_PROTOCOL | OpenTSDB Text Line Protocol |
-| 3 | SML_JSON_PROTOCOL | JSON protocol format |
-
-In the SML_LINE_PROTOCOL parsing mode, the user is required to specify the time resolution of the input timestamp. The available time resolutions are shown in the following table.
+| **Serial** | **Value** | **Description** |
+| ---------- | ------------------- | ---------------------- |
+| 1 | SML_LINE_PROTOCOL | InfluxDB Line Protocol |
+| 2 | SML_TELNET_PROTOCOL | OpenTSDB file protocol |
+| 3 | SML_JSON_PROTOCOL | OpenTSDB JSON protocol |
-| **Serial Number** | **Time Resolution Definition** | **Meaning** |
-| -------- | --------------------------------- | -------------- |
-| 1 | TSDB_SML_TIMESTAMP_NOT_CONFIGURED | Not defined (invalid) |
-| 2 | TSDB_SML_TIMESTAMP_HOURS | hour |
-| 3 | TSDB_SML_TIMESTAMP_MINUTES | MINUTES
-| 4 | TSDB_SML_TIMESTAMP_SECONDS | SECONDS
-| 5 | TSDB_SML_TIMESTAMP_MILLI_SECONDS | milliseconds
-| 6 | TSDB_SML_TIMESTAMP_MICRO_SECONDS | microseconds
-| 7 | TSDB_SML_TIMESTAMP_NANO_SECONDS | nanoseconds |
+In InfluxDB line protocol mode, you must specify the precision of the input timestamp. Valid precisions are described in the following table.
-In SML_TELNET_PROTOCOL and SML_JSON_PROTOCOL modes, the time precision is determined based on the length of the timestamp (in the same way as the OpenTSDB standard operation), and the user-specified time resolution is ignored at this point.
+| **No.** | **Precision** | **Description** |
+| ------- | --------------------------------- | --------------------- |
+| 1 | TSDB_SML_TIMESTAMP_NOT_CONFIGURED | Not defined (invalid) |
+| 2 | TSDB_SML_TIMESTAMP_HOURS | Hours |
+| 3 | TSDB_SML_TIMESTAMP_MINUTES | Minutes |
+| 4 | TSDB_SML_TIMESTAMP_SECONDS | Seconds |
+| 5 | TSDB_SML_TIMESTAMP_MILLI_SECONDS | Milliseconds |
+| 6 | TSDB_SML_TIMESTAMP_MICRO_SECONDS | Microseconds |
+| 7 | TSDB_SML_TIMESTAMP_NANO_SECONDS | Nanoseconds |
-## Data schema mapping rules
+In OpenTSDB file and JSON protocol modes, the precision of the timestamp is determined from its length in the standard OpenTSDB manner. User input is ignored.
-This section describes how data for line protocols are mapped to data with a schema. The data measurement in each line protocol is mapped as follows:
-- The tag name in tag_set is the name of the tag in the data schema
-- The name in field_set is the column's name.
+## Data Model Mapping
-The following data is used as an example to illustrate the mapping rules.
+This section describes how data in line protocol is mapped to a schema. The data measurement in each line is mapped to a
+supertable name. The tag name in tag_set is the tag name in the schema, and the name in field_set is the column name in the schema. The following example shows how data is mapped:
```json
st,t1=3,t2=4,t3=t3 c1=3i64,c3="passit",c2=false,c4=4f64 1626006833639000000
```
-The row data mapping generates a super table: `st`, which contains three labels of type NCHAR: t1, t2, t3. Five data columns are ts (timestamp), c1 (bigint), c3 (binary), c2 (bool), c4 (bigint). The mapping becomes the following SQL statement.
+This row is mapped to a supertable: `st` contains three NCHAR tags: t1, t2, and t3. Five columns are created: ts (timestamp), c1 (bigint), c3 (binary), c2 (bool), and c4 (bigint). The following SQL statement is generated:
```json
create stable st (_ts timestamp, c1 bigint, c2 bool, c3 binary(6), c4 bigint) tags(t1 nchar(1), t2 nchar(1), t3 nchar(2))
```
-## Data schema change handling
+## Processing Schema Changes
-This section describes the impact on the data schema for different line protocol data writing cases.
+This section describes the impact on the schema caused by different data being written.
-When writing to an explicitly identified field type using the line protocol, subsequent changes to the field's type definition will result in an explicit data schema error, i.e., will trigger a write API report error. As shown below, the
+If you use line protocol to write to a specific tag field and then later change the field type, a schema error will occur. This triggers an error on the write API. This is shown as follows:
```json
-st,t1=3,t2=4,t3=t3 c1=3i64,c3="passit",c2=false,c4=4 1626006833639000000
-st,t1=3,t2=4,t3=t3 c1=3i64,c3="passit",c2=false,c4=4i 1626006833640000000
+st,t1=3,t2=4,t3=t3 c1=3i64,c3="passit",c2=false,c4=4 1626006833639000000
+st,t1=3,t2=4,t3=t3 c1=3i64,c3="passit",c2=false,c4=4i 1626006833640000000
```
-The data type mapping in the first row defines column c4 as DOUBLE, but the data in the second row is declared as BIGINT by the numeric suffix, which triggers a parsing error with schemaless writing.
+The first row defines c4 as a double. However, in the second row, the suffix indicates that the value of c4 is a bigint. This causes schemaless writing to throw an error.
-If the line protocol before the column declares the data column as BINARY, the subsequent one requires a longer binary length, which triggers a super table schema change.
+An error also occurs if data input into a binary column exceeds the defined length of the column.
```json
-st,t1=3,t2=4,t3=t3 c1=3i64,c5="pass" 1626006833639000000
-st,t1=3,t2=4,t3=t3 c1=3i64,c5="passit" 1626006833640000000
+st,t1=3,t2=4,t3=t3 c1=3i64,c5="pass" 1626006833639000000
+st,t1=3,t2=4,t3=t3 c1=3i64,c5="passit" 1626006833640000000
```
-The first line of the line protocol parsing will declare column c5 is a BINARY(4) field. The second line data write will parse column c5 as a BINARY column. But in the second line, c5's width is 6 so you need to increase the width of the BINARY field to be able to accommodate the new string.
+The first row defines c5 as a binary(4), but the second row writes 6 bytes to it. This means that the length of the binary column must be expanded to contain the data.
```json
-st,t1=3,t2=4,t3=t3 c1=3i64 1626006833639000000
-st,t1=3,t2=4,t3=t3 c1=3i64,c6="passit" 1626006833640000000
+st,t1=3,t2=4,t3=t3 c1=3i64 1626006833639000000
+st,t1=3,t2=4,t3=t3 c1=3i64,c6="passit" 1626006833640000000
```
-The second line of data has an additional column c6 of type BINARY(6) compared to the first row. Then a column c6 of type BINARY(6) is automatically added at this point.
+The preceding data includes a new entry, c6, with type binary(6). When this occurs, a new column c6 with type binary(6) is added automatically.
-## Write integrity
+## Write Integrity
-TDengine provides idempotency guarantees for data writing, i.e., you can repeatedly call the API to write data with errors. However, it does not give atomicity guarantees for writing multiple rows of data. During the process of writing numerous rows of data in one batch, some data will be written successfully, and some data will fail.
+TDengine guarantees the idempotency of data writes. This means that you can repeatedly call the API to perform write operations with bad data. However, TDengine does not guarantee the atomicity of multi-row writes. In a multi-row write, some data may be written successfully and other data unsuccessfully.
-## Error code
+## Error Codes
-If it is an error in the data itself during the schemaless writing process, the application will get `TSDB_CODE_TSC_LINE_SYNTAX_ERROR` error message, which indicates that the error occurred in writing. The other error codes are consistent with the TDengine and can be obtained via the `taos_errstr()` to get the specific cause of the error.
+The TSDB_CODE_TSC_LINE_SYNTAX_ERROR error code indicates an error in the schemaless writing component.
+This error occurs when writing text. For other errors, schemaless writing uses the standard TDengine error codes,
+whose descriptions can be obtained by calling taos_errstr.
diff --git a/docs/en/14-reference/14-taosKeeper.md b/docs/en/14-reference/14-taosKeeper.md
index 476b5a1fd20b4dce4379026a6300ae8e26db6656..665bc75380d4f59666d792d074fb37c65c810264 100644
--- a/docs/en/14-reference/14-taosKeeper.md
+++ b/docs/en/14-reference/14-taosKeeper.md
@@ -1,7 +1,7 @@
---
sidebar_label: taosKeeper
title: taosKeeper
-description: Instructions and tips for using taosKeeper
+description: exports TDengine monitoring metrics.
---
## Introduction
@@ -22,26 +22,35 @@ You can compile taosKeeper separately and install it. Please refer to the [taosK
### Configuration and running methods
-
-taosKeeper needs to be executed on the terminal of the operating system. To run taosKeeper, see [configuration file](#configuration-file-parameters-in-detail).
+taosKeeper needs to be executed on the terminal of the operating system. It supports three configuration methods: [Command-line arguments](#command-line-arguments-in-detail), [environment variable](#environment-variable-in-detail) and [configuration file](#configuration-file-parameters-in-detail). The precedence of those is command-line arguments, environment variables, and then the configuration file.
**Make sure that the TDengine cluster is running correctly before running taosKeeper. ** Ensure that the monitoring service in TDengine has been started. For more information, see [TDengine Monitoring Configuration](../config/#monitoring).
-
+### Environment variable
+
+You can use Environment variable to run taosKeeper and control its behavior:
+
+```shell
+$ export TAOS_KEEPER_TDENGINE_HOST=192.168.64.3
+
+$ taoskeeper
+```
+
+You can run `taoskeeper -h` for more details.
+
### Configuration File
You can quickly launch taosKeeper with the following commands. If you do not specify a configuration file, `/etc/taos/keeper.toml` is used by default. If this file does not specify configurations, the default values are used.
```shell
-taoskeeper -c
+$ taoskeeper -c
```
**Sample configuration files**
@@ -110,7 +119,7 @@ Query OK, 1 rows in database (0.036162s)
#### Export Monitoring Metrics
```shell
-curl http://127.0.0.1:6043/metrics
+$ curl http://127.0.0.1:6043/metrics
```
Sample result set (excerpt):
diff --git a/docs/en/20-third-party/01-grafana.mdx b/docs/en/20-third-party/01-grafana.mdx
index 5dbeb31a231464e48b4f977420f03f0ede81e78e..e0fbefd5a8634d2001f2cc0601afa110aff33632 100644
--- a/docs/en/20-third-party/01-grafana.mdx
+++ b/docs/en/20-third-party/01-grafana.mdx
@@ -6,9 +6,7 @@ title: Grafana
import Tabs from "@theme/Tabs";
import TabItem from "@theme/TabItem";
-TDengine can be quickly integrated with the open-source data visualization system [Grafana](https://www.grafana.com/) to build a data monitoring and alerting system. The whole process does not require any code development. And you can visualize the contents of the data tables in TDengine on a dashboard.
-
-You can learn more about using the TDengine plugin on [GitHub](https://github.com/taosdata/grafanaplugin/blob/master/README.md).
+TDengine can be quickly integrated with the open-source data visualization system [Grafana](https://www.grafana.com/) to build a data monitoring and alerting system. The whole process does not require any code development. And you can visualize the contents of the data tables in TDengine on a dashboard. You can learn more about using the TDengine plugin on [GitHub](https://github.com/taosdata/grafanaplugin/blob/master/README.md).
## Prerequisites
@@ -65,7 +63,6 @@ Restart Grafana service and open Grafana in web-browser, usually
-
Follow the installation steps in [Grafana](https://grafana.com/grafana/plugins/tdengine-datasource/?tab=installation) with the [``grafana-cli`` command-line tool](https://grafana.com/docs/grafana/latest/administration/cli/) for plugin installation.
@@ -76,7 +73,7 @@ grafana-cli plugins install tdengine-datasource
sudo -u grafana grafana-cli plugins install tdengine-datasource
```
-Alternatively, you can manually download the .zip file from [GitHub](https://github.com/taosdata/grafanaplugin/releases/tag/latest) or [Grafana](https://grafana.com/grafana/plugins/tdengine-datasource/?tab=installation) and unpack it into your grafana plugins directory.
+You can also download zip files from [GitHub](https://github.com/taosdata/grafanaplugin/releases/tag/latest) or [Grafana](https://grafana.com/grafana/plugins/tdengine-datasource/?tab=installation) and install manually. The commands are as follows:
```bash
GF_VERSION=3.2.2
@@ -131,7 +128,7 @@ docker run -d \
grafana/grafana
```
-You can setup a zero-configuration stack for TDengine + Grafana by [docker-compose](https://docs.docker.com/compose/) and [Grafana provisioning](https://grafana.com/docs/grafana/latest/administration/provisioning/) file:
+You can setup a zero-configuration stack for TDengine + Grafana by [docker-compose](https://docs.docker.com/compose/) and [Grafana provisioning](https://grafana.com/docs/grafana/latest/administration/provisioning/) file:
1. Save the provisioning configuration file to `tdengine.yml`.
@@ -196,7 +193,7 @@ Go back to the main interface to create a dashboard and click Add Query to enter
As shown above, select the `TDengine` data source in the `Query` and enter the corresponding SQL in the query box below for query.
-- INPUT SQL: enter the statement to be queried (the result set of the SQL statement should be two columns and multiple rows), for example: `select avg(mem_system) from log.dn where ts >= $from and ts < $to interval($interval)`, where, from, to and interval are built-in variables of the TDengine plugin, indicating the range and time interval of queries fetched from the Grafana plugin panel. In addition to the built-in variables, custom template variables are also supported.
+- INPUT SQL: Enter the desired query (the results being two columns and multiple rows), such as `select _wstart, avg(mem_system) from log.dnodes_info where ts >= $from and ts < $to interval($interval)`. In this statement, $from, $to, and $interval are variables that Grafana replaces with the query time range and interval. In addition to the built-in variables, custom template variables are also supported.
- ALIAS BY: This allows you to set the current query alias.
- GENERATE SQL: Clicking this button will automatically replace the corresponding variables and generate the final executed statement.
@@ -208,7 +205,11 @@ Follow the default prompt to query the average system memory usage for the speci
### Importing the Dashboard
-You can install TDinsight dashboard in data source configuration page (like `http://localhost:3000/datasources/edit/1/dashboards`) as a monitoring visualization tool for TDengine cluster. The dashboard is published in Grafana as [Dashboard 15167 - TDinsight](https://grafana.com/grafana/dashboards/15167). Check the [TDinsight User Manual](/reference/tdinsight/) for the details.
+You can install TDinsight dashboard in data source configuration page (like `http://localhost:3000/datasources/edit/1/dashboards`) as a monitoring visualization tool for TDengine cluster. Ensure that you use TDinsight for 3.x.
+
+
+
+A dashboard for TDengine 2.x has been published on Grafana: [Dashboard 15167 - TDinsight](https://grafana.com/grafana/dashboards/15167). Check the [TDinsight User Manual](/reference/tdinsight/) for the details.
For more dashboards using TDengine data source, [search here in Grafana](https://grafana.com/grafana/dashboards/?dataSource=tdengine-datasource). Here is a sub list:
diff --git a/docs/en/20-third-party/06-statsd.md b/docs/en/20-third-party/06-statsd.md
index 1cddbcf63db5bf64c77f40c7a3aa95698362fdac..32b1bbb97acafd2494c7fadb8af3d06cf69219ea 100644
--- a/docs/en/20-third-party/06-statsd.md
+++ b/docs/en/20-third-party/06-statsd.md
@@ -1,6 +1,6 @@
---
sidebar_label: StatsD
-title: StatsD writing
+title: StatsD Writing
---
import StatsD from "../14-reference/_statsd.mdx"
@@ -12,8 +12,8 @@ You can write StatsD data to TDengine by simply modifying the configuration file
## Prerequisites
To write StatsD data to TDengine requires the following preparations.
-- The TDengine cluster has been deployed and is working properly
-- taosAdapter is installed and running properly. Please refer to the [taosAdapter manual](/reference/taosadapter) for details.
+1. The TDengine cluster is deployed and functioning properly
+2. taosAdapter is installed and running properly. Please refer to the taosAdapter manual for details.
- StatsD has been installed. To install StatsD, please refer to [official documentation](https://github.com/statsd/statsd)
## Configuration steps
@@ -39,8 +39,12 @@ $ echo "foo:1|c" | nc -u -w0 127.0.0.1 8125
Use the TDengine CLI to verify that StatsD data is written to TDengine and can read out correctly.
```
-Welcome to the TDengine shell from Linux, Client Version:3.0.0.0
-Copyright (c) 2022 by TAOS Data, Inc. All rights reserved.
+taos> show databases;
+ name | created_time | ntables | vgroups | replica | quorum | days | keep | cache(MB) | blocks | minrows | maxrows | wallevel | fsync | comp | cachelast | precision | update | status |
+====================================================================================================================================================================================================================================================================================
+ log | 2022-04-20 07:19:50.260 | 11 | 1 | 1 | 1 | 10 | 3650 | 16 | 6 | 100 | 4096 | 1 | 3000 | 2 | 0 | ms | 0 | ready |
+ statsd | 2022-04-20 09:54:51.220 | 1 | 1 | 1 | 1 | 10 | 3650 | 16 | 6 | 100 | 4096 | 1 | 3000 | 2 | 0 | ns | 2 | ready |
+Query OK, 2 row(s) in set (0.003142s)
taos> use statsd;
Database changed.
diff --git a/docs/en/20-third-party/09-emq-broker.md b/docs/en/20-third-party/09-emq-broker.md
index 0900dd3d7571dc0ab8d93174aa2d7b5eccf1fbf5..2ead1bbaf40f06fec2a5cbf85e46fdfdcc5216df 100644
--- a/docs/en/20-third-party/09-emq-broker.md
+++ b/docs/en/20-third-party/09-emq-broker.md
@@ -9,7 +9,7 @@ MQTT is a popular IoT data transfer protocol. [EMQX](https://github.com/emqx/emq
The following preparations are required for EMQX to add TDengine data sources correctly.
- The TDengine cluster is deployed and working properly
-- taosAdapter is installed and running properly. Please refer to the [taosAdapter manual](/reference/taosadapter) for details.
+- taosAdapter is installed and running properly. Please refer to the [taosAdapter manual](../../reference/taosadapter) for details.
- If you use the emulated writers described later, you need to install the appropriate version of Node.js. V12 is recommended.
## Install and start EMQX
@@ -28,8 +28,6 @@ USE test;
CREATE TABLE sensor_data (ts TIMESTAMP, temperature FLOAT, humidity FLOAT, volume FLOAT, pm10 FLOAT, pm25 FLOAT, so2 FLOAT, no2 FLOAT, co FLOAT, sensor_id NCHAR(255), area TINYINT, coll_time TIMESTAMP);
```
-Note: The table schema is based on the blog [(In Chinese) Data Transfer, Storage, Presentation, EMQX + TDengine Build MQTT IoT Data Visualization Platform](https://www.taosdata.com/blog/2020/08/04/1722.html) as an example. Subsequent operations are carried out with this blog scenario too. Please modify it according to your actual application scenario.
-
## Configuring EMQX Rules
Since the configuration interface of EMQX differs from version to version, here is v4.4.5 as an example. For other versions, please refer to the corresponding official documentation.
@@ -137,5 +135,5 @@ Use the TDengine CLI program to log in and query the appropriate databases and t

-Please refer to the [TDengine official documentation](https://docs.taosdata.com/) for more details on how to use TDengine.
+Please refer to the [TDengine official documentation](https://docs.tdengine.com/) for more details on how to use TDengine.
EMQX Please refer to the [EMQX official documentation](https://www.emqx.io/docs/en/v4.4/rule/rule-engine.html) for details on how to use EMQX.
diff --git a/docs/en/20-third-party/10-hive-mq-broker.md b/docs/en/20-third-party/10-hive-mq-broker.md
index 333e00fa0e9b724ffbb067a83ad07d0b846b1a23..828a62ac5b336766d5c3770cc42cd3a61cfd8d5d 100644
--- a/docs/en/20-third-party/10-hive-mq-broker.md
+++ b/docs/en/20-third-party/10-hive-mq-broker.md
@@ -1,6 +1,6 @@
---
sidebar_label: HiveMQ Broker
-title: HiveMQ Broker writing
+title: HiveMQ Broker Writing
---
-[HiveMQ](https://www.hivemq.com/) is an MQTT broker that provides community and enterprise editions. HiveMQ is mainly for enterprise emerging machine-to-machine M2M communication and internal transport, meeting scalability, ease of management, and security features. HiveMQ provides an open-source plug-in development kit. MQTT data can be saved to TDengine via TDengine extension for HiveMQ. Please refer to the [HiveMQ extension - TDengine documentation](https://github.com/huskar-t/hivemq-tdengine-extension/blob/b62a26ecc164a310104df57691691b237e091c89/README_EN.md) for details on how to use it.
\ No newline at end of file
+[HiveMQ](https://www.hivemq.com/) is an MQTT broker that provides community and enterprise editions. HiveMQ is mainly for enterprise emerging machine-to-machine M2M communication and internal transport, meeting scalability, ease of management, and security features. HiveMQ provides an open-source plug-in development kit. MQTT data can be saved to TDengine via TDengine extension for HiveMQ. For more information, see [HiveMQ TDengine Extension](https://github.com/huskar-t/hivemq-tdengine-extension/blob/b62a26ecc164a310104df57691691b237e091c89/README_EN.md).
diff --git a/docs/en/20-third-party/12-google-data-studio.md b/docs/en/20-third-party/12-google-data-studio.md
new file mode 100644
index 0000000000000000000000000000000000000000..fc94f98056bbeeeec88ca7ea12a4a6a7e6f15dc5
--- /dev/null
+++ b/docs/en/20-third-party/12-google-data-studio.md
@@ -0,0 +1,36 @@
+---
+sidebar_label: Google Data Studio
+title: Use Google Data Studio to access TDengine
+---
+
+Data Studio is a powerful tool for reporting and visualization, offering a wide variety of charts and connectors and making it easy to generate reports based on predefined templates. Its ease of use and robust ecosystem have made it one of the first choices for people working in data analysis.
+
+TDengine is a high-performance, scalable time-series database that supports SQL. Many businesses and developers in fields spanning from IoT and Industry Internet to IT and finance are using TDengine as their time-series database management solution.
+
+The TDengine team immediately saw the benefits of using TDengine to process time-series data with Data Studio to analyze it, and they got to work to create a connector for Data Studio.
+
+With the release of the TDengine connector in Data Studio, you can now get even more out of your data. To obtain the connector, first go to the Data Studio Connector Gallery, click Connect to Data, and search for “TDengine”.
+
+
+
+Select the TDengine connector and click Authorize.
+
+
+
+Then sign in to your Google Account and click Allow to enable the connection to TDengine.
+
+
+
+In the Enter URL field, type the hostname and port of the server running the TDengine REST service. In the following fields, type your username, password, database name, table name, and the start and end times of your query range. Then, click Connect.
+
+
+
+After the connection is established, you can use Data Studio to process your data and create reports.
+
+
+
+In Data Studio, TDengine timestamps and tags are considered dimensions, and all other items are considered metrics. You can create all kinds of custom charts with your data – some examples are shown below.
+
+
+
+With the ability to process petabytes of data per day and provide monitoring and alerting in real time, TDengine is a great solution for time-series data management. Now, with the Data Studio connector, we’re sure you’ll be able to gain new insights and obtain even more value from your data.
diff --git a/docs/en/20-third-party/gds/gds-01.webp b/docs/en/20-third-party/gds/gds-01.webp
new file mode 100644
index 0000000000000000000000000000000000000000..2e5f9e4ff5db1e37718e2397c9a13a9f0e05602d
Binary files /dev/null and b/docs/en/20-third-party/gds/gds-01.webp differ
diff --git a/docs/en/20-third-party/gds/gds-02.png.webp b/docs/en/20-third-party/gds/gds-02.png.webp
new file mode 100644
index 0000000000000000000000000000000000000000..3b3537f5a488019482f94452e70bd1bd79867ab5
Binary files /dev/null and b/docs/en/20-third-party/gds/gds-02.png.webp differ
diff --git a/docs/en/20-third-party/gds/gds-03.png.webp b/docs/en/20-third-party/gds/gds-03.png.webp
new file mode 100644
index 0000000000000000000000000000000000000000..5719436d5b2f21aa861067b966511e4b34d17dce
Binary files /dev/null and b/docs/en/20-third-party/gds/gds-03.png.webp differ
diff --git a/docs/en/20-third-party/gds/gds-04.png.webp b/docs/en/20-third-party/gds/gds-04.png.webp
new file mode 100644
index 0000000000000000000000000000000000000000..ddaae5c1a63b6b4db692e12491df55b88dcaadee
Binary files /dev/null and b/docs/en/20-third-party/gds/gds-04.png.webp differ
diff --git a/docs/en/20-third-party/gds/gds-05.png.webp b/docs/en/20-third-party/gds/gds-05.png.webp
new file mode 100644
index 0000000000000000000000000000000000000000..9a917678fc7e60f0a739fa1e2b0f4fa010d12708
Binary files /dev/null and b/docs/en/20-third-party/gds/gds-05.png.webp differ
diff --git a/docs/en/20-third-party/gds/gds-06.png.webp b/docs/en/20-third-party/gds/gds-06.png.webp
new file mode 100644
index 0000000000000000000000000000000000000000..c76b68d32b5907bd5ba4e4010456f2ca5303448f
Binary files /dev/null and b/docs/en/20-third-party/gds/gds-06.png.webp differ
diff --git a/docs/en/20-third-party/gds/gds-07.png.webp b/docs/en/20-third-party/gds/gds-07.png.webp
new file mode 100644
index 0000000000000000000000000000000000000000..1386ae9c4db4f2465dd071afc5a047658b47031c
Binary files /dev/null and b/docs/en/20-third-party/gds/gds-07.png.webp differ
diff --git a/docs/en/20-third-party/gds/gds-08.png.webp b/docs/en/20-third-party/gds/gds-08.png.webp
new file mode 100644
index 0000000000000000000000000000000000000000..59dcf8b31df8bde8d4073ee0c7b1c7bdd7bd439d
Binary files /dev/null and b/docs/en/20-third-party/gds/gds-08.png.webp differ
diff --git a/docs/en/20-third-party/gds/gds-09.png.webp b/docs/en/20-third-party/gds/gds-09.png.webp
new file mode 100644
index 0000000000000000000000000000000000000000..b94439f211a814f66d41231c9386c57f3ffe8322
Binary files /dev/null and b/docs/en/20-third-party/gds/gds-09.png.webp differ
diff --git a/docs/en/20-third-party/gds/gds-10.png.webp b/docs/en/20-third-party/gds/gds-10.png.webp
new file mode 100644
index 0000000000000000000000000000000000000000..a63cad9e9a3d412b1132359506530498fb1a0e57
Binary files /dev/null and b/docs/en/20-third-party/gds/gds-10.png.webp differ
diff --git a/docs/en/20-third-party/gds/gds-11.png.webp b/docs/en/20-third-party/gds/gds-11.png.webp
new file mode 100644
index 0000000000000000000000000000000000000000..fc38cd9a29c00afa48238741c33b439f737a7b8f
Binary files /dev/null and b/docs/en/20-third-party/gds/gds-11.png.webp differ
diff --git a/docs/en/20-third-party/import_dashboard.webp b/docs/en/20-third-party/import_dashboard.webp
new file mode 100644
index 0000000000000000000000000000000000000000..164e3f4690a5a55f937a3c29e1e8ca026648e6b1
Binary files /dev/null and b/docs/en/20-third-party/import_dashboard.webp differ
diff --git a/docs/en/20-third-party/import_dashboard1.webp b/docs/en/20-third-party/import_dashboard1.webp
new file mode 100644
index 0000000000000000000000000000000000000000..d4fb374ce8bb75c8a0fbdbb9cab5b30eb29ab06d
Binary files /dev/null and b/docs/en/20-third-party/import_dashboard1.webp differ
diff --git a/docs/en/20-third-party/import_dashboard2.webp b/docs/en/20-third-party/import_dashboard2.webp
new file mode 100644
index 0000000000000000000000000000000000000000..9f74dc96be20ab64b5fb555aaccdaa1c1139b35c
Binary files /dev/null and b/docs/en/20-third-party/import_dashboard2.webp differ
diff --git a/docs/en/21-tdinternal/01-arch.md b/docs/en/21-tdinternal/01-arch.md
index 44651c0496481c410640e577aaad5781f846e302..2f876adffc2543bb9f117e5812ccc5241d7a6d99 100644
--- a/docs/en/21-tdinternal/01-arch.md
+++ b/docs/en/21-tdinternal/01-arch.md
@@ -12,6 +12,7 @@ The design of TDengine is based on the assumption that any hardware or software
Logical structure diagram of TDengine's distributed architecture is as follows:

+
Figure 1: TDengine architecture diagram
A complete TDengine system runs on one or more physical nodes. Logically, it includes data node (dnode), TDengine client driver (TAOSC) and application (app). There are one or more data nodes in the system, which form a cluster. The application interacts with the TDengine cluster through TAOSC's API. The following is a brief introduction to each logical unit.
@@ -38,15 +39,16 @@ A complete TDengine system runs on one or more physical nodes. Logically, it inc
**Cluster external connection**: TDengine cluster can accommodate a single, multiple or even thousands of data nodes. The application only needs to initiate a connection to any data node in the cluster. The network parameter required for connection is the End Point (FQDN plus configured port number) of a data node. When starting the application taos through CLI, the FQDN of the data node can be specified through the option `-h`, and the configured port number can be specified through `-p`. If the port is not configured, the system configuration parameter “serverPort” of TDengine will be adopted.
-**Inter-cluster communication**: Data nodes connect with each other through TCP/UDP. When a data node starts, it will obtain the EP information of the dnode where the mnode is located, and then establish a connection with the mnode in the system to exchange information. There are three steps to obtain EP information of the mnode:
+**Inter-cluster communication**: Data nodes connect with each other through TCP/UDP. When a data node starts, it will obtain the EP information of the dnode where the mnode is located, and then establish a connection with the mnode in the system to exchange information. There are three steps to obtain EP information of the mnode:
-1. Check whether the mnodeEpList file exists, if it does not exist or cannot be opened normally to obtain EP information of the mnode, skip to the second step;
+1. Check whether the mnodeEpList file exists, if it does not exist or cannot be opened normally to obtain EP information of the mnode, skip to the second step;
2. Check the system configuration file taos.cfg to obtain node configuration parameters “firstEp” and “secondEp” (the node specified by these two parameters can be a normal node without mnode, in this case, the node will try to redirect to the mnode node when connected). If these two configuration parameters do not exist or do not exist in taos.cfg, or are invalid, skip to the third step;
3. Set your own EP as a mnode EP and run it independently. After obtaining the mnode EP list, the data node initiates the connection. It will successfully join the working cluster after connection. If not successful, it will try the next item in the mnode EP list. If all attempts are made, but the connection still fails, sleep for a few seconds before trying again.
**The choice of MNODE**: TDengine logically has a management node, but there is no separate execution code. The server-side only has one set of execution code, taosd. So which data node will be the management node? This is determined automatically by the system without any manual intervention. The principle is as follows: when a data node starts, it will check its End Point and compare it with the obtained mnode EP List. If its EP exists in it, the data node shall start the mnode module and become a mnode. If your own EP is not in the mnode EP List, the mnode module will not start. During the system operation, due to load balancing, downtime and other reasons, mnode may migrate to the new dnode, totally transparently and without manual intervention. The modification of configuration parameters is the decision made by mnode itself according to resources usage.
-**Add new data nodes:** After the system has a data node, it has become a working system. There are two steps to add a new node into the cluster.
+**Add new data nodes:** After the system has a data node, it has become a working system. There are two steps to add a new node into the cluster.
+
- Step1: Connect to the existing working data node using TDengine CLI, and then add the End Point of the new data node with the command "create dnode"
- Step 2: In the system configuration parameter file taos.cfg of the new data node, set the “firstEp” and “secondEp” parameters to the EP of any two data nodes in the existing cluster. Please refer to the user tutorial for detailed steps. In this way, the cluster will be established step by step.
@@ -57,6 +59,7 @@ A complete TDengine system runs on one or more physical nodes. Logically, it inc
To explain the relationship between vnode, mnode, TAOSC and application and their respective roles, the following is an analysis of a typical data writing process.

+
Figure 2: Typical process of TDengine
1. Application initiates a request to insert data through JDBC, ODBC, or other APIs.
@@ -121,16 +124,17 @@ The load balancing process does not require any manual intervention, and it is t
If a database has N replicas, a virtual node group has N virtual nodes. But only one is the Leader and all others are slaves. When the application writes a new record to system, only the Leader vnode can accept the writing request. If a follower vnode receives a writing request, the system will notifies TAOSC to redirect.
-### Leader vnode Writing Process
+### Leader vnode Writing Process
Leader Vnode uses a writing process as follows:

+
Figure 3: TDengine Leader writing process
1. Leader vnode receives the application data insertion request, verifies, and moves to next step;
2. If the system configuration parameter `“walLevel”` is greater than 0, vnode will write the original request packet into database log file WAL. If walLevel is set to 2 and fsync is set to 0, TDengine will make WAL data written immediately to ensure that even system goes down, all data can be recovered from database log file;
-3. If there are multiple replicas, vnode will forward data packet to follower vnodes in the same virtual node group, and the forwarded packet has a version number with data;
+3. If there are multiple replicas, vnode will forward data packet to follower vnodes in the same virtual node group, and the forwarded packet has a version number with data;
4. Write into memory and add the record to “skip list”;
5. Leader vnode returns a confirmation message to the application, indicating a successful write.
6. If any of Step 2, 3 or 4 fails, the error will directly return to the application.
@@ -140,6 +144,7 @@ Leader Vnode uses a writing process as follows:
For a follower vnode, the write process as follows:

+
Figure 4: TDengine Follower Writing Process
1. Follower vnode receives a data insertion request forwarded by Leader vnode;
@@ -212,6 +217,7 @@ When data is written to disk, the system decideswhether to compress the data bas
By default, TDengine saves all data in /var/lib/taos directory, and the data files of each vnode are saved in a different directory under this directory. In order to expand the storage space, minimize the bottleneck of file reading and improve the data throughput rate, TDengine can configure the system parameter “dataDir” to allow multiple mounted hard disks to be used by system at the same time. In addition, TDengine also provides the function of tiered data storage, i.e. storage on different storage media according to the time stamps of data files. For example, the latest data is stored on SSD, the data older than a week is stored on local hard disk, and data older than four weeks is stored on network storage device. This reduces storage costs and ensures efficient data access. The movement of data on different storage media is automatically done by the system and is completely transparent to applications. Tiered storage of data is also configured through the system parameter “dataDir”.
dataDir format is as follows:
+
```
dataDir data_path [tier_level]
```
@@ -270,6 +276,7 @@ For the data collected by device D1001, the number of records per hour is counte
TDengine creates a separate table for each data collection point, but in practical applications, it is often necessary to aggregate data from different data collection points. In order to perform aggregation operations efficiently, TDengine introduces the concept of STable (super table). STable is used to represent a specific type of data collection point. It is a table set containing multiple tables. The schema of each table in the set is the same, but each table has its own static tag. There can be multiple tags which can be added, deleted and modified at any time. Applications can aggregate or statistically operate on all or a subset of tables under a STABLE by specifying tag filters. This greatly simplifies the development of applications. The process is shown in the following figure:

+
Figure 5: Diagram of multi-table aggregation query
1. Application sends a query condition to system;
@@ -279,9 +286,8 @@ TDengine creates a separate table for each data collection point, but in practic
5. Each vnode first finds the set of tables within its own node that meet the tag filters from memory, then scans the stored time-series data, completes corresponding aggregation calculations, and returns result to TAOSC;
6. TAOSC finally aggregates the results returned by multiple data nodes and send them back to application.
-Since TDengine stores tag data and time-series data separately in vnode, by filtering tag data in memory, the set of tables that need to participate in aggregation operation is first found, which reduces the volume of data to be scanned and improves aggregation speed. At the same time, because the data is distributed in multiple vnodes/dnodes, the aggregation operation is carried out concurrently in multiple vnodes, which further improves the aggregation speed. Aggregation functions for ordinary tables and most operations are applicable to STables. The syntax is exactly the same. Please see TAOS SQL for details.
+Since TDengine stores tag data and time-series data separately in vnode, by filtering tag data in memory, the set of tables that need to participate in aggregation operation is first found, which reduces the volume of data to be scanned and improves aggregation speed. At the same time, because the data is distributed in multiple vnodes/dnodes, the aggregation operation is carried out concurrently in multiple vnodes, which further improves the aggregation speed. Aggregation functions for ordinary tables and most operations are applicable to STables. The syntax is exactly the same. Please see TDengine SQL for details.
### Precomputation
In order to effectively improve the performance of query processing, based-on the unchangeable feature of IoT data, statistical information of data stored in data block is recorded in the head of data block, including max value, min value, and sum. We call it a precomputing unit. If the query processing involves all the data of a whole data block, the pre-calculated results are directly used, and no need to read the data block contents at all. Since the amount of pre-calculated data is much smaller than the actual size of data block stored on disk, for query processing with disk IO as bottleneck, the use of pre-calculated results can greatly reduce the pressure of reading IO and accelerate the query process. The precomputation mechanism is similar to the BRIN (Block Range Index) of PostgreSQL.
-
diff --git a/docs/en/25-application/03-immigrate.md b/docs/en/25-application/03-immigrate.md
index 9614574c71b0a28853de413ea6928101da899bf7..1aabaa43e77660d72bca00d7d59cdee69b1a7c92 100644
--- a/docs/en/25-application/03-immigrate.md
+++ b/docs/en/25-application/03-immigrate.md
@@ -41,7 +41,7 @@ The agents deployed in the application nodes are responsible for providing opera
- **TDengine installation and deployment**
-First of all, please install TDengine. Download the latest stable version of TDengine from the official website and install it. For help with using various installation packages, please refer to the blog ["Installation and Uninstallation of TDengine Multiple Installation Packages"](https://www.taosdata.com/blog/2019/08/09/566.html).
+First of all, please install TDengine. Download the latest stable version of TDengine from the official website and install it. For help with using various installation packages, please refer to [Install TDengine](../../get-started/package)
Note that once the installation is complete, do not start the `taosd` service before properly configuring the parameters.
@@ -51,7 +51,7 @@ TDengine version 2.4 and later version includes `taosAdapter`. taosAdapter is a
Users can flexibly deploy taosAdapter instances, based on their requirements, to improve data writing throughput and provide guarantees for data writes in different application scenarios.
-Through taosAdapter, users can directly write the data collected by `collectd` or `StatsD` to TDengine to achieve easy, convenient and seamless migration in application scenarios. taosAdapter also supports Telegraf, Icinga, TCollector, and node_exporter data. For more details, please refer to [taosAdapter](/reference/taosadapter/).
+Through taosAdapter, users can directly write the data collected by `collectd` or `StatsD` to TDengine to achieve easy, convenient and seamless migration in application scenarios. taosAdapter also supports Telegraf, Icinga, TCollector, and node_exporter data. For more details, please refer to [taosAdapter](../../reference/taosadapter/).
If using collectd, modify the configuration file in its default location `/etc/collectd/collectd.conf` to point to the IP address and port of the node where to deploy taosAdapter. For example, assuming the taosAdapter IP address is 192.168.1.130 and port 6046, configure it as follows.
@@ -411,7 +411,7 @@ TDengine provides a wealth of help documents to explain many aspects of cluster
### Cluster Deployment
-The first is TDengine installation. Download the latest stable version of TDengine from the official website, and install it. Please refer to the blog ["Installation and Uninstallation of Various Installation Packages of TDengine"](https://www.taosdata.com/blog/2019/08/09/566.html) for the various installation package formats.
+The first is TDengine installation. Download the latest stable version of TDengine from the official website, and install it. Please refer to [Install TDengine](../../get-started/package) for more details.
Note that once the installation is complete, do not immediately start the `taosd` service, but start it after correctly configuring the parameters.
diff --git a/docs/en/27-train-faq/01-faq.md b/docs/en/27-train-faq/01-faq.md
index c10bca1d05edd8cebe451901b3abb91923618a26..733b4184741ec3bdcea5ae5ef4b236493a03be35 100644
--- a/docs/en/27-train-faq/01-faq.md
+++ b/docs/en/27-train-faq/01-faq.md
@@ -1,114 +1,163 @@
---
-sidebar_label: FAQ
title: Frequently Asked Questions
---
## Submit an Issue
-If the tips in FAQ don't help much, please submit an issue on [GitHub](https://github.com/taosdata/TDengine) to describe your problem. In your description please include the TDengine version, hardware and OS information, the steps to reproduce the problem and any other relevant information. It would be very helpful if you can package the contents in `/var/log/taos` and `/etc/taos` and upload. These two are the default directories used by TDengine. If you have changed the default directories in your configuration, please package the files in your configured directories. We recommended setting `debugFlag` to 135 in `taos.cfg`, restarting `taosd`, then reproducing the problem and collecting the logs. If you don't want to restart, an alternative way of setting `debugFlag` is executing `alter dnode debugFlag 135` command in TDengine CLI `taos`. During normal running, however, please make sure `debugFlag` is set to 131.
+If your issue could not be resolved by reviewing this documentation, you can submit your issue on GitHub and receive support from the TDengine Team. When you submit an issue, attach the following directories from your TDengine deployment:
-## Frequently Asked Questions
+1. The directory containing TDengine logs (`/var/log/taos` by default)
+2. The directory containing TDengine configuration files (`/etc/taos` by default)
-### 1. How to upgrade to TDengine 2.0 from older version?
+In your GitHub issue, provide the version of TDengine and the operating system and environment for your deployment, the operations that you performed when the issue occurred, and the time of occurrence and affected tables.
-version 2.x is not compatible with version 1.x. With regard to the configuration and data files, please perform the following steps before upgrading. Please follow data integrity, security, backup and other relevant SOPs, best practices before removing/deleting any data.
+To obtain more debugging information, open `taos.cfg` and set the `debugFlag` parameter to `135`. Then restart TDengine Server and reproduce the issue. The debug-level logs generated help the TDengine Team to resolve your issue. If it is not possible to restart TDengine Server, you can run the following command in the TDengine CLI to set the debug flag:
-1. Delete configuration files: `sudo rm -rf /etc/taos/taos.cfg`
-2. Delete log files: `sudo rm -rf /var/log/taos/`
-3. Delete data files if the data doesn't need to be kept: `sudo rm -rf /var/lib/taos/`
-4. Install latest 2.x version
-5. If the data needs to be kept and migrated to newer version, please contact professional service at TDengine for assistance.
+```
+ alter dnode 'debugFlag' '135';
+```
-### 2. How to handle "Unable to establish connection"?
+You can run the `SHOW DNODES` command to determine the dnode ID.
-When the client is unable to connect to the server, you can try the following ways to troubleshoot and resolve the problem.
+When debugging information is no longer needed, set `debugFlag` to 131.
-1. Check the network
+## Frequently Asked Questions
- - Check if the hosts where the client and server are running are accessible to each other, for example by `ping` command.
- - Check if the TCP/UDP on port 6030-6042 are open for access if firewall is enabled. If possible, disable the firewall for diagnostics, but please ensure that you are following security and other relevant protocols.
- - Check if the FQDN and serverPort are configured correctly in `taos.cfg` used by the server side.
- - Check if the `firstEp` is set properly in the `taos.cfg` used by the client side.
+### 1. What are the best practices for upgrading a previous version of TDengine to version 3.0?
-2. Make sure the client version and server version are same.
+TDengine 3.0 is not compatible with the configuration and data files from previous versions. Before upgrading, perform the following steps:
-3. On server side, check the running status of `taosd` by executing `systemctl status taosd` . If your server is started using another way instead of `systemctl`, use the proper method to check whether the server process is running normally.
+1. Run `sudo rm -rf /etc/taos/taos.cfg` to delete your configuration file.
+2. Run `sudo rm -rf /var/log/taos/` to delete your log files.
+3. Run `sudo rm -rf /var/lib/taos/` to delete your data files.
+4. Install TDengine 3.0.
+5. For assistance in migrating data to TDengine 3.0, contact [TDengine Support](https://tdengine.com/support).
-4. If using connector of Python, Java, Go, Rust, C#, node.JS on Linux to connect to the server, please make sure `libtaos.so` is in directory `/usr/local/taos/driver` and `/usr/local/taos/driver` is in system lib search environment variable `LD_LIBRARY_PATH`.
+### 4. How can I resolve the "Unable to establish connection" error?
-5. If using connector on Windows, please make sure `C:\TDengine\driver\taos.dll` is in your system lib search path. We recommend putting `taos.dll` under `C:\Windows\System32`.
+This error indicates that the client could not connect to the server. Perform the following troubleshooting steps:
-6. Some advanced network diagnostics tools
+1. Check the network.
- - On Linux system tool `nc` can be used to check whether the TCP/UDP can be accessible on a specified port
- Check whether a UDP port is open: `nc -vuz {hostIP} {port} `
- Check whether a TCP port on server side is open: `nc -l {port}`
- Check whether a TCP port on client side is open: `nc {hostIP} {port}`
+ - For machines deployed in the cloud, verify that your security group can access ports 6030 and 6031 (TCP and UDP).
+ - For virtual machines deployed locally, verify that the hosts where the client and server are running are accessible to each other. Do not use localhost as the hostname.
+ - For machines deployed on a corporate network, verify that your NAT configuration allows the server to respond to the client.
- - On Windows system `Test-NetConnection -ComputerName {fqdn} -Port {port}` on PowerShell can be used to check whether the port on server side is open for access.
+2. Verify that the client and server are running the same version of TDengine.
-7. TDengine CLI `taos` can also be used to check network, please refer to [TDengine CLI](/reference/taos-shell).
+3. On the server, run `systemctl status taosd` to verify that taosd is running normally. If taosd is stopped, run `systemctl start taosd`.
-### 3. How to handle "Unexpected generic error in RPC" or "Unable to resolve FQDN" ?
+4. Verify that the client is configured with the correct FQDN for the server.
-This error is caused because the FQDN can't be resolved. Please try following ways:
+5. If the server cannot be reached with the `ping` command, verify that network and DNS or hosts file settings are correct. For a TDengine cluster, the client must be able to ping the FQDN of every node in the cluster.
-1. Check whether the FQDN is configured properly on the server side
-2. If DSN server is configured in the network, please check whether it works; otherwise, check `/etc/hosts` to see whether the FQDN is configured with correct IP
-3. If the network configuration on the server side is OK, try to ping the server from the client side.
-4. If TDengine has been used before with an old hostname then the hostname has been changed, please check `/var/lib/taos/taos/dnode/dnodeEps.json`. Before setting up a new TDengine cluster, it's better to cleanup the directories configured.
+6. Verify that your firewall settings allow all hosts in the cluster to communicate on ports 6030 and 6041 (TCP and UDP). You can run `ufw status` (Ubuntu) or `firewall-cmd --list-port` (CentOS) to check the configuration.
-### 4. "Invalid SQL" is returned even though the Syntax is correct
+7. If you are using the Python, Java, Go, Rust, C#, or Node.js connector on Linux to connect to the server, verify that `libtaos.so` is in the `/usr/local/taos/driver` directory and `/usr/local/taos/driver` is in the `LD_LIBRARY_PATH` environment variable.
-"Invalid SQL" is returned when the length of SQL statement exceeds maximum allowed length or the syntax is not correct.
+8. If you are using Windows, verify that `C:\TDengine\driver\taos.dll` is in the `PATH` environment variable. If possible, move `taos.dll` to the `C:\Windows\System32` directory.
-### 5. Whether validation queries are supported?
+9. On Linux systems, you can use the `nc` tool to check whether a port is accessible:
+ - To check whether a UDP port is open, run `nc -vuz {hostIP} {port}`.
+ - To check whether a TCP port on the server side is open, run `nc -l {port}`.
+ - To check whether a TCP port on client side is open, run `nc {hostIP} {port}`.
-It's suggested to use a builtin database named as `log` to monitor.
+10. On Windows systems, you can run `Test-NetConnection -ComputerName {fqdn} -Port {port}` in PowerShell to check whether a port on the server side is accessible.
-
+11. You can also use the TDengine CLI to diagnose network issues. For more information, see [Problem Diagnostics](https://docs.tdengine.com/operation/diagnose/).
-### 6. Can I delete a record?
+### 5. How can I resolve the "Unable to resolve FQDN" error?
-From version 2.6.0.0 Enterprise version, deleting data can be supported.
+Clients and dnodes must be able to resolve the FQDN of each required node. You can confirm your configuration as follows:
-### 7. How to create a table of over 1024 columns?
+1. Verify that the FQDN is configured properly on the server.
+2. If your network has a DNS server, verify that it is operational.
+3. If your network does not have a DNS server, verify that the FQDNs in the `hosts` file are correct.
+4. On the client, use the `ping` command to test your connection to the server. If you cannot ping an FQDN, TDengine cannot reach it.
+5. If TDengine has been previously installed and the `hostname` was modified, open `dnode.json` in the `data` folder and verify that the endpoint configuration is correct. The default location of the dnode file is `/var/lib/taos/dnode`. Ensure that you clean up previous installations before reinstalling TDengine.
+6. Confirm whether FQDNs are preconfigured in `/etc/hosts` and `/etc/hostname`.
-From version 2.1.7.0, at most 4096 columns can be defined for a table.
+### 6. What is the most effective way to write data to TDengine?
-### 8. How to improve the efficiency of inserting data?
+Writing data in batches provides higher efficiency in most situations. You can insert one or more data records into one or more tables in a single SQL statement.
-Inserting data in batch is a good practice. Single SQL statement can insert data for one or multiple tables in batch.
+### 9. Why are table names not fully displayed?
-### 9. JDBC Error: the executed SQL is not a DML or a DDL?
+The number of columns in the TDengine CLI terminal display is limited. This can cause table names to be cut off, and if you use an incomplete name in a statement, the "Table does not exist" error will occur. You can increase the display size with the `maxBinaryDisplayWidth` parameter or the SQL statement `set max_binary_display_width`. You can also append `\G` to your SQL statement to bypass this limitation.
-Please upgrade to latest JDBC driver, for details please refer to [Java Connector](/reference/connector/java)
+### 10. How can I migrate data?
-### 10. Failed to connect with error "invalid timestamp"
+In TDengine, the `hostname` uniquely identifies a machine. When you move data files to a new machine, you must configure the new machine to have the same `host name` as the original machine.
-The most common reason is that the time setting is not aligned on the client side and the server side. On Linux system, please use `ntpdate` command. On Windows system, please enable automatic sync in system time setting.
+:::note
-### 11. Table name is not shown in full
+The data structure of previous versions of TDengine is not compatible with version 3.0. To migrate from TDengine 1.x or 2.x to 3.0, you must export data from your older deployment and import it back into TDengine 3.0.
-There is a display width setting in TDengine CLI `taos`. It can be controlled by configuration parameter `maxBinaryDisplayWidth`, or can be set using SQL command `set max_binary_display_width`. A more convenient way is to append `\G` in a SQL command to bypass this limitation.
+:::
-### 12. How to change log level temporarily?
+### 11. How can I temporary change the log level from the TDengine Client?
-Below SQL command can be used to adjust log level temporarily
+To change the log level for debugging purposes, you can use the following command:
```sql
-ALTER LOCAL flag_name flag_value;
+ALTER LOCAL local_option
+
+local_option: {
+ 'resetLog'
+ | 'rpcDebugFlag' value
+ | 'tmrDebugFlag' value
+ | 'cDebugFlag' value
+ | 'uDebugFlag' value
+ | 'debugFlag' value
+}
```
- - flag_name can be: debugFlag,cDebugFlag,tmrDebugFlag,uDebugFlag,rpcDebugFlag
- - flag_value can be: 131 (INFO/WARNING/ERROR), 135 (plus DEBUG), 143 (plus TRACE)
-
+Use `resetlog` to remove all logs generated on the local client. Use the other parameters to specify a log level for a specific component.
-### 13. What to do if go compilation fails?
+For each parameter, you can set the value to `131` (error and warning), `135` (error, warning, and debug), or `143` (error, warning, debug, and trace).
-From version 2.3.0.0, a new component named `taosAdapter` is introduced. Its' developed in Go. If you want to compile from source code and meet go compilation problems, try to do below steps to resolve Go environment problems.
+### Why do TDengine components written in Go fail to compile?
-```sh
-go env -w GO111MODULE=on
-go env -w GOPROXY=https://goproxy.cn,direct
-```
+TDengine includes taosAdapter, an independent component written in Go. This component provides the REST API as well as data access for other products such as Prometheus and Telegraf.
+When using the develop branch, you must run `git submodule update --init --recursive` to download the taosAdapter repository and then compile it.
+
+TDengine Go components require Go version 1.14 or later.
+
+### 13. How can I query the storage space being used by my data?
+
+The TDengine data files are stored in `/var/lib/taos` by default. Log files are stored in `/var/log/taos`.
+
+To see how much space your data files occupy, run `du -sh /var/lib/taos/vnode --exclude='wal'`. This excludes the write-ahead log (WAL) because its size is relatively fixed while writes are occurring, and it is written to disk and cleared when you shut down TDengine.
+
+If you want to see how much space is occupied by a single database, first determine which vgroup is storing the database by running `show vgroups`. Then check `/var/lib/taos/vnode` for the files associated with the vgroup ID.
+
+### 15. How is timezone information processed for timestamps?
+
+TDengine uses the timezone of the client for timestamps. The server timezone does not affect timestamps. The client converts Unix timestamps in SQL statements to UTC before sending them to the server. When you query data on the server, it provides timestamps in UTC to the client, which converts them to its local time.
+
+Timestamps are processed as follows:
+
+1. The client uses its system timezone unless it has been configured otherwise.
+2. A timezone configured in `taos.cfg` takes precedence over the system timezone.
+3. A timezone explicitly specified when establishing a connection to TDengine through a connector takes precedence over `taos.cfg` and the system timezone. For example, the Java connector allows you to specify a timezone in the JDBC URL.
+4. If you use an RFC 3339 timestamp (2013-04-12T15:52:01.123+08:00), or an ISO 8601 timestamp (2013-04-12T15:52:01.123+0800), the timezone specified in the timestamp is used instead of the timestamps configured using any other method.
+
+### 16. Which network ports are required by TDengine?
+
+See [serverPort](https://docs.tdengine.com/reference/config/#serverport) in Configuration Parameters.
+
+Note that ports are specified using 6030 as the default first port. If you change this port, all other ports change as well.
+
+### 17. Why do applications such as Grafana fail to connect to TDengine over the REST API?
+
+In TDengine, the REST API is provided by taosAdapter. Ensure that taosAdapter is running before you connect an application to TDengine over the REST API. You can run `systemctl start taosadapter` to start the service.
+
+Note that the log path for taosAdapter must be configured separately. The default path is `/var/log/taos`. You can choose one of eight log levels. The default is `info`. You can set the log level to `panic` to disable log output. You can modify the taosAdapter configuration file to change these settings. The default location is `/etc/taos/taosadapter.toml`.
+
+For more information, see [taosAdapter](https://docs.tdengine.com/reference/taosadapter/).
+
+### 18. How can I resolve out-of-memory (OOM) errors?
+
+OOM errors are thrown by the operating system when its memory, including swap, becomes insufficient and it needs to terminate processes to remain operational. Most OOM errors in TDengine occur for one of the following reasons: free memory is less than the value of `vm.min_free_kbytes` or free memory is less than the size of the request. If TDengine occupies reserved memory, an OOM error can occur even when free memory is sufficient.
+
+TDengine preallocates memory to each vnode. The number of vnodes per database is determined by the `vgroups` parameter, and the amount of memory per vnode is determined by the `buffer` parameter. To prevent OOM errors from occurring, ensure that you prepare sufficient memory on your hosts to support the number of vnodes that your deployment requires. Configure an appropriately sized swap space. If you continue to receive OOM errors, your SQL statements may be querying too much data for your system. TDengine Enterprise Edition includes optimized memory management that increases stability for enterprise customers.
diff --git a/docs/examples/csharp/SQLInsertExample.cs b/docs/examples/csharp/SQLInsertExample.cs
index 192ea96d5713bbf7f37f2208687c41e3e66d473b..3ce70fe9144d997b320610500be29b329b69a08f 100644
--- a/docs/examples/csharp/SQLInsertExample.cs
+++ b/docs/examples/csharp/SQLInsertExample.cs
@@ -23,6 +23,7 @@ namespace TDengineExample
CheckRes(conn, res, "failed to insert data");
int affectedRows = TDengine.AffectRows(res);
Console.WriteLine("affectedRows " + affectedRows);
+ TDengine.FreeResult(res);
ExitProgram(conn, 0);
}
diff --git a/docs/examples/java/src/main/java/com/taos/example/RestInsertExample.java b/docs/examples/java/src/main/java/com/taos/example/RestInsertExample.java
index af97fe4373ca964260e5614f133f359e229b0e15..9d85bf2a94abda71bcdab89d46008b70e52ce437 100644
--- a/docs/examples/java/src/main/java/com/taos/example/RestInsertExample.java
+++ b/docs/examples/java/src/main/java/com/taos/example/RestInsertExample.java
@@ -16,14 +16,14 @@ public class RestInsertExample {
private static List getRawData() {
return Arrays.asList(
- "d1001,2018-10-03 14:38:05.000,10.30000,219,0.31000,California.SanFrancisco,2",
- "d1001,2018-10-03 14:38:15.000,12.60000,218,0.33000,California.SanFrancisco,2",
- "d1001,2018-10-03 14:38:16.800,12.30000,221,0.31000,California.SanFrancisco,2",
- "d1002,2018-10-03 14:38:16.650,10.30000,218,0.25000,California.SanFrancisco,3",
- "d1003,2018-10-03 14:38:05.500,11.80000,221,0.28000,California.LosAngeles,2",
- "d1003,2018-10-03 14:38:16.600,13.40000,223,0.29000,California.LosAngeles,2",
- "d1004,2018-10-03 14:38:05.000,10.80000,223,0.29000,California.LosAngeles,3",
- "d1004,2018-10-03 14:38:06.500,11.50000,221,0.35000,California.LosAngeles,3"
+ "d1001,2018-10-03 14:38:05.000,10.30000,219,0.31000,'California.SanFrancisco',2",
+ "d1001,2018-10-03 14:38:15.000,12.60000,218,0.33000,'California.SanFrancisco',2",
+ "d1001,2018-10-03 14:38:16.800,12.30000,221,0.31000,'California.SanFrancisco',2",
+ "d1002,2018-10-03 14:38:16.650,10.30000,218,0.25000,'California.SanFrancisco',3",
+ "d1003,2018-10-03 14:38:05.500,11.80000,221,0.28000,'California.LosAngeles',2",
+ "d1003,2018-10-03 14:38:16.600,13.40000,223,0.29000,'California.LosAngeles',2",
+ "d1004,2018-10-03 14:38:05.000,10.80000,223,0.29000,'California.LosAngeles',3",
+ "d1004,2018-10-03 14:38:06.500,11.50000,221,0.35000,'California.LosAngeles',3"
);
}
diff --git a/docs/examples/java/src/main/java/com/taos/example/SubscribeDemo.java b/docs/examples/java/src/main/java/com/taos/example/SubscribeDemo.java
index 50e8b357719fc6d1f4707e474afdf58fb4531970..179e6e6911185631901b79e34a343967e73c4936 100644
--- a/docs/examples/java/src/main/java/com/taos/example/SubscribeDemo.java
+++ b/docs/examples/java/src/main/java/com/taos/example/SubscribeDemo.java
@@ -57,7 +57,7 @@ public class SubscribeDemo {
properties.setProperty(TMQConstants.ENABLE_AUTO_COMMIT, "true");
properties.setProperty(TMQConstants.GROUP_ID, "test");
properties.setProperty(TMQConstants.VALUE_DESERIALIZER,
- "com.taosdata.jdbc.MetersDeserializer");
+ "com.taos.example.MetersDeserializer");
// poll data
try (TaosConsumer consumer = new TaosConsumer<>(properties)) {
diff --git a/docs/examples/java/src/main/java/com/taos/example/highvolume/DataBaseMonitor.java b/docs/examples/java/src/main/java/com/taos/example/highvolume/DataBaseMonitor.java
new file mode 100644
index 0000000000000000000000000000000000000000..04b149a4b96441ecfd1b0bdde54c9ed71349cab2
--- /dev/null
+++ b/docs/examples/java/src/main/java/com/taos/example/highvolume/DataBaseMonitor.java
@@ -0,0 +1,63 @@
+package com.taos.example.highvolume;
+
+import java.sql.*;
+
+/**
+ * Prepare target database.
+ * Count total records in database periodically so that we can estimate the writing speed.
+ */
+public class DataBaseMonitor {
+ private Connection conn;
+ private Statement stmt;
+
+ public DataBaseMonitor init() throws SQLException {
+ if (conn == null) {
+ String jdbcURL = System.getenv("TDENGINE_JDBC_URL");
+ conn = DriverManager.getConnection(jdbcURL);
+ stmt = conn.createStatement();
+ }
+ return this;
+ }
+
+ public void close() {
+ try {
+ stmt.close();
+ } catch (SQLException e) {
+ }
+ try {
+ conn.close();
+ } catch (SQLException e) {
+ }
+ }
+
+ public void prepareDatabase() throws SQLException {
+ stmt.execute("DROP DATABASE IF EXISTS test");
+ stmt.execute("CREATE DATABASE test");
+ stmt.execute("CREATE STABLE test.meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS (location BINARY(64), groupId INT)");
+ }
+
+ public Long count() throws SQLException {
+ if (!stmt.isClosed()) {
+ ResultSet result = stmt.executeQuery("SELECT count(*) from test.meters");
+ result.next();
+ return result.getLong(1);
+ }
+ return null;
+ }
+
+ /**
+ * show test.stables;
+ *
+ * name | created_time | columns | tags | tables |
+ * ============================================================================================
+ * meters | 2022-07-20 08:39:30.902 | 4 | 2 | 620000 |
+ */
+ public Long getTableCount() throws SQLException {
+ if (!stmt.isClosed()) {
+ ResultSet result = stmt.executeQuery("show test.stables");
+ result.next();
+ return result.getLong(5);
+ }
+ return null;
+ }
+}
\ No newline at end of file
diff --git a/docs/examples/java/src/main/java/com/taos/example/highvolume/FastWriteExample.java b/docs/examples/java/src/main/java/com/taos/example/highvolume/FastWriteExample.java
new file mode 100644
index 0000000000000000000000000000000000000000..41b59551ca69a4056c2f2b572d169bd08dc4fcfe
--- /dev/null
+++ b/docs/examples/java/src/main/java/com/taos/example/highvolume/FastWriteExample.java
@@ -0,0 +1,70 @@
+package com.taos.example.highvolume;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.sql.*;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.concurrent.ArrayBlockingQueue;
+import java.util.concurrent.BlockingQueue;
+
+
+public class FastWriteExample {
+ final static Logger logger = LoggerFactory.getLogger(FastWriteExample.class);
+
+ final static int taskQueueCapacity = 1000000;
+ final static List> taskQueues = new ArrayList<>();
+ final static List readTasks = new ArrayList<>();
+ final static List writeTasks = new ArrayList<>();
+ final static DataBaseMonitor databaseMonitor = new DataBaseMonitor();
+
+ public static void stopAll() {
+ logger.info("shutting down");
+ readTasks.forEach(task -> task.stop());
+ writeTasks.forEach(task -> task.stop());
+ databaseMonitor.close();
+ }
+
+ public static void main(String[] args) throws InterruptedException, SQLException {
+ int readTaskCount = args.length > 0 ? Integer.parseInt(args[0]) : 1;
+ int writeTaskCount = args.length > 1 ? Integer.parseInt(args[1]) : 3;
+ int tableCount = args.length > 2 ? Integer.parseInt(args[2]) : 1000;
+ int maxBatchSize = args.length > 3 ? Integer.parseInt(args[3]) : 3000;
+
+ logger.info("readTaskCount={}, writeTaskCount={} tableCount={} maxBatchSize={}",
+ readTaskCount, writeTaskCount, tableCount, maxBatchSize);
+
+ databaseMonitor.init().prepareDatabase();
+
+ // Create task queues, writing tasks and start writing threads.
+ for (int i = 0; i < writeTaskCount; ++i) {
+ BlockingQueue queue = new ArrayBlockingQueue<>(taskQueueCapacity);
+ taskQueues.add(queue);
+ WriteTask task = new WriteTask(queue, maxBatchSize);
+ Thread t = new Thread(task);
+ t.setName("WriteThread-" + i);
+ t.start();
+ }
+
+ // create reading tasks and start reading threads
+ int tableCountPerTask = tableCount / readTaskCount;
+ for (int i = 0; i < readTaskCount; ++i) {
+ ReadTask task = new ReadTask(i, taskQueues, tableCountPerTask);
+ Thread t = new Thread(task);
+ t.setName("ReadThread-" + i);
+ t.start();
+ }
+
+ Runtime.getRuntime().addShutdownHook(new Thread(FastWriteExample::stopAll));
+
+ long lastCount = 0;
+ while (true) {
+ Thread.sleep(10000);
+ long numberOfTable = databaseMonitor.getTableCount();
+ long count = databaseMonitor.count();
+ logger.info("numberOfTable={} count={} speed={}", numberOfTable, count, (count - lastCount) / 10);
+ lastCount = count;
+ }
+ }
+}
\ No newline at end of file
diff --git a/docs/examples/java/src/main/java/com/taos/example/highvolume/MockDataSource.java b/docs/examples/java/src/main/java/com/taos/example/highvolume/MockDataSource.java
new file mode 100644
index 0000000000000000000000000000000000000000..6fe83f002ebcb9d82e026e9a32886fd22bfefbe9
--- /dev/null
+++ b/docs/examples/java/src/main/java/com/taos/example/highvolume/MockDataSource.java
@@ -0,0 +1,53 @@
+package com.taos.example.highvolume;
+
+import java.util.Iterator;
+
+/**
+ * Generate test data
+ */
+class MockDataSource implements Iterator {
+ private String tbNamePrefix;
+ private int tableCount;
+ private long maxRowsPerTable = 1000000000L;
+
+ // 100 milliseconds between two neighbouring rows.
+ long startMs = System.currentTimeMillis() - maxRowsPerTable * 100;
+ private int currentRow = 0;
+ private int currentTbId = -1;
+
+ // mock values
+ String[] location = {"LosAngeles", "SanDiego", "Hollywood", "Compton", "San Francisco"};
+ float[] current = {8.8f, 10.7f, 9.9f, 8.9f, 9.4f};
+ int[] voltage = {119, 116, 111, 113, 118};
+ float[] phase = {0.32f, 0.34f, 0.33f, 0.329f, 0.141f};
+
+ public MockDataSource(String tbNamePrefix, int tableCount) {
+ this.tbNamePrefix = tbNamePrefix;
+ this.tableCount = tableCount;
+ }
+
+ @Override
+ public boolean hasNext() {
+ currentTbId += 1;
+ if (currentTbId == tableCount) {
+ currentTbId = 0;
+ currentRow += 1;
+ }
+ return currentRow < maxRowsPerTable;
+ }
+
+ @Override
+ public String next() {
+ long ts = startMs + 100 * currentRow;
+ int groupId = currentTbId % 5 == 0 ? currentTbId / 5 : currentTbId / 5 + 1;
+ StringBuilder sb = new StringBuilder(tbNamePrefix + "_" + currentTbId + ","); // tbName
+ sb.append(ts).append(','); // ts
+ sb.append(current[currentRow % 5]).append(','); // current
+ sb.append(voltage[currentRow % 5]).append(','); // voltage
+ sb.append(phase[currentRow % 5]).append(','); // phase
+ sb.append(location[currentRow % 5]).append(','); // location
+ sb.append(groupId); // groupID
+
+ return sb.toString();
+ }
+}
\ No newline at end of file
diff --git a/docs/examples/java/src/main/java/com/taos/example/highvolume/ReadTask.java b/docs/examples/java/src/main/java/com/taos/example/highvolume/ReadTask.java
new file mode 100644
index 0000000000000000000000000000000000000000..a6fcfed1d28281d46aff493ef9783972858ebe62
--- /dev/null
+++ b/docs/examples/java/src/main/java/com/taos/example/highvolume/ReadTask.java
@@ -0,0 +1,58 @@
+package com.taos.example.highvolume;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.util.Iterator;
+import java.util.List;
+import java.util.concurrent.BlockingQueue;
+
+class ReadTask implements Runnable {
+ private final static Logger logger = LoggerFactory.getLogger(ReadTask.class);
+ private final int taskId;
+ private final List> taskQueues;
+ private final int queueCount;
+ private final int tableCount;
+ private boolean active = true;
+
+ public ReadTask(int readTaskId, List> queues, int tableCount) {
+ this.taskId = readTaskId;
+ this.taskQueues = queues;
+ this.queueCount = queues.size();
+ this.tableCount = tableCount;
+ }
+
+ /**
+ * Assign data received to different queues.
+ * Here we use the suffix number in table name.
+ * You are expected to define your own rule in practice.
+ *
+ * @param line record received
+ * @return which queue to use
+ */
+ public int getQueueId(String line) {
+ String tbName = line.substring(0, line.indexOf(',')); // For example: tb1_101
+ String suffixNumber = tbName.split("_")[1];
+ return Integer.parseInt(suffixNumber) % this.queueCount;
+ }
+
+ @Override
+ public void run() {
+ logger.info("started");
+ Iterator it = new MockDataSource("tb" + this.taskId, tableCount);
+ try {
+ while (it.hasNext() && active) {
+ String line = it.next();
+ int queueId = getQueueId(line);
+ taskQueues.get(queueId).put(line);
+ }
+ } catch (Exception e) {
+ logger.error("Read Task Error", e);
+ }
+ }
+
+ public void stop() {
+ logger.info("stop");
+ this.active = false;
+ }
+}
\ No newline at end of file
diff --git a/docs/examples/java/src/main/java/com/taos/example/highvolume/SQLWriter.java b/docs/examples/java/src/main/java/com/taos/example/highvolume/SQLWriter.java
new file mode 100644
index 0000000000000000000000000000000000000000..c2989acdbe3d0f56d7451ac86051a55955ce14de
--- /dev/null
+++ b/docs/examples/java/src/main/java/com/taos/example/highvolume/SQLWriter.java
@@ -0,0 +1,205 @@
+package com.taos.example.highvolume;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.sql.*;
+import java.util.HashMap;
+import java.util.Map;
+
+/**
+ * A helper class that encapsulates the logic of writing using SQL.
+ *
+ * The main interfaces are two methods:
+ *
+ * {@link SQLWriter#processLine}, which receives raw lines from WriteTask and groups them by table name.
+ * {@link SQLWriter#flush}, which assembles the INSERT statement and executes it.
+ *
+ *
+ * There is a technical skill worth mentioning: we create tables as needed when a "table does not exist" error occurs, instead of creating tables automatically using the syntax "INSERT INTO tb USING stb".
+ * This ensures that checking table existence is a one-time-only operation.
+ *
+ *
+ *
+ */
+public class SQLWriter {
+ final static Logger logger = LoggerFactory.getLogger(SQLWriter.class);
+
+ private Connection conn;
+ private Statement stmt;
+
+ /**
+ * current number of buffered records
+ */
+ private int bufferedCount = 0;
+ /**
+ * Maximum number of buffered records.
+ * Flush action will be triggered if bufferedCount reached this value,
+ */
+ private int maxBatchSize;
+
+
+ /**
+ * Maximum SQL length.
+ */
+ private int maxSQLLength;
+
+ /**
+ * Map from table name to column values. For example:
+ * "tb001" -> "(1648432611249,2.1,114,0.09) (1648432611250,2.2,135,0.2)"
+ */
+ private Map tbValues = new HashMap<>();
+
+ /**
+ * Map from table name to tag values in the same order as creating stable.
+ * Used for creating table.
+ */
+ private Map tbTags = new HashMap<>();
+
+ public SQLWriter(int maxBatchSize) {
+ this.maxBatchSize = maxBatchSize;
+ }
+
+
+ /**
+ * Get Database Connection
+ *
+ * @return Connection
+ * @throws SQLException
+ */
+ private static Connection getConnection() throws SQLException {
+ String jdbcURL = System.getenv("TDENGINE_JDBC_URL");
+ return DriverManager.getConnection(jdbcURL);
+ }
+
+ /**
+ * Create Connection and Statement
+ *
+ * @throws SQLException
+ */
+ public void init() throws SQLException {
+ conn = getConnection();
+ stmt = conn.createStatement();
+ stmt.execute("use test");
+ ResultSet rs = stmt.executeQuery("show variables");
+ while (rs.next()) {
+ String configName = rs.getString(1);
+ if ("maxSQLLength".equals(configName)) {
+ maxSQLLength = Integer.parseInt(rs.getString(2));
+ logger.info("maxSQLLength={}", maxSQLLength);
+ }
+ }
+ }
+
+ /**
+ * Convert raw data to SQL fragments, group them by table name and cache them in a HashMap.
+ * Trigger writing when the number of buffered records reaches maxBatchSize.
+ *
+ * @param line raw data taken from the task queue, in format: tbName,ts,current,voltage,phase,location,groupId
+ */
+ public void processLine(String line) throws SQLException {
+ bufferedCount += 1;
+ int firstComma = line.indexOf(',');
+ String tbName = line.substring(0, firstComma);
+ int lastComma = line.lastIndexOf(',');
+ int secondLastComma = line.lastIndexOf(',', lastComma - 1);
+ String value = "(" + line.substring(firstComma + 1, secondLastComma) + ") ";
+ if (tbValues.containsKey(tbName)) {
+ tbValues.put(tbName, tbValues.get(tbName) + value);
+ } else {
+ tbValues.put(tbName, value);
+ }
+ if (!tbTags.containsKey(tbName)) {
+ String location = line.substring(secondLastComma + 1, lastComma);
+ String groupId = line.substring(lastComma + 1);
+ String tagValues = "('" + location + "'," + groupId + ')';
+ tbTags.put(tbName, tagValues);
+ }
+ if (bufferedCount == maxBatchSize) {
+ flush();
+ }
+ }
+
+
+ /**
+ * Assemble INSERT statement using buffered SQL fragments in Map {@link SQLWriter#tbValues} and execute it.
+ * In case of a "Table does not exist" exception, create all tables in the sql and retry the sql.
+ */
+ public void flush() throws SQLException {
+ StringBuilder sb = new StringBuilder("INSERT INTO ");
+ for (Map.Entry entry : tbValues.entrySet()) {
+ String tableName = entry.getKey();
+ String values = entry.getValue();
+ String q = tableName + " values " + values + " ";
+ if (sb.length() + q.length() > maxSQLLength) {
+ executeSQL(sb.toString());
+ logger.warn("increase maxSQLLength or decrease maxBatchSize to gain better performance");
+ sb = new StringBuilder("INSERT INTO ");
+ }
+ sb.append(q);
+ }
+ executeSQL(sb.toString());
+ tbValues.clear();
+ bufferedCount = 0;
+ }
+
+ private void executeSQL(String sql) throws SQLException {
+ try {
+ stmt.executeUpdate(sql);
+ } catch (SQLException e) {
+ // convert to error code defined in taoserror.h
+ int errorCode = e.getErrorCode() & 0xffff;
+ if (errorCode == 0x362 || errorCode == 0x218) {
+ // Table does not exist
+ createTables();
+ executeSQL(sql);
+ } else {
+ logger.error("Execute SQL: {}", sql);
+ throw e;
+ }
+ } catch (Throwable throwable) {
+ logger.error("Execute SQL: {}", sql);
+ throw throwable;
+ }
+ }
+
+ /**
+ * Create tables in batch using syntax:
+ *
+ * CREATE TABLE [IF NOT EXISTS] tb_name1 USING stb_name TAGS (tag_value1, ...) [IF NOT EXISTS] tb_name2 USING stb_name TAGS (tag_value2, ...) ...;
+ *
+ */
+ private void createTables() throws SQLException {
+ StringBuilder sb = new StringBuilder("CREATE TABLE ");
+ for (String tbName : tbValues.keySet()) {
+ String tagValues = tbTags.get(tbName);
+ sb.append("IF NOT EXISTS ").append(tbName).append(" USING meters TAGS ").append(tagValues).append(" ");
+ }
+ String sql = sb.toString();
+ try {
+ stmt.executeUpdate(sql);
+ } catch (Throwable throwable) {
+ logger.error("Execute SQL: {}", sql);
+ throw throwable;
+ }
+ }
+
+ public boolean hasBufferedValues() {
+ return bufferedCount > 0;
+ }
+
+ public int getBufferedCount() {
+ return bufferedCount;
+ }
+
+ public void close() {
+ try {
+ stmt.close();
+ } catch (SQLException e) {
+ }
+ try {
+ conn.close();
+ } catch (SQLException e) {
+ }
+ }
+}
\ No newline at end of file
diff --git a/docs/examples/java/src/main/java/com/taos/example/highvolume/StmtWriter.java b/docs/examples/java/src/main/java/com/taos/example/highvolume/StmtWriter.java
new file mode 100644
index 0000000000000000000000000000000000000000..8ade06625d708a112c85d5657aa00bcd0e605ff4
--- /dev/null
+++ b/docs/examples/java/src/main/java/com/taos/example/highvolume/StmtWriter.java
@@ -0,0 +1,4 @@
+package com.taos.example.highvolume;
+
+public class StmtWriter {
+}
diff --git a/docs/examples/java/src/main/java/com/taos/example/highvolume/WriteTask.java b/docs/examples/java/src/main/java/com/taos/example/highvolume/WriteTask.java
new file mode 100644
index 0000000000000000000000000000000000000000..de9e5463d7dc59478f991e4783aacaae527b4c4b
--- /dev/null
+++ b/docs/examples/java/src/main/java/com/taos/example/highvolume/WriteTask.java
@@ -0,0 +1,58 @@
+package com.taos.example.highvolume;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.util.concurrent.BlockingQueue;
+
+class WriteTask implements Runnable {
+ private final static Logger logger = LoggerFactory.getLogger(WriteTask.class);
+ private final int maxBatchSize;
+
+ // the queue from which this writing task gets raw data.
+ private final BlockingQueue queue;
+
+ // A flag indicating whether to continue.
+ private boolean active = true;
+
+ public WriteTask(BlockingQueue taskQueue, int maxBatchSize) {
+ this.queue = taskQueue;
+ this.maxBatchSize = maxBatchSize;
+ }
+
+ @Override
+ public void run() {
+ logger.info("started");
+ String line = null; // the line most recently taken from the queue.
+ SQLWriter writer = new SQLWriter(maxBatchSize);
+ try {
+ writer.init();
+ while (active) {
+ line = queue.poll();
+ if (line != null) {
+ // parse raw data and buffer the data.
+ writer.processLine(line);
+ } else if (writer.hasBufferedValues()) {
+ // write data immediately if no more data in the queue
+ writer.flush();
+ } else {
+ // sleep a while to avoid high CPU usage when the queue is empty and there are no buffered records.
+ Thread.sleep(100);
+ }
+ }
+ if (writer.hasBufferedValues()) {
+ writer.flush();
+ }
+ } catch (Exception e) {
+ String msg = String.format("line=%s, bufferedCount=%s", line, writer.getBufferedCount());
+ logger.error(msg, e);
+ } finally {
+ writer.close();
+ }
+ }
+
+ public void stop() {
+ logger.info("stop");
+ this.active = false;
+ }
+}
\ No newline at end of file
diff --git a/docs/examples/java/src/test/java/com/taos/test/TestAll.java b/docs/examples/java/src/test/java/com/taos/test/TestAll.java
index 42db24485afec05298159f7b0c3a4e15835d98ed..8d201da0745e1d2d36220c9d78383fc37d4a813a 100644
--- a/docs/examples/java/src/test/java/com/taos/test/TestAll.java
+++ b/docs/examples/java/src/test/java/com/taos/test/TestAll.java
@@ -23,16 +23,16 @@ public class TestAll {
String jdbcUrl = "jdbc:TAOS://localhost:6030?user=root&password=taosdata";
try (Connection conn = DriverManager.getConnection(jdbcUrl)) {
try (Statement stmt = conn.createStatement()) {
- String sql = "INSERT INTO power.d1001 USING power.meters TAGS(California.SanFrancisco, 2) VALUES('2018-10-03 14:38:05.000',10.30000,219,0.31000)\n" +
- " power.d1001 USING power.meters TAGS(California.SanFrancisco, 2) VALUES('2018-10-03 15:38:15.000',12.60000,218,0.33000)\n" +
- " power.d1001 USING power.meters TAGS(California.SanFrancisco, 2) VALUES('2018-10-03 15:38:16.800',12.30000,221,0.31000)\n" +
- " power.d1002 USING power.meters TAGS(California.SanFrancisco, 3) VALUES('2018-10-03 15:38:16.650',10.30000,218,0.25000)\n" +
- " power.d1003 USING power.meters TAGS(California.LosAngeles, 2) VALUES('2018-10-03 15:38:05.500',11.80000,221,0.28000)\n" +
- " power.d1003 USING power.meters TAGS(California.LosAngeles, 2) VALUES('2018-10-03 15:38:16.600',13.40000,223,0.29000)\n" +
- " power.d1004 USING power.meters TAGS(California.LosAngeles, 3) VALUES('2018-10-03 15:38:05.000',10.80000,223,0.29000)\n" +
- " power.d1004 USING power.meters TAGS(California.LosAngeles, 3) VALUES('2018-10-03 15:38:06.000',10.80000,223,0.29000)\n" +
- " power.d1004 USING power.meters TAGS(California.LosAngeles, 3) VALUES('2018-10-03 15:38:07.000',10.80000,223,0.29000)\n" +
- " power.d1004 USING power.meters TAGS(California.LosAngeles, 3) VALUES('2018-10-03 15:38:08.500',11.50000,221,0.35000)";
+ String sql = "INSERT INTO power.d1001 USING power.meters TAGS('California.SanFrancisco', 2) VALUES('2018-10-03 14:38:05.000',10.30000,219,0.31000)\n" +
+ " power.d1001 USING power.meters TAGS('California.SanFrancisco', 2) VALUES('2018-10-03 15:38:15.000',12.60000,218,0.33000)\n" +
+ " power.d1001 USING power.meters TAGS('California.SanFrancisco', 2) VALUES('2018-10-03 15:38:16.800',12.30000,221,0.31000)\n" +
+ " power.d1002 USING power.meters TAGS('California.SanFrancisco', 3) VALUES('2018-10-03 15:38:16.650',10.30000,218,0.25000)\n" +
+ " power.d1003 USING power.meters TAGS('California.LosAngeles', 2) VALUES('2018-10-03 15:38:05.500',11.80000,221,0.28000)\n" +
+ " power.d1003 USING power.meters TAGS('California.LosAngeles', 2) VALUES('2018-10-03 15:38:16.600',13.40000,223,0.29000)\n" +
+ " power.d1004 USING power.meters TAGS('California.LosAngeles', 3) VALUES('2018-10-03 15:38:05.000',10.80000,223,0.29000)\n" +
+ " power.d1004 USING power.meters TAGS('California.LosAngeles', 3) VALUES('2018-10-03 15:38:06.000',10.80000,223,0.29000)\n" +
+ " power.d1004 USING power.meters TAGS('California.LosAngeles', 3) VALUES('2018-10-03 15:38:07.000',10.80000,223,0.29000)\n" +
+ " power.d1004 USING power.meters TAGS('California.LosAngeles', 3) VALUES('2018-10-03 15:38:08.500',11.50000,221,0.35000)";
stmt.execute(sql);
}
diff --git a/docs/examples/python/fast_write_example.py b/docs/examples/python/fast_write_example.py
new file mode 100644
index 0000000000000000000000000000000000000000..c9d606388fdecd85f1468f24cc497ecc5941f035
--- /dev/null
+++ b/docs/examples/python/fast_write_example.py
@@ -0,0 +1,180 @@
+# install dependencies:
+# recommend python >= 3.8
+# pip3 install faster-fifo
+#
+
+import logging
+import math
+import sys
+import time
+import os
+from multiprocessing import Process
+from faster_fifo import Queue
+from mockdatasource import MockDataSource
+from queue import Empty
+from typing import List
+
+logging.basicConfig(stream=sys.stdout, level=logging.DEBUG, format="%(asctime)s [%(name)s] - %(message)s")
+
+READ_TASK_COUNT = 1
+WRITE_TASK_COUNT = 1
+TABLE_COUNT = 1000
+QUEUE_SIZE = 1000000
+MAX_BATCH_SIZE = 3000
+
+read_processes = []
+write_processes = []
+
+
+def get_connection():
+ """
+ If variable TDENGINE_FIRST_EP is provided then it will be used. If not, firstEP in /etc/taos/taos.cfg will be used.
+ You can also override the default username and password by supplying the variables TDENGINE_USER and TDENGINE_PASSWORD
+ """
+ import taos
+ firstEP = os.environ.get("TDENGINE_FIRST_EP")
+ if firstEP:
+ host, port = firstEP.split(":")
+ else:
+ host, port = None, 0
+ user = os.environ.get("TDENGINE_USER", "root")
+ password = os.environ.get("TDENGINE_PASSWORD", "taosdata")
+ return taos.connect(host=host, port=int(port), user=user, password=password)
+
+
+# ANCHOR: read
+
+def run_read_task(task_id: int, task_queues: List[Queue]):
+ table_count_per_task = TABLE_COUNT // READ_TASK_COUNT
+ data_source = MockDataSource(f"tb{task_id}", table_count_per_task)
+ try:
+ for batch in data_source:
+ for table_id, rows in batch:
+ # hash data to different queue
+ i = table_id % len(task_queues)
+ # block putting forever when the queue is full
+ task_queues[i].put_many(rows, block=True, timeout=-1)
+ except KeyboardInterrupt:
+ pass
+
+
+# ANCHOR_END: read
+
+# ANCHOR: write
+def run_write_task(task_id: int, queue: Queue):
+ from sql_writer import SQLWriter
+ log = logging.getLogger(f"WriteTask-{task_id}")
+ writer = SQLWriter(get_connection)
+ lines = None
+ try:
+ while True:
+ try:
+ # get as many as possible
+ lines = queue.get_many(block=False, max_messages_to_get=MAX_BATCH_SIZE)
+ writer.process_lines(lines)
+ except Empty:
+ time.sleep(0.01)
+ except KeyboardInterrupt:
+ pass
+ except BaseException as e:
+ log.debug(f"lines={lines}")
+ raise e
+
+
+# ANCHOR_END: write
+
+def set_global_config():
+ argc = len(sys.argv)
+ if argc > 1:
+ global READ_TASK_COUNT
+ READ_TASK_COUNT = int(sys.argv[1])
+ if argc > 2:
+ global WRITE_TASK_COUNT
+ WRITE_TASK_COUNT = int(sys.argv[2])
+ if argc > 3:
+ global TABLE_COUNT
+ TABLE_COUNT = int(sys.argv[3])
+ if argc > 4:
+ global QUEUE_SIZE
+ QUEUE_SIZE = int(sys.argv[4])
+ if argc > 5:
+ global MAX_BATCH_SIZE
+ MAX_BATCH_SIZE = int(sys.argv[5])
+
+
+# ANCHOR: monitor
+def run_monitor_process():
+ log = logging.getLogger("DataBaseMonitor")
+ conn = get_connection()
+ conn.execute("DROP DATABASE IF EXISTS test")
+ conn.execute("CREATE DATABASE test")
+ conn.execute("CREATE STABLE test.meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) "
+ "TAGS (location BINARY(64), groupId INT)")
+
+ def get_count():
+ res = conn.query("SELECT count(*) FROM test.meters")
+ rows = res.fetch_all()
+ return rows[0][0] if rows else 0
+
+ last_count = 0
+ while True:
+ time.sleep(10)
+ count = get_count()
+ log.info(f"count={count} speed={(count - last_count) / 10}")
+ last_count = count
+
+
+# ANCHOR_END: monitor
+# ANCHOR: main
+def main():
+ set_global_config()
+ logging.info(f"READ_TASK_COUNT={READ_TASK_COUNT}, WRITE_TASK_COUNT={WRITE_TASK_COUNT}, "
+ f"TABLE_COUNT={TABLE_COUNT}, QUEUE_SIZE={QUEUE_SIZE}, MAX_BATCH_SIZE={MAX_BATCH_SIZE}")
+
+ monitor_process = Process(target=run_monitor_process)
+ monitor_process.start()
+ time.sleep(3) # waiting for database ready.
+
+ task_queues: List[Queue] = []
+ # create task queues
+ for i in range(WRITE_TASK_COUNT):
+ queue = Queue(max_size_bytes=QUEUE_SIZE)
+ task_queues.append(queue)
+
+ # create write processes
+ for i in range(WRITE_TASK_COUNT):
+ p = Process(target=run_write_task, args=(i, task_queues[i]))
+ p.start()
+ logging.debug(f"WriteTask-{i} started with pid {p.pid}")
+ write_processes.append(p)
+
+ # create read processes
+ for i in range(READ_TASK_COUNT):
+ queues = assign_queues(i, task_queues)
+ p = Process(target=run_read_task, args=(i, queues))
+ p.start()
+ logging.debug(f"ReadTask-{i} started with pid {p.pid}")
+ read_processes.append(p)
+
+ try:
+ monitor_process.join()
+ except KeyboardInterrupt:
+ monitor_process.terminate()
+ [p.terminate() for p in read_processes]
+ [p.terminate() for p in write_processes]
+ [q.close() for q in task_queues]
+
+
+def assign_queues(read_task_id, task_queues):
+ """
+ Compute target queues for a specific read task.
+ """
+ ratio = WRITE_TASK_COUNT / READ_TASK_COUNT
+ from_index = math.floor(read_task_id * ratio)
+ end_index = math.ceil((read_task_id + 1) * ratio)
+ return task_queues[from_index:end_index]
+
+
+if __name__ == '__main__':
+ main()
+# ANCHOR_END: main
diff --git a/docs/examples/python/mockdatasource.py b/docs/examples/python/mockdatasource.py
new file mode 100644
index 0000000000000000000000000000000000000000..852860aec0adc8f9b043c9dcd5deb0bf00239201
--- /dev/null
+++ b/docs/examples/python/mockdatasource.py
@@ -0,0 +1,49 @@
+import time
+
+
+class MockDataSource:
+ samples = [
+ "8.8,119,0.32,LosAngeles,0",
+ "10.7,116,0.34,SanDiego,1",
+ "9.9,111,0.33,Hollywood,2",
+ "8.9,113,0.329,Compton,3",
+ "9.4,118,0.141,San Francisco,4"
+ ]
+
+ def __init__(self, tb_name_prefix, table_count):
+ self.table_name_prefix = tb_name_prefix + "_"
+ self.table_count = table_count
+ self.max_rows = 10000000
+ self.current_ts = round(time.time() * 1000) - self.max_rows * 100
+ # [(tableId, tableName, values),]
+ self.data = self._init_data()
+
+ def _init_data(self):
+ lines = self.samples * (self.table_count // 5 + 1)
+ data = []
+ for i in range(self.table_count):
+ table_name = self.table_name_prefix + str(i)
+ data.append((i, table_name, lines[i])) # tableId, row
+ return data
+
+ def __iter__(self):
+ self.row = 0
+ return self
+
+ def __next__(self):
+ """
+ next 1000 rows for each table.
+ return: [(tableId, [row, ...]), ...]
+ """
+ # generate 1000 timestamps
+ ts = []
+ for _ in range(1000):
+ self.current_ts += 100
+ ts.append(str(self.current_ts))
+ # add timestamp to each row
+ # [(tableId, ["tableName,ts,current,voltage,phase,location,groupId"])]
+ result = []
+ for table_id, table_name, values in self.data:
+ rows = [table_name + ',' + t + ',' + values for t in ts]
+ result.append((table_id, rows))
+ return result
diff --git a/docs/examples/python/sql_writer.py b/docs/examples/python/sql_writer.py
new file mode 100644
index 0000000000000000000000000000000000000000..758167376b009f21afc701be7d89c1bfbabdeb9f
--- /dev/null
+++ b/docs/examples/python/sql_writer.py
@@ -0,0 +1,90 @@
+import logging
+import taos
+
+
+class SQLWriter:
+ log = logging.getLogger("SQLWriter")
+
+ def __init__(self, get_connection_func):
+ self._tb_values = {}
+ self._tb_tags = {}
+ self._conn = get_connection_func()
+ self._max_sql_length = self.get_max_sql_length()
+ self._conn.execute("USE test")
+
+ def get_max_sql_length(self):
+ rows = self._conn.query("SHOW variables").fetch_all()
+ for r in rows:
+ name = r[0]
+ if name == "maxSQLLength":
+ return int(r[1])
+ return 1024 * 1024
+
+ def process_lines(self, lines: str):
+ """
+ :param lines: list of strings, each in format: tbName,ts,current,voltage,phase,location,groupId
+ """
+ for line in lines:
+ ps = line.split(",")
+ table_name = ps[0]
+ value = '(' + ",".join(ps[1:-2]) + ') '
+ if table_name in self._tb_values:
+ self._tb_values[table_name] += value
+ else:
+ self._tb_values[table_name] = value
+
+ if table_name not in self._tb_tags:
+ location = ps[-2]
+ group_id = ps[-1]
+ tag_value = f"('{location}',{group_id})"
+ self._tb_tags[table_name] = tag_value
+ self.flush()
+
+ def flush(self):
+ """
+ Assemble INSERT statement and execute it.
+ When the sql length grows close to MAX_SQL_LENGTH, the sql will be executed immediately, and a new INSERT statement will be created.
+ In case of a "Table does not exist" exception, tables in the sql will be created and the sql will be re-executed.
+ """
+ sql = "INSERT INTO "
+ sql_len = len(sql)
+ buf = []
+ for tb_name, values in self._tb_values.items():
+ q = tb_name + " VALUES " + values
+ if sql_len + len(q) >= self._max_sql_length:
+ sql += " ".join(buf)
+ self.execute_sql(sql)
+ sql = "INSERT INTO "
+ sql_len = len(sql)
+ buf = []
+ buf.append(q)
+ sql_len += len(q)
+ sql += " ".join(buf)
+ self.execute_sql(sql)
+ self._tb_values.clear()
+
+ def execute_sql(self, sql):
+ try:
+ self._conn.execute(sql)
+ except taos.Error as e:
+ error_code = e.errno & 0xffff
+ # Table does not exist
+ if error_code == 9731:
+ self.create_tables()
+ else:
+ self.log.error("Execute SQL: %s", sql)
+ raise e
+ except BaseException as baseException:
+ self.log.error("Execute SQL: %s", sql)
+ raise baseException
+
+ def create_tables(self):
+ sql = "CREATE TABLE "
+ for tb in self._tb_values.keys():
+ tag_values = self._tb_tags[tb]
+ sql += "IF NOT EXISTS " + tb + " USING meters TAGS " + tag_values + " "
+ try:
+ self._conn.execute(sql)
+ except BaseException as e:
+ self.log.error("Execute SQL: %s", sql)
+ raise e
diff --git a/docs/zh/01-index.md b/docs/zh/01-index.md
index 79d5424ac2e67e05c346e546847c743595d7a82b..f9127121f35c8cdb9d28e121c20b9b7bb9101625 100644
--- a/docs/zh/01-index.md
+++ b/docs/zh/01-index.md
@@ -4,22 +4,22 @@ sidebar_label: 文档首页
slug: /
---
-TDengine是一款[开源](https://www.taosdata.com/tdengine/open_source_time-series_database)、[高性能](https://www.taosdata.com/fast)、[云原生](https://www.taosdata.com/tdengine/cloud_native_time-series_database)的时序数据库 (Time Series Database , TSDB ), 它专为物联网、工业互联网、金融等场景优化设计。同时它还带有内建的缓存、流式计算、数据订阅等系统功能,能大幅减少系统设计的复杂度,降低研发和运营成本,是一极简的时序数据处理平台。本文档是 TDengine 用户手册,主要是介绍 TDengine 的基本概念、安装、使用、功能、开发接口、运营维护、TDengine 内核设计等等,它主要是面向架构师、开发者与系统管理员的。
+TDengine 是一款[开源](https://www.taosdata.com/tdengine/open_source_time-series_database)、[高性能](https://www.taosdata.com/fast)、[云原生](https://www.taosdata.com/tdengine/cloud_native_time-series_database)的时序数据库 (Time Series Database , TSDB ), 它专为物联网、车联网、工业互联网、金融、IT 运维等场景优化设计。同时它还带有内建的缓存、流式计算、数据订阅等系统功能,能大幅减少系统设计的复杂度,降低研发和运营成本,是一款极简的时序数据处理平台。本文档是 TDengine 的用户手册,主要是介绍 TDengine 的基本概念、安装、使用、功能、开发接口、运营维护、TDengine 内核设计等等,它主要是面向架构师、开发工程师与系统管理员的。
-TDengine 充分利用了时序数据的特点,提出了“一个数据采集点一张表”与“超级表”的概念,设计了创新的存储引擎,让数据的写入、查询和存储效率都得到极大的提升。为正确理解并使用TDengine, 无论如何,请您仔细阅读[基本概念](./concept)一章。
+TDengine 充分利用了时序数据的特点,提出了“一个数据采集点一张表”与“超级表”的概念,设计了创新的存储引擎,让数据的写入、查询和存储效率都得到极大的提升。为正确理解并使用 TDengine,无论如何,请您仔细阅读[基本概念](./concept)一章。
-如果你是开发者,请一定仔细阅读[开发指南](./develop)一章,该部分对数据库连接、建模、插入数据、查询、流式计算、缓存、数据订阅、用户自定义函数等功能都做了详细介绍,并配有各种编程语言的示例代码。大部分情况下,你只要把示例代码拷贝粘贴,针对自己的应用稍作改动,就能跑起来。
+如果你是开发工程师,请一定仔细阅读[开发指南](./develop)一章,该部分对数据库连接、建模、插入数据、查询、流式计算、缓存、数据订阅、用户自定义函数等功能都做了详细介绍,并配有各种编程语言的示例代码。大部分情况下,你只要复制粘贴示例代码,针对自己的应用稍作改动,就能跑起来。
-我们已经生活在大数据的时代,纵向扩展已经无法满足日益增长的业务需求,任何系统都必须具有水平扩展的能力,集群成为大数据以及 database 系统的不可缺失功能。TDengine 团队不仅实现了集群功能,而且将这一重要核心功能开源。怎么部署、管理和维护 TDengine 集群,请参考[部署集群](./deployment)一章。
+我们已经生活在大数据时代,纵向扩展已经无法满足日益增长的业务需求,任何系统都必须具有水平扩展的能力,集群成为大数据以及 Database 系统的不可缺失功能。TDengine 团队不仅实现了集群功能,而且将这一重要核心功能开源。怎么部署、管理和维护 TDengine 集群,请仔细参考[部署集群](./deployment)一章。
-TDengine 采用 SQL 作为其查询语言,大大降低学习成本、降低迁移成本,但同时针对时序数据场景,又做了一些扩展,以支持插值、降采样、时间加权平均等操作。[SQL 手册](./taos-sql)一章详细描述了 SQL 语法、详细列出了各种支持的命令和函数。
+TDengine 采用 SQL 作为查询语言,大大降低学习成本、降低迁移成本,但同时针对时序数据场景,又做了一些扩展,以支持插值、降采样、时间加权平均等操作。[SQL 手册](./taos-sql)一章详细描述了 SQL 语法、详细列出了各种支持的命令和函数。
-如果你是系统管理员,关心安装、升级、容错灾备、关心数据导入、导出,配置参数,怎么监测 TDengine 是否健康运行,怎么提升系统运行的性能,那么请仔细参考[运维指南](./operation)一章。
+如果你是系统管理员,关心安装、升级、容错灾备、关心数据导入、导出、配置参数,如何监测 TDengine 是否健康运行,如何提升系统运行的性能,请仔细参考[运维指南](./operation)一章。
-如果你对 TDengine 外围工具,REST API, 各种编程语言的连接器想做更多详细了解,请看[参考指南](./reference)一章。
+如果你对 TDengine 的外围工具、REST API、各种编程语言的连接器(Connector)想做更多详细了解,请看[参考指南](./reference)一章。
-如果你对 TDengine 内部的架构设计很有兴趣,欢迎仔细阅读[技术内幕](./tdinternal)一章,里面对集群的设计、数据分区、分片、写入、读出、查询、聚合查询的流程都做了详细的介绍。如果你想研读 TDengine 代码甚至贡献代码,请一定仔细读完这一章。
+如果你对 TDengine 的内部架构设计很有兴趣,欢迎仔细阅读[技术内幕](./tdinternal)一章,里面对集群的设计、数据分区、分片、写入、读出、查询、聚合查询的流程都做了详细的介绍。如果你想研读 TDengine 代码甚至贡献代码,请一定仔细读完这一章。
-最后,作为一个开源软件,欢迎大家的参与。如果发现文档的任何错误,描述不清晰的地方,都请在每个页面的最下方,点击“编辑本文档“直接进行修改。
+最后,作为一个开源软件,欢迎大家的参与。如果发现文档有任何错误、描述不清晰的地方,请在每个页面的最下方,点击“编辑本文档”直接进行修改。
Together, we make a difference!
diff --git a/docs/zh/02-intro.md b/docs/zh/02-intro.md
index a6ef2b94b6c0b030e967c498a36fd8ae4655f724..47bfd3f96b6fdbb27d3f3e326e14a6b22108d508 100644
--- a/docs/zh/02-intro.md
+++ b/docs/zh/02-intro.md
@@ -1,74 +1,98 @@
---
title: 产品简介
+description: 简要介绍 TDengine 的主要功能
toc_max_heading_level: 2
---
-TDengine 是一款[开源](https://www.taosdata.com/tdengine/open_source_time-series_database)、[高性能](https://www.taosdata.com/tdengine/fast)、[云原生](https://www.taosdata.com/tdengine/cloud_native_time-series_database)的时序数据库 (Time Series Database , TSDB )。TDengine 能被广泛运用于物联网、工业互联网、车联网、IT 运维、金融等领域。除核心的时序数据库功能外,TDengine 还提供[缓存](../develop/cache/)、[数据订阅](../develop/tmq)、[流式计算](../develop/stream)等功能,是一极简的时序数据处理平台,最大程度的减小系统设计的复杂度,降低研发和运营成本。
+TDengine 是一款开源、高性能、云原生的[时序数据库](https://tdengine.com/tsdb/),且针对物联网、车联网、工业互联网、金融、IT 运维等场景进行了优化。TDengine 的代码,包括集群功能,都在 GNU AGPL v3.0 下开源。除核心的时序数据库功能外,TDengine 还提供[缓存](../develop/cache/)、[数据订阅](../develop/tmq)、[流式计算](../develop/stream)等其它功能以降低系统复杂度及研发和运维成本。
-本章节介绍TDengine的主要功能、竞争优势、适用场景、与其他数据库的对比测试等等,让大家对TDengine有个整体的了解。
+本章节介绍 TDengine 的主要功能、竞争优势、适用场景、与其他数据库的对比测试等等,让大家对 TDengine 有个整体的了解。
## 主要功能
-TDengine的主要功能如下:
-
-1. 高速数据写入,除 [SQL 写入](../develop/insert-data/sql-writing)外,还支持 [Schemaless 写入](../reference/schemaless/),支持 [InfluxDB LINE 协议](../develop/insert-data/influxdb-line),[OpenTSDB Telnet](../develop/insert-data/opentsdb-telnet), [OpenTSDB JSON ](../develop/insert-data/opentsdb-json)等协议写入;
-2. 第三方数据采集工具 [Telegraf](../third-party/telegraf),[Prometheus](../third-party/prometheus),[StatsD](../third-party/statsd),[collectd](../third-party/collectd),[icinga2](../third-party/icinga2), [TCollector](../third-party/tcollector), [EMQ](../third-party/emq-broker), [HiveMQ](../third-party/hive-mq-broker) 等都可以进行配置后,不用任何代码,即可将数据写入;
-3. 支持[各种查询](../develop/query-data),包括聚合查询、嵌套查询、降采样查询、插值等
-4. 支持[用户自定义函数](../develop/udf)
-5. 支持[缓存](../develop/cache),将每张表的最后一条记录缓存起来,这样无需 Redis
-6. 支持[流式计算](../develop/stream)(Stream Processing)
-7. 支持[数据订阅](../develop/tmq),而且可以指定过滤条件
-8. 支持[集群](../deployment/),可以通过多节点进行水平扩展,并通过多副本实现高可靠
-9. 提供[命令行程序](../reference/taos-shell),便于管理集群,检查系统状态,做即席查询
-10. 提供多种数据的[导入](../operation/import)、[导出](../operation/export)
-11. 支持对[TDengine 集群本身的监控](../operation/monitor)
-12. 提供 [C/C++](../reference/connector/cpp), [Java](../reference/connector/java), [Python](../reference/connector/python), [Go](../reference/connector/go), [Rust](../reference/connector/rust), [Node.js](../reference/connector/node) 等多种编程语言的[连接器](../reference/connector/)
-13. 支持 [REST 接口](../reference/rest-api/)
-14. 支持与[ Grafana 无缝集成](../third-party/grafana)
-15. 支持与 Google Data Studio 无缝集成
-16. 支持 [Kubernetes 部署](../deployment/k8s)
-
-更多细小的功能,请阅读整个文档。
+TDengine 的主要功能如下:
+
+1. 写入数据,支持
+ - [SQL 写入](../develop/insert-data/sql-writing)
+ - [无模式(Schemaless)写入](../reference/schemaless/),支持多种标准写入协议
+ - [InfluxDB Line 协议](../develop/insert-data/influxdb-line)
+ - [OpenTSDB Telnet 协议](../develop/insert-data/opentsdb-telnet)
+ - [OpenTSDB JSON 协议](../develop/insert-data/opentsdb-json)
+ - 与多种第三方工具的无缝集成,它们都可以仅通过配置而无需任何代码即可将数据写入 TDengine
+ - [Telegraf](../third-party/telegraf)
+ - [Prometheus](../third-party/prometheus)
+ - [StatsD](../third-party/statsd)
+ - [collectd](../third-party/collectd)
+ - [Icinga2](../third-party/icinga2)
+ - [TCollector](../third-party/tcollector)
+ - [EMQX](../third-party/emq-broker)
+ - [HiveMQ](../third-party/hive-mq-broker)
+2. 查询数据,支持
+ - [标准 SQL](../taos-sql),含嵌套查询
+ - [时序数据特色函数](../taos-sql/function/#time-series-extensions)
+ - [时序数据特色查询](../taos-sql/distinguished),例如降采样、插值、累加和、时间加权平均、状态窗口、会话窗口等
+ - [用户自定义函数(UDF)](../taos-sql/udf)
+3. [缓存](../develop/cache),将每张表的最后一条记录缓存起来,这样无需 Redis 就能对时序数据进行高效处理
+4. [流式计算(Stream Processing)](../develop/stream),TDengine 不仅支持连续查询,还支持事件驱动的流式计算,这样在处理时序数据时就无需 Flink 或 Spark 这样流式计算组件
+5. [数据订阅](../develop/tmq),应用程序可以订阅一张表或一组表的数据,提供与 Kafka 相同的 API,而且可以指定过滤条件
+6. 可视化
+ - 支持与 [Grafana](../third-party/grafana/) 的无缝集成
+ - 支持与 Google Data Studio 的无缝集成
+7. 集群
+ - [集群部署](../deployment/),可以通过增加节点进行水平扩展以提升处理能力
+ - 可以通过 [Kubernetes 部署 TDengine](../deployment/k8s/)
+ - 通过多副本提供高可用能力
+8. 管理
+ - [监控](../operation/monitor)运行中的 TDengine 实例
+ - 多种[数据导入](../operation/import)方式
+ - 多种[数据导出](../operation/export)方式
+9. 工具
+ - 提供[交互式命令行程序(CLI)](../reference/taos-shell),便于管理集群,检查系统状态,做即席查询
+ - 提供压力测试工具 [taosBenchmark](../reference/taosbenchmark),用于测试 TDengine 的性能
+10. 编程
+ - 提供各种语言的[连接器(Connector)](../connector): 如 [C/C++](../connector/cpp)、[Java](../connector/java)、[Go](../connector/go)、[Node.js](../connector/node)、[Rust](../connector/rust)、[Python](../connector/python)、[C#](../connector/csharp) 等
+ - 支持 [REST 接口](../connector/rest-api/)
+
+更多细节功能,请阅读整个文档。
## 竞争优势
-由于 TDengine 充分利用了[时序数据特点](https://www.taosdata.com/blog/2019/07/09/105.html),比如结构化、无需事务、很少删除或更新、写多读少等等,设计了全新的针对时序数据的存储引擎和计算引擎,因此与其他时序数据库相比,TDengine 有以下特点:
+由于 TDengine 充分利用了[时序数据特点](https://www.taosdata.com/blog/2019/07/09/105.html),比如结构化、无需事务、很少删除或更新、写多读少等等,因此与其他时序数据库相比,TDengine 有以下特点:
-- **[高性能](https://www.taosdata.com/tdengine/fast)**:通过创新的存储引擎设计,无论是数据写入还是查询,TDengine 的性能比通用数据库快 10 倍以上,也远超其他时序数据库,存储空间不及通用数据库的1/10。
+- **[高性能](https://www.taosdata.com/tdengine/fast)**:TDengine 是唯一一个解决了时序数据存储的高基数难题的时序数据库,支持上亿数据采集点,并在数据插入、查询和数据压缩上远胜其它时序数据库。
-- **[云原生](https://www.taosdata.com/tdengine/cloud_native_time-series_database)**:通过原生分布式的设计,充分利用云平台的优势,TDengine 提供了水平扩展能力,具备弹性、韧性和可观测性,支持k8s部署,可运行在公有云、私有云和混合云上。
+- **[极简时序数据平台](https://www.taosdata.com/tdengine/simplified_solution_for_time-series_data_processing)**:TDengine 内建缓存、流式计算和数据订阅等功能,为时序数据的处理提供了极简的解决方案,从而大幅降低了业务系统的设计复杂度和运维成本。
-- **[极简时序数据平台](https://www.taosdata.com/tdengine/simplified_solution_for_time-series_data_processing)**:TDengine 内建消息队列、缓存、流式计算等功能,应用无需再集成 Kafka/Redis/HBase/Spark 等软件,大幅降低系统的复杂度,降低应用开发和运营成本。
+- **[云原生](https://www.taosdata.com/tdengine/cloud_native_time-series_database)**:通过原生的分布式设计、数据分片和分区、存算分离、RAFT 协议、Kubernetes 部署和完整的可观测性,TDengine 是一款云原生时序数据库并且能够部署在公有云、私有云和混合云上。
-- **[分析能力](https://www.taosdata.com/tdengine/easy_data_analytics)**:支持 SQL,同时为时序数据特有的分析提供SQL扩展。通过超级表、存储计算分离、分区分片、预计算、自定义函数等技术,TDengine 具备强大的分析能力。
+- **[简单易用](https://www.taosdata.com/tdengine/ease_of_use)**:对系统管理员来说,TDengine 大幅降低了管理和维护的代价。对开发者来说, TDengine 提供了简单的接口、极简的解决方案和与第三方工具的无缝集成。对数据分析专家来说,TDengine 提供了便捷的数据访问能力。
-- **[简单易用](https://www.taosdata.com/tdengine/ease_of_use)**:无任何依赖,安装、集群几秒搞定;提供REST以及各种语言连接器,与众多第三方工具无缝集成;提供命令行程序,便于管理和即席查询;提供各种运维工具。
+- **[分析能力](https://www.taosdata.com/tdengine/easy_data_analytics)**:通过超级表、存储计算分离、分区分片、预计算和其它技术,TDengine 能够高效地浏览、格式化和访问数据。
-- **[核心开源](https://www.taosdata.com/tdengine/open_source_time-series_database)**:TDengine 的核心代码包括集群功能全部开源,截止到2022年8月1日,全球超过 135.9k 个运行实例,GitHub Star 18.7k,Fork 4.4k,社区活跃。
+- **[核心开源](https://www.taosdata.com/tdengine/open_source_time-series_database)**:TDengine 的核心代码包括集群功能全部在开源协议下公开。全球超过 140k 个运行实例,GitHub Star 19k,且拥有一个活跃的开发者社区。
采用 TDengine,可将典型的物联网、车联网、工业互联网大数据平台的总拥有成本大幅降低。表现在几个方面:
-1. 由于其超强性能,它能将系统需要的计算资源和存储资源大幅降低
+1. 由于其超强性能,它能将系统所需的计算资源和存储资源大幅降低
2. 因为支持 SQL,能与众多第三方软件无缝集成,学习迁移成本大幅下降
-3. 因为是一极简的时序数据平台,系统复杂度、研发和运营成本大幅降低
-4. 因为维护简单,运营维护成本能大幅降低
+3. 因为是一款极简的时序数据平台,系统复杂度、研发和运营成本大幅降低
## 技术生态
-在整个时序大数据平台中,TDengine 在其中扮演的角色如下:
+在整个时序大数据平台中,TDengine 扮演的角色如下:

+图 1. TDengine 技术生态图
-图 1. TDengine技术生态图
-上图中,左侧是各种数据采集或消息队列,包括 OPC-UA、MQTT、Telegraf、也包括 Kafka, 他们的数据将被源源不断的写入到 TDengine。右侧则是可视化、BI 工具、组态软件、应用程序。下侧则是 TDengine 自身提供的命令行程序 (CLI) 以及可视化管理管理。
+上图中,左侧是各种数据采集或消息队列,包括 OPC-UA、MQTT、Telegraf、也包括 Kafka,他们的数据将被源源不断的写入到 TDengine。右侧则是可视化、BI 工具、组态软件、应用程序。下侧则是 TDengine 自身提供的命令行程序(CLI)以及可视化管理工具。
-## 总体适用场景
+## 典型适用场景
-作为一个高性能、分布式、支持 SQL 的时序数据库 (Database),TDengine 的典型适用场景包括但不限于 IoT、工业互联网、车联网、IT 运维、能源、金融证券等领域。需要指出的是,TDengine 是针对时序数据场景设计的专用数据库和专用大数据处理工具,因充分利用了时序大数据的特点,它无法用来处理网络爬虫、微博、微信、电商、ERP、CRM 等通用型数据。本文对适用场景做更多详细的分析。
+作为一个高性能、分布式、支持 SQL 的时序数据库(Database),TDengine 的典型适用场景包括但不限于 IoT、工业互联网、车联网、IT 运维、能源、金融证券等领域。需要指出的是,TDengine 是针对时序数据场景设计的专用数据库和专用大数据处理工具,因其充分利用了时序大数据的特点,它无法用来处理网络爬虫、微博、微信、电商、ERP、CRM 等通用型数据。下面本文将对适用场景做更多详细的分析。
### 数据源特点和需求
@@ -90,18 +114,18 @@ TDengine的主要功能如下:
### 系统功能需求
-| 系统功能需求 | 不适用 | 可能适用 | 非常适用 | 简单说明 |
-| -------------------------- | ------ | -------- | -------- | --------------------------------------------------------------------------------------------------------------------- |
-| 要求完整的内置数据处理算法 | | √ | | TDengine 的实现了通用的数据处理算法,但是还没有做到妥善处理各行各业的所有要求,因此特殊类型的处理还需要应用层面处理。 |
-| 需要大量的交叉查询处理 | | √ | | 这种类型的处理更多应该用关系型数据系统处理,或者应该考虑 TDengine 和关系型数据系统配合实现系统功能。 |
+| 系统功能需求 | 不适用 | 可能适用 | 非常适用 | 简单说明 |
+| -------------------------- | ------ | -------- | -------- | ------------------------------------------------------------------------------------------------------------------------- |
+| 要求完整的内置数据处理算法 | | √ | | TDengine 实现了通用的数据处理算法,但是还没有做到妥善处理各行各业的所有需求,因此特殊类型的处理需求还需要在应用层面解决。 |
+| 需要大量的交叉查询处理 | | √ | | 这种类型的处理更多应该用关系型数据库处理,或者应该考虑 TDengine 和关系型数据库配合实现系统功能。 |
### 系统性能需求
-| 系统性能需求 | 不适用 | 可能适用 | 非常适用 | 简单说明 |
-| ---------------------- | ------ | -------- | -------- | ------------------------------------------------------------------------------------------------------ |
-| 要求较大的总体处理能力 | | | √ | TDengine 的集群功能可以轻松地让多服务器配合达成处理能力的提升。 |
-| 要求高速处理数据 | | | √ | TDengine 的专门为 IoT 优化的存储和数据处理的设计,一般可以让系统得到超出同类产品多倍数的处理速度提升。 |
-| 要求快速处理小粒度数据 | | | √ | 这方面 TDengine 性能可以完全对标关系型和 NoSQL 型数据处理系统。 |
+| 系统性能需求 | 不适用 | 可能适用 | 非常适用 | 简单说明 |
+| ---------------------- | ------ | -------- | -------- | -------------------------------------------------------------------------------------------------- |
+| 要求较大的总体处理能力 | | | √ | TDengine 的集群功能可以轻松地让多服务器配合达成处理能力的提升。 |
+| 要求高速处理数据 | | | √ | TDengine 专门为 IoT 优化的存储和数据处理设计,一般可以让系统得到超出同类产品多倍数的处理速度提升。 |
+| 要求快速处理小粒度数据 | | | √ | 这方面 TDengine 性能可以完全对标关系型和 NoSQL 型数据处理系统。 |
### 系统维护需求
diff --git a/docs/zh/04-concept/index.md b/docs/zh/04-concept/index.md
index 8e97d4a2f43537c1229c8e8ea092ddfc1257dde7..2cba68edcd152f5059845b9e25342b3f335f3b8b 100644
--- a/docs/zh/04-concept/index.md
+++ b/docs/zh/04-concept/index.md
@@ -1,120 +1,121 @@
---
+sidebar_label: 基本概念
title: 数据模型和基本概念
+description: TDengine 的数据模型和基本概念
---
-为了便于解释基本概念,便于撰写示例程序,整个 TDengine 文档以智能电表作为典型时序数据场景。假设每个智能电表采集电流、电压、相位三个量,有多个智能电表,每个电表有位置 location 和分组 group ID 的静态属性. 其采集的数据类似如下的表格:
+为了便于解释基本概念,便于撰写示例程序,整个 TDengine 文档以智能电表作为典型时序数据场景。假设每个智能电表采集电流、电压、相位三个量,有多个智能电表,每个电表有位置 Location 和分组 Group ID 的静态属性。其采集的数据类似如下的表格:
-
- Device ID
- Time Stamp
- Collected Metrics
- Tags
+
+
+ Device ID
+ Timestamp
+ Collected Metrics
+ Tags
-
-Device ID
-Time Stamp
-current
-voltage
-phase
-location
-groupId
-
-
-
-
-d1001
-1538548685000
-10.3
-219
-0.31
-California.SanFrancisco
-2
-
-
-d1002
-1538548684000
-10.2
-220
-0.23
-California.SanFrancisco
-3
-
-
-d1003
-1538548686500
-11.5
-221
-0.35
-California.LosAngeles
-3
-
-
-d1004
-1538548685500
-13.4
-223
-0.29
-California.LosAngeles
-2
-
-
-d1001
-1538548695000
-12.6
-218
-0.33
-California.SanFrancisco
-2
-
-
-d1004
-1538548696600
-11.8
-221
-0.28
-California.LosAngeles
-2
-
-
-d1002
-1538548696650
-10.3
-218
-0.25
-California.SanFrancisco
-3
-
-
-d1001
-1538548696800
-12.3
-221
-0.31
-California.SanFrancisco
-2
-
-
+
+ current
+ voltage
+ phase
+ location
+ groupid
+
+
+
+
+ d1001
+ 1538548685000
+ 10.3
+ 219
+ 0.31
+ California.SanFrancisco
+ 2
+
+
+ d1002
+ 1538548684000
+ 10.2
+ 220
+ 0.23
+ California.SanFrancisco
+ 3
+
+
+ d1003
+ 1538548686500
+ 11.5
+ 221
+ 0.35
+ California.LosAngeles
+ 3
+
+
+ d1004
+ 1538548685500
+ 13.4
+ 223
+ 0.29
+ California.LosAngeles
+ 2
+
+
+ d1001
+ 1538548695000
+ 12.6
+ 218
+ 0.33
+ California.SanFrancisco
+ 2
+
+
+ d1004
+ 1538548696600
+ 11.8
+ 221
+ 0.28
+ California.LosAngeles
+ 2
+
+
+ d1002
+ 1538548696650
+ 10.3
+ 218
+ 0.25
+ California.SanFrancisco
+ 3
+
+
+ d1001
+ 1538548696800
+ 12.3
+ 221
+ 0.31
+ California.SanFrancisco
+ 2
+
+
-
表 1:智能电表数据示例
+
表 1. 智能电表数据示例
-每一条记录都有设备 ID,时间戳,采集的物理量以及每个设备相关的静态标签。每个设备是受外界的触发,或按照设定的周期采集数据。采集的数据点是时序的,是一个数据流。
+每一条记录都有设备 ID、时间戳、采集的物理量(如上表中的 `current`、`voltage` 和 `phase`)以及每个设备相关的静态标签(`location` 和 `groupid`)。每个设备是受外界的触发,或按照设定的周期采集数据。采集的数据点是时序的,是一个数据流。
-## 采集量 (Metric)
+## 采集量(Metric)
-采集量是指传感器、设备或其他类型采集点采集的物理量,比如电流、电压、温度、压力、GPS 位置等,是随时间变化的,数据类型可以是整型、浮点型、布尔型,也可是字符串。随着时间的推移,存储的采集量的数据量越来越大。
+采集量是指传感器、设备或其他类型采集点采集的物理量,比如电流、电压、温度、压力、GPS 位置等,是随时间变化的,数据类型可以是整型、浮点型、布尔型,也可是字符串。随着时间的推移,存储的采集量的数据量越来越大。智能电表示例中的电流、电压、相位就是采集量。
-## 标签 (Label/Tag)
+## 标签(Label/Tag)
-标签是指传感器、设备或其他类型采集点的静态属性,不是随时间变化的,比如设备型号、颜色、设备的所在地等,数据类型可以是任何类型。虽然是静态的,但 TDengine 容许用户修改、删除或增加标签值。与采集量不一样的是,随时间的推移,存储的标签的数据量不会有什么变化。
+标签是指传感器、设备或其他类型采集点的静态属性,不是随时间变化的,比如设备型号、颜色、设备的所在地等,数据类型可以是任何类型。虽然是静态的,但 TDengine 容许用户修改、删除或增加标签值。与采集量不一样的是,随时间的推移,存储的标签的数据量不会有什么变化。智能电表示例中的 `location` 与 `groupid` 就是标签。
-## 数据采集点 (Data Collection Point)
+## 数据采集点(Data Collection Point)
-数据采集点是指按照预设时间周期或受事件触发采集物理量的硬件或软件。一个数据采集点可以采集一个或多个采集量,**但这些采集量都是同一时刻采集的,具有相同的时间戳**。对于复杂的设备,往往有多个数据采集点,每个数据采集点采集的周期都可能不一样,而且完全独立,不同步。比如对于一台汽车,有数据采集点专门采集 GPS 位置,有数据采集点专门采集发动机状态,有数据采集点专门采集车内的环境,这样一台汽车就有三个数据采集点。
+数据采集点是指按照预设时间周期或受事件触发采集物理量的硬件或软件。一个数据采集点可以采集一个或多个采集量,**但这些采集量都是同一时刻采集的,具有相同的时间戳**。对于复杂的设备,往往有多个数据采集点,每个数据采集点采集的周期都可能不一样,而且完全独立,不同步。比如对于一台汽车,有数据采集点专门采集 GPS 位置,有数据采集点专门采集发动机状态,有数据采集点专门采集车内的环境,这样一台汽车就有三个数据采集点。智能电表示例中的 d1001、d1002、d1003、d1004 等就是数据采集点。
-## 表 (Table)
+## 表(Table)
因为采集量一般是结构化数据,同时为降低学习门槛,TDengine 采用传统的关系型数据库模型管理数据。用户需要先创建库,然后创建表,之后才能插入或查询数据。
@@ -127,47 +128,56 @@ title: 数据模型和基本概念
如果采用传统的方式,将多个数据采集点的数据写入一张表,由于网络延时不可控,不同数据采集点的数据到达服务器的时序是无法保证的,写入操作是要有锁保护的,而且一个数据采集点的数据是难以保证连续存储在一起的。**采用一个数据采集点一张表的方式,能最大程度的保证单个数据采集点的插入和查询的性能是最优的。**
-TDengine 建议用数据采集点的名字(如上表中的 D1001)来做表名。每个数据采集点可能同时采集多个采集量(如上表中的 current,voltage,phase),每个采集量对应一张表中的一列,数据类型可以是整型、浮点型、字符串等。除此之外,表的第一列必须是时间戳,即数据类型为 timestamp。对采集量,TDengine 将自动按照时间戳建立索引,但对采集量本身不建任何索引。数据用列式存储方式保存。
+TDengine 建议用数据采集点的名字(如上表中的 d1001)来做表名。每个数据采集点可能同时采集多个采集量(如上表中的 `current`、`voltage` 和 `phase`),每个采集量对应一张表中的一列,数据类型可以是整型、浮点型、字符串等。除此之外,表的第一列必须是时间戳,即数据类型为 Timestamp。对采集量,TDengine 将自动按照时间戳建立索引,但对采集量本身不建任何索引。数据用列式存储方式保存。
-对于复杂的设备,比如汽车,它有多个数据采集点,那么就需要为一台汽车建立多张表。
+对于复杂的设备,比如汽车,它有多个数据采集点,那么就需要为一辆汽车建立多张表。
-## 超级表 (STable)
+## 超级表(STable)
由于一个数据采集点一张表,导致表的数量巨增,难以管理,而且应用经常需要做采集点之间的聚合操作,聚合的操作也变得复杂起来。为解决这个问题,TDengine 引入超级表(Super Table,简称为 STable)的概念。
-超级表是指某一特定类型的数据采集点的集合。同一类型的数据采集点,其表的结构是完全一样的,但每个表(数据采集点)的静态属性(标签)是不一样的。描述一个超级表(某一特定类型的数据采集点的集合),除需要定义采集量的表结构之外,还需要定义其标签的 schema,标签的数据类型可以是整数、浮点数、字符串,标签可以有多个,可以事后增加、删除或修改。如果整个系统有 N 个不同类型的数据采集点,就需要建立 N 个超级表。
+超级表是指某一特定类型的数据采集点的集合。同一类型的数据采集点,其表的结构是完全一样的,但每个表(数据采集点)的静态属性(标签)是不一样的。描述一个超级表(某一特定类型的数据采集点的集合),除需要定义采集量的表结构之外,还需要定义其标签的 Schema,标签的数据类型可以是整数、浮点数、字符串、JSON,标签可以有多个,可以事后增加、删除或修改。如果整个系统有 N 个不同类型的数据采集点,就需要建立 N 个超级表。
-在 TDengine 的设计里,**表用来代表一个具体的数据采集点,超级表用来代表一组相同类型的数据采集点集合**。
+在 TDengine 的设计里,**表用来代表一个具体的数据采集点,超级表用来代表一组相同类型的数据采集点集合**。智能电表示例中,我们可以创建一个超级表 `meters`。
-## 子表 (Subtable)
+## 子表(Subtable)
当为某个具体数据采集点创建表时,用户可以使用超级表的定义做模板,同时指定该具体采集点(表)的具体标签值来创建该表。**通过超级表创建的表称之为子表**。正常的表与子表的差异在于:
-1. 子表就是表,因此所有正常表的SQL操作都可以在子表上执行。
+1. 子表就是表,因此所有正常表的 SQL 操作都可以在子表上执行。
2. 子表在正常表的基础上有扩展,它是带有静态标签的,而且这些标签可以事后增加、删除、修改,而正常的表没有。
3. 子表一定属于一张超级表,但普通表不属于任何超级表
4. 普通表无法转为子表,子表也无法转为普通表。
超级表与与基于超级表建立的子表之间的关系表现在:
-1. 一张超级表包含有多张子表,这些子表具有相同的采集量 schema,但带有不同的标签值。
+1. 一张超级表包含有多张子表,这些子表具有相同的采集量 Schema,但带有不同的标签值。
2. 不能通过子表调整数据或标签的模式,对于超级表的数据模式修改立即对所有的子表生效。
3. 超级表只定义一个模板,自身不存储任何数据或标签信息。因此,不能向一个超级表写入数据,只能将数据写入子表中。
查询既可以在表上进行,也可以在超级表上进行。针对超级表的查询,TDengine 将把所有子表中的数据视为一个整体数据集进行处理,会先把满足标签过滤条件的表从超级表中找出来,然后再扫描这些表的时序数据,进行聚合操作,这样需要扫描的数据集会大幅减少,从而显著提高查询的性能。本质上,TDengine 通过对超级表查询的支持,实现了多个同类数据采集点的高效聚合。
-TDengine系统建议给一个数据采集点建表,需要通过超级表建表,而不是建普通表。
+TDengine 系统建议给一个数据采集点建表,需要通过超级表建表,而不是建普通表。在智能电表的示例中,我们可以通过超级表 meters 创建子表 d1001、d1002、d1003、d1004 等。
+
+为了更好地理解采集量、标签、超级与子表的关系,可以参考下面关于智能电表数据模型的示意图。
+
+
+
+
+
+图 1. 智能电表数据模型示意图
+
-## 库 (database)
+## 库(Database)
库是指一组表的集合。TDengine 容许一个运行实例有多个库,而且每个库可以配置不同的存储策略。不同类型的数据采集点往往具有不同的数据特征,包括数据采集频率的高低,数据保留时间的长短,副本的数目,数据块的大小,是否允许更新数据等等。为了在各种场景下 TDengine 都能最大效率的工作,TDengine 建议将不同数据特征的超级表创建在不同的库里。
一个库里,可以有一到多个超级表,但一个超级表只属于一个库。一个超级表所拥有的子表全部存在一个库里。
-## FQDN & End Point
+## FQDN & Endpoint
-FQDN (fully qualified domain name, 完全限定域名)是 Internet 上特定计算机或主机的完整域名。FQDN 由两部分组成:主机名和域名。例如,假设邮件服务器的 FQDN 可能是 mail.tdengine.com。主机名是 mail,主机位于域名 tdengine.com 中。DNS(Domain Name System),负责将 FQDN 翻译成 IP,是互联网应用的寻址方式。对于没有 DNS 的系统,可以通过配置 hosts 文件来解决。
+FQDN(Fully Qualified Domain Name,完全限定域名)是 Internet 上特定计算机或主机的完整域名。FQDN 由两部分组成:主机名和域名。例如,假设邮件服务器的 FQDN 可能是 mail.tdengine.com。主机名是 mail,主机位于域名 tdengine.com 中。DNS(Domain Name System),负责将 FQDN 翻译成 IP,是互联网应用的寻址方式。对于没有 DNS 的系统,可以通过配置 hosts 文件来解决。
-TDengine 集群的每个节点是由 End Point 来唯一标识的,End Point 是由 FQDN 外加 Port 组成,比如 h1.tdengine.com:6030。这样当 IP 发生变化的时候,我们依然可以使用 FQDN 来动态找到节点,不需要更改集群的任何配置。而且采用 FQDN,便于内网和外网对同一个集群的统一访问。
+TDengine 集群的每个节点是由 Endpoint 来唯一标识的,Endpoint 是由 FQDN 外加 Port 组成,比如 h1.tdengine.com:6030。这样当 IP 发生变化的时候,我们依然可以使用 FQDN 来动态找到节点,不需要更改集群的任何配置。而且采用 FQDN,便于内网和外网对同一个集群的统一访问。
TDengine 不建议采用直接的 IP 地址访问集群,不利于管理。不了解 FQDN 概念,请看博文[《一篇文章说清楚 TDengine 的 FQDN》](https://www.taosdata.com/blog/2020/09/11/1824.html)。
diff --git a/docs/zh/04-concept/supertable.webp b/docs/zh/04-concept/supertable.webp
new file mode 100644
index 0000000000000000000000000000000000000000..764b8f3de7ee92a103b2fcd0e75c03773af5ee37
Binary files /dev/null and b/docs/zh/04-concept/supertable.webp differ
diff --git a/docs/zh/05-get-started/01-docker.md b/docs/zh/05-get-started/01-docker.md
index f0f09d4c7eeb9e5669008c4c95be5eade58b2090..e3345fed966f6940dd0c6665301c15d18501c787 100644
--- a/docs/zh/05-get-started/01-docker.md
+++ b/docs/zh/05-get-started/01-docker.md
@@ -1,13 +1,14 @@
---
sidebar_label: Docker
title: 通过 Docker 快速体验 TDengine
+description: 使用 Docker 快速体验 TDengine 的高效写入和查询
---
-本节首先介绍如何通过 Docker 快速体验 TDengine,然后介绍如何在 Docker 环境下体验 TDengine 的写入和查询功能。如果你不熟悉 Docker,请使用[安装包的方式快速体验](../../get-started/package/)。如果您希望为 TDengine 贡献代码或对内部技术实现感兴趣,请参考 [TDengine GitHub 主页](https://github.com/taosdata/TDengine) 下载源码构建和安装.
+本节首先介绍如何通过 Docker 快速体验 TDengine,然后介绍如何在 Docker 环境下体验 TDengine 的写入和查询功能。如果你不熟悉 Docker,请使用[安装包的方式快速体验](../../get-started/package/)。如果您希望为 TDengine 贡献代码或对内部技术实现感兴趣,请参考 [TDengine GitHub 主页](https://github.com/taosdata/TDengine)下载源码构建和安装。
## 启动 TDengine
-如果已经安装了 docker, 只需执行下面的命令。
+如果已经安装了 Docker,只需执行下面的命令:
```shell
docker run -d -p 6030:6030 -p 6041:6041 -p 6043-6049:6043-6049 -p 6043-6049:6043-6049/udp tdengine/tdengine
@@ -15,84 +16,84 @@ docker run -d -p 6030:6030 -p 6041:6041 -p 6043-6049:6043-6049 -p 6043-6049:6043
注意:TDengine 3.0 服务端仅使用 6030 TCP 端口。6041 为 taosAdapter 所使用提供 REST 服务端口。6043-6049 为 taosAdapter 提供第三方应用接入所使用端口,可根据需要选择是否打开。
-确定该容器已经启动并且在正常运行
+确定该容器已经启动并且在正常运行。
```shell
docker ps
```
-进入该容器并执行 bash
+进入该容器并执行 `bash`
```shell
docker exec -it bash
```
-然后就可以执行相关的 Linux 命令操作和访问 TDengine
+然后就可以执行相关的 Linux 命令操作和访问 TDengine。
-注: Docker 工具自身的下载和使用请参考 [Docker 官网文档](https://docs.docker.com/get-docker/)。
+注:Docker 工具自身的下载和使用请参考 [Docker 官网文档](https://docs.docker.com/get-docker/)。
## 运行 TDengine CLI
-进入容器,执行 taos
+进入容器,执行 `taos`:
```
$ taos
-taos>
-
+taos>
```
-## 写入数据
+## 使用 taosBenchmark 体验写入速度
-可以使用 TDengine 的自带工具 taosBenchmark 快速体验 TDengine 的写入。
+可以使用 TDengine 的自带工具 taosBenchmark 快速体验 TDengine 的写入速度。
-进入容器,启动 taosBenchmark:
+启动 TDengine 的服务,在 Linux 或 Windows 终端执行 `taosBenchmark`(曾命名为 `taosdemo`):
- ```bash
- $ taosBenchmark
-
- ```
+```bash
+$ taosBenchmark
+```
- 该命令将在数据库 test 下面自动创建一张超级表 meters,该超级表下有 1 万张表,表名为 "d0" 到 "d9999",每张表有 1 万条记录,每条记录有 (ts, current, voltage, phase) 四个字段,时间戳从 "2017-07-14 10:40:00 000" 到 "2017-07-14 10:40:09 999",每张表带有标签 location 和 groupId,groupId 被设置为 1 到 10, location 被设置为 "San Francisco" 或者 "Los Angeles"等城市名称。
+该命令将在数据库 `test` 下面自动创建一张超级表 `meters`,该超级表下有 1 万张表,表名为 `d0` 到 `d9999`,每张表有 1 万条记录,每条记录有 `ts`、`current`、`voltage`、`phase` 四个字段,时间戳从 2017-07-14 10:40:00 000 到 2017-07-14 10:40:09 999,每张表带有标签 `location` 和 `groupId`,groupId 被设置为 1 到 10,location 被设置为 `Campbell`、`Cupertino`、`Los Angeles`、`Mountain View`、`Palo Alto`、`San Diego`、`San Francisco`、`San Jose`、`Santa Clara` 或者 `Sunnyvale`。
- 这条命令很快完成 1 亿条记录的插入。具体时间取决于硬件性能。
+这条命令很快完成 1 亿条记录的插入。具体时间取决于硬件性能,即使在一台普通的 PC 服务器往往也仅需十几秒。
- taosBenchmark 命令本身带有很多选项,配置表的数目、记录条数等等,您可以设置不同参数进行体验,请执行 `taosBenchmark --help` 详细列出。taosBenchmark 详细使用方法请参照 [taosBenchmark 参考手册](../../reference/taosbenchmark)。
+taosBenchmark 命令本身带有很多选项,配置表的数目、记录条数等等,您可以设置不同参数进行体验,请执行 `taosBenchmark --help` 详细列出。taosBenchmark 详细使用方法请参照[如何使用 taosBenchmark 对 TDengine 进行性能测试](https://www.taosdata.com/2021/10/09/3111.html)和 [taosBenchmark 参考手册](../../reference/taosbenchmark)。
-## 体验查询
+## 使用 TDengine CLI 体验查询速度
-使用上述 taosBenchmark 插入数据后,可以在 TDengine CLI 输入查询命令,体验查询速度。。
+使用上述 `taosBenchmark` 插入数据后,可以在 TDengine CLI(taos)输入查询命令,体验查询速度。
-查询超级表下记录总条数:
+查询超级表 `meters` 下的记录总条数:
```sql
-taos> select count(*) from test.meters;
+SELECT COUNT(*) FROM test.meters;
```
查询 1 亿条记录的平均值、最大值、最小值等:
```sql
-taos> select avg(current), max(voltage), min(phase) from test.meters;
+SELECT AVG(current), MAX(voltage), MIN(phase) FROM test.meters;
```
-查询 location="San Francisco" 的记录总条数:
+查询 location = "San Francisco" 的记录总条数:
```sql
-taos> select count(*) from test.meters where location="San Francisco";
+SELECT COUNT(*) FROM test.meters WHERE location = "San Francisco";
```
-查询 groupId=10 的所有记录的平均值、最大值、最小值等:
+查询 groupId = 10 的所有记录的平均值、最大值、最小值等:
```sql
-taos> select avg(current), max(voltage), min(phase) from test.meters where groupId=10;
+SELECT AVG(current), MAX(voltage), MIN(phase) FROM test.meters WHERE groupId = 10;
```
-对表 d10 按 10s 进行平均值、最大值和最小值聚合统计:
+对表 `d10` 按每 10 秒进行平均值、最大值和最小值聚合统计:
```sql
-taos> select avg(current), max(voltage), min(phase) from test.d10 interval(10s);
+SELECT FIRST(ts), AVG(current), MAX(voltage), MIN(phase) FROM test.d10 INTERVAL(10s);
```
+在上面的查询中,你选择的是区间内的第一个时间戳(ts),另一种选择方式是 `_wstart`,它将给出时间窗口的开始。关于窗口查询的更多信息,参见[特色查询](../../taos-sql/distinguished/)。
+
## 其它
-更多关于在 Docker 环境下使用 TDengine 的细节,请参考 [在 Docker 下使用 TDengine](../../reference/docker)
+更多关于在 Docker 环境下使用 TDengine 的细节,请参考 [在 Docker 下使用 TDengine](../../reference/docker)。
diff --git a/docs/zh/05-get-started/03-package.md b/docs/zh/05-get-started/03-package.md
index 85005b9551c3089a5cbf11bb622ae893489f7f65..cb2553a0bfe6e946422514562ebeb9326511a480 100644
--- a/docs/zh/05-get-started/03-package.md
+++ b/docs/zh/05-get-started/03-package.md
@@ -1,6 +1,7 @@
---
sidebar_label: 安装包
title: 使用安装包立即开始
+description: 使用安装包快速体验 TDengine
---
import Tabs from "@theme/Tabs";
@@ -9,23 +10,24 @@ import PkgListV3 from "/components/PkgListV3";
您可以[用 Docker 立即体验](../../get-started/docker/) TDengine。如果您希望对 TDengine 贡献代码或对内部实现感兴趣,请参考我们的 [TDengine GitHub 主页](https://github.com/taosdata/TDengine) 下载源码构建和安装.
-TDengine 完整的软件包包括服务端(taosd)、用于与第三方系统对接并提供 RESTful 接口的 taosAdapter、应用驱动(taosc)、命令行程序 (CLI,taos) 和一些工具软件。目前 taosAdapter 仅在 Linux 系统上安装和运行,后续将支持 Windows、macOS 等系统。TDengine 除了提供多种语言的连接器之外,还通过 [taosAdapter](../../reference/taosadapter/) 提供 [RESTful 接口](../../reference/rest-api/)。
+TDengine 完整的软件包包括服务端(taosd)、应用驱动(taosc)、用于与第三方系统对接并提供 RESTful 接口的 taosAdapter、命令行程序(CLI,taos)和一些工具软件。目前 taosAdapter 仅在 Linux 系统上安装和运行,后续将支持 Windows、macOS 等系统。TDengine 除了提供多种语言的连接器之外,还通过 [taosAdapter](../../reference/taosadapter/) 提供 [RESTful 接口](../../connector/rest-api/)。
-为方便使用,标准的服务端安装包包含了 taosd、taosAdapter、taosc、taos、taosdump、taosBenchmark、TDinsight 安装脚本和示例代码;如果您只需要用到服务端程序和客户端连接的 C/C++ 语言支持,也可以仅下载 lite 版本的安装包。
+为方便使用,标准的服务端安装包包含了 taosd、taosAdapter、taosc、taos、taosdump、taosBenchmark、TDinsight 安装脚本和示例代码;如果您只需要用到服务端程序和客户端连接的 C/C++ 语言支持,也可以仅下载 Lite 版本的安装包。
-在 Linux 系统上,TDengine 开源版本提供 deb 和 rpm 格式安装包,用户可以根据自己的运行环境选择合适的安装包。其中 deb 支持 Debian/Ubuntu 及衍生系统,rpm 支持 CentOS/RHEL/SUSE 及衍生系统。同时我们也为企业用户提供 tar.gz 格式安装包,也支持通过 `apt-get` 工具从线上进行安装。需要注意的是,rpm 和 deb 包不含 taosdump 和 TDinsight 安装脚本,这些工具需要通过安装 taosTool 包获得。TDengine 也提供 Windows x64 平台的安装包。
+在 Linux 系统上,TDengine 社区版提供 Deb 和 RPM 格式安装包,用户可以根据自己的运行环境选择合适的安装包。其中 Deb 支持 Debian/Ubuntu 及其衍生系统,RPM 支持 CentOS/RHEL/SUSE 及其衍生系统。同时我们也为企业用户提供 tar.gz 格式安装包,也支持通过 `apt-get` 工具从线上进行安装。需要注意的是,RPM 和 Deb 包不含 `taosdump` 和 TDinsight 安装脚本,这些工具需要通过安装 taosTool 包获得。TDengine 也提供 Windows x64 平台的安装包。
## 安装
-1. 从列表中下载获得 deb 安装包;
-
+1. 从列表中下载获得 Deb 安装包;
+
2. 进入到安装包所在目录,执行如下的安装命令:
+> 请将 `` 替换为下载的安装包版本
+
```bash
-# 替换为下载的安装包版本
sudo dpkg -i TDengine-server--Linux-x64.deb
```
@@ -33,12 +35,13 @@ sudo dpkg -i TDengine-server--Linux-x64.deb
-1. 从列表中下载获得 rpm 安装包;
-
+1. 从列表中下载获得 RPM 安装包;
+
2. 进入到安装包所在目录,执行如下的安装命令:
+> 请将 `` 替换为下载的安装包版本
+
```bash
-# 替换为下载的安装包版本
sudo rpm -ivh TDengine-server--Linux-x64.rpm
```
@@ -47,44 +50,46 @@ sudo rpm -ivh TDengine-server--Linux-x64.rpm
1. 从列表中下载获得 tar.gz 安装包;
-
-2. 进入到安装包所在目录,先解压文件后,进入子目录,执行其中的 install.sh 安装脚本:
+
+2. 进入到安装包所在目录,使用 `tar` 解压安装包;
+3. 进入到安装包所在目录,先解压文件后,进入子目录,执行其中的 install.sh 安装脚本。
+
+> 请将 `` 替换为下载的安装包版本
```bash
-# 替换为下载的安装包版本
tar -zxvf TDengine-server--Linux-x64.tar.gz
```
-解压后进入相应路径,执行
+解压文件后,进入相应子目录,执行其中的 `install.sh` 安装脚本:
```bash
sudo ./install.sh
```
:::info
-install.sh 安装脚本在执行过程中,会通过命令行交互界面询问一些配置信息。如果希望采取无交互安装方式,那么可以用 -e no 参数来执行 install.sh 脚本。运行 `./install.sh -h` 指令可以查看所有参数的详细说明信息。
+install.sh 安装脚本在执行过程中,会通过命令行交互界面询问一些配置信息。如果希望采取无交互安装方式,那么可以运行 `./install.sh -e no`。运行 `./install.sh -h` 指令可以查看所有参数的详细说明信息。
:::
-可以使用 apt-get 工具从官方仓库安装。
+可以使用 `apt-get` 工具从官方仓库安装。
-**安装包仓库**
+**配置包仓库**
```bash
wget -qO - http://repos.taosdata.com/tdengine.key | sudo apt-key add -
echo "deb [arch=amd64] http://repos.taosdata.com/tdengine-stable stable main" | sudo tee /etc/apt/sources.list.d/tdengine-stable.list
```
-如果安装 Beta 版需要安装包仓库
+如果安装 Beta 版需要安装包仓库:
```bash
wget -qO - http://repos.taosdata.com/tdengine.key | sudo apt-key add -
echo "deb [arch=amd64] http://repos.taosdata.com/tdengine-beta beta main" | sudo tee /etc/apt/sources.list.d/tdengine-beta.list
```
-**使用 apt-get 命令安装**
+**使用 `apt-get` 命令安装**
```bash
sudo apt-get update
@@ -93,26 +98,26 @@ sudo apt-get install tdengine
```
:::tip
-apt-get 方式只适用于 Debian 或 Ubuntu 系统
+apt-get 方式只适用于 Debian 或 Ubuntu 系统。
::::
-
+
-注意:目前 TDengine 在 Windows 平台上只支持 Windows server 2016/2019 和 Windows 10/11 系统版本。
+注意:目前 TDengine 在 Windows 平台上只支持 Windows Server 2016/2019 和 Windows 10/11。
1. 从列表中下载获得 exe 安装程序;
-
+
2. 运行可执行程序来安装 TDengine。
:::info
-下载其他组件、最新 Beta 版及之前版本的安装包,请点击[发布历史页面](../../releases/tdengine)
+下载其他组件、最新 Beta 版及之前版本的安装包,请点击[发布历史页面](../../releases/tdengine)。
:::
:::note
-当安装第一个节点时,出现 Enter FQDN:提示的时候,不需要输入任何内容。只有当安装第二个或以后更多的节点时,才需要输入已有集群中任何一个可用节点的 FQDN,支持该新节点加入集群。当然也可以不输入,而是在新节点启动前,配置到新节点的配置文件中。
+当安装第一个节点时,出现 `Enter FQDN:` 提示的时候,不需要输入任何内容。只有当安装第二个或以后更多的节点时,才需要输入已有集群中任何一个可用节点的 FQDN,支持该新节点加入集群。当然也可以不输入,而是在新节点启动前,配置到新节点的配置文件中。
:::
@@ -147,7 +152,7 @@ Active: inactive (dead)
如果 TDengine 服务正常工作,那么您可以通过 TDengine 的命令行程序 `taos` 来访问并体验 TDengine。
-systemctl 命令汇总:
+如下 `systemctl` 命令可以帮助你管理 TDengine 服务:
- 启动服务进程:`systemctl start taosd`
@@ -159,7 +164,7 @@ systemctl 命令汇总:
:::info
-- systemctl 命令需要 _root_ 权限来运行,如果您非 _root_ 用户,请在命令前添加 sudo 。
+- `systemctl` 命令需要 _root_ 权限来运行,如果您非 _root_ 用户,请在命令前添加 `sudo`。
- `systemctl stop taosd` 指令在执行后并不会马上停止 TDengine 服务,而是会等待系统中必要的落盘工作正常完成。在数据量很大的情况下,这可能会消耗较长时间。
- 如果系统中不支持 `systemd`,也可以用手动运行 `/usr/local/taos/bin/taosd` 方式启动 TDengine 服务。
@@ -169,87 +174,93 @@ systemctl 命令汇总:
-安装后,在 C:\TDengine 目录下,运行 taosd.exe 来启动 TDengine 服务进程。
+安装后,在 `C:\TDengine` 目录下,运行 `taosd.exe` 来启动 TDengine 服务进程。
-## TDengine 命令行 (CLI)
+## TDengine 命令行(CLI)
-为便于检查 TDengine 的状态,执行数据库 (Database) 的各种即席(Ad Hoc)查询,TDengine 提供一命令行应用程序(以下简称为 TDengine CLI) taos。要进入 TDengine 命令行,您只要在安装有 TDengine 的 Linux 终端执行 `taos` 即可,也可以在安装有 TDengine 的 Windows 终端的 C:\TDengine 目录下,运行 taos.exe 来启动 TDengine 命令行。
+为便于检查 TDengine 的状态,执行数据库(Database)的各种即席(Ad Hoc)查询,TDengine 提供一命令行应用程序(以下简称为 TDengine CLI)taos。要进入 TDengine 命令行,您只要在安装有 TDengine 的 Linux 终端执行 `taos` 即可,也可以在安装有 TDengine 的 Windows 终端的 C:\TDengine 目录下,运行 taos.exe 来启动 TDengine 命令行。
```bash
taos
```
-如果连接服务成功,将会打印出欢迎消息和版本信息。如果失败,则会打印错误消息出来(请参考 [FAQ](/train-faq/faq) 来解决终端连接服务端失败的问题)。 TDengine CLI 的提示符号如下:
+如果连接服务成功,将会打印出欢迎消息和版本信息。如果失败,则会打印错误消息出来(请参考 [FAQ](/train-faq/faq) 来解决终端连接服务端失败的问题)。TDengine CLI 的提示符号如下:
```cmd
taos>
```
-在 TDengine CLI 中,用户可以通过 SQL 命令来创建/删除数据库、表等,并进行数据库(database)插入查询操作。在终端中运行的 SQL 语句需要以分号结束来运行。示例:
+在 TDengine CLI 中,用户可以通过 SQL 命令来创建/删除数据库、表等,并进行数据库(Database)插入查询操作。在终端中运行的 SQL 语句需要以分号(;)结束来运行。示例:
```sql
-create database demo;
-use demo;
-create table t (ts timestamp, speed int);
-insert into t values ('2019-07-15 00:00:00', 10);
-insert into t values ('2019-07-15 01:00:00', 20);
-select * from t;
+CREATE DATABASE demo;
+USE demo;
+CREATE TABLE t (ts TIMESTAMP, speed INT);
+INSERT INTO t VALUES ('2019-07-15 00:00:00', 10);
+INSERT INTO t VALUES ('2019-07-15 01:00:00', 20);
+SELECT * FROM t;
+
ts | speed |
========================================
2019-07-15 00:00:00.000 | 10 |
2019-07-15 01:00:00.000 | 20 |
+
Query OK, 2 row(s) in set (0.003128s)
```
-除执行 SQL 语句外,系统管理员还可以从 TDengine CLI 进行检查系统运行状态、添加删除用户账号等操作。TDengine CLI 连同应用驱动也可以独立安装在 Linux 或 Windows 机器上运行,更多细节请参考 [这里](../../reference/taos-shell/)
+除执行 SQL 语句外,系统管理员还可以从 TDengine CLI 进行检查系统运行状态、添加删除用户账号等操作。TDengine CLI 连同应用驱动也可以独立安装在 Linux 或 Windows 机器上运行,更多细节请参考 [TDengine 命令行](../../reference/taos-shell/)。
## 使用 taosBenchmark 体验写入速度
-启动 TDengine 的服务,在 Linux 或 windows 终端执行 `taosBenchmark` (曾命名为 `taosdemo`):
+可以使用 TDengine 的自带工具 taosBenchmark 快速体验 TDengine 的写入速度。
+
+启动 TDengine 的服务,在 Linux 或 Windows 终端执行 `taosBenchmark`(曾命名为 `taosdemo`):
```bash
-taosBenchmark
+$ taosBenchmark
```
-该命令将在数据库 test 下面自动创建一张超级表 meters,该超级表下有 1 万张表,表名为 "d0" 到 "d9999",每张表有 1 万条记录,每条记录有 (ts, current, voltage, phase) 四个字段,时间戳从 "2017-07-14 10:40:00 000" 到 "2017-07-14 10:40:09 999",每张表带有标签 location 和 groupId,groupId 被设置为 1 到 10, location 被设置为 "California.SanFrancisco" 或者 "California.LosAngeles"。
+该命令将在数据库 `test` 下面自动创建一张超级表 `meters`,该超级表下有 1 万张表,表名为 `d0` 到 `d9999`,每张表有 1 万条记录,每条记录有 `ts`、`current`、`voltage`、`phase` 四个字段,时间戳从 2017-07-14 10:40:00 000 到 2017-07-14 10:40:09 999,每张表带有标签 `location` 和 `groupId`,groupId 被设置为 1 到 10,location 被设置为 `Campbell`、`Cupertino`、`Los Angeles`、`Mountain View`、`Palo Alto`、`San Diego`、`San Francisco`、`San Jose`、`Santa Clara` 或者 `Sunnyvale`。
这条命令很快完成 1 亿条记录的插入。具体时间取决于硬件性能,即使在一台普通的 PC 服务器往往也仅需十几秒。
-taosBenchmark 命令本身带有很多选项,配置表的数目、记录条数等等,您可以设置不同参数进行体验,请执行 `taosBenchmark --help` 详细列出。taosBenchmark 详细使用方法请参照 [如何使用 taosBenchmark 对 TDengine 进行性能测试](https://www.taosdata.com/2021/10/09/3111.html)。
+taosBenchmark 命令本身带有很多选项,配置表的数目、记录条数等等,您可以设置不同参数进行体验,请执行 `taosBenchmark --help` 详细列出。taosBenchmark 详细使用方法请参照[如何使用 taosBenchmark 对 TDengine 进行性能测试](https://www.taosdata.com/2021/10/09/3111.html)和 [taosBenchmark 参考手册](../../reference/taosbenchmark)。
## 使用 TDengine CLI 体验查询速度
-使用上述 taosBenchmark 插入数据后,可以在 TDengine CLI 输入查询命令,体验查询速度。
+使用上述 `taosBenchmark` 插入数据后,可以在 TDengine CLI(taos)输入查询命令,体验查询速度。
-查询超级表下记录总条数:
+查询超级表 `meters` 下的记录总条数:
```sql
-taos> select count(*) from test.meters;
+SELECT COUNT(*) FROM test.meters;
```
查询 1 亿条记录的平均值、最大值、最小值等:
```sql
-taos> select avg(current), max(voltage), min(phase) from test.meters;
+SELECT AVG(current), MAX(voltage), MIN(phase) FROM test.meters;
```
-查询 location="California.SanFrancisco" 的记录总条数:
+查询 location = "San Francisco" 的记录总条数:
```sql
-taos> select count(*) from test.meters where location="California.SanFrancisco";
+SELECT COUNT(*) FROM test.meters WHERE location = "San Francisco";
```
-查询 groupId=10 的所有记录的平均值、最大值、最小值等:
+查询 groupId = 10 的所有记录的平均值、最大值、最小值等:
```sql
-taos> select avg(current), max(voltage), min(phase) from test.meters where groupId=10;
+SELECT AVG(current), MAX(voltage), MIN(phase) FROM test.meters WHERE groupId = 10;
```
-对表 d10 按 10s 进行平均值、最大值和最小值聚合统计:
+对表 `d10` 按每 10 秒进行平均值、最大值和最小值聚合统计:
```sql
-taos> select avg(current), max(voltage), min(phase) from test.d10 interval(10s);
+SELECT FIRST(ts), AVG(current), MAX(voltage), MIN(phase) FROM test.d10 INTERVAL(10s);
```
+
+在上面的查询中,你选择的是区间内的第一个时间戳(ts),另一种选择方式是 `_wstart`,它将给出时间窗口的开始。关于窗口查询的更多信息,参见[特色查询](../../taos-sql/distinguished/)。
diff --git a/docs/zh/05-get-started/index.md b/docs/zh/05-get-started/index.md
index 794081b4e4c438dee2d8cbe125de4094056f190f..20f8235d87426f7a98ded2f7be431289ea00a045 100644
--- a/docs/zh/05-get-started/index.md
+++ b/docs/zh/05-get-started/index.md
@@ -3,7 +3,7 @@ title: 立即开始
description: '快速设置 TDengine 环境并体验其高效写入和查询'
---
-TDengine 完整的软件包包括服务端(taosd)、用于与第三方系统对接并提供 RESTful 接口的 taosAdapter、应用驱动(taosc)、命令行程序 (CLI,taos) 和一些工具软件。TDengine 除了提供多种语言的连接器之外,还通过 [taosAdapter](/reference/taosadapter) 提供 [RESTful 接口](/reference/rest-api)。
+TDengine 完整的软件包包括服务端(taosd)、用于与第三方系统对接并提供 RESTful 接口的 taosAdapter、应用驱动(taosc)、命令行程序 (CLI,taos) 和一些工具软件。TDengine 除了提供多种语言的连接器之外,还通过 [taosAdapter](../reference/taosadapter) 提供 [RESTful 接口](../connector/rest-api)。
本章主要介绍如何利用 Docker 或者安装包快速设置 TDengine 环境并体验其高效写入和查询。
diff --git a/docs/zh/07-develop/01-connect/_connect_java.mdx b/docs/zh/07-develop/01-connect/_connect_java.mdx
index f5b8ea1cc2bf309bbb182be6ae06100102328a16..86c70ef7dc9a84d61fa36502f83e0be6a0836214 100644
--- a/docs/zh/07-develop/01-connect/_connect_java.mdx
+++ b/docs/zh/07-develop/01-connect/_connect_java.mdx
@@ -12,4 +12,4 @@
{{#include docs/examples/java/src/main/java/com/taos/example/WSConnectExample.java:main}}
```
-更多连接参数配置,参考[Java 连接器](/reference/connector/java)
+更多连接参数配置,参考[Java 连接器](../../connector/java)
diff --git a/docs/zh/07-develop/01-connect/index.md b/docs/zh/07-develop/01-connect/index.md
index c1f7e34cb9c8bfd4938133ddbb56a47f77c5f15d..075d99cfee78b01b66ebc527892e90b9291dd422 100644
--- a/docs/zh/07-develop/01-connect/index.md
+++ b/docs/zh/07-develop/01-connect/index.md
@@ -1,6 +1,6 @@
---
title: 建立连接
-description: "本节介绍如何使用连接器建立与 TDengine 的连接,给出连接器安装、连接的简单说明。"
+description: 使用连接器建立与 TDengine 的连接,以及连接器的安装和连接
---
import Tabs from "@theme/Tabs";
@@ -33,7 +33,7 @@ TDengine 提供了丰富的应用程序开发接口,为了便于用户快速
关键不同点在于:
1. 使用 REST 连接,用户无需安装客户端驱动程序 taosc,具有跨平台易用的优势,但性能要下降 30%左右。
-2. 使用原生连接可以体验 TDengine 的全部功能,如[参数绑定接口](/reference/connector/cpp#参数绑定-api)、[订阅](/reference/connector/cpp#订阅和消费-api)等等。
+2. 使用原生连接可以体验 TDengine 的全部功能,如[参数绑定接口](../../connector/cpp/#参数绑定-api)、[订阅](../../connector/cpp/#订阅和消费-api)等等。
## 安装客户端驱动 taosc
diff --git a/docs/zh/07-develop/02-model/index.mdx b/docs/zh/07-develop/02-model/index.mdx
index 1609eb5362cf40e7d134b0987968f7cc9bd31c92..d66059c2cda2a0e4629b16ca44cee036dc67546f 100644
--- a/docs/zh/07-develop/02-model/index.mdx
+++ b/docs/zh/07-develop/02-model/index.mdx
@@ -1,5 +1,7 @@
---
+sidebar_label: 数据建模
title: TDengine 数据建模
+description: TDengine 中如何建立数据模型
---
TDengine 采用类关系型数据模型,需要建库、建表。因此对于一个具体的应用场景,需要考虑库、超级表和普通表的设计。本节不讨论细致的语法规则,只介绍概念。
@@ -39,7 +41,7 @@ USE power;
CREATE STABLE meters (ts timestamp, current float, voltage int, phase float) TAGS (location binary(64), groupId int);
```
-与创建普通表一样,创建超级表时,需要提供表名(示例中为 meters),表结构 Schema,即数据列的定义。第一列必须为时间戳(示例中为 ts),其他列为采集的物理量(示例中为 current, voltage, phase),数据类型可以为整型、浮点型、字符串等。除此之外,还需要提供标签的 schema (示例中为 location, groupId),标签的数据类型可以为整型、浮点型、字符串等。采集点的静态属性往往可以作为标签,比如采集点的地理位置、设备型号、设备组 ID、管理员 ID 等等。标签的 schema 可以事后增加、删除、修改。具体定义以及细节请见 [TAOS SQL 的超级表管理](/taos-sql/stable) 章节。
+与创建普通表一样,创建超级表时,需要提供表名(示例中为 meters),表结构 Schema,即数据列的定义。第一列必须为时间戳(示例中为 ts),其他列为采集的物理量(示例中为 current, voltage, phase),数据类型可以为整型、浮点型、字符串等。除此之外,还需要提供标签的 schema (示例中为 location, groupId),标签的数据类型可以为整型、浮点型、字符串等。采集点的静态属性往往可以作为标签,比如采集点的地理位置、设备型号、设备组 ID、管理员 ID 等等。标签的 schema 可以事后增加、删除、修改。具体定义以及细节请见 [TDengine SQL 的超级表管理](/taos-sql/stable) 章节。
每一种类型的数据采集点需要建立一个超级表,因此一个物联网系统,往往会有多个超级表。对于电网,我们就需要对智能电表、变压器、母线、开关等都建立一个超级表。在物联网中,一个设备就可能有多个数据采集点(比如一台风力发电的风机,有的采集点采集电流、电压等电参数,有的采集点采集温度、湿度、风向等环境参数),这个时候,对这一类型的设备,需要建立多张超级表。
@@ -53,7 +55,7 @@ TDengine 对每个数据采集点需要独立建表。与标准的关系型数
CREATE TABLE d1001 USING meters TAGS ("California.SanFrancisco", 2);
```
-其中 d1001 是表名,meters 是超级表的表名,后面紧跟标签 Location 的具体标签值 "California.SanFrancisco",标签 groupId 的具体标签值 2。虽然在创建表时,需要指定标签值,但可以事后修改。详细细则请见 [TAOS SQL 的表管理](/taos-sql/table) 章节。
+其中 d1001 是表名,meters 是超级表的表名,后面紧跟标签 Location 的具体标签值 "California.SanFrancisco",标签 groupId 的具体标签值 2。虽然在创建表时,需要指定标签值,但可以事后修改。详细细则请见 [TDengine SQL 的表管理](/taos-sql/table) 章节。
TDengine 建议将数据采集点的全局唯一 ID 作为表名(比如设备序列号)。但对于有的场景,并没有唯一的 ID,可以将多个 ID 组合成一个唯一的 ID。不建议将具有唯一性的 ID 作为标签值。
diff --git a/docs/zh/07-develop/03-insert-data/01-sql-writing.mdx b/docs/zh/07-develop/03-insert-data/01-sql-writing.mdx
index 214cbdaa96d02e0cd1251eeda97c6a897887cc7e..8818eaae3dc1806a00e73d9846fbd1dfe15e0c8a 100644
--- a/docs/zh/07-develop/03-insert-data/01-sql-writing.mdx
+++ b/docs/zh/07-develop/03-insert-data/01-sql-writing.mdx
@@ -23,9 +23,10 @@ import PhpStmt from "./_php_stmt.mdx";
## SQL 写入简介
-应用通过连接器执行 INSERT 语句来插入数据,用户还可以通过 TAOS Shell,手动输入 INSERT 语句插入数据。
+应用通过连接器执行 INSERT 语句来插入数据,用户还可以通过 TDengine CLI,手动输入 INSERT 语句插入数据。
### 一次写入一条
+
下面这条 INSERT 就将一条记录写入到表 d1001 中:
```sql
@@ -48,7 +49,7 @@ TDengine 也支持一次向多个表写入数据,比如下面这条命令就
INSERT INTO d1001 VALUES (1538548685000, 10.3, 219, 0.31) (1538548695000, 12.6, 218, 0.33) d1002 VALUES (1538548696800, 12.3, 221, 0.31);
```
-详细的 SQL INSERT 语法规则参考 [TAOS SQL 的数据写入](/taos-sql/insert)。
+详细的 SQL INSERT 语法规则参考 [TDengine SQL 的数据写入](/taos-sql/insert)。
:::info
@@ -134,4 +135,3 @@ TDengine 也提供了支持参数绑定的 Prepare API,与 MySQL 类似,这
-
diff --git a/docs/zh/07-develop/03-insert-data/05-high-volume.md b/docs/zh/07-develop/03-insert-data/05-high-volume.md
new file mode 100644
index 0000000000000000000000000000000000000000..d7581467ae0315442d89de395d35bbd677f75d3a
--- /dev/null
+++ b/docs/zh/07-develop/03-insert-data/05-high-volume.md
@@ -0,0 +1,436 @@
+import Tabs from "@theme/Tabs";
+import TabItem from "@theme/TabItem";
+
+# 高效写入
+
+本节介绍如何高效地向 TDengine 写入数据。
+
+## 高效写入原理 {#principle}
+
+### 客户端程序的角度 {#application-view}
+
+从客户端程序的角度来说,高效写入数据要考虑以下几个因素:
+
+1. 单次写入的数据量。一般来讲,每批次写入的数据量越大越高效(但超过一定阈值其优势会消失)。使用 SQL 写入 TDengine 时,尽量在一条 SQL 中拼接更多数据。目前,TDengine 支持的一条 SQL 的最大长度为 1,048,576(1M)个字符。
+2. 并发连接数。一般来讲,同时写入数据的并发连接数越多写入越高效(但超过一定阈值反而会下降,取决于服务端处理能力)。
+3. 数据在不同表(或子表)之间的分布,即要写入数据的相邻性。一般来说,每批次只向同一张表(或子表)写入数据比向多张表(或子表)写入数据要更高效;
+4. 写入方式。一般来讲:
+ - 参数绑定写入比 SQL 写入更高效。因参数绑定方式避免了 SQL 解析。(但增加了 C 接口的调用次数,对于连接器也有性能损耗)。
+ - SQL 写入不自动建表比自动建表更高效。因自动建表要频繁检查表是否存在
+ - SQL 写入比无模式写入更高效。因无模式写入会自动建表且支持动态更改表结构
+
+客户端程序要充分且恰当地利用以上几个因素。在单次写入中尽量只向同一张表(或子表)写入数据,每批次写入的数据量经过测试和调优设定为一个最适合当前系统处理能力的数值,并发写入的连接数同样经过测试和调优后设定为一个最适合当前系统处理能力的数值,以实现在当前系统中的最佳写入速度。
+
+### 数据源的角度 {#datasource-view}
+
+客户端程序通常需要从数据源读数据再写入 TDengine。从数据源角度来说,以下几种情况需要在读线程和写线程之间增加队列:
+
+1. 有多个数据源,单个数据源生成数据的速度远小于单线程写入的速度,但数据量整体比较大。此时队列的作用是把多个数据源的数据汇聚到一起,增加单次写入的数据量。
+2. 单个数据源生成数据的速度远大于单线程写入的速度。此时队列的作用是增加写入的并发度。
+3. 单张表的数据分散在多个数据源。此时队列的作用是将同一张表的数据提前汇聚到一起,提高写入时数据的相邻性。
+
+如果写应用的数据源是 Kafka, 写应用本身即 Kafka 的消费者,则可利用 Kafka 的特性实现高效写入。比如:
+
+1. 将同一张表的数据写到同一个 Topic 的同一个 Partition,增加数据的相邻性
+2. 通过订阅多个 Topic 实现数据汇聚
+3. 通过增加 Consumer 线程数增加写入的并发度
+4. 通过增加每次 fetch 的最大数据量来增加单次写入的最大数据量
+
+### 服务器配置的角度 {#setting-view}
+
+从服务端配置的角度,要根据系统中磁盘的数量,磁盘的 I/O 能力,以及处理器能力在创建数据库时设置适当的 vgroups 数量以充分发挥系统性能。如果 vgroups 过少,则系统性能无法发挥;如果 vgroups 过多,会造成无谓的资源竞争。常规推荐 vgroups 数量为 CPU 核数的 2 倍,但仍然要结合具体的系统资源配置进行调优。
+
+更多调优参数,请参考 [数据库管理](../../../taos-sql/database) 和 [服务端配置](../../../reference/config)。
+
+## 高效写入示例 {#sample-code}
+
+### 场景设计 {#scenario}
+
+下面的示例程序展示了如何高效写入数据,场景设计如下:
+
+- TDengine 客户端程序从其它数据源不断读入数据,在示例程序中采用生成模拟数据的方式来模拟读取数据源
+- 单个连接向 TDengine 写入的速度无法与读数据的速度相匹配,因此客户端程序启动多个线程,每个线程都建立了与 TDengine 的连接,每个线程都有一个独占的固定大小的消息队列
+- 客户端程序将接收到的数据根据所属的表名(或子表名)HASH 到不同的线程,即写入该线程所对应的消息队列,以此确保属于某个表(或子表)的数据一定会被一个固定的线程处理
+- 各个子线程在将所关联的消息队列中的数据读空后或者读取数据量达到一个预定的阈值后将该批数据写入 TDengine,并继续处理后面接收到的数据
+
+
+
+### 示例代码 {#code}
+
+这一部分是针对以上场景的示例代码。对于其它场景高效写入原理相同,不过代码需要适当修改。
+
+本示例代码假设源数据属于同一张超级表(meters)的不同子表。程序在开始写入数据之前已经在 test 库创建了这个超级表。对于子表,将根据收到的数据,由应用程序自动创建。如果实际场景是多个超级表,只需修改写任务自动建表的代码。
+
+
+
+
+**程序清单**
+
+| 类名 | 功能说明 |
+| ---------------- | --------------------------------------------------------------------------- |
+| FastWriteExample | 主程序 |
+| ReadTask | 从模拟源中读取数据,将表名经过 hash 后得到 Queue 的 index,写入对应的 Queue |
+| WriteTask | 从 Queue 中获取数据,组成一个 Batch,写入 TDengine |
+| MockDataSource | 模拟生成一定数量 meters 子表的数据 |
+| SQLWriter | WriteTask 依赖这个类完成 SQL 拼接、自动建表、 SQL 写入、SQL 长度检查 |
+| StmtWriter | 实现参数绑定方式批量写入(暂未完成) |
+| DataBaseMonitor | 统计写入速度,并每隔 10 秒把当前写入速度打印到控制台 |
+
+
+以下是各类的完整代码和更详细的功能说明。
+
+
+FastWriteExample
+主程序负责:
+
+1. 创建消息队列
+2. 启动写线程
+3. 启动读线程
+4. 每隔 10 秒统计一次写入速度
+
+主程序默认暴露了 4 个参数,每次启动程序都可调节,用于测试和调优:
+
+1. 读线程个数。默认为 1。
+2. 写线程个数。默认为 3。
+3. 模拟生成的总表数。默认为 1000。将会平分给各个读线程。如果总表数较大,建表需要花费较长时间,开始统计的写入速度可能较慢。
+4. 每批最多写入记录数量。默认为 3000。
+
+队列容量(taskQueueCapacity)也是与性能有关的参数,可通过修改程序调节。一般来讲,队列容量越大,入队被阻塞的概率越小,队列的吞吐量越大,但是内存占用也会越大。示例程序默认值已经设置得足够大。
+
+```java
+{{#include docs/examples/java/src/main/java/com/taos/example/highvolume/FastWriteExample.java}}
+```
+
+
+
+
+ReadTask
+
+读任务负责从数据源读数据。每个读任务都关联了一个模拟数据源。每个模拟数据源可生成一定数量表的数据。不同的模拟数据源生成不同表的数据。
+
+读任务采用阻塞的方式写消息队列。也就是说,一旦队列满了,写操作就会阻塞。
+
+```java
+{{#include docs/examples/java/src/main/java/com/taos/example/highvolume/ReadTask.java}}
+```
+
+
+
+
+WriteTask
+
+```java
+{{#include docs/examples/java/src/main/java/com/taos/example/highvolume/WriteTask.java}}
+```
+
+
+
+
+
+MockDataSource
+
+```java
+{{#include docs/examples/java/src/main/java/com/taos/example/highvolume/MockDataSource.java}}
+```
+
+
+
+
+
+SQLWriter
+
+SQLWriter 类封装了拼 SQL 和写数据的逻辑。注意,所有的表都没有提前创建,而是在 catch 到表不存在异常的时候,再以超级表为模板批量建表,然后重新执行 INSERT 语句。对于其它异常,这里简单地记录当时执行的 SQL 语句到日志中,你也可以记录更多线索到日志,以便排查错误和故障恢复。
+
+```java
+{{#include docs/examples/java/src/main/java/com/taos/example/highvolume/SQLWriter.java}}
+```
+
+
+
+
+
+DataBaseMonitor
+
+```java
+{{#include docs/examples/java/src/main/java/com/taos/example/highvolume/DataBaseMonitor.java}}
+```
+
+
+
+**执行步骤**
+
+
+执行 Java 示例程序
+
+执行程序前需配置环境变量 `TDENGINE_JDBC_URL`。如果 TDengine Server 部署在本机,且用户名、密码和端口都是默认值,那么可配置:
+
+```
+TDENGINE_JDBC_URL="jdbc:TAOS://localhost:6030?user=root&password=taosdata"
+```
+
+**本地集成开发环境执行示例程序**
+
+1. clone TDengine 仓库
+ ```
+ git clone git@github.com:taosdata/TDengine.git --depth 1
+ ```
+2. 用集成开发环境打开 `docs/examples/java` 目录。
+3. 在开发环境中配置环境变量 `TDENGINE_JDBC_URL`。如果已配置了全局的环境变量 `TDENGINE_JDBC_URL` 可跳过这一步。
+4. 运行类 `com.taos.example.highvolume.FastWriteExample`。
+
+**远程服务器上执行示例程序**
+
+若要在服务器上执行示例程序,可按照下面的步骤操作:
+
+1. 打包示例代码。在目录 TDengine/docs/examples/java 下执行:
+ ```
+ mvn package
+ ```
+2. 远程服务器上创建 examples 目录:
+ ```
+ mkdir -p examples/java
+ ```
+3. 复制依赖到服务器指定目录:
+ - 复制依赖包,只用复制一次
+ ```
+   scp -r .\target\lib <user>@<host>:~/examples/java
+ ```
+ - 复制本程序的 jar 包,每次更新代码都需要复制
+ ```
+   scp -r .\target\javaexample-1.0.jar <user>@<host>:~/examples/java
+ ```
+4. 配置环境变量。
+   编辑 `~/.bash_profile` 或 `~/.bashrc`,添加如下内容,例如:
+
+ ```
+ export TDENGINE_JDBC_URL="jdbc:TAOS://localhost:6030?user=root&password=taosdata"
+ ```
+
+ 以上使用的是本地部署 TDengine Server 时默认的 JDBC URL。你需要根据自己的实际情况更改。
+
+5. 用 java 命令启动示例程序,命令模板:
+
+ ```
+ java -classpath lib/*:javaexample-1.0.jar com.taos.example.highvolume.FastWriteExample
+ ```
+
+6. 结束测试程序。测试程序不会自动结束,在获取到当前配置下稳定的写入速度后,按 CTRL + C 结束程序。
+ 下面是一次实际运行的日志输出,机器配置 16核 + 64G + 固态硬盘。
+
+ ```
+ root@vm85$ java -classpath lib/*:javaexample-1.0.jar com.taos.example.highvolume.FastWriteExample 2 12
+ 18:56:35.896 [main] INFO c.t.e.highvolume.FastWriteExample - readTaskCount=2, writeTaskCount=12 tableCount=1000 maxBatchSize=3000
+ 18:56:36.011 [WriteThread-0] INFO c.taos.example.highvolume.WriteTask - started
+ 18:56:36.015 [WriteThread-0] INFO c.taos.example.highvolume.SQLWriter - maxSQLLength=1048576
+ 18:56:36.021 [WriteThread-1] INFO c.taos.example.highvolume.WriteTask - started
+ 18:56:36.022 [WriteThread-1] INFO c.taos.example.highvolume.SQLWriter - maxSQLLength=1048576
+ 18:56:36.031 [WriteThread-2] INFO c.taos.example.highvolume.WriteTask - started
+ 18:56:36.032 [WriteThread-2] INFO c.taos.example.highvolume.SQLWriter - maxSQLLength=1048576
+ 18:56:36.041 [WriteThread-3] INFO c.taos.example.highvolume.WriteTask - started
+ 18:56:36.042 [WriteThread-3] INFO c.taos.example.highvolume.SQLWriter - maxSQLLength=1048576
+ 18:56:36.093 [WriteThread-4] INFO c.taos.example.highvolume.WriteTask - started
+ 18:56:36.094 [WriteThread-4] INFO c.taos.example.highvolume.SQLWriter - maxSQLLength=1048576
+ 18:56:36.099 [WriteThread-5] INFO c.taos.example.highvolume.WriteTask - started
+ 18:56:36.100 [WriteThread-5] INFO c.taos.example.highvolume.SQLWriter - maxSQLLength=1048576
+ 18:56:36.100 [WriteThread-6] INFO c.taos.example.highvolume.WriteTask - started
+ 18:56:36.101 [WriteThread-6] INFO c.taos.example.highvolume.SQLWriter - maxSQLLength=1048576
+ 18:56:36.103 [WriteThread-7] INFO c.taos.example.highvolume.WriteTask - started
+ 18:56:36.104 [WriteThread-7] INFO c.taos.example.highvolume.SQLWriter - maxSQLLength=1048576
+ 18:56:36.105 [WriteThread-8] INFO c.taos.example.highvolume.WriteTask - started
+ 18:56:36.107 [WriteThread-8] INFO c.taos.example.highvolume.SQLWriter - maxSQLLength=1048576
+ 18:56:36.108 [WriteThread-9] INFO c.taos.example.highvolume.WriteTask - started
+ 18:56:36.109 [WriteThread-9] INFO c.taos.example.highvolume.SQLWriter - maxSQLLength=1048576
+ 18:56:36.156 [WriteThread-10] INFO c.taos.example.highvolume.WriteTask - started
+ 18:56:36.157 [WriteThread-11] INFO c.taos.example.highvolume.WriteTask - started
+ 18:56:36.158 [WriteThread-10] INFO c.taos.example.highvolume.SQLWriter - maxSQLLength=1048576
+ 18:56:36.158 [ReadThread-0] INFO com.taos.example.highvolume.ReadTask - started
+ 18:56:36.158 [ReadThread-1] INFO com.taos.example.highvolume.ReadTask - started
+ 18:56:36.158 [WriteThread-11] INFO c.taos.example.highvolume.SQLWriter - maxSQLLength=1048576
+ 18:56:46.369 [main] INFO c.t.e.highvolume.FastWriteExample - count=18554448 speed=1855444
+ 18:56:56.946 [main] INFO c.t.e.highvolume.FastWriteExample - count=39059660 speed=2050521
+ 18:57:07.322 [main] INFO c.t.e.highvolume.FastWriteExample - count=59403604 speed=2034394
+ 18:57:18.032 [main] INFO c.t.e.highvolume.FastWriteExample - count=80262938 speed=2085933
+ 18:57:28.432 [main] INFO c.t.e.highvolume.FastWriteExample - count=101139906 speed=2087696
+ 18:57:38.921 [main] INFO c.t.e.highvolume.FastWriteExample - count=121807202 speed=2066729
+ 18:57:49.375 [main] INFO c.t.e.highvolume.FastWriteExample - count=142952417 speed=2114521
+ 18:58:00.689 [main] INFO c.t.e.highvolume.FastWriteExample - count=163650306 speed=2069788
+ 18:58:11.646 [main] INFO c.t.e.highvolume.FastWriteExample - count=185019808 speed=2136950
+ ```
+
+
+
+
+
+
+**程序清单**
+
+Python 示例程序中采用了多进程的架构,并使用了跨进程的消息队列。
+
+| 函数或类 | 功能说明 |
+| ------------------------ | -------------------------------------------------------------------- |
+| main 函数 | 程序入口, 创建各个子进程和消息队列 |
+| run_monitor_process 函数 | 创建数据库,超级表,统计写入速度并定时打印到控制台 |
+| run_read_task 函数 | 读进程主要逻辑,负责从其它数据系统读数据,并分发数据到为之分配的队列 |
+| MockDataSource 类 | 模拟数据源, 实现迭代器接口,每次批量返回每张表的接下来 1000 条数据 |
+| run_write_task 函数 | 写进程主要逻辑。每次从队列中取出尽量多的数据,并批量写入 |
+| SQLWriter类 | SQL 写入和自动建表 |
+| StmtWriter 类 | 实现参数绑定方式批量写入(暂未完成) |
+
+
+
+main 函数
+
+main 函数负责创建消息队列和启动子进程,子进程有 3 类:
+
+1. 1 个监控进程,负责数据库初始化和统计写入速度
+2. n 个读进程,负责从其它数据系统读数据
+3. m 个写进程,负责写数据库
+
+main 函数可以接收 5 个启动参数,依次是:
+
+1. 读任务(进程)数, 默认为 1
+2. 写任务(进程)数, 默认为 1
+3. 模拟生成的总表数,默认为 1000
+4. 队列大小(单位字节),默认为 1000000
+5. 每批最多写入记录数量, 默认为 3000
+
+```python
+{{#include docs/examples/python/fast_write_example.py:main}}
+```
+
+
+
+
+run_monitor_process
+
+监控进程负责初始化数据库,并监控当前的写入速度。
+
+```python
+{{#include docs/examples/python/fast_write_example.py:monitor}}
+```
+
+
+
+
+
+run_read_task 函数
+
+读进程,负责从其它数据系统读数据,并分发数据到为之分配的队列。
+
+```python
+{{#include docs/examples/python/fast_write_example.py:read}}
+```
+
+
+
+
+
+MockDataSource
+
+以下是模拟数据源的实现,我们假设数据源生成的每一条数据都带有目标表名信息。实际中你可能需要一定的规则确定目标表名。
+
+```python
+{{#include docs/examples/python/mockdatasource.py}}
+```
+
+
+
+
+run_write_task 函数
+
+写进程每次从队列中取出尽量多的数据,并批量写入。
+
+```python
+{{#include docs/examples/python/fast_write_example.py:write}}
+```
+
+
+
+
+
+SQLWriter 类封装了拼 SQL 和写数据的逻辑。所有的表都没有提前创建,而是在发生表不存在错误的时候,再以超级表为模板批量建表,然后重新执行 INSERT 语句。对于其它错误会记录当时执行的 SQL, 以便排查错误和故障恢复。这个类也对 SQL 是否超过最大长度限制做了检查。根据 TDengine 3.0 的限制,支持的最大 SQL 长度由输入参数 maxSQLLength 传入,即 1048576。
+
+SQLWriter
+
+```python
+{{#include docs/examples/python/sql_writer.py}}
+```
+
+
+
+**执行步骤**
+
+
+
+执行 Python 示例程序
+
+1. 前提条件
+
+ - 已安装 TDengine 客户端驱动
+ - 已安装 Python3, 推荐版本 >= 3.8
+ - 已安装 taospy
+
+2. 安装 faster-fifo 代替 python 内置的 multiprocessing.Queue
+
+ ```
+ pip3 install faster-fifo
+ ```
+
+3. 点击上面的“查看源码”链接复制 `fast_write_example.py` 、 `sql_writer.py` 和 `mockdatasource.py` 三个文件。
+
+4. 执行示例程序
+
+ ```
+ python3 fast_write_example.py
+ ```
+
+ 下面是一次实际运行的输出, 机器配置 16核 + 64G + 固态硬盘。
+
+ ```
+ root@vm85$ python3 fast_write_example.py 8 8
+ 2022-07-14 19:13:45,869 [root] - READ_TASK_COUNT=8, WRITE_TASK_COUNT=8, TABLE_COUNT=1000, QUEUE_SIZE=1000000, MAX_BATCH_SIZE=3000
+ 2022-07-14 19:13:48,882 [root] - WriteTask-0 started with pid 718347
+ 2022-07-14 19:13:48,883 [root] - WriteTask-1 started with pid 718348
+ 2022-07-14 19:13:48,884 [root] - WriteTask-2 started with pid 718349
+ 2022-07-14 19:13:48,884 [root] - WriteTask-3 started with pid 718350
+ 2022-07-14 19:13:48,885 [root] - WriteTask-4 started with pid 718351
+ 2022-07-14 19:13:48,885 [root] - WriteTask-5 started with pid 718352
+ 2022-07-14 19:13:48,886 [root] - WriteTask-6 started with pid 718353
+ 2022-07-14 19:13:48,886 [root] - WriteTask-7 started with pid 718354
+ 2022-07-14 19:13:48,887 [root] - ReadTask-0 started with pid 718355
+ 2022-07-14 19:13:48,888 [root] - ReadTask-1 started with pid 718356
+ 2022-07-14 19:13:48,889 [root] - ReadTask-2 started with pid 718357
+ 2022-07-14 19:13:48,889 [root] - ReadTask-3 started with pid 718358
+ 2022-07-14 19:13:48,890 [root] - ReadTask-4 started with pid 718359
+ 2022-07-14 19:13:48,891 [root] - ReadTask-5 started with pid 718361
+ 2022-07-14 19:13:48,892 [root] - ReadTask-6 started with pid 718364
+ 2022-07-14 19:13:48,893 [root] - ReadTask-7 started with pid 718365
+ 2022-07-14 19:13:56,042 [DataBaseMonitor] - count=6676310 speed=667631.0
+ 2022-07-14 19:14:06,196 [DataBaseMonitor] - count=20004310 speed=1332800.0
+ 2022-07-14 19:14:16,366 [DataBaseMonitor] - count=32290310 speed=1228600.0
+ 2022-07-14 19:14:26,527 [DataBaseMonitor] - count=44438310 speed=1214800.0
+ 2022-07-14 19:14:36,673 [DataBaseMonitor] - count=56608310 speed=1217000.0
+ 2022-07-14 19:14:46,834 [DataBaseMonitor] - count=68757310 speed=1214900.0
+ 2022-07-14 19:14:57,280 [DataBaseMonitor] - count=80992310 speed=1223500.0
+ 2022-07-14 19:15:07,689 [DataBaseMonitor] - count=93805310 speed=1281300.0
+ 2022-07-14 19:15:18,020 [DataBaseMonitor] - count=106111310 speed=1230600.0
+ 2022-07-14 19:15:28,356 [DataBaseMonitor] - count=118394310 speed=1228300.0
+ 2022-07-14 19:15:38,690 [DataBaseMonitor] - count=130742310 speed=1234800.0
+ 2022-07-14 19:15:49,000 [DataBaseMonitor] - count=143051310 speed=1230900.0
+ 2022-07-14 19:15:59,323 [DataBaseMonitor] - count=155276310 speed=1222500.0
+ 2022-07-14 19:16:09,649 [DataBaseMonitor] - count=167603310 speed=1232700.0
+ 2022-07-14 19:16:19,995 [DataBaseMonitor] - count=179976310 speed=1237300.0
+ ```
+
+
+
+:::note
+使用 Python 连接器多进程连接 TDengine 的时候,有一个限制:不能在父进程中建立连接,所有连接只能在子进程中创建。
+如果在父进程中创建连接,子进程再创建连接就会一直阻塞。这是个已知问题。
+
+:::
+
+
+
+
+
diff --git a/docs/zh/07-develop/03-insert-data/highvolume.webp b/docs/zh/07-develop/03-insert-data/highvolume.webp
new file mode 100644
index 0000000000000000000000000000000000000000..46dfc74ae3b0043c591ff930c62251da49cae7ad
Binary files /dev/null and b/docs/zh/07-develop/03-insert-data/highvolume.webp differ
diff --git a/docs/zh/07-develop/03-insert-data/index.md b/docs/zh/07-develop/03-insert-data/index.md
index 55a28e4a8ba13501e2f481c9aba67b7300da98d0..f1e5ada4dfd350e982fa0ae57412af07ac43e03a 100644
--- a/docs/zh/07-develop/03-insert-data/index.md
+++ b/docs/zh/07-develop/03-insert-data/index.md
@@ -1,5 +1,7 @@
---
+sidebar_label: 写入数据
title: 写入数据
+description: TDengine 的各种写入方式
---
TDengine 支持多种写入协议,包括 SQL,InfluxDB Line 协议, OpenTSDB Telnet 协议,OpenTSDB JSON 格式协议。数据可以单条插入,也可以批量插入,可以插入一个数据采集点的数据,也可以同时插入多个数据采集点的数据。同时,TDengine 支持多线程插入,支持时间乱序数据插入,也支持历史数据插入。InfluxDB Line 协议、OpenTSDB Telnet 协议和 OpenTSDB JSON 格式协议是 TDengine 支持的三种无模式写入协议。使用无模式方式写入无需提前创建超级表和子表,并且引擎能自适用数据对表结构做调整。
diff --git a/docs/zh/07-develop/04-query-data/index.mdx b/docs/zh/07-develop/04-query-data/index.mdx
index 2631d147a5f3e968e7153de8576e96f2c07c57cd..d6156c8a59a70af80f2632cdf3801ef7281b69d5 100644
--- a/docs/zh/07-develop/04-query-data/index.mdx
+++ b/docs/zh/07-develop/04-query-data/index.mdx
@@ -1,4 +1,5 @@
---
+sidebar_label: 查询数据
title: 查询数据
description: "主要查询功能,通过连接器执行同步查询和异步查询"
---
@@ -43,7 +44,7 @@ Query OK, 2 row(s) in set (0.001100s)
为满足物联网场景的需求,TDengine 支持几个特殊的函数,比如 twa(时间加权平均),spread (最大值与最小值的差),last_row(最后一条记录)等,更多与物联网场景相关的函数将添加进来。
-具体的查询语法请看 [TAOS SQL 的数据查询](../../taos-sql/select) 章节。
+具体的查询语法请看 [TDengine SQL 的数据查询](../../taos-sql/select) 章节。
## 多表聚合查询
@@ -51,7 +52,7 @@ Query OK, 2 row(s) in set (0.001100s)
### 示例一
-在 TAOS Shell,查找加利福尼亚州所有智能电表采集的电压平均值,并按照 location 分组。
+在 TDengine CLI,查找加利福尼亚州所有智能电表采集的电压平均值,并按照 location 分组。
```
taos> SELECT AVG(voltage), location FROM meters GROUP BY location;
@@ -64,7 +65,7 @@ Query OK, 2 rows in database (0.005995s)
### 示例二
-在 TAOS shell, 查找 groupId 为 2 的所有智能电表的记录条数,电流的最大值。
+在 TDengine CLI, 查找 groupId 为 2 的所有智能电表的记录条数,电流的最大值。
```
taos> SELECT count(*), max(current) FROM meters where groupId = 2;
@@ -74,7 +75,7 @@ taos> SELECT count(*), max(current) FROM meters where groupId = 2;
Query OK, 1 row(s) in set (0.002136s)
```
-在 [TAOS SQL 的数据查询](../../taos-sql/select) 一章,查询类操作都会注明是否支持超级表。
+在 [TDengine SQL 的数据查询](../../taos-sql/select) 一章,查询类操作都会注明是否支持超级表。
## 降采样查询、插值
@@ -122,7 +123,7 @@ Query OK, 6 rows in database (0.005515s)
如果一个时间间隔里,没有采集的数据,TDengine 还提供插值计算的功能。
-语法规则细节请见 [TAOS SQL 的按时间窗口切分聚合](../../taos-sql/distinguished) 章节。
+语法规则细节请见 [TDengine SQL 的按时间窗口切分聚合](../../taos-sql/distinguished) 章节。
## 示例代码
diff --git a/docs/zh/07-develop/07-tmq.mdx b/docs/zh/07-develop/07-tmq.mdx
index da8bf5e20ed9d230419150dd10ee6739d85a37e9..2f5c13d9b0bc0e3940fb99b45c693e2ae80c8f47 100644
--- a/docs/zh/07-develop/07-tmq.mdx
+++ b/docs/zh/07-develop/07-tmq.mdx
@@ -64,7 +64,7 @@ DLL_EXPORT void tmq_conf_destroy(tmq_conf_t *conf);
DLL_EXPORT void tmq_conf_set_auto_commit_cb(tmq_conf_t *conf, tmq_commit_cb *cb, void *param);
```
-这些 API 的文档请见 [C/C++ Connector](/reference/connector/cpp),下面介绍一下它们的具体用法(超级表和子表结构请参考“数据建模”一节),完整的示例代码请见下面 C 语言的示例代码。
+这些 API 的文档请见 [C/C++ Connector](../../connector/cpp),下面介绍一下它们的具体用法(超级表和子表结构请参考“数据建模”一节),完整的示例代码请见下面 C 语言的示例代码。
diff --git a/docs/zh/07-develop/08-cache.md b/docs/zh/07-develop/08-cache.md
index bd9da6062d3cc1a21be418079f0fee40520f4460..29e28e3dde0816d9e5a08f74abd2382854d336da 100644
--- a/docs/zh/07-develop/08-cache.md
+++ b/docs/zh/07-develop/08-cache.md
@@ -20,11 +20,11 @@ create database db0 vgroups 100 buffer 16MB
## 读缓存
-在创建数据库时可以选择是否缓存该数据库中每个子表的最新数据。由参数 cachelast 设置,分为三种情况:
-- 0: 不缓存
-- 1: 缓存子表最近一行数据,这将显著改善 last_row 函数的性能
-- 2: 缓存子表每一列最近的非 NULL 值,这将显著改善无特殊影响(比如 WHERE, ORDER BY, GROUP BY, INTERVAL)时的 last 函数的性能
-- 3: 同时缓存行和列,即等同于上述 cachelast 值为 1 或 2 时的行为同时生效
+在创建数据库时可以选择是否缓存该数据库中每个子表的最新数据。由参数 cachemodel 设置,分为四种情况:
+- none: 不缓存
+- last_row: 缓存子表最近一行数据,这将显著改善 last_row 函数的性能
+- last_value: 缓存子表每一列最近的非 NULL 值,这将显著改善无特殊影响(比如 WHERE, ORDER BY, GROUP BY, INTERVAL)时的 last 函数的性能
+- both: 同时缓存最近的行和列,即等同于上述 cachemodel 值为 last_row 和 last_value 的行为同时生效
## 元数据缓存
diff --git a/docs/zh/07-develop/09-udf.md b/docs/zh/07-develop/09-udf.md
index ef1cd4797a217b601b1b8e3eaa0e74b8c2907c88..3239eae49b05180c4a0dba5850de9f1c5e08a4f3 100644
--- a/docs/zh/07-develop/09-udf.md
+++ b/docs/zh/07-develop/09-udf.md
@@ -116,7 +116,7 @@ aggfn为函数名的占位符,需要修改为自己的函数名,如l2norm。
参数的具体含义是:
- inputDataBlock: 输入的数据块
- - resultColumn: 输出列。输出列
+ - resultColumn: 输出列
### 聚合接口函数
diff --git a/docs/zh/07-develop/_sub_java.mdx b/docs/zh/07-develop/_sub_java.mdx
index d14b5fd6095dd90f89dd2c2e828858585cfddff9..e7de158cc8d2b0b686b25bbe96e7a092c2a68e51 100644
--- a/docs/zh/07-develop/_sub_java.mdx
+++ b/docs/zh/07-develop/_sub_java.mdx
@@ -1,7 +1,5 @@
```java
{{#include docs/examples/java/src/main/java/com/taos/example/SubscribeDemo.java}}
-{{#include docs/examples/java/src/main/java/com/taos/example/MetersDeserializer.java}}
-{{#include docs/examples/java/src/main/java/com/taos/example/Meters.java}}
```
```java
{{#include docs/examples/java/src/main/java/com/taos/example/MetersDeserializer.java}}
diff --git a/docs/zh/07-develop/index.md b/docs/zh/07-develop/index.md
index 4d0f3c3cea3da3d70051dd07f835c34b4f47c3cd..efaffaea71ce68ee0a8ddbf5634c4150adc94bfb 100644
--- a/docs/zh/07-develop/index.md
+++ b/docs/zh/07-develop/index.md
@@ -1,5 +1,7 @@
---
title: 开发指南
+sidebar_label: 开发指南
+description: 让开发者能够快速上手的指南
---
开发一个应用,如果你准备采用TDengine作为时序数据处理的工具,那么有如下几个事情要做:
@@ -12,7 +14,7 @@ title: 开发指南
7. 在很多场景下(如车辆管理),应用需要获取每个数据采集点的最新状态,那么建议你采用TDengine的cache功能,而不用单独部署Redis等缓存软件。
8. 如果你发现TDengine的函数无法满足你的要求,那么你可以使用用户自定义函数来解决问题。
-本部分内容就是按照上述的顺序组织的。为便于理解,TDengine为每个功能为每个支持的编程语言都提供了示例代码。如果你希望深入了解SQL的使用,需要查看[SQL手册](/taos-sql/)。如果想更深入地了解各连接器的使用,请阅读[连接器参考指南](/reference/connector/)。如果还希望想将TDengine与第三方系统集成起来,比如Grafana, 请参考[第三方工具](/third-party/)。
+本部分内容就是按照上述的顺序组织的。为便于理解,TDengine为每个功能为每个支持的编程语言都提供了示例代码。如果你希望深入了解SQL的使用,需要查看[SQL手册](/taos-sql/)。如果想更深入地了解各连接器的使用,请阅读[连接器参考指南](../connector/)。如果还希望想将TDengine与第三方系统集成起来,比如Grafana, 请参考[第三方工具](../third-party/)。
如果在开发过程中遇到任何问题,请点击每个页面下方的["反馈问题"](https://github.com/taosdata/TDengine/issues/new/choose), 在GitHub上直接递交issue。
diff --git a/docs/zh/08-connector/02-rest-api.mdx b/docs/zh/08-connector/02-rest-api.mdx
index 4b9171c07d165bfa10aea14871da2697cae4b54d..e254244657b457e10bc2daab020b230c9a8bb2cc 100644
--- a/docs/zh/08-connector/02-rest-api.mdx
+++ b/docs/zh/08-connector/02-rest-api.mdx
@@ -1,5 +1,7 @@
---
title: REST API
+sidebar_label: REST API
+description: 详细介绍 TDengine 提供的 RESTful API.
---
为支持各种不同类型平台的开发,TDengine 提供符合 REST 设计标准的 API,即 REST API。为最大程度降低学习成本,不同于其他数据库 REST API 的设计方法,TDengine 直接通过 HTTP POST 请求 BODY 中包含的 SQL 语句来操作数据库,仅需要一个 URL。REST 连接器的使用参见 [视频教程](https://www.taosdata.com/blog/2020/11/11/1965.html)。
@@ -10,7 +12,7 @@ title: REST API
## 安装
-RESTful 接口不依赖于任何 TDengine 的库,因此客户端不需要安装任何 TDengine 的库,只要客户端的开发语言支持 HTTP 协议即可。
+RESTful 接口不依赖于任何 TDengine 的库,因此客户端不需要安装任何 TDengine 的库,只要客户端的开发语言支持 HTTP 协议即可。TDengine 的 RESTful API 由 [taosAdapter](../../reference/taosadapter) 提供,在使用 RESTful API 之前需要确保 `taosAdapter` 正常运行。
## 验证
diff --git a/docs/zh/08-connector/cpp.mdx b/docs/zh/08-connector/03-cpp.mdx
similarity index 99%
rename from docs/zh/08-connector/cpp.mdx
rename to docs/zh/08-connector/03-cpp.mdx
index bd5776d035b0228637f7ed2255c502ed73d6a654..c0bd33f12964537699849e35644a8c04e0f716f0 100644
--- a/docs/zh/08-connector/cpp.mdx
+++ b/docs/zh/08-connector/03-cpp.mdx
@@ -1,5 +1,4 @@
---
-sidebar_position: 1
sidebar_label: C/C++
title: C/C++ Connector
---
@@ -22,7 +21,7 @@ TDengine 客户端驱动的动态库位于:
## 支持的平台
-请参考[支持的平台列表](/reference/connector#支持的平台)
+请参考[支持的平台列表](../#支持的平台)
## 支持的版本
@@ -30,7 +29,7 @@ TDengine 客户端驱动的版本号与 TDengine 服务端的版本号是一一
## 安装步骤
-TDengine 客户端驱动的安装请参考 [安装指南](/reference/connector#安装步骤)
+TDengine 客户端驱动的安装请参考 [安装指南](../#安装步骤)
## 建立连接
diff --git a/docs/zh/08-connector/java.mdx b/docs/zh/08-connector/04-java.mdx
similarity index 99%
rename from docs/zh/08-connector/java.mdx
rename to docs/zh/08-connector/04-java.mdx
index 183994313e205bbaf13f30d534fa151a23216708..6b1715f8c6a2f949fca552885ea3920f43e8a849 100644
--- a/docs/zh/08-connector/java.mdx
+++ b/docs/zh/08-connector/04-java.mdx
@@ -1,6 +1,5 @@
---
toc_max_heading_level: 4
-sidebar_position: 2
sidebar_label: Java
title: TDengine Java Connector
description: TDengine Java 连接器基于标准 JDBC API 实现, 并提供原生连接与 REST连接两种连接器。
@@ -35,7 +34,7 @@ REST 连接支持所有能运行 Java 的平台。
## 版本支持
-请参考[版本支持列表](/reference/connector#版本支持)
+请参考[版本支持列表](../#版本支持)
## TDengine DataType 和 Java DataType
@@ -64,7 +63,7 @@ TDengine 目前支持时间戳、数字、字符、布尔类型,与 Java 对
使用 Java Connector 连接数据库前,需要具备以下条件:
- 已安装 Java 1.8 或以上版本运行时环境和 Maven 3.6 或以上版本
-- 已安装 TDengine 客户端驱动(使用原生连接必须安装,使用 REST 连接无需安装),具体步骤请参考[安装客户端驱动](/reference/connector#安装客户端驱动)
+- 已安装 TDengine 客户端驱动(使用原生连接必须安装,使用 REST 连接无需安装),具体步骤请参考[安装客户端驱动](../#安装客户端驱动)
### 安装连接器
@@ -630,7 +629,7 @@ public void setNString(int columnIndex, ArrayList list, int size) throws
### 无模式写入
-TDengine 支持无模式写入功能。无模式写入兼容 InfluxDB 的 行协议(Line Protocol)、OpenTSDB 的 telnet 行协议和 OpenTSDB 的 JSON 格式协议。详情请参见[无模式写入](../../schemaless)。
+TDengine 支持无模式写入功能。无模式写入兼容 InfluxDB 的 行协议(Line Protocol)、OpenTSDB 的 telnet 行协议和 OpenTSDB 的 JSON 格式协议。详情请参见[无模式写入](../../reference/schemaless/)。
**注意**:
diff --git a/docs/zh/08-connector/go.mdx b/docs/zh/08-connector/05-go.mdx
similarity index 98%
rename from docs/zh/08-connector/go.mdx
rename to docs/zh/08-connector/05-go.mdx
index fd90f964bd08d4a8ce2ce894f6349cec410b740b..9d30f75190cddbb17c40e97655002a158cd6aae6 100644
--- a/docs/zh/08-connector/go.mdx
+++ b/docs/zh/08-connector/05-go.mdx
@@ -30,7 +30,7 @@ REST 连接支持所有能运行 Go 的平台。
## 版本支持
-请参考[版本支持列表](/reference/connector#版本支持)
+请参考[版本支持列表](../#版本支持)
## 支持的功能特性
@@ -56,7 +56,7 @@ REST 连接支持所有能运行 Go 的平台。
### 安装前准备
* 安装 Go 开发环境(Go 1.14 及以上,GCC 4.8.5 及以上)
-* 如果使用原生连接器,请安装 TDengine 客户端驱动,具体步骤请参考[安装客户端驱动](/reference/connector#安装客户端驱动)
+* 如果使用原生连接器,请安装 TDengine 客户端驱动,具体步骤请参考[安装客户端驱动](../#安装客户端驱动)
配置好环境变量,检查命令:
diff --git a/docs/zh/08-connector/rust.mdx b/docs/zh/08-connector/06-rust.mdx
similarity index 98%
rename from docs/zh/08-connector/rust.mdx
rename to docs/zh/08-connector/06-rust.mdx
index e824b7e73c0236cd1f341df2070dd4be95ed946e..26f53c82d630fda168dd98b4c8ec993afc5e3a1d 100644
--- a/docs/zh/08-connector/rust.mdx
+++ b/docs/zh/08-connector/06-rust.mdx
@@ -1,6 +1,5 @@
---
toc_max_heading_level: 4
-sidebar_position: 5
sidebar_label: Rust
title: TDengine Rust Connector
---
@@ -28,7 +27,7 @@ Websocket 连接支持所有能运行 Rust 的平台。
## 版本支持
-请参考[版本支持列表](/reference/connector#版本支持)
+请参考[版本支持列表](../#版本支持)
Rust 连接器仍然在快速开发中,1.0 之前无法保证其向后兼容。建议使用 3.0 版本以上的 TDengine,以避免已知问题。
@@ -37,7 +36,7 @@ Rust 连接器仍然在快速开发中,1.0 之前无法保证其向后兼容
### 安装前准备
* 安装 Rust 开发工具链
-* 如果使用原生连接,请安装 TDengine 客户端驱动,具体步骤请参考[安装客户端驱动](/reference/connector#安装客户端驱动)
+* 如果使用原生连接,请安装 TDengine 客户端驱动,具体步骤请参考[安装客户端驱动](../#安装客户端驱动)
### 添加 taos 依赖
diff --git a/docs/zh/08-connector/python.mdx b/docs/zh/08-connector/07-python.mdx
similarity index 96%
rename from docs/zh/08-connector/python.mdx
rename to docs/zh/08-connector/07-python.mdx
index 9ce81f9d70993b484fdd2100af85fa78af45355a..0242486d3b8820ac38301d38ccbaf8bb9fc7e1c3 100644
--- a/docs/zh/08-connector/python.mdx
+++ b/docs/zh/08-connector/07-python.mdx
@@ -1,5 +1,4 @@
---
-sidebar_position: 3
sidebar_label: Python
title: TDengine Python Connector
description: "taospy 是 TDengine 的官方 Python 连接器。taospy 提供了丰富的 API, 使得 Python 应用可以很方便地使用 TDengine。tasopy 对 TDengine 的原生接口和 REST 接口都进行了封装, 分别对应 tasopy 的两个子模块:tasos 和 taosrest。除了对原生接口和 REST 接口的封装,taospy 还提供了符合 Python 数据访问规范(PEP 249)的编程接口。这使得 taospy 和很多第三方工具集成变得简单,比如 SQLAlchemy 和 pandas"
@@ -8,7 +7,7 @@ description: "taospy 是 TDengine 的官方 Python 连接器。taospy 提供了
import Tabs from "@theme/Tabs";
import TabItem from "@theme/TabItem";
-`taospy` 是 TDengine 的官方 Python 连接器。`taospy` 提供了丰富的 API, 使得 Python 应用可以很方便地使用 TDengine。`taospy` 对 TDengine 的[原生接口](/reference/connector/cpp)和 [REST 接口](/reference/rest-api)都进行了封装, 分别对应 `taospy` 包的 `taos` 模块 和 `taosrest` 模块。
+`taospy` 是 TDengine 的官方 Python 连接器。`taospy` 提供了丰富的 API, 使得 Python 应用可以很方便地使用 TDengine。`taospy` 对 TDengine 的[原生接口](../cpp)和 [REST 接口](../rest-api)都进行了封装, 分别对应 `taospy` 包的 `taos` 模块 和 `taosrest` 模块。
除了对原生接口和 REST 接口的封装,`taospy` 还提供了符合 [Python 数据访问规范(PEP 249)](https://peps.python.org/pep-0249/) 的编程接口。这使得 `taospy` 和很多第三方工具集成变得简单,比如 [SQLAlchemy](https://www.sqlalchemy.org/) 和 [pandas](https://pandas.pydata.org/)。
使用客户端驱动提供的原生接口直接与服务端建立的连接的方式下文中称为“原生连接”;使用 taosAdapter 提供的 REST 接口与服务端建立的连接的方式下文中称为“REST 连接”。
@@ -17,7 +16,7 @@ Python 连接器的源码托管在 [GitHub](https://github.com/taosdata/taos-con
## 支持的平台
-- 原生连接[支持的平台](/reference/connector/#支持的平台)和 TDengine 客户端支持的平台一致。
+- 原生连接[支持的平台](../#支持的平台)和 TDengine 客户端支持的平台一致。
- REST 连接支持所有能运行 Python 的平台。
## 版本选择
@@ -275,7 +274,7 @@ TaosCursor 类使用原生连接进行写入、查询操作。在客户端多线
##### RestClient 类的使用
-`RestClient` 类是对于 [REST API](/reference/rest-api) 的直接封装。它只包含一个 `sql()` 方法用于执行任意 SQL 语句, 并返回执行结果。
+`RestClient` 类是对于 [REST API](../rest-api) 的直接封装。它只包含一个 `sql()` 方法用于执行任意 SQL 语句, 并返回执行结果。
```python title="RestClient 的使用"
{{#include docs/examples/python/rest_client_example.py}}
diff --git a/docs/zh/08-connector/node.mdx b/docs/zh/08-connector/08-node.mdx
similarity index 96%
rename from docs/zh/08-connector/node.mdx
rename to docs/zh/08-connector/08-node.mdx
index f840c26fd21a0ea653e30835e681291d007cf983..167ae069d6175873679e8c7cc4ecbb16dafe2ad8 100644
--- a/docs/zh/08-connector/node.mdx
+++ b/docs/zh/08-connector/08-node.mdx
@@ -1,6 +1,5 @@
---
toc_max_heading_level: 4
-sidebar_position: 6
sidebar_label: Node.js
title: TDengine Node.js Connector
---
@@ -28,7 +27,7 @@ REST 连接器支持所有能运行 Node.js 的平台。
## 版本支持
-请参考[版本支持列表](/reference/connector#版本支持)
+请参考[版本支持列表](../#版本支持)
## 支持的功能特性
@@ -52,7 +51,7 @@ REST 连接器支持所有能运行 Node.js 的平台。
### 安装前准备
- 安装 Node.js 开发环境
-- 如果使用 REST 连接器,跳过此步。但如果使用原生连接器,请安装 TDengine 客户端驱动,具体步骤请参考[安装客户端驱动](/reference/connector#安装客户端驱动)。我们使用 [node-gyp](https://github.com/nodejs/node-gyp) 和 TDengine 实例进行交互,还需要根据具体操作系统来安装下文提到的一些依赖工具。
+- 如果使用 REST 连接器,跳过此步。但如果使用原生连接器,请安装 TDengine 客户端驱动,具体步骤请参考[安装客户端驱动](../#安装客户端驱动)。我们使用 [node-gyp](https://github.com/nodejs/node-gyp) 和 TDengine 实例进行交互,还需要根据具体操作系统来安装下文提到的一些依赖工具。
diff --git a/docs/zh/08-connector/csharp.mdx b/docs/zh/08-connector/09-csharp.mdx
similarity index 91%
rename from docs/zh/08-connector/csharp.mdx
rename to docs/zh/08-connector/09-csharp.mdx
index 00d6dffa2cfa44efb705191a077548c2abe30200..4e49d84835d66622293e607a58699ae93fc7013d 100644
--- a/docs/zh/08-connector/csharp.mdx
+++ b/docs/zh/08-connector/09-csharp.mdx
@@ -1,6 +1,5 @@
---
toc_max_heading_level: 4
-sidebar_position: 7
sidebar_label: C#
title: C# Connector
---
@@ -18,7 +17,7 @@ import CSAsyncQuery from "../07-develop/04-query-data/_cs_async.mdx"
`TDengine.Connector` 是 TDengine 提供的 C# 语言连接器。C# 开发人员可以通过它开发存取 TDengine 集群数据的 C# 应用软件。
-`TDengine.Connector` 连接器支持通过 TDengine 客户端驱动(taosc)建立与 TDengine 运行实例的连接,提供数据写入、查询、订阅、schemaless 数据写入、参数绑定接口数据写入等功能 `TDengine.Connector` 目前暂未提供 REST 连接方式,用户可以参考 [REST API](/reference/rest-api/) 文档自行编写。
+`TDengine.Connector` 连接器支持通过 TDengine 客户端驱动(taosc)建立与 TDengine 运行实例的连接,提供数据写入、查询、订阅、schemaless 数据写入、参数绑定接口数据写入等功能 `TDengine.Connector` 目前暂未提供 REST 连接方式,用户可以参考 [REST API](../rest-api/) 文档自行编写。
本文介绍如何在 Linux 或 Windows 环境中安装 `TDengine.Connector`,并通过 `TDengine.Connector` 连接 TDengine 集群,进行数据写入、查询等基本操作。
@@ -32,7 +31,7 @@ import CSAsyncQuery from "../07-develop/04-query-data/_cs_async.mdx"
## 版本支持
-请参考[版本支持列表](/reference/connector#版本支持)
+请参考[版本支持列表](../#版本支持)
## 支持的功能特性
@@ -49,7 +48,7 @@ import CSAsyncQuery from "../07-develop/04-query-data/_cs_async.mdx"
* 安装 [.NET SDK](https://dotnet.microsoft.com/download)
* [Nuget 客户端](https://docs.microsoft.com/en-us/nuget/install-nuget-client-tools) (可选安装)
-* 安装 TDengine 客户端驱动,具体步骤请参考[安装客户端驱动](/reference/connector#安装客户端驱动)
+* 安装 TDengine 客户端驱动,具体步骤请参考[安装客户端驱动](../#安装客户端驱动)
### 使用 dotnet CLI 安装
@@ -170,9 +169,9 @@ namespace TDengineExample
### 第三方驱动
-`Maikebing.Data.Taos` 是一个 TDengine 的 ADO.NET 连接器,支持 Linux,Windows 平台。该连接器由社区贡献者`麦壳饼@@maikebing` 提供,具体请参考:
+[`IoTSharp.Data.Taos`](https://github.com/IoTSharp/EntityFrameworkCore.Taos) 是一个 TDengine 的 ADO.NET 连接器,其中包含了用于EntityFrameworkCore 的提供程序 IoTSharp.EntityFrameworkCore.Taos 和健康检查组件 IoTSharp.HealthChecks.Taos ,支持 Linux,Windows 平台。该连接器由社区贡献者`麦壳饼@@maikebing` 提供,具体请参考:
-* 接口下载:
+* 接口下载:
* 用法说明:
## 常见问题
diff --git a/docs/zh/08-connector/php.mdx b/docs/zh/08-connector/10-php.mdx
similarity index 96%
rename from docs/zh/08-connector/php.mdx
rename to docs/zh/08-connector/10-php.mdx
index 5617dc6f738788a54e2480f592a9424fc2418f1c..5e32c709de89d69b8602b506a9c774cb0a0244f0 100644
--- a/docs/zh/08-connector/php.mdx
+++ b/docs/zh/08-connector/10-php.mdx
@@ -1,6 +1,5 @@
---
-sidebar_position: 1
-sidebar_label: PHP(社区贡献)
+sidebar_label: PHP
title: PHP Connector
---
@@ -38,7 +37,7 @@ TDengine 客户端驱动的版本号与 TDengine 服务端的版本号是一一
### 安装 TDengine 客户端驱动
-TDengine 客户端驱动的安装请参考 [安装指南](/reference/connector#安装步骤)
+TDengine 客户端驱动的安装请参考 [安装指南](../#安装步骤)
### 编译安装 php-tdengine
diff --git a/docs/zh/08-connector/_01-error-code.md b/docs/zh/08-connector/_01-error-code.md
index 53e006e108543805232c8195474f2afd793e7332..3111d4bbf8a071500052309f2e3643f494c1be9a 100644
--- a/docs/zh/08-connector/_01-error-code.md
+++ b/docs/zh/08-connector/_01-error-code.md
@@ -1,6 +1,7 @@
---
sidebar_label: 错误码
title: TDengine C/C++ 连接器错误码
+description: C/C++ 连接器的错误码列表和详细说明
---
本文中详细列举了在使用 TDengine C/C++ 连接器时客户端可能得到的错误码以及所要采取的相应动作。其它语言的连接器在使用原生连接方式时也会将所得到的返回码返回给连接器的调用者。
diff --git a/docs/zh/08-connector/03-connector.mdx b/docs/zh/08-connector/index.md
similarity index 98%
rename from docs/zh/08-connector/03-connector.mdx
rename to docs/zh/08-connector/index.md
index bdad0b7e25a3a94fa34b14bf47403ba2afd7db8d..17de8e926cd9a3633dc8746b0fb49c38ff8ca61f 100644
--- a/docs/zh/08-connector/03-connector.mdx
+++ b/docs/zh/08-connector/index.md
@@ -1,5 +1,7 @@
---
+sidebar_label: 连接器
title: 连接器
+description: 详细介绍各种语言的连接器及 REST API
---
TDengine 提供了丰富的应用程序开发接口,为了便于用户快速开发自己的应用,TDengine 支持了多种编程语言的连接器,其中官方连接器包括支持 C/C++、Java、Python、Go、Node.js、C# 和 Rust 的连接器。这些连接器支持使用原生接口(taosc)和 REST 接口(部分语言暂不支持)连接 TDengine 集群。社区开发者也贡献了多个非官方连接器,例如 ADO.NET 连接器、Lua 连接器和 PHP 连接器。
diff --git a/docs/zh/10-deployment/01-deploy.md b/docs/zh/10-deployment/01-deploy.md
index 22a9c2ff8e68880ce5b0be2e01924eca12707a37..03b4ce30f980cd77e9845076ce9bb35c4474f948 100644
--- a/docs/zh/10-deployment/01-deploy.md
+++ b/docs/zh/10-deployment/01-deploy.md
@@ -1,6 +1,7 @@
---
sidebar_label: 手动部署
title: 集群部署和管理
+description: 使用命令行工具手动部署 TDengine 集群
---
## 准备工作
@@ -70,7 +71,7 @@ serverPort 6030
## 启动集群
-按照《立即开始》里的步骤,启动第一个数据节点,例如 h1.taosdata.com,然后执行 taos,启动 taos shell,从 shell 里执行命令“SHOW DNODES”,如下所示:
+按照《立即开始》里的步骤,启动第一个数据节点,例如 h1.taosdata.com,然后执行 taos,启动 TDengine CLI,在其中执行命令 “SHOW DNODES”,如下所示:
```
taos> show dnodes;
@@ -114,7 +115,7 @@ SHOW DNODES;
任何已经加入集群在线的数据节点,都可以作为后续待加入节点的 firstEp。
firstEp 这个参数仅仅在该数据节点首次加入集群时有作用,加入集群后,该数据节点会保存最新的 mnode 的 End Point 列表,不再依赖这个参数。
-接下来,配置文件中的 firstEp 参数就主要在客户端连接的时候使用了,例如 taos shell 如果不加参数,会默认连接由 firstEp 指定的节点。
+接下来,配置文件中的 firstEp 参数就主要在客户端连接的时候使用了,例如 TDengine CLI 如果不加参数,会默认连接由 firstEp 指定的节点。
两个没有配置 firstEp 参数的数据节点 dnode 启动后,会独立运行起来。这个时候,无法将其中一个数据节点加入到另外一个数据节点,形成集群。无法将两个独立的集群合并成为新的集群。
:::
diff --git a/docs/zh/10-deployment/03-k8s.md b/docs/zh/10-deployment/03-k8s.md
index 396b8343243ba824dd87b83fd5f94c14c2059730..0cae59657c2a0199d3452bc37d36f2c537944d21 100644
--- a/docs/zh/10-deployment/03-k8s.md
+++ b/docs/zh/10-deployment/03-k8s.md
@@ -1,6 +1,7 @@
---
sidebar_label: Kubernetes
title: 在 Kubernetes 上部署 TDengine 集群
+description: 利用 Kubernetes 部署 TDengine 集群的详细指南
---
作为面向云原生架构设计的时序数据库,TDengine 支持 Kubernetes 部署。这里介绍如何使用 YAML 文件一步一步从头创建一个 TDengine 集群,并重点介绍 Kubernetes 环境下 TDengine 的常用操作。
@@ -9,6 +10,7 @@ title: 在 Kubernetes 上部署 TDengine 集群
要使用 Kubernetes 部署管理 TDengine 集群,需要做好如下准备工作。
+* 本文适用 Kubernetes v1.5 以上版本
* 本文和下一章使用 minikube、kubectl 和 helm 等工具进行安装部署,请提前安装好相应软件
* Kubernetes 已经安装部署并能正常访问使用或更新必要的容器仓库或其他服务
@@ -365,7 +367,7 @@ kubectl scale statefulsets tdengine --replicas=1
```
-在 taos shell 中的所有数据库操作将无法成功。
+在 TDengine CLI 中的所有数据库操作将无法成功。
```
taos> show dnodes;
diff --git a/docs/zh/10-deployment/05-helm.md b/docs/zh/10-deployment/05-helm.md
index 9a723ff62f23da4906ee268becef1d812c29d797..9a3b21f09296e6f5a8dbd089225b6580b9567586 100644
--- a/docs/zh/10-deployment/05-helm.md
+++ b/docs/zh/10-deployment/05-helm.md
@@ -1,6 +1,7 @@
---
sidebar_label: Helm
title: 使用 Helm 部署 TDengine 集群
+description: 使用 Helm 部署 TDengine 集群的详细指南
---
Helm 是 Kubernetes 的包管理器,上一节使用 Kubernetes 部署 TDengine 集群的操作已经足够简单,但 Helm 依然可以提供更强大的能力。
@@ -171,70 +172,19 @@ taoscfg:
TAOS_REPLICA: "1"
- # number of days per DB file
- # TAOS_DAYS: "10"
-
- # number of days to keep DB file, default is 10 years.
- #TAOS_KEEP: "3650"
-
- # cache block size (Mbyte)
- #TAOS_CACHE: "16"
-
- # number of cache blocks per vnode
- #TAOS_BLOCKS: "6"
-
- # minimum rows of records in file block
- #TAOS_MIN_ROWS: "100"
-
- # maximum rows of records in file block
- #TAOS_MAX_ROWS: "4096"
-
- #
- # TAOS_NUM_OF_THREADS_PER_CORE: number of threads per CPU core
- #TAOS_NUM_OF_THREADS_PER_CORE: "1.0"
+ # TAOS_NUM_OF_RPC_THREADS: number of threads for RPC
+ #TAOS_NUM_OF_RPC_THREADS: "2"
#
# TAOS_NUM_OF_COMMIT_THREADS: number of threads to commit cache data
#TAOS_NUM_OF_COMMIT_THREADS: "4"
- #
- # TAOS_RATIO_OF_QUERY_CORES:
- # the proportion of total CPU cores available for query processing
- # 2.0: the query threads will be set to double of the CPU cores.
- # 1.0: all CPU cores are available for query processing [default].
- # 0.5: only half of the CPU cores are available for query.
- # 0.0: only one core available.
- #TAOS_RATIO_OF_QUERY_CORES: "1.0"
-
- #
- # TAOS_KEEP_COLUMN_NAME:
- # the last_row/first/last aggregator will not change the original column name in the result fields
- #TAOS_KEEP_COLUMN_NAME: "0"
-
- # enable/disable backuping vnode directory when removing vnode
- #TAOS_VNODE_BAK: "1"
-
# enable/disable installation / usage report
#TAOS_TELEMETRY_REPORTING: "1"
- # enable/disable load balancing
- #TAOS_BALANCE: "1"
-
- # max timer control blocks
- #TAOS_MAX_TMR_CTRL: "512"
-
# time interval of system monitor, seconds
#TAOS_MONITOR_INTERVAL: "30"
- # number of seconds allowed for a dnode to be offline, for cluster only
- #TAOS_OFFLINE_THRESHOLD: "8640000"
-
- # RPC re-try timer, millisecond
- #TAOS_RPC_TIMER: "1000"
-
- # RPC maximum time for ack, seconds.
- #TAOS_RPC_MAX_TIME: "600"
-
# time interval of dnode status reporting to mnode, seconds, for cluster only
#TAOS_STATUS_INTERVAL: "1"
@@ -245,37 +195,7 @@ taoscfg:
#TAOS_MIN_SLIDING_TIME: "10"
# minimum time window, milli-second
- #TAOS_MIN_INTERVAL_TIME: "10"
-
- # maximum delay before launching a stream computation, milli-second
- #TAOS_MAX_STREAM_COMP_DELAY: "20000"
-
- # maximum delay before launching a stream computation for the first time, milli-second
- #TAOS_MAX_FIRST_STREAM_COMP_DELAY: "10000"
-
- # retry delay when a stream computation fails, milli-second
- #TAOS_RETRY_STREAM_COMP_DELAY: "10"
-
- # the delayed time for launching a stream computation, from 0.1(default, 10% of whole computing time window) to 0.9
- #TAOS_STREAM_COMP_DELAY_RATIO: "0.1"
-
- # max number of vgroups per db, 0 means configured automatically
- #TAOS_MAX_VGROUPS_PER_DB: "0"
-
- # max number of tables per vnode
- #TAOS_MAX_TABLES_PER_VNODE: "1000000"
-
- # the number of acknowledgments required for successful data writing
- #TAOS_QUORUM: "1"
-
- # enable/disable compression
- #TAOS_COMP: "2"
-
- # write ahead log (WAL) level, 0: no wal; 1: write wal, but no fysnc; 2: write wal, and call fsync
- #TAOS_WAL_LEVEL: "1"
-
- # if walLevel is set to 2, the cycle of fsync being executed, if set to 0, fsync is called right away
- #TAOS_FSYNC: "3000"
+ #TAOS_MIN_INTERVAL_TIME: "1"
# the compressed rpc message, option:
# -1 (no compression)
@@ -283,17 +203,8 @@ taoscfg:
# > 0 (rpc message body which larger than this value will be compressed)
#TAOS_COMPRESS_MSG_SIZE: "-1"
- # max length of an SQL
- #TAOS_MAX_SQL_LENGTH: "1048576"
-
- # the maximum number of records allowed for super table time sorting
- #TAOS_MAX_NUM_OF_ORDERED_RES: "100000"
-
# max number of connections allowed in dnode
- #TAOS_MAX_SHELL_CONNS: "5000"
-
- # max number of connections allowed in client
- #TAOS_MAX_CONNECTIONS: "5000"
+ #TAOS_MAX_SHELL_CONNS: "50000"
# stop writing logs when the disk size of the log folder is less than this value
#TAOS_MINIMAL_LOG_DIR_G_B: "0.1"
@@ -313,21 +224,8 @@ taoscfg:
# enable/disable system monitor
#TAOS_MONITOR: "1"
- # enable/disable recording the SQL statements via restful interface
- #TAOS_HTTP_ENABLE_RECORD_SQL: "0"
-
- # number of threads used to process http requests
- #TAOS_HTTP_MAX_THREADS: "2"
-
- # maximum number of rows returned by the restful interface
- #TAOS_RESTFUL_ROW_LIMIT: "10240"
-
- # The following parameter is used to limit the maximum number of lines in log files.
- # max number of lines per log filters
- # numOfLogLines 10000000
-
# enable/disable async log
- #TAOS_ASYNC_LOG: "0"
+ #TAOS_ASYNC_LOG: "1"
#
# time of keeping log files, days
@@ -344,25 +242,8 @@ taoscfg:
# debug flag for all log type, take effect when non-zero value\
#TAOS_DEBUG_FLAG: "143"
- # enable/disable recording the SQL in taos client
- #TAOS_ENABLE_RECORD_SQL: "0"
-
# generate core file when service crash
#TAOS_ENABLE_CORE_FILE: "1"
-
- # maximum display width of binary and nchar fields in the shell. The parts exceeding this limit will be hidden
- #TAOS_MAX_BINARY_DISPLAY_WIDTH: "30"
-
- # enable/disable stream (continuous query)
- #TAOS_STREAM: "1"
-
- # in retrieve blocking model, only in 50% query threads will be used in query processing in dnode
- #TAOS_RETRIEVE_BLOCKING_MODEL: "0"
-
- # the maximum allowed query buffer size in MB during query processing for each data node
- # -1 no limit (default)
- # 0 no query allowed, queries are disabled
- #TAOS_QUERY_BUFFER_SIZE: "-1"
```
## 扩容
diff --git a/docs/zh/10-deployment/index.md b/docs/zh/10-deployment/index.md
index 96ac7b176d1125df6cf4763a485c4edba520a48c..4ff1add779c68a7098002dd95dcf28c9dc1acf72 100644
--- a/docs/zh/10-deployment/index.md
+++ b/docs/zh/10-deployment/index.md
@@ -1,5 +1,7 @@
---
+sidebar_label: 部署集群
title: 部署集群
+description: 部署 TDengine 集群的多种方式
---
TDengine 支持集群,提供水平扩展的能力。如果需要获得更高的处理能力,只需要多增加节点即可。TDengine 采用虚拟节点技术,将一个节点虚拟化为多个虚拟节点,以实现负载均衡。同时,TDengine可以将多个节点上的虚拟节点组成虚拟节点组,通过多副本机制,以保证系统的高可用。TDengine的集群功能完全开源。
diff --git a/docs/zh/12-taos-sql/01-data-type.md b/docs/zh/12-taos-sql/01-data-type.md
index 628086f5a9f31d15fccdae107b8bd997a6ba1c0b..ee7b3a4715a11346b9a06da20dbc93ef309c0a3d 100644
--- a/docs/zh/12-taos-sql/01-data-type.md
+++ b/docs/zh/12-taos-sql/01-data-type.md
@@ -1,6 +1,6 @@
---
-sidebar_label: 支持的数据类型
-title: 支持的数据类型
+sidebar_label: 数据类型
+title: 数据类型
description: "TDengine 支持的数据类型: 时间戳、浮点型、JSON 类型等"
---
@@ -11,7 +11,7 @@ description: "TDengine 支持的数据类型: 时间戳、浮点型、JSON 类
- 时间格式为 `YYYY-MM-DD HH:mm:ss.MS`,默认时间分辨率为毫秒。比如:`2017-08-12 18:25:58.128`
- 内部函数 now 是客户端的当前时间
- 插入记录时,如果时间戳为 now,插入数据时使用提交这条记录的客户端的当前时间
-- Epoch Time:时间戳也可以是一个长整数,表示从格林威治时间 1970-01-01 00:00:00.000 (UTC/GMT) 开始的毫秒数(相应地,如果所在 Database 的时间精度设置为“微秒”,则长整型格式的时间戳含义也就对应于从格林威治时间 1970-01-01 00:00:00.000 (UTC/GMT) 开始的微秒数;纳秒精度逻辑类似。)
+- Epoch Time:时间戳也可以是一个长整数,表示从 UTC 时间 1970-01-01 00:00:00 开始的毫秒数。相应地,如果所在 Database 的时间精度设置为“微秒”,则长整型格式的时间戳含义也就对应于从 UTC 时间 1970-01-01 00:00:00 开始的微秒数;纳秒精度逻辑类似。
- 时间可以加减,比如 now-2h,表明查询时刻向前推 2 个小时(最近 2 小时)。数字后面的时间单位可以是 b(纳秒)、u(微秒)、a(毫秒)、s(秒)、m(分)、h(小时)、d(天)、w(周)。 比如 `select * from t1 where ts > now-2w and ts <= now-1w`,表示查询两周前整整一周的数据。在指定降采样操作(down sampling)的时间窗口(interval)时,时间单位还可以使用 n (自然月) 和 y (自然年)。
TDengine 缺省的时间戳精度是毫秒,但通过在 `CREATE DATABASE` 时传递的 PRECISION 参数也可以支持微秒和纳秒。
diff --git a/docs/zh/12-taos-sql/02-database.md b/docs/zh/12-taos-sql/02-database.md
index 1675356c49c3435d6f9dad3ccc6b868da929f08f..c76311f008433f36259b08acaf56cafa729550b7 100644
--- a/docs/zh/12-taos-sql/02-database.md
+++ b/docs/zh/12-taos-sql/02-database.md
@@ -1,6 +1,6 @@
---
-sidebar_label: 数据库管理
-title: 数据库管理
+sidebar_label: 数据库
+title: 数据库
description: "创建、删除数据库,查看、修改数据库参数"
---
@@ -71,9 +71,9 @@ database_option: {
- SINGLE_STABLE:表示此数据库中是否只可以创建一个超级表,用于超级表列非常多的情况。
- 0:表示可以创建多张超级表。
- 1:表示只可以创建一张超级表。
-- WAL_RETENTION_PERIOD:wal 文件的额外保留策略,用于数据订阅。wal 的保存时长,单位为 s。默认为 0,即落盘后立即删除。-1 表示不删除。
-- WAL_RETENTION_SIZE:wal 文件的额外保留策略,用于数据订阅。wal 的保存的最大上限,单位为 KB。默认为 0,即落盘后立即删除。-1 表示不删除。
-- WAL_ROLL_PERIOD:wal 文件切换时长,单位为 s。当 wal 文件创建并写入后,经过该时间,会自动创建一个新的 wal 文件。默认为 0,即仅在落盘时创建新文件。
+- WAL_RETENTION_PERIOD:wal 文件的额外保留策略,用于数据订阅。wal 的保存时长,单位为 s。单副本默认为 0,即落盘后立即删除。-1 表示不删除。多副本默认为 4 天。
+- WAL_RETENTION_SIZE:wal 文件的额外保留策略,用于数据订阅。wal 的保存的最大上限,单位为 KB。单副本默认为 0,即落盘后立即删除。多副本默认为-1,表示不删除。
+- WAL_ROLL_PERIOD:wal 文件切换时长,单位为 s。当 wal 文件创建并写入后,经过该时间,会自动创建一个新的 wal 文件。单副本默认为 0,即仅在落盘时创建新文件。多副本默认为 1 天。
- WAL_SEGMENT_SIZE:wal 单个文件大小,单位为 KB。当前写入文件大小超过上限后会自动创建一个新的 wal 文件。默认为 0,即仅在落盘时创建新文件。
### 创建数据库示例
diff --git a/docs/zh/12-taos-sql/03-table.md b/docs/zh/12-taos-sql/03-table.md
index 0e104bb7b6f09e886ab3c6cb55b1ecd68dfaf1ce..f6790e3c692b815c1031413933c47eb7ad203204 100644
--- a/docs/zh/12-taos-sql/03-table.md
+++ b/docs/zh/12-taos-sql/03-table.md
@@ -1,5 +1,7 @@
---
-title: 表管理
+title: 表
+sidebar_label: 表
+description: 对表的各种管理操作
---
## 创建表
@@ -8,27 +10,24 @@ title: 表管理
```sql
CREATE TABLE [IF NOT EXISTS] [db_name.]tb_name (create_definition [, create_definitionn] ...) [table_options]
-
+
CREATE TABLE create_subtable_clause
-
+
CREATE TABLE [IF NOT EXISTS] [db_name.]tb_name (create_definition [, create_definitionn] ...)
[TAGS (create_definition [, create_definitionn] ...)]
[table_options]
-
+
create_subtable_clause: {
create_subtable_clause [create_subtable_clause] ...
| [IF NOT EXISTS] [db_name.]tb_name USING [db_name.]stb_name [(tag_name [, tag_name] ...)] TAGS (tag_value [, tag_value] ...)
}
-
+
create_definition:
- col_name column_definition
-
-column_definition:
- type_name [comment 'string_value']
-
+ col_name column_type
+
table_options:
table_option ...
-
+
table_option: {
COMMENT 'string_value'
| WATERMARK duration[,duration]
@@ -52,12 +51,13 @@ table_option: {
需要注意的是转义字符中的内容必须是可打印字符。
**参数说明**
+
1. COMMENT:表注释。可用于超级表、子表和普通表。
-2. WATERMARK:指定窗口的关闭时间,默认值为 5 秒,最小单位毫秒,范围为0到15分钟,多个以逗号分隔。只可用于超级表,且只有当数据库使用了RETENTIONS参数时,才可以使用此表参数。
-3. MAX_DELAY:用于控制推送计算结果的最大延迟,默认值为 interval 的值(但不能超过最大值),最小单位毫秒,范围为1毫秒到15分钟,多个以逗号分隔。注:不建议 MAX_DELAY 设置太小,否则会过于频繁的推送结果,影响存储和查询性能,如无特殊需求,取默认值即可。只可用于超级表,且只有当数据库使用了RETENTIONS参数时,才可以使用此表参数。
-4. ROLLUP:Rollup 指定的聚合函数,提供基于多层级的降采样聚合结果。只可用于超级表。只有当数据库使用了RETENTIONS参数时,才可以使用此表参数。作用于超级表除TS列外的其它所有列,但是只能定义一个聚合函数。 聚合函数支持 avg, sum, min, max, last, first。
-5. SMA:Small Materialized Aggregates,提供基于数据块的自定义预计算功能。预计算类型包括MAX、MIN和SUM。可用于超级表/普通表。
-6. TTL:Time to Live,是用户用来指定表的生命周期的参数。如果在持续的TTL时间内,都没有数据写入该表,则TDengine系统会自动删除该表。这个TTL的时间只是一个大概时间,我们系统不保证到了时间一定会将其删除,而只保证存在这样一个机制。TTL单位是天,默认为0,表示不限制。用户需要注意,TTL优先级高于KEEP,即TTL时间满足删除机制时,即使当前数据的存在时间小于KEEP,此表也会被删除。只可用于子表和普通表。
+2. WATERMARK:指定窗口的关闭时间,默认值为 5 秒,最小单位毫秒,范围为 0 到 15 分钟,多个以逗号分隔。只可用于超级表,且只有当数据库使用了 RETENTIONS 参数时,才可以使用此表参数。
+3. MAX_DELAY:用于控制推送计算结果的最大延迟,默认值为 interval 的值(但不能超过最大值),最小单位毫秒,范围为 1 毫秒到 15 分钟,多个以逗号分隔。注:不建议 MAX_DELAY 设置太小,否则会过于频繁的推送结果,影响存储和查询性能,如无特殊需求,取默认值即可。只可用于超级表,且只有当数据库使用了 RETENTIONS 参数时,才可以使用此表参数。
+4. ROLLUP:Rollup 指定的聚合函数,提供基于多层级的降采样聚合结果。只可用于超级表。只有当数据库使用了 RETENTIONS 参数时,才可以使用此表参数。作用于超级表除 TS 列外的其它所有列,但是只能定义一个聚合函数。 聚合函数支持 avg, sum, min, max, last, first。
+5. SMA:Small Materialized Aggregates,提供基于数据块的自定义预计算功能。预计算类型包括 MAX、MIN 和 SUM。可用于超级表/普通表。
+6. TTL:Time to Live,是用户用来指定表的生命周期的参数。如果创建表时指定了这个参数,当该表的存在时间超过 TTL 指定的时间后,TDengine 自动删除该表。这个 TTL 的时间只是一个大概时间,系统不保证到了时间一定会将其删除,而只保证存在这样一个机制且最终一定会删除。TTL 单位是天,默认为 0,表示不限制,到期时间为表创建时间加上 TTL 时间。
## 创建子表
@@ -87,7 +87,7 @@ CREATE TABLE [IF NOT EXISTS] tb_name1 USING stb_name TAGS (tag_value1, ...) [IF
```sql
ALTER TABLE [db_name.]tb_name alter_table_clause
-
+
alter_table_clause: {
alter_table_options
| ADD COLUMN col_name column_type
@@ -95,10 +95,10 @@ alter_table_clause: {
| MODIFY COLUMN col_name column_type
| RENAME COLUMN old_col_name new_col_name
}
-
+
alter_table_options:
alter_table_option ...
-
+
alter_table_option: {
TTL value
| COMMENT 'string_value'
@@ -108,6 +108,7 @@ alter_table_option: {
**使用说明**
对普通表可以进行如下修改操作
+
1. ADD COLUMN:添加列。
2. DROP COLUMN:删除列。
3. MODIFY COLUMN:修改列定义,如果数据列的类型是可变长类型,那么可以使用此指令修改其宽度,只能改大,不能改小。
@@ -141,15 +142,15 @@ ALTER TABLE tb_name RENAME COLUMN old_col_name new_col_name
```sql
ALTER TABLE [db_name.]tb_name alter_table_clause
-
+
alter_table_clause: {
alter_table_options
| SET TAG tag_name = new_tag_value
}
-
+
alter_table_options:
alter_table_option ...
-
+
alter_table_option: {
TTL value
| COMMENT 'string_value'
@@ -157,6 +158,7 @@ alter_table_option: {
```
**使用说明**
+
1. 对子表的列和标签的修改,除了更改标签值以外,都要通过超级表才能进行。
### 修改子表标签值
@@ -167,7 +169,7 @@ ALTER TABLE tb_name SET TAG tag_name=new_tag_value;
## 删除表
-可以在一条SQL语句中删除一个或多个普通表或子表。
+可以在一条 SQL 语句中删除一个或多个普通表或子表。
```sql
DROP TABLE [IF EXISTS] [db_name.]tb_name [, [IF EXISTS] [db_name.]tb_name] ...
@@ -177,7 +179,7 @@ DROP TABLE [IF EXISTS] [db_name.]tb_name [, [IF EXISTS] [db_name.]tb_name] ...
### 显示所有表
-如下SQL语句可以列出当前数据库中的所有表名。
+如下 SQL 语句可以列出当前数据库中的所有表名。
```sql
SHOW TABLES [LIKE tb_name_wildchar];
diff --git a/docs/zh/12-taos-sql/04-stable.md b/docs/zh/12-taos-sql/04-stable.md
index 59d9657694340ae263fb23b8c2b17ede8984426d..95ef405fa780e831628e21766e1b3c3b18265059 100644
--- a/docs/zh/12-taos-sql/04-stable.md
+++ b/docs/zh/12-taos-sql/04-stable.md
@@ -1,6 +1,7 @@
---
-sidebar_label: 超级表管理
-title: 超级表 STable 管理
+sidebar_label: 超级表
+title: 超级表
+description: 对超级表的各种管理操作
---
## 创建超级表
diff --git a/docs/zh/12-taos-sql/05-insert.md b/docs/zh/12-taos-sql/05-insert.md
index c91e70c481055b804d88c8911fb454a3dd15b799..59af9c55ed076fb23814a24a5d2429e51d5fc051 100644
--- a/docs/zh/12-taos-sql/05-insert.md
+++ b/docs/zh/12-taos-sql/05-insert.md
@@ -1,6 +1,7 @@
---
sidebar_label: 数据写入
title: 数据写入
+description: 写入数据的详细语法
---
## 写入语法
diff --git a/docs/zh/12-taos-sql/06-select.md b/docs/zh/12-taos-sql/06-select.md
index 5312d7d2f3597ca63d9d3c43bc2264ca75877fb7..b0a7d88efea4811b193117f220871af64341ff97 100644
--- a/docs/zh/12-taos-sql/06-select.md
+++ b/docs/zh/12-taos-sql/06-select.md
@@ -1,6 +1,7 @@
---
sidebar_label: 数据查询
title: 数据查询
+description: 查询数据的详细语法
---
## 查询语法
@@ -52,11 +53,6 @@ window_clause: {
| STATE_WINDOW(col)
| INTERVAL(interval_val [, interval_offset]) [SLIDING (sliding_val)] [WATERMARK(watermark_val)] [FILL(fill_mod_and_val)]
-changes_option: {
- DURATION duration_val
- | ROWS rows_val
-}
-
group_by_clause:
GROUP BY expr [, expr] ... HAVING condition
@@ -126,7 +122,6 @@ SELECT DISTINCT col_name [, col_name ...] FROM tb_name;
1. cfg 文件中的配置参数 maxNumOfDistinctRes 将对 DISTINCT 能够输出的数据行数进行限制。其最小值是 100000,最大值是 100000000,默认值是 10000000。如果实际计算结果超出了这个限制,那么会仅输出这个数量范围内的部分。
2. 由于浮点数天然的精度机制原因,在特定情况下,对 FLOAT 和 DOUBLE 列使用 DISTINCT 并不能保证输出值的完全唯一性。
-3. 在当前版本下,DISTINCT 不能在嵌套查询的子查询中使用,也不能与聚合函数、GROUP BY、或 JOIN 在同一条语句中混用。
:::
@@ -354,19 +349,15 @@ SELECT ... FROM (SELECT ... FROM ...) ...;
:::info
-- 目前仅支持一层嵌套,也即不能在子查询中再嵌入子查询。
-- 内层查询的返回结果将作为“虚拟表”供外层查询使用,此虚拟表可以使用 AS 语法做重命名,以便于外层查询中方便引用。
-- 目前不能在“连续查询”功能中使用子查询。
+- 内层查询的返回结果将作为“虚拟表”供外层查询使用,此虚拟表建议起别名,以便于外层查询中方便引用。
- 在内层和外层查询中,都支持普通的表间/超级表间 JOIN。内层查询的计算结果也可以再参与数据子表的 JOIN 操作。
-- 目前内层查询、外层查询均不支持 UNION 操作。
- 内层查询支持的功能特性与非嵌套的查询语句能力是一致的。
- 内层查询的 ORDER BY 子句一般没有意义,建议避免这样的写法以免无谓的资源消耗。
- 与非嵌套的查询语句相比,外层查询所能支持的功能特性存在如下限制:
- 计算函数部分:
- - 如果内层查询的结果数据未提供时间戳,那么计算过程依赖时间戳的函数在外层会无法正常工作。例如:TOP, BOTTOM, FIRST, LAST, DIFF。
- - 计算过程需要两遍扫描的函数,在外层查询中无法正常工作。例如:此类函数包括:STDDEV, PERCENTILE。
- - 外层查询中不支持 IN 算子,但在内层中可以使用。
- - 外层查询不支持 GROUP BY。
+ - 如果内层查询的结果数据未提供时间戳,那么计算过程隐式依赖时间戳的函数在外层会无法正常工作。例如:INTERP, DERIVATIVE, IRATE, LAST_ROW, FIRST, LAST, TWA, STATEDURATION, TAIL, UNIQUE。
+ - 如果内层查询的结果数据不是有效的时间序列,那么计算过程依赖数据为时间序列的函数在外层会无法正常工作。例如:LEASTSQUARES, ELAPSED, INTERP, DERIVATIVE, IRATE, TWA, DIFF, STATECOUNT, STATEDURATION, CSUM, MAVG, TAIL, UNIQUE。
+ - 计算过程需要两遍扫描的函数,在外层查询中无法正常工作。例如:此类函数包括:PERCENTILE。
:::
diff --git a/docs/zh/12-taos-sql/10-function.md b/docs/zh/12-taos-sql/10-function.md
index e99d915101b2d870d2019cac9cdde500af1aa406..9c5b7f771ecaf52da55a693ed5c789197ab57b05 100644
--- a/docs/zh/12-taos-sql/10-function.md
+++ b/docs/zh/12-taos-sql/10-function.md
@@ -1,6 +1,7 @@
---
sidebar_label: 函数
title: 函数
+description: TDengine 支持的函数列表
toc_max_heading_level: 4
---
@@ -613,6 +614,7 @@ SELECT APERCENTILE(field_name, P[, algo_type]) FROM { tb_name | stb_name } [WHER
**说明**:
- P值范围是[0,100],当为0时等同于MIN,为100时等同于MAX。
- algo_type 取值为 "default" 或 "t-digest"。 输入为 "default" 时函数使用基于直方图算法进行计算。输入为 "t-digest" 时使用t-digest算法计算分位数的近似结果。如果不指定 algo_type 则使用 "default" 算法。
+- "t-digest"算法的近似结果对于输入数据顺序敏感,对超级表查询时不同的输入排序结果可能会有微小的误差。
### AVG
@@ -916,7 +918,7 @@ SELECT MAX(field_name) FROM { tb_name | stb_name } [WHERE clause];
**返回数据类型**:同应用的字段。
-**适用数据类型**:数值类型。
+**适用数据类型**:数值类型,时间戳类型。
**适用于**:表和超级表。
@@ -931,7 +933,7 @@ SELECT MIN(field_name) FROM {tb_name | stb_name} [WHERE clause];
**返回数据类型**:同应用的字段。
-**适用数据类型**:数值类型。
+**适用数据类型**:数值类型,时间戳类型。
**适用于**:表和超级表。
@@ -1166,7 +1168,7 @@ SELECT stateDuration(field_name, oper, val, unit) FROM { tb_name | stb_name } [W
**参数范围**:
-- oper : "LT" (小于)、"GT"(大于)、"LE"(小于等于)、"GE"(大于等于)、"NE"(不等于)、"EQ"(等于),不区分大小写。
+- oper : `'LT'` (小于)、`'GT'`(大于)、`'LE'`(小于等于)、`'GE'`(大于等于)、`'NE'`(不等于)、`'EQ'`(等于),不区分大小写,但需要用`''`包括。
- val : 数值型
- unit : 时间长度的单位,可取值时间单位: 1b(纳秒), 1u(微秒),1a(毫秒),1s(秒),1m(分),1h(小时),1d(天), 1w(周)。如果省略,默认为当前数据库精度。
diff --git a/docs/zh/12-taos-sql/12-distinguished.md b/docs/zh/12-taos-sql/12-distinguished.md
index 2dad49ece942d0530c12afa145c2e11682c23fe3..268712e757304fe22848318befd16d1a93de5dac 100644
--- a/docs/zh/12-taos-sql/12-distinguished.md
+++ b/docs/zh/12-taos-sql/12-distinguished.md
@@ -1,15 +1,16 @@
---
-sidebar_label: 时序数据特色查询
-title: 时序数据特色查询
+sidebar_label: 特色查询
+title: 特色查询
+description: TDengine 提供的时序数据特有的查询功能
---
TDengine 是专为时序数据而研发的大数据平台,存储和计算都针对时序数据的特点进行了量身定制,在支持标准 SQL 的基础之上,还提供了一系列贴合时序业务场景的特色查询语法,极大的方便时序场景的应用开发。
-TDengine 提供的特色查询包括标签切分查询和窗口切分查询。
+TDengine 提供的特色查询包括数据切分查询和窗口切分查询。
-## 标签切分查询
+## 数据切分查询
-超级表查询中,当需要针对标签进行数据切分然后在切分出的数据空间内再进行一系列的计算时使用标签切分子句,标签切分的语句如下:
+当需要按一定的维度对数据进行切分然后在切分出的数据空间内再进行一系列的计算时使用数据切分子句,数据切分语句的语法如下:
```sql
PARTITION BY part_list
@@ -17,22 +18,23 @@ PARTITION BY part_list
part_list 可以是任意的标量表达式,包括列、常量、标量函数和它们的组合。
-当 PARTITION BY 和标签一起使用时,TDengine 按如下方式处理标签切分子句:
+TDengine 按如下方式处理数据切分子句:
-- 标签切分子句位于 WHERE 子句之后,且不能和 JOIN 子句一起使用。
-- 标签切分子句将超级表数据按指定的标签组合进行切分,每个切分的分片进行指定的计算。计算由之后的子句定义(窗口子句、GROUP BY 子句或 SELECT 子句)。
-- 标签切分子句可以和窗口切分子句(或 GROUP BY 子句)一起使用,此时后面的子句作用在每个切分的分片上。例如,将数据按标签 location 进行分组,并对每个组按 10 分钟进行降采样,取其最大值。
+- 数据切分子句位于 WHERE 子句之后。
+- 数据切分子句将表数据按指定的维度进行切分,每个切分的分片进行指定的计算。计算由之后的子句定义(窗口子句、GROUP BY 子句或 SELECT 子句)。
+- 数据切分子句可以和窗口切分子句(或 GROUP BY 子句)一起使用,此时后面的子句作用在每个切分的分片上。例如,将数据按标签 location 进行分组,并对每个组按 10 分钟进行降采样,取其最大值。
```sql
select max(current) from meters partition by location interval(10m)
```
+数据切分子句最常见的用法就是在超级表查询中,按标签将子表数据进行切分,然后分别进行计算。特别是 PARTITION BY TBNAME 用法,它将每个子表的数据独立出来,形成一条条独立的时间序列,极大的方便了各种时序场景的统计分析。
## 窗口切分查询
TDengine 支持按时间段窗口切分方式进行聚合结果查询,比如温度传感器每秒采集一次数据,但需查询每隔 10 分钟的温度平均值。这种场景下可以使用窗口子句来获得需要的查询结果。窗口子句用于针对查询的数据集合按照窗口切分成为查询子集并进行聚合,窗口包含时间窗口(time window)、状态窗口(status window)、会话窗口(session window)三种窗口。其中时间窗口又可划分为滑动时间窗口和翻转时间窗口。窗口切分查询语法如下:
```sql
-SELECT function_list FROM tb_name
+SELECT select_list FROM tb_name
[WHERE where_condition]
[SESSION(ts_col, tol_val)]
[STATE_WINDOW(col)]
@@ -42,19 +44,15 @@ SELECT function_list FROM tb_name
在上述语法中的具体限制如下
-### 窗口切分查询中使用函数的限制
-
-- 在聚合查询中,function_list 位置允许使用聚合和选择函数,并要求每个函数仅输出单个结果(例如:COUNT、AVG、SUM、STDDEV、LEASTSQUARES、PERCENTILE、MIN、MAX、FIRST、LAST),而不能使用具有多行输出结果的函数(例如:DIFF 以及四则运算)。
-- 此外 LAST_ROW 查询也不能与窗口聚合同时出现。
-- 标量函数(如:CEIL/FLOOR 等)也不能使用在窗口聚合查询中。
-
### 窗口子句的规则
-- 窗口子句位于标签切分子句之后,GROUP BY 子句之前,且不可以和 GROUP BY 子句一起使用。
+- 窗口子句位于数据切分子句之后,GROUP BY 子句之前,且不可以和 GROUP BY 子句一起使用。
- 窗口子句将数据按窗口进行切分,对每个窗口进行 SELECT 列表中的表达式的计算,SELECT 列表中的表达式只能包含:
- 常量。
- - 聚集函数。
+ - _wstart伪列、_wend伪列和_wduration伪列。
+ - 聚集函数(包括选择函数和可以由参数确定输出行数的时序特有函数)。
- 包含上面表达式的表达式。
+ - 且至少包含一个聚集函数。
- 窗口子句不可以和 GROUP BY 子句一起使用。
- WHERE 语句可以指定查询的起止时间和其他过滤条件。
@@ -73,7 +71,7 @@ FILL 语句指定某一窗口区间数据缺失的情况下的填充模式。填
1. 使用 FILL 语句的时候可能生成大量的填充输出,务必指定查询的时间区间。针对每次查询,系统可返回不超过 1 千万条具有插值的结果。
2. 在时间维度聚合中,返回的结果中时间序列严格单调递增。
-3. 如果查询对象是超级表,则聚合函数会作用于该超级表下满足值过滤条件的所有表的数据。如果查询中没有使用 GROUP BY 语句,则返回的结果按照时间序列严格单调递增;如果查询中使用了 GROUP BY 语句分组,则返回结果中每个 GROUP 内不按照时间序列严格单调递增。
+3. 如果查询对象是超级表,则聚合函数会作用于该超级表下满足值过滤条件的所有表的数据。如果查询中没有使用 PARTITION BY 语句,则返回的结果按照时间序列严格单调递增;如果查询中使用了 PARTITION BY 语句分组,则返回结果中每个 PARTITION 内不按照时间序列严格单调递增。
:::
@@ -105,7 +103,7 @@ SELECT COUNT(*) FROM temp_tb_1 INTERVAL(1m) SLIDING(2m);
### 状态窗口
-使用整数(布尔值)或字符串来标识产生记录时候设备的状态量。产生的记录如果具有相同的状态量数值则归属于同一个状态窗口,数值改变后该窗口关闭。如下图所示,根据状态量确定的状态窗口分别是[2019-04-28 14:22:07,2019-04-28 14:22:10]和[2019-04-28 14:22:11,2019-04-28 14:22:12]两个。(状态窗口暂不支持对超级表使用)
+使用整数(布尔值)或字符串来标识产生记录时候设备的状态量。产生的记录如果具有相同的状态量数值则归属于同一个状态窗口,数值改变后该窗口关闭。如下图所示,根据状态量确定的状态窗口分别是[2019-04-28 14:22:07,2019-04-28 14:22:10]和[2019-04-28 14:22:11,2019-04-28 14:22:12]两个。

@@ -121,7 +119,7 @@ SELECT COUNT(*), FIRST(ts), status FROM temp_tb_1 STATE_WINDOW(status);

-在 tol_value 时间间隔范围内的结果都认为归属于同一个窗口,如果连续的两条记录的时间超过 tol_val,则自动开启下一个窗口。(会话窗口暂不支持对超级表使用)
+在 tol_value 时间间隔范围内的结果都认为归属于同一个窗口,如果连续的两条记录的时间超过 tol_val,则自动开启下一个窗口。
```
diff --git a/docs/zh/12-taos-sql/13-tmq.md b/docs/zh/12-taos-sql/13-tmq.md
index b05d2bf680e2db5db08b2e86d98e2e3018078ddf..571300ad8cbfb031e38f330c0773ec6ee6f11e32 100644
--- a/docs/zh/12-taos-sql/13-tmq.md
+++ b/docs/zh/12-taos-sql/13-tmq.md
@@ -1,6 +1,7 @@
---
sidebar_label: 数据订阅
title: 数据订阅
+description: TDengine 消息队列提供的数据订阅功能
---
TDengine 3.0.0.0 开始对消息队列做了大幅的优化和增强以简化用户的解决方案。
diff --git a/docs/zh/12-taos-sql/14-stream.md b/docs/zh/12-taos-sql/14-stream.md
index a967299e4093a4a8654d7aaf1b8c3914726aeadf..cd726e0a0ea644f575e16c656eeb4bb2cabf425d 100644
--- a/docs/zh/12-taos-sql/14-stream.md
+++ b/docs/zh/12-taos-sql/14-stream.md
@@ -1,6 +1,7 @@
---
sidebar_label: 流式计算
title: 流式计算
+description: 流式计算的相关 SQL 的详细语法
---
@@ -18,7 +19,7 @@ stream_options: {
其中 subquery 是 select 普通查询语法的子集:
```sql
-subquery: SELECT [DISTINCT] select_list
+subquery: SELECT select_list
from_clause
[WHERE condition]
[PARTITION BY tag_list]
@@ -37,13 +38,13 @@ window_clause: {
其中,SESSION 是会话窗口,tol_val 是时间间隔的最大范围。在 tol_val 时间间隔范围内的数据都属于同一个窗口,如果连续的两条数据的时间超过 tol_val,则自动开启下一个窗口。
-窗口的定义与时序数据特色查询中的定义完全相同。
+窗口的定义与时序数据特色查询中的定义完全相同,详见 [TDengine 特色查询](../distinguished)
例如,如下语句创建流式计算,同时自动创建名为 avg_vol 的超级表,此流计算以一分钟为时间窗口、30 秒为前向增量统计这些电表的平均电压,并将来自 meters 表的数据的计算结果写入 avg_vol 表,不同 partition 的数据会分别创建子表并写入不同子表。
```sql
CREATE STREAM avg_vol_s INTO avg_vol AS
-SELECT _wstartts, count(*), avg(voltage) FROM meters PARTITION BY tbname INTERVAL(1m) SLIDING(30s);
+SELECT _wstart, count(*), avg(voltage) FROM meters PARTITION BY tbname INTERVAL(1m) SLIDING(30s);
```
## 流式计算的 partition
@@ -57,7 +58,7 @@ SELECT _wstartts, count(*), avg(voltage) FROM meters PARTITION BY tbname INTERVA
## 删除流式计算
```sql
-DROP STREAM [IF NOT EXISTS] stream_name;
+DROP STREAM [IF EXISTS] stream_name;
```
仅删除流式计算任务,由流式计算写入的数据不会被删除。
diff --git a/docs/zh/12-taos-sql/16-operators.md b/docs/zh/12-taos-sql/16-operators.md
index 22b78455fb35e9ebe5978b30505819e1a2b678c8..48e9991799abf99ca868fc30e34f0435054afa0b 100644
--- a/docs/zh/12-taos-sql/16-operators.md
+++ b/docs/zh/12-taos-sql/16-operators.md
@@ -1,6 +1,7 @@
---
sidebar_label: 运算符
title: 运算符
+description: TDengine 支持的所有运算符
---
## 算术运算符
diff --git a/docs/zh/12-taos-sql/17-json.md b/docs/zh/12-taos-sql/17-json.md
index 4a4a8cca732ac433ba5ada1ec3805ebfa663edb3..18c25cfe230f81bf0b0e421634c1a768ae8e4628 100644
--- a/docs/zh/12-taos-sql/17-json.md
+++ b/docs/zh/12-taos-sql/17-json.md
@@ -1,6 +1,7 @@
---
-sidebar_label: JSON 类型使用说明
-title: JSON 类型使用说明
+sidebar_label: JSON 类型
+title: JSON 类型
+description: 对 JSON 类型如何使用的详细说明
---
diff --git a/docs/zh/12-taos-sql/18-escape.md b/docs/zh/12-taos-sql/18-escape.md
index d478340599595108a0eb7275323550b6e7c876db..5e0d292d396fdb54bd3df553544353a900415283 100644
--- a/docs/zh/12-taos-sql/18-escape.md
+++ b/docs/zh/12-taos-sql/18-escape.md
@@ -1,5 +1,7 @@
---
-title: 转义字符说明
+title: 转义字符
+sidebar_label: 转义字符
+description: TDengine 中使用转义字符的详细规则
---
## 转义字符表
diff --git a/docs/zh/12-taos-sql/19-limit.md b/docs/zh/12-taos-sql/19-limit.md
index ff552fc9771f5b428554acc62e9aeac03a305ecc..a9743adddabe96440ffca8c8585787081d29398f 100644
--- a/docs/zh/12-taos-sql/19-limit.md
+++ b/docs/zh/12-taos-sql/19-limit.md
@@ -1,6 +1,7 @@
---
-sidebar_label: 命名与边界限制
-title: 命名与边界限制
+sidebar_label: 命名与边界
+title: 命名与边界
+description: 合法字符集和命名中的限制规则
---
## 名称命名规则
@@ -30,7 +31,7 @@ title: 命名与边界限制
- 最多允许 4096 列,最少需要 2 列,第一列必须是时间戳。
- 标签名最大长度为 64
- 最多允许 128 个,至少要有 1 个标签,一个表中标签值的总长度不超过 16KB
-- SQL 语句最大长度 1048576 个字符,也可通过客户端配置参数 maxSQLLength 修改,取值范围 65480 ~ 1048576
+- SQL 语句最大长度 1048576 个字符
- SELECT 语句的查询结果,最多允许返回 4096 列(语句中的函数调用可能也会占用一些列空间),超限时需要显式指定较少的返回数据列,以避免语句执行报错
- 库的数目,超级表的数目、表的数目,系统不做限制,仅受系统资源限制
- 数据库的副本数只能设置为 1 或 3
diff --git a/docs/zh/12-taos-sql/20-keywords.md b/docs/zh/12-taos-sql/20-keywords.md
index cac29d7863ff77a6ec15bb9bddedd006317b719c..09fea45b7a8221ce6ed955a78cec502e235506a0 100644
--- a/docs/zh/12-taos-sql/20-keywords.md
+++ b/docs/zh/12-taos-sql/20-keywords.md
@@ -1,11 +1,13 @@
---
sidebar_label: 保留关键字
-title: TDengine 保留关键字
+title: 保留关键字
+description: TDengine 保留关键字的详细列表
---
## 保留关键字
-目前 TDengine 有将近 200 个内部保留关键字,这些关键字无论大小写均不可以用作库名、表名、STable 名、数据列名及标签列名等。这些关键字列表如下:
+目前 TDengine 有将近 200 个内部保留关键字,这些关键字无论大小写,如果需要用作库名、表名、STable 名、数据列名及标签列名等,需要使用符号``将关键字括起来使用,例如`ADD`。
+关键字列表如下:
### A
@@ -238,6 +240,7 @@ title: TDengine 保留关键字
- TOPICS
- TRIGGER
- TSERIES
+- TTL
### U
diff --git a/docs/zh/12-taos-sql/21-node.md b/docs/zh/12-taos-sql/21-node.md
index 4816daf42042c0607aebf37c8b57961e5b1927fe..d47dc8198f41e7ee6e90624b0928c6bd215bb26d 100644
--- a/docs/zh/12-taos-sql/21-node.md
+++ b/docs/zh/12-taos-sql/21-node.md
@@ -1,6 +1,7 @@
---
sidebar_label: 集群管理
title: 集群管理
+description: 管理集群的 SQL 命令的详细解析
---
组成 TDengine 集群的物理实体是 dnode (data node 的缩写),它是一个运行在操作系统之上的进程。在 dnode 中可以建立负责时序数据存储的 vnode (virtual node),在多节点集群环境下当某个数据库的 replica 为 3 时,该数据库中的每个 vgroup 由 3 个 vnode 组成;当数据库的 replica 为 1 时,该数据库中的每个 vgroup 由 1 个 vnode 组成。如果要想配置某个数据库为多副本,则集群中的 dnode 数量至少为 3。在 dnode 还可以创建 mnode (management node),单个集群中最多可以创建三个 mnode。在 TDengine 3.0.0.0 中为了支持存算分离,引入了一种新的逻辑节点 qnode (query node),qnode 和 vnode 既可以共存在一个 dnode 中,也可以完全分离在不同的 dnode 上。
diff --git a/docs/zh/12-taos-sql/22-meta.md b/docs/zh/12-taos-sql/22-meta.md
index 8139b2fc55d420edfb766aab6ed06477fbd3621f..c1ffc4a757500276f348d08cd577f63072dfece2 100644
--- a/docs/zh/12-taos-sql/22-meta.md
+++ b/docs/zh/12-taos-sql/22-meta.md
@@ -1,6 +1,7 @@
---
sidebar_label: 元数据
-title: 存储元数据的 Information_Schema 数据库
+title: 元数据
+description: Information_Schema 数据库中存储了系统中所有的元数据信息
---
TDengine 内置了一个名为 `INFORMATION_SCHEMA` 的数据库,提供对数据库元数据、数据库系统信息和状态的访问,例如数据库或表的名称,当前执行的 SQL 语句等。该数据库存储有关 TDengine 维护的所有其他数据库的信息。它包含多个只读表。实际上,这些表都是视图,而不是基表,因此没有与它们关联的文件。所以对这些表只能查询,不能进行 INSERT 等写入操作。`INFORMATION_SCHEMA` 数据库旨在以一种更一致的方式来提供对 TDengine 支持的各种 SHOW 语句(如 SHOW TABLES、SHOW DATABASES)所提供的信息的访问。与 SHOW 语句相比,使用 SELECT ... FROM INFORMATION_SCHEMA.tablename 具有以下优点:
@@ -245,3 +246,35 @@ Note: 由于 SHOW 语句已经被开发者熟悉和广泛使用,所以它们
| 1 | dnode_id | INT | dnode 的 ID |
| 2 | name | BINARY(32) | 配置项名称 |
| 3 | value | BINARY(64) | 该配置项的值 |
+
+## INS_TOPICS
+
+| # | **列名** | **数据类型** | **说明** |
+| --- | :---------: | ------------ | ------------------------------ |
+| 1 | topic_name | BINARY(192) | topic 名称 |
+| 2 | db_name | BINARY(64) | topic 相关的 DB |
+| 3   | create_time | TIMESTAMP    | topic 的创建时间               |
+| 4 | sql | BINARY(1024) | 创建该 topic 时所用的 SQL 语句 |
+
+## INS_SUBSCRIPTIONS
+
+| # | **列名** | **数据类型** | **说明** |
+| --- | :------------: | ------------ | ------------------------ |
+| 1 | topic_name | BINARY(204) | 被订阅的 topic |
+| 2 | consumer_group | BINARY(193) | 订阅者的消费者组 |
+| 3 | vgroup_id | INT | 消费者被分配的 vgroup id |
+| 4 | consumer_id | BIGINT | 消费者的唯一 id |
+
+## INS_STREAMS
+
+| # | **列名** | **数据类型** | **说明** |
+| --- | :----------: | ------------ | --------------------------------------- |
+| 1 | stream_name | BINARY(64) | 流计算名称 |
+| 2 | create_time | TIMESTAMP | 创建时间 |
+| 3 | sql | BINARY(1024) | 创建流计算时提供的 SQL 语句 |
+| 4   | status       | BINARY(20)   | 流当前状态                              |
+| 5 | source_db | BINARY(64) | 源数据库 |
+| 6   | target_db    | BINARY(64)   | 目的数据库                              |
+| 7 | target_table | BINARY(192) | 流计算写入的目标表 |
+| 8 | watermark | BIGINT | watermark,详见 SQL 手册流式计算 |
+| 9 | trigger | INT | 计算结果推送模式,详见 SQL 手册流式计算 |
diff --git a/docs/zh/12-taos-sql/23-perf.md b/docs/zh/12-taos-sql/23-perf.md
index ac852ee1506ce8da24c036c61ce96fa4eecaf1cb..d4ee0e178c02e65eb3f1ceaa73e170893f65cc88 100644
--- a/docs/zh/12-taos-sql/23-perf.md
+++ b/docs/zh/12-taos-sql/23-perf.md
@@ -1,6 +1,7 @@
---
sidebar_label: 统计数据
-title: 存储统计数据的 Performance_Schema 数据库
+title: 统计数据
+description: Performance_Schema 数据库中存储了系统中的各种统计信息
---
TDengine 3.0 版本开始提供一个内置数据库 `performance_schema`,其中存储了与性能有关的统计数据。本节详细介绍其中的表和表结构。
@@ -61,15 +62,6 @@ TDengine 3.0 版本开始提供一个内置数据库 `performance_schema`,其
| 12 | sub_status | BINARY(1000) | 子查询状态 |
| 13 | sql | BINARY(1024) | SQL 语句 |
-## PERF_TOPICS
-
-| # | **列名** | **数据类型** | **说明** |
-| --- | :---------: | ------------ | ------------------------------ |
-| 1 | topic_name | BINARY(192) | topic 名称 |
-| 2 | db_name | BINARY(64) | topic 相关的 DB |
-| 3 | create_time | TIMESTAMP | topic 的 创建时间 |
-| 4 | sql | BINARY(1024) | 创建该 topic 时所用的 SQL 语句 |
-
## PERF_CONSUMERS
| # | **列名** | **数据类型** | **说明** |
@@ -83,15 +75,6 @@ TDengine 3.0 版本开始提供一个内置数据库 `performance_schema`,其
| 7 | subscribe_time | TIMESTAMP | 上一次发起订阅的时间 |
| 8 | rebalance_time | TIMESTAMP | 上一次触发 rebalance 的时间 |
-## PERF_SUBSCRIPTIONS
-
-| # | **列名** | **数据类型** | **说明** |
-| --- | :------------: | ------------ | ------------------------ |
-| 1 | topic_name | BINARY(204) | 被订阅的 topic |
-| 2 | consumer_group | BINARY(193) | 订阅者的消费者组 |
-| 3 | vgroup_id | INT | 消费者被分配的 vgroup id |
-| 4 | consumer_id | BIGINT | 消费者的唯一 id |
-
## PERF_TRANS
| # | **列名** | **数据类型** | **说明** |
@@ -113,17 +96,3 @@ TDengine 3.0 版本开始提供一个内置数据库 `performance_schema`,其
| 2 | create_time | TIMESTAMP | sma 创建时间 |
| 3 | stable_name | BINARY(192) | sma 所属的超级表名称 |
| 4 | vgroup_id | INT | sma 专属的 vgroup 名称 |
-
-## PERF_STREAMS
-
-| # | **列名** | **数据类型** | **说明** |
-| --- | :----------: | ------------ | --------------------------------------- |
-| 1 | stream_name | BINARY(64) | 流计算名称 |
-| 2 | create_time | TIMESTAMP | 创建时间 |
-| 3 | sql | BINARY(1024) | 创建流计算时提供的 SQL 语句 |
-| 4 | status | BIANRY(20) | 流当前状态 |
-| 5 | source_db | BINARY(64) | 源数据库 |
-| 6 | target_db | BIANRY(64) | 目的数据库 |
-| 7 | target_table | BINARY(192) | 流计算写入的目标表 |
-| 8 | watermark | BIGINT | watermark,详见 SQL 手册流式计算 |
-| 9 | trigger | INT | 计算结果推送模式,详见 SQL 手册流式计算 |
diff --git a/docs/zh/12-taos-sql/24-show.md b/docs/zh/12-taos-sql/24-show.md
index 781f94324c78e7975abde33803cffdb914da020c..31b7c085a1ba97630223c16e06022ec9dfd9ea50 100644
--- a/docs/zh/12-taos-sql/24-show.md
+++ b/docs/zh/12-taos-sql/24-show.md
@@ -1,19 +1,10 @@
---
sidebar_label: SHOW 命令
-title: 使用 SHOW 命令查看系统元数据
+title: SHOW 命令
+description: SHOW 命令的完整列表
---
-除了使用 `select` 语句查询 `INFORMATION_SCHEMA` 数据库中的表获得系统中的各种元数据、系统信息和状态之外,也可以用 `SHOW` 命令来实现同样的目的。
-
-## SHOW ACCOUNTS
-
-```sql
-SHOW ACCOUNTS;
-```
-
-显示当前系统中所有租户的信息。
-
-注:企业版独有
+SHOW 命令可以用来获取简要的系统信息。若想获取系统中详细的各种元数据、系统信息和状态,请使用 select 语句查询 INFORMATION_SCHEMA 数据库中的表。
## SHOW APPS
@@ -194,7 +185,7 @@ SHOW STREAMS;
SHOW SUBSCRIPTIONS;
```
-显示当前数据库下的所有的订阅关系
+显示当前系统内所有的订阅关系
## SHOW TABLES
diff --git a/docs/zh/12-taos-sql/25-grant.md b/docs/zh/12-taos-sql/25-grant.md
index c41a3fcfc9ee42e56e48082da5b6420073d92cdf..7fb944710125de6fe4d6efcedbb0677b33e1fd0f 100644
--- a/docs/zh/12-taos-sql/25-grant.md
+++ b/docs/zh/12-taos-sql/25-grant.md
@@ -1,6 +1,7 @@
---
sidebar_label: 权限管理
title: 权限管理
+description: 企业版中才具有的权限管理功能
---
本节讲述如何在 TDengine 中进行权限管理的相关操作。
@@ -8,14 +9,51 @@ title: 权限管理
## 创建用户
```sql
-CREATE USER use_name PASS 'password';
+CREATE USER use_name PASS 'password' [SYSINFO {1|0}];
```
创建用户。
-use_name最长为23字节。
+use_name 最长为 23 字节。
-password最长为128字节,合法字符包括"a-zA-Z0-9!?$%^&*()_–+={[}]:;@~#|<,>.?/",不可以出现单双引号、撇号、反斜杠和空格,且不可以为空。
+password 最长为 128 字节,合法字符包括"a-zA-Z0-9!?$%^&*()_–+={[}]:;@~#|<,>.?/",不可以出现单双引号、撇号、反斜杠和空格,且不可以为空。
+
+SYSINFO 表示用户是否可以查看系统信息。1 表示可以查看,0 表示不可以查看。系统信息包括服务端配置信息、服务端各种节点信息(如 DNODE、QNODE等)、存储相关的信息等。默认为可以查看系统信息。
+
+例如,创建密码为123456且可以查看系统信息的用户test如下:
+
+```sql
+taos> create user test pass '123456' sysinfo 1;
+Query OK, 0 of 0 rows affected (0.001254s)
+```
+
+## 查看用户
+
+```sql
+SHOW USERS;
+```
+
+查看用户信息。
+
+```sql
+taos> show users;
+ name | super | enable | sysinfo | create_time |
+================================================================================
+ test | 0 | 1 | 1 | 2022-08-29 15:10:27.315 |
+ root | 1 | 1 | 1 | 2022-08-29 15:03:34.710 |
+Query OK, 2 rows in database (0.001657s)
+```
+
+也可以通过查询INFORMATION_SCHEMA.INS_USERS系统表来查看用户信息,例如:
+
+```sql
+taos> select * from information_schema.ins_users;
+ name | super | enable | sysinfo | create_time |
+================================================================================
+ test | 0 | 1 | 1 | 2022-08-29 15:10:27.315 |
+ root | 1 | 1 | 1 | 2022-08-29 15:03:34.710 |
+Query OK, 2 rows in database (0.001953s)
+```
## 删除用户
@@ -36,9 +74,15 @@ alter_user_clause: {
```
- PASS:修改用户密码。
-- ENABLE:修改用户是否启用。1表示启用此用户,0表示禁用此用户。
-- SYSINFO:修改用户是否可查看系统信息。1表示可以查看系统信息,0表示不可以查看系统信息。
+- ENABLE:修改用户是否启用。1 表示启用此用户,0 表示禁用此用户。
+- SYSINFO:修改用户是否可查看系统信息。1 表示可以查看系统信息,0 表示不可以查看系统信息。
+
+例如,禁用 test 用户:
+```sql
+taos> alter user test enable 0;
+Query OK, 0 of 0 rows affected (0.001160s)
+```
## 授权
@@ -61,7 +105,7 @@ priv_level : {
}
```
-对用户授权。
+对用户授权。授权功能只包含在企业版中。
授权级别支持到DATABASE,权限有READ和WRITE两种。
@@ -91,4 +135,4 @@ priv_level : {
```
-收回对用户的授权。
+收回对用户的授权。授权功能只包含在企业版中。
diff --git a/docs/zh/12-taos-sql/26-udf.md b/docs/zh/12-taos-sql/26-udf.md
index 7ddcad298b4b9eb4191abded0663055620b741c3..6dc1b6eb5fbe346ae65993e4e290566179b0e6ee 100644
--- a/docs/zh/12-taos-sql/26-udf.md
+++ b/docs/zh/12-taos-sql/26-udf.md
@@ -1,6 +1,7 @@
---
sidebar_label: 自定义函数
-title: 用户自定义函数
+title: 自定义函数
+description: 使用 UDF 的详细指南
---
除了 TDengine 的内置函数以外,用户还可以编写自己的函数逻辑并加入TDengine系统中。
diff --git a/docs/zh/12-taos-sql/27-index.md b/docs/zh/12-taos-sql/27-index.md
index 2c0907723e76f304566e6a19bdef2d63225f903f..aa84140296832f79a6498d0da2b5a8f500cd1e90 100644
--- a/docs/zh/12-taos-sql/27-index.md
+++ b/docs/zh/12-taos-sql/27-index.md
@@ -1,6 +1,7 @@
---
sidebar_label: 索引
-title: 使用索引
+title: 索引
+description: 索引功能的使用细节
---
TDengine 从 3.0.0.0 版本开始引入了索引功能,支持 SMA 索引和 FULLTEXT 索引。
diff --git a/docs/zh/12-taos-sql/28-recovery.md b/docs/zh/12-taos-sql/28-recovery.md
index 72b220b8ff44917831ac16301237702c991b9b15..582c3739073513df4ceb212080805136947e62d4 100644
--- a/docs/zh/12-taos-sql/28-recovery.md
+++ b/docs/zh/12-taos-sql/28-recovery.md
@@ -1,6 +1,7 @@
---
sidebar_label: 异常恢复
title: 异常恢复
+description: 如何终止出现问题的连接、查询和事务以使系统恢复正常
---
在一个复杂的应用场景中,连接和查询任务等有可能进入一种错误状态或者耗时过长迟迟无法结束,此时需要有能够终止这些连接或任务的方法。
diff --git a/docs/zh/12-taos-sql/29-changes.md b/docs/zh/12-taos-sql/29-changes.md
index d653c59a5cd1309fbdcd6ef7e3706e33c4a43dee..e63825045d5ddc26d289af4bbd7fa808719bb99c 100644
--- a/docs/zh/12-taos-sql/29-changes.md
+++ b/docs/zh/12-taos-sql/29-changes.md
@@ -1,6 +1,6 @@
---
-sidebar_label: 3.0 版本语法变更
-title: 3.0 版本语法变更
+sidebar_label: 语法变更
+title: 语法变更
description: "TDengine 3.0 版本的语法变更说明"
---
diff --git a/docs/zh/12-taos-sql/index.md b/docs/zh/12-taos-sql/index.md
index 821679551c453b1a3f2937ac5d2409dd733cd593..739d26b2240ddfcf32a269015f5c8915f4854f33 100644
--- a/docs/zh/12-taos-sql/index.md
+++ b/docs/zh/12-taos-sql/index.md
@@ -1,11 +1,11 @@
---
-title: TAOS SQL
-description: "TAOS SQL 支持的语法规则、主要查询功能、支持的 SQL 查询函数,以及常用技巧等内容"
+title: TDengine SQL
+description: 'TDengine SQL 支持的语法规则、主要查询功能、支持的 SQL 查询函数,以及常用技巧等内容'
---
-本文档说明 TAOS SQL 支持的语法规则、主要查询功能、支持的 SQL 查询函数,以及常用技巧等内容。阅读本文档需要读者具有基本的 SQL 语言的基础。TDengine 3.0 版本相比 2.x 版本做了大量改进和优化,特别是查询引擎进行了彻底的重构,因此 SQL 语法相比 2.x 版本有很多变更。详细的变更内容请见 [3.0 版本语法变更](/taos-sql/changes) 章节
+本文档说明 TDengine SQL 支持的语法规则、主要查询功能、支持的 SQL 查询函数,以及常用技巧等内容。阅读本文档需要读者具有基本的 SQL 语言的基础。TDengine 3.0 版本相比 2.x 版本做了大量改进和优化,特别是查询引擎进行了彻底的重构,因此 SQL 语法相比 2.x 版本有很多变更。详细的变更内容请见 [3.0 版本语法变更](/taos-sql/changes) 章节
-TAOS SQL 是用户对 TDengine 进行数据写入和查询的主要工具。TAOS SQL 提供标准的 SQL 语法,并针对时序数据和业务的特点优化和新增了许多语法和功能。TAOS SQL 语句的最大长度为 1M。TAOS SQL 不支持关键字的缩写,例如 DELETE 不能缩写为 DEL。
+TDengine SQL 是用户对 TDengine 进行数据写入和查询的主要工具。TDengine SQL 提供标准的 SQL 语法,并针对时序数据和业务的特点优化和新增了许多语法和功能。TDengine SQL 语句的最大长度为 1M。TDengine SQL 不支持关键字的缩写,例如 DELETE 不能缩写为 DEL。
本章节 SQL 语法遵循如下约定:
diff --git a/docs/zh/14-reference/04-taosadapter.md b/docs/zh/14-reference/04-taosadapter.md
index 6177b52e4c5a9466220fa6a8e91f2bd1e615c68a..71bf5f4223ae97cf2c1153aaea3b8f946e213522 100644
--- a/docs/zh/14-reference/04-taosadapter.md
+++ b/docs/zh/14-reference/04-taosadapter.md
@@ -156,7 +156,7 @@ AllowWebSockets
## 功能列表
- RESTful 接口
- [https://docs.taosdata.com/reference/rest-api/](https://docs.taosdata.com/reference/rest-api/)
+ [RESTful API](../../connector/rest-api)
- 兼容 InfluxDB v1 写接口
[https://docs.influxdata.com/influxdb/v2.0/reference/api/influxdb-1x/write/](https://docs.influxdata.com/influxdb/v2.0/reference/api/influxdb-1x/write/)
- 兼容 OpenTSDB JSON 和 telnet 格式写入
@@ -179,7 +179,7 @@ AllowWebSockets
### TDengine RESTful 接口
-您可以使用任何支持 http 协议的客户端通过访问 RESTful 接口地址 `http://:6041/rest/sql` 来写入数据到 TDengine 或从 TDengine 中查询数据。细节请参考[官方文档](/reference/rest-api/)。
+您可以使用任何支持 http 协议的客户端通过访问 RESTful 接口地址 `http://:6041/rest/sql` 来写入数据到 TDengine 或从 TDengine 中查询数据。细节请参考[官方文档](../../connector/rest-api/)。
### InfluxDB
diff --git a/docs/zh/14-reference/05-taosbenchmark.md b/docs/zh/14-reference/05-taosbenchmark.md
index f84ec65b4c8574c0812567a65213d7605b306c99..0d6aad62401daf76737caf803461c187189cb76f 100644
--- a/docs/zh/14-reference/05-taosbenchmark.md
+++ b/docs/zh/14-reference/05-taosbenchmark.md
@@ -405,37 +405,7 @@ taosBenchmark -A INT,DOUBLE,NCHAR,BINARY\(16\)
订阅子表或者普通表的配置参数在 `specified_table_query` 中设置。
-- **threads** : 执行 SQL 的线程数,默认为 1。
-
-- **interval** : 执行订阅的时间间隔,单位为秒,默认为 0。
-
-- **restart** : "yes" 表示开始新的订阅,"no" 表示继续之前的订阅,默认值为 "no"。
-
-- **keepProgress** : "yes" 表示保留订阅进度,"no" 表示不保留,默认值为 "no"。
-
-- **resubAfterConsume** : "yes" 表示取消之前的订阅然后再次订阅, "no" 表示继续之前的订阅,默认值为 "no"。
+- **threads/concurrent** : 执行 SQL 的线程数,默认为 1。
- **sqls** :
- **sql** : 执行的 SQL 命令,必填。
- - **result** : 保存查询结果的文件,未指定则不保存。
-
-#### 订阅超级表的配置参数
-
-订阅超级表的配置参数在 `super_table_query` 中设置。
-
-- **stblname** : 要订阅的超级表名称,必填。
-
-- **threads** : 执行 SQL 的线程数,默认为 1。
-
-- **interval** : 执行订阅的时间间隔,单位为秒,默认为 0。
-
-- **restart** : "yes" 表示开始新的订阅,"no" 表示继续之前的订阅,默认值为 "no"。
-
-- **keepProgress** : "yes" 表示保留订阅进度,"no" 表示不保留,默认值为 "no"。
-
-- **resubAfterConsume** : "yes" 表示取消之前的订阅然后再次订阅, "no" 表示继续之前的订阅,默认值为 "no"。
-
-- **sqls** :
- - **sql** : 执行的 SQL 命令,必填;对于超级表的查询 SQL,在 SQL 命令中保留 "xxxx",程序会自动将其替换为超级表的所有子表名。
- 替换为超级表中所有的子表名。
- - **result** : 保存查询结果的文件,未指定则不保存。
diff --git a/docs/zh/14-reference/07-tdinsight/index.mdx b/docs/zh/14-reference/07-tdinsight/index.mdx
index 9548922e65e9c0fc29bea56a325fee2eda9a85e3..ecd63621432794e27fd80b88e864590c83e9b333 100644
--- a/docs/zh/14-reference/07-tdinsight/index.mdx
+++ b/docs/zh/14-reference/07-tdinsight/index.mdx
@@ -1,6 +1,7 @@
---
-title: TDinsight - 基于Grafana的TDengine零依赖监控解决方案
+title: TDinsight
sidebar_label: TDinsight
+description: 基于Grafana的TDengine零依赖监控解决方案
---
TDinsight 是使用监控数据库和 [Grafana] 对 TDengine 进行监控的解决方案。
diff --git a/docs/zh/14-reference/08-taos-shell.md b/docs/zh/14-reference/08-taos-shell.md
index 2f3b551502c8b9da789220b1b20e701e038dc5e7..580454987840b61a5efff4acd545443ebca9904b 100644
--- a/docs/zh/14-reference/08-taos-shell.md
+++ b/docs/zh/14-reference/08-taos-shell.md
@@ -8,7 +8,7 @@ TDengine 命令行程序(以下简称 TDengine CLI)是用户操作 TDengine
## 安装
-如果在 TDengine 服务器端执行,无需任何安装,已经自动安装好 TDengine CLI。如果要在非 TDengine 服务器端运行,需要安装 TDengine 客户端驱动安装包,具体安装,请参考 [连接器](/reference/connector/)。
+如果在 TDengine 服务器端执行,无需任何安装,已经自动安装好 TDengine CLI。如果要在非 TDengine 服务器端运行,需要安装 TDengine 客户端驱动安装包,具体安装,请参考 [连接器](../../connector/)。
## 执行
@@ -18,7 +18,7 @@ TDengine 命令行程序(以下简称 TDengine CLI)是用户操作 TDengine
taos
```
-如果连接服务成功,将会打印出欢迎消息和版本信息。如果失败,则会打印错误消息。(请参考 [FAQ](/train-faq/faq) 来解决终端连接服务端失败的问题)。TDengine CLI 的提示符号如下:
+如果连接服务成功,将会打印出欢迎消息和版本信息。如果失败,则会打印错误消息。(请参考 [FAQ](../../train-faq/faq) 来解决终端连接服务端失败的问题)。TDengine CLI 的提示符号如下:
```cmd
taos>
diff --git a/docs/zh/14-reference/11-docker/index.md b/docs/zh/14-reference/11-docker/index.md
index 743fc2d32f82778fb97e7879972cd23db1159c8e..58bbe1e1178fbb1a1aa649508b0e36b331964753 100644
--- a/docs/zh/14-reference/11-docker/index.md
+++ b/docs/zh/14-reference/11-docker/index.md
@@ -32,7 +32,7 @@ taos> show databases;
Query OK, 2 rows in database (0.033802s)
```
-因为运行在容器中的 TDengine 服务端使用容器的 hostname 建立连接,使用 taos shell 或者各种连接器(例如 JDBC-JNI)从容器外访问容器内的 TDengine 比较复杂,所以上述方式是访问容器中 TDengine 服务的最简单的方法,适用于一些简单场景。如果在一些复杂场景下想要从容器化使用 taos shell 或者各种连接器访问容器中的 TDengine 服务,请参考下一节。
+因为运行在容器中的 TDengine 服务端使用容器的 hostname 建立连接,使用 TDengine CLI 或者各种连接器(例如 JDBC-JNI)从容器外访问容器内的 TDengine 比较复杂,所以上述方式是访问容器中 TDengine 服务的最简单的方法,适用于一些简单场景。如果在一些复杂场景下想要从容器化使用 TDengine CLI 或者各种连接器访问容器中的 TDengine 服务,请参考下一节。
## 在 host 网络上启动 TDengine
@@ -75,7 +75,7 @@ docker run -d \
echo 127.0.0.1 tdengine |sudo tee -a /etc/hosts
```
-最后,可以从 taos shell 或者任意连接器以 "tdengine" 为服务端地址访问 TDengine 服务。
+最后,可以从 TDengine CLI 或者任意连接器以 "tdengine" 为服务端地址访问 TDengine 服务。
```shell
taos -h tdengine -P 6030
@@ -119,7 +119,7 @@ taos -h tdengine -P 6030
FROM ubuntu:20.04
RUN apt-get update && apt-get install -y wget
ENV TDENGINE_VERSION=3.0.0.0
-RUN wget -c https://www.taosdata.com/assets-download/TDengine-client-${TDENGINE_VERSION}-Linux-x64.tar.gz \
+RUN wget -c https://www.tdengine.com/assets-download/TDengine-client-${TDENGINE_VERSION}-Linux-x64.tar.gz \
&& tar xvf TDengine-client-${TDENGINE_VERSION}-Linux-x64.tar.gz \
&& cd TDengine-client-${TDENGINE_VERSION} \
&& ./install_client.sh \
@@ -234,7 +234,7 @@ go mod tidy
```dockerfile
FROM golang:1.19.0-buster as builder
ENV TDENGINE_VERSION=3.0.0.0
-RUN wget -c https://www.taosdata.com/assets-download/3.0/TDengine-client-${TDENGINE_VERSION}-Linux-x64.tar.gz \
+RUN wget -c https://www.tdengine.com/assets-download/3.0/TDengine-client-${TDENGINE_VERSION}-Linux-x64.tar.gz \
&& tar xvf TDengine-client-${TDENGINE_VERSION}-Linux-x64.tar.gz \
&& cd TDengine-client-${TDENGINE_VERSION} \
&& ./install_client.sh \
@@ -250,7 +250,7 @@ RUN go build
FROM ubuntu:20.04
RUN apt-get update && apt-get install -y wget
ENV TDENGINE_VERSION=3.0.0.0
-RUN wget -c https://www.taosdata.com/assets-download/3.0/TDengine-client-${TDENGINE_VERSION}-Linux-x64.tar.gz \
+RUN wget -c https://www.tdengine.com/assets-download/3.0/TDengine-client-${TDENGINE_VERSION}-Linux-x64.tar.gz \
&& tar xvf TDengine-client-${TDENGINE_VERSION}-Linux-x64.tar.gz \
&& cd TDengine-client-${TDENGINE_VERSION} \
&& ./install_client.sh \
@@ -354,7 +354,7 @@ test-docker_td-2_1 /tini -- /usr/bin/entrypoi ... Up
test-docker_td-3_1 /tini -- /usr/bin/entrypoi ... Up
```
-4. 用 taos shell 查看 dnodes
+4. 用 TDengine CLI 查看 dnodes
```shell
diff --git a/docs/zh/14-reference/12-config/index.md b/docs/zh/14-reference/12-config/index.md
index d2efc5baf381d7631533f9b80fa2994dc16a221e..7b31e10572c4a6bafd088e7b7c14853ee0d32df1 100644
--- a/docs/zh/14-reference/12-config/index.md
+++ b/docs/zh/14-reference/12-config/index.md
@@ -698,122 +698,123 @@ charset 的有效值是 UTF-8。
| 45 | numOfVnodeFetchThreads | 否 | 是 |
| 46 | numOfVnodeWriteThreads | 否 | 是 |
| 47 | numOfVnodeSyncThreads | 否 | 是 |
-| 48 | numOfQnodeQueryThreads | 否 | 是 |
-| 49 | numOfQnodeFetchThreads | 否 | 是 |
-| 50 | numOfSnodeSharedThreads | 否 | 是 |
-| 51 | numOfSnodeUniqueThreads | 否 | 是 |
-| 52 | rpcQueueMemoryAllowed | 否 | 是 |
-| 53 | logDir | 是 | 是 |
-| 54 | minimalLogDirGB | 是 | 是 |
-| 55 | numOfLogLines | 是 | 是 |
-| 56 | asyncLog | 是 | 是 |
-| 57 | logKeepDays | 是 | 是 |
-| 58 | debugFlag | 是 | 是 |
-| 59 | tmrDebugFlag | 是 | 是 |
-| 60 | uDebugFlag | 是 | 是 |
-| 61 | rpcDebugFlag | 是 | 是 |
-| 62 | jniDebugFlag | 是 | 是 |
-| 63 | qDebugFlag | 是 | 是 |
-| 64 | cDebugFlag | 是 | 是 |
-| 65 | dDebugFlag | 是 | 是 |
-| 66 | vDebugFlag | 是 | 是 |
-| 67 | mDebugFlag | 是 | 是 |
-| 68 | wDebugFlag | 是 | 是 |
-| 69 | sDebugFlag | 是 | 是 |
-| 70 | tsdbDebugFlag | 是 | 是 |
-| 71 | tqDebugFlag | 否 | 是 |
-| 72 | fsDebugFlag | 是 | 是 |
-| 73 | udfDebugFlag | 否 | 是 |
-| 74 | smaDebugFlag | 否 | 是 |
-| 75 | idxDebugFlag | 否 | 是 |
-| 76 | tdbDebugFlag | 否 | 是 |
-| 77 | metaDebugFlag | 否 | 是 |
-| 78 | timezone | 是 | 是 |
-| 79 | locale | 是 | 是 |
-| 80 | charset | 是 | 是 |
-| 81 | udf | 是 | 是 |
-| 82 | enableCoreFile | 是 | 是 |
-| 83 | arbitrator | 是 | 否 |
-| 84 | numOfThreadsPerCore | 是 | 否 |
-| 85 | numOfMnodes | 是 | 否 |
-| 86 | vnodeBak | 是 | 否 |
-| 87 | balance | 是 | 否 |
-| 88 | balanceInterval | 是 | 否 |
-| 89 | offlineThreshold | 是 | 否 |
-| 90 | role | 是 | 否 |
-| 91 | dnodeNopLoop | 是 | 否 |
-| 92 | keepTimeOffset | 是 | 否 |
-| 93 | rpcTimer | 是 | 否 |
-| 94 | rpcMaxTime | 是 | 否 |
-| 95 | rpcForceTcp | 是 | 否 |
-| 96 | tcpConnTimeout | 是 | 否 |
-| 97 | syncCheckInterval | 是 | 否 |
-| 98 | maxTmrCtrl | 是 | 否 |
-| 99 | monitorReplica | 是 | 否 |
-| 100 | smlTagNullName | 是 | 否 |
-| 101 | keepColumnName | 是 | 否 |
-| 102 | ratioOfQueryCores | 是 | 否 |
-| 103 | maxStreamCompDelay | 是 | 否 |
-| 104 | maxFirstStreamCompDelay | 是 | 否 |
-| 105 | retryStreamCompDelay | 是 | 否 |
-| 106 | streamCompDelayRatio | 是 | 否 |
-| 107 | maxVgroupsPerDb | 是 | 否 |
-| 108 | maxTablesPerVnode | 是 | 否 |
-| 109 | minTablesPerVnode | 是 | 否 |
-| 110 | tableIncStepPerVnode | 是 | 否 |
-| 111 | cache | 是 | 否 |
-| 112 | blocks | 是 | 否 |
-| 113 | days | 是 | 否 |
-| 114 | keep | 是 | 否 |
-| 115 | minRows | 是 | 否 |
-| 116 | maxRows | 是 | 否 |
-| 117 | quorum | 是 | 否 |
-| 118 | comp | 是 | 否 |
-| 119 | walLevel | 是 | 否 |
-| 120 | fsync | 是 | 否 |
-| 121 | replica | 是 | 否 |
-| 122 | partitions | 是 | 否 |
-| 123 | quorum | 是 | 否 |
-| 124 | update | 是 | 否 |
-| 125 | cachelast | 是 | 否 |
-| 126 | maxSQLLength | 是 | 否 |
-| 127 | maxWildCardsLength | 是 | 否 |
-| 128 | maxRegexStringLen | 是 | 否 |
-| 129 | maxNumOfOrderedRes | 是 | 否 |
-| 130 | maxConnections | 是 | 否 |
-| 131 | mnodeEqualVnodeNum | 是 | 否 |
-| 132 | http | 是 | 否 |
-| 133 | httpEnableRecordSql | 是 | 否 |
-| 134 | httpMaxThreads | 是 | 否 |
-| 135 | restfulRowLimit | 是 | 否 |
-| 136 | httpDbNameMandatory | 是 | 否 |
-| 137 | httpKeepAlive | 是 | 否 |
-| 138 | enableRecordSql | 是 | 否 |
-| 139 | maxBinaryDisplayWidth | 是 | 否 |
-| 140 | stream | 是 | 否 |
-| 141 | retrieveBlockingModel | 是 | 否 |
-| 142 | tsdbMetaCompactRatio | 是 | 否 |
-| 143 | defaultJSONStrType | 是 | 否 |
-| 144 | walFlushSize | 是 | 否 |
-| 145 | keepTimeOffset | 是 | 否 |
-| 146 | flowctrl | 是 | 否 |
-| 147 | slaveQuery | 是 | 否 |
-| 148 | adjustMaster | 是 | 否 |
-| 149 | topicBinaryLen | 是 | 否 |
-| 150 | telegrafUseFieldNum | 是 | 否 |
-| 151 | deadLockKillQuery | 是 | 否 |
-| 152 | clientMerge | 是 | 否 |
-| 153 | sdbDebugFlag | 是 | 否 |
-| 154 | odbcDebugFlag | 是 | 否 |
-| 155 | httpDebugFlag | 是 | 否 |
-| 156 | monDebugFlag | 是 | 否 |
-| 157 | cqDebugFlag | 是 | 否 |
-| 158 | shortcutFlag | 是 | 否 |
-| 159 | probeSeconds | 是 | 否 |
-| 160 | probeKillSeconds | 是 | 否 |
-| 161 | probeInterval | 是 | 否 |
-| 162 | lossyColumns | 是 | 否 |
-| 163 | fPrecision | 是 | 否 |
-| 164 | dPrecision | 是 | 否 |
-| 165 | maxRange | 是 | 否 |
-| 166 | range | 是 | 否 |
+| 48 | numOfVnodeRsmaThreads | 否 | 是 |
+| 49 | numOfQnodeQueryThreads | 否 | 是 |
+| 50 | numOfQnodeFetchThreads | 否 | 是 |
+| 51 | numOfSnodeSharedThreads | 否 | 是 |
+| 52 | numOfSnodeUniqueThreads | 否 | 是 |
+| 53 | rpcQueueMemoryAllowed | 否 | 是 |
+| 54 | logDir | 是 | 是 |
+| 55 | minimalLogDirGB | 是 | 是 |
+| 56 | numOfLogLines | 是 | 是 |
+| 57 | asyncLog | 是 | 是 |
+| 58 | logKeepDays | 是 | 是 |
+| 59 | debugFlag | 是 | 是 |
+| 60 | tmrDebugFlag | 是 | 是 |
+| 61 | uDebugFlag | 是 | 是 |
+| 62 | rpcDebugFlag | 是 | 是 |
+| 63 | jniDebugFlag | 是 | 是 |
+| 64 | qDebugFlag | 是 | 是 |
+| 65 | cDebugFlag | 是 | 是 |
+| 66 | dDebugFlag | 是 | 是 |
+| 67 | vDebugFlag | 是 | 是 |
+| 68 | mDebugFlag | 是 | 是 |
+| 69 | wDebugFlag | 是 | 是 |
+| 70 | sDebugFlag | 是 | 是 |
+| 71 | tsdbDebugFlag | 是 | 是 |
+| 72 | tqDebugFlag | 否 | 是 |
+| 73 | fsDebugFlag | 是 | 是 |
+| 74 | udfDebugFlag | 否 | 是 |
+| 75 | smaDebugFlag | 否 | 是 |
+| 76 | idxDebugFlag | 否 | 是 |
+| 77 | tdbDebugFlag | 否 | 是 |
+| 78 | metaDebugFlag | 否 | 是 |
+| 79 | timezone | 是 | 是 |
+| 80 | locale | 是 | 是 |
+| 81 | charset | 是 | 是 |
+| 82 | udf | 是 | 是 |
+| 83 | enableCoreFile | 是 | 是 |
+| 84 | arbitrator | 是 | 否 |
+| 85 | numOfThreadsPerCore | 是 | 否 |
+| 86 | numOfMnodes | 是 | 否 |
+| 87 | vnodeBak | 是 | 否 |
+| 88 | balance | 是 | 否 |
+| 89 | balanceInterval | 是 | 否 |
+| 90 | offlineThreshold | 是 | 否 |
+| 91 | role | 是 | 否 |
+| 92 | dnodeNopLoop | 是 | 否 |
+| 93 | keepTimeOffset | 是 | 否 |
+| 94 | rpcTimer | 是 | 否 |
+| 95 | rpcMaxTime | 是 | 否 |
+| 96 | rpcForceTcp | 是 | 否 |
+| 97 | tcpConnTimeout | 是 | 否 |
+| 98 | syncCheckInterval | 是 | 否 |
+| 99 | maxTmrCtrl | 是 | 否 |
+| 100 | monitorReplica | 是 | 否 |
+| 101 | smlTagNullName | 是 | 否 |
+| 102 | keepColumnName | 是 | 否 |
+| 103 | ratioOfQueryCores | 是 | 否 |
+| 104 | maxStreamCompDelay | 是 | 否 |
+| 105 | maxFirstStreamCompDelay | 是 | 否 |
+| 106 | retryStreamCompDelay | 是 | 否 |
+| 107 | streamCompDelayRatio | 是 | 否 |
+| 108 | maxVgroupsPerDb | 是 | 否 |
+| 109 | maxTablesPerVnode | 是 | 否 |
+| 110 | minTablesPerVnode | 是 | 否 |
+| 111 | tableIncStepPerVnode | 是 | 否 |
+| 112 | cache | 是 | 否 |
+| 113 | blocks | 是 | 否 |
+| 114 | days | 是 | 否 |
+| 115 | keep | 是 | 否 |
+| 116 | minRows | 是 | 否 |
+| 117 | maxRows | 是 | 否 |
+| 118 | quorum | 是 | 否 |
+| 119 | comp | 是 | 否 |
+| 120 | walLevel | 是 | 否 |
+| 121 | fsync | 是 | 否 |
+| 122 | replica | 是 | 否 |
+| 123 | partitions | 是 | 否 |
+| 124 | quorum | 是 | 否 |
+| 125 | update | 是 | 否 |
+| 126 | cachelast | 是 | 否 |
+| 127 | maxSQLLength | 是 | 否 |
+| 128 | maxWildCardsLength | 是 | 否 |
+| 129 | maxRegexStringLen | 是 | 否 |
+| 130 | maxNumOfOrderedRes | 是 | 否 |
+| 131 | maxConnections | 是 | 否 |
+| 132 | mnodeEqualVnodeNum | 是 | 否 |
+| 133 | http | 是 | 否 |
+| 134 | httpEnableRecordSql | 是 | 否 |
+| 135 | httpMaxThreads | 是 | 否 |
+| 136 | restfulRowLimit | 是 | 否 |
+| 137 | httpDbNameMandatory | 是 | 否 |
+| 138 | httpKeepAlive | 是 | 否 |
+| 139 | enableRecordSql | 是 | 否 |
+| 140 | maxBinaryDisplayWidth | 是 | 否 |
+| 141 | stream | 是 | 否 |
+| 142 | retrieveBlockingModel | 是 | 否 |
+| 143 | tsdbMetaCompactRatio | 是 | 否 |
+| 144 | defaultJSONStrType | 是 | 否 |
+| 145 | walFlushSize | 是 | 否 |
+| 146 | keepTimeOffset | 是 | 否 |
+| 147 | flowctrl | 是 | 否 |
+| 148 | slaveQuery | 是 | 否 |
+| 149 | adjustMaster | 是 | 否 |
+| 150 | topicBinaryLen | 是 | 否 |
+| 151 | telegrafUseFieldNum | 是 | 否 |
+| 152 | deadLockKillQuery | 是 | 否 |
+| 153 | clientMerge | 是 | 否 |
+| 154 | sdbDebugFlag | 是 | 否 |
+| 155 | odbcDebugFlag | 是 | 否 |
+| 156 | httpDebugFlag | 是 | 否 |
+| 157 | monDebugFlag | 是 | 否 |
+| 158 | cqDebugFlag | 是 | 否 |
+| 159 | shortcutFlag | 是 | 否 |
+| 160 | probeSeconds | 是 | 否 |
+| 161 | probeKillSeconds | 是 | 否 |
+| 162 | probeInterval | 是 | 否 |
+| 163 | lossyColumns | 是 | 否 |
+| 164 | fPrecision | 是 | 否 |
+| 165 | dPrecision | 是 | 否 |
+| 166 | maxRange | 是 | 否 |
+| 167 | range | 是 | 否 |
diff --git a/docs/zh/14-reference/13-schemaless/13-schemaless.md b/docs/zh/14-reference/13-schemaless/13-schemaless.md
index ae4280e26a64e2d10534a0faaf70ca0704cf58a6..a33abafaf82746afbf5669c6ea564b5a87060bb8 100644
--- a/docs/zh/14-reference/13-schemaless/13-schemaless.md
+++ b/docs/zh/14-reference/13-schemaless/13-schemaless.md
@@ -3,7 +3,7 @@ title: Schemaless 写入
description: 'Schemaless 写入方式,可以免于预先创建超级表/子表的步骤,随着数据写入接口能够自动创建与数据对应的存储结构'
---
-在物联网应用中,常会采集比较多的数据项,用于实现智能控制、业务分析、设备监控等。由于应用逻辑的版本升级,或者设备自身的硬件调整等原因,数据采集项就有可能比较频繁地出现变动。为了在这种情况下方便地完成数据记录工作,TDengine提供调用 Schemaless 写入方式,可以免于预先创建超级表/子表的步骤,随着数据写入接口能够自动创建与数据对应的存储结构。并且在必要时,Schemaless
+在物联网应用中,常会采集比较多的数据项,用于实现智能控制、业务分析、设备监控等。由于应用逻辑的版本升级,或者设备自身的硬件调整等原因,数据采集项就有可能比较频繁地出现变动。为了在这种情况下方便地完成数据记录工作,TDengine 提供调用 Schemaless 写入方式,可以免于预先创建超级表/子表的步骤,随着数据写入接口能够自动创建与数据对应的存储结构。并且在必要时,Schemaless
将自动增加必要的数据列,保证用户写入的数据可以被正确存储。
无模式写入方式建立的超级表及其对应的子表与通过 SQL 直接建立的超级表和子表完全没有区别,你也可以通过,SQL 语句直接向其中写入数据。需要注意的是,通过无模式写入方式建立的表,其表名是基于标签值按照固定的映射规则生成,所以无法明确地进行表意,缺乏可读性。
@@ -36,14 +36,14 @@ tag_set 中的所有的数据自动转化为 nchar 数据类型,并不需要
- 对空格、等号(=)、逗号(,)、双引号("),前面需要使用反斜杠(\)进行转义。(都指的是英文半角符号)
- 数值类型将通过后缀来区分数据类型:
-| **序号** | **后缀** | **映射类型** | **大小(字节)** |
-| -------- | -------- | ------------ | -------------- |
-| 1 | 无或 f64 | double | 8 |
-| 2 | f32 | float | 4 |
-| 3 | i8/u8 | TinyInt/UTinyInt | 1 |
-| 4 | i16/u16 | SmallInt/USmallInt | 2 |
-| 5 | i32/u32 | Int/UInt | 4 |
-| 6 | i64/i/u64/u | BigInt/BigInt/UBigInt/UBigInt | 8 |
+| **序号** | **后缀** | **映射类型** | **大小(字节)** |
+| -------- | ----------- | ----------------------------- | -------------- |
+| 1 | 无或 f64 | double | 8 |
+| 2 | f32 | float | 4 |
+| 3 | i8/u8 | TinyInt/UTinyInt | 1 |
+| 4 | i16/u16 | SmallInt/USmallInt | 2 |
+| 5 | i32/u32 | Int/UInt | 4 |
+| 6 | i64/i/u64/u | BigInt/BigInt/UBigInt/UBigInt | 8 |
- t, T, true, True, TRUE, f, F, false, False 将直接作为 BOOL 型来处理。
@@ -69,7 +69,7 @@ st,t1=3,t2=4,t3=t3 c1=3i64,c3="passit",c2=false,c4=4f64 1626006833639000000
需要注意的是,这里的 tag_key1, tag_key2 并不是用户输入的标签的原始顺序,而是使用了标签名称按照字符串升序排列后的结果。所以,tag_key1 并不是在行协议中输入的第一个标签。
排列完成以后计算该字符串的 MD5 散列值 "md5_val"。然后将计算的结果与字符串组合生成表名:“t_md5_val”。其中的 “t_” 是固定的前缀,每个通过该映射关系自动生成的表都具有该前缀。
-为了让用户可以指定生成的表名,可以通过配置smlChildTableName来指定(比如 配置smlChildTableName=tname 插入数据为st,tname=cpu1,t1=4 c1=3 1626006833639000000 则创建的表名为cpu1,注意如果多行数据tname相同,但是后面的tag_set不同,则使用第一次自动建表时指定的tag_set,其他的会忽略)。
+为了让用户可以指定生成的表名,可以通过配置 smlChildTableName 来指定(比如 配置 smlChildTableName=tname 插入数据为 st,tname=cpu1,t1=4 c1=3 1626006833639000000 则创建的表名为 cpu1,注意如果多行数据 tname 相同,但是后面的 tag_set 不同,则使用第一次自动建表时指定的 tag_set,其他的会忽略)。
2. 如果解析行协议获得的超级表不存在,则会创建这个超级表(不建议手动创建超级表,不然插入数据可能异常)。
3. 如果解析行协议获得子表不存在,则 Schemaless 会按照步骤 1 或 2 确定的子表名来创建子表。
@@ -78,11 +78,11 @@ st,t1=3,t2=4,t3=t3 c1=3i64,c3="passit",c2=false,c4=4f64 1626006833639000000
NULL。
6. 对 BINARY 或 NCHAR 列,如果数据行中所提供值的长度超出了列类型的限制,自动增加该列允许存储的字符长度上限(只增不减),以保证数据的完整保存。
7. 整个处理过程中遇到的错误会中断写入过程,并返回错误代码。
-8. 为了提高写入的效率,默认假设同一个超级表中field_set的顺序是一样的(第一条数据包含所有的field,后面的数据按照这个顺序),如果顺序不一样,需要配置参数smlDataFormat为false,否则,数据写入按照相同顺序写入,库中数据会异常。
+8. 为了提高写入的效率,默认假设同一个超级表中 field_set 的顺序是一样的(第一条数据包含所有的 field,后面的数据按照这个顺序),如果顺序不一样,需要配置参数 smlDataFormat 为 false,否则,数据写入按照相同顺序写入,库中数据会异常。
:::tip
无模式所有的处理逻辑,仍会遵循 TDengine 对数据结构的底层限制,例如每行数据的总长度不能超过
-16KB。这方面的具体限制约束请参见 [TAOS SQL 边界限制](/taos-sql/limit)
+16KB。这方面的具体限制约束请参见 [TDengine SQL 边界限制](/taos-sql/limit)
:::
diff --git a/docs/zh/14-reference/14-taosKeeper.md b/docs/zh/14-reference/14-taosKeeper.md
index f1165c9d0f01b6812c261c6f095f38fca55c44d8..ae0a496f03e8e545525fce49ae2394a10696c09c 100644
--- a/docs/zh/14-reference/14-taosKeeper.md
+++ b/docs/zh/14-reference/14-taosKeeper.md
@@ -1,7 +1,7 @@
---
sidebar_label: taosKeeper
title: taosKeeper
-description: TDengine taosKeeper 使用说明
+description: TDengine 3.0 版本监控指标的导出工具
---
## 简介
@@ -22,26 +22,36 @@ taosKeeper 安装方式:
### 配置和运行方式
-
-taosKeeper 需要在操作系统终端执行,该工具支持 [配置文件启动](#配置文件启动)。
+taosKeeper 需要在操作系统终端执行,该工具支持三种配置方式:[命令行参数](#命令行参数启动)、[环境变量](#环境变量启动) 和 [配置文件](#配置文件启动)。优先级为:命令行参数、环境变量、配置文件参数。
**在运行 taosKeeper 之前要确保 TDengine 集群与 taosAdapter 已经在正确运行。** 并且 TDengine 已经开启监控服务,具体请参考:[TDengine 监控配置](../config/#监控相关)。
-
+
+### 环境变量启动
+
+通过设置环境变量达到控制启动参数的目的,通常在容器中运行时使用。
+
+```shell
+$ export TAOS_KEEPER_TDENGINE_HOST=192.168.64.3
+
+$ taoskeeper
+```
+
+具体参数列表请参照 `taoskeeper -h` 输入结果。
+
### 配置文件启动
执行以下命令即可快速体验 taosKeeper。当不指定 taosKeeper 配置文件时,优先使用 `/etc/taos/keeper.toml` 配置,否则将使用默认配置。
```shell
-taoskeeper -c
+$ taoskeeper -c
```
**下面是配置文件的示例:**
@@ -110,7 +120,7 @@ Query OK, 1 rows in database (0.036162s)
#### 导出监控指标
```shell
-curl http://127.0.0.1:6043/metrics
+$ curl http://127.0.0.1:6043/metrics
```
部分结果集:
diff --git a/docs/zh/14-reference/index.md b/docs/zh/14-reference/index.md
index e9c0c4fe236b8eefec1275a447c1dd1188921ee0..9d0a44af577beba67c445dac1cfcac0475e0ce3f 100644
--- a/docs/zh/14-reference/index.md
+++ b/docs/zh/14-reference/index.md
@@ -1,5 +1,6 @@
---
title: 参考手册
+description: TDengine 中的各种组件的详细说明
---
参考手册是对 TDengine 本身、 TDengine 各语言连接器及自带的工具最详细的介绍。
diff --git a/docs/zh/17-operation/01-pkg-install.md b/docs/zh/17-operation/01-pkg-install.md
index 5e4cc931309ea8bf45b1840a7da04e336434bdab..6d93c1697b1e0936b3f6539d3b1fb95db0baa956 100644
--- a/docs/zh/17-operation/01-pkg-install.md
+++ b/docs/zh/17-operation/01-pkg-install.md
@@ -47,43 +47,99 @@ lrwxrwxrwx 1 root root 13 Feb 22 09:34 log -> /var/log/taos/
-内容 TBD
+TDengine 卸载命令如下:
+
+```
+$ sudo apt-get remove tdengine
+Reading package lists... Done
+Building dependency tree
+Reading state information... Done
+The following packages will be REMOVED:
+ tdengine
+0 upgraded, 0 newly installed, 1 to remove and 18 not upgraded.
+After this operation, 68.3 MB disk space will be freed.
+Do you want to continue? [Y/n] y
+(Reading database ... 135625 files and directories currently installed.)
+Removing tdengine (3.0.0.0) ...
+TDengine is removed successfully!
+
+```
+
+taosTools 卸载命令如下:
+
+```
+$ sudo apt remove taostools
+Reading package lists... Done
+Building dependency tree
+Reading state information... Done
+The following packages will be REMOVED:
+ taostools
+0 upgraded, 0 newly installed, 1 to remove and 0 not upgraded.
+After this operation, 68.3 MB disk space will be freed.
+Do you want to continue? [Y/n]
+(Reading database ... 147973 files and directories currently installed.)
+Removing taostools (2.1.2) ...
+```
-卸载命令如下:
+TDengine 卸载命令如下:
```
$ sudo dpkg -r tdengine
(Reading database ... 120119 files and directories currently installed.)
-Removing tdengine (3.0.0.10002) ...
+Removing tdengine (3.0.0.0) ...
TDengine is removed successfully!
```
+taosTools 卸载命令如下:
+
+```
+$ sudo dpkg -r taostools
+(Reading database ... 147973 files and directories currently installed.)
+Removing taostools (2.1.2) ...
+```
+
-卸载命令如下:
+卸载 TDengine 命令如下:
```
$ sudo rpm -e tdengine
TDengine is removed successfully!
```
+卸载 taosTools 命令如下:
+
+```
+sudo rpm -e taostools
+taosTools is removed successfully!
+```
+
-卸载命令如下:
+卸载 TDengine 命令如下:
```
$ rmtaos
TDengine is removed successfully!
```
+卸载 taosTools 命令如下:
+
+```
+$ rmtaostools
+Start to uninstall taos tools ...
+
+taos tools is uninstalled successfully!
+```
+
在 C:\TDengine 目录下,通过运行 unins000.exe 卸载程序来卸载 TDengine。
diff --git a/docs/zh/17-operation/02-planning.mdx b/docs/zh/17-operation/02-planning.mdx
index 0d63c4eaf365036cbba1d838ba6ee860a894724d..28e3f54020632e84721c20a9f63ee2a6117e6a03 100644
--- a/docs/zh/17-operation/02-planning.mdx
+++ b/docs/zh/17-operation/02-planning.mdx
@@ -1,6 +1,7 @@
---
sidebar_label: 容量规划
title: 容量规划
+description: 如何规划一个 TDengine 集群所需的物理资源
---
使用 TDengine 来搭建一个物联网大数据平台,计算资源、存储资源需要根据业务场景进行规划。下面分别讨论系统运行所需要的内存、CPU 以及硬盘空间。
diff --git a/docs/zh/17-operation/03-tolerance.md b/docs/zh/17-operation/03-tolerance.md
index 1ce485b042d6900ccc1c1bc3bcb6779e14b776ff..79cf10c39a7028e04e7c1ebbea54738dcdc528af 100644
--- a/docs/zh/17-operation/03-tolerance.md
+++ b/docs/zh/17-operation/03-tolerance.md
@@ -1,5 +1,7 @@
---
title: 容错和灾备
+sidebar_label: 容错和灾备
+description: TDengine 的容错和灾备功能
---
## 容错
diff --git a/docs/zh/17-operation/07-import.md b/docs/zh/17-operation/07-import.md
index 7dee05720d4c3446181e8e0d81a5c27e35300ba8..17945be595f9176a528e52d2344b5cd0545c3426 100644
--- a/docs/zh/17-operation/07-import.md
+++ b/docs/zh/17-operation/07-import.md
@@ -1,5 +1,6 @@
---
title: 数据导入
+description: 如何导入外部数据到 TDengine
---
TDengine 提供多种方便的数据导入功能,一种按脚本文件导入,一种按数据文件导入,一种是 taosdump 工具导入本身导出的文件。
diff --git a/docs/zh/17-operation/08-export.md b/docs/zh/17-operation/08-export.md
index 042ecc7ba29f976d50bbca1e3155bd03b2ae7ccc..44247e28bdf5ec48ccd05ab6f7e4d3558cf23103 100644
--- a/docs/zh/17-operation/08-export.md
+++ b/docs/zh/17-operation/08-export.md
@@ -1,12 +1,13 @@
---
title: 数据导出
+description: 如何导出 TDengine 中的数据
---
为方便数据导出,TDengine 提供了两种导出方式,分别是按表导出和用 taosdump 导出。
## 按表导出 CSV 文件
-如果用户需要导出一个表或一个 STable 中的数据,可在 taos shell 中运行:
+如果用户需要导出一个表或一个 STable 中的数据,可在 TDengine CLI 中运行:
```sql
select * from >> data.csv;
diff --git a/docs/zh/17-operation/10-monitor.md b/docs/zh/17-operation/10-monitor.md
index 9f0f06fde217faec851ccf0f0357241536f78625..e936f35dcac544ad94035b5e5c9716c4aa50562e 100644
--- a/docs/zh/17-operation/10-monitor.md
+++ b/docs/zh/17-operation/10-monitor.md
@@ -1,5 +1,6 @@
---
title: 系统监控
+description: 监控 TDengine 的运行状态
---
TDengine 通过 [taosKeeper](/reference/taosKeeper/) 将服务器的 CPU、内存、硬盘空间、带宽、请求数、磁盘读写速度等信息定时写入指定数据库。TDengine 还将重要的系统操作(比如登录、创建、删除数据库等)日志以及各种错误报警信息进行记录。系统管理员可以从 CLI 直接查看这个数据库,也可以在 WEB 通过图形化界面查看这些监测信息。
diff --git a/docs/zh/17-operation/17-diagnose.md b/docs/zh/17-operation/17-diagnose.md
index e6e9be7153dee855867c4ba4fcd1d3258c9d788f..ec529096a7513bd625131939d67c61279721b961 100644
--- a/docs/zh/17-operation/17-diagnose.md
+++ b/docs/zh/17-operation/17-diagnose.md
@@ -1,5 +1,6 @@
---
title: 诊断及其他
+description: 一些常见问题的诊断技巧
---
## 网络连接诊断
diff --git a/docs/zh/20-third-party/01-grafana.mdx b/docs/zh/20-third-party/01-grafana.mdx
index becb1a70a908ad27a93a763ac46343b0ec46769d..83f3f8bb25de4b99a345bafab7e8a43c3d35f14e 100644
--- a/docs/zh/20-third-party/01-grafana.mdx
+++ b/docs/zh/20-third-party/01-grafana.mdx
@@ -1,6 +1,7 @@
---
sidebar_label: Grafana
title: Grafana
+description: 使用 Grafana 与 TDengine 的详细说明
---
import Tabs from "@theme/Tabs";
diff --git a/docs/zh/20-third-party/02-prometheus.md b/docs/zh/20-third-party/02-prometheus.md
index 0fe534b8df263064e5269e1732b69893efd7a79a..eb6c3bf1d0b5f6e5d8146566969df41dbad5bf99 100644
--- a/docs/zh/20-third-party/02-prometheus.md
+++ b/docs/zh/20-third-party/02-prometheus.md
@@ -1,6 +1,7 @@
---
sidebar_label: Prometheus
title: Prometheus
+description: 使用 Prometheus 访问 TDengine
---
import Prometheus from "../14-reference/_prometheus.mdx"
diff --git a/docs/zh/20-third-party/03-telegraf.md b/docs/zh/20-third-party/03-telegraf.md
index 88a69211c0592940d7f75d34c03bcc0593cd74d6..84883e665a84db89d564314a0e47f9caab04d6ff 100644
--- a/docs/zh/20-third-party/03-telegraf.md
+++ b/docs/zh/20-third-party/03-telegraf.md
@@ -1,6 +1,7 @@
---
sidebar_label: Telegraf
title: Telegraf 写入
+description: 使用 Telegraf 向 TDengine 写入数据
---
import Telegraf from "../14-reference/_telegraf.mdx"
diff --git a/docs/zh/20-third-party/05-collectd.md b/docs/zh/20-third-party/05-collectd.md
index 04892fd42e92e962fcccadf626f67c432e78d286..cc2235f2600ec44425a2f22f39dc3c58a4ccdd5a 100644
--- a/docs/zh/20-third-party/05-collectd.md
+++ b/docs/zh/20-third-party/05-collectd.md
@@ -1,6 +1,7 @@
---
sidebar_label: collectd
title: collectd 写入
+description: 使用 collectd 向 TDengine 写入数据
---
import CollectD from "../14-reference/_collectd.mdx"
diff --git a/docs/zh/20-third-party/06-statsd.md b/docs/zh/20-third-party/06-statsd.md
index 260d01183598826e1c887164d0b1b146c5e80c95..122c9fd94c57ef4979d432e2a45cc5136b1644b2 100644
--- a/docs/zh/20-third-party/06-statsd.md
+++ b/docs/zh/20-third-party/06-statsd.md
@@ -1,6 +1,7 @@
---
sidebar_label: StatsD
title: StatsD 直接写入
+description: 使用 StatsD 向 TDengine 写入
---
import StatsD from "../14-reference/_statsd.mdx"
diff --git a/docs/zh/20-third-party/07-icinga2.md b/docs/zh/20-third-party/07-icinga2.md
index ed1f1404a730eca5f51e2ff9bbcd54949018f8ea..06ead57655cfad7bcf88945780dbed52e9c58e16 100644
--- a/docs/zh/20-third-party/07-icinga2.md
+++ b/docs/zh/20-third-party/07-icinga2.md
@@ -1,6 +1,7 @@
---
sidebar_label: icinga2
title: icinga2 写入
+description: 使用 icinga2 写入 TDengine
---
import Icinga2 from "../14-reference/_icinga2.mdx"
diff --git a/docs/zh/20-third-party/08-tcollector.md b/docs/zh/20-third-party/08-tcollector.md
index a1245e8c27f302d56f88fa382b5f38f9bd49a0aa..78d0b4a5dfda0c1a18908f5a0f5f9314e82e3737 100644
--- a/docs/zh/20-third-party/08-tcollector.md
+++ b/docs/zh/20-third-party/08-tcollector.md
@@ -1,6 +1,7 @@
---
sidebar_label: TCollector
title: TCollector 写入
+description: 使用 TCollector 写入 TDengine
---
import TCollector from "../14-reference/_tcollector.mdx"
diff --git a/docs/zh/20-third-party/09-emq-broker.md b/docs/zh/20-third-party/09-emq-broker.md
index dd98374558080a0ea11cbc22ede58b66a3984191..782a139e223456d0f3484d282d641075be1a3f81 100644
--- a/docs/zh/20-third-party/09-emq-broker.md
+++ b/docs/zh/20-third-party/09-emq-broker.md
@@ -1,6 +1,7 @@
---
sidebar_label: EMQX Broker
title: EMQX Broker 写入
+description: 使用 EMQX Broker 写入 TDengine
---
MQTT 是流行的物联网数据传输协议,[EMQX](https://github.com/emqx/emqx)是一开源的 MQTT Broker 软件,无需任何代码,只需要在 EMQX Dashboard 里使用“规则”做简单配置,即可将 MQTT 的数据直接写入 TDengine。EMQX 支持通过 发送到 Web 服务的方式保存数据到 TDengine,也在企业版上提供原生的 TDengine 驱动实现直接保存。
@@ -90,7 +91,7 @@ http://127.0.0.1:6041/rest/sql
```
Basic cm9vdDp0YW9zZGF0YQ==
```
-相关文档请参考[ TDengine REST API 文档](/reference/rest-api/)。
+相关文档请参考[ TDengine REST API 文档](../../connector/rest-api/)。
在消息体中输入规则引擎替换模板:
diff --git a/docs/zh/20-third-party/10-hive-mq-broker.md b/docs/zh/20-third-party/10-hive-mq-broker.md
index f75ed793d6272ae27f92676e2096ef455f638aa6..a388ff6daff41aa6f74af646f6121a360da56f36 100644
--- a/docs/zh/20-third-party/10-hive-mq-broker.md
+++ b/docs/zh/20-third-party/10-hive-mq-broker.md
@@ -1,6 +1,7 @@
---
sidebar_label: HiveMQ Broker
title: HiveMQ Broker 写入
+description: 使用 HiveMQ Broker 写入 TDengine
---
[HiveMQ](https://www.hivemq.com/) 是一个提供免费个人版和企业版的 MQTT 代理,主要用于企业和新兴的机器到机器 M2M 通讯和内部传输,满足可伸缩性、易管理和安全特性。HiveMQ 提供了开源的插件开发包。可以通过 HiveMQ extension - TDengine 保存数据到 TDengine。详细使用方法请参考 [HiveMQ extension - TDengine 说明文档](https://github.com/huskar-t/hivemq-tdengine-extension/blob/b62a26ecc164a310104df57691691b237e091c89/README.md)。
diff --git a/docs/zh/20-third-party/11-kafka.md b/docs/zh/20-third-party/11-kafka.md
index 8369806adcfe1b195348e7d60160609cde9150e8..1172f4fbc5bcd9f240bd5e2a47108a8791810e76 100644
--- a/docs/zh/20-third-party/11-kafka.md
+++ b/docs/zh/20-third-party/11-kafka.md
@@ -1,6 +1,7 @@
---
sidebar_label: Kafka
-title: TDengine Kafka Connector 使用教程
+title: TDengine Kafka Connector
+description: 使用 TDengine Kafka Connector 的详细指南
---
TDengine Kafka Connector 包含两个插件: TDengine Source Connector 和 TDengine Sink Connector。用户只需提供简单的配置文件,就可以将 Kafka 中指定 topic 的数据(批量或实时)同步到 TDengine, 或将 TDengine 中指定数据库的数据(批量或实时)同步到 Kafka。
@@ -184,7 +185,7 @@ echo `cat /tmp/confluent.current`/connect/connect.stdout
TDengine Sink Connector 的作用是同步指定 topic 的数据到 TDengine。用户无需提前创建数据库和超级表。可手动指定目标数据库的名字(见配置参数 connection.database), 也可按一定规则生成(见配置参数 connection.database.prefix)。
-TDengine Sink Connector 内部使用 TDengine [无模式写入接口](/reference/connector/cpp#无模式写入-api)写数据到 TDengine,目前支持三种格式的数据:[InfluxDB 行协议格式](/develop/insert-data/influxdb-line)、 [OpenTSDB Telnet 协议格式](/develop/insert-data/opentsdb-telnet) 和 [OpenTSDB JSON 协议格式](/develop/insert-data/opentsdb-json)。
+TDengine Sink Connector 内部使用 TDengine [无模式写入接口](../../connector/cpp#无模式写入-api)写数据到 TDengine,目前支持三种格式的数据:[InfluxDB 行协议格式](/develop/insert-data/influxdb-line)、 [OpenTSDB Telnet 协议格式](/develop/insert-data/opentsdb-telnet) 和 [OpenTSDB JSON 协议格式](/develop/insert-data/opentsdb-json)。
下面的示例将主题 meters 的数据,同步到目标数据库 power。数据格式为 InfluxDB Line 协议格式。
diff --git a/docs/zh/20-third-party/12-google-data-studio.md b/docs/zh/20-third-party/12-google-data-studio.md
new file mode 100644
index 0000000000000000000000000000000000000000..bc06f0ea3261bcd93247e0c7b8e1d6c3628f3121
--- /dev/null
+++ b/docs/zh/20-third-party/12-google-data-studio.md
@@ -0,0 +1,39 @@
+---
+sidebar_label: Google Data Studio
+title: TDengine Google Data Studio Connector
+description: 使用 Google Data Studio 存取 TDengine 数据的详细指南
+---
+
+Google Data Studio 是一个强大的报表可视化工具,它提供了丰富的数据图表和数据连接,可以非常方便地按照既定模板生成报表。因其简便易用和生态丰富而在数据分析领域得到一众数据科学家的青睐。
+
+Data Studio 可以支持多种数据来源,除了诸如 Google Analytics、Google AdWords、Search Console、BigQuery 等 Google 自己的服务之外,用户也可以直接将离线文件上传至 Google Cloud Storage,或是通过连接器来接入其它数据源。
+
+
+
+目前 TDengine 连接器已经发布到 Google Data Studio 应用商店,你可以在 “Connect to Data” 页面下直接搜索 TDengine,将其选作数据源。
+
+
+
+接下来选择 AUTHORIZE 按钮。
+
+
+
+设置允许连接自己的账号到外部服务。
+
+
+
+在接下来的页面选择运行 TDengine REST 服务的 URL,并输入用户名、密码、数据库名称、表名称以及查询时间范围,并点击右上角的 CONNECT 按钮。
+
+
+
+连接成功后,就可以使用 GDS 方便地进行数据处理并创建报表了。
+
+
+
+目前的维度和指标规则是:timestamp 类型的字段和 tag 字段会被连接器定义为维度,而其他类型的字段是指标。用户还可以根据自己的需求创建不同的表。
+
+
+
+
+
+
diff --git a/docs/zh/20-third-party/gds/gds-01.webp b/docs/zh/20-third-party/gds/gds-01.webp
new file mode 100644
index 0000000000000000000000000000000000000000..2e5f9e4ff5db1e37718e2397c9a13a9f0e05602d
Binary files /dev/null and b/docs/zh/20-third-party/gds/gds-01.webp differ
diff --git a/docs/zh/20-third-party/gds/gds-02.png.webp b/docs/zh/20-third-party/gds/gds-02.png.webp
new file mode 100644
index 0000000000000000000000000000000000000000..3b3537f5a488019482f94452e70bd1bd79867ab5
Binary files /dev/null and b/docs/zh/20-third-party/gds/gds-02.png.webp differ
diff --git a/docs/zh/20-third-party/gds/gds-03.png.webp b/docs/zh/20-third-party/gds/gds-03.png.webp
new file mode 100644
index 0000000000000000000000000000000000000000..5719436d5b2f21aa861067b966511e4b34d17dce
Binary files /dev/null and b/docs/zh/20-third-party/gds/gds-03.png.webp differ
diff --git a/docs/zh/20-third-party/gds/gds-04.png.webp b/docs/zh/20-third-party/gds/gds-04.png.webp
new file mode 100644
index 0000000000000000000000000000000000000000..ddaae5c1a63b6b4db692e12491df55b88dcaadee
Binary files /dev/null and b/docs/zh/20-third-party/gds/gds-04.png.webp differ
diff --git a/docs/zh/20-third-party/gds/gds-05.png.webp b/docs/zh/20-third-party/gds/gds-05.png.webp
new file mode 100644
index 0000000000000000000000000000000000000000..9a917678fc7e60f0a739fa1e2b0f4fa010d12708
Binary files /dev/null and b/docs/zh/20-third-party/gds/gds-05.png.webp differ
diff --git a/docs/zh/20-third-party/gds/gds-06.png.webp b/docs/zh/20-third-party/gds/gds-06.png.webp
new file mode 100644
index 0000000000000000000000000000000000000000..c76b68d32b5907bd5ba4e4010456f2ca5303448f
Binary files /dev/null and b/docs/zh/20-third-party/gds/gds-06.png.webp differ
diff --git a/docs/zh/20-third-party/gds/gds-07.png.webp b/docs/zh/20-third-party/gds/gds-07.png.webp
new file mode 100644
index 0000000000000000000000000000000000000000..1386ae9c4db4f2465dd071afc5a047658b47031c
Binary files /dev/null and b/docs/zh/20-third-party/gds/gds-07.png.webp differ
diff --git a/docs/zh/20-third-party/gds/gds-08.png.webp b/docs/zh/20-third-party/gds/gds-08.png.webp
new file mode 100644
index 0000000000000000000000000000000000000000..59dcf8b31df8bde8d4073ee0c7b1c7bdd7bd439d
Binary files /dev/null and b/docs/zh/20-third-party/gds/gds-08.png.webp differ
diff --git a/docs/zh/20-third-party/gds/gds-09.png.webp b/docs/zh/20-third-party/gds/gds-09.png.webp
new file mode 100644
index 0000000000000000000000000000000000000000..b94439f211a814f66d41231c9386c57f3ffe8322
Binary files /dev/null and b/docs/zh/20-third-party/gds/gds-09.png.webp differ
diff --git a/docs/zh/20-third-party/gds/gds-10.png.webp b/docs/zh/20-third-party/gds/gds-10.png.webp
new file mode 100644
index 0000000000000000000000000000000000000000..a63cad9e9a3d412b1132359506530498fb1a0e57
Binary files /dev/null and b/docs/zh/20-third-party/gds/gds-10.png.webp differ
diff --git a/docs/zh/20-third-party/gds/gds-11.png.webp b/docs/zh/20-third-party/gds/gds-11.png.webp
new file mode 100644
index 0000000000000000000000000000000000000000..fc38cd9a29c00afa48238741c33b439f737a7b8f
Binary files /dev/null and b/docs/zh/20-third-party/gds/gds-11.png.webp differ
diff --git a/docs/zh/21-tdinternal/01-arch.md b/docs/zh/21-tdinternal/01-arch.md
index a910c584d6ba47844d51e45e5010581075a72fb6..704524fd210152af34e15d248d3d4dbe050e4fef 100644
--- a/docs/zh/21-tdinternal/01-arch.md
+++ b/docs/zh/21-tdinternal/01-arch.md
@@ -1,6 +1,7 @@
---
sidebar_label: 整体架构
title: 整体架构
+description: TDengine 架构设计,包括:集群、存储、缓存与持久化、数据备份、多级存储等
---
## 集群与基本逻辑单元
@@ -287,7 +288,7 @@ TDengine 对每个数据采集点单独建表,但在实际应用中经常需
7. vnode 返回本节点的查询计算结果;
8. qnode 完成多节点数据聚合后将最终查询结果返回给客户端;
-由于 TDengine 在 vnode 内将标签数据与时序数据分离存储,通过在内存里过滤标签数据,先找到需要参与聚合操作的表的集合,将需要扫描的数据集大幅减少,大幅提升聚合计算速度。同时,由于数据分布在多个 vnode/dnode,聚合计算操作在多个 vnode 里并发进行,又进一步提升了聚合的速度。 对普通表的聚合函数以及绝大部分操作都适用于超级表,语法完全一样,细节请看 TAOS SQL。
+由于 TDengine 在 vnode 内将标签数据与时序数据分离存储,通过在内存里过滤标签数据,先找到需要参与聚合操作的表的集合,将需要扫描的数据集大幅减少,大幅提升聚合计算速度。同时,由于数据分布在多个 vnode/dnode,聚合计算操作在多个 vnode 里并发进行,又进一步提升了聚合的速度。 对普通表的聚合函数以及绝大部分操作都适用于超级表,语法完全一样,细节请看 TDengine SQL。
### 预计算
diff --git a/docs/zh/21-tdinternal/03-high-availability.md b/docs/zh/21-tdinternal/03-high-availability.md
index ba056b6f162df90fcb271fe536a2b24d0745f75a..4cdf04f6d14d73a819f90bc2317a713c90fa9b91 100644
--- a/docs/zh/21-tdinternal/03-high-availability.md
+++ b/docs/zh/21-tdinternal/03-high-availability.md
@@ -1,5 +1,6 @@
---
title: 高可用
+description: TDengine 的高可用设计
---
## Vnode 的高可用性
diff --git a/docs/zh/21-tdinternal/05-load-balance.md b/docs/zh/21-tdinternal/05-load-balance.md
index 2376dd3e612a00006eaf2fc7b1782da3901908bc..07af2328d52573343fb28c045b25785f6822191f 100644
--- a/docs/zh/21-tdinternal/05-load-balance.md
+++ b/docs/zh/21-tdinternal/05-load-balance.md
@@ -1,5 +1,6 @@
---
title: 负载均衡
+description: TDengine 的负载均衡设计
---
TDengine 中的负载均衡主要指对时序数据的处理的负载均衡。TDengine 采用 Hash 一致性算法将一个数据库中的所有表和子表的数据均衡分散在属于该数据库的所有 vgroup 中,每张表或子表只能由一个 vgroup 处理,一个 vgroup 可能负责处理多个表或子表。
@@ -7,7 +8,7 @@ TDengine 中的负载均衡主要指对时序数据的处理的负载均衡。TD
创建数据库时可以指定其中的 vgroup 的数量:
```sql
-create database db0 vgroups 100;
+create database db0 vgroups 20;
```
如何指定合适的 vgroup 的数量,这取决于系统资源。假定系统中只计划建立一个数据库,则 vgroup 数量由集群中所有 dnode 所能使用的资源决定。原则上可用的 CPU 和 Memory 越多,可建立的 vgroup 也越多。但也要考虑到磁盘性能,过多的 vgroup 在磁盘性能达到上限后反而会拖累整个系统的性能。假如系统中会建立多个数据库,则多个数据库的 vgroup 之和取决于系统中可用资源的数量。要综合考虑多个数据库之间表的数量、写入频率、数据量等多个因素在多个数据库之间分配 vgroup。实际中建议首先根据系统资源配置选择一个初始的 vgroup 数量,比如 CPU 总核数的 2 倍,以此为起点通过测试找到最佳的 vgroup 数量配置,此为系统中的 vgroup 总数。如果有多个数据库的话,再根据各个数据库的表数和数据量对 vgroup 进行分配。
diff --git a/docs/zh/21-tdinternal/index.md b/docs/zh/21-tdinternal/index.md
index 63a746623e0dd955f61ba887a76f8ecf7eb16972..21f106edc999972f9e1cc4b04bc8308878cee56a 100644
--- a/docs/zh/21-tdinternal/index.md
+++ b/docs/zh/21-tdinternal/index.md
@@ -1,5 +1,6 @@
---
title: 技术内幕
+description: TDengine 的内部设计
---
```mdx-code-block
diff --git a/docs/zh/25-application/01-telegraf.md b/docs/zh/25-application/01-telegraf.md
index a949fa97210d88b397c50da64957f719c3d7befa..4e9597f96454730ebcdee5adeebf55439923e8e7 100644
--- a/docs/zh/25-application/01-telegraf.md
+++ b/docs/zh/25-application/01-telegraf.md
@@ -1,6 +1,7 @@
---
sidebar_label: TDengine + Telegraf + Grafana
-title: 使用 TDengine + Telegraf + Grafana 快速搭建 IT 运维展示系统
+title: TDengine + Telegraf + Grafana
+description: 使用 TDengine + Telegraf + Grafana 快速搭建 IT 运维展示系统
---
## 背景介绍
diff --git a/docs/zh/25-application/02-collectd.md b/docs/zh/25-application/02-collectd.md
index 6bdebd50308293eabbf64dfbf999adfab4a0410a..c6230f48abb545e3064f406d9005a4a3ba8ea5ba 100644
--- a/docs/zh/25-application/02-collectd.md
+++ b/docs/zh/25-application/02-collectd.md
@@ -1,6 +1,7 @@
---
sidebar_label: TDengine + collectd/StatsD + Grafana
-title: 使用 TDengine + collectd/StatsD + Grafana 快速搭建 IT 运维监控系统
+title: TDengine + collectd/StatsD + Grafana
+description: 使用 TDengine + collectd/StatsD + Grafana 快速搭建 IT 运维监控系统
---
## 背景介绍
diff --git a/docs/zh/25-application/index.md b/docs/zh/25-application/index.md
index 1305cf230f78b68f988918921540a1df05f0931f..76aa1799278c0301359e0761f54267293b2152d4 100644
--- a/docs/zh/25-application/index.md
+++ b/docs/zh/25-application/index.md
@@ -1,5 +1,6 @@
---
title: 应用实践
+description: TDengine 配合其它开源组件的一些应用示例
---
```mdx-code-block
diff --git a/docs/zh/27-train-faq/01-faq.md b/docs/zh/27-train-faq/01-faq.md
index 04ee011b9368eb5fd60cc25fd675a5b276a8ab2b..0a46db4a28862e07dd86e427e320e8b2d1276034 100644
--- a/docs/zh/27-train-faq/01-faq.md
+++ b/docs/zh/27-train-faq/01-faq.md
@@ -1,5 +1,6 @@
---
title: 常见问题及反馈
+description: 一些常见问题的解决方法汇总
---
## 问题反馈
@@ -115,7 +116,7 @@ charset UTF-8
### 9. 表名显示不全
-由于 taos shell 在终端中显示宽度有限,有可能比较长的表名显示不全,如果按照显示的不全的表名进行相关操作会发生 Table does not exist 错误。解决方法可以是通过修改 taos.cfg 文件中的设置项 maxBinaryDisplayWidth, 或者直接输入命令 set max_binary_display_width 100。或者在命令结尾使用 \G 参数来调整结果的显示方式。
+由于 TDengine CLI 在终端中显示宽度有限,有可能比较长的表名显示不全,如果按照显示的不全的表名进行相关操作会发生 Table does not exist 错误。解决方法可以是通过修改 taos.cfg 文件中的设置项 maxBinaryDisplayWidth, 或者直接输入命令 set max_binary_display_width 100。或者在命令结尾使用 \G 参数来调整结果的显示方式。
### 10. 如何进行数据迁移?
diff --git a/docs/zh/27-train-faq/index.md b/docs/zh/27-train-faq/index.md
index b42bff0288fc8ab59810a7d7121be28ddf781551..e7159d98c8669245de834a9281a72cd2529ab9a9 100644
--- a/docs/zh/27-train-faq/index.md
+++ b/docs/zh/27-train-faq/index.md
@@ -1,5 +1,6 @@
---
title: FAQ 及其他
+description: 用户经常遇到的问题
---
```mdx-code-block
diff --git a/docs/zh/28-releases/01-tdengine.md b/docs/zh/28-releases/01-tdengine.md
index a64798caa09004eff647f4378f9f70624fac2bca..a6ec560d3c5951ea1500893640be5905a31e8d61 100644
--- a/docs/zh/28-releases/01-tdengine.md
+++ b/docs/zh/28-releases/01-tdengine.md
@@ -1,15 +1,12 @@
---
sidebar_label: TDengine 发布历史
title: TDengine 发布历史
+description: TDengine 发布历史、Release Notes 及下载链接
---
import Release from "/components/ReleaseV3";
-## 3.0.0.1
+## 3.0.1.0
-
-
-## 3.0.0.0
-
-
+
diff --git a/docs/zh/28-releases/02-tools.md b/docs/zh/28-releases/02-tools.md
index 751333304014c8a9af89fe9481858c8327688505..9e8757cc4e09f661cfd4c552ff27537411e2d420 100644
--- a/docs/zh/28-releases/02-tools.md
+++ b/docs/zh/28-releases/02-tools.md
@@ -1,10 +1,11 @@
---
sidebar_label: taosTools 发布历史
title: taosTools 发布历史
+description: taosTools 的发布历史、Release Notes 和下载链接
---
import Release from "/components/ReleaseV3";
-## 2.1.2
+## 2.1.3
-
\ No newline at end of file
+
diff --git a/examples/JDBC/JDBCDemo/README-jdbc-windows.md b/examples/JDBC/JDBCDemo/README-jdbc-windows.md
index 17c5c8df00ab8727d1adfe493d3fbbd32891a676..5a781f40f730218286edb9f6a7f184ee79e7a5fc 100644
--- a/examples/JDBC/JDBCDemo/README-jdbc-windows.md
+++ b/examples/JDBC/JDBCDemo/README-jdbc-windows.md
@@ -129,7 +129,7 @@ https://www.taosdata.com/cn/all-downloads/
192.168.236.136 td01
```
-配置完成后,在命令行内使用taos shell连接server端
+配置完成后,在命令行内使用 TDengine CLI 连接 server 端
```shell
C:\TDengine>taos -h td01
diff --git a/examples/c/CMakeLists.txt b/examples/c/CMakeLists.txt
index 9d06dbac6dc3ba9d4dcafe6d8316b52e1b3daeca..4a9007acecaa679dc716c5665eea7f0cd1e34dbb 100644
--- a/examples/c/CMakeLists.txt
+++ b/examples/c/CMakeLists.txt
@@ -13,15 +13,9 @@ IF (TD_LINUX)
#TARGET_LINK_LIBRARIES(epoll taos_static trpc tutil pthread lua)
add_executable(tmq "")
- add_executable(tmq_taosx "")
add_executable(stream_demo "")
add_executable(demoapi "")
- target_sources(tmq_taosx
- PRIVATE
- "tmq_taosx.c"
- )
-
target_sources(tmq
PRIVATE
"tmq.c"
@@ -41,10 +35,6 @@ IF (TD_LINUX)
taos_static
)
- target_link_libraries(tmq_taosx
- taos_static
- )
-
target_link_libraries(stream_demo
taos_static
)
@@ -57,10 +47,6 @@ IF (TD_LINUX)
PUBLIC "${TD_SOURCE_DIR}/include/os"
PRIVATE "${CMAKE_CURRENT_SOURCE_DIR}/inc"
)
- target_include_directories(tmq_taosx
- PUBLIC "${TD_SOURCE_DIR}/include/os"
- PRIVATE "${CMAKE_CURRENT_SOURCE_DIR}/inc"
- )
target_include_directories(stream_demo
PRIVATE "${CMAKE_CURRENT_SOURCE_DIR}/inc"
@@ -73,7 +59,6 @@ IF (TD_LINUX)
)
SET_TARGET_PROPERTIES(tmq PROPERTIES OUTPUT_NAME tmq)
- SET_TARGET_PROPERTIES(tmq_taosx PROPERTIES OUTPUT_NAME tmq_taosx)
SET_TARGET_PROPERTIES(stream_demo PROPERTIES OUTPUT_NAME stream_demo)
SET_TARGET_PROPERTIES(demoapi PROPERTIES OUTPUT_NAME demoapi)
ENDIF ()
diff --git a/examples/c/stream_demo.c b/examples/c/stream_demo.c
index 2fcf4dd62c1e0a2f5aabda4ce5eb9fae6aa72be8..1c9d11b755f77bf259e45d77c6e5983c3747835a 100644
--- a/examples/c/stream_demo.c
+++ b/examples/c/stream_demo.c
@@ -13,6 +13,7 @@
* along with this program. If not, see .
*/
+// clang-format off
#include
#include
#include
@@ -94,13 +95,8 @@ int32_t create_stream() {
}
taos_free_result(pRes);
- /*const char* sql = "select min(k), max(k), sum(k) from tu1";*/
- /*const char* sql = "select min(k), max(k), sum(k) as sum_of_k from st1";*/
- /*const char* sql = "select sum(k) from tu1 interval(10m)";*/
- /*pRes = tmq_create_stream(pConn, "stream1", "out1", sql);*/
pRes = taos_query(pConn,
- "create stream stream1 trigger max_delay 10s watermark 10s into outstb as select _wstart start, "
- "count(k) from st1 partition by tbname interval(20s) ");
+ "create stream stream1 trigger at_once watermark 10s into outstb as select _wstart start, avg(k) from st1 partition by tbname interval(10s)");
if (taos_errno(pRes) != 0) {
printf("failed to create stream stream1, reason:%s\n", taos_errstr(pRes));
return -1;
diff --git a/examples/c/tmq_taosx.c b/examples/c/tmq_taosx.c
deleted file mode 100644
index d0def4426905b773db948b0cf6f0d22c8733d5da..0000000000000000000000000000000000000000
--- a/examples/c/tmq_taosx.c
+++ /dev/null
@@ -1,480 +0,0 @@
-/*
- * Copyright (c) 2019 TAOS Data, Inc.
- *
- * This program is free software: you can use, redistribute, and/or modify
- * it under the terms of the GNU Affero General Public License, version 3
- * or later ("AGPL"), as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.
- *
- * You should have received a copy of the GNU Affero General Public License
- * along with this program. If not, see .
- */
-
-#include
-#include
-#include
-#include
-#include
-#include "taos.h"
-
-static int running = 1;
-
-static TAOS* use_db(){
- TAOS* pConn = taos_connect("localhost", "root", "taosdata", NULL, 0);
- if (pConn == NULL) {
- return NULL;
- }
-
- TAOS_RES* pRes = taos_query(pConn, "use db_taosx");
- if (taos_errno(pRes) != 0) {
- printf("error in use db_taosx, reason:%s\n", taos_errstr(pRes));
- return NULL;
- }
- taos_free_result(pRes);
- return pConn;
-}
-
-static void msg_process(TAOS_RES* msg) {
- /*memset(buf, 0, 1024);*/
- printf("-----------topic-------------: %s\n", tmq_get_topic_name(msg));
- printf("db: %s\n", tmq_get_db_name(msg));
- printf("vg: %d\n", tmq_get_vgroup_id(msg));
- TAOS *pConn = use_db();
- if (tmq_get_res_type(msg) == TMQ_RES_TABLE_META) {
- char* result = tmq_get_json_meta(msg);
- if (result) {
- printf("meta result: %s\n", result);
- }
- tmq_free_json_meta(result);
- }
-
- tmq_raw_data raw = {0};
- tmq_get_raw(msg, &raw);
- int32_t ret = tmq_write_raw(pConn, raw);
- printf("write raw data: %s\n", tmq_err2str(ret));
-
-// else{
-// while(1){
-// int numOfRows = 0;
-// void *pData = NULL;
-// taos_fetch_raw_block(msg, &numOfRows, &pData);
-// if(numOfRows == 0) break;
-// printf("write data: tbname:%s, numOfRows:%d\n", tmq_get_table_name(msg), numOfRows);
-// int ret = taos_write_raw_block(pConn, numOfRows, pData, tmq_get_table_name(msg));
-// printf("write raw data: %s\n", tmq_err2str(ret));
-// }
-// }
-
- taos_close(pConn);
-}
-
-int32_t init_env() {
- TAOS* pConn = taos_connect("localhost", "root", "taosdata", NULL, 0);
- if (pConn == NULL) {
- return -1;
- }
-
- TAOS_RES* pRes = taos_query(pConn, "drop database if exists db_taosx");
- if (taos_errno(pRes) != 0) {
- printf("error in drop db_taosx, reason:%s\n", taos_errstr(pRes));
- return -1;
- }
- taos_free_result(pRes);
-
- pRes = taos_query(pConn, "create database if not exists db_taosx vgroups 4");
- if (taos_errno(pRes) != 0) {
- printf("error in create db_taosx, reason:%s\n", taos_errstr(pRes));
- return -1;
- }
- taos_free_result(pRes);
-
- pRes = taos_query(pConn, "drop database if exists abc1");
- if (taos_errno(pRes) != 0) {
- printf("error in drop db, reason:%s\n", taos_errstr(pRes));
- return -1;
- }
- taos_free_result(pRes);
-
- pRes = taos_query(pConn, "create database if not exists abc1 vgroups 3");
- if (taos_errno(pRes) != 0) {
- printf("error in create db, reason:%s\n", taos_errstr(pRes));
- return -1;
- }
- taos_free_result(pRes);
-
- pRes = taos_query(pConn, "use abc1");
- if (taos_errno(pRes) != 0) {
- printf("error in use db, reason:%s\n", taos_errstr(pRes));
- return -1;
- }
- taos_free_result(pRes);
-
- pRes = taos_query(pConn,
- "create stable if not exists st1 (ts timestamp, c1 int, c2 float, c3 binary(16)) tags(t1 int, t3 "
- "nchar(8), t4 bool)");
- if (taos_errno(pRes) != 0) {
- printf("failed to create super table st1, reason:%s\n", taos_errstr(pRes));
- return -1;
- }
- taos_free_result(pRes);
-
- pRes = taos_query(pConn, "create table if not exists ct0 using st1 tags(1000, \"ttt\", true)");
- if (taos_errno(pRes) != 0) {
- printf("failed to create child table tu1, reason:%s\n", taos_errstr(pRes));
- return -1;
- }
- taos_free_result(pRes);
-
- pRes = taos_query(pConn, "insert into ct0 values(1626006833600, 1, 2, 'a')");
- if (taos_errno(pRes) != 0) {
- printf("failed to insert into ct0, reason:%s\n", taos_errstr(pRes));
- return -1;
- }
- taos_free_result(pRes);
-
- pRes = taos_query(pConn, "create table if not exists ct1 using st1(t1) tags(2000)");
- if (taos_errno(pRes) != 0) {
- printf("failed to create child table ct1, reason:%s\n", taos_errstr(pRes));
- return -1;
- }
- taos_free_result(pRes);
-
- pRes = taos_query(pConn, "create table if not exists ct2 using st1(t1) tags(NULL)");
- if (taos_errno(pRes) != 0) {
- printf("failed to create child table ct2, reason:%s\n", taos_errstr(pRes));
- return -1;
- }
- taos_free_result(pRes);
-
- pRes = taos_query(pConn, "insert into ct1 values(1626006833600, 3, 4, 'b')");
- if (taos_errno(pRes) != 0) {
- printf("failed to insert into ct1, reason:%s\n", taos_errstr(pRes));
- return -1;
- }
- taos_free_result(pRes);
-
- pRes = taos_query(pConn, "create table if not exists ct3 using st1(t1) tags(3000)");
- if (taos_errno(pRes) != 0) {
- printf("failed to create child table ct3, reason:%s\n", taos_errstr(pRes));
- return -1;
- }
- taos_free_result(pRes);
-
- pRes = taos_query(pConn, "insert into ct3 values(1626006833600, 5, 6, 'c') ct1 values(1626006833601, 2, 3, 'sds') (1626006833602, 4, 5, 'ddd') ct0 values(1626006833602, 4, 3, 'hwj') ct1 values(now+5s, 23, 32, 's21ds')");
- if (taos_errno(pRes) != 0) {
- printf("failed to insert into ct3, reason:%s\n", taos_errstr(pRes));
- return -1;
- }
- taos_free_result(pRes);
-
- pRes = taos_query(pConn, "alter table st1 add column c4 bigint");
- if (taos_errno(pRes) != 0) {
- printf("failed to alter super table st1, reason:%s\n", taos_errstr(pRes));
- return -1;
- }
- taos_free_result(pRes);
-
- pRes = taos_query(pConn, "alter table st1 modify column c3 binary(64)");
- if (taos_errno(pRes) != 0) {
- printf("failed to alter super table st1, reason:%s\n", taos_errstr(pRes));
- return -1;
- }
- taos_free_result(pRes);
-
- pRes = taos_query(pConn, "insert into ct3 values(1626006833605, 53, 63, 'cffffffffffffffffffffffffffff', 8989898899999) (1626006833609, 51, 62, 'c333', 940)");
- if (taos_errno(pRes) != 0) {
- printf("failed to insert into ct3, reason:%s\n", taos_errstr(pRes));
- return -1;
- }
- taos_free_result(pRes);
-
- pRes = taos_query(pConn, "insert into ct3 select * from ct1");
- if (taos_errno(pRes) != 0) {
- printf("failed to insert into ct3, reason:%s\n", taos_errstr(pRes));
- return -1;
- }
- taos_free_result(pRes);
-
- pRes = taos_query(pConn, "alter table st1 add tag t2 binary(64)");
- if (taos_errno(pRes) != 0) {
- printf("failed to alter super table st1, reason:%s\n", taos_errstr(pRes));
- return -1;
- }
- taos_free_result(pRes);
-
- pRes = taos_query(pConn, "alter table ct3 set tag t1=5000");
- if (taos_errno(pRes) != 0) {
- printf("failed to slter child table ct3, reason:%s\n", taos_errstr(pRes));
- return -1;
- }
- taos_free_result(pRes);
-
- pRes = taos_query(pConn, "delete from abc1 .ct3 where ts < 1626006833606");
- if (taos_errno(pRes) != 0) {
- printf("failed to insert into ct3, reason:%s\n", taos_errstr(pRes));
- return -1;
- }
- taos_free_result(pRes);
-
- pRes = taos_query(pConn, "drop table ct3 ct1");
- if (taos_errno(pRes) != 0) {
- printf("failed to drop child table ct3, reason:%s\n", taos_errstr(pRes));
- return -1;
- }
- taos_free_result(pRes);
-
- pRes = taos_query(pConn, "drop table st1");
- if (taos_errno(pRes) != 0) {
- printf("failed to drop super table st1, reason:%s\n", taos_errstr(pRes));
- return -1;
- }
- taos_free_result(pRes);
-
- pRes = taos_query(pConn, "create table if not exists n1(ts timestamp, c1 int, c2 nchar(4))");
- if (taos_errno(pRes) != 0) {
- printf("failed to create normal table n1, reason:%s\n", taos_errstr(pRes));
- return -1;
- }
- taos_free_result(pRes);
-
- pRes = taos_query(pConn, "alter table n1 add column c3 bigint");
- if (taos_errno(pRes) != 0) {
- printf("failed to alter normal table n1, reason:%s\n", taos_errstr(pRes));
- return -1;
- }
- taos_free_result(pRes);
-
- pRes = taos_query(pConn, "alter table n1 modify column c2 nchar(8)");
- if (taos_errno(pRes) != 0) {
- printf("failed to alter normal table n1, reason:%s\n", taos_errstr(pRes));
- return -1;
- }
- taos_free_result(pRes);
-
- pRes = taos_query(pConn, "alter table n1 rename column c3 cc3");
- if (taos_errno(pRes) != 0) {
- printf("failed to alter normal table n1, reason:%s\n", taos_errstr(pRes));
- return -1;
- }
- taos_free_result(pRes);
-
- pRes = taos_query(pConn, "alter table n1 comment 'hello'");
- if (taos_errno(pRes) != 0) {
- printf("failed to alter normal table n1, reason:%s\n", taos_errstr(pRes));
- return -1;
- }
- taos_free_result(pRes);
-
- pRes = taos_query(pConn, "alter table n1 drop column c1");
- if (taos_errno(pRes) != 0) {
- printf("failed to alter normal table n1, reason:%s\n", taos_errstr(pRes));
- return -1;
- }
- taos_free_result(pRes);
-
- pRes = taos_query(pConn, "insert into n1 values(now, 'eeee', 8989898899999) (now+9s, 'c333', 940)");
- if (taos_errno(pRes) != 0) {
- printf("failed to insert into n1, reason:%s\n", taos_errstr(pRes));
- return -1;
- }
- taos_free_result(pRes);
-
- pRes = taos_query(pConn, "drop table n1");
- if (taos_errno(pRes) != 0) {
- printf("failed to drop normal table n1, reason:%s\n", taos_errstr(pRes));
- return -1;
- }
- taos_free_result(pRes);
-
- pRes = taos_query(pConn, "create table jt(ts timestamp, i int) tags(t json)");
- if (taos_errno(pRes) != 0) {
- printf("failed to create super table jt, reason:%s\n", taos_errstr(pRes));
- return -1;
- }
- taos_free_result(pRes);
-
- pRes = taos_query(pConn, "create table jt1 using jt tags('{\"k1\":1, \"k2\":\"hello\"}')");
- if (taos_errno(pRes) != 0) {
- printf("failed to create super table jt, reason:%s\n", taos_errstr(pRes));
- return -1;
- }
- taos_free_result(pRes);
-
- pRes = taos_query(pConn, "create table jt2 using jt tags('')");
- if (taos_errno(pRes) != 0) {
- printf("failed to create super table jt2, reason:%s\n", taos_errstr(pRes));
- return -1;
- }
- taos_free_result(pRes);
-
- pRes = taos_query(pConn,
- "create stable if not exists st1 (ts timestamp, c1 int, c2 float, c3 binary(16)) tags(t1 int, t3 "
- "nchar(8), t4 bool)");
- if (taos_errno(pRes) != 0) {
- printf("failed to create super table st1, reason:%s\n", taos_errstr(pRes));
- return -1;
- }
- taos_free_result(pRes);
-
- pRes = taos_query(pConn, "drop table st1");
- if (taos_errno(pRes) != 0) {
- printf("failed to drop super table st1, reason:%s\n", taos_errstr(pRes));
- return -1;
- }
- taos_free_result(pRes);
-
- taos_close(pConn);
- return 0;
-}
-
-int32_t create_topic() {
- printf("create topic\n");
- TAOS_RES* pRes;
- TAOS* pConn = taos_connect("localhost", "root", "taosdata", NULL, 0);
- if (pConn == NULL) {
- return -1;
- }
-
- pRes = taos_query(pConn, "use abc1");
- if (taos_errno(pRes) != 0) {
- printf("error in use db, reason:%s\n", taos_errstr(pRes));
- return -1;
- }
- taos_free_result(pRes);
-
- pRes = taos_query(pConn, "create topic topic_ctb_column with meta as database abc1");
- if (taos_errno(pRes) != 0) {
- printf("failed to create topic topic_ctb_column, reason:%s\n", taos_errstr(pRes));
- return -1;
- }
- taos_free_result(pRes);
-
- taos_close(pConn);
- return 0;
-}
-
-void tmq_commit_cb_print(tmq_t* tmq, int32_t code, void* param) {
- printf("commit %d tmq %p param %p\n", code, tmq, param);
-}
-
-tmq_t* build_consumer() {
-#if 0
- TAOS* pConn = taos_connect("localhost", "root", "taosdata", NULL, 0);
- assert(pConn != NULL);
-
- TAOS_RES* pRes = taos_query(pConn, "use abc1");
- if (taos_errno(pRes) != 0) {
- printf("error in use db, reason:%s\n", taos_errstr(pRes));
- }
- taos_free_result(pRes);
-#endif
-
- tmq_conf_t* conf = tmq_conf_new();
- tmq_conf_set(conf, "group.id", "tg2");
- tmq_conf_set(conf, "client.id", "my app 1");
- tmq_conf_set(conf, "td.connect.user", "root");
- tmq_conf_set(conf, "td.connect.pass", "taosdata");
- tmq_conf_set(conf, "msg.with.table.name", "true");
- tmq_conf_set(conf, "enable.auto.commit", "true");
-
- /*tmq_conf_set(conf, "experimental.snapshot.enable", "true");*/
-
- tmq_conf_set_auto_commit_cb(conf, tmq_commit_cb_print, NULL);
- tmq_t* tmq = tmq_consumer_new(conf, NULL, 0);
- assert(tmq);
- tmq_conf_destroy(conf);
- return tmq;
-}
-
-tmq_list_t* build_topic_list() {
- tmq_list_t* topic_list = tmq_list_new();
- tmq_list_append(topic_list, "topic_ctb_column");
- /*tmq_list_append(topic_list, "tmq_test_db_multi_insert_topic");*/
- return topic_list;
-}
-
-void basic_consume_loop(tmq_t* tmq, tmq_list_t* topics) {
- int32_t code;
-
- if ((code = tmq_subscribe(tmq, topics))) {
- fprintf(stderr, "%% Failed to start consuming topics: %s\n", tmq_err2str(code));
- printf("subscribe err\n");
- return;
- }
- int32_t cnt = 0;
- while (running) {
- TAOS_RES* tmqmessage = tmq_consumer_poll(tmq, -1);
- if (tmqmessage) {
- cnt++;
- msg_process(tmqmessage);
- /*if (cnt >= 2) break;*/
- /*printf("get data\n");*/
- taos_free_result(tmqmessage);
- /*} else {*/
- /*break;*/
- /*tmq_commit_sync(tmq, NULL);*/
- }
- }
-
- code = tmq_consumer_close(tmq);
- if (code)
- fprintf(stderr, "%% Failed to close consumer: %s\n", tmq_err2str(code));
- else
- fprintf(stderr, "%% Consumer closed\n");
-}
-
-void sync_consume_loop(tmq_t* tmq, tmq_list_t* topics) {
- static const int MIN_COMMIT_COUNT = 1;
-
- int msg_count = 0;
- int32_t code;
-
- if ((code = tmq_subscribe(tmq, topics))) {
- fprintf(stderr, "%% Failed to start consuming topics: %s\n", tmq_err2str(code));
- return;
- }
-
- tmq_list_t* subList = NULL;
- tmq_subscription(tmq, &subList);
- char** subTopics = tmq_list_to_c_array(subList);
- int32_t sz = tmq_list_get_size(subList);
- printf("subscribed topics: ");
- for (int32_t i = 0; i < sz; i++) {
- printf("%s, ", subTopics[i]);
- }
- printf("\n");
- tmq_list_destroy(subList);
-
- while (running) {
- TAOS_RES* tmqmessage = tmq_consumer_poll(tmq, 1000);
- if (tmqmessage) {
- msg_process(tmqmessage);
- taos_free_result(tmqmessage);
-
- /*tmq_commit_sync(tmq, NULL);*/
- /*if ((++msg_count % MIN_COMMIT_COUNT) == 0) tmq_commit(tmq, NULL, 0);*/
- }
- }
-
- code = tmq_consumer_close(tmq);
- if (code)
- fprintf(stderr, "%% Failed to close consumer: %s\n", tmq_err2str(code));
- else
- fprintf(stderr, "%% Consumer closed\n");
-}
-
-int main(int argc, char* argv[]) {
- printf("env init\n");
- if (init_env() < 0) {
- return -1;
- }
- create_topic();
-
- tmq_t* tmq = build_consumer();
- tmq_list_t* topic_list = build_topic_list();
- basic_consume_loop(tmq, topic_list);
- /*sync_consume_loop(tmq, topic_list);*/
-}
diff --git a/examples/nodejs/README-win.md b/examples/nodejs/README-win.md
index 75fec69413af2bb49498118ec7235c9947e2f89e..e496be2f87e3ff0fcc01359f23888734669b0c22 100644
--- a/examples/nodejs/README-win.md
+++ b/examples/nodejs/README-win.md
@@ -35,7 +35,7 @@ Python 2.7.18
下载地址:https://www.taosdata.com/cn/all-downloads/,选择一个合适的windows-client下载(client应该尽量与server端的版本保持一致)
-使用client的taos shell连接server
+使用client的TDengine CLI连接server
```shell
>taos -h node5
diff --git a/include/client/taos.h b/include/client/taos.h
index f260b84f4aaf238badb1de3a6446b639b5681fa9..49cfbb52b80e88103fe6befc6d2818641e731fcf 100644
--- a/include/client/taos.h
+++ b/include/client/taos.h
@@ -254,6 +254,7 @@ enum tmq_res_t {
TMQ_RES_INVALID = -1,
TMQ_RES_DATA = 1,
TMQ_RES_TABLE_META = 2,
+ TMQ_RES_TAOSX = 3,
};
typedef struct tmq_raw_data {
diff --git a/include/common/systable.h b/include/common/systable.h
index ed2e6a46c35006f8f9ffc189a98f3df5e2ac9ade..882c54de952dc044ed30aa6a1aed66145c0db804 100644
--- a/include/common/systable.h
+++ b/include/common/systable.h
@@ -22,54 +22,58 @@ extern "C" {
#ifndef TDENGINE_SYSTABLE_H
#define TDENGINE_SYSTABLE_H
-#define TSDB_INFORMATION_SCHEMA_DB "information_schema"
-#define TSDB_INS_TABLE_DNODES "ins_dnodes"
-#define TSDB_INS_TABLE_MNODES "ins_mnodes"
-#define TSDB_INS_TABLE_MODULES "ins_modules"
-#define TSDB_INS_TABLE_QNODES "ins_qnodes"
-#define TSDB_INS_TABLE_BNODES "ins_bnodes"
-#define TSDB_INS_TABLE_SNODES "ins_snodes"
-#define TSDB_INS_TABLE_CLUSTER "ins_cluster"
-#define TSDB_INS_TABLE_DATABASES "ins_databases"
-#define TSDB_INS_TABLE_FUNCTIONS "ins_functions"
-#define TSDB_INS_TABLE_INDEXES "ins_indexes"
-#define TSDB_INS_TABLE_STABLES "ins_stables"
-#define TSDB_INS_TABLE_TABLES "ins_tables"
-#define TSDB_INS_TABLE_TAGS "ins_tags"
-#define TSDB_INS_TABLE_TABLE_DISTRIBUTED "ins_table_distributed"
-#define TSDB_INS_TABLE_USERS "ins_users"
-#define TSDB_INS_TABLE_LICENCES "ins_grants"
-#define TSDB_INS_TABLE_VGROUPS "ins_vgroups"
-#define TSDB_INS_TABLE_VNODES "ins_vnodes"
-#define TSDB_INS_TABLE_CONFIGS "ins_configs"
-#define TSDB_INS_TABLE_DNODE_VARIABLES "ins_dnode_variables"
+#define TSDB_INFORMATION_SCHEMA_DB "information_schema"
+#define TSDB_INS_TABLE_DNODES "ins_dnodes"
+#define TSDB_INS_TABLE_MNODES "ins_mnodes"
+#define TSDB_INS_TABLE_MODULES "ins_modules"
+#define TSDB_INS_TABLE_QNODES "ins_qnodes"
+#define TSDB_INS_TABLE_BNODES "ins_bnodes"
+#define TSDB_INS_TABLE_SNODES "ins_snodes"
+#define TSDB_INS_TABLE_CLUSTER "ins_cluster"
+#define TSDB_INS_TABLE_DATABASES "ins_databases"
+#define TSDB_INS_TABLE_FUNCTIONS "ins_functions"
+#define TSDB_INS_TABLE_INDEXES "ins_indexes"
+#define TSDB_INS_TABLE_STABLES "ins_stables"
+#define TSDB_INS_TABLE_TABLES "ins_tables"
+#define TSDB_INS_TABLE_TAGS "ins_tags"
+#define TSDB_INS_TABLE_TABLE_DISTRIBUTED "ins_table_distributed"
+#define TSDB_INS_TABLE_USERS "ins_users"
+#define TSDB_INS_TABLE_LICENCES "ins_grants"
+#define TSDB_INS_TABLE_VGROUPS "ins_vgroups"
+#define TSDB_INS_TABLE_VNODES "ins_vnodes"
+#define TSDB_INS_TABLE_CONFIGS "ins_configs"
+#define TSDB_INS_TABLE_DNODE_VARIABLES "ins_dnode_variables"
+#define TSDB_INS_TABLE_SUBSCRIPTIONS "ins_subscriptions"
+#define TSDB_INS_TABLE_TOPICS "ins_topics"
+#define TSDB_INS_TABLE_STREAMS "ins_streams"
#define TSDB_PERFORMANCE_SCHEMA_DB "performance_schema"
#define TSDB_PERFS_TABLE_SMAS "perf_smas"
#define TSDB_PERFS_TABLE_CONNECTIONS "perf_connections"
#define TSDB_PERFS_TABLE_QUERIES "perf_queries"
-#define TSDB_PERFS_TABLE_TOPICS "perf_topics"
#define TSDB_PERFS_TABLE_CONSUMERS "perf_consumers"
-#define TSDB_PERFS_TABLE_SUBSCRIPTIONS "perf_subscriptions"
#define TSDB_PERFS_TABLE_OFFSETS "perf_offsets"
#define TSDB_PERFS_TABLE_TRANS "perf_trans"
-#define TSDB_PERFS_TABLE_STREAMS "perf_streams"
#define TSDB_PERFS_TABLE_APPS "perf_apps"
typedef struct SSysDbTableSchema {
const char* name;
const int32_t type;
const int32_t bytes;
+ const bool sysInfo;
} SSysDbTableSchema;
typedef struct SSysTableMeta {
const char* name;
const SSysDbTableSchema* schema;
const int32_t colNum;
+ const bool sysInfo;
} SSysTableMeta;
void getInfosDbMeta(const SSysTableMeta** pInfosTableMeta, size_t* size);
void getPerfDbMeta(const SSysTableMeta** pPerfsTableMeta, size_t* size);
+void getVisibleInfosTablesNum(bool sysInfo, size_t* size);
+bool invisibleColumn(bool sysInfo, int8_t tableType, int8_t flags);
#ifdef __cplusplus
}
diff --git a/include/common/taosdef.h b/include/common/taosdef.h
index 9bfee56e2974832593578c9c2b1c984373763088..bf4de9d4ded1d0955bef05b1e3000be0bf34d8aa 100644
--- a/include/common/taosdef.h
+++ b/include/common/taosdef.h
@@ -65,13 +65,6 @@ typedef enum {
TSDB_STATIS_NONE = 1, // statis part not exist
} ETsdbStatisStatus;
-typedef enum {
- TSDB_SMA_STAT_UNKNOWN = -1, // unknown
- TSDB_SMA_STAT_OK = 0, // ready to provide service
- TSDB_SMA_STAT_EXPIRED = 1, // not ready or expired
- TSDB_SMA_STAT_DROPPED = 2, // sma dropped
-} ETsdbSmaStat; // bit operation
-
typedef enum {
TSDB_SMA_TYPE_BLOCK = 0, // Block-wise SMA
TSDB_SMA_TYPE_TIME_RANGE = 1, // Time-range-wise SMA
diff --git a/include/common/tcommon.h b/include/common/tcommon.h
index dbe020f7ecaf869f2e3fdb99fb86e33c5f873ecb..37db574d98c731699e68b49c4d68108ddf1a670b 100644
--- a/include/common/tcommon.h
+++ b/include/common/tcommon.h
@@ -44,11 +44,36 @@ enum {
)
// clang-format on
+typedef struct {
+ TSKEY ts;
+ uint64_t groupId;
+} SWinKey;
+
+static inline int SWinKeyCmpr(const void* pKey1, int kLen1, const void* pKey2, int kLen2) {
+ SWinKey* pWin1 = (SWinKey*)pKey1;
+ SWinKey* pWin2 = (SWinKey*)pKey2;
+
+ if (pWin1->groupId > pWin2->groupId) {
+ return 1;
+ } else if (pWin1->groupId < pWin2->groupId) {
+ return -1;
+ }
+
+ if (pWin1->ts > pWin2->ts) {
+ return 1;
+ } else if (pWin1->ts < pWin2->ts) {
+ return -1;
+ }
+
+ return 0;
+}
+
enum {
TMQ_MSG_TYPE__DUMMY = 0,
TMQ_MSG_TYPE__POLL_RSP,
TMQ_MSG_TYPE__POLL_META_RSP,
TMQ_MSG_TYPE__EP_RSP,
+ TMQ_MSG_TYPE__TAOSX_RSP,
TMQ_MSG_TYPE__END_RSP,
};
@@ -105,7 +130,6 @@ typedef struct SDataBlockInfo {
uint32_t capacity;
// TODO: optimize and remove following
int64_t version; // used for stream, and need serialization
- int64_t ts; // used for stream, and need serialization
int32_t childId; // used for stream, do not serialize
EStreamType type; // used for stream, do not serialize
STimeWindow calWin; // used for stream, do not serialize
@@ -181,7 +205,7 @@ typedef struct SColumn {
int16_t slotId;
char name[TSDB_COL_NAME_LEN];
- int8_t flag; // column type: normal column, tag, or user-input column (integer/float/string)
+ int16_t colType; // column type: normal column, tag, or window column
int16_t type;
int32_t bytes;
uint8_t precision;
diff --git a/include/common/tdatablock.h b/include/common/tdatablock.h
index 410fa02ded3c16bd0e1fd2c669b5c8c46a7e1801..73d043b2d0ac680d69b517d042b02dfa71167435 100644
--- a/include/common/tdatablock.h
+++ b/include/common/tdatablock.h
@@ -184,7 +184,8 @@ static FORCE_INLINE void colDataAppendDouble(SColumnInfoData* pColumnInfoData, u
int32_t getJsonValueLen(const char* data);
int32_t colDataAppend(SColumnInfoData* pColumnInfoData, uint32_t currentRow, const char* pData, bool isNull);
-int32_t colDataAppendNItems(SColumnInfoData* pColumnInfoData, uint32_t currentRow, const char* pData, uint32_t numOfRows);
+int32_t colDataAppendNItems(SColumnInfoData* pColumnInfoData, uint32_t currentRow, const char* pData,
+ uint32_t numOfRows);
int32_t colDataMergeCol(SColumnInfoData* pColumnInfoData, int32_t numOfRow1, int32_t* capacity,
const SColumnInfoData* pSource, int32_t numOfRow2);
int32_t colDataAssign(SColumnInfoData* pColumnInfoData, const SColumnInfoData* pSource, int32_t numOfRows,
@@ -225,15 +226,16 @@ size_t blockDataGetCapacityInRow(const SSDataBlock* pBlock, size_t pageSize);
int32_t blockDataTrimFirstNRows(SSDataBlock* pBlock, size_t n);
int32_t blockDataKeepFirstNRows(SSDataBlock* pBlock, size_t n);
-int32_t assignOneDataBlock(SSDataBlock* dst, const SSDataBlock* src);
-int32_t copyDataBlock(SSDataBlock* dst, const SSDataBlock* src);
+int32_t assignOneDataBlock(SSDataBlock* dst, const SSDataBlock* src);
+int32_t copyDataBlock(SSDataBlock* dst, const SSDataBlock* src);
SSDataBlock* createDataBlock();
void* blockDataDestroy(SSDataBlock* pBlock);
void blockDataFreeRes(SSDataBlock* pBlock);
SSDataBlock* createOneDataBlock(const SSDataBlock* pDataBlock, bool copyData);
+SSDataBlock* createSpecialDataBlock(EStreamType type);
-int32_t blockDataAppendColInfo(SSDataBlock* pBlock, SColumnInfoData* pColInfoData);
+int32_t blockDataAppendColInfo(SSDataBlock* pBlock, SColumnInfoData* pColInfoData);
SColumnInfoData createColumnInfoData(int16_t type, int32_t bytes, int16_t colId);
SColumnInfoData* bdGetColumnInfoData(const SSDataBlock* pBlock, int32_t index);
@@ -249,7 +251,6 @@ char* dumpBlockData(SSDataBlock* pDataBlock, const char* flag, char** dumpBuf);
int32_t buildSubmitReqFromDataBlock(SSubmitReq** pReq, const SSDataBlock* pDataBlocks, STSchema* pTSchema, int32_t vgId,
tb_uid_t suid);
-
char* buildCtbNameByGroupId(const char* stbName, uint64_t groupId);
static FORCE_INLINE int32_t blockGetEncodeSize(const SSDataBlock* pBlock) {
diff --git a/include/common/tdataformat.h b/include/common/tdataformat.h
index af7c88acded2e151ff730ccb1ade5fdf15f9862a..df16f4f0ab9ad1a79c11ede9e54fdc086e9204df 100644
--- a/include/common/tdataformat.h
+++ b/include/common/tdataformat.h
@@ -96,6 +96,7 @@ char *tTagValToData(const STagVal *pTagVal, bool isJson);
int32_t tEncodeTag(SEncoder *pEncoder, const STag *pTag);
int32_t tDecodeTag(SDecoder *pDecoder, STag **ppTag);
int32_t tTagToValArray(const STag *pTag, SArray **ppArray);
+void tTagSetCid(const STag *pTag, int16_t iTag, int16_t cid);
void debugPrintSTag(STag *pTag, const char *tag, int32_t ln); // TODO: remove
int32_t parseJsontoTagData(const char *json, SArray *pTagVals, STag **ppTag, void *pMsgBuf);
diff --git a/include/common/tglobal.h b/include/common/tglobal.h
index f872d1dbc2c1b61a271585f39b16318b99c89c2d..2de4ffdc17347f2e3afb2793a95e32b4cea966e6 100644
--- a/include/common/tglobal.h
+++ b/include/common/tglobal.h
@@ -66,6 +66,7 @@ extern int32_t tsNumOfVnodeStreamThreads;
extern int32_t tsNumOfVnodeFetchThreads;
extern int32_t tsNumOfVnodeWriteThreads;
extern int32_t tsNumOfVnodeSyncThreads;
+extern int32_t tsNumOfVnodeRsmaThreads;
extern int32_t tsNumOfQnodeQueryThreads;
extern int32_t tsNumOfQnodeFetchThreads;
extern int32_t tsNumOfSnodeSharedThreads;
@@ -88,11 +89,12 @@ extern uint16_t tsTelemPort;
// query buffer management
extern int32_t tsQueryBufferSize; // maximum allowed usage buffer size in MB for each data node during query processing
-extern int64_t tsQueryBufferSizeBytes; // maximum allowed usage buffer size in byte for each data node
+extern int64_t tsQueryBufferSizeBytes; // maximum allowed usage buffer size in byte for each data node
// query client
extern int32_t tsQueryPolicy;
extern int32_t tsQuerySmaOptimize;
+extern bool tsQueryPlannerTrace;
// client
extern int32_t tsMinSlidingTime;
@@ -143,10 +145,10 @@ void taosCfgDynamicOptions(const char *option, const char *value);
struct SConfig *taosGetCfg();
-void taosSetAllDebugFlag(int32_t flag);
-void taosSetDebugFlag(int32_t *pFlagPtr, const char *flagName, int32_t flagVal);
+void taosSetAllDebugFlag(int32_t flag, bool rewrite);
+void taosSetDebugFlag(int32_t *pFlagPtr, const char *flagName, int32_t flagVal, bool rewrite);
int32_t taosSetCfg(SConfig *pCfg, char *name);
-void taosLocalCfgForbiddenToChange(char* name, bool* forbidden);
+void taosLocalCfgForbiddenToChange(char *name, bool *forbidden);
#ifdef __cplusplus
}
diff --git a/include/common/tmsg.h b/include/common/tmsg.h
index 8f199c72f7284e5a1a5192fad4f0fdd7a292bab2..d3db2f318cb12822ba3fa3f379bd210160a3ca7a 100644
--- a/include/common/tmsg.h
+++ b/include/common/tmsg.h
@@ -268,14 +268,41 @@ STSRow* tGetSubmitBlkNext(SSubmitBlkIter* pIter);
// for debug
int32_t tPrintFixedSchemaSubmitReq(SSubmitReq* pReq, STSchema* pSchema);
+struct SSchema {
+ int8_t type;
+ int8_t flags;
+ col_id_t colId;
+ int32_t bytes;
+ char name[TSDB_COL_NAME_LEN];
+};
+
typedef struct {
- int32_t code;
- int8_t hashMeta;
- int64_t uid;
- char* tblFName;
- int32_t numOfRows;
- int32_t affectedRows;
- int64_t sver;
+ char tbName[TSDB_TABLE_NAME_LEN];
+ char stbName[TSDB_TABLE_NAME_LEN];
+ char dbFName[TSDB_DB_FNAME_LEN];
+ int64_t dbId;
+ int32_t numOfTags;
+ int32_t numOfColumns;
+ int8_t precision;
+ int8_t tableType;
+ int32_t sversion;
+ int32_t tversion;
+ uint64_t suid;
+ uint64_t tuid;
+ int32_t vgId;
+ int8_t sysInfo;
+ SSchema* pSchemas;
+} STableMetaRsp;
+
+typedef struct {
+ int32_t code;
+ int8_t hashMeta;
+ int64_t uid;
+ char* tblFName;
+ int32_t numOfRows;
+ int32_t affectedRows;
+ int64_t sver;
+ STableMetaRsp* pMeta;
} SSubmitBlkRsp;
typedef struct {
@@ -290,19 +317,14 @@ typedef struct {
int32_t tEncodeSSubmitRsp(SEncoder* pEncoder, const SSubmitRsp* pRsp);
int32_t tDecodeSSubmitRsp(SDecoder* pDecoder, SSubmitRsp* pRsp);
+void tFreeSSubmitBlkRsp(void* param);
void tFreeSSubmitRsp(SSubmitRsp* pRsp);
-#define COL_SMA_ON ((int8_t)0x1)
-#define COL_IDX_ON ((int8_t)0x2)
-#define COL_SET_NULL ((int8_t)0x10)
-#define COL_SET_VAL ((int8_t)0x20)
-struct SSchema {
- int8_t type;
- int8_t flags;
- col_id_t colId;
- int32_t bytes;
- char name[TSDB_COL_NAME_LEN];
-};
+#define COL_SMA_ON ((int8_t)0x1)
+#define COL_IDX_ON ((int8_t)0x2)
+#define COL_SET_NULL ((int8_t)0x10)
+#define COL_SET_VAL ((int8_t)0x20)
+#define COL_IS_SYSINFO ((int8_t)0x40)
#define COL_IS_SET(FLG) (((FLG) & (COL_SET_VAL | COL_SET_NULL)) != 0)
#define COL_CLR_SET(FLG) ((FLG) &= (~(COL_SET_VAL | COL_SET_NULL)))
@@ -472,6 +494,14 @@ int32_t tSerializeSMCreateStbReq(void* buf, int32_t bufLen, SMCreateStbReq* pReq
int32_t tDeserializeSMCreateStbReq(void* buf, int32_t bufLen, SMCreateStbReq* pReq);
void tFreeSMCreateStbReq(SMCreateStbReq* pReq);
+typedef struct {
+ STableMetaRsp* pMeta;
+} SMCreateStbRsp;
+
+int32_t tEncodeSMCreateStbRsp(SEncoder* pEncoder, const SMCreateStbRsp* pRsp);
+int32_t tDecodeSMCreateStbRsp(SDecoder* pDecoder, SMCreateStbRsp* pRsp);
+void tFreeSMCreateStbRsp(SMCreateStbRsp* pRsp);
+
typedef struct {
char name[TSDB_TABLE_FNAME_LEN];
int8_t igNotExists;
@@ -530,6 +560,7 @@ typedef struct {
uint32_t connId;
int32_t dnodeNum;
int8_t superUser;
+ int8_t sysInfo;
int8_t connType;
SEpSet epSet;
int32_t svrTimestamp;
@@ -753,6 +784,10 @@ typedef struct {
int64_t walRetentionSize;
int32_t walRollPeriod;
int64_t walSegmentSize;
+ int32_t sstTrigger;
+ int16_t hashPrefix;
+ int16_t hashSuffix;
+ int32_t tsdbPageSize;
} SCreateDbReq;
int32_t tSerializeSCreateDbReq(void* buf, int32_t bufLen, SCreateDbReq* pReq);
@@ -774,6 +809,7 @@ typedef struct {
int8_t strict;
int8_t cacheLast;
int8_t replications;
+ int32_t sstTrigger;
} SAlterDbReq;
int32_t tSerializeSAlterDbReq(void* buf, int32_t bufLen, SAlterDbReq* pReq);
@@ -810,6 +846,8 @@ typedef struct {
int64_t uid;
int32_t vgVersion;
int32_t vgNum;
+ int16_t hashPrefix;
+ int16_t hashSuffix;
int8_t hashMethod;
SArray* pVgroupInfos; // Array of SVgroupInfo
} SUseDbRsp;
@@ -1035,6 +1073,7 @@ typedef struct {
typedef struct {
int32_t vgId;
int32_t syncState;
+ int64_t cacheUsage;
int64_t numOfTables;
int64_t numOfTimeSeries;
int64_t totalStorage;
@@ -1159,6 +1198,9 @@ typedef struct {
int64_t walRetentionSize;
int32_t walRollPeriod;
int64_t walSegmentSize;
+ int16_t sstTrigger;
+ int16_t hashPrefix;
+ int16_t hashSuffix;
} SCreateVnodeReq;
int32_t tSerializeSCreateVnodeReq(void* buf, int32_t bufLen, SCreateVnodeReq* pReq);
@@ -1239,23 +1281,6 @@ typedef struct {
SVgroupInfo vgroups[];
} SVgroupsInfo;
-typedef struct {
- char tbName[TSDB_TABLE_NAME_LEN];
- char stbName[TSDB_TABLE_NAME_LEN];
- char dbFName[TSDB_DB_FNAME_LEN];
- int64_t dbId;
- int32_t numOfTags;
- int32_t numOfColumns;
- int8_t precision;
- int8_t tableType;
- int32_t sversion;
- int32_t tversion;
- uint64_t suid;
- uint64_t tuid;
- int32_t vgId;
- SSchema* pSchemas;
-} STableMetaRsp;
-
typedef struct {
STableMetaRsp* pMeta;
} SMAlterStbRsp;
@@ -1266,7 +1291,7 @@ void tFreeSMAlterStbRsp(SMAlterStbRsp* pRsp);
int32_t tSerializeSTableMetaRsp(void* buf, int32_t bufLen, STableMetaRsp* pRsp);
int32_t tDeserializeSTableMetaRsp(void* buf, int32_t bufLen, STableMetaRsp* pRsp);
-void tFreeSTableMetaRsp(STableMetaRsp* pRsp);
+void tFreeSTableMetaRsp(void* pRsp);
void tFreeSTableIndexRsp(void* info);
typedef struct {
@@ -2028,11 +2053,13 @@ int tEncodeSVCreateTbBatchReq(SEncoder* pCoder, const SVCreateTbBatchReq* pReq);
int tDecodeSVCreateTbBatchReq(SDecoder* pCoder, SVCreateTbBatchReq* pReq);
typedef struct {
- int32_t code;
+ int32_t code;
+ STableMetaRsp* pMeta;
} SVCreateTbRsp, SVUpdateTbRsp;
-int tEncodeSVCreateTbRsp(SEncoder* pCoder, const SVCreateTbRsp* pRsp);
-int tDecodeSVCreateTbRsp(SDecoder* pCoder, SVCreateTbRsp* pRsp);
+int tEncodeSVCreateTbRsp(SEncoder* pCoder, const SVCreateTbRsp* pRsp);
+int tDecodeSVCreateTbRsp(SDecoder* pCoder, SVCreateTbRsp* pRsp);
+void tFreeSVCreateTbRsp(void* param);
int32_t tSerializeSVCreateTbReq(void** buf, SVCreateTbReq* pReq);
void* tDeserializeSVCreateTbReq(void* buf, SVCreateTbReq* pReq);
@@ -2053,8 +2080,9 @@ int32_t tDeserializeSVCreateTbBatchRsp(void* buf, int32_t bufLen, SVCreateTbBatc
// TDMT_VND_DROP_TABLE =================
typedef struct {
- char* name;
- int8_t igNotExists;
+ char* name;
+ uint64_t suid; // for tmq in wal format
+ int8_t igNotExists;
} SVDropTbReq;
typedef struct {
@@ -2598,7 +2626,7 @@ enum {
typedef struct {
int8_t type;
union {
- // snapshot data
+ // snapshot
struct {
int64_t uid;
int64_t ts;
@@ -2610,6 +2638,22 @@ typedef struct {
};
} STqOffsetVal;
+static FORCE_INLINE void tqOffsetResetToData(STqOffsetVal* pOffsetVal, int64_t uid, int64_t ts) {
+ pOffsetVal->type = TMQ_OFFSET__SNAPSHOT_DATA;
+ pOffsetVal->uid = uid;
+ pOffsetVal->ts = ts;
+}
+
+static FORCE_INLINE void tqOffsetResetToMeta(STqOffsetVal* pOffsetVal, int64_t uid) {
+ pOffsetVal->type = TMQ_OFFSET__SNAPSHOT_META;
+ pOffsetVal->uid = uid;
+}
+
+static FORCE_INLINE void tqOffsetResetToLog(STqOffsetVal* pOffsetVal, int64_t ver) {
+ pOffsetVal->type = TMQ_OFFSET__LOG;
+ pOffsetVal->version = ver;
+}
+
int32_t tEncodeSTqOffsetVal(SEncoder* pEncoder, const STqOffsetVal* pOffsetVal);
int32_t tDecodeSTqOffsetVal(SDecoder* pDecoder, STqOffsetVal* pOffsetVal);
int32_t tFormatOffset(char* buf, int32_t maxLen, const STqOffsetVal* pVal);
@@ -2658,15 +2702,6 @@ typedef struct {
int32_t tSerializeSMDropSmaReq(void* buf, int32_t bufLen, SMDropSmaReq* pReq);
int32_t tDeserializeSMDropSmaReq(void* buf, int32_t bufLen, SMDropSmaReq* pReq);
-typedef struct {
- int32_t vgId;
- SEpSet epSet;
-} SVgEpSet;
-
-typedef struct {
- int32_t padding;
-} SRSmaExecMsg;
-
typedef struct {
int8_t version; // for compatibility(default 0)
int8_t intervalUnit; // MACRO: TIME_UNIT_XXX
@@ -2926,33 +2961,14 @@ static FORCE_INLINE void tDeleteSMqSubTopicEp(SMqSubTopicEp* pSubTopicEp) {
typedef struct {
SMqRspHead head;
- int64_t reqOffset;
- int64_t rspOffset;
- STqOffsetVal reqOffsetNew;
- STqOffsetVal rspOffsetNew;
+ STqOffsetVal rspOffset;
int16_t resMsgType;
int32_t metaRspLen;
void* metaRsp;
} SMqMetaRsp;
-static FORCE_INLINE int32_t tEncodeSMqMetaRsp(void** buf, const SMqMetaRsp* pRsp) {
- int32_t tlen = 0;
- tlen += taosEncodeFixedI64(buf, pRsp->reqOffset);
- tlen += taosEncodeFixedI64(buf, pRsp->rspOffset);
- tlen += taosEncodeFixedI16(buf, pRsp->resMsgType);
- tlen += taosEncodeFixedI32(buf, pRsp->metaRspLen);
- tlen += taosEncodeBinary(buf, pRsp->metaRsp, pRsp->metaRspLen);
- return tlen;
-}
-
-static FORCE_INLINE void* tDecodeSMqMetaRsp(const void* buf, SMqMetaRsp* pRsp) {
- buf = taosDecodeFixedI64(buf, &pRsp->reqOffset);
- buf = taosDecodeFixedI64(buf, &pRsp->rspOffset);
- buf = taosDecodeFixedI16(buf, &pRsp->resMsgType);
- buf = taosDecodeFixedI32(buf, &pRsp->metaRspLen);
- buf = taosDecodeBinary(buf, &pRsp->metaRsp, pRsp->metaRspLen);
- return (void*)buf;
-}
+int32_t tEncodeSMqMetaRsp(SEncoder* pEncoder, const SMqMetaRsp* pRsp);
+int32_t tDecodeSMqMetaRsp(SDecoder* pDecoder, SMqMetaRsp* pRsp);
typedef struct {
SMqRspHead head;
@@ -2969,6 +2985,27 @@ typedef struct {
int32_t tEncodeSMqDataRsp(SEncoder* pEncoder, const SMqDataRsp* pRsp);
int32_t tDecodeSMqDataRsp(SDecoder* pDecoder, SMqDataRsp* pRsp);
+void tDeleteSMqDataRsp(SMqDataRsp* pRsp);
+
+typedef struct {
+ SMqRspHead head;
+ STqOffsetVal reqOffset;
+ STqOffsetVal rspOffset;
+ int32_t blockNum;
+ int8_t withTbName;
+ int8_t withSchema;
+ SArray* blockDataLen;
+ SArray* blockData;
+ SArray* blockTbName;
+ SArray* blockSchema;
+ int32_t createTableNum;
+ SArray* createTableLen;
+ SArray* createTableReq;
+} STaosxRsp;
+
+int32_t tEncodeSTaosxRsp(SEncoder* pEncoder, const STaosxRsp* pRsp);
+int32_t tDecodeSTaosxRsp(SDecoder* pDecoder, STaosxRsp* pRsp);
+void tDeleteSTaosxRsp(STaosxRsp* pRsp);
typedef struct {
SMqRspHead head;
diff --git a/include/common/tmsgdef.h b/include/common/tmsgdef.h
index 006ba7f21bf0177c2b0104a51ef7908785cced2d..3f917ff0d1665d90de079dfc3eae884412ed0e7f 100644
--- a/include/common/tmsgdef.h
+++ b/include/common/tmsgdef.h
@@ -272,6 +272,8 @@ enum {
TD_DEF_MSG_TYPE(TDMT_SYNC_LEADER_TRANSFER, "sync-leader-transfer", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_SYNC_SET_MNODE_STANDBY, "set-mnode-standby", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_SYNC_SET_VNODE_STANDBY, "set-vnode-standby", NULL, NULL)
+ TD_DEF_MSG_TYPE(TDMT_SYNC_HEARTBEAT, "sync-heartbeat", NULL, NULL)
+ TD_DEF_MSG_TYPE(TDMT_SYNC_HEARTBEAT_REPLY, "sync-heartbeat-reply", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_SYNC_MAX_MSG, "sync-max", NULL, NULL)
#if defined(TD_MSG_NUMBER_)
diff --git a/include/common/ttokendef.h b/include/common/ttokendef.h
index b38ec664358d08622c06d8f941873b59e43b9455..3e170d5098ab34b1dc41c6d87d74484eff503fbc 100644
--- a/include/common/ttokendef.h
+++ b/include/common/ttokendef.h
@@ -89,237 +89,241 @@
#define TK_KEEP 71
#define TK_PAGES 72
#define TK_PAGESIZE 73
-#define TK_PRECISION 74
-#define TK_REPLICA 75
-#define TK_STRICT 76
-#define TK_VGROUPS 77
-#define TK_SINGLE_STABLE 78
-#define TK_RETENTIONS 79
-#define TK_SCHEMALESS 80
-#define TK_WAL_LEVEL 81
-#define TK_WAL_FSYNC_PERIOD 82
-#define TK_WAL_RETENTION_PERIOD 83
-#define TK_WAL_RETENTION_SIZE 84
-#define TK_WAL_ROLL_PERIOD 85
-#define TK_WAL_SEGMENT_SIZE 86
-#define TK_NK_COLON 87
-#define TK_TABLE 88
-#define TK_NK_LP 89
-#define TK_NK_RP 90
-#define TK_STABLE 91
-#define TK_ADD 92
-#define TK_COLUMN 93
-#define TK_MODIFY 94
-#define TK_RENAME 95
-#define TK_TAG 96
-#define TK_SET 97
-#define TK_NK_EQ 98
-#define TK_USING 99
-#define TK_TAGS 100
-#define TK_COMMENT 101
-#define TK_BOOL 102
-#define TK_TINYINT 103
-#define TK_SMALLINT 104
-#define TK_INT 105
-#define TK_INTEGER 106
-#define TK_BIGINT 107
-#define TK_FLOAT 108
-#define TK_DOUBLE 109
-#define TK_BINARY 110
-#define TK_TIMESTAMP 111
-#define TK_NCHAR 112
-#define TK_UNSIGNED 113
-#define TK_JSON 114
-#define TK_VARCHAR 115
-#define TK_MEDIUMBLOB 116
-#define TK_BLOB 117
-#define TK_VARBINARY 118
-#define TK_DECIMAL 119
-#define TK_MAX_DELAY 120
-#define TK_WATERMARK 121
-#define TK_ROLLUP 122
-#define TK_TTL 123
-#define TK_SMA 124
-#define TK_FIRST 125
-#define TK_LAST 126
-#define TK_SHOW 127
-#define TK_DATABASES 128
-#define TK_TABLES 129
-#define TK_STABLES 130
-#define TK_MNODES 131
-#define TK_MODULES 132
-#define TK_QNODES 133
-#define TK_FUNCTIONS 134
-#define TK_INDEXES 135
-#define TK_ACCOUNTS 136
-#define TK_APPS 137
-#define TK_CONNECTIONS 138
-#define TK_LICENCES 139
-#define TK_GRANTS 140
-#define TK_QUERIES 141
-#define TK_SCORES 142
-#define TK_TOPICS 143
-#define TK_VARIABLES 144
-#define TK_BNODES 145
-#define TK_SNODES 146
-#define TK_CLUSTER 147
-#define TK_TRANSACTIONS 148
-#define TK_DISTRIBUTED 149
-#define TK_CONSUMERS 150
-#define TK_SUBSCRIPTIONS 151
-#define TK_LIKE 152
-#define TK_INDEX 153
-#define TK_FUNCTION 154
-#define TK_INTERVAL 155
-#define TK_TOPIC 156
-#define TK_AS 157
-#define TK_WITH 158
-#define TK_META 159
-#define TK_CONSUMER 160
-#define TK_GROUP 161
-#define TK_DESC 162
-#define TK_DESCRIBE 163
-#define TK_RESET 164
-#define TK_QUERY 165
-#define TK_CACHE 166
-#define TK_EXPLAIN 167
-#define TK_ANALYZE 168
-#define TK_VERBOSE 169
-#define TK_NK_BOOL 170
-#define TK_RATIO 171
-#define TK_NK_FLOAT 172
-#define TK_OUTPUTTYPE 173
-#define TK_AGGREGATE 174
-#define TK_BUFSIZE 175
-#define TK_STREAM 176
-#define TK_INTO 177
-#define TK_TRIGGER 178
-#define TK_AT_ONCE 179
-#define TK_WINDOW_CLOSE 180
-#define TK_IGNORE 181
-#define TK_EXPIRED 182
-#define TK_KILL 183
-#define TK_CONNECTION 184
-#define TK_TRANSACTION 185
-#define TK_BALANCE 186
-#define TK_VGROUP 187
-#define TK_MERGE 188
-#define TK_REDISTRIBUTE 189
-#define TK_SPLIT 190
-#define TK_DELETE 191
-#define TK_INSERT 192
-#define TK_NULL 193
-#define TK_NK_QUESTION 194
-#define TK_NK_ARROW 195
-#define TK_ROWTS 196
-#define TK_TBNAME 197
-#define TK_QSTART 198
-#define TK_QEND 199
-#define TK_QDURATION 200
-#define TK_WSTART 201
-#define TK_WEND 202
-#define TK_WDURATION 203
-#define TK_CAST 204
-#define TK_NOW 205
-#define TK_TODAY 206
-#define TK_TIMEZONE 207
-#define TK_CLIENT_VERSION 208
-#define TK_SERVER_VERSION 209
-#define TK_SERVER_STATUS 210
-#define TK_CURRENT_USER 211
-#define TK_COUNT 212
-#define TK_LAST_ROW 213
-#define TK_BETWEEN 214
-#define TK_IS 215
-#define TK_NK_LT 216
-#define TK_NK_GT 217
-#define TK_NK_LE 218
-#define TK_NK_GE 219
-#define TK_NK_NE 220
-#define TK_MATCH 221
-#define TK_NMATCH 222
-#define TK_CONTAINS 223
-#define TK_IN 224
-#define TK_JOIN 225
-#define TK_INNER 226
-#define TK_SELECT 227
-#define TK_DISTINCT 228
-#define TK_WHERE 229
-#define TK_PARTITION 230
-#define TK_BY 231
-#define TK_SESSION 232
-#define TK_STATE_WINDOW 233
-#define TK_SLIDING 234
-#define TK_FILL 235
-#define TK_VALUE 236
-#define TK_NONE 237
-#define TK_PREV 238
-#define TK_LINEAR 239
-#define TK_NEXT 240
-#define TK_HAVING 241
-#define TK_RANGE 242
-#define TK_EVERY 243
-#define TK_ORDER 244
-#define TK_SLIMIT 245
-#define TK_SOFFSET 246
-#define TK_LIMIT 247
-#define TK_OFFSET 248
-#define TK_ASC 249
-#define TK_NULLS 250
-#define TK_ABORT 251
-#define TK_AFTER 252
-#define TK_ATTACH 253
-#define TK_BEFORE 254
-#define TK_BEGIN 255
-#define TK_BITAND 256
-#define TK_BITNOT 257
-#define TK_BITOR 258
-#define TK_BLOCKS 259
-#define TK_CHANGE 260
-#define TK_COMMA 261
-#define TK_COMPACT 262
-#define TK_CONCAT 263
-#define TK_CONFLICT 264
-#define TK_COPY 265
-#define TK_DEFERRED 266
-#define TK_DELIMITERS 267
-#define TK_DETACH 268
-#define TK_DIVIDE 269
-#define TK_DOT 270
-#define TK_EACH 271
-#define TK_END 272
-#define TK_FAIL 273
-#define TK_FILE 274
-#define TK_FOR 275
-#define TK_GLOB 276
-#define TK_ID 277
-#define TK_IMMEDIATE 278
-#define TK_IMPORT 279
-#define TK_INITIALLY 280
-#define TK_INSTEAD 281
-#define TK_ISNULL 282
-#define TK_KEY 283
-#define TK_NK_BITNOT 284
-#define TK_NK_SEMI 285
-#define TK_NOTNULL 286
-#define TK_OF 287
-#define TK_PLUS 288
-#define TK_PRIVILEGE 289
-#define TK_RAISE 290
-#define TK_REPLACE 291
-#define TK_RESTRICT 292
-#define TK_ROW 293
-#define TK_SEMI 294
-#define TK_STAR 295
-#define TK_STATEMENT 296
-#define TK_STRING 297
-#define TK_TIMES 298
-#define TK_UPDATE 299
-#define TK_VALUES 300
-#define TK_VARIABLE 301
-#define TK_VIEW 302
-#define TK_VNODES 303
-#define TK_WAL 304
+#define TK_TSDB_PAGESIZE 74
+#define TK_PRECISION 75
+#define TK_REPLICA 76
+#define TK_STRICT 77
+#define TK_VGROUPS 78
+#define TK_SINGLE_STABLE 79
+#define TK_RETENTIONS 80
+#define TK_SCHEMALESS 81
+#define TK_WAL_LEVEL 82
+#define TK_WAL_FSYNC_PERIOD 83
+#define TK_WAL_RETENTION_PERIOD 84
+#define TK_WAL_RETENTION_SIZE 85
+#define TK_WAL_ROLL_PERIOD 86
+#define TK_WAL_SEGMENT_SIZE 87
+#define TK_STT_TRIGGER 88
+#define TK_TABLE_PREFIX 89
+#define TK_TABLE_SUFFIX 90
+#define TK_NK_COLON 91
+#define TK_TABLE 92
+#define TK_NK_LP 93
+#define TK_NK_RP 94
+#define TK_STABLE 95
+#define TK_ADD 96
+#define TK_COLUMN 97
+#define TK_MODIFY 98
+#define TK_RENAME 99
+#define TK_TAG 100
+#define TK_SET 101
+#define TK_NK_EQ 102
+#define TK_USING 103
+#define TK_TAGS 104
+#define TK_COMMENT 105
+#define TK_BOOL 106
+#define TK_TINYINT 107
+#define TK_SMALLINT 108
+#define TK_INT 109
+#define TK_INTEGER 110
+#define TK_BIGINT 111
+#define TK_FLOAT 112
+#define TK_DOUBLE 113
+#define TK_BINARY 114
+#define TK_TIMESTAMP 115
+#define TK_NCHAR 116
+#define TK_UNSIGNED 117
+#define TK_JSON 118
+#define TK_VARCHAR 119
+#define TK_MEDIUMBLOB 120
+#define TK_BLOB 121
+#define TK_VARBINARY 122
+#define TK_DECIMAL 123
+#define TK_MAX_DELAY 124
+#define TK_WATERMARK 125
+#define TK_ROLLUP 126
+#define TK_TTL 127
+#define TK_SMA 128
+#define TK_FIRST 129
+#define TK_LAST 130
+#define TK_SHOW 131
+#define TK_DATABASES 132
+#define TK_TABLES 133
+#define TK_STABLES 134
+#define TK_MNODES 135
+#define TK_MODULES 136
+#define TK_QNODES 137
+#define TK_FUNCTIONS 138
+#define TK_INDEXES 139
+#define TK_ACCOUNTS 140
+#define TK_APPS 141
+#define TK_CONNECTIONS 142
+#define TK_LICENCES 143
+#define TK_GRANTS 144
+#define TK_QUERIES 145
+#define TK_SCORES 146
+#define TK_TOPICS 147
+#define TK_VARIABLES 148
+#define TK_BNODES 149
+#define TK_SNODES 150
+#define TK_CLUSTER 151
+#define TK_TRANSACTIONS 152
+#define TK_DISTRIBUTED 153
+#define TK_CONSUMERS 154
+#define TK_SUBSCRIPTIONS 155
+#define TK_VNODES 156
+#define TK_LIKE 157
+#define TK_INDEX 158
+#define TK_FUNCTION 159
+#define TK_INTERVAL 160
+#define TK_TOPIC 161
+#define TK_AS 162
+#define TK_WITH 163
+#define TK_META 164
+#define TK_CONSUMER 165
+#define TK_GROUP 166
+#define TK_DESC 167
+#define TK_DESCRIBE 168
+#define TK_RESET 169
+#define TK_QUERY 170
+#define TK_CACHE 171
+#define TK_EXPLAIN 172
+#define TK_ANALYZE 173
+#define TK_VERBOSE 174
+#define TK_NK_BOOL 175
+#define TK_RATIO 176
+#define TK_NK_FLOAT 177
+#define TK_OUTPUTTYPE 178
+#define TK_AGGREGATE 179
+#define TK_BUFSIZE 180
+#define TK_STREAM 181
+#define TK_INTO 182
+#define TK_TRIGGER 183
+#define TK_AT_ONCE 184
+#define TK_WINDOW_CLOSE 185
+#define TK_IGNORE 186
+#define TK_EXPIRED 187
+#define TK_KILL 188
+#define TK_CONNECTION 189
+#define TK_TRANSACTION 190
+#define TK_BALANCE 191
+#define TK_VGROUP 192
+#define TK_MERGE 193
+#define TK_REDISTRIBUTE 194
+#define TK_SPLIT 195
+#define TK_DELETE 196
+#define TK_INSERT 197
+#define TK_NULL 198
+#define TK_NK_QUESTION 199
+#define TK_NK_ARROW 200
+#define TK_ROWTS 201
+#define TK_TBNAME 202
+#define TK_QSTART 203
+#define TK_QEND 204
+#define TK_QDURATION 205
+#define TK_WSTART 206
+#define TK_WEND 207
+#define TK_WDURATION 208
+#define TK_CAST 209
+#define TK_NOW 210
+#define TK_TODAY 211
+#define TK_TIMEZONE 212
+#define TK_CLIENT_VERSION 213
+#define TK_SERVER_VERSION 214
+#define TK_SERVER_STATUS 215
+#define TK_CURRENT_USER 216
+#define TK_COUNT 217
+#define TK_LAST_ROW 218
+#define TK_BETWEEN 219
+#define TK_IS 220
+#define TK_NK_LT 221
+#define TK_NK_GT 222
+#define TK_NK_LE 223
+#define TK_NK_GE 224
+#define TK_NK_NE 225
+#define TK_MATCH 226
+#define TK_NMATCH 227
+#define TK_CONTAINS 228
+#define TK_IN 229
+#define TK_JOIN 230
+#define TK_INNER 231
+#define TK_SELECT 232
+#define TK_DISTINCT 233
+#define TK_WHERE 234
+#define TK_PARTITION 235
+#define TK_BY 236
+#define TK_SESSION 237
+#define TK_STATE_WINDOW 238
+#define TK_SLIDING 239
+#define TK_FILL 240
+#define TK_VALUE 241
+#define TK_NONE 242
+#define TK_PREV 243
+#define TK_LINEAR 244
+#define TK_NEXT 245
+#define TK_HAVING 246
+#define TK_RANGE 247
+#define TK_EVERY 248
+#define TK_ORDER 249
+#define TK_SLIMIT 250
+#define TK_SOFFSET 251
+#define TK_LIMIT 252
+#define TK_OFFSET 253
+#define TK_ASC 254
+#define TK_NULLS 255
+#define TK_ABORT 256
+#define TK_AFTER 257
+#define TK_ATTACH 258
+#define TK_BEFORE 259
+#define TK_BEGIN 260
+#define TK_BITAND 261
+#define TK_BITNOT 262
+#define TK_BITOR 263
+#define TK_BLOCKS 264
+#define TK_CHANGE 265
+#define TK_COMMA 266
+#define TK_COMPACT 267
+#define TK_CONCAT 268
+#define TK_CONFLICT 269
+#define TK_COPY 270
+#define TK_DEFERRED 271
+#define TK_DELIMITERS 272
+#define TK_DETACH 273
+#define TK_DIVIDE 274
+#define TK_DOT 275
+#define TK_EACH 276
+#define TK_END 277
+#define TK_FAIL 278
+#define TK_FILE 279
+#define TK_FOR 280
+#define TK_GLOB 281
+#define TK_ID 282
+#define TK_IMMEDIATE 283
+#define TK_IMPORT 284
+#define TK_INITIALLY 285
+#define TK_INSTEAD 286
+#define TK_ISNULL 287
+#define TK_KEY 288
+#define TK_NK_BITNOT 289
+#define TK_NK_SEMI 290
+#define TK_NOTNULL 291
+#define TK_OF 292
+#define TK_PLUS 293
+#define TK_PRIVILEGE 294
+#define TK_RAISE 295
+#define TK_REPLACE 296
+#define TK_RESTRICT 297
+#define TK_ROW 298
+#define TK_SEMI 299
+#define TK_STAR 300
+#define TK_STATEMENT 301
+#define TK_STRING 302
+#define TK_TIMES 303
+#define TK_UPDATE 304
+#define TK_VALUES 305
+#define TK_VARIABLE 306
+#define TK_VIEW 307
+#define TK_WAL 308
#define TK_NK_SPACE 300
#define TK_NK_COMMENT 301
diff --git a/include/common/ttypes.h b/include/common/ttypes.h
index ceb3eae0338455ab207034fca707473c6c44940d..a88f65f6acf69d552073ab0ede31a0b027b25692 100644
--- a/include/common/ttypes.h
+++ b/include/common/ttypes.h
@@ -49,9 +49,6 @@ typedef struct {
#define varDataCopy(dst, v) memcpy((dst), (void *)(v), varDataTLen(v))
#define varDataLenByData(v) (*(VarDataLenT *)(((char *)(v)) - VARSTR_HEADER_SIZE))
#define varDataSetLen(v, _len) (((VarDataLenT *)(v))[0] = (VarDataLenT)(_len))
-#define IS_VAR_DATA_TYPE(t) \
- (((t) == TSDB_DATA_TYPE_VARCHAR) || ((t) == TSDB_DATA_TYPE_NCHAR) || ((t) == TSDB_DATA_TYPE_JSON))
-#define IS_STR_DATA_TYPE(t) (((t) == TSDB_DATA_TYPE_VARCHAR) || ((t) == TSDB_DATA_TYPE_NCHAR))
#define varDataNetLen(v) (htons(((VarDataLenT *)(v))[0]))
#define varDataNetTLen(v) (sizeof(VarDataLenT) + varDataNetLen(v))
@@ -268,11 +265,16 @@ typedef struct {
#define IS_UNSIGNED_NUMERIC_TYPE(_t) ((_t) >= TSDB_DATA_TYPE_UTINYINT && (_t) <= TSDB_DATA_TYPE_UBIGINT)
#define IS_FLOAT_TYPE(_t) ((_t) == TSDB_DATA_TYPE_FLOAT || (_t) == TSDB_DATA_TYPE_DOUBLE)
#define IS_INTEGER_TYPE(_t) ((IS_SIGNED_NUMERIC_TYPE(_t)) || (IS_UNSIGNED_NUMERIC_TYPE(_t)))
+#define IS_TIMESTAMP_TYPE(_t) ((_t) == TSDB_DATA_TYPE_TIMESTAMP)
#define IS_NUMERIC_TYPE(_t) ((IS_SIGNED_NUMERIC_TYPE(_t)) || (IS_UNSIGNED_NUMERIC_TYPE(_t)) || (IS_FLOAT_TYPE(_t)))
#define IS_MATHABLE_TYPE(_t) \
(IS_NUMERIC_TYPE(_t) || (_t) == (TSDB_DATA_TYPE_BOOL) || (_t) == (TSDB_DATA_TYPE_TIMESTAMP))
+#define IS_VAR_DATA_TYPE(t) \
+ (((t) == TSDB_DATA_TYPE_VARCHAR) || ((t) == TSDB_DATA_TYPE_NCHAR) || ((t) == TSDB_DATA_TYPE_JSON))
+#define IS_STR_DATA_TYPE(t) (((t) == TSDB_DATA_TYPE_VARCHAR) || ((t) == TSDB_DATA_TYPE_NCHAR))
+
#define IS_VALID_TINYINT(_t) ((_t) >= INT8_MIN && (_t) <= INT8_MAX)
#define IS_VALID_SMALLINT(_t) ((_t) >= INT16_MIN && (_t) <= INT16_MAX)
#define IS_VALID_INT(_t) ((_t) >= INT32_MIN && (_t) <= INT32_MAX)
diff --git a/include/libs/command/command.h b/include/libs/command/command.h
index 8a4ecad37da3089c32ff0e3fca7473dcc334971c..b3339a417ba463212c3abc163b57519194953c10 100644
--- a/include/libs/command/command.h
+++ b/include/libs/command/command.h
@@ -17,12 +17,12 @@
#define TDENGINE_COMMAND_H
#include "cmdnodes.h"
-#include "tmsg.h"
#include "plannodes.h"
+#include "tmsg.h"
typedef struct SExplainCtx SExplainCtx;
-int32_t qExecCommand(SNode* pStmt, SRetrieveTableRsp** pRsp);
+int32_t qExecCommand(bool sysInfoUser, SNode *pStmt, SRetrieveTableRsp **pRsp);
int32_t qExecStaticExplain(SQueryPlan *pDag, SRetrieveTableRsp **pRsp);
int32_t qExecExplainBegin(SQueryPlan *pDag, SExplainCtx **pCtx, int64_t startTs);
diff --git a/include/libs/executor/executor.h b/include/libs/executor/executor.h
index a64815f14fe0a0dbe5b85ffd0969a68d43f50d8e..25a6221fcb5344cd1f0d98af15840b3905321612 100644
--- a/include/libs/executor/executor.h
+++ b/include/libs/executor/executor.h
@@ -29,7 +29,7 @@ typedef void* DataSinkHandle;
struct SRpcMsg;
struct SSubplan;
-typedef struct SReadHandle {
+typedef struct {
void* tqReader;
void* meta;
void* config;
@@ -41,6 +41,10 @@ typedef struct SReadHandle {
bool initTableReader;
bool initTqReader;
int32_t numOfVgroups;
+
+ void* sContext; // SSnapContext*
+
+ void* pStateBackend;
} SReadHandle;
// in queue mode, data streams are seperated by msg
@@ -78,8 +82,8 @@ int32_t qSetMultiStreamInput(qTaskInfo_t tinfo, const void* pBlocks, size_t numO
/**
* @brief Cleanup SSDataBlock for StreamScanInfo
- *
- * @param tinfo
+ *
+ * @param tinfo
*/
void tdCleanupStreamInputDataBlock(qTaskInfo_t tinfo);
@@ -163,7 +167,7 @@ int32_t qGetQualifiedTableIdList(void* pTableList, const char* tagCond, int32_t
void qProcessRspMsg(void* parent, struct SRpcMsg* pMsg, struct SEpSet* pEpSet);
-int32_t qGetExplainExecInfo(qTaskInfo_t tinfo, SArray* pExecInfoList/*,int32_t* resNum, SExplainExecInfo** pRes*/);
+int32_t qGetExplainExecInfo(qTaskInfo_t tinfo, SArray* pExecInfoList /*,int32_t* resNum, SExplainExecInfo** pRes*/);
int32_t qSerializeTaskStatus(qTaskInfo_t tinfo, char** pOutput, int32_t* len);
@@ -180,11 +184,17 @@ int32_t qGetStreamScanStatus(qTaskInfo_t tinfo, uint64_t* uid, int64_t* ts);
int32_t qStreamPrepareTsdbScan(qTaskInfo_t tinfo, uint64_t uid, int64_t ts);
-int32_t qStreamPrepareScan(qTaskInfo_t tinfo, const STqOffsetVal* pOffset);
+int32_t qStreamPrepareScan(qTaskInfo_t tinfo, STqOffsetVal* pOffset, int8_t subType);
int32_t qStreamExtractOffset(qTaskInfo_t tinfo, STqOffsetVal* pOffset);
-void* qStreamExtractMetaMsg(qTaskInfo_t tinfo);
+SMqMetaRsp* qStreamExtractMetaMsg(qTaskInfo_t tinfo);
+
+int64_t qStreamExtractPrepareUid(qTaskInfo_t tinfo);
+
+const SSchemaWrapper* qExtractSchemaFromTask(qTaskInfo_t tinfo);
+
+const char* qExtractTbnameFromTask(qTaskInfo_t tinfo);
void* qExtractReaderFromStreamScanner(void* scanner);
diff --git a/include/libs/function/function.h b/include/libs/function/function.h
index d5da306fd297dd49f4753aa01c6423cb9dd82e9c..3f26eee86ad3f1b4666c55283ad346f60a7b4f31 100644
--- a/include/libs/function/function.h
+++ b/include/libs/function/function.h
@@ -92,6 +92,8 @@ struct SResultRowEntryInfo;
//for selectivity query, the corresponding tag value is assigned if the data is qualified
typedef struct SSubsidiaryResInfo {
int16_t num;
+ int32_t rowLen;
+ char* buf; // serialize data buffer
struct SqlFunctionCtx **pCtx;
} SSubsidiaryResInfo;
@@ -118,6 +120,11 @@ typedef struct SInputColumnInfoData {
uint64_t uid; // table uid, used to set the tag value when building the final query result for selectivity functions.
} SInputColumnInfoData;
+typedef struct SSerializeDataHandle {
+ struct SDiskbasedBuf* pBuf;
+ int32_t currentPage;
+} SSerializeDataHandle;
+
// sql function runtime context
typedef struct SqlFunctionCtx {
SInputColumnInfoData input;
@@ -137,11 +144,9 @@ typedef struct SqlFunctionCtx {
SFuncExecFuncs fpSet;
SScalarFuncExecFuncs sfp;
struct SExprInfo *pExpr;
- struct SDiskbasedBuf *pBuf;
struct SSDataBlock *pSrcBlock;
- struct SSDataBlock *pDstBlock; // used by indifinite rows function to set selectivity
- int32_t curBufPage;
- bool increase;
+ struct SSDataBlock *pDstBlock; // used by indefinite rows function to set selectivity
+ SSerializeDataHandle saveHandle;
bool isStream;
char udfName[TSDB_FUNC_NAME_LEN];
diff --git a/include/libs/function/functionMgt.h b/include/libs/function/functionMgt.h
index 741b0fddebf36cd1a8f16d0d2265742bcb9ac16c..c9c19579cb1c6943c5914aebed20668a1c1ff156 100644
--- a/include/libs/function/functionMgt.h
+++ b/include/libs/function/functionMgt.h
@@ -176,7 +176,8 @@ int32_t fmGetFuncInfo(SFunctionNode* pFunc, char* pMsg, int32_t msgLen);
EFuncReturnRows fmGetFuncReturnRows(SFunctionNode* pFunc);
-bool fmIsBuiltinFunc(const char* pFunc);
+bool fmIsBuiltinFunc(const char* pFunc);
+EFunctionType fmGetFuncType(const char* pFunc);
bool fmIsAggFunc(int32_t funcId);
bool fmIsScalarFunc(int32_t funcId);
diff --git a/include/libs/function/taosudf.h b/include/libs/function/taosudf.h
index 5e84b87a81ec1808dfc368ac285f4dabd2e1d57e..2b2063e3f61e575cd59de099feee3b83ad87ff9c 100644
--- a/include/libs/function/taosudf.h
+++ b/include/libs/function/taosudf.h
@@ -256,8 +256,9 @@ static FORCE_INLINE int32_t udfColDataSet(SUdfColumn* pColumn, uint32_t currentR
typedef int32_t (*TUdfScalarProcFunc)(SUdfDataBlock* block, SUdfColumn *resultCol);
typedef int32_t (*TUdfAggStartFunc)(SUdfInterBuf *buf);
-typedef int32_t (*TUdfAggProcessFunc)(SUdfDataBlock* block, SUdfInterBuf *interBuf, SUdfInterBuf *newInterBuf);
-typedef int32_t (*TUdfAggFinishFunc)(SUdfInterBuf* buf, SUdfInterBuf *resultData);
+typedef int32_t (*TUdfAggProcessFunc)(SUdfDataBlock *block, SUdfInterBuf *interBuf, SUdfInterBuf *newInterBuf);
+typedef int32_t (*TUdfAggMergeFunc)(SUdfInterBuf *inputBuf1, SUdfInterBuf *inputBuf2, SUdfInterBuf *outputBuf);
+typedef int32_t (*TUdfAggFinishFunc)(SUdfInterBuf *buf, SUdfInterBuf *resultData);
#ifdef __cplusplus
}
diff --git a/include/libs/nodes/cmdnodes.h b/include/libs/nodes/cmdnodes.h
index 3223d4cdb8dfd36284f3d36922451166226fdd3e..22e92b2e8031d4b2d604433f9f32db0bc3754b4f 100644
--- a/include/libs/nodes/cmdnodes.h
+++ b/include/libs/nodes/cmdnodes.h
@@ -64,6 +64,7 @@ typedef struct SDatabaseOptions {
int64_t keep[3];
int32_t pages;
int32_t pagesize;
+ int32_t tsdbPageSize;
char precisionStr[3];
int8_t precision;
int8_t replica;
@@ -78,6 +79,12 @@ typedef struct SDatabaseOptions {
int32_t walRetentionSize;
int32_t walRollPeriod;
int32_t walSegmentSize;
+ bool walRetentionPeriodIsSet;
+ bool walRetentionSizeIsSet;
+ bool walRollPeriodIsSet;
+ int32_t sstTrigger;
+ int32_t tablePrefix;
+ int32_t tableSuffix;
} SDatabaseOptions;
typedef struct SCreateDatabaseStmt {
@@ -268,6 +275,12 @@ typedef struct SShowDnodeVariablesStmt {
SNode* pDnodeId;
} SShowDnodeVariablesStmt;
+typedef struct SShowVnodesStmt {
+ ENodeType type;
+ SNode* pDnodeId;
+ SNode* pDnodeEndpoint;
+} SShowVnodesStmt;
+
typedef enum EIndexType { INDEX_TYPE_SMA = 1, INDEX_TYPE_FULLTEXT } EIndexType;
typedef struct SIndexOptions {
diff --git a/include/libs/nodes/nodes.h b/include/libs/nodes/nodes.h
index 5743d3360857dab460841d89e50360ba53d36b39..6500d3d1831e817c497406e574b721594e63e209 100644
--- a/include/libs/nodes/nodes.h
+++ b/include/libs/nodes/nodes.h
@@ -183,12 +183,12 @@ typedef enum ENodeType {
QUERY_NODE_SHOW_DNODE_VARIABLES_STMT,
QUERY_NODE_SHOW_TRANSACTIONS_STMT,
QUERY_NODE_SHOW_SUBSCRIPTIONS_STMT,
+ QUERY_NODE_SHOW_VNODES_STMT,
QUERY_NODE_SHOW_CREATE_DATABASE_STMT,
QUERY_NODE_SHOW_CREATE_TABLE_STMT,
QUERY_NODE_SHOW_CREATE_STABLE_STMT,
QUERY_NODE_SHOW_TABLE_DISTRIBUTED_STMT,
QUERY_NODE_SHOW_LOCAL_VARIABLES_STMT,
- QUERY_NODE_SHOW_VNODES_STMT,
QUERY_NODE_SHOW_SCORES_STMT,
QUERY_NODE_KILL_CONNECTION_STMT,
QUERY_NODE_KILL_QUERY_STMT,
@@ -244,6 +244,7 @@ typedef enum ENodeType {
QUERY_NODE_PHYSICAL_PLAN_MERGE_STATE,
QUERY_NODE_PHYSICAL_PLAN_STREAM_STATE,
QUERY_NODE_PHYSICAL_PLAN_PARTITION,
+ QUERY_NODE_PHYSICAL_PLAN_STREAM_PARTITION,
QUERY_NODE_PHYSICAL_PLAN_INDEF_ROWS_FUNC,
QUERY_NODE_PHYSICAL_PLAN_INTERP_FUNC,
QUERY_NODE_PHYSICAL_PLAN_DISPATCH,
@@ -319,6 +320,9 @@ int32_t nodesStringToNode(const char* pStr, SNode** pNode);
int32_t nodesListToString(const SNodeList* pList, bool format, char** pStr, int32_t* pLen);
int32_t nodesStringToList(const char* pStr, SNodeList** pList);
+int32_t nodesNodeToMsg(const SNode* pNode, char** pMsg, int32_t* pLen);
+int32_t nodesMsgToNode(const char* pStr, int32_t len, SNode** pNode);
+
int32_t nodesNodeToSQL(SNode* pNode, char* buf, int32_t bufSize, int32_t* len);
char* nodesGetNameFromColumnNode(SNode* pNode);
int32_t nodesGetOutputNumFromSlotList(SNodeList* pSlots);
diff --git a/include/libs/nodes/plannodes.h b/include/libs/nodes/plannodes.h
index 8661baceb2dc426e69e459aec33c6c730b419e7e..8aeb86102a7b4237276f59f25fe50d36c6f99efa 100644
--- a/include/libs/nodes/plannodes.h
+++ b/include/libs/nodes/plannodes.h
@@ -317,6 +317,7 @@ typedef struct SSystemTableScanPhysiNode {
SEpSet mgmtEpSet;
bool showRewrite;
int32_t accountId;
+ bool sysInfo;
} SSystemTableScanPhysiNode;
typedef struct STableScanPhysiNode {
@@ -487,6 +488,8 @@ typedef struct SPartitionPhysiNode {
SNodeList* pTargets;
} SPartitionPhysiNode;
+typedef SPartitionPhysiNode SStreamPartitionPhysiNode;
+
typedef struct SDataSinkNode {
ENodeType type;
SDataBlockDescNode* pInputDataBlockDesc;
diff --git a/include/libs/nodes/querynodes.h b/include/libs/nodes/querynodes.h
index cec6f1a6919ab66ad3928254d47a0943f60936b5..3a1eaf289e4ba245544b985e893f746845c37c88 100644
--- a/include/libs/nodes/querynodes.h
+++ b/include/libs/nodes/querynodes.h
@@ -57,7 +57,9 @@ typedef enum EColumnType {
COLUMN_TYPE_COLUMN = 1,
COLUMN_TYPE_TAG,
COLUMN_TYPE_TBNAME,
- COLUMN_TYPE_WINDOW_PC,
+ COLUMN_TYPE_WINDOW_START,
+ COLUMN_TYPE_WINDOW_END,
+ COLUMN_TYPE_WINDOW_DURATION,
COLUMN_TYPE_GROUP_KEY
} EColumnType;
diff --git a/include/libs/parser/parser.h b/include/libs/parser/parser.h
index 717278d51d1b252dc3f2bada18a61bbb65739b6e..95bde858640b3d4cd5df616bc1d0a5a65795d8f3 100644
--- a/include/libs/parser/parser.h
+++ b/include/libs/parser/parser.h
@@ -49,6 +49,7 @@ typedef struct SParseContext {
SStmtCallback* pStmtCb;
const char* pUser;
bool isSuperUser;
+ bool enableSysInfo;
bool async;
int8_t schemalessType;
const char* svrVer;
diff --git a/include/libs/planner/planner.h b/include/libs/planner/planner.h
index d1a5c5db103d940c9e36dd9ad637461b2e3361b5..05caa7a7bb56617ef34c03e3646f85ac98f65a56 100644
--- a/include/libs/planner/planner.h
+++ b/include/libs/planner/planner.h
@@ -38,6 +38,7 @@ typedef struct SPlanContext {
char* pMsg;
int32_t msgLen;
const char* pUser;
+ bool sysInfo;
} SPlanContext;
// Create the physical plan for the query, according to the AST.
diff --git a/include/libs/qcom/query.h b/include/libs/qcom/query.h
index 34d870397f953e9a85a9a5b44b6a4fc57c0594c9..44a9e10679c0d0d30ec743e6bf624dcd912b32e9 100644
--- a/include/libs/qcom/query.h
+++ b/include/libs/qcom/query.h
@@ -116,6 +116,8 @@ typedef struct STableMeta {
typedef struct SDBVgInfo {
int32_t vgVersion;
+ int16_t hashPrefix;
+ int16_t hashSuffix;
int8_t hashMethod;
int32_t numOfTable; // DB's table num, unit is TSDB_TABLE_NUM_UNIT
SHashObj* vgHash; // key:vgId, value:SVgroupInfo
@@ -215,6 +217,7 @@ void initQueryModuleMsgHandle();
const SSchema* tGetTbnameColumnSchema();
bool tIsValidSchema(struct SSchema* pSchema, int32_t numOfCols, int32_t numOfTags);
+int32_t queryCreateCTableMetaFromMsg(STableMetaRsp *msg, SCTableMeta *pMeta);
int32_t queryCreateTableMetaFromMsg(STableMetaRsp* msg, bool isSuperTable, STableMeta** pMeta);
char* jobTaskStatusStr(int32_t status);
diff --git a/include/libs/stream/tstream.h b/include/libs/stream/tstream.h
index 384c6a289f304e4c59a097663bb4224e979bd226..afd8de6b1cc3306c6963265dacacc75705ea8ba4 100644
--- a/include/libs/stream/tstream.h
+++ b/include/libs/stream/tstream.h
@@ -263,6 +263,14 @@ typedef struct {
SArray* checkpointVer;
} SStreamRecoveringState;
+// incremental state storage
+typedef struct {
+ SStreamTask* pOwner;
+ TDB* db;
+ TTB* pStateDb;
+ TXN txn;
+} SStreamState;
+
typedef struct SStreamTask {
int64_t streamId;
int32_t taskId;
@@ -312,6 +320,10 @@ typedef struct SStreamTask {
// msg handle
SMsgCb* pMsgCb;
+
+ // state backend
+ SStreamState* pState;
+
} SStreamTask;
int32_t tEncodeStreamEpInfo(SEncoder* pEncoder, const SStreamChildEpInfo* pInfo);
@@ -507,7 +519,7 @@ typedef struct SStreamMeta {
char* path;
TDB* db;
TTB* pTaskDb;
- TTB* pStateDb;
+ TTB* pCheckpointDb;
SHashObj* pTasks;
SHashObj* pRecoverStatus;
void* ahandle;
@@ -528,6 +540,39 @@ int32_t streamMetaCommit(SStreamMeta* pMeta);
int32_t streamMetaRollBack(SStreamMeta* pMeta);
int32_t streamLoadTasks(SStreamMeta* pMeta);
+SStreamState* streamStateOpen(char* path, SStreamTask* pTask);
+void streamStateClose(SStreamState* pState);
+int32_t streamStateBegin(SStreamState* pState);
+int32_t streamStateCommit(SStreamState* pState);
+int32_t streamStateAbort(SStreamState* pState);
+
+typedef struct {
+ TBC* pCur;
+} SStreamStateCur;
+
+#if 1
+int32_t streamStatePut(SStreamState* pState, const SWinKey* key, const void* value, int32_t vLen);
+int32_t streamStateGet(SStreamState* pState, const SWinKey* key, void** pVal, int32_t* pVLen);
+int32_t streamStateDel(SStreamState* pState, const SWinKey* key);
+int32_t streamStateAddIfNotExist(SStreamState* pState, const SWinKey* key, void** pVal, int32_t* pVLen);
+int32_t streamStateReleaseBuf(SStreamState* pState, const SWinKey* key, void* pVal);
+void streamFreeVal(void* val);
+
+SStreamStateCur* streamStateGetCur(SStreamState* pState, const SWinKey* key);
+SStreamStateCur* streamStateSeekKeyNext(SStreamState* pState, const SWinKey* key);
+SStreamStateCur* streamStateSeekKeyPrev(SStreamState* pState, const SWinKey* key);
+void streamStateFreeCur(SStreamStateCur* pCur);
+
+int32_t streamStateGetKVByCur(SStreamStateCur* pCur, SWinKey* pKey, const void** pVal, int32_t* pVLen);
+
+int32_t streamStateSeekFirst(SStreamState* pState, SStreamStateCur* pCur);
+int32_t streamStateSeekLast(SStreamState* pState, SStreamStateCur* pCur);
+
+int32_t streamStateCurNext(SStreamState* pState, SStreamStateCur* pCur);
+int32_t streamStateCurPrev(SStreamState* pState, SStreamStateCur* pCur);
+
+#endif
+
#ifdef __cplusplus
}
#endif
diff --git a/include/libs/sync/syncTools.h b/include/libs/sync/syncTools.h
index 6c95c3c6d72929045bd780056811c1938864717b..de2271554d2b9166ec240ea07e91dea9d017ef92 100644
--- a/include/libs/sync/syncTools.h
+++ b/include/libs/sync/syncTools.h
@@ -444,6 +444,70 @@ void syncAppendEntriesReplyPrint2(char* s, const SyncAppendEntriesReply* pMsg);
void syncAppendEntriesReplyLog(const SyncAppendEntriesReply* pMsg);
void syncAppendEntriesReplyLog2(char* s, const SyncAppendEntriesReply* pMsg);
+// ---------------------------------------------
+typedef struct SyncHeartbeat {
+ uint32_t bytes;
+ int32_t vgId;
+ uint32_t msgType;
+ SRaftId srcId;
+ SRaftId destId;
+
+ // private data
+ SyncTerm term;
+ SyncIndex commitIndex;
+ SyncTerm privateTerm;
+} SyncHeartbeat;
+
+SyncHeartbeat* syncHeartbeatBuild(int32_t vgId);
+void syncHeartbeatDestroy(SyncHeartbeat* pMsg);
+void syncHeartbeatSerialize(const SyncHeartbeat* pMsg, char* buf, uint32_t bufLen);
+void syncHeartbeatDeserialize(const char* buf, uint32_t len, SyncHeartbeat* pMsg);
+char* syncHeartbeatSerialize2(const SyncHeartbeat* pMsg, uint32_t* len);
+SyncHeartbeat* syncHeartbeatDeserialize2(const char* buf, uint32_t len);
+void syncHeartbeat2RpcMsg(const SyncHeartbeat* pMsg, SRpcMsg* pRpcMsg);
+void syncHeartbeatFromRpcMsg(const SRpcMsg* pRpcMsg, SyncHeartbeat* pMsg);
+SyncHeartbeat* syncHeartbeatFromRpcMsg2(const SRpcMsg* pRpcMsg);
+cJSON* syncHeartbeat2Json(const SyncHeartbeat* pMsg);
+char* syncHeartbeat2Str(const SyncHeartbeat* pMsg);
+
+// for debug ----------------------
+void syncHeartbeatPrint(const SyncHeartbeat* pMsg);
+void syncHeartbeatPrint2(char* s, const SyncHeartbeat* pMsg);
+void syncHeartbeatLog(const SyncHeartbeat* pMsg);
+void syncHeartbeatLog2(char* s, const SyncHeartbeat* pMsg);
+
+// ---------------------------------------------
+typedef struct SyncHeartbeatReply {
+ uint32_t bytes;
+ int32_t vgId;
+ uint32_t msgType;
+ SRaftId srcId;
+ SRaftId destId;
+
+ // private data
+ SyncTerm term;
+ SyncTerm privateTerm;
+ int64_t startTime;
+} SyncHeartbeatReply;
+
+SyncHeartbeatReply* syncHeartbeatReplyBuild(int32_t vgId);
+void syncHeartbeatReplyDestroy(SyncHeartbeatReply* pMsg);
+void syncHeartbeatReplySerialize(const SyncHeartbeatReply* pMsg, char* buf, uint32_t bufLen);
+void syncHeartbeatReplyDeserialize(const char* buf, uint32_t len, SyncHeartbeatReply* pMsg);
+char* syncHeartbeatReplySerialize2(const SyncHeartbeatReply* pMsg, uint32_t* len);
+SyncHeartbeatReply* syncHeartbeatReplyDeserialize2(const char* buf, uint32_t len);
+void syncHeartbeatReply2RpcMsg(const SyncHeartbeatReply* pMsg, SRpcMsg* pRpcMsg);
+void syncHeartbeatReplyFromRpcMsg(const SRpcMsg* pRpcMsg, SyncHeartbeatReply* pMsg);
+SyncHeartbeatReply* syncHeartbeatReplyFromRpcMsg2(const SRpcMsg* pRpcMsg);
+cJSON* syncHeartbeatReply2Json(const SyncHeartbeatReply* pMsg);
+char* syncHeartbeatReply2Str(const SyncHeartbeatReply* pMsg);
+
+// for debug ----------------------
+void syncHeartbeatReplyPrint(const SyncHeartbeatReply* pMsg);
+void syncHeartbeatReplyPrint2(char* s, const SyncHeartbeatReply* pMsg);
+void syncHeartbeatReplyLog(const SyncHeartbeatReply* pMsg);
+void syncHeartbeatReplyLog2(char* s, const SyncHeartbeatReply* pMsg);
+
// ---------------------------------------------
typedef struct SyncApplyMsg {
uint32_t bytes;
diff --git a/include/os/os.h b/include/os/os.h
index b036002f8adb5d246db8346112f2189f779f73cd..71966061a19a175d816010ff6425b4004b1f2223 100644
--- a/include/os/os.h
+++ b/include/os/os.h
@@ -79,6 +79,7 @@ extern "C" {
#include
#include
+#include "taoserror.h"
#include "osAtomic.h"
#include "osDef.h"
#include "osDir.h"
diff --git a/include/os/osDir.h b/include/os/osDir.h
index 9019d4f80240b2335824cb5626488bf4d0957f06..95b1a6ee1d00ab18e31522063102ff0ec9a2bab8 100644
--- a/include/os/osDir.h
+++ b/include/os/osDir.h
@@ -56,6 +56,7 @@ void taosRemoveDir(const char *dirname);
bool taosDirExist(const char *dirname);
int32_t taosMkDir(const char *dirname);
int32_t taosMulMkDir(const char *dirname);
+int32_t taosMulModeMkDir(const char *dirname, int mode);
void taosRemoveOldFiles(const char *dirname, int32_t keepDays);
int32_t taosExpandDir(const char *dirname, char *outname, int32_t maxlen);
int32_t taosRealPath(char *dirname, char *realPath, int32_t maxlen);
diff --git a/include/os/osSemaphore.h b/include/os/osSemaphore.h
index 7fca20d75e2eaece441656bc4ae2c707e0b15cd3..e52da96f0170d4d67d9fb8fa3aeff7270223e2d3 100644
--- a/include/os/osSemaphore.h
+++ b/include/os/osSemaphore.h
@@ -23,10 +23,9 @@ extern "C" {
#include
#if defined(_TD_DARWIN_64)
-
+#include
// typedef struct tsem_s *tsem_t;
-typedef struct bosal_sem_t *tsem_t;
-
+typedef dispatch_semaphore_t tsem_t;
int tsem_init(tsem_t *sem, int pshared, unsigned int value);
int tsem_wait(tsem_t *sem);
diff --git a/include/util/taoserror.h b/include/util/taoserror.h
index e39172d74e52e852f0fa1812634e494d61ac6213..d16a599811255f987adb38cc553c8c5734a5ea60 100644
--- a/include/util/taoserror.h
+++ b/include/util/taoserror.h
@@ -616,6 +616,7 @@ int32_t* taosGetErrno();
#define TSDB_CODE_RSMA_FETCH_MSG_MSSED_UP TAOS_DEF_ERROR_CODE(0, 0x3155)
#define TSDB_CODE_RSMA_EMPTY_INFO TAOS_DEF_ERROR_CODE(0, 0x3156)
#define TSDB_CODE_RSMA_INVALID_SCHEMA TAOS_DEF_ERROR_CODE(0, 0x3157)
+#define TSDB_CODE_RSMA_REGEX_MATCH TAOS_DEF_ERROR_CODE(0, 0x3158)
//index
#define TSDB_CODE_INDEX_REBUILDING TAOS_DEF_ERROR_CODE(0, 0x3200)
diff --git a/include/util/tcompare.h b/include/util/tcompare.h
index cc9e8ae4641138be528830e17467dab7897f0166..c7a3ca20f222c7d919460b31e9f3c55a79325f46 100644
--- a/include/util/tcompare.h
+++ b/include/util/tcompare.h
@@ -105,6 +105,97 @@ int32_t compareStrPatternNotMatch(const void *pLeft, const void *pRight);
int32_t compareWStrPatternMatch(const void *pLeft, const void *pRight);
int32_t compareWStrPatternNotMatch(const void *pLeft, const void *pRight);
+int32_t compareInt8Int16(const void *pLeft, const void *pRight);
+int32_t compareInt8Int32(const void *pLeft, const void *pRight);
+int32_t compareInt8Int64(const void *pLeft, const void *pRight);
+int32_t compareInt8Float(const void *pLeft, const void *pRight);
+int32_t compareInt8Double(const void *pLeft, const void *pRight);
+int32_t compareInt8Uint8(const void *pLeft, const void *pRight);
+int32_t compareInt8Uint16(const void *pLeft, const void *pRight);
+int32_t compareInt8Uint32(const void *pLeft, const void *pRight);
+int32_t compareInt8Uint64(const void *pLeft, const void *pRight);
+int32_t compareInt16Int8(const void *pLeft, const void *pRight);
+int32_t compareInt16Int32(const void *pLeft, const void *pRight);
+int32_t compareInt16Int64(const void *pLeft, const void *pRight);
+int32_t compareInt16Float(const void *pLeft, const void *pRight);
+int32_t compareInt16Double(const void *pLeft, const void *pRight);
+int32_t compareInt16Uint8(const void *pLeft, const void *pRight);
+int32_t compareInt16Uint16(const void *pLeft, const void *pRight);
+int32_t compareInt16Uint32(const void *pLeft, const void *pRight);
+int32_t compareInt16Uint64(const void *pLeft, const void *pRight);
+int32_t compareInt32Int8(const void *pLeft, const void *pRight);
+int32_t compareInt32Int16(const void *pLeft, const void *pRight);
+int32_t compareInt32Int64(const void *pLeft, const void *pRight);
+int32_t compareInt32Float(const void *pLeft, const void *pRight);
+int32_t compareInt32Double(const void *pLeft, const void *pRight);
+int32_t compareInt32Uint8(const void *pLeft, const void *pRight);
+int32_t compareInt32Uint16(const void *pLeft, const void *pRight);
+int32_t compareInt32Uint32(const void *pLeft, const void *pRight);
+int32_t compareInt32Uint64(const void *pLeft, const void *pRight);
+int32_t compareInt64Int8(const void *pLeft, const void *pRight);
+int32_t compareInt64Int16(const void *pLeft, const void *pRight);
+int32_t compareInt64Int32(const void *pLeft, const void *pRight);
+int32_t compareInt64Float(const void *pLeft, const void *pRight);
+int32_t compareInt64Double(const void *pLeft, const void *pRight);
+int32_t compareInt64Uint8(const void *pLeft, const void *pRight);
+int32_t compareInt64Uint16(const void *pLeft, const void *pRight);
+int32_t compareInt64Uint32(const void *pLeft, const void *pRight);
+int32_t compareInt64Uint64(const void *pLeft, const void *pRight);
+int32_t compareFloatInt8(const void *pLeft, const void *pRight);
+int32_t compareFloatInt16(const void *pLeft, const void *pRight);
+int32_t compareFloatInt32(const void *pLeft, const void *pRight);
+int32_t compareFloatInt64(const void *pLeft, const void *pRight);
+int32_t compareFloatDouble(const void *pLeft, const void *pRight);
+int32_t compareFloatUint8(const void *pLeft, const void *pRight);
+int32_t compareFloatUint16(const void *pLeft, const void *pRight);
+int32_t compareFloatUint32(const void *pLeft, const void *pRight);
+int32_t compareFloatUint64(const void *pLeft, const void *pRight);
+int32_t compareDoubleInt8(const void *pLeft, const void *pRight);
+int32_t compareDoubleInt16(const void *pLeft, const void *pRight);
+int32_t compareDoubleInt32(const void *pLeft, const void *pRight);
+int32_t compareDoubleInt64(const void *pLeft, const void *pRight);
+int32_t compareDoubleFloat(const void *pLeft, const void *pRight);
+int32_t compareDoubleUint8(const void *pLeft, const void *pRight);
+int32_t compareDoubleUint16(const void *pLeft, const void *pRight);
+int32_t compareDoubleUint32(const void *pLeft, const void *pRight);
+int32_t compareDoubleUint64(const void *pLeft, const void *pRight);
+int32_t compareUint8Int8(const void *pLeft, const void *pRight);
+int32_t compareUint8Int16(const void *pLeft, const void *pRight);
+int32_t compareUint8Int32(const void *pLeft, const void *pRight);
+int32_t compareUint8Int64(const void *pLeft, const void *pRight);
+int32_t compareUint8Float(const void *pLeft, const void *pRight);
+int32_t compareUint8Double(const void *pLeft, const void *pRight);
+int32_t compareUint8Uint16(const void *pLeft, const void *pRight);
+int32_t compareUint8Uint32(const void *pLeft, const void *pRight);
+int32_t compareUint8Uint64(const void *pLeft, const void *pRight);
+int32_t compareUint16Int8(const void *pLeft, const void *pRight);
+int32_t compareUint16Int16(const void *pLeft, const void *pRight);
+int32_t compareUint16Int32(const void *pLeft, const void *pRight);
+int32_t compareUint16Int64(const void *pLeft, const void *pRight);
+int32_t compareUint16Float(const void *pLeft, const void *pRight);
+int32_t compareUint16Double(const void *pLeft, const void *pRight);
+int32_t compareUint16Uint8(const void *pLeft, const void *pRight);
+int32_t compareUint16Uint32(const void *pLeft, const void *pRight);
+int32_t compareUint16Uint64(const void *pLeft, const void *pRight);
+int32_t compareUint32Int8(const void *pLeft, const void *pRight);
+int32_t compareUint32Int16(const void *pLeft, const void *pRight);
+int32_t compareUint32Int32(const void *pLeft, const void *pRight);
+int32_t compareUint32Int64(const void *pLeft, const void *pRight);
+int32_t compareUint32Float(const void *pLeft, const void *pRight);
+int32_t compareUint32Double(const void *pLeft, const void *pRight);
+int32_t compareUint32Uint8(const void *pLeft, const void *pRight);
+int32_t compareUint32Uint16(const void *pLeft, const void *pRight);
+int32_t compareUint32Uint64(const void *pLeft, const void *pRight);
+int32_t compareUint64Int8(const void *pLeft, const void *pRight);
+int32_t compareUint64Int16(const void *pLeft, const void *pRight);
+int32_t compareUint64Int32(const void *pLeft, const void *pRight);
+int32_t compareUint64Int64(const void *pLeft, const void *pRight);
+int32_t compareUint64Float(const void *pLeft, const void *pRight);
+int32_t compareUint64Double(const void *pLeft, const void *pRight);
+int32_t compareUint64Uint8(const void *pLeft, const void *pRight);
+int32_t compareUint64Uint16(const void *pLeft, const void *pRight);
+int32_t compareUint64Uint32(const void *pLeft, const void *pRight);
+
__compar_fn_t getComparFunc(int32_t type, int32_t optr);
__compar_fn_t getKeyComparFunc(int32_t keyType, int32_t order);
int32_t doCompare(const char *a, const char *b, int32_t type, size_t size);
diff --git a/include/util/tdef.h b/include/util/tdef.h
index 2bc821b8736edf745a30e0e103734e4e7b7b31e4..840a2671fa6eb3f1bb05ff035ddf4a3d164239b7 100644
--- a/include/util/tdef.h
+++ b/include/util/tdef.h
@@ -300,6 +300,9 @@ typedef enum ELogicConditionType {
#define TSDB_DEFAULT_PAGES_PER_VNODE 256
#define TSDB_MIN_PAGESIZE_PER_VNODE 1 // unit KB
#define TSDB_MAX_PAGESIZE_PER_VNODE 16384
+#define TSDB_DEFAULT_TSDB_PAGESIZE 4
+#define TSDB_MIN_TSDB_PAGESIZE 1 // unit KB
+#define TSDB_MAX_TSDB_PAGESIZE 16384
#define TSDB_DEFAULT_PAGESIZE_PER_VNODE 4
#define TSDB_MIN_DAYS_PER_FILE 60 // unit minute
#define TSDB_MAX_DAYS_PER_FILE (3650 * 1440)
@@ -359,15 +362,27 @@ typedef enum ELogicConditionType {
#define TSDB_DB_SCHEMALESS_ON 1
#define TSDB_DB_SCHEMALESS_OFF 0
#define TSDB_DEFAULT_DB_SCHEMALESS TSDB_DB_SCHEMALESS_OFF
-
-#define TSDB_DB_MIN_WAL_RETENTION_PERIOD -1
-#define TSDB_DEFAULT_DB_WAL_RETENTION_PERIOD (24 * 60 * 60 * 4)
-#define TSDB_DB_MIN_WAL_RETENTION_SIZE -1
-#define TSDB_DEFAULT_DB_WAL_RETENTION_SIZE -1
-#define TSDB_DB_MIN_WAL_ROLL_PERIOD 0
-#define TSDB_DEFAULT_DB_WAL_ROLL_PERIOD (24 * 60 * 60 * 1)
-#define TSDB_DB_MIN_WAL_SEGMENT_SIZE 0
-#define TSDB_DEFAULT_DB_WAL_SEGMENT_SIZE 0
+#define TSDB_MIN_STT_TRIGGER 1
+#define TSDB_MAX_STT_TRIGGER 16
+#define TSDB_DEFAULT_SST_TRIGGER 8
+#define TSDB_MIN_HASH_PREFIX 0
+#define TSDB_MAX_HASH_PREFIX 128
+#define TSDB_DEFAULT_HASH_PREFIX 0
+#define TSDB_MIN_HASH_SUFFIX 0
+#define TSDB_MAX_HASH_SUFFIX 128
+#define TSDB_DEFAULT_HASH_SUFFIX 0
+
+#define TSDB_DB_MIN_WAL_RETENTION_PERIOD -1
+#define TSDB_REP_DEF_DB_WAL_RET_PERIOD 0
+#define TSDB_REPS_DEF_DB_WAL_RET_PERIOD (24 * 60 * 60 * 4)
+#define TSDB_DB_MIN_WAL_RETENTION_SIZE -1
+#define TSDB_REP_DEF_DB_WAL_RET_SIZE 0
+#define TSDB_REPS_DEF_DB_WAL_RET_SIZE -1
+#define TSDB_DB_MIN_WAL_ROLL_PERIOD 0
+#define TSDB_REP_DEF_DB_WAL_ROLL_PERIOD 0
+#define TSDB_REPS_DEF_DB_WAL_ROLL_PERIOD (24 * 60 * 60 * 1)
+#define TSDB_DB_MIN_WAL_SEGMENT_SIZE 0
+#define TSDB_DEFAULT_DB_WAL_SEGMENT_SIZE 0
#define TSDB_MIN_ROLLUP_MAX_DELAY 1 // unit millisecond
#define TSDB_MAX_ROLLUP_MAX_DELAY (15 * 60 * 1000)
@@ -386,7 +401,7 @@ typedef enum ELogicConditionType {
#define TSDB_DEFAULT_EXPLAIN_VERBOSE false
-#define TSDB_EXPLAIN_RESULT_ROW_SIZE (16*1024)
+#define TSDB_EXPLAIN_RESULT_ROW_SIZE (16 * 1024)
#define TSDB_EXPLAIN_RESULT_COLUMN_NAME "QUERY_PLAN"
#define TSDB_MAX_FIELD_LEN 16384
diff --git a/include/util/tencode.h b/include/util/tencode.h
index ad642cd612db4d1bb31f57b7a49d977e90978ee5..a6dd58297e8c1dba644d86eb5145b273406fbf9e 100644
--- a/include/util/tencode.h
+++ b/include/util/tencode.h
@@ -264,12 +264,14 @@ static FORCE_INLINE int32_t tEncodeDouble(SEncoder* pCoder, double val) {
static FORCE_INLINE int32_t tEncodeBinary(SEncoder* pCoder, const uint8_t* val, uint32_t len) {
if (tEncodeU32v(pCoder, len) < 0) return -1;
- if (pCoder->data) {
- if (TD_CODER_CHECK_CAPACITY_FAILED(pCoder, len)) return -1;
- memcpy(TD_CODER_CURRENT(pCoder), val, len);
- }
+ if (len) {
+ if (pCoder->data) {
+ if (TD_CODER_CHECK_CAPACITY_FAILED(pCoder, len)) return -1;
+ memcpy(TD_CODER_CURRENT(pCoder), val, len);
+ }
- TD_CODER_MOVE_POS(pCoder, len);
+ TD_CODER_MOVE_POS(pCoder, len);
+ }
return 0;
}
@@ -414,14 +416,18 @@ static int32_t tDecodeCStrTo(SDecoder* pCoder, char* val) {
static FORCE_INLINE int32_t tDecodeBinaryAlloc(SDecoder* pCoder, void** val, uint64_t* len) {
uint64_t length = 0;
if (tDecodeU64v(pCoder, &length) < 0) return -1;
- if (len) *len = length;
+ if (length) {
+ if (len) *len = length;
- if (TD_CODER_CHECK_CAPACITY_FAILED(pCoder, length)) return -1;
- *val = taosMemoryMalloc(length);
- if (*val == NULL) return -1;
- memcpy(*val, TD_CODER_CURRENT(pCoder), length);
+ if (TD_CODER_CHECK_CAPACITY_FAILED(pCoder, length)) return -1;
+ *val = taosMemoryMalloc(length);
+ if (*val == NULL) return -1;
+ memcpy(*val, TD_CODER_CURRENT(pCoder), length);
- TD_CODER_MOVE_POS(pCoder, length);
+ TD_CODER_MOVE_POS(pCoder, length);
+ } else {
+ *val = NULL;
+ }
return 0;
}
diff --git a/include/util/thash.h b/include/util/thash.h
index 781c22a56aaba0d449d1f711b32fe4bd75a39003..f4d09eb0906b04bfd40d97c39ec66feb3b1967a1 100644
--- a/include/util/thash.h
+++ b/include/util/thash.h
@@ -210,6 +210,8 @@ void taosHashSetEqualFp(SHashObj *pHashObj, _equal_fn_t fp);
*/
void taosHashSetFreeFp(SHashObj *pHashObj, _hash_free_fn_t fp);
+int64_t taosHashGetCompTimes(SHashObj *pHashObj);
+
#ifdef __cplusplus
}
#endif
diff --git a/include/util/tpagedbuf.h b/include/util/tpagedbuf.h
index ef266068cbaff046ec6ebcf0bf02d0b44ee9d3a2..9ab89273e6895c2ea322fa116c06332a431028bc 100644
--- a/include/util/tpagedbuf.h
+++ b/include/util/tpagedbuf.h
@@ -58,19 +58,17 @@ int32_t createDiskbasedBuf(SDiskbasedBuf** pBuf, int32_t pagesize, int32_t inMem
/**
*
* @param pBuf
- * @param groupId
* @param pageId
* @return
*/
-void* getNewBufPage(SDiskbasedBuf* pBuf, int32_t groupId, int32_t* pageId);
+void* getNewBufPage(SDiskbasedBuf* pBuf, int32_t* pageId);
/**
*
* @param pBuf
- * @param groupId
* @return
*/
-SIDList getDataBufPagesIdList(SDiskbasedBuf* pBuf, int32_t groupId);
+SIDList getDataBufPagesIdList(SDiskbasedBuf* pBuf);
/**
* get the specified buffer page by id
@@ -101,13 +99,6 @@ void releaseBufPageInfo(SDiskbasedBuf* pBuf, struct SPageInfo* pi);
*/
size_t getTotalBufSize(const SDiskbasedBuf* pBuf);
-/**
- * get the number of groups in the result buffer
- * @param pBuf
- * @return
- */
-size_t getNumOfBufGroupId(const SDiskbasedBuf* pBuf);
-
/**
* destroy result buffer
* @param pBuf
diff --git a/include/util/trbtree.h b/include/util/trbtree.h
new file mode 100644
index 0000000000000000000000000000000000000000..f6d37e3d753de71fdf312b795935cb9014149f23
--- /dev/null
+++ b/include/util/trbtree.h
@@ -0,0 +1,78 @@
+/*
+ * Copyright (c) 2019 TAOS Data, Inc.
+ *
+ * This program is free software: you can use, redistribute, and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3
+ * or later ("AGPL"), as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef _TD_UTIL_RBTREE_H_
+#define _TD_UTIL_RBTREE_H_
+
+#include "os.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+typedef struct SRBTree SRBTree;
+typedef struct SRBTreeNode SRBTreeNode;
+typedef struct SRBTreeIter SRBTreeIter;
+
+typedef int32_t (*tRBTreeCmprFn)(const void *, const void *);
+
+// SRBTree =============================================
+#define tRBTreeMin(T) ((T)->min == ((T)->NIL) ? NULL : (T)->min)
+#define tRBTreeMax(T) ((T)->max == ((T)->NIL) ? NULL : (T)->max)
+
+void tRBTreeCreate(SRBTree *pTree, tRBTreeCmprFn cmprFn);
+SRBTreeNode *tRBTreePut(SRBTree *pTree, SRBTreeNode *z);
+void tRBTreeDrop(SRBTree *pTree, SRBTreeNode *z);
+SRBTreeNode *tRBTreeDropByKey(SRBTree *pTree, void *pKey);
+SRBTreeNode *tRBTreeGet(SRBTree *pTree, void *pKey);
+
+// SRBTreeIter =============================================
+#define tRBTreeIterCreate(tree, ascend) \
+ (SRBTreeIter) { .asc = (ascend), .pTree = (tree), .pNode = (ascend) ? (tree)->min : (tree)->max }
+
+SRBTreeNode *tRBTreeIterNext(SRBTreeIter *pIter);
+
+// STRUCT =============================================
+typedef enum { RED, BLACK } ECOLOR;
+struct SRBTreeNode {
+ ECOLOR color;
+ SRBTreeNode *parent;
+ SRBTreeNode *left;
+ SRBTreeNode *right;
+};
+
+#define RBTREE_NODE_PAYLOAD(N) ((const void *)&(N)[1])
+
+struct SRBTree {
+ tRBTreeCmprFn cmprFn;
+ int64_t n;
+ SRBTreeNode *root;
+ SRBTreeNode *min;
+ SRBTreeNode *max;
+ SRBTreeNode *NIL;
+ SRBTreeNode NILNODE;
+};
+
+struct SRBTreeIter {
+ int8_t asc;
+ SRBTree *pTree;
+ SRBTreeNode *pNode;
+};
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /*_TD_UTIL_RBTREE_H_*/
\ No newline at end of file
diff --git a/include/util/tutil.h b/include/util/tutil.h
index 6a1a40f14ccb865533f117524ffdfef3c84e20ad..32a88b37ecffc16c2a222d73d9fe452e6fcaacac 100644
--- a/include/util/tutil.h
+++ b/include/util/tutil.h
@@ -20,6 +20,7 @@
#include "tcrc32c.h"
#include "tdef.h"
#include "tmd5.h"
+#include "thash.h"
#ifdef __cplusplus
extern "C" {
@@ -68,6 +69,19 @@ static FORCE_INLINE void taosEncryptPass_c(uint8_t *inBuf, size_t len, char *tar
memcpy(target, buf, TSDB_PASSWORD_LEN);
}
+static FORCE_INLINE int32_t taosGetTbHashVal(const char *tbname, int32_t tblen, int32_t method, int32_t prefix,
+ int32_t suffix) {
+ if (prefix == 0 && suffix == 0) {
+ return MurmurHash3_32(tbname, tblen);
+ } else {
+ if (tblen <= (prefix + suffix)) {
+ return MurmurHash3_32(tbname, tblen);
+ } else {
+ return MurmurHash3_32(tbname + prefix, tblen - prefix - suffix);
+ }
+ }
+}
+
#ifdef __cplusplus
}
#endif
diff --git a/packaging/MPtestJenkinsfile b/packaging/MPtestJenkinsfile
new file mode 100644
index 0000000000000000000000000000000000000000..5b793459164baec2791107842bddb3f0bb90b2df
--- /dev/null
+++ b/packaging/MPtestJenkinsfile
@@ -0,0 +1,251 @@
+def sync_source(branch_name) {
+ sh '''
+ hostname
+ ip addr|grep 192|awk '{print $2}'|sed "s/\\/.*//"
+ echo ''' + branch_name + '''
+ '''
+ sh '''
+ cd ${TDENGINE_ROOT_DIR}
+ git reset --hard
+ git fetch || git fetch
+ rm -rf examples/rust/
+ git checkout ''' + branch_name + ''' -f
+ git branch
+ git pull || git pull
+ git log | head -n 20
+ git submodule update --init --recursive
+ '''
+ return 1
+}
+def run_test() {
+ sh '''
+ cd ${TDENGINE_ROOT_DIR}/packaging
+
+ '''
+ sh '''
+ export LD_LIBRARY_PATH=${TDINTERNAL_ROOT_DIR}/debug/build/lib
+ ./fulltest.sh
+ '''
+ sh '''
+ cd ${TDENGINE_ROOT_DIR}/tests
+ ./test-all.sh b1fq
+ '''
+}
+def build_run() {
+ sync_source("${BRANCH_NAME}")
+}
+pipeline {
+ agent none
+ parameters {
+ string (
+ name:'version',
+ defaultValue:'3.0.0.1',
+ description: 'release version number,eg: 3.0.0.1 or 3.0.0.'
+ )
+ string (
+ name:'baseVersion',
+ defaultValue:'3.0.0.1',
+            description: 'This number of baseVersion is generally not modified. Now it is 3.0.0.1'
+ )
+ string (
+ name:'toolsVersion',
+ defaultValue:'2.1.2',
+            description: 'This number of toolsVersion is generally not modified. Now it is 2.1.2'
+ )
+ string (
+ name:'toolsBaseVersion',
+ defaultValue:'2.1.2',
+            description: 'This number of toolsBaseVersion is generally not modified. Now it is 2.1.2'
+ )
+ }
+ environment{
+ WORK_DIR = '/var/lib/jenkins/workspace'
+ TDINTERNAL_ROOT_DIR = '/var/lib/jenkins/workspace/TDinternal'
+ TDENGINE_ROOT_DIR = '/var/lib/jenkins/workspace/TDinternal/community'
+ BRANCH_NAME = 'test/chr/TD-14699'
+
+ TD_SERVER_TAR = "TDengine-server-${version}-Linux-x64.tar.gz"
+ BASE_TD_SERVER_TAR = "TDengine-server-${baseVersion}-Linux-x64.tar.gz"
+
+ TD_SERVER_ARM_TAR = "TDengine-server-${version}-Linux-arm64.tar.gz"
+ BASE_TD_SERVER_ARM_TAR = "TDengine-server-${baseVersion}-Linux-arm64.tar.gz"
+
+ TD_SERVER_LITE_TAR = "TDengine-server-${version}-Linux-x64-Lite.tar.gz"
+ BASE_TD_SERVER_LITE_TAR = "TDengine-server-${baseVersion}-Linux-x64-Lite.tar.gz"
+
+ TD_CLIENT_TAR = "TDengine-client-${version}-Linux-x64.tar.gz"
+ BASE_TD_CLIENT_TAR = "TDengine-client-${baseVersion}-Linux-x64.tar.gz"
+
+ TD_CLIENT_ARM_TAR = "TDengine-client-${version}-Linux-arm64.tar.gz"
+ BASE_TD_CLIENT_ARM_TAR = "TDengine-client-${baseVersion}-Linux-arm64.tar.gz"
+
+ TD_CLIENT_LITE_TAR = "TDengine-client-${version}-Linux-x64-Lite.tar.gz"
+ BASE_TD_CLIENT_LITE_TAR = "TDengine-client-${baseVersion}-Linux-x64-Lite.tar.gz"
+
+ TD_SERVER_RPM = "TDengine-server-${version}-Linux-x64.rpm"
+
+ TD_SERVER_DEB = "TDengine-server-${version}-Linux-x64.deb"
+
+ TD_SERVER_EXE = "TDengine-server-${version}-Windows-x64.exe"
+
+ TD_CLIENT_EXE = "TDengine-client-${version}-Windows-x64.exe"
+
+ TD_TOOLS_TAR = "taosTools-${toolsVersion}-Linux-x64.tar.gz"
+
+
+ }
+ stages {
+ stage ('Test Server') {
+ parallel {
+ stage('ubuntu16') {
+ agent{label " ubuntu16 "}
+ steps {
+ timeout(time: 30, unit: 'MINUTES'){
+ sync_source("${BRANCH_NAME}")
+ sh '''
+ cd ${TDENGINE_ROOT_DIR}/packaging
+ bash testpackage.sh ${TD_SERVER_TAR} ${version} ${BASE_TD_SERVER_TAR} ${baseVersion} server
+ python3 checkPackageRuning.py
+ '''
+ sh '''
+ cd ${TDENGINE_ROOT_DIR}/packaging
+ bash testpackage.sh ${TD_SERVER_LITE_TAR} ${version} ${BASE_TD_SERVER_LITE_TAR} ${baseVersion} server
+ python3 checkPackageRuning.py
+ '''
+ sh '''
+ cd ${TDENGINE_ROOT_DIR}/packaging
+ bash testpackage.sh ${TD_SERVER_DEB} ${version} ${BASE_TD_SERVER_TAR} ${baseVersion} server
+ python3 checkPackageRuning.py
+ '''
+ }
+ }
+ }
+ stage('ubuntu18') {
+ agent{label " ubuntu18 "}
+ steps {
+ timeout(time: 30, unit: 'MINUTES'){
+ sync_source("${BRANCH_NAME}")
+ sh '''
+ cd ${TDENGINE_ROOT_DIR}/packaging
+ bash testpackage.sh ${TD_SERVER_TAR} ${version} ${BASE_TD_SERVER_TAR} ${baseVersion} server
+ python3 checkPackageRuning.py
+ '''
+ sh '''
+ cd ${TDENGINE_ROOT_DIR}/packaging
+ bash testpackage.sh ${TD_SERVER_LITE_TAR} ${version} ${BASE_TD_SERVER_LITE_TAR} ${baseVersion} server
+ python3 checkPackageRuning.py
+ '''
+ sh '''
+ cd ${TDENGINE_ROOT_DIR}/packaging
+ bash testpackage.sh ${TD_SERVER_DEB} ${version} ${BASE_TD_SERVER_TAR} ${baseVersion} server
+ python3 checkPackageRuning.py
+ dpkg -r tdengine
+ '''
+
+ }
+ }
+ }
+ stage('centos7') {
+ agent{label " centos7_9 "}
+ steps {
+ timeout(time: 30, unit: 'MINUTES'){
+ sync_source("${BRANCH_NAME}")
+ sh '''
+ cd ${TDENGINE_ROOT_DIR}/packaging
+ bash testpackage.sh ${TD_SERVER_TAR} ${version} ${BASE_TD_SERVER_TAR} ${baseVersion} server
+ python3 checkPackageRuning.py
+ '''
+ sh '''
+ cd ${TDENGINE_ROOT_DIR}/packaging
+ bash testpackage.sh ${TD_SERVER_LITE_TAR} ${version} ${BASE_TD_SERVER_LITE_TAR} ${baseVersion} server
+ python3 checkPackageRuning.py
+ '''
+ sh '''
+ cd ${TDENGINE_ROOT_DIR}/packaging
+ bash testpackage.sh ${TD_SERVER_RPM} ${version} ${BASE_TD_SERVER_TAR} ${baseVersion} server
+ python3 checkPackageRuning.py
+ '''
+ }
+ }
+ }
+ stage('centos8') {
+ agent{label " centos8_3 "}
+ steps {
+ timeout(time: 30, unit: 'MINUTES'){
+ sync_source("${BRANCH_NAME}")
+ sh '''
+ cd ${TDENGINE_ROOT_DIR}/packaging
+ bash testpackage.sh ${TD_SERVER_TAR} ${version} ${BASE_TD_SERVER_TAR} ${baseVersion} server
+ python3 checkPackageRuning.py
+ '''
+ sh '''
+ cd ${TDENGINE_ROOT_DIR}/packaging
+ bash testpackage.sh ${TD_SERVER_LITE_TAR} ${version} ${BASE_TD_SERVER_LITE_TAR} ${baseVersion} server
+ python3 checkPackageRuning.py
+ '''
+ sh '''
+ cd ${TDENGINE_ROOT_DIR}/packaging
+ bash testpackage.sh ${TD_SERVER_RPM} ${version} ${BASE_TD_SERVER_TAR} ${baseVersion} server
+ python3 checkPackageRuning.py
+ sudo rpm -e tdengine
+ '''
+ }
+ }
+ }
+ stage('arm64') {
+ agent{label 'linux_arm64'}
+ steps {
+ timeout(time: 30, unit: 'MINUTES'){
+ sync_source("${BRANCH_NAME}")
+ sh '''
+ cd ${TDENGINE_ROOT_DIR}/packaging
+ bash testpackage.sh ${TD_SERVER_ARM_TAR} ${version} ${BASE_TD_SERVER_ARM_TAR} ${baseVersion} server
+ python3 checkPackageRuning.py
+ '''
+ }
+ }
+ }
+ }
+ }
+ stage ('Test Client') {
+ parallel {
+ stage('ubuntu18') {
+ agent{label " ubuntu18 "}
+ steps {
+ timeout(time: 30, unit: 'MINUTES'){
+ sh '''
+ cd ${TDENGINE_ROOT_DIR}/packaging
+ bash testpackage.sh ${TD_CLIENT_TAR} ${version} ${BASE_TD_CLIENT_TAR} ${baseVersion} client
+ python3 checkPackageRuning.py 192.168.0.21
+ '''
+ }
+ }
+ }
+ stage('centos8') {
+ agent{label " centos8_3 "}
+ steps {
+ timeout(time: 30, unit: 'MINUTES'){
+ sh '''
+ cd ${TDENGINE_ROOT_DIR}/packaging
+ bash testpackage.sh ${TD_CLIENT_LITE_TAR} ${version} ${BASE_TD_CLIENT_LITE_TAR} ${baseVersion} client
+ python3 checkPackageRuning.py 192.168.0.24
+ '''
+ }
+ }
+ }
+ }
+ }
+ stage('arm64-client') {
+ agent{label " linux_arm64 "}
+ steps {
+ timeout(time: 30, unit: 'MINUTES'){
+ sh '''
+ cd ${TDENGINE_ROOT_DIR}/packaging
+ bash testpackage.sh ${TD_CLIENT_ARM_TAR} ${version} ${BASE_TD_CLIENT_ARM_TAR} ${baseVersion} client
+ python3 checkPackageRuning.py 192.168.0.21
+ '''
+ }
+ }
+ }
+ }
+}
\ No newline at end of file
diff --git a/packaging/cfg/taos.cfg b/packaging/cfg/taos.cfg
index aae2e7c856ac7ce4747d798acf5852d6cdf21535..87f465fdb93ddbff8973430b11ecadc13878069d 100644
--- a/packaging/cfg/taos.cfg
+++ b/packaging/cfg/taos.cfg
@@ -38,7 +38,7 @@
# The interval of dnode reporting status to mnode
# statusInterval 1
-# The interval for taos shell to send heartbeat to mnode
+# The interval for TDengine CLI to send heartbeat to mnode
# shellActivityTimer 3
# The minimum sliding window time, milli-second
diff --git a/packaging/checkPackageRuning.py b/packaging/checkPackageRuning.py
new file mode 100755
index 0000000000000000000000000000000000000000..2edeeb6dbbb682bb06150e30803a7f05c170a5b1
--- /dev/null
+++ b/packaging/checkPackageRuning.py
@@ -0,0 +1,110 @@
+#!/usr/bin/python
+###################################################################
+# Copyright (c) 2016 by TAOS Technologies, Inc.
+# All rights reserved.
+#
+# This file is proprietary and confidential to TAOS Technologies.
+# No part of this file may be reproduced, stored, transmitted,
+# disclosed or used in any form or by any means other than as
+# expressly provided by the written permission from Jianhui Tao
+#
+###################################################################
+# install pip
+# pip install src/connector/python/
+
+# -*- coding: utf-8 -*-
+import sys , os
+import getopt
+import subprocess
+# from this import d
+import time
+
+
+if( len(sys.argv)>1 ):
+ serverHost=sys.argv[1]
+else:
+ serverHost="localhost"
+
+
+# install taospy
+
+out = subprocess.getoutput("pip3 show taospy|grep Version| awk -F ':' '{print $2}' ")
+print("taospy version %s "%out)
+if (out == "" ):
+ os.system("pip3 install git+https://github.com/taosdata/taos-connector-python.git")
+ print("install taos python connector")
+else:
+ os.system("pip3 install --upgrade taospy ")
+
+
+
+# start taosd prepare
+# os.system("rm -rf /var/lib/taos/*")
+# os.system("systemctl restart taosd ")
+
+# wait a moment ,at least 5 seconds
+time.sleep(5)
+
+# prepare data by taosBenchmark
+
+os.system("taosBenchmark -y -n 100 -t 100 -h %s "%serverHost )
+
+import taos
+
+conn = taos.connect(host="%s"%serverHost,
+ user="root",
+ password="taosdata",
+ database="test",
+ port=6030,
+ config="/etc/taos", # for windows the default value is C:\TDengine\cfg
+ timezone="Asia/Shanghai") # default your host's timezone
+
+server_version = conn.server_info
+print("server_version", server_version)
+client_version = conn.client_info
+print("client_version", client_version) # 3.0.0.0
+
+# Execute a sql and get its result set. It's useful for SELECT statement
+result: taos.TaosResult = conn.query("SELECT count(*) from test.meters")
+
+data = result.fetch_all()
+
+if data[0][0] !=10000:
+ print(" taosBenchmark work not as expected ")
+ sys.exit(1)
+else:
+ print(" taosBenchmark work as expected ")
+
+# test taosdump dump out data and dump in data
+
+# dump out datas
+os.system("taosdump --version")
+os.system("mkdir -p /tmp/dumpdata")
+os.system("rm -rf /tmp/dumpdata/*")
+
+
+
+# dump data out
+print("taosdump dump out data")
+
+os.system("taosdump -o /tmp/dumpdata -D test -y -h %s "%serverHost)
+
+# drop database of test
+print("drop database test")
+os.system(" taos -s ' drop database test ;' -h %s "%serverHost)
+
+# dump data in
+print("taosdump dump data in")
+os.system("taosdump -i /tmp/dumpdata -y -h %s "%serverHost)
+
+result = conn.query("SELECT count(*) from test.meters")
+
+data = result.fetch_all()
+
+if data[0][0] !=10000:
+ print(" taosdump work not as expected ")
+ sys.exit(1)
+else:
+ print(" taosdump work as expected ")
+
+conn.close()
\ No newline at end of file
diff --git a/packaging/deb/DEBIAN/prerm b/packaging/deb/DEBIAN/prerm
index 49531028423142ccc9808b90b772bd97b0b3fc58..65f261db2c6c1ac70b761312af68a5188acea541 100644
--- a/packaging/deb/DEBIAN/prerm
+++ b/packaging/deb/DEBIAN/prerm
@@ -1,6 +1,6 @@
#!/bin/bash
-if [ $1 -eq "abort-upgrade" ]; then
+if [ "$1"x = "abort-upgrade"x ]; then
exit 0
fi
diff --git a/packaging/deb/makedeb.sh b/packaging/deb/makedeb.sh
index 3db9005f95a3027c42dd05b9f28d448ade5852cb..94a24a41487e8d7b82571bcc524392e4335d7fae 100755
--- a/packaging/deb/makedeb.sh
+++ b/packaging/deb/makedeb.sh
@@ -45,6 +45,7 @@ mkdir -p ${pkg_dir}${install_home_path}/include
mkdir -p ${pkg_dir}${install_home_path}/script
cp ${compile_dir}/../packaging/cfg/taos.cfg ${pkg_dir}${install_home_path}/cfg
+cp ${compile_dir}/../packaging/cfg/taosd.service ${pkg_dir}${install_home_path}/cfg
if [ -f "${compile_dir}/test/cfg/taosadapter.toml" ]; then
cp ${compile_dir}/test/cfg/taosadapter.toml ${pkg_dir}${install_home_path}/cfg || :
fi
diff --git a/packaging/debRpmAutoInstall.sh b/packaging/debRpmAutoInstall.sh
new file mode 100755
index 0000000000000000000000000000000000000000..3579f813e5b6ce91f0daa1fd230af14a4bf3d4b9
--- /dev/null
+++ b/packaging/debRpmAutoInstall.sh
@@ -0,0 +1,15 @@
+#!/usr/bin/expect
+set packgeName [lindex $argv 0]
+set packageSuffix [lindex $argv 1]
+set timeout 3
+if { ${packageSuffix} == "deb" } {
+ spawn dpkg -i ${packgeName}
+} elseif { ${packageSuffix} == "rpm"} {
+ spawn rpm -ivh ${packgeName}
+}
+expect "*one:"
+send "\r"
+expect "*skip:"
+send "\r"
+
+expect eof
\ No newline at end of file
diff --git a/packaging/docker/README.md b/packaging/docker/README.md
index e41182f471050af6b4d47b696eb237e319b2dd80..763ab73724587eb4dc231eb399a60937eaba6dca 100644
--- a/packaging/docker/README.md
+++ b/packaging/docker/README.md
@@ -47,7 +47,7 @@ taos> show databases;
Query OK, 1 row(s) in set (0.002843s)
```
-Since TDengine use container hostname to establish connections, it's a bit more complex to use taos shell and native connectors(such as JDBC-JNI) with TDengine container instance. This is the recommended way to expose ports and use TDengine with docker in simple cases. If you want to use taos shell or taosc/connectors smoothly outside the `tdengine` container, see next use cases that match you need.
+Since TDengine use container hostname to establish connections, it's a bit more complex to use TDengine CLI and native connectors(such as JDBC-JNI) with TDengine container instance. This is the recommended way to expose ports and use TDengine with docker in simple cases. If you want to use TDengine CLI or taosc/connectors smoothly outside the `tdengine` container, see next use cases that match you need.
### Start with host network
@@ -87,7 +87,7 @@ docker run -d \
This command starts a docker container with TDengine server running and maps the container's TCP ports from 6030 to 6049 to the host's ports from 6030 to 6049 with TCP protocol and UDP ports range 6030-6039 to the host's UDP ports 6030-6039. If the host is already running TDengine server and occupying the same port(s), you need to map the container's port to a different unused port segment. (Please see TDengine 2.0 Port Description for details). In order to support TDengine clients accessing TDengine server services, both TCP and UDP ports need to be exposed by default(unless `rpcForceTcp` is set to `1`).
-If you want to use taos shell or native connectors([JDBC-JNI](https://www.taosdata.com/cn/documentation/connector/java), or [driver-go](https://github.com/taosdata/driver-go)), you need to make sure the `TAOS_FQDN` is resolvable at `/etc/hosts` or with custom DNS service.
+If you want to use TDengine CLI or native connectors([JDBC-JNI](https://www.taosdata.com/cn/documentation/connector/java), or [driver-go](https://github.com/taosdata/driver-go)), you need to make sure the `TAOS_FQDN` is resolvable at `/etc/hosts` or with custom DNS service.
If you set the `TAOS_FQDN` to host's hostname, it will works as using `hosts` network like previous use case. Otherwise, like in `-e TAOS_FQDN=tdengine`, you can add the hostname record `tdengine` into `/etc/hosts` (use `127.0.0.1` here in host path, if use TDengine client/application in other hosts, you should set the right ip to the host eg. `192.168.10.1`(check the real ip in host with `hostname -i` or `ip route list default`) to make the TDengine endpoint resolvable):
@@ -158,7 +158,7 @@ When you build your application with docker, you should add the TDengine client
FROM ubuntu:20.04
RUN apt-get update && apt-get install -y wget
ENV TDENGINE_VERSION=2.4.0.0
-RUN wget -c https://www.taosdata.com/assets-download/TDengine-client-${TDENGINE_VERSION}-Linux-x64.tar.gz \
+RUN wget -c https://www.taosdata.com/assets-download/3.0/TDengine-client-${TDENGINE_VERSION}-Linux-x64.tar.gz \
&& tar xvf TDengine-client-${TDENGINE_VERSION}-Linux-x64.tar.gz \
&& cd TDengine-client-${TDENGINE_VERSION} \
&& ./install_client.sh \
@@ -265,7 +265,7 @@ Full version of dockerfile could be:
```dockerfile
FROM golang:1.17.6-buster as builder
ENV TDENGINE_VERSION=2.4.0.0
-RUN wget -c https://www.taosdata.com/assets-download/TDengine-client-${TDENGINE_VERSION}-Linux-x64.tar.gz \
+RUN wget -c https://www.taosdata.com/assets-download/3.0/TDengine-client-${TDENGINE_VERSION}-Linux-x64.tar.gz \
&& tar xvf TDengine-client-${TDENGINE_VERSION}-Linux-x64.tar.gz \
&& cd TDengine-client-${TDENGINE_VERSION} \
&& ./install_client.sh \
@@ -279,7 +279,7 @@ RUN go env && go mod tidy && go build
FROM ubuntu:20.04
RUN apt-get update && apt-get install -y wget
ENV TDENGINE_VERSION=2.4.0.0
-RUN wget -c https://www.taosdata.com/assets-download/TDengine-client-${TDENGINE_VERSION}-Linux-x64.tar.gz \
+RUN wget -c https://www.taosdata.com/assets-download/3.0/TDengine-client-${TDENGINE_VERSION}-Linux-x64.tar.gz \
&& tar xvf TDengine-client-${TDENGINE_VERSION}-Linux-x64.tar.gz \
&& cd TDengine-client-${TDENGINE_VERSION} \
&& ./install_client.sh \
@@ -391,7 +391,7 @@ test_td-1_1 /usr/bin/entrypoint.sh taosd Up 6030/tcp, 6031/tcp,
test_td-2_1 /usr/bin/entrypoint.sh taosd Up 6030/tcp, 6031/tcp, 6032/tcp, 6033/tcp, 6034/tcp, 6035/tcp, 6036/tcp, 6037/tcp, 6038/tcp, 6039/tcp, 6040/tcp, 6041/tcp, 6042/tcp
```
-Check dnodes with taos shell:
+Check dnodes with TDengine CLI:
```bash
$ docker-compose exec td-1 taos -s "show dnodes"
diff --git a/packaging/release.bat b/packaging/release.bat
index ffd3a680486985a9e302a51f6fd2d910ea2c381d..591227382f9cec4a2fa1308a9b827994430f7236 100644
--- a/packaging/release.bat
+++ b/packaging/release.bat
@@ -40,10 +40,12 @@ if not exist %work_dir%\debug\ver-%2-x86 (
)
cd %work_dir%\debug\ver-%2-x64
call vcvarsall.bat x64
-cmake ../../ -G "NMake Makefiles JOM" -DCMAKE_MAKE_PROGRAM=jom -DBUILD_TOOLS=true -DBUILD_HTTP=false -DVERNUMBER=%2 -DCPUTYPE=x64
+cmake ../../ -G "NMake Makefiles JOM" -DCMAKE_MAKE_PROGRAM=jom -DBUILD_TOOLS=true -DBUILD_HTTP=false -DBUILD_TEST=false -DVERNUMBER=%2 -DCPUTYPE=x64
cmake --build .
rd /s /Q C:\TDengine
cmake --install .
+for /r c:\TDengine %%i in (*.dll) do signtool sign /f D:\\123.pfx /p taosdata %%i
+for /r c:\TDengine %%i in (*.exe) do signtool sign /f D:\\123.pfx /p taosdata %%i
if not %errorlevel% == 0 ( call :RUNFAILED build x64 failed & exit /b 1)
cd %package_dir%
iscc /DMyAppInstallName="%packagServerName_x64%" /DMyAppVersion="%2" /DMyAppExcludeSource="" tools\tdengine.iss /O..\release
@@ -51,19 +53,7 @@ if not %errorlevel% == 0 ( call :RUNFAILED package %packagServerName_x64% faile
iscc /DMyAppInstallName="%packagClientName_x64%" /DMyAppVersion="%2" /DMyAppExcludeSource="taosd.exe" tools\tdengine.iss /O..\release
if not %errorlevel% == 0 ( call :RUNFAILED package %packagClientName_x64% failed & exit /b 1)
-cd %work_dir%\debug\ver-%2-x86
-call vcvarsall.bat x86
-cmake ../../ -G "NMake Makefiles JOM" -DCMAKE_MAKE_PROGRAM=jom -DBUILD_TOOLS=true -DBUILD_HTTP=false -DVERNUMBER=%2 -DCPUTYPE=x86
-cmake --build .
-rd /s /Q C:\TDengine
-cmake --install .
-if not %errorlevel% == 0 ( call :RUNFAILED build x86 failed & exit /b 1)
-cd %package_dir%
-@REM iscc /DMyAppInstallName="%packagServerName_x86%" /DMyAppVersion="%2" /DMyAppExcludeSource="" tools\tdengine.iss /O..\release
-@REM if not %errorlevel% == 0 ( call :RUNFAILED package %packagServerName_x86% failed & exit /b 1)
-iscc /DMyAppInstallName="%packagClientName_x86%" /DMyAppVersion="%2" /DMyAppExcludeSource="taosd.exe" tools\tdengine.iss /O..\release
-if not %errorlevel% == 0 ( call :RUNFAILED package %packagClientName_x86% failed & exit /b 1)
-
+for /r ..\release %%i in (*.exe) do signtool sign /f d:\\123.pfx /p taosdata %%i
goto EXIT0
:USAGE
diff --git a/packaging/testpackage.sh b/packaging/testpackage.sh
new file mode 100755
index 0000000000000000000000000000000000000000..794b3968fe4df178e3da91d2ebbd1512e288a57e
--- /dev/null
+++ b/packaging/testpackage.sh
@@ -0,0 +1,273 @@
+#!/bin/sh
+#parameter
+scriptDir=$(dirname $(readlink -f $0))
+packgeName=$1
+version=$2
+originPackageName=$3
+originversion=$4
+testFile=$5
+subFile="taos.tar.gz"
+
+# Color setting
+RED='\033[41;30m'
+GREEN='\033[1;32m'
+YELLOW='\033[1;33m'
+BLUE='\033[1;34m'
+GREEN_DARK='\033[0;32m'
+YELLOW_DARK='\033[0;33m'
+BLUE_DARK='\033[0;34m'
+GREEN_UNDERLINE='\033[4;32m'
+NC='\033[0m'
+
+if [ ${testFile} = "server" ];then
+ tdPath="TDengine-server-${version}"
+ originTdpPath="TDengine-server-${originversion}"
+ installCmd="install.sh"
+elif [ ${testFile} = "client" ];then
+ tdPath="TDengine-client-${version}"
+ originTdpPath="TDengine-client-${originversion}"
+ installCmd="install_client.sh"
+elif [ ${testFile} = "tools" ];then
+ tdPath="taosTools-${version}"
+ originTdpPath="taosTools-${originversion}"
+ installCmd="install-taostools.sh"
+fi
+
+function cmdInstall {
+command=$1
+if command -v ${command} ;then
+ echoColor YD "${command} is already installed"
+else
+ if command -v apt ;then
+ apt-get install ${command} -y
+ elif command -v yum ;then
+ yum -y install ${command}
+ echoColor YD "you should install ${command} manually"
+ fi
+fi
+}
+
+function echoColor {
+color=$1
+command=$2
+
+if [ ${color} = 'Y' ];then
+ echo -e "${YELLOW}${command}${NC}"
+elif [ ${color} = 'YD' ];then
+ echo -e "${YELLOW_DARK}${command}${NC}"
+elif [ ${color} = 'R' ];then
+ echo -e "${RED}${command}${NC}"
+elif [ ${color} = 'G' ];then
+ echo -e "${GREEN}${command}${NC}\r\n"
+elif [ ${color} = 'B' ];then
+ echo -e "${BLUE}${command}${NC}"
+elif [ ${color} = 'BD' ];then
+ echo -e "${BLUE_DARK}${command}${NC}"
+fi
+}
+
+
+function wgetFile {
+
+file=$1
+
+if [ ! -f ${file} ];then
+ echoColor BD "wget https://www.taosdata.com/assets-download/3.0/${file}"
+ wget https://www.taosdata.com/assets-download/3.0/${file}
+else
+ echoColor YD "${file} already exists "
+fi
+}
+
+function newPath {
+
+buildPath=$1
+
+if [ ! -d ${buildPath} ] ;then
+ echoColor BD "mkdir -p ${buildPath}"
+ mkdir -p ${buildPath}
+else
+ echoColor YD "${buildPath} already exists"
+fi
+
+}
+
+
+echoColor G "===== install basesoft ====="
+
+cmdInstall tree
+cmdInstall wget
+cmdInstall expect
+
+echoColor G "===== Uninstall all components of TDeingne ====="
+
+if command -v rmtaos ;then
+ echoColor YD "uninstall all components of TDeingne:rmtaos"
+ rmtaos
+else
+ echoColor YD "os doesn't include TDengine"
+fi
+
+if command -v rmtaostools ;then
+ echoColor YD "uninstall all components of TDeingne:rmtaostools"
+ rmtaostools
+else
+ echoColor YD "os doesn't include rmtaostools "
+fi
+
+
+
+
+echoColor G "===== new workroom path ====="
+installPath="/usr/local/src/packageTest"
+oriInstallPath="/usr/local/src/packageTest/3.1"
+
+newPath ${installPath}
+
+newPath ${oriInstallPath}
+
+
+if [ -d ${oriInstallPath}/${originTdpPath} ] ;then
+ echoColor BD "rm -rf ${oriInstallPath}/${originTdpPath}/*"
+ rm -rf ${oriInstallPath}/${originTdpPath}/*
+fi
+
+if [ -d ${installPath}/${tdPath} ] ;then
+ echoColor BD "rm -rf ${installPath}/${tdPath}/*"
+ rm -rf ${installPath}/${tdPath}/*
+fi
+
+echoColor G "===== download installPackage ====="
+cd ${installPath} && wgetFile ${packgeName}
+cd ${oriInstallPath} && wgetFile ${originPackageName}
+
+cd ${installPath}
+cp -r ${scriptDir}/debRpmAutoInstall.sh .
+
+packageSuffix=$(echo ${packgeName} | awk -F '.' '{print $NF}')
+
+
+if [ ! -f debRpmAutoInstall.sh ];then
+ echo '#!/usr/bin/expect ' > debRpmAutoInstall.sh
+ echo 'set packgeName [lindex $argv 0]' >> debRpmAutoInstall.sh
+ echo 'set packageSuffix [lindex $argv 1]' >> debRpmAutoInstall.sh
+ echo 'set timeout 3 ' >> debRpmAutoInstall.sh
+ echo 'if { ${packageSuffix} == "deb" } {' >> debRpmAutoInstall.sh
+ echo ' spawn dpkg -i ${packgeName} ' >> debRpmAutoInstall.sh
+ echo '} elseif { ${packageSuffix} == "rpm"} {' >> debRpmAutoInstall.sh
+ echo ' spawn rpm -ivh ${packgeName}' >> debRpmAutoInstall.sh
+ echo '}' >> debRpmAutoInstall.sh
+ echo 'expect "*one:"' >> debRpmAutoInstall.sh
+ echo 'send "\r"' >> debRpmAutoInstall.sh
+ echo 'expect "*skip:"' >> debRpmAutoInstall.sh
+ echo 'send "\r" ' >> debRpmAutoInstall.sh
+fi
+
+
+echoColor G "===== install Package ====="
+
+if [[ ${packgeName} =~ "deb" ]];then
+ cd ${installPath}
+ dpkg -r taostools
+ dpkg -r tdengine
+ if [[ ${packgeName} =~ "TDengine" ]];then
+ echoColor BD "./debRpmAutoInstall.sh ${packgeName} ${packageSuffix}" && chmod 755 debRpmAutoInstall.sh && ./debRpmAutoInstall.sh ${packgeName} ${packageSuffix}
+ else
+ echoColor BD "dpkg -i ${packgeName}" && dpkg -i ${packgeName}
+ fi
+elif [[ ${packgeName} =~ "rpm" ]];then
+ cd ${installPath}
+ sudo rpm -e tdengine
+ sudo rpm -e taostools
+ if [[ ${packgeName} =~ "TDengine" ]];then
+ echoColor BD "./debRpmAutoInstall.sh ${packgeName} ${packageSuffix}" && chmod 755 debRpmAutoInstall.sh && ./debRpmAutoInstall.sh ${packgeName} ${packageSuffix}
+ else
+ echoColor BD "rpm -ivh ${packgeName}" && rpm -ivh ${packgeName}
+ fi
+elif [[ ${packgeName} =~ "tar" ]];then
+ echoColor G "===== check installPackage File of tar ====="
+ cd ${oriInstallPath}
+  if [ ! -f ${originPackageName} ];then
+ echoColor YD "download base installPackage"
+ wgetFile ${originPackageName}
+ fi
+ echoColor YD "unzip the base installation package"
+ echoColor BD "tar -xf ${originPackageName}" && tar -xf ${originPackageName}
+ cd ${installPath}
+ echoColor YD "unzip the new installation package"
+ echoColor BD "tar -xf ${packgeName}" && tar -xf ${packgeName}
+
+ if [ ${testFile} != "tools" ] ;then
+ cd ${installPath}/${tdPath} && tar xf ${subFile}
+ cd ${oriInstallPath}/${originTdpPath} && tar xf ${subFile}
+ fi
+
+ cd ${oriInstallPath}/${originTdpPath} && tree > ${installPath}/base_${originversion}_checkfile
+ cd ${installPath}/${tdPath} && tree > ${installPath}/now_${version}_checkfile
+
+ cd ${installPath}
+ diff ${installPath}/base_${originversion}_checkfile ${installPath}/now_${version}_checkfile > ${installPath}/diffFile.log
+ diffNumbers=`cat ${installPath}/diffFile.log |wc -l `
+
+ if [ ${diffNumbers} != 0 ];then
+ echoColor R "The number and names of files is different from the previous installation package"
+ echoColor Y `cat ${installPath}/diffFile.log`
+ exit -1
+ else
+ echoColor G "The number and names of files are the same as previous installation packages"
+ fi
+ echoColor YD "===== install Package of tar ====="
+ cd ${installPath}/${tdPath}
+ if [ ${testFile} = "server" ];then
+ echoColor BD "bash ${installCmd} -e no "
+ bash ${installCmd} -e no
+ else
+ echoColor BD "bash ${installCmd} "
+ bash ${installCmd}
+ fi
+fi
+
+cd ${installPath}
+
+if [[ ${packgeName} =~ "Lite" ]] || ([[ ${packgeName} =~ "x64" ]] && [[ ${packgeName} =~ "client" ]]) || ([[ ${packgeName} =~ "deb" ]] && [[ ${packgeName} =~ "server" ]]) || ([[ ${packgeName} =~ "rpm" ]] && [[ ${packgeName} =~ "server" ]]) ;then
+ echoColor G "===== install taos-tools when package is lite or client ====="
+ cd ${installPath}
+ wgetFile taosTools-2.1.3-Linux-x64.tar.gz .
+ tar xf taosTools-2.1.3-Linux-x64.tar.gz
+ cd taosTools-2.1.3 && bash install-taostools.sh
+elif ([[ ${packgeName} =~ "arm64" ]] && [[ ${packgeName} =~ "client" ]]);then
+ echoColor G "===== install taos-tools arm when package is arm64-client ====="
+ cd ${installPath}
+ wgetFile taosTools-2.1.3-Linux-arm64.tar.gz .
+ tar xf taosTools-2.1.3-Linux-arm64.tar.gz
+ cd taosTools-2.1.3 && bash install-taostools.sh
+fi
+
+echoColor G "===== start TDengine ====="
+
+if [[ ${packgeName} =~ "server" ]] ;then
+ echoColor BD " rm -rf /var/lib/taos/* && systemctl restart taosd "
+ rm -rf /var/lib/taos/*
+ systemctl restart taosd
+fi
+
+# if ([[ ${packgeName} =~ "Lite" ]] && [[ ${packgeName} =~ "tar" ]]) || [[ ${packgeName} =~ "client" ]] ;then
+# echoColor G "===== install taos-tools when package is lite or client ====="
+# cd ${installPath}
+# wgetFile taosTools-2.1.2-Linux-x64.tar.gz .
+# tar xf taosTools-2.1.2-Linux-x64.tar.gz
+# cd taosTools-2.1.2 && bash install-taostools.sh
+# elif [[ ${packgeName} =~ "Lite" ]] && [[ ${packgeName} =~ "deb" ]] ;then
+# echoColor G "===== install taos-tools when package is lite or client ====="
+# cd ${installPath}
+# wgetFile taosTools-2.1.2-Linux-x64.tar.gz .
+# tar xf taosTools-2.1.2-Linux-x64.tar.gz
+# cd taosTools-2.1.2 && bash install-taostools.sh
+# elif [[ ${packgeName} =~ "Lite" ]] && [[ ${packgeName} =~ "rpm" ]] ;then
+# echoColor G "===== install taos-tools when package is lite or client ====="
+# cd ${installPath}
+# wgetFile taosTools-2.1.2-Linux-x64.tar.gz .
+# tar xf taosTools-2.1.2-Linux-x64.tar.gz
+# cd taosTools-2.1.2 && bash install-taostools.sh
+# fi
+
diff --git a/packaging/tools/install.sh b/packaging/tools/install.sh
index 39606ead300c8c603b9f25360d19e3af49b642ff..f2f72acafa9e5cda273d26933723d4328146d7cd 100755
--- a/packaging/tools/install.sh
+++ b/packaging/tools/install.sh
@@ -33,6 +33,7 @@ adapterName="taosadapter"
benchmarkName="taosBenchmark"
dumpName="taosdump"
demoName="taosdemo"
+xname="taosx"
data_dir=${dataDir}
log_dir=${logDir}
@@ -199,6 +200,7 @@ function install_bin() {
${csudo}rm -f ${bin_link_dir}/${demoName} || :
${csudo}rm -f ${bin_link_dir}/${benchmarkName} || :
${csudo}rm -f ${bin_link_dir}/${dumpName} || :
+ ${csudo}rm -f ${bin_link_dir}/${xname} || :
${csudo}rm -f ${bin_link_dir}/set_core || :
${csudo}rm -f ${bin_link_dir}/TDinsight.sh || :
@@ -212,6 +214,7 @@ function install_bin() {
[ -x ${install_main_dir}/bin/${benchmarkName} ] && ${csudo}ln -s ${install_main_dir}/bin/${benchmarkName} ${bin_link_dir}/${demoName} || :
[ -x ${install_main_dir}/bin/${benchmarkName} ] && ${csudo}ln -s ${install_main_dir}/bin/${benchmarkName} ${bin_link_dir}/${benchmarkName} || :
[ -x ${install_main_dir}/bin/${dumpName} ] && ${csudo}ln -s ${install_main_dir}/bin/${dumpName} ${bin_link_dir}/${dumpName} || :
+  [ -x ${install_main_dir}/bin/${xname} ] && ${csudo}ln -s ${install_main_dir}/bin/${xname} ${bin_link_dir}/${xname} || :
[ -x ${install_main_dir}/bin/TDinsight.sh ] && ${csudo}ln -s ${install_main_dir}/bin/TDinsight.sh ${bin_link_dir}/TDinsight.sh || :
[ -x ${install_main_dir}/bin/remove.sh ] && ${csudo}ln -s ${install_main_dir}/bin/remove.sh ${bin_link_dir}/${uninstallScript} || :
[ -x ${install_main_dir}/bin/set_core.sh ] && ${csudo}ln -s ${install_main_dir}/bin/set_core.sh ${bin_link_dir}/set_core || :
diff --git a/packaging/tools/make_install.sh b/packaging/tools/make_install.sh
index 6a95ace99ee521b9e1baca39d72bf7fa1cabb7d5..a6dceeeaadaf53ae510b38439df9e5130f88b35d 100755
--- a/packaging/tools/make_install.sh
+++ b/packaging/tools/make_install.sh
@@ -172,6 +172,7 @@ function install_bin() {
${csudo}rm -f ${bin_link_dir}/udfd || :
${csudo}rm -f ${bin_link_dir}/taosdemo || :
${csudo}rm -f ${bin_link_dir}/taosdump || :
+ ${csudo}rm -f ${bin_link_dir}/taosx || :
if [ "$osType" != "Darwin" ]; then
${csudo}rm -f ${bin_link_dir}/perfMonitor || :
@@ -184,6 +185,7 @@ function install_bin() {
[ -f ${binary_dir}/build/bin/taosdump ] && ${csudo}cp -r ${binary_dir}/build/bin/taosdump ${install_main_dir}/bin || :
[ -f ${binary_dir}/build/bin/taosadapter ] && ${csudo}cp -r ${binary_dir}/build/bin/taosadapter ${install_main_dir}/bin || :
[ -f ${binary_dir}/build/bin/udfd ] && ${csudo}cp -r ${binary_dir}/build/bin/udfd ${install_main_dir}/bin || :
+ [ -f ${binary_dir}/build/bin/taosx ] && ${csudo}cp -r ${binary_dir}/build/bin/taosx ${install_main_dir}/bin || :
${csudo}cp -r ${binary_dir}/build/bin/${serverName} ${install_main_dir}/bin || :
${csudo}cp -r ${script_dir}/taosd-dump-cfg.gdb ${install_main_dir}/bin || :
@@ -199,6 +201,7 @@ function install_bin() {
[ -x ${install_main_dir}/bin/udfd ] && ${csudo}ln -s ${install_main_dir}/bin/udfd ${bin_link_dir}/udfd || :
[ -x ${install_main_dir}/bin/taosdump ] && ${csudo}ln -s ${install_main_dir}/bin/taosdump ${bin_link_dir}/taosdump || :
[ -x ${install_main_dir}/bin/taosdemo ] && ${csudo}ln -s ${install_main_dir}/bin/taosdemo ${bin_link_dir}/taosdemo || :
+ [ -x ${install_main_dir}/bin/taosx ] && ${csudo}ln -s ${install_main_dir}/bin/taosx ${bin_link_dir}/taosx || :
[ -x ${install_main_dir}/bin/perfMonitor ] && ${csudo}ln -s ${install_main_dir}/bin/perfMonitor ${bin_link_dir}/perfMonitor || :
[ -x ${install_main_dir}/set_core.sh ] && ${csudo}ln -s ${install_main_dir}/bin/set_core.sh ${bin_link_dir}/set_core || :
[ -x ${install_main_dir}/bin/remove.sh ] && ${csudo}ln -s ${install_main_dir}/bin/remove.sh ${bin_link_dir}/${uninstallScript} || :
@@ -215,6 +218,7 @@ function install_bin() {
[ -x ${install_main_dir}/bin/udfd ] || [ -x ${install_main_2_dir}/bin/udfd ] && ${csudo}ln -s ${install_main_dir}/bin/udfd ${bin_link_dir}/udfd || ${csudo}ln -s ${install_main_2_dir}/bin/udfd || :
[ -x ${install_main_dir}/bin/taosdump ] || [ -x ${install_main_2_dir}/bin/taosdump ] && ${csudo}ln -s ${install_main_dir}/bin/taosdump ${bin_link_dir}/taosdump || ln -s ${install_main_2_dir}/bin/taosdump ${bin_link_dir}/taosdump || :
[ -x ${install_main_dir}/bin/taosdemo ] || [ -x ${install_main_2_dir}/bin/taosdemo ] && ${csudo}ln -s ${install_main_dir}/bin/taosdemo ${bin_link_dir}/taosdemo || ln -s ${install_main_2_dir}/bin/taosdemo ${bin_link_dir}/taosdemo || :
+ [ -x ${install_main_dir}/bin/taosx ] || [ -x ${install_main_2_dir}/bin/taosx ] && ${csudo}ln -s ${install_main_dir}/bin/taosx ${bin_link_dir}/taosx || ln -s ${install_main_2_dir}/bin/taosx ${bin_link_dir}/taosx || :
fi
}
@@ -381,8 +385,7 @@ function install_header() {
${install_main_dir}/include ||
${csudo}cp -f ${source_dir}/include/client/taos.h ${source_dir}/include/common/taosdef.h ${source_dir}/include/util/taoserror.h ${source_dir}/include/libs/function/taosudf.h \
${install_main_2_dir}/include &&
- ${csudo}chmod 644 ${install_main_dir}/include/* ||:
- ${csudo}chmod 644 ${install_main_2_dir}/include/*
+ ${csudo}chmod 644 ${install_main_dir}/include/* || ${csudo}chmod 644 ${install_main_2_dir}/include/*
fi
}
diff --git a/packaging/tools/makepkg.sh b/packaging/tools/makepkg.sh
index f5e3bf18822676f54ee2f20412b5ebb4ce57fd3a..2305b96b3663c3c52ca84988518de3fd00769af8 100755
--- a/packaging/tools/makepkg.sh
+++ b/packaging/tools/makepkg.sh
@@ -80,10 +80,12 @@ else
${build_dir}/bin/taosBenchmark \
${build_dir}/bin/TDinsight.sh \
$tdinsight_caches"
+ [ -f ${build_dir}/bin/taosx ] && taosx_bin="${build_dir}/bin/taosx"
bin_files="${build_dir}/bin/${serverName} \
${build_dir}/bin/${clientName} \
${taostools_bin_files} \
+ ${taosx_bin} \
${build_dir}/bin/taosadapter \
${build_dir}/bin/udfd \
${script_dir}/remove.sh \
diff --git a/source/client/CMakeLists.txt b/source/client/CMakeLists.txt
index f52edbe71f151398c5ebdcd705eab376f2318aae..e8e3c878496c58631131922cc6de47491d548f06 100644
--- a/source/client/CMakeLists.txt
+++ b/source/client/CMakeLists.txt
@@ -27,11 +27,18 @@ else()
INCLUDE_DIRECTORIES(jni/linux)
endif()
+set_target_properties(
+ taos
+ PROPERTIES
+ CLEAN_DIRECT_OUTPUT
+ 1
+)
+
set_target_properties(
taos
PROPERTIES
VERSION ${TD_VER_NUMBER}
- SOVERSION ${TD_VER_NUMBER}
+ SOVERSION 1
)
add_library(taos_static STATIC ${CLIENT_SRC})
diff --git a/source/client/inc/clientInt.h b/source/client/inc/clientInt.h
index f275ae0885f10663b3c0ae853ecf1298fac25777..b8fa9580e70c1c7aa17a1402ce6c8113a7f8e094 100644
--- a/source/client/inc/clientInt.h
+++ b/source/client/inc/clientInt.h
@@ -52,15 +52,17 @@ enum {
RES_TYPE__QUERY = 1,
RES_TYPE__TMQ,
RES_TYPE__TMQ_META,
+ RES_TYPE__TAOSX,
};
#define SHOW_VARIABLES_RESULT_COLS 2
#define SHOW_VARIABLES_RESULT_FIELD1_LEN (TSDB_CONFIG_OPTION_LEN + VARSTR_HEADER_SIZE)
#define SHOW_VARIABLES_RESULT_FIELD2_LEN (TSDB_CONFIG_VALUE_LEN + VARSTR_HEADER_SIZE)
-#define TD_RES_QUERY(res) (*(int8_t*)res == RES_TYPE__QUERY)
-#define TD_RES_TMQ(res) (*(int8_t*)res == RES_TYPE__TMQ)
-#define TD_RES_TMQ_META(res) (*(int8_t*)res == RES_TYPE__TMQ_META)
+#define TD_RES_QUERY(res) (*(int8_t*)res == RES_TYPE__QUERY)
+#define TD_RES_TMQ(res) (*(int8_t*)res == RES_TYPE__TMQ || *(int8_t*)res == RES_TYPE__TAOSX)
+#define TD_RES_TMQ_META(res) (*(int8_t*)res == RES_TYPE__TMQ_META)
+#define TD_RES_TMQ_TAOSX(res) (*(int8_t*)res == RES_TYPE__TAOSX)
typedef struct SAppInstInfo SAppInstInfo;
@@ -95,10 +97,17 @@ typedef struct {
} SClientHbMgr;
typedef struct SQueryExecMetric {
- int64_t start; // start timestamp, us
- int64_t parsed; // start to parse, us
- int64_t send; // start to send to server, us
- int64_t rsp; // receive response from server, us
+ int64_t start; // start timestamp, us
+ int64_t syntaxStart; // start to parse, us
+ int64_t syntaxEnd; // end to parse, us
+ int64_t ctgStart; // start to parse, us
+ int64_t ctgEnd; // end to parse, us
+ int64_t semanticEnd;
+ int64_t planEnd;
+ int64_t resultReady;
+ int64_t execEnd;
+ int64_t send; // start to send to server, us
+ int64_t rsp; // receive response from server, us
} SQueryExecMetric;
struct SAppInstInfo {
@@ -132,6 +141,7 @@ typedef struct STscObj {
char db[TSDB_DB_FNAME_LEN];
char sVer[TSDB_VERSION_LEN];
char sDetailVer[128];
+ int8_t sysInfo;
int8_t connType;
int32_t acctId;
uint32_t connId;
@@ -192,8 +202,8 @@ typedef struct {
int32_t vgId;
SSchemaWrapper schema;
int32_t resIter;
- SMqDataRsp rsp;
SReqResultInfo resInfo;
+ SMqDataRsp rsp;
} SMqRspObj;
typedef struct {
@@ -204,6 +214,17 @@ typedef struct {
SMqMetaRsp metaRsp;
} SMqMetaRspObj;
+typedef struct {
+ int8_t resType;
+ char topic[TSDB_TOPIC_FNAME_LEN];
+ char db[TSDB_DB_FNAME_LEN];
+ int32_t vgId;
+ SSchemaWrapper schema;
+ int32_t resIter;
+ SReqResultInfo resInfo;
+ STaosxRsp rsp;
+} SMqTaosxRspObj;
+
typedef struct SRequestObj {
int8_t resType; // query or tmq
uint64_t requestId;
@@ -252,7 +273,7 @@ SRequestObj* execQuery(uint64_t connId, const char* sql, int sqlLen, bool valida
TAOS_RES* taosQueryImpl(TAOS* taos, const char* sql, bool validateOnly);
void taosAsyncQueryImpl(uint64_t connId, const char* sql, __taos_async_fn_t fp, void* param, bool validateOnly);
-int32_t getVersion1BlockMetaSize(const char* p, int32_t numOfCols);
+int32_t getVersion1BlockMetaSize(const char* p, int32_t numOfCols);
static FORCE_INLINE SReqResultInfo* tmqGetCurResInfo(TAOS_RES* res) {
SMqRspObj* msg = (SMqRspObj*)res;
@@ -363,8 +384,9 @@ void launchAsyncQuery(SRequestObj* pRequest, SQuery* pQuery, SMetaData*
int32_t refreshMeta(STscObj* pTscObj, SRequestObj* pRequest);
int32_t updateQnodeList(SAppInstInfo* pInfo, SArray* pNodeList);
void doAsyncQuery(SRequestObj* pRequest, bool forceUpdateMeta);
-int32_t removeMeta(STscObj* pTscObj, SArray* tbList); // todo move to clientImpl.c and become a static function
-int32_t handleAlterTbExecRes(void* res, struct SCatalog* pCatalog); // todo move to xxx
+int32_t removeMeta(STscObj* pTscObj, SArray* tbList);
+int32_t handleAlterTbExecRes(void* res, struct SCatalog* pCatalog);
+int32_t handleCreateTbExecRes(void* res, SCatalog* pCatalog);
bool qnodeRequired(SRequestObj* pRequest);
#ifdef __cplusplus
diff --git a/source/client/inc/clientLog.h b/source/client/inc/clientLog.h
index d47edcd79535a3c8fc5d94aabd3bd8b08d0448f7..ec0a41a68f9515bc7ea2c54e96b0235c0a9683eb 100644
--- a/source/client/inc/clientLog.h
+++ b/source/client/inc/clientLog.h
@@ -29,6 +29,7 @@ extern "C" {
#define tscDebug(...) do { if (cDebugFlag & DEBUG_DEBUG) { taosPrintLog("TSC ", DEBUG_DEBUG, cDebugFlag, __VA_ARGS__); }} while(0)
#define tscTrace(...) do { if (cDebugFlag & DEBUG_TRACE) { taosPrintLog("TSC ", DEBUG_TRACE, cDebugFlag, __VA_ARGS__); }} while(0)
#define tscDebugL(...) do { if (cDebugFlag & DEBUG_DEBUG) { taosPrintLongString("TSC ", DEBUG_DEBUG, cDebugFlag, __VA_ARGS__); }} while(0)
+#define tscPerf(...) do { taosPrintLog("TSC ", 0, cDebugFlag, __VA_ARGS__); } while(0)
#ifdef __cplusplus
}
diff --git a/source/client/src/TMQConnector.c b/source/client/src/TMQConnector.c
index 17d3a212c482c3462e542721d7d57f516250ff13..fcf6957df92e92b990c60cd3b41342dbbf90ae9e 100644
--- a/source/client/src/TMQConnector.c
+++ b/source/client/src/TMQConnector.c
@@ -42,6 +42,7 @@ void commit_cb(tmq_t *tmq, int32_t code, void *param) {
JNIEXPORT jlong JNICALL Java_com_taosdata_jdbc_tmq_TMQConnector_tmqConfNewImp(JNIEnv *env, jobject jobj) {
tmq_conf_t *conf = tmq_conf_new();
+ jniGetGlobalMethod(env);
return (jlong)conf;
}
diff --git a/source/client/src/clientEnv.c b/source/client/src/clientEnv.c
index ff1b9322c92a8791ed79a3025f0af362fb441adc..b739aedca0ff7c8bd8e408e2e456aa7414f1ac30 100644
--- a/source/client/src/clientEnv.c
+++ b/source/client/src/clientEnv.c
@@ -69,14 +69,26 @@ static void deregisterRequest(SRequestObj *pRequest) {
int32_t currentInst = atomic_sub_fetch_64((int64_t *)&pActivity->currentRequests, 1);
int32_t num = atomic_sub_fetch_32(&pTscObj->numOfReqs, 1);
- int64_t duration = taosGetTimestampUs() - pRequest->metric.start;
+ int64_t nowUs = taosGetTimestampUs();
+ int64_t duration = nowUs - pRequest->metric.start;
tscDebug("0x%" PRIx64 " free Request from connObj: 0x%" PRIx64 ", reqId:0x%" PRIx64 " elapsed:%" PRIu64
" ms, current:%d, app current:%d",
pRequest->self, pTscObj->id, pRequest->requestId, duration / 1000, num, currentInst);
if (QUERY_NODE_VNODE_MODIF_STMT == pRequest->stmtType) {
+ tscPerf("insert duration %" PRId64 "us: syntax:%" PRId64 "us, ctg:%" PRId64 "us, semantic:%" PRId64
+ "us, exec:%" PRId64 "us",
+ duration, pRequest->metric.syntaxEnd - pRequest->metric.syntaxStart,
+ pRequest->metric.ctgEnd - pRequest->metric.ctgStart, pRequest->metric.semanticEnd - pRequest->metric.ctgEnd,
+ pRequest->metric.execEnd - pRequest->metric.semanticEnd);
atomic_add_fetch_64((int64_t *)&pActivity->insertElapsedTime, duration);
} else if (QUERY_NODE_SELECT_STMT == pRequest->stmtType) {
+ tscPerf("select duration %" PRId64 "us: syntax:%" PRId64 "us, ctg:%" PRId64 "us, semantic:%" PRId64
+ "us, planner:%" PRId64 "us, exec:%" PRId64 "us",
+ duration, pRequest->metric.syntaxEnd - pRequest->metric.syntaxStart,
+ pRequest->metric.ctgEnd - pRequest->metric.ctgStart, pRequest->metric.semanticEnd - pRequest->metric.ctgEnd,
+ pRequest->metric.planEnd - pRequest->metric.semanticEnd,
+ pRequest->metric.resultReady - pRequest->metric.planEnd);
atomic_add_fetch_64((int64_t *)&pActivity->queryElapsedTime, duration);
}
@@ -330,7 +342,6 @@ void doDestroyRequest(void *p) {
schedulerFreeJob(&pRequest->body.queryJob, 0);
taosMemoryFreeClear(pRequest->msgBuf);
- taosMemoryFreeClear(pRequest->sqlstr);
taosMemoryFreeClear(pRequest->pDb);
doFreeReqResultInfo(&pRequest->body.resInfo);
@@ -349,6 +360,7 @@ void doDestroyRequest(void *p) {
taosMemoryFree(pRequest->body.param);
}
+ taosMemoryFreeClear(pRequest->sqlstr);
taosMemoryFree(pRequest);
tscTrace("end to destroy request %" PRIx64 " p:%p", reqId, pRequest);
}
@@ -393,7 +405,9 @@ void taos_init_imp(void) {
schedulerInit();
tscDebug("starting to initialize TAOS driver");
+#ifndef WINDOWS
taosSetCoreDump(true);
+#endif
initTaskQueue();
fmFuncMgtInit();
@@ -474,7 +488,7 @@ int taos_options_imp(TSDB_OPTION option, const char *str) {
*/
uint64_t generateRequestId() {
static uint64_t hashId = 0;
- static int32_t requestSerialId = 0;
+ static uint32_t requestSerialId = 0;
if (hashId == 0) {
char uid[64] = {0};
@@ -493,7 +507,8 @@ uint64_t generateRequestId() {
while (true) {
int64_t ts = taosGetTimestampMs();
uint64_t pid = taosGetPId();
- int32_t val = atomic_add_fetch_32(&requestSerialId, 1);
+ uint32_t val = atomic_add_fetch_32(&requestSerialId, 1);
+ if (val >= 0xFFFF) atomic_store_32(&requestSerialId, 0);
id = ((hashId & 0x0FFF) << 52) | ((pid & 0x0FFF) << 40) | ((ts & 0xFFFFFF) << 16) | (val & 0xFFFF);
if (id) {
diff --git a/source/client/src/clientHb.c b/source/client/src/clientHb.c
index 9475d1b51e51d093bcf7335d1668908e0c039a80..84a827ed78d6feb5dbba98ce89695d33fcef2073 100644
--- a/source/client/src/clientHb.c
+++ b/source/client/src/clientHb.c
@@ -73,6 +73,8 @@ static int32_t hbProcessDBInfoRsp(void *value, int32_t valueLen, struct SCatalog
vgInfo->vgVersion = rsp->vgVersion;
vgInfo->hashMethod = rsp->hashMethod;
+ vgInfo->hashPrefix = rsp->hashPrefix;
+ vgInfo->hashSuffix = rsp->hashSuffix;
vgInfo->vgHash = taosHashInit(rsp->vgNum, taosGetDefaultHashFunction(TSDB_DATA_TYPE_INT), true, HASH_ENTRY_LOCK);
if (NULL == vgInfo->vgHash) {
taosMemoryFree(vgInfo);
@@ -145,7 +147,7 @@ static int32_t hbProcessStbInfoRsp(void *value, int32_t valueLen, struct SCatalo
}
static int32_t hbQueryHbRspHandle(SAppHbMgr *pAppHbMgr, SClientHbRsp *pRsp) {
- SClientHbReq *pReq = taosHashGet(pAppHbMgr->activeInfo, &pRsp->connKey, sizeof(SClientHbKey));
+ SClientHbReq *pReq = taosHashAcquire(pAppHbMgr->activeInfo, &pRsp->connKey, sizeof(SClientHbKey));
if (NULL == pReq) {
tscWarn("pReq to get activeInfo, may be dropped, refId:%" PRIx64 ", type:%d", pRsp->connKey.tscRid,
pRsp->connKey.connType);
@@ -260,6 +262,8 @@ static int32_t hbQueryHbRspHandle(SAppHbMgr *pAppHbMgr, SClientHbRsp *pRsp) {
}
}
+ taosHashRelease(pAppHbMgr->activeInfo, pReq);
+
return TSDB_CODE_SUCCESS;
}
@@ -914,10 +918,11 @@ int hbRegisterConn(SAppHbMgr *pAppHbMgr, int64_t tscRefId, int64_t clusterId, in
}
void hbDeregisterConn(SAppHbMgr *pAppHbMgr, SClientHbKey connKey) {
- SClientHbReq *pReq = taosHashGet(pAppHbMgr->activeInfo, &connKey, sizeof(SClientHbKey));
+ SClientHbReq *pReq = taosHashAcquire(pAppHbMgr->activeInfo, &connKey, sizeof(SClientHbKey));
if (pReq) {
tFreeClientHbReq(pReq);
taosHashRemove(pAppHbMgr->activeInfo, &connKey, sizeof(SClientHbKey));
+ taosHashRelease(pAppHbMgr->activeInfo, pReq);
}
if (NULL == pReq) {
diff --git a/source/client/src/clientImpl.c b/source/client/src/clientImpl.c
index 5f0af55d13c3e3c79f796f5f34f31dff121f1281..5ebc2729f8b4a2d87316c5d317e0b4c8667edf88 100644
--- a/source/client/src/clientImpl.c
+++ b/source/client/src/clientImpl.c
@@ -215,6 +215,7 @@ int32_t parseSql(SRequestObj* pRequest, bool topicQuery, SQuery** pQuery, SStmtC
.pUser = pTscObj->user,
.schemalessType = pTscObj->schemalessType,
.isSuperUser = (0 == strcmp(pTscObj->user, TSDB_DEFAULT_USER)),
+ .enableSysInfo = pTscObj->sysInfo,
.svrVer = pTscObj->sVer,
.nodeOffline = (pTscObj->pAppInfo->onlineDnodes < pTscObj->pAppInfo->totalDnodes)};
@@ -246,7 +247,7 @@ int32_t parseSql(SRequestObj* pRequest, bool topicQuery, SQuery** pQuery, SStmtC
int32_t execLocalCmd(SRequestObj* pRequest, SQuery* pQuery) {
SRetrieveTableRsp* pRsp = NULL;
- int32_t code = qExecCommand(pQuery->pRoot, &pRsp);
+ int32_t code = qExecCommand(pRequest->pTscObj->sysInfo, pQuery->pRoot, &pRsp);
if (TSDB_CODE_SUCCESS == code && NULL != pRsp) {
code = setQueryResultFromRsp(&pRequest->body.resInfo, pRsp, false, true);
}
@@ -284,7 +285,7 @@ void asyncExecLocalCmd(SRequestObj* pRequest, SQuery* pQuery) {
return;
}
- int32_t code = qExecCommand(pQuery->pRoot, &pRsp);
+ int32_t code = qExecCommand(pRequest->pTscObj->sysInfo, pQuery->pRoot, &pRsp);
if (TSDB_CODE_SUCCESS == code && NULL != pRsp) {
code = setQueryResultFromRsp(&pRequest->body.resInfo, pRsp, false, true);
}
@@ -419,7 +420,8 @@ int32_t getPlan(SRequestObj* pRequest, SQuery* pQuery, SQueryPlan** pPlan, SArra
.showRewrite = pQuery->showRewrite,
.pMsg = pRequest->msgBuf,
.msgLen = ERROR_MSG_BUF_DEFAULT_SIZE,
- .pUser = pRequest->pTscObj->user};
+ .pUser = pRequest->pTscObj->user,
+ .sysInfo = pRequest->pTscObj->sysInfo};
return qCreateQueryPlan(&cxt, pPlan, pNodeList);
}
@@ -721,6 +723,12 @@ int32_t handleSubmitExecRes(SRequestObj* pRequest, void* res, SCatalog* pCatalog
for (int32_t i = 0; i < pRsp->nBlocks; ++i) {
SSubmitBlkRsp* blk = pRsp->pBlocks + i;
+ if (blk->pMeta) {
+ handleCreateTbExecRes(blk->pMeta, pCatalog);
+ tFreeSTableMetaRsp(blk->pMeta);
+ taosMemoryFreeClear(blk->pMeta);
+ }
+
if (NULL == blk->tblFName || 0 == blk->tblFName[0]) {
continue;
}
@@ -780,6 +788,10 @@ int32_t handleAlterTbExecRes(void* res, SCatalog* pCatalog) {
return catalogUpdateTableMeta(pCatalog, (STableMetaRsp*)res);
}
+int32_t handleCreateTbExecRes(void* res, SCatalog* pCatalog) {
+ return catalogUpdateTableMeta(pCatalog, (STableMetaRsp*)res);
+}
+
int32_t handleQueryExecRsp(SRequestObj* pRequest) {
if (NULL == pRequest->body.resInfo.execRes.res) {
return TSDB_CODE_SUCCESS;
@@ -802,6 +814,19 @@ int32_t handleQueryExecRsp(SRequestObj* pRequest) {
code = handleAlterTbExecRes(pRes->res, pCatalog);
break;
}
+ case TDMT_VND_CREATE_TABLE: {
+ SArray* pList = (SArray*)pRes->res;
+ int32_t num = taosArrayGetSize(pList);
+ for (int32_t i = 0; i < num; ++i) {
+ void* res = taosArrayGetP(pList, i);
+ code = handleCreateTbExecRes(res, pCatalog);
+ }
+ break;
+ }
+ case TDMT_MND_CREATE_STB: {
+ code = handleCreateTbExecRes(pRes->res, pCatalog);
+ break;
+ }
case TDMT_VND_SUBMIT: {
atomic_add_fetch_64((int64_t*)&pAppInfo->summary.insertBytes, pRes->numOfBytes);
@@ -826,6 +851,8 @@ void schedulerExecCb(SExecResult* pResult, void* param, int32_t code) {
SRequestObj* pRequest = (SRequestObj*)param;
pRequest->code = code;
+ pRequest->metric.resultReady = taosGetTimestampUs();
+
if (pResult) {
memcpy(&pRequest->body.resInfo.execRes, pResult, sizeof(*pResult));
}
@@ -842,6 +869,8 @@ void schedulerExecCb(SExecResult* pResult, void* param, int32_t code) {
}
schedulerFreeJob(&pRequest->body.queryJob, 0);
+
+ pRequest->metric.execEnd = taosGetTimestampUs();
}
taosMemoryFree(pResult);
@@ -859,17 +888,13 @@ void schedulerExecCb(SExecResult* pResult, void* param, int32_t code) {
return;
}
- if (code == TSDB_CODE_SUCCESS) {
- code = handleQueryExecRsp(pRequest);
- ASSERT(pRequest->code == TSDB_CODE_SUCCESS);
- pRequest->code = code;
- }
-
tscDebug("schedulerExecCb request type %s", TMSG_INFO(pRequest->type));
- if (NEED_CLIENT_RM_TBLMETA_REQ(pRequest->type)) {
+ if (NEED_CLIENT_RM_TBLMETA_REQ(pRequest->type) && NULL == pRequest->body.resInfo.execRes.res) {
removeMeta(pTscObj, pRequest->targetTableList);
}
+ handleQueryExecRsp(pRequest);
+
// return to client
pRequest->body.queryFp(pRequest->body.param, pRequest, code);
}
@@ -930,6 +955,10 @@ SRequestObj* launchQueryImpl(SRequestObj* pRequest, SQuery* pQuery, bool keepQue
qDestroyQuery(pQuery);
}
+ if (NEED_CLIENT_RM_TBLMETA_REQ(pRequest->type) && NULL == pRequest->body.resInfo.execRes.res) {
+ removeMeta(pRequest->pTscObj, pRequest->targetTableList);
+ }
+
handleQueryExecRsp(pRequest);
if (NULL != pRequest && TSDB_CODE_SUCCESS != code) {
@@ -990,7 +1019,8 @@ void launchAsyncQuery(SRequestObj* pRequest, SQuery* pQuery, SMetaData* pResultM
.showRewrite = pQuery->showRewrite,
.pMsg = pRequest->msgBuf,
.msgLen = ERROR_MSG_BUF_DEFAULT_SIZE,
- .pUser = pRequest->pTscObj->user};
+ .pUser = pRequest->pTscObj->user,
+ .sysInfo = pRequest->pTscObj->sysInfo};
SAppInstInfo* pAppInfo = getAppInfo(pRequest);
SQueryPlan* pDag = NULL;
@@ -1002,6 +1032,8 @@ void launchAsyncQuery(SRequestObj* pRequest, SQuery* pQuery, SMetaData* pResultM
pRequest->body.subplanNum = pDag->numOfSubplans;
}
+ pRequest->metric.planEnd = taosGetTimestampUs();
+
if (TSDB_CODE_SUCCESS == code && !pRequest->validateOnly) {
SArray* pNodeList = NULL;
buildAsyncExecNodeList(pRequest, &pNodeList, pMnodeList, pResultMeta);
@@ -1127,10 +1159,6 @@ SRequestObj* execQuery(uint64_t connId, const char* sql, int sqlLen, bool valida
inRetry = true;
} while (retryNum++ < REQUEST_TOTAL_EXEC_TIMES);
- if (NEED_CLIENT_RM_TBLMETA_REQ(pRequest->type)) {
- removeMeta(pRequest->pTscObj, pRequest->targetTableList);
- }
-
return pRequest;
}
@@ -1575,10 +1603,11 @@ static int32_t doConvertUCS4(SReqResultInfo* pResultInfo, int32_t numOfRows, int
}
int32_t getVersion1BlockMetaSize(const char* p, int32_t numOfCols) {
- int32_t cols = *(int32_t*) (p + sizeof(int32_t) * 3);
+ int32_t cols = *(int32_t*)(p + sizeof(int32_t) * 3);
ASSERT(numOfCols == cols);
- return sizeof(int32_t) + sizeof(int32_t) + sizeof(int32_t)*3 + sizeof(uint64_t) + numOfCols * (sizeof(int8_t) + sizeof(int32_t));
+ return sizeof(int32_t) + sizeof(int32_t) + sizeof(int32_t) * 3 + sizeof(uint64_t) +
+ numOfCols * (sizeof(int8_t) + sizeof(int32_t));
}
static int32_t estimateJsonLen(SReqResultInfo* pResultInfo, int32_t numOfCols, int32_t numOfRows) {
@@ -1643,7 +1672,12 @@ static int32_t doConvertJson(SReqResultInfo* pResultInfo, int32_t numOfCols, int
break;
}
}
- if (!needConvert) return TSDB_CODE_SUCCESS;
+
+ if (!needConvert) {
+ return TSDB_CODE_SUCCESS;
+ }
+
+  tscDebug("start to convert from json format string");
char* p = (char*)pResultInfo->pData;
int32_t dataLen = estimateJsonLen(pResultInfo, numOfCols, numOfRows);
@@ -1950,7 +1984,7 @@ _OVER:
int32_t appendTbToReq(SHashObj* pHash, int32_t pos1, int32_t len1, int32_t pos2, int32_t len2, const char* str,
int32_t acctId, char* db) {
- SName name;
+ SName name = {0};
if (len1 <= 0) {
return -1;
diff --git a/source/client/src/clientMain.c b/source/client/src/clientMain.c
index f449641f1008e79a58e02786a855711dbaeb6b9c..30860780807a820b041e27729f8e351fb46c99b3 100644
--- a/source/client/src/clientMain.c
+++ b/source/client/src/clientMain.c
@@ -184,6 +184,19 @@ void taos_free_result(TAOS_RES *res) {
SRequestObj *pRequest = (SRequestObj *)res;
tscDebug("0x%" PRIx64 " taos_free_result start to free query", pRequest->requestId);
destroyRequest(pRequest);
+ } else if (TD_RES_TMQ_TAOSX(res)) {
+ SMqTaosxRspObj *pRsp = (SMqTaosxRspObj *)res;
+ if (pRsp->rsp.blockData) taosArrayDestroyP(pRsp->rsp.blockData, taosMemoryFree);
+ if (pRsp->rsp.blockDataLen) taosArrayDestroy(pRsp->rsp.blockDataLen);
+ if (pRsp->rsp.withTbName) taosArrayDestroyP(pRsp->rsp.blockTbName, taosMemoryFree);
+ if (pRsp->rsp.withSchema) taosArrayDestroyP(pRsp->rsp.blockSchema, (FDelete)tDeleteSSchemaWrapper);
+ // taosx
+ taosArrayDestroy(pRsp->rsp.createTableLen);
+ taosArrayDestroyP(pRsp->rsp.createTableReq, taosMemoryFree);
+
+ pRsp->resInfo.pRspMsg = NULL;
+ doFreeReqResultInfo(&pRsp->resInfo);
+ taosMemoryFree(pRsp);
} else if (TD_RES_TMQ(res)) {
SMqRspObj *pRsp = (SMqRspObj *)res;
if (pRsp->rsp.blockData) taosArrayDestroyP(pRsp->rsp.blockData, taosMemoryFree);
@@ -685,6 +698,8 @@ void retrieveMetaCallback(SMetaData *pResultMeta, void *param, int32_t code) {
SQuery *pQuery = pWrapper->pQuery;
SRequestObj *pRequest = pWrapper->pRequest;
+ pRequest->metric.ctgEnd = taosGetTimestampUs();
+
if (code == TSDB_CODE_SUCCESS) {
code = qAnalyseSqlSemantic(pWrapper->pCtx, &pWrapper->catalogReq, pResultMeta, pQuery);
pRequest->stableQuery = pQuery->stableQuery;
@@ -693,6 +708,8 @@ void retrieveMetaCallback(SMetaData *pResultMeta, void *param, int32_t code) {
}
}
+ pRequest->metric.semanticEnd = taosGetTimestampUs();
+
if (code == TSDB_CODE_SUCCESS) {
if (pQuery->haveResultSet) {
setResSchemaInfo(&pRequest->body.resInfo, pQuery->pResSchema, pQuery->numOfResCols);
@@ -755,6 +772,7 @@ int32_t createParseContext(const SRequestObj *pRequest, SParseContext **pCxt) {
.pUser = pTscObj->user,
.schemalessType = pTscObj->schemalessType,
.isSuperUser = (0 == strcmp(pTscObj->user, TSDB_DEFAULT_USER)),
+ .enableSysInfo = pTscObj->sysInfo,
.async = true,
.svrVer = pTscObj->sVer,
.nodeOffline = (pTscObj->pAppInfo->onlineDnodes < pTscObj->pAppInfo->totalDnodes)};
@@ -784,12 +802,16 @@ void doAsyncQuery(SRequestObj *pRequest, bool updateMetaForce) {
SQuery *pQuery = NULL;
+ pRequest->metric.syntaxStart = taosGetTimestampUs();
+
SCatalogReq catalogReq = {.forceUpdate = updateMetaForce, .qNodeRequired = qnodeRequired(pRequest)};
code = qParseSqlSyntax(pCxt, &pQuery, &catalogReq);
if (code != TSDB_CODE_SUCCESS) {
goto _error;
}
+ pRequest->metric.syntaxEnd = taosGetTimestampUs();
+
if (!updateMetaForce) {
STscObj *pTscObj = pRequest->pTscObj;
SAppClusterSummary *pActivity = &pTscObj->pAppInfo->summary;
@@ -816,6 +838,8 @@ void doAsyncQuery(SRequestObj *pRequest, bool updateMetaForce) {
.requestObjRefId = pCxt->requestRid,
.mgmtEps = pCxt->mgmtEpSet};
+ pRequest->metric.ctgStart = taosGetTimestampUs();
+
code = catalogAsyncGetAllMeta(pCxt->pCatalog, &conn, &catalogReq, retrieveMetaCallback, pWrapper,
&pRequest->body.queryJob);
pCxt = NULL;
diff --git a/source/client/src/clientMsgHandler.c b/source/client/src/clientMsgHandler.c
index 0c4cf23c4e1708f4479a1b744dea37752513670d..a7a16d484ca10a8baa65419105f42e46dc3814f3 100644
--- a/source/client/src/clientMsgHandler.c
+++ b/source/client/src/clientMsgHandler.c
@@ -96,6 +96,7 @@ int32_t processConnectRsp(void* param, SDataBuf* pMsg, int32_t code) {
connectRsp.epSet.eps[i].fqdn, connectRsp.epSet.eps[i].port, pTscObj->id);
}
+ pTscObj->sysInfo = connectRsp.sysInfo;
pTscObj->connId = connectRsp.connId;
pTscObj->acctId = connectRsp.acctId;
tstrncpy(pTscObj->sVer, connectRsp.sVer, tListLen(pTscObj->sVer));
@@ -232,13 +233,36 @@ int32_t processCreateSTableRsp(void* param, SDataBuf* pMsg, int32_t code) {
assert(pMsg != NULL && param != NULL);
SRequestObj* pRequest = param;
- taosMemoryFree(pMsg->pData);
if (code != TSDB_CODE_SUCCESS) {
setErrno(pRequest, code);
+ } else {
+ SMCreateStbRsp createRsp = {0};
+ SDecoder coder = {0};
+ tDecoderInit(&coder, pMsg->pData, pMsg->len);
+ tDecodeSMCreateStbRsp(&coder, &createRsp);
+ tDecoderClear(&coder);
+
+ pRequest->body.resInfo.execRes.msgType = TDMT_MND_CREATE_STB;
+ pRequest->body.resInfo.execRes.res = createRsp.pMeta;
}
+ taosMemoryFree(pMsg->pData);
+
if (pRequest->body.queryFp != NULL) {
- removeMeta(pRequest->pTscObj, pRequest->tableList);
+ SExecResult* pRes = &pRequest->body.resInfo.execRes;
+
+ if (code == TSDB_CODE_SUCCESS) {
+ SCatalog* pCatalog = NULL;
+ int32_t ret = catalogGetHandle(pRequest->pTscObj->pAppInfo->clusterId, &pCatalog);
+ if (pRes->res != NULL) {
+ ret = handleCreateTbExecRes(pRes->res, pCatalog);
+ }
+
+ if (ret != TSDB_CODE_SUCCESS) {
+ code = ret;
+ }
+ }
+
pRequest->body.queryFp(pRequest->body.param, pRequest, code);
} else {
tsem_post(&pRequest->body.rspSem);
diff --git a/source/client/src/taosx.c b/source/client/src/clientRawBlockWrite.c
similarity index 94%
rename from source/client/src/taosx.c
rename to source/client/src/clientRawBlockWrite.c
index 677567e38ffcecefaa72373ac02a976cb2078676..c135965f07454c46e20780d029c18b2359110877 100644
--- a/source/client/src/taosx.c
+++ b/source/client/src/clientRawBlockWrite.c
@@ -765,6 +765,31 @@ static int32_t taosCreateTable(TAOS* taos, void* meta, int32_t metaLen) {
}
taosArrayPush(pRequest->tableList, &pName);
+ pCreateReq->flags |= TD_CREATE_IF_NOT_EXISTS;
+ // change tag cid to new cid
+ if (pCreateReq->type == TSDB_CHILD_TABLE) {
+ STableMeta* pTableMeta = NULL;
+ SName sName = {0};
+ toName(pTscObj->acctId, pRequest->pDb, pCreateReq->ctb.name, &sName);
+ code = catalogGetTableMeta(pCatalog, &conn, &sName, &pTableMeta);
+ if (code != TSDB_CODE_SUCCESS) {
+ uError("taosCreateTable:catalogGetTableMeta failed. table name: %s", pCreateReq->ctb.name);
+ goto end;
+ }
+
+ for (int32_t i = 0; i < taosArrayGetSize(pCreateReq->ctb.tagName); i++) {
+ char* tName = taosArrayGet(pCreateReq->ctb.tagName, i);
+ for (int32_t j = pTableMeta->tableInfo.numOfColumns;
+ j < pTableMeta->tableInfo.numOfColumns + pTableMeta->tableInfo.numOfTags; j++) {
+ SSchema* tag = &pTableMeta->schema[j];
+ if (strcmp(tag->name, tName) == 0 && tag->type != TSDB_DATA_TYPE_JSON) {
+ tTagSetCid((STag*)pCreateReq->ctb.pTag, i, tag->colId);
+ }
+ }
+ }
+ taosMemoryFreeClear(pTableMeta);
+ }
+
SVgroupCreateTableBatch* pTableBatch = taosHashGet(pVgroupHashmap, &pInfo.vgId, sizeof(pInfo.vgId));
if (pTableBatch == NULL) {
SVgroupCreateTableBatch tBatch = {0};
@@ -1299,12 +1324,13 @@ end:
return code;
}
-static int32_t tmqWriteRaw(TAOS* taos, void* data, int32_t dataLen) {
- int32_t code = TSDB_CODE_SUCCESS;
- SHashObj* pVgHash = NULL;
- SQuery* pQuery = NULL;
- SMqRspObj rspObj = {0};
- SDecoder decoder = {0};
+static int32_t tmqWriteRawDataImpl(TAOS* taos, void* data, int32_t dataLen) {
+ int32_t code = TSDB_CODE_SUCCESS;
+ SHashObj* pVgHash = NULL;
+ SQuery* pQuery = NULL;
+ SMqRspObj rspObj = {0};
+ SDecoder decoder = {0};
+ STableMeta* pTableMeta = NULL;
terrno = TSDB_CODE_SUCCESS;
SRequestObj* pRequest = (SRequestObj*)createRequest(*(int64_t*)taos, TSDB_SQL_INSERT);
@@ -1361,24 +1387,6 @@ static int32_t tmqWriteRaw(TAOS* taos, void* data, int32_t dataLen) {
goto end;
}
- uint16_t fLen = 0;
- int32_t rowSize = 0;
- int16_t nVar = 0;
- for (int i = 0; i < pSW->nCols; i++) {
- SSchema* schema = pSW->pSchema + i;
- fLen += TYPE_BYTES[schema->type];
- rowSize += schema->bytes;
- if (IS_VAR_DATA_TYPE(schema->type)) {
- nVar++;
- }
- }
-
- int32_t rows = rspObj.resInfo.numOfRows;
- int32_t extendedRowSize = rowSize + TD_ROW_HEAD_LEN - sizeof(TSKEY) + nVar * sizeof(VarDataOffsetT) +
- (int32_t)TD_BITMAP_BYTES(pSW->nCols - 1);
- int32_t schemaLen = 0;
- int32_t submitLen = sizeof(SSubmitBlk) + schemaLen + rows * extendedRowSize;
-
const char* tbName = (const char*)taosArrayGetP(rspObj.rsp.blockTbName, rspObj.resIter);
if (!tbName) {
uError("WriteRaw: tbname is null");
@@ -1398,6 +1406,35 @@ static int32_t tmqWriteRaw(TAOS* taos, void* data, int32_t dataLen) {
goto end;
}
+ code = catalogGetTableMeta(pCatalog, &conn, &pName, &pTableMeta);
+ if (code == TSDB_CODE_PAR_TABLE_NOT_EXIST) {
+ uError("WriteRaw:catalogGetTableMeta table not exist. table name: %s", tbName);
+ code = TSDB_CODE_SUCCESS;
+ continue;
+ }
+ if (code != TSDB_CODE_SUCCESS) {
+ uError("WriteRaw:catalogGetTableMeta failed. table name: %s", tbName);
+ goto end;
+ }
+
+ uint16_t fLen = 0;
+ int32_t rowSize = 0;
+ int16_t nVar = 0;
+ for (int i = 0; i < pTableMeta->tableInfo.numOfColumns; i++) {
+ SSchema* schema = &pTableMeta->schema[i];
+ fLen += TYPE_BYTES[schema->type];
+ rowSize += schema->bytes;
+ if (IS_VAR_DATA_TYPE(schema->type)) {
+ nVar++;
+ }
+ }
+
+ int32_t rows = rspObj.resInfo.numOfRows;
+ int32_t extendedRowSize = rowSize + TD_ROW_HEAD_LEN - sizeof(TSKEY) + nVar * sizeof(VarDataOffsetT) +
+ (int32_t)TD_BITMAP_BYTES(pTableMeta->tableInfo.numOfColumns - 1);
+ int32_t schemaLen = 0;
+ int32_t submitLen = sizeof(SSubmitBlk) + schemaLen + rows * extendedRowSize;
+
SSubmitReq* subReq = NULL;
SSubmitBlk* blk = NULL;
void* hData = taosHashGet(pVgHash, &vgData.vg.vgId, sizeof(vgData.vg.vgId));
@@ -1430,23 +1467,25 @@ static int32_t tmqWriteRaw(TAOS* taos, void* data, int32_t dataLen) {
blk = POINTER_SHIFT(vgData.data, sizeof(SSubmitReq));
}
- STableMeta* pTableMeta = NULL;
- code = catalogGetTableMeta(pCatalog, &conn, &pName, &pTableMeta);
- if (code != TSDB_CODE_SUCCESS) {
- uError("WriteRaw:catalogGetTableMeta failed. table name: %s", tbName);
- goto end;
- }
+ // pSW->pSchema should be same as pTableMeta->schema
+ // ASSERT(pSW->nCols == pTableMeta->tableInfo.numOfColumns);
uint64_t suid = (TSDB_NORMAL_TABLE == pTableMeta->tableType ? 0 : pTableMeta->suid);
uint64_t uid = pTableMeta->uid;
- taosMemoryFreeClear(pTableMeta);
+ int16_t sver = pTableMeta->sversion;
void* blkSchema = POINTER_SHIFT(blk, sizeof(SSubmitBlk));
STSRow* rowData = POINTER_SHIFT(blkSchema, schemaLen);
SRowBuilder rb = {0};
- tdSRowInit(&rb, pSW->version);
- tdSRowSetTpInfo(&rb, pSW->nCols, fLen);
- int32_t dataLen = 0;
+ tdSRowInit(&rb, sver);
+ tdSRowSetTpInfo(&rb, pTableMeta->tableInfo.numOfColumns, fLen);
+ int32_t totalLen = 0;
+
+ SHashObj* schemaHash = taosHashInit(16, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), false, HASH_NO_LOCK);
+ for (int i = 0; i < pSW->nCols; i++) {
+ SSchema* schema = &pSW->pSchema[i];
+ taosHashPut(schemaHash, schema->name, strlen(schema->name), &i, sizeof(int32_t));
+ }
for (int32_t j = 0; j < rows; j++) {
tdSRowResetBuf(&rb, rowData);
@@ -1455,33 +1494,41 @@ static int32_t tmqWriteRaw(TAOS* taos, void* data, int32_t dataLen) {
rspObj.resInfo.current += 1;
int32_t offset = 0;
- for (int32_t k = 0; k < pSW->nCols; k++) {
- const SSchema* pColumn = &pSW->pSchema[k];
- char* data = rspObj.resInfo.row[k];
- if (!data) {
+ for (int32_t k = 0; k < pTableMeta->tableInfo.numOfColumns; k++) {
+ const SSchema* pColumn = &pTableMeta->schema[k];
+ int32_t* index = taosHashGet(schemaHash, pColumn->name, strlen(pColumn->name));
+ if (!index) {
tdAppendColValToRow(&rb, pColumn->colId, pColumn->type, TD_VTYPE_NULL, NULL, false, offset, k);
} else {
- if (IS_VAR_DATA_TYPE(pColumn->type)) {
- data -= VARSTR_HEADER_SIZE;
+ char* colData = rspObj.resInfo.row[*index];
+ if (!colData) {
+ tdAppendColValToRow(&rb, pColumn->colId, pColumn->type, TD_VTYPE_NULL, NULL, false, offset, k);
+ } else {
+ if (IS_VAR_DATA_TYPE(pColumn->type)) {
+ colData -= VARSTR_HEADER_SIZE;
+ }
+ tdAppendColValToRow(&rb, pColumn->colId, pColumn->type, TD_VTYPE_NORM, colData, true, offset, k);
}
- tdAppendColValToRow(&rb, pColumn->colId, pColumn->type, TD_VTYPE_NORM, data, true, offset, k);
}
+
offset += TYPE_BYTES[pColumn->type];
}
tdSRowEnd(&rb);
int32_t rowLen = TD_ROW_LEN(rowData);
rowData = POINTER_SHIFT(rowData, rowLen);
- dataLen += rowLen;
+ totalLen += rowLen;
}
+ taosHashCleanup(schemaHash);
blk->uid = htobe64(uid);
blk->suid = htobe64(suid);
- blk->sversion = htonl(pSW->version);
+ blk->sversion = htonl(sver);
blk->schemaLen = htonl(schemaLen);
blk->numOfRows = htonl(rows);
- blk->dataLen = htonl(dataLen);
- subReq->length += sizeof(SSubmitBlk) + schemaLen + dataLen;
+ blk->dataLen = htonl(totalLen);
+ subReq->length += sizeof(SSubmitBlk) + schemaLen + totalLen;
subReq->numOfBlocks++;
+ taosMemoryFreeClear(pTableMeta);
}
pQuery = (SQuery*)nodesMakeNode(QUERY_NODE_QUERY);
@@ -1535,6 +1582,7 @@ end:
qDestroyQuery(pQuery);
destroyRequest(pRequest);
taosHashCleanup(pVgHash);
+ taosMemoryFreeClear(pTableMeta);
return code;
}
@@ -1622,7 +1670,7 @@ int32_t tmq_write_raw(TAOS* taos, tmq_raw_data raw) {
} else if (raw.raw_type == TDMT_VND_DELETE) {
return taosDeleteData(taos, raw.raw, raw.raw_len);
} else if (raw.raw_type == RES_TYPE__TMQ) {
- return tmqWriteRaw(taos, raw.raw, raw.raw_len);
+ return tmqWriteRawDataImpl(taos, raw.raw, raw.raw_len);
}
return TSDB_CODE_INVALID_PARA;
}
diff --git a/source/client/src/clientSml.c b/source/client/src/clientSml.c
index 9f905a835241d54722cf3e15056d1d1019123dcf..f2493f6c57daf5654524f2c0e4f49d14df40b022 100644
--- a/source/client/src/clientSml.c
+++ b/source/client/src/clientSml.c
@@ -85,8 +85,11 @@ typedef TSDB_SML_PROTOCOL_TYPE SMLProtocolType;
typedef enum {
SCHEMA_ACTION_NULL,
- SCHEMA_ACTION_COLUMN,
- SCHEMA_ACTION_TAG
+ SCHEMA_ACTION_CREATE_STABLE,
+ SCHEMA_ACTION_ADD_COLUMN,
+ SCHEMA_ACTION_ADD_TAG,
+ SCHEMA_ACTION_CHANGE_COLUMN_SIZE,
+ SCHEMA_ACTION_CHANGE_TAG_SIZE,
} ESchemaAction;
typedef struct {
@@ -219,7 +222,7 @@ static int32_t smlBuildInvalidDataMsg(SSmlMsgBuf *pBuf, const char *msg1, const
static int32_t smlGenerateSchemaAction(SSchema *colField, SHashObj *colHash, SSmlKv *kv, bool isTag,
ESchemaAction *action, SSmlHandle *info) {
- uint16_t *index = (uint16_t *)taosHashGet(colHash, kv->key, kv->keyLen);
+ uint16_t *index = colHash ? (uint16_t *)taosHashGet(colHash, kv->key, kv->keyLen) : NULL;
if (index) {
if (colField[*index].type != kv->type) {
uError("SML:0x%" PRIx64 " point type and db type mismatch. key: %s. point type: %d, db type: %d", info->id,
@@ -232,16 +235,16 @@ static int32_t smlGenerateSchemaAction(SSchema *colField, SHashObj *colHash, SSm
(colField[*index].type == TSDB_DATA_TYPE_NCHAR &&
((colField[*index].bytes - VARSTR_HEADER_SIZE) / TSDB_NCHAR_SIZE < kv->length))) {
if (isTag) {
- *action = SCHEMA_ACTION_TAG;
+ *action = SCHEMA_ACTION_CHANGE_TAG_SIZE;
} else {
- *action = SCHEMA_ACTION_COLUMN;
+ *action = SCHEMA_ACTION_CHANGE_COLUMN_SIZE;
}
}
} else {
if (isTag) {
- *action = SCHEMA_ACTION_TAG;
+ *action = SCHEMA_ACTION_ADD_TAG;
} else {
- *action = SCHEMA_ACTION_COLUMN;
+ *action = SCHEMA_ACTION_ADD_COLUMN;
}
}
return 0;
@@ -310,9 +313,31 @@ static int32_t getBytes(uint8_t type, int32_t length){
}
}
+static int32_t smlBuildFieldsList(SSmlHandle *info, SSchema *schemaField, SHashObj *schemaHash, SArray *cols, SArray* results, int32_t numOfCols, bool isTag) {
+ for (int j = 0; j < taosArrayGetSize(cols); ++j) {
+ SSmlKv *kv = (SSmlKv *)taosArrayGetP(cols, j);
+ ESchemaAction action = SCHEMA_ACTION_NULL;
+ smlGenerateSchemaAction(schemaField, schemaHash, kv, isTag, &action, info);
+ if(action == SCHEMA_ACTION_ADD_COLUMN || action == SCHEMA_ACTION_ADD_TAG){
+ SField field = {0};
+ field.type = kv->type;
+ field.bytes = getBytes(kv->type, kv->length);
+ memcpy(field.name, kv->key, kv->keyLen);
+ taosArrayPush(results, &field);
+ }else if(action == SCHEMA_ACTION_CHANGE_COLUMN_SIZE || action == SCHEMA_ACTION_CHANGE_TAG_SIZE){
+ uint16_t *index = (uint16_t *)taosHashGet(schemaHash, kv->key, kv->keyLen);
+ uint16_t newIndex = *index;
+ if(isTag) newIndex -= numOfCols;
+ SField *field = (SField *)taosArrayGet(results, newIndex);
+ field->bytes = getBytes(kv->type, kv->length);
+ }
+ }
+ return TSDB_CODE_SUCCESS;
+}
+
//static int32_t smlSendMetaMsg(SSmlHandle *info, SName *pName, SSmlSTableMeta *sTableData,
// int32_t colVer, int32_t tagVer, int8_t source, uint64_t suid){
-static int32_t smlSendMetaMsg(SSmlHandle *info, SName *pName, SSmlSTableMeta *sTableData,
+static int32_t smlSendMetaMsg(SSmlHandle *info, SName *pName, SArray* pColumns, SArray* pTags,
STableMeta *pTableMeta, ESchemaAction action){
SRequestObj* pRequest = NULL;
@@ -320,101 +345,58 @@ static int32_t smlSendMetaMsg(SSmlHandle *info, SName *pName, SSmlSTableMeta *s
int32_t code = TSDB_CODE_SUCCESS;
SCmdMsgInfo pCmdMsg = {0};
+ // put front for free
+ pReq.numOfColumns = taosArrayGetSize(pColumns);
+ pReq.pColumns = pColumns;
+ pReq.numOfTags = taosArrayGetSize(pTags);
+ pReq.pTags = pTags;
+
code = buildRequest(info->taos->id, "", 0, NULL, false, &pRequest);
if (code != TSDB_CODE_SUCCESS) {
goto end;
}
+ pRequest->syncQuery = true;
if (!pRequest->pDb) {
code = TSDB_CODE_PAR_DB_NOT_SPECIFIED;
goto end;
}
- if (action == SCHEMA_ACTION_NULL){
+ if (action == SCHEMA_ACTION_CREATE_STABLE){
pReq.colVer = 1;
pReq.tagVer = 1;
pReq.suid = 0;
pReq.source = TD_REQ_FROM_APP;
- } else if (action == SCHEMA_ACTION_TAG){
+ } else if (action == SCHEMA_ACTION_ADD_TAG || action == SCHEMA_ACTION_CHANGE_TAG_SIZE){
pReq.colVer = pTableMeta->sversion;
pReq.tagVer = pTableMeta->tversion + 1;
pReq.suid = pTableMeta->uid;
pReq.source = TD_REQ_FROM_TAOX;
- } else if (action == SCHEMA_ACTION_COLUMN){
+ } else if (action == SCHEMA_ACTION_ADD_COLUMN || action == SCHEMA_ACTION_CHANGE_COLUMN_SIZE){
pReq.colVer = pTableMeta->sversion + 1;
pReq.tagVer = pTableMeta->tversion;
pReq.suid = pTableMeta->uid;
pReq.source = TD_REQ_FROM_TAOX;
}
+ if (pReq.numOfTags == 0){
+ pReq.numOfTags = 1;
+ SField field = {0};
+ field.type = TSDB_DATA_TYPE_NCHAR;
+ field.bytes = 1;
+ strcpy(field.name, tsSmlTagName);
+ taosArrayPush(pReq.pTags, &field);
+ }
+
pReq.commentLen = -1;
pReq.igExists = true;
tNameExtractFullName(pName, pReq.name);
- if(action == SCHEMA_ACTION_NULL || action == SCHEMA_ACTION_COLUMN){
- pReq.numOfColumns = taosArrayGetSize(sTableData->cols);
- pReq.pColumns = taosArrayInit(pReq.numOfColumns, sizeof(SField));
- for (int i = 0; i < pReq.numOfColumns; i++) {
- SSmlKv *kv = (SSmlKv *)taosArrayGetP(sTableData->cols, i);
- SField field = {0};
- field.type = kv->type;
- field.bytes = getBytes(kv->type, kv->length);
- memcpy(field.name, kv->key, kv->keyLen);
- taosArrayPush(pReq.pColumns, &field);
- }
- }else if (action == SCHEMA_ACTION_TAG){
- pReq.numOfColumns = pTableMeta->tableInfo.numOfColumns;
- pReq.pColumns = taosArrayInit(pReq.numOfColumns, sizeof(SField));
- for (int i = 0; i < pReq.numOfColumns; i++) {
- SSchema *s = &pTableMeta->schema[i];
- SField field = {0};
- field.type = s->type;
- field.bytes = s->bytes;
- strcpy(field.name, s->name);
- taosArrayPush(pReq.pColumns, &field);
- }
- }
-
- if(action == SCHEMA_ACTION_NULL || action == SCHEMA_ACTION_TAG){
- pReq.numOfTags = taosArrayGetSize(sTableData->tags);
- if (pReq.numOfTags == 0){
- pReq.numOfTags = 1;
- pReq.pTags = taosArrayInit(pReq.numOfTags, sizeof(SField));
- SField field = {0};
- field.type = TSDB_DATA_TYPE_NCHAR;
- field.bytes = 1;
- strcpy(field.name, tsSmlTagName);
- taosArrayPush(pReq.pTags, &field);
- }else{
- pReq.pTags = taosArrayInit(pReq.numOfTags, sizeof(SField));
- for (int i = 0; i < pReq.numOfTags; i++) {
- SSmlKv *kv = (SSmlKv *)taosArrayGetP(sTableData->tags, i);
- SField field = {0};
- field.type = kv->type;
- field.bytes = getBytes(kv->type, kv->length);
- memcpy(field.name, kv->key, kv->keyLen);
- taosArrayPush(pReq.pTags, &field);
- }
- }
- }else if (action == SCHEMA_ACTION_COLUMN){
- pReq.numOfTags = pTableMeta->tableInfo.numOfTags;
- pReq.pTags = taosArrayInit(pReq.numOfTags, sizeof(SField));
- for (int i = 0; i < pReq.numOfTags; i++) {
- SSchema *s = &pTableMeta->schema[i + pTableMeta->tableInfo.numOfColumns];
- SField field = {0};
- field.type = s->type;
- field.bytes = s->bytes;
- strcpy(field.name, s->name);
- taosArrayPush(pReq.pTags, &field);
- }
- }
-
pCmdMsg.epSet = getEpSet_s(&info->taos->pAppInfo->mgmtEp);
pCmdMsg.msgType = TDMT_MND_CREATE_STB;
pCmdMsg.msgLen = tSerializeSMCreateStbReq(NULL, 0, &pReq);
pCmdMsg.pMsg = taosMemoryMalloc(pCmdMsg.msgLen);
if (NULL == pCmdMsg.pMsg) {
- tFreeSMCreateStbReq(&pReq);
code = TSDB_CODE_OUT_OF_MEMORY;
goto end;
}
@@ -442,7 +424,10 @@ end:
}
static int32_t smlModifyDBSchemas(SSmlHandle *info) {
- int32_t code = 0;
+ int32_t code = 0;
+ SHashObj *hashTmp = NULL;
+ STableMeta *pTableMeta = NULL;
+
SName pName = {TSDB_TABLE_NAME_T, info->taos->acctId, {0}, {0}};
strcpy(pName.dbname, info->pRequest->pDb);
@@ -455,7 +440,6 @@ static int32_t smlModifyDBSchemas(SSmlHandle *info) {
SSmlSTableMeta **tableMetaSml = (SSmlSTableMeta **)taosHashIterate(info->superTables, NULL);
while (tableMetaSml) {
SSmlSTableMeta *sTableData = *tableMetaSml;
- STableMeta *pTableMeta = NULL;
bool needCheckMeta = false; // for multi thread
size_t superTableLen = 0;
@@ -466,14 +450,19 @@ static int32_t smlModifyDBSchemas(SSmlHandle *info) {
code = catalogGetSTableMeta(info->pCatalog, &conn, &pName, &pTableMeta);
if (code == TSDB_CODE_PAR_TABLE_NOT_EXIST || code == TSDB_CODE_MND_STB_NOT_EXIST) {
- code = smlSendMetaMsg(info, &pName, sTableData, NULL, SCHEMA_ACTION_NULL);
+ SArray* pColumns = taosArrayInit(taosArrayGetSize(sTableData->cols), sizeof(SField));
+ SArray* pTags = taosArrayInit(taosArrayGetSize(sTableData->tags), sizeof(SField));
+ smlBuildFieldsList(info, NULL, NULL, sTableData->tags, pTags, 0, true);
+ smlBuildFieldsList(info, NULL, NULL, sTableData->cols, pColumns, 0, false);
+
+ code = smlSendMetaMsg(info, &pName, pColumns, pTags, NULL, SCHEMA_ACTION_CREATE_STABLE);
if (code != TSDB_CODE_SUCCESS) {
- uError("SML:0x%" PRIx64 " smlSendMetaMsg failed. can not create %s", info->id, superTable);
+ uError("SML:0x%" PRIx64 " smlSendMetaMsg failed. can not create %s", info->id, pName.tname);
goto end;
}
info->cost.numOfCreateSTables++;
} else if (code == TSDB_CODE_SUCCESS) {
- SHashObj *hashTmp = taosHashInit(pTableMeta->tableInfo.numOfTags,
+ hashTmp = taosHashInit(pTableMeta->tableInfo.numOfTags,
taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, HASH_NO_LOCK);
for (uint16_t i = pTableMeta->tableInfo.numOfColumns;
i < pTableMeta->tableInfo.numOfColumns + pTableMeta->tableInfo.numOfTags; i++) {
@@ -483,36 +472,72 @@ static int32_t smlModifyDBSchemas(SSmlHandle *info) {
ESchemaAction action = SCHEMA_ACTION_NULL;
code = smlProcessSchemaAction(info, pTableMeta->schema, hashTmp, sTableData->tags, &action, true);
if (code != TSDB_CODE_SUCCESS) {
- taosHashCleanup(hashTmp);
goto end;
}
- if (action == SCHEMA_ACTION_TAG){
- code = smlSendMetaMsg(info, &pName, sTableData, pTableMeta, action);
+ if (action != SCHEMA_ACTION_NULL){
+ SArray* pColumns = taosArrayInit(taosArrayGetSize(sTableData->cols) + pTableMeta->tableInfo.numOfColumns, sizeof(SField));
+ SArray* pTags = taosArrayInit(taosArrayGetSize(sTableData->tags) + pTableMeta->tableInfo.numOfTags, sizeof(SField));
+
+ for (uint16_t i = 0; i < pTableMeta->tableInfo.numOfColumns + pTableMeta->tableInfo.numOfTags; i++) {
+ SField field = {0};
+ field.type = pTableMeta->schema[i].type;
+ field.bytes = pTableMeta->schema[i].bytes;
+ strcpy(field.name, pTableMeta->schema[i].name);
+ if(i < pTableMeta->tableInfo.numOfColumns){
+ taosArrayPush(pColumns, &field);
+ }else{
+ taosArrayPush(pTags, &field);
+ }
+ }
+ smlBuildFieldsList(info, pTableMeta->schema, hashTmp, sTableData->tags, pTags, pTableMeta->tableInfo.numOfColumns, true);
+
+ code = smlSendMetaMsg(info, &pName, pColumns, pTags, pTableMeta, action);
if (code != TSDB_CODE_SUCCESS) {
- uError("SML:0x%" PRIx64 " smlSendMetaMsg failed. can not create %s", info->id, superTable);
+ uError("SML:0x%" PRIx64 " smlSendMetaMsg failed. can not create %s", info->id, pName.tname);
goto end;
}
}
+ taosMemoryFreeClear(pTableMeta);
code = catalogRefreshTableMeta(info->pCatalog, &conn, &pName, -1);
if (code != TSDB_CODE_SUCCESS) {
goto end;
}
+ code = catalogGetSTableMeta(info->pCatalog, &conn, &pName, &pTableMeta);
+ if (code != TSDB_CODE_SUCCESS) {
+ goto end;
+ }
taosHashClear(hashTmp);
- for (uint16_t i = 1; i < pTableMeta->tableInfo.numOfColumns; i++) {
+ for (uint16_t i = 0; i < pTableMeta->tableInfo.numOfColumns; i++) {
taosHashPut(hashTmp, pTableMeta->schema[i].name, strlen(pTableMeta->schema[i].name), &i, SHORT_BYTES);
}
action = SCHEMA_ACTION_NULL;
code = smlProcessSchemaAction(info, pTableMeta->schema, hashTmp, sTableData->cols, &action, false);
- taosHashCleanup(hashTmp);
if (code != TSDB_CODE_SUCCESS) {
goto end;
}
- if (action == SCHEMA_ACTION_COLUMN){
- code = smlSendMetaMsg(info, &pName, sTableData, pTableMeta, action);
+ if (action != SCHEMA_ACTION_NULL){
+ SArray* pColumns = taosArrayInit(taosArrayGetSize(sTableData->cols) + pTableMeta->tableInfo.numOfColumns, sizeof(SField));
+ SArray* pTags = taosArrayInit(taosArrayGetSize(sTableData->tags) + pTableMeta->tableInfo.numOfTags, sizeof(SField));
+
+ for (uint16_t i = 0; i < pTableMeta->tableInfo.numOfColumns + pTableMeta->tableInfo.numOfTags; i++) {
+ SField field = {0};
+ field.type = pTableMeta->schema[i].type;
+ field.bytes = pTableMeta->schema[i].bytes;
+ strcpy(field.name, pTableMeta->schema[i].name);
+ if(i < pTableMeta->tableInfo.numOfColumns){
+ taosArrayPush(pColumns, &field);
+ }else{
+ taosArrayPush(pTags, &field);
+ }
+ }
+
+ smlBuildFieldsList(info, pTableMeta->schema, hashTmp, sTableData->cols, pColumns, pTableMeta->tableInfo.numOfColumns, false);
+
+ code = smlSendMetaMsg(info, &pName, pColumns, pTags, pTableMeta, action);
if (code != TSDB_CODE_SUCCESS) {
- uError("SML:0x%" PRIx64 " smlSendMetaMsg failed. can not create %s", info->id, superTable);
+ uError("SML:0x%" PRIx64 " smlSendMetaMsg failed. can not create %s", info->id, pName.tname);
goto end;
}
}
@@ -522,15 +547,17 @@ static int32_t smlModifyDBSchemas(SSmlHandle *info) {
goto end;
}
needCheckMeta = true;
+ taosHashCleanup(hashTmp);
+ hashTmp = NULL;
} else {
uError("SML:0x%" PRIx64 " load table meta error: %s", info->id, tstrerror(code));
goto end;
}
- if (pTableMeta) taosMemoryFree(pTableMeta);
+ taosMemoryFreeClear(pTableMeta);
code = catalogGetSTableMeta(info->pCatalog, &conn, &pName, &pTableMeta);
if (code != TSDB_CODE_SUCCESS) {
- uError("SML:0x%" PRIx64 " catalogGetSTableMeta failed. super table name %s", info->id, (char *)superTable);
+ uError("SML:0x%" PRIx64 " catalogGetSTableMeta failed. super table name %s", info->id, pName.tname);
goto end;
}
@@ -538,12 +565,12 @@ static int32_t smlModifyDBSchemas(SSmlHandle *info) {
code = smlCheckMeta(&(pTableMeta->schema[pTableMeta->tableInfo.numOfColumns]), pTableMeta->tableInfo.numOfTags,
sTableData->tags, true);
if (code != TSDB_CODE_SUCCESS) {
- uError("SML:0x%" PRIx64 " check tag failed. super table name %s", info->id, (char *)superTable);
+ uError("SML:0x%" PRIx64 " check tag failed. super table name %s", info->id, pName.tname);
goto end;
}
code = smlCheckMeta(&(pTableMeta->schema[0]), pTableMeta->tableInfo.numOfColumns, sTableData->cols, false);
if (code != TSDB_CODE_SUCCESS) {
- uError("SML:0x%" PRIx64 " check cols failed. super table name %s", info->id, (char *)superTable);
+ uError("SML:0x%" PRIx64 " check cols failed. super table name %s", info->id, pName.tname);
goto end;
}
}
@@ -555,6 +582,8 @@ static int32_t smlModifyDBSchemas(SSmlHandle *info) {
return 0;
end:
+ taosHashCleanup(hashTmp);
+ taosMemoryFreeClear(pTableMeta);
catalogRefreshTableMeta(info->pCatalog, &conn, &pName, 1);
return code;
}
@@ -1531,7 +1560,7 @@ cleanup:
/************* TSDB_SML_JSON_PROTOCOL function start **************/
static int32_t smlJsonCreateSring(const char **output, char *input, int32_t inputLen) {
- *output = (const char *)taosMemoryMalloc(inputLen);
+ *output = (const char *)taosMemoryCalloc(1, inputLen);
if (*output == NULL) {
return TSDB_CODE_TSC_OUT_OF_MEMORY;
}
@@ -2057,10 +2086,6 @@ static int32_t smlParseInfluxLine(SSmlHandle *info, const char *sql) {
if (info->dataFormat) taosArrayDestroy(cols);
return ret;
}
- if (taosArrayGetSize(cols) > TSDB_MAX_COLUMNS) {
- smlBuildInvalidDataMsg(&info->msgBuf, "too many columns than 4096", NULL);
- return TSDB_CODE_PAR_TOO_MANY_COLUMNS;
- }
bool hasTable = true;
SSmlTableInfo *tinfo = NULL;
@@ -2094,6 +2119,11 @@ static int32_t smlParseInfluxLine(SSmlHandle *info, const char *sql) {
return TSDB_CODE_PAR_INVALID_TAGS_NUM;
}
+ if (taosArrayGetSize(cols) + taosArrayGetSize((*oneTable)->tags) > TSDB_MAX_COLUMNS) {
+ smlBuildInvalidDataMsg(&info->msgBuf, "too many columns than 4096", NULL);
+ return TSDB_CODE_PAR_TOO_MANY_COLUMNS;
+ }
+
(*oneTable)->sTableName = elements.measure;
(*oneTable)->sTableNameLen = elements.measureLen;
if (strlen((*oneTable)->childTableName) == 0) {
@@ -2421,9 +2451,11 @@ static void smlInsertCallback(void *param, void *res, int32_t code) {
uDebug("SML:0x%" PRIx64 " result. code:%d, msg:%s", info->id, pRequest->code, pRequest->msgBuf);
// lock
taosThreadSpinLock(&info->params->lock);
- info->params->request->body.resInfo.numOfRows += rows;
if (code != TSDB_CODE_SUCCESS) {
info->params->request->code = code;
+ info->params->request->body.resInfo.numOfRows += rows;
+ }else{
+ info->params->request->body.resInfo.numOfRows += info->affectedRows;
}
taosThreadSpinUnlock(&info->params->lock);
// unlock
diff --git a/source/client/src/tmq.c b/source/client/src/clientTmq.c
similarity index 94%
rename from source/client/src/tmq.c
rename to source/client/src/clientTmq.c
index 7637ffbc80baa3f4e67b4a4fc27bc57adb8b7d3a..9f9a14952e75bdac29564c39dd4ef60da0d07ef0 100644
--- a/source/client/src/tmq.c
+++ b/source/client/src/clientTmq.c
@@ -164,6 +164,7 @@ typedef struct {
union {
SMqDataRsp dataRsp;
SMqMetaRsp metaRsp;
+ STaosxRsp taosxRsp;
};
} SMqPollRspWrapper;
@@ -810,8 +811,19 @@ int32_t tmq_subscription(tmq_t* tmq, tmq_list_t** topics) {
}
int32_t tmq_unsubscribe(tmq_t* tmq) {
+ int32_t rsp;
+ int32_t retryCnt = 0;
tmq_list_t* lst = tmq_list_new();
- int32_t rsp = tmq_subscribe(tmq, lst);
+ while (1) {
+ rsp = tmq_subscribe(tmq, lst);
+ if (rsp != TSDB_CODE_MND_CONSUMER_NOT_READY || retryCnt > 5) {
+ break;
+ } else {
+ retryCnt++;
+ taosMsleep(500);
+ }
+ }
+
tmq_list_destroy(lst);
return rsp;
}
@@ -1130,18 +1142,29 @@ int32_t tmqPollCb(void* param, SDataBuf* pMsg, int32_t code) {
tDecodeSMqDataRsp(&decoder, &pRspWrapper->dataRsp);
tDecoderClear(&decoder);
memcpy(&pRspWrapper->dataRsp, pMsg->pData, sizeof(SMqRspHead));
- } else {
- ASSERT(rspType == TMQ_MSG_TYPE__POLL_META_RSP);
- tDecodeSMqMetaRsp(POINTER_SHIFT(pMsg->pData, sizeof(SMqRspHead)), &pRspWrapper->metaRsp);
+
+ tscDebug("consumer:%" PRId64 ", recv poll: vgId:%d, req offset %" PRId64 ", rsp offset %" PRId64 " type %d",
+ tmq->consumerId, pVg->vgId, pRspWrapper->dataRsp.reqOffset.version, pRspWrapper->dataRsp.rspOffset.version,
+ rspType);
+
+ } else if (rspType == TMQ_MSG_TYPE__POLL_META_RSP) {
+ SDecoder decoder;
+ tDecoderInit(&decoder, POINTER_SHIFT(pMsg->pData, sizeof(SMqRspHead)), pMsg->len - sizeof(SMqRspHead));
+ tDecodeSMqMetaRsp(&decoder, &pRspWrapper->metaRsp);
+ tDecoderClear(&decoder);
memcpy(&pRspWrapper->metaRsp, pMsg->pData, sizeof(SMqRspHead));
+ } else if (rspType == TMQ_MSG_TYPE__TAOSX_RSP) {
+ SDecoder decoder;
+ tDecoderInit(&decoder, POINTER_SHIFT(pMsg->pData, sizeof(SMqRspHead)), pMsg->len - sizeof(SMqRspHead));
+ tDecodeSTaosxRsp(&decoder, &pRspWrapper->taosxRsp);
+ tDecoderClear(&decoder);
+ memcpy(&pRspWrapper->taosxRsp, pMsg->pData, sizeof(SMqRspHead));
+ } else {
+ ASSERT(0);
}
taosMemoryFree(pMsg->pData);
- tscDebug("consumer:%" PRId64 ", recv poll: vgId:%d, req offset %" PRId64 ", rsp offset %" PRId64 " type %d",
- tmq->consumerId, pVg->vgId, pRspWrapper->dataRsp.reqOffset.version, pRspWrapper->dataRsp.rspOffset.version,
- rspType);
-
taosWriteQitem(tmq->mqueue, pRspWrapper);
tsem_post(&tmq->rspSem);
@@ -1440,6 +1463,24 @@ SMqRspObj* tmqBuildRspFromWrapper(SMqPollRspWrapper* pWrapper) {
return pRspObj;
}
+SMqTaosxRspObj* tmqBuildTaosxRspFromWrapper(SMqPollRspWrapper* pWrapper) {
+ SMqTaosxRspObj* pRspObj = taosMemoryCalloc(1, sizeof(SMqTaosxRspObj));
+ pRspObj->resType = RES_TYPE__TAOSX;
+ tstrncpy(pRspObj->topic, pWrapper->topicHandle->topicName, TSDB_TOPIC_FNAME_LEN);
+ tstrncpy(pRspObj->db, pWrapper->topicHandle->db, TSDB_DB_FNAME_LEN);
+ pRspObj->vgId = pWrapper->vgHandle->vgId;
+ pRspObj->resIter = -1;
+ memcpy(&pRspObj->rsp, &pWrapper->dataRsp, sizeof(SMqTaosxRspObj));
+
+ pRspObj->resInfo.totalRows = 0;
+ pRspObj->resInfo.precision = TSDB_TIME_PRECISION_MILLI;
+ if (!pWrapper->dataRsp.withSchema) {
+ setResSchemaInfo(&pRspObj->resInfo, pWrapper->topicHandle->schema.pSchema, pWrapper->topicHandle->schema.nCols);
+ }
+
+ return pRspObj;
+}
+
int32_t tmqPollImpl(tmq_t* tmq, int64_t timeout) {
/*tscDebug("call poll");*/
for (int i = 0; i < taosArrayGetSize(tmq->clientTopics); i++) {
@@ -1581,8 +1622,7 @@ void* tmqHandleAllRsp(tmq_t* tmq, int64_t timeout, bool pollIfReset) {
SMqClientVg* pVg = pollRspWrapper->vgHandle;
/*printf("vgId:%d, offset %" PRId64 " up to %" PRId64 "\n", pVg->vgId, pVg->currentOffset,
* rspMsg->msg.rspOffset);*/
- pVg->currentOffset.version = pollRspWrapper->metaRsp.rspOffset;
- pVg->currentOffset.type = TMQ_OFFSET__LOG;
+ pVg->currentOffset = pollRspWrapper->metaRsp.rspOffset;
atomic_store_32(&pVg->vgStatus, TMQ_VG_STATUS__IDLE);
// build rsp
SMqMetaRspObj* pRsp = tmqBuildMetaRspFromWrapper(pollRspWrapper);
@@ -1593,6 +1633,30 @@ void* tmqHandleAllRsp(tmq_t* tmq, int64_t timeout, bool pollIfReset) {
pollRspWrapper->metaRsp.head.epoch, consumerEpoch);
taosFreeQitem(pollRspWrapper);
}
+ } else if (rspWrapper->tmqRspType == TMQ_MSG_TYPE__TAOSX_RSP) {
+ SMqPollRspWrapper* pollRspWrapper = (SMqPollRspWrapper*)rspWrapper;
+ /*atomic_sub_fetch_32(&tmq->readyRequest, 1);*/
+ int32_t consumerEpoch = atomic_load_32(&tmq->epoch);
+ if (pollRspWrapper->taosxRsp.head.epoch == consumerEpoch) {
+ SMqClientVg* pVg = pollRspWrapper->vgHandle;
+ /*printf("vgId:%d, offset %" PRId64 " up to %" PRId64 "\n", pVg->vgId, pVg->currentOffset,
+ * rspMsg->msg.rspOffset);*/
+ pVg->currentOffset = pollRspWrapper->taosxRsp.rspOffset;
+ atomic_store_32(&pVg->vgStatus, TMQ_VG_STATUS__IDLE);
+ if (pollRspWrapper->taosxRsp.blockNum == 0) {
+ taosFreeQitem(pollRspWrapper);
+ rspWrapper = NULL;
+ continue;
+ }
+ // build rsp
+ SMqRspObj* pRsp = tmqBuildRspFromWrapper(pollRspWrapper);
+ taosFreeQitem(pollRspWrapper);
+ return pRsp;
+ } else {
+ tscDebug("msg discard since epoch mismatch: msg epoch %d, consumer epoch %d\n",
+ pollRspWrapper->taosxRsp.head.epoch, consumerEpoch);
+ taosFreeQitem(pollRspWrapper);
+ }
} else {
/*printf("handle ep rsp %d\n", rspMsg->head.mqMsgType);*/
bool reset = false;
@@ -1705,9 +1769,11 @@ tmq_res_t tmq_get_res_type(TAOS_RES* res) {
} else if (TD_RES_TMQ_META(res)) {
SMqMetaRspObj* pMetaRspObj = (SMqMetaRspObj*)res;
if (pMetaRspObj->metaRsp.resMsgType == TDMT_VND_DELETE) {
- return TMQ_RES_DATA;
+ return TMQ_RES_TAOSX;
}
return TMQ_RES_TABLE_META;
+ } else if (TD_RES_TMQ_TAOSX(res)) {
+ return TMQ_RES_DATA;
} else {
return TMQ_RES_INVALID;
}
diff --git a/source/client/test/smlTest.cpp b/source/client/test/smlTest.cpp
index 68a8b9d336ae49e34c3dab28d3fdad6d3f27e9d4..b62238ccf26c991a516313270889a05a5b87d6ee 100644
--- a/source/client/test/smlTest.cpp
+++ b/source/client/test/smlTest.cpp
@@ -692,3 +692,52 @@ TEST(testCase, smlParseTelnetLine_diff_json_type2_Test) {
ASSERT_NE(ret, 0);
smlDestroyInfo(info);
}
+
+TEST(testCase, sml_col_4096_Test) {
+ SSmlHandle *info = smlBuildSmlInfo(NULL, NULL, TSDB_SML_LINE_PROTOCOL, TSDB_SML_TIMESTAMP_NANO_SECONDS);
+ ASSERT_NE(info, nullptr);
+
+ const char *sql[] = {
+ "spgwgvldxv,id=spgwgvldxv_1,t0=f c0=t,c1=t,c2=t,c3=t,c4=t,c5=t,c6=t,c7=t,c8=t,c9=t,c10=t,c11=t,c12=t,c13=t,c14=t,c15=t,c16=t,c17=t,c18=t,c19=t,c20=t,c21=t,c22=t,c23=t,c24=t,c25=t,c26=t,c27=t,c28=t,c29=t,c30=t,c31=t,c32=t,c33=t,c34=t,c35=t,c36=t,c37=t,c38=t,c39=t,c40=t,c41=t,c42=t,c43=t,c44=t,c45=t,c46=t,c47=t,c48=t,c49=t,c50=t,c51=t,c52=t,c53=t,c54=t,c55=t,c56=t,c57=t,c58=t,c59=t,c60=t,c61=t,c62=t,c63=t,c64=t,c65=t,c66=t,c67=t,c68=t,c69=t,c70=t,c71=t,c72=t,c73=t,c74=t,c75=t,c76=t,c77=t,c78=t,c79=t,c80=t,c81=t,c82=t,c83=t,c84=t,c85=t,c86=t,c87=t,c88=t,c89=t,c90=t,c91=t,c92=t,c93=t,c94=t,c95=t,c96=t,c97=t,c98=t,c99=t,c100=t,"
+ "c101=t,c102=t,c103=t,c104=t,c105=t,c106=t,c107=t,c108=t,c109=t,c110=t,c111=t,c112=t,c113=t,c114=t,c115=t,c116=t,c117=t,c118=t,c119=t,c120=t,c121=t,c122=t,c123=t,c124=t,c125=t,c126=t,c127=t,c128=t,c129=t,c130=t,c131=t,c132=t,c133=t,c134=t,c135=t,c136=t,c137=t,c138=t,c139=t,c140=t,c141=t,c142=t,c143=t,c144=t,c145=t,c146=t,c147=t,c148=t,c149=t,c150=t,c151=t,c152=t,c153=t,c154=t,c155=t,c156=t,c157=t,c158=t,c159=t,c160=t,c161=t,c162=t,c163=t,c164=t,c165=t,c166=t,c167=t,c168=t,c169=t,c170=t,c171=t,c172=t,c173=t,c174=t,c175=t,c176=t,c177=t,c178=t,c179=t,c180=t,c181=t,c182=t,c183=t,c184=t,c185=t,c186=t,c187=t,c188=t,c189=t,"
+ "c190=t,c191=t,c192=t,c193=t,c194=t,c195=t,c196=t,c197=t,c198=t,c199=t,c200=t,c201=t,c202=t,c203=t,c204=t,c205=t,c206=t,c207=t,c208=t,c209=t,c210=t,c211=t,c212=t,c213=t,c214=t,c215=t,c216=t,c217=t,c218=t,c219=t,c220=t,c221=t,c222=t,c223=t,c224=t,c225=t,c226=t,c227=t,c228=t,c229=t,c230=t,c231=t,c232=t,c233=t,c234=t,c235=t,c236=t,c237=t,c238=t,c239=t,c240=t,c241=t,c242=t,c243=t,c244=t,c245=t,c246=t,c247=t,c248=t,c249=t,c250=t,c251=t,c252=t,c253=t,c254=t,c255=t,c256=t,c257=t,c258=t,c259=t,c260=t,c261=t,c262=t,c263=t,c264=t,c265=t,c266=t,c267=t,c268=t,c269=t,c270=t,c271=t,c272=t,c273=t,c274=t,c275=t,c276=t,c277=t,c278=t,"
+ "c279=t,c280=t,c281=t,c282=t,c283=t,c284=t,c285=t,c286=t,c287=t,c288=t,c289=t,c290=t,c291=t,c292=t,c293=t,c294=t,c295=t,c296=t,c297=t,c298=t,c299=t,c300=t,c301=t,c302=t,c303=t,c304=t,c305=t,c306=t,c307=t,c308=t,c309=t,c310=t,c311=t,c312=t,c313=t,c314=t,c315=t,c316=t,c317=t,c318=t,c319=t,c320=t,c321=t,c322=t,c323=t,c324=t,c325=t,c326=t,c327=t,c328=t,c329=t,c330=t,c331=t,c332=t,c333=t,c334=t,c335=t,c336=t,c337=t,c338=t,c339=t,c340=t,c341=t,c342=t,c343=t,c344=t,c345=t,c346=t,c347=t,c348=t,c349=t,c350=t,c351=t,c352=t,c353=t,c354=t,c355=t,c356=t,c357=t,c358=t,c359=t,c360=t,c361=t,c362=t,c363=t,c364=t,c365=t,c366=t,c367=t,c368=t,c369=t,c370=t,c371=t,c372=t,c373=t,c374=t,c375=t,c376=t,c377=t,c378=t,c379=t,c380=t,c381=t,c382=t,c383=t,c384=t,c385=t,c386=t,c387=t,c388=t,c389=t,c390=t,c391=t,c392=t,c393=t,c394=t,c395=t,c396=t,c397=t,c398=t,c399=t,c400=t,c401=t,c402=t,c403=t,c404=t,c405=t,c406=t,c407=t,c408=t,c409=t,c410=t,c411=t,c412=t,c413=t,c414=t,c415=t,c416=t,c417=t,c418=t,c419=t,c420=t,c421=t,c422=t,c423=t,c424=t,c425=t,c426=t,c427=t,c428=t,c429=t,c430=t,c431=t,c432=t,c433=t,c434=t,c435=t,c436=t,c437=t,c438=t,c439=t,c440=t,c441=t,c442=t,c443=t,c444=t,c445=t,c446=t,"
+ "c447=t,c448=t,c449=t,c450=t,c451=t,c452=t,c453=t,c454=t,c455=t,c456=t,c457=t,c458=t,c459=t,c460=t,c461=t,c462=t,c463=t,c464=t,c465=t,c466=t,c467=t,c468=t,c469=t,c470=t,c471=t,c472=t,c473=t,c474=t,c475=t,c476=t,c477=t,c478=t,c479=t,c480=t,c481=t,c482=t,c483=t,c484=t,c485=t,c486=t,c487=t,c488=t,c489=t,c490=t,c491=t,c492=t,c493=t,c494=t,c495=t,c496=t,c497=t,c498=t,c499=t,c500=t,c501=t,c502=t,c503=t,c504=t,c505=t,c506=t,c507=t,c508=t,c509=t,c510=t,c511=t,c512=t,c513=t,c514=t,c515=t,c516=t,c517=t,c518=t,c519=t,c520=t,c521=t,c522=t,c523=t,c524=t,c525=t,c526=t,c527=t,c528=t,c529=t,c530=t,c531=t,c532=t,c533=t,c534=t,c535=t,c536=t,c537=t,c538=t,c539=t,c540=t,c541=t,c542=t,c543=t,c544=t,c545=t,c546=t,c547=t,c548=t,c549=t,c550=t,c551=t,c552=t,c553=t,c554=t,c555=t,c556=t,c557=t,c558=t,c559=t,c560=t,c561=t,c562=t,c563=t,c564=t,c565=t,c566=t,c567=t,c568=t,c569=t,c570=t,c571=t,c572=t,c573=t,c574=t,c575=t,c576=t,c577=t,c578=t,c579=t,c580=t,c581=t,c582=t,c583=t,c584=t,c585=t,c586=t,c587=t,c588=t,c589=t,c590=t,c591=t,c592=t,c593=t,c594=t,c595=t,c596=t,c597=t,c598=t,c599=t,c600=t,c601=t,c602=t,c603=t,c604=t,c605=t,c606=t,c607=t,c608=t,c609=t,c610=t,c611=t,c612=t,c613=t,c614=t,"
+ "c615=t,c616=t,c617=t,c618=t,c619=t,c620=t,c621=t,c622=t,c623=t,c624=t,c625=t,c626=t,c627=t,c628=t,c629=t,c630=t,c631=t,c632=t,c633=t,c634=t,c635=t,c636=t,c637=t,c638=t,c639=t,c640=t,c641=t,c642=t,c643=t,c644=t,c645=t,c646=t,c647=t,c648=t,c649=t,c650=t,c651=t,c652=t,c653=t,c654=t,c655=t,c656=t,c657=t,c658=t,c659=t,c660=t,c661=t,c662=t,c663=t,c664=t,c665=t,c666=t,c667=t,c668=t,c669=t,c670=t,c671=t,c672=t,c673=t,c674=t,c675=t,c676=t,c677=t,c678=t,c679=t,c680=t,c681=t,c682=t,c683=t,c684=t,c685=t,c686=t,c687=t,c688=t,c689=t,c690=t,c691=t,c692=t,c693=t,c694=t,c695=t,c696=t,c697=t,c698=t,c699=t,c700=t,c701=t,c702=t,c703=t,c704=t,c705=t,c706=t,c707=t,c708=t,c709=t,c710=t,c711=t,c712=t,c713=t,c714=t,c715=t,c716=t,c717=t,c718=t,c719=t,c720=t,c721=t,c722=t,c723=t,c724=t,c725=t,c726=t,c727=t,c728=t,c729=t,c730=t,c731=t,c732=t,c733=t,c734=t,c735=t,c736=t,c737=t,c738=t,c739=t,c740=t,c741=t,c742=t,c743=t,c744=t,c745=t,c746=t,c747=t,c748=t,c749=t,c750=t,c751=t,c752=t,c753=t,c754=t,c755=t,c756=t,c757=t,c758=t,c759=t,c760=t,c761=t,c762=t,c763=t,c764=t,c765=t,c766=t,c767=t,c768=t,c769=t,c770=t,c771=t,c772=t,c773=t,c774=t,c775=t,c776=t,c777=t,c778=t,c779=t,c780=t,c781=t,c782=t,"
+ "c783=t,c784=t,c785=t,c786=t,c787=t,c788=t,c789=t,c790=t,c791=t,c792=t,c793=t,c794=t,c795=t,c796=t,c797=t,c798=t,c799=t,c800=t,c801=t,c802=t,c803=t,c804=t,c805=t,c806=t,c807=t,c808=t,c809=t,c810=t,c811=t,c812=t,c813=t,"
+ "c814=t,c815=t,c816=t,c817=t,c818=t,c819=t,c820=t,c821=t,c822=t,c823=t,c824=t,c825=t,c826=t,c827=t,c828=t,c829=t,c830=t,c831=t,c832=t,c833=t,c834=t,c835=t,c836=t,c837=t,c838=t,c839=t,c840=t,c841=t,c842=t,c843=t,c844=t,c845=t,c846=t,c847=t,c848=t,c849=t,c850=t,c851=t,c852=t,c853=t,c854=t,c855=t,c856=t,c857=t,c858=t,c859=t,c860=t,c861=t,c862=t,"
+ "c863=t,c864=t,c865=t,c866=t,c867=t,c868=t,c869=t,c870=t,c871=t,c872=t,c873=t,c874=t,c875=t,c876=t,c877=t,c878=t,c879=t,c880=t,c881=t,c882=t,c883=t,c884=t,c885=t,c886=t,c887=t,c888=t,c889=t,c890=t,c891=t,c892=t,c893=t,c894=t,c895=t,c896=t,c897=t,c898=t,c899=t,c900=t,c901=t,c902=t,c903=t,c904=t,c905=t,c906=t,c907=t,c908=t,c909=t,c910=t,c911=t,c912=t,c913=t,c914=t,c915=t,c916=t,c917=t,c918=t,c919=t,c920=t,c921=t,c922=t,c923=t,c924=t,c925=t,c926=t,c927=t,c928=t,c929=t,c930=t,c931=t,c932=t,c933=t,c934=t,c935=t,c936=t,c937=t,c938=t,c939=t,c940=t,c941=t,c942=t,c943=t,c944=t,c945=t,c946=t,c947=t,c948=t,c949=t,c950=t,c951=t,c952=t,c953=t,c954=t,c955=t,c956=t,c957=t,c958=t,c959=t,c960=t,c961=t,c962=t,c963=t,c964=t,c965=t,c966=t,c967=t,c968=t,c969=t,c970=t,c971=t,c972=t,c973=t,c974=t,c975=t,c976=t,c977=t,c978=t,c979=t,c980=t,c981=t,c982=t,c983=t,c984=t,c985=t,c986=t,c987=t,c988=t,c989=t,c990=t,c991=t,c992=t,c993=t,c994=t,c995=t,c996=t,c997=t,c998=t,c999=t,c1000=t,c1001=t,c1002=t,c1003=t,c1004=t,c1005=t,c1006=t,c1007=t,c1008=t,c1009=t,c1010=t,c1011=t,c1012=t,c1013=t,c1014=t,c1015=t,c1016=t,c1017=t,c1018=t,c1019=t,c1020=t,c1021=t,c1022=t,c1023=t,c1024=t,c1025=t,c1026=t,"
+ "c1027=t,c1028=t,c1029=t,c1030=t,c1031=t,c1032=t,c1033=t,c1034=t,c1035=t,c1036=t,c1037=t,c1038=t,c1039=t,c1040=t,c1041=t,c1042=t,c1043=t,c1044=t,c1045=t,c1046=t,c1047=t,c1048=t,c1049=t,c1050=t,c1051=t,c1052=t,c1053=t,c1054=t,c1055=t,c1056=t,c1057=t,c1058=t,c1059=t,c1060=t,c1061=t,c1062=t,c1063=t,c1064=t,c1065=t,c1066=t,c1067=t,c1068=t,c1069=t,c1070=t,c1071=t,c1072=t,c1073=t,c1074=t,c1075=t,c1076=t,c1077=t,c1078=t,c1079=t,c1080=t,c1081=t,c1082=t,c1083=t,c1084=t,c1085=t,c1086=t,c1087=t,c1088=t,c1089=t,c1090=t,c1091=t,c1092=t,c1093=t,c1094=t,c1095=t,c1096=t,c1097=t,c1098=t,c1099=t,c1100=t,c1101=t,c1102=t,c1103=t,c1104=t,c1105=t,c1106=t,c1107=t,c1108=t,c1109=t,c1110=t,c1111=t,c1112=t,c1113=t,c1114=t,c1115=t,c1116=t,c1117=t,c1118=t,c1119=t,c1120=t,c1121=t,c1122=t,c1123=t,c1124=t,c1125=t,c1126=t,c1127=t,c1128=t,c1129=t,c1130=t,c1131=t,c1132=t,c1133=t,c1134=t,c1135=t,c1136=t,c1137=t,c1138=t,c1139=t,c1140=t,c1141=t,c1142=t,c1143=t,c1144=t,c1145=t,c1146=t,c1147=t,c1148=t,c1149=t,c1150=t,c1151=t,c1152=t,c1153=t,c1154=t,c1155=t,c1156=t,c1157=t,c1158=t,c1159=t,c1160=t,c1161=t,c1162=t,c1163=t,c1164=t,c1165=t,c1166=t,c1167=t,c1168=t,c1169=t,c1170=t,c1171=t,c1172=t,c1173=t,"
+ "c1174=t,c1175=t,c1176=t,c1177=t,c1178=t,c1179=t,c1180=t,c1181=t,c1182=t,c1183=t,c1184=t,c1185=t,c1186=t,c1187=t,c1188=t,c1189=t,c1190=t,c1191=t,c1192=t,c1193=t,c1194=t,c1195=t,c1196=t,c1197=t,c1198=t,c1199=t,c1200=t,c1201=t,c1202=t,c1203=t,c1204=t,c1205=t,c1206=t,c1207=t,c1208=t,c1209=t,c1210=t,c1211=t,c1212=t,c1213=t,c1214=t,c1215=t,c1216=t,c1217=t,c1218=t,c1219=t,c1220=t,c1221=t,c1222=t,c1223=t,c1224=t,c1225=t,c1226=t,c1227=t,c1228=t,c1229=t,c1230=t,c1231=t,c1232=t,c1233=t,c1234=t,c1235=t,c1236=t,c1237=t,c1238=t,c1239=t,c1240=t,c1241=t,c1242=t,c1243=t,c1244=t,c1245=t,c1246=t,c1247=t,c1248=t,c1249=t,c1250=t,c1251=t,c1252=t,c1253=t,c1254=t,c1255=t,c1256=t,c1257=t,c1258=t,c1259=t,c1260=t,c1261=t,c1262=t,c1263=t,c1264=t,c1265=t,c1266=t,c1267=t,c1268=t,c1269=t,c1270=t,c1271=t,c1272=t,c1273=t,c1274=t,c1275=t,c1276=t,c1277=t,c1278=t,c1279=t,c1280=t,c1281=t,c1282=t,c1283=t,c1284=t,c1285=t,c1286=t,c1287=t,c1288=t,c1289=t,c1290=t,c1291=t,c1292=t,c1293=t,c1294=t,c1295=t,c1296=t,c1297=t,c1298=t,c1299=t,c1300=t,c1301=t,c1302=t,c1303=t,c1304=t,c1305=t,c1306=t,c1307=t,c1308=t,c1309=t,c1310=t,c1311=t,c1312=t,c1313=t,c1314=t,c1315=t,c1316=t,c1317=t,c1318=t,c1319=t,c1320=t,"
+ "c1321=t,c1322=t,c1323=t,c1324=t,c1325=t,c1326=t,c1327=t,c1328=t,c1329=t,c1330=t,c1331=t,c1332=t,c1333=t,c1334=t,c1335=t,c1336=t,c1337=t,c1338=t,c1339=t,c1340=t,c1341=t,c1342=t,c1343=t,c1344=t,c1345=t,c1346=t,c1347=t,"
+ "c1348=t,c1349=t,c1350=t,c1351=t,c1352=t,c1353=t,c1354=t,c1355=t,c1356=t,c1357=t,c1358=t,c1359=t,c1360=t,c1361=t,c1362=t,c1363=t,c1364=t,c1365=t,c1366=t,c1367=t,c1368=t,c1369=t,c1370=t,c1371=t,c1372=t,c1373=t,c1374=t,c1375=t,c1376=t,c1377=t,c1378=t,c1379=t,c1380=t,c1381=t,c1382=t,c1383=t,c1384=t,c1385=t,c1386=t,c1387=t,c1388=t,c1389=t,c1390=t,c1391=t,c1392=t,c1393=t,c1394=t,c1395=t,c1396=t,c1397=t,c1398=t,c1399=t,c1400=t,c1401=t,c1402=t,c1403=t,c1404=t,c1405=t,c1406=t,c1407=t,c1408=t,c1409=t,c1410=t,c1411=t,c1412=t,c1413=t,c1414=t,c1415=t,c1416=t,c1417=t,c1418=t,c1419=t,c1420=t,c1421=t,c1422=t,c1423=t,c1424=t,c1425=t,c1426=t,c1427=t,c1428=t,c1429=t,c1430=t,c1431=t,c1432=t,c1433=t,c1434=t,c1435=t,c1436=t,c1437=t,c1438=t,c1439=t,c1440=t,c1441=t,c1442=t,c1443=t,c1444=t,c1445=t,c1446=t,c1447=t,c1448=t,c1449=t,c1450=t,c1451=t,c1452=t,c1453=t,c1454=t,c1455=t,c1456=t,c1457=t,c1458=t,c1459=t,c1460=t,c1461=t,c1462=t,c1463=t,c1464=t,c1465=t,c1466=t,c1467=t,c1468=t,c1469=t,c1470=t,c1471=t,c1472=t,c1473=t,c1474=t,c1475=t,c1476=t,c1477=t,c1478=t,c1479=t,c1480=t,c1481=t,c1482=t,c1483=t,c1484=t,c1485=t,c1486=t,c1487=t,c1488=t,c1489=t,c1490=t,c1491=t,c1492=t,c1493=t,c1494=t,"
+ "c1495=t,c1496=t,c1497=t,c1498=t,c1499=t,c1500=t,c1501=t,c1502=t,c1503=t,c1504=t,c1505=t,c1506=t,c1507=t,c1508=t,c1509=t,c1510=t,c1511=t,c1512=t,c1513=t,c1514=t,c1515=t,c1516=t,c1517=t,c1518=t,c1519=t,c1520=t,c1521=t,c1522=t,c1523=t,c1524=t,c1525=t,c1526=t,c1527=t,c1528=t,c1529=t,c1530=t,c1531=t,c1532=t,c1533=t,c1534=t,c1535=t,c1536=t,c1537=t,c1538=t,c1539=t,c1540=t,c1541=t,c1542=t,c1543=t,c1544=t,c1545=t,c1546=t,c1547=t,c1548=t,c1549=t,c1550=t,c1551=t,c1552=t,c1553=t,c1554=t,c1555=t,c1556=t,c1557=t,c1558=t,c1559=t,c1560=t,c1561=t,c1562=t,c1563=t,c1564=t,c1565=t,c1566=t,c1567=t,c1568=t,c1569=t,c1570=t,c1571=t,c1572=t,c1573=t,c1574=t,c1575=t,c1576=t,c1577=t,c1578=t,c1579=t,c1580=t,c1581=t,c1582=t,c1583=t,c1584=t,c1585=t,c1586=t,c1587=t,c1588=t,c1589=t,c1590=t,c1591=t,c1592=t,c1593=t,c1594=t,c1595=t,c1596=t,c1597=t,c1598=t,c1599=t,c1600=t,c1601=t,c1602=t,c1603=t,c1604=t,c1605=t,c1606=t,c1607=t,c1608=t,c1609=t,c1610=t,c1611=t,c1612=t,c1613=t,c1614=t,c1615=t,c1616=t,c1617=t,c1618=t,c1619=t,c1620=t,c1621=t,c1622=t,c1623=t,c1624=t,c1625=t,c1626=t,c1627=t,c1628=t,c1629=t,c1630=t,c1631=t,c1632=t,c1633=t,c1634=t,c1635=t,c1636=t,c1637=t,c1638=t,c1639=t,c1640=t,c1641=t,"
+ "c1642=t,c1643=t,c1644=t,c1645=t,c1646=t,c1647=t,c1648=t,c1649=t,c1650=t,c1651=t,c1652=t,c1653=t,c1654=t,c1655=t,c1656=t,c1657=t,c1658=t,c1659=t,c1660=t,c1661=t,c1662=t,c1663=t,c1664=t,c1665=t,c1666=t,c1667=t,c1668=t,c1669=t,c1670=t,c1671=t,c1672=t,c1673=t,c1674=t,c1675=t,c1676=t,c1677=t,c1678=t,c1679=t,c1680=t,c1681=t,c1682=t,c1683=t,c1684=t,c1685=t,c1686=t,c1687=t,c1688=t,c1689=t,c1690=t,c1691=t,c1692=t,c1693=t,c1694=t,c1695=t,c1696=t,c1697=t,c1698=t,c1699=t,c1700=t,c1701=t,c1702=t,c1703=t,c1704=t,c1705=t,c1706=t,c1707=t,c1708=t,c1709=t,c1710=t,c1711=t,c1712=t,c1713=t,c1714=t,c1715=t,c1716=t,c1717=t,c1718=t,c1719=t,c1720=t,c1721=t,c1722=t,c1723=t,c1724=t,c1725=t,c1726=t,c1727=t,c1728=t,c1729=t,c1730=t,c1731=t,c1732=t,c1733=t,c1734=t,c1735=t,c1736=t,c1737=t,c1738=t,c1739=t,c1740=t,c1741=t,c1742=t,c1743=t,c1744=t,c1745=t,c1746=t,c1747=t,c1748=t,c1749=t,c1750=t,c1751=t,c1752=t,c1753=t,c1754=t,c1755=t,c1756=t,c1757=t,c1758=t,c1759=t,c1760=t,c1761=t,c1762=t,c1763=t,c1764=t,c1765=t,c1766=t,c1767=t,c1768=t,c1769=t,c1770=t,c1771=t,c1772=t,c1773=t,c1774=t,c1775=t,c1776=t,c1777=t,c1778=t,c1779=t,c1780=t,c1781=t,c1782=t,c1783=t,c1784=t,c1785=t,c1786=t,c1787=t,c1788=t,"
+ "c1789=t,c1790=t,c1791=t,c1792=t,c1793=t,c1794=t,c1795=t,c1796=t,c1797=t,c1798=t,c1799=t,c1800=t,c1801=t,c1802=t,c1803=t,c1804=t,c1805=t,c1806=t,c1807=t,c1808=t,c1809=t,c1810=t,c1811=t,c1812=t,c1813=t,c1814=t,c1815=t,"
+ "c1816=t,c1817=t,c1818=t,c1819=t,c1820=t,c1821=t,c1822=t,c1823=t,c1824=t,c1825=t,c1826=t,c1827=t,c1828=t,c1829=t,c1830=t,c1831=t,c1832=t,c1833=t,c1834=t,c1835=t,c1836=t,c1837=t,c1838=t,c1839=t,c1840=t,c1841=t,c1842=t,c1843=t,c1844=t,c1845=t,c1846=t,c1847=t,c1848=t,c1849=t,c1850=t,c1851=t,c1852=t,c1853=t,c1854=t,c1855=t,c1856=t,c1857=t,c1858=t,c1859=t,c1860=t,c1861=t,c1862=t,c1863=t,c1864=t,c1865=t,c1866=t,c1867=t,c1868=t,c1869=t,c1870=t,c1871=t,c1872=t,c1873=t,c1874=t,c1875=t,c1876=t,c1877=t,c1878=t,c1879=t,c1880=t,c1881=t,c1882=t,c1883=t,c1884=t,c1885=t,c1886=t,c1887=t,c1888=t,c1889=t,c1890=t,c1891=t,c1892=t,c1893=t,c1894=t,c1895=t,c1896=t,c1897=t,c1898=t,c1899=t,c1900=t,c1901=t,c1902=t,c1903=t,c1904=t,c1905=t,c1906=t,c1907=t,c1908=t,c1909=t,c1910=t,c1911=t,c1912=t,c1913=t,c1914=t,c1915=t,c1916=t,c1917=t,c1918=t,c1919=t,c1920=t,c1921=t,c1922=t,c1923=t,c1924=t,c1925=t,c1926=t,c1927=t,c1928=t,c1929=t,c1930=t,c1931=t,c1932=t,c1933=t,c1934=t,c1935=t,c1936=t,c1937=t,c1938=t,c1939=t,c1940=t,c1941=t,c1942=t,c1943=t,c1944=t,c1945=t,c1946=t,c1947=t,c1948=t,c1949=t,c1950=t,c1951=t,c1952=t,c1953=t,c1954=t,c1955=t,c1956=t,c1957=t,c1958=t,c1959=t,c1960=t,c1961=t,c1962=t,"
+ "c1963=t,c1964=t,c1965=t,c1966=t,c1967=t,c1968=t,c1969=t,c1970=t,c1971=t,c1972=t,c1973=t,c1974=t,c1975=t,c1976=t,c1977=t,c1978=t,c1979=t,c1980=t,c1981=t,c1982=t,c1983=t,c1984=t,c1985=t,c1986=t,c1987=t,c1988=t,c1989=t,c1990=t,c1991=t,c1992=t,c1993=t,c1994=t,c1995=t,c1996=t,c1997=t,c1998=t,c1999=t,c2000=t,c2001=t,c2002=t,c2003=t,c2004=t,c2005=t,c2006=t,c2007=t,c2008=t,c2009=t,c2010=t,c2011=t,c2012=t,c2013=t,c2014=t,c2015=t,c2016=t,c2017=t,c2018=t,c2019=t,c2020=t,c2021=t,c2022=t,c2023=t,c2024=t,c2025=t,c2026=t,c2027=t,c2028=t,c2029=t,c2030=t,c2031=t,c2032=t,c2033=t,c2034=t,c2035=t,c2036=t,c2037=t,c2038=t,c2039=t,c2040=t,c2041=t,c2042=t,c2043=t,c2044=t,c2045=t,c2046=t,c2047=t,c2048=t,c2049=t,c2050=t,c2051=t,c2052=t,c2053=t,c2054=t,c2055=t,c2056=t,c2057=t,c2058=t,c2059=t,c2060=t,c2061=t,c2062=t,c2063=t,c2064=t,c2065=t,c2066=t,c2067=t,c2068=t,c2069=t,c2070=t,c2071=t,c2072=t,c2073=t,c2074=t,c2075=t,c2076=t,c2077=t,c2078=t,c2079=t,c2080=t,c2081=t,c2082=t,c2083=t,c2084=t,c2085=t,c2086=t,c2087=t,c2088=t,c2089=t,c2090=t,c2091=t,c2092=t,c2093=t,c2094=t,c2095=t,c2096=t,c2097=t,c2098=t,c2099=t,c2100=t,c2101=t,c2102=t,c2103=t,c2104=t,c2105=t,c2106=t,c2107=t,c2108=t,c2109=t,"
+ "c2110=t,c2111=t,c2112=t,c2113=t,c2114=t,c2115=t,c2116=t,c2117=t,c2118=t,c2119=t,c2120=t,c2121=t,c2122=t,c2123=t,c2124=t,c2125=t,c2126=t,c2127=t,c2128=t,c2129=t,c2130=t,c2131=t,c2132=t,c2133=t,c2134=t,c2135=t,c2136=t,c2137=t,c2138=t,c2139=t,c2140=t,c2141=t,c2142=t,c2143=t,c2144=t,c2145=t,c2146=t,c2147=t,c2148=t,c2149=t,c2150=t,c2151=t,c2152=t,c2153=t,c2154=t,c2155=t,c2156=t,c2157=t,c2158=t,c2159=t,c2160=t,c2161=t,c2162=t,c2163=t,c2164=t,c2165=t,c2166=t,c2167=t,c2168=t,c2169=t,c2170=t,c2171=t,c2172=t,c2173=t,c2174=t,c2175=t,c2176=t,c2177=t,c2178=t,c2179=t,c2180=t,c2181=t,c2182=t,c2183=t,c2184=t,c2185=t,c2186=t,c2187=t,c2188=t,c2189=t,c2190=t,c2191=t,c2192=t,c2193=t,c2194=t,c2195=t,c2196=t,c2197=t,c2198=t,c2199=t,c2200=t,c2201=t,c2202=t,c2203=t,c2204=t,c2205=t,c2206=t,c2207=t,c2208=t,c2209=t,c2210=t,c2211=t,c2212=t,c2213=t,c2214=t,c2215=t,c2216=t,c2217=t,c2218=t,c2219=t,c2220=t,c2221=t,c2222=t,c2223=t,c2224=t,c2225=t,c2226=t,c2227=t,c2228=t,c2229=t,c2230=t,c2231=t,c2232=t,c2233=t,c2234=t,c2235=t,c2236=t,c2237=t,c2238=t,c2239=t,c2240=t,c2241=t,c2242=t,c2243=t,c2244=t,c2245=t,c2246=t,c2247=t,c2248=t,c2249=t,c2250=t,c2251=t,c2252=t,c2253=t,c2254=t,c2255=t,c2256=t,"
+ "c2257=t,c2258=t,c2259=t,c2260=t,c2261=t,c2262=t,c2263=t,c2264=t,c2265=t,c2266=t,c2267=t,c2268=t,c2269=t,c2270=t,c2271=t,c2272=t,c2273=t,c2274=t,c2275=t,c2276=t,c2277=t,c2278=t,c2279=t,c2280=t,c2281=t,c2282=t,c2283=t,"
+ "c2284=t,c2285=t,c2286=t,c2287=t,c2288=t,c2289=t,c2290=t,c2291=t,c2292=t,c2293=t,c2294=t,c2295=t,c2296=t,c2297=t,c2298=t,c2299=t,c2300=t,c2301=t,c2302=t,c2303=t,c2304=t,c2305=t,c2306=t,c2307=t,c2308=t,c2309=t,c2310=t,c2311=t,c2312=t,c2313=t,c2314=t,c2315=t,c2316=t,c2317=t,c2318=t,c2319=t,c2320=t,c2321=t,c2322=t,c2323=t,c2324=t,c2325=t,c2326=t,c2327=t,c2328=t,c2329=t,c2330=t,c2331=t,c2332=t,c2333=t,c2334=t,c2335=t,c2336=t,c2337=t,c2338=t,c2339=t,c2340=t,c2341=t,c2342=t,c2343=t,c2344=t,c2345=t,c2346=t,c2347=t,c2348=t,c2349=t,c2350=t,c2351=t,c2352=t,c2353=t,c2354=t,c2355=t,c2356=t,c2357=t,c2358=t,c2359=t,c2360=t,c2361=t,c2362=t,c2363=t,c2364=t,c2365=t,c2366=t,c2367=t,c2368=t,c2369=t,c2370=t,c2371=t,c2372=t,c2373=t,c2374=t,c2375=t,c2376=t,c2377=t,c2378=t,c2379=t,c2380=t,c2381=t,c2382=t,c2383=t,c2384=t,c2385=t,c2386=t,c2387=t,c2388=t,c2389=t,c2390=t,c2391=t,c2392=t,c2393=t,c2394=t,c2395=t,c2396=t,c2397=t,c2398=t,c2399=t,c2400=t,c2401=t,c2402=t,c2403=t,c2404=t,c2405=t,c2406=t,c2407=t,c2408=t,c2409=t,c2410=t,c2411=t,c2412=t,c2413=t,c2414=t,c2415=t,c2416=t,c2417=t,c2418=t,c2419=t,c2420=t,c2421=t,c2422=t,c2423=t,c2424=t,c2425=t,c2426=t,c2427=t,c2428=t,c2429=t,c2430=t,"
+ "c2431=t,c2432=t,c2433=t,c2434=t,c2435=t,c2436=t,c2437=t,c2438=t,c2439=t,c2440=t,c2441=t,c2442=t,c2443=t,c2444=t,c2445=t,c2446=t,c2447=t,c2448=t,c2449=t,c2450=t,c2451=t,c2452=t,c2453=t,c2454=t,c2455=t,c2456=t,c2457=t,c2458=t,c2459=t,c2460=t,c2461=t,c2462=t,c2463=t,c2464=t,c2465=t,c2466=t,c2467=t,c2468=t,c2469=t,c2470=t,c2471=t,c2472=t,c2473=t,c2474=t,c2475=t,c2476=t,c2477=t,c2478=t,c2479=t,c2480=t,c2481=t,c2482=t,c2483=t,c2484=t,c2485=t,c2486=t,c2487=t,c2488=t,c2489=t,c2490=t,c2491=t,c2492=t,c2493=t,c2494=t,c2495=t,c2496=t,c2497=t,c2498=t,c2499=t,c2500=t,c2501=t,c2502=t,c2503=t,c2504=t,c2505=t,c2506=t,c2507=t,c2508=t,c2509=t,c2510=t,c2511=t,c2512=t,c2513=t,c2514=t,c2515=t,c2516=t,c2517=t,c2518=t,c2519=t,c2520=t,c2521=t,c2522=t,c2523=t,c2524=t,c2525=t,c2526=t,c2527=t,c2528=t,c2529=t,c2530=t,c2531=t,c2532=t,c2533=t,c2534=t,c2535=t,c2536=t,c2537=t,c2538=t,c2539=t,c2540=t,c2541=t,c2542=t,c2543=t,c2544=t,c2545=t,c2546=t,c2547=t,c2548=t,c2549=t,c2550=t,c2551=t,c2552=t,c2553=t,c2554=t,c2555=t,c2556=t,c2557=t,c2558=t,c2559=t,c2560=t,c2561=t,c2562=t,c2563=t,c2564=t,c2565=t,c2566=t,c2567=t,c2568=t,c2569=t,c2570=t,c2571=t,c2572=t,c2573=t,c2574=t,c2575=t,c2576=t,c2577=t,"
+ "c2578=t,c2579=t,c2580=t,c2581=t,c2582=t,c2583=t,c2584=t,c2585=t,c2586=t,c2587=t,c2588=t,c2589=t,c2590=t,c2591=t,c2592=t,c2593=t,c2594=t,c2595=t,c2596=t,c2597=t,c2598=t,c2599=t,c2600=t,c2601=t,c2602=t,c2603=t,c2604=t,c2605=t,c2606=t,c2607=t,c2608=t,c2609=t,c2610=t,c2611=t,c2612=t,c2613=t,c2614=t,c2615=t,c2616=t,c2617=t,c2618=t,c2619=t,c2620=t,c2621=t,c2622=t,c2623=t,c2624=t,c2625=t,c2626=t,c2627=t,c2628=t,c2629=t,c2630=t,c2631=t,c2632=t,c2633=t,c2634=t,c2635=t,c2636=t,c2637=t,c2638=t,c2639=t,c2640=t,c2641=t,c2642=t,c2643=t,c2644=t,c2645=t,c2646=t,c2647=t,c2648=t,c2649=t,c2650=t,c2651=t,c2652=t,c2653=t,c2654=t,c2655=t,c2656=t,c2657=t,c2658=t,c2659=t,c2660=t,c2661=t,c2662=t,c2663=t,c2664=t,c2665=t,c2666=t,c2667=t,c2668=t,c2669=t,c2670=t,c2671=t,c2672=t,c2673=t,c2674=t,c2675=t,c2676=t,c2677=t,c2678=t,c2679=t,c2680=t,c2681=t,c2682=t,c2683=t,c2684=t,c2685=t,c2686=t,c2687=t,c2688=t,c2689=t,c2690=t,c2691=t,c2692=t,c2693=t,c2694=t,c2695=t,c2696=t,c2697=t,c2698=t,c2699=t,c2700=t,c2701=t,c2702=t,c2703=t,c2704=t,c2705=t,c2706=t,c2707=t,c2708=t,c2709=t,c2710=t,c2711=t,c2712=t,c2713=t,c2714=t,c2715=t,c2716=t,c2717=t,c2718=t,c2719=t,c2720=t,c2721=t,c2722=t,c2723=t,c2724=t,"
+ "c2725=t,c2726=t,c2727=t,c2728=t,c2729=t,c2730=t,c2731=t,c2732=t,c2733=t,c2734=t,c2735=t,c2736=t,c2737=t,c2738=t,c2739=t,c2740=t,c2741=t,c2742=t,c2743=t,c2744=t,c2745=t,c2746=t,c2747=t,c2748=t,c2749=t,c2750=t,c2751=t,c2752=t,c2753=t,c2754=t,c2755=t,c2756=t,c2757=t,c2758=t,c2759=t,c2760=t,c2761=t,c2762=t,c2763=t,c2764=t,c2765=t,c2766=t,c2767=t,c2768=t,c2769=t,c2770=t,c2771=t,c2772=t,c2773=t,c2774=t,c2775=t,c2776=t,c2777=t,c2778=t,c2779=t,c2780=t,c2781=t,c2782=t,c2783=t,c2784=t,c2785=t,c2786=t,c2787=t,c2788=t,c2789=t,c2790=t,c2791=t,c2792=t,c2793=t,c2794=t,c2795=t,c2796=t,c2797=t,c2798=t,c2799=t,c2800=t,c2801=t,c2802=t,c2803=t,c2804=t,c2805=t,c2806=t,c2807=t,c2808=t,c2809=t,c2810=t,c2811=t,c2812=t,c2813=t,c2814=t,c2815=t,c2816=t,c2817=t,c2818=t,c2819=t,c2820=t,c2821=t,c2822=t,c2823=t,c2824=t,c2825=t,c2826=t,c2827=t,c2828=t,c2829=t,c2830=t,c2831=t,c2832=t,c2833=t,c2834=t,c2835=t,c2836=t,c2837=t,c2838=t,c2839=t,c2840=t,c2841=t,c2842=t,c2843=t,c2844=t,c2845=t,c2846=t,c2847=t,c2848=t,c2849=t,c2850=t,c2851=t,c2852=t,c2853=t,c2854=t,c2855=t,c2856=t,c2857=t,c2858=t,c2859=t,c2860=t,c2861=t,c2862=t,c2863=t,c2864=t,c2865=t,c2866=t,c2867=t,c2868=t,c2869=t,c2870=t,c2871=t,"
+ "c2872=t,c2873=t,c2874=t,c2875=t,c2876=t,c2877=t,c2878=t,c2879=t,c2880=t,c2881=t,c2882=t,c2883=t,c2884=t,c2885=t,c2886=t,c2887=t,c2888=t,c2889=t,c2890=t,c2891=t,c2892=t,c2893=t,c2894=t,c2895=t,c2896=t,c2897=t,c2898=t,c2899=t,c2900=t,c2901=t,c2902=t,c2903=t,c2904=t,c2905=t,c2906=t,c2907=t,c2908=t,c2909=t,c2910=t,c2911=t,c2912=t,c2913=t,c2914=t,c2915=t,c2916=t,c2917=t,c2918=t,c2919=t,c2920=t,c2921=t,c2922=t,c2923=t,c2924=t,c2925=t,c2926=t,c2927=t,c2928=t,c2929=t,c2930=t,c2931=t,c2932=t,c2933=t,c2934=t,c2935=t,c2936=t,c2937=t,c2938=t,c2939=t,c2940=t,c2941=t,c2942=t,c2943=t,c2944=t,c2945=t,c2946=t,c2947=t,c2948=t,c2949=t,c2950=t,c2951=t,c2952=t,c2953=t,c2954=t,c2955=t,c2956=t,c2957=t,c2958=t,c2959=t,c2960=t,c2961=t,c2962=t,c2963=t,c2964=t,c2965=t,c2966=t,c2967=t,c2968=t,c2969=t,c2970=t,c2971=t,c2972=t,c2973=t,c2974=t,c2975=t,c2976=t,c2977=t,c2978=t,c2979=t,c2980=t,c2981=t,c2982=t,c2983=t,c2984=t,c2985=t,c2986=t,c2987=t,c2988=t,c2989=t,c2990=t,c2991=t,c2992=t,c2993=t,c2994=t,c2995=t,c2996=t,c2997=t,c2998=t,c2999=t,c3000=t,c3001=t,c3002=t,c3003=t,c3004=t,c3005=t,c3006=t,c3007=t,c3008=t,c3009=t,c3010=t,c3011=t,c3012=t,c3013=t,c3014=t,c3015=t,c3016=t,c3017=t,c3018=t,"
+ "c3019=t,c3020=t,c3021=t,c3022=t,c3023=t,c3024=t,c3025=t,c3026=t,c3027=t,c3028=t,c3029=t,c3030=t,c3031=t,c3032=t,c3033=t,c3034=t,c3035=t,c3036=t,c3037=t,c3038=t,c3039=t,c3040=t,c3041=t,c3042=t,c3043=t,c3044=t,c3045=t,c3046=t,c3047=t,c3048=t,c3049=t,c3050=t,c3051=t,c3052=t,c3053=t,c3054=t,c3055=t,c3056=t,c3057=t,c3058=t,c3059=t,c3060=t,c3061=t,c3062=t,c3063=t,c3064=t,c3065=t,c3066=t,c3067=t,c3068=t,c3069=t,c3070=t,c3071=t,c3072=t,c3073=t,c3074=t,c3075=t,c3076=t,c3077=t,c3078=t,c3079=t,c3080=t,c3081=t,c3082=t,c3083=t,c3084=t,c3085=t,c3086=t,c3087=t,c3088=t,c3089=t,c3090=t,c3091=t,c3092=t,c3093=t,c3094=t,c3095=t,c3096=t,c3097=t,c3098=t,c3099=t,c3100=t,c3101=t,c3102=t,c3103=t,c3104=t,c3105=t,c3106=t,c3107=t,c3108=t,c3109=t,c3110=t,c3111=t,c3112=t,c3113=t,c3114=t,c3115=t,c3116=t,c3117=t,c3118=t,c3119=t,c3120=t,c3121=t,c3122=t,c3123=t,c3124=t,c3125=t,c3126=t,c3127=t,c3128=t,c3129=t,c3130=t,c3131=t,c3132=t,c3133=t,c3134=t,c3135=t,c3136=t,c3137=t,c3138=t,c3139=t,c3140=t,c3141=t,c3142=t,c3143=t,c3144=t,c3145=t,c3146=t,c3147=t,c3148=t,c3149=t,c3150=t,c3151=t,c3152=t,c3153=t,c3154=t,c3155=t,c3156=t,c3157=t,c3158=t,c3159=t,c3160=t,c3161=t,c3162=t,c3163=t,c3164=t,c3165=t,"
+ "c3166=t,c3167=t,c3168=t,c3169=t,c3170=t,c3171=t,c3172=t,c3173=t,c3174=t,c3175=t,c3176=t,c3177=t,c3178=t,c3179=t,c3180=t,c3181=t,c3182=t,c3183=t,c3184=t,c3185=t,c3186=t,c3187=t,c3188=t,c3189=t,c3190=t,c3191=t,c3192=t,c3193=t,c3194=t,c3195=t,c3196=t,c3197=t,c3198=t,c3199=t,c3200=t,c3201=t,c3202=t,c3203=t,c3204=t,c3205=t,c3206=t,c3207=t,c3208=t,c3209=t,c3210=t,c3211=t,c3212=t,c3213=t,c3214=t,c3215=t,c3216=t,c3217=t,c3218=t,c3219=t,c3220=t,c3221=t,c3222=t,c3223=t,c3224=t,c3225=t,c3226=t,c3227=t,c3228=t,c3229=t,c3230=t,c3231=t,c3232=t,c3233=t,c3234=t,c3235=t,c3236=t,c3237=t,c3238=t,c3239=t,c3240=t,c3241=t,c3242=t,c3243=t,c3244=t,c3245=t,c3246=t,c3247=t,c3248=t,c3249=t,c3250=t,c3251=t,c3252=t,c3253=t,c3254=t,c3255=t,c3256=t,c3257=t,c3258=t,c3259=t,c3260=t,c3261=t,c3262=t,c3263=t,c3264=t,c3265=t,c3266=t,c3267=t,c3268=t,c3269=t,c3270=t,c3271=t,c3272=t,c3273=t,c3274=t,c3275=t,c3276=t,c3277=t,c3278=t,c3279=t,c3280=t,c3281=t,c3282=t,c3283=t,c3284=t,c3285=t,c3286=t,c3287=t,c3288=t,c3289=t,c3290=t,c3291=t,c3292=t,c3293=t,c3294=t,c3295=t,c3296=t,c3297=t,c3298=t,c3299=t,c3300=t,c3301=t,c3302=t,c3303=t,c3304=t,c3305=t,c3306=t,c3307=t,c3308=t,c3309=t,c3310=t,c3311=t,c3312=t,"
+ "c3313=t,c3314=t,c3315=t,c3316=t,c3317=t,c3318=t,c3319=t,c3320=t,c3321=t,c3322=t,c3323=t,c3324=t,c3325=t,c3326=t,c3327=t,c3328=t,c3329=t,c3330=t,c3331=t,c3332=t,c3333=t,c3334=t,c3335=t,c3336=t,c3337=t,c3338=t,c3339=t,c3340=t,c3341=t,c3342=t,c3343=t,c3344=t,c3345=t,c3346=t,c3347=t,c3348=t,c3349=t,c3350=t,c3351=t,c3352=t,c3353=t,c3354=t,c3355=t,c3356=t,c3357=t,c3358=t,c3359=t,c3360=t,c3361=t,c3362=t,c3363=t,c3364=t,c3365=t,c3366=t,c3367=t,c3368=t,c3369=t,c3370=t,c3371=t,c3372=t,c3373=t,c3374=t,c3375=t,c3376=t,c3377=t,c3378=t,c3379=t,c3380=t,c3381=t,c3382=t,c3383=t,c3384=t,c3385=t,c3386=t,c3387=t,c3388=t,c3389=t,c3390=t,c3391=t,c3392=t,c3393=t,c3394=t,c3395=t,c3396=t,c3397=t,c3398=t,c3399=t,c3400=t,c3401=t,c3402=t,c3403=t,c3404=t,c3405=t,c3406=t,c3407=t,c3408=t,c3409=t,c3410=t,c3411=t,c3412=t,c3413=t,c3414=t,c3415=t,c3416=t,c3417=t,c3418=t,c3419=t,c3420=t,c3421=t,c3422=t,c3423=t,c3424=t,c3425=t,c3426=t,c3427=t,c3428=t,c3429=t,c3430=t,c3431=t,c3432=t,c3433=t,c3434=t,c3435=t,c3436=t,c3437=t,c3438=t,c3439=t,c3440=t,c3441=t,c3442=t,c3443=t,c3444=t,c3445=t,c3446=t,c3447=t,c3448=t,c3449=t,c3450=t,c3451=t,c3452=t,c3453=t,c3454=t,c3455=t,c3456=t,c3457=t,c3458=t,c3459=t,"
+ "c3460=t,c3461=t,c3462=t,c3463=t,c3464=t,c3465=t,c3466=t,c3467=t,c3468=t,c3469=t,c3470=t,c3471=t,c3472=t,c3473=t,c3474=t,c3475=t,c3476=t,c3477=t,c3478=t,c3479=t,c3480=t,c3481=t,c3482=t,c3483=t,c3484=t,c3485=t,c3486=t,c3487=t,c3488=t,c3489=t,c3490=t,c3491=t,c3492=t,c3493=t,c3494=t,c3495=t,c3496=t,c3497=t,c3498=t,c3499=t,c3500=t,c3501=t,c3502=t,c3503=t,c3504=t,c3505=t,c3506=t,c3507=t,c3508=t,c3509=t,c3510=t,c3511=t,c3512=t,c3513=t,"
+ "c3514=t,c3515=t,c3516=t,c3517=t,c3518=t,c3519=t,c3520=t,c3521=t,c3522=t,c3523=t,c3524=t,c3525=t,c3526=t,c3527=t,c3528=t,c3529=t,c3530=t,c3531=t,c3532=t,c3533=t,c3534=t,c3535=t,c3536=t,c3537=t,c3538=t,c3539=t,c3540=t,c3541=t,c3542=t,c3543=t,c3544=t,c3545=t,c3546=t,c3547=t,c3548=t,c3549=t,c3550=t,c3551=t,c3552=t,c3553=t,c3554=t,c3555=t,c3556=t,c3557=t,c3558=t,c3559=t,c3560=t,c3561=t,c3562=t,c3563=t,c3564=t,c3565=t,c3566=t,c3567=t,c3568=t,c3569=t,c3570=t,c3571=t,c3572=t,c3573=t,c3574=t,c3575=t,c3576=t,c3577=t,c3578=t,c3579=t,c3580=t,c3581=t,c3582=t,c3583=t,c3584=t,c3585=t,c3586=t,c3587=t,c3588=t,c3589=t,c3590=t,c3591=t,c3592=t,c3593=t,c3594=t,c3595=t,c3596=t,c3597=t,c3598=t,c3599=t,c3600=t,c3601=t,c3602=t,c3603=t,c3604=t,c3605=t,c3606=t,c3607=t,c3608=t,c3609=t,c3610=t,c3611=t,c3612=t,c3613=t,c3614=t,c3615=t,c3616=t,c3617=t,c3618=t,c3619=t,c3620=t,c3621=t,c3622=t,c3623=t,c3624=t,c3625=t,c3626=t,c3627=t,c3628=t,c3629=t,c3630=t,c3631=t,c3632=t,c3633=t,c3634=t,c3635=t,c3636=t,c3637=t,c3638=t,c3639=t,c3640=t,c3641=t,c3642=t,c3643=t,c3644=t,c3645=t,c3646=t,c3647=t,c3648=t,c3649=t,c3650=t,c3651=t,c3652=t,c3653=t,c3654=t,c3655=t,c3656=t,c3657=t,c3658=t,c3659=t,c3660=t,"
+ "c3661=t,c3662=t,c3663=t,c3664=t,c3665=t,c3666=t,c3667=t,c3668=t,c3669=t,c3670=t,c3671=t,c3672=t,c3673=t,c3674=t,c3675=t,c3676=t,c3677=t,c3678=t,c3679=t,c3680=t,c3681=t,c3682=t,c3683=t,c3684=t,c3685=t,c3686=t,c3687=t,c3688=t,c3689=t,c3690=t,c3691=t,c3692=t,c3693=t,c3694=t,c3695=t,c3696=t,c3697=t,c3698=t,c3699=t,c3700=t,c3701=t,c3702=t,c3703=t,c3704=t,c3705=t,c3706=t,c3707=t,c3708=t,c3709=t,c3710=t,c3711=t,c3712=t,c3713=t,c3714=t,c3715=t,c3716=t,c3717=t,c3718=t,c3719=t,c3720=t,c3721=t,c3722=t,c3723=t,c3724=t,c3725=t,c3726=t,c3727=t,c3728=t,c3729=t,c3730=t,c3731=t,c3732=t,c3733=t,c3734=t,c3735=t,c3736=t,c3737=t,c3738=t,c3739=t,c3740=t,c3741=t,c3742=t,c3743=t,c3744=t,c3745=t,c3746=t,c3747=t,c3748=t,c3749=t,c3750=t,c3751=t,c3752=t,c3753=t,c3754=t,c3755=t,c3756=t,c3757=t,c3758=t,c3759=t,c3760=t,c3761=t,c3762=t,c3763=t,c3764=t,c3765=t,c3766=t,c3767=t,c3768=t,c3769=t,c3770=t,c3771=t,c3772=t,c3773=t,c3774=t,c3775=t,c3776=t,c3777=t,c3778=t,c3779=t,c3780=t,c3781=t,c3782=t,c3783=t,c3784=t,c3785=t,c3786=t,c3787=t,c3788=t,c3789=t,c3790=t,c3791=t,c3792=t,c3793=t,c3794=t,c3795=t,c3796=t,c3797=t,c3798=t,c3799=t,c3800=t,c3801=t,c3802=t,c3803=t,c3804=t,c3805=t,c3806=t,c3807=t,"
+ "c3808=t,c3809=t,c3810=t,c3811=t,c3812=t,c3813=t,c3814=t,c3815=t,c3816=t,c3817=t,c3818=t,c3819=t,c3820=t,c3821=t,c3822=t,c3823=t,c3824=t,c3825=t,c3826=t,c3827=t,c3828=t,c3829=t,c3830=t,c3831=t,c3832=t,c3833=t,c3834=t,c3835=t,c3836=t,c3837=t,c3838=t,c3839=t,c3840=t,c3841=t,c3842=t,c3843=t,c3844=t,c3845=t,c3846=t,c3847=t,c3848=t,c3849=t,c3850=t,c3851=t,c3852=t,c3853=t,c3854=t,c3855=t,c3856=t,c3857=t,c3858=t,c3859=t,c3860=t,c3861=t,c3862=t,c3863=t,c3864=t,c3865=t,c3866=t,c3867=t,c3868=t,c3869=t,c3870=t,c3871=t,c3872=t,c3873=t,c3874=t,c3875=t,c3876=t,c3877=t,c3878=t,c3879=t,c3880=t,c3881=t,c3882=t,c3883=t,c3884=t,c3885=t,c3886=t,c3887=t,c3888=t,c3889=t,c3890=t,c3891=t,c3892=t,c3893=t,c3894=t,c3895=t,c3896=t,c3897=t,c3898=t,c3899=t,c3900=t,c3901=t,c3902=t,c3903=t,c3904=t,c3905=t,c3906=t,c3907=t,c3908=t,c3909=t,c3910=t,c3911=t,c3912=t,c3913=t,c3914=t,c3915=t,c3916=t,c3917=t,c3918=t,c3919=t,c3920=t,c3921=t,c3922=t,c3923=t,c3924=t,c3925=t,c3926=t,c3927=t,c3928=t,c3929=t,c3930=t,c3931=t,c3932=t,c3933=t,c3934=t,c3935=t,c3936=t,c3937=t,c3938=t,c3939=t,c3940=t,c3941=t,c3942=t,c3943=t,c3944=t,c3945=t,c3946=t,c3947=t,c3948=t,c3949=t,c3950=t,c3951=t,c3952=t,c3953=t,c3954=t,"
+ "c3955=t,c3956=t,c3957=t,c3958=t,c3959=t,c3960=t,c3961=t,c3962=t,c3963=t,c3964=t,c3965=t,c3966=t,c3967=t,c3968=t,c3969=t,c3970=t,c3971=t,c3972=t,c3973=t,c3974=t,c3975=t,c3976=t,c3977=t,c3978=t,c3979=t,c3980=t,c3981=t,c3982=t,c3983=t,c3984=t,c3985=t,c3986=t,c3987=t,c3988=t,c3989=t,c3990=t,c3991=t,c3992=t,c3993=t,c3994=t,c3995=t,c3996=t,c3997=t,c3998=t,c3999=t,c4000=t,c4001=t,c4002=t,c4003=t,c4004=t,c4005=t,c4006=t,c4007=t,c4008=t,c4009=t,c4010=t,c4011=t,c4012=t,c4013=t,c4014=t,c4015=t,c4016=t,c4017=t,c4018=t,c4019=t,c4020=t,c4021=t,c4022=t,c4023=t,c4024=t,c4025=t,c4026=t,c4027=t,c4028=t,c4029=t,c4030=t,c4031=t,c4032=t,c4033=t,c4034=t,c4035=t,c4036=t,c4037=t,c4038=t,c4039=t,c4040=t,c4041=t,c4042=t,c4043=t,c4044=t,c4045=t,c4046=t,c4047=t,c4048=t,c4049=t,c4050=t,c4051=t,c4052=t,c4053=t,c4054=t,c4055=t,c4056=t,c4057=t,c4058=t,c4059=t,c4060=t,c4061=t,c4062=t,c4063=t,c4064=t,c4065=t,c4066=t,c4067=t,c4068=t,c4069=t,c4070=t,c4071=t,c4072=t,c4073=t,c4074=t,c4075=t,c4076=t,c4077=t,c4078=t,c4079=t,c4080=t,c4081=t,c4082=t,c4083=t,c4084=t,c4085=t,c4086=t,c4087=t,c4088=t,c4089=t,c4090=t,c4091=t,c4092=t,c4093=t 1626006833640000000"
+ };
+
+ int ret = TSDB_CODE_SUCCESS;
+ for(int i = 0; i < sizeof(sql)/sizeof(sql[0]); i++){
+ ret = smlParseInfluxLine(info, sql[i]);
+ if(ret != TSDB_CODE_SUCCESS) break;
+ }
+ ASSERT_NE(ret, 0);
+ smlDestroyInfo(info);
+}
diff --git a/source/common/src/systable.c b/source/common/src/systable.c
index 68a77a9f332522b56cee144655f129da61006691..f77320c5acc605f2be4df0e7e14d8800c1fee94a 100644
--- a/source/common/src/systable.c
+++ b/source/common/src/systable.c
@@ -15,343 +15,361 @@
#include "systable.h"
#include "taos.h"
+#include "taosdef.h"
#include "tdef.h"
#include "tgrant.h"
+#include "tmsg.h"
#include "types.h"
#define SYSTABLE_SCH_TABLE_NAME_LEN ((TSDB_TABLE_NAME_LEN - 1) + VARSTR_HEADER_SIZE)
#define SYSTABLE_SCH_DB_NAME_LEN ((TSDB_DB_NAME_LEN - 1) + VARSTR_HEADER_SIZE)
#define SYSTABLE_SCH_COL_NAME_LEN ((TSDB_COL_NAME_LEN - 1) + VARSTR_HEADER_SIZE)
+// clang-format off
static const SSysDbTableSchema dnodesSchema[] = {
- {.name = "id", .bytes = 4, .type = TSDB_DATA_TYPE_INT},
- {.name = "endpoint", .bytes = TSDB_EP_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
- {.name = "vnodes", .bytes = 2, .type = TSDB_DATA_TYPE_SMALLINT},
- {.name = "support_vnodes", .bytes = 2, .type = TSDB_DATA_TYPE_SMALLINT},
- {.name = "status", .bytes = 10 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
- {.name = "create_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP},
- {.name = "note", .bytes = 256 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
+ {.name = "id", .bytes = 4, .type = TSDB_DATA_TYPE_INT, .sysInfo = true},
+ {.name = "endpoint", .bytes = TSDB_EP_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = true},
+ {.name = "vnodes", .bytes = 2, .type = TSDB_DATA_TYPE_SMALLINT, .sysInfo = true},
+ {.name = "support_vnodes", .bytes = 2, .type = TSDB_DATA_TYPE_SMALLINT, .sysInfo = true},
+ {.name = "status", .bytes = 10 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = true},
+ {.name = "create_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP, .sysInfo = true},
+ {.name = "note", .bytes = 256 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = true},
};
static const SSysDbTableSchema mnodesSchema[] = {
- {.name = "id", .bytes = 4, .type = TSDB_DATA_TYPE_INT},
- {.name = "endpoint", .bytes = TSDB_EP_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
- {.name = "role", .bytes = 12 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
- {.name = "status", .bytes = 9 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
- {.name = "create_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP},
+ {.name = "id", .bytes = 4, .type = TSDB_DATA_TYPE_INT, .sysInfo = true},
+ {.name = "endpoint", .bytes = TSDB_EP_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = true},
+ {.name = "role", .bytes = 12 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = true},
+ {.name = "status", .bytes = 9 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = true},
+ {.name = "create_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP, .sysInfo = true},
};
static const SSysDbTableSchema modulesSchema[] = {
- {.name = "id", .bytes = 4, .type = TSDB_DATA_TYPE_INT},
- {.name = "endpoint", .bytes = 134 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
- {.name = "module", .bytes = 10 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
+ {.name = "id", .bytes = 4, .type = TSDB_DATA_TYPE_INT, .sysInfo = true},
+ {.name = "endpoint", .bytes = 134 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = true},
+ {.name = "module", .bytes = 10 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = true},
};
static const SSysDbTableSchema qnodesSchema[] = {
- {.name = "id", .bytes = 4, .type = TSDB_DATA_TYPE_INT},
- {.name = "endpoint", .bytes = TSDB_EP_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
- {.name = "create_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP},
+ {.name = "id", .bytes = 4, .type = TSDB_DATA_TYPE_INT, .sysInfo = true},
+ {.name = "endpoint", .bytes = TSDB_EP_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = true},
+ {.name = "create_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP, .sysInfo = true},
};
static const SSysDbTableSchema snodesSchema[] = {
- {.name = "id", .bytes = 4, .type = TSDB_DATA_TYPE_INT},
- {.name = "endpoint", .bytes = TSDB_EP_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
- {.name = "create_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP},
+ {.name = "id", .bytes = 4, .type = TSDB_DATA_TYPE_INT, .sysInfo = true},
+ {.name = "endpoint", .bytes = TSDB_EP_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = true},
+ {.name = "create_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP, .sysInfo = true},
};
static const SSysDbTableSchema bnodesSchema[] = {
- {.name = "id", .bytes = 4, .type = TSDB_DATA_TYPE_INT},
- {.name = "endpoint", .bytes = TSDB_EP_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
- {.name = "create_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP},
+ {.name = "id", .bytes = 4, .type = TSDB_DATA_TYPE_INT, .sysInfo = true},
+ {.name = "endpoint", .bytes = TSDB_EP_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = true},
+ {.name = "create_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP, .sysInfo = true},
};
static const SSysDbTableSchema clusterSchema[] = {
- {.name = "id", .bytes = 8, .type = TSDB_DATA_TYPE_BIGINT},
- {.name = "name", .bytes = TSDB_CLUSTER_ID_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
- {.name = "uptime", .bytes = 4, .type = TSDB_DATA_TYPE_INT},
- {.name = "create_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP},
+ {.name = "id", .bytes = 8, .type = TSDB_DATA_TYPE_BIGINT, .sysInfo = true},
+ {.name = "name", .bytes = TSDB_CLUSTER_ID_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = true},
+ {.name = "uptime", .bytes = 4, .type = TSDB_DATA_TYPE_INT, .sysInfo = true},
+ {.name = "create_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP, .sysInfo = true},
};
static const SSysDbTableSchema userDBSchema[] = {
- {.name = "name", .bytes = SYSTABLE_SCH_DB_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR},
- {.name = "create_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP},
- {.name = "vgroups", .bytes = 4, .type = TSDB_DATA_TYPE_INT},
- {.name = "ntables", .bytes = 8, .type = TSDB_DATA_TYPE_BIGINT},
- {.name = "replica", .bytes = 1, .type = TSDB_DATA_TYPE_TINYINT},
- {.name = "strict", .bytes = TSDB_DB_STRICT_STR_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
- {.name = "duration", .bytes = 10 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
- {.name = "keep", .bytes = 32 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
- {.name = "buffer", .bytes = 4, .type = TSDB_DATA_TYPE_INT},
- {.name = "pagesize", .bytes = 4, .type = TSDB_DATA_TYPE_INT},
- {.name = "pages", .bytes = 4, .type = TSDB_DATA_TYPE_INT},
- {.name = "minrows", .bytes = 4, .type = TSDB_DATA_TYPE_INT},
- {.name = "maxrows", .bytes = 4, .type = TSDB_DATA_TYPE_INT},
- {.name = "comp", .bytes = 1, .type = TSDB_DATA_TYPE_TINYINT},
- {.name = "precision", .bytes = 2 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
- {.name = "status", .bytes = 10 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
- {.name = "retentions", .bytes = 60 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
- {.name = "single_stable", .bytes = 1, .type = TSDB_DATA_TYPE_BOOL},
- {.name = "cachemodel", .bytes = TSDB_CACHE_MODEL_STR_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
- {.name = "cachesize", .bytes = 4, .type = TSDB_DATA_TYPE_INT},
- {.name = "wal_level", .bytes = 1, .type = TSDB_DATA_TYPE_TINYINT},
- {.name = "wal_fsync_period", .bytes = 4, .type = TSDB_DATA_TYPE_INT},
- {.name = "wal_retention_period", .bytes = 4, .type = TSDB_DATA_TYPE_INT},
- {.name = "wal_retention_size", .bytes = 8, .type = TSDB_DATA_TYPE_BIGINT},
- {.name = "wal_roll_period", .bytes = 4, .type = TSDB_DATA_TYPE_INT},
- {.name = "wal_segment_size", .bytes = 8, .type = TSDB_DATA_TYPE_BIGINT},
+ {.name = "name", .bytes = SYSTABLE_SCH_DB_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false},
+ {.name = "create_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP, .sysInfo = false},
+ {.name = "vgroups", .bytes = 4, .type = TSDB_DATA_TYPE_INT, .sysInfo = true},
+ {.name = "ntables", .bytes = 8, .type = TSDB_DATA_TYPE_BIGINT, .sysInfo = false},
+ {.name = "replica", .bytes = 1, .type = TSDB_DATA_TYPE_TINYINT, .sysInfo = true},
+ {.name = "strict", .bytes = TSDB_DB_STRICT_STR_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = true},
+ {.name = "duration", .bytes = 10 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = true},
+ {.name = "keep", .bytes = 32 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = true},
+ {.name = "buffer", .bytes = 4, .type = TSDB_DATA_TYPE_INT, .sysInfo = true},
+ {.name = "pagesize", .bytes = 4, .type = TSDB_DATA_TYPE_INT, .sysInfo = true},
+ {.name = "pages", .bytes = 4, .type = TSDB_DATA_TYPE_INT, .sysInfo = true},
+ {.name = "minrows", .bytes = 4, .type = TSDB_DATA_TYPE_INT, .sysInfo = true},
+ {.name = "maxrows", .bytes = 4, .type = TSDB_DATA_TYPE_INT, .sysInfo = true},
+ {.name = "comp", .bytes = 1, .type = TSDB_DATA_TYPE_TINYINT, .sysInfo = true},
+ {.name = "precision", .bytes = 2 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false},
+ {.name = "status", .bytes = 10 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false},
+ {.name = "retentions", .bytes = 60 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = true},
+ {.name = "single_stable", .bytes = 1, .type = TSDB_DATA_TYPE_BOOL, .sysInfo = true},
+ {.name = "cachemodel", .bytes = TSDB_CACHE_MODEL_STR_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = true},
+ {.name = "cachesize", .bytes = 4, .type = TSDB_DATA_TYPE_INT, .sysInfo = true},
+ {.name = "wal_level", .bytes = 1, .type = TSDB_DATA_TYPE_TINYINT, .sysInfo = true},
+ {.name = "wal_fsync_period", .bytes = 4, .type = TSDB_DATA_TYPE_INT, .sysInfo = true},
+ {.name = "wal_retention_period", .bytes = 4, .type = TSDB_DATA_TYPE_INT, .sysInfo = true},
+ {.name = "wal_retention_size", .bytes = 8, .type = TSDB_DATA_TYPE_BIGINT, .sysInfo = true},
+ {.name = "wal_roll_period", .bytes = 4, .type = TSDB_DATA_TYPE_INT, .sysInfo = true},
+ {.name = "wal_segment_size", .bytes = 8, .type = TSDB_DATA_TYPE_BIGINT, .sysInfo = true},
+ {.name = "sst_trigger", .bytes = 2, .type = TSDB_DATA_TYPE_SMALLINT, .sysInfo = true},
+ {.name = "table_prefix", .bytes = 2, .type = TSDB_DATA_TYPE_SMALLINT, .sysInfo = true},
+ {.name = "table_suffix", .bytes = 2, .type = TSDB_DATA_TYPE_SMALLINT, .sysInfo = true},
};
static const SSysDbTableSchema userFuncSchema[] = {
- {.name = "name", .bytes = TSDB_FUNC_NAME_LEN - 1 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
- {.name = "comment", .bytes = PATH_MAX - 1 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
- {.name = "aggregate", .bytes = 4, .type = TSDB_DATA_TYPE_INT},
- {.name = "output_type", .bytes = TSDB_TYPE_STR_MAX_LEN - 1 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
- {.name = "create_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP},
- {.name = "code_len", .bytes = 4, .type = TSDB_DATA_TYPE_INT},
- {.name = "bufsize", .bytes = 4, .type = TSDB_DATA_TYPE_INT},
+ {.name = "name", .bytes = TSDB_FUNC_NAME_LEN - 1 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false},
+ {.name = "comment", .bytes = PATH_MAX - 1 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false},
+ {.name = "aggregate", .bytes = 4, .type = TSDB_DATA_TYPE_INT, .sysInfo = false},
+ {.name = "output_type", .bytes = TSDB_TYPE_STR_MAX_LEN - 1 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false},
+ {.name = "create_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP, .sysInfo = false},
+ {.name = "code_len", .bytes = 4, .type = TSDB_DATA_TYPE_INT, .sysInfo = false},
+ {.name = "bufsize", .bytes = 4, .type = TSDB_DATA_TYPE_INT, .sysInfo = false},
};
static const SSysDbTableSchema userIdxSchema[] = {
- {.name = "index_name", .bytes = SYSTABLE_SCH_TABLE_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR},
- {.name = "db_name", .bytes = SYSTABLE_SCH_DB_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR},
- {.name = "table_name", .bytes = SYSTABLE_SCH_TABLE_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR},
- {.name = "vgroup_id", .bytes = 4, .type = TSDB_DATA_TYPE_INT},
- {.name = "create_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP},
+ {.name = "index_name", .bytes = SYSTABLE_SCH_TABLE_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false},
+ {.name = "db_name", .bytes = SYSTABLE_SCH_DB_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false},
+ {.name = "table_name", .bytes = SYSTABLE_SCH_TABLE_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false},
+ {.name = "vgroup_id", .bytes = 4, .type = TSDB_DATA_TYPE_INT, .sysInfo = true},
+ {.name = "create_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP, .sysInfo = false},
};
static const SSysDbTableSchema userStbsSchema[] = {
- {.name = "stable_name", .bytes = SYSTABLE_SCH_TABLE_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR},
- {.name = "db_name", .bytes = SYSTABLE_SCH_DB_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR},
- {.name = "create_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP},
- {.name = "columns", .bytes = 4, .type = TSDB_DATA_TYPE_INT},
- {.name = "tags", .bytes = 4, .type = TSDB_DATA_TYPE_INT},
- {.name = "last_update", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP},
- {.name = "table_comment", .bytes = TSDB_TB_COMMENT_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
- {.name = "watermark", .bytes = 64 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
- {.name = "max_delay", .bytes = 64 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
- {.name = "rollup", .bytes = 128 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
+ {.name = "stable_name", .bytes = SYSTABLE_SCH_TABLE_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false},
+ {.name = "db_name", .bytes = SYSTABLE_SCH_DB_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false},
+ {.name = "create_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP, .sysInfo = false},
+ {.name = "columns", .bytes = 4, .type = TSDB_DATA_TYPE_INT, .sysInfo = false},
+ {.name = "tags", .bytes = 4, .type = TSDB_DATA_TYPE_INT, .sysInfo = false},
+ {.name = "last_update", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP, .sysInfo = false},
+ {.name = "table_comment", .bytes = TSDB_TB_COMMENT_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false},
+ {.name = "watermark", .bytes = 64 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false},
+ {.name = "max_delay", .bytes = 64 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false},
+ {.name = "rollup", .bytes = 128 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false},
};
static const SSysDbTableSchema streamSchema[] = {
- {.name = "stream_name", .bytes = SYSTABLE_SCH_DB_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR},
- {.name = "create_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP},
- {.name = "sql", .bytes = TSDB_SHOW_SQL_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
- {.name = "status", .bytes = 20 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
- {.name = "source_db", .bytes = SYSTABLE_SCH_DB_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR},
- {.name = "target_db", .bytes = SYSTABLE_SCH_DB_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR},
- {.name = "target_table", .bytes = SYSTABLE_SCH_TABLE_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR},
- {.name = "watermark", .bytes = 8, .type = TSDB_DATA_TYPE_BIGINT},
- {.name = "trigger", .bytes = 20 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
+ {.name = "stream_name", .bytes = SYSTABLE_SCH_DB_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false},
+ {.name = "create_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP, .sysInfo = false},
+ {.name = "sql", .bytes = TSDB_SHOW_SQL_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false},
+ {.name = "status", .bytes = 20 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false},
+ {.name = "source_db", .bytes = SYSTABLE_SCH_DB_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false},
+ {.name = "target_db", .bytes = SYSTABLE_SCH_DB_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false},
+ {.name = "target_table", .bytes = SYSTABLE_SCH_TABLE_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false},
+ {.name = "watermark", .bytes = 8, .type = TSDB_DATA_TYPE_BIGINT, .sysInfo = false},
+ {.name = "trigger", .bytes = 20 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false},
};
static const SSysDbTableSchema userTblsSchema[] = {
- {.name = "table_name", .bytes = SYSTABLE_SCH_TABLE_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR},
- {.name = "db_name", .bytes = SYSTABLE_SCH_DB_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR},
- {.name = "create_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP},
- {.name = "columns", .bytes = 4, .type = TSDB_DATA_TYPE_INT},
- {.name = "stable_name", .bytes = SYSTABLE_SCH_TABLE_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR},
- {.name = "uid", .bytes = 8, .type = TSDB_DATA_TYPE_BIGINT},
- {.name = "vgroup_id", .bytes = 4, .type = TSDB_DATA_TYPE_INT},
- {.name = "ttl", .bytes = 4, .type = TSDB_DATA_TYPE_INT},
- {.name = "table_comment", .bytes = TSDB_TB_COMMENT_LEN - 1 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
- {.name = "type", .bytes = 21 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
+ {.name = "table_name", .bytes = SYSTABLE_SCH_TABLE_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false},
+ {.name = "db_name", .bytes = SYSTABLE_SCH_DB_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false},
+ {.name = "create_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP, .sysInfo = false},
+ {.name = "columns", .bytes = 4, .type = TSDB_DATA_TYPE_INT, .sysInfo = false},
+ {.name = "stable_name", .bytes = SYSTABLE_SCH_TABLE_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false},
+ {.name = "uid", .bytes = 8, .type = TSDB_DATA_TYPE_BIGINT, .sysInfo = false},
+ {.name = "vgroup_id", .bytes = 4, .type = TSDB_DATA_TYPE_INT, .sysInfo = true},
+ {.name = "ttl", .bytes = 4, .type = TSDB_DATA_TYPE_INT, .sysInfo = false},
+ {.name = "table_comment", .bytes = TSDB_TB_COMMENT_LEN - 1 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false},
+ {.name = "type", .bytes = 21 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false},
};
static const SSysDbTableSchema userTagsSchema[] = {
- {.name = "table_name", .bytes = SYSTABLE_SCH_TABLE_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR},
- {.name = "db_name", .bytes = SYSTABLE_SCH_DB_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR},
- {.name = "stable_name", .bytes = SYSTABLE_SCH_TABLE_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR},
- {.name = "tag_name", .bytes = TSDB_COL_NAME_LEN - 1 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
- {.name = "tag_type", .bytes = 32 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
- {.name = "tag_value", .bytes = TSDB_MAX_TAGS_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
+ {.name = "table_name", .bytes = SYSTABLE_SCH_TABLE_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false},
+ {.name = "db_name", .bytes = SYSTABLE_SCH_DB_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false},
+ {.name = "stable_name", .bytes = SYSTABLE_SCH_TABLE_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false},
+ {.name = "tag_name", .bytes = TSDB_COL_NAME_LEN - 1 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false},
+ {.name = "tag_type", .bytes = 32 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false},
+ {.name = "tag_value", .bytes = TSDB_MAX_TAGS_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false},
};
static const SSysDbTableSchema userTblDistSchema[] = {
- {.name = "db_name", .bytes = 32 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
- {.name = "table_name", .bytes = SYSTABLE_SCH_DB_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR},
- {.name = "distributed_histogram", .bytes = 500 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
- {.name = "min_of_rows", .bytes = 4, .type = TSDB_DATA_TYPE_INT},
- {.name = "max_of_rows", .bytes = 4, .type = TSDB_DATA_TYPE_INT},
- {.name = "avg_of_rows", .bytes = 4, .type = TSDB_DATA_TYPE_INT},
- {.name = "stddev_of_rows", .bytes = 4, .type = TSDB_DATA_TYPE_INT},
- {.name = "rows", .bytes = 8, .type = TSDB_DATA_TYPE_BIGINT},
- {.name = "blocks", .bytes = 4, .type = TSDB_DATA_TYPE_INT},
- {.name = "storage_size", .bytes = 8, .type = TSDB_DATA_TYPE_BIGINT},
- {.name = "compression_ratio", .bytes = 8, .type = TSDB_DATA_TYPE_DOUBLE},
- {.name = "rows_in_mem", .bytes = 4, .type = TSDB_DATA_TYPE_INT},
- {.name = "seek_header_time", .bytes = 4, .type = TSDB_DATA_TYPE_INT},
+ {.name = "db_name", .bytes = 32 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = true},
+ {.name = "table_name", .bytes = SYSTABLE_SCH_DB_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = true},
+ {.name = "distributed_histogram", .bytes = 500 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = true},
+ {.name = "min_of_rows", .bytes = 4, .type = TSDB_DATA_TYPE_INT, .sysInfo = true},
+ {.name = "max_of_rows", .bytes = 4, .type = TSDB_DATA_TYPE_INT, .sysInfo = true},
+ {.name = "avg_of_rows", .bytes = 4, .type = TSDB_DATA_TYPE_INT, .sysInfo = true},
+ {.name = "stddev_of_rows", .bytes = 4, .type = TSDB_DATA_TYPE_INT, .sysInfo = true},
+ {.name = "rows", .bytes = 8, .type = TSDB_DATA_TYPE_BIGINT, .sysInfo = true},
+ {.name = "blocks", .bytes = 4, .type = TSDB_DATA_TYPE_INT, .sysInfo = true},
+ {.name = "storage_size", .bytes = 8, .type = TSDB_DATA_TYPE_BIGINT, .sysInfo = true},
+ {.name = "compression_ratio", .bytes = 8, .type = TSDB_DATA_TYPE_DOUBLE, .sysInfo = true},
+ {.name = "rows_in_mem", .bytes = 4, .type = TSDB_DATA_TYPE_INT, .sysInfo = true},
+ {.name = "seek_header_time", .bytes = 4, .type = TSDB_DATA_TYPE_INT, .sysInfo = true},
};
static const SSysDbTableSchema userUsersSchema[] = {
- {.name = "name", .bytes = TSDB_USER_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
- {.name = "super", .bytes = 1, .type = TSDB_DATA_TYPE_TINYINT},
- {.name = "enable", .bytes = 1, .type = TSDB_DATA_TYPE_TINYINT},
- {.name = "sysinfo", .bytes = 1, .type = TSDB_DATA_TYPE_TINYINT},
- {.name = "create_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP},
+ {.name = "name", .bytes = TSDB_USER_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false},
+ {.name = "super", .bytes = 1, .type = TSDB_DATA_TYPE_TINYINT, .sysInfo = false},
+ {.name = "enable", .bytes = 1, .type = TSDB_DATA_TYPE_TINYINT, .sysInfo = false},
+ {.name = "sysinfo", .bytes = 1, .type = TSDB_DATA_TYPE_TINYINT, .sysInfo = false},
+ {.name = "create_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP, .sysInfo = false},
};
GRANTS_SCHEMA;
static const SSysDbTableSchema vgroupsSchema[] = {
- {.name = "vgroup_id", .bytes = 4, .type = TSDB_DATA_TYPE_INT},
- {.name = "db_name", .bytes = SYSTABLE_SCH_DB_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR},
- {.name = "tables", .bytes = 4, .type = TSDB_DATA_TYPE_INT},
- {.name = "v1_dnode", .bytes = 4, .type = TSDB_DATA_TYPE_INT},
- {.name = "v1_status", .bytes = 10 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
- {.name = "v2_dnode", .bytes = 4, .type = TSDB_DATA_TYPE_INT},
- {.name = "v2_status", .bytes = 10 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
- {.name = "v3_dnode", .bytes = 4, .type = TSDB_DATA_TYPE_INT},
- {.name = "v3_status", .bytes = 10 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
- {.name = "status", .bytes = 12 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
- {.name = "nfiles", .bytes = 4, .type = TSDB_DATA_TYPE_INT},
- {.name = "file_size", .bytes = 4, .type = TSDB_DATA_TYPE_INT},
- {.name = "tsma", .bytes = 1, .type = TSDB_DATA_TYPE_TINYINT},
+ {.name = "vgroup_id", .bytes = 4, .type = TSDB_DATA_TYPE_INT, .sysInfo = true},
+ {.name = "db_name", .bytes = SYSTABLE_SCH_DB_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = true},
+ {.name = "tables", .bytes = 4, .type = TSDB_DATA_TYPE_INT, .sysInfo = true},
+ {.name = "v1_dnode", .bytes = 4, .type = TSDB_DATA_TYPE_INT, .sysInfo = true},
+ {.name = "v1_status", .bytes = 10 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = true},
+ {.name = "v2_dnode", .bytes = 4, .type = TSDB_DATA_TYPE_INT, .sysInfo = true},
+ {.name = "v2_status", .bytes = 10 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = true},
+ {.name = "v3_dnode", .bytes = 4, .type = TSDB_DATA_TYPE_INT, .sysInfo = true},
+ {.name = "v3_status", .bytes = 10 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = true},
+ {.name = "status", .bytes = 12 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = true},
+ {.name = "cacheload", .bytes = 4, .type = TSDB_DATA_TYPE_INT, .sysInfo = true},
+ {.name = "nfiles", .bytes = 4, .type = TSDB_DATA_TYPE_INT, .sysInfo = true},
+ {.name = "file_size", .bytes = 4, .type = TSDB_DATA_TYPE_INT, .sysInfo = true},
+ {.name = "tsma", .bytes = 1, .type = TSDB_DATA_TYPE_TINYINT, .sysInfo = true},
};
static const SSysDbTableSchema smaSchema[] = {
- {.name = "sma_name", .bytes = SYSTABLE_SCH_TABLE_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR},
- {.name = "create_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP},
- {.name = "stable_name", .bytes = SYSTABLE_SCH_TABLE_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR},
- {.name = "vgroup_id", .bytes = 4, .type = TSDB_DATA_TYPE_INT},
+ {.name = "sma_name", .bytes = SYSTABLE_SCH_TABLE_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false},
+ {.name = "create_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP, .sysInfo = false},
+ {.name = "stable_name", .bytes = SYSTABLE_SCH_TABLE_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false},
+ {.name = "vgroup_id", .bytes = 4, .type = TSDB_DATA_TYPE_INT, .sysInfo = true},
};
static const SSysDbTableSchema transSchema[] = {
- {.name = "id", .bytes = 4, .type = TSDB_DATA_TYPE_INT},
- {.name = "create_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP},
- {.name = "stage", .bytes = TSDB_TRANS_STAGE_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
- {.name = "db1", .bytes = SYSTABLE_SCH_DB_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR},
- {.name = "db2", .bytes = SYSTABLE_SCH_DB_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR},
- {.name = "failed_times", .bytes = 4, .type = TSDB_DATA_TYPE_INT},
- {.name = "last_exec_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP},
- {.name = "last_action_info",
- .bytes = (TSDB_TRANS_ERROR_LEN - 1) + VARSTR_HEADER_SIZE,
- .type = TSDB_DATA_TYPE_VARCHAR},
+ {.name = "id", .bytes = 4, .type = TSDB_DATA_TYPE_INT, .sysInfo = false},
+ {.name = "create_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP, .sysInfo = false},
+ {.name = "stage", .bytes = TSDB_TRANS_STAGE_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false},
+ {.name = "db1", .bytes = SYSTABLE_SCH_DB_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false},
+ {.name = "db2", .bytes = SYSTABLE_SCH_DB_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false},
+ {.name = "failed_times", .bytes = 4, .type = TSDB_DATA_TYPE_INT, .sysInfo = false},
+ {.name = "last_exec_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP, .sysInfo = false},
+ {.name = "last_action_info", .bytes = (TSDB_TRANS_ERROR_LEN - 1) + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
};
static const SSysDbTableSchema configSchema[] = {
- {.name = "name", .bytes = TSDB_CONFIG_OPTION_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
- {.name = "value", .bytes = TSDB_CONFIG_VALUE_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
+ {.name = "name", .bytes = TSDB_CONFIG_OPTION_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = true},
+ {.name = "value", .bytes = TSDB_CONFIG_VALUE_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = true},
};
static const SSysDbTableSchema variablesSchema[] = {
{.name = "dnode_id", .bytes = 4, .type = TSDB_DATA_TYPE_INT},
- {.name = "name", .bytes = TSDB_CONFIG_OPTION_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
- {.name = "value", .bytes = TSDB_CONFIG_VALUE_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
+ {.name = "name", .bytes = TSDB_CONFIG_OPTION_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = true},
+ {.name = "value", .bytes = TSDB_CONFIG_VALUE_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = true},
+};
+
+static const SSysDbTableSchema topicSchema[] = {
+ {.name = "topic_name", .bytes = SYSTABLE_SCH_TABLE_NAME_LEN, .type = TSDB_DATA_TYPE_BINARY, .sysInfo = false},
+ {.name = "db_name", .bytes = SYSTABLE_SCH_DB_NAME_LEN, .type = TSDB_DATA_TYPE_BINARY, .sysInfo = false},
+ {.name = "create_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP, .sysInfo = false},
+ {.name = "sql", .bytes = TSDB_SHOW_SQL_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_BINARY, .sysInfo = false},
+ // TODO config
+};
+
+
+static const SSysDbTableSchema subscriptionSchema[] = {
+ {.name = "topic_name", .bytes = TSDB_TOPIC_FNAME_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_BINARY, .sysInfo = false},
+ {.name = "consumer_group", .bytes = TSDB_CGROUP_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_BINARY, .sysInfo = false},
+ {.name = "vgroup_id", .bytes = 4, .type = TSDB_DATA_TYPE_INT, .sysInfo = false},
+ {.name = "consumer_id", .bytes = 8, .type = TSDB_DATA_TYPE_BIGINT, .sysInfo = false},
+};
+
+static const SSysDbTableSchema vnodesSchema[] = {
+ {.name = "vgroup_id", .bytes = 4, .type = TSDB_DATA_TYPE_INT, .sysInfo = true},
+ {.name = "replica", .bytes = 1, .type = TSDB_DATA_TYPE_TINYINT, .sysInfo = true},
+ {.name = "status", .bytes = 9 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = true},
+ {.name = "db_name", .bytes = SYSTABLE_SCH_DB_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = true},
+ {.name = "dnode_id", .bytes = 4, .type = TSDB_DATA_TYPE_INT, .sysInfo = true},
+ {.name = "dnode_ep", .bytes = TSDB_EP_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = true},
};
static const SSysTableMeta infosMeta[] = {
- {TSDB_INS_TABLE_DNODES, dnodesSchema, tListLen(dnodesSchema)},
- {TSDB_INS_TABLE_MNODES, mnodesSchema, tListLen(mnodesSchema)},
- {TSDB_INS_TABLE_MODULES, modulesSchema, tListLen(modulesSchema)},
- {TSDB_INS_TABLE_QNODES, qnodesSchema, tListLen(qnodesSchema)},
+ {TSDB_INS_TABLE_DNODES, dnodesSchema, tListLen(dnodesSchema), true},
+ {TSDB_INS_TABLE_MNODES, mnodesSchema, tListLen(mnodesSchema), true},
+ {TSDB_INS_TABLE_MODULES, modulesSchema, tListLen(modulesSchema), true},
+ {TSDB_INS_TABLE_QNODES, qnodesSchema, tListLen(qnodesSchema), true},
// {TSDB_INS_TABLE_SNODES, snodesSchema, tListLen(snodesSchema)},
// {TSDB_INS_TABLE_BNODES, bnodesSchema, tListLen(bnodesSchema)},
- {TSDB_INS_TABLE_CLUSTER, clusterSchema, tListLen(clusterSchema)},
- {TSDB_INS_TABLE_DATABASES, userDBSchema, tListLen(userDBSchema)},
- {TSDB_INS_TABLE_FUNCTIONS, userFuncSchema, tListLen(userFuncSchema)},
- {TSDB_INS_TABLE_INDEXES, userIdxSchema, tListLen(userIdxSchema)},
- {TSDB_INS_TABLE_STABLES, userStbsSchema, tListLen(userStbsSchema)},
- {TSDB_INS_TABLE_TABLES, userTblsSchema, tListLen(userTblsSchema)},
- {TSDB_INS_TABLE_TAGS, userTagsSchema, tListLen(userTagsSchema)},
+ {TSDB_INS_TABLE_CLUSTER, clusterSchema, tListLen(clusterSchema), true},
+ {TSDB_INS_TABLE_DATABASES, userDBSchema, tListLen(userDBSchema), false},
+ {TSDB_INS_TABLE_FUNCTIONS, userFuncSchema, tListLen(userFuncSchema), false},
+ {TSDB_INS_TABLE_INDEXES, userIdxSchema, tListLen(userIdxSchema), false},
+ {TSDB_INS_TABLE_STABLES, userStbsSchema, tListLen(userStbsSchema), false},
+ {TSDB_INS_TABLE_TABLES, userTblsSchema, tListLen(userTblsSchema), false},
+ {TSDB_INS_TABLE_TAGS, userTagsSchema, tListLen(userTagsSchema), false},
// {TSDB_INS_TABLE_TABLE_DISTRIBUTED, userTblDistSchema, tListLen(userTblDistSchema)},
- {TSDB_INS_TABLE_USERS, userUsersSchema, tListLen(userUsersSchema)},
- {TSDB_INS_TABLE_LICENCES, grantsSchema, tListLen(grantsSchema)},
- {TSDB_INS_TABLE_VGROUPS, vgroupsSchema, tListLen(vgroupsSchema)},
- {TSDB_INS_TABLE_CONFIGS, configSchema, tListLen(configSchema)},
- {TSDB_INS_TABLE_DNODE_VARIABLES, variablesSchema, tListLen(variablesSchema)},
+ {TSDB_INS_TABLE_USERS, userUsersSchema, tListLen(userUsersSchema), false},
+ {TSDB_INS_TABLE_LICENCES, grantsSchema, tListLen(grantsSchema), true},
+ {TSDB_INS_TABLE_VGROUPS, vgroupsSchema, tListLen(vgroupsSchema), true},
+ {TSDB_INS_TABLE_CONFIGS, configSchema, tListLen(configSchema), true},
+ {TSDB_INS_TABLE_DNODE_VARIABLES, variablesSchema, tListLen(variablesSchema), true},
+ {TSDB_INS_TABLE_TOPICS, topicSchema, tListLen(topicSchema), false},
+ {TSDB_INS_TABLE_SUBSCRIPTIONS, subscriptionSchema, tListLen(subscriptionSchema), false},
+ {TSDB_INS_TABLE_STREAMS, streamSchema, tListLen(streamSchema), false},
+ {TSDB_INS_TABLE_VNODES, vnodesSchema, tListLen(vnodesSchema), true},
};
static const SSysDbTableSchema connectionsSchema[] = {
- {.name = "conn_id", .bytes = 4, .type = TSDB_DATA_TYPE_UINT},
- {.name = "user", .bytes = TSDB_USER_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_BINARY},
- {.name = "app", .bytes = TSDB_APP_NAME_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_BINARY},
- {.name = "pid", .bytes = 4, .type = TSDB_DATA_TYPE_UINT},
- {.name = "end_point", .bytes = TSDB_EP_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_BINARY},
- {.name = "login_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP},
- {.name = "last_access", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP},
+ {.name = "conn_id", .bytes = 4, .type = TSDB_DATA_TYPE_UINT, .sysInfo = false},
+ {.name = "user", .bytes = TSDB_USER_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_BINARY, .sysInfo = false},
+ {.name = "app", .bytes = TSDB_APP_NAME_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_BINARY, .sysInfo = false},
+ {.name = "pid", .bytes = 4, .type = TSDB_DATA_TYPE_UINT, .sysInfo = false},
+ {.name = "end_point", .bytes = TSDB_EP_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_BINARY, .sysInfo = false},
+ {.name = "login_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP, .sysInfo = false},
+ {.name = "last_access", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP, .sysInfo = false},
};
-static const SSysDbTableSchema topicSchema[] = {
- {.name = "topic_name", .bytes = SYSTABLE_SCH_TABLE_NAME_LEN, .type = TSDB_DATA_TYPE_BINARY},
- {.name = "db_name", .bytes = SYSTABLE_SCH_DB_NAME_LEN, .type = TSDB_DATA_TYPE_BINARY},
- {.name = "create_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP},
- {.name = "sql", .bytes = TSDB_SHOW_SQL_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_BINARY},
- // TODO config
-};
static const SSysDbTableSchema consumerSchema[] = {
- {.name = "consumer_id", .bytes = 8, .type = TSDB_DATA_TYPE_BIGINT},
- {.name = "consumer_group", .bytes = SYSTABLE_SCH_TABLE_NAME_LEN, .type = TSDB_DATA_TYPE_BINARY},
- {.name = "client_id", .bytes = SYSTABLE_SCH_TABLE_NAME_LEN, .type = TSDB_DATA_TYPE_BINARY},
- {.name = "status", .bytes = 20 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_BINARY},
- {.name = "topics", .bytes = TSDB_TOPIC_FNAME_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_BINARY},
- /*{.name = "end_point", .bytes = TSDB_IPv4ADDR_LEN + 6 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},*/
- {.name = "up_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP},
- {.name = "subscribe_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP},
- {.name = "rebalance_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP},
-};
-
-static const SSysDbTableSchema subscriptionSchema[] = {
- {.name = "topic_name", .bytes = TSDB_TOPIC_FNAME_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_BINARY},
- {.name = "consumer_group", .bytes = TSDB_CGROUP_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_BINARY},
- {.name = "vgroup_id", .bytes = 4, .type = TSDB_DATA_TYPE_INT},
- {.name = "consumer_id", .bytes = 8, .type = TSDB_DATA_TYPE_BIGINT},
+ {.name = "consumer_id", .bytes = 8, .type = TSDB_DATA_TYPE_BIGINT, .sysInfo = false},
+ {.name = "consumer_group", .bytes = SYSTABLE_SCH_TABLE_NAME_LEN, .type = TSDB_DATA_TYPE_BINARY, .sysInfo = false},
+ {.name = "client_id", .bytes = SYSTABLE_SCH_TABLE_NAME_LEN, .type = TSDB_DATA_TYPE_BINARY, .sysInfo = false},
+ {.name = "status", .bytes = 20 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_BINARY, .sysInfo = false},
+ {.name = "topics", .bytes = TSDB_TOPIC_FNAME_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_BINARY, .sysInfo = false},
+ /*{.name = "end_point", .bytes = TSDB_IPv4ADDR_LEN + 6 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false},*/
+ {.name = "up_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP, .sysInfo = false},
+ {.name = "subscribe_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP, .sysInfo = false},
+ {.name = "rebalance_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP, .sysInfo = false},
};
static const SSysDbTableSchema offsetSchema[] = {
- {.name = "topic_name", .bytes = SYSTABLE_SCH_TABLE_NAME_LEN, .type = TSDB_DATA_TYPE_BINARY},
- {.name = "group_id", .bytes = SYSTABLE_SCH_TABLE_NAME_LEN, .type = TSDB_DATA_TYPE_BINARY},
- {.name = "vgroup_id", .bytes = 4, .type = TSDB_DATA_TYPE_INT},
- {.name = "committed_offset", .bytes = 8, .type = TSDB_DATA_TYPE_BIGINT},
- {.name = "current_offset", .bytes = 8, .type = TSDB_DATA_TYPE_BIGINT},
- {.name = "skip_log_cnt", .bytes = 8, .type = TSDB_DATA_TYPE_BIGINT},
+ {.name = "topic_name", .bytes = SYSTABLE_SCH_TABLE_NAME_LEN, .type = TSDB_DATA_TYPE_BINARY, .sysInfo = false},
+ {.name = "group_id", .bytes = SYSTABLE_SCH_TABLE_NAME_LEN, .type = TSDB_DATA_TYPE_BINARY, .sysInfo = false},
+ {.name = "vgroup_id", .bytes = 4, .type = TSDB_DATA_TYPE_INT, .sysInfo = false},
+ {.name = "committed_offset", .bytes = 8, .type = TSDB_DATA_TYPE_BIGINT, .sysInfo = false},
+ {.name = "current_offset", .bytes = 8, .type = TSDB_DATA_TYPE_BIGINT, .sysInfo = false},
+ {.name = "skip_log_cnt", .bytes = 8, .type = TSDB_DATA_TYPE_BIGINT, .sysInfo = false},
};
static const SSysDbTableSchema querySchema[] = {
- {.name = "kill_id", .bytes = TSDB_QUERY_ID_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
- {.name = "query_id", .bytes = 8, .type = TSDB_DATA_TYPE_UBIGINT},
- {.name = "conn_id", .bytes = 4, .type = TSDB_DATA_TYPE_UINT},
- {.name = "app", .bytes = TSDB_APP_NAME_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
- {.name = "pid", .bytes = 4, .type = TSDB_DATA_TYPE_INT},
- {.name = "user", .bytes = TSDB_USER_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
- {.name = "end_point", .bytes = TSDB_IPv4ADDR_LEN + 6 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
- {.name = "create_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP},
- {.name = "exec_usec", .bytes = 8, .type = TSDB_DATA_TYPE_BIGINT},
- {.name = "stable_query", .bytes = 1, .type = TSDB_DATA_TYPE_BOOL},
- {.name = "sub_num", .bytes = 4, .type = TSDB_DATA_TYPE_INT},
- {.name = "sub_status", .bytes = TSDB_SHOW_SUBQUERY_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
- {.name = "sql", .bytes = TSDB_SHOW_SQL_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
+ {.name = "kill_id", .bytes = TSDB_QUERY_ID_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false},
+ {.name = "query_id", .bytes = 8, .type = TSDB_DATA_TYPE_UBIGINT, .sysInfo = false},
+ {.name = "conn_id", .bytes = 4, .type = TSDB_DATA_TYPE_UINT, .sysInfo = false},
+ {.name = "app", .bytes = TSDB_APP_NAME_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false},
+ {.name = "pid", .bytes = 4, .type = TSDB_DATA_TYPE_INT, .sysInfo = false},
+ {.name = "user", .bytes = TSDB_USER_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false},
+ {.name = "end_point", .bytes = TSDB_IPv4ADDR_LEN + 6 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false},
+ {.name = "create_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP, .sysInfo = false},
+ {.name = "exec_usec", .bytes = 8, .type = TSDB_DATA_TYPE_BIGINT, .sysInfo = false},
+ {.name = "stable_query", .bytes = 1, .type = TSDB_DATA_TYPE_BOOL, .sysInfo = false},
+ {.name = "sub_num", .bytes = 4, .type = TSDB_DATA_TYPE_INT, .sysInfo = false},
+ {.name = "sub_status", .bytes = TSDB_SHOW_SUBQUERY_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false},
+ {.name = "sql", .bytes = TSDB_SHOW_SQL_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false},
};
static const SSysDbTableSchema appSchema[] = {
- {.name = "app_id", .bytes = 8, .type = TSDB_DATA_TYPE_UBIGINT},
- {.name = "ip", .bytes = TSDB_IPv4ADDR_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
- {.name = "pid", .bytes = 4, .type = TSDB_DATA_TYPE_INT},
- {.name = "name", .bytes = TSDB_APP_NAME_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
- {.name = "start_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP},
- {.name = "insert_req", .bytes = 8, .type = TSDB_DATA_TYPE_UBIGINT},
- {.name = "insert_row", .bytes = 8, .type = TSDB_DATA_TYPE_UBIGINT},
- {.name = "insert_time", .bytes = 8, .type = TSDB_DATA_TYPE_UBIGINT},
- {.name = "insert_bytes", .bytes = 8, .type = TSDB_DATA_TYPE_UBIGINT},
- {.name = "fetch_bytes", .bytes = 8, .type = TSDB_DATA_TYPE_UBIGINT},
- {.name = "query_time", .bytes = 8, .type = TSDB_DATA_TYPE_UBIGINT},
- {.name = "slow_query", .bytes = 8, .type = TSDB_DATA_TYPE_UBIGINT},
- {.name = "total_req", .bytes = 8, .type = TSDB_DATA_TYPE_UBIGINT},
- {.name = "current_req", .bytes = 8, .type = TSDB_DATA_TYPE_UBIGINT},
- {.name = "last_access", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP},
+ {.name = "app_id", .bytes = 8, .type = TSDB_DATA_TYPE_UBIGINT, .sysInfo = false},
+ {.name = "ip", .bytes = TSDB_IPv4ADDR_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false},
+ {.name = "pid", .bytes = 4, .type = TSDB_DATA_TYPE_INT, .sysInfo = false},
+ {.name = "name", .bytes = TSDB_APP_NAME_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false},
+ {.name = "start_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP, .sysInfo = false},
+ {.name = "insert_req", .bytes = 8, .type = TSDB_DATA_TYPE_UBIGINT, .sysInfo = false},
+ {.name = "insert_row", .bytes = 8, .type = TSDB_DATA_TYPE_UBIGINT, .sysInfo = false},
+ {.name = "insert_time", .bytes = 8, .type = TSDB_DATA_TYPE_UBIGINT, .sysInfo = false},
+ {.name = "insert_bytes", .bytes = 8, .type = TSDB_DATA_TYPE_UBIGINT, .sysInfo = false},
+ {.name = "fetch_bytes", .bytes = 8, .type = TSDB_DATA_TYPE_UBIGINT, .sysInfo = false},
+ {.name = "query_time", .bytes = 8, .type = TSDB_DATA_TYPE_UBIGINT, .sysInfo = false},
+ {.name = "slow_query", .bytes = 8, .type = TSDB_DATA_TYPE_UBIGINT, .sysInfo = false},
+ {.name = "total_req", .bytes = 8, .type = TSDB_DATA_TYPE_UBIGINT, .sysInfo = false},
+ {.name = "current_req", .bytes = 8, .type = TSDB_DATA_TYPE_UBIGINT, .sysInfo = false},
+ {.name = "last_access", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP, .sysInfo = false},
};
static const SSysTableMeta perfsMeta[] = {
- {TSDB_PERFS_TABLE_CONNECTIONS, connectionsSchema, tListLen(connectionsSchema)},
- {TSDB_PERFS_TABLE_QUERIES, querySchema, tListLen(querySchema)},
- {TSDB_PERFS_TABLE_TOPICS, topicSchema, tListLen(topicSchema)},
- {TSDB_PERFS_TABLE_CONSUMERS, consumerSchema, tListLen(consumerSchema)},
- {TSDB_PERFS_TABLE_SUBSCRIPTIONS, subscriptionSchema, tListLen(subscriptionSchema)},
+ {TSDB_PERFS_TABLE_CONNECTIONS, connectionsSchema, tListLen(connectionsSchema), false},
+ {TSDB_PERFS_TABLE_QUERIES, querySchema, tListLen(querySchema), false},
+ {TSDB_PERFS_TABLE_CONSUMERS, consumerSchema, tListLen(consumerSchema), false},
// {TSDB_PERFS_TABLE_OFFSETS, offsetSchema, tListLen(offsetSchema)},
- {TSDB_PERFS_TABLE_TRANS, transSchema, tListLen(transSchema)},
- {TSDB_PERFS_TABLE_SMAS, smaSchema, tListLen(smaSchema)},
- {TSDB_PERFS_TABLE_STREAMS, streamSchema, tListLen(streamSchema)},
- {TSDB_PERFS_TABLE_APPS, appSchema, tListLen(appSchema)}};
+ {TSDB_PERFS_TABLE_TRANS, transSchema, tListLen(transSchema), false},
+ // {TSDB_PERFS_TABLE_SMAS, smaSchema, tListLen(smaSchema), false},
+ {TSDB_PERFS_TABLE_APPS, appSchema, tListLen(appSchema), false}};
+// clang-format on
void getInfosDbMeta(const SSysTableMeta** pInfosTableMeta, size_t* size) {
if (pInfosTableMeta) {
@@ -370,3 +388,26 @@ void getPerfDbMeta(const SSysTableMeta** pPerfsTableMeta, size_t* size) {
*size = tListLen(perfsMeta);
}
}
+
+void getVisibleInfosTablesNum(bool sysInfo, size_t* size) {
+ if (sysInfo) {
+ getInfosDbMeta(NULL, size);
+ return;
+ }
+ *size = 0;
+ const SSysTableMeta* pMeta = NULL;
+ size_t totalNum = 0;
+ getInfosDbMeta(&pMeta, &totalNum);
+ for (size_t i = 0; i < totalNum; ++i) {
+ if (!pMeta[i].sysInfo) {
+ ++(*size);
+ }
+ }
+}
+
+bool invisibleColumn(bool sysInfo, int8_t tableType, int8_t flags) {
+ if (sysInfo || TSDB_SYSTEM_TABLE != tableType) {
+ return false;
+ }
+ return 0 != (flags & COL_IS_SYSINFO);
+}
diff --git a/source/common/src/tdatablock.c b/source/common/src/tdatablock.c
index c65e966046912edb6f8c0ca77db3f55d24710785..16b8e55cf7a1fe57d9e984ad6d434297794aa07d 100644
--- a/source/common/src/tdatablock.c
+++ b/source/common/src/tdatablock.c
@@ -140,7 +140,8 @@ int32_t colDataReserve(SColumnInfoData* pColumnInfoData, size_t newSize) {
return TSDB_CODE_SUCCESS;
}
-static void doCopyNItems(struct SColumnInfoData* pColumnInfoData, int32_t currentRow, const char* pData, int32_t itemLen, int32_t numOfRows) {
+static void doCopyNItems(struct SColumnInfoData* pColumnInfoData, int32_t currentRow, const char* pData,
+ int32_t itemLen, int32_t numOfRows) {
ASSERT(pColumnInfoData->info.bytes >= itemLen);
size_t start = 1;
@@ -148,21 +149,23 @@ static void doCopyNItems(struct SColumnInfoData* pColumnInfoData, int32_t curren
memcpy(pColumnInfoData->pData, pData, itemLen);
int32_t t = 0;
- int32_t count = log(numOfRows)/log(2);
- while(t < count) {
+ int32_t count = log(numOfRows) / log(2);
+ while (t < count) {
int32_t xlen = 1 << t;
- memcpy(pColumnInfoData->pData + start * itemLen + pColumnInfoData->varmeta.length, pColumnInfoData->pData, xlen * itemLen);
+ memcpy(pColumnInfoData->pData + start * itemLen + pColumnInfoData->varmeta.length, pColumnInfoData->pData,
+ xlen * itemLen);
t += 1;
start += xlen;
}
// the tail part
if (numOfRows > start) {
- memcpy(pColumnInfoData->pData + start * itemLen + currentRow * itemLen, pColumnInfoData->pData, (numOfRows - start) * itemLen);
+ memcpy(pColumnInfoData->pData + start * itemLen + currentRow * itemLen, pColumnInfoData->pData,
+ (numOfRows - start) * itemLen);
}
if (IS_VAR_DATA_TYPE(pColumnInfoData->info.type)) {
- for(int32_t i = 0; i < numOfRows; ++i) {
+ for (int32_t i = 0; i < numOfRows; ++i) {
pColumnInfoData->varmeta.offset[i + currentRow] = pColumnInfoData->varmeta.length + i * itemLen;
}
@@ -170,7 +173,8 @@ static void doCopyNItems(struct SColumnInfoData* pColumnInfoData, int32_t curren
}
}
-int32_t colDataAppendNItems(SColumnInfoData* pColumnInfoData, uint32_t currentRow, const char* pData, uint32_t numOfRows) {
+int32_t colDataAppendNItems(SColumnInfoData* pColumnInfoData, uint32_t currentRow, const char* pData,
+ uint32_t numOfRows) {
ASSERT(pData != NULL && pColumnInfoData != NULL);
int32_t len = pColumnInfoData->info.bytes;
@@ -278,7 +282,7 @@ int32_t colDataMergeCol(SColumnInfoData* pColumnInfoData, int32_t numOfRow1, int
} else {
if (finalNumOfRows > *capacity || (numOfRow1 == 0 && pColumnInfoData->info.bytes != 0)) {
// all data may be null, when the pColumnInfoData->info.type == 0, bytes == 0;
-// ASSERT(finalNumOfRows * pColumnInfoData->info.bytes);
+ // ASSERT(finalNumOfRows * pColumnInfoData->info.bytes);
char* tmp = taosMemoryRealloc(pColumnInfoData->pData, finalNumOfRows * pColumnInfoData->info.bytes);
if (tmp == NULL) {
return TSDB_CODE_VND_OUT_OF_MEMORY;
@@ -557,7 +561,7 @@ int32_t blockDataToBuf(char* buf, const SSDataBlock* pBlock) {
}
int32_t blockDataFromBuf(SSDataBlock* pBlock, const char* buf) {
- int32_t numOfRows = *(int32_t*) buf;
+ int32_t numOfRows = *(int32_t*)buf;
blockDataEnsureCapacity(pBlock, numOfRows);
pBlock->info.rows = numOfRows;
@@ -676,7 +680,8 @@ size_t blockDataGetRowSize(SSDataBlock* pBlock) {
* @return
*/
size_t blockDataGetSerialMetaSize(uint32_t numOfCols) {
- // | version | total length | total rows | total columns | flag seg| block group id | column schema | each column length |
+ // | version | total length | total rows | total columns | flag seg| block group id | column schema | each column
+ // length |
return sizeof(int32_t) + sizeof(int32_t) + sizeof(int32_t) + sizeof(int32_t) + sizeof(int32_t) + sizeof(uint64_t) +
numOfCols * (sizeof(int8_t) + sizeof(int32_t)) + numOfCols * sizeof(int32_t);
}
@@ -1228,6 +1233,7 @@ void blockDataFreeRes(SSDataBlock* pBlock) {
}
taosArrayDestroy(pBlock->pDataBlock);
+ pBlock->pDataBlock = NULL;
taosMemoryFreeClear(pBlock->pBlockAgg);
memset(&pBlock->info, 0, sizeof(SDataBlockInfo));
}
@@ -1301,6 +1307,40 @@ int32_t copyDataBlock(SSDataBlock* dst, const SSDataBlock* src) {
return TSDB_CODE_SUCCESS;
}
+SSDataBlock* createSpecialDataBlock(EStreamType type) {
+ SSDataBlock* pBlock = taosMemoryCalloc(1, sizeof(SSDataBlock));
+ pBlock->info.hasVarCol = false;
+ pBlock->info.groupId = 0;
+ pBlock->info.rows = 0;
+ pBlock->info.type = type;
+ pBlock->info.rowSize =
+ sizeof(TSKEY) + sizeof(TSKEY) + sizeof(uint64_t) + sizeof(uint64_t) + sizeof(TSKEY) + sizeof(TSKEY);
+ pBlock->info.watermark = INT64_MIN;
+
+ pBlock->pDataBlock = taosArrayInit(6, sizeof(SColumnInfoData));
+ SColumnInfoData infoData = {0};
+ infoData.info.type = TSDB_DATA_TYPE_TIMESTAMP;
+ infoData.info.bytes = sizeof(TSKEY);
+ // window start ts
+ taosArrayPush(pBlock->pDataBlock, &infoData);
+ // window end ts
+ taosArrayPush(pBlock->pDataBlock, &infoData);
+
+ infoData.info.type = TSDB_DATA_TYPE_UBIGINT;
+ infoData.info.bytes = sizeof(uint64_t);
+ // uid
+ taosArrayPush(pBlock->pDataBlock, &infoData);
+ // group id
+ taosArrayPush(pBlock->pDataBlock, &infoData);
+
+ // calculate start ts
+ taosArrayPush(pBlock->pDataBlock, &infoData);
+ // calculate end ts
+ taosArrayPush(pBlock->pDataBlock, &infoData);
+
+ return pBlock;
+}
+
SSDataBlock* createOneDataBlock(const SSDataBlock* pDataBlock, bool copyData) {
if (pDataBlock == NULL) {
return NULL;
@@ -1425,7 +1465,7 @@ size_t blockDataGetCapacityInRow(const SSDataBlock* pBlock, size_t pageSize) {
}
void colDataDestroy(SColumnInfoData* pColData) {
- if(!pColData) return;
+ if (!pColData) return;
if (IS_VAR_DATA_TYPE(pColData->info.type)) {
taosMemoryFreeClear(pColData->varmeta.offset);
} else {
@@ -1692,7 +1732,7 @@ static char* formatTimestamp(char* buf, int64_t val, int precision) {
}
struct tm ptm = {0};
taosLocalTime(&tt, &ptm);
- size_t pos = strftime(buf, 35, "%Y-%m-%d %H:%M:%S", &ptm);
+ size_t pos = strftime(buf, 35, "%Y-%m-%d %H:%M:%S", &ptm);
if (precision == TSDB_TIME_PRECISION_NANO) {
sprintf(buf + pos, ".%09d", ms);
@@ -1706,8 +1746,8 @@ static char* formatTimestamp(char* buf, int64_t val, int precision) {
}
void blockDebugShowDataBlock(SSDataBlock* pBlock, const char* flag) {
- SArray* dataBlocks = taosArrayInit(1, sizeof(SSDataBlock));
- taosArrayPush(dataBlocks, pBlock);
+ SArray* dataBlocks = taosArrayInit(1, sizeof(SSDataBlock*));
+ taosArrayPush(dataBlocks, &pBlock);
blockDebugShowDataBlocks(dataBlocks, flag);
taosArrayDestroy(dataBlocks);
}
@@ -1846,20 +1886,20 @@ char* dumpBlockData(SSDataBlock* pDataBlock, const char* flag, char** pDataBuf)
break;
case TSDB_DATA_TYPE_VARCHAR: {
memset(pBuf, 0, sizeof(pBuf));
- char* pData = colDataGetVarData(pColInfoData, j);
+ char* pData = colDataGetVarData(pColInfoData, j);
int32_t dataSize = TMIN(sizeof(pBuf), varDataLen(pData));
memcpy(pBuf, varDataVal(pData), dataSize);
len += snprintf(dumpBuf + len, size - len, " %15s |", pBuf);
if (len >= size - 1) return dumpBuf;
- } break;
+ } break;
case TSDB_DATA_TYPE_NCHAR: {
- char* pData = colDataGetVarData(pColInfoData, j);
+ char* pData = colDataGetVarData(pColInfoData, j);
int32_t dataSize = TMIN(sizeof(pBuf), varDataLen(pData));
memset(pBuf, 0, sizeof(pBuf));
- taosUcs4ToMbs((TdUcs4 *)varDataVal(pData), dataSize, pBuf);
+ taosUcs4ToMbs((TdUcs4*)varDataVal(pData), dataSize, pBuf);
len += snprintf(dumpBuf + len, size - len, " %15s |", pBuf);
if (len >= size - 1) return dumpBuf;
- } break;
+ } break;
}
}
len += snprintf(dumpBuf + len, size - len, "\n");
@@ -1876,7 +1916,7 @@ char* dumpBlockData(SSDataBlock* pDataBlock, const char* flag, char** pDataBuf)
* @param pDataBlocks
* @param vgId
* @param suid
- *
+ *
*/
int32_t buildSubmitReqFromDataBlock(SSubmitReq** pReq, const SSDataBlock* pDataBlock, STSchema* pTSchema, int32_t vgId,
tb_uid_t suid) {
@@ -1903,8 +1943,8 @@ int32_t buildSubmitReqFromDataBlock(SSubmitReq** pReq, const SSDataBlock* pDataB
tdSRowInit(&rb, pTSchema->version);
for (int32_t i = 0; i < sz; ++i) {
- int32_t colNum = taosArrayGetSize(pDataBlock->pDataBlock);
- int32_t rows = pDataBlock->info.rows;
+ int32_t colNum = taosArrayGetSize(pDataBlock->pDataBlock);
+ int32_t rows = pDataBlock->info.rows;
// int32_t rowSize = pDataBlock->info.rowSize;
// int64_t groupId = pDataBlock->info.groupId;
@@ -1925,7 +1965,7 @@ int32_t buildSubmitReqFromDataBlock(SSubmitReq** pReq, const SSDataBlock* pDataB
msgLen += sizeof(SSubmitBlk);
int32_t dataLen = 0;
- for (int32_t j = 0; j < rows; ++j) { // iterate by row
+ for (int32_t j = 0; j < rows; ++j) { // iterate by row
tdSRowResetBuf(&rb, POINTER_SHIFT(pDataBuf, msgLen + dataLen)); // set row buf
bool isStartKey = false;
int32_t offset = 0;
@@ -2080,6 +2120,7 @@ void blockEncode(const SSDataBlock* pBlock, char* data, int32_t* dataLen, int32_
int32_t* rows = (int32_t*)data;
*rows = pBlock->info.rows;
data += sizeof(int32_t);
+ ASSERT(*rows > 0);
int32_t* cols = (int32_t*)data;
*cols = numOfCols;
@@ -2088,7 +2129,7 @@ void blockEncode(const SSDataBlock* pBlock, char* data, int32_t* dataLen, int32_
// flag segment.
// the inital bit is for column info
int32_t* flagSegment = (int32_t*)data;
- *flagSegment = (1<<31);
+ *flagSegment = (1 << 31);
data += sizeof(int32_t);
@@ -2143,12 +2184,14 @@ void blockEncode(const SSDataBlock* pBlock, char* data, int32_t* dataLen, int32_
*actualLen = *dataLen;
*groupId = pBlock->info.groupId;
+ ASSERT(*dataLen > 0);
+ uDebug("build data block, actualLen:%d, rows:%d, cols:%d", *dataLen, *rows, *cols);
}
const char* blockDecode(SSDataBlock* pBlock, const char* pData) {
const char* pStart = pData;
- int32_t version = *(int32_t*) pStart;
+ int32_t version = *(int32_t*)pStart;
pStart += sizeof(int32_t);
ASSERT(version == 1);
@@ -2157,7 +2200,7 @@ const char* blockDecode(SSDataBlock* pBlock, const char* pData) {
pStart += sizeof(int32_t);
// total rows sizeof(int32_t)
- int32_t numOfRows = *(int32_t*)pStart;
+ int32_t numOfRows = *(int32_t*)pStart;
pStart += sizeof(int32_t);
// total columns sizeof(int32_t)
diff --git a/source/common/src/tdataformat.c b/source/common/src/tdataformat.c
index 8eeab77a157993bd8d89479b221982d3b1e5c336..b40f449a0550140784250b9c2250d191552e4652 100644
--- a/source/common/src/tdataformat.c
+++ b/source/common/src/tdataformat.c
@@ -1064,6 +1064,26 @@ _err:
return code;
}
+void tTagSetCid(const STag *pTag, int16_t iTag, int16_t cid) {
+ uint8_t *p = NULL;
+ int8_t isLarge = pTag->flags & TD_TAG_LARGE;
+ int16_t offset = 0;
+
+ if (isLarge) {
+ p = (uint8_t *)&((int16_t *)pTag->idx)[pTag->nTag];
+ } else {
+ p = (uint8_t *)&pTag->idx[pTag->nTag];
+ }
+
+ if (isLarge) {
+ offset = ((int16_t *)pTag->idx)[iTag];
+ } else {
+ offset = pTag->idx[iTag];
+ }
+
+ tPutI16v(p + offset, cid);
+}
+
#if 1 // ===================================================================================================================
int tdInitTSchemaBuilder(STSchemaBuilder *pBuilder, schema_ver_t version) {
if (pBuilder == NULL) return -1;
diff --git a/source/common/src/tglobal.c b/source/common/src/tglobal.c
index a628f551f4f2fbc8ba4f6b55d6bb9df48418936e..c436e4ffd2ef8f68ea9a13484d5f167529614c9f 100644
--- a/source/common/src/tglobal.c
+++ b/source/common/src/tglobal.c
@@ -61,6 +61,7 @@ int32_t tsNumOfVnodeStreamThreads = 2;
int32_t tsNumOfVnodeFetchThreads = 4;
int32_t tsNumOfVnodeWriteThreads = 2;
int32_t tsNumOfVnodeSyncThreads = 2;
+int32_t tsNumOfVnodeRsmaThreads = 2;
int32_t tsNumOfQnodeQueryThreads = 4;
int32_t tsNumOfQnodeFetchThreads = 4;
int32_t tsNumOfSnodeSharedThreads = 2;
@@ -76,7 +77,7 @@ bool tsMonitorComp = false;
// telem
bool tsEnableTelem = true;
-int32_t tsTelemInterval = 86400;
+int32_t tsTelemInterval = 43200;
char tsTelemServer[TSDB_FQDN_LEN] = "telemetry.taosdata.com";
uint16_t tsTelemPort = 80;
@@ -90,6 +91,7 @@ bool tsSmlDataFormat =
// query
int32_t tsQueryPolicy = 1;
int32_t tsQuerySmaOptimize = 0;
+bool tsQueryPlannerTrace = false;
/*
* denote if the server needs to compress response message at the application layer to client, including query rsp,
@@ -127,10 +129,6 @@ int32_t tsMinIntervalTime = 1;
int32_t tsQueryBufferSize = -1;
int64_t tsQueryBufferSizeBytes = -1;
-// tsdb config
-// For backward compatibility
-bool tsdbForceKeepFile = false;
-
int32_t tsDiskCfgNum = 0;
SDiskCfg tsDiskCfg[TFS_MAX_DISKS] = {0};
@@ -285,6 +283,7 @@ static int32_t taosAddClientCfg(SConfig *pCfg) {
if (cfgAddInt32(pCfg, "compressColData", tsCompressColData, -1, 100000000, 1) != 0) return -1;
if (cfgAddInt32(pCfg, "queryPolicy", tsQueryPolicy, 1, 3, 1) != 0) return -1;
if (cfgAddInt32(pCfg, "querySmaOptimize", tsQuerySmaOptimize, 0, 1, 1) != 0) return -1;
+ if (cfgAddBool(pCfg, "queryPlannerTrace", tsQueryPlannerTrace, true) != 0) return -1;
if (cfgAddString(pCfg, "smlChildTableName", "", 1) != 0) return -1;
if (cfgAddString(pCfg, "smlTagName", tsSmlTagName, 1) != 0) return -1;
if (cfgAddBool(pCfg, "smlDataFormat", tsSmlDataFormat, 1) != 0) return -1;
@@ -378,6 +377,10 @@ static int32_t taosAddServerCfg(SConfig *pCfg) {
tsNumOfVnodeSyncThreads = TMAX(tsNumOfVnodeSyncThreads, 16);
if (cfgAddInt32(pCfg, "numOfVnodeSyncThreads", tsNumOfVnodeSyncThreads, 1, 1024, 0) != 0) return -1;
+ tsNumOfVnodeRsmaThreads = tsNumOfCores;
+ tsNumOfVnodeRsmaThreads = TMAX(tsNumOfVnodeRsmaThreads, 4);
+ if (cfgAddInt32(pCfg, "numOfVnodeRsmaThreads", tsNumOfVnodeRsmaThreads, 1, 1024, 0) != 0) return -1;
+
tsNumOfQnodeQueryThreads = tsNumOfCores * 2;
tsNumOfQnodeQueryThreads = TMAX(tsNumOfQnodeQueryThreads, 4);
if (cfgAddInt32(pCfg, "numOfQnodeQueryThreads", tsNumOfQnodeQueryThreads, 1, 1024, 0) != 0) return -1;
@@ -415,12 +418,158 @@ static int32_t taosAddServerCfg(SConfig *pCfg) {
if (cfgAddInt32(pCfg, "mqRebalanceInterval", tsMqRebalanceInterval, 1, 10000, 1) != 0) return -1;
if (cfgAddInt32(pCfg, "ttlUnit", tsTtlUnit, 1, 86400 * 365, 1) != 0) return -1;
if (cfgAddInt32(pCfg, "ttlPushInterval", tsTtlPushInterval, 1, 100000, 1) != 0) return -1;
+ if (cfgAddInt32(pCfg, "uptimeInterval", tsUptimeInterval, 1, 100000, 1) != 0) return -1;
if (cfgAddBool(pCfg, "udf", tsStartUdfd, 0) != 0) return -1;
GRANT_CFG_ADD;
return 0;
}
+static int32_t taosUpdateServerCfg(SConfig *pCfg) {
+ SConfigItem *pItem;
+ ECfgSrcType stype;
+ int32_t numOfCores;
+ int64_t totalMemoryKB;
+
+ pItem = cfgGetItem(tsCfg, "numOfCores");
+ if (pItem == NULL) {
+ return -1;
+ } else {
+ stype = pItem->stype;
+ numOfCores = pItem->fval;
+ }
+
+ pItem = cfgGetItem(tsCfg, "supportVnodes");
+ if (pItem != NULL && pItem->stype == CFG_STYPE_DEFAULT) {
+ tsNumOfSupportVnodes = numOfCores * 2;
+ tsNumOfSupportVnodes = TMAX(tsNumOfSupportVnodes, 2);
+ pItem->i32 = tsNumOfSupportVnodes;
+ pItem->stype = stype;
+ }
+
+ pItem = cfgGetItem(tsCfg, "numOfRpcThreads");
+ if (pItem != NULL && pItem->stype == CFG_STYPE_DEFAULT) {
+ tsNumOfRpcThreads = numOfCores / 2;
+ tsNumOfRpcThreads = TRANGE(tsNumOfRpcThreads, 1, 4);
+ pItem->i32 = tsNumOfRpcThreads;
+ pItem->stype = stype;
+ }
+
+ pItem = cfgGetItem(tsCfg, "numOfCommitThreads");
+ if (pItem != NULL && pItem->stype == CFG_STYPE_DEFAULT) {
+ tsNumOfCommitThreads = numOfCores / 2;
+ tsNumOfCommitThreads = TRANGE(tsNumOfCommitThreads, 2, 4);
+ pItem->i32 = tsNumOfCommitThreads;
+ pItem->stype = stype;
+ }
+
+ pItem = cfgGetItem(tsCfg, "numOfMnodeReadThreads");
+ if (pItem != NULL && pItem->stype == CFG_STYPE_DEFAULT) {
+ tsNumOfMnodeReadThreads = numOfCores / 8;
+ tsNumOfMnodeReadThreads = TRANGE(tsNumOfMnodeReadThreads, 1, 4);
+ pItem->i32 = tsNumOfMnodeReadThreads;
+ pItem->stype = stype;
+ }
+
+ pItem = cfgGetItem(tsCfg, "numOfVnodeQueryThreads");
+ if (pItem != NULL && pItem->stype == CFG_STYPE_DEFAULT) {
+ tsNumOfVnodeQueryThreads = numOfCores * 2;
+ tsNumOfVnodeQueryThreads = TMAX(tsNumOfVnodeQueryThreads, 4);
+ pItem->i32 = tsNumOfVnodeQueryThreads;
+ pItem->stype = stype;
+ }
+
+ pItem = cfgGetItem(tsCfg, "numOfVnodeStreamThreads");
+ if (pItem != NULL && pItem->stype == CFG_STYPE_DEFAULT) {
+ tsNumOfVnodeStreamThreads = numOfCores / 4;
+ tsNumOfVnodeStreamThreads = TMAX(tsNumOfVnodeStreamThreads, 4);
+ pItem->i32 = tsNumOfVnodeStreamThreads;
+ pItem->stype = stype;
+ }
+
+ pItem = cfgGetItem(tsCfg, "numOfVnodeFetchThreads");
+ if (pItem != NULL && pItem->stype == CFG_STYPE_DEFAULT) {
+ tsNumOfVnodeFetchThreads = numOfCores / 4;
+ tsNumOfVnodeFetchThreads = TMAX(tsNumOfVnodeFetchThreads, 4);
+ pItem->i32 = tsNumOfVnodeFetchThreads;
+ pItem->stype = stype;
+ }
+
+ pItem = cfgGetItem(tsCfg, "numOfVnodeWriteThreads");
+ if (pItem != NULL && pItem->stype == CFG_STYPE_DEFAULT) {
+ tsNumOfVnodeWriteThreads = numOfCores;
+ tsNumOfVnodeWriteThreads = TMAX(tsNumOfVnodeWriteThreads, 1);
+ pItem->i32 = tsNumOfVnodeWriteThreads;
+ pItem->stype = stype;
+ }
+
+ pItem = cfgGetItem(tsCfg, "numOfVnodeSyncThreads");
+ if (pItem != NULL && pItem->stype == CFG_STYPE_DEFAULT) {
+ tsNumOfVnodeSyncThreads = numOfCores * 2;
+ tsNumOfVnodeSyncThreads = TMAX(tsNumOfVnodeSyncThreads, 16);
+ pItem->i32 = tsNumOfVnodeSyncThreads;
+ pItem->stype = stype;
+ }
+
+ pItem = cfgGetItem(tsCfg, "numOfVnodeRsmaThreads");
+ if (pItem != NULL && pItem->stype == CFG_STYPE_DEFAULT) {
+ tsNumOfVnodeRsmaThreads = numOfCores;
+ tsNumOfVnodeRsmaThreads = TMAX(tsNumOfVnodeRsmaThreads, 4);
+ pItem->i32 = tsNumOfVnodeRsmaThreads;
+ pItem->stype = stype;
+ }
+
+ pItem = cfgGetItem(tsCfg, "numOfQnodeQueryThreads");
+ if (pItem != NULL && pItem->stype == CFG_STYPE_DEFAULT) {
+ tsNumOfQnodeQueryThreads = numOfCores * 2;
+ tsNumOfQnodeQueryThreads = TMAX(tsNumOfQnodeQueryThreads, 4);
+ pItem->i32 = tsNumOfQnodeQueryThreads;
+ pItem->stype = stype;
+ }
+
+ pItem = cfgGetItem(tsCfg, "numOfQnodeFetchThreads");
+ if (pItem != NULL && pItem->stype == CFG_STYPE_DEFAULT) {
+ tsNumOfQnodeFetchThreads = numOfCores / 2;
+ tsNumOfQnodeFetchThreads = TMAX(tsNumOfQnodeFetchThreads, 4);
+ pItem->i32 = tsNumOfQnodeFetchThreads;
+ pItem->stype = stype;
+ }
+
+ pItem = cfgGetItem(tsCfg, "numOfSnodeSharedThreads");
+ if (pItem != NULL && pItem->stype == CFG_STYPE_DEFAULT) {
+ tsNumOfSnodeSharedThreads = numOfCores / 4;
+ tsNumOfSnodeSharedThreads = TRANGE(tsNumOfSnodeSharedThreads, 2, 4);
+ pItem->i32 = tsNumOfSnodeSharedThreads;
+ pItem->stype = stype;
+ }
+
+ pItem = cfgGetItem(tsCfg, "numOfSnodeUniqueThreads");
+ if (pItem != NULL && pItem->stype == CFG_STYPE_DEFAULT) {
+ tsNumOfSnodeUniqueThreads = numOfCores / 4;
+ tsNumOfSnodeUniqueThreads = TRANGE(tsNumOfSnodeUniqueThreads, 2, 4);
+ pItem->i32 = tsNumOfSnodeUniqueThreads;
+ pItem->stype = stype;
+ }
+
+ pItem = cfgGetItem(tsCfg, "totalMemoryKB");
+ if (pItem == NULL) {
+ return -1;
+ } else {
+ stype = pItem->stype;
+ totalMemoryKB = pItem->i64;
+ }
+
+ pItem = cfgGetItem(tsCfg, "rpcQueueMemoryAllowed");
+ if (pItem != NULL && pItem->stype == CFG_STYPE_DEFAULT) {
+ tsRpcQueueMemoryAllowed = totalMemoryKB * 1024 * 0.1;
+ tsRpcQueueMemoryAllowed = TRANGE(tsRpcQueueMemoryAllowed, TSDB_MAX_MSG_SIZE * 10LL, TSDB_MAX_MSG_SIZE * 10000LL);
+ pItem->i64 = tsRpcQueueMemoryAllowed;
+ pItem->stype = stype;
+ }
+
+ return 0;
+}
+
static void taosSetClientLogCfg(SConfig *pCfg) {
SConfigItem *pItem = cfgGetItem(pCfg, "logDir");
tstrncpy(tsLogDir, cfgGetItem(pCfg, "logDir")->str, PATH_MAX);
@@ -491,6 +640,7 @@ static int32_t taosSetClientCfg(SConfig *pCfg) {
tsNumOfTaskQueueThreads = cfgGetItem(pCfg, "numOfTaskQueueThreads")->i32;
tsQueryPolicy = cfgGetItem(pCfg, "queryPolicy")->i32;
tsQuerySmaOptimize = cfgGetItem(pCfg, "querySmaOptimize")->i32;
+ tsQueryPlannerTrace = cfgGetItem(pCfg, "queryPlannerTrace")->bval;
return 0;
}
@@ -539,6 +689,7 @@ static int32_t taosSetServerCfg(SConfig *pCfg) {
tsNumOfVnodeFetchThreads = cfgGetItem(pCfg, "numOfVnodeFetchThreads")->i32;
tsNumOfVnodeWriteThreads = cfgGetItem(pCfg, "numOfVnodeWriteThreads")->i32;
tsNumOfVnodeSyncThreads = cfgGetItem(pCfg, "numOfVnodeSyncThreads")->i32;
+ tsNumOfVnodeRsmaThreads = cfgGetItem(pCfg, "numOfVnodeRsmaThreads")->i32;
tsNumOfQnodeQueryThreads = cfgGetItem(pCfg, "numOfQnodeQueryThreads")->i32;
tsNumOfQnodeFetchThreads = cfgGetItem(pCfg, "numOfQnodeFetchThreads")->i32;
tsNumOfSnodeSharedThreads = cfgGetItem(pCfg, "numOfSnodeSharedThreads")->i32;
@@ -561,6 +712,7 @@ static int32_t taosSetServerCfg(SConfig *pCfg) {
tsMqRebalanceInterval = cfgGetItem(pCfg, "mqRebalanceInterval")->i32;
tsTtlUnit = cfgGetItem(pCfg, "ttlUnit")->i32;
tsTtlPushInterval = cfgGetItem(pCfg, "ttlPushInterval")->i32;
+ tsUptimeInterval = cfgGetItem(pCfg, "uptimeInterval")->i32;
tsStartUdfd = cfgGetItem(pCfg, "udf")->bval;
@@ -783,6 +935,8 @@ int32_t taosSetCfg(SConfig *pCfg, char *name) {
tsNumOfVnodeWriteThreads = cfgGetItem(pCfg, "numOfVnodeWriteThreads")->i32;
} else if (strcasecmp("numOfVnodeSyncThreads", name) == 0) {
tsNumOfVnodeSyncThreads = cfgGetItem(pCfg, "numOfVnodeSyncThreads")->i32;
+ } else if (strcasecmp("numOfVnodeRsmaThreads", name) == 0) {
+ tsNumOfVnodeRsmaThreads = cfgGetItem(pCfg, "numOfVnodeRsmaThreads")->i32;
} else if (strcasecmp("numOfQnodeQueryThreads", name) == 0) {
tsNumOfQnodeQueryThreads = cfgGetItem(pCfg, "numOfQnodeQueryThreads")->i32;
} else if (strcasecmp("numOfQnodeFetchThreads", name) == 0) {
@@ -816,6 +970,8 @@ int32_t taosSetCfg(SConfig *pCfg, char *name) {
tsQnodeShmSize = cfgGetItem(pCfg, "qnodeShmSize")->i32;
} else if (strcasecmp("qDebugFlag", name) == 0) {
qDebugFlag = cfgGetItem(pCfg, "qDebugFlag")->i32;
+ } else if (strcasecmp("queryPlannerTrace", name) == 0) {
+ tsQueryPlannerTrace = cfgGetItem(pCfg, "queryPlannerTrace")->bval;
}
break;
}
@@ -971,9 +1127,9 @@ int32_t taosCreateLog(const char *logname, int32_t logFileNum, const char *cfgDi
taosSetServerLogCfg(pCfg);
}
- taosSetAllDebugFlag(cfgGetItem(pCfg, "debugFlag")->i32);
+ taosSetAllDebugFlag(cfgGetItem(pCfg, "debugFlag")->i32, false);
- if (taosMulMkDir(tsLogDir) != 0) {
+ if (taosMulModeMkDir(tsLogDir, 0777) != 0) {
uError("failed to create dir:%s since %s", tsLogDir, terrstr());
cfgCleanup(pCfg);
return -1;
@@ -1038,6 +1194,7 @@ int32_t taosInitCfg(const char *cfgDir, const char **envCmd, const char *envFile
if (taosSetClientCfg(tsCfg)) return -1;
} else {
if (taosSetClientCfg(tsCfg)) return -1;
+ if (taosUpdateServerCfg(tsCfg)) return -1;
if (taosSetServerCfg(tsCfg)) return -1;
if (taosSetTfsCfg(tsCfg) != 0) return -1;
}
@@ -1062,7 +1219,7 @@ void taosCleanupCfg() {
void taosCfgDynamicOptions(const char *option, const char *value) {
if (strncasecmp(option, "debugFlag", 9) == 0) {
int32_t flag = atoi(value);
- taosSetAllDebugFlag(flag);
+ taosSetAllDebugFlag(flag, true);
return;
}
@@ -1084,14 +1241,14 @@ void taosCfgDynamicOptions(const char *option, const char *value) {
}
const char *options[] = {
- "dDebugFlag", "vDebugFlag", "mDebugFlag", "wDebugFlag", "sDebugFlag", "tsdbDebugFlag",
- "tqDebugFlag", "fsDebugFlag", "udfDebugFlag", "smaDebugFlag", "idxDebugFlag", "tdbDebugFlag",
- "tmrDebugFlag", "uDebugFlag", "smaDebugFlag", "rpcDebugFlag", "qDebugFlag", "metaDebugFlag",
+ "dDebugFlag", "vDebugFlag", "mDebugFlag", "wDebugFlag", "sDebugFlag", "tsdbDebugFlag", "tqDebugFlag",
+ "fsDebugFlag", "udfDebugFlag", "smaDebugFlag", "idxDebugFlag", "tdbDebugFlag", "tmrDebugFlag", "uDebugFlag",
+ "smaDebugFlag", "rpcDebugFlag", "qDebugFlag", "metaDebugFlag", "jniDebugFlag",
};
int32_t *optionVars[] = {
- &dDebugFlag, &vDebugFlag, &mDebugFlag, &wDebugFlag, &sDebugFlag, &tsdbDebugFlag,
- &tqDebugFlag, &fsDebugFlag, &udfDebugFlag, &smaDebugFlag, &idxDebugFlag, &tdbDebugFlag,
- &tmrDebugFlag, &uDebugFlag, &smaDebugFlag, &rpcDebugFlag, &qDebugFlag, &metaDebugFlag,
+ &dDebugFlag, &vDebugFlag, &mDebugFlag, &wDebugFlag, &sDebugFlag, &tsdbDebugFlag, &tqDebugFlag,
+ &fsDebugFlag, &udfDebugFlag, &smaDebugFlag, &idxDebugFlag, &tdbDebugFlag, &tmrDebugFlag, &uDebugFlag,
+ &smaDebugFlag, &rpcDebugFlag, &qDebugFlag, &metaDebugFlag, &jniDebugFlag,
};
int32_t optionSize = tListLen(options);
@@ -1103,41 +1260,42 @@ void taosCfgDynamicOptions(const char *option, const char *value) {
int32_t flag = atoi(value);
uInfo("%s set from %d to %d", optName, *optionVars[d], flag);
*optionVars[d] = flag;
- taosSetDebugFlag(optionVars[d], optName, flag);
+ taosSetDebugFlag(optionVars[d], optName, flag, true);
return;
}
uError("failed to cfg dynamic option:%s value:%s", option, value);
}
-void taosSetDebugFlag(int32_t *pFlagPtr, const char *flagName, int32_t flagVal) {
+void taosSetDebugFlag(int32_t *pFlagPtr, const char *flagName, int32_t flagVal, bool rewrite) {
SConfigItem *pItem = cfgGetItem(tsCfg, flagName);
- if (pItem != NULL) {
+ if (pItem != NULL && (rewrite || pItem->i32 == 0)) {
pItem->i32 = flagVal;
}
*pFlagPtr = flagVal;
}
-void taosSetAllDebugFlag(int32_t flag) {
+void taosSetAllDebugFlag(int32_t flag, bool rewrite) {
if (flag <= 0) return;
- taosSetDebugFlag(&uDebugFlag, "uDebugFlag", flag);
- taosSetDebugFlag(&rpcDebugFlag, "rpcDebugFlag", flag);
- taosSetDebugFlag(&jniDebugFlag, "jniDebugFlag", flag);
- taosSetDebugFlag(&qDebugFlag, "qDebugFlag", flag);
- taosSetDebugFlag(&cDebugFlag, "cDebugFlag", flag);
- taosSetDebugFlag(&dDebugFlag, "dDebugFlag", flag);
- taosSetDebugFlag(&vDebugFlag, "vDebugFlag", flag);
- taosSetDebugFlag(&mDebugFlag, "mDebugFlag", flag);
- taosSetDebugFlag(&wDebugFlag, "wDebugFlag", flag);
- taosSetDebugFlag(&sDebugFlag, "sDebugFlag", flag);
- taosSetDebugFlag(&tsdbDebugFlag, "tsdbDebugFlag", flag);
- taosSetDebugFlag(&tqDebugFlag, "tqDebugFlag", flag);
- taosSetDebugFlag(&fsDebugFlag, "fsDebugFlag", flag);
- taosSetDebugFlag(&udfDebugFlag, "udfDebugFlag", flag);
- taosSetDebugFlag(&smaDebugFlag, "smaDebugFlag", flag);
- taosSetDebugFlag(&idxDebugFlag, "idxDebugFlag", flag);
- taosSetDebugFlag(&tdbDebugFlag, "tdbDebugFlag", flag);
- taosSetDebugFlag(&metaDebugFlag, "metaDebugFlag", flag);
+ taosSetDebugFlag(&uDebugFlag, "uDebugFlag", flag, rewrite);
+ taosSetDebugFlag(&rpcDebugFlag, "rpcDebugFlag", flag, rewrite);
+ taosSetDebugFlag(&jniDebugFlag, "jniDebugFlag", flag, rewrite);
+ taosSetDebugFlag(&qDebugFlag, "qDebugFlag", flag, rewrite);
+ taosSetDebugFlag(&cDebugFlag, "cDebugFlag", flag, rewrite);
+ taosSetDebugFlag(&dDebugFlag, "dDebugFlag", flag, rewrite);
+ taosSetDebugFlag(&vDebugFlag, "vDebugFlag", flag, rewrite);
+ taosSetDebugFlag(&mDebugFlag, "mDebugFlag", flag, rewrite);
+ taosSetDebugFlag(&wDebugFlag, "wDebugFlag", flag, rewrite);
+ taosSetDebugFlag(&sDebugFlag, "sDebugFlag", flag, rewrite);
+ taosSetDebugFlag(&tsdbDebugFlag, "tsdbDebugFlag", flag, rewrite);
+ taosSetDebugFlag(&tqDebugFlag, "tqDebugFlag", flag, rewrite);
+ taosSetDebugFlag(&fsDebugFlag, "fsDebugFlag", flag, rewrite);
+ taosSetDebugFlag(&udfDebugFlag, "udfDebugFlag", flag, rewrite);
+ taosSetDebugFlag(&smaDebugFlag, "smaDebugFlag", flag, rewrite);
+ taosSetDebugFlag(&idxDebugFlag, "idxDebugFlag", flag, rewrite);
+ taosSetDebugFlag(&tdbDebugFlag, "tdbDebugFlag", flag, rewrite);
+ taosSetDebugFlag(&metaDebugFlag, "metaDebugFlag", flag, rewrite);
+  taosSetDebugFlag(&tmrDebugFlag, "tmrDebugFlag", flag, rewrite);
uInfo("all debug flag are set to %d", flag);
}
diff --git a/source/common/src/tmsg.c b/source/common/src/tmsg.c
index 533d924546eb553045fea97e6b50cdb42489d714..7d8461ca6046d3ae3dd6795809b2b9509d680f0b 100644
--- a/source/common/src/tmsg.c
+++ b/source/common/src/tmsg.c
@@ -994,6 +994,7 @@ int32_t tSerializeSStatusReq(void *buf, int32_t bufLen, SStatusReq *pReq) {
SVnodeLoad *pload = taosArrayGet(pReq->pVloads, i);
if (tEncodeI32(&encoder, pload->vgId) < 0) return -1;
if (tEncodeI32(&encoder, pload->syncState) < 0) return -1;
+ if (tEncodeI64(&encoder, pload->cacheUsage) < 0) return -1;
if (tEncodeI64(&encoder, pload->numOfTables) < 0) return -1;
if (tEncodeI64(&encoder, pload->numOfTimeSeries) < 0) return -1;
if (tEncodeI64(&encoder, pload->totalStorage) < 0) return -1;
@@ -1063,6 +1064,7 @@ int32_t tDeserializeSStatusReq(void *buf, int32_t bufLen, SStatusReq *pReq) {
SVnodeLoad vload = {0};
if (tDecodeI32(&decoder, &vload.vgId) < 0) return -1;
if (tDecodeI32(&decoder, &vload.syncState) < 0) return -1;
+ if (tDecodeI64(&decoder, &vload.cacheUsage) < 0) return -1;
if (tDecodeI64(&decoder, &vload.numOfTables) < 0) return -1;
if (tDecodeI64(&decoder, &vload.numOfTimeSeries) < 0) return -1;
if (tDecodeI64(&decoder, &vload.totalStorage) < 0) return -1;
@@ -2024,6 +2026,9 @@ int32_t tSerializeSCreateDbReq(void *buf, int32_t bufLen, SCreateDbReq *pReq) {
if (tEncodeI64(&encoder, pReq->walRetentionSize) < 0) return -1;
if (tEncodeI32(&encoder, pReq->walRollPeriod) < 0) return -1;
if (tEncodeI64(&encoder, pReq->walSegmentSize) < 0) return -1;
+ if (tEncodeI32(&encoder, pReq->sstTrigger) < 0) return -1;
+ if (tEncodeI16(&encoder, pReq->hashPrefix) < 0) return -1;
+ if (tEncodeI16(&encoder, pReq->hashSuffix) < 0) return -1;
if (tEncodeI8(&encoder, pReq->ignoreExist) < 0) return -1;
if (tEncodeI32(&encoder, pReq->numOfRetensions) < 0) return -1;
for (int32_t i = 0; i < pReq->numOfRetensions; ++i) {
@@ -2033,6 +2038,7 @@ int32_t tSerializeSCreateDbReq(void *buf, int32_t bufLen, SCreateDbReq *pReq) {
if (tEncodeI8(&encoder, pRetension->freqUnit) < 0) return -1;
if (tEncodeI8(&encoder, pRetension->keepUnit) < 0) return -1;
}
+ if (tEncodeI32(&encoder, pReq->tsdbPageSize) < 0) return -1;
tEndEncode(&encoder);
int32_t tlen = encoder.pos;
@@ -2070,6 +2076,9 @@ int32_t tDeserializeSCreateDbReq(void *buf, int32_t bufLen, SCreateDbReq *pReq)
if (tDecodeI64(&decoder, &pReq->walRetentionSize) < 0) return -1;
if (tDecodeI32(&decoder, &pReq->walRollPeriod) < 0) return -1;
if (tDecodeI64(&decoder, &pReq->walSegmentSize) < 0) return -1;
+ if (tDecodeI32(&decoder, &pReq->sstTrigger) < 0) return -1;
+ if (tDecodeI16(&decoder, &pReq->hashPrefix) < 0) return -1;
+ if (tDecodeI16(&decoder, &pReq->hashSuffix) < 0) return -1;
if (tDecodeI8(&decoder, &pReq->ignoreExist) < 0) return -1;
if (tDecodeI32(&decoder, &pReq->numOfRetensions) < 0) return -1;
pReq->pRetensions = taosArrayInit(pReq->numOfRetensions, sizeof(SRetention));
@@ -2090,6 +2099,8 @@ int32_t tDeserializeSCreateDbReq(void *buf, int32_t bufLen, SCreateDbReq *pReq)
}
}
+ if (tDecodeI32(&decoder, &pReq->tsdbPageSize) < 0) return -1;
+
tEndDecode(&decoder);
tDecoderClear(&decoder);
@@ -2120,6 +2131,7 @@ int32_t tSerializeSAlterDbReq(void *buf, int32_t bufLen, SAlterDbReq *pReq) {
if (tEncodeI8(&encoder, pReq->strict) < 0) return -1;
if (tEncodeI8(&encoder, pReq->cacheLast) < 0) return -1;
if (tEncodeI8(&encoder, pReq->replications) < 0) return -1;
+ if (tEncodeI32(&encoder, pReq->sstTrigger) < 0) return -1;
tEndEncode(&encoder);
int32_t tlen = encoder.pos;
@@ -2146,6 +2158,7 @@ int32_t tDeserializeSAlterDbReq(void *buf, int32_t bufLen, SAlterDbReq *pReq) {
if (tDecodeI8(&decoder, &pReq->strict) < 0) return -1;
if (tDecodeI8(&decoder, &pReq->cacheLast) < 0) return -1;
if (tDecodeI8(&decoder, &pReq->replications) < 0) return -1;
+ if (tDecodeI32(&decoder, &pReq->sstTrigger) < 0) return -1;
tEndDecode(&decoder);
tDecoderClear(&decoder);
@@ -2453,6 +2466,8 @@ int32_t tSerializeSUseDbRspImp(SEncoder *pEncoder, const SUseDbRsp *pRsp) {
if (tEncodeI64(pEncoder, pRsp->uid) < 0) return -1;
if (tEncodeI32(pEncoder, pRsp->vgVersion) < 0) return -1;
if (tEncodeI32(pEncoder, pRsp->vgNum) < 0) return -1;
+ if (tEncodeI16(pEncoder, pRsp->hashPrefix) < 0) return -1;
+ if (tEncodeI16(pEncoder, pRsp->hashSuffix) < 0) return -1;
if (tEncodeI8(pEncoder, pRsp->hashMethod) < 0) return -1;
for (int32_t i = 0; i < pRsp->vgNum; ++i) {
@@ -2504,6 +2519,8 @@ int32_t tDeserializeSUseDbRspImp(SDecoder *pDecoder, SUseDbRsp *pRsp) {
if (tDecodeI64(pDecoder, &pRsp->uid) < 0) return -1;
if (tDecodeI32(pDecoder, &pRsp->vgVersion) < 0) return -1;
if (tDecodeI32(pDecoder, &pRsp->vgNum) < 0) return -1;
+ if (tDecodeI16(pDecoder, &pRsp->hashPrefix) < 0) return -1;
+ if (tDecodeI16(pDecoder, &pRsp->hashSuffix) < 0) return -1;
if (tDecodeI8(pDecoder, &pRsp->hashMethod) < 0) return -1;
if (pRsp->vgNum <= 0) {
@@ -3196,12 +3213,16 @@ static int32_t tDecodeSTableMetaRsp(SDecoder *pDecoder, STableMetaRsp *pRsp) {
if (tDecodeI32(pDecoder, &pRsp->vgId) < 0) return -1;
int32_t totalCols = pRsp->numOfTags + pRsp->numOfColumns;
- pRsp->pSchemas = taosMemoryMalloc(sizeof(SSchema) * totalCols);
- if (pRsp->pSchemas == NULL) return -1;
+ if (totalCols > 0) {
+ pRsp->pSchemas = taosMemoryMalloc(sizeof(SSchema) * totalCols);
+ if (pRsp->pSchemas == NULL) return -1;
- for (int32_t i = 0; i < totalCols; ++i) {
- SSchema *pSchema = &pRsp->pSchemas[i];
- if (tDecodeSSchema(pDecoder, pSchema) < 0) return -1;
+ for (int32_t i = 0; i < totalCols; ++i) {
+ SSchema *pSchema = &pRsp->pSchemas[i];
+ if (tDecodeSSchema(pDecoder, pSchema) < 0) return -1;
+ }
+ } else {
+ pRsp->pSchemas = NULL;
}
return 0;
@@ -3326,7 +3347,7 @@ int32_t tDeserializeSSTbHbRsp(void *buf, int32_t bufLen, SSTbHbRsp *pRsp) {
return 0;
}
-void tFreeSTableMetaRsp(STableMetaRsp *pRsp) { taosMemoryFreeClear(pRsp->pSchemas); }
+void tFreeSTableMetaRsp(void *pRsp) { taosMemoryFreeClear(((STableMetaRsp *)pRsp)->pSchemas); }
void tFreeSTableIndexRsp(void *info) {
if (NULL == info) {
@@ -3630,6 +3651,7 @@ int32_t tSerializeSConnectRsp(void *buf, int32_t bufLen, SConnectRsp *pRsp) {
if (tEncodeU32(&encoder, pRsp->connId) < 0) return -1;
if (tEncodeI32(&encoder, pRsp->dnodeNum) < 0) return -1;
if (tEncodeI8(&encoder, pRsp->superUser) < 0) return -1;
+ if (tEncodeI8(&encoder, pRsp->sysInfo) < 0) return -1;
if (tEncodeI8(&encoder, pRsp->connType) < 0) return -1;
if (tEncodeSEpSet(&encoder, &pRsp->epSet) < 0) return -1;
if (tEncodeI32(&encoder, pRsp->svrTimestamp) < 0) return -1;
@@ -3652,6 +3674,7 @@ int32_t tDeserializeSConnectRsp(void *buf, int32_t bufLen, SConnectRsp *pRsp) {
if (tDecodeU32(&decoder, &pRsp->connId) < 0) return -1;
if (tDecodeI32(&decoder, &pRsp->dnodeNum) < 0) return -1;
if (tDecodeI8(&decoder, &pRsp->superUser) < 0) return -1;
+ if (tDecodeI8(&decoder, &pRsp->sysInfo) < 0) return -1;
if (tDecodeI8(&decoder, &pRsp->connType) < 0) return -1;
if (tDecodeSEpSet(&decoder, &pRsp->epSet) < 0) return -1;
if (tDecodeI32(&decoder, &pRsp->svrTimestamp) < 0) return -1;
@@ -3756,6 +3779,9 @@ int32_t tSerializeSCreateVnodeReq(void *buf, int32_t bufLen, SCreateVnodeReq *pR
if (tEncodeI64(&encoder, pReq->walRetentionSize) < 0) return -1;
if (tEncodeI32(&encoder, pReq->walRollPeriod) < 0) return -1;
if (tEncodeI64(&encoder, pReq->walSegmentSize) < 0) return -1;
+ if (tEncodeI16(&encoder, pReq->sstTrigger) < 0) return -1;
+ if (tEncodeI16(&encoder, pReq->hashPrefix) < 0) return -1;
+ if (tEncodeI16(&encoder, pReq->hashSuffix) < 0) return -1;
tEndEncode(&encoder);
@@ -3828,6 +3854,9 @@ int32_t tDeserializeSCreateVnodeReq(void *buf, int32_t bufLen, SCreateVnodeReq *
if (tDecodeI64(&decoder, &pReq->walRetentionSize) < 0) return -1;
if (tDecodeI32(&decoder, &pReq->walRollPeriod) < 0) return -1;
if (tDecodeI64(&decoder, &pReq->walSegmentSize) < 0) return -1;
+ if (tDecodeI16(&decoder, &pReq->sstTrigger) < 0) return -1;
+ if (tDecodeI16(&decoder, &pReq->hashPrefix) < 0) return -1;
+ if (tDecodeI16(&decoder, &pReq->hashSuffix) < 0) return -1;
tEndDecode(&decoder);
tDecoderClear(&decoder);
@@ -5090,6 +5119,10 @@ int tEncodeSVCreateTbRsp(SEncoder *pCoder, const SVCreateTbRsp *pRsp) {
if (tStartEncode(pCoder) < 0) return -1;
if (tEncodeI32(pCoder, pRsp->code) < 0) return -1;
+ if (tEncodeI32(pCoder, pRsp->pMeta ? 1 : 0) < 0) return -1;
+ if (pRsp->pMeta) {
+ if (tEncodeSTableMetaRsp(pCoder, pRsp->pMeta) < 0) return -1;
+ }
tEndEncode(pCoder);
return 0;
@@ -5100,15 +5133,38 @@ int tDecodeSVCreateTbRsp(SDecoder *pCoder, SVCreateTbRsp *pRsp) {
if (tDecodeI32(pCoder, &pRsp->code) < 0) return -1;
+ int32_t meta = 0;
+ if (tDecodeI32(pCoder, &meta) < 0) return -1;
+ if (meta) {
+ pRsp->pMeta = taosMemoryCalloc(1, sizeof(STableMetaRsp));
+ if (NULL == pRsp->pMeta) return -1;
+ if (tDecodeSTableMetaRsp(pCoder, pRsp->pMeta) < 0) return -1;
+ } else {
+ pRsp->pMeta = NULL;
+ }
+
tEndDecode(pCoder);
return 0;
}
+void tFreeSVCreateTbRsp(void *param) {
+ if (NULL == param) {
+ return;
+ }
+
+ SVCreateTbRsp *pRsp = (SVCreateTbRsp *)param;
+ if (pRsp->pMeta) {
+ taosMemoryFree(pRsp->pMeta->pSchemas);
+ taosMemoryFree(pRsp->pMeta);
+ }
+}
+
// TDMT_VND_DROP_TABLE =================
static int32_t tEncodeSVDropTbReq(SEncoder *pCoder, const SVDropTbReq *pReq) {
if (tStartEncode(pCoder) < 0) return -1;
if (tEncodeCStr(pCoder, pReq->name) < 0) return -1;
+ if (tEncodeU64(pCoder, pReq->suid) < 0) return -1;
if (tEncodeI8(pCoder, pReq->igNotExists) < 0) return -1;
tEndEncode(pCoder);
@@ -5119,6 +5175,7 @@ static int32_t tDecodeSVDropTbReq(SDecoder *pCoder, SVDropTbReq *pReq) {
if (tStartDecode(pCoder) < 0) return -1;
if (tDecodeCStr(pCoder, &pReq->name) < 0) return -1;
+ if (tDecodeU64(pCoder, &pReq->suid) < 0) return -1;
if (tDecodeI8(pCoder, &pReq->igNotExists) < 0) return -1;
tEndDecode(pCoder);
@@ -5292,6 +5349,10 @@ static int32_t tEncodeSSubmitBlkRsp(SEncoder *pEncoder, const SSubmitBlkRsp *pBl
if (tEncodeI32v(pEncoder, pBlock->numOfRows) < 0) return -1;
if (tEncodeI32v(pEncoder, pBlock->affectedRows) < 0) return -1;
if (tEncodeI64v(pEncoder, pBlock->sver) < 0) return -1;
+ if (tEncodeI32(pEncoder, pBlock->pMeta ? 1 : 0) < 0) return -1;
+ if (pBlock->pMeta) {
+ if (tEncodeSTableMetaRsp(pEncoder, pBlock->pMeta) < 0) return -1;
+ }
tEndEncode(pEncoder);
return 0;
@@ -5310,6 +5371,16 @@ static int32_t tDecodeSSubmitBlkRsp(SDecoder *pDecoder, SSubmitBlkRsp *pBlock) {
if (tDecodeI32v(pDecoder, &pBlock->affectedRows) < 0) return -1;
if (tDecodeI64v(pDecoder, &pBlock->sver) < 0) return -1;
+ int32_t meta = 0;
+ if (tDecodeI32(pDecoder, &meta) < 0) return -1;
+ if (meta) {
+ pBlock->pMeta = taosMemoryCalloc(1, sizeof(STableMetaRsp));
+ if (NULL == pBlock->pMeta) return -1;
+ if (tDecodeSTableMetaRsp(pDecoder, pBlock->pMeta) < 0) return -1;
+ } else {
+ pBlock->pMeta = NULL;
+ }
+
tEndDecode(pDecoder);
return 0;
}
@@ -5347,6 +5418,20 @@ int32_t tDecodeSSubmitRsp(SDecoder *pDecoder, SSubmitRsp *pRsp) {
return 0;
}
+void tFreeSSubmitBlkRsp(void *param) {
+ if (NULL == param) {
+ return;
+ }
+
+ SSubmitBlkRsp *pRsp = (SSubmitBlkRsp *)param;
+
+ taosMemoryFree(pRsp->tblFName);
+ if (pRsp->pMeta) {
+ taosMemoryFree(pRsp->pMeta->pSchemas);
+ taosMemoryFree(pRsp->pMeta);
+ }
+}
+
void tFreeSSubmitRsp(SSubmitRsp *pRsp) {
if (NULL == pRsp) return;
@@ -5558,9 +5643,60 @@ void tFreeSMAlterStbRsp(SMAlterStbRsp *pRsp) {
}
}
+int32_t tEncodeSMCreateStbRsp(SEncoder *pEncoder, const SMCreateStbRsp *pRsp) {
+ if (tStartEncode(pEncoder) < 0) return -1;
+ if (tEncodeI32(pEncoder, pRsp->pMeta->pSchemas ? 1 : 0) < 0) return -1;
+ if (pRsp->pMeta->pSchemas) {
+ if (tEncodeSTableMetaRsp(pEncoder, pRsp->pMeta) < 0) return -1;
+ }
+ tEndEncode(pEncoder);
+ return 0;
+}
+
+int32_t tDecodeSMCreateStbRsp(SDecoder *pDecoder, SMCreateStbRsp *pRsp) {
+ int32_t meta = 0;
+ if (tStartDecode(pDecoder) < 0) return -1;
+ if (tDecodeI32(pDecoder, &meta) < 0) return -1;
+ if (meta) {
+ pRsp->pMeta = taosMemoryCalloc(1, sizeof(STableMetaRsp));
+ if (NULL == pRsp->pMeta) return -1;
+ if (tDecodeSTableMetaRsp(pDecoder, pRsp->pMeta) < 0) return -1;
+ }
+ tEndDecode(pDecoder);
+ return 0;
+}
+
+int32_t tDeserializeSMCreateStbRsp(void *buf, int32_t bufLen, SMCreateStbRsp *pRsp) {
+ int32_t meta = 0;
+ SDecoder decoder = {0};
+ tDecoderInit(&decoder, buf, bufLen);
+
+ if (tStartDecode(&decoder) < 0) return -1;
+ if (tDecodeI32(&decoder, &meta) < 0) return -1;
+ if (meta) {
+ pRsp->pMeta = taosMemoryCalloc(1, sizeof(STableMetaRsp));
+ if (NULL == pRsp->pMeta) return -1;
+ if (tDecodeSTableMetaRsp(&decoder, pRsp->pMeta) < 0) return -1;
+ }
+ tEndDecode(&decoder);
+ tDecoderClear(&decoder);
+ return 0;
+}
+
+void tFreeSMCreateStbRsp(SMCreateStbRsp *pRsp) {
+ if (NULL == pRsp) {
+ return;
+ }
+
+ if (pRsp->pMeta) {
+ taosMemoryFree(pRsp->pMeta->pSchemas);
+ taosMemoryFree(pRsp->pMeta);
+ }
+}
+
int32_t tEncodeSTqOffsetVal(SEncoder *pEncoder, const STqOffsetVal *pOffsetVal) {
if (tEncodeI8(pEncoder, pOffsetVal->type) < 0) return -1;
- if (pOffsetVal->type == TMQ_OFFSET__SNAPSHOT_DATA) {
+ if (pOffsetVal->type == TMQ_OFFSET__SNAPSHOT_DATA || pOffsetVal->type == TMQ_OFFSET__SNAPSHOT_META) {
if (tEncodeI64(pEncoder, pOffsetVal->uid) < 0) return -1;
if (tEncodeI64(pEncoder, pOffsetVal->ts) < 0) return -1;
} else if (pOffsetVal->type == TMQ_OFFSET__LOG) {
@@ -5575,7 +5711,7 @@ int32_t tEncodeSTqOffsetVal(SEncoder *pEncoder, const STqOffsetVal *pOffsetVal)
int32_t tDecodeSTqOffsetVal(SDecoder *pDecoder, STqOffsetVal *pOffsetVal) {
if (tDecodeI8(pDecoder, &pOffsetVal->type) < 0) return -1;
- if (pOffsetVal->type == TMQ_OFFSET__SNAPSHOT_DATA) {
+ if (pOffsetVal->type == TMQ_OFFSET__SNAPSHOT_DATA || pOffsetVal->type == TMQ_OFFSET__SNAPSHOT_META) {
if (tDecodeI64(pDecoder, &pOffsetVal->uid) < 0) return -1;
if (tDecodeI64(pDecoder, &pOffsetVal->ts) < 0) return -1;
} else if (pOffsetVal->type == TMQ_OFFSET__LOG) {
@@ -5597,10 +5733,8 @@ int32_t tFormatOffset(char *buf, int32_t maxLen, const STqOffsetVal *pVal) {
snprintf(buf, maxLen, "offset(reset to latest)");
} else if (pVal->type == TMQ_OFFSET__LOG) {
snprintf(buf, maxLen, "offset(log) ver:%" PRId64, pVal->version);
- } else if (pVal->type == TMQ_OFFSET__SNAPSHOT_DATA) {
+ } else if (pVal->type == TMQ_OFFSET__SNAPSHOT_DATA || pVal->type == TMQ_OFFSET__SNAPSHOT_META) {
snprintf(buf, maxLen, "offset(ss data) uid:%" PRId64 ", ts:%" PRId64, pVal->uid, pVal->ts);
- } else if (pVal->type == TMQ_OFFSET__SNAPSHOT_META) {
- snprintf(buf, maxLen, "offset(ss meta) uid:%" PRId64 ", ts:%" PRId64, pVal->uid, pVal->ts);
} else {
ASSERT(0);
}
@@ -5614,9 +5748,7 @@ bool tOffsetEqual(const STqOffsetVal *pLeft, const STqOffsetVal *pRight) {
} else if (pLeft->type == TMQ_OFFSET__SNAPSHOT_DATA) {
return pLeft->uid == pRight->uid && pLeft->ts == pRight->ts;
} else if (pLeft->type == TMQ_OFFSET__SNAPSHOT_META) {
- ASSERT(0);
- // TODO
- return pLeft->uid == pRight->uid && pLeft->ts == pRight->ts;
+ return pLeft->uid == pRight->uid;
} else {
ASSERT(0);
/*ASSERT(pLeft->type == TMQ_OFFSET__RESET_NONE || pLeft->type == TMQ_OFFSET__RESET_EARLIEAST ||*/
@@ -5701,6 +5833,21 @@ int32_t tDecodeDeleteRes(SDecoder *pCoder, SDeleteRes *pRes) {
if (tDecodeCStrTo(pCoder, pRes->tsColName) < 0) return -1;
return 0;
}
+
+int32_t tEncodeSMqMetaRsp(SEncoder *pEncoder, const SMqMetaRsp *pRsp) {
+ if (tEncodeSTqOffsetVal(pEncoder, &pRsp->rspOffset) < 0) return -1;
+  if (tEncodeI16(pEncoder, pRsp->resMsgType) < 0) return -1;
+  if (tEncodeBinary(pEncoder, pRsp->metaRsp, pRsp->metaRspLen) < 0) return -1;
+ return 0;
+}
+
+int32_t tDecodeSMqMetaRsp(SDecoder *pDecoder, SMqMetaRsp *pRsp) {
+ if (tDecodeSTqOffsetVal(pDecoder, &pRsp->rspOffset) < 0) return -1;
+ if (tDecodeI16(pDecoder, &pRsp->resMsgType) < 0) return -1;
+ if (tDecodeBinaryAlloc(pDecoder, &pRsp->metaRsp, (uint64_t *)&pRsp->metaRspLen) < 0) return -1;
+ return 0;
+}
+
int32_t tEncodeSMqDataRsp(SEncoder *pEncoder, const SMqDataRsp *pRsp) {
if (tEncodeSTqOffsetVal(pEncoder, &pRsp->reqOffset) < 0) return -1;
if (tEncodeSTqOffsetVal(pEncoder, &pRsp->rspOffset) < 0) return -1;
@@ -5767,6 +5914,110 @@ int32_t tDecodeSMqDataRsp(SDecoder *pDecoder, SMqDataRsp *pRsp) {
return 0;
}
+void tDeleteSMqDataRsp(SMqDataRsp *pRsp) {
+ taosArrayDestroy(pRsp->blockDataLen);
+ taosArrayDestroyP(pRsp->blockData, (FDelete)taosMemoryFree);
+ taosArrayDestroyP(pRsp->blockSchema, (FDelete)tDeleteSSchemaWrapper);
+ taosArrayDestroyP(pRsp->blockTbName, (FDelete)taosMemoryFree);
+}
+
+int32_t tEncodeSTaosxRsp(SEncoder *pEncoder, const STaosxRsp *pRsp) {
+ if (tEncodeSTqOffsetVal(pEncoder, &pRsp->reqOffset) < 0) return -1;
+ if (tEncodeSTqOffsetVal(pEncoder, &pRsp->rspOffset) < 0) return -1;
+ if (tEncodeI32(pEncoder, pRsp->blockNum) < 0) return -1;
+ if (pRsp->blockNum != 0) {
+ if (tEncodeI8(pEncoder, pRsp->withTbName) < 0) return -1;
+ if (tEncodeI8(pEncoder, pRsp->withSchema) < 0) return -1;
+
+ for (int32_t i = 0; i < pRsp->blockNum; i++) {
+ int32_t bLen = *(int32_t *)taosArrayGet(pRsp->blockDataLen, i);
+ void *data = taosArrayGetP(pRsp->blockData, i);
+ if (tEncodeBinary(pEncoder, (const uint8_t *)data, bLen) < 0) return -1;
+ if (pRsp->withSchema) {
+ SSchemaWrapper *pSW = (SSchemaWrapper *)taosArrayGetP(pRsp->blockSchema, i);
+ if (tEncodeSSchemaWrapper(pEncoder, pSW) < 0) return -1;
+ }
+ if (pRsp->withTbName) {
+ char *tbName = (char *)taosArrayGetP(pRsp->blockTbName, i);
+ if (tEncodeCStr(pEncoder, tbName) < 0) return -1;
+ }
+ }
+ }
+ if (tEncodeI32(pEncoder, pRsp->createTableNum) < 0) return -1;
+ if (pRsp->createTableNum) {
+ for (int32_t i = 0; i < pRsp->createTableNum; i++) {
+ void *createTableReq = taosArrayGetP(pRsp->createTableReq, i);
+ int32_t createTableLen = *(int32_t *)taosArrayGet(pRsp->createTableLen, i);
+ if (tEncodeBinary(pEncoder, createTableReq, createTableLen) < 0) return -1;
+ }
+ }
+ return 0;
+}
+
+int32_t tDecodeSTaosxRsp(SDecoder *pDecoder, STaosxRsp *pRsp) {
+ if (tDecodeSTqOffsetVal(pDecoder, &pRsp->reqOffset) < 0) return -1;
+ if (tDecodeSTqOffsetVal(pDecoder, &pRsp->rspOffset) < 0) return -1;
+ if (tDecodeI32(pDecoder, &pRsp->blockNum) < 0) return -1;
+ if (pRsp->blockNum != 0) {
+ pRsp->blockData = taosArrayInit(pRsp->blockNum, sizeof(void *));
+ pRsp->blockDataLen = taosArrayInit(pRsp->blockNum, sizeof(int32_t));
+ if (tDecodeI8(pDecoder, &pRsp->withTbName) < 0) return -1;
+ if (tDecodeI8(pDecoder, &pRsp->withSchema) < 0) return -1;
+ if (pRsp->withTbName) {
+ pRsp->blockTbName = taosArrayInit(pRsp->blockNum, sizeof(void *));
+ }
+ if (pRsp->withSchema) {
+ pRsp->blockSchema = taosArrayInit(pRsp->blockNum, sizeof(void *));
+ }
+
+ for (int32_t i = 0; i < pRsp->blockNum; i++) {
+ void *data;
+ uint64_t bLen;
+ if (tDecodeBinaryAlloc(pDecoder, &data, &bLen) < 0) return -1;
+ taosArrayPush(pRsp->blockData, &data);
+ int32_t len = bLen;
+ taosArrayPush(pRsp->blockDataLen, &len);
+
+ if (pRsp->withSchema) {
+ SSchemaWrapper *pSW = (SSchemaWrapper *)taosMemoryCalloc(1, sizeof(SSchemaWrapper));
+ if (pSW == NULL) return -1;
+ if (tDecodeSSchemaWrapper(pDecoder, pSW) < 0) return -1;
+ taosArrayPush(pRsp->blockSchema, &pSW);
+ }
+
+ if (pRsp->withTbName) {
+ char *tbName;
+ if (tDecodeCStrAlloc(pDecoder, &tbName) < 0) return -1;
+ taosArrayPush(pRsp->blockTbName, &tbName);
+ }
+ }
+ }
+ if (tDecodeI32(pDecoder, &pRsp->createTableNum) < 0) return -1;
+ if (pRsp->createTableNum) {
+ pRsp->createTableLen = taosArrayInit(pRsp->createTableNum, sizeof(int32_t));
+ pRsp->createTableReq = taosArrayInit(pRsp->createTableNum, sizeof(void *));
+ for (int32_t i = 0; i < pRsp->createTableNum; i++) {
+ void *pCreate = NULL;
+ uint64_t len;
+ if (tDecodeBinaryAlloc(pDecoder, &pCreate, &len) < 0) return -1;
+ int32_t l = (int32_t)len;
+ taosArrayPush(pRsp->createTableLen, &l);
+ taosArrayPush(pRsp->createTableReq, &pCreate);
+ }
+ }
+ return 0;
+}
+
+void tDeleteSTaosxRsp(STaosxRsp *pRsp) {
+ taosArrayDestroy(pRsp->blockDataLen);
+ taosArrayDestroyP(pRsp->blockData, (FDelete)taosMemoryFree);
+ taosArrayDestroyP(pRsp->blockSchema, (FDelete)tDeleteSSchemaWrapper);
+ taosArrayDestroyP(pRsp->blockTbName, (FDelete)taosMemoryFree);
+
+ taosArrayDestroy(pRsp->createTableLen);
+ taosArrayDestroyP(pRsp->createTableReq, (FDelete)taosMemoryFree);
+}
+
int32_t tEncodeSSingleDeleteReq(SEncoder *pEncoder, const SSingleDeleteReq *pReq) {
if (tEncodeI64(pEncoder, pReq->uid) < 0) return -1;
if (tEncodeI64(pEncoder, pReq->ts) < 0) return -1;
diff --git a/source/common/src/trow.c b/source/common/src/trow.c
index 565498a47bb28bc5e76bb5a33911c86f8f42da5d..9880fe362eb55e909842cb0e9aec007667426c71 100644
--- a/source/common/src/trow.c
+++ b/source/common/src/trow.c
@@ -538,12 +538,12 @@ bool tdSTSRowIterGetTpVal(STSRowIter *pIter, col_type_t colType, int32_t offset,
} else {
pVal->val = POINTER_SHIFT(TD_ROW_DATA(pRow), offset);
}
- return TSDB_CODE_SUCCESS;
+ return true;
}
if (tdGetBitmapValType(pIter->pBitmap, pIter->colIdx - 1, &pVal->valType, 0) != TSDB_CODE_SUCCESS) {
pVal->valType = TD_VTYPE_NONE;
- return terrno;
+ return true;
}
if (pVal->valType == TD_VTYPE_NORM) {
diff --git a/source/common/src/tvariant.c b/source/common/src/tvariant.c
index 0810be149716e58fdac74b67db6946fde7db62e9..a01c393441c0a4b6945226ba2c77ffe1a23ced57 100644
--- a/source/common/src/tvariant.c
+++ b/source/common/src/tvariant.c
@@ -155,8 +155,8 @@ void taosVariantCreateFromBinary(SVariant *pVar, const char *pz, size_t len, uin
void taosVariantDestroy(SVariant *pVar) {
if (pVar == NULL) return;
- if (pVar->nType == TSDB_DATA_TYPE_BINARY || pVar->nType == TSDB_DATA_TYPE_NCHAR
- || pVar->nType == TSDB_DATA_TYPE_JSON) {
+ if (pVar->nType == TSDB_DATA_TYPE_BINARY || pVar->nType == TSDB_DATA_TYPE_NCHAR ||
+ pVar->nType == TSDB_DATA_TYPE_JSON) {
taosMemoryFreeClear(pVar->pz);
pVar->nLen = 0;
}
@@ -185,8 +185,8 @@ void taosVariantAssign(SVariant *pDst, const SVariant *pSrc) {
if (pSrc == NULL || pDst == NULL) return;
pDst->nType = pSrc->nType;
- if (pSrc->nType == TSDB_DATA_TYPE_BINARY || pSrc->nType == TSDB_DATA_TYPE_NCHAR
- || pSrc->nType == TSDB_DATA_TYPE_JSON) {
+ if (pSrc->nType == TSDB_DATA_TYPE_BINARY || pSrc->nType == TSDB_DATA_TYPE_NCHAR ||
+ pSrc->nType == TSDB_DATA_TYPE_JSON) {
int32_t len = pSrc->nLen + TSDB_NCHAR_SIZE;
char *p = taosMemoryRealloc(pDst->pz, len);
assert(p);
diff --git a/source/dnode/mgmt/mgmt_vnode/src/vmHandle.c b/source/dnode/mgmt/mgmt_vnode/src/vmHandle.c
index e610b41a04dc7792638a251fa379bcacb37e0050..981576efec3f4fab0bf3a64607abfa961a637b06 100644
--- a/source/dnode/mgmt/mgmt_vnode/src/vmHandle.c
+++ b/source/dnode/mgmt/mgmt_vnode/src/vmHandle.c
@@ -167,9 +167,12 @@ static void vmGenerateVnodeCfg(SCreateVnodeReq *pCreate, SVnodeCfg *pCfg) {
pCfg->walCfg.segSize = pCreate->walSegmentSize;
pCfg->walCfg.level = pCreate->walLevel;
+ pCfg->sttTrigger = pCreate->sstTrigger;
pCfg->hashBegin = pCreate->hashBegin;
pCfg->hashEnd = pCreate->hashEnd;
pCfg->hashMethod = pCreate->hashMethod;
+ pCfg->hashPrefix = pCreate->hashPrefix;
+ pCfg->hashSuffix = pCreate->hashSuffix;
pCfg->standby = pCfg->standby;
pCfg->syncCfg.myIndex = pCreate->selfIndex;
@@ -219,8 +222,11 @@ int32_t vmProcessCreateVnodeReq(SVnodeMgmt *pMgmt, SRpcMsg *pMsg) {
return -1;
}
- dDebug("vgId:%d, start to create vnode, tsma:%d standby:%d cacheLast:%d cacheLastSize:%d", createReq.vgId,
- createReq.isTsma, createReq.standby, createReq.cacheLast, createReq.cacheLastSize);
+ dInfo("vgId:%d, start to create vnode, tsma:%d standby:%d cacheLast:%d cacheLastSize:%d sstTrigger:%d",
+ createReq.vgId, createReq.isTsma, createReq.standby, createReq.cacheLast, createReq.cacheLastSize,
+ createReq.sstTrigger);
+  dInfo("vgId:%d, hashMethod:%d begin:%u end:%u prefix:%d suffix:%d", createReq.vgId, createReq.hashMethod,
+ createReq.hashBegin, createReq.hashEnd, createReq.hashPrefix, createReq.hashSuffix);
vmGenerateVnodeCfg(&createReq, &vnodeCfg);
if (vmTsmaAdjustDays(&vnodeCfg, &createReq) < 0) {
diff --git a/source/dnode/mgmt/node_mgmt/src/dmTransport.c b/source/dnode/mgmt/node_mgmt/src/dmTransport.c
index a059db6b00b7896289346eae0016dedfe95db400..b91b82b72e9cfd730bba4382136d427c215bf844 100644
--- a/source/dnode/mgmt/node_mgmt/src/dmTransport.c
+++ b/source/dnode/mgmt/node_mgmt/src/dmTransport.c
@@ -301,7 +301,7 @@ int32_t dmInitServer(SDnode *pDnode) {
SDnodeTrans *pTrans = &pDnode->trans;
SRpcInit rpcInit = {0};
- strncpy(rpcInit.localFqdn, tsLocalFqdn, strlen(tsLocalFqdn));
+ strncpy(rpcInit.localFqdn, tsLocalFqdn, TSDB_FQDN_LEN);
rpcInit.localPort = tsServerPort;
rpcInit.label = "DND-S";
rpcInit.numOfThreads = tsNumOfRpcThreads;
diff --git a/source/dnode/mnode/impl/inc/mndDef.h b/source/dnode/mnode/impl/inc/mndDef.h
index ea05215fe90d30708013fe4b1c8fc08d2be8d3d6..9632be1b24ba4561804a1f7ec0e7bf1205f23e4c 100644
--- a/source/dnode/mnode/impl/inc/mndDef.h
+++ b/source/dnode/mnode/impl/inc/mndDef.h
@@ -305,11 +305,14 @@ typedef struct {
int8_t hashMethod; // default is 1
int8_t cacheLast;
int8_t schemaless;
+ int16_t hashPrefix;
+ int16_t hashSuffix;
+ int16_t sstTrigger;
int32_t numOfRetensions;
SArray* pRetensions;
int32_t walRetentionPeriod;
- int64_t walRetentionSize;
int32_t walRollPeriod;
+ int64_t walRetentionSize;
int64_t walSegmentSize;
} SDbCfg;
@@ -340,6 +343,7 @@ typedef struct {
uint32_t hashEnd;
char dbName[TSDB_DB_FNAME_LEN];
int64_t dbUid;
+ int64_t cacheUsage;
int64_t numOfTables;
int64_t numOfTimeSeries;
int64_t totalStorage;
diff --git a/source/dnode/mnode/impl/inc/mndInfoSchema.h b/source/dnode/mnode/impl/inc/mndInfoSchema.h
index b10d92ee3de1a0e06d801c9a8840751a9f52f37c..4f98465cd170280d8c9f5e9356c37cebf26f9bd0 100644
--- a/source/dnode/mnode/impl/inc/mndInfoSchema.h
+++ b/source/dnode/mnode/impl/inc/mndInfoSchema.h
@@ -24,7 +24,8 @@ extern "C" {
int32_t mndInitInfos(SMnode *pMnode);
void mndCleanupInfos(SMnode *pMnode);
-int32_t mndBuildInsTableSchema(SMnode *pMnode, const char *dbFName, const char *tbName, STableMetaRsp *pRsp);
+int32_t mndBuildInsTableSchema(SMnode *pMnode, const char *dbFName, const char *tbName, bool sysinfo,
+ STableMetaRsp *pRsp);
int32_t mndBuildInsTableCfg(SMnode *pMnode, const char *dbFName, const char *tbName, STableCfgRsp *pRsp);
#ifdef __cplusplus
diff --git a/source/dnode/mnode/impl/inc/mndStb.h b/source/dnode/mnode/impl/inc/mndStb.h
index 010199a89fcf28131371b589f344a5053e891620..8f0d55e10061ce4517c4305ae7450a7439b91cfd 100644
--- a/source/dnode/mnode/impl/inc/mndStb.h
+++ b/source/dnode/mnode/impl/inc/mndStb.h
@@ -35,6 +35,7 @@ SDbObj *mndAcquireDbByStb(SMnode *pMnode, const char *stbName);
int32_t mndBuildStbFromReq(SMnode *pMnode, SStbObj *pDst, SMCreateStbReq *pCreate, SDbObj *pDb);
int32_t mndAddStbToTrans(SMnode *pMnode, STrans *pTrans, SDbObj *pDb, SStbObj *pStb);
void mndFreeStb(SStbObj *pStb);
+int32_t mndBuildSMCreateStbRsp(SMnode *pMnode, char* dbFName, char* stbFName, void **pCont, int32_t *pLen);
void mndExtractDbNameFromStbFullName(const char *stbFullName, char *dst);
void mndExtractTbNameFromStbFullName(const char *stbFullName, char *dst, int32_t dstSize);
diff --git a/source/dnode/mnode/impl/src/mndDb.c b/source/dnode/mnode/impl/src/mndDb.c
index 853ace79fd79bd2c30684446d0c12f5640eb881c..9e58b74017918d6b99eae59adecb501ac9989df4 100644
--- a/source/dnode/mnode/impl/src/mndDb.c
+++ b/source/dnode/mnode/impl/src/mndDb.c
@@ -15,6 +15,7 @@
#define _DEFAULT_SOURCE
#include "mndDb.h"
+#include "mndCluster.h"
#include "mndDnode.h"
#include "mndOffset.h"
#include "mndPrivilege.h"
@@ -30,7 +31,7 @@
#include "systable.h"
#define DB_VER_NUMBER 1
-#define DB_RESERVE_SIZE 64
+#define DB_RESERVE_SIZE 58
static SSdbRaw *mndDbActionEncode(SDbObj *pDb);
static SSdbRow *mndDbActionDecode(SSdbRaw *pRaw);
@@ -124,6 +125,9 @@ static SSdbRaw *mndDbActionEncode(SDbObj *pDb) {
SDB_SET_INT64(pRaw, dataPos, pDb->cfg.walRetentionSize, _OVER)
SDB_SET_INT32(pRaw, dataPos, pDb->cfg.walRollPeriod, _OVER)
SDB_SET_INT64(pRaw, dataPos, pDb->cfg.walSegmentSize, _OVER)
+ SDB_SET_INT16(pRaw, dataPos, pDb->cfg.sstTrigger, _OVER)
+ SDB_SET_INT16(pRaw, dataPos, pDb->cfg.hashPrefix, _OVER)
+ SDB_SET_INT16(pRaw, dataPos, pDb->cfg.hashSuffix, _OVER)
SDB_SET_RESERVE(pRaw, dataPos, DB_RESERVE_SIZE, _OVER)
SDB_SET_DATALEN(pRaw, dataPos, _OVER)
@@ -207,6 +211,9 @@ static SSdbRow *mndDbActionDecode(SSdbRaw *pRaw) {
SDB_GET_INT64(pRaw, dataPos, &pDb->cfg.walRetentionSize, _OVER)
SDB_GET_INT32(pRaw, dataPos, &pDb->cfg.walRollPeriod, _OVER)
SDB_GET_INT64(pRaw, dataPos, &pDb->cfg.walSegmentSize, _OVER)
+ SDB_GET_INT16(pRaw, dataPos, &pDb->cfg.sstTrigger, _OVER)
+ SDB_GET_INT16(pRaw, dataPos, &pDb->cfg.hashPrefix, _OVER)
+ SDB_GET_INT16(pRaw, dataPos, &pDb->cfg.hashSuffix, _OVER)
SDB_GET_RESERVE(pRaw, dataPos, DB_RESERVE_SIZE, _OVER)
taosInitRWLatch(&pDb->lock);
@@ -254,6 +261,7 @@ static int32_t mndDbActionUpdate(SSdb *pSdb, SDbObj *pOld, SDbObj *pNew) {
pOld->cfg.strict = pNew->cfg.strict;
pOld->cfg.cacheLast = pNew->cfg.cacheLast;
pOld->cfg.replications = pNew->cfg.replications;
+ pOld->cfg.sstTrigger = pNew->cfg.sstTrigger;
taosWUnLockLatch(&pOld->lock);
return 0;
}
@@ -330,6 +338,9 @@ static int32_t mndCheckDbCfg(SMnode *pMnode, SDbCfg *pCfg) {
if (pCfg->walRetentionSize < TSDB_DB_MIN_WAL_RETENTION_SIZE) return -1;
if (pCfg->walRollPeriod < TSDB_DB_MIN_WAL_ROLL_PERIOD) return -1;
if (pCfg->walSegmentSize < TSDB_DB_MIN_WAL_SEGMENT_SIZE) return -1;
+ if (pCfg->sstTrigger < TSDB_MIN_STT_TRIGGER || pCfg->sstTrigger > TSDB_MAX_STT_TRIGGER) return -1;
+ if (pCfg->hashPrefix < TSDB_MIN_HASH_PREFIX || pCfg->hashPrefix > TSDB_MAX_HASH_PREFIX) return -1;
+ if (pCfg->hashSuffix < TSDB_MIN_HASH_SUFFIX || pCfg->hashSuffix > TSDB_MAX_HASH_SUFFIX) return -1;
terrno = 0;
return terrno;
@@ -358,11 +369,14 @@ static void mndSetDefaultDbCfg(SDbCfg *pCfg) {
if (pCfg->numOfRetensions < 0) pCfg->numOfRetensions = 0;
if (pCfg->schemaless < 0) pCfg->schemaless = TSDB_DB_SCHEMALESS_OFF;
if (pCfg->walRetentionPeriod < 0 && pCfg->walRetentionPeriod != -1)
- pCfg->walRetentionPeriod = TSDB_DEFAULT_DB_WAL_RETENTION_PERIOD;
+ pCfg->walRetentionPeriod = TSDB_REPS_DEF_DB_WAL_RET_PERIOD;
if (pCfg->walRetentionSize < 0 && pCfg->walRetentionSize != -1)
- pCfg->walRetentionSize = TSDB_DEFAULT_DB_WAL_RETENTION_SIZE;
- if (pCfg->walRollPeriod < 0) pCfg->walRollPeriod = TSDB_DEFAULT_DB_WAL_ROLL_PERIOD;
+ pCfg->walRetentionSize = TSDB_REPS_DEF_DB_WAL_RET_SIZE;
+ if (pCfg->walRollPeriod < 0) pCfg->walRollPeriod = TSDB_REPS_DEF_DB_WAL_ROLL_PERIOD;
if (pCfg->walSegmentSize < 0) pCfg->walSegmentSize = TSDB_DEFAULT_DB_WAL_SEGMENT_SIZE;
+ if (pCfg->sstTrigger <= 0) pCfg->sstTrigger = TSDB_DEFAULT_SST_TRIGGER;
+ if (pCfg->hashPrefix < 0) pCfg->hashPrefix = TSDB_DEFAULT_HASH_PREFIX;
+ if (pCfg->hashSuffix < 0) pCfg->hashSuffix = TSDB_DEFAULT_HASH_SUFFIX;
}
static int32_t mndSetCreateDbRedoLogs(SMnode *pMnode, STrans *pTrans, SDbObj *pDb, SVgObj *pVgroups) {
@@ -479,6 +493,9 @@ static int32_t mndCreateDb(SMnode *pMnode, SRpcMsg *pReq, SCreateDbReq *pCreate,
.walRetentionSize = pCreate->walRetentionSize,
.walRollPeriod = pCreate->walRollPeriod,
.walSegmentSize = pCreate->walSegmentSize,
+ .sstTrigger = pCreate->sstTrigger,
+ .hashPrefix = pCreate->hashPrefix,
+ .hashSuffix = pCreate->hashSuffix,
};
dbObj.cfg.numOfRetensions = pCreate->numOfRetensions;
@@ -496,6 +513,12 @@ static int32_t mndCreateDb(SMnode *pMnode, SRpcMsg *pReq, SCreateDbReq *pCreate,
return -1;
}
+ if (dbObj.cfg.hashPrefix > 0) {
+ int32_t dbLen = strlen(dbObj.name) + 1;
+ mInfo("db:%s, hashPrefix adjust from %d to %d", dbObj.name, dbObj.cfg.hashPrefix, dbObj.cfg.hashPrefix + dbLen);
+ dbObj.cfg.hashPrefix += dbLen;
+ }
+
SVgObj *pVgroups = NULL;
if (mndAllocVgroup(pMnode, &dbObj, &pVgroups) != 0) {
mError("db:%s, failed to create since %s", pCreate->db, terrstr());
@@ -1155,6 +1178,8 @@ int32_t mndExtractDbInfo(SMnode *pMnode, SDbObj *pDb, SUseDbRsp *pRsp, const SUs
pRsp->vgVersion = pDb->vgVersion;
pRsp->vgNum = taosArrayGetSize(pRsp->pVgroupInfos);
pRsp->hashMethod = pDb->cfg.hashMethod;
+ pRsp->hashPrefix = pDb->cfg.hashPrefix;
+ pRsp->hashSuffix = pDb->cfg.hashSuffix;
return 0;
}
@@ -1287,6 +1312,8 @@ int32_t mndValidateDbInfo(SMnode *pMnode, SDbVgVersion *pDbs, int32_t numOfDbs,
usedbRsp.vgVersion = pDb->vgVersion;
usedbRsp.vgNum = (int32_t)taosArrayGetSize(usedbRsp.pVgroupInfos);
usedbRsp.hashMethod = pDb->cfg.hashMethod;
+ usedbRsp.hashPrefix = pDb->cfg.hashPrefix;
+ usedbRsp.hashSuffix = pDb->cfg.hashSuffix;
taosArrayPush(batchUseRsp.pArray, &usedbRsp);
mndReleaseDb(pMnode, pDb);
@@ -1536,6 +1563,24 @@ static void mndDumpDbInfoData(SMnode *pMnode, SSDataBlock *pBlock, SDbObj *pDb,
STR_WITH_MAXSIZE_TO_VARSTR(buf, "NULL", bytes);
}
+ const char *precStr = NULL;
+ switch (pDb->cfg.precision) {
+ case TSDB_TIME_PRECISION_MILLI:
+ precStr = TSDB_TIME_PRECISION_MILLI_STR;
+ break;
+ case TSDB_TIME_PRECISION_MICRO:
+ precStr = TSDB_TIME_PRECISION_MICRO_STR;
+ break;
+ case TSDB_TIME_PRECISION_NANO:
+ precStr = TSDB_TIME_PRECISION_NANO_STR;
+ break;
+ default:
+ precStr = "none";
+ break;
+ }
+ char precVstr[10] = {0};
+ STR_WITH_SIZE_TO_VARSTR(precVstr, precStr, 2);
+
char *statusStr = "ready";
if (objStatus == SDB_STATUS_CREATING) {
statusStr = "creating";
@@ -1546,7 +1591,6 @@ static void mndDumpDbInfoData(SMnode *pMnode, SSDataBlock *pBlock, SDbObj *pDb,
statusStr = "unsynced";
}
}
-
char statusVstr[24] = {0};
STR_WITH_SIZE_TO_VARSTR(statusVstr, statusStr, strlen(statusStr));
@@ -1555,8 +1599,12 @@ static void mndDumpDbInfoData(SMnode *pMnode, SSDataBlock *pBlock, SDbObj *pDb,
SColumnInfoData *pColInfo = taosArrayGet(pBlock->pDataBlock, i);
if (i == 0) {
colDataAppend(pColInfo, rows, buf, false);
+ } else if (i == 1) {
+ colDataAppend(pColInfo, rows, (const char *)&pDb->createdTime, false);
} else if (i == 3) {
colDataAppend(pColInfo, rows, (const char *)&numOfTables, false);
+ } else if (i == 14) {
+ colDataAppend(pColInfo, rows, precVstr, false);
} else if (i == 15) {
colDataAppend(pColInfo, rows, statusVstr, false);
} else {
@@ -1621,23 +1669,6 @@ static void mndDumpDbInfoData(SMnode *pMnode, SSDataBlock *pBlock, SDbObj *pDb,
pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
colDataAppend(pColInfo, rows, (const char *)&pDb->cfg.compression, false);
- const char *precStr = NULL;
- switch (pDb->cfg.precision) {
- case TSDB_TIME_PRECISION_MILLI:
- precStr = TSDB_TIME_PRECISION_MILLI_STR;
- break;
- case TSDB_TIME_PRECISION_MICRO:
- precStr = TSDB_TIME_PRECISION_MICRO_STR;
- break;
- case TSDB_TIME_PRECISION_NANO:
- precStr = TSDB_TIME_PRECISION_NANO_STR;
- break;
- default:
- precStr = "none";
- break;
- }
- char precVstr[10] = {0};
- STR_WITH_SIZE_TO_VARSTR(precVstr, precStr, 2);
pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
colDataAppend(pColInfo, rows, (const char *)precVstr, false);
@@ -1682,23 +1713,36 @@ static void mndDumpDbInfoData(SMnode *pMnode, SSDataBlock *pBlock, SDbObj *pDb,
pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
colDataAppend(pColInfo, rows, (const char *)&pDb->cfg.walSegmentSize, false);
+
+ pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
+ colDataAppend(pColInfo, rows, (const char *)&pDb->cfg.sstTrigger, false);
+
+ pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
+ int16_t hashPrefix = pDb->cfg.hashPrefix;
+ if (hashPrefix > 0) {
+ hashPrefix = pDb->cfg.hashPrefix - strlen(pDb->name) - 1;
+ }
+ colDataAppend(pColInfo, rows, (const char *)&hashPrefix, false);
+
+ pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
+ colDataAppend(pColInfo, rows, (const char *)&pDb->cfg.hashSuffix, false);
}
taosMemoryFree(buf);
}
-static void setInformationSchemaDbCfg(SDbObj *pDbObj) {
+static void setInformationSchemaDbCfg(SMnode *pMnode, SDbObj *pDbObj) {
tstrncpy(pDbObj->name, TSDB_INFORMATION_SCHEMA_DB, tListLen(pDbObj->name));
- pDbObj->createdTime = 0;
+ pDbObj->createdTime = mndGetClusterCreateTime(pMnode);
pDbObj->cfg.numOfVgroups = 0;
pDbObj->cfg.strict = 1;
pDbObj->cfg.replications = 1;
pDbObj->cfg.precision = TSDB_TIME_PRECISION_MILLI;
}
-static void setPerfSchemaDbCfg(SDbObj *pDbObj) {
+static void setPerfSchemaDbCfg(SMnode *pMnode, SDbObj *pDbObj) {
tstrncpy(pDbObj->name, TSDB_PERFORMANCE_SCHEMA_DB, tListLen(pDbObj->name));
- pDbObj->createdTime = 0;
+ pDbObj->createdTime = mndGetClusterCreateTime(pMnode);
pDbObj->cfg.numOfVgroups = 0;
pDbObj->cfg.strict = 1;
pDbObj->cfg.replications = 1;
@@ -1729,15 +1773,15 @@ static int32_t mndRetrieveDbs(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock *pBloc
// Append the information_schema database into the result.
if (!pShow->sysDbRsp) {
SDbObj infoschemaDb = {0};
- setInformationSchemaDbCfg(&infoschemaDb);
+ setInformationSchemaDbCfg(pMnode, &infoschemaDb);
size_t numOfTables = 0;
- getInfosDbMeta(NULL, &numOfTables);
+ getVisibleInfosTablesNum(sysinfo, &numOfTables);
mndDumpDbInfoData(pMnode, pBlock, &infoschemaDb, pShow, numOfRows, numOfTables, true, 0, 1);
numOfRows += 1;
SDbObj perfschemaDb = {0};
- setPerfSchemaDbCfg(&perfschemaDb);
+ setPerfSchemaDbCfg(pMnode, &perfschemaDb);
numOfTables = 0;
getPerfDbMeta(NULL, &numOfTables);
mndDumpDbInfoData(pMnode, pBlock, &perfschemaDb, pShow, numOfRows, numOfTables, true, 0, 1);
diff --git a/source/dnode/mnode/impl/src/mndDnode.c b/source/dnode/mnode/impl/src/mndDnode.c
index fc5e20ef288d733927499484675acddd042fb3ca..26b4080d14288bee23740394fa4e96755b90feb9 100644
--- a/source/dnode/mnode/impl/src/mndDnode.c
+++ b/source/dnode/mnode/impl/src/mndDnode.c
@@ -347,6 +347,7 @@ static int32_t mndProcessStatusReq(SRpcMsg *pReq) {
SVgObj *pVgroup = mndAcquireVgroup(pMnode, pVload->vgId);
if (pVgroup != NULL) {
if (pVload->syncState == TAOS_SYNC_STATE_LEADER) {
+ pVgroup->cacheUsage = pVload->cacheUsage;
pVgroup->numOfTables = pVload->numOfTables;
pVgroup->numOfTimeSeries = pVload->numOfTimeSeries;
pVgroup->totalStorage = pVload->totalStorage;
@@ -853,8 +854,8 @@ static int32_t mndProcessConfigDnodeReq(SRpcMsg *pReq) {
}
int32_t code = -1;
- SSdb *pSdb = pMnode->pSdb;
- void *pIter = NULL;
+ SSdb *pSdb = pMnode->pSdb;
+ void *pIter = NULL;
while (1) {
SDnodeObj *pDnode = NULL;
pIter = sdbFetch(pSdb, SDB_DNODE, pIter, (void **)&pDnode);
@@ -877,7 +878,7 @@ static int32_t mndProcessConfigDnodeReq(SRpcMsg *pReq) {
sdbRelease(pSdb, pDnode);
}
-
+
if (code == -1) {
terrno = TSDB_CODE_MND_DNODE_NOT_EXIST;
}
diff --git a/source/dnode/mnode/impl/src/mndInfoSchema.c b/source/dnode/mnode/impl/src/mndInfoSchema.c
index bf33cf603f68ccedfdf69c972441021bdbcb0a53..09172115f8502e392c1d37ae1d256761afb02126 100644
--- a/source/dnode/mnode/impl/src/mndInfoSchema.c
+++ b/source/dnode/mnode/impl/src/mndInfoSchema.c
@@ -14,8 +14,8 @@
*/
#define _DEFAULT_SOURCE
-#include "systable.h"
#include "mndInt.h"
+#include "systable.h"
static int32_t mndInitInfosTableSchema(const SSysDbTableSchema *pSrc, int32_t colNum, SSchema **pDst) {
SSchema *schema = taosMemoryCalloc(colNum, sizeof(SSchema));
@@ -29,6 +29,9 @@ static int32_t mndInitInfosTableSchema(const SSysDbTableSchema *pSrc, int32_t co
schema[i].type = pSrc[i].type;
schema[i].colId = i + 1;
schema[i].bytes = pSrc[i].bytes;
+ if (pSrc[i].sysInfo) {
+ schema[i].flags |= COL_IS_SYSINFO;
+ }
}
*pDst = schema;
@@ -43,13 +46,14 @@ static int32_t mndInsInitMeta(SHashObj *hash) {
meta.sversion = 1;
meta.tversion = 1;
- size_t size = 0;
- const SSysTableMeta* pInfosTableMeta = NULL;
+ size_t size = 0;
+ const SSysTableMeta *pInfosTableMeta = NULL;
getInfosDbMeta(&pInfosTableMeta, &size);
for (int32_t i = 0; i < size; ++i) {
tstrncpy(meta.tbName, pInfosTableMeta[i].name, sizeof(meta.tbName));
meta.numOfColumns = pInfosTableMeta[i].colNum;
+ meta.sysInfo = pInfosTableMeta[i].sysInfo;
if (mndInitInfosTableSchema(pInfosTableMeta[i].schema, pInfosTableMeta[i].colNum, &meta.pSchemas)) {
return -1;
@@ -64,14 +68,15 @@ static int32_t mndInsInitMeta(SHashObj *hash) {
return 0;
}
-int32_t mndBuildInsTableSchema(SMnode *pMnode, const char *dbFName, const char *tbName, STableMetaRsp *pRsp) {
+int32_t mndBuildInsTableSchema(SMnode *pMnode, const char *dbFName, const char *tbName, bool sysinfo,
+ STableMetaRsp *pRsp) {
if (NULL == pMnode->infosMeta) {
terrno = TSDB_CODE_APP_NOT_READY;
return -1;
}
STableMetaRsp *pMeta = taosHashGet(pMnode->infosMeta, tbName, strlen(tbName));
- if (NULL == pMeta) {
+ if (NULL == pMeta || (!sysinfo && pMeta->sysInfo)) {
mError("invalid information schema table name:%s", tbName);
terrno = TSDB_CODE_MND_INVALID_SYS_TABLENAME;
return -1;
@@ -121,7 +126,6 @@ int32_t mndBuildInsTableCfg(SMnode *pMnode, const char *dbFName, const char *tbN
return 0;
}
-
int32_t mndInitInfos(SMnode *pMnode) {
pMnode->infosMeta = taosHashInit(20, taosGetDefaultHashFunction(TSDB_DATA_TYPE_VARCHAR), false, HASH_NO_LOCK);
if (pMnode->infosMeta == NULL) {
diff --git a/source/dnode/mnode/impl/src/mndMain.c b/source/dnode/mnode/impl/src/mndMain.c
index 65a539bc9071e2d634ffa29683aac33e9f0f5447..2221718023c8d080059736fd811c946618fd948d 100644
--- a/source/dnode/mnode/impl/src/mndMain.c
+++ b/source/dnode/mnode/impl/src/mndMain.c
@@ -132,7 +132,7 @@ static void *mndThreadFp(void *param) {
mndCalMqRebalance(pMnode);
}
- if (lastTime % (tsTelemInterval * 10) == 1) {
+ if (lastTime % (tsTelemInterval * 10) == ((tsTelemInterval - 1) * 10)) {
mndPullupTelem(pMnode);
}
diff --git a/source/dnode/mnode/impl/src/mndMnode.c b/source/dnode/mnode/impl/src/mndMnode.c
index 4f07d9e0143f52da057c40d2e655044da01a6b72..71bda4d4f34213a7b3240f6634b26579fb66c1ee 100644
--- a/source/dnode/mnode/impl/src/mndMnode.c
+++ b/source/dnode/mnode/impl/src/mndMnode.c
@@ -89,14 +89,14 @@ static int32_t mndCreateDefaultMnode(SMnode *pMnode) {
if (pRaw == NULL) return -1;
sdbSetRawStatus(pRaw, SDB_STATUS_READY);
- mDebug("mnode:%d, will be created when deploying, raw:%p", mnodeObj.id, pRaw);
+ mInfo("mnode:%d, will be created when deploying, raw:%p", mnodeObj.id, pRaw);
STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_RETRY, TRN_CONFLICT_GLOBAL, NULL);
if (pTrans == NULL) {
mError("mnode:%d, failed to create since %s", mnodeObj.id, terrstr());
return -1;
}
- mDebug("trans:%d, used to create mnode:%d", pTrans->id, mnodeObj.id);
+ mInfo("trans:%d, used to create mnode:%d", pTrans->id, mnodeObj.id);
if (mndTransAppendCommitlog(pTrans, pRaw) != 0) {
mError("trans:%d, failed to append commit log since %s", pTrans->id, terrstr());
@@ -365,7 +365,7 @@ static int32_t mndCreateMnode(SMnode *pMnode, SRpcMsg *pReq, SDnodeObj *pDnode,
STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_RETRY, TRN_CONFLICT_GLOBAL, pReq);
if (pTrans == NULL) goto _OVER;
mndTransSetSerial(pTrans);
- mDebug("trans:%d, used to create mnode:%d", pTrans->id, pCreate->dnodeId);
+ mInfo("trans:%d, used to create mnode:%d", pTrans->id, pCreate->dnodeId);
if (mndSetCreateMnodeRedoLogs(pMnode, pTrans, &mnodeObj) != 0) goto _OVER;
if (mndSetCreateMnodeCommitLogs(pMnode, pTrans, &mnodeObj) != 0) goto _OVER;
@@ -392,7 +392,7 @@ static int32_t mndProcessCreateMnodeReq(SRpcMsg *pReq) {
goto _OVER;
}
- mDebug("mnode:%d, start to create", createReq.dnodeId);
+ mInfo("mnode:%d, start to create", createReq.dnodeId);
if (mndCheckOperPrivilege(pMnode, pReq->info.conn.user, MND_OPER_CREATE_MNODE) != 0) {
goto _OVER;
}
@@ -574,7 +574,7 @@ static int32_t mndDropMnode(SMnode *pMnode, SRpcMsg *pReq, SMnodeObj *pObj) {
pTrans = mndTransCreate(pMnode, TRN_POLICY_RETRY, TRN_CONFLICT_GLOBAL, pReq);
if (pTrans == NULL) goto _OVER;
mndTransSetSerial(pTrans);
- mDebug("trans:%d, used to drop mnode:%d", pTrans->id, pObj->id);
+ mInfo("trans:%d, used to drop mnode:%d", pTrans->id, pObj->id);
if (mndSetDropMnodeInfoToTrans(pMnode, pTrans, pObj) != 0) goto _OVER;
if (mndTransPrepare(pMnode, pTrans) != 0) goto _OVER;
@@ -597,7 +597,7 @@ static int32_t mndProcessDropMnodeReq(SRpcMsg *pReq) {
goto _OVER;
}
- mDebug("mnode:%d, start to drop", dropReq.dnodeId);
+ mInfo("mnode:%d, start to drop", dropReq.dnodeId);
if (mndCheckOperPrivilege(pMnode, pReq->info.conn.user, MND_OPER_DROP_MNODE) != 0) {
goto _OVER;
}
@@ -732,7 +732,7 @@ static int32_t mndProcessAlterMnodeReq(SRpcMsg *pReq) {
}
}
- mTrace("trans:-1, sync reconfig will be proposed");
+ mInfo("trans:-1, sync reconfig will be proposed");
SSyncMgmt *pMgmt = &pMnode->syncMgmt;
pMgmt->standby = 0;
diff --git a/source/dnode/mnode/impl/src/mndProfile.c b/source/dnode/mnode/impl/src/mndProfile.c
index e55c562e38c207600956cd1eafbb88d744750f7d..e8737e30c9817bd71d1b3a47f245ef0004603dc3 100644
--- a/source/dnode/mnode/impl/src/mndProfile.c
+++ b/source/dnode/mnode/impl/src/mndProfile.c
@@ -270,6 +270,7 @@ static int32_t mndProcessConnectReq(SRpcMsg *pReq) {
SConnectRsp connectRsp = {0};
connectRsp.acctId = pUser->acctId;
connectRsp.superUser = pUser->superUser;
+ connectRsp.sysInfo = pUser->sysInfo;
connectRsp.clusterId = pMnode->clusterId;
connectRsp.connId = pConn->id;
connectRsp.connType = connReq.connType;
diff --git a/source/dnode/mnode/impl/src/mndShow.c b/source/dnode/mnode/impl/src/mndShow.c
index 9499c90c57c59e3600c701668dd17671f641d919..5a998dfe986d9f012e066f45810604b7ca9d728f 100644
--- a/source/dnode/mnode/impl/src/mndShow.c
+++ b/source/dnode/mnode/impl/src/mndShow.c
@@ -88,7 +88,7 @@ static int32_t convertToRetrieveType(char *name, int32_t len) {
type = TSDB_MGMT_TABLE_VGROUP;
} else if (strncasecmp(name, TSDB_PERFS_TABLE_CONSUMERS, len) == 0) {
type = TSDB_MGMT_TABLE_CONSUMERS;
- } else if (strncasecmp(name, TSDB_PERFS_TABLE_SUBSCRIPTIONS, len) == 0) {
+ } else if (strncasecmp(name, TSDB_INS_TABLE_SUBSCRIPTIONS, len) == 0) {
type = TSDB_MGMT_TABLE_SUBSCRIPTIONS;
} else if (strncasecmp(name, TSDB_PERFS_TABLE_TRANS, len) == 0) {
type = TSDB_MGMT_TABLE_TRANS;
@@ -102,9 +102,9 @@ static int32_t convertToRetrieveType(char *name, int32_t len) {
type = TSDB_MGMT_TABLE_QUERIES;
} else if (strncasecmp(name, TSDB_INS_TABLE_VNODES, len) == 0) {
type = TSDB_MGMT_TABLE_VNODES;
- } else if (strncasecmp(name, TSDB_PERFS_TABLE_TOPICS, len) == 0) {
+ } else if (strncasecmp(name, TSDB_INS_TABLE_TOPICS, len) == 0) {
type = TSDB_MGMT_TABLE_TOPICS;
- } else if (strncasecmp(name, TSDB_PERFS_TABLE_STREAMS, len) == 0) {
+ } else if (strncasecmp(name, TSDB_INS_TABLE_STREAMS, len) == 0) {
type = TSDB_MGMT_TABLE_STREAMS;
} else if (strncasecmp(name, TSDB_PERFS_TABLE_APPS, len) == 0) {
type = TSDB_MGMT_TABLE_APPS;
diff --git a/source/dnode/mnode/impl/src/mndSma.c b/source/dnode/mnode/impl/src/mndSma.c
index 2fb934aaad735240e1a249447b5d041853819d82..8638cc511890066f45367253313aec8f626ceb8e 100644
--- a/source/dnode/mnode/impl/src/mndSma.c
+++ b/source/dnode/mnode/impl/src/mndSma.c
@@ -38,7 +38,6 @@ static SSdbRow *mndSmaActionDecode(SSdbRaw *pRaw);
static int32_t mndSmaActionInsert(SSdb *pSdb, SSmaObj *pSma);
static int32_t mndSmaActionDelete(SSdb *pSdb, SSmaObj *pSpSmatb);
static int32_t mndSmaActionUpdate(SSdb *pSdb, SSmaObj *pOld, SSmaObj *pNew);
-static int32_t mndSmaGetVgEpSet(SMnode *pMnode, SDbObj *pDb, SVgEpSet **ppVgEpSet, int32_t *numOfVgroups);
static int32_t mndProcessCreateSmaReq(SRpcMsg *pReq);
static int32_t mndProcessDropSmaReq(SRpcMsg *pReq);
static int32_t mndProcessGetSmaReq(SRpcMsg *pReq);
@@ -841,6 +840,7 @@ static int32_t mndDropSma(SMnode *pMnode, SRpcMsg *pReq, SDbObj *pDb, SSmaObj *p
_OVER:
mndTransDrop(pTrans);
+ mndReleaseStream(pMnode, pStream);
mndReleaseVgroup(pMnode, pVgroup);
mndReleaseStb(pMnode, pStb);
return code;
@@ -961,6 +961,7 @@ _OVER:
mError("sma:%s, failed to drop since %s", dropReq.name, terrstr());
}
+ mndReleaseSma(pMnode, pSma);
mndReleaseDb(pMnode, pDb);
return code;
}
diff --git a/source/dnode/mnode/impl/src/mndStb.c b/source/dnode/mnode/impl/src/mndStb.c
index ebec3d5ea686c3a976adf5d4890f2a7eb7d8be82..dc8285740a4bdf9e0bfb04c36e780aca0f32f758 100644
--- a/source/dnode/mnode/impl/src/mndStb.c
+++ b/source/dnode/mnode/impl/src/mndStb.c
@@ -536,7 +536,7 @@ int32_t mndCheckCreateStbReq(SMCreateStbReq *pCreate) {
return -1;
}
- if (pCreate->numOfColumns < TSDB_MIN_COLUMNS || pCreate->numOfColumns > TSDB_MAX_COLUMNS) {
+ if (pCreate->numOfColumns < TSDB_MIN_COLUMNS || pCreate->numOfTags + pCreate->numOfColumns > TSDB_MAX_COLUMNS) {
terrno = TSDB_CODE_PAR_INVALID_COLUMNS_NUM;
return -1;
}
@@ -1774,6 +1774,67 @@ static int32_t mndBuildSMAlterStbRsp(SDbObj *pDb, SStbObj *pObj, void **pCont, i
return 0;
}
+int32_t mndBuildSMCreateStbRsp(SMnode *pMnode, char* dbFName, char* stbFName, void **pCont, int32_t *pLen) {
+ int32_t ret = -1;
+ SDbObj *pDb = mndAcquireDb(pMnode, dbFName);
+ if (NULL == pDb) {
+ return -1;
+ }
+
+ SStbObj *pObj = mndAcquireStb(pMnode, stbFName);
+ if (NULL == pObj) {
+ goto _OVER;
+ }
+
+ SEncoder ec = {0};
+ uint32_t contLen = 0;
+ SMCreateStbRsp stbRsp = {0};
+ SName name = {0};
+ tNameFromString(&name, pObj->name, T_NAME_ACCT | T_NAME_DB | T_NAME_TABLE);
+
+ stbRsp.pMeta = taosMemoryCalloc(1, sizeof(STableMetaRsp));
+ if (NULL == stbRsp.pMeta) {
+ terrno = TSDB_CODE_OUT_OF_MEMORY;
+ goto _OVER;
+ }
+
+ ret = mndBuildStbSchemaImp(pDb, pObj, name.tname, stbRsp.pMeta);
+ if (ret) {
+ tFreeSMCreateStbRsp(&stbRsp);
+ goto _OVER;
+ }
+
+ tEncodeSize(tEncodeSMCreateStbRsp, &stbRsp, contLen, ret);
+ if (ret) {
+ tFreeSMCreateStbRsp(&stbRsp);
+ goto _OVER;
+ }
+
+ void *cont = taosMemoryMalloc(contLen);
+ tEncoderInit(&ec, cont, contLen);
+ tEncodeSMCreateStbRsp(&ec, &stbRsp);
+ tEncoderClear(&ec);
+
+ tFreeSMCreateStbRsp(&stbRsp);
+
+ *pCont = cont;
+ *pLen = contLen;
+
+ ret = 0;
+
+_OVER:
+ if (pObj) {
+ mndReleaseStb(pMnode, pObj);
+ }
+
+ if (pDb) {
+ mndReleaseDb(pMnode, pDb);
+ }
+
+ return ret;
+}
+
+
static int32_t mndAlterStbImp(SMnode *pMnode, SRpcMsg *pReq, SDbObj *pDb, SStbObj *pStb, bool needRsp,
void *alterOriData, int32_t alterOriDataLen) {
int32_t code = -1;
@@ -2157,6 +2218,10 @@ static int32_t mndProcessTableMetaReq(SRpcMsg *pReq) {
STableInfoReq infoReq = {0};
STableMetaRsp metaRsp = {0};
+ SUserObj *pUser = mndAcquireUser(pMnode, pReq->info.conn.user);
+ if (pUser == NULL) return 0;
+ bool sysinfo = pUser->sysInfo;
+
if (tDeserializeSTableInfoReq(pReq->pCont, pReq->contLen, &infoReq) != 0) {
terrno = TSDB_CODE_INVALID_MSG;
goto _OVER;
@@ -2164,7 +2229,7 @@ static int32_t mndProcessTableMetaReq(SRpcMsg *pReq) {
if (0 == strcmp(infoReq.dbFName, TSDB_INFORMATION_SCHEMA_DB)) {
mDebug("information_schema table:%s.%s, start to retrieve meta", infoReq.dbFName, infoReq.tbName);
- if (mndBuildInsTableSchema(pMnode, infoReq.dbFName, infoReq.tbName, &metaRsp) != 0) {
+ if (mndBuildInsTableSchema(pMnode, infoReq.dbFName, infoReq.tbName, sysinfo, &metaRsp) != 0) {
goto _OVER;
}
} else if (0 == strcmp(infoReq.dbFName, TSDB_PERFORMANCE_SCHEMA_DB)) {
@@ -2203,6 +2268,7 @@ _OVER:
mError("stb:%s.%s, failed to retrieve meta since %s", infoReq.dbFName, infoReq.tbName, terrstr());
}
+ mndReleaseUser(pMnode, pUser);
tFreeSTableMetaRsp(&metaRsp);
return code;
}
diff --git a/source/dnode/mnode/impl/src/mndStream.c b/source/dnode/mnode/impl/src/mndStream.c
index 6dc8e2072b71df78aa88aecdd924a98db658ab05..dd7a9e71eaa634a5bda506b318c6c4472a48726b 100644
--- a/source/dnode/mnode/impl/src/mndStream.c
+++ b/source/dnode/mnode/impl/src/mndStream.c
@@ -631,6 +631,7 @@ static int32_t mndProcessCreateStreamReq(SRpcMsg *pReq) {
SStreamObj *pStream = NULL;
SDbObj *pDb = NULL;
SCMCreateStreamReq createStreamReq = {0};
+ SStreamObj streamObj = {0};
if (tDeserializeSCMCreateStreamReq(pReq->pCont, pReq->contLen, &createStreamReq) != 0) {
terrno = TSDB_CODE_INVALID_MSG;
@@ -659,7 +660,6 @@ static int32_t mndProcessCreateStreamReq(SRpcMsg *pReq) {
}
// build stream obj from request
- SStreamObj streamObj = {0};
if (mndBuildStreamObjFromCreateReq(pMnode, &streamObj, &createStreamReq) < 0) {
/*ASSERT(0);*/
mError("stream:%s, failed to create since %s", createStreamReq.name, terrstr());
diff --git a/source/dnode/mnode/impl/src/mndSubscribe.c b/source/dnode/mnode/impl/src/mndSubscribe.c
index 10e520d9ec49a53e5fcedcf668a40732480aa75b..1452c5ae2fd3e9cde7cb9052d22e10bfd31afb0f 100644
--- a/source/dnode/mnode/impl/src/mndSubscribe.c
+++ b/source/dnode/mnode/impl/src/mndSubscribe.c
@@ -287,6 +287,7 @@ static int32_t mndDoRebalance(SMnode *pMnode, const SMqRebInputObj *pInput, SMqR
if (consumerVgNum > minVgCnt) {
if (imbCnt < imbConsumerNum) {
if (consumerVgNum == minVgCnt + 1) {
+ imbCnt++;
continue;
} else {
// pop until equal minVg + 1
diff --git a/source/dnode/mnode/impl/src/mndSync.c b/source/dnode/mnode/impl/src/mndSync.c
index b7129cf56ebd7ece43094d4281a1c5b8f4464969..e8b75e6a94e1089b037be9ec42a4fdc9deef3b3c 100644
--- a/source/dnode/mnode/impl/src/mndSync.c
+++ b/source/dnode/mnode/impl/src/mndSync.c
@@ -50,7 +50,7 @@ void mndSyncCommitMsg(struct SSyncFSM *pFsm, const SRpcMsg *pMsg, SFsmCbMeta cbM
int32_t transId = sdbGetIdFromRaw(pMnode->pSdb, pRaw);
pMgmt->errCode = cbMeta.code;
- mDebug("trans:%d, is proposed, saved:%d code:0x%x, apply index:%" PRId64 " term:%" PRIu64 " config:%" PRId64
+ mInfo("trans:%d, is proposed, saved:%d code:0x%x, apply index:%" PRId64 " term:%" PRIu64 " config:%" PRId64
" role:%s raw:%p",
transId, pMgmt->transId, cbMeta.code, cbMeta.index, cbMeta.term, cbMeta.lastConfigIndex, syncStr(cbMeta.state),
pRaw);
@@ -68,7 +68,7 @@ void mndSyncCommitMsg(struct SSyncFSM *pFsm, const SRpcMsg *pMsg, SFsmCbMeta cbM
if (pMgmt->errCode != 0) {
mError("trans:%d, failed to propose since %s, post sem", transId, tstrerror(pMgmt->errCode));
} else {
- mDebug("trans:%d, is proposed and post sem", transId, tstrerror(pMgmt->errCode));
+ mInfo("trans:%d, is proposed and post sem", transId, tstrerror(pMgmt->errCode));
}
pMgmt->transId = 0;
taosWUnLockLatch(&pMgmt->lock);
@@ -88,7 +88,7 @@ void mndSyncCommitMsg(struct SSyncFSM *pFsm, const SRpcMsg *pMsg, SFsmCbMeta cbM
}
int32_t mndSyncGetSnapshot(struct SSyncFSM *pFsm, SSnapshot *pSnapshot, void *pReaderParam, void **ppReader) {
- mDebug("start to read snapshot from sdb in atomic way");
+ mInfo("start to read snapshot from sdb in atomic way");
SMnode *pMnode = pFsm->data;
return sdbStartRead(pMnode->pSdb, (SSdbIter **)ppReader, &pSnapshot->lastApplyIndex, &pSnapshot->lastApplyTerm,
&pSnapshot->lastConfigIndex);
@@ -118,7 +118,7 @@ void mndReConfig(struct SSyncFSM *pFsm, const SRpcMsg *pMsg, SReConfigCbMeta cbM
SSyncMgmt *pMgmt = &pMnode->syncMgmt;
pMgmt->errCode = cbMeta.code;
- mDebug("trans:-1, sync reconfig is proposed, saved:%d code:0x%x, index:%" PRId64 " term:%" PRId64, pMgmt->transId,
+ mInfo("trans:-1, sync reconfig is proposed, saved:%d code:0x%x, index:%" PRId64 " term:%" PRId64, pMgmt->transId,
cbMeta.code, cbMeta.index, cbMeta.term);
taosWLockLatch(&pMgmt->lock);
@@ -126,7 +126,7 @@ void mndReConfig(struct SSyncFSM *pFsm, const SRpcMsg *pMsg, SReConfigCbMeta cbM
if (pMgmt->errCode != 0) {
mError("trans:-1, failed to propose sync reconfig since %s, post sem", tstrerror(pMgmt->errCode));
} else {
- mDebug("trans:-1, sync reconfig is proposed, saved:%d code:0x%x, index:%" PRId64 " term:%" PRId64 " post sem",
+ mInfo("trans:-1, sync reconfig is proposed, saved:%d code:0x%x, index:%" PRId64 " term:%" PRId64 " post sem",
pMgmt->transId, cbMeta.code, cbMeta.index, cbMeta.term);
}
pMgmt->transId = 0;
@@ -136,13 +136,13 @@ void mndReConfig(struct SSyncFSM *pFsm, const SRpcMsg *pMsg, SReConfigCbMeta cbM
}
int32_t mndSnapshotStartRead(struct SSyncFSM *pFsm, void *pParam, void **ppReader) {
- mDebug("start to read snapshot from sdb");
+ mInfo("start to read snapshot from sdb");
SMnode *pMnode = pFsm->data;
return sdbStartRead(pMnode->pSdb, (SSdbIter **)ppReader, NULL, NULL, NULL);
}
int32_t mndSnapshotStopRead(struct SSyncFSM *pFsm, void *pReader) {
- mDebug("stop to read snapshot from sdb");
+ mInfo("stop to read snapshot from sdb");
SMnode *pMnode = pFsm->data;
return sdbStopRead(pMnode->pSdb, pReader);
}
@@ -174,12 +174,12 @@ int32_t mndSnapshotDoWrite(struct SSyncFSM *pFsm, void *pWriter, void *pBuf, int
void mndLeaderTransfer(struct SSyncFSM *pFsm, const SRpcMsg *pMsg, SFsmCbMeta cbMeta) {
SMnode *pMnode = pFsm->data;
atomic_store_8(&(pMnode->syncMgmt.leaderTransferFinish), 1);
- mDebug("vgId:1, mnode leader transfer finish");
+ mInfo("vgId:1, mnode leader transfer finish");
}
static void mndBecomeFollower(struct SSyncFSM *pFsm) {
SMnode *pMnode = pFsm->data;
- mDebug("vgId:1, become follower and post sem");
+ mInfo("vgId:1, become follower and post sem");
taosWLockLatch(&pMnode->syncMgmt.lock);
if (pMnode->syncMgmt.transId != 0) {
@@ -190,7 +190,7 @@ static void mndBecomeFollower(struct SSyncFSM *pFsm) {
}
static void mndBecomeLeader(struct SSyncFSM *pFsm) {
- mDebug("vgId:1, become leader");
+ mInfo("vgId:1, become leader");
SMnode *pMnode = pFsm->data;
}
@@ -228,7 +228,7 @@ int32_t mndInitSync(SMnode *pMnode) {
syncInfo.isStandBy = pMgmt->standby;
syncInfo.snapshotStrategy = SYNC_STRATEGY_STANDARD_SNAPSHOT;
- mDebug("start to open mnode sync, standby:%d", pMgmt->standby);
+ mInfo("start to open mnode sync, standby:%d", pMgmt->standby);
if (pMgmt->standby || pMgmt->replica.id > 0) {
SSyncCfg *pCfg = &syncInfo.syncCfg;
pCfg->replicaNum = 1;
@@ -236,7 +236,7 @@ int32_t mndInitSync(SMnode *pMnode) {
SNodeInfo *pNode = &pCfg->nodeInfo[0];
tstrncpy(pNode->nodeFqdn, pMgmt->replica.fqdn, sizeof(pNode->nodeFqdn));
pNode->nodePort = pMgmt->replica.port;
- mDebug("mnode ep:%s:%u", pNode->nodeFqdn, pNode->nodePort);
+ mInfo("mnode ep:%s:%u", pNode->nodeFqdn, pNode->nodePort);
}
tsem_init(&pMgmt->syncSem, 0, 0);
@@ -284,7 +284,7 @@ int32_t mndSyncPropose(SMnode *pMnode, SSdbRaw *pRaw, int32_t transId) {
return -1;
} else {
pMgmt->transId = transId;
- mDebug("trans:%d, will be proposed", pMgmt->transId);
+ mInfo("trans:%d, will be proposed", pMgmt->transId);
taosWUnLockLatch(&pMgmt->lock);
}
@@ -314,7 +314,7 @@ void mndSyncStart(SMnode *pMnode) {
SSyncMgmt *pMgmt = &pMnode->syncMgmt;
syncSetMsgCb(pMgmt->sync, &pMnode->msgCb);
syncStart(pMgmt->sync);
- mDebug("mnode sync started, id:%" PRId64 " standby:%d", pMgmt->sync, pMgmt->standby);
+ mInfo("mnode sync started, id:%" PRId64 " standby:%d", pMgmt->sync, pMgmt->standby);
}
void mndSyncStop(SMnode *pMnode) {
diff --git a/source/dnode/mnode/impl/src/mndTelem.c b/source/dnode/mnode/impl/src/mndTelem.c
index 27814fe5bea155c54fa32789efbaf2ae30cdb29b..93f7531a272860d63351ff1a008fa11f48b5a17c 100644
--- a/source/dnode/mnode/impl/src/mndTelem.c
+++ b/source/dnode/mnode/impl/src/mndTelem.c
@@ -131,7 +131,9 @@ static int32_t mndProcessTelemTimer(SRpcMsg* pReq) {
char* pCont = mndBuildTelemetryReport(pMnode);
if (pCont != NULL) {
if (taosSendHttpReport(tsTelemServer, tsTelemPort, pCont, strlen(pCont), HTTP_FLAT) != 0) {
- mError("failed to send telemetry msg");
+ mError("failed to send telemetry report");
+ } else {
+ mTrace("succeed to send telemetry report");
}
taosMemoryFree(pCont);
}
diff --git a/source/dnode/mnode/impl/src/mndTopic.c b/source/dnode/mnode/impl/src/mndTopic.c
index ff208eae607ab0fa57be7431771f209e18e02ce5..eb072d013d0024e5b05a172c3c3d5d55ce41cd40 100644
--- a/source/dnode/mnode/impl/src/mndTopic.c
+++ b/source/dnode/mnode/impl/src/mndTopic.c
@@ -763,8 +763,9 @@ static int32_t mndRetrieveTopic(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock *pBl
int32_t cols = 0;
char topicName[TSDB_TOPIC_NAME_LEN + VARSTR_HEADER_SIZE] = {0};
- tNameFromString(&n, pTopic->name, T_NAME_ACCT | T_NAME_DB);
- tNameGetDbName(&n, varDataVal(topicName));
+ strcpy(varDataVal(topicName), mndGetDbStr(pTopic->name));
+ /*tNameFromString(&n, pTopic->name, T_NAME_ACCT | T_NAME_DB);*/
+ /*tNameGetDbName(&n, varDataVal(topicName));*/
varDataSetLen(topicName, strlen(varDataVal(topicName)));
pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
colDataAppend(pColInfo, numOfRows, (const char *)topicName, false);
diff --git a/source/dnode/mnode/impl/src/mndTrans.c b/source/dnode/mnode/impl/src/mndTrans.c
index 17b4336465c23c49e71adde85a8b5291124c4f43..9c4a5afb032e6677997b7a84e919451b238b2068 100644
--- a/source/dnode/mnode/impl/src/mndTrans.c
+++ b/source/dnode/mnode/impl/src/mndTrans.c
@@ -17,6 +17,7 @@
#include "mndTrans.h"
#include "mndConsumer.h"
#include "mndDb.h"
+#include "mndStb.h"
#include "mndPrivilege.h"
#include "mndShow.h"
#include "mndSync.h"
@@ -455,11 +456,11 @@ static const char *mndTransStr(ETrnStage stage) {
}
static void mndTransTestStartFunc(SMnode *pMnode, void *param, int32_t paramLen) {
- mDebug("test trans start, param:%s, len:%d", (char *)param, paramLen);
+ mInfo("test trans start, param:%s, len:%d", (char *)param, paramLen);
}
static void mndTransTestStopFunc(SMnode *pMnode, void *param, int32_t paramLen) {
- mDebug("test trans stop, param:%s, len:%d", (char *)param, paramLen);
+ mInfo("test trans stop, param:%s, len:%d", (char *)param, paramLen);
}
static TransCbFp mndTransGetCbFp(ETrnFunc ftype) {
@@ -706,7 +707,7 @@ int32_t mndSetRpcInfoForDbTrans(SMnode *pMnode, SRpcMsg *pMsg, EOperType oper, c
if (pTrans->oper == oper) {
if (strcasecmp(dbname, pTrans->dbname1) == 0) {
- mDebug("trans:%d, db:%s oper:%d matched with input", pTrans->id, dbname, oper);
+ mInfo("trans:%d, db:%s oper:%d matched with input", pTrans->id, dbname, oper);
if (pTrans->pRpcArray == NULL) {
pTrans->pRpcArray = taosArrayInit(1, sizeof(SRpcHandleInfo));
}
@@ -745,7 +746,7 @@ static int32_t mndTransSync(SMnode *pMnode, STrans *pTrans) {
}
sdbSetRawStatus(pRaw, SDB_STATUS_READY);
- mDebug("trans:%d, sync to other mnodes, stage:%s", pTrans->id, mndTransStr(pTrans->stage));
+ mInfo("trans:%d, sync to other mnodes, stage:%s", pTrans->id, mndTransStr(pTrans->stage));
int32_t code = mndSyncPropose(pMnode, pRaw, pTrans->id);
if (code != 0) {
mError("trans:%d, failed to sync since %s", pTrans->id, terrstr());
@@ -754,7 +755,7 @@ static int32_t mndTransSync(SMnode *pMnode, STrans *pTrans) {
}
sdbFreeRaw(pRaw);
- mDebug("trans:%d, sync finished", pTrans->id);
+ mInfo("trans:%d, sync finished", pTrans->id);
return 0;
}
@@ -820,12 +821,12 @@ int32_t mndTransPrepare(SMnode *pMnode, STrans *pTrans) {
return -1;
}
- mDebug("trans:%d, prepare transaction", pTrans->id);
+ mInfo("trans:%d, prepare transaction", pTrans->id);
if (mndTransSync(pMnode, pTrans) != 0) {
mError("trans:%d, failed to prepare since %s", pTrans->id, terrstr());
return -1;
}
- mDebug("trans:%d, prepare finished", pTrans->id);
+ mInfo("trans:%d, prepare finished", pTrans->id);
STrans *pNew = mndAcquireTrans(pMnode, pTrans->id);
if (pNew == NULL) {
@@ -846,22 +847,22 @@ int32_t mndTransPrepare(SMnode *pMnode, STrans *pTrans) {
}
static int32_t mndTransCommit(SMnode *pMnode, STrans *pTrans) {
- mDebug("trans:%d, commit transaction", pTrans->id);
+ mInfo("trans:%d, commit transaction", pTrans->id);
if (mndTransSync(pMnode, pTrans) != 0) {
mError("trans:%d, failed to commit since %s", pTrans->id, terrstr());
return -1;
}
- mDebug("trans:%d, commit finished", pTrans->id);
+ mInfo("trans:%d, commit finished", pTrans->id);
return 0;
}
static int32_t mndTransRollback(SMnode *pMnode, STrans *pTrans) {
- mDebug("trans:%d, rollback transaction", pTrans->id);
+ mInfo("trans:%d, rollback transaction", pTrans->id);
if (mndTransSync(pMnode, pTrans) != 0) {
mError("trans:%d, failed to rollback since %s", pTrans->id, terrstr());
return -1;
}
- mDebug("trans:%d, rollback finished", pTrans->id);
+ mInfo("trans:%d, rollback finished", pTrans->id);
return 0;
}
@@ -893,30 +894,21 @@ static void mndTransSendRpcRsp(SMnode *pMnode, STrans *pTrans) {
for (int32_t i = 0; i < size; ++i) {
SRpcHandleInfo *pInfo = taosArrayGet(pTrans->pRpcArray, i);
if (pInfo->handle != NULL) {
- mDebug("trans:%d, send rsp, code:0x%x stage:%s app:%p", pTrans->id, code, mndTransStr(pTrans->stage),
+ mInfo("trans:%d, send rsp, code:0x%x stage:%s app:%p", pTrans->id, code, mndTransStr(pTrans->stage),
pInfo->ahandle);
if (code == TSDB_CODE_RPC_NETWORK_UNAVAIL) {
code = TSDB_CODE_MND_TRANS_NETWORK_UNAVAILL;
}
SRpcMsg rspMsg = {.code = code, .info = *pInfo};
- if (pTrans->rpcRspLen != 0) {
- void *rpcCont = rpcMallocCont(pTrans->rpcRspLen);
- if (rpcCont != NULL) {
- memcpy(rpcCont, pTrans->rpcRsp, pTrans->rpcRspLen);
- rspMsg.pCont = rpcCont;
- rspMsg.contLen = pTrans->rpcRspLen;
- }
- }
-
if (pTrans->originRpcType == TDMT_MND_CREATE_DB) {
- mDebug("trans:%d, origin msgtype:%s", pTrans->id, TMSG_INFO(pTrans->originRpcType));
+ mInfo("trans:%d, origin msgtype:%s", pTrans->id, TMSG_INFO(pTrans->originRpcType));
SDbObj *pDb = mndAcquireDb(pMnode, pTrans->dbname1);
if (pDb != NULL) {
for (int32_t j = 0; j < 12; j++) {
bool ready = mndIsDbReady(pMnode, pDb);
if (!ready) {
- mDebug("trans:%d, db:%s not ready yet, wait %d times", pTrans->id, pTrans->dbname1, j);
+ mInfo("trans:%d, db:%s not ready yet, wait %d times", pTrans->id, pTrans->dbname1, j);
taosMsleep(1000);
} else {
break;
@@ -924,6 +916,21 @@ static void mndTransSendRpcRsp(SMnode *pMnode, STrans *pTrans) {
}
}
mndReleaseDb(pMnode, pDb);
+ } else if (pTrans->originRpcType == TDMT_MND_CREATE_STB) {
+ void *pCont = NULL;
+ int32_t contLen = 0;
+ if (0 == mndBuildSMCreateStbRsp(pMnode, pTrans->dbname1, pTrans->dbname2, &pCont, &contLen) != 0) {
+ mndTransSetRpcRsp(pTrans, pCont, contLen);
+ }
+ }
+
+ if (pTrans->rpcRspLen != 0) {
+ void *rpcCont = rpcMallocCont(pTrans->rpcRspLen);
+ if (rpcCont != NULL) {
+ memcpy(rpcCont, pTrans->rpcRsp, pTrans->rpcRspLen);
+ rspMsg.pCont = rpcCont;
+ rspMsg.contLen = pTrans->rpcRspLen;
+ }
}
tmsgSendRsp(&rspMsg);
@@ -971,7 +978,7 @@ int32_t mndTransProcessRsp(SRpcMsg *pRsp) {
pAction->errCode = pRsp->code;
}
- mDebug("trans:%d, %s:%d response is received, code:0x%x, accept:0x%x retry:0x%x", transId,
+ mInfo("trans:%d, %s:%d response is received, code:0x%x, accept:0x%x retry:0x%x", transId,
mndTransStr(pAction->stage), action, pRsp->code, pAction->acceptableCode, pAction->retryCode);
mndTransExecute(pMnode, pTrans);
@@ -987,10 +994,10 @@ static void mndTransResetAction(SMnode *pMnode, STrans *pTrans, STransAction *pA
if (pAction->errCode == TSDB_CODE_RPC_REDIRECT || pAction->errCode == TSDB_CODE_SYN_NEW_CONFIG_ERROR ||
pAction->errCode == TSDB_CODE_SYN_INTERNAL_ERROR || pAction->errCode == TSDB_CODE_SYN_NOT_LEADER) {
pAction->epSet.inUse = (pAction->epSet.inUse + 1) % pAction->epSet.numOfEps;
- mDebug("trans:%d, %s:%d execute status is reset and set epset inuse:%d", pTrans->id, mndTransStr(pAction->stage),
+ mInfo("trans:%d, %s:%d execute status is reset and set epset inuse:%d", pTrans->id, mndTransStr(pAction->stage),
pAction->id, pAction->epSet.inUse);
} else {
- mDebug("trans:%d, %s:%d execute status is reset", pTrans->id, mndTransStr(pAction->stage), pAction->id);
+ mInfo("trans:%d, %s:%d execute status is reset", pTrans->id, mndTransStr(pAction->stage), pAction->id);
}
pAction->errCode = 0;
}
@@ -1017,7 +1024,7 @@ static int32_t mndTransWriteSingleLog(SMnode *pMnode, STrans *pTrans, STransActi
pAction->rawWritten = true;
pAction->errCode = 0;
code = 0;
- mDebug("trans:%d, %s:%d write to sdb, type:%s status:%s", pTrans->id, mndTransStr(pAction->stage), pAction->id,
+ mInfo("trans:%d, %s:%d write to sdb, type:%s status:%s", pTrans->id, mndTransStr(pAction->stage), pAction->id,
sdbTableName(pAction->pRaw->type), sdbStatusName(pAction->pRaw->status));
pTrans->lastAction = pAction->id;
@@ -1066,7 +1073,7 @@ static int32_t mndTransSendSingleMsg(SMnode *pMnode, STrans *pTrans, STransActio
pAction->msgSent = 1;
pAction->msgReceived = 0;
pAction->errCode = 0;
- mDebug("trans:%d, %s:%d is sent, %s", pTrans->id, mndTransStr(pAction->stage), pAction->id, detail);
+ mInfo("trans:%d, %s:%d is sent, %s", pTrans->id, mndTransStr(pAction->stage), pAction->id, detail);
pTrans->lastAction = pAction->id;
pTrans->lastMsgType = pAction->msgType;
@@ -1093,7 +1100,7 @@ static int32_t mndTransSendSingleMsg(SMnode *pMnode, STrans *pTrans, STransActio
static int32_t mndTransExecNullMsg(SMnode *pMnode, STrans *pTrans, STransAction *pAction) {
pAction->rawWritten = 0;
pAction->errCode = 0;
- mDebug("trans:%d, %s:%d confirm action executed", pTrans->id, mndTransStr(pAction->stage), pAction->id);
+ mInfo("trans:%d, %s:%d confirm action executed", pTrans->id, mndTransStr(pAction->stage), pAction->id);
pTrans->lastAction = pAction->id;
pTrans->lastMsgType = pAction->msgType;
@@ -1153,7 +1160,7 @@ static int32_t mndTransExecuteActions(SMnode *pMnode, STrans *pTrans, SArray *pA
pTrans->lastMsgType = 0;
memset(&pTrans->lastEpset, 0, sizeof(pTrans->lastEpset));
pTrans->lastErrorNo = 0;
- mDebug("trans:%d, all %d actions execute successfully", pTrans->id, numOfActions);
+ mInfo("trans:%d, all %d actions execute successfully", pTrans->id, numOfActions);
return 0;
} else {
mError("trans:%d, all %d actions executed, code:0x%x", pTrans->id, numOfActions, errCode & 0XFFFF);
@@ -1168,7 +1175,7 @@ static int32_t mndTransExecuteActions(SMnode *pMnode, STrans *pTrans, SArray *pA
return errCode;
}
} else {
- mDebug("trans:%d, %d of %d actions executed", pTrans->id, numOfExecuted, numOfActions);
+ mInfo("trans:%d, %d of %d actions executed", pTrans->id, numOfExecuted, numOfActions);
return TSDB_CODE_ACTION_IN_PROGRESS;
}
}
@@ -1214,7 +1221,7 @@ static int32_t mndTransExecuteRedoActionsSerial(SMnode *pMnode, STrans *pTrans)
code = pAction->errCode;
mndTransResetAction(pMnode, pTrans, pAction);
} else {
- mDebug("trans:%d, %s:%d execute successfully", pTrans->id, mndTransStr(pAction->stage), action);
+ mInfo("trans:%d, %s:%d execute successfully", pTrans->id, mndTransStr(pAction->stage), action);
}
} else {
code = TSDB_CODE_ACTION_IN_PROGRESS;
@@ -1223,7 +1230,7 @@ static int32_t mndTransExecuteRedoActionsSerial(SMnode *pMnode, STrans *pTrans)
if (pAction->errCode != 0 && pAction->errCode != pAction->acceptableCode) {
code = pAction->errCode;
} else {
- mDebug("trans:%d, %s:%d write successfully", pTrans->id, mndTransStr(pAction->stage), action);
+ mInfo("trans:%d, %s:%d write successfully", pTrans->id, mndTransStr(pAction->stage), action);
}
} else {
}
@@ -1247,7 +1254,7 @@ static int32_t mndTransExecuteRedoActionsSerial(SMnode *pMnode, STrans *pTrans)
if (code == 0) {
pTrans->code = 0;
pTrans->redoActionPos++;
- mDebug("trans:%d, %s:%d is executed and need sync to other mnodes", pTrans->id, mndTransStr(pAction->stage),
+ mInfo("trans:%d, %s:%d is executed and need sync to other mnodes", pTrans->id, mndTransStr(pAction->stage),
pAction->id);
code = mndTransSync(pMnode, pTrans);
if (code != 0) {
@@ -1256,17 +1263,17 @@ static int32_t mndTransExecuteRedoActionsSerial(SMnode *pMnode, STrans *pTrans)
mndTransStr(pAction->stage), pAction->id, terrstr());
}
} else if (code == TSDB_CODE_ACTION_IN_PROGRESS) {
- mDebug("trans:%d, %s:%d is in progress and wait it finish", pTrans->id, mndTransStr(pAction->stage), pAction->id);
+ mInfo("trans:%d, %s:%d is in progress and wait it finish", pTrans->id, mndTransStr(pAction->stage), pAction->id);
break;
} else if (code == pAction->retryCode) {
- mDebug("trans:%d, %s:%d receive code:0x%x and retry", pTrans->id, mndTransStr(pAction->stage), pAction->id, code);
+ mInfo("trans:%d, %s:%d receive code:0x%x and retry", pTrans->id, mndTransStr(pAction->stage), pAction->id, code);
taosMsleep(300);
action--;
continue;
} else {
terrno = code;
pTrans->code = code;
- mDebug("trans:%d, %s:%d receive code:0x%x and wait another schedule, failedTimes:%d", pTrans->id,
+ mInfo("trans:%d, %s:%d receive code:0x%x and wait another schedule, failedTimes:%d", pTrans->id,
mndTransStr(pAction->stage), pAction->id, code, pTrans->failedTimes);
break;
}
@@ -1278,7 +1285,7 @@ static int32_t mndTransExecuteRedoActionsSerial(SMnode *pMnode, STrans *pTrans)
static bool mndTransPerformPrepareStage(SMnode *pMnode, STrans *pTrans) {
bool continueExec = true;
pTrans->stage = TRN_STAGE_REDO_ACTION;
- mDebug("trans:%d, stage from prepare to redoAction", pTrans->id);
+ mInfo("trans:%d, stage from prepare to redoAction", pTrans->id);
return continueExec;
}
@@ -1297,10 +1304,10 @@ static bool mndTransPerformRedoActionStage(SMnode *pMnode, STrans *pTrans) {
if (code == 0) {
pTrans->code = 0;
pTrans->stage = TRN_STAGE_COMMIT;
- mDebug("trans:%d, stage from redoAction to commit", pTrans->id);
+ mInfo("trans:%d, stage from redoAction to commit", pTrans->id);
continueExec = true;
} else if (code == TSDB_CODE_ACTION_IN_PROGRESS) {
- mDebug("trans:%d, stage keep on redoAction since %s", pTrans->id, tstrerror(code));
+ mInfo("trans:%d, stage keep on redoAction since %s", pTrans->id, tstrerror(code));
continueExec = false;
} else {
pTrans->failedTimes++;
@@ -1308,7 +1315,7 @@ static bool mndTransPerformRedoActionStage(SMnode *pMnode, STrans *pTrans) {
if (pTrans->policy == TRN_POLICY_ROLLBACK) {
if (pTrans->lastAction != 0) {
STransAction *pAction = taosArrayGet(pTrans->redoActions, pTrans->lastAction);
- if (pAction->retryCode != 0 && pAction->retryCode != pAction->errCode) {
+ if (pAction->retryCode != 0 && pAction->retryCode == pAction->errCode) {
if (pTrans->failedTimes < 6) {
mError("trans:%d, stage keep on redoAction since action:%d code:0x%x not 0x%x, failedTimes:%d", pTrans->id,
pTrans->lastAction, pTrans->code, pAction->retryCode, pTrans->failedTimes);
@@ -1340,7 +1347,7 @@ static bool mndTransPerformCommitStage(SMnode *pMnode, STrans *pTrans) {
if (code == 0) {
pTrans->code = 0;
pTrans->stage = TRN_STAGE_COMMIT_ACTION;
- mDebug("trans:%d, stage from commit to commitAction", pTrans->id);
+ mInfo("trans:%d, stage from commit to commitAction", pTrans->id);
continueExec = true;
} else {
pTrans->code = terrno;
@@ -1359,7 +1366,7 @@ static bool mndTransPerformCommitActionStage(SMnode *pMnode, STrans *pTrans) {
if (code == 0) {
pTrans->code = 0;
pTrans->stage = TRN_STAGE_FINISHED;
- mDebug("trans:%d, stage from commitAction to finished", pTrans->id);
+ mInfo("trans:%d, stage from commitAction to finished", pTrans->id);
continueExec = true;
} else {
pTrans->code = terrno;
@@ -1377,10 +1384,10 @@ static bool mndTransPerformUndoActionStage(SMnode *pMnode, STrans *pTrans) {
if (code == 0) {
pTrans->stage = TRN_STAGE_FINISHED;
- mDebug("trans:%d, stage from undoAction to finished", pTrans->id);
+ mInfo("trans:%d, stage from undoAction to finished", pTrans->id);
continueExec = true;
} else if (code == TSDB_CODE_ACTION_IN_PROGRESS) {
- mDebug("trans:%d, stage keep on undoAction since %s", pTrans->id, tstrerror(code));
+ mInfo("trans:%d, stage keep on undoAction since %s", pTrans->id, tstrerror(code));
continueExec = false;
} else {
pTrans->failedTimes++;
@@ -1399,7 +1406,7 @@ static bool mndTransPerformRollbackStage(SMnode *pMnode, STrans *pTrans) {
if (code == 0) {
pTrans->stage = TRN_STAGE_UNDO_ACTION;
- mDebug("trans:%d, stage from rollback to undoAction", pTrans->id);
+ mInfo("trans:%d, stage from rollback to undoAction", pTrans->id);
continueExec = true;
} else {
pTrans->failedTimes++;
@@ -1424,7 +1431,7 @@ static bool mndTransPerfromFinishedStage(SMnode *pMnode, STrans *pTrans) {
mError("trans:%d, failed to write sdb since %s", pTrans->id, terrstr());
}
- mDebug("trans:%d, execute finished, code:0x%x, failedTimes:%d", pTrans->id, pTrans->code, pTrans->failedTimes);
+ mInfo("trans:%d, execute finished, code:0x%x, failedTimes:%d", pTrans->id, pTrans->code, pTrans->failedTimes);
return continueExec;
}
@@ -1432,7 +1439,7 @@ void mndTransExecute(SMnode *pMnode, STrans *pTrans) {
bool continueExec = true;
while (continueExec) {
- mDebug("trans:%d, continue to execute, stage:%s", pTrans->id, mndTransStr(pTrans->stage));
+ mInfo("trans:%d, continue to execute, stage:%s", pTrans->id, mndTransStr(pTrans->stage));
pTrans->lastExecTime = taosGetTimestampMs();
switch (pTrans->stage) {
case TRN_STAGE_PREPARE:
diff --git a/source/dnode/mnode/impl/src/mndVgroup.c b/source/dnode/mnode/impl/src/mndVgroup.c
index 09eed7fb32e8831e6b6c863b44edd3e9e28110a3..de29dea511c169f5b584dded1c07da48b82f4552 100644
--- a/source/dnode/mnode/impl/src/mndVgroup.c
+++ b/source/dnode/mnode/impl/src/mndVgroup.c
@@ -234,6 +234,9 @@ void *mndBuildCreateVnodeReq(SMnode *pMnode, SDnodeObj *pDnode, SDbObj *pDb, SVg
createReq.walRetentionSize = pDb->cfg.walRetentionSize;
createReq.walRollPeriod = pDb->cfg.walRollPeriod;
createReq.walSegmentSize = pDb->cfg.walSegmentSize;
+ createReq.sstTrigger = pDb->cfg.sstTrigger;
+ createReq.hashPrefix = pDb->cfg.hashPrefix;
+ createReq.hashSuffix = pDb->cfg.hashSuffix;
for (int32_t v = 0; v < pVgroup->replica; ++v) {
SReplica *pReplica = &createReq.replicas[v];
@@ -693,6 +696,9 @@ static int32_t mndRetrieveVgroups(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock *p
pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
colDataAppendNULL(pColInfo, numOfRows);
+ pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
+ colDataAppend(pColInfo, numOfRows, (const char *)&pVgroup->cacheUsage, false);
+
pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
colDataAppendNULL(pColInfo, numOfRows);
@@ -791,32 +797,43 @@ static int32_t mndRetrieveVnodes(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock *pB
if (pShow->pIter == NULL) break;
for (int32_t i = 0; i < pVgroup->replica && numOfRows < rows; ++i) {
- SVnodeGid *pVgid = &pVgroup->vnodeGid[i];
+ SVnodeGid *pVgid = &pVgroup->vnodeGid[i];
+ SColumnInfoData *pColInfo = NULL;
cols = 0;
- SColumnInfoData *pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
- colDataAppend(pColInfo, numOfRows, (const char *)&pVgroup->vgId, false);
-
- SName name = {0};
- char db[TSDB_DB_NAME_LEN + VARSTR_HEADER_SIZE] = {0};
- tNameFromString(&name, pVgroup->dbName, T_NAME_ACCT | T_NAME_DB);
- tNameGetDbName(&name, varDataVal(db));
- varDataSetLen(db, strlen(varDataVal(db)));
-
pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
- colDataAppend(pColInfo, numOfRows, (const char *)db, false);
+ colDataAppend(pColInfo, numOfRows, (const char *)&pVgroup->vgId, false);
- uint32_t val = 0;
pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
- colDataAppend(pColInfo, numOfRows, (const char *)&val, false);
+ colDataAppend(pColInfo, numOfRows, (const char *)&pVgroup->replica, false);
char buf[20] = {0};
STR_TO_VARSTR(buf, syncStr(pVgid->role));
pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
colDataAppend(pColInfo, numOfRows, (const char *)buf, false);
+ const char *dbname = mndGetDbStr(pVgroup->dbName);
+ char b1[TSDB_DB_NAME_LEN + VARSTR_HEADER_SIZE] = {0};
+ if (dbname != NULL) {
+ STR_WITH_MAXSIZE_TO_VARSTR(b1, dbname, TSDB_DB_NAME_LEN + VARSTR_HEADER_SIZE);
+ } else {
+ STR_WITH_MAXSIZE_TO_VARSTR(b1, "NULL", TSDB_DB_NAME_LEN + VARSTR_HEADER_SIZE);
+ }
+ pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
+ colDataAppend(pColInfo, numOfRows, (const char *)b1, false);
+
+ pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
+ colDataAppend(pColInfo, numOfRows, (const char *)&pVgid->dnodeId, false);
+
+ SDnodeObj *pDnode = mndAcquireDnode(pMnode, pVgid->dnodeId);
+ char b2[TSDB_EP_LEN + VARSTR_HEADER_SIZE] = {0};
+ if (pDnode != NULL) {
+ STR_WITH_MAXSIZE_TO_VARSTR(b2, pDnode->ep, TSDB_EP_LEN + VARSTR_HEADER_SIZE);
+ } else {
+ STR_WITH_MAXSIZE_TO_VARSTR(b2, "NULL", TSDB_EP_LEN + VARSTR_HEADER_SIZE);
+ }
pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
- colDataAppend(pColInfo, numOfRows, (const char *)&pVgroup->replica, false); // onlines
+ colDataAppend(pColInfo, numOfRows, (const char *)b2, false);
numOfRows++;
}
diff --git a/source/dnode/mnode/impl/test/sma/CMakeLists.txt b/source/dnode/mnode/impl/test/sma/CMakeLists.txt
index 3f9ec123a80e88371a98fa54c99342726831372d..a55b45ca11d32f4aa0baa2462007f06e970ae3d6 100644
--- a/source/dnode/mnode/impl/test/sma/CMakeLists.txt
+++ b/source/dnode/mnode/impl/test/sma/CMakeLists.txt
@@ -5,7 +5,9 @@ target_link_libraries(
PUBLIC sut
)
-add_test(
- NAME smaTest
- COMMAND smaTest
-)
+if(NOT ${TD_WINDOWS})
+ add_test(
+ NAME smaTest
+ COMMAND smaTest
+ )
+endif(NOT ${TD_WINDOWS})
diff --git a/source/dnode/mnode/impl/test/stb/CMakeLists.txt b/source/dnode/mnode/impl/test/stb/CMakeLists.txt
index dcfbe658fcca82f928400b1e9eed2efcfb09a052..e3a3fc2e793fa84a5da05519ae727bb572edaa27 100644
--- a/source/dnode/mnode/impl/test/stb/CMakeLists.txt
+++ b/source/dnode/mnode/impl/test/stb/CMakeLists.txt
@@ -5,7 +5,9 @@ target_link_libraries(
PUBLIC sut
)
-add_test(
- NAME stbTest
- COMMAND stbTest
-)
\ No newline at end of file
+if(NOT ${TD_WINDOWS})
+ add_test(
+ NAME stbTest
+ COMMAND stbTest
+ )
+endif(NOT ${TD_WINDOWS})
\ No newline at end of file
diff --git a/source/dnode/vnode/CMakeLists.txt b/source/dnode/vnode/CMakeLists.txt
index a3e17f53774c82ea9fca1ff0a88943c8e7971725..7a99d26683bf08b2595c51ff9c34b4aea588f800 100644
--- a/source/dnode/vnode/CMakeLists.txt
+++ b/source/dnode/vnode/CMakeLists.txt
@@ -29,6 +29,7 @@ target_sources(
# sma
"src/sma/smaEnv.c"
"src/sma/smaUtil.c"
+ "src/sma/smaFS.c"
"src/sma/smaOpen.c"
"src/sma/smaCommit.c"
"src/sma/smaRollup.c"
@@ -49,6 +50,10 @@ target_sources(
"src/tsdb/tsdbSnapshot.c"
"src/tsdb/tsdbCacheRead.c"
"src/tsdb/tsdbRetention.c"
+ "src/tsdb/tsdbDiskData.c"
+ "src/tsdb/tsdbCompress.c"
+ "src/tsdb/tsdbCompact.c"
+ "src/tsdb/tsdbMergeTree.c"
# tq
"src/tq/tq.c"
diff --git a/source/dnode/vnode/inc/vnode.h b/source/dnode/vnode/inc/vnode.h
index ec27ba8ce659e454b768945aca50fb071d4e7b4f..6ba10641f5e7054034f2008f1b317ec8121286c6 100644
--- a/source/dnode/vnode/inc/vnode.h
+++ b/source/dnode/vnode/inc/vnode.h
@@ -63,7 +63,7 @@ void vnodeGetInfo(SVnode *pVnode, const char **dbname, int32_t *vgId);
int32_t vnodeProcessCreateTSma(SVnode *pVnode, void *pCont, uint32_t contLen);
int32_t vnodeGetAllTableList(SVnode *pVnode, uint64_t uid, SArray *list);
int32_t vnodeGetCtbIdList(SVnode *pVnode, int64_t suid, SArray *list);
-int32_t vnodeGetStbIdList(SVnode *pVnode, int64_t suid, SArray* list);
+int32_t vnodeGetStbIdList(SVnode *pVnode, int64_t suid, SArray *list);
void *vnodeGetIdx(SVnode *pVnode);
void *vnodeGetIvtIdx(SVnode *pVnode);
@@ -96,7 +96,7 @@ int32_t metaGetTableTags(SMeta *pMeta, uint64_t suid, SArray *uidList, SHash
int32_t metaReadNext(SMetaReader *pReader);
const void *metaGetTableTagVal(void *tag, int16_t type, STagVal *tagVal);
int metaGetTableNameByUid(void *meta, uint64_t uid, char *tbName);
-bool metaIsTableExist(SMeta *pMeta, tb_uid_t uid);
+bool metaIsTableExist(SMeta *pMeta, tb_uid_t uid);
typedef struct SMetaFltParam {
tb_uid_t suid;
@@ -125,11 +125,16 @@ int32_t metaTbCursorNext(SMTbCursor *pTbCur);
// typedef struct STsdb STsdb;
typedef struct STsdbReader STsdbReader;
+#define TSDB_DEFAULT_STT_FILE 8
+#define TSDB_DEFAULT_PAGE_SIZE 4096
+
#define TIMEWINDOW_RANGE_CONTAINED 1
#define TIMEWINDOW_RANGE_EXTERNAL 2
-#define LASTROW_RETRIEVE_TYPE_ALL 0x1
-#define LASTROW_RETRIEVE_TYPE_SINGLE 0x2
+#define CACHESCAN_RETRIEVE_TYPE_ALL 0x1
+#define CACHESCAN_RETRIEVE_TYPE_SINGLE 0x2
+#define CACHESCAN_RETRIEVE_LAST_ROW 0x4
+#define CACHESCAN_RETRIEVE_LAST 0x8
int32_t tsdbSetTableId(STsdbReader *pReader, int64_t uid);
int32_t tsdbReaderOpen(SVnode *pVnode, SQueryTableDataCond *pCond, SArray *pTableList, STsdbReader **ppReader,
@@ -146,15 +151,41 @@ void *tsdbGetIdx(SMeta *pMeta);
void *tsdbGetIvtIdx(SMeta *pMeta);
uint64_t getReaderMaxVersion(STsdbReader *pReader);
-int32_t tsdbLastRowReaderOpen(void *pVnode, int32_t type, SArray *pTableIdList, int32_t numOfCols, void **pReader);
-int32_t tsdbRetrieveLastRow(void *pReader, SSDataBlock *pResBlock, const int32_t *slotIds, SArray *pTableUids);
-int32_t tsdbLastrowReaderClose(void *pReader);
+int32_t tsdbCacherowsReaderOpen(void *pVnode, int32_t type, SArray *pTableIdList, int32_t numOfCols, void **pReader);
+int32_t tsdbRetrieveCacheRows(void *pReader, SSDataBlock *pResBlock, const int32_t *slotIds, SArray *pTableUids);
+int32_t tsdbCacherowsReaderClose(void *pReader);
int32_t tsdbGetTableSchema(SVnode *pVnode, int64_t uid, STSchema **pSchema, int64_t *suid);
void tsdbCacheSetCapacity(SVnode *pVnode, size_t capacity);
size_t tsdbCacheGetCapacity(SVnode *pVnode);
+size_t tsdbCacheGetUsage(SVnode *pVnode);
// tq
+typedef struct SMetaTableInfo {
+ int64_t suid;
+ int64_t uid;
+ SSchemaWrapper *schema;
+ char tbName[TSDB_TABLE_NAME_LEN];
+} SMetaTableInfo;
+
+typedef struct SIdInfo {
+ int64_t version;
+ int32_t index;
+} SIdInfo;
+
+typedef struct SSnapContext {
+ SMeta *pMeta;
+ int64_t snapVersion;
+ TBC *pCur;
+ int64_t suid;
+ int8_t subType;
+ SHashObj *idVersion;
+ SHashObj *suidInfo;
+ SArray *idList;
+ int32_t index;
+ bool withMeta;
+ bool queryMetaOrData; // true-get meta, false-get data
+} SSnapContext;
typedef struct STqReader {
int64_t ver;
@@ -205,6 +236,13 @@ int32_t vnodeSnapWriterOpen(SVnode *pVnode, int64_t sver, int64_t ever, SVSnapWr
int32_t vnodeSnapWriterClose(SVSnapWriter *pWriter, int8_t rollback, SSnapshot *pSnapshot);
int32_t vnodeSnapWrite(SVSnapWriter *pWriter, uint8_t *pData, uint32_t nData);
+int32_t buildSnapContext(SMeta *pMeta, int64_t snapVersion, int64_t suid, int8_t subType, bool withMeta,
+ SSnapContext **ctxRet);
+int32_t getMetafromSnapShot(SSnapContext *ctx, void **pBuf, int32_t *contLen, int16_t *type, int64_t *uid);
+SMetaTableInfo getUidfromSnapShot(SSnapContext *ctx);
+int32_t setForSnapShot(SSnapContext *ctx, int64_t uid);
+int32_t destroySnapContext(SSnapContext *ctx);
+
// structs
struct STsdbCfg {
int8_t precision;
@@ -224,7 +262,9 @@ typedef struct {
int64_t numOfSTables;
int64_t numOfCTables;
int64_t numOfNTables;
+ int64_t numOfNTimeSeries;
int64_t numOfTimeSeries;
+ int64_t itvTimeSeries;
int64_t pointsWritten;
int64_t totalStorage;
int64_t compStorage;
@@ -251,6 +291,10 @@ struct SVnodeCfg {
SVnodeStats vndStats;
uint32_t hashBegin;
uint32_t hashEnd;
+ int16_t sttTrigger;
+ int16_t hashPrefix;
+ int16_t hashSuffix;
+ int32_t tsdbPageSize;
};
typedef struct {
diff --git a/source/dnode/vnode/src/inc/sma.h b/source/dnode/vnode/src/inc/sma.h
index ca77042bb26d72f87471e4ac80329efc92449427..9931462e5fc94f0e2fde5f10f5e8a48a81899e48 100644
--- a/source/dnode/vnode/src/inc/sma.h
+++ b/source/dnode/vnode/src/inc/sma.h
@@ -33,15 +33,15 @@ extern "C" {
// clang-format on
#define RSMA_TASK_INFO_HASH_SLOT (8)
-#define RSMA_EXECUTOR_MAX (1)
typedef struct SSmaEnv SSmaEnv;
typedef struct SSmaStat SSmaStat;
typedef struct STSmaStat STSmaStat;
typedef struct SRSmaStat SRSmaStat;
-typedef struct SSmaKey SSmaKey;
+typedef struct SRSmaRef SRSmaRef;
typedef struct SRSmaInfo SRSmaInfo;
typedef struct SRSmaInfoItem SRSmaInfoItem;
+typedef struct SRSmaFS SRSmaFS;
typedef struct SQTaskFile SQTaskFile;
typedef struct SQTaskFReader SQTaskFReader;
typedef struct SQTaskFWriter SQTaskFWriter;
@@ -49,13 +49,27 @@ typedef struct SQTaskFWriter SQTaskFWriter;
struct SSmaEnv {
SRWLatch lock;
int8_t type;
+ int8_t flag; // 0x01 inClose
SSmaStat *pStat;
};
+#define SMA_ENV_FLG_CLOSE ((int8_t)0x1)
+
+struct SRSmaRef {
+ int64_t refId; // for SRSmaStat
+ int64_t suid;
+};
+
typedef struct {
int8_t inited;
int32_t rsetId;
void *tmrHandle; // shared by all fetch tasks
+ /**
+ * @brief key: void* of SRSmaInfoItem, value: SRSmaRef
+ * N.B. Although there is a very small possibility that "void*" points to different objects with the same
+ * address after release/renew, the functionality is not affected as it is just used to fetch the rsma results.
+ */
+ SHashObj *refHash; // shared by all vgroups
} SSmaMgmt;
#define SMA_ENV_LOCK(env) (&(env)->lock)
@@ -71,20 +85,25 @@ struct STSmaStat {
struct SQTaskFile {
volatile int32_t nRef;
- int64_t commitID;
+ int32_t padding;
+ int64_t version;
int64_t size;
};
struct SQTaskFReader {
- SSma *pSma;
- SQTaskFile fTask;
- TdFilePtr pReadH;
+ SSma *pSma;
+ int64_t version;
+ TdFilePtr pReadH;
};
struct SQTaskFWriter {
- SSma *pSma;
- SQTaskFile fTask;
- TdFilePtr pWriteH;
- char *fname;
+ SSma *pSma;
+ int64_t version;
+ TdFilePtr pWriteH;
+ char *fname;
+};
+
+struct SRSmaFS {
+ SArray *aQTaskInf; // array of SQTaskFile
};
struct SRSmaStat {
@@ -93,10 +112,11 @@ struct SRSmaStat {
int64_t refId; // shared by fetch tasks
volatile int64_t nBufItems; // number of items in queue buffer
SRWLatch lock; // r/w lock for rsma fs(e.g. qtaskinfo)
- volatile int8_t nExecutor; // [1, max(half of query threads, 4)]
- int8_t triggerStat; // shared by fetch tasks
- int8_t commitStat; // 0 not in committing, 1 in committing
- SArray *aTaskFile; // qTaskFiles committed recently(for recovery/snapshot r/w)
+ volatile int32_t nFetchAll; // active number of fetch all
+ volatile int8_t triggerStat; // shared by fetch tasks
+ volatile int8_t commitStat; // 0 not in committing, 1 in committing
+ volatile int8_t delFlag; // 0 no deleted SRSmaInfo, 1 has deleted SRSmaInfo
+ SRSmaFS fs; // for recovery/snapshot r/w
SHashObj *infoHash; // key: suid, value: SRSmaInfo
tsem_t notEmpty; // has items in queue buffer
};
@@ -107,6 +127,7 @@ struct SSmaStat {
SRSmaStat rsmaStat; // rollup sma
};
T_REF_DECLARE()
+ char data[];
};
#define SMA_STAT_TSMA(s) (&(s)->tsmaStat)
@@ -115,21 +136,22 @@ struct SSmaStat {
#define RSMA_TRIGGER_STAT(r) (&(r)->triggerStat)
#define RSMA_COMMIT_STAT(r) (&(r)->commitStat)
#define RSMA_REF_ID(r) ((r)->refId)
+#define RSMA_FS(r) (&(r)->fs)
#define RSMA_FS_LOCK(r) (&(r)->lock)
struct SRSmaInfoItem {
int8_t level : 4;
int8_t fetchLevel : 4;
int8_t triggerStat;
- uint16_t nSkipped;
+ uint16_t nScanned;
int32_t maxDelay; // ms
tmr_h tmrId;
};
struct SRSmaInfo {
+ SSma *pSma;
STSchema *pTSchema;
int64_t suid;
- int64_t refId; // refId of SRSmaStat
int64_t lastRecv; // ms
int8_t assigned; // 0 idle, 1 assgined for exec
int8_t delFlag;
@@ -160,14 +182,6 @@ enum {
TASK_TRIGGER_STAT_DROPPED = 5,
};
-enum {
- RSMA_ROLE_CREATE = 0,
- RSMA_ROLE_DROP = 1,
- RSMA_ROLE_SUBMIT = 2,
- RSMA_ROLE_FETCH = 3,
- RSMA_ROLE_ITERATE = 4,
-};
-
enum {
RSMA_RESTORE_REBOOT = 1,
RSMA_RESTORE_SYNC = 2,
@@ -179,89 +193,49 @@ typedef enum {
RSMA_EXEC_COMMIT = 3, // triggered by commit
} ERsmaExecType;
-void tdDestroySmaEnv(SSmaEnv *pSmaEnv);
-void *tdFreeSmaEnv(SSmaEnv *pSmaEnv);
-
-int32_t tdDropTSma(SSma *pSma, char *pMsg);
-int32_t tdDropTSmaData(SSma *pSma, int64_t indexUid);
-int32_t tdInsertRSmaData(SSma *pSma, char *msg);
-
-int32_t tdRefSmaStat(SSma *pSma, SSmaStat *pStat);
-int32_t tdUnRefSmaStat(SSma *pSma, SSmaStat *pStat);
-int32_t tdRefRSmaInfo(SSma *pSma, SRSmaInfo *pRSmaInfo);
-int32_t tdUnRefRSmaInfo(SSma *pSma, SRSmaInfo *pRSmaInfo);
-
-void *tdAcquireSmaRef(int32_t rsetId, int64_t refId);
-int32_t tdReleaseSmaRef(int32_t rsetId, int64_t refId);
-
+// sma
int32_t tdCheckAndInitSmaEnv(SSma *pSma, int8_t smaType);
-
+void tdDestroySmaEnv(SSmaEnv *pSmaEnv);
+void *tdFreeSmaEnv(SSmaEnv *pSmaEnv);
int32_t tdLockSma(SSma *pSma);
int32_t tdUnLockSma(SSma *pSma);
+void *tdAcquireSmaRef(int32_t rsetId, int64_t refId);
+int32_t tdReleaseSmaRef(int32_t rsetId, int64_t refId);
-static FORCE_INLINE int8_t tdSmaStat(STSmaStat *pTStat) {
- if (pTStat) {
- return atomic_load_8(&pTStat->state);
- }
- return TSDB_SMA_STAT_UNKNOWN;
-}
-
-static FORCE_INLINE bool tdSmaStatIsOK(STSmaStat *pTStat, int8_t *state) {
- if (!pTStat) {
- return false;
- }
-
- if (state) {
- *state = atomic_load_8(&pTStat->state);
- return *state == TSDB_SMA_STAT_OK;
- }
- return atomic_load_8(&pTStat->state) == TSDB_SMA_STAT_OK;
-}
-
-static FORCE_INLINE bool tdSmaStatIsExpired(STSmaStat *pTStat) {
- return pTStat ? (atomic_load_8(&pTStat->state) & TSDB_SMA_STAT_EXPIRED) : true;
+static FORCE_INLINE void tdRefSmaStat(SSma *pSma, SSmaStat *pStat) {
+ int32_t ref = T_REF_INC(pStat);
+ smaDebug("vgId:%d, ref sma stat:%p, val:%d", SMA_VID(pSma), pStat, ref);
}
-
-static FORCE_INLINE bool tdSmaStatIsDropped(STSmaStat *pTStat) {
- return pTStat ? (atomic_load_8(&pTStat->state) & TSDB_SMA_STAT_DROPPED) : true;
+static FORCE_INLINE void tdUnRefSmaStat(SSma *pSma, SSmaStat *pStat) {
+ int32_t ref = T_REF_DEC(pStat);
+ smaDebug("vgId:%d, unref sma stat:%p, val:%d", SMA_VID(pSma), pStat, ref);
}
-static FORCE_INLINE void tdSmaStatSetOK(STSmaStat *pTStat) {
- if (pTStat) {
- atomic_store_8(&pTStat->state, TSDB_SMA_STAT_OK);
- }
+// rsma
+void *tdFreeRSmaInfo(SSma *pSma, SRSmaInfo *pInfo, bool isDeepFree);
+int32_t tdRSmaFSOpen(SSma *pSma, int64_t version);
+void tdRSmaFSClose(SRSmaFS *fs);
+int32_t tdRSmaFSRef(SSma *pSma, SRSmaStat *pStat, int64_t version);
+void tdRSmaFSUnRef(SSma *pSma, SRSmaStat *pStat, int64_t version);
+int64_t tdRSmaFSMaxVer(SSma *pSma, SRSmaStat *pStat);
+int32_t tdRSmaFSUpsertQTaskFile(SRSmaFS *pFS, SQTaskFile *qTaskFile);
+int32_t tdRSmaRestore(SSma *pSma, int8_t type, int64_t committedVer);
+int32_t tdRSmaProcessCreateImpl(SSma *pSma, SRSmaParam *param, int64_t suid, const char *tbName);
+int32_t tdRSmaProcessExecImpl(SSma *pSma, ERsmaExecType type);
+int32_t tdRSmaPersistExecImpl(SRSmaStat *pRSmaStat, SHashObj *pInfoHash);
+int32_t tdRSmaProcessRestoreImpl(SSma *pSma, int8_t type, int64_t qtaskFileVer);
+void tdRSmaQTaskInfoGetFileName(int32_t vid, int64_t version, char *outputName);
+void tdRSmaQTaskInfoGetFullName(int32_t vid, int64_t version, const char *path, char *outputName);
+
+static FORCE_INLINE void tdRefRSmaInfo(SSma *pSma, SRSmaInfo *pRSmaInfo) {
+ int32_t ref = T_REF_INC(pRSmaInfo);
+ smaDebug("vgId:%d, ref rsma info:%p, val:%d", SMA_VID(pSma), pRSmaInfo, ref);
}
-
-static FORCE_INLINE void tdSmaStatSetExpired(STSmaStat *pTStat) {
- if (pTStat) {
- atomic_or_fetch_8(&pTStat->state, TSDB_SMA_STAT_EXPIRED);
- }
+static FORCE_INLINE void tdUnRefRSmaInfo(SSma *pSma, SRSmaInfo *pRSmaInfo) {
+ int32_t ref = T_REF_DEC(pRSmaInfo);
+ smaDebug("vgId:%d, unref rsma info:%p, val:%d", SMA_VID(pSma), pRSmaInfo, ref);
}
-static FORCE_INLINE void tdSmaStatSetDropped(STSmaStat *pTStat) {
- if (pTStat) {
- atomic_or_fetch_8(&pTStat->state, TSDB_SMA_STAT_DROPPED);
- }
-}
-
-void tdRSmaQTaskInfoGetFileName(int32_t vid, int64_t version, char *outputName);
-void tdRSmaQTaskInfoGetFullName(int32_t vid, int64_t version, const char *path, char *outputName);
-int32_t tdCloneRSmaInfo(SSma *pSma, SRSmaInfo *pInfo);
-void tdFreeQTaskInfo(qTaskInfo_t *taskHandle, int32_t vgId, int32_t level);
-static int32_t tdDestroySmaState(SSmaStat *pSmaStat, int8_t smaType);
-void *tdFreeSmaState(SSmaStat *pSmaStat, int8_t smaType);
-void *tdFreeRSmaInfo(SSma *pSma, SRSmaInfo *pInfo, bool isDeepFree);
-int32_t tdRSmaPersistExecImpl(SRSmaStat *pRSmaStat, SHashObj *pInfoHash);
-int32_t tdRSmaProcessExecImpl(SSma *pSma, ERsmaExecType type);
-
-int32_t tdProcessRSmaCreateImpl(SSma *pSma, SRSmaParam *param, int64_t suid, const char *tbName);
-int32_t tdProcessRSmaRestoreImpl(SSma *pSma, int8_t type, int64_t qtaskFileVer);
-int32_t tdRsmaRestore(SSma *pSma, int8_t type, int64_t committedVer);
-
-int32_t tdProcessTSmaCreateImpl(SSma *pSma, int64_t version, const char *pMsg);
-int32_t tdProcessTSmaInsertImpl(SSma *pSma, int64_t indexUid, const char *msg);
-int32_t tdProcessTSmaGetDaysImpl(SVnodeCfg *pCfg, void *pCont, uint32_t contLen, int32_t *days);
-
// smaFileUtil ================
#define TD_FILE_HEAD_SIZE 512
diff --git a/source/dnode/vnode/src/inc/tq.h b/source/dnode/vnode/src/inc/tq.h
index cb5ec7aabe48363f57b68238be80a6c124af9509..c3441a43f0e736881fc8bc491dd5717223645ed4 100644
--- a/source/dnode/vnode/src/inc/tq.h
+++ b/source/dnode/vnode/src/inc/tq.h
@@ -67,8 +67,7 @@ typedef struct {
// tqExec
typedef struct {
- char* qmsg;
- qTaskInfo_t task;
+ char* qmsg;
} STqExecCol;
typedef struct {
@@ -82,7 +81,8 @@ typedef struct {
typedef struct {
int8_t subType;
- STqReader* pExecReader;
+ STqReader* pExecReader;
+ qTaskInfo_t task;
union {
STqExecCol execCol;
STqExecTb execTb;
@@ -101,7 +101,6 @@ typedef struct {
int64_t snapshotVer;
- // TODO remove
SWalReader* pWalReader;
SWalRef* pRef;
@@ -141,11 +140,12 @@ int32_t tEncodeSTqHandle(SEncoder* pEncoder, const STqHandle* pHandle);
int32_t tDecodeSTqHandle(SDecoder* pDecoder, STqHandle* pHandle);
// tqRead
-int64_t tqScan(STQ* pTq, const STqHandle* pHandle, SMqDataRsp* pRsp, STqOffsetVal* offset);
+int32_t tqScan(STQ* pTq, const STqHandle* pHandle, STaosxRsp* pRsp, SMqMetaRsp* pMetaRsp, STqOffsetVal* offset);
+int32_t tqScanData(STQ* pTq, const STqHandle* pHandle, SMqDataRsp* pRsp, STqOffsetVal* pOffset);
int64_t tqFetchLog(STQ* pTq, STqHandle* pHandle, int64_t* fetchOffset, SWalCkHead** pHeadWithCkSum);
// tqExec
-int32_t tqLogScanExec(STQ* pTq, STqExecHandle* pExec, SSubmitReq* pReq, SMqDataRsp* pRsp);
+int32_t tqTaosxScanLog(STQ* pTq, STqHandle* pHandle, SSubmitReq* pReq, STaosxRsp* pRsp);
int32_t tqSendDataRsp(STQ* pTq, const SRpcMsg* pMsg, const SMqPollReq* pReq, const SMqDataRsp* pRsp);
// tqMeta
@@ -176,17 +176,6 @@ void tqTableSink(SStreamTask* pTask, void* vnode, int64_t ver, void* data);
char* tqOffsetBuildFName(const char* path, int32_t ver);
int32_t tqOffsetRestoreFromFile(STqOffsetStore* pStore, const char* fname);
-static FORCE_INLINE void tqOffsetResetToData(STqOffsetVal* pOffsetVal, int64_t uid, int64_t ts) {
- pOffsetVal->type = TMQ_OFFSET__SNAPSHOT_DATA;
- pOffsetVal->uid = uid;
- pOffsetVal->ts = ts;
-}
-
-static FORCE_INLINE void tqOffsetResetToLog(STqOffsetVal* pOffsetVal, int64_t ver) {
- pOffsetVal->type = TMQ_OFFSET__LOG;
- pOffsetVal->version = ver;
-}
-
// tqStream
int32_t tqExpandTask(STQ* pTq, SStreamTask* pTask);
diff --git a/source/dnode/vnode/src/inc/tsdb.h b/source/dnode/vnode/src/inc/tsdb.h
index d1f5cfb122d6fdfee2cb8f54911a07a25cbb078c..a836fa2bc5c678e144dbcfed659eeb180d74eb69 100644
--- a/source/dnode/vnode/src/inc/tsdb.h
+++ b/source/dnode/vnode/src/inc/tsdb.h
@@ -42,15 +42,15 @@ typedef struct SMemTable SMemTable;
typedef struct STbDataIter STbDataIter;
typedef struct SMapData SMapData;
typedef struct SBlockIdx SBlockIdx;
-typedef struct SBlock SBlock;
-typedef struct SBlockL SBlockL;
+typedef struct SDataBlk SDataBlk;
+typedef struct SSttBlk SSttBlk;
typedef struct SColData SColData;
typedef struct SDiskDataHdr SDiskDataHdr;
typedef struct SBlockData SBlockData;
typedef struct SDelFile SDelFile;
typedef struct SHeadFile SHeadFile;
typedef struct SDataFile SDataFile;
-typedef struct SLastFile SLastFile;
+typedef struct SSttFile SSttFile;
typedef struct SSmaFile SSmaFile;
typedef struct SDFileSet SDFileSet;
typedef struct SDataFWriter SDataFWriter;
@@ -64,6 +64,8 @@ typedef struct STsdbReadSnap STsdbReadSnap;
typedef struct SBlockInfo SBlockInfo;
typedef struct SSmaInfo SSmaInfo;
typedef struct SBlockCol SBlockCol;
+typedef struct SVersionRange SVersionRange;
+typedef struct SLDataIter SLDataIter;
#define TSDB_FILE_DLMT ((uint32_t)0xF00AFA0F)
#define TSDB_MAX_SUBBLOCKS 8
@@ -79,6 +81,27 @@ typedef struct SBlockCol SBlockCol;
#define TSDBKEY_MIN ((TSDBKEY){.ts = TSKEY_MIN, .version = VERSION_MIN})
#define TSDBKEY_MAX ((TSDBKEY){.ts = TSKEY_MAX, .version = VERSION_MAX})
+#define TABLE_SAME_SCHEMA(SUID1, UID1, SUID2, UID2) ((SUID1) ? (SUID1) == (SUID2) : (UID1) == (UID2))
+
+#define PAGE_CONTENT_SIZE(PAGE) ((PAGE) - sizeof(TSCKSUM))
+#define LOGIC_TO_FILE_OFFSET(LOFFSET, PAGE) \
+ ((LOFFSET) / PAGE_CONTENT_SIZE(PAGE) * (PAGE) + (LOFFSET) % PAGE_CONTENT_SIZE(PAGE))
+#define FILE_TO_LOGIC_OFFSET(OFFSET, PAGE) ((OFFSET) / (PAGE)*PAGE_CONTENT_SIZE(PAGE) + (OFFSET) % (PAGE))
+#define PAGE_OFFSET(PGNO, PAGE) (((PGNO)-1) * (PAGE))
+#define OFFSET_PGNO(OFFSET, PAGE) ((OFFSET) / (PAGE) + 1)
+
+static FORCE_INLINE int64_t tsdbLogicToFileSize(int64_t lSize, int32_t szPage) {
+ int64_t fOffSet = LOGIC_TO_FILE_OFFSET(lSize, szPage);
+ int64_t pgno = OFFSET_PGNO(fOffSet, szPage);
+ int32_t szPageCont = PAGE_CONTENT_SIZE(szPage);
+
+ if (fOffSet % szPageCont == 0) {
+ pgno--;
+ }
+
+ return pgno * szPage;
+}
+
// tsdbUtil.c ==============================================================================================
// TSDBROW
#define TSDBROW_TS(ROW) (((ROW)->type == 0) ? (ROW)->pTSRow->ts : (ROW)->pBlockData->aTSKEY[(ROW)->iRow])
@@ -111,15 +134,15 @@ int32_t tTABLEIDCmprFn(const void *p1, const void *p2);
int32_t tPutBlockCol(uint8_t *p, void *ph);
int32_t tGetBlockCol(uint8_t *p, void *ph);
int32_t tBlockColCmprFn(const void *p1, const void *p2);
-// SBlock
-void tBlockReset(SBlock *pBlock);
-int32_t tPutBlock(uint8_t *p, void *ph);
-int32_t tGetBlock(uint8_t *p, void *ph);
-int32_t tBlockCmprFn(const void *p1, const void *p2);
-bool tBlockHasSma(SBlock *pBlock);
-// SBlockL
-int32_t tPutBlockL(uint8_t *p, void *ph);
-int32_t tGetBlockL(uint8_t *p, void *ph);
+// SDataBlk
+void tDataBlkReset(SDataBlk *pBlock);
+int32_t tPutDataBlk(uint8_t *p, void *ph);
+int32_t tGetDataBlk(uint8_t *p, void *ph);
+int32_t tDataBlkCmprFn(const void *p1, const void *p2);
+bool tDataBlkHasSma(SDataBlk *pDataBlk);
+// SSttBlk
+int32_t tPutSttBlk(uint8_t *p, void *ph);
+int32_t tGetSttBlk(uint8_t *p, void *ph);
// SBlockIdx
int32_t tPutBlockIdx(uint8_t *p, void *ph);
int32_t tGetBlockIdx(uint8_t *p, void *ph);
@@ -170,6 +193,7 @@ int32_t tGetDelData(uint8_t *p, void *ph);
void tMapDataReset(SMapData *pMapData);
void tMapDataClear(SMapData *pMapData);
int32_t tMapDataPutItem(SMapData *pMapData, void *pItem, int32_t (*tPutItemFn)(uint8_t *, void *));
+int32_t tMapDataCopy(SMapData *pFrom, SMapData *pTo);
void tMapDataGetItemByIdx(SMapData *pMapData, int32_t idx, void *pItem, int32_t (*tGetItemFn)(uint8_t *, void *));
int32_t tMapDataSearch(SMapData *pMapData, void *pSearchItem, int32_t (*tGetItemFn)(uint8_t *, void *),
int32_t (*tItemCmprFn)(const void *, const void *), void *pItem);
@@ -191,7 +215,6 @@ int32_t tsdbCmprColData(SColData *pColData, int8_t cmprAlg, SBlockCol *pBlockCol
uint8_t **ppBuf);
int32_t tsdbDecmprColData(uint8_t *pIn, SBlockCol *pBlockCol, int8_t cmprAlg, int32_t nVal, SColData *pColData,
uint8_t **ppBuf);
-int32_t tsdbReadAndCheck(TdFilePtr pFD, int64_t offset, uint8_t **ppOut, int32_t size, int8_t toCheck);
// tsdbMemTable ==============================================================================================
// SMemTable
int32_t tsdbMemTableCreate(STsdb *pTsdb, SMemTable **ppMemTable);
@@ -215,7 +238,7 @@ bool tsdbDelFileIsSame(SDelFile *pDelFile1, SDelFile *pDelFile2);
int32_t tsdbDFileRollback(STsdb *pTsdb, SDFileSet *pSet, EDataFileT ftype);
int32_t tPutHeadFile(uint8_t *p, SHeadFile *pHeadFile);
int32_t tPutDataFile(uint8_t *p, SDataFile *pDataFile);
-int32_t tPutLastFile(uint8_t *p, SLastFile *pLastFile);
+int32_t tPutSttFile(uint8_t *p, SSttFile *pSttFile);
int32_t tPutSmaFile(uint8_t *p, SSmaFile *pSmaFile);
int32_t tPutDelFile(uint8_t *p, SDelFile *pDelFile);
int32_t tGetDelFile(uint8_t *p, SDelFile *pDelFile);
@@ -224,7 +247,7 @@ int32_t tGetDFileSet(uint8_t *p, SDFileSet *pSet);
void tsdbHeadFileName(STsdb *pTsdb, SDiskID did, int32_t fid, SHeadFile *pHeadF, char fname[]);
void tsdbDataFileName(STsdb *pTsdb, SDiskID did, int32_t fid, SDataFile *pDataF, char fname[]);
-void tsdbLastFileName(STsdb *pTsdb, SDiskID did, int32_t fid, SLastFile *pLastF, char fname[]);
+void tsdbSttFileName(STsdb *pTsdb, SDiskID did, int32_t fid, SSttFile *pSttF, char fname[]);
void tsdbSmaFileName(STsdb *pTsdb, SDiskID did, int32_t fid, SSmaFile *pSmaF, char fname[]);
// SDelFile
void tsdbDelFileName(STsdb *pTsdb, SDelFile *pFile, char fname[]);
@@ -249,8 +272,8 @@ int32_t tsdbDataFWriterOpen(SDataFWriter **ppWriter, STsdb *pTsdb, SDFileSet *pS
int32_t tsdbDataFWriterClose(SDataFWriter **ppWriter, int8_t sync);
int32_t tsdbUpdateDFileSetHeader(SDataFWriter *pWriter);
int32_t tsdbWriteBlockIdx(SDataFWriter *pWriter, SArray *aBlockIdx);
-int32_t tsdbWriteBlock(SDataFWriter *pWriter, SMapData *pMapData, SBlockIdx *pBlockIdx);
-int32_t tsdbWriteBlockL(SDataFWriter *pWriter, SArray *aBlockL);
+int32_t tsdbWriteDataBlk(SDataFWriter *pWriter, SMapData *mDataBlk, SBlockIdx *pBlockIdx);
+int32_t tsdbWriteSttBlk(SDataFWriter *pWriter, SArray *aSttBlk);
int32_t tsdbWriteBlockData(SDataFWriter *pWriter, SBlockData *pBlockData, SBlockInfo *pBlkInfo, SSmaInfo *pSmaInfo,
int8_t cmprAlg, int8_t toLast);
@@ -259,11 +282,11 @@ int32_t tsdbDFileSetCopy(STsdb *pTsdb, SDFileSet *pSetFrom, SDFileSet *pSetTo);
int32_t tsdbDataFReaderOpen(SDataFReader **ppReader, STsdb *pTsdb, SDFileSet *pSet);
int32_t tsdbDataFReaderClose(SDataFReader **ppReader);
int32_t tsdbReadBlockIdx(SDataFReader *pReader, SArray *aBlockIdx);
-int32_t tsdbReadBlock(SDataFReader *pReader, SBlockIdx *pBlockIdx, SMapData *pMapData);
-int32_t tsdbReadBlockL(SDataFReader *pReader, SArray *aBlockL);
-int32_t tsdbReadBlockSma(SDataFReader *pReader, SBlock *pBlock, SArray *aColumnDataAgg);
-int32_t tsdbReadDataBlock(SDataFReader *pReader, SBlock *pBlock, SBlockData *pBlockData);
-int32_t tsdbReadLastBlock(SDataFReader *pReader, SBlockL *pBlockL, SBlockData *pBlockData);
+int32_t tsdbReadDataBlk(SDataFReader *pReader, SBlockIdx *pBlockIdx, SMapData *mDataBlk);
+int32_t tsdbReadSttBlk(SDataFReader *pReader, int32_t iStt, SArray *aSttBlk);
+int32_t tsdbReadBlockSma(SDataFReader *pReader, SDataBlk *pBlock, SArray *aColumnDataAgg);
+int32_t tsdbReadDataBlock(SDataFReader *pReader, SDataBlk *pBlock, SBlockData *pBlockData);
+int32_t tsdbReadSttBlock(SDataFReader *pReader, int32_t iStt, SSttBlk *pSttBlk, SBlockData *pBlockData);
// SDelFWriter
int32_t tsdbDelFWriterOpen(SDelFWriter **ppWriter, SDelFile *pFile, STsdb *pTsdb);
int32_t tsdbDelFWriterClose(SDelFWriter **ppWriter, int8_t sync);
@@ -278,6 +301,8 @@ int32_t tsdbReadDelIdx(SDelFReader *pReader, SArray *aDelIdx);
// tsdbRead.c ==============================================================================================
int32_t tsdbTakeReadSnap(STsdb *pTsdb, STsdbReadSnap **ppSnap);
void tsdbUntakeReadSnap(STsdb *pTsdb, STsdbReadSnap *pSnap);
+// tsdbMerge.c ==============================================================================================
+int32_t tsdbMerge(STsdb *pTsdb);
#define TSDB_CACHE_NO(c) ((c).cacheLast == 0)
#define TSDB_CACHE_LAST_ROW(c) (((c).cacheLast & 1) > 0)
@@ -324,6 +349,11 @@ struct TSDBKEY {
TSKEY ts;
};
+struct SVersionRange {
+ uint64_t minVer;
+ uint64_t maxVer;
+};
+
typedef struct SMemSkipListNode SMemSkipListNode;
struct SMemSkipListNode {
int8_t level;
@@ -416,7 +446,7 @@ struct SSmaInfo {
int32_t size;
};
-struct SBlock {
+struct SDataBlk {
TSDBKEY minKey;
TSDBKEY maxKey;
int64_t minVer;
@@ -428,7 +458,7 @@ struct SBlock {
SSmaInfo smaInfo;
};
-struct SBlockL {
+struct SSttBlk {
int64_t suid;
int64_t minUid;
int64_t maxUid;
@@ -467,12 +497,6 @@ struct SBlockData {
SArray *aColData; // SArray
};
-// ================== TSDB global config
-extern bool tsdbForceKeepFile;
-
-#define TSDB_FS_ITER_FORWARD TSDB_ORDER_ASC
-#define TSDB_FS_ITER_BACKWARD TSDB_ORDER_DESC
-
struct TABLEID {
tb_uid_t suid;
tb_uid_t uid;
@@ -536,7 +560,7 @@ struct SDataFile {
int64_t size;
};
-struct SLastFile {
+struct SSttFile {
volatile int32_t nRef;
int64_t commitID;
@@ -556,8 +580,9 @@ struct SDFileSet {
int32_t fid;
SHeadFile *pHeadF;
SDataFile *pDataF;
- SLastFile *pLastF;
SSmaFile *pSmaF;
+ uint8_t nSttF;
+ SSttFile *aSttF[TSDB_MAX_STT_TRIGGER];
};
struct SRowIter {
@@ -572,37 +597,99 @@ struct SRowMerger {
SArray *pArray; // SArray
};
-struct SDelFWriter {
- STsdb *pTsdb;
- SDelFile fDel;
- TdFilePtr pWriteH;
+typedef struct {
+ char *path;
+ int32_t szPage;
+ int32_t flag;
+ TdFilePtr pFD;
+ int64_t pgno;
+ uint8_t *pBuf;
+ int64_t szFile;
+} STsdbFD;
+struct SDelFWriter {
+ STsdb *pTsdb;
+ SDelFile fDel;
+ STsdbFD *pWriteH;
uint8_t *aBuf[1];
};
+struct STsdbReadSnap {
+ SMemTable *pMem;
+ SMemTable *pIMem;
+ STsdbFS fs;
+};
+
struct SDataFWriter {
STsdb *pTsdb;
SDFileSet wSet;
- TdFilePtr pHeadFD;
- TdFilePtr pDataFD;
- TdFilePtr pLastFD;
- TdFilePtr pSmaFD;
+ STsdbFD *pHeadFD;
+ STsdbFD *pDataFD;
+ STsdbFD *pSmaFD;
+ STsdbFD *pSttFD;
SHeadFile fHead;
SDataFile fData;
- SLastFile fLast;
SSmaFile fSma;
+ SSttFile fStt[TSDB_MAX_STT_TRIGGER];
uint8_t *aBuf[4];
};
-struct STsdbReadSnap {
- SMemTable *pMem;
- SMemTable *pIMem;
- STsdbFS fs;
+struct SDataFReader {
+ STsdb *pTsdb;
+ SDFileSet *pSet;
+ STsdbFD *pHeadFD;
+ STsdbFD *pDataFD;
+ STsdbFD *pSmaFD;
+ STsdbFD *aSttFD[TSDB_MAX_STT_TRIGGER];
+ uint8_t *aBuf[3];
};
+typedef struct {
+ int64_t suid;
+ int64_t uid;
+ TSDBROW row;
+} SRowInfo;
+
+typedef struct SSttBlockLoadInfo {
+ SBlockData blockData[2];
+ SArray *aSttBlk;
+ int32_t blockIndex[2]; // to denote the loaded block in the corresponding position.
+ int32_t currentLoadBlockIndex;
+ int32_t loadBlocks;
+ double elapsedTime;
+} SSttBlockLoadInfo;
+
+typedef struct SMergeTree {
+ int8_t backward;
+ SRBTree rbt;
+ SArray *pIterList;
+ SLDataIter *pIter;
+ bool destroyLoadInfo;
+ SSttBlockLoadInfo *pLoadInfo;
+ const char *idStr;
+} SMergeTree;
+
+typedef struct {
+ int64_t suid;
+ int64_t uid;
+ STSchema *pTSchema;
+} SSkmInfo;
+
+int32_t tMergeTreeOpen(SMergeTree *pMTree, int8_t backward, SDataFReader *pFReader, uint64_t suid, uint64_t uid,
+ STimeWindow *pTimeWindow, SVersionRange *pVerRange, void *pLoadInfo, const char *idStr);
+void tMergeTreeAddIter(SMergeTree *pMTree, SLDataIter *pIter);
+bool tMergeTreeNext(SMergeTree *pMTree);
+TSDBROW tMergeTreeGetRow(SMergeTree *pMTree);
+void tMergeTreeClose(SMergeTree *pMTree);
+
+SSttBlockLoadInfo *tCreateLastBlockLoadInfo();
+void resetLastBlockLoadInfo(SSttBlockLoadInfo *pLoadInfo);
+void getLastBlockLoadInfo(SSttBlockLoadInfo *pLoadInfo, int64_t *blocks, double *el);
+void *destroyLastBlockLoadInfo(SSttBlockLoadInfo *pLoadInfo);
+
// ========== inline functions ==========
static FORCE_INLINE int32_t tsdbKeyCmprFn(const void *p1, const void *p2) {
TSDBKEY *pKey1 = (TSDBKEY *)p1;
diff --git a/source/dnode/vnode/src/inc/vnodeInt.h b/source/dnode/vnode/src/inc/vnodeInt.h
index 39c5f3873ed9884109c0dc28f66d314b12b83a99..0e85e7bfb60313b106d8838986fa685eedf2c409 100644
--- a/source/dnode/vnode/src/inc/vnodeInt.h
+++ b/source/dnode/vnode/src/inc/vnodeInt.h
@@ -36,6 +36,7 @@
#include "tlosertree.h"
#include "tlrucache.h"
#include "tmsgcb.h"
+#include "trbtree.h"
#include "tref.h"
#include "tskiplist.h"
#include "tstream.h"
@@ -70,8 +71,8 @@ typedef struct SStreamTaskReader SStreamTaskReader;
typedef struct SStreamTaskWriter SStreamTaskWriter;
typedef struct SStreamStateReader SStreamStateReader;
typedef struct SStreamStateWriter SStreamStateWriter;
-typedef struct SRsmaSnapReader SRsmaSnapReader;
-typedef struct SRsmaSnapWriter SRsmaSnapWriter;
+typedef struct SRSmaSnapReader SRSmaSnapReader;
+typedef struct SRSmaSnapWriter SRSmaSnapWriter;
typedef struct SSnapDataHdr SSnapDataHdr;
#define VNODE_META_DIR "meta"
@@ -102,8 +103,8 @@ int metaCommit(SMeta* pMeta);
int metaCreateSTable(SMeta* pMeta, int64_t version, SVCreateStbReq* pReq);
int metaAlterSTable(SMeta* pMeta, int64_t version, SVCreateStbReq* pReq);
int metaDropSTable(SMeta* pMeta, int64_t verison, SVDropStbReq* pReq, SArray* tbUidList);
-int metaCreateTable(SMeta* pMeta, int64_t version, SVCreateTbReq* pReq);
-int metaDropTable(SMeta* pMeta, int64_t version, SVDropTbReq* pReq, SArray* tbUids);
+int metaCreateTable(SMeta* pMeta, int64_t version, SVCreateTbReq* pReq, STableMetaRsp** pMetaRsp);
+int metaDropTable(SMeta* pMeta, int64_t version, SVDropTbReq* pReq, SArray* tbUids, int64_t* tbUid);
int metaTtlDropTable(SMeta* pMeta, int64_t ttl, SArray* tbUids);
int metaAlterTable(SMeta* pMeta, int64_t version, SVAlterTbReq* pReq, STableMetaRsp* pMetaRsp);
SSchemaWrapper* metaGetTableSchema(SMeta* pMeta, tb_uid_t uid, int32_t sver, bool isinline);
@@ -173,7 +174,8 @@ int32_t tqProcessPollReq(STQ* pTq, SRpcMsg* pMsg);
// tq-stream
int32_t tqProcessTaskDeployReq(STQ* pTq, int64_t version, char* msg, int32_t msgLen);
int32_t tqProcessTaskDropReq(STQ* pTq, int64_t version, char* msg, int32_t msgLen);
-int32_t tqProcessStreamTrigger(STQ* pTq, SSubmitReq* data, int64_t ver);
+int32_t tqProcessSubmitReq(STQ* pTq, SSubmitReq* data, int64_t ver);
+int32_t tqProcessDelReq(STQ* pTq, void* pReq, int32_t len, int64_t ver);
int32_t tqProcessTaskRunReq(STQ* pTq, SRpcMsg* pMsg);
int32_t tqProcessTaskDispatchReq(STQ* pTq, SRpcMsg* pMsg, bool exec);
int32_t tqProcessTaskRecoverReq(STQ* pTq, SRpcMsg* pMsg);
@@ -189,7 +191,6 @@ SSubmitReq* tqBlockToSubmit(SVnode* pVnode, const SArray* pBlocks, const STSchem
int32_t smaInit();
void smaCleanUp();
int32_t smaOpen(SVnode* pVnode);
-int32_t smaPreClose(SSma* pSma);
int32_t smaClose(SSma* pSma);
int32_t smaBegin(SSma* pSma);
int32_t smaSyncPreCommit(SSma* pSma);
@@ -199,7 +200,6 @@ int32_t smaAsyncPreCommit(SSma* pSma);
int32_t smaAsyncCommit(SSma* pSma);
int32_t smaAsyncPostCommit(SSma* pSma);
int32_t smaDoRetention(SSma* pSma, int64_t now);
-int32_t smaProcessExec(SSma* pSma, void* pMsg);
int32_t tdProcessTSmaCreate(SSma* pSma, int64_t version, const char* msg);
int32_t tdProcessTSmaInsert(SSma* pSma, int64_t indexUid, const char* msg);
@@ -208,7 +208,7 @@ int32_t tdProcessRSmaCreate(SSma* pSma, SVCreateStbReq* pReq);
int32_t tdProcessRSmaSubmit(SSma* pSma, void* pMsg, int32_t inputType);
int32_t tdProcessRSmaDrop(SSma* pSma, SVDropStbReq* pReq);
int32_t tdFetchTbUidList(SSma* pSma, STbUidStore** ppStore, tb_uid_t suid, tb_uid_t uid);
-int32_t tdUpdateTbUidList(SSma* pSma, STbUidStore* pUidStore);
+int32_t tdUpdateTbUidList(SSma* pSma, STbUidStore* pUidStore, bool isAdd);
void tdUidStoreDestory(STbUidStore* pStore);
void* tdUidStoreFree(STbUidStore* pStore);
@@ -248,14 +248,14 @@ int32_t tqOffsetSnapWrite(STqOffsetWriter* pWriter, uint8_t* pData, uint32_t nDa
// SStreamTaskReader ======================================
// SStreamStateWriter =====================================
// SStreamStateReader =====================================
-// SRsmaSnapReader ========================================
-int32_t rsmaSnapReaderOpen(SSma* pSma, int64_t sver, int64_t ever, SRsmaSnapReader** ppReader);
-int32_t rsmaSnapReaderClose(SRsmaSnapReader** ppReader);
-int32_t rsmaSnapRead(SRsmaSnapReader* pReader, uint8_t** ppData);
-// SRsmaSnapWriter ========================================
-int32_t rsmaSnapWriterOpen(SSma* pSma, int64_t sver, int64_t ever, SRsmaSnapWriter** ppWriter);
-int32_t rsmaSnapWrite(SRsmaSnapWriter* pWriter, uint8_t* pData, uint32_t nData);
-int32_t rsmaSnapWriterClose(SRsmaSnapWriter** ppWriter, int8_t rollback);
+// SRSmaSnapReader ========================================
+int32_t rsmaSnapReaderOpen(SSma* pSma, int64_t sver, int64_t ever, SRSmaSnapReader** ppReader);
+int32_t rsmaSnapReaderClose(SRSmaSnapReader** ppReader);
+int32_t rsmaSnapRead(SRSmaSnapReader* pReader, uint8_t** ppData);
+// SRSmaSnapWriter ========================================
+int32_t rsmaSnapWriterOpen(SSma* pSma, int64_t sver, int64_t ever, SRSmaSnapWriter** ppWriter);
+int32_t rsmaSnapWrite(SRSmaSnapWriter* pWriter, uint8_t* pData, uint32_t nData);
+int32_t rsmaSnapWriterClose(SRSmaSnapWriter** ppWriter, int8_t rollback);
typedef struct {
int8_t streamType; // sma or other
@@ -323,7 +323,6 @@ struct SVnode {
TdThreadMutex lock;
bool blocked;
bool restored;
- bool inClose;
tsem_t syncSem;
SQHandle* pQuery;
};
diff --git a/source/dnode/vnode/src/meta/metaQuery.c b/source/dnode/vnode/src/meta/metaQuery.c
index 805bc24d8c2824cb8e5e95df03c8b4b65ce25d6d..a34569b08ecdd87483dbf31e8c6d7e406e8ae766 100644
--- a/source/dnode/vnode/src/meta/metaQuery.c
+++ b/source/dnode/vnode/src/meta/metaQuery.c
@@ -129,10 +129,16 @@ _err:
bool metaIsTableExist(SMeta *pMeta, tb_uid_t uid) {
// query uid.idx
+ metaRLock(pMeta);
+
if (tdbTbGet(pMeta->pUidIdx, &uid, sizeof(uid), NULL, NULL) < 0) {
+ metaULock(pMeta);
+
return false;
}
+ metaULock(pMeta);
+
return true;
}
@@ -182,9 +188,14 @@ tb_uid_t metaGetTableEntryUidByName(SMeta *pMeta, const char *name) {
}
int metaGetTableNameByUid(void *meta, uint64_t uid, char *tbName) {
+ int code = 0;
SMetaReader mr = {0};
metaReaderInit(&mr, (SMeta *)meta, 0);
- metaGetTableEntryByUid(&mr, uid);
+ code = metaGetTableEntryByUid(&mr, uid);
+ if (code < 0) {
+ metaReaderClear(&mr);
+ return -1;
+ }
STR_TO_VARSTR(tbName, mr.me.name);
metaReaderClear(&mr);
@@ -280,6 +291,38 @@ _query:
tDecoderClear(&dc);
goto _exit;
}
+ { // Traverse to find the previous qualified data
+ TBC *pCur;
+ tdbTbcOpen(pMeta->pTbDb, &pCur, NULL);
+ STbDbKey key = {.version = sver, .uid = INT64_MAX};
+ int c = 0;
+ tdbTbcMoveTo(pCur, &key, sizeof(key), &c);
+ if(c < 0){
+ tdbTbcMoveToPrev(pCur);
+ }
+
+ void *pKey = NULL;
+ void *pVal = NULL;
+ int vLen = 0, kLen = 0;
+ while(1){
+ int32_t ret = tdbTbcPrev(pCur, &pKey, &kLen, &pVal, &vLen);
+ if (ret < 0) break;
+
+ STbDbKey *tmp = (STbDbKey*)pKey;
+ if(tmp->uid != uid){
+ continue;
+ }
+ SDecoder dcNew = {0};
+ SMetaEntry meNew = {0};
+ tDecoderInit(&dcNew, pVal, vLen);
+ metaDecodeEntry(&dcNew, &meNew);
+ pSchema = tCloneSSchemaWrapper(&meNew.stbEntry.schemaRow);
+ tDecoderClear(&dcNew);
+ tdbTbcClose(pCur);
+ goto _exit;
+ }
+ tdbTbcClose(pCur);
+ }
} else if (me.type == TSDB_CHILD_TABLE) {
uid = me.ctbEntry.suid;
tDecoderClear(&dc);
@@ -615,11 +658,15 @@ int64_t metaGetTbNum(SMeta *pMeta) {
// N.B. Called by statusReq per second
int64_t metaGetTimeSeriesNum(SMeta *pMeta) {
// sum of (number of columns of stable - 1) * number of ctables (excluding timestamp column)
- int64_t num = 0;
- vnodeGetTimeSeriesNum(pMeta->pVnode, &num);
- pMeta->pVnode->config.vndStats.numOfTimeSeries = num;
+ if (pMeta->pVnode->config.vndStats.numOfTimeSeries <= 0 || ++pMeta->pVnode->config.vndStats.itvTimeSeries % 60 == 0) {
+ int64_t num = 0;
+ vnodeGetTimeSeriesNum(pMeta->pVnode, &num);
+ pMeta->pVnode->config.vndStats.numOfTimeSeries = num;
+
+ pMeta->pVnode->config.vndStats.itvTimeSeries = 0;
+ }
- return pMeta->pVnode->config.vndStats.numOfTimeSeries;
+ return pMeta->pVnode->config.vndStats.numOfTimeSeries + pMeta->pVnode->config.vndStats.numOfNTimeSeries;
}
typedef struct {
@@ -887,6 +934,37 @@ const void *metaGetTableTagVal(void *pTag, int16_t type, STagVal *val) {
if (!find) {
return NULL;
}
+
+#ifdef TAG_FILTER_DEBUG
+ if (IS_VAR_DATA_TYPE(val->type)) {
+ char *buf = taosMemoryCalloc(val->nData + 1, 1);
+ memcpy(buf, val->pData, val->nData);
+ metaDebug("metaTag table val varchar index:%d cid:%d type:%d value:%s", 1, val->cid, val->type, buf);
+ taosMemoryFree(buf);
+ } else {
+ double dval = 0;
+ GET_TYPED_DATA(dval, double, val->type, &val->i64);
+ metaDebug("metaTag table val number index:%d cid:%d type:%d value:%f", 1, val->cid, val->type, dval);
+ }
+
+ SArray *pTagVals = NULL;
+ tTagToValArray((STag *)pTag, &pTagVals);
+ for (int i = 0; i < taosArrayGetSize(pTagVals); i++) {
+ STagVal *pTagVal = (STagVal *)taosArrayGet(pTagVals, i);
+
+ if (IS_VAR_DATA_TYPE(pTagVal->type)) {
+ char *buf = taosMemoryCalloc(pTagVal->nData + 1, 1);
+ memcpy(buf, pTagVal->pData, pTagVal->nData);
+ metaDebug("metaTag table varchar index:%d cid:%d type:%d value:%s", i, pTagVal->cid, pTagVal->type, buf);
+ taosMemoryFree(buf);
+ } else {
+ double dval = 0;
+ GET_TYPED_DATA(dval, double, pTagVal->type, &pTagVal->i64);
+ metaDebug("metaTag table number index:%d cid:%d type:%d value:%f", i, pTagVal->cid, pTagVal->type, dval);
+ }
+ }
+#endif
+
return val;
}
diff --git a/source/dnode/vnode/src/meta/metaSnapshot.c b/source/dnode/vnode/src/meta/metaSnapshot.c
index 973c3814074685128395bd50243bba8981af4200..0edbd092e6b06883cc1e2b6be66e0ea55b8563a1 100644
--- a/source/dnode/vnode/src/meta/metaSnapshot.c
+++ b/source/dnode/vnode/src/meta/metaSnapshot.c
@@ -195,3 +195,434 @@ _err:
metaError("vgId:%d, vnode snapshot meta write failed since %s", TD_VID(pMeta->pVnode), tstrerror(code));
return code;
}
+
+// Cached identity of a super table captured at snapshot time: its name plus
+// cloned row/tag schemas, used later to rebuild create-table requests for its
+// child tables. Entries live in the suidInfo hash, which frees them through
+// destroySTableInfoForChildTable (set via taosHashSetFreeFp).
+typedef struct STableInfoForChildTable{
+  char *tableName;            // heap copy of the super table name (strdup)
+  SSchemaWrapper *schemaRow;  // cloned row schema (tCloneSSchemaWrapper)
+  SSchemaWrapper *tagRow;     // cloned tag schema (tCloneSSchemaWrapper)
+}STableInfoForChildTable;
+
+// Free callback for the suidInfo hash: releases everything one
+// STableInfoForChildTable entry owns (the hash frees the entry slot itself).
+static void destroySTableInfoForChildTable(void* data) {
+  STableInfoForChildTable* pInfo = (STableInfoForChildTable*)data;
+  tDeleteSSchemaWrapper(pInfo->schemaRow);
+  tDeleteSSchemaWrapper(pInfo->tagRow);
+  taosMemoryFree(pInfo->tableName);
+}
+
+// Reopen the tbDb cursor and park it on the last record whose version does
+// not exceed ctx->snapVersion. Seeking with uid = INT64_MAX makes the key an
+// upper bound for that version, so a negative compare means "step back one".
+static void MoveToSnapShotVersion(SSnapContext* ctx){
+  tdbTbcClose(ctx->pCur);
+  tdbTbcOpen(ctx->pMeta->pTbDb, &ctx->pCur, NULL);
+  STbDbKey seekKey = {.version = ctx->snapVersion, .uid = INT64_MAX};
+  int cmp = 0;
+  tdbTbcMoveTo(ctx->pCur, &seekKey, sizeof(seekKey), &cmp);
+  if (cmp < 0) {
+    tdbTbcMoveToPrev(ctx->pCur);
+  }
+}
+
+// Seek the tbDb cursor to the exact (ver, uid) record. Returns the tdb
+// comparison result: 0 when the record exists, non-zero otherwise.
+static int32_t MoveToPosition(SSnapContext* ctx, int64_t ver, int64_t uid){
+  tdbTbcClose(ctx->pCur);
+  tdbTbcOpen(ctx->pMeta->pTbDb, &ctx->pCur, NULL);
+  STbDbKey seekKey = {.version = ver, .uid = uid};
+  int cmp = 0;
+  tdbTbcMoveTo(ctx->pCur, &seekKey, sizeof(seekKey), &cmp);
+  return cmp;
+}
+
+// Reopen the tbDb cursor and position it on the very first record, ready for
+// a forward (oldest-version-first) scan.
+static void MoveToFirst(SSnapContext* ctx){
+  tdbTbcClose(ctx->pCur);
+  tdbTbcOpen(ctx->pMeta->pTbDb, &ctx->pCur, NULL);
+  tdbTbcMoveToFirst(ctx->pCur);
+}
+
+// Cache a super table's name and schemas in suidInfo (keyed by uid) so that
+// child-table create requests can be rebuilt later. No-op when the super
+// table is already cached. The hash owns the stored copies and frees them
+// through destroySTableInfoForChildTable.
+static void saveSuperTableInfoForChildTable(SMetaEntry *me, SHashObj *suidInfo){
+  STableInfoForChildTable* data = (STableInfoForChildTable*)taosHashGet(suidInfo, &me->uid, sizeof(tb_uid_t));
+  if(data){
+    return;
+  }
+  STableInfoForChildTable dataTmp = {0};
+  dataTmp.tableName = strdup(me->name);
+
+  dataTmp.schemaRow = tCloneSSchemaWrapper(&me->stbEntry.schemaRow);
+  dataTmp.tagRow = tCloneSSchemaWrapper(&me->stbEntry.schemaTag);
+  // if the put fails the hash never takes ownership, so release the copies
+  // here instead of leaking them
+  if (taosHashPut(suidInfo, &me->uid, sizeof(tb_uid_t), &dataTmp, sizeof(STableInfoForChildTable)) != 0) {
+    destroySTableInfoForChildTable(&dataTmp);
+  }
+}
+
+// Build the snapshot iteration context for TMQ meta/data replication.
+//
+// Two passes over pTbDb:
+//   pass 1 (oldest -> newest, up to snapVersion): collect, in creation order,
+//     the uid of every table that still exists in the current uid index and
+//     matches the subscription (whole DB, or only tables under 'suid' for
+//     TOPIC_SUB_TYPE__TABLE);
+//   pass 2 (snapVersion -> oldest): record, per uid, the newest version not
+//     exceeding snapVersion, and cache super-table name/schemas in suidInfo.
+// Finally each idVersion entry is stamped with its position in idList.
+//
+// Returns TDB_CODE_SUCCESS, or -1 on allocation failure — in which case all
+// partially built state is released and *ctxRet is set to NULL.
+int32_t buildSnapContext(SMeta* pMeta, int64_t snapVersion, int64_t suid, int8_t subType, bool withMeta, SSnapContext** ctxRet){
+  SSnapContext* ctx = taosMemoryCalloc(1, sizeof(SSnapContext));
+  if(ctx == NULL) return -1;
+  *ctxRet = ctx;
+  ctx->pMeta = pMeta;
+  ctx->snapVersion = snapVersion;
+  ctx->suid = suid;
+  ctx->subType = subType;
+  ctx->queryMetaOrData = withMeta;
+  ctx->withMeta = withMeta;
+  ctx->idVersion = taosHashInit(100, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), true, HASH_NO_LOCK);
+  if(ctx->idVersion == NULL){
+    goto _err;
+  }
+
+  ctx->suidInfo = taosHashInit(100, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), true, HASH_NO_LOCK);
+  if(ctx->suidInfo == NULL){
+    goto _err;
+  }
+  taosHashSetFreeFp(ctx->suidInfo, destroySTableInfoForChildTable);
+
+  ctx->index = 0;
+  ctx->idList = taosArrayInit(100, sizeof(int64_t));
+  if(ctx->idList == NULL){
+    goto _err;
+  }
+  void *pKey = NULL;
+  void *pVal = NULL;
+  int vLen = 0, kLen = 0;
+
+  metaDebug("tmqsnap init snapVersion:%" PRIi64, ctx->snapVersion);
+  // pass 1: collect matching uids in creation order; idVersion is used here
+  // only as a "seen" set and is cleared before pass 2 reuses it.
+  MoveToFirst(ctx);
+  while(1){
+    int32_t ret = tdbTbcNext(ctx->pCur, &pKey, &kLen, &pVal, &vLen);
+    if (ret < 0) break;
+    STbDbKey *tmp = (STbDbKey*)pKey;
+    if (tmp->version > ctx->snapVersion) break;
+
+    SIdInfo* idData = (SIdInfo*)taosHashGet(ctx->idVersion, &tmp->uid, sizeof(tb_uid_t));
+    if(idData) {
+      continue;
+    }
+
+    if (tdbTbGet(pMeta->pUidIdx, &tmp->uid, sizeof(tb_uid_t), NULL, NULL) < 0) { // check if table exist for now, need optimize later
+      continue;
+    }
+
+    SDecoder dc = {0};
+    SMetaEntry me = {0};
+    tDecoderInit(&dc, pVal, vLen);
+    metaDecodeEntry(&dc, &me);
+    if(ctx->subType == TOPIC_SUB_TYPE__TABLE){
+      if ((me.uid != ctx->suid && me.type == TSDB_SUPER_TABLE) ||
+          (me.ctbEntry.suid != ctx->suid && me.type == TSDB_CHILD_TABLE)){
+        tDecoderClear(&dc);
+        continue;
+      }
+    }
+
+    taosArrayPush(ctx->idList, &tmp->uid);
+    metaDebug("tmqsnap init idlist name:%s, uid:%" PRIi64, me.name, tmp->uid);
+    SIdInfo info = {0};
+    taosHashPut(ctx->idVersion, &tmp->uid, sizeof(tb_uid_t), &info, sizeof(SIdInfo));
+
+    tDecoderClear(&dc);
+  }
+  taosHashClear(ctx->idVersion);
+
+  // pass 2: walking backwards from snapVersion guarantees the first record
+  // seen per uid is its newest version within the snapshot.
+  MoveToSnapShotVersion(ctx);
+  while(1){
+    int32_t ret = tdbTbcPrev(ctx->pCur, &pKey, &kLen, &pVal, &vLen);
+    if (ret < 0) break;
+
+    STbDbKey *tmp = (STbDbKey*)pKey;
+    SIdInfo* idData = (SIdInfo*)taosHashGet(ctx->idVersion, &tmp->uid, sizeof(tb_uid_t));
+    if(idData){
+      continue;
+    }
+    SIdInfo info = {.version = tmp->version, .index = 0};
+    taosHashPut(ctx->idVersion, &tmp->uid, sizeof(tb_uid_t), &info, sizeof(SIdInfo));
+
+    SDecoder dc = {0};
+    SMetaEntry me = {0};
+    tDecoderInit(&dc, pVal, vLen);
+    metaDecodeEntry(&dc, &me);
+    if(ctx->subType == TOPIC_SUB_TYPE__TABLE){
+      if ((me.uid != ctx->suid && me.type == TSDB_SUPER_TABLE) ||
+          (me.ctbEntry.suid != ctx->suid && me.type == TSDB_CHILD_TABLE)){
+        tDecoderClear(&dc);
+        continue;
+      }
+    }
+
+    if ((ctx->subType == TOPIC_SUB_TYPE__DB && me.type == TSDB_SUPER_TABLE)
+        || (ctx->subType == TOPIC_SUB_TYPE__TABLE && me.uid == ctx->suid)) {
+      saveSuperTableInfoForChildTable(&me, ctx->suidInfo);
+    }
+    tDecoderClear(&dc);
+  }
+
+  // stamp each uid's iteration index so setForSnapShot can resume by uid
+  for(int i = 0; i < taosArrayGetSize(ctx->idList); i++){
+    int64_t *uid = taosArrayGet(ctx->idList, i);
+    SIdInfo* idData = (SIdInfo*)taosHashGet(ctx->idVersion, uid, sizeof(int64_t));
+    ASSERT(idData);
+    idData->index = i;
+    metaDebug("tmqsnap init idVersion uid:%" PRIi64 " version:%" PRIi64 " index:%d", *uid, idData->version, idData->index);
+  }
+
+  return TDB_CODE_SUCCESS;
+
+_err:
+  // release partial state; taosHashCleanup/taosArrayDestroy are assumed
+  // NULL-tolerant (standard util behavior) — TODO confirm
+  taosHashCleanup(ctx->idVersion);
+  taosHashCleanup(ctx->suidInfo);
+  taosArrayDestroy(ctx->idList);
+  taosMemoryFree(ctx);
+  *ctxRet = NULL;
+  return -1;
+}
+
+// Release everything a snapshot context owns: the tbDb cursor, the uid list,
+// both hashes (suidInfo entries are freed by its free-fp), and the context
+// itself. Safe to call with NULL so error paths can call it unconditionally.
+int32_t destroySnapContext(SSnapContext* ctx){
+  if (ctx == NULL) {
+    return 0;
+  }
+  tdbTbcClose(ctx->pCur);
+  taosArrayDestroy(ctx->idList);
+  taosHashCleanup(ctx->idVersion);
+  taosHashCleanup(ctx->suidInfo);
+  taosMemoryFree(ctx);
+  return 0;
+}
+
+// Serialize one SVCreateTbReq as a single-element SVCreateTbBatchReq message,
+// reserving room for an SMsgHead at the front of the buffer. On success
+// returns 0 with *pBuf/*contLen describing the encoded message; on any
+// failure returns -1 (the temporary batch array is always released).
+static int32_t buildNormalChildTableInfo(SVCreateTbReq *req, void **pBuf, int32_t *contLen){
+  int32_t            code = 0;
+  SVCreateTbBatchReq batch = {0};
+
+  batch.pArray = taosArrayInit(1, sizeof(struct SVCreateTbReq));
+  if (NULL == batch.pArray){
+    code = -1;
+    goto end;
+  }
+  batch.nReqs = 1;
+  taosArrayPush(batch.pArray, req);
+
+  tEncodeSize(tEncodeSVCreateTbBatchReq, &batch, *contLen, code);
+  if(code < 0){
+    code = -1;
+    goto end;
+  }
+  *contLen += sizeof(SMsgHead);
+  *pBuf = taosMemoryMalloc(*contLen);
+  if (NULL == *pBuf) {
+    code = -1;
+    goto end;
+  }
+  SEncoder coder = {0};
+  tEncoderInit(&coder, POINTER_SHIFT(*pBuf, sizeof(SMsgHead)), *contLen);
+  if (tEncodeSVCreateTbBatchReq(&coder, &batch) < 0) {
+    taosMemoryFreeClear(*pBuf);
+    tEncoderClear(&coder);
+    code = -1;
+    goto end;
+  }
+  tEncoderClear(&coder);
+
+end:
+  taosArrayDestroy(batch.pArray);
+  return code;
+}
+
+// Encode a SVCreateStbReq into a freshly allocated message buffer, reserving
+// room for an SMsgHead at the front. Returns 0 on success with *pBuf and
+// *contLen set; returns -1 on sizing, allocation, or encoding failure.
+static int32_t buildSuperTableInfo(SVCreateStbReq *req, void **pBuf, int32_t *contLen){
+  int32_t code = 0;
+  tEncodeSize(tEncodeSVCreateStbReq, req, *contLen, code);
+  if (code < 0) {
+    return -1;
+  }
+
+  *contLen += sizeof(SMsgHead);
+  *pBuf = taosMemoryMalloc(*contLen);
+  if (NULL == *pBuf) {
+    return -1;
+  }
+
+  SEncoder coder = {0};
+  tEncoderInit(&coder, POINTER_SHIFT(*pBuf, sizeof(SMsgHead)), *contLen);
+  if (tEncodeSVCreateStbReq(&coder, req) < 0) {
+    taosMemoryFreeClear(*pBuf);
+    tEncoderClear(&coder);
+    return -1;
+  }
+  tEncoderClear(&coder);
+  return 0;
+}
+
+// Position the snapshot iteration index at the entry for 'uid'; a uid of 0
+// rewinds to the beginning. Returns 0 on success, -1 when uid is unknown.
+int32_t setForSnapShot(SSnapContext* ctx, int64_t uid){
+  if (uid == 0) {
+    ctx->index = 0;
+    return 0;
+  }
+
+  SIdInfo* pInfo = (SIdInfo*)taosHashGet(ctx->idVersion, &uid, sizeof(tb_uid_t));
+  if (pInfo == NULL) {
+    return -1;
+  }
+
+  ctx->index = pInfo->index;
+  return 0;
+}
+
+// Produce the next "meta" message (create stb / child table / normal table)
+// for the snapshot. Advances ctx->index; when the id list is exhausted the
+// context is flipped to data mode (queryMetaOrData = false) and 0 is returned
+// with no message. On success *pBuf/*contLen hold the encoded request, *type
+// the message type, and *uid the table uid.
+int32_t getMetafromSnapShot(SSnapContext* ctx, void **pBuf, int32_t *contLen, int16_t *type, int64_t *uid){
+  int32_t ret = 0;
+  void *pKey = NULL;
+  void *pVal = NULL;
+  int vLen = 0, kLen = 0;
+
+  while(1){
+    if(ctx->index >= taosArrayGetSize(ctx->idList)){
+      metaDebug("tmqsnap get meta end");
+      ctx->index = 0;
+      ctx->queryMetaOrData = false; // change to get data
+      return 0;
+    }
+
+    int64_t* uidTmp = taosArrayGet(ctx->idList, ctx->index);
+    ctx->index++;
+    SIdInfo* idInfo = (SIdInfo*)taosHashGet(ctx->idVersion, uidTmp, sizeof(tb_uid_t));
+    ASSERT(idInfo);
+
+    *uid = *uidTmp;
+    ret = MoveToPosition(ctx, idInfo->version, *uidTmp);
+    if(ret == 0){
+      break;
+    }
+    // recorded uid whose exact version record is gone; skip and try the next
+    metaDebug("tmqsnap get meta not exist uid:%" PRIi64 " version:%" PRIi64, *uid, idInfo->version);
+  }
+
+  tdbTbcGet(ctx->pCur, (const void**)&pKey, &kLen, (const void**)&pVal, &vLen);
+  SDecoder dc = {0};
+  SMetaEntry me = {0};
+  tDecoderInit(&dc, pVal, vLen);
+  metaDecodeEntry(&dc, &me);
+  metaDebug("tmqsnap get meta uid:%" PRIi64 " name:%s index:%d", *uid, me.name, ctx->index-1);
+
+  if ((ctx->subType == TOPIC_SUB_TYPE__DB && me.type == TSDB_SUPER_TABLE)
+      || (ctx->subType == TOPIC_SUB_TYPE__TABLE && me.uid == ctx->suid)) {
+    // super table: reset schema versions to 1 so the receiver starts fresh
+    SVCreateStbReq req = {0};
+    req.name = me.name;
+    req.suid = me.uid;
+    req.schemaRow = me.stbEntry.schemaRow;
+    req.schemaTag = me.stbEntry.schemaTag;
+    req.schemaRow.version = 1;
+    req.schemaTag.version = 1;
+
+    ret = buildSuperTableInfo(&req, pBuf, contLen);
+    *type = TDMT_VND_CREATE_STB;
+
+  } else if ((ctx->subType == TOPIC_SUB_TYPE__DB && me.type == TSDB_CHILD_TABLE)
+             || (ctx->subType == TOPIC_SUB_TYPE__TABLE && me.type == TSDB_CHILD_TABLE && me.ctbEntry.suid == ctx->suid)) {
+    // child table: rebuild the create request from the cached super table info
+    STableInfoForChildTable* data = (STableInfoForChildTable*)taosHashGet(ctx->suidInfo, &me.ctbEntry.suid, sizeof(tb_uid_t));
+    ASSERT(data);
+    SVCreateTbReq req = {0};
+
+    req.type = TSDB_CHILD_TABLE;
+    req.name = me.name;
+    req.uid = me.uid;
+    req.commentLen = -1;
+    req.ctb.suid = me.ctbEntry.suid;
+    req.ctb.tagNum = data->tagRow->nCols;
+    req.ctb.name = data->tableName;
+
+    // collect the names of the tags actually present on this child table
+    SArray* tagName = taosArrayInit(req.ctb.tagNum, TSDB_COL_NAME_LEN);
+    STag* p = (STag*)me.ctbEntry.pTags;
+    if(tTagIsJson(p)){
+      // a json tag occupies the single (first) tag column
+      if (p->nTag != 0) {
+        SSchema* schema = &data->tagRow->pSchema[0];
+        taosArrayPush(tagName, schema->name);
+      }
+    }else{
+      SArray* pTagVals = NULL;
+      if (tTagToValArray((const STag*)p, &pTagVals) != 0) {
+        ASSERT(0);
+      }
+      int16_t nCols = taosArrayGetSize(pTagVals);
+      for (int j = 0; j < nCols; ++j) {
+        STagVal* pTagVal = (STagVal*)taosArrayGet(pTagVals, j);
+        for(int i = 0; i < data->tagRow->nCols; i++){
+          SSchema *schema = &data->tagRow->pSchema[i];
+          if(schema->colId == pTagVal->cid){
+            taosArrayPush(tagName, schema->name);
+          }
+        }
+      }
+      taosArrayDestroy(pTagVals); // fix: was leaked for every non-json child table
+    }
+    req.ctb.pTag = me.ctbEntry.pTags;
+
+    req.ctb.tagName = tagName;
+    ret = buildNormalChildTableInfo(&req, pBuf, contLen);
+    *type = TDMT_VND_CREATE_TABLE;
+    taosArrayDestroy(tagName);
+  } else if(ctx->subType == TOPIC_SUB_TYPE__DB){
+    // normal (non-child) table
+    SVCreateTbReq req = {0};
+    req.type = TSDB_NORMAL_TABLE;
+    req.name = me.name;
+    req.uid = me.uid;
+    req.commentLen = -1;
+    req.ntb.schemaRow = me.ntbEntry.schemaRow;
+    ret = buildNormalChildTableInfo(&req, pBuf, contLen);
+    *type = TDMT_VND_CREATE_TABLE;
+  } else{
+    ASSERT(0);
+  }
+  tDecoderClear(&dc);
+
+  return ret;
+}
+
+// Return identity info (uid, suid, name, row schema) for the next table in
+// the snapshot that the subscription can deliver data for, skipping entries
+// whose version record no longer exists and entries that don't match the
+// subscription. A zeroed SMetaTableInfo signals the end of the id list.
+// The caller owns result.schema (cloned here).
+SMetaTableInfo getUidfromSnapShot(SSnapContext* ctx){
+  SMetaTableInfo result = {0};
+  void *pKey = NULL;
+  void *pVal = NULL;
+  int vLen = 0, kLen = 0;
+
+  while(1){
+    if(ctx->index >= taosArrayGetSize(ctx->idList)){
+      metaDebug("tmqsnap get uid info end");
+      return result;
+    }
+    int64_t* uidTmp = taosArrayGet(ctx->idList, ctx->index);
+    ctx->index++;
+    SIdInfo* idInfo = (SIdInfo*)taosHashGet(ctx->idVersion, uidTmp, sizeof(tb_uid_t));
+    ASSERT(idInfo);
+
+    int32_t ret = MoveToPosition(ctx, idInfo->version, *uidTmp);
+    if(ret != 0) {
+      metaDebug("tmqsnap getUidfromSnapShot not exist uid:%" PRIi64 " version:%" PRIi64, *uidTmp, idInfo->version);
+      continue;
+    }
+    tdbTbcGet(ctx->pCur, (const void**)&pKey, &kLen, (const void**)&pVal, &vLen);
+    SDecoder dc = {0};
+    SMetaEntry me = {0};
+    tDecoderInit(&dc, pVal, vLen);
+    metaDecodeEntry(&dc, &me);
+    metaDebug("tmqsnap get uid info uid:%" PRIi64 " name:%s index:%d", me.uid, me.name, ctx->index-1);
+
+    if (ctx->subType == TOPIC_SUB_TYPE__DB && me.type == TSDB_CHILD_TABLE){
+      // child table: row schema comes from the cached super table info
+      STableInfoForChildTable* data = (STableInfoForChildTable*)taosHashGet(ctx->suidInfo, &me.ctbEntry.suid, sizeof(tb_uid_t));
+      ASSERT(data);
+      result.uid = me.uid;
+      result.suid = me.ctbEntry.suid;
+      result.schema = tCloneSSchemaWrapper(data->schemaRow);
+      strcpy(result.tbName, me.name);
+      tDecoderClear(&dc);
+      break;
+    } else if (ctx->subType == TOPIC_SUB_TYPE__DB && me.type == TSDB_NORMAL_TABLE) {
+      result.uid = me.uid;
+      result.suid = 0;
+      strcpy(result.tbName, me.name);
+      result.schema = tCloneSSchemaWrapper(&me.ntbEntry.schemaRow);
+      tDecoderClear(&dc);
+      break;
+    } else if(ctx->subType == TOPIC_SUB_TYPE__TABLE && me.type == TSDB_CHILD_TABLE && me.ctbEntry.suid == ctx->suid) {
+      STableInfoForChildTable* data = (STableInfoForChildTable*)taosHashGet(ctx->suidInfo, &me.ctbEntry.suid, sizeof(tb_uid_t));
+      ASSERT(data);
+      result.uid = me.uid;
+      result.suid = me.ctbEntry.suid;
+      strcpy(result.tbName, me.name);
+      result.schema = tCloneSSchemaWrapper(data->schemaRow);
+      tDecoderClear(&dc);
+      break;
+    } else{
+      // super tables and non-matching tables carry no row data to deliver
+      metaDebug("tmqsnap get uid continue");
+      tDecoderClear(&dc);
+      continue;
+    }
+  }
+
+  return result;
+}
diff --git a/source/dnode/vnode/src/meta/metaTable.c b/source/dnode/vnode/src/meta/metaTable.c
index aa107ab2532b83b40abe8b1abdc60e059ab1de34..22ec8118a2911f738b2aa7ce27f8b32b7fd4d461 100644
--- a/source/dnode/vnode/src/meta/metaTable.c
+++ b/source/dnode/vnode/src/meta/metaTable.c
@@ -99,6 +99,7 @@ static int metaSaveJsonVarToIdx(SMeta *pMeta, const SMetaEntry *pCtbEntry, const
memcpy(val, (uint16_t *)&len, VARSTR_HEADER_SIZE);
type = TSDB_DATA_TYPE_VARCHAR;
term = indexTermCreate(suid, ADD_VALUE, type, key, nKey, val, len);
+ taosMemoryFree(val);
} else if (pTagVal->nData == 0) {
term = indexTermCreate(suid, ADD_VALUE, TSDB_DATA_TYPE_VARCHAR, key, nKey, pTagVal->pData, 0);
}
@@ -115,6 +116,7 @@ static int metaSaveJsonVarToIdx(SMeta *pMeta, const SMetaEntry *pCtbEntry, const
indexMultiTermAdd(terms, term);
}
}
+ taosArrayDestroy(pTagVals);
indexJsonPut(pMeta->pTagIvtIdx, terms, tuid);
indexMultiTermDestroy(terms);
#endif
@@ -242,6 +244,7 @@ int metaDropSTable(SMeta *pMeta, int64_t verison, SVDropStbReq *pReq, SArray *tb
// check if super table exists
rc = tdbTbGet(pMeta->pNameIdx, pReq->name, strlen(pReq->name) + 1, &pData, &nData);
if (rc < 0 || *(tb_uid_t *)pData != pReq->suid) {
+ tdbFree(pData);
terrno = TSDB_CODE_TDB_STB_NOT_EXIST;
return -1;
}
@@ -307,7 +310,7 @@ int metaAlterSTable(SMeta *pMeta, int64_t version, SVCreateStbReq *pReq) {
int64_t oversion;
SDecoder dc = {0};
int32_t ret;
- int32_t c;
+ int32_t c = -2;
tdbTbcOpen(pMeta->pUidIdx, &pUidIdxc, &pMeta->txn);
ret = tdbTbcMoveTo(pUidIdxc, &pReq->suid, sizeof(tb_uid_t), &c);
@@ -367,7 +370,7 @@ int metaAlterSTable(SMeta *pMeta, int64_t version, SVCreateStbReq *pReq) {
return 0;
}
-int metaCreateTable(SMeta *pMeta, int64_t version, SVCreateTbReq *pReq) {
+int metaCreateTable(SMeta *pMeta, int64_t version, SVCreateTbReq *pReq, STableMetaRsp **pMetaRsp) {
SMetaEntry me = {0};
SMetaReader mr = {0};
@@ -413,6 +416,27 @@ int metaCreateTable(SMeta *pMeta, int64_t version, SVCreateTbReq *pReq) {
me.ctbEntry.suid = pReq->ctb.suid;
me.ctbEntry.pTags = pReq->ctb.pTag;
+#ifdef TAG_FILTER_DEBUG
+ SArray *pTagVals = NULL;
+ int32_t code = tTagToValArray((STag *)pReq->ctb.pTag, &pTagVals);
+ for (int i = 0; i < taosArrayGetSize(pTagVals); i++) {
+ STagVal *pTagVal = (STagVal *)taosArrayGet(pTagVals, i);
+
+ if (IS_VAR_DATA_TYPE(pTagVal->type)) {
+ char *buf = taosMemoryCalloc(pTagVal->nData + 1, 1);
+ memcpy(buf, pTagVal->pData, pTagVal->nData);
+ metaDebug("metaTag table:%s varchar index:%d cid:%d type:%d value:%s", pReq->name, i, pTagVal->cid,
+ pTagVal->type, buf);
+ taosMemoryFree(buf);
+ } else {
+ double val = 0;
+ GET_TYPED_DATA(val, double, pTagVal->type, &pTagVal->i64);
+ metaDebug("metaTag table:%s number index:%d cid:%d type:%d value:%f", pReq->name, i, pTagVal->cid,
+ pTagVal->type, val);
+ }
+ }
+#endif
+
++pMeta->pVnode->config.vndStats.numOfCTables;
} else {
me.ntbEntry.ctime = pReq->ctime;
@@ -423,10 +447,26 @@ int metaCreateTable(SMeta *pMeta, int64_t version, SVCreateTbReq *pReq) {
me.ntbEntry.ncid = me.ntbEntry.schemaRow.pSchema[me.ntbEntry.schemaRow.nCols - 1].colId + 1;
++pMeta->pVnode->config.vndStats.numOfNTables;
+ pMeta->pVnode->config.vndStats.numOfNTimeSeries += me.ntbEntry.schemaRow.nCols - 1;
}
if (metaHandleEntry(pMeta, &me) < 0) goto _err;
+ if (pMetaRsp) {
+ *pMetaRsp = taosMemoryCalloc(1, sizeof(STableMetaRsp));
+
+ if (*pMetaRsp) {
+ if (me.type == TSDB_CHILD_TABLE) {
+ (*pMetaRsp)->tableType = TSDB_CHILD_TABLE;
+ (*pMetaRsp)->tuid = pReq->uid;
+ (*pMetaRsp)->suid = pReq->ctb.suid;
+ strcpy((*pMetaRsp)->tbName, pReq->name);
+ } else {
+ metaUpdateMetaRsp(pReq->uid, pReq->name, &pReq->ntb.schemaRow, *pMetaRsp);
+ }
+ }
+ }
+
metaDebug("vgId:%d, table:%s uid %" PRId64 " is created, type:%" PRId8, TD_VID(pMeta->pVnode), pReq->name, pReq->uid,
pReq->type);
return 0;
@@ -437,7 +477,7 @@ _err:
return -1;
}
-int metaDropTable(SMeta *pMeta, int64_t version, SVDropTbReq *pReq, SArray *tbUids) {
+int metaDropTable(SMeta *pMeta, int64_t version, SVDropTbReq *pReq, SArray *tbUids, tb_uid_t *tbUid) {
void *pData = NULL;
int nData = 0;
int rc = 0;
@@ -459,6 +499,10 @@ int metaDropTable(SMeta *pMeta, int64_t version, SVDropTbReq *pReq, SArray *tbUi
taosArrayPush(tbUids, &uid);
}
+ if ((type == TSDB_CHILD_TABLE) && tbUid) {
+ *tbUid = uid;
+ }
+
tdbFree(pData);
return 0;
}
@@ -516,6 +560,9 @@ static int metaDropTableByUid(SMeta *pMeta, tb_uid_t uid, int *type) {
SDecoder dc = {0};
rc = tdbTbGet(pMeta->pUidIdx, &uid, sizeof(uid), &pData, &nData);
+ if (rc < 0) {
+ return -1;
+ }
int64_t version = ((SUidIdxVal *)pData)[0].version;
tdbTbGet(pMeta->pTbDb, &(STbDbKey){.version = version, .uid = uid}, sizeof(STbDbKey), &pData, &nData);
@@ -562,6 +609,7 @@ static int metaDropTableByUid(SMeta *pMeta, tb_uid_t uid, int *type) {
// drop schema.db (todo)
--pMeta->pVnode->config.vndStats.numOfNTables;
+ pMeta->pVnode->config.vndStats.numOfNTimeSeries -= e.ntbEntry.schemaRow.nCols - 1;
} else if (e.type == TSDB_SUPER_TABLE) {
tdbTbDelete(pMeta->pSuidIdx, &e.uid, sizeof(tb_uid_t), &pMeta->txn);
// drop schema.db (todo)
@@ -664,6 +712,8 @@ static int metaAlterTableColumn(SMeta *pMeta, int64_t version, SVAlterTbReq *pAl
pSchema->pSchema[entry.ntbEntry.schemaRow.nCols - 1].flags = pAlterTbReq->flags;
pSchema->pSchema[entry.ntbEntry.schemaRow.nCols - 1].colId = entry.ntbEntry.ncid++;
strcpy(pSchema->pSchema[entry.ntbEntry.schemaRow.nCols - 1].name, pAlterTbReq->colName);
+
+ ++pMeta->pVnode->config.vndStats.numOfNTimeSeries;
break;
case TSDB_ALTER_TABLE_DROP_COLUMN:
if (pColumn == NULL) {
@@ -684,6 +734,8 @@ static int metaAlterTableColumn(SMeta *pMeta, int64_t version, SVAlterTbReq *pAl
memmove(pColumn, pColumn + 1, tlen);
}
pSchema->nCols--;
+
+ --pMeta->pVnode->config.vndStats.numOfNTimeSeries;
break;
case TSDB_ALTER_TABLE_UPDATE_COLUMN_BYTES:
if (pColumn == NULL) {
diff --git a/source/dnode/vnode/src/sma/smaCommit.c b/source/dnode/vnode/src/sma/smaCommit.c
index ca5367f39714ed1f3a979068b0a9a7204d385f8c..07ec7d06947b569b0452b283120430bb44e919ca 100644
--- a/source/dnode/vnode/src/sma/smaCommit.c
+++ b/source/dnode/vnode/src/sma/smaCommit.c
@@ -15,13 +15,15 @@
#include "sma.h"
+extern SSmaMgmt smaMgmt;
+
static int32_t tdProcessRSmaSyncPreCommitImpl(SSma *pSma);
static int32_t tdProcessRSmaSyncCommitImpl(SSma *pSma);
static int32_t tdProcessRSmaSyncPostCommitImpl(SSma *pSma);
static int32_t tdProcessRSmaAsyncPreCommitImpl(SSma *pSma);
static int32_t tdProcessRSmaAsyncCommitImpl(SSma *pSma);
static int32_t tdProcessRSmaAsyncPostCommitImpl(SSma *pSma);
-static int32_t tdCleanupQTaskInfoFiles(SSma *pSma, SRSmaStat *pRSmaStat);
+static int32_t tdUpdateQTaskInfoFiles(SSma *pSma, SRSmaStat *pRSmaStat);
/**
* @brief Only applicable to Rollup SMA
@@ -166,114 +168,65 @@ static int32_t tdProcessRSmaSyncCommitImpl(SSma *pSma) {
return TSDB_CODE_SUCCESS;
}
-static int32_t tdCleanupQTaskInfoFiles(SSma *pSma, SRSmaStat *pRSmaStat) {
- SVnode *pVnode = pSma->pVnode;
- int64_t committed = pRSmaStat->commitAppliedVer;
- TdDirPtr pDir = NULL;
- TdDirEntryPtr pDirEntry = NULL;
- char dir[TSDB_FILENAME_LEN];
- const char *pattern = "v[0-9]+qtaskinfo\\.ver([0-9]+)?$";
- regex_t regex;
- int code = 0;
-
- tdGetVndDirName(TD_VID(pVnode), tfsGetPrimaryPath(pVnode->pTfs), VNODE_RSMA_DIR, true, dir);
-
- // Resource allocation and init
- if ((code = regcomp(®ex, pattern, REG_EXTENDED)) != 0) {
- char errbuf[128];
- regerror(code, ®ex, errbuf, sizeof(errbuf));
- smaWarn("vgId:%d, rsma post commit, regcomp for %s failed since %s", TD_VID(pVnode), dir, errbuf);
- return TSDB_CODE_FAILED;
- }
-
- if ((pDir = taosOpenDir(dir)) == NULL) {
- regfree(®ex);
- terrno = TAOS_SYSTEM_ERROR(errno);
- smaDebug("vgId:%d, rsma post commit, open dir %s failed since %s", TD_VID(pVnode), dir, terrstr());
- return TSDB_CODE_FAILED;
- }
-
- int32_t dirLen = strlen(dir);
- char *dirEnd = POINTER_SHIFT(dir, dirLen);
- regmatch_t regMatch[2];
- while ((pDirEntry = taosReadDir(pDir)) != NULL) {
- char *entryName = taosGetDirEntryName(pDirEntry);
- if (!entryName) {
- continue;
- }
-
- code = regexec(®ex, entryName, 2, regMatch, 0);
-
- if (code == 0) {
- // match
- int64_t version = -1;
- sscanf((const char *)POINTER_SHIFT(entryName, regMatch[1].rm_so), "%" PRIi64, &version);
- if ((version < committed) && (version > -1)) {
- strncpy(dirEnd, entryName, TSDB_FILENAME_LEN - dirLen);
- if (taosRemoveFile(dir) != 0) {
- terrno = TAOS_SYSTEM_ERROR(errno);
- smaWarn("vgId:%d, committed version:%" PRIi64 ", failed to remove %s since %s", TD_VID(pVnode), committed,
- dir, terrstr());
- } else {
- smaDebug("vgId:%d, committed version:%" PRIi64 ", success to remove %s", TD_VID(pVnode), committed, dir);
- }
- }
- } else if (code == REG_NOMATCH) {
- // not match
- smaTrace("vgId:%d, rsma post commit, not match %s", TD_VID(pVnode), entryName);
- continue;
- } else {
- // has other error
- char errbuf[128];
- regerror(code, ®ex, errbuf, sizeof(errbuf));
- smaWarn("vgId:%d, rsma post commit, regexec failed since %s", TD_VID(pVnode), errbuf);
-
- taosCloseDir(&pDir);
- regfree(®ex);
- return TSDB_CODE_FAILED;
- }
- }
-
- taosCloseDir(&pDir);
- regfree(®ex);
-
- return TSDB_CODE_SUCCESS;
-}
-
// SQTaskFile ======================================================
-// int32_t tCmprQTaskFile(void const *lhs, void const *rhs) {
-// int64_t *lCommitted = *(int64_t *)lhs;
-// SQTaskFile *rQTaskF = (SQTaskFile *)rhs;
-
-// if (lCommitted < rQTaskF->commitID) {
-// return -1;
-// } else if (lCommitted > rQTaskF->commitID) {
-// return 1;
-// }
-// return 0;
-// }
-
-#if 0
/**
* @brief At most time, there is only one qtaskinfo file committed latest in aTaskFile. Sometimes, there would be
* multiple qtaskinfo files supporting snapshot replication.
*
* @param pSma
- * @param pRSmaStat
+ * @param pStat
* @return int32_t
*/
-static int32_t tdCleanupQTaskInfoFiles(SSma *pSma, SRSmaStat *pRSmaStat) {
- SVnode *pVnode = pSma->pVnode;
- int64_t committed = pRSmaStat->commitAppliedVer;
- SArray *aTaskFile = pRSmaStat->aTaskFile;
+static int32_t tdUpdateQTaskInfoFiles(SSma *pSma, SRSmaStat *pStat) {
+ SVnode *pVnode = pSma->pVnode;
+ SRSmaFS *pFS = RSMA_FS(pStat);
+ int64_t committed = pStat->commitAppliedVer;
+ int64_t fsMaxVer = -1;
+ char qTaskInfoFullName[TSDB_FILENAME_LEN];
+
+ taosWLockLatch(RSMA_FS_LOCK(pStat));
+
+ for (int32_t i = 0; i < taosArrayGetSize(pFS->aQTaskInf);) {
+ SQTaskFile *pTaskF = taosArrayGet(pFS->aQTaskInf, i);
+ int32_t oldVal = atomic_fetch_sub_32(&pTaskF->nRef, 1);
+ if ((oldVal <= 1) && (pTaskF->version < committed)) {
+ tdRSmaQTaskInfoGetFullName(TD_VID(pVnode), pTaskF->version, tfsGetPrimaryPath(pVnode->pTfs), qTaskInfoFullName);
+ if (taosRemoveFile(qTaskInfoFullName) < 0) {
+ smaWarn("vgId:%d, cleanup qinf, committed %" PRIi64 ", failed to remove %s since %s", TD_VID(pVnode), committed,
+ qTaskInfoFullName, tstrerror(TAOS_SYSTEM_ERROR(errno)));
+ } else {
+ smaDebug("vgId:%d, cleanup qinf, committed %" PRIi64 ", success to remove %s", TD_VID(pVnode), committed,
+ qTaskInfoFullName);
+ }
+ taosArrayRemove(pFS->aQTaskInf, i);
+ continue;
+ }
+ ++i;
+ }
- void *qTaskFile = taosArraySearch(aTaskFile, committed, tCmprQTaskFile, TD_LE);
-
+ if (taosArrayGetSize(pFS->aQTaskInf) > 0) {
+ fsMaxVer = ((SQTaskFile *)taosArrayGetLast(pFS->aQTaskInf))->version;
+ }
+
+ if (fsMaxVer < committed) {
+ tdRSmaQTaskInfoGetFullName(TD_VID(pVnode), committed, tfsGetPrimaryPath(pVnode->pTfs), qTaskInfoFullName);
+ if (taosCheckExistFile(qTaskInfoFullName)) {
+ SQTaskFile qFile = {.nRef = 1, .padding = 0, .version = committed, .size = 0};
+ if (taosArrayPush(pFS->aQTaskInf, &qFile) < 0) {
+ taosWUnLockLatch(RSMA_FS_LOCK(pStat));
+ terrno = TSDB_CODE_OUT_OF_MEMORY;
+ return TSDB_CODE_FAILED;
+ }
+ }
+ } else {
+ smaDebug("vgId:%d, update qinf, no need as committed %" PRIi64 " not larger than fsMaxVer %" PRIi64, TD_VID(pVnode),
+ committed, fsMaxVer);
+ }
+ taosWUnLockLatch(RSMA_FS_LOCK(pStat));
return TSDB_CODE_SUCCESS;
}
-#endif
/**
* @brief post-commit for rollup sma
@@ -290,8 +243,7 @@ static int32_t tdProcessRSmaSyncPostCommitImpl(SSma *pSma) {
SRSmaStat *pRSmaStat = SMA_RSMA_STAT(pSma);
- // cleanup outdated qtaskinfo files
- tdCleanupQTaskInfoFiles(pSma, pRSmaStat);
+ tdUpdateQTaskInfoFiles(pSma, pRSmaStat);
return TSDB_CODE_SUCCESS;
}
@@ -312,15 +264,22 @@ static int32_t tdProcessRSmaAsyncPreCommitImpl(SSma *pSma) {
SSmaStat *pStat = SMA_ENV_STAT(pEnv);
SRSmaStat *pRSmaStat = SMA_STAT_RSMA(pStat);
+ int32_t nLoops = 0;
// step 1: set rsma stat
atomic_store_8(RSMA_TRIGGER_STAT(pRSmaStat), TASK_TRIGGER_STAT_PAUSED);
- atomic_store_8(RSMA_COMMIT_STAT(pRSmaStat), 1);
+ while (atomic_val_compare_exchange_8(RSMA_COMMIT_STAT(pRSmaStat), 0, 1) != 0) {
+ ++nLoops;
+ if (nLoops > 1000) {
+ sched_yield();
+ nLoops = 0;
+ }
+ }
pRSmaStat->commitAppliedVer = pSma->pVnode->state.applied;
ASSERT(pRSmaStat->commitAppliedVer > 0);
// step 2: wait for all triggered fetch tasks to finish
- int32_t nLoops = 0;
+ nLoops = 0;
while (1) {
if (T_REF_VAL_GET(pStat) == 0) {
smaDebug("vgId:%d, rsma commit, fetch tasks are all finished", SMA_VID(pSma));
@@ -344,7 +303,8 @@ static int32_t tdProcessRSmaAsyncPreCommitImpl(SSma *pSma) {
return TSDB_CODE_FAILED;
}
- smaInfo("vgId:%d, rsma commit, wait for all items to be consumed, TID:%p", SMA_VID(pSma), (void*)taosGetSelfPthreadId());
+ smaInfo("vgId:%d, rsma commit, wait for all items to be consumed, TID:%p", SMA_VID(pSma),
+ (void *)taosGetSelfPthreadId());
nLoops = 0;
while (atomic_load_64(&pRSmaStat->nBufItems) > 0) {
++nLoops;
@@ -357,12 +317,12 @@ static int32_t tdProcessRSmaAsyncPreCommitImpl(SSma *pSma) {
if (tdRSmaPersistExecImpl(pRSmaStat, RSMA_INFO_HASH(pRSmaStat)) < 0) {
return TSDB_CODE_FAILED;
}
- smaInfo("vgId:%d, rsma commit, operator state commited, TID:%p", SMA_VID(pSma), (void *)taosGetSelfPthreadId());
+ smaInfo("vgId:%d, rsma commit, operator state committed, TID:%p", SMA_VID(pSma), (void *)taosGetSelfPthreadId());
-#if 0 // consuming task of qTaskInfo clone
+#if 0 // consuming task of qTaskInfo clone
// step 4: swap queue/qall and iQueue/iQall
// lock
- // taosWLockLatch(SMA_ENV_LOCK(pEnv));
+ taosWLockLatch(SMA_ENV_LOCK(pEnv));
ASSERT(RSMA_INFO_HASH(pRSmaStat));
@@ -378,7 +338,7 @@ static int32_t tdProcessRSmaAsyncPreCommitImpl(SSma *pSma) {
}
// unlock
- // taosWUnLockLatch(SMA_ENV_LOCK(pEnv));
+ taosWUnLockLatch(SMA_ENV_LOCK(pEnv));
#endif
return TSDB_CODE_SUCCESS;
@@ -420,33 +380,29 @@ static int32_t tdProcessRSmaAsyncPostCommitImpl(SSma *pSma) {
}
SRSmaStat *pRSmaStat = (SRSmaStat *)SMA_ENV_STAT(pEnv);
- SArray *rsmaDeleted = NULL;
// step 1: merge qTaskInfo and iQTaskInfo
// lock
- // taosWLockLatch(SMA_ENV_LOCK(pEnv));
-
- void *pIter = NULL;
- while ((pIter = taosHashIterate(RSMA_INFO_HASH(pRSmaStat), pIter))) {
- tb_uid_t *pSuid = (tb_uid_t *)taosHashGetKey(pIter, NULL);
- SRSmaInfo *pRSmaInfo = *(SRSmaInfo **)pIter;
- if (RSMA_INFO_IS_DEL(pRSmaInfo)) {
- int32_t refVal = T_REF_VAL_GET(pRSmaInfo);
- if (refVal == 0) {
- if (!rsmaDeleted) {
- if ((rsmaDeleted = taosArrayInit(1, sizeof(tb_uid_t)))) {
- taosArrayPush(rsmaDeleted, pSuid);
- }
+ if (1 == atomic_val_compare_exchange_8(&pRSmaStat->delFlag, 1, 0)) {
+ taosWLockLatch(SMA_ENV_LOCK(pEnv));
+
+ void *pIter = NULL;
+ while ((pIter = taosHashIterate(RSMA_INFO_HASH(pRSmaStat), pIter))) {
+ tb_uid_t *pSuid = (tb_uid_t *)taosHashGetKey(pIter, NULL);
+ SRSmaInfo *pRSmaInfo = *(SRSmaInfo **)pIter;
+ if (RSMA_INFO_IS_DEL(pRSmaInfo)) {
+ int32_t refVal = T_REF_VAL_GET(pRSmaInfo);
+ if (refVal == 0) {
+ taosHashRemove(RSMA_INFO_HASH(pRSmaStat), pSuid, sizeof(*pSuid));
+ } else {
+ smaDebug(
+ "vgId:%d, rsma async post commit, not free rsma info since ref is %d although already deleted for "
+ "table:%" PRIi64,
+ SMA_VID(pSma), refVal, *pSuid);
}
- } else {
- smaDebug(
- "vgId:%d, rsma async post commit, not free rsma info since ref is %d although already deleted for "
- "table:%" PRIi64,
- SMA_VID(pSma), refVal, *pSuid);
- }
- continue;
- }
+ continue;
+ }
#if 0
if (pRSmaInfo->taskInfo[0]) {
if (pRSmaInfo->iTaskInfo[0]) {
@@ -461,27 +417,13 @@ static int32_t tdProcessRSmaAsyncPostCommitImpl(SSma *pSma) {
taosHashPut(RSMA_INFO_HASH(pRSmaStat), pSuid, sizeof(tb_uid_t), pIter, sizeof(pIter));
smaDebug("vgId:%d, rsma async post commit, migrated from iRsmaInfoHash for table:%" PRIi64, SMA_VID(pSma), *pSuid);
#endif
- }
-
- for (int32_t i = 0; i < taosArrayGetSize(rsmaDeleted); ++i) {
- tb_uid_t *pSuid = taosArrayGet(rsmaDeleted, i);
- void *pRSmaInfo = taosHashGet(RSMA_INFO_HASH(pRSmaStat), pSuid, sizeof(tb_uid_t));
- if ((pRSmaInfo = *(SRSmaInfo **)pRSmaInfo)) {
- tdFreeRSmaInfo(pSma, pRSmaInfo, true);
- smaDebug(
- "vgId:%d, rsma async post commit, free rsma info since already deleted and ref is 0 for "
- "table:%" PRIi64,
- SMA_VID(pSma), *pSuid);
}
- taosHashRemove(RSMA_INFO_HASH(pRSmaStat), pSuid, sizeof(tb_uid_t));
- }
- taosArrayDestroy(rsmaDeleted);
- // unlock
- // taosWUnLockLatch(SMA_ENV_LOCK(pEnv));
+ // unlock
+ taosWUnLockLatch(SMA_ENV_LOCK(pEnv));
+ }
- // step 2: cleanup outdated qtaskinfo files
- tdCleanupQTaskInfoFiles(pSma, pRSmaStat);
+ tdUpdateQTaskInfoFiles(pSma, pRSmaStat);
atomic_store_8(RSMA_COMMIT_STAT(pRSmaStat), 0);
diff --git a/source/dnode/vnode/src/sma/smaEnv.c b/source/dnode/vnode/src/sma/smaEnv.c
index e3b83f9955faf7a8000d18974cb6ec3639948c47..b870ea1b6223ce0d2fd1cf527cd5b0c20c27a5b7 100644
--- a/source/dnode/vnode/src/sma/smaEnv.c
+++ b/source/dnode/vnode/src/sma/smaEnv.c
@@ -23,11 +23,15 @@ extern SSmaMgmt smaMgmt;
// declaration of static functions
-static int32_t tdInitSmaStat(SSmaStat **pSmaStat, int8_t smaType, const SSma *pSma);
-static SSmaEnv *tdNewSmaEnv(const SSma *pSma, int8_t smaType, const char *path);
-static int32_t tdInitSmaEnv(SSma *pSma, int8_t smaType, const char *path, SSmaEnv **pEnv);
-static void *tdFreeTSmaStat(STSmaStat *pStat);
-static void tdDestroyRSmaStat(void *pRSmaStat);
+static int32_t tdNewSmaEnv(SSma *pSma, int8_t smaType, SSmaEnv **ppEnv);
+static int32_t tdInitSmaEnv(SSma *pSma, int8_t smaType, SSmaEnv **ppEnv);
+static int32_t tdInitSmaStat(SSmaStat **pSmaStat, int8_t smaType, const SSma *pSma);
+static int32_t tdRsmaStartExecutor(const SSma *pSma);
+static int32_t tdRsmaStopExecutor(const SSma *pSma);
+static int32_t tdDestroySmaState(SSmaStat *pSmaStat, int8_t smaType);
+static void *tdFreeSmaState(SSmaStat *pSmaStat, int8_t smaType);
+static void *tdFreeTSmaStat(STSmaStat *pStat);
+static void tdDestroyRSmaStat(void *pRSmaStat);
/**
* @brief rsma init
@@ -57,12 +61,23 @@ int32_t smaInit() {
return TSDB_CODE_FAILED;
}
+ int32_t type = (8 == POINTER_BYTES) ? TSDB_DATA_TYPE_UBIGINT : TSDB_DATA_TYPE_UINT;
+ smaMgmt.refHash = taosHashInit(64, taosGetDefaultHashFunction(type), true, HASH_ENTRY_LOCK);
+ if (!smaMgmt.refHash) {
+ taosCloseRef(smaMgmt.rsetId);
+ atomic_store_8(&smaMgmt.inited, 0);
+ smaError("failed to init sma tmr hanle since %s", terrstr());
+ return TSDB_CODE_FAILED;
+ }
+
// init fetch timer handle
smaMgmt.tmrHandle = taosTmrInit(10000, 100, 10000, "RSMA");
if (!smaMgmt.tmrHandle) {
taosCloseRef(smaMgmt.rsetId);
+ taosHashCleanup(smaMgmt.refHash);
+ smaMgmt.refHash = NULL;
atomic_store_8(&smaMgmt.inited, 0);
- smaError("failed to init sma tmr hanle since %s", terrstr());
+ smaError("failed to init sma tmr handle since %s", terrstr());
return TSDB_CODE_FAILED;
}
@@ -91,41 +106,50 @@ void smaCleanUp() {
if (old == 1) {
taosCloseRef(smaMgmt.rsetId);
+ taosHashCleanup(smaMgmt.refHash);
+ smaMgmt.refHash = NULL;
taosTmrCleanUp(smaMgmt.tmrHandle);
smaInfo("sma mgmt env is cleaned up, rsetId:%d, tmrHandle:%p", smaMgmt.rsetId, smaMgmt.tmrHandle);
atomic_store_8(&smaMgmt.inited, 0);
}
}
-static SSmaEnv *tdNewSmaEnv(const SSma *pSma, int8_t smaType, const char *path) {
+static int32_t tdNewSmaEnv(SSma *pSma, int8_t smaType, SSmaEnv **ppEnv) {
SSmaEnv *pEnv = NULL;
pEnv = (SSmaEnv *)taosMemoryCalloc(1, sizeof(SSmaEnv));
+ *ppEnv = pEnv;
if (!pEnv) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
- return NULL;
+ return TSDB_CODE_FAILED;
}
SMA_ENV_TYPE(pEnv) = smaType;
taosInitRWLatch(&(pEnv->lock));
+ (smaType == TSDB_SMA_TYPE_TIME_RANGE) ? atomic_store_ptr(&SMA_TSMA_ENV(pSma), *ppEnv)
+ : atomic_store_ptr(&SMA_RSMA_ENV(pSma), *ppEnv);
+
if (tdInitSmaStat(&SMA_ENV_STAT(pEnv), smaType, pSma) != TSDB_CODE_SUCCESS) {
tdFreeSmaEnv(pEnv);
- return NULL;
+ *ppEnv = NULL;
+ (smaType == TSDB_SMA_TYPE_TIME_RANGE) ? atomic_store_ptr(&SMA_TSMA_ENV(pSma), NULL)
+ : atomic_store_ptr(&SMA_RSMA_ENV(pSma), NULL);
+ return TSDB_CODE_FAILED;
}
- return pEnv;
+ return TSDB_CODE_SUCCESS;
}
-static int32_t tdInitSmaEnv(SSma *pSma, int8_t smaType, const char *path, SSmaEnv **pEnv) {
- if (!pEnv) {
+static int32_t tdInitSmaEnv(SSma *pSma, int8_t smaType, SSmaEnv **ppEnv) {
+ if (!ppEnv) {
terrno = TSDB_CODE_INVALID_PTR;
return TSDB_CODE_FAILED;
}
- if (!(*pEnv)) {
- if (!(*pEnv = tdNewSmaEnv(pSma, smaType, path))) {
+ if (!(*ppEnv)) {
+ if (tdNewSmaEnv(pSma, smaType, ppEnv) != TSDB_CODE_SUCCESS) {
return TSDB_CODE_FAILED;
}
}
@@ -153,37 +177,19 @@ void *tdFreeSmaEnv(SSmaEnv *pSmaEnv) {
return NULL;
}
-int32_t tdRefSmaStat(SSma *pSma, SSmaStat *pStat) {
- if (!pStat) return 0;
-
- int ref = T_REF_INC(pStat);
- smaDebug("vgId:%d, ref sma stat:%p, val:%d", SMA_VID(pSma), pStat, ref);
- return 0;
-}
-
-int32_t tdUnRefSmaStat(SSma *pSma, SSmaStat *pStat) {
- if (!pStat) return 0;
+static void tRSmaInfoHashFreeNode(void *data) {
+ SRSmaInfo *pRSmaInfo = NULL;
+ SRSmaInfoItem *pItem = NULL;
- int ref = T_REF_DEC(pStat);
- smaDebug("vgId:%d, unref sma stat:%p, val:%d", SMA_VID(pSma), pStat, ref);
- return 0;
-}
-
-int32_t tdRefRSmaInfo(SSma *pSma, SRSmaInfo *pRSmaInfo) {
- if (!pRSmaInfo) return 0;
-
- int ref = T_REF_INC(pRSmaInfo);
- smaDebug("vgId:%d, ref rsma info:%p, val:%d", SMA_VID(pSma), pRSmaInfo, ref);
- return 0;
-}
-
-int32_t tdUnRefRSmaInfo(SSma *pSma, SRSmaInfo *pRSmaInfo) {
- if (!pRSmaInfo) return 0;
-
- int ref = T_REF_DEC(pRSmaInfo);
- smaDebug("vgId:%d, unref rsma info:%p, val:%d", SMA_VID(pSma), pRSmaInfo, ref);
-
- return 0;
+ if ((pRSmaInfo = *(SRSmaInfo **)data)) {
+ if ((pItem = RSMA_INFO_ITEM((SRSmaInfo *)pRSmaInfo, 0)) && pItem->level) {
+ taosHashRemove(smaMgmt.refHash, &pItem, POINTER_BYTES);
+ }
+ if ((pItem = RSMA_INFO_ITEM((SRSmaInfo *)pRSmaInfo, 1)) && pItem->level) {
+ taosHashRemove(smaMgmt.refHash, &pItem, POINTER_BYTES);
+ }
+ tdFreeRSmaInfo(pRSmaInfo->pSma, pRSmaInfo, true);
+ }
}
static int32_t tdInitSmaStat(SSmaStat **pSmaStat, int8_t smaType, const SSma *pSma) {
@@ -199,7 +205,7 @@ static int32_t tdInitSmaStat(SSmaStat **pSmaStat, int8_t smaType, const SSma *pS
* tdInitSmaStat invoked in other multithread environment later.
*/
if (!(*pSmaStat)) {
- *pSmaStat = (SSmaStat *)taosMemoryCalloc(1, sizeof(SSmaStat));
+ *pSmaStat = (SSmaStat *)taosMemoryCalloc(1, sizeof(SSmaStat) + sizeof(TdThread) * tsNumOfVnodeRsmaThreads);
if (!(*pSmaStat)) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
return TSDB_CODE_FAILED;
@@ -231,6 +237,16 @@ static int32_t tdInitSmaStat(SSmaStat **pSmaStat, int8_t smaType, const SSma *pS
if (!RSMA_INFO_HASH(pRSmaStat)) {
return TSDB_CODE_FAILED;
}
+ taosHashSetFreeFp(RSMA_INFO_HASH(pRSmaStat), tRSmaInfoHashFreeNode);
+
+ if (tdRsmaStartExecutor(pSma) < 0) {
+ return TSDB_CODE_FAILED;
+ }
+
+ if (!(RSMA_FS(pRSmaStat)->aQTaskInf = taosArrayInit(1, sizeof(SQTaskFile)))) {
+ terrno = TSDB_CODE_OUT_OF_MEMORY;
+ return TSDB_CODE_FAILED;
+ }
} else if (smaType == TSDB_SMA_TYPE_TIME_RANGE) {
// TODO
} else {
@@ -265,14 +281,6 @@ static void tdDestroyRSmaStat(void *pRSmaStat) {
tsem_destroy(&(pStat->notEmpty));
// step 2: destroy the rsma info and associated fetch tasks
- if (taosHashGetSize(RSMA_INFO_HASH(pStat)) > 0) {
- void *infoHash = taosHashIterate(RSMA_INFO_HASH(pStat), NULL);
- while (infoHash) {
- SRSmaInfo *pSmaInfo = *(SRSmaInfo **)infoHash;
- tdFreeRSmaInfo(pSma, pSmaInfo, true);
- infoHash = taosHashIterate(RSMA_INFO_HASH(pStat), infoHash);
- }
- }
taosHashCleanup(RSMA_INFO_HASH(pStat));
// step 3: wait for all triggered fetch tasks to finish
@@ -291,12 +299,18 @@ static void tdDestroyRSmaStat(void *pRSmaStat) {
}
}
- // step 5: free pStat
+ // step 4:
+ tdRsmaStopExecutor(pSma);
+
+ // step 5:
+ tdRSmaFSClose(RSMA_FS(pStat));
+
+ // step 6: free pStat
taosMemoryFreeClear(pStat);
}
}
-void *tdFreeSmaState(SSmaStat *pSmaStat, int8_t smaType) {
+static void *tdFreeSmaState(SSmaStat *pSmaStat, int8_t smaType) {
tdDestroySmaState(pSmaStat, smaType);
if (smaType == TSDB_SMA_TYPE_TIME_RANGE) {
taosMemoryFreeClear(pSmaStat);
@@ -313,7 +327,7 @@ void *tdFreeSmaState(SSmaStat *pSmaStat, int8_t smaType) {
* @return int32_t
*/
-int32_t tdDestroySmaState(SSmaStat *pSmaStat, int8_t smaType) {
+static int32_t tdDestroySmaState(SSmaStat *pSmaStat, int8_t smaType) {
if (pSmaStat) {
if (smaType == TSDB_SMA_TYPE_TIME_RANGE) {
tdDestroyTSmaStat(SMA_STAT_TSMA(pSmaStat));
@@ -321,7 +335,7 @@ int32_t tdDestroySmaState(SSmaStat *pSmaStat, int8_t smaType) {
SRSmaStat *pRSmaStat = &pSmaStat->rsmaStat;
int32_t vid = SMA_VID(pRSmaStat->pSma);
int64_t refId = RSMA_REF_ID(pRSmaStat);
- if (taosRemoveRef(smaMgmt.rsetId, RSMA_REF_ID(pRSmaStat)) < 0) {
+ if (taosRemoveRef(smaMgmt.rsetId, refId) < 0) {
smaError("vgId:%d, remove refId:%" PRIi64 " from rsmaRef:%" PRIi32 " failed since %s", vid, refId,
smaMgmt.rsetId, terrstr());
} else {
@@ -381,17 +395,72 @@ int32_t tdCheckAndInitSmaEnv(SSma *pSma, int8_t smaType) {
pEnv = (smaType == TSDB_SMA_TYPE_TIME_RANGE) ? atomic_load_ptr(&SMA_TSMA_ENV(pSma))
: atomic_load_ptr(&SMA_RSMA_ENV(pSma));
if (!pEnv) {
- char rname[TSDB_FILENAME_LEN] = {0};
-
- if (tdInitSmaEnv(pSma, smaType, rname, &pEnv) < 0) {
+ if (tdInitSmaEnv(pSma, smaType, &pEnv) < 0) {
tdUnLockSma(pSma);
return TSDB_CODE_FAILED;
}
-
- (smaType == TSDB_SMA_TYPE_TIME_RANGE) ? atomic_store_ptr(&SMA_TSMA_ENV(pSma), pEnv)
- : atomic_store_ptr(&SMA_RSMA_ENV(pSma), pEnv);
}
tdUnLockSma(pSma);
return TSDB_CODE_SUCCESS;
};
+
+void *tdRSmaExecutorFunc(void *param) {
+ setThreadName("vnode-rsma");
+
+ tdRSmaProcessExecImpl((SSma *)param, RSMA_EXEC_OVERFLOW);
+ return NULL;
+}
+
+static int32_t tdRsmaStartExecutor(const SSma *pSma) {
+ TdThreadAttr thAttr = {0};
+ taosThreadAttrInit(&thAttr);
+ taosThreadAttrSetDetachState(&thAttr, PTHREAD_CREATE_JOINABLE);
+
+ SSmaEnv *pEnv = SMA_RSMA_ENV(pSma);
+ SSmaStat *pStat = SMA_ENV_STAT(pEnv);
+ TdThread *pthread = (TdThread *)&pStat->data;
+
+ for (int32_t i = 0; i < tsNumOfVnodeRsmaThreads; ++i) {
+ if (taosThreadCreate(&pthread[i], &thAttr, tdRSmaExecutorFunc, (void *)pSma) != 0) {
+ terrno = TAOS_SYSTEM_ERROR(errno);
+ smaError("vgId:%d, failed to create pthread for rsma since %s", SMA_VID(pSma), terrstr());
+ return -1;
+ }
+ smaDebug("vgId:%d, success to create pthread for rsma", SMA_VID(pSma));
+ }
+
+ taosThreadAttrDestroy(&thAttr);
+ return 0;
+}
+
+static int32_t tdRsmaStopExecutor(const SSma *pSma) {
+ if (pSma && VND_IS_RSMA(pSma->pVnode)) {
+ SSmaEnv *pEnv = NULL;
+ SSmaStat *pStat = NULL;
+ SRSmaStat *pRSmaStat = NULL;
+ TdThread *pthread = NULL;
+
+ if (!(pEnv = SMA_RSMA_ENV(pSma)) || !(pStat = SMA_ENV_STAT(pEnv))) {
+ return 0;
+ }
+
+ pEnv->flag |= SMA_ENV_FLG_CLOSE;
+ pRSmaStat = (SRSmaStat *)pStat;
+ pthread = (TdThread *)&pStat->data;
+
+ for (int32_t i = 0; i < tsNumOfVnodeRsmaThreads; ++i) {
+ tsem_post(&(pRSmaStat->notEmpty));
+ }
+
+ for (int32_t i = 0; i < tsNumOfVnodeRsmaThreads; ++i) {
+ if (taosCheckPthreadValid(pthread[i])) {
+ smaDebug("vgId:%d, start to join pthread for rsma:%" PRId64, SMA_VID(pSma), pthread[i]);
+ taosThreadJoin(pthread[i], NULL);
+ }
+ }
+
+ smaInfo("vgId:%d, rsma executor stopped, number:%d", SMA_VID(pSma), tsNumOfVnodeRsmaThreads);
+ }
+ return 0;
+}
\ No newline at end of file
diff --git a/source/dnode/vnode/src/sma/smaFS.c b/source/dnode/vnode/src/sma/smaFS.c
new file mode 100644
index 0000000000000000000000000000000000000000..8e8611f0e869d9e3d29c7456af142f05dba15b91
--- /dev/null
+++ b/source/dnode/vnode/src/sma/smaFS.c
@@ -0,0 +1,264 @@
+/*
+ * Copyright (c) 2019 TAOS Data, Inc.
+ *
+ * This program is free software: you can use, redistribute, and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3
+ * or later ("AGPL"), as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see .
+ */
+
+#include "sma.h"
+
+// =================================================================================================
+
+static int32_t tdFetchQTaskInfoFiles(SSma *pSma, int64_t version, SArray **output);
+static int32_t tdQTaskInfCmprFn1(const void *p1, const void *p2);
+static int32_t tdQTaskInfCmprFn2(const void *p1, const void *p2);
+/**
+ * @brief Open RSma FS from qTaskInfo files
+ *
+ * @param pSma
+ * @param version
+ * @return int32_t
+ */
+int32_t tdRSmaFSOpen(SSma *pSma, int64_t version) {
+ SVnode *pVnode = pSma->pVnode;
+ int64_t commitID = pVnode->state.commitID;
+ SSmaEnv *pEnv = SMA_RSMA_ENV(pSma);
+ SRSmaStat *pStat = NULL;
+ SArray *output = NULL;
+
+ terrno = TSDB_CODE_SUCCESS;
+
+ if (!pEnv) {
+ return TSDB_CODE_SUCCESS;
+ }
+
+ if (tdFetchQTaskInfoFiles(pSma, version, &output) < 0) {
+ goto _end;
+ }
+
+ pStat = (SRSmaStat *)SMA_ENV_STAT(pEnv);
+
+ for (int32_t i = 0; i < taosArrayGetSize(output); ++i) {
+ int32_t vid = 0;
+ int64_t version = -1;
+ sscanf((const char *)taosArrayGetP(output, i), "v%dqinf.v%" PRIi64, &vid, &version);
+ SQTaskFile qTaskFile = {.version = version, .nRef = 1};
+ if ((terrno = tdRSmaFSUpsertQTaskFile(RSMA_FS(pStat), &qTaskFile)) < 0) {
+ goto _end;
+ }
+ smaInfo("vgId:%d, open fs, version:%" PRIi64 ", ref:%" PRIi64, TD_VID(pVnode), qTaskFile.version, qTaskFile.nRef);
+ }
+
+_end:
+ for (int32_t i = 0; i < taosArrayGetSize(output); ++i) {
+ void *ptr = taosArrayGetP(output, i);
+ taosMemoryFreeClear(ptr);
+ }
+ taosArrayDestroy(output);
+
+ if (terrno != TSDB_CODE_SUCCESS) {
+ smaError("vgId:%d, open rsma fs failed since %s", TD_VID(pVnode), terrstr());
+ return TSDB_CODE_FAILED;
+ }
+ return TSDB_CODE_SUCCESS;
+}
+
+void tdRSmaFSClose(SRSmaFS *fs) { taosArrayDestroy(fs->aQTaskInf); }
+
+static int32_t tdQTaskInfCmprFn1(const void *p1, const void *p2) {
+ if (*(int64_t *)p1 < ((SQTaskFile *)p2)->version) {
+ return -1;
+ } else if (*(int64_t *)p1 > ((SQTaskFile *)p2)->version) {
+ return 1;
+ }
+ return 0;
+}
+
+int32_t tdRSmaFSRef(SSma *pSma, SRSmaStat *pStat, int64_t version) {
+ SArray *aQTaskInf = RSMA_FS(pStat)->aQTaskInf;
+ SQTaskFile *pTaskF = NULL;
+ int32_t oldVal = 0;
+
+ taosRLockLatch(RSMA_FS_LOCK(pStat));
+ if ((pTaskF = taosArraySearch(aQTaskInf, &version, tdQTaskInfCmprFn1, TD_EQ))) {
+ oldVal = atomic_fetch_add_32(&pTaskF->nRef, 1);
+ ASSERT(oldVal > 0);
+ }
+ taosRUnLockLatch(RSMA_FS_LOCK(pStat));
+ return oldVal;
+}
+
+int64_t tdRSmaFSMaxVer(SSma *pSma, SRSmaStat *pStat) {
+ SArray *aQTaskInf = RSMA_FS(pStat)->aQTaskInf;
+ int64_t version = -1;
+
+ taosRLockLatch(RSMA_FS_LOCK(pStat));
+ if (taosArrayGetSize(aQTaskInf) > 0) {
+ version = ((SQTaskFile *)taosArrayGetLast(aQTaskInf))->version;
+ }
+ taosRUnLockLatch(RSMA_FS_LOCK(pStat));
+ return version;
+}
+
+void tdRSmaFSUnRef(SSma *pSma, SRSmaStat *pStat, int64_t version) {
+ SVnode *pVnode = pSma->pVnode;
+ SArray *aQTaskInf = RSMA_FS(pStat)->aQTaskInf;
+ char qTaskFullName[TSDB_FILENAME_LEN];
+ SQTaskFile *pTaskF = NULL;
+ int32_t idx = -1;
+
+ taosWLockLatch(RSMA_FS_LOCK(pStat));
+ if ((idx = taosArraySearchIdx(aQTaskInf, &version, tdQTaskInfCmprFn1, TD_EQ)) >= 0) {
+ ASSERT(idx < taosArrayGetSize(aQTaskInf));
+ pTaskF = taosArrayGet(aQTaskInf, idx);
+ if (atomic_sub_fetch_32(&pTaskF->nRef, 1) <= 0) {
+ tdRSmaQTaskInfoGetFullName(TD_VID(pVnode), pTaskF->version, tfsGetPrimaryPath(pVnode->pTfs), qTaskFullName);
+ if (taosRemoveFile(qTaskFullName) < 0) {
+ smaWarn("vgId:%d, failed to remove %s since %s", TD_VID(pVnode), qTaskFullName,
+ tstrerror(TAOS_SYSTEM_ERROR(errno)));
+ } else {
+ smaDebug("vgId:%d, success to remove %s", TD_VID(pVnode), qTaskFullName);
+ }
+ taosArrayRemove(aQTaskInf, idx);
+ }
+ }
+ taosWUnLockLatch(RSMA_FS_LOCK(pStat));
+}
+
+/**
+ * @brief Fetch qtaskfiles LE than version
+ *
+ * @param pSma
+ * @param version
+ * @param output
+ * @return int32_t
+ */
+static int32_t tdFetchQTaskInfoFiles(SSma *pSma, int64_t version, SArray **output) {
+ SVnode *pVnode = pSma->pVnode;
+ TdDirPtr pDir = NULL;
+ TdDirEntryPtr pDirEntry = NULL;
+ char dir[TSDB_FILENAME_LEN];
+ const char *pattern = "v[0-9]+qinf\\.v([0-9]+)?$";
+ regex_t regex;
+ int code = 0;
+
+ tdGetVndDirName(TD_VID(pVnode), tfsGetPrimaryPath(pVnode->pTfs), VNODE_RSMA_DIR, true, dir);
+
+ if (!taosCheckExistFile(dir)) {
+ smaDebug("vgId:%d, fetch qtask files, no need as dir %s not exist", TD_VID(pVnode), dir);
+ return TSDB_CODE_SUCCESS;
+ }
+
+ // Resource allocation and init
+ if ((code = regcomp(®ex, pattern, REG_EXTENDED)) != 0) {
+ terrno = TSDB_CODE_RSMA_REGEX_MATCH;
+ char errbuf[128];
+ regerror(code, ®ex, errbuf, sizeof(errbuf));
+ smaWarn("vgId:%d, fetch qtask files, regcomp for %s failed since %s", TD_VID(pVnode), dir, errbuf);
+ return TSDB_CODE_FAILED;
+ }
+
+ if (!(pDir = taosOpenDir(dir))) {
+ regfree(®ex);
+ terrno = TAOS_SYSTEM_ERROR(errno);
+ smaError("vgId:%d, fetch qtask files, open dir %s failed since %s", TD_VID(pVnode), dir, terrstr());
+ return TSDB_CODE_FAILED;
+ }
+
+ int32_t dirLen = strlen(dir);
+ char *dirEnd = POINTER_SHIFT(dir, dirLen);
+ regmatch_t regMatch[2];
+ while ((pDirEntry = taosReadDir(pDir))) {
+ char *entryName = taosGetDirEntryName(pDirEntry);
+ if (!entryName) {
+ continue;
+ }
+
+ code = regexec(®ex, entryName, 2, regMatch, 0);
+
+ if (code == 0) {
+ // match
+ smaInfo("vgId:%d, fetch qtask files, max ver:%" PRIi64 ", %s found", TD_VID(pVnode), version, entryName);
+
+ int64_t ver = -1;
+ sscanf((const char *)POINTER_SHIFT(entryName, regMatch[1].rm_so), "%" PRIi64, &ver);
+ if ((ver <= version) && (ver > -1)) {
+ if (!(*output)) {
+ if (!(*output = taosArrayInit(1, POINTER_BYTES))) {
+ terrno = TSDB_CODE_OUT_OF_MEMORY;
+ goto _end;
+ }
+ }
+ char *entryDup = strdup(entryName);
+ if (!entryDup) {
+ terrno = TSDB_CODE_OUT_OF_MEMORY;
+ goto _end;
+ }
+ if (!taosArrayPush(*output, &entryDup)) {
+ terrno = TSDB_CODE_OUT_OF_MEMORY;
+ goto _end;
+ }
+ } else {
+ }
+ } else if (code == REG_NOMATCH) {
+ // not match
+ smaTrace("vgId:%d, fetch qtask files, not match %s", TD_VID(pVnode), entryName);
+ continue;
+ } else {
+ // has other error
+ char errbuf[128];
+ regerror(code, ®ex, errbuf, sizeof(errbuf));
+ smaWarn("vgId:%d, fetch qtask files, regexec failed since %s", TD_VID(pVnode), errbuf);
+ terrno = TSDB_CODE_RSMA_REGEX_MATCH;
+ goto _end;
+ }
+ }
+_end:
+ taosCloseDir(&pDir);
+ regfree(®ex);
+ return terrno == 0 ? TSDB_CODE_SUCCESS : TSDB_CODE_FAILED;
+}
+
+static int32_t tdQTaskFileCmprFn2(const void *p1, const void *p2) {
+ if (((SQTaskFile *)p1)->version < ((SQTaskFile *)p2)->version) {
+ return -1;
+ } else if (((SQTaskFile *)p1)->version > ((SQTaskFile *)p2)->version) {
+ return 1;
+ }
+
+ return 0;
+}
+
+int32_t tdRSmaFSUpsertQTaskFile(SRSmaFS *pFS, SQTaskFile *qTaskFile) {
+ int32_t code = 0;
+ int32_t idx = taosArraySearchIdx(pFS->aQTaskInf, qTaskFile, tdQTaskFileCmprFn2, TD_GE);
+
+ if (idx < 0) {
+ idx = taosArrayGetSize(pFS->aQTaskInf);
+ } else {
+ SQTaskFile *pTaskF = (SQTaskFile *)taosArrayGet(pFS->aQTaskInf, idx);
+ int32_t c = tdQTaskFileCmprFn2(pTaskF, qTaskFile);
+ if (c == 0) {
+ pTaskF->nRef = qTaskFile->nRef;
+ pTaskF->version = qTaskFile->version;
+ pTaskF->size = qTaskFile->size;
+ goto _exit;
+ }
+ }
+
+ if (taosArrayInsert(pFS->aQTaskInf, idx, qTaskFile) == NULL) {
+ code = TSDB_CODE_OUT_OF_MEMORY;
+ goto _exit;
+ }
+
+_exit:
+ return code;
+}
\ No newline at end of file
diff --git a/source/dnode/vnode/src/sma/smaOpen.c b/source/dnode/vnode/src/sma/smaOpen.c
index e2710b26e3d3672aac1b8053b019aa0addd37920..d9ffda279f16501ac8f39fde7fe14728640db676 100644
--- a/source/dnode/vnode/src/sma/smaOpen.c
+++ b/source/dnode/vnode/src/sma/smaOpen.c
@@ -16,17 +16,17 @@
#include "sma.h"
#include "tsdb.h"
-static int32_t smaEvalDays(SRetention *r, int8_t precision);
-static int32_t smaSetKeepCfg(STsdbKeepCfg *pKeepCfg, STsdbCfg *pCfg, int type);
+static int32_t smaEvalDays(SVnode *pVnode, SRetention *r, int8_t level, int8_t precision, int32_t duration);
+static int32_t smaSetKeepCfg(SVnode *pVnode, STsdbKeepCfg *pKeepCfg, STsdbCfg *pCfg, int type);
static int32_t rsmaRestore(SSma *pSma);
-#define SMA_SET_KEEP_CFG(l) \
+#define SMA_SET_KEEP_CFG(v, l) \
do { \
SRetention *r = &pCfg->retentions[l]; \
pKeepCfg->keep2 = convertTimeFromPrecisionToUnit(r->keep, pCfg->precision, TIME_UNIT_MINUTE); \
pKeepCfg->keep0 = pKeepCfg->keep2; \
pKeepCfg->keep1 = pKeepCfg->keep2; \
- pKeepCfg->days = smaEvalDays(r, pCfg->precision); \
+ pKeepCfg->days = smaEvalDays(v, pCfg->retentions, l, pCfg->precision, pCfg->days); \
} while (0)
#define SMA_OPEN_RSMA_IMPL(v, l) \
@@ -38,51 +38,78 @@ static int32_t rsmaRestore(SSma *pSma);
} \
break; \
} \
- smaSetKeepCfg(&keepCfg, pCfg, TSDB_TYPE_RSMA_L##l); \
+ smaSetKeepCfg(v, &keepCfg, pCfg, TSDB_TYPE_RSMA_L##l); \
if (tsdbOpen(v, &SMA_RSMA_TSDB##l(pSma), VNODE_RSMA##l##_DIR, &keepCfg) < 0) { \
goto _err; \
} \
} while (0)
-#define RETENTION_DAYS_SPLIT_RATIO 10
-#define RETENTION_DAYS_SPLIT_MIN 1
-#define RETENTION_DAYS_SPLIT_MAX 30
+/**
+ * @brief Evaluate days(duration) for rsma level 1/2/3.
+ * 1) level 1: duration from "create database"
+ * 2) level 2/3: duration * (freq/freqL1)
+ * @param pVnode
+ * @param r
+ * @param level
+ * @param precision
+ * @param duration
+ * @return int32_t
+ */
+static int32_t smaEvalDays(SVnode *pVnode, SRetention *r, int8_t level, int8_t precision, int32_t duration) {
+ int32_t freqDuration = convertTimeFromPrecisionToUnit((r + TSDB_RETENTION_L0)->freq, precision, TIME_UNIT_MINUTE);
+ int32_t keepDuration = convertTimeFromPrecisionToUnit((r + TSDB_RETENTION_L0)->keep, precision, TIME_UNIT_MINUTE);
+ int32_t days = duration; // min
-static int32_t smaEvalDays(SRetention *r, int8_t precision) {
- int32_t keepDays = convertTimeFromPrecisionToUnit(r->keep, precision, TIME_UNIT_DAY);
- int32_t freqDays = convertTimeFromPrecisionToUnit(r->freq, precision, TIME_UNIT_DAY);
+ if (days < freqDuration) {
+ days = freqDuration;
+ }
- int32_t days = keepDays / RETENTION_DAYS_SPLIT_RATIO;
- if (days <= RETENTION_DAYS_SPLIT_MIN) {
- days = RETENTION_DAYS_SPLIT_MIN;
- if (days < freqDays) {
- days = freqDays + 1;
- }
- } else {
- if (days > RETENTION_DAYS_SPLIT_MAX) {
- days = RETENTION_DAYS_SPLIT_MAX;
- }
- if (days < freqDays) {
- days = freqDays + 1;
- }
+ if (days > keepDuration) {
+ days = keepDuration;
+ }
+
+ if (level == TSDB_RETENTION_L0) {
+ goto end;
+ }
+
+ ASSERT(level >= TSDB_RETENTION_L1 && level <= TSDB_RETENTION_L2);
+
+ freqDuration = convertTimeFromPrecisionToUnit((r + level)->freq, precision, TIME_UNIT_MINUTE);
+ keepDuration = convertTimeFromPrecisionToUnit((r + level)->keep, precision, TIME_UNIT_MINUTE);
+
+ int32_t nFreqTimes = (r + level)->freq / (r + TSDB_RETENTION_L0)->freq;
+ days *= (nFreqTimes > 1 ? nFreqTimes : 1);
+
+ if (days > keepDuration) {
+ days = keepDuration;
}
- return days * 1440;
+
+ if (days > TSDB_MAX_DURATION_PER_FILE) {
+ days = TSDB_MAX_DURATION_PER_FILE;
+ }
+
+ if (days < freqDuration) {
+ days = freqDuration;
+ }
+end:
+ smaInfo("vgId:%d, evaluated duration for level %" PRIi8 " is %d, raw val:%d", TD_VID(pVnode), level + 1, days, duration);
+ return days;
}
-int smaSetKeepCfg(STsdbKeepCfg *pKeepCfg, STsdbCfg *pCfg, int type) {
+int smaSetKeepCfg(SVnode *pVnode, STsdbKeepCfg *pKeepCfg, STsdbCfg *pCfg, int type) {
pKeepCfg->precision = pCfg->precision;
switch (type) {
case TSDB_TYPE_TSMA:
ASSERT(0);
break;
case TSDB_TYPE_RSMA_L0:
- SMA_SET_KEEP_CFG(0);
+ SMA_SET_KEEP_CFG(pVnode, 0);
break;
case TSDB_TYPE_RSMA_L1:
- SMA_SET_KEEP_CFG(1);
+ SMA_SET_KEEP_CFG(pVnode, 1);
break;
case TSDB_TYPE_RSMA_L2:
- SMA_SET_KEEP_CFG(2);
+ SMA_SET_KEEP_CFG(pVnode, 2);
break;
default:
ASSERT(0);
@@ -123,7 +150,7 @@ int32_t smaOpen(SVnode *pVnode) {
}
// restore the rsma
- if (tdRsmaRestore(pSma, RSMA_RESTORE_REBOOT, pVnode->state.committed) < 0) {
+ if (tdRSmaRestore(pSma, RSMA_RESTORE_REBOOT, pVnode->state.committed) < 0) {
goto _err;
}
}
@@ -146,30 +173,16 @@ int32_t smaClose(SSma *pSma) {
return 0;
}
-int32_t smaPreClose(SSma *pSma) {
- if (pSma && VND_IS_RSMA(pSma->pVnode)) {
- SSmaEnv *pEnv = NULL;
- SRSmaStat *pStat = NULL;
- if (!(pEnv = SMA_RSMA_ENV(pSma)) || !(pStat = (SRSmaStat *)SMA_ENV_STAT(pEnv))) {
- return 0;
- }
- for (int32_t i = 0; i < RSMA_EXECUTOR_MAX; ++i) {
- tsem_post(&(pStat->notEmpty));
- }
- }
- return 0;
-}
-
/**
* @brief rsma env restore
- *
- * @param pSma
- * @param type
- * @param committedVer
- * @return int32_t
+ *
+ * @param pSma
+ * @param type
+ * @param committedVer
+ * @return int32_t
*/
-int32_t tdRsmaRestore(SSma *pSma, int8_t type, int64_t committedVer) {
+int32_t tdRSmaRestore(SSma *pSma, int8_t type, int64_t committedVer) {
ASSERT(VND_IS_RSMA(pSma->pVnode));
- return tdProcessRSmaRestoreImpl(pSma, type, committedVer);
+ return tdRSmaProcessRestoreImpl(pSma, type, committedVer);
}
\ No newline at end of file
diff --git a/source/dnode/vnode/src/sma/smaRollup.c b/source/dnode/vnode/src/sma/smaRollup.c
index 448b8ab50862cea44390b7b0c8cbc4d27d96c20c..ec8fcb29328ae7beb698be287c77b1ed86dbd7b8 100644
--- a/source/dnode/vnode/src/sma/smaRollup.c
+++ b/source/dnode/vnode/src/sma/smaRollup.c
@@ -19,22 +19,22 @@
#define RSMA_QTASKINFO_HEAD_LEN (sizeof(int32_t) + sizeof(int8_t) + sizeof(int64_t)) // len + type + suid
#define RSMA_QTASKEXEC_SMOOTH_SIZE (100) // cnt
#define RSMA_SUBMIT_BATCH_SIZE (1024) // cnt
-#define RSMA_FETCH_DELAY_MAX (900000) // ms
-#define RSMA_FETCH_ACTIVE_MAX (1800) // ms
+#define RSMA_FETCH_DELAY_MAX (120000) // ms
+#define RSMA_FETCH_ACTIVE_MAX (1000) // ms
+#define RSMA_FETCH_INTERVAL (5000) // ms
SSmaMgmt smaMgmt = {
.inited = 0,
.rsetId = -1,
};
-#define TD_QTASKINFO_FNAME_PREFIX "qtaskinfo.ver"
-#define TD_RSMAINFO_DEL_FILE "rsmainfo.del"
+#define TD_QTASKINFO_FNAME_PREFIX "qinf.v"
+
typedef struct SRSmaQTaskInfoItem SRSmaQTaskInfoItem;
typedef struct SRSmaQTaskInfoIter SRSmaQTaskInfoIter;
-typedef struct SRSmaExecQItem SRSmaExecQItem;
static int32_t tdUidStorePut(STbUidStore *pStore, tb_uid_t suid, tb_uid_t *uid);
-static int32_t tdUpdateTbUidListImpl(SSma *pSma, tb_uid_t *suid, SArray *tbUids);
+static int32_t tdUpdateTbUidListImpl(SSma *pSma, tb_uid_t *suid, SArray *tbUids, bool isAdd);
static int32_t tdSetRSmaInfoItemParams(SSma *pSma, SRSmaParam *param, SRSmaStat *pStat, SRSmaInfo *pRSmaInfo,
int8_t idx);
static int32_t tdExecuteRSmaImpl(SSma *pSma, const void *pMsg, int32_t msgSize, int32_t inputType, SRSmaInfo *pInfo,
@@ -42,10 +42,12 @@ static int32_t tdExecuteRSmaImpl(SSma *pSma, const void *pMsg, int32_t msgSiz
static SRSmaInfo *tdAcquireRSmaInfoBySuid(SSma *pSma, int64_t suid);
static void tdReleaseRSmaInfo(SSma *pSma, SRSmaInfo *pInfo);
static void tdFreeRSmaSubmitItems(SArray *pItems);
-static int32_t tdRSmaFetchAllResult(SSma *pSma, SRSmaInfo *pInfo, SArray *pSubmitArr);
+static int32_t tdRSmaFetchAllResult(SSma *pSma, SRSmaInfo *pInfo);
static int32_t tdRSmaExecAndSubmitResult(SSma *pSma, qTaskInfo_t taskInfo, SRSmaInfoItem *pItem, STSchema *pTSchema,
int64_t suid);
static void tdRSmaFetchTrigger(void *param, void *tmrId);
+static int32_t tdRSmaInfoClone(SSma *pSma, SRSmaInfo *pInfo);
+static void tdRSmaQTaskInfoFree(qTaskInfo_t *taskHandle, int32_t vgId, int32_t level);
static int32_t tdRSmaQTaskInfoIterInit(SRSmaQTaskInfoIter *pIter, STFile *pTFile);
static int32_t tdRSmaQTaskInfoIterNextBlock(SRSmaQTaskInfoIter *pIter, bool *isFinish);
static int32_t tdRSmaQTaskInfoRestore(SSma *pSma, int8_t type, SRSmaQTaskInfoIter *pIter);
@@ -82,11 +84,6 @@ struct SRSmaQTaskInfoIter {
int32_t nBufPos;
};
-struct SRSmaExecQItem {
- void *pRSmaInfo;
- void *qall;
-};
-
void tdRSmaQTaskInfoGetFileName(int32_t vgId, int64_t version, char *outputName) {
tdGetVndFileName(vgId, NULL, VNODE_RSMA_DIR, TD_QTASKINFO_FNAME_PREFIX, version, outputName);
}
@@ -101,7 +98,7 @@ static FORCE_INLINE int32_t tdRSmaQTaskInfoContLen(int32_t lenWithHead) {
static FORCE_INLINE void tdRSmaQTaskInfoIterDestroy(SRSmaQTaskInfoIter *pIter) { taosMemoryFreeClear(pIter->pBuf); }
-void tdFreeQTaskInfo(qTaskInfo_t *taskHandle, int32_t vgId, int32_t level) {
+static void tdRSmaQTaskInfoFree(qTaskInfo_t *taskHandle, int32_t vgId, int32_t level) {
// Note: free/kill may in RC
if (!taskHandle || !(*taskHandle)) return;
qTaskInfo_t otaskHandle = atomic_load_ptr(taskHandle);
@@ -128,20 +125,20 @@ void *tdFreeRSmaInfo(SSma *pSma, SRSmaInfo *pInfo, bool isDeepFree) {
SRSmaInfoItem *pItem = &pInfo->items[i];
if (isDeepFree && pItem->tmrId) {
- smaDebug("vgId:%d, stop fetch timer %p for table %" PRIi64 " level %d", SMA_VID(pSma), pInfo->suid,
- pItem->tmrId, i + 1);
+ smaDebug("vgId:%d, stop fetch timer %p for table %" PRIi64 " level %d", SMA_VID(pSma), pItem->tmrId,
+ pInfo->suid, i + 1);
taosTmrStopA(&pItem->tmrId);
}
if (isDeepFree && pInfo->taskInfo[i]) {
- tdFreeQTaskInfo(&pInfo->taskInfo[i], SMA_VID(pSma), i + 1);
+ tdRSmaQTaskInfoFree(&pInfo->taskInfo[i], SMA_VID(pSma), i + 1);
} else {
smaDebug("vgId:%d, table %" PRIi64 " no need to destroy rsma info level %d since empty taskInfo", SMA_VID(pSma),
pInfo->suid, i + 1);
}
if (pInfo->iTaskInfo[i]) {
- tdFreeQTaskInfo(&pInfo->iTaskInfo[i], SMA_VID(pSma), i + 1);
+ tdRSmaQTaskInfoFree(&pInfo->iTaskInfo[i], SMA_VID(pSma), i + 1);
} else {
smaDebug("vgId:%d, table %" PRIi64 " no need to destroy rsma info level %d since empty iTaskInfo",
SMA_VID(pSma), pInfo->suid, i + 1);
@@ -178,7 +175,7 @@ static FORCE_INLINE int32_t tdUidStoreInit(STbUidStore **pStore) {
return TSDB_CODE_SUCCESS;
}
-static int32_t tdUpdateTbUidListImpl(SSma *pSma, tb_uid_t *suid, SArray *tbUids) {
+static int32_t tdUpdateTbUidListImpl(SSma *pSma, tb_uid_t *suid, SArray *tbUids, bool isAdd) {
SRSmaInfo *pRSmaInfo = NULL;
if (!suid || !tbUids) {
@@ -202,7 +199,7 @@ static int32_t tdUpdateTbUidListImpl(SSma *pSma, tb_uid_t *suid, SArray *tbUids)
for (int32_t i = 0; i < TSDB_RETENTION_L2; ++i) {
if (pRSmaInfo->taskInfo[i]) {
- if (((terrno = qUpdateQualifiedTableId(pRSmaInfo->taskInfo[i], tbUids, true)) < 0)) {
+ if (((terrno = qUpdateQualifiedTableId(pRSmaInfo->taskInfo[i], tbUids, isAdd)) < 0)) {
tdReleaseRSmaInfo(pSma, pRSmaInfo);
smaError("vgId:%d, update tbUidList failed for uid:%" PRIi64 " level %d since %s", SMA_VID(pSma), *suid, i,
terrstr());
@@ -218,12 +215,12 @@ static int32_t tdUpdateTbUidListImpl(SSma *pSma, tb_uid_t *suid, SArray *tbUids)
return TSDB_CODE_SUCCESS;
}
-int32_t tdUpdateTbUidList(SSma *pSma, STbUidStore *pStore) {
+int32_t tdUpdateTbUidList(SSma *pSma, STbUidStore *pStore, bool isAdd) {
if (!pStore || (taosArrayGetSize(pStore->tbUids) == 0)) {
return TSDB_CODE_SUCCESS;
}
- if (tdUpdateTbUidListImpl(pSma, &pStore->suid, pStore->tbUids) != TSDB_CODE_SUCCESS) {
+ if (tdUpdateTbUidListImpl(pSma, &pStore->suid, pStore->tbUids, isAdd) != TSDB_CODE_SUCCESS) {
return TSDB_CODE_FAILED;
}
@@ -232,7 +229,7 @@ int32_t tdUpdateTbUidList(SSma *pSma, STbUidStore *pStore) {
tb_uid_t *pTbSuid = (tb_uid_t *)taosHashGetKey(pIter, NULL);
SArray *pTbUids = *(SArray **)pIter;
- if (tdUpdateTbUidListImpl(pSma, pTbSuid, pTbUids) != TSDB_CODE_SUCCESS) {
+ if (tdUpdateTbUidListImpl(pSma, pTbSuid, pTbUids, isAdd) != TSDB_CODE_SUCCESS) {
taosHashCancelIterate(pStore->uidHash, pIter);
return TSDB_CODE_FAILED;
}
@@ -305,7 +302,7 @@ static int32_t tdSetRSmaInfoItemParams(SSma *pSma, SRSmaParam *param, SRSmaStat
return TSDB_CODE_FAILED;
}
SRSmaInfoItem *pItem = &(pRSmaInfo->items[idx]);
- pItem->triggerStat = TASK_TRIGGER_STAT_INACTIVE;
+ pItem->triggerStat = TASK_TRIGGER_STAT_ACTIVE; // fetch the data when reboot
if (param->maxdelay[idx] < TSDB_MIN_ROLLUP_MAX_DELAY) {
int64_t msInterval =
convertTimeFromPrecisionToUnit(pRetention[idx + 1].freq, pTsdbCfg->precision, TIME_UNIT_MILLISECOND);
@@ -318,10 +315,19 @@ static int32_t tdSetRSmaInfoItemParams(SSma *pSma, SRSmaParam *param, SRSmaStat
}
pItem->level = idx == 0 ? TSDB_RETENTION_L1 : TSDB_RETENTION_L2;
- taosTmrReset(tdRSmaFetchTrigger, pItem->maxDelay, pItem, smaMgmt.tmrHandle, &pItem->tmrId);
- smaInfo("vgId:%d, table:%" PRIi64 " level:%" PRIi8 " maxdelay:%" PRIi64 " watermark:%" PRIi64
+ ASSERT(pItem->level > 0);
+
+ SRSmaRef rsmaRef = {.refId = pStat->refId, .suid = pRSmaInfo->suid};
+ taosHashPut(smaMgmt.refHash, &pItem, POINTER_BYTES, &rsmaRef, sizeof(rsmaRef));
+
+ pItem->fetchLevel = pItem->level;
+ taosTmrReset(tdRSmaFetchTrigger, RSMA_FETCH_INTERVAL, pItem, smaMgmt.tmrHandle, &pItem->tmrId);
+
+
+ smaInfo("vgId:%d, item:%p table:%" PRIi64 " level:%" PRIi8 " maxdelay:%" PRIi64 " watermark:%" PRIi64
", finally maxdelay:%" PRIi32,
- TD_VID(pVnode), pRSmaInfo->suid, idx + 1, param->maxdelay[idx], param->watermark[idx], pItem->maxDelay);
+ TD_VID(pVnode), pItem, pRSmaInfo->suid, idx + 1, param->maxdelay[idx], param->watermark[idx],
+ pItem->maxDelay);
}
return TSDB_CODE_SUCCESS;
}
@@ -335,7 +341,7 @@ static int32_t tdSetRSmaInfoItemParams(SSma *pSma, SRSmaParam *param, SRSmaStat
* @param tbName
* @return int32_t
*/
-int32_t tdProcessRSmaCreateImpl(SSma *pSma, SRSmaParam *param, int64_t suid, const char *tbName) {
+int32_t tdRSmaProcessCreateImpl(SSma *pSma, SRSmaParam *param, int64_t suid, const char *tbName) {
if ((param->qmsgLen[0] == 0) && (param->qmsgLen[1] == 0)) {
smaDebug("vgId:%d, no qmsg1/qmsg2 for rollup table %s %" PRIi64, SMA_VID(pSma), tbName, suid);
return TSDB_CODE_SUCCESS;
@@ -373,7 +379,10 @@ int32_t tdProcessRSmaCreateImpl(SSma *pSma, SRSmaParam *param, int64_t suid, con
terrno = TSDB_CODE_TDB_IVD_TB_SCHEMA_VERSION;
goto _err;
}
+ pRSmaInfo->pSma = pSma;
pRSmaInfo->pTSchema = pTSchema;
+ pRSmaInfo->suid = suid;
+ T_REF_INIT_VAL(pRSmaInfo, 1);
if (!(pRSmaInfo->queue = taosOpenQueue())) {
goto _err;
}
@@ -387,9 +396,6 @@ int32_t tdProcessRSmaCreateImpl(SSma *pSma, SRSmaParam *param, int64_t suid, con
if (!(pRSmaInfo->iQall = taosAllocateQall())) {
goto _err;
}
- pRSmaInfo->suid = suid;
- pRSmaInfo->refId = RSMA_REF_ID(pStat);
- T_REF_INIT_VAL(pRSmaInfo, 1);
if (tdSetRSmaInfoItemParams(pSma, param, pStat, pRSmaInfo, 0) < 0) {
goto _err;
@@ -432,7 +438,7 @@ int32_t tdProcessRSmaCreate(SSma *pSma, SVCreateStbReq *pReq) {
return TSDB_CODE_SUCCESS;
}
- return tdProcessRSmaCreateImpl(pSma, &pReq->rsmaParam, pReq->suid, pReq->name);
+ return tdRSmaProcessCreateImpl(pSma, &pReq->rsmaParam, pReq->suid, pReq->name);
}
/**
@@ -466,6 +472,7 @@ int32_t tdProcessRSmaDrop(SSma *pSma, SVDropStbReq *pReq) {
}
// set del flag for data in mem
+ atomic_store_8(&pRSmaStat->delFlag, 1);
RSMA_INFO_SET_DEL(pRSmaInfo);
tdUnRefRSmaInfo(pSma, pRSmaInfo);
@@ -621,7 +628,7 @@ static int32_t tdFetchSubmitReqSuids(SSubmitReq *pMsg, STbUidStore *pStore) {
*/
int32_t smaDoRetention(SSma *pSma, int64_t now) {
int32_t code = TSDB_CODE_SUCCESS;
- if (VND_IS_RSMA(pSma->pVnode)) {
+ if (!VND_IS_RSMA(pSma->pVnode)) {
return code;
}
@@ -659,15 +666,15 @@ static int32_t tdRSmaExecAndSubmitResult(SSma *pSma, qTaskInfo_t taskInfo, SRSma
if (taosArrayGetSize(pResList) == 0) {
if (terrno == 0) {
- // smaDebug("vgId:%d, no rsma %" PRIi8 " data fetched yet", SMA_VID(pSma), pItem->level);
+ // smaDebug("vgId:%d, no rsma level %" PRIi8 " data fetched yet", SMA_VID(pSma), pItem->level);
} else {
- smaDebug("vgId:%d, no rsma %" PRIi8 " data fetched since %s", SMA_VID(pSma), pItem->level, terrstr());
+ smaDebug("vgId:%d, no rsma level %" PRIi8 " data fetched since %s", SMA_VID(pSma), pItem->level, terrstr());
goto _err;
}
break;
} else {
- smaDebug("vgId:%d, rsma %" PRIi8 " data fetched", SMA_VID(pSma), pItem->level);
+ smaDebug("vgId:%d, rsma level %" PRIi8 " data fetched", SMA_VID(pSma), pItem->level);
}
#if 0
char flag[10] = {0};
@@ -681,21 +688,22 @@ static int32_t tdRSmaExecAndSubmitResult(SSma *pSma, qTaskInfo_t taskInfo, SRSma
// TODO: the schema update should be handled later(TD-17965)
if (buildSubmitReqFromDataBlock(&pReq, output, pTSchema, SMA_VID(pSma), suid) < 0) {
- smaError("vgId:%d, build submit req for rsma stable %" PRIi64 " level %" PRIi8 " failed since %s",
- SMA_VID(pSma), suid, pItem->level, terrstr());
+ smaError("vgId:%d, build submit req for rsma table %" PRIi64 " level %" PRIi8 " failed since %s", SMA_VID(pSma),
+ suid, pItem->level, terrstr());
goto _err;
}
if (pReq && tdProcessSubmitReq(sinkTsdb, output->info.version, pReq) < 0) {
taosMemoryFreeClear(pReq);
- smaError("vgId:%d, process submit req for rsma stable %" PRIi64 " level %" PRIi8 " failed since %s",
+ smaError("vgId:%d, process submit req for rsma table %" PRIi64 " level %" PRIi8 " failed since %s",
SMA_VID(pSma), suid, pItem->level, terrstr());
goto _err;
}
- taosMemoryFreeClear(pReq);
- smaDebug("vgId:%d, process submit req for rsma table %" PRIi64 " level %" PRIi8 " version:%" PRIi64,
- SMA_VID(pSma), suid, pItem->level, output->info.version);
+ smaDebug("vgId:%d, process submit req for rsma table %" PRIi64 " level %" PRIi8 " ver %" PRIi64 " len %" PRIu32,
+ SMA_VID(pSma), suid, pItem->level, output->info.version, htonl(pReq->header.contLen));
+
+ taosMemoryFreeClear(pReq);
}
}
@@ -734,10 +742,12 @@ static int32_t tdExecuteRSmaImplAsync(SSma *pSma, const void *pMsg, int32_t inpu
SRSmaStat *pRSmaStat = SMA_RSMA_STAT(pSma);
- tsem_post(&(pRSmaStat->notEmpty));
-
int64_t nItems = atomic_fetch_add_64(&pRSmaStat->nBufItems, 1);
+ if (atomic_load_8(&pInfo->assigned) == 0) {
+ tsem_post(&(pRSmaStat->notEmpty));
+ }
+
// smoothing consume
int32_t n = nItems / RSMA_QTASKEXEC_SMOOTH_SIZE;
if (n > 1) {
@@ -820,6 +830,95 @@ static int32_t tdExecuteRSmaImpl(SSma *pSma, const void *pMsg, int32_t msgSize,
return TSDB_CODE_SUCCESS;
}
+static int32_t tdCloneQTaskInfo(SSma *pSma, qTaskInfo_t dstTaskInfo, qTaskInfo_t srcTaskInfo, SRSmaParam *param,
+ tb_uid_t suid, int8_t idx) {
+ SVnode *pVnode = pSma->pVnode;
+ char *pOutput = NULL;
+ int32_t len = 0;
+
+ if ((terrno = qSerializeTaskStatus(srcTaskInfo, &pOutput, &len)) < 0) {
+ smaError("vgId:%d, rsma clone, table %" PRIi64 " serialize qTaskInfo failed since %s", TD_VID(pVnode), suid,
+ terrstr());
+ goto _err;
+ }
+
+ SReadHandle handle = {
+ .meta = pVnode->pMeta,
+ .vnode = pVnode,
+ .initTqReader = 1,
+ };
+ ASSERT(!dstTaskInfo);
+ dstTaskInfo = qCreateStreamExecTaskInfo(param->qmsg[idx], &handle);
+ if (!dstTaskInfo) {
+ terrno = TSDB_CODE_RSMA_QTASKINFO_CREATE;
+ goto _err;
+ }
+
+ if (qDeserializeTaskStatus(dstTaskInfo, pOutput, len) < 0) {
+ smaError("vgId:%d, rsma clone, restore rsma task for table:%" PRIi64 " failed since %s", TD_VID(pVnode), suid,
+ terrstr());
+ goto _err;
+ }
+
+ smaDebug("vgId:%d, rsma clone, restore rsma task for table:%" PRIi64 " succeed", TD_VID(pVnode), suid);
+
+ taosMemoryFreeClear(pOutput);
+ return TSDB_CODE_SUCCESS;
+_err:
+ taosMemoryFreeClear(pOutput);
+ tdRSmaQTaskInfoFree(dstTaskInfo, TD_VID(pVnode), idx + 1);
+ smaError("vgId:%d, rsma clone, restore rsma task for table:%" PRIi64 " failed since %s", TD_VID(pVnode), suid,
+ terrstr());
+ return TSDB_CODE_FAILED;
+}
+
+/**
+ * @brief Clone qTaskInfo of SRSmaInfo
+ *
+ * @param pSma
+ * @param pInfo
+ * @return int32_t
+ */
+static int32_t tdRSmaInfoClone(SSma *pSma, SRSmaInfo *pInfo) {
+ SRSmaParam *param = NULL;
+ if (!pInfo) {
+ return TSDB_CODE_SUCCESS;
+ }
+
+ SMetaReader mr = {0};
+ metaReaderInit(&mr, SMA_META(pSma), 0);
+ smaDebug("vgId:%d, rsma clone qTaskInfo for suid:%" PRIi64, SMA_VID(pSma), pInfo->suid);
+ if (metaGetTableEntryByUid(&mr, pInfo->suid) < 0) {
+ smaError("vgId:%d, rsma clone, failed to get table meta for %" PRIi64 " since %s", SMA_VID(pSma), pInfo->suid,
+ terrstr());
+ goto _err;
+ }
+ ASSERT(mr.me.type == TSDB_SUPER_TABLE);
+ ASSERT(mr.me.uid == pInfo->suid);
+ if (TABLE_IS_ROLLUP(mr.me.flags)) {
+ param = &mr.me.stbEntry.rsmaParam;
+ for (int32_t i = 0; i < TSDB_RETENTION_L2; ++i) {
+ if (!pInfo->iTaskInfo[i]) {
+ continue;
+ }
+ if (tdCloneQTaskInfo(pSma, pInfo->taskInfo[i], pInfo->iTaskInfo[i], param, pInfo->suid, i) < 0) {
+ goto _err;
+ }
+ }
+ smaDebug("vgId:%d, rsma clone env success for %" PRIi64, SMA_VID(pSma), pInfo->suid);
+ } else {
+ terrno = TSDB_CODE_RSMA_INVALID_SCHEMA;
+ goto _err;
+ }
+
+ metaReaderClear(&mr);
+ return TSDB_CODE_SUCCESS;
+_err:
+ metaReaderClear(&mr);
+ smaError("vgId:%d, rsma clone env failed for %" PRIi64 " since %s", SMA_VID(pSma), pInfo->suid, terrstr());
+ return TSDB_CODE_FAILED;
+}
+
/**
* @brief During async commit, the SRSmaInfo object would be COW from iRSmaInfoHash and write lock should be applied.
*
@@ -843,25 +942,25 @@ static SRSmaInfo *tdAcquireRSmaInfoBySuid(SSma *pSma, int64_t suid) {
return NULL;
}
- // taosRLockLatch(SMA_ENV_LOCK(pEnv));
+ taosRLockLatch(SMA_ENV_LOCK(pEnv));
pRSmaInfo = taosHashGet(RSMA_INFO_HASH(pStat), &suid, sizeof(tb_uid_t));
if (pRSmaInfo && (pRSmaInfo = *(SRSmaInfo **)pRSmaInfo)) {
if (RSMA_INFO_IS_DEL(pRSmaInfo)) {
- // taosRUnLockLatch(SMA_ENV_LOCK(pEnv));
+ taosRUnLockLatch(SMA_ENV_LOCK(pEnv));
return NULL;
}
if (!pRSmaInfo->taskInfo[0]) {
- if (tdCloneRSmaInfo(pSma, pRSmaInfo) < 0) {
- // taosRUnLockLatch(SMA_ENV_LOCK(pEnv));
+ if (tdRSmaInfoClone(pSma, pRSmaInfo) < 0) {
+ taosRUnLockLatch(SMA_ENV_LOCK(pEnv));
return NULL;
}
}
tdRefRSmaInfo(pSma, pRSmaInfo);
- // taosRUnLockLatch(SMA_ENV_LOCK(pEnv));
+ taosRUnLockLatch(SMA_ENV_LOCK(pEnv));
ASSERT(pRSmaInfo->suid == suid);
return pRSmaInfo;
}
- // taosRUnLockLatch(SMA_ENV_LOCK(pEnv));
+ taosRUnLockLatch(SMA_ENV_LOCK(pEnv));
return NULL;
}
@@ -911,39 +1010,6 @@ static int32_t tdExecuteRSmaAsync(SSma *pSma, const void *pMsg, int32_t inputTyp
return TSDB_CODE_SUCCESS;
}
-static int32_t tdRSmaExecCheck(SSma *pSma) {
- SRSmaStat *pRSmaStat = SMA_RSMA_STAT(pSma);
-
- if (atomic_load_8(&pRSmaStat->nExecutor) >= TMIN(RSMA_EXECUTOR_MAX, tsNumOfVnodeQueryThreads / 2)) {
- return TSDB_CODE_SUCCESS;
- }
-
- SRSmaExecMsg fetchMsg;
- int32_t contLen = sizeof(SMsgHead);
- void *pBuf = rpcMallocCont(0 + contLen);
-
- ((SMsgHead *)pBuf)->vgId = SMA_VID(pSma);
- ((SMsgHead *)pBuf)->contLen = sizeof(SMsgHead);
-
- SRpcMsg rpcMsg = {
- .code = 0,
- .msgType = TDMT_VND_EXEC_RSMA,
- .pCont = pBuf,
- .contLen = contLen,
- };
-
- if ((terrno = tmsgPutToQueue(&pSma->pVnode->msgCb, QUERY_QUEUE, &rpcMsg)) != 0) {
- smaError("vgId:%d, failed to put rsma exec msg into query-queue since %s", SMA_VID(pSma), terrstr());
- goto _err;
- }
-
- smaDebug("vgId:%d, success to put rsma fetch msg into query-queue", SMA_VID(pSma));
-
- return TSDB_CODE_SUCCESS;
-_err:
- return TSDB_CODE_FAILED;
-}
-
int32_t tdProcessRSmaSubmit(SSma *pSma, void *pMsg, int32_t inputType) {
SSmaEnv *pEnv = SMA_RSMA_ENV(pSma);
if (!pEnv) {
@@ -974,10 +1040,6 @@ int32_t tdProcessRSmaSubmit(SSma *pSma, void *pMsg, int32_t inputType) {
goto _err;
}
}
-
- if (tdRSmaExecCheck(pSma) < 0) {
- goto _err;
- }
}
}
tdUidStoreDestory(&uidStore);
@@ -1046,7 +1108,7 @@ static int32_t tdRSmaRestoreQTaskInfoInit(SSma *pSma, int64_t *nTables) {
" qmsgLen:%" PRIi32,
TD_VID(pVnode), suid, i, param->maxdelay[i], param->watermark[i], param->qmsgLen[i]);
}
- if (tdProcessRSmaCreateImpl(pSma, &mr.me.stbEntry.rsmaParam, suid, mr.me.name) < 0) {
+ if (tdRSmaProcessCreateImpl(pSma, &mr.me.stbEntry.rsmaParam, suid, mr.me.name) < 0) {
smaError("vgId:%d, rsma restore env failed for %" PRIi64 " since %s", TD_VID(pVnode), suid, terrstr());
goto _err;
}
@@ -1059,7 +1121,7 @@ static int32_t tdRSmaRestoreQTaskInfoInit(SSma *pSma, int64_t *nTables) {
goto _err;
}
- if (tdUpdateTbUidList(pVnode->pSma, &uidStore) < 0) {
+ if (tdUpdateTbUidList(pVnode->pSma, &uidStore, true) < 0) {
smaError("vgId:%d, rsma restore, update tb uid list failed for %" PRIi64 " since %s", TD_VID(pVnode), suid,
terrstr());
goto _err;
@@ -1118,9 +1180,6 @@ static int32_t tdRSmaRestoreQTaskInfoReload(SSma *pSma, int8_t type, int64_t qTa
goto _err;
}
- SSmaEnv *pRSmaEnv = pSma->pRSmaEnv;
- SRSmaStat *pRSmaStat = (SRSmaStat *)SMA_ENV_STAT(pRSmaEnv);
-
SRSmaQTaskInfoIter fIter = {0};
if (tdRSmaQTaskInfoIterInit(&fIter, &tFile) < 0) {
tdRSmaQTaskInfoIterDestroy(&fIter);
@@ -1161,7 +1220,7 @@ static int32_t tdRSmaRestoreTSDataReload(SSma *pSma) {
return TSDB_CODE_SUCCESS;
}
-int32_t tdProcessRSmaRestoreImpl(SSma *pSma, int8_t type, int64_t qtaskFileVer) {
+int32_t tdRSmaProcessRestoreImpl(SSma *pSma, int8_t type, int64_t qtaskFileVer) {
// step 1: iterate all stables to restore the rsma env
int64_t nTables = 0;
if (tdRSmaRestoreQTaskInfoInit(pSma, &nTables) < 0) {
@@ -1182,6 +1241,12 @@ int32_t tdProcessRSmaRestoreImpl(SSma *pSma, int8_t type, int64_t qtaskFileVer)
if (tdRSmaRestoreTSDataReload(pSma) < 0) {
goto _err;
}
+
+ // step 4: open SRSmaFS for qTaskFiles
+ if (tdRSmaFSOpen(pSma, qtaskFileVer) < 0) {
+ goto _err;
+ }
+
smaInfo("vgId:%d, restore rsma task %" PRIi8 " from qtaskf %" PRIi64 " succeed", SMA_VID(pSma), type, qtaskFileVer);
return TSDB_CODE_SUCCESS;
_err:
@@ -1280,29 +1345,31 @@ static int32_t tdRSmaQTaskInfoIterNextBlock(SRSmaQTaskInfoIter *pIter, bool *isF
return TSDB_CODE_FAILED;
}
- if (tdReadTFile(pTFile, pIter->qBuf, nBytes) != nBytes) {
+ if (tdReadTFile(pTFile, pIter->pBuf, nBytes) != nBytes) {
return TSDB_CODE_FAILED;
}
int32_t infoLen = 0;
- taosDecodeFixedI32(pIter->qBuf, &infoLen);
+ taosDecodeFixedI32(pIter->pBuf, &infoLen);
if (infoLen > nBytes) {
if (infoLen <= RSMA_QTASKINFO_BUFSIZE) {
terrno = TSDB_CODE_RSMA_FILE_CORRUPTED;
smaError("iterate rsma qtaskinfo file %s failed since %s", TD_TFILE_FULL_NAME(pIter->pTFile), terrstr());
return TSDB_CODE_FAILED;
}
- pIter->nAlloc = infoLen;
- void *pBuf = taosMemoryRealloc(pIter->pBuf, infoLen);
- if (!pBuf) {
- terrno = TSDB_CODE_OUT_OF_MEMORY;
- return TSDB_CODE_FAILED;
+ if (pIter->nAlloc < infoLen) {
+ pIter->nAlloc = infoLen;
+ void *pBuf = taosMemoryRealloc(pIter->pBuf, infoLen);
+ if (!pBuf) {
+ terrno = TSDB_CODE_OUT_OF_MEMORY;
+ return TSDB_CODE_FAILED;
+ }
+ pIter->pBuf = pBuf;
}
- pIter->pBuf = pBuf;
- pIter->qBuf = pIter->pBuf;
+
nBytes = infoLen;
- if (tdSeekTFile(pTFile, pIter->offset, SEEK_SET)) {
+ if (tdSeekTFile(pTFile, pIter->offset, SEEK_SET) < 0) {
return TSDB_CODE_FAILED;
}
@@ -1311,6 +1378,7 @@ static int32_t tdRSmaQTaskInfoIterNextBlock(SRSmaQTaskInfoIter *pIter, bool *isF
}
}
+ pIter->qBuf = pIter->pBuf;
pIter->offset += nBytes;
pIter->nBytes = nBytes;
pIter->nBufPos = 0;
@@ -1388,17 +1456,24 @@ int32_t tdRSmaPersistExecImpl(SRSmaStat *pRSmaStat, SHashObj *pInfoHash) {
return TSDB_CODE_SUCCESS;
}
+ int64_t fsMaxVer = tdRSmaFSMaxVer(pSma, pRSmaStat);
+ if (pRSmaStat->commitAppliedVer <= fsMaxVer) {
+ smaDebug("vgId:%d, rsma persist, no need as applied %" PRIi64 " not larger than fsMaxVer %" PRIi64, vid,
+ pRSmaStat->commitAppliedVer, fsMaxVer);
+ return TSDB_CODE_SUCCESS;
+ }
+
STFile tFile = {0};
#if 0
if (pRSmaStat->commitAppliedVer > 0) {
char qTaskInfoFName[TSDB_FILENAME_LEN];
tdRSmaQTaskInfoGetFileName(vid, pRSmaStat->commitAppliedVer, qTaskInfoFName);
if (tdInitTFile(&tFile, tfsGetPrimaryPath(pVnode->pTfs), qTaskInfoFName) < 0) {
- smaError("vgId:%d, rsma persit, init %s failed since %s", vid, qTaskInfoFName, terrstr());
+ smaError("vgId:%d, rsma persist, init %s failed since %s", vid, qTaskInfoFName, terrstr());
goto _err;
}
if (tdCreateTFile(&tFile, true, TD_FTYPE_RSMA_QTASKINFO) < 0) {
- smaError("vgId:%d, rsma persit, create %s failed since %s", vid, TD_TFILE_FULL_NAME(&tFile), terrstr());
+ smaError("vgId:%d, rsma persist, create %s failed since %s", vid, TD_TFILE_FULL_NAME(&tFile), terrstr());
goto _err;
}
smaDebug("vgId:%d, rsma, serialize qTaskInfo, file %s created", vid, TD_TFILE_FULL_NAME(&tFile));
@@ -1448,11 +1523,11 @@ int32_t tdRSmaPersistExecImpl(SRSmaStat *pRSmaStat, SHashObj *pInfoHash) {
char qTaskInfoFName[TSDB_FILENAME_LEN];
tdRSmaQTaskInfoGetFileName(vid, pRSmaStat->commitAppliedVer, qTaskInfoFName);
if (tdInitTFile(&tFile, tfsGetPrimaryPath(pVnode->pTfs), qTaskInfoFName) < 0) {
- smaError("vgId:%d, rsma persit, init %s failed since %s", vid, qTaskInfoFName, terrstr());
+ smaError("vgId:%d, rsma persist, init %s failed since %s", vid, qTaskInfoFName, terrstr());
goto _err;
}
if (tdCreateTFile(&tFile, true, TD_FTYPE_RSMA_QTASKINFO) < 0) {
- smaError("vgId:%d, rsma persit, create %s failed since %s", vid, TD_TFILE_FULL_NAME(&tFile), terrstr());
+ smaError("vgId:%d, rsma persist, create %s failed since %s", vid, TD_TFILE_FULL_NAME(&tFile), terrstr());
goto _err;
}
smaDebug("vgId:%d, rsma, table %" PRIi64 " serialize qTaskInfo, file %s created", vid, pRSmaInfo->suid,
@@ -1496,7 +1571,7 @@ int32_t tdRSmaPersistExecImpl(SRSmaStat *pRSmaStat, SHashObj *pInfoHash) {
}
return TSDB_CODE_SUCCESS;
_err:
- smaError("vgId:%d, rsma persit failed since %s", vid, terrstr());
+ smaError("vgId:%d, rsma persist failed since %s", vid, terrstr());
if (isFileCreated) {
tdRemoveTFile(&tFile);
tdDestroyTFile(&tFile);
@@ -1511,38 +1586,59 @@ _err:
* @param tmrId
*/
static void tdRSmaFetchTrigger(void *param, void *tmrId) {
- SRSmaInfoItem *pItem = param;
+ SRSmaRef *pRSmaRef = NULL;
SSma *pSma = NULL;
- SRSmaInfo *pRSmaInfo = tdGetRSmaInfoByItem(pItem);
+ SRSmaStat *pStat = NULL;
+ SRSmaInfo *pRSmaInfo = NULL;
+ SRSmaInfoItem *pItem = NULL;
- if (RSMA_INFO_IS_DEL(pRSmaInfo)) {
- smaDebug("rsma fetch task not start since rsma info already deleted, rsetId:%" PRIi64 " refId:%d)", smaMgmt.rsetId,
- pRSmaInfo->refId);
+ if (!(pRSmaRef = taosHashGet(smaMgmt.refHash, ¶m, POINTER_BYTES))) {
+ smaDebug("rsma fetch task not start since rsma info item:%p not exist in refHash:%p, rsetId:%" PRIi64, param,
+ smaMgmt.refHash, smaMgmt.rsetId);
return;
}
- SRSmaStat *pStat = (SRSmaStat *)tdAcquireSmaRef(smaMgmt.rsetId, pRSmaInfo->refId);
-
- if (!pStat) {
+ if (!(pStat = (SRSmaStat *)tdAcquireSmaRef(smaMgmt.rsetId, pRSmaRef->refId))) {
smaDebug("rsma fetch task not start since rsma stat already destroyed, rsetId:%" PRIi64 " refId:%d)",
- smaMgmt.rsetId, pRSmaInfo->refId);
+ smaMgmt.rsetId, pRSmaRef->refId); // pRSmaRef freed in taosHashRemove
+ taosHashRemove(smaMgmt.refHash, ¶m, POINTER_BYTES);
return;
}
pSma = pStat->pSma;
+ if (!(pRSmaInfo = tdAcquireRSmaInfoBySuid(pSma, pRSmaRef->suid))) {
+ smaDebug("rsma fetch task not start since rsma info not exist, rsetId:%" PRIi64 " refId:%d)", smaMgmt.rsetId,
+ pRSmaRef->refId); // pRSmaRef freed in taosHashRemove
+ tdReleaseSmaRef(smaMgmt.rsetId, pRSmaRef->refId);
+ taosHashRemove(smaMgmt.refHash, ¶m, POINTER_BYTES);
+ return;
+ }
+
+ if (RSMA_INFO_IS_DEL(pRSmaInfo)) {
+ smaDebug("rsma fetch task not start since rsma info already deleted, rsetId:%" PRIi64 " refId:%d)", smaMgmt.rsetId,
+ pRSmaRef->refId); // pRSmaRef freed in taosHashRemove
+ tdReleaseRSmaInfo(pSma, pRSmaInfo);
+ tdReleaseSmaRef(smaMgmt.rsetId, pRSmaRef->refId);
+ taosHashRemove(smaMgmt.refHash, ¶m, POINTER_BYTES);
+ return;
+ }
+
+ pItem = *(SRSmaInfoItem **)¶m;
+
// if rsma trigger stat in paused, cancelled or finished, not start fetch task
int8_t rsmaTriggerStat = atomic_load_8(RSMA_TRIGGER_STAT(pStat));
switch (rsmaTriggerStat) {
case TASK_TRIGGER_STAT_PAUSED:
case TASK_TRIGGER_STAT_CANCELLED: {
- tdReleaseSmaRef(smaMgmt.rsetId, pRSmaInfo->refId);
smaDebug("vgId:%d, rsma fetch task not start for level %" PRIi8 " since stat is %" PRIi8
", rsetId rsetId:%" PRIi64 " refId:%d",
- SMA_VID(pSma), pItem->level, rsmaTriggerStat, smaMgmt.rsetId, pRSmaInfo->refId);
+ SMA_VID(pSma), pItem->level, rsmaTriggerStat, smaMgmt.rsetId, pRSmaRef->refId);
if (rsmaTriggerStat == TASK_TRIGGER_STAT_PAUSED) {
- taosTmrReset(tdRSmaFetchTrigger, 5000, pItem, smaMgmt.tmrHandle, &pItem->tmrId);
+ taosTmrReset(tdRSmaFetchTrigger, RSMA_FETCH_INTERVAL, pItem, smaMgmt.tmrHandle, &pItem->tmrId);
}
+ tdReleaseRSmaInfo(pSma, pRSmaInfo);
+ tdReleaseSmaRef(smaMgmt.rsetId, pRSmaRef->refId);
return;
}
default:
@@ -1553,7 +1649,7 @@ static void tdRSmaFetchTrigger(void *param, void *tmrId) {
atomic_val_compare_exchange_8(&pItem->triggerStat, TASK_TRIGGER_STAT_ACTIVE, TASK_TRIGGER_STAT_INACTIVE);
switch (fetchTriggerStat) {
case TASK_TRIGGER_STAT_ACTIVE: {
- smaDebug("vgId:%d, rsma fetch task started for level:%" PRIi8 " suid:%" PRIi64 " since stat is active",
+ smaDebug("vgId:%d, rsma fetch task planned for level:%" PRIi8 " suid:%" PRIi64 " since stat is active",
SMA_VID(pSma), pItem->level, pRSmaInfo->suid);
// async process
pItem->fetchLevel = pItem->level;
@@ -1563,9 +1659,9 @@ static void tdRSmaFetchTrigger(void *param, void *tmrId) {
ASSERT(qItem->level == pItem->level);
ASSERT(qItem->fetchLevel == pItem->fetchLevel);
#endif
- tsem_post(&(pStat->notEmpty));
- smaInfo("vgId:%d, rsma fetch task planned for level:%" PRIi8 " suid:%" PRIi64, SMA_VID(pSma), pItem->level,
- pRSmaInfo->suid);
+ if (atomic_load_8(&pRSmaInfo->assigned) == 0) {
+ tsem_post(&(pStat->notEmpty));
+ }
} break;
case TASK_TRIGGER_STAT_PAUSED: {
smaDebug("vgId:%d, rsma fetch task not start for level:%" PRIi8 " suid:%" PRIi64 " since stat is paused",
@@ -1587,13 +1683,15 @@ static void tdRSmaFetchTrigger(void *param, void *tmrId) {
_end:
taosTmrReset(tdRSmaFetchTrigger, pItem->maxDelay, pItem, smaMgmt.tmrHandle, &pItem->tmrId);
- tdReleaseSmaRef(smaMgmt.rsetId, pRSmaInfo->refId);
+ tdReleaseRSmaInfo(pSma, pRSmaInfo);
+ tdReleaseSmaRef(smaMgmt.rsetId, pRSmaRef->refId);
}
static void tdFreeRSmaSubmitItems(SArray *pItems) {
for (int32_t i = 0; i < taosArrayGetSize(pItems); ++i) {
taosFreeQitem(*(void **)taosArrayGet(pItems, i));
}
+ taosArrayClear(pItems);
}
/**
@@ -1601,10 +1699,9 @@ static void tdFreeRSmaSubmitItems(SArray *pItems) {
*
* @param pSma
* @param pInfo
- * @param pSubmitArr
* @return int32_t
*/
-static int32_t tdRSmaFetchAllResult(SSma *pSma, SRSmaInfo *pInfo, SArray *pSubmitArr) {
+static int32_t tdRSmaFetchAllResult(SSma *pSma, SRSmaInfo *pInfo) {
SSDataBlock dataBlock = {.info.type = STREAM_GET_ALL};
for (int8_t i = 1; i <= TSDB_RETENTION_L2; ++i) {
SRSmaInfoItem *pItem = RSMA_INFO_ITEM(pInfo, i - 1);
@@ -1615,21 +1712,23 @@ static int32_t tdRSmaFetchAllResult(SSma *pSma, SRSmaInfo *pInfo, SArray *pSubmi
continue;
}
- int64_t curMs = taosGetTimestampMs();
- if ((pItem->nSkipped * pItem->maxDelay) > RSMA_FETCH_DELAY_MAX) {
- smaInfo("vgId:%d, suid:%" PRIi64 " level:%" PRIi8 " nSkipped:%" PRIi8 " maxDelay:%d, fetch executed",
- SMA_VID(pSma), pInfo->suid, i, pItem->nSkipped, pItem->maxDelay);
- } else if (((curMs - pInfo->lastRecv) < RSMA_FETCH_ACTIVE_MAX)) {
- ++pItem->nSkipped;
- smaDebug("vgId:%d, suid:%" PRIi64 " level:%" PRIi8 " curMs:%" PRIi64 " lastRecv:%" PRIi64 ", fetch skipped ",
- SMA_VID(pSma), pInfo->suid, i, curMs, pInfo->lastRecv);
- continue;
+ if ((++pItem->nScanned * pItem->maxDelay) > RSMA_FETCH_DELAY_MAX) {
+ smaDebug("vgId:%d, suid:%" PRIi64 " level:%" PRIi8 " nScanned:%" PRIi8 " maxDelay:%d, fetch executed",
+ SMA_VID(pSma), pInfo->suid, i, pItem->nScanned, pItem->maxDelay);
} else {
- smaInfo("vgId:%d, suid:%" PRIi64 " level:%" PRIi8 " curMs:%" PRIi64 " lastRecv:%" PRIi64 ", fetch executed ",
- SMA_VID(pSma), pInfo->suid, i, curMs, pInfo->lastRecv);
+ int64_t curMs = taosGetTimestampMs();
+ if ((curMs - pInfo->lastRecv) < RSMA_FETCH_ACTIVE_MAX) {
+ smaTrace("vgId:%d, suid:%" PRIi64 " level:%" PRIi8 " curMs:%" PRIi64 " lastRecv:%" PRIi64 ", fetch skipped ",
+ SMA_VID(pSma), pInfo->suid, i, curMs, pInfo->lastRecv);
+ atomic_store_8(&pItem->triggerStat, TASK_TRIGGER_STAT_ACTIVE); // restore the active stat
+ continue;
+ } else {
+ smaDebug("vgId:%d, suid:%" PRIi64 " level:%" PRIi8 " curMs:%" PRIi64 " lastRecv:%" PRIi64 ", fetch executed ",
+ SMA_VID(pSma), pInfo->suid, i, curMs, pInfo->lastRecv);
+ }
}
- pItem->nSkipped = 0;
+ pItem->nScanned = 0;
if ((terrno = qSetMultiStreamInput(taskInfo, &dataBlock, 1, STREAM_INPUT__DATA_BLOCK)) < 0) {
goto _err;
@@ -1640,20 +1739,18 @@ static int32_t tdRSmaFetchAllResult(SSma *pSma, SRSmaInfo *pInfo, SArray *pSubmi
}
tdCleanupStreamInputDataBlock(taskInfo);
- smaInfo("vgId:%d, suid:%" PRIi64 " level:%" PRIi8 " nSkipped:%" PRIi8 " maxDelay:%d, fetch finished",
- SMA_VID(pSma), pInfo->suid, i, pItem->nSkipped, pItem->maxDelay);
+ smaDebug("vgId:%d, suid:%" PRIi64 " level:%" PRIi8 " nScanned:%" PRIi8 " maxDelay:%d, fetch finished",
+ SMA_VID(pSma), pInfo->suid, i, pItem->nScanned, pItem->maxDelay);
} else {
- smaDebug("vgId:%d, suid:%" PRIi64 " level:%" PRIi8 " nSkipped:%" PRIi8
+ smaDebug("vgId:%d, suid:%" PRIi64 " level:%" PRIi8 " nScanned:%" PRIi8
" maxDelay:%d, fetch not executed as fetch level is %" PRIi8,
- SMA_VID(pSma), pInfo->suid, i, pItem->nSkipped, pItem->maxDelay, pItem->fetchLevel);
+ SMA_VID(pSma), pInfo->suid, i, pItem->nScanned, pItem->maxDelay, pItem->fetchLevel);
}
}
_end:
- tdReleaseRSmaInfo(pSma, pInfo);
return TSDB_CODE_SUCCESS;
_err:
- tdReleaseRSmaInfo(pSma, pInfo);
return TSDB_CODE_FAILED;
}
@@ -1703,6 +1800,7 @@ _err:
* @param type
* @return int32_t
*/
+
int32_t tdRSmaProcessExecImpl(SSma *pSma, ERsmaExecType type) {
SVnode *pVnode = pSma->pVnode;
SSmaEnv *pEnv = SMA_RSMA_ENV(pSma);
@@ -1722,41 +1820,71 @@ int32_t tdRSmaProcessExecImpl(SSma *pSma, ERsmaExecType type) {
goto _err;
}
- bool isBusy = false;
while (true) {
- isBusy = false;
// step 1: rsma exec - consume data in buffer queue for all suids
if (type == RSMA_EXEC_OVERFLOW || type == RSMA_EXEC_COMMIT) {
- void *pIter = taosHashIterate(infoHash, NULL); // infoHash has r/w lock
- while (pIter) {
+ void *pIter = NULL;
+ while ((pIter = taosHashIterate(infoHash, pIter))) {
SRSmaInfo *pInfo = *(SRSmaInfo **)pIter;
- int64_t itemSize = 0;
- if ((itemSize = taosQueueItemSize(pInfo->queue)) || RSMA_INFO_ITEM(pInfo, 0)->fetchLevel ||
- RSMA_INFO_ITEM(pInfo, 1)->fetchLevel) {
- smaDebug("vgId:%d, queueItemSize is %" PRIi64 " execType:%" PRIi8, SMA_VID(pSma), itemSize, type);
- if (atomic_val_compare_exchange_8(&pInfo->assigned, 0, 1) == 0) {
- taosReadAllQitems(pInfo->queue, pInfo->qall); // queue has mutex lock
- int32_t qallItemSize = taosQallItemSize(pInfo->qall);
- if (qallItemSize > 0) {
- tdRSmaBatchExec(pSma, pInfo, pInfo->qall, pSubmitArr, type);
+ if (atomic_val_compare_exchange_8(&pInfo->assigned, 0, 1) == 0) {
+ if ((taosQueueItemSize(pInfo->queue) > 0) || RSMA_INFO_ITEM(pInfo, 0)->fetchLevel ||
+ RSMA_INFO_ITEM(pInfo, 1)->fetchLevel) {
+ int32_t batchCnt = -1;
+ int32_t batchMax = taosHashGetSize(infoHash) / tsNumOfVnodeRsmaThreads;
+ bool occupied = (batchMax <= 1);
+ if (batchMax > 1) {
+ batchMax = 100 / batchMax;
+ batchMax = TMAX(batchMax, 4);
}
-
- if (type == RSMA_EXEC_OVERFLOW) {
- tdRSmaFetchAllResult(pSma, pInfo, pSubmitArr);
- }
-
- if (qallItemSize > 0) {
- // subtract the item size after the task finished, commit should wait for all items be consumed
- atomic_fetch_sub_64(&pRSmaStat->nBufItems, qallItemSize);
- isBusy = true;
+ while (occupied || (++batchCnt < batchMax)) { // greedy mode
+ taosReadAllQitems(pInfo->queue, pInfo->qall); // queue has mutex lock
+ int32_t qallItemSize = taosQallItemSize(pInfo->qall);
+ if (qallItemSize > 0) {
+ tdRSmaBatchExec(pSma, pInfo, pInfo->qall, pSubmitArr, type);
+ smaDebug("vgId:%d, batchSize:%d, execType:%" PRIi8, SMA_VID(pSma), qallItemSize, type);
+ }
+
+ if (RSMA_INFO_ITEM(pInfo, 0)->fetchLevel || RSMA_INFO_ITEM(pInfo, 1)->fetchLevel) {
+ int8_t oldStat = atomic_val_compare_exchange_8(RSMA_COMMIT_STAT(pRSmaStat), 0, 2);
+ if (oldStat == 0 ||
+ ((oldStat == 2) && atomic_load_8(RSMA_TRIGGER_STAT(pRSmaStat)) < TASK_TRIGGER_STAT_PAUSED)) {
+ atomic_fetch_add_32(&pRSmaStat->nFetchAll, 1);
+ tdRSmaFetchAllResult(pSma, pInfo);
+ if (0 == atomic_sub_fetch_32(&pRSmaStat->nFetchAll, 1)) {
+ atomic_store_8(RSMA_COMMIT_STAT(pRSmaStat), 0);
+ }
+ }
+ }
+
+ if (qallItemSize > 0) {
+ atomic_fetch_sub_64(&pRSmaStat->nBufItems, qallItemSize);
+ continue;
+ } else if (RSMA_INFO_ITEM(pInfo, 0)->fetchLevel || RSMA_INFO_ITEM(pInfo, 1)->fetchLevel) {
+ if (atomic_load_8(RSMA_COMMIT_STAT(pRSmaStat)) == 0) {
+ continue;
+ }
+ for (int32_t j = 0; j < TSDB_RETENTION_L2; ++j) {
+ SRSmaInfoItem *pItem = RSMA_INFO_ITEM(pInfo, j);
+ if (pItem->fetchLevel) {
+ pItem->fetchLevel = 0;
+ taosTmrReset(tdRSmaFetchTrigger, RSMA_FETCH_INTERVAL, pItem, smaMgmt.tmrHandle, &pItem->tmrId);
+ }
+ }
+ }
+
+ break;
}
- ASSERT(1 == atomic_val_compare_exchange_8(&pInfo->assigned, 1, 0));
}
+ atomic_val_compare_exchange_8(&pInfo->assigned, 1, 0);
}
- pIter = taosHashIterate(infoHash, pIter);
}
if (type == RSMA_EXEC_COMMIT) {
- break;
+ if (atomic_load_64(&pRSmaStat->nBufItems) <= 0) {
+ break;
+ } else {
+ // commit should wait for all items be consumed
+ continue;
+ }
}
}
#if 0
@@ -1776,7 +1904,7 @@ int32_t tdRSmaProcessExecImpl(SSma *pSma, ERsmaExecType type) {
}
// tdRSmaFetchAllResult(pSma, pInfo, pSubmitArr);
- ASSERT(1 == atomic_val_compare_exchange_8(&pInfo->assigned, 1, 0));
+ atomic_val_compare_exchange_8(&pInfo->assigned, 1, 0);
}
}
ASSERT(taosQueueItemSize(pInfo->iQueue) == 0);
@@ -1790,16 +1918,19 @@ int32_t tdRSmaProcessExecImpl(SSma *pSma, ERsmaExecType type) {
}
if (atomic_load_64(&pRSmaStat->nBufItems) <= 0) {
- if (pVnode->inClose) {
+ if (pEnv->flag & SMA_ENV_FLG_CLOSE) {
break;
}
+
tsem_wait(&pRSmaStat->notEmpty);
- if (pVnode->inClose && (atomic_load_64(&pRSmaStat->nBufItems) <= 0)) {
- smaInfo("vgId:%d, exec task end, inClose:%d, nBufItems:%" PRIi64, SMA_VID(pSma), pVnode->inClose,
+
+ if ((pEnv->flag & SMA_ENV_FLG_CLOSE) && (atomic_load_64(&pRSmaStat->nBufItems) <= 0)) {
+ smaDebug("vgId:%d, exec task end, flag:%" PRIi8 ", nBufItems:%" PRIi64, SMA_VID(pSma), pEnv->flag,
atomic_load_64(&pRSmaStat->nBufItems));
break;
}
}
+
} // end of while(true)
_end:
@@ -1809,39 +1940,3 @@ _err:
taosArrayDestroy(pSubmitArr);
return TSDB_CODE_FAILED;
}
-
-/**
- * @brief exec rsma level 1data, fetch result of level 2/3 and submit
- *
- * @param pSma
- * @param pMsg
- * @return int32_t
- */
-int32_t smaProcessExec(SSma *pSma, void *pMsg) {
- SRpcMsg *pRpcMsg = (SRpcMsg *)pMsg;
- SRSmaStat *pRSmaStat = SMA_RSMA_STAT(pSma);
-
- if (!pRpcMsg || pRpcMsg->contLen < sizeof(SMsgHead)) {
- terrno = TSDB_CODE_RSMA_FETCH_MSG_MSSED_UP;
- goto _err;
- }
- smaDebug("vgId:%d, begin to process rsma exec msg by TID:%p", SMA_VID(pSma), (void *)taosGetSelfPthreadId());
-
- int8_t nOld = atomic_fetch_add_8(&pRSmaStat->nExecutor, 1);
-
- if (nOld < TMIN(RSMA_EXECUTOR_MAX, tsNumOfVnodeQueryThreads / 2)) {
- if (tdRSmaProcessExecImpl(pSma, RSMA_EXEC_OVERFLOW) < 0) {
- goto _err;
- }
- } else {
- atomic_fetch_sub_8(&pRSmaStat->nExecutor, 1);
- }
-
- smaDebug("vgId:%d, success to process rsma exec msg by TID:%p", SMA_VID(pSma), (void *)taosGetSelfPthreadId());
- return TSDB_CODE_SUCCESS;
-_err:
- atomic_fetch_sub_8(&pRSmaStat->nExecutor, 1);
- smaError("vgId:%d, failed to process rsma exec msg by TID:%p since %s", SMA_VID(pSma), (void *)taosGetSelfPthreadId(),
- terrstr());
- return TSDB_CODE_FAILED;
-}
diff --git a/source/dnode/vnode/src/sma/smaSnapshot.c b/source/dnode/vnode/src/sma/smaSnapshot.c
index 335c15a539ef31d66d83377f90da225e45ffd893..5a0167a75fc799366396015f8323e770a217a1cb 100644
--- a/source/dnode/vnode/src/sma/smaSnapshot.c
+++ b/source/dnode/vnode/src/sma/smaSnapshot.c
@@ -15,11 +15,13 @@
#include "sma.h"
-static int32_t rsmaSnapReadQTaskInfo(SRsmaSnapReader* pReader, uint8_t** ppData);
-static int32_t rsmaSnapWriteQTaskInfo(SRsmaSnapWriter* pWriter, uint8_t* pData, uint32_t nData);
+static int32_t rsmaSnapReadQTaskInfo(SRSmaSnapReader* pReader, uint8_t** ppData);
+static int32_t rsmaSnapWriteQTaskInfo(SRSmaSnapWriter* pWriter, uint8_t* pData, uint32_t nData);
+static int32_t rsmaQTaskInfSnapReaderOpen(SRSmaSnapReader* pReader, int64_t version);
+static int32_t rsmaQTaskInfSnapReaderClose(SQTaskFReader** ppReader);
-// SRsmaSnapReader ========================================
-struct SRsmaSnapReader {
+// SRSmaSnapReader ========================================
+struct SRSmaSnapReader {
SSma* pSma;
int64_t sver;
int64_t ever;
@@ -33,13 +35,13 @@ struct SRsmaSnapReader {
SQTaskFReader* pQTaskFReader;
};
-int32_t rsmaSnapReaderOpen(SSma* pSma, int64_t sver, int64_t ever, SRsmaSnapReader** ppReader) {
+int32_t rsmaSnapReaderOpen(SSma* pSma, int64_t sver, int64_t ever, SRSmaSnapReader** ppReader) {
int32_t code = 0;
SVnode* pVnode = pSma->pVnode;
- SRsmaSnapReader* pReader = NULL;
+ SRSmaSnapReader* pReader = NULL;
// alloc
- pReader = (SRsmaSnapReader*)taosMemoryCalloc(1, sizeof(*pReader));
+ pReader = (SRSmaSnapReader*)taosMemoryCalloc(1, sizeof(*pReader));
if (pReader == NULL) {
code = TSDB_CODE_OUT_OF_MEMORY;
goto _err;
@@ -48,7 +50,7 @@ int32_t rsmaSnapReaderOpen(SSma* pSma, int64_t sver, int64_t ever, SRsmaSnapRead
pReader->sver = sver;
pReader->ever = ever;
- // rsma1/rsma2
+ // open rsma1/rsma2
for (int32_t i = 0; i < TSDB_RETENTION_L2; ++i) {
if (pSma->pRSmaTsdb[i]) {
code = tsdbSnapReaderOpen(pSma->pRSmaTsdb[i], sver, ever, i == 0 ? SNAP_DATA_RSMA1 : SNAP_DATA_RSMA2,
@@ -59,51 +61,112 @@ int32_t rsmaSnapReaderOpen(SSma* pSma, int64_t sver, int64_t ever, SRsmaSnapRead
}
}
- // qtaskinfo
- // 1. add ref to qtaskinfo.v${ever} if exists and then start to replicate
+ // open qtaskinfo
+ if ((code = rsmaQTaskInfSnapReaderOpen(pReader, ever)) < 0) {
+ goto _err;
+ }
+
+ *ppReader = pReader;
+
+ return TSDB_CODE_SUCCESS;
+_err:
+ smaError("vgId:%d, vnode snapshot rsma reader open failed since %s", TD_VID(pVnode), tstrerror(code));
+ return TSDB_CODE_FAILED;
+}
+
+static int32_t rsmaQTaskInfSnapReaderOpen(SRSmaSnapReader* pReader, int64_t version) {
+ int32_t code = 0;
+ SSma* pSma = pReader->pSma;
+ SVnode* pVnode = pSma->pVnode;
+ SSmaEnv* pEnv = NULL;
+ SRSmaStat* pStat = NULL;
+
+ if (!(pEnv = SMA_RSMA_ENV(pSma))) {
+ smaInfo("vgId:%d, vnode snapshot rsma reader for qtaskinfo version %" PRIi64 " not need as env is NULL",
+ TD_VID(pVnode), version);
+ return TSDB_CODE_SUCCESS;
+ }
+
+ pStat = (SRSmaStat*)SMA_ENV_STAT(pEnv);
+
+ int32_t ref = tdRSmaFSRef(pReader->pSma, pStat, version);
+ if (ref < 1) {
+ smaInfo("vgId:%d, vnode snapshot rsma reader for qtaskinfo version %" PRIi64 " not need as ref is %d",
+ TD_VID(pVnode), version, ref);
+ return TSDB_CODE_SUCCESS;
+ }
+
char qTaskInfoFullName[TSDB_FILENAME_LEN];
- tdRSmaQTaskInfoGetFullName(TD_VID(pVnode), ever, tfsGetPrimaryPath(pVnode->pTfs), qTaskInfoFullName);
+ tdRSmaQTaskInfoGetFullName(TD_VID(pVnode), version, tfsGetPrimaryPath(pVnode->pTfs), qTaskInfoFullName);
if (!taosCheckExistFile(qTaskInfoFullName)) {
- smaInfo("vgId:%d, vnode snapshot rsma reader for qtaskinfo not need as %s not exists", TD_VID(pVnode),
- qTaskInfoFullName);
- } else {
- pReader->pQTaskFReader = taosMemoryCalloc(1, sizeof(SQTaskFReader));
- if (!pReader->pQTaskFReader) {
- code = TSDB_CODE_OUT_OF_MEMORY;
- goto _err;
- }
+ tdRSmaFSUnRef(pSma, pStat, version);
+ smaInfo("vgId:%d, vnode snapshot rsma reader for qtaskinfo version %" PRIi64 " not need as %s not exists",
+            TD_VID(pVnode), version, qTaskInfoFullName);
+ return TSDB_CODE_SUCCESS;
+ }
- TdFilePtr qTaskF = taosOpenFile(qTaskInfoFullName, TD_FILE_READ);
- if (!qTaskF) {
- code = TAOS_SYSTEM_ERROR(errno);
- goto _err;
- }
- pReader->pQTaskFReader->pReadH = qTaskF;
-#if 0
- SQTaskFile* pQTaskF = &pReader->pQTaskFReader->fTask;
- pQTaskF->nRef = 1;
-#endif
+ pReader->pQTaskFReader = taosMemoryCalloc(1, sizeof(SQTaskFReader));
+ if (!pReader->pQTaskFReader) {
+ code = TSDB_CODE_OUT_OF_MEMORY;
+ goto _end;
}
- *ppReader = pReader;
- smaInfo("vgId:%d, vnode snapshot rsma reader opened %s succeed", TD_VID(pVnode), qTaskInfoFullName);
+ TdFilePtr fp = taosOpenFile(qTaskInfoFullName, TD_FILE_READ);
+ if (!fp) {
+ code = TAOS_SYSTEM_ERROR(errno);
+ taosMemoryFreeClear(pReader->pQTaskFReader);
+ goto _end;
+ }
+
+ pReader->pQTaskFReader->pReadH = fp;
+ pReader->pQTaskFReader->pSma = pSma;
+  pReader->pQTaskFReader->version = version;
+
+_end:
+ if (code < 0) {
+ tdRSmaFSUnRef(pSma, pStat, version);
+    smaError("vgId:%d, vnode snapshot rsma reader open %s failed since %s", TD_VID(pVnode), qTaskInfoFullName, tstrerror(code));
+ return TSDB_CODE_FAILED;
+ }
+
+ smaInfo("vgId:%d, vnode snapshot rsma reader open %s succeed", TD_VID(pVnode), qTaskInfoFullName);
+ return TSDB_CODE_SUCCESS;
+}
+
+static int32_t rsmaQTaskInfSnapReaderClose(SQTaskFReader** ppReader) {
+ if (!(*ppReader)) {
+ return TSDB_CODE_SUCCESS;
+ }
+
+ SSma* pSma = (*ppReader)->pSma;
+ SRSmaStat* pStat = SMA_RSMA_STAT(pSma);
+ int64_t version = (*ppReader)->version;
+
+ taosCloseFile(&(*ppReader)->pReadH);
+ tdRSmaFSUnRef(pSma, pStat, version);
+ taosMemoryFreeClear(*ppReader);
+ smaInfo("vgId:%d, vnode snapshot rsma reader closed for qTaskInfo version %" PRIi64, SMA_VID(pSma), version);
+
return TSDB_CODE_SUCCESS;
-_err:
- smaError("vgId:%d, vnode snapshot rsma reader opened failed since %s", TD_VID(pVnode), tstrerror(code));
- return TSDB_CODE_FAILED;
}
-static int32_t rsmaSnapReadQTaskInfo(SRsmaSnapReader* pReader, uint8_t** ppBuf) {
+static int32_t rsmaSnapReadQTaskInfo(SRSmaSnapReader* pReader, uint8_t** ppBuf) {
int32_t code = 0;
SSma* pSma = pReader->pSma;
int64_t n = 0;
uint8_t* pBuf = NULL;
SQTaskFReader* qReader = pReader->pQTaskFReader;
+ if (!qReader) {
+ *ppBuf = NULL;
+ smaInfo("vgId:%d, vnode snapshot rsma reader qtaskinfo, qTaskReader is NULL", SMA_VID(pSma));
+ return 0;
+ }
+
if (!qReader->pReadH) {
*ppBuf = NULL;
- smaInfo("vgId:%d, vnode snapshot rsma reader qtaskinfo, readh is empty", SMA_VID(pSma));
+ smaInfo("vgId:%d, vnode snapshot rsma reader qtaskinfo, readh is NULL", SMA_VID(pSma));
return 0;
}
@@ -153,7 +216,7 @@ _err:
return code;
}
-int32_t rsmaSnapRead(SRsmaSnapReader* pReader, uint8_t** ppData) {
+int32_t rsmaSnapRead(SRSmaSnapReader* pReader, uint8_t** ppData) {
int32_t code = 0;
*ppData = NULL;
@@ -205,9 +268,9 @@ _err:
return code;
}
-int32_t rsmaSnapReaderClose(SRsmaSnapReader** ppReader) {
+int32_t rsmaSnapReaderClose(SRSmaSnapReader** ppReader) {
int32_t code = 0;
- SRsmaSnapReader* pReader = *ppReader;
+ SRSmaSnapReader* pReader = *ppReader;
for (int32_t i = 0; i < TSDB_RETENTION_L2; ++i) {
if (pReader->pDataReader[i]) {
@@ -215,11 +278,7 @@ int32_t rsmaSnapReaderClose(SRsmaSnapReader** ppReader) {
}
}
- if (pReader->pQTaskFReader) {
- taosCloseFile(&pReader->pQTaskFReader->pReadH);
- taosMemoryFreeClear(pReader->pQTaskFReader);
- smaInfo("vgId:%d, vnode snapshot rsma reader closed for qTaskInfo", SMA_VID(pReader->pSma));
- }
+ rsmaQTaskInfSnapReaderClose(&pReader->pQTaskFReader);
smaInfo("vgId:%d, vnode snapshot rsma reader closed", SMA_VID(pReader->pSma));
@@ -227,8 +286,8 @@ int32_t rsmaSnapReaderClose(SRsmaSnapReader** ppReader) {
return code;
}
-// SRsmaSnapWriter ========================================
-struct SRsmaSnapWriter {
+// SRSmaSnapWriter ========================================
+struct SRSmaSnapWriter {
SSma* pSma;
int64_t sver;
int64_t ever;
@@ -244,13 +303,13 @@ struct SRsmaSnapWriter {
SQTaskFWriter* pQTaskFWriter;
};
-int32_t rsmaSnapWriterOpen(SSma* pSma, int64_t sver, int64_t ever, SRsmaSnapWriter** ppWriter) {
+int32_t rsmaSnapWriterOpen(SSma* pSma, int64_t sver, int64_t ever, SRSmaSnapWriter** ppWriter) {
int32_t code = 0;
- SRsmaSnapWriter* pWriter = NULL;
+ SRSmaSnapWriter* pWriter = NULL;
SVnode* pVnode = pSma->pVnode;
// alloc
- pWriter = (SRsmaSnapWriter*)taosMemoryCalloc(1, sizeof(*pWriter));
+ pWriter = (SRSmaSnapWriter*)taosMemoryCalloc(1, sizeof(*pWriter));
if (pWriter == NULL) {
code = TSDB_CODE_OUT_OF_MEMORY;
goto _err;
@@ -301,9 +360,9 @@ _err:
return code;
}
-int32_t rsmaSnapWriterClose(SRsmaSnapWriter** ppWriter, int8_t rollback) {
+int32_t rsmaSnapWriterClose(SRSmaSnapWriter** ppWriter, int8_t rollback) {
int32_t code = 0;
- SRsmaSnapWriter* pWriter = *ppWriter;
+ SRSmaSnapWriter* pWriter = *ppWriter;
SVnode* pVnode = pWriter->pSma->pVnode;
if (rollback) {
@@ -332,7 +391,7 @@ int32_t rsmaSnapWriterClose(SRsmaSnapWriter** ppWriter, int8_t rollback) {
pWriter->pQTaskFWriter->fname, qTaskInfoFullName);
// rsma restore
- if ((code = tdRsmaRestore(pWriter->pSma, RSMA_RESTORE_SYNC, pWriter->ever)) < 0) {
+ if ((code = tdRSmaRestore(pWriter->pSma, RSMA_RESTORE_SYNC, pWriter->ever)) < 0) {
goto _err;
}
smaInfo("vgId:%d, vnode snapshot rsma writer restore from %s succeed", SMA_VID(pWriter->pSma), qTaskInfoFullName);
@@ -349,7 +408,7 @@ _err:
return code;
}
-int32_t rsmaSnapWrite(SRsmaSnapWriter* pWriter, uint8_t* pData, uint32_t nData) {
+int32_t rsmaSnapWrite(SRSmaSnapWriter* pWriter, uint8_t* pData, uint32_t nData) {
int32_t code = 0;
SSnapDataHdr* pHdr = (SSnapDataHdr*)pData;
@@ -377,7 +436,7 @@ _err:
return code;
}
-static int32_t rsmaSnapWriteQTaskInfo(SRsmaSnapWriter* pWriter, uint8_t* pData, uint32_t nData) {
+static int32_t rsmaSnapWriteQTaskInfo(SRSmaSnapWriter* pWriter, uint8_t* pData, uint32_t nData) {
int32_t code = 0;
SQTaskFWriter* qWriter = pWriter->pQTaskFWriter;
diff --git a/source/dnode/vnode/src/sma/smaTimeRange.c b/source/dnode/vnode/src/sma/smaTimeRange.c
index 1687cd46a07a7b0a70107eb825fb06b6f9314441..e2cb51f586b2ec0306d15a39ed3048a321ff1179 100644
--- a/source/dnode/vnode/src/sma/smaTimeRange.c
+++ b/source/dnode/vnode/src/sma/smaTimeRange.c
@@ -20,6 +20,10 @@
#define SMA_STORAGE_MINUTES_DAY 1440
#define SMA_STORAGE_SPLIT_FACTOR 14400 // least records in tsma file
+static int32_t tdProcessTSmaCreateImpl(SSma *pSma, int64_t version, const char *pMsg);
+static int32_t tdProcessTSmaInsertImpl(SSma *pSma, int64_t indexUid, const char *msg);
+static int32_t tdProcessTSmaGetDaysImpl(SVnodeCfg *pCfg, void *pCont, uint32_t contLen, int32_t *days);
+
// TODO: Who is responsible for resource allocate and release?
int32_t tdProcessTSmaInsert(SSma *pSma, int64_t indexUid, const char *msg) {
int32_t code = TSDB_CODE_SUCCESS;
@@ -59,7 +63,7 @@ int32_t smaGetTSmaDays(SVnodeCfg *pCfg, void *pCont, uint32_t contLen, int32_t *
* @param days unit is minute
* @return int32_t
*/
-int32_t tdProcessTSmaGetDaysImpl(SVnodeCfg *pCfg, void *pCont, uint32_t contLen, int32_t *days) {
+static int32_t tdProcessTSmaGetDaysImpl(SVnodeCfg *pCfg, void *pCont, uint32_t contLen, int32_t *days) {
SDecoder coder = {0};
tDecoderInit(&coder, pCont, contLen);
@@ -106,7 +110,7 @@ _err:
* @param pMsg
* @return int32_t
*/
-int32_t tdProcessTSmaCreateImpl(SSma *pSma, int64_t version, const char *pMsg) {
+static int32_t tdProcessTSmaCreateImpl(SSma *pSma, int64_t version, const char *pMsg) {
SSmaCfg *pCfg = (SSmaCfg *)pMsg;
if (TD_VID(pSma->pVnode) == pCfg->dstVgId) {
@@ -145,7 +149,7 @@ int32_t tdProcessTSmaCreateImpl(SSma *pSma, int64_t version, const char *pMsg) {
* @param msg
* @return int32_t
*/
-int32_t tdProcessTSmaInsertImpl(SSma *pSma, int64_t indexUid, const char *msg) {
+static int32_t tdProcessTSmaInsertImpl(SSma *pSma, int64_t indexUid, const char *msg) {
const SArray *pDataBlocks = (const SArray *)msg;
// TODO: destroy SSDataBlocks(msg)
if (!pDataBlocks) {
@@ -174,7 +178,6 @@ int32_t tdProcessTSmaInsertImpl(SSma *pSma, int64_t indexUid, const char *msg) {
return TSDB_CODE_FAILED;
}
- tdRefSmaStat(pSma, pStat);
pTsmaStat = SMA_STAT_TSMA(pStat);
if (!pTsmaStat->pTSma) {
@@ -226,9 +229,7 @@ int32_t tdProcessTSmaInsertImpl(SSma *pSma, int64_t indexUid, const char *msg) {
goto _err;
}
- tdUnRefSmaStat(pSma, pStat);
return TSDB_CODE_SUCCESS;
_err:
- tdUnRefSmaStat(pSma, pStat);
return TSDB_CODE_FAILED;
}
diff --git a/source/dnode/vnode/src/sma/smaUtil.c b/source/dnode/vnode/src/sma/smaUtil.c
index d771797963a5cd9d242fea1f4d65a5634f12b5e8..a4ba0a61a57b781ff062328fe18920f3ce15dacc 100644
--- a/source/dnode/vnode/src/sma/smaUtil.c
+++ b/source/dnode/vnode/src/sma/smaUtil.c
@@ -305,93 +305,4 @@ int32_t tdReleaseSmaRef(int32_t rsetId, int64_t refId) {
smaDebug("rsma release ref for rsetId:%" PRIi64 " refId:%d success", rsetId, refId);
return TSDB_CODE_SUCCESS;
-}
-
-static int32_t tdCloneQTaskInfo(SSma *pSma, qTaskInfo_t dstTaskInfo, qTaskInfo_t srcTaskInfo, SRSmaParam *param,
- tb_uid_t suid, int8_t idx) {
- SVnode *pVnode = pSma->pVnode;
- char *pOutput = NULL;
- int32_t len = 0;
-
- if ((terrno = qSerializeTaskStatus(srcTaskInfo, &pOutput, &len)) < 0) {
- smaError("vgId:%d, rsma clone, table %" PRIi64 " serialize qTaskInfo failed since %s", TD_VID(pVnode), suid,
- terrstr());
- goto _err;
- }
-
- SReadHandle handle = {
- .meta = pVnode->pMeta,
- .vnode = pVnode,
- .initTqReader = 1,
- };
- ASSERT(!dstTaskInfo);
- dstTaskInfo = qCreateStreamExecTaskInfo(param->qmsg[idx], &handle);
- if (!dstTaskInfo) {
- terrno = TSDB_CODE_RSMA_QTASKINFO_CREATE;
- goto _err;
- }
-
- if (qDeserializeTaskStatus(dstTaskInfo, pOutput, len) < 0) {
- smaError("vgId:%d, rsma clone, restore rsma task for table:%" PRIi64 " failed since %s", TD_VID(pVnode), suid,
- terrstr());
- goto _err;
- }
-
- smaDebug("vgId:%d, rsma clone, restore rsma task for table:%" PRIi64 " succeed", TD_VID(pVnode), suid);
-
- taosMemoryFreeClear(pOutput);
- return TSDB_CODE_SUCCESS;
-_err:
- taosMemoryFreeClear(pOutput);
- tdFreeQTaskInfo(dstTaskInfo, TD_VID(pVnode), idx + 1);
- smaError("vgId:%d, rsma clone, restore rsma task for table:%" PRIi64 " failed since %s", TD_VID(pVnode), suid,
- terrstr());
- return TSDB_CODE_FAILED;
-}
-
-/**
- * @brief Clone qTaskInfo of SRSmaInfo
- *
- * @param pSma
- * @param pInfo
- * @return int32_t
- */
-int32_t tdCloneRSmaInfo(SSma *pSma, SRSmaInfo *pInfo) {
- SRSmaParam *param = NULL;
- if (!pInfo) {
- return TSDB_CODE_SUCCESS;
- }
-
- SMetaReader mr = {0};
- metaReaderInit(&mr, SMA_META(pSma), 0);
- smaDebug("vgId:%d, rsma clone qTaskInfo for suid:%" PRIi64, SMA_VID(pSma), pInfo->suid);
- if (metaGetTableEntryByUid(&mr, pInfo->suid) < 0) {
- smaError("vgId:%d, rsma clone, failed to get table meta for %" PRIi64 " since %s", SMA_VID(pSma), pInfo->suid,
- terrstr());
- goto _err;
- }
- ASSERT(mr.me.type == TSDB_SUPER_TABLE);
- ASSERT(mr.me.uid == pInfo->suid);
- if (TABLE_IS_ROLLUP(mr.me.flags)) {
- param = &mr.me.stbEntry.rsmaParam;
- for (int32_t i = 0; i < TSDB_RETENTION_L2; ++i) {
- if (!pInfo->iTaskInfo[i]) {
- continue;
- }
- if (tdCloneQTaskInfo(pSma, pInfo->taskInfo[i], pInfo->iTaskInfo[i], param, pInfo->suid, i) < 0) {
- goto _err;
- }
- }
- smaDebug("vgId:%d, rsma clone env success for %" PRIi64, SMA_VID(pSma), pInfo->suid);
- } else {
- terrno = TSDB_CODE_RSMA_INVALID_SCHEMA;
- goto _err;
- }
-
- metaReaderClear(&mr);
- return TSDB_CODE_SUCCESS;
-_err:
- metaReaderClear(&mr);
- smaError("vgId:%d, rsma clone env failed for %" PRIi64 " since %s", SMA_VID(pSma), pInfo->suid, terrstr());
- return TSDB_CODE_FAILED;
}
\ No newline at end of file
diff --git a/source/dnode/vnode/src/tq/tq.c b/source/dnode/vnode/src/tq/tq.c
index c6bc8e6e598507ea820bb109e84fbb41a0ed099b..f9c2757c3778446dfc75cca2fa55556e54d28d44 100644
--- a/source/dnode/vnode/src/tq/tq.c
+++ b/source/dnode/vnode/src/tq/tq.c
@@ -79,6 +79,10 @@ STQ* tqOpen(const char* path, SVnode* pVnode) {
ASSERT(0);
}
+ if (streamLoadTasks(pTq->pStreamMeta) < 0) {
+ ASSERT(0);
+ }
+
return pTq;
}
@@ -96,7 +100,13 @@ void tqClose(STQ* pTq) {
}
int32_t tqSendMetaPollRsp(STQ* pTq, const SRpcMsg* pMsg, const SMqPollReq* pReq, const SMqMetaRsp* pRsp) {
- int32_t tlen = sizeof(SMqRspHead) + tEncodeSMqMetaRsp(NULL, pRsp);
+ int32_t len = 0;
+ int32_t code = 0;
+ tEncodeSize(tEncodeSMqMetaRsp, pRsp, len, code);
+ if (code < 0) {
+ return -1;
+ }
+ int32_t tlen = sizeof(SMqRspHead) + len;
void* buf = rpcMallocCont(tlen);
if (buf == NULL) {
return -1;
@@ -107,7 +117,11 @@ int32_t tqSendMetaPollRsp(STQ* pTq, const SRpcMsg* pMsg, const SMqPollReq* pReq,
((SMqRspHead*)buf)->consumerId = pReq->consumerId;
void* abuf = POINTER_SHIFT(buf, sizeof(SMqRspHead));
- tEncodeSMqMetaRsp(&abuf, pRsp);
+
+ SEncoder encoder = {0};
+ tEncoderInit(&encoder, abuf, len);
+ tEncodeSMqMetaRsp(&encoder, pRsp);
+ tEncoderClear(&encoder);
SRpcMsg resp = {
.info = pMsg->info,
@@ -117,9 +131,8 @@ int32_t tqSendMetaPollRsp(STQ* pTq, const SRpcMsg* pMsg, const SMqPollReq* pReq,
};
tmsgSendRsp(&resp);
- tqDebug("vgId:%d, from consumer:%" PRId64 ", (epoch %d) send rsp, res msg type %d, reqOffset:%" PRId64
- ", rspOffset:%" PRId64,
- TD_VID(pTq->pVnode), pReq->consumerId, pReq->epoch, pRsp->resMsgType, pRsp->reqOffset, pRsp->rspOffset);
+ tqDebug("vgId:%d, from consumer:%" PRId64 ", (epoch %d) send rsp, res msg type %d, offset type:%d",
+ TD_VID(pTq->pVnode), pReq->consumerId, pReq->epoch, pRsp->resMsgType, pRsp->rspOffset.type);
return 0;
}
@@ -183,6 +196,66 @@ int32_t tqSendDataRsp(STQ* pTq, const SRpcMsg* pMsg, const SMqPollReq* pReq, con
return 0;
}
+int32_t tqSendTaosxRsp(STQ* pTq, const SRpcMsg* pMsg, const SMqPollReq* pReq, const STaosxRsp* pRsp) {
+ ASSERT(taosArrayGetSize(pRsp->blockData) == pRsp->blockNum);
+ ASSERT(taosArrayGetSize(pRsp->blockDataLen) == pRsp->blockNum);
+
+ if (pRsp->withSchema) {
+ ASSERT(taosArrayGetSize(pRsp->blockSchema) == pRsp->blockNum);
+ } else {
+ ASSERT(taosArrayGetSize(pRsp->blockSchema) == 0);
+ }
+
+ if (pRsp->reqOffset.type == TMQ_OFFSET__LOG) {
+ if (pRsp->blockNum > 0) {
+ ASSERT(pRsp->rspOffset.version > pRsp->reqOffset.version);
+ } else {
+ ASSERT(pRsp->rspOffset.version >= pRsp->reqOffset.version);
+ }
+ }
+
+ int32_t len = 0;
+ int32_t code = 0;
+ tEncodeSize(tEncodeSTaosxRsp, pRsp, len, code);
+ if (code < 0) {
+ return -1;
+ }
+ int32_t tlen = sizeof(SMqRspHead) + len;
+ void* buf = rpcMallocCont(tlen);
+ if (buf == NULL) {
+ return -1;
+ }
+
+ ((SMqRspHead*)buf)->mqMsgType = TMQ_MSG_TYPE__TAOSX_RSP;
+ ((SMqRspHead*)buf)->epoch = pReq->epoch;
+ ((SMqRspHead*)buf)->consumerId = pReq->consumerId;
+
+ void* abuf = POINTER_SHIFT(buf, sizeof(SMqRspHead));
+
+ SEncoder encoder = {0};
+ tEncoderInit(&encoder, abuf, len);
+ tEncodeSTaosxRsp(&encoder, pRsp);
+ tEncoderClear(&encoder);
+
+ SRpcMsg rsp = {
+ .info = pMsg->info,
+ .pCont = buf,
+ .contLen = tlen,
+ .code = 0,
+ };
+ tmsgSendRsp(&rsp);
+
+ char buf1[80] = {0};
+ char buf2[80] = {0};
+ tFormatOffset(buf1, 80, &pRsp->reqOffset);
+ tFormatOffset(buf2, 80, &pRsp->rspOffset);
+ tqDebug("taosx rsp, vgId:%d, from consumer:%" PRId64
+ ", (epoch %d) send rsp, block num: %d, reqOffset:%s, rspOffset:%s",
+ TD_VID(pTq->pVnode), pReq->consumerId, pReq->epoch, pRsp->blockNum, buf1, buf2);
+
+ return 0;
+}
+
static FORCE_INLINE bool tqOffsetLessOrEqual(const STqOffset* pLeft, const STqOffset* pRight) {
return pLeft->val.type == TMQ_OFFSET__LOG && pRight->val.type == TMQ_OFFSET__LOG &&
pLeft->val.version <= pRight->val.version;
@@ -198,7 +271,7 @@ int32_t tqProcessOffsetCommitReq(STQ* pTq, int64_t version, char* msg, int32_t m
}
tDecoderClear(&decoder);
- if (offset.val.type == TMQ_OFFSET__SNAPSHOT_DATA) {
+ if (offset.val.type == TMQ_OFFSET__SNAPSHOT_DATA || offset.val.type == TMQ_OFFSET__SNAPSHOT_META) {
tqDebug("receive offset commit msg to %s on vgId:%d, offset(type:snapshot) uid:%" PRId64 ", ts:%" PRId64,
offset.subKey, TD_VID(pTq->pVnode), offset.val.uid, offset.val.ts);
} else if (offset.val.type == TMQ_OFFSET__LOG) {
@@ -290,10 +363,25 @@ static int32_t tqInitDataRsp(SMqDataRsp* pRsp, const SMqPollReq* pReq, int8_t su
return 0;
}
+static int32_t tqInitTaosxRsp(STaosxRsp* pRsp, const SMqPollReq* pReq) {
+ pRsp->reqOffset = pReq->reqOffset;
+
+ pRsp->withTbName = 1;
+ pRsp->withSchema = 1;
+ pRsp->blockData = taosArrayInit(0, sizeof(void*));
+ pRsp->blockDataLen = taosArrayInit(0, sizeof(int32_t));
+ pRsp->blockTbName = taosArrayInit(0, sizeof(void*));
+ pRsp->blockSchema = taosArrayInit(0, sizeof(void*));
+
+ if (pRsp->blockData == NULL || pRsp->blockDataLen == NULL || pRsp->blockTbName == NULL || pRsp->blockSchema == NULL) {
+ return -1;
+ }
+ return 0;
+}
+
int32_t tqProcessPollReq(STQ* pTq, SRpcMsg* pMsg) {
SMqPollReq* pReq = pMsg->pCont;
int64_t consumerId = pReq->consumerId;
- int64_t timeout = pReq->timeout;
int32_t reqEpoch = pReq->epoch;
int32_t code = 0;
STqOffsetVal reqOffset = pReq->reqOffset;
@@ -329,9 +417,6 @@ int32_t tqProcessPollReq(STQ* pTq, SRpcMsg* pMsg) {
tqDebug("tmq poll: consumer %" PRId64 " (epoch %d), subkey %s, recv poll req in vg %d, req offset %s", consumerId,
pReq->epoch, pHandle->subKey, TD_VID(pTq->pVnode), buf);
- SMqDataRsp dataRsp = {0};
- tqInitDataRsp(&dataRsp, pReq, pHandle->execHandle.subType);
-
// 2.reset offset if needed
if (reqOffset.type > 0) {
fetchOffsetNew = reqOffset;
@@ -345,62 +430,97 @@ int32_t tqProcessPollReq(STQ* pTq, SRpcMsg* pMsg) {
TD_VID(pTq->pVnode), formatBuf);
} else {
if (reqOffset.type == TMQ_OFFSET__RESET_EARLIEAST) {
- if (pReq->useSnapshot && pHandle->execHandle.subType == TOPIC_SUB_TYPE__COLUMN) {
- if (!pHandle->fetchMeta) {
- tqOffsetResetToData(&fetchOffsetNew, 0, 0);
+ if (pReq->useSnapshot) {
+ if (pHandle->fetchMeta) {
+ tqOffsetResetToMeta(&fetchOffsetNew, 0);
} else {
- // reset to meta
- ASSERT(0);
+ tqOffsetResetToData(&fetchOffsetNew, 0, 0);
}
} else {
tqOffsetResetToLog(&fetchOffsetNew, walGetFirstVer(pTq->pVnode->pWal));
}
} else if (reqOffset.type == TMQ_OFFSET__RESET_LATEST) {
+ SMqDataRsp dataRsp = {0};
+ tqInitDataRsp(&dataRsp, pReq, pHandle->execHandle.subType);
+
tqOffsetResetToLog(&dataRsp.rspOffset, walGetLastVer(pTq->pVnode->pWal));
tqDebug("tmq poll: consumer %" PRId64 ", subkey %s, vg %d, offset reset to %" PRId64, consumerId,
pHandle->subKey, TD_VID(pTq->pVnode), dataRsp.rspOffset.version);
if (tqSendDataRsp(pTq, pMsg, pReq, &dataRsp) < 0) {
code = -1;
}
- goto OVER;
+ tDeleteSMqDataRsp(&dataRsp);
+ return code;
} else if (reqOffset.type == TMQ_OFFSET__RESET_NONE) {
tqError("tmq poll: subkey %s, no offset committed for consumer %" PRId64
" in vg %d, subkey %s, reset none failed",
pHandle->subKey, consumerId, TD_VID(pTq->pVnode), pReq->subKey);
terrno = TSDB_CODE_TQ_NO_COMMITTED_OFFSET;
- code = -1;
- goto OVER;
+ return -1;
}
}
}
- // 3.query
if (pHandle->execHandle.subType == TOPIC_SUB_TYPE__COLUMN) {
- /*if (fetchOffsetNew.type == TMQ_OFFSET__LOG) {*/
- /*fetchOffsetNew.version++;*/
- /*}*/
- if (tqScan(pTq, pHandle, &dataRsp, &fetchOffsetNew) < 0) {
- ASSERT(0);
+ SMqDataRsp dataRsp = {0};
+ tqInitDataRsp(&dataRsp, pReq, pHandle->execHandle.subType);
+ tqScanData(pTq, pHandle, &dataRsp, &fetchOffsetNew);
+
+ if (tqSendDataRsp(pTq, pMsg, pReq, &dataRsp) < 0) {
code = -1;
- goto OVER;
}
- if (dataRsp.blockNum == 0) {
- // TODO add to async task pool
- /*dataRsp.rspOffset.version--;*/
+
+ tqDebug("tmq poll: consumer %ld, subkey %s, vg %d, send data blockNum:%d, offset type:%d, uid:%ld, version:%ld",
+ consumerId, pHandle->subKey, TD_VID(pTq->pVnode), dataRsp.blockNum, dataRsp.rspOffset.type,
+ dataRsp.rspOffset.uid, dataRsp.rspOffset.version);
+
+ tDeleteSMqDataRsp(&dataRsp);
+ return code;
+ }
+
+ // for taosx
+ ASSERT(pHandle->execHandle.subType != TOPIC_SUB_TYPE__COLUMN);
+
+ SMqMetaRsp metaRsp = {0};
+
+ STaosxRsp taosxRsp = {0};
+ tqInitTaosxRsp(&taosxRsp, pReq);
+
+ if (fetchOffsetNew.type != TMQ_OFFSET__LOG) {
+ tqScan(pTq, pHandle, &taosxRsp, &metaRsp, &fetchOffsetNew);
+
+ if (metaRsp.metaRspLen > 0) {
+ if (tqSendMetaPollRsp(pTq, pMsg, pReq, &metaRsp) < 0) {
+ code = -1;
+ }
+ tqDebug("tmq poll: consumer %ld, subkey %s, vg %d, send meta offset type:%d,uid:%ld,version:%ld", consumerId,
+ pHandle->subKey, TD_VID(pTq->pVnode), metaRsp.rspOffset.type, metaRsp.rspOffset.uid,
+ metaRsp.rspOffset.version);
+ taosMemoryFree(metaRsp.metaRsp);
+ tDeleteSTaosxRsp(&taosxRsp);
+ return code;
}
- if (tqSendDataRsp(pTq, pMsg, pReq, &dataRsp) < 0) {
- code = -1;
+
+ if (taosxRsp.blockNum > 0) {
+ if (tqSendTaosxRsp(pTq, pMsg, pReq, &taosxRsp) < 0) {
+ code = -1;
+ }
+ tDeleteSTaosxRsp(&taosxRsp);
+ return code;
+ } else {
+ fetchOffsetNew = taosxRsp.rspOffset;
}
- goto OVER;
+
+ tqDebug("taosx poll: consumer %ld, subkey %s, vg %d, send data blockNum:%d, offset type:%d,uid:%ld,version:%ld",
+ consumerId, pHandle->subKey, TD_VID(pTq->pVnode), taosxRsp.blockNum, taosxRsp.rspOffset.type,
+ taosxRsp.rspOffset.uid, taosxRsp.rspOffset.version);
}
- if (pHandle->execHandle.subType != TOPIC_SUB_TYPE__COLUMN) {
- ASSERT(fetchOffsetNew.type == TMQ_OFFSET__LOG);
+ if (fetchOffsetNew.type == TMQ_OFFSET__LOG) {
int64_t fetchVer = fetchOffsetNew.version + 1;
pCkHead = taosMemoryMalloc(sizeof(SWalCkHead) + 2048);
if (pCkHead == NULL) {
- code = -1;
- goto OVER;
+ return -1;
}
walSetReaderCapacity(pHandle->pWalReader, 2048);
@@ -415,14 +535,13 @@ int32_t tqProcessPollReq(STQ* pTq, SRpcMsg* pMsg) {
}
if (tqFetchLog(pTq, pHandle, &fetchVer, &pCkHead) < 0) {
- // TODO add push mgr
-
- tqOffsetResetToLog(&dataRsp.rspOffset, fetchVer);
- ASSERT(dataRsp.rspOffset.version >= dataRsp.reqOffset.version);
- if (tqSendDataRsp(pTq, pMsg, pReq, &dataRsp) < 0) {
+ tqOffsetResetToLog(&taosxRsp.rspOffset, fetchVer);
+ if (tqSendTaosxRsp(pTq, pMsg, pReq, &taosxRsp) < 0) {
code = -1;
}
- goto OVER;
+ tDeleteSTaosxRsp(&taosxRsp);
+ if (pCkHead) taosMemoryFree(pCkHead);
+ return code;
}
SWalCont* pHead = &pCkHead->head;
@@ -433,19 +552,19 @@ int32_t tqProcessPollReq(STQ* pTq, SRpcMsg* pMsg) {
if (pHead->msgType == TDMT_VND_SUBMIT) {
SSubmitReq* pCont = (SSubmitReq*)&pHead->body;
- if (tqLogScanExec(pTq, &pHandle->execHandle, pCont, &dataRsp) < 0) {
+ if (tqTaosxScanLog(pTq, pHandle, pCont, &taosxRsp) < 0) {
/*ASSERT(0);*/
}
// TODO batch optimization:
// TODO continue scan until meeting batch requirement
- if (dataRsp.blockNum > 0 /* threshold */) {
- tqOffsetResetToLog(&dataRsp.rspOffset, fetchVer);
- ASSERT(dataRsp.rspOffset.version >= dataRsp.reqOffset.version);
-
- if (tqSendDataRsp(pTq, pMsg, pReq, &dataRsp) < 0) {
+ if (taosxRsp.blockNum > 0 /* threshold */) {
+ tqOffsetResetToLog(&taosxRsp.rspOffset, fetchVer);
+ if (tqSendTaosxRsp(pTq, pMsg, pReq, &taosxRsp) < 0) {
code = -1;
}
- goto OVER;
+ tDeleteSTaosxRsp(&taosxRsp);
+ if (pCkHead) taosMemoryFree(pCkHead);
+ return code;
} else {
fetchVer++;
}
@@ -454,40 +573,22 @@ int32_t tqProcessPollReq(STQ* pTq, SRpcMsg* pMsg) {
ASSERT(pHandle->fetchMeta);
ASSERT(IS_META_MSG(pHead->msgType));
tqDebug("fetch meta msg, ver:%" PRId64 ", type:%d", pHead->version, pHead->msgType);
- SMqMetaRsp metaRsp = {0};
- /*metaRsp.reqOffset = pReq->reqOffset.version;*/
- metaRsp.rspOffset = fetchVer;
- /*metaRsp.rspOffsetNew.version = fetchVer;*/
- tqOffsetResetToLog(&metaRsp.reqOffsetNew, pReq->reqOffset.version);
- tqOffsetResetToLog(&metaRsp.rspOffsetNew, fetchVer);
+ tqOffsetResetToLog(&metaRsp.rspOffset, fetchVer);
metaRsp.resMsgType = pHead->msgType;
metaRsp.metaRspLen = pHead->bodyLen;
metaRsp.metaRsp = pHead->body;
if (tqSendMetaPollRsp(pTq, pMsg, pReq, &metaRsp) < 0) {
code = -1;
- goto OVER;
+ taosMemoryFree(pCkHead);
+ return code;
}
code = 0;
- goto OVER;
+ if (pCkHead) taosMemoryFree(pCkHead);
+ return code;
}
}
}
-
-OVER:
- if (pCkHead) taosMemoryFree(pCkHead);
- // TODO wrap in destroy func
- taosArrayDestroy(dataRsp.blockDataLen);
- taosArrayDestroyP(dataRsp.blockData, (FDelete)taosMemoryFree);
-
- if (dataRsp.withSchema) {
- taosArrayDestroyP(dataRsp.blockSchema, (FDelete)tDeleteSSchemaWrapper);
- }
-
- if (dataRsp.withTbName) {
- taosArrayDestroyP(dataRsp.blockTbName, (FDelete)taosMemoryFree);
- }
-
- return code;
+ return 0;
}
int32_t tqProcessVgDeleteReq(STQ* pTq, int64_t version, char* msg, int32_t msgLen) {
@@ -557,6 +658,7 @@ int32_t tqProcessVgChangeReq(STQ* pTq, int64_t version, char* msg, int32_t msgLe
pHandle->execHandle.subType = req.subType;
pHandle->fetchMeta = req.withMeta;
+
// TODO version should be assigned and refed during preprocess
SWalRef* pRef = walRefCommittedVer(pTq->pVnode->pWal);
if (pRef == NULL) {
@@ -566,36 +668,42 @@ int32_t tqProcessVgChangeReq(STQ* pTq, int64_t version, char* msg, int32_t msgLe
int64_t ver = pRef->refVer;
pHandle->pRef = pRef;
+ SReadHandle handle = {
+ .meta = pTq->pVnode->pMeta,
+ .vnode = pTq->pVnode,
+ .initTableReader = true,
+ .initTqReader = true,
+ .version = ver,
+ };
+ pHandle->snapshotVer = ver;
+
if (pHandle->execHandle.subType == TOPIC_SUB_TYPE__COLUMN) {
pHandle->execHandle.execCol.qmsg = req.qmsg;
- pHandle->snapshotVer = ver;
req.qmsg = NULL;
- SReadHandle handle = {
- .meta = pTq->pVnode->pMeta,
- .vnode = pTq->pVnode,
- .initTableReader = true,
- .initTqReader = true,
- .version = ver,
- };
- pHandle->execHandle.execCol.task =
+
+ pHandle->execHandle.task =
qCreateQueueExecTaskInfo(pHandle->execHandle.execCol.qmsg, &handle, &pHandle->execHandle.numOfCols,
&pHandle->execHandle.pSchemaWrapper);
- ASSERT(pHandle->execHandle.execCol.task);
+ ASSERT(pHandle->execHandle.task);
void* scanner = NULL;
- qExtractStreamScanner(pHandle->execHandle.execCol.task, &scanner);
+ qExtractStreamScanner(pHandle->execHandle.task, &scanner);
ASSERT(scanner);
pHandle->execHandle.pExecReader = qExtractReaderFromStreamScanner(scanner);
ASSERT(pHandle->execHandle.pExecReader);
} else if (pHandle->execHandle.subType == TOPIC_SUB_TYPE__DB) {
pHandle->pWalReader = walOpenReader(pTq->pVnode->pWal, NULL);
-
pHandle->execHandle.pExecReader = tqOpenReader(pTq->pVnode);
pHandle->execHandle.execDb.pFilterOutTbUid =
taosHashInit(64, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), false, HASH_NO_LOCK);
+ buildSnapContext(handle.meta, handle.version, 0, pHandle->execHandle.subType, pHandle->fetchMeta,
+ (SSnapContext**)(&handle.sContext));
+
+ pHandle->execHandle.task = qCreateQueueExecTaskInfo(NULL, &handle, NULL, NULL);
} else if (pHandle->execHandle.subType == TOPIC_SUB_TYPE__TABLE) {
pHandle->pWalReader = walOpenReader(pTq->pVnode->pWal, NULL);
pHandle->execHandle.execTb.suid = req.suid;
+
SArray* tbUidList = taosArrayInit(0, sizeof(int64_t));
vnodeGetCtbIdList(pTq->pVnode, req.suid, tbUidList);
tqDebug("vgId:%d, tq try to get all ctb, suid:%" PRId64, pTq->pVnode->config.vgId, req.suid);
@@ -606,6 +714,10 @@ int32_t tqProcessVgChangeReq(STQ* pTq, int64_t version, char* msg, int32_t msgLe
pHandle->execHandle.pExecReader = tqOpenReader(pTq->pVnode);
tqReaderSetTbUidList(pHandle->execHandle.pExecReader, tbUidList);
taosArrayDestroy(tbUidList);
+
+ buildSnapContext(handle.meta, handle.version, req.suid, pHandle->execHandle.subType, pHandle->fetchMeta,
+ (SSnapContext**)(&handle.sContext));
+ pHandle->execHandle.task = qCreateQueueExecTaskInfo(NULL, &handle, NULL, NULL);
}
taosHashPut(pTq->pHandle, req.subKey, strlen(req.subKey), pHandle, sizeof(STqHandle));
tqDebug("try to persist handle %s consumer %" PRId64, req.subKey, pHandle->consumerId);
@@ -648,17 +760,28 @@ int32_t tqExpandTask(STQ* pTq, SStreamTask* pTask) {
// expand executor
if (pTask->taskLevel == TASK_LEVEL__SOURCE) {
+ pTask->pState = streamStateOpen(pTq->pStreamMeta->path, pTask);
+ if (pTask->pState == NULL) {
+ return -1;
+ }
+
SReadHandle handle = {
.meta = pTq->pVnode->pMeta,
.vnode = pTq->pVnode,
.initTqReader = 1,
+ .pStateBackend = pTask->pState,
};
pTask->exec.executor = qCreateStreamExecTaskInfo(pTask->exec.qmsg, &handle);
ASSERT(pTask->exec.executor);
} else if (pTask->taskLevel == TASK_LEVEL__AGG) {
+ pTask->pState = streamStateOpen(pTq->pStreamMeta->path, pTask);
+ if (pTask->pState == NULL) {
+ return -1;
+ }
SReadHandle mgHandle = {
.vnode = NULL,
.numOfVgroups = (int32_t)taosArrayGetSize(pTask->childEpInfo),
+ .pStateBackend = pTask->pState,
};
pTask->exec.executor = qCreateStreamExecTaskInfo(pTask->exec.qmsg, &mgHandle);
ASSERT(pTask->exec.executor);
@@ -693,7 +816,86 @@ int32_t tqProcessTaskDeployReq(STQ* pTq, int64_t version, char* msg, int32_t msg
return streamMetaAddSerializedTask(pTq->pStreamMeta, version, msg, msgLen);
}
-int32_t tqProcessStreamTrigger(STQ* pTq, SSubmitReq* pReq, int64_t ver) {
+int32_t tqProcessDelReq(STQ* pTq, void* pReq, int32_t len, int64_t ver) {
+ bool failed = false;
+ SDecoder* pCoder = &(SDecoder){0};
+ SDeleteRes* pRes = &(SDeleteRes){0};
+
+ pRes->uidList = taosArrayInit(0, sizeof(tb_uid_t));
+ if (pRes->uidList == NULL) {
+ terrno = TSDB_CODE_OUT_OF_MEMORY;
+ failed = true;
+ }
+
+ tDecoderInit(pCoder, pReq, len);
+ tDecodeDeleteRes(pCoder, pRes);
+ tDecoderClear(pCoder);
+
+ int32_t sz = taosArrayGetSize(pRes->uidList);
+ if (sz == 0) {
+ taosArrayDestroy(pRes->uidList);
+ return 0;
+ }
+ SSDataBlock* pDelBlock = createSpecialDataBlock(STREAM_DELETE_DATA);
+ blockDataEnsureCapacity(pDelBlock, sz);
+ pDelBlock->info.rows = sz;
+ pDelBlock->info.version = ver;
+
+ for (int32_t i = 0; i < sz; i++) {
+ // start key column
+ SColumnInfoData* pStartCol = taosArrayGet(pDelBlock->pDataBlock, START_TS_COLUMN_INDEX);
+ colDataAppend(pStartCol, i, (const char*)&pRes->skey, false); // end key column
+ SColumnInfoData* pEndCol = taosArrayGet(pDelBlock->pDataBlock, END_TS_COLUMN_INDEX);
+ colDataAppend(pEndCol, i, (const char*)&pRes->ekey, false);
+ // uid column
+ SColumnInfoData* pUidCol = taosArrayGet(pDelBlock->pDataBlock, UID_COLUMN_INDEX);
+ int64_t* pUid = taosArrayGet(pRes->uidList, i);
+ colDataAppend(pUidCol, i, (const char*)pUid, false);
+
+ colDataAppendNULL(taosArrayGet(pDelBlock->pDataBlock, GROUPID_COLUMN_INDEX), i);
+ colDataAppendNULL(taosArrayGet(pDelBlock->pDataBlock, CALCULATE_START_TS_COLUMN_INDEX), i);
+ colDataAppendNULL(taosArrayGet(pDelBlock->pDataBlock, CALCULATE_END_TS_COLUMN_INDEX), i);
+ }
+
+ taosArrayDestroy(pRes->uidList);
+
+ void* pIter = NULL;
+ while (1) {
+ pIter = taosHashIterate(pTq->pStreamMeta->pTasks, pIter);
+ if (pIter == NULL) break;
+ SStreamTask* pTask = *(SStreamTask**)pIter;
+ if (pTask->taskLevel != TASK_LEVEL__SOURCE) continue;
+
+ qDebug("delete req enqueue stream task: %d, ver: %" PRId64, pTask->taskId, ver);
+
+ SStreamDataBlock* pStreamBlock = taosAllocateQitem(sizeof(SStreamDataBlock), DEF_QITEM);
+ pStreamBlock->type = STREAM_INPUT__DATA_BLOCK;
+ pStreamBlock->blocks = taosArrayInit(0, sizeof(SSDataBlock));
+ SSDataBlock block = {0};
+ assignOneDataBlock(&block, pDelBlock);
+ block.info.type = STREAM_DELETE_DATA;
+ taosArrayPush(pStreamBlock->blocks, &block);
+
+ if (!failed) {
+ if (streamTaskInput(pTask, (SStreamQueueItem*)pStreamBlock) < 0) {
+ qError("stream task input del failed, task id %d", pTask->taskId);
+ continue;
+ }
+
+ if (streamSchedExec(pTask) < 0) {
+ qError("stream task launch failed, task id %d", pTask->taskId);
+ continue;
+ }
+ } else {
+ streamTaskInputFail(pTask);
+ }
+ }
+ blockDataDestroy(pDelBlock);
+
+ return 0;
+}
+
+int32_t tqProcessSubmitReq(STQ* pTq, SSubmitReq* pReq, int64_t ver) {
void* pIter = NULL;
bool failed = false;
SStreamDataSubmit* pSubmit = NULL;
diff --git a/source/dnode/vnode/src/tq/tqExec.c b/source/dnode/vnode/src/tq/tqExec.c
index 435bbb77b8cab0b6c631f98e30444501ae8faf03..d00907f6778d176d1eabe77f92a243a1c5dd3cbd 100644
--- a/source/dnode/vnode/src/tq/tqExec.c
+++ b/source/dnode/vnode/src/tq/tqExec.c
@@ -60,18 +60,20 @@ static int32_t tqAddTbNameToRsp(const STQ* pTq, int64_t uid, SMqDataRsp* pRsp) {
return 0;
}
-int64_t tqScan(STQ* pTq, const STqHandle* pHandle, SMqDataRsp* pRsp, STqOffsetVal* pOffset) {
+int32_t tqScanData(STQ* pTq, const STqHandle* pHandle, SMqDataRsp* pRsp, STqOffsetVal* pOffset) {
const STqExecHandle* pExec = &pHandle->execHandle;
- qTaskInfo_t task = pExec->execCol.task;
+ ASSERT(pExec->subType == TOPIC_SUB_TYPE__COLUMN);
+
+ qTaskInfo_t task = pExec->task;
- if (qStreamPrepareScan(task, pOffset) < 0) {
+ if (qStreamPrepareScan(task, pOffset, pHandle->execHandle.subType) < 0) {
tqDebug("prepare scan failed, return");
if (pOffset->type == TMQ_OFFSET__LOG) {
pRsp->rspOffset = *pOffset;
return 0;
} else {
tqOffsetResetToLog(pOffset, pHandle->snapshotVer);
- if (qStreamPrepareScan(task, pOffset) < 0) {
+ if (qStreamPrepareScan(task, pOffset, pHandle->execHandle.subType) < 0) {
tqDebug("prepare scan failed, return");
pRsp->rspOffset = *pOffset;
return 0;
@@ -83,124 +85,148 @@ int64_t tqScan(STQ* pTq, const STqHandle* pHandle, SMqDataRsp* pRsp, STqOffsetVa
while (1) {
SSDataBlock* pDataBlock = NULL;
uint64_t ts = 0;
- tqDebug("task start to execute");
+ tqDebug("tmq task start to execute");
if (qExecTask(task, &pDataBlock, &ts) < 0) {
ASSERT(0);
}
- tqDebug("task execute end, get %p", pDataBlock);
+ tqDebug("tmq task executed, get %p", pDataBlock);
- if (pDataBlock != NULL) {
- if (pRsp->withTbName) {
- if (pOffset->type == TMQ_OFFSET__LOG) {
- int64_t uid = pExec->pExecReader->msgIter.uid;
- if (tqAddTbNameToRsp(pTq, uid, pRsp) < 0) {
- continue;
- }
- } else {
- pRsp->withTbName = 0;
- }
- }
- tqAddBlockDataToRsp(pDataBlock, pRsp, pExec->numOfCols);
- pRsp->blockNum++;
- if (pOffset->type == TMQ_OFFSET__LOG) {
- continue;
- } else {
- rowCnt += pDataBlock->info.rows;
- if (rowCnt <= 4096) continue;
- }
+ if (pDataBlock == NULL) {
+ break;
}
- if (pRsp->blockNum == 0 && pOffset->type == TMQ_OFFSET__SNAPSHOT_DATA) {
- tqDebug("vgId: %d, tsdb consume over, switch to wal, ver %" PRId64, TD_VID(pTq->pVnode),
- pHandle->snapshotVer + 1);
- tqOffsetResetToLog(pOffset, pHandle->snapshotVer);
- qStreamPrepareScan(task, pOffset);
- continue;
- }
-
- void* meta = qStreamExtractMetaMsg(task);
- if (meta != NULL) {
- // tq add meta to rsp
- }
+ tqAddBlockDataToRsp(pDataBlock, pRsp, pExec->numOfCols);
+ pRsp->blockNum++;
- if (qStreamExtractOffset(task, &pRsp->rspOffset) < 0) {
- ASSERT(0);
+ if (pOffset->type == TMQ_OFFSET__SNAPSHOT_DATA) {
+ rowCnt += pDataBlock->info.rows;
+ if (rowCnt >= 4096) break;
}
+ }
- ASSERT(pRsp->rspOffset.type != 0);
+ if (qStreamExtractOffset(task, &pRsp->rspOffset) < 0) {
+ ASSERT(0);
+ return -1;
+ }
+ ASSERT(pRsp->rspOffset.type != 0);
-#if 0
- if (pRsp->reqOffset.type == TMQ_OFFSET__LOG) {
- if (pRsp->blockNum > 0) {
- ASSERT(pRsp->rspOffset.version > pRsp->reqOffset.version);
- } else {
- ASSERT(pRsp->rspOffset.version >= pRsp->reqOffset.version);
- }
+ if (pRsp->withTbName) {
+ if (pRsp->rspOffset.type == TMQ_OFFSET__LOG) {
+ int64_t uid = pExec->pExecReader->msgIter.uid;
+ tqAddTbNameToRsp(pTq, uid, pRsp);
+ } else {
+ pRsp->withTbName = false;
}
-#endif
-
- tqDebug("task exec exited");
- break;
}
+ ASSERT(pRsp->withSchema == false);
return 0;
}
-#if 0
-int32_t tqScanSnapshot(STQ* pTq, const STqExecHandle* pExec, SMqDataRsp* pRsp, STqOffsetVal offset, int32_t workerId) {
- ASSERT(pExec->subType == TOPIC_SUB_TYPE__COLUMN);
- qTaskInfo_t task = pExec->execCol.task[workerId];
+int32_t tqScan(STQ* pTq, const STqHandle* pHandle, STaosxRsp* pRsp, SMqMetaRsp* pMetaRsp, STqOffsetVal* pOffset) {
+ const STqExecHandle* pExec = &pHandle->execHandle;
+ qTaskInfo_t task = pExec->task;
- if (qStreamPrepareTsdbScan(task, offset.uid, offset.ts) < 0) {
- ASSERT(0);
+ if (qStreamPrepareScan(task, pOffset, pHandle->execHandle.subType) < 0) {
+ tqDebug("prepare scan failed, return");
+ if (pOffset->type == TMQ_OFFSET__LOG) {
+ pRsp->rspOffset = *pOffset;
+ return 0;
+ } else {
+ tqOffsetResetToLog(pOffset, pHandle->snapshotVer);
+ if (qStreamPrepareScan(task, pOffset, pHandle->execHandle.subType) < 0) {
+ tqDebug("prepare scan failed, return");
+ pRsp->rspOffset = *pOffset;
+ return 0;
+ }
+ }
}
int32_t rowCnt = 0;
while (1) {
SSDataBlock* pDataBlock = NULL;
uint64_t ts = 0;
+ tqDebug("tmqsnap task start to execute");
if (qExecTask(task, &pDataBlock, &ts) < 0) {
ASSERT(0);
}
- if (pDataBlock == NULL) break;
+ tqDebug("tmqsnap task execute end, get %p", pDataBlock);
- ASSERT(pDataBlock->info.rows != 0);
- ASSERT(taosArrayGetSize(pDataBlock->pDataBlock) != 0);
+ if (pDataBlock != NULL) {
+ if (pRsp->withTbName) {
+ int64_t uid = 0;
+ if (pOffset->type == TMQ_OFFSET__LOG) {
+ uid = pExec->pExecReader->msgIter.uid;
+ if (tqAddTbNameToRsp(pTq, uid, (SMqDataRsp*)pRsp) < 0) {
+ continue;
+ }
+ } else {
+ char* tbName = strdup(qExtractTbnameFromTask(task));
+ taosArrayPush(pRsp->blockTbName, &tbName);
+ }
+ }
+ if (pRsp->withSchema) {
+ if (pOffset->type == TMQ_OFFSET__LOG) {
+ tqAddBlockSchemaToRsp(pExec, (SMqDataRsp*)pRsp);
+ } else {
+ SSchemaWrapper* pSW = tCloneSSchemaWrapper(qExtractSchemaFromTask(task));
+ taosArrayPush(pRsp->blockSchema, &pSW);
+ }
+ }
- tqAddBlockDataToRsp(pDataBlock, pRsp);
+ tqAddBlockDataToRsp(pDataBlock, (SMqDataRsp*)pRsp, taosArrayGetSize(pDataBlock->pDataBlock));
+ pRsp->blockNum++;
+ if (pOffset->type == TMQ_OFFSET__LOG) {
+ continue;
+ } else {
+ rowCnt += pDataBlock->info.rows;
+ if (rowCnt <= 4096) continue;
+ }
+ }
- if (pRsp->withTbName) {
- pRsp->withTbName = 0;
-#if 0
- int64_t uid;
- int64_t ts;
- if (qGetStreamScanStatus(task, &uid, &ts) < 0) {
- ASSERT(0);
+ if (pDataBlock == NULL && pOffset->type == TMQ_OFFSET__SNAPSHOT_DATA) {
+ if (qStreamExtractPrepareUid(task) != 0) {
+ continue;
}
- tqAddTbNameToRsp(pTq, uid, pRsp);
-#endif
+ tqDebug("tmqsnap vgId: %d, tsdb consume over, switch to wal, ver %" PRId64, TD_VID(pTq->pVnode),
+ pHandle->snapshotVer + 1);
+ break;
}
- pRsp->blockNum++;
- rowCnt += pDataBlock->info.rows;
- if (rowCnt >= 4096) break;
+ if (pRsp->blockNum > 0) {
+ tqDebug("tmqsnap task exec exited, get data");
+ break;
+ }
+
+ SMqMetaRsp* tmp = qStreamExtractMetaMsg(task);
+ if (tmp->rspOffset.type == TMQ_OFFSET__SNAPSHOT_DATA) {
+ tqOffsetResetToData(pOffset, tmp->rspOffset.uid, tmp->rspOffset.ts);
+ qStreamPrepareScan(task, pOffset, pHandle->execHandle.subType);
+ tmp->rspOffset.type = TMQ_OFFSET__SNAPSHOT_META;
+ tqDebug("tmqsnap task exec change to get data");
+ continue;
+ }
+
+ *pMetaRsp = *tmp;
+ tqDebug("tmqsnap task exec exited, get meta");
+
+ tqDebug("task exec exited");
+ break;
}
- int64_t uid;
- int64_t ts;
- if (qGetStreamScanStatus(task, &uid, &ts) < 0) {
+
+ if (qStreamExtractOffset(task, &pRsp->rspOffset) < 0) {
ASSERT(0);
}
- tqOffsetResetToData(&pRsp->rspOffset, uid, ts);
+ ASSERT(pRsp->rspOffset.type != 0);
return 0;
}
-#endif
-int32_t tqLogScanExec(STQ* pTq, STqExecHandle* pExec, SSubmitReq* pReq, SMqDataRsp* pRsp) {
+int32_t tqTaosxScanLog(STQ* pTq, STqHandle* pHandle, SSubmitReq* pReq, STaosxRsp* pRsp) {
+ STqExecHandle* pExec = &pHandle->execHandle;
ASSERT(pExec->subType != TOPIC_SUB_TYPE__COLUMN);
if (pExec->subType == TOPIC_SUB_TYPE__TABLE) {
- pRsp->withSchema = 1;
STqReader* pReader = pExec->pExecReader;
tqReaderSetDataMsg(pReader, pReq, 0);
while (tqNextDataBlock(pReader)) {
@@ -210,18 +236,31 @@ int32_t tqLogScanExec(STQ* pTq, STqExecHandle* pExec, SSubmitReq* pReq, SMqDataR
}
if (pRsp->withTbName) {
int64_t uid = pExec->pExecReader->msgIter.uid;
- if (tqAddTbNameToRsp(pTq, uid, pRsp) < 0) {
+ if (tqAddTbNameToRsp(pTq, uid, (SMqDataRsp*)pRsp) < 0) {
blockDataFreeRes(&block);
continue;
}
}
- tqAddBlockDataToRsp(&block, pRsp, taosArrayGetSize(block.pDataBlock));
+ if (pHandle->fetchMeta) {
+ SSubmitBlk* pBlk = pReader->pBlock;
+ if (pBlk->schemaLen > 0) {
+ if (pRsp->createTableNum == 0) {
+ pRsp->createTableLen = taosArrayInit(0, sizeof(int32_t));
+ pRsp->createTableReq = taosArrayInit(0, sizeof(void*));
+ }
+ void* createReq = taosMemoryCalloc(1, pBlk->schemaLen);
+ memcpy(createReq, pBlk->data, pBlk->schemaLen);
+ taosArrayPush(pRsp->createTableLen, &pBlk->schemaLen);
+ taosArrayPush(pRsp->createTableReq, &createReq);
+ pRsp->createTableNum++;
+ }
+ }
+ tqAddBlockDataToRsp(&block, (SMqDataRsp*)pRsp, taosArrayGetSize(block.pDataBlock));
blockDataFreeRes(&block);
- tqAddBlockSchemaToRsp(pExec, pRsp);
+ tqAddBlockSchemaToRsp(pExec, (SMqDataRsp*)pRsp);
pRsp->blockNum++;
}
} else if (pExec->subType == TOPIC_SUB_TYPE__DB) {
- pRsp->withSchema = 1;
STqReader* pReader = pExec->pExecReader;
tqReaderSetDataMsg(pReader, pReq, 0);
while (tqNextDataBlockFilterOut(pReader, pExec->execDb.pFilterOutTbUid)) {
@@ -231,14 +270,28 @@ int32_t tqLogScanExec(STQ* pTq, STqExecHandle* pExec, SSubmitReq* pReq, SMqDataR
}
if (pRsp->withTbName) {
int64_t uid = pExec->pExecReader->msgIter.uid;
- if (tqAddTbNameToRsp(pTq, uid, pRsp) < 0) {
+ if (tqAddTbNameToRsp(pTq, uid, (SMqDataRsp*)pRsp) < 0) {
blockDataFreeRes(&block);
continue;
}
}
- tqAddBlockDataToRsp(&block, pRsp, taosArrayGetSize(block.pDataBlock));
+ if (pHandle->fetchMeta) {
+ SSubmitBlk* pBlk = pReader->pBlock;
+ if (pBlk->schemaLen > 0) {
+ if (pRsp->createTableNum == 0) {
+ pRsp->createTableLen = taosArrayInit(0, sizeof(int32_t));
+ pRsp->createTableReq = taosArrayInit(0, sizeof(void*));
+ }
+ void* createReq = taosMemoryCalloc(1, pBlk->schemaLen);
+ memcpy(createReq, pBlk->data, pBlk->schemaLen);
+ taosArrayPush(pRsp->createTableLen, &pBlk->schemaLen);
+ taosArrayPush(pRsp->createTableReq, &createReq);
+ pRsp->createTableNum++;
+ }
+ }
+ tqAddBlockDataToRsp(&block, (SMqDataRsp*)pRsp, taosArrayGetSize(block.pDataBlock));
blockDataFreeRes(&block);
- tqAddBlockSchemaToRsp(pExec, pRsp);
+ tqAddBlockSchemaToRsp(pExec, (SMqDataRsp*)pRsp);
pRsp->blockNum++;
}
}
diff --git a/source/dnode/vnode/src/tq/tqMeta.c b/source/dnode/vnode/src/tq/tqMeta.c
index 405bc669bd23c27b2b234d2b60be4ef6def8bc80..62f8debccb8ff9c478de0fb331cc5741b503b011 100644
--- a/source/dnode/vnode/src/tq/tqMeta.c
+++ b/source/dnode/vnode/src/tq/tqMeta.c
@@ -18,12 +18,25 @@
int32_t tEncodeSTqHandle(SEncoder* pEncoder, const STqHandle* pHandle) {
if (tStartEncode(pEncoder) < 0) return -1;
if (tEncodeCStr(pEncoder, pHandle->subKey) < 0) return -1;
+ if (tEncodeI8(pEncoder, pHandle->fetchMeta) < 0) return -1;
if (tEncodeI64(pEncoder, pHandle->consumerId) < 0) return -1;
if (tEncodeI64(pEncoder, pHandle->snapshotVer) < 0) return -1;
if (tEncodeI32(pEncoder, pHandle->epoch) < 0) return -1;
if (tEncodeI8(pEncoder, pHandle->execHandle.subType) < 0) return -1;
if (pHandle->execHandle.subType == TOPIC_SUB_TYPE__COLUMN) {
if (tEncodeCStr(pEncoder, pHandle->execHandle.execCol.qmsg) < 0) return -1;
+ } else if(pHandle->execHandle.subType == TOPIC_SUB_TYPE__DB){
+ int32_t size = taosHashGetSize(pHandle->execHandle.execDb.pFilterOutTbUid);
+ if (tEncodeI32(pEncoder, size) < 0) return -1;
+ void *pIter = NULL;
+ pIter = taosHashIterate(pHandle->execHandle.execDb.pFilterOutTbUid, pIter);
+ while(pIter){
+ int64_t *tbUid = (int64_t *)taosHashGetKey(pIter, NULL);
+ if (tEncodeI64(pEncoder, *tbUid) < 0) return -1;
+ pIter = taosHashIterate(pHandle->execHandle.execDb.pFilterOutTbUid, pIter);
+ }
+ } else if(pHandle->execHandle.subType == TOPIC_SUB_TYPE__TABLE){
+ if (tEncodeI64(pEncoder, pHandle->execHandle.execTb.suid) < 0) return -1;
}
tEndEncode(pEncoder);
return pEncoder->pos;
@@ -32,12 +45,25 @@ int32_t tEncodeSTqHandle(SEncoder* pEncoder, const STqHandle* pHandle) {
int32_t tDecodeSTqHandle(SDecoder* pDecoder, STqHandle* pHandle) {
if (tStartDecode(pDecoder) < 0) return -1;
if (tDecodeCStrTo(pDecoder, pHandle->subKey) < 0) return -1;
+ if (tDecodeI8(pDecoder, &pHandle->fetchMeta) < 0) return -1;
if (tDecodeI64(pDecoder, &pHandle->consumerId) < 0) return -1;
if (tDecodeI64(pDecoder, &pHandle->snapshotVer) < 0) return -1;
if (tDecodeI32(pDecoder, &pHandle->epoch) < 0) return -1;
if (tDecodeI8(pDecoder, &pHandle->execHandle.subType) < 0) return -1;
if (pHandle->execHandle.subType == TOPIC_SUB_TYPE__COLUMN) {
if (tDecodeCStrAlloc(pDecoder, &pHandle->execHandle.execCol.qmsg) < 0) return -1;
+ }else if(pHandle->execHandle.subType == TOPIC_SUB_TYPE__DB){
+ pHandle->execHandle.execDb.pFilterOutTbUid =
+ taosHashInit(64, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), false, HASH_NO_LOCK);
+ int32_t size = 0;
+ if (tDecodeI32(pDecoder, &size) < 0) return -1;
+ for(int32_t i = 0; i < size; i++){
+ int64_t tbUid = 0;
+ if (tDecodeI64(pDecoder, &tbUid) < 0) return -1;
+ taosHashPut(pHandle->execHandle.execDb.pFilterOutTbUid, &tbUid, sizeof(int64_t), NULL, 0);
+ }
+ } else if(pHandle->execHandle.subType == TOPIC_SUB_TYPE__TABLE){
+ if (tDecodeI64(pDecoder, &pHandle->execHandle.execTb.suid) < 0) return -1;
}
tEndDecode(pDecoder);
return 0;
@@ -249,27 +275,48 @@ int32_t tqMetaRestoreHandle(STQ* pTq) {
}
walRefVer(handle.pRef, handle.snapshotVer);
+ SReadHandle reader = {
+ .meta = pTq->pVnode->pMeta,
+ .vnode = pTq->pVnode,
+ .initTableReader = true,
+ .initTqReader = true,
+ .version = handle.snapshotVer,
+ };
+
if (handle.execHandle.subType == TOPIC_SUB_TYPE__COLUMN) {
- SReadHandle reader = {
- .meta = pTq->pVnode->pMeta,
- .vnode = pTq->pVnode,
- .initTableReader = true,
- .initTqReader = true,
- .version = handle.snapshotVer,
- };
-
- handle.execHandle.execCol.task = qCreateQueueExecTaskInfo(
+
+ handle.execHandle.task = qCreateQueueExecTaskInfo(
handle.execHandle.execCol.qmsg, &reader, &handle.execHandle.numOfCols, &handle.execHandle.pSchemaWrapper);
- ASSERT(handle.execHandle.execCol.task);
+ ASSERT(handle.execHandle.task);
void* scanner = NULL;
- qExtractStreamScanner(handle.execHandle.execCol.task, &scanner);
+ qExtractStreamScanner(handle.execHandle.task, &scanner);
ASSERT(scanner);
handle.execHandle.pExecReader = qExtractReaderFromStreamScanner(scanner);
ASSERT(handle.execHandle.pExecReader);
- } else {
+ } else if (handle.execHandle.subType == TOPIC_SUB_TYPE__DB) {
handle.pWalReader = walOpenReader(pTq->pVnode->pWal, NULL);
- handle.execHandle.execDb.pFilterOutTbUid =
- taosHashInit(64, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), false, HASH_NO_LOCK);
+ handle.execHandle.pExecReader = tqOpenReader(pTq->pVnode);
+
+ buildSnapContext(reader.meta, reader.version, 0, handle.execHandle.subType, handle.fetchMeta, (SSnapContext **)(&reader.sContext));
+ handle.execHandle.task =
+ qCreateQueueExecTaskInfo(NULL, &reader, NULL, NULL);
+ } else if (handle.execHandle.subType == TOPIC_SUB_TYPE__TABLE) {
+ handle.pWalReader = walOpenReader(pTq->pVnode->pWal, NULL);
+
+ SArray* tbUidList = taosArrayInit(0, sizeof(int64_t));
+ vnodeGetCtbIdList(pTq->pVnode, handle.execHandle.execTb.suid, tbUidList);
+ tqDebug("vgId:%d, tq try to get all ctb, suid:%" PRId64, pTq->pVnode->config.vgId, handle.execHandle.execTb.suid);
+ for (int32_t i = 0; i < taosArrayGetSize(tbUidList); i++) {
+ int64_t tbUid = *(int64_t*)taosArrayGet(tbUidList, i);
+ tqDebug("vgId:%d, idx %d, uid:%" PRId64, TD_VID(pTq->pVnode), i, tbUid);
+ }
+ handle.execHandle.pExecReader = tqOpenReader(pTq->pVnode);
+ tqReaderSetTbUidList(handle.execHandle.pExecReader, tbUidList);
+ taosArrayDestroy(tbUidList);
+
+ buildSnapContext(reader.meta, reader.version, handle.execHandle.execTb.suid, handle.execHandle.subType, handle.fetchMeta, (SSnapContext **)(&reader.sContext));
+ handle.execHandle.task =
+ qCreateQueueExecTaskInfo(NULL, &reader, NULL, NULL);
}
tqDebug("tq restore %s consumer %" PRId64 " vgId:%d", handle.subKey, handle.consumerId, TD_VID(pTq->pVnode));
taosHashPut(pTq->pHandle, pKey, kLen, &handle, sizeof(STqHandle));
diff --git a/source/dnode/vnode/src/tq/tqPush.c b/source/dnode/vnode/src/tq/tqPush.c
index ed7fa80c476fff2d6436232b0e610f0b6f61f1cd..a57e8174fee9f82fd35c425e9214e48fba91f709 100644
--- a/source/dnode/vnode/src/tq/tqPush.c
+++ b/source/dnode/vnode/src/tq/tqPush.c
@@ -213,20 +213,25 @@ int32_t tqPushMsgNew(STQ* pTq, void* msg, int32_t msgLen, tmsg_t msgType, int64_
#endif
int tqPushMsg(STQ* pTq, void* msg, int32_t msgLen, tmsg_t msgType, int64_t ver) {
- if (vnodeIsRoleLeader(pTq->pVnode) && msgType == TDMT_VND_SUBMIT) {
- if (taosHashGetSize(pTq->pStreamMeta->pTasks) == 0) return 0;
-
- void* data = taosMemoryMalloc(msgLen);
- if (data == NULL) {
- terrno = TSDB_CODE_OUT_OF_MEMORY;
- tqError("failed to copy data for stream since out of memory");
- return -1;
+ if (vnodeIsRoleLeader(pTq->pVnode)) {
+ if (msgType == TDMT_VND_SUBMIT) {
+ if (taosHashGetSize(pTq->pStreamMeta->pTasks) == 0) return 0;
+
+ void* data = taosMemoryMalloc(msgLen);
+ if (data == NULL) {
+ terrno = TSDB_CODE_OUT_OF_MEMORY;
+ tqError("failed to copy data for stream since out of memory");
+ return -1;
+ }
+ memcpy(data, msg, msgLen);
+ SSubmitReq* pReq = (SSubmitReq*)data;
+ pReq->version = ver;
+
+ tqProcessSubmitReq(pTq, data, ver);
+ }
+ if (msgType == TDMT_VND_DELETE) {
+ tqProcessDelReq(pTq, POINTER_SHIFT(msg, sizeof(SMsgHead)), msgLen - sizeof(SMsgHead), ver);
}
- memcpy(data, msg, msgLen);
- SSubmitReq* pReq = (SSubmitReq*)data;
- pReq->version = ver;
-
- tqProcessStreamTrigger(pTq, data, ver);
}
return 0;
diff --git a/source/dnode/vnode/src/tq/tqRead.c b/source/dnode/vnode/src/tq/tqRead.c
index e6a331f20e1943a3e40b672a0ef214322db09c5c..375130fa2c34f5b954e3fe3710914dc01e8f6363 100644
--- a/source/dnode/vnode/src/tq/tqRead.c
+++ b/source/dnode/vnode/src/tq/tqRead.c
@@ -15,6 +15,162 @@
#include "tq.h"
+
+bool isValValidForTable(STqHandle* pHandle, SWalCont *pHead){
+ if(pHandle->execHandle.subType != TOPIC_SUB_TYPE__TABLE){
+ return true;
+ }
+
+ int16_t msgType = pHead->msgType;
+ char* body = pHead->body;
+ int32_t bodyLen = pHead->bodyLen;
+
+ int64_t tbSuid = pHandle->execHandle.execTb.suid;
+ int64_t realTbSuid = 0;
+ SDecoder coder;
+ void* data = POINTER_SHIFT(body, sizeof(SMsgHead));
+ int32_t len = bodyLen - sizeof(SMsgHead);
+ tDecoderInit(&coder, data, len);
+
+ if (msgType == TDMT_VND_CREATE_STB || msgType == TDMT_VND_ALTER_STB) {
+ SVCreateStbReq req = {0};
+ if (tDecodeSVCreateStbReq(&coder, &req) < 0) {
+ goto end;
+ }
+ realTbSuid = req.suid;
+ } else if (msgType == TDMT_VND_DROP_STB) {
+ SVDropStbReq req = {0};
+ if (tDecodeSVDropStbReq(&coder, &req) < 0) {
+ goto end;
+ }
+ realTbSuid = req.suid;
+ } else if (msgType == TDMT_VND_CREATE_TABLE) {
+ SVCreateTbBatchReq req = {0};
+ if (tDecodeSVCreateTbBatchReq(&coder, &req) < 0) {
+ goto end;
+ }
+
+ int32_t needRebuild = 0;
+ SVCreateTbReq* pCreateReq = NULL;
+ for (int32_t iReq = 0; iReq < req.nReqs; iReq++) {
+ pCreateReq = req.pReqs + iReq;
+ if(pCreateReq->type == TSDB_CHILD_TABLE && pCreateReq->ctb.suid == tbSuid){
+ needRebuild++;
+ }
+ }
+ if(needRebuild == 0){
+ // do nothing
+ }else if(needRebuild == req.nReqs){
+ realTbSuid = tbSuid;
+ }else{
+ realTbSuid = tbSuid;
+ SVCreateTbBatchReq reqNew = {0};
+ reqNew.pArray = taosArrayInit(req.nReqs, sizeof(struct SVCreateTbReq));
+ for (int32_t iReq = 0; iReq < req.nReqs; iReq++) {
+ pCreateReq = req.pReqs + iReq;
+ if(pCreateReq->type == TSDB_CHILD_TABLE && pCreateReq->ctb.suid == tbSuid){
+ reqNew.nReqs++;
+ taosArrayPush(reqNew.pArray, pCreateReq);
+ }
+ }
+
+ int tlen;
+ int32_t ret = 0;
+ tEncodeSize(tEncodeSVCreateTbBatchReq, &reqNew, tlen, ret);
+ void* buf = taosMemoryMalloc(tlen);
+ if (NULL == buf) {
+ taosArrayDestroy(reqNew.pArray);
+ goto end;
+ }
+ SEncoder coderNew = {0};
+ tEncoderInit(&coderNew, buf, tlen - sizeof(SMsgHead));
+ tEncodeSVCreateTbBatchReq(&coderNew, &reqNew);
+ tEncoderClear(&coderNew);
+ memcpy(pHead->body + sizeof(SMsgHead), buf, tlen);
+ pHead->bodyLen = tlen + sizeof(SMsgHead);
+ taosMemoryFree(buf);
+ taosArrayDestroy(reqNew.pArray);
+ }
+ } else if (msgType == TDMT_VND_ALTER_TABLE) {
+ SVAlterTbReq req = {0};
+
+ if (tDecodeSVAlterTbReq(&coder, &req) < 0) {
+ goto end;
+ }
+
+ SMetaReader mr = {0};
+ metaReaderInit(&mr, pHandle->execHandle.pExecReader->pVnodeMeta, 0);
+
+ if (metaGetTableEntryByName(&mr, req.tbName) < 0) {
+ metaReaderClear(&mr);
+ goto end;
+ }
+ realTbSuid = mr.me.ctbEntry.suid;
+ metaReaderClear(&mr);
+ } else if (msgType == TDMT_VND_DROP_TABLE) {
+ SVDropTbBatchReq req = {0};
+
+ if (tDecodeSVDropTbBatchReq(&coder, &req) < 0) {
+ goto end;
+ }
+
+ int32_t needRebuild = 0;
+ SVDropTbReq* pDropReq = NULL;
+ for (int32_t iReq = 0; iReq < req.nReqs; iReq++) {
+ pDropReq = req.pReqs + iReq;
+
+ if(pDropReq->suid == tbSuid){
+ needRebuild++;
+ }
+ }
+ if(needRebuild == 0){
+ // do nothing
+ }else if(needRebuild == req.nReqs){
+ realTbSuid = tbSuid;
+ }else{
+ realTbSuid = tbSuid;
+ SVDropTbBatchReq reqNew = {0};
+ reqNew.pArray = taosArrayInit(req.nReqs, sizeof(SVDropTbReq));
+ for (int32_t iReq = 0; iReq < req.nReqs; iReq++) {
+ pDropReq = req.pReqs + iReq;
+ if(pDropReq->suid == tbSuid){
+ reqNew.nReqs++;
+ taosArrayPush(reqNew.pArray, pDropReq);
+ }
+ }
+
+ int tlen;
+ int32_t ret = 0;
+ tEncodeSize(tEncodeSVDropTbBatchReq, &reqNew, tlen, ret);
+ void* buf = taosMemoryMalloc(tlen);
+ if (NULL == buf) {
+ taosArrayDestroy(reqNew.pArray);
+ goto end;
+ }
+ SEncoder coderNew = {0};
+ tEncoderInit(&coderNew, buf, tlen - sizeof(SMsgHead));
+ tEncodeSVDropTbBatchReq(&coderNew, &reqNew);
+ tEncoderClear(&coderNew);
+ memcpy(pHead->body + sizeof(SMsgHead), buf, tlen);
+ pHead->bodyLen = tlen + sizeof(SMsgHead);
+ taosMemoryFree(buf);
+ taosArrayDestroy(reqNew.pArray);
+ }
+ } else if (msgType == TDMT_VND_DELETE) {
+ SDeleteRes req = {0};
+ if (tDecodeDeleteRes(&coder, &req) < 0) {
+ goto end;
+ }
+ realTbSuid = req.suid;
+ } else{
+ ASSERT(0);
+ }
+
+ end:
+ tDecoderClear(&coder);
+ return tbSuid == realTbSuid;
+}
+
int64_t tqFetchLog(STQ* pTq, STqHandle* pHandle, int64_t* fetchOffset, SWalCkHead** ppCkHead) {
int32_t code = 0;
taosThreadMutexLock(&pHandle->pWalReader->mutex);
@@ -53,9 +209,11 @@ int64_t tqFetchLog(STQ* pTq, STqHandle* pHandle, int64_t* fetchOffset, SWalCkHea
code = -1;
goto END;
}
- *fetchOffset = offset;
- code = 0;
- goto END;
+ if(isValValidForTable(pHandle, pHead)){
+ *fetchOffset = offset;
+ code = 0;
+ goto END;
+ }
}
}
code = walSkipFetchBody(pHandle->pWalReader, *ppCkHead);
@@ -68,7 +226,7 @@ int64_t tqFetchLog(STQ* pTq, STqHandle* pHandle, int64_t* fetchOffset, SWalCkHea
offset++;
}
}
-END:
+ END:
taosThreadMutexUnlock(&pHandle->pWalReader->mutex);
return code;
}
@@ -398,7 +556,7 @@ int32_t tqUpdateTbUidList(STQ* pTq, const SArray* tbUidList, bool isAdd) {
if (pIter == NULL) break;
STqHandle* pExec = (STqHandle*)pIter;
if (pExec->execHandle.subType == TOPIC_SUB_TYPE__COLUMN) {
- int32_t code = qUpdateQualifiedTableId(pExec->execHandle.execCol.task, tbUidList, isAdd);
+ int32_t code = qUpdateQualifiedTableId(pExec->execHandle.task, tbUidList, isAdd);
ASSERT(code == 0);
} else if (pExec->execHandle.subType == TOPIC_SUB_TYPE__DB) {
if (!isAdd) {
diff --git a/source/dnode/vnode/src/tsdb/tsdbCache.c b/source/dnode/vnode/src/tsdb/tsdbCache.c
index b9f38976747f7e73f6bc6b40fe9dd968a3b8cabe..8da783a5bd82ccd23fe051e76396489cc6516058 100644
--- a/source/dnode/vnode/src/tsdb/tsdbCache.c
+++ b/source/dnode/vnode/src/tsdb/tsdbCache.c
@@ -418,31 +418,17 @@ typedef enum {
} SFSLASTNEXTROWSTATES;
typedef struct {
- SFSLASTNEXTROWSTATES state; // [input]
- STsdb *pTsdb; // [input]
- SBlockIdx *pBlockIdxExp; // [input]
- STSchema *pTSchema; // [input]
+ SFSLASTNEXTROWSTATES state; // [input]
+ STsdb *pTsdb; // [input]
tb_uid_t suid;
tb_uid_t uid;
int32_t nFileSet;
int32_t iFileSet;
SArray *aDFileSet;
SDataFReader *pDataFReader;
- SArray *aBlockL;
- SBlockL *pBlockL;
- SBlockData *pBlockDataL;
- SBlockData blockDataL;
- int32_t nRow;
- int32_t iRow;
TSDBROW row;
- /*
- SArray *aBlockIdx;
- SBlockIdx *pBlockIdx;
- SMapData blockMap;
- int32_t nBlock;
- int32_t iBlock;
- SBlock block;
- */
+
+ SMergeTree mergeTree;
} SFSLastNextRowIter;
static int32_t getNextRowFromFSLast(void *iter, TSDBROW **ppRow) {
@@ -451,22 +437,16 @@ static int32_t getNextRowFromFSLast(void *iter, TSDBROW **ppRow) {
switch (state->state) {
case SFSLASTNEXTROW_FS:
- // state->aDFileSet = state->pTsdb->pFS->cState->aDFileSet;
state->nFileSet = taosArrayGetSize(state->aDFileSet);
state->iFileSet = state->nFileSet;
- state->pBlockDataL = NULL;
-
case SFSLASTNEXTROW_FILESET: {
SDFileSet *pFileSet = NULL;
_next_fileset:
if (--state->iFileSet >= 0) {
pFileSet = (SDFileSet *)taosArrayGet(state->aDFileSet, state->iFileSet);
} else {
- if (state->pBlockDataL) {
- tBlockDataDestroy(state->pBlockDataL, 1);
- state->pBlockDataL = NULL;
- }
+ // tMergeTreeClose(&state->mergeTree);
*ppRow = NULL;
return code;
@@ -475,68 +455,24 @@ static int32_t getNextRowFromFSLast(void *iter, TSDBROW **ppRow) {
code = tsdbDataFReaderOpen(&state->pDataFReader, state->pTsdb, pFileSet);
if (code) goto _err;
- if (!state->aBlockL) {
- state->aBlockL = taosArrayInit(0, sizeof(SBlockIdx));
- } else {
- taosArrayClear(state->aBlockL);
- }
-
- code = tsdbReadBlockL(state->pDataFReader, state->aBlockL);
- if (code) goto _err;
-
- // SBlockL *pBlockL = (SBlockL *)taosArrayGet(state->aBlockL, state->iBlockL);
-
- state->pBlockL = taosArraySearch(state->aBlockL, state->pBlockIdxExp, tCmprBlockL, TD_EQ);
- if (!state->pBlockL) {
+ tMergeTreeOpen(&state->mergeTree, 1, state->pDataFReader, state->suid, state->uid,
+ &(STimeWindow){.skey = TSKEY_MIN, .ekey = TSKEY_MAX},
+ &(SVersionRange){.minVer = 0, .maxVer = UINT64_MAX}, NULL, NULL);
+ bool hasVal = tMergeTreeNext(&state->mergeTree);
+ if (!hasVal) {
+ state->state = SFSLASTNEXTROW_FILESET;
+ // tMergeTreeClose(&state->mergeTree);
goto _next_fileset;
}
-
- int64_t suid = state->pBlockL->suid;
- int64_t uid = state->pBlockL->maxUid;
-
- if (!state->pBlockDataL) {
- state->pBlockDataL = &state->blockDataL;
-
- tBlockDataCreate(state->pBlockDataL);
- }
- code = tBlockDataInit(state->pBlockDataL, suid, suid ? 0 : uid, state->pTSchema);
- if (code) goto _err;
- }
- case SFSLASTNEXTROW_BLOCKDATA:
- code = tsdbReadLastBlock(state->pDataFReader, state->pBlockL, state->pBlockDataL);
- if (code) goto _err;
-
- state->nRow = state->blockDataL.nRow;
- state->iRow = state->nRow - 1;
-
- if (!state->pBlockDataL->uid) {
- while (state->pBlockIdxExp->uid != state->pBlockDataL->aUid[state->iRow]) {
- --state->iRow;
- }
- }
-
state->state = SFSLASTNEXTROW_BLOCKROW;
+ }
case SFSLASTNEXTROW_BLOCKROW:
- if (state->pBlockDataL->uid) {
- if (state->iRow >= 0) {
- state->row = tsdbRowFromBlockData(state->pBlockDataL, state->iRow);
- *ppRow = &state->row;
-
- if (--state->iRow < 0) {
- state->state = SFSLASTNEXTROW_FILESET;
- }
- }
- } else {
- if (state->iRow >= 0 && state->pBlockIdxExp->uid == state->pBlockDataL->aUid[state->iRow]) {
- state->row = tsdbRowFromBlockData(state->pBlockDataL, state->iRow);
- *ppRow = &state->row;
-
- if (--state->iRow < 0 || state->pBlockIdxExp->uid != state->pBlockDataL->aUid[state->iRow]) {
- state->state = SFSLASTNEXTROW_FILESET;
- }
- }
+ state->row = tMergeTreeGetRow(&state->mergeTree);
+ *ppRow = &state->row;
+ bool hasVal = tMergeTreeNext(&state->mergeTree);
+ if (!hasVal) {
+ state->state = SFSLASTNEXTROW_FILESET;
}
-
return code;
default:
ASSERT(0);
@@ -548,15 +484,6 @@ _err:
tsdbDataFReaderClose(&state->pDataFReader);
state->pDataFReader = NULL;
}
- if (state->aBlockL) {
- taosArrayDestroy(state->aBlockL);
- state->aBlockL = NULL;
- }
- if (state->pBlockDataL) {
- tBlockDataDestroy(state->pBlockDataL, 1);
- state->pBlockDataL = NULL;
- }
-
*ppRow = NULL;
return code;
@@ -574,14 +501,6 @@ int32_t clearNextRowFromFSLast(void *iter) {
tsdbDataFReaderClose(&state->pDataFReader);
state->pDataFReader = NULL;
}
- if (state->aBlockL) {
- taosArrayDestroy(state->aBlockL);
- state->aBlockL = NULL;
- }
- if (state->pBlockDataL) {
- tBlockDataDestroy(state->pBlockDataL, 1);
- state->pBlockDataL = NULL;
- }
return code;
}
@@ -609,7 +528,7 @@ typedef struct SFSNextRowIter {
SMapData blockMap;
int32_t nBlock;
int32_t iBlock;
- SBlock block;
+ SDataBlk block;
SBlockData blockData;
SBlockData *pBlockData;
int32_t nRow;
@@ -670,7 +589,7 @@ static int32_t getNextRowFromFS(void *iter, TSDBROW **ppRow) {
}
tMapDataReset(&state->blockMap);
- code = tsdbReadBlock(state->pDataFReader, state->pBlockIdx, &state->blockMap);
+ code = tsdbReadDataBlk(state->pDataFReader, state->pBlockIdx, &state->blockMap);
if (code) goto _err;
state->nBlock = state->blockMap.nItem;
@@ -684,13 +603,13 @@ static int32_t getNextRowFromFS(void *iter, TSDBROW **ppRow) {
}
case SFSNEXTROW_BLOCKDATA:
if (state->iBlock >= 0) {
- SBlock block = {0};
+ SDataBlk block = {0};
- tBlockReset(&block);
+ tDataBlkReset(&block);
// tBlockDataReset(&state->blockData);
tBlockDataReset(state->pBlockData);
- tMapDataGetItemByIdx(&state->blockMap, state->iBlock, &block, tGetBlock);
+ tMapDataGetItemByIdx(&state->blockMap, state->iBlock, &block, tGetDataBlk);
/* code = tsdbReadBlockData(state->pDataFReader, &state->blockIdx, &block, &state->blockData, NULL, NULL); */
tBlockDataReset(state->pBlockData);
code = tBlockDataInit(state->pBlockData, state->suid, state->uid, state->pTSchema);
@@ -878,7 +797,7 @@ static bool tsdbKeyDeleted(TSDBKEY *key, SArray *pSkyline, int64_t *iSkyline) {
if (key->ts > pItemBack->ts) {
return false;
} else if (key->ts >= pItemFront->ts && key->ts <= pItemBack->ts) {
- if ((key->version <= pItemFront->version || key->ts == pItemBack->ts && key->version <= pItemBack->version)) {
+ if (key->version <= pItemFront->version || (key->ts == pItemBack->ts && key->version <= pItemBack->version)) {
return true;
} else {
return false;
@@ -972,8 +891,6 @@ static int32_t nextRowIterOpen(CacheNextRowIter *pIter, tb_uid_t uid, STsdb *pTs
pIter->fsLastState.state = (SFSLASTNEXTROWSTATES)SFSNEXTROW_FS;
pIter->fsLastState.pTsdb = pTsdb;
pIter->fsLastState.aDFileSet = pIter->pReadSnap->fs.aDFileSet;
- pIter->fsLastState.pBlockIdxExp = &pIter->idx;
- pIter->fsLastState.pTSchema = pTSchema;
pIter->fsLastState.suid = suid;
pIter->fsLastState.uid = uid;
@@ -1372,25 +1289,33 @@ int32_t tsdbCacheGetLastH(SLRUCache *pCache, tb_uid_t uid, STsdb *pTsdb, LRUHand
// getTableCacheKeyS(uid, "l", key, &keyLen);
getTableCacheKey(uid, 1, key, &keyLen);
LRUHandle *h = taosLRUCacheLookup(pCache, key, keyLen);
- if (h) {
- } else {
- SArray *pLastArray = NULL;
- code = mergeLast(uid, pTsdb, &pLastArray);
- // if table's empty or error, return code of -1
- // if (code < 0 || pRow == NULL) {
- if (code < 0 || pLastArray == NULL) {
- *handle = NULL;
- return 0;
- }
-
- _taos_lru_deleter_t deleter = deleteTableCacheLast;
- LRUStatus status =
- taosLRUCacheInsert(pCache, key, keyLen, pLastArray, pLastArray->capacity, deleter, NULL, TAOS_LRU_PRIORITY_LOW);
- if (status != TAOS_LRU_STATUS_OK) {
- code = -1;
- }
+ if (!h) {
+ taosThreadMutexLock(&pTsdb->lruMutex);
h = taosLRUCacheLookup(pCache, key, keyLen);
+ if (!h) {
+ SArray *pLastArray = NULL;
+ code = mergeLast(uid, pTsdb, &pLastArray);
+ // if table's empty or error, return code of -1
+ // if (code < 0 || pRow == NULL) {
+ if (code < 0 || pLastArray == NULL) {
+ *handle = NULL;
+ return 0;
+ }
+
+ _taos_lru_deleter_t deleter = deleteTableCacheLast;
+ LRUStatus status = taosLRUCacheInsert(pCache, key, keyLen, pLastArray, pLastArray->capacity, deleter, NULL,
+ TAOS_LRU_PRIORITY_LOW);
+ if (status != TAOS_LRU_STATUS_OK) {
+ code = -1;
+ }
+
+ taosThreadMutexUnlock(&pTsdb->lruMutex);
+
+ h = taosLRUCacheLookup(pCache, key, keyLen);
+ } else {
+ taosThreadMutexUnlock(&pTsdb->lruMutex);
+ }
}
*handle = h;
@@ -1411,3 +1336,5 @@ void tsdbCacheSetCapacity(SVnode *pVnode, size_t capacity) {
}
size_t tsdbCacheGetCapacity(SVnode *pVnode) { return taosLRUCacheGetCapacity(pVnode->pTsdb->lruCache); }
+
+size_t tsdbCacheGetUsage(SVnode *pVnode) { return taosLRUCacheGetUsage(pVnode->pTsdb->lruCache); }
diff --git a/source/dnode/vnode/src/tsdb/tsdbCacheRead.c b/source/dnode/vnode/src/tsdb/tsdbCacheRead.c
index 66843d9a2844c44e77e798ab47032ef75370a544..ea9a7ec7d9b3df80edbb1e5f93db5b2420f908e5 100644
--- a/source/dnode/vnode/src/tsdb/tsdbCacheRead.c
+++ b/source/dnode/vnode/src/tsdb/tsdbCacheRead.c
@@ -18,7 +18,7 @@
#include "tcommon.h"
#include "tsdb.h"
-typedef struct SLastrowReader {
+typedef struct SCacheRowsReader {
SVnode* pVnode;
STSchema* pSchema;
uint64_t uid;
@@ -27,9 +27,9 @@ typedef struct SLastrowReader {
int32_t type;
int32_t tableIndex; // currently returned result tables
SArray* pTableList; // table id list
-} SLastrowReader;
+} SCacheRowsReader;
-static void saveOneRow(STSRow* pRow, SSDataBlock* pBlock, SLastrowReader* pReader, const int32_t* slotIds) {
+static void saveOneRow(STSRow* pRow, SSDataBlock* pBlock, SCacheRowsReader* pReader, const int32_t* slotIds) {
ASSERT(pReader->numOfCols <= taosArrayGetSize(pBlock->pDataBlock));
int32_t numOfRows = pBlock->info.rows;
@@ -61,8 +61,10 @@ static void saveOneRow(STSRow* pRow, SSDataBlock* pBlock, SLastrowReader* pReade
pBlock->info.rows += 1;
}
-int32_t tsdbLastRowReaderOpen(void* pVnode, int32_t type, SArray* pTableIdList, int32_t numOfCols, void** pReader) {
- SLastrowReader* p = taosMemoryCalloc(1, sizeof(SLastrowReader));
+int32_t tsdbCacherowsReaderOpen(void* pVnode, int32_t type, SArray* pTableIdList, int32_t numOfCols, void** pReader) {
+ *pReader = NULL;
+
+ SCacheRowsReader* p = taosMemoryCalloc(1, sizeof(SCacheRowsReader));
if (p == NULL) {
return TSDB_CODE_OUT_OF_MEMORY;
}
@@ -81,9 +83,17 @@ int32_t tsdbLastRowReaderOpen(void* pVnode, int32_t type, SArray* pTableIdList,
p->pTableList = pTableIdList;
p->transferBuf = taosMemoryCalloc(p->pSchema->numOfCols, POINTER_BYTES);
+ if (p->transferBuf == NULL) {
+ return TSDB_CODE_OUT_OF_MEMORY;
+ }
+
for (int32_t i = 0; i < p->pSchema->numOfCols; ++i) {
if (IS_VAR_DATA_TYPE(p->pSchema->columns[i].type)) {
p->transferBuf[i] = taosMemoryMalloc(p->pSchema->columns[i].bytes);
+ if (p->transferBuf[i] == NULL) {
+ tsdbCacherowsReaderClose(p);
+ return TSDB_CODE_OUT_OF_MEMORY;
+ }
}
}
@@ -91,8 +101,8 @@ int32_t tsdbLastRowReaderOpen(void* pVnode, int32_t type, SArray* pTableIdList,
return TSDB_CODE_SUCCESS;
}
-int32_t tsdbLastrowReaderClose(void* pReader) {
- SLastrowReader* p = pReader;
+int32_t tsdbCacherowsReaderClose(void* pReader) {
+ SCacheRowsReader* p = pReader;
if (p->pSchema != NULL) {
for (int32_t i = 0; i < p->pSchema->numOfCols; ++i) {
@@ -107,28 +117,56 @@ int32_t tsdbLastrowReaderClose(void* pReader) {
return TSDB_CODE_SUCCESS;
}
-int32_t tsdbRetrieveLastRow(void* pReader, SSDataBlock* pResBlock, const int32_t* slotIds, SArray* pTableUidList) {
+static int32_t doExtractCacheRow(SCacheRowsReader* pr, SLRUCache* lruCache, uint64_t uid, STSRow** pRow, LRUHandle** h) {
+ int32_t code = TSDB_CODE_SUCCESS;
+ if ((pr->type & CACHESCAN_RETRIEVE_LAST_ROW) == CACHESCAN_RETRIEVE_LAST_ROW) {
+ code = tsdbCacheGetLastrowH(lruCache, uid, pr->pVnode->pTsdb, h);
+ if (code != TSDB_CODE_SUCCESS) {
+ return code;
+ }
+
+ // no data in the table of Uid
+ if (*h != NULL) {
+ *pRow = (STSRow*)taosLRUCacheValue(lruCache, *h);
+ }
+ } else {
+ code = tsdbCacheGetLastH(lruCache, uid, pr->pVnode->pTsdb, h);
+ if (code != TSDB_CODE_SUCCESS) {
+ return code;
+ }
+
+ // no data in the table of Uid
+ if (*h != NULL) {
+ SArray* pLast = (SArray*)taosLRUCacheValue(lruCache, *h);
+ tsdbCacheLastArray2Row(pLast, pRow, pr->pSchema);
+ }
+ }
+
+ return code;
+}
+
+int32_t tsdbRetrieveCacheRows(void* pReader, SSDataBlock* pResBlock, const int32_t* slotIds, SArray* pTableUidList) {
if (pReader == NULL || pResBlock == NULL) {
return TSDB_CODE_INVALID_PARA;
}
- SLastrowReader* pr = pReader;
+ SCacheRowsReader* pr = pReader;
+ int32_t code = TSDB_CODE_SUCCESS;
SLRUCache* lruCache = pr->pVnode->pTsdb->lruCache;
LRUHandle* h = NULL;
STSRow* pRow = NULL;
size_t numOfTables = taosArrayGetSize(pr->pTableList);
// retrieve the only one last row of all tables in the uid list.
- if (pr->type == LASTROW_RETRIEVE_TYPE_SINGLE) {
+ if ((pr->type & CACHESCAN_RETRIEVE_TYPE_SINGLE) == CACHESCAN_RETRIEVE_TYPE_SINGLE) {
int64_t lastKey = INT64_MIN;
bool internalResult = false;
for (int32_t i = 0; i < numOfTables; ++i) {
STableKeyInfo* pKeyInfo = taosArrayGet(pr->pTableList, i);
- int32_t code = tsdbCacheGetLastrowH(lruCache, pKeyInfo->uid, pr->pVnode->pTsdb, &h);
- // int32_t code = tsdbCacheGetLastH(lruCache, pKeyInfo->uid, pr->pVnode->pTsdb, &h);
- if (code != TSDB_CODE_SUCCESS) {
+ code = doExtractCacheRow(pr, lruCache, pKeyInfo->uid, &pRow, &h);
+ if (code != TSDB_CODE_SUCCESS) {
return code;
}
@@ -136,9 +174,6 @@ int32_t tsdbRetrieveLastRow(void* pReader, SSDataBlock* pResBlock, const int32_t
continue;
}
- pRow = (STSRow*)taosLRUCacheValue(lruCache, h);
- // SArray* pLast = (SArray*)taosLRUCacheValue(lruCache, h);
- // tsdbCacheLastArray2Row(pLast, &pRow, pr->pSchema);
if (pRow->ts > lastKey) {
// Set result row into the same rowIndex repeatly, so we need to check if the internal result row has already
// appended or not.
@@ -155,25 +190,18 @@ int32_t tsdbRetrieveLastRow(void* pReader, SSDataBlock* pResBlock, const int32_t
tsdbCacheRelease(lruCache, h);
}
- } else if (pr->type == LASTROW_RETRIEVE_TYPE_ALL) {
+ } else if ((pr->type & CACHESCAN_RETRIEVE_TYPE_ALL) == CACHESCAN_RETRIEVE_TYPE_ALL) {
for (int32_t i = pr->tableIndex; i < numOfTables; ++i) {
STableKeyInfo* pKeyInfo = taosArrayGet(pr->pTableList, i);
-
- int32_t code = tsdbCacheGetLastrowH(lruCache, pKeyInfo->uid, pr->pVnode->pTsdb, &h);
- // int32_t code = tsdbCacheGetLastH(lruCache, pKeyInfo->uid, pr->pVnode->pTsdb, &h);
- if (code != TSDB_CODE_SUCCESS) {
+ code = doExtractCacheRow(pr, lruCache, pKeyInfo->uid, &pRow, &h);
+ if (code != TSDB_CODE_SUCCESS) {
return code;
}
- // no data in the table of Uid
if (h == NULL) {
continue;
}
- pRow = (STSRow*)taosLRUCacheValue(lruCache, h);
- // SArray* pLast = (SArray*)taosLRUCacheValue(lruCache, h);
- // tsdbCacheLastArray2Row(pLast, &pRow, pr->pSchema);
-
saveOneRow(pRow, pResBlock, pr, slotIds);
taosArrayPush(pTableUidList, &pKeyInfo->uid);
diff --git a/source/dnode/vnode/src/tsdb/tsdbCommit.c b/source/dnode/vnode/src/tsdb/tsdbCommit.c
index 020f3b0bc6dedf70efa45a835056c8dd41d9a158..a619b9f2e4f827d72f2aad5fd752ae002ac2fc74 100644
--- a/source/dnode/vnode/src/tsdb/tsdbCommit.c
+++ b/source/dnode/vnode/src/tsdb/tsdbCommit.c
@@ -14,17 +14,27 @@
*/
#include "tsdb.h"
-typedef struct {
- int64_t suid;
- int64_t uid;
- STSchema *pTSchema;
-} SSkmInfo;
+
+typedef enum { MEMORY_DATA_ITER = 0, STT_DATA_ITER } EDataIterT;
typedef struct {
- int64_t suid;
- int64_t uid;
- TSDBROW row;
-} SRowInfo;
+ SRBTreeNode n;
+ SRowInfo r;
+ EDataIterT type;
+ union {
+ struct {
+ int32_t iTbDataP;
+ STbDataIter iter;
+ }; // memory data iter
+ struct {
+ int32_t iStt;
+ SArray *aSttBlk;
+ int32_t iSttBlk;
+ SBlockData bData;
+ int32_t iRow;
+ }; // stt file data iter
+ };
+} SDataIter;
typedef struct {
STsdb *pTsdb;
@@ -35,8 +45,9 @@ typedef struct {
int32_t minRow;
int32_t maxRow;
int8_t cmprAlg;
- SArray *aTbDataP;
- STsdbFS fs;
+ int8_t sttTrigger;
+ SArray *aTbDataP; // memory
+ STsdbFS fs; // disk
// --------------
TSKEY nextKey; // reset by each table commit
int32_t commitFid;
@@ -45,25 +56,24 @@ typedef struct {
// commit file data
struct {
SDataFReader *pReader;
- // data
- SArray *aBlockIdx; // SArray
- int32_t iBlockIdx;
- SBlockIdx *pBlockIdx;
- SMapData mBlock; // SMapData
- SBlockData bData;
- // last
- SArray *aBlockL; // SArray
- int32_t iBlockL;
- SBlockData bDatal;
- int32_t iRow;
- SRowInfo *pRowInfo;
- SRowInfo rowInfo;
+ SArray *aBlockIdx; // SArray
+ int32_t iBlockIdx;
+ SBlockIdx *pBlockIdx;
+ SMapData mBlock; // SMapData
+ SBlockData bData;
} dReader;
+ struct {
+ SDataIter *pIter;
+ SRBTree rbt;
+ SDataIter dataIter;
+ SDataIter aDataIter[TSDB_MAX_STT_TRIGGER];
+ int8_t toLastOnly;
+ };
struct {
SDataFWriter *pWriter;
SArray *aBlockIdx; // SArray
- SArray *aBlockL; // SArray
- SMapData mBlock; // SMapData
+ SArray *aSttBlk; // SArray
+ SMapData mBlock; // SMapData
SBlockData bData;
SBlockData bDatal;
} dWriter;
@@ -82,6 +92,26 @@ static int32_t tsdbCommitData(SCommitter *pCommitter);
static int32_t tsdbCommitDel(SCommitter *pCommitter);
static int32_t tsdbCommitCache(SCommitter *pCommitter);
static int32_t tsdbEndCommit(SCommitter *pCommitter, int32_t eno);
+static int32_t tsdbNextCommitRow(SCommitter *pCommitter);
+
+int32_t tRowInfoCmprFn(const void *p1, const void *p2) {
+ SRowInfo *pInfo1 = (SRowInfo *)p1;
+ SRowInfo *pInfo2 = (SRowInfo *)p2;
+
+ if (pInfo1->suid < pInfo2->suid) {
+ return -1;
+ } else if (pInfo1->suid > pInfo2->suid) {
+ return 1;
+ }
+
+ if (pInfo1->uid < pInfo2->uid) {
+ return -1;
+ } else if (pInfo1->uid > pInfo2->uid) {
+ return 1;
+ }
+
+ return tsdbRowCmprFn(&pInfo1->row, &pInfo2->row);
+}
int32_t tsdbBegin(STsdb *pTsdb) {
int32_t code = 0;
@@ -290,19 +320,22 @@ _err:
return code;
}
-static int32_t tsdbCommitterUpdateTableSchema(SCommitter *pCommitter, int64_t suid, int64_t uid) {
+int32_t tsdbUpdateTableSchema(SMeta *pMeta, int64_t suid, int64_t uid, SSkmInfo *pSkmInfo) {
int32_t code = 0;
if (suid) {
- if (pCommitter->skmTable.suid == suid) goto _exit;
+ if (pSkmInfo->suid == suid) {
+ pSkmInfo->uid = uid;
+ goto _exit;
+ }
} else {
- if (pCommitter->skmTable.uid == uid) goto _exit;
+ if (pSkmInfo->uid == uid) goto _exit;
}
- pCommitter->skmTable.suid = suid;
- pCommitter->skmTable.uid = uid;
- tTSchemaDestroy(pCommitter->skmTable.pTSchema);
- code = metaGetTbTSchemaEx(pCommitter->pTsdb->pVnode->pMeta, suid, uid, -1, &pCommitter->skmTable.pTSchema);
+ pSkmInfo->suid = suid;
+ pSkmInfo->uid = uid;
+ tTSchemaDestroy(pSkmInfo->pTSchema);
+ code = metaGetTbTSchemaEx(pMeta, suid, uid, -1, &pSkmInfo->pTSchema);
if (code) goto _exit;
_exit:
@@ -334,54 +367,6 @@ _exit:
return code;
}
-static int32_t tsdbCommitterNextLastRow(SCommitter *pCommitter) {
- int32_t code = 0;
-
- ASSERT(pCommitter->dReader.pReader);
- ASSERT(pCommitter->dReader.pRowInfo);
-
- SBlockData *pBlockDatal = &pCommitter->dReader.bDatal;
- pCommitter->dReader.iRow++;
- if (pCommitter->dReader.iRow < pBlockDatal->nRow) {
- if (pBlockDatal->uid) {
- pCommitter->dReader.pRowInfo->uid = pBlockDatal->uid;
- } else {
- pCommitter->dReader.pRowInfo->uid = pBlockDatal->aUid[pCommitter->dReader.iRow];
- }
- pCommitter->dReader.pRowInfo->row = tsdbRowFromBlockData(pBlockDatal, pCommitter->dReader.iRow);
- } else {
- pCommitter->dReader.iBlockL++;
- if (pCommitter->dReader.iBlockL < taosArrayGetSize(pCommitter->dReader.aBlockL)) {
- SBlockL *pBlockL = (SBlockL *)taosArrayGet(pCommitter->dReader.aBlockL, pCommitter->dReader.iBlockL);
- int64_t suid = pBlockL->suid;
- int64_t uid = pBlockL->maxUid;
-
- code = tsdbCommitterUpdateTableSchema(pCommitter, suid, uid);
- if (code) goto _exit;
-
- code = tBlockDataInit(pBlockDatal, suid, suid ? 0 : uid, pCommitter->skmTable.pTSchema);
- if (code) goto _exit;
-
- code = tsdbReadLastBlock(pCommitter->dReader.pReader, pBlockL, pBlockDatal);
- if (code) goto _exit;
-
- pCommitter->dReader.iRow = 0;
- pCommitter->dReader.pRowInfo->suid = pBlockDatal->suid;
- if (pBlockDatal->uid) {
- pCommitter->dReader.pRowInfo->uid = pBlockDatal->uid;
- } else {
- pCommitter->dReader.pRowInfo->uid = pBlockDatal->aUid[0];
- }
- pCommitter->dReader.pRowInfo->row = tsdbRowFromBlockData(pBlockDatal, pCommitter->dReader.iRow);
- } else {
- pCommitter->dReader.pRowInfo = NULL;
- }
- }
-
-_exit:
- return code;
-}
-
static int32_t tsdbCommitterNextTableData(SCommitter *pCommitter) {
int32_t code = 0;
@@ -392,7 +377,7 @@ static int32_t tsdbCommitterNextTableData(SCommitter *pCommitter) {
pCommitter->dReader.pBlockIdx =
(SBlockIdx *)taosArrayGet(pCommitter->dReader.aBlockIdx, pCommitter->dReader.iBlockIdx);
- code = tsdbReadBlock(pCommitter->dReader.pReader, pCommitter->dReader.pBlockIdx, &pCommitter->dReader.mBlock);
+ code = tsdbReadDataBlk(pCommitter->dReader.pReader, pCommitter->dReader.pBlockIdx, &pCommitter->dReader.mBlock);
if (code) goto _exit;
ASSERT(pCommitter->dReader.mBlock.nItem > 0);
@@ -404,6 +389,85 @@ _exit:
return code;
}
+static int32_t tsdbOpenCommitIter(SCommitter *pCommitter) {
+ int32_t code = 0;
+
+ pCommitter->pIter = NULL;
+ tRBTreeCreate(&pCommitter->rbt, tRowInfoCmprFn);
+
+ // memory
+ TSDBKEY tKey = {.ts = pCommitter->minKey, .version = VERSION_MIN};
+ SDataIter *pIter = &pCommitter->dataIter;
+ pIter->type = MEMORY_DATA_ITER;
+ pIter->iTbDataP = 0;
+ for (; pIter->iTbDataP < taosArrayGetSize(pCommitter->aTbDataP); pIter->iTbDataP++) {
+ STbData *pTbData = (STbData *)taosArrayGetP(pCommitter->aTbDataP, pIter->iTbDataP);
+ tsdbTbDataIterOpen(pTbData, &tKey, 0, &pIter->iter);
+ TSDBROW *pRow = tsdbTbDataIterGet(&pIter->iter);
+ if (pRow && TSDBROW_TS(pRow) > pCommitter->maxKey) {
+ pCommitter->nextKey = TMIN(pCommitter->nextKey, TSDBROW_TS(pRow));
+ pRow = NULL;
+ }
+
+ if (pRow == NULL) continue;
+
+ pIter->r.suid = pTbData->suid;
+ pIter->r.uid = pTbData->uid;
+ pIter->r.row = *pRow;
+ break;
+ }
+ ASSERT(pIter->iTbDataP < taosArrayGetSize(pCommitter->aTbDataP));
+ tRBTreePut(&pCommitter->rbt, (SRBTreeNode *)pIter);
+
+ // disk
+ pCommitter->toLastOnly = 0;
+ SDataFReader *pReader = pCommitter->dReader.pReader;
+ if (pReader) {
+ if (pReader->pSet->nSttF >= pCommitter->sttTrigger) {
+ int8_t iIter = 0;
+ for (int32_t iStt = 0; iStt < pReader->pSet->nSttF; iStt++) {
+ pIter = &pCommitter->aDataIter[iIter];
+ pIter->type = STT_DATA_ITER;
+ pIter->iStt = iStt;
+
+ code = tsdbReadSttBlk(pCommitter->dReader.pReader, iStt, pIter->aSttBlk);
+ if (code) goto _err;
+
+ if (taosArrayGetSize(pIter->aSttBlk) == 0) continue;
+
+ pIter->iSttBlk = 0;
+ SSttBlk *pSttBlk = (SSttBlk *)taosArrayGet(pIter->aSttBlk, 0);
+ code = tsdbReadSttBlock(pCommitter->dReader.pReader, iStt, pSttBlk, &pIter->bData);
+ if (code) goto _err;
+
+ pIter->iRow = 0;
+ pIter->r.suid = pIter->bData.suid;
+ pIter->r.uid = pIter->bData.uid ? pIter->bData.uid : pIter->bData.aUid[0];
+ pIter->r.row = tsdbRowFromBlockData(&pIter->bData, 0);
+
+ tRBTreePut(&pCommitter->rbt, (SRBTreeNode *)pIter);
+ iIter++;
+ }
+ } else {
+ for (int32_t iStt = 0; iStt < pReader->pSet->nSttF; iStt++) {
+ SSttFile *pSttFile = pReader->pSet->aSttF[iStt];
+ if (pSttFile->size > pSttFile->offset) {
+ pCommitter->toLastOnly = 1;
+ break;
+ }
+ }
+ }
+ }
+
+ code = tsdbNextCommitRow(pCommitter);
+ if (code) goto _err;
+
+ return code;
+
+_err:
+ return code;
+}
+
static int32_t tsdbCommitFileDataStart(SCommitter *pCommitter) {
int32_t code = 0;
STsdb *pTsdb = pCommitter->pTsdb;
@@ -416,8 +480,8 @@ static int32_t tsdbCommitFileDataStart(SCommitter *pCommitter) {
pCommitter->nextKey = TSKEY_MAX;
// Reader
- pRSet = (SDFileSet *)taosArraySearch(pCommitter->fs.aDFileSet, &(SDFileSet){.fid = pCommitter->commitFid},
- tDFileSetCmprFn, TD_EQ);
+ SDFileSet tDFileSet = {.fid = pCommitter->commitFid};
+ pRSet = (SDFileSet *)taosArraySearch(pCommitter->fs.aDFileSet, &tDFileSet, tDFileSetCmprFn, TD_EQ);
if (pRSet) {
code = tsdbDataFReaderOpen(&pCommitter->dReader.pReader, pTsdb, pRSet);
if (code) goto _err;
@@ -427,68 +491,58 @@ static int32_t tsdbCommitFileDataStart(SCommitter *pCommitter) {
if (code) goto _err;
pCommitter->dReader.iBlockIdx = 0;
- if (pCommitter->dReader.iBlockIdx < taosArrayGetSize(pCommitter->dReader.aBlockIdx)) {
- pCommitter->dReader.pBlockIdx =
- (SBlockIdx *)taosArrayGet(pCommitter->dReader.aBlockIdx, pCommitter->dReader.iBlockIdx);
-
- code = tsdbReadBlock(pCommitter->dReader.pReader, pCommitter->dReader.pBlockIdx, &pCommitter->dReader.mBlock);
+ if (taosArrayGetSize(pCommitter->dReader.aBlockIdx) > 0) {
+ pCommitter->dReader.pBlockIdx = (SBlockIdx *)taosArrayGet(pCommitter->dReader.aBlockIdx, 0);
+ code = tsdbReadDataBlk(pCommitter->dReader.pReader, pCommitter->dReader.pBlockIdx, &pCommitter->dReader.mBlock);
if (code) goto _err;
} else {
pCommitter->dReader.pBlockIdx = NULL;
}
tBlockDataReset(&pCommitter->dReader.bData);
-
- // last
- code = tsdbReadBlockL(pCommitter->dReader.pReader, pCommitter->dReader.aBlockL);
- if (code) goto _err;
-
- pCommitter->dReader.iBlockL = -1;
- pCommitter->dReader.iRow = -1;
- pCommitter->dReader.pRowInfo = &pCommitter->dReader.rowInfo;
- tBlockDataReset(&pCommitter->dReader.bDatal);
- code = tsdbCommitterNextLastRow(pCommitter);
- if (code) goto _err;
} else {
pCommitter->dReader.pBlockIdx = NULL;
- pCommitter->dReader.pRowInfo = NULL;
}
// Writer
- SHeadFile fHead;
- SDataFile fData;
- SLastFile fLast;
- SSmaFile fSma;
- SDFileSet wSet = {.pHeadF = &fHead, .pDataF = &fData, .pLastF = &fLast, .pSmaF = &fSma};
+ SHeadFile fHead = {.commitID = pCommitter->commitID};
+ SDataFile fData = {.commitID = pCommitter->commitID};
+ SSmaFile fSma = {.commitID = pCommitter->commitID};
+ SSttFile fStt = {.commitID = pCommitter->commitID};
+ SDFileSet wSet = {.fid = pCommitter->commitFid, .pHeadF = &fHead, .pDataF = &fData, .pSmaF = &fSma};
if (pRSet) {
- wSet.diskId = pRSet->diskId;
- wSet.fid = pCommitter->commitFid;
- fHead = (SHeadFile){.commitID = pCommitter->commitID, .size = 0, .offset = 0};
+ ASSERT(pRSet->nSttF <= pCommitter->sttTrigger);
fData = *pRSet->pDataF;
- fLast = (SLastFile){.commitID = pCommitter->commitID, .size = 0, .offset = 0};
fSma = *pRSet->pSmaF;
+ wSet.diskId = pRSet->diskId;
+ if (pRSet->nSttF < pCommitter->sttTrigger) {
+ for (int32_t iStt = 0; iStt < pRSet->nSttF; iStt++) {
+ wSet.aSttF[iStt] = pRSet->aSttF[iStt];
+ }
+ wSet.nSttF = pRSet->nSttF + 1;
+ } else {
+ wSet.nSttF = 1;
+ }
} else {
SDiskID did = {0};
-
tfsAllocDisk(pTsdb->pVnode->pTfs, 0, &did);
-
tfsMkdirRecurAt(pTsdb->pVnode->pTfs, pTsdb->path, did);
-
wSet.diskId = did;
- wSet.fid = pCommitter->commitFid;
- fHead = (SHeadFile){.commitID = pCommitter->commitID, .size = 0, .offset = 0};
- fData = (SDataFile){.commitID = pCommitter->commitID, .size = 0};
- fLast = (SLastFile){.commitID = pCommitter->commitID, .size = 0, .offset = 0};
- fSma = (SSmaFile){.commitID = pCommitter->commitID, .size = 0};
+ wSet.nSttF = 1;
}
+ wSet.aSttF[wSet.nSttF - 1] = &fStt;
code = tsdbDataFWriterOpen(&pCommitter->dWriter.pWriter, pTsdb, &wSet);
if (code) goto _err;
taosArrayClear(pCommitter->dWriter.aBlockIdx);
- taosArrayClear(pCommitter->dWriter.aBlockL);
+ taosArrayClear(pCommitter->dWriter.aSttBlk);
tMapDataReset(&pCommitter->dWriter.mBlock);
tBlockDataReset(&pCommitter->dWriter.bData);
tBlockDataReset(&pCommitter->dWriter.bDatal);
+ // open iter
+ code = tsdbOpenCommitIter(pCommitter);
+ if (code) goto _err;
+
_exit:
return code;
@@ -497,50 +551,45 @@ _err:
return code;
}
-static int32_t tsdbCommitDataBlock(SCommitter *pCommitter, SBlock *pBlock) {
- int32_t code = 0;
- SBlockData *pBlockData = &pCommitter->dWriter.bData;
- SBlock block;
+int32_t tsdbWriteDataBlock(SDataFWriter *pWriter, SBlockData *pBlockData, SMapData *mDataBlk, int8_t cmprAlg) {
+ int32_t code = 0;
- ASSERT(pBlockData->nRow > 0);
+ if (pBlockData->nRow == 0) return code;
- if (pBlock) {
- block = *pBlock; // as a subblock
- } else {
- tBlockReset(&block); // as a new block
- }
+ SDataBlk dataBlk;
+ tDataBlkReset(&dataBlk);
// info
- block.nRow += pBlockData->nRow;
+ dataBlk.nRow += pBlockData->nRow;
for (int32_t iRow = 0; iRow < pBlockData->nRow; iRow++) {
TSDBKEY key = {.ts = pBlockData->aTSKEY[iRow], .version = pBlockData->aVersion[iRow]};
if (iRow == 0) {
- if (tsdbKeyCmprFn(&block.minKey, &key) > 0) {
- block.minKey = key;
+ if (tsdbKeyCmprFn(&dataBlk.minKey, &key) > 0) {
+ dataBlk.minKey = key;
}
} else {
if (pBlockData->aTSKEY[iRow] == pBlockData->aTSKEY[iRow - 1]) {
- block.hasDup = 1;
+ dataBlk.hasDup = 1;
}
}
- if (iRow == pBlockData->nRow - 1 && tsdbKeyCmprFn(&block.maxKey, &key) < 0) {
- block.maxKey = key;
+ if (iRow == pBlockData->nRow - 1 && tsdbKeyCmprFn(&dataBlk.maxKey, &key) < 0) {
+ dataBlk.maxKey = key;
}
- block.minVer = TMIN(block.minVer, key.version);
- block.maxVer = TMAX(block.maxVer, key.version);
+ dataBlk.minVer = TMIN(dataBlk.minVer, key.version);
+ dataBlk.maxVer = TMAX(dataBlk.maxVer, key.version);
}
// write
- block.nSubBlock++;
- code = tsdbWriteBlockData(pCommitter->dWriter.pWriter, pBlockData, &block.aSubBlock[block.nSubBlock - 1],
- ((block.nSubBlock == 1) && !block.hasDup) ? &block.smaInfo : NULL, pCommitter->cmprAlg, 0);
+ dataBlk.nSubBlock++;
+ code = tsdbWriteBlockData(pWriter, pBlockData, &dataBlk.aSubBlock[dataBlk.nSubBlock - 1],
+ ((dataBlk.nSubBlock == 1) && !dataBlk.hasDup) ? &dataBlk.smaInfo : NULL, cmprAlg, 0);
if (code) goto _err;
- // put SBlock
- code = tMapDataPutItem(&pCommitter->dWriter.mBlock, &block, tPutBlock);
+ // put SDataBlk
+ code = tMapDataPutItem(mDataBlk, &dataBlk, tPutDataBlk);
if (code) goto _err;
// clear
@@ -549,39 +598,38 @@ static int32_t tsdbCommitDataBlock(SCommitter *pCommitter, SBlock *pBlock) {
return code;
_err:
- tsdbError("vgId:%d tsdb commit data block failed since %s", TD_VID(pCommitter->pTsdb->pVnode), tstrerror(code));
+ tsdbError("vgId:%d tsdb commit data block failed since %s", TD_VID(pWriter->pTsdb->pVnode), tstrerror(code));
return code;
}
-static int32_t tsdbCommitLastBlock(SCommitter *pCommitter) {
- int32_t code = 0;
- SBlockL blockL;
- SBlockData *pBlockData = &pCommitter->dWriter.bDatal;
+int32_t tsdbWriteSttBlock(SDataFWriter *pWriter, SBlockData *pBlockData, SArray *aSttBlk, int8_t cmprAlg) {
+ int32_t code = 0;
+ SSttBlk sstBlk;
- ASSERT(pBlockData->nRow > 0);
+ if (pBlockData->nRow == 0) return code;
// info
- blockL.suid = pBlockData->suid;
- blockL.nRow = pBlockData->nRow;
- blockL.minKey = TSKEY_MAX;
- blockL.maxKey = TSKEY_MIN;
- blockL.minVer = VERSION_MAX;
- blockL.maxVer = VERSION_MIN;
+ sstBlk.suid = pBlockData->suid;
+ sstBlk.nRow = pBlockData->nRow;
+ sstBlk.minKey = TSKEY_MAX;
+ sstBlk.maxKey = TSKEY_MIN;
+ sstBlk.minVer = VERSION_MAX;
+ sstBlk.maxVer = VERSION_MIN;
for (int32_t iRow = 0; iRow < pBlockData->nRow; iRow++) {
- blockL.minKey = TMIN(blockL.minKey, pBlockData->aTSKEY[iRow]);
- blockL.maxKey = TMAX(blockL.maxKey, pBlockData->aTSKEY[iRow]);
- blockL.minVer = TMIN(blockL.minVer, pBlockData->aVersion[iRow]);
- blockL.maxVer = TMAX(blockL.maxVer, pBlockData->aVersion[iRow]);
+ sstBlk.minKey = TMIN(sstBlk.minKey, pBlockData->aTSKEY[iRow]);
+ sstBlk.maxKey = TMAX(sstBlk.maxKey, pBlockData->aTSKEY[iRow]);
+ sstBlk.minVer = TMIN(sstBlk.minVer, pBlockData->aVersion[iRow]);
+ sstBlk.maxVer = TMAX(sstBlk.maxVer, pBlockData->aVersion[iRow]);
}
- blockL.minUid = pBlockData->uid ? pBlockData->uid : pBlockData->aUid[0];
- blockL.maxUid = pBlockData->uid ? pBlockData->uid : pBlockData->aUid[pBlockData->nRow - 1];
+ sstBlk.minUid = pBlockData->uid ? pBlockData->uid : pBlockData->aUid[0];
+ sstBlk.maxUid = pBlockData->uid ? pBlockData->uid : pBlockData->aUid[pBlockData->nRow - 1];
// write
- code = tsdbWriteBlockData(pCommitter->dWriter.pWriter, pBlockData, &blockL.bInfo, NULL, pCommitter->cmprAlg, 1);
+ code = tsdbWriteBlockData(pWriter, pBlockData, &sstBlk.bInfo, NULL, cmprAlg, 1);
if (code) goto _err;
- // push SBlockL
- if (taosArrayPush(pCommitter->dWriter.aBlockL, &blockL) == NULL) {
+ // push SSttBlk
+ if (taosArrayPush(aSttBlk, &sstBlk) == NULL) {
code = TSDB_CODE_OUT_OF_MEMORY;
goto _err;
}
@@ -592,757 +640,197 @@ static int32_t tsdbCommitLastBlock(SCommitter *pCommitter) {
return code;
_err:
- tsdbError("vgId:%d tsdb commit last block failed since %s", TD_VID(pCommitter->pTsdb->pVnode), tstrerror(code));
+ tsdbError("vgId:%d tsdb commit last block failed since %s", TD_VID(pWriter->pTsdb->pVnode), tstrerror(code));
return code;
}
-static int32_t tsdbMergeCommitData(SCommitter *pCommitter, STbDataIter *pIter, SBlock *pBlock) {
- int32_t code = 0;
- STbData *pTbData = pIter->pTbData;
- SBlockData *pBlockDataR = &pCommitter->dReader.bData;
- SBlockData *pBlockDataW = &pCommitter->dWriter.bData;
+static int32_t tsdbCommitFileDataEnd(SCommitter *pCommitter) {
+ int32_t code = 0;
- code = tsdbReadDataBlock(pCommitter->dReader.pReader, pBlock, pBlockDataR);
+ // write aBlockIdx
+ code = tsdbWriteBlockIdx(pCommitter->dWriter.pWriter, pCommitter->dWriter.aBlockIdx);
if (code) goto _err;
- tBlockDataClear(pBlockDataW);
- int32_t iRow = 0;
- TSDBROW row;
- TSDBROW *pRow1 = tsdbTbDataIterGet(pIter);
- TSDBROW *pRow2 = &row;
- *pRow2 = tsdbRowFromBlockData(pBlockDataR, iRow);
- while (pRow1 && pRow2) {
- int32_t c = tsdbRowCmprFn(pRow1, pRow2);
-
- if (c < 0) {
- code = tsdbCommitterUpdateRowSchema(pCommitter, pTbData->suid, pTbData->uid, TSDBROW_SVERSION(pRow1));
- if (code) goto _err;
-
- code = tBlockDataAppendRow(pBlockDataW, pRow1, pCommitter->skmRow.pTSchema, pTbData->uid);
- if (code) goto _err;
-
- // next
- tsdbTbDataIterNext(pIter);
- pRow1 = tsdbTbDataIterGet(pIter);
- } else if (c > 0) {
- code = tBlockDataAppendRow(pBlockDataW, pRow2, NULL, pTbData->uid);
- if (code) goto _err;
-
- iRow++;
- if (iRow < pBlockDataR->nRow) {
- *pRow2 = tsdbRowFromBlockData(pBlockDataR, iRow);
- } else {
- pRow2 = NULL;
- }
- } else {
- ASSERT(0);
- }
-
- // check
- if (pBlockDataW->nRow >= pCommitter->maxRow * 4 / 5) {
- code = tsdbCommitDataBlock(pCommitter, NULL);
- if (code) goto _err;
- }
- }
+ // write aSttBlk
+ code = tsdbWriteSttBlk(pCommitter->dWriter.pWriter, pCommitter->dWriter.aSttBlk);
+ if (code) goto _err;
- while (pRow2) {
- code = tBlockDataAppendRow(pBlockDataW, pRow2, NULL, pTbData->uid);
- if (code) goto _err;
+ // update file header
+ code = tsdbUpdateDFileSetHeader(pCommitter->dWriter.pWriter);
+ if (code) goto _err;
- iRow++;
- if (iRow < pBlockDataR->nRow) {
- *pRow2 = tsdbRowFromBlockData(pBlockDataR, iRow);
- } else {
- pRow2 = NULL;
- }
+ // upsert SDFileSet
+ code = tsdbFSUpsertFSet(&pCommitter->fs, &pCommitter->dWriter.pWriter->wSet);
+ if (code) goto _err;
- // check
- if (pBlockDataW->nRow >= pCommitter->maxRow * 4 / 5) {
- code = tsdbCommitDataBlock(pCommitter, NULL);
- if (code) goto _err;
- }
- }
+ // close and sync
+ code = tsdbDataFWriterClose(&pCommitter->dWriter.pWriter, 1);
+ if (code) goto _err;
- // check
- if (pBlockDataW->nRow > 0) {
- code = tsdbCommitDataBlock(pCommitter, NULL);
+ if (pCommitter->dReader.pReader) {
+ code = tsdbDataFReaderClose(&pCommitter->dReader.pReader);
if (code) goto _err;
}
+_exit:
return code;
_err:
- tsdbError("vgId:%d, tsdb merge commit data failed since %s", TD_VID(pCommitter->pTsdb->pVnode), tstrerror(code));
+ tsdbError("vgId:%d, commit file data end failed since %s", TD_VID(pCommitter->pTsdb->pVnode), tstrerror(code));
return code;
}
-static int32_t tsdbCommitTableMemData(SCommitter *pCommitter, STbDataIter *pIter, TSDBKEY toKey) {
- int32_t code = 0;
- STbData *pTbData = pIter->pTbData;
- SBlockData *pBlockData = &pCommitter->dWriter.bData;
-
- tBlockDataClear(pBlockData);
- TSDBROW *pRow = tsdbTbDataIterGet(pIter);
- while (true) {
- if (pRow == NULL) {
- if (pBlockData->nRow > 0) {
- goto _write_block;
- } else {
- break;
- }
- }
-
- // update schema
- code = tsdbCommitterUpdateRowSchema(pCommitter, pTbData->suid, pTbData->uid, TSDBROW_SVERSION(pRow));
- if (code) goto _err;
+static int32_t tsdbMoveCommitData(SCommitter *pCommitter, TABLEID toTable) {
+ int32_t code = 0;
- // append
- code = tBlockDataAppendRow(pBlockData, pRow, pCommitter->skmRow.pTSchema, pTbData->uid);
+ while (pCommitter->dReader.pBlockIdx && tTABLEIDCmprFn(pCommitter->dReader.pBlockIdx, &toTable) < 0) {
+ SBlockIdx blockIdx = *pCommitter->dReader.pBlockIdx;
+ code = tsdbWriteDataBlk(pCommitter->dWriter.pWriter, &pCommitter->dReader.mBlock, &blockIdx);
if (code) goto _err;
- tsdbTbDataIterNext(pIter);
- pRow = tsdbTbDataIterGet(pIter);
- if (pRow) {
- TSDBKEY rowKey = TSDBROW_KEY(pRow);
- if (tsdbKeyCmprFn(&rowKey, &toKey) >= 0) {
- pRow = NULL;
- }
+ if (taosArrayPush(pCommitter->dWriter.aBlockIdx, &blockIdx) == NULL) {
+ code = TSDB_CODE_OUT_OF_MEMORY;
+ goto _err;
}
- if (pBlockData->nRow >= pCommitter->maxRow * 4 / 5) {
- _write_block:
- code = tsdbCommitDataBlock(pCommitter, NULL);
- if (code) goto _err;
- }
+ code = tsdbCommitterNextTableData(pCommitter);
+ if (code) goto _err;
}
return code;
_err:
- tsdbError("vgId:%d, tsdb commit table mem data failed since %s", TD_VID(pCommitter->pTsdb->pVnode), tstrerror(code));
+ tsdbError("vgId:%d tsdb move commit data failed since %s", TD_VID(pCommitter->pTsdb->pVnode), tstrerror(code));
return code;
}
-static int32_t tsdbGetNumOfRowsLessThan(STbDataIter *pIter, TSDBKEY key) {
- int32_t nRow = 0;
-
- STbDataIter iter = *pIter;
- while (true) {
- TSDBROW *pRow = tsdbTbDataIterGet(&iter);
- if (pRow == NULL) break;
+static int32_t tsdbCommitFileDataImpl(SCommitter *pCommitter);
+static int32_t tsdbCommitFileData(SCommitter *pCommitter) {
+ int32_t code = 0;
+ STsdb *pTsdb = pCommitter->pTsdb;
+ SMemTable *pMemTable = pTsdb->imem;
- int32_t c = tsdbKeyCmprFn(&TSDBROW_KEY(pRow), &key);
- if (c < 0) {
- nRow++;
- tsdbTbDataIterNext(&iter);
- } else if (c > 0) {
- break;
- } else {
- ASSERT(0);
- }
- }
+ // commit file data start
+ code = tsdbCommitFileDataStart(pCommitter);
+ if (code) goto _err;
- return nRow;
-}
+ // impl
+ code = tsdbCommitFileDataImpl(pCommitter);
+ if (code) goto _err;
-static int32_t tsdbMergeAsSubBlock(SCommitter *pCommitter, STbDataIter *pIter, SBlock *pBlock) {
- int32_t code = 0;
- STbData *pTbData = pIter->pTbData;
- SBlockData *pBlockData = &pCommitter->dWriter.bData;
+ // commit file data end
+ code = tsdbCommitFileDataEnd(pCommitter);
+ if (code) goto _err;
- tBlockDataClear(pBlockData);
- TSDBROW *pRow = tsdbTbDataIterGet(pIter);
- while (true) {
- if (pRow == NULL) break;
+ return code;
- code = tsdbCommitterUpdateRowSchema(pCommitter, pTbData->suid, pTbData->uid, TSDBROW_SVERSION(pRow));
- if (code) goto _err;
+_err:
+ tsdbError("vgId:%d, commit file data failed since %s", TD_VID(pTsdb->pVnode), tstrerror(code));
+ tsdbDataFReaderClose(&pCommitter->dReader.pReader);
+ tsdbDataFWriterClose(&pCommitter->dWriter.pWriter, 0);
+ return code;
+}
- code = tBlockDataAppendRow(pBlockData, pRow, pCommitter->skmRow.pTSchema, pTbData->uid);
- if (code) goto _err;
+// ----------------------------------------------------------------------------
+static int32_t tsdbStartCommit(STsdb *pTsdb, SCommitter *pCommitter) {
+ int32_t code = 0;
- tsdbTbDataIterNext(pIter);
- pRow = tsdbTbDataIterGet(pIter);
- if (pRow) {
- TSDBKEY rowKey = TSDBROW_KEY(pRow);
- if (tsdbKeyCmprFn(&rowKey, &pBlock->maxKey) > 0) {
- pRow = NULL;
- }
- }
- }
+ memset(pCommitter, 0, sizeof(*pCommitter));
+ ASSERT(pTsdb->mem && pTsdb->imem == NULL);
- ASSERT(pBlockData->nRow > 0 && pBlock->nRow + pBlockData->nRow <= pCommitter->maxRow);
+ taosThreadRwlockWrlock(&pTsdb->rwLock);
+ pTsdb->imem = pTsdb->mem;
+ pTsdb->mem = NULL;
+ taosThreadRwlockUnlock(&pTsdb->rwLock);
- code = tsdbCommitDataBlock(pCommitter, pBlock);
+ pCommitter->pTsdb = pTsdb;
+ pCommitter->commitID = pTsdb->pVnode->state.commitID;
+ pCommitter->minutes = pTsdb->keepCfg.days;
+ pCommitter->precision = pTsdb->keepCfg.precision;
+ pCommitter->minRow = pTsdb->pVnode->config.tsdbCfg.minRows;
+ pCommitter->maxRow = pTsdb->pVnode->config.tsdbCfg.maxRows;
+ pCommitter->cmprAlg = pTsdb->pVnode->config.tsdbCfg.compression;
+ pCommitter->sttTrigger = pTsdb->pVnode->config.sttTrigger;
+ pCommitter->aTbDataP = tsdbMemTableGetTbDataArray(pTsdb->imem);
+ if (pCommitter->aTbDataP == NULL) {
+ code = TSDB_CODE_OUT_OF_MEMORY;
+ goto _err;
+ }
+ code = tsdbFSCopy(pTsdb, &pCommitter->fs);
if (code) goto _err;
return code;
_err:
- tsdbError("vgId:%d, tsdb merge as subblock failed since %s", TD_VID(pCommitter->pTsdb->pVnode), tstrerror(code));
+ tsdbError("vgId:%d, tsdb start commit failed since %s", TD_VID(pTsdb->pVnode), tstrerror(code));
return code;
}
-static int32_t tsdbMergeCommitLast(SCommitter *pCommitter, STbDataIter *pIter) {
- int32_t code = 0;
- STbData *pTbData = pIter->pTbData;
- int32_t nRow = tsdbGetNumOfRowsLessThan(pIter, (TSDBKEY){.ts = pCommitter->maxKey + 1, .version = VERSION_MIN});
+static int32_t tsdbCommitDataStart(SCommitter *pCommitter) {
+ int32_t code = 0;
- if (pCommitter->dReader.pRowInfo && tTABLEIDCmprFn(pTbData, pCommitter->dReader.pRowInfo) == 0) {
- if (pCommitter->dReader.pRowInfo->suid) { // super table
- for (int32_t iRow = pCommitter->dReader.iRow; iRow < pCommitter->dReader.bDatal.nRow; iRow++) {
- if (pTbData->uid != pCommitter->dReader.bDatal.aUid[iRow]) break;
- nRow++;
- }
- } else { // normal table
- ASSERT(pCommitter->dReader.iRow == 0);
- nRow += pCommitter->dReader.bDatal.nRow;
- }
+ // reader
+ pCommitter->dReader.aBlockIdx = taosArrayInit(0, sizeof(SBlockIdx));
+ if (pCommitter->dReader.aBlockIdx == NULL) {
+ code = TSDB_CODE_OUT_OF_MEMORY;
+ goto _exit;
}
- if (nRow == 0) goto _exit;
+ code = tBlockDataCreate(&pCommitter->dReader.bData);
+ if (code) goto _exit;
- TSDBROW *pRow = tsdbTbDataIterGet(pIter);
- if (pRow && TSDBROW_TS(pRow) > pCommitter->maxKey) {
- pRow = NULL;
- }
+ // merger
+ for (int32_t iStt = 0; iStt < TSDB_MAX_STT_TRIGGER; iStt++) {
+ SDataIter *pIter = &pCommitter->aDataIter[iStt];
+ pIter->aSttBlk = taosArrayInit(0, sizeof(SSttBlk));
+ if (pIter->aSttBlk == NULL) {
+ code = TSDB_CODE_OUT_OF_MEMORY;
+ goto _exit;
+ }
- SRowInfo *pRowInfo = pCommitter->dReader.pRowInfo;
- if (pRowInfo && pRowInfo->uid != pTbData->uid) {
- pRowInfo = NULL;
+ code = tBlockDataCreate(&pIter->bData);
+ if (code) goto _exit;
}
- while (nRow) {
- SBlockData *pBlockData;
- int8_t toData;
+ // writer
+ pCommitter->dWriter.aBlockIdx = taosArrayInit(0, sizeof(SBlockIdx));
+ if (pCommitter->dWriter.aBlockIdx == NULL) {
+ code = TSDB_CODE_OUT_OF_MEMORY;
+ goto _exit;
+ }
- if (nRow < pCommitter->minRow) { // to .last
- toData = 0;
- pBlockData = &pCommitter->dWriter.bDatal;
+ pCommitter->dWriter.aSttBlk = taosArrayInit(0, sizeof(SSttBlk));
+ if (pCommitter->dWriter.aSttBlk == NULL) {
+ code = TSDB_CODE_OUT_OF_MEMORY;
+ goto _exit;
+ }
- // commit and reset block data schema if need
- // QUESTION: Is there a case that pBlockData->nRow == 0 but need to change schema ?
- if (pBlockData->suid || pBlockData->uid) {
- if (pBlockData->suid != pTbData->suid || pBlockData->suid == 0) {
- if (pBlockData->nRow > 0) {
- code = tsdbCommitLastBlock(pCommitter);
- if (code) goto _err;
- }
+ code = tBlockDataCreate(&pCommitter->dWriter.bData);
+ if (code) goto _exit;
- tBlockDataReset(pBlockData);
- }
- }
+ code = tBlockDataCreate(&pCommitter->dWriter.bDatal);
+ if (code) goto _exit;
- // set block data schema if need
- if (pBlockData->suid == 0 && pBlockData->uid == 0) {
- code =
- tBlockDataInit(pBlockData, pTbData->suid, pTbData->suid ? 0 : pTbData->uid, pCommitter->skmTable.pTSchema);
- if (code) goto _err;
- }
-
- if (pBlockData->nRow + nRow > pCommitter->maxRow) {
- code = tsdbCommitLastBlock(pCommitter);
- if (code) goto _err;
- }
- } else { // to .data
- toData = 1;
- pBlockData = &pCommitter->dWriter.bData;
- ASSERT(pBlockData->nRow == 0);
- }
-
- while (pRow && pRowInfo) {
- int32_t c = tsdbRowCmprFn(pRow, &pRowInfo->row);
- if (c < 0) {
- code = tsdbCommitterUpdateRowSchema(pCommitter, pTbData->suid, pTbData->uid, TSDBROW_SVERSION(pRow));
- if (code) goto _err;
-
- code = tBlockDataAppendRow(pBlockData, pRow, pCommitter->skmRow.pTSchema, pTbData->uid);
- if (code) goto _err;
-
- tsdbTbDataIterNext(pIter);
- pRow = tsdbTbDataIterGet(pIter);
- if (pRow && TSDBROW_TS(pRow) > pCommitter->maxKey) {
- pRow = NULL;
- }
- } else if (c > 0) {
- code = tBlockDataAppendRow(pBlockData, &pRowInfo->row, NULL, pTbData->uid);
- if (code) goto _err;
-
- code = tsdbCommitterNextLastRow(pCommitter);
- if (code) goto _err;
-
- pRowInfo = pCommitter->dReader.pRowInfo;
- if (pRowInfo && pRowInfo->uid != pTbData->uid) {
- pRowInfo = NULL;
- }
- } else {
- ASSERT(0);
- }
-
- nRow--;
- if (toData) {
- if (nRow == 0 || pBlockData->nRow >= pCommitter->maxRow * 4 / 5) {
- code = tsdbCommitDataBlock(pCommitter, NULL);
- if (code) goto _err;
- goto _outer_break;
- }
- }
- }
-
- while (pRow) {
- code = tsdbCommitterUpdateRowSchema(pCommitter, pTbData->suid, pTbData->uid, TSDBROW_SVERSION(pRow));
- if (code) goto _err;
-
- code = tBlockDataAppendRow(pBlockData, pRow, pCommitter->skmRow.pTSchema, pTbData->uid);
- if (code) goto _err;
-
- tsdbTbDataIterNext(pIter);
- pRow = tsdbTbDataIterGet(pIter);
- if (pRow && TSDBROW_TS(pRow) > pCommitter->maxKey) {
- pRow = NULL;
- }
-
- nRow--;
- if (toData) {
- if (nRow == 0 || pBlockData->nRow >= pCommitter->maxRow * 4 / 5) {
- code = tsdbCommitDataBlock(pCommitter, NULL);
- if (code) goto _err;
- goto _outer_break;
- }
- }
- }
-
- while (pRowInfo) {
- code = tBlockDataAppendRow(pBlockData, &pRowInfo->row, NULL, pTbData->uid);
- if (code) goto _err;
-
- code = tsdbCommitterNextLastRow(pCommitter);
- if (code) goto _err;
-
- pRowInfo = pCommitter->dReader.pRowInfo;
- if (pRowInfo && pRowInfo->uid != pTbData->uid) {
- pRowInfo = NULL;
- }
-
- nRow--;
- if (toData) {
- if (nRow == 0 || pBlockData->nRow >= pCommitter->maxRow * 4 / 5) {
- code = tsdbCommitDataBlock(pCommitter, NULL);
- if (code) goto _err;
- goto _outer_break;
- }
- }
- }
-
- _outer_break:
- ASSERT(nRow >= 0);
- }
-
-_exit:
- return code;
-
-_err:
- tsdbError("vgId:%d tsdb merge commit last failed since %s", TD_VID(pCommitter->pTsdb->pVnode), tstrerror(code));
- return code;
-}
-
-static int32_t tsdbCommitTableData(SCommitter *pCommitter, STbData *pTbData) {
- int32_t code = 0;
-
- ASSERT(pCommitter->dReader.pBlockIdx == NULL || tTABLEIDCmprFn(pCommitter->dReader.pBlockIdx, pTbData) >= 0);
- ASSERT(pCommitter->dReader.pRowInfo == NULL || tTABLEIDCmprFn(pCommitter->dReader.pRowInfo, pTbData) >= 0);
-
- // merge commit table data
- STbDataIter iter = {0};
- STbDataIter *pIter = &iter;
- TSDBROW *pRow;
-
- tsdbTbDataIterOpen(pTbData, &(TSDBKEY){.ts = pCommitter->minKey, .version = VERSION_MIN}, 0, pIter);
- pRow = tsdbTbDataIterGet(pIter);
- if (pRow && TSDBROW_TS(pRow) > pCommitter->maxKey) {
- pRow = NULL;
- }
-
- if (pRow == NULL) goto _exit;
-
- int32_t iBlock = 0;
- SBlock block;
- SBlock *pBlock = █
- if (pCommitter->dReader.pBlockIdx && tTABLEIDCmprFn(pTbData, pCommitter->dReader.pBlockIdx) == 0) {
- tMapDataGetItemByIdx(&pCommitter->dReader.mBlock, iBlock, pBlock, tGetBlock);
- } else {
- pBlock = NULL;
- }
-
- code = tsdbCommitterUpdateTableSchema(pCommitter, pTbData->suid, pTbData->uid);
- if (code) goto _err;
-
- tMapDataReset(&pCommitter->dWriter.mBlock);
- code = tBlockDataInit(&pCommitter->dReader.bData, pTbData->suid, pTbData->uid, pCommitter->skmTable.pTSchema);
- if (code) goto _err;
- code = tBlockDataInit(&pCommitter->dWriter.bData, pTbData->suid, pTbData->uid, pCommitter->skmTable.pTSchema);
- if (code) goto _err;
-
- // .data merge
- while (pBlock && pRow) {
- int32_t c = tBlockCmprFn(pBlock, &(SBlock){.minKey = TSDBROW_KEY(pRow), .maxKey = TSDBROW_KEY(pRow)});
- if (c < 0) { // disk
- code = tMapDataPutItem(&pCommitter->dWriter.mBlock, pBlock, tPutBlock);
- if (code) goto _err;
-
- // next
- iBlock++;
- if (iBlock < pCommitter->dReader.mBlock.nItem) {
- tMapDataGetItemByIdx(&pCommitter->dReader.mBlock, iBlock, pBlock, tGetBlock);
- } else {
- pBlock = NULL;
- }
- } else if (c > 0) { // memory
- code = tsdbCommitTableMemData(pCommitter, pIter, pBlock->minKey);
- if (code) goto _err;
-
- // next
- pRow = tsdbTbDataIterGet(pIter);
- if (pRow && TSDBROW_TS(pRow) > pCommitter->maxKey) {
- pRow = NULL;
- }
- } else { // merge
- int32_t nOvlp = tsdbGetNumOfRowsLessThan(pIter, pBlock->maxKey);
-
- ASSERT(nOvlp > 0);
-
- if (pBlock->nRow + nOvlp <= pCommitter->maxRow && pBlock->nSubBlock < TSDB_MAX_SUBBLOCKS) {
- code = tsdbMergeAsSubBlock(pCommitter, pIter, pBlock);
- if (code) goto _err;
- } else {
- code = tsdbMergeCommitData(pCommitter, pIter, pBlock);
- if (code) goto _err;
- }
-
- // next
- pRow = tsdbTbDataIterGet(pIter);
- if (pRow && TSDBROW_TS(pRow) > pCommitter->maxKey) {
- pRow = NULL;
- }
- iBlock++;
- if (iBlock < pCommitter->dReader.mBlock.nItem) {
- tMapDataGetItemByIdx(&pCommitter->dReader.mBlock, iBlock, pBlock, tGetBlock);
- } else {
- pBlock = NULL;
- }
- }
- }
-
- while (pBlock) {
- code = tMapDataPutItem(&pCommitter->dWriter.mBlock, pBlock, tPutBlock);
- if (code) goto _err;
-
- // next
- iBlock++;
- if (iBlock < pCommitter->dReader.mBlock.nItem) {
- tMapDataGetItemByIdx(&pCommitter->dReader.mBlock, iBlock, pBlock, tGetBlock);
- } else {
- pBlock = NULL;
- }
- }
-
- // .data append and .last merge
- code = tsdbMergeCommitLast(pCommitter, pIter);
- if (code) goto _err;
-
- // end
- if (pCommitter->dWriter.mBlock.nItem > 0) {
- SBlockIdx blockIdx = {.suid = pTbData->suid, .uid = pTbData->uid};
- code = tsdbWriteBlock(pCommitter->dWriter.pWriter, &pCommitter->dWriter.mBlock, &blockIdx);
- if (code) goto _err;
-
- if (taosArrayPush(pCommitter->dWriter.aBlockIdx, &blockIdx) == NULL) {
- code = TSDB_CODE_OUT_OF_MEMORY;
- goto _err;
- }
- }
-
-_exit:
- pRow = tsdbTbDataIterGet(pIter);
- if (pRow) {
- pCommitter->nextKey = TMIN(pCommitter->nextKey, TSDBROW_TS(pRow));
- }
-
- return code;
-
-_err:
- tsdbError("vgId:%d tsdb commit table data failed since %s", TD_VID(pCommitter->pTsdb->pVnode), tstrerror(code));
- return code;
-}
-
-static int32_t tsdbCommitFileDataEnd(SCommitter *pCommitter) {
- int32_t code = 0;
-
- // write aBlockIdx
- code = tsdbWriteBlockIdx(pCommitter->dWriter.pWriter, pCommitter->dWriter.aBlockIdx);
- if (code) goto _err;
-
- // write aBlockL
- code = tsdbWriteBlockL(pCommitter->dWriter.pWriter, pCommitter->dWriter.aBlockL);
- if (code) goto _err;
-
- // update file header
- code = tsdbUpdateDFileSetHeader(pCommitter->dWriter.pWriter);
- if (code) goto _err;
-
- // upsert SDFileSet
- code = tsdbFSUpsertFSet(&pCommitter->fs, &pCommitter->dWriter.pWriter->wSet);
- if (code) goto _err;
-
- // close and sync
- code = tsdbDataFWriterClose(&pCommitter->dWriter.pWriter, 1);
- if (code) goto _err;
-
- if (pCommitter->dReader.pReader) {
- code = tsdbDataFReaderClose(&pCommitter->dReader.pReader);
- if (code) goto _err;
- }
-
-_exit:
- return code;
-
-_err:
- tsdbError("vgId:%d, commit file data end failed since %s", TD_VID(pCommitter->pTsdb->pVnode), tstrerror(code));
- return code;
-}
-
-static int32_t tsdbMoveCommitData(SCommitter *pCommitter, TABLEID toTable) {
- int32_t code = 0;
-
- // .data
- while (true) {
- if (pCommitter->dReader.pBlockIdx == NULL || tTABLEIDCmprFn(pCommitter->dReader.pBlockIdx, &toTable) >= 0) break;
-
- SBlockIdx blockIdx = *pCommitter->dReader.pBlockIdx;
- code = tsdbWriteBlock(pCommitter->dWriter.pWriter, &pCommitter->dReader.mBlock, &blockIdx);
- if (code) goto _err;
-
- if (taosArrayPush(pCommitter->dWriter.aBlockIdx, &blockIdx) == NULL) {
- code = TSDB_CODE_OUT_OF_MEMORY;
- goto _err;
- }
-
- code = tsdbCommitterNextTableData(pCommitter);
- if (code) goto _err;
- }
-
- // .last
- while (true) {
- if (pCommitter->dReader.pRowInfo == NULL || tTABLEIDCmprFn(pCommitter->dReader.pRowInfo, &toTable) >= 0) break;
-
- SBlockData *pBlockDataR = &pCommitter->dReader.bDatal;
- SBlockData *pBlockDataW = &pCommitter->dWriter.bDatal;
- tb_uid_t suid = pCommitter->dReader.pRowInfo->suid;
- tb_uid_t uid = pCommitter->dReader.pRowInfo->uid;
-
- ASSERT((pBlockDataR->suid && !pBlockDataR->uid) || (!pBlockDataR->suid && pBlockDataR->uid));
- ASSERT(pBlockDataR->nRow > 0);
-
- // commit and reset block data schema if need
- if (pBlockDataW->suid || pBlockDataW->uid) {
- if (pBlockDataW->suid != suid || pBlockDataW->suid == 0) {
- if (pBlockDataW->nRow > 0) {
- code = tsdbCommitLastBlock(pCommitter);
- if (code) goto _err;
- }
- tBlockDataReset(pBlockDataW);
- }
- }
-
- // set block data schema if need
- if (pBlockDataW->suid == 0 && pBlockDataW->uid == 0) {
- code = tsdbCommitterUpdateTableSchema(pCommitter, suid, uid);
- if (code) goto _err;
-
- code = tBlockDataInit(pBlockDataW, suid, suid ? 0 : uid, pCommitter->skmTable.pTSchema);
- if (code) goto _err;
- }
-
- // check if it can make sure that one table data in one block
- int32_t nRow = 0;
- if (pBlockDataR->suid) {
- int32_t iRow = pCommitter->dReader.iRow;
- while ((iRow < pBlockDataR->nRow) && (pBlockDataR->aUid[iRow] == uid)) {
- nRow++;
- iRow++;
- }
- } else {
- ASSERT(pCommitter->dReader.iRow == 0);
- nRow = pBlockDataR->nRow;
- }
-
- ASSERT(nRow > 0 && nRow < pCommitter->minRow);
-
- if (pBlockDataW->nRow + nRow > pCommitter->maxRow) {
- ASSERT(pBlockDataW->nRow > 0);
-
- code = tsdbCommitLastBlock(pCommitter);
- if (code) goto _err;
- }
-
- while (nRow > 0) {
- code = tBlockDataAppendRow(pBlockDataW, &pCommitter->dReader.pRowInfo->row, NULL, uid);
- if (code) goto _err;
-
- code = tsdbCommitterNextLastRow(pCommitter);
- if (code) goto _err;
-
- nRow--;
- }
- }
-
- return code;
-
-_err:
- tsdbError("vgId:%d tsdb move commit data failed since %s", TD_VID(pCommitter->pTsdb->pVnode), tstrerror(code));
- return code;
-}
-
-static int32_t tsdbCommitFileData(SCommitter *pCommitter) {
- int32_t code = 0;
- STsdb *pTsdb = pCommitter->pTsdb;
- SMemTable *pMemTable = pTsdb->imem;
-
- // commit file data start
- code = tsdbCommitFileDataStart(pCommitter);
- if (code) goto _err;
-
- // commit file data impl
- for (int32_t iTbData = 0; iTbData < taosArrayGetSize(pCommitter->aTbDataP); iTbData++) {
- STbData *pTbData = (STbData *)taosArrayGetP(pCommitter->aTbDataP, iTbData);
-
- // move commit until current (suid, uid)
- code = tsdbMoveCommitData(pCommitter, *(TABLEID *)pTbData);
- if (code) goto _err;
-
- // commit current table data
- code = tsdbCommitTableData(pCommitter, pTbData);
- if (code) goto _err;
-
- // move next reader table data if need
- if (pCommitter->dReader.pBlockIdx && tTABLEIDCmprFn(pTbData, pCommitter->dReader.pBlockIdx) == 0) {
- code = tsdbCommitterNextTableData(pCommitter);
- if (code) goto _err;
- }
- }
-
- code = tsdbMoveCommitData(pCommitter, (TABLEID){.suid = INT64_MAX, .uid = INT64_MAX});
- if (code) goto _err;
-
- if (pCommitter->dWriter.bDatal.nRow > 0) {
- code = tsdbCommitLastBlock(pCommitter);
- if (code) goto _err;
- }
-
- // commit file data end
- code = tsdbCommitFileDataEnd(pCommitter);
- if (code) goto _err;
-
- return code;
-
-_err:
- tsdbError("vgId:%d, commit file data failed since %s", TD_VID(pTsdb->pVnode), tstrerror(code));
- tsdbDataFReaderClose(&pCommitter->dReader.pReader);
- tsdbDataFWriterClose(&pCommitter->dWriter.pWriter, 0);
- return code;
-}
-
-// ----------------------------------------------------------------------------
-static int32_t tsdbStartCommit(STsdb *pTsdb, SCommitter *pCommitter) {
- int32_t code = 0;
-
- memset(pCommitter, 0, sizeof(*pCommitter));
- ASSERT(pTsdb->mem && pTsdb->imem == NULL);
-
- taosThreadRwlockWrlock(&pTsdb->rwLock);
- pTsdb->imem = pTsdb->mem;
- pTsdb->mem = NULL;
- taosThreadRwlockUnlock(&pTsdb->rwLock);
-
- pCommitter->pTsdb = pTsdb;
- pCommitter->commitID = pTsdb->pVnode->state.commitID;
- pCommitter->minutes = pTsdb->keepCfg.days;
- pCommitter->precision = pTsdb->keepCfg.precision;
- pCommitter->minRow = pTsdb->pVnode->config.tsdbCfg.minRows;
- pCommitter->maxRow = pTsdb->pVnode->config.tsdbCfg.maxRows;
- pCommitter->cmprAlg = pTsdb->pVnode->config.tsdbCfg.compression;
- pCommitter->aTbDataP = tsdbMemTableGetTbDataArray(pTsdb->imem);
- if (pCommitter->aTbDataP == NULL) {
- code = TSDB_CODE_OUT_OF_MEMORY;
- goto _err;
- }
-
- code = tsdbFSCopy(pTsdb, &pCommitter->fs);
- if (code) goto _err;
-
- return code;
-
-_err:
- tsdbError("vgId:%d, tsdb start commit failed since %s", TD_VID(pTsdb->pVnode), tstrerror(code));
- return code;
-}
-
-static int32_t tsdbCommitDataStart(SCommitter *pCommitter) {
- int32_t code = 0;
-
- // Reader
- pCommitter->dReader.aBlockIdx = taosArrayInit(0, sizeof(SBlockIdx));
- if (pCommitter->dReader.aBlockIdx == NULL) {
- code = TSDB_CODE_OUT_OF_MEMORY;
- goto _exit;
- }
-
- code = tBlockDataCreate(&pCommitter->dReader.bData);
- if (code) goto _exit;
-
- pCommitter->dReader.aBlockL = taosArrayInit(0, sizeof(SBlockL));
- if (pCommitter->dReader.aBlockL == NULL) {
- code = TSDB_CODE_OUT_OF_MEMORY;
- goto _exit;
- }
-
- code = tBlockDataCreate(&pCommitter->dReader.bDatal);
- if (code) goto _exit;
-
- // Writer
- pCommitter->dWriter.aBlockIdx = taosArrayInit(0, sizeof(SBlockIdx));
- if (pCommitter->dWriter.aBlockIdx == NULL) {
- code = TSDB_CODE_OUT_OF_MEMORY;
- goto _exit;
- }
-
- pCommitter->dWriter.aBlockL = taosArrayInit(0, sizeof(SBlockL));
- if (pCommitter->dWriter.aBlockL == NULL) {
- code = TSDB_CODE_OUT_OF_MEMORY;
- goto _exit;
- }
-
- code = tBlockDataCreate(&pCommitter->dWriter.bData);
- if (code) goto _exit;
-
- code = tBlockDataCreate(&pCommitter->dWriter.bDatal);
- if (code) goto _exit;
-
-_exit:
- return code;
-}
+_exit:
+ return code;
+}
static void tsdbCommitDataEnd(SCommitter *pCommitter) {
- // Reader
+ // reader
taosArrayDestroy(pCommitter->dReader.aBlockIdx);
tMapDataClear(&pCommitter->dReader.mBlock);
tBlockDataDestroy(&pCommitter->dReader.bData, 1);
- taosArrayDestroy(pCommitter->dReader.aBlockL);
- tBlockDataDestroy(&pCommitter->dReader.bDatal, 1);
- // Writer
+ // merger
+ for (int32_t iStt = 0; iStt < TSDB_MAX_STT_TRIGGER; iStt++) {
+ SDataIter *pIter = &pCommitter->aDataIter[iStt];
+ taosArrayDestroy(pIter->aSttBlk);
+ tBlockDataDestroy(&pIter->bData, 1);
+ }
+
+ // writer
taosArrayDestroy(pCommitter->dWriter.aBlockIdx);
- taosArrayDestroy(pCommitter->dWriter.aBlockL);
+ taosArrayDestroy(pCommitter->dWriter.aSttBlk);
tMapDataClear(&pCommitter->dWriter.mBlock);
tBlockDataDestroy(&pCommitter->dWriter.bData, 1);
tBlockDataDestroy(&pCommitter->dWriter.bDatal, 1);
@@ -1373,7 +861,7 @@ static int32_t tsdbCommitData(SCommitter *pCommitter) {
tsdbCommitDataEnd(pCommitter);
_exit:
- tsdbDebug("vgId:%d, commit data done, nRow:%" PRId64, TD_VID(pTsdb->pVnode), pMemTable->nRow);
+ tsdbInfo("vgId:%d, commit data done, nRow:%" PRId64, TD_VID(pTsdb->pVnode), pMemTable->nRow);
return code;
_err:
@@ -1499,6 +987,11 @@ static int32_t tsdbEndCommit(SCommitter *pCommitter, int32_t eno) {
tsdbFSDestroy(&pCommitter->fs);
taosArrayDestroy(pCommitter->aTbDataP);
+ // if (pCommitter->toMerge) {
+ // code = tsdbMerge(pTsdb);
+ // if (code) goto _err;
+ // }
+
tsdbInfo("vgId:%d, tsdb end commit", TD_VID(pTsdb->pVnode));
return code;
@@ -1506,3 +999,473 @@ _err:
tsdbError("vgId:%d, tsdb end commit failed since %s", TD_VID(pTsdb->pVnode), tstrerror(code));
return code;
}
+
+// ================================================================================
+
+static FORCE_INLINE SRowInfo *tsdbGetCommitRow(SCommitter *pCommitter) {
+ return (pCommitter->pIter) ? &pCommitter->pIter->r : NULL;
+}
+
+static int32_t tsdbNextCommitRow(SCommitter *pCommitter) {
+ int32_t code = 0;
+
+ if (pCommitter->pIter) {
+ SDataIter *pIter = pCommitter->pIter;
+ if (pCommitter->pIter->type == MEMORY_DATA_ITER) { // memory
+ tsdbTbDataIterNext(&pIter->iter);
+ TSDBROW *pRow = tsdbTbDataIterGet(&pIter->iter);
+ while (true) {
+ if (pRow && TSDBROW_TS(pRow) > pCommitter->maxKey) {
+ pCommitter->nextKey = TMIN(pCommitter->nextKey, TSDBROW_TS(pRow));
+ pRow = NULL;
+ }
+
+ if (pRow) {
+ pIter->r.suid = pIter->iter.pTbData->suid;
+ pIter->r.uid = pIter->iter.pTbData->uid;
+ pIter->r.row = *pRow;
+ break;
+ }
+
+ pIter->iTbDataP++;
+ if (pIter->iTbDataP < taosArrayGetSize(pCommitter->aTbDataP)) {
+ STbData *pTbData = (STbData *)taosArrayGetP(pCommitter->aTbDataP, pIter->iTbDataP);
+ TSDBKEY keyFrom = {.ts = pCommitter->minKey, .version = VERSION_MIN};
+ tsdbTbDataIterOpen(pTbData, &keyFrom, 0, &pIter->iter);
+ pRow = tsdbTbDataIterGet(&pIter->iter);
+ continue;
+ } else {
+ pCommitter->pIter = NULL;
+ break;
+ }
+ }
+ } else if (pCommitter->pIter->type == STT_DATA_ITER) { // last file
+ pIter->iRow++;
+ if (pIter->iRow < pIter->bData.nRow) {
+ pIter->r.uid = pIter->bData.uid ? pIter->bData.uid : pIter->bData.aUid[pIter->iRow];
+ pIter->r.row = tsdbRowFromBlockData(&pIter->bData, pIter->iRow);
+ } else {
+ pIter->iSttBlk++;
+ if (pIter->iSttBlk < taosArrayGetSize(pIter->aSttBlk)) {
+ SSttBlk *pSttBlk = (SSttBlk *)taosArrayGet(pIter->aSttBlk, pIter->iSttBlk);
+
+ code = tsdbReadSttBlock(pCommitter->dReader.pReader, pIter->iStt, pSttBlk, &pIter->bData);
+ if (code) goto _exit;
+
+ pIter->iRow = 0;
+ pIter->r.suid = pIter->bData.suid;
+ pIter->r.uid = pIter->bData.uid ? pIter->bData.uid : pIter->bData.aUid[0];
+ pIter->r.row = tsdbRowFromBlockData(&pIter->bData, 0);
+ } else {
+ pCommitter->pIter = NULL;
+ }
+ }
+ } else {
+ ASSERT(0);
+ }
+
+ // compare with min in RB Tree
+ pIter = (SDataIter *)tRBTreeMin(&pCommitter->rbt);
+ if (pCommitter->pIter && pIter) {
+ int32_t c = tRowInfoCmprFn(&pCommitter->pIter->r, &pIter->r);
+ if (c > 0) {
+ tRBTreePut(&pCommitter->rbt, (SRBTreeNode *)pCommitter->pIter);
+ pCommitter->pIter = NULL;
+ } else {
+ ASSERT(c);
+ }
+ }
+ }
+
+ if (pCommitter->pIter == NULL) {
+ pCommitter->pIter = (SDataIter *)tRBTreeMin(&pCommitter->rbt);
+ if (pCommitter->pIter) {
+ tRBTreeDrop(&pCommitter->rbt, (SRBTreeNode *)pCommitter->pIter);
+ }
+ }
+
+_exit:
+ return code;
+}
+
+static int32_t tsdbCommitAheadBlock(SCommitter *pCommitter, SDataBlk *pDataBlk) {
+ int32_t code = 0;
+ SBlockData *pBlockData = &pCommitter->dWriter.bData;
+ SRowInfo *pRowInfo = tsdbGetCommitRow(pCommitter);
+ TABLEID id = {.suid = pRowInfo->suid, .uid = pRowInfo->uid};
+
+ tBlockDataClear(pBlockData);
+ while (pRowInfo) {
+ ASSERT(pRowInfo->row.type == 0);
+ code = tsdbCommitterUpdateRowSchema(pCommitter, id.suid, id.uid, TSDBROW_SVERSION(&pRowInfo->row));
+ if (code) goto _err;
+
+ code = tBlockDataAppendRow(pBlockData, &pRowInfo->row, pCommitter->skmRow.pTSchema, id.uid);
+ if (code) goto _err;
+
+ code = tsdbNextCommitRow(pCommitter);
+ if (code) goto _err;
+
+ pRowInfo = tsdbGetCommitRow(pCommitter);
+ if (pRowInfo) {
+ if (pRowInfo->suid != id.suid || pRowInfo->uid != id.uid) {
+ pRowInfo = NULL;
+ } else {
+ TSDBKEY tKey = TSDBROW_KEY(&pRowInfo->row);
+ if (tsdbKeyCmprFn(&tKey, &pDataBlk->minKey) >= 0) pRowInfo = NULL;
+ }
+ }
+
+ if (pBlockData->nRow >= pCommitter->maxRow) {
+ code =
+ tsdbWriteDataBlock(pCommitter->dWriter.pWriter, pBlockData, &pCommitter->dWriter.mBlock, pCommitter->cmprAlg);
+ if (code) goto _err;
+ }
+ }
+
+ code = tsdbWriteDataBlock(pCommitter->dWriter.pWriter, pBlockData, &pCommitter->dWriter.mBlock, pCommitter->cmprAlg);
+ if (code) goto _err;
+
+ return code;
+
+_err:
+ tsdbError("vgId:%d, tsdb commit ahead block failed since %s", TD_VID(pCommitter->pTsdb->pVnode), tstrerror(code));
+ return code;
+}
+
+static int32_t tsdbCommitMergeBlock(SCommitter *pCommitter, SDataBlk *pDataBlk) {
+ int32_t code = 0;
+ SRowInfo *pRowInfo = tsdbGetCommitRow(pCommitter);
+ TABLEID id = {.suid = pRowInfo->suid, .uid = pRowInfo->uid};
+ SBlockData *pBDataR = &pCommitter->dReader.bData;
+ SBlockData *pBDataW = &pCommitter->dWriter.bData;
+
+ code = tsdbReadDataBlock(pCommitter->dReader.pReader, pDataBlk, pBDataR);
+ if (code) goto _err;
+
+ tBlockDataClear(pBDataW);
+ int32_t iRow = 0;
+ TSDBROW row = tsdbRowFromBlockData(pBDataR, 0);
+ TSDBROW *pRow = &row;
+
+ while (pRow && pRowInfo) {
+ int32_t c = tsdbRowCmprFn(pRow, &pRowInfo->row);
+ if (c < 0) {
+ code = tBlockDataAppendRow(pBDataW, pRow, NULL, id.uid);
+ if (code) goto _err;
+
+ iRow++;
+ if (iRow < pBDataR->nRow) {
+ row = tsdbRowFromBlockData(pBDataR, iRow);
+ } else {
+ pRow = NULL;
+ }
+ } else if (c > 0) {
+ ASSERT(pRowInfo->row.type == 0);
+ code = tsdbCommitterUpdateRowSchema(pCommitter, id.suid, id.uid, TSDBROW_SVERSION(&pRowInfo->row));
+ if (code) goto _err;
+
+ code = tBlockDataAppendRow(pBDataW, &pRowInfo->row, pCommitter->skmRow.pTSchema, id.uid);
+ if (code) goto _err;
+
+ code = tsdbNextCommitRow(pCommitter);
+ if (code) goto _err;
+
+ pRowInfo = tsdbGetCommitRow(pCommitter);
+ if (pRowInfo) {
+ if (pRowInfo->suid != id.suid || pRowInfo->uid != id.uid) {
+ pRowInfo = NULL;
+ } else {
+ TSDBKEY tKey = TSDBROW_KEY(&pRowInfo->row);
+ if (tsdbKeyCmprFn(&tKey, &pDataBlk->maxKey) > 0) pRowInfo = NULL;
+ }
+ }
+ } else {
+ ASSERT(0);
+ }
+
+ if (pBDataW->nRow >= pCommitter->maxRow) {
+ code = tsdbWriteDataBlock(pCommitter->dWriter.pWriter, pBDataW, &pCommitter->dWriter.mBlock, pCommitter->cmprAlg);
+ if (code) goto _err;
+ }
+ }
+
+ while (pRow) {
+ code = tBlockDataAppendRow(pBDataW, pRow, NULL, id.uid);
+ if (code) goto _err;
+
+ iRow++;
+ if (iRow < pBDataR->nRow) {
+ row = tsdbRowFromBlockData(pBDataR, iRow);
+ } else {
+ pRow = NULL;
+ }
+
+ if (pBDataW->nRow >= pCommitter->maxRow) {
+ code = tsdbWriteDataBlock(pCommitter->dWriter.pWriter, pBDataW, &pCommitter->dWriter.mBlock, pCommitter->cmprAlg);
+ if (code) goto _err;
+ }
+ }
+
+ code = tsdbWriteDataBlock(pCommitter->dWriter.pWriter, pBDataW, &pCommitter->dWriter.mBlock, pCommitter->cmprAlg);
+ if (code) goto _err;
+
+ return code;
+
+_err:
+ tsdbError("vgId:%d, tsdb commit merge block failed since %s", TD_VID(pCommitter->pTsdb->pVnode), tstrerror(code));
+ return code;
+}
+
+static int32_t tsdbMergeTableData(SCommitter *pCommitter, TABLEID id) {
+ int32_t code = 0;
+ SBlockIdx *pBlockIdx = pCommitter->dReader.pBlockIdx;
+
+ ASSERT(pBlockIdx == NULL || tTABLEIDCmprFn(pBlockIdx, &id) >= 0);
+ if (pBlockIdx && pBlockIdx->suid == id.suid && pBlockIdx->uid == id.uid) {
+ int32_t iBlock = 0;
+ SDataBlk block;
+ SDataBlk *pDataBlk = █
+ SRowInfo *pRowInfo = tsdbGetCommitRow(pCommitter);
+
+ ASSERT(pRowInfo->suid == id.suid && pRowInfo->uid == id.uid);
+
+ tMapDataGetItemByIdx(&pCommitter->dReader.mBlock, iBlock, pDataBlk, tGetDataBlk);
+ while (pDataBlk && pRowInfo) {
+ SDataBlk tBlock = {.minKey = TSDBROW_KEY(&pRowInfo->row), .maxKey = TSDBROW_KEY(&pRowInfo->row)};
+ int32_t c = tDataBlkCmprFn(pDataBlk, &tBlock);
+
+ if (c < 0) {
+ code = tMapDataPutItem(&pCommitter->dWriter.mBlock, pDataBlk, tPutDataBlk);
+ if (code) goto _err;
+
+ iBlock++;
+ if (iBlock < pCommitter->dReader.mBlock.nItem) {
+ tMapDataGetItemByIdx(&pCommitter->dReader.mBlock, iBlock, pDataBlk, tGetDataBlk);
+ } else {
+ pDataBlk = NULL;
+ }
+ } else if (c > 0) {
+ code = tsdbCommitAheadBlock(pCommitter, pDataBlk);
+ if (code) goto _err;
+
+ pRowInfo = tsdbGetCommitRow(pCommitter);
+ if (pRowInfo && (pRowInfo->suid != id.suid || pRowInfo->uid != id.uid)) pRowInfo = NULL;
+ } else {
+ code = tsdbCommitMergeBlock(pCommitter, pDataBlk);
+ if (code) goto _err;
+
+ iBlock++;
+ if (iBlock < pCommitter->dReader.mBlock.nItem) {
+ tMapDataGetItemByIdx(&pCommitter->dReader.mBlock, iBlock, pDataBlk, tGetDataBlk);
+ } else {
+ pDataBlk = NULL;
+ }
+ pRowInfo = tsdbGetCommitRow(pCommitter);
+ if (pRowInfo && (pRowInfo->suid != id.suid || pRowInfo->uid != id.uid)) pRowInfo = NULL;
+ }
+ }
+
+ while (pDataBlk) {
+ code = tMapDataPutItem(&pCommitter->dWriter.mBlock, pDataBlk, tPutDataBlk);
+ if (code) goto _err;
+
+ iBlock++;
+ if (iBlock < pCommitter->dReader.mBlock.nItem) {
+ tMapDataGetItemByIdx(&pCommitter->dReader.mBlock, iBlock, pDataBlk, tGetDataBlk);
+ } else {
+ pDataBlk = NULL;
+ }
+ }
+
+ code = tsdbCommitterNextTableData(pCommitter);
+ if (code) goto _err;
+ }
+
+_exit:
+ return code;
+
+_err:
+ tsdbError("vgId:%d tsdb merge table data failed since %s", TD_VID(pCommitter->pTsdb->pVnode), tstrerror(code));
+ return code;
+}
+
+static int32_t tsdbInitLastBlockIfNeed(SCommitter *pCommitter, TABLEID id) {
+ int32_t code = 0;
+
+ SBlockData *pBDatal = &pCommitter->dWriter.bDatal;
+ if (pBDatal->suid || pBDatal->uid) {
+ if ((pBDatal->suid != id.suid) || (id.suid == 0)) {
+ code = tsdbWriteSttBlock(pCommitter->dWriter.pWriter, pBDatal, pCommitter->dWriter.aSttBlk, pCommitter->cmprAlg);
+ if (code) goto _exit;
+ tBlockDataReset(pBDatal);
+ }
+ }
+
+ if (!pBDatal->suid && !pBDatal->uid) {
+ ASSERT(pCommitter->skmTable.suid == id.suid);
+ ASSERT(pCommitter->skmTable.uid == id.uid);
+ code = tBlockDataInit(pBDatal, id.suid, id.suid ? 0 : id.uid, pCommitter->skmTable.pTSchema);
+ if (code) goto _exit;
+ }
+
+_exit:
+ return code;
+}
+
+static int32_t tsdbAppendLastBlock(SCommitter *pCommitter) {
+ int32_t code = 0;
+
+ SBlockData *pBData = &pCommitter->dWriter.bData;
+ SBlockData *pBDatal = &pCommitter->dWriter.bDatal;
+
+ TABLEID id = {.suid = pBData->suid, .uid = pBData->uid};
+ code = tsdbInitLastBlockIfNeed(pCommitter, id);
+ if (code) goto _err;
+
+ for (int32_t iRow = 0; iRow < pBData->nRow; iRow++) {
+ TSDBROW row = tsdbRowFromBlockData(pBData, iRow);
+ code = tBlockDataAppendRow(pBDatal, &row, NULL, pBData->uid);
+ if (code) goto _err;
+
+ if (pBDatal->nRow >= pCommitter->maxRow) {
+ code = tsdbWriteSttBlock(pCommitter->dWriter.pWriter, pBDatal, pCommitter->dWriter.aSttBlk, pCommitter->cmprAlg);
+ if (code) goto _err;
+ }
+ }
+
+ return code;
+
+_err:
+ return code;
+}
+
+static int32_t tsdbCommitTableData(SCommitter *pCommitter, TABLEID id) {
+ int32_t code = 0;
+
+ SRowInfo *pRowInfo = tsdbGetCommitRow(pCommitter);
+ if (pRowInfo && (pRowInfo->suid != id.suid || pRowInfo->uid != id.uid)) {
+ pRowInfo = NULL;
+ }
+
+ if (pRowInfo == NULL) goto _exit;
+
+ SBlockData *pBData;
+ if (pCommitter->toLastOnly) {
+ pBData = &pCommitter->dWriter.bDatal;
+ code = tsdbInitLastBlockIfNeed(pCommitter, id);
+ if (code) goto _err;
+ } else {
+ pBData = &pCommitter->dWriter.bData;
+ ASSERT(pBData->nRow == 0);
+ }
+
+ while (pRowInfo) {
+ STSchema *pTSchema = NULL;
+ if (pRowInfo->row.type == 0) {
+ code = tsdbCommitterUpdateRowSchema(pCommitter, id.suid, id.uid, TSDBROW_SVERSION(&pRowInfo->row));
+ if (code) goto _err;
+ pTSchema = pCommitter->skmRow.pTSchema;
+ }
+
+ code = tBlockDataAppendRow(pBData, &pRowInfo->row, pTSchema, id.uid);
+ if (code) goto _err;
+
+ code = tsdbNextCommitRow(pCommitter);
+ if (code) goto _err;
+
+ pRowInfo = tsdbGetCommitRow(pCommitter);
+ if (pRowInfo && (pRowInfo->suid != id.suid || pRowInfo->uid != id.uid)) {
+ pRowInfo = NULL;
+ }
+
+ if (pBData->nRow >= pCommitter->maxRow) {
+ if (pCommitter->toLastOnly) {
+ code = tsdbWriteSttBlock(pCommitter->dWriter.pWriter, pBData, pCommitter->dWriter.aSttBlk, pCommitter->cmprAlg);
+ if (code) goto _err;
+ } else {
+ code =
+ tsdbWriteDataBlock(pCommitter->dWriter.pWriter, pBData, &pCommitter->dWriter.mBlock, pCommitter->cmprAlg);
+ if (code) goto _err;
+ }
+ }
+ }
+
+ if (!pCommitter->toLastOnly && pBData->nRow) {
+ if (pBData->nRow > pCommitter->minRow) {
+ code = tsdbWriteDataBlock(pCommitter->dWriter.pWriter, pBData, &pCommitter->dWriter.mBlock, pCommitter->cmprAlg);
+ if (code) goto _err;
+ } else {
+ code = tsdbAppendLastBlock(pCommitter);
+ if (code) goto _err;
+ }
+ }
+
+_exit:
+ return code;
+
+_err:
+ tsdbError("vgId:%d tsdb commit table data failed since %s", TD_VID(pCommitter->pTsdb->pVnode), tstrerror(code));
+ return code;
+}
+
+static int32_t tsdbCommitFileDataImpl(SCommitter *pCommitter) {
+ int32_t code = 0;
+
+ SRowInfo *pRowInfo;
+ TABLEID id = {0};
+ while ((pRowInfo = tsdbGetCommitRow(pCommitter)) != NULL) {
+ ASSERT(pRowInfo->suid != id.suid || pRowInfo->uid != id.uid);
+ id.suid = pRowInfo->suid;
+ id.uid = pRowInfo->uid;
+
+ code = tsdbMoveCommitData(pCommitter, id);
+ if (code) goto _err;
+
+ // start
+ tMapDataReset(&pCommitter->dWriter.mBlock);
+
+ // impl
+ code = tsdbUpdateTableSchema(pCommitter->pTsdb->pVnode->pMeta, id.suid, id.uid, &pCommitter->skmTable);
+ if (code) goto _err;
+ code = tBlockDataInit(&pCommitter->dReader.bData, id.suid, id.uid, pCommitter->skmTable.pTSchema);
+ if (code) goto _err;
+ code = tBlockDataInit(&pCommitter->dWriter.bData, id.suid, id.uid, pCommitter->skmTable.pTSchema);
+ if (code) goto _err;
+
+ /* merge with data in .data file */
+ code = tsdbMergeTableData(pCommitter, id);
+ if (code) goto _err;
+
+ /* handle remain table data */
+ code = tsdbCommitTableData(pCommitter, id);
+ if (code) goto _err;
+
+ // end
+ if (pCommitter->dWriter.mBlock.nItem > 0) {
+ SBlockIdx blockIdx = {.suid = id.suid, .uid = id.uid};
+ code = tsdbWriteDataBlk(pCommitter->dWriter.pWriter, &pCommitter->dWriter.mBlock, &blockIdx);
+ if (code) goto _err;
+
+ if (taosArrayPush(pCommitter->dWriter.aBlockIdx, &blockIdx) == NULL) {
+ code = TSDB_CODE_OUT_OF_MEMORY;
+ goto _err;
+ }
+ }
+ }
+
+ id.suid = INT64_MAX;
+ id.uid = INT64_MAX;
+ code = tsdbMoveCommitData(pCommitter, id);
+ if (code) goto _err;
+
+ code = tsdbWriteSttBlock(pCommitter->dWriter.pWriter, &pCommitter->dWriter.bDatal, pCommitter->dWriter.aSttBlk,
+ pCommitter->cmprAlg);
+ if (code) goto _err;
+
+ return code;
+
+_err:
+ tsdbError("vgId:%d tsdb commit file data impl failed since %s", TD_VID(pCommitter->pTsdb->pVnode), tstrerror(code));
+ return code;
+}
diff --git a/source/dnode/vnode/src/tsdb/tsdbCompact.c b/source/dnode/vnode/src/tsdb/tsdbCompact.c
new file mode 100644
index 0000000000000000000000000000000000000000..fb3917be64faa058b52f1f13a86ec0034486f279
--- /dev/null
+++ b/source/dnode/vnode/src/tsdb/tsdbCompact.c
@@ -0,0 +1,27 @@
+/*
+ * Copyright (c) 2019 TAOS Data, Inc.
+ *
+ * This program is free software: you can use, redistribute, and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3
+ * or later ("AGPL"), as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "tsdb.h"
+
+typedef struct {
+ STsdb *pTsdb;
+ STsdbFS fs;
+} STsdbCompactor;
+
+int32_t tsdbCompact(STsdb *pTsdb) {
+ int32_t code = 0;
+ // TODO
+ return code;
+}
diff --git a/source/dnode/vnode/src/tsdb/tsdbCompress.c b/source/dnode/vnode/src/tsdb/tsdbCompress.c
new file mode 100644
index 0000000000000000000000000000000000000000..76be7c10708a1150b26953745b3f2cb21aaeb6fa
--- /dev/null
+++ b/source/dnode/vnode/src/tsdb/tsdbCompress.c
@@ -0,0 +1,64 @@
+/*
+ * Copyright (c) 2019 TAOS Data, Inc.
+ *
+ * This program is free software: you can use, redistribute, and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3
+ * or later ("AGPL"), as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "tsdb.h"
+
+// Integer =====================================================
+typedef struct {
+  int8_t rawCopy;
+  int64_t prevVal;
+  int32_t nVal;
+  int32_t nBuf;
+  uint8_t *pBuf;
+} SIntCompressor;
+
+#define I64_SAFE_ADD(a, b) (((a) >= 0 && (b) <= INT64_MAX - (a)) || ((a) < 0 && (b) >= INT64_MIN - (a)))  // fix: upper bound must subtract (a), not (b)
+#define SIMPLE8B_MAX ((uint64_t)1152921504606846974LL)
+
+static int32_t tsdbCmprI64(SIntCompressor *pCompressor, int64_t val) {
+  int32_t code = 0;
+
+  // raw copy
+  if (pCompressor->rawCopy) {
+    memcpy(pCompressor->pBuf + pCompressor->nBuf, &val, sizeof(val));
+    pCompressor->nBuf += sizeof(val);
+    pCompressor->nVal++;
+    goto _exit;
+  }
+
+  if (!I64_SAFE_ADD(val, pCompressor->prevVal)) {
+    pCompressor->rawCopy = 1;
+    // TODO: decompress and copy
+    pCompressor->nVal++;
+    goto _exit;
+  }
+
+  int64_t diff = val - pCompressor->prevVal;
+  uint64_t zigzag = ZIGZAGE(int64_t, diff);  // fix: uint8_t truncated the zigzag value, making the SIMPLE8B_MAX check below always false
+
+  if (zigzag >= SIMPLE8B_MAX) {
+    pCompressor->rawCopy = 1;
+    // TODO: decompress and copy
+    pCompressor->nVal++;
+    goto _exit;
+  }
+
+_exit:
+  return code;
+}
+
+// Timestamp =====================================================
+
+// Float =====================================================
\ No newline at end of file
diff --git a/source/dnode/vnode/src/tsdb/tsdbDiskData.c b/source/dnode/vnode/src/tsdb/tsdbDiskData.c
new file mode 100644
index 0000000000000000000000000000000000000000..3bd71f0ea6465cadcda3924247715a88721be3d8
--- /dev/null
+++ b/source/dnode/vnode/src/tsdb/tsdbDiskData.c
@@ -0,0 +1,84 @@
+/*
+ * Copyright (c) 2019 TAOS Data, Inc.
+ *
+ * This program is free software: you can use, redistribute, and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3
+ * or later ("AGPL"), as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "tsdb.h"
+
+typedef struct SDiskColBuilder SDiskColBuilder;
+struct SDiskColBuilder {
+ uint8_t flags;
+ uint8_t *pBitMap;
+ int32_t *aOffset;
+ int32_t nData;
+ uint8_t *pData;
+};
+
+int32_t tDiskColAddVal(SDiskColBuilder *pBuilder, SColVal *pColVal) {
+ int32_t code = 0;
+ // TODO
+ return code;
+}
+
+// ================================================================
+typedef struct SDiskDataBuilder SDiskDataBuilder;
+struct SDiskDataBuilder {
+ SDiskDataHdr hdr;
+ SArray *aBlockCol; // SArray
+};
+
+int32_t tDiskDataBuilderCreate(SDiskDataBuilder **ppBuilder) {
+ int32_t code = 0;
+ // TODO
+ return code;
+}
+
+void tDiskDataBuilderDestroy(SDiskDataBuilder *pBuilder) {
+ // TODO
+}
+
+void tDiskDataBuilderInit(SDiskDataBuilder *pBuilder, int64_t suid, int64_t uid, STSchema *pTSchema, int8_t cmprAlg) {
+ pBuilder->hdr = (SDiskDataHdr){.delimiter = TSDB_FILE_DLMT, //
+ .fmtVer = 0,
+ .suid = suid,
+ .uid = uid,
+ .cmprAlg = cmprAlg};
+}
+
+void tDiskDataBuilderReset(SDiskDataBuilder *pBuilder) {
+ // TODO
+}
+
+int32_t tDiskDataBuilderAddRow(SDiskDataBuilder *pBuilder, TSDBROW *pRow, STSchema *pTSchema, int64_t uid) {
+ int32_t code = 0;
+
+ // uid (todo)
+
+ // version (todo)
+
+ // TSKEY (todo)
+
+ SRowIter iter = {0};
+ tRowIterInit(&iter, pRow, pTSchema);
+
+ for (int32_t iDiskCol = 0; iDiskCol < 0; iDiskCol++) {
+ }
+
+ return code;
+}
+
+int32_t tDiskDataBuilderGet(SDiskDataBuilder *pBuilder, uint8_t **ppData) {
+ int32_t code = 0;
+ // TODO
+ return code;
+}
\ No newline at end of file
diff --git a/source/dnode/vnode/src/tsdb/tsdbFS.c b/source/dnode/vnode/src/tsdb/tsdbFS.c
index 247de993381d98713fa6a4ca1938c11b044c8cd6..6b4134f41694ef00c3e3003893064f8fce981f6d 100644
--- a/source/dnode/vnode/src/tsdb/tsdbFS.c
+++ b/source/dnode/vnode/src/tsdb/tsdbFS.c
@@ -21,6 +21,9 @@ static int32_t tsdbEncodeFS(uint8_t *p, STsdbFS *pFS) {
int8_t hasDel = pFS->pDelFile ? 1 : 0;
uint32_t nSet = taosArrayGetSize(pFS->aDFileSet);
+ // version
+ n += tPutI8(p ? p + n : p, 0);
+
// SDelFile
n += tPutI8(p ? p + n : p, hasDel);
if (hasDel) {
@@ -110,7 +113,7 @@ _err:
// taosRemoveFile(fname);
// }
-// // last
+// // stt
// if (isSameDisk && pFrom->pLastF->commitID == pTo->pLastF->commitID) {
// if (pFrom->pLastF->size > pTo->pLastF->size) {
// code = tsdbDFileRollback(pFS->pTsdb, pTo, TSDB_LAST_FILE);
@@ -140,7 +143,7 @@ _err:
// tsdbDataFileName(pFS->pTsdb, pFrom->diskId, pFrom->fid, pFrom->pDataF, fname);
// taosRemoveFile(fname);
-// // last
+// // stt
// tsdbLastFileName(pFS->pTsdb, pFrom->diskId, pFrom->fid, pFrom->pLastF, fname);
// taosRemoveFile(fname);
@@ -254,8 +257,10 @@ void tsdbFSDestroy(STsdbFS *pFS) {
SDFileSet *pSet = (SDFileSet *)taosArrayGet(pFS->aDFileSet, iSet);
taosMemoryFree(pSet->pHeadF);
taosMemoryFree(pSet->pDataF);
- taosMemoryFree(pSet->pLastF);
taosMemoryFree(pSet->pSmaF);
+ for (int32_t iStt = 0; iStt < pSet->nSttF; iStt++) {
+ taosMemoryFree(pSet->aSttF[iStt]);
+ }
}
taosArrayDestroy(pFS->aDFileSet);
@@ -290,7 +295,7 @@ static int32_t tsdbScanAndTryFixFS(STsdb *pTsdb) {
code = TAOS_SYSTEM_ERROR(errno);
goto _err;
}
- if (size != pSet->pHeadF->size) {
+ if (size != tsdbLogicToFileSize(pSet->pHeadF->size, pTsdb->pVnode->config.tsdbPageSize)) {
code = TSDB_CODE_FILE_CORRUPTED;
goto _err;
}
@@ -301,38 +306,40 @@ static int32_t tsdbScanAndTryFixFS(STsdb *pTsdb) {
code = TAOS_SYSTEM_ERROR(errno);
goto _err;
}
- if (size < pSet->pDataF->size) {
+ if (size < tsdbLogicToFileSize(pSet->pDataF->size, pTsdb->pVnode->config.tsdbPageSize)) {
code = TSDB_CODE_FILE_CORRUPTED;
goto _err;
- } else if (size > pSet->pDataF->size) {
+ } else if (size > tsdbLogicToFileSize(pSet->pDataF->size, pTsdb->pVnode->config.tsdbPageSize)) {
code = tsdbDFileRollback(pTsdb, pSet, TSDB_DATA_FILE);
if (code) goto _err;
}
- // last ===========
- tsdbLastFileName(pTsdb, pSet->diskId, pSet->fid, pSet->pLastF, fname);
- if (taosStatFile(fname, &size, NULL)) {
- code = TAOS_SYSTEM_ERROR(errno);
- goto _err;
- }
- if (size != pSet->pLastF->size) {
- code = TSDB_CODE_FILE_CORRUPTED;
- goto _err;
- }
-
// sma =============
tsdbSmaFileName(pTsdb, pSet->diskId, pSet->fid, pSet->pSmaF, fname);
if (taosStatFile(fname, &size, NULL)) {
code = TAOS_SYSTEM_ERROR(errno);
goto _err;
}
- if (size < pSet->pSmaF->size) {
+ if (size < tsdbLogicToFileSize(pSet->pSmaF->size, pTsdb->pVnode->config.tsdbPageSize)) {
code = TSDB_CODE_FILE_CORRUPTED;
goto _err;
- } else if (size > pSet->pSmaF->size) {
+ } else if (size > tsdbLogicToFileSize(pSet->pSmaF->size, pTsdb->pVnode->config.tsdbPageSize)) {
code = tsdbDFileRollback(pTsdb, pSet, TSDB_SMA_FILE);
if (code) goto _err;
}
+
+ // stt ===========
+ for (int32_t iStt = 0; iStt < pSet->nSttF; iStt++) {
+ tsdbSttFileName(pTsdb, pSet->diskId, pSet->fid, pSet->aSttF[iStt], fname);
+ if (taosStatFile(fname, &size, NULL)) {
+ code = TAOS_SYSTEM_ERROR(errno);
+ goto _err;
+ }
+ if (size != tsdbLogicToFileSize(pSet->aSttF[iStt]->size, pTsdb->pVnode->config.tsdbPageSize)) {
+ code = TSDB_CODE_FILE_CORRUPTED;
+ goto _err;
+ }
+ }
}
{
@@ -360,10 +367,12 @@ static int32_t tsdbRecoverFS(STsdb *pTsdb, uint8_t *pData, int64_t nData) {
int32_t code = 0;
int8_t hasDel;
uint32_t nSet;
- int32_t n;
+ int32_t n = 0;
+
+ // version
+ n += tGetI8(pData + n, NULL);
// SDelFile
- n = 0;
n += tGetI8(pData + n, &hasDel);
if (hasDel) {
pTsdb->fs.pDelFile = (SDelFile *)taosMemoryMalloc(sizeof(SDelFile));
@@ -382,41 +391,15 @@ static int32_t tsdbRecoverFS(STsdb *pTsdb, uint8_t *pData, int64_t nData) {
taosArrayClear(pTsdb->fs.aDFileSet);
n += tGetU32v(pData + n, &nSet);
for (uint32_t iSet = 0; iSet < nSet; iSet++) {
- SDFileSet fSet;
-
- // head
- fSet.pHeadF = (SHeadFile *)taosMemoryCalloc(1, sizeof(SHeadFile));
- if (fSet.pHeadF == NULL) {
- code = TSDB_CODE_OUT_OF_MEMORY;
- goto _err;
- }
- fSet.pHeadF->nRef = 1;
-
- // data
- fSet.pDataF = (SDataFile *)taosMemoryCalloc(1, sizeof(SDataFile));
- if (fSet.pDataF == NULL) {
- code = TSDB_CODE_OUT_OF_MEMORY;
- goto _err;
- }
- fSet.pDataF->nRef = 1;
-
- // last
- fSet.pLastF = (SLastFile *)taosMemoryCalloc(1, sizeof(SLastFile));
- if (fSet.pLastF == NULL) {
- code = TSDB_CODE_OUT_OF_MEMORY;
- goto _err;
- }
- fSet.pLastF->nRef = 1;
+ SDFileSet fSet = {0};
- // sma
- fSet.pSmaF = (SSmaFile *)taosMemoryCalloc(1, sizeof(SSmaFile));
- if (fSet.pSmaF == NULL) {
+ int32_t nt = tGetDFileSet(pData + n, &fSet);
+ if (nt < 0) {
code = TSDB_CODE_OUT_OF_MEMORY;
goto _err;
}
- fSet.pSmaF->nRef = 1;
- n += tGetDFileSet(pData + n, &fSet);
+ n += nt;
if (taosArrayPush(pTsdb->fs.aDFileSet, &fSet) == NULL) {
code = TSDB_CODE_OUT_OF_MEMORY;
@@ -532,13 +515,15 @@ int32_t tsdbFSClose(STsdb *pTsdb) {
ASSERT(pSet->pDataF->nRef == 1);
taosMemoryFree(pSet->pDataF);
- // last
- ASSERT(pSet->pLastF->nRef == 1);
- taosMemoryFree(pSet->pLastF);
-
// sma
ASSERT(pSet->pSmaF->nRef == 1);
taosMemoryFree(pSet->pSmaF);
+
+ // stt
+ for (int32_t iStt = 0; iStt < pSet->nSttF; iStt++) {
+ ASSERT(pSet->aSttF[iStt]->nRef == 1);
+ taosMemoryFree(pSet->aSttF[iStt]);
+ }
}
taosArrayDestroy(pTsdb->fs.aDFileSet);
@@ -586,15 +571,7 @@ int32_t tsdbFSCopy(STsdb *pTsdb, STsdbFS *pFS) {
}
*fSet.pDataF = *pSet->pDataF;
- // data
- fSet.pLastF = (SLastFile *)taosMemoryMalloc(sizeof(SLastFile));
- if (fSet.pLastF == NULL) {
- code = TSDB_CODE_OUT_OF_MEMORY;
- goto _exit;
- }
- *fSet.pLastF = *pSet->pLastF;
-
- // last
+ // sma
fSet.pSmaF = (SSmaFile *)taosMemoryMalloc(sizeof(SSmaFile));
if (fSet.pSmaF == NULL) {
code = TSDB_CODE_OUT_OF_MEMORY;
@@ -602,6 +579,16 @@ int32_t tsdbFSCopy(STsdb *pTsdb, STsdbFS *pFS) {
}
*fSet.pSmaF = *pSet->pSmaF;
+ // stt
+ for (fSet.nSttF = 0; fSet.nSttF < pSet->nSttF; fSet.nSttF++) {
+ fSet.aSttF[fSet.nSttF] = (SSttFile *)taosMemoryMalloc(sizeof(SSttFile));
+ if (fSet.aSttF[fSet.nSttF] == NULL) {
+ code = TSDB_CODE_OUT_OF_MEMORY;
+ goto _exit;
+ }
+ *fSet.aSttF[fSet.nSttF] = *pSet->aSttF[fSet.nSttF];
+ }
+
if (taosArrayPush(pFS->aDFileSet, &fSet) == NULL) {
code = TSDB_CODE_OUT_OF_MEMORY;
goto _exit;
@@ -651,14 +638,38 @@ int32_t tsdbFSUpsertFSet(STsdbFS *pFS, SDFileSet *pSet) {
if (c == 0) {
*pDFileSet->pHeadF = *pSet->pHeadF;
*pDFileSet->pDataF = *pSet->pDataF;
- *pDFileSet->pLastF = *pSet->pLastF;
*pDFileSet->pSmaF = *pSet->pSmaF;
+ // stt
+ if (pSet->nSttF > pDFileSet->nSttF) {
+ ASSERT(pSet->nSttF == pDFileSet->nSttF + 1);
+
+ pDFileSet->aSttF[pDFileSet->nSttF] = (SSttFile *)taosMemoryMalloc(sizeof(SSttFile));
+ if (pDFileSet->aSttF[pDFileSet->nSttF] == NULL) {
+ code = TSDB_CODE_OUT_OF_MEMORY;
+ goto _exit;
+ }
+ *pDFileSet->aSttF[pDFileSet->nSttF] = *pSet->aSttF[pSet->nSttF - 1];
+ pDFileSet->nSttF++;
+ } else if (pSet->nSttF < pDFileSet->nSttF) {
+ ASSERT(pSet->nSttF == 1);
+ for (int32_t iStt = 1; iStt < pDFileSet->nSttF; iStt++) {
+ taosMemoryFree(pDFileSet->aSttF[iStt]);
+ }
+
+ *pDFileSet->aSttF[0] = *pSet->aSttF[0];
+ pDFileSet->nSttF = 1;
+ } else {
+ for (int32_t iStt = 0; iStt < pSet->nSttF; iStt++) {
+ *pDFileSet->aSttF[iStt] = *pSet->aSttF[iStt];
+ }
+ }
goto _exit;
}
}
- SDFileSet fSet = {.diskId = pSet->diskId, .fid = pSet->fid};
+ ASSERT(pSet->nSttF == 1);
+ SDFileSet fSet = {.diskId = pSet->diskId, .fid = pSet->fid, .nSttF = 1};
// head
fSet.pHeadF = (SHeadFile *)taosMemoryMalloc(sizeof(SHeadFile));
@@ -676,21 +687,21 @@ int32_t tsdbFSUpsertFSet(STsdbFS *pFS, SDFileSet *pSet) {
}
*fSet.pDataF = *pSet->pDataF;
- // data
- fSet.pLastF = (SLastFile *)taosMemoryMalloc(sizeof(SLastFile));
- if (fSet.pLastF == NULL) {
+ // sma
+ fSet.pSmaF = (SSmaFile *)taosMemoryMalloc(sizeof(SSmaFile));
+ if (fSet.pSmaF == NULL) {
code = TSDB_CODE_OUT_OF_MEMORY;
goto _exit;
}
- *fSet.pLastF = *pSet->pLastF;
+ *fSet.pSmaF = *pSet->pSmaF;
- // last
- fSet.pSmaF = (SSmaFile *)taosMemoryMalloc(sizeof(SSmaFile));
- if (fSet.pSmaF == NULL) {
+ // stt
+ fSet.aSttF[0] = (SSttFile *)taosMemoryMalloc(sizeof(SSttFile));
+ if (fSet.aSttF[0] == NULL) {
code = TSDB_CODE_OUT_OF_MEMORY;
goto _exit;
}
- *fSet.pSmaF = *pSet->pSmaF;
+ *fSet.aSttF[0] = *pSet->aSttF[0];
if (taosArrayInsert(pFS->aDFileSet, idx, &fSet) == NULL) {
code = TSDB_CODE_OUT_OF_MEMORY;
@@ -836,27 +847,6 @@ int32_t tsdbFSCommit2(STsdb *pTsdb, STsdbFS *pFSNew) {
pSetOld->pDataF->size = pSetNew->pDataF->size;
}
- // last
- fSet.pLastF = pSetOld->pLastF;
- if ((!sameDisk) || (pSetOld->pLastF->commitID != pSetNew->pLastF->commitID)) {
- pSetOld->pLastF = (SLastFile *)taosMemoryMalloc(sizeof(SLastFile));
- if (pSetOld->pLastF == NULL) {
- code = TSDB_CODE_OUT_OF_MEMORY;
- goto _err;
- }
- *pSetOld->pLastF = *pSetNew->pLastF;
- pSetOld->pLastF->nRef = 1;
-
- nRef = atomic_sub_fetch_32(&fSet.pLastF->nRef, 1);
- if (nRef == 0) {
- tsdbLastFileName(pTsdb, pSetOld->diskId, pSetOld->fid, fSet.pLastF, fname);
- taosRemoveFile(fname);
- taosMemoryFree(fSet.pLastF);
- }
- } else {
- ASSERT(pSetOld->pLastF->size == pSetNew->pLastF->size);
- }
-
// sma
fSet.pSmaF = pSetOld->pSmaF;
if ((!sameDisk) || (pSetOld->pSmaF->commitID != pSetNew->pSmaF->commitID)) {
@@ -879,6 +869,84 @@ int32_t tsdbFSCommit2(STsdb *pTsdb, STsdbFS *pFSNew) {
pSetOld->pSmaF->size = pSetNew->pSmaF->size;
}
+    // stt
+    if (sameDisk) {
+      if (pSetNew->nSttF > pSetOld->nSttF) {
+        ASSERT(pSetNew->nSttF == pSetOld->nSttF + 1);  // fix: was '=' (assignment), which mutated nSttF inside the assert
+        pSetOld->aSttF[pSetOld->nSttF] = (SSttFile *)taosMemoryMalloc(sizeof(SSttFile));
+        if (pSetOld->aSttF[pSetOld->nSttF] == NULL) {
+          code = TSDB_CODE_OUT_OF_MEMORY;
+          goto _err;
+        }
+        *pSetOld->aSttF[pSetOld->nSttF] = *pSetNew->aSttF[pSetOld->nSttF];
+        pSetOld->aSttF[pSetOld->nSttF]->nRef = 1;
+        pSetOld->nSttF++;
+      } else if (pSetNew->nSttF < pSetOld->nSttF) {
+        ASSERT(pSetNew->nSttF == 1);
+        for (int32_t iStt = 0; iStt < pSetOld->nSttF; iStt++) {
+          SSttFile *pSttFile = pSetOld->aSttF[iStt];
+          nRef = atomic_sub_fetch_32(&pSttFile->nRef, 1);
+          if (nRef == 0) {
+            tsdbSttFileName(pTsdb, pSetOld->diskId, pSetOld->fid, pSttFile, fname);
+            taosRemoveFile(fname);
+            taosMemoryFree(pSttFile);
+          }
+          pSetOld->aSttF[iStt] = NULL;
+        }
+
+        pSetOld->nSttF = 1;
+        pSetOld->aSttF[0] = (SSttFile *)taosMemoryMalloc(sizeof(SSttFile));
+        if (pSetOld->aSttF[0] == NULL) {
+          code = TSDB_CODE_OUT_OF_MEMORY;
+          goto _err;
+        }
+        *pSetOld->aSttF[0] = *pSetNew->aSttF[0];
+        pSetOld->aSttF[0]->nRef = 1;
+      } else {
+        for (int32_t iStt = 0; iStt < pSetOld->nSttF; iStt++) {
+          if (pSetOld->aSttF[iStt]->commitID != pSetNew->aSttF[iStt]->commitID) {
+            SSttFile *pSttFile = pSetOld->aSttF[iStt];
+            nRef = atomic_sub_fetch_32(&pSttFile->nRef, 1);
+            if (nRef == 0) {
+              tsdbSttFileName(pTsdb, pSetOld->diskId, pSetOld->fid, pSttFile, fname);
+              taosRemoveFile(fname);
+              taosMemoryFree(pSttFile);
+            }
+
+            pSetOld->aSttF[iStt] = (SSttFile *)taosMemoryMalloc(sizeof(SSttFile));
+            if (pSetOld->aSttF[iStt] == NULL) {
+              code = TSDB_CODE_OUT_OF_MEMORY;
+              goto _err;
+            }
+            *pSetOld->aSttF[iStt] = *pSetNew->aSttF[iStt];
+            pSetOld->aSttF[iStt]->nRef = 1;
+          } else {
+            ASSERT(pSetOld->aSttF[iStt]->size == pSetNew->aSttF[iStt]->size);      // fix: compared old with itself; must compare old vs new
+            ASSERT(pSetOld->aSttF[iStt]->offset == pSetNew->aSttF[iStt]->offset);  // fix: compared old with itself; must compare old vs new
+          }
+        }
+      }
+    } else {
+      ASSERT(pSetOld->nSttF == pSetNew->nSttF);
+      for (int32_t iStt = 0; iStt < pSetOld->nSttF; iStt++) {
+        SSttFile *pSttFile = pSetOld->aSttF[iStt];
+        nRef = atomic_sub_fetch_32(&pSttFile->nRef, 1);
+        if (nRef == 0) {
+          tsdbSttFileName(pTsdb, pSetOld->diskId, pSetOld->fid, pSttFile, fname);
+          taosRemoveFile(fname);
+          taosMemoryFree(pSttFile);
+        }
+
+        pSetOld->aSttF[iStt] = (SSttFile *)taosMemoryMalloc(sizeof(SSttFile));
+        if (pSetOld->aSttF[iStt] == NULL) {
+          code = TSDB_CODE_OUT_OF_MEMORY;
+          goto _err;
+        }
+        *pSetOld->aSttF[iStt] = *pSetNew->aSttF[iStt];
+        pSetOld->aSttF[iStt]->nRef = 1;
+      }
+    }
+
if (!sameDisk) {
pSetOld->diskId = pSetNew->diskId;
}
@@ -902,13 +970,6 @@ int32_t tsdbFSCommit2(STsdb *pTsdb, STsdbFS *pFSNew) {
taosMemoryFree(pSetOld->pDataF);
}
- nRef = atomic_sub_fetch_32(&pSetOld->pLastF->nRef, 1);
- if (nRef == 0) {
- tsdbLastFileName(pTsdb, pSetOld->diskId, pSetOld->fid, pSetOld->pLastF, fname);
- taosRemoveFile(fname);
- taosMemoryFree(pSetOld->pLastF);
- }
-
nRef = atomic_sub_fetch_32(&pSetOld->pSmaF->nRef, 1);
if (nRef == 0) {
tsdbSmaFileName(pTsdb, pSetOld->diskId, pSetOld->fid, pSetOld->pSmaF, fname);
@@ -916,12 +977,20 @@ int32_t tsdbFSCommit2(STsdb *pTsdb, STsdbFS *pFSNew) {
taosMemoryFree(pSetOld->pSmaF);
}
+ for (int8_t iStt = 0; iStt < pSetOld->nSttF; iStt++) {
+ nRef = atomic_sub_fetch_32(&pSetOld->aSttF[iStt]->nRef, 1);
+ if (nRef == 0) {
+ tsdbSttFileName(pTsdb, pSetOld->diskId, pSetOld->fid, pSetOld->aSttF[iStt], fname);
+ taosRemoveFile(fname);
+ taosMemoryFree(pSetOld->aSttF[iStt]);
+ }
+ }
+
taosArrayRemove(pTsdb->fs.aDFileSet, iOld);
continue;
_add_new:
- fSet.diskId = pSetNew->diskId;
- fSet.fid = pSetNew->fid;
+ fSet = (SDFileSet){.diskId = pSetNew->diskId, .fid = pSetNew->fid, .nSttF = 1};
// head
fSet.pHeadF = (SHeadFile *)taosMemoryMalloc(sizeof(SHeadFile));
@@ -941,15 +1010,6 @@ int32_t tsdbFSCommit2(STsdb *pTsdb, STsdbFS *pFSNew) {
*fSet.pDataF = *pSetNew->pDataF;
fSet.pDataF->nRef = 1;
- // last
- fSet.pLastF = (SLastFile *)taosMemoryMalloc(sizeof(SLastFile));
- if (fSet.pLastF == NULL) {
- code = TSDB_CODE_OUT_OF_MEMORY;
- goto _err;
- }
- *fSet.pLastF = *pSetNew->pLastF;
- fSet.pLastF->nRef = 1;
-
// sma
fSet.pSmaF = (SSmaFile *)taosMemoryMalloc(sizeof(SSmaFile));
if (fSet.pSmaF == NULL) {
@@ -959,6 +1019,16 @@ int32_t tsdbFSCommit2(STsdb *pTsdb, STsdbFS *pFSNew) {
*fSet.pSmaF = *pSetNew->pSmaF;
fSet.pSmaF->nRef = 1;
+ // stt
+ ASSERT(pSetNew->nSttF == 1);
+ fSet.aSttF[0] = (SSttFile *)taosMemoryMalloc(sizeof(SSttFile));
+ if (fSet.aSttF[0] == NULL) {
+ code = TSDB_CODE_OUT_OF_MEMORY;
+ goto _err;
+ }
+ *fSet.aSttF[0] = *pSetNew->aSttF[0];
+ fSet.aSttF[0]->nRef = 1;
+
if (taosArrayInsert(pTsdb->fs.aDFileSet, iOld, &fSet) == NULL) {
code = TSDB_CODE_OUT_OF_MEMORY;
goto _err;
@@ -1002,12 +1072,14 @@ int32_t tsdbFSRef(STsdb *pTsdb, STsdbFS *pFS) {
nRef = atomic_fetch_add_32(&pSet->pDataF->nRef, 1);
ASSERT(nRef > 0);
- nRef = atomic_fetch_add_32(&pSet->pLastF->nRef, 1);
- ASSERT(nRef > 0);
-
nRef = atomic_fetch_add_32(&pSet->pSmaF->nRef, 1);
ASSERT(nRef > 0);
+ for (int32_t iStt = 0; iStt < pSet->nSttF; iStt++) {
+ nRef = atomic_fetch_add_32(&pSet->aSttF[iStt]->nRef, 1);
+ ASSERT(nRef > 0);
+ }
+
if (taosArrayPush(pFS->aDFileSet, &fSet) == NULL) {
code = TSDB_CODE_OUT_OF_MEMORY;
goto _exit;
@@ -1053,15 +1125,6 @@ void tsdbFSUnref(STsdb *pTsdb, STsdbFS *pFS) {
taosMemoryFree(pSet->pDataF);
}
- // last
- nRef = atomic_sub_fetch_32(&pSet->pLastF->nRef, 1);
- ASSERT(nRef >= 0);
- if (nRef == 0) {
- tsdbLastFileName(pTsdb, pSet->diskId, pSet->fid, pSet->pLastF, fname);
- taosRemoveFile(fname);
- taosMemoryFree(pSet->pLastF);
- }
-
// sma
nRef = atomic_sub_fetch_32(&pSet->pSmaF->nRef, 1);
ASSERT(nRef >= 0);
@@ -1070,6 +1133,18 @@ void tsdbFSUnref(STsdb *pTsdb, STsdbFS *pFS) {
taosRemoveFile(fname);
taosMemoryFree(pSet->pSmaF);
}
+
+ // stt
+ for (int32_t iStt = 0; iStt < pSet->nSttF; iStt++) {
+ nRef = atomic_sub_fetch_32(&pSet->aSttF[iStt]->nRef, 1);
+ ASSERT(nRef >= 0);
+ if (nRef == 0) {
+ tsdbSttFileName(pTsdb, pSet->diskId, pSet->fid, pSet->aSttF[iStt], fname);
+ taosRemoveFile(fname);
+ taosMemoryFree(pSet->aSttF[iStt]);
+ /* code */
+ }
+ }
}
taosArrayDestroy(pFS->aDFileSet);
diff --git a/source/dnode/vnode/src/tsdb/tsdbFile.c b/source/dnode/vnode/src/tsdb/tsdbFile.c
index 00d2ac848f6d599fef54d9957047521e27062c89..3c944584de7ae10b21cb75913d1f920198288fe4 100644
--- a/source/dnode/vnode/src/tsdb/tsdbFile.c
+++ b/source/dnode/vnode/src/tsdb/tsdbFile.c
@@ -53,22 +53,22 @@ static int32_t tGetDataFile(uint8_t *p, SDataFile *pDataFile) {
return n;
}
-int32_t tPutLastFile(uint8_t *p, SLastFile *pLastFile) {
+int32_t tPutSttFile(uint8_t *p, SSttFile *pSttFile) {
int32_t n = 0;
- n += tPutI64v(p ? p + n : p, pLastFile->commitID);
- n += tPutI64v(p ? p + n : p, pLastFile->size);
- n += tPutI64v(p ? p + n : p, pLastFile->offset);
+ n += tPutI64v(p ? p + n : p, pSttFile->commitID);
+ n += tPutI64v(p ? p + n : p, pSttFile->size);
+ n += tPutI64v(p ? p + n : p, pSttFile->offset);
return n;
}
-static int32_t tGetLastFile(uint8_t *p, SLastFile *pLastFile) {
+static int32_t tGetSttFile(uint8_t *p, SSttFile *pSttFile) {
int32_t n = 0;
- n += tGetI64v(p + n, &pLastFile->commitID);
- n += tGetI64v(p + n, &pLastFile->size);
- n += tGetI64v(p + n, &pLastFile->offset);
+ n += tGetI64v(p + n, &pSttFile->commitID);
+ n += tGetI64v(p + n, &pSttFile->size);
+ n += tGetI64v(p + n, &pSttFile->offset);
return n;
}
@@ -102,9 +102,9 @@ void tsdbDataFileName(STsdb *pTsdb, SDiskID did, int32_t fid, SDataFile *pDataF,
TD_DIRSEP, pTsdb->path, TD_DIRSEP, TD_VID(pTsdb->pVnode), fid, pDataF->commitID, ".data");
}
-void tsdbLastFileName(STsdb *pTsdb, SDiskID did, int32_t fid, SLastFile *pLastF, char fname[]) {
+void tsdbSttFileName(STsdb *pTsdb, SDiskID did, int32_t fid, SSttFile *pSttF, char fname[]) {
snprintf(fname, TSDB_FILENAME_LEN - 1, "%s%s%s%sv%df%dver%" PRId64 "%s", tfsGetDiskPath(pTsdb->pVnode->pTfs, did),
- TD_DIRSEP, pTsdb->path, TD_DIRSEP, TD_VID(pTsdb->pVnode), fid, pLastF->commitID, ".last");
+ TD_DIRSEP, pTsdb->path, TD_DIRSEP, TD_VID(pTsdb->pVnode), fid, pSttF->commitID, ".stt");
}
void tsdbSmaFileName(STsdb *pTsdb, SDiskID did, int32_t fid, SSmaFile *pSmaF, char fname[]) {
@@ -148,7 +148,7 @@ int32_t tsdbDFileRollback(STsdb *pTsdb, SDFileSet *pSet, EDataFileT ftype) {
}
// ftruncate
- if (taosFtruncateFile(pFD, size) < 0) {
+ if (taosFtruncateFile(pFD, tsdbLogicToFileSize(size, pTsdb->pVnode->config.tsdbPageSize)) < 0) {
code = TAOS_SYSTEM_ERROR(errno);
goto _err;
}
@@ -194,9 +194,11 @@ int32_t tPutDFileSet(uint8_t *p, SDFileSet *pSet) {
n += tPutDataFile(p ? p + n : p, pSet->pDataF);
n += tPutSmaFile(p ? p + n : p, pSet->pSmaF);
- // last
- n += tPutU8(p ? p + n : p, 1); // for future compatibility
- n += tPutLastFile(p ? p + n : p, pSet->pLastF);
+ // stt
+ n += tPutU8(p ? p + n : p, pSet->nSttF);
+ for (int32_t iStt = 0; iStt < pSet->nSttF; iStt++) {
+ n += tPutSttFile(p ? p + n : p, pSet->aSttF[iStt]);
+ }
return n;
}
@@ -208,15 +210,40 @@ int32_t tGetDFileSet(uint8_t *p, SDFileSet *pSet) {
n += tGetI32v(p + n, &pSet->diskId.id);
n += tGetI32v(p + n, &pSet->fid);
- // data
+ // head
+ pSet->pHeadF = (SHeadFile *)taosMemoryCalloc(1, sizeof(SHeadFile));
+ if (pSet->pHeadF == NULL) {
+ return -1;
+ }
+ pSet->pHeadF->nRef = 1;
n += tGetHeadFile(p + n, pSet->pHeadF);
+
+ // data
+ pSet->pDataF = (SDataFile *)taosMemoryCalloc(1, sizeof(SDataFile));
+ if (pSet->pDataF == NULL) {
+ return -1;
+ }
+ pSet->pDataF->nRef = 1;
n += tGetDataFile(p + n, pSet->pDataF);
+
+ // sma
+ pSet->pSmaF = (SSmaFile *)taosMemoryCalloc(1, sizeof(SSmaFile));
+ if (pSet->pSmaF == NULL) {
+ return -1;
+ }
+ pSet->pSmaF->nRef = 1;
n += tGetSmaFile(p + n, pSet->pSmaF);
- // last
- uint8_t nLast;
- n += tGetU8(p + n, &nLast);
- n += tGetLastFile(p + n, pSet->pLastF);
+ // stt
+ n += tGetU8(p + n, &pSet->nSttF);
+ for (int32_t iStt = 0; iStt < pSet->nSttF; iStt++) {
+ pSet->aSttF[iStt] = (SSttFile *)taosMemoryCalloc(1, sizeof(SSttFile));
+ if (pSet->aSttF[iStt] == NULL) {
+ return -1;
+ }
+ pSet->aSttF[iStt]->nRef = 1;
+ n += tGetSttFile(p + n, pSet->aSttF[iStt]);
+ }
return n;
}
diff --git a/source/dnode/vnode/src/tsdb/tsdbMergeTree.c b/source/dnode/vnode/src/tsdb/tsdbMergeTree.c
new file mode 100644
index 0000000000000000000000000000000000000000..45fe29f0faba9fbcb43e81ad1e3022d408d8d927
--- /dev/null
+++ b/source/dnode/vnode/src/tsdb/tsdbMergeTree.c
@@ -0,0 +1,561 @@
+/*
+ * Copyright (c) 2019 TAOS Data, Inc.
+ *
+ * This program is free software: you can use, redistribute, and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3
+ * or later ("AGPL"), as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "tsdb.h"
+
+// SLDataIter =================================================
+struct SLDataIter {
+ SRBTreeNode node;
+ SSttBlk *pSttBlk;
+ SDataFReader *pReader;
+ int32_t iStt;
+ int8_t backward;
+ int32_t iSttBlk;
+ int32_t iRow;
+ SRowInfo rInfo;
+ uint64_t uid;
+ STimeWindow timeWindow;
+ SVersionRange verRange;
+
+ SSttBlockLoadInfo* pBlockLoadInfo;
+};
+
+SSttBlockLoadInfo* tCreateLastBlockLoadInfo() {
+ SSttBlockLoadInfo* pLoadInfo = taosMemoryCalloc(TSDB_DEFAULT_STT_FILE, sizeof(SSttBlockLoadInfo));
+ if (pLoadInfo == NULL) {
+ terrno = TSDB_CODE_OUT_OF_MEMORY;
+ return NULL;
+ }
+
+ for(int32_t i = 0; i < TSDB_DEFAULT_STT_FILE; ++i) {
+ pLoadInfo[i].blockIndex[0] = -1;
+ pLoadInfo[i].blockIndex[1] = -1;
+ pLoadInfo[i].currentLoadBlockIndex = 1;
+
+ int32_t code = tBlockDataCreate(&pLoadInfo[i].blockData[0]);
+ if (code) {
+ terrno = code;
+ }
+
+ code = tBlockDataCreate(&pLoadInfo[i].blockData[1]);
+ if (code) {
+ terrno = code;
+ }
+
+ pLoadInfo[i].aSttBlk = taosArrayInit(4, sizeof(SSttBlk));
+ }
+
+ return pLoadInfo;
+}
+
+void resetLastBlockLoadInfo(SSttBlockLoadInfo* pLoadInfo) {
+ for(int32_t i = 0; i < TSDB_DEFAULT_STT_FILE; ++i) {
+ pLoadInfo[i].currentLoadBlockIndex = 1;
+ pLoadInfo[i].blockIndex[0] = -1;
+ pLoadInfo[i].blockIndex[1] = -1;
+
+ taosArrayClear(pLoadInfo[i].aSttBlk);
+
+ pLoadInfo[i].elapsedTime = 0;
+ pLoadInfo[i].loadBlocks = 0;
+ }
+}
+
+void getLastBlockLoadInfo(SSttBlockLoadInfo* pLoadInfo, int64_t* blocks, double* el) {
+ for(int32_t i = 0; i < TSDB_DEFAULT_STT_FILE; ++i) {
+ *el += pLoadInfo[i].elapsedTime;
+ *blocks += pLoadInfo[i].loadBlocks;
+ }
+}
+
+void* destroyLastBlockLoadInfo(SSttBlockLoadInfo* pLoadInfo) {
+ for(int32_t i = 0; i < TSDB_DEFAULT_STT_FILE; ++i) {
+ pLoadInfo[i].currentLoadBlockIndex = 1;
+ pLoadInfo[i].blockIndex[0] = -1;
+ pLoadInfo[i].blockIndex[1] = -1;
+
+ tBlockDataDestroy(&pLoadInfo[i].blockData[0], true);
+ tBlockDataDestroy(&pLoadInfo[i].blockData[1], true);
+
+ taosArrayDestroy(pLoadInfo[i].aSttBlk);
+ }
+
+ taosMemoryFree(pLoadInfo);
+ return NULL;
+}
+
+static SBlockData* loadLastBlock(SLDataIter *pIter, const char* idStr) {
+ int32_t code = 0;
+
+ SSttBlockLoadInfo* pInfo = pIter->pBlockLoadInfo;
+ if (pInfo->blockIndex[0] == pIter->iSttBlk) {
+ return &pInfo->blockData[0];
+ }
+
+ if (pInfo->blockIndex[1] == pIter->iSttBlk) {
+ return &pInfo->blockData[1];
+ }
+
+ pInfo->currentLoadBlockIndex ^= 1;
+ if (pIter->pSttBlk != NULL) { // current block not loaded yet
+ int64_t st = taosGetTimestampUs();
+ code = tsdbReadSttBlock(pIter->pReader, pIter->iStt, pIter->pSttBlk, &pInfo->blockData[pInfo->currentLoadBlockIndex]);
+ double el = (taosGetTimestampUs() - st)/ 1000.0;
+ pInfo->elapsedTime += el;
+ pInfo->loadBlocks += 1;
+
+ tsdbDebug("read last block, index:%d, last file index:%d, elapsed time:%.2f ms, %s", pIter->iSttBlk, pIter->iStt, el, idStr);
+ if (code != TSDB_CODE_SUCCESS) {
+ goto _exit;
+ }
+
+ pInfo->blockIndex[pInfo->currentLoadBlockIndex] = pIter->iSttBlk;
+ pIter->iRow = (pIter->backward) ? pInfo->blockData[pInfo->currentLoadBlockIndex].nRow : -1;
+ }
+
+ return &pInfo->blockData[pInfo->currentLoadBlockIndex];
+
+ _exit:
+ if (code != TSDB_CODE_SUCCESS) {
+ terrno = code;
+ }
+
+ return NULL;
+}
+
+// find the earliest block that contains the required records
+static FORCE_INLINE int32_t findEarliestIndex(int32_t index, uint64_t uid, const SSttBlk* pBlockList, int32_t num, int32_t backward) {
+ int32_t i = index;
+ int32_t step = backward? 1:-1;
+ while (i >= 0 && i < num && uid >= pBlockList[i].minUid && uid <= pBlockList[i].maxUid) {
+ i += step;
+ }
+ return i - step;
+}
+
+static int32_t binarySearchForStartBlock(SSttBlk*pBlockList, int32_t num, uint64_t uid, int32_t backward) {
+ int32_t midPos = -1;
+ if (num <= 0) {
+ return -1;
+ }
+
+ int32_t firstPos = 0;
+ int32_t lastPos = num - 1;
+
+ // find the first position which is bigger than the key
+ if ((uid > pBlockList[lastPos].maxUid) || (uid < pBlockList[firstPos].minUid)) {
+ return -1;
+ }
+
+ while (1) {
+ if (uid >= pBlockList[firstPos].minUid && uid <= pBlockList[firstPos].maxUid) {
+ return findEarliestIndex(firstPos, uid, pBlockList, num, backward);
+ }
+
+ if (uid > pBlockList[lastPos].maxUid || uid < pBlockList[firstPos].minUid) {
+ return -1;
+ }
+
+ int32_t numOfRows = lastPos - firstPos + 1;
+ midPos = (numOfRows >> 1u) + firstPos;
+
+ if (uid < pBlockList[midPos].minUid) {
+ lastPos = midPos - 1;
+ } else if (uid > pBlockList[midPos].maxUid) {
+ firstPos = midPos + 1;
+ } else {
+ return findEarliestIndex(midPos, uid, pBlockList, num, backward);
+ }
+ }
+}
+
+static FORCE_INLINE int32_t findEarliestRow(int32_t index, uint64_t uid, const uint64_t* uidList, int32_t num, int32_t backward) {
+ int32_t i = index;
+ int32_t step = backward? 1:-1;
+ while (i >= 0 && i < num && uid == uidList[i]) {
+ i += step;
+ }
+ return i - step;
+}
+
+static int32_t binarySearchForStartRowIndex(uint64_t* uidList, int32_t num, uint64_t uid, int32_t backward) {
+  int32_t firstPos = 0;
+  int32_t lastPos = num - 1;
+
+  // find the first position which is bigger than the key
+  if (num <= 0 || (uid > uidList[lastPos]) || (uid < uidList[firstPos])) {  // fix: guard num<=0 (sibling binarySearchForStartBlock has it; uidList[-1] is UB)
+    return -1;
+  }
+
+  while (1) {
+    if (uid == uidList[firstPos]) {
+      return findEarliestRow(firstPos, uid, uidList, num, backward);
+    }
+
+    if (uid > uidList[lastPos] || uid < uidList[firstPos]) {
+      return -1;
+    }
+
+    int32_t numOfRows = lastPos - firstPos + 1;
+    int32_t midPos = (numOfRows >> 1u) + firstPos;
+
+    if (uid < uidList[midPos]) {
+      lastPos = midPos - 1;
+    } else if (uid > uidList[midPos]) {
+      firstPos = midPos + 1;
+    } else {
+      return findEarliestRow(midPos, uid, uidList, num, backward);
+    }
+  }
+}
+
+// Create and initialize a last/stt-file data iterator over the iStt-th stt file
+// of pReader's file set, restricted to one table (suid/uid), a time window and
+// a version range.  On first use the stt block index is loaded and filtered
+// down to blocks of the requested suid, then the start block for uid is located
+// by binary search.  Returns 0 on success or an error code; *pIter is always
+// allocated unless the calloc itself fails.
+int32_t tLDataIterOpen(struct SLDataIter **pIter, SDataFReader *pReader, int32_t iStt, int8_t backward, uint64_t suid,
+                       uint64_t uid, STimeWindow *pTimeWindow, SVersionRange *pRange, SSttBlockLoadInfo* pBlockLoadInfo) {
+  int32_t code = 0;
+  *pIter = taosMemoryCalloc(1, sizeof(SLDataIter));
+  if (*pIter == NULL) {
+    code = TSDB_CODE_OUT_OF_MEMORY;
+    goto _exit;
+  }
+
+  (*pIter)->uid = uid;
+  (*pIter)->pReader = pReader;
+  (*pIter)->iStt = iStt;
+  (*pIter)->backward = backward;
+  (*pIter)->verRange = *pRange;
+  (*pIter)->timeWindow = *pTimeWindow;
+
+  // the load-info cache is owned by the caller; the iterator only borrows it
+  (*pIter)->pBlockLoadInfo = pBlockLoadInfo;
+  if (taosArrayGetSize(pBlockLoadInfo->aSttBlk) == 0) {
+    code = tsdbReadSttBlk(pReader, iStt, pBlockLoadInfo->aSttBlk);
+    if (code) {
+      goto _exit;
+    } else {
+      // keep only the stt blocks belonging to the requested super table
+      size_t size = taosArrayGetSize(pBlockLoadInfo->aSttBlk);
+      // NOTE(review): pTmp is not checked for NULL before use — confirm
+      // whether an OOM here should be handled like the calloc above.
+      SArray* pTmp = taosArrayInit(size, sizeof(SSttBlk));
+      for(int32_t i = 0; i < size; ++i) {
+        SSttBlk* p = taosArrayGet(pBlockLoadInfo->aSttBlk, i);
+        if (p->suid == suid) {
+          taosArrayPush(pTmp, p);
+        }
+      }
+
+      taosArrayDestroy(pBlockLoadInfo->aSttBlk);
+      pBlockLoadInfo->aSttBlk = pTmp;
+    }
+  }
+
+  size_t size = taosArrayGetSize(pBlockLoadInfo->aSttBlk);
+
+  // find the start block
+  (*pIter)->iSttBlk = binarySearchForStartBlock(pBlockLoadInfo->aSttBlk->pData, size, uid, backward);
+  if ((*pIter)->iSttBlk != -1) {
+    (*pIter)->pSttBlk = taosArrayGet(pBlockLoadInfo->aSttBlk, (*pIter)->iSttBlk);
+    // iRow is positioned one step OUTSIDE the block so the first next-row call
+    // (which pre-increments) lands on the first/last row
+    (*pIter)->iRow = ((*pIter)->backward) ? (*pIter)->pSttBlk->nRow : -1;
+  }
+
+_exit:
+  return code;
+}
+
+// Release the iterator itself.  The borrowed SSttBlockLoadInfo is deliberately
+// NOT freed here — its lifetime is managed by the caller/merge tree.
+void tLDataIterClose(SLDataIter *pIter) {
+  taosMemoryFree(pIter);
+}
+
+// Advance to the next stt block (direction given by pIter->backward) that may
+// hold rows for pIter->uid.  A block qualifies only if its uid range, time
+// range and version range all overlap the iterator's constraints.  On exit
+// pIter->pSttBlk is NULL when no further qualified block exists.
+void tLDataIterNextBlock(SLDataIter *pIter) {
+  int32_t step = pIter->backward ? -1 : 1;
+  pIter->iSttBlk += step;
+
+  int32_t index = -1;
+  size_t size = pIter->pBlockLoadInfo->aSttBlk->size;
+  for (int32_t i = pIter->iSttBlk; i < size && i >= 0; i += step) {
+    SSttBlk *p = taosArrayGet(pIter->pBlockLoadInfo->aSttBlk, i);
+    // blocks are uid-ordered: once past the target uid, stop scanning
+    if ((!pIter->backward) && p->minUid > pIter->uid) {
+      break;
+    }
+
+    if (pIter->backward && p->maxUid < pIter->uid) {
+      break;
+    }
+
+    // check uid firstly
+    if (p->minUid <= pIter->uid && p->maxUid >= pIter->uid) {
+      if ((!pIter->backward) && p->minKey > pIter->timeWindow.ekey) {
+        break;
+      }
+
+      if (pIter->backward && p->maxKey < pIter->timeWindow.skey) {
+        break;
+      }
+
+      // check time range secondly
+      if (p->minKey <= pIter->timeWindow.ekey && p->maxKey >= pIter->timeWindow.skey) {
+        if ((!pIter->backward) && p->minVer > pIter->verRange.maxVer) {
+          break;
+        }
+
+        if (pIter->backward && p->maxVer < pIter->verRange.minVer) {
+          break;
+        }
+
+        if (p->minVer <= pIter->verRange.maxVer && p->maxVer >= pIter->verRange.minVer) {
+          index = i;
+          break;
+        }
+      }
+    }
+  }
+
+  pIter->pSttBlk = NULL;
+  if (index != -1) {
+    // NOTE(review): the qualified slot is 'index', but the block is fetched at
+    // pIter->iSttBlk, which can lag behind 'index' when the loop stepped past
+    // blocks whose uid range matched but key/version range did not — confirm
+    // whether pIter->iSttBlk should be updated to 'index' before this fetch.
+    pIter->pSttBlk = (SSttBlk *)taosArrayGet(pIter->pBlockLoadInfo->aSttBlk, pIter->iSttBlk);
+  }
+}
+
+// Starting at pIter->iRow, find the next row inside the current (loaded) stt
+// block that matches the iterator's uid, time window and version range.  Sets
+// pIter->iRow to the matching row index, or to -1 when no qualified row
+// remains in this block.
+static void findNextValidRow(SLDataIter *pIter, const char* idStr) {
+  int32_t step = pIter->backward ? -1 : 1;
+
+  bool hasVal = false;
+  int32_t i = pIter->iRow;
+
+  SBlockData *pBlockData = loadLastBlock(pIter, idStr);
+
+  // mostly we only need to find the start position for a given table
+  // fast path: when positioned at the very first row to be examined, jump
+  // straight to the uid's row range via binary search instead of scanning
+  if ((((i == 0) && (!pIter->backward)) || (i == pBlockData->nRow - 1 && pIter->backward)) && pBlockData->aUid != NULL) {
+    i = binarySearchForStartRowIndex((uint64_t*)pBlockData->aUid, pBlockData->nRow, pIter->uid, pIter->backward);
+    if (i == -1) {
+      pIter->iRow = -1;
+      return;
+    }
+  }
+
+  for (; i < pBlockData->nRow && i >= 0; i += step) {
+    if (pBlockData->aUid != NULL) {
+      // rows are uid-ordered; stop once past the target uid for this direction
+      if (!pIter->backward) {
+        /*if (pBlockData->aUid[i] < pIter->uid) {
+          continue;
+        } else */if (pBlockData->aUid[i] > pIter->uid) {
+          break;
+        }
+      } else {
+        /*if (pBlockData->aUid[i] > pIter->uid) {
+          continue;
+        } else */if (pBlockData->aUid[i] < pIter->uid) {
+          break;
+        }
+      }
+    }
+
+    int64_t ts = pBlockData->aTSKEY[i];
+    if (!pIter->backward) {  // asc
+      if (ts > pIter->timeWindow.ekey) {  // no more data
+        break;
+      } else if (ts < pIter->timeWindow.skey) {
+        continue;
+      }
+    } else {
+      if (ts < pIter->timeWindow.skey) {
+        break;
+      } else if (ts > pIter->timeWindow.ekey) {
+        continue;
+      }
+    }
+
+    int64_t ver = pBlockData->aVersion[i];
+    if (ver < pIter->verRange.minVer) {
+      continue;
+    }
+
+    // todo opt handle desc case
+    if (ver > pIter->verRange.maxVer) {
+      continue;
+    }
+
+    hasVal = true;
+    break;
+  }
+
+  pIter->iRow = (hasVal) ? i : -1;
+}
+
+// Move the iterator to the next qualified row, crossing block boundaries as
+// needed, and cache it in pIter->rInfo.  Returns true when a row is available.
+// Returns false when the current block pointer is NULL or all blocks are
+// exhausted.
+bool tLDataIterNextRow(SLDataIter *pIter, const char* idStr) {
+  // NOTE(review): 'code' is never assigned a failure value in this function,
+  // so the error branch at _exit is currently unreachable — confirm intent.
+  int32_t code = 0;
+  int32_t step = pIter->backward ? -1 : 1;
+
+  // no qualified last file block in current file, no need to fetch row
+  if (pIter->pSttBlk == NULL) {
+    return false;
+  }
+
+  int32_t iBlockL = pIter->iSttBlk;
+  SBlockData *pBlockData = loadLastBlock(pIter, idStr);
+  pIter->iRow += step;
+
+  while (1) {
+    findNextValidRow(pIter, idStr);
+
+    if (pIter->iRow >= pBlockData->nRow || pIter->iRow < 0) {
+      // current block exhausted; move on to the next qualified block
+      tLDataIterNextBlock(pIter);
+      if (pIter->pSttBlk == NULL) {  // no more data
+        goto _exit;
+      }
+    } else {
+      break;
+    }
+
+    // entered a new block: reload its data and restart the row scan from the
+    // block's first position (iRow was reset by the failed scan above)
+    if (iBlockL != pIter->iSttBlk) {
+      pBlockData = loadLastBlock(pIter, idStr);
+      pIter->iRow += step;
+    }
+  }
+
+  // cache the located row so tLDataIterGet can return it without re-reading
+  pIter->rInfo.suid = pBlockData->suid;
+  pIter->rInfo.uid = pBlockData->uid;
+  pIter->rInfo.row = tsdbRowFromBlockData(pBlockData, pIter->iRow);
+
+_exit:
+  if (code != TSDB_CODE_SUCCESS) {
+    terrno = code;
+  }
+
+  return (code == TSDB_CODE_SUCCESS) && (pIter->pSttBlk != NULL);
+}
+
+SRowInfo *tLDataIterGet(SLDataIter *pIter) { return &pIter->rInfo; }
+
+// SMergeTree =================================================
+// SMergeTree =================================================
+// RB-tree comparator ordering iterators by their current row's (ts, version).
+// The payload pointers are converted back to SLDataIter via a container_of
+// style offset: the SRBTreeNode is assumed to be the FIRST member of
+// SLDataIter, so stepping back sizeof(SRBTreeNode) from the payload yields the
+// iterator.
+static FORCE_INLINE int32_t tLDataIterCmprFn(const void *p1, const void *p2) {
+  SLDataIter *pIter1 = (SLDataIter *)(((uint8_t *)p1) - sizeof(SRBTreeNode));
+  SLDataIter *pIter2 = (SLDataIter *)(((uint8_t *)p2) - sizeof(SRBTreeNode));
+
+  TSDBKEY key1 = TSDBROW_KEY(&pIter1->rInfo.row);
+  TSDBKEY key2 = TSDBROW_KEY(&pIter2->rInfo.row);
+
+  // primary order: timestamp; tie-break: version
+  if (key1.ts < key2.ts) {
+    return -1;
+  } else if (key1.ts > key2.ts) {
+    return 1;
+  } else {
+    if (key1.version < key2.version) {
+      return -1;
+    } else if (key1.version > key2.version) {
+      return 1;
+    } else {
+      return 0;
+    }
+  }
+}
+
+// Open a merge tree over ALL stt/last files of the current file set for one
+// table: one SLDataIter per stt file, each primed with its first qualified row
+// and inserted into an RB tree keyed by (ts, version).  pBlockLoadInfo may be
+// NULL, in which case the tree creates (and later owns/destroys) its own
+// load-info array.  Returns 0 on success; on failure the tree is closed.
+int32_t tMergeTreeOpen(SMergeTree *pMTree, int8_t backward, SDataFReader *pFReader, uint64_t suid, uint64_t uid,
+                       STimeWindow *pTimeWindow, SVersionRange *pVerRange, void* pBlockLoadInfo, const char* idStr) {
+  pMTree->backward = backward;
+  pMTree->pIter = NULL;
+  pMTree->pIterList = taosArrayInit(4, POINTER_BYTES);
+  if (pMTree->pIterList == NULL) {
+    return TSDB_CODE_OUT_OF_MEMORY;
+  }
+
+  pMTree->idStr = idStr;
+
+  tRBTreeCreate(&pMTree->rbt, tLDataIterCmprFn);
+  int32_t code = TSDB_CODE_SUCCESS;
+
+  SSttBlockLoadInfo* pLoadInfo = NULL;
+  if (pBlockLoadInfo == NULL) {
+    // lazily create a tree-owned load-info cache and remember to destroy it
+    if (pMTree->pLoadInfo == NULL) {
+      pMTree->destroyLoadInfo = true;
+      pMTree->pLoadInfo = tCreateLastBlockLoadInfo();
+    }
+
+    pLoadInfo = pMTree->pLoadInfo;
+  } else {
+    pLoadInfo = pBlockLoadInfo;
+  }
+
+  for (int32_t i = 0; i < pFReader->pSet->nSttF; ++i) {  // open all last file
+    struct SLDataIter* pIter = NULL;
+    // assumes pLoadInfo points to an array with (at least) nSttF entries, one
+    // per stt file — TODO confirm against tCreateLastBlockLoadInfo
+    code = tLDataIterOpen(&pIter, pFReader, i, pMTree->backward, suid, uid, pTimeWindow, pVerRange, &pLoadInfo[i]);
+    if (code != TSDB_CODE_SUCCESS) {
+      goto _end;
+    }
+
+    bool hasVal = tLDataIterNextRow(pIter, pMTree->idStr);
+    if (hasVal) {
+      taosArrayPush(pMTree->pIterList, &pIter);
+      tMergeTreeAddIter(pMTree, pIter);
+    } else {
+      // no qualified row in this file: discard the iterator immediately
+      tLDataIterClose(pIter);
+    }
+  }
+
+  return code;
+
+_end:
+  tMergeTreeClose(pMTree);
+  return code;
+}
+
+void tMergeTreeAddIter(SMergeTree *pMTree, SLDataIter *pIter) { tRBTreePut(&pMTree->rbt, (SRBTreeNode *)pIter); }
+
+// Advance the merge tree to its next row in global (ts, version) order.
+// The active iterator (pMTree->pIter) is stepped; if its new row no longer
+// precedes the tree's minimum it is re-inserted and the minimum is popped as
+// the new active iterator.  Returns true while a row is available.
+bool tMergeTreeNext(SMergeTree *pMTree) {
+  int32_t code = TSDB_CODE_SUCCESS;
+  if (pMTree->pIter) {
+    SLDataIter *pIter = pMTree->pIter;
+
+    bool hasVal = tLDataIterNextRow(pIter, pMTree->idStr);
+    if (!hasVal) {
+      // active iterator exhausted; a replacement is chosen below
+      pMTree->pIter = NULL;
+    }
+
+    // compare with min in RB Tree
+    pIter = (SLDataIter *)tRBTreeMin(&pMTree->rbt);
+    if (pMTree->pIter && pIter) {
+      int32_t c = pMTree->rbt.cmprFn(RBTREE_NODE_PAYLOAD(&pMTree->pIter->node), RBTREE_NODE_PAYLOAD(&pIter->node));
+      if (c > 0) {
+        // active iterator's row is no longer the smallest: push it back and
+        // let the tree minimum take over
+        tRBTreePut(&pMTree->rbt, (SRBTreeNode *)pMTree->pIter);
+        pMTree->pIter = NULL;
+      } else {
+        // (ts, version) pairs must be unique across iterators, so ties are a
+        // logic error
+        ASSERT(c);
+      }
+    }
+  }
+
+  if (pMTree->pIter == NULL) {
+    // promote the tree minimum to be the active iterator
+    pMTree->pIter = (SLDataIter *)tRBTreeMin(&pMTree->rbt);
+    if (pMTree->pIter) {
+      tRBTreeDrop(&pMTree->rbt, (SRBTreeNode *)pMTree->pIter);
+    }
+  }
+
+  return pMTree->pIter != NULL;
+}
+
+TSDBROW tMergeTreeGetRow(SMergeTree *pMTree) { return pMTree->pIter->rInfo.row; }
+
+// Tear down the merge tree: close every opened iterator, release the iterator
+// list, and destroy the load-info cache only when this tree created it.
+void tMergeTreeClose(SMergeTree *pMTree) {
+  size_t size = taosArrayGetSize(pMTree->pIterList);
+  for (int32_t i = 0; i < size; ++i) {
+    SLDataIter *pIter = taosArrayGetP(pMTree->pIterList, i);
+    tLDataIterClose(pIter);
+  }
+
+  pMTree->pIterList = taosArrayDestroy(pMTree->pIterList);
+  pMTree->pIter = NULL;
+
+  // only free the load info if it was created by tMergeTreeOpen itself
+  if (pMTree->destroyLoadInfo) {
+    pMTree->pLoadInfo = destroyLastBlockLoadInfo(pMTree->pLoadInfo);
+    pMTree->destroyLoadInfo = false;
+  }
+}
diff --git a/source/dnode/vnode/src/tsdb/tsdbRead.c b/source/dnode/vnode/src/tsdb/tsdbRead.c
index bbef24907967df9221c09515038786d8acbcbde1..60d967681bc3851f0438f4fb9221f46db326acdc 100644
--- a/source/dnode/vnode/src/tsdb/tsdbRead.c
+++ b/source/dnode/vnode/src/tsdb/tsdbRead.c
@@ -17,8 +17,6 @@
#include "tsdb.h"
#define ASCENDING_TRAVERSE(o) (o == TSDB_ORDER_ASC)
-#define ALL_ROWS_CHECKED_INDEX (INT16_MIN)
-#define DEFAULT_ROW_INDEX_VAL (-1)
typedef enum {
EXTERNAL_ROWS_PREV = 0x1,
@@ -34,21 +32,20 @@ typedef struct {
typedef struct {
int32_t numOfBlocks;
- int32_t numOfLastBlocks;
+ int32_t numOfLastFiles;
} SBlockNumber;
typedef struct STableBlockScanInfo {
uint64_t uid;
TSKEY lastKey;
- SMapData mapData; // block info (compressed)
- SArray* pBlockList; // block data index list
- SIterInfo iter; // mem buffer skip list iterator
- SIterInfo iiter; // imem buffer skip list iterator
- SArray* delSkyline; // delete info for this table
- int32_t fileDelIndex; // file block delete index
- int32_t lastBlockDelIndex;// delete index for last block
- bool iterInit; // whether to initialize the in-memory skip list iterator or not
- int16_t indexInBlockL;// row position in last block
+ SMapData mapData; // block info (compressed)
+ SArray* pBlockList; // block data index list
+ SIterInfo iter; // mem buffer skip list iterator
+ SIterInfo iiter; // imem buffer skip list iterator
+ SArray* delSkyline; // delete info for this table
+ int32_t fileDelIndex; // file block delete index
+ int32_t lastBlockDelIndex; // delete index for last block
+ bool iterInit; // whether to initialize the in-memory skip list iterator or not
} STableBlockScanInfo;
typedef struct SBlockOrderWrapper {
@@ -73,6 +70,8 @@ typedef struct SIOCostSummary {
double smaLoadTime;
int64_t lastBlockLoad;
double lastBlockLoadTime;
+ int64_t composedBlocks;
+ double buildComposedBlockTime;
} SIOCostSummary;
typedef struct SBlockLoadSuppInfo {
@@ -83,28 +82,21 @@ typedef struct SBlockLoadSuppInfo {
char** buildBuf; // build string tmp buffer, todo remove it later after all string format being updated.
} SBlockLoadSuppInfo;
-typedef struct SVersionRange {
- uint64_t minVer;
- uint64_t maxVer;
-} SVersionRange;
-
typedef struct SLastBlockReader {
- SArray* pBlockL;
- int32_t currentBlockIndex;
- SBlockData lastBlockData;
- STimeWindow window;
- SVersionRange verRange;
- int32_t order;
- uint64_t uid;
- int16_t* rowIndex; // row index ptr, usually from the STableBlockScanInfo->indexInBlockL
+ STimeWindow window;
+ SVersionRange verRange;
+ int32_t order;
+ uint64_t uid;
+ SMergeTree mergeTree;
+ SSttBlockLoadInfo* pInfo;
} SLastBlockReader;
typedef struct SFilesetIter {
- int32_t numOfFiles; // number of total files
- int32_t index; // current accessed index in the list
- SArray* pFileList; // data file list
+ int32_t numOfFiles; // number of total files
+ int32_t index; // current accessed index in the list
+ SArray* pFileList; // data file list
int32_t order;
- SLastBlockReader* pLastBlockReader; // last file block reader
+ SLastBlockReader* pLastBlockReader; // last file block reader
} SFilesetIter;
typedef struct SFileDataBlockInfo {
@@ -116,9 +108,9 @@ typedef struct SFileDataBlockInfo {
typedef struct SDataBlockIter {
int32_t numOfBlocks;
int32_t index;
- SArray* blockList; // SArray
+ SArray* blockList; // SArray
int32_t order;
- SBlock block; // current SBlock data
+ SDataBlk block; // current SDataBlk data
SHashObj* pTableMap;
} SDataBlockIter;
@@ -129,16 +121,22 @@ typedef struct SFileBlockDumpInfo {
bool allDumped;
} SFileBlockDumpInfo;
+typedef struct SUidOrderCheckInfo {
+ uint64_t* tableUidList; // access table uid list in uid ascending order list
+ int32_t currentIndex; // index in table uid list
+} SUidOrderCheckInfo;
+
typedef struct SReaderStatus {
- bool loadFromFile; // check file stage
- SHashObj* pTableMap; // SHash
- STableBlockScanInfo* pTableIter; // table iterator used in building in-memory buffer data blocks.
+ bool loadFromFile; // check file stage
+ bool composedDataBlock; // the returned data block is a composed block or not
+ SHashObj* pTableMap; // SHash
+ STableBlockScanInfo* pTableIter; // table iterator used in building in-memory buffer data blocks.
+ SUidOrderCheckInfo uidCheckInfo; // check all table in uid order
SFileBlockDumpInfo fBlockDumpInfo;
SDFileSet* pCurrentFileset; // current opened file set
SBlockData fileBlockData;
SFilesetIter fileIter;
SDataBlockIter blockIter;
- bool composedDataBlock; // the returned data block is a composed block or not
} SReaderStatus;
struct STsdbReader {
@@ -166,30 +164,37 @@ struct STsdbReader {
static SFileDataBlockInfo* getCurrentBlockInfo(SDataBlockIter* pBlockIter);
static int buildDataBlockFromBufImpl(STableBlockScanInfo* pBlockScanInfo, int64_t endKey, int32_t capacity,
STsdbReader* pReader);
-static TSDBROW* getValidRow(SIterInfo* pIter, const SArray* pDelList, STsdbReader* pReader);
+static TSDBROW* getValidMemRow(SIterInfo* pIter, const SArray* pDelList, STsdbReader* pReader);
static int32_t doMergeRowsInFileBlocks(SBlockData* pBlockData, STableBlockScanInfo* pScanInfo, STsdbReader* pReader,
SRowMerger* pMerger);
-static int32_t doMergeRowsInLastBlock(SLastBlockReader* pLastBlockReader, STableBlockScanInfo* pScanInfo, int64_t ts, SRowMerger* pMerger);
+static int32_t doMergeRowsInLastBlock(SLastBlockReader* pLastBlockReader, STableBlockScanInfo* pScanInfo, int64_t ts,
+ SRowMerger* pMerger);
static int32_t doMergeRowsInBuf(SIterInfo* pIter, uint64_t uid, int64_t ts, SArray* pDelList, SRowMerger* pMerger,
STsdbReader* pReader);
static int32_t doAppendRowFromTSRow(SSDataBlock* pBlock, STsdbReader* pReader, STSRow* pTSRow, uint64_t uid);
static int32_t doAppendRowFromFileBlock(SSDataBlock* pResBlock, STsdbReader* pReader, SBlockData* pBlockData,
- int32_t rowIndex);
+ int32_t rowIndex);
static void setComposedBlockFlag(STsdbReader* pReader, bool composed);
static bool hasBeenDropped(const SArray* pDelList, int32_t* index, TSDBKEY* pKey, int32_t order);
-static void doMergeMemTableMultiRows(TSDBROW* pRow, uint64_t uid, SIterInfo* pIter, SArray* pDelList, STSRow** pTSRow,
- STsdbReader* pReader, bool* freeTSRow);
-static void doMergeMemIMemRows(TSDBROW* pRow, TSDBROW* piRow, STableBlockScanInfo* pBlockScanInfo, STsdbReader* pReader,
- STSRow** pTSRow);
+static int32_t doMergeMemTableMultiRows(TSDBROW* pRow, uint64_t uid, SIterInfo* pIter, SArray* pDelList,
+ STSRow** pTSRow, STsdbReader* pReader, bool* freeTSRow);
+static int32_t doMergeMemIMemRows(TSDBROW* pRow, TSDBROW* piRow, STableBlockScanInfo* pBlockScanInfo,
+ STsdbReader* pReader, STSRow** pTSRow);
+static int32_t mergeRowsInFileBlocks(SBlockData* pBlockData, STableBlockScanInfo* pBlockScanInfo, int64_t key,
+ STsdbReader* pReader);
+
static int32_t initDelSkylineIterator(STableBlockScanInfo* pBlockScanInfo, STsdbReader* pReader, STbData* pMemTbData,
STbData* piMemTbData);
static STsdb* getTsdbByRetentions(SVnode* pVnode, TSKEY winSKey, SRetention* retentions, const char* idstr,
int8_t* pLevel);
static SVersionRange getQueryVerRange(SVnode* pVnode, SQueryTableDataCond* pCond, int8_t level);
-static int64_t getCurrentKeyInLastBlock(SLastBlockReader* pLastBlockReader);
-static bool hasDataInLastBlock(SLastBlockReader* pLastBlockReader);
-static int32_t doBuildDataBlock(STsdbReader* pReader);
+static int64_t getCurrentKeyInLastBlock(SLastBlockReader* pLastBlockReader);
+static bool hasDataInLastBlock(SLastBlockReader* pLastBlockReader);
+static int32_t doBuildDataBlock(STsdbReader* pReader);
+static TSDBKEY getCurrentKeyInBuf(STableBlockScanInfo* pScanInfo, STsdbReader* pReader);
+
+static bool outOfTimeWindow(int64_t ts, STimeWindow* pWindow) { return (ts > pWindow->ekey) || (ts < pWindow->skey); }
static int32_t setColumnIdSlotList(STsdbReader* pReader, SSDataBlock* pBlock) {
SBlockLoadSuppInfo* pSupInfo = &pReader->suppInfo;
@@ -226,15 +231,13 @@ static SHashObj* createDataBlockScanInfo(STsdbReader* pTsdbReader, const STableK
}
for (int32_t j = 0; j < numOfTables; ++j) {
- STableBlockScanInfo info = {.lastKey = 0, .uid = idList[j].uid, .indexInBlockL = DEFAULT_ROW_INDEX_VAL};
+ STableBlockScanInfo info = {.lastKey = 0, .uid = idList[j].uid};
if (ASCENDING_TRAVERSE(pTsdbReader->order)) {
- if (info.lastKey == INT64_MIN || info.lastKey < pTsdbReader->window.skey) {
- info.lastKey = pTsdbReader->window.skey;
- }
-
- ASSERT(info.lastKey >= pTsdbReader->window.skey && info.lastKey <= pTsdbReader->window.ekey);
+ int64_t skey = pTsdbReader->window.skey;
+ info.lastKey = (skey > INT64_MIN) ? (skey - 1) : skey;
} else {
- info.lastKey = pTsdbReader->window.skey;
+ int64_t ekey = pTsdbReader->window.ekey;
+ info.lastKey = (ekey < INT64_MAX) ? (ekey + 1) : ekey;
}
taosHashPut(pTableMap, &info.uid, sizeof(uint64_t), &info, sizeof(info));
@@ -248,7 +251,7 @@ static SHashObj* createDataBlockScanInfo(STsdbReader* pTsdbReader, const STableK
return pTableMap;
}
-static void resetDataBlockScanInfo(SHashObj* pTableMap) {
+static void resetDataBlockScanInfo(SHashObj* pTableMap, int64_t ts) {
STableBlockScanInfo* p = NULL;
while ((p = taosHashIterate(pTableMap, p)) != NULL) {
@@ -259,6 +262,7 @@ static void resetDataBlockScanInfo(SHashObj* pTableMap) {
}
p->delSkyline = taosArrayDestroy(p->delSkyline);
+ p->lastKey = ts;
}
}
@@ -320,7 +324,7 @@ static void limitOutputBufferSize(const SQueryTableDataCond* pCond, int32_t* cap
}
// init file iterator
-static int32_t initFilesetIterator(SFilesetIter* pIter, SArray* aDFileSet, STsdbReader* pReader/*int32_t order, const char* idstr*/) {
+static int32_t initFilesetIterator(SFilesetIter* pIter, SArray* aDFileSet, STsdbReader* pReader) {
size_t numOfFileset = taosArrayGetSize(aDFileSet);
pIter->index = ASCENDING_TRAVERSE(pReader->order) ? -1 : numOfFileset;
@@ -335,17 +339,21 @@ static int32_t initFilesetIterator(SFilesetIter* pIter, SArray* aDFileSet, STsdb
tsdbError("failed to prepare the last block iterator, code:%d %s", tstrerror(code), pReader->idStr);
return code;
}
+ }
- SLastBlockReader* pLReader = pIter->pLastBlockReader;
- pLReader->pBlockL = taosArrayInit(4, sizeof(SBlockL));
- pLReader->order = pReader->order;
- pLReader->window = pReader->window;
- pLReader->verRange = pReader->verRange;
- pLReader->currentBlockIndex = -1;
+ SLastBlockReader* pLReader = pIter->pLastBlockReader;
+ pLReader->order = pReader->order;
+ pLReader->window = pReader->window;
+ pLReader->verRange = pReader->verRange;
- int32_t code = tBlockDataCreate(&pLReader->lastBlockData);
- if (code != TSDB_CODE_SUCCESS) {
- return code;
+ pLReader->uid = 0;
+ tMergeTreeClose(&pLReader->mergeTree);
+
+ if (pLReader->pInfo == NULL) {
+ pLReader->pInfo = tCreateLastBlockLoadInfo();
+ if (pLReader->pInfo == NULL) {
+ tsdbDebug("init fileset iterator failed, code:%s %s", tstrerror(terrno), pReader->idStr);
+ return terrno;
}
}
@@ -362,6 +370,13 @@ static bool filesetIteratorNext(SFilesetIter* pIter, STsdbReader* pReader) {
return false;
}
+ SIOCostSummary* pSum = &pReader->cost;
+ getLastBlockLoadInfo(pIter->pLastBlockReader->pInfo, &pSum->lastBlockLoad, &pReader->cost.lastBlockLoadTime);
+
+ pIter->pLastBlockReader->uid = 0;
+ tMergeTreeClose(&pIter->pLastBlockReader->mergeTree);
+ resetLastBlockLoadInfo(pIter->pLastBlockReader->pInfo);
+
// check file the time range of coverage
STimeWindow win = {0};
@@ -406,7 +421,7 @@ _err:
return false;
}
-static void resetDataBlockIterator(SDataBlockIter* pIter, int32_t order, SHashObj* pTableMap) {
+static void resetDataBlockIterator(SDataBlockIter* pIter, int32_t order) {
pIter->order = order;
pIter->index = -1;
pIter->numOfBlocks = 0;
@@ -415,7 +430,6 @@ static void resetDataBlockIterator(SDataBlockIter* pIter, int32_t order, SHashOb
} else {
taosArrayClear(pIter->blockList);
}
- pIter->pTableMap = pTableMap;
}
static void cleanupDataBlockIterator(SDataBlockIter* pIter) { taosArrayDestroy(pIter->blockList); }
@@ -511,86 +525,6 @@ _end:
return code;
}
-// void tsdbResetQueryHandleForNewTable(STsdbReader* queryHandle, SQueryTableDataCond* pCond, STableListInfo* tableList,
-// int32_t tWinIdx) {
-// STsdbReader* pTsdbReadHandle = queryHandle;
-
-// pTsdbReadHandle->order = pCond->order;
-// pTsdbReadHandle->window = pCond->twindows[tWinIdx];
-// pTsdbReadHandle->type = TSDB_QUERY_TYPE_ALL;
-// pTsdbReadHandle->cur.fid = -1;
-// pTsdbReadHandle->cur.win = TSWINDOW_INITIALIZER;
-// pTsdbReadHandle->checkFiles = true;
-// pTsdbReadHandle->activeIndex = 0; // current active table index
-// pTsdbReadHandle->locateStart = false;
-// pTsdbReadHandle->loadExternalRow = pCond->loadExternalRows;
-
-// if (ASCENDING_TRAVERSE(pCond->order)) {
-// assert(pTsdbReadHandle->window.skey <= pTsdbReadHandle->window.ekey);
-// } else {
-// assert(pTsdbReadHandle->window.skey >= pTsdbReadHandle->window.ekey);
-// }
-
-// // allocate buffer in order to load data blocks from file
-// memset(pTsdbReadHandle->suppInfo.pstatis, 0, sizeof(SColumnDataAgg));
-// memset(pTsdbReadHandle->suppInfo.plist, 0, POINTER_BYTES);
-
-// tsdbInitDataBlockLoadInfo(&pTsdbReadHandle->dataBlockLoadInfo);
-// tsdbInitCompBlockLoadInfo(&pTsdbReadHandle->compBlockLoadInfo);
-
-// SArray* pTable = NULL;
-// // STsdbMeta* pMeta = tsdbGetMeta(pTsdbReadHandle->pTsdb);
-
-// // pTsdbReadHandle->pTableCheckInfo = destroyTableCheckInfo(pTsdbReadHandle->pTableCheckInfo);
-
-// pTsdbReadHandle->pTableCheckInfo = NULL; // createDataBlockScanInfo(pTsdbReadHandle, groupList, pMeta,
-// // &pTable);
-// if (pTsdbReadHandle->pTableCheckInfo == NULL) {
-// // tsdbReaderClose(pTsdbReadHandle);
-// terrno = TSDB_CODE_TDB_OUT_OF_MEMORY;
-// }
-
-// // pTsdbReadHandle->prev = doFreeColumnInfoData(pTsdbReadHandle->prev);
-// // pTsdbReadHandle->next = doFreeColumnInfoData(pTsdbReadHandle->next);
-// }
-
-// SArray* tsdbGetQueriedTableList(STsdbReader** pHandle) {
-// assert(pHandle != NULL);
-
-// STsdbReader* pTsdbReadHandle = (STsdbReader*)pHandle;
-
-// size_t size = taosArrayGetSize(pTsdbReadHandle->pTableCheckInfo);
-// SArray* res = taosArrayInit(size, POINTER_BYTES);
-// return res;
-// }
-
-// static int32_t binarySearchForBlock(SBlock* pBlock, int32_t numOfBlocks, TSKEY skey, int32_t order) {
-// int32_t firstSlot = 0;
-// int32_t lastSlot = numOfBlocks - 1;
-
-// int32_t midSlot = firstSlot;
-
-// while (1) {
-// numOfBlocks = lastSlot - firstSlot + 1;
-// midSlot = (firstSlot + (numOfBlocks >> 1));
-
-// if (numOfBlocks == 1) break;
-
-// if (skey > pBlock[midSlot].maxKey.ts) {
-// if (numOfBlocks == 2) break;
-// if ((order == TSDB_ORDER_DESC) && (skey < pBlock[midSlot + 1].minKey.ts)) break;
-// firstSlot = midSlot + 1;
-// } else if (skey < pBlock[midSlot].minKey.ts) {
-// if ((order == TSDB_ORDER_ASC) && (skey > pBlock[midSlot - 1].maxKey.ts)) break;
-// lastSlot = midSlot - 1;
-// } else {
-// break; // got the slot
-// }
-// }
-
-// return midSlot;
-// }
-
static int32_t doLoadBlockIndex(STsdbReader* pReader, SDataFReader* pFileReader, SArray* pIndexList) {
SArray* aBlockIdx = taosArrayInit(8, sizeof(SBlockIdx));
@@ -651,14 +585,12 @@ static void cleanupTableScanInfo(SHashObj* pTableMap) {
}
// reset the index in last block when handing a new file
- px->indexInBlockL = -1;
tMapDataClear(&px->mapData);
taosArrayClear(px->pBlockList);
}
}
-static int32_t doLoadFileBlock(STsdbReader* pReader, SArray* pIndexList, SArray* pLastBlockIndex,
- SBlockNumber * pBlockNum, SArray* pQualifiedLastBlock) {
+static int32_t doLoadFileBlock(STsdbReader* pReader, SArray* pIndexList, SBlockNumber* pBlockNum) {
int32_t numOfQTable = 0;
size_t sizeInDisk = 0;
size_t numOfTables = taosArrayGetSize(pIndexList);
@@ -672,12 +604,12 @@ static int32_t doLoadFileBlock(STsdbReader* pReader, SArray* pIndexList, SArray*
STableBlockScanInfo* pScanInfo = taosHashGet(pReader->status.pTableMap, &pBlockIdx->uid, sizeof(int64_t));
tMapDataReset(&pScanInfo->mapData);
- tsdbReadBlock(pReader->pFileReader, pBlockIdx, &pScanInfo->mapData);
+ tsdbReadDataBlk(pReader->pFileReader, pBlockIdx, &pScanInfo->mapData);
sizeInDisk += pScanInfo->mapData.nData;
for (int32_t j = 0; j < pScanInfo->mapData.nItem; ++j) {
- SBlock block = {0};
- tMapDataGetItemByIdx(&pScanInfo->mapData, j, &block, tGetBlock);
+ SDataBlk block = {0};
+ tMapDataGetItemByIdx(&pScanInfo->mapData, j, &block, tGetDataBlk);
// 1. time range check
if (block.minKey.ts > pReader->window.ekey || block.maxKey.ts < pReader->window.skey) {
@@ -703,35 +635,15 @@ static int32_t doLoadFileBlock(STsdbReader* pReader, SArray* pIndexList, SArray*
}
}
- size_t numOfLast = taosArrayGetSize(pLastBlockIndex);
- for(int32_t i = 0; i < numOfLast; ++i) {
- SBlockL* pLastBlock = taosArrayGet(pLastBlockIndex, i);
- if (pLastBlock->suid != pReader->suid) {
- continue;
- }
-
- {
- // 1. time range check
- if (pLastBlock->minKey > pReader->window.ekey || pLastBlock->maxKey < pReader->window.skey) {
- continue;
- }
-
- // 2. version range check
- if (pLastBlock->minVer > pReader->verRange.maxVer || pLastBlock->maxVer < pReader->verRange.minVer) {
- continue;
- }
-
- pBlockNum->numOfLastBlocks += 1;
- taosArrayPush(pQualifiedLastBlock, pLastBlock);
- }
- }
-
- int32_t total = pBlockNum->numOfLastBlocks + pBlockNum->numOfBlocks;
+ pBlockNum->numOfLastFiles = pReader->pFileReader->pSet->nSttF;
+ int32_t total = pBlockNum->numOfLastFiles + pBlockNum->numOfBlocks;
double el = (taosGetTimestampUs() - st) / 1000.0;
- tsdbDebug("load block of %d tables completed, blocks:%d in %d tables, lastBlock:%d, size:%.2f Kb, elapsed time:%.2f ms %s",
- numOfTables, pBlockNum->numOfBlocks, numOfQTable, pBlockNum->numOfLastBlocks, sizeInDisk
- / 1000.0, el, pReader->idStr);
+ tsdbDebug(
+ "load block of %d tables completed, blocks:%d in %d tables, last-files:%d, block-info-size:%.2f Kb, elapsed "
+ "time:%.2f ms %s",
+ numOfTables, pBlockNum->numOfBlocks, numOfQTable, pBlockNum->numOfLastFiles, sizeInDisk / 1000.0, el,
+ pReader->idStr);
pReader->cost.numOfBlocks += total;
pReader->cost.headFileLoadTime += el;
@@ -771,15 +683,156 @@ static SFileDataBlockInfo* getCurrentBlockInfo(SDataBlockIter* pBlockIter) {
return pBlockInfo;
}
-static SBlock* getCurrentBlock(SDataBlockIter* pBlockIter) { return &pBlockIter->block; }
+static SDataBlk* getCurrentBlock(SDataBlockIter* pBlockIter) { return &pBlockIter->block; }
+
+// Binary search a TSKEY array for 'key'.  The list is assumed to be sorted
+// consistently with 'order' (ascending for TSDB_ORDER_ASC; presumably
+// descending for TSDB_ORDER_DESC — TODO confirm with callers).  Returns a
+// boundary position for the scan direction, or -1 when the key is outside the
+// list for that direction.
+int32_t binarySearchForTs(char* pValue, int num, TSKEY key, int order) {
+  int32_t midPos = -1;
+  int32_t numOfRows;
+
+  ASSERT(order == TSDB_ORDER_ASC || order == TSDB_ORDER_DESC);
+
+  TSKEY*  keyList = (TSKEY*)pValue;
+  int32_t firstPos = 0;
+  int32_t lastPos = num - 1;
+
+  if (order == TSDB_ORDER_DESC) {
+    // find the first position which is smaller than the key
+    while (1) {
+      if (key >= keyList[firstPos]) return firstPos;
+      if (key == keyList[lastPos]) return lastPos;
+
+      if (key < keyList[lastPos]) {
+        // NOTE(review): on the first iteration lastPos == num-1, so this
+        // always yields -1; it only returns a slot after lastPos has shrunk —
+        // confirm this is the intended "position after the remaining range".
+        lastPos += 1;
+        if (lastPos >= num) {
+          return -1;
+        } else {
+          return lastPos;
+        }
+      }
+
+      numOfRows = lastPos - firstPos + 1;
+      midPos = (numOfRows >> 1) + firstPos;
+
+      if (key < keyList[midPos]) {
+        firstPos = midPos + 1;
+      } else if (key > keyList[midPos]) {
+        lastPos = midPos - 1;
+      } else {
+        break;
+      }
+    }
+
+  } else {
+    // find the first position which is bigger than the key
+    while (1) {
+      if (key <= keyList[firstPos]) return firstPos;
+      if (key == keyList[lastPos]) return lastPos;
+
+      if (key > keyList[lastPos]) {
+        // key beyond the remaining range: the slot after it (or none)
+        lastPos = lastPos + 1;
+        if (lastPos >= num)
+          return -1;
+        else
+          return lastPos;
+      }
+
+      numOfRows = lastPos - firstPos + 1;
+      midPos = (numOfRows >> 1u) + firstPos;
+
+      if (key < keyList[midPos]) {
+        lastPos = midPos - 1;
+      } else if (key > keyList[midPos]) {
+        firstPos = midPos + 1;
+      } else {
+        break;
+      }
+    }
+  }
+
+  return midPos;
+}
+
+// Bounded binary search starting at 'pos' in an ascending TSKEY list.
+// For ASC: returns the last index in [pos, num-1] whose key is <= 'key', or -1
+// when key precedes keyList[pos].  For DESC: scans toward index 0 and returns
+// the last index whose key is >= 'key', or -1 when key exceeds keyList[pos].
+static int doBinarySearchKey(TSKEY* keyList, int num, int pos, TSKEY key, int order) {
+  // start end position
+  int s, e;
+  s = pos;
+
+  // check
+  assert(pos >=0 && pos < num);
+  assert(num > 0);
+
+  if (order == TSDB_ORDER_ASC) {
+    // find the first position which is smaller than the key
+    e = num - 1;
+    if (key < keyList[pos])
+      return -1;
+    while (1) {
+      // check can return
+      if (key >= keyList[e])
+        return e;
+      if (key <= keyList[s])
+        return s;
+      if (e - s <= 1)
+        return s;
+
+      // change start or end position
+      int mid = s + (e - s + 1)/2;
+      if (keyList[mid] > key)
+        e = mid;
+      else if(keyList[mid] < key)
+        s = mid;
+      else
+        return mid;
+    }
+  } else { // DESC
+    // find the first position which is bigger than the key
+    e = 0;
+    if (key > keyList[pos])
+      return -1;
+    while (1) {
+      // check can return
+      if (key <= keyList[e])
+        return e;
+      if (key >= keyList[s])
+        return s;
+      if (s - e <= 1)
+        return s;
+
+      // change start or end position
+      int mid = s - (s - e + 1)/2;
+      if (keyList[mid] < key)
+        e = mid;
+      else if(keyList[mid] > key)
+        s = mid;
+      else
+        return mid;
+    }
+  }
+}
+
+// Compute the last row index of the current data block that still lies inside
+// the reader's time window, searching from 'pos' in the traversal direction.
+// Returns -1 when no row of the block is inside the window.
+int32_t getEndPosInDataBlock(STsdbReader* pReader, SBlockData* pBlockData, SDataBlk* pBlock, int32_t pos) {
+  // NOTE: reverse the order to find the end position in data block
+  int32_t endPos = -1;
+  bool    asc = ASCENDING_TRAVERSE(pReader->order);
+
+  // fast path: the whole block is covered by the query window at this end
+  if (asc && pReader->window.ekey >= pBlock->maxKey.ts) {
+    endPos = pBlock->nRow - 1;
+  } else if (!asc && pReader->window.skey <= pBlock->minKey.ts) {
+    endPos = 0;
+  } else {
+    endPos = doBinarySearchKey(pBlockData->aTSKEY, pBlock->nRow, pos, pReader->window.ekey, pReader->order);
+  }
+
+  return endPos;
+}
static int32_t copyBlockDataToSDataBlock(STsdbReader* pReader, STableBlockScanInfo* pBlockScanInfo) {
SReaderStatus* pStatus = &pReader->status;
SDataBlockIter* pBlockIter = &pStatus->blockIter;
SBlockData* pBlockData = &pStatus->fileBlockData;
- SFileDataBlockInfo* pFBlock = getCurrentBlockInfo(pBlockIter);
- SBlock* pBlock = getCurrentBlock(pBlockIter);
+ SFileDataBlockInfo* pBlockInfo = getCurrentBlockInfo(pBlockIter);
+ SDataBlk* pBlock = getCurrentBlock(pBlockIter);
SSDataBlock* pResBlock = pReader->pResBlock;
int32_t numOfOutputCols = blockDataGetNumOfCols(pResBlock);
@@ -791,23 +844,42 @@ static int32_t copyBlockDataToSDataBlock(STsdbReader* pReader, STableBlockScanIn
bool asc = ASCENDING_TRAVERSE(pReader->order);
int32_t step = asc ? 1 : -1;
- int32_t rowIndex = 0;
- int32_t remain = asc ? (pBlockData->nRow - pDumpInfo->rowIndex) : (pDumpInfo->rowIndex + 1);
-
- int32_t endIndex = 0;
- if (remain <= pReader->capacity) {
- endIndex = pBlockData->nRow;
+ if (asc && pReader->window.skey <= pBlock->minKey.ts) {
+ pDumpInfo->rowIndex = 0;
+ } else if (!asc && pReader->window.ekey >= pBlock->maxKey.ts) {
+ pDumpInfo->rowIndex = pBlock->nRow - 1;
} else {
- endIndex = pDumpInfo->rowIndex + step * pReader->capacity;
+ int32_t pos = asc? pBlock->nRow-1:0;
+ int32_t order = (pReader->order == TSDB_ORDER_ASC)? TSDB_ORDER_DESC:TSDB_ORDER_ASC;
+ pDumpInfo->rowIndex = doBinarySearchKey(pBlockData->aTSKEY, pBlock->nRow, pos, pReader->window.skey, order);
+ }
+
+ // time window check
+ int32_t endIndex = getEndPosInDataBlock(pReader, pBlockData, pBlock, pDumpInfo->rowIndex);
+ if (endIndex == -1) {
+ setBlockAllDumped(pDumpInfo, pReader->window.ekey, pReader->order);
+ return TSDB_CODE_SUCCESS;
+ }
+
+ endIndex += step;
+ int32_t remain = asc ? (endIndex - pDumpInfo->rowIndex) : (pDumpInfo->rowIndex - endIndex);
+ if (remain > pReader->capacity) { // output buffer check
remain = pReader->capacity;
}
- int32_t i = 0;
+ int32_t rowIndex = 0;
+
+ int32_t i = 0;
SColumnInfoData* pColData = taosArrayGet(pResBlock->pDataBlock, i);
if (pColData->info.colId == PRIMARYKEY_TIMESTAMP_COL_ID) {
- for (int32_t j = pDumpInfo->rowIndex; j < endIndex && j >= 0; j += step) {
- colDataAppend(pColData, rowIndex++, (const char*)&pBlockData->aTSKEY[j], false);
+ if (asc) {
+ memcpy(pColData->pData, &pBlockData->aTSKEY[pDumpInfo->rowIndex], remain * sizeof(int64_t));
+ } else {
+ for (int32_t j = pDumpInfo->rowIndex; rowIndex < remain; j += step) {
+ colDataAppendInt64(pColData, rowIndex++, &pBlockData->aTSKEY[j]);
+ }
}
+
i += 1;
}
@@ -821,13 +893,32 @@ static int32_t copyBlockDataToSDataBlock(STsdbReader* pReader, STableBlockScanIn
if (pData->cid < pColData->info.colId) {
colIndex += 1;
} else if (pData->cid == pColData->info.colId) {
- for (int32_t j = pDumpInfo->rowIndex; j < endIndex && j >= 0; j += step) {
- tColDataGetValue(pData, j, &cv);
- doCopyColVal(pColData, rowIndex++, i, &cv, pSupInfo);
+ if (pData->flag == HAS_NONE || pData->flag == HAS_NULL) {
+ colDataAppendNNULL(pColData, 0, remain);
+ } else {
+ if (IS_NUMERIC_TYPE(pColData->info.type) && asc) {
+ uint8_t* p = pData->pData + tDataTypes[pData->type].bytes * pDumpInfo->rowIndex;
+ memcpy(pColData->pData, p, remain * tDataTypes[pData->type].bytes);
+
+ // null value exists, check one-by-one
+ if (pData->flag != HAS_VALUE) {
+ for (int32_t j = pDumpInfo->rowIndex; rowIndex < remain; j += step, rowIndex++) {
+ uint8_t v = GET_BIT2(pData->pBitMap, j);
+ if (v == 0 || v == 1) {
+ colDataSetNull_f(pColData->nullbitmap, rowIndex);
+ }
+ }
+ }
+ } else {
+ for (int32_t j = pDumpInfo->rowIndex; rowIndex < remain; j += step) {
+ tColDataGetValue(pData, j, &cv);
+ doCopyColVal(pColData, rowIndex++, i, &cv, pSupInfo);
+ }
+ }
}
+
colIndex += 1;
i += 1;
- ASSERT(rowIndex == remain);
} else { // the specified column does not exist in file block, fill with null data
colDataAppendNNULL(pColData, 0, remain);
i += 1;
@@ -843,7 +934,13 @@ static int32_t copyBlockDataToSDataBlock(STsdbReader* pReader, STableBlockScanIn
pResBlock->info.rows = remain;
pDumpInfo->rowIndex += step * remain;
- setBlockAllDumped(pDumpInfo, pBlock->maxKey.ts, pReader->order);
+ if (pDumpInfo->rowIndex >= 0 && pDumpInfo->rowIndex < pBlock->nRow) {
+ int64_t ts = pBlockData->aTSKEY[pDumpInfo->rowIndex];
+ setBlockAllDumped(pDumpInfo, ts, pReader->order);
+ } else {
+ int64_t k = asc? pBlock->maxKey.ts:pBlock->minKey.ts;
+ setBlockAllDumped(pDumpInfo, k, pReader->order);
+ }
double elapsedTime = (taosGetTimestampUs() - st) / 1000.0;
pReader->cost.blockLoadTime += elapsedTime;
@@ -851,7 +948,7 @@ static int32_t copyBlockDataToSDataBlock(STsdbReader* pReader, STableBlockScanIn
int32_t unDumpedRows = asc ? pBlock->nRow - pDumpInfo->rowIndex : pDumpInfo->rowIndex + 1;
tsdbDebug("%p copy file block to sdatablock, global index:%d, table index:%d, brange:%" PRId64 "-%" PRId64
", rows:%d, remain:%d, minVer:%" PRId64 ", maxVer:%" PRId64 ", elapsed time:%.2f ms, %s",
- pReader, pBlockIter->index, pFBlock->tbBlockIdx, pBlock->minKey.ts, pBlock->maxKey.ts, remain, unDumpedRows,
+ pReader, pBlockIter->index, pBlockInfo->tbBlockIdx, pBlock->minKey.ts, pBlock->maxKey.ts, remain, unDumpedRows,
pBlock->minVer, pBlock->maxVer, elapsedTime, pReader->idStr);
return TSDB_CODE_SUCCESS;
@@ -859,71 +956,32 @@ static int32_t copyBlockDataToSDataBlock(STsdbReader* pReader, STableBlockScanIn
static int32_t doLoadFileBlockData(STsdbReader* pReader, SDataBlockIter* pBlockIter, SBlockData* pBlockData) {
int64_t st = taosGetTimestampUs();
- double elapsedTime = 0;
- int32_t code = 0;
SFileDataBlockInfo* pBlockInfo = getCurrentBlockInfo(pBlockIter);
SFileBlockDumpInfo* pDumpInfo = &pReader->status.fBlockDumpInfo;
+ ASSERT(pBlockInfo != NULL);
- if (pBlockInfo != NULL) {
- SBlock* pBlock = getCurrentBlock(pBlockIter);
- code = tsdbReadDataBlock(pReader->pFileReader, pBlock, pBlockData);
- if (code != TSDB_CODE_SUCCESS) {
- tsdbError("%p error occurs in loading file block, global index:%d, table index:%d, brange:%" PRId64 "-%" PRId64
- ", rows:%d, code:%s %s",
- pReader, pBlockIter->index, pBlockInfo->tbBlockIdx, pBlock->minKey.ts, pBlock->maxKey.ts, pBlock->nRow,
- tstrerror(code), pReader->idStr);
- goto _error;
- }
-
- elapsedTime = (taosGetTimestampUs() - st) / 1000.0;
-
- tsdbDebug("%p load file block into buffer, global index:%d, table index:%d, brange:%" PRId64 "-%" PRId64
- ", rows:%d, minVer:%" PRId64 ", maxVer:%" PRId64 ", elapsed time:%.2f ms, %s",
+ SDataBlk* pBlock = getCurrentBlock(pBlockIter);
+ int32_t code = tsdbReadDataBlock(pReader->pFileReader, pBlock, pBlockData);
+ if (code != TSDB_CODE_SUCCESS) {
+ tsdbError("%p error occurs in loading file block, global index:%d, table index:%d, brange:%" PRId64 "-%" PRId64
+ ", rows:%d, code:%s %s",
pReader, pBlockIter->index, pBlockInfo->tbBlockIdx, pBlock->minKey.ts, pBlock->maxKey.ts, pBlock->nRow,
- pBlock->minVer, pBlock->maxVer, elapsedTime, pReader->idStr);
- } else {
-#if 0
- SLastBlockReader* pLastBlockReader = pReader->status.fileIter.pLastBlockReader;
-
- uint64_t uid = pBlockInfo->uid;
- SArray* pBlocks = pLastBlockReader->pBlockL;
-
- pLastBlockReader->currentBlockIndex = -1;
-
- // find the correct SBlockL
- for(int32_t i = 0; i < taosArrayGetSize(pBlocks); ++i) {
- SBlockL* pBlock = taosArrayGet(pBlocks, i);
- if (pBlock->minUid >= uid && pBlock->maxUid <= uid) {
- pLastBlockReader->currentBlockIndex = i;
- break;
- }
- }
+ tstrerror(code), pReader->idStr);
+ return code;
+ }
-// SBlockL* pBlockL = taosArrayGet(pLastBlockReader->pBlockL, *index);
- code = tsdbReadLastBlock(pReader->pFileReader, pBlockL, pBlockData);
- if (code != TSDB_CODE_SUCCESS) {
- tsdbDebug("%p error occurs in loading last block into buffer, last block index:%d, total:%d brange:%" PRId64 "-%" PRId64
- ", rows:%d, minVer:%" PRId64 ", maxVer:%" PRId64 ", code:%s %s",
- pReader, *index, pBlockIter->numOfBlocks.numOfLastBlocks, 0, 0, pBlockL->nRow,
- pBlockL->minVer, pBlockL->maxVer, tstrerror(code), pReader->idStr);
- goto _error;
- }
+ double elapsedTime = (taosGetTimestampUs() - st) / 1000.0;
- tsdbDebug("%p load last file block into buffer, last block index:%d, total:%d brange:%" PRId64 "-%" PRId64
- ", rows:%d, minVer:%" PRId64 ", maxVer:%" PRId64 ", elapsed time:%.2f ms, %s",
- pReader, *index, pBlockIter->numOfBlocks.numOfLastBlocks, 0, 0, pBlockL->nRow,
- pBlockL->minVer, pBlockL->maxVer, elapsedTime, pReader->idStr);
-#endif
- }
+ tsdbDebug("%p load file block into buffer, global index:%d, index in table block list:%d, brange:%" PRId64 "-%" PRId64
+ ", rows:%d, minVer:%" PRId64 ", maxVer:%" PRId64 ", elapsed time:%.2f ms, %s",
+ pReader, pBlockIter->index, pBlockInfo->tbBlockIdx, pBlock->minKey.ts, pBlock->maxKey.ts, pBlock->nRow,
+ pBlock->minVer, pBlock->maxVer, elapsedTime, pReader->idStr);
pReader->cost.blockLoadTime += elapsedTime;
pDumpInfo->allDumped = false;
return TSDB_CODE_SUCCESS;
-
-_error:
- return code;
}
static void cleanupBlockOrderSupporter(SBlockOrderSupporter* pSup) {
@@ -977,11 +1035,11 @@ static int32_t fileDataBlockOrderCompar(const void* pLeft, const void* pRight, v
}
static int32_t doSetCurrentBlock(SDataBlockIter* pBlockIter) {
- SFileDataBlockInfo* pFBlock = getCurrentBlockInfo(pBlockIter);
- if (pFBlock != NULL) {
- STableBlockScanInfo* pScanInfo = taosHashGet(pBlockIter->pTableMap, &pFBlock->uid, sizeof(pFBlock->uid));
- int32_t* mapDataIndex = taosArrayGet(pScanInfo->pBlockList, pFBlock->tbBlockIdx);
- tMapDataGetItemByIdx(&pScanInfo->mapData, *mapDataIndex, &pBlockIter->block, tGetBlock);
+ SFileDataBlockInfo* pBlockInfo = getCurrentBlockInfo(pBlockIter);
+ if (pBlockInfo != NULL) {
+ STableBlockScanInfo* pScanInfo = taosHashGet(pBlockIter->pTableMap, &pBlockInfo->uid, sizeof(pBlockInfo->uid));
+ int32_t* mapDataIndex = taosArrayGet(pScanInfo->pBlockList, pBlockInfo->tbBlockIdx);
+ tMapDataGetItemByIdx(&pScanInfo->mapData, *mapDataIndex, &pBlockIter->block, tGetDataBlk);
}
#if 0
@@ -996,6 +1054,7 @@ static int32_t initBlockIterator(STsdbReader* pReader, SDataBlockIter* pBlockIte
pBlockIter->numOfBlocks = numOfBlocks;
taosArrayClear(pBlockIter->blockList);
+ pBlockIter->pTableMap = pReader->status.pTableMap;
// access data blocks according to the offset of each block in asc/desc order.
int32_t numOfTables = (int32_t)taosHashGetSize(pReader->status.pTableMap);
@@ -1031,12 +1090,12 @@ static int32_t initBlockIterator(STsdbReader* pReader, SDataBlockIter* pBlockIte
}
sup.pDataBlockInfo[sup.numOfTables] = (SBlockOrderWrapper*)buf;
- SBlock block = {0};
+ SDataBlk block = {0};
for (int32_t k = 0; k < num; ++k) {
SBlockOrderWrapper wrapper = {0};
int32_t* mapDataIndex = taosArrayGet(pTableScanInfo->pBlockList, k);
- tMapDataGetItemByIdx(&pTableScanInfo->mapData, *mapDataIndex, &block, tGetBlock);
+ tMapDataGetItemByIdx(&pTableScanInfo->mapData, *mapDataIndex, &block, tGetDataBlk);
wrapper.uid = pTableScanInfo->uid;
wrapper.offset = block.aSubBlock[0].offset;
@@ -1097,8 +1156,8 @@ static int32_t initBlockIterator(STsdbReader* pReader, SDataBlockIter* pBlockIte
}
int64_t et = taosGetTimestampUs();
- tsdbDebug("%p %d data blocks access order completed, elapsed time:%.2f ms %s", pReader, numOfBlocks, (et - st) / 1000.0,
- pReader->idStr);
+ tsdbDebug("%p %d data blocks access order completed, elapsed time:%.2f ms %s", pReader, numOfBlocks,
+ (et - st) / 1000.0, pReader->idStr);
cleanupBlockOrderSupporter(&sup);
taosMemoryFree(pTree);
@@ -1125,15 +1184,15 @@ static bool blockIteratorNext(SDataBlockIter* pBlockIter) {
/**
* This is an two rectangles overlap cases.
*/
-static int32_t dataBlockPartiallyRequired(STimeWindow* pWindow, SVersionRange* pVerRange, SBlock* pBlock) {
+static int32_t dataBlockPartiallyRequired(STimeWindow* pWindow, SVersionRange* pVerRange, SDataBlk* pBlock) {
return (pWindow->ekey < pBlock->maxKey.ts && pWindow->ekey >= pBlock->minKey.ts) ||
(pWindow->skey > pBlock->minKey.ts && pWindow->skey <= pBlock->maxKey.ts) ||
(pVerRange->minVer > pBlock->minVer && pVerRange->minVer <= pBlock->maxVer) ||
(pVerRange->maxVer < pBlock->maxVer && pVerRange->maxVer >= pBlock->minVer);
}
-static SBlock* getNeighborBlockOfSameTable(SFileDataBlockInfo* pFBlockInfo, STableBlockScanInfo* pTableBlockScanInfo,
- int32_t* nextIndex, int32_t order) {
+static SDataBlk* getNeighborBlockOfSameTable(SFileDataBlockInfo* pFBlockInfo, STableBlockScanInfo* pTableBlockScanInfo,
+ int32_t* nextIndex, int32_t order) {
bool asc = ASCENDING_TRAVERSE(order);
if (asc && pFBlockInfo->tbBlockIdx >= taosArrayGetSize(pTableBlockScanInfo->pBlockList) - 1) {
return NULL;
@@ -1146,10 +1205,10 @@ static SBlock* getNeighborBlockOfSameTable(SFileDataBlockInfo* pFBlockInfo, STab
int32_t step = asc ? 1 : -1;
*nextIndex = pFBlockInfo->tbBlockIdx + step;
- SBlock* pBlock = taosMemoryCalloc(1, sizeof(SBlock));
- int32_t* indexInMapdata = taosArrayGet(pTableBlockScanInfo->pBlockList, *nextIndex);
+ SDataBlk* pBlock = taosMemoryCalloc(1, sizeof(SDataBlk));
+ int32_t* indexInMapdata = taosArrayGet(pTableBlockScanInfo->pBlockList, *nextIndex);
- tMapDataGetItemByIdx(&pTableBlockScanInfo->mapData, *indexInMapdata, pBlock, tGetBlock);
+ tMapDataGetItemByIdx(&pTableBlockScanInfo->mapData, *indexInMapdata, pBlock, tGetDataBlk);
return pBlock;
}
@@ -1192,7 +1251,7 @@ static int32_t setFileBlockActiveInBlockIter(SDataBlockIter* pBlockIter, int32_t
return TSDB_CODE_SUCCESS;
}
-static bool overlapWithNeighborBlock(SBlock* pBlock, SBlock* pNeighbor, int32_t order) {
+static bool overlapWithNeighborBlock(SDataBlk* pBlock, SDataBlk* pNeighbor, int32_t order) {
// it is the last block in current file, no chance to overlap with neighbor blocks.
if (ASCENDING_TRAVERSE(order)) {
return pBlock->maxKey.ts == pNeighbor->minKey.ts;
@@ -1201,19 +1260,19 @@ static bool overlapWithNeighborBlock(SBlock* pBlock, SBlock* pNeighbor, int32_t
}
}
-static bool bufferDataInFileBlockGap(int32_t order, TSDBKEY key, SBlock* pBlock) {
+static bool bufferDataInFileBlockGap(int32_t order, TSDBKEY key, SDataBlk* pBlock) {
bool ascScan = ASCENDING_TRAVERSE(order);
return (ascScan && (key.ts != TSKEY_INITIAL_VAL && key.ts <= pBlock->minKey.ts)) ||
(!ascScan && (key.ts != TSKEY_INITIAL_VAL && key.ts >= pBlock->maxKey.ts));
}
-static bool keyOverlapFileBlock(TSDBKEY key, SBlock* pBlock, SVersionRange* pVerRange) {
+static bool keyOverlapFileBlock(TSDBKEY key, SDataBlk* pBlock, SVersionRange* pVerRange) {
return (key.ts >= pBlock->minKey.ts && key.ts <= pBlock->maxKey.ts) && (pBlock->maxVer >= pVerRange->minVer) &&
(pBlock->minVer <= pVerRange->maxVer);
}
-static bool doCheckforDatablockOverlap(STableBlockScanInfo* pBlockScanInfo, const SBlock* pBlock) {
+static bool doCheckforDatablockOverlap(STableBlockScanInfo* pBlockScanInfo, const SDataBlk* pBlock) {
size_t num = taosArrayGetSize(pBlockScanInfo->delSkyline);
for (int32_t i = pBlockScanInfo->fileDelIndex; i < num; i += 1) {
@@ -1247,7 +1306,7 @@ static bool doCheckforDatablockOverlap(STableBlockScanInfo* pBlockScanInfo, cons
return false;
}
-static bool overlapWithDelSkyline(STableBlockScanInfo* pBlockScanInfo, const SBlock* pBlock, int32_t order) {
+static bool overlapWithDelSkyline(STableBlockScanInfo* pBlockScanInfo, const SDataBlk* pBlock, int32_t order) {
if (pBlockScanInfo->delSkyline == NULL) {
return false;
}
@@ -1277,53 +1336,84 @@ static bool overlapWithDelSkyline(STableBlockScanInfo* pBlockScanInfo, const SBl
}
}
-// 1. the version of all rows should be less than the endVersion
-// 2. current block should not overlap with next neighbor block
-// 3. current timestamp should not be overlap with each other
-// 4. output buffer should be large enough to hold all rows in current block
-// 5. delete info should not overlap with current block data
-static bool fileBlockShouldLoad(STsdbReader* pReader, SFileDataBlockInfo* pFBlock, SBlock* pBlock,
- STableBlockScanInfo* pScanInfo, TSDBKEY key, SLastBlockReader* pLastBlockReader) {
- int32_t neighborIndex = 0;
- SBlock* pNeighbor = getNeighborBlockOfSameTable(pFBlock, pScanInfo, &neighborIndex, pReader->order);
+typedef struct {
+ bool overlapWithNeighborBlock;
+ bool hasDupTs;
+ bool overlapWithDelInfo;
+ bool overlapWithLastBlock;
+ bool overlapWithKeyInBuf;
+ bool partiallyRequired;
+ bool moreThanCapcity;
+} SDataBlockToLoadInfo;
+
+static void getBlockToLoadInfo(SDataBlockToLoadInfo* pInfo, SFileDataBlockInfo* pBlockInfo, SDataBlk* pBlock,
+ STableBlockScanInfo* pScanInfo, TSDBKEY keyInBuf, SLastBlockReader* pLastBlockReader,
+ STsdbReader* pReader) {
+ int32_t neighborIndex = 0;
+ SDataBlk* pNeighbor = getNeighborBlockOfSameTable(pBlockInfo, pScanInfo, &neighborIndex, pReader->order);
// overlap with neighbor
- bool overlapWithNeighbor = false;
if (pNeighbor) {
- overlapWithNeighbor = overlapWithNeighborBlock(pBlock, pNeighbor, pReader->order);
+ pInfo->overlapWithNeighborBlock = overlapWithNeighborBlock(pBlock, pNeighbor, pReader->order);
taosMemoryFree(pNeighbor);
}
// has duplicated ts of different version in this block
- bool hasDup = (pBlock->nSubBlock == 1) ? pBlock->hasDup : true;
- bool overlapWithDel = overlapWithDelSkyline(pScanInfo, pBlock, pReader->order);
+ pInfo->hasDupTs = (pBlock->nSubBlock == 1) ? pBlock->hasDup : true;
+ pInfo->overlapWithDelInfo = overlapWithDelSkyline(pScanInfo, pBlock, pReader->order);
// todo here we need to each key in the last files to identify if it is really overlapped with last block
+  // TODO(review): the last-block overlap check below is disabled (#if 0), so overlapWithlastBlock stays false for now
bool overlapWithlastBlock = false;
- if (taosArrayGetSize(pLastBlockReader->pBlockL) > 0 && (pLastBlockReader->currentBlockIndex != -1)) {
- SBlockL *pBlockL = taosArrayGet(pLastBlockReader->pBlockL, pLastBlockReader->currentBlockIndex);
- overlapWithlastBlock = !(pBlock->maxKey.ts < pBlockL->minKey || pBlock->minKey.ts > pBlockL->maxKey);
+#if 0
+ if (taosArrayGetSize(pLastBlockReader->pSstBlk) > 0 && (pLastBlockReader->currentBlockIndex != -1)) {
+ SSttBlk* pSstBlk = taosArrayGet(pLastBlockReader->pSstBlk, pLastBlockReader->currentBlockIndex);
+ overlapWithlastBlock = !(pBlock->maxKey.ts < pSstBlk->minKey || pBlock->minKey.ts > pSstBlk->maxKey);
}
+#endif
- bool moreThanOutputCapacity = pBlock->nRow > pReader->capacity;
- bool partiallyRequired = dataBlockPartiallyRequired(&pReader->window, &pReader->verRange, pBlock);
- bool overlapWithKey = keyOverlapFileBlock(key, pBlock, &pReader->verRange);
+ pInfo->moreThanCapcity = pBlock->nRow > pReader->capacity;
+ pInfo->partiallyRequired = dataBlockPartiallyRequired(&pReader->window, &pReader->verRange, pBlock);
+ pInfo->overlapWithKeyInBuf = keyOverlapFileBlock(keyInBuf, pBlock, &pReader->verRange);
+}
- bool loadDataBlock = (overlapWithNeighbor || hasDup || partiallyRequired || overlapWithKey ||
- moreThanOutputCapacity || overlapWithDel || overlapWithlastBlock);
+// 1. the version of all rows should be less than the endVersion
+// 2. current block should not overlap with next neighbor block
+// 3. current timestamp should not be overlap with each other
+// 4. output buffer should be large enough to hold all rows in current block
+// 5. delete info should not overlap with current block data
+// 6. current block should not contain the duplicated ts
+static bool fileBlockShouldLoad(STsdbReader* pReader, SFileDataBlockInfo* pBlockInfo, SDataBlk* pBlock,
+ STableBlockScanInfo* pScanInfo, TSDBKEY keyInBuf, SLastBlockReader* pLastBlockReader) {
+ SDataBlockToLoadInfo info = {0};
+ getBlockToLoadInfo(&info, pBlockInfo, pBlock, pScanInfo, keyInBuf, pLastBlockReader, pReader);
+
+ bool loadDataBlock =
+ (info.overlapWithNeighborBlock || info.hasDupTs || info.partiallyRequired || info.overlapWithKeyInBuf ||
+ info.moreThanCapcity || info.overlapWithDelInfo || info.overlapWithLastBlock);
// log the reason why load the datablock for profile
if (loadDataBlock) {
tsdbDebug("%p uid:%" PRIu64
" need to load the datablock, overlapwithneighborblock:%d, hasDup:%d, partiallyRequired:%d, "
"overlapWithKey:%d, greaterThanBuf:%d, overlapWithDel:%d, overlapWithlastBlock:%d, %s",
- pReader, pFBlock->uid, overlapWithNeighbor, hasDup, partiallyRequired, overlapWithKey,
- moreThanOutputCapacity, overlapWithDel, overlapWithlastBlock, pReader->idStr);
+ pReader, pBlockInfo->uid, info.overlapWithNeighborBlock, info.hasDupTs, info.partiallyRequired,
+ info.overlapWithKeyInBuf, info.moreThanCapcity, info.overlapWithDelInfo, info.overlapWithLastBlock,
+ pReader->idStr);
}
return loadDataBlock;
}
+static bool isCleanFileDataBlock(STsdbReader* pReader, SFileDataBlockInfo* pBlockInfo, SDataBlk* pBlock,
+ STableBlockScanInfo* pScanInfo, TSDBKEY keyInBuf, SLastBlockReader* pLastBlockReader) {
+ SDataBlockToLoadInfo info = {0};
+ getBlockToLoadInfo(&info, pBlockInfo, pBlock, pScanInfo, keyInBuf, pLastBlockReader, pReader);
+ bool isCleanFileBlock = !(info.overlapWithNeighborBlock || info.hasDupTs || info.overlapWithKeyInBuf ||
+ info.overlapWithDelInfo || info.overlapWithLastBlock);
+ return isCleanFileBlock;
+}
+
static int32_t buildDataBlockFromBuf(STsdbReader* pReader, STableBlockScanInfo* pBlockScanInfo, int64_t endKey) {
if (!(pBlockScanInfo->iiter.hasVal || pBlockScanInfo->iter.hasVal)) {
return TSDB_CODE_SUCCESS;
@@ -1369,6 +1459,38 @@ static bool tryCopyDistinctRowFromFileBlock(STsdbReader* pReader, SBlockData* pB
return false;
}
+static bool nextRowFromLastBlocks(SLastBlockReader* pLastBlockReader, STableBlockScanInfo* pBlockScanInfo) {
+ while (1) {
+ bool hasVal = tMergeTreeNext(&pLastBlockReader->mergeTree);
+ if (!hasVal) {
+ return false;
+ }
+
+ TSDBROW row = tMergeTreeGetRow(&pLastBlockReader->mergeTree);
+ TSDBKEY k = TSDBROW_KEY(&row);
+ if (!hasBeenDropped(pBlockScanInfo->delSkyline, &pBlockScanInfo->lastBlockDelIndex, &k, pLastBlockReader->order)) {
+ return true;
+ }
+ }
+}
+
+static bool tryCopyDistinctRowFromSttBlock(TSDBROW* fRow, SLastBlockReader* pLastBlockReader,
+ STableBlockScanInfo* pScanInfo, int64_t ts, STsdbReader* pReader) {
+ bool hasVal = nextRowFromLastBlocks(pLastBlockReader, pScanInfo);
+ if (hasVal) {
+ int64_t next1 = getCurrentKeyInLastBlock(pLastBlockReader);
+ if (next1 != ts) {
+ doAppendRowFromFileBlock(pReader->pResBlock, pReader, fRow->pBlockData, fRow->iRow);
+ return true;
+ }
+ } else {
+ doAppendRowFromFileBlock(pReader->pResBlock, pReader, fRow->pBlockData, fRow->iRow);
+ return true;
+ }
+
+ return false;
+}
+
static FORCE_INLINE STSchema* doGetSchemaForTSRow(int32_t sversion, STsdbReader* pReader, uint64_t uid) {
// always set the newest schema version in pReader->pSchema
if (pReader->pSchema == NULL) {
@@ -1394,7 +1516,7 @@ static FORCE_INLINE STSchema* doGetSchemaForTSRow(int32_t sversion, STsdbReader*
return pReader->pMemSchema;
}
-static int32_t doMergeBufAndFileRows_Rv(STsdbReader* pReader, STableBlockScanInfo* pBlockScanInfo, TSDBROW* pRow,
+static int32_t doMergeBufAndFileRows(STsdbReader* pReader, STableBlockScanInfo* pBlockScanInfo, TSDBROW* pRow,
SIterInfo* pIter, int64_t key, SLastBlockReader* pLastBlockReader) {
SRowMerger merge = {0};
STSRow* pTSRow = NULL;
@@ -1402,19 +1524,17 @@ static int32_t doMergeBufAndFileRows_Rv(STsdbReader* pReader, STableBlockScanInf
SFileBlockDumpInfo* pDumpInfo = &pReader->status.fBlockDumpInfo;
int64_t tsLast = INT64_MIN;
- if ((pLastBlockReader->lastBlockData.nRow > 0) && hasDataInLastBlock(pLastBlockReader)) {
+ if (hasDataInLastBlock(pLastBlockReader)) {
tsLast = getCurrentKeyInLastBlock(pLastBlockReader);
}
- TSDBKEY k = TSDBROW_KEY(pRow);
- TSDBROW fRow = tsdbRowFromBlockData(pBlockData, pDumpInfo->rowIndex);
-
- SBlockData* pLastBlockData = &pLastBlockReader->lastBlockData;
+ TSDBKEY k = TSDBROW_KEY(pRow);
+ TSDBROW fRow = tsdbRowFromBlockData(pBlockData, pDumpInfo->rowIndex);
int64_t minKey = 0;
if (pReader->order == TSDB_ORDER_ASC) {
- minKey = INT64_MAX; // chosen the minimum value
- if (minKey > tsLast && pLastBlockReader->lastBlockData.nRow > 0) {
+    minKey = INT64_MAX;  // choose the minimum value
+ if (minKey > tsLast && hasDataInLastBlock(pLastBlockReader)) {
minKey = tsLast;
}
@@ -1427,7 +1547,7 @@ static int32_t doMergeBufAndFileRows_Rv(STsdbReader* pReader, STableBlockScanInf
}
} else {
minKey = INT64_MIN;
- if (minKey < tsLast && pLastBlockReader->lastBlockData.nRow > 0) {
+ if (minKey < tsLast && hasDataInLastBlock(pLastBlockReader)) {
minKey = tsLast;
}
@@ -1443,7 +1563,7 @@ static int32_t doMergeBufAndFileRows_Rv(STsdbReader* pReader, STableBlockScanInf
bool init = false;
// ASC: file block ---> last block -----> imem -----> mem
- //DESC: mem -----> imem -----> last block -----> file block
+ // DESC: mem -----> imem -----> last block -----> file block
if (pReader->order == TSDB_ORDER_ASC) {
if (minKey == key) {
init = true;
@@ -1452,7 +1572,7 @@ static int32_t doMergeBufAndFileRows_Rv(STsdbReader* pReader, STableBlockScanInf
}
if (minKey == tsLast) {
- TSDBROW fRow1 = tsdbRowFromBlockData(pLastBlockData, *pLastBlockReader->rowIndex);
+ TSDBROW fRow1 = tMergeTreeGetRow(&pLastBlockReader->mergeTree);
if (init) {
tRowMerge(&merge, &fRow1);
} else {
@@ -1481,7 +1601,7 @@ static int32_t doMergeBufAndFileRows_Rv(STsdbReader* pReader, STableBlockScanInf
}
if (minKey == tsLast) {
- TSDBROW fRow1 = tsdbRowFromBlockData(pLastBlockData, *pLastBlockReader->rowIndex);
+ TSDBROW fRow1 = tMergeTreeGetRow(&pLastBlockReader->mergeTree);
if (init) {
tRowMerge(&merge, &fRow1);
} else {
@@ -1502,7 +1622,11 @@ static int32_t doMergeBufAndFileRows_Rv(STsdbReader* pReader, STableBlockScanInf
}
}
- tRowMergerGetRow(&merge, &pTSRow);
+ int32_t code = tRowMergerGetRow(&merge, &pTSRow);
+ if (code != TSDB_CODE_SUCCESS) {
+ return code;
+ }
+
doAppendRowFromTSRow(pReader->pResBlock, pReader, pTSRow, pBlockScanInfo->uid);
taosMemoryFree(pTSRow);
@@ -1510,94 +1634,125 @@ static int32_t doMergeBufAndFileRows_Rv(STsdbReader* pReader, STableBlockScanInf
return TSDB_CODE_SUCCESS;
}
-#if 0
-static int32_t doMergeBufAndFileRows(STsdbReader* pReader, STableBlockScanInfo* pBlockScanInfo, TSDBROW* pRow,
- SIterInfo* pIter, int64_t key, SLastBlockReader* pLastBlockReader) {
- SRowMerger merge = {0};
- STSRow* pTSRow = NULL;
- SBlockData* pBlockData = &pReader->status.fileBlockData;
+static int32_t doMergeFileBlockAndLastBlock(SLastBlockReader* pLastBlockReader, STsdbReader* pReader,
+ STableBlockScanInfo* pBlockScanInfo, SBlockData* pBlockData,
+ bool mergeBlockData) {
SFileBlockDumpInfo* pDumpInfo = &pReader->status.fBlockDumpInfo;
+ int64_t tsLastBlock = getCurrentKeyInLastBlock(pLastBlockReader);
- TSDBKEY k = TSDBROW_KEY(pRow);
- TSDBROW fRow = tsdbRowFromBlockData(pBlockData, pDumpInfo->rowIndex);
- SArray* pDelList = pBlockScanInfo->delSkyline;
- bool freeTSRow = false;
- uint64_t uid = pBlockScanInfo->uid;
+ STSRow* pTSRow = NULL;
+ SRowMerger merge = {0};
+ TSDBROW fRow = tMergeTreeGetRow(&pLastBlockReader->mergeTree);
- // ascending order traverse
- if (ASCENDING_TRAVERSE(pReader->order)) {
- if (key < k.ts) {
- // imem & mem are all empty, only file exist
- if (tryCopyDistinctRowFromFileBlock(pReader, pBlockData, key, pDumpInfo)) {
- return TSDB_CODE_SUCCESS;
- } else {
- tRowMergerInit(&merge, &fRow, pReader->pSchema);
- doMergeRowsInFileBlocks(pBlockData, pBlockScanInfo, pReader, &merge);
- tRowMergerGetRow(&merge, &pTSRow);
- freeTSRow = true;
- }
- } else if (k.ts < key) { // k.ts < key
- doMergeMemTableMultiRows(pRow, pBlockScanInfo->uid, pIter, pDelList, &pTSRow, pReader, &freeTSRow);
- } else { // k.ts == key, ascending order: file block ----> imem rows -----> mem rows
+ // only last block exists
+ if ((!mergeBlockData) || (tsLastBlock != pBlockData->aTSKEY[pDumpInfo->rowIndex])) {
+ if (tryCopyDistinctRowFromSttBlock(&fRow, pLastBlockReader, pBlockScanInfo, tsLastBlock, pReader)) {
+ return TSDB_CODE_SUCCESS;
+ } else {
tRowMergerInit(&merge, &fRow, pReader->pSchema);
- doMergeRowsInFileBlocks(pBlockData, pBlockScanInfo, pReader, &merge);
- tRowMerge(&merge, pRow);
- doMergeRowsInBuf(pIter, pBlockScanInfo->uid, k.ts, pBlockScanInfo->delSkyline, &merge, pReader);
+ TSDBROW fRow1 = tMergeTreeGetRow(&pLastBlockReader->mergeTree);
+ tRowMerge(&merge, &fRow1);
+ doMergeRowsInLastBlock(pLastBlockReader, pBlockScanInfo, tsLastBlock, &merge);
- tRowMergerGetRow(&merge, &pTSRow);
- freeTSRow = true;
- }
- } else { // descending order scan
- if (key < k.ts) {
- doMergeMemTableMultiRows(pRow, pBlockScanInfo->uid, pIter, pDelList, &pTSRow, pReader, &freeTSRow);
- } else if (k.ts < key) {
- if (tryCopyDistinctRowFromFileBlock(pReader, pBlockData, key, pDumpInfo)) {
- return TSDB_CODE_SUCCESS;
- } else {
- tRowMergerInit(&merge, &fRow, pReader->pSchema);
- doMergeRowsInFileBlocks(pBlockData, pBlockScanInfo, pReader, &merge);
- tRowMergerGetRow(&merge, &pTSRow);
- freeTSRow = true;
+ int32_t code = tRowMergerGetRow(&merge, &pTSRow);
+ if (code != TSDB_CODE_SUCCESS) {
+ return code;
}
- } else { // descending order: mem rows -----> imem rows ------> file block
- STSchema* pSchema = doGetSchemaForTSRow(TSDBROW_SVERSION(pRow), pReader, pBlockScanInfo->uid);
- tRowMergerInit(&merge, pRow, pSchema);
- doMergeRowsInBuf(pIter, pBlockScanInfo->uid, k.ts, pBlockScanInfo->delSkyline, &merge, pReader);
+ doAppendRowFromTSRow(pReader->pResBlock, pReader, pTSRow, pBlockScanInfo->uid);
- tRowMerge(&merge, &fRow);
+ taosMemoryFree(pTSRow);
+ tRowMergerClear(&merge);
+ }
+  } else {  // ts of the last-block row matches the data-block row: merge them together
+ tRowMergerInit(&merge, &fRow, pReader->pSchema);
+ doMergeRowsInLastBlock(pLastBlockReader, pBlockScanInfo, tsLastBlock, &merge);
+ ASSERT(mergeBlockData);
+
+ // merge with block data if ts == key
+ if (tsLastBlock == pBlockData->aTSKEY[pDumpInfo->rowIndex]) {
doMergeRowsInFileBlocks(pBlockData, pBlockScanInfo, pReader, &merge);
+ }
- tRowMergerGetRow(&merge, &pTSRow);
- freeTSRow = true;
+ int32_t code = tRowMergerGetRow(&merge, &pTSRow);
+ if (code != TSDB_CODE_SUCCESS) {
+ return code;
}
- }
- tRowMergerClear(&merge);
- doAppendRowFromTSRow(pReader->pResBlock, pReader, pTSRow, uid);
+ doAppendRowFromTSRow(pReader->pResBlock, pReader, pTSRow, pBlockScanInfo->uid);
- if (freeTSRow) {
taosMemoryFree(pTSRow);
+ tRowMergerClear(&merge);
}
return TSDB_CODE_SUCCESS;
}
-#endif
+static int32_t mergeFileBlockAndLastBlock(STsdbReader* pReader, SLastBlockReader* pLastBlockReader, int64_t key,
+ STableBlockScanInfo* pBlockScanInfo, SBlockData* pBlockData) {
+ SFileBlockDumpInfo* pDumpInfo = &pReader->status.fBlockDumpInfo;
+
+ if (pBlockData->nRow > 0) {
+ // no last block available, only data block exists
+ if (!hasDataInLastBlock(pLastBlockReader)) {
+ return mergeRowsInFileBlocks(pBlockData, pBlockScanInfo, key, pReader);
+ }
+
+ // row in last file block
+ TSDBROW fRow = tsdbRowFromBlockData(pBlockData, pDumpInfo->rowIndex);
+ int64_t ts = getCurrentKeyInLastBlock(pLastBlockReader);
+ ASSERT(ts >= key);
+
+ if (ASCENDING_TRAVERSE(pReader->order)) {
+ if (key < ts) { // imem, mem are all empty, file blocks (data blocks and last block) exist
+ return mergeRowsInFileBlocks(pBlockData, pBlockScanInfo, key, pReader);
+ } else if (key == ts) {
+ STSRow* pTSRow = NULL;
+ SRowMerger merge = {0};
+
+ tRowMergerInit(&merge, &fRow, pReader->pSchema);
+ doMergeRowsInFileBlocks(pBlockData, pBlockScanInfo, pReader, &merge);
+
+ TSDBROW fRow1 = tMergeTreeGetRow(&pLastBlockReader->mergeTree);
+ tRowMerge(&merge, &fRow1);
+
+ doMergeRowsInLastBlock(pLastBlockReader, pBlockScanInfo, ts, &merge);
-static int32_t doMergeMultiLevelRowsRv(STsdbReader* pReader, STableBlockScanInfo* pBlockScanInfo, SBlockData* pBlockData, SLastBlockReader* pLastBlockReader) {
+ int32_t code = tRowMergerGetRow(&merge, &pTSRow);
+ if (code != TSDB_CODE_SUCCESS) {
+ return code;
+ }
+
+ doAppendRowFromTSRow(pReader->pResBlock, pReader, pTSRow, pBlockScanInfo->uid);
+
+ taosMemoryFree(pTSRow);
+ tRowMergerClear(&merge);
+ return code;
+ } else {
+ ASSERT(0);
+ return TSDB_CODE_SUCCESS;
+ }
+ } else { // desc order
+ return doMergeFileBlockAndLastBlock(pLastBlockReader, pReader, pBlockScanInfo, pBlockData, true);
+ }
+ } else { // only last block exists
+ return doMergeFileBlockAndLastBlock(pLastBlockReader, pReader, pBlockScanInfo, NULL, false);
+ }
+}
+
+static int32_t doMergeMultiLevelRows(STsdbReader* pReader, STableBlockScanInfo* pBlockScanInfo, SBlockData* pBlockData,
+ SLastBlockReader* pLastBlockReader) {
SRowMerger merge = {0};
STSRow* pTSRow = NULL;
SFileBlockDumpInfo* pDumpInfo = &pReader->status.fBlockDumpInfo;
SArray* pDelList = pBlockScanInfo->delSkyline;
- TSDBROW* pRow = getValidRow(&pBlockScanInfo->iter, pDelList, pReader);
- TSDBROW* piRow = getValidRow(&pBlockScanInfo->iiter, pDelList, pReader);
+ TSDBROW* pRow = getValidMemRow(&pBlockScanInfo->iter, pDelList, pReader);
+ TSDBROW* piRow = getValidMemRow(&pBlockScanInfo->iiter, pDelList, pReader);
ASSERT(pRow != NULL && piRow != NULL);
- SBlockData* pLastBlockData = &pLastBlockReader->lastBlockData;
int64_t tsLast = INT64_MIN;
if (hasDataInLastBlock(pLastBlockReader)) {
tsLast = getCurrentKeyInLastBlock(pLastBlockReader);
@@ -1608,7 +1763,7 @@ static int32_t doMergeMultiLevelRowsRv(STsdbReader* pReader, STableBlockScanInfo
TSDBKEY k = TSDBROW_KEY(pRow);
TSDBKEY ik = TSDBROW_KEY(piRow);
- int64_t minKey = 0;//INT64_MAX;
+ int64_t minKey = 0;
if (ASCENDING_TRAVERSE(pReader->order)) {
minKey = INT64_MAX; // let's find the minimum
if (minKey > k.ts) {
@@ -1627,7 +1782,7 @@ static int32_t doMergeMultiLevelRowsRv(STsdbReader* pReader, STableBlockScanInfo
minKey = tsLast;
}
} else {
- minKey = INT64_MIN; // let find the maximum ts value
+    minKey = INT64_MIN;  // let's find the maximum ts value
if (minKey < k.ts) {
minKey = k.ts;
}
@@ -1658,7 +1813,7 @@ static int32_t doMergeMultiLevelRowsRv(STsdbReader* pReader, STableBlockScanInfo
}
if (minKey == tsLast) {
- TSDBROW fRow1 = tsdbRowFromBlockData(pLastBlockData, *pLastBlockReader->rowIndex);
+ TSDBROW fRow1 = tMergeTreeGetRow(&pLastBlockReader->mergeTree);
if (init) {
tRowMerge(&merge, &fRow1);
} else {
@@ -1708,7 +1863,7 @@ static int32_t doMergeMultiLevelRowsRv(STsdbReader* pReader, STableBlockScanInfo
}
if (minKey == tsLast) {
- TSDBROW fRow1 = tsdbRowFromBlockData(pLastBlockData, *pLastBlockReader->rowIndex);
+ TSDBROW fRow1 = tMergeTreeGetRow(&pLastBlockReader->mergeTree);
if (init) {
tRowMerge(&merge, &fRow1);
} else {
@@ -1729,12 +1884,16 @@ static int32_t doMergeMultiLevelRowsRv(STsdbReader* pReader, STableBlockScanInfo
}
}
- tRowMergerGetRow(&merge, &pTSRow);
+ int32_t code = tRowMergerGetRow(&merge, &pTSRow);
+ if (code != TSDB_CODE_SUCCESS) {
+ return code;
+ }
+
doAppendRowFromTSRow(pReader->pResBlock, pReader, pTSRow, pBlockScanInfo->uid);
taosMemoryFree(pTSRow);
tRowMergerClear(&merge);
- return TSDB_CODE_SUCCESS;
+ return code;
}
#if 0
@@ -1745,8 +1904,8 @@ static int32_t doMergeThreeLevelRows(STsdbReader* pReader, STableBlockScanInfo*
SFileBlockDumpInfo* pDumpInfo = &pReader->status.fBlockDumpInfo;
SArray* pDelList = pBlockScanInfo->delSkyline;
- TSDBROW* pRow = getValidRow(&pBlockScanInfo->iter, pDelList, pReader);
- TSDBROW* piRow = getValidRow(&pBlockScanInfo->iiter, pDelList, pReader);
+ TSDBROW* pRow = getValidMemRow(&pBlockScanInfo->iter, pDelList, pReader);
+ TSDBROW* piRow = getValidMemRow(&pBlockScanInfo->iiter, pDelList, pReader);
ASSERT(pRow != NULL && piRow != NULL);
int64_t key = pBlockData->aTSKEY[pDumpInfo->rowIndex];
@@ -1882,6 +2041,70 @@ static int32_t doMergeThreeLevelRows(STsdbReader* pReader, STableBlockScanInfo*
}
#endif
+static int32_t initMemDataIterator(STableBlockScanInfo* pBlockScanInfo, STsdbReader* pReader) {
+ if (pBlockScanInfo->iterInit) {
+ return TSDB_CODE_SUCCESS;
+ }
+
+ int32_t code = TSDB_CODE_SUCCESS;
+
+ TSDBKEY startKey = {0};
+ if (ASCENDING_TRAVERSE(pReader->order)) {
+ startKey = (TSDBKEY){.ts = pReader->window.skey, .version = pReader->verRange.minVer};
+ } else {
+ startKey = (TSDBKEY){.ts = pReader->window.ekey, .version = pReader->verRange.maxVer};
+ }
+
+ int32_t backward = (!ASCENDING_TRAVERSE(pReader->order));
+
+ STbData* d = NULL;
+ if (pReader->pReadSnap->pMem != NULL) {
+ d = tsdbGetTbDataFromMemTable(pReader->pReadSnap->pMem, pReader->suid, pBlockScanInfo->uid);
+ if (d != NULL) {
+ code = tsdbTbDataIterCreate(d, &startKey, backward, &pBlockScanInfo->iter.iter);
+ if (code == TSDB_CODE_SUCCESS) {
+ pBlockScanInfo->iter.hasVal = (tsdbTbDataIterGet(pBlockScanInfo->iter.iter) != NULL);
+
+ tsdbDebug("%p uid:%" PRId64 ", check data in mem from skey:%" PRId64 ", order:%d, ts range in buf:%" PRId64
+ "-%" PRId64 " %s",
+ pReader, pBlockScanInfo->uid, startKey.ts, pReader->order, d->minKey, d->maxKey, pReader->idStr);
+ } else {
+        tsdbError("%p uid:%" PRId64 ", failed to create iterator for mem, code:%s, %s", pReader, pBlockScanInfo->uid,
+ tstrerror(code), pReader->idStr);
+ return code;
+ }
+ }
+ } else {
+ tsdbDebug("%p uid:%" PRId64 ", no data in mem, %s", pReader, pBlockScanInfo->uid, pReader->idStr);
+ }
+
+ STbData* di = NULL;
+ if (pReader->pReadSnap->pIMem != NULL) {
+ di = tsdbGetTbDataFromMemTable(pReader->pReadSnap->pIMem, pReader->suid, pBlockScanInfo->uid);
+ if (di != NULL) {
+ code = tsdbTbDataIterCreate(di, &startKey, backward, &pBlockScanInfo->iiter.iter);
+ if (code == TSDB_CODE_SUCCESS) {
+ pBlockScanInfo->iiter.hasVal = (tsdbTbDataIterGet(pBlockScanInfo->iiter.iter) != NULL);
+
+ tsdbDebug("%p uid:%" PRId64 ", check data in imem from skey:%" PRId64 ", order:%d, ts range in buf:%" PRId64
+ "-%" PRId64 " %s",
+ pReader, pBlockScanInfo->uid, startKey.ts, pReader->order, di->minKey, di->maxKey, pReader->idStr);
+ } else {
+        tsdbError("%p uid:%" PRId64 ", failed to create iterator for imem, code:%s, %s", pReader, pBlockScanInfo->uid,
+ tstrerror(code), pReader->idStr);
+ return code;
+ }
+ }
+ } else {
+ tsdbDebug("%p uid:%" PRId64 ", no data in imem, %s", pReader, pBlockScanInfo->uid, pReader->idStr);
+ }
+
+ initDelSkylineIterator(pBlockScanInfo, pReader, d, di);
+
+ pBlockScanInfo->iterInit = true;
+ return TSDB_CODE_SUCCESS;
+}
+
static bool isValidFileBlockRow(SBlockData* pBlockData, SFileBlockDumpInfo* pDumpInfo,
STableBlockScanInfo* pBlockScanInfo, STsdbReader* pReader) {
// it is an multi-table data block
@@ -1911,235 +2134,134 @@ static bool isValidFileBlockRow(SBlockData* pBlockData, SFileBlockDumpInfo* pDum
return true;
}
-static bool outOfTimeWindow(int64_t ts, STimeWindow* pWindow) { return (ts > pWindow->ekey) || (ts < pWindow->skey); }
-
-static void initLastBlockReader(SLastBlockReader* pLastBlockReader, uint64_t uid, int16_t* startPos) {
- pLastBlockReader->uid = uid;
- pLastBlockReader->rowIndex = startPos;
-
- if (*startPos == -1) {
- if (ASCENDING_TRAVERSE(pLastBlockReader->order)) {
- // do nothing
- } else {
- *startPos = pLastBlockReader->lastBlockData.nRow;
- }
- }
-}
-
-static void setAllRowsChecked(SLastBlockReader *pLastBlockReader) {
- *pLastBlockReader->rowIndex = ALL_ROWS_CHECKED_INDEX;
-}
-
-static bool nextRowInLastBlock(SLastBlockReader *pLastBlockReader, STableBlockScanInfo* pBlockScanInfo) {
- int32_t step = (pLastBlockReader->order == TSDB_ORDER_ASC) ? 1 : -1;
- if (*pLastBlockReader->rowIndex == ALL_ROWS_CHECKED_INDEX) {
- return false;
- }
-
- *(pLastBlockReader->rowIndex) += step;
-
- SBlockData* pBlockData = &pLastBlockReader->lastBlockData;
- for(int32_t i = *(pLastBlockReader->rowIndex); i < pBlockData->nRow && i >= 0; i += step) {
- if (pBlockData->aUid != NULL && pBlockData->aUid[i] != pLastBlockReader->uid) {
- continue;
- }
-
- int64_t ts = pBlockData->aTSKEY[i];
- if (ts < pLastBlockReader->window.skey) {
- continue;
- }
-
- int64_t ver = pBlockData->aVersion[i];
- if (ver < pLastBlockReader->verRange.minVer) {
- continue;
- }
-
- // no data any more, todo opt handle desc case
- if (ts > pLastBlockReader->window.ekey) {
- continue;
- }
-
- // todo opt handle desc case
- if (ver > pLastBlockReader->verRange.maxVer) {
- continue;
- }
-
- TSDBKEY k = {.ts = ts, .version = ver};
- if (hasBeenDropped(pBlockScanInfo->delSkyline, &pBlockScanInfo->lastBlockDelIndex, &k, pLastBlockReader->order)) {
- continue;
- }
-
- *(pLastBlockReader->rowIndex) = i;
+static bool initLastBlockReader(SLastBlockReader* pLBlockReader, STableBlockScanInfo* pScanInfo, STsdbReader* pReader) {
+ // the last block reader has been initialized for this table.
+ if (pLBlockReader->uid == pScanInfo->uid) {
return true;
}
- // set all data is consumed in last block
- setAllRowsChecked(pLastBlockReader);
- return false;
-}
-
-static int64_t getCurrentKeyInLastBlock(SLastBlockReader* pLastBlockReader) {
- SBlockData* pBlockData = &pLastBlockReader->lastBlockData;
- return pBlockData->aTSKEY[*pLastBlockReader->rowIndex];
-}
-
-static bool hasDataInLastBlock(SLastBlockReader* pLastBlockReader) {
- if (*pLastBlockReader->rowIndex == ALL_ROWS_CHECKED_INDEX) {
- return false;
+ if (pLBlockReader->uid != 0) {
+ tMergeTreeClose(&pLBlockReader->mergeTree);
}
- return true;
-}
-
-// todo refactor
-static int32_t buildComposedDataBlockImpl(STsdbReader* pReader, STableBlockScanInfo* pBlockScanInfo,
- SBlockData* pBlockData, SLastBlockReader* pLastBlockReader) {
- SFileBlockDumpInfo* pDumpInfo = &pReader->status.fBlockDumpInfo;
-
- int64_t key = (pBlockData->nRow > 0)? pBlockData->aTSKEY[pDumpInfo->rowIndex]:INT64_MIN;
- TSDBROW* pRow = getValidRow(&pBlockScanInfo->iter, pBlockScanInfo->delSkyline, pReader);
- TSDBROW* piRow = getValidRow(&pBlockScanInfo->iiter, pBlockScanInfo->delSkyline, pReader);
-
- if (pBlockScanInfo->iter.hasVal && pBlockScanInfo->iiter.hasVal) {
- return doMergeMultiLevelRowsRv(pReader, pBlockScanInfo, pBlockData, pLastBlockReader);
- } else {
- // imem + file + last block
- if (pBlockScanInfo->iiter.hasVal) {
- return doMergeBufAndFileRows_Rv(pReader, pBlockScanInfo, piRow, &pBlockScanInfo->iiter, key, pLastBlockReader);
- }
-
- // mem + file
- if (pBlockScanInfo->iter.hasVal) {
- return doMergeBufAndFileRows_Rv(pReader, pBlockScanInfo, pRow, &pBlockScanInfo->iter, key, pLastBlockReader);
- }
-
- if (pBlockData->nRow > 0) {
- TSDBROW fRow = tsdbRowFromBlockData(pBlockData, pDumpInfo->rowIndex);
-
- // no last block available, only data block exists
- if (pLastBlockReader->lastBlockData.nRow == 0 || (!hasDataInLastBlock(pLastBlockReader))) {
- if (tryCopyDistinctRowFromFileBlock(pReader, pBlockData, key, pDumpInfo)) {
- return TSDB_CODE_SUCCESS;
- } else {
- STSRow* pTSRow = NULL;
- SRowMerger merge = {0};
-
- tRowMergerInit(&merge, &fRow, pReader->pSchema);
- doMergeRowsInFileBlocks(pBlockData, pBlockScanInfo, pReader, &merge);
- tRowMergerGetRow(&merge, &pTSRow);
- doAppendRowFromTSRow(pReader->pResBlock, pReader, pTSRow, pBlockScanInfo->uid);
-
- taosMemoryFree(pTSRow);
- tRowMergerClear(&merge);
- return TSDB_CODE_SUCCESS;
- }
- }
- // row in last file block
- int64_t ts = getCurrentKeyInLastBlock(pLastBlockReader);
- ASSERT(ts >= key);
+ initMemDataIterator(pScanInfo, pReader);
+ pLBlockReader->uid = pScanInfo->uid;
- if (ASCENDING_TRAVERSE(pReader->order)) {
- if (key < ts) {
- // imem & mem are all empty, only file exist
- if (tryCopyDistinctRowFromFileBlock(pReader, pBlockData, key, pDumpInfo)) {
- return TSDB_CODE_SUCCESS;
- } else {
- STSRow* pTSRow = NULL;
- SRowMerger merge = {0};
+ int32_t step = ASCENDING_TRAVERSE(pLBlockReader->order) ? 1 : -1;
+ STimeWindow w = pLBlockReader->window;
+ if (ASCENDING_TRAVERSE(pLBlockReader->order)) {
+ w.skey = pScanInfo->lastKey + step;
+ } else {
+ w.ekey = pScanInfo->lastKey + step;
+ }
- tRowMergerInit(&merge, &fRow, pReader->pSchema);
- doMergeRowsInFileBlocks(pBlockData, pBlockScanInfo, pReader, &merge);
- tRowMergerGetRow(&merge, &pTSRow);
- doAppendRowFromTSRow(pReader->pResBlock, pReader, pTSRow, pBlockScanInfo->uid);
+ int32_t code =
+ tMergeTreeOpen(&pLBlockReader->mergeTree, (pLBlockReader->order == TSDB_ORDER_DESC), pReader->pFileReader,
+ pReader->suid, pScanInfo->uid, &w, &pLBlockReader->verRange, pLBlockReader->pInfo, pReader->idStr);
+ if (code != TSDB_CODE_SUCCESS) {
+ return false;
+ }
- taosMemoryFree(pTSRow);
- tRowMergerClear(&merge);
- return TSDB_CODE_SUCCESS;
- }
- } else if (key == ts) {
- STSRow* pTSRow = NULL;
- SRowMerger merge = {0};
+ return nextRowFromLastBlocks(pLBlockReader, pScanInfo);
+}
- tRowMergerInit(&merge, &fRow, pReader->pSchema);
- doMergeRowsInFileBlocks(pBlockData, pBlockScanInfo, pReader, &merge);
- doMergeRowsInLastBlock(pLastBlockReader, pBlockScanInfo, ts, &merge);
+static int64_t getCurrentKeyInLastBlock(SLastBlockReader* pLastBlockReader) {
+ TSDBROW row = tMergeTreeGetRow(&pLastBlockReader->mergeTree);
+ return TSDBROW_TS(&row);
+}
- tRowMergerGetRow(&merge, &pTSRow);
- doAppendRowFromTSRow(pReader->pResBlock, pReader, pTSRow, pBlockScanInfo->uid);
+static bool hasDataInLastBlock(SLastBlockReader* pLastBlockReader) { return pLastBlockReader->mergeTree.pIter != NULL; }
- taosMemoryFree(pTSRow);
- tRowMergerClear(&merge);
- return TSDB_CODE_SUCCESS;
- } else {
- ASSERT(0);
- return TSDB_CODE_SUCCESS;
- }
- } else { // desc order
- SBlockData* pLastBlockData = &pLastBlockReader->lastBlockData;
- TSDBROW fRow1 = tsdbRowFromBlockData(pLastBlockData, *pLastBlockReader->rowIndex);
+int32_t mergeRowsInFileBlocks(SBlockData* pBlockData, STableBlockScanInfo* pBlockScanInfo, int64_t key,
+ STsdbReader* pReader) {
+ SFileBlockDumpInfo* pDumpInfo = &pReader->status.fBlockDumpInfo;
+ if (tryCopyDistinctRowFromFileBlock(pReader, pBlockData, key, pDumpInfo)) {
+ return TSDB_CODE_SUCCESS;
+ } else {
+ TSDBROW fRow = tsdbRowFromBlockData(pBlockData, pDumpInfo->rowIndex);
- STSRow* pTSRow = NULL;
- SRowMerger merge = {0};
- tRowMergerInit(&merge, &fRow1, pReader->pSchema);
- doMergeRowsInLastBlock(pLastBlockReader, pBlockScanInfo, ts, &merge);
+ STSRow* pTSRow = NULL;
+ SRowMerger merge = {0};
- if (ts == key) {
- doMergeRowsInFileBlocks(pBlockData, pBlockScanInfo, pReader, &merge);
- }
+ tRowMergerInit(&merge, &fRow, pReader->pSchema);
+ doMergeRowsInFileBlocks(pBlockData, pBlockScanInfo, pReader, &merge);
+ int32_t code = tRowMergerGetRow(&merge, &pTSRow);
+ if (code != TSDB_CODE_SUCCESS) {
+ return code;
+ }
- tRowMergerGetRow(&merge, &pTSRow);
- doAppendRowFromTSRow(pReader->pResBlock, pReader, pTSRow, pBlockScanInfo->uid);
+ doAppendRowFromTSRow(pReader->pResBlock, pReader, pTSRow, pBlockScanInfo->uid);
- taosMemoryFree(pTSRow);
- tRowMergerClear(&merge);
- return TSDB_CODE_SUCCESS;
- }
- } else { // only last block exists
- SBlockData* pLastBlockData = &pLastBlockReader->lastBlockData;
- int64_t tsLastBlock = getCurrentKeyInLastBlock(pLastBlockReader);
+ taosMemoryFree(pTSRow);
+ tRowMergerClear(&merge);
+ return TSDB_CODE_SUCCESS;
+ }
+}
- STSRow* pTSRow = NULL;
- SRowMerger merge = {0};
+static int32_t buildComposedDataBlockImpl(STsdbReader* pReader, STableBlockScanInfo* pBlockScanInfo,
+ SBlockData* pBlockData, SLastBlockReader* pLastBlockReader) {
+ SFileBlockDumpInfo* pDumpInfo = &pReader->status.fBlockDumpInfo;
- TSDBROW fRow = tsdbRowFromBlockData(pLastBlockData, *pLastBlockReader->rowIndex);
+ int64_t key = (pBlockData->nRow > 0) ? pBlockData->aTSKEY[pDumpInfo->rowIndex] : INT64_MIN;
+ if (pBlockScanInfo->iter.hasVal && pBlockScanInfo->iiter.hasVal) {
+ return doMergeMultiLevelRows(pReader, pBlockScanInfo, pBlockData, pLastBlockReader);
+ } else {
+ TSDBROW *pRow = NULL, *piRow = NULL;
+ if (pBlockScanInfo->iter.hasVal) {
+ pRow = getValidMemRow(&pBlockScanInfo->iter, pBlockScanInfo->delSkyline, pReader);
+ }
- tRowMergerInit(&merge, &fRow, pReader->pSchema);
- doMergeRowsInLastBlock(pLastBlockReader, pBlockScanInfo, tsLastBlock, &merge);
- tRowMergerGetRow(&merge, &pTSRow);
+ if (pBlockScanInfo->iiter.hasVal) {
+ piRow = getValidMemRow(&pBlockScanInfo->iiter, pBlockScanInfo->delSkyline, pReader);
+ }
- doAppendRowFromTSRow(pReader->pResBlock, pReader, pTSRow, pBlockScanInfo->uid);
+ // imem + file + last block
+ if (pBlockScanInfo->iiter.hasVal) {
+ return doMergeBufAndFileRows(pReader, pBlockScanInfo, piRow, &pBlockScanInfo->iiter, key, pLastBlockReader);
+ }
- taosMemoryFree(pTSRow);
- tRowMergerClear(&merge);
- return TSDB_CODE_SUCCESS;
+ // mem + file + last block
+ if (pBlockScanInfo->iter.hasVal) {
+ return doMergeBufAndFileRows(pReader, pBlockScanInfo, pRow, &pBlockScanInfo->iter, key, pLastBlockReader);
}
+
+ // files data blocks + last block
+ return mergeFileBlockAndLastBlock(pReader, pLastBlockReader, key, pBlockScanInfo, pBlockData);
}
}
static int32_t buildComposedDataBlock(STsdbReader* pReader) {
SSDataBlock* pResBlock = pReader->pResBlock;
- SFileDataBlockInfo* pBlockInfo = getCurrentBlockInfo(&pReader->status.blockIter);
+ SFileDataBlockInfo* pBlockInfo = getCurrentBlockInfo(&pReader->status.blockIter);
+ SLastBlockReader* pLastBlockReader = pReader->status.fileIter.pLastBlockReader;
+
+ int64_t st = taosGetTimestampUs();
STableBlockScanInfo* pBlockScanInfo = NULL;
if (pBlockInfo != NULL) {
pBlockScanInfo = taosHashGet(pReader->status.pTableMap, &pBlockInfo->uid, sizeof(pBlockInfo->uid));
- } else {
+ SDataBlk* pBlock = getCurrentBlock(&pReader->status.blockIter);
+ TSDBKEY keyInBuf = getCurrentKeyInBuf(pBlockScanInfo, pReader);
+
+ // it is a clean block, load it directly
+ if (isCleanFileDataBlock(pReader, pBlockInfo, pBlock, pBlockScanInfo, keyInBuf, pLastBlockReader)) {
+ copyBlockDataToSDataBlock(pReader, pBlockScanInfo);
+ goto _end;
+ }
+ } else { // file blocks not exist
pBlockScanInfo = pReader->status.pTableIter;
}
- SLastBlockReader* pLastBlockReader = pReader->status.fileIter.pLastBlockReader;
SFileBlockDumpInfo* pDumpInfo = &pReader->status.fBlockDumpInfo;
SBlockData* pBlockData = &pReader->status.fileBlockData;
int32_t step = ASCENDING_TRAVERSE(pReader->order) ? 1 : -1;
- int64_t st = taosGetTimestampUs();
while (1) {
// todo check the validate of row in file block
+ bool hasBlockData = false;
{
- bool hasBlockData = false;
-
while (pBlockData->nRow > 0) { // find the first qualified row in data block
if (isValidFileBlockRow(pBlockData, pDumpInfo, pBlockScanInfo, pReader)) {
hasBlockData = true;
@@ -2148,26 +2270,26 @@ static int32_t buildComposedDataBlock(STsdbReader* pReader) {
pDumpInfo->rowIndex += step;
- SBlock* pBlock = getCurrentBlock(&pReader->status.blockIter);
+ SDataBlk* pBlock = getCurrentBlock(&pReader->status.blockIter);
if (pDumpInfo->rowIndex >= pBlock->nRow || pDumpInfo->rowIndex < 0) {
setBlockAllDumped(pDumpInfo, pBlock->maxKey.ts, pReader->order);
break;
}
}
+ }
- bool hasBlockLData = hasDataInLastBlock(pLastBlockReader);
+ bool hasBlockLData = hasDataInLastBlock(pLastBlockReader);
- // no data in last block and block, no need to proceed.
- if ((hasBlockData == false) && (hasBlockLData == false)) {
- break;
- }
+ // no data in last block and block, no need to proceed.
+ if ((hasBlockData == false) && (hasBlockLData == false)) {
+ break;
}
buildComposedDataBlockImpl(pReader, pBlockScanInfo, pBlockData, pLastBlockReader);
// currently loaded file data block is consumed
if ((pBlockData->nRow > 0) && (pDumpInfo->rowIndex >= pBlockData->nRow || pDumpInfo->rowIndex < 0)) {
- SBlock* pBlock = getCurrentBlock(&pReader->status.blockIter);
+ SDataBlk* pBlock = getCurrentBlock(&pReader->status.blockIter);
setBlockAllDumped(pDumpInfo, pBlock->maxKey.ts, pReader->order);
break;
}
@@ -2177,85 +2299,28 @@ static int32_t buildComposedDataBlock(STsdbReader* pReader) {
}
}
+ _end:
pResBlock->info.uid = pBlockScanInfo->uid;
blockDataUpdateTsWindow(pResBlock, 0);
setComposedBlockFlag(pReader, true);
- int64_t et = taosGetTimestampUs();
-
- tsdbDebug("%p uid:%" PRIu64 ", composed data block created, brange:%" PRIu64 "-%" PRIu64 " rows:%d, elapsed time:%.2f ms %s",
- pReader, pBlockScanInfo->uid, pResBlock->info.window.skey, pResBlock->info.window.ekey,
- pResBlock->info.rows, (et - st) / 1000.0, pReader->idStr);
-
- return TSDB_CODE_SUCCESS;
-}
-
-void setComposedBlockFlag(STsdbReader* pReader, bool composed) { pReader->status.composedDataBlock = composed; }
-
-static int32_t initMemDataIterator(STableBlockScanInfo* pBlockScanInfo, STsdbReader* pReader) {
- if (pBlockScanInfo->iterInit) {
- return TSDB_CODE_SUCCESS;
- }
-
- int32_t code = TSDB_CODE_SUCCESS;
-
- TSDBKEY startKey = {0};
- if (ASCENDING_TRAVERSE(pReader->order)) {
- startKey = (TSDBKEY){.ts = pReader->window.skey, .version = pReader->verRange.minVer};
- } else {
- startKey = (TSDBKEY){.ts = pReader->window.ekey, .version = pReader->verRange.maxVer};
- }
-
- int32_t backward = (!ASCENDING_TRAVERSE(pReader->order));
-
- STbData* d = NULL;
- if (pReader->pReadSnap->pMem != NULL) {
- d = tsdbGetTbDataFromMemTable(pReader->pReadSnap->pMem, pReader->suid, pBlockScanInfo->uid);
- if (d != NULL) {
- code = tsdbTbDataIterCreate(d, &startKey, backward, &pBlockScanInfo->iter.iter);
- if (code == TSDB_CODE_SUCCESS) {
- pBlockScanInfo->iter.hasVal = (tsdbTbDataIterGet(pBlockScanInfo->iter.iter) != NULL);
-
- tsdbDebug("%p uid:%" PRId64 ", check data in mem from skey:%" PRId64 ", order:%d, ts range in buf:%" PRId64
- "-%" PRId64 " %s",
- pReader, pBlockScanInfo->uid, startKey.ts, pReader->order, d->minKey, d->maxKey, pReader->idStr);
- } else {
- tsdbError("%p uid:%" PRId64 ", failed to create iterator for imem, code:%s, %s", pReader, pBlockScanInfo->uid,
- tstrerror(code), pReader->idStr);
- return code;
- }
- }
- } else {
- tsdbDebug("%p uid:%" PRId64 ", no data in mem, %s", pReader, pBlockScanInfo->uid, pReader->idStr);
- }
+ double el = (taosGetTimestampUs() - st)/1000.0;
- STbData* di = NULL;
- if (pReader->pReadSnap->pIMem != NULL) {
- di = tsdbGetTbDataFromMemTable(pReader->pReadSnap->pIMem, pReader->suid, pBlockScanInfo->uid);
- if (di != NULL) {
- code = tsdbTbDataIterCreate(di, &startKey, backward, &pBlockScanInfo->iiter.iter);
- if (code == TSDB_CODE_SUCCESS) {
- pBlockScanInfo->iiter.hasVal = (tsdbTbDataIterGet(pBlockScanInfo->iiter.iter) != NULL);
+ pReader->cost.composedBlocks += 1;
+ pReader->cost.buildComposedBlockTime += el;
- tsdbDebug("%p uid:%" PRId64 ", check data in imem from skey:%" PRId64 ", order:%d, ts range in buf:%" PRId64
- "-%" PRId64 " %s",
- pReader, pBlockScanInfo->uid, startKey.ts, pReader->order, di->minKey, di->maxKey, pReader->idStr);
- } else {
- tsdbError("%p uid:%" PRId64 ", failed to create iterator for mem, code:%s, %s", pReader, pBlockScanInfo->uid,
- tstrerror(code), pReader->idStr);
- return code;
- }
- }
- } else {
- tsdbDebug("%p uid:%" PRId64 ", no data in imem, %s", pReader, pBlockScanInfo->uid, pReader->idStr);
+ if (pResBlock->info.rows > 0) {
+ tsdbDebug("%p uid:%" PRIu64 ", composed data block created, brange:%" PRIu64 "-%" PRIu64
+ " rows:%d, elapsed time:%.2f ms %s",
+ pReader, pBlockScanInfo->uid, pResBlock->info.window.skey, pResBlock->info.window.ekey,
+ pResBlock->info.rows, el, pReader->idStr);
}
- initDelSkylineIterator(pBlockScanInfo, pReader, d, di);
-
- pBlockScanInfo->iterInit = true;
return TSDB_CODE_SUCCESS;
}
+void setComposedBlockFlag(STsdbReader* pReader, bool composed) { pReader->status.composedDataBlock = composed; }
+
int32_t initDelSkylineIterator(STableBlockScanInfo* pBlockScanInfo, STsdbReader* pReader, STbData* pMemTbData,
STbData* piMemTbData) {
if (pBlockScanInfo->delSkyline != NULL) {
@@ -2338,16 +2403,14 @@ _err:
return code;
}
-static TSDBKEY getCurrentKeyInBuf(STableBlockScanInfo* pScanInfo, STsdbReader* pReader) {
- TSDBKEY key = {.ts = TSKEY_INITIAL_VAL};
-
- initMemDataIterator(pScanInfo, pReader);
- TSDBROW* pRow = getValidRow(&pScanInfo->iter, pScanInfo->delSkyline, pReader);
+TSDBKEY getCurrentKeyInBuf(STableBlockScanInfo* pScanInfo, STsdbReader* pReader) {
+ TSDBKEY key = {.ts = TSKEY_INITIAL_VAL};
+ TSDBROW* pRow = getValidMemRow(&pScanInfo->iter, pScanInfo->delSkyline, pReader);
if (pRow != NULL) {
key = TSDBROW_KEY(pRow);
}
- pRow = getValidRow(&pScanInfo->iiter, pScanInfo->delSkyline, pReader);
+ pRow = getValidMemRow(&pScanInfo->iiter, pScanInfo->delSkyline, pReader);
if (pRow != NULL) {
TSDBKEY k = TSDBROW_KEY(pRow);
if (key.ts > k.ts) {
@@ -2361,12 +2424,10 @@ static TSDBKEY getCurrentKeyInBuf(STableBlockScanInfo* pScanInfo, STsdbReader* p
static int32_t moveToNextFile(STsdbReader* pReader, SBlockNumber* pBlockNum) {
SReaderStatus* pStatus = &pReader->status;
pBlockNum->numOfBlocks = 0;
- pBlockNum->numOfLastBlocks = 0;
+ pBlockNum->numOfLastFiles = 0;
size_t numOfTables = taosHashGetSize(pReader->status.pTableMap);
SArray* pIndexList = taosArrayInit(numOfTables, sizeof(SBlockIdx));
- SArray* pLastBlocks = pStatus->fileIter.pLastBlockReader->pBlockL;
- taosArrayClear(pLastBlocks);
while (1) {
bool hasNext = filesetIteratorNext(&pStatus->fileIter, pReader);
@@ -2381,32 +2442,16 @@ static int32_t moveToNextFile(STsdbReader* pReader, SBlockNumber* pBlockNum) {
return code;
}
- code = tsdbReadBlockL(pReader->pFileReader, pLastBlocks);
- if (code != TSDB_CODE_SUCCESS) {
- taosArrayDestroy(pIndexList);
- return code;
- }
-
- if (taosArrayGetSize(pIndexList) > 0 || taosArrayGetSize(pLastBlocks) > 0) {
- SArray* pQLastBlock = taosArrayInit(4, sizeof(SBlockL));
-
- code = doLoadFileBlock(pReader, pIndexList, pLastBlocks, pBlockNum, pQLastBlock);
+ if (taosArrayGetSize(pIndexList) > 0 || pReader->pFileReader->pSet->nSttF > 0) {
+ code = doLoadFileBlock(pReader, pIndexList, pBlockNum);
if (code != TSDB_CODE_SUCCESS) {
taosArrayDestroy(pIndexList);
- taosArrayDestroy(pQLastBlock);
return code;
}
- if (pBlockNum->numOfBlocks + pBlockNum->numOfLastBlocks > 0) {
- ASSERT(taosArrayGetSize(pQLastBlock) == pBlockNum->numOfLastBlocks);
- taosArrayClear(pLastBlocks);
- taosArrayAddAll(pLastBlocks, pQLastBlock);
-
- taosArrayDestroy(pQLastBlock);
+ if (pBlockNum->numOfBlocks + pBlockNum->numOfLastFiles > 0) {
break;
}
-
- taosArrayDestroy(pQLastBlock);
}
// no blocks in current file, try next files
@@ -2416,101 +2461,101 @@ static int32_t moveToNextFile(STsdbReader* pReader, SBlockNumber* pBlockNum) {
return TSDB_CODE_SUCCESS;
}
-static int32_t doLoadRelatedLastBlock(SLastBlockReader* pLastBlockReader, STableBlockScanInfo *pBlockScanInfo, STsdbReader* pReader) {
- SArray* pBlocks = pLastBlockReader->pBlockL;
- SBlockL* pBlock = NULL;
-
- uint64_t uid = pBlockScanInfo->uid;
- int32_t totalLastBlocks = (int32_t)taosArrayGetSize(pBlocks);
+static int32_t uidComparFunc(const void* p1, const void* p2) {
+ uint64_t pu1 = *(uint64_t*)p1;
+ uint64_t pu2 = *(uint64_t*)p2;
+ if (pu1 == pu2) {
+ return 0;
+ } else {
+ return (pu1 < pu2) ? -1 : 1;
+ }
+}
- initMemDataIterator(pBlockScanInfo, pReader);
+static void extractOrderedTableUidList(SUidOrderCheckInfo* pOrderCheckInfo, SReaderStatus* pStatus) {
+ int32_t index = 0;
+ int32_t total = taosHashGetSize(pStatus->pTableMap);
- // find the correct SBlockL. todo binary search
- int32_t index = -1;
- for (int32_t i = 0; i < totalLastBlocks; ++i) {
- SBlockL* p = taosArrayGet(pBlocks, i);
- if (p->minUid <= uid && p->maxUid >= uid) {
- index = i;
- pBlock = p;
- break;
- }
+ void* p = taosHashIterate(pStatus->pTableMap, NULL);
+ while (p != NULL) {
+ STableBlockScanInfo* pScanInfo = p;
+ pOrderCheckInfo->tableUidList[index++] = pScanInfo->uid;
+ p = taosHashIterate(pStatus->pTableMap, p);
}
- if (index == -1) {
- pLastBlockReader->currentBlockIndex = index;
- tBlockDataReset(&pLastBlockReader->lastBlockData);
- return TSDB_CODE_SUCCESS;
- }
+ taosSort(pOrderCheckInfo->tableUidList, total, sizeof(uint64_t), uidComparFunc);
+}
- // the required last datablock has already loaded
- if (index == pLastBlockReader->currentBlockIndex) {
+static int32_t initOrderCheckInfo(SUidOrderCheckInfo* pOrderCheckInfo, SReaderStatus* pStatus) {
+ int32_t total = taosHashGetSize(pStatus->pTableMap);
+ if (total == 0) {
return TSDB_CODE_SUCCESS;
}
- int64_t st = taosGetTimestampUs();
- int32_t code = tBlockDataInit(&pLastBlockReader->lastBlockData, pReader->suid, pReader->suid ? 0 : uid, pReader->pSchema);
- if (code != TSDB_CODE_SUCCESS) {
- tsdbError("%p init block data failed, code:%s %s", pReader, tstrerror(code), pReader->idStr);
- return code;
- }
-
- code = tsdbReadLastBlock(pReader->pFileReader, pBlock, &pLastBlockReader->lastBlockData);
+ if (pOrderCheckInfo->tableUidList == NULL) {
+ pOrderCheckInfo->currentIndex = 0;
+ pOrderCheckInfo->tableUidList = taosMemoryMalloc(total * sizeof(uint64_t));
+ if (pOrderCheckInfo->tableUidList == NULL) {
+ return TSDB_CODE_OUT_OF_MEMORY;
+ }
- double el = (taosGetTimestampUs() - st) / 1000.0;
- if (code != TSDB_CODE_SUCCESS) {
- tsdbError("%p error occurs in loading last block into buffer, last block index:%d, total:%d code:%s %s", pReader,
- pLastBlockReader->currentBlockIndex, totalLastBlocks, tstrerror(code), pReader->idStr);
+ extractOrderedTableUidList(pOrderCheckInfo, pStatus);
+ uint64_t uid = pOrderCheckInfo->tableUidList[0];
+ pStatus->pTableIter = taosHashGet(pStatus->pTableMap, &uid, sizeof(uid));
} else {
- tsdbDebug("%p load last block completed, uid:%" PRIu64
- " last block index:%d, total:%d rows:%d, minVer:%d, maxVer:%d, brange:%" PRId64 "-%" PRId64
- " elapsed time:%.2f ms, %s",
- pReader, uid, index, totalLastBlocks, pBlock->nRow, pBlock->minVer, pBlock->maxVer, pBlock->minKey,
- pBlock->maxKey, el, pReader->idStr);
- }
+ if (pStatus->pTableIter == NULL) { // it is the last block of a new file
+ pOrderCheckInfo->currentIndex = 0;
+ uint64_t uid = pOrderCheckInfo->tableUidList[pOrderCheckInfo->currentIndex];
+ pStatus->pTableIter = taosHashGet(pStatus->pTableMap, &uid, sizeof(uid));
+
+      // the tableMap has already been updated
+ if (pStatus->pTableIter == NULL) {
+ void* p = taosMemoryRealloc(pOrderCheckInfo->tableUidList, total * sizeof(uint64_t));
+ if (p == NULL) {
+ return TSDB_CODE_OUT_OF_MEMORY;
+ }
- pLastBlockReader->currentBlockIndex = index;
- pReader->cost.lastBlockLoad += 1;
- pReader->cost.lastBlockLoadTime += el;
+ pOrderCheckInfo->tableUidList = p;
+ extractOrderedTableUidList(pOrderCheckInfo, pStatus);
+
+ uid = pOrderCheckInfo->tableUidList[0];
+ pStatus->pTableIter = taosHashGet(pStatus->pTableMap, &uid, sizeof(uid));
+ }
+ }
+ }
return TSDB_CODE_SUCCESS;
}
+static bool moveToNextTable(SUidOrderCheckInfo* pOrderedCheckInfo, SReaderStatus* pStatus) {
+ pOrderedCheckInfo->currentIndex += 1;
+ if (pOrderedCheckInfo->currentIndex >= taosHashGetSize(pStatus->pTableMap)) {
+ pStatus->pTableIter = NULL;
+ return false;
+ }
+
+ uint64_t uid = pOrderedCheckInfo->tableUidList[pOrderedCheckInfo->currentIndex];
+ pStatus->pTableIter = taosHashGet(pStatus->pTableMap, &uid, sizeof(uid));
+ ASSERT(pStatus->pTableIter != NULL);
+ return true;
+}
+
static int32_t doLoadLastBlockSequentially(STsdbReader* pReader) {
- SReaderStatus* pStatus = &pReader->status;
+ SReaderStatus* pStatus = &pReader->status;
SLastBlockReader* pLastBlockReader = pStatus->fileIter.pLastBlockReader;
- while(1) {
- if (pStatus->pTableIter == NULL) {
- pStatus->pTableIter = taosHashIterate(pStatus->pTableMap, NULL);
- if (pStatus->pTableIter == NULL) {
- return TSDB_CODE_SUCCESS;
- }
- }
+ SUidOrderCheckInfo* pOrderedCheckInfo = &pStatus->uidCheckInfo;
+ int32_t code = initOrderCheckInfo(pOrderedCheckInfo, pStatus);
+ if (code != TSDB_CODE_SUCCESS || (taosHashGetSize(pStatus->pTableMap) == 0)) {
+ return code;
+ }
+ while (1) {
// load the last data block of current table
- // todo opt perf by avoiding load last block repeatly
STableBlockScanInfo* pScanInfo = pStatus->pTableIter;
- int32_t code = doLoadRelatedLastBlock(pLastBlockReader, pScanInfo, pReader);
- if (code != TSDB_CODE_SUCCESS) {
- return code;
- }
-
- if (pLastBlockReader->currentBlockIndex != -1) {
- initLastBlockReader(pLastBlockReader, pScanInfo->uid, &pScanInfo->indexInBlockL);
- int32_t index = pScanInfo->indexInBlockL;
- if (index == DEFAULT_ROW_INDEX_VAL || index == pLastBlockReader->lastBlockData.nRow) {
- bool hasData = nextRowInLastBlock(pLastBlockReader, pScanInfo);
- if (!hasData) { // current table does not have rows in last block, try next table
- pStatus->pTableIter = taosHashIterate(pStatus->pTableMap, pStatus->pTableIter);
- if (pStatus->pTableIter == NULL) {
- return TSDB_CODE_SUCCESS;
- }
- continue;
- }
- }
- } else { // no data in last block, try next table
- pStatus->pTableIter = taosHashIterate(pStatus->pTableMap, pStatus->pTableIter);
- if (pStatus->pTableIter == NULL) {
+ bool hasVal = initLastBlockReader(pLastBlockReader, pScanInfo, pReader);
+ if (!hasVal) {
+ bool hasNexTable = moveToNextTable(pOrderedCheckInfo, pStatus);
+ if (!hasNexTable) {
return TSDB_CODE_SUCCESS;
}
continue;
@@ -2526,17 +2571,16 @@ static int32_t doLoadLastBlockSequentially(STsdbReader* pReader) {
}
// current table is exhausted, let's try next table
- pStatus->pTableIter = taosHashIterate(pStatus->pTableMap, pStatus->pTableIter);
- if (pStatus->pTableIter == NULL) {
+ bool hasNexTable = moveToNextTable(pOrderedCheckInfo, pStatus);
+ if (!hasNexTable) {
return TSDB_CODE_SUCCESS;
}
}
}
static int32_t doBuildDataBlock(STsdbReader* pReader) {
- TSDBKEY key = {0};
- int32_t code = TSDB_CODE_SUCCESS;
- SBlock* pBlock = NULL;
+ int32_t code = TSDB_CODE_SUCCESS;
+ SDataBlk* pBlock = NULL;
SReaderStatus* pStatus = &pReader->status;
SDataBlockIter* pBlockIter = &pStatus->blockIter;
@@ -2554,26 +2598,13 @@ static int32_t doBuildDataBlock(STsdbReader* pReader) {
pBlock = getCurrentBlock(pBlockIter);
}
- {
- key = getCurrentKeyInBuf(pScanInfo, pReader);
-
- // load the last data block of current table
- code = doLoadRelatedLastBlock(pLastBlockReader, pScanInfo, pReader);
- if (code != TSDB_CODE_SUCCESS) {
- return code;
- }
-
- // note: the lastblock may be null here
- initLastBlockReader(pLastBlockReader, pScanInfo->uid, &pScanInfo->indexInBlockL);
- if (pScanInfo->indexInBlockL == DEFAULT_ROW_INDEX_VAL || pScanInfo->indexInBlockL == pLastBlockReader->lastBlockData.nRow) {
- bool hasData = nextRowInLastBlock(pLastBlockReader, pScanInfo);
- }
- }
+ initLastBlockReader(pLastBlockReader, pScanInfo, pReader);
+ TSDBKEY keyInBuf = getCurrentKeyInBuf(pScanInfo, pReader);
if (pBlockInfo == NULL) { // build data block from last data file
ASSERT(pBlockIter->numOfBlocks == 0);
code = buildComposedDataBlock(pReader);
- } else if (fileBlockShouldLoad(pReader, pBlockInfo, pBlock, pScanInfo, key, pLastBlockReader)) {
+ } else if (fileBlockShouldLoad(pReader, pBlockInfo, pBlock, pScanInfo, keyInBuf, pLastBlockReader)) {
tBlockDataReset(&pStatus->fileBlockData);
code = tBlockDataInit(&pStatus->fileBlockData, pReader->suid, pScanInfo->uid, pReader->pSchema);
if (code != TSDB_CODE_SUCCESS) {
@@ -2587,7 +2618,7 @@ static int32_t doBuildDataBlock(STsdbReader* pReader) {
// build composed data block
code = buildComposedDataBlock(pReader);
- } else if (bufferDataInFileBlockGap(pReader->order, key, pBlock)) {
+ } else if (bufferDataInFileBlockGap(pReader->order, keyInBuf, pBlock)) {
// data in memory that are earlier than current file block
// todo rows in buffer should be less than the file block in asc, greater than file block in desc
int64_t endKey = (ASCENDING_TRAVERSE(pReader->order)) ? pBlock->minKey.ts : pBlock->maxKey.ts;
@@ -2596,11 +2627,11 @@ static int32_t doBuildDataBlock(STsdbReader* pReader) {
if (hasDataInLastBlock(pLastBlockReader) && !ASCENDING_TRAVERSE(pReader->order)) {
// only return the rows in last block
int64_t tsLast = getCurrentKeyInLastBlock(pLastBlockReader);
- ASSERT (tsLast >= pBlock->maxKey.ts);
+ ASSERT(tsLast >= pBlock->maxKey.ts);
tBlockDataReset(&pReader->status.fileBlockData);
code = buildComposedDataBlock(pReader);
- } else { // whole block is required, return it directly
+ } else { // whole block is required, return it directly
SDataBlockInfo* pInfo = &pReader->pResBlock->info;
pInfo->rows = pBlock->nRow;
pInfo->uid = pScanInfo->uid;
@@ -2647,7 +2678,7 @@ static int32_t buildBlockFromBufferSequentially(STsdbReader* pReader) {
// set the correct start position in case of the first/last file block, according to the query time window
static void initBlockDumpInfo(STsdbReader* pReader, SDataBlockIter* pBlockIter) {
- SBlock* pBlock = getCurrentBlock(pBlockIter);
+ SDataBlk* pBlock = getCurrentBlock(pBlockIter);
SReaderStatus* pStatus = &pReader->status;
@@ -2667,7 +2698,7 @@ static int32_t initForFirstBlockInFile(STsdbReader* pReader, SDataBlockIter* pBl
}
// all data files are consumed, try data in buffer
- if (num.numOfBlocks + num.numOfLastBlocks == 0) {
+ if (num.numOfBlocks + num.numOfLastFiles == 0) {
pReader->status.loadFromFile = false;
return code;
}
@@ -2675,14 +2706,11 @@ static int32_t initForFirstBlockInFile(STsdbReader* pReader, SDataBlockIter* pBl
// initialize the block iterator for a new fileset
if (num.numOfBlocks > 0) {
code = initBlockIterator(pReader, pBlockIter, num.numOfBlocks);
- } else { // no block data, only last block exists
+ } else { // no block data, only last block exists
tBlockDataReset(&pReader->status.fileBlockData);
- resetDataBlockIterator(pBlockIter, pReader->order, pReader->status.pTableMap);
+ resetDataBlockIterator(pBlockIter, pReader->order);
}
- SLastBlockReader* pLReader = pReader->status.fileIter.pLastBlockReader;
- pLReader->currentBlockIndex = -1;
-
// set the correct start position according to the query time window
initBlockDumpInfo(pReader, pBlockIter);
return code;
@@ -2700,7 +2728,7 @@ static int32_t buildBlockFromFiles(STsdbReader* pReader) {
SDataBlockIter* pBlockIter = &pReader->status.blockIter;
if (pBlockIter->numOfBlocks == 0) {
- _begin:
+ _begin:
code = doLoadLastBlockSequentially(pReader);
if (code != TSDB_CODE_SUCCESS) {
return code;
@@ -2747,21 +2775,24 @@ static int32_t buildBlockFromFiles(STsdbReader* pReader) {
bool hasNext = blockIteratorNext(&pReader->status.blockIter);
if (hasNext) { // check for the next block in the block accessed order list
initBlockDumpInfo(pReader, pBlockIter);
- } else if (taosArrayGetSize(pReader->status.fileIter.pLastBlockReader->pBlockL) > 0) { // data blocks in current file are exhausted, let's try the next file now
- tBlockDataReset(&pReader->status.fileBlockData);
- resetDataBlockIterator(pBlockIter, pReader->order, pReader->status.pTableMap);
- goto _begin;
} else {
- code = initForFirstBlockInFile(pReader, pBlockIter);
+ if (pReader->status.pCurrentFileset->nSttF > 0) {
+ // data blocks in current file are exhausted, let's try the next file now
+ tBlockDataReset(&pReader->status.fileBlockData);
+ resetDataBlockIterator(pBlockIter, pReader->order);
+ goto _begin;
+ } else {
+ code = initForFirstBlockInFile(pReader, pBlockIter);
- // error happens or all the data files are completely checked
- if ((code != TSDB_CODE_SUCCESS) || (pReader->status.loadFromFile == false)) {
- return code;
- }
+ // error happens or all the data files are completely checked
+ if ((code != TSDB_CODE_SUCCESS) || (pReader->status.loadFromFile == false)) {
+ return code;
+ }
- // this file does not have blocks, let's start check the last block file
- if (pBlockIter->numOfBlocks == 0) {
- goto _begin;
+ // this file does not have blocks, let's start check the last block file
+ if (pBlockIter->numOfBlocks == 0) {
+ goto _begin;
+ }
}
}
}
@@ -2784,6 +2815,7 @@ static STsdb* getTsdbByRetentions(SVnode* pVnode, TSKEY winSKey, SRetention* ret
if (VND_IS_RSMA(pVnode)) {
int8_t level = 0;
int64_t now = taosGetTimestamp(pVnode->config.tsdbCfg.precision);
+ int64_t offset = TSDB_TICK_PER_SECOND(pVnode->config.tsdbCfg.precision);
for (int8_t i = 0; i < TSDB_RETENTION_MAX; ++i) {
SRetention* pRetention = retentions + level;
@@ -2793,7 +2825,7 @@ static STsdb* getTsdbByRetentions(SVnode* pVnode, TSKEY winSKey, SRetention* ret
}
break;
}
- if ((now - pRetention->keep) <= winSKey) {
+ if ((now - pRetention->keep) <= (winSKey + offset)) {
break;
}
++level;
@@ -2933,7 +2965,7 @@ bool hasBeenDropped(const SArray* pDelList, int32_t* index, TSDBKEY* pKey, int32
return false;
}
-TSDBROW* getValidRow(SIterInfo* pIter, const SArray* pDelList, STsdbReader* pReader) {
+TSDBROW* getValidMemRow(SIterInfo* pIter, const SArray* pDelList, STsdbReader* pReader) {
if (!pIter->hasVal) {
return NULL;
}
@@ -2981,7 +3013,7 @@ int32_t doMergeRowsInBuf(SIterInfo* pIter, uint64_t uid, int64_t ts, SArray* pDe
}
// data exists but not valid
- TSDBROW* pRow = getValidRow(pIter, pDelList, pReader);
+ TSDBROW* pRow = getValidMemRow(pIter, pDelList, pReader);
if (pRow == NULL) {
break;
}
@@ -3020,7 +3052,7 @@ typedef enum {
CHECK_FILEBLOCK_QUIT = 0x2,
} CHECK_FILEBLOCK_STATE;
-static int32_t checkForNeighborFileBlock(STsdbReader* pReader, STableBlockScanInfo* pScanInfo, SBlock* pBlock,
+static int32_t checkForNeighborFileBlock(STsdbReader* pReader, STableBlockScanInfo* pScanInfo, SDataBlk* pBlock,
SFileDataBlockInfo* pFBlock, SRowMerger* pMerger, int64_t key,
CHECK_FILEBLOCK_STATE* state) {
SFileBlockDumpInfo* pDumpInfo = &pReader->status.fBlockDumpInfo;
@@ -3029,8 +3061,8 @@ static int32_t checkForNeighborFileBlock(STsdbReader* pReader, STableBlockScanIn
*state = CHECK_FILEBLOCK_QUIT;
int32_t step = ASCENDING_TRAVERSE(pReader->order) ? 1 : -1;
- int32_t nextIndex = -1;
- SBlock* pNeighborBlock = getNeighborBlockOfSameTable(pFBlock, pScanInfo, &nextIndex, pReader->order);
+ int32_t nextIndex = -1;
+ SDataBlk* pNeighborBlock = getNeighborBlockOfSameTable(pFBlock, pScanInfo, &nextIndex, pReader->order);
if (pNeighborBlock == NULL) { // do nothing
return 0;
}
@@ -3094,7 +3126,7 @@ int32_t doMergeRowsInFileBlocks(SBlockData* pBlockData, STableBlockScanInfo* pSc
CHECK_FILEBLOCK_STATE st;
SFileDataBlockInfo* pFileBlockInfo = getCurrentBlockInfo(&pReader->status.blockIter);
- SBlock* pCurrentBlock = getCurrentBlock(&pReader->status.blockIter);
+ SDataBlk* pCurrentBlock = getCurrentBlock(&pReader->status.blockIter);
checkForNeighborFileBlock(pReader, pScanInfo, pCurrentBlock, pFileBlockInfo, pMerger, key, &st);
if (st == CHECK_FILEBLOCK_QUIT) {
break;
@@ -3105,12 +3137,13 @@ int32_t doMergeRowsInFileBlocks(SBlockData* pBlockData, STableBlockScanInfo* pSc
return TSDB_CODE_SUCCESS;
}
-// todo check if the rows are dropped or not
-int32_t doMergeRowsInLastBlock(SLastBlockReader* pLastBlockReader, STableBlockScanInfo* pScanInfo, int64_t ts, SRowMerger* pMerger) {
- while(nextRowInLastBlock(pLastBlockReader, pScanInfo)) {
+int32_t doMergeRowsInLastBlock(SLastBlockReader* pLastBlockReader, STableBlockScanInfo* pScanInfo, int64_t ts,
+ SRowMerger* pMerger) {
+ pScanInfo->lastKey = ts;
+ while (nextRowFromLastBlocks(pLastBlockReader, pScanInfo)) {
int64_t next1 = getCurrentKeyInLastBlock(pLastBlockReader);
if (next1 == ts) {
- TSDBROW fRow1 = tsdbRowFromBlockData(&pLastBlockReader->lastBlockData, *pLastBlockReader->rowIndex);
+ TSDBROW fRow1 = tMergeTreeGetRow(&pLastBlockReader->mergeTree);
tRowMerge(pMerger, &fRow1);
} else {
break;
@@ -3120,8 +3153,8 @@ int32_t doMergeRowsInLastBlock(SLastBlockReader* pLastBlockReader, STableBlockSc
return TSDB_CODE_SUCCESS;
}
-void doMergeMemTableMultiRows(TSDBROW* pRow, uint64_t uid, SIterInfo* pIter, SArray* pDelList, STSRow** pTSRow,
- STsdbReader* pReader, bool* freeTSRow) {
+int32_t doMergeMemTableMultiRows(TSDBROW* pRow, uint64_t uid, SIterInfo* pIter, SArray* pDelList, STSRow** pTSRow,
+ STsdbReader* pReader, bool* freeTSRow) {
TSDBROW* pNextRow = NULL;
TSDBROW current = *pRow;
@@ -3131,19 +3164,19 @@ void doMergeMemTableMultiRows(TSDBROW* pRow, uint64_t uid, SIterInfo* pIter, SAr
if (!pIter->hasVal) {
*pTSRow = current.pTSRow;
*freeTSRow = false;
- return;
+ return TSDB_CODE_SUCCESS;
} else { // has next point in mem/imem
- pNextRow = getValidRow(pIter, pDelList, pReader);
+ pNextRow = getValidMemRow(pIter, pDelList, pReader);
if (pNextRow == NULL) {
*pTSRow = current.pTSRow;
*freeTSRow = false;
- return;
+ return TSDB_CODE_SUCCESS;
}
if (current.pTSRow->ts != pNextRow->pTSRow->ts) {
*pTSRow = current.pTSRow;
*freeTSRow = false;
- return;
+ return TSDB_CODE_SUCCESS;
}
}
}
@@ -3163,14 +3196,18 @@ void doMergeMemTableMultiRows(TSDBROW* pRow, uint64_t uid, SIterInfo* pIter, SAr
tRowMergerAdd(&merge, pNextRow, pTSchema1);
doMergeRowsInBuf(pIter, uid, current.pTSRow->ts, pDelList, &merge, pReader);
- tRowMergerGetRow(&merge, pTSRow);
- tRowMergerClear(&merge);
+ int32_t code = tRowMergerGetRow(&merge, pTSRow);
+ if (code != TSDB_CODE_SUCCESS) {
+ return code;
+ }
+ tRowMergerClear(&merge);
*freeTSRow = true;
+ return TSDB_CODE_SUCCESS;
}
-void doMergeMemIMemRows(TSDBROW* pRow, TSDBROW* piRow, STableBlockScanInfo* pBlockScanInfo, STsdbReader* pReader,
- STSRow** pTSRow) {
+int32_t doMergeMemIMemRows(TSDBROW* pRow, TSDBROW* piRow, STableBlockScanInfo* pBlockScanInfo, STsdbReader* pReader,
+ STSRow** pTSRow) {
SRowMerger merge = {0};
TSDBKEY k = TSDBROW_KEY(pRow);
@@ -3194,13 +3231,14 @@ void doMergeMemIMemRows(TSDBROW* pRow, TSDBROW* piRow, STableBlockScanInfo* pBlo
doMergeRowsInBuf(&pBlockScanInfo->iiter, pBlockScanInfo->uid, k.ts, pBlockScanInfo->delSkyline, &merge, pReader);
}
- tRowMergerGetRow(&merge, pTSRow);
+ int32_t code = tRowMergerGetRow(&merge, pTSRow);
+ return code;
}
int32_t tsdbGetNextRowInMem(STableBlockScanInfo* pBlockScanInfo, STsdbReader* pReader, STSRow** pTSRow, int64_t endKey,
bool* freeTSRow) {
- TSDBROW* pRow = getValidRow(&pBlockScanInfo->iter, pBlockScanInfo->delSkyline, pReader);
- TSDBROW* piRow = getValidRow(&pBlockScanInfo->iiter, pBlockScanInfo->delSkyline, pReader);
+ TSDBROW* pRow = getValidMemRow(&pBlockScanInfo->iter, pBlockScanInfo->delSkyline, pReader);
+ TSDBROW* piRow = getValidMemRow(&pBlockScanInfo->iiter, pBlockScanInfo->delSkyline, pReader);
SArray* pDelList = pBlockScanInfo->delSkyline;
uint64_t uid = pBlockScanInfo->uid;
@@ -3224,26 +3262,31 @@ int32_t tsdbGetNextRowInMem(STableBlockScanInfo* pBlockScanInfo, STsdbReader* pR
TSDBKEY k = TSDBROW_KEY(pRow);
TSDBKEY ik = TSDBROW_KEY(piRow);
- if (ik.ts < k.ts) { // ik.ts < k.ts
- doMergeMemTableMultiRows(piRow, uid, &pBlockScanInfo->iiter, pDelList, pTSRow, pReader, freeTSRow);
- } else if (k.ts < ik.ts) {
- doMergeMemTableMultiRows(pRow, uid, &pBlockScanInfo->iter, pDelList, pTSRow, pReader, freeTSRow);
+ int32_t code = TSDB_CODE_SUCCESS;
+ if (ik.ts != k.ts) {
+ if (((ik.ts < k.ts) && asc) || ((ik.ts > k.ts) && (!asc))) { // ik.ts < k.ts
+ code = doMergeMemTableMultiRows(piRow, uid, &pBlockScanInfo->iiter, pDelList, pTSRow, pReader, freeTSRow);
+ } else if (((k.ts < ik.ts) && asc) || ((k.ts > ik.ts) && (!asc))) {
+ code = doMergeMemTableMultiRows(pRow, uid, &pBlockScanInfo->iter, pDelList, pTSRow, pReader, freeTSRow);
+ }
} else { // ik.ts == k.ts
- doMergeMemIMemRows(pRow, piRow, pBlockScanInfo, pReader, pTSRow);
*freeTSRow = true;
+ code = doMergeMemIMemRows(pRow, piRow, pBlockScanInfo, pReader, pTSRow);
+ if (code != TSDB_CODE_SUCCESS) {
+ return code;
+ }
}
- return TSDB_CODE_SUCCESS;
+ return code;
}
if (pBlockScanInfo->iter.hasVal && pRow != NULL) {
- doMergeMemTableMultiRows(pRow, pBlockScanInfo->uid, &pBlockScanInfo->iter, pDelList, pTSRow, pReader, freeTSRow);
- return TSDB_CODE_SUCCESS;
+ return doMergeMemTableMultiRows(pRow, pBlockScanInfo->uid, &pBlockScanInfo->iter, pDelList, pTSRow, pReader,
+ freeTSRow);
}
if (pBlockScanInfo->iiter.hasVal && piRow != NULL) {
- doMergeMemTableMultiRows(piRow, uid, &pBlockScanInfo->iiter, pDelList, pTSRow, pReader, freeTSRow);
- return TSDB_CODE_SUCCESS;
+ return doMergeMemTableMultiRows(piRow, uid, &pBlockScanInfo->iiter, pDelList, pTSRow, pReader, freeTSRow);
}
return TSDB_CODE_SUCCESS;
@@ -3293,7 +3336,8 @@ int32_t doAppendRowFromTSRow(SSDataBlock* pBlock, STsdbReader* pReader, STSRow*
return TSDB_CODE_SUCCESS;
}
-int32_t doAppendRowFromFileBlock(SSDataBlock* pResBlock, STsdbReader* pReader, SBlockData* pBlockData, int32_t rowIndex) {
+int32_t doAppendRowFromFileBlock(SSDataBlock* pResBlock, STsdbReader* pReader, SBlockData* pBlockData,
+ int32_t rowIndex) {
int32_t i = 0, j = 0;
int32_t outputRowIndex = pResBlock->info.rows;
@@ -3306,18 +3350,23 @@ int32_t doAppendRowFromFileBlock(SSDataBlock* pResBlock, STsdbReader* pReader, S
}
SColVal cv = {0};
- int32_t numOfInputCols = taosArrayGetSize(pBlockData->aIdx);
- int32_t numOfOutputCols = blockDataGetNumOfCols(pResBlock);
+ int32_t numOfInputCols = pBlockData->aIdx->size;
+ int32_t numOfOutputCols = pResBlock->pDataBlock->size;
while (i < numOfOutputCols && j < numOfInputCols) {
- SColumnInfoData* pCol = taosArrayGet(pResBlock->pDataBlock, i);
+ SColumnInfoData* pCol = TARRAY_GET_ELEM(pResBlock->pDataBlock, i);
SColData* pData = tBlockDataGetColDataByIdx(pBlockData, j);
+ if (pData->cid < pCol->info.colId) {
+ j += 1;
+ continue;
+ }
+
if (pData->cid == pCol->info.colId) {
tColDataGetValue(pData, rowIndex, &cv);
doCopyColVal(pCol, outputRowIndex, i, &cv, pSupInfo);
j += 1;
- } else { // the specified column does not exist in file block, fill with null data
+ } else if (pData->cid > pCol->info.colId) { // the specified column does not exist in file block, fill with null data
colDataAppendNULL(pCol, outputRowIndex);
}
@@ -3370,7 +3419,7 @@ int32_t tsdbSetTableId(STsdbReader* pReader, int64_t uid) {
ASSERT(pReader != NULL);
taosHashClear(pReader->status.pTableMap);
- STableBlockScanInfo info = {.lastKey = 0, .uid = uid, .indexInBlockL = DEFAULT_ROW_INDEX_VAL};
+ STableBlockScanInfo info = {.lastKey = 0, .uid = uid};
taosHashPut(pReader->status.pTableMap, &info.uid, sizeof(uint64_t), &info, sizeof(info));
return TDB_CODE_SUCCESS;
}
@@ -3391,7 +3440,6 @@ void* tsdbGetIvtIdx(SMeta* pMeta) {
uint64_t getReaderMaxVersion(STsdbReader* pReader) { return pReader->verRange.maxVer; }
-
// ====================================== EXPOSED APIs ======================================
int32_t tsdbReaderOpen(SVnode* pVnode, SQueryTableDataCond* pCond, SArray* pTableList, STsdbReader** ppReader,
const char* idstr) {
@@ -3440,11 +3488,18 @@ int32_t tsdbReaderOpen(SVnode* pVnode, SQueryTableDataCond* pCond, SArray* pTabl
}
}
+ // NOTE: the endVersion in pCond is the data version not schema version, so pCond->endVersion is not correct here.
if (pCond->suid != 0) {
- pReader->pSchema = metaGetTbTSchema(pReader->pTsdb->pVnode->pMeta, pReader->suid, -1);
+ pReader->pSchema = metaGetTbTSchema(pReader->pTsdb->pVnode->pMeta, pReader->suid, /*pCond->endVersion*/ -1);
+ if (pReader->pSchema == NULL) {
+ tsdbError("failed to get table schema, suid:%"PRIu64", ver:%"PRId64" , %s", pReader->suid, -1, pReader->idStr);
+ }
} else if (taosArrayGetSize(pTableList) > 0) {
STableKeyInfo* pKey = taosArrayGet(pTableList, 0);
- pReader->pSchema = metaGetTbTSchema(pReader->pTsdb->pVnode->pMeta, pKey->uid, -1);
+ pReader->pSchema = metaGetTbTSchema(pReader->pTsdb->pVnode->pMeta, pKey->uid, /*pCond->endVersion*/ -1);
+ if (pReader->pSchema == NULL) {
+ tsdbError("failed to get table schema, uid:%"PRIu64", ver:%"PRId64" , %s", pKey->uid, -1, pReader->idStr);
+ }
}
int32_t numOfTables = taosArrayGetSize(pTableList);
@@ -3466,7 +3521,7 @@ int32_t tsdbReaderOpen(SVnode* pVnode, SQueryTableDataCond* pCond, SArray* pTabl
SDataBlockIter* pBlockIter = &pReader->status.blockIter;
initFilesetIterator(&pReader->status.fileIter, pReader->pReadSnap->fs.aDFileSet, pReader);
- resetDataBlockIterator(&pReader->status.blockIter, pReader->order, pReader->status.pTableMap);
+ resetDataBlockIterator(&pReader->status.blockIter, pReader->order);
// no data in files, let's try buffer in memory
if (pReader->status.fileIter.numOfFiles == 0) {
@@ -3487,7 +3542,7 @@ int32_t tsdbReaderOpen(SVnode* pVnode, SQueryTableDataCond* pCond, SArray* pTabl
}
initFilesetIterator(&pPrevReader->status.fileIter, pPrevReader->pReadSnap->fs.aDFileSet, pPrevReader);
- resetDataBlockIterator(&pPrevReader->status.blockIter, pPrevReader->order, pReader->status.pTableMap);
+ resetDataBlockIterator(&pPrevReader->status.blockIter, pPrevReader->order);
// no data in files, let's try buffer in memory
if (pPrevReader->status.fileIter.numOfFiles == 0) {
@@ -3514,7 +3569,6 @@ void tsdbReaderClose(STsdbReader* pReader) {
}
SBlockLoadSuppInfo* pSupInfo = &pReader->suppInfo;
- tsdbUntakeReadSnap(pReader->pTsdb, pReader->pReadSnap);
taosMemoryFreeClear(pSupInfo->plist);
taosMemoryFree(pSupInfo->colIds);
@@ -3525,6 +3579,7 @@ void tsdbReaderClose(STsdbReader* pReader) {
taosMemoryFreeClear(pSupInfo->buildBuf[i]);
}
}
+
taosMemoryFree(pSupInfo->buildBuf);
tBlockDataDestroy(&pReader->status.fileBlockData, true);
@@ -3538,23 +3593,30 @@ void tsdbReaderClose(STsdbReader* pReader) {
tsdbDataFReaderClose(&pReader->pFileReader);
}
+ tsdbUntakeReadSnap(pReader->pTsdb, pReader->pReadSnap);
+
+ taosMemoryFree(pReader->status.uidCheckInfo.tableUidList);
+ SIOCostSummary* pCost = &pReader->cost;
+
SFilesetIter* pFilesetIter = &pReader->status.fileIter;
if (pFilesetIter->pLastBlockReader != NULL) {
- tBlockDataDestroy(&pFilesetIter->pLastBlockReader->lastBlockData, true);
- taosArrayDestroy(pFilesetIter->pLastBlockReader->pBlockL);
- taosMemoryFree(pFilesetIter->pLastBlockReader);
- }
+ SLastBlockReader* pLReader = pFilesetIter->pLastBlockReader;
+ tMergeTreeClose(&pLReader->mergeTree);
- SIOCostSummary* pCost = &pReader->cost;
+ getLastBlockLoadInfo(pLReader->pInfo, &pCost->lastBlockLoad, &pCost->lastBlockLoadTime);
+
+ pLReader->pInfo = destroyLastBlockLoadInfo(pLReader->pInfo);
+ taosMemoryFree(pLReader);
+ }
- tsdbDebug("%p :io-cost summary: head-file:%" PRIu64 ", head-file time:%.2f ms, SMA:%" PRId64
- " SMA-time:%.2f ms, fileBlocks:%" PRId64
- ", fileBlocks-time:%.2f ms, "
- "build in-memory-block-time:%.2f ms, lastBlocks:%" PRId64
- ", lastBlocks-time:%.2f ms, STableBlockScanInfo size:%.2f Kb %s",
- pReader, pCost->headFileLoad, pCost->headFileLoadTime, pCost->smaDataLoad, pCost->smaLoadTime,
- pCost->numOfBlocks, pCost->blockLoadTime, pCost->buildmemBlock, pCost->lastBlockLoad,
- pCost->lastBlockLoadTime, numOfTables * sizeof(STableBlockScanInfo) / 1000.0, pReader->idStr);
+ tsdbDebug(
+ "%p :io-cost summary: head-file:%" PRIu64 ", head-file time:%.2f ms, SMA:%" PRId64
+ " SMA-time:%.2f ms, fileBlocks:%" PRId64 ", fileBlocks-load-time:%.2f ms, "
+ "build in-memory-block-time:%.2f ms, lastBlocks:%" PRId64 ", lastBlocks-time:%.2f ms, composed-blocks:%" PRId64
+ ", composed-blocks-time:%.2fms, STableBlockScanInfo size:%.2f Kb %s",
+ pReader, pCost->headFileLoad, pCost->headFileLoadTime, pCost->smaDataLoad, pCost->smaLoadTime, pCost->numOfBlocks,
+ pCost->blockLoadTime, pCost->buildmemBlock, pCost->lastBlockLoad, pCost->lastBlockLoadTime, pCost->composedBlocks,
+ pCost->buildComposedBlockTime, numOfTables * sizeof(STableBlockScanInfo) / 1000.0, pReader->idStr);
taosMemoryFree(pReader->idStr);
taosMemoryFree(pReader->pSchema);
@@ -3665,12 +3727,12 @@ int32_t tsdbRetrieveDatablockSMA(STsdbReader* pReader, SColumnDataAgg*** pBlockS
SFileDataBlockInfo* pFBlock = getCurrentBlockInfo(&pReader->status.blockIter);
- SBlock* pBlock = getCurrentBlock(&pReader->status.blockIter);
- int64_t stime = taosGetTimestampUs();
+ SDataBlk* pBlock = getCurrentBlock(&pReader->status.blockIter);
+ int64_t stime = taosGetTimestampUs();
SBlockLoadSuppInfo* pSup = &pReader->suppInfo;
- if (tBlockHasSma(pBlock)) {
+ if (tDataBlkHasSma(pBlock)) {
code = tsdbReadBlockSma(pReader->pFileReader, pBlock, pSup->pColAgg);
if (code != TSDB_CODE_SUCCESS) {
tsdbDebug("vgId:%d, failed to load block SMA for uid %" PRIu64 ", code:%s, %s", 0, pFBlock->uid, tstrerror(code),
@@ -3785,11 +3847,12 @@ int32_t tsdbReaderReset(STsdbReader* pReader, SQueryTableDataCond* pCond) {
tsdbDataFReaderClose(&pReader->pFileReader);
int32_t numOfTables = taosHashGetSize(pReader->status.pTableMap);
- tsdbDataFReaderClose(&pReader->pFileReader);
initFilesetIterator(&pReader->status.fileIter, pReader->pReadSnap->fs.aDFileSet, pReader);
- resetDataBlockIterator(&pReader->status.blockIter, pReader->order, pReader->status.pTableMap);
- resetDataBlockScanInfo(pReader->status.pTableMap);
+ resetDataBlockIterator(&pReader->status.blockIter, pReader->order);
+
+ int64_t ts = ASCENDING_TRAVERSE(pReader->order) ? pReader->window.skey - 1 : pReader->window.ekey + 1;
+ resetDataBlockScanInfo(pReader->status.pTableMap, ts);
int32_t code = 0;
SDataBlockIter* pBlockIter = &pReader->status.blockIter;
@@ -3847,7 +3910,7 @@ int32_t tsdbGetFileBlocksDistInfo(STsdbReader* pReader, STableBlockDistInfo* pTa
while (true) {
if (hasNext) {
- SBlock* pBlock = getCurrentBlock(pBlockIter);
+ SDataBlk* pBlock = getCurrentBlock(pBlockIter);
int32_t numOfRows = pBlock->nRow;
pTableBlockInfo->totalRows += numOfRows;
@@ -4015,4 +4078,4 @@ void tsdbUntakeReadSnap(STsdb* pTsdb, STsdbReadSnap* pSnap) {
}
tsdbTrace("vgId:%d, untake read snapshot", TD_VID(pTsdb->pVnode));
-}
+}
\ No newline at end of file
diff --git a/source/dnode/vnode/src/tsdb/tsdbReaderWriter.c b/source/dnode/vnode/src/tsdb/tsdbReaderWriter.c
index c8f3862071b3628fdefd26df58ea3cb01e80d302..5fe0b408b1396c52fd1fbdf5ad2b6e37c0e5e7be 100644
--- a/source/dnode/vnode/src/tsdb/tsdbReaderWriter.c
+++ b/source/dnode/vnode/src/tsdb/tsdbReaderWriter.c
@@ -15,743 +15,933 @@
#include "tsdb.h"
-// SDelFWriter ====================================================
-int32_t tsdbDelFWriterOpen(SDelFWriter **ppWriter, SDelFile *pFile, STsdb *pTsdb) {
- int32_t code = 0;
- char fname[TSDB_FILENAME_LEN];
- char hdr[TSDB_FHDR_SIZE] = {0};
- SDelFWriter *pDelFWriter;
- int64_t n;
+// =============== PAGE-WISE FILE ===============
+static int32_t tsdbOpenFile(const char *path, int32_t szPage, int32_t flag, STsdbFD **ppFD) {
+ int32_t code = 0;
+ STsdbFD *pFD;
- // alloc
- pDelFWriter = (SDelFWriter *)taosMemoryCalloc(1, sizeof(*pDelFWriter));
- if (pDelFWriter == NULL) {
+ *ppFD = NULL;
+
+ pFD = (STsdbFD *)taosMemoryCalloc(1, sizeof(*pFD) + strlen(path) + 1);
+ if (pFD == NULL) {
code = TSDB_CODE_OUT_OF_MEMORY;
- goto _err;
+ goto _exit;
}
- pDelFWriter->pTsdb = pTsdb;
- pDelFWriter->fDel = *pFile;
- tsdbDelFileName(pTsdb, pFile, fname);
- pDelFWriter->pWriteH = taosOpenFile(fname, TD_FILE_WRITE | TD_FILE_CREATE);
- if (pDelFWriter->pWriteH == NULL) {
+ pFD->path = (char *)&pFD[1];
+ strcpy(pFD->path, path);
+ pFD->szPage = szPage;
+ pFD->flag = flag;
+ pFD->pFD = taosOpenFile(path, flag);
+ if (pFD->pFD == NULL) {
code = TAOS_SYSTEM_ERROR(errno);
- goto _err;
+ goto _exit;
}
-
- // update header
- n = taosWriteFile(pDelFWriter->pWriteH, &hdr, TSDB_FHDR_SIZE);
- if (n < 0) {
+ pFD->szPage = szPage;
+ pFD->pgno = 0;
+ pFD->pBuf = taosMemoryCalloc(1, szPage);
+ if (pFD->pBuf == NULL) {
+ code = TSDB_CODE_OUT_OF_MEMORY;
+ taosMemoryFree(pFD);
+ goto _exit;
+ }
+ if (taosStatFile(path, &pFD->szFile, NULL) < 0) {
code = TAOS_SYSTEM_ERROR(errno);
- goto _err;
+ goto _exit;
}
+ ASSERT(pFD->szFile % szPage == 0);
+ pFD->szFile = pFD->szFile / szPage;
+ *ppFD = pFD;
- pDelFWriter->fDel.size = TSDB_FHDR_SIZE;
- pDelFWriter->fDel.offset = 0;
-
- *ppWriter = pDelFWriter;
+_exit:
return code;
+}
-_err:
- tsdbError("vgId:%d, failed to open del file writer since %s", TD_VID(pTsdb->pVnode), tstrerror(code));
- *ppWriter = NULL;
+static void tsdbCloseFile(STsdbFD **ppFD) {
+ STsdbFD *pFD = *ppFD;
+ taosMemoryFree(pFD->pBuf);
+ taosCloseFile(&pFD->pFD);
+ taosMemoryFree(pFD);
+ *ppFD = NULL;
+}
+
+static int32_t tsdbWriteFilePage(STsdbFD *pFD) {
+ int32_t code = 0;
+
+ if (pFD->pgno > 0) {
+ int64_t n = taosLSeekFile(pFD->pFD, PAGE_OFFSET(pFD->pgno, pFD->szPage), SEEK_SET);
+ if (n < 0) {
+ code = TAOS_SYSTEM_ERROR(errno);
+ goto _exit;
+ }
+
+ taosCalcChecksumAppend(0, pFD->pBuf, pFD->szPage);
+
+ n = taosWriteFile(pFD->pFD, pFD->pBuf, pFD->szPage);
+ if (n < 0) {
+ code = TAOS_SYSTEM_ERROR(errno);
+ goto _exit;
+ }
+
+ if (pFD->szFile < pFD->pgno) {
+ pFD->szFile = pFD->pgno;
+ }
+ }
+ pFD->pgno = 0;
+
+_exit:
return code;
}
-int32_t tsdbDelFWriterClose(SDelFWriter **ppWriter, int8_t sync) {
- int32_t code = 0;
- SDelFWriter *pWriter = *ppWriter;
- STsdb *pTsdb = pWriter->pTsdb;
+static int32_t tsdbReadFilePage(STsdbFD *pFD, int64_t pgno) {
+ int32_t code = 0;
- // sync
- if (sync && taosFsyncFile(pWriter->pWriteH) < 0) {
+ ASSERT(pgno <= pFD->szFile);
+
+ // seek
+ int64_t offset = PAGE_OFFSET(pgno, pFD->szPage);
+ int64_t n = taosLSeekFile(pFD->pFD, offset, SEEK_SET);
+ if (n < 0) {
code = TAOS_SYSTEM_ERROR(errno);
- goto _err;
+ goto _exit;
}
- // close
- if (taosCloseFile(&pWriter->pWriteH) < 0) {
+ // read
+ n = taosReadFile(pFD->pFD, pFD->pBuf, pFD->szPage);
+ if (n < 0) {
code = TAOS_SYSTEM_ERROR(errno);
- goto _err;
+ goto _exit;
+ } else if (n < pFD->szPage) {
+ code = TSDB_CODE_FILE_CORRUPTED;
+ goto _exit;
}
- for (int32_t iBuf = 0; iBuf < sizeof(pWriter->aBuf) / sizeof(uint8_t *); iBuf++) {
- tFree(pWriter->aBuf[iBuf]);
+ // check
+ if (!taosCheckChecksumWhole(pFD->pBuf, pFD->szPage)) {
+ code = TSDB_CODE_FILE_CORRUPTED;
+ goto _exit;
}
- taosMemoryFree(pWriter);
- *ppWriter = NULL;
- return code;
+ pFD->pgno = pgno;
-_err:
- tsdbError("vgId:%d, failed to close del file writer since %s", TD_VID(pTsdb->pVnode), tstrerror(code));
+_exit:
return code;
}
-int32_t tsdbWriteDelData(SDelFWriter *pWriter, SArray *aDelData, SDelIdx *pDelIdx) {
+static int32_t tsdbWriteFile(STsdbFD *pFD, int64_t offset, uint8_t *pBuf, int64_t size) {
int32_t code = 0;
- int64_t size;
- int64_t n;
+ int64_t fOffset = LOGIC_TO_FILE_OFFSET(offset, pFD->szPage);
+ int64_t pgno = OFFSET_PGNO(fOffset, pFD->szPage);
+ int64_t bOffset = fOffset % pFD->szPage;
+ int64_t n = 0;
+
+ do {
+ if (pFD->pgno != pgno) {
+ code = tsdbWriteFilePage(pFD);
+ if (code) goto _exit;
+
+ if (pgno <= pFD->szFile) {
+ code = tsdbReadFilePage(pFD, pgno);
+ if (code) goto _exit;
+ } else {
+ pFD->pgno = pgno;
+ }
+ }
- // prepare
- size = sizeof(uint32_t);
- for (int32_t iDelData = 0; iDelData < taosArrayGetSize(aDelData); iDelData++) {
- size += tPutDelData(NULL, taosArrayGet(aDelData, iDelData));
- }
- size += sizeof(TSCKSUM);
+ int64_t nWrite = TMIN(PAGE_CONTENT_SIZE(pFD->szPage) - bOffset, size - n);
+ memcpy(pFD->pBuf + bOffset, pBuf + n, nWrite);
- // alloc
- code = tRealloc(&pWriter->aBuf[0], size);
- if (code) goto _err;
+ pgno++;
+ bOffset = 0;
+ n += nWrite;
+ } while (n < size);
- // build
- n = 0;
- n += tPutU32(pWriter->aBuf[0] + n, TSDB_FILE_DLMT);
- for (int32_t iDelData = 0; iDelData < taosArrayGetSize(aDelData); iDelData++) {
- n += tPutDelData(pWriter->aBuf[0] + n, taosArrayGet(aDelData, iDelData));
- }
- taosCalcChecksumAppend(0, pWriter->aBuf[0], size);
+_exit:
+ return code;
+}
- ASSERT(n + sizeof(TSCKSUM) == size);
+static int32_t tsdbReadFile(STsdbFD *pFD, int64_t offset, uint8_t *pBuf, int64_t size) {
+ int32_t code = 0;
+ int64_t n = 0;
+ int64_t fOffset = LOGIC_TO_FILE_OFFSET(offset, pFD->szPage);
+ int64_t pgno = OFFSET_PGNO(fOffset, pFD->szPage);
+ int32_t szPgCont = PAGE_CONTENT_SIZE(pFD->szPage);
+ int64_t bOffset = fOffset % pFD->szPage;
+
+ ASSERT(pgno && pgno <= pFD->szFile);
+ ASSERT(bOffset < szPgCont);
+
+ while (n < size) {
+ if (pFD->pgno != pgno) {
+ code = tsdbReadFilePage(pFD, pgno);
+ if (code) goto _exit;
+ }
- // write
- n = taosWriteFile(pWriter->pWriteH, pWriter->aBuf[0], size);
- if (n < 0) {
- code = TAOS_SYSTEM_ERROR(errno);
- goto _err;
+ int64_t nRead = TMIN(szPgCont - bOffset, size - n);
+ memcpy(pBuf + n, pFD->pBuf + bOffset, nRead);
+
+ n += nRead;
+ pgno++;
+ bOffset = 0;
}
- ASSERT(n == size);
+_exit:
+ return code;
+}
- // update
- pDelIdx->offset = pWriter->fDel.size;
- pDelIdx->size = size;
- pWriter->fDel.size += size;
+static int32_t tsdbFsyncFile(STsdbFD *pFD) {
+ int32_t code = 0;
- return code;
+ code = tsdbWriteFilePage(pFD);
+ if (code) goto _exit;
-_err:
- tsdbError("vgId:%d, failed to write del data since %s", TD_VID(pWriter->pTsdb->pVnode), tstrerror(code));
+ if (taosFsyncFile(pFD->pFD) < 0) {
+ code = TAOS_SYSTEM_ERROR(errno);
+ goto _exit;
+ }
+
+_exit:
return code;
}
-int32_t tsdbWriteDelIdx(SDelFWriter *pWriter, SArray *aDelIdx) {
- int32_t code = 0;
- int64_t size;
- int64_t n;
- SDelIdx *pDelIdx;
+// SDataFWriter ====================================================
+int32_t tsdbDataFWriterOpen(SDataFWriter **ppWriter, STsdb *pTsdb, SDFileSet *pSet) {
+ int32_t code = 0;
+ int32_t flag;
+ int64_t n;
+ int32_t szPage = pTsdb->pVnode->config.tsdbPageSize;
+ SDataFWriter *pWriter = NULL;
+ char fname[TSDB_FILENAME_LEN];
+ char hdr[TSDB_FHDR_SIZE] = {0};
- // prepare
- size = sizeof(uint32_t);
- for (int32_t iDelIdx = 0; iDelIdx < taosArrayGetSize(aDelIdx); iDelIdx++) {
- size += tPutDelIdx(NULL, taosArrayGet(aDelIdx, iDelIdx));
+ // alloc
+ pWriter = taosMemoryCalloc(1, sizeof(*pWriter));
+ if (pWriter == NULL) {
+ code = TSDB_CODE_OUT_OF_MEMORY;
+ goto _err;
+ }
+ pWriter->pTsdb = pTsdb;
+ pWriter->wSet = (SDFileSet){.diskId = pSet->diskId,
+ .fid = pSet->fid,
+ .pHeadF = &pWriter->fHead,
+ .pDataF = &pWriter->fData,
+ .pSmaF = &pWriter->fSma,
+ .nSttF = pSet->nSttF};
+ pWriter->fHead = *pSet->pHeadF;
+ pWriter->fData = *pSet->pDataF;
+ pWriter->fSma = *pSet->pSmaF;
+ for (int8_t iStt = 0; iStt < pSet->nSttF; iStt++) {
+ pWriter->wSet.aSttF[iStt] = &pWriter->fStt[iStt];
+ pWriter->fStt[iStt] = *pSet->aSttF[iStt];
}
- size += sizeof(TSCKSUM);
- // alloc
- code = tRealloc(&pWriter->aBuf[0], size);
+ // head
+ flag = TD_FILE_READ | TD_FILE_WRITE | TD_FILE_CREATE | TD_FILE_TRUNC;
+ tsdbHeadFileName(pTsdb, pWriter->wSet.diskId, pWriter->wSet.fid, &pWriter->fHead, fname);
+ code = tsdbOpenFile(fname, szPage, flag, &pWriter->pHeadFD);
if (code) goto _err;
- // build
- n = 0;
- n += tPutU32(pWriter->aBuf[0] + n, TSDB_FILE_DLMT);
- for (int32_t iDelIdx = 0; iDelIdx < taosArrayGetSize(aDelIdx); iDelIdx++) {
- n += tPutDelIdx(pWriter->aBuf[0] + n, taosArrayGet(aDelIdx, iDelIdx));
+ code = tsdbWriteFile(pWriter->pHeadFD, 0, hdr, TSDB_FHDR_SIZE);
+ if (code) goto _err;
+ pWriter->fHead.size += TSDB_FHDR_SIZE;
+
+ // data
+ if (pWriter->fData.size == 0) {
+ flag = TD_FILE_READ | TD_FILE_WRITE | TD_FILE_CREATE | TD_FILE_TRUNC;
+ } else {
+ flag = TD_FILE_READ | TD_FILE_WRITE;
+ }
+ tsdbDataFileName(pTsdb, pWriter->wSet.diskId, pWriter->wSet.fid, &pWriter->fData, fname);
+ code = tsdbOpenFile(fname, szPage, flag, &pWriter->pDataFD);
+ if (code) goto _err;
+ if (pWriter->fData.size == 0) {
+ code = tsdbWriteFile(pWriter->pDataFD, 0, hdr, TSDB_FHDR_SIZE);
+ if (code) goto _err;
+ pWriter->fData.size += TSDB_FHDR_SIZE;
}
- taosCalcChecksumAppend(0, pWriter->aBuf[0], size);
- ASSERT(n + sizeof(TSCKSUM) == size);
+ // sma
+ if (pWriter->fSma.size == 0) {
+ flag = TD_FILE_READ | TD_FILE_WRITE | TD_FILE_CREATE | TD_FILE_TRUNC;
+ } else {
+ flag = TD_FILE_READ | TD_FILE_WRITE;
+ }
+ tsdbSmaFileName(pTsdb, pWriter->wSet.diskId, pWriter->wSet.fid, &pWriter->fSma, fname);
+ code = tsdbOpenFile(fname, szPage, flag, &pWriter->pSmaFD);
+ if (code) goto _err;
+ if (pWriter->fSma.size == 0) {
+ code = tsdbWriteFile(pWriter->pSmaFD, 0, hdr, TSDB_FHDR_SIZE);
+ if (code) goto _err;
- // write
- n = taosWriteFile(pWriter->pWriteH, pWriter->aBuf[0], size);
- if (n < 0) {
- code = TAOS_SYSTEM_ERROR(errno);
- goto _err;
+ pWriter->fSma.size += TSDB_FHDR_SIZE;
}
- // update
- pWriter->fDel.offset = pWriter->fDel.size;
- pWriter->fDel.size += size;
+ // stt
+ ASSERT(pWriter->fStt[pSet->nSttF - 1].size == 0);
+ flag = TD_FILE_READ | TD_FILE_WRITE | TD_FILE_CREATE | TD_FILE_TRUNC;
+ tsdbSttFileName(pTsdb, pWriter->wSet.diskId, pWriter->wSet.fid, &pWriter->fStt[pSet->nSttF - 1], fname);
+ code = tsdbOpenFile(fname, szPage, flag, &pWriter->pSttFD);
+ if (code) goto _err;
+ code = tsdbWriteFile(pWriter->pSttFD, 0, hdr, TSDB_FHDR_SIZE);
+ if (code) goto _err;
+ pWriter->fStt[pWriter->wSet.nSttF - 1].size += TSDB_FHDR_SIZE;
+ *ppWriter = pWriter;
return code;
_err:
- tsdbError("vgId:%d, write del idx failed since %s", TD_VID(pWriter->pTsdb->pVnode), tstrerror(code));
+ tsdbError("vgId:%d, tsdb data file writer open failed since %s", TD_VID(pTsdb->pVnode), tstrerror(code));
+ *ppWriter = NULL;
return code;
}
-int32_t tsdbUpdateDelFileHdr(SDelFWriter *pWriter) {
+int32_t tsdbDataFWriterClose(SDataFWriter **ppWriter, int8_t sync) {
int32_t code = 0;
- char hdr[TSDB_FHDR_SIZE];
- int64_t size = TSDB_FHDR_SIZE;
- int64_t n;
+ STsdb *pTsdb = NULL;
- // build
- memset(hdr, 0, size);
- tPutDelFile(hdr, &pWriter->fDel);
- taosCalcChecksumAppend(0, hdr, size);
+ if (*ppWriter == NULL) goto _exit;
- // seek
- if (taosLSeekFile(pWriter->pWriteH, 0, SEEK_SET) < 0) {
- code = TAOS_SYSTEM_ERROR(errno);
- goto _err;
- }
+ pTsdb = (*ppWriter)->pTsdb;
+ if (sync) {
+ code = tsdbFsyncFile((*ppWriter)->pHeadFD);
+ if (code) goto _err;
- // write
- n = taosWriteFile(pWriter->pWriteH, hdr, size);
- if (n < 0) {
- code = TAOS_SYSTEM_ERROR(errno);
- goto _err;
+ code = tsdbFsyncFile((*ppWriter)->pDataFD);
+ if (code) goto _err;
+
+ code = tsdbFsyncFile((*ppWriter)->pSmaFD);
+ if (code) goto _err;
+
+ code = tsdbFsyncFile((*ppWriter)->pSttFD);
+ if (code) goto _err;
}
+ tsdbCloseFile(&(*ppWriter)->pHeadFD);
+ tsdbCloseFile(&(*ppWriter)->pDataFD);
+ tsdbCloseFile(&(*ppWriter)->pSmaFD);
+ tsdbCloseFile(&(*ppWriter)->pSttFD);
+
+ for (int32_t iBuf = 0; iBuf < sizeof((*ppWriter)->aBuf) / sizeof(uint8_t *); iBuf++) {
+ tFree((*ppWriter)->aBuf[iBuf]);
+ }
+ taosMemoryFree(*ppWriter);
+_exit:
+ *ppWriter = NULL;
return code;
_err:
- tsdbError("vgId:%d, update del file hdr failed since %s", TD_VID(pWriter->pTsdb->pVnode), tstrerror(code));
+ tsdbError("vgId:%d, data file writer close failed since %s", TD_VID(pTsdb->pVnode), tstrerror(code));
return code;
}
-// SDelFReader ====================================================
-struct SDelFReader {
- STsdb *pTsdb;
- SDelFile fDel;
- TdFilePtr pReadH;
+int32_t tsdbUpdateDFileSetHeader(SDataFWriter *pWriter) {
+ int32_t code = 0;
+ int64_t n;
+ char hdr[TSDB_FHDR_SIZE];
- uint8_t *aBuf[1];
-};
+ // head ==============
+ memset(hdr, 0, TSDB_FHDR_SIZE);
+ tPutHeadFile(hdr, &pWriter->fHead);
+ code = tsdbWriteFile(pWriter->pHeadFD, 0, hdr, TSDB_FHDR_SIZE);
+ if (code) goto _err;
-int32_t tsdbDelFReaderOpen(SDelFReader **ppReader, SDelFile *pFile, STsdb *pTsdb) {
- int32_t code = 0;
- char fname[TSDB_FILENAME_LEN];
- SDelFReader *pDelFReader;
- int64_t n;
+ // data ==============
+ memset(hdr, 0, TSDB_FHDR_SIZE);
+ tPutDataFile(hdr, &pWriter->fData);
+ code = tsdbWriteFile(pWriter->pDataFD, 0, hdr, TSDB_FHDR_SIZE);
+ if (code) goto _err;
- // alloc
- pDelFReader = (SDelFReader *)taosMemoryCalloc(1, sizeof(*pDelFReader));
- if (pDelFReader == NULL) {
- code = TSDB_CODE_OUT_OF_MEMORY;
- goto _err;
- }
-
- // open impl
- pDelFReader->pTsdb = pTsdb;
- pDelFReader->fDel = *pFile;
+ // sma ==============
+ memset(hdr, 0, TSDB_FHDR_SIZE);
+ tPutSmaFile(hdr, &pWriter->fSma);
+ code = tsdbWriteFile(pWriter->pSmaFD, 0, hdr, TSDB_FHDR_SIZE);
+ if (code) goto _err;
- tsdbDelFileName(pTsdb, pFile, fname);
- pDelFReader->pReadH = taosOpenFile(fname, TD_FILE_READ);
- if (pDelFReader->pReadH == NULL) {
- code = TAOS_SYSTEM_ERROR(errno);
- taosMemoryFree(pDelFReader);
- goto _err;
- }
+ // stt ==============
+ memset(hdr, 0, TSDB_FHDR_SIZE);
+ tPutSttFile(hdr, &pWriter->fStt[pWriter->wSet.nSttF - 1]);
+ code = tsdbWriteFile(pWriter->pSttFD, 0, hdr, TSDB_FHDR_SIZE);
+ if (code) goto _err;
-_exit:
- *ppReader = pDelFReader;
return code;
_err:
- tsdbError("vgId:%d, del file reader open failed since %s", TD_VID(pTsdb->pVnode), tstrerror(code));
- *ppReader = NULL;
+ tsdbError("vgId:%d, update DFileSet header failed since %s", TD_VID(pWriter->pTsdb->pVnode), tstrerror(code));
return code;
}
-int32_t tsdbDelFReaderClose(SDelFReader **ppReader) {
- int32_t code = 0;
- SDelFReader *pReader = *ppReader;
+int32_t tsdbWriteBlockIdx(SDataFWriter *pWriter, SArray *aBlockIdx) {
+ int32_t code = 0;
+ SHeadFile *pHeadFile = &pWriter->fHead;
+ int64_t size;
+ int64_t n;
- if (pReader) {
- if (taosCloseFile(&pReader->pReadH) < 0) {
- code = TAOS_SYSTEM_ERROR(errno);
- goto _exit;
- }
- for (int32_t iBuf = 0; iBuf < sizeof(pReader->aBuf) / sizeof(uint8_t *); iBuf++) {
- tFree(pReader->aBuf[iBuf]);
- }
- taosMemoryFree(pReader);
+ // check
+ if (taosArrayGetSize(aBlockIdx) == 0) {
+ pHeadFile->offset = pHeadFile->size;
+ goto _exit;
}
- *ppReader = NULL;
-
-_exit:
- return code;
-}
-
-int32_t tsdbReadDelData(SDelFReader *pReader, SDelIdx *pDelIdx, SArray *aDelData) {
- int32_t code = 0;
- int64_t offset = pDelIdx->offset;
- int64_t size = pDelIdx->size;
- int64_t n;
-
- taosArrayClear(aDelData);
- // seek
- if (taosLSeekFile(pReader->pReadH, offset, SEEK_SET) < 0) {
- code = TAOS_SYSTEM_ERROR(errno);
- goto _err;
+ // prepare
+ size = 0;
+ for (int32_t iBlockIdx = 0; iBlockIdx < taosArrayGetSize(aBlockIdx); iBlockIdx++) {
+ size += tPutBlockIdx(NULL, taosArrayGet(aBlockIdx, iBlockIdx));
}
// alloc
- code = tRealloc(&pReader->aBuf[0], size);
+ code = tRealloc(&pWriter->aBuf[0], size);
if (code) goto _err;
- // read
- n = taosReadFile(pReader->pReadH, pReader->aBuf[0], size);
- if (n < 0) {
- code = TAOS_SYSTEM_ERROR(errno);
- goto _err;
- } else if (n < size) {
- code = TSDB_CODE_FILE_CORRUPTED;
- goto _err;
- }
-
- // check
- if (!taosCheckChecksumWhole(pReader->aBuf[0], size)) {
- code = TSDB_CODE_FILE_CORRUPTED;
- goto _err;
- }
-
- // // decode
+ // build
n = 0;
-
- uint32_t delimiter;
- n += tGetU32(pReader->aBuf[0] + n, &delimiter);
- while (n < size - sizeof(TSCKSUM)) {
- SDelData delData;
- n += tGetDelData(pReader->aBuf[0] + n, &delData);
-
- if (taosArrayPush(aDelData, &delData) == NULL) {
- code = TSDB_CODE_OUT_OF_MEMORY;
- goto _err;
- }
+ for (int32_t iBlockIdx = 0; iBlockIdx < taosArrayGetSize(aBlockIdx); iBlockIdx++) {
+ n += tPutBlockIdx(pWriter->aBuf[0] + n, taosArrayGet(aBlockIdx, iBlockIdx));
}
+ ASSERT(n == size);
+
+ // write
+ code = tsdbWriteFile(pWriter->pHeadFD, pHeadFile->size, pWriter->aBuf[0], size);
+ if (code) goto _err;
- ASSERT(n == size - sizeof(TSCKSUM));
+ // update
+ pHeadFile->offset = pHeadFile->size;
+ pHeadFile->size += size;
+_exit:
+ // tsdbTrace("vgId:%d write block idx, offset:%" PRId64 " size:%" PRId64 " nBlockIdx:%d",
+ // TD_VID(pWriter->pTsdb->pVnode),
+ // pHeadFile->offset, size, taosArrayGetSize(aBlockIdx));
return code;
_err:
- tsdbError("vgId:%d, read del data failed since %s", TD_VID(pReader->pTsdb->pVnode), tstrerror(code));
+ tsdbError("vgId:%d, write block idx failed since %s", TD_VID(pWriter->pTsdb->pVnode), tstrerror(code));
return code;
}
-int32_t tsdbReadDelIdx(SDelFReader *pReader, SArray *aDelIdx) {
- int32_t code = 0;
- int32_t n;
- int64_t offset = pReader->fDel.offset;
- int64_t size = pReader->fDel.size - offset;
-
- taosArrayClear(aDelIdx);
+int32_t tsdbWriteDataBlk(SDataFWriter *pWriter, SMapData *mDataBlk, SBlockIdx *pBlockIdx) {
+ int32_t code = 0;
+ SHeadFile *pHeadFile = &pWriter->fHead;
+ int64_t size;
+ int64_t n;
- // seek
- if (taosLSeekFile(pReader->pReadH, offset, SEEK_SET) < 0) {
- code = TAOS_SYSTEM_ERROR(errno);
- goto _err;
- }
+ ASSERT(mDataBlk->nItem > 0);
// alloc
- code = tRealloc(&pReader->aBuf[0], size);
+ size = tPutMapData(NULL, mDataBlk);
+ code = tRealloc(&pWriter->aBuf[0], size);
if (code) goto _err;
- // read
- n = taosReadFile(pReader->pReadH, pReader->aBuf[0], size);
- if (n < 0) {
- code = TAOS_SYSTEM_ERROR(errno);
- goto _err;
- } else if (n < size) {
- code = TSDB_CODE_FILE_CORRUPTED;
- goto _err;
- }
-
- // check
- if (!taosCheckChecksumWhole(pReader->aBuf[0], size)) {
- code = TSDB_CODE_FILE_CORRUPTED;
- goto _err;
- }
-
- // decode
- n = 0;
- uint32_t delimiter;
- n += tGetU32(pReader->aBuf[0] + n, &delimiter);
- ASSERT(delimiter == TSDB_FILE_DLMT);
-
- while (n < size - sizeof(TSCKSUM)) {
- SDelIdx delIdx;
-
- n += tGetDelIdx(pReader->aBuf[0] + n, &delIdx);
+ // build
+ n = tPutMapData(pWriter->aBuf[0], mDataBlk);
- if (taosArrayPush(aDelIdx, &delIdx) == NULL) {
- code = TSDB_CODE_OUT_OF_MEMORY;
- goto _err;
- }
- }
+ // write
+ code = tsdbWriteFile(pWriter->pHeadFD, pHeadFile->size, pWriter->aBuf[0], size);
+ if (code) goto _err;
- ASSERT(n == size - sizeof(TSCKSUM));
+ // update
+ pBlockIdx->offset = pHeadFile->size;
+ pBlockIdx->size = size;
+ pHeadFile->size += size;
+ tsdbTrace("vgId:%d, write block, file ID:%d commit ID:%d suid:%" PRId64 " uid:%" PRId64 " offset:%" PRId64
+ " size:%" PRId64 " nItem:%d",
+ TD_VID(pWriter->pTsdb->pVnode), pWriter->wSet.fid, pHeadFile->commitID, pBlockIdx->suid, pBlockIdx->uid,
+ pBlockIdx->offset, pBlockIdx->size, mDataBlk->nItem);
return code;
_err:
- tsdbError("vgId:%d, read del idx failed since %s", TD_VID(pReader->pTsdb->pVnode), tstrerror(code));
+ tsdbError("vgId:%d, write block failed since %s", TD_VID(pWriter->pTsdb->pVnode), tstrerror(code));
return code;
}
-// SDataFReader ====================================================
-struct SDataFReader {
- STsdb *pTsdb;
- SDFileSet *pSet;
- TdFilePtr pHeadFD;
- TdFilePtr pDataFD;
- TdFilePtr pLastFD;
- TdFilePtr pSmaFD;
-
- uint8_t *aBuf[3];
-};
-
-int32_t tsdbDataFReaderOpen(SDataFReader **ppReader, STsdb *pTsdb, SDFileSet *pSet) {
- int32_t code = 0;
- SDataFReader *pReader;
- char fname[TSDB_FILENAME_LEN];
+int32_t tsdbWriteSttBlk(SDataFWriter *pWriter, SArray *aSttBlk) {
+ int32_t code = 0;
+ SSttFile *pSttFile = &pWriter->fStt[pWriter->wSet.nSttF - 1];
+ int64_t size;
+ int64_t n;
- // alloc
- pReader = (SDataFReader *)taosMemoryCalloc(1, sizeof(*pReader));
- if (pReader == NULL) {
- code = TSDB_CODE_OUT_OF_MEMORY;
- goto _err;
+ // check
+ if (taosArrayGetSize(aSttBlk) == 0) {
+ pSttFile->offset = pSttFile->size;
+ goto _exit;
}
- pReader->pTsdb = pTsdb;
- pReader->pSet = pSet;
- // open impl
- // head
- tsdbHeadFileName(pTsdb, pSet->diskId, pSet->fid, pSet->pHeadF, fname);
- pReader->pHeadFD = taosOpenFile(fname, TD_FILE_READ);
- if (pReader->pHeadFD == NULL) {
- code = TAOS_SYSTEM_ERROR(errno);
- goto _err;
+ // size
+ size = 0;
+ for (int32_t iBlockL = 0; iBlockL < taosArrayGetSize(aSttBlk); iBlockL++) {
+ size += tPutSttBlk(NULL, taosArrayGet(aSttBlk, iBlockL));
}
- // data
- tsdbDataFileName(pTsdb, pSet->diskId, pSet->fid, pSet->pDataF, fname);
- pReader->pDataFD = taosOpenFile(fname, TD_FILE_READ);
- if (pReader->pDataFD == NULL) {
- code = TAOS_SYSTEM_ERROR(errno);
- goto _err;
- }
+ // alloc
+ code = tRealloc(&pWriter->aBuf[0], size);
+ if (code) goto _err;
- // last
- tsdbLastFileName(pTsdb, pSet->diskId, pSet->fid, pSet->pLastF, fname);
- pReader->pLastFD = taosOpenFile(fname, TD_FILE_READ);
- if (pReader->pLastFD == NULL) {
- code = TAOS_SYSTEM_ERROR(errno);
- goto _err;
+ // encode
+ n = 0;
+ for (int32_t iBlockL = 0; iBlockL < taosArrayGetSize(aSttBlk); iBlockL++) {
+ n += tPutSttBlk(pWriter->aBuf[0] + n, taosArrayGet(aSttBlk, iBlockL));
}
- // sma
- tsdbSmaFileName(pTsdb, pSet->diskId, pSet->fid, pSet->pSmaF, fname);
- pReader->pSmaFD = taosOpenFile(fname, TD_FILE_READ);
- if (pReader->pSmaFD == NULL) {
- code = TAOS_SYSTEM_ERROR(errno);
- goto _err;
- }
+ // write
+ code = tsdbWriteFile(pWriter->pSttFD, pSttFile->size, pWriter->aBuf[0], size);
+ if (code) goto _err;
- *ppReader = pReader;
+ // update
+ pSttFile->offset = pSttFile->size;
+ pSttFile->size += size;
+
+_exit:
+ tsdbTrace("vgId:%d tsdb write stt block, loffset:%" PRId64 " size:%" PRId64, TD_VID(pWriter->pTsdb->pVnode),
+ pSttFile->offset, size);
return code;
_err:
- tsdbError("vgId:%d, tsdb data file reader open failed since %s", TD_VID(pTsdb->pVnode), tstrerror(code));
- *ppReader = NULL;
+ tsdbError("vgId:%d tsdb write blockl failed since %s", TD_VID(pWriter->pTsdb->pVnode), tstrerror(code));
return code;
}
-int32_t tsdbDataFReaderClose(SDataFReader **ppReader) {
+static int32_t tsdbWriteBlockSma(SDataFWriter *pWriter, SBlockData *pBlockData, SSmaInfo *pSmaInfo) {
int32_t code = 0;
- if (*ppReader == NULL) goto _exit;
- if (taosCloseFile(&(*ppReader)->pHeadFD) < 0) {
- code = TAOS_SYSTEM_ERROR(errno);
- goto _err;
- }
+ pSmaInfo->offset = 0;
+ pSmaInfo->size = 0;
- if (taosCloseFile(&(*ppReader)->pDataFD) < 0) {
- code = TAOS_SYSTEM_ERROR(errno);
- goto _err;
- }
+ // encode
+ for (int32_t iColData = 0; iColData < taosArrayGetSize(pBlockData->aIdx); iColData++) {
+ SColData *pColData = tBlockDataGetColDataByIdx(pBlockData, iColData);
- if (taosCloseFile(&(*ppReader)->pLastFD) < 0) {
- code = TAOS_SYSTEM_ERROR(errno);
- goto _err;
- }
+ if ((!pColData->smaOn) || IS_VAR_DATA_TYPE(pColData->type)) continue;
- if (taosCloseFile(&(*ppReader)->pSmaFD) < 0) {
- code = TAOS_SYSTEM_ERROR(errno);
- goto _err;
- }
+ SColumnDataAgg sma;
+ tsdbCalcColDataSMA(pColData, &sma);
- for (int32_t iBuf = 0; iBuf < sizeof((*ppReader)->aBuf) / sizeof(uint8_t *); iBuf++) {
- tFree((*ppReader)->aBuf[iBuf]);
+ code = tRealloc(&pWriter->aBuf[0], pSmaInfo->size + tPutColumnDataAgg(NULL, &sma));
+ if (code) goto _err;
+ pSmaInfo->size += tPutColumnDataAgg(pWriter->aBuf[0] + pSmaInfo->size, &sma);
}
- taosMemoryFree(*ppReader);
+ // write
+ if (pSmaInfo->size) {
+ code = tRealloc(&pWriter->aBuf[0], pSmaInfo->size);
+ if (code) goto _err;
+
+ code = tsdbWriteFile(pWriter->pSmaFD, pWriter->fSma.size, pWriter->aBuf[0], pSmaInfo->size);
+ if (code) goto _err;
+
+ pSmaInfo->offset = pWriter->fSma.size;
+ pWriter->fSma.size += pSmaInfo->size;
+ }
-_exit:
- *ppReader = NULL;
return code;
_err:
- tsdbError("vgId:%d, data file reader close failed since %s", TD_VID((*ppReader)->pTsdb->pVnode), tstrerror(code));
+ tsdbError("vgId:%d tsdb write block sma failed since %s", TD_VID(pWriter->pTsdb->pVnode), tstrerror(code));
return code;
}
-int32_t tsdbReadBlockIdx(SDataFReader *pReader, SArray *aBlockIdx) {
- int32_t code = 0;
- int64_t offset = pReader->pSet->pHeadF->offset;
- int64_t size = pReader->pSet->pHeadF->size - offset;
- int64_t n;
- uint32_t delimiter;
+int32_t tsdbWriteBlockData(SDataFWriter *pWriter, SBlockData *pBlockData, SBlockInfo *pBlkInfo, SSmaInfo *pSmaInfo,
+ int8_t cmprAlg, int8_t toLast) {
+ int32_t code = 0;
- taosArrayClear(aBlockIdx);
- if (size == 0) {
- goto _exit;
- }
+ ASSERT(pBlockData->nRow > 0);
- // alloc
- code = tRealloc(&pReader->aBuf[0], size);
+ if (toLast) {
+ pBlkInfo->offset = pWriter->fStt[pWriter->wSet.nSttF - 1].size;
+ } else {
+ pBlkInfo->offset = pWriter->fData.size;
+ }
+ pBlkInfo->szBlock = 0;
+ pBlkInfo->szKey = 0;
+
+ int32_t aBufN[4] = {0};
+ code = tCmprBlockData(pBlockData, cmprAlg, NULL, NULL, pWriter->aBuf, aBufN);
if (code) goto _err;
- // seek
- if (taosLSeekFile(pReader->pHeadFD, offset, SEEK_SET) < 0) {
- code = TAOS_SYSTEM_ERROR(errno);
- goto _err;
- }
+ // write =================
+ STsdbFD *pFD = toLast ? pWriter->pSttFD : pWriter->pDataFD;
- // read
- n = taosReadFile(pReader->pHeadFD, pReader->aBuf[0], size);
- if (n < 0) {
- code = TAOS_SYSTEM_ERROR(errno);
- goto _err;
- } else if (n < size) {
- code = TSDB_CODE_FILE_CORRUPTED;
- goto _err;
- }
+ pBlkInfo->szKey = aBufN[3] + aBufN[2];
+ pBlkInfo->szBlock = aBufN[0] + aBufN[1] + aBufN[2] + aBufN[3];
- // check
- if (!taosCheckChecksumWhole(pReader->aBuf[0], size)) {
- code = TSDB_CODE_FILE_CORRUPTED;
- goto _err;
- }
+ int64_t offset = pBlkInfo->offset;
+ code = tsdbWriteFile(pFD, offset, pWriter->aBuf[3], aBufN[3]);
+ if (code) goto _err;
+ offset += aBufN[3];
- // decode
- n = 0;
- n = tGetU32(pReader->aBuf[0] + n, &delimiter);
- ASSERT(delimiter == TSDB_FILE_DLMT);
+ code = tsdbWriteFile(pFD, offset, pWriter->aBuf[2], aBufN[2]);
+ if (code) goto _err;
+ offset += aBufN[2];
- while (n < size - sizeof(TSCKSUM)) {
- SBlockIdx blockIdx;
- n += tGetBlockIdx(pReader->aBuf[0] + n, &blockIdx);
+ if (aBufN[1]) {
+ code = tsdbWriteFile(pFD, offset, pWriter->aBuf[1], aBufN[1]);
+ if (code) goto _err;
+ offset += aBufN[1];
+ }
- if (taosArrayPush(aBlockIdx, &blockIdx) == NULL) {
- code = TSDB_CODE_OUT_OF_MEMORY;
- goto _err;
- }
+ if (aBufN[0]) {
+ code = tsdbWriteFile(pFD, offset, pWriter->aBuf[0], aBufN[0]);
+ if (code) goto _err;
+ }
+
+ // update info
+ if (toLast) {
+ pWriter->fStt[pWriter->wSet.nSttF - 1].size += pBlkInfo->szBlock;
+ } else {
+ pWriter->fData.size += pBlkInfo->szBlock;
}
- ASSERT(n + sizeof(TSCKSUM) == size);
+ // ================= SMA ====================
+ if (pSmaInfo) {
+ code = tsdbWriteBlockSma(pWriter, pBlockData, pSmaInfo);
+ if (code) goto _err;
+ }
_exit:
+ tsdbTrace("vgId:%d tsdb write block data, suid:%" PRId64 " uid:%" PRId64 " nRow:%d, offset:%" PRId64 " size:%d",
+ TD_VID(pWriter->pTsdb->pVnode), pBlockData->suid, pBlockData->uid, pBlockData->nRow, pBlkInfo->offset,
+ pBlkInfo->szBlock);
return code;
_err:
- tsdbError("vgId:%d, read block idx failed since %s", TD_VID(pReader->pTsdb->pVnode), tstrerror(code));
+ tsdbError("vgId:%d tsdb write block data failed since %s", TD_VID(pWriter->pTsdb->pVnode), tstrerror(code));
return code;
}
-int32_t tsdbReadBlockL(SDataFReader *pReader, SArray *aBlockL) {
- int32_t code = 0;
- int64_t offset = pReader->pSet->pLastF->offset;
- int64_t size = pReader->pSet->pLastF->size - offset;
- int64_t n;
- uint32_t delimiter;
+int32_t tsdbDFileSetCopy(STsdb *pTsdb, SDFileSet *pSetFrom, SDFileSet *pSetTo) {
+ int32_t code = 0;
+ int64_t n;
+ int64_t size;
+ TdFilePtr pOutFD = NULL;
+ TdFilePtr PInFD = NULL;
+ int32_t szPage = pTsdb->pVnode->config.szPage;
+ char fNameFrom[TSDB_FILENAME_LEN];
+ char fNameTo[TSDB_FILENAME_LEN];
- taosArrayClear(aBlockL);
- if (size == 0) {
- goto _exit;
+ // head
+ tsdbHeadFileName(pTsdb, pSetFrom->diskId, pSetFrom->fid, pSetFrom->pHeadF, fNameFrom);
+ tsdbHeadFileName(pTsdb, pSetTo->diskId, pSetTo->fid, pSetTo->pHeadF, fNameTo);
+ pOutFD = taosOpenFile(fNameTo, TD_FILE_WRITE | TD_FILE_CREATE | TD_FILE_TRUNC);
+ if (pOutFD == NULL) {
+ code = TAOS_SYSTEM_ERROR(errno);
+ goto _err;
}
-
- // alloc
- code = tRealloc(&pReader->aBuf[0], size);
- if (code) goto _err;
-
- // seek
- if (taosLSeekFile(pReader->pLastFD, offset, SEEK_SET) < 0) {
+ PInFD = taosOpenFile(fNameFrom, TD_FILE_READ);
+ if (PInFD == NULL) {
code = TAOS_SYSTEM_ERROR(errno);
goto _err;
}
-
- // read
- n = taosReadFile(pReader->pLastFD, pReader->aBuf[0], size);
+ n = taosFSendFile(pOutFD, PInFD, 0, tsdbLogicToFileSize(pSetFrom->pHeadF->size, szPage));
if (n < 0) {
code = TAOS_SYSTEM_ERROR(errno);
goto _err;
- } else if (n < size) {
- code = TSDB_CODE_FILE_CORRUPTED;
- goto _err;
}
+ taosCloseFile(&pOutFD);
+ taosCloseFile(&PInFD);
- // check
- if (!taosCheckChecksumWhole(pReader->aBuf[0], size)) {
- code = TSDB_CODE_FILE_CORRUPTED;
+ // data
+ tsdbDataFileName(pTsdb, pSetFrom->diskId, pSetFrom->fid, pSetFrom->pDataF, fNameFrom);
+ tsdbDataFileName(pTsdb, pSetTo->diskId, pSetTo->fid, pSetTo->pDataF, fNameTo);
+ pOutFD = taosOpenFile(fNameTo, TD_FILE_WRITE | TD_FILE_CREATE | TD_FILE_TRUNC);
+ if (pOutFD == NULL) {
+ code = TAOS_SYSTEM_ERROR(errno);
goto _err;
}
+ PInFD = taosOpenFile(fNameFrom, TD_FILE_READ);
+ if (PInFD == NULL) {
+ code = TAOS_SYSTEM_ERROR(errno);
+ goto _err;
+ }
+ n = taosFSendFile(pOutFD, PInFD, 0, LOGIC_TO_FILE_OFFSET(pSetFrom->pDataF->size, szPage));
+ if (n < 0) {
+ code = TAOS_SYSTEM_ERROR(errno);
+ goto _err;
+ }
+ taosCloseFile(&pOutFD);
+ taosCloseFile(&PInFD);
- // decode
- n = 0;
- n = tGetU32(pReader->aBuf[0] + n, &delimiter);
- ASSERT(delimiter == TSDB_FILE_DLMT);
-
- while (n < size - sizeof(TSCKSUM)) {
- SBlockL blockl;
- n += tGetBlockL(pReader->aBuf[0] + n, &blockl);
+ // sma
+ tsdbSmaFileName(pTsdb, pSetFrom->diskId, pSetFrom->fid, pSetFrom->pSmaF, fNameFrom);
+ tsdbSmaFileName(pTsdb, pSetTo->diskId, pSetTo->fid, pSetTo->pSmaF, fNameTo);
+ pOutFD = taosOpenFile(fNameTo, TD_FILE_WRITE | TD_FILE_CREATE | TD_FILE_TRUNC);
+ if (pOutFD == NULL) {
+ code = TAOS_SYSTEM_ERROR(errno);
+ goto _err;
+ }
+ PInFD = taosOpenFile(fNameFrom, TD_FILE_READ);
+ if (PInFD == NULL) {
+ code = TAOS_SYSTEM_ERROR(errno);
+ goto _err;
+ }
+ n = taosFSendFile(pOutFD, PInFD, 0, tsdbLogicToFileSize(pSetFrom->pSmaF->size, szPage));
+ if (n < 0) {
+ code = TAOS_SYSTEM_ERROR(errno);
+ goto _err;
+ }
+ taosCloseFile(&pOutFD);
+ taosCloseFile(&PInFD);
- if (taosArrayPush(aBlockL, &blockl) == NULL) {
- code = TSDB_CODE_OUT_OF_MEMORY;
+ // stt
+ for (int8_t iStt = 0; iStt < pSetFrom->nSttF; iStt++) {
+ tsdbSttFileName(pTsdb, pSetFrom->diskId, pSetFrom->fid, pSetFrom->aSttF[iStt], fNameFrom);
+ tsdbSttFileName(pTsdb, pSetTo->diskId, pSetTo->fid, pSetTo->aSttF[iStt], fNameTo);
+ pOutFD = taosOpenFile(fNameTo, TD_FILE_WRITE | TD_FILE_CREATE | TD_FILE_TRUNC);
+ if (pOutFD == NULL) {
+ code = TAOS_SYSTEM_ERROR(errno);
+ goto _err;
+ }
+ PInFD = taosOpenFile(fNameFrom, TD_FILE_READ);
+ if (PInFD == NULL) {
+ code = TAOS_SYSTEM_ERROR(errno);
+ goto _err;
+ }
+ n = taosFSendFile(pOutFD, PInFD, 0, tsdbLogicToFileSize(pSetFrom->aSttF[iStt]->size, szPage));
+ if (n < 0) {
+ code = TAOS_SYSTEM_ERROR(errno);
goto _err;
}
+ taosCloseFile(&pOutFD);
+ taosCloseFile(&PInFD);
}
- ASSERT(n + sizeof(TSCKSUM) == size);
-
-_exit:
return code;
_err:
- tsdbError("vgId:%d read blockl failed since %s", TD_VID(pReader->pTsdb->pVnode), tstrerror(code));
+ tsdbError("vgId:%d, tsdb DFileSet copy failed since %s", TD_VID(pTsdb->pVnode), tstrerror(code));
return code;
}
-int32_t tsdbReadBlock(SDataFReader *pReader, SBlockIdx *pBlockIdx, SMapData *mBlock) {
- int32_t code = 0;
- int64_t offset = pBlockIdx->offset;
- int64_t size = pBlockIdx->size;
- int64_t n;
- int64_t tn;
+// SDataFReader ====================================================
+int32_t tsdbDataFReaderOpen(SDataFReader **ppReader, STsdb *pTsdb, SDFileSet *pSet) {
+ int32_t code = 0;
+ SDataFReader *pReader;
+ int32_t szPage = pTsdb->pVnode->config.tsdbPageSize;
+ char fname[TSDB_FILENAME_LEN];
// alloc
- code = tRealloc(&pReader->aBuf[0], size);
- if (code) goto _err;
-
- // seek
- if (taosLSeekFile(pReader->pHeadFD, offset, SEEK_SET) < 0) {
- code = TAOS_SYSTEM_ERROR(errno);
+ pReader = (SDataFReader *)taosMemoryCalloc(1, sizeof(*pReader));
+ if (pReader == NULL) {
+ code = TSDB_CODE_OUT_OF_MEMORY;
goto _err;
}
+ pReader->pTsdb = pTsdb;
+ pReader->pSet = pSet;
- // read
- n = taosReadFile(pReader->pHeadFD, pReader->aBuf[0], size);
- if (n < 0) {
- code = TAOS_SYSTEM_ERROR(errno);
- goto _err;
- } else if (n < size) {
- code = TSDB_CODE_FILE_CORRUPTED;
- goto _err;
- }
+ // head
+ tsdbHeadFileName(pTsdb, pSet->diskId, pSet->fid, pSet->pHeadF, fname);
+ code = tsdbOpenFile(fname, szPage, TD_FILE_READ, &pReader->pHeadFD);
+ if (code) goto _err;
- // check
- if (!taosCheckChecksumWhole(pReader->aBuf[0], size)) {
- code = TSDB_CODE_FILE_CORRUPTED;
- goto _err;
+ // data
+ tsdbDataFileName(pTsdb, pSet->diskId, pSet->fid, pSet->pDataF, fname);
+ code = tsdbOpenFile(fname, szPage, TD_FILE_READ, &pReader->pDataFD);
+ if (code) goto _err;
+
+ // sma
+ tsdbSmaFileName(pTsdb, pSet->diskId, pSet->fid, pSet->pSmaF, fname);
+ code = tsdbOpenFile(fname, szPage, TD_FILE_READ, &pReader->pSmaFD);
+ if (code) goto _err;
+
+ // stt
+ for (int32_t iStt = 0; iStt < pSet->nSttF; iStt++) {
+ tsdbSttFileName(pTsdb, pSet->diskId, pSet->fid, pSet->aSttF[iStt], fname);
+ code = tsdbOpenFile(fname, szPage, TD_FILE_READ, &pReader->aSttFD[iStt]);
+ if (code) goto _err;
}
- // decode
- n = 0;
+ *ppReader = pReader;
+ return code;
- uint32_t delimiter;
- n += tGetU32(pReader->aBuf[0] + n, &delimiter);
- ASSERT(delimiter == TSDB_FILE_DLMT);
+_err:
+ tsdbError("vgId:%d, tsdb data file reader open failed since %s", TD_VID(pTsdb->pVnode), tstrerror(code));
+ *ppReader = NULL;
+ return code;
+}
- tn = tGetMapData(pReader->aBuf[0] + n, mBlock);
- if (tn < 0) {
- code = TSDB_CODE_OUT_OF_MEMORY;
- goto _err;
+int32_t tsdbDataFReaderClose(SDataFReader **ppReader) {
+ int32_t code = 0;
+ if (*ppReader == NULL) return code;
+
+ // head
+ tsdbCloseFile(&(*ppReader)->pHeadFD);
+
+ // data
+ tsdbCloseFile(&(*ppReader)->pDataFD);
+
+ // sma
+ tsdbCloseFile(&(*ppReader)->pSmaFD);
+
+ // stt
+ for (int32_t iStt = 0; iStt < TSDB_MAX_STT_TRIGGER; iStt++) {
+ if ((*ppReader)->aSttFD[iStt]) {
+ tsdbCloseFile(&(*ppReader)->aSttFD[iStt]);
+ }
}
- n += tn;
- ASSERT(n + sizeof(TSCKSUM) == size);
+ for (int32_t iBuf = 0; iBuf < sizeof((*ppReader)->aBuf) / sizeof(uint8_t *); iBuf++) {
+ tFree((*ppReader)->aBuf[iBuf]);
+ }
+ taosMemoryFree(*ppReader);
+ *ppReader = NULL;
return code;
_err:
- tsdbError("vgId:%d, read block failed since %s", TD_VID(pReader->pTsdb->pVnode), tstrerror(code));
+ tsdbError("vgId:%d, data file reader close failed since %s", TD_VID((*ppReader)->pTsdb->pVnode), tstrerror(code));
return code;
}
-int32_t tsdbReadBlockSma(SDataFReader *pReader, SBlock *pBlock, SArray *aColumnDataAgg) {
- int32_t code = 0;
- SSmaInfo *pSmaInfo = &pBlock->smaInfo;
-
- ASSERT(pSmaInfo->size > 0);
+int32_t tsdbReadBlockIdx(SDataFReader *pReader, SArray *aBlockIdx) {
+ int32_t code = 0;
+ SHeadFile *pHeadFile = pReader->pSet->pHeadF;
+ int64_t offset = pHeadFile->offset;
+ int64_t size = pHeadFile->size - offset;
- taosArrayClear(aColumnDataAgg);
+ taosArrayClear(aBlockIdx);
+ if (size == 0) return code;
// alloc
- int32_t size = pSmaInfo->size + sizeof(TSCKSUM);
code = tRealloc(&pReader->aBuf[0], size);
if (code) goto _err;
- // seek
- int64_t n = taosLSeekFile(pReader->pSmaFD, pSmaInfo->offset, SEEK_SET);
- if (n < 0) {
- code = TAOS_SYSTEM_ERROR(errno);
- goto _err;
- } else if (n < pSmaInfo->offset) {
- code = TSDB_CODE_FILE_CORRUPTED;
- goto _err;
- }
-
// read
- n = taosReadFile(pReader->pSmaFD, pReader->aBuf[0], size);
- if (n < 0) {
- code = TAOS_SYSTEM_ERROR(errno);
- goto _err;
- } else if (n < size) {
- code = TSDB_CODE_FILE_CORRUPTED;
- goto _err;
- }
-
- // check
- if (!taosCheckChecksumWhole(pReader->aBuf[0], size)) {
- code = TSDB_CODE_FILE_CORRUPTED;
- goto _err;
- }
+ code = tsdbReadFile(pReader->pHeadFD, offset, pReader->aBuf[0], size);
+ if (code) goto _err;
// decode
- n = 0;
- while (n < pSmaInfo->size) {
- SColumnDataAgg sma;
+ int64_t n = 0;
+ while (n < size) {
+ SBlockIdx blockIdx;
+ n += tGetBlockIdx(pReader->aBuf[0] + n, &blockIdx);
- n += tGetColumnDataAgg(pReader->aBuf[0] + n, &sma);
- if (taosArrayPush(aColumnDataAgg, &sma) == NULL) {
+ if (taosArrayPush(aBlockIdx, &blockIdx) == NULL) {
code = TSDB_CODE_OUT_OF_MEMORY;
goto _err;
}
}
+ ASSERT(n == size);
return code;
_err:
- tsdbError("vgId:%d tsdb read block sma failed since %s", TD_VID(pReader->pTsdb->pVnode), tstrerror(code));
+ tsdbError("vgId:%d, read block idx failed since %s", TD_VID(pReader->pTsdb->pVnode), tstrerror(code));
return code;
}
-static int32_t tsdbReadBlockDataImpl(SDataFReader *pReader, SBlockInfo *pBlkInfo, int8_t fromLast,
- SBlockData *pBlockData) {
- int32_t code = 0;
-
- tBlockDataClear(pBlockData);
+int32_t tsdbReadSttBlk(SDataFReader *pReader, int32_t iStt, SArray *aSttBlk) {
+ int32_t code = 0;
+ SSttFile *pSttFile = pReader->pSet->aSttF[iStt];
+ int64_t offset = pSttFile->offset;
+ int64_t size = pSttFile->size - offset;
- TdFilePtr pFD = fromLast ? pReader->pLastFD : pReader->pDataFD;
+ taosArrayClear(aSttBlk);
+ if (size == 0) return code;
- // uid + version + tskey
- code = tsdbReadAndCheck(pFD, pBlkInfo->offset, &pReader->aBuf[0], pBlkInfo->szKey, 1);
+ // alloc
+ code = tRealloc(&pReader->aBuf[0], size);
if (code) goto _err;
- SDiskDataHdr hdr;
- uint8_t *p = pReader->aBuf[0] + tGetDiskDataHdr(pReader->aBuf[0], &hdr);
+
+ // read
+ code = tsdbReadFile(pReader->aSttFD[iStt], offset, pReader->aBuf[0], size);
+ if (code) goto _err;
+
+ // decode
+ int64_t n = 0;
+ while (n < size) {
+ SSttBlk sttBlk;
+ n += tGetSttBlk(pReader->aBuf[0] + n, &sttBlk);
+
+ if (taosArrayPush(aSttBlk, &sttBlk) == NULL) {
+ code = TSDB_CODE_OUT_OF_MEMORY;
+ goto _err;
+ }
+ }
+ ASSERT(n == size);
+
+ return code;
+
+_err:
+ tsdbError("vgId:%d read stt blk failed since %s", TD_VID(pReader->pTsdb->pVnode), tstrerror(code));
+ return code;
+}
+
+int32_t tsdbReadDataBlk(SDataFReader *pReader, SBlockIdx *pBlockIdx, SMapData *mDataBlk) {
+ int32_t code = 0;
+ int64_t offset = pBlockIdx->offset;
+ int64_t size = pBlockIdx->size;
+
+ // alloc
+ code = tRealloc(&pReader->aBuf[0], size);
+ if (code) goto _err;
+
+ // read
+ code = tsdbReadFile(pReader->pHeadFD, offset, pReader->aBuf[0], size);
+ if (code) goto _err;
+
+ // decode
+ int64_t n = tGetMapData(pReader->aBuf[0], mDataBlk);
+ if (n < 0) {
+ code = TSDB_CODE_OUT_OF_MEMORY;
+ goto _err;
+ }
+ ASSERT(n == size);
+
+ return code;
+
+_err:
+ tsdbError("vgId:%d, read block failed since %s", TD_VID(pReader->pTsdb->pVnode), tstrerror(code));
+ return code;
+}
+
+int32_t tsdbReadBlockSma(SDataFReader *pReader, SDataBlk *pDataBlk, SArray *aColumnDataAgg) {
+ int32_t code = 0;
+ SSmaInfo *pSmaInfo = &pDataBlk->smaInfo;
+
+ ASSERT(pSmaInfo->size > 0);
+
+ taosArrayClear(aColumnDataAgg);
+
+ // alloc
+ code = tRealloc(&pReader->aBuf[0], pSmaInfo->size);
+ if (code) goto _err;
+
+ // read
+ code = tsdbReadFile(pReader->pSmaFD, pSmaInfo->offset, pReader->aBuf[0], pSmaInfo->size);
+ if (code) goto _err;
+
+ // decode
+ int32_t n = 0;
+ while (n < pSmaInfo->size) {
+ SColumnDataAgg sma;
+ n += tGetColumnDataAgg(pReader->aBuf[0] + n, &sma);
+
+ if (taosArrayPush(aColumnDataAgg, &sma) == NULL) {
+ code = TSDB_CODE_OUT_OF_MEMORY;
+ goto _err;
+ }
+ }
+ ASSERT(n == pSmaInfo->size);
+ return code;
+
+_err:
+ tsdbError("vgId:%d tsdb read block sma failed since %s", TD_VID(pReader->pTsdb->pVnode), tstrerror(code));
+ return code;
+}
+
+static int32_t tsdbReadBlockDataImpl(SDataFReader *pReader, SBlockInfo *pBlkInfo, SBlockData *pBlockData) {
+ int32_t code = 0;
+
+ tBlockDataClear(pBlockData);
+
+ STsdbFD *pFD = pReader->pDataFD;
+
+ // uid + version + tskey
+ code = tRealloc(&pReader->aBuf[0], pBlkInfo->szKey);
+ if (code) goto _err;
+
+ code = tsdbReadFile(pFD, pBlkInfo->offset, pReader->aBuf[0], pBlkInfo->szKey);
+ if (code) goto _err;
+
+ SDiskDataHdr hdr;
+ uint8_t *p = pReader->aBuf[0] + tGetDiskDataHdr(pReader->aBuf[0], &hdr);
ASSERT(hdr.delimiter == TSDB_FILE_DLMT);
ASSERT(pBlockData->suid == hdr.suid);
@@ -782,14 +972,18 @@ static int32_t tsdbReadBlockDataImpl(SDataFReader *pReader, SBlockInfo *pBlkInfo
if (code) goto _err;
p += hdr.szKey;
- ASSERT(p - pReader->aBuf[0] == pBlkInfo->szKey - sizeof(TSCKSUM));
+ ASSERT(p - pReader->aBuf[0] == pBlkInfo->szKey);
// read and decode columns
if (taosArrayGetSize(pBlockData->aIdx) == 0) goto _exit;
if (hdr.szBlkCol > 0) {
int64_t offset = pBlkInfo->offset + pBlkInfo->szKey;
- code = tsdbReadAndCheck(pFD, offset, &pReader->aBuf[0], hdr.szBlkCol + sizeof(TSCKSUM), 1);
+
+ code = tRealloc(&pReader->aBuf[0], hdr.szBlkCol);
+ if (code) goto _err;
+
+ code = tsdbReadFile(pFD, offset, pReader->aBuf[0], hdr.szBlkCol);
if (code) goto _err;
}
@@ -827,10 +1021,13 @@ static int32_t tsdbReadBlockDataImpl(SDataFReader *pReader, SBlockInfo *pBlkInfo
}
} else {
// decode from binary
- int64_t offset = pBlkInfo->offset + pBlkInfo->szKey + hdr.szBlkCol + sizeof(TSCKSUM) + pBlockCol->offset;
- int32_t size = pBlockCol->szBitmap + pBlockCol->szOffset + pBlockCol->szValue + sizeof(TSCKSUM);
+ int64_t offset = pBlkInfo->offset + pBlkInfo->szKey + hdr.szBlkCol + pBlockCol->offset;
+ int32_t size = pBlockCol->szBitmap + pBlockCol->szOffset + pBlockCol->szValue;
+
+ code = tRealloc(&pReader->aBuf[1], size);
+ if (code) goto _err;
- code = tsdbReadAndCheck(pFD, offset, &pReader->aBuf[1], size, 0);
+ code = tsdbReadFile(pFD, offset, pReader->aBuf[1], size);
if (code) goto _err;
code = tsdbDecmprColData(pReader->aBuf[1], pBlockCol, hdr.cmprAlg, hdr.nRow, pColData, &pReader->aBuf[2]);
@@ -847,13 +1044,36 @@ _err:
return code;
}
-int32_t tsdbReadDataBlock(SDataFReader *pReader, SBlock *pBlock, SBlockData *pBlockData) {
+int32_t tsdbReadDataBlockEx(SDataFReader *pReader, SDataBlk *pDataBlk, SBlockData *pBlockData) {
+ int32_t code = 0;
+ SBlockInfo *pBlockInfo = &pDataBlk->aSubBlock[0];
+
+ // alloc
+ code = tRealloc(&pReader->aBuf[0], pBlockInfo->szBlock);
+ if (code) goto _err;
+
+ // read
+ code = tsdbReadFile(pReader->pDataFD, pBlockInfo->offset, pReader->aBuf[0], pBlockInfo->szBlock);
+ if (code) goto _err;
+
+ // decmpr
+ code = tDecmprBlockData(pReader->aBuf[0], pBlockInfo->szBlock, pBlockData, &pReader->aBuf[1]);
+ if (code) goto _err;
+
+ return code;
+
+_err:
+ tsdbError("vgId:%d tsdb read data block ex failed since %s", TD_VID(pReader->pTsdb->pVnode), tstrerror(code));
+ return code;
+}
+
+int32_t tsdbReadDataBlock(SDataFReader *pReader, SDataBlk *pDataBlk, SBlockData *pBlockData) {
int32_t code = 0;
- code = tsdbReadBlockDataImpl(pReader, &pBlock->aSubBlock[0], 0, pBlockData);
+ code = tsdbReadBlockDataImpl(pReader, &pDataBlk->aSubBlock[0], pBlockData);
if (code) goto _err;
- if (pBlock->nSubBlock > 1) {
+ if (pDataBlk->nSubBlock > 1) {
SBlockData bData1;
SBlockData bData2;
@@ -867,8 +1087,8 @@ int32_t tsdbReadDataBlock(SDataFReader *pReader, SBlock *pBlock, SBlockData *pBl
tBlockDataInitEx(&bData1, pBlockData);
tBlockDataInitEx(&bData2, pBlockData);
- for (int32_t iSubBlock = 1; iSubBlock < pBlock->nSubBlock; iSubBlock++) {
- code = tsdbReadBlockDataImpl(pReader, &pBlock->aSubBlock[iSubBlock], 0, &bData1);
+ for (int32_t iSubBlock = 1; iSubBlock < pDataBlk->nSubBlock; iSubBlock++) {
+ code = tsdbReadBlockDataImpl(pReader, &pDataBlk->aSubBlock[iSubBlock], &bData1);
if (code) {
tBlockDataDestroy(&bData1, 1);
tBlockDataDestroy(&bData2, 1);
@@ -901,325 +1121,142 @@ _err:
return code;
}
-int32_t tsdbReadLastBlock(SDataFReader *pReader, SBlockL *pBlockL, SBlockData *pBlockData) {
+int32_t tsdbReadSttBlock(SDataFReader *pReader, int32_t iStt, SSttBlk *pSttBlk, SBlockData *pBlockData) {
int32_t code = 0;
- code = tsdbReadBlockDataImpl(pReader, &pBlockL->bInfo, 1, pBlockData);
+ // alloc
+ code = tRealloc(&pReader->aBuf[0], pSttBlk->bInfo.szBlock);
+ if (code) goto _err;
+
+ // read
+ code = tsdbReadFile(pReader->aSttFD[iStt], pSttBlk->bInfo.offset, pReader->aBuf[0], pSttBlk->bInfo.szBlock);
+ if (code) goto _err;
+
+ // decmpr
+ code = tDecmprBlockData(pReader->aBuf[0], pSttBlk->bInfo.szBlock, pBlockData, &pReader->aBuf[1]);
if (code) goto _err;
return code;
_err:
- tsdbError("vgId:%d tsdb read last block failed since %s", TD_VID(pReader->pTsdb->pVnode), tstrerror(code));
+ tsdbError("vgId:%d tsdb read stt block failed since %s", TD_VID(pReader->pTsdb->pVnode), tstrerror(code));
return code;
}
-// SDataFWriter ====================================================
-int32_t tsdbDataFWriterOpen(SDataFWriter **ppWriter, STsdb *pTsdb, SDFileSet *pSet) {
- int32_t code = 0;
- int32_t flag;
- int64_t n;
- SDataFWriter *pWriter = NULL;
- char fname[TSDB_FILENAME_LEN];
- char hdr[TSDB_FHDR_SIZE] = {0};
+// SDelFWriter ====================================================
+int32_t tsdbDelFWriterOpen(SDelFWriter **ppWriter, SDelFile *pFile, STsdb *pTsdb) {
+ int32_t code = 0;
+ char fname[TSDB_FILENAME_LEN];
+ uint8_t hdr[TSDB_FHDR_SIZE] = {0};
+ SDelFWriter *pDelFWriter;
+ int64_t n;
// alloc
- pWriter = taosMemoryCalloc(1, sizeof(*pWriter));
- if (pWriter == NULL) {
+ pDelFWriter = (SDelFWriter *)taosMemoryCalloc(1, sizeof(*pDelFWriter));
+ if (pDelFWriter == NULL) {
code = TSDB_CODE_OUT_OF_MEMORY;
goto _err;
}
- if (code) goto _err;
- pWriter->pTsdb = pTsdb;
- pWriter->wSet = (SDFileSet){.diskId = pSet->diskId,
- .fid = pSet->fid,
- .pHeadF = &pWriter->fHead,
- .pDataF = &pWriter->fData,
- .pLastF = &pWriter->fLast,
- .pSmaF = &pWriter->fSma};
- pWriter->fHead = *pSet->pHeadF;
- pWriter->fData = *pSet->pDataF;
- pWriter->fLast = *pSet->pLastF;
- pWriter->fSma = *pSet->pSmaF;
-
- // head
- flag = TD_FILE_WRITE | TD_FILE_CREATE | TD_FILE_TRUNC;
- tsdbHeadFileName(pTsdb, pWriter->wSet.diskId, pWriter->wSet.fid, &pWriter->fHead, fname);
- pWriter->pHeadFD = taosOpenFile(fname, flag);
- if (pWriter->pHeadFD == NULL) {
- code = TAOS_SYSTEM_ERROR(errno);
- goto _err;
- }
-
- n = taosWriteFile(pWriter->pHeadFD, hdr, TSDB_FHDR_SIZE);
- if (n < 0) {
- code = TAOS_SYSTEM_ERROR(errno);
- goto _err;
- }
-
- ASSERT(n == TSDB_FHDR_SIZE);
-
- pWriter->fHead.size += TSDB_FHDR_SIZE;
+ pDelFWriter->pTsdb = pTsdb;
+ pDelFWriter->fDel = *pFile;
- // data
- if (pWriter->fData.size == 0) {
- flag = TD_FILE_WRITE | TD_FILE_CREATE | TD_FILE_TRUNC;
- } else {
- flag = TD_FILE_WRITE;
- }
- tsdbDataFileName(pTsdb, pWriter->wSet.diskId, pWriter->wSet.fid, &pWriter->fData, fname);
- pWriter->pDataFD = taosOpenFile(fname, flag);
- if (pWriter->pDataFD == NULL) {
- code = TAOS_SYSTEM_ERROR(errno);
- goto _err;
- }
- if (pWriter->fData.size == 0) {
- n = taosWriteFile(pWriter->pDataFD, hdr, TSDB_FHDR_SIZE);
- if (n < 0) {
- code = TAOS_SYSTEM_ERROR(errno);
- goto _err;
- }
+ tsdbDelFileName(pTsdb, pFile, fname);
+ code = tsdbOpenFile(fname, pTsdb->pVnode->config.tsdbPageSize, TD_FILE_READ | TD_FILE_WRITE | TD_FILE_CREATE,
+ &pDelFWriter->pWriteH);
+ if (code) goto _err;
- pWriter->fData.size += TSDB_FHDR_SIZE;
- } else {
- n = taosLSeekFile(pWriter->pDataFD, 0, SEEK_END);
- if (n < 0) {
- code = TAOS_SYSTEM_ERROR(errno);
- goto _err;
- }
+ // update header
+ code = tsdbWriteFile(pDelFWriter->pWriteH, 0, hdr, TSDB_FHDR_SIZE);
+ if (code) goto _err;
- ASSERT(n == pWriter->fData.size);
- }
+ pDelFWriter->fDel.size = TSDB_FHDR_SIZE;
+ pDelFWriter->fDel.offset = 0;
- // last
- if (pWriter->fLast.size == 0) {
- flag = TD_FILE_WRITE | TD_FILE_CREATE | TD_FILE_TRUNC;
- } else {
- flag = TD_FILE_WRITE;
- }
- tsdbLastFileName(pTsdb, pWriter->wSet.diskId, pWriter->wSet.fid, &pWriter->fLast, fname);
- pWriter->pLastFD = taosOpenFile(fname, flag);
- if (pWriter->pLastFD == NULL) {
- code = TAOS_SYSTEM_ERROR(errno);
- goto _err;
- }
- if (pWriter->fLast.size == 0) {
- n = taosWriteFile(pWriter->pLastFD, hdr, TSDB_FHDR_SIZE);
- if (n < 0) {
- code = TAOS_SYSTEM_ERROR(errno);
- goto _err;
- }
+ *ppWriter = pDelFWriter;
+ return code;
- pWriter->fLast.size += TSDB_FHDR_SIZE;
- } else {
- n = taosLSeekFile(pWriter->pLastFD, 0, SEEK_END);
- if (n < 0) {
- code = TAOS_SYSTEM_ERROR(errno);
- goto _err;
- }
+_err:
+ tsdbError("vgId:%d, failed to open del file writer since %s", TD_VID(pTsdb->pVnode), tstrerror(code));
+ *ppWriter = NULL;
+ return code;
+}
- ASSERT(n == pWriter->fLast.size);
- }
+int32_t tsdbDelFWriterClose(SDelFWriter **ppWriter, int8_t sync) {
+ int32_t code = 0;
+ SDelFWriter *pWriter = *ppWriter;
+ STsdb *pTsdb = pWriter->pTsdb;
- // sma
- if (pWriter->fSma.size == 0) {
- flag = TD_FILE_WRITE | TD_FILE_CREATE | TD_FILE_TRUNC;
- } else {
- flag = TD_FILE_WRITE;
- }
- tsdbSmaFileName(pTsdb, pWriter->wSet.diskId, pWriter->wSet.fid, &pWriter->fSma, fname);
- pWriter->pSmaFD = taosOpenFile(fname, flag);
- if (pWriter->pSmaFD == NULL) {
- code = TAOS_SYSTEM_ERROR(errno);
- goto _err;
+ // sync
+ if (sync) {
+ code = tsdbFsyncFile(pWriter->pWriteH);
+ if (code) goto _err;
}
- if (pWriter->fSma.size == 0) {
- n = taosWriteFile(pWriter->pSmaFD, hdr, TSDB_FHDR_SIZE);
- if (n < 0) {
- code = TAOS_SYSTEM_ERROR(errno);
- goto _err;
- }
- pWriter->fSma.size += TSDB_FHDR_SIZE;
- } else {
- n = taosLSeekFile(pWriter->pSmaFD, 0, SEEK_END);
- if (n < 0) {
- code = TAOS_SYSTEM_ERROR(errno);
- goto _err;
- }
+ // close
+ tsdbCloseFile(&pWriter->pWriteH);
- ASSERT(n == pWriter->fSma.size);
+ for (int32_t iBuf = 0; iBuf < sizeof(pWriter->aBuf) / sizeof(uint8_t *); iBuf++) {
+ tFree(pWriter->aBuf[iBuf]);
}
+ taosMemoryFree(pWriter);
- *ppWriter = pWriter;
+ *ppWriter = NULL;
return code;
_err:
- tsdbError("vgId:%d, tsdb data file writer open failed since %s", TD_VID(pTsdb->pVnode), tstrerror(code));
- *ppWriter = NULL;
+ tsdbError("vgId:%d, failed to close del file writer since %s", TD_VID(pTsdb->pVnode), tstrerror(code));
return code;
}
-int32_t tsdbDataFWriterClose(SDataFWriter **ppWriter, int8_t sync) {
+int32_t tsdbWriteDelData(SDelFWriter *pWriter, SArray *aDelData, SDelIdx *pDelIdx) {
int32_t code = 0;
- STsdb *pTsdb = NULL;
+ int64_t size;
+ int64_t n;
- if (*ppWriter == NULL) goto _exit;
+ // prepare
+ size = 0;
+ for (int32_t iDelData = 0; iDelData < taosArrayGetSize(aDelData); iDelData++) {
+ size += tPutDelData(NULL, taosArrayGet(aDelData, iDelData));
+ }
- pTsdb = (*ppWriter)->pTsdb;
- if (sync) {
- if (taosFsyncFile((*ppWriter)->pHeadFD) < 0) {
- code = TAOS_SYSTEM_ERROR(errno);
- goto _err;
- }
+ // alloc
+ code = tRealloc(&pWriter->aBuf[0], size);
+ if (code) goto _err;
- if (taosFsyncFile((*ppWriter)->pDataFD) < 0) {
- code = TAOS_SYSTEM_ERROR(errno);
- goto _err;
- }
-
- if (taosFsyncFile((*ppWriter)->pLastFD) < 0) {
- code = TAOS_SYSTEM_ERROR(errno);
- goto _err;
- }
-
- if (taosFsyncFile((*ppWriter)->pSmaFD) < 0) {
- code = TAOS_SYSTEM_ERROR(errno);
- goto _err;
- }
- }
-
- if (taosCloseFile(&(*ppWriter)->pHeadFD) < 0) {
- code = TAOS_SYSTEM_ERROR(errno);
- goto _err;
- }
-
- if (taosCloseFile(&(*ppWriter)->pDataFD) < 0) {
- code = TAOS_SYSTEM_ERROR(errno);
- goto _err;
- }
-
- if (taosCloseFile(&(*ppWriter)->pLastFD) < 0) {
- code = TAOS_SYSTEM_ERROR(errno);
- goto _err;
- }
-
- if (taosCloseFile(&(*ppWriter)->pSmaFD) < 0) {
- code = TAOS_SYSTEM_ERROR(errno);
- goto _err;
- }
-
- for (int32_t iBuf = 0; iBuf < sizeof((*ppWriter)->aBuf) / sizeof(uint8_t *); iBuf++) {
- tFree((*ppWriter)->aBuf[iBuf]);
- }
- taosMemoryFree(*ppWriter);
-_exit:
- *ppWriter = NULL;
- return code;
-
-_err:
- tsdbError("vgId:%d, data file writer close failed since %s", TD_VID(pTsdb->pVnode), tstrerror(code));
- return code;
-}
-
-int32_t tsdbUpdateDFileSetHeader(SDataFWriter *pWriter) {
- int32_t code = 0;
- int64_t n;
- char hdr[TSDB_FHDR_SIZE];
-
- // head ==============
- memset(hdr, 0, TSDB_FHDR_SIZE);
- tPutHeadFile(hdr, &pWriter->fHead);
- taosCalcChecksumAppend(0, hdr, TSDB_FHDR_SIZE);
-
- n = taosLSeekFile(pWriter->pHeadFD, 0, SEEK_SET);
- if (n < 0) {
- code = TAOS_SYSTEM_ERROR(errno);
- goto _err;
- }
-
- n = taosWriteFile(pWriter->pHeadFD, hdr, TSDB_FHDR_SIZE);
- if (n < 0) {
- code = TAOS_SYSTEM_ERROR(errno);
- goto _err;
- }
-
- // data ==============
- memset(hdr, 0, TSDB_FHDR_SIZE);
- tPutDataFile(hdr, &pWriter->fData);
- taosCalcChecksumAppend(0, hdr, TSDB_FHDR_SIZE);
-
- n = taosLSeekFile(pWriter->pDataFD, 0, SEEK_SET);
- if (n < 0) {
- code = TAOS_SYSTEM_ERROR(errno);
- goto _err;
- }
-
- n = taosWriteFile(pWriter->pDataFD, hdr, TSDB_FHDR_SIZE);
- if (n < 0) {
- code = TAOS_SYSTEM_ERROR(errno);
- goto _err;
- }
-
- // last ==============
- memset(hdr, 0, TSDB_FHDR_SIZE);
- tPutLastFile(hdr, &pWriter->fLast);
- taosCalcChecksumAppend(0, hdr, TSDB_FHDR_SIZE);
-
- n = taosLSeekFile(pWriter->pLastFD, 0, SEEK_SET);
- if (n < 0) {
- code = TAOS_SYSTEM_ERROR(errno);
- goto _err;
- }
-
- n = taosWriteFile(pWriter->pLastFD, hdr, TSDB_FHDR_SIZE);
- if (n < 0) {
- code = TAOS_SYSTEM_ERROR(errno);
- goto _err;
+ // build
+ n = 0;
+ for (int32_t iDelData = 0; iDelData < taosArrayGetSize(aDelData); iDelData++) {
+ n += tPutDelData(pWriter->aBuf[0] + n, taosArrayGet(aDelData, iDelData));
}
+ ASSERT(n == size);
- // sma ==============
- memset(hdr, 0, TSDB_FHDR_SIZE);
- tPutSmaFile(hdr, &pWriter->fSma);
- taosCalcChecksumAppend(0, hdr, TSDB_FHDR_SIZE);
-
- n = taosLSeekFile(pWriter->pSmaFD, 0, SEEK_SET);
- if (n < 0) {
- code = TAOS_SYSTEM_ERROR(errno);
- goto _err;
- }
+ // write
+ code = tsdbWriteFile(pWriter->pWriteH, pWriter->fDel.size, pWriter->aBuf[0], size);
+ if (code) goto _err;
- n = taosWriteFile(pWriter->pSmaFD, hdr, TSDB_FHDR_SIZE);
- if (n < 0) {
- code = TAOS_SYSTEM_ERROR(errno);
- goto _err;
- }
+ // update
+ pDelIdx->offset = pWriter->fDel.size;
+ pDelIdx->size = size;
+ pWriter->fDel.size += size;
return code;
_err:
- tsdbError("vgId:%d, update DFileSet header failed since %s", TD_VID(pWriter->pTsdb->pVnode), tstrerror(code));
+ tsdbError("vgId:%d, failed to write del data since %s", TD_VID(pWriter->pTsdb->pVnode), tstrerror(code));
return code;
}
-int32_t tsdbWriteBlockIdx(SDataFWriter *pWriter, SArray *aBlockIdx) {
- int32_t code = 0;
- SHeadFile *pHeadFile = &pWriter->fHead;
- int64_t size = 0;
- int64_t n;
-
- // check
- if (taosArrayGetSize(aBlockIdx) == 0) {
- pHeadFile->offset = pHeadFile->size;
- goto _exit;
- }
+int32_t tsdbWriteDelIdx(SDelFWriter *pWriter, SArray *aDelIdx) {
+ int32_t code = 0;
+ int64_t size;
+ int64_t n;
+ SDelIdx *pDelIdx;
// prepare
- size = sizeof(uint32_t);
- for (int32_t iBlockIdx = 0; iBlockIdx < taosArrayGetSize(aBlockIdx); iBlockIdx++) {
- size += tPutBlockIdx(NULL, taosArrayGet(aBlockIdx, iBlockIdx));
+ size = 0;
+ for (int32_t iDelIdx = 0; iDelIdx < taosArrayGetSize(aDelIdx); iDelIdx++) {
+ size += tPutDelIdx(NULL, taosArrayGet(aDelIdx, iDelIdx));
}
- size += sizeof(TSCKSUM);
// alloc
code = tRealloc(&pWriter->aBuf[0], size);
@@ -1227,383 +1264,170 @@ int32_t tsdbWriteBlockIdx(SDataFWriter *pWriter, SArray *aBlockIdx) {
// build
n = 0;
- n = tPutU32(pWriter->aBuf[0] + n, TSDB_FILE_DLMT);
- for (int32_t iBlockIdx = 0; iBlockIdx < taosArrayGetSize(aBlockIdx); iBlockIdx++) {
- n += tPutBlockIdx(pWriter->aBuf[0] + n, taosArrayGet(aBlockIdx, iBlockIdx));
+ for (int32_t iDelIdx = 0; iDelIdx < taosArrayGetSize(aDelIdx); iDelIdx++) {
+ n += tPutDelIdx(pWriter->aBuf[0] + n, taosArrayGet(aDelIdx, iDelIdx));
}
- taosCalcChecksumAppend(0, pWriter->aBuf[0], size);
-
- ASSERT(n + sizeof(TSCKSUM) == size);
+ ASSERT(n == size);
// write
- n = taosWriteFile(pWriter->pHeadFD, pWriter->aBuf[0], size);
- if (n < 0) {
- code = TAOS_SYSTEM_ERROR(errno);
- goto _err;
- }
+ code = tsdbWriteFile(pWriter->pWriteH, pWriter->fDel.size, pWriter->aBuf[0], size);
+ if (code) goto _err;
// update
- pHeadFile->offset = pHeadFile->size;
- pHeadFile->size += size;
+ pWriter->fDel.offset = pWriter->fDel.size;
+ pWriter->fDel.size += size;
-_exit:
- tsdbTrace("vgId:%d write block idx, offset:%" PRId64 " size:%" PRId64 " nBlockIdx:%d", TD_VID(pWriter->pTsdb->pVnode),
- pHeadFile->offset, size, taosArrayGetSize(aBlockIdx));
return code;
_err:
- tsdbError("vgId:%d, write block idx failed since %s", TD_VID(pWriter->pTsdb->pVnode), tstrerror(code));
+ tsdbError("vgId:%d, write del idx failed since %s", TD_VID(pWriter->pTsdb->pVnode), tstrerror(code));
return code;
}
-int32_t tsdbWriteBlock(SDataFWriter *pWriter, SMapData *mBlock, SBlockIdx *pBlockIdx) {
- int32_t code = 0;
- SHeadFile *pHeadFile = &pWriter->fHead;
- int64_t size;
- int64_t n;
-
- ASSERT(mBlock->nItem > 0);
-
- // alloc
- size = sizeof(uint32_t) + tPutMapData(NULL, mBlock) + sizeof(TSCKSUM);
- code = tRealloc(&pWriter->aBuf[0], size);
- if (code) goto _err;
+int32_t tsdbUpdateDelFileHdr(SDelFWriter *pWriter) {
+ int32_t code = 0;
+ char hdr[TSDB_FHDR_SIZE] = {0};
+ int64_t size = TSDB_FHDR_SIZE;
+ int64_t n;
// build
- n = 0;
- n += tPutU32(pWriter->aBuf[0] + n, TSDB_FILE_DLMT);
- n += tPutMapData(pWriter->aBuf[0] + n, mBlock);
- taosCalcChecksumAppend(0, pWriter->aBuf[0], size);
-
- ASSERT(n + sizeof(TSCKSUM) == size);
+ tPutDelFile(hdr, &pWriter->fDel);
// write
- n = taosWriteFile(pWriter->pHeadFD, pWriter->aBuf[0], size);
- if (n < 0) {
- code = TAOS_SYSTEM_ERROR(errno);
- goto _err;
- }
-
- // update
- pBlockIdx->offset = pHeadFile->size;
- pBlockIdx->size = size;
- pHeadFile->size += size;
+ code = tsdbWriteFile(pWriter->pWriteH, 0, hdr, size);
+ if (code) goto _err;
- tsdbTrace("vgId:%d, write block, file ID:%d commit ID:%d suid:%" PRId64 " uid:%" PRId64 " offset:%" PRId64
- " size:%" PRId64 " nItem:%d",
- TD_VID(pWriter->pTsdb->pVnode), pWriter->wSet.fid, pHeadFile->commitID, pBlockIdx->suid, pBlockIdx->uid,
- pBlockIdx->offset, pBlockIdx->size, mBlock->nItem);
return code;
_err:
- tsdbError("vgId:%d, write block failed since %s", TD_VID(pWriter->pTsdb->pVnode), tstrerror(code));
+ tsdbError("vgId:%d, update del file hdr failed since %s", TD_VID(pWriter->pTsdb->pVnode), tstrerror(code));
return code;
}
+// SDelFReader ====================================================
+struct SDelFReader {
+ STsdb *pTsdb;
+ SDelFile fDel;
+ STsdbFD *pReadH;
+ uint8_t *aBuf[1];
+};
-int32_t tsdbWriteBlockL(SDataFWriter *pWriter, SArray *aBlockL) {
- int32_t code = 0;
- SLastFile *pLastFile = &pWriter->fLast;
- int64_t size;
- int64_t n;
-
- // check
- if (taosArrayGetSize(aBlockL) == 0) {
- pLastFile->offset = pLastFile->size;
- goto _exit;
- }
-
- // size
- size = sizeof(uint32_t); // TSDB_FILE_DLMT
- for (int32_t iBlockL = 0; iBlockL < taosArrayGetSize(aBlockL); iBlockL++) {
- size += tPutBlockL(NULL, taosArrayGet(aBlockL, iBlockL));
- }
- size += sizeof(TSCKSUM);
+int32_t tsdbDelFReaderOpen(SDelFReader **ppReader, SDelFile *pFile, STsdb *pTsdb) {
+ int32_t code = 0;
+ char fname[TSDB_FILENAME_LEN];
+ SDelFReader *pDelFReader;
+ int64_t n;
// alloc
- code = tRealloc(&pWriter->aBuf[0], size);
- if (code) goto _err;
-
- // encode
- n = 0;
- n += tPutU32(pWriter->aBuf[0] + n, TSDB_FILE_DLMT);
- for (int32_t iBlockL = 0; iBlockL < taosArrayGetSize(aBlockL); iBlockL++) {
- n += tPutBlockL(pWriter->aBuf[0] + n, taosArrayGet(aBlockL, iBlockL));
- }
- taosCalcChecksumAppend(0, pWriter->aBuf[0], size);
-
- ASSERT(n + sizeof(TSCKSUM) == size);
-
- // write
- n = taosWriteFile(pWriter->pLastFD, pWriter->aBuf[0], size);
- if (n < 0) {
- code = TAOS_SYSTEM_ERROR(errno);
+ pDelFReader = (SDelFReader *)taosMemoryCalloc(1, sizeof(*pDelFReader));
+ if (pDelFReader == NULL) {
+ code = TSDB_CODE_OUT_OF_MEMORY;
goto _err;
}
- // update
- pLastFile->offset = pLastFile->size;
- pLastFile->size += size;
+ // open impl
+ pDelFReader->pTsdb = pTsdb;
+ pDelFReader->fDel = *pFile;
-_exit:
- tsdbTrace("vgId:%d tsdb write blockl, loffset:%" PRId64 " size:%" PRId64, TD_VID(pWriter->pTsdb->pVnode),
- pLastFile->offset, size);
+ tsdbDelFileName(pTsdb, pFile, fname);
+ code = tsdbOpenFile(fname, pTsdb->pVnode->config.tsdbPageSize, TD_FILE_READ, &pDelFReader->pReadH);
+ if (code) goto _err;
+
+ *ppReader = pDelFReader;
return code;
_err:
- tsdbError("vgId:%d tsdb write blockl failed since %s", TD_VID(pWriter->pTsdb->pVnode), tstrerror(code));
+ tsdbError("vgId:%d, del file reader open failed since %s", TD_VID(pTsdb->pVnode), tstrerror(code));
+ *ppReader = NULL;
return code;
}
-static void tsdbUpdateBlockInfo(SBlockData *pBlockData, SBlock *pBlock) {
- for (int32_t iRow = 0; iRow < pBlockData->nRow; iRow++) {
- TSDBKEY key = {.ts = pBlockData->aTSKEY[iRow], .version = pBlockData->aVersion[iRow]};
-
- if (iRow == 0) {
- if (tsdbKeyCmprFn(&pBlock->minKey, &key) > 0) {
- pBlock->minKey = key;
- }
- } else {
- if (pBlockData->aTSKEY[iRow] == pBlockData->aTSKEY[iRow - 1]) {
- pBlock->hasDup = 1;
- }
- }
+int32_t tsdbDelFReaderClose(SDelFReader **ppReader) {
+ int32_t code = 0;
+ SDelFReader *pReader = *ppReader;
- if (iRow == pBlockData->nRow - 1 && tsdbKeyCmprFn(&pBlock->maxKey, &key) < 0) {
- pBlock->maxKey = key;
+ if (pReader) {
+ tsdbCloseFile(&pReader->pReadH);
+ for (int32_t iBuf = 0; iBuf < sizeof(pReader->aBuf) / sizeof(uint8_t *); iBuf++) {
+ tFree(pReader->aBuf[iBuf]);
}
-
- pBlock->minVer = TMIN(pBlock->minVer, key.version);
- pBlock->maxVer = TMAX(pBlock->maxVer, key.version);
+ taosMemoryFree(pReader);
}
- pBlock->nRow += pBlockData->nRow;
+ *ppReader = NULL;
+
+_exit:
+ return code;
}
-static int32_t tsdbWriteBlockSma(SDataFWriter *pWriter, SBlockData *pBlockData, SSmaInfo *pSmaInfo) {
+int32_t tsdbReadDelData(SDelFReader *pReader, SDelIdx *pDelIdx, SArray *aDelData) {
int32_t code = 0;
+ int64_t offset = pDelIdx->offset;
+ int64_t size = pDelIdx->size;
+ int64_t n;
- pSmaInfo->offset = 0;
- pSmaInfo->size = 0;
-
- // encode
- for (int32_t iColData = 0; iColData < taosArrayGetSize(pBlockData->aIdx); iColData++) {
- SColData *pColData = tBlockDataGetColDataByIdx(pBlockData, iColData);
-
- if ((!pColData->smaOn) || IS_VAR_DATA_TYPE(pColData->type)) continue;
-
- SColumnDataAgg sma;
- tsdbCalcColDataSMA(pColData, &sma);
-
- code = tRealloc(&pWriter->aBuf[0], pSmaInfo->size + tPutColumnDataAgg(NULL, &sma));
- if (code) goto _err;
- pSmaInfo->size += tPutColumnDataAgg(pWriter->aBuf[0] + pSmaInfo->size, &sma);
- }
+ taosArrayClear(aDelData);
- // write
- if (pSmaInfo->size) {
- int32_t size = pSmaInfo->size + sizeof(TSCKSUM);
+ // alloc
+ code = tRealloc(&pReader->aBuf[0], size);
+ if (code) goto _err;
- code = tRealloc(&pWriter->aBuf[0], size);
- if (code) goto _err;
+ // read
+ code = tsdbReadFile(pReader->pReadH, offset, pReader->aBuf[0], size);
+ if (code) goto _err;
- taosCalcChecksumAppend(0, pWriter->aBuf[0], size);
+ // // decode
+ n = 0;
+ while (n < size) {
+ SDelData delData;
+ n += tGetDelData(pReader->aBuf[0] + n, &delData);
- int64_t n = taosWriteFile(pWriter->pSmaFD, pWriter->aBuf[0], size);
- if (n < 0) {
- code = TAOS_SYSTEM_ERROR(errno);
+ if (taosArrayPush(aDelData, &delData) == NULL) {
+ code = TSDB_CODE_OUT_OF_MEMORY;
goto _err;
}
-
- pSmaInfo->offset = pWriter->fSma.size;
- pWriter->fSma.size += size;
}
+ ASSERT(n == size);
return code;
_err:
- tsdbError("vgId:%d tsdb write block sma failed since %s", TD_VID(pWriter->pTsdb->pVnode), tstrerror(code));
+ tsdbError("vgId:%d, read del data failed since %s", TD_VID(pReader->pTsdb->pVnode), tstrerror(code));
return code;
}
-int32_t tsdbWriteBlockData(SDataFWriter *pWriter, SBlockData *pBlockData, SBlockInfo *pBlkInfo, SSmaInfo *pSmaInfo,
- int8_t cmprAlg, int8_t toLast) {
+int32_t tsdbReadDelIdx(SDelFReader *pReader, SArray *aDelIdx) {
int32_t code = 0;
+ int32_t n;
+ int64_t offset = pReader->fDel.offset;
+ int64_t size = pReader->fDel.size - offset;
- ASSERT(pBlockData->nRow > 0);
-
- pBlkInfo->offset = toLast ? pWriter->fLast.size : pWriter->fData.size;
- pBlkInfo->szBlock = 0;
- pBlkInfo->szKey = 0;
+ taosArrayClear(aDelIdx);
- int32_t aBufN[4] = {0};
- code = tCmprBlockData(pBlockData, cmprAlg, NULL, NULL, pWriter->aBuf, aBufN);
+ // alloc
+ code = tRealloc(&pReader->aBuf[0], size);
if (code) goto _err;
- // write =================
- TdFilePtr pFD = toLast ? pWriter->pLastFD : pWriter->pDataFD;
-
- pBlkInfo->szKey = aBufN[3] + aBufN[2];
- pBlkInfo->szBlock = aBufN[0] + aBufN[1] + aBufN[2] + aBufN[3];
-
- int64_t n = taosWriteFile(pFD, pWriter->aBuf[3], aBufN[3]);
- if (n < 0) {
- code = TAOS_SYSTEM_ERROR(errno);
- goto _err;
- }
+ // read
+ code = tsdbReadFile(pReader->pReadH, offset, pReader->aBuf[0], size);
+ if (code) goto _err;
- n = taosWriteFile(pFD, pWriter->aBuf[2], aBufN[2]);
- if (n < 0) {
- code = TAOS_SYSTEM_ERROR(errno);
- goto _err;
- }
+ // decode
+ n = 0;
+ while (n < size) {
+ SDelIdx delIdx;
- if (aBufN[1]) {
- n = taosWriteFile(pFD, pWriter->aBuf[1], aBufN[1]);
- if (n < 0) {
- code = TAOS_SYSTEM_ERROR(errno);
- goto _err;
- }
- }
+ n += tGetDelIdx(pReader->aBuf[0] + n, &delIdx);
- if (aBufN[0]) {
- n = taosWriteFile(pFD, pWriter->aBuf[0], aBufN[0]);
- if (n < 0) {
- code = TAOS_SYSTEM_ERROR(errno);
+ if (taosArrayPush(aDelIdx, &delIdx) == NULL) {
+ code = TSDB_CODE_OUT_OF_MEMORY;
goto _err;
}
}
- // update info
- if (toLast) {
- pWriter->fLast.size += pBlkInfo->szBlock;
- } else {
- pWriter->fData.size += pBlkInfo->szBlock;
- }
-
- // ================= SMA ====================
- if (pSmaInfo) {
- code = tsdbWriteBlockSma(pWriter, pBlockData, pSmaInfo);
- if (code) goto _err;
- }
-
-_exit:
- tsdbTrace("vgId:%d tsdb write block data, suid:%" PRId64 " uid:%" PRId64 " nRow:%d, offset:%" PRId64 " size:%d",
- TD_VID(pWriter->pTsdb->pVnode), pBlockData->suid, pBlockData->uid, pBlockData->nRow, pBlkInfo->offset,
- pBlkInfo->szBlock);
- return code;
-
-_err:
- tsdbError("vgId:%d tsdb write block data failed since %s", TD_VID(pWriter->pTsdb->pVnode), tstrerror(code));
- return code;
-}
-
-int32_t tsdbDFileSetCopy(STsdb *pTsdb, SDFileSet *pSetFrom, SDFileSet *pSetTo) {
- int32_t code = 0;
- int64_t n;
- int64_t size;
- TdFilePtr pOutFD = NULL; // TODO
- TdFilePtr PInFD = NULL; // TODO
- char fNameFrom[TSDB_FILENAME_LEN];
- char fNameTo[TSDB_FILENAME_LEN];
-
- // head
- tsdbHeadFileName(pTsdb, pSetFrom->diskId, pSetFrom->fid, pSetFrom->pHeadF, fNameFrom);
- tsdbHeadFileName(pTsdb, pSetTo->diskId, pSetTo->fid, pSetTo->pHeadF, fNameTo);
-
- pOutFD = taosOpenFile(fNameTo, TD_FILE_WRITE | TD_FILE_CREATE | TD_FILE_TRUNC);
- if (pOutFD == NULL) {
- code = TAOS_SYSTEM_ERROR(errno);
- goto _err;
- }
-
- PInFD = taosOpenFile(fNameFrom, TD_FILE_READ);
- if (PInFD == NULL) {
- code = TAOS_SYSTEM_ERROR(errno);
- goto _err;
- }
-
- n = taosFSendFile(pOutFD, PInFD, 0, pSetFrom->pHeadF->size);
- if (n < 0) {
- code = TAOS_SYSTEM_ERROR(errno);
- goto _err;
- }
- taosCloseFile(&pOutFD);
- taosCloseFile(&PInFD);
-
- // data
- tsdbDataFileName(pTsdb, pSetFrom->diskId, pSetFrom->fid, pSetFrom->pDataF, fNameFrom);
- tsdbDataFileName(pTsdb, pSetTo->diskId, pSetTo->fid, pSetTo->pDataF, fNameTo);
-
- pOutFD = taosOpenFile(fNameTo, TD_FILE_WRITE | TD_FILE_CREATE | TD_FILE_TRUNC);
- if (pOutFD == NULL) {
- code = TAOS_SYSTEM_ERROR(errno);
- goto _err;
- }
-
- PInFD = taosOpenFile(fNameFrom, TD_FILE_READ);
- if (PInFD == NULL) {
- code = TAOS_SYSTEM_ERROR(errno);
- goto _err;
- }
-
- n = taosFSendFile(pOutFD, PInFD, 0, pSetFrom->pDataF->size);
- if (n < 0) {
- code = TAOS_SYSTEM_ERROR(errno);
- goto _err;
- }
- taosCloseFile(&pOutFD);
- taosCloseFile(&PInFD);
-
- // last
- tsdbLastFileName(pTsdb, pSetFrom->diskId, pSetFrom->fid, pSetFrom->pLastF, fNameFrom);
- tsdbLastFileName(pTsdb, pSetTo->diskId, pSetTo->fid, pSetTo->pLastF, fNameTo);
-
- pOutFD = taosOpenFile(fNameTo, TD_FILE_WRITE | TD_FILE_CREATE | TD_FILE_TRUNC);
- if (pOutFD == NULL) {
- code = TAOS_SYSTEM_ERROR(errno);
- goto _err;
- }
-
- PInFD = taosOpenFile(fNameFrom, TD_FILE_READ);
- if (PInFD == NULL) {
- code = TAOS_SYSTEM_ERROR(errno);
- goto _err;
- }
-
- n = taosFSendFile(pOutFD, PInFD, 0, pSetFrom->pLastF->size);
- if (n < 0) {
- code = TAOS_SYSTEM_ERROR(errno);
- goto _err;
- }
- taosCloseFile(&pOutFD);
- taosCloseFile(&PInFD);
-
- // sma
- tsdbSmaFileName(pTsdb, pSetFrom->diskId, pSetFrom->fid, pSetFrom->pSmaF, fNameFrom);
- tsdbSmaFileName(pTsdb, pSetTo->diskId, pSetTo->fid, pSetTo->pSmaF, fNameTo);
-
- pOutFD = taosOpenFile(fNameTo, TD_FILE_WRITE | TD_FILE_CREATE | TD_FILE_TRUNC);
- if (pOutFD == NULL) {
- code = TAOS_SYSTEM_ERROR(errno);
- goto _err;
- }
-
- PInFD = taosOpenFile(fNameFrom, TD_FILE_READ);
- if (PInFD == NULL) {
- code = TAOS_SYSTEM_ERROR(errno);
- goto _err;
- }
-
- n = taosFSendFile(pOutFD, PInFD, 0, pSetFrom->pSmaF->size);
- if (n < 0) {
- code = TAOS_SYSTEM_ERROR(errno);
- goto _err;
- }
- taosCloseFile(&pOutFD);
- taosCloseFile(&PInFD);
+ ASSERT(n == size);
return code;
_err:
- tsdbError("vgId:%d, tsdb DFileSet copy failed since %s", TD_VID(pTsdb->pVnode), tstrerror(code));
+ tsdbError("vgId:%d, read del idx failed since %s", TD_VID(pReader->pTsdb->pVnode), tstrerror(code));
return code;
-}
+}
\ No newline at end of file
diff --git a/source/dnode/vnode/src/tsdb/tsdbRetention.c b/source/dnode/vnode/src/tsdb/tsdbRetention.c
index a30b9154ab07084adc31c65089d223ac728445ae..d99bf2aa5c773f09bf135eb6b7c18f5984ace083 100644
--- a/source/dnode/vnode/src/tsdb/tsdbRetention.c
+++ b/source/dnode/vnode/src/tsdb/tsdbRetention.c
@@ -60,7 +60,7 @@ int32_t tsdbDoRetention(STsdb *pTsdb, int64_t now) {
if (expLevel < 0) {
taosMemoryFree(pSet->pHeadF);
taosMemoryFree(pSet->pDataF);
- taosMemoryFree(pSet->pLastF);
+ taosMemoryFree(pSet->aSttF[0]);
taosMemoryFree(pSet->pSmaF);
taosArrayRemove(fs.aDFileSet, iSet);
iSet--;
@@ -82,8 +82,6 @@ int32_t tsdbDoRetention(STsdb *pTsdb, int64_t now) {
code = tsdbFSUpsertFSet(&fs, &fSet);
if (code) goto _err;
}
-
- /* code */
}
// do change fs
diff --git a/source/dnode/vnode/src/tsdb/tsdbSnapshot.c b/source/dnode/vnode/src/tsdb/tsdbSnapshot.c
index ab2b2b617a3d36dbc2c86c2a2207cffac8f087f6..99e88a442c16e77f2db2ca752eb54f4e120532f8 100644
--- a/source/dnode/vnode/src/tsdb/tsdbSnapshot.c
+++ b/source/dnode/vnode/src/tsdb/tsdbSnapshot.c
@@ -16,6 +16,29 @@
#include "tsdb.h"
// STsdbSnapReader ========================================
+typedef enum { SNAP_DATA_FILE_ITER = 0, SNAP_STT_FILE_ITER } EFIterT;
+typedef struct {
+ SRBTreeNode n;
+ SRowInfo rInfo;
+ EFIterT type;
+ union {
+ struct {
+ SArray* aBlockIdx;
+ int32_t iBlockIdx;
+ SBlockIdx* pBlockIdx;
+ SMapData mBlock;
+ int32_t iBlock;
+ }; // .data file
+ struct {
+ int32_t iStt;
+ SArray* aSttBlk;
+ int32_t iSttBlk;
+ }; // .stt file
+ };
+ SBlockData bData;
+ int32_t iRow;
+} SFDataIter;
+
struct STsdbSnapReader {
STsdb* pTsdb;
int64_t sver;
@@ -26,146 +49,301 @@ struct STsdbSnapReader {
int8_t dataDone;
int32_t fid;
SDataFReader* pDataFReader;
- SArray* aBlockIdx; // SArray
- SArray* aBlockL; // SArray
- SBlockIdx* pBlockIdx;
- SBlockL* pBlockL;
-
- int32_t iBlockIdx;
- int32_t iBlockL;
- SMapData mBlock; // SMapData
- int32_t iBlock;
- SBlockData oBlockData;
- SBlockData nBlockData;
+ SFDataIter* pIter;
+ SRBTree rbt;
+ SFDataIter aFDataIter[TSDB_MAX_STT_TRIGGER + 1];
+ SBlockData bData;
+ SSkmInfo skmTable;
// for del file
int8_t delDone;
SDelFReader* pDelFReader;
SArray* aDelIdx; // SArray
int32_t iDelIdx;
SArray* aDelData; // SArray
+ uint8_t* aBuf[5];
};
-static int32_t tsdbSnapReadData(STsdbSnapReader* pReader, uint8_t** ppData) {
+extern int32_t tRowInfoCmprFn(const void* p1, const void* p2);
+extern int32_t tsdbReadDataBlockEx(SDataFReader* pReader, SDataBlk* pDataBlk, SBlockData* pBlockData);
+extern int32_t tsdbUpdateTableSchema(SMeta* pMeta, int64_t suid, int64_t uid, SSkmInfo* pSkmInfo);
+
+static int32_t tsdbSnapReadOpenFile(STsdbSnapReader* pReader) {
int32_t code = 0;
- STsdb* pTsdb = pReader->pTsdb;
- while (true) {
- if (pReader->pDataFReader == NULL) {
- // next
- SDFileSet dFileSet = {.fid = pReader->fid};
- SDFileSet* pSet = taosArraySearch(pReader->fs.aDFileSet, &dFileSet, tDFileSetCmprFn, TD_GT);
- if (pSet == NULL) goto _exit;
- pReader->fid = pSet->fid;
-
- // load
- code = tsdbDataFReaderOpen(&pReader->pDataFReader, pTsdb, pSet);
- if (code) goto _err;
+ SDFileSet dFileSet = {.fid = pReader->fid};
+ SDFileSet* pSet = taosArraySearch(pReader->fs.aDFileSet, &dFileSet, tDFileSetCmprFn, TD_GT);
+ if (pSet == NULL) return code;
- code = tsdbReadBlockIdx(pReader->pDataFReader, pReader->aBlockIdx);
- if (code) goto _err;
+ pReader->fid = pSet->fid;
+ code = tsdbDataFReaderOpen(&pReader->pDataFReader, pReader->pTsdb, pSet);
+ if (code) goto _err;
+
+ pReader->pIter = NULL;
+ tRBTreeCreate(&pReader->rbt, tRowInfoCmprFn);
+
+ // .data file
+ SFDataIter* pIter = &pReader->aFDataIter[0];
+ pIter->type = SNAP_DATA_FILE_ITER;
+
+ code = tsdbReadBlockIdx(pReader->pDataFReader, pIter->aBlockIdx);
+ if (code) goto _err;
- code = tsdbReadBlockL(pReader->pDataFReader, pReader->aBlockL);
+ for (pIter->iBlockIdx = 0; pIter->iBlockIdx < taosArrayGetSize(pIter->aBlockIdx); pIter->iBlockIdx++) {
+ pIter->pBlockIdx = (SBlockIdx*)taosArrayGet(pIter->aBlockIdx, pIter->iBlockIdx);
+
+ code = tsdbReadDataBlk(pReader->pDataFReader, pIter->pBlockIdx, &pIter->mBlock);
+ if (code) goto _err;
+
+ for (pIter->iBlock = 0; pIter->iBlock < pIter->mBlock.nItem; pIter->iBlock++) {
+ SDataBlk dataBlk;
+ tMapDataGetItemByIdx(&pIter->mBlock, pIter->iBlock, &dataBlk, tGetDataBlk);
+
+ if (dataBlk.minVer > pReader->ever || dataBlk.maxVer < pReader->sver) continue;
+
+ code = tsdbReadDataBlockEx(pReader->pDataFReader, &dataBlk, &pIter->bData);
if (code) goto _err;
- // init
- pReader->iBlockIdx = 0;
- if (pReader->iBlockIdx < taosArrayGetSize(pReader->aBlockIdx)) {
- pReader->pBlockIdx = (SBlockIdx*)taosArrayGet(pReader->aBlockIdx, pReader->iBlockIdx);
+ ASSERT(pIter->pBlockIdx->suid == pIter->bData.suid);
+ ASSERT(pIter->pBlockIdx->uid == pIter->bData.uid);
- code = tsdbReadBlock(pReader->pDataFReader, pReader->pBlockIdx, &pReader->mBlock);
- if (code) goto _err;
+ for (pIter->iRow = 0; pIter->iRow < pIter->bData.nRow; pIter->iRow++) {
+ int64_t rowVer = pIter->bData.aVersion[pIter->iRow];
- pReader->iBlock = 0;
- } else {
- pReader->pBlockIdx = NULL;
+ if (rowVer >= pReader->sver && rowVer <= pReader->ever) {
+ pIter->rInfo.suid = pIter->pBlockIdx->suid;
+ pIter->rInfo.uid = pIter->pBlockIdx->uid;
+ pIter->rInfo.row = tsdbRowFromBlockData(&pIter->bData, pIter->iRow);
+ goto _add_iter_and_break;
+ }
}
+ }
- pReader->iBlockL = 0;
- while (true) {
- if (pReader->iBlockL >= taosArrayGetSize(pReader->aBlockL)) {
- pReader->pBlockL = NULL;
- break;
- }
+ continue;
- pReader->pBlockL = (SBlockL*)taosArrayGet(pReader->aBlockL, pReader->iBlockL);
- if (pReader->pBlockL->minVer <= pReader->ever && pReader->pBlockL->maxVer >= pReader->sver) {
- // TODO
- break;
- }
+ _add_iter_and_break:
+ tRBTreePut(&pReader->rbt, (SRBTreeNode*)pIter);
+ break;
+ }
- pReader->iBlockL++;
- }
+ // .stt file
+ pIter = &pReader->aFDataIter[1];
+ for (int32_t iStt = 0; iStt < pSet->nSttF; iStt++) {
+ pIter->type = SNAP_STT_FILE_ITER;
+ pIter->iStt = iStt;
- tsdbInfo("vgId:%d, vnode snapshot tsdb open data file to read for %s, fid:%d", TD_VID(pTsdb->pVnode), pTsdb->path,
- pReader->fid);
+ code = tsdbReadSttBlk(pReader->pDataFReader, iStt, pIter->aSttBlk);
+ if (code) goto _err;
+
+ for (pIter->iSttBlk = 0; pIter->iSttBlk < taosArrayGetSize(pIter->aSttBlk); pIter->iSttBlk++) {
+ SSttBlk* pSttBlk = (SSttBlk*)taosArrayGet(pIter->aSttBlk, pIter->iSttBlk);
+
+ if (pSttBlk->minVer > pReader->ever) continue;
+ if (pSttBlk->maxVer < pReader->sver) continue;
+
+ code = tsdbReadSttBlock(pReader->pDataFReader, iStt, pSttBlk, &pIter->bData);
+ if (code) goto _err;
+
+ for (pIter->iRow = 0; pIter->iRow < pIter->bData.nRow; pIter->iRow++) {
+ int64_t rowVer = pIter->bData.aVersion[pIter->iRow];
+
+ if (rowVer >= pReader->sver && rowVer <= pReader->ever) {
+ pIter->rInfo.suid = pIter->bData.suid;
+ pIter->rInfo.uid = pIter->bData.uid;
+ pIter->rInfo.row = tsdbRowFromBlockData(&pIter->bData, pIter->iRow);
+ goto _add_iter;
+ }
+ }
}
- while (true) {
- if (pReader->pBlockIdx && pReader->pBlockL) {
- TABLEID id = {.suid = pReader->pBlockL->suid, .uid = pReader->pBlockL->minUid};
+ continue;
- ASSERT(0);
+ _add_iter:
+ tRBTreePut(&pReader->rbt, (SRBTreeNode*)pIter);
+ pIter++;
+ }
- // if (tTABLEIDCmprFn(pReader->pBlockIdx, &minId) < 0) {
- // // TODO
- // } else if (tTABLEIDCmprFn(pReader->pBlockIdx, &maxId) < 0) {
- // // TODO
- // } else {
- // // TODO
- // }
- } else if (pReader->pBlockIdx) {
- while (pReader->iBlock < pReader->mBlock.nItem) {
- SBlock block;
- tMapDataGetItemByIdx(&pReader->mBlock, pReader->iBlock, &block, tGetBlock);
-
- if (block.minVer <= pReader->ever && block.maxVer >= pReader->sver) {
- // load data (todo)
- }
+ tsdbInfo("vgId:%d, vnode snapshot tsdb open data file to read for %s, fid:%d", TD_VID(pReader->pTsdb->pVnode),
+ pReader->pTsdb->path, pReader->fid);
+ return code;
- // next
- pReader->iBlock++;
- if (*ppData) break;
+_err:
+ tsdbError("vgId:%d vnode snapshot tsdb snap read open file failed since %s", TD_VID(pReader->pTsdb->pVnode),
+ tstrerror(code));
+ return code;
+}
+
+static SRowInfo* tsdbSnapGetRow(STsdbSnapReader* pReader) { return pReader->pIter ? &pReader->pIter->rInfo : NULL; }
+
+static int32_t tsdbSnapNextRow(STsdbSnapReader* pReader) {
+ int32_t code = 0;
+
+ if (pReader->pIter) {
+ SFDataIter* pIter = pReader->pIter;
+
+ while (true) {
+ _find_row:
+ for (pIter->iRow++; pIter->iRow < pIter->bData.nRow; pIter->iRow++) {
+ int64_t rowVer = pIter->bData.aVersion[pIter->iRow];
+
+ if (rowVer >= pReader->sver && rowVer <= pReader->ever) {
+ pIter->rInfo.uid = pIter->bData.uid ? pIter->bData.uid : pIter->bData.aUid[pIter->iRow];
+ pIter->rInfo.row = tsdbRowFromBlockData(&pIter->bData, pIter->iRow);
+ goto _out;
}
+ }
- if (pReader->iBlock >= pReader->mBlock.nItem) {
- pReader->iBlockIdx++;
- if (pReader->iBlockIdx < taosArrayGetSize(pReader->aBlockIdx)) {
- pReader->pBlockIdx = (SBlockIdx*)taosArrayGet(pReader->aBlockIdx, pReader->iBlockIdx);
+ if (pIter->type == SNAP_DATA_FILE_ITER) {
+ while (true) {
+ for (pIter->iBlock++; pIter->iBlock < pIter->mBlock.nItem; pIter->iBlock++) {
+ SDataBlk dataBlk;
+ tMapDataGetItemByIdx(&pIter->mBlock, pIter->iBlock, &dataBlk, tGetDataBlk);
- code = tsdbReadBlock(pReader->pDataFReader, pReader->pBlockIdx, &pReader->mBlock);
+ if (dataBlk.minVer > pReader->ever || dataBlk.maxVer < pReader->sver) continue;
+
+ code = tsdbReadDataBlockEx(pReader->pDataFReader, &dataBlk, &pIter->bData);
if (code) goto _err;
- pReader->iBlock = 0;
- } else {
- pReader->pBlockIdx = NULL;
+ pIter->iRow = -1;
+ goto _find_row;
}
+
+ pIter->iBlockIdx++;
+ if (pIter->iBlockIdx >= taosArrayGetSize(pIter->aBlockIdx)) break;
+
+ pIter->pBlockIdx = (SBlockIdx*)taosArrayGet(pIter->aBlockIdx, pIter->iBlockIdx);
+ code = tsdbReadDataBlk(pReader->pDataFReader, pIter->pBlockIdx, &pIter->mBlock);
+ if (code) goto _err;
+ pIter->iBlock = -1;
}
- if (*ppData) goto _exit;
- } else if (pReader->pBlockL) {
- while (pReader->pBlockL) {
- if (pReader->pBlockL->minVer <= pReader->ever && pReader->pBlockL->maxVer >= pReader->sver) {
- // load data (todo)
- }
+ pReader->pIter = NULL;
+ } else if (pIter->type == SNAP_STT_FILE_ITER) {
+ for (pIter->iSttBlk++; pIter->iSttBlk < taosArrayGetSize(pIter->aSttBlk); pIter->iSttBlk++) {
+ SSttBlk* pSttBlk = (SSttBlk*)taosArrayGet(pIter->aSttBlk, pIter->iSttBlk);
- // next
- pReader->iBlockL++;
- if (pReader->iBlockL < taosArrayGetSize(pReader->aBlockL)) {
- pReader->pBlockL = (SBlockL*)taosArrayGetSize(pReader->aBlockL);
- } else {
- pReader->pBlockL = NULL;
- }
+ if (pSttBlk->minVer > pReader->ever || pSttBlk->maxVer < pReader->sver) continue;
- if (*ppData) goto _exit;
+ code = tsdbReadSttBlock(pReader->pDataFReader, pIter->iStt, pSttBlk, &pIter->bData);
+ if (code) goto _err;
+
+ pIter->iRow = -1;
+ goto _find_row;
}
+
+ pReader->pIter = NULL;
+ } else {
+ ASSERT(0);
+ }
+ }
+
+ _out:
+ pIter = (SFDataIter*)tRBTreeMin(&pReader->rbt);
+ if (pReader->pIter && pIter) {
+ int32_t c = tRowInfoCmprFn(&pReader->pIter->rInfo, &pIter->rInfo);
+ if (c > 0) {
+ tRBTreePut(&pReader->rbt, (SRBTreeNode*)pReader->pIter);
+ pReader->pIter = NULL;
} else {
+ ASSERT(c);
+ }
+ }
+ }
+
+ if (pReader->pIter == NULL) {
+ pReader->pIter = (SFDataIter*)tRBTreeMin(&pReader->rbt);
+ if (pReader->pIter) {
+ tRBTreeDrop(&pReader->rbt, (SRBTreeNode*)pReader->pIter);
+ }
+ }
+
+ return code;
+
+_err:
+ return code;
+}
+
+static int32_t tsdbSnapCmprData(STsdbSnapReader* pReader, uint8_t** ppData) {
+ int32_t code = 0;
+
+ ASSERT(pReader->bData.nRow);
+
+ int32_t aBufN[5] = {0};
+ code = tCmprBlockData(&pReader->bData, TWO_STAGE_COMP, NULL, NULL, pReader->aBuf, aBufN);
+ if (code) goto _exit;
+
+ int32_t size = aBufN[0] + aBufN[1] + aBufN[2] + aBufN[3];
+ *ppData = taosMemoryMalloc(sizeof(SSnapDataHdr) + size);
+ if (*ppData == NULL) {
+ code = TSDB_CODE_OUT_OF_MEMORY;
+ goto _exit;
+ }
+
+ SSnapDataHdr* pHdr = (SSnapDataHdr*)*ppData;
+ pHdr->type = SNAP_DATA_TSDB;
+ pHdr->size = size;
+
+ memcpy(pHdr->data, pReader->aBuf[3], aBufN[3]);
+ memcpy(pHdr->data + aBufN[3], pReader->aBuf[2], aBufN[2]);
+ if (aBufN[1]) {
+ memcpy(pHdr->data + aBufN[3] + aBufN[2], pReader->aBuf[1], aBufN[1]);
+ }
+ if (aBufN[0]) {
+ memcpy(pHdr->data + aBufN[3] + aBufN[2] + aBufN[1], pReader->aBuf[0], aBufN[0]);
+ }
+
+_exit:
+ return code;
+}
+
+static int32_t tsdbSnapReadData(STsdbSnapReader* pReader, uint8_t** ppData) {
+ int32_t code = 0;
+ STsdb* pTsdb = pReader->pTsdb;
+
+ while (true) {
+ if (pReader->pDataFReader == NULL) {
+ code = tsdbSnapReadOpenFile(pReader);
+ if (code) goto _err;
+ }
+
+ if (pReader->pDataFReader == NULL) break;
+
+ SRowInfo* pRowInfo = tsdbSnapGetRow(pReader);
+ if (pRowInfo == NULL) {
+ tsdbDataFReaderClose(&pReader->pDataFReader);
+ continue;
+ }
+
+ TABLEID id = {.suid = pRowInfo->suid, .uid = pRowInfo->uid};
+ SBlockData* pBlockData = &pReader->bData;
+
+ code = tsdbUpdateTableSchema(pTsdb->pVnode->pMeta, id.suid, id.uid, &pReader->skmTable);
+ if (code) goto _err;
+
+ code = tBlockDataInit(pBlockData, id.suid, id.uid, pReader->skmTable.pTSchema);
+ if (code) goto _err;
+
+ while (pRowInfo->suid == id.suid && pRowInfo->uid == id.uid) {
+ code = tBlockDataAppendRow(pBlockData, &pRowInfo->row, NULL, pRowInfo->uid);
+ if (code) goto _err;
+
+ code = tsdbSnapNextRow(pReader);
+ if (code) goto _err;
+
+ pRowInfo = tsdbSnapGetRow(pReader);
+ if (pRowInfo == NULL) {
tsdbDataFReaderClose(&pReader->pDataFReader);
break;
}
+
+ if (pBlockData->nRow >= 4096) break;
}
+
+ code = tsdbSnapCmprData(pReader, ppData);
+ if (code) goto _err;
+
+ break;
}
-_exit:
return code;
_err:
@@ -216,7 +394,6 @@ static int32_t tsdbSnapReadDel(STsdbSnapReader* pReader, uint8_t** ppData) {
size += tPutDelData(NULL, pDelData);
}
}
-
if (size == 0) continue;
// org data
@@ -292,23 +469,33 @@ int32_t tsdbSnapReaderOpen(STsdb* pTsdb, int64_t sver, int64_t ever, int8_t type
goto _err;
}
+ // data
pReader->fid = INT32_MIN;
- pReader->aBlockIdx = taosArrayInit(0, sizeof(SBlockIdx));
- if (pReader->aBlockIdx == NULL) {
- code = TSDB_CODE_OUT_OF_MEMORY;
- goto _err;
- }
- pReader->aBlockL = taosArrayInit(0, sizeof(SBlockL));
- if (pReader->aBlockL == NULL) {
- code = TSDB_CODE_OUT_OF_MEMORY;
- goto _err;
+ for (int32_t iIter = 0; iIter < sizeof(pReader->aFDataIter) / sizeof(pReader->aFDataIter[0]); iIter++) {
+ SFDataIter* pIter = &pReader->aFDataIter[iIter];
+
+ if (iIter == 0) {
+ pIter->aBlockIdx = taosArrayInit(0, sizeof(SBlockIdx));
+ if (pIter->aBlockIdx == NULL) {
+ code = TSDB_CODE_OUT_OF_MEMORY;
+ goto _err;
+ }
+ } else {
+ pIter->aSttBlk = taosArrayInit(0, sizeof(SSttBlk));
+ if (pIter->aSttBlk == NULL) {
+ code = TSDB_CODE_OUT_OF_MEMORY;
+ goto _err;
+ }
+ }
+
+ code = tBlockDataCreate(&pIter->bData);
+ if (code) goto _err;
}
- pReader->mBlock = tMapDataInit();
- code = tBlockDataCreate(&pReader->oBlockData);
- if (code) goto _err;
- code = tBlockDataCreate(&pReader->nBlockData);
+
+ code = tBlockDataCreate(&pReader->bData);
if (code) goto _err;
+ // del
pReader->aDelIdx = taosArrayInit(0, sizeof(SDelIdx));
if (pReader->aDelIdx == NULL) {
code = TSDB_CODE_OUT_OF_MEMORY;
@@ -335,18 +522,26 @@ int32_t tsdbSnapReaderClose(STsdbSnapReader** ppReader) {
int32_t code = 0;
STsdbSnapReader* pReader = *ppReader;
- if (pReader->pDataFReader) {
- tsdbDataFReaderClose(&pReader->pDataFReader);
- }
- taosArrayDestroy(pReader->aBlockL);
- taosArrayDestroy(pReader->aBlockIdx);
- tMapDataClear(&pReader->mBlock);
- tBlockDataDestroy(&pReader->oBlockData, 1);
- tBlockDataDestroy(&pReader->nBlockData, 1);
-
- if (pReader->pDelFReader) {
- tsdbDelFReaderClose(&pReader->pDelFReader);
+ // data
+ if (pReader->pDataFReader) tsdbDataFReaderClose(&pReader->pDataFReader);
+ for (int32_t iIter = 0; iIter < sizeof(pReader->aFDataIter) / sizeof(pReader->aFDataIter[0]); iIter++) {
+ SFDataIter* pIter = &pReader->aFDataIter[iIter];
+
+ if (iIter == 0) {
+ taosArrayDestroy(pIter->aBlockIdx);
+ tMapDataClear(&pIter->mBlock);
+ } else {
+ taosArrayDestroy(pIter->aSttBlk);
+ }
+
+ tBlockDataDestroy(&pIter->bData, 1);
}
+
+ tBlockDataDestroy(&pReader->bData, 1);
+ tTSchemaDestroy(pReader->skmTable.pTSchema);
+
+ // del
+ if (pReader->pDelFReader) tsdbDelFReaderClose(&pReader->pDelFReader);
taosArrayDestroy(pReader->aDelIdx);
taosArrayDestroy(pReader->aDelData);
@@ -354,6 +549,10 @@ int32_t tsdbSnapReaderClose(STsdbSnapReader** ppReader) {
tsdbInfo("vgId:%d, vnode snapshot tsdb reader closed for %s", TD_VID(pReader->pTsdb->pVnode), pReader->pTsdb->path);
+ for (int32_t iBuf = 0; iBuf < sizeof(pReader->aBuf) / sizeof(pReader->aBuf[0]); iBuf++) {
+ tFree(pReader->aBuf[iBuf]);
+ }
+
taosMemoryFree(pReader);
*ppReader = NULL;
return code;
@@ -410,40 +609,37 @@ struct STsdbSnapWriter {
STsdbFS fs;
// config
- int32_t minutes;
- int8_t precision;
- int32_t minRow;
- int32_t maxRow;
- int8_t cmprAlg;
- int64_t commitID;
-
+ int32_t minutes;
+ int8_t precision;
+ int32_t minRow;
+ int32_t maxRow;
+ int8_t cmprAlg;
+ int64_t commitID;
uint8_t* aBuf[5];
+
// for data file
SBlockData bData;
-
- int32_t fid;
- SDataFReader* pDataFReader;
- SArray* aBlockIdx; // SArray
- int32_t iBlockIdx;
- SBlockIdx* pBlockIdx;
- SMapData mBlock; // SMapData
- int32_t iBlock;
- SBlockData* pBlockData;
- int32_t iRow;
- SBlockData bDataR;
- SArray* aBlockL; // SArray
- int32_t iBlockL;
- SBlockData lDataR;
-
- SDataFWriter* pDataFWriter;
- SBlockIdx* pBlockIdxW; // NULL when no committing table
- SBlock blockW;
- SBlockData bDataW;
- SBlockIdx blockIdxW;
-
- SMapData mBlockW; // SMapData
- SArray* aBlockIdxW; // SArray
- SArray* aBlockLW; // SArray
+ int32_t fid;
+ TABLEID id;
+ SSkmInfo skmTable;
+ struct {
+ SDataFReader* pReader;
+ SArray* aBlockIdx;
+ int32_t iBlockIdx;
+ SBlockIdx* pBlockIdx;
+ SMapData mDataBlk;
+ int32_t iDataBlk;
+ SBlockData bData;
+ int32_t iRow;
+ } dReader;
+ struct {
+ SDataFWriter* pWriter;
+ SArray* aBlockIdx;
+ SMapData mDataBlk;
+ SArray* aSttBlk;
+ SBlockData bData;
+ SBlockData sData;
+ } dWriter;
// for del file
SDelFReader* pDelFReader;
@@ -454,518 +650,447 @@ struct STsdbSnapWriter {
SArray* aDelIdxW;
};
-static int32_t tsdbSnapWriteTableDataEnd(STsdbSnapWriter* pWriter) {
+// SNAP_DATA_TSDB
+extern int32_t tsdbWriteDataBlock(SDataFWriter* pWriter, SBlockData* pBlockData, SMapData* mDataBlk, int8_t cmprAlg);
+extern int32_t tsdbWriteSttBlock(SDataFWriter* pWriter, SBlockData* pBlockData, SArray* aSttBlk, int8_t cmprAlg);
+
+static int32_t tsdbSnapNextTableData(STsdbSnapWriter* pWriter) {
int32_t code = 0;
- ASSERT(pWriter->pDataFWriter);
+ ASSERT(pWriter->dReader.iRow >= pWriter->dReader.bData.nRow);
- if (pWriter->pBlockIdxW == NULL) goto _exit;
+ if (pWriter->dReader.iBlockIdx < taosArrayGetSize(pWriter->dReader.aBlockIdx)) {
+ pWriter->dReader.pBlockIdx = (SBlockIdx*)taosArrayGet(pWriter->dReader.aBlockIdx, pWriter->dReader.iBlockIdx);
- // consume remain rows
- if (pWriter->pBlockData) {
- ASSERT(pWriter->iRow < pWriter->pBlockData->nRow);
- while (pWriter->iRow < pWriter->pBlockData->nRow) {
- code = tBlockDataAppendRow(&pWriter->bDataW, &tsdbRowFromBlockData(pWriter->pBlockData, pWriter->iRow), NULL,
- 0); // todo
- if (code) goto _err;
+ code = tsdbReadDataBlk(pWriter->dReader.pReader, pWriter->dReader.pBlockIdx, &pWriter->dReader.mDataBlk);
+ if (code) goto _exit;
- if (pWriter->bDataW.nRow >= pWriter->maxRow * 4 / 5) {
- // pWriter->blockW.last = 0;
- // code = tsdbWriteBlockData(pWriter->pDataFWriter, &pWriter->bDataW, NULL, NULL, pWriter->pBlockIdxW,
- // &pWriter->blockW, pWriter->cmprAlg);
- if (code) goto _err;
+ pWriter->dReader.iBlockIdx++;
+ } else {
+ pWriter->dReader.pBlockIdx = NULL;
+ tMapDataReset(&pWriter->dReader.mDataBlk);
+ }
+ pWriter->dReader.iDataBlk = 0; // point to the next one
+ tBlockDataReset(&pWriter->dReader.bData);
+ pWriter->dReader.iRow = 0;
- code = tMapDataPutItem(&pWriter->mBlockW, &pWriter->blockW, tPutBlock);
- if (code) goto _err;
+_exit:
+ return code;
+}
- tBlockReset(&pWriter->blockW);
- tBlockDataClear(&pWriter->bDataW);
- }
+static int32_t tsdbSnapWriteCopyData(STsdbSnapWriter* pWriter, TABLEID* pId) {
+ int32_t code = 0;
+
+ while (true) {
+ if (pWriter->dReader.pBlockIdx == NULL) break;
+ if (tTABLEIDCmprFn(pWriter->dReader.pBlockIdx, pId) >= 0) break;
+
+ SBlockIdx blkIdx = *pWriter->dReader.pBlockIdx;
+ code = tsdbWriteDataBlk(pWriter->dWriter.pWriter, &pWriter->dReader.mDataBlk, &blkIdx);
+ if (code) goto _exit;
- pWriter->iRow++;
+ if (taosArrayPush(pWriter->dWriter.aBlockIdx, &blkIdx) == NULL) {
+ code = TSDB_CODE_OUT_OF_MEMORY;
+ goto _exit;
}
+
+ code = tsdbSnapNextTableData(pWriter);
+ if (code) goto _exit;
}
- // write remain data if has
- if (pWriter->bDataW.nRow > 0) {
- // pWriter->blockW.last = 0;
- if (pWriter->bDataW.nRow < pWriter->minRow) {
- if (pWriter->iBlock > pWriter->mBlock.nItem) {
- // pWriter->blockW.last = 1;
- }
- }
+_exit:
+ return code;
+}
- // code = tsdbWriteBlockData(pWriter->pDataFWriter, &pWriter->bDataW, NULL, NULL, pWriter->pBlockIdxW,
- // &pWriter->blockW, pWriter->cmprAlg);
- // if (code) goto _err;
+static int32_t tsdbSnapWriteTableDataStart(STsdbSnapWriter* pWriter, TABLEID* pId) {
+ int32_t code = 0;
- code = tMapDataPutItem(&pWriter->mBlockW, &pWriter->blockW, tPutBlock);
- if (code) goto _err;
+ code = tsdbSnapWriteCopyData(pWriter, pId);
+ if (code) goto _err;
+
+ pWriter->id.suid = pId->suid;
+ pWriter->id.uid = pId->uid;
+
+ code = tsdbUpdateTableSchema(pWriter->pTsdb->pVnode->pMeta, pId->suid, pId->uid, &pWriter->skmTable);
+ if (code) goto _err;
+
+ tMapDataReset(&pWriter->dWriter.mDataBlk);
+ code = tBlockDataInit(&pWriter->dWriter.bData, pId->suid, pId->uid, pWriter->skmTable.pTSchema);
+ if (code) goto _err;
+
+ return code;
+
+_err:
+ tsdbError("vgId:%d %s failed since %s", TD_VID(pWriter->pTsdb->pVnode), __func__, tstrerror(code));
+ return code;
+}
+
+static int32_t tsdbSnapWriteTableDataEnd(STsdbSnapWriter* pWriter) {
+ int32_t code = 0;
+
+ if (pWriter->id.suid == 0 && pWriter->id.uid == 0) return code;
+
+ int32_t c = 1;
+ if (pWriter->dReader.pBlockIdx) {
+ c = tTABLEIDCmprFn(pWriter->dReader.pBlockIdx, &pWriter->id);
+ ASSERT(c >= 0);
}
- while (true) {
- if (pWriter->iBlock >= pWriter->mBlock.nItem) break;
+ if (c == 0) {
+ SBlockData* pBData = &pWriter->dWriter.bData;
- SBlock block;
- tMapDataGetItemByIdx(&pWriter->mBlock, pWriter->iBlock, &block, tGetBlock);
+ for (; pWriter->dReader.iRow < pWriter->dReader.bData.nRow; pWriter->dReader.iRow++) {
+ TSDBROW row = tsdbRowFromBlockData(&pWriter->dReader.bData, pWriter->dReader.iRow);
- // if (block.last) {
- // code = tsdbReadBlockData(pWriter->pDataFReader, pWriter->pBlockIdx, &block, &pWriter->bDataR, NULL, NULL);
- // if (code) goto _err;
+ code = tBlockDataAppendRow(pBData, &row, NULL, pWriter->id.uid);
+ if (code) goto _err;
- // tBlockReset(&block);
- // block.last = 1;
- // code = tsdbWriteBlockData(pWriter->pDataFWriter, &pWriter->bDataR, NULL, NULL, pWriter->pBlockIdxW, &block,
- // pWriter->cmprAlg);
- // if (code) goto _err;
- // }
+ if (pBData->nRow >= pWriter->maxRow) {
+ code = tsdbWriteDataBlock(pWriter->dWriter.pWriter, pBData, &pWriter->dWriter.mDataBlk, pWriter->cmprAlg);
+ if (code) goto _err;
+ }
+ }
- code = tMapDataPutItem(&pWriter->mBlockW, &block, tPutBlock);
+ code = tsdbWriteDataBlock(pWriter->dWriter.pWriter, pBData, &pWriter->dWriter.mDataBlk, pWriter->cmprAlg);
if (code) goto _err;
- pWriter->iBlock++;
+ for (; pWriter->dReader.iDataBlk < pWriter->dReader.mDataBlk.nItem; pWriter->dReader.iDataBlk++) {
+ SDataBlk dataBlk;
+ tMapDataGetItemByIdx(&pWriter->dReader.mDataBlk, pWriter->dReader.iDataBlk, &dataBlk, tGetDataBlk);
+
+ code = tMapDataPutItem(&pWriter->dWriter.mDataBlk, &dataBlk, tPutDataBlk);
+ if (code) goto _err;
+ }
+
+ code = tsdbSnapNextTableData(pWriter);
+ if (code) goto _err;
}
- // SBlock
- // code = tsdbWriteBlock(pWriter->pDataFWriter, &pWriter->mBlockW, NULL, pWriter->pBlockIdxW);
- // if (code) goto _err;
+ if (pWriter->dWriter.mDataBlk.nItem) {
+ SBlockIdx blockIdx = {.suid = pWriter->id.suid, .uid = pWriter->id.uid};
+ code = tsdbWriteDataBlk(pWriter->dWriter.pWriter, &pWriter->dWriter.mDataBlk, &blockIdx);
- // SBlockIdx
- if (taosArrayPush(pWriter->aBlockIdxW, pWriter->pBlockIdxW) == NULL) {
- code = TSDB_CODE_OUT_OF_MEMORY;
- goto _err;
+ if (taosArrayPush(pWriter->dWriter.aBlockIdx, &blockIdx) == NULL) {
+ code = TSDB_CODE_OUT_OF_MEMORY;
+ goto _err;
+ }
}
-_exit:
- tsdbInfo("vgId:%d, tsdb snapshot write table data end for %s", TD_VID(pWriter->pTsdb->pVnode), pWriter->pTsdb->path);
+ pWriter->id.suid = 0;
+ pWriter->id.uid = 0;
+
return code;
_err:
- tsdbError("vgId:%d, tsdb snapshot write table data end for %s failed since %s", TD_VID(pWriter->pTsdb->pVnode),
- pWriter->pTsdb->path, tstrerror(code));
return code;
}
-static int32_t tsdbSnapMoveWriteTableData(STsdbSnapWriter* pWriter, SBlockIdx* pBlockIdx) {
+static int32_t tsdbSnapWriteOpenFile(STsdbSnapWriter* pWriter, int32_t fid) {
int32_t code = 0;
+ STsdb* pTsdb = pWriter->pTsdb;
- code = tsdbReadBlock(pWriter->pDataFReader, pBlockIdx, &pWriter->mBlock);
- if (code) goto _err;
+ ASSERT(pWriter->dWriter.pWriter == NULL);
+
+ pWriter->fid = fid;
+ pWriter->id = (TABLEID){0};
+ SDFileSet* pSet = taosArraySearch(pWriter->fs.aDFileSet, &(SDFileSet){.fid = fid}, tDFileSetCmprFn, TD_EQ);
- // SBlockData
- SBlock block;
- tMapDataReset(&pWriter->mBlockW);
- for (int32_t iBlock = 0; iBlock < pWriter->mBlock.nItem; iBlock++) {
- tMapDataGetItemByIdx(&pWriter->mBlock, iBlock, &block, tGetBlock);
-
- // if (block.last) {
- // code = tsdbReadBlockData(pWriter->pDataFReader, pBlockIdx, &block, &pWriter->bDataR, NULL, NULL);
- // if (code) goto _err;
-
- // tBlockReset(&block);
- // block.last = 1;
- // code =
- // tsdbWriteBlockData(pWriter->pDataFWriter, &pWriter->bDataR, NULL, NULL, pBlockIdx, &block,
- // pWriter->cmprAlg);
- // if (code) goto _err;
- // }
-
- code = tMapDataPutItem(&pWriter->mBlockW, &block, tPutBlock);
+ // Reader
+ if (pSet) {
+ code = tsdbDataFReaderOpen(&pWriter->dReader.pReader, pWriter->pTsdb, pSet);
if (code) goto _err;
- }
- // SBlock
- SBlockIdx blockIdx = {.suid = pBlockIdx->suid, .uid = pBlockIdx->uid};
- code = tsdbWriteBlock(pWriter->pDataFWriter, &pWriter->mBlockW, &blockIdx);
+ code = tsdbReadBlockIdx(pWriter->dReader.pReader, pWriter->dReader.aBlockIdx);
+ if (code) goto _err;
+ } else {
+ ASSERT(pWriter->dReader.pReader == NULL);
+ taosArrayClear(pWriter->dReader.aBlockIdx);
+ }
+ pWriter->dReader.iBlockIdx = 0; // point to the next one
+ code = tsdbSnapNextTableData(pWriter);
if (code) goto _err;
- // SBlockIdx
- if (taosArrayPush(pWriter->aBlockIdxW, &blockIdx) == NULL) {
- code = TSDB_CODE_OUT_OF_MEMORY;
- goto _err;
+ // Writer
+ SHeadFile fHead = {.commitID = pWriter->commitID};
+ SDataFile fData = {.commitID = pWriter->commitID};
+ SSmaFile fSma = {.commitID = pWriter->commitID};
+ SSttFile fStt = {.commitID = pWriter->commitID};
+ SDFileSet wSet = {.fid = pWriter->fid, .pHeadF = &fHead, .pDataF = &fData, .pSmaF = &fSma};
+ if (pSet) {
+ wSet.diskId = pSet->diskId;
+ fData = *pSet->pDataF;
+ fSma = *pSet->pSmaF;
+ for (int32_t iStt = 0; iStt < pSet->nSttF; iStt++) {
+ wSet.aSttF[iStt] = pSet->aSttF[iStt];
+ }
+ wSet.nSttF = pSet->nSttF + 1; // TODO: fix pSet->nSttF == pTsdb->maxFile
+ } else {
+ SDiskID did = {0};
+ tfsAllocDisk(pTsdb->pVnode->pTfs, 0, &did);
+ tfsMkdirRecurAt(pTsdb->pVnode->pTfs, pTsdb->path, did);
+ wSet.diskId = did;
+ wSet.nSttF = 1;
}
+ wSet.aSttF[wSet.nSttF - 1] = &fStt;
+
+ code = tsdbDataFWriterOpen(&pWriter->dWriter.pWriter, pWriter->pTsdb, &wSet);
+ if (code) goto _err;
+ taosArrayClear(pWriter->dWriter.aBlockIdx);
+ tMapDataReset(&pWriter->dWriter.mDataBlk);
+ taosArrayClear(pWriter->dWriter.aSttBlk);
+ tBlockDataReset(&pWriter->dWriter.bData);
+ tBlockDataReset(&pWriter->dWriter.sData);
-_exit:
return code;
_err:
- tsdbError("vgId:%d, tsdb snapshot move write table data for %s failed since %s", TD_VID(pWriter->pTsdb->pVnode),
- pWriter->pTsdb->path, tstrerror(code));
return code;
}
-static int32_t tsdbSnapWriteTableDataImpl(STsdbSnapWriter* pWriter) {
- int32_t code = 0;
- SBlockData* pBlockData = &pWriter->bData;
- int32_t iRow = 0;
- TSDBROW row;
- TSDBROW* pRow = &row;
+static int32_t tsdbSnapWriteCloseFile(STsdbSnapWriter* pWriter) {
+ int32_t code = 0;
- // // correct schema
- // code = tBlockDataCorrectSchema(&pWriter->bDataW, pBlockData);
- // if (code) goto _err;
+ ASSERT(pWriter->dWriter.pWriter);
- // loop to merge
- *pRow = tsdbRowFromBlockData(pBlockData, iRow);
- while (true) {
- if (pRow == NULL) break;
+ code = tsdbSnapWriteTableDataEnd(pWriter);
+ if (code) goto _err;
- if (pWriter->pBlockData) {
- ASSERT(pWriter->iRow < pWriter->pBlockData->nRow);
+ // copy remain table data
+ TABLEID id = {.suid = INT64_MAX, .uid = INT64_MAX};
+ code = tsdbSnapWriteCopyData(pWriter, &id);
+ if (code) goto _err;
- int32_t c = tsdbRowCmprFn(pRow, &tsdbRowFromBlockData(pWriter->pBlockData, pWriter->iRow));
+ code =
+ tsdbWriteSttBlock(pWriter->dWriter.pWriter, &pWriter->dWriter.sData, pWriter->dWriter.aSttBlk, pWriter->cmprAlg);
+ if (code) goto _err;
- ASSERT(c);
+ // Indices
+ code = tsdbWriteBlockIdx(pWriter->dWriter.pWriter, pWriter->dWriter.aBlockIdx);
+ if (code) goto _err;
- if (c < 0) {
- // code = tBlockDataAppendRow(&pWriter->bDataW, pRow, NULL);
- // if (code) goto _err;
-
- iRow++;
- if (iRow < pWriter->pBlockData->nRow) {
- *pRow = tsdbRowFromBlockData(pBlockData, iRow);
- } else {
- pRow = NULL;
- }
- } else if (c > 0) {
- // code = tBlockDataAppendRow(&pWriter->bDataW, &tsdbRowFromBlockData(pWriter->pBlockData, pWriter->iRow),
- // NULL); if (code) goto _err;
+ code = tsdbWriteSttBlk(pWriter->dWriter.pWriter, pWriter->dWriter.aSttBlk);
+ if (code) goto _err;
- pWriter->iRow++;
- if (pWriter->iRow >= pWriter->pBlockData->nRow) {
- pWriter->pBlockData = NULL;
- }
- }
- } else {
- TSDBKEY key = TSDBROW_KEY(pRow);
+ code = tsdbUpdateDFileSetHeader(pWriter->dWriter.pWriter);
+ if (code) goto _err;
- while (true) {
- if (pWriter->iBlock >= pWriter->mBlock.nItem) break;
+ code = tsdbFSUpsertFSet(&pWriter->fs, &pWriter->dWriter.pWriter->wSet);
+ if (code) goto _err;
- SBlock block;
- int32_t c;
+ code = tsdbDataFWriterClose(&pWriter->dWriter.pWriter, 1);
+ if (code) goto _err;
- tMapDataGetItemByIdx(&pWriter->mBlock, pWriter->iBlock, &block, tGetBlock);
+ if (pWriter->dReader.pReader) {
+ code = tsdbDataFReaderClose(&pWriter->dReader.pReader);
+ if (code) goto _err;
+ }
- // if (block.last) {
- // pWriter->pBlockData = &pWriter->bDataR;
+_exit:
+ return code;
- // code = tsdbReadBlockData(pWriter->pDataFReader, pWriter->pBlockIdx, &block, pWriter->pBlockData, NULL,
- // NULL); if (code) goto _err; pWriter->iRow = 0;
+_err:
+ return code;
+}
- // pWriter->iBlock++;
- // break;
- // }
+static int32_t tsdbSnapWriteToDataFile(STsdbSnapWriter* pWriter, int32_t iRow, int8_t* done) {
+ int32_t code = 0;
- c = tsdbKeyCmprFn(&block.maxKey, &key);
+ SBlockData* pBData = &pWriter->bData;
+ TABLEID id = {.suid = pBData->suid, .uid = pBData->uid ? pBData->uid : pBData->aUid[iRow]};
+ TSDBROW row = tsdbRowFromBlockData(pBData, iRow);
+ TSDBKEY key = TSDBROW_KEY(&row);
- ASSERT(c);
+ *done = 0;
+ while (pWriter->dReader.iRow < pWriter->dReader.bData.nRow ||
+ pWriter->dReader.iDataBlk < pWriter->dReader.mDataBlk.nItem) {
+ // Merge row by row
+ for (; pWriter->dReader.iRow < pWriter->dReader.bData.nRow; pWriter->dReader.iRow++) {
+ TSDBROW trow = tsdbRowFromBlockData(&pWriter->dReader.bData, pWriter->dReader.iRow);
+ TSDBKEY tKey = TSDBROW_KEY(&trow);
- if (c < 0) {
- if (pWriter->bDataW.nRow) {
- // pWriter->blockW.last = 0;
- // code = tsdbWriteBlockData(pWriter->pDataFWriter, &pWriter->bDataW, NULL, NULL, pWriter->pBlockIdxW,
- // &pWriter->blockW, pWriter->cmprAlg);
- // if (code) goto _err;
+ ASSERT(pWriter->dReader.bData.suid == id.suid && pWriter->dReader.bData.uid == id.uid);
- code = tMapDataPutItem(&pWriter->mBlockW, &pWriter->blockW, tPutBlock);
- if (code) goto _err;
+ int32_t c = tsdbKeyCmprFn(&key, &tKey);
+ if (c < 0) {
+ code = tBlockDataAppendRow(&pWriter->dWriter.bData, &row, NULL, id.uid);
+ if (code) goto _err;
+ } else if (c > 0) {
+ code = tBlockDataAppendRow(&pWriter->dWriter.bData, &trow, NULL, id.uid);
+ if (code) goto _err;
+ } else {
+ ASSERT(0);
+ }
- tBlockReset(&pWriter->blockW);
- tBlockDataClear(&pWriter->bDataW);
- }
+ if (pWriter->dWriter.bData.nRow >= pWriter->maxRow) {
+ code = tsdbWriteDataBlock(pWriter->dWriter.pWriter, &pWriter->dWriter.bData, &pWriter->dWriter.mDataBlk,
+ pWriter->cmprAlg);
+ if (code) goto _err;
+ }
- code = tMapDataPutItem(&pWriter->mBlockW, &block, tPutBlock);
- if (code) goto _err;
+ if (c < 0) {
+ *done = 1;
+ goto _exit;
+ }
+ }
- pWriter->iBlock++;
- } else {
- c = tsdbKeyCmprFn(&tBlockDataLastKey(pBlockData), &block.minKey);
+ // Merge row by block
+ SDataBlk tDataBlk = {.minKey = key, .maxKey = key};
+ for (; pWriter->dReader.iDataBlk < pWriter->dReader.mDataBlk.nItem; pWriter->dReader.iDataBlk++) {
+ SDataBlk dataBlk;
+ tMapDataGetItemByIdx(&pWriter->dReader.mDataBlk, pWriter->dReader.iDataBlk, &dataBlk, tGetDataBlk);
- ASSERT(c);
+ int32_t c = tDataBlkCmprFn(&dataBlk, &tDataBlk);
+ if (c < 0) {
+ code = tsdbWriteDataBlock(pWriter->dWriter.pWriter, &pWriter->dWriter.bData, &pWriter->dWriter.mDataBlk,
+ pWriter->cmprAlg);
+ if (code) goto _err;
- if (c > 0) {
- pWriter->pBlockData = &pWriter->bDataR;
- // code =
- // tsdbReadBlockData(pWriter->pDataFReader, pWriter->pBlockIdx, &block, pWriter->pBlockData, NULL,
- // NULL);
- // if (code) goto _err;
- pWriter->iRow = 0;
+ code = tMapDataPutItem(&pWriter->dWriter.mDataBlk, &dataBlk, tPutDataBlk);
+ if (code) goto _err;
+ } else if (c > 0) {
+ code = tBlockDataAppendRow(&pWriter->dWriter.bData, &row, NULL, id.uid);
+ if (code) goto _err;
- pWriter->iBlock++;
- }
- break;
+ if (pWriter->dWriter.bData.nRow >= pWriter->maxRow) {
+ code = tsdbWriteDataBlock(pWriter->dWriter.pWriter, &pWriter->dWriter.bData, &pWriter->dWriter.mDataBlk,
+ pWriter->cmprAlg);
+ if (code) goto _err;
}
- }
-
- if (pWriter->pBlockData) continue;
-
- // code = tBlockDataAppendRow(&pWriter->bDataW, pRow, NULL);
- // if (code) goto _err;
- iRow++;
- if (iRow < pBlockData->nRow) {
- *pRow = tsdbRowFromBlockData(pBlockData, iRow);
+ *done = 1;
+ goto _exit;
} else {
- pRow = NULL;
+ code = tsdbReadDataBlockEx(pWriter->dReader.pReader, &dataBlk, &pWriter->dReader.bData);
+ if (code) goto _err;
+ pWriter->dReader.iRow = 0;
+
+ pWriter->dReader.iDataBlk++;
+ break;
}
}
-
- _check_write:
- if (pWriter->bDataW.nRow < pWriter->maxRow * 4 / 5) continue;
-
- _write_block:
- // code = tsdbWriteBlockData(pWriter->pDataFWriter, &pWriter->bDataW, NULL, NULL, pWriter->pBlockIdxW,
- // &pWriter->blockW, pWriter->cmprAlg);
- // if (code) goto _err;
-
- code = tMapDataPutItem(&pWriter->mBlockW, &pWriter->blockW, tPutBlock);
- if (code) goto _err;
-
- tBlockReset(&pWriter->blockW);
- tBlockDataClear(&pWriter->bDataW);
}
+_exit:
return code;
_err:
- tsdbError("vgId:%d, vnode snapshot tsdb write table data impl for %s failed since %s", TD_VID(pWriter->pTsdb->pVnode),
- pWriter->pTsdb->path, tstrerror(code));
+ tsdbError("vgId:%d %s failed since %s", TD_VID(pWriter->pTsdb->pVnode), __func__, tstrerror(code));
return code;
}
-static int32_t tsdbSnapWriteTableData(STsdbSnapWriter* pWriter, TABLEID id) {
- int32_t code = 0;
- SBlockData* pBlockData = &pWriter->bData;
- TSDBKEY keyFirst = tBlockDataFirstKey(pBlockData);
- TSDBKEY keyLast = tBlockDataLastKey(pBlockData);
-
- // end last table write if should
- if (pWriter->pBlockIdxW) {
- int32_t c = tTABLEIDCmprFn(pWriter->pBlockIdxW, &id);
- if (c < 0) {
- // end
- code = tsdbSnapWriteTableDataEnd(pWriter);
- if (code) goto _err;
-
- // reset
- pWriter->pBlockIdxW = NULL;
- } else if (c > 0) {
- ASSERT(0);
- }
- }
-
- // start new table data write if need
- if (pWriter->pBlockIdxW == NULL) {
- // write table data ahead
- while (true) {
- if (pWriter->iBlockIdx >= taosArrayGetSize(pWriter->aBlockIdx)) break;
-
- SBlockIdx* pBlockIdx = (SBlockIdx*)taosArrayGet(pWriter->aBlockIdx, pWriter->iBlockIdx);
- int32_t c = tTABLEIDCmprFn(pBlockIdx, &id);
+static int32_t tsdbSnapWriteToSttFile(STsdbSnapWriter* pWriter, int32_t iRow) {
+ int32_t code = 0;
- if (c >= 0) break;
+ TABLEID id = {.suid = pWriter->bData.suid,
+ .uid = pWriter->bData.uid ? pWriter->bData.uid : pWriter->bData.aUid[iRow]};
+ TSDBROW row = tsdbRowFromBlockData(&pWriter->bData, iRow);
+ SBlockData* pBData = &pWriter->dWriter.sData;
- code = tsdbSnapMoveWriteTableData(pWriter, pBlockIdx);
+ if (pBData->suid || pBData->uid) {
+ if (!TABLE_SAME_SCHEMA(pBData->suid, pBData->uid, id.suid, id.uid)) {
+ code = tsdbWriteSttBlock(pWriter->dWriter.pWriter, pBData, pWriter->dWriter.aSttBlk, pWriter->cmprAlg);
if (code) goto _err;
- pWriter->iBlockIdx++;
+ pBData->suid = 0;
+ pBData->uid = 0;
}
+ }
- // reader
- pWriter->pBlockIdx = NULL;
- if (pWriter->iBlockIdx < taosArrayGetSize(pWriter->aBlockIdx)) {
- ASSERT(pWriter->pDataFReader);
-
- SBlockIdx* pBlockIdx = (SBlockIdx*)taosArrayGet(pWriter->aBlockIdx, pWriter->iBlockIdx);
- int32_t c = tTABLEIDCmprFn(pBlockIdx, &id);
-
- ASSERT(c >= 0);
-
- if (c == 0) {
- pWriter->pBlockIdx = pBlockIdx;
- pWriter->iBlockIdx++;
- }
- }
-
- if (pWriter->pBlockIdx) {
- code = tsdbReadBlock(pWriter->pDataFReader, pWriter->pBlockIdx, &pWriter->mBlock);
- if (code) goto _err;
- } else {
- tMapDataReset(&pWriter->mBlock);
- }
- pWriter->iBlock = 0;
- pWriter->pBlockData = NULL;
- pWriter->iRow = 0;
-
- // writer
- pWriter->pBlockIdxW = &pWriter->blockIdxW;
- pWriter->pBlockIdxW->suid = id.suid;
- pWriter->pBlockIdxW->uid = id.uid;
+ if (pBData->suid == 0 && pBData->uid == 0) {
+ code = tsdbUpdateTableSchema(pWriter->pTsdb->pVnode->pMeta, pWriter->id.suid, pWriter->id.uid, &pWriter->skmTable);
+ if (code) goto _err;
- tBlockReset(&pWriter->blockW);
- tBlockDataReset(&pWriter->bDataW);
- tMapDataReset(&pWriter->mBlockW);
+ code = tBlockDataInit(pBData, pWriter->id.suid, pWriter->id.suid ? 0 : pWriter->id.uid, pWriter->skmTable.pTSchema);
+ if (code) goto _err;
}
- ASSERT(pWriter->pBlockIdxW && pWriter->pBlockIdxW->suid == id.suid && pWriter->pBlockIdxW->uid == id.uid);
- ASSERT(pWriter->pBlockIdx == NULL || (pWriter->pBlockIdx->suid == id.suid && pWriter->pBlockIdx->uid == id.uid));
-
- code = tsdbSnapWriteTableDataImpl(pWriter);
+ code = tBlockDataAppendRow(pBData, &row, NULL, id.uid);
if (code) goto _err;
+ if (pBData->nRow >= pWriter->maxRow) {
+ code = tsdbWriteSttBlock(pWriter->dWriter.pWriter, pBData, pWriter->dWriter.aSttBlk, pWriter->cmprAlg);
+ if (code) goto _err;
+ }
+
_exit:
- tsdbDebug("vgId:%d, vnode snapshot tsdb write data impl for %s", TD_VID(pWriter->pTsdb->pVnode),
- pWriter->pTsdb->path);
return code;
_err:
- tsdbError("vgId:%d, vnode snapshot tsdb write data impl for %s failed since %s", TD_VID(pWriter->pTsdb->pVnode),
- pWriter->pTsdb->path, tstrerror(code));
return code;
}
-static int32_t tsdbSnapWriteDataEnd(STsdbSnapWriter* pWriter) {
+static int32_t tsdbSnapWriteRowData(STsdbSnapWriter* pWriter, int32_t iRow) {
int32_t code = 0;
- STsdb* pTsdb = pWriter->pTsdb;
-
- if (pWriter->pDataFWriter == NULL) goto _exit;
- // finish current table
- code = tsdbSnapWriteTableDataEnd(pWriter);
- if (code) goto _err;
+ SBlockData* pBlockData = &pWriter->bData;
+ TABLEID id = {.suid = pBlockData->suid, .uid = pBlockData->uid ? pBlockData->uid : pBlockData->aUid[iRow]};
- // move remain table
- while (pWriter->iBlockIdx < taosArrayGetSize(pWriter->aBlockIdx)) {
- code = tsdbSnapMoveWriteTableData(pWriter, (SBlockIdx*)taosArrayGet(pWriter->aBlockIdx, pWriter->iBlockIdx));
+ // End last table data write if need
+ if (tTABLEIDCmprFn(&pWriter->id, &id) != 0) {
+ code = tsdbSnapWriteTableDataEnd(pWriter);
if (code) goto _err;
-
- pWriter->iBlockIdx++;
}
- // write remain stuff
- if (taosArrayGetSize(pWriter->aBlockLW) > 0) {
- code = tsdbWriteBlockL(pWriter->pDataFWriter, pWriter->aBlockIdxW);
+ // Start new table data write if need
+ if (pWriter->id.suid == 0 && pWriter->id.uid == 0) {
+ code = tsdbSnapWriteTableDataStart(pWriter, &id);
if (code) goto _err;
}
- if (taosArrayGetSize(pWriter->aBlockIdx) > 0) {
- code = tsdbWriteBlockIdx(pWriter->pDataFWriter, pWriter->aBlockIdxW);
+ // Merge with .data file data
+ int8_t done = 0;
+ if (pWriter->dReader.pBlockIdx && tTABLEIDCmprFn(pWriter->dReader.pBlockIdx, &id) == 0) {
+ code = tsdbSnapWriteToDataFile(pWriter, iRow, &done);
if (code) goto _err;
}
- code = tsdbFSUpsertFSet(&pWriter->fs, &pWriter->pDataFWriter->wSet);
- if (code) goto _err;
-
- code = tsdbDataFWriterClose(&pWriter->pDataFWriter, 1);
- if (code) goto _err;
-
- if (pWriter->pDataFReader) {
- code = tsdbDataFReaderClose(&pWriter->pDataFReader);
+ // Append to the .stt data block (todo: check if need to set/reload sst block)
+ if (!done) {
+ code = tsdbSnapWriteToSttFile(pWriter, iRow);
if (code) goto _err;
}
_exit:
- tsdbInfo("vgId:%d, vnode snapshot tsdb writer data end for %s", TD_VID(pTsdb->pVnode), pTsdb->path);
return code;
_err:
- tsdbError("vgId:%d, vnode snapshot tsdb writer data end for %s failed since %s", TD_VID(pTsdb->pVnode), pTsdb->path,
- tstrerror(code));
+ tsdbError("vgId:%d %s failed since %s", TD_VID(pWriter->pTsdb->pVnode), __func__, tstrerror(code));
return code;
}
static int32_t tsdbSnapWriteData(STsdbSnapWriter* pWriter, uint8_t* pData, uint32_t nData) {
- int32_t code = 0;
- STsdb* pTsdb = pWriter->pTsdb;
- SSnapDataHdr* pHdr = (SSnapDataHdr*)pData;
- TABLEID id = *(TABLEID*)(pData + sizeof(SSnapDataHdr));
- int64_t n;
-
- // decode
+ int32_t code = 0;
+ STsdb* pTsdb = pWriter->pTsdb;
SBlockData* pBlockData = &pWriter->bData;
- code = tDecmprBlockData(pData + sizeof(SSnapDataHdr) + sizeof(TABLEID), pHdr->size - sizeof(TABLEID), pBlockData,
- pWriter->aBuf);
- if (code) goto _err;
- // open file
- TSDBKEY keyFirst = {.version = pBlockData->aVersion[0], .ts = pBlockData->aTSKEY[0]};
- TSDBKEY keyLast = {.version = pBlockData->aVersion[pBlockData->nRow - 1],
- .ts = pBlockData->aTSKEY[pBlockData->nRow - 1]};
+ // Decode data
+ SSnapDataHdr* pHdr = (SSnapDataHdr*)pData;
+ code = tDecmprBlockData(pHdr->data, pHdr->size, pBlockData, pWriter->aBuf);
+ if (code) goto _err;
- int32_t fid = tsdbKeyFid(keyFirst.ts, pWriter->minutes, pWriter->precision);
- ASSERT(fid == tsdbKeyFid(keyLast.ts, pWriter->minutes, pWriter->precision));
- if (pWriter->pDataFWriter == NULL || pWriter->fid != fid) {
- // end last file data write if need
- code = tsdbSnapWriteDataEnd(pWriter);
- if (code) goto _err;
+ ASSERT(pBlockData->nRow > 0);
- pWriter->fid = fid;
+ // Loop to handle each row
+ for (int32_t iRow = 0; iRow < pBlockData->nRow; iRow++) {
+ TSKEY ts = pBlockData->aTSKEY[iRow];
+ int32_t fid = tsdbKeyFid(ts, pWriter->minutes, pWriter->precision);
- // read
- SDFileSet* pSet = taosArraySearch(pWriter->fs.aDFileSet, &(SDFileSet){.fid = fid}, tDFileSetCmprFn, TD_EQ);
- if (pSet) {
- code = tsdbDataFReaderOpen(&pWriter->pDataFReader, pTsdb, pSet);
- if (code) goto _err;
+ if (pWriter->dWriter.pWriter == NULL || pWriter->fid != fid) {
+ if (pWriter->dWriter.pWriter) {
+ ASSERT(fid > pWriter->fid);
- code = tsdbReadBlockIdx(pWriter->pDataFReader, pWriter->aBlockIdx);
- if (code) goto _err;
+ code = tsdbSnapWriteCloseFile(pWriter);
+ if (code) goto _err;
+ }
- code = tsdbReadBlockL(pWriter->pDataFReader, pWriter->aBlockL);
+ code = tsdbSnapWriteOpenFile(pWriter, fid);
if (code) goto _err;
- } else {
- ASSERT(pWriter->pDataFReader == NULL);
- taosArrayClear(pWriter->aBlockIdx);
- taosArrayClear(pWriter->aBlockL);
- }
- pWriter->iBlockIdx = 0;
- pWriter->pBlockIdx = NULL;
- tMapDataReset(&pWriter->mBlock);
- pWriter->iBlock = 0;
- pWriter->pBlockData = NULL;
- pWriter->iRow = 0;
- pWriter->iBlockL = 0;
- tBlockDataReset(&pWriter->bDataR);
- tBlockDataReset(&pWriter->lDataR);
-
- // write
- SHeadFile fHead;
- SDataFile fData;
- SLastFile fLast;
- SSmaFile fSma;
- SDFileSet wSet = {.pHeadF = &fHead, .pDataF = &fData, .pLastF = &fLast, .pSmaF = &fSma};
-
- if (pSet) {
- wSet.diskId = pSet->diskId;
- wSet.fid = fid;
- fHead = (SHeadFile){.commitID = pWriter->commitID, .offset = 0, .size = 0};
- fData = *pSet->pDataF;
- fLast = (SLastFile){.commitID = pWriter->commitID, .size = 0};
- fSma = *pSet->pSmaF;
- } else {
- wSet.diskId = (SDiskID){.level = 0, .id = 0};
- wSet.fid = fid;
- fHead = (SHeadFile){.commitID = pWriter->commitID, .offset = 0, .size = 0};
- fData = (SDataFile){.commitID = pWriter->commitID, .size = 0};
- fLast = (SLastFile){.commitID = pWriter->commitID, .size = 0, .offset = 0};
- fSma = (SSmaFile){.commitID = pWriter->commitID, .size = 0};
}
- code = tsdbDataFWriterOpen(&pWriter->pDataFWriter, pTsdb, &wSet);
+ code = tsdbSnapWriteRowData(pWriter, iRow);
if (code) goto _err;
-
- taosArrayClear(pWriter->aBlockIdxW);
- taosArrayClear(pWriter->aBlockLW);
- tMapDataReset(&pWriter->mBlockW);
- pWriter->pBlockIdxW = NULL;
- tBlockDataReset(&pWriter->bDataW);
}
- code = tsdbSnapWriteTableData(pWriter, id);
- if (code) goto _err;
-
- tsdbInfo("vgId:%d, vnode snapshot tsdb write data for %s, fid:%d suid:%" PRId64 " uid:%" PRId64 " nRow:%d",
- TD_VID(pTsdb->pVnode), pTsdb->path, fid, id.suid, id.suid, pBlockData->nRow);
return code;
_err:
@@ -974,10 +1099,41 @@ _err:
return code;
}
+// SNAP_DATA_DEL
+static int32_t tsdbSnapMoveWriteDelData(STsdbSnapWriter* pWriter, TABLEID* pId) {
+ int32_t code = 0;
+
+ while (true) {
+ if (pWriter->iDelIdx >= taosArrayGetSize(pWriter->aDelIdxR)) break;
+
+ SDelIdx* pDelIdx = (SDelIdx*)taosArrayGet(pWriter->aDelIdxR, pWriter->iDelIdx);
+
+ if (tTABLEIDCmprFn(pDelIdx, pId) >= 0) break;
+
+ code = tsdbReadDelData(pWriter->pDelFReader, pDelIdx, pWriter->aDelData);
+ if (code) goto _exit;
+
+ SDelIdx delIdx = *pDelIdx;
+ code = tsdbWriteDelData(pWriter->pDelFWriter, pWriter->aDelData, &delIdx);
+ if (code) goto _exit;
+
+ if (taosArrayPush(pWriter->aDelIdxW, &delIdx) == NULL) {
+ code = TSDB_CODE_OUT_OF_MEMORY;
+ goto _exit;
+ }
+
+ pWriter->iDelIdx++;
+ }
+
+_exit:
+ return code;
+}
+
static int32_t tsdbSnapWriteDel(STsdbSnapWriter* pWriter, uint8_t* pData, uint32_t nData) {
int32_t code = 0;
STsdb* pTsdb = pWriter->pTsdb;
+ // Open del file if not opened yet
if (pWriter->pDelFWriter == NULL) {
SDelFile* pDelFile = pWriter->fs.pDelFile;
@@ -988,38 +1144,28 @@ static int32_t tsdbSnapWriteDel(STsdbSnapWriter* pWriter, uint8_t* pData, uint32
code = tsdbReadDelIdx(pWriter->pDelFReader, pWriter->aDelIdxR);
if (code) goto _err;
+ } else {
+ taosArrayClear(pWriter->aDelIdxR);
}
+ pWriter->iDelIdx = 0;
// writer
- SDelFile delFile = {.commitID = pWriter->commitID, .offset = 0, .size = 0};
+ SDelFile delFile = {.commitID = pWriter->commitID};
code = tsdbDelFWriterOpen(&pWriter->pDelFWriter, &delFile, pTsdb);
if (code) goto _err;
+ taosArrayClear(pWriter->aDelIdxW);
}
- // process the del data
- TABLEID id = *(TABLEID*)(pData + sizeof(SSnapDataHdr));
-
- while (true) {
- if (pWriter->iDelIdx >= taosArrayGetSize(pWriter->aDelIdxR)) break;
- if (tTABLEIDCmprFn(taosArrayGet(pWriter->aDelIdxR, pWriter->iDelIdx), &id) >= 0) break;
-
- SDelIdx* pDelIdx = (SDelIdx*)taosArrayGet(pWriter->aDelIdxR, pWriter->iDelIdx);
-
- code = tsdbReadDelData(pWriter->pDelFReader, pDelIdx, pWriter->aDelData);
- if (code) goto _err;
-
- SDelIdx delIdx = *pDelIdx;
- code = tsdbWriteDelData(pWriter->pDelFWriter, pWriter->aDelData, &delIdx);
- if (code) goto _err;
+ SSnapDataHdr* pHdr = (SSnapDataHdr*)pData;
+ TABLEID id = *(TABLEID*)pHdr->data;
- if (taosArrayPush(pWriter->aDelIdxW, &delIdx) == NULL) {
- code = TSDB_CODE_OUT_OF_MEMORY;
- goto _err;
- }
+ ASSERT(pHdr->size + sizeof(SSnapDataHdr) == nData);
- pWriter->iDelIdx++;
- }
+ // Move write data < id
+ code = tsdbSnapMoveWriteDelData(pWriter, &id);
+ if (code) goto _err;
+ // Merge incoming data with current
if (pWriter->iDelIdx < taosArrayGetSize(pWriter->aDelIdxR) &&
tTABLEIDCmprFn(taosArrayGet(pWriter->aDelIdxR, pWriter->iDelIdx), &id) == 0) {
SDelIdx* pDelIdx = (SDelIdx*)taosArrayGet(pWriter->aDelIdxR, pWriter->iDelIdx);
@@ -1053,7 +1199,6 @@ static int32_t tsdbSnapWriteDel(STsdbSnapWriter* pWriter, uint8_t* pData, uint32
goto _err;
}
-_exit:
return code;
_err:
@@ -1066,23 +1211,14 @@ static int32_t tsdbSnapWriteDelEnd(STsdbSnapWriter* pWriter) {
int32_t code = 0;
STsdb* pTsdb = pWriter->pTsdb;
- if (pWriter->pDelFWriter == NULL) goto _exit;
-
- for (; pWriter->iDelIdx < taosArrayGetSize(pWriter->aDelIdxR); pWriter->iDelIdx++) {
- SDelIdx* pDelIdx = (SDelIdx*)taosArrayGet(pWriter->aDelIdxR, pWriter->iDelIdx);
-
- code = tsdbReadDelData(pWriter->pDelFReader, pDelIdx, pWriter->aDelData);
- if (code) goto _err;
+ if (pWriter->pDelFWriter == NULL) return code;
- SDelIdx delIdx = *pDelIdx;
- code = tsdbWriteDelData(pWriter->pDelFWriter, pWriter->aDelData, &delIdx);
- if (code) goto _err;
+ TABLEID id = {.suid = INT64_MAX, .uid = INT64_MAX};
+ code = tsdbSnapMoveWriteDelData(pWriter, &id);
+ if (code) goto _err;
- if (taosArrayPush(pWriter->aDelIdxR, &delIdx) == NULL) {
- code = TSDB_CODE_OUT_OF_MEMORY;
- goto _err;
- }
- }
+ code = tsdbWriteDelIdx(pWriter->pDelFWriter, pWriter->aDelIdxW);
+ if (code) goto _err;
code = tsdbUpdateDelFileHdr(pWriter->pDelFWriter);
if (code) goto _err;
@@ -1098,7 +1234,6 @@ static int32_t tsdbSnapWriteDelEnd(STsdbSnapWriter* pWriter) {
if (code) goto _err;
}
-_exit:
tsdbInfo("vgId:%d, vnode snapshot tsdb write del for %s end", TD_VID(pTsdb->pVnode), pTsdb->path);
return code;
@@ -1108,6 +1243,7 @@ _err:
return code;
}
+// APIs
int32_t tsdbSnapWriterOpen(STsdb* pTsdb, int64_t sver, int64_t ever, STsdbSnapWriter** ppWriter) {
int32_t code = 0;
STsdbSnapWriter* pWriter = NULL;
@@ -1133,39 +1269,38 @@ int32_t tsdbSnapWriterOpen(STsdb* pTsdb, int64_t sver, int64_t ever, STsdbSnapWr
pWriter->cmprAlg = pTsdb->pVnode->config.tsdbCfg.compression;
pWriter->commitID = pTsdb->pVnode->state.commitID;
- // for data file
+ // SNAP_DATA_TSDB
code = tBlockDataCreate(&pWriter->bData);
-
if (code) goto _err;
- pWriter->aBlockIdx = taosArrayInit(0, sizeof(SBlockIdx));
- if (pWriter->aBlockIdx == NULL) {
+
+ pWriter->fid = INT32_MIN;
+ pWriter->id = (TABLEID){0};
+ // Reader
+ pWriter->dReader.aBlockIdx = taosArrayInit(0, sizeof(SBlockIdx));
+ if (pWriter->dReader.aBlockIdx == NULL) {
code = TSDB_CODE_OUT_OF_MEMORY;
goto _err;
}
- code = tBlockDataCreate(&pWriter->bDataR);
+ code = tBlockDataCreate(&pWriter->dReader.bData);
if (code) goto _err;
- pWriter->aBlockL = taosArrayInit(0, sizeof(SBlockL));
- if (pWriter->aBlockL == NULL) {
+ // Writer
+ pWriter->dWriter.aBlockIdx = taosArrayInit(0, sizeof(SBlockIdx));
+ if (pWriter->dWriter.aBlockIdx == NULL) {
code = TSDB_CODE_OUT_OF_MEMORY;
goto _err;
}
-
- pWriter->aBlockIdxW = taosArrayInit(0, sizeof(SBlockIdx));
- if (pWriter->aBlockIdxW == NULL) {
+ pWriter->dWriter.aSttBlk = taosArrayInit(0, sizeof(SSttBlk));
+ if (pWriter->dWriter.aSttBlk == NULL) {
code = TSDB_CODE_OUT_OF_MEMORY;
goto _err;
}
- code = tBlockDataCreate(&pWriter->bDataW);
+ code = tBlockDataCreate(&pWriter->dWriter.bData);
+ if (code) goto _err;
+ code = tBlockDataCreate(&pWriter->dWriter.sData);
if (code) goto _err;
- pWriter->aBlockLW = taosArrayInit(0, sizeof(SBlockL));
- if (pWriter->aBlockLW == NULL) {
- code = TSDB_CODE_OUT_OF_MEMORY;
- goto _err;
- }
-
- // for del file
+ // SNAP_DATA_DEL
pWriter->aDelIdxR = taosArrayInit(0, sizeof(SDelIdx));
if (pWriter->aDelIdxR == NULL) {
code = TSDB_CODE_OUT_OF_MEMORY;
@@ -1186,6 +1321,7 @@ int32_t tsdbSnapWriterOpen(STsdb* pTsdb, int64_t sver, int64_t ever, STsdbSnapWr
tsdbInfo("vgId:%d, tsdb snapshot writer open for %s succeed", TD_VID(pTsdb->pVnode), pTsdb->path);
return code;
+
_err:
tsdbError("vgId:%d, tsdb snapshot writer open for %s failed since %s", TD_VID(pTsdb->pVnode), pTsdb->path,
tstrerror(code));
@@ -1196,14 +1332,17 @@ _err:
int32_t tsdbSnapWriterClose(STsdbSnapWriter** ppWriter, int8_t rollback) {
int32_t code = 0;
STsdbSnapWriter* pWriter = *ppWriter;
+ STsdb* pTsdb = pWriter->pTsdb;
if (rollback) {
ASSERT(0);
// code = tsdbFSRollback(pWriter->pTsdb->pFS);
// if (code) goto _err;
} else {
- code = tsdbSnapWriteDataEnd(pWriter);
- if (code) goto _err;
+ if (pWriter->dWriter.pWriter) {
+ code = tsdbSnapWriteCloseFile(pWriter);
+ if (code) goto _err;
+ }
code = tsdbSnapWriteDelEnd(pWriter);
if (code) goto _err;
@@ -1211,14 +1350,44 @@ int32_t tsdbSnapWriterClose(STsdbSnapWriter** ppWriter, int8_t rollback) {
code = tsdbFSCommit1(pWriter->pTsdb, &pWriter->fs);
if (code) goto _err;
+ // lock
+ taosThreadRwlockWrlock(&pTsdb->rwLock);
+
code = tsdbFSCommit2(pWriter->pTsdb, &pWriter->fs);
- if (code) goto _err;
+ if (code) {
+ taosThreadRwlockUnlock(&pTsdb->rwLock);
+ goto _err;
+ }
+
+ // unlock
+ taosThreadRwlockUnlock(&pTsdb->rwLock);
}
+ // SNAP_DATA_DEL
+ taosArrayDestroy(pWriter->aDelIdxW);
+ taosArrayDestroy(pWriter->aDelData);
+ taosArrayDestroy(pWriter->aDelIdxR);
+
+ // SNAP_DATA_TSDB
+
+ // Writer
+ tBlockDataDestroy(&pWriter->dWriter.sData, 1);
+ tBlockDataDestroy(&pWriter->dWriter.bData, 1);
+ taosArrayDestroy(pWriter->dWriter.aSttBlk);
+ tMapDataClear(&pWriter->dWriter.mDataBlk);
+ taosArrayDestroy(pWriter->dWriter.aBlockIdx);
+
+ // Reader
+ tBlockDataDestroy(&pWriter->dReader.bData, 1);
+ tMapDataClear(&pWriter->dReader.mDataBlk);
+ taosArrayDestroy(pWriter->dReader.aBlockIdx);
+
+ tBlockDataDestroy(&pWriter->bData, 1);
+ tTSchemaDestroy(pWriter->skmTable.pTSchema);
+
for (int32_t iBuf = 0; iBuf < sizeof(pWriter->aBuf) / sizeof(uint8_t*); iBuf++) {
tFree(pWriter->aBuf[iBuf]);
}
-
tsdbInfo("vgId:%d, vnode snapshot tsdb writer close for %s", TD_VID(pWriter->pTsdb->pVnode), pWriter->pTsdb->path);
taosMemoryFree(pWriter);
*ppWriter = NULL;
@@ -1243,8 +1412,8 @@ int32_t tsdbSnapWrite(STsdbSnapWriter* pWriter, uint8_t* pData, uint32_t nData)
goto _exit;
} else {
- if (pWriter->pDataFWriter) {
- code = tsdbSnapWriteDataEnd(pWriter);
+ if (pWriter->dWriter.pWriter) {
+ code = tsdbSnapWriteCloseFile(pWriter);
if (code) goto _err;
}
}
@@ -1257,7 +1426,6 @@ int32_t tsdbSnapWrite(STsdbSnapWriter* pWriter, uint8_t* pData, uint32_t nData)
_exit:
tsdbDebug("vgId:%d, tsdb snapshot write for %s succeed", TD_VID(pWriter->pTsdb->pVnode), pWriter->pTsdb->path);
-
return code;
_err:
diff --git a/source/dnode/vnode/src/tsdb/tsdbUtil.c b/source/dnode/vnode/src/tsdb/tsdbUtil.c
index 6db9d5e6f40c5d35e52d90dd86b28f4cb7a94676..caeca45e01d0ec2def492d65f4ce9c6bd112e322 100644
--- a/source/dnode/vnode/src/tsdb/tsdbUtil.c
+++ b/source/dnode/vnode/src/tsdb/tsdbUtil.c
@@ -51,6 +51,22 @@ _exit:
return code;
}
+int32_t tMapDataCopy(SMapData *pFrom, SMapData *pTo) {
+ int32_t code = 0;
+
+ pTo->nItem = pFrom->nItem;
+ pTo->nData = pFrom->nData;
+ code = tRealloc((uint8_t **)&pTo->aOffset, sizeof(int32_t) * pFrom->nItem);
+ if (code) goto _exit;
+ code = tRealloc(&pTo->pData, pFrom->nData);
+ if (code) goto _exit;
+ memcpy(pTo->aOffset, pFrom->aOffset, sizeof(int32_t) * pFrom->nItem);
+ memcpy(pTo->pData, pFrom->pData, pFrom->nData);
+
+_exit:
+ return code;
+}
+
int32_t tMapDataSearch(SMapData *pMapData, void *pSearchItem, int32_t (*tGetItemFn)(uint8_t *, void *),
int32_t (*tItemCmprFn)(const void *, const void *), void *pItem) {
int32_t code = 0;
@@ -198,7 +214,7 @@ int32_t tCmprBlockIdx(void const *lhs, void const *rhs) {
int32_t tCmprBlockL(void const *lhs, void const *rhs) {
SBlockIdx *lBlockIdx = (SBlockIdx *)lhs;
- SBlockL *rBlockL = (SBlockL *)rhs;
+ SSttBlk *rBlockL = (SSttBlk *)rhs;
if (lBlockIdx->suid < rBlockL->suid) {
return -1;
@@ -215,69 +231,69 @@ int32_t tCmprBlockL(void const *lhs, void const *rhs) {
return 0;
}
-// SBlock ======================================================
-void tBlockReset(SBlock *pBlock) {
- *pBlock = (SBlock){.minKey = TSDBKEY_MAX, .maxKey = TSDBKEY_MIN, .minVer = VERSION_MAX, .maxVer = VERSION_MIN};
+// SDataBlk ======================================================
+void tDataBlkReset(SDataBlk *pDataBlk) {
+ *pDataBlk = (SDataBlk){.minKey = TSDBKEY_MAX, .maxKey = TSDBKEY_MIN, .minVer = VERSION_MAX, .maxVer = VERSION_MIN};
}
-int32_t tPutBlock(uint8_t *p, void *ph) {
- int32_t n = 0;
- SBlock *pBlock = (SBlock *)ph;
-
- n += tPutI64v(p ? p + n : p, pBlock->minKey.version);
- n += tPutI64v(p ? p + n : p, pBlock->minKey.ts);
- n += tPutI64v(p ? p + n : p, pBlock->maxKey.version);
- n += tPutI64v(p ? p + n : p, pBlock->maxKey.ts);
- n += tPutI64v(p ? p + n : p, pBlock->minVer);
- n += tPutI64v(p ? p + n : p, pBlock->maxVer);
- n += tPutI32v(p ? p + n : p, pBlock->nRow);
- n += tPutI8(p ? p + n : p, pBlock->hasDup);
- n += tPutI8(p ? p + n : p, pBlock->nSubBlock);
- for (int8_t iSubBlock = 0; iSubBlock < pBlock->nSubBlock; iSubBlock++) {
- n += tPutI64v(p ? p + n : p, pBlock->aSubBlock[iSubBlock].offset);
- n += tPutI32v(p ? p + n : p, pBlock->aSubBlock[iSubBlock].szBlock);
- n += tPutI32v(p ? p + n : p, pBlock->aSubBlock[iSubBlock].szKey);
- }
- if (pBlock->nSubBlock == 1 && !pBlock->hasDup) {
- n += tPutI64v(p ? p + n : p, pBlock->smaInfo.offset);
- n += tPutI32v(p ? p + n : p, pBlock->smaInfo.size);
+int32_t tPutDataBlk(uint8_t *p, void *ph) {
+ int32_t n = 0;
+ SDataBlk *pDataBlk = (SDataBlk *)ph;
+
+ n += tPutI64v(p ? p + n : p, pDataBlk->minKey.version);
+ n += tPutI64v(p ? p + n : p, pDataBlk->minKey.ts);
+ n += tPutI64v(p ? p + n : p, pDataBlk->maxKey.version);
+ n += tPutI64v(p ? p + n : p, pDataBlk->maxKey.ts);
+ n += tPutI64v(p ? p + n : p, pDataBlk->minVer);
+ n += tPutI64v(p ? p + n : p, pDataBlk->maxVer);
+ n += tPutI32v(p ? p + n : p, pDataBlk->nRow);
+ n += tPutI8(p ? p + n : p, pDataBlk->hasDup);
+ n += tPutI8(p ? p + n : p, pDataBlk->nSubBlock);
+ for (int8_t iSubBlock = 0; iSubBlock < pDataBlk->nSubBlock; iSubBlock++) {
+ n += tPutI64v(p ? p + n : p, pDataBlk->aSubBlock[iSubBlock].offset);
+ n += tPutI32v(p ? p + n : p, pDataBlk->aSubBlock[iSubBlock].szBlock);
+ n += tPutI32v(p ? p + n : p, pDataBlk->aSubBlock[iSubBlock].szKey);
+ }
+ if (pDataBlk->nSubBlock == 1 && !pDataBlk->hasDup) {
+ n += tPutI64v(p ? p + n : p, pDataBlk->smaInfo.offset);
+ n += tPutI32v(p ? p + n : p, pDataBlk->smaInfo.size);
}
return n;
}
-int32_t tGetBlock(uint8_t *p, void *ph) {
- int32_t n = 0;
- SBlock *pBlock = (SBlock *)ph;
-
- n += tGetI64v(p + n, &pBlock->minKey.version);
- n += tGetI64v(p + n, &pBlock->minKey.ts);
- n += tGetI64v(p + n, &pBlock->maxKey.version);
- n += tGetI64v(p + n, &pBlock->maxKey.ts);
- n += tGetI64v(p + n, &pBlock->minVer);
- n += tGetI64v(p + n, &pBlock->maxVer);
- n += tGetI32v(p + n, &pBlock->nRow);
- n += tGetI8(p + n, &pBlock->hasDup);
- n += tGetI8(p + n, &pBlock->nSubBlock);
- for (int8_t iSubBlock = 0; iSubBlock < pBlock->nSubBlock; iSubBlock++) {
- n += tGetI64v(p + n, &pBlock->aSubBlock[iSubBlock].offset);
- n += tGetI32v(p + n, &pBlock->aSubBlock[iSubBlock].szBlock);
- n += tGetI32v(p + n, &pBlock->aSubBlock[iSubBlock].szKey);
- }
- if (pBlock->nSubBlock == 1 && !pBlock->hasDup) {
- n += tGetI64v(p + n, &pBlock->smaInfo.offset);
- n += tGetI32v(p + n, &pBlock->smaInfo.size);
+int32_t tGetDataBlk(uint8_t *p, void *ph) {
+ int32_t n = 0;
+ SDataBlk *pDataBlk = (SDataBlk *)ph;
+
+ n += tGetI64v(p + n, &pDataBlk->minKey.version);
+ n += tGetI64v(p + n, &pDataBlk->minKey.ts);
+ n += tGetI64v(p + n, &pDataBlk->maxKey.version);
+ n += tGetI64v(p + n, &pDataBlk->maxKey.ts);
+ n += tGetI64v(p + n, &pDataBlk->minVer);
+ n += tGetI64v(p + n, &pDataBlk->maxVer);
+ n += tGetI32v(p + n, &pDataBlk->nRow);
+ n += tGetI8(p + n, &pDataBlk->hasDup);
+ n += tGetI8(p + n, &pDataBlk->nSubBlock);
+ for (int8_t iSubBlock = 0; iSubBlock < pDataBlk->nSubBlock; iSubBlock++) {
+ n += tGetI64v(p + n, &pDataBlk->aSubBlock[iSubBlock].offset);
+ n += tGetI32v(p + n, &pDataBlk->aSubBlock[iSubBlock].szBlock);
+ n += tGetI32v(p + n, &pDataBlk->aSubBlock[iSubBlock].szKey);
+ }
+ if (pDataBlk->nSubBlock == 1 && !pDataBlk->hasDup) {
+ n += tGetI64v(p + n, &pDataBlk->smaInfo.offset);
+ n += tGetI32v(p + n, &pDataBlk->smaInfo.size);
} else {
- pBlock->smaInfo.offset = 0;
- pBlock->smaInfo.size = 0;
+ pDataBlk->smaInfo.offset = 0;
+ pDataBlk->smaInfo.size = 0;
}
return n;
}
-int32_t tBlockCmprFn(const void *p1, const void *p2) {
- SBlock *pBlock1 = (SBlock *)p1;
- SBlock *pBlock2 = (SBlock *)p2;
+int32_t tDataBlkCmprFn(const void *p1, const void *p2) {
+ SDataBlk *pBlock1 = (SDataBlk *)p1;
+ SDataBlk *pBlock2 = (SDataBlk *)p2;
if (tsdbKeyCmprFn(&pBlock1->maxKey, &pBlock2->minKey) < 0) {
return -1;
@@ -288,48 +304,48 @@ int32_t tBlockCmprFn(const void *p1, const void *p2) {
return 0;
}
-bool tBlockHasSma(SBlock *pBlock) {
- if (pBlock->nSubBlock > 1) return false;
- if (pBlock->hasDup) return false;
+bool tDataBlkHasSma(SDataBlk *pDataBlk) {
+ if (pDataBlk->nSubBlock > 1) return false;
+ if (pDataBlk->hasDup) return false;
- return pBlock->smaInfo.size > 0;
+ return pDataBlk->smaInfo.size > 0;
}
-// SBlockL ======================================================
-int32_t tPutBlockL(uint8_t *p, void *ph) {
+// SSttBlk ======================================================
+int32_t tPutSttBlk(uint8_t *p, void *ph) {
int32_t n = 0;
- SBlockL *pBlockL = (SBlockL *)ph;
-
- n += tPutI64(p ? p + n : p, pBlockL->suid);
- n += tPutI64(p ? p + n : p, pBlockL->minUid);
- n += tPutI64(p ? p + n : p, pBlockL->maxUid);
- n += tPutI64v(p ? p + n : p, pBlockL->minKey);
- n += tPutI64v(p ? p + n : p, pBlockL->maxKey);
- n += tPutI64v(p ? p + n : p, pBlockL->minVer);
- n += tPutI64v(p ? p + n : p, pBlockL->maxVer);
- n += tPutI32v(p ? p + n : p, pBlockL->nRow);
- n += tPutI64v(p ? p + n : p, pBlockL->bInfo.offset);
- n += tPutI32v(p ? p + n : p, pBlockL->bInfo.szBlock);
- n += tPutI32v(p ? p + n : p, pBlockL->bInfo.szKey);
+ SSttBlk *pSttBlk = (SSttBlk *)ph;
+
+ n += tPutI64(p ? p + n : p, pSttBlk->suid);
+ n += tPutI64(p ? p + n : p, pSttBlk->minUid);
+ n += tPutI64(p ? p + n : p, pSttBlk->maxUid);
+ n += tPutI64v(p ? p + n : p, pSttBlk->minKey);
+ n += tPutI64v(p ? p + n : p, pSttBlk->maxKey);
+ n += tPutI64v(p ? p + n : p, pSttBlk->minVer);
+ n += tPutI64v(p ? p + n : p, pSttBlk->maxVer);
+ n += tPutI32v(p ? p + n : p, pSttBlk->nRow);
+ n += tPutI64v(p ? p + n : p, pSttBlk->bInfo.offset);
+ n += tPutI32v(p ? p + n : p, pSttBlk->bInfo.szBlock);
+ n += tPutI32v(p ? p + n : p, pSttBlk->bInfo.szKey);
return n;
}
-int32_t tGetBlockL(uint8_t *p, void *ph) {
+int32_t tGetSttBlk(uint8_t *p, void *ph) {
int32_t n = 0;
- SBlockL *pBlockL = (SBlockL *)ph;
-
- n += tGetI64(p + n, &pBlockL->suid);
- n += tGetI64(p + n, &pBlockL->minUid);
- n += tGetI64(p + n, &pBlockL->maxUid);
- n += tGetI64v(p + n, &pBlockL->minKey);
- n += tGetI64v(p + n, &pBlockL->maxKey);
- n += tGetI64v(p + n, &pBlockL->minVer);
- n += tGetI64v(p + n, &pBlockL->maxVer);
- n += tGetI32v(p + n, &pBlockL->nRow);
- n += tGetI64v(p + n, &pBlockL->bInfo.offset);
- n += tGetI32v(p + n, &pBlockL->bInfo.szBlock);
- n += tGetI32v(p + n, &pBlockL->bInfo.szKey);
+ SSttBlk *pSttBlk = (SSttBlk *)ph;
+
+ n += tGetI64(p + n, &pSttBlk->suid);
+ n += tGetI64(p + n, &pSttBlk->minUid);
+ n += tGetI64(p + n, &pSttBlk->maxUid);
+ n += tGetI64v(p + n, &pSttBlk->minKey);
+ n += tGetI64v(p + n, &pSttBlk->maxKey);
+ n += tGetI64v(p + n, &pSttBlk->minVer);
+ n += tGetI64v(p + n, &pSttBlk->maxVer);
+ n += tGetI32v(p + n, &pSttBlk->nRow);
+ n += tGetI64v(p + n, &pSttBlk->bInfo.offset);
+ n += tGetI32v(p + n, &pSttBlk->bInfo.szBlock);
+ n += tGetI32v(p + n, &pSttBlk->bInfo.szKey);
return n;
}
@@ -1532,7 +1548,7 @@ int32_t tCmprBlockData(SBlockData *pBlockData, int8_t cmprAlg, uint8_t **ppOut,
if (code) goto _exit;
blockCol.offset = aBufN[0];
- aBufN[0] = aBufN[0] + blockCol.szBitmap + blockCol.szOffset + blockCol.szValue + sizeof(TSCKSUM);
+ aBufN[0] = aBufN[0] + blockCol.szBitmap + blockCol.szOffset + blockCol.szValue;
}
code = tRealloc(&aBuf[1], hdr.szBlkCol + tPutBlockCol(NULL, &blockCol));
@@ -1540,15 +1556,8 @@ int32_t tCmprBlockData(SBlockData *pBlockData, int8_t cmprAlg, uint8_t **ppOut,
hdr.szBlkCol += tPutBlockCol(aBuf[1] + hdr.szBlkCol, &blockCol);
}
- aBufN[1] = 0;
- if (hdr.szBlkCol > 0) {
- aBufN[1] = hdr.szBlkCol + sizeof(TSCKSUM);
-
- code = tRealloc(&aBuf[1], aBufN[1]);
- if (code) goto _exit;
-
- taosCalcChecksumAppend(0, aBuf[1], aBufN[1]);
- }
+ // SBlockCol
+ aBufN[1] = hdr.szBlkCol;
// uid + version + tskey
aBufN[2] = 0;
@@ -1569,16 +1578,11 @@ int32_t tCmprBlockData(SBlockData *pBlockData, int8_t cmprAlg, uint8_t **ppOut,
if (code) goto _exit;
aBufN[2] += hdr.szKey;
- aBufN[2] += sizeof(TSCKSUM);
- code = tRealloc(&aBuf[2], aBufN[2]);
- if (code) goto _exit;
-
// hdr
aBufN[3] = tPutDiskDataHdr(NULL, &hdr);
code = tRealloc(&aBuf[3], aBufN[3]);
if (code) goto _exit;
tPutDiskDataHdr(aBuf[3], &hdr);
- taosCalcChecksumAppend(taosCalcChecksum(0, aBuf[3], aBufN[3]), aBuf[2], aBufN[2]);
// aggragate
if (ppOut) {
@@ -1603,17 +1607,13 @@ _exit:
int32_t tDecmprBlockData(uint8_t *pIn, int32_t szIn, SBlockData *pBlockData, uint8_t *aBuf[]) {
int32_t code = 0;
- tBlockDataClear(pBlockData);
+ tBlockDataReset(pBlockData);
int32_t n = 0;
SDiskDataHdr hdr = {0};
// SDiskDataHdr
n += tGetDiskDataHdr(pIn + n, &hdr);
- if (!taosCheckChecksumWhole(pIn, n + hdr.szUid + hdr.szVer + hdr.szKey + sizeof(TSCKSUM))) {
- code = TSDB_CODE_FILE_CORRUPTED;
- goto _exit;
- }
ASSERT(hdr.delimiter == TSDB_FILE_DLMT);
pBlockData->suid = hdr.suid;
@@ -1641,7 +1641,7 @@ int32_t tDecmprBlockData(uint8_t *pIn, int32_t szIn, SBlockData *pBlockData, uin
code = tsdbDecmprData(pIn + n, hdr.szKey, TSDB_DATA_TYPE_TIMESTAMP, hdr.cmprAlg, (uint8_t **)&pBlockData->aTSKEY,
sizeof(TSKEY) * hdr.nRow, &aBuf[0]);
if (code) goto _exit;
- n = n + hdr.szKey + sizeof(TSCKSUM);
+ n += hdr.szKey;
// loop to decode each column data
if (hdr.szBlkCol == 0) goto _exit;
@@ -1663,8 +1663,8 @@ int32_t tDecmprBlockData(uint8_t *pIn, int32_t szIn, SBlockData *pBlockData, uin
if (code) goto _exit;
}
} else {
- code = tsdbDecmprColData(pIn + n + hdr.szBlkCol + sizeof(TSCKSUM) + blockCol.offset, &blockCol, hdr.cmprAlg,
- hdr.nRow, pColData, &aBuf[0]);
+ code = tsdbDecmprColData(pIn + n + hdr.szBlkCol + blockCol.offset, &blockCol, hdr.cmprAlg, hdr.nRow, pColData,
+ &aBuf[0]);
if (code) goto _exit;
}
}
@@ -2039,19 +2039,13 @@ int32_t tsdbCmprColData(SColData *pColData, int8_t cmprAlg, SBlockCol *pBlockCol
size += pBlockCol->szOffset;
// value
- if (pColData->flag != (HAS_NULL | HAS_NONE)) {
+ if ((pColData->flag != (HAS_NULL | HAS_NONE)) && pColData->nData) {
code = tsdbCmprData((uint8_t *)pColData->pData, pColData->nData, pColData->type, cmprAlg, ppOut, nOut + size,
&pBlockCol->szValue, ppBuf);
if (code) goto _exit;
}
size += pBlockCol->szValue;
- // checksum
- size += sizeof(TSCKSUM);
- code = tRealloc(ppOut, nOut + size);
- if (code) goto _exit;
- taosCalcChecksumAppend(0, *ppOut + nOut, size);
-
_exit:
return code;
}
@@ -2060,12 +2054,6 @@ int32_t tsdbDecmprColData(uint8_t *pIn, SBlockCol *pBlockCol, int8_t cmprAlg, in
uint8_t **ppBuf) {
int32_t code = 0;
- int32_t size = pBlockCol->szBitmap + pBlockCol->szOffset + pBlockCol->szValue + sizeof(TSCKSUM);
- if (!taosCheckChecksumWhole(pIn, size)) {
- code = TSDB_CODE_FILE_CORRUPTED;
- goto _exit;
- }
-
ASSERT(pColData->cid == pBlockCol->cid);
ASSERT(pColData->type == pBlockCol->type);
pColData->smaOn = pBlockCol->smaOn;
@@ -2137,37 +2125,3 @@ int32_t tsdbDecmprColData(uint8_t *pIn, SBlockCol *pBlockCol, int8_t cmprAlg, in
_exit:
return code;
}
-
-int32_t tsdbReadAndCheck(TdFilePtr pFD, int64_t offset, uint8_t **ppOut, int32_t size, int8_t toCheck) {
- int32_t code = 0;
-
- // alloc
- code = tRealloc(ppOut, size);
- if (code) goto _exit;
-
- // seek
- int64_t n = taosLSeekFile(pFD, offset, SEEK_SET);
- if (n < 0) {
- code = TAOS_SYSTEM_ERROR(errno);
- goto _exit;
- }
-
- // read
- n = taosReadFile(pFD, *ppOut, size);
- if (n < 0) {
- code = TAOS_SYSTEM_ERROR(errno);
- goto _exit;
- } else if (n < size) {
- code = TSDB_CODE_FILE_CORRUPTED;
- goto _exit;
- }
-
- // check
- if (toCheck && !taosCheckChecksumWhole(*ppOut, size)) {
- code = TSDB_CODE_FILE_CORRUPTED;
- goto _exit;
- }
-
-_exit:
- return code;
-}
diff --git a/source/dnode/vnode/src/tsdb/tsdbWrite.c b/source/dnode/vnode/src/tsdb/tsdbWrite.c
index 383652531e211504983444d9d783ddf9189f5161..0a9fbf92a4bf62326aa9755b827b83d0d510d2f7 100644
--- a/source/dnode/vnode/src/tsdb/tsdbWrite.c
+++ b/source/dnode/vnode/src/tsdb/tsdbWrite.c
@@ -39,7 +39,7 @@ int tsdbInsertData(STsdb *pTsdb, int64_t version, SSubmitReq *pMsg, SSubmitRsp *
SSubmitBlkRsp r = {0};
tGetSubmitMsgNext(&msgIter, &pBlock);
if (pBlock == NULL) break;
- if (tsdbInsertTableData(pTsdb, version, &msgIter, pBlock, &r) < 0) {
+ if ((terrno = tsdbInsertTableData(pTsdb, version, &msgIter, pBlock, &r)) < 0) {
return -1;
}
diff --git a/source/dnode/vnode/src/vnd/vnodeCfg.c b/source/dnode/vnode/src/vnd/vnodeCfg.c
index 4418ce20e88b8c461e55fbe0d7b4a8348e032379..9ed72eeecdac435685b1b37d5faa60c9d91bee4c 100644
--- a/source/dnode/vnode/src/vnd/vnodeCfg.c
+++ b/source/dnode/vnode/src/vnd/vnodeCfg.c
@@ -13,6 +13,7 @@
* along with this program. If not, see .
*/
+#include "tutil.h"
#include "vnd.h"
const SVnodeCfg vnodeCfgDefault = {.vgId = -1,
@@ -47,7 +48,9 @@ const SVnodeCfg vnodeCfgDefault = {.vgId = -1,
},
.hashBegin = 0,
.hashEnd = 0,
- .hashMethod = 0};
+ .hashMethod = 0,
+ .sttTrigger = TSDB_DEFAULT_STT_FILE,
+ .tsdbPageSize = TSDB_DEFAULT_PAGE_SIZE};
int vnodeCheckCfg(const SVnodeCfg *pCfg) {
// TODO
@@ -106,9 +109,12 @@ int vnodeEncodeConfig(const void *pObj, SJson *pJson) {
if (tjsonAddIntegerToObject(pJson, "wal.retentionSize", pCfg->walCfg.retentionSize) < 0) return -1;
if (tjsonAddIntegerToObject(pJson, "wal.segSize", pCfg->walCfg.segSize) < 0) return -1;
if (tjsonAddIntegerToObject(pJson, "wal.level", pCfg->walCfg.level) < 0) return -1;
+ if (tjsonAddIntegerToObject(pJson, "sstTrigger", pCfg->sttTrigger) < 0) return -1;
if (tjsonAddIntegerToObject(pJson, "hashBegin", pCfg->hashBegin) < 0) return -1;
if (tjsonAddIntegerToObject(pJson, "hashEnd", pCfg->hashEnd) < 0) return -1;
if (tjsonAddIntegerToObject(pJson, "hashMethod", pCfg->hashMethod) < 0) return -1;
+ if (tjsonAddIntegerToObject(pJson, "hashPrefix", pCfg->hashPrefix) < 0) return -1;
+ if (tjsonAddIntegerToObject(pJson, "hashSuffix", pCfg->hashSuffix) < 0) return -1;
if (tjsonAddIntegerToObject(pJson, "syncCfg.replicaNum", pCfg->syncCfg.replicaNum) < 0) return -1;
if (tjsonAddIntegerToObject(pJson, "syncCfg.myIndex", pCfg->syncCfg.myIndex) < 0) return -1;
@@ -117,6 +123,7 @@ int vnodeEncodeConfig(const void *pObj, SJson *pJson) {
if (tjsonAddIntegerToObject(pJson, "vndStats.ctables", pCfg->vndStats.numOfCTables) < 0) return -1;
if (tjsonAddIntegerToObject(pJson, "vndStats.ntables", pCfg->vndStats.numOfNTables) < 0) return -1;
if (tjsonAddIntegerToObject(pJson, "vndStats.timeseries", pCfg->vndStats.numOfTimeSeries) < 0) return -1;
+ if (tjsonAddIntegerToObject(pJson, "vndStats.ntimeseries", pCfg->vndStats.numOfNTimeSeries) < 0) return -1;
SJson *pNodeInfoArr = tjsonCreateArray();
tjsonAddItemToObject(pJson, "syncCfg.nodeInfo", pNodeInfoArr);
@@ -127,6 +134,9 @@ int vnodeEncodeConfig(const void *pObj, SJson *pJson) {
tjsonAddItemToArray(pNodeInfoArr, pNodeInfo);
}
+ // add tsdb page size config
+ if (tjsonAddIntegerToObject(pJson, "tsdbPageSize", pCfg->tsdbPageSize) < 0) return -1;
+
return 0;
}
@@ -204,12 +214,18 @@ int vnodeDecodeConfig(const SJson *pJson, void *pObj) {
if (code < 0) return -1;
tjsonGetNumberValue(pJson, "wal.level", pCfg->walCfg.level, code);
if (code < 0) return -1;
+ tjsonGetNumberValue(pJson, "sstTrigger", pCfg->sttTrigger, code);
+ if (code < 0) return -1;
tjsonGetNumberValue(pJson, "hashBegin", pCfg->hashBegin, code);
if (code < 0) return -1;
tjsonGetNumberValue(pJson, "hashEnd", pCfg->hashEnd, code);
if (code < 0) return -1;
tjsonGetNumberValue(pJson, "hashMethod", pCfg->hashMethod, code);
if (code < 0) return -1;
+ tjsonGetNumberValue(pJson, "hashPrefix", pCfg->hashPrefix, code);
+ if (code < 0) return -1;
+ tjsonGetNumberValue(pJson, "hashSuffix", pCfg->hashSuffix, code);
+ if (code < 0) return -1;
tjsonGetNumberValue(pJson, "syncCfg.replicaNum", pCfg->syncCfg.replicaNum, code);
if (code < 0) return -1;
@@ -224,6 +240,8 @@ int vnodeDecodeConfig(const SJson *pJson, void *pObj) {
if (code < 0) return -1;
tjsonGetNumberValue(pJson, "vndStats.timeseries", pCfg->vndStats.numOfTimeSeries, code);
if (code < 0) return -1;
+ tjsonGetNumberValue(pJson, "vndStats.ntimeseries", pCfg->vndStats.numOfNTimeSeries, code);
+ if (code < 0) return -1;
SJson *pNodeInfoArr = tjsonGetObjectItem(pJson, "syncCfg.nodeInfo");
int arraySize = tjsonGetArraySize(pNodeInfoArr);
@@ -236,6 +254,8 @@ int vnodeDecodeConfig(const SJson *pJson, void *pObj) {
tjsonGetStringValue(pNodeInfo, "nodeFqdn", (pCfg->syncCfg.nodeInfo)[i].nodeFqdn);
}
+ tjsonGetNumberValue(pJson, "tsdbPageSize", pCfg->tsdbPageSize, code);
+
return 0;
}
@@ -244,7 +264,8 @@ int vnodeValidateTableHash(SVnode *pVnode, char *tableFName) {
switch (pVnode->config.hashMethod) {
default:
- hashValue = MurmurHash3_32(tableFName, strlen(tableFName));
+ hashValue = taosGetTbHashVal(tableFName, strlen(tableFName), pVnode->config.hashMethod, pVnode->config.hashPrefix,
+ pVnode->config.hashSuffix);
break;
}
diff --git a/source/dnode/vnode/src/vnd/vnodeOpen.c b/source/dnode/vnode/src/vnd/vnodeOpen.c
index dcfbd33b903c9fcd55e216bd1b24c73f2845af7b..4ccfea40510a6148ceec57893629c693d9d7a1cd 100644
--- a/source/dnode/vnode/src/vnd/vnodeOpen.c
+++ b/source/dnode/vnode/src/vnd/vnodeOpen.c
@@ -60,6 +60,8 @@ SVnode *vnodeOpen(const char *path, STfs *pTfs, SMsgCb msgCb) {
snprintf(dir, TSDB_FILENAME_LEN, "%s%s%s", tfsGetPrimaryPath(pTfs), TD_DIRSEP, path);
+ info.config = vnodeCfgDefault;
+
// load vnode info
ret = vnodeLoadInfo(dir, &info);
if (ret < 0) {
@@ -87,7 +89,6 @@ SVnode *vnodeOpen(const char *path, STfs *pTfs, SMsgCb msgCb) {
pVnode->msgCb = msgCb;
taosThreadMutexInit(&pVnode->lock, NULL);
pVnode->blocked = false;
- pVnode->inClose = false;
tsem_init(&pVnode->syncSem, 0, 0);
tsem_init(&(pVnode->canCommit), 0, 1);
@@ -182,8 +183,6 @@ _err:
void vnodePreClose(SVnode *pVnode) {
if (pVnode) {
syncLeaderTransfer(pVnode->sync);
- pVnode->inClose = true;
- smaPreClose(pVnode->pSma);
}
}
diff --git a/source/dnode/vnode/src/vnd/vnodeQuery.c b/source/dnode/vnode/src/vnd/vnodeQuery.c
index 8d799e919d1e4c06cfec6438d7a4a34fc336993d..8cfe1d8adfddb675856238501977fb9d9d8aee6e 100644
--- a/source/dnode/vnode/src/vnd/vnodeQuery.c
+++ b/source/dnode/vnode/src/vnd/vnodeQuery.c
@@ -368,6 +368,7 @@ _exit:
int32_t vnodeGetLoad(SVnode *pVnode, SVnodeLoad *pLoad) {
pLoad->vgId = TD_VID(pVnode);
pLoad->syncState = syncGetMyRole(pVnode->sync);
+ pLoad->cacheUsage = tsdbCacheGetUsage(pVnode);
pLoad->numOfTables = metaGetTbNum(pVnode->pMeta);
pLoad->numOfTimeSeries = metaGetTimeSeriesNum(pVnode->pMeta);
pLoad->totalStorage = (int64_t)3 * 1073741824;
@@ -424,8 +425,8 @@ int32_t vnodeGetCtbIdList(SVnode *pVnode, int64_t suid, SArray *list) {
return TSDB_CODE_SUCCESS;
}
-int32_t vnodeGetStbIdList(SVnode* pVnode, int64_t suid, SArray* list) {
- SMStbCursor* pCur = metaOpenStbCursor(pVnode->pMeta, suid);
+int32_t vnodeGetStbIdList(SVnode *pVnode, int64_t suid, SArray *list) {
+ SMStbCursor *pCur = metaOpenStbCursor(pVnode->pMeta, suid);
if (!pCur) {
return TSDB_CODE_FAILED;
}
@@ -467,9 +468,13 @@ static int32_t vnodeGetStbColumnNum(SVnode *pVnode, tb_uid_t suid, int *num) {
STSchema *pTSchema = metaGetTbTSchema(pVnode->pMeta, suid, -1);
// metaGetTbTSchemaEx(pVnode->pMeta, suid, suid, -1, &pTSchema);
- *num = pTSchema->numOfCols;
+ if (pTSchema) {
+ *num = pTSchema->numOfCols;
- taosMemoryFree(pTSchema);
+ taosMemoryFree(pTSchema);
+ } else {
+ *num = 2;
+ }
return TSDB_CODE_SUCCESS;
}
diff --git a/source/dnode/vnode/src/vnd/vnodeSnapshot.c b/source/dnode/vnode/src/vnd/vnodeSnapshot.c
index 5a81f9191920ffac0ae055e140e7e59448dfd406..08c3a34699b6f9f83b366e42f77e42a6094f09ed 100644
--- a/source/dnode/vnode/src/vnd/vnodeSnapshot.c
+++ b/source/dnode/vnode/src/vnd/vnodeSnapshot.c
@@ -39,7 +39,7 @@ struct SVSnapReader {
SStreamStateReader *pStreamStateReader;
// rsma
int8_t rsmaDone;
- SRsmaSnapReader *pRsmaReader;
+ SRSmaSnapReader *pRsmaReader;
};
int32_t vnodeSnapReaderOpen(SVnode *pVnode, int64_t sver, int64_t ever, SVSnapReader **ppReader) {
@@ -241,7 +241,7 @@ struct SVSnapWriter {
SStreamTaskWriter *pStreamTaskWriter;
SStreamStateWriter *pStreamStateWriter;
// rsma
- SRsmaSnapWriter *pRsmaSnapWriter;
+ SRSmaSnapWriter *pRsmaSnapWriter;
};
int32_t vnodeSnapWriterOpen(SVnode *pVnode, int64_t sver, int64_t ever, SVSnapWriter **ppWriter) {
@@ -354,7 +354,8 @@ int32_t vnodeSnapWrite(SVSnapWriter *pWriter, uint8_t *pData, uint32_t nData) {
code = metaSnapWrite(pWriter->pMetaSnapWriter, pData, nData);
if (code) goto _err;
} break;
- case SNAP_DATA_TSDB: {
+ case SNAP_DATA_TSDB:
+ case SNAP_DATA_DEL: {
// tsdb
if (pWriter->pTsdbSnapWriter == NULL) {
code = tsdbSnapWriterOpen(pVnode->pTsdb, pWriter->sver, pWriter->ever, &pWriter->pTsdbSnapWriter);
diff --git a/source/dnode/vnode/src/vnd/vnodeSvr.c b/source/dnode/vnode/src/vnd/vnodeSvr.c
index 7a8d168f4f1a2cb4b1379e9d5794ff58e83841bf..51d83d8eedf6845e67a5ee81ff96debe52275bd1 100644
--- a/source/dnode/vnode/src/vnd/vnodeSvr.c
+++ b/source/dnode/vnode/src/vnd/vnodeSvr.c
@@ -301,8 +301,6 @@ int32_t vnodeProcessQueryMsg(SVnode *pVnode, SRpcMsg *pMsg) {
return qWorkerProcessQueryMsg(&handle, pVnode->pQuery, pMsg, 0);
case TDMT_SCH_QUERY_CONTINUE:
return qWorkerProcessCQueryMsg(&handle, pVnode->pQuery, pMsg, 0);
- case TDMT_VND_EXEC_RSMA:
- return smaProcessExec(pVnode->pSma, pMsg);
default:
vError("unknown msg type:%d in query queue", pMsg->msgType);
return TSDB_CODE_VND_APP_ERROR;
@@ -370,6 +368,10 @@ void smaHandleRes(void *pVnode, int64_t smaId, const SArray *data) {
}
void vnodeUpdateMetaRsp(SVnode *pVnode, STableMetaRsp *pMetaRsp) {
+ if (NULL == pMetaRsp) {
+ return;
+ }
+
strcpy(pMetaRsp->dbFName, pVnode->config.dbname);
pMetaRsp->dbId = pVnode->config.dbId;
pMetaRsp->vgId = TD_VID(pVnode);
@@ -380,14 +382,14 @@ static int32_t vnodeProcessTrimReq(SVnode *pVnode, int64_t version, void *pReq,
int32_t code = 0;
SVTrimDbReq trimReq = {0};
- vInfo("vgId:%d, trim vnode request will be processed, time:%d", pVnode->config.vgId, trimReq.timestamp);
-
// decode
if (tDeserializeSVTrimDbReq(pReq, len, &trimReq) != 0) {
code = TSDB_CODE_INVALID_MSG;
goto _exit;
}
+ vInfo("vgId:%d, trim vnode request will be processed, time:%d", pVnode->config.vgId, trimReq.timestamp);
+
// process
code = tsdbDoRetention(pVnode->pTsdb, trimReq.timestamp);
if (code) goto _exit;
@@ -494,6 +496,7 @@ static int32_t vnodeProcessCreateTbReq(SVnode *pVnode, int64_t version, void *pR
// loop to create table
for (int32_t iReq = 0; iReq < req.nReqs; iReq++) {
pCreateReq = req.pReqs + iReq;
+ memset(&cRsp, 0, sizeof(cRsp));
if ((terrno = grantCheck(TSDB_GRANT_TIMESERIES)) < 0) {
rcode = -1;
@@ -514,7 +517,7 @@ static int32_t vnodeProcessCreateTbReq(SVnode *pVnode, int64_t version, void *pR
}
// do create table
- if (metaCreateTable(pVnode->pMeta, version, pCreateReq) < 0) {
+ if (metaCreateTable(pVnode->pMeta, version, pCreateReq, &cRsp.pMeta) < 0) {
if (pCreateReq->flags & TD_CREATE_IF_NOT_EXISTS && terrno == TSDB_CODE_TDB_TABLE_ALREADY_EXIST) {
cRsp.code = TSDB_CODE_SUCCESS;
} else {
@@ -524,13 +527,14 @@ static int32_t vnodeProcessCreateTbReq(SVnode *pVnode, int64_t version, void *pR
cRsp.code = TSDB_CODE_SUCCESS;
tdFetchTbUidList(pVnode->pSma, &pStore, pCreateReq->ctb.suid, pCreateReq->uid);
taosArrayPush(tbUids, &pCreateReq->uid);
+ vnodeUpdateMetaRsp(pVnode, cRsp.pMeta);
}
taosArrayPush(rsp.pArray, &cRsp);
}
tqUpdateTbUidList(pVnode->pTq, tbUids, true);
- if (tdUpdateTbUidList(pVnode->pSma, pStore) < 0) {
+ if (tdUpdateTbUidList(pVnode->pSma, pStore, true) < 0) {
goto _exit;
}
tdUidStoreFree(pStore);
@@ -552,7 +556,7 @@ _exit:
pCreateReq = req.pReqs + iReq;
taosArrayDestroy(pCreateReq->ctb.tagName);
}
- taosArrayDestroy(rsp.pArray);
+ taosArrayDestroyEx(rsp.pArray, tFreeSVCreateTbRsp);
taosArrayDestroy(tbUids);
tDecoderClear(&decoder);
tEncoderClear(&encoder);
@@ -688,6 +692,7 @@ static int32_t vnodeProcessDropTbReq(SVnode *pVnode, int64_t version, void *pReq
SEncoder encoder = {0};
int32_t ret;
SArray *tbUids = NULL;
+ STbUidStore *pStore = NULL;
pRsp->msgType = TDMT_VND_DROP_TABLE_RSP;
pRsp->pCont = NULL;
@@ -711,9 +716,10 @@ static int32_t vnodeProcessDropTbReq(SVnode *pVnode, int64_t version, void *pReq
for (int32_t iReq = 0; iReq < req.nReqs; iReq++) {
SVDropTbReq *pDropTbReq = req.pReqs + iReq;
SVDropTbRsp dropTbRsp = {0};
+ tb_uid_t tbUid = 0;
/* code */
- ret = metaDropTable(pVnode->pMeta, version, pDropTbReq, tbUids);
+ ret = metaDropTable(pVnode->pMeta, version, pDropTbReq, tbUids, &tbUid);
if (ret < 0) {
if (pDropTbReq->igNotExists && terrno == TSDB_CODE_VND_TABLE_NOT_EXIST) {
dropTbRsp.code = TSDB_CODE_SUCCESS;
@@ -722,15 +728,18 @@ static int32_t vnodeProcessDropTbReq(SVnode *pVnode, int64_t version, void *pReq
}
} else {
dropTbRsp.code = TSDB_CODE_SUCCESS;
+ if (tbUid > 0) tdFetchTbUidList(pVnode->pSma, &pStore, pDropTbReq->suid, tbUid);
}
taosArrayPush(rsp.pArray, &dropTbRsp);
}
tqUpdateTbUidList(pVnode->pTq, tbUids, false);
+ tdUpdateTbUidList(pVnode->pSma, pStore, false);
_exit:
taosArrayDestroy(tbUids);
+ tdUidStoreFree(pStore);
tDecoderClear(&decoder);
tEncodeSize(tEncodeSVDropTbBatchRsp, &rsp, pRsp->contLen, ret);
pRsp->pCont = rpcMallocCont(pRsp->contLen);
@@ -864,7 +873,7 @@ static int32_t vnodeProcessSubmitReq(SVnode *pVnode, int64_t version, void *pReq
goto _exit;
}
- if (metaCreateTable(pVnode->pMeta, version, &createTbReq) < 0) {
+ if (metaCreateTable(pVnode->pMeta, version, &createTbReq, &submitBlkRsp.pMeta) < 0) {
if (terrno != TSDB_CODE_TDB_TABLE_ALREADY_EXIST) {
submitBlkRsp.code = terrno;
pRsp->code = terrno;
@@ -872,6 +881,10 @@ static int32_t vnodeProcessSubmitReq(SVnode *pVnode, int64_t version, void *pReq
taosArrayDestroy(createTbReq.ctb.tagName);
goto _exit;
}
+ } else {
+ if (NULL != submitBlkRsp.pMeta) {
+ vnodeUpdateMetaRsp(pVnode, submitBlkRsp.pMeta);
+ }
}
taosArrayPush(newTbUids, &createTbReq.uid);
@@ -915,11 +928,7 @@ _exit:
tEncodeSSubmitRsp(&encoder, &submitRsp);
tEncoderClear(&encoder);
- for (int32_t i = 0; i < taosArrayGetSize(submitRsp.pArray); i++) {
- taosMemoryFree(((SSubmitBlkRsp *)taosArrayGet(submitRsp.pArray, i))[0].tblFName);
- }
-
- taosArrayDestroy(submitRsp.pArray);
+ taosArrayDestroyEx(submitRsp.pArray, tFreeSSubmitBlkRsp);
// TODO: the partial success scenario and the error case
// => If partial success, extract the success submitted rows and reconstruct a new submit msg, and push to level
@@ -1103,6 +1112,7 @@ static int32_t vnodeProcessDeleteReq(SVnode *pVnode, int64_t version, void *pReq
tDecoderInit(pCoder, pReq, len);
tDecodeDeleteRes(pCoder, pRes);
+ ASSERT(taosArrayGetSize(pRes->uidList) == 0 || (pRes->skey != 0 && pRes->ekey != 0));
for (int32_t iUid = 0; iUid < taosArrayGetSize(pRes->uidList); iUid++) {
code = tsdbDeleteTableData(pVnode->pTsdb, version, pRes->suid, *(uint64_t *)taosArrayGet(pRes->uidList, iUid),
diff --git a/source/libs/catalog/inc/catalogInt.h b/source/libs/catalog/inc/catalogInt.h
index 777dcd0592ae69de003d5df0d1d9d2592302d195..9b62581051daac9c232409c0cb30d379e3a4d596 100644
--- a/source/libs/catalog/inc/catalogInt.h
+++ b/source/libs/catalog/inc/catalogInt.h
@@ -188,7 +188,7 @@ typedef struct SCtgTbCache {
typedef struct SCtgVgCache {
SRWLatch vgLock;
- SDBVgInfo *vgInfo;
+ SDBVgInfo *vgInfo;
} SCtgVgCache;
typedef struct SCtgDBCache {
@@ -224,7 +224,7 @@ typedef struct SCtgUserAuth {
} SCtgUserAuth;
typedef struct SCatalog {
- uint64_t clusterId;
+ uint64_t clusterId;
SHashObj *userCache; //key:user, value:SCtgUserAuth
SHashObj *dbCache; //key:dbname, value:SCtgDBCache
SCtgRentMgmt dbRent;
@@ -253,9 +253,9 @@ typedef struct SCtgJob {
int32_t jobResCode;
int32_t taskIdx;
SRWLatch taskLock;
-
+
uint64_t queryId;
- SCatalog* pCtg;
+ SCatalog* pCtg;
SRequestConnInfo conn;
void* userParam;
catalogCallback userFp;
@@ -279,7 +279,7 @@ typedef struct SCtgMsgCtx {
void* lastOut;
void* out;
char* target;
- SHashObj* pBatchs;
+ SHashObj* pBatchs;
} SCtgMsgCtx;
@@ -364,7 +364,7 @@ typedef struct SCtgCacheStat {
uint64_t numOfMetaHit;
uint64_t numOfMetaMiss;
uint64_t numOfIndexHit;
- uint64_t numOfIndexMiss;
+ uint64_t numOfIndexMiss;
uint64_t numOfUserHit;
uint64_t numOfUserMiss;
uint64_t numOfClear;
@@ -451,7 +451,7 @@ typedef struct SCtgCacheOperation {
int32_t opId;
void *data;
bool syncOp;
- tsem_t rspSem;
+ tsem_t rspSem;
bool stopQueue;
bool unLocked;
} SCtgCacheOperation;
@@ -466,7 +466,7 @@ typedef struct SCtgQueue {
bool stopQueue;
SCtgQNode *head;
SCtgQNode *tail;
- tsem_t reqSem;
+ tsem_t reqSem;
uint64_t qRemainNum;
} SCtgQueue;
@@ -475,7 +475,7 @@ typedef struct SCatalogMgmt {
int32_t jobPool;
SRWLatch lock;
SCtgQueue queue;
- TdThread updateThread;
+ TdThread updateThread;
SHashObj *pCluster; //key: clusterId, value: SCatalog*
SCatalogStat stat;
SCatalogCfg cfg;
@@ -528,8 +528,8 @@ typedef struct SCtgOperation {
#define CTG_META_SIZE(pMeta) (sizeof(STableMeta) + ((pMeta)->tableInfo.numOfTags + (pMeta)->tableInfo.numOfColumns) * sizeof(SSchema))
-#define CTG_TABLE_NOT_EXIST(code) (code == CTG_ERR_CODE_TABLE_NOT_EXIST)
-#define CTG_DB_NOT_EXIST(code) (code == TSDB_CODE_MND_DB_NOT_EXIST)
+#define CTG_TABLE_NOT_EXIST(code) (code == CTG_ERR_CODE_TABLE_NOT_EXIST)
+#define CTG_DB_NOT_EXIST(code) (code == TSDB_CODE_MND_DB_NOT_EXIST)
#define ctgFatal(param, ...) qFatal("CTG:%p " param, pCtg, __VA_ARGS__)
#define ctgError(param, ...) qError("CTG:%p " param, pCtg, __VA_ARGS__)
@@ -576,7 +576,7 @@ typedef struct SCtgOperation {
} \
} while (0)
-
+
#define CTG_ERR_RET(c) do { int32_t _code = c; if (_code != TSDB_CODE_SUCCESS) { terrno = _code; return _code; } } while (0)
#define CTG_RET(c) do { int32_t _code = c; if (_code != TSDB_CODE_SUCCESS) { terrno = _code; } return _code; } while (0)
#define CTG_ERR_JRET(c) do { code = c; if (code != TSDB_CODE_SUCCESS) { terrno = code; goto _return; } } while (0)
diff --git a/source/libs/catalog/src/catalog.c b/source/libs/catalog/src/catalog.c
index b6e958e1929cc71dfa43ad018728e1f1844cb472..7b32eadcd415116f67db8526449c8a6759f45bcd 100644
--- a/source/libs/catalog/src/catalog.c
+++ b/source/libs/catalog/src/catalog.c
@@ -270,13 +270,22 @@ int32_t ctgUpdateTbMeta(SCatalog* pCtg, STableMetaRsp* rspMsg, bool syncOp) {
int32_t code = 0;
strcpy(output->dbFName, rspMsg->dbFName);
- strcpy(output->tbName, rspMsg->tbName);
output->dbId = rspMsg->dbId;
- SET_META_TYPE_TABLE(output->metaType);
+ if (TSDB_CHILD_TABLE == rspMsg->tableType && NULL == rspMsg->pSchemas) {
+ strcpy(output->ctbName, rspMsg->tbName);
- CTG_ERR_JRET(queryCreateTableMetaFromMsg(rspMsg, rspMsg->tableType == TSDB_SUPER_TABLE, &output->tbMeta));
+ SET_META_TYPE_CTABLE(output->metaType);
+
+ CTG_ERR_JRET(queryCreateCTableMetaFromMsg(rspMsg, &output->ctbMeta));
+ } else {
+ strcpy(output->tbName, rspMsg->tbName);
+
+ SET_META_TYPE_TABLE(output->metaType);
+
+ CTG_ERR_JRET(queryCreateTableMetaFromMsg(rspMsg, rspMsg->tableType == TSDB_SUPER_TABLE, &output->tbMeta));
+ }
CTG_ERR_JRET(ctgUpdateTbMetaEnqueue(pCtg, output, syncOp));
diff --git a/source/libs/catalog/src/ctgAsync.c b/source/libs/catalog/src/ctgAsync.c
index 64ca85edf45ac515bd7728883c171b04c399d148..585b33930c2cae0332ee77a3933d5a86288c77bc 100644
--- a/source/libs/catalog/src/ctgAsync.c
+++ b/source/libs/catalog/src/ctgAsync.c
@@ -39,7 +39,7 @@ int32_t ctgInitGetTbMetaTask(SCtgJob *pJob, int32_t taskIdx, void* param) {
taosMemoryFree(task.taskCtx);
CTG_ERR_RET(TSDB_CODE_OUT_OF_MEMORY);
}
-
+
memcpy(ctx->pName, name, sizeof(*name));
ctx->flag = CTG_FLAG_UNKNOWN_STB;
@@ -69,7 +69,7 @@ int32_t ctgInitGetTbMetasTask(SCtgJob *pJob, int32_t taskIdx, void* param) {
taosArrayPush(pJob->pTasks, &task);
- qDebug("QID:0x%" PRIx64 " the %dth task type %s initialized, dbNum:%d, tbNum:%d",
+ qDebug("QID:0x%" PRIx64 " the %dth task type %s initialized, dbNum:%d, tbNum:%d",
pJob->queryId, taskIdx, ctgTaskTypeStr(task.type), taosArrayGetSize(ctx->pNames), pJob->tbMetaNum);
return TSDB_CODE_SUCCESS;
@@ -89,7 +89,7 @@ int32_t ctgInitGetDbVgTask(SCtgJob *pJob, int32_t taskIdx, void* param) {
}
SCtgDbVgCtx* ctx = task.taskCtx;
-
+
memcpy(ctx->dbFName, dbFName, sizeof(ctx->dbFName));
taosArrayPush(pJob->pTasks, &task);
@@ -113,7 +113,7 @@ int32_t ctgInitGetDbCfgTask(SCtgJob *pJob, int32_t taskIdx, void* param) {
}
SCtgDbCfgCtx* ctx = task.taskCtx;
-
+
memcpy(ctx->dbFName, dbFName, sizeof(ctx->dbFName));
taosArrayPush(pJob->pTasks, &task);
@@ -137,7 +137,7 @@ int32_t ctgInitGetDbInfoTask(SCtgJob *pJob, int32_t taskIdx, void* param) {
}
SCtgDbInfoCtx* ctx = task.taskCtx;
-
+
memcpy(ctx->dbFName, dbFName, sizeof(ctx->dbFName));
taosArrayPush(pJob->pTasks, &task);
@@ -167,7 +167,7 @@ int32_t ctgInitGetTbHashTask(SCtgJob *pJob, int32_t taskIdx, void* param) {
taosMemoryFree(task.taskCtx);
CTG_ERR_RET(TSDB_CODE_OUT_OF_MEMORY);
}
-
+
memcpy(ctx->pName, name, sizeof(*name));
tNameGetFullDbName(ctx->pName, ctx->dbFName);
@@ -197,7 +197,7 @@ int32_t ctgInitGetTbHashsTask(SCtgJob *pJob, int32_t taskIdx, void* param) {
taosArrayPush(pJob->pTasks, &task);
- qDebug("QID:0x%" PRIx64 " the %dth task type %s initialized, dbNum:%d, tbNum:%d",
+ qDebug("QID:0x%" PRIx64 " the %dth task type %s initialized, dbNum:%d, tbNum:%d",
pJob->queryId, taskIdx, ctgTaskTypeStr(task.type), taosArrayGetSize(ctx->pNames), pJob->tbHashNum);
return TSDB_CODE_SUCCESS;
@@ -248,7 +248,7 @@ int32_t ctgInitGetIndexTask(SCtgJob *pJob, int32_t taskIdx, void* param) {
}
SCtgIndexCtx* ctx = task.taskCtx;
-
+
strcpy(ctx->indexFName, name);
taosArrayPush(pJob->pTasks, &task);
@@ -272,7 +272,7 @@ int32_t ctgInitGetUdfTask(SCtgJob *pJob, int32_t taskIdx, void* param) {
}
SCtgUdfCtx* ctx = task.taskCtx;
-
+
strcpy(ctx->udfName, name);
taosArrayPush(pJob->pTasks, &task);
@@ -296,7 +296,7 @@ int32_t ctgInitGetUserTask(SCtgJob *pJob, int32_t taskIdx, void* param) {
}
SCtgUserCtx* ctx = task.taskCtx;
-
+
memcpy(&ctx->user, user, sizeof(*user));
taosArrayPush(pJob->pTasks, &task);
@@ -339,7 +339,7 @@ int32_t ctgInitGetTbIndexTask(SCtgJob *pJob, int32_t taskIdx, void* param) {
taosMemoryFree(task.taskCtx);
CTG_ERR_RET(TSDB_CODE_OUT_OF_MEMORY);
}
-
+
memcpy(ctx->pName, name, sizeof(*name));
taosArrayPush(pJob->pTasks, &task);
@@ -368,7 +368,7 @@ int32_t ctgInitGetTbCfgTask(SCtgJob *pJob, int32_t taskIdx, void* param) {
taosMemoryFree(task.taskCtx);
CTG_ERR_RET(TSDB_CODE_OUT_OF_MEMORY);
}
-
+
memcpy(ctx->pName, name, sizeof(*name));
taosArrayPush(pJob->pTasks, &task);
@@ -387,7 +387,7 @@ int32_t ctgHandleForceUpdate(SCatalog* pCtg, int32_t taskNum, SCtgJob *pJob, con
taosHashCleanup(pTb);
CTG_ERR_RET(TSDB_CODE_OUT_OF_MEMORY);
}
-
+
for (int32_t i = 0; i < pJob->dbVgNum; ++i) {
char* dbFName = taosArrayGet(pReq->pDbVgroup, i);
taosHashPut(pDb, dbFName, strlen(dbFName), dbFName, TSDB_DB_FNAME_LEN);
@@ -474,7 +474,7 @@ int32_t ctgInitTask(SCtgJob *pJob, CTG_TASK_TYPE type, void* param, int32_t *tas
if (taskId) {
*taskId = tid;
}
-
+
return TSDB_CODE_SUCCESS;
}
@@ -510,7 +510,7 @@ int32_t ctgInitJob(SCatalog* pCtg, SRequestConnInfo *pConn, SCtgJob** job, const
pJob->pCtg = pCtg;
pJob->conn = *pConn;
pJob->userParam = param;
-
+
pJob->tbMetaNum = tbMetaNum;
pJob->tbHashNum = tbHashNum;
pJob->qnodeNum = qnodeNum;
@@ -844,20 +844,20 @@ int32_t ctgDumpSvrVer(SCtgTask* pTask) {
pJob->jobRes.pSvrVer->code = pTask->code;
pJob->jobRes.pSvrVer->pRes = pTask->res;
-
+
return TSDB_CODE_SUCCESS;
}
int32_t ctgCallSubCb(SCtgTask *pTask) {
int32_t code = 0;
-
+
CTG_LOCK(CTG_WRITE, &pTask->lock);
-
+
int32_t parentNum = taosArrayGetSize(pTask->pParents);
for (int32_t i = 0; i < parentNum; ++i) {
SCtgMsgCtx *pMsgCtx = CTG_GET_TASK_MSGCTX(pTask, -1);
SCtgTask* pParent = taosArrayGetP(pTask->pParents, i);
-
+
pParent->subRes.code = pTask->code;
if (TSDB_CODE_SUCCESS == pTask->code) {
code = (*gCtgAsyncFps[pTask->type].cloneFp)(pTask, &pParent->subRes.res);
@@ -868,22 +868,22 @@ int32_t ctgCallSubCb(SCtgTask *pTask) {
SCtgMsgCtx *pParMsgCtx = CTG_GET_TASK_MSGCTX(pParent, -1);
- pParMsgCtx->pBatchs = pMsgCtx->pBatchs;
+ pParMsgCtx->pBatchs = pMsgCtx->pBatchs;
CTG_ERR_JRET(pParent->subRes.fp(pParent));
}
-
+
_return:
CTG_UNLOCK(CTG_WRITE, &pTask->lock);
- CTG_RET(code);
+ CTG_RET(code);
}
int32_t ctgCallUserCb(void* param) {
SCtgJob* pJob = (SCtgJob*)param;
qDebug("QID:0x%" PRIx64 " ctg start to call user cb with rsp %s", pJob->queryId, tstrerror(pJob->jobResCode));
-
+
(*pJob->userFp)(&pJob->jobRes, pJob->userParam, pJob->jobResCode);
qDebug("QID:0x%" PRIx64 " ctg end to call user cb", pJob->queryId);
@@ -922,9 +922,9 @@ _return:
//taosSsleep(2);
//qDebug("QID:0x%" PRIx64 " ctg after sleep", pJob->queryId);
-
+
taosAsyncExec(ctgCallUserCb, pJob, NULL);
-
+
CTG_RET(code);
}
@@ -932,7 +932,7 @@ int32_t ctgHandleGetTbMetaRsp(SCtgTaskReq* tReq, int32_t reqType, const SDataBuf
int32_t code = 0;
SCtgDBCache *dbCache = NULL;
SCtgTask* pTask = tReq->pTask;
- SCatalog* pCtg = pTask->pJob->pCtg;
+ SCatalog* pCtg = pTask->pJob->pCtg;
SRequestConnInfo* pConn = &pTask->pJob->conn;
SCtgMsgCtx* pMsgCtx = CTG_GET_TASK_MSGCTX(pTask, tReq->msgIdx);
SCtgTbMetaCtx* ctx = (SCtgTbMetaCtx*)pTask->taskCtx;
@@ -958,38 +958,38 @@ int32_t ctgHandleGetTbMetaRsp(SCtgTaskReq* tReq, int32_t reqType, const SDataBuf
}
case TDMT_MND_TABLE_META: {
STableMetaOutput* pOut = (STableMetaOutput*)pMsgCtx->out;
-
+
if (CTG_IS_META_NULL(pOut->metaType)) {
if (CTG_FLAG_IS_STB(flag)) {
char dbFName[TSDB_DB_FNAME_LEN] = {0};
tNameGetFullDbName(pName, dbFName);
-
+
CTG_ERR_RET(ctgAcquireVgInfoFromCache(pCtg, dbFName, &dbCache));
if (NULL != dbCache) {
SVgroupInfo vgInfo = {0};
CTG_ERR_JRET(ctgGetVgInfoFromHashValue(pCtg, dbCache->vgCache.vgInfo, pName, &vgInfo));
-
+
ctgDebug("will refresh tbmeta, supposed to be stb, tbName:%s, flag:%d", tNameGetTableName(pName), flag);
- *vgId = vgInfo.vgId;
+ *vgId = vgInfo.vgId;
CTG_ERR_JRET(ctgGetTbMetaFromVnode(pCtg, pConn, pName, &vgInfo, NULL, tReq));
ctgReleaseVgInfoToCache(pCtg, dbCache);
} else {
SBuildUseDBInput input = {0};
-
+
tstrncpy(input.db, dbFName, tListLen(input.db));
input.vgVersion = CTG_DEFAULT_INVALID_VERSION;
-
+
CTG_ERR_JRET(ctgGetDBVgInfoFromMnode(pCtg, pConn, &input, NULL, tReq));
}
return TSDB_CODE_SUCCESS;
}
-
+
ctgError("no tbmeta got, tbName:%s", tNameGetTableName(pName));
ctgRemoveTbMetaFromCache(pCtg, pName, false);
-
+
CTG_ERR_JRET(CTG_ERR_CODE_TABLE_NOT_EXIST);
}
@@ -998,12 +998,12 @@ int32_t ctgHandleGetTbMetaRsp(SCtgTaskReq* tReq, int32_t reqType, const SDataBuf
STableMetaOutput* pLastOut = (STableMetaOutput*)pMsgCtx->out;
TSWAP(pLastOut->tbMeta, pOut->tbMeta);
}
-
+
break;
}
case TDMT_VND_TABLE_META: {
STableMetaOutput* pOut = (STableMetaOutput*)pMsgCtx->out;
-
+
if (CTG_IS_META_NULL(pOut->metaType)) {
ctgError("no tbmeta got, tbNmae:%s", tNameGetTableName(pName));
ctgRemoveTbMetaFromCache(pCtg, pName, false);
@@ -1013,12 +1013,12 @@ int32_t ctgHandleGetTbMetaRsp(SCtgTaskReq* tReq, int32_t reqType, const SDataBuf
if (CTG_FLAG_IS_STB(flag)) {
break;
}
-
+
if (CTG_IS_META_TABLE(pOut->metaType) && TSDB_SUPER_TABLE == pOut->tbMeta->tableType) {
ctgDebug("will continue to refresh tbmeta since got stb, tbName:%s", tNameGetTableName(pName));
-
+
taosMemoryFreeClear(pOut->tbMeta);
-
+
CTG_RET(ctgGetTbMetaFromMnode(pCtg, pConn, pName, NULL, tReq));
} else if (CTG_IS_META_BOTH(pOut->metaType)) {
int32_t exist = 0;
@@ -1029,13 +1029,13 @@ int32_t ctgHandleGetTbMetaRsp(SCtgTaskReq* tReq, int32_t reqType, const SDataBuf
stbCtx.flag = flag;
stbCtx.pName = &stbName;
- taosMemoryFreeClear(pOut->tbMeta);
+ taosMemoryFreeClear(pOut->tbMeta);
CTG_ERR_JRET(ctgReadTbMetaFromCache(pCtg, &stbCtx, &pOut->tbMeta));
if (pOut->tbMeta) {
exist = 1;
}
}
-
+
if (0 == exist) {
TSWAP(pMsgCtx->lastOut, pMsgCtx->out);
CTG_RET(ctgGetTbMetaFromMnodeImpl(pCtg, pConn, pOut->dbFName, pOut->tbName, NULL, tReq));
@@ -1056,7 +1056,7 @@ int32_t ctgHandleGetTbMetaRsp(SCtgTaskReq* tReq, int32_t reqType, const SDataBuf
if (CTG_IS_META_BOTH(pOut->metaType)) {
memcpy(pOut->tbMeta, &pOut->ctbMeta, sizeof(pOut->ctbMeta));
}
-
+
/*
else if (CTG_IS_META_CTABLE(pOut->metaType)) {
SName stbName = *pName;
@@ -1064,7 +1064,7 @@ int32_t ctgHandleGetTbMetaRsp(SCtgTaskReq* tReq, int32_t reqType, const SDataBuf
SCtgTbMetaCtx stbCtx = {0};
stbCtx.flag = flag;
stbCtx.pName = &stbName;
-
+
CTG_ERR_JRET(ctgReadTbMetaFromCache(pCtg, &stbCtx, &pOut->tbMeta));
if (NULL == pOut->tbMeta) {
ctgDebug("stb no longer exist, stbName:%s", stbName.tname);
@@ -1088,7 +1088,7 @@ _return:
if (pTask->res || code) {
ctgHandleTaskEnd(pTask, code);
}
-
+
CTG_RET(code);
}
@@ -1097,7 +1097,7 @@ int32_t ctgHandleGetTbMetasRsp(SCtgTaskReq* tReq, int32_t reqType, const SDataBu
int32_t code = 0;
SCtgDBCache *dbCache = NULL;
SCtgTask* pTask = tReq->pTask;
- SCatalog* pCtg = pTask->pJob->pCtg;
+ SCatalog* pCtg = pTask->pJob->pCtg;
SRequestConnInfo* pConn = &pTask->pJob->conn;
SCtgMsgCtx* pMsgCtx = CTG_GET_TASK_MSGCTX(pTask, tReq->msgIdx);
SCtgTbMetasCtx* ctx = (SCtgTbMetasCtx*)pTask->taskCtx;
@@ -1125,38 +1125,38 @@ int32_t ctgHandleGetTbMetasRsp(SCtgTaskReq* tReq, int32_t reqType, const SDataBu
}
case TDMT_MND_TABLE_META: {
STableMetaOutput* pOut = (STableMetaOutput*)pMsgCtx->out;
-
+
if (CTG_IS_META_NULL(pOut->metaType)) {
if (CTG_FLAG_IS_STB(flag)) {
char dbFName[TSDB_DB_FNAME_LEN] = {0};
tNameGetFullDbName(pName, dbFName);
-
+
CTG_ERR_RET(ctgAcquireVgInfoFromCache(pCtg, dbFName, &dbCache));
if (NULL != dbCache) {
SVgroupInfo vgInfo = {0};
CTG_ERR_JRET(ctgGetVgInfoFromHashValue(pCtg, dbCache->vgCache.vgInfo, pName, &vgInfo));
-
+
ctgDebug("will refresh tbmeta, supposed to be stb, tbName:%s, flag:%d", tNameGetTableName(pName), flag);
- *vgId = vgInfo.vgId;
+ *vgId = vgInfo.vgId;
CTG_ERR_JRET(ctgGetTbMetaFromVnode(pCtg, pConn, pName, &vgInfo, NULL, tReq));
ctgReleaseVgInfoToCache(pCtg, dbCache);
} else {
SBuildUseDBInput input = {0};
-
+
tstrncpy(input.db, dbFName, tListLen(input.db));
input.vgVersion = CTG_DEFAULT_INVALID_VERSION;
-
+
CTG_ERR_JRET(ctgGetDBVgInfoFromMnode(pCtg, pConn, &input, NULL, tReq));
}
return TSDB_CODE_SUCCESS;
}
-
+
ctgError("no tbmeta got, tbName:%s", tNameGetTableName(pName));
ctgRemoveTbMetaFromCache(pCtg, pName, false);
-
+
CTG_ERR_JRET(CTG_ERR_CODE_TABLE_NOT_EXIST);
}
@@ -1165,12 +1165,12 @@ int32_t ctgHandleGetTbMetasRsp(SCtgTaskReq* tReq, int32_t reqType, const SDataBu
STableMetaOutput* pLastOut = (STableMetaOutput*)pMsgCtx->out;
TSWAP(pLastOut->tbMeta, pOut->tbMeta);
}
-
+
break;
}
case TDMT_VND_TABLE_META: {
STableMetaOutput* pOut = (STableMetaOutput*)pMsgCtx->out;
-
+
if (CTG_IS_META_NULL(pOut->metaType)) {
ctgError("no tbmeta got, tbNmae:%s", tNameGetTableName(pName));
ctgRemoveTbMetaFromCache(pCtg, pName, false);
@@ -1180,12 +1180,12 @@ int32_t ctgHandleGetTbMetasRsp(SCtgTaskReq* tReq, int32_t reqType, const SDataBu
if (CTG_FLAG_IS_STB(flag)) {
break;
}
-
+
if (CTG_IS_META_TABLE(pOut->metaType) && TSDB_SUPER_TABLE == pOut->tbMeta->tableType) {
ctgDebug("will continue to refresh tbmeta since got stb, tbName:%s", tNameGetTableName(pName));
-
+
taosMemoryFreeClear(pOut->tbMeta);
-
+
CTG_RET(ctgGetTbMetaFromMnode(pCtg, pConn, pName, NULL, tReq));
} else if (CTG_IS_META_BOTH(pOut->metaType)) {
int32_t exist = 0;
@@ -1196,14 +1196,14 @@ int32_t ctgHandleGetTbMetasRsp(SCtgTaskReq* tReq, int32_t reqType, const SDataBu
stbCtx.flag = flag;
stbCtx.pName = &stbName;
- taosMemoryFreeClear(pOut->tbMeta);
+ taosMemoryFreeClear(pOut->tbMeta);
CTG_ERR_JRET(ctgReadTbMetaFromCache(pCtg, &stbCtx, &pOut->tbMeta));
if (pOut->tbMeta) {
ctgDebug("use cached stb meta, tbName:%s", tNameGetTableName(pName));
exist = 1;
}
}
-
+
if (0 == exist) {
TSWAP(pMsgCtx->lastOut, pMsgCtx->out);
CTG_RET(ctgGetTbMetaFromMnodeImpl(pCtg, pConn, pOut->dbFName, pOut->tbName, NULL, tReq));
@@ -1224,7 +1224,7 @@ int32_t ctgHandleGetTbMetasRsp(SCtgTaskReq* tReq, int32_t reqType, const SDataBu
if (CTG_IS_META_BOTH(pOut->metaType)) {
memcpy(pOut->tbMeta, &pOut->ctbMeta, sizeof(pOut->ctbMeta));
}
-
+
/*
else if (CTG_IS_META_CTABLE(pOut->metaType)) {
SName stbName = *pName;
@@ -1232,7 +1232,7 @@ int32_t ctgHandleGetTbMetasRsp(SCtgTaskReq* tReq, int32_t reqType, const SDataBu
SCtgTbMetaCtx stbCtx = {0};
stbCtx.flag = flag;
stbCtx.pName = &stbName;
-
+
CTG_ERR_JRET(ctgReadTbMetaFromCache(pCtg, &stbCtx, &pOut->tbMeta));
if (NULL == pOut->tbMeta) {
ctgDebug("stb no longer exist, stbName:%s", stbName.tname);
@@ -1273,7 +1273,7 @@ _return:
if (pTask->res && taskDone) {
ctgHandleTaskEnd(pTask, code);
}
-
+
CTG_RET(code);
}
@@ -1282,7 +1282,7 @@ int32_t ctgHandleGetDbVgRsp(SCtgTaskReq* tReq, int32_t reqType, const SDataBuf *
int32_t code = 0;
SCtgTask* pTask = tReq->pTask;
SCtgDbVgCtx* ctx = (SCtgDbVgCtx*)pTask->taskCtx;
- SCatalog* pCtg = pTask->pJob->pCtg;
+ SCatalog* pCtg = pTask->pJob->pCtg;
CTG_ERR_JRET(ctgProcessRspMsg(pTask->msgCtx.out, reqType, pMsg->pData, pMsg->len, rspCode, pTask->msgCtx.target));
@@ -1290,7 +1290,7 @@ int32_t ctgHandleGetDbVgRsp(SCtgTaskReq* tReq, int32_t reqType, const SDataBuf *
case TDMT_MND_USE_DB: {
SUseDbOutput* pOut = (SUseDbOutput*)pTask->msgCtx.out;
SDBVgInfo* pDb = NULL;
-
+
CTG_ERR_JRET(ctgGenerateVgList(pCtg, pOut->dbVgroup->vgHash, (SArray**)&pTask->res));
CTG_ERR_JRET(cloneDbVgInfo(pOut->dbVgroup, &pDb));
@@ -1316,7 +1316,7 @@ int32_t ctgHandleGetTbHashRsp(SCtgTaskReq* tReq, int32_t reqType, const SDataBuf
int32_t code = 0;
SCtgTask* pTask = tReq->pTask;
SCtgTbHashCtx* ctx = (SCtgTbHashCtx*)pTask->taskCtx;
- SCatalog* pCtg = pTask->pJob->pCtg;
+ SCatalog* pCtg = pTask->pJob->pCtg;
CTG_ERR_JRET(ctgProcessRspMsg(pTask->msgCtx.out, reqType, pMsg->pData, pMsg->len, rspCode, pTask->msgCtx.target));
@@ -1330,7 +1330,7 @@ int32_t ctgHandleGetTbHashRsp(SCtgTaskReq* tReq, int32_t reqType, const SDataBuf
}
CTG_ERR_JRET(ctgGetVgInfoFromHashValue(pCtg, pOut->dbVgroup, ctx->pName, (SVgroupInfo*)pTask->res));
-
+
CTG_ERR_JRET(ctgUpdateVgroupEnqueue(pCtg, ctx->dbFName, pOut->dbId, pOut->dbVgroup, false));
pOut->dbVgroup = NULL;
@@ -1354,7 +1354,7 @@ int32_t ctgHandleGetTbHashsRsp(SCtgTaskReq* tReq, int32_t reqType, const SDataBu
int32_t code = 0;
SCtgTask* pTask = tReq->pTask;
SCtgTbHashsCtx* ctx = (SCtgTbHashsCtx*)pTask->taskCtx;
- SCatalog* pCtg = pTask->pJob->pCtg;
+ SCatalog* pCtg = pTask->pJob->pCtg;
SCtgMsgCtx* pMsgCtx = CTG_GET_TASK_MSGCTX(pTask, tReq->msgIdx);
SCtgFetch* pFetch = taosArrayGet(ctx->pFetchs, tReq->msgIdx);
bool taskDone = false;
@@ -1367,7 +1367,7 @@ int32_t ctgHandleGetTbHashsRsp(SCtgTaskReq* tReq, int32_t reqType, const SDataBu
STablesReq* pReq = taosArrayGet(ctx->pNames, pFetch->dbIdx);
CTG_ERR_JRET(ctgGetVgInfosFromHashValue(pCtg, tReq, pOut->dbVgroup, ctx, pMsgCtx->target, pReq->pTables, true));
-
+
CTG_ERR_JRET(ctgUpdateVgroupEnqueue(pCtg, pMsgCtx->target, pOut->dbId, pOut->dbVgroup, false));
pOut->dbVgroup = NULL;
@@ -1394,7 +1394,7 @@ _return:
pRes->code = code;
pRes->pRes = NULL;
}
-
+
if (0 == atomic_sub_fetch_32(&ctx->fetchNum, 1)) {
TSWAP(pTask->res, ctx->pResList);
taskDone = true;
@@ -1419,9 +1419,9 @@ int32_t ctgHandleGetTbIndexRsp(SCtgTaskReq* tReq, int32_t reqType, const SDataBu
CTG_ERR_JRET(ctgCloneTableIndex(pOut->pIndex, &pInfo));
pTask->res = pInfo;
- SCtgTbIndexCtx* ctx = pTask->taskCtx;
+ SCtgTbIndexCtx* ctx = pTask->taskCtx;
CTG_ERR_JRET(ctgUpdateTbIndexEnqueue(pTask->pJob->pCtg, (STableIndex**)&pTask->msgCtx.out, false));
-
+
_return:
if (TSDB_CODE_MND_DB_INDEX_NOT_EXIST == code) {
@@ -1438,7 +1438,7 @@ int32_t ctgHandleGetTbCfgRsp(SCtgTaskReq* tReq, int32_t reqType, const SDataBuf
CTG_ERR_JRET(ctgProcessRspMsg(&pTask->msgCtx.out, reqType, pMsg->pData, pMsg->len, rspCode, pTask->msgCtx.target));
TSWAP(pTask->res, pTask->msgCtx.out);
-
+
_return:
ctgHandleTaskEnd(pTask, code);
@@ -1452,7 +1452,7 @@ int32_t ctgHandleGetDbCfgRsp(SCtgTaskReq* tReq, int32_t reqType, const SDataBuf
CTG_ERR_JRET(ctgProcessRspMsg(pTask->msgCtx.out, reqType, pMsg->pData, pMsg->len, rspCode, pTask->msgCtx.target));
TSWAP(pTask->res, pTask->msgCtx.out);
-
+
_return:
ctgHandleTaskEnd(pTask, code);
@@ -1471,7 +1471,7 @@ int32_t ctgHandleGetQnodeRsp(SCtgTaskReq* tReq, int32_t reqType, const SDataBuf
CTG_ERR_JRET(ctgProcessRspMsg(pTask->msgCtx.out, reqType, pMsg->pData, pMsg->len, rspCode, pTask->msgCtx.target));
TSWAP(pTask->res, pTask->msgCtx.out);
-
+
_return:
ctgHandleTaskEnd(pTask, code);
@@ -1485,7 +1485,7 @@ int32_t ctgHandleGetDnodeRsp(SCtgTaskReq* tReq, int32_t reqType, const SDataBuf
CTG_ERR_JRET(ctgProcessRspMsg(&pTask->msgCtx.out, reqType, pMsg->pData, pMsg->len, rspCode, pTask->msgCtx.target));
TSWAP(pTask->res, pTask->msgCtx.out);
-
+
_return:
ctgHandleTaskEnd(pTask, code);
@@ -1499,7 +1499,7 @@ int32_t ctgHandleGetIndexRsp(SCtgTaskReq* tReq, int32_t reqType, const SDataBuf
CTG_ERR_JRET(ctgProcessRspMsg(pTask->msgCtx.out, reqType, pMsg->pData, pMsg->len, rspCode, pTask->msgCtx.target));
TSWAP(pTask->res, pTask->msgCtx.out);
-
+
_return:
ctgHandleTaskEnd(pTask, code);
@@ -1513,7 +1513,7 @@ int32_t ctgHandleGetUdfRsp(SCtgTaskReq* tReq, int32_t reqType, const SDataBuf *p
CTG_ERR_JRET(ctgProcessRspMsg(pTask->msgCtx.out, reqType, pMsg->pData, pMsg->len, rspCode, pTask->msgCtx.target));
TSWAP(pTask->res, pTask->msgCtx.out);
-
+
_return:
ctgHandleTaskEnd(pTask, code);
@@ -1525,7 +1525,7 @@ int32_t ctgHandleGetUserRsp(SCtgTaskReq* tReq, int32_t reqType, const SDataBuf *
int32_t code = 0;
SCtgTask* pTask = tReq->pTask;
SCtgUserCtx* ctx = (SCtgUserCtx*)pTask->taskCtx;
- SCatalog* pCtg = pTask->pJob->pCtg;
+ SCatalog* pCtg = pTask->pJob->pCtg;
bool pass = false;
SGetUserAuthRsp* pOut = (SGetUserAuthRsp*)pTask->msgCtx.out;
@@ -1573,7 +1573,7 @@ int32_t ctgHandleGetSvrVerRsp(SCtgTaskReq* tReq, int32_t reqType, const SDataBuf
CTG_ERR_JRET(ctgProcessRspMsg(&pTask->msgCtx.out, reqType, pMsg->pData, pMsg->len, rspCode, pTask->msgCtx.target));
TSWAP(pTask->res, pTask->msgCtx.out);
-
+
_return:
ctgHandleTaskEnd(pTask, code);
@@ -1583,7 +1583,7 @@ _return:
int32_t ctgAsyncRefreshTbMeta(SCtgTaskReq *tReq, int32_t flag, SName* pName, int32_t* vgId) {
SCtgTask* pTask = tReq->pTask;
- SCatalog* pCtg = pTask->pJob->pCtg;
+ SCatalog* pCtg = pTask->pJob->pCtg;
SRequestConnInfo* pConn = &pTask->pJob->conn;
int32_t code = 0;
@@ -1603,7 +1603,7 @@ int32_t ctgAsyncRefreshTbMeta(SCtgTaskReq *tReq, int32_t flag, SName* pName, int
SCtgDBCache *dbCache = NULL;
char dbFName[TSDB_DB_FNAME_LEN] = {0};
tNameGetFullDbName(pName, dbFName);
-
+
CTG_ERR_RET(ctgAcquireVgInfoFromCache(pCtg, dbFName, &dbCache));
if (dbCache) {
SVgroupInfo vgInfo = {0};
@@ -1632,7 +1632,7 @@ _return:
}
int32_t ctgLaunchGetTbMetaTask(SCtgTask *pTask) {
- SCatalog* pCtg = pTask->pJob->pCtg;
+ SCatalog* pCtg = pTask->pJob->pCtg;
SRequestConnInfo* pConn = &pTask->pJob->conn;
SCtgJob* pJob = pTask->pJob;
SCtgMsgCtx *pMsgCtx = CTG_GET_TASK_MSGCTX(pTask, -1);
@@ -1649,14 +1649,14 @@ int32_t ctgLaunchGetTbMetaTask(SCtgTask *pTask) {
SCtgTbMetaCtx* pCtx = (SCtgTbMetaCtx*)pTask->taskCtx;
SCtgTaskReq tReq;
tReq.pTask = pTask;
- tReq.msgIdx = -1;
+ tReq.msgIdx = -1;
CTG_ERR_RET(ctgAsyncRefreshTbMeta(&tReq, pCtx->flag, pCtx->pName, &pCtx->vgId));
return TSDB_CODE_SUCCESS;
}
int32_t ctgLaunchGetTbMetasTask(SCtgTask *pTask) {
- SCatalog* pCtg = pTask->pJob->pCtg;
+ SCatalog* pCtg = pTask->pJob->pCtg;
SRequestConnInfo* pConn = &pTask->pJob->conn;
SCtgTbMetasCtx* pCtx = (SCtgTbMetasCtx*)pTask->taskCtx;
SCtgJob* pJob = pTask->pJob;
@@ -1670,18 +1670,18 @@ int32_t ctgLaunchGetTbMetasTask(SCtgTask *pTask) {
CTG_ERR_RET(ctgGetTbMetasFromCache(pCtg, pConn, pCtx, i, &fetchIdx, baseResIdx, pReq->pTables));
baseResIdx += taosArrayGetSize(pReq->pTables);
}
-
+
pCtx->fetchNum = taosArrayGetSize(pCtx->pFetchs);
if (pCtx->fetchNum <= 0) {
TSWAP(pTask->res, pCtx->pResList);
-
+
CTG_ERR_RET(ctgHandleTaskEnd(pTask, 0));
return TSDB_CODE_SUCCESS;
}
-
+
pTask->msgCtxs = taosArrayInit(pCtx->fetchNum, sizeof(SCtgMsgCtx));
taosArraySetSize(pTask->msgCtxs, pCtx->fetchNum);
-
+
for (int32_t i = 0; i < pCtx->fetchNum; ++i) {
SCtgFetch* pFetch = taosArrayGet(pCtx->pFetchs, i);
SName* pName = ctgGetFetchName(pCtx->pNames, pFetch);
@@ -1689,19 +1689,19 @@ int32_t ctgLaunchGetTbMetasTask(SCtgTask *pTask) {
if (NULL == pMsgCtx->pBatchs) {
pMsgCtx->pBatchs = pJob->pBatchs;
}
-
+
SCtgTaskReq tReq;
tReq.pTask = pTask;
- tReq.msgIdx = pFetch->fetchIdx;
+ tReq.msgIdx = pFetch->fetchIdx;
CTG_ERR_RET(ctgAsyncRefreshTbMeta(&tReq, pFetch->flag, pName, &pFetch->vgId));
}
-
+
return TSDB_CODE_SUCCESS;
}
int32_t ctgLaunchGetDbVgTask(SCtgTask *pTask) {
int32_t code = 0;
- SCatalog* pCtg = pTask->pJob->pCtg;
+ SCatalog* pCtg = pTask->pJob->pCtg;
SRequestConnInfo* pConn = &pTask->pJob->conn;
SCtgDBCache *dbCache = NULL;
SCtgDbVgCtx* pCtx = (SCtgDbVgCtx*)pTask->taskCtx;
@@ -1710,18 +1710,18 @@ int32_t ctgLaunchGetDbVgTask(SCtgTask *pTask) {
if (NULL == pMsgCtx->pBatchs) {
pMsgCtx->pBatchs = pJob->pBatchs;
}
-
+
CTG_ERR_RET(ctgAcquireVgInfoFromCache(pCtg, pCtx->dbFName, &dbCache));
if (NULL != dbCache) {
CTG_ERR_JRET(ctgGenerateVgList(pCtg, dbCache->vgCache.vgInfo->vgHash, (SArray**)&pTask->res));
ctgReleaseVgInfoToCache(pCtg, dbCache);
dbCache = NULL;
-
+
CTG_ERR_JRET(ctgHandleTaskEnd(pTask, 0));
} else {
SBuildUseDBInput input = {0};
-
+
tstrncpy(input.db, pCtx->dbFName, tListLen(input.db));
input.vgVersion = CTG_DEFAULT_INVALID_VERSION;
@@ -1742,7 +1742,7 @@ _return:
int32_t ctgLaunchGetTbHashTask(SCtgTask *pTask) {
int32_t code = 0;
- SCatalog* pCtg = pTask->pJob->pCtg;
+ SCatalog* pCtg = pTask->pJob->pCtg;
SRequestConnInfo* pConn = &pTask->pJob->conn;
SCtgDBCache *dbCache = NULL;
SCtgTbHashCtx* pCtx = (SCtgTbHashCtx*)pTask->taskCtx;
@@ -1751,7 +1751,7 @@ int32_t ctgLaunchGetTbHashTask(SCtgTask *pTask) {
if (NULL == pMsgCtx->pBatchs) {
pMsgCtx->pBatchs = pJob->pBatchs;
}
-
+
CTG_ERR_RET(ctgAcquireVgInfoFromCache(pCtg, pCtx->dbFName, &dbCache));
if (NULL != dbCache) {
pTask->res = taosMemoryMalloc(sizeof(SVgroupInfo));
@@ -1762,17 +1762,17 @@ int32_t ctgLaunchGetTbHashTask(SCtgTask *pTask) {
ctgReleaseVgInfoToCache(pCtg, dbCache);
dbCache = NULL;
-
+
CTG_ERR_JRET(ctgHandleTaskEnd(pTask, 0));
} else {
SBuildUseDBInput input = {0};
-
+
tstrncpy(input.db, pCtx->dbFName, tListLen(input.db));
input.vgVersion = CTG_DEFAULT_INVALID_VERSION;
SCtgTaskReq tReq;
tReq.pTask = pTask;
- tReq.msgIdx = -1;
+ tReq.msgIdx = -1;
CTG_ERR_RET(ctgGetDBVgInfoFromMnode(pCtg, pConn, &input, NULL, &tReq));
}
@@ -1786,16 +1786,16 @@ _return:
}
int32_t ctgLaunchGetTbHashsTask(SCtgTask *pTask) {
- SCatalog* pCtg = pTask->pJob->pCtg;
+ SCatalog* pCtg = pTask->pJob->pCtg;
SRequestConnInfo* pConn = &pTask->pJob->conn;
SCtgTbHashsCtx* pCtx = (SCtgTbHashsCtx*)pTask->taskCtx;
SCtgDBCache *dbCache = NULL;
- SCtgJob* pJob = pTask->pJob;
+ SCtgJob* pJob = pTask->pJob;
int32_t dbNum = taosArrayGetSize(pCtx->pNames);
int32_t fetchIdx = 0;
int32_t baseResIdx = 0;
int32_t code = 0;
-
+
for (int32_t i = 0; i < dbNum; ++i) {
STablesReq* pReq = taosArrayGet(pCtx->pNames, i);
@@ -1804,7 +1804,7 @@ int32_t ctgLaunchGetTbHashsTask(SCtgTask *pTask) {
if (NULL != dbCache) {
SCtgTaskReq tReq;
tReq.pTask = pTask;
- tReq.msgIdx = -1;
+ tReq.msgIdx = -1;
CTG_ERR_JRET(ctgGetVgInfosFromHashValue(pCtg, &tReq, dbCache->vgCache.vgInfo, pCtx, pReq->dbFName, pReq->pTables, false));
ctgReleaseVgInfoToCache(pCtg, dbCache);
@@ -1815,21 +1815,21 @@ int32_t ctgLaunchGetTbHashsTask(SCtgTask *pTask) {
ctgAddFetch(&pCtx->pFetchs, i, -1, &fetchIdx, baseResIdx, 0);
baseResIdx += taosArrayGetSize(pReq->pTables);
- taosArraySetSize(pCtx->pResList, baseResIdx);
+ taosArraySetSize(pCtx->pResList, baseResIdx);
}
}
pCtx->fetchNum = taosArrayGetSize(pCtx->pFetchs);
if (pCtx->fetchNum <= 0) {
TSWAP(pTask->res, pCtx->pResList);
-
+
CTG_ERR_RET(ctgHandleTaskEnd(pTask, 0));
return TSDB_CODE_SUCCESS;
}
-
+
pTask->msgCtxs = taosArrayInit(pCtx->fetchNum, sizeof(SCtgMsgCtx));
taosArraySetSize(pTask->msgCtxs, pCtx->fetchNum);
-
+
for (int32_t i = 0; i < pCtx->fetchNum; ++i) {
SCtgFetch* pFetch = taosArrayGet(pCtx->pFetchs, i);
STablesReq* pReq = taosArrayGet(pCtx->pNames, pFetch->dbIdx);
@@ -1837,10 +1837,10 @@ int32_t ctgLaunchGetTbHashsTask(SCtgTask *pTask) {
if (NULL == pMsgCtx->pBatchs) {
pMsgCtx->pBatchs = pJob->pBatchs;
}
-
+
SBuildUseDBInput input = {0};
strcpy(input.db, pReq->dbFName);
-
+
input.vgVersion = CTG_DEFAULT_INVALID_VERSION;
SCtgTaskReq tReq;
@@ -1854,14 +1854,14 @@ _return:
if (dbCache) {
ctgReleaseVgInfoToCache(pCtg, dbCache);
}
-
+
return code;
}
int32_t ctgLaunchGetTbIndexTask(SCtgTask *pTask) {
int32_t code = 0;
- SCatalog* pCtg = pTask->pJob->pCtg;
+ SCatalog* pCtg = pTask->pJob->pCtg;
SRequestConnInfo* pConn = &pTask->pJob->conn;
SCtgTbIndexCtx* pCtx = (SCtgTbIndexCtx*)pTask->taskCtx;
SArray* pRes = NULL;
@@ -1874,18 +1874,18 @@ int32_t ctgLaunchGetTbIndexTask(SCtgTask *pTask) {
CTG_ERR_RET(ctgReadTbIndexFromCache(pCtg, pCtx->pName, &pRes));
if (pRes) {
pTask->res = pRes;
-
+
CTG_ERR_RET(ctgHandleTaskEnd(pTask, 0));
return TSDB_CODE_SUCCESS;
}
-
+
CTG_ERR_RET(ctgGetTbIndexFromMnode(pCtg, pConn, pCtx->pName, NULL, pTask));
return TSDB_CODE_SUCCESS;
}
int32_t ctgLaunchGetTbCfgTask(SCtgTask *pTask) {
int32_t code = 0;
- SCatalog* pCtg = pTask->pJob->pCtg;
+ SCatalog* pCtg = pTask->pJob->pCtg;
SRequestConnInfo* pConn = &pTask->pJob->conn;
SCtgTbCfgCtx* pCtx = (SCtgTbCfgCtx*)pTask->taskCtx;
SArray* pRes = NULL;
@@ -1915,7 +1915,7 @@ int32_t ctgLaunchGetTbCfgTask(SCtgTask *pTask) {
return TSDB_CODE_SUCCESS;
}
}
-
+
CTG_ERR_JRET(ctgGetTableCfgFromVnode(pCtg, pConn, pCtx->pName, pCtx->pVgInfo, NULL, pTask));
}
@@ -1926,13 +1926,13 @@ _return:
if (CTG_TASK_LAUNCHED == pTask->status) {
ctgHandleTaskEnd(pTask, code);
}
-
+
CTG_RET(code);
}
int32_t ctgLaunchGetQnodeTask(SCtgTask *pTask) {
- SCatalog* pCtg = pTask->pJob->pCtg;
+ SCatalog* pCtg = pTask->pJob->pCtg;
SRequestConnInfo* pConn = &pTask->pJob->conn;
SCtgJob* pJob = pTask->pJob;
SCtgMsgCtx *pMsgCtx = CTG_GET_TASK_MSGCTX(pTask, -1);
@@ -1945,7 +1945,7 @@ int32_t ctgLaunchGetQnodeTask(SCtgTask *pTask) {
}
int32_t ctgLaunchGetDnodeTask(SCtgTask *pTask) {
- SCatalog* pCtg = pTask->pJob->pCtg;
+ SCatalog* pCtg = pTask->pJob->pCtg;
SRequestConnInfo* pConn = &pTask->pJob->conn;
SCtgJob* pJob = pTask->pJob;
SCtgMsgCtx *pMsgCtx = CTG_GET_TASK_MSGCTX(pTask, -1);
@@ -1959,7 +1959,7 @@ int32_t ctgLaunchGetDnodeTask(SCtgTask *pTask) {
int32_t ctgLaunchGetDbCfgTask(SCtgTask *pTask) {
- SCatalog* pCtg = pTask->pJob->pCtg;
+ SCatalog* pCtg = pTask->pJob->pCtg;
SRequestConnInfo* pConn = &pTask->pJob->conn;
SCtgDbCfgCtx* pCtx = (SCtgDbCfgCtx*)pTask->taskCtx;
SCtgJob* pJob = pTask->pJob;
@@ -1975,7 +1975,7 @@ int32_t ctgLaunchGetDbCfgTask(SCtgTask *pTask) {
int32_t ctgLaunchGetDbInfoTask(SCtgTask *pTask) {
int32_t code = 0;
- SCatalog* pCtg = pTask->pJob->pCtg;
+ SCatalog* pCtg = pTask->pJob->pCtg;
SCtgDBCache *dbCache = NULL;
SCtgDbInfoCtx* pCtx = (SCtgDbInfoCtx*)pTask->taskCtx;
SCtgJob* pJob = pTask->pJob;
@@ -2014,7 +2014,7 @@ _return:
}
int32_t ctgLaunchGetIndexTask(SCtgTask *pTask) {
- SCatalog* pCtg = pTask->pJob->pCtg;
+ SCatalog* pCtg = pTask->pJob->pCtg;
SRequestConnInfo* pConn = &pTask->pJob->conn;
SCtgIndexCtx* pCtx = (SCtgIndexCtx*)pTask->taskCtx;
SCtgJob* pJob = pTask->pJob;
@@ -2029,7 +2029,7 @@ int32_t ctgLaunchGetIndexTask(SCtgTask *pTask) {
}
int32_t ctgLaunchGetUdfTask(SCtgTask *pTask) {
- SCatalog* pCtg = pTask->pJob->pCtg;
+ SCatalog* pCtg = pTask->pJob->pCtg;
SRequestConnInfo* pConn = &pTask->pJob->conn;
SCtgUdfCtx* pCtx = (SCtgUdfCtx*)pTask->taskCtx;
SCtgJob* pJob = pTask->pJob;
@@ -2044,7 +2044,7 @@ int32_t ctgLaunchGetUdfTask(SCtgTask *pTask) {
}
int32_t ctgLaunchGetUserTask(SCtgTask *pTask) {
- SCatalog* pCtg = pTask->pJob->pCtg;
+ SCatalog* pCtg = pTask->pJob->pCtg;
SRequestConnInfo* pConn = &pTask->pJob->conn;
SCtgUserCtx* pCtx = (SCtgUserCtx*)pTask->taskCtx;
bool inCache = false;
@@ -2054,7 +2054,7 @@ int32_t ctgLaunchGetUserTask(SCtgTask *pTask) {
if (NULL == pMsgCtx->pBatchs) {
pMsgCtx->pBatchs = pJob->pBatchs;
}
-
+
CTG_ERR_RET(ctgChkAuthFromCache(pCtg, pCtx->user.user, pCtx->user.dbFName, pCtx->user.type, &inCache, &pass));
if (inCache) {
pTask->res = taosMemoryCalloc(1, sizeof(bool));
@@ -2062,7 +2062,7 @@ int32_t ctgLaunchGetUserTask(SCtgTask *pTask) {
CTG_ERR_RET(TSDB_CODE_OUT_OF_MEMORY);
}
*(bool*)pTask->res = pass;
-
+
CTG_ERR_RET(ctgHandleTaskEnd(pTask, 0));
return TSDB_CODE_SUCCESS;
}
@@ -2073,7 +2073,7 @@ int32_t ctgLaunchGetUserTask(SCtgTask *pTask) {
}
int32_t ctgLaunchGetSvrVerTask(SCtgTask *pTask) {
- SCatalog* pCtg = pTask->pJob->pCtg;
+ SCatalog* pCtg = pTask->pJob->pCtg;
SRequestConnInfo* pConn = &pTask->pJob->conn;
SCtgJob* pJob = pTask->pJob;
SCtgMsgCtx *pMsgCtx = CTG_GET_TASK_MSGCTX(pTask, -1);
@@ -2096,7 +2096,7 @@ int32_t ctgRelaunchGetTbMetaTask(SCtgTask *pTask) {
int32_t ctgGetTbCfgCb(SCtgTask *pTask) {
int32_t code = 0;
-
+
CTG_ERR_JRET(pTask->subRes.code);
SCtgTbCfgCtx* pCtx = (SCtgTbCfgCtx*)pTask->taskCtx;
@@ -2104,7 +2104,7 @@ int32_t ctgGetTbCfgCb(SCtgTask *pTask) {
pCtx->tbType = ((STableMeta*)pTask->subRes.res)->tableType;
} else if (CTG_TASK_GET_DB_VGROUP == pTask->subRes.type) {
SDBVgInfo* pDb = (SDBVgInfo*)pTask->subRes.res;
-
+
pCtx->pVgInfo = taosMemoryCalloc(1, sizeof(SVgroupInfo));
CTG_ERR_JRET(ctgGetVgInfoFromHashValue(pTask->pJob->pCtg, pDb, pCtx->pName, pCtx->pVgInfo));
}
@@ -2167,7 +2167,7 @@ SCtgAsyncFps gCtgAsyncFps[] = {
int32_t ctgMakeAsyncRes(SCtgJob *pJob) {
int32_t code = 0;
int32_t taskNum = taosArrayGetSize(pJob->pTasks);
-
+
for (int32_t i = 0; i < taskNum; ++i) {
SCtgTask *pTask = taosArrayGet(pJob->pTasks, i);
CTG_ERR_RET((*gCtgAsyncFps[pTask->type].dumpResFp)(pTask));
@@ -2180,16 +2180,16 @@ int32_t ctgSearchExistingTask(SCtgJob *pJob, CTG_TASK_TYPE type, void* param, in
bool equal = false;
SCtgTask* pTask = NULL;
int32_t code = 0;
-
+
CTG_LOCK(CTG_READ, &pJob->taskLock);
-
+
int32_t taskNum = taosArrayGetSize(pJob->pTasks);
for (int32_t i = 0; i < taskNum; ++i) {
pTask = taosArrayGet(pJob->pTasks, i);
if (type != pTask->type) {
continue;
}
-
+
CTG_ERR_JRET((*gCtgAsyncFps[type].compFp)(pTask, param, &equal));
if (equal) {
break;
@@ -2208,7 +2208,7 @@ _return:
int32_t ctgSetSubTaskCb(SCtgTask *pSub, SCtgTask *pTask) {
int32_t code = 0;
-
+
CTG_LOCK(CTG_WRITE, &pSub->lock);
if (CTG_TASK_DONE == pSub->status) {
pTask->subRes.code = pSub->code;
@@ -2216,7 +2216,7 @@ int32_t ctgSetSubTaskCb(SCtgTask *pSub, SCtgTask *pTask) {
SCtgMsgCtx *pMsgCtx = CTG_GET_TASK_MSGCTX(pTask, -1);
SCtgMsgCtx *pSubMsgCtx = CTG_GET_TASK_MSGCTX(pSub, -1);
pMsgCtx->pBatchs = pSubMsgCtx->pBatchs;
-
+
CTG_ERR_JRET(pTask->subRes.fp(pTask));
} else {
if (NULL == pSub->pParents) {
@@ -2230,7 +2230,7 @@ _return:
CTG_UNLOCK(CTG_WRITE, &pSub->lock);
- CTG_RET(code);
+ CTG_RET(code);
}
@@ -2242,13 +2242,13 @@ int32_t ctgLaunchSubTask(SCtgTask *pTask, CTG_TASK_TYPE type, ctgSubTaskCbFp fp,
ctgClearSubTaskRes(&pTask->subRes);
pTask->subRes.type = type;
pTask->subRes.fp = fp;
-
+
CTG_ERR_RET(ctgSearchExistingTask(pJob, type, param, &subTaskId));
if (subTaskId < 0) {
CTG_ERR_RET(ctgInitTask(pJob, type, param, &subTaskId));
newTask = true;
}
-
+
SCtgTask* pSub = taosArrayGet(pJob->pTasks, subTaskId);
CTG_ERR_RET(ctgSetSubTaskCb(pSub, pTask));
@@ -2267,21 +2267,21 @@ int32_t ctgLaunchSubTask(SCtgTask *pTask, CTG_TASK_TYPE type, ctgSubTaskCbFp fp,
int32_t ctgLaunchJob(SCtgJob *pJob) {
int32_t taskNum = taosArrayGetSize(pJob->pTasks);
-
+
for (int32_t i = 0; i < taskNum; ++i) {
SCtgTask *pTask = taosArrayGet(pJob->pTasks, i);
qDebug("QID:0x%" PRIx64 " ctg launch [%dth] task", pJob->queryId, pTask->taskId);
CTG_ERR_RET((*gCtgAsyncFps[pTask->type].launchFp)(pTask));
-
+
pTask->status = CTG_TASK_LAUNCHED;
}
if (taskNum <= 0) {
qDebug("QID:0x%" PRIx64 " ctg call user callback with rsp %s", pJob->queryId, tstrerror(pJob->jobResCode));
-
+
taosAsyncExec(ctgCallUserCb, pJob, NULL);
-#if CTG_BATCH_FETCH
+#if CTG_BATCH_FETCH
} else {
ctgLaunchBatchs(pJob->pCtg, pJob, pJob->pBatchs);
#endif
diff --git a/source/libs/catalog/src/ctgDbg.c b/source/libs/catalog/src/ctgDbg.c
index bd3402dc394186b03d116c2c2ebe5e83838bdddb..d21524230736c47fb3c81de0c00e1402f09a585e 100644
--- a/source/libs/catalog/src/ctgDbg.c
+++ b/source/libs/catalog/src/ctgDbg.c
@@ -367,18 +367,22 @@ void ctgdShowDBCache(SCatalog* pCtg, SHashObj *dbHash) {
int32_t stbNum = dbCache->stbCache ? taosHashGetSize(dbCache->stbCache) : 0;
int32_t vgVersion = CTG_DEFAULT_INVALID_VERSION;
int32_t hashMethod = -1;
+ int16_t hashPrefix = 0;
+ int16_t hashSuffix = 0;
int32_t vgNum = 0;
if (dbCache->vgCache.vgInfo) {
vgVersion = dbCache->vgCache.vgInfo->vgVersion;
hashMethod = dbCache->vgCache.vgInfo->hashMethod;
+ hashPrefix = dbCache->vgCache.vgInfo->hashPrefix;
+ hashSuffix = dbCache->vgCache.vgInfo->hashSuffix;
if (dbCache->vgCache.vgInfo->vgHash) {
vgNum = taosHashGetSize(dbCache->vgCache.vgInfo->vgHash);
}
}
- ctgDebug("[%d] db [%.*s][0x%"PRIx64"] %s: metaNum:%d, stbNum:%d, vgVersion:%d, hashMethod:%d, vgNum:%d",
- i, (int32_t)len, dbFName, dbCache->dbId, dbCache->deleted?"deleted":"", metaNum, stbNum, vgVersion, hashMethod, vgNum);
+ ctgDebug("[%d] db [%.*s][0x%"PRIx64"] %s: metaNum:%d, stbNum:%d, vgVersion:%d, hashMethod:%d, prefix:%d, suffix:%d, vgNum:%d",
+ i, (int32_t)len, dbFName, dbCache->dbId, dbCache->deleted?"deleted":"", metaNum, stbNum, vgVersion, hashMethod, hashPrefix, hashSuffix, vgNum);
pIter = taosHashIterate(dbHash, pIter);
}
diff --git a/source/libs/catalog/src/ctgUtil.c b/source/libs/catalog/src/ctgUtil.c
index e28234ab7603248e7261829bcb59e44ba24491fe..296100ce6d0569fdc1ab4e3571f1859b9938f37b 100644
--- a/source/libs/catalog/src/ctgUtil.c
+++ b/source/libs/catalog/src/ctgUtil.c
@@ -848,15 +848,11 @@ int32_t ctgGetVgInfoFromHashValue(SCatalog *pCtg, SDBVgInfo *dbInfo, const SName
CTG_ERR_RET(TSDB_CODE_TSC_DB_NOT_SELECTED);
}
- tableNameHashFp fp = NULL;
SVgroupInfo *vgInfo = NULL;
-
- CTG_ERR_RET(ctgGetHashFunction(dbInfo->hashMethod, &fp));
-
char tbFullName[TSDB_TABLE_FNAME_LEN];
tNameExtractFullName(pTableName, tbFullName);
- uint32_t hashValue = (*fp)(tbFullName, (uint32_t)strlen(tbFullName));
+ uint32_t hashValue = taosGetTbHashVal(tbFullName, (uint32_t)strlen(tbFullName), dbInfo->hashMethod, dbInfo->hashPrefix, dbInfo->hashSuffix);
void *pIter = taosHashIterate(dbInfo->vgHash, NULL);
while (pIter) {
@@ -919,11 +915,7 @@ int32_t ctgGetVgInfosFromHashValue(SCatalog *pCtg, SCtgTaskReq* tReq, SDBVgInfo
CTG_ERR_RET(TSDB_CODE_CTG_INTERNAL_ERROR);
}
- tableNameHashFp fp = NULL;
SVgroupInfo *vgInfo = NULL;
-
- CTG_ERR_RET(ctgGetHashFunction(dbInfo->hashMethod, &fp));
-
int32_t tbNum = taosArrayGetSize(pNames);
if (1 == vgNum) {
@@ -975,7 +967,7 @@ int32_t ctgGetVgInfosFromHashValue(SCatalog *pCtg, SCtgTaskReq* tReq, SDBVgInfo
tbNameLen = offset + strlen(pName->tname);
strcpy(tbFullName + offset, pName->tname);
- uint32_t hashValue = (*fp)(tbFullName, (uint32_t)tbNameLen);
+ uint32_t hashValue = taosGetTbHashVal(tbFullName, (uint32_t)strlen(tbFullName), dbInfo->hashMethod, dbInfo->hashPrefix, dbInfo->hashSuffix);
SVgroupInfo **p = taosArraySearch(pVgList, &hashValue, ctgHashValueComp, TD_EQ);
diff --git a/source/libs/catalog/test/catalogTests.cpp b/source/libs/catalog/test/catalogTests.cpp
index 0be85333dca130884516c214e324fbab82e33953..c01c269e643b2a1f2a44817a74807c20db718d4b 100644
--- a/source/libs/catalog/test/catalogTests.cpp
+++ b/source/libs/catalog/test/catalogTests.cpp
@@ -218,6 +218,8 @@ void ctgTestBuildDBVgroup(SDBVgInfo **pdbVgroup) {
ctgTestCurrentVgVersion = dbVgroup->vgVersion;
dbVgroup->hashMethod = 0;
+ dbVgroup->hashPrefix = 0;
+ dbVgroup->hashSuffix = 0;
dbVgroup->vgHash = taosHashInit(ctgTestVgNum, taosGetDefaultHashFunction(TSDB_DATA_TYPE_INT), true, HASH_ENTRY_LOCK);
vgNum = ctgTestGetVgNumFromVgVersion(dbVgroup->vgVersion);
diff --git a/source/libs/command/src/command.c b/source/libs/command/src/command.c
index 1b2489acd62bec88eac5bd5aca54a6d2f00ff1ab..18d839e1091e3fc5f1be2939a22345efe8ea8579 100644
--- a/source/libs/command/src/command.c
+++ b/source/libs/command/src/command.c
@@ -17,6 +17,7 @@
#include "catalog.h"
#include "commandInt.h"
#include "scheduler.h"
+#include "systable.h"
#include "tdatablock.h"
#include "tglobal.h"
#include "tgrant.h"
@@ -75,46 +76,41 @@ static SSDataBlock* buildDescResultDataBlock() {
return pBlock;
}
-static void setDescResultIntoDataBlock(SSDataBlock* pBlock, int32_t numOfRows, STableMeta* pMeta) {
+static void setDescResultIntoDataBlock(bool sysInfoUser, SSDataBlock* pBlock, int32_t numOfRows, STableMeta* pMeta) {
blockDataEnsureCapacity(pBlock, numOfRows);
- pBlock->info.rows = numOfRows;
+ pBlock->info.rows = 0;
// field
SColumnInfoData* pCol1 = taosArrayGet(pBlock->pDataBlock, 0);
- char buf[DESCRIBE_RESULT_FIELD_LEN] = {0};
- for (int32_t i = 0; i < numOfRows; ++i) {
- STR_TO_VARSTR(buf, pMeta->schema[i].name);
- colDataAppend(pCol1, i, buf, false);
- }
-
// Type
SColumnInfoData* pCol2 = taosArrayGet(pBlock->pDataBlock, 1);
- for (int32_t i = 0; i < numOfRows; ++i) {
- STR_TO_VARSTR(buf, tDataTypes[pMeta->schema[i].type].name);
- colDataAppend(pCol2, i, buf, false);
- }
-
// Length
SColumnInfoData* pCol3 = taosArrayGet(pBlock->pDataBlock, 2);
- for (int32_t i = 0; i < numOfRows; ++i) {
- int32_t bytes = getSchemaBytes(pMeta->schema + i);
- colDataAppend(pCol3, i, (const char*)&bytes, false);
- }
-
// Note
SColumnInfoData* pCol4 = taosArrayGet(pBlock->pDataBlock, 3);
+ char buf[DESCRIBE_RESULT_FIELD_LEN] = {0};
for (int32_t i = 0; i < numOfRows; ++i) {
+ if (invisibleColumn(sysInfoUser, pMeta->tableType, pMeta->schema[i].flags)) {
+ continue;
+ }
+ STR_TO_VARSTR(buf, pMeta->schema[i].name);
+ colDataAppend(pCol1, pBlock->info.rows, buf, false);
+ STR_TO_VARSTR(buf, tDataTypes[pMeta->schema[i].type].name);
+ colDataAppend(pCol2, pBlock->info.rows, buf, false);
+ int32_t bytes = getSchemaBytes(pMeta->schema + i);
+ colDataAppend(pCol3, pBlock->info.rows, (const char*)&bytes, false);
STR_TO_VARSTR(buf, i >= pMeta->tableInfo.numOfColumns ? "TAG" : "");
- colDataAppend(pCol4, i, buf, false);
+ colDataAppend(pCol4, pBlock->info.rows, buf, false);
+ ++(pBlock->info.rows);
}
}
-static int32_t execDescribe(SNode* pStmt, SRetrieveTableRsp** pRsp) {
+static int32_t execDescribe(bool sysInfoUser, SNode* pStmt, SRetrieveTableRsp** pRsp) {
SDescribeStmt* pDesc = (SDescribeStmt*)pStmt;
int32_t numOfRows = TABLE_TOTAL_COL_NUM(pDesc->pMeta);
SSDataBlock* pBlock = buildDescResultDataBlock();
- setDescResultIntoDataBlock(pBlock, numOfRows, pDesc->pMeta);
+ setDescResultIntoDataBlock(sysInfoUser, pBlock, numOfRows, pDesc->pMeta);
return buildRetrieveTableRsp(pBlock, DESCRIBE_RESULT_COLS, pRsp);
}
@@ -475,6 +471,7 @@ static int32_t setCreateTBResultIntoDataBlock(SSDataBlock* pBlock, SDbCfgInfo* p
len += sprintf(buf2 + VARSTR_HEADER_SIZE, "CREATE TABLE `%s` (", tbName);
appendColumnFields(buf2, &len, pCfg);
len += sprintf(buf2 + VARSTR_HEADER_SIZE + len, ")");
+ appendTableOptions(buf2, &len, pDbCfg, pCfg);
}
varDataLen(buf2) = len;
@@ -665,10 +662,10 @@ static int32_t execSelectWithoutFrom(SSelectStmt* pSelect, SRetrieveTableRsp** p
return code;
}
-int32_t qExecCommand(SNode* pStmt, SRetrieveTableRsp** pRsp) {
+int32_t qExecCommand(bool sysInfoUser, SNode* pStmt, SRetrieveTableRsp** pRsp) {
switch (nodeType(pStmt)) {
case QUERY_NODE_DESCRIBE_STMT:
- return execDescribe(pStmt, pRsp);
+ return execDescribe(sysInfoUser, pStmt, pRsp);
case QUERY_NODE_RESET_QUERY_CACHE_STMT:
return execResetQueryCache();
case QUERY_NODE_SHOW_CREATE_DATABASE_STMT:
diff --git a/source/libs/executor/inc/executil.h b/source/libs/executor/inc/executil.h
index 4da4747108d5b459eac03546d6f2cc661c3760a1..a26f1c74f8b7c376adf189dbb4416f6e6db9a00d 100644
--- a/source/libs/executor/inc/executil.h
+++ b/source/libs/executor/inc/executil.h
@@ -22,6 +22,13 @@
#include "tbuffer.h"
#include "tcommon.h"
#include "tpagedbuf.h"
+#include "tsimplehash.h"
+
+#define T_LONG_JMP(_obj, _c) \
+ do { \
+ ASSERT((_c) != -1); \
+ longjmp((_obj), (_c)); \
+ } while (0);
#define SET_RES_WINDOW_KEY(_k, _ori, _len, _uid) \
do { \
@@ -80,11 +87,7 @@ struct SqlFunctionCtx;
size_t getResultRowSize(struct SqlFunctionCtx* pCtx, int32_t numOfOutput);
void initResultRowInfo(SResultRowInfo* pResultRowInfo);
-void cleanupResultRowInfo(SResultRowInfo* pResultRowInfo);
-
-void initResultRow(SResultRow* pResultRow);
-void closeResultRow(SResultRow* pResultRow);
-bool isResultRowClosed(SResultRow* pResultRow);
+void closeResultRow(SResultRow* pResultRow);
struct SResultRowEntryInfo* getResultEntryInfo(const SResultRow* pRow, int32_t index, const int32_t* offset);
@@ -102,7 +105,7 @@ static FORCE_INLINE void setResultBufPageDirty(SDiskbasedBuf* pBuf, SResultRowPo
setBufPageDirty(pPage, true);
}
-void initGroupedResultInfo(SGroupResInfo* pGroupResInfo, SHashObj* pHashmap, int32_t order);
+void initGroupedResultInfo(SGroupResInfo* pGroupResInfo, SSHashObj* pHashmap, int32_t order);
void cleanupGroupResInfo(SGroupResInfo* pGroupResInfo);
void initMultiResInfoFromArrayList(SGroupResInfo* pGroupResInfo, SArray* pArrayList);
@@ -123,6 +126,7 @@ SArray* extractPartitionColInfo(SNodeList* pNodeList);
SArray* extractColMatchInfo(SNodeList* pNodeList, SDataBlockDescNode* pOutputNodeList, int32_t* numOfOutputCols,
int32_t type);
+void createExprFromTargetNode(SExprInfo* pExp, STargetNode* pTargetNode);
SExprInfo* createExprInfo(SNodeList* pNodeList, SNodeList* pGroupKeys, int32_t* numOfExprs);
SqlFunctionCtx* createSqlFunctionCtx(SExprInfo* pExprInfo, int32_t numOfOutput, int32_t** rowEntryInfoOffset);
diff --git a/source/libs/executor/inc/executorimpl.h b/source/libs/executor/inc/executorimpl.h
index 601c22a3ba793b17a8ccc8de2cd4dfa37d9c7682..fc8f9420156f554477e25f36ec47a4a13d38456a 100644
--- a/source/libs/executor/inc/executorimpl.h
+++ b/source/libs/executor/inc/executorimpl.h
@@ -38,11 +38,11 @@ extern "C" {
#include "tlockfree.h"
#include "tmsg.h"
#include "tpagedbuf.h"
-#include "tstreamUpdate.h"
#include "tstream.h"
+#include "tstreamUpdate.h"
-#include "vnode.h"
#include "executorInt.h"
+#include "vnode.h"
typedef int32_t (*__block_search_fn_t)(char* data, int32_t num, int64_t key, int32_t order);
@@ -122,7 +122,7 @@ typedef int32_t (*__optr_decode_fn_t)(struct SOperatorInfo* pOperator, char* res
typedef int32_t (*__optr_open_fn_t)(struct SOperatorInfo* pOptr);
typedef SSDataBlock* (*__optr_fn_t)(struct SOperatorInfo* pOptr);
-typedef void (*__optr_close_fn_t)(void* param, int32_t num);
+typedef void (*__optr_close_fn_t)(void* param);
typedef int32_t (*__optr_explain_fn_t)(struct SOperatorInfo* pOptr, void** pOptrExplain, uint32_t* len);
typedef struct STaskIdInfo {
@@ -139,17 +139,23 @@ enum {
};
typedef struct {
- //TODO remove prepareStatus
- STqOffsetVal prepareStatus; // for tmq
- STqOffsetVal lastStatus; // for tmq
- void* metaBlk; // for tmq fetching meta
- SSDataBlock* pullOverBlk; // for streaming
- SWalFilterCond cond;
- int64_t lastScanUid;
- int8_t recoverStep;
+ // TODO remove prepareStatus
+ STqOffsetVal prepareStatus; // for tmq
+ STqOffsetVal lastStatus; // for tmq
+ SMqMetaRsp metaRsp; // for tmq fetching meta
+ int8_t returned;
+ int64_t snapshotVer;
+
+ SSchemaWrapper* schema;
+ char tbName[TSDB_TABLE_NAME_LEN];
+ SSDataBlock* pullOverBlk; // for streaming
+ SWalFilterCond cond;
+ int64_t lastScanUid;
+ int8_t recoverStep;
SQueryTableDataCond tableCond;
- int64_t recoverStartVer;
- int64_t recoverEndVer;
+ int64_t recoverStartVer;
+ int64_t recoverEndVer;
+ SStreamState* pState;
} SStreamTaskInfo;
typedef struct {
@@ -161,29 +167,29 @@ typedef struct {
} SSchemaInfo;
typedef struct SExecTaskInfo {
- STaskIdInfo id;
- uint32_t status;
- STimeWindow window;
- STaskCostInfo cost;
- int64_t owner; // if it is in execution
- int32_t code;
-
- int64_t version; // used for stream to record wal version
- SStreamTaskInfo streamInfo;
- SSchemaInfo schemaInfo;
- STableListInfo tableqinfoList; // this is a table list
- const char* sql; // query sql string
- jmp_buf env; // jump to this position when error happens.
- EOPTR_EXEC_MODEL execModel; // operator execution model [batch model|stream model]
- SSubplan* pSubplan;
+ STaskIdInfo id;
+ uint32_t status;
+ STimeWindow window;
+ STaskCostInfo cost;
+ int64_t owner; // if it is in execution
+ int32_t code;
+
+ int64_t version; // used for stream to record wal version
+ SStreamTaskInfo streamInfo;
+ SSchemaInfo schemaInfo;
+ STableListInfo tableqinfoList; // this is a table list
+ const char* sql; // query sql string
+ jmp_buf env; // jump to this position when error happens.
+ EOPTR_EXEC_MODEL execModel; // operator execution model [batch model|stream model]
+ SSubplan* pSubplan;
struct SOperatorInfo* pRoot;
} SExecTaskInfo;
enum {
- OP_NOT_OPENED = 0x0,
- OP_OPENED = 0x1,
+ OP_NOT_OPENED = 0x0,
+ OP_OPENED = 0x1,
OP_RES_TO_RETURN = 0x5,
- OP_EXEC_DONE = 0x9,
+ OP_EXEC_DONE = 0x9,
};
typedef struct SOperatorFpSet {
@@ -206,6 +212,7 @@ typedef struct SExprSupp {
typedef struct SOperatorInfo {
uint16_t operatorType;
+ int16_t resultDataBlockId;
bool blocking; // block operator or not
uint8_t status; // denote if current operator is completed
char* name; // name, for debug purpose
@@ -217,12 +224,11 @@ typedef struct SOperatorInfo {
struct SOperatorInfo** pDownstream; // downstram pointer list
int32_t numOfDownstream; // number of downstream. The value is always ONE expect for join operator
SOperatorFpSet fpSet;
- int16_t resultDataBlockId;
} SOperatorInfo;
typedef enum {
EX_SOURCE_DATA_NOT_READY = 0x1,
- EX_SOURCE_DATA_READY = 0x2,
+ EX_SOURCE_DATA_READY = 0x2,
EX_SOURCE_DATA_EXHAUSTED = 0x3,
} EX_SOURCE_STATUS;
@@ -245,26 +251,26 @@ typedef struct SLoadRemoteDataInfo {
} SLoadRemoteDataInfo;
typedef struct SLimitInfo {
- SLimit limit;
- SLimit slimit;
- uint64_t currentGroupId;
- int64_t remainGroupOffset;
- int64_t numOfOutputGroups;
- int64_t remainOffset;
- int64_t numOfOutputRows;
+ SLimit limit;
+ SLimit slimit;
+ uint64_t currentGroupId;
+ int64_t remainGroupOffset;
+ int64_t numOfOutputGroups;
+ int64_t remainOffset;
+ int64_t numOfOutputRows;
} SLimitInfo;
typedef struct SExchangeInfo {
- SArray* pSources;
- SArray* pSourceDataInfo;
- tsem_t ready;
- void* pTransporter;
+ SArray* pSources;
+ SArray* pSourceDataInfo;
+ tsem_t ready;
+ void* pTransporter;
// SArray, result block list, used to keep the multi-block that
// passed by downstream operator
SArray* pResultBlockList;
- int32_t rspBlockIndex; // indicate the return block index in pResultBlockList
- SSDataBlock* pDummyBlock; // dummy block, not keep data
- bool seqLoadData; // sequential load data or not, false by default
+ int32_t rspBlockIndex; // indicate the return block index in pResultBlockList
+ SSDataBlock* pDummyBlock; // dummy block, not keep data
+ bool seqLoadData; // sequential load data or not, false by default
int32_t current;
SLoadRemoteDataInfo loadInfo;
uint64_t self;
@@ -272,22 +278,22 @@ typedef struct SExchangeInfo {
} SExchangeInfo;
typedef struct SColMatchInfo {
- int32_t srcSlotId; // source slot id
+ int32_t srcSlotId; // source slot id
int32_t colId;
int32_t targetSlotId;
- bool output; // todo remove this?
+ bool output; // todo remove this?
bool reserved;
- int32_t matchType; // determinate the source according to col id or slot id
+ int32_t matchType; // determinate the source according to col id or slot id
} SColMatchInfo;
typedef struct SScanInfo {
- int32_t numOfAsc;
- int32_t numOfDesc;
+ int32_t numOfAsc;
+ int32_t numOfDesc;
} SScanInfo;
typedef struct SSampleExecInfo {
- double sampleRatio; // data block sample ratio, 1 by default
- uint32_t seed; // random seed value
+ double sampleRatio; // data block sample ratio, 1 by default
+ uint32_t seed; // random seed value
} SSampleExecInfo;
enum {
@@ -296,42 +302,43 @@ enum {
};
typedef struct SAggSupporter {
- SHashObj* pResultRowHashTable; // quick locate the window object for each result
- char* keyBuf; // window key buffer
- SDiskbasedBuf* pResultBuf; // query result buffer based on blocked-wised disk file
- int32_t resultRowSize; // the result buffer size for each result row, with the meta data size for each row
+ SSHashObj* pResultRowHashTable; // quick locate the window object for each result
+ char* keyBuf; // window key buffer
+ SDiskbasedBuf* pResultBuf; // query result buffer based on blocked-wised disk file
+ int32_t resultRowSize; // the result buffer size for each result row, with the meta data size for each row
+ int32_t currentPageId; // current write page id
} SAggSupporter;
typedef struct {
- // if the upstream is an interval operator, the interval info is also kept here to get the time window to check if current data block needs to be loaded.
- SInterval interval;
- SAggSupporter *pAggSup;
- SExprSupp *pExprSup; // expr supporter of aggregate operator
+ // if the upstream is an interval operator, the interval info is also kept here to get the time window to check if
+ // current data block needs to be loaded.
+ SInterval interval;
+ SAggSupporter* pAggSup;
+ SExprSupp* pExprSup; // expr supporter of aggregate operator
} SAggOptrPushDownInfo;
typedef struct STableScanInfo {
- STsdbReader* dataReader;
- SReadHandle readHandle;
+ STsdbReader* dataReader;
+ SReadHandle readHandle;
SFileBlockLoadRecorder readRecorder;
- SScanInfo scanInfo;
- int32_t scanTimes;
- SNode* pFilterNode; // filter info, which is push down by optimizer
+ SScanInfo scanInfo;
+ int32_t scanTimes;
+ SNode* pFilterNode; // filter info, which is push down by optimizer
- SSDataBlock* pResBlock;
- SArray* pColMatchInfo;
- SExprSupp pseudoSup;
- SQueryTableDataCond cond;
- int32_t scanFlag; // table scan flag to denote if it is a repeat/reverse/main scan
- int32_t dataBlockLoadFlag;
-// SInterval interval; // if the upstream is an interval operator, the interval info is also kept here to get the time window to check if current data block needs to be loaded.
- SSampleExecInfo sample; // sample execution info
- int32_t currentGroupId;
- int32_t currentTable;
- int8_t scanMode;
- int8_t noTable;
+ SSDataBlock* pResBlock;
+ SArray* pColMatchInfo;
+ SExprSupp pseudoSup;
+ SQueryTableDataCond cond;
+ int32_t scanFlag; // table scan flag to denote if it is a repeat/reverse/main scan
+ int32_t dataBlockLoadFlag;
+ SSampleExecInfo sample; // sample execution info
+ int32_t currentGroupId;
+ int32_t currentTable;
+ int8_t scanMode;
+ int8_t noTable;
SAggOptrPushDownInfo pdInfo;
- int8_t assignBlockUid;
+ int8_t assignBlockUid;
} STableScanInfo;
typedef struct STableMergeScanInfo {
@@ -364,7 +371,7 @@ typedef struct STableMergeScanInfo {
SArray* pColMatchInfo;
int32_t numOfOutput;
- SExprSupp pseudoSup;
+ SExprSupp pseudoSup;
SQueryTableDataCond cond;
int32_t scanFlag; // table scan flag to denote if it is a repeat/reverse/main scan
@@ -378,32 +385,33 @@ typedef struct STableMergeScanInfo {
} STableMergeScanInfo;
typedef struct STagScanInfo {
- SColumnInfo *pCols;
- SSDataBlock *pRes;
- SArray *pColMatchInfo;
- int32_t curPos;
- SReadHandle readHandle;
- STableListInfo *pTableList;
+ SColumnInfo* pCols;
+ SSDataBlock* pRes;
+ SArray* pColMatchInfo;
+ int32_t curPos;
+ SReadHandle readHandle;
+ STableListInfo* pTableList;
} STagScanInfo;
typedef struct SLastrowScanInfo {
- SSDataBlock *pRes;
- SReadHandle readHandle;
- void *pLastrowReader;
- SArray *pColMatchInfo;
- int32_t *pSlotIds;
- SExprSupp pseudoExprSup;
- int32_t retrieveType;
- int32_t currentGroupIndex;
- SSDataBlock *pBufferredRes;
- SArray *pUidList;
- int32_t indexOfBufferedRes;
+ SSDataBlock* pRes;
+ SReadHandle readHandle;
+ void* pLastrowReader;
+ SArray* pColMatchInfo;
+ int32_t* pSlotIds;
+ SExprSupp pseudoExprSup;
+ int32_t retrieveType;
+ int32_t currentGroupIndex;
+ SSDataBlock* pBufferredRes;
+ SArray* pUidList;
+ int32_t indexOfBufferedRes;
} SLastrowScanInfo;
typedef enum EStreamScanMode {
STREAM_SCAN_FROM_READERHANDLE = 1,
STREAM_SCAN_FROM_RES,
STREAM_SCAN_FROM_UPDATERES,
+ STREAM_SCAN_FROM_DELETE_DATA,
STREAM_SCAN_FROM_DATAREADER_RETRIEVE,
STREAM_SCAN_FROM_DATAREADER_RANGE,
} EStreamScanMode;
@@ -425,24 +433,37 @@ typedef struct SStreamAggSupporter {
SArray* pCurWins;
int32_t valueSize;
int32_t keySize;
- char* pKeyBuf; // window key buffer
- SDiskbasedBuf* pResultBuf; // query result buffer based on blocked-wised disk file
- int32_t resultRowSize; // the result buffer size for each result row, with the meta data size for each row
+ char* pKeyBuf; // window key buffer
+ SDiskbasedBuf* pResultBuf; // query result buffer based on blocked-wised disk file
+ int32_t resultRowSize; // the result buffer size for each result row, with the meta data size for each row
+ int32_t currentPageId; // buffer page that is active
SSDataBlock* pScanBlock;
} SStreamAggSupporter;
-typedef struct SessionWindowSupporter {
+typedef struct SWindowSupporter {
SStreamAggSupporter* pStreamAggSup;
int64_t gap;
uint16_t parentType;
SAggSupporter* pIntervalAggSup;
-} SessionWindowSupporter;
+} SWindowSupporter;
+
+typedef struct SPartitionBySupporter {
+ SArray* pGroupCols; // group by columns, SArray
+ SArray* pGroupColVals; // current group column values, SArray
+ char* keyBuf; // group by keys for hash
+ bool needCalc; // partition by column
+} SPartitionBySupporter;
+
+typedef struct SPartitionDataInfo {
+ uint64_t groupId;
+ SArray* rowIds;
+} SPartitionDataInfo;
typedef struct STimeWindowSupp {
- int8_t calTrigger;
- int64_t waterMark;
- TSKEY maxTs;
- SColumnInfoData timeWindowData; // query time window info for scalar function execution.
+ int8_t calTrigger;
+ int64_t waterMark;
+ TSKEY maxTs;
+ SColumnInfoData timeWindowData; // query time window info for scalar function execution.
} STimeWindowAggSupp;
typedef struct SStreamScanInfo {
@@ -467,27 +488,41 @@ typedef struct SStreamScanInfo {
uint64_t groupId;
SUpdateInfo* pUpdateInfo;
- EStreamScanMode scanMode;
- SOperatorInfo* pStreamScanOp;
- SOperatorInfo* pTableScanOp;
- SArray* childIds;
- SessionWindowSupporter sessionSup;
- bool assignBlockUid; // assign block uid to groupId, temporarily used for generating rollup SMA.
- int32_t scanWinIndex; // for state operator
- int32_t pullDataResIndex;
- SSDataBlock* pPullDataRes; // pull data SSDataBlock
- SSDataBlock* pDeleteDataRes; // delete data SSDataBlock
- int32_t deleteDataIndex;
- STimeWindow updateWin;
- STimeWindowAggSupp twAggSup;
- SSDataBlock* pUpdateDataRes;
+ EStreamScanMode scanMode;
+ SOperatorInfo* pStreamScanOp;
+ SOperatorInfo* pTableScanOp;
+ SArray* childIds;
+ SWindowSupporter windowSup;
+ SPartitionBySupporter partitionSup;
+ SExprSupp* pPartScalarSup;
+ bool assignBlockUid; // assign block uid to groupId, temporarily used for generating rollup SMA.
+ int32_t scanWinIndex; // for state operator
+ int32_t pullDataResIndex;
+ SSDataBlock* pPullDataRes; // pull data SSDataBlock
+ SSDataBlock* pDeleteDataRes; // delete data SSDataBlock
+ int32_t deleteDataIndex;
+ STimeWindow updateWin;
+ STimeWindowAggSupp twAggSup;
+ SSDataBlock* pUpdateDataRes;
// status for tmq
- // SSchemaWrapper schema;
- SNodeList* pGroupTags;
- SNode* pTagCond;
- SNode* pTagIndexCond;
+ SNodeList* pGroupTags;
+ SNode* pTagCond;
+ SNode* pTagIndexCond;
} SStreamScanInfo;
+typedef struct {
+ // int8_t subType;
+ // bool withMeta;
+ // int64_t suid;
+ // int64_t snapVersion;
+ // void *metaInfo;
+ // void *dataInfo;
+ SVnode* vnode;
+ SSDataBlock pRes; // result SSDataBlock
+ STsdbReader* dataReader;
+ SSnapContext* sContext;
+} SStreamRawScanInfo;
+
typedef struct SSysTableScanInfo {
SRetrieveMetaTableRsp* pRsp;
SRetrieveTableReq req;
@@ -496,6 +531,7 @@ typedef struct SSysTableScanInfo {
SReadHandle readHandle;
int32_t accountId;
const char* pUser;
+ bool sysInfo;
bool showRewrite;
SNode* pCondition; // db_name filter condition, to discard data that are not in current database
SMTbCursor* pCur; // cursor for iterate the local table meta store.
@@ -510,14 +546,14 @@ typedef struct SBlockDistInfo {
SSDataBlock* pResBlock;
void* pHandle;
SReadHandle readHandle;
- uint64_t uid; // table uid
+ uint64_t uid; // table uid
} SBlockDistInfo;
// todo remove this
typedef struct SOptrBasicInfo {
- SResultRowInfo resultRowInfo;
- SSDataBlock* pRes;
- bool mergeResultBlock;
+ SResultRowInfo resultRowInfo;
+ SSDataBlock* pRes;
+ bool mergeResultBlock;
} SOptrBasicInfo;
typedef struct SIntervalAggOperatorInfo {
@@ -536,17 +572,17 @@ typedef struct SIntervalAggOperatorInfo {
EOPTR_EXEC_MODEL execModel; // operator execution model [batch model|stream model]
STimeWindowAggSupp twAggSup;
bool invertible;
- SArray* pPrevValues; // SArray used to keep the previous not null value for interpolation.
+ SArray* pPrevValues; // SArray used to keep the previous not null value for interpolation.
bool ignoreExpiredData;
SArray* pRecycledPages;
- SArray* pDelWins; // SWinRes
+ SArray* pDelWins; // SWinRes
int32_t delIndex;
SSDataBlock* pDelRes;
SNode* pCondition;
} SIntervalAggOperatorInfo;
typedef struct SMergeAlignedIntervalAggOperatorInfo {
- SIntervalAggOperatorInfo *intervalAggOperatorInfo;
+ SIntervalAggOperatorInfo* intervalAggOperatorInfo;
bool hasGroupId;
uint64_t groupId; // current groupId
@@ -555,7 +591,7 @@ typedef struct SMergeAlignedIntervalAggOperatorInfo {
SNode* pCondition;
} SMergeAlignedIntervalAggOperatorInfo;
-typedef struct SStreamFinalIntervalOperatorInfo {
+typedef struct SStreamIntervalOperatorInfo {
// SOptrBasicInfo should be first, SAggSupporter should be second for stream encode
SOptrBasicInfo binfo; // basic info
SAggSupporter aggSup; // aggregate supporter
@@ -563,57 +599,75 @@ typedef struct SStreamFinalIntervalOperatorInfo {
SGroupResInfo groupResInfo; // multiple results build supporter
SInterval interval; // interval info
int32_t primaryTsIndex; // primary time stamp slot id from result of downstream operator.
- int32_t order; // current SSDataBlock scan order
+ STimeWindowAggSupp twAggSup;
+ bool invertible;
+ bool ignoreExpiredData;
+ SArray* pRecycledPages;
+ SArray* pDelWins; // SWinRes
+ int32_t delIndex;
+ SSDataBlock* pDelRes;
+ bool isFinal;
+} SStreamIntervalOperatorInfo;
+
+typedef struct SStreamFinalIntervalOperatorInfo {
+ // SOptrBasicInfo should be first, SAggSupporter should be second for stream encode
+ SOptrBasicInfo binfo; // basic info
+ SAggSupporter aggSup; // aggregate supporter
+ SExprSupp scalarSupp; // supporter for perform scalar function
+ SGroupResInfo groupResInfo; // multiple results build supporter
+ SInterval interval; // interval info
+ int32_t primaryTsIndex; // primary time stamp slot id from result of downstream operator.
+ int32_t order; // current SSDataBlock scan order
STimeWindowAggSupp twAggSup;
SArray* pChildren;
SSDataBlock* pUpdateRes;
bool returnUpdate;
- SPhysiNode* pPhyNode; // create new child
+ SPhysiNode* pPhyNode; // create new child
bool isFinal;
SHashObj* pPullDataMap;
- SArray* pPullWins; // SPullWindowInfo
+ SArray* pPullWins; // SPullWindowInfo
int32_t pullIndex;
SSDataBlock* pPullDataRes;
bool ignoreExpiredData;
SArray* pRecycledPages;
- SArray* pDelWins; // SWinRes
+ SArray* pDelWins; // SWinRes
int32_t delIndex;
SSDataBlock* pDelRes;
} SStreamFinalIntervalOperatorInfo;
typedef struct SAggOperatorInfo {
// SOptrBasicInfo should be first, SAggSupporter should be second for stream encode
- SOptrBasicInfo binfo;
- SAggSupporter aggSup;
+ SOptrBasicInfo binfo;
+ SAggSupporter aggSup;
- STableQueryInfo *current;
- uint64_t groupId;
- SGroupResInfo groupResInfo;
- SExprSupp scalarExprSup;
- SNode *pCondition;
+ STableQueryInfo* current;
+ uint64_t groupId;
+ SGroupResInfo groupResInfo;
+ SExprSupp scalarExprSup;
+ SNode* pCondition;
} SAggOperatorInfo;
typedef struct SProjectOperatorInfo {
// SOptrBasicInfo should be first, SAggSupporter should be second for stream encode
- SOptrBasicInfo binfo;
- SAggSupporter aggSup;
- SNode* pFilterNode; // filter info, which is push down by optimizer
- SArray* pPseudoColInfo;
- SLimitInfo limitInfo;
- bool mergeDataBlocks;
- SSDataBlock* pFinalRes;
- SNode* pCondition;
+ SOptrBasicInfo binfo;
+ SAggSupporter aggSup;
+ SNode* pFilterNode; // filter info, which is push down by optimizer
+ SArray* pPseudoColInfo;
+ SLimitInfo limitInfo;
+ bool mergeDataBlocks;
+ SSDataBlock* pFinalRes;
+ SNode* pCondition;
} SProjectOperatorInfo;
typedef struct SIndefOperatorInfo {
- SOptrBasicInfo binfo;
- SAggSupporter aggSup;
- SArray* pPseudoColInfo;
- SExprSupp scalarSup;
- SNode* pCondition;
- uint64_t groupId;
+ SOptrBasicInfo binfo;
+ SAggSupporter aggSup;
+ SArray* pPseudoColInfo;
+ SExprSupp scalarSup;
+ SNode* pCondition;
+ uint64_t groupId;
- SSDataBlock* pNextGroupRes;
+ SSDataBlock* pNextGroupRes;
} SIndefOperatorInfo;
typedef struct SFillOperatorInfo {
@@ -628,7 +682,7 @@ typedef struct SFillOperatorInfo {
SArray* pColMatchColInfo;
int32_t primaryTsCol;
int32_t primarySrcSlotId;
- uint64_t curGroupId; // current handled group id
+ uint64_t curGroupId; // current handled group id
SExprInfo* pExprInfo;
int32_t numOfExpr;
SExprInfo* pNotFillExprInfo;
@@ -637,23 +691,23 @@ typedef struct SFillOperatorInfo {
typedef struct SGroupbyOperatorInfo {
// SOptrBasicInfo should be first, SAggSupporter should be second for stream encode
- SOptrBasicInfo binfo;
- SAggSupporter aggSup;
-
- SArray* pGroupCols; // group by columns, SArray
- SArray* pGroupColVals; // current group column values, SArray
- SNode* pCondition;
- bool isInit; // denote if current val is initialized or not
- char* keyBuf; // group by keys for hash
- int32_t groupKeyLen; // total group by column width
- SGroupResInfo groupResInfo;
- SExprSupp scalarSup;
+ SOptrBasicInfo binfo;
+ SAggSupporter aggSup;
+
+ SArray* pGroupCols; // group by columns, SArray
+ SArray* pGroupColVals; // current group column values, SArray
+ SNode* pCondition;
+ bool isInit; // denote if current val is initialized or not
+ char* keyBuf; // group by keys for hash
+ int32_t groupKeyLen; // total group by column width
+ SGroupResInfo groupResInfo;
+ SExprSupp scalarSup;
} SGroupbyOperatorInfo;
typedef struct SDataGroupInfo {
- uint64_t groupId;
- int64_t numOfRows;
- SArray* pPageList;
+ uint64_t groupId;
+ int64_t numOfRows;
+ SArray* pPageList;
} SDataGroupInfo;
// The sort in partition may be needed later.
@@ -665,13 +719,12 @@ typedef struct SPartitionOperatorInfo {
int32_t groupKeyLen; // total group by column width
SHashObj* pGroupSet; // quick locate the window object for each result
- SDiskbasedBuf* pBuf; // query result buffer based on blocked-wised disk file
- int32_t rowCapacity; // maximum number of rows for each buffer page
- int32_t* columnOffset; // start position for each column data
- SArray* sortedGroupArray; // SDataGroupInfo sorted by group id
- int32_t groupIndex; // group index
- int32_t pageIndex; // page index of current group
- SSDataBlock* pUpdateRes;
+ SDiskbasedBuf* pBuf; // query result buffer based on blocked-wised disk file
+ int32_t rowCapacity; // maximum number of rows for each buffer page
+ int32_t* columnOffset; // start position for each column data
+ SArray* sortedGroupArray; // SDataGroupInfo sorted by group id
+ int32_t groupIndex; // group index
+ int32_t pageIndex; // page index of current group
SExprSupp scalarSup;
} SPartitionOperatorInfo;
@@ -685,75 +738,86 @@ typedef struct SWindowRowsSup {
typedef struct SSessionAggOperatorInfo {
// SOptrBasicInfo should be first, SAggSupporter should be second for stream encode
- SOptrBasicInfo binfo;
- SAggSupporter aggSup;
+ SOptrBasicInfo binfo;
+ SAggSupporter aggSup;
SGroupResInfo groupResInfo;
SWindowRowsSup winSup;
- bool reptScan; // next round scan
- int64_t gap; // session window gap
- int32_t tsSlotId; // primary timestamp slot id
+ bool reptScan; // next round scan
+ int64_t gap; // session window gap
+ int32_t tsSlotId; // primary timestamp slot id
STimeWindowAggSupp twAggSup;
- const SNode* pCondition;
+ const SNode* pCondition;
} SSessionAggOperatorInfo;
typedef struct SResultWindowInfo {
SResultRowPosition pos;
- STimeWindow win;
- uint64_t groupId;
- bool isOutput;
- bool isClosed;
+ STimeWindow win;
+ uint64_t groupId;
+ bool isOutput;
+ bool isClosed;
} SResultWindowInfo;
typedef struct SStateWindowInfo {
SResultWindowInfo winInfo;
- SStateKeys stateKey;
+ SStateKeys stateKey;
} SStateWindowInfo;
typedef struct SStreamSessionAggOperatorInfo {
- SOptrBasicInfo binfo;
- SStreamAggSupporter streamAggSup;
- SExprSupp scalarSupp; // supporter for perform scalar function
- SGroupResInfo groupResInfo;
- int64_t gap; // session window gap
- int32_t primaryTsIndex; // primary timestamp slot id
- int32_t endTsIndex; // window end timestamp slot id
- int32_t order; // current SSDataBlock scan order
- STimeWindowAggSupp twAggSup;
- SSDataBlock* pWinBlock; // window result
- SqlFunctionCtx* pDummyCtx; // for combine
- SSDataBlock* pDelRes; // delete result
- bool returnDelete;
- SSDataBlock* pUpdateRes; // update window
- SHashObj* pStDeleted;
- void* pDelIterator;
- SArray* pChildren; // cache for children's result; final stream operator
- SPhysiNode* pPhyNode; // create new child
- bool isFinal;
- bool ignoreExpiredData;
+ SOptrBasicInfo binfo;
+ SStreamAggSupporter streamAggSup;
+ SExprSupp scalarSupp; // supporter for perform scalar function
+ SGroupResInfo groupResInfo;
+ int64_t gap; // session window gap
+ int32_t primaryTsIndex; // primary timestamp slot id
+ int32_t endTsIndex; // window end timestamp slot id
+ int32_t order; // current SSDataBlock scan order
+ STimeWindowAggSupp twAggSup;
+ SSDataBlock* pWinBlock; // window result
+ SqlFunctionCtx* pDummyCtx; // for combine
+ SSDataBlock* pDelRes; // delete result
+ SSDataBlock* pUpdateRes; // update window
+ bool returnUpdate;
+ SHashObj* pStDeleted;
+ void* pDelIterator;
+ SArray* pChildren; // cache for children's result; final stream operator
+ SPhysiNode* pPhyNode; // create new child
+ bool isFinal;
+ bool ignoreExpiredData;
} SStreamSessionAggOperatorInfo;
+typedef struct SStreamPartitionOperatorInfo {
+ SOptrBasicInfo binfo;
+ SPartitionBySupporter partitionSup;
+ SExprSupp scalarSup;
+ SHashObj* pPartitions;
+ void* parIte;
+ SSDataBlock* pInputDataBlock;
+ int32_t tsColIndex;
+ SSDataBlock* pDelRes;
+} SStreamPartitionOperatorInfo;
+
typedef struct STimeSliceOperatorInfo {
- SSDataBlock* pRes;
- STimeWindow win;
- SInterval interval;
- int64_t current;
- SArray* pPrevRow; // SArray
- SArray* pNextRow; // SArray
- SArray* pLinearInfo; // SArray
- bool fillLastPoint;
- bool isPrevRowSet;
- bool isNextRowSet;
- int32_t fillType; // fill type
- SColumn tsCol; // primary timestamp column
- SExprSupp scalarSup; // scalar calculation
- struct SFillColInfo* pFillColInfo; // fill column info
+ SSDataBlock* pRes;
+ STimeWindow win;
+ SInterval interval;
+ int64_t current;
+ SArray* pPrevRow; // SArray
+ SArray* pNextRow; // SArray
+ SArray* pLinearInfo; // SArray
+ bool fillLastPoint;
+ bool isPrevRowSet;
+ bool isNextRowSet;
+ int32_t fillType; // fill type
+ SColumn tsCol; // primary timestamp column
+ SExprSupp scalarSup; // scalar calculation
+ struct SFillColInfo* pFillColInfo; // fill column info
} STimeSliceOperatorInfo;
typedef struct SStateWindowOperatorInfo {
// SOptrBasicInfo should be first, SAggSupporter should be second for stream encode
- SOptrBasicInfo binfo;
- SAggSupporter aggSup;
+ SOptrBasicInfo binfo;
+ SAggSupporter aggSup;
SGroupResInfo groupResInfo;
SWindowRowsSup winSup;
@@ -767,52 +831,52 @@ typedef struct SStateWindowOperatorInfo {
} SStateWindowOperatorInfo;
typedef struct SStreamStateAggOperatorInfo {
- SOptrBasicInfo binfo;
- SStreamAggSupporter streamAggSup;
- SExprSupp scalarSupp; // supporter for perform scalar function
- SGroupResInfo groupResInfo;
- int32_t primaryTsIndex; // primary timestamp slot id
- int32_t order; // current SSDataBlock scan order
- STimeWindowAggSupp twAggSup;
- SColumn stateCol;
- SqlFunctionCtx* pDummyCtx; // for combine
- SSDataBlock* pDelRes;
- SHashObj* pSeDeleted;
- void* pDelIterator;
- SArray* pChildren; // cache for children's result;
- bool ignoreExpiredData;
+ SOptrBasicInfo binfo;
+ SStreamAggSupporter streamAggSup;
+ SExprSupp scalarSupp; // supporter for perform scalar function
+ SGroupResInfo groupResInfo;
+ int32_t primaryTsIndex; // primary timestamp slot id
+ int32_t order; // current SSDataBlock scan order
+ STimeWindowAggSupp twAggSup;
+ SColumn stateCol;
+ SqlFunctionCtx* pDummyCtx; // for combine
+ SSDataBlock* pDelRes;
+ SHashObj* pSeDeleted;
+ void* pDelIterator;
+ SArray* pChildren; // cache for children's result;
+ bool ignoreExpiredData;
} SStreamStateAggOperatorInfo;
typedef struct SSortedMergeOperatorInfo {
// SOptrBasicInfo should be first, SAggSupporter should be second for stream encode
- SOptrBasicInfo binfo;
- SAggSupporter aggSup;
-
- SArray* pSortInfo;
- int32_t numOfSources;
- SSortHandle *pSortHandle;
- int32_t bufPageSize;
- uint32_t sortBufSize; // max buffer size for in-memory sort
- int32_t resultRowFactor;
- bool hasGroupVal;
- SDiskbasedBuf *pTupleStore; // keep the final results
- int32_t numOfResPerPage;
- char** groupVal;
- SArray *groupInfo;
+ SOptrBasicInfo binfo;
+ SAggSupporter aggSup;
+
+ SArray* pSortInfo;
+ int32_t numOfSources;
+ SSortHandle* pSortHandle;
+ int32_t bufPageSize;
+ uint32_t sortBufSize; // max buffer size for in-memory sort
+ int32_t resultRowFactor;
+ bool hasGroupVal;
+ SDiskbasedBuf* pTupleStore; // keep the final results
+ int32_t numOfResPerPage;
+ char** groupVal;
+ SArray* groupInfo;
} SSortedMergeOperatorInfo;
typedef struct SSortOperatorInfo {
SOptrBasicInfo binfo;
- uint32_t sortBufSize; // max buffer size for in-memory sort
- SArray* pSortInfo;
- SSortHandle* pSortHandle;
- SArray* pColMatchInfo; // for index map from table scan output
- int32_t bufPageSize;
-
- int64_t startTs; // sort start time
- uint64_t sortElapsed; // sort elapsed time, time to flush to disk not included.
- SLimitInfo limitInfo;
- SNode* pCondition;
+ uint32_t sortBufSize; // max buffer size for in-memory sort
+ SArray* pSortInfo;
+ SSortHandle* pSortHandle;
+ SArray* pColMatchInfo; // for index map from table scan output
+ int32_t bufPageSize;
+
+ int64_t startTs; // sort start time
+ uint64_t sortElapsed; // sort elapsed time, time to flush to disk not included.
+ SLimitInfo limitInfo;
+ SNode* pCondition;
} SSortOperatorInfo;
typedef struct STagFilterOperatorInfo {
@@ -820,18 +884,18 @@ typedef struct STagFilterOperatorInfo {
} STagFilterOperatorInfo;
typedef struct SJoinOperatorInfo {
- SSDataBlock *pRes;
- int32_t joinType;
- int32_t inputOrder;
-
- SSDataBlock *pLeft;
- int32_t leftPos;
- SColumnInfo leftCol;
-
- SSDataBlock *pRight;
- int32_t rightPos;
- SColumnInfo rightCol;
- SNode *pCondAfterMerge;
+ SSDataBlock* pRes;
+ int32_t joinType;
+ int32_t inputOrder;
+
+ SSDataBlock* pLeft;
+ int32_t leftPos;
+ SColumnInfo leftCol;
+
+ SSDataBlock* pRight;
+ int32_t rightPos;
+ SColumnInfo rightCol;
+ SNode* pCondAfterMerge;
} SJoinOperatorInfo;
#define OPTR_IS_OPENED(_optr) (((_optr)->status & OP_OPENED) == OP_OPENED)
@@ -840,8 +904,8 @@ typedef struct SJoinOperatorInfo {
void doDestroyExchangeOperatorInfo(void* param);
SOperatorFpSet createOperatorFpSet(__optr_open_fn_t openFn, __optr_fn_t nextFn, __optr_fn_t streamFn,
- __optr_fn_t cleanup, __optr_close_fn_t closeFn, __optr_encode_fn_t encode,
- __optr_decode_fn_t decode, __optr_explain_fn_t explain);
+ __optr_fn_t cleanup, __optr_close_fn_t closeFn, __optr_encode_fn_t encode,
+ __optr_decode_fn_t decode, __optr_explain_fn_t explain);
int32_t operatorDummyOpenFn(SOperatorInfo* pOperator);
void operatorDummyCloseFn(void* param, int32_t numOfCols);
@@ -852,24 +916,25 @@ void cleanupBasicInfo(SOptrBasicInfo* pInfo);
int32_t initExprSupp(SExprSupp* pSup, SExprInfo* pExprInfo, int32_t numOfExpr);
void cleanupExprSupp(SExprSupp* pSup);
void destroyExprInfo(SExprInfo* pExpr, int32_t numOfExprs);
-int32_t initAggInfo(SExprSupp *pSup, SAggSupporter* pAggSup, SExprInfo* pExprInfo, int32_t numOfCols, size_t keyBufSize,
+int32_t initAggInfo(SExprSupp* pSup, SAggSupporter* pAggSup, SExprInfo* pExprInfo, int32_t numOfCols, size_t keyBufSize,
const char* pkey);
-void initResultSizeInfo(SResultInfo * pResultInfo, int32_t numOfRows);
-void doBuildResultDatablock(SOperatorInfo* pOperator, SOptrBasicInfo* pbInfo, SGroupResInfo* pGroupResInfo, SDiskbasedBuf* pBuf);
-int32_t handleLimitOffset(SOperatorInfo *pOperator, SLimitInfo* pLimitInfo, SSDataBlock* pBlock, bool holdDataInBuf);
+void initResultSizeInfo(SResultInfo* pResultInfo, int32_t numOfRows);
+void doBuildResultDatablock(SOperatorInfo* pOperator, SOptrBasicInfo* pbInfo, SGroupResInfo* pGroupResInfo,
+ SDiskbasedBuf* pBuf);
+int32_t handleLimitOffset(SOperatorInfo* pOperator, SLimitInfo* pLimitInfo, SSDataBlock* pBlock, bool holdDataInBuf);
bool hasLimitOffsetInfo(SLimitInfo* pLimitInfo);
void initLimitInfo(const SNode* pLimit, const SNode* pSLimit, SLimitInfo* pLimitInfo);
-void doApplyFunctions(SExecTaskInfo* taskInfo, SqlFunctionCtx* pCtx, SColumnInfoData* pTimeWindowData, int32_t offset,
- int32_t forwardStep, int32_t numOfTotal, int32_t numOfOutput);
+void doApplyFunctions(SExecTaskInfo* taskInfo, SqlFunctionCtx* pCtx, SColumnInfoData* pTimeWindowData, int32_t offset,
+ int32_t forwardStep, int32_t numOfTotal, int32_t numOfOutput);
-int32_t extractDataBlockFromFetchRsp(SSDataBlock* pRes, char* pData, int32_t numOfOutput, SArray* pColList, char** pNextStart);
-void updateLoadRemoteInfo(SLoadRemoteDataInfo *pInfo, int32_t numOfRows, int32_t dataLen, int64_t startTs,
- SOperatorInfo* pOperator);
+int32_t extractDataBlockFromFetchRsp(SSDataBlock* pRes, char* pData, SArray* pColList, char** pNextStart);
+void updateLoadRemoteInfo(SLoadRemoteDataInfo* pInfo, int32_t numOfRows, int32_t dataLen, int64_t startTs,
+ SOperatorInfo* pOperator);
STimeWindow getFirstQualifiedTimeWindow(int64_t ts, STimeWindow* pWindow, SInterval* pInterval, int32_t order);
-int32_t getTableScanInfo(SOperatorInfo* pOperator, int32_t *order, int32_t* scanFlag);
+int32_t getTableScanInfo(SOperatorInfo* pOperator, int32_t* order, int32_t* scanFlag);
int32_t getBufferPgSize(int32_t rowSize, uint32_t* defaultPgsz, uint32_t* defaultBufsz);
void doSetOperatorCompleted(SOperatorInfo* pOperator);
@@ -877,10 +942,10 @@ void doFilter(const SNode* pFilterNode, SSDataBlock* pBlock, const SArray* pC
int32_t addTagPseudoColumnData(SReadHandle* pHandle, SExprInfo* pPseudoExpr, int32_t numOfPseudoExpr,
SSDataBlock* pBlock, const char* idStr);
-void cleanupAggSup(SAggSupporter* pAggSup);
-void destroyBasicOperatorInfo(void* param, int32_t numOfOutput);
-void appendOneRowToDataBlock(SSDataBlock* pBlock, STupleHandle* pTupleHandle);
-void setTbNameColData(void* pMeta, const SSDataBlock* pBlock, SColumnInfoData* pColInfoData, int32_t functionId);
+void cleanupAggSup(SAggSupporter* pAggSup);
+void destroyBasicOperatorInfo(void* param, int32_t numOfOutput);
+void appendOneRowToDataBlock(SSDataBlock* pBlock, STupleHandle* pTupleHandle);
+void setTbNameColData(void* pMeta, const SSDataBlock* pBlock, SColumnInfoData* pColInfoData, int32_t functionId);
int32_t doPrepareScan(SOperatorInfo* pOperator, uint64_t uid, int64_t ts);
int32_t doGetScanStatus(SOperatorInfo* pOperator, uint64_t* uid, int64_t* ts);
@@ -889,39 +954,41 @@ SSDataBlock* loadNextDataBlock(void* param);
void setResultRowInitCtx(SResultRow* pResult, SqlFunctionCtx* pCtx, int32_t numOfOutput, int32_t* rowEntryInfoOffset);
-SResultRow* doSetResultOutBufByKey(SDiskbasedBuf* pResultBuf, SResultRowInfo* pResultRowInfo,
- char* pData, int16_t bytes, bool masterscan, uint64_t groupId,
- SExecTaskInfo* pTaskInfo, bool isIntervalQuery, SAggSupporter* pSup);
+SResultRow* doSetResultOutBufByKey(SDiskbasedBuf* pResultBuf, SResultRowInfo* pResultRowInfo, char* pData,
+ int16_t bytes, bool masterscan, uint64_t groupId, SExecTaskInfo* pTaskInfo,
+ bool isIntervalQuery, SAggSupporter* pSup);
SOperatorInfo* createExchangeOperatorInfo(void* pTransporter, SExchangePhysiNode* pExNode, SExecTaskInfo* pTaskInfo);
-SOperatorInfo* createTableScanOperatorInfo(STableScanPhysiNode* pTableScanNode, SReadHandle* pHandle, SExecTaskInfo* pTaskInfo);
+SOperatorInfo* createTableScanOperatorInfo(STableScanPhysiNode* pTableScanNode, SReadHandle* pHandle,
+ SExecTaskInfo* pTaskInfo);
SOperatorInfo* createTagScanOperatorInfo(SReadHandle* pReadHandle, STagScanPhysiNode* pPhyNode,
STableListInfo* pTableListInfo, SExecTaskInfo* pTaskInfo);
-SOperatorInfo* createSysTableScanOperatorInfo(void* readHandle, SSystemTableScanPhysiNode *pScanPhyNode, const char* pUser, SExecTaskInfo* pTaskInfo);
+SOperatorInfo* createSysTableScanOperatorInfo(void* readHandle, SSystemTableScanPhysiNode* pScanPhyNode,
+ const char* pUser, SExecTaskInfo* pTaskInfo);
-SOperatorInfo* createAggregateOperatorInfo(SOperatorInfo* downstream, SExprInfo* pExprInfo, int32_t numOfCols, SSDataBlock* pResultBlock, SNode* pCondition, SExprInfo* pScalarExprInfo,
+SOperatorInfo* createAggregateOperatorInfo(SOperatorInfo* downstream, SExprInfo* pExprInfo, int32_t numOfCols,
+ SSDataBlock* pResultBlock, SNode* pCondition, SExprInfo* pScalarExprInfo,
int32_t numOfScalarExpr, bool mergeResult, SExecTaskInfo* pTaskInfo);
-SOperatorInfo* createIndefinitOutputOperatorInfo(SOperatorInfo* downstream, SPhysiNode *pNode, SExecTaskInfo* pTaskInfo);
-SOperatorInfo* createProjectOperatorInfo(SOperatorInfo* downstream, SProjectPhysiNode* pProjPhyNode, SExecTaskInfo* pTaskInfo);
+SOperatorInfo* createIndefinitOutputOperatorInfo(SOperatorInfo* downstream, SPhysiNode* pNode,
+ SExecTaskInfo* pTaskInfo);
+SOperatorInfo* createProjectOperatorInfo(SOperatorInfo* downstream, SProjectPhysiNode* pProjPhyNode,
+ SExecTaskInfo* pTaskInfo);
SOperatorInfo* createSortOperatorInfo(SOperatorInfo* downstream, SSortPhysiNode* pSortNode, SExecTaskInfo* pTaskInfo);
-SOperatorInfo* createMultiwayMergeOperatorInfo(SOperatorInfo** dowStreams, size_t numStreams, SMergePhysiNode* pMergePhysiNode, SExecTaskInfo* pTaskInfo);
-SOperatorInfo* createSortedMergeOperatorInfo(SOperatorInfo** downstream, int32_t numOfDownstream, SExprInfo* pExprInfo, int32_t num, SArray* pSortInfo, SArray* pGroupInfo, SExecTaskInfo* pTaskInfo);
-SOperatorInfo* createLastrowScanOperator(SLastRowScanPhysiNode* pTableScanNode, SReadHandle* readHandle, SExecTaskInfo* pTaskInfo);
+SOperatorInfo* createMultiwayMergeOperatorInfo(SOperatorInfo** dowStreams, size_t numStreams,
+ SMergePhysiNode* pMergePhysiNode, SExecTaskInfo* pTaskInfo);
+SOperatorInfo* createCacherowsScanOperator(SLastRowScanPhysiNode* pTableScanNode, SReadHandle* readHandle,
+ SExecTaskInfo* pTaskInfo);
SOperatorInfo* createIntervalOperatorInfo(SOperatorInfo* downstream, SExprInfo* pExprInfo, int32_t numOfCols,
SSDataBlock* pResBlock, SInterval* pInterval, int32_t primaryTsSlotId,
- STimeWindowAggSupp* pTwAggSupp, SIntervalPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo, bool isStream);
-
-SOperatorInfo* createMergeIntervalOperatorInfo(SOperatorInfo* downstream, SExprInfo* pExprInfo, int32_t numOfCols,
- SSDataBlock* pResBlock, SInterval* pInterval, int32_t primaryTsSlotId,
- bool mergeResultBlock, SExecTaskInfo* pTaskInfo);
-
-SOperatorInfo* createMergeAlignedIntervalOperatorInfo(SOperatorInfo* downstream, SExprInfo* pExprInfo, int32_t numOfCols,
- SSDataBlock* pResBlock, SInterval* pInterval, int32_t primaryTsSlotId,
- SNode* pCondition, bool mergeResultBlocks, SExecTaskInfo* pTaskInfo);
-
+ STimeWindowAggSupp* pTwAggSupp, SIntervalPhysiNode* pPhyNode,
+ SExecTaskInfo* pTaskInfo, bool isStream);
+SOperatorInfo* createMergeIntervalOperatorInfo(SOperatorInfo* downstream, SMergeIntervalPhysiNode* pIntervalPhyNode,
+ SExecTaskInfo* pTaskInfo);
+SOperatorInfo* createMergeAlignedIntervalOperatorInfo(SOperatorInfo* downstream, SMergeAlignedIntervalPhysiNode* pNode,
+ SExecTaskInfo* pTaskInfo);
SOperatorInfo* createStreamFinalIntervalOperatorInfo(SOperatorInfo* downstream, SPhysiNode* pPhyNode,
SExecTaskInfo* pTaskInfo, int32_t numOfChild);
SOperatorInfo* createSessionAggOperatorInfo(SOperatorInfo* downstream, SSessionWinodwPhysiNode* pSessionNode,
@@ -929,46 +996,49 @@ SOperatorInfo* createSessionAggOperatorInfo(SOperatorInfo* downstream, SSessionW
SOperatorInfo* createGroupOperatorInfo(SOperatorInfo* downstream, SExprInfo* pExprInfo, int32_t numOfCols,
SSDataBlock* pResultBlock, SArray* pGroupColList, SNode* pCondition,
SExprInfo* pScalarExprInfo, int32_t numOfScalarExpr, SExecTaskInfo* pTaskInfo);
-SOperatorInfo* createDataBlockInfoScanOperator(void* dataReader, SReadHandle* readHandle, uint64_t uid, SBlockDistScanPhysiNode* pBlockScanNode,
- SExecTaskInfo* pTaskInfo);
+SOperatorInfo* createDataBlockInfoScanOperator(void* dataReader, SReadHandle* readHandle, uint64_t uid,
+ SBlockDistScanPhysiNode* pBlockScanNode, SExecTaskInfo* pTaskInfo);
SOperatorInfo* createStreamScanOperatorInfo(SReadHandle* pHandle, STableScanPhysiNode* pTableScanNode, SNode* pTagCond,
SExecTaskInfo* pTaskInfo);
-SOperatorInfo* createFillOperatorInfo(SOperatorInfo* downstream, SFillPhysiNode* pPhyFillNode, SExecTaskInfo* pTaskInfo);
+SOperatorInfo* createRawScanOperatorInfo(SReadHandle* pHandle, SExecTaskInfo* pTaskInfo);
-SOperatorInfo* createStatewindowOperatorInfo(SOperatorInfo* downstream, SExprInfo* pExpr, int32_t numOfCols,
- SSDataBlock* pResBlock, STimeWindowAggSupp *pTwAggSupp, int32_t tsSlotId,
- SColumn* pStateKeyCol, SNode* pCondition, SExecTaskInfo* pTaskInfo);
+SOperatorInfo* createFillOperatorInfo(SOperatorInfo* downstream, SFillPhysiNode* pPhyFillNode,
+ SExecTaskInfo* pTaskInfo);
+SOperatorInfo* createStatewindowOperatorInfo(SOperatorInfo* downstream, SStateWinodwPhysiNode* pStateNode,
+ SExecTaskInfo* pTaskInfo);
+SOperatorInfo* createPartitionOperatorInfo(SOperatorInfo* downstream, SPartitionPhysiNode* pPartNode,
+ SExecTaskInfo* pTaskInfo);
-SOperatorInfo* createPartitionOperatorInfo(SOperatorInfo* downstream, SPartitionPhysiNode* pPartNode, SExecTaskInfo* pTaskInfo);
+SOperatorInfo* createStreamPartitionOperatorInfo(SOperatorInfo* downstream, SPartitionPhysiNode* pPartNode,
+ SExecTaskInfo* pTaskInfo);
SOperatorInfo* createTimeSliceOperatorInfo(SOperatorInfo* downstream, SPhysiNode* pNode, SExecTaskInfo* pTaskInfo);
+SOperatorInfo* createMergeJoinOperatorInfo(SOperatorInfo** pDownstream, int32_t numOfDownstream,
+ SSortMergeJoinPhysiNode* pJoinNode, SExecTaskInfo* pTaskInfo);
-SOperatorInfo* createMergeJoinOperatorInfo(SOperatorInfo** pDownstream, int32_t numOfDownstream, SSortMergeJoinPhysiNode* pJoinNode,
- SExecTaskInfo* pTaskInfo);
-
-SOperatorInfo* createStreamSessionAggOperatorInfo(SOperatorInfo* downstream,
- SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo);
-SOperatorInfo* createStreamFinalSessionAggOperatorInfo(SOperatorInfo* downstream,
- SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo, int32_t numOfChild);
+SOperatorInfo* createStreamSessionAggOperatorInfo(SOperatorInfo* downstream, SPhysiNode* pPhyNode,
+ SExecTaskInfo* pTaskInfo);
+SOperatorInfo* createStreamFinalSessionAggOperatorInfo(SOperatorInfo* downstream, SPhysiNode* pPhyNode,
+ SExecTaskInfo* pTaskInfo, int32_t numOfChild);
+SOperatorInfo* createStreamIntervalOperatorInfo(SOperatorInfo* downstream,
+ SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo);
-SOperatorInfo* createStreamStateAggOperatorInfo(SOperatorInfo* downstream, SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo);
-
-#if 0
-SOperatorInfo* createTableSeqScanOperatorInfo(void* pTsdbReadHandle, STaskRuntimeEnv* pRuntimeEnv);
-#endif
+SOperatorInfo* createStreamStateAggOperatorInfo(SOperatorInfo* downstream, SPhysiNode* pPhyNode,
+ SExecTaskInfo* pTaskInfo);
int32_t projectApplyFunctions(SExprInfo* pExpr, SSDataBlock* pResult, SSDataBlock* pSrcBlock, SqlFunctionCtx* pCtx,
- int32_t numOfOutput, SArray* pPseudoList);
+ int32_t numOfOutput, SArray* pPseudoList);
-void setInputDataBlock(SOperatorInfo* pOperator, SqlFunctionCtx* pCtx, SSDataBlock* pBlock, int32_t order, int32_t scanFlag, bool createDummyCol);
+void setInputDataBlock(SOperatorInfo* pOperator, SqlFunctionCtx* pCtx, SSDataBlock* pBlock, int32_t order,
+ int32_t scanFlag, bool createDummyCol);
bool isTaskKilled(SExecTaskInfo* pTaskInfo);
int32_t checkForQueryBuf(size_t numOfTables);
-void setTaskKilled(SExecTaskInfo* pTaskInfo);
-void queryCostStatis(SExecTaskInfo* pTaskInfo);
+void setTaskKilled(SExecTaskInfo* pTaskInfo);
+void queryCostStatis(SExecTaskInfo* pTaskInfo);
void doDestroyTask(SExecTaskInfo* pTaskInfo);
int32_t getMaximumIdleDurationSec();
@@ -980,7 +1050,7 @@ int32_t getMaximumIdleDurationSec();
* nOptrWithVal: *nOptrWithVal save the number of optr with value
* return: result code, 0 means success
*/
-int32_t encodeOperator(SOperatorInfo* ops, char** data, int32_t *length, int32_t *nOptrWithVal);
+int32_t encodeOperator(SOperatorInfo* ops, char** data, int32_t* length, int32_t* nOptrWithVal);
/*
* ops: root operator, created by caller
@@ -993,7 +1063,7 @@ int32_t decodeOperator(SOperatorInfo* ops, const char* data, int32_t length);
void setTaskStatus(SExecTaskInfo* pTaskInfo, int8_t status);
int32_t createExecTaskInfoImpl(SSubplan* pPlan, SExecTaskInfo** pTaskInfo, SReadHandle* pHandle, uint64_t taskId,
char* sql, EOPTR_EXEC_MODEL model);
-int32_t createDataSinkParam(SDataSinkNode *pNode, void **pParam, qTaskInfo_t* pTaskInfo, SReadHandle* readHandle);
+int32_t createDataSinkParam(SDataSinkNode* pNode, void** pParam, qTaskInfo_t* pTaskInfo, SReadHandle* readHandle);
int32_t getOperatorExplainExecInfo(SOperatorInfo* operatorInfo, SArray* pExecInfoList);
int32_t aggDecodeResultRow(SOperatorInfo* pOperator, char* result);
@@ -1002,41 +1072,50 @@ int32_t aggEncodeResultRow(SOperatorInfo* pOperator, char** result, int32_t* len
STimeWindow getActiveTimeWindow(SDiskbasedBuf* pBuf, SResultRowInfo* pResultRowInfo, int64_t ts, SInterval* pInterval,
int32_t order);
int32_t getNumOfRowsInTimeWindow(SDataBlockInfo* pDataBlockInfo, TSKEY* pPrimaryColumn, int32_t startPos, TSKEY ekey,
- __block_search_fn_t searchFn, STableQueryInfo* item, int32_t order);
+ __block_search_fn_t searchFn, STableQueryInfo* item, int32_t order);
int32_t binarySearchForKey(char* pValue, int num, TSKEY key, int order);
int32_t initStreamAggSupporter(SStreamAggSupporter* pSup, const char* pKey, SqlFunctionCtx* pCtx, int32_t numOfOutput,
- int32_t size);
-SResultRow* getNewResultRow(SDiskbasedBuf* pResultBuf, int64_t tableGroupId, int32_t interBufSize);
-SResultWindowInfo* getSessionTimeWindow(SStreamAggSupporter* pAggSup, TSKEY startTs,
- TSKEY endTs, uint64_t groupId, int64_t gap, int32_t* pIndex);
-SResultWindowInfo* getCurSessionWindow(SStreamAggSupporter* pAggSup, TSKEY startTs,
- TSKEY endTs, uint64_t groupId, int64_t gap, int32_t* pIndex);
-bool isInTimeWindow(STimeWindow* pWin, TSKEY ts, int64_t gap);
-bool functionNeedToExecute(SqlFunctionCtx* pCtx);
-bool isOverdue(TSKEY ts, STimeWindowAggSupp* pSup);
-bool isCloseWindow(STimeWindow* pWin, STimeWindowAggSupp* pSup);
-bool isDeletedWindow(STimeWindow* pWin, uint64_t groupId, SAggSupporter* pSup);
-void appendOneRow(SSDataBlock* pBlock, TSKEY* pStartTs, TSKEY* pEndTs, int32_t uidCol, uint64_t* pID);
-void printDataBlock(SSDataBlock* pBlock, const char* flag);
+ int32_t size);
+SResultRow* getNewResultRow(SDiskbasedBuf* pResultBuf, int32_t* currentPageId, int32_t interBufSize);
+SResultWindowInfo* getSessionTimeWindow(SStreamAggSupporter* pAggSup, TSKEY startTs, TSKEY endTs, uint64_t groupId,
+ int64_t gap, int32_t* pIndex);
+SResultWindowInfo* getCurSessionWindow(SStreamAggSupporter* pAggSup, TSKEY startTs, TSKEY endTs, uint64_t groupId,
+ int64_t gap, int32_t* pIndex);
+bool isInTimeWindow(STimeWindow* pWin, TSKEY ts, int64_t gap);
+bool functionNeedToExecute(SqlFunctionCtx* pCtx);
+bool isOverdue(TSKEY ts, STimeWindowAggSupp* pSup);
+bool isCloseWindow(STimeWindow* pWin, STimeWindowAggSupp* pSup);
+bool isDeletedWindow(STimeWindow* pWin, uint64_t groupId, SAggSupporter* pSup);
+void appendOneRow(SSDataBlock* pBlock, TSKEY* pStartTs, TSKEY* pEndTs, uint64_t* pUid, uint64_t* pGp);
+void printDataBlock(SSDataBlock* pBlock, const char* flag);
+uint64_t calGroupIdByData(SPartitionBySupporter* pParSup, SExprSupp* pExprSup, SSDataBlock* pBlock, int32_t rowId);
int32_t finalizeResultRowIntoResultDataBlock(SDiskbasedBuf* pBuf, SResultRowPosition* resultRowPosition,
- SqlFunctionCtx* pCtx, SExprInfo* pExprInfo, int32_t numOfExprs, const int32_t* rowCellOffset,
- SSDataBlock* pBlock, SExecTaskInfo* pTaskInfo);
+ SqlFunctionCtx* pCtx, SExprInfo* pExprInfo, int32_t numOfExprs,
+ const int32_t* rowCellOffset, SSDataBlock* pBlock,
+ SExecTaskInfo* pTaskInfo);
-int32_t createScanTableListInfo(SScanPhysiNode *pScanNode, SNodeList* pGroupTags, bool groupSort, SReadHandle* pHandle,
- STableListInfo* pTableListInfo, SNode* pTagCond, SNode* pTagIndexCond, const char* idstr);
+int32_t createScanTableListInfo(SScanPhysiNode* pScanNode, SNodeList* pGroupTags, bool groupSort, SReadHandle* pHandle,
+ STableListInfo* pTableListInfo, SNode* pTagCond, SNode* pTagIndexCond,
+ const char* idstr);
SOperatorInfo* createGroupSortOperatorInfo(SOperatorInfo* downstream, SGroupSortPhysiNode* pSortPhyNode,
SExecTaskInfo* pTaskInfo);
-SOperatorInfo* createTableMergeScanOperatorInfo(STableScanPhysiNode* pTableScanNode, STableListInfo *pTableListInfo,
+SOperatorInfo* createTableMergeScanOperatorInfo(STableScanPhysiNode* pTableScanNode, STableListInfo* pTableListInfo,
SReadHandle* readHandle, SExecTaskInfo* pTaskInfo);
void copyUpdateDataBlock(SSDataBlock* pDest, SSDataBlock* pSource, int32_t tsColIndex);
-bool groupbyTbname(SNodeList* pGroupList);
+bool groupbyTbname(SNodeList* pGroupList);
int32_t generateGroupIdMap(STableListInfo* pTableListInfo, SReadHandle* pHandle, SNodeList* groupKey);
-SSDataBlock* createSpecialDataBlock(EStreamType type);
-void* destroySqlFunctionCtx(SqlFunctionCtx* pCtx, int32_t numOfOutput);
+void* destroySqlFunctionCtx(SqlFunctionCtx* pCtx, int32_t numOfOutput);
+int32_t buildDataBlockFromGroupRes(SExecTaskInfo* pTaskInfo, SSDataBlock* pBlock, SExprSupp* pSup,
+ SGroupResInfo* pGroupResInfo);
+int32_t setOutputBuf(STimeWindow* win, SResultRow** pResult, int64_t tableGroupId, SqlFunctionCtx* pCtx,
+ int32_t numOfOutput, int32_t* rowEntryInfoOffset, SAggSupporter* pAggSup,
+ SExecTaskInfo* pTaskInfo);
+int32_t releaseOutputBuf(SExecTaskInfo* pTaskInfo, SWinKey* pKey, SResultRow* pResult);
+int32_t saveOutput(SExecTaskInfo* pTaskInfo, SWinKey* pKey, SResultRow* pResult, int32_t resSize);
#ifdef __cplusplus
}
diff --git a/source/libs/executor/inc/tsimplehash.h b/source/libs/executor/inc/tsimplehash.h
index 4c5a80e2f1954812a81665954d3dc448467f6ffc..27191e3b7e674df4dcec9dabc7b8cc6fbb35f9f2 100644
--- a/source/libs/executor/inc/tsimplehash.h
+++ b/source/libs/executor/inc/tsimplehash.h
@@ -28,7 +28,7 @@ typedef void (*_hash_free_fn_t)(void *);
/**
* @brief single thread hash
- *
+ *
*/
typedef struct SSHashObj SSHashObj;
@@ -52,13 +52,13 @@ int32_t tSimpleHashPrint(const SSHashObj *pHashObj);
/**
* @brief put element into hash table, if the element with the same key exists, update it
- *
- * @param pHashObj
- * @param key
- * @param keyLen
- * @param data
- * @param dataLen
- * @return int32_t
+ *
+ * @param pHashObj
+ * @param key
+ * @param keyLen
+ * @param data
+ * @param dataLen
+ * @return int32_t
*/
int32_t tSimpleHashPut(SSHashObj *pHashObj, const void *key, size_t keyLen, const void *data, size_t dataLen);
@@ -80,6 +80,18 @@ void *tSimpleHashGet(SSHashObj *pHashObj, const void *key, size_t keyLen);
*/
int32_t tSimpleHashRemove(SSHashObj *pHashObj, const void *key, size_t keyLen);
+/**
+ * remove item with the specified key during hash iterate
+ *
+ * @param pHashObj
+ * @param key
+ * @param keyLen
+ * @param pIter
+ * @param iter
+ * @return int32_t
+ */
+int32_t tSimpleHashIterateRemove(SSHashObj *pHashObj, const void *key, size_t keyLen, void **pIter, int32_t *iter);
+
/**
* Clear the hash table.
* @param pHashObj
@@ -99,13 +111,27 @@ void tSimpleHashCleanup(SSHashObj *pHashObj);
*/
size_t tSimpleHashGetMemSize(const SSHashObj *pHashObj);
+#pragma pack(push, 4)
+typedef struct SHNode{
+ struct SHNode *next;
+ uint32_t keyLen : 20;
+ uint32_t dataLen : 12;
+ char data[];
+} SHNode;
+#pragma pack(pop)
+
/**
* Get the corresponding key information for a given data in hash table
* @param data
* @param keyLen
* @return
*/
-void *tSimpleHashGetKey(void *data, size_t* keyLen);
+static FORCE_INLINE void *tSimpleHashGetKey(void *data, size_t *keyLen) {
+ SHNode *node = (SHNode *)((char *)data - offsetof(SHNode, data));
+ if (keyLen) *keyLen = node->keyLen;
+
+ return POINTER_SHIFT(data, node->dataLen);
+}
/**
* Create the hash table iterator
@@ -116,17 +142,6 @@ void *tSimpleHashGetKey(void *data, size_t* keyLen);
*/
void *tSimpleHashIterate(const SSHashObj *pHashObj, void *data, int32_t *iter);
-/**
- * Create the hash table iterator
- *
- * @param pHashObj
- * @param data
- * @param key
- * @param iter
- * @return void*
- */
-void *tSimpleHashIterateKV(const SSHashObj *pHashObj, void *data, void **key, int32_t *iter);
-
#ifdef __cplusplus
}
#endif
diff --git a/source/libs/executor/src/cachescanoperator.c b/source/libs/executor/src/cachescanoperator.c
index 94e4384b3025f0d2ecbbaafd9f92ad10aa84b926..94d9d0cadbd1cf21ac8303a4bee7b86da9695f3c 100644
--- a/source/libs/executor/src/cachescanoperator.c
+++ b/source/libs/executor/src/cachescanoperator.c
@@ -24,26 +24,28 @@
#include "tcompare.h"
#include "thash.h"
#include "ttypes.h"
-#include "executorInt.h"
-static SSDataBlock* doScanLastrow(SOperatorInfo* pOperator);
-static void destroyLastrowScanOperator(void* param, int32_t numOfOutput);
+static SSDataBlock* doScanCache(SOperatorInfo* pOperator);
+static void destroyLastrowScanOperator(void* param);
static int32_t extractTargetSlotId(const SArray* pColMatchInfo, SExecTaskInfo* pTaskInfo, int32_t** pSlotIds);
-SOperatorInfo* createLastrowScanOperator(SLastRowScanPhysiNode* pScanNode, SReadHandle* readHandle, SExecTaskInfo* pTaskInfo) {
+SOperatorInfo* createCacherowsScanOperator(SLastRowScanPhysiNode* pScanNode, SReadHandle* readHandle,
+ SExecTaskInfo* pTaskInfo) {
+ int32_t code = TSDB_CODE_SUCCESS;
SLastrowScanInfo* pInfo = taosMemoryCalloc(1, sizeof(SLastrowScanInfo));
SOperatorInfo* pOperator = taosMemoryCalloc(1, sizeof(SOperatorInfo));
if (pInfo == NULL || pOperator == NULL) {
+ code = TSDB_CODE_OUT_OF_MEMORY;
goto _error;
}
pInfo->readHandle = *readHandle;
- pInfo->pRes = createResDataBlock(pScanNode->scan.node.pOutputDataBlockDesc);
+ pInfo->pRes = createResDataBlock(pScanNode->scan.node.pOutputDataBlockDesc);
int32_t numOfCols = 0;
pInfo->pColMatchInfo = extractColMatchInfo(pScanNode->scan.pScanCols, pScanNode->scan.node.pOutputDataBlockDesc, &numOfCols,
COL_MATCH_FROM_COL_ID);
- int32_t code = extractTargetSlotId(pInfo->pColMatchInfo, pTaskInfo, &pInfo->pSlotIds);
+ code = extractTargetSlotId(pInfo->pColMatchInfo, pTaskInfo, &pInfo->pSlotIds);
if (code != TSDB_CODE_SUCCESS) {
goto _error;
}
@@ -56,13 +58,17 @@ SOperatorInfo* createLastrowScanOperator(SLastRowScanPhysiNode* pScanNode, SRead
// partition by tbname
if (taosArrayGetSize(pTableList->pGroupList) == taosArrayGetSize(pTableList->pTableList)) {
- pInfo->retrieveType = LASTROW_RETRIEVE_TYPE_ALL;
- tsdbLastRowReaderOpen(pInfo->readHandle.vnode, pInfo->retrieveType, pTableList->pTableList,
- taosArrayGetSize(pInfo->pColMatchInfo), &pInfo->pLastrowReader);
+ pInfo->retrieveType = CACHESCAN_RETRIEVE_TYPE_ALL|CACHESCAN_RETRIEVE_LAST_ROW;
+ code = tsdbCacherowsReaderOpen(pInfo->readHandle.vnode, pInfo->retrieveType, pTableList->pTableList,
+ taosArrayGetSize(pInfo->pColMatchInfo), &pInfo->pLastrowReader);
+ if (code != TSDB_CODE_SUCCESS) {
+ goto _error;
+ }
+
pInfo->pBufferredRes = createOneDataBlock(pInfo->pRes, false);
blockDataEnsureCapacity(pInfo->pBufferredRes, pOperator->resultInfo.capacity);
} else { // by tags
- pInfo->retrieveType = LASTROW_RETRIEVE_TYPE_SINGLE;
+ pInfo->retrieveType = CACHESCAN_RETRIEVE_TYPE_SINGLE|CACHESCAN_RETRIEVE_LAST_ROW;
}
if (pScanNode->scan.pScanPseudoCols != NULL) {
@@ -81,19 +87,19 @@ SOperatorInfo* createLastrowScanOperator(SLastRowScanPhysiNode* pScanNode, SRead
pOperator->exprSupp.numOfExprs = taosArrayGetSize(pInfo->pRes->pDataBlock);
pOperator->fpSet =
- createOperatorFpSet(operatorDummyOpenFn, doScanLastrow, NULL, NULL, destroyLastrowScanOperator, NULL, NULL, NULL);
+ createOperatorFpSet(operatorDummyOpenFn, doScanCache, NULL, NULL, destroyLastrowScanOperator, NULL, NULL, NULL);
pOperator->cost.openCost = 0;
return pOperator;
_error:
- pTaskInfo->code = TSDB_CODE_OUT_OF_MEMORY;
- taosMemoryFree(pInfo);
+ pTaskInfo->code = code;
+ destroyLastrowScanOperator(pInfo);
taosMemoryFree(pOperator);
return NULL;
}
-SSDataBlock* doScanLastrow(SOperatorInfo* pOperator) {
+SSDataBlock* doScanCache(SOperatorInfo* pOperator) {
if (pOperator->status == OP_EXEC_DONE) {
return NULL;
}
@@ -110,14 +116,14 @@ SSDataBlock* doScanLastrow(SOperatorInfo* pOperator) {
blockDataCleanup(pInfo->pRes);
// check if it is a group by tbname
- if (pInfo->retrieveType == LASTROW_RETRIEVE_TYPE_ALL) {
+ if ((pInfo->retrieveType & CACHESCAN_RETRIEVE_TYPE_ALL) == CACHESCAN_RETRIEVE_TYPE_ALL) {
if (pInfo->indexOfBufferedRes >= pInfo->pBufferredRes->info.rows) {
blockDataCleanup(pInfo->pBufferredRes);
taosArrayClear(pInfo->pUidList);
- int32_t code = tsdbRetrieveLastRow(pInfo->pLastrowReader, pInfo->pBufferredRes, pInfo->pSlotIds, pInfo->pUidList);
+ int32_t code = tsdbRetrieveCacheRows(pInfo->pLastrowReader, pInfo->pBufferredRes, pInfo->pSlotIds, pInfo->pUidList);
if (code != TSDB_CODE_SUCCESS) {
- longjmp(pTaskInfo->env, code);
+ T_LONG_JMP(pTaskInfo->env, code);
}
// check for tag values
@@ -173,11 +179,11 @@ SSDataBlock* doScanLastrow(SOperatorInfo* pOperator) {
while (pInfo->currentGroupIndex < totalGroups) {
SArray* pGroupTableList = taosArrayGetP(pTableList->pGroupList, pInfo->currentGroupIndex);
- tsdbLastRowReaderOpen(pInfo->readHandle.vnode, pInfo->retrieveType, pGroupTableList,
+ tsdbCacherowsReaderOpen(pInfo->readHandle.vnode, pInfo->retrieveType, pGroupTableList,
taosArrayGetSize(pInfo->pColMatchInfo), &pInfo->pLastrowReader);
taosArrayClear(pInfo->pUidList);
- int32_t code = tsdbRetrieveLastRow(pInfo->pLastrowReader, pInfo->pRes, pInfo->pSlotIds, pInfo->pUidList);
+ int32_t code = tsdbRetrieveCacheRows(pInfo->pLastrowReader, pInfo->pRes, pInfo->pSlotIds, pInfo->pUidList);
if (code != TSDB_CODE_SUCCESS) {
longjmp(pTaskInfo->env, code);
}
@@ -201,7 +207,7 @@ SSDataBlock* doScanLastrow(SOperatorInfo* pOperator) {
}
}
- tsdbLastrowReaderClose(pInfo->pLastrowReader);
+ tsdbCacherowsReaderClose(pInfo->pLastrowReader);
return pInfo->pRes;
}
}
@@ -211,7 +217,7 @@ SSDataBlock* doScanLastrow(SOperatorInfo* pOperator) {
}
}
-void destroyLastrowScanOperator(void* param, int32_t numOfOutput) {
+void destroyLastrowScanOperator(void* param) {
SLastrowScanInfo* pInfo = (SLastrowScanInfo*)param;
blockDataDestroy(pInfo->pRes);
taosMemoryFreeClear(param);
diff --git a/source/libs/executor/src/dataDeleter.c b/source/libs/executor/src/dataDeleter.c
index 06b7c13fa2cb52b8255098c5efb652d56ec57974..40198615eab8a00a09479693193306026b4ba938 100644
--- a/source/libs/executor/src/dataDeleter.c
+++ b/source/libs/executor/src/dataDeleter.c
@@ -168,7 +168,9 @@ static void getDataLength(SDataSinkHandle* pHandle, int64_t* pLen, bool* pQueryE
taosReadQitem(pDeleter->pDataBlocks, (void**)&pBuf);
memcpy(&pDeleter->nextOutput, pBuf, sizeof(SDataDeleterBuf));
taosFreeQitem(pBuf);
- *pLen = ((SDataCacheEntry*)(pDeleter->nextOutput.pData))->dataLen;
+
+ SDataCacheEntry* pEntry = (SDataCacheEntry*)pDeleter->nextOutput.pData;
+ *pLen = pEntry->dataLen;
*pQueryEnd = pDeleter->queryEnd;
qDebug("got data len %" PRId64 ", row num %d in sink", *pLen, ((SDataCacheEntry*)(pDeleter->nextOutput.pData))->numOfRows);
}
diff --git a/source/libs/executor/src/dataDispatcher.c b/source/libs/executor/src/dataDispatcher.c
index 20396046ba5daa34c3caaf76796c2b8a3b06527c..1697ed63fb196aa2a571aa26f8ffe29ee1d6c5d5 100644
--- a/source/libs/executor/src/dataDispatcher.c
+++ b/source/libs/executor/src/dataDispatcher.c
@@ -93,6 +93,8 @@ static void toDataCacheEntry(SDataDispatchHandle* pHandle, const SInputData* pIn
pBuf->useSize = sizeof(SDataCacheEntry);
blockEncode(pInput->pData, pEntry->data, &pEntry->dataLen, numOfCols, pEntry->compressed);
+ ASSERT(pEntry->numOfRows == *(int32_t*)(pEntry->data+8));
+ ASSERT(pEntry->numOfCols == *(int32_t*)(pEntry->data+8+4));
pBuf->useSize += pEntry->dataLen;
@@ -170,7 +172,13 @@ static void getDataLength(SDataSinkHandle* pHandle, int64_t* pLen, bool* pQueryE
taosReadQitem(pDispatcher->pDataBlocks, (void**)&pBuf);
memcpy(&pDispatcher->nextOutput, pBuf, sizeof(SDataDispatchBuf));
taosFreeQitem(pBuf);
- *pLen = ((SDataCacheEntry*)(pDispatcher->nextOutput.pData))->dataLen;
+
+ SDataCacheEntry* pEntry = (SDataCacheEntry*)pDispatcher->nextOutput.pData;
+ *pLen = pEntry->dataLen;
+
+ ASSERT(pEntry->numOfRows == *(int32_t*)(pEntry->data+8));
+ ASSERT(pEntry->numOfCols == *(int32_t*)(pEntry->data+8+4));
+
*pQueryEnd = pDispatcher->queryEnd;
qDebug("got data len %" PRId64 ", row num %d in sink", *pLen, ((SDataCacheEntry*)(pDispatcher->nextOutput.pData))->numOfRows);
}
@@ -191,6 +199,9 @@ static int32_t getDataBlock(SDataSinkHandle* pHandle, SOutputData* pOutput) {
pOutput->numOfCols = pEntry->numOfCols;
pOutput->compressed = pEntry->compressed;
+ ASSERT(pEntry->numOfRows == *(int32_t*)(pEntry->data+8));
+ ASSERT(pEntry->numOfCols == *(int32_t*)(pEntry->data+8+4));
+
atomic_sub_fetch_64(&pDispatcher->cachedSize, pEntry->dataLen);
atomic_sub_fetch_64(&gDataSinkStat.cachedSize, pEntry->dataLen);
diff --git a/source/libs/executor/src/executil.c b/source/libs/executor/src/executil.c
index 118c061c3ab1c708a9dfbe2d5aad3f075cc6e5e4..0c1728386b4d88d0bebe66e6705c328803985eb0 100644
--- a/source/libs/executor/src/executil.c
+++ b/source/libs/executor/src/executil.c
@@ -31,20 +31,6 @@ void initResultRowInfo(SResultRowInfo* pResultRowInfo) {
pResultRowInfo->cur.pageId = -1;
}
-void cleanupResultRowInfo(SResultRowInfo* pResultRowInfo) {
- if (pResultRowInfo == NULL) {
- return;
- }
-
- for (int32_t i = 0; i < pResultRowInfo->size; ++i) {
- // if (pResultRowInfo->pResult[i]) {
- // taosMemoryFreeClear(pResultRowInfo->pResult[i]->key);
- // }
- }
-}
-
-bool isResultRowClosed(SResultRow* pRow) { return (pRow->closed == true); }
-
void closeResultRow(SResultRow* pResultRow) { pResultRow->closed = true; }
// TODO refactor: use macro
@@ -60,8 +46,8 @@ size_t getResultRowSize(SqlFunctionCtx* pCtx, int32_t numOfOutput) {
rowSize += pCtx[i].resDataInfo.interBufSize;
}
- rowSize +=
- (numOfOutput * sizeof(bool)); // expand rowSize to mark if col is null for top/bottom result(doSaveTupleData)
+ rowSize += (numOfOutput * sizeof(bool));
+ // expand rowSize to mark if col is null for top/bottom result(saveTupleData)
return rowSize;
}
@@ -97,7 +83,7 @@ int32_t resultrowComparAsc(const void* p1, const void* p2) {
static int32_t resultrowComparDesc(const void* p1, const void* p2) { return resultrowComparAsc(p2, p1); }
-void initGroupedResultInfo(SGroupResInfo* pGroupResInfo, SHashObj* pHashmap, int32_t order) {
+void initGroupedResultInfo(SGroupResInfo* pGroupResInfo, SSHashObj* pHashmap, int32_t order) {
if (pGroupResInfo->pRows != NULL) {
taosArrayDestroy(pGroupResInfo->pRows);
}
@@ -106,9 +92,10 @@ void initGroupedResultInfo(SGroupResInfo* pGroupResInfo, SHashObj* pHashmap, int
void* pData = NULL;
pGroupResInfo->pRows = taosArrayInit(10, POINTER_BYTES);
- size_t keyLen = 0;
- while ((pData = taosHashIterate(pHashmap, pData)) != NULL) {
- void* key = taosHashGetKey(pData, &keyLen);
+ size_t keyLen = 0;
+ int32_t iter = 0;
+ while ((pData = tSimpleHashIterate(pHashmap, pData, &iter)) != NULL) {
+ void* key = tSimpleHashGetKey(pData, &keyLen);
SResKeyPos* p = taosMemoryMalloc(keyLen + sizeof(SResultRowPosition));
@@ -298,17 +285,17 @@ int32_t isQualifiedTable(STableKeyInfo* info, SNode* pTagCond, void* metaHandle,
return TSDB_CODE_SUCCESS;
}
-typedef struct tagFilterAssist{
- SHashObj *colHash;
+typedef struct tagFilterAssist {
+ SHashObj* colHash;
int32_t index;
- SArray *cInfoList;
-}tagFilterAssist;
+ SArray* cInfoList;
+} tagFilterAssist;
static EDealRes getColumn(SNode** pNode, void* pContext) {
SColumnNode* pSColumnNode = NULL;
if (QUERY_NODE_COLUMN == nodeType((*pNode))) {
pSColumnNode = *(SColumnNode**)pNode;
- }else if(QUERY_NODE_FUNCTION == nodeType((*pNode))){
+ } else if (QUERY_NODE_FUNCTION == nodeType((*pNode))) {
SFunctionNode* pFuncNode = *(SFunctionNode**)(pNode);
if (pFuncNode->funcType == FUNCTION_TYPE_TBNAME) {
pSColumnNode = (SColumnNode*)nodesMakeNode(QUERY_NODE_COLUMN);
@@ -321,24 +308,26 @@ static EDealRes getColumn(SNode** pNode, void* pContext) {
pSColumnNode->node.resType.bytes = TSDB_TABLE_FNAME_LEN - 1 + VARSTR_HEADER_SIZE;
nodesDestroyNode(*pNode);
*pNode = (SNode*)pSColumnNode;
- }else{
+ } else {
return DEAL_RES_CONTINUE;
}
- }else{
+ } else {
return DEAL_RES_CONTINUE;
}
- tagFilterAssist *pData = (tagFilterAssist *)pContext;
- void *data = taosHashGet(pData->colHash, &pSColumnNode->colId, sizeof(pSColumnNode->colId));
- if(!data){
+ tagFilterAssist* pData = (tagFilterAssist*)pContext;
+ void* data = taosHashGet(pData->colHash, &pSColumnNode->colId, sizeof(pSColumnNode->colId));
+ if (!data) {
taosHashPut(pData->colHash, &pSColumnNode->colId, sizeof(pSColumnNode->colId), pNode, sizeof((*pNode)));
pSColumnNode->slotId = pData->index++;
- SColumnInfo cInfo = {.colId = pSColumnNode->colId, .type = pSColumnNode->node.resType.type, .bytes = pSColumnNode->node.resType.bytes};
+ SColumnInfo cInfo = {.colId = pSColumnNode->colId,
+ .type = pSColumnNode->node.resType.type,
+ .bytes = pSColumnNode->node.resType.bytes};
#if TAG_FILTER_DEBUG
qDebug("tagfilter build column info, slotId:%d, colId:%d, type:%d", pSColumnNode->slotId, cInfo.colId, cInfo.type);
#endif
taosArrayPush(pData->cInfoList, &cInfo);
- }else{
+ } else {
SColumnNode* col = *(SColumnNode**)data;
pSColumnNode->slotId = col->slotId;
}
@@ -353,14 +342,14 @@ static int32_t createResultData(SDataType* pType, int32_t numOfRows, SScalarPara
return terrno;
}
- pColumnData->info.type = pType->type;
- pColumnData->info.bytes = pType->bytes;
- pColumnData->info.scale = pType->scale;
+ pColumnData->info.type = pType->type;
+ pColumnData->info.bytes = pType->bytes;
+ pColumnData->info.scale = pType->scale;
pColumnData->info.precision = pType->precision;
int32_t code = colInfoDataEnsureCapacity(pColumnData, numOfRows);
if (code != TSDB_CODE_SUCCESS) {
- terrno = TSDB_CODE_OUT_OF_MEMORY;
+ terrno = code;
taosMemoryFree(pColumnData);
return terrno;
}
@@ -370,27 +359,28 @@ static int32_t createResultData(SDataType* pType, int32_t numOfRows, SScalarPara
return TSDB_CODE_SUCCESS;
}
-static SColumnInfoData* getColInfoResult(void* metaHandle, uint64_t suid, SArray* uidList, SNode* pTagCond){
- int32_t code = TSDB_CODE_SUCCESS;
- SArray* pBlockList = NULL;
+static SColumnInfoData* getColInfoResult(void* metaHandle, uint64_t suid, SArray* uidList, SNode* pTagCond) {
+ int32_t code = TSDB_CODE_SUCCESS;
+ SArray* pBlockList = NULL;
SSDataBlock* pResBlock = NULL;
- SHashObj * tags = NULL;
+ SHashObj* tags = NULL;
SScalarParam output = {0};
tagFilterAssist ctx = {0};
+
ctx.colHash = taosHashInit(4, taosGetDefaultHashFunction(TSDB_DATA_TYPE_SMALLINT), false, HASH_NO_LOCK);
- if(ctx.colHash == NULL){
+ if (ctx.colHash == NULL) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
goto end;
}
ctx.index = 0;
ctx.cInfoList = taosArrayInit(4, sizeof(SColumnInfo));
- if(ctx.cInfoList == NULL){
+ if (ctx.cInfoList == NULL) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
goto end;
}
- nodesRewriteExprPostOrder(&pTagCond, getColumn, (void *)&ctx);
+ nodesRewriteExprPostOrder(&pTagCond, getColumn, (void*)&ctx);
pResBlock = createDataBlock();
if (pResBlock == NULL) {
@@ -404,7 +394,7 @@ static SColumnInfoData* getColInfoResult(void* metaHandle, uint64_t suid, SArray
blockDataAppendColInfo(pResBlock, &colInfo);
}
-// int64_t stt = taosGetTimestampUs();
+ // int64_t stt = taosGetTimestampUs();
tags = taosHashInit(32, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), false, HASH_NO_LOCK);
code = metaGetTableTags(metaHandle, suid, uidList, tags);
if (code != TSDB_CODE_SUCCESS) {
@@ -414,11 +404,11 @@ static SColumnInfoData* getColInfoResult(void* metaHandle, uint64_t suid, SArray
}
int32_t rows = taosArrayGetSize(uidList);
- if(rows == 0){
+ if (rows == 0) {
goto end;
}
-// int64_t stt1 = taosGetTimestampUs();
-// qDebug("generate tag meta rows:%d, cost:%ld us", rows, stt1-stt);
+ // int64_t stt1 = taosGetTimestampUs();
+ // qDebug("generate tag meta rows:%d, cost:%ld us", rows, stt1-stt);
code = blockDataEnsureCapacity(pResBlock, rows);
if (code != TSDB_CODE_SUCCESS) {
@@ -426,46 +416,46 @@ static SColumnInfoData* getColInfoResult(void* metaHandle, uint64_t suid, SArray
goto end;
}
-// int64_t st = taosGetTimestampUs();
+ // int64_t st = taosGetTimestampUs();
for (int32_t i = 0; i < rows; i++) {
int64_t* uid = taosArrayGet(uidList, i);
- for(int32_t j = 0; j < taosArrayGetSize(pResBlock->pDataBlock); j++){
+ for (int32_t j = 0; j < taosArrayGetSize(pResBlock->pDataBlock); j++) {
SColumnInfoData* pColInfo = (SColumnInfoData*)taosArrayGet(pResBlock->pDataBlock, j);
- if(pColInfo->info.colId == -1){ // tbname
+ if (pColInfo->info.colId == -1) { // tbname
char str[TSDB_TABLE_FNAME_LEN + VARSTR_HEADER_SIZE] = {0};
metaGetTableNameByUid(metaHandle, *uid, str);
colDataAppend(pColInfo, i, str, false);
#if TAG_FILTER_DEBUG
- qDebug("tagfilter uid:%ld, tbname:%s", *uid, str+2);
+ qDebug("tagfilter uid:%ld, tbname:%s", *uid, str + 2);
#endif
- }else{
+ } else {
void* tag = taosHashGet(tags, uid, sizeof(int64_t));
ASSERT(tag);
STagVal tagVal = {0};
tagVal.cid = pColInfo->info.colId;
const char* p = metaGetTableTagVal(tag, pColInfo->info.type, &tagVal);
- if (p == NULL || (pColInfo->info.type == TSDB_DATA_TYPE_JSON && ((STag*)p)->nTag == 0)){
+ if (p == NULL || (pColInfo->info.type == TSDB_DATA_TYPE_JSON && ((STag*)p)->nTag == 0)) {
colDataAppend(pColInfo, i, p, true);
} else if (pColInfo->info.type == TSDB_DATA_TYPE_JSON) {
colDataAppend(pColInfo, i, p, false);
} else if (IS_VAR_DATA_TYPE(pColInfo->info.type)) {
- char *tmp = taosMemoryCalloc(tagVal.nData + VARSTR_HEADER_SIZE + 1, 1);
+ char* tmp = taosMemoryCalloc(tagVal.nData + VARSTR_HEADER_SIZE + 1, 1);
varDataSetLen(tmp, tagVal.nData);
memcpy(tmp + VARSTR_HEADER_SIZE, tagVal.pData, tagVal.nData);
colDataAppend(pColInfo, i, tmp, false);
#if TAG_FILTER_DEBUG
- qDebug("tagfilter varch:%s", tmp+2);
+ qDebug("tagfilter varch:%s", tmp + 2);
#endif
taosMemoryFree(tmp);
} else {
colDataAppend(pColInfo, i, (const char*)&tagVal.i64, false);
#if TAG_FILTER_DEBUG
- if(pColInfo->info.type == TSDB_DATA_TYPE_INT){
+ if (pColInfo->info.type == TSDB_DATA_TYPE_INT) {
qDebug("tagfilter int:%d", *(int*)(&tagVal.i64));
- }else if(pColInfo->info.type == TSDB_DATA_TYPE_DOUBLE){
- qDebug("tagfilter double:%f", *(double *)(&tagVal.i64));
+ } else if (pColInfo->info.type == TSDB_DATA_TYPE_DOUBLE) {
+ qDebug("tagfilter double:%f", *(double*)(&tagVal.i64));
}
#endif
}
@@ -474,8 +464,8 @@ static SColumnInfoData* getColInfoResult(void* metaHandle, uint64_t suid, SArray
}
pResBlock->info.rows = rows;
-// int64_t st1 = taosGetTimestampUs();
-// qDebug("generate tag block rows:%d, cost:%ld us", rows, st1-st);
+ // int64_t st1 = taosGetTimestampUs();
+ // qDebug("generate tag block rows:%d, cost:%ld us", rows, st1-st);
pBlockList = taosArrayInit(2, POINTER_BYTES);
taosArrayPush(pBlockList, &pResBlock);
@@ -483,17 +473,19 @@ static SColumnInfoData* getColInfoResult(void* metaHandle, uint64_t suid, SArray
SDataType type = {.type = TSDB_DATA_TYPE_BOOL, .bytes = sizeof(bool)};
code = createResultData(&type, rows, &output);
if (code != TSDB_CODE_SUCCESS) {
+ terrno = code;
qError("failed to create result, reason:%s", tstrerror(code));
goto end;
}
code = scalarCalculate(pTagCond, pBlockList, &output);
- if(code != TSDB_CODE_SUCCESS){
+ if (code != TSDB_CODE_SUCCESS) {
qError("failed to calculate scalar, reason:%s", tstrerror(code));
terrno = code;
+ goto end;
}
-// int64_t st2 = taosGetTimestampUs();
-// qDebug("calculate tag block rows:%d, cost:%ld us", rows, st2-st1);
+ // int64_t st2 = taosGetTimestampUs();
+ // qDebug("calculate tag block rows:%d, cost:%ld us", rows, st2-st1);
end:
taosHashCleanup(tags);
@@ -505,43 +497,43 @@ end:
}
static void releaseColInfoData(void* pCol) {
- if(pCol){
- SColumnInfoData* col = (SColumnInfoData*) pCol;
+ if (pCol) {
+ SColumnInfoData* col = (SColumnInfoData*)pCol;
colDataDestroy(col);
taosMemoryFree(col);
}
}
-int32_t getColInfoResultForGroupby(void* metaHandle, SNodeList* group, STableListInfo* pTableListInfo){
- int32_t code = TSDB_CODE_SUCCESS;
- SArray *pBlockList = NULL;
- SSDataBlock *pResBlock = NULL;
- SHashObj *tags = NULL;
- SArray *uidList = NULL;
- void *keyBuf = NULL;
- SArray *groupData = NULL;
+int32_t getColInfoResultForGroupby(void* metaHandle, SNodeList* group, STableListInfo* pTableListInfo) {
+ int32_t code = TSDB_CODE_SUCCESS;
+ SArray* pBlockList = NULL;
+ SSDataBlock* pResBlock = NULL;
+ SHashObj* tags = NULL;
+ SArray* uidList = NULL;
+ void* keyBuf = NULL;
+ SArray* groupData = NULL;
int32_t rows = taosArrayGetSize(pTableListInfo->pTableList);
- if(rows == 0){
+ if (rows == 0) {
return TDB_CODE_SUCCESS;
}
tagFilterAssist ctx = {0};
ctx.colHash = taosHashInit(4, taosGetDefaultHashFunction(TSDB_DATA_TYPE_SMALLINT), false, HASH_NO_LOCK);
- if(ctx.colHash == NULL){
+ if (ctx.colHash == NULL) {
code = TSDB_CODE_OUT_OF_MEMORY;
goto end;
}
ctx.index = 0;
ctx.cInfoList = taosArrayInit(4, sizeof(SColumnInfo));
- if(ctx.cInfoList == NULL){
+ if (ctx.cInfoList == NULL) {
code = TSDB_CODE_OUT_OF_MEMORY;
goto end;
}
- SNode* pNode = NULL;
+ SNode* pNode = NULL;
FOREACH(pNode, group) {
- nodesRewriteExprPostOrder(&pNode, getColumn, (void *)&ctx);
+ nodesRewriteExprPostOrder(&pNode, getColumn, (void*)&ctx);
REPLACE_NODE(pNode);
}
@@ -563,61 +555,61 @@ int32_t getColInfoResultForGroupby(void* metaHandle, SNodeList* group, STableLis
taosArrayPush(uidList, &pkeyInfo->uid);
}
-// int64_t stt = taosGetTimestampUs();
+ // int64_t stt = taosGetTimestampUs();
tags = taosHashInit(32, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), false, HASH_NO_LOCK);
code = metaGetTableTags(metaHandle, pTableListInfo->suid, uidList, tags);
if (code != TSDB_CODE_SUCCESS) {
goto end;
}
-// int64_t stt1 = taosGetTimestampUs();
-// qDebug("generate tag meta rows:%d, cost:%ld us", rows, stt1-stt);
+ // int64_t stt1 = taosGetTimestampUs();
+ // qDebug("generate tag meta rows:%d, cost:%ld us", rows, stt1-stt);
code = blockDataEnsureCapacity(pResBlock, rows);
if (code != TSDB_CODE_SUCCESS) {
goto end;
}
-// int64_t st = taosGetTimestampUs();
+ // int64_t st = taosGetTimestampUs();
for (int32_t i = 0; i < rows; i++) {
int64_t* uid = taosArrayGet(uidList, i);
- for(int32_t j = 0; j < taosArrayGetSize(pResBlock->pDataBlock); j++){
+ for (int32_t j = 0; j < taosArrayGetSize(pResBlock->pDataBlock); j++) {
SColumnInfoData* pColInfo = (SColumnInfoData*)taosArrayGet(pResBlock->pDataBlock, j);
- if(pColInfo->info.colId == -1){ // tbname
+ if (pColInfo->info.colId == -1) { // tbname
char str[TSDB_TABLE_FNAME_LEN + VARSTR_HEADER_SIZE] = {0};
metaGetTableNameByUid(metaHandle, *uid, str);
colDataAppend(pColInfo, i, str, false);
#if TAG_FILTER_DEBUG
- qDebug("tagfilter uid:%ld, tbname:%s", *uid, str+2);
+ qDebug("tagfilter uid:%ld, tbname:%s", *uid, str + 2);
#endif
- }else{
+ } else {
void* tag = taosHashGet(tags, uid, sizeof(int64_t));
ASSERT(tag);
STagVal tagVal = {0};
tagVal.cid = pColInfo->info.colId;
const char* p = metaGetTableTagVal(tag, pColInfo->info.type, &tagVal);
- if (p == NULL || (pColInfo->info.type == TSDB_DATA_TYPE_JSON && ((STag*)p)->nTag == 0)){
+ if (p == NULL || (pColInfo->info.type == TSDB_DATA_TYPE_JSON && ((STag*)p)->nTag == 0)) {
colDataAppend(pColInfo, i, p, true);
} else if (pColInfo->info.type == TSDB_DATA_TYPE_JSON) {
colDataAppend(pColInfo, i, p, false);
} else if (IS_VAR_DATA_TYPE(pColInfo->info.type)) {
- char *tmp = taosMemoryCalloc(tagVal.nData + VARSTR_HEADER_SIZE + 1, 1);
+ char* tmp = taosMemoryCalloc(tagVal.nData + VARSTR_HEADER_SIZE + 1, 1);
varDataSetLen(tmp, tagVal.nData);
memcpy(tmp + VARSTR_HEADER_SIZE, tagVal.pData, tagVal.nData);
colDataAppend(pColInfo, i, tmp, false);
#if TAG_FILTER_DEBUG
- qDebug("tagfilter varch:%s", tmp+2);
+ qDebug("tagfilter varch:%s", tmp + 2);
#endif
taosMemoryFree(tmp);
} else {
colDataAppend(pColInfo, i, (const char*)&tagVal.i64, false);
#if TAG_FILTER_DEBUG
- if(pColInfo->info.type == TSDB_DATA_TYPE_INT){
+ if (pColInfo->info.type == TSDB_DATA_TYPE_INT) {
qDebug("tagfilter int:%d", *(int*)(&tagVal.i64));
- }else if(pColInfo->info.type == TSDB_DATA_TYPE_DOUBLE){
- qDebug("tagfilter double:%f", *(double *)(&tagVal.i64));
+ } else if (pColInfo->info.type == TSDB_DATA_TYPE_DOUBLE) {
+ qDebug("tagfilter double:%f", *(double*)(&tagVal.i64));
}
#endif
}
@@ -626,8 +618,8 @@ int32_t getColInfoResultForGroupby(void* metaHandle, SNodeList* group, STableLis
}
pResBlock->info.rows = rows;
-// int64_t st1 = taosGetTimestampUs();
-// qDebug("generate tag block rows:%d, cost:%ld us", rows, st1-st);
+ // int64_t st1 = taosGetTimestampUs();
+ // qDebug("generate tag block rows:%d, cost:%ld us", rows, st1-st);
pBlockList = taosArrayInit(2, POINTER_BYTES);
taosArrayPush(pBlockList, &pResBlock);
@@ -641,7 +633,7 @@ int32_t getColInfoResultForGroupby(void* metaHandle, SNodeList* group, STableLis
break;
case QUERY_NODE_COLUMN:
case QUERY_NODE_OPERATOR:
- case QUERY_NODE_FUNCTION:{
+ case QUERY_NODE_FUNCTION: {
SExprNode* expNode = (SExprNode*)pNode;
code = createResultData(&expNode->resType, rows, &output);
if (code != TSDB_CODE_SUCCESS) {
@@ -653,16 +645,16 @@ int32_t getColInfoResultForGroupby(void* metaHandle, SNodeList* group, STableLis
code = TSDB_CODE_OPS_NOT_SUPPORT;
goto end;
}
- if(nodeType(pNode) == QUERY_NODE_COLUMN){
- SColumnNode* pSColumnNode = (SColumnNode*)pNode;
+ if (nodeType(pNode) == QUERY_NODE_COLUMN) {
+ SColumnNode* pSColumnNode = (SColumnNode*)pNode;
SColumnInfoData* pColInfo = (SColumnInfoData*)taosArrayGet(pResBlock->pDataBlock, pSColumnNode->slotId);
code = colDataAssign(output.columnData, pColInfo, rows, NULL);
- }else if(nodeType(pNode) == QUERY_NODE_VALUE){
+ } else if (nodeType(pNode) == QUERY_NODE_VALUE) {
continue;
- }else{
+ } else {
code = scalarCalculate(pNode, pBlockList, &output);
}
- if(code != TSDB_CODE_SUCCESS){
+ if (code != TSDB_CODE_SUCCESS) {
releaseColInfoData(output.columnData);
goto end;
}
@@ -670,7 +662,7 @@ int32_t getColInfoResultForGroupby(void* metaHandle, SNodeList* group, STableLis
}
int32_t keyLen = 0;
- SNode* node;
+ SNode* node;
FOREACH(node, group) {
SExprNode* pExpr = (SExprNode*)node;
keyLen += pExpr->resType.bytes;
@@ -684,12 +676,12 @@ int32_t getColInfoResultForGroupby(void* metaHandle, SNodeList* group, STableLis
code = TSDB_CODE_OUT_OF_MEMORY;
goto end;
}
- for(int i = 0; i < rows; i++){
+ for (int i = 0; i < rows; i++) {
STableKeyInfo* info = taosArrayGet(pTableListInfo->pTableList, i);
char* isNull = (char*)keyBuf;
char* pStart = (char*)keyBuf + sizeof(int8_t) * LIST_LENGTH(group);
- for(int j = 0; j < taosArrayGetSize(groupData); j++){
+ for (int j = 0; j < taosArrayGetSize(groupData); j++) {
SColumnInfoData* pValue = (SColumnInfoData*)taosArrayGetP(groupData, j);
if (colDataIsNull_s(pValue, i)) {
@@ -702,7 +694,7 @@ int32_t getColInfoResultForGroupby(void* metaHandle, SNodeList* group, STableLis
code = TSDB_CODE_QRY_JSON_IN_GROUP_ERROR;
goto end;
}
- if(tTagIsJsonNull(data)){
+ if (tTagIsJsonNull(data)) {
isNull[j] = 1;
continue;
}
@@ -724,10 +716,10 @@ int32_t getColInfoResultForGroupby(void* metaHandle, SNodeList* group, STableLis
taosHashPut(pTableListInfo->map, &(info->uid), sizeof(uint64_t), &info->groupId, sizeof(uint64_t));
}
-// int64_t st2 = taosGetTimestampUs();
-// qDebug("calculate tag block rows:%d, cost:%ld us", rows, st2-st1);
+ // int64_t st2 = taosGetTimestampUs();
+ // qDebug("calculate tag block rows:%d, cost:%ld us", rows, st2-st1);
- end:
+end:
taosMemoryFreeClear(keyBuf);
taosHashCleanup(tags);
taosHashCleanup(ctx.colHash);
@@ -757,7 +749,7 @@ int32_t getTableList(void* metaHandle, void* pVnode, SScanPhysiNode* pScanNode,
SIndexMetaArg metaArg = {
.metaEx = metaHandle, .idx = tsdbGetIdx(metaHandle), .ivtIdx = tsdbGetIvtIdx(metaHandle), .suid = tableUid};
-// int64_t stt = taosGetTimestampUs();
+ // int64_t stt = taosGetTimestampUs();
SIdxFltStatus status = SFLT_NOT_INDEX;
code = doFilterTag(pTagIndexCond, &metaArg, res, &status);
if (code != 0 || status == SFLT_NOT_INDEX) {
@@ -765,20 +757,21 @@ int32_t getTableList(void* metaHandle, void* pVnode, SScanPhysiNode* pScanNode,
code = TDB_CODE_SUCCESS;
}
-// int64_t stt1 = taosGetTimestampUs();
-// qDebug("generate table list, cost:%ld us", stt1-stt);
- }else if(!pTagCond){
+ // int64_t stt1 = taosGetTimestampUs();
+ // qDebug("generate table list, cost:%ld us", stt1-stt);
+ } else if (!pTagCond) {
vnodeGetCtbIdList(pVnode, pScanNode->suid, res);
}
} else { // Create one table group.
- if(metaIsTableExist(metaHandle, tableUid)){
+ if (metaIsTableExist(metaHandle, tableUid)) {
taosArrayPush(res, &tableUid);
}
}
if (pTagCond) {
+ terrno = TDB_CODE_SUCCESS;
SColumnInfoData* pColInfoData = getColInfoResult(metaHandle, pListInfo->suid, res, pTagCond);
- if(terrno != TDB_CODE_SUCCESS){
+ if (terrno != TDB_CODE_SUCCESS) {
colDataDestroy(pColInfoData);
taosMemoryFreeClear(pColInfoData);
taosArrayDestroy(res);
@@ -840,7 +833,7 @@ size_t getTableTagsBufLen(const SNodeList* pGroups) {
int32_t getGroupIdFromTagsVal(void* pMeta, uint64_t uid, SNodeList* pGroupNode, char* keyBuf, uint64_t* pGroupId) {
SMetaReader mr = {0};
metaReaderInit(&mr, pMeta, 0);
- if(metaGetTableEntryByUid(&mr, uid) != 0){ // table not exist
+ if (metaGetTableEntryByUid(&mr, uid) != 0) { // table not exist
metaReaderClear(&mr);
return TSDB_CODE_PAR_TABLE_NOT_EXIST;
}
@@ -944,15 +937,17 @@ SArray* extractColMatchInfo(SNodeList* pNodeList, SDataBlockDescNode* pOutputNod
for (int32_t i = 0; i < numOfCols; ++i) {
STargetNode* pNode = (STargetNode*)nodesListGetNode(pNodeList, i);
- SColumnNode* pColNode = (SColumnNode*)pNode->pExpr;
-
- SColMatchInfo c = {0};
- c.output = true;
- c.colId = pColNode->colId;
- c.srcSlotId = pColNode->slotId;
- c.matchType = type;
- c.targetSlotId = pNode->slotId;
- taosArrayPush(pList, &c);
+ if (nodeType(pNode->pExpr) == QUERY_NODE_COLUMN) {
+ SColumnNode* pColNode = (SColumnNode*)pNode->pExpr;
+
+ SColMatchInfo c = {0};
+ c.output = true;
+ c.colId = pColNode->colId;
+ c.srcSlotId = pColNode->slotId;
+ c.matchType = type;
+ c.targetSlotId = pNode->slotId;
+ taosArrayPush(pList, &c);
+ }
}
*numOfOutputCols = 0;
@@ -998,7 +993,7 @@ static SResSchema createResSchema(int32_t type, int32_t bytes, int32_t slotId, i
return s;
}
-static SColumn* createColumn(int32_t blockId, int32_t slotId, int32_t colId, SDataType* pType) {
+static SColumn* createColumn(int32_t blockId, int32_t slotId, int32_t colId, SDataType* pType, EColumnType colType) {
SColumn* pCol = taosMemoryCalloc(1, sizeof(SColumn));
if (pCol == NULL) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
@@ -1012,10 +1007,104 @@ static SColumn* createColumn(int32_t blockId, int32_t slotId, int32_t colId, SDa
pCol->scale = pType->scale;
pCol->precision = pType->precision;
pCol->dataBlockId = blockId;
-
+ pCol->colType = colType;
return pCol;
}
+void createExprFromTargetNode(SExprInfo* pExp, STargetNode* pTargetNode) {
+ pExp->pExpr = taosMemoryCalloc(1, sizeof(tExprNode));
+ pExp->pExpr->_function.num = 1;
+ pExp->pExpr->_function.functionId = -1;
+
+ int32_t type = nodeType(pTargetNode->pExpr);
+ // it is a project query, or group by column
+ if (type == QUERY_NODE_COLUMN) {
+ pExp->pExpr->nodeType = QUERY_NODE_COLUMN;
+ SColumnNode* pColNode = (SColumnNode*)pTargetNode->pExpr;
+
+ pExp->base.pParam = taosMemoryCalloc(1, sizeof(SFunctParam));
+ pExp->base.numOfParams = 1;
+
+ SDataType* pType = &pColNode->node.resType;
+ pExp->base.resSchema = createResSchema(pType->type, pType->bytes, pTargetNode->slotId, pType->scale,
+ pType->precision, pColNode->colName);
+ pExp->base.pParam[0].pCol =
+ createColumn(pColNode->dataBlockId, pColNode->slotId, pColNode->colId, pType, pColNode->colType);
+ pExp->base.pParam[0].type = FUNC_PARAM_TYPE_COLUMN;
+ } else if (type == QUERY_NODE_VALUE) {
+ pExp->pExpr->nodeType = QUERY_NODE_VALUE;
+ SValueNode* pValNode = (SValueNode*)pTargetNode->pExpr;
+
+ pExp->base.pParam = taosMemoryCalloc(1, sizeof(SFunctParam));
+ pExp->base.numOfParams = 1;
+
+ SDataType* pType = &pValNode->node.resType;
+ pExp->base.resSchema = createResSchema(pType->type, pType->bytes, pTargetNode->slotId, pType->scale,
+ pType->precision, pValNode->node.aliasName);
+ pExp->base.pParam[0].type = FUNC_PARAM_TYPE_VALUE;
+ nodesValueNodeToVariant(pValNode, &pExp->base.pParam[0].param);
+ } else if (type == QUERY_NODE_FUNCTION) {
+ pExp->pExpr->nodeType = QUERY_NODE_FUNCTION;
+ SFunctionNode* pFuncNode = (SFunctionNode*)pTargetNode->pExpr;
+
+ SDataType* pType = &pFuncNode->node.resType;
+ pExp->base.resSchema = createResSchema(pType->type, pType->bytes, pTargetNode->slotId, pType->scale,
+ pType->precision, pFuncNode->node.aliasName);
+
+ pExp->pExpr->_function.functionId = pFuncNode->funcId;
+ pExp->pExpr->_function.pFunctNode = pFuncNode;
+
+ strncpy(pExp->pExpr->_function.functionName, pFuncNode->functionName,
+ tListLen(pExp->pExpr->_function.functionName));
+#if 1
+ // todo refactor: add the parameter for tbname function
+ if (!pFuncNode->pParameterList && (strcmp(pExp->pExpr->_function.functionName, "tbname") == 0)) {
+ pFuncNode->pParameterList = nodesMakeList();
+ ASSERT(LIST_LENGTH(pFuncNode->pParameterList) == 0);
+ SValueNode* res = (SValueNode*)nodesMakeNode(QUERY_NODE_VALUE);
+ if (NULL == res) { // todo handle error
+ } else {
+ res->node.resType = (SDataType){.bytes = sizeof(int64_t), .type = TSDB_DATA_TYPE_BIGINT};
+ nodesListAppend(pFuncNode->pParameterList, (SNode*)res);
+ }
+ }
+#endif
+
+ int32_t numOfParam = LIST_LENGTH(pFuncNode->pParameterList);
+
+ pExp->base.pParam = taosMemoryCalloc(numOfParam, sizeof(SFunctParam));
+ pExp->base.numOfParams = numOfParam;
+
+ for (int32_t j = 0; j < numOfParam; ++j) {
+ SNode* p1 = nodesListGetNode(pFuncNode->pParameterList, j);
+ if (p1->type == QUERY_NODE_COLUMN) {
+ SColumnNode* pcn = (SColumnNode*)p1;
+
+ pExp->base.pParam[j].type = FUNC_PARAM_TYPE_COLUMN;
+ pExp->base.pParam[j].pCol =
+ createColumn(pcn->dataBlockId, pcn->slotId, pcn->colId, &pcn->node.resType, pcn->colType);
+ } else if (p1->type == QUERY_NODE_VALUE) {
+ SValueNode* pvn = (SValueNode*)p1;
+ pExp->base.pParam[j].type = FUNC_PARAM_TYPE_VALUE;
+ nodesValueNodeToVariant(pvn, &pExp->base.pParam[j].param);
+ }
+ }
+ } else if (type == QUERY_NODE_OPERATOR) {
+ pExp->pExpr->nodeType = QUERY_NODE_OPERATOR;
+ SOperatorNode* pNode = (SOperatorNode*)pTargetNode->pExpr;
+
+ pExp->base.pParam = taosMemoryCalloc(1, sizeof(SFunctParam));
+ pExp->base.numOfParams = 1;
+
+ SDataType* pType = &pNode->node.resType;
+ pExp->base.resSchema = createResSchema(pType->type, pType->bytes, pTargetNode->slotId, pType->scale,
+ pType->precision, pNode->node.aliasName);
+ pExp->pExpr->_optrRoot.pRootNode = pTargetNode->pExpr;
+ } else {
+ ASSERT(0);
+ }
+}
+
SExprInfo* createExprInfo(SNodeList* pNodeList, SNodeList* pGroupKeys, int32_t* numOfExprs) {
int32_t numOfFuncs = LIST_LENGTH(pNodeList);
int32_t numOfGroupKeys = 0;
@@ -1039,96 +1128,7 @@ SExprInfo* createExprInfo(SNodeList* pNodeList, SNodeList* pGroupKeys, int32_t*
}
SExprInfo* pExp = &pExprs[i];
-
- pExp->pExpr = taosMemoryCalloc(1, sizeof(tExprNode));
- pExp->pExpr->_function.num = 1;
- pExp->pExpr->_function.functionId = -1;
-
- int32_t type = nodeType(pTargetNode->pExpr);
- // it is a project query, or group by column
- if (type == QUERY_NODE_COLUMN) {
- pExp->pExpr->nodeType = QUERY_NODE_COLUMN;
- SColumnNode* pColNode = (SColumnNode*)pTargetNode->pExpr;
-
- pExp->base.pParam = taosMemoryCalloc(1, sizeof(SFunctParam));
- pExp->base.numOfParams = 1;
-
- SDataType* pType = &pColNode->node.resType;
- pExp->base.resSchema = createResSchema(pType->type, pType->bytes, pTargetNode->slotId, pType->scale,
- pType->precision, pColNode->colName);
- pExp->base.pParam[0].pCol = createColumn(pColNode->dataBlockId, pColNode->slotId, pColNode->colId, pType);
- pExp->base.pParam[0].type = FUNC_PARAM_TYPE_COLUMN;
- } else if (type == QUERY_NODE_VALUE) {
- pExp->pExpr->nodeType = QUERY_NODE_VALUE;
- SValueNode* pValNode = (SValueNode*)pTargetNode->pExpr;
-
- pExp->base.pParam = taosMemoryCalloc(1, sizeof(SFunctParam));
- pExp->base.numOfParams = 1;
-
- SDataType* pType = &pValNode->node.resType;
- pExp->base.resSchema = createResSchema(pType->type, pType->bytes, pTargetNode->slotId, pType->scale,
- pType->precision, pValNode->node.aliasName);
- pExp->base.pParam[0].type = FUNC_PARAM_TYPE_VALUE;
- nodesValueNodeToVariant(pValNode, &pExp->base.pParam[0].param);
- } else if (type == QUERY_NODE_FUNCTION) {
- pExp->pExpr->nodeType = QUERY_NODE_FUNCTION;
- SFunctionNode* pFuncNode = (SFunctionNode*)pTargetNode->pExpr;
-
- SDataType* pType = &pFuncNode->node.resType;
- pExp->base.resSchema = createResSchema(pType->type, pType->bytes, pTargetNode->slotId, pType->scale,
- pType->precision, pFuncNode->node.aliasName);
-
- pExp->pExpr->_function.functionId = pFuncNode->funcId;
- pExp->pExpr->_function.pFunctNode = pFuncNode;
-
- strncpy(pExp->pExpr->_function.functionName, pFuncNode->functionName,
- tListLen(pExp->pExpr->_function.functionName));
-#if 1
- // todo refactor: add the parameter for tbname function
- if (!pFuncNode->pParameterList && (strcmp(pExp->pExpr->_function.functionName, "tbname") == 0)) {
- pFuncNode->pParameterList = nodesMakeList();
- ASSERT(LIST_LENGTH(pFuncNode->pParameterList) == 0);
- SValueNode* res = (SValueNode*)nodesMakeNode(QUERY_NODE_VALUE);
- if (NULL == res) { // todo handle error
- } else {
- res->node.resType = (SDataType){.bytes = sizeof(int64_t), .type = TSDB_DATA_TYPE_BIGINT};
- nodesListAppend(pFuncNode->pParameterList, (SNode*)res);
- }
- }
-#endif
-
- int32_t numOfParam = LIST_LENGTH(pFuncNode->pParameterList);
-
- pExp->base.pParam = taosMemoryCalloc(numOfParam, sizeof(SFunctParam));
- pExp->base.numOfParams = numOfParam;
-
- for (int32_t j = 0; j < numOfParam; ++j) {
- SNode* p1 = nodesListGetNode(pFuncNode->pParameterList, j);
- if (p1->type == QUERY_NODE_COLUMN) {
- SColumnNode* pcn = (SColumnNode*)p1;
-
- pExp->base.pParam[j].type = FUNC_PARAM_TYPE_COLUMN;
- pExp->base.pParam[j].pCol = createColumn(pcn->dataBlockId, pcn->slotId, pcn->colId, &pcn->node.resType);
- } else if (p1->type == QUERY_NODE_VALUE) {
- SValueNode* pvn = (SValueNode*)p1;
- pExp->base.pParam[j].type = FUNC_PARAM_TYPE_VALUE;
- nodesValueNodeToVariant(pvn, &pExp->base.pParam[j].param);
- }
- }
- } else if (type == QUERY_NODE_OPERATOR) {
- pExp->pExpr->nodeType = QUERY_NODE_OPERATOR;
- SOperatorNode* pNode = (SOperatorNode*)pTargetNode->pExpr;
-
- pExp->base.pParam = taosMemoryCalloc(1, sizeof(SFunctParam));
- pExp->base.numOfParams = 1;
-
- SDataType* pType = &pNode->node.resType;
- pExp->base.resSchema = createResSchema(pType->type, pType->bytes, pTargetNode->slotId, pType->scale,
- pType->precision, pNode->node.aliasName);
- pExp->pExpr->_optrRoot.pRootNode = pTargetNode->pExpr;
- } else {
- ASSERT(0);
- }
+ createExprFromTargetNode(pExp, pTargetNode);
}
return pExprs;
@@ -1182,7 +1182,6 @@ SqlFunctionCtx* createSqlFunctionCtx(SExprInfo* pExprInfo, int32_t numOfOutput,
SqlFunctionCtx* pCtx = &pFuncCtx[i];
pCtx->functionId = -1;
- pCtx->curBufPage = -1;
pCtx->pExpr = pExpr;
if (pExpr->pExpr->nodeType == QUERY_NODE_FUNCTION) {
@@ -1195,7 +1194,7 @@ SqlFunctionCtx* createSqlFunctionCtx(SExprInfo* pExprInfo, int32_t numOfOutput,
fmGetFuncExecFuncs(pCtx->functionId, &pCtx->fpSet);
} else {
char* udfName = pExpr->pExpr->_function.pFunctNode->functionName;
- strncpy(pCtx->udfName, udfName, strlen(udfName));
+ strncpy(pCtx->udfName, udfName, TSDB_FUNC_NAME_LEN);
fmGetUdafExecFuncs(pCtx->functionId, &pCtx->fpSet);
}
pCtx->fpSet.getEnv(pExpr->pExpr->_function.pFunctNode, &env);
@@ -1223,10 +1222,10 @@ SqlFunctionCtx* createSqlFunctionCtx(SExprInfo* pExprInfo, int32_t numOfOutput,
pCtx->start.key = INT64_MIN;
pCtx->end.key = INT64_MIN;
pCtx->numOfParams = pExpr->base.numOfParams;
- pCtx->increase = false;
pCtx->isStream = false;
pCtx->param = pFunct->pParam;
+ pCtx->saveHandle.currentPage = -1;
}
for (int32_t i = 1; i < numOfOutput; ++i) {
diff --git a/source/libs/executor/src/executor.c b/source/libs/executor/src/executor.c
index fe1f4911cae75aa4f972670126b9e94735dd1a56..95415e1113f0fe807157ac5a0e6509caef97def6 100644
--- a/source/libs/executor/src/executor.c
+++ b/source/libs/executor/src/executor.c
@@ -52,7 +52,11 @@ static int32_t doSetStreamBlock(SOperatorInfo* pOperator, void* input, size_t nu
// TODO: if a block was set but not consumed,
// prevent setting a different type of block
pInfo->validBlockIndex = 0;
- taosArrayClear(pInfo->pBlockLists);
+ if (pInfo->blockType == STREAM_INPUT__DATA_BLOCK) {
+ taosArrayClearP(pInfo->pBlockLists, taosMemoryFree);
+ } else {
+ taosArrayClear(pInfo->pBlockLists);
+ }
if (type == STREAM_INPUT__MERGED_SUBMIT) {
// ASSERT(numOfBlocks > 1);
@@ -93,6 +97,8 @@ static int32_t doSetStreamBlock(SOperatorInfo* pOperator, void* input, size_t nu
}
}
+static FORCE_INLINE void streamInputBlockDataDestory(void* pBlock) { blockDataDestroy((SSDataBlock*)pBlock); }
+
void tdCleanupStreamInputDataBlock(qTaskInfo_t tinfo) {
SExecTaskInfo* pTaskInfo = (SExecTaskInfo*)tinfo;
if (!pTaskInfo || !pTaskInfo->pRoot || pTaskInfo->pRoot->numOfDownstream <= 0) {
@@ -103,11 +109,7 @@ void tdCleanupStreamInputDataBlock(qTaskInfo_t tinfo) {
if (pOptrInfo->operatorType == QUERY_NODE_PHYSICAL_PLAN_STREAM_SCAN) {
SStreamScanInfo* pInfo = pOptrInfo->info;
if (pInfo->blockType == STREAM_INPUT__DATA_BLOCK) {
- for (int32_t i = 0; i < taosArrayGetSize(pInfo->pBlockLists); ++i) {
- SSDataBlock* p = *(SSDataBlock**)taosArrayGet(pInfo->pBlockLists, i);
- taosArrayDestroy(p->pDataBlock);
- taosMemoryFreeClear(p);
- }
+ taosArrayClearP(pInfo->pBlockLists, streamInputBlockDataDestory);
} else {
ASSERT(0);
}
@@ -139,8 +141,24 @@ int32_t qSetMultiStreamInput(qTaskInfo_t tinfo, const void* pBlocks, size_t numO
qTaskInfo_t qCreateQueueExecTaskInfo(void* msg, SReadHandle* readers, int32_t* numOfCols, SSchemaWrapper** pSchema) {
if (msg == NULL) {
- // TODO create raw scan
- return NULL;
+ // create raw scan
+
+ SExecTaskInfo* pTaskInfo = taosMemoryCalloc(1, sizeof(SExecTaskInfo));
+ if (NULL == pTaskInfo) {
+ terrno = TSDB_CODE_OUT_OF_MEMORY;
+ return NULL;
+ }
+ setTaskStatus(pTaskInfo, TASK_NOT_COMPLETED);
+
+ pTaskInfo->cost.created = taosGetTimestampMs();
+ pTaskInfo->execModel = OPTR_EXEC_MODEL_QUEUE;
+ pTaskInfo->pRoot = createRawScanOperatorInfo(readers, pTaskInfo);
+ if (NULL == pTaskInfo->pRoot) {
+ terrno = TSDB_CODE_OUT_OF_MEMORY;
+ taosMemoryFree(pTaskInfo);
+ return NULL;
+ }
+ return pTaskInfo;
}
struct SSubplan* pPlan = NULL;
@@ -669,15 +687,26 @@ void* qExtractReaderFromStreamScanner(void* scanner) {
return (void*)pInfo->tqReader;
}
-const SSchemaWrapper* qExtractSchemaFromStreamScanner(void* scanner) {
- SStreamScanInfo* pInfo = scanner;
- return pInfo->tqReader->pSchemaWrapper;
+const SSchemaWrapper* qExtractSchemaFromTask(qTaskInfo_t tinfo) {
+ SExecTaskInfo* pTaskInfo = (SExecTaskInfo*)tinfo;
+ return pTaskInfo->streamInfo.schema;
+}
+
+const char* qExtractTbnameFromTask(qTaskInfo_t tinfo) {
+ SExecTaskInfo* pTaskInfo = (SExecTaskInfo*)tinfo;
+ return pTaskInfo->streamInfo.tbName;
}
-void* qStreamExtractMetaMsg(qTaskInfo_t tinfo) {
+SMqMetaRsp* qStreamExtractMetaMsg(qTaskInfo_t tinfo) {
SExecTaskInfo* pTaskInfo = (SExecTaskInfo*)tinfo;
ASSERT(pTaskInfo->execModel == OPTR_EXEC_MODEL_QUEUE);
- return pTaskInfo->streamInfo.metaBlk;
+ return &pTaskInfo->streamInfo.metaRsp;
+}
+
+int64_t qStreamExtractPrepareUid(qTaskInfo_t tinfo) {
+ SExecTaskInfo* pTaskInfo = (SExecTaskInfo*)tinfo;
+ ASSERT(pTaskInfo->execModel == OPTR_EXEC_MODEL_QUEUE);
+ return pTaskInfo->streamInfo.prepareStatus.uid;
}
int32_t qStreamExtractOffset(qTaskInfo_t tinfo, STqOffsetVal* pOffset) {
@@ -687,102 +716,166 @@ int32_t qStreamExtractOffset(qTaskInfo_t tinfo, STqOffsetVal* pOffset) {
return 0;
}
-int32_t qStreamPrepareScan(qTaskInfo_t tinfo, const STqOffsetVal* pOffset) {
+int32_t initQueryTableDataCondForTmq(SQueryTableDataCond* pCond, SSnapContext* sContext, SMetaTableInfo mtInfo) {
+ memset(pCond, 0, sizeof(SQueryTableDataCond));
+ pCond->order = TSDB_ORDER_ASC;
+ pCond->numOfCols = mtInfo.schema->nCols;
+ pCond->colList = taosMemoryCalloc(pCond->numOfCols, sizeof(SColumnInfo));
+ if (pCond->colList == NULL) {
+ terrno = TSDB_CODE_QRY_OUT_OF_MEMORY;
+ return terrno;
+ }
+
+ pCond->twindows = (STimeWindow){.skey = INT64_MIN, .ekey = INT64_MAX};
+ pCond->suid = mtInfo.suid;
+ pCond->type = TIMEWINDOW_RANGE_CONTAINED;
+ pCond->startVersion = -1;
+ pCond->endVersion = sContext->snapVersion;
+
+ for (int32_t i = 0; i < pCond->numOfCols; ++i) {
+ pCond->colList[i].type = mtInfo.schema->pSchema[i].type;
+ pCond->colList[i].bytes = mtInfo.schema->pSchema[i].bytes;
+ pCond->colList[i].colId = mtInfo.schema->pSchema[i].colId;
+ }
+
+ return TSDB_CODE_SUCCESS;
+}
+
+int32_t qStreamPrepareScan(qTaskInfo_t tinfo, STqOffsetVal* pOffset, int8_t subType) {
SExecTaskInfo* pTaskInfo = (SExecTaskInfo*)tinfo;
SOperatorInfo* pOperator = pTaskInfo->pRoot;
ASSERT(pTaskInfo->execModel == OPTR_EXEC_MODEL_QUEUE);
pTaskInfo->streamInfo.prepareStatus = *pOffset;
- if (!tOffsetEqual(pOffset, &pTaskInfo->streamInfo.lastStatus)) {
- while (1) {
- uint16_t type = pOperator->operatorType;
- pOperator->status = OP_OPENED;
- // TODO add more check
- if (type != QUERY_NODE_PHYSICAL_PLAN_STREAM_SCAN) {
- ASSERT(pOperator->numOfDownstream == 1);
- pOperator = pOperator->pDownstream[0];
- }
+ pTaskInfo->streamInfo.returned = 0;
+ if (tOffsetEqual(pOffset, &pTaskInfo->streamInfo.lastStatus)) {
+ return 0;
+ }
+ if (subType == TOPIC_SUB_TYPE__COLUMN) {
+ uint16_t type = pOperator->operatorType;
+ pOperator->status = OP_OPENED;
+ // TODO add more check
+ if (type != QUERY_NODE_PHYSICAL_PLAN_STREAM_SCAN) {
+ ASSERT(pOperator->numOfDownstream == 1);
+ pOperator = pOperator->pDownstream[0];
+ }
- SStreamScanInfo* pInfo = pOperator->info;
- if (pOffset->type == TMQ_OFFSET__LOG) {
- STableScanInfo* pTSInfo = pInfo->pTableScanOp->info;
- tsdbReaderClose(pTSInfo->dataReader);
- pTSInfo->dataReader = NULL;
+ SStreamScanInfo* pInfo = pOperator->info;
+ if (pOffset->type == TMQ_OFFSET__LOG) {
+ STableScanInfo* pTSInfo = pInfo->pTableScanOp->info;
+ tsdbReaderClose(pTSInfo->dataReader);
+ pTSInfo->dataReader = NULL;
#if 0
- if (tOffsetEqual(pOffset, &pTaskInfo->streamInfo.lastStatus) &&
- pInfo->tqReader->pWalReader->curVersion != pOffset->version) {
- qError("prepare scan ver %" PRId64 " actual ver %" PRId64 ", last %" PRId64, pOffset->version,
- pInfo->tqReader->pWalReader->curVersion, pTaskInfo->streamInfo.lastStatus.version);
- ASSERT(0);
- }
+ if (tOffsetEqual(pOffset, &pTaskInfo->streamInfo.lastStatus) &&
+ pInfo->tqReader->pWalReader->curVersion != pOffset->version) {
+ qError("prepare scan ver %" PRId64 " actual ver %" PRId64 ", last %" PRId64, pOffset->version,
+ pInfo->tqReader->pWalReader->curVersion, pTaskInfo->streamInfo.lastStatus.version);
+ ASSERT(0);
+ }
#endif
- if (tqSeekVer(pInfo->tqReader, pOffset->version + 1) < 0) {
+ if (tqSeekVer(pInfo->tqReader, pOffset->version + 1) < 0) {
+ return -1;
+ }
+ ASSERT(pInfo->tqReader->pWalReader->curVersion == pOffset->version + 1);
+ } else if (pOffset->type == TMQ_OFFSET__SNAPSHOT_DATA) {
+ /*pInfo->blockType = STREAM_INPUT__TABLE_SCAN;*/
+ int64_t uid = pOffset->uid;
+ int64_t ts = pOffset->ts;
+
+ if (uid == 0) {
+ if (taosArrayGetSize(pTaskInfo->tableqinfoList.pTableList) != 0) {
+ STableKeyInfo* pTableInfo = taosArrayGet(pTaskInfo->tableqinfoList.pTableList, 0);
+ uid = pTableInfo->uid;
+ ts = INT64_MIN;
+ } else {
return -1;
}
- ASSERT(pInfo->tqReader->pWalReader->curVersion == pOffset->version + 1);
- } else if (pOffset->type == TMQ_OFFSET__SNAPSHOT_DATA) {
- /*pInfo->blockType = STREAM_INPUT__TABLE_SCAN;*/
- int64_t uid = pOffset->uid;
- int64_t ts = pOffset->ts;
-
- if (uid == 0) {
- if (taosArrayGetSize(pTaskInfo->tableqinfoList.pTableList) != 0) {
- STableKeyInfo* pTableInfo = taosArrayGet(pTaskInfo->tableqinfoList.pTableList, 0);
- uid = pTableInfo->uid;
- ts = INT64_MIN;
- } else {
- return -1;
- }
- }
+ }
- /*if (pTaskInfo->streamInfo.lastStatus.type != TMQ_OFFSET__SNAPSHOT_DATA ||*/
- /*pTaskInfo->streamInfo.lastStatus.uid != uid || pTaskInfo->streamInfo.lastStatus.ts != ts) {*/
- STableScanInfo* pTableScanInfo = pInfo->pTableScanOp->info;
- int32_t tableSz = taosArrayGetSize(pTaskInfo->tableqinfoList.pTableList);
+ /*if (pTaskInfo->streamInfo.lastStatus.type != TMQ_OFFSET__SNAPSHOT_DATA ||*/
+ /*pTaskInfo->streamInfo.lastStatus.uid != uid || pTaskInfo->streamInfo.lastStatus.ts != ts) {*/
+ STableScanInfo* pTableScanInfo = pInfo->pTableScanOp->info;
+ int32_t tableSz = taosArrayGetSize(pTaskInfo->tableqinfoList.pTableList);
#ifndef NDEBUG
-
- qDebug("switch to next table %" PRId64 " (cursor %d), %" PRId64 " rows returned", uid,
- pTableScanInfo->currentTable, pInfo->pTableScanOp->resultInfo.totalRows);
- pInfo->pTableScanOp->resultInfo.totalRows = 0;
+ qDebug("switch to next table %" PRId64 " (cursor %d), %" PRId64 " rows returned", uid,
+ pTableScanInfo->currentTable, pInfo->pTableScanOp->resultInfo.totalRows);
+ pInfo->pTableScanOp->resultInfo.totalRows = 0;
#endif
- bool found = false;
- for (int32_t i = 0; i < tableSz; i++) {
- STableKeyInfo* pTableInfo = taosArrayGet(pTaskInfo->tableqinfoList.pTableList, i);
- if (pTableInfo->uid == uid) {
- found = true;
- pTableScanInfo->currentTable = i;
- break;
- }
+ bool found = false;
+ for (int32_t i = 0; i < tableSz; i++) {
+ STableKeyInfo* pTableInfo = taosArrayGet(pTaskInfo->tableqinfoList.pTableList, i);
+ if (pTableInfo->uid == uid) {
+ found = true;
+ pTableScanInfo->currentTable = i;
+ break;
}
+ }
- // TODO after dropping table, table may be not found
- ASSERT(found);
+ // TODO after dropping table, table may not found
+ ASSERT(found);
- if (pTableScanInfo->dataReader == NULL) {
- if (tsdbReaderOpen(pTableScanInfo->readHandle.vnode, &pTableScanInfo->cond,
- pTaskInfo->tableqinfoList.pTableList, &pTableScanInfo->dataReader, NULL) < 0 ||
- pTableScanInfo->dataReader == NULL) {
- ASSERT(0);
- }
+ if (pTableScanInfo->dataReader == NULL) {
+ if (tsdbReaderOpen(pTableScanInfo->readHandle.vnode, &pTableScanInfo->cond,
+ pTaskInfo->tableqinfoList.pTableList, &pTableScanInfo->dataReader, NULL) < 0 ||
+ pTableScanInfo->dataReader == NULL) {
+ ASSERT(0);
}
+ }
- tsdbSetTableId(pTableScanInfo->dataReader, uid);
- int64_t oldSkey = pTableScanInfo->cond.twindows.skey;
- pTableScanInfo->cond.twindows.skey = ts + 1;
- tsdbReaderReset(pTableScanInfo->dataReader, &pTableScanInfo->cond);
- pTableScanInfo->cond.twindows.skey = oldSkey;
- pTableScanInfo->scanTimes = 0;
+ tsdbSetTableId(pTableScanInfo->dataReader, uid);
+ int64_t oldSkey = pTableScanInfo->cond.twindows.skey;
+ pTableScanInfo->cond.twindows.skey = ts + 1;
+ tsdbReaderReset(pTableScanInfo->dataReader, &pTableScanInfo->cond);
+ pTableScanInfo->cond.twindows.skey = oldSkey;
+ pTableScanInfo->scanTimes = 0;
- qDebug("tsdb reader offset seek to uid %" PRId64 " ts %" PRId64 ", table cur set to %d , all table num %d", uid,
- ts, pTableScanInfo->currentTable, tableSz);
- /*}*/
+ qDebug("tsdb reader offset seek to uid %" PRId64 " ts %" PRId64 ", table cur set to %d , all table num %d", uid,
+ ts, pTableScanInfo->currentTable, tableSz);
+ /*}*/
+ } else {
+ ASSERT(0);
+ }
+ } else if (pOffset->type == TMQ_OFFSET__SNAPSHOT_DATA) {
+ SStreamRawScanInfo* pInfo = pOperator->info;
+ SSnapContext* sContext = pInfo->sContext;
+ if (setForSnapShot(sContext, pOffset->uid) != 0) {
+ qError("setDataForSnapShot error. uid:%" PRIi64, pOffset->uid);
+ return -1;
+ }
- } else {
- ASSERT(0);
- }
- return 0;
+ SMetaTableInfo mtInfo = getUidfromSnapShot(sContext);
+ tsdbReaderClose(pInfo->dataReader);
+ pInfo->dataReader = NULL;
+ cleanupQueryTableDataCond(&pTaskInfo->streamInfo.tableCond);
+ taosArrayDestroy(pTaskInfo->tableqinfoList.pTableList);
+ if (mtInfo.uid == 0) return 0; // no data
+
+ initQueryTableDataCondForTmq(&pTaskInfo->streamInfo.tableCond, sContext, mtInfo);
+ pTaskInfo->streamInfo.tableCond.twindows.skey = pOffset->ts;
+ pTaskInfo->tableqinfoList.pTableList = taosArrayInit(1, sizeof(STableKeyInfo));
+ taosArrayPush(pTaskInfo->tableqinfoList.pTableList, &(STableKeyInfo){.uid = mtInfo.uid, .groupId = 0});
+ tsdbReaderOpen(pInfo->vnode, &pTaskInfo->streamInfo.tableCond, pTaskInfo->tableqinfoList.pTableList,
+ &pInfo->dataReader, NULL);
+
+ strcpy(pTaskInfo->streamInfo.tbName, mtInfo.tbName);
+ tDeleteSSchemaWrapper(pTaskInfo->streamInfo.schema);
+ pTaskInfo->streamInfo.schema = mtInfo.schema;
+
+ qDebug("tmqsnap qStreamPrepareScan snapshot data uid %ld ts %ld", mtInfo.uid, pOffset->ts);
+ } else if (pOffset->type == TMQ_OFFSET__SNAPSHOT_META) {
+ SStreamRawScanInfo* pInfo = pOperator->info;
+ SSnapContext* sContext = pInfo->sContext;
+ if (setForSnapShot(sContext, pOffset->uid) != 0) {
+ qError("setForSnapShot error. uid:%" PRIi64 " ,version:%" PRIi64, pOffset->uid);
+ return -1;
}
+ qDebug("tmqsnap qStreamPrepareScan snapshot meta uid %ld ts %ld", pOffset->uid);
+ } else if (pOffset->type == TMQ_OFFSET__LOG) {
+ SStreamRawScanInfo* pInfo = pOperator->info;
+ tsdbReaderClose(pInfo->dataReader);
+ pInfo->dataReader = NULL;
+ qDebug("tmqsnap qStreamPrepareScan snapshot log");
}
return 0;
}
diff --git a/source/libs/executor/src/executorimpl.c b/source/libs/executor/src/executorimpl.c
index 46ef5700fc5f55e322dbc170a85a18030634510f..17954178b137cf3074236c3b37be0d2da32efa82 100644
--- a/source/libs/executor/src/executorimpl.c
+++ b/source/libs/executor/src/executorimpl.c
@@ -76,12 +76,6 @@ static UNUSED_FUNC void* u_realloc(void* p, size_t __size) {
#define realloc u_realloc
#endif
-#define T_LONG_JMP(_obj, _c) \
- do { \
- assert((_c) != -1); \
- longjmp((_obj), (_c)); \
- } while (0);
-
#define CLEAR_QUERY_STATUS(q, st) ((q)->status &= (~(st)))
#define QUERY_IS_INTERVAL_QUERY(_q) ((_q)->interval.interval > 0)
@@ -92,19 +86,17 @@ static int32_t getExprFunctionId(SExprInfo* pExprInfo) {
return 0;
}
-static void doSetTagValueToResultBuf(char* output, const char* val, int16_t type, int16_t bytes);
-
-static void setBlockSMAInfo(SqlFunctionCtx* pCtx, SExprInfo* pExpr, SSDataBlock* pSDataBlock);
+static void setBlockSMAInfo(SqlFunctionCtx* pCtx, SExprInfo* pExpr, SSDataBlock* pBlock);
static void releaseQueryBuf(size_t numOfTables);
-static void destroyFillOperatorInfo(void* param, int32_t numOfOutput);
-static void destroyProjectOperatorInfo(void* param, int32_t numOfOutput);
-static void destroyOrderOperatorInfo(void* param, int32_t numOfOutput);
-static void destroyAggOperatorInfo(void* param, int32_t numOfOutput);
+static void destroyFillOperatorInfo(void* param);
+static void destroyProjectOperatorInfo(void* param);
+static void destroyOrderOperatorInfo(void* param);
+static void destroyAggOperatorInfo(void* param);
-static void destroyIntervalOperatorInfo(void* param, int32_t numOfOutput);
-static void destroyExchangeOperatorInfo(void* param, int32_t numOfOutput);
+static void destroyIntervalOperatorInfo(void* param);
+static void destroyExchangeOperatorInfo(void* param);
static void destroyOperatorInfo(SOperatorInfo* pOperator);
@@ -148,20 +140,6 @@ static int32_t doCopyToSDataBlock(SExecTaskInfo* pTaskInfo, SSDataBlock* pBlock,
static void initCtxOutputBuffer(SqlFunctionCtx* pCtx, int32_t size);
static void doSetTableGroupOutputBuf(SOperatorInfo* pOperator, int32_t numOfOutput, uint64_t groupId);
-// setup the output buffer for each operator
-static bool hasNull(SColumn* pColumn, SColumnDataAgg* pStatis) {
- if (TSDB_COL_IS_TAG(pColumn->flag) || TSDB_COL_IS_UD_COL(pColumn->flag) ||
- pColumn->colId == PRIMARYKEY_TIMESTAMP_COL_ID) {
- return false;
- }
-
- if (pStatis != NULL && pStatis->numOfNull == 0) {
- return false;
- }
-
- return true;
-}
-
#if 0
static bool chkResultRowFromKey(STaskRuntimeEnv* pRuntimeEnv, SResultRowInfo* pResultRowInfo, char* pData,
int16_t bytes, bool masterscan, uint64_t uid) {
@@ -201,26 +179,23 @@ static bool chkResultRowFromKey(STaskRuntimeEnv* pRuntimeEnv, SResultRowInfo* pR
}
#endif
-SResultRow* getNewResultRow(SDiskbasedBuf* pResultBuf, int64_t tableGroupId, int32_t interBufSize) {
+SResultRow* getNewResultRow(SDiskbasedBuf* pResultBuf, int32_t* currentPageId, int32_t interBufSize) {
SFilePage* pData = NULL;
// in the first scan, new space needed for results
int32_t pageId = -1;
- SIDList list = getDataBufPagesIdList(pResultBuf, tableGroupId);
-
- if (taosArrayGetSize(list) == 0) {
- pData = getNewBufPage(pResultBuf, tableGroupId, &pageId);
+ if (*currentPageId == -1) {
+ pData = getNewBufPage(pResultBuf, &pageId);
pData->num = sizeof(SFilePage);
} else {
- SPageInfo* pi = getLastPageInfo(list);
- pData = getBufPage(pResultBuf, getPageId(pi));
- pageId = getPageId(pi);
+ pData = getBufPage(pResultBuf, *currentPageId);
+ pageId = *currentPageId;
if (pData->num + interBufSize > getBufPageSize(pResultBuf)) {
// release current page first, and prepare the next one
- releaseBufPageInfo(pResultBuf, pi);
+ releaseBufPage(pResultBuf, pData);
- pData = getNewBufPage(pResultBuf, tableGroupId, &pageId);
+ pData = getNewBufPage(pResultBuf, &pageId);
if (pData != NULL) {
pData->num = sizeof(SFilePage);
}
@@ -237,9 +212,9 @@ SResultRow* getNewResultRow(SDiskbasedBuf* pResultBuf, int64_t tableGroupId, int
SResultRow* pResultRow = (SResultRow*)((char*)pData + pData->num);
pResultRow->pageId = pageId;
pResultRow->offset = (int32_t)pData->num;
+ *currentPageId = pageId;
pData->num += interBufSize;
-
return pResultRow;
}
@@ -256,7 +231,7 @@ SResultRow* doSetResultOutBufByKey(SDiskbasedBuf* pResultBuf, SResultRowInfo* pR
SET_RES_WINDOW_KEY(pSup->keyBuf, pData, bytes, groupId);
SResultRowPosition* p1 =
- (SResultRowPosition*)taosHashGet(pSup->pResultRowHashTable, pSup->keyBuf, GET_RES_WINDOW_KEY_LEN(bytes));
+ (SResultRowPosition*)tSimpleHashGet(pSup->pResultRowHashTable, pSup->keyBuf, GET_RES_WINDOW_KEY_LEN(bytes));
SResultRow* pResult = NULL;
@@ -278,9 +253,6 @@ SResultRow* doSetResultOutBufByKey(SDiskbasedBuf* pResultBuf, SResultRowInfo* pR
// 1. close current opened time window
if (pResultRowInfo->cur.pageId != -1 && ((pResult == NULL) || (pResult->pageId != pResultRowInfo->cur.pageId))) {
-#ifdef BUF_PAGE_DEBUG
- qDebug("page_1");
-#endif
SResultRowPosition pos = pResultRowInfo->cur;
SFilePage* pPage = getBufPage(pResultBuf, pos.pageId);
releaseBufPage(pResultBuf, pPage);
@@ -288,18 +260,13 @@ SResultRow* doSetResultOutBufByKey(SDiskbasedBuf* pResultBuf, SResultRowInfo* pR
// allocate a new buffer page
if (pResult == NULL) {
-#ifdef BUF_PAGE_DEBUG
- qDebug("page_2");
-#endif
ASSERT(pSup->resultRowSize > 0);
- pResult = getNewResultRow(pResultBuf, groupId, pSup->resultRowSize);
-
- initResultRow(pResult);
+ pResult = getNewResultRow(pResultBuf, &pSup->currentPageId, pSup->resultRowSize);
// add a new result set for a new group
SResultRowPosition pos = {.pageId = pResult->pageId, .offset = pResult->offset};
- taosHashPut(pSup->pResultRowHashTable, pSup->keyBuf, GET_RES_WINDOW_KEY_LEN(bytes), &pos,
- sizeof(SResultRowPosition));
+ tSimpleHashPut(pSup->pResultRowHashTable, pSup->keyBuf, GET_RES_WINDOW_KEY_LEN(bytes), &pos,
+ sizeof(SResultRowPosition));
}
// 2. set the new time window to be the new active time window
@@ -307,8 +274,8 @@ SResultRow* doSetResultOutBufByKey(SDiskbasedBuf* pResultBuf, SResultRowInfo* pR
// too many time window in query
if (pTaskInfo->execModel == OPTR_EXEC_MODEL_BATCH &&
- taosHashGetSize(pSup->pResultRowHashTable) > MAX_INTERVAL_TIME_WINDOW) {
- longjmp(pTaskInfo->env, TSDB_CODE_QRY_TOO_MANY_TIMEWINDOW);
+ tSimpleHashGetSize(pSup->pResultRowHashTable) > MAX_INTERVAL_TIME_WINDOW) {
+ T_LONG_JMP(pTaskInfo->env, TSDB_CODE_QRY_TOO_MANY_TIMEWINDOW);
}
return pResult;
@@ -324,10 +291,10 @@ static int32_t addNewWindowResultBuf(SResultRow* pWindowRes, SDiskbasedBuf* pRes
// in the first scan, new space needed for results
int32_t pageId = -1;
- SIDList list = getDataBufPagesIdList(pResultBuf, tid);
+ SIDList list = getDataBufPagesIdList(pResultBuf);
if (taosArrayGetSize(list) == 0) {
- pData = getNewBufPage(pResultBuf, tid, &pageId);
+ pData = getNewBufPage(pResultBuf, &pageId);
pData->num = sizeof(SFilePage);
} else {
SPageInfo* pi = getLastPageInfo(list);
@@ -338,7 +305,7 @@ static int32_t addNewWindowResultBuf(SResultRow* pWindowRes, SDiskbasedBuf* pRes
// release current page first, and prepare the next one
releaseBufPageInfo(pResultBuf, pi);
- pData = getNewBufPage(pResultBuf, tid, &pageId);
+ pData = getNewBufPage(pResultBuf, &pageId);
if (pData != NULL) {
pData->num = sizeof(SFilePage);
}
@@ -392,7 +359,7 @@ static void functionCtxSave(SqlFunctionCtx* pCtx, SFunctionCtxStatus* pStatus) {
static void functionCtxRestore(SqlFunctionCtx* pCtx, SFunctionCtxStatus* pStatus) {
pCtx->input.colDataAggIsSet = pStatus->hasAgg;
- pCtx->input.numOfRows = pStatus->numOfRows;
+ pCtx->input.numOfRows = pStatus->numOfRows;
pCtx->input.startRowIndex = pStatus->startOffset;
}
@@ -434,7 +401,7 @@ void doApplyFunctions(SExecTaskInfo* taskInfo, SqlFunctionCtx* pCtx, SColumnInfo
if (code != TSDB_CODE_SUCCESS) {
qError("%s apply functions error, code: %s", GET_TASKID(taskInfo), tstrerror(code));
taskInfo->code = code;
- longjmp(taskInfo->env, code);
+ T_LONG_JMP(taskInfo->env, code);
}
}
@@ -625,7 +592,7 @@ int32_t projectApplyFunctions(SExprInfo* pExpr, SSDataBlock* pResult, SSDataBloc
if (pExpr[k].pExpr->nodeType == QUERY_NODE_COLUMN) { // it is a project query
SColumnInfoData* pColInfoData = taosArrayGet(pResult->pDataBlock, outputSlotId);
if (pResult->info.rows > 0 && !createNewColModel) {
- colDataMergeCol(pColInfoData, pResult->info.rows, &pResult->info.capacity, pInputData->pData[0],
+ colDataMergeCol(pColInfoData, pResult->info.rows, (int32_t*)&pResult->info.capacity, pInputData->pData[0],
pInputData->numOfRows);
} else {
colDataAssign(pColInfoData, pInputData->pData[0], pInputData->numOfRows, &pResult->info);
@@ -663,7 +630,7 @@ int32_t projectApplyFunctions(SExprInfo* pExpr, SSDataBlock* pResult, SSDataBloc
int32_t startOffset = createNewColModel ? 0 : pResult->info.rows;
ASSERT(pResult->info.capacity > 0);
- colDataMergeCol(pResColData, startOffset, &pResult->info.capacity, &idata, dest.numOfRows);
+ colDataMergeCol(pResColData, startOffset, (int32_t*)&pResult->info.capacity, &idata, dest.numOfRows);
colDataDestroy(&idata);
numOfRows = dest.numOfRows;
@@ -728,7 +695,7 @@ int32_t projectApplyFunctions(SExprInfo* pExpr, SSDataBlock* pResult, SSDataBloc
int32_t startOffset = createNewColModel ? 0 : pResult->info.rows;
ASSERT(pResult->info.capacity > 0);
- colDataMergeCol(pResColData, startOffset, &pResult->info.capacity, &idata, dest.numOfRows);
+ colDataMergeCol(pResColData, startOffset, (int32_t*)&pResult->info.capacity, &idata, dest.numOfRows);
colDataDestroy(&idata);
numOfRows = dest.numOfRows;
@@ -848,13 +815,6 @@ void setBlockSMAInfo(SqlFunctionCtx* pCtx, SExprInfo* pExprInfo, SSDataBlock* pB
} else {
pInput->colDataAggIsSet = false;
}
-
- // set the statistics data for primary time stamp column
- // if (pCtx->functionId == FUNCTION_SPREAD && pColumn->colId == PRIMARYKEY_TIMESTAMP_COL_ID) {
- // pCtx->isAggSet = true;
- // pCtx->agg.min = pBlock->info.window.skey;
- // pCtx->agg.max = pBlock->info.window.ekey;
- // }
}
bool isTaskKilled(SExecTaskInfo* pTaskInfo) {
@@ -891,146 +851,6 @@ STimeWindow getAlignQueryTimeWindow(SInterval* pInterval, int32_t precision, int
return win;
}
-#if 0
-static int32_t updateBlockLoadStatus(STaskAttr* pQuery, int32_t status) {
-
- bool hasFirstLastFunc = false;
- bool hasOtherFunc = false;
-
- if (status == BLK_DATA_DATA_LOAD || status == BLK_DATA_FILTEROUT) {
- return status;
- }
-
- for (int32_t i = 0; i < pQuery->numOfOutput; ++i) {
- int32_t functionId = getExprFunctionId(&pQuery->pExpr1[i]);
-
- if (functionId == FUNCTION_TS || functionId == FUNCTION_TS_DUMMY || functionId == FUNCTION_TAG ||
- functionId == FUNCTION_TAG_DUMMY) {
- continue;
- }
-
- if (functionId == FUNCTION_FIRST_DST || functionId == FUNCTION_LAST_DST) {
- hasFirstLastFunc = true;
- } else {
- hasOtherFunc = true;
- }
-
- }
-
- if (hasFirstLastFunc && status == BLK_DATA_NOT_LOAD) {
- if (!hasOtherFunc) {
- return BLK_DATA_FILTEROUT;
- } else {
- return BLK_DATA_DATA_LOAD;
- }
- }
-
- return status;
-}
-
-#endif
-
-// static void updateDataCheckOrder(SQInfo *pQInfo, SQueryTableReq* pQueryMsg, bool stableQuery) {
-// STaskAttr* pQueryAttr = pQInfo->runtimeEnv.pQueryAttr;
-//
-// // in case of point-interpolation query, use asc order scan
-// char msg[] = "QInfo:0x%"PRIx64" scan order changed for %s query, old:%d, new:%d, qrange exchanged, old qrange:%"
-// PRId64
-// "-%" PRId64 ", new qrange:%" PRId64 "-%" PRId64;
-//
-// // todo handle the case the the order irrelevant query type mixed up with order critical query type
-// // descending order query for last_row query
-// if (isFirstLastRowQuery(pQueryAttr)) {
-// //qDebug("QInfo:0x%"PRIx64" scan order changed for last_row query, old:%d, new:%d", pQInfo->qId,
-// pQueryAttr->order.order, TSDB_ORDER_ASC);
-//
-// pQueryAttr->order.order = TSDB_ORDER_ASC;
-// if (pQueryAttr->window.skey > pQueryAttr->window.ekey) {
-// TSWAP(pQueryAttr->window.skey, pQueryAttr->window.ekey);
-// }
-//
-// pQueryAttr->needReverseScan = false;
-// return;
-// }
-//
-// if (pQueryAttr->groupbyColumn && pQueryAttr->order.order == TSDB_ORDER_DESC) {
-// pQueryAttr->order.order = TSDB_ORDER_ASC;
-// if (pQueryAttr->window.skey > pQueryAttr->window.ekey) {
-// TSWAP(pQueryAttr->window.skey, pQueryAttr->window.ekey);
-// }
-//
-// pQueryAttr->needReverseScan = false;
-// doUpdateLastKey(pQueryAttr);
-// return;
-// }
-//
-// if (pQueryAttr->pointInterpQuery && pQueryAttr->interval.interval == 0) {
-// if (!QUERY_IS_ASC_QUERY(pQueryAttr)) {
-// //qDebug(msg, pQInfo->qId, "interp", pQueryAttr->order.order, TSDB_ORDER_ASC, pQueryAttr->window.skey,
-// pQueryAttr->window.ekey, pQueryAttr->window.ekey, pQueryAttr->window.skey); TSWAP(pQueryAttr->window.skey,
-// pQueryAttr->window.ekey, TSKEY);
-// }
-//
-// pQueryAttr->order.order = TSDB_ORDER_ASC;
-// return;
-// }
-//
-// if (pQueryAttr->interval.interval == 0) {
-// if (onlyFirstQuery(pQueryAttr)) {
-// if (!QUERY_IS_ASC_QUERY(pQueryAttr)) {
-// //qDebug(msg, pQInfo->qId, "only-first", pQueryAttr->order.order, TSDB_ORDER_ASC, pQueryAttr->window.skey,
-//// pQueryAttr->window.ekey, pQueryAttr->window.ekey, pQueryAttr->window.skey);
-//
-// TSWAP(pQueryAttr->window.skey, pQueryAttr->window.ekey);
-// doUpdateLastKey(pQueryAttr);
-// }
-//
-// pQueryAttr->order.order = TSDB_ORDER_ASC;
-// pQueryAttr->needReverseScan = false;
-// } else if (onlyLastQuery(pQueryAttr) && notContainSessionOrStateWindow(pQueryAttr)) {
-// if (QUERY_IS_ASC_QUERY(pQueryAttr)) {
-// //qDebug(msg, pQInfo->qId, "only-last", pQueryAttr->order.order, TSDB_ORDER_DESC, pQueryAttr->window.skey,
-//// pQueryAttr->window.ekey, pQueryAttr->window.ekey, pQueryAttr->window.skey);
-//
-// TSWAP(pQueryAttr->window.skey, pQueryAttr->window.ekey);
-// doUpdateLastKey(pQueryAttr);
-// }
-//
-// pQueryAttr->order.order = TSDB_ORDER_DESC;
-// pQueryAttr->needReverseScan = false;
-// }
-//
-// } else { // interval query
-// if (stableQuery) {
-// if (onlyFirstQuery(pQueryAttr)) {
-// if (!QUERY_IS_ASC_QUERY(pQueryAttr)) {
-// //qDebug(msg, pQInfo->qId, "only-first stable", pQueryAttr->order.order, TSDB_ORDER_ASC,
-//// pQueryAttr->window.skey, pQueryAttr->window.ekey, pQueryAttr->window.ekey,
-/// pQueryAttr->window.skey);
-//
-// TSWAP(pQueryAttr->window.skey, pQueryAttr->window.ekey);
-// doUpdateLastKey(pQueryAttr);
-// }
-//
-// pQueryAttr->order.order = TSDB_ORDER_ASC;
-// pQueryAttr->needReverseScan = false;
-// } else if (onlyLastQuery(pQueryAttr)) {
-// if (QUERY_IS_ASC_QUERY(pQueryAttr)) {
-// //qDebug(msg, pQInfo->qId, "only-last stable", pQueryAttr->order.order, TSDB_ORDER_DESC,
-//// pQueryAttr->window.skey, pQueryAttr->window.ekey, pQueryAttr->window.ekey,
-/// pQueryAttr->window.skey);
-//
-// TSWAP(pQueryAttr->window.skey, pQueryAttr->window.ekey);
-// doUpdateLastKey(pQueryAttr);
-// }
-//
-// pQueryAttr->order.order = TSDB_ORDER_DESC;
-// pQueryAttr->needReverseScan = false;
-// }
-// }
-// }
-//}
-
#if 0
static bool overlapWithTimeWindow(STaskAttr* pQueryAttr, SDataBlockInfo* pBlockInfo) {
STimeWindow w = {0};
@@ -1152,7 +972,7 @@ int32_t loadDataBlockOnDemand(SExecTaskInfo* pTaskInfo, STableScanInfo* pTableSc
if (setResultOutputBufByKey(pRuntimeEnv, pTableScanInfo->pResultRowInfo, pBlock->info.uid, &win, masterScan, &pResult, groupId,
pTableScanInfo->pCtx, pTableScanInfo->numOfOutput,
pTableScanInfo->rowEntryInfoOffset) != TSDB_CODE_SUCCESS) {
- longjmp(pRuntimeEnv->env, TSDB_CODE_QRY_OUT_OF_MEMORY);
+ T_LONG_JMP(pRuntimeEnv->env, TSDB_CODE_QRY_OUT_OF_MEMORY);
}
}
} else if (pQueryAttr->stableQuery && (!pQueryAttr->tsCompQuery) && (!pQueryAttr->diffQuery)) { // stable aggregate, not interval aggregate or normal column aggregate
@@ -1203,7 +1023,7 @@ int32_t loadDataBlockOnDemand(SExecTaskInfo* pTaskInfo, STableScanInfo* pTableSc
if (setResultOutputBufByKey(pRuntimeEnv, pTableScanInfo->pResultRowInfo, pBlock->info.uid, &win, masterScan, &pResult, groupId,
pTableScanInfo->pCtx, pTableScanInfo->numOfOutput,
pTableScanInfo->rowEntryInfoOffset) != TSDB_CODE_SUCCESS) {
- longjmp(pRuntimeEnv->env, TSDB_CODE_QRY_OUT_OF_MEMORY);
+ T_LONG_JMP(pRuntimeEnv->env, TSDB_CODE_QRY_OUT_OF_MEMORY);
}
}
}
@@ -1256,24 +1076,6 @@ static void updateTableQueryInfoForReverseScan(STableQueryInfo* pTableQueryInfo)
if (pTableQueryInfo == NULL) {
return;
}
-
- // TSWAP(pTableQueryInfo->win.skey, pTableQueryInfo->win.ekey);
- // pTableQueryInfo->lastKey = pTableQueryInfo->win.skey;
-
- // SWITCH_ORDER(pTableQueryInfo->cur.order);
- // pTableQueryInfo->cur.vgroupIndex = -1;
-
- // set the index to be the end slot of result rows array
- // SResultRowInfo* pResultRowInfo = &pTableQueryInfo->resInfo;
- // if (pResultRowInfo->size > 0) {
- // pResultRowInfo->curPos = pResultRowInfo->size - 1;
- // } else {
- // pResultRowInfo->curPos = -1;
- // }
-}
-
-void initResultRow(SResultRow* pResultRow) {
- // pResultRow->pEntryInfo = (struct SResultRowEntryInfo*)((char*)pResultRow + sizeof(SResultRow));
}
void setTaskStatus(SExecTaskInfo* pTaskInfo, int8_t status) {
@@ -1286,15 +1088,6 @@ void setTaskStatus(SExecTaskInfo* pTaskInfo, int8_t status) {
}
}
-void destroyTableQueryInfoImpl(STableQueryInfo* pTableQueryInfo) {
- if (pTableQueryInfo == NULL) {
- return;
- }
-
- // taosVariantDestroy(&pTableQueryInfo->tag);
- // cleanupResultRowInfo(&pTableQueryInfo->resInfo);
-}
-
void setResultRowInitCtx(SResultRow* pResult, SqlFunctionCtx* pCtx, int32_t numOfOutput, int32_t* rowEntryInfoOffset) {
bool init = false;
for (int32_t i = 0; i < numOfOutput; ++i) {
@@ -1495,7 +1288,7 @@ int32_t finalizeResultRowIntoResultDataBlock(SDiskbasedBuf* pBuf, SResultRowPosi
if (TAOS_FAILED(code)) {
releaseBufPage(pBuf, page);
qError("%s ensure result data capacity failed, code %s", GET_TASKID(pTaskInfo), tstrerror(code));
- longjmp(pTaskInfo->env, code);
+ T_LONG_JMP(pTaskInfo->env, code);
}
}
@@ -1507,7 +1300,7 @@ int32_t finalizeResultRowIntoResultDataBlock(SDiskbasedBuf* pBuf, SResultRowPosi
int32_t code = pCtx[j].fpSet.finalize(&pCtx[j], pBlock);
if (TAOS_FAILED(code)) {
qError("%s build result data block error, code %s", GET_TASKID(pTaskInfo), tstrerror(code));
- longjmp(pTaskInfo->env, code);
+ T_LONG_JMP(pTaskInfo->env, code);
}
} else if (strcmp(pCtx[j].pExpr->pExpr->_function.functionName, "_select_value") == 0) {
// do nothing, todo refactor
@@ -1581,7 +1374,7 @@ int32_t doCopyToSDataBlock(SExecTaskInfo* pTaskInfo, SSDataBlock* pBlock, SExprS
int32_t code = pCtx[j].fpSet.finalize(&pCtx[j], pBlock);
if (TAOS_FAILED(code)) {
qError("%s build result data block error, code %s", GET_TASKID(pTaskInfo), tstrerror(code));
- longjmp(pTaskInfo->env, code);
+ T_LONG_JMP(pTaskInfo->env, code);
}
} else if (strcmp(pCtx[j].pExpr->pExpr->_function.functionName, "_select_value") == 0) {
// do nothing, todo refactor
@@ -1590,16 +1383,8 @@ int32_t doCopyToSDataBlock(SExecTaskInfo* pTaskInfo, SSDataBlock* pBlock, SExprS
// the _wstart needs to copy to 20 following rows, since the results of top-k expands to 20 different rows.
SColumnInfoData* pColInfoData = taosArrayGet(pBlock->pDataBlock, slotId);
char* in = GET_ROWCELL_INTERBUF(pCtx[j].resultInfo);
- if (pCtx[j].increase) {
- int64_t ts = *(int64_t*)in;
- for (int32_t k = 0; k < pRow->numOfRows; ++k) {
- colDataAppend(pColInfoData, pBlock->info.rows + k, (const char*)&ts, pCtx[j].resultInfo->isNullRes);
- ts++;
- }
- } else {
- for (int32_t k = 0; k < pRow->numOfRows; ++k) {
- colDataAppend(pColInfoData, pBlock->info.rows + k, in, pCtx[j].resultInfo->isNullRes);
- }
+ for (int32_t k = 0; k < pRow->numOfRows; ++k) {
+ colDataAppend(pColInfoData, pBlock->info.rows + k, in, pCtx[j].resultInfo->isNullRes);
}
}
}
@@ -1736,7 +1521,7 @@ void queryCostStatis(SExecTaskInfo* pTaskInfo) {
// SDataBlockInfo blockInfo = SDATA_BLOCK_INITIALIZER;
// while (tsdbNextDataBlock(pTsdbReadHandle)) {
// if (isTaskKilled(pRuntimeEnv->qinfo)) {
-// longjmp(pRuntimeEnv->env, TSDB_CODE_TSC_QUERY_CANCELLED);
+// T_LONG_JMP(pRuntimeEnv->env, TSDB_CODE_TSC_QUERY_CANCELLED);
// }
//
// tsdbRetrieveDataBlockInfo(pTsdbReadHandle, &blockInfo);
@@ -1755,7 +1540,7 @@ void queryCostStatis(SExecTaskInfo* pTaskInfo) {
// }
//
// if (terrno != TSDB_CODE_SUCCESS) {
-// longjmp(pRuntimeEnv->env, terrno);
+// T_LONG_JMP(pRuntimeEnv->env, terrno);
// }
// }
@@ -1919,7 +1704,7 @@ void queryCostStatis(SExecTaskInfo* pTaskInfo) {
//
// // check for error
// if (terrno != TSDB_CODE_SUCCESS) {
-// longjmp(pRuntimeEnv->env, terrno);
+// T_LONG_JMP(pRuntimeEnv->env, terrno);
// }
//
// return true;
@@ -2081,13 +1866,11 @@ void updateLoadRemoteInfo(SLoadRemoteDataInfo* pInfo, int32_t numOfRows, int32_t
pOperator->resultInfo.totalRows += numOfRows;
}
-int32_t extractDataBlockFromFetchRsp(SSDataBlock* pRes, char* pData, int32_t numOfOutput, SArray* pColList,
- char** pNextStart) {
+int32_t extractDataBlockFromFetchRsp(SSDataBlock* pRes, char* pData, SArray* pColList, char** pNextStart) {
if (pColList == NULL) { // data from other sources
blockDataCleanup(pRes);
*pNextStart = (char*)blockDecode(pRes, pData);
} else { // extract data according to pColList
- ASSERT(numOfOutput == taosArrayGetSize(pColList));
char* pStart = pData;
int32_t numOfCols = htonl(*(int32_t*)pStart);
@@ -2185,7 +1968,7 @@ static void concurrentlyLoadRemoteDataImpl(SOperatorInfo* pOperator, SExchangeIn
char* pStart = pRetrieveRsp->data;
while (index++ < pRetrieveRsp->numOfBlocks) {
SSDataBlock* pb = createOneDataBlock(pExchangeInfo->pDummyBlock, false);
- code = extractDataBlockFromFetchRsp(pb, pStart, pRetrieveRsp->numOfCols, NULL, &pStart);
+ code = extractDataBlockFromFetchRsp(pb, pStart, NULL, &pStart);
if (code != 0) {
taosMemoryFreeClear(pDataInfo->pRsp);
goto _error;
@@ -2310,7 +2093,7 @@ static int32_t seqLoadRemoteData(SOperatorInfo* pOperator) {
SRetrieveTableRsp* pRetrieveRsp = pDataInfo->pRsp;
char* pStart = pRetrieveRsp->data;
- int32_t code = extractDataBlockFromFetchRsp(NULL, pStart, pRetrieveRsp->numOfCols, NULL, &pStart);
+ int32_t code = extractDataBlockFromFetchRsp(NULL, pStart, NULL, &pStart);
if (pRsp->completed == 1) {
qDebug("%s fetch msg rsp from vgId:%d, taskId:0x%" PRIx64 " execId:%d numOfRows:%d, rowsOfSource:%" PRIu64
@@ -2771,7 +2554,7 @@ static SSDataBlock* doSortedMerge(SOperatorInfo* pOperator) {
int32_t code = tsortOpen(pInfo->pSortHandle);
if (code != TSDB_CODE_SUCCESS) {
- longjmp(pTaskInfo->env, terrno);
+ T_LONG_JMP(pTaskInfo->env, terrno);
}
pOperator->status = OP_RES_TO_RETURN;
@@ -2854,92 +2637,6 @@ int32_t getTableScanInfo(SOperatorInfo* pOperator, int32_t* order, int32_t* scan
}
}
}
-#if 0
-int32_t doPrepareScan(SOperatorInfo* pOperator, uint64_t uid, int64_t ts) {
- uint8_t type = pOperator->operatorType;
-
- pOperator->status = OP_OPENED;
-
- if (type == QUERY_NODE_PHYSICAL_PLAN_STREAM_SCAN) {
- SStreamScanInfo* pScanInfo = pOperator->info;
- pScanInfo->blockType = STREAM_INPUT__TABLE_SCAN;
-
- pScanInfo->pTableScanOp->status = OP_OPENED;
-
- STableScanInfo* pInfo = pScanInfo->pTableScanOp->info;
- ASSERT(pInfo->scanMode == TABLE_SCAN__TABLE_ORDER);
-
- if (uid == 0) {
- pInfo->noTable = 1;
- return TSDB_CODE_SUCCESS;
- }
-
- /*if (pSnapShotScanInfo->dataReader == NULL) {*/
- /*pSnapShotScanInfo->dataReader = tsdbReaderOpen(pHandle->vnode, &pSTInfo->cond, tableList, 0, 0);*/
- /*pSnapShotScanInfo->scanMode = TABLE_SCAN__TABLE_ORDER;*/
- /*}*/
-
- pInfo->noTable = 0;
-
- if (pInfo->lastStatus.uid != uid || pInfo->lastStatus.ts != ts) {
- SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo;
-
- int32_t tableSz = taosArrayGetSize(pTaskInfo->tableqinfoList.pTableList);
- bool found = false;
- for (int32_t i = 0; i < tableSz; i++) {
- STableKeyInfo* pTableInfo = taosArrayGet(pTaskInfo->tableqinfoList.pTableList, i);
- if (pTableInfo->uid == uid) {
- found = true;
- pInfo->currentTable = i;
- }
- }
- // TODO after processing drop, found can be false
- ASSERT(found);
-
- tsdbSetTableId(pInfo->dataReader, uid);
- int64_t oldSkey = pInfo->cond.twindows.skey;
- pInfo->cond.twindows.skey = ts + 1;
- tsdbReaderReset(pInfo->dataReader, &pInfo->cond);
- pInfo->cond.twindows.skey = oldSkey;
- pInfo->scanTimes = 0;
-
- qDebug("tsdb reader offset seek to uid %" PRId64 " ts %" PRId64 ", table cur set to %d , all table num %d", uid, ts,
- pInfo->currentTable, tableSz);
- }
-
- return TSDB_CODE_SUCCESS;
-
- } else {
- if (pOperator->numOfDownstream == 1) {
- return doPrepareScan(pOperator->pDownstream[0], uid, ts);
- } else if (pOperator->numOfDownstream == 0) {
- qError("failed to find stream scan operator to set the input data block");
- return TSDB_CODE_QRY_APP_ERROR;
- } else {
- qError("join not supported for stream block scan");
- return TSDB_CODE_QRY_APP_ERROR;
- }
- }
-}
-
-int32_t doGetScanStatus(SOperatorInfo* pOperator, uint64_t* uid, int64_t* ts) {
- int32_t type = pOperator->operatorType;
- if (type == QUERY_NODE_PHYSICAL_PLAN_STREAM_SCAN) {
- SStreamScanInfo* pScanInfo = pOperator->info;
- STableScanInfo* pSnapShotScanInfo = pScanInfo->pTableScanOp->info;
- *uid = pSnapShotScanInfo->lastStatus.uid;
- *ts = pSnapShotScanInfo->lastStatus.ts;
- } else {
- if (pOperator->pDownstream[0] == NULL) {
- return TSDB_CODE_INVALID_PARA;
- } else {
- doGetScanStatus(pOperator->pDownstream[0], uid, ts);
- }
- }
-
- return TSDB_CODE_SUCCESS;
-}
-#endif
// this is a blocking operator
static int32_t doOpenAggregateOptr(SOperatorInfo* pOperator) {
@@ -2966,7 +2663,7 @@ static int32_t doOpenAggregateOptr(SOperatorInfo* pOperator) {
int32_t code = getTableScanInfo(pOperator, &order, &scanFlag);
if (code != TSDB_CODE_SUCCESS) {
- longjmp(pTaskInfo->env, code);
+ T_LONG_JMP(pTaskInfo->env, code);
}
// there is an scalar expression that needs to be calculated before apply the group aggregation.
@@ -2974,7 +2671,7 @@ static int32_t doOpenAggregateOptr(SOperatorInfo* pOperator) {
SExprSupp* pSup1 = &pAggInfo->scalarExprSup;
code = projectApplyFunctions(pSup1->pExprInfo, pBlock, pBlock, pSup1->pCtx, pSup1->numOfExprs, NULL);
if (code != TSDB_CODE_SUCCESS) {
- longjmp(pTaskInfo->env, code);
+ T_LONG_JMP(pTaskInfo->env, code);
}
}
@@ -2983,7 +2680,7 @@ static int32_t doOpenAggregateOptr(SOperatorInfo* pOperator) {
setInputDataBlock(pOperator, pSup->pCtx, pBlock, order, scanFlag, true);
code = doAggregateImpl(pOperator, pSup->pCtx);
if (code != 0) {
- longjmp(pTaskInfo->env, code);
+ T_LONG_JMP(pTaskInfo->env, code);
}
}
@@ -3036,7 +2733,7 @@ int32_t aggEncodeResultRow(SOperatorInfo* pOperator, char** result, int32_t* len
}
SOptrBasicInfo* pInfo = (SOptrBasicInfo*)(pOperator->info);
SAggSupporter* pSup = (SAggSupporter*)POINTER_SHIFT(pOperator->info, sizeof(SOptrBasicInfo));
- int32_t size = taosHashGetSize(pSup->pResultRowHashTable);
+ int32_t size = tSimpleHashGetSize(pSup->pResultRowHashTable);
size_t keyLen = sizeof(uint64_t) * 2; // estimate the key length
int32_t totalSize =
sizeof(int32_t) + sizeof(int32_t) + size * (sizeof(int32_t) + keyLen + sizeof(int32_t) + pSup->resultRowSize);
@@ -3064,9 +2761,10 @@ int32_t aggEncodeResultRow(SOperatorInfo* pOperator, char** result, int32_t* len
setBufPageDirty(pPage, true);
releaseBufPage(pSup->pResultBuf, pPage);
- void* pIter = taosHashIterate(pSup->pResultRowHashTable, NULL);
- while (pIter) {
- void* key = taosHashGetKey(pIter, &keyLen);
+ int32_t iter = 0;
+ void* pIter = NULL;
+ while ((pIter = tSimpleHashIterate(pSup->pResultRowHashTable, pIter, &iter))) {
+ void* key = tSimpleHashGetKey(pIter, &keyLen);
SResultRowPosition* p1 = (SResultRowPosition*)pIter;
pPage = (SFilePage*)getBufPage(pSup->pResultBuf, p1->pageId);
@@ -3097,8 +2795,6 @@ int32_t aggEncodeResultRow(SOperatorInfo* pOperator, char** result, int32_t* len
offset += sizeof(int32_t);
memcpy(*result + offset, pRow, pSup->resultRowSize);
offset += pSup->resultRowSize;
-
- pIter = taosHashIterate(pSup->pResultRowHashTable, pIter);
}
*(int32_t*)(*result) = offset;
@@ -3126,14 +2822,14 @@ int32_t aggDecodeResultRow(SOperatorInfo* pOperator, char* result) {
offset += sizeof(int32_t);
uint64_t tableGroupId = *(uint64_t*)(result + offset);
- SResultRow* resultRow = getNewResultRow(pSup->pResultBuf, tableGroupId, pSup->resultRowSize);
+ SResultRow* resultRow = getNewResultRow(pSup->pResultBuf, &pSup->currentPageId, pSup->resultRowSize);
if (!resultRow) {
return TSDB_CODE_TSC_INVALID_INPUT;
}
// add a new result set for a new group
SResultRowPosition pos = {.pageId = resultRow->pageId, .offset = resultRow->offset};
- taosHashPut(pSup->pResultRowHashTable, result + offset, keyLen, &pos, sizeof(SResultRowPosition));
+ tSimpleHashPut(pSup->pResultRowHashTable, result + offset, keyLen, &pos, sizeof(SResultRowPosition));
offset += keyLen;
int32_t valueLen = *(int32_t*)(result + offset);
@@ -3148,7 +2844,6 @@ int32_t aggDecodeResultRow(SOperatorInfo* pOperator, char* result) {
resultRow->offset = pOffset;
offset += valueLen;
- initResultRow(resultRow);
pInfo->resultRowInfo.cur = (SResultRowPosition){.pageId = resultRow->pageId, .offset = resultRow->offset};
// releaseBufPage(pSup->pResultBuf, getBufPage(pSup->pResultBuf, pageId));
}
@@ -3250,6 +2945,7 @@ static void doHandleRemainBlockForNewGroupImpl(SOperatorInfo* pOperator, SFillOp
Q_STATUS_EQUAL(pTaskInfo->status, TASK_COMPLETED) ? pInfo->win.ekey : pInfo->existNewGroupBlock->info.window.ekey;
taosResetFillInfo(pInfo->pFillInfo, getFillInfoStart(pInfo->pFillInfo));
+ blockDataCleanup(pInfo->pRes);
doApplyScalarCalculation(pOperator, pInfo->existNewGroupBlock, order, scanFlag);
taosFillSetStartInfo(pInfo->pFillInfo, pInfo->pRes->info.rows, ekey);
@@ -3312,7 +3008,6 @@ static SSDataBlock* doFillImpl(SOperatorInfo* pOperator) {
SSDataBlock* pResBlock = pInfo->pFinalRes;
blockDataCleanup(pResBlock);
- blockDataCleanup(pInfo->pRes);
int32_t order = TSDB_ORDER_ASC;
int32_t scanFlag = MAIN_SCAN;
@@ -3336,6 +3031,8 @@ static SSDataBlock* doFillImpl(SOperatorInfo* pOperator) {
taosFillSetStartInfo(pInfo->pFillInfo, 0, pInfo->win.ekey);
} else {
blockDataUpdateTsWindow(pBlock, pInfo->primarySrcSlotId);
+
+ blockDataCleanup(pInfo->pRes);
doApplyScalarCalculation(pOperator, pBlock, order, scanFlag);
if (pInfo->curGroupId == 0 || pInfo->curGroupId == pInfo->pRes->info.groupId) {
@@ -3378,7 +3075,6 @@ static SSDataBlock* doFillImpl(SOperatorInfo* pOperator) {
assert(pBlock != NULL);
blockDataCleanup(pResBlock);
- blockDataCleanup(pInfo->pRes);
doHandleRemainBlockForNewGroupImpl(pOperator, pInfo, pResultInfo, pTaskInfo);
if (pResBlock->info.rows > pResultInfo->threshold) {
@@ -3440,7 +3136,7 @@ static void destroyOperatorInfo(SOperatorInfo* pOperator) {
}
if (pOperator->fpSet.closeFn != NULL) {
- pOperator->fpSet.closeFn(pOperator->info, pOperator->exprSupp.numOfExprs);
+ pOperator->fpSet.closeFn(pOperator->info);
}
if (pOperator->pDownstream != NULL) {
@@ -3473,11 +3169,13 @@ int32_t getBufferPgSize(int32_t rowSize, uint32_t* defaultPgsz, uint32_t* defaul
int32_t doInitAggInfoSup(SAggSupporter* pAggSup, SqlFunctionCtx* pCtx, int32_t numOfOutput, size_t keyBufSize,
const char* pKey) {
+ int32_t code = 0;
_hash_fn_t hashFn = taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY);
+ pAggSup->currentPageId = -1;
pAggSup->resultRowSize = getResultRowSize(pCtx, numOfOutput);
pAggSup->keyBuf = taosMemoryCalloc(1, keyBufSize + POINTER_BYTES + sizeof(int64_t));
- pAggSup->pResultRowHashTable = taosHashInit(10, hashFn, true, HASH_NO_LOCK);
+ pAggSup->pResultRowHashTable = tSimpleHashInit(10, hashFn);
if (pAggSup->keyBuf == NULL || pAggSup->pResultRowHashTable == NULL) {
return TSDB_CODE_OUT_OF_MEMORY;
@@ -3488,23 +3186,23 @@ int32_t doInitAggInfoSup(SAggSupporter* pAggSup, SqlFunctionCtx* pCtx, int32_t n
getBufferPgSize(pAggSup->resultRowSize, &defaultPgsz, &defaultBufsz);
if (!osTempSpaceAvailable()) {
- terrno = TSDB_CODE_NO_AVAIL_DISK;
- qError("Init stream agg supporter failed since %s", terrstr(terrno));
- return terrno;
+ code = TSDB_CODE_NO_AVAIL_DISK;
+ qError("Init stream agg supporter failed since %s, %s", terrstr(code), pKey);
+ return code;
}
- int32_t code = createDiskbasedBuf(&pAggSup->pResultBuf, defaultPgsz, defaultBufsz, pKey, tsTempDir);
+ code = createDiskbasedBuf(&pAggSup->pResultBuf, defaultPgsz, defaultBufsz, pKey, tsTempDir);
if (code != TSDB_CODE_SUCCESS) {
- qError("Create agg result buf failed since %s", tstrerror(code));
+ qError("Create agg result buf failed since %s, %s", tstrerror(code), pKey);
return code;
}
- return TSDB_CODE_SUCCESS;
+ return code;
}
void cleanupAggSup(SAggSupporter* pAggSup) {
taosMemoryFreeClear(pAggSup->keyBuf);
- taosHashCleanup(pAggSup->pResultRowHashTable);
+ tSimpleHashCleanup(pAggSup->pResultRowHashTable);
destroyDiskbasedBuf(pAggSup->pResultBuf);
}
@@ -3521,7 +3219,7 @@ int32_t initAggInfo(SExprSupp* pSup, SAggSupporter* pAggSup, SExprInfo* pExprInf
}
for (int32_t i = 0; i < numOfCols; ++i) {
- pSup->pCtx[i].pBuf = pAggSup->pResultBuf;
+ pSup->pCtx[i].saveHandle.pBuf = pAggSup->pResultBuf;
}
return TSDB_CODE_SUCCESS;
@@ -3553,6 +3251,7 @@ void* destroySqlFunctionCtx(SqlFunctionCtx* pCtx, int32_t numOfOutput) {
}
taosMemoryFreeClear(pCtx[i].subsidiaries.pCtx);
+ taosMemoryFreeClear(pCtx[i].subsidiaries.buf);
taosMemoryFree(pCtx[i].input.pData);
taosMemoryFree(pCtx[i].input.pColumnDataAgg);
}
@@ -3632,7 +3331,7 @@ SOperatorInfo* createAggregateOperatorInfo(SOperatorInfo* downstream, SExprInfo*
return pOperator;
_error:
- destroyAggOperatorInfo(pInfo, numOfCols);
+ destroyAggOperatorInfo(pInfo);
taosMemoryFreeClear(pOperator);
pTaskInfo->code = TSDB_CODE_OUT_OF_MEMORY;
return NULL;
@@ -3640,7 +3339,6 @@ _error:
void cleanupBasicInfo(SOptrBasicInfo* pInfo) {
assert(pInfo != NULL);
- cleanupResultRowInfo(&pInfo->resultRowInfo);
pInfo->pRes = blockDataDestroy(pInfo->pRes);
}
@@ -3658,7 +3356,7 @@ static void freeItem(void* pItem) {
}
}
-void destroyAggOperatorInfo(void* param, int32_t numOfOutput) {
+void destroyAggOperatorInfo(void* param) {
SAggOperatorInfo* pInfo = (SAggOperatorInfo*)param;
cleanupBasicInfo(&pInfo->binfo);
@@ -3668,7 +3366,7 @@ void destroyAggOperatorInfo(void* param, int32_t numOfOutput) {
taosMemoryFreeClear(param);
}
-void destroyFillOperatorInfo(void* param, int32_t numOfOutput) {
+void destroyFillOperatorInfo(void* param) {
SFillOperatorInfo* pInfo = (SFillOperatorInfo*)param;
pInfo->pFillInfo = taosDestroyFillInfo(pInfo->pFillInfo);
pInfo->pRes = blockDataDestroy(pInfo->pRes);
@@ -3684,7 +3382,7 @@ void destroyFillOperatorInfo(void* param, int32_t numOfOutput) {
taosMemoryFreeClear(param);
}
-void destroyExchangeOperatorInfo(void* param, int32_t numOfOutput) {
+void destroyExchangeOperatorInfo(void* param) {
SExchangeInfo* pExInfo = (SExchangeInfo*)param;
taosRemoveRef(exchangeObjRefPool, pExInfo->self);
}
@@ -3716,7 +3414,7 @@ static int32_t initFillInfo(SFillOperatorInfo* pInfo, SExprInfo* pExpr, int32_t
const char* id, SInterval* pInterval, int32_t fillType, int32_t order) {
SFillColInfo* pColInfo = createFillColInfo(pExpr, numOfCols, pNotFillExpr, numOfNotFillCols, pValNode);
- int64_t startKey = (order == TSDB_ORDER_ASC) ? win.skey : win.ekey;
+ int64_t startKey = (order == TSDB_ORDER_ASC) ? win.skey : win.ekey;
STimeWindow w = getAlignQueryTimeWindow(pInterval, pInterval->precision, startKey);
w = getFirstQualifiedTimeWindow(startKey, &w, pInterval, order);
@@ -3741,6 +3439,44 @@ static int32_t initFillInfo(SFillOperatorInfo* pInfo, SExprInfo* pExpr, int32_t
}
}
+static bool isWstartColumnExist(SFillOperatorInfo* pInfo) {
+ if (pInfo->numOfNotFillExpr == 0) {
+ return false;
+ }
+ for (int32_t i = 0; i < pInfo->numOfNotFillExpr; ++i) {
+ SExprInfo* exprInfo = pInfo->pNotFillExprInfo + i;
+ if (exprInfo->pExpr->nodeType == QUERY_NODE_COLUMN && exprInfo->base.numOfParams == 1 &&
+ exprInfo->base.pParam[0].pCol->colType == COLUMN_TYPE_WINDOW_START) {
+ return true;
+ }
+ }
+ return false;
+}
+
+static int32_t createWStartTsAsNotFillExpr(SFillOperatorInfo* pInfo, SFillPhysiNode* pPhyFillNode) {
+ bool wstartExist = isWstartColumnExist(pInfo);
+ if (wstartExist == false) {
+ if (pPhyFillNode->pWStartTs->type != QUERY_NODE_TARGET) {
+ qError("pWStartTs of fill physical node is not a target node");
+ return TSDB_CODE_QRY_SYS_ERROR;
+ }
+
+ SExprInfo* notFillExprs =
+ taosMemoryRealloc(pInfo->pNotFillExprInfo, (pInfo->numOfNotFillExpr + 1) * sizeof(SExprInfo));
+ if (notFillExprs == NULL) {
+ return TSDB_CODE_OUT_OF_MEMORY;
+ }
+
+ createExprFromTargetNode(notFillExprs + pInfo->numOfNotFillExpr, (STargetNode*)pPhyFillNode->pWStartTs);
+
+ ++pInfo->numOfNotFillExpr;
+ pInfo->pNotFillExprInfo = notFillExprs;
+ return TSDB_CODE_SUCCESS;
+ }
+
+ return TSDB_CODE_SUCCESS;
+}
+
SOperatorInfo* createFillOperatorInfo(SOperatorInfo* downstream, SFillPhysiNode* pPhyFillNode,
SExecTaskInfo* pTaskInfo) {
SFillOperatorInfo* pInfo = taosMemoryCalloc(1, sizeof(SFillOperatorInfo));
@@ -3752,7 +3488,10 @@ SOperatorInfo* createFillOperatorInfo(SOperatorInfo* downstream, SFillPhysiNode*
SSDataBlock* pResBlock = createResDataBlock(pPhyFillNode->node.pOutputDataBlockDesc);
SExprInfo* pExprInfo = createExprInfo(pPhyFillNode->pFillExprs, NULL, &pInfo->numOfExpr);
pInfo->pNotFillExprInfo = createExprInfo(pPhyFillNode->pNotFillExprs, NULL, &pInfo->numOfNotFillExpr);
-
+ int32_t code = createWStartTsAsNotFillExpr(pInfo, pPhyFillNode);
+ if (code != TSDB_CODE_SUCCESS) {
+ goto _error;
+ }
SInterval* pInterval =
QUERY_NODE_PHYSICAL_PLAN_MERGE_ALIGNED_INTERVAL == downstream->operatorType
? &((SMergeAlignedIntervalAggOperatorInfo*)downstream->info)->intervalAggOperatorInfo->interval
@@ -3773,9 +3512,9 @@ SOperatorInfo* createFillOperatorInfo(SOperatorInfo* downstream, SFillPhysiNode*
SArray* pColMatchColInfo = extractColMatchInfo(pPhyFillNode->pFillExprs, pPhyFillNode->node.pOutputDataBlockDesc,
&numOfOutputCols, COL_MATCH_FROM_SLOT_ID);
- int32_t code = initFillInfo(pInfo, pExprInfo, pInfo->numOfExpr, pInfo->pNotFillExprInfo, pInfo->numOfNotFillExpr,
- (SNodeListNode*)pPhyFillNode->pValues, pPhyFillNode->timeRange, pResultInfo->capacity,
- pTaskInfo->id.str, pInterval, type, order);
+ code = initFillInfo(pInfo, pExprInfo, pInfo->numOfExpr, pInfo->pNotFillExprInfo, pInfo->numOfNotFillExpr,
+ (SNodeListNode*)pPhyFillNode->pValues, pPhyFillNode->timeRange, pResultInfo->capacity,
+ pTaskInfo->id.str, pInterval, type, order);
if (code != TSDB_CODE_SUCCESS) {
goto _error;
}
@@ -3989,15 +3728,15 @@ int32_t generateGroupIdMap(STableListInfo* pTableListInfo, SReadHandle* pHandle,
bool assignUid = groupbyTbname(group);
- size_t numOfTables = taosArrayGetSize(pTableListInfo->pTableList);
+ size_t numOfTables = taosArrayGetSize(pTableListInfo->pTableList);
- if(assignUid){
+ if (assignUid) {
for (int32_t i = 0; i < numOfTables; i++) {
STableKeyInfo* info = taosArrayGet(pTableListInfo->pTableList, i);
info->groupId = info->uid;
taosHashPut(pTableListInfo->map, &(info->uid), sizeof(uint64_t), &info->groupId, sizeof(uint64_t));
}
- }else{
+ } else {
int32_t code = getColInfoResultForGroupby(pHandle->meta, group, pTableListInfo);
if (code != TSDB_CODE_SUCCESS) {
return code;
@@ -4050,7 +3789,7 @@ SOperatorInfo* createOperatorTree(SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo
pTableListInfo, pTagCond, pTagIndexCond, GET_TASKID(pTaskInfo));
if (code) {
pTaskInfo->code = code;
- qError("failed to createScanTableListInfo, code: %s", tstrerror(code));
+ qError("failed to createScanTableListInfo, code:%s, %s", tstrerror(code), GET_TASKID(pTaskInfo));
return NULL;
}
@@ -4104,8 +3843,8 @@ SOperatorInfo* createOperatorTree(SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo
STableKeyInfo* pKeyInfo = taosArrayGet(pTableListInfo->pTableList, i);
qDebug("creating stream task: add table %" PRId64, pKeyInfo->uid);
}
- }
#endif
+ }
pTaskInfo->schemaInfo.qsw = extractQueriedColumnSchema(&pTableScanNode->scan);
pOperator = createStreamScanOperatorInfo(pHandle, pTableScanNode, pTagCond, pTaskInfo);
@@ -4164,7 +3903,7 @@ SOperatorInfo* createOperatorTree(SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo
return NULL;
}
- pOperator = createLastrowScanOperator(pScanNode, pHandle, pTaskInfo);
+ pOperator = createCacherowsScanOperator(pScanNode, pHandle, pTaskInfo);
} else if (QUERY_NODE_PHYSICAL_PLAN_PROJECT == type) {
pOperator = createProjectOperatorInfo(NULL, (SProjectPhysiNode*)pPhyNode, pTaskInfo);
} else {
@@ -4188,9 +3927,9 @@ SOperatorInfo* createOperatorTree(SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo
if (ops[i] == NULL) {
taosMemoryFree(ops);
return NULL;
- } else {
- ops[i]->resultDataBlockId = pChildNode->pOutputDataBlockDesc->dataBlockId;
}
+
+ ops[i]->resultDataBlockId = pChildNode->pOutputDataBlockDesc->dataBlockId;
}
SOperatorInfo* pOptr = NULL;
@@ -4215,7 +3954,7 @@ SOperatorInfo* createOperatorTree(SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo
pOptr = createAggregateOperatorInfo(ops[0], pExprInfo, num, pResBlock, pAggNode->node.pConditions,
pScalarExprInfo, numOfScalarExpr, pAggNode->mergeDataBlock, pTaskInfo);
}
- } else if (QUERY_NODE_PHYSICAL_PLAN_HASH_INTERVAL == type || QUERY_NODE_PHYSICAL_PLAN_STREAM_INTERVAL == type) {
+ } else if (QUERY_NODE_PHYSICAL_PLAN_HASH_INTERVAL == type) {
SIntervalPhysiNode* pIntervalPhyNode = (SIntervalPhysiNode*)pPhyNode;
SExprInfo* pExprInfo = createExprInfo(pIntervalPhyNode->window.pFuncs, NULL, &num);
@@ -4240,39 +3979,14 @@ SOperatorInfo* createOperatorTree(SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo
pOptr = createIntervalOperatorInfo(ops[0], pExprInfo, num, pResBlock, &interval, tsSlotId, &as, pIntervalPhyNode,
pTaskInfo, isStream);
+ } else if (QUERY_NODE_PHYSICAL_PLAN_STREAM_INTERVAL == type) {
+ pOptr = createStreamIntervalOperatorInfo(ops[0], pPhyNode, pTaskInfo);
} else if (QUERY_NODE_PHYSICAL_PLAN_MERGE_ALIGNED_INTERVAL == type) {
SMergeAlignedIntervalPhysiNode* pIntervalPhyNode = (SMergeAlignedIntervalPhysiNode*)pPhyNode;
-
- SExprInfo* pExprInfo = createExprInfo(pIntervalPhyNode->window.pFuncs, NULL, &num);
- SSDataBlock* pResBlock = createResDataBlock(pPhyNode->pOutputDataBlockDesc);
-
- SInterval interval = {.interval = pIntervalPhyNode->interval,
- .sliding = pIntervalPhyNode->sliding,
- .intervalUnit = pIntervalPhyNode->intervalUnit,
- .slidingUnit = pIntervalPhyNode->slidingUnit,
- .offset = pIntervalPhyNode->offset,
- .precision = ((SColumnNode*)pIntervalPhyNode->window.pTspk)->node.resType.precision};
-
- int32_t tsSlotId = ((SColumnNode*)pIntervalPhyNode->window.pTspk)->slotId;
- pOptr = createMergeAlignedIntervalOperatorInfo(ops[0], pExprInfo, num, pResBlock, &interval, tsSlotId,
- pPhyNode->pConditions, pIntervalPhyNode->window.mergeDataBlock,
- pTaskInfo);
+ pOptr = createMergeAlignedIntervalOperatorInfo(ops[0], pIntervalPhyNode, pTaskInfo);
} else if (QUERY_NODE_PHYSICAL_PLAN_MERGE_INTERVAL == type) {
SMergeIntervalPhysiNode* pIntervalPhyNode = (SMergeIntervalPhysiNode*)pPhyNode;
-
- SExprInfo* pExprInfo = createExprInfo(pIntervalPhyNode->window.pFuncs, NULL, &num);
- SSDataBlock* pResBlock = createResDataBlock(pPhyNode->pOutputDataBlockDesc);
-
- SInterval interval = {.interval = pIntervalPhyNode->interval,
- .sliding = pIntervalPhyNode->sliding,
- .intervalUnit = pIntervalPhyNode->intervalUnit,
- .slidingUnit = pIntervalPhyNode->slidingUnit,
- .offset = pIntervalPhyNode->offset,
- .precision = ((SColumnNode*)pIntervalPhyNode->window.pTspk)->node.resType.precision};
-
- int32_t tsSlotId = ((SColumnNode*)pIntervalPhyNode->window.pTspk)->slotId;
- pOptr = createMergeIntervalOperatorInfo(ops[0], pExprInfo, num, pResBlock, &interval, tsSlotId,
- pIntervalPhyNode->window.mergeDataBlock, pTaskInfo);
+ pOptr = createMergeIntervalOperatorInfo(ops[0], pIntervalPhyNode, pTaskInfo);
} else if (QUERY_NODE_PHYSICAL_PLAN_STREAM_SEMI_INTERVAL == type) {
int32_t children = 0;
pOptr = createStreamFinalIntervalOperatorInfo(ops[0], pPhyNode, pTaskInfo, children);
@@ -4299,19 +4013,11 @@ SOperatorInfo* createOperatorTree(SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo
pOptr = createStreamFinalSessionAggOperatorInfo(ops[0], pPhyNode, pTaskInfo, children);
} else if (QUERY_NODE_PHYSICAL_PLAN_PARTITION == type) {
pOptr = createPartitionOperatorInfo(ops[0], (SPartitionPhysiNode*)pPhyNode, pTaskInfo);
+ } else if (QUERY_NODE_PHYSICAL_PLAN_STREAM_PARTITION == type) {
+ pOptr = createStreamPartitionOperatorInfo(ops[0], (SPartitionPhysiNode*)pPhyNode, pTaskInfo);
} else if (QUERY_NODE_PHYSICAL_PLAN_MERGE_STATE == type) {
SStateWinodwPhysiNode* pStateNode = (SStateWinodwPhysiNode*)pPhyNode;
-
- STimeWindowAggSupp as = {.waterMark = pStateNode->window.watermark, .calTrigger = pStateNode->window.triggerType};
-
- SExprInfo* pExprInfo = createExprInfo(pStateNode->window.pFuncs, NULL, &num);
- SSDataBlock* pResBlock = createResDataBlock(pPhyNode->pOutputDataBlockDesc);
- int32_t tsSlotId = ((SColumnNode*)pStateNode->window.pTspk)->slotId;
-
- SColumnNode* pColNode = (SColumnNode*)((STargetNode*)pStateNode->pStateKey)->pExpr;
- SColumn col = extractColumnFromColumnNode(pColNode);
- pOptr = createStatewindowOperatorInfo(ops[0], pExprInfo, num, pResBlock, &as, tsSlotId, &col, pPhyNode->pConditions,
- pTaskInfo);
+ pOptr = createStatewindowOperatorInfo(ops[0], pStateNode, pTaskInfo);
} else if (QUERY_NODE_PHYSICAL_PLAN_STREAM_STATE == type) {
pOptr = createStreamStateAggOperatorInfo(ops[0], pPhyNode, pTaskInfo);
} else if (QUERY_NODE_PHYSICAL_PLAN_MERGE_JOIN == type) {
@@ -4325,8 +4031,12 @@ SOperatorInfo* createOperatorTree(SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo
} else {
ASSERT(0);
}
+
taosMemoryFree(ops);
- if (pOptr) pOptr->resultDataBlockId = pPhyNode->pOutputDataBlockDesc->dataBlockId;
+ if (pOptr) {
+ pOptr->resultDataBlockId = pPhyNode->pOutputDataBlockDesc->dataBlockId;
+ }
+
return pOptr;
}
@@ -4363,42 +4073,6 @@ SArray* extractColumnInfo(SNodeList* pNodeList) {
return pList;
}
-#if 0
-STsdbReader* doCreateDataReader(STableScanPhysiNode* pTableScanNode, SReadHandle* pHandle,
- STableListInfo* pTableListInfo, const char* idstr) {
- int32_t code = getTableList(pHandle->meta, pHandle->vnode, &pTableScanNode->scan, pTableListInfo);
- if (code != TSDB_CODE_SUCCESS) {
- goto _error;
- }
-
- if (taosArrayGetSize(pTableListInfo->pTableList) == 0) {
- code = 0;
- qDebug("no table qualified for query, %s", idstr);
- goto _error;
- }
-
- SQueryTableDataCond cond = {0};
- code = initQueryTableDataCond(&cond, pTableScanNode);
- if (code != TSDB_CODE_SUCCESS) {
- goto _error;
- }
-
- STsdbReader* pReader;
- code = tsdbReaderOpen(pHandle->vnode, &cond, pTableListInfo->pTableList, &pReader, idstr);
- if (code != TSDB_CODE_SUCCESS) {
- goto _error;
- }
-
- cleanupQueryTableDataCond(&cond);
-
- return pReader;
-
-_error:
- terrno = code;
- return NULL;
-}
-#endif
-
static int32_t extractTbscanInStreamOpTree(SOperatorInfo* pOperator, STableScanInfo** ppInfo) {
if (pOperator->operatorType != QUERY_NODE_PHYSICAL_PLAN_STREAM_SCAN) {
if (pOperator->numOfDownstream == 0) {
@@ -4616,6 +4290,10 @@ int32_t createExecTaskInfoImpl(SSubplan* pPlan, SExecTaskInfo** pTaskInfo, SRead
goto _complete;
}
+ if (pHandle && pHandle->pStateBackend) {
+ (*pTaskInfo)->streamInfo.pState = pHandle->pStateBackend;
+ }
+
(*pTaskInfo)->sql = sql;
sql = NULL;
(*pTaskInfo)->pSubplan = pPlan;
@@ -4636,7 +4314,7 @@ _complete:
return code;
}
-static void doDestroyTableList(STableListInfo* pTableqinfoList) {
+void doDestroyTableList(STableListInfo* pTableqinfoList) {
taosArrayDestroy(pTableqinfoList->pTableList);
taosHashCleanup(pTableqinfoList->map);
if (pTableqinfoList->needSortTableByGroupId) {
@@ -4668,27 +4346,6 @@ void doDestroyTask(SExecTaskInfo* pTaskInfo) {
taosMemoryFreeClear(pTaskInfo);
}
-static void doSetTagValueToResultBuf(char* output, const char* val, int16_t type, int16_t bytes) {
- if (val == NULL) {
- setNull(output, type, bytes);
- return;
- }
-
- if (IS_VAR_DATA_TYPE(type)) {
- // Binary data overflows for sort of unknown reasons. Let trim the overflow data
- if (varDataTLen(val) > bytes) {
- int32_t maxLen = bytes - VARSTR_HEADER_SIZE;
- int32_t len = (varDataLen(val) > maxLen) ? maxLen : varDataLen(val);
- memcpy(varDataVal(output), varDataVal(val), len);
- varDataSetLen(output, len);
- } else {
- varDataCopy(output, val);
- }
- } else {
- memcpy(output, val, bytes);
- }
-}
-
static int64_t getQuerySupportBufSize(size_t numOfTables) {
size_t s1 = sizeof(STableQueryInfo);
// size_t s3 = sizeof(STableCheckInfo); buffer consumption in tsdb
@@ -4761,6 +4418,7 @@ int32_t getOperatorExplainExecInfo(SOperatorInfo* operatorInfo, SArray* pExecInf
int32_t initStreamAggSupporter(SStreamAggSupporter* pSup, const char* pKey, SqlFunctionCtx* pCtx, int32_t numOfOutput,
int32_t size) {
+ pSup->currentPageId = -1;
pSup->resultRowSize = getResultRowSize(pCtx, numOfOutput);
pSup->keySize = sizeof(int64_t) + sizeof(TSKEY);
pSup->pKeyBuf = taosMemoryCalloc(1, pSup->keySize);
@@ -4788,7 +4446,117 @@ int32_t initStreamAggSupporter(SStreamAggSupporter* pSup, const char* pKey, SqlF
}
int32_t code = createDiskbasedBuf(&pSup->pResultBuf, pageSize, bufSize, pKey, tsTempDir);
for (int32_t i = 0; i < numOfOutput; ++i) {
- pCtx[i].pBuf = pSup->pResultBuf;
+ pCtx[i].saveHandle.pBuf = pSup->pResultBuf;
}
+
return code;
}
+
+int32_t setOutputBuf(STimeWindow* win, SResultRow** pResult, int64_t tableGroupId, SqlFunctionCtx* pCtx,
+ int32_t numOfOutput, int32_t* rowEntryInfoOffset, SAggSupporter* pAggSup,
+ SExecTaskInfo* pTaskInfo) {
+ SWinKey key = {
+ .ts = win->skey,
+ .groupId = tableGroupId,
+ };
+ char* value = NULL;
+ int32_t size = pAggSup->resultRowSize;
+ /*if (streamStateGet(pTaskInfo->streamInfo.pState, &key, (void**)&value, &size) < 0) {*/
+ /*value = taosMemoryCalloc(1, size);*/
+ /*}*/
+ if (streamStateAddIfNotExist(pTaskInfo->streamInfo.pState, &key, (void**)&value, &size) < 0) {
+ return TSDB_CODE_QRY_OUT_OF_MEMORY;
+ }
+ *pResult = (SResultRow*)value;
+ ASSERT(*pResult);
+ // set time window for current result
+ (*pResult)->win = (*win);
+ setResultRowInitCtx(*pResult, pCtx, numOfOutput, rowEntryInfoOffset);
+ return TSDB_CODE_SUCCESS;
+}
+
+int32_t releaseOutputBuf(SExecTaskInfo* pTaskInfo, SWinKey* pKey, SResultRow* pResult) {
+ streamStateReleaseBuf(pTaskInfo->streamInfo.pState, pKey, pResult);
+ /*taosMemoryFree((*(void**)pResult));*/
+ return TSDB_CODE_SUCCESS;
+}
+
+int32_t saveOutput(SExecTaskInfo* pTaskInfo, SWinKey* pKey, SResultRow* pResult, int32_t resSize) {
+ streamStatePut(pTaskInfo->streamInfo.pState, pKey, pResult, resSize);
+ return TSDB_CODE_SUCCESS;
+}
+
+int32_t buildDataBlockFromGroupRes(SExecTaskInfo* pTaskInfo, SSDataBlock* pBlock, SExprSupp* pSup,
+ SGroupResInfo* pGroupResInfo) {
+ SExprInfo* pExprInfo = pSup->pExprInfo;
+ int32_t numOfExprs = pSup->numOfExprs;
+ int32_t* rowEntryOffset = pSup->rowEntryInfoOffset;
+ SqlFunctionCtx* pCtx = pSup->pCtx;
+
+ int32_t numOfRows = getNumOfTotalRes(pGroupResInfo);
+
+ for (int32_t i = pGroupResInfo->index; i < numOfRows; i += 1) {
+ SResKeyPos* pPos = taosArrayGetP(pGroupResInfo->pRows, i);
+ int32_t size = 0;
+ void* pVal = NULL;
+ SWinKey key = {
+ .ts = *(TSKEY*)pPos->key,
+ .groupId = pPos->groupId,
+ };
+ int32_t code = streamStateGet(pTaskInfo->streamInfo.pState, &key, &pVal, &size);
+ ASSERT(code == 0);
+ SResultRow* pRow = (SResultRow*)pVal;
+ doUpdateNumOfRows(pCtx, pRow, numOfExprs, rowEntryOffset);
+ // no results, continue to check the next one
+ if (pRow->numOfRows == 0) {
+ pGroupResInfo->index += 1;
+ releaseOutputBuf(pTaskInfo, &key, pRow);
+ continue;
+ }
+
+ if (pBlock->info.groupId == 0) {
+ pBlock->info.groupId = pPos->groupId;
+ } else {
+ // current value belongs to different group, it can't be packed into one datablock
+ if (pBlock->info.groupId != pPos->groupId) {
+ releaseOutputBuf(pTaskInfo, &key, pRow);
+ break;
+ }
+ }
+
+ if (pBlock->info.rows + pRow->numOfRows > pBlock->info.capacity) {
+ ASSERT(pBlock->info.rows > 0);
+ releaseOutputBuf(pTaskInfo, &key, pRow);
+ break;
+ }
+
+ pGroupResInfo->index += 1;
+
+ for (int32_t j = 0; j < numOfExprs; ++j) {
+ int32_t slotId = pExprInfo[j].base.resSchema.slotId;
+
+ pCtx[j].resultInfo = getResultEntryInfo(pRow, j, rowEntryOffset);
+ if (pCtx[j].fpSet.finalize) {
+ int32_t code = pCtx[j].fpSet.finalize(&pCtx[j], pBlock);
+ if (TAOS_FAILED(code)) {
+ qError("%s build result data block error, code %s", GET_TASKID(pTaskInfo), tstrerror(code));
+ T_LONG_JMP(pTaskInfo->env, code);
+ }
+ } else if (strcmp(pCtx[j].pExpr->pExpr->_function.functionName, "_select_value") == 0) {
+ // do nothing, todo refactor
+ } else {
+ // expand the result into multiple rows. E.g., _wstart, top(k, 20)
+ // the _wstart needs to copy to 20 following rows, since the results of top-k expands to 20 different rows.
+ SColumnInfoData* pColInfoData = taosArrayGet(pBlock->pDataBlock, slotId);
+ char* in = GET_ROWCELL_INTERBUF(pCtx[j].resultInfo);
+ for (int32_t k = 0; k < pRow->numOfRows; ++k) {
+ colDataAppend(pColInfoData, pBlock->info.rows + k, in, pCtx[j].resultInfo->isNullRes);
+ }
+ }
+ }
+ releaseOutputBuf(pTaskInfo, &key, pRow);
+ pBlock->info.rows += pRow->numOfRows;
+ }
+ blockDataUpdateTsWindow(pBlock, 0);
+ return TSDB_CODE_SUCCESS;
+}
diff --git a/source/libs/executor/src/groupoperator.c b/source/libs/executor/src/groupoperator.c
index 05dffc658b29bb5eb6675edae62d04bb6442cc48..5eb6557dbd4093f8c4599eb5d35107c95673e702 100644
--- a/source/libs/executor/src/groupoperator.c
+++ b/source/libs/executor/src/groupoperator.c
@@ -13,31 +13,35 @@
* along with this program. If not, see .
*/
-#include "os.h"
#include "function.h"
+#include "os.h"
#include "tname.h"
#include "tdatablock.h"
#include "tmsg.h"
+#include "executorInt.h"
#include "executorimpl.h"
#include "tcompare.h"
#include "thash.h"
#include "ttypes.h"
-#include "executorInt.h"
static void* getCurrentDataGroupInfo(const SPartitionOperatorInfo* pInfo, SDataGroupInfo** pGroupInfo, int32_t len);
static int32_t* setupColumnOffset(const SSDataBlock* pBlock, int32_t rowCapacity);
-static int32_t setGroupResultOutputBuf(SOperatorInfo* pOperator, SOptrBasicInfo* binfo, int32_t numOfCols, char* pData, int16_t bytes,
- uint64_t groupId, SDiskbasedBuf* pBuf, SAggSupporter* pAggSup);
+static int32_t setGroupResultOutputBuf(SOperatorInfo* pOperator, SOptrBasicInfo* binfo, int32_t numOfCols, char* pData,
+ int16_t bytes, uint64_t groupId, SDiskbasedBuf* pBuf, SAggSupporter* pAggSup);
static void freeGroupKey(void* param) {
- SGroupKeys* pKey = (SGroupKeys*) param;
+ SGroupKeys* pKey = (SGroupKeys*)param;
taosMemoryFree(pKey->pData);
}
-static void destroyGroupOperatorInfo(void* param, int32_t numOfOutput) {
+static void destroyGroupOperatorInfo(void* param) {
SGroupbyOperatorInfo* pInfo = (SGroupbyOperatorInfo*)param;
+ if (pInfo == NULL) {
+ return;
+ }
+
cleanupBasicInfo(&pInfo->binfo);
taosMemoryFreeClear(pInfo->keyBuf);
taosArrayDestroy(pInfo->pGroupCols);
@@ -58,13 +62,13 @@ static int32_t initGroupOptrInfo(SArray** pGroupColVals, int32_t* keyLen, char**
int32_t numOfGroupCols = taosArrayGetSize(pGroupColList);
for (int32_t i = 0; i < numOfGroupCols; ++i) {
SColumn* pCol = taosArrayGet(pGroupColList, i);
- (*keyLen) += pCol->bytes; // actual data + null_flag
+ (*keyLen) += pCol->bytes; // actual data + null_flag
SGroupKeys key = {0};
- key.bytes = pCol->bytes;
- key.type = pCol->type;
+ key.bytes = pCol->bytes;
+ key.type = pCol->type;
key.isNull = false;
- key.pData = taosMemoryCalloc(1, pCol->bytes);
+ key.pData = taosMemoryCalloc(1, pCol->bytes);
if (key.pData == NULL) {
return TSDB_CODE_OUT_OF_MEMORY;
}
@@ -83,7 +87,8 @@ static int32_t initGroupOptrInfo(SArray** pGroupColVals, int32_t* keyLen, char**
return TSDB_CODE_SUCCESS;
}
-static bool groupKeyCompare(SArray* pGroupCols, SArray* pGroupColVals, SSDataBlock* pBlock, int32_t rowIndex, int32_t numOfGroupCols) {
+static bool groupKeyCompare(SArray* pGroupCols, SArray* pGroupColVals, SSDataBlock* pBlock, int32_t rowIndex,
+ int32_t numOfGroupCols) {
SColumnDataAgg* pColAgg = NULL;
for (int32_t i = 0; i < numOfGroupCols; ++i) {
SColumn* pCol = taosArrayGet(pGroupCols, i);
@@ -108,7 +113,7 @@ static bool groupKeyCompare(SArray* pGroupCols, SArray* pGroupColVals, SSDataBlo
if (pkey->type == TSDB_DATA_TYPE_JSON) {
int32_t dataLen = getJsonValueLen(val);
- if (memcmp(pkey->pData, val, dataLen) == 0){
+ if (memcmp(pkey->pData, val, dataLen) == 0) {
continue;
} else {
return false;
@@ -150,7 +155,7 @@ static void recordNewGroupKeys(SArray* pGroupCols, SArray* pGroupColVals, SSData
pkey->isNull = false;
char* val = colDataGetData(pColInfoData, rowIndex);
if (pkey->type == TSDB_DATA_TYPE_JSON) {
- if(tTagIsJson(val)){
+ if (tTagIsJson(val)) {
terrno = TSDB_CODE_QRY_JSON_IN_GROUP_ERROR;
return;
}
@@ -194,13 +199,13 @@ static int32_t buildGroupKeys(void* pKey, const SArray* pGroupColVals) {
}
}
- return (int32_t) (pStart - (char*)pKey);
+ return (int32_t)(pStart - (char*)pKey);
}
// assign the group keys or user input constant values if required
static void doAssignGroupKeys(SqlFunctionCtx* pCtx, int32_t numOfOutput, int32_t totalRows, int32_t rowIndex) {
for (int32_t i = 0; i < numOfOutput; ++i) {
- if (pCtx[i].functionId == -1) { // select count(*),key from t group by key.
+ if (pCtx[i].functionId == -1) { // select count(*),key from t group by key.
SResultRowEntryInfo* pEntryInfo = GET_RES_INFO(&pCtx[i]);
SColumnInfoData* pColInfoData = pCtx[i].input.pData[0];
@@ -217,7 +222,7 @@ static void doAssignGroupKeys(SqlFunctionCtx* pCtx, int32_t numOfOutput, int32_t
} else {
memcpy(dest, data, pColInfoData->info.bytes);
}
- } else { // it is a NULL value
+ } else { // it is a NULL value
pEntryInfo->isNullRes = 1;
}
@@ -247,7 +252,7 @@ static void doHashGroupbyAgg(SOperatorInfo* pOperator, SSDataBlock* pBlock) {
if (!pInfo->isInit) {
recordNewGroupKeys(pInfo->pGroupCols, pInfo->pGroupColVals, pBlock, j);
if (terrno != TSDB_CODE_SUCCESS) { // group by json error
- longjmp(pTaskInfo->env, terrno);
+ T_LONG_JMP(pTaskInfo->env, terrno);
}
pInfo->isInit = true;
num++;
@@ -265,15 +270,16 @@ static void doHashGroupbyAgg(SOperatorInfo* pOperator, SSDataBlock* pBlock) {
num++;
recordNewGroupKeys(pInfo->pGroupCols, pInfo->pGroupColVals, pBlock, j);
if (terrno != TSDB_CODE_SUCCESS) { // group by json error
- longjmp(pTaskInfo->env, terrno);
+ T_LONG_JMP(pTaskInfo->env, terrno);
}
continue;
}
len = buildGroupKeys(pInfo->keyBuf, pInfo->pGroupColVals);
- int32_t ret = setGroupResultOutputBuf(pOperator, &(pInfo->binfo), pOperator->exprSupp.numOfExprs, pInfo->keyBuf, len, pBlock->info.groupId, pInfo->aggSup.pResultBuf, &pInfo->aggSup);
+ int32_t ret = setGroupResultOutputBuf(pOperator, &(pInfo->binfo), pOperator->exprSupp.numOfExprs, pInfo->keyBuf,
+ len, pBlock->info.groupId, pInfo->aggSup.pResultBuf, &pInfo->aggSup);
if (ret != TSDB_CODE_SUCCESS) { // null data, too many state code
- longjmp(pTaskInfo->env, TSDB_CODE_QRY_APP_ERROR);
+ T_LONG_JMP(pTaskInfo->env, TSDB_CODE_QRY_APP_ERROR);
}
int32_t rowIndex = j - num;
@@ -287,11 +293,10 @@ static void doHashGroupbyAgg(SOperatorInfo* pOperator, SSDataBlock* pBlock) {
if (num > 0) {
len = buildGroupKeys(pInfo->keyBuf, pInfo->pGroupColVals);
- int32_t ret =
- setGroupResultOutputBuf(pOperator, &(pInfo->binfo), pOperator->exprSupp.numOfExprs, pInfo->keyBuf, len,
- pBlock->info.groupId, pInfo->aggSup.pResultBuf, &pInfo->aggSup);
+ int32_t ret = setGroupResultOutputBuf(pOperator, &(pInfo->binfo), pOperator->exprSupp.numOfExprs, pInfo->keyBuf,
+ len, pBlock->info.groupId, pInfo->aggSup.pResultBuf, &pInfo->aggSup);
if (ret != TSDB_CODE_SUCCESS) {
- longjmp(pTaskInfo->env, TSDB_CODE_QRY_APP_ERROR);
+ T_LONG_JMP(pTaskInfo->env, TSDB_CODE_QRY_APP_ERROR);
}
int32_t rowIndex = pBlock->info.rows - num;
@@ -304,7 +309,7 @@ static SSDataBlock* buildGroupResultDataBlock(SOperatorInfo* pOperator) {
SGroupbyOperatorInfo* pInfo = pOperator->info;
SSDataBlock* pRes = pInfo->binfo.pRes;
- while(1) {
+ while (1) {
doBuildResultDatablock(pOperator, &pInfo->binfo, &pInfo->groupResInfo, pInfo->aggSup.pResultBuf);
doFilter(pInfo->pCondition, pRes, NULL);
@@ -319,7 +324,7 @@ static SSDataBlock* buildGroupResultDataBlock(SOperatorInfo* pOperator) {
}
pOperator->resultInfo.totalRows += pRes->info.rows;
- return (pRes->info.rows == 0)? NULL:pRes;
+ return (pRes->info.rows == 0) ? NULL : pRes;
}
static SSDataBlock* hashGroupbyAggregate(SOperatorInfo* pOperator) {
@@ -330,7 +335,7 @@ static SSDataBlock* hashGroupbyAggregate(SOperatorInfo* pOperator) {
SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo;
SGroupbyOperatorInfo* pInfo = pOperator->info;
- SSDataBlock* pRes = pInfo->binfo.pRes;
+ SSDataBlock* pRes = pInfo->binfo.pRes;
if (pOperator->status == OP_RES_TO_RETURN) {
return buildGroupResultDataBlock(pOperator);
@@ -339,7 +344,7 @@ static SSDataBlock* hashGroupbyAggregate(SOperatorInfo* pOperator) {
int32_t order = TSDB_ORDER_ASC;
int32_t scanFlag = MAIN_SCAN;
- int64_t st = taosGetTimestampUs();
+ int64_t st = taosGetTimestampUs();
SOperatorInfo* downstream = pOperator->pDownstream[0];
while (1) {
@@ -350,7 +355,7 @@ static SSDataBlock* hashGroupbyAggregate(SOperatorInfo* pOperator) {
int32_t code = getTableScanInfo(pOperator, &order, &scanFlag);
if (code != TSDB_CODE_SUCCESS) {
- longjmp(pTaskInfo->env, code);
+ T_LONG_JMP(pTaskInfo->env, code);
}
// the pDataBlock are always the same one, no need to call this again
@@ -358,9 +363,10 @@ static SSDataBlock* hashGroupbyAggregate(SOperatorInfo* pOperator) {
// there is an scalar expression that needs to be calculated right before apply the group aggregation.
if (pInfo->scalarSup.pExprInfo != NULL) {
- pTaskInfo->code = projectApplyFunctions(pInfo->scalarSup.pExprInfo, pBlock, pBlock, pInfo->scalarSup.pCtx, pInfo->scalarSup.numOfExprs, NULL);
+ pTaskInfo->code = projectApplyFunctions(pInfo->scalarSup.pExprInfo, pBlock, pBlock, pInfo->scalarSup.pCtx,
+ pInfo->scalarSup.numOfExprs, NULL);
if (pTaskInfo->code != TSDB_CODE_SUCCESS) {
- longjmp(pTaskInfo->env, pTaskInfo->code);
+ T_LONG_JMP(pTaskInfo->env, pTaskInfo->code);
}
}
@@ -399,8 +405,8 @@ SOperatorInfo* createGroupOperatorInfo(SOperatorInfo* downstream, SExprInfo* pEx
goto _error;
}
- pInfo->pGroupCols = pGroupColList;
- pInfo->pCondition = pCondition;
+ pInfo->pGroupCols = pGroupColList;
+ pInfo->pCondition = pCondition;
int32_t code = initExprSupp(&pInfo->scalarSup, pScalarExprInfo, numOfScalarExpr);
if (code != TSDB_CODE_SUCCESS) {
@@ -413,30 +419,39 @@ SOperatorInfo* createGroupOperatorInfo(SOperatorInfo* downstream, SExprInfo* pEx
}
initResultSizeInfo(&pOperator->resultInfo, 4096);
- initAggInfo(&pOperator->exprSupp, &pInfo->aggSup, pExprInfo, numOfCols, pInfo->groupKeyLen, pTaskInfo->id.str);
+ code = initAggInfo(&pOperator->exprSupp, &pInfo->aggSup, pExprInfo, numOfCols, pInfo->groupKeyLen, pTaskInfo->id.str);
+ if (code != TSDB_CODE_SUCCESS) {
+ goto _error;
+ }
+
initBasicInfo(&pInfo->binfo, pResultBlock);
initResultRowInfo(&pInfo->binfo.resultRowInfo);
- pOperator->name = "GroupbyAggOperator";
- pOperator->blocking = true;
- pOperator->status = OP_NOT_OPENED;
+ pOperator->name = "GroupbyAggOperator";
+ pOperator->blocking = true;
+ pOperator->status = OP_NOT_OPENED;
// pOperator->operatorType = OP_Groupby;
- pOperator->info = pInfo;
- pOperator->pTaskInfo = pTaskInfo;
+ pOperator->info = pInfo;
+ pOperator->pTaskInfo = pTaskInfo;
- pOperator->fpSet = createOperatorFpSet(operatorDummyOpenFn, hashGroupbyAggregate, NULL, NULL, destroyGroupOperatorInfo, aggEncodeResultRow, aggDecodeResultRow, NULL);
+ pOperator->fpSet = createOperatorFpSet(operatorDummyOpenFn, hashGroupbyAggregate, NULL, NULL,
+ destroyGroupOperatorInfo, aggEncodeResultRow, aggDecodeResultRow, NULL);
code = appendDownstream(pOperator, &downstream, 1);
+ if (code != TSDB_CODE_SUCCESS) {
+ goto _error;
+ }
+
return pOperator;
- _error:
+_error:
pTaskInfo->code = TSDB_CODE_OUT_OF_MEMORY;
- taosMemoryFreeClear(pInfo);
+  if (pInfo != NULL) destroyGroupOperatorInfo(pInfo);
taosMemoryFreeClear(pOperator);
return NULL;
}
static void doHashPartition(SOperatorInfo* pOperator, SSDataBlock* pBlock) {
-// SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo;
+ // SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo;
SPartitionOperatorInfo* pInfo = pOperator->info;
@@ -445,7 +460,7 @@ static void doHashPartition(SOperatorInfo* pOperator, SSDataBlock* pBlock) {
int32_t len = buildGroupKeys(pInfo->keyBuf, pInfo->pGroupColVals);
SDataGroupInfo* pGroupInfo = NULL;
- void *pPage = getCurrentDataGroupInfo(pInfo, &pGroupInfo, len);
+ void* pPage = getCurrentDataGroupInfo(pInfo, &pGroupInfo, len);
pGroupInfo->numOfRows += 1;
@@ -455,32 +470,32 @@ static void doHashPartition(SOperatorInfo* pOperator, SSDataBlock* pBlock) {
}
// number of rows
- int32_t* rows = (int32_t*) pPage;
+ int32_t* rows = (int32_t*)pPage;
size_t numOfCols = pOperator->exprSupp.numOfExprs;
- for(int32_t i = 0; i < numOfCols; ++i) {
+ for (int32_t i = 0; i < numOfCols; ++i) {
SExprInfo* pExpr = &pOperator->exprSupp.pExprInfo[i];
- int32_t slotId = pExpr->base.pParam[0].pCol->slotId;
+ int32_t slotId = pExpr->base.pParam[0].pCol->slotId;
SColumnInfoData* pColInfoData = taosArrayGet(pBlock->pDataBlock, slotId);
int32_t bytes = pColInfoData->info.bytes;
int32_t startOffset = pInfo->columnOffset[i];
- int32_t* columnLen = NULL;
+ int32_t* columnLen = NULL;
int32_t contentLen = 0;
if (IS_VAR_DATA_TYPE(pColInfoData->info.type)) {
int32_t* offset = (int32_t*)((char*)pPage + startOffset);
- columnLen = (int32_t*) ((char*)pPage + startOffset + sizeof(int32_t) * pInfo->rowCapacity);
- char* data = (char*)((char*) columnLen + sizeof(int32_t));
+ columnLen = (int32_t*)((char*)pPage + startOffset + sizeof(int32_t) * pInfo->rowCapacity);
+ char* data = (char*)((char*)columnLen + sizeof(int32_t));
if (colDataIsNull_s(pColInfoData, j)) {
offset[(*rows)] = -1;
contentLen = 0;
- } else if(pColInfoData->info.type == TSDB_DATA_TYPE_JSON){
+ } else if (pColInfoData->info.type == TSDB_DATA_TYPE_JSON) {
offset[*rows] = (*columnLen);
- char* src = colDataGetData(pColInfoData, j);
+ char* src = colDataGetData(pColInfoData, j);
int32_t dataLen = getJsonValueLen(src);
memcpy(data + (*columnLen), src, dataLen);
@@ -499,8 +514,8 @@ static void doHashPartition(SOperatorInfo* pOperator, SSDataBlock* pBlock) {
}
} else {
char* bitmap = (char*)pPage + startOffset;
- columnLen = (int32_t*) ((char*)pPage + startOffset + BitmapLen(pInfo->rowCapacity));
- char* data = (char*) columnLen + sizeof(int32_t);
+ columnLen = (int32_t*)((char*)pPage + startOffset + BitmapLen(pInfo->rowCapacity));
+ char* data = (char*)columnLen + sizeof(int32_t);
bool isNull = colDataIsNull_f(pColInfoData->nullbitmap, j);
if (isNull) {
@@ -527,7 +542,7 @@ void* getCurrentDataGroupInfo(const SPartitionOperatorInfo* pInfo, SDataGroupInf
SDataGroupInfo* p = taosHashGet(pInfo->pGroupSet, pInfo->keyBuf, len);
void* pPage = NULL;
- if (p == NULL) { // it is a new group
+ if (p == NULL) { // it is a new group
SDataGroupInfo gi = {0};
gi.pPageList = taosArrayInit(100, sizeof(int32_t));
taosHashPut(pInfo->pGroupSet, pInfo->keyBuf, len, &gi, sizeof(SDataGroupInfo));
@@ -535,22 +550,22 @@ void* getCurrentDataGroupInfo(const SPartitionOperatorInfo* pInfo, SDataGroupInf
p = taosHashGet(pInfo->pGroupSet, pInfo->keyBuf, len);
int32_t pageId = 0;
- pPage = getNewBufPage(pInfo->pBuf, 0, &pageId);
+ pPage = getNewBufPage(pInfo->pBuf, &pageId);
taosArrayPush(p->pPageList, &pageId);
- *(int32_t *) pPage = 0;
+ *(int32_t*)pPage = 0;
} else {
int32_t* curId = taosArrayGetLast(p->pPageList);
pPage = getBufPage(pInfo->pBuf, *curId);
- int32_t *rows = (int32_t*) pPage;
+ int32_t* rows = (int32_t*)pPage;
if (*rows >= pInfo->rowCapacity) {
// release buffer
releaseBufPage(pInfo->pBuf, pPage);
// add a new page for current group
int32_t pageId = 0;
- pPage = getNewBufPage(pInfo->pBuf, 0, &pageId);
+ pPage = getNewBufPage(pInfo->pBuf, &pageId);
taosArrayPush(p->pPageList, &pageId);
memset(pPage, 0, getBufPageSize(pInfo->pBuf));
}
@@ -573,17 +588,18 @@ uint64_t calcGroupId(char* pData, int32_t len) {
}
int32_t* setupColumnOffset(const SSDataBlock* pBlock, int32_t rowCapacity) {
- size_t numOfCols = taosArrayGetSize(pBlock->pDataBlock);
+ size_t numOfCols = taosArrayGetSize(pBlock->pDataBlock);
int32_t* offset = taosMemoryCalloc(numOfCols, sizeof(int32_t));
- offset[0] = sizeof(int32_t) + sizeof(uint64_t); // the number of rows in current page, ref to SSDataBlock paged serialization format
+ offset[0] = sizeof(int32_t) +
+ sizeof(uint64_t); // the number of rows in current page, ref to SSDataBlock paged serialization format
- for(int32_t i = 0; i < numOfCols - 1; ++i) {
+ for (int32_t i = 0; i < numOfCols - 1; ++i) {
SColumnInfoData* pColInfoData = taosArrayGet(pBlock->pDataBlock, i);
int32_t bytes = pColInfoData->info.bytes;
int32_t payloadLen = bytes * rowCapacity;
-
+
if (IS_VAR_DATA_TYPE(pColInfoData->info.type)) {
// offset segment + content length + payload
offset[i + 1] = rowCapacity * sizeof(int32_t) + sizeof(int32_t) + payloadLen + offset[i];
@@ -597,9 +613,9 @@ int32_t* setupColumnOffset(const SSDataBlock* pBlock, int32_t rowCapacity) {
}
static void clearPartitionOperator(SPartitionOperatorInfo* pInfo) {
- void *ite = NULL;
- while( (ite = taosHashIterate(pInfo->pGroupSet, ite)) != NULL ) {
- taosArrayDestroy( ((SDataGroupInfo *)ite)->pPageList);
+ void* ite = NULL;
+ while ((ite = taosHashIterate(pInfo->pGroupSet, ite)) != NULL) {
+ taosArrayDestroy(((SDataGroupInfo*)ite)->pPageList);
}
taosArrayClear(pInfo->sortedGroupArray);
clearDiskbasedBuf(pInfo->pBuf);
@@ -614,13 +630,14 @@ static int compareDataGroupInfo(const void* group1, const void* group2) {
return 0;
}
- return (pGroupInfo1->groupId < pGroupInfo2->groupId)? -1:1;
+ return (pGroupInfo1->groupId < pGroupInfo2->groupId) ? -1 : 1;
}
static SSDataBlock* buildPartitionResult(SOperatorInfo* pOperator) {
SPartitionOperatorInfo* pInfo = pOperator->info;
- SDataGroupInfo* pGroupInfo = (pInfo->groupIndex != -1) ? taosArrayGet(pInfo->sortedGroupArray, pInfo->groupIndex) : NULL;
+ SDataGroupInfo* pGroupInfo =
+ (pInfo->groupIndex != -1) ? taosArrayGet(pInfo->sortedGroupArray, pInfo->groupIndex) : NULL;
if (pInfo->groupIndex == -1 || pInfo->pageIndex >= taosArrayGetSize(pGroupInfo->pPageList)) {
// try next group data
++pInfo->groupIndex;
@@ -635,7 +652,7 @@ static SSDataBlock* buildPartitionResult(SOperatorInfo* pOperator) {
}
int32_t* pageId = taosArrayGet(pGroupInfo->pPageList, pInfo->pageIndex);
- void* page = getBufPage(pInfo->pBuf, *pageId);
+ void* page = getBufPage(pInfo->pBuf, *pageId);
blockDataEnsureCapacity(pInfo->binfo.pRes, pInfo->rowCapacity);
blockDataFromBuf1(pInfo->binfo.pRes, page, pInfo->rowCapacity);
@@ -658,14 +675,14 @@ static SSDataBlock* hashPartition(SOperatorInfo* pOperator) {
SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo;
SPartitionOperatorInfo* pInfo = pOperator->info;
- SSDataBlock* pRes = pInfo->binfo.pRes;
+ SSDataBlock* pRes = pInfo->binfo.pRes;
if (pOperator->status == OP_RES_TO_RETURN) {
blockDataCleanup(pRes);
return buildPartitionResult(pOperator);
}
- int64_t st = taosGetTimestampUs();
+ int64_t st = taosGetTimestampUs();
SOperatorInfo* downstream = pOperator->pDownstream[0];
while (1) {
@@ -676,22 +693,23 @@ static SSDataBlock* hashPartition(SOperatorInfo* pOperator) {
// there is an scalar expression that needs to be calculated right before apply the group aggregation.
if (pInfo->scalarSup.pExprInfo != NULL) {
- pTaskInfo->code = projectApplyFunctions(pInfo->scalarSup.pExprInfo, pBlock, pBlock, pInfo->scalarSup.pCtx, pInfo->scalarSup.numOfExprs, NULL);
+ pTaskInfo->code = projectApplyFunctions(pInfo->scalarSup.pExprInfo, pBlock, pBlock, pInfo->scalarSup.pCtx,
+ pInfo->scalarSup.numOfExprs, NULL);
if (pTaskInfo->code != TSDB_CODE_SUCCESS) {
- longjmp(pTaskInfo->env, pTaskInfo->code);
+ T_LONG_JMP(pTaskInfo->env, pTaskInfo->code);
}
}
terrno = TSDB_CODE_SUCCESS;
doHashPartition(pOperator, pBlock);
if (terrno != TSDB_CODE_SUCCESS) { // group by json error
- longjmp(pTaskInfo->env, terrno);
+ T_LONG_JMP(pTaskInfo->env, terrno);
}
}
SArray* groupArray = taosArrayInit(taosHashGetSize(pInfo->pGroupSet), sizeof(SDataGroupInfo));
- void* pGroupIter = NULL;
- pGroupIter = taosHashIterate(pInfo->pGroupSet, NULL);
+
+ void* pGroupIter = taosHashIterate(pInfo->pGroupSet, NULL);
while (pGroupIter != NULL) {
SDataGroupInfo* pGroupInfo = pGroupIter;
taosArrayPush(groupArray, pGroupInfo);
@@ -710,12 +728,12 @@ static SSDataBlock* hashPartition(SOperatorInfo* pOperator) {
return buildPartitionResult(pOperator);
}
-static void destroyPartitionOperatorInfo(void* param, int32_t numOfOutput) {
+static void destroyPartitionOperatorInfo(void* param) {
SPartitionOperatorInfo* pInfo = (SPartitionOperatorInfo*)param;
cleanupBasicInfo(&pInfo->binfo);
taosArrayDestroy(pInfo->pGroupCols);
- for(int i = 0; i < taosArrayGetSize(pInfo->pGroupColVals); i++){
+ for (int i = 0; i < taosArrayGetSize(pInfo->pGroupColVals); i++) {
SGroupKeys key = *(SGroupKeys*)taosArrayGet(pInfo->pGroupColVals, i);
taosMemoryFree(key.pData);
}
@@ -731,24 +749,25 @@ static void destroyPartitionOperatorInfo(void* param, int32_t numOfOutput) {
taosMemoryFreeClear(param);
}
-SOperatorInfo* createPartitionOperatorInfo(SOperatorInfo* downstream, SPartitionPhysiNode* pPartNode, SExecTaskInfo* pTaskInfo) {
+SOperatorInfo* createPartitionOperatorInfo(SOperatorInfo* downstream, SPartitionPhysiNode* pPartNode,
+ SExecTaskInfo* pTaskInfo) {
SPartitionOperatorInfo* pInfo = taosMemoryCalloc(1, sizeof(SPartitionOperatorInfo));
- SOperatorInfo* pOperator = taosMemoryCalloc(1, sizeof(SOperatorInfo));
+ SOperatorInfo* pOperator = taosMemoryCalloc(1, sizeof(SOperatorInfo));
if (pInfo == NULL || pOperator == NULL) {
goto _error;
}
SSDataBlock* pResBlock = createResDataBlock(pPartNode->node.pOutputDataBlockDesc);
- int32_t numOfCols = 0;
+ int32_t numOfCols = 0;
SExprInfo* pExprInfo = createExprInfo(pPartNode->pTargets, NULL, &numOfCols);
pInfo->pGroupCols = extractPartitionColInfo(pPartNode->pPartitionKeys);
if (pPartNode->pExprs != NULL) {
- int32_t num = 0;
+ int32_t num = 0;
SExprInfo* pExprInfo1 = createExprInfo(pPartNode->pExprs, NULL, &num);
- int32_t code = initExprSupp(&pInfo->scalarSup, pExprInfo1, num);
+ int32_t code = initExprSupp(&pInfo->scalarSup, pExprInfo1, num);
if (code != TSDB_CODE_SUCCESS) {
goto _error;
}
@@ -760,7 +779,7 @@ SOperatorInfo* createPartitionOperatorInfo(SOperatorInfo* downstream, SPartition
goto _error;
}
- uint32_t defaultPgsz = 0;
+ uint32_t defaultPgsz = 0;
uint32_t defaultBufsz = 0;
getBufferPgSize(pResBlock->info.rowSize, &defaultPgsz, &defaultBufsz);
@@ -782,15 +801,15 @@ SOperatorInfo* createPartitionOperatorInfo(SOperatorInfo* downstream, SPartition
goto _error;
}
- pOperator->name = "PartitionOperator";
- pOperator->blocking = true;
- pOperator->status = OP_NOT_OPENED;
+ pOperator->name = "PartitionOperator";
+ pOperator->blocking = true;
+ pOperator->status = OP_NOT_OPENED;
pOperator->operatorType = QUERY_NODE_PHYSICAL_PLAN_PARTITION;
- pInfo->binfo.pRes = pResBlock;
- pOperator->exprSupp.numOfExprs = numOfCols;
- pOperator->exprSupp.pExprInfo = pExprInfo;
- pOperator->info = pInfo;
- pOperator->pTaskInfo = pTaskInfo;
+ pInfo->binfo.pRes = pResBlock;
+ pOperator->exprSupp.numOfExprs = numOfCols;
+ pOperator->exprSupp.pExprInfo = pExprInfo;
+ pOperator->info = pInfo;
+ pOperator->pTaskInfo = pTaskInfo;
pOperator->fpSet = createOperatorFpSet(operatorDummyOpenFn, hashPartition, NULL, NULL, destroyPartitionOperatorInfo,
NULL, NULL, NULL);
@@ -798,16 +817,16 @@ SOperatorInfo* createPartitionOperatorInfo(SOperatorInfo* downstream, SPartition
code = appendDownstream(pOperator, &downstream, 1);
return pOperator;
- _error:
+_error:
pTaskInfo->code = TSDB_CODE_OUT_OF_MEMORY;
taosMemoryFreeClear(pInfo);
taosMemoryFreeClear(pOperator);
return NULL;
}
-int32_t setGroupResultOutputBuf(SOperatorInfo* pOperator, SOptrBasicInfo* binfo, int32_t numOfCols, char* pData, int16_t bytes,
- uint64_t groupId, SDiskbasedBuf* pBuf, SAggSupporter* pAggSup) {
- SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo;
+int32_t setGroupResultOutputBuf(SOperatorInfo* pOperator, SOptrBasicInfo* binfo, int32_t numOfCols, char* pData,
+ int16_t bytes, uint64_t groupId, SDiskbasedBuf* pBuf, SAggSupporter* pAggSup) {
+ SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo;
SResultRowInfo* pResultRowInfo = &binfo->resultRowInfo;
SqlFunctionCtx* pCtx = pOperator->exprSupp.pCtx;
@@ -818,3 +837,211 @@ int32_t setGroupResultOutputBuf(SOperatorInfo* pOperator, SOptrBasicInfo* binfo,
setResultRowInitCtx(pResultRow, pCtx, numOfCols, pOperator->exprSupp.rowEntryInfoOffset);
return TSDB_CODE_SUCCESS;
}
+
+uint64_t calGroupIdByData(SPartitionBySupporter* pParSup, SExprSupp* pExprSup, SSDataBlock* pBlock, int32_t rowId) {
+ if (pExprSup->pExprInfo != NULL) {
+ int32_t code =
+ projectApplyFunctions(pExprSup->pExprInfo, pBlock, pBlock, pExprSup->pCtx, pExprSup->numOfExprs, NULL);
+ if (code != TSDB_CODE_SUCCESS) {
+      qError("calculate group id error, code:%d", code);
+ }
+ }
+ recordNewGroupKeys(pParSup->pGroupCols, pParSup->pGroupColVals, pBlock, rowId);
+ int32_t len = buildGroupKeys(pParSup->keyBuf, pParSup->pGroupColVals);
+ uint64_t groupId = calcGroupId(pParSup->keyBuf, len);
+ return groupId;
+}
+
+static bool hasRemainPartion(SStreamPartitionOperatorInfo* pInfo) { return pInfo->parIte != NULL; }
+
+static SSDataBlock* buildStreamPartitionResult(SOperatorInfo* pOperator) {
+ SStreamPartitionOperatorInfo* pInfo = pOperator->info;
+ SSDataBlock* pDest = pInfo->binfo.pRes;
+ ASSERT(hasRemainPartion(pInfo));
+ SPartitionDataInfo* pParInfo = (SPartitionDataInfo*)pInfo->parIte;
+ blockDataCleanup(pDest);
+ int32_t rows = taosArrayGetSize(pParInfo->rowIds);
+ SSDataBlock* pSrc = pInfo->pInputDataBlock;
+ for (int32_t i = 0; i < rows; i++) {
+ int32_t rowIndex = *(int32_t*)taosArrayGet(pParInfo->rowIds, i);
+ for (int32_t j = 0; j < pOperator->exprSupp.numOfExprs; j++) {
+ int32_t slotId = pOperator->exprSupp.pExprInfo[j].base.pParam[0].pCol->slotId;
+ SColumnInfoData* pSrcCol = taosArrayGet(pSrc->pDataBlock, slotId);
+ SColumnInfoData* pDestCol = taosArrayGet(pDest->pDataBlock, j);
+ bool isNull = colDataIsNull(pSrcCol, pSrc->info.rows, rowIndex, NULL);
+ char* pSrcData = colDataGetData(pSrcCol, rowIndex);
+ colDataAppend(pDestCol, pDest->info.rows, pSrcData, isNull);
+ }
+ pDest->info.rows++;
+ }
+ blockDataUpdateTsWindow(pDest, pInfo->tsColIndex);
+ pDest->info.groupId = pParInfo->groupId;
+ pOperator->resultInfo.totalRows += pDest->info.rows;
+ pInfo->parIte = taosHashIterate(pInfo->pPartitions, pInfo->parIte);
+ ASSERT(pDest->info.rows > 0);
+ printDataBlock(pDest, "stream partitionby");
+ return pDest;
+}
+
+static void doStreamHashPartitionImpl(SStreamPartitionOperatorInfo* pInfo, SSDataBlock* pBlock) {
+ pInfo->pInputDataBlock = pBlock;
+ for (int32_t i = 0; i < pBlock->info.rows; ++i) {
+ recordNewGroupKeys(pInfo->partitionSup.pGroupCols, pInfo->partitionSup.pGroupColVals, pBlock, i);
+ int32_t keyLen = buildGroupKeys(pInfo->partitionSup.keyBuf, pInfo->partitionSup.pGroupColVals);
+ SPartitionDataInfo* pParData =
+ (SPartitionDataInfo*)taosHashGet(pInfo->pPartitions, pInfo->partitionSup.keyBuf, keyLen);
+ if (pParData) {
+ taosArrayPush(pParData->rowIds, &i);
+ } else {
+ SPartitionDataInfo newParData = {0};
+ newParData.groupId = calcGroupId(pInfo->partitionSup.keyBuf, keyLen);
+ newParData.rowIds = taosArrayInit(64, sizeof(int32_t));
+ taosArrayPush(newParData.rowIds, &i);
+ taosHashPut(pInfo->pPartitions, pInfo->partitionSup.keyBuf, keyLen, &newParData, sizeof(SPartitionDataInfo));
+ }
+ }
+}
+
+static SSDataBlock* doStreamHashPartition(SOperatorInfo* pOperator) {
+ if (pOperator->status == OP_EXEC_DONE) {
+ return NULL;
+ }
+
+ SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo;
+ SStreamPartitionOperatorInfo* pInfo = pOperator->info;
+ if (hasRemainPartion(pInfo)) {
+ return buildStreamPartitionResult(pOperator);
+ }
+
+ int64_t st = taosGetTimestampUs();
+ SOperatorInfo* downstream = pOperator->pDownstream[0];
+ {
+ pInfo->pInputDataBlock = NULL;
+ SSDataBlock* pBlock = downstream->fpSet.getNextFn(downstream);
+ if (pBlock == NULL) {
+ doSetOperatorCompleted(pOperator);
+ return NULL;
+ }
+ printDataBlock(pBlock, "stream partitionby recv");
+ switch (pBlock->info.type) {
+ case STREAM_NORMAL:
+ case STREAM_PULL_DATA:
+ case STREAM_INVALID:
+ pInfo->binfo.pRes->info.type = pBlock->info.type;
+ break;
+ case STREAM_DELETE_DATA: {
+ copyDataBlock(pInfo->pDelRes, pBlock);
+ pInfo->pDelRes->info.type = STREAM_DELETE_RESULT;
+ return pInfo->pDelRes;
+ } break;
+ default:
+ return pBlock;
+ }
+
+ // there is an scalar expression that needs to be calculated right before apply the group aggregation.
+ if (pInfo->scalarSup.pExprInfo != NULL) {
+ pTaskInfo->code = projectApplyFunctions(pInfo->scalarSup.pExprInfo, pBlock, pBlock, pInfo->scalarSup.pCtx,
+ pInfo->scalarSup.numOfExprs, NULL);
+ if (pTaskInfo->code != TSDB_CODE_SUCCESS) {
+        T_LONG_JMP(pTaskInfo->env, pTaskInfo->code);
+ }
+ }
+ taosHashClear(pInfo->pPartitions);
+ doStreamHashPartitionImpl(pInfo, pBlock);
+ }
+ pOperator->cost.openCost = (taosGetTimestampUs() - st) / 1000.0;
+
+ pInfo->parIte = taosHashIterate(pInfo->pPartitions, NULL);
+ return buildStreamPartitionResult(pOperator);
+}
+
+static void destroyStreamPartitionOperatorInfo(void* param) {
+ SStreamPartitionOperatorInfo* pInfo = (SStreamPartitionOperatorInfo*)param;
+ cleanupBasicInfo(&pInfo->binfo);
+ taosArrayDestroy(pInfo->partitionSup.pGroupCols);
+
+ for (int i = 0; i < taosArrayGetSize(pInfo->partitionSup.pGroupColVals); i++) {
+ SGroupKeys key = *(SGroupKeys*)taosArrayGet(pInfo->partitionSup.pGroupColVals, i);
+ taosMemoryFree(key.pData);
+ }
+ taosArrayDestroy(pInfo->partitionSup.pGroupColVals);
+
+ taosMemoryFree(pInfo->partitionSup.keyBuf);
+ cleanupExprSupp(&pInfo->scalarSup);
+ blockDataDestroy(pInfo->pDelRes);
+ taosMemoryFreeClear(param);
+}
+
+void initParDownStream(SOperatorInfo* downstream, SPartitionBySupporter* pParSup, SExprSupp* pExpr) {
+ if (downstream->operatorType != QUERY_NODE_PHYSICAL_PLAN_STREAM_SCAN) {
+ return;
+ }
+ SStreamScanInfo* pScanInfo = downstream->info;
+ pScanInfo->partitionSup = *pParSup;
+ pScanInfo->pPartScalarSup = pExpr;
+}
+
+SOperatorInfo* createStreamPartitionOperatorInfo(SOperatorInfo* downstream, SStreamPartitionPhysiNode* pPartNode,
+ SExecTaskInfo* pTaskInfo) {
+ SStreamPartitionOperatorInfo* pInfo = taosMemoryCalloc(1, sizeof(SStreamPartitionOperatorInfo));
+ SOperatorInfo* pOperator = taosMemoryCalloc(1, sizeof(SOperatorInfo));
+ if (pInfo == NULL || pOperator == NULL) {
+ goto _error;
+ }
+ int32_t code = TSDB_CODE_SUCCESS;
+ pInfo->partitionSup.pGroupCols = extractPartitionColInfo(pPartNode->pPartitionKeys);
+
+ if (pPartNode->pExprs != NULL) {
+ int32_t num = 0;
+ SExprInfo* pCalExprInfo = createExprInfo(pPartNode->pExprs, NULL, &num);
+ code = initExprSupp(&pInfo->scalarSup, pCalExprInfo, num);
+ if (code != TSDB_CODE_SUCCESS) {
+ goto _error;
+ }
+ }
+
+ int32_t keyLen = 0;
+ code = initGroupOptrInfo(&pInfo->partitionSup.pGroupColVals, &keyLen, &pInfo->partitionSup.keyBuf,
+ pInfo->partitionSup.pGroupCols);
+ if (code != TSDB_CODE_SUCCESS) {
+ goto _error;
+ }
+ pInfo->partitionSup.needCalc = true;
+
+ SSDataBlock* pResBlock = createResDataBlock(pPartNode->node.pOutputDataBlockDesc);
+ if (!pResBlock) {
+ goto _error;
+ }
+ blockDataEnsureCapacity(pResBlock, 4096);
+ pInfo->binfo.pRes = pResBlock;
+ pInfo->parIte = NULL;
+ pInfo->pInputDataBlock = NULL;
+ _hash_fn_t hashFn = taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY);
+ pInfo->pPartitions = taosHashInit(1024, hashFn, false, HASH_NO_LOCK);
+ pInfo->tsColIndex = 0;
+ pInfo->pDelRes = createSpecialDataBlock(STREAM_DELETE_RESULT);
+
+ int32_t numOfCols = 0;
+ SExprInfo* pExprInfo = createExprInfo(pPartNode->pTargets, NULL, &numOfCols);
+
+ pOperator->name = "StreamPartitionOperator";
+ pOperator->blocking = false;
+ pOperator->status = OP_NOT_OPENED;
+ pOperator->operatorType = QUERY_NODE_PHYSICAL_PLAN_STREAM_PARTITION;
+ pOperator->exprSupp.numOfExprs = numOfCols;
+ pOperator->exprSupp.pExprInfo = pExprInfo;
+ pOperator->info = pInfo;
+ pOperator->pTaskInfo = pTaskInfo;
+ pOperator->fpSet = createOperatorFpSet(operatorDummyOpenFn, doStreamHashPartition, NULL, NULL,
+ destroyStreamPartitionOperatorInfo, NULL, NULL, NULL);
+
+ initParDownStream(downstream, &pInfo->partitionSup, &pInfo->scalarSup);
+ code = appendDownstream(pOperator, &downstream, 1);
+ return pOperator;
+
+_error:
+ pTaskInfo->code = TSDB_CODE_OUT_OF_MEMORY;
+  if (pInfo != NULL) destroyStreamPartitionOperatorInfo(pInfo);
+ taosMemoryFreeClear(pOperator);
+ return NULL;
+}
diff --git a/source/libs/executor/src/joinoperator.c b/source/libs/executor/src/joinoperator.c
index 7d2b84d0f053a7c8c6e3f63db719f67b3d9e99f3..1bc7d458e0ee16decabea988a16713996d2468ce 100644
--- a/source/libs/executor/src/joinoperator.c
+++ b/source/libs/executor/src/joinoperator.c
@@ -25,7 +25,7 @@
static void setJoinColumnInfo(SColumnInfo* pColumn, const SColumnNode* pColumnNode);
static SSDataBlock* doMergeJoin(struct SOperatorInfo* pOperator);
-static void destroyMergeJoinOperator(void* param, int32_t numOfOutput);
+static void destroyMergeJoinOperator(void* param);
static void extractTimeCondition(SJoinOperatorInfo* pInfo, SOperatorInfo** pDownstream, int32_t numOfDownstream,
SSortMergeJoinPhysiNode* pJoinNode);
@@ -128,12 +128,11 @@ void setJoinColumnInfo(SColumnInfo* pColumn, const SColumnNode* pColumnNode) {
pColumn->scale = pColumnNode->node.resType.scale;
}
-void destroyMergeJoinOperator(void* param, int32_t numOfOutput) {
+void destroyMergeJoinOperator(void* param) {
SJoinOperatorInfo* pJoinOperator = (SJoinOperatorInfo*)param;
nodesDestroyNode(pJoinOperator->pCondAfterMerge);
pJoinOperator->pRes = blockDataDestroy(pJoinOperator->pRes);
-
taosMemoryFreeClear(param);
}
diff --git a/source/libs/executor/src/projectoperator.c b/source/libs/executor/src/projectoperator.c
index 94da3e23e1423d0954721059f5e1d62abdd8e872..2f12a0d19bdf74e7b0b2ab94c373a31cbe7d8316 100644
--- a/source/libs/executor/src/projectoperator.c
+++ b/source/libs/executor/src/projectoperator.c
@@ -23,7 +23,7 @@ static SArray* setRowTsColumnOutputInfo(SqlFunctionCtx* pCtx, int32_t numOf
static void setFunctionResultOutput(SOperatorInfo* pOperator, SOptrBasicInfo* pInfo, SAggSupporter* pSup, int32_t stage,
int32_t numOfExprs);
-static void destroyProjectOperatorInfo(void* param, int32_t numOfOutput) {
+static void destroyProjectOperatorInfo(void* param) {
if (NULL == param) {
return;
}
@@ -37,10 +37,13 @@ static void destroyProjectOperatorInfo(void* param, int32_t numOfOutput) {
taosMemoryFreeClear(param);
}
-static void destroyIndefinitOperatorInfo(void* param, int32_t numOfOutput) {
+static void destroyIndefinitOperatorInfo(void* param) {
SIndefOperatorInfo* pInfo = (SIndefOperatorInfo*)param;
- cleanupBasicInfo(&pInfo->binfo);
+ if (pInfo == NULL) {
+ return;
+ }
+ cleanupBasicInfo(&pInfo->binfo);
taosArrayDestroy(pInfo->pPseudoColInfo);
cleanupAggSup(&pInfo->aggSup);
cleanupExprSupp(&pInfo->scalarSup);
@@ -112,7 +115,7 @@ SOperatorInfo* createProjectOperatorInfo(SOperatorInfo* downstream, SProjectPhys
return pOperator;
_error:
- destroyProjectOperatorInfo(pInfo, numOfCols);
+ destroyProjectOperatorInfo(pInfo);
taosMemoryFree(pOperator);
pTaskInfo->code = code;
return NULL;
@@ -192,16 +195,6 @@ static int32_t doIngroupLimitOffset(SLimitInfo* pLimitInfo, uint64_t groupId, SS
return PROJECT_RETRIEVE_DONE;
}
-void printDataBlock1(SSDataBlock* pBlock, const char* flag) {
- if (!pBlock || pBlock->info.rows == 0) {
- qDebug("===stream===printDataBlock: Block is Null or Empty");
- return;
- }
- char* pBuf = NULL;
- qDebug("%s", dumpBlockData(pBlock, flag, &pBuf));
- taosMemoryFreeClear(pBuf);
-}
-
SSDataBlock* doProjectOperation(SOperatorInfo* pOperator) {
SProjectOperatorInfo* pProjectInfo = pOperator->info;
SOptrBasicInfo* pInfo = &pProjectInfo->binfo;
@@ -268,7 +261,7 @@ SSDataBlock* doProjectOperation(SOperatorInfo* pOperator) {
// the pDataBlock are always the same one, no need to call this again
int32_t code = getTableScanInfo(downstream, &order, &scanFlag);
if (code != TSDB_CODE_SUCCESS) {
- longjmp(pTaskInfo->env, code);
+ T_LONG_JMP(pTaskInfo->env, code);
}
setInputDataBlock(pOperator, pSup->pCtx, pBlock, order, scanFlag, false);
@@ -277,7 +270,7 @@ SSDataBlock* doProjectOperation(SOperatorInfo* pOperator) {
code = projectApplyFunctions(pSup->pExprInfo, pInfo->pRes, pBlock, pSup->pCtx, pSup->numOfExprs,
pProjectInfo->pPseudoColInfo);
if (code != TSDB_CODE_SUCCESS) {
- longjmp(pTaskInfo->env, code);
+ T_LONG_JMP(pTaskInfo->env, code);
}
status = doIngroupLimitOffset(pLimitInfo, pBlock->info.groupId, pInfo->pRes, pOperator);
@@ -371,9 +364,12 @@ SOperatorInfo* createIndefinitOutputOperatorInfo(SOperatorInfo* downstream, SPhy
initResultSizeInfo(&pOperator->resultInfo, numOfRows);
- initAggInfo(pSup, &pInfo->aggSup, pExprInfo, numOfExpr, keyBufSize, pTaskInfo->id.str);
- initBasicInfo(&pInfo->binfo, pResBlock);
+ int32_t code = initAggInfo(pSup, &pInfo->aggSup, pExprInfo, numOfExpr, keyBufSize, pTaskInfo->id.str);
+ if (code != TSDB_CODE_SUCCESS) {
+ goto _error;
+ }
+ initBasicInfo(&pInfo->binfo, pResBlock);
setFunctionResultOutput(pOperator, &pInfo->binfo, &pInfo->aggSup, MAIN_SCAN, numOfExpr);
pInfo->binfo.pRes = pResBlock;
@@ -389,7 +385,7 @@ SOperatorInfo* createIndefinitOutputOperatorInfo(SOperatorInfo* downstream, SPhy
pOperator->fpSet = createOperatorFpSet(operatorDummyOpenFn, doApplyIndefinitFunction, NULL, NULL,
destroyIndefinitOperatorInfo, NULL, NULL, NULL);
- int32_t code = appendDownstream(pOperator, &downstream, 1);
+ code = appendDownstream(pOperator, &downstream, 1);
if (code != TSDB_CODE_SUCCESS) {
goto _error;
}
@@ -397,7 +393,7 @@ SOperatorInfo* createIndefinitOutputOperatorInfo(SOperatorInfo* downstream, SPhy
return pOperator;
_error:
- taosMemoryFree(pInfo);
+ destroyIndefinitOperatorInfo(pInfo);
taosMemoryFree(pOperator);
pTaskInfo->code = TSDB_CODE_OUT_OF_MEMORY;
return NULL;
@@ -415,7 +411,7 @@ static void doHandleDataBlock(SOperatorInfo* pOperator, SSDataBlock* pBlock, SOp
// the pDataBlock are always the same one, no need to call this again
int32_t code = getTableScanInfo(downstream, &order, &scanFlag);
if (code != TSDB_CODE_SUCCESS) {
- longjmp(pTaskInfo->env, code);
+ T_LONG_JMP(pTaskInfo->env, code);
}
// there is an scalar expression that needs to be calculated before apply the group aggregation.
@@ -424,7 +420,7 @@ static void doHandleDataBlock(SOperatorInfo* pOperator, SSDataBlock* pBlock, SOp
code = projectApplyFunctions(pScalarSup->pExprInfo, pBlock, pBlock, pScalarSup->pCtx, pScalarSup->numOfExprs,
pIndefInfo->pPseudoColInfo);
if (code != TSDB_CODE_SUCCESS) {
- longjmp(pTaskInfo->env, code);
+ T_LONG_JMP(pTaskInfo->env, code);
}
}
@@ -434,7 +430,7 @@ static void doHandleDataBlock(SOperatorInfo* pOperator, SSDataBlock* pBlock, SOp
code = projectApplyFunctions(pSup->pExprInfo, pInfo->pRes, pBlock, pSup->pCtx, pSup->numOfExprs,
pIndefInfo->pPseudoColInfo);
if (code != TSDB_CODE_SUCCESS) {
- longjmp(pTaskInfo->env, code);
+ T_LONG_JMP(pTaskInfo->env, code);
}
}
diff --git a/source/libs/executor/src/scanoperator.c b/source/libs/executor/src/scanoperator.c
index 599f86f4fa4b1d8e68a9030206fe445e060d11d9..ad9cd1ffe7909c9a67e5af2e98193995757a05c2 100644
--- a/source/libs/executor/src/scanoperator.c
+++ b/source/libs/executor/src/scanoperator.c
@@ -36,8 +36,8 @@
#define SWITCH_ORDER(n) (((n) = ((n) == TSDB_ORDER_ASC) ? TSDB_ORDER_DESC : TSDB_ORDER_ASC))
static int32_t buildSysDbTableInfo(const SSysTableScanInfo* pInfo, int32_t capacity);
-static int32_t buildDbTableInfoBlock(const SSDataBlock* p, const SSysTableMeta* pSysDbTableMeta, size_t size,
- const char* dbName);
+static int32_t buildDbTableInfoBlock(bool sysInfo, const SSDataBlock* p, const SSysTableMeta* pSysDbTableMeta,
+ size_t size, const char* dbName);
static bool processBlockWithProbability(const SSampleExecInfo* pInfo);
@@ -178,8 +178,8 @@ static SResultRow* getTableGroupOutputBuf(SOperatorInfo* pOperator, uint64_t gro
STableScanInfo* pTableScanInfo = pOperator->info;
- SResultRowPosition* p1 = (SResultRowPosition*)taosHashGet(pTableScanInfo->pdInfo.pAggSup->pResultRowHashTable, buf,
- GET_RES_WINDOW_KEY_LEN(sizeof(groupId)));
+ SResultRowPosition* p1 = (SResultRowPosition*)tSimpleHashGet(pTableScanInfo->pdInfo.pAggSup->pResultRowHashTable, buf,
+ GET_RES_WINDOW_KEY_LEN(sizeof(groupId)));
if (p1 == NULL) {
return NULL;
@@ -250,7 +250,7 @@ static bool doLoadBlockSMA(STableScanInfo* pTableScanInfo, SSDataBlock* pBlock,
int32_t code = tsdbRetrieveDatablockSMA(pTableScanInfo->dataReader, &pColAgg, &allColumnsHaveAgg);
if (code != TSDB_CODE_SUCCESS) {
- longjmp(pTaskInfo->env, code);
+ T_LONG_JMP(pTaskInfo->env, code);
}
if (!allColumnsHaveAgg) {
@@ -264,7 +264,7 @@ static bool doLoadBlockSMA(STableScanInfo* pTableScanInfo, SSDataBlock* pBlock,
if (pBlock->pBlockAgg == NULL) {
pBlock->pBlockAgg = taosMemoryCalloc(numOfCols, POINTER_BYTES);
if (pBlock->pBlockAgg == NULL) {
- longjmp(pTaskInfo->env, TSDB_CODE_OUT_OF_MEMORY);
+ T_LONG_JMP(pTaskInfo->env, TSDB_CODE_OUT_OF_MEMORY);
}
}
@@ -374,7 +374,7 @@ static int32_t loadDataBlock(SOperatorInfo* pOperator, STableScanInfo* pTableSca
int32_t code = addTagPseudoColumnData(&pTableScanInfo->readHandle, pSup->pExprInfo, pSup->numOfExprs, pBlock,
GET_TASKID(pTaskInfo));
if (code != TSDB_CODE_SUCCESS) {
- longjmp(pTaskInfo->env, code);
+ T_LONG_JMP(pTaskInfo->env, code);
}
}
@@ -495,7 +495,7 @@ static SSDataBlock* doTableScanImpl(SOperatorInfo* pOperator) {
while (tsdbNextDataBlock(pTableScanInfo->dataReader)) {
if (isTaskKilled(pTaskInfo)) {
- longjmp(pTaskInfo->env, TSDB_CODE_TSC_QUERY_CANCELLED);
+ T_LONG_JMP(pTaskInfo->env, TSDB_CODE_TSC_QUERY_CANCELLED);
}
// process this data block based on the probabilities
@@ -523,7 +523,7 @@ static SSDataBlock* doTableScanImpl(SOperatorInfo* pOperator) {
int32_t code = loadDataBlock(pOperator, pTableScanInfo, pBlock, &status);
// int32_t code = loadDataBlockOnDemand(pOperator->pRuntimeEnv, pTableScanInfo, pBlock, &status);
if (code != TSDB_CODE_SUCCESS) {
- longjmp(pOperator->pTaskInfo->env, code);
+ T_LONG_JMP(pOperator->pTaskInfo->env, code);
}
// current block is filter out according to filter condition, continue load the next block
@@ -649,7 +649,7 @@ static SSDataBlock* doTableScan(SOperatorInfo* pOperator) {
int32_t code = tsdbReaderOpen(pInfo->readHandle.vnode, &pInfo->cond, tableList, (STsdbReader**)&pInfo->dataReader,
GET_TASKID(pTaskInfo));
if (code != TSDB_CODE_SUCCESS) {
- longjmp(pTaskInfo->env, code);
+ T_LONG_JMP(pTaskInfo->env, code);
return NULL;
}
}
@@ -689,12 +689,13 @@ static int32_t getTableScannerExecInfo(struct SOperatorInfo* pOptr, void** pOptr
return 0;
}
-static void destroyTableScanOperatorInfo(void* param, int32_t numOfOutput) {
+static void destroyTableScanOperatorInfo(void* param) {
STableScanInfo* pTableScanInfo = (STableScanInfo*)param;
blockDataDestroy(pTableScanInfo->pResBlock);
cleanupQueryTableDataCond(&pTableScanInfo->cond);
tsdbReaderClose(pTableScanInfo->dataReader);
+ pTableScanInfo->dataReader = NULL;
if (pTableScanInfo->pColMatchInfo != NULL) {
taosArrayDestroy(pTableScanInfo->pColMatchInfo);
@@ -837,7 +838,7 @@ static SSDataBlock* doBlockInfoScan(SOperatorInfo* pOperator) {
int32_t code = doGetTableRowSize(pBlockScanInfo->readHandle.meta, pBlockScanInfo->uid, &blockDistInfo.rowSize,
GET_TASKID(pTaskInfo));
if (code != TSDB_CODE_SUCCESS) {
- longjmp(pTaskInfo->env, code);
+ T_LONG_JMP(pTaskInfo->env, code);
}
tsdbGetFileBlocksDistInfo(pBlockScanInfo->pHandle, &blockDistInfo);
@@ -863,7 +864,7 @@ static SSDataBlock* doBlockInfoScan(SOperatorInfo* pOperator) {
return pBlock;
}
-static void destroyBlockDistScanOperatorInfo(void* param, int32_t numOfOutput) {
+static void destroyBlockDistScanOperatorInfo(void* param) {
SBlockDistInfo* pDistInfo = (SBlockDistInfo*)param;
blockDataDestroy(pDistInfo->pResBlock);
tsdbReaderClose(pDistInfo->pHandle);
@@ -920,49 +921,28 @@ static void doClearBufferedBlocks(SStreamScanInfo* pInfo) {
}
static bool isSessionWindow(SStreamScanInfo* pInfo) {
-  return pInfo->sessionSup.parentType == QUERY_NODE_PHYSICAL_PLAN_STREAM_SESSION ||
-         pInfo->sessionSup.parentType == QUERY_NODE_PHYSICAL_PLAN_STREAM_SESSION;
+  // NOTE(review): both operands of the || compare parentType against the SAME
+  // value (QUERY_NODE_PHYSICAL_PLAN_STREAM_SESSION), so the second comparison
+  // is dead code. It was presumably meant to check a different session plan
+  // type (e.g. the semi/final session variant) -- confirm intended enum.
+  return pInfo->windowSup.parentType == QUERY_NODE_PHYSICAL_PLAN_STREAM_SESSION ||
+         pInfo->windowSup.parentType == QUERY_NODE_PHYSICAL_PLAN_STREAM_SESSION;
}
static bool isStateWindow(SStreamScanInfo* pInfo) {
-  return pInfo->sessionSup.parentType == QUERY_NODE_PHYSICAL_PLAN_STREAM_STATE;
+  // True when the parent (downstream consumer) operator is a stream
+  // state-window operator.
+  return pInfo->windowSup.parentType == QUERY_NODE_PHYSICAL_PLAN_STREAM_STATE;
}
static bool isIntervalWindow(SStreamScanInfo* pInfo) {
-  return pInfo->sessionSup.parentType == QUERY_NODE_PHYSICAL_PLAN_STREAM_INTERVAL ||
-         pInfo->sessionSup.parentType == QUERY_NODE_PHYSICAL_PLAN_STREAM_SEMI_INTERVAL ||
-         pInfo->sessionSup.parentType == QUERY_NODE_PHYSICAL_PLAN_STREAM_FINAL_INTERVAL;
+  // True for any of the three stream interval operator variants
+  // (single, semi, final).
+  return pInfo->windowSup.parentType == QUERY_NODE_PHYSICAL_PLAN_STREAM_INTERVAL ||
+         pInfo->windowSup.parentType == QUERY_NODE_PHYSICAL_PLAN_STREAM_SEMI_INTERVAL ||
+         pInfo->windowSup.parentType == QUERY_NODE_PHYSICAL_PLAN_STREAM_FINAL_INTERVAL;
}
static bool isSignleIntervalWindow(SStreamScanInfo* pInfo) {
-  return pInfo->sessionSup.parentType == QUERY_NODE_PHYSICAL_PLAN_STREAM_INTERVAL;
+  // True only for the plain (non-semi/non-final) interval operator.
+  // NOTE(review): "Signle" is a typo for "Single"; renaming would touch all
+  // call sites, so it is only flagged here.
+  return pInfo->windowSup.parentType == QUERY_NODE_PHYSICAL_PLAN_STREAM_INTERVAL;
}
static bool isSlidingWindow(SStreamScanInfo* pInfo) {
  // An interval window whose sliding step differs from its interval length is
  // a sliding (overlapping) window.
  return isIntervalWindow(pInfo) && pInfo->interval.interval != pInfo->interval.sliding;
}
-static uint64_t getGroupId(SOperatorInfo* pOperator, uint64_t uid) {
- uint64_t* groupId = taosHashGet(pOperator->pTaskInfo->tableqinfoList.map, &uid, sizeof(int64_t));
- if (groupId) {
- return *groupId;
- }
- return 0;
- /* Todo(liuyao) for partition by column
- recordNewGroupKeys(pTableScanInfo->pGroupCols, pTableScanInfo->pGroupColVals, pBlock, rowId);
- int32_t len = buildGroupKeys(pTableScanInfo->keyBuf, pTableScanInfo->pGroupColVals);
- uint64_t resId = 0;
- uint64_t* groupId = taosHashGet(pTableScanInfo->pGroupSet, pTableScanInfo->keyBuf, len);
- if (groupId) {
- return *groupId;
- } else if (len != 0) {
- resId = calcGroupId(pTableScanInfo->keyBuf, len);
- taosHashPut(pTableScanInfo->pGroupSet, pTableScanInfo->keyBuf, len, &resId, sizeof(uint64_t));
- }
- return resId;
- */
-}
-
static void setGroupId(SStreamScanInfo* pInfo, SSDataBlock* pBlock, int32_t groupColIndex, int32_t rowIndex) {
SColumnInfoData* pColInfo = taosArrayGet(pBlock->pDataBlock, groupColIndex);
uint64_t* groupCol = (uint64_t*)pColInfo->pData;
@@ -976,6 +956,61 @@ void resetTableScanInfo(STableScanInfo* pTableScanInfo, STimeWindow* pWin) {
pTableScanInfo->currentGroupId = -1;
}
+// SArray element destructor used with taosArrayClearP below: frees one
+// nested SArray* entry.
+static void freeArray(void* array) { taosArrayDestroy(array); }
+
+// Restores the table-scan operator to its default full-range state after a
+// targeted one-off read (see readPreVersionData): disables version filtering,
+// rebuilds the group list as a single group holding the full table list, and
+// resets the scan time window to (INT64_MIN, INT64_MAX).
+static void resetTableScanOperator(SOperatorInfo* pTableScanOp) {
+  STableScanInfo* pTableScanInfo = pTableScanOp->info;
+  // -1 disables version-range filtering on both ends.
+  pTableScanInfo->cond.startVersion = -1;
+  pTableScanInfo->cond.endVersion = -1;
+  SArray* gpTbls = pTableScanOp->pTaskInfo->tableqinfoList.pGroupList;
+  SArray* allTbls = pTableScanOp->pTaskInfo->tableqinfoList.pTableList;
+  // Free any temporary per-uid group arrays, then re-insert the full list
+  // as the only group.
+  taosArrayClearP(gpTbls, freeArray);
+  taosArrayPush(gpTbls, &allTbls);
+  STimeWindow win = {.skey = INT64_MIN, .ekey = INT64_MAX};
+  resetTableScanInfo(pTableScanOp->info, &win);
+}
+
+// Reads the rows of table `tbUid` within [startTs, endTs] whose version is
+// <= maxVersion, by temporarily retargeting the table-scan operator at that
+// single table and window, running one doTableScan pass, and then restoring
+// the operator via resetTableScanOperator. Returns the scanned block (owned
+// by the scan operator; valid until the next scan) or NULL when nothing
+// matches.
+static SSDataBlock* readPreVersionData(SOperatorInfo* pTableScanOp, uint64_t tbUid, TSKEY startTs, TSKEY endTs,
+                                       int64_t maxVersion) {
+  SArray* gpTbls = pTableScanOp->pTaskInfo->tableqinfoList.pGroupList;
+  taosArrayClear(gpTbls);
+  // Build a one-table group for the targeted read.
+  STableKeyInfo tblInfo = {.uid = tbUid, .groupId = 0};
+  SArray*       tbls = taosArrayInit(1, sizeof(STableKeyInfo));
+  taosArrayPush(tbls, &tblInfo);
+  taosArrayPush(gpTbls, &tbls);
+
+  STimeWindow     win = {.skey = startTs, .ekey = endTs};
+  STableScanInfo* pTableScanInfo = pTableScanOp->info;
+  // startVersion -1 means no lower bound; cap the upper bound at maxVersion.
+  pTableScanInfo->cond.startVersion = -1;
+  pTableScanInfo->cond.endVersion = maxVersion;
+  resetTableScanInfo(pTableScanOp->info, &win);
+  SSDataBlock* pRes = doTableScan(pTableScanOp);
+  // Put the operator back into its normal full-range configuration.
+  resetTableScanOperator(pTableScanOp);
+  return pRes;
+}
+
+// Recomputes the group id of the row (uid, ts) as of `maxVersion` by
+// re-reading that single row and evaluating the partition-by expressions on
+// it. Returns 0 when the row cannot be read back (e.g. no data at that
+// version).
+static uint64_t getGroupIdByCol(SStreamScanInfo* pInfo, uint64_t uid, TSKEY ts, int64_t maxVersion) {
+  SSDataBlock* pPreRes = readPreVersionData(pInfo->pTableScanOp, uid, ts, ts, maxVersion);
+  if (!pPreRes || pPreRes->info.rows == 0) {
+    return 0;
+  }
+  // A point query on (uid, ts) must yield exactly one row.
+  ASSERT(pPreRes->info.rows == 1);
+  return calGroupIdByData(&pInfo->partitionSup, pInfo->pPartScalarSup, pPreRes, 0);
+}
+
+// Resolves the group id for a table row. When the stream partitions by column
+// values (needCalc) the id must be recomputed from the row's data as of
+// maxVersion; otherwise it is looked up in the static uid -> groupId map.
+// Returns 0 when the group cannot be determined.
+static uint64_t getGroupIdByData(SStreamScanInfo* pInfo, uint64_t uid, TSKEY ts, int64_t maxVersion) {
+  if (pInfo->partitionSup.needCalc) {
+    return getGroupIdByCol(pInfo, uid, ts, maxVersion);
+  }
+
+  SHashObj* map = pInfo->pTableScanOp->pTaskInfo->tableqinfoList.map;
+  // Key width: sizeof(int64_t) == sizeof(uint64_t), matching the map's key size.
+  uint64_t* groupId = taosHashGet(map, &uid, sizeof(int64_t));
+  if (groupId) {
+    return *groupId;
+  }
+  return 0;
+}
+
static bool prepareRangeScan(SStreamScanInfo* pInfo, SSDataBlock* pBlock, int32_t* pRowIndex) {
if ((*pRowIndex) == pBlock->info.rows) {
return false;
@@ -987,6 +1022,9 @@ static bool prepareRangeScan(SStreamScanInfo* pInfo, SSDataBlock* pBlock, int32_
SColumnInfoData* pEndTsCol = taosArrayGet(pBlock->pDataBlock, END_TS_COLUMN_INDEX);
TSKEY* endData = (TSKEY*)pEndTsCol->pData;
STimeWindow win = {.skey = startData[*pRowIndex], .ekey = endData[*pRowIndex]};
+ SColumnInfoData* pGpCol = taosArrayGet(pBlock->pDataBlock, GROUPID_COLUMN_INDEX);
+ uint64_t* gpData = (uint64_t*)pGpCol->pData;
+ uint64_t groupId = gpData[*pRowIndex];
SColumnInfoData* pCalStartTsCol = taosArrayGet(pBlock->pDataBlock, CALCULATE_START_TS_COLUMN_INDEX);
TSKEY* calStartData = (TSKEY*)pCalStartTsCol->pData;
@@ -1001,11 +1039,11 @@ static bool prepareRangeScan(SStreamScanInfo* pInfo, SSDataBlock* pBlock, int32_
(*pRowIndex)++;
for (; *pRowIndex < pBlock->info.rows; (*pRowIndex)++) {
- if (win.skey == startData[*pRowIndex]) {
+ if (win.skey == startData[*pRowIndex] && groupId == gpData[*pRowIndex]) {
win.ekey = TMAX(win.ekey, endData[*pRowIndex]);
continue;
}
- if (win.skey == endData[*pRowIndex]) {
+ if (win.skey == endData[*pRowIndex] && groupId == gpData[*pRowIndex]) {
win.skey = TMIN(win.skey, startData[*pRowIndex]);
continue;
}
@@ -1019,20 +1057,24 @@ static bool prepareRangeScan(SStreamScanInfo* pInfo, SSDataBlock* pBlock, int32_
return true;
}
-static STimeWindow getSlidingWindow(TSKEY* tsCol, SInterval* pInterval, SDataBlockInfo* pDataBlockInfo,
- int32_t* pRowIndex) {
+static STimeWindow getSlidingWindow(TSKEY* startTsCol, TSKEY* endTsCol, SInterval* pInterval,
+ SDataBlockInfo* pDataBlockInfo, int32_t* pRowIndex, bool hasGroup) {
SResultRowInfo dumyInfo;
dumyInfo.cur.pageId = -1;
- STimeWindow win = getActiveTimeWindow(NULL, &dumyInfo, tsCol[*pRowIndex], pInterval, TSDB_ORDER_ASC);
+ STimeWindow win = getActiveTimeWindow(NULL, &dumyInfo, startTsCol[*pRowIndex], pInterval, TSDB_ORDER_ASC);
STimeWindow endWin = win;
STimeWindow preWin = win;
while (1) {
- (*pRowIndex) += getNumOfRowsInTimeWindow(pDataBlockInfo, tsCol, *pRowIndex, endWin.ekey, binarySearchForKey, NULL,
- TSDB_ORDER_ASC);
+ if (hasGroup) {
+ (*pRowIndex) += 1;
+ } else {
+ (*pRowIndex) += getNumOfRowsInTimeWindow(pDataBlockInfo, startTsCol, *pRowIndex, endWin.ekey, binarySearchForKey,
+ NULL, TSDB_ORDER_ASC);
+ }
do {
preWin = endWin;
getNextTimeWindow(pInterval, &endWin, TSDB_ORDER_ASC);
- } while (tsCol[(*pRowIndex) - 1] >= endWin.skey);
+ } while (endTsCol[(*pRowIndex) - 1] >= endWin.skey);
endWin = preWin;
if (win.ekey == endWin.ekey || (*pRowIndex) == pDataBlockInfo->rows) {
win.ekey = endWin.ekey;
@@ -1060,7 +1102,31 @@ static SSDataBlock* doRangeScan(SStreamScanInfo* pInfo, SSDataBlock* pSDB, int32
return NULL;
}
- if (pResult->info.groupId == pInfo->groupId) {
+ doFilter(pInfo->pCondition, pResult, NULL);
+ if (pResult->info.rows == 0) {
+ continue;
+ }
+
+ if (pInfo->partitionSup.needCalc) {
+ SSDataBlock* tmpBlock = createOneDataBlock(pResult, true);
+ blockDataCleanup(pResult);
+ for (int32_t i = 0; i < tmpBlock->info.rows; i++) {
+ if (calGroupIdByData(&pInfo->partitionSup, pInfo->pPartScalarSup, tmpBlock, i) == pInfo->groupId) {
+ for (int32_t j = 0; j < pInfo->pTableScanOp->exprSupp.numOfExprs; j++) {
+ SColumnInfoData* pSrcCol = taosArrayGet(tmpBlock->pDataBlock, j);
+ SColumnInfoData* pDestCol = taosArrayGet(pResult->pDataBlock, j);
+ bool isNull = colDataIsNull(pSrcCol, tmpBlock->info.rows, i, NULL);
+ char* pSrcData = colDataGetData(pSrcCol, i);
+ colDataAppend(pDestCol, pResult->info.rows, pSrcData, isNull);
+ }
+ pResult->info.rows++;
+ }
+ }
+ if (pResult->info.rows > 0) {
+ pResult->info.calWin = pInfo->updateWin;
+ return pResult;
+ }
+ } else if (pResult->info.groupId == pInfo->groupId) {
pResult->info.calWin = pInfo->updateWin;
return pResult;
}
@@ -1091,17 +1157,18 @@ static int32_t generateSessionScanRange(SStreamScanInfo* pInfo, SSDataBlock* pSr
SColumnInfoData* pDestCalStartTsCol = taosArrayGet(pDestBlock->pDataBlock, CALCULATE_START_TS_COLUMN_INDEX);
SColumnInfoData* pDestCalEndTsCol = taosArrayGet(pDestBlock->pDataBlock, CALCULATE_END_TS_COLUMN_INDEX);
int32_t dummy = 0;
+ int64_t version = pSrcBlock->info.version - 1;
for (int32_t i = 0; i < pSrcBlock->info.rows; i++) {
- uint64_t groupId = getGroupId(pInfo->pTableScanOp, uidCol[i]);
+ uint64_t groupId = getGroupIdByData(pInfo, uidCol[i], startData[i], version);
// gap must be 0.
SResultWindowInfo* pStartWin =
- getCurSessionWindow(pInfo->sessionSup.pStreamAggSup, startData[i], endData[i], groupId, 0, &dummy);
+ getCurSessionWindow(pInfo->windowSup.pStreamAggSup, startData[i], endData[i], groupId, 0, &dummy);
if (!pStartWin) {
// window has been closed.
continue;
}
SResultWindowInfo* pEndWin =
- getCurSessionWindow(pInfo->sessionSup.pStreamAggSup, endData[i], endData[i], groupId, 0, &dummy);
+ getCurSessionWindow(pInfo->windowSup.pStreamAggSup, endData[i], endData[i], groupId, 0, &dummy);
ASSERT(pEndWin);
TSKEY ts = INT64_MIN;
colDataAppend(pDestStartCol, i, (const char*)&pStartWin->win.skey, false);
@@ -1121,34 +1188,90 @@ static int32_t generateIntervalScanRange(SStreamScanInfo* pInfo, SSDataBlock* pS
if (rows == 0) {
return TSDB_CODE_SUCCESS;
}
- int32_t code = blockDataEnsureCapacity(pDestBlock, rows);
+ int32_t code = blockDataEnsureCapacity(pDestBlock, rows * 2);
if (code != TSDB_CODE_SUCCESS) {
return code;
}
- SColumnInfoData* pTsCol = (SColumnInfoData*)taosArrayGet(pSrcBlock->pDataBlock, START_TS_COLUMN_INDEX);
- SColumnInfoData* pUidCol = taosArrayGet(pSrcBlock->pDataBlock, UID_COLUMN_INDEX);
- uint64_t* uidCol = (uint64_t*)pUidCol->pData;
- ASSERT(pTsCol->info.type == TSDB_DATA_TYPE_TIMESTAMP);
- TSKEY* tsCol = (TSKEY*)pTsCol->pData;
+ SColumnInfoData* pSrcStartTsCol = (SColumnInfoData*)taosArrayGet(pSrcBlock->pDataBlock, START_TS_COLUMN_INDEX);
+ SColumnInfoData* pSrcEndTsCol = (SColumnInfoData*)taosArrayGet(pSrcBlock->pDataBlock, END_TS_COLUMN_INDEX);
+ SColumnInfoData* pSrcUidCol = taosArrayGet(pSrcBlock->pDataBlock, UID_COLUMN_INDEX);
+ uint64_t* srcUidData = (uint64_t*)pSrcUidCol->pData;
+ SColumnInfoData* pSrcGpCol = taosArrayGet(pSrcBlock->pDataBlock, GROUPID_COLUMN_INDEX);
+ uint64_t* srcGp = (uint64_t*)pSrcGpCol->pData;
+ ASSERT(pSrcStartTsCol->info.type == TSDB_DATA_TYPE_TIMESTAMP);
+ TSKEY* srcStartTsCol = (TSKEY*)pSrcStartTsCol->pData;
+ TSKEY* srcEndTsCol = (TSKEY*)pSrcEndTsCol->pData;
SColumnInfoData* pStartTsCol = taosArrayGet(pDestBlock->pDataBlock, START_TS_COLUMN_INDEX);
SColumnInfoData* pEndTsCol = taosArrayGet(pDestBlock->pDataBlock, END_TS_COLUMN_INDEX);
+ SColumnInfoData* pDeUidCol = taosArrayGet(pDestBlock->pDataBlock, UID_COLUMN_INDEX);
SColumnInfoData* pGpCol = taosArrayGet(pDestBlock->pDataBlock, GROUPID_COLUMN_INDEX);
SColumnInfoData* pCalStartTsCol = taosArrayGet(pDestBlock->pDataBlock, CALCULATE_START_TS_COLUMN_INDEX);
SColumnInfoData* pCalEndTsCol = taosArrayGet(pDestBlock->pDataBlock, CALCULATE_END_TS_COLUMN_INDEX);
- uint64_t groupId = getGroupId(pInfo->pTableScanOp, uidCol[0]);
+ int64_t version = pSrcBlock->info.version - 1;
for (int32_t i = 0; i < rows;) {
- colDataAppend(pCalStartTsCol, pDestBlock->info.rows, (const char*)(tsCol + i), false);
- STimeWindow win = getSlidingWindow(tsCol, &pInfo->interval, &pSrcBlock->info, &i);
- colDataAppend(pCalEndTsCol, pDestBlock->info.rows, (const char*)(tsCol + i - 1), false);
-
+ uint64_t srcUid = srcUidData[i];
+ uint64_t groupId = getGroupIdByData(pInfo, srcUid, srcStartTsCol[i], version);
+ uint64_t srcGpId = srcGp[i];
+ TSKEY calStartTs = srcStartTsCol[i];
+ colDataAppend(pCalStartTsCol, pDestBlock->info.rows, (const char*)(&calStartTs), false);
+ STimeWindow win = getSlidingWindow(srcStartTsCol, srcEndTsCol, &pInfo->interval, &pSrcBlock->info, &i,
+ pInfo->partitionSup.needCalc);
+ TSKEY calEndTs = srcStartTsCol[i - 1];
+ colDataAppend(pCalEndTsCol, pDestBlock->info.rows, (const char*)(&calEndTs), false);
+ colDataAppend(pDeUidCol, pDestBlock->info.rows, (const char*)(&srcUid), false);
colDataAppend(pStartTsCol, pDestBlock->info.rows, (const char*)(&win.skey), false);
colDataAppend(pEndTsCol, pDestBlock->info.rows, (const char*)(&win.ekey), false);
colDataAppend(pGpCol, pDestBlock->info.rows, (const char*)(&groupId), false);
pDestBlock->info.rows++;
+ if (pInfo->partitionSup.needCalc && srcGpId != 0 && groupId != srcGpId) {
+ colDataAppend(pCalStartTsCol, pDestBlock->info.rows, (const char*)(&calStartTs), false);
+ colDataAppend(pCalEndTsCol, pDestBlock->info.rows, (const char*)(&calEndTs), false);
+ colDataAppend(pDeUidCol, pDestBlock->info.rows, (const char*)(&srcUid), false);
+ colDataAppend(pStartTsCol, pDestBlock->info.rows, (const char*)(&win.skey), false);
+ colDataAppend(pEndTsCol, pDestBlock->info.rows, (const char*)(&win.ekey), false);
+ colDataAppend(pGpCol, pDestBlock->info.rows, (const char*)(&srcGpId), false);
+ pDestBlock->info.rows++;
+ }
+ }
+ return TSDB_CODE_SUCCESS;
+}
+
+// Translates a raw delete block (startTs/endTs/uid triples) into a
+// delete-result block keyed by group id: for each source row the group id is
+// resolved as of version-1 (the state before the delete applied), the
+// uid and calculate-window columns are set to NULL, and the time range is
+// copied through. Returns TSDB_CODE_SUCCESS, or the error from capacity
+// allocation.
+static int32_t generateDeleteResultBlock(SStreamScanInfo* pInfo, SSDataBlock* pSrcBlock, SSDataBlock* pDestBlock) {
+  if (pSrcBlock->info.rows == 0) {
+    return TSDB_CODE_SUCCESS;
+  }
+  blockDataCleanup(pDestBlock);
+  int32_t code = blockDataEnsureCapacity(pDestBlock, pSrcBlock->info.rows);
+  if (code != TSDB_CODE_SUCCESS) {
+    return code;
+  }
+  // Source must at least carry startTs, endTs and uid columns.
+  ASSERT(taosArrayGetSize(pSrcBlock->pDataBlock) >= 3);
+  SColumnInfoData* pStartTsCol = taosArrayGet(pSrcBlock->pDataBlock, START_TS_COLUMN_INDEX);
+  TSKEY*           startData = (TSKEY*)pStartTsCol->pData;
+  SColumnInfoData* pEndTsCol = taosArrayGet(pSrcBlock->pDataBlock, END_TS_COLUMN_INDEX);
+  TSKEY*           endData = (TSKEY*)pEndTsCol->pData;
+  SColumnInfoData* pUidCol = taosArrayGet(pSrcBlock->pDataBlock, UID_COLUMN_INDEX);
+  uint64_t*        uidCol = (uint64_t*)pUidCol->pData;
+
+  SColumnInfoData* pDestStartCol = taosArrayGet(pDestBlock->pDataBlock, START_TS_COLUMN_INDEX);
+  SColumnInfoData* pDestEndCol = taosArrayGet(pDestBlock->pDataBlock, END_TS_COLUMN_INDEX);
+  SColumnInfoData* pDestUidCol = taosArrayGet(pDestBlock->pDataBlock, UID_COLUMN_INDEX);
+  SColumnInfoData* pDestGpCol = taosArrayGet(pDestBlock->pDataBlock, GROUPID_COLUMN_INDEX);
+  SColumnInfoData* pDestCalStartTsCol = taosArrayGet(pDestBlock->pDataBlock, CALCULATE_START_TS_COLUMN_INDEX);
+  SColumnInfoData* pDestCalEndTsCol = taosArrayGet(pDestBlock->pDataBlock, CALCULATE_END_TS_COLUMN_INDEX);
+  int32_t          dummy = 0;
+  // Resolve groups against the data as it existed just BEFORE this block's
+  // version, since the delete may have already removed the rows.
+  int64_t          version = pSrcBlock->info.version - 1;
+  for (int32_t i = 0; i < pSrcBlock->info.rows; i++) {
+    uint64_t groupId = getGroupIdByData(pInfo, uidCol[i], startData[i], version);
+    colDataAppend(pDestStartCol, i, (const char*)(startData + i), false);
+    colDataAppend(pDestEndCol, i, (const char*)(endData + i), false);
+    // uid and calc-window are not meaningful in the delete result; NULL them.
+    colDataAppendNULL(pDestUidCol, i);
+    colDataAppend(pDestGpCol, i, (const char*)&groupId, false);
+    colDataAppendNULL(pDestCalStartTsCol, i);
+    colDataAppendNULL(pDestCalEndTsCol, i);
+    pDestBlock->info.rows++;
+  }
}
-  // all rows have same group id
-  pDestBlock->info.groupId = groupId;
return TSDB_CODE_SUCCESS;
}
@@ -1156,21 +1279,24 @@ static int32_t generateScanRange(SStreamScanInfo* pInfo, SSDataBlock* pSrcBlock,
int32_t code = TSDB_CODE_SUCCESS;
if (isIntervalWindow(pInfo)) {
code = generateIntervalScanRange(pInfo, pSrcBlock, pDestBlock);
- } else {
+ } else if (isSessionWindow(pInfo) || isStateWindow(pInfo)) {
code = generateSessionScanRange(pInfo, pSrcBlock, pDestBlock);
}
pDestBlock->info.type = STREAM_CLEAR;
+ pDestBlock->info.version = pSrcBlock->info.version;
blockDataUpdateTsWindow(pDestBlock, 0);
return code;
}
-void appendOneRow(SSDataBlock* pBlock, TSKEY* pStartTs, TSKEY* pEndTs, int32_t uidCol, uint64_t* pID) {
+// Appends one (startTs, endTs, uid, groupId) row to pBlock and advances
+// info.rows. The column positions are the fixed START_TS/END_TS/UID/GROUPID
+// indexes; the caller is responsible for having ensured capacity.
+void appendOneRow(SSDataBlock* pBlock, TSKEY* pStartTs, TSKEY* pEndTs, uint64_t* pUid, uint64_t* pGp) {
  SColumnInfoData* pStartTsCol = taosArrayGet(pBlock->pDataBlock, START_TS_COLUMN_INDEX);
  SColumnInfoData* pEndTsCol = taosArrayGet(pBlock->pDataBlock, END_TS_COLUMN_INDEX);
-  SColumnInfoData* pUidCol = taosArrayGet(pBlock->pDataBlock, uidCol);
+  SColumnInfoData* pUidCol = taosArrayGet(pBlock->pDataBlock, UID_COLUMN_INDEX);
+  SColumnInfoData* pGpCol = taosArrayGet(pBlock->pDataBlock, GROUPID_COLUMN_INDEX);
  colDataAppend(pStartTsCol, pBlock->info.rows, (const char*)pStartTs, false);
  colDataAppend(pEndTsCol, pBlock->info.rows, (const char*)pEndTs, false);
-  colDataAppend(pUidCol, pBlock->info.rows, (const char*)pID, false);
+  colDataAppend(pUidCol, pBlock->info.rows, (const char*)pUid, false);
+  colDataAppend(pGpCol, pBlock->info.rows, (const char*)pGp, false);
  pBlock->info.rows++;
}
@@ -1195,31 +1321,25 @@ static void checkUpdateData(SStreamScanInfo* pInfo, bool invertible, SSDataBlock
// must check update info first.
bool update = updateInfoIsUpdated(pInfo->pUpdateInfo, pBlock->info.uid, tsCol[rowId]);
bool closedWin = isClosed && isSignleIntervalWindow(pInfo) &&
- isDeletedWindow(&win, pBlock->info.groupId, pInfo->sessionSup.pIntervalAggSup);
+ isDeletedWindow(&win, pBlock->info.groupId, pInfo->windowSup.pIntervalAggSup);
if ((update || closedWin) && out) {
- appendOneRow(pInfo->pUpdateDataRes, tsCol + rowId, tsCol + rowId, UID_COLUMN_INDEX, &pBlock->info.uid);
+ uint64_t gpId = closedWin && pInfo->partitionSup.needCalc
+ ? calGroupIdByData(&pInfo->partitionSup, pInfo->pPartScalarSup, pBlock, rowId)
+ : 0;
+ appendOneRow(pInfo->pUpdateDataRes, tsCol + rowId, tsCol + rowId, &pBlock->info.uid, &gpId);
}
}
- if (out) {
+ if (out && pInfo->pUpdateDataRes->info.rows > 0) {
+ pInfo->pUpdateDataRes->info.version = pBlock->info.version;
blockDataUpdateTsWindow(pInfo->pUpdateDataRes, 0);
- pInfo->pUpdateDataRes->info.type = STREAM_CLEAR;
- }
-}
-
-static void setBlockGroupId(SOperatorInfo* pOperator, SSDataBlock* pBlock, int32_t uidColIndex) {
- ASSERT(taosArrayGetSize(pBlock->pDataBlock) >= 3);
- SColumnInfoData* pColDataInfo = taosArrayGet(pBlock->pDataBlock, uidColIndex);
- uint64_t* uidCol = (uint64_t*)pColDataInfo->pData;
- ASSERT(pBlock->info.rows > 0);
- for (int32_t i = 0; i < pBlock->info.rows; i++) {
- uidCol[i] = getGroupId(pOperator, uidCol[i]);
+ pInfo->pUpdateDataRes->info.type = pInfo->partitionSup.needCalc ? STREAM_DELETE_DATA : STREAM_CLEAR;
}
}
static int32_t setBlockIntoRes(SStreamScanInfo* pInfo, const SSDataBlock* pBlock) {
SDataBlockInfo* pBlockInfo = &pInfo->pRes->info;
SOperatorInfo* pOperator = pInfo->pStreamScanOp;
- SExecTaskInfo* pTaskInfo = pInfo->pStreamScanOp->pTaskInfo;
+ SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo;
blockDataEnsureCapacity(pInfo->pRes, pBlock->info.rows);
@@ -1228,7 +1348,7 @@ static int32_t setBlockIntoRes(SStreamScanInfo* pInfo, const SSDataBlock* pBlock
pInfo->pRes->info.type = STREAM_NORMAL;
pInfo->pRes->info.version = pBlock->info.version;
- uint64_t* groupIdPre = taosHashGet(pOperator->pTaskInfo->tableqinfoList.map, &pBlock->info.uid, sizeof(int64_t));
+ uint64_t* groupIdPre = taosHashGet(pTaskInfo->tableqinfoList.map, &pBlock->info.uid, sizeof(int64_t));
if (groupIdPre) {
pInfo->pRes->info.groupId = *groupIdPre;
} else {
@@ -1266,7 +1386,7 @@ static int32_t setBlockIntoRes(SStreamScanInfo* pInfo, const SSDataBlock* pBlock
GET_TASKID(pTaskInfo));
if (code != TSDB_CODE_SUCCESS) {
blockDataFreeRes((SSDataBlock*)pBlock);
- longjmp(pTaskInfo->env, code);
+ T_LONG_JMP(pTaskInfo->env, code);
}
}
@@ -1276,12 +1396,34 @@ static int32_t setBlockIntoRes(SStreamScanInfo* pInfo, const SSDataBlock* pBlock
return 0;
}
-static SSDataBlock* doStreamScan(SOperatorInfo* pOperator) {
- // NOTE: this operator does never check if current status is done or not
+static SSDataBlock* doQueueScan(SOperatorInfo* pOperator) {
SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo;
SStreamScanInfo* pInfo = pOperator->info;
- qDebug("stream scan called");
+ qDebug("queue scan called");
+ if (pTaskInfo->streamInfo.prepareStatus.type == TMQ_OFFSET__SNAPSHOT_DATA) {
+ SSDataBlock* pResult = doTableScan(pInfo->pTableScanOp);
+ if (pResult && pResult->info.rows > 0) {
+ qDebug("queue scan tsdb return %d rows", pResult->info.rows);
+ pTaskInfo->streamInfo.returned = 1;
+ return pResult;
+ } else {
+ if (!pTaskInfo->streamInfo.returned) {
+ STableScanInfo* pTSInfo = pInfo->pTableScanOp->info;
+ tsdbReaderClose(pTSInfo->dataReader);
+ pTSInfo->dataReader = NULL;
+ tqOffsetResetToLog(&pTaskInfo->streamInfo.prepareStatus, pTaskInfo->streamInfo.snapshotVer);
+ qDebug("queue scan tsdb over, switch to wal ver %d", pTaskInfo->streamInfo.snapshotVer + 1);
+ if (tqSeekVer(pInfo->tqReader, pTaskInfo->streamInfo.snapshotVer + 1) < 0) {
+ return NULL;
+ }
+ ASSERT(pInfo->tqReader->pWalReader->curVersion == pTaskInfo->streamInfo.snapshotVer + 1);
+ } else {
+ return NULL;
+ }
+ }
+ }
+
if (pTaskInfo->streamInfo.prepareStatus.type == TMQ_OFFSET__LOG) {
while (1) {
SFetchRet ret = {0};
@@ -1293,21 +1435,21 @@ static SSDataBlock* doStreamScan(SOperatorInfo* pOperator) {
}
// TODO clean data block
if (pInfo->pRes->info.rows > 0) {
- qDebug("stream scan log return %d rows", pInfo->pRes->info.rows);
+ qDebug("queue scan log return %d rows", pInfo->pRes->info.rows);
return pInfo->pRes;
}
} else if (ret.fetchType == FETCH_TYPE__META) {
ASSERT(0);
- pTaskInfo->streamInfo.lastStatus = ret.offset;
- pTaskInfo->streamInfo.metaBlk = ret.meta;
- return NULL;
+ // pTaskInfo->streamInfo.lastStatus = ret.offset;
+ // pTaskInfo->streamInfo.metaBlk = ret.meta;
+ // return NULL;
} else if (ret.fetchType == FETCH_TYPE__NONE) {
pTaskInfo->streamInfo.lastStatus = ret.offset;
ASSERT(pTaskInfo->streamInfo.lastStatus.version >= pTaskInfo->streamInfo.prepareStatus.version);
ASSERT(pTaskInfo->streamInfo.lastStatus.version + 1 == pInfo->tqReader->pWalReader->curVersion);
char formatBuf[80];
tFormatOffset(formatBuf, 80, &ret.offset);
- qDebug("stream scan log return null, offset %s", formatBuf);
+ qDebug("queue scan log return null, offset %s", formatBuf);
return NULL;
} else {
ASSERT(0);
@@ -1321,11 +1463,53 @@ static SSDataBlock* doStreamScan(SOperatorInfo* pOperator) {
}
qDebug("stream scan tsdb return null");
return NULL;
- } else if (pTaskInfo->streamInfo.prepareStatus.type == TMQ_OFFSET__SNAPSHOT_META) {
- // TODO scan meta
+ } else {
ASSERT(0);
return NULL;
}
+}
+
+static SSDataBlock* doStreamScan(SOperatorInfo* pOperator) {
+ // NOTE: this operator does never check if current status is done or not
+ SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo;
+ SStreamScanInfo* pInfo = pOperator->info;
+
+ qDebug("stream scan called");
+#if 0
+ SStreamState* pState = pTaskInfo->streamInfo.pState;
+ if (pState) {
+ printf(">>>>>>>> stream write backend\n");
+ SWinKey key = {
+ .ts = 1,
+ .groupId = 2,
+ };
+ char tmp[100] = "abcdefg1";
+ if (streamStatePut(pState, &key, &tmp, strlen(tmp) + 1) < 0) {
+ ASSERT(0);
+ }
+
+ key.ts = 2;
+ char tmp2[100] = "abcdefg2";
+ if (streamStatePut(pState, &key, &tmp2, strlen(tmp2) + 1) < 0) {
+ ASSERT(0);
+ }
+
+ key.groupId = 5;
+ key.ts = 1;
+ char tmp3[100] = "abcdefg3";
+ if (streamStatePut(pState, &key, &tmp3, strlen(tmp3) + 1) < 0) {
+ ASSERT(0);
+ }
+
+ char* val2 = NULL;
+ int32_t sz;
+ if (streamStateGet(pState, &key, (void**)&val2, &sz) < 0) {
+ ASSERT(0);
+ }
+ printf("stream read %s %d\n", val2, sz);
+ streamFreeVal(val2);
+ }
+#endif
if (pTaskInfo->streamInfo.recoverStep == STREAM_RECOVER_STEP__PREPARE) {
STableScanInfo* pTSInfo = pInfo->pTableScanOp->info;
@@ -1372,17 +1556,28 @@ static SSDataBlock* doStreamScan(SOperatorInfo* pOperator) {
updateInfoAddCloseWindowSBF(pInfo->pUpdateInfo);
} break;
case STREAM_DELETE_DATA: {
- pInfo->blockType = STREAM_INPUT__DATA_SUBMIT;
- pInfo->updateResIndex = 0;
- generateScanRange(pInfo, pBlock, pInfo->pUpdateRes);
- prepareRangeScan(pInfo, pInfo->pUpdateRes, &pInfo->updateResIndex);
- copyDataBlock(pInfo->pDeleteDataRes, pInfo->pUpdateRes);
- pInfo->scanMode = STREAM_SCAN_FROM_DATAREADER_RANGE;
- return pInfo->pDeleteDataRes;
+ printDataBlock(pBlock, "stream scan delete recv");
+ if (!isIntervalWindow(pInfo) && !isSessionWindow(pInfo) && !isStateWindow(pInfo)) {
+ generateDeleteResultBlock(pInfo, pBlock, pInfo->pDeleteDataRes);
+ pInfo->pDeleteDataRes->info.type = STREAM_DELETE_RESULT;
+ printDataBlock(pBlock, "stream scan delete result");
+ return pInfo->pDeleteDataRes;
+ } else {
+ pInfo->blockType = STREAM_INPUT__DATA_SUBMIT;
+ pInfo->updateResIndex = 0;
+ generateScanRange(pInfo, pBlock, pInfo->pUpdateRes);
+ prepareRangeScan(pInfo, pInfo->pUpdateRes, &pInfo->updateResIndex);
+ copyDataBlock(pInfo->pDeleteDataRes, pInfo->pUpdateRes);
+ pInfo->pDeleteDataRes->info.type = STREAM_DELETE_DATA;
+ pInfo->scanMode = STREAM_SCAN_FROM_DATAREADER_RANGE;
+ printDataBlock(pBlock, "stream scan delete data");
+ return pInfo->pDeleteDataRes;
+ }
} break;
default:
break;
}
+ // printDataBlock(pBlock, "stream scan recv");
return pBlock;
} else if (pInfo->blockType == STREAM_INPUT__DATA_SUBMIT) {
qDebug("scan mode %d", pInfo->scanMode);
@@ -1392,6 +1587,14 @@ static SSDataBlock* doStreamScan(SOperatorInfo* pOperator) {
pInfo->scanMode = STREAM_SCAN_FROM_READERHANDLE;
return pInfo->pRes;
} break;
+ case STREAM_SCAN_FROM_DELETE_DATA: {
+ generateScanRange(pInfo, pInfo->pUpdateDataRes, pInfo->pUpdateRes);
+ prepareRangeScan(pInfo, pInfo->pUpdateRes, &pInfo->updateResIndex);
+ pInfo->scanMode = STREAM_SCAN_FROM_DATAREADER_RANGE;
+ copyDataBlock(pInfo->pDeleteDataRes, pInfo->pUpdateRes);
+ pInfo->pDeleteDataRes->info.type = STREAM_DELETE_DATA;
+ return pInfo->pDeleteDataRes;
+ } break;
case STREAM_SCAN_FROM_UPDATERES: {
generateScanRange(pInfo, pInfo->pUpdateDataRes, pInfo->pUpdateRes);
prepareRangeScan(pInfo, pInfo->pUpdateRes, &pInfo->updateResIndex);
@@ -1407,6 +1610,7 @@ static SSDataBlock* doStreamScan(SOperatorInfo* pOperator) {
updateInfoSetScanRange(pInfo->pUpdateInfo, &pTableScanInfo->cond.twindows, pInfo->groupId, version);
pSDB->info.type = pInfo->scanMode == STREAM_SCAN_FROM_DATAREADER_RANGE ? STREAM_NORMAL : STREAM_PULL_DATA;
checkUpdateData(pInfo, true, pSDB, false);
+ // printDataBlock(pSDB, "stream scan update");
return pSDB;
}
pInfo->scanMode = STREAM_SCAN_FROM_READERHANDLE;
@@ -1415,7 +1619,7 @@ static SSDataBlock* doStreamScan(SOperatorInfo* pOperator) {
break;
}
- SStreamAggSupporter* pSup = pInfo->sessionSup.pStreamAggSup;
+ SStreamAggSupporter* pSup = pInfo->windowSup.pStreamAggSup;
if (isStateWindow(pInfo) && pSup->pScanBlock->info.rows > 0) {
pInfo->scanMode = STREAM_SCAN_FROM_DATAREADER_RANGE;
pInfo->updateResIndex = 0;
@@ -1481,7 +1685,7 @@ static SSDataBlock* doStreamScan(SOperatorInfo* pOperator) {
// record the scan action.
pInfo->numOfExec++;
pOperator->resultInfo.totalRows += pBlockInfo->rows;
- printDataBlock(pInfo->pRes, "stream scan");
+ // printDataBlock(pInfo->pRes, "stream scan");
if (pBlockInfo->rows == 0) {
updateInfoDestoryColseWinSBF(pInfo->pUpdateInfo);
@@ -1490,30 +1694,26 @@ static SSDataBlock* doStreamScan(SOperatorInfo* pOperator) {
checkUpdateData(pInfo, true, pInfo->pRes, true);
pInfo->twAggSup.maxTs = TMAX(pInfo->twAggSup.maxTs, pBlockInfo->window.ekey);
if (pInfo->pUpdateDataRes->info.rows > 0) {
+ pInfo->updateResIndex = 0;
if (pInfo->pUpdateDataRes->info.type == STREAM_CLEAR) {
- pInfo->updateResIndex = 0;
pInfo->scanMode = STREAM_SCAN_FROM_UPDATERES;
} else if (pInfo->pUpdateDataRes->info.type == STREAM_INVERT) {
pInfo->scanMode = STREAM_SCAN_FROM_RES;
return pInfo->pUpdateDataRes;
+ } else if (pInfo->pUpdateDataRes->info.type == STREAM_DELETE_DATA) {
+ pInfo->scanMode = STREAM_SCAN_FROM_DELETE_DATA;
}
}
}
qDebug("scan rows: %d", pBlockInfo->rows);
return (pBlockInfo->rows == 0) ? NULL : pInfo->pRes;
-
} else {
ASSERT(0);
return NULL;
}
}
-static SSDataBlock* doRawScan(SOperatorInfo* pInfo) {
- //
- return NULL;
-}
-
static SArray* extractTableIdList(const STableListInfo* pTableGroupInfo) {
SArray* tableIdList = taosArrayInit(4, sizeof(uint64_t));
@@ -1526,24 +1726,162 @@ static SArray* extractTableIdList(const STableListInfo* pTableGroupInfo) {
return tableIdList;
}
+static SSDataBlock* doRawScan(SOperatorInfo* pOperator) {
+ // NOTE: this operator does never check if current status is done or not
+ SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo;
+ SStreamRawScanInfo* pInfo = pOperator->info;
+ pTaskInfo->streamInfo.metaRsp.metaRspLen = 0; // use metaRspLen !=0 to judge if data is meta
+ pTaskInfo->streamInfo.metaRsp.metaRsp = NULL;
+
+ qDebug("tmqsnap doRawScan called");
+ if (pTaskInfo->streamInfo.prepareStatus.type == TMQ_OFFSET__SNAPSHOT_DATA) {
+ SSDataBlock* pBlock = &pInfo->pRes;
+
+ if (pInfo->dataReader && tsdbNextDataBlock(pInfo->dataReader)) {
+ if (isTaskKilled(pTaskInfo)) {
+ longjmp(pTaskInfo->env, TSDB_CODE_TSC_QUERY_CANCELLED);
+ }
+
+ tsdbRetrieveDataBlockInfo(pInfo->dataReader, &pBlock->info);
+
+ SArray* pCols = tsdbRetrieveDataBlock(pInfo->dataReader, NULL);
+ pBlock->pDataBlock = pCols;
+ if (pCols == NULL) {
+ longjmp(pTaskInfo->env, terrno);
+ }
+
+ qDebug("tmqsnap doRawScan get data uid:%ld", pBlock->info.uid);
+ pTaskInfo->streamInfo.lastStatus.type = TMQ_OFFSET__SNAPSHOT_DATA;
+ pTaskInfo->streamInfo.lastStatus.uid = pBlock->info.uid;
+ pTaskInfo->streamInfo.lastStatus.ts = pBlock->info.window.ekey;
+ return pBlock;
+ }
+
+ SMetaTableInfo mtInfo = getUidfromSnapShot(pInfo->sContext);
+ if (mtInfo.uid == 0) { // read snapshot done, change to get data from wal
+ qDebug("tmqsnap read snapshot done, change to get data from wal");
+ pTaskInfo->streamInfo.prepareStatus.uid = mtInfo.uid;
+ pTaskInfo->streamInfo.lastStatus.type = TMQ_OFFSET__LOG;
+ pTaskInfo->streamInfo.lastStatus.version = pInfo->sContext->snapVersion;
+ } else {
+ pTaskInfo->streamInfo.prepareStatus.uid = mtInfo.uid;
+ pTaskInfo->streamInfo.prepareStatus.ts = INT64_MIN;
+ qDebug("tmqsnap change get data uid:%ld", mtInfo.uid);
+ qStreamPrepareScan(pTaskInfo, &pTaskInfo->streamInfo.prepareStatus, pInfo->sContext->subType);
+ }
+ qDebug("tmqsnap stream scan tsdb return null");
+ return NULL;
+ } else if (pTaskInfo->streamInfo.prepareStatus.type == TMQ_OFFSET__SNAPSHOT_META) {
+ SSnapContext* sContext = pInfo->sContext;
+ void* data = NULL;
+ int32_t dataLen = 0;
+ int16_t type = 0;
+ int64_t uid = 0;
+ if (getMetafromSnapShot(sContext, &data, &dataLen, &type, &uid) < 0) {
+ qError("tmqsnap getMetafromSnapShot error");
+ taosMemoryFreeClear(data);
+ return NULL;
+ }
+
+ if (!sContext->queryMetaOrData) { // change to get data next poll request
+ pTaskInfo->streamInfo.lastStatus.type = TMQ_OFFSET__SNAPSHOT_META;
+ pTaskInfo->streamInfo.lastStatus.uid = uid;
+ pTaskInfo->streamInfo.metaRsp.rspOffset.type = TMQ_OFFSET__SNAPSHOT_DATA;
+ pTaskInfo->streamInfo.metaRsp.rspOffset.uid = 0;
+ pTaskInfo->streamInfo.metaRsp.rspOffset.ts = INT64_MIN;
+ } else {
+ pTaskInfo->streamInfo.lastStatus.type = TMQ_OFFSET__SNAPSHOT_META;
+ pTaskInfo->streamInfo.lastStatus.uid = uid;
+ pTaskInfo->streamInfo.metaRsp.rspOffset = pTaskInfo->streamInfo.lastStatus;
+ pTaskInfo->streamInfo.metaRsp.resMsgType = type;
+ pTaskInfo->streamInfo.metaRsp.metaRspLen = dataLen;
+ pTaskInfo->streamInfo.metaRsp.metaRsp = data;
+ }
+
+ return NULL;
+ }
+ // else if (pTaskInfo->streamInfo.prepareStatus.type == TMQ_OFFSET__LOG) {
+ // int64_t fetchVer = pTaskInfo->streamInfo.prepareStatus.version + 1;
+ //
+ // while(1){
+ // if (tqFetchLog(pInfo->tqReader->pWalReader, pInfo->sContext->withMeta, &fetchVer, &pInfo->pCkHead) < 0) {
+ // qDebug("tmqsnap tmq poll: consumer log end. offset %" PRId64, fetchVer);
+ // pTaskInfo->streamInfo.lastStatus.version = fetchVer;
+ // pTaskInfo->streamInfo.lastStatus.type = TMQ_OFFSET__LOG;
+ // return NULL;
+ // }
+ // SWalCont* pHead = &pInfo->pCkHead->head;
+ // qDebug("tmqsnap tmq poll: consumer log offset %" PRId64 " msgType %d", fetchVer, pHead->msgType);
+ //
+ // if (pHead->msgType == TDMT_VND_SUBMIT) {
+ // SSubmitReq* pCont = (SSubmitReq*)&pHead->body;
+ // tqReaderSetDataMsg(pInfo->tqReader, pCont, 0);
+ // SSDataBlock* block = tqLogScanExec(pInfo->sContext->subType, pInfo->tqReader, pInfo->pFilterOutTbUid,
+ // &pInfo->pRes); if(block){
+ // pTaskInfo->streamInfo.lastStatus.type = TMQ_OFFSET__LOG;
+ // pTaskInfo->streamInfo.lastStatus.version = fetchVer;
+ // qDebug("tmqsnap fetch data msg, ver:%" PRId64 ", type:%d", pHead->version, pHead->msgType);
+ // return block;
+ // }else{
+ // fetchVer++;
+ // }
+ // } else{
+ // ASSERT(pInfo->sContext->withMeta);
+ // ASSERT(IS_META_MSG(pHead->msgType));
+ // qDebug("tmqsnap fetch meta msg, ver:%" PRId64 ", type:%d", pHead->version, pHead->msgType);
+ // pTaskInfo->streamInfo.metaRsp.rspOffset.version = fetchVer;
+ // pTaskInfo->streamInfo.metaRsp.rspOffset.type = TMQ_OFFSET__LOG;
+ // pTaskInfo->streamInfo.metaRsp.resMsgType = pHead->msgType;
+ // pTaskInfo->streamInfo.metaRsp.metaRspLen = pHead->bodyLen;
+ // pTaskInfo->streamInfo.metaRsp.metaRsp = taosMemoryMalloc(pHead->bodyLen);
+ // memcpy(pTaskInfo->streamInfo.metaRsp.metaRsp, pHead->body, pHead->bodyLen);
+ // return NULL;
+ // }
+ // }
+ return NULL;
+}
+
+static void destroyRawScanOperatorInfo(void* param) {
+ SStreamRawScanInfo* pRawScan = (SStreamRawScanInfo*)param;
+ tsdbReaderClose(pRawScan->dataReader);
+ destroySnapContext(pRawScan->sContext);
+ taosMemoryFree(pRawScan);
+}
+
// for subscribing db or stb (not including column),
// if this scan is used, meta data can be return
// and schemas are decided when scanning
-SOperatorInfo* createRawScanOperatorInfo(SReadHandle* pHandle, STableScanPhysiNode* pTableScanNode,
- SExecTaskInfo* pTaskInfo, STimeWindowAggSupp* pTwSup) {
+SOperatorInfo* createRawScanOperatorInfo(SReadHandle* pHandle, SExecTaskInfo* pTaskInfo) {
// create operator
// create tb reader
// create meta reader
// create tq reader
- return NULL;
+ SStreamRawScanInfo* pInfo = taosMemoryCalloc(1, sizeof(SStreamRawScanInfo));
+ SOperatorInfo* pOperator = taosMemoryCalloc(1, sizeof(SOperatorInfo));
+ if (pInfo == NULL || pOperator == NULL) {
+ terrno = TSDB_CODE_QRY_OUT_OF_MEMORY;
+ return NULL;
+ }
+
+ pInfo->vnode = pHandle->vnode;
+
+ pInfo->sContext = pHandle->sContext;
+ pOperator->name = "RawStreamScanOperator";
+ // pOperator->blocking = false;
+ // pOperator->status = OP_NOT_OPENED;
+ pOperator->info = pInfo;
+ pOperator->pTaskInfo = pTaskInfo;
+
+ pOperator->fpSet = createOperatorFpSet(NULL, doRawScan, NULL, NULL, destroyRawScanOperatorInfo, NULL, NULL, NULL);
+ return pOperator;
}
-static void destroyStreamScanOperatorInfo(void* param, int32_t numOfOutput) {
+static void destroyStreamScanOperatorInfo(void* param) {
SStreamScanInfo* pStreamScan = (SStreamScanInfo*)param;
if (pStreamScan->pTableScanOp && pStreamScan->pTableScanOp->info) {
STableScanInfo* pTableScanInfo = pStreamScan->pTableScanOp->info;
- destroyTableScanOperatorInfo(pTableScanInfo, numOfOutput);
+ destroyTableScanOperatorInfo(pTableScanInfo);
taosMemoryFreeClear(pStreamScan->pTableScanOp);
}
if (pStreamScan->tqReader) {
@@ -1554,7 +1892,7 @@ static void destroyStreamScanOperatorInfo(void* param, int32_t numOfOutput) {
}
if (pStreamScan->pPseudoExpr) {
destroyExprInfo(pStreamScan->pPseudoExpr, pStreamScan->numOfPseudoExpr);
- taosMemoryFreeClear(pStreamScan->pPseudoExpr);
+ taosMemoryFree(pStreamScan->pPseudoExpr);
}
updateInfoDestroy(pStreamScan->pUpdateInfo);
@@ -1634,17 +1972,13 @@ SOperatorInfo* createStreamScanOperatorInfo(SReadHandle* pHandle, STableScanPhys
pInfo->tqReader = pHandle->tqReader;
}
- if (pTSInfo->pdInfo.interval.interval > 0) {
- pInfo->pUpdateInfo = updateInfoInitP(&pTSInfo->pdInfo.interval, pInfo->twAggSup.waterMark);
- } else {
- pInfo->pUpdateInfo = NULL;
- }
-
+ pInfo->pUpdateInfo = NULL;
pInfo->pTableScanOp = pTableScanOp;
pInfo->interval = pTSInfo->pdInfo.interval;
pInfo->readHandle = *pHandle;
pInfo->tableUid = pScanPhyNode->uid;
+ pTaskInfo->streamInfo.snapshotVer = pHandle->version;
// set the extract column id to streamHandle
tqReaderSetColIdList(pInfo->tqReader, pColIds);
@@ -1669,8 +2003,7 @@ SOperatorInfo* createStreamScanOperatorInfo(SReadHandle* pHandle, STableScanPhys
pInfo->pUpdateRes = createSpecialDataBlock(STREAM_CLEAR);
pInfo->pCondition = pScanPhyNode->node.pConditions;
pInfo->scanMode = STREAM_SCAN_FROM_READERHANDLE;
- pInfo->sessionSup =
- (SessionWindowSupporter){.pStreamAggSup = NULL, .gap = -1, .parentType = QUERY_NODE_PHYSICAL_PLAN};
+ pInfo->windowSup = (SWindowSupporter){.pStreamAggSup = NULL, .gap = -1, .parentType = QUERY_NODE_PHYSICAL_PLAN};
pInfo->groupId = 0;
pInfo->pPullDataRes = createSpecialDataBlock(STREAM_RETRIEVE);
pInfo->pStreamScanOp = pOperator;
@@ -1679,6 +2012,7 @@ SOperatorInfo* createStreamScanOperatorInfo(SReadHandle* pHandle, STableScanPhys
pInfo->updateWin = (STimeWindow){.skey = INT64_MAX, .ekey = INT64_MAX};
pInfo->pUpdateDataRes = createSpecialDataBlock(STREAM_CLEAR);
pInfo->assignBlockUid = pTableScanNode->assignBlockUid;
+ pInfo->partitionSup.needCalc = false;
pOperator->name = "StreamScanOperator";
pOperator->operatorType = QUERY_NODE_PHYSICAL_PLAN_STREAM_SCAN;
@@ -1688,8 +2022,9 @@ SOperatorInfo* createStreamScanOperatorInfo(SReadHandle* pHandle, STableScanPhys
pOperator->exprSupp.numOfExprs = taosArrayGetSize(pInfo->pRes->pDataBlock);
pOperator->pTaskInfo = pTaskInfo;
- pOperator->fpSet = createOperatorFpSet(operatorDummyOpenFn, doStreamScan, NULL, NULL, destroyStreamScanOperatorInfo,
- NULL, NULL, NULL);
+ __optr_fn_t nextFn = pTaskInfo->execModel == OPTR_EXEC_MODEL_STREAM ? doStreamScan : doQueueScan;
+ pOperator->fpSet =
+ createOperatorFpSet(operatorDummyOpenFn, nextFn, NULL, NULL, destroyStreamScanOperatorInfo, NULL, NULL, NULL);
return pOperator;
@@ -1699,7 +2034,7 @@ _error:
return NULL;
}
-static void destroySysScanOperator(void* param, int32_t numOfOutput) {
+static void destroySysScanOperator(void* param) {
SSysTableScanInfo* pInfo = (SSysTableScanInfo*)param;
tsem_destroy(&pInfo->ready);
blockDataDestroy(pInfo->pRes);
@@ -1957,7 +2292,7 @@ static SSDataBlock* sysTableScanUserTags(SOperatorInfo* pOperator) {
metaReaderClear(&smr);
metaCloseTbCursor(pInfo->pCur);
pInfo->pCur = NULL;
- longjmp(pTaskInfo->env, terrno);
+ T_LONG_JMP(pTaskInfo->env, terrno);
}
char stableName[TSDB_TABLE_NAME_LEN + VARSTR_HEADER_SIZE] = {0};
@@ -2160,7 +2495,7 @@ static SSDataBlock* sysTableScanUserTables(SOperatorInfo* pOperator) {
metaReaderClear(&mr);
metaCloseTbCursor(pInfo->pCur);
pInfo->pCur = NULL;
- longjmp(pTaskInfo->env, terrno);
+ T_LONG_JMP(pTaskInfo->env, terrno);
}
// number of columns
@@ -2369,7 +2704,7 @@ static SSDataBlock* doSysTableScan(SOperatorInfo* pOperator) {
}
char* pStart = pRsp->data;
- extractDataBlockFromFetchRsp(pInfo->pRes, pRsp->data, pOperator->exprSupp.numOfExprs, pInfo->scanCols, &pStart);
+ extractDataBlockFromFetchRsp(pInfo->pRes, pRsp->data, pInfo->scanCols, &pStart);
updateLoadRemoteInfo(&pInfo->loadInfo, pRsp->numOfRows, pRsp->compLen, startTs, pOperator);
// todo log the filter info
@@ -2392,10 +2727,10 @@ int32_t buildSysDbTableInfo(const SSysTableScanInfo* pInfo, int32_t capacity) {
const SSysTableMeta* pSysDbTableMeta = NULL;
getInfosDbMeta(&pSysDbTableMeta, &size);
- p->info.rows = buildDbTableInfoBlock(p, pSysDbTableMeta, size, TSDB_INFORMATION_SCHEMA_DB);
+ p->info.rows = buildDbTableInfoBlock(pInfo->sysInfo, p, pSysDbTableMeta, size, TSDB_INFORMATION_SCHEMA_DB);
getPerfDbMeta(&pSysDbTableMeta, &size);
- p->info.rows = buildDbTableInfoBlock(p, pSysDbTableMeta, size, TSDB_PERFORMANCE_SCHEMA_DB);
+ p->info.rows = buildDbTableInfoBlock(pInfo->sysInfo, p, pSysDbTableMeta, size, TSDB_PERFORMANCE_SCHEMA_DB);
pInfo->pRes->info.rows = p->info.rows;
relocateColumnData(pInfo->pRes, pInfo->scanCols, p->pDataBlock, false);
@@ -2404,13 +2739,16 @@ int32_t buildSysDbTableInfo(const SSysTableScanInfo* pInfo, int32_t capacity) {
return pInfo->pRes->info.rows;
}
-int32_t buildDbTableInfoBlock(const SSDataBlock* p, const SSysTableMeta* pSysDbTableMeta, size_t size,
+int32_t buildDbTableInfoBlock(bool sysInfo, const SSDataBlock* p, const SSysTableMeta* pSysDbTableMeta, size_t size,
const char* dbName) {
char n[TSDB_TABLE_FNAME_LEN + VARSTR_HEADER_SIZE] = {0};
int32_t numOfRows = p->info.rows;
for (int32_t i = 0; i < size; ++i) {
const SSysTableMeta* pm = &pSysDbTableMeta[i];
+ if (!sysInfo && pm->sysInfo) {
+ continue;
+ }
SColumnInfoData* pColInfoData = taosArrayGet(p->pDataBlock, 0);
@@ -2464,6 +2802,7 @@ SOperatorInfo* createSysTableScanOperatorInfo(void* readHandle, SSystemTableScan
pInfo->accountId = pScanPhyNode->accountId;
pInfo->pUser = taosMemoryStrDup((void*)pUser);
+ pInfo->sysInfo = pScanPhyNode->sysInfo;
pInfo->showRewrite = pScanPhyNode->showRewrite;
pInfo->pRes = pResBlock;
pInfo->pCondition = pScanNode->node.pConditions;
@@ -2534,7 +2873,7 @@ static SSDataBlock* doTagScan(SOperatorInfo* pOperator) {
qError("failed to get table meta, uid:0x%" PRIx64 ", code:%s, %s", item->uid, tstrerror(terrno),
GET_TASKID(pTaskInfo));
metaReaderClear(&mr);
- longjmp(pTaskInfo->env, terrno);
+ T_LONG_JMP(pTaskInfo->env, terrno);
}
for (int32_t j = 0; j < pOperator->exprSupp.numOfExprs; ++j) {
@@ -2584,12 +2923,10 @@ static SSDataBlock* doTagScan(SOperatorInfo* pOperator) {
return (pRes->info.rows == 0) ? NULL : pInfo->pRes;
}
-static void destroyTagScanOperatorInfo(void* param, int32_t numOfOutput) {
+static void destroyTagScanOperatorInfo(void* param) {
STagScanInfo* pInfo = (STagScanInfo*)param;
pInfo->pRes = blockDataDestroy(pInfo->pRes);
-
taosArrayDestroy(pInfo->pColMatchInfo);
-
taosMemoryFreeClear(param);
}
@@ -2785,7 +3122,7 @@ static int32_t loadDataBlockFromOneTable(SOperatorInfo* pOperator, STableMergeSc
int32_t code = addTagPseudoColumnData(&pTableScanInfo->readHandle, pTableScanInfo->pseudoSup.pExprInfo,
pTableScanInfo->pseudoSup.numOfExprs, pBlock, GET_TASKID(pTaskInfo));
if (code != TSDB_CODE_SUCCESS) {
- longjmp(pTaskInfo->env, code);
+ T_LONG_JMP(pTaskInfo->env, code);
}
}
@@ -2828,7 +3165,7 @@ static SSDataBlock* getTableDataBlock(void* param) {
STsdbReader* reader = taosArrayGetP(pTableScanInfo->dataReaders, readerIdx);
while (tsdbNextDataBlock(reader)) {
if (isTaskKilled(pOperator->pTaskInfo)) {
- longjmp(pOperator->pTaskInfo->env, TSDB_CODE_TSC_QUERY_CANCELLED);
+ T_LONG_JMP(pOperator->pTaskInfo->env, TSDB_CODE_TSC_QUERY_CANCELLED);
}
// process this data block based on the probabilities
@@ -2851,7 +3188,7 @@ static SSDataBlock* getTableDataBlock(void* param) {
int32_t code = loadDataBlockFromOneTable(pOperator, pTableScanInfo, readerIdx, pBlock, &status);
// int32_t code = loadDataBlockOnDemand(pOperator->pRuntimeEnv, pTableScanInfo, pBlock, &status);
if (code != TSDB_CODE_SUCCESS) {
- longjmp(pOperator->pTaskInfo->env, code);
+ T_LONG_JMP(pOperator->pTaskInfo->env, code);
}
// current block is filter out according to filter condition, continue load the next block
@@ -2944,7 +3281,7 @@ int32_t startGroupTableMergeScan(SOperatorInfo* pOperator) {
int32_t code = tsortOpen(pInfo->pSortHandle);
if (code != TSDB_CODE_SUCCESS) {
- longjmp(pTaskInfo->env, terrno);
+ T_LONG_JMP(pTaskInfo->env, terrno);
}
return TSDB_CODE_SUCCESS;
@@ -3014,7 +3351,7 @@ SSDataBlock* doTableMergeScan(SOperatorInfo* pOperator) {
int32_t code = pOperator->fpSet._openFn(pOperator);
if (code != TSDB_CODE_SUCCESS) {
- longjmp(pTaskInfo->env, code);
+ T_LONG_JMP(pTaskInfo->env, code);
}
size_t tableListSize = taosArrayGetSize(pInfo->tableListInfo->pTableList);
if (!pInfo->hasGroupId) {
@@ -3052,7 +3389,7 @@ SSDataBlock* doTableMergeScan(SOperatorInfo* pOperator) {
return pBlock;
}
-void destroyTableMergeScanOperatorInfo(void* param, int32_t numOfOutput) {
+void destroyTableMergeScanOperatorInfo(void* param) {
STableMergeScanInfo* pTableScanInfo = (STableMergeScanInfo*)param;
cleanupQueryTableDataCond(&pTableScanInfo->cond);
taosArrayDestroy(pTableScanInfo->sortSourceParams);
diff --git a/source/libs/executor/src/sortoperator.c b/source/libs/executor/src/sortoperator.c
index 4dd5e4ec15e9521b6c2cdc39562313592242773c..e2014ec97320c863a6857e94c538bd8d8319c2a1 100644
--- a/source/libs/executor/src/sortoperator.c
+++ b/source/libs/executor/src/sortoperator.c
@@ -20,7 +20,7 @@ static SSDataBlock* doSort(SOperatorInfo* pOperator);
static int32_t doOpenSortOperator(SOperatorInfo* pOperator);
static int32_t getExplainExecInfo(SOperatorInfo* pOptr, void** pOptrExplain, uint32_t* len);
-static void destroyOrderOperatorInfo(void* param, int32_t numOfOutput);
+static void destroyOrderOperatorInfo(void* param);
// todo add limit/offset impl
SOperatorInfo* createSortOperatorInfo(SOperatorInfo* downstream, SSortPhysiNode* pSortNode, SExecTaskInfo* pTaskInfo) {
@@ -156,7 +156,7 @@ void applyScalarFunction(SSDataBlock* pBlock, void* param) {
int32_t code = projectApplyFunctions(pOperator->exprSupp.pExprInfo, pBlock, pBlock, pOperator->exprSupp.pCtx,
pOperator->exprSupp.numOfExprs, NULL);
if (code != TSDB_CODE_SUCCESS) {
- longjmp(pOperator->pTaskInfo->env, code);
+ T_LONG_JMP(pOperator->pTaskInfo->env, code);
}
}
}
@@ -184,7 +184,7 @@ int32_t doOpenSortOperator(SOperatorInfo* pOperator) {
taosMemoryFreeClear(ps);
if (code != TSDB_CODE_SUCCESS) {
- longjmp(pTaskInfo->env, terrno);
+ T_LONG_JMP(pTaskInfo->env, terrno);
}
pOperator->cost.openCost = (taosGetTimestampUs() - pInfo->startTs) / 1000.0;
@@ -204,7 +204,7 @@ SSDataBlock* doSort(SOperatorInfo* pOperator) {
int32_t code = pOperator->fpSet._openFn(pOperator);
if (code != TSDB_CODE_SUCCESS) {
- longjmp(pTaskInfo->env, code);
+ T_LONG_JMP(pTaskInfo->env, code);
}
SSDataBlock* pBlock = NULL;
@@ -250,7 +250,7 @@ SSDataBlock* doSort(SOperatorInfo* pOperator) {
return blockDataGetNumOfRows(pBlock) > 0 ? pBlock : NULL;
}
-void destroyOrderOperatorInfo(void* param, int32_t numOfOutput) {
+void destroyOrderOperatorInfo(void* param) {
SSortOperatorInfo* pInfo = (SSortOperatorInfo*)param;
pInfo->binfo.pRes = blockDataDestroy(pInfo->binfo.pRes);
@@ -388,7 +388,7 @@ int32_t beginSortGroup(SOperatorInfo* pOperator) {
taosMemoryFreeClear(ps);
if (code != TSDB_CODE_SUCCESS) {
- longjmp(pTaskInfo->env, terrno);
+ T_LONG_JMP(pTaskInfo->env, terrno);
}
return TSDB_CODE_SUCCESS;
@@ -420,7 +420,7 @@ SSDataBlock* doGroupSort(SOperatorInfo* pOperator) {
int32_t code = pOperator->fpSet._openFn(pOperator);
if (code != TSDB_CODE_SUCCESS) {
- longjmp(pTaskInfo->env, code);
+ T_LONG_JMP(pTaskInfo->env, code);
}
if (!pInfo->hasGroupId) {
@@ -468,7 +468,7 @@ int32_t getGroupSortExplainExecInfo(SOperatorInfo* pOptr, void** pOptrExplain, u
return TSDB_CODE_SUCCESS;
}
-void destroyGroupSortOperatorInfo(void* param, int32_t numOfOutput) {
+void destroyGroupSortOperatorInfo(void* param) {
SGroupSortOperatorInfo* pInfo = (SGroupSortOperatorInfo*)param;
pInfo->binfo.pRes = blockDataDestroy(pInfo->binfo.pRes);
@@ -575,7 +575,7 @@ int32_t doOpenMultiwayMergeOperator(SOperatorInfo* pOperator) {
int32_t code = tsortOpen(pInfo->pSortHandle);
if (code != TSDB_CODE_SUCCESS) {
- longjmp(pTaskInfo->env, terrno);
+ T_LONG_JMP(pTaskInfo->env, terrno);
}
pOperator->cost.openCost = (taosGetTimestampUs() - pInfo->startTs) / 1000.0;
@@ -672,7 +672,7 @@ SSDataBlock* doMultiwayMerge(SOperatorInfo* pOperator) {
int32_t code = pOperator->fpSet._openFn(pOperator);
if (code != TSDB_CODE_SUCCESS) {
- longjmp(pTaskInfo->env, code);
+ T_LONG_JMP(pTaskInfo->env, code);
}
SSDataBlock* pBlock = getMultiwaySortedBlockData(pInfo->pSortHandle, pInfo->binfo.pRes,
@@ -685,7 +685,7 @@ SSDataBlock* doMultiwayMerge(SOperatorInfo* pOperator) {
return pBlock;
}
-void destroyMultiwayMergeOperatorInfo(void* param, int32_t numOfOutput) {
+void destroyMultiwayMergeOperatorInfo(void* param) {
SMultiwayMergeOperatorInfo* pInfo = (SMultiwayMergeOperatorInfo*)param;
pInfo->binfo.pRes = blockDataDestroy(pInfo->binfo.pRes);
pInfo->pInputBlock = blockDataDestroy(pInfo->pInputBlock);
diff --git a/source/libs/executor/src/tfill.c b/source/libs/executor/src/tfill.c
index 59dd58070d1b96fa5003c0162957d6e8adedb061..f23552c5a7b82207ffc368dbae7c1894cb6a8edd 100644
--- a/source/libs/executor/src/tfill.c
+++ b/source/libs/executor/src/tfill.c
@@ -36,6 +36,7 @@
#define GET_DEST_SLOT_ID(_p) ((_p)->pExpr->base.resSchema.slotId)
static void doSetVal(SColumnInfoData* pDstColInfoData, int32_t rowIndex, const SGroupKeys* pKey);
+static bool fillIfWindowPseudoColumn(SFillInfo* pFillInfo, SFillColInfo* pCol, SColumnInfoData* pDstColInfoData, int32_t rowIndex);
static void setNullRow(SSDataBlock* pBlock, SFillInfo* pFillInfo, int32_t rowIndex) {
for(int32_t i = 0; i < pFillInfo->numOfCols; ++i) {
@@ -43,9 +44,8 @@ static void setNullRow(SSDataBlock* pBlock, SFillInfo* pFillInfo, int32_t rowInd
int32_t dstSlotId = GET_DEST_SLOT_ID(pCol);
SColumnInfoData* pDstColInfo = taosArrayGet(pBlock->pDataBlock, dstSlotId);
if (pCol->notFillCol) {
- if (pDstColInfo->info.type == TSDB_DATA_TYPE_TIMESTAMP) {
- colDataAppend(pDstColInfo, rowIndex, (const char*)&pFillInfo->currentKey, false);
- } else {
+ bool filled = fillIfWindowPseudoColumn(pFillInfo, pCol, pDstColInfo, rowIndex);
+ if (!filled) {
SArray* p = FILL_IS_ASC_FILL(pFillInfo) ? pFillInfo->prev.pRowVal : pFillInfo->next.pRowVal;
SGroupKeys* pKey = taosArrayGet(p, i);
doSetVal(pDstColInfo, rowIndex, pKey);
@@ -76,6 +76,35 @@ static void doSetUserSpecifiedValue(SColumnInfoData* pDst, SVariant* pVar, int32
}
}
+//fill windows pseudo column, _wstart, _wend, _wduration and return true, otherwise return false
+static bool fillIfWindowPseudoColumn(SFillInfo* pFillInfo, SFillColInfo* pCol, SColumnInfoData* pDstColInfoData, int32_t rowIndex) {
+ if (!pCol->notFillCol) {
+ return false;
+ }
+ if (pCol->pExpr->pExpr->nodeType == QUERY_NODE_COLUMN) {
+ if (pCol->pExpr->base.numOfParams != 1) {
+ return false;
+ }
+ if (pCol->pExpr->base.pParam[0].pCol->colType == COLUMN_TYPE_WINDOW_START) {
+ colDataAppend(pDstColInfoData, rowIndex, (const char*)&pFillInfo->currentKey, false);
+ return true;
+ } else if (pCol->pExpr->base.pParam[0].pCol->colType == COLUMN_TYPE_WINDOW_END) {
+ //TODO: include endpoint
+ SInterval* pInterval = &pFillInfo->interval;
+ int32_t step = (pFillInfo->order == TSDB_ORDER_ASC) ? 1 : -1;
+ int64_t windowEnd =
+ taosTimeAdd(pFillInfo->currentKey, pInterval->sliding * step, pInterval->slidingUnit, pInterval->precision);
+ colDataAppend(pDstColInfoData, rowIndex, (const char*)&windowEnd, false);
+ return true;
+ } else if (pCol->pExpr->base.pParam[0].pCol->colType == COLUMN_TYPE_WINDOW_DURATION) {
+ //TODO: include endpoint
+ colDataAppend(pDstColInfoData, rowIndex, (const char*)&pFillInfo->interval.sliding, false);
+ return true;
+ }
+ }
+ return false;
+}
+
static void doFillOneRow(SFillInfo* pFillInfo, SSDataBlock* pBlock, SSDataBlock* pSrcBlock, int64_t ts,
bool outOfBound) {
SPoint point1, point2, point;
@@ -92,10 +121,8 @@ static void doFillOneRow(SFillInfo* pFillInfo, SSDataBlock* pBlock, SSDataBlock*
SFillColInfo* pCol = &pFillInfo->pFillCol[i];
SColumnInfoData* pDstColInfoData = taosArrayGet(pBlock->pDataBlock, GET_DEST_SLOT_ID(pCol));
-
- if (pDstColInfoData->info.type == TSDB_DATA_TYPE_TIMESTAMP) {
- colDataAppend(pDstColInfoData, index, (const char*)&pFillInfo->currentKey, false);
- } else {
+ bool filled = fillIfWindowPseudoColumn(pFillInfo, pCol, pDstColInfoData, index);
+ if (!filled) {
SGroupKeys* pKey = taosArrayGet(p, i);
doSetVal(pDstColInfoData, index, pKey);
}
@@ -106,10 +133,8 @@ static void doFillOneRow(SFillInfo* pFillInfo, SSDataBlock* pBlock, SSDataBlock*
for (int32_t i = 0; i < pFillInfo->numOfCols; ++i) {
SFillColInfo* pCol = &pFillInfo->pFillCol[i];
SColumnInfoData* pDstColInfoData = taosArrayGet(pBlock->pDataBlock, GET_DEST_SLOT_ID(pCol));
-
- if (pDstColInfoData->info.type == TSDB_DATA_TYPE_TIMESTAMP) {
- colDataAppend(pDstColInfoData, index, (const char*)&pFillInfo->currentKey, false);
- } else {
+ bool filled = fillIfWindowPseudoColumn(pFillInfo, pCol, pDstColInfoData, index);
+ if (!filled) {
SGroupKeys* pKey = taosArrayGet(p, i);
doSetVal(pDstColInfoData, index, pKey);
}
@@ -127,9 +152,8 @@ static void doFillOneRow(SFillInfo* pFillInfo, SSDataBlock* pBlock, SSDataBlock*
int16_t type = pDstCol->info.type;
if (pCol->notFillCol) {
- if (type == TSDB_DATA_TYPE_TIMESTAMP) {
- colDataAppend(pDstCol, index, (const char*)&pFillInfo->currentKey, false);
- } else {
+ bool filled = fillIfWindowPseudoColumn(pFillInfo, pCol, pDstCol, index);
+ if (!filled) {
SArray* p = FILL_IS_ASC_FILL(pFillInfo) ? pFillInfo->prev.pRowVal : pFillInfo->next.pRowVal;
SGroupKeys* pKey = taosArrayGet(p, i);
doSetVal(pDstCol, index, pKey);
@@ -170,9 +194,8 @@ static void doFillOneRow(SFillInfo* pFillInfo, SSDataBlock* pBlock, SSDataBlock*
SColumnInfoData* pDst = taosArrayGet(pBlock->pDataBlock, slotId);
if (pCol->notFillCol) {
- if (pDst->info.type == TSDB_DATA_TYPE_TIMESTAMP) {
- colDataAppend(pDst, index, (const char*)&pFillInfo->currentKey, false);
- } else {
+ bool filled = fillIfWindowPseudoColumn(pFillInfo, pCol, pDst, index);
+ if (!filled) {
SArray* p = FILL_IS_ASC_FILL(pFillInfo) ? pFillInfo->prev.pRowVal : pFillInfo->next.pRowVal;
SGroupKeys* pKey = taosArrayGet(p, i);
doSetVal(pDst, index, pKey);
diff --git a/source/libs/executor/src/timewindowoperator.c b/source/libs/executor/src/timewindowoperator.c
index e9298487e760bee4cf28f47ab11a104b82cba509..a19b6f817936b8a360298b5b7de627b1de24f5f8 100644
--- a/source/libs/executor/src/timewindowoperator.c
+++ b/source/libs/executor/src/timewindowoperator.c
@@ -15,6 +15,7 @@
#include "executorimpl.h"
#include "function.h"
#include "functionMgt.h"
+#include "tcommon.h"
#include "tcompare.h"
#include "tdatablock.h"
#include "tfill.h"
@@ -27,21 +28,22 @@ typedef enum SResultTsInterpType {
#define IS_FINAL_OP(op) ((op)->isFinal)
-typedef struct SWinRes {
- TSKEY ts;
- uint64_t groupId;
-} SWinRes;
-
typedef struct SPullWindowInfo {
STimeWindow window;
uint64_t groupId;
} SPullWindowInfo;
+typedef struct SOpenWindowInfo {
+ SResultRowPosition pos;
+ uint64_t groupId;
+} SOpenWindowInfo;
+
static SSDataBlock* doStreamSessionAgg(SOperatorInfo* pOperator);
static int64_t* extractTsCol(SSDataBlock* pBlock, const SIntervalAggOperatorInfo* pInfo);
-static SResultRowPosition addToOpenWindowList(SResultRowInfo* pResultRowInfo, const SResultRow* pResult);
+static SResultRowPosition addToOpenWindowList(SResultRowInfo* pResultRowInfo, const SResultRow* pResult,
+ uint64_t groupId);
static void doCloseWindow(SResultRowInfo* pResultRowInfo, const SIntervalAggOperatorInfo* pInfo, SResultRow* pResult);
///*
@@ -600,16 +602,16 @@ static void doInterpUnclosedTimeWindow(SOperatorInfo* pOperatorInfo, int32_t num
SIntervalAggOperatorInfo* pInfo = (SIntervalAggOperatorInfo*)pOperatorInfo->info;
SExprSupp* pSup = &pOperatorInfo->exprSupp;
- int32_t startPos = 0;
- int32_t numOfOutput = pSup->numOfExprs;
- uint64_t groupId = pBlock->info.groupId;
+ int32_t startPos = 0;
+ int32_t numOfOutput = pSup->numOfExprs;
SResultRow* pResult = NULL;
while (1) {
- SListNode* pn = tdListGetHead(pResultRowInfo->openWindow);
-
- SResultRowPosition* p1 = (SResultRowPosition*)pn->data;
+ SListNode* pn = tdListGetHead(pResultRowInfo->openWindow);
+ SOpenWindowInfo* pOpenWin = (SOpenWindowInfo*)pn->data;
+ uint64_t groupId = pOpenWin->groupId;
+ SResultRowPosition* p1 = &pOpenWin->pos;
if (p->pageId == p1->pageId && p->offset == p1->offset) {
break;
}
@@ -620,7 +622,8 @@ static void doInterpUnclosedTimeWindow(SOperatorInfo* pOperatorInfo, int32_t num
if (pr->closed) {
ASSERT(isResultRowInterpolated(pr, RESULT_ROW_START_INTERP) &&
isResultRowInterpolated(pr, RESULT_ROW_END_INTERP));
- tdListPopHead(pResultRowInfo->openWindow);
+ SListNode* pNode = tdListPopHead(pResultRowInfo->openWindow);
+ taosMemoryFree(pNode);
continue;
}
@@ -628,24 +631,29 @@ static void doInterpUnclosedTimeWindow(SOperatorInfo* pOperatorInfo, int32_t num
int32_t ret = setTimeWindowOutputBuf(pResultRowInfo, &w, (scanFlag == MAIN_SCAN), &pResult, groupId, pSup->pCtx,
numOfOutput, pSup->rowEntryInfoOffset, &pInfo->aggSup, pTaskInfo);
if (ret != TSDB_CODE_SUCCESS) {
- longjmp(pTaskInfo->env, TSDB_CODE_QRY_OUT_OF_MEMORY);
+ T_LONG_JMP(pTaskInfo->env, TSDB_CODE_QRY_OUT_OF_MEMORY);
}
ASSERT(!isResultRowInterpolated(pResult, RESULT_ROW_END_INTERP));
SGroupKeys* pTsKey = taosArrayGet(pInfo->pPrevValues, 0);
int64_t prevTs = *(int64_t*)pTsKey->pData;
- doTimeWindowInterpolation(pInfo->pPrevValues, pBlock->pDataBlock, prevTs, -1, tsCols[startPos], startPos, w.ekey,
- RESULT_ROW_END_INTERP, pSup);
+ if (groupId == pBlock->info.groupId) {
+ doTimeWindowInterpolation(pInfo->pPrevValues, pBlock->pDataBlock, prevTs, -1, tsCols[startPos], startPos, w.ekey,
+ RESULT_ROW_END_INTERP, pSup);
+ }
setResultRowInterpo(pResult, RESULT_ROW_END_INTERP);
setNotInterpoWindowKey(pSup->pCtx, numOfExprs, RESULT_ROW_START_INTERP);
- doApplyFunctions(pTaskInfo, pSup->pCtx, &pInfo->twAggSup.timeWindowData, startPos, 0, pBlock->info.rows, numOfExprs);
+ updateTimeWindowInfo(&pInfo->twAggSup.timeWindowData, &w, true);
+ doApplyFunctions(pTaskInfo, pSup->pCtx, &pInfo->twAggSup.timeWindowData, startPos, 0, pBlock->info.rows,
+ numOfExprs);
if (isResultRowInterpolated(pResult, RESULT_ROW_END_INTERP)) {
closeResultRow(pr);
- tdListPopHead(pResultRowInfo->openWindow);
+ SListNode* pNode = tdListPopHead(pResultRowInfo->openWindow);
+ taosMemoryFree(pNode);
} else { // the remains are can not be closed yet.
break;
}
@@ -812,7 +820,7 @@ static int32_t savePullWindow(SPullWindowInfo* pPullInfo, SArray* pPullWins) {
int32_t compareResKey(void* pKey, void* data, int32_t index) {
SArray* res = (SArray*)data;
SResKeyPos* pos = taosArrayGetP(res, index);
- SWinRes* pData = (SWinRes*)pKey;
+ SWinKey* pData = (SWinKey*)pKey;
if (pData->ts == *(int64_t*)pos->key) {
if (pData->groupId > pos->groupId) {
return 1;
@@ -828,7 +836,7 @@ int32_t compareResKey(void* pKey, void* data, int32_t index) {
static int32_t saveResult(int64_t ts, int32_t pageId, int32_t offset, uint64_t groupId, SArray* pUpdated) {
int32_t size = taosArrayGetSize(pUpdated);
- SWinRes data = {.ts = ts, .groupId = groupId};
+ SWinKey data = {.ts = ts, .groupId = groupId};
int32_t index = binarySearchCom(pUpdated, size, &data, TSDB_ORDER_DESC, compareResKey);
if (index == -1) {
index = 0;
@@ -861,8 +869,8 @@ static int32_t saveWinResult(int64_t ts, int32_t pageId, int32_t offset, uint64_
newPos->groupId = groupId;
newPos->pos = (SResultRowPosition){.pageId = pageId, .offset = offset};
*(int64_t*)newPos->key = ts;
- SWinRes key = {.ts = ts, .groupId = groupId};
- if (taosHashPut(pUpdatedMap, &key, sizeof(SWinRes), &newPos, sizeof(void*)) != TSDB_CODE_SUCCESS) {
+ SWinKey key = {.ts = ts, .groupId = groupId};
+ if (taosHashPut(pUpdatedMap, &key, sizeof(SWinKey), &newPos, sizeof(void*)) != TSDB_CODE_SUCCESS) {
taosMemoryFree(newPos);
}
return TSDB_CODE_SUCCESS;
@@ -879,20 +887,20 @@ static int32_t saveResultRow(SResultRow* result, uint64_t groupId, SArray* pUpda
static void removeResults(SArray* pWins, SHashObj* pUpdatedMap) {
int32_t size = taosArrayGetSize(pWins);
for (int32_t i = 0; i < size; i++) {
- SWinRes* pW = taosArrayGet(pWins, i);
- taosHashRemove(pUpdatedMap, pW, sizeof(SWinRes));
+ SWinKey* pW = taosArrayGet(pWins, i);
+ taosHashRemove(pUpdatedMap, pW, sizeof(SWinKey));
}
}
int64_t getWinReskey(void* data, int32_t index) {
SArray* res = (SArray*)data;
- SWinRes* pos = taosArrayGet(res, index);
+ SWinKey* pos = taosArrayGet(res, index);
return pos->ts;
}
int32_t compareWinRes(void* pKey, void* data, int32_t index) {
SArray* res = (SArray*)data;
- SWinRes* pos = taosArrayGetP(res, index);
+ SWinKey* pos = taosArrayGet(res, index);
SResKeyPos* pData = (SResKeyPos*)pKey;
if (*(int64_t*)pData->key == pos->ts) {
if (pData->groupId > pos->groupId) {
@@ -914,10 +922,11 @@ static void removeDeleteResults(SHashObj* pUpdatedMap, SArray* pDelWins) {
}
void* pIte = NULL;
while ((pIte = taosHashIterate(pUpdatedMap, pIte)) != NULL) {
- SResKeyPos* pResKey = (SResKeyPos*)pIte;
+ SResKeyPos* pResKey = *(SResKeyPos**)pIte;
int32_t index = binarySearchCom(pDelWins, delSize, pResKey, TSDB_ORDER_DESC, compareWinRes);
if (index >= 0 && 0 == compareWinRes(pResKey, pDelWins, index)) {
taosArrayRemove(pDelWins, index);
+ delSize = taosArrayGetSize(pDelWins);
}
}
}
@@ -930,7 +939,7 @@ bool isOverdue(TSKEY ts, STimeWindowAggSupp* pSup) {
bool isCloseWindow(STimeWindow* pWin, STimeWindowAggSupp* pSup) { return isOverdue(pWin->ekey, pSup); }
static void hashIntervalAgg(SOperatorInfo* pOperatorInfo, SResultRowInfo* pResultRowInfo, SSDataBlock* pBlock,
- int32_t scanFlag, SHashObj* pUpdatedMap) {
+ int32_t scanFlag) {
SIntervalAggOperatorInfo* pInfo = (SIntervalAggOperatorInfo*)pOperatorInfo->info;
SExecTaskInfo* pTaskInfo = pOperatorInfo->pTaskInfo;
@@ -946,21 +955,11 @@ static void hashIntervalAgg(SOperatorInfo* pOperatorInfo, SResultRowInfo* pResul
STimeWindow win =
getActiveTimeWindow(pInfo->aggSup.pResultBuf, pResultRowInfo, ts, &pInfo->interval, pInfo->inputOrder);
- int32_t ret = TSDB_CODE_SUCCESS;
- if ((!pInfo->ignoreExpiredData || !isCloseWindow(&win, &pInfo->twAggSup)) &&
- inSlidingWindow(&pInfo->interval, &win, &pBlock->info)) {
- ret = setTimeWindowOutputBuf(pResultRowInfo, &win, (scanFlag == MAIN_SCAN), &pResult, tableGroupId, pSup->pCtx,
- numOfOutput, pSup->rowEntryInfoOffset, &pInfo->aggSup, pTaskInfo);
- if (ret != TSDB_CODE_SUCCESS || pResult == NULL) {
- longjmp(pTaskInfo->env, TSDB_CODE_QRY_OUT_OF_MEMORY);
- }
-
- if (pInfo->execModel == OPTR_EXEC_MODEL_STREAM && pInfo->twAggSup.calTrigger == STREAM_TRIGGER_AT_ONCE) {
- saveWinResultRow(pResult, tableGroupId, pUpdatedMap);
- setResultBufPageDirty(pInfo->aggSup.pResultBuf, &pResultRowInfo->cur);
- }
+ int32_t ret = setTimeWindowOutputBuf(pResultRowInfo, &win, (scanFlag == MAIN_SCAN), &pResult, tableGroupId,
+ pSup->pCtx, numOfOutput, pSup->rowEntryInfoOffset, &pInfo->aggSup, pTaskInfo);
+ if (ret != TSDB_CODE_SUCCESS || pResult == NULL) {
+ T_LONG_JMP(pTaskInfo->env, TSDB_CODE_QRY_OUT_OF_MEMORY);
}
-
TSKEY ekey = ascScan ? win.ekey : win.skey;
int32_t forwardRows =
getNumOfRowsInTimeWindow(&pBlock->info, tsCols, startPos, ekey, binarySearchForKey, NULL, pInfo->inputOrder);
@@ -968,26 +967,23 @@ static void hashIntervalAgg(SOperatorInfo* pOperatorInfo, SResultRowInfo* pResul
// prev time window not interpolation yet.
if (pInfo->timeWindowInterpo) {
- SResultRowPosition pos = addToOpenWindowList(pResultRowInfo, pResult);
+ SResultRowPosition pos = addToOpenWindowList(pResultRowInfo, pResult, tableGroupId);
doInterpUnclosedTimeWindow(pOperatorInfo, numOfOutput, pResultRowInfo, pBlock, scanFlag, tsCols, &pos);
// restore current time window
ret = setTimeWindowOutputBuf(pResultRowInfo, &win, (scanFlag == MAIN_SCAN), &pResult, tableGroupId, pSup->pCtx,
numOfOutput, pSup->rowEntryInfoOffset, &pInfo->aggSup, pTaskInfo);
if (ret != TSDB_CODE_SUCCESS) {
- longjmp(pTaskInfo->env, TSDB_CODE_QRY_OUT_OF_MEMORY);
+ T_LONG_JMP(pTaskInfo->env, TSDB_CODE_QRY_OUT_OF_MEMORY);
}
// window start key interpolation
doWindowBorderInterpolation(pInfo, pBlock, pResult, &win, startPos, forwardRows, pSup);
}
- if ((!pInfo->ignoreExpiredData || !isCloseWindow(&win, &pInfo->twAggSup)) &&
- inSlidingWindow(&pInfo->interval, &win, &pBlock->info)) {
- updateTimeWindowInfo(&pInfo->twAggSup.timeWindowData, &win, true);
- doApplyFunctions(pTaskInfo, pSup->pCtx, &pInfo->twAggSup.timeWindowData, startPos, forwardRows,
- pBlock->info.rows, numOfOutput);
- }
+ updateTimeWindowInfo(&pInfo->twAggSup.timeWindowData, &win, true);
+ doApplyFunctions(pTaskInfo, pSup->pCtx, &pInfo->twAggSup.timeWindowData, startPos, forwardRows, pBlock->info.rows,
+ numOfOutput);
doCloseWindow(pResultRowInfo, pInfo, pResult);
@@ -998,35 +994,31 @@ static void hashIntervalAgg(SOperatorInfo* pOperatorInfo, SResultRowInfo* pResul
if (startPos < 0) {
break;
}
- if (pInfo->ignoreExpiredData && isCloseWindow(&nextWin, &pInfo->twAggSup)) {
- ekey = ascScan ? nextWin.ekey : nextWin.skey;
- forwardRows =
- getNumOfRowsInTimeWindow(&pBlock->info, tsCols, startPos, ekey, binarySearchForKey, NULL, pInfo->inputOrder);
- continue;
- }
-
// null data, failed to allocate more memory buffer
int32_t code = setTimeWindowOutputBuf(pResultRowInfo, &nextWin, (scanFlag == MAIN_SCAN), &pResult, tableGroupId,
pSup->pCtx, numOfOutput, pSup->rowEntryInfoOffset, &pInfo->aggSup, pTaskInfo);
if (code != TSDB_CODE_SUCCESS || pResult == NULL) {
- longjmp(pTaskInfo->env, TSDB_CODE_QRY_OUT_OF_MEMORY);
- }
-
- if (pInfo->execModel == OPTR_EXEC_MODEL_STREAM && pInfo->twAggSup.calTrigger == STREAM_TRIGGER_AT_ONCE) {
- saveWinResultRow(pResult, tableGroupId, pUpdatedMap);
- setResultBufPageDirty(pInfo->aggSup.pResultBuf, &pResultRowInfo->cur);
+ T_LONG_JMP(pTaskInfo->env, TSDB_CODE_QRY_OUT_OF_MEMORY);
}
ekey = ascScan ? nextWin.ekey : nextWin.skey;
forwardRows =
getNumOfRowsInTimeWindow(&pBlock->info, tsCols, startPos, ekey, binarySearchForKey, NULL, pInfo->inputOrder);
-
// window start(end) key interpolation
doWindowBorderInterpolation(pInfo, pBlock, pResult, &nextWin, startPos, forwardRows, pSup);
-
+ // TODO: add to open window? how to close the open windows after input blocks exhausted?
+#if 0
+ if ((ascScan && ekey <= pBlock->info.window.ekey) ||
+ (!ascScan && ekey >= pBlock->info.window.skey)) {
+ // window start(end) key interpolation
+ doWindowBorderInterpolation(pInfo, pBlock, pResult, &nextWin, startPos, forwardRows, pSup);
+ } else if (pInfo->timeWindowInterpo) {
+ addToOpenWindowList(pResultRowInfo, pResult, tableGroupId);
+ }
+#endif
updateTimeWindowInfo(&pInfo->twAggSup.timeWindowData, &nextWin, true);
- doApplyFunctions(pTaskInfo, pSup->pCtx, &pInfo->twAggSup.timeWindowData, startPos, forwardRows,
- pBlock->info.rows, numOfOutput);
+ doApplyFunctions(pTaskInfo, pSup->pCtx, &pInfo->twAggSup.timeWindowData, startPos, forwardRows, pBlock->info.rows,
+ numOfOutput);
doCloseWindow(pResultRowInfo, pInfo, pResult);
}
@@ -1043,20 +1035,23 @@ void doCloseWindow(SResultRowInfo* pResultRowInfo, const SIntervalAggOperatorInf
}
}
-SResultRowPosition addToOpenWindowList(SResultRowInfo* pResultRowInfo, const SResultRow* pResult) {
- SResultRowPosition pos = (SResultRowPosition){.pageId = pResult->pageId, .offset = pResult->offset};
- SListNode* pn = tdListGetTail(pResultRowInfo->openWindow);
+SResultRowPosition addToOpenWindowList(SResultRowInfo* pResultRowInfo, const SResultRow* pResult, uint64_t groupId) {
+ SOpenWindowInfo openWin = {0};
+ openWin.pos.pageId = pResult->pageId;
+ openWin.pos.offset = pResult->offset;
+ openWin.groupId = groupId;
+ SListNode* pn = tdListGetTail(pResultRowInfo->openWindow);
if (pn == NULL) {
- tdListAppend(pResultRowInfo->openWindow, &pos);
- return pos;
+ tdListAppend(pResultRowInfo->openWindow, &openWin);
+ return openWin.pos;
}
- SResultRowPosition* px = (SResultRowPosition*)pn->data;
- if (px->pageId != pos.pageId || px->offset != pos.offset) {
- tdListAppend(pResultRowInfo->openWindow, &pos);
+ SOpenWindowInfo* px = (SOpenWindowInfo*)pn->data;
+ if (px->pos.pageId != openWin.pos.pageId || px->pos.offset != openWin.pos.offset || px->groupId != openWin.groupId) {
+ tdListAppend(pResultRowInfo->openWindow, &openWin);
}
- return pos;
+ return openWin.pos;
}
int64_t* extractTsCol(SSDataBlock* pBlock, const SIntervalAggOperatorInfo* pInfo) {
@@ -1110,7 +1105,7 @@ static int32_t doOpenIntervalAgg(SOperatorInfo* pOperator) {
setInputDataBlock(pOperator, pSup->pCtx, pBlock, pInfo->inputOrder, scanFlag, true);
blockDataUpdateTsWindow(pBlock, pInfo->primaryTsIndex);
- hashIntervalAgg(pOperator, &pInfo->binfo.resultRowInfo, pBlock, scanFlag, NULL);
+ hashIntervalAgg(pOperator, &pInfo->binfo.resultRowInfo, pBlock, scanFlag);
}
initGroupedResultInfo(&pInfo->groupResInfo, pInfo->aggSup.pResultRowHashTable, pInfo->resultTsOrder);
@@ -1185,7 +1180,7 @@ static void doStateWindowAggImpl(SOperatorInfo* pOperator, SStateWindowOperatorI
int32_t ret = setTimeWindowOutputBuf(&pInfo->binfo.resultRowInfo, &window, masterScan, &pResult, gid, pSup->pCtx,
numOfOutput, pSup->rowEntryInfoOffset, &pInfo->aggSup, pTaskInfo);
if (ret != TSDB_CODE_SUCCESS) { // null data, too many state code
- longjmp(pTaskInfo->env, TSDB_CODE_QRY_APP_ERROR);
+ T_LONG_JMP(pTaskInfo->env, TSDB_CODE_QRY_APP_ERROR);
}
updateTimeWindowInfo(&pInfo->twAggSup.timeWindowData, &window, false);
@@ -1210,12 +1205,12 @@ static void doStateWindowAggImpl(SOperatorInfo* pOperator, SStateWindowOperatorI
int32_t ret = setTimeWindowOutputBuf(&pInfo->binfo.resultRowInfo, &pRowSup->win, masterScan, &pResult, gid,
pSup->pCtx, numOfOutput, pSup->rowEntryInfoOffset, &pInfo->aggSup, pTaskInfo);
if (ret != TSDB_CODE_SUCCESS) { // null data, too many state code
- longjmp(pTaskInfo->env, TSDB_CODE_QRY_APP_ERROR);
+ T_LONG_JMP(pTaskInfo->env, TSDB_CODE_QRY_APP_ERROR);
}
updateTimeWindowInfo(&pInfo->twAggSup.timeWindowData, &pRowSup->win, false);
- doApplyFunctions(pTaskInfo, pSup->pCtx, &pInfo->twAggSup.timeWindowData, pRowSup->startRowIndex,
- pRowSup->numOfRows, pBlock->info.rows, numOfOutput);
+ doApplyFunctions(pTaskInfo, pSup->pCtx, &pInfo->twAggSup.timeWindowData, pRowSup->startRowIndex, pRowSup->numOfRows,
+ pBlock->info.rows, numOfOutput);
}
static SSDataBlock* doStateWindowAgg(SOperatorInfo* pOperator) {
@@ -1383,7 +1378,7 @@ bool doClearWindow(SAggSupporter* pAggSup, SExprSupp* pSup, char* pData, int16_t
int32_t numOfOutput) {
SET_RES_WINDOW_KEY(pAggSup->keyBuf, pData, bytes, groupId);
SResultRowPosition* p1 =
- (SResultRowPosition*)taosHashGet(pAggSup->pResultRowHashTable, pAggSup->keyBuf, GET_RES_WINDOW_KEY_LEN(bytes));
+ (SResultRowPosition*)tSimpleHashGet(pAggSup->pResultRowHashTable, pAggSup->keyBuf, GET_RES_WINDOW_KEY_LEN(bytes));
if (!p1) {
// window has been closed
return false;
@@ -1396,31 +1391,40 @@ bool doDeleteIntervalWindow(SAggSupporter* pAggSup, TSKEY ts, uint64_t groupId)
size_t bytes = sizeof(TSKEY);
SET_RES_WINDOW_KEY(pAggSup->keyBuf, &ts, bytes, groupId);
SResultRowPosition* p1 =
- (SResultRowPosition*)taosHashGet(pAggSup->pResultRowHashTable, pAggSup->keyBuf, GET_RES_WINDOW_KEY_LEN(bytes));
+ (SResultRowPosition*)tSimpleHashGet(pAggSup->pResultRowHashTable, pAggSup->keyBuf, GET_RES_WINDOW_KEY_LEN(bytes));
if (!p1) {
// window has been closed
return false;
}
// SFilePage* bufPage = getBufPage(pAggSup->pResultBuf, p1->pageId);
// dBufSetBufPageRecycled(pAggSup->pResultBuf, bufPage);
- taosHashRemove(pAggSup->pResultRowHashTable, pAggSup->keyBuf, GET_RES_WINDOW_KEY_LEN(bytes));
+ tSimpleHashRemove(pAggSup->pResultRowHashTable, pAggSup->keyBuf, GET_RES_WINDOW_KEY_LEN(bytes));
return true;
}
-void doDeleteSpecifyIntervalWindow(SAggSupporter* pAggSup, SSDataBlock* pBlock, SArray* pUpWins, SInterval* pInterval) {
+void doDeleteSpecifyIntervalWindow(SAggSupporter* pAggSup, SSDataBlock* pBlock, SArray* pDelWins, SInterval* pInterval,
+ SHashObj* pUpdatedMap) {
SColumnInfoData* pStartCol = taosArrayGet(pBlock->pDataBlock, START_TS_COLUMN_INDEX);
TSKEY* tsStarts = (TSKEY*)pStartCol->pData;
+ SColumnInfoData* pEndCol = taosArrayGet(pBlock->pDataBlock, END_TS_COLUMN_INDEX);
+ TSKEY* tsEnds = (TSKEY*)pEndCol->pData;
SColumnInfoData* pGroupCol = taosArrayGet(pBlock->pDataBlock, GROUPID_COLUMN_INDEX);
uint64_t* groupIds = (uint64_t*)pGroupCol->pData;
for (int32_t i = 0; i < pBlock->info.rows; i++) {
SResultRowInfo dumyInfo;
dumyInfo.cur.pageId = -1;
STimeWindow win = getActiveTimeWindow(NULL, &dumyInfo, tsStarts[i], pInterval, TSDB_ORDER_ASC);
- doDeleteIntervalWindow(pAggSup, win.skey, groupIds[i]);
- if (pUpWins) {
- SWinRes winRes = {.ts = win.skey, .groupId = groupIds[i]};
- taosArrayPush(pUpWins, &winRes);
- }
+ do {
+ doDeleteIntervalWindow(pAggSup, win.skey, groupIds[i]);
+ SWinKey winRes = {.ts = win.skey, .groupId = groupIds[i]};
+ if (pDelWins) {
+ taosArrayPush(pDelWins, &winRes);
+ }
+ if (pUpdatedMap) {
+ taosHashRemove(pUpdatedMap, &winRes, sizeof(SWinKey));
+ }
+ getNextTimeWindow(pInterval, pInterval->precision, TSDB_ORDER_ASC, &win);
+ } while (win.skey <= tsEnds[i]);
}
}
@@ -1430,22 +1434,17 @@ static void doClearWindows(SAggSupporter* pAggSup, SExprSupp* pSup1, SInterval*
TSKEY* startTsCols = (TSKEY*)pStartTsCol->pData;
SColumnInfoData* pEndTsCol = taosArrayGet(pBlock->pDataBlock, END_TS_COLUMN_INDEX);
TSKEY* endTsCols = (TSKEY*)pEndTsCol->pData;
- uint64_t* pGpDatas = NULL;
- if (pBlock->info.type == STREAM_RETRIEVE) {
- SColumnInfoData* pGpCol = taosArrayGet(pBlock->pDataBlock, GROUPID_COLUMN_INDEX);
- pGpDatas = (uint64_t*)pGpCol->pData;
- }
- int32_t step = 0;
- int32_t startPos = 0;
+ SColumnInfoData* pGpCol = taosArrayGet(pBlock->pDataBlock, GROUPID_COLUMN_INDEX);
+ uint64_t* pGpDatas = (uint64_t*)pGpCol->pData;
for (int32_t i = 0; i < pBlock->info.rows; i++) {
SResultRowInfo dumyInfo;
dumyInfo.cur.pageId = -1;
STimeWindow win = getActiveTimeWindow(NULL, &dumyInfo, startTsCols[i], pInterval, TSDB_ORDER_ASC);
while (win.ekey <= endTsCols[i]) {
- uint64_t winGpId = pGpDatas ? pGpDatas[startPos] : pBlock->info.groupId;
+ uint64_t winGpId = pGpDatas[i];
bool res = doClearWindow(pAggSup, pSup1, (char*)&win.skey, sizeof(TSKEY), winGpId, numOfOutput);
if (pUpWins && res) {
- SWinRes winRes = {.ts = win.skey, .groupId = winGpId};
+ SWinKey winRes = {.ts = win.skey, .groupId = winGpId};
taosArrayPush(pUpWins, &winRes);
}
getNextTimeWindow(pInterval, pInterval->precision, TSDB_ORDER_ASC, &win);
@@ -1453,11 +1452,12 @@ static void doClearWindows(SAggSupporter* pAggSup, SExprSupp* pSup1, SInterval*
}
}
-static int32_t getAllIntervalWindow(SHashObj* pHashMap, SHashObj* resWins) {
- void* pIte = NULL;
- size_t keyLen = 0;
- while ((pIte = taosHashIterate(pHashMap, pIte)) != NULL) {
- void* key = taosHashGetKey(pIte, &keyLen);
+static int32_t getAllIntervalWindow(SSHashObj* pHashMap, SHashObj* resWins) {
+ void* pIte = NULL;
+ size_t keyLen = 0;
+ int32_t iter = 0;
+ while ((pIte = tSimpleHashIterate(pHashMap, pIte, &iter)) != NULL) {
+ void* key = tSimpleHashGetKey(pIte, &keyLen);
uint64_t groupId = *(uint64_t*)key;
ASSERT(keyLen == GET_RES_WINDOW_KEY_LEN(sizeof(TSKEY)));
TSKEY ts = *(int64_t*)((char*)key + sizeof(uint64_t));
@@ -1470,25 +1470,26 @@ static int32_t getAllIntervalWindow(SHashObj* pHashMap, SHashObj* resWins) {
return TSDB_CODE_SUCCESS;
}
-static int32_t closeIntervalWindow(SHashObj* pHashMap, STimeWindowAggSupp* pSup, SInterval* pInterval,
+static int32_t closeIntervalWindow(SSHashObj* pHashMap, STimeWindowAggSupp* pSup, SInterval* pInterval,
SHashObj* pPullDataMap, SHashObj* closeWins, SArray* pRecyPages,
SDiskbasedBuf* pDiscBuf) {
qDebug("===stream===close interval window");
- void* pIte = NULL;
- size_t keyLen = 0;
- while ((pIte = taosHashIterate(pHashMap, pIte)) != NULL) {
- void* key = taosHashGetKey(pIte, &keyLen);
+ void* pIte = NULL;
+ size_t keyLen = 0;
+ int32_t iter = 0;
+ while ((pIte = tSimpleHashIterate(pHashMap, pIte, &iter)) != NULL) {
+ void* key = tSimpleHashGetKey(pIte, &keyLen);
uint64_t groupId = *(uint64_t*)key;
ASSERT(keyLen == GET_RES_WINDOW_KEY_LEN(sizeof(TSKEY)));
TSKEY ts = *(int64_t*)((char*)key + sizeof(uint64_t));
STimeWindow win;
win.skey = ts;
win.ekey = taosTimeAdd(win.skey, pInterval->interval, pInterval->intervalUnit, pInterval->precision) - 1;
- SWinRes winRe = {
+ SWinKey winRe = {
.ts = win.skey,
.groupId = groupId,
};
- void* chIds = taosHashGet(pPullDataMap, &winRe, sizeof(SWinRes));
+ void* chIds = taosHashGet(pPullDataMap, &winRe, sizeof(SWinKey));
if (isCloseWindow(&win, pSup)) {
if (chIds && pPullDataMap) {
SArray* chAy = *(SArray**)chIds;
@@ -1515,7 +1516,7 @@ static int32_t closeIntervalWindow(SHashObj* pHashMap, STimeWindowAggSupp* pSup,
}
char keyBuf[GET_RES_WINDOW_KEY_LEN(sizeof(TSKEY))];
SET_RES_WINDOW_KEY(keyBuf, &ts, sizeof(TSKEY), groupId);
- taosHashRemove(pHashMap, keyBuf, keyLen);
+ tSimpleHashIterateRemove(pHashMap, keyBuf, keyLen, &pIte, &iter);
}
}
return TSDB_CODE_SUCCESS;
@@ -1552,119 +1553,15 @@ static void doBuildDeleteResult(SArray* pWins, int32_t* index, SSDataBlock* pBlo
return;
}
blockDataEnsureCapacity(pBlock, size - *index);
- SColumnInfoData* pTsCol = taosArrayGet(pBlock->pDataBlock, START_TS_COLUMN_INDEX);
- SColumnInfoData* pGroupCol = taosArrayGet(pBlock->pDataBlock, GROUPID_COLUMN_INDEX);
+ uint64_t uid = 0;
for (int32_t i = *index; i < size; i++) {
- SWinRes* pWin = taosArrayGet(pWins, i);
- colDataAppend(pTsCol, pBlock->info.rows, (const char*)&pWin->ts, false);
- colDataAppend(pGroupCol, pBlock->info.rows, (const char*)&pWin->groupId, false);
- pBlock->info.rows++;
+ SWinKey* pWin = taosArrayGet(pWins, i);
+ appendOneRow(pBlock, &pWin->ts, &pWin->ts, &uid, &pWin->groupId);
(*index)++;
}
}
-static SSDataBlock* doStreamIntervalAgg(SOperatorInfo* pOperator) {
- SIntervalAggOperatorInfo* pInfo = pOperator->info;
- SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo;
-
- pInfo->inputOrder = TSDB_ORDER_ASC;
- SExprSupp* pSup = &pOperator->exprSupp;
-
- if (pOperator->status == OP_EXEC_DONE) {
- return NULL;
- }
-
- if (pOperator->status == OP_RES_TO_RETURN) {
- doBuildDeleteResult(pInfo->pDelWins, &pInfo->delIndex, pInfo->pDelRes);
- if (pInfo->pDelRes->info.rows > 0) {
- return pInfo->pDelRes;
- }
-
- doBuildResultDatablock(pOperator, &pInfo->binfo, &pInfo->groupResInfo, pInfo->aggSup.pResultBuf);
- if (pInfo->binfo.pRes->info.rows == 0 || !hasRemainResults(&pInfo->groupResInfo)) {
- pOperator->status = OP_EXEC_DONE;
- qDebug("===stream===single interval is done");
- freeAllPages(pInfo->pRecycledPages, pInfo->aggSup.pResultBuf);
- }
- printDataBlock(pInfo->binfo.pRes, "single interval");
- return pInfo->binfo.pRes->info.rows == 0 ? NULL : pInfo->binfo.pRes;
- }
-
- SOperatorInfo* downstream = pOperator->pDownstream[0];
-
- SArray* pUpdated = taosArrayInit(4, POINTER_BYTES); // SResKeyPos
- _hash_fn_t hashFn = taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY);
- SHashObj* pUpdatedMap = taosHashInit(1024, hashFn, false, HASH_NO_LOCK);
- while (1) {
- SSDataBlock* pBlock = downstream->fpSet.getNextFn(downstream);
- if (pBlock == NULL) {
- break;
- }
- // qInfo("===stream===%ld", pBlock->info.version);
- printDataBlock(pBlock, "single interval recv");
-
- if (pBlock->info.type == STREAM_CLEAR) {
- doClearWindows(&pInfo->aggSup, &pOperator->exprSupp, &pInfo->interval, pOperator->exprSupp.numOfExprs, pBlock,
- NULL);
- qDebug("%s clear existed time window results for updates checked", GET_TASKID(pTaskInfo));
- continue;
- }
- if (pBlock->info.type == STREAM_DELETE_DATA) {
- doDeleteSpecifyIntervalWindow(&pInfo->aggSup, pBlock, pInfo->pDelWins, &pInfo->interval);
- continue;
- } else if (pBlock->info.type == STREAM_GET_ALL) {
- getAllIntervalWindow(pInfo->aggSup.pResultRowHashTable, pUpdatedMap);
- continue;
- }
-
- if (pBlock->info.type == STREAM_NORMAL && pBlock->info.version != 0) {
- // set input version
- pTaskInfo->version = pBlock->info.version;
- }
-
- if (pInfo->scalarSupp.pExprInfo != NULL) {
- SExprSupp* pExprSup = &pInfo->scalarSupp;
- projectApplyFunctions(pExprSup->pExprInfo, pBlock, pBlock, pExprSup->pCtx, pExprSup->numOfExprs, NULL);
- }
-
- // The timewindow that overlaps the timestamps of the input pBlock need to be recalculated and return to the
- // caller. Note that all the time window are not close till now.
- // the pDataBlock are always the same one, no need to call this again
- setInputDataBlock(pOperator, pSup->pCtx, pBlock, pInfo->inputOrder, MAIN_SCAN, true);
- if (pInfo->invertible) {
- setInverFunction(pSup->pCtx, pOperator->exprSupp.numOfExprs, pBlock->info.type);
- }
-
- pInfo->twAggSup.maxTs = TMAX(pInfo->twAggSup.maxTs, pBlock->info.window.ekey);
- hashIntervalAgg(pOperator, &pInfo->binfo.resultRowInfo, pBlock, MAIN_SCAN, pUpdatedMap);
- }
-
- pOperator->status = OP_RES_TO_RETURN;
- closeIntervalWindow(pInfo->aggSup.pResultRowHashTable, &pInfo->twAggSup, &pInfo->interval, NULL, pUpdatedMap,
- pInfo->pRecycledPages, pInfo->aggSup.pResultBuf);
-
- void* pIte = NULL;
- while ((pIte = taosHashIterate(pUpdatedMap, pIte)) != NULL) {
- taosArrayPush(pUpdated, pIte);
- }
- taosArraySort(pUpdated, resultrowComparAsc);
-
- finalizeUpdatedResult(pOperator->exprSupp.numOfExprs, pInfo->aggSup.pResultBuf, pUpdated, pSup->rowEntryInfoOffset);
- initMultiResInfoFromArrayList(&pInfo->groupResInfo, pUpdated);
- blockDataEnsureCapacity(pInfo->binfo.pRes, pOperator->resultInfo.capacity);
- removeDeleteResults(pUpdatedMap, pInfo->pDelWins);
- taosHashCleanup(pUpdatedMap);
- doBuildDeleteResult(pInfo->pDelWins, &pInfo->delIndex, pInfo->pDelRes);
- if (pInfo->pDelRes->info.rows > 0) {
- return pInfo->pDelRes;
- }
-
- doBuildResultDatablock(pOperator, &pInfo->binfo, &pInfo->groupResInfo, pInfo->aggSup.pResultBuf);
- printDataBlock(pInfo->binfo.pRes, "single interval");
- return pInfo->binfo.pRes->info.rows == 0 ? NULL : pInfo->binfo.pRes;
-}
-
-static void destroyStateWindowOperatorInfo(void* param, int32_t numOfOutput) {
+static void destroyStateWindowOperatorInfo(void* param) {
SStateWindowOperatorInfo* pInfo = (SStateWindowOperatorInfo*)param;
cleanupBasicInfo(&pInfo->binfo);
taosMemoryFreeClear(pInfo->stateKey.pData);
@@ -1677,10 +1574,14 @@ static void freeItem(void* param) {
taosMemoryFree(pKey->pData);
}
-void destroyIntervalOperatorInfo(void* param, int32_t numOfOutput) {
+void destroyIntervalOperatorInfo(void* param) {
SIntervalAggOperatorInfo* pInfo = (SIntervalAggOperatorInfo*)param;
cleanupBasicInfo(&pInfo->binfo);
cleanupAggSup(&pInfo->aggSup);
+ cleanupExprSupp(&pInfo->scalarSupp);
+
+ tdListFree(pInfo->binfo.resultRowInfo.openWindow);
+
pInfo->pRecycledPages = taosArrayDestroy(pInfo->pRecycledPages);
pInfo->pInterpCols = taosArrayDestroy(pInfo->pInterpCols);
taosArrayDestroyEx(pInfo->pPrevValues, freeItem);
@@ -1694,7 +1595,7 @@ void destroyIntervalOperatorInfo(void* param, int32_t numOfOutput) {
taosMemoryFreeClear(param);
}
-void destroyStreamFinalIntervalOperatorInfo(void* param, int32_t numOfOutput) {
+void destroyStreamFinalIntervalOperatorInfo(void* param) {
SStreamFinalIntervalOperatorInfo* pInfo = (SStreamFinalIntervalOperatorInfo*)param;
cleanupBasicInfo(&pInfo->binfo);
cleanupAggSup(&pInfo->aggSup);
@@ -1711,7 +1612,7 @@ void destroyStreamFinalIntervalOperatorInfo(void* param, int32_t numOfOutput) {
int32_t size = taosArrayGetSize(pInfo->pChildren);
for (int32_t i = 0; i < size; i++) {
SOperatorInfo* pChildOp = taosArrayGetP(pInfo->pChildren, i);
- destroyStreamFinalIntervalOperatorInfo(pChildOp->info, numOfOutput);
+ destroyStreamFinalIntervalOperatorInfo(pChildOp->info);
taosMemoryFree(pChildOp->pDownstream);
cleanupExprSupp(&pChildOp->exprSupp);
taosMemoryFreeClear(pChildOp);
@@ -1777,20 +1678,17 @@ static bool timeWindowinterpNeeded(SqlFunctionCtx* pCtx, int32_t numOfCols, SInt
return needed;
}
-void increaseTs(SqlFunctionCtx* pCtx) {
- if (pCtx[0].pExpr->pExpr->_function.pFunctNode->funcType == FUNCTION_TYPE_WSTART) {
- pCtx[0].increase = true;
- }
-}
-
-void initIntervalDownStream(SOperatorInfo* downstream, uint16_t type, SAggSupporter* pSup) {
+void initIntervalDownStream(SOperatorInfo* downstream, uint16_t type, SAggSupporter* pSup, SInterval* pInterval,
+ int64_t waterMark) {
if (downstream->operatorType != QUERY_NODE_PHYSICAL_PLAN_STREAM_SCAN) {
- // Todo(liuyao) support partition by column
+ initIntervalDownStream(downstream->pDownstream[0], type, pSup, pInterval, waterMark);
return;
}
SStreamScanInfo* pScanInfo = downstream->info;
- pScanInfo->sessionSup.parentType = type;
- pScanInfo->sessionSup.pIntervalAggSup = pSup;
+ pScanInfo->windowSup.parentType = type;
+ pScanInfo->windowSup.pIntervalAggSup = pSup;
+ pScanInfo->pUpdateInfo = updateInfoInitP(pInterval, waterMark);
+ pScanInfo->interval = *pInterval;
}
void initStreamFunciton(SqlFunctionCtx* pCtx, int32_t numOfExpr) {
@@ -1836,11 +1734,14 @@ SOperatorInfo* createIntervalOperatorInfo(SOperatorInfo* downstream, SExprInfo*
initResultSizeInfo(&pOperator->resultInfo, 4096);
int32_t code = initAggInfo(pSup, &pInfo->aggSup, pExprInfo, numOfCols, keyBufSize, pTaskInfo->id.str);
+ if (code != TSDB_CODE_SUCCESS) {
+ goto _error;
+ }
+
initBasicInfo(&pInfo->binfo, pResBlock);
if (isStream) {
ASSERT(numOfCols > 0);
- increaseTs(pSup->pCtx);
initStreamFunciton(pSup->pCtx, pSup->numOfExprs);
}
@@ -1851,13 +1752,14 @@ SOperatorInfo* createIntervalOperatorInfo(SOperatorInfo* downstream, SExprInfo*
pInfo->timeWindowInterpo = timeWindowinterpNeeded(pSup->pCtx, numOfCols, pInfo);
if (pInfo->timeWindowInterpo) {
- pInfo->binfo.resultRowInfo.openWindow = tdListNew(sizeof(SResultRowPosition));
+ pInfo->binfo.resultRowInfo.openWindow = tdListNew(sizeof(SOpenWindowInfo));
if (pInfo->binfo.resultRowInfo.openWindow == NULL) {
goto _error;
}
}
+
pInfo->pRecycledPages = taosArrayInit(4, sizeof(int32_t));
- pInfo->pDelWins = taosArrayInit(4, sizeof(SWinRes));
+ pInfo->pDelWins = taosArrayInit(4, sizeof(SWinKey));
pInfo->delIndex = 0;
pInfo->pDelRes = createSpecialDataBlock(STREAM_DELETE_RESULT);
initResultRowInfo(&pInfo->binfo.resultRowInfo);
@@ -1866,15 +1768,14 @@ SOperatorInfo* createIntervalOperatorInfo(SOperatorInfo* downstream, SExprInfo*
pOperator->operatorType = QUERY_NODE_PHYSICAL_PLAN_HASH_INTERVAL;
pOperator->blocking = true;
pOperator->status = OP_NOT_OPENED;
- pOperator->exprSupp.pExprInfo = pExprInfo;
- pOperator->exprSupp.numOfExprs = numOfCols;
pOperator->info = pInfo;
- pOperator->fpSet = createOperatorFpSet(doOpenIntervalAgg, doBuildIntervalResult, doStreamIntervalAgg, NULL,
+ pOperator->fpSet = createOperatorFpSet(doOpenIntervalAgg, doBuildIntervalResult, NULL, NULL,
destroyIntervalOperatorInfo, aggEncodeResultRow, aggDecodeResultRow, NULL);
if (nodeType(pPhyNode) == QUERY_NODE_PHYSICAL_PLAN_STREAM_INTERVAL) {
- initIntervalDownStream(downstream, QUERY_NODE_PHYSICAL_PLAN_STREAM_INTERVAL, &pInfo->aggSup);
+ initIntervalDownStream(downstream, QUERY_NODE_PHYSICAL_PLAN_STREAM_INTERVAL, &pInfo->aggSup, &pInfo->interval,
+ pInfo->twAggSup.waterMark);
}
code = appendDownstream(pOperator, &downstream, 1);
@@ -1885,7 +1786,7 @@ SOperatorInfo* createIntervalOperatorInfo(SOperatorInfo* downstream, SExprInfo*
return pOperator;
_error:
- destroyIntervalOperatorInfo(pInfo, numOfCols);
+ destroyIntervalOperatorInfo(pInfo);
taosMemoryFreeClear(pOperator);
pTaskInfo->code = code;
return NULL;
@@ -1918,8 +1819,8 @@ static void doSessionWindowAggImpl(SOperatorInfo* pOperator, SSessionAggOperator
if (gid != pRowSup->groupId || pInfo->winSup.prevTs == INT64_MIN) {
doKeepNewWindowStartInfo(pRowSup, tsList, j, gid);
doKeepTuple(pRowSup, tsList[j], gid);
- } else if ((tsList[j] - pRowSup->prevTs >= 0) && tsList[j] - pRowSup->prevTs <= gap ||
- (pRowSup->prevTs - tsList[j] >= 0) && (pRowSup->prevTs - tsList[j] <= gap)) {
+ } else if (((tsList[j] - pRowSup->prevTs >= 0) && (tsList[j] - pRowSup->prevTs <= gap)) ||
+ ((pRowSup->prevTs - tsList[j] >= 0) && (pRowSup->prevTs - tsList[j] <= gap))) {
// The gap is less than the threshold, so it belongs to current session window that has been opened already.
doKeepTuple(pRowSup, tsList[j], gid);
if (j == 0 && pRowSup->startRowIndex != 0) {
@@ -1935,7 +1836,7 @@ static void doSessionWindowAggImpl(SOperatorInfo* pOperator, SSessionAggOperator
int32_t ret = setTimeWindowOutputBuf(&pInfo->binfo.resultRowInfo, &window, masterScan, &pResult, gid, pSup->pCtx,
numOfOutput, pSup->rowEntryInfoOffset, &pInfo->aggSup, pTaskInfo);
if (ret != TSDB_CODE_SUCCESS) { // null data, too many state code
- longjmp(pTaskInfo->env, TSDB_CODE_QRY_APP_ERROR);
+ T_LONG_JMP(pTaskInfo->env, TSDB_CODE_QRY_APP_ERROR);
}
// pInfo->numOfRows data belong to the current session window
@@ -1954,12 +1855,12 @@ static void doSessionWindowAggImpl(SOperatorInfo* pOperator, SSessionAggOperator
int32_t ret = setTimeWindowOutputBuf(&pInfo->binfo.resultRowInfo, &pRowSup->win, masterScan, &pResult, gid,
pSup->pCtx, numOfOutput, pSup->rowEntryInfoOffset, &pInfo->aggSup, pTaskInfo);
if (ret != TSDB_CODE_SUCCESS) { // null data, too many state code
- longjmp(pTaskInfo->env, TSDB_CODE_QRY_APP_ERROR);
+ T_LONG_JMP(pTaskInfo->env, TSDB_CODE_QRY_APP_ERROR);
}
updateTimeWindowInfo(&pInfo->twAggSup.timeWindowData, &pRowSup->win, false);
- doApplyFunctions(pTaskInfo, pSup->pCtx, &pInfo->twAggSup.timeWindowData, pRowSup->startRowIndex,
- pRowSup->numOfRows, pBlock->info.rows, numOfOutput);
+ doApplyFunctions(pTaskInfo, pSup->pCtx, &pInfo->twAggSup.timeWindowData, pRowSup->startRowIndex, pRowSup->numOfRows,
+ pBlock->info.rows, numOfOutput);
}
static SSDataBlock* doSessionWindowAgg(SOperatorInfo* pOperator) {
@@ -2112,6 +2013,7 @@ static void genInterpolationResult(STimeSliceOperatorInfo* pSliceInfo, SExprSupp
// todo set the correct primary timestamp column
// output the result
+ bool hasInterp = true;
for (int32_t j = 0; j < pExprSup->numOfExprs; ++j) {
SExprInfo* pExprInfo = &pExprSup->pExprInfo[j];
int32_t srcSlot = pExprInfo->base.pParam[0].pCol->slotId;
@@ -2123,7 +2025,6 @@ static void genInterpolationResult(STimeSliceOperatorInfo* pSliceInfo, SExprSupp
switch (pSliceInfo->fillType) {
case TSDB_FILL_NULL: {
colDataAppendNULL(pDst, rows);
- pResBlock->info.rows += 1;
break;
}
@@ -2143,7 +2044,6 @@ static void genInterpolationResult(STimeSliceOperatorInfo* pSliceInfo, SExprSupp
GET_TYPED_DATA(v, int64_t, pVar->nType, &pVar->i);
colDataAppend(pDst, rows, (char*)&v, false);
}
- pResBlock->info.rows += 1;
break;
}
@@ -2157,6 +2057,7 @@ static void genInterpolationResult(STimeSliceOperatorInfo* pSliceInfo, SExprSupp
// before interp range, do not fill
if (start.key == INT64_MIN || end.key == INT64_MAX) {
+ hasInterp = false;
break;
}
@@ -2168,28 +2069,27 @@ static void genInterpolationResult(STimeSliceOperatorInfo* pSliceInfo, SExprSupp
}
taosMemoryFree(current.val);
- pResBlock->info.rows += 1;
break;
}
case TSDB_FILL_PREV: {
if (!pSliceInfo->isPrevRowSet) {
+ hasInterp = false;
break;
}
SGroupKeys* pkey = taosArrayGet(pSliceInfo->pPrevRow, srcSlot);
colDataAppend(pDst, rows, pkey->pData, false);
- pResBlock->info.rows += 1;
break;
}
case TSDB_FILL_NEXT: {
if (!pSliceInfo->isNextRowSet) {
+ hasInterp = false;
break;
}
SGroupKeys* pkey = taosArrayGet(pSliceInfo->pNextRow, srcSlot);
colDataAppend(pDst, rows, pkey->pData, false);
- pResBlock->info.rows += 1;
break;
}
@@ -2198,6 +2098,10 @@ static void genInterpolationResult(STimeSliceOperatorInfo* pSliceInfo, SExprSupp
break;
}
}
+
+ if (hasInterp) {
+ pResBlock->info.rows += 1;
+ }
}
static int32_t initPrevRowsKeeper(STimeSliceOperatorInfo* pInfo, SSDataBlock* pBlock) {
@@ -2342,7 +2246,7 @@ static SSDataBlock* doTimeslice(SOperatorInfo* pOperator) {
int32_t code = initKeeperInfo(pSliceInfo, pBlock);
if (code != TSDB_CODE_SUCCESS) {
- longjmp(pTaskInfo->env, code);
+ T_LONG_JMP(pTaskInfo->env, code);
}
// the pDataBlock are always the same one, no need to call this again
@@ -2378,6 +2282,11 @@ static SSDataBlock* doTimeslice(SOperatorInfo* pOperator) {
SColumnInfoData* pSrc = taosArrayGet(pBlock->pDataBlock, srcSlot);
SColumnInfoData* pDst = taosArrayGet(pResBlock->pDataBlock, dstSlot);
+ if (colDataIsNull_s(pSrc, i)) {
+ colDataAppendNULL(pDst, pResBlock->info.rows);
+ continue;
+ }
+
char* v = colDataGetData(pSrc, i);
colDataAppend(pDst, pResBlock->info.rows, v, false);
}
@@ -2570,7 +2479,7 @@ static SSDataBlock* doTimeslice(SOperatorInfo* pOperator) {
return pResBlock->info.rows == 0 ? NULL : pResBlock;
}
-void destroyTimeSliceOperatorInfo(void* param, int32_t numOfOutput) {
+void destroyTimeSliceOperatorInfo(void* param) {
STimeSliceOperatorInfo* pInfo = (STimeSliceOperatorInfo*)param;
pInfo->pRes = blockDataDestroy(pInfo->pRes);
@@ -2657,20 +2566,26 @@ _error:
return NULL;
}
-SOperatorInfo* createStatewindowOperatorInfo(SOperatorInfo* downstream, SExprInfo* pExpr, int32_t numOfCols,
- SSDataBlock* pResBlock, STimeWindowAggSupp* pTwAggSup, int32_t tsSlotId,
- SColumn* pStateKeyCol, SNode* pCondition, SExecTaskInfo* pTaskInfo) {
+SOperatorInfo* createStatewindowOperatorInfo(SOperatorInfo* downstream, SStateWinodwPhysiNode* pStateNode,
+ SExecTaskInfo* pTaskInfo) {
SStateWindowOperatorInfo* pInfo = taosMemoryCalloc(1, sizeof(SStateWindowOperatorInfo));
SOperatorInfo* pOperator = taosMemoryCalloc(1, sizeof(SOperatorInfo));
if (pInfo == NULL || pOperator == NULL) {
goto _error;
}
- pInfo->stateCol = *pStateKeyCol;
+ int32_t num = 0;
+ SExprInfo* pExprInfo = createExprInfo(pStateNode->window.pFuncs, NULL, &num);
+ SSDataBlock* pResBlock = createResDataBlock(pStateNode->window.node.pOutputDataBlockDesc);
+ int32_t tsSlotId = ((SColumnNode*)pStateNode->window.pTspk)->slotId;
+
+ SColumnNode* pColNode = (SColumnNode*)((STargetNode*)pStateNode->pStateKey)->pExpr;
+
+ pInfo->stateCol = extractColumnFromColumnNode(pColNode);
pInfo->stateKey.type = pInfo->stateCol.type;
pInfo->stateKey.bytes = pInfo->stateCol.bytes;
pInfo->stateKey.pData = taosMemoryCalloc(1, pInfo->stateCol.bytes);
- pInfo->pCondition = pCondition;
+ pInfo->pCondition = pStateNode->window.node.pConditions;
if (pInfo->stateKey.pData == NULL) {
goto _error;
}
@@ -2678,12 +2593,17 @@ SOperatorInfo* createStatewindowOperatorInfo(SOperatorInfo* downstream, SExprInf
size_t keyBufSize = sizeof(int64_t) + sizeof(int64_t) + POINTER_BYTES;
initResultSizeInfo(&pOperator->resultInfo, 4096);
- initAggInfo(&pOperator->exprSupp, &pInfo->aggSup, pExpr, numOfCols, keyBufSize, pTaskInfo->id.str);
- initBasicInfo(&pInfo->binfo, pResBlock);
+ int32_t code = initAggInfo(&pOperator->exprSupp, &pInfo->aggSup, pExprInfo, num, keyBufSize, pTaskInfo->id.str);
+ if (code != TSDB_CODE_SUCCESS) {
+ goto _error;
+ }
+ initBasicInfo(&pInfo->binfo, pResBlock);
initResultRowInfo(&pInfo->binfo.resultRowInfo);
- pInfo->twAggSup = *pTwAggSup;
+ pInfo->twAggSup =
+ (STimeWindowAggSupp){.waterMark = pStateNode->window.watermark, .calTrigger = pStateNode->window.triggerType};
+ ;
initExecTimeWindowInfo(&pInfo->twAggSup.timeWindowData, &pTaskInfo->window);
pInfo->tsSlotId = tsSlotId;
@@ -2691,26 +2611,33 @@ SOperatorInfo* createStatewindowOperatorInfo(SOperatorInfo* downstream, SExprInf
pOperator->operatorType = QUERY_NODE_PHYSICAL_PLAN_MERGE_STATE;
pOperator->blocking = true;
pOperator->status = OP_NOT_OPENED;
- pOperator->exprSupp.pExprInfo = pExpr;
- pOperator->exprSupp.numOfExprs = numOfCols;
pOperator->pTaskInfo = pTaskInfo;
pOperator->info = pInfo;
pOperator->fpSet = createOperatorFpSet(operatorDummyOpenFn, doStateWindowAgg, NULL, NULL,
destroyStateWindowOperatorInfo, aggEncodeResultRow, aggDecodeResultRow, NULL);
- int32_t code = appendDownstream(pOperator, &downstream, 1);
+ code = appendDownstream(pOperator, &downstream, 1);
+ if (code != TSDB_CODE_SUCCESS) {
+ goto _error;
+ }
+
return pOperator;
_error:
- pTaskInfo->code = TSDB_CODE_SUCCESS;
+ destroyStateWindowOperatorInfo(pInfo);
+ taosMemoryFreeClear(pOperator);
+ pTaskInfo->code = code;
return NULL;
}
-void destroySWindowOperatorInfo(void* param, int32_t numOfOutput) {
+void destroySWindowOperatorInfo(void* param) {
SSessionAggOperatorInfo* pInfo = (SSessionAggOperatorInfo*)param;
- cleanupBasicInfo(&pInfo->binfo);
+ if (pInfo == NULL) {
+ return;
+ }
+ cleanupBasicInfo(&pInfo->binfo);
colDataDestroy(&pInfo->twAggSup.timeWindowData);
cleanupAggSup(&pInfo->aggSup);
@@ -2757,40 +2684,50 @@ SOperatorInfo* createSessionAggOperatorInfo(SOperatorInfo* downstream, SSessionW
pOperator->operatorType = QUERY_NODE_PHYSICAL_PLAN_MERGE_SESSION;
pOperator->blocking = true;
pOperator->status = OP_NOT_OPENED;
- pOperator->exprSupp.pExprInfo = pExprInfo;
- pOperator->exprSupp.numOfExprs = numOfCols;
pOperator->info = pInfo;
pOperator->fpSet = createOperatorFpSet(operatorDummyOpenFn, doSessionWindowAgg, NULL, NULL,
destroySWindowOperatorInfo, aggEncodeResultRow, aggDecodeResultRow, NULL);
pOperator->pTaskInfo = pTaskInfo;
-
code = appendDownstream(pOperator, &downstream, 1);
+ if (code != TSDB_CODE_SUCCESS) {
+ goto _error;
+ }
+
return pOperator;
_error:
- if (pInfo != NULL) {
- destroySWindowOperatorInfo(pInfo, numOfCols);
- }
-
+ destroySWindowOperatorInfo(pInfo);
taosMemoryFreeClear(pOperator);
pTaskInfo->code = code;
return NULL;
}
void compactFunctions(SqlFunctionCtx* pDestCtx, SqlFunctionCtx* pSourceCtx, int32_t numOfOutput,
- SExecTaskInfo* pTaskInfo) {
+ SExecTaskInfo* pTaskInfo, SColumnInfoData* pTimeWindowData) {
for (int32_t k = 0; k < numOfOutput; ++k) {
if (fmIsWindowPseudoColumnFunc(pDestCtx[k].functionId)) {
- continue;
- }
- int32_t code = TSDB_CODE_SUCCESS;
- if (functionNeedToExecute(&pDestCtx[k]) && pDestCtx[k].fpSet.combine != NULL) {
- code = pDestCtx[k].fpSet.combine(&pDestCtx[k], &pSourceCtx[k]);
+ if (!pTimeWindowData) {
+ continue;
+ }
+
+ SResultRowEntryInfo* pEntryInfo = GET_RES_INFO(&pDestCtx[k]);
+ char* p = GET_ROWCELL_INTERBUF(pEntryInfo);
+ SColumnInfoData idata = {0};
+ idata.info.type = TSDB_DATA_TYPE_BIGINT;
+ idata.info.bytes = tDataTypes[TSDB_DATA_TYPE_BIGINT].bytes;
+ idata.pData = p;
+
+ SScalarParam out = {.columnData = &idata};
+ SScalarParam tw = {.numOfRows = 5, .columnData = pTimeWindowData};
+ pDestCtx[k].sfp.process(&tw, 1, &out);
+ pEntryInfo->numOfRes = 1;
+ } else if (functionNeedToExecute(&pDestCtx[k]) && pDestCtx[k].fpSet.combine != NULL) {
+ int32_t code = pDestCtx[k].fpSet.combine(&pDestCtx[k], &pSourceCtx[k]);
if (code != TSDB_CODE_SUCCESS) {
qError("%s apply functions error, code: %s", GET_TASKID(pTaskInfo), tstrerror(code));
pTaskInfo->code = code;
- longjmp(pTaskInfo->env, code);
+ T_LONG_JMP(pTaskInfo->env, code);
}
}
}
@@ -2800,24 +2737,34 @@ bool hasIntervalWindow(SAggSupporter* pSup, TSKEY ts, uint64_t groupId) {
int32_t bytes = sizeof(TSKEY);
SET_RES_WINDOW_KEY(pSup->keyBuf, &ts, bytes, groupId);
SResultRowPosition* p1 =
- (SResultRowPosition*)taosHashGet(pSup->pResultRowHashTable, pSup->keyBuf, GET_RES_WINDOW_KEY_LEN(bytes));
+ (SResultRowPosition*)tSimpleHashGet(pSup->pResultRowHashTable, pSup->keyBuf, GET_RES_WINDOW_KEY_LEN(bytes));
return p1 != NULL;
}
+STimeWindow getFinalTimeWindow(int64_t ts, SInterval* pInterval) {
+ STimeWindow w = {.skey = ts, .ekey = INT64_MAX};
+ w.ekey = taosTimeAdd(w.skey, pInterval->interval, pInterval->intervalUnit, pInterval->precision) - 1;
+ return w;
+}
+
static void rebuildIntervalWindow(SStreamFinalIntervalOperatorInfo* pInfo, SExprSupp* pSup, SArray* pWinArray,
- int32_t groupId, int32_t numOfOutput, SExecTaskInfo* pTaskInfo, SArray* pUpdated) {
+ int32_t groupId, int32_t numOfOutput, SExecTaskInfo* pTaskInfo,
+ SHashObj* pUpdatedMap) {
int32_t size = taosArrayGetSize(pWinArray);
if (!pInfo->pChildren) {
return;
}
for (int32_t i = 0; i < size; i++) {
- SWinRes* pWinRes = taosArrayGet(pWinArray, i);
+ SWinKey* pWinRes = taosArrayGet(pWinArray, i);
SResultRow* pCurResult = NULL;
- STimeWindow ParentWin = {.skey = pWinRes->ts, .ekey = pWinRes->ts + 1};
- setTimeWindowOutputBuf(&pInfo->binfo.resultRowInfo, &ParentWin, true, &pCurResult, pWinRes->groupId, pSup->pCtx,
+ STimeWindow parentWin = getFinalTimeWindow(pWinRes->ts, &pInfo->interval);
+ if (isDeletedWindow(&parentWin, pWinRes->groupId, &pInfo->aggSup) && isCloseWindow(&parentWin, &pInfo->twAggSup)) {
+ continue;
+ }
+ setTimeWindowOutputBuf(&pInfo->binfo.resultRowInfo, &parentWin, true, &pCurResult, pWinRes->groupId, pSup->pCtx,
numOfOutput, pSup->rowEntryInfoOffset, &pInfo->aggSup, pTaskInfo);
int32_t numOfChildren = taosArrayGetSize(pInfo->pChildren);
- bool find = true;
+ int32_t num = 0;
for (int32_t j = 0; j < numOfChildren; j++) {
SOperatorInfo* pChildOp = taosArrayGetP(pInfo->pChildren, j);
SIntervalAggOperatorInfo* pChInfo = pChildOp->info;
@@ -2825,15 +2772,16 @@ static void rebuildIntervalWindow(SStreamFinalIntervalOperatorInfo* pInfo, SExpr
if (!hasIntervalWindow(&pChInfo->aggSup, pWinRes->ts, pWinRes->groupId)) {
continue;
}
- find = true;
+ num++;
SResultRow* pChResult = NULL;
- setTimeWindowOutputBuf(&pChInfo->binfo.resultRowInfo, &ParentWin, true, &pChResult, pWinRes->groupId,
+ setTimeWindowOutputBuf(&pChInfo->binfo.resultRowInfo, &parentWin, true, &pChResult, pWinRes->groupId,
pChildSup->pCtx, pChildSup->numOfExprs, pChildSup->rowEntryInfoOffset, &pChInfo->aggSup,
pTaskInfo);
- compactFunctions(pSup->pCtx, pChildSup->pCtx, numOfOutput, pTaskInfo);
+ updateTimeWindowInfo(&pInfo->twAggSup.timeWindowData, &parentWin, true);
+ compactFunctions(pSup->pCtx, pChildSup->pCtx, numOfOutput, pTaskInfo, &pInfo->twAggSup.timeWindowData);
}
- if (find && pUpdated) {
- saveResultRow(pCurResult, pWinRes->groupId, pUpdated);
+ if (num > 0 && pUpdatedMap) {
+ saveWinResultRow(pCurResult, pWinRes->groupId, pUpdatedMap);
setResultBufPageDirty(pInfo->aggSup.pResultBuf, &pInfo->binfo.resultRowInfo.cur);
}
}
@@ -2841,8 +2789,8 @@ static void rebuildIntervalWindow(SStreamFinalIntervalOperatorInfo* pInfo, SExpr
bool isDeletedWindow(STimeWindow* pWin, uint64_t groupId, SAggSupporter* pSup) {
SET_RES_WINDOW_KEY(pSup->keyBuf, &pWin->skey, sizeof(int64_t), groupId);
- SResultRowPosition* p1 = (SResultRowPosition*)taosHashGet(pSup->pResultRowHashTable, pSup->keyBuf,
- GET_RES_WINDOW_KEY_LEN(sizeof(int64_t)));
+ SResultRowPosition* p1 = (SResultRowPosition*)tSimpleHashGet(pSup->pResultRowHashTable, pSup->keyBuf,
+ GET_RES_WINDOW_KEY_LEN(sizeof(int64_t)));
return p1 == NULL;
}
@@ -2854,31 +2802,24 @@ int32_t getNexWindowPos(SInterval* pInterval, SDataBlockInfo* pBlockInfo, TSKEY*
return getNextQualifiedWindow(pInterval, pNextWin, pBlockInfo, tsCols, prevEndPos, TSDB_ORDER_ASC);
}
-void addPullWindow(SHashObj* pMap, SWinRes* pWinRes, int32_t size) {
+void addPullWindow(SHashObj* pMap, SWinKey* pWinRes, int32_t size) {
SArray* childIds = taosArrayInit(8, sizeof(int32_t));
for (int32_t i = 0; i < size; i++) {
taosArrayPush(childIds, &i);
}
- taosHashPut(pMap, pWinRes, sizeof(SWinRes), &childIds, sizeof(void*));
+ taosHashPut(pMap, pWinRes, sizeof(SWinKey), &childIds, sizeof(void*));
}
static int32_t getChildIndex(SSDataBlock* pBlock) { return pBlock->info.childId; }
-STimeWindow getFinalTimeWindow(int64_t ts, SInterval* pInterval) {
- STimeWindow w = {.skey = ts, .ekey = INT64_MAX};
- w.ekey = taosTimeAdd(w.skey, pInterval->interval, pInterval->intervalUnit, pInterval->precision) - 1;
- return w;
-}
-
-static void doHashInterval(SOperatorInfo* pOperatorInfo, SSDataBlock* pSDataBlock, uint64_t tableGroupId,
- SHashObj* pUpdatedMap) {
+static void doHashIntervalAgg(SOperatorInfo* pOperatorInfo, SSDataBlock* pSDataBlock, uint64_t tableGroupId,
+ SHashObj* pUpdatedMap) {
SStreamFinalIntervalOperatorInfo* pInfo = (SStreamFinalIntervalOperatorInfo*)pOperatorInfo->info;
SResultRowInfo* pResultRowInfo = &(pInfo->binfo.resultRowInfo);
SExecTaskInfo* pTaskInfo = pOperatorInfo->pTaskInfo;
SExprSupp* pSup = &pOperatorInfo->exprSupp;
int32_t numOfOutput = pSup->numOfExprs;
int32_t step = 1;
- bool ascScan = true;
TSKEY* tsCols = NULL;
SResultRow* pResult = NULL;
int32_t forwardRows = 0;
@@ -2887,7 +2828,7 @@ static void doHashInterval(SOperatorInfo* pOperatorInfo, SSDataBlock* pSDataBloc
SColumnInfoData* pColDataInfo = taosArrayGet(pSDataBlock->pDataBlock, pInfo->primaryTsIndex);
tsCols = (int64_t*)pColDataInfo->pData;
- int32_t startPos = ascScan ? 0 : (pSDataBlock->info.rows - 1);
+ int32_t startPos = 0;
TSKEY ts = getStartTsKey(&pSDataBlock->info.window, tsCols);
STimeWindow nextWin = {0};
if (IS_FINAL_OP(pInfo)) {
@@ -2906,11 +2847,11 @@ static void doHashInterval(SOperatorInfo* pOperatorInfo, SSDataBlock* pSDataBloc
}
if (IS_FINAL_OP(pInfo) && isClosed && pInfo->pChildren) {
bool ignore = true;
- SWinRes winRes = {
+ SWinKey winRes = {
.ts = nextWin.skey,
.groupId = tableGroupId,
};
- void* chIds = taosHashGet(pInfo->pPullDataMap, &winRes, sizeof(SWinRes));
+ void* chIds = taosHashGet(pInfo->pPullDataMap, &winRes, sizeof(SWinKey));
if (isDeletedWindow(&nextWin, tableGroupId, &pInfo->aggSup) && !chIds) {
SPullWindowInfo pull = {.window = nextWin, .groupId = tableGroupId};
// add pull data request
@@ -2944,7 +2885,7 @@ static void doHashInterval(SOperatorInfo* pOperatorInfo, SSDataBlock* pSDataBloc
int32_t code = setTimeWindowOutputBuf(pResultRowInfo, &nextWin, true, &pResult, tableGroupId, pSup->pCtx,
numOfOutput, pSup->rowEntryInfoOffset, &pInfo->aggSup, pTaskInfo);
if (code != TSDB_CODE_SUCCESS || pResult == NULL) {
- longjmp(pTaskInfo->env, TSDB_CODE_QRY_OUT_OF_MEMORY);
+ T_LONG_JMP(pTaskInfo->env, TSDB_CODE_QRY_OUT_OF_MEMORY);
}
if (IS_FINAL_OP(pInfo)) {
@@ -2970,10 +2911,10 @@ static void doHashInterval(SOperatorInfo* pOperatorInfo, SSDataBlock* pSDataBloc
}
static void clearStreamIntervalOperator(SStreamFinalIntervalOperatorInfo* pInfo) {
- taosHashClear(pInfo->aggSup.pResultRowHashTable);
+ tSimpleHashClear(pInfo->aggSup.pResultRowHashTable);
clearDiskbasedBuf(pInfo->aggSup.pResultBuf);
- cleanupResultRowInfo(&pInfo->binfo.resultRowInfo);
initResultRowInfo(&pInfo->binfo.resultRowInfo);
+ pInfo->aggSup.currentPageId = -1;
}
static void clearSpecialDataBlock(SSDataBlock* pBlock) {
@@ -3039,8 +2980,8 @@ void processPullOver(SSDataBlock* pBlock, SHashObj* pMap) {
uint64_t* groupIdData = (uint64_t*)pGroupCol->pData;
int32_t chId = getChildIndex(pBlock);
for (int32_t i = 0; i < pBlock->info.rows; i++) {
- SWinRes winRes = {.ts = tsData[i], .groupId = groupIdData[i]};
- void* chIds = taosHashGet(pMap, &winRes, sizeof(SWinRes));
+ SWinKey winRes = {.ts = tsData[i], .groupId = groupIdData[i]};
+ void* chIds = taosHashGet(pMap, &winRes, sizeof(SWinKey));
if (chIds) {
SArray* chArray = *(SArray**)chIds;
int32_t index = taosArraySearchIdx(chArray, &chId, compareInt32Val, TD_EQ);
@@ -3049,13 +2990,38 @@ void processPullOver(SSDataBlock* pBlock, SHashObj* pMap) {
taosArrayRemove(chArray, index);
if (taosArrayGetSize(chArray) == 0) {
// pull data is over
- taosHashRemove(pMap, &winRes, sizeof(SWinRes));
+ taosHashRemove(pMap, &winRes, sizeof(SWinKey));
}
}
}
}
}
+static void addRetriveWindow(SArray* wins, SStreamFinalIntervalOperatorInfo* pInfo) {
+ int32_t size = taosArrayGetSize(wins);
+ for (int32_t i = 0; i < size; i++) {
+ SWinKey* winKey = taosArrayGet(wins, i);
+ STimeWindow nextWin = getFinalTimeWindow(winKey->ts, &pInfo->interval);
+ if (isCloseWindow(&nextWin, &pInfo->twAggSup) && !pInfo->ignoreExpiredData) {
+ void* chIds = taosHashGet(pInfo->pPullDataMap, winKey, sizeof(SWinKey));
+ if (!chIds) {
+ SPullWindowInfo pull = {.window = nextWin, .groupId = winKey->groupId};
+ // add pull data request
+ savePullWindow(&pull, pInfo->pPullWins);
+ int32_t size = taosArrayGetSize(pInfo->pChildren);
+ addPullWindow(pInfo->pPullDataMap, winKey, size);
+ qDebug("===stream===prepare retrive for delete %" PRId64 ", size:%d", winKey->ts, size);
+ }
+ }
+ }
+}
+
+static void clearFunctionContext(SExprSupp* pSup) {
+ for (int32_t i = 0; i < pSup->numOfExprs; i++) {
+ pSup->pCtx[i].saveHandle.currentPage = -1;
+ }
+}
+
static SSDataBlock* doStreamFinalIntervalAgg(SOperatorInfo* pOperator) {
SStreamFinalIntervalOperatorInfo* pInfo = pOperator->info;
@@ -3080,12 +3046,21 @@ static SSDataBlock* doStreamFinalIntervalAgg(SOperatorInfo* pOperator) {
return pInfo->pPullDataRes;
}
+ doBuildDeleteResult(pInfo->pDelWins, &pInfo->delIndex, pInfo->pDelRes);
+ if (pInfo->pDelRes->info.rows != 0) {
+ // process the rest of the data
+ printDataBlock(pInfo->pDelRes, IS_FINAL_OP(pInfo) ? "interval final" : "interval semi");
+ return pInfo->pDelRes;
+ }
+
doBuildResultDatablock(pOperator, &pInfo->binfo, &pInfo->groupResInfo, pInfo->aggSup.pResultBuf);
if (pInfo->binfo.pRes->info.rows == 0) {
pOperator->status = OP_EXEC_DONE;
if (!IS_FINAL_OP(pInfo)) {
+ clearFunctionContext(&pOperator->exprSupp);
// semi interval operator clear disk buffer
clearStreamIntervalOperator(pInfo);
+ qDebug("===stream===clear semi operator");
} else {
freeAllPages(pInfo->pRecycledPages, pInfo->aggSup.pResultBuf);
}
@@ -3129,11 +3104,11 @@ static SSDataBlock* doStreamFinalIntervalAgg(SOperatorInfo* pOperator) {
maxTs = TMAX(maxTs, pBlock->info.window.ekey);
maxTs = TMAX(maxTs, pBlock->info.watermark);
- if (pBlock->info.type == STREAM_NORMAL || pBlock->info.type == STREAM_PULL_DATA ||
- pBlock->info.type == STREAM_INVALID) {
+ ASSERT(pBlock->info.type != STREAM_INVERT);
+ if (pBlock->info.type == STREAM_NORMAL || pBlock->info.type == STREAM_PULL_DATA) {
pInfo->binfo.pRes->info.type = pBlock->info.type;
} else if (pBlock->info.type == STREAM_CLEAR) {
- SArray* pUpWins = taosArrayInit(8, sizeof(SWinRes));
+ SArray* pUpWins = taosArrayInit(8, sizeof(SWinKey));
doClearWindows(&pInfo->aggSup, pSup, &pInfo->interval, pOperator->exprSupp.numOfExprs, pBlock, pUpWins);
if (IS_FINAL_OP(pInfo)) {
int32_t childIndex = getChildIndex(pBlock);
@@ -3149,29 +3124,34 @@ static SSDataBlock* doStreamFinalIntervalAgg(SOperatorInfo* pOperator) {
}
removeResults(pUpWins, pUpdatedMap);
copyDataBlock(pInfo->pUpdateRes, pBlock);
- // copyUpdateDataBlock(pInfo->pUpdateRes, pBlock, pInfo->primaryTsIndex);
pInfo->returnUpdate = true;
taosArrayDestroy(pUpWins);
break;
} else if (pBlock->info.type == STREAM_DELETE_DATA || pBlock->info.type == STREAM_DELETE_RESULT) {
- doDeleteSpecifyIntervalWindow(&pInfo->aggSup, pBlock, pInfo->pDelWins, &pInfo->interval);
+ SArray* delWins = taosArrayInit(8, sizeof(SWinKey));
+ doDeleteSpecifyIntervalWindow(&pInfo->aggSup, pBlock, delWins, &pInfo->interval, pUpdatedMap);
if (IS_FINAL_OP(pInfo)) {
int32_t childIndex = getChildIndex(pBlock);
SOperatorInfo* pChildOp = taosArrayGetP(pInfo->pChildren, childIndex);
SStreamFinalIntervalOperatorInfo* pChildInfo = pChildOp->info;
SExprSupp* pChildSup = &pChildOp->exprSupp;
- doDeleteSpecifyIntervalWindow(&pChildInfo->aggSup, pBlock, NULL, &pChildInfo->interval);
- rebuildIntervalWindow(pInfo, pSup, pInfo->pDelWins, pInfo->binfo.pRes->info.groupId,
- pOperator->exprSupp.numOfExprs, pOperator->pTaskInfo, pUpdated);
+ doDeleteSpecifyIntervalWindow(&pChildInfo->aggSup, pBlock, NULL, &pChildInfo->interval, NULL);
+ rebuildIntervalWindow(pInfo, pSup, delWins, pInfo->binfo.pRes->info.groupId, pOperator->exprSupp.numOfExprs,
+ pOperator->pTaskInfo, pUpdatedMap);
+ addRetriveWindow(delWins, pInfo);
+ taosArrayAddAll(pInfo->pDelWins, delWins);
+ taosArrayDestroy(delWins);
continue;
}
- removeResults(pInfo->pDelWins, pUpdatedMap);
+ removeResults(delWins, pUpdatedMap);
+ taosArrayAddAll(pInfo->pDelWins, delWins);
+ taosArrayDestroy(delWins);
break;
} else if (pBlock->info.type == STREAM_GET_ALL && IS_FINAL_OP(pInfo)) {
getAllIntervalWindow(pInfo->aggSup.pResultRowHashTable, pUpdatedMap);
continue;
} else if (pBlock->info.type == STREAM_RETRIEVE && !IS_FINAL_OP(pInfo)) {
- SArray* pUpWins = taosArrayInit(8, sizeof(SWinRes));
+ SArray* pUpWins = taosArrayInit(8, sizeof(SWinKey));
doClearWindows(&pInfo->aggSup, pSup, &pInfo->interval, pOperator->exprSupp.numOfExprs, pBlock, pUpWins);
removeResults(pUpWins, pUpdatedMap);
taosArrayDestroy(pUpWins);
@@ -3189,7 +3169,7 @@ static SSDataBlock* doStreamFinalIntervalAgg(SOperatorInfo* pOperator) {
projectApplyFunctions(pExprSup->pExprInfo, pBlock, pBlock, pExprSup->pCtx, pExprSup->numOfExprs, NULL);
}
setInputDataBlock(pOperator, pSup->pCtx, pBlock, pInfo->order, MAIN_SCAN, true);
- doHashInterval(pOperator, pBlock, pBlock->info.groupId, pUpdatedMap);
+ doHashIntervalAgg(pOperator, pBlock, pBlock->info.groupId, pUpdatedMap);
if (IS_FINAL_OP(pInfo)) {
int32_t chIndex = getChildIndex(pBlock);
int32_t size = taosArrayGetSize(pInfo->pChildren);
@@ -3197,7 +3177,7 @@ static SSDataBlock* doStreamFinalIntervalAgg(SOperatorInfo* pOperator) {
for (int32_t i = 0; i < chIndex + 1 - size; i++) {
SOperatorInfo* pChildOp = createStreamFinalIntervalOperatorInfo(NULL, pInfo->pPhyNode, pOperator->pTaskInfo, 0);
if (!pChildOp) {
- longjmp(pOperator->pTaskInfo->env, TSDB_CODE_QRY_OUT_OF_MEMORY);
+ T_LONG_JMP(pOperator->pTaskInfo->env, TSDB_CODE_QRY_OUT_OF_MEMORY);
}
SStreamFinalIntervalOperatorInfo* pTmpInfo = pChildOp->info;
pTmpInfo->twAggSup.calTrigger = STREAM_TRIGGER_AT_ONCE;
@@ -3207,7 +3187,7 @@ static SSDataBlock* doStreamFinalIntervalAgg(SOperatorInfo* pOperator) {
SOperatorInfo* pChildOp = taosArrayGetP(pInfo->pChildren, chIndex);
SStreamFinalIntervalOperatorInfo* pChInfo = pChildOp->info;
setInputDataBlock(pChildOp, pChildOp->exprSupp.pCtx, pBlock, pChInfo->order, MAIN_SCAN, true);
- doHashInterval(pChildOp, pBlock, pBlock->info.groupId, NULL);
+ doHashIntervalAgg(pChildOp, pBlock, pBlock->info.groupId, NULL);
}
}
@@ -3239,6 +3219,7 @@ static SSDataBlock* doStreamFinalIntervalAgg(SOperatorInfo* pOperator) {
return pInfo->pPullDataRes;
}
+ // we should send result first.
doBuildResultDatablock(pOperator, &pInfo->binfo, &pInfo->groupResInfo, pInfo->aggSup.pResultBuf);
if (pInfo->binfo.pRes->info.rows != 0) {
printDataBlock(pInfo->binfo.pRes, IS_FINAL_OP(pInfo) ? "interval final" : "interval semi");
@@ -3262,40 +3243,6 @@ static SSDataBlock* doStreamFinalIntervalAgg(SOperatorInfo* pOperator) {
return NULL;
}
-SSDataBlock* createSpecialDataBlock(EStreamType type) {
- SSDataBlock* pBlock = taosMemoryCalloc(1, sizeof(SSDataBlock));
- pBlock->info.hasVarCol = false;
- pBlock->info.groupId = 0;
- pBlock->info.rows = 0;
- pBlock->info.type = type;
- pBlock->info.rowSize =
- sizeof(TSKEY) + sizeof(TSKEY) + sizeof(uint64_t) + sizeof(uint64_t) + sizeof(TSKEY) + sizeof(TSKEY);
- pBlock->info.watermark = INT64_MIN;
-
- pBlock->pDataBlock = taosArrayInit(6, sizeof(SColumnInfoData));
- SColumnInfoData infoData = {0};
- infoData.info.type = TSDB_DATA_TYPE_TIMESTAMP;
- infoData.info.bytes = sizeof(TSKEY);
- // window start ts
- taosArrayPush(pBlock->pDataBlock, &infoData);
- // window end ts
- taosArrayPush(pBlock->pDataBlock, &infoData);
-
- infoData.info.type = TSDB_DATA_TYPE_UBIGINT;
- infoData.info.bytes = sizeof(uint64_t);
- // uid
- taosArrayPush(pBlock->pDataBlock, &infoData);
- // group id
- taosArrayPush(pBlock->pDataBlock, &infoData);
-
- // calculate start ts
- taosArrayPush(pBlock->pDataBlock, &infoData);
- // calculate end ts
- taosArrayPush(pBlock->pDataBlock, &infoData);
-
- return pBlock;
-}
-
SOperatorInfo* createStreamFinalIntervalOperatorInfo(SOperatorInfo* downstream, SPhysiNode* pPhyNode,
SExecTaskInfo* pTaskInfo, int32_t numOfChild) {
SIntervalPhysiNode* pIntervalPhyNode = (SIntervalPhysiNode*)pPhyNode;
@@ -3336,15 +3283,16 @@ SOperatorInfo* createStreamFinalIntervalOperatorInfo(SOperatorInfo* downstream,
SSDataBlock* pResBlock = createResDataBlock(pPhyNode->pOutputDataBlockDesc);
int32_t code = initAggInfo(&pOperator->exprSupp, &pInfo->aggSup, pExprInfo, numOfCols, keyBufSize, pTaskInfo->id.str);
+ if (code != TSDB_CODE_SUCCESS) {
+ goto _error;
+ }
+
initStreamFunciton(pOperator->exprSupp.pCtx, pOperator->exprSupp.numOfExprs);
initBasicInfo(&pInfo->binfo, pResBlock);
ASSERT(numOfCols > 0);
- increaseTs(pOperator->exprSupp.pCtx);
initExecTimeWindowInfo(&pInfo->twAggSup.timeWindowData, &pTaskInfo->window);
- if (code != TSDB_CODE_SUCCESS) {
- goto _error;
- }
+
initResultRowInfo(&pInfo->binfo.resultRowInfo);
pInfo->pChildren = NULL;
if (numOfChild > 0) {
@@ -3373,6 +3321,7 @@ SOperatorInfo* createStreamFinalIntervalOperatorInfo(SOperatorInfo* downstream,
// semi interval operator does not catch result
pInfo->isFinal = false;
pOperator->name = "StreamSemiIntervalOperator";
+ ASSERT(pInfo->aggSup.currentPageId == -1);
}
if (!IS_FINAL_OP(pInfo) || numOfChild == 0) {
@@ -3386,21 +3335,19 @@ SOperatorInfo* createStreamFinalIntervalOperatorInfo(SOperatorInfo* downstream,
pInfo->ignoreExpiredData = pIntervalPhyNode->window.igExpired;
pInfo->pDelRes = createSpecialDataBlock(STREAM_DELETE_RESULT);
pInfo->delIndex = 0;
- pInfo->pDelWins = taosArrayInit(4, sizeof(SWinRes));
+ pInfo->pDelWins = taosArrayInit(4, sizeof(SWinKey));
pInfo->pRecycledPages = taosArrayInit(4, sizeof(int32_t));
pOperator->operatorType = pPhyNode->type;
pOperator->blocking = true;
pOperator->status = OP_NOT_OPENED;
- pOperator->exprSupp.pExprInfo = pExprInfo;
- pOperator->exprSupp.numOfExprs = numOfCols;
pOperator->info = pInfo;
pOperator->fpSet =
createOperatorFpSet(NULL, doStreamFinalIntervalAgg, NULL, NULL, destroyStreamFinalIntervalOperatorInfo,
aggEncodeResultRow, aggDecodeResultRow, NULL);
if (pPhyNode->type == QUERY_NODE_PHYSICAL_PLAN_STREAM_SEMI_INTERVAL) {
- initIntervalDownStream(downstream, pPhyNode->type, &pInfo->aggSup);
+ initIntervalDownStream(downstream, pPhyNode->type, &pInfo->aggSup, &pInfo->interval, pInfo->twAggSup.waterMark);
}
code = appendDownstream(pOperator, &downstream, 1);
if (code != TSDB_CODE_SUCCESS) {
@@ -3410,7 +3357,7 @@ SOperatorInfo* createStreamFinalIntervalOperatorInfo(SOperatorInfo* downstream,
return pOperator;
_error:
- destroyStreamFinalIntervalOperatorInfo(pInfo, numOfCols);
+ destroyStreamFinalIntervalOperatorInfo(pInfo);
taosMemoryFreeClear(pOperator);
pTaskInfo->code = code;
return NULL;
@@ -3448,7 +3395,7 @@ void destroyStateStreamAggSupporter(SStreamAggSupporter* pSup) {
blockDataDestroy(pSup->pScanBlock);
}
-void destroyStreamSessionAggOperatorInfo(void* param, int32_t numOfOutput) {
+void destroyStreamSessionAggOperatorInfo(void* param) {
SStreamSessionAggOperatorInfo* pInfo = (SStreamSessionAggOperatorInfo*)param;
cleanupBasicInfo(&pInfo->binfo);
destroyStreamAggSupporter(&pInfo->streamAggSup);
@@ -3458,7 +3405,7 @@ void destroyStreamSessionAggOperatorInfo(void* param, int32_t numOfOutput) {
for (int32_t i = 0; i < size; i++) {
SOperatorInfo* pChild = taosArrayGetP(pInfo->pChildren, i);
SStreamSessionAggOperatorInfo* pChInfo = pChild->info;
- destroyStreamSessionAggOperatorInfo(pChInfo, numOfOutput);
+ destroyStreamSessionAggOperatorInfo(pChInfo);
taosMemoryFreeClear(pChild);
}
}
@@ -3483,11 +3430,10 @@ int32_t initBasicInfoEx(SOptrBasicInfo* pBasicInfo, SExprSupp* pSup, SExprInfo*
initBasicInfo(pBasicInfo, pResultBlock);
for (int32_t i = 0; i < numOfCols; ++i) {
- pSup->pCtx[i].pBuf = NULL;
+ pSup->pCtx[i].saveHandle.pBuf = NULL;
}
ASSERT(numOfCols > 0);
- increaseTs(pSup->pCtx);
return TSDB_CODE_SUCCESS;
}
@@ -3498,10 +3444,18 @@ void initDummyFunction(SqlFunctionCtx* pDummy, SqlFunctionCtx* pCtx, int32_t num
}
void initDownStream(SOperatorInfo* downstream, SStreamAggSupporter* pAggSup, int64_t gap, int64_t waterMark,
- uint16_t type) {
- ASSERT(downstream->operatorType == QUERY_NODE_PHYSICAL_PLAN_STREAM_SCAN);
+ uint16_t type, int32_t tsColIndex) {
+ if (downstream->operatorType == QUERY_NODE_PHYSICAL_PLAN_STREAM_PARTITION) {
+ SStreamPartitionOperatorInfo* pScanInfo = downstream->info;
+ pScanInfo->tsColIndex = tsColIndex;
+ }
+
+ if (downstream->operatorType != QUERY_NODE_PHYSICAL_PLAN_STREAM_SCAN) {
+ initDownStream(downstream->pDownstream[0], pAggSup, gap, waterMark, type, tsColIndex);
+ return;
+ }
SStreamScanInfo* pScanInfo = downstream->info;
- pScanInfo->sessionSup = (SessionWindowSupporter){.pStreamAggSup = pAggSup, .gap = gap, .parentType = type};
+ pScanInfo->windowSup = (SWindowSupporter){.pStreamAggSup = pAggSup, .gap = gap, .parentType = type};
pScanInfo->pUpdateInfo = updateInfoInit(60000, TSDB_TIME_PRECISION_MILLI, waterMark);
}
@@ -3529,7 +3483,7 @@ SOperatorInfo* createStreamSessionAggOperatorInfo(SOperatorInfo* downstream, SPh
if (pSessionNode->window.pExprs != NULL) {
int32_t numOfScalar = 0;
SExprInfo* pScalarExprInfo = createExprInfo(pSessionNode->window.pExprs, NULL, &numOfScalar);
- int32_t code = initExprSupp(&pInfo->scalarSupp, pScalarExprInfo, numOfScalar);
+ code = initExprSupp(&pInfo->scalarSupp, pScalarExprInfo, numOfScalar);
if (code != TSDB_CODE_SUCCESS) {
goto _error;
}
@@ -3573,27 +3527,25 @@ SOperatorInfo* createStreamSessionAggOperatorInfo(SOperatorInfo* downstream, SPh
pInfo->isFinal = false;
pInfo->pPhyNode = pPhyNode;
pInfo->ignoreExpiredData = pSessionNode->window.igExpired;
- pInfo->returnDelete = false;
pOperator->name = "StreamSessionWindowAggOperator";
pOperator->operatorType = QUERY_NODE_PHYSICAL_PLAN_STREAM_SESSION;
pOperator->blocking = true;
pOperator->status = OP_NOT_OPENED;
- pOperator->exprSupp.pExprInfo = pExprInfo;
- pOperator->exprSupp.numOfExprs = numOfCols;
pOperator->info = pInfo;
pOperator->fpSet =
createOperatorFpSet(operatorDummyOpenFn, doStreamSessionAgg, NULL, NULL, destroyStreamSessionAggOperatorInfo,
aggEncodeResultRow, aggDecodeResultRow, NULL);
if (downstream) {
- initDownStream(downstream, &pInfo->streamAggSup, pInfo->gap, pInfo->twAggSup.waterMark, pOperator->operatorType);
+ initDownStream(downstream, &pInfo->streamAggSup, pInfo->gap, pInfo->twAggSup.waterMark, pOperator->operatorType,
+ pInfo->primaryTsIndex);
code = appendDownstream(pOperator, &downstream, 1);
}
return pOperator;
_error:
if (pInfo != NULL) {
- destroyStreamSessionAggOperatorInfo(pInfo, numOfCols);
+ destroyStreamSessionAggOperatorInfo(pInfo);
}
taosMemoryFreeClear(pOperator);
@@ -3616,13 +3568,15 @@ bool isInTimeWindow(STimeWindow* pWin, TSKEY ts, int64_t gap) {
bool isInWindow(SResultWindowInfo* pWinInfo, TSKEY ts, int64_t gap) { return isInTimeWindow(&pWinInfo->win, ts, gap); }
-static SResultWindowInfo* insertNewSessionWindow(SArray* pWinInfos, TSKEY ts, int32_t index) {
- SResultWindowInfo win = {.pos.offset = -1, .pos.pageId = -1, .win.skey = ts, .win.ekey = ts, .isOutput = false};
+static SResultWindowInfo* insertNewSessionWindow(SArray* pWinInfos, TSKEY startTs, TSKEY endTs, int32_t index) {
+ SResultWindowInfo win = {
+ .pos.offset = -1, .pos.pageId = -1, .win.skey = startTs, .win.ekey = endTs, .isOutput = false};
return taosArrayInsert(pWinInfos, index, &win);
}
-static SResultWindowInfo* addNewSessionWindow(SArray* pWinInfos, TSKEY ts) {
- SResultWindowInfo win = {.pos.offset = -1, .pos.pageId = -1, .win.skey = ts, .win.ekey = ts, .isOutput = false};
+static SResultWindowInfo* addNewSessionWindow(SArray* pWinInfos, TSKEY startTs, TSKEY endTs) {
+ SResultWindowInfo win = {
+ .pos.offset = -1, .pos.pageId = -1, .win.skey = startTs, .win.ekey = endTs, .isOutput = false};
return taosArrayPush(pWinInfos, &win);
}
@@ -3641,7 +3595,8 @@ SArray* getWinInfos(SStreamAggSupporter* pAggSup, uint64_t groupId) {
// don't add new window
SResultWindowInfo* getCurSessionWindow(SStreamAggSupporter* pAggSup, TSKEY startTs, TSKEY endTs, uint64_t groupId,
int64_t gap, int32_t* pIndex) {
- SArray* pWinInfos = getWinInfos(pAggSup, groupId);
+ STimeWindow searchWin = {.skey = startTs, .ekey = endTs};
+ SArray* pWinInfos = getWinInfos(pAggSup, groupId);
pAggSup->pCurWins = pWinInfos;
int32_t size = taosArrayGetSize(pWinInfos);
@@ -3653,7 +3608,7 @@ SResultWindowInfo* getCurSessionWindow(SStreamAggSupporter* pAggSup, TSKEY start
SResultWindowInfo* pWin = NULL;
if (index >= 0) {
pWin = taosArrayGet(pWinInfos, index);
- if (isInWindow(pWin, startTs, gap)) {
+ if (isInWindow(pWin, startTs, gap) || isInTimeWindow(&searchWin, pWin->win.skey, gap)) {
*pIndex = index;
return pWin;
}
@@ -3661,7 +3616,7 @@ SResultWindowInfo* getCurSessionWindow(SStreamAggSupporter* pAggSup, TSKEY start
if (index + 1 < size) {
pWin = taosArrayGet(pWinInfos, index + 1);
- if (isInWindow(pWin, startTs, gap)) {
+ if (isInWindow(pWin, startTs, gap) || isInTimeWindow(&searchWin, pWin->win.skey, gap)) {
*pIndex = index + 1;
return pWin;
} else if (endTs != INT64_MIN && isInWindow(pWin, endTs, gap)) {
@@ -3681,7 +3636,7 @@ SResultWindowInfo* getSessionTimeWindow(SStreamAggSupporter* pAggSup, TSKEY star
int32_t size = taosArrayGetSize(pWinInfos);
if (size == 0) {
*pIndex = 0;
- return addNewSessionWindow(pWinInfos, startTs);
+ return addNewSessionWindow(pWinInfos, startTs, endTs);
}
// find the first position which is smaller than the key
int32_t index = binarySearch(pWinInfos, size, startTs, TSDB_ORDER_DESC, getSessionWindowEndkey);
@@ -3707,10 +3662,10 @@ SResultWindowInfo* getSessionTimeWindow(SStreamAggSupporter* pAggSup, TSKEY star
if (index == size - 1) {
*pIndex = taosArrayGetSize(pWinInfos);
- return addNewSessionWindow(pWinInfos, startTs);
+ return addNewSessionWindow(pWinInfos, startTs, endTs);
}
*pIndex = index + 1;
- return insertNewSessionWindow(pWinInfos, startTs, index + 1);
+ return insertNewSessionWindow(pWinInfos, startTs, endTs, index + 1);
}
int32_t updateSessionWindowInfo(SResultWindowInfo* pWinInfo, TSKEY* pStartTs, TSKEY* pEndTs, uint64_t groupId,
@@ -3721,8 +3676,8 @@ int32_t updateSessionWindowInfo(SResultWindowInfo* pWinInfo, TSKEY* pStartTs, TS
}
if (pWinInfo->win.skey > pStartTs[i]) {
if (pStDeleted && pWinInfo->isOutput) {
- SWinRes res = {.ts = pWinInfo->win.skey, .groupId = groupId};
- taosHashPut(pStDeleted, &pWinInfo->pos, sizeof(SResultRowPosition), &res, sizeof(SWinRes));
+ SWinKey res = {.ts = pWinInfo->win.skey, .groupId = groupId};
+ taosHashPut(pStDeleted, &res, sizeof(SWinKey), &res, sizeof(SWinKey));
pWinInfo->isOutput = false;
}
pWinInfo->win.skey = pStartTs[i];
@@ -3742,15 +3697,14 @@ static int32_t setWindowOutputBuf(SResultWindowInfo* pWinInfo, SResultRow** pRes
// too many time window in query
int32_t size = taosArrayGetSize(pAggSup->pCurWins);
if (pTaskInfo->execModel == OPTR_EXEC_MODEL_BATCH && size > MAX_INTERVAL_TIME_WINDOW) {
- longjmp(pTaskInfo->env, TSDB_CODE_QRY_TOO_MANY_TIMEWINDOW);
+ T_LONG_JMP(pTaskInfo->env, TSDB_CODE_QRY_TOO_MANY_TIMEWINDOW);
}
if (pWinInfo->pos.pageId == -1) {
- *pResult = getNewResultRow(pAggSup->pResultBuf, groupId, pAggSup->resultRowSize);
+ *pResult = getNewResultRow(pAggSup->pResultBuf, &pAggSup->currentPageId, pAggSup->resultRowSize);
if (*pResult == NULL) {
return TSDB_CODE_OUT_OF_MEMORY;
}
- initResultRow(*pResult);
// add a new result set for a new group
pWinInfo->pos.pageId = (*pResult)->pageId;
@@ -3837,11 +3791,12 @@ void compactTimeWindow(SStreamSessionAggOperatorInfo* pInfo, int32_t startIndex,
setWindowOutputBuf(pWinInfo, &pWinResult, pInfo->pDummyCtx, groupId, numOfOutput, pSup->rowEntryInfoOffset,
&pInfo->streamAggSup, pTaskInfo);
pCurWin->win.ekey = TMAX(pCurWin->win.ekey, pWinInfo->win.ekey);
- compactFunctions(pSup->pCtx, pInfo->pDummyCtx, numOfOutput, pTaskInfo);
+ updateTimeWindowInfo(&pInfo->twAggSup.timeWindowData, &pCurWin->win, true);
+ compactFunctions(pSup->pCtx, pInfo->pDummyCtx, numOfOutput, pTaskInfo, &pInfo->twAggSup.timeWindowData);
taosHashRemove(pStUpdated, &pWinInfo->pos, sizeof(SResultRowPosition));
- if (pWinInfo->isOutput) {
- SWinRes res = {.ts = pWinInfo->win.skey, .groupId = groupId};
- taosHashPut(pStDeleted, &pWinInfo->pos, sizeof(SResultRowPosition), &res, sizeof(SWinRes));
+ if (pWinInfo->isOutput && pStDeleted) {
+ SWinKey res = {.ts = pWinInfo->win.skey, .groupId = groupId};
+ taosHashPut(pStDeleted, &res, sizeof(SWinKey), &res, sizeof(SWinKey));
pWinInfo->isOutput = false;
}
taosArrayRemove(pInfo->streamAggSup.pCurWins, i);
@@ -3894,7 +3849,7 @@ static void doStreamSessionAggImpl(SOperatorInfo* pOperator, SSDataBlock* pSData
pStDeleted);
code = doOneWindowAgg(pInfo, pSDataBlock, pCurWin, &pResult, i, winRows, numOfOutput, pOperator);
if (code != TSDB_CODE_SUCCESS || pResult == NULL) {
- longjmp(pTaskInfo->env, TSDB_CODE_QRY_OUT_OF_MEMORY);
+ T_LONG_JMP(pTaskInfo->env, TSDB_CODE_QRY_OUT_OF_MEMORY);
}
int32_t winNum = getNumCompactWindow(pAggSup->pCurWins, winIndex, gap);
@@ -3903,10 +3858,10 @@ static void doStreamSessionAggImpl(SOperatorInfo* pOperator, SSDataBlock* pSData
}
pCurWin->isClosed = false;
if (pInfo->twAggSup.calTrigger == STREAM_TRIGGER_AT_ONCE && pStUpdated) {
- SWinRes value = {.ts = pCurWin->win.skey, .groupId = groupId};
- code = taosHashPut(pStUpdated, &pCurWin->pos, sizeof(SResultRowPosition), &value, sizeof(SWinRes));
+ SWinKey value = {.ts = pCurWin->win.skey, .groupId = groupId};
+ code = taosHashPut(pStUpdated, &pCurWin->pos, sizeof(SResultRowPosition), &value, sizeof(SWinKey));
if (code != TSDB_CODE_SUCCESS) {
- longjmp(pTaskInfo->env, TSDB_CODE_QRY_OUT_OF_MEMORY);
+ T_LONG_JMP(pTaskInfo->env, TSDB_CODE_QRY_OUT_OF_MEMORY);
}
pCurWin->isOutput = true;
}
@@ -3932,18 +3887,24 @@ static void doDeleteTimeWindows(SStreamAggSupporter* pAggSup, SSDataBlock* pBloc
SColumnInfoData* pGroupCol = taosArrayGet(pBlock->pDataBlock, GROUPID_COLUMN_INDEX);
uint64_t* gpDatas = (uint64_t*)pGroupCol->pData;
for (int32_t i = 0; i < pBlock->info.rows; i++) {
- int32_t winIndex = 0;
- while (1) {
- SResultWindowInfo* pCurWin = getCurSessionWindow(pAggSup, startDatas[i], endDatas[i], gpDatas[i], gap, &winIndex);
- if (!pCurWin) {
- break;
- }
+ int32_t winIndex = 0;
+ SResultWindowInfo* pCurWin = getCurSessionWindow(pAggSup, startDatas[i], endDatas[i], gpDatas[i], gap, &winIndex);
+ if (!pCurWin) {
+ continue;
+ }
+
+ do {
+ SResultWindowInfo delWin = *pCurWin;
deleteWindow(pAggSup->pCurWins, winIndex, fp);
if (result) {
- pCurWin->groupId = gpDatas[i];
- taosArrayPush(result, pCurWin);
+ delWin.groupId = gpDatas[i];
+ taosArrayPush(result, &delWin);
}
- }
+ if (winIndex >= taosArrayGetSize(pAggSup->pCurWins)) {
+ break;
+ }
+ pCurWin = taosArrayGet(pAggSup->pCurWins, winIndex);
+ } while (pCurWin->win.skey <= endDatas[i]);
}
}
@@ -3956,8 +3917,7 @@ static void doClearSessionWindows(SStreamAggSupporter* pAggSup, SExprSupp* pSup,
int32_t step = 0;
for (int32_t i = 0; i < pBlock->info.rows; i += step) {
int32_t winIndex = 0;
- SResultWindowInfo* pCurWin =
- getCurSessionWindow(pAggSup, tsCols[i], INT64_MIN, gpCols[i], gap, &winIndex);
+ SResultWindowInfo* pCurWin = getCurSessionWindow(pAggSup, tsCols[i], INT64_MIN, gpCols[i], gap, &winIndex);
if (!pCurWin || pCurWin->pos.pageId == -1) {
// window has been closed.
step = 1;
@@ -3967,6 +3927,7 @@ static void doClearSessionWindows(SStreamAggSupporter* pAggSup, SExprSupp* pSup,
ASSERT(isInWindow(pCurWin, tsCols[i], gap));
doClearWindowImpl(&pCurWin->pos, pAggSup->pResultBuf, pSup, numOfOutput);
if (result) {
+ pCurWin->groupId = gpCols[i];
taosArrayPush(result, pCurWin);
}
}
@@ -3982,9 +3943,9 @@ static int32_t copyUpdateResult(SHashObj* pStUpdated, SArray* pUpdated) {
if (pos == NULL) {
return TSDB_CODE_QRY_OUT_OF_MEMORY;
}
- pos->groupId = ((SWinRes*)pData)->groupId;
+ pos->groupId = ((SWinKey*)pData)->groupId;
pos->pos = *(SResultRowPosition*)key;
- *(int64_t*)pos->key = ((SWinRes*)pData)->ts;
+ *(int64_t*)pos->key = ((SWinKey*)pData)->ts;
taosArrayPush(pUpdated, &pos);
}
taosArraySort(pUpdated, resultrowComparAsc);
@@ -4000,11 +3961,19 @@ void doBuildDeleteDataBlock(SHashObj* pStDeleted, SSDataBlock* pBlock, void** It
blockDataEnsureCapacity(pBlock, size);
size_t keyLen = 0;
while (((*Ite) = taosHashIterate(pStDeleted, *Ite)) != NULL) {
- SWinRes* res = *Ite;
- SColumnInfoData* pTsCol = taosArrayGet(pBlock->pDataBlock, START_TS_COLUMN_INDEX);
- colDataAppend(pTsCol, pBlock->info.rows, (const char*)&res->ts, false);
+ SWinKey* res = *Ite;
+ SColumnInfoData* pStartTsCol = taosArrayGet(pBlock->pDataBlock, START_TS_COLUMN_INDEX);
+ colDataAppend(pStartTsCol, pBlock->info.rows, (const char*)&res->ts, false);
+ SColumnInfoData* pEndTsCol = taosArrayGet(pBlock->pDataBlock, END_TS_COLUMN_INDEX);
+ colDataAppend(pEndTsCol, pBlock->info.rows, (const char*)&res->ts, false);
+ SColumnInfoData* pUidCol = taosArrayGet(pBlock->pDataBlock, UID_COLUMN_INDEX);
+ colDataAppendNULL(pUidCol, pBlock->info.rows);
SColumnInfoData* pGpCol = taosArrayGet(pBlock->pDataBlock, GROUPID_COLUMN_INDEX);
colDataAppend(pGpCol, pBlock->info.rows, (const char*)&res->groupId, false);
+ SColumnInfoData* pCalStCol = taosArrayGet(pBlock->pDataBlock, CALCULATE_START_TS_COLUMN_INDEX);
+ colDataAppendNULL(pCalStCol, pBlock->info.rows);
+ SColumnInfoData* pCalEdCol = taosArrayGet(pBlock->pDataBlock, CALCULATE_END_TS_COLUMN_INDEX);
+ colDataAppendNULL(pCalEdCol, pBlock->info.rows);
pBlock->info.rows += 1;
if (pBlock->info.rows + 1 >= pBlock->info.capacity) {
break;
@@ -4015,20 +3984,17 @@ void doBuildDeleteDataBlock(SHashObj* pStDeleted, SSDataBlock* pBlock, void** It
}
}
-static void rebuildTimeWindow(SStreamSessionAggOperatorInfo* pInfo, SArray* pWinArray, int32_t groupId,
- int32_t numOfOutput, SOperatorInfo* pOperator) {
+static void rebuildTimeWindow(SStreamSessionAggOperatorInfo* pInfo, SArray* pWinArray, int32_t numOfOutput,
+ SOperatorInfo* pOperator, SHashObj* pStUpdated) {
SExprSupp* pSup = &pOperator->exprSupp;
SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo;
-
- int32_t size = taosArrayGetSize(pWinArray);
+ int32_t size = taosArrayGetSize(pWinArray);
ASSERT(pInfo->pChildren);
for (int32_t i = 0; i < size; i++) {
SResultWindowInfo* pParentWin = taosArrayGet(pWinArray, i);
- SResultRow* pCurResult = NULL;
- setWindowOutputBuf(pParentWin, &pCurResult, pSup->pCtx, groupId, numOfOutput, pSup->rowEntryInfoOffset,
- &pInfo->streamAggSup, pTaskInfo);
- int32_t numOfChildren = taosArrayGetSize(pInfo->pChildren);
+ uint64_t groupId = pParentWin->groupId;
+ int32_t numOfChildren = taosArrayGetSize(pInfo->pChildren);
for (int32_t j = 0; j < numOfChildren; j++) {
SOperatorInfo* pChild = taosArrayGetP(pInfo->pChildren, j);
SStreamSessionAggOperatorInfo* pChInfo = pChild->info;
@@ -4041,22 +4007,36 @@ static void rebuildTimeWindow(SStreamSessionAggOperatorInfo* pInfo, SArray* pWin
for (int32_t k = index; k < chWinSize; k++) {
SResultWindowInfo* pChWin = taosArrayGet(pChWins, k);
if (pParentWin->win.skey <= pChWin->win.skey && pChWin->win.ekey <= pParentWin->win.ekey) {
+ int32_t winIndex = 0;
+ SResultWindowInfo* pNewParWin =
+ getSessionTimeWindow(&pInfo->streamAggSup, pChWin->win.skey, pChWin->win.ekey, groupId, 0, &winIndex);
+ SResultRow* pPareResult = NULL;
+ setWindowOutputBuf(pNewParWin, &pPareResult, pSup->pCtx, groupId, numOfOutput, pSup->rowEntryInfoOffset,
+ &pInfo->streamAggSup, pTaskInfo);
SResultRow* pChResult = NULL;
setWindowOutputBuf(pChWin, &pChResult, pChild->exprSupp.pCtx, groupId, numOfOutput,
pChild->exprSupp.rowEntryInfoOffset, &pChInfo->streamAggSup, pTaskInfo);
- compactFunctions(pSup->pCtx, pChild->exprSupp.pCtx, numOfOutput, pTaskInfo);
+ updateTimeWindowInfo(&pInfo->twAggSup.timeWindowData, &pNewParWin->win, true);
+ compactFunctions(pSup->pCtx, pChild->exprSupp.pCtx, numOfOutput, pTaskInfo, &pInfo->twAggSup.timeWindowData);
+
+ int32_t winNum = getNumCompactWindow(pInfo->streamAggSup.pCurWins, winIndex, pInfo->gap);
+ if (winNum > 0) {
+ compactTimeWindow(pInfo, winIndex, winNum, groupId, numOfOutput, pStUpdated, NULL, pOperator);
+ }
+
SFilePage* bufPage = getBufPage(pChInfo->streamAggSup.pResultBuf, pChWin->pos.pageId);
releaseBufPage(pChInfo->streamAggSup.pResultBuf, bufPage);
- continue;
+
+ bufPage = getBufPage(pInfo->streamAggSup.pResultBuf, pNewParWin->pos.pageId);
+ setBufPageDirty(bufPage, true);
+ releaseBufPage(pInfo->streamAggSup.pResultBuf, bufPage);
+ SWinKey value = {.ts = pNewParWin->win.skey, .groupId = groupId};
+ taosHashPut(pStUpdated, &pNewParWin->pos, sizeof(SResultRowPosition), &value, sizeof(SWinKey));
} else if (!pChWin->isClosed) {
break;
}
}
}
- SFilePage* bufPage = getBufPage(pInfo->streamAggSup.pResultBuf, pParentWin->pos.pageId);
- ASSERT(size > 0);
- setBufPageDirty(bufPage, true);
- releaseBufPage(pInfo->streamAggSup.pResultBuf, bufPage);
}
}
@@ -4131,8 +4111,47 @@ static void copyDeleteWindowInfo(SArray* pResWins, SHashObj* pStDeleted) {
int32_t size = taosArrayGetSize(pResWins);
for (int32_t i = 0; i < size; i++) {
SResultWindowInfo* pWinInfo = taosArrayGet(pResWins, i);
- SWinRes res = {.ts = pWinInfo->win.skey, .groupId = pWinInfo->groupId};
- taosHashPut(pStDeleted, &pWinInfo->pos, sizeof(SResultRowPosition), &res, sizeof(SWinRes));
+ SWinKey res = {.ts = pWinInfo->win.skey, .groupId = pWinInfo->groupId};
+ taosHashPut(pStDeleted, &res, sizeof(SWinKey), &res, sizeof(SWinKey));
+ }
+}
+
+static void removeSessionResults(SHashObj* pHashMap, SArray* pWins) {
+ int32_t size = taosArrayGetSize(pWins);
+ for (int32_t i = 0; i < size; i++) {
+ SResultWindowInfo* pWin = taosArrayGet(pWins, i);
+ taosHashRemove(pHashMap, &pWin->pos, sizeof(SResultRowPosition));
+ }
+}
+
+int32_t compareWinKey(void* pKey, void* data, int32_t index) {
+ SArray* res = (SArray*)data;
+ SResKeyPos* pos = taosArrayGetP(res, index);
+ SWinKey* pData = (SWinKey*)pKey;
+ if (pData->ts == *(int64_t*)pos->key) {
+ if (pData->groupId > pos->groupId) {
+ return 1;
+ } else if (pData->groupId < pos->groupId) {
+ return -1;
+ }
+ return 0;
+ } else if (pData->ts > *(int64_t*)pos->key) {
+ return 1;
+ }
+ return -1;
+}
+
+static void removeSessionDeleteResults(SArray* update, SHashObj* pStDeleted) {
+ int32_t size = taosHashGetSize(pStDeleted);
+ if (size == 0) {
+ return;
+ }
+
+ int32_t num = taosArrayGetSize(update);
+ for (int32_t i = 0; i < num; i++) {
+ SResKeyPos* pos = taosArrayGetP(update, i);
+ SWinKey winKey = {.ts = *(int64_t*)pos->key, .groupId = pos->groupId};
+ taosHashRemove(pStDeleted, &winKey, sizeof(SWinKey));
}
}
@@ -4160,7 +4179,7 @@ static SSDataBlock* doStreamSessionAgg(SOperatorInfo* pOperator) {
_hash_fn_t hashFn = taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY);
SHashObj* pStUpdated = taosHashInit(64, hashFn, true, HASH_NO_LOCK);
SOperatorInfo* downstream = pOperator->pDownstream[0];
- SArray* pUpdated = taosArrayInit(16, POINTER_BYTES);
+ SArray* pUpdated = taosArrayInit(16, POINTER_BYTES); // SResKeyPos
while (1) {
SSDataBlock* pBlock = downstream->fpSet.getNextFn(downstream);
if (pBlock == NULL) {
@@ -4170,15 +4189,15 @@ static SSDataBlock* doStreamSessionAgg(SOperatorInfo* pOperator) {
if (pBlock->info.type == STREAM_CLEAR) {
SArray* pWins = taosArrayInit(16, sizeof(SResultWindowInfo));
- doClearSessionWindows(&pInfo->streamAggSup, &pOperator->exprSupp, pBlock, START_TS_COLUMN_INDEX, pOperator->exprSupp.numOfExprs, 0,
- pWins);
+ doClearSessionWindows(&pInfo->streamAggSup, &pOperator->exprSupp, pBlock, START_TS_COLUMN_INDEX,
+ pOperator->exprSupp.numOfExprs, 0, pWins);
if (IS_FINAL_OP(pInfo)) {
int32_t childIndex = getChildIndex(pBlock);
SOperatorInfo* pChildOp = taosArrayGetP(pInfo->pChildren, childIndex);
SStreamSessionAggOperatorInfo* pChildInfo = pChildOp->info;
- doClearSessionWindows(&pChildInfo->streamAggSup, &pChildOp->exprSupp, pBlock, START_TS_COLUMN_INDEX, pChildOp->exprSupp.numOfExprs,
- 0, NULL);
- rebuildTimeWindow(pInfo, pWins, pBlock->info.groupId, pOperator->exprSupp.numOfExprs, pOperator);
+ doClearSessionWindows(&pChildInfo->streamAggSup, &pChildOp->exprSupp, pBlock, START_TS_COLUMN_INDEX,
+ pChildOp->exprSupp.numOfExprs, 0, NULL);
+ rebuildTimeWindow(pInfo, pWins, pOperator->exprSupp.numOfExprs, pOperator, pStUpdated);
}
taosArrayDestroy(pWins);
continue;
@@ -4192,9 +4211,10 @@ static SSDataBlock* doStreamSessionAgg(SOperatorInfo* pOperator) {
SStreamSessionAggOperatorInfo* pChildInfo = pChildOp->info;
// gap must be 0
doDeleteTimeWindows(&pChildInfo->streamAggSup, pBlock, 0, NULL, NULL);
- rebuildTimeWindow(pInfo, pWins, pBlock->info.groupId, pOperator->exprSupp.numOfExprs, pOperator);
+ rebuildTimeWindow(pInfo, pWins, pOperator->exprSupp.numOfExprs, pOperator, pStUpdated);
}
copyDeleteWindowInfo(pWins, pInfo->pStDeleted);
+ removeSessionResults(pStUpdated, pWins);
taosArrayDestroy(pWins);
continue;
} else if (pBlock->info.type == STREAM_GET_ALL) {
@@ -4217,7 +4237,7 @@ static SSDataBlock* doStreamSessionAgg(SOperatorInfo* pOperator) {
SOperatorInfo* pChildOp =
createStreamFinalSessionAggOperatorInfo(NULL, pInfo->pPhyNode, pOperator->pTaskInfo, 0);
if (!pChildOp) {
- longjmp(pOperator->pTaskInfo->env, TSDB_CODE_QRY_OUT_OF_MEMORY);
+ T_LONG_JMP(pOperator->pTaskInfo->env, TSDB_CODE_QRY_OUT_OF_MEMORY);
}
taosArrayPush(pInfo->pChildren, &pChildOp);
}
@@ -4237,6 +4257,7 @@ static SSDataBlock* doStreamSessionAgg(SOperatorInfo* pOperator) {
pInfo->ignoreExpiredData, NULL);
closeChildSessionWindow(pInfo->pChildren, pInfo->twAggSup.maxTs, pInfo->ignoreExpiredData, NULL);
copyUpdateResult(pStUpdated, pUpdated);
+ removeSessionDeleteResults(pUpdated, pInfo->pStDeleted);
taosHashCleanup(pStUpdated);
finalizeUpdatedResult(pSup->numOfExprs, pInfo->streamAggSup.pResultBuf, pUpdated, pSup->rowEntryInfoOffset);
@@ -4264,16 +4285,7 @@ static void clearStreamSessionOperator(SStreamSessionAggOperatorInfo* pInfo) {
}
}
clearDiskbasedBuf(pInfo->streamAggSup.pResultBuf);
- cleanupResultRowInfo(&pInfo->binfo.resultRowInfo);
- initResultRowInfo(&pInfo->binfo.resultRowInfo);
-}
-
-static void removeSessionResults(SHashObj* pHashMap, SArray* pWins) {
- int32_t size = taosArrayGetSize(pWins);
- for (int32_t i = 0; i < size; i++) {
- SResultWindowInfo* pWin = taosArrayGet(pWins, i);
- taosHashRemove(pHashMap, &pWin->pos, sizeof(SResultRowPosition));
- }
+ pInfo->streamAggSup.currentPageId = -1;
}
static SSDataBlock* doStreamSessionSemiAgg(SOperatorInfo* pOperator) {
@@ -4284,30 +4296,35 @@ static SSDataBlock* doStreamSessionSemiAgg(SOperatorInfo* pOperator) {
if (pOperator->status == OP_EXEC_DONE) {
return NULL;
- } else if (pOperator->status == OP_RES_TO_RETURN) {
+ }
+
+ {
doBuildResultDatablock(pOperator, pBInfo, &pInfo->groupResInfo, pInfo->streamAggSup.pResultBuf);
if (pBInfo->pRes->info.rows > 0) {
printDataBlock(pBInfo->pRes, "semi session");
return pBInfo->pRes;
}
- // doBuildDeleteDataBlock(pInfo->pStDeleted, pInfo->pDelRes, &pInfo->pDelIterator);
- if (pInfo->pDelRes->info.rows > 0 && !pInfo->returnDelete) {
- pInfo->returnDelete = true;
+ doBuildDeleteDataBlock(pInfo->pStDeleted, pInfo->pDelRes, &pInfo->pDelIterator);
+ if (pInfo->pDelRes->info.rows > 0) {
printDataBlock(pInfo->pDelRes, "semi session");
return pInfo->pDelRes;
}
- if (pInfo->pUpdateRes->info.rows > 0) {
+ if (pInfo->pUpdateRes->info.rows > 0 && pInfo->returnUpdate) {
+ pInfo->returnUpdate = false;
// process the rest of the data
- pOperator->status = OP_OPENED;
printDataBlock(pInfo->pUpdateRes, "semi session");
return pInfo->pUpdateRes;
}
- // semi interval operator clear disk buffer
- clearStreamSessionOperator(pInfo);
- pOperator->status = OP_EXEC_DONE;
- return NULL;
+
+ if (pOperator->status == OP_RES_TO_RETURN) {
+ clearFunctionContext(&pOperator->exprSupp);
+ // semi interval operator clear disk buffer
+ clearStreamSessionOperator(pInfo);
+ pOperator->status = OP_EXEC_DONE;
+ return NULL;
+ }
}
_hash_fn_t hashFn = taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY);
@@ -4318,6 +4335,7 @@ static SSDataBlock* doStreamSessionSemiAgg(SOperatorInfo* pOperator) {
SSDataBlock* pBlock = downstream->fpSet.getNextFn(downstream);
if (pBlock == NULL) {
clearSpecialDataBlock(pInfo->pUpdateRes);
+ pOperator->status = OP_RES_TO_RETURN;
break;
}
printDataBlock(pBlock, "semi session recv");
@@ -4328,12 +4346,15 @@ static SSDataBlock* doStreamSessionSemiAgg(SOperatorInfo* pOperator) {
removeSessionResults(pStUpdated, pWins);
taosArrayDestroy(pWins);
copyDataBlock(pInfo->pUpdateRes, pBlock);
+ pInfo->returnUpdate = true;
break;
} else if (pBlock->info.type == STREAM_DELETE_DATA || pBlock->info.type == STREAM_DELETE_RESULT) {
// gap must be 0
- doDeleteTimeWindows(&pInfo->streamAggSup, pBlock, 0, NULL, NULL);
- copyDataBlock(pInfo->pDelRes, pBlock);
- pInfo->pDelRes->info.type = STREAM_DELETE_RESULT;
+ SArray* pWins = taosArrayInit(16, sizeof(SResultWindowInfo));
+ doDeleteTimeWindows(&pInfo->streamAggSup, pBlock, 0, pWins, NULL);
+ copyDeleteWindowInfo(pWins, pInfo->pStDeleted);
+ removeSessionResults(pStUpdated, pWins);
+ taosArrayDestroy(pWins);
break;
} else if (pBlock->info.type == STREAM_GET_ALL) {
getAllSessionWindow(pInfo->streamAggSup.pResultRows, pUpdated, getResWinForSession);
@@ -4346,18 +4367,15 @@ static SSDataBlock* doStreamSessionSemiAgg(SOperatorInfo* pOperator) {
}
// the pDataBlock are always the same one, no need to call this again
setInputDataBlock(pOperator, pSup->pCtx, pBlock, TSDB_ORDER_ASC, MAIN_SCAN, true);
- doStreamSessionAggImpl(pOperator, pBlock, pStUpdated, pInfo->pStDeleted, false);
+ doStreamSessionAggImpl(pOperator, pBlock, pStUpdated, NULL, false);
maxTs = TMAX(pInfo->twAggSup.maxTs, pBlock->info.window.ekey);
}
pInfo->twAggSup.maxTs = TMAX(pInfo->twAggSup.maxTs, maxTs);
pBInfo->pRes->info.watermark = pInfo->twAggSup.maxTs;
- // restore the value
- pOperator->status = OP_RES_TO_RETURN;
- // semi operator
- // closeSessionWindow(pInfo->streamAggSup.pResultRows, &pInfo->twAggSup, pUpdated,
- // getResWinForSession);
+
copyUpdateResult(pStUpdated, pUpdated);
+ removeSessionDeleteResults(pUpdated, pInfo->pStDeleted);
taosHashCleanup(pStUpdated);
finalizeUpdatedResult(pOperator->exprSupp.numOfExprs, pInfo->streamAggSup.pResultBuf, pUpdated,
@@ -4371,16 +4389,15 @@ static SSDataBlock* doStreamSessionSemiAgg(SOperatorInfo* pOperator) {
return pBInfo->pRes;
}
- // doBuildDeleteDataBlock(pInfo->pStDeleted, pInfo->pDelRes, &pInfo->pDelIterator);
- if (pInfo->pDelRes->info.rows > 0 && !pInfo->returnDelete) {
- pInfo->returnDelete = true;
+ doBuildDeleteDataBlock(pInfo->pStDeleted, pInfo->pDelRes, &pInfo->pDelIterator);
+ if (pInfo->pDelRes->info.rows > 0) {
printDataBlock(pInfo->pDelRes, "semi session");
return pInfo->pDelRes;
}
- if (pInfo->pUpdateRes->info.rows > 0) {
+ if (pInfo->pUpdateRes->info.rows > 0 && pInfo->returnUpdate) {
+ pInfo->returnUpdate = false;
// process the rest of the data
- pOperator->status = OP_OPENED;
printDataBlock(pInfo->pUpdateRes, "semi session");
return pInfo->pUpdateRes;
}
@@ -4425,7 +4442,7 @@ SOperatorInfo* createStreamFinalSessionAggOperatorInfo(SOperatorInfo* downstream
_error:
if (pInfo != NULL) {
- destroyStreamSessionAggOperatorInfo(pInfo, pOperator->exprSupp.numOfExprs);
+ destroyStreamSessionAggOperatorInfo(pInfo);
}
taosMemoryFreeClear(pOperator);
@@ -4433,7 +4450,7 @@ _error:
return NULL;
}
-void destroyStreamStateOperatorInfo(void* param, int32_t numOfOutput) {
+void destroyStreamStateOperatorInfo(void* param) {
SStreamStateAggOperatorInfo* pInfo = (SStreamStateAggOperatorInfo*)param;
cleanupBasicInfo(&pInfo->binfo);
destroyStateStreamAggSupporter(&pInfo->streamAggSup);
@@ -4443,7 +4460,7 @@ void destroyStreamStateOperatorInfo(void* param, int32_t numOfOutput) {
for (int32_t i = 0; i < size; i++) {
SOperatorInfo* pChild = taosArrayGetP(pInfo->pChildren, i);
SStreamSessionAggOperatorInfo* pChInfo = pChild->info;
- destroyStreamSessionAggOperatorInfo(pChInfo, numOfOutput);
+ destroyStreamSessionAggOperatorInfo(pChInfo);
taosMemoryFreeClear(pChild);
taosMemoryFreeClear(pChInfo);
}
@@ -4581,7 +4598,8 @@ SStateWindowInfo* getStateWindow(SStreamAggSupporter* pAggSup, TSKEY ts, uint64_
}
int32_t updateStateWindowInfo(SArray* pWinInfos, int32_t winIndex, TSKEY* pTs, uint64_t groupId,
- SColumnInfoData* pKeyCol, int32_t rows, int32_t start, bool* allEqual, SHashObj* pSeDeleted) {
+ SColumnInfoData* pKeyCol, int32_t rows, int32_t start, bool* allEqual,
+ SHashObj* pSeDeleted) {
*allEqual = true;
SStateWindowInfo* pWinInfo = taosArrayGet(pWinInfos, winIndex);
for (int32_t i = start; i < rows; ++i) {
@@ -4602,9 +4620,8 @@ int32_t updateStateWindowInfo(SArray* pWinInfos, int32_t winIndex, TSKEY* pTs, u
}
if (pWinInfo->winInfo.win.skey > pTs[i]) {
if (pSeDeleted && pWinInfo->winInfo.isOutput) {
- SWinRes res = {.ts = pWinInfo->winInfo.win.skey, .groupId = groupId};
- taosHashPut(pSeDeleted, &pWinInfo->winInfo.pos, sizeof(SResultRowPosition), &res,
- sizeof(SWinRes));
+ SWinKey res = {.ts = pWinInfo->winInfo.win.skey, .groupId = groupId};
+ taosHashPut(pSeDeleted, &res, sizeof(SWinKey), &res, sizeof(SWinKey));
pWinInfo->winInfo.isOutput = false;
}
pWinInfo->winInfo.win.skey = pTs[i];
@@ -4617,14 +4634,14 @@ int32_t updateStateWindowInfo(SArray* pWinInfos, int32_t winIndex, TSKEY* pTs, u
return rows - start;
}
-static void doClearStateWindows(SStreamAggSupporter* pAggSup, SSDataBlock* pBlock,
- SHashObj* pSeUpdated, SHashObj* pSeDeleted) {
+static void doClearStateWindows(SStreamAggSupporter* pAggSup, SSDataBlock* pBlock, SHashObj* pSeUpdated,
+ SHashObj* pSeDeleted) {
SColumnInfoData* pTsColInfo = taosArrayGet(pBlock->pDataBlock, START_TS_COLUMN_INDEX);
SColumnInfoData* pGroupColInfo = taosArrayGet(pBlock->pDataBlock, GROUPID_COLUMN_INDEX);
TSKEY* tsCol = (TSKEY*)pTsColInfo->pData;
bool allEqual = false;
int32_t step = 1;
- uint64_t* gpCol = (uint64_t*) pGroupColInfo->pData;
+ uint64_t* gpCol = (uint64_t*)pGroupColInfo->pData;
for (int32_t i = 0; i < pBlock->info.rows; i += step) {
int32_t winIndex = 0;
SStateWindowInfo* pCurWin = getStateWindowByTs(pAggSup, tsCol[i], gpCol[i], &winIndex);
@@ -4668,27 +4685,26 @@ static void doStreamStateAggImpl(SOperatorInfo* pOperator, SSDataBlock* pSDataBl
char* pKeyData = colDataGetData(pKeyColInfo, i);
int32_t winIndex = 0;
bool allEqual = true;
- SStateWindowInfo* pCurWin =
- getStateWindow(pAggSup, tsCols[i], groupId, pKeyData, &pInfo->stateCol, &winIndex);
- winRows = updateStateWindowInfo(pAggSup->pCurWins, winIndex, tsCols, groupId, pKeyColInfo,
- pSDataBlock->info.rows, i, &allEqual, pStDeleted);
+ SStateWindowInfo* pCurWin = getStateWindow(pAggSup, tsCols[i], groupId, pKeyData, &pInfo->stateCol, &winIndex);
+ winRows = updateStateWindowInfo(pAggSup->pCurWins, winIndex, tsCols, groupId, pKeyColInfo, pSDataBlock->info.rows,
+ i, &allEqual, pStDeleted);
if (!allEqual) {
- appendOneRow(pAggSup->pScanBlock, &pCurWin->winInfo.win.skey, &pCurWin->winInfo.win.ekey,
- GROUPID_COLUMN_INDEX, &groupId);
+ uint64_t uid = 0;
+ appendOneRow(pAggSup->pScanBlock, &pCurWin->winInfo.win.skey, &pCurWin->winInfo.win.ekey, &uid, &groupId);
taosHashRemove(pSeUpdated, &pCurWin->winInfo.pos, sizeof(SResultRowPosition));
deleteWindow(pAggSup->pCurWins, winIndex, destroyStateWinInfo);
continue;
}
code = doOneStateWindowAgg(pInfo, pSDataBlock, &pCurWin->winInfo, &pResult, i, winRows, numOfOutput, pOperator);
if (code != TSDB_CODE_SUCCESS || pResult == NULL) {
- longjmp(pTaskInfo->env, TSDB_CODE_QRY_OUT_OF_MEMORY);
+ T_LONG_JMP(pTaskInfo->env, TSDB_CODE_QRY_OUT_OF_MEMORY);
}
pCurWin->winInfo.isClosed = false;
if (pInfo->twAggSup.calTrigger == STREAM_TRIGGER_AT_ONCE) {
- SWinRes value = {.ts = pCurWin->winInfo.win.skey, .groupId = groupId};
- code = taosHashPut(pSeUpdated, &pCurWin->winInfo.pos, sizeof(SResultRowPosition), &value, sizeof(SWinRes));
+ SWinKey value = {.ts = pCurWin->winInfo.win.skey, .groupId = groupId};
+ code = taosHashPut(pSeUpdated, &pCurWin->winInfo.pos, sizeof(SResultRowPosition), &value, sizeof(SWinKey));
if (code != TSDB_CODE_SUCCESS) {
- longjmp(pTaskInfo->env, TSDB_CODE_QRY_OUT_OF_MEMORY);
+ T_LONG_JMP(pTaskInfo->env, TSDB_CODE_QRY_OUT_OF_MEMORY);
}
pCurWin->winInfo.isOutput = true;
}
@@ -4703,6 +4719,7 @@ static SSDataBlock* doStreamStateAgg(SOperatorInfo* pOperator) {
SExprSupp* pSup = &pOperator->exprSupp;
SStreamStateAggOperatorInfo* pInfo = pOperator->info;
SOptrBasicInfo* pBInfo = &pInfo->binfo;
+ int64_t maxTs = INT64_MIN;
if (pOperator->status == OP_RES_TO_RETURN) {
doBuildDeleteDataBlock(pInfo->pSeDeleted, pInfo->pDelRes, &pInfo->pDelIterator);
if (pInfo->pDelRes->info.rows > 0) {
@@ -4731,10 +4748,11 @@ static SSDataBlock* doStreamStateAgg(SOperatorInfo* pOperator) {
if (pBlock->info.type == STREAM_CLEAR) {
doClearStateWindows(&pInfo->streamAggSup, pBlock, pSeUpdated, pInfo->pSeDeleted);
continue;
- } else if (pBlock->info.type == STREAM_DELETE_DATA) {
+ } else if (pBlock->info.type == STREAM_DELETE_DATA || pBlock->info.type == STREAM_DELETE_RESULT) {
SArray* pWins = taosArrayInit(16, sizeof(SResultWindowInfo));
doDeleteTimeWindows(&pInfo->streamAggSup, pBlock, 0, pWins, destroyStateWinInfo);
copyDeleteWindowInfo(pWins, pInfo->pSeDeleted);
+ removeSessionResults(pSeUpdated, pWins);
taosArrayDestroy(pWins);
continue;
} else if (pBlock->info.type == STREAM_GET_ALL) {
@@ -4749,8 +4767,9 @@ static SSDataBlock* doStreamStateAgg(SOperatorInfo* pOperator) {
// the pDataBlock are always the same one, no need to call this again
setInputDataBlock(pOperator, pSup->pCtx, pBlock, TSDB_ORDER_ASC, MAIN_SCAN, true);
doStreamStateAggImpl(pOperator, pBlock, pSeUpdated, pInfo->pSeDeleted);
- pInfo->twAggSup.maxTs = TMAX(pInfo->twAggSup.maxTs, pBlock->info.window.ekey);
+ maxTs = TMAX(maxTs, pBlock->info.window.ekey);
}
+ pInfo->twAggSup.maxTs = TMAX(pInfo->twAggSup.maxTs, maxTs);
// restore the value
pOperator->status = OP_RES_TO_RETURN;
@@ -4845,13 +4864,12 @@ SOperatorInfo* createStreamStateAggOperatorInfo(SOperatorInfo* downstream, SPhys
pOperator->operatorType = QUERY_NODE_PHYSICAL_PLAN_STREAM_STATE;
pOperator->blocking = true;
pOperator->status = OP_NOT_OPENED;
- pOperator->exprSupp.numOfExprs = numOfCols;
- pOperator->exprSupp.pExprInfo = pExprInfo;
pOperator->pTaskInfo = pTaskInfo;
pOperator->info = pInfo;
pOperator->fpSet = createOperatorFpSet(operatorDummyOpenFn, doStreamStateAgg, NULL, NULL,
destroyStreamStateOperatorInfo, aggEncodeResultRow, aggDecodeResultRow, NULL);
- initDownStream(downstream, &pInfo->streamAggSup, 0, pInfo->twAggSup.waterMark, pOperator->operatorType);
+ initDownStream(downstream, &pInfo->streamAggSup, 0, pInfo->twAggSup.waterMark, pOperator->operatorType,
+ pInfo->primaryTsIndex);
code = appendDownstream(pOperator, &downstream, 1);
if (code != TSDB_CODE_SUCCESS) {
goto _error;
@@ -4859,16 +4877,15 @@ SOperatorInfo* createStreamStateAggOperatorInfo(SOperatorInfo* downstream, SPhys
return pOperator;
_error:
- destroyStreamStateOperatorInfo(pInfo, numOfCols);
+ destroyStreamStateOperatorInfo(pInfo);
taosMemoryFreeClear(pOperator);
pTaskInfo->code = code;
return NULL;
}
-void destroyMergeAlignedIntervalOperatorInfo(void* param, int32_t numOfOutput) {
+void destroyMergeAlignedIntervalOperatorInfo(void* param) {
SMergeAlignedIntervalAggOperatorInfo* miaInfo = (SMergeAlignedIntervalAggOperatorInfo*)param;
- destroyIntervalOperatorInfo(miaInfo->intervalAggOperatorInfo, numOfOutput);
-
+ destroyIntervalOperatorInfo(miaInfo->intervalAggOperatorInfo);
taosMemoryFreeClear(param);
}
@@ -4881,14 +4898,14 @@ static int32_t outputMergeAlignedIntervalResult(SOperatorInfo* pOperatorInfo, ui
SExprSupp* pSup = &pOperatorInfo->exprSupp;
SET_RES_WINDOW_KEY(iaInfo->aggSup.keyBuf, &wstartTs, TSDB_KEYSIZE, tableGroupId);
- SResultRowPosition* p1 = (SResultRowPosition*)taosHashGet(iaInfo->aggSup.pResultRowHashTable, iaInfo->aggSup.keyBuf,
- GET_RES_WINDOW_KEY_LEN(TSDB_KEYSIZE));
+ SResultRowPosition* p1 = (SResultRowPosition*)tSimpleHashGet(
+ iaInfo->aggSup.pResultRowHashTable, iaInfo->aggSup.keyBuf, GET_RES_WINDOW_KEY_LEN(TSDB_KEYSIZE));
ASSERT(p1 != NULL);
finalizeResultRowIntoResultDataBlock(iaInfo->aggSup.pResultBuf, p1, pSup->pCtx, pSup->pExprInfo, pSup->numOfExprs,
pSup->rowEntryInfoOffset, pResultBlock, pTaskInfo);
- taosHashRemove(iaInfo->aggSup.pResultRowHashTable, iaInfo->aggSup.keyBuf, GET_RES_WINDOW_KEY_LEN(TSDB_KEYSIZE));
- ASSERT(taosHashGetSize(iaInfo->aggSup.pResultRowHashTable) == 0);
+ tSimpleHashRemove(iaInfo->aggSup.pResultRowHashTable, iaInfo->aggSup.keyBuf, GET_RES_WINDOW_KEY_LEN(TSDB_KEYSIZE));
+ ASSERT(tSimpleHashGetSize(iaInfo->aggSup.pResultRowHashTable) == 0);
return TSDB_CODE_SUCCESS;
}
@@ -4911,7 +4928,7 @@ static void doMergeAlignedIntervalAggImpl(SOperatorInfo* pOperatorInfo, SResultR
// there is an result exists
if (miaInfo->curTs != INT64_MIN) {
- ASSERT(taosHashGetSize(iaInfo->aggSup.pResultRowHashTable) == 1);
+ ASSERT(tSimpleHashGetSize(iaInfo->aggSup.pResultRowHashTable) == 1);
if (ts != miaInfo->curTs) {
outputMergeAlignedIntervalResult(pOperatorInfo, tableGroupId, pResultBlock, miaInfo->curTs);
@@ -4919,7 +4936,7 @@ static void doMergeAlignedIntervalAggImpl(SOperatorInfo* pOperatorInfo, SResultR
}
} else {
miaInfo->curTs = ts;
- ASSERT(taosHashGetSize(iaInfo->aggSup.pResultRowHashTable) == 0);
+ ASSERT(tSimpleHashGetSize(iaInfo->aggSup.pResultRowHashTable) == 0);
}
STimeWindow win = {0};
@@ -4931,7 +4948,7 @@ static void doMergeAlignedIntervalAggImpl(SOperatorInfo* pOperatorInfo, SResultR
int32_t ret = setTimeWindowOutputBuf(pResultRowInfo, &win, (scanFlag == MAIN_SCAN), &pResult, tableGroupId,
pSup->pCtx, numOfOutput, pSup->rowEntryInfoOffset, &iaInfo->aggSup, pTaskInfo);
if (ret != TSDB_CODE_SUCCESS || pResult == NULL) {
- longjmp(pTaskInfo->env, TSDB_CODE_QRY_OUT_OF_MEMORY);
+ T_LONG_JMP(pTaskInfo->env, TSDB_CODE_QRY_OUT_OF_MEMORY);
}
int32_t currPos = startPos;
@@ -4958,7 +4975,7 @@ static void doMergeAlignedIntervalAggImpl(SOperatorInfo* pOperatorInfo, SResultR
ret = setTimeWindowOutputBuf(pResultRowInfo, &currWin, (scanFlag == MAIN_SCAN), &pResult, tableGroupId, pSup->pCtx,
numOfOutput, pSup->rowEntryInfoOffset, &iaInfo->aggSup, pTaskInfo);
if (ret != TSDB_CODE_SUCCESS || pResult == NULL) {
- longjmp(pTaskInfo->env, TSDB_CODE_QRY_OUT_OF_MEMORY);
+ T_LONG_JMP(pTaskInfo->env, TSDB_CODE_QRY_OUT_OF_MEMORY);
}
miaInfo->curTs = currWin.skey;
@@ -4995,7 +5012,7 @@ static void doMergeAlignedIntervalAgg(SOperatorInfo* pOperator) {
if (pBlock == NULL) {
// close last unfinalized time window
if (miaInfo->curTs != INT64_MIN) {
- ASSERT(taosHashGetSize(iaInfo->aggSup.pResultRowHashTable) == 1);
+ ASSERT(tSimpleHashGetSize(iaInfo->aggSup.pResultRowHashTable) == 1);
outputMergeAlignedIntervalResult(pOperator, miaInfo->groupId, pRes, miaInfo->curTs);
miaInfo->curTs = INT64_MIN;
}
@@ -5063,9 +5080,7 @@ static SSDataBlock* mergeAlignedIntervalAgg(SOperatorInfo* pOperator) {
return (rows == 0) ? NULL : pRes;
}
-SOperatorInfo* createMergeAlignedIntervalOperatorInfo(SOperatorInfo* downstream, SExprInfo* pExprInfo,
- int32_t numOfCols, SSDataBlock* pResBlock, SInterval* pInterval,
- int32_t primaryTsSlotId, SNode* pCondition, bool mergeResultBlock,
+SOperatorInfo* createMergeAlignedIntervalOperatorInfo(SOperatorInfo* downstream, SMergeAlignedIntervalPhysiNode* pNode,
SExecTaskInfo* pTaskInfo) {
SMergeAlignedIntervalAggOperatorInfo* miaInfo = taosMemoryCalloc(1, sizeof(SMergeAlignedIntervalAggOperatorInfo));
SOperatorInfo* pOperator = taosMemoryCalloc(1, sizeof(SOperatorInfo));
@@ -5078,35 +5093,43 @@ SOperatorInfo* createMergeAlignedIntervalOperatorInfo(SOperatorInfo* downstream,
goto _error;
}
+ int32_t num = 0;
+ SExprInfo* pExprInfo = createExprInfo(pNode->window.pFuncs, NULL, &num);
+ SSDataBlock* pResBlock = createResDataBlock(pNode->window.node.pOutputDataBlockDesc);
+
+ SInterval interval = {.interval = pNode->interval,
+ .sliding = pNode->sliding,
+ .intervalUnit = pNode->intervalUnit,
+ .slidingUnit = pNode->slidingUnit,
+ .offset = pNode->offset,
+ .precision = ((SColumnNode*)pNode->window.pTspk)->node.resType.precision};
+
SIntervalAggOperatorInfo* iaInfo = miaInfo->intervalAggOperatorInfo;
SExprSupp* pSup = &pOperator->exprSupp;
- miaInfo->pCondition = pCondition;
+ miaInfo->pCondition = pNode->window.node.pConditions;
miaInfo->curTs = INT64_MIN;
-
iaInfo->win = pTaskInfo->window;
iaInfo->inputOrder = TSDB_ORDER_ASC;
- iaInfo->interval = *pInterval;
+ iaInfo->interval = interval;
iaInfo->execModel = pTaskInfo->execModel;
- iaInfo->primaryTsIndex = primaryTsSlotId;
- iaInfo->binfo.mergeResultBlock = mergeResultBlock;
+ iaInfo->primaryTsIndex = ((SColumnNode*)pNode->window.pTspk)->slotId;
+ iaInfo->binfo.mergeResultBlock = pNode->window.mergeDataBlock;
size_t keyBufSize = sizeof(int64_t) + sizeof(int64_t) + POINTER_BYTES;
initResultSizeInfo(&pOperator->resultInfo, 4096);
- int32_t code =
- initAggInfo(&pOperator->exprSupp, &iaInfo->aggSup, pExprInfo, numOfCols, keyBufSize, pTaskInfo->id.str);
- initBasicInfo(&iaInfo->binfo, pResBlock);
+ int32_t code = initAggInfo(&pOperator->exprSupp, &iaInfo->aggSup, pExprInfo, num, keyBufSize, pTaskInfo->id.str);
+ if (code != TSDB_CODE_SUCCESS) {
+ goto _error;
+ }
+ initBasicInfo(&iaInfo->binfo, pResBlock);
initExecTimeWindowInfo(&iaInfo->twAggSup.timeWindowData, &iaInfo->win);
- iaInfo->timeWindowInterpo = timeWindowinterpNeeded(pSup->pCtx, numOfCols, iaInfo);
+ iaInfo->timeWindowInterpo = timeWindowinterpNeeded(pSup->pCtx, num, iaInfo);
if (iaInfo->timeWindowInterpo) {
- iaInfo->binfo.resultRowInfo.openWindow = tdListNew(sizeof(SResultRowPosition));
- }
-
- if (code != TSDB_CODE_SUCCESS) {
- goto _error;
+ iaInfo->binfo.resultRowInfo.openWindow = tdListNew(sizeof(SOpenWindowInfo));
}
initResultRowInfo(&iaInfo->binfo.resultRowInfo);
@@ -5116,9 +5139,7 @@ SOperatorInfo* createMergeAlignedIntervalOperatorInfo(SOperatorInfo* downstream,
pOperator->operatorType = QUERY_NODE_PHYSICAL_PLAN_MERGE_ALIGNED_INTERVAL;
pOperator->blocking = false;
pOperator->status = OP_NOT_OPENED;
- pOperator->exprSupp.pExprInfo = pExprInfo;
pOperator->pTaskInfo = pTaskInfo;
- pOperator->exprSupp.numOfExprs = numOfCols;
pOperator->info = miaInfo;
pOperator->fpSet = createOperatorFpSet(operatorDummyOpenFn, mergeAlignedIntervalAgg, NULL, NULL,
@@ -5132,7 +5153,7 @@ SOperatorInfo* createMergeAlignedIntervalOperatorInfo(SOperatorInfo* downstream,
return pOperator;
_error:
- destroyMergeAlignedIntervalOperatorInfo(miaInfo, numOfCols);
+ destroyMergeAlignedIntervalOperatorInfo(miaInfo);
taosMemoryFreeClear(pOperator);
pTaskInfo->code = code;
return NULL;
@@ -5155,10 +5176,10 @@ typedef struct SGroupTimeWindow {
STimeWindow window;
} SGroupTimeWindow;
-void destroyMergeIntervalOperatorInfo(void* param, int32_t numOfOutput) {
+void destroyMergeIntervalOperatorInfo(void* param) {
SMergeIntervalAggOperatorInfo* miaInfo = (SMergeIntervalAggOperatorInfo*)param;
tdListFree(miaInfo->groupIntervals);
- destroyIntervalOperatorInfo(&miaInfo->intervalAggOperatorInfo, numOfOutput);
+ destroyIntervalOperatorInfo(&miaInfo->intervalAggOperatorInfo);
taosMemoryFreeClear(param);
}
@@ -5172,12 +5193,12 @@ static int32_t finalizeWindowResult(SOperatorInfo* pOperatorInfo, uint64_t table
SExprSupp* pExprSup = &pOperatorInfo->exprSupp;
SET_RES_WINDOW_KEY(iaInfo->aggSup.keyBuf, &win->skey, TSDB_KEYSIZE, tableGroupId);
- SResultRowPosition* p1 = (SResultRowPosition*)taosHashGet(iaInfo->aggSup.pResultRowHashTable, iaInfo->aggSup.keyBuf,
- GET_RES_WINDOW_KEY_LEN(TSDB_KEYSIZE));
+ SResultRowPosition* p1 = (SResultRowPosition*)tSimpleHashGet(
+ iaInfo->aggSup.pResultRowHashTable, iaInfo->aggSup.keyBuf, GET_RES_WINDOW_KEY_LEN(TSDB_KEYSIZE));
ASSERT(p1 != NULL);
finalizeResultRowIntoResultDataBlock(iaInfo->aggSup.pResultBuf, p1, pExprSup->pCtx, pExprSup->pExprInfo,
pExprSup->numOfExprs, pExprSup->rowEntryInfoOffset, pResultBlock, pTaskInfo);
- taosHashRemove(iaInfo->aggSup.pResultRowHashTable, iaInfo->aggSup.keyBuf, GET_RES_WINDOW_KEY_LEN(TSDB_KEYSIZE));
+ tSimpleHashRemove(iaInfo->aggSup.pResultRowHashTable, iaInfo->aggSup.keyBuf, GET_RES_WINDOW_KEY_LEN(TSDB_KEYSIZE));
return TSDB_CODE_SUCCESS;
}
@@ -5233,7 +5254,7 @@ static void doMergeIntervalAggImpl(SOperatorInfo* pOperatorInfo, SResultRowInfo*
setTimeWindowOutputBuf(pResultRowInfo, &win, (scanFlag == MAIN_SCAN), &pResult, tableGroupId, pExprSup->pCtx,
numOfOutput, pExprSup->rowEntryInfoOffset, &iaInfo->aggSup, pTaskInfo);
if (ret != TSDB_CODE_SUCCESS || pResult == NULL) {
- longjmp(pTaskInfo->env, TSDB_CODE_QRY_OUT_OF_MEMORY);
+ T_LONG_JMP(pTaskInfo->env, TSDB_CODE_QRY_OUT_OF_MEMORY);
}
TSKEY ekey = ascScan ? win.ekey : win.skey;
@@ -5243,14 +5264,14 @@ static void doMergeIntervalAggImpl(SOperatorInfo* pOperatorInfo, SResultRowInfo*
// prev time window not interpolation yet.
if (iaInfo->timeWindowInterpo) {
- SResultRowPosition pos = addToOpenWindowList(pResultRowInfo, pResult);
+ SResultRowPosition pos = addToOpenWindowList(pResultRowInfo, pResult, tableGroupId);
doInterpUnclosedTimeWindow(pOperatorInfo, numOfOutput, pResultRowInfo, pBlock, scanFlag, tsCols, &pos);
// restore current time window
ret = setTimeWindowOutputBuf(pResultRowInfo, &win, (scanFlag == MAIN_SCAN), &pResult, tableGroupId, pExprSup->pCtx,
numOfOutput, pExprSup->rowEntryInfoOffset, &iaInfo->aggSup, pTaskInfo);
if (ret != TSDB_CODE_SUCCESS) {
- longjmp(pTaskInfo->env, TSDB_CODE_QRY_OUT_OF_MEMORY);
+ T_LONG_JMP(pTaskInfo->env, TSDB_CODE_QRY_OUT_OF_MEMORY);
}
// window start key interpolation
@@ -5279,7 +5300,7 @@ static void doMergeIntervalAggImpl(SOperatorInfo* pOperatorInfo, SResultRowInfo*
setTimeWindowOutputBuf(pResultRowInfo, &nextWin, (scanFlag == MAIN_SCAN), &pResult, tableGroupId,
pExprSup->pCtx, numOfOutput, pExprSup->rowEntryInfoOffset, &iaInfo->aggSup, pTaskInfo);
if (code != TSDB_CODE_SUCCESS || pResult == NULL) {
- longjmp(pTaskInfo->env, TSDB_CODE_QRY_OUT_OF_MEMORY);
+ T_LONG_JMP(pTaskInfo->env, TSDB_CODE_QRY_OUT_OF_MEMORY);
}
ekey = ascScan ? nextWin.ekey : nextWin.skey;
@@ -5376,54 +5397,64 @@ static SSDataBlock* doMergeIntervalAgg(SOperatorInfo* pOperator) {
return (rows == 0) ? NULL : pRes;
}
-SOperatorInfo* createMergeIntervalOperatorInfo(SOperatorInfo* downstream, SExprInfo* pExprInfo, int32_t numOfCols,
- SSDataBlock* pResBlock, SInterval* pInterval, int32_t primaryTsSlotId,
- bool mergeBlock, SExecTaskInfo* pTaskInfo) {
- SMergeIntervalAggOperatorInfo* miaInfo = taosMemoryCalloc(1, sizeof(SMergeIntervalAggOperatorInfo));
+SOperatorInfo* createMergeIntervalOperatorInfo(SOperatorInfo* downstream, SMergeIntervalPhysiNode* pIntervalPhyNode,
+ SExecTaskInfo* pTaskInfo) {
+ SMergeIntervalAggOperatorInfo* pMergeIntervalInfo = taosMemoryCalloc(1, sizeof(SMergeIntervalAggOperatorInfo));
SOperatorInfo* pOperator = taosMemoryCalloc(1, sizeof(SOperatorInfo));
- if (miaInfo == NULL || pOperator == NULL) {
+ if (pMergeIntervalInfo == NULL || pOperator == NULL) {
goto _error;
}
- miaInfo->groupIntervals = tdListNew(sizeof(SGroupTimeWindow));
+ int32_t num = 0;
+ SExprInfo* pExprInfo = createExprInfo(pIntervalPhyNode->window.pFuncs, NULL, &num);
+ SSDataBlock* pResBlock = createResDataBlock(pIntervalPhyNode->window.node.pOutputDataBlockDesc);
- SIntervalAggOperatorInfo* iaInfo = &miaInfo->intervalAggOperatorInfo;
- iaInfo->win = pTaskInfo->window;
- iaInfo->inputOrder = TSDB_ORDER_ASC;
- iaInfo->interval = *pInterval;
- iaInfo->execModel = pTaskInfo->execModel;
- iaInfo->binfo.mergeResultBlock = mergeBlock;
+ SInterval interval = {.interval = pIntervalPhyNode->interval,
+ .sliding = pIntervalPhyNode->sliding,
+ .intervalUnit = pIntervalPhyNode->intervalUnit,
+ .slidingUnit = pIntervalPhyNode->slidingUnit,
+ .offset = pIntervalPhyNode->offset,
+ .precision = ((SColumnNode*)pIntervalPhyNode->window.pTspk)->node.resType.precision};
+
+ pMergeIntervalInfo->groupIntervals = tdListNew(sizeof(SGroupTimeWindow));
- iaInfo->primaryTsIndex = primaryTsSlotId;
+ SIntervalAggOperatorInfo* pIntervalInfo = &pMergeIntervalInfo->intervalAggOperatorInfo;
+ pIntervalInfo->win = pTaskInfo->window;
+ pIntervalInfo->inputOrder = TSDB_ORDER_ASC;
+ pIntervalInfo->interval = interval;
+ pIntervalInfo->execModel = pTaskInfo->execModel;
+ pIntervalInfo->binfo.mergeResultBlock = pIntervalPhyNode->window.mergeDataBlock;
+ pIntervalInfo->primaryTsIndex = ((SColumnNode*)pIntervalPhyNode->window.pTspk)->slotId;
SExprSupp* pExprSupp = &pOperator->exprSupp;
size_t keyBufSize = sizeof(int64_t) + sizeof(int64_t) + POINTER_BYTES;
initResultSizeInfo(&pOperator->resultInfo, 4096);
- int32_t code = initAggInfo(pExprSupp, &iaInfo->aggSup, pExprInfo, numOfCols, keyBufSize, pTaskInfo->id.str);
- initBasicInfo(&iaInfo->binfo, pResBlock);
+ int32_t code = initAggInfo(pExprSupp, &pIntervalInfo->aggSup, pExprInfo, num, keyBufSize, pTaskInfo->id.str);
+ if (code != TSDB_CODE_SUCCESS) {
+ goto _error;
+ }
- initExecTimeWindowInfo(&iaInfo->twAggSup.timeWindowData, &iaInfo->win);
+ initBasicInfo(&pIntervalInfo->binfo, pResBlock);
+ initExecTimeWindowInfo(&pIntervalInfo->twAggSup.timeWindowData, &pIntervalInfo->win);
- iaInfo->timeWindowInterpo = timeWindowinterpNeeded(pExprSupp->pCtx, numOfCols, iaInfo);
- if (iaInfo->timeWindowInterpo) {
- iaInfo->binfo.resultRowInfo.openWindow = tdListNew(sizeof(SResultRowPosition));
- if (iaInfo->binfo.resultRowInfo.openWindow == NULL) {
+ pIntervalInfo->timeWindowInterpo = timeWindowinterpNeeded(pExprSupp->pCtx, num, pIntervalInfo);
+ if (pIntervalInfo->timeWindowInterpo) {
+ pIntervalInfo->binfo.resultRowInfo.openWindow = tdListNew(sizeof(SOpenWindowInfo));
+ if (pIntervalInfo->binfo.resultRowInfo.openWindow == NULL) {
goto _error;
}
}
- initResultRowInfo(&iaInfo->binfo.resultRowInfo);
+ initResultRowInfo(&pIntervalInfo->binfo.resultRowInfo);
pOperator->name = "TimeMergeIntervalAggOperator";
pOperator->operatorType = QUERY_NODE_PHYSICAL_PLAN_MERGE_INTERVAL;
pOperator->blocking = false;
pOperator->status = OP_NOT_OPENED;
- pOperator->exprSupp.pExprInfo = pExprInfo;
pOperator->pTaskInfo = pTaskInfo;
- pOperator->exprSupp.numOfExprs = numOfCols;
- pOperator->info = miaInfo;
+ pOperator->info = pMergeIntervalInfo;
pOperator->fpSet = createOperatorFpSet(operatorDummyOpenFn, doMergeIntervalAgg, NULL, NULL,
destroyMergeIntervalOperatorInfo, NULL, NULL, NULL);
@@ -5436,7 +5467,401 @@ SOperatorInfo* createMergeIntervalOperatorInfo(SOperatorInfo* downstream, SExprI
return pOperator;
_error:
- destroyMergeIntervalOperatorInfo(miaInfo, numOfCols);
+ destroyMergeIntervalOperatorInfo(pMergeIntervalInfo);
+ taosMemoryFreeClear(pOperator);
+ pTaskInfo->code = code;
+ return NULL;
+}
+
+static void doStreamIntervalAggImpl(SOperatorInfo* pOperatorInfo, SResultRowInfo* pResultRowInfo, SSDataBlock* pBlock,
+                                    int32_t scanFlag, SHashObj* pUpdatedMap) {  // apply interval agg funcs to every window overlapped by pBlock
+  SStreamIntervalOperatorInfo* pInfo = (SStreamIntervalOperatorInfo*)pOperatorInfo->info;
+
+  SExecTaskInfo* pTaskInfo = pOperatorInfo->pTaskInfo;
+  SExprSupp*     pSup = &pOperatorInfo->exprSupp;
+
+  int32_t          startPos = 0;
+  int32_t          numOfOutput = pSup->numOfExprs;
+  SColumnInfoData* pColDataInfo = taosArrayGet(pBlock->pDataBlock, pInfo->primaryTsIndex);
+  TSKEY*           tsCols = (TSKEY*)pColDataInfo->pData;
+  uint64_t         tableGroupId = pBlock->info.groupId;
+  bool             ascScan = true;  // stream input is always processed in ascending ts order here
+  TSKEY            ts = getStartTsKey(&pBlock->info.window, tsCols);
+  SResultRow*      pResult = NULL;
+
+  STimeWindow win = getActiveTimeWindow(pInfo->aggSup.pResultBuf, pResultRowInfo, ts, &pInfo->interval, TSDB_ORDER_ASC);
+  int32_t     ret = TSDB_CODE_SUCCESS;
+  if ((!pInfo->ignoreExpiredData || !isCloseWindow(&win, &pInfo->twAggSup)) &&
+      inSlidingWindow(&pInfo->interval, &win, &pBlock->info)) {  // skip windows already closed by watermark when expired data is ignored
+    ret = setTimeWindowOutputBuf(pResultRowInfo, &win, (scanFlag == MAIN_SCAN), &pResult, tableGroupId, pSup->pCtx,
+                                 numOfOutput, pSup->rowEntryInfoOffset, &pInfo->aggSup, pTaskInfo);
+    if (ret != TSDB_CODE_SUCCESS || pResult == NULL) {
+      T_LONG_JMP(pTaskInfo->env, TSDB_CODE_QRY_OUT_OF_MEMORY);
+    }
+    if (pInfo->twAggSup.calTrigger == STREAM_TRIGGER_AT_ONCE) {
+      saveWinResultRow(pResult, tableGroupId, pUpdatedMap);  // AT_ONCE trigger: record the row so it is emitted immediately
+      setResultBufPageDirty(pInfo->aggSup.pResultBuf, &pResultRowInfo->cur);
+    }
+  }
+
+  TSKEY   ekey = ascScan ? win.ekey : win.skey;
+  int32_t forwardRows =
+      getNumOfRowsInTimeWindow(&pBlock->info, tsCols, startPos, ekey, binarySearchForKey, NULL, TSDB_ORDER_ASC);
+  ASSERT(forwardRows > 0);
+
+  if ((!pInfo->ignoreExpiredData || !isCloseWindow(&win, &pInfo->twAggSup)) &&
+      inSlidingWindow(&pInfo->interval, &win, &pBlock->info)) {
+    updateTimeWindowInfo(&pInfo->twAggSup.timeWindowData, &win, true);
+    doApplyFunctions(pTaskInfo, pSup->pCtx, &pInfo->twAggSup.timeWindowData, startPos, forwardRows, pBlock->info.rows,
+                     numOfOutput);
+  }
+
+  STimeWindow nextWin = win;
+  while (1) {  // walk every remaining window covered by this data block
+    int32_t prevEndPos = forwardRows - 1 + startPos;
+    startPos = getNextQualifiedWindow(&pInfo->interval, &nextWin, &pBlock->info, tsCols, prevEndPos, TSDB_ORDER_ASC);
+    if (startPos < 0) {
+      break;
+    }
+    if (pInfo->ignoreExpiredData && isCloseWindow(&nextWin, &pInfo->twAggSup)) {  // expired window: advance past it without aggregating
+      ekey = ascScan ? nextWin.ekey : nextWin.skey;
+      forwardRows =
+          getNumOfRowsInTimeWindow(&pBlock->info, tsCols, startPos, ekey, binarySearchForKey, NULL, TSDB_ORDER_ASC);
+      continue;
+    }
+
+    // null data, failed to allocate more memory buffer
+    int32_t code = setTimeWindowOutputBuf(pResultRowInfo, &nextWin, (scanFlag == MAIN_SCAN), &pResult, tableGroupId,
+                                          pSup->pCtx, numOfOutput, pSup->rowEntryInfoOffset, &pInfo->aggSup, pTaskInfo);
+    if (code != TSDB_CODE_SUCCESS || pResult == NULL) {
+      T_LONG_JMP(pTaskInfo->env, TSDB_CODE_QRY_OUT_OF_MEMORY);
+    }
+
+    if (pInfo->twAggSup.calTrigger == STREAM_TRIGGER_AT_ONCE) {
+      saveWinResultRow(pResult, tableGroupId, pUpdatedMap);
+      setResultBufPageDirty(pInfo->aggSup.pResultBuf, &pResultRowInfo->cur);
+    }
+
+    ekey = ascScan ? nextWin.ekey : nextWin.skey;
+    forwardRows =
+        getNumOfRowsInTimeWindow(&pBlock->info, tsCols, startPos, ekey, binarySearchForKey, NULL, TSDB_ORDER_ASC);
+    updateTimeWindowInfo(&pInfo->twAggSup.timeWindowData, &nextWin, true);
+    doApplyFunctions(pTaskInfo, pSup->pCtx, &pInfo->twAggSup.timeWindowData, startPos, forwardRows, pBlock->info.rows,
+                     numOfOutput);
+  }
+}
+
+static void doStreamIntervalAggImpl2(SOperatorInfo* pOperatorInfo, SSDataBlock* pSDataBlock, uint64_t tableGroupId,
+                                     SHashObj* pUpdatedMap) {  // state-backend variant: window rows persisted via saveOutput/releaseOutputBuf
+  SStreamIntervalOperatorInfo* pInfo = (SStreamIntervalOperatorInfo*)pOperatorInfo->info;
+
+  SResultRowInfo* pResultRowInfo = &(pInfo->binfo.resultRowInfo);
+  SExecTaskInfo*  pTaskInfo = pOperatorInfo->pTaskInfo;
+  SExprSupp*      pSup = &pOperatorInfo->exprSupp;
+  int32_t         numOfOutput = pSup->numOfExprs;
+  int32_t         step = 1;
+  TSKEY*          tsCols = NULL;
+  SResultRow*     pResult = NULL;
+  int32_t         forwardRows = 0;
+  int32_t         aa = 4;  // NOTE(review): unused local — candidate for removal
+
+  ASSERT(pSDataBlock->pDataBlock != NULL);
+  SColumnInfoData* pColDataInfo = taosArrayGet(pSDataBlock->pDataBlock, pInfo->primaryTsIndex);
+  tsCols = (int64_t*)pColDataInfo->pData;
+
+  int32_t     startPos = 0;
+  TSKEY       ts = getStartTsKey(&pSDataBlock->info.window, tsCols);
+  STimeWindow nextWin =
+      getActiveTimeWindow(pInfo->aggSup.pResultBuf, pResultRowInfo, ts, &pInfo->interval, TSDB_ORDER_ASC);
+  while (1) {
+    bool isClosed = isCloseWindow(&nextWin, &pInfo->twAggSup);
+    if ((pInfo->ignoreExpiredData && isClosed) || !inSlidingWindow(&pInfo->interval, &nextWin, &pSDataBlock->info)) {
+      startPos = getNexWindowPos(&pInfo->interval, &pSDataBlock->info, tsCols, startPos, nextWin.ekey, &nextWin);  // skip expired / out-of-range window
+      if (startPos < 0) {
+        break;
+      }
+      continue;
+    }
+
+    int32_t code = setOutputBuf(&nextWin, &pResult, tableGroupId, pSup->pCtx, numOfOutput, pSup->rowEntryInfoOffset,
+                                &pInfo->aggSup, pTaskInfo);
+    if (code != TSDB_CODE_SUCCESS || pResult == NULL) {
+      T_LONG_JMP(pTaskInfo->env, TSDB_CODE_QRY_OUT_OF_MEMORY);
+    }
+
+    forwardRows = getNumOfRowsInTimeWindow(&pSDataBlock->info, tsCols, startPos, nextWin.ekey, binarySearchForKey, NULL,
+                                           TSDB_ORDER_ASC);
+    if (pInfo->twAggSup.calTrigger == STREAM_TRIGGER_AT_ONCE && pUpdatedMap) {
+      saveWinResultRow(pResult, tableGroupId, pUpdatedMap);
+    }
+    updateTimeWindowInfo(&pInfo->twAggSup.timeWindowData, &nextWin, true);
+    doApplyFunctions(pTaskInfo, pSup->pCtx, &pInfo->twAggSup.timeWindowData, startPos, forwardRows,
+                     pSDataBlock->info.rows, numOfOutput);
+    SWinKey key = {
+        .ts = nextWin.skey,
+        .groupId = tableGroupId,
+    };
+    saveOutput(pTaskInfo, &key, pResult, pInfo->aggSup.resultRowSize);  // persist the window result row to the stream state backend
+    releaseOutputBuf(pTaskInfo, &key, pResult);
+    int32_t prevEndPos = (forwardRows - 1) * step + startPos;
+    ASSERT(pSDataBlock->info.window.skey > 0 && pSDataBlock->info.window.ekey > 0);
+    startPos =
+        getNextQualifiedWindow(&pInfo->interval, &nextWin, &pSDataBlock->info, tsCols, prevEndPos, TSDB_ORDER_ASC);
+    if (startPos < 0) {
+      break;
+    }
+  }
+}
+
+void doBuildResult(SOperatorInfo* pOperator, SSDataBlock* pBlock, SGroupResInfo* pGroupResInfo) {  // fill pBlock from buffered group results; leaves it empty if none remain
+  SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo;
+  // set output datablock version
+  pBlock->info.version = pTaskInfo->version;
+
+  blockDataCleanup(pBlock);
+  if (!hasRemainResults(pGroupResInfo)) {
+    return;  // nothing buffered: caller sees an empty block
+  }
+
+  // clear the existed group id
+  pBlock->info.groupId = 0;
+  buildDataBlockFromGroupRes(pTaskInfo, pBlock, &pOperator->exprSupp, pGroupResInfo);
+}
+
+static SSDataBlock* doStreamIntervalAgg(SOperatorInfo* pOperator) {  // main loop of the single (non-final) stream interval operator
+  SStreamIntervalOperatorInfo* pInfo = pOperator->info;
+  SExecTaskInfo*               pTaskInfo = pOperator->pTaskInfo;
+  int64_t                      maxTs = INT64_MIN;
+  SExprSupp*                   pSup = &pOperator->exprSupp;
+
+  if (pOperator->status == OP_EXEC_DONE) {
+    return NULL;
+  }
+
+  if (pOperator->status == OP_RES_TO_RETURN) {  // flush phase: emit delete results first, then buffered window results
+    doBuildDeleteResult(pInfo->pDelWins, &pInfo->delIndex, pInfo->pDelRes);
+    if (pInfo->pDelRes->info.rows > 0) {
+      printDataBlock(pInfo->pDelRes, "single interval");
+      return pInfo->pDelRes;
+    }
+
+    doBuildResultDatablock(pOperator, &pInfo->binfo, &pInfo->groupResInfo, pInfo->aggSup.pResultBuf);
+    if (pInfo->binfo.pRes->info.rows == 0 || !hasRemainResults(&pInfo->groupResInfo)) {
+      pOperator->status = OP_EXEC_DONE;
+      qDebug("===stream===single interval is done");
+      freeAllPages(pInfo->pRecycledPages, pInfo->aggSup.pResultBuf);
+    }
+    printDataBlock(pInfo->binfo.pRes, "single interval");
+    return pInfo->binfo.pRes->info.rows == 0 ? NULL : pInfo->binfo.pRes;
+  }
+
+  SOperatorInfo* downstream = pOperator->pDownstream[0];
+
+  SArray*    pUpdated = taosArrayInit(4, POINTER_BYTES);  // SResKeyPos
+  _hash_fn_t hashFn = taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY);
+  SHashObj*  pUpdatedMap = taosHashInit(1024, hashFn, false, HASH_NO_LOCK);
+
+  SStreamState* pState = pTaskInfo->streamInfo.pState;
+
+  while (1) {  // drain downstream completely before producing any result block
+    SSDataBlock* pBlock = downstream->fpSet.getNextFn(downstream);
+    if (pBlock == NULL) {
+      break;
+    }
+    printDataBlock(pBlock, "single interval recv");
+
+    if (pBlock->info.type == STREAM_CLEAR) {  // control blocks are handled without aggregation
+      doClearWindows(&pInfo->aggSup, &pOperator->exprSupp, &pInfo->interval, pOperator->exprSupp.numOfExprs, pBlock,
+                     NULL);
+      qDebug("%s clear existed time window results for updates checked", GET_TASKID(pTaskInfo));
+      continue;
+    } else if (pBlock->info.type == STREAM_DELETE_DATA || pBlock->info.type == STREAM_DELETE_RESULT) {
+      doDeleteSpecifyIntervalWindow(&pInfo->aggSup, pBlock, pInfo->pDelWins, &pInfo->interval, pUpdatedMap);
+      continue;
+    } else if (pBlock->info.type == STREAM_GET_ALL) {
+      getAllIntervalWindow(pInfo->aggSup.pResultRowHashTable, pUpdatedMap);
+      continue;
+    }
+
+    if (pBlock->info.type == STREAM_NORMAL && pBlock->info.version != 0) {
+      // set input version
+      pTaskInfo->version = pBlock->info.version;
+    }
+
+    if (pInfo->scalarSupp.pExprInfo != NULL) {  // evaluate scalar projections in place before aggregation
+      SExprSupp* pExprSup = &pInfo->scalarSupp;
+      projectApplyFunctions(pExprSup->pExprInfo, pBlock, pBlock, pExprSup->pCtx, pExprSup->numOfExprs, NULL);
+    }
+
+    // The timewindow that overlaps the timestamps of the input pBlock need to be recalculated and return to the
+    // caller. Note that all the time window are not close till now.
+    // the pDataBlock are always the same one, no need to call this again
+    setInputDataBlock(pOperator, pSup->pCtx, pBlock, TSDB_ORDER_ASC, MAIN_SCAN, true);
+    if (pInfo->invertible) {
+      setInverFunction(pSup->pCtx, pOperator->exprSupp.numOfExprs, pBlock->info.type);
+    }
+
+    maxTs = TMAX(maxTs, pBlock->info.window.ekey);
+    doStreamIntervalAggImpl(pOperator, &pInfo->binfo.resultRowInfo, pBlock, MAIN_SCAN, pUpdatedMap);
+    // new disc buf
+    /*doStreamIntervalAggImpl2(pOperator, pBlock, pBlock->info.groupId, pUpdatedMap);*/
+  }
+  pInfo->twAggSup.maxTs = TMAX(pInfo->twAggSup.maxTs, maxTs);
+
+#if 0
+  if (pState) {  // NOTE(review): disabled debug code exercising the stream state backend
+    printf(">>>>>>>> stream read backend\n");
+    SWinKey key = {
+        .ts = 1,
+        .groupId = 2,
+    };
+    char*   val = NULL;
+    int32_t sz;
+    if (streamStateGet(pState, &key, (void**)&val, &sz) < 0) {
+      ASSERT(0);
+    }
+    printf("stream read %s %d\n", val, sz);
+    streamFreeVal(val);
+
+    SStreamStateCur* pCur = streamStateGetCur(pState, &key);
+    ASSERT(pCur);
+    while (streamStateCurNext(pState, pCur) == 0) {
+      SWinKey     key1;
+      const void* val1;
+      if (streamStateGetKVByCur(pCur, &key1, &val1, &sz) < 0) {
+        break;
+      }
+      printf("stream iter key groupId:%d ts:%d, value %s %d\n", key1.groupId, key1.ts, val1, sz);  // NOTE(review): %d does not match 64-bit groupId/ts — use PRIu64/PRId64 if re-enabled
+    }
+    streamStateFreeCur(pCur);
+  }
+#endif
+
+  pOperator->status = OP_RES_TO_RETURN;
+  closeIntervalWindow(pInfo->aggSup.pResultRowHashTable, &pInfo->twAggSup, &pInfo->interval, NULL, pUpdatedMap,
+                      pInfo->pRecycledPages, pInfo->aggSup.pResultBuf);
+
+  void* pIte = NULL;
+  while ((pIte = taosHashIterate(pUpdatedMap, pIte)) != NULL) {  // collect updated window keys for ordered emission
+    taosArrayPush(pUpdated, pIte);
+  }
+  taosArraySort(pUpdated, resultrowComparAsc);
+
+  // new disc buf
+  finalizeUpdatedResult(pOperator->exprSupp.numOfExprs, pInfo->aggSup.pResultBuf, pUpdated, pSup->rowEntryInfoOffset);
+  initMultiResInfoFromArrayList(&pInfo->groupResInfo, pUpdated);
+  blockDataEnsureCapacity(pInfo->binfo.pRes, pOperator->resultInfo.capacity);
+  removeDeleteResults(pUpdatedMap, pInfo->pDelWins);
+  taosHashCleanup(pUpdatedMap);
+  doBuildDeleteResult(pInfo->pDelWins, &pInfo->delIndex, pInfo->pDelRes);
+  if (pInfo->pDelRes->info.rows > 0) {
+    printDataBlock(pInfo->pDelRes, "single interval");
+    return pInfo->pDelRes;
+  }
+
+  doBuildResultDatablock(pOperator, &pInfo->binfo, &pInfo->groupResInfo, pInfo->aggSup.pResultBuf);
+  // new disc buf
+  // doBuildResult(pOperator, pInfo->binfo.pRes, &pInfo->groupResInfo);
+  printDataBlock(pInfo->binfo.pRes, "single interval");
+  return pInfo->binfo.pRes->info.rows == 0 ? NULL : pInfo->binfo.pRes;
+}
+
+void destroyStreamIntervalOperatorInfo(void* param) {  // releases every resource owned by SStreamIntervalOperatorInfo
+  SStreamIntervalOperatorInfo* pInfo = (SStreamIntervalOperatorInfo*)param;
+  cleanupBasicInfo(&pInfo->binfo);
+  cleanupAggSup(&pInfo->aggSup);
+  pInfo->pRecycledPages = taosArrayDestroy(pInfo->pRecycledPages);
+
+  pInfo->pDelWins = taosArrayDestroy(pInfo->pDelWins);
+  pInfo->pDelRes = blockDataDestroy(pInfo->pDelRes);
+
+  cleanupGroupResInfo(&pInfo->groupResInfo);
+  colDataDestroy(&pInfo->twAggSup.timeWindowData);  // NOTE(review): scalarSupp (set up via initExprSupp in create) is not released here — confirm no leak
+  taosMemoryFreeClear(param);
+}
+
+SOperatorInfo* createStreamIntervalOperatorInfo(SOperatorInfo* downstream, SPhysiNode* pPhyNode,
+ SExecTaskInfo* pTaskInfo) {
+ SStreamIntervalOperatorInfo* pInfo = taosMemoryCalloc(1, sizeof(SStreamIntervalOperatorInfo));
+ SOperatorInfo* pOperator = taosMemoryCalloc(1, sizeof(SOperatorInfo));
+ if (pInfo == NULL || pOperator == NULL) {
+ goto _error;
+ }
+ SStreamIntervalPhysiNode* pIntervalPhyNode = (SStreamIntervalPhysiNode*)pPhyNode;
+
+ int32_t numOfCols = 0;
+ SExprInfo* pExprInfo = createExprInfo(pIntervalPhyNode->window.pFuncs, NULL, &numOfCols);
+ ASSERT(numOfCols > 0);
+ SSDataBlock* pResBlock = createResDataBlock(pPhyNode->pOutputDataBlockDesc);
+ SInterval interval = {
+ .interval = pIntervalPhyNode->interval,
+ .sliding = pIntervalPhyNode->sliding,
+ .intervalUnit = pIntervalPhyNode->intervalUnit,
+ .slidingUnit = pIntervalPhyNode->slidingUnit,
+ .offset = pIntervalPhyNode->offset,
+ .precision = ((SColumnNode*)pIntervalPhyNode->window.pTspk)->node.resType.precision,
+ };
+ STimeWindowAggSupp twAggSupp = {
+ .waterMark = pIntervalPhyNode->window.watermark,
+ .calTrigger = pIntervalPhyNode->window.triggerType,
+ .maxTs = INT64_MIN,
+ };
+ ASSERT(twAggSupp.calTrigger != STREAM_TRIGGER_MAX_DELAY);
+ pOperator->pTaskInfo = pTaskInfo;
+ pInfo->interval = interval;
+ pInfo->twAggSup = twAggSupp;
+ pInfo->ignoreExpiredData = pIntervalPhyNode->window.igExpired;
+ pInfo->isFinal = false;
+
+ if (pIntervalPhyNode->window.pExprs != NULL) {
+ int32_t numOfScalar = 0;
+ SExprInfo* pScalarExprInfo = createExprInfo(pIntervalPhyNode->window.pExprs, NULL, &numOfScalar);
+ int32_t code = initExprSupp(&pInfo->scalarSupp, pScalarExprInfo, numOfScalar);
+ if (code != TSDB_CODE_SUCCESS) {
+ goto _error;
+ }
+ }
+
+ pInfo->primaryTsIndex = ((SColumnNode*)pIntervalPhyNode->window.pTspk)->slotId;
+ initResultSizeInfo(&pOperator->resultInfo, 4096);
+ SExprSupp* pSup = &pOperator->exprSupp;
+ size_t keyBufSize = sizeof(int64_t) + sizeof(int64_t) + POINTER_BYTES;
+ int32_t code = initAggInfo(pSup, &pInfo->aggSup, pExprInfo, numOfCols, keyBufSize, pTaskInfo->id.str);
+ if (code != TSDB_CODE_SUCCESS) {
+ goto _error;
+ }
+
+ initBasicInfo(&pInfo->binfo, pResBlock);
+ initStreamFunciton(pSup->pCtx, pSup->numOfExprs);
+ initExecTimeWindowInfo(&pInfo->twAggSup.timeWindowData, &pTaskInfo->window);
+
+ pInfo->invertible = allInvertible(pSup->pCtx, numOfCols);
+ pInfo->invertible = false; // Todo(liuyao): Dependent TSDB API
+ pInfo->pRecycledPages = taosArrayInit(4, sizeof(int32_t));
+ pInfo->pDelWins = taosArrayInit(4, sizeof(SWinKey));
+ pInfo->delIndex = 0;
+ pInfo->pDelRes = createSpecialDataBlock(STREAM_DELETE_RESULT);
+ initResultRowInfo(&pInfo->binfo.resultRowInfo);
+
+ pOperator->name = "StreamIntervalOperator";
+ pOperator->operatorType = QUERY_NODE_PHYSICAL_PLAN_STREAM_INTERVAL;
+ pOperator->blocking = true;
+ pOperator->status = OP_NOT_OPENED;
+ pOperator->info = pInfo;
+ pOperator->fpSet =
+ createOperatorFpSet(operatorDummyOpenFn, doStreamIntervalAgg, NULL, NULL, destroyStreamIntervalOperatorInfo,
+ aggEncodeResultRow, aggDecodeResultRow, NULL);
+
+ initIntervalDownStream(downstream, pPhyNode->type, &pInfo->aggSup, &pInfo->interval, pInfo->twAggSup.waterMark);
+ code = appendDownstream(pOperator, &downstream, 1);
+ if (code != TSDB_CODE_SUCCESS) {
+ goto _error;
+ }
+
+ return pOperator;
+
+_error:
+ destroyStreamIntervalOperatorInfo(pInfo);
taosMemoryFreeClear(pOperator);
pTaskInfo->code = code;
return NULL;
diff --git a/source/libs/executor/src/tlinearhash.c b/source/libs/executor/src/tlinearhash.c
index ad97d79f7e7ad28da3cf51aab33010303e11f509..cffabcb6aca1f1f5ba457fb765828889bc3c03e6 100644
--- a/source/libs/executor/src/tlinearhash.c
+++ b/source/libs/executor/src/tlinearhash.c
@@ -26,7 +26,7 @@ typedef struct SLHashBucket {
int32_t size; // the number of element in this entry
} SLHashBucket;
-typedef struct SLHashObj {
+struct SLHashObj {
SDiskbasedBuf *pBuf;
_hash_fn_t hashFn;
SLHashBucket **pBucket; // entry list
@@ -35,7 +35,7 @@ typedef struct SLHashObj {
int32_t bits; // the number of bits used in hash
int32_t numOfBuckets; // the number of buckets
int64_t size; // the number of total items
-} SLHashObj;
+};
/**
* the data struct for each hash node
@@ -97,9 +97,9 @@ static int32_t doAddToBucket(SLHashObj* pHashObj, SLHashBucket* pBucket, int32_t
// allocate the overflow buffer page to hold this k/v.
int32_t newPageId = -1;
- SFilePage* pNewPage = getNewBufPage(pHashObj->pBuf, 0, &newPageId);
+ SFilePage* pNewPage = getNewBufPage(pHashObj->pBuf, &newPageId);
if (pNewPage == NULL) {
- return TSDB_CODE_OUT_OF_MEMORY;
+ return terrno;
}
taosArrayPush(pBucket->pPageIdList, &newPageId);
@@ -138,7 +138,6 @@ static void doRemoveFromBucket(SFilePage* pPage, SLHashNode* pNode, SLHashBucket
}
setBufPageDirty(pPage, true);
-
pBucket->size -= 1;
}
@@ -228,7 +227,11 @@ static int32_t doAddNewBucket(SLHashObj* pHashObj) {
}
int32_t pageId = -1;
- SFilePage* p = getNewBufPage(pHashObj->pBuf, 0, &pageId);
+ SFilePage* p = getNewBufPage(pHashObj->pBuf, &pageId);
+ if (p == NULL) {
+ return terrno;
+ }
+
p->num = sizeof(SFilePage);
setBufPageDirty(p, true);
@@ -252,7 +255,8 @@ SLHashObj* tHashInit(int32_t inMemPages, int32_t pageSize, _hash_fn_t fn, int32_
printf("tHash Init failed since %s", terrstr(terrno));
return NULL;
}
- int32_t code = createDiskbasedBuf(&pHashObj->pBuf, pageSize, inMemPages * pageSize, 0, tsTempDir);
+
+ int32_t code = createDiskbasedBuf(&pHashObj->pBuf, pageSize, inMemPages * pageSize, "", tsTempDir);
if (code != 0) {
terrno = code;
return NULL;
@@ -389,7 +393,9 @@ char* tHashGet(SLHashObj* pHashObj, const void *key, size_t keyLen) {
}
SLHashBucket* pBucket = pHashObj->pBucket[bucketId];
- for (int32_t i = 0; i < taosArrayGetSize(pBucket->pPageIdList); ++i) {
+ int32_t num = taosArrayGetSize(pBucket->pPageIdList);
+
+ for (int32_t i = 0; i < num; ++i) {
int32_t pageId = *(int32_t*)taosArrayGet(pBucket->pPageIdList, i);
SFilePage* p = getBufPage(pHashObj->pBuf, pageId);
diff --git a/source/libs/executor/src/tsimplehash.c b/source/libs/executor/src/tsimplehash.c
index 6b2edf0d5e6e1f41b5d354d110fb23892a864b33..84b615af7a93aef9fbf86190a2544474b7b2c87b 100644
--- a/source/libs/executor/src/tsimplehash.c
+++ b/source/libs/executor/src/tsimplehash.c
@@ -31,21 +31,12 @@
taosMemoryFreeClear(_n); \
} while (0);
-#pragma pack(push, 4)
-typedef struct SHNode {
- struct SHNode *next;
- uint32_t keyLen : 20;
- uint32_t dataLen : 12;
- char data[];
-} SHNode;
-#pragma pack(pop)
-
struct SSHashObj {
SHNode **hashList;
size_t capacity; // number of slots
- int64_t size; // number of elements in hash table
- _hash_fn_t hashFp; // hash function
- _equal_fn_t equalFp; // equal function
+ int64_t size; // number of elements in hash table
+ _hash_fn_t hashFp; // hash function
+ _equal_fn_t equalFp; // equal function
};
static FORCE_INLINE int32_t taosHashCapacity(int32_t length) {
@@ -76,7 +67,6 @@ SSHashObj *tSimpleHashInit(size_t capacity, _hash_fn_t fn) {
pHashObj->hashFp = fn;
ASSERT((pHashObj->capacity & (pHashObj->capacity - 1)) == 0);
-
pHashObj->hashList = (SHNode **)taosMemoryCalloc(pHashObj->capacity, sizeof(void *));
if (!pHashObj->hashList) {
taosMemoryFree(pHashObj);
@@ -285,6 +275,40 @@ int32_t tSimpleHashRemove(SSHashObj *pHashObj, const void *key, size_t keyLen) {
return TSDB_CODE_SUCCESS;
}
+int32_t tSimpleHashIterateRemove(SSHashObj *pHashObj, const void *key, size_t keyLen, void **pIter, int32_t *iter) {
+ if (!pHashObj || !key) {
+ return TSDB_CODE_FAILED;
+ }
+
+ uint32_t hashVal = (*pHashObj->hashFp)(key, (uint32_t)keyLen);
+
+ int32_t slot = HASH_INDEX(hashVal, pHashObj->capacity);
+
+ SHNode *pNode = pHashObj->hashList[slot];
+ SHNode *pPrev = NULL;
+ while (pNode) {
+ if ((*(pHashObj->equalFp))(GET_SHASH_NODE_KEY(pNode, pNode->dataLen), key, keyLen) == 0) {
+ if (!pPrev) {
+ pHashObj->hashList[slot] = pNode->next;
+ } else {
+ pPrev->next = pNode->next;
+ }
+
+ if (*pIter == (void *)GET_SHASH_NODE_DATA(pNode)) {
+ *pIter = pPrev ? GET_SHASH_NODE_DATA(pPrev) : NULL;
+ }
+
+ FREE_HASH_NODE(pNode);
+ atomic_sub_fetch_64(&pHashObj->size, 1);
+ break;
+ }
+ pPrev = pNode;
+ pNode = pNode->next;
+ }
+
+ return TSDB_CODE_SUCCESS;
+}
+
void tSimpleHashClear(SSHashObj *pHashObj) {
if (!pHashObj || taosHashTableEmpty(pHashObj)) {
return;
@@ -302,6 +326,7 @@ void tSimpleHashClear(SSHashObj *pHashObj) {
FREE_HASH_NODE(pNode);
pNode = pNext;
}
+ pHashObj->hashList[i] = NULL;
}
atomic_store_64(&pHashObj->size, 0);
}
@@ -324,15 +349,6 @@ size_t tSimpleHashGetMemSize(const SSHashObj *pHashObj) {
return (pHashObj->capacity * sizeof(void *)) + sizeof(SHNode) * tSimpleHashGetSize(pHashObj) + sizeof(SSHashObj);
}
-void *tSimpleHashGetKey(void *data, size_t *keyLen) {
- SHNode *node = (SHNode *)((char *)data - offsetof(SHNode, data));
- if (keyLen) {
- *keyLen = node->keyLen;
- }
-
- return POINTER_SHIFT(data, node->dataLen);
-}
-
void *tSimpleHashIterate(const SSHashObj *pHashObj, void *data, int32_t *iter) {
if (!pHashObj) {
return NULL;
@@ -341,53 +357,12 @@ void *tSimpleHashIterate(const SSHashObj *pHashObj, void *data, int32_t *iter) {
SHNode *pNode = NULL;
if (!data) {
- for (int32_t i = 0; i < pHashObj->capacity; ++i) {
- pNode = pHashObj->hashList[i];
- if (!pNode) {
- continue;
- }
- *iter = i;
- return GET_SHASH_NODE_DATA(pNode);
- }
- return NULL;
- }
-
- pNode = (SHNode *)((char *)data - offsetof(SHNode, data));
-
- if (pNode->next) {
- return GET_SHASH_NODE_DATA(pNode->next);
- }
-
- ++(*iter);
- for (int32_t i = *iter; i < pHashObj->capacity; ++i) {
- pNode = pHashObj->hashList[i];
- if (!pNode) {
- continue;
- }
- *iter = i;
- return GET_SHASH_NODE_DATA(pNode);
- }
-
- return NULL;
-}
-
-void *tSimpleHashIterateKV(const SSHashObj *pHashObj, void *data, void **key, int32_t *iter) {
- if (!pHashObj) {
- return NULL;
- }
-
- SHNode *pNode = NULL;
-
- if (!data) {
- for (int32_t i = 0; i < pHashObj->capacity; ++i) {
+ for (int32_t i = *iter; i < pHashObj->capacity; ++i) {
pNode = pHashObj->hashList[i];
if (!pNode) {
continue;
}
*iter = i;
- if (key) {
- *key = GET_SHASH_NODE_KEY(pNode, pNode->dataLen);
- }
return GET_SHASH_NODE_DATA(pNode);
}
return NULL;
@@ -396,9 +371,6 @@ void *tSimpleHashIterateKV(const SSHashObj *pHashObj, void *data, void **key, in
pNode = (SHNode *)((char *)data - offsetof(SHNode, data));
if (pNode->next) {
- if (key) {
- *key = GET_SHASH_NODE_KEY(pNode->next, pNode->next->dataLen);
- }
return GET_SHASH_NODE_DATA(pNode->next);
}
@@ -409,9 +381,6 @@ void *tSimpleHashIterateKV(const SSHashObj *pHashObj, void *data, void **key, in
continue;
}
*iter = i;
- if (key) {
- *key = GET_SHASH_NODE_KEY(pNode, pNode->dataLen);
- }
return GET_SHASH_NODE_DATA(pNode);
}
diff --git a/source/libs/executor/src/tsort.c b/source/libs/executor/src/tsort.c
index 48af951773814d9979eb6d349670753ad4b036eb..63fc9d9e1c553bc210c7f525b014cd3d0b4f852b 100644
--- a/source/libs/executor/src/tsort.c
+++ b/source/libs/executor/src/tsort.c
@@ -97,7 +97,7 @@ SSortHandle* tsortCreateSortHandle(SArray* pSortInfo, int32_t type, int32_t page
return pSortHandle;
}
-static int32_t sortComparClearup(SMsortComparParam* cmpParam) {
+static int32_t sortComparCleanup(SMsortComparParam* cmpParam) {
for(int32_t i = 0; i < cmpParam->numOfSources; ++i) {
SSortSource* pSource = cmpParam->pSources[i]; // NOTICE: pSource may be SGenericSource *, if it is SORT_MULTISOURCE_MERGE
blockDataDestroy(pSource->src.pBlock);
@@ -134,15 +134,14 @@ int32_t tsortAddSource(SSortHandle* pSortHandle, void* pSource) {
return TSDB_CODE_SUCCESS;
}
-static int32_t doAddNewExternalMemSource(SDiskbasedBuf *pBuf, SArray* pAllSources, SSDataBlock* pBlock, int32_t* sourceId) {
+static int32_t doAddNewExternalMemSource(SDiskbasedBuf *pBuf, SArray* pAllSources, SSDataBlock* pBlock, int32_t* sourceId, SArray* pPageIdList) {
SSortSource* pSource = taosMemoryCalloc(1, sizeof(SSortSource));
if (pSource == NULL) {
return TSDB_CODE_QRY_OUT_OF_MEMORY;
}
- pSource->pageIdList = getDataBufPagesIdList(pBuf, (*sourceId));
pSource->src.pBlock = pBlock;
-
+ pSource->pageIdList = pPageIdList;
taosArrayPush(pAllSources, &pSource);
(*sourceId) += 1;
@@ -171,6 +170,7 @@ static int32_t doAddToBuf(SSDataBlock* pDataBlock, SSortHandle* pHandle) {
}
}
+ SArray* pPageIdList = taosArrayInit(4, sizeof(int32_t));
while(start < pDataBlock->info.rows) {
int32_t stop = 0;
blockDataSplitRows(pDataBlock, pDataBlock->info.hasVarCol, start, &stop, pHandle->pageSize);
@@ -180,12 +180,14 @@ static int32_t doAddToBuf(SSDataBlock* pDataBlock, SSortHandle* pHandle) {
}
int32_t pageId = -1;
- void* pPage = getNewBufPage(pHandle->pBuf, pHandle->sourceId, &pageId);
+ void* pPage = getNewBufPage(pHandle->pBuf, &pageId);
if (pPage == NULL) {
blockDataDestroy(p);
return terrno;
}
+ taosArrayPush(pPageIdList, &pageId);
+
int32_t size = blockDataGetSize(p) + sizeof(int32_t) + taosArrayGetSize(p->pDataBlock) * sizeof(int32_t);
assert(size <= getBufPageSize(pHandle->pBuf));
@@ -201,7 +203,7 @@ static int32_t doAddToBuf(SSDataBlock* pDataBlock, SSortHandle* pHandle) {
blockDataCleanup(pDataBlock);
SSDataBlock* pBlock = createOneDataBlock(pDataBlock, false);
- return doAddNewExternalMemSource(pHandle->pBuf, pHandle->pOrderedSource, pBlock, &pHandle->sourceId);
+ return doAddNewExternalMemSource(pHandle->pBuf, pHandle->pOrderedSource, pBlock, &pHandle->sourceId, pPageIdList);
}
static void setCurrentSourceIsDone(SSortSource* pSource, SSortHandle* pHandle) {
@@ -225,9 +227,9 @@ static int32_t sortComparInit(SMsortComparParam* cmpParam, SArray* pSources, int
continue;
}
- SPageInfo* pPgInfo = *(SPageInfo**)taosArrayGet(pSource->pageIdList, pSource->pageIndex);
+ int32_t* pPgId = taosArrayGet(pSource->pageIdList, pSource->pageIndex);
- void* pPage = getBufPage(pHandle->pBuf, getPageId(pPgInfo));
+ void* pPage = getBufPage(pHandle->pBuf, *pPgId);
code = blockDataFromBuf(pSource->src.pBlock, pPage);
if (code != TSDB_CODE_SUCCESS) {
return code;
@@ -300,9 +302,9 @@ static int32_t adjustMergeTreeForNextTuple(SSortSource *pSource, SMultiwayMergeT
pSource->pageIndex = -1;
pSource->src.pBlock = blockDataDestroy(pSource->src.pBlock);
} else {
- SPageInfo* pPgInfo = *(SPageInfo**)taosArrayGet(pSource->pageIdList, pSource->pageIndex);
+ int32_t* pPgId = taosArrayGet(pSource->pageIdList, pSource->pageIndex);
- void* pPage = getBufPage(pHandle->pBuf, getPageId(pPgInfo));
+ void* pPage = getBufPage(pHandle->pBuf, *pPgId);
int32_t code = blockDataFromBuf(pSource->src.pBlock, pPage);
if (code != TSDB_CODE_SUCCESS) {
return code;
@@ -502,6 +504,7 @@ static int32_t doInternalMergeSort(SSortHandle* pHandle) {
return code;
}
+ SArray* pPageIdList = taosArrayInit(4, sizeof(int32_t));
while (1) {
SSDataBlock* pDataBlock = getSortedBlockDataInner(pHandle, &pHandle->cmpParam, numOfRows);
if (pDataBlock == NULL) {
@@ -509,11 +512,13 @@ static int32_t doInternalMergeSort(SSortHandle* pHandle) {
}
int32_t pageId = -1;
- void* pPage = getNewBufPage(pHandle->pBuf, pHandle->sourceId, &pageId);
+ void* pPage = getNewBufPage(pHandle->pBuf, &pageId);
if (pPage == NULL) {
return terrno;
}
+ taosArrayPush(pPageIdList, &pageId);
+
int32_t size = blockDataGetSize(pDataBlock) + sizeof(int32_t) + taosArrayGetSize(pDataBlock->pDataBlock) * sizeof(int32_t);
assert(size <= getBufPageSize(pHandle->pBuf));
@@ -525,12 +530,12 @@ static int32_t doInternalMergeSort(SSortHandle* pHandle) {
blockDataCleanup(pDataBlock);
}
- sortComparClearup(&pHandle->cmpParam);
+ sortComparCleanup(&pHandle->cmpParam);
tMergeTreeDestroy(pHandle->pMergeTree);
pHandle->numOfCompletedSources = 0;
SSDataBlock* pBlock = createOneDataBlock(pHandle->pDataBlock, false);
- code = doAddNewExternalMemSource(pHandle->pBuf, pResList, pBlock, &pHandle->sourceId);
+ code = doAddNewExternalMemSource(pHandle->pBuf, pResList, pBlock, &pHandle->sourceId, pPageIdList);
if (code != 0) {
return code;
}
diff --git a/source/libs/executor/test/executorTests.cpp b/source/libs/executor/test/executorTests.cpp
index bba4b254c5d56f2c72988897273d363a3fec3c0c..1c4216334945c0b682e313a975e558390fbd7049 100644
--- a/source/libs/executor/test/executorTests.cpp
+++ b/source/libs/executor/test/executorTests.cpp
@@ -26,7 +26,6 @@
#include "executor.h"
#include "executorimpl.h"
#include "function.h"
-#include "stub.h"
#include "taos.h"
#include "tdatablock.h"
#include "tdef.h"
diff --git a/source/libs/executor/test/lhashTests.cpp b/source/libs/executor/test/lhashTests.cpp
index 695552faa0f353cc631b87cf03f51003c7b66aed..c9b75395bce345802ff0e563762758601aca0a18 100644
--- a/source/libs/executor/test/lhashTests.cpp
+++ b/source/libs/executor/test/lhashTests.cpp
@@ -26,40 +26,47 @@
TEST(testCase, linear_hash_Tests) {
taosSeedRand(taosGetTimestampSec());
+ strcpy(tsTempDir, "/tmp/");
_hash_fn_t fn = taosGetDefaultHashFunction(TSDB_DATA_TYPE_INT);
-#if 0
- SLHashObj* pHashObj = tHashInit(256, 4096, fn, 320);
- for(int32_t i = 0; i < 5000000; ++i) {
+
+ int64_t st = taosGetTimestampUs();
+
+ SLHashObj* pHashObj = tHashInit(4098*4*2, 512, fn, 40);
+ for(int32_t i = 0; i < 1000000; ++i) {
int32_t code = tHashPut(pHashObj, &i, sizeof(i), &i, sizeof(i));
assert(code == 0);
}
// tHashPrint(pHashObj, LINEAR_HASH_STATIS);
+ int64_t et = taosGetTimestampUs();
-// for(int32_t i = 0; i < 10000; ++i) {
-// char* v = tHashGet(pHashObj, &i, sizeof(i));
-// if (v != NULL) {
-//// printf("find value: %d, key:%d\n", *(int32_t*) v, i);
-// } else {
+ for(int32_t i = 0; i < 1000000; ++i) {
+ if (i == 950000) {
+ printf("kf\n");
+ }
+ char* v = tHashGet(pHashObj, &i, sizeof(i));
+ if (v != NULL) {
+// printf("find value: %d, key:%d\n", *(int32_t*) v, i);
+ } else {
// printf("failed to found key:%d in hash\n", i);
-// }
-// }
+ }
+ }
- tHashPrint(pHashObj, LINEAR_HASH_STATIS);
+// tHashPrint(pHashObj, LINEAR_HASH_STATIS);
tHashCleanup(pHashObj);
-#endif
+ int64_t et1 = taosGetTimestampUs();
-#if 0
- SHashObj* pHashObj = taosHashInit(1000, fn, false, HASH_NO_LOCK);
+ SHashObj* pHashObj1 = taosHashInit(1000, fn, false, HASH_NO_LOCK);
for(int32_t i = 0; i < 1000000; ++i) {
- taosHashPut(pHashObj, &i, sizeof(i), &i, sizeof(i));
+ taosHashPut(pHashObj1, &i, sizeof(i), &i, sizeof(i));
}
- for(int32_t i = 0; i < 10000; ++i) {
- void* v = taosHashGet(pHashObj, &i, sizeof(i));
+ for(int32_t i = 0; i < 1000000; ++i) {
+ void* v = taosHashGet(pHashObj1, &i, sizeof(i));
}
- taosHashCleanup(pHashObj);
-#endif
+ taosHashCleanup(pHashObj1);
+ int64_t et2 = taosGetTimestampUs();
+ printf("linear hash time:%.2f ms, buildHash:%.2f ms, hash:%.2f\n", (et1-st)/1000.0, (et-st)/1000.0, (et2-et1)/1000.0);
}
\ No newline at end of file
diff --git a/source/libs/executor/test/sortTests.cpp b/source/libs/executor/test/sortTests.cpp
index 6e244152f20e0d4b914b21fcb871a5bbec871fce..4ac15670ac5dca547572df102f7267de08c0306d 100644
--- a/source/libs/executor/test/sortTests.cpp
+++ b/source/libs/executor/test/sortTests.cpp
@@ -27,7 +27,6 @@
#include "executorimpl.h"
#include "executor.h"
-#include "stub.h"
#include "taos.h"
#include "tdatablock.h"
#include "tdef.h"
@@ -196,7 +195,7 @@ int32_t docomp(const void* p1, const void* p2, void* param) {
}
} // namespace
-#if 1
+#if 0
TEST(testCase, inMem_sort_Test) {
SBlockOrderInfo oi = {0};
oi.order = TSDB_ORDER_ASC;
@@ -382,7 +381,7 @@ TEST(testCase, ordered_merge_sort_Test) {
}
void* v = tsortGetValue(pTupleHandle, 0);
- printf("%d: %d\n", row, *(int32_t*) v);
+// printf("%d: %d\n", row, *(int32_t*) v);
ASSERT_EQ(row++, *(int32_t*) v);
}
diff --git a/source/libs/executor/test/tSimpleHashTests.cpp b/source/libs/executor/test/tSimpleHashTests.cpp
index acb6d434b484057196067954df13eeb4bcd602b3..3bf339ef9040879c0978f9bedffb2b23bd8ec806 100644
--- a/source/libs/executor/test/tSimpleHashTests.cpp
+++ b/source/libs/executor/test/tSimpleHashTests.cpp
@@ -30,7 +30,7 @@
// return RUN_ALL_TESTS();
// }
-TEST(testCase, tSimpleHashTest) {
+TEST(testCase, tSimpleHashTest_intKey) {
SSHashObj *pHashObj =
tSimpleHashInit(8, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT));
@@ -57,12 +57,14 @@ TEST(testCase, tSimpleHashTest) {
int32_t iter = 0;
int64_t keySum = 0;
int64_t dataSum = 0;
+ size_t kLen = 0;
while ((data = tSimpleHashIterate(pHashObj, data, &iter))) {
- void *key = tSimpleHashGetKey(data, NULL);
+ void *key = tSimpleHashGetKey(data, &kLen);
+ ASSERT_EQ(keyLen, kLen);
keySum += *(int64_t *)key;
dataSum += *(int64_t *)data;
}
-
+
ASSERT_EQ(keySum, dataSum);
ASSERT_EQ(keySum, originKeySum);
@@ -74,4 +76,69 @@ TEST(testCase, tSimpleHashTest) {
tSimpleHashCleanup(pHashObj);
}
+
+TEST(testCase, tSimpleHashTest_binaryKey) {
+ SSHashObj *pHashObj =
+ tSimpleHashInit(8, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT));
+
+ assert(pHashObj != nullptr);
+
+ ASSERT_EQ(0, tSimpleHashGetSize(pHashObj));
+
+ typedef struct {
+ int64_t suid;
+ int64_t uid;
+ } SCombineKey;
+
+ size_t keyLen = sizeof(SCombineKey);
+ size_t dataLen = sizeof(int64_t);
+
+ int64_t originDataSum = 0;
+ SCombineKey combineKey = {0};
+ for (int64_t i = 1; i <= 100; ++i) {
+ combineKey.suid = i;
+ combineKey.uid = i + 1;
+ tSimpleHashPut(pHashObj, (const void *)&combineKey, keyLen, (const void *)&i, dataLen);
+ originDataSum += i;
+ ASSERT_EQ(i, tSimpleHashGetSize(pHashObj));
+ }
+
+ for (int64_t i = 1; i <= 100; ++i) {
+ combineKey.suid = i;
+ combineKey.uid = i + 1;
+ void *data = tSimpleHashGet(pHashObj, (const void *)&combineKey, keyLen);
+ ASSERT_EQ(i, *(int64_t *)data);
+ }
+
+ void *data = NULL;
+ int32_t iter = 0;
+ int64_t keySum = 0;
+ int64_t dataSum = 0;
+ size_t kLen = 0;
+ while ((data = tSimpleHashIterate(pHashObj, data, &iter))) {
+ void *key = tSimpleHashGetKey(data, &kLen);
+ ASSERT_EQ(keyLen, kLen);
+ dataSum += *(int64_t *)data;
+ }
+
+ ASSERT_EQ(originDataSum, dataSum);
+
+ tSimpleHashRemove(pHashObj, (const void *)&combineKey, keyLen);
+
+ while ((data = tSimpleHashIterate(pHashObj, data, &iter))) {
+ void *key = tSimpleHashGetKey(data, &kLen);
+ ASSERT_EQ(keyLen, kLen);
+ }
+
+ for (int64_t i = 1; i <= 99; ++i) {
+ combineKey.suid = i;
+ combineKey.uid = i + 1;
+ tSimpleHashRemove(pHashObj, (const void *)&combineKey, keyLen);
+ ASSERT_EQ(99 - i, tSimpleHashGetSize(pHashObj));
+ }
+
+ tSimpleHashCleanup(pHashObj);
+}
+
+
#pragma GCC diagnostic pop
\ No newline at end of file
diff --git a/source/libs/function/inc/tpercentile.h b/source/libs/function/inc/tpercentile.h
index dfb52f76946c502b38231130858b5694b7171f35..554f9e567f35cc0272a2a9755153de1b54d34392 100644
--- a/source/libs/function/inc/tpercentile.h
+++ b/source/libs/function/inc/tpercentile.h
@@ -51,20 +51,20 @@ struct tMemBucket;
typedef int32_t (*__perc_hash_func_t)(struct tMemBucket *pBucket, const void *value);
typedef struct tMemBucket {
- int16_t numOfSlots;
- int16_t type;
- int16_t bytes;
- int32_t total;
- int32_t elemPerPage; // number of elements for each object
- int32_t maxCapacity; // maximum allowed number of elements that can be sort directly to get the result
- int32_t bufPageSize; // disk page size
- MinMaxEntry range; // value range
- int32_t times; // count that has been checked for deciding the correct data value buckets.
- __compar_fn_t comparFn;
-
- tMemBucketSlot * pSlots;
- SDiskbasedBuf *pBuffer;
- __perc_hash_func_t hashFunc;
+ int16_t numOfSlots;
+ int16_t type;
+ int16_t bytes;
+ int32_t total;
+ int32_t elemPerPage; // number of elements for each object
+ int32_t maxCapacity; // maximum allowed number of elements that can be sort directly to get the result
+ int32_t bufPageSize; // disk page size
+ MinMaxEntry range; // value range
+ int32_t times; // count that has been checked for deciding the correct data value buckets.
+ __compar_fn_t comparFn;
+ tMemBucketSlot* pSlots;
+ SDiskbasedBuf* pBuffer;
+ __perc_hash_func_t hashFunc;
+ SHashObj* groupPagesMap; // disk page map for different groups;
} tMemBucket;
tMemBucket *tMemBucketCreate(int16_t nElemSize, int16_t dataType, double minval, double maxval);
diff --git a/source/libs/function/src/builtins.c b/source/libs/function/src/builtins.c
index ed82e4cb50cd2ce72ab3e9965b7ef1481fe2ccfa..5844784ea4b76845500aa728440c415baeed4cea 100644
--- a/source/libs/function/src/builtins.c
+++ b/source/libs/function/src/builtins.c
@@ -303,7 +303,7 @@ static int32_t translateInOutStr(SFunctionNode* pFunc, char* pErrBuf, int32_t le
}
SExprNode* pPara1 = (SExprNode*)nodesListGetNode(pFunc->pParameterList, 0);
- if (!IS_VAR_DATA_TYPE(pPara1->resType.type)) {
+ if (!IS_STR_DATA_TYPE(pPara1->resType.type)) {
return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName);
}
@@ -311,13 +311,29 @@ static int32_t translateInOutStr(SFunctionNode* pFunc, char* pErrBuf, int32_t le
return TSDB_CODE_SUCCESS;
}
+static int32_t translateMinMax(SFunctionNode* pFunc, char* pErrBuf, int32_t len) {
+ if (1 != LIST_LENGTH(pFunc->pParameterList)) {
+ return invaildFuncParaNumErrMsg(pErrBuf, len, pFunc->functionName);
+ }
+
+ uint8_t paraType = ((SExprNode*)nodesListGetNode(pFunc->pParameterList, 0))->resType.type;
+ if (!IS_TIMESTAMP_TYPE(paraType) && !IS_NUMERIC_TYPE(paraType) && !IS_NULL_TYPE(paraType)) {
+ return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName);
+ } else if (IS_NULL_TYPE(paraType)) {
+ paraType = TSDB_DATA_TYPE_BIGINT;
+ }
+
+ pFunc->node.resType = (SDataType){.bytes = tDataTypes[paraType].bytes, .type = paraType};
+ return TSDB_CODE_SUCCESS;
+}
+
static int32_t translateTrimStr(SFunctionNode* pFunc, char* pErrBuf, int32_t len, bool isLtrim) {
if (1 != LIST_LENGTH(pFunc->pParameterList)) {
return invaildFuncParaNumErrMsg(pErrBuf, len, pFunc->functionName);
}
SExprNode* pPara1 = (SExprNode*)nodesListGetNode(pFunc->pParameterList, 0);
- if (!IS_VAR_DATA_TYPE(pPara1->resType.type)) {
+ if (!IS_STR_DATA_TYPE(pPara1->resType.type)) {
return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName);
}
@@ -546,7 +562,7 @@ static int32_t translateApercentile(SFunctionNode* pFunc, char* pErrBuf, int32_t
// param2
if (3 == numOfParams) {
uint8_t para3Type = ((SExprNode*)nodesListGetNode(pFunc->pParameterList, 2))->resType.type;
- if (!IS_VAR_DATA_TYPE(para3Type)) {
+ if (!IS_STR_DATA_TYPE(para3Type)) {
return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName);
}
@@ -593,7 +609,7 @@ static int32_t translateApercentileImpl(SFunctionNode* pFunc, char* pErrBuf, int
// param2
if (3 == numOfParams) {
uint8_t para3Type = ((SExprNode*)nodesListGetNode(pFunc->pParameterList, 2))->resType.type;
- if (!IS_VAR_DATA_TYPE(para3Type)) {
+ if (!IS_STR_DATA_TYPE(para3Type)) {
return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName);
}
@@ -698,7 +714,7 @@ static int32_t translateSpread(SFunctionNode* pFunc, char* pErrBuf, int32_t len)
}
uint8_t paraType = ((SExprNode*)nodesListGetNode(pFunc->pParameterList, 0))->resType.type;
- if (!IS_NUMERIC_TYPE(paraType) && TSDB_DATA_TYPE_TIMESTAMP != paraType) {
+ if (!IS_NUMERIC_TYPE(paraType) && !IS_TIMESTAMP_TYPE(paraType)) {
return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName);
}
@@ -713,7 +729,7 @@ static int32_t translateSpreadImpl(SFunctionNode* pFunc, char* pErrBuf, int32_t
uint8_t paraType = ((SExprNode*)nodesListGetNode(pFunc->pParameterList, 0))->resType.type;
if (isPartial) {
- if (!IS_NUMERIC_TYPE(paraType) && TSDB_DATA_TYPE_TIMESTAMP != paraType) {
+ if (!IS_NUMERIC_TYPE(paraType) && !IS_TIMESTAMP_TYPE(paraType)) {
return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName);
}
pFunc->node.resType = (SDataType){.bytes = getSpreadInfoSize() + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_BINARY};
@@ -788,7 +804,7 @@ static int32_t translateElapsedImpl(SFunctionNode* pFunc, char* pErrBuf, int32_t
}
uint8_t paraType = ((SExprNode*)nodesListGetNode(pFunc->pParameterList, 0))->resType.type;
- if (TSDB_DATA_TYPE_TIMESTAMP != paraType) {
+ if (!IS_TIMESTAMP_TYPE(paraType)) {
return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName);
}
@@ -1388,7 +1404,7 @@ static int32_t translateSample(SFunctionNode* pFunc, char* pErrBuf, int32_t len)
}
// set result type
- if (IS_VAR_DATA_TYPE(colType)) {
+ if (IS_STR_DATA_TYPE(colType)) {
pFunc->node.resType = (SDataType){.bytes = pCol->resType.bytes, .type = colType};
} else {
pFunc->node.resType = (SDataType){.bytes = tDataTypes[colType].bytes, .type = colType};
@@ -1431,7 +1447,7 @@ static int32_t translateTail(SFunctionNode* pFunc, char* pErrBuf, int32_t len) {
}
// set result type
- if (IS_VAR_DATA_TYPE(colType)) {
+ if (IS_STR_DATA_TYPE(colType)) {
pFunc->node.resType = (SDataType){.bytes = pCol->resType.bytes, .type = colType};
} else {
pFunc->node.resType = (SDataType){.bytes = tDataTypes[colType].bytes, .type = colType};
@@ -1514,7 +1530,7 @@ static int32_t translateInterp(SFunctionNode* pFunc, char* pErrBuf, int32_t len)
for (int32_t i = 1; i < 3; ++i) {
nodeType = nodeType(nodesListGetNode(pFunc->pParameterList, i));
paraType = ((SExprNode*)nodesListGetNode(pFunc->pParameterList, i))->resType.type;
- if (!IS_VAR_DATA_TYPE(paraType) || QUERY_NODE_VALUE != nodeType) {
+ if (!IS_STR_DATA_TYPE(paraType) || QUERY_NODE_VALUE != nodeType) {
return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName);
}
@@ -1634,7 +1650,7 @@ static int32_t translateDiff(SFunctionNode* pFunc, char* pErrBuf, int32_t len) {
uint8_t colType = ((SExprNode*)nodesListGetNode(pFunc->pParameterList, 0))->resType.type;
if (!IS_SIGNED_NUMERIC_TYPE(colType) && !IS_FLOAT_TYPE(colType) && TSDB_DATA_TYPE_BOOL != colType &&
- TSDB_DATA_TYPE_TIMESTAMP != colType) {
+ !IS_TIMESTAMP_TYPE(colType)) {
return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName);
}
@@ -1660,7 +1676,7 @@ static int32_t translateDiff(SFunctionNode* pFunc, char* pErrBuf, int32_t len) {
}
uint8_t resType;
- if (IS_SIGNED_NUMERIC_TYPE(colType) || TSDB_DATA_TYPE_BOOL == colType || TSDB_DATA_TYPE_TIMESTAMP == colType) {
+ if (IS_SIGNED_NUMERIC_TYPE(colType) || IS_TIMESTAMP_TYPE(colType) || TSDB_DATA_TYPE_BOOL == colType) {
resType = TSDB_DATA_TYPE_BIGINT;
} else {
resType = TSDB_DATA_TYPE_DOUBLE;
@@ -1682,7 +1698,7 @@ static int32_t translateLength(SFunctionNode* pFunc, char* pErrBuf, int32_t len)
return invaildFuncParaNumErrMsg(pErrBuf, len, pFunc->functionName);
}
- if (!IS_VAR_DATA_TYPE(((SExprNode*)nodesListGetNode(pFunc->pParameterList, 0))->resType.type)) {
+ if (!IS_STR_DATA_TYPE(((SExprNode*)nodesListGetNode(pFunc->pParameterList, 0))->resType.type)) {
return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName);
}
@@ -1714,7 +1730,7 @@ static int32_t translateConcatImpl(SFunctionNode* pFunc, char* pErrBuf, int32_t
for (int32_t i = 0; i < numOfParams; ++i) {
SNode* pPara = nodesListGetNode(pFunc->pParameterList, i);
uint8_t paraType = ((SExprNode*)pPara)->resType.type;
- if (!IS_VAR_DATA_TYPE(paraType) && !IS_NULL_TYPE(paraType)) {
+ if (!IS_STR_DATA_TYPE(paraType) && !IS_NULL_TYPE(paraType)) {
return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName);
}
if (TSDB_DATA_TYPE_NCHAR == paraType) {
@@ -1770,7 +1786,7 @@ static int32_t translateSubstr(SFunctionNode* pFunc, char* pErrBuf, int32_t len)
uint8_t para0Type = pPara0->resType.type;
uint8_t para1Type = pPara1->resType.type;
- if (!IS_VAR_DATA_TYPE(para0Type) || !IS_INTEGER_TYPE(para1Type)) {
+ if (!IS_STR_DATA_TYPE(para0Type) || !IS_INTEGER_TYPE(para1Type)) {
return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName);
}
@@ -1802,7 +1818,7 @@ static int32_t translateCast(SFunctionNode* pFunc, char* pErrBuf, int32_t len) {
uint8_t para2Type = pFunc->node.resType.type;
int32_t para2Bytes = pFunc->node.resType.bytes;
- if (IS_VAR_DATA_TYPE(para2Type)) {
+ if (IS_STR_DATA_TYPE(para2Type)) {
para2Bytes -= VARSTR_HEADER_SIZE;
}
if (para2Bytes <= 0 || para2Bytes > 4096) { // cast dst var type length limits to 4096 bytes
@@ -1825,7 +1841,7 @@ static int32_t translateToIso8601(SFunctionNode* pFunc, char* pErrBuf, int32_t l
// param0
uint8_t paraType = ((SExprNode*)nodesListGetNode(pFunc->pParameterList, 0))->resType.type;
- if (!IS_INTEGER_TYPE(paraType) && TSDB_DATA_TYPE_TIMESTAMP != paraType) {
+ if (!IS_INTEGER_TYPE(paraType) && !IS_TIMESTAMP_TYPE(paraType)) {
return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName);
}
@@ -1859,7 +1875,7 @@ static int32_t translateToUnixtimestamp(SFunctionNode* pFunc, char* pErrBuf, int
return invaildFuncParaNumErrMsg(pErrBuf, len, pFunc->functionName);
}
- if (!IS_VAR_DATA_TYPE(((SExprNode*)nodesListGetNode(pFunc->pParameterList, 0))->resType.type)) {
+ if (!IS_STR_DATA_TYPE(((SExprNode*)nodesListGetNode(pFunc->pParameterList, 0))->resType.type)) {
return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName);
}
@@ -1878,7 +1894,7 @@ static int32_t translateTimeTruncate(SFunctionNode* pFunc, char* pErrBuf, int32_
uint8_t para1Type = ((SExprNode*)nodesListGetNode(pFunc->pParameterList, 0))->resType.type;
uint8_t para2Type = ((SExprNode*)nodesListGetNode(pFunc->pParameterList, 1))->resType.type;
- if ((!IS_VAR_DATA_TYPE(para1Type) && !IS_INTEGER_TYPE(para1Type) && TSDB_DATA_TYPE_TIMESTAMP != para1Type) ||
+ if ((!IS_STR_DATA_TYPE(para1Type) && !IS_INTEGER_TYPE(para1Type) && !IS_TIMESTAMP_TYPE(para1Type)) ||
!IS_INTEGER_TYPE(para2Type)) {
return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName);
}
@@ -1911,7 +1927,7 @@ static int32_t translateTimeDiff(SFunctionNode* pFunc, char* pErrBuf, int32_t le
for (int32_t i = 0; i < 2; ++i) {
uint8_t paraType = ((SExprNode*)nodesListGetNode(pFunc->pParameterList, i))->resType.type;
- if (!IS_VAR_DATA_TYPE(paraType) && !IS_INTEGER_TYPE(paraType) && TSDB_DATA_TYPE_TIMESTAMP != paraType) {
+ if (!IS_STR_DATA_TYPE(paraType) && !IS_INTEGER_TYPE(paraType) && !IS_TIMESTAMP_TYPE(paraType)) {
return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName);
}
}
@@ -2060,7 +2076,7 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = {
.name = "min",
.type = FUNCTION_TYPE_MIN,
.classification = FUNC_MGT_AGG_FUNC | FUNC_MGT_SPECIAL_DATA_REQUIRED | FUNC_MGT_SELECT_FUNC,
- .translateFunc = translateInOutNum,
+ .translateFunc = translateMinMax,
.dataRequiredFunc = statisDataRequired,
.getEnvFunc = getMinmaxFuncEnv,
.initFunc = minmaxFunctionSetup,
@@ -2075,7 +2091,7 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = {
.name = "max",
.type = FUNCTION_TYPE_MAX,
.classification = FUNC_MGT_AGG_FUNC | FUNC_MGT_SPECIAL_DATA_REQUIRED | FUNC_MGT_SELECT_FUNC,
- .translateFunc = translateInOutNum,
+ .translateFunc = translateMinMax,
.dataRequiredFunc = statisDataRequired,
.getEnvFunc = getMinmaxFuncEnv,
.initFunc = minmaxFunctionSetup,
@@ -2159,6 +2175,7 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = {
.type = FUNCTION_TYPE_AVG_PARTIAL,
.classification = FUNC_MGT_AGG_FUNC,
.translateFunc = translateAvgPartial,
+ .dataRequiredFunc = statisDataRequired,
.getEnvFunc = getAvgFuncEnv,
.initFunc = avgFunctionSetup,
.processFunc = avgFunction,
diff --git a/source/libs/function/src/builtinsimpl.c b/source/libs/function/src/builtinsimpl.c
index 013c58cc4501c091cc745330b584174064aff404..0d7fd1a6dad29301676b79d5c1be54fc66603229 100644
--- a/source/libs/function/src/builtinsimpl.c
+++ b/source/libs/function/src/builtinsimpl.c
@@ -76,11 +76,11 @@ typedef struct STopBotResItem {
} STopBotResItem;
typedef struct STopBotRes {
- int32_t maxSize;
- int16_t type;
+ int32_t maxSize;
+ int16_t type;
- STuplePos nullTuplePos;
- bool nullTupleSaved;
+ STuplePos nullTuplePos;
+ bool nullTupleSaved;
STopBotResItem* pItems;
} STopBotRes;
@@ -223,14 +223,14 @@ typedef struct SMavgInfo {
} SMavgInfo;
typedef struct SSampleInfo {
- int32_t samples;
- int32_t totalPoints;
- int32_t numSampled;
- uint8_t colType;
- int16_t colBytes;
+ int32_t samples;
+ int32_t totalPoints;
+ int32_t numSampled;
+ uint8_t colType;
+ int16_t colBytes;
- STuplePos nullTuplePos;
- bool nullTupleSaved;
+ STuplePos nullTuplePos;
+ bool nullTupleSaved;
char* data;
STuplePos* tuplePos;
@@ -1146,8 +1146,9 @@ bool getMinmaxFuncEnv(SFunctionNode* UNUSED_PARAM(pFunc), SFuncExecEnv* pEnv) {
return true;
}
-static void doSaveTupleData(SqlFunctionCtx* pCtx, int32_t rowIndex, const SSDataBlock* pSrcBlock, STuplePos* pPos);
-static void doCopyTupleData(SqlFunctionCtx* pCtx, int32_t rowIndex, const SSDataBlock* pSrcBlock, STuplePos* pPos);
+static STuplePos saveTupleData(SqlFunctionCtx* pCtx, int32_t rowIndex, const SSDataBlock* pSrcBlock);
+static int32_t updateTupleData(SqlFunctionCtx* pCtx, int32_t rowIndex, const SSDataBlock* pSrcBlock, STuplePos* pPos);
+static const char* loadTupleData(SqlFunctionCtx* pCtx, const STuplePos* pPos);
static int32_t findRowIndex(int32_t start, int32_t num, SColumnInfoData* pCol, const char* tval) {
// the data is loaded, not only the block SMA value
@@ -1159,6 +1160,7 @@ static int32_t findRowIndex(int32_t start, int32_t num, SColumnInfoData* pCol, c
}
ASSERT(0);
+ return 0;
}
int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc) {
@@ -1199,10 +1201,10 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc) {
pBuf->v = *(int64_t*)tval;
if (pCtx->subsidiaries.num > 0) {
index = findRowIndex(pInput->startRowIndex, pInput->numOfRows, pCol, tval);
- doSaveTupleData(pCtx, index, pCtx->pSrcBlock, &pBuf->tuplePos);
+ pBuf->tuplePos = saveTupleData(pCtx, index, pCtx->pSrcBlock);
}
} else {
- if (IS_SIGNED_NUMERIC_TYPE(type)) {
+ if (IS_SIGNED_NUMERIC_TYPE(type) || IS_TIMESTAMP_TYPE(type)) {
int64_t prev = 0;
GET_TYPED_DATA(prev, int64_t, type, &pBuf->v);
@@ -1211,10 +1213,9 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc) {
*(int64_t*)&pBuf->v = val;
if (pCtx->subsidiaries.num > 0) {
index = findRowIndex(pInput->startRowIndex, pInput->numOfRows, pCol, tval);
- doSaveTupleData(pCtx, index, pCtx->pSrcBlock, &pBuf->tuplePos);
+ pBuf->tuplePos = saveTupleData(pCtx, index, pCtx->pSrcBlock);
}
}
-
} else if (IS_UNSIGNED_NUMERIC_TYPE(type)) {
uint64_t prev = 0;
GET_TYPED_DATA(prev, uint64_t, type, &pBuf->v);
@@ -1224,7 +1225,7 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc) {
*(uint64_t*)&pBuf->v = val;
if (pCtx->subsidiaries.num > 0) {
index = findRowIndex(pInput->startRowIndex, pInput->numOfRows, pCol, tval);
- doSaveTupleData(pCtx, index, pCtx->pSrcBlock, &pBuf->tuplePos);
+ pBuf->tuplePos = saveTupleData(pCtx, index, pCtx->pSrcBlock);
}
}
} else if (type == TSDB_DATA_TYPE_DOUBLE) {
@@ -1236,7 +1237,7 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc) {
*(double*)&pBuf->v = val;
if (pCtx->subsidiaries.num > 0) {
index = findRowIndex(pInput->startRowIndex, pInput->numOfRows, pCol, tval);
- doSaveTupleData(pCtx, index, pCtx->pSrcBlock, &pBuf->tuplePos);
+ pBuf->tuplePos = saveTupleData(pCtx, index, pCtx->pSrcBlock);
}
}
} else if (type == TSDB_DATA_TYPE_FLOAT) {
@@ -1250,7 +1251,7 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc) {
if (pCtx->subsidiaries.num > 0) {
index = findRowIndex(pInput->startRowIndex, pInput->numOfRows, pCol, tval);
- doSaveTupleData(pCtx, index, pCtx->pSrcBlock, &pBuf->tuplePos);
+ pBuf->tuplePos = saveTupleData(pCtx, index, pCtx->pSrcBlock);
}
}
}
@@ -1262,7 +1263,7 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc) {
int32_t start = pInput->startRowIndex;
int32_t numOfRows = pInput->numOfRows;
- if (IS_SIGNED_NUMERIC_TYPE(type) || type == TSDB_DATA_TYPE_BOOL) {
+ if (IS_SIGNED_NUMERIC_TYPE(type) || IS_TIMESTAMP_TYPE(type) || type == TSDB_DATA_TYPE_BOOL) {
if (type == TSDB_DATA_TYPE_TINYINT || type == TSDB_DATA_TYPE_BOOL) {
int8_t* pData = (int8_t*)pCol->pData;
int8_t* val = (int8_t*)&pBuf->v;
@@ -1275,7 +1276,7 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc) {
if (!pBuf->assign) {
*val = pData[i];
if (pCtx->subsidiaries.num > 0) {
- doSaveTupleData(pCtx, i, pCtx->pSrcBlock, &pBuf->tuplePos);
+ pBuf->tuplePos = saveTupleData(pCtx, i, pCtx->pSrcBlock);
}
pBuf->assign = true;
} else {
@@ -1287,7 +1288,7 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc) {
if ((*val < pData[i]) ^ isMinFunc) {
*val = pData[i];
if (pCtx->subsidiaries.num > 0) {
- doCopyTupleData(pCtx, i, pCtx->pSrcBlock, &pBuf->tuplePos);
+ updateTupleData(pCtx, i, pCtx->pSrcBlock, &pBuf->tuplePos);
}
}
}
@@ -1306,7 +1307,7 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc) {
if (!pBuf->assign) {
*val = pData[i];
if (pCtx->subsidiaries.num > 0) {
- doSaveTupleData(pCtx, i, pCtx->pSrcBlock, &pBuf->tuplePos);
+ pBuf->tuplePos = saveTupleData(pCtx, i, pCtx->pSrcBlock);
}
pBuf->assign = true;
} else {
@@ -1318,7 +1319,7 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc) {
if ((*val < pData[i]) ^ isMinFunc) {
*val = pData[i];
if (pCtx->subsidiaries.num > 0) {
- doCopyTupleData(pCtx, i, pCtx->pSrcBlock, &pBuf->tuplePos);
+ updateTupleData(pCtx, i, pCtx->pSrcBlock, &pBuf->tuplePos);
}
}
}
@@ -1337,7 +1338,7 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc) {
if (!pBuf->assign) {
*val = pData[i];
if (pCtx->subsidiaries.num > 0) {
- doSaveTupleData(pCtx, i, pCtx->pSrcBlock, &pBuf->tuplePos);
+ pBuf->tuplePos = saveTupleData(pCtx, i, pCtx->pSrcBlock);
}
pBuf->assign = true;
} else {
@@ -1349,14 +1350,14 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc) {
if ((*val < pData[i]) ^ isMinFunc) {
*val = pData[i];
if (pCtx->subsidiaries.num > 0) {
- doCopyTupleData(pCtx, i, pCtx->pSrcBlock, &pBuf->tuplePos);
+ updateTupleData(pCtx, i, pCtx->pSrcBlock, &pBuf->tuplePos);
}
}
}
numOfElems += 1;
}
- } else if (type == TSDB_DATA_TYPE_BIGINT) {
+ } else if (type == TSDB_DATA_TYPE_BIGINT || type == TSDB_DATA_TYPE_TIMESTAMP) {
int64_t* pData = (int64_t*)pCol->pData;
int64_t* val = (int64_t*)&pBuf->v;
@@ -1368,7 +1369,7 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc) {
if (!pBuf->assign) {
*val = pData[i];
if (pCtx->subsidiaries.num > 0) {
- doSaveTupleData(pCtx, i, pCtx->pSrcBlock, &pBuf->tuplePos);
+ pBuf->tuplePos = saveTupleData(pCtx, i, pCtx->pSrcBlock);
}
pBuf->assign = true;
} else {
@@ -1380,7 +1381,7 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc) {
if ((*val < pData[i]) ^ isMinFunc) {
*val = pData[i];
if (pCtx->subsidiaries.num > 0) {
- doCopyTupleData(pCtx, i, pCtx->pSrcBlock, &pBuf->tuplePos);
+ updateTupleData(pCtx, i, pCtx->pSrcBlock, &pBuf->tuplePos);
}
}
}
@@ -1401,7 +1402,7 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc) {
if (!pBuf->assign) {
*val = pData[i];
if (pCtx->subsidiaries.num > 0) {
- doSaveTupleData(pCtx, i, pCtx->pSrcBlock, &pBuf->tuplePos);
+ pBuf->tuplePos = saveTupleData(pCtx, i, pCtx->pSrcBlock);
}
pBuf->assign = true;
} else {
@@ -1413,7 +1414,7 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc) {
if ((*val < pData[i]) ^ isMinFunc) {
*val = pData[i];
if (pCtx->subsidiaries.num > 0) {
- doCopyTupleData(pCtx, i, pCtx->pSrcBlock, &pBuf->tuplePos);
+ updateTupleData(pCtx, i, pCtx->pSrcBlock, &pBuf->tuplePos);
}
}
}
@@ -1432,7 +1433,7 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc) {
if (!pBuf->assign) {
*val = pData[i];
if (pCtx->subsidiaries.num > 0) {
- doSaveTupleData(pCtx, i, pCtx->pSrcBlock, &pBuf->tuplePos);
+ pBuf->tuplePos = saveTupleData(pCtx, i, pCtx->pSrcBlock);
}
pBuf->assign = true;
} else {
@@ -1444,7 +1445,7 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc) {
if ((*val < pData[i]) ^ isMinFunc) {
*val = pData[i];
if (pCtx->subsidiaries.num > 0) {
- doCopyTupleData(pCtx, i, pCtx->pSrcBlock, &pBuf->tuplePos);
+ updateTupleData(pCtx, i, pCtx->pSrcBlock, &pBuf->tuplePos);
}
}
}
@@ -1463,7 +1464,7 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc) {
if (!pBuf->assign) {
*val = pData[i];
if (pCtx->subsidiaries.num > 0) {
- doSaveTupleData(pCtx, i, pCtx->pSrcBlock, &pBuf->tuplePos);
+ pBuf->tuplePos = saveTupleData(pCtx, i, pCtx->pSrcBlock);
}
pBuf->assign = true;
} else {
@@ -1475,7 +1476,7 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc) {
if ((*val < pData[i]) ^ isMinFunc) {
*val = pData[i];
if (pCtx->subsidiaries.num > 0) {
- doCopyTupleData(pCtx, i, pCtx->pSrcBlock, &pBuf->tuplePos);
+ updateTupleData(pCtx, i, pCtx->pSrcBlock, &pBuf->tuplePos);
}
}
}
@@ -1494,7 +1495,7 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc) {
if (!pBuf->assign) {
*val = pData[i];
if (pCtx->subsidiaries.num > 0) {
- doSaveTupleData(pCtx, i, pCtx->pSrcBlock, &pBuf->tuplePos);
+ pBuf->tuplePos = saveTupleData(pCtx, i, pCtx->pSrcBlock);
}
pBuf->assign = true;
} else {
@@ -1506,7 +1507,7 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc) {
if ((*val < pData[i]) ^ isMinFunc) {
*val = pData[i];
if (pCtx->subsidiaries.num > 0) {
- doCopyTupleData(pCtx, i, pCtx->pSrcBlock, &pBuf->tuplePos);
+ updateTupleData(pCtx, i, pCtx->pSrcBlock, &pBuf->tuplePos);
}
}
}
@@ -1526,7 +1527,7 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc) {
if (!pBuf->assign) {
*val = pData[i];
if (pCtx->subsidiaries.num > 0) {
- doSaveTupleData(pCtx, i, pCtx->pSrcBlock, &pBuf->tuplePos);
+ pBuf->tuplePos = saveTupleData(pCtx, i, pCtx->pSrcBlock);
}
pBuf->assign = true;
} else {
@@ -1538,7 +1539,7 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc) {
if ((*val < pData[i]) ^ isMinFunc) {
*val = pData[i];
if (pCtx->subsidiaries.num > 0) {
- doCopyTupleData(pCtx, i, pCtx->pSrcBlock, &pBuf->tuplePos);
+ updateTupleData(pCtx, i, pCtx->pSrcBlock, &pBuf->tuplePos);
}
}
}
@@ -1557,7 +1558,7 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc) {
if (!pBuf->assign) {
*val = pData[i];
if (pCtx->subsidiaries.num > 0) {
- doSaveTupleData(pCtx, i, pCtx->pSrcBlock, &pBuf->tuplePos);
+ pBuf->tuplePos = saveTupleData(pCtx, i, pCtx->pSrcBlock);
}
pBuf->assign = true;
} else {
@@ -1569,7 +1570,7 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc) {
if ((*val < pData[i]) ^ isMinFunc) {
*val = pData[i];
if (pCtx->subsidiaries.num > 0) {
- doCopyTupleData(pCtx, i, pCtx->pSrcBlock, &pBuf->tuplePos);
+ updateTupleData(pCtx, i, pCtx->pSrcBlock, &pBuf->tuplePos);
}
}
}
@@ -1579,8 +1580,8 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc) {
}
_min_max_over:
- if (numOfElems == 0 && pCtx->subsidiaries.num > 0 && !pBuf->nullTupleSaved ) {
- doSaveTupleData(pCtx, pInput->startRowIndex, pCtx->pSrcBlock, &pBuf->nullTuplePos);
+ if (numOfElems == 0 && pCtx->subsidiaries.num > 0 && !pBuf->nullTupleSaved) {
+ pBuf->nullTuplePos = saveTupleData(pCtx, pInput->startRowIndex, pCtx->pSrcBlock);
pBuf->nullTupleSaved = true;
}
return numOfElems;
@@ -1599,8 +1600,8 @@ int32_t maxFunction(SqlFunctionCtx* pCtx) {
}
static void setNullSelectivityValue(SqlFunctionCtx* pCtx, SSDataBlock* pBlock, int32_t rowIndex);
-
-static void setSelectivityValue(SqlFunctionCtx* pCtx, SSDataBlock* pBlock, const STuplePos* pTuplePos, int32_t rIndex);
+static void setSelectivityValue(SqlFunctionCtx* pCtx, SSDataBlock* pBlock, const STuplePos* pTuplePos,
+ int32_t rowIndex);
int32_t minmaxFunctionFinalize(SqlFunctionCtx* pCtx, SSDataBlock* pBlock) {
SResultRowEntryInfo* pEntryInfo = GET_RES_INFO(pCtx);
@@ -1648,34 +1649,29 @@ void setSelectivityValue(SqlFunctionCtx* pCtx, SSDataBlock* pBlock, const STuple
return;
}
- int32_t pageId = pTuplePos->pageId;
- int32_t offset = pTuplePos->offset;
+ if (pCtx->saveHandle.pBuf != NULL) {
+ if (pTuplePos->pageId != -1) {
+ int32_t numOfCols = pCtx->subsidiaries.num;
+ const char* p = loadTupleData(pCtx, pTuplePos);
- if (pTuplePos->pageId != -1) {
- int32_t numOfCols = pCtx->subsidiaries.num;
- SFilePage* pPage = getBufPage(pCtx->pBuf, pageId);
+ bool* nullList = (bool*)p;
+ char* pStart = (char*)(nullList + numOfCols * sizeof(bool));
- bool* nullList = (bool*)((char*)pPage + offset);
- char* pStart = (char*)(nullList + numOfCols * sizeof(bool));
+ // todo set the offset value to optimize the performance.
+ for (int32_t j = 0; j < numOfCols; ++j) {
+ SqlFunctionCtx* pc = pCtx->subsidiaries.pCtx[j];
+ int32_t dstSlotId = pc->pExpr->base.resSchema.slotId;
- // todo set the offset value to optimize the performance.
- for (int32_t j = 0; j < pCtx->subsidiaries.num; ++j) {
- SqlFunctionCtx* pc = pCtx->subsidiaries.pCtx[j];
-
- SFunctParam* pFuncParam = &pc->pExpr->base.pParam[0];
- int32_t dstSlotId = pc->pExpr->base.resSchema.slotId;
-
- SColumnInfoData* pDstCol = taosArrayGet(pBlock->pDataBlock, dstSlotId);
- ASSERT(pc->pExpr->base.resSchema.bytes == pDstCol->info.bytes);
- if (nullList[j]) {
- colDataAppendNULL(pDstCol, rowIndex);
- } else {
- colDataAppend(pDstCol, rowIndex, pStart, false);
+ SColumnInfoData* pDstCol = taosArrayGet(pBlock->pDataBlock, dstSlotId);
+ ASSERT(pc->pExpr->base.resSchema.bytes == pDstCol->info.bytes);
+ if (nullList[j]) {
+ colDataAppendNULL(pDstCol, rowIndex);
+ } else {
+ colDataAppend(pDstCol, rowIndex, pStart, false);
+ }
+ pStart += pDstCol->info.bytes;
}
- pStart += pDstCol->info.bytes;
}
-
- releaseBufPage(pCtx->pBuf, pPage);
}
}
@@ -1705,7 +1701,7 @@ void appendSelectivityValue(SqlFunctionCtx* pCtx, int32_t rowIndex, int32_t pos)
char* pData = colDataGetData(pSrcCol, rowIndex);
// append to dest col
- int32_t dstSlotId = pc->pExpr->base.resSchema.slotId;
+ int32_t dstSlotId = pc->pExpr->base.resSchema.slotId;
SColumnInfoData* pDstCol = taosArrayGet(pCtx->pDstBlock->pDataBlock, dstSlotId);
ASSERT(pc->pExpr->base.resSchema.bytes == pDstCol->info.bytes);
@@ -1716,7 +1712,6 @@ void appendSelectivityValue(SqlFunctionCtx* pCtx, int32_t rowIndex, int32_t pos)
colDataAppend(pDstCol, pos, pData, false);
}
}
-
}
void replaceTupleData(STuplePos* pDestPos, STuplePos* pSourcePos) {
@@ -2594,8 +2589,8 @@ static void apercentileTransferInfo(SAPercentileInfo* pInput, SAPercentileInfo*
memcpy(pHisto, pInput->pHisto, sizeof(SHistogramInfo) + sizeof(SHistBin) * (MAX_HISTOGRAM_BIN + 1));
pHisto->elems = (SHistBin*)((char*)pHisto + sizeof(SHistogramInfo));
- qDebug("%s merge histo, total:%" PRId64 ", entry:%d, %p", __FUNCTION__, pHisto->numOfElems,
- pHisto->numOfEntries, pHisto);
+ qDebug("%s merge histo, total:%" PRId64 ", entry:%d, %p", __FUNCTION__, pHisto->numOfElems, pHisto->numOfEntries,
+ pHisto);
} else {
pHisto->elems = (SHistBin*)((char*)pHisto + sizeof(SHistogramInfo));
qDebug("%s input histogram, elem:%" PRId64 ", entry:%d, %p", __FUNCTION__, pHisto->numOfElems,
@@ -2605,8 +2600,8 @@ static void apercentileTransferInfo(SAPercentileInfo* pInput, SAPercentileInfo*
memcpy(pHisto, pRes, sizeof(SHistogramInfo) + sizeof(SHistBin) * MAX_HISTOGRAM_BIN);
pHisto->elems = (SHistBin*)((char*)pHisto + sizeof(SHistogramInfo));
- qDebug("%s merge histo, total:%" PRId64 ", entry:%d, %p", __FUNCTION__, pHisto->numOfElems,
- pHisto->numOfEntries, pHisto);
+ qDebug("%s merge histo, total:%" PRId64 ", entry:%d, %p", __FUNCTION__, pHisto->numOfElems, pHisto->numOfEntries,
+ pHisto);
tHistogramDestroy(&pRes);
}
}
@@ -2633,8 +2628,8 @@ int32_t apercentileFunctionMerge(SqlFunctionCtx* pCtx) {
}
if (pInfo->algo != APERCT_ALGO_TDIGEST) {
- qDebug("%s after merge, total:%d, numOfEntry:%d, %p", __FUNCTION__, pInfo->pHisto->numOfElems, pInfo->pHisto->numOfEntries,
- pInfo->pHisto);
+ qDebug("%s after merge, total:%d, numOfEntry:%d, %p", __FUNCTION__, pInfo->pHisto->numOfElems,
+ pInfo->pHisto->numOfEntries, pInfo->pHisto);
}
SET_VAL(pResInfo, 1, 1);
@@ -2713,7 +2708,7 @@ int32_t apercentileCombine(SqlFunctionCtx* pDestCtx, SqlFunctionCtx* pSourceCtx)
}
EFuncDataRequired lastDynDataReq(void* pRes, STimeWindow* pTimeWindow) {
- SResultRowEntryInfo* pEntry = (SResultRowEntryInfo*) pRes;
+ SResultRowEntryInfo* pEntry = (SResultRowEntryInfo*)pRes;
// not initialized yet, data is required
if (pEntry == NULL) {
@@ -2756,15 +2751,16 @@ static FORCE_INLINE TSKEY getRowPTs(SColumnInfoData* pTsColInfo, int32_t rowInde
return *(TSKEY*)colDataGetData(pTsColInfo, rowIndex);
}
-static void saveTupleData(const SSDataBlock* pSrcBlock, int32_t rowIndex, SqlFunctionCtx* pCtx, SFirstLastRes* pInfo) {
+static void firstlastSaveTupleData(const SSDataBlock* pSrcBlock, int32_t rowIndex, SqlFunctionCtx* pCtx,
+ SFirstLastRes* pInfo) {
if (pCtx->subsidiaries.num <= 0) {
return;
}
if (!pInfo->hasResult) {
- doSaveTupleData(pCtx, rowIndex, pSrcBlock, &pInfo->pos);
+ pInfo->pos = saveTupleData(pCtx, rowIndex, pSrcBlock);
} else {
- doCopyTupleData(pCtx, rowIndex, pSrcBlock, &pInfo->pos);
+ updateTupleData(pCtx, rowIndex, pSrcBlock, &pInfo->pos);
}
}
@@ -2778,7 +2774,7 @@ static void doSaveCurrentVal(SqlFunctionCtx* pCtx, int32_t rowIndex, int64_t cur
memcpy(pInfo->buf, pData, pInfo->bytes);
pInfo->ts = currentTs;
- saveTupleData(pCtx->pSrcBlock, rowIndex, pCtx, pInfo);
+ firstlastSaveTupleData(pCtx->pSrcBlock, rowIndex, pCtx, pInfo);
pInfo->hasResult = true;
}
@@ -2799,6 +2795,8 @@ int32_t firstFunction(SqlFunctionCtx* pCtx) {
// All null data column, return directly.
if (pInput->colDataAggIsSet && (pInput->pColumnDataAgg[0]->numOfNull == pInput->totalRows)) {
ASSERT(pInputCol->hasNull == true);
+ // save selectivity value for a column consisting of all null values
+ firstlastSaveTupleData(pCtx->pSrcBlock, pInput->startRowIndex, pCtx, pInfo);
return 0;
}
@@ -2875,7 +2873,10 @@ int32_t firstFunction(SqlFunctionCtx* pCtx) {
}
}
#endif
-
+ if (numOfElems == 0) {
+ // save selectivity value for a column consisting of all null values
+ firstlastSaveTupleData(pCtx->pSrcBlock, pInput->startRowIndex, pCtx, pInfo);
+ }
SET_VAL(pResInfo, numOfElems, 1);
return TSDB_CODE_SUCCESS;
}
@@ -2896,6 +2897,8 @@ int32_t lastFunction(SqlFunctionCtx* pCtx) {
// All null data column, return directly.
if (pInput->colDataAggIsSet && (pInput->pColumnDataAgg[0]->numOfNull == pInput->totalRows)) {
ASSERT(pInputCol->hasNull == true);
+ // save selectivity value for a column consisting of all null values
+ firstlastSaveTupleData(pCtx->pSrcBlock, pInput->startRowIndex, pCtx, pInfo);
return 0;
}
@@ -2956,7 +2959,10 @@ int32_t lastFunction(SqlFunctionCtx* pCtx) {
}
}
#endif
-
+ if (numOfElems == 0) {
+ // save selectivity value for a column consisting of all null values
+ firstlastSaveTupleData(pCtx->pSrcBlock, pInput->startRowIndex, pCtx, pInfo);
+ }
SET_VAL(pResInfo, numOfElems, 1);
return TSDB_CODE_SUCCESS;
}
@@ -2982,7 +2988,7 @@ static void firstLastTransferInfo(SqlFunctionCtx* pCtx, SFirstLastRes* pInput, S
pOutput->bytes = pInput->bytes;
memcpy(pOutput->buf, pInput->buf, pOutput->bytes);
- saveTupleData(pCtx->pSrcBlock, start, pCtx, pOutput);
+ firstlastSaveTupleData(pCtx->pSrcBlock, start, pCtx, pOutput);
pOutput->hasResult = true;
}
@@ -3087,7 +3093,7 @@ static void doSaveLastrow(SqlFunctionCtx* pCtx, char* pData, int32_t rowIndex, i
}
pInfo->ts = cts;
- saveTupleData(pCtx->pSrcBlock, rowIndex, pCtx, pInfo);
+ firstlastSaveTupleData(pCtx->pSrcBlock, rowIndex, pCtx, pInfo);
pInfo->hasResult = true;
}
@@ -3180,7 +3186,7 @@ bool diffFunctionSetup(SqlFunctionCtx* pCtx, SResultRowEntryInfo* pResInfo) {
static void doSetPrevVal(SDiffInfo* pDiffInfo, int32_t type, const char* pv) {
switch (type) {
case TSDB_DATA_TYPE_BOOL:
- pDiffInfo->prev.i64 = *(bool*)pv? 1:0;
+ pDiffInfo->prev.i64 = *(bool*)pv ? 1 : 0;
break;
case TSDB_DATA_TYPE_TINYINT:
pDiffInfo->prev.i64 = *(int8_t*)pv;
@@ -3420,7 +3426,7 @@ int32_t topFunction(SqlFunctionCtx* pCtx) {
}
if (numOfElems == 0 && pCtx->subsidiaries.num > 0 && !pRes->nullTupleSaved) {
- doSaveTupleData(pCtx, pInput->startRowIndex, pCtx->pSrcBlock, &pRes->nullTuplePos);
+ pRes->nullTuplePos = saveTupleData(pCtx, pInput->startRowIndex, pCtx->pSrcBlock);
pRes->nullTupleSaved = true;
}
return TSDB_CODE_SUCCESS;
@@ -3448,7 +3454,7 @@ int32_t bottomFunction(SqlFunctionCtx* pCtx) {
}
if (numOfElems == 0 && pCtx->subsidiaries.num > 0 && !pRes->nullTupleSaved) {
- doSaveTupleData(pCtx, pInput->startRowIndex, pCtx->pSrcBlock, &pRes->nullTuplePos);
+ pRes->nullTuplePos = saveTupleData(pCtx, pInput->startRowIndex, pCtx->pSrcBlock);
pRes->nullTupleSaved = true;
}
@@ -3500,7 +3506,7 @@ void doAddIntoResult(SqlFunctionCtx* pCtx, void* pData, int32_t rowIndex, SSData
// save the data of this tuple
if (pCtx->subsidiaries.num > 0) {
- doSaveTupleData(pCtx, rowIndex, pSrcBlock, &pItem->tuplePos);
+ pItem->tuplePos = saveTupleData(pCtx, rowIndex, pSrcBlock);
}
#ifdef BUF_PAGE_DEBUG
qDebug("page_saveTuple i:%d, item:%p,pageId:%d, offset:%d\n", pEntryInfo->numOfRes, pItem, pItem->tuplePos.pageId,
@@ -3524,7 +3530,7 @@ void doAddIntoResult(SqlFunctionCtx* pCtx, void* pData, int32_t rowIndex, SSData
// save the data of this tuple by over writing the old data
if (pCtx->subsidiaries.num > 0) {
- doCopyTupleData(pCtx, rowIndex, pSrcBlock, &pItem->tuplePos);
+ updateTupleData(pCtx, rowIndex, pSrcBlock, &pItem->tuplePos);
}
#ifdef BUF_PAGE_DEBUG
qDebug("page_copyTuple pageId:%d, offset:%d", pItem->tuplePos.pageId, pItem->tuplePos.offset);
@@ -3541,38 +3547,14 @@ void doAddIntoResult(SqlFunctionCtx* pCtx, void* pData, int32_t rowIndex, SSData
* |(n columns, one bit for each column)| src column #1| src column #2|
* +------------------------------------+--------------+--------------+
*/
-void doSaveTupleData(SqlFunctionCtx* pCtx, int32_t rowIndex, const SSDataBlock* pSrcBlock, STuplePos* pPos) {
- SFilePage* pPage = NULL;
-
- // todo refactor: move away
- int32_t completeRowSize = pCtx->subsidiaries.num * sizeof(bool);
- for (int32_t j = 0; j < pCtx->subsidiaries.num; ++j) {
- SqlFunctionCtx* pc = pCtx->subsidiaries.pCtx[j];
- completeRowSize += pc->pExpr->base.resSchema.bytes;
- }
-
- if (pCtx->curBufPage == -1) {
- pPage = getNewBufPage(pCtx->pBuf, 0, &pCtx->curBufPage);
- pPage->num = sizeof(SFilePage);
- } else {
- pPage = getBufPage(pCtx->pBuf, pCtx->curBufPage);
- if (pPage->num + completeRowSize > getBufPageSize(pCtx->pBuf)) {
- // current page is all used, let's prepare a new buffer page
- releaseBufPage(pCtx->pBuf, pPage);
- pPage = getNewBufPage(pCtx->pBuf, 0, &pCtx->curBufPage);
- pPage->num = sizeof(SFilePage);
- }
- }
+void* serializeTupleData(const SSDataBlock* pSrcBlock, int32_t rowIndex, SSubsidiaryResInfo* pSubsidiaryies,
+ char* buf) {
+ char* nullList = buf;
+ char* pStart = (char*)(nullList + sizeof(bool) * pSubsidiaryies->num);
- pPos->pageId = pCtx->curBufPage;
- pPos->offset = pPage->num;
-
- // keep the current row data, extract method
int32_t offset = 0;
- bool* nullList = (bool*)((char*)pPage + pPage->num);
- char* pStart = (char*)(nullList + sizeof(bool) * pCtx->subsidiaries.num);
- for (int32_t i = 0; i < pCtx->subsidiaries.num; ++i) {
- SqlFunctionCtx* pc = pCtx->subsidiaries.pCtx[i];
+ for (int32_t i = 0; i < pSubsidiaryies->num; ++i) {
+ SqlFunctionCtx* pc = pSubsidiaryies->pCtx[i];
SFunctParam* pFuncParam = &pc->pExpr->base.pParam[0];
int32_t srcSlotId = pFuncParam->pCol->slotId;
@@ -3593,57 +3575,94 @@ void doSaveTupleData(SqlFunctionCtx* pCtx, int32_t rowIndex, const SSDataBlock*
offset += pCol->info.bytes;
}
- pPage->num += completeRowSize;
-
- setBufPageDirty(pPage, true);
- releaseBufPage(pCtx->pBuf, pPage);
-#ifdef BUF_PAGE_DEBUG
- qDebug("page_saveTuple pos:%p,pageId:%d, offset:%d\n", pPos, pPos->pageId, pPos->offset);
-#endif
+ return buf;
}
-void doCopyTupleData(SqlFunctionCtx* pCtx, int32_t rowIndex, const SSDataBlock* pSrcBlock, STuplePos* pPos) {
- SFilePage* pPage = getBufPage(pCtx->pBuf, pPos->pageId);
+static STuplePos doSaveTupleData(SSerializeDataHandle* pHandle, const void* pBuf, size_t length) {
+ STuplePos p = {0};
+ if (pHandle->pBuf != NULL) {
+ SFilePage* pPage = NULL;
- int32_t numOfCols = pCtx->subsidiaries.num;
+ if (pHandle->currentPage == -1) {
+ pPage = getNewBufPage(pHandle->pBuf, &pHandle->currentPage);
+ pPage->num = sizeof(SFilePage);
+ } else {
+ pPage = getBufPage(pHandle->pBuf, pHandle->currentPage);
+ if (pPage->num + length > getBufPageSize(pHandle->pBuf)) {
+ // current page is all used, let's prepare a new buffer page
+ releaseBufPage(pHandle->pBuf, pPage);
+ pPage = getNewBufPage(pHandle->pBuf, &pHandle->currentPage);
+ pPage->num = sizeof(SFilePage);
+ }
+ }
- bool* nullList = (bool*)((char*)pPage + pPos->offset);
- char* pStart = (char*)(nullList + numOfCols * sizeof(bool));
+ p = (STuplePos){.pageId = pHandle->currentPage, .offset = pPage->num};
+ memcpy(pPage->data + pPage->num, pBuf, length);
- int32_t offset = 0;
- for (int32_t i = 0; i < numOfCols; ++i) {
- SqlFunctionCtx* pc = pCtx->subsidiaries.pCtx[i];
- SFunctParam* pFuncParam = &pc->pExpr->base.pParam[0];
- int32_t srcSlotId = pFuncParam->pCol->slotId;
+ pPage->num += length;
+ setBufPageDirty(pPage, true);
+ releaseBufPage(pHandle->pBuf, pPage);
+ } else {
+ // other tuple save policy
+ }
- SColumnInfoData* pCol = taosArrayGet(pSrcBlock->pDataBlock, srcSlotId);
- if ((nullList[i] = colDataIsNull_s(pCol, rowIndex)) == true) {
- offset += pCol->info.bytes;
- continue;
- }
+ return p;
+}
- char* p = colDataGetData(pCol, rowIndex);
- if (IS_VAR_DATA_TYPE(pCol->info.type)) {
- memcpy(pStart + offset, p, (pCol->info.type == TSDB_DATA_TYPE_JSON) ? getJsonValueLen(p) : varDataTLen(p));
- } else {
- memcpy(pStart + offset, p, pCol->info.bytes);
+STuplePos saveTupleData(SqlFunctionCtx* pCtx, int32_t rowIndex, const SSDataBlock* pSrcBlock) {
+ if (pCtx->subsidiaries.rowLen == 0) {
+ int32_t rowLen = 0;
+ for (int32_t j = 0; j < pCtx->subsidiaries.num; ++j) {
+ SqlFunctionCtx* pc = pCtx->subsidiaries.pCtx[j];
+ rowLen += pc->pExpr->base.resSchema.bytes;
}
- offset += pCol->info.bytes;
+ pCtx->subsidiaries.rowLen = rowLen + pCtx->subsidiaries.num * sizeof(bool);
+ pCtx->subsidiaries.buf = taosMemoryMalloc(pCtx->subsidiaries.rowLen);
}
- setBufPageDirty(pPage, true);
- releaseBufPage(pCtx->pBuf, pPage);
-#ifdef BUF_PAGE_DEBUG
- qDebug("page_copyTuple pos:%p, pageId:%d, offset:%d", pPos, pPos->pageId, pPos->offset);
-#endif
+ char* buf = serializeTupleData(pSrcBlock, rowIndex, &pCtx->subsidiaries, pCtx->subsidiaries.buf);
+ return doSaveTupleData(&pCtx->saveHandle, buf, pCtx->subsidiaries.rowLen);
+}
+
+static int32_t doUpdateTupleData(SSerializeDataHandle* pHandle, const void* pBuf, size_t length, STuplePos* pPos) {
+ if (pHandle->pBuf != NULL) {
+ SFilePage* pPage = getBufPage(pHandle->pBuf, pPos->pageId);
+ memcpy(pPage->data + pPos->offset, pBuf, length);
+ setBufPageDirty(pPage, true);
+ releaseBufPage(pHandle->pBuf, pPage);
+ } else {
+ }
+
+ return TSDB_CODE_SUCCESS;
+}
+
+static int32_t updateTupleData(SqlFunctionCtx* pCtx, int32_t rowIndex, const SSDataBlock* pSrcBlock, STuplePos* pPos) {
+ char* buf = serializeTupleData(pSrcBlock, rowIndex, &pCtx->subsidiaries, pCtx->subsidiaries.buf);
+ doUpdateTupleData(&pCtx->saveHandle, buf, pCtx->subsidiaries.rowLen, pPos);
+ return TSDB_CODE_SUCCESS;
+}
+
+static char* doLoadTupleData(SSerializeDataHandle* pHandle, const STuplePos* pPos) {
+ if (pHandle->pBuf != NULL) {
+ SFilePage* pPage = getBufPage(pHandle->pBuf, pPos->pageId);
+ char* p = pPage->data + pPos->offset;
+ releaseBufPage(pHandle->pBuf, pPage);
+ return p;
+ } else {
+ return NULL;
+ }
+}
+
+static const char* loadTupleData(SqlFunctionCtx* pCtx, const STuplePos* pPos) {
+ return doLoadTupleData(&pCtx->saveHandle, pPos);
}
int32_t topBotFinalize(SqlFunctionCtx* pCtx, SSDataBlock* pBlock) {
SResultRowEntryInfo* pEntryInfo = GET_RES_INFO(pCtx);
STopBotRes* pRes = getTopBotOutputInfo(pCtx);
- int16_t type = pCtx->input.pData[0]->info.type;
+ int16_t type = pCtx->pExpr->base.resSchema.type;
int32_t slotId = pCtx->pExpr->base.resSchema.slotId;
SColumnInfoData* pCol = taosArrayGet(pBlock->pDataBlock, slotId);
@@ -3788,8 +3807,6 @@ int32_t spreadFunction(SqlFunctionCtx* pCtx) {
SColumnInfoData* pCol = pInput->pData[0];
int32_t start = pInput->startRowIndex;
- int32_t numOfRows = pInput->numOfRows;
-
// check the valid data one by one
for (int32_t i = start; i < pInput->numOfRows + start; ++i) {
if (colDataIsNull_f(pCol->nullbitmap, i)) {
@@ -3973,8 +3990,8 @@ int32_t elapsedFunction(SqlFunctionCtx* pCtx) {
}
if (pCtx->end.key == INT64_MIN) {
- pInfo->min = (pInfo->min > ptsList[start + pInput->numOfRows - 1]) ?
- ptsList[start + pInput->numOfRows - 1] : pInfo->min;
+ pInfo->min =
+ (pInfo->min > ptsList[start + pInput->numOfRows - 1]) ? ptsList[start + pInput->numOfRows - 1] : pInfo->min;
} else {
pInfo->min = pCtx->end.key;
}
@@ -3986,8 +4003,8 @@ int32_t elapsedFunction(SqlFunctionCtx* pCtx) {
}
if (pCtx->end.key == INT64_MIN) {
- pInfo->max = (pInfo->max < ptsList[start + pInput->numOfRows - 1]) ?
- ptsList[start + pInput->numOfRows - 1] : pInfo->max;
+ pInfo->max =
+ (pInfo->max < ptsList[start + pInput->numOfRows - 1]) ? ptsList[start + pInput->numOfRows - 1] : pInfo->max;
} else {
pInfo->max = pCtx->end.key + 1;
}
@@ -4694,7 +4711,7 @@ int32_t stateCountFunction(SqlFunctionCtx* pCtx) {
colDataAppendNULL(pOutput, i);
// handle selectivity
if (pCtx->subsidiaries.num > 0) {
- appendSelectivityValue(pCtx, i, i);
+ appendSelectivityValue(pCtx, i, pCtx->offset + numOfElems - 1);
}
continue;
}
@@ -4707,11 +4724,11 @@ int32_t stateCountFunction(SqlFunctionCtx* pCtx) {
} else {
pInfo->count = 0;
}
- colDataAppend(pOutput, i, (char*)&output, false);
+ colDataAppend(pOutput, pCtx->offset + numOfElems - 1, (char*)&output, false);
// handle selectivity
if (pCtx->subsidiaries.num > 0) {
- appendSelectivityValue(pCtx, i, i);
+ appendSelectivityValue(pCtx, i, pCtx->offset + numOfElems - 1);
}
}
@@ -4964,7 +4981,7 @@ static void doReservoirSample(SqlFunctionCtx* pCtx, SSampleInfo* pInfo, char* da
if (pInfo->numSampled < pInfo->samples) {
sampleAssignResult(pInfo, data, pInfo->numSampled);
if (pCtx->subsidiaries.num > 0) {
- doSaveTupleData(pCtx, index, pCtx->pSrcBlock, &pInfo->tuplePos[pInfo->numSampled]);
+ pInfo->tuplePos[pInfo->numSampled] = saveTupleData(pCtx, index, pCtx->pSrcBlock);
}
pInfo->numSampled++;
} else {
@@ -4972,7 +4989,7 @@ static void doReservoirSample(SqlFunctionCtx* pCtx, SSampleInfo* pInfo, char* da
if (j < pInfo->samples) {
sampleAssignResult(pInfo, data, j);
if (pCtx->subsidiaries.num > 0) {
- doCopyTupleData(pCtx, index, pCtx->pSrcBlock, &pInfo->tuplePos[j]);
+ updateTupleData(pCtx, index, pCtx->pSrcBlock, &pInfo->tuplePos[j]);
}
}
}
@@ -4995,7 +5012,7 @@ int32_t sampleFunction(SqlFunctionCtx* pCtx) {
}
if (pInfo->numSampled == 0 && pCtx->subsidiaries.num > 0 && !pInfo->nullTupleSaved) {
- doSaveTupleData(pCtx, pInput->startRowIndex, pCtx->pSrcBlock, &pInfo->nullTuplePos);
+ pInfo->nullTuplePos = saveTupleData(pCtx, pInput->startRowIndex, pCtx->pSrcBlock);
pInfo->nullTupleSaved = true;
}
diff --git a/source/libs/function/src/functionMgt.c b/source/libs/function/src/functionMgt.c
index 152a970c48eb5fb374f8806062d264e53b88f664..26735fa263cfed15ead940493b3c1eadf0e29c70 100644
--- a/source/libs/function/src/functionMgt.c
+++ b/source/libs/function/src/functionMgt.c
@@ -101,6 +101,14 @@ bool fmIsBuiltinFunc(const char* pFunc) {
return NULL != taosHashGet(gFunMgtService.pFuncNameHashTable, pFunc, strlen(pFunc));
}
+EFunctionType fmGetFuncType(const char* pFunc) {
+ void* pVal = taosHashGet(gFunMgtService.pFuncNameHashTable, pFunc, strlen(pFunc));
+ if (NULL != pVal) {
+ return funcMgtBuiltins[*(int32_t*)pVal].type;
+ }
+ return FUNCTION_TYPE_UDF;
+}
+
EFuncDataRequired fmFuncDataRequired(SFunctionNode* pFunc, STimeWindow* pTimeWindow) {
if (fmIsUserDefinedFunc(pFunc->funcId) || pFunc->funcId < 0 || pFunc->funcId >= funcMgtBuiltinsNum) {
return FUNC_DATA_REQUIRED_DATA_LOAD;
diff --git a/source/libs/function/src/tpercentile.c b/source/libs/function/src/tpercentile.c
index 517253dc01691754425bd93c40bfef2a2750eed5..4c58c0abe50e5784314445934618265231d4805a 100644
--- a/source/libs/function/src/tpercentile.c
+++ b/source/libs/function/src/tpercentile.c
@@ -33,13 +33,13 @@ static SFilePage *loadDataFromFilePage(tMemBucket *pMemBucket, int32_t slotIdx)
SFilePage *buffer = (SFilePage *)taosMemoryCalloc(1, pMemBucket->bytes * pMemBucket->pSlots[slotIdx].info.size + sizeof(SFilePage));
int32_t groupId = getGroupId(pMemBucket->numOfSlots, slotIdx, pMemBucket->times);
- SIDList list = getDataBufPagesIdList(pMemBucket->pBuffer, groupId);
+ SArray* pIdList = *(SArray**)taosHashGet(pMemBucket->groupPagesMap, &groupId, sizeof(groupId));
int32_t offset = 0;
- for(int32_t i = 0; i < list->size; ++i) {
- struct SPageInfo* pgInfo = *(struct SPageInfo**) taosArrayGet(list, i);
+ for(int32_t i = 0; i < taosArrayGetSize(pIdList); ++i) {
+ int32_t* pageId = taosArrayGet(pIdList, i);
- SFilePage* pg = getBufPage(pMemBucket->pBuffer, getPageId(pgInfo));
+ SFilePage* pg = getBufPage(pMemBucket->pBuffer, *pageId);
memcpy(buffer->data + offset, pg->data, (size_t)(pg->num * pMemBucket->bytes));
offset += (int32_t)(pg->num * pMemBucket->bytes);
@@ -97,11 +97,11 @@ double findOnlyResult(tMemBucket *pMemBucket) {
}
int32_t groupId = getGroupId(pMemBucket->numOfSlots, i, pMemBucket->times);
- SIDList list = getDataBufPagesIdList(pMemBucket->pBuffer, groupId);
+ SArray* list = *(SArray**)taosHashGet(pMemBucket->groupPagesMap, &groupId, sizeof(groupId));
assert(list->size == 1);
- struct SPageInfo* pgInfo = (struct SPageInfo*) taosArrayGetP(list, 0);
- SFilePage* pPage = getBufPage(pMemBucket->pBuffer, getPageId(pgInfo));
+ int32_t* pageId = taosArrayGet(list, 0);
+ SFilePage* pPage = getBufPage(pMemBucket->pBuffer, *pageId);
assert(pPage->num == 1);
double v = 0;
@@ -233,7 +233,7 @@ tMemBucket *tMemBucketCreate(int16_t nElemSize, int16_t dataType, double minval,
pBucket->times = 1;
pBucket->maxCapacity = 200000;
-
+ pBucket->groupPagesMap = taosHashInit(128, taosGetDefaultHashFunction(TSDB_DATA_TYPE_INT), false, HASH_NO_LOCK);
if (setBoundingBox(&pBucket->range, pBucket->type, minval, maxval) != 0) {
// qError("MemBucket:%p, invalid value range: %f-%f", pBucket, minval, maxval);
taosMemoryFree(pBucket);
@@ -280,8 +280,16 @@ void tMemBucketDestroy(tMemBucket *pBucket) {
return;
}
+ void* p = taosHashIterate(pBucket->groupPagesMap, NULL);
+ while(p) {
+ SArray** p1 = p;
+ p = taosHashIterate(pBucket->groupPagesMap, p);
+ taosArrayDestroy(*p1);
+ }
+
destroyDiskbasedBuf(pBucket->pBuffer);
taosMemoryFreeClear(pBucket->pSlots);
+ taosHashCleanup(pBucket->groupPagesMap);
taosMemoryFreeClear(pBucket);
}
@@ -357,8 +365,16 @@ int32_t tMemBucketPut(tMemBucket *pBucket, const void *data, size_t size) {
pSlot->info.data = NULL;
}
- pSlot->info.data = getNewBufPage(pBucket->pBuffer, groupId, &pageId);
+ SArray* pPageIdList = (SArray*)taosHashGet(pBucket->groupPagesMap, &groupId, sizeof(groupId));
+ if (pPageIdList == NULL) {
+ SArray* pList = taosArrayInit(4, sizeof(int32_t));
+ taosHashPut(pBucket->groupPagesMap, &groupId, sizeof(groupId), &pList, POINTER_BYTES);
+ pPageIdList = pList;
+ }
+
+ pSlot->info.data = getNewBufPage(pBucket->pBuffer, &pageId);
pSlot->info.pageId = pageId;
+ taosArrayPush(pPageIdList, &pageId);
}
memcpy(pSlot->info.data->data + pSlot->info.data->num * pBucket->bytes, d, pBucket->bytes);
@@ -476,7 +492,7 @@ double getPercentileImpl(tMemBucket *pMemBucket, int32_t count, double fraction)
resetSlotInfo(pMemBucket);
int32_t groupId = getGroupId(pMemBucket->numOfSlots, i, pMemBucket->times - 1);
- SIDList list = getDataBufPagesIdList(pMemBucket->pBuffer, groupId);
+ SIDList list = *(SArray**)taosHashGet(pMemBucket->groupPagesMap, &groupId, sizeof(groupId));
assert(list->size > 0);
for (int32_t f = 0; f < list->size; ++f) {
diff --git a/source/libs/function/src/tudf.c b/source/libs/function/src/tudf.c
index d5a3e91eeaa63723029617dfc7be1f72679508bd..3f472b53a026de186bbab2e70d37997b2b2fda44 100644
--- a/source/libs/function/src/tudf.c
+++ b/source/libs/function/src/tudf.c
@@ -81,7 +81,7 @@ static int32_t udfSpawnUdfd(SUdfdData* pData) {
taosDirName(path);
#endif
} else {
- strncpy(path, tsProcPath, strlen(tsProcPath));
+ strncpy(path, tsProcPath, PATH_MAX - 1);
taosDirName(path);
}
#ifdef WINDOWS
@@ -1183,7 +1183,9 @@ void onUdfcPipeClose(uv_handle_t *handle) {
QUEUE_REMOVE(&task->procTaskQueue);
uv_sem_post(&task->taskSem);
}
- conn->session->udfUvPipe = NULL;
+ if (conn->session != NULL) {
+ conn->session->udfUvPipe = NULL;
+ }
taosMemoryFree(conn->readBuf.buf);
taosMemoryFree(conn);
taosMemoryFree((uv_pipe_t *) handle);
@@ -1803,6 +1805,7 @@ int32_t doTeardownUdf(UdfcFuncHandle handle) {
if (session->udfUvPipe == NULL) {
fnError("tear down udf. pipe to udfd does not exist. udf name: %s", session->udfName);
+ taosMemoryFree(session);
return TSDB_CODE_UDF_PIPE_NO_PIPE;
}
@@ -1821,7 +1824,11 @@ int32_t doTeardownUdf(UdfcFuncHandle handle) {
udfcRunUdfUvTask(task, UV_TASK_DISCONNECT);
fnInfo("tear down udf. udf name: %s, udf func handle: %p", session->udfName, handle);
-
+ //TODO: synchronization refactor between libuv event loop and request thread
+ if (session->udfUvPipe != NULL && session->udfUvPipe->data != NULL) {
+ SClientUvConn *conn = session->udfUvPipe->data;
+ conn->session = NULL;
+ }
taosMemoryFree(session);
taosMemoryFree(task);
diff --git a/source/libs/function/src/udfd.c b/source/libs/function/src/udfd.c
index 1cbc78df48b1cbeb5d1645dcd945168f21d25ba6..a45e4585e8655d318a440e3357d50df2d2513e2c 100644
--- a/source/libs/function/src/udfd.c
+++ b/source/libs/function/src/udfd.c
@@ -84,6 +84,7 @@ typedef struct SUdf {
TUdfAggStartFunc aggStartFunc;
TUdfAggProcessFunc aggProcFunc;
TUdfAggFinishFunc aggFinishFunc;
+ TUdfAggMergeFunc aggMergeFunc;
TUdfInitFunc initFunc;
TUdfDestroyFunc destroyFunc;
@@ -271,6 +272,15 @@ void udfdProcessCallRequest(SUvUdfWork *uvUdf, SUdfRequest *request) {
break;
}
+ case TSDB_UDF_CALL_AGG_MERGE: {
+ SUdfInterBuf outBuf = {.buf = taosMemoryMalloc(udf->bufSize), .bufLen = udf->bufSize, .numOfResult = 0};
+ code = udf->aggMergeFunc(&call->interBuf, &call->interBuf2, &outBuf);
+ freeUdfInterBuf(&call->interBuf);
+ freeUdfInterBuf(&call->interBuf2);
+ subRsp->resultBuf = outBuf;
+
+ break;
+ }
case TSDB_UDF_CALL_AGG_FIN: {
SUdfInterBuf outBuf = {.buf = taosMemoryMalloc(udf->bufSize), .bufLen = udf->bufSize, .numOfResult = 0};
code = udf->aggFinishFunc(&call->interBuf, &outBuf);
@@ -309,6 +319,10 @@ void udfdProcessCallRequest(SUvUdfWork *uvUdf, SUdfRequest *request) {
freeUdfInterBuf(&subRsp->resultBuf);
break;
}
+ case TSDB_UDF_CALL_AGG_MERGE: {
+ freeUdfInterBuf(&subRsp->resultBuf);
+ break;
+ }
case TSDB_UDF_CALL_AGG_FIN: {
freeUdfInterBuf(&subRsp->resultBuf);
break;
@@ -439,7 +453,7 @@ void udfdProcessRpcRsp(void *parent, SRpcMsg *pMsg, SEpSet *pEpSet) {
goto _return;
}
taosCloseFile(&file);
- strncpy(udf->path, path, strlen(path));
+ strncpy(udf->path, path, PATH_MAX - 1);
tFreeSFuncInfo(pFuncInfo);
taosArrayDestroy(retrieveRsp.pFuncInfos);
msgInfo->code = 0;
@@ -552,15 +566,19 @@ int32_t udfdLoadUdf(char *udfName, SUdf *udf) {
uv_dlsym(&udf->lib, processFuncName, (void **)(&udf->aggProcFunc));
char startFuncName[TSDB_FUNC_NAME_LEN + 6] = {0};
char *startSuffix = "_start";
- strncpy(startFuncName, processFuncName, strlen(processFuncName));
+ strncpy(startFuncName, processFuncName, sizeof(startFuncName) - 1);
strncat(startFuncName, startSuffix, strlen(startSuffix));
uv_dlsym(&udf->lib, startFuncName, (void **)(&udf->aggStartFunc));
char finishFuncName[TSDB_FUNC_NAME_LEN + 7] = {0};
char *finishSuffix = "_finish";
- strncpy(finishFuncName, processFuncName, strlen(processFuncName));
+ strncpy(finishFuncName, processFuncName, sizeof(finishFuncName) - 1);
strncat(finishFuncName, finishSuffix, strlen(finishSuffix));
uv_dlsym(&udf->lib, finishFuncName, (void **)(&udf->aggFinishFunc));
- // TODO: merge
+ char mergeFuncName[TSDB_FUNC_NAME_LEN + 6] = {0};
+ char *mergeSuffix = "_merge";
+ strncpy(mergeFuncName, processFuncName, sizeof(mergeFuncName) - 1);
+ strncat(mergeFuncName, mergeSuffix, strlen(mergeSuffix));
+ uv_dlsym(&udf->lib, mergeFuncName, (void **)(&udf->aggMergeFunc));
}
return 0;
}
diff --git a/source/libs/index/inc/indexInt.h b/source/libs/index/inc/indexInt.h
index 065f4acb576263d1f7d5cbe8238273dc325ccb09..9605528ad6ae150fd88f512cdf5344b81d486a99 100644
--- a/source/libs/index/inc/indexInt.h
+++ b/source/libs/index/inc/indexInt.h
@@ -40,26 +40,31 @@ extern "C" {
#define indexTrace(...) do { if (idxDebugFlag & DEBUG_TRACE) { taosPrintLog("IDX", DEBUG_TRACE, idxDebugFlag, __VA_ARGS__);} } while (0)
// clang-format on
+extern void* indexQhandle;
+
typedef enum { LT, LE, GT, GE, CONTAINS, EQ } RangeType;
typedef enum { kTypeValue, kTypeDeletion } STermValueType;
typedef enum { kRebuild, kFinished } SIdxStatus;
typedef struct SIndexStat {
- int32_t totalAdded; //
- int32_t totalDeled; //
- int32_t totalUpdated; //
- int32_t totalTerms; //
- int32_t distinctCol; // distinct column
+ int32_t total;
+ int32_t add; //
+ int32_t del; //
+ int32_t update; //
+ int32_t terms; //
+ int32_t distCol; // distinct column
} SIndexStat;
struct SIndex {
+ SIndexOpts opts;
+
int64_t refId;
void* cache;
void* tindex;
SHashObj* colObj; // < field name, field id>
- int64_t suid; // current super table id, -1 is normal table
- int32_t cVersion; // current version allocated to cache
+ int64_t suid; // current super table id, -1 is normal table
+ int32_t version; // current version allocated to cache
SLRUCache* lru;
char* path;
@@ -68,7 +73,6 @@ struct SIndex {
TdThreadMutex mtx;
tsem_t sem;
bool quit;
- SIndexOpts opts;
};
struct SIndexMultiTermQuery {
@@ -111,14 +115,15 @@ typedef struct Iterate {
void iterateValueDestroy(IterateValue* iv, bool destroy);
-extern void* indexQhandle;
-
typedef struct TFileCacheKey {
uint64_t suid;
uint8_t colType;
char* colName;
int32_t nColName;
} ICacheKey;
+
+int32_t idxSerialCacheKey(ICacheKey* key, char* buf);
+
int idxFlushCacheToTFile(SIndex* sIdx, void*, bool quit);
int64_t idxAddRef(void* p);
@@ -126,10 +131,6 @@ int32_t idxRemoveRef(int64_t ref);
void idxAcquireRef(int64_t ref);
void idxReleaseRef(int64_t ref);
-int32_t idxSerialCacheKey(ICacheKey* key, char* buf);
-// int32_t indexSerialKey(ICacheKey* key, char* buf);
-// int32_t indexSerialTermKey(SIndexTerm* itm, char* buf);
-
#define IDX_TYPE_CONTAIN_EXTERN_TYPE(ty, exTy) (((ty >> 4) & (exTy)) != 0)
#define IDX_TYPE_GET_TYPE(ty) (ty & 0x0F)
diff --git a/source/libs/index/src/index.c b/source/libs/index/src/index.c
index be64a8b44d28a76a0b04a78b3940bcb0c86101da..f507e1b3bed918419ca292b6d88ea85311122222 100644
--- a/source/libs/index/src/index.c
+++ b/source/libs/index/src/index.c
@@ -25,10 +25,6 @@
#include "tref.h"
#include "tsched.h"
-#ifdef USE_LUCENE
-#include "lucene++/Lucene_c.h"
-#endif
-
#define INDEX_NUM_OF_THREADS 5
#define INDEX_QUEUE_SIZE 200
@@ -74,7 +70,7 @@ void indexCleanup() {
typedef struct SIdxColInfo {
int colId; // generated by index internal
- int cVersion;
+ int version;
} SIdxColInfo;
static TdThreadOnce isInit = PTHREAD_ONCE_INIT;
@@ -123,7 +119,7 @@ int indexOpen(SIndexOpts* opts, const char* path, SIndex** index) {
}
idx->colObj = taosHashInit(8, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, HASH_ENTRY_LOCK);
- idx->cVersion = 1;
+ idx->version = 1;
idx->path = tstrdup(path);
taosThreadMutexInit(&idx->mtx, NULL);
tsem_init(&idx->sem, 0, 0);
@@ -307,7 +303,7 @@ SIndexTerm* indexTermCreate(int64_t suid, SIndexOperOnColumn oper, uint8_t colTy
buf = strndup(INDEX_DATA_NULL_STR, (int32_t)strlen(INDEX_DATA_NULL_STR));
len = (int32_t)strlen(INDEX_DATA_NULL_STR);
} else {
- const char* emptyStr = " ";
+ static const char* emptyStr = " ";
buf = strndup(emptyStr, (int32_t)strlen(emptyStr));
len = (int32_t)strlen(emptyStr);
}
@@ -589,6 +585,12 @@ int idxFlushCacheToTFile(SIndex* sIdx, void* cache, bool quit) {
idxTRsltDestroy(tr);
int ret = idxGenTFile(sIdx, pCache, result);
+ if (ret != 0) {
+ indexError("failed to merge");
+ } else {
+ int64_t cost = taosGetTimestampUs() - st;
+ indexInfo("success to merge , time cost: %" PRId64 "ms", cost / 1000);
+ }
idxDestroyFinalRslt(result);
idxCacheDestroyImm(pCache);
@@ -599,12 +601,6 @@ int idxFlushCacheToTFile(SIndex* sIdx, void* cache, bool quit) {
tfileReaderUnRef(pReader);
idxCacheUnRef(pCache);
- int64_t cost = taosGetTimestampUs() - st;
- if (ret != 0) {
- indexError("failed to merge, time cost: %" PRId64 "ms", cost / 1000);
- } else {
- indexInfo("success to merge , time cost: %" PRId64 "ms", cost / 1000);
- }
atomic_store_32(&pCache->merging, 0);
if (quit) {
idxPost(sIdx);
diff --git a/source/libs/index/src/indexCache.c b/source/libs/index/src/indexCache.c
index 794b85d244f875b6874855ac6a36a7500114afd5..7e867db755c658abad341e8fa59cd6687ef9b959 100644
--- a/source/libs/index/src/indexCache.c
+++ b/source/libs/index/src/indexCache.c
@@ -566,7 +566,6 @@ int idxCachePut(void* cache, SIndexTerm* term, uint64_t uid) {
taosThreadMutexUnlock(&pCache->mtx);
idxCacheUnRef(pCache);
return 0;
- // encode end
}
void idxCacheForceToMerge(void* cache) {
IndexCache* pCache = cache;
@@ -602,10 +601,10 @@ static int32_t idxQueryMem(MemTable* mem, SIndexTermQuery* query, SIdxTRslt* tr,
}
}
int idxCacheSearch(void* cache, SIndexTermQuery* query, SIdxTRslt* result, STermValueType* s) {
- int64_t st = taosGetTimestampUs();
if (cache == NULL) {
return 0;
}
+
IndexCache* pCache = cache;
MemTable *mem = NULL, *imm = NULL;
@@ -616,6 +615,8 @@ int idxCacheSearch(void* cache, SIndexTermQuery* query, SIdxTRslt* result, STerm
idxMemRef(imm);
taosThreadMutexUnlock(&pCache->mtx);
+ int64_t st = taosGetTimestampUs();
+
int ret = (mem && mem->mem) ? idxQueryMem(mem, query, result, s) : 0;
if (ret == 0 && *s != kTypeDeletion) {
// continue search in imm
diff --git a/source/libs/index/src/indexComm.c b/source/libs/index/src/indexComm.c
index 4f33d98f9e4f7e5b210922b0dd6da0b5448d4472..ecf91360734e37f0060aeb7758e5c4c5d57d4972 100644
--- a/source/libs/index/src/indexComm.c
+++ b/source/libs/index/src/indexComm.c
@@ -81,28 +81,28 @@ __compar_fn_t idxGetCompar(int8_t type) {
}
return getComparFunc(type, 0);
}
-static TExeCond tCompareLessThan(void* a, void* b, int8_t type) {
+static FORCE_INLINE TExeCond tCompareLessThan(void* a, void* b, int8_t type) {
__compar_fn_t func = idxGetCompar(type);
return tCompare(func, QUERY_LESS_THAN, a, b, type);
}
-static TExeCond tCompareLessEqual(void* a, void* b, int8_t type) {
+static FORCE_INLINE TExeCond tCompareLessEqual(void* a, void* b, int8_t type) {
__compar_fn_t func = idxGetCompar(type);
return tCompare(func, QUERY_LESS_EQUAL, a, b, type);
}
-static TExeCond tCompareGreaterThan(void* a, void* b, int8_t type) {
+static FORCE_INLINE TExeCond tCompareGreaterThan(void* a, void* b, int8_t type) {
__compar_fn_t func = idxGetCompar(type);
return tCompare(func, QUERY_GREATER_THAN, a, b, type);
}
-static TExeCond tCompareGreaterEqual(void* a, void* b, int8_t type) {
+static FORCE_INLINE TExeCond tCompareGreaterEqual(void* a, void* b, int8_t type) {
__compar_fn_t func = idxGetCompar(type);
return tCompare(func, QUERY_GREATER_EQUAL, a, b, type);
}
-static TExeCond tCompareContains(void* a, void* b, int8_t type) {
+static FORCE_INLINE TExeCond tCompareContains(void* a, void* b, int8_t type) {
__compar_fn_t func = idxGetCompar(type);
return tCompare(func, QUERY_TERM, a, b, type);
}
-static TExeCond tCompareEqual(void* a, void* b, int8_t type) {
+static FORCE_INLINE TExeCond tCompareEqual(void* a, void* b, int8_t type) {
__compar_fn_t func = idxGetCompar(type);
return tCompare(func, QUERY_TERM, a, b, type);
}
@@ -171,15 +171,16 @@ TExeCond tCompare(__compar_fn_t func, int8_t cmptype, void* a, void* b, int8_t d
return tDoCompare(func, cmptype, &va, &vb);
}
assert(0);
+ return BREAK;
#endif
}
TExeCond tDoCompare(__compar_fn_t func, int8_t comparType, void* a, void* b) {
// optime later
int32_t ret = func(a, b);
switch (comparType) {
- case QUERY_LESS_THAN: {
+ case QUERY_LESS_THAN:
if (ret < 0) return MATCH;
- } break;
+ break;
case QUERY_LESS_EQUAL: {
if (ret <= 0) return MATCH;
break;
diff --git a/source/libs/index/src/indexFilter.c b/source/libs/index/src/indexFilter.c
index 21aeaba70bb02f6f44c2fc6d40d07515201ee25a..b65acc467215da77019235e5ec44a335b363e344 100644
--- a/source/libs/index/src/indexFilter.c
+++ b/source/libs/index/src/indexFilter.c
@@ -27,6 +27,44 @@
#define SIF_RET(c) do { int32_t _code = c; if (_code != TSDB_CODE_SUCCESS) { terrno = _code; } return _code; } while (0)
#define SIF_ERR_JRET(c) do { code = c; if (code != TSDB_CODE_SUCCESS) { terrno = code; goto _return; } } while (0)
// clang-format on
+
+typedef union {
+ uint8_t u8;
+ uint16_t u16;
+ uint32_t u32;
+ uint64_t u64;
+
+ int8_t i8;
+ int16_t i16;
+ int32_t i32;
+ int64_t i64;
+
+ double d;
+ float f;
+} SDataTypeBuf;
+
+#define SIF_DATA_CONVERT(type, val, dst) \
+ do { \
+ if (type == TSDB_DATA_TYPE_DOUBLE) \
+ dst = GET_DOUBLE_VAL(val); \
+ else if (type == TSDB_DATA_TYPE_BIGINT) \
+ dst = *(int64_t *)val; \
+ else if (type == TSDB_DATA_TYPE_INT) \
+ dst = *(int32_t *)val; \
+ else if (type == TSDB_DATA_TYPE_SMALLINT) \
+ dst = *(int16_t *)val; \
+ else if (type == TSDB_DATA_TYPE_TINYINT) \
+ dst = *(int8_t *)val; \
+ else if (type == TSDB_DATA_TYPE_UTINYINT) \
+ dst = *(uint8_t *)val; \
+ else if (type == TSDB_DATA_TYPE_USMALLINT) \
+ dst = *(uint16_t *)val; \
+ else if (type == TSDB_DATA_TYPE_UINT) \
+ dst = *(uint32_t *)val; \
+ else if (type == TSDB_DATA_TYPE_UBIGINT) \
+ dst = *(uint64_t *)val; \
+ } while (0)
+
typedef struct SIFParam {
SHashObj *pFilter;
@@ -48,10 +86,9 @@ typedef struct SIFCtx {
SHashObj *pRes; /* element is SIFParam */
bool noExec; // true: just iterate condition tree, and add hint to executor plan
SIndexMetaArg arg;
- // SIdxFltStatus st;
} SIFCtx;
-static int32_t sifGetFuncFromSql(EOperatorType src, EIndexQueryType *dst) {
+static FORCE_INLINE int32_t sifGetFuncFromSql(EOperatorType src, EIndexQueryType *dst) {
if (src == OP_TYPE_GREATER_THAN) {
*dst = QUERY_GREATER_THAN;
} else if (src == OP_TYPE_GREATER_EQUAL) {
@@ -73,15 +110,9 @@ static int32_t sifGetFuncFromSql(EOperatorType src, EIndexQueryType *dst) {
}
typedef int32_t (*sif_func_t)(SIFParam *left, SIFParam *rigth, SIFParam *output);
-
static sif_func_t sifNullFunc = NULL;
-// typedef struct SIFWalkParm
-// construct tag filter operator later
-// static void destroyTagFilterOperatorInfo(void *param) {
-// STagFilterOperatorInfo *pInfo = (STagFilterOperatorInfo *)param;
-//}
-static void sifFreeParam(SIFParam *param) {
+static FORCE_INLINE void sifFreeParam(SIFParam *param) {
if (param == NULL) return;
taosArrayDestroy(param->result);
@@ -91,7 +122,7 @@ static void sifFreeParam(SIFParam *param) {
param->pFilter = NULL;
}
-static int32_t sifGetOperParamNum(EOperatorType ty) {
+static FORCE_INLINE int32_t sifGetOperParamNum(EOperatorType ty) {
if (OP_TYPE_IS_NULL == ty || OP_TYPE_IS_NOT_NULL == ty || OP_TYPE_IS_TRUE == ty || OP_TYPE_IS_NOT_TRUE == ty ||
OP_TYPE_IS_FALSE == ty || OP_TYPE_IS_NOT_FALSE == ty || OP_TYPE_IS_UNKNOWN == ty ||
OP_TYPE_IS_NOT_UNKNOWN == ty || OP_TYPE_MINUS == ty) {
@@ -99,14 +130,14 @@ static int32_t sifGetOperParamNum(EOperatorType ty) {
}
return 2;
}
-static int32_t sifValidOp(EOperatorType ty) {
+static FORCE_INLINE int32_t sifValidOp(EOperatorType ty) {
if ((ty >= OP_TYPE_ADD && ty <= OP_TYPE_BIT_OR) || (ty == OP_TYPE_IN || ty == OP_TYPE_NOT_IN) ||
(ty == OP_TYPE_LIKE || ty == OP_TYPE_NOT_LIKE || ty == OP_TYPE_MATCH || ty == OP_TYPE_NMATCH)) {
return -1;
}
return 0;
}
-static int32_t sifValidColumn(SColumnNode *cn) {
+static FORCE_INLINE int32_t sifValidColumn(SColumnNode *cn) {
// add more check
if (cn == NULL) {
return TSDB_CODE_QRY_INVALID_INPUT;
@@ -117,7 +148,7 @@ static int32_t sifValidColumn(SColumnNode *cn) {
return TSDB_CODE_SUCCESS;
}
-static SIdxFltStatus sifMergeCond(ELogicConditionType type, SIdxFltStatus ls, SIdxFltStatus rs) {
+static FORCE_INLINE SIdxFltStatus sifMergeCond(ELogicConditionType type, SIdxFltStatus ls, SIdxFltStatus rs) {
// enh rule later
if (type == LOGIC_COND_TYPE_AND) {
if (ls == SFLT_NOT_INDEX || rs == SFLT_NOT_INDEX) {
@@ -135,7 +166,7 @@ static SIdxFltStatus sifMergeCond(ELogicConditionType type, SIdxFltStatus ls, SI
return SFLT_NOT_INDEX;
}
-static int32_t sifGetValueFromNode(SNode *node, char **value) {
+static FORCE_INLINE int32_t sifGetValueFromNode(SNode *node, char **value) {
// covert data From snode;
SValueNode *vn = (SValueNode *)node;
@@ -173,7 +204,7 @@ static int32_t sifGetValueFromNode(SNode *node, char **value) {
return TSDB_CODE_SUCCESS;
}
-static int32_t sifInitJsonParam(SNode *node, SIFParam *param, SIFCtx *ctx) {
+static FORCE_INLINE int32_t sifInitJsonParam(SNode *node, SIFParam *param, SIFCtx *ctx) {
SOperatorNode *nd = (SOperatorNode *)node;
assert(nodeType(node) == QUERY_NODE_OPERATOR);
SColumnNode *l = (SColumnNode *)nd->pLeft;
@@ -255,6 +286,13 @@ static int32_t sifInitOperParams(SIFParam **params, SOperatorNode *node, SIFCtx
if (node->opType == OP_TYPE_JSON_GET_VALUE) {
return code;
}
+ if ((node->pLeft != NULL && nodeType(node->pLeft) == QUERY_NODE_COLUMN) &&
+ (node->pRight != NULL && nodeType(node->pRight) == QUERY_NODE_VALUE)) {
+ SColumnNode *cn = (SColumnNode *)(node->pLeft);
+ if (cn->node.resType.type == TSDB_DATA_TYPE_JSON) {
+ SIF_ERR_RET(TSDB_CODE_QRY_INVALID_INPUT);
+ }
+ }
SIFParam *paramList = taosMemoryCalloc(nParam, sizeof(SIFParam));
if (NULL == paramList) {
@@ -316,30 +354,30 @@ static int32_t sifExecFunction(SFunctionNode *node, SIFCtx *ctx, SIFParam *outpu
return TSDB_CODE_QRY_INVALID_INPUT;
}
-typedef int (*Filter)(void *a, void *b, int16_t dtype);
+typedef int (*FilterFunc)(void *a, void *b, int16_t dtype);
-int sifGreaterThan(void *a, void *b, int16_t dtype) {
+static FORCE_INLINE int sifGreaterThan(void *a, void *b, int16_t dtype) {
__compar_fn_t func = getComparFunc(dtype, 0);
return tDoCompare(func, QUERY_GREATER_THAN, a, b);
}
-int sifGreaterEqual(void *a, void *b, int16_t dtype) {
+static FORCE_INLINE int sifGreaterEqual(void *a, void *b, int16_t dtype) {
__compar_fn_t func = getComparFunc(dtype, 0);
return tDoCompare(func, QUERY_GREATER_EQUAL, a, b);
}
-int sifLessEqual(void *a, void *b, int16_t dtype) {
+static FORCE_INLINE int sifLessEqual(void *a, void *b, int16_t dtype) {
__compar_fn_t func = getComparFunc(dtype, 0);
return tDoCompare(func, QUERY_LESS_EQUAL, a, b);
}
-int sifLessThan(void *a, void *b, int16_t dtype) {
+static FORCE_INLINE int sifLessThan(void *a, void *b, int16_t dtype) {
__compar_fn_t func = getComparFunc(dtype, 0);
return (int)tDoCompare(func, QUERY_LESS_THAN, a, b);
}
-int sifEqual(void *a, void *b, int16_t dtype) {
+static FORCE_INLINE int sifEqual(void *a, void *b, int16_t dtype) {
__compar_fn_t func = getComparFunc(dtype, 0);
//__compar_fn_t func = idxGetCompar(dtype);
return (int)tDoCompare(func, QUERY_TERM, a, b);
}
-static Filter sifGetFilterFunc(EIndexQueryType type, bool *reverse) {
+static FORCE_INLINE FilterFunc sifGetFilterFunc(EIndexQueryType type, bool *reverse) {
if (type == QUERY_LESS_EQUAL || type == QUERY_LESS_THAN) {
*reverse = true;
} else {
@@ -358,42 +396,6 @@ static Filter sifGetFilterFunc(EIndexQueryType type, bool *reverse) {
}
return NULL;
}
-typedef union {
- uint8_t u8;
- uint16_t u16;
- uint32_t u32;
- uint64_t u64;
-
- int8_t i8;
- int16_t i16;
- int32_t i32;
- int64_t i64;
-
- double d;
- float f;
-} SDataTypeBuf;
-
-#define SIF_DATA_CONVERT(type, val, dst) \
- do { \
- if (type == TSDB_DATA_TYPE_DOUBLE) \
- dst = GET_DOUBLE_VAL(val); \
- else if (type == TSDB_DATA_TYPE_BIGINT) \
- dst = *(int64_t *)val; \
- else if (type == TSDB_DATA_TYPE_INT) \
- dst = *(int32_t *)val; \
- else if (type == TSDB_DATA_TYPE_SMALLINT) \
- dst = *(int16_t *)val; \
- else if (type == TSDB_DATA_TYPE_TINYINT) \
- dst = *(int8_t *)val; \
- else if (type == TSDB_DATA_TYPE_UTINYINT) \
- dst = *(uint8_t *)val; \
- else if (type == TSDB_DATA_TYPE_USMALLINT) \
- dst = *(uint16_t *)val; \
- else if (type == TSDB_DATA_TYPE_UINT) \
- dst = *(uint32_t *)val; \
- else if (type == TSDB_DATA_TYPE_UBIGINT) \
- dst = *(uint64_t *)val; \
- } while (0);
static void sifSetFltParam(SIFParam *left, SIFParam *right, SDataTypeBuf *typedata, SMetaFltParam *param) {
int8_t ltype = left->colValType, rtype = right->colValType;
@@ -467,8 +469,8 @@ static int32_t sifDoIndex(SIFParam *left, SIFParam *right, int8_t operType, SIFP
indexMultiTermQueryAdd(mtm, tm, qtype);
ret = indexJsonSearch(arg->ivtIdx, mtm, output->result);
} else {
- bool reverse;
- Filter filterFunc = sifGetFilterFunc(qtype, &reverse);
+ bool reverse;
+ FilterFunc filterFunc = sifGetFilterFunc(qtype, &reverse);
SMetaFltParam param = {.suid = arg->suid,
.cid = left->colId,
@@ -495,72 +497,72 @@ static int32_t sifDoIndex(SIFParam *left, SIFParam *right, int8_t operType, SIFP
return ret;
}
-static int32_t sifLessThanFunc(SIFParam *left, SIFParam *right, SIFParam *output) {
+static FORCE_INLINE int32_t sifLessThanFunc(SIFParam *left, SIFParam *right, SIFParam *output) {
int id = OP_TYPE_LOWER_THAN;
return sifDoIndex(left, right, id, output);
}
-static int32_t sifLessEqualFunc(SIFParam *left, SIFParam *right, SIFParam *output) {
+static FORCE_INLINE int32_t sifLessEqualFunc(SIFParam *left, SIFParam *right, SIFParam *output) {
int id = OP_TYPE_LOWER_EQUAL;
return sifDoIndex(left, right, id, output);
}
-static int32_t sifGreaterThanFunc(SIFParam *left, SIFParam *right, SIFParam *output) {
+static FORCE_INLINE int32_t sifGreaterThanFunc(SIFParam *left, SIFParam *right, SIFParam *output) {
int id = OP_TYPE_GREATER_THAN;
return sifDoIndex(left, right, id, output);
}
-static int32_t sifGreaterEqualFunc(SIFParam *left, SIFParam *right, SIFParam *output) {
+static FORCE_INLINE int32_t sifGreaterEqualFunc(SIFParam *left, SIFParam *right, SIFParam *output) {
int id = OP_TYPE_GREATER_EQUAL;
return sifDoIndex(left, right, id, output);
}
-static int32_t sifEqualFunc(SIFParam *left, SIFParam *right, SIFParam *output) {
+static FORCE_INLINE int32_t sifEqualFunc(SIFParam *left, SIFParam *right, SIFParam *output) {
int id = OP_TYPE_EQUAL;
return sifDoIndex(left, right, id, output);
}
-static int32_t sifNotEqualFunc(SIFParam *left, SIFParam *right, SIFParam *output) {
+static FORCE_INLINE int32_t sifNotEqualFunc(SIFParam *left, SIFParam *right, SIFParam *output) {
int id = OP_TYPE_NOT_EQUAL;
return sifDoIndex(left, right, id, output);
}
-static int32_t sifInFunc(SIFParam *left, SIFParam *right, SIFParam *output) {
+static FORCE_INLINE int32_t sifInFunc(SIFParam *left, SIFParam *right, SIFParam *output) {
int id = OP_TYPE_IN;
return sifDoIndex(left, right, id, output);
}
-static int32_t sifNotInFunc(SIFParam *left, SIFParam *right, SIFParam *output) {
+static FORCE_INLINE int32_t sifNotInFunc(SIFParam *left, SIFParam *right, SIFParam *output) {
int id = OP_TYPE_NOT_IN;
return sifDoIndex(left, right, id, output);
}
-static int32_t sifLikeFunc(SIFParam *left, SIFParam *right, SIFParam *output) {
+static FORCE_INLINE int32_t sifLikeFunc(SIFParam *left, SIFParam *right, SIFParam *output) {
int id = OP_TYPE_LIKE;
return sifDoIndex(left, right, id, output);
}
-static int32_t sifNotLikeFunc(SIFParam *left, SIFParam *right, SIFParam *output) {
+static FORCE_INLINE int32_t sifNotLikeFunc(SIFParam *left, SIFParam *right, SIFParam *output) {
int id = OP_TYPE_NOT_LIKE;
return sifDoIndex(left, right, id, output);
}
-static int32_t sifMatchFunc(SIFParam *left, SIFParam *right, SIFParam *output) {
+static FORCE_INLINE int32_t sifMatchFunc(SIFParam *left, SIFParam *right, SIFParam *output) {
int id = OP_TYPE_MATCH;
return sifDoIndex(left, right, id, output);
}
-static int32_t sifNotMatchFunc(SIFParam *left, SIFParam *right, SIFParam *output) {
+static FORCE_INLINE int32_t sifNotMatchFunc(SIFParam *left, SIFParam *right, SIFParam *output) {
int id = OP_TYPE_NMATCH;
return sifDoIndex(left, right, id, output);
}
-static int32_t sifJsonContains(SIFParam *left, SIFParam *right, SIFParam *output) {
+static FORCE_INLINE int32_t sifJsonContains(SIFParam *left, SIFParam *right, SIFParam *output) {
int id = OP_TYPE_JSON_CONTAINS;
return sifDoIndex(left, right, id, output);
}
-static int32_t sifJsonGetValue(SIFParam *left, SIFParam *rigth, SIFParam *output) {
+static FORCE_INLINE int32_t sifJsonGetValue(SIFParam *left, SIFParam *rigth, SIFParam *output) {
// return 0
return 0;
}
-static int32_t sifDefaultFunc(SIFParam *left, SIFParam *right, SIFParam *output) {
+static FORCE_INLINE int32_t sifDefaultFunc(SIFParam *left, SIFParam *right, SIFParam *output) {
// add more except
return TSDB_CODE_QRY_INVALID_INPUT;
}
-static int32_t sifGetOperFn(int32_t funcId, sif_func_t *func, SIdxFltStatus *status) {
+static FORCE_INLINE int32_t sifGetOperFn(int32_t funcId, sif_func_t *func, SIdxFltStatus *status) {
// impl later
*status = SFLT_ACCURATE_INDEX;
switch (funcId) {
@@ -686,11 +688,8 @@ static int32_t sifExecLogic(SLogicConditionNode *node, SIFCtx *ctx, SIFParam *ou
for (int32_t m = 0; m < node->pParameterList->length; m++) {
if (node->condType == LOGIC_COND_TYPE_AND) {
taosArrayAddAll(output->result, params[m].result);
- // taosArrayDestroy(params[m].result);
- // params[m].result = NULL;
} else if (node->condType == LOGIC_COND_TYPE_OR) {
taosArrayAddAll(output->result, params[m].result);
- // params[m].result = NULL;
} else if (node->condType == LOGIC_COND_TYPE_NOT) {
// taosArrayAddAll(output->result, params[m].result);
}
diff --git a/source/libs/index/src/indexFst.c b/source/libs/index/src/indexFst.c
index 15152cef55c221f8a93bfee533dc6a9750f1db4b..2aa8345e03bb3cec2a86c4240351dc86cb3ec9c7 100644
--- a/source/libs/index/src/indexFst.c
+++ b/source/libs/index/src/indexFst.c
@@ -19,11 +19,12 @@
#include "tchecksum.h"
#include "tcoding.h"
-static void fstPackDeltaIn(IdxFstFile* wrt, CompiledAddr nodeAddr, CompiledAddr transAddr, uint8_t nBytes) {
+static FORCE_INLINE void fstPackDeltaIn(IdxFstFile* wrt, CompiledAddr nodeAddr, CompiledAddr transAddr,
+ uint8_t nBytes) {
CompiledAddr deltaAddr = (transAddr == EMPTY_ADDRESS) ? EMPTY_ADDRESS : nodeAddr - transAddr;
idxFilePackUintIn(wrt, deltaAddr, nBytes);
}
-static uint8_t fstPackDetla(IdxFstFile* wrt, CompiledAddr nodeAddr, CompiledAddr transAddr) {
+static FORCE_INLINE uint8_t fstPackDetla(IdxFstFile* wrt, CompiledAddr nodeAddr, CompiledAddr transAddr) {
uint8_t nBytes = packDeltaSize(nodeAddr, transAddr);
fstPackDeltaIn(wrt, nodeAddr, transAddr, nBytes);
return nBytes;
@@ -39,7 +40,7 @@ FstUnFinishedNodes* fstUnFinishedNodesCreate() {
fstUnFinishedNodesPushEmpty(nodes, false);
return nodes;
}
-static void unFinishedNodeDestroyElem(void* elem) {
+static FORCE_INLINE void unFinishedNodeDestroyElem(void* elem) {
FstBuilderNodeUnfinished* b = (FstBuilderNodeUnfinished*)elem;
fstBuilderNodeDestroy(b->node);
taosMemoryFree(b->last);
diff --git a/source/libs/index/src/indexFstFile.c b/source/libs/index/src/indexFstFile.c
index 4f278c7af6adfa8ed4e890b06944d5d5c9560f43..2a33ddd477daa8c1de9e64e958be0d15865f9efb 100644
--- a/source/libs/index/src/indexFstFile.c
+++ b/source/libs/index/src/indexFstFile.c
@@ -30,23 +30,24 @@ typedef struct {
static void deleteDataBlockFromLRU(const void* key, size_t keyLen, void* value) { taosMemoryFree(value); }
-static void idxGenLRUKey(char* buf, const char* path, int32_t blockId) {
+static FORCE_INLINE void idxGenLRUKey(char* buf, const char* path, int32_t blockId) {
char* p = buf;
SERIALIZE_STR_VAR_TO_BUF(p, path, strlen(path));
SERIALIZE_VAR_TO_BUF(p, '_', char);
idxInt2str(blockId, p, 0);
return;
}
-static int idxFileCtxDoWrite(IFileCtx* ctx, uint8_t* buf, int len) {
+static FORCE_INLINE int idxFileCtxDoWrite(IFileCtx* ctx, uint8_t* buf, int len) {
if (ctx->type == TFILE) {
- assert(len == taosWriteFile(ctx->file.pFile, buf, len));
+ int nwr = taosWriteFile(ctx->file.pFile, buf, len);
+ assert(nwr == len);
} else {
memcpy(ctx->mem.buf + ctx->offset, buf, len);
}
ctx->offset += len;
return len;
}
-static int idxFileCtxDoRead(IFileCtx* ctx, uint8_t* buf, int len) {
+static FORCE_INLINE int idxFileCtxDoRead(IFileCtx* ctx, uint8_t* buf, int len) {
int nRead = 0;
if (ctx->type == TFILE) {
#ifdef USE_MMAP
@@ -110,7 +111,7 @@ static int idxFileCtxDoReadFrom(IFileCtx* ctx, uint8_t* buf, int len, int32_t of
} while (len > 0);
return total;
}
-static int idxFileCtxGetSize(IFileCtx* ctx) {
+static FORCE_INLINE int idxFileCtxGetSize(IFileCtx* ctx) {
if (ctx->type == TFILE) {
int64_t file_size = 0;
taosStatFile(ctx->file.buf, &file_size, NULL);
@@ -118,7 +119,7 @@ static int idxFileCtxGetSize(IFileCtx* ctx) {
}
return 0;
}
-static int idxFileCtxDoFlush(IFileCtx* ctx) {
+static FORCE_INLINE int idxFileCtxDoFlush(IFileCtx* ctx) {
if (ctx->type == TFILE) {
taosFsyncFile(ctx->file.pFile);
} else {
@@ -210,9 +211,7 @@ IdxFstFile* idxFileCreate(void* wrt) {
return cw;
}
void idxFileDestroy(IdxFstFile* cw) {
- // free wrt object: close fd or free mem
idxFileFlush(cw);
- // idxFileCtxDestroy((IFileCtx *)(cw->wrt));
taosMemoryFree(cw);
}
@@ -221,10 +220,8 @@ int idxFileWrite(IdxFstFile* write, uint8_t* buf, uint32_t len) {
return 0;
}
// update checksum
- // write data to file/socket or mem
IFileCtx* ctx = write->wrt;
-
- int nWrite = ctx->write(ctx, buf, len);
+ int nWrite = ctx->write(ctx, buf, len);
assert(nWrite == len);
write->count += len;
diff --git a/source/libs/index/src/indexFstRegister.c b/source/libs/index/src/indexFstRegister.c
index 34efee0d0db510ea1ce50de26c418ae1fd08761e..e0abcadc78a07b0f69ef92003d4304141551865e 100644
--- a/source/libs/index/src/indexFstRegister.c
+++ b/source/libs/index/src/indexFstRegister.c
@@ -16,7 +16,7 @@
#include "indexFstRegistry.h"
#include "os.h"
-uint64_t fstRegistryHash(FstRegistry* registry, FstBuilderNode* bNode) {
+static FORCE_INLINE uint64_t fstRegistryHash(FstRegistry* registry, FstBuilderNode* bNode) {
// TODO(yihaoDeng): refactor later
const uint64_t FNV_PRIME = 1099511628211;
uint64_t h = 14695981039346656037u;
diff --git a/source/libs/index/src/indexFstSparse.c b/source/libs/index/src/indexFstSparse.c
index ebc0cb3637dc14a1968afe7d9669c7eabdf99427..8746b04eab9c2ea46117e1287ebd934a0a5e4eb9 100644
--- a/source/libs/index/src/indexFstSparse.c
+++ b/source/libs/index/src/indexFstSparse.c
@@ -15,7 +15,7 @@
#include "indexFstSparse.h"
-static void sparSetUtil(int32_t *buf, int32_t cap) {
+static FORCE_INLINE void sparSetInitBuf(int32_t *buf, int32_t cap) {
for (int32_t i = 0; i < cap; i++) {
buf[i] = -1;
}
@@ -28,8 +28,8 @@ FstSparseSet *sparSetCreate(int32_t sz) {
ss->dense = (int32_t *)taosMemoryMalloc(sz * sizeof(int32_t));
ss->sparse = (int32_t *)taosMemoryMalloc(sz * sizeof(int32_t));
- sparSetUtil(ss->dense, sz);
- sparSetUtil(ss->sparse, sz);
+ sparSetInitBuf(ss->dense, sz);
+ sparSetInitBuf(ss->sparse, sz);
ss->cap = sz;
@@ -90,7 +90,7 @@ void sparSetClear(FstSparseSet *ss) {
if (ss == NULL) {
return;
}
- sparSetUtil(ss->dense, ss->cap);
- sparSetUtil(ss->sparse, ss->cap);
+ sparSetInitBuf(ss->dense, ss->cap);
+ sparSetInitBuf(ss->sparse, ss->cap);
ss->size = 0;
}
diff --git a/source/libs/index/src/indexTfile.c b/source/libs/index/src/indexTfile.c
index 0a47fc0f167a359b35952f0c1e88af03d544c95d..c3be0ea6f54871692ee6e15898a49cd20d731559 100644
--- a/source/libs/index/src/indexTfile.c
+++ b/source/libs/index/src/indexTfile.c
@@ -183,13 +183,14 @@ TFileReader* tfileReaderCreate(IFileCtx* ctx) {
return NULL;
}
reader->ctx = ctx;
+ reader->remove = false;
if (0 != tfileReaderVerify(reader)) {
indexError("invalid tfile, suid:%" PRIu64 ", colName:%s", reader->header.suid, reader->header.colName);
tfileReaderDestroy(reader);
return NULL;
}
- // T_REF_INC(reader);
+
if (0 != tfileReaderLoadHeader(reader)) {
indexError("failed to load index header, suid:%" PRIu64 ", colName:%s", reader->header.suid,
reader->header.colName);
@@ -203,7 +204,6 @@ TFileReader* tfileReaderCreate(IFileCtx* ctx) {
tfileReaderDestroy(reader);
return NULL;
}
- reader->remove = false;
return reader;
}
@@ -211,7 +211,6 @@ void tfileReaderDestroy(TFileReader* reader) {
if (reader == NULL) {
return;
}
- // T_REF_INC(reader);
fstDestroy(reader->fst);
if (reader->remove) {
indexInfo("%s is removed", reader->ctx->file.buf);
@@ -222,6 +221,7 @@ void tfileReaderDestroy(TFileReader* reader) {
taosMemoryFree(reader);
}
+
static int32_t tfSearchTerm(void* reader, SIndexTerm* tem, SIdxTRslt* tr) {
int ret = 0;
char* p = tem->colVal;
@@ -494,7 +494,6 @@ int tfileReaderSearch(TFileReader* reader, SIndexTermQuery* query, SIdxTRslt* tr
TFileWriter* tfileWriterOpen(char* path, uint64_t suid, int64_t version, const char* colName, uint8_t colType) {
char fullname[256] = {0};
tfileGenFileFullName(fullname, path, suid, colName, version);
- // indexInfo("open write file name %s", fullname);
IFileCtx* wcx = idxFileCtxCreate(TFILE, fullname, false, 1024 * 1024 * 64);
if (wcx == NULL) {
return NULL;
@@ -503,8 +502,8 @@ TFileWriter* tfileWriterOpen(char* path, uint64_t suid, int64_t version, const c
TFileHeader tfh = {0};
tfh.suid = suid;
tfh.version = version;
- memcpy(tfh.colName, colName, strlen(colName));
tfh.colType = colType;
+ memcpy(tfh.colName, colName, strlen(colName));
return tfileWriterCreate(wcx, &tfh);
}
@@ -706,7 +705,6 @@ static bool tfileIteratorNext(Iterate* iiter) {
iv->type = ADD_VALUE; // value in tfile always ADD_VALUE
iv->colVal = colVal;
return true;
- // std::string key(ch, sz);
}
static IterateValue* tifileIterateGetValue(Iterate* iter) { return &iter->val; }
@@ -1036,7 +1034,8 @@ static void tfileGenFileName(char* filename, uint64_t suid, const char* col, int
sprintf(filename, "%" PRIu64 "-%s-%" PRId64 ".tindex", suid, col, version);
return;
}
-static void tfileGenFileFullName(char* fullname, const char* path, uint64_t suid, const char* col, int64_t version) {
+static void FORCE_INLINE tfileGenFileFullName(char* fullname, const char* path, uint64_t suid, const char* col,
+ int64_t version) {
char filename[128] = {0};
tfileGenFileName(filename, suid, col, version);
sprintf(fullname, "%s/%s", path, filename);
diff --git a/source/libs/index/src/indexUtil.c b/source/libs/index/src/indexUtil.c
index 3d083c1817f4b8b3930da7d0bed12e278d948d87..cdfb79016f46658e6259cb7d89dc501e386d408f 100644
--- a/source/libs/index/src/indexUtil.c
+++ b/source/libs/index/src/indexUtil.c
@@ -21,7 +21,7 @@ typedef struct MergeIndex {
int len;
} MergeIndex;
-static int iBinarySearch(SArray *arr, int s, int e, uint64_t k) {
+static FORCE_INLINE int iBinarySearch(SArray *arr, int s, int e, uint64_t k) {
uint64_t v;
int32_t m;
while (s <= e) {
diff --git a/source/libs/index/test/CMakeLists.txt b/source/libs/index/test/CMakeLists.txt
index b3eca280032e56004e649b2d2cef44ec1672d8ac..2bc7353aa51e85cf9c9e1c27607e10d76337ff58 100644
--- a/source/libs/index/test/CMakeLists.txt
+++ b/source/libs/index/test/CMakeLists.txt
@@ -80,6 +80,11 @@ IF(NOT TD_DARWIN)
"${TD_SOURCE_DIR}/include/libs/index"
"${CMAKE_CURRENT_SOURCE_DIR}/../inc"
)
+ target_include_directories (idxJsonUT
+ PUBLIC
+ "${TD_SOURCE_DIR}/include/libs/index"
+ "${CMAKE_CURRENT_SOURCE_DIR}/../inc"
+ )
target_link_libraries (idxTest
os
@@ -102,11 +107,7 @@ IF(NOT TD_DARWIN)
gtest_main
index
)
- target_include_directories (idxJsonUT
- PUBLIC
- "${TD_SOURCE_DIR}/include/libs/index"
- "${CMAKE_CURRENT_SOURCE_DIR}/../inc"
- )
+
target_link_libraries (idxTest
os
util
diff --git a/source/libs/index/test/indexBench.cc b/source/libs/index/test/indexBench.cc
new file mode 100644
index 0000000000000000000000000000000000000000..b828be0ffe97ee94c6b19e52c71d049ae023b66a
--- /dev/null
+++ b/source/libs/index/test/indexBench.cc
@@ -0,0 +1,140 @@
+/*
+ * Copyright (c) 2019 TAOS Data, Inc.
+ *
+ * This program is free software: you can use, redistribute, and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3
+ * or later ("AGPL"), as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#include <gtest/gtest.h>
+#include <algorithm>
+#include <iostream>
+#include <string>
+#include <thread>
+#include "index.h"
+#include "indexCache.h"
+#include "indexFst.h"
+#include "indexFstUtil.h"
+#include "indexInt.h"
+#include "indexTfile.h"
+#include "indexUtil.h"
+#include "tskiplist.h"
+#include "tutil.h"
+using namespace std;
+
+static std::string logDir = TD_TMP_DIR_PATH "log";
+
+static void initLog() {
+ const char *defaultLogFileNamePrefix = "taoslog";
+ const int32_t maxLogFileNum = 10;
+
+ tsAsyncLog = 0;
+ idxDebugFlag = 143;
+ strcpy(tsLogDir, logDir.c_str());
+ taosRemoveDir(tsLogDir);
+ taosMkDir(tsLogDir);
+
+ if (taosInitLog(defaultLogFileNamePrefix, maxLogFileNum) < 0) {
+ printf("failed to open log file in directory:%s\n", tsLogDir);
+ }
+}
+
+struct WriteBatch {
+ SIndexMultiTerm *terms;
+};
+class Idx {
+ public:
+ Idx(int _cacheSize = 1024 * 1024 * 4, const char *_path = "tindex") {
+ opts.cacheSize = _cacheSize;
+ path += TD_TMP_DIR_PATH;
+ path += _path;
+ }
+ int SetUp(bool remove) {
+ initLog();
+
+ if (remove) taosRemoveDir(path.c_str());
+
+ int ret = indexJsonOpen(&opts, path.c_str(), &index);
+ return ret;
+ }
+ int Write(WriteBatch *batch, uint64_t uid) {
+ // write batch
+ indexJsonPut(index, batch->terms, uid);
+ return 0;
+ }
+ int Read(const char *json, void *key, int64_t *id) {
+ // read batch
+ return 0;
+ }
+
+ void TearDown() { indexJsonClose(index); }
+
+ std::string path;
+
+ SIndexOpts opts;
+ SIndex *index;
+};
+
+SIndexTerm *indexTermCreateT(int64_t suid, SIndexOperOnColumn oper, uint8_t colType, const char *colName,
+ int32_t nColName, const char *colVal, int32_t nColVal) {
+ char buf[256] = {0};
+ int16_t sz = nColVal;
+ memcpy(buf, (uint16_t *)&sz, 2);
+ memcpy(buf + 2, colVal, nColVal);
+ if (colType == TSDB_DATA_TYPE_BINARY) {
+ return indexTermCreate(suid, oper, colType, colName, nColName, buf, sizeof(buf));
+ } else {
+ return indexTermCreate(suid, oper, colType, colName, nColName, colVal, nColVal);
+ }
+ return NULL;
+}
+int initWriteBatch(WriteBatch *wb, int batchSize) {
+ SIndexMultiTerm *terms = indexMultiTermCreate();
+
+ std::string colName;
+ std::string colVal;
+
+ for (int i = 0; i < 64; i++) {
+ colName += '0' + i;
+ colVal += '0' + i;
+ }
+
+ for (int i = 0; i < batchSize; i++) {
+ colVal[i % colVal.size()] = '0' + i % 128;
+ colName[i % colName.size()] = '0' + i % 128;
+ SIndexTerm *term = indexTermCreateT(0, ADD_VALUE, TSDB_DATA_TYPE_BINARY, colName.c_str(), colName.size(),
+ colVal.c_str(), colVal.size());
+ indexMultiTermAdd(terms, term);
+ }
+
+ wb->terms = terms;
+ return 0;
+}
+
+int BenchWrite(Idx *idx, int batchSize, int limit) {
+ for (int i = 0; i < limit; i += batchSize) {
+ WriteBatch wb;
+ idx->Write(&wb, i);
+ }
+ return 0;
+}
+
+int BenchRead(Idx *idx) { return 0; }
+
+int main() {
+ // Idx *idx = new Idx;
+ // if (idx->SetUp(true) != 0) {
+ // std::cout << "failed to setup index" << std::endl;
+ // return 0;
+ // } else {
+ // std::cout << "succ to setup index" << std::endl;
+ // }
+ // BenchWrite(idx, 100, 10000);
+ return 1;
+}
diff --git a/source/libs/index/test/indexTests.cc b/source/libs/index/test/indexTests.cc
index 5b76de2ef89fdce4780fcf94a1360d68f7684a9e..08bf84ff60fdc07393abf546630c67dd52f6abc1 100644
--- a/source/libs/index/test/indexTests.cc
+++ b/source/libs/index/test/indexTests.cc
@@ -271,20 +271,20 @@ void validateFst() {
}
delete m;
}
-static std::string logDir = TD_TMP_DIR_PATH "log";
-
-static void initLog() {
- const char* defaultLogFileNamePrefix = "taoslog";
- const int32_t maxLogFileNum = 10;
- tsAsyncLog = 0;
- idxDebugFlag = 143;
- strcpy(tsLogDir, logDir.c_str());
- taosRemoveDir(tsLogDir);
- taosMkDir(tsLogDir);
-
- if (taosInitLog(defaultLogFileNamePrefix, maxLogFileNum) < 0) {
- printf("failed to open log file in directory:%s\n", tsLogDir);
+static std::string logDir = TD_TMP_DIR_PATH "log";
+static void initLog() {
+ const char* defaultLogFileNamePrefix = "taoslog";
+ const int32_t maxLogFileNum = 10;
+
+ tsAsyncLog = 0;
+ idxDebugFlag = 143;
+ strcpy(tsLogDir, logDir.c_str());
+ taosRemoveDir(tsLogDir);
+ taosMkDir(tsLogDir);
+
+ if (taosInitLog(defaultLogFileNamePrefix, maxLogFileNum) < 0) {
+ printf("failed to open log file in directory:%s\n", tsLogDir);
}
}
class IndexEnv : public ::testing::Test {
diff --git a/source/libs/monitor/src/monMsg.c b/source/libs/monitor/src/monMsg.c
index 8fa7e8860509ca473ad41b1c89efbf430f0c2649..bbee8b1166903bfcafb611baf7b2bf8ed8b48699 100644
--- a/source/libs/monitor/src/monMsg.c
+++ b/source/libs/monitor/src/monMsg.c
@@ -510,6 +510,7 @@ int32_t tSerializeSMonVloadInfo(void *buf, int32_t bufLen, SMonVloadInfo *pInfo)
SVnodeLoad *pLoad = taosArrayGet(pInfo->pVloads, i);
if (tEncodeI32(&encoder, pLoad->vgId) < 0) return -1;
if (tEncodeI32(&encoder, pLoad->syncState) < 0) return -1;
+ if (tEncodeI64(&encoder, pLoad->cacheUsage) < 0) return -1;
if (tEncodeI64(&encoder, pLoad->numOfTables) < 0) return -1;
if (tEncodeI64(&encoder, pLoad->numOfTimeSeries) < 0) return -1;
if (tEncodeI64(&encoder, pLoad->totalStorage) < 0) return -1;
@@ -544,6 +545,7 @@ int32_t tDeserializeSMonVloadInfo(void *buf, int32_t bufLen, SMonVloadInfo *pInf
SVnodeLoad load = {0};
if (tDecodeI32(&decoder, &load.vgId) < 0) return -1;
if (tDecodeI32(&decoder, &load.syncState) < 0) return -1;
+ if (tDecodeI64(&decoder, &load.cacheUsage) < 0) return -1;
if (tDecodeI64(&decoder, &load.numOfTables) < 0) return -1;
if (tDecodeI64(&decoder, &load.numOfTimeSeries) < 0) return -1;
if (tDecodeI64(&decoder, &load.totalStorage) < 0) return -1;
@@ -594,7 +596,6 @@ int32_t tDeserializeSMonMloadInfo(void *buf, int32_t bufLen, SMonMloadInfo *pInf
return 0;
}
-
int32_t tSerializeSQnodeLoad(void *buf, int32_t bufLen, SQnodeLoad *pInfo) {
SEncoder encoder = {0};
tEncoderInit(&encoder, buf, bufLen);
@@ -639,5 +640,3 @@ int32_t tDeserializeSQnodeLoad(void *buf, int32_t bufLen, SQnodeLoad *pInfo) {
tDecoderClear(&decoder);
return 0;
}
-
-
diff --git a/source/libs/nodes/src/nodesCloneFuncs.c b/source/libs/nodes/src/nodesCloneFuncs.c
index 9390d129df4f536070320246555d690ab8b4972c..eb0b604d37459342a403ab5120c22e2bf5dc4b13 100644
--- a/source/libs/nodes/src/nodesCloneFuncs.c
+++ b/source/libs/nodes/src/nodesCloneFuncs.c
@@ -545,6 +545,7 @@ static int32_t physiSysTableScanCopy(const SSystemTableScanPhysiNode* pSrc, SSys
COPY_OBJECT_FIELD(mgmtEpSet, sizeof(SEpSet));
COPY_SCALAR_FIELD(showRewrite);
COPY_SCALAR_FIELD(accountId);
+ COPY_SCALAR_FIELD(sysInfo);
return TSDB_CODE_SUCCESS;
}
@@ -776,6 +777,7 @@ SNode* nodesCloneNode(const SNode* pNode) {
code = physiSessionCopy((const SSessionWinodwPhysiNode*)pNode, (SSessionWinodwPhysiNode*)pDst);
break;
case QUERY_NODE_PHYSICAL_PLAN_PARTITION:
+ case QUERY_NODE_PHYSICAL_PLAN_STREAM_PARTITION:
code = physiPartitionCopy((const SPartitionPhysiNode*)pNode, (SPartitionPhysiNode*)pDst);
break;
default:
diff --git a/source/libs/nodes/src/nodesCodeFuncs.c b/source/libs/nodes/src/nodesCodeFuncs.c
index 0f32001c476c2e98465a0ce3f07fb705c3f53244..a41462e3fd879a73baeb37ee829724fda560719c 100644
--- a/source/libs/nodes/src/nodesCodeFuncs.c
+++ b/source/libs/nodes/src/nodesCodeFuncs.c
@@ -265,6 +265,8 @@ const char* nodesNodeName(ENodeType type) {
return "PhysiStreamStateWindow";
case QUERY_NODE_PHYSICAL_PLAN_PARTITION:
return "PhysiPartition";
+ case QUERY_NODE_PHYSICAL_PLAN_STREAM_PARTITION:
+ return "PhysiStreamPartition";
case QUERY_NODE_PHYSICAL_PLAN_INDEF_ROWS_FUNC:
return "PhysiIndefRowsFunc";
case QUERY_NODE_PHYSICAL_PLAN_INTERP_FUNC:
@@ -360,18 +362,14 @@ static int32_t jsonToTableComInfo(const SJson* pJson, void* pObj) {
int32_t code;
tjsonGetNumberValue(pJson, jkTableComInfoNumOfTags, pNode->numOfTags, code);
- ;
if (TSDB_CODE_SUCCESS == code) {
tjsonGetNumberValue(pJson, jkTableComInfoPrecision, pNode->precision, code);
- ;
}
if (TSDB_CODE_SUCCESS == code) {
tjsonGetNumberValue(pJson, jkTableComInfoNumOfColumns, pNode->numOfColumns, code);
- ;
}
if (TSDB_CODE_SUCCESS == code) {
tjsonGetNumberValue(pJson, jkTableComInfoRowSize, pNode->rowSize, code);
- ;
}
return code;
@@ -404,14 +402,11 @@ static int32_t jsonToSchema(const SJson* pJson, void* pObj) {
int32_t code;
tjsonGetNumberValue(pJson, jkSchemaType, pNode->type, code);
- ;
if (TSDB_CODE_SUCCESS == code) {
tjsonGetNumberValue(pJson, jkSchemaColId, pNode->colId, code);
- ;
}
if (TSDB_CODE_SUCCESS == code) {
tjsonGetNumberValue(pJson, jkSchemaBytes, pNode->bytes, code);
- ;
}
if (TSDB_CODE_SUCCESS == code) {
code = tjsonGetStringValue(pJson, jkSchemaName, pNode->name);
@@ -464,26 +459,20 @@ static int32_t jsonToTableMeta(const SJson* pJson, void* pObj) {
int32_t code;
tjsonGetNumberValue(pJson, jkTableMetaVgId, pNode->vgId, code);
- ;
if (TSDB_CODE_SUCCESS == code) {
tjsonGetNumberValue(pJson, jkTableMetaTableType, pNode->tableType, code);
- ;
}
if (TSDB_CODE_SUCCESS == code) {
tjsonGetNumberValue(pJson, jkTableMetaUid, pNode->uid, code);
- ;
}
if (TSDB_CODE_SUCCESS == code) {
tjsonGetNumberValue(pJson, jkTableMetaSuid, pNode->suid, code);
- ;
}
if (TSDB_CODE_SUCCESS == code) {
tjsonGetNumberValue(pJson, jkTableMetaSversion, pNode->sversion, code);
- ;
}
if (TSDB_CODE_SUCCESS == code) {
tjsonGetNumberValue(pJson, jkTableMetaTversion, pNode->tversion, code);
- ;
}
if (TSDB_CODE_SUCCESS == code) {
code = tjsonToObject(pJson, jkTableMetaComInfo, jsonToTableComInfo, &pNode->tableInfo);
@@ -924,7 +913,6 @@ static int32_t jsonToLogicFillNode(const SJson* pJson, void* pObj) {
int32_t code = jsonToLogicPlanNode(pJson, pObj);
if (TSDB_CODE_SUCCESS == code) {
tjsonGetNumberValue(pJson, jkFillLogicPlanMode, pNode->mode, code);
- ;
}
if (TSDB_CODE_SUCCESS == code) {
code = jsonToNodeObject(pJson, jkFillLogicPlanWStartTs, &pNode->pWStartTs);
@@ -1654,6 +1642,7 @@ static int32_t jsonToPhysiTableScanNode(const SJson* pJson, void* pObj) {
static const char* jkSysTableScanPhysiPlanMnodeEpSet = "MnodeEpSet";
static const char* jkSysTableScanPhysiPlanShowRewrite = "ShowRewrite";
static const char* jkSysTableScanPhysiPlanAccountId = "AccountId";
+static const char* jkSysTableScanPhysiPlanSysInfo = "SysInfo";
static int32_t physiSysTableScanNodeToJson(const void* pObj, SJson* pJson) {
const SSystemTableScanPhysiNode* pNode = (const SSystemTableScanPhysiNode*)pObj;
@@ -1668,6 +1657,9 @@ static int32_t physiSysTableScanNodeToJson(const void* pObj, SJson* pJson) {
if (TSDB_CODE_SUCCESS == code) {
code = tjsonAddIntegerToObject(pJson, jkSysTableScanPhysiPlanAccountId, pNode->accountId);
}
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tjsonAddBoolToObject(pJson, jkSysTableScanPhysiPlanSysInfo, pNode->sysInfo);
+ }
return code;
}
@@ -1684,7 +1676,9 @@ static int32_t jsonToPhysiSysTableScanNode(const SJson* pJson, void* pObj) {
}
if (TSDB_CODE_SUCCESS == code) {
tjsonGetNumberValue(pJson, jkSysTableScanPhysiPlanAccountId, pNode->accountId, code);
- ;
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tjsonGetBoolValue(pJson, jkSysTableScanPhysiPlanSysInfo, &pNode->sysInfo);
}
return code;
@@ -2807,7 +2801,6 @@ static int32_t jsonToColumnNode(const SJson* pJson, void* pObj) {
}
if (TSDB_CODE_SUCCESS == code) {
tjsonGetNumberValue(pJson, jkColumnColType, pNode->colType, code);
- ;
}
if (TSDB_CODE_SUCCESS == code) {
code = tjsonGetStringValue(pJson, jkColumnDbName, pNode->dbName);
@@ -3110,7 +3103,6 @@ static int32_t jsonToOperatorNode(const SJson* pJson, void* pObj) {
int32_t code = jsonToExprNode(pJson, pObj);
if (TSDB_CODE_SUCCESS == code) {
tjsonGetNumberValue(pJson, jkOperatorType, pNode->opType, code);
- ;
}
if (TSDB_CODE_SUCCESS == code) {
code = jsonToNodeObject(pJson, jkOperatorLeft, &pNode->pLeft);
@@ -3145,7 +3137,6 @@ static int32_t jsonToLogicConditionNode(const SJson* pJson, void* pObj) {
int32_t code = jsonToExprNode(pJson, pObj);
if (TSDB_CODE_SUCCESS == code) {
tjsonGetNumberValue(pJson, jkLogicCondType, pNode->condType, code);
- ;
}
if (TSDB_CODE_SUCCESS == code) {
code = jsonToNodeList(pJson, jkLogicCondParameters, &pNode->pParameterList);
@@ -3434,11 +3425,9 @@ static int32_t jsonToOrderByExprNode(const SJson* pJson, void* pObj) {
int32_t code = jsonToNodeObject(pJson, jkOrderByExprExpr, &pNode->pExpr);
if (TSDB_CODE_SUCCESS == code) {
tjsonGetNumberValue(pJson, jkOrderByExprOrder, pNode->order, code);
- ;
}
if (TSDB_CODE_SUCCESS == code) {
tjsonGetNumberValue(pJson, jkOrderByExprNullOrder, pNode->nullOrder, code);
- ;
}
return code;
@@ -3616,7 +3605,6 @@ static int32_t jsonToFillNode(const SJson* pJson, void* pObj) {
int32_t code;
tjsonGetNumberValue(pJson, jkFillMode, pNode->mode, code);
- ;
if (TSDB_CODE_SUCCESS == code) {
code = jsonToNodeObject(pJson, jkFillValues, &pNode->pValues);
}
@@ -4479,6 +4467,7 @@ static int32_t specificNodeToJson(const void* pObj, SJson* pJson) {
case QUERY_NODE_PHYSICAL_PLAN_STREAM_STATE:
return physiStateWindowNodeToJson(pObj, pJson);
case QUERY_NODE_PHYSICAL_PLAN_PARTITION:
+ case QUERY_NODE_PHYSICAL_PLAN_STREAM_PARTITION:
return physiPartitionNodeToJson(pObj, pJson);
case QUERY_NODE_PHYSICAL_PLAN_INDEF_ROWS_FUNC:
return physiIndefRowsFuncNodeToJson(pObj, pJson);
@@ -4626,6 +4615,7 @@ static int32_t jsonToSpecificNode(const SJson* pJson, void* pObj) {
case QUERY_NODE_PHYSICAL_PLAN_STREAM_STATE:
return jsonToPhysiStateWindowNode(pJson, pObj);
case QUERY_NODE_PHYSICAL_PLAN_PARTITION:
+ case QUERY_NODE_PHYSICAL_PLAN_STREAM_PARTITION:
return jsonToPhysiPartitionNode(pJson, pObj);
case QUERY_NODE_PHYSICAL_PLAN_INDEF_ROWS_FUNC:
return jsonToPhysiIndefRowsFuncNode(pJson, pObj);
diff --git a/source/libs/nodes/src/nodesMsgFuncs.c b/source/libs/nodes/src/nodesMsgFuncs.c
new file mode 100644
index 0000000000000000000000000000000000000000..5fe31ed78e80cf19a4e107d6f1366609bd257520
--- /dev/null
+++ b/source/libs/nodes/src/nodesMsgFuncs.c
@@ -0,0 +1,3108 @@
+/*
+ * Copyright (c) 2019 TAOS Data, Inc.
+ *
+ * This program is free software: you can use, redistribute, and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3
+ * or later ("AGPL"), as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "nodesUtil.h"
+#include "plannodes.h"
+#include "tdatablock.h"
+
+#define NODES_MSG_DEFAULT_LEN 1024
+
+#define tlvForEach(pDecoder, pTlv, code) \
+ while (TSDB_CODE_SUCCESS == code && TSDB_CODE_SUCCESS == (code = tlvGetNextTlv(pDecoder, &pTlv)) && NULL != pTlv)
+
+typedef struct STlv {
+ int16_t type;
+ int16_t len;
+ char value[0];
+} STlv;
+
+typedef struct STlvEncoder {
+ int32_t allocSize;
+ int32_t offset;
+ char* pBuf;
+ int32_t tlvCount;
+} STlvEncoder;
+
+typedef struct STlvDecoder {
+ int32_t bufSize;
+ int32_t offset;
+ const char* pBuf;
+} STlvDecoder;
+
+typedef int32_t (*FToMsg)(const void* pObj, STlvEncoder* pEncoder);
+typedef int32_t (*FToObject)(STlvDecoder* pDecoder, void* pObj);
+typedef void* (*FMakeObject)(int16_t type);
+typedef int32_t (*FSetObject)(STlv* pTlv, void* pObj);
+
+static int32_t nodeToMsg(const void* pObj, STlvEncoder* pEncoder);
+static int32_t nodeListToMsg(const void* pObj, STlvEncoder* pEncoder);
+static int32_t msgToNode(STlvDecoder* pDecoder, void** pObj);
+static int32_t msgToNodeFromTlv(STlv* pTlv, void** pObj);
+static int32_t msgToNodeList(STlvDecoder* pDecoder, void** pObj);
+static int32_t msgToNodeListFromTlv(STlv* pTlv, void** pObj);
+
+static int32_t initTlvEncoder(STlvEncoder* pEncoder) {
+ pEncoder->allocSize = NODES_MSG_DEFAULT_LEN;
+ pEncoder->offset = 0;
+ pEncoder->tlvCount = 0;
+ pEncoder->pBuf = taosMemoryMalloc(pEncoder->allocSize);
+ return NULL == pEncoder->pBuf ? TSDB_CODE_OUT_OF_MEMORY : TSDB_CODE_SUCCESS;
+}
+
+static void clearTlvEncoder(STlvEncoder* pEncoder) { taosMemoryFree(pEncoder->pBuf); }
+
+static void endTlvEncode(STlvEncoder* pEncoder, char** pMsg, int32_t* pLen) {
+ *pMsg = pEncoder->pBuf;
+ pEncoder->pBuf = NULL;
+ *pLen = pEncoder->offset;
+ // nodesWarn("encode tlv count = %d, tl size = %d", pEncoder->tlvCount, sizeof(STlv) * pEncoder->tlvCount);
+}
+
+static int32_t tlvEncodeImpl(STlvEncoder* pEncoder, int16_t type, const void* pValue, int16_t len) {
+ int32_t tlvLen = sizeof(STlv) + len;
+ if (pEncoder->offset + tlvLen > pEncoder->allocSize) {
+ void* pNewBuf = taosMemoryRealloc(pEncoder->pBuf, pEncoder->allocSize * 2);
+ if (NULL == pNewBuf) {
+ return TSDB_CODE_OUT_OF_MEMORY;
+ }
+ pEncoder->pBuf = pNewBuf;
+ pEncoder->allocSize = pEncoder->allocSize * 2;
+ }
+ STlv* pTlv = (STlv*)(pEncoder->pBuf + pEncoder->offset);
+ pTlv->type = type;
+ pTlv->len = len;
+ memcpy(pTlv->value, pValue, len);
+ pEncoder->offset += tlvLen;
+ ++(pEncoder->tlvCount);
+ return TSDB_CODE_SUCCESS;
+}
+
+static int32_t tlvEncodeI8(STlvEncoder* pEncoder, int16_t type, int8_t value) {
+ return tlvEncodeImpl(pEncoder, type, &value, sizeof(value));
+}
+
+static int32_t tlvEncodeI16(STlvEncoder* pEncoder, int16_t type, int16_t value) {
+ return tlvEncodeImpl(pEncoder, type, &value, sizeof(value));
+}
+
+static int32_t tlvEncodeI32(STlvEncoder* pEncoder, int16_t type, int32_t value) {
+ return tlvEncodeImpl(pEncoder, type, &value, sizeof(value));
+}
+
+static int32_t tlvEncodeI64(STlvEncoder* pEncoder, int16_t type, int64_t value) {
+ return tlvEncodeImpl(pEncoder, type, &value, sizeof(value));
+}
+
+static int32_t tlvEncodeU8(STlvEncoder* pEncoder, int16_t type, uint8_t value) {
+ return tlvEncodeImpl(pEncoder, type, &value, sizeof(value));
+}
+
+static int32_t tlvEncodeU16(STlvEncoder* pEncoder, int16_t type, uint16_t value) {
+ return tlvEncodeImpl(pEncoder, type, &value, sizeof(value));
+}
+
+static int32_t tlvEncodeU64(STlvEncoder* pEncoder, int16_t type, uint64_t value) {
+ return tlvEncodeImpl(pEncoder, type, &value, sizeof(value));
+}
+
+static int32_t tlvEncodeDouble(STlvEncoder* pEncoder, int16_t type, double value) {
+ return tlvEncodeImpl(pEncoder, type, &value, sizeof(value));
+}
+
+static int32_t tlvEncodeEnum(STlvEncoder* pEncoder, int16_t type, int32_t value) {
+ return tlvEncodeImpl(pEncoder, type, &value, sizeof(value));
+}
+
+static int32_t tlvEncodeBool(STlvEncoder* pEncoder, int16_t type, bool value) {
+ return tlvEncodeImpl(pEncoder, type, &value, sizeof(value));
+}
+
+static int32_t tlvEncodeCStr(STlvEncoder* pEncoder, int16_t type, const char* pValue) {
+ return tlvEncodeImpl(pEncoder, type, pValue, strlen(pValue));
+}
+
+static int32_t tlvEncodeBinary(STlvEncoder* pEncoder, int16_t type, const void* pValue, int32_t len) {
+ return tlvEncodeImpl(pEncoder, type, pValue, len);
+}
+
+static int32_t tlvEncodeObj(STlvEncoder* pEncoder, int16_t type, FToMsg func, const void* pObj) {
+ if (NULL == pObj) {
+ return TSDB_CODE_SUCCESS;
+ }
+
+ int32_t start = pEncoder->offset;
+ pEncoder->offset += sizeof(STlv);
+ int32_t code = func(pObj, pEncoder);
+ if (TSDB_CODE_SUCCESS == code) {
+ STlv* pTlv = (STlv*)(pEncoder->pBuf + start);
+ pTlv->type = type;
+ pTlv->len = pEncoder->offset - start - sizeof(STlv);
+ }
+ return code;
+}
+
+static int32_t tlvEncodeObjArray(STlvEncoder* pEncoder, int16_t type, FToMsg func, const void* pArray, int32_t itemSize,
+ int32_t num) {
+ int32_t code = TSDB_CODE_SUCCESS;
+ if (num > 0) {
+ int32_t start = pEncoder->offset;
+ pEncoder->offset += sizeof(STlv);
+ for (size_t i = 0; TSDB_CODE_SUCCESS == code && i < num; ++i) {
+ code = tlvEncodeObj(pEncoder, 0, func, (const char*)pArray + i * itemSize);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ STlv* pTlv = (STlv*)(pEncoder->pBuf + start);
+ pTlv->type = type;
+ pTlv->len = pEncoder->offset - start - sizeof(STlv);
+ }
+ }
+ return code;
+}
+
+static int32_t tlvGetNextTlv(STlvDecoder* pDecoder, STlv** pTlv) {
+ if (pDecoder->offset == pDecoder->bufSize) {
+ *pTlv = NULL;
+ return TSDB_CODE_SUCCESS;
+ }
+
+ *pTlv = (STlv*)(pDecoder->pBuf + pDecoder->offset);
+ if ((*pTlv)->len + pDecoder->offset > pDecoder->bufSize) {
+ return TSDB_CODE_FAILED;
+ }
+ pDecoder->offset += sizeof(STlv) + (*pTlv)->len;
+ return TSDB_CODE_SUCCESS;
+}
+
+static bool tlvDecodeEnd(STlvDecoder* pDecoder) { return pDecoder->offset == pDecoder->bufSize; }
+
+static int32_t tlvDecodeImpl(STlv* pTlv, void* pValue, int16_t len) {
+ if (pTlv->len != len) {
+ return TSDB_CODE_FAILED;
+ }
+ memcpy(pValue, pTlv->value, len);
+ return TSDB_CODE_SUCCESS;
+}
+
+static int32_t tlvDecodeI8(STlv* pTlv, int8_t* pValue) { return tlvDecodeImpl(pTlv, pValue, sizeof(*pValue)); }
+
+static int32_t tlvDecodeI16(STlv* pTlv, int16_t* pValue) { return tlvDecodeImpl(pTlv, pValue, sizeof(*pValue)); }
+
+static int32_t tlvDecodeI32(STlv* pTlv, int32_t* pValue) { return tlvDecodeImpl(pTlv, pValue, sizeof(*pValue)); }
+
+static int32_t tlvDecodeI64(STlv* pTlv, int64_t* pValue) { return tlvDecodeImpl(pTlv, pValue, sizeof(*pValue)); }
+
+static int32_t tlvDecodeU8(STlv* pTlv, uint8_t* pValue) { return tlvDecodeImpl(pTlv, pValue, sizeof(*pValue)); }
+
+static int32_t tlvDecodeU16(STlv* pTlv, uint16_t* pValue) { return tlvDecodeImpl(pTlv, pValue, sizeof(*pValue)); }
+
+static int32_t tlvDecodeU64(STlv* pTlv, uint64_t* pValue) { return tlvDecodeImpl(pTlv, pValue, sizeof(*pValue)); }
+
+static int32_t tlvDecodeDouble(STlv* pTlv, double* pValue) { return tlvDecodeImpl(pTlv, pValue, sizeof(*pValue)); }
+
+static int32_t tlvDecodeBool(STlv* pTlv, bool* pValue) { return tlvDecodeImpl(pTlv, pValue, sizeof(*pValue)); }
+
+static int32_t tlvDecodeEnum(STlv* pTlv, void* pValue, int16_t len) {
+ int32_t value = 0;
+ memcpy(&value, pTlv->value, pTlv->len);
+ switch (len) {
+ case 1:
+ *(int8_t*)pValue = value;
+ break;
+ case 2:
+ *(int16_t*)pValue = value;
+ break;
+ case 4:
+ *(int32_t*)pValue = value;
+ break;
+ default:
+ return TSDB_CODE_FAILED;
+ }
+ return TSDB_CODE_SUCCESS;
+}
+
+static int32_t tlvDecodeCStr(STlv* pTlv, char* pValue) {
+ memcpy(pValue, pTlv->value, pTlv->len);
+ return TSDB_CODE_SUCCESS;
+}
+
+static int32_t tlvDecodeDynBinary(STlv* pTlv, void** pValue) {
+ *pValue = taosMemoryMalloc(pTlv->len);
+ if (NULL == *pValue) {
+ return TSDB_CODE_OUT_OF_MEMORY;
+ }
+ memcpy(*pValue, pTlv->value, pTlv->len);
+ return TSDB_CODE_SUCCESS;
+}
+
+static int32_t tlvDecodeObjFromTlv(STlv* pTlv, FToObject func, void* pObj) {
+ STlvDecoder decoder = {.bufSize = pTlv->len, .offset = 0, .pBuf = pTlv->value};
+ return func(&decoder, pObj);
+}
+
+static int32_t tlvDecodeObj(STlvDecoder* pDecoder, FToObject func, void* pObj) {
+ STlv* pTlv = NULL;
+ int32_t code = tlvGetNextTlv(pDecoder, &pTlv);
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tlvDecodeObjFromTlv(pTlv, func, pObj);
+ }
+ return code;
+}
+
+static int32_t tlvDecodeObjArray(STlvDecoder* pDecoder, FToObject func, void* pArray, int32_t itemSize) {
+ int32_t code = TSDB_CODE_SUCCESS;
+ int32_t i = 0;
+ STlv* pTlv = NULL;
+ tlvForEach(pDecoder, pTlv, code) { code = tlvDecodeObjFromTlv(pTlv, func, (char*)pArray + itemSize * i++); }
+ return code;
+}
+
+static int32_t tlvDecodeObjArrayFromTlv(STlv* pTlv, FToObject func, void* pArray, int32_t itemSize) {
+ STlvDecoder decoder = {.bufSize = pTlv->len, .offset = 0, .pBuf = pTlv->value};
+ return tlvDecodeObjArray(&decoder, func, pArray, itemSize);
+}
+
+static int32_t tlvDecodeDynObjFromTlv(STlv* pTlv, FMakeObject makeFunc, FToObject toFunc, void** pObj) {
+ *pObj = makeFunc(pTlv->type);
+ if (NULL == *pObj) {
+ return TSDB_CODE_OUT_OF_MEMORY;
+ }
+ return tlvDecodeObjFromTlv(pTlv, toFunc, *pObj);
+}
+
+static int32_t tlvDecodeDynObj(STlvDecoder* pDecoder, FMakeObject makeFunc, FToObject toFunc, void** pObj) {
+ STlv* pTlv = NULL;
+ int32_t code = tlvGetNextTlv(pDecoder, &pTlv);
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tlvDecodeDynObjFromTlv(pTlv, makeFunc, toFunc, pObj);
+ }
+ return code;
+}
+
+enum { DATA_TYPE_CODE_TYPE = 1, DATA_TYPE_CODE_PRECISION, DATA_TYPE_CODE_SCALE, DATA_TYPE_CODE_BYTES };
+
+static int32_t dataTypeToMsg(const void* pObj, STlvEncoder* pEncoder) {
+ const SDataType* pNode = (const SDataType*)pObj;
+
+ int32_t code = tlvEncodeI8(pEncoder, DATA_TYPE_CODE_TYPE, pNode->type);
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tlvEncodeU8(pEncoder, DATA_TYPE_CODE_PRECISION, pNode->precision);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tlvEncodeU8(pEncoder, DATA_TYPE_CODE_SCALE, pNode->scale);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tlvEncodeI32(pEncoder, DATA_TYPE_CODE_BYTES, pNode->bytes);
+ }
+
+ return code;
+}
+
+static int32_t msgToDataType(STlvDecoder* pDecoder, void* pObj) {
+ SDataType* pNode = (SDataType*)pObj;
+
+ int32_t code = TSDB_CODE_SUCCESS;
+ STlv* pTlv = NULL;
+ tlvForEach(pDecoder, pTlv, code) {
+ switch (pTlv->type) {
+ case DATA_TYPE_CODE_TYPE:
+ code = tlvDecodeI8(pTlv, &pNode->type);
+ break;
+ case DATA_TYPE_CODE_PRECISION:
+ code = tlvDecodeU8(pTlv, &pNode->precision);
+ break;
+ case DATA_TYPE_CODE_SCALE:
+ code = tlvDecodeU8(pTlv, &pNode->scale);
+ break;
+ case DATA_TYPE_CODE_BYTES:
+ code = tlvDecodeI32(pTlv, &pNode->bytes);
+ break;
+ default:
+ break;
+ }
+ }
+
+ return code;
+}
+
+enum { EXPR_CODE_RES_TYPE = 1 };
+
+static int32_t exprNodeToMsg(const void* pObj, STlvEncoder* pEncoder) {
+ const SExprNode* pNode = (const SExprNode*)pObj;
+ return tlvEncodeObj(pEncoder, EXPR_CODE_RES_TYPE, dataTypeToMsg, &pNode->resType);
+}
+
+static int32_t msgToExprNode(STlvDecoder* pDecoder, void* pObj) {
+ SExprNode* pNode = (SExprNode*)pObj;
+
+ int32_t code = TSDB_CODE_SUCCESS;
+ STlv* pTlv = NULL;
+ tlvForEach(pDecoder, pTlv, code) {
+ switch (pTlv->type) {
+ case EXPR_CODE_RES_TYPE:
+ code = tlvDecodeObjFromTlv(pTlv, msgToDataType, &pNode->resType);
+ break;
+ default:
+ break;
+ }
+ }
+
+ return code;
+}
+
+enum {
+ COLUMN_CODE_EXPR_BASE = 1,
+ COLUMN_CODE_TABLE_ID,
+ COLUMN_CODE_TABLE_TYPE,
+ COLUMN_CODE_COLUMN_ID,
+ COLUMN_CODE_COLUMN_TYPE,
+ COLUMN_CODE_DATABLOCK_ID,
+ COLUMN_CODE_SLOT_ID
+};
+
+static int32_t columnNodeToMsg(const void* pObj, STlvEncoder* pEncoder) {
+ const SColumnNode* pNode = (const SColumnNode*)pObj;
+
+ int32_t code = tlvEncodeObj(pEncoder, COLUMN_CODE_EXPR_BASE, exprNodeToMsg, pNode);
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tlvEncodeU64(pEncoder, COLUMN_CODE_TABLE_ID, pNode->tableId);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tlvEncodeI8(pEncoder, COLUMN_CODE_TABLE_TYPE, pNode->tableType);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tlvEncodeI16(pEncoder, COLUMN_CODE_COLUMN_ID, pNode->colId);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tlvEncodeEnum(pEncoder, COLUMN_CODE_COLUMN_TYPE, pNode->colType);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tlvEncodeI16(pEncoder, COLUMN_CODE_DATABLOCK_ID, pNode->dataBlockId);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tlvEncodeI16(pEncoder, COLUMN_CODE_SLOT_ID, pNode->slotId);
+ }
+
+ return code;
+}
+
+// Decode an SColumnNode from its TLV stream (counterpart of
+// columnNodeToMsg); unknown field codes are ignored.
+static int32_t msgToColumnNode(STlvDecoder* pDecoder, void* pObj) {
+  SColumnNode* pNode = (SColumnNode*)pObj;
+
+  int32_t code = TSDB_CODE_SUCCESS;
+  STlv* pTlv = NULL;
+  tlvForEach(pDecoder, pTlv, code) {
+    switch (pTlv->type) {
+      case COLUMN_CODE_EXPR_BASE:
+        code = tlvDecodeObjFromTlv(pTlv, msgToExprNode, &pNode->node);
+        break;
+      case COLUMN_CODE_TABLE_ID:
+        code = tlvDecodeU64(pTlv, &pNode->tableId);
+        break;
+      case COLUMN_CODE_TABLE_TYPE:
+        code = tlvDecodeI8(pTlv, &pNode->tableType);
+        break;
+      case COLUMN_CODE_COLUMN_ID:
+        code = tlvDecodeI16(pTlv, &pNode->colId);
+        break;
+      case COLUMN_CODE_COLUMN_TYPE:
+        code = tlvDecodeEnum(pTlv, &pNode->colType, sizeof(pNode->colType));
+        break;
+      case COLUMN_CODE_DATABLOCK_ID:
+        code = tlvDecodeI16(pTlv, &pNode->dataBlockId);
+        break;
+      case COLUMN_CODE_SLOT_ID:
+        code = tlvDecodeI16(pTlv, &pNode->slotId);
+        break;
+      default:
+        break;
+    }
+  }
+
+  return code;
+}
+
+enum { VALUE_CODE_EXPR_BASE = 1, VALUE_CODE_IS_NULL, VALUE_CODE_DATUM };
+
+// Serialize a value node's datum under VALUE_CODE_DATUM, choosing the wire
+// form by the node's result type. NULL writes nothing; var-length types are
+// written with their varData total length, JSON with getJsonValueLen.
+// NOTE(review): for isNull values datum fields may be unset — confirm the
+// callers guarantee datum is initialized before encoding.
+static int32_t datumToMsg(const void* pObj, STlvEncoder* pEncoder) {
+  const SValueNode* pNode = (const SValueNode*)pObj;
+
+  int32_t code = TSDB_CODE_SUCCESS;
+  switch (pNode->node.resType.type) {
+    case TSDB_DATA_TYPE_NULL:
+      break;
+    case TSDB_DATA_TYPE_BOOL:
+      code = tlvEncodeBool(pEncoder, VALUE_CODE_DATUM, pNode->datum.b);
+      break;
+    case TSDB_DATA_TYPE_TINYINT:
+    case TSDB_DATA_TYPE_SMALLINT:
+    case TSDB_DATA_TYPE_INT:
+    case TSDB_DATA_TYPE_BIGINT:
+    case TSDB_DATA_TYPE_TIMESTAMP:
+      code = tlvEncodeI64(pEncoder, VALUE_CODE_DATUM, pNode->datum.i);
+      break;
+    case TSDB_DATA_TYPE_UTINYINT:
+    case TSDB_DATA_TYPE_USMALLINT:
+    case TSDB_DATA_TYPE_UINT:
+    case TSDB_DATA_TYPE_UBIGINT:
+      code = tlvEncodeU64(pEncoder, VALUE_CODE_DATUM, pNode->datum.u);
+      break;
+    case TSDB_DATA_TYPE_FLOAT:
+    case TSDB_DATA_TYPE_DOUBLE:
+      code = tlvEncodeDouble(pEncoder, VALUE_CODE_DATUM, pNode->datum.d);
+      break;
+    case TSDB_DATA_TYPE_VARCHAR:
+    case TSDB_DATA_TYPE_VARBINARY:
+    case TSDB_DATA_TYPE_NCHAR:
+      code = tlvEncodeBinary(pEncoder, VALUE_CODE_DATUM, pNode->datum.p, varDataTLen(pNode->datum.p));
+      break;
+    case TSDB_DATA_TYPE_JSON:
+      code = tlvEncodeBinary(pEncoder, VALUE_CODE_DATUM, pNode->datum.p, getJsonValueLen(pNode->datum.p));
+      break;
+    case TSDB_DATA_TYPE_DECIMAL:
+    case TSDB_DATA_TYPE_BLOB:
+      // todo
+    default:
+      break;
+  }
+
+  return code;
+}
+
+// Serialize an SValueNode: expr base, NULL flag, then the datum itself.
+static int32_t valueNodeToMsg(const void* pObj, STlvEncoder* pEncoder) {
+  const SValueNode* pVal = (const SValueNode*)pObj;
+
+  int32_t code = tlvEncodeObj(pEncoder, VALUE_CODE_EXPR_BASE, exprNodeToMsg, pVal);
+  if (TSDB_CODE_SUCCESS != code) return code;
+  code = tlvEncodeBool(pEncoder, VALUE_CODE_IS_NULL, pVal->isNull);
+  if (TSDB_CODE_SUCCESS != code) return code;
+  return datumToMsg(pVal, pEncoder);
+}
+
+// Decode a VALUE_CODE_DATUM field into pNode->datum, dispatching on the
+// (already decoded) result type. Each fixed-width branch also narrows the
+// value into typeData via a pointer cast so the in-memory representation
+// matches the declared column width.
+// NOTE(review): the VARCHAR branch overwrites the decoded varData length
+// with resType.bytes - VARSTR_HEADER_SIZE — confirm resType.bytes always
+// equals the encoded payload size, otherwise the length is corrupted.
+static int32_t msgToDatum(STlv* pTlv, void* pObj) {
+  SValueNode* pNode = (SValueNode*)pObj;
+
+  int32_t code = TSDB_CODE_SUCCESS;
+  switch (pNode->node.resType.type) {
+    case TSDB_DATA_TYPE_NULL:
+      break;
+    case TSDB_DATA_TYPE_BOOL:
+      code = tlvDecodeBool(pTlv, &pNode->datum.b);
+      *(bool*)&pNode->typeData = pNode->datum.b;
+      break;
+    case TSDB_DATA_TYPE_TINYINT:
+      code = tlvDecodeI64(pTlv, &pNode->datum.i);
+      *(int8_t*)&pNode->typeData = pNode->datum.i;
+      break;
+    case TSDB_DATA_TYPE_SMALLINT:
+      code = tlvDecodeI64(pTlv, &pNode->datum.i);
+      *(int16_t*)&pNode->typeData = pNode->datum.i;
+      break;
+    case TSDB_DATA_TYPE_INT:
+      code = tlvDecodeI64(pTlv, &pNode->datum.i);
+      *(int32_t*)&pNode->typeData = pNode->datum.i;
+      break;
+    case TSDB_DATA_TYPE_BIGINT:
+      code = tlvDecodeI64(pTlv, &pNode->datum.i);
+      *(int64_t*)&pNode->typeData = pNode->datum.i;
+      break;
+    case TSDB_DATA_TYPE_TIMESTAMP:
+      code = tlvDecodeI64(pTlv, &pNode->datum.i);
+      *(int64_t*)&pNode->typeData = pNode->datum.i;
+      break;
+    case TSDB_DATA_TYPE_UTINYINT:
+      code = tlvDecodeU64(pTlv, &pNode->datum.u);
+      *(uint8_t*)&pNode->typeData = pNode->datum.u;
+      break;
+    case TSDB_DATA_TYPE_USMALLINT:
+      code = tlvDecodeU64(pTlv, &pNode->datum.u);
+      *(uint16_t*)&pNode->typeData = pNode->datum.u;
+      break;
+    case TSDB_DATA_TYPE_UINT:
+      code = tlvDecodeU64(pTlv, &pNode->datum.u);
+      *(uint32_t*)&pNode->typeData = pNode->datum.u;
+      break;
+    case TSDB_DATA_TYPE_UBIGINT:
+      code = tlvDecodeU64(pTlv, &pNode->datum.u);
+      *(uint64_t*)&pNode->typeData = pNode->datum.u;
+      break;
+    case TSDB_DATA_TYPE_FLOAT:
+      code = tlvDecodeDouble(pTlv, &pNode->datum.d);
+      *(float*)&pNode->typeData = pNode->datum.d;
+      break;
+    case TSDB_DATA_TYPE_DOUBLE:
+      code = tlvDecodeDouble(pTlv, &pNode->datum.d);
+      *(double*)&pNode->typeData = pNode->datum.d;
+      break;
+    case TSDB_DATA_TYPE_NCHAR:
+    case TSDB_DATA_TYPE_VARCHAR:
+    case TSDB_DATA_TYPE_VARBINARY:
+      // tlvDecodeDynBinary allocates; the node owns datum.p afterwards.
+      code = tlvDecodeDynBinary(pTlv, (void**)&pNode->datum.p);
+      if (TSDB_CODE_SUCCESS == code) {
+        varDataSetLen(pNode->datum.p, pNode->node.resType.bytes - VARSTR_HEADER_SIZE);
+      }
+      break;
+    case TSDB_DATA_TYPE_JSON:
+      code = tlvDecodeDynBinary(pTlv, (void**)&pNode->datum.p);
+      break;
+    case TSDB_DATA_TYPE_DECIMAL:
+    case TSDB_DATA_TYPE_BLOB:
+      // todo
+    default:
+      break;
+  }
+
+  return code;
+}
+
+// Decode an SValueNode (counterpart of valueNodeToMsg). The datum branch
+// relies on resType having been decoded first via VALUE_CODE_EXPR_BASE.
+static int32_t msgToValueNode(STlvDecoder* pDecoder, void* pObj) {
+  SValueNode* pNode = (SValueNode*)pObj;
+
+  int32_t code = TSDB_CODE_SUCCESS;
+  STlv* pTlv = NULL;
+  tlvForEach(pDecoder, pTlv, code) {
+    switch (pTlv->type) {
+      case VALUE_CODE_EXPR_BASE:
+        code = tlvDecodeObjFromTlv(pTlv, msgToExprNode, &pNode->node);
+        break;
+      case VALUE_CODE_IS_NULL:
+        code = tlvDecodeBool(pTlv, &pNode->isNull);
+        break;
+      case VALUE_CODE_DATUM:
+        code = msgToDatum(pTlv, pNode);
+        break;
+      default:
+        break;
+    }
+  }
+
+  return code;
+}
+
+enum { OPERATOR_CODE_EXPR_BASE = 1, OPERATOR_CODE_OP_TYPE, OPERATOR_CODE_LEFT, OPERATOR_CODE_RIGHT };
+
+// Serialize an SOperatorNode: expr base, operator type, then the two
+// operand subtrees (each as a nested node message).
+static int32_t operatorNodeToMsg(const void* pObj, STlvEncoder* pEncoder) {
+  const SOperatorNode* pOp = (const SOperatorNode*)pObj;
+
+  int32_t code = tlvEncodeObj(pEncoder, OPERATOR_CODE_EXPR_BASE, exprNodeToMsg, pOp);
+  if (TSDB_CODE_SUCCESS != code) return code;
+  code = tlvEncodeEnum(pEncoder, OPERATOR_CODE_OP_TYPE, pOp->opType);
+  if (TSDB_CODE_SUCCESS != code) return code;
+  code = tlvEncodeObj(pEncoder, OPERATOR_CODE_LEFT, nodeToMsg, pOp->pLeft);
+  if (TSDB_CODE_SUCCESS != code) return code;
+  return tlvEncodeObj(pEncoder, OPERATOR_CODE_RIGHT, nodeToMsg, pOp->pRight);
+}
+
+// Decode an SOperatorNode (counterpart of operatorNodeToMsg); the left and
+// right operands are materialized as freshly allocated node trees.
+static int32_t msgToOperatorNode(STlvDecoder* pDecoder, void* pObj) {
+  SOperatorNode* pNode = (SOperatorNode*)pObj;
+
+  int32_t code = TSDB_CODE_SUCCESS;
+  STlv* pTlv = NULL;
+  tlvForEach(pDecoder, pTlv, code) {
+    switch (pTlv->type) {
+      case OPERATOR_CODE_EXPR_BASE:
+        code = tlvDecodeObjFromTlv(pTlv, msgToExprNode, &pNode->node);
+        break;
+      case OPERATOR_CODE_OP_TYPE:
+        code = tlvDecodeEnum(pTlv, &pNode->opType, sizeof(pNode->opType));
+        break;
+      case OPERATOR_CODE_LEFT:
+        code = msgToNodeFromTlv(pTlv, (void**)&pNode->pLeft);
+        break;
+      case OPERATOR_CODE_RIGHT:
+        code = msgToNodeFromTlv(pTlv, (void**)&pNode->pRight);
+        break;
+      default:
+        break;
+    }
+  }
+
+  return code;
+}
+
+enum { LOGIC_COND_CODE_EXPR_BASE = 1, LOGIC_COND_CODE_COND_TYPE, LOGIC_COND_CODE_PARAMETERS };
+
+// Serialize an SLogicConditionNode: expr base, condition type, and the
+// parameter list as a nested node-list message.
+static int32_t logicConditionNodeToMsg(const void* pObj, STlvEncoder* pEncoder) {
+  const SLogicConditionNode* pCond = (const SLogicConditionNode*)pObj;
+
+  int32_t code = tlvEncodeObj(pEncoder, LOGIC_COND_CODE_EXPR_BASE, exprNodeToMsg, pCond);
+  if (TSDB_CODE_SUCCESS != code) return code;
+  code = tlvEncodeEnum(pEncoder, LOGIC_COND_CODE_COND_TYPE, pCond->condType);
+  if (TSDB_CODE_SUCCESS != code) return code;
+  return tlvEncodeObj(pEncoder, LOGIC_COND_CODE_PARAMETERS, nodeListToMsg, pCond->pParameterList);
+}
+
+// Decode an SLogicConditionNode (counterpart of logicConditionNodeToMsg).
+static int32_t msgToLogicConditionNode(STlvDecoder* pDecoder, void* pObj) {
+  SLogicConditionNode* pNode = (SLogicConditionNode*)pObj;
+
+  int32_t code = TSDB_CODE_SUCCESS;
+  STlv* pTlv = NULL;
+  tlvForEach(pDecoder, pTlv, code) {
+    switch (pTlv->type) {
+      case LOGIC_COND_CODE_EXPR_BASE:
+        code = tlvDecodeObjFromTlv(pTlv, msgToExprNode, &pNode->node);
+        break;
+      case LOGIC_COND_CODE_COND_TYPE:
+        code = tlvDecodeEnum(pTlv, &pNode->condType, sizeof(pNode->condType));
+        break;
+      case LOGIC_COND_CODE_PARAMETERS:
+        code = msgToNodeListFromTlv(pTlv, (void**)&pNode->pParameterList);
+        break;
+      default:
+        break;
+    }
+  }
+
+  return code;
+}
+
+// TLV field codes for SFunctionNode serialization.
+enum {
+  FUNCTION_CODE_EXPR_BASE = 1,
+  FUNCTION_CODE_FUNCTION_ID,
+  FUNCTION_CODE_FUNCTION_TYPE,
+  FUNCTION_CODE_PARAMETERS,
+  FUNCTION_CODE_UDF_BUF_SIZE
+};
+
+// Serialize an SFunctionNode: expr base, function id/type, parameter list
+// and the UDF buffer size.
+static int32_t functionNodeToMsg(const void* pObj, STlvEncoder* pEncoder) {
+  const SFunctionNode* pFunc = (const SFunctionNode*)pObj;
+
+  int32_t code = tlvEncodeObj(pEncoder, FUNCTION_CODE_EXPR_BASE, exprNodeToMsg, pFunc);
+  if (TSDB_CODE_SUCCESS != code) return code;
+  code = tlvEncodeI32(pEncoder, FUNCTION_CODE_FUNCTION_ID, pFunc->funcId);
+  if (TSDB_CODE_SUCCESS != code) return code;
+  code = tlvEncodeI32(pEncoder, FUNCTION_CODE_FUNCTION_TYPE, pFunc->funcType);
+  if (TSDB_CODE_SUCCESS != code) return code;
+  code = tlvEncodeObj(pEncoder, FUNCTION_CODE_PARAMETERS, nodeListToMsg, pFunc->pParameterList);
+  if (TSDB_CODE_SUCCESS != code) return code;
+  return tlvEncodeI32(pEncoder, FUNCTION_CODE_UDF_BUF_SIZE, pFunc->udfBufSize);
+}
+
+// Decode an SFunctionNode (counterpart of functionNodeToMsg).
+static int32_t msgToFunctionNode(STlvDecoder* pDecoder, void* pObj) {
+  SFunctionNode* pNode = (SFunctionNode*)pObj;
+
+  int32_t code = TSDB_CODE_SUCCESS;
+  STlv* pTlv = NULL;
+  tlvForEach(pDecoder, pTlv, code) {
+    switch (pTlv->type) {
+      case FUNCTION_CODE_EXPR_BASE:
+        code = tlvDecodeObjFromTlv(pTlv, msgToExprNode, &pNode->node);
+        break;
+      case FUNCTION_CODE_FUNCTION_ID:
+        code = tlvDecodeI32(pTlv, &pNode->funcId);
+        break;
+      case FUNCTION_CODE_FUNCTION_TYPE:
+        code = tlvDecodeI32(pTlv, &pNode->funcType);
+        break;
+      case FUNCTION_CODE_PARAMETERS:
+        code = msgToNodeListFromTlv(pTlv, (void**)&pNode->pParameterList);
+        break;
+      case FUNCTION_CODE_UDF_BUF_SIZE:
+        code = tlvDecodeI32(pTlv, &pNode->udfBufSize);
+        break;
+      default:
+        break;
+    }
+  }
+
+  return code;
+}
+
+enum { ORDER_BY_EXPR_CODE_EXPR = 1, ORDER_BY_EXPR_CODE_ORDER, ORDER_BY_EXPR_CODE_NULL_ORDER };
+
+// Serialize an SOrderByExprNode: the ordered expression plus its sort
+// direction and NULLs placement.
+static int32_t orderByExprNodeToMsg(const void* pObj, STlvEncoder* pEncoder) {
+  const SOrderByExprNode* pOrder = (const SOrderByExprNode*)pObj;
+
+  int32_t code = tlvEncodeObj(pEncoder, ORDER_BY_EXPR_CODE_EXPR, nodeToMsg, pOrder->pExpr);
+  if (TSDB_CODE_SUCCESS != code) return code;
+  code = tlvEncodeEnum(pEncoder, ORDER_BY_EXPR_CODE_ORDER, pOrder->order);
+  if (TSDB_CODE_SUCCESS != code) return code;
+  return tlvEncodeEnum(pEncoder, ORDER_BY_EXPR_CODE_NULL_ORDER, pOrder->nullOrder);
+}
+
+// Decode an SOrderByExprNode (counterpart of orderByExprNodeToMsg).
+static int32_t msgToOrderByExprNode(STlvDecoder* pDecoder, void* pObj) {
+  SOrderByExprNode* pNode = (SOrderByExprNode*)pObj;
+
+  int32_t code = TSDB_CODE_SUCCESS;
+  STlv* pTlv = NULL;
+  tlvForEach(pDecoder, pTlv, code) {
+    switch (pTlv->type) {
+      case ORDER_BY_EXPR_CODE_EXPR:
+        code = msgToNodeFromTlv(pTlv, (void**)&pNode->pExpr);
+        break;
+      case ORDER_BY_EXPR_CODE_ORDER:
+        code = tlvDecodeEnum(pTlv, &pNode->order, sizeof(pNode->order));
+        break;
+      case ORDER_BY_EXPR_CODE_NULL_ORDER:
+        code = tlvDecodeEnum(pTlv, &pNode->nullOrder, sizeof(pNode->nullOrder));
+        break;
+      default:
+        break;
+    }
+  }
+
+  return code;
+}
+
+enum { LIMIT_CODE_LIMIT = 1, LIMIT_CODE_OFFSET };
+
+// Serialize an SLimitNode: limit then offset, both as I64 fields.
+static int32_t limitNodeToMsg(const void* pObj, STlvEncoder* pEncoder) {
+  const SLimitNode* pLimit = (const SLimitNode*)pObj;
+
+  int32_t code = tlvEncodeI64(pEncoder, LIMIT_CODE_LIMIT, pLimit->limit);
+  if (TSDB_CODE_SUCCESS != code) return code;
+  return tlvEncodeI64(pEncoder, LIMIT_CODE_OFFSET, pLimit->offset);
+}
+
+// Decode an SLimitNode (counterpart of limitNodeToMsg).
+static int32_t msgToLimitNode(STlvDecoder* pDecoder, void* pObj) {
+  SLimitNode* pNode = (SLimitNode*)pObj;
+
+  int32_t code = TSDB_CODE_SUCCESS;
+  STlv* pTlv = NULL;
+  tlvForEach(pDecoder, pTlv, code) {
+    switch (pTlv->type) {
+      case LIMIT_CODE_LIMIT:
+        code = tlvDecodeI64(pTlv, &pNode->limit);
+        break;
+      case LIMIT_CODE_OFFSET:
+        code = tlvDecodeI64(pTlv, &pNode->offset);
+        break;
+      default:
+        break;
+    }
+  }
+
+  return code;
+}
+
+enum { NAME_CODE_TYPE = 1, NAME_CODE_ACCT_ID, NAME_CODE_DB_NAME, NAME_CODE_TABLE_NAME };
+
+// Serialize an SName: name type, account id, and the db/table name strings.
+static int32_t nameToMsg(const void* pObj, STlvEncoder* pEncoder) {
+  const SName* pName = (const SName*)pObj;
+
+  int32_t code = tlvEncodeU8(pEncoder, NAME_CODE_TYPE, pName->type);
+  if (TSDB_CODE_SUCCESS != code) return code;
+  code = tlvEncodeI32(pEncoder, NAME_CODE_ACCT_ID, pName->acctId);
+  if (TSDB_CODE_SUCCESS != code) return code;
+  code = tlvEncodeCStr(pEncoder, NAME_CODE_DB_NAME, pName->dbname);
+  if (TSDB_CODE_SUCCESS != code) return code;
+  return tlvEncodeCStr(pEncoder, NAME_CODE_TABLE_NAME, pName->tname);
+}
+
+// Decode an SName (counterpart of nameToMsg); string fields are decoded
+// into the struct's fixed-size buffers.
+static int32_t msgToName(STlvDecoder* pDecoder, void* pObj) {
+  SName* pNode = (SName*)pObj;
+
+  int32_t code = TSDB_CODE_SUCCESS;
+  STlv* pTlv = NULL;
+  tlvForEach(pDecoder, pTlv, code) {
+    switch (pTlv->type) {
+      case NAME_CODE_TYPE:
+        code = tlvDecodeU8(pTlv, &pNode->type);
+        break;
+      case NAME_CODE_ACCT_ID:
+        code = tlvDecodeI32(pTlv, &pNode->acctId);
+        break;
+      case NAME_CODE_DB_NAME:
+        code = tlvDecodeCStr(pTlv, pNode->dbname);
+        break;
+      case NAME_CODE_TABLE_NAME:
+        code = tlvDecodeCStr(pTlv, pNode->tname);
+        break;
+      default:
+        break;
+    }
+  }
+
+  return code;
+}
+
+enum { TIME_WINDOW_CODE_START_KEY = 1, TIME_WINDOW_CODE_END_KEY };
+
+// Serialize an STimeWindow: start key then end key.
+static int32_t timeWindowToMsg(const void* pObj, STlvEncoder* pEncoder) {
+  const STimeWindow* pWin = (const STimeWindow*)pObj;
+
+  int32_t code = tlvEncodeI64(pEncoder, TIME_WINDOW_CODE_START_KEY, pWin->skey);
+  if (TSDB_CODE_SUCCESS != code) return code;
+  return tlvEncodeI64(pEncoder, TIME_WINDOW_CODE_END_KEY, pWin->ekey);
+}
+
+// Decode an STimeWindow (counterpart of timeWindowToMsg).
+static int32_t msgToTimeWindow(STlvDecoder* pDecoder, void* pObj) {
+  STimeWindow* pNode = (STimeWindow*)pObj;
+
+  int32_t code = TSDB_CODE_SUCCESS;
+  STlv* pTlv = NULL;
+  tlvForEach(pDecoder, pTlv, code) {
+    switch (pTlv->type) {
+      case TIME_WINDOW_CODE_START_KEY:
+        code = tlvDecodeI64(pTlv, &pNode->skey);
+        break;
+      case TIME_WINDOW_CODE_END_KEY:
+        code = tlvDecodeI64(pTlv, &pNode->ekey);
+        break;
+      default:
+        break;
+    }
+  }
+
+  return code;
+}
+
+enum { NODE_LIST_CODE_DATA_TYPE = 1, NODE_LIST_CODE_NODE_LIST };
+
+// Serialize an SNodeListNode: the element data type, then the node list.
+static int32_t nodeListNodeToMsg(const void* pObj, STlvEncoder* pEncoder) {
+  const SNodeListNode* pList = (const SNodeListNode*)pObj;
+
+  int32_t code = tlvEncodeObj(pEncoder, NODE_LIST_CODE_DATA_TYPE, dataTypeToMsg, &pList->dataType);
+  if (TSDB_CODE_SUCCESS != code) return code;
+  return tlvEncodeObj(pEncoder, NODE_LIST_CODE_NODE_LIST, nodeListToMsg, pList->pNodeList);
+}
+
+// Decode an SNodeListNode (counterpart of nodeListNodeToMsg).
+static int32_t msgToNodeListNode(STlvDecoder* pDecoder, void* pObj) {
+  SNodeListNode* pNode = (SNodeListNode*)pObj;
+
+  int32_t code = TSDB_CODE_SUCCESS;
+  STlv* pTlv = NULL;
+  tlvForEach(pDecoder, pTlv, code) {
+    switch (pTlv->type) {
+      case NODE_LIST_CODE_DATA_TYPE:
+        code = tlvDecodeObjFromTlv(pTlv, msgToDataType, &pNode->dataType);
+        break;
+      case NODE_LIST_CODE_NODE_LIST:
+        code = msgToNodeListFromTlv(pTlv, (void**)&pNode->pNodeList);
+        break;
+      default:
+        break;
+    }
+  }
+
+  return code;
+}
+
+enum { TARGET_CODE_DATA_BLOCK_ID = 1, TARGET_CODE_SLOT_ID, TARGET_CODE_EXPR };
+
+// Serialize an STargetNode: output datablock/slot position and the
+// expression that fills the slot.
+static int32_t targetNodeToMsg(const void* pObj, STlvEncoder* pEncoder) {
+  const STargetNode* pTarget = (const STargetNode*)pObj;
+
+  int32_t code = tlvEncodeI16(pEncoder, TARGET_CODE_DATA_BLOCK_ID, pTarget->dataBlockId);
+  if (TSDB_CODE_SUCCESS != code) return code;
+  code = tlvEncodeI16(pEncoder, TARGET_CODE_SLOT_ID, pTarget->slotId);
+  if (TSDB_CODE_SUCCESS != code) return code;
+  return tlvEncodeObj(pEncoder, TARGET_CODE_EXPR, nodeToMsg, pTarget->pExpr);
+}
+
+// Decode an STargetNode (counterpart of targetNodeToMsg).
+static int32_t msgToTargetNode(STlvDecoder* pDecoder, void* pObj) {
+  STargetNode* pNode = (STargetNode*)pObj;
+
+  int32_t code = TSDB_CODE_SUCCESS;
+  STlv* pTlv = NULL;
+  tlvForEach(pDecoder, pTlv, code) {
+    switch (pTlv->type) {
+      case TARGET_CODE_DATA_BLOCK_ID:
+        code = tlvDecodeI16(pTlv, &pNode->dataBlockId);
+        break;
+      case TARGET_CODE_SLOT_ID:
+        code = tlvDecodeI16(pTlv, &pNode->slotId);
+        break;
+      case TARGET_CODE_EXPR:
+        code = msgToNodeFromTlv(pTlv, (void**)&pNode->pExpr);
+        break;
+      default:
+        break;
+    }
+  }
+
+  return code;
+}
+
+// TLV field codes for SDataBlockDescNode serialization.
+enum {
+  DATA_BLOCK_DESC_CODE_DATA_BLOCK_ID = 1,
+  DATA_BLOCK_DESC_CODE_SLOTS,
+  DATA_BLOCK_DESC_CODE_TOTAL_ROW_SIZE,
+  DATA_BLOCK_DESC_CODE_OUTPUT_ROW_SIZE,
+  DATA_BLOCK_DESC_CODE_PRECISION
+};
+
+// Serialize an SDataBlockDescNode: block id, slot descriptors, row sizes
+// and timestamp precision.
+static int32_t dataBlockDescNodeToMsg(const void* pObj, STlvEncoder* pEncoder) {
+  const SDataBlockDescNode* pDesc = (const SDataBlockDescNode*)pObj;
+
+  int32_t code = tlvEncodeI16(pEncoder, DATA_BLOCK_DESC_CODE_DATA_BLOCK_ID, pDesc->dataBlockId);
+  if (TSDB_CODE_SUCCESS != code) return code;
+  code = tlvEncodeObj(pEncoder, DATA_BLOCK_DESC_CODE_SLOTS, nodeListToMsg, pDesc->pSlots);
+  if (TSDB_CODE_SUCCESS != code) return code;
+  code = tlvEncodeI32(pEncoder, DATA_BLOCK_DESC_CODE_TOTAL_ROW_SIZE, pDesc->totalRowSize);
+  if (TSDB_CODE_SUCCESS != code) return code;
+  code = tlvEncodeI32(pEncoder, DATA_BLOCK_DESC_CODE_OUTPUT_ROW_SIZE, pDesc->outputRowSize);
+  if (TSDB_CODE_SUCCESS != code) return code;
+  return tlvEncodeU8(pEncoder, DATA_BLOCK_DESC_CODE_PRECISION, pDesc->precision);
+}
+
+// Decode an SDataBlockDescNode (counterpart of dataBlockDescNodeToMsg).
+static int32_t msgToDataBlockDescNode(STlvDecoder* pDecoder, void* pObj) {
+  SDataBlockDescNode* pNode = (SDataBlockDescNode*)pObj;
+
+  int32_t code = TSDB_CODE_SUCCESS;
+  STlv* pTlv = NULL;
+  tlvForEach(pDecoder, pTlv, code) {
+    switch (pTlv->type) {
+      case DATA_BLOCK_DESC_CODE_DATA_BLOCK_ID:
+        code = tlvDecodeI16(pTlv, &pNode->dataBlockId);
+        break;
+      case DATA_BLOCK_DESC_CODE_SLOTS:
+        code = msgToNodeListFromTlv(pTlv, (void**)&pNode->pSlots);
+        break;
+      case DATA_BLOCK_DESC_CODE_TOTAL_ROW_SIZE:
+        code = tlvDecodeI32(pTlv, &pNode->totalRowSize);
+        break;
+      case DATA_BLOCK_DESC_CODE_OUTPUT_ROW_SIZE:
+        code = tlvDecodeI32(pTlv, &pNode->outputRowSize);
+        break;
+      case DATA_BLOCK_DESC_CODE_PRECISION:
+        code = tlvDecodeU8(pTlv, &pNode->precision);
+        break;
+      default:
+        break;
+    }
+  }
+
+  return code;
+}
+
+// TLV field codes for SSlotDescNode serialization.
+enum {
+  SLOT_DESC_CODE_SLOT_ID = 1,
+  SLOT_DESC_CODE_DATA_TYPE,
+  SLOT_DESC_CODE_RESERVE,
+  SLOT_DESC_CODE_OUTPUT,
+  SLOT_DESC_CODE_TAG
+};
+
+// Serialize an SSlotDescNode: slot id, data type and the three boolean
+// slot attributes (reserve/output/tag).
+static int32_t slotDescNodeToMsg(const void* pObj, STlvEncoder* pEncoder) {
+  const SSlotDescNode* pSlot = (const SSlotDescNode*)pObj;
+
+  int32_t code = tlvEncodeI16(pEncoder, SLOT_DESC_CODE_SLOT_ID, pSlot->slotId);
+  if (TSDB_CODE_SUCCESS != code) return code;
+  code = tlvEncodeObj(pEncoder, SLOT_DESC_CODE_DATA_TYPE, dataTypeToMsg, &pSlot->dataType);
+  if (TSDB_CODE_SUCCESS != code) return code;
+  code = tlvEncodeBool(pEncoder, SLOT_DESC_CODE_RESERVE, pSlot->reserve);
+  if (TSDB_CODE_SUCCESS != code) return code;
+  code = tlvEncodeBool(pEncoder, SLOT_DESC_CODE_OUTPUT, pSlot->output);
+  if (TSDB_CODE_SUCCESS != code) return code;
+  return tlvEncodeBool(pEncoder, SLOT_DESC_CODE_TAG, pSlot->tag);
+}
+
+// Decode an SSlotDescNode (counterpart of slotDescNodeToMsg).
+static int32_t msgToSlotDescNode(STlvDecoder* pDecoder, void* pObj) {
+  SSlotDescNode* pNode = (SSlotDescNode*)pObj;
+
+  int32_t code = TSDB_CODE_SUCCESS;
+  STlv* pTlv = NULL;
+  tlvForEach(pDecoder, pTlv, code) {
+    switch (pTlv->type) {
+      case SLOT_DESC_CODE_SLOT_ID:
+        code = tlvDecodeI16(pTlv, &pNode->slotId);
+        break;
+      case SLOT_DESC_CODE_DATA_TYPE:
+        code = tlvDecodeObjFromTlv(pTlv, msgToDataType, &pNode->dataType);
+        break;
+      case SLOT_DESC_CODE_RESERVE:
+        code = tlvDecodeBool(pTlv, &pNode->reserve);
+        break;
+      case SLOT_DESC_CODE_OUTPUT:
+        code = tlvDecodeBool(pTlv, &pNode->output);
+        break;
+      case SLOT_DESC_CODE_TAG:
+        code = tlvDecodeBool(pTlv, &pNode->tag);
+        break;
+      default:
+        break;
+    }
+  }
+
+  return code;
+}
+
+// TLV field codes for the common SPhysiNode base serialization.
+enum {
+  PHY_NODE_CODE_OUTPUT_DESC = 1,
+  PHY_NODE_CODE_CONDITIONS,
+  PHY_NODE_CODE_CHILDREN,
+  PHY_NODE_CODE_LIMIT,
+  PHY_NODE_CODE_SLIMIT
+};
+
+// Serialize the common SPhysiNode base: output descriptor, conditions,
+// child subplans and the limit/slimit nodes.
+static int32_t physiNodeToMsg(const void* pObj, STlvEncoder* pEncoder) {
+  const SPhysiNode* pPhysi = (const SPhysiNode*)pObj;
+
+  int32_t code = tlvEncodeObj(pEncoder, PHY_NODE_CODE_OUTPUT_DESC, nodeToMsg, pPhysi->pOutputDataBlockDesc);
+  if (TSDB_CODE_SUCCESS != code) return code;
+  code = tlvEncodeObj(pEncoder, PHY_NODE_CODE_CONDITIONS, nodeToMsg, pPhysi->pConditions);
+  if (TSDB_CODE_SUCCESS != code) return code;
+  code = tlvEncodeObj(pEncoder, PHY_NODE_CODE_CHILDREN, nodeListToMsg, pPhysi->pChildren);
+  if (TSDB_CODE_SUCCESS != code) return code;
+  code = tlvEncodeObj(pEncoder, PHY_NODE_CODE_LIMIT, nodeToMsg, pPhysi->pLimit);
+  if (TSDB_CODE_SUCCESS != code) return code;
+  return tlvEncodeObj(pEncoder, PHY_NODE_CODE_SLIMIT, nodeToMsg, pPhysi->pSlimit);
+}
+
+// Decode the common SPhysiNode base (counterpart of physiNodeToMsg).
+static int32_t msgToPhysiNode(STlvDecoder* pDecoder, void* pObj) {
+  SPhysiNode* pNode = (SPhysiNode*)pObj;
+
+  int32_t code = TSDB_CODE_SUCCESS;
+  STlv* pTlv = NULL;
+  tlvForEach(pDecoder, pTlv, code) {
+    switch (pTlv->type) {
+      case PHY_NODE_CODE_OUTPUT_DESC:
+        code = msgToNodeFromTlv(pTlv, (void**)&pNode->pOutputDataBlockDesc);
+        break;
+      case PHY_NODE_CODE_CONDITIONS:
+        code = msgToNodeFromTlv(pTlv, (void**)&pNode->pConditions);
+        break;
+      case PHY_NODE_CODE_CHILDREN:
+        code = msgToNodeListFromTlv(pTlv, (void**)&pNode->pChildren);
+        break;
+      case PHY_NODE_CODE_LIMIT:
+        code = msgToNodeFromTlv(pTlv, (void**)&pNode->pLimit);
+        break;
+      case PHY_NODE_CODE_SLIMIT:
+        code = msgToNodeFromTlv(pTlv, (void**)&pNode->pSlimit);
+        break;
+      default:
+        break;
+    }
+  }
+
+  return code;
+}
+
+// TLV field codes for the common SScanPhysiNode base serialization.
+enum {
+  PHY_SCAN_CODE_BASE_NODE = 1,
+  PHY_SCAN_CODE_SCAN_COLS,
+  PHY_SCAN_CODE_SCAN_PSEUDO_COLS,
+  PHY_SCAN_CODE_BASE_UID,
+  PHY_SCAN_CODE_BASE_SUID,
+  PHY_SCAN_CODE_BASE_TABLE_TYPE,
+  PHY_SCAN_CODE_BASE_TABLE_NAME
+};
+
+// Serialize the common SScanPhysiNode base: physi-node base, scan and
+// pseudo column lists, table uid/suid/type and the table name.
+static int32_t physiScanNodeToMsg(const void* pObj, STlvEncoder* pEncoder) {
+  const SScanPhysiNode* pScan = (const SScanPhysiNode*)pObj;
+
+  int32_t code = tlvEncodeObj(pEncoder, PHY_SCAN_CODE_BASE_NODE, physiNodeToMsg, &pScan->node);
+  if (TSDB_CODE_SUCCESS != code) return code;
+  code = tlvEncodeObj(pEncoder, PHY_SCAN_CODE_SCAN_COLS, nodeListToMsg, pScan->pScanCols);
+  if (TSDB_CODE_SUCCESS != code) return code;
+  code = tlvEncodeObj(pEncoder, PHY_SCAN_CODE_SCAN_PSEUDO_COLS, nodeListToMsg, pScan->pScanPseudoCols);
+  if (TSDB_CODE_SUCCESS != code) return code;
+  code = tlvEncodeU64(pEncoder, PHY_SCAN_CODE_BASE_UID, pScan->uid);
+  if (TSDB_CODE_SUCCESS != code) return code;
+  code = tlvEncodeU64(pEncoder, PHY_SCAN_CODE_BASE_SUID, pScan->suid);
+  if (TSDB_CODE_SUCCESS != code) return code;
+  code = tlvEncodeI8(pEncoder, PHY_SCAN_CODE_BASE_TABLE_TYPE, pScan->tableType);
+  if (TSDB_CODE_SUCCESS != code) return code;
+  return tlvEncodeObj(pEncoder, PHY_SCAN_CODE_BASE_TABLE_NAME, nameToMsg, &pScan->tableName);
+}
+
+// Decode the common SScanPhysiNode base (counterpart of physiScanNodeToMsg).
+static int32_t msgToPhysiScanNode(STlvDecoder* pDecoder, void* pObj) {
+  SScanPhysiNode* pNode = (SScanPhysiNode*)pObj;
+
+  int32_t code = TSDB_CODE_SUCCESS;
+  STlv* pTlv = NULL;
+  tlvForEach(pDecoder, pTlv, code) {
+    switch (pTlv->type) {
+      case PHY_SCAN_CODE_BASE_NODE:
+        code = tlvDecodeObjFromTlv(pTlv, msgToPhysiNode, &pNode->node);
+        break;
+      case PHY_SCAN_CODE_SCAN_COLS:
+        code = msgToNodeListFromTlv(pTlv, (void**)&pNode->pScanCols);
+        break;
+      case PHY_SCAN_CODE_SCAN_PSEUDO_COLS:
+        code = msgToNodeListFromTlv(pTlv, (void**)&pNode->pScanPseudoCols);
+        break;
+      case PHY_SCAN_CODE_BASE_UID:
+        code = tlvDecodeU64(pTlv, &pNode->uid);
+        break;
+      case PHY_SCAN_CODE_BASE_SUID:
+        code = tlvDecodeU64(pTlv, &pNode->suid);
+        break;
+      case PHY_SCAN_CODE_BASE_TABLE_TYPE:
+        code = tlvDecodeI8(pTlv, &pNode->tableType);
+        break;
+      case PHY_SCAN_CODE_BASE_TABLE_NAME:
+        code = tlvDecodeObjFromTlv(pTlv, msgToName, &pNode->tableName);
+        break;
+      default:
+        break;
+    }
+  }
+
+  return code;
+}
+
+enum { PHY_LAST_ROW_SCAN_CODE_SCAN = 1, PHY_LAST_ROW_SCAN_CODE_GROUP_TAGS, PHY_LAST_ROW_SCAN_CODE_GROUP_SORT };
+
+// Serialize an SLastRowScanPhysiNode: scan base, group tags and the
+// group-sort flag.
+static int32_t physiLastRowScanNodeToMsg(const void* pObj, STlvEncoder* pEncoder) {
+  const SLastRowScanPhysiNode* pScan = (const SLastRowScanPhysiNode*)pObj;
+
+  int32_t code = tlvEncodeObj(pEncoder, PHY_LAST_ROW_SCAN_CODE_SCAN, physiScanNodeToMsg, &pScan->scan);
+  if (TSDB_CODE_SUCCESS != code) return code;
+  code = tlvEncodeObj(pEncoder, PHY_LAST_ROW_SCAN_CODE_GROUP_TAGS, nodeListToMsg, pScan->pGroupTags);
+  if (TSDB_CODE_SUCCESS != code) return code;
+  return tlvEncodeBool(pEncoder, PHY_LAST_ROW_SCAN_CODE_GROUP_SORT, pScan->groupSort);
+}
+
+// Decode an SLastRowScanPhysiNode (counterpart of physiLastRowScanNodeToMsg).
+static int32_t msgToPhysiLastRowScanNode(STlvDecoder* pDecoder, void* pObj) {
+  SLastRowScanPhysiNode* pNode = (SLastRowScanPhysiNode*)pObj;
+
+  int32_t code = TSDB_CODE_SUCCESS;
+  STlv* pTlv = NULL;
+  tlvForEach(pDecoder, pTlv, code) {
+    switch (pTlv->type) {
+      case PHY_LAST_ROW_SCAN_CODE_SCAN:
+        code = tlvDecodeObjFromTlv(pTlv, msgToPhysiScanNode, &pNode->scan);
+        break;
+      case PHY_LAST_ROW_SCAN_CODE_GROUP_TAGS:
+        code = msgToNodeListFromTlv(pTlv, (void**)&pNode->pGroupTags);
+        break;
+      case PHY_LAST_ROW_SCAN_CODE_GROUP_SORT:
+        code = tlvDecodeBool(pTlv, &pNode->groupSort);
+        break;
+      default:
+        break;
+    }
+  }
+
+  return code;
+}
+
+// TLV field codes for STableScanPhysiNode serialization.
+enum {
+  PHY_TABLE_SCAN_CODE_SCAN = 1,
+  PHY_TABLE_SCAN_CODE_SCAN_COUNT,
+  PHY_TABLE_SCAN_CODE_REVERSE_SCAN_COUNT,
+  PHY_TABLE_SCAN_CODE_SCAN_RANGE,
+  PHY_TABLE_SCAN_CODE_RATIO,
+  PHY_TABLE_SCAN_CODE_DATA_REQUIRED,
+  PHY_TABLE_SCAN_CODE_DYN_SCAN_FUNCS,
+  PHY_TABLE_SCAN_CODE_GROUP_TAGS,
+  PHY_TABLE_SCAN_CODE_GROUP_SORT,
+  PHY_TABLE_SCAN_CODE_INTERVAL,
+  PHY_TABLE_SCAN_CODE_OFFSET,
+  PHY_TABLE_SCAN_CODE_SLIDING,
+  PHY_TABLE_SCAN_CODE_INTERVAL_UNIT,
+  PHY_TABLE_SCAN_CODE_SLIDING_UNIT,
+  PHY_TABLE_SCAN_CODE_TRIGGER_TYPE,
+  PHY_TABLE_SCAN_CODE_WATERMARK,
+  PHY_TABLE_SCAN_CODE_IG_EXPIRED,
+  PHY_TABLE_SCAN_CODE_ASSIGN_BLOCK_UID,
+};
+
+// Serialize an STableScanPhysiNode: scan base, forward/reverse scan counts
+// (scanSeq[0]/scanSeq[1]), scan time range, sampling/interval settings and
+// the stream-related flags.
+static int32_t physiTableScanNodeToMsg(const void* pObj, STlvEncoder* pEncoder) {
+  const STableScanPhysiNode* pScan = (const STableScanPhysiNode*)pObj;
+
+  int32_t code = tlvEncodeObj(pEncoder, PHY_TABLE_SCAN_CODE_SCAN, physiScanNodeToMsg, &pScan->scan);
+  if (TSDB_CODE_SUCCESS != code) return code;
+  code = tlvEncodeU8(pEncoder, PHY_TABLE_SCAN_CODE_SCAN_COUNT, pScan->scanSeq[0]);
+  if (TSDB_CODE_SUCCESS != code) return code;
+  code = tlvEncodeU8(pEncoder, PHY_TABLE_SCAN_CODE_REVERSE_SCAN_COUNT, pScan->scanSeq[1]);
+  if (TSDB_CODE_SUCCESS != code) return code;
+  code = tlvEncodeObj(pEncoder, PHY_TABLE_SCAN_CODE_SCAN_RANGE, timeWindowToMsg, &pScan->scanRange);
+  if (TSDB_CODE_SUCCESS != code) return code;
+  code = tlvEncodeDouble(pEncoder, PHY_TABLE_SCAN_CODE_RATIO, pScan->ratio);
+  if (TSDB_CODE_SUCCESS != code) return code;
+  code = tlvEncodeI32(pEncoder, PHY_TABLE_SCAN_CODE_DATA_REQUIRED, pScan->dataRequired);
+  if (TSDB_CODE_SUCCESS != code) return code;
+  code = tlvEncodeObj(pEncoder, PHY_TABLE_SCAN_CODE_DYN_SCAN_FUNCS, nodeListToMsg, pScan->pDynamicScanFuncs);
+  if (TSDB_CODE_SUCCESS != code) return code;
+  code = tlvEncodeObj(pEncoder, PHY_TABLE_SCAN_CODE_GROUP_TAGS, nodeListToMsg, pScan->pGroupTags);
+  if (TSDB_CODE_SUCCESS != code) return code;
+  code = tlvEncodeBool(pEncoder, PHY_TABLE_SCAN_CODE_GROUP_SORT, pScan->groupSort);
+  if (TSDB_CODE_SUCCESS != code) return code;
+  code = tlvEncodeI64(pEncoder, PHY_TABLE_SCAN_CODE_INTERVAL, pScan->interval);
+  if (TSDB_CODE_SUCCESS != code) return code;
+  code = tlvEncodeI64(pEncoder, PHY_TABLE_SCAN_CODE_OFFSET, pScan->offset);
+  if (TSDB_CODE_SUCCESS != code) return code;
+  code = tlvEncodeI64(pEncoder, PHY_TABLE_SCAN_CODE_SLIDING, pScan->sliding);
+  if (TSDB_CODE_SUCCESS != code) return code;
+  code = tlvEncodeI8(pEncoder, PHY_TABLE_SCAN_CODE_INTERVAL_UNIT, pScan->intervalUnit);
+  if (TSDB_CODE_SUCCESS != code) return code;
+  code = tlvEncodeI8(pEncoder, PHY_TABLE_SCAN_CODE_SLIDING_UNIT, pScan->slidingUnit);
+  if (TSDB_CODE_SUCCESS != code) return code;
+  code = tlvEncodeI8(pEncoder, PHY_TABLE_SCAN_CODE_TRIGGER_TYPE, pScan->triggerType);
+  if (TSDB_CODE_SUCCESS != code) return code;
+  code = tlvEncodeI64(pEncoder, PHY_TABLE_SCAN_CODE_WATERMARK, pScan->watermark);
+  if (TSDB_CODE_SUCCESS != code) return code;
+  code = tlvEncodeI8(pEncoder, PHY_TABLE_SCAN_CODE_IG_EXPIRED, pScan->igExpired);
+  if (TSDB_CODE_SUCCESS != code) return code;
+  return tlvEncodeBool(pEncoder, PHY_TABLE_SCAN_CODE_ASSIGN_BLOCK_UID, pScan->assignBlockUid);
+}
+
+// Decode an STableScanPhysiNode (counterpart of physiTableScanNodeToMsg).
+// SCAN_COUNT/REVERSE_SCAN_COUNT land in scanSeq[0]/scanSeq[1] respectively.
+static int32_t msgToPhysiTableScanNode(STlvDecoder* pDecoder, void* pObj) {
+  STableScanPhysiNode* pNode = (STableScanPhysiNode*)pObj;
+
+  int32_t code = TSDB_CODE_SUCCESS;
+  STlv* pTlv = NULL;
+  tlvForEach(pDecoder, pTlv, code) {
+    switch (pTlv->type) {
+      case PHY_TABLE_SCAN_CODE_SCAN:
+        code = tlvDecodeObjFromTlv(pTlv, msgToPhysiScanNode, &pNode->scan);
+        break;
+      case PHY_TABLE_SCAN_CODE_SCAN_COUNT:
+        code = tlvDecodeU8(pTlv, pNode->scanSeq);
+        break;
+      case PHY_TABLE_SCAN_CODE_REVERSE_SCAN_COUNT:
+        code = tlvDecodeU8(pTlv, pNode->scanSeq + 1);
+        break;
+      case PHY_TABLE_SCAN_CODE_SCAN_RANGE:
+        code = tlvDecodeObjFromTlv(pTlv, msgToTimeWindow, &pNode->scanRange);
+        break;
+      case PHY_TABLE_SCAN_CODE_RATIO:
+        code = tlvDecodeDouble(pTlv, &pNode->ratio);
+        break;
+      case PHY_TABLE_SCAN_CODE_DATA_REQUIRED:
+        code = tlvDecodeI32(pTlv, &pNode->dataRequired);
+        break;
+      case PHY_TABLE_SCAN_CODE_DYN_SCAN_FUNCS:
+        code = msgToNodeListFromTlv(pTlv, (void**)&pNode->pDynamicScanFuncs);
+        break;
+      case PHY_TABLE_SCAN_CODE_GROUP_TAGS:
+        code = msgToNodeListFromTlv(pTlv, (void**)&pNode->pGroupTags);
+        break;
+      case PHY_TABLE_SCAN_CODE_GROUP_SORT:
+        code = tlvDecodeBool(pTlv, &pNode->groupSort);
+        break;
+      case PHY_TABLE_SCAN_CODE_INTERVAL:
+        code = tlvDecodeI64(pTlv, &pNode->interval);
+        break;
+      case PHY_TABLE_SCAN_CODE_OFFSET:
+        code = tlvDecodeI64(pTlv, &pNode->offset);
+        break;
+      case PHY_TABLE_SCAN_CODE_SLIDING:
+        code = tlvDecodeI64(pTlv, &pNode->sliding);
+        break;
+      case PHY_TABLE_SCAN_CODE_INTERVAL_UNIT:
+        code = tlvDecodeI8(pTlv, &pNode->intervalUnit);
+        break;
+      case PHY_TABLE_SCAN_CODE_SLIDING_UNIT:
+        code = tlvDecodeI8(pTlv, &pNode->slidingUnit);
+        break;
+      case PHY_TABLE_SCAN_CODE_TRIGGER_TYPE:
+        code = tlvDecodeI8(pTlv, &pNode->triggerType);
+        break;
+      case PHY_TABLE_SCAN_CODE_WATERMARK:
+        code = tlvDecodeI64(pTlv, &pNode->watermark);
+        break;
+      case PHY_TABLE_SCAN_CODE_IG_EXPIRED:
+        code = tlvDecodeI8(pTlv, &pNode->igExpired);
+        break;
+      case PHY_TABLE_SCAN_CODE_ASSIGN_BLOCK_UID:
+        code = tlvDecodeBool(pTlv, &pNode->assignBlockUid);
+        break;
+      default:
+        break;
+    }
+  }
+
+  return code;
+}
+
+enum { EP_CODE_FQDN = 1, EP_CODE_port };
+
+// Serialize an SEp endpoint: FQDN string then port number.
+static int32_t epToMsg(const void* pObj, STlvEncoder* pEncoder) {
+  const SEp* pEp = (const SEp*)pObj;
+
+  int32_t code = tlvEncodeCStr(pEncoder, EP_CODE_FQDN, pEp->fqdn);
+  if (TSDB_CODE_SUCCESS != code) return code;
+  return tlvEncodeU16(pEncoder, EP_CODE_port, pEp->port);
+}
+
+// Decode an SEp endpoint (counterpart of epToMsg).
+static int32_t msgToEp(STlvDecoder* pDecoder, void* pObj) {
+  SEp* pNode = (SEp*)pObj;
+
+  int32_t code = TSDB_CODE_SUCCESS;
+  STlv* pTlv = NULL;
+  tlvForEach(pDecoder, pTlv, code) {
+    switch (pTlv->type) {
+      case EP_CODE_FQDN:
+        code = tlvDecodeCStr(pTlv, pNode->fqdn);
+        break;
+      case EP_CODE_port:
+        code = tlvDecodeU16(pTlv, &pNode->port);
+        break;
+      default:
+        break;
+    }
+  }
+
+  return code;
+}
+
+enum { EP_SET_CODE_IN_USE = 1, EP_SET_CODE_NUM_OF_EPS, EP_SET_CODE_EPS };
+
+// Serialize an SEpSet: the in-use index, endpoint count, then only the
+// first numOfEps entries of the eps array.
+static int32_t epSetToMsg(const void* pObj, STlvEncoder* pEncoder) {
+  const SEpSet* pSet = (const SEpSet*)pObj;
+
+  int32_t code = tlvEncodeI8(pEncoder, EP_SET_CODE_IN_USE, pSet->inUse);
+  if (TSDB_CODE_SUCCESS != code) return code;
+  code = tlvEncodeI8(pEncoder, EP_SET_CODE_NUM_OF_EPS, pSet->numOfEps);
+  if (TSDB_CODE_SUCCESS != code) return code;
+  return tlvEncodeObjArray(pEncoder, EP_SET_CODE_EPS, epToMsg, pSet->eps, sizeof(SEp), pSet->numOfEps);
+}
+
+// Decode an SEpSet (counterpart of epSetToMsg); the eps array is decoded
+// in place, element count is implied by the TLV payload.
+static int32_t msgToEpSet(STlvDecoder* pDecoder, void* pObj) {
+  SEpSet* pNode = (SEpSet*)pObj;
+
+  int32_t code = TSDB_CODE_SUCCESS;
+  STlv* pTlv = NULL;
+  tlvForEach(pDecoder, pTlv, code) {
+    switch (pTlv->type) {
+      case EP_SET_CODE_IN_USE:
+        code = tlvDecodeI8(pTlv, &pNode->inUse);
+        break;
+      case EP_SET_CODE_NUM_OF_EPS:
+        code = tlvDecodeI8(pTlv, &pNode->numOfEps);
+        break;
+      case EP_SET_CODE_EPS:
+        code = tlvDecodeObjArrayFromTlv(pTlv, msgToEp, pNode->eps, sizeof(SEp));
+        break;
+      default:
+        break;
+    }
+  }
+
+  return code;
+}
+
+// TLV field codes for SSystemTableScanPhysiNode serialization.
+enum {
+  PHY_SYSTABLE_SCAN_CODE_SCAN = 1,
+  PHY_SYSTABLE_SCAN_CODE_MGMT_EP_SET,
+  PHY_SYSTABLE_SCAN_CODE_SHOW_REWRITE,
+  PHY_SYSTABLE_SCAN_CODE_ACCOUNT_ID,
+  PHY_SYSTABLE_SCAN_CODE_SYS_INFO
+};
+
+// Serialize an SSystemTableScanPhysiNode: scan base, mnode endpoint set,
+// show-rewrite flag, account id and sys-info flag.
+static int32_t physiSysTableScanNodeToMsg(const void* pObj, STlvEncoder* pEncoder) {
+  const SSystemTableScanPhysiNode* pScan = (const SSystemTableScanPhysiNode*)pObj;
+
+  int32_t code = tlvEncodeObj(pEncoder, PHY_SYSTABLE_SCAN_CODE_SCAN, physiScanNodeToMsg, &pScan->scan);
+  if (TSDB_CODE_SUCCESS != code) return code;
+  code = tlvEncodeObj(pEncoder, PHY_SYSTABLE_SCAN_CODE_MGMT_EP_SET, epSetToMsg, &pScan->mgmtEpSet);
+  if (TSDB_CODE_SUCCESS != code) return code;
+  code = tlvEncodeBool(pEncoder, PHY_SYSTABLE_SCAN_CODE_SHOW_REWRITE, pScan->showRewrite);
+  if (TSDB_CODE_SUCCESS != code) return code;
+  code = tlvEncodeI32(pEncoder, PHY_SYSTABLE_SCAN_CODE_ACCOUNT_ID, pScan->accountId);
+  if (TSDB_CODE_SUCCESS != code) return code;
+  return tlvEncodeBool(pEncoder, PHY_SYSTABLE_SCAN_CODE_SYS_INFO, pScan->sysInfo);
+}
+
+static int32_t msgToPhysiSysTableScanNode(STlvDecoder* pDecoder, void* pObj) {
+ SSystemTableScanPhysiNode* pNode = (SSystemTableScanPhysiNode*)pObj;
+
+ int32_t code = TSDB_CODE_SUCCESS;
+ STlv* pTlv = NULL;
+ tlvForEach(pDecoder, pTlv, code) {
+ switch (pTlv->type) {
+ case PHY_SYSTABLE_SCAN_CODE_SCAN:
+ code = tlvDecodeObjFromTlv(pTlv, msgToPhysiScanNode, &pNode->scan);
+ break;
+ case PHY_SYSTABLE_SCAN_CODE_MGMT_EP_SET:
+ code = tlvDecodeObjFromTlv(pTlv, msgToEpSet, &pNode->mgmtEpSet);
+ break;
+ case PHY_SYSTABLE_SCAN_CODE_SHOW_REWRITE:
+ code = tlvDecodeBool(pTlv, &pNode->showRewrite);
+ break;
+ case PHY_SYSTABLE_SCAN_CODE_ACCOUNT_ID:
+ code = tlvDecodeI32(pTlv, &pNode->accountId);
+ break;
+ case PHY_SYSTABLE_SCAN_CODE_SYS_INFO:
+ code = tlvDecodeBool(pTlv, &pNode->sysInfo);
+ break;
+ default:
+ break;
+ }
+ }
+
+ return code;
+}
+
+// TLV field tags for SProjectPhysiNode serialization.
+enum {
+  PHY_PROJECT_CODE_BASE_NODE = 1,
+  PHY_PROJECT_CODE_PROJECTIONS,
+  PHY_PROJECT_CODE_MERGE_DATA_BLOCK,
+  PHY_PROJECT_CODE_IGNORE_GROUP_ID
+};
+
+// Serializes a projection node: base physi-node, projection list, then flags,
+// stopping at the first encode error.
+static int32_t physiProjectNodeToMsg(const void* pObj, STlvEncoder* pEncoder) {
+  const SProjectPhysiNode* pNode = (const SProjectPhysiNode*)pObj;
+
+  int32_t code = tlvEncodeObj(pEncoder, PHY_PROJECT_CODE_BASE_NODE, physiNodeToMsg, &pNode->node);
+  if (TSDB_CODE_SUCCESS == code) {
+    code = tlvEncodeObj(pEncoder, PHY_PROJECT_CODE_PROJECTIONS, nodeListToMsg, pNode->pProjections);
+  }
+  if (TSDB_CODE_SUCCESS == code) {
+    code = tlvEncodeBool(pEncoder, PHY_PROJECT_CODE_MERGE_DATA_BLOCK, pNode->mergeDataBlock);
+  }
+  if (TSDB_CODE_SUCCESS == code) {
+    code = tlvEncodeBool(pEncoder, PHY_PROJECT_CODE_IGNORE_GROUP_ID, pNode->ignoreGroupId);
+  }
+
+  return code;
+}
+
+// Deserializes a projection node; unrecognized tags are ignored.
+static int32_t msgToPhysiProjectNode(STlvDecoder* pDecoder, void* pObj) {
+  SProjectPhysiNode* pNode = (SProjectPhysiNode*)pObj;
+
+  int32_t code = TSDB_CODE_SUCCESS;
+  STlv* pTlv = NULL;
+  tlvForEach(pDecoder, pTlv, code) {
+    switch (pTlv->type) {
+      case PHY_PROJECT_CODE_BASE_NODE:
+        code = tlvDecodeObjFromTlv(pTlv, msgToPhysiNode, &pNode->node);
+        break;
+      case PHY_PROJECT_CODE_PROJECTIONS:
+        code = msgToNodeListFromTlv(pTlv, (void**)&pNode->pProjections);
+        break;
+      case PHY_PROJECT_CODE_MERGE_DATA_BLOCK:
+        code = tlvDecodeBool(pTlv, &pNode->mergeDataBlock);
+        break;
+      case PHY_PROJECT_CODE_IGNORE_GROUP_ID:
+        code = tlvDecodeBool(pTlv, &pNode->ignoreGroupId);
+        break;
+      default:
+        break;
+    }
+  }
+
+  return code;
+}
+
+// TLV field tags for SSortMergeJoinPhysiNode serialization.
+enum {
+  PHY_SORT_MERGE_JOIN_CODE_BASE_NODE = 1,
+  PHY_SORT_MERGE_JOIN_CODE_JOIN_TYPE,
+  PHY_SORT_MERGE_JOIN_CODE_MERGE_CONDITION,
+  PHY_SORT_MERGE_JOIN_CODE_ON_CONDITIONS,
+  PHY_SORT_MERGE_JOIN_CODE_TARGETS,
+  PHY_SORT_MERGE_JOIN_CODE_INPUT_TS_ORDER
+};
+
+// Serializes a sort-merge join node; enum-typed fields (join type, ts order)
+// go through tlvEncodeEnum. Stops at the first encode error.
+static int32_t physiJoinNodeToMsg(const void* pObj, STlvEncoder* pEncoder) {
+  const SSortMergeJoinPhysiNode* pNode = (const SSortMergeJoinPhysiNode*)pObj;
+
+  int32_t code = tlvEncodeObj(pEncoder, PHY_SORT_MERGE_JOIN_CODE_BASE_NODE, physiNodeToMsg, &pNode->node);
+  if (TSDB_CODE_SUCCESS == code) {
+    code = tlvEncodeEnum(pEncoder, PHY_SORT_MERGE_JOIN_CODE_JOIN_TYPE, pNode->joinType);
+  }
+  if (TSDB_CODE_SUCCESS == code) {
+    code = tlvEncodeObj(pEncoder, PHY_SORT_MERGE_JOIN_CODE_MERGE_CONDITION, nodeToMsg, pNode->pMergeCondition);
+  }
+  if (TSDB_CODE_SUCCESS == code) {
+    code = tlvEncodeObj(pEncoder, PHY_SORT_MERGE_JOIN_CODE_ON_CONDITIONS, nodeToMsg, pNode->pOnConditions);
+  }
+  if (TSDB_CODE_SUCCESS == code) {
+    code = tlvEncodeObj(pEncoder, PHY_SORT_MERGE_JOIN_CODE_TARGETS, nodeListToMsg, pNode->pTargets);
+  }
+  if (TSDB_CODE_SUCCESS == code) {
+    code = tlvEncodeEnum(pEncoder, PHY_SORT_MERGE_JOIN_CODE_INPUT_TS_ORDER, pNode->inputTsOrder);
+  }
+
+  return code;
+}
+
+// Deserializes a sort-merge join node; enum fields pass sizeof(field) so the
+// decoder writes the correct width. Unrecognized tags are ignored.
+static int32_t msgToPhysiJoinNode(STlvDecoder* pDecoder, void* pObj) {
+  SSortMergeJoinPhysiNode* pNode = (SSortMergeJoinPhysiNode*)pObj;
+
+  int32_t code = TSDB_CODE_SUCCESS;
+  STlv* pTlv = NULL;
+  tlvForEach(pDecoder, pTlv, code) {
+    switch (pTlv->type) {
+      case PHY_SORT_MERGE_JOIN_CODE_BASE_NODE:
+        code = tlvDecodeObjFromTlv(pTlv, msgToPhysiNode, &pNode->node);
+        break;
+      case PHY_SORT_MERGE_JOIN_CODE_JOIN_TYPE:
+        code = tlvDecodeEnum(pTlv, &pNode->joinType, sizeof(pNode->joinType));
+        break;
+      case PHY_SORT_MERGE_JOIN_CODE_MERGE_CONDITION:
+        code = msgToNodeFromTlv(pTlv, (void**)&pNode->pMergeCondition);
+        break;
+      case PHY_SORT_MERGE_JOIN_CODE_ON_CONDITIONS:
+        code = msgToNodeFromTlv(pTlv, (void**)&pNode->pOnConditions);
+        break;
+      case PHY_SORT_MERGE_JOIN_CODE_TARGETS:
+        code = msgToNodeListFromTlv(pTlv, (void**)&pNode->pTargets);
+        break;
+      case PHY_SORT_MERGE_JOIN_CODE_INPUT_TS_ORDER:
+        code = tlvDecodeEnum(pTlv, &pNode->inputTsOrder, sizeof(pNode->inputTsOrder));
+        break;
+      default:
+        break;
+    }
+  }
+
+  return code;
+}
+
+// TLV field tags for SAggPhysiNode serialization.
+enum {
+  PHY_AGG_CODE_BASE_NODE = 1,
+  PHY_AGG_CODE_EXPR,
+  PHY_AGG_CODE_GROUP_KEYS,
+  PHY_AGG_CODE_AGG_FUNCS,
+  PHY_AGG_CODE_MERGE_DATA_BLOCK
+};
+
+// Serializes an aggregate node: base node, expression/group-key/agg-func lists,
+// then the merge flag. Stops at the first encode error.
+static int32_t physiAggNodeToMsg(const void* pObj, STlvEncoder* pEncoder) {
+  const SAggPhysiNode* pNode = (const SAggPhysiNode*)pObj;
+
+  int32_t code = tlvEncodeObj(pEncoder, PHY_AGG_CODE_BASE_NODE, physiNodeToMsg, &pNode->node);
+  if (TSDB_CODE_SUCCESS == code) {
+    code = tlvEncodeObj(pEncoder, PHY_AGG_CODE_EXPR, nodeListToMsg, pNode->pExprs);
+  }
+  if (TSDB_CODE_SUCCESS == code) {
+    code = tlvEncodeObj(pEncoder, PHY_AGG_CODE_GROUP_KEYS, nodeListToMsg, pNode->pGroupKeys);
+  }
+  if (TSDB_CODE_SUCCESS == code) {
+    code = tlvEncodeObj(pEncoder, PHY_AGG_CODE_AGG_FUNCS, nodeListToMsg, pNode->pAggFuncs);
+  }
+  if (TSDB_CODE_SUCCESS == code) {
+    code = tlvEncodeBool(pEncoder, PHY_AGG_CODE_MERGE_DATA_BLOCK, pNode->mergeDataBlock);
+  }
+
+  return code;
+}
+
+// Deserializes an aggregate node; unrecognized tags are ignored.
+static int32_t msgToPhysiAggNode(STlvDecoder* pDecoder, void* pObj) {
+  SAggPhysiNode* pNode = (SAggPhysiNode*)pObj;
+
+  int32_t code = TSDB_CODE_SUCCESS;
+  STlv* pTlv = NULL;
+  tlvForEach(pDecoder, pTlv, code) {
+    switch (pTlv->type) {
+      case PHY_AGG_CODE_BASE_NODE:
+        code = tlvDecodeObjFromTlv(pTlv, msgToPhysiNode, &pNode->node);
+        break;
+      case PHY_AGG_CODE_EXPR:
+        code = msgToNodeListFromTlv(pTlv, (void**)&pNode->pExprs);
+        break;
+      case PHY_AGG_CODE_GROUP_KEYS:
+        code = msgToNodeListFromTlv(pTlv, (void**)&pNode->pGroupKeys);
+        break;
+      case PHY_AGG_CODE_AGG_FUNCS:
+        code = msgToNodeListFromTlv(pTlv, (void**)&pNode->pAggFuncs);
+        break;
+      case PHY_AGG_CODE_MERGE_DATA_BLOCK:
+        code = tlvDecodeBool(pTlv, &pNode->mergeDataBlock);
+        break;
+      default:
+        break;
+    }
+  }
+
+  return code;
+}
+
+// TLV field tags for SExchangePhysiNode serialization.
+enum {
+  PHY_EXCHANGE_CODE_BASE_NODE = 1,
+  PHY_EXCHANGE_CODE_SRC_GROUP_ID,
+  PHY_EXCHANGE_CODE_SINGLE_CHANNEL,
+  PHY_EXCHANGE_CODE_SRC_ENDPOINTS
+};
+
+// Serializes an exchange node; stops at the first encode error.
+static int32_t physiExchangeNodeToMsg(const void* pObj, STlvEncoder* pEncoder) {
+  const SExchangePhysiNode* pNode = (const SExchangePhysiNode*)pObj;
+
+  int32_t code = tlvEncodeObj(pEncoder, PHY_EXCHANGE_CODE_BASE_NODE, physiNodeToMsg, &pNode->node);
+  if (TSDB_CODE_SUCCESS == code) {
+    code = tlvEncodeI32(pEncoder, PHY_EXCHANGE_CODE_SRC_GROUP_ID, pNode->srcGroupId);
+  }
+  if (TSDB_CODE_SUCCESS == code) {
+    code = tlvEncodeBool(pEncoder, PHY_EXCHANGE_CODE_SINGLE_CHANNEL, pNode->singleChannel);
+  }
+  if (TSDB_CODE_SUCCESS == code) {
+    code = tlvEncodeObj(pEncoder, PHY_EXCHANGE_CODE_SRC_ENDPOINTS, nodeListToMsg, pNode->pSrcEndPoints);
+  }
+
+  return code;
+}
+
+// Deserializes an exchange node; unrecognized tags are ignored.
+static int32_t msgToPhysiExchangeNode(STlvDecoder* pDecoder, void* pObj) {
+  SExchangePhysiNode* pNode = (SExchangePhysiNode*)pObj;
+
+  int32_t code = TSDB_CODE_SUCCESS;
+  STlv* pTlv = NULL;
+  tlvForEach(pDecoder, pTlv, code) {
+    switch (pTlv->type) {
+      case PHY_EXCHANGE_CODE_BASE_NODE:
+        code = tlvDecodeObjFromTlv(pTlv, msgToPhysiNode, &pNode->node);
+        break;
+      case PHY_EXCHANGE_CODE_SRC_GROUP_ID:
+        code = tlvDecodeI32(pTlv, &pNode->srcGroupId);
+        break;
+      case PHY_EXCHANGE_CODE_SINGLE_CHANNEL:
+        code = tlvDecodeBool(pTlv, &pNode->singleChannel);
+        break;
+      case PHY_EXCHANGE_CODE_SRC_ENDPOINTS:
+        code = msgToNodeListFromTlv(pTlv, (void**)&pNode->pSrcEndPoints);
+        break;
+      default:
+        break;
+    }
+  }
+
+  return code;
+}
+
+// TLV field tags for SMergePhysiNode serialization.
+enum {
+  PHY_MERGE_CODE_BASE_NODE = 1,
+  PHY_MERGE_CODE_MERGE_KEYS,
+  PHY_MERGE_CODE_TARGETS,
+  PHY_MERGE_CODE_NUM_OF_CHANNELS,
+  PHY_MERGE_CODE_SRC_GROUP_ID,
+  PHY_MERGE_CODE_GROUP_SORT
+};
+
+// Serializes a merge node; stops at the first encode error.
+static int32_t physiMergeNodeToMsg(const void* pObj, STlvEncoder* pEncoder) {
+  const SMergePhysiNode* pNode = (const SMergePhysiNode*)pObj;
+
+  int32_t code = tlvEncodeObj(pEncoder, PHY_MERGE_CODE_BASE_NODE, physiNodeToMsg, &pNode->node);
+  if (TSDB_CODE_SUCCESS == code) {
+    code = tlvEncodeObj(pEncoder, PHY_MERGE_CODE_MERGE_KEYS, nodeListToMsg, pNode->pMergeKeys);
+  }
+  if (TSDB_CODE_SUCCESS == code) {
+    code = tlvEncodeObj(pEncoder, PHY_MERGE_CODE_TARGETS, nodeListToMsg, pNode->pTargets);
+  }
+  if (TSDB_CODE_SUCCESS == code) {
+    code = tlvEncodeI32(pEncoder, PHY_MERGE_CODE_NUM_OF_CHANNELS, pNode->numOfChannels);
+  }
+  if (TSDB_CODE_SUCCESS == code) {
+    code = tlvEncodeI32(pEncoder, PHY_MERGE_CODE_SRC_GROUP_ID, pNode->srcGroupId);
+  }
+  if (TSDB_CODE_SUCCESS == code) {
+    code = tlvEncodeBool(pEncoder, PHY_MERGE_CODE_GROUP_SORT, pNode->groupSort);
+  }
+
+  return code;
+}
+
+// Deserializes a merge node; unrecognized tags are ignored.
+static int32_t msgToPhysiMergeNode(STlvDecoder* pDecoder, void* pObj) {
+  SMergePhysiNode* pNode = (SMergePhysiNode*)pObj;
+
+  int32_t code = TSDB_CODE_SUCCESS;
+  STlv* pTlv = NULL;
+  tlvForEach(pDecoder, pTlv, code) {
+    switch (pTlv->type) {
+      case PHY_MERGE_CODE_BASE_NODE:
+        code = tlvDecodeObjFromTlv(pTlv, msgToPhysiNode, &pNode->node);
+        break;
+      case PHY_MERGE_CODE_MERGE_KEYS:
+        code = msgToNodeListFromTlv(pTlv, (void**)&pNode->pMergeKeys);
+        break;
+      case PHY_MERGE_CODE_TARGETS:
+        code = msgToNodeListFromTlv(pTlv, (void**)&pNode->pTargets);
+        break;
+      case PHY_MERGE_CODE_NUM_OF_CHANNELS:
+        code = tlvDecodeI32(pTlv, &pNode->numOfChannels);
+        break;
+      case PHY_MERGE_CODE_SRC_GROUP_ID:
+        code = tlvDecodeI32(pTlv, &pNode->srcGroupId);
+        break;
+      case PHY_MERGE_CODE_GROUP_SORT:
+        code = tlvDecodeBool(pTlv, &pNode->groupSort);
+        break;
+      default:
+        break;
+    }
+  }
+
+  return code;
+}
+
+// TLV field tags for SSortPhysiNode serialization.
+enum { PHY_SORT_CODE_BASE_NODE = 1, PHY_SORT_CODE_EXPR, PHY_SORT_CODE_SORT_KEYS, PHY_SORT_CODE_TARGETS };
+
+// Serializes a sort node; stops at the first encode error.
+static int32_t physiSortNodeToMsg(const void* pObj, STlvEncoder* pEncoder) {
+  const SSortPhysiNode* pNode = (const SSortPhysiNode*)pObj;
+
+  int32_t code = tlvEncodeObj(pEncoder, PHY_SORT_CODE_BASE_NODE, physiNodeToMsg, &pNode->node);
+  if (TSDB_CODE_SUCCESS == code) {
+    code = tlvEncodeObj(pEncoder, PHY_SORT_CODE_EXPR, nodeListToMsg, pNode->pExprs);
+  }
+  if (TSDB_CODE_SUCCESS == code) {
+    code = tlvEncodeObj(pEncoder, PHY_SORT_CODE_SORT_KEYS, nodeListToMsg, pNode->pSortKeys);
+  }
+  if (TSDB_CODE_SUCCESS == code) {
+    code = tlvEncodeObj(pEncoder, PHY_SORT_CODE_TARGETS, nodeListToMsg, pNode->pTargets);
+  }
+
+  return code;
+}
+
+// Deserializes a sort node; unrecognized tags are ignored.
+static int32_t msgToPhysiSortNode(STlvDecoder* pDecoder, void* pObj) {
+  SSortPhysiNode* pNode = (SSortPhysiNode*)pObj;
+
+  int32_t code = TSDB_CODE_SUCCESS;
+  STlv* pTlv = NULL;
+  tlvForEach(pDecoder, pTlv, code) {
+    switch (pTlv->type) {
+      case PHY_SORT_CODE_BASE_NODE:
+        code = tlvDecodeObjFromTlv(pTlv, msgToPhysiNode, &pNode->node);
+        break;
+      case PHY_SORT_CODE_EXPR:
+        code = msgToNodeListFromTlv(pTlv, (void**)&pNode->pExprs);
+        break;
+      case PHY_SORT_CODE_SORT_KEYS:
+        code = msgToNodeListFromTlv(pTlv, (void**)&pNode->pSortKeys);
+        break;
+      case PHY_SORT_CODE_TARGETS:
+        code = msgToNodeListFromTlv(pTlv, (void**)&pNode->pTargets);
+        break;
+      default:
+        break;
+    }
+  }
+
+  return code;
+}
+
+// TLV field tags for the common window base (SWinodwPhysiNode — struct name typo
+// is upstream's) shared by interval/session/state window nodes.
+enum {
+  PHY_WINDOW_CODE_BASE_NODE = 1,
+  PHY_WINDOW_CODE_EXPR,
+  PHY_WINDOW_CODE_FUNCS,
+  PHY_WINDOW_CODE_TS_PK,
+  PHY_WINDOW_CODE_TS_END,
+  PHY_WINDOW_CODE_TRIGGER_TYPE,
+  PHY_WINDOW_CODE_WATERMARK,
+  PHY_WINDOW_CODE_IG_EXPIRED,
+  PHY_WINDOW_CODE_INPUT_TS_ORDER,
+  PHY_WINDOW_CODE_OUTPUT_TS_ORDER,
+  PHY_WINDOW_CODE_MERGE_DATA_BLOCK
+};
+
+// Serializes the shared window base node. pTspk/pTsEnd look like the window's
+// timestamp boundary expressions — inferred from names; confirm against header.
+// Stops at the first encode error.
+static int32_t physiWindowNodeToMsg(const void* pObj, STlvEncoder* pEncoder) {
+  const SWinodwPhysiNode* pNode = (const SWinodwPhysiNode*)pObj;
+
+  int32_t code = tlvEncodeObj(pEncoder, PHY_WINDOW_CODE_BASE_NODE, physiNodeToMsg, &pNode->node);
+  if (TSDB_CODE_SUCCESS == code) {
+    code = tlvEncodeObj(pEncoder, PHY_WINDOW_CODE_EXPR, nodeListToMsg, pNode->pExprs);
+  }
+  if (TSDB_CODE_SUCCESS == code) {
+    code = tlvEncodeObj(pEncoder, PHY_WINDOW_CODE_FUNCS, nodeListToMsg, pNode->pFuncs);
+  }
+  if (TSDB_CODE_SUCCESS == code) {
+    code = tlvEncodeObj(pEncoder, PHY_WINDOW_CODE_TS_PK, nodeToMsg, pNode->pTspk);
+  }
+  if (TSDB_CODE_SUCCESS == code) {
+    code = tlvEncodeObj(pEncoder, PHY_WINDOW_CODE_TS_END, nodeToMsg, pNode->pTsEnd);
+  }
+  if (TSDB_CODE_SUCCESS == code) {
+    code = tlvEncodeI8(pEncoder, PHY_WINDOW_CODE_TRIGGER_TYPE, pNode->triggerType);
+  }
+  if (TSDB_CODE_SUCCESS == code) {
+    code = tlvEncodeI64(pEncoder, PHY_WINDOW_CODE_WATERMARK, pNode->watermark);
+  }
+  if (TSDB_CODE_SUCCESS == code) {
+    code = tlvEncodeI8(pEncoder, PHY_WINDOW_CODE_IG_EXPIRED, pNode->igExpired);
+  }
+  if (TSDB_CODE_SUCCESS == code) {
+    code = tlvEncodeEnum(pEncoder, PHY_WINDOW_CODE_INPUT_TS_ORDER, pNode->inputTsOrder);
+  }
+  if (TSDB_CODE_SUCCESS == code) {
+    code = tlvEncodeEnum(pEncoder, PHY_WINDOW_CODE_OUTPUT_TS_ORDER, pNode->outputTsOrder);
+  }
+  if (TSDB_CODE_SUCCESS == code) {
+    code = tlvEncodeBool(pEncoder, PHY_WINDOW_CODE_MERGE_DATA_BLOCK, pNode->mergeDataBlock);
+  }
+
+  return code;
+}
+
+// Deserializes the shared window base node; unrecognized tags are ignored.
+static int32_t msgToPhysiWindowNode(STlvDecoder* pDecoder, void* pObj) {
+  SWinodwPhysiNode* pNode = (SWinodwPhysiNode*)pObj;
+
+  int32_t code = TSDB_CODE_SUCCESS;
+  STlv* pTlv = NULL;
+  tlvForEach(pDecoder, pTlv, code) {
+    switch (pTlv->type) {
+      case PHY_WINDOW_CODE_BASE_NODE:
+        code = tlvDecodeObjFromTlv(pTlv, msgToPhysiNode, &pNode->node);
+        break;
+      case PHY_WINDOW_CODE_EXPR:
+        code = msgToNodeListFromTlv(pTlv, (void**)&pNode->pExprs);
+        break;
+      case PHY_WINDOW_CODE_FUNCS:
+        code = msgToNodeListFromTlv(pTlv, (void**)&pNode->pFuncs);
+        break;
+      case PHY_WINDOW_CODE_TS_PK:
+        code = msgToNodeFromTlv(pTlv, (void**)&pNode->pTspk);
+        break;
+      case PHY_WINDOW_CODE_TS_END:
+        code = msgToNodeFromTlv(pTlv, (void**)&pNode->pTsEnd);
+        break;
+      case PHY_WINDOW_CODE_TRIGGER_TYPE:
+        code = tlvDecodeI8(pTlv, &pNode->triggerType);
+        break;
+      case PHY_WINDOW_CODE_WATERMARK:
+        code = tlvDecodeI64(pTlv, &pNode->watermark);
+        break;
+      case PHY_WINDOW_CODE_IG_EXPIRED:
+        code = tlvDecodeI8(pTlv, &pNode->igExpired);
+        break;
+      case PHY_WINDOW_CODE_INPUT_TS_ORDER:
+        code = tlvDecodeEnum(pTlv, &pNode->inputTsOrder, sizeof(pNode->inputTsOrder));
+        break;
+      case PHY_WINDOW_CODE_OUTPUT_TS_ORDER:
+        code = tlvDecodeEnum(pTlv, &pNode->outputTsOrder, sizeof(pNode->outputTsOrder));
+        break;
+      case PHY_WINDOW_CODE_MERGE_DATA_BLOCK:
+        code = tlvDecodeBool(pTlv, &pNode->mergeDataBlock);
+        break;
+      default:
+        break;
+    }
+  }
+
+  return code;
+}
+
+// TLV field tags for SIntervalPhysiNode serialization.
+enum {
+  PHY_INTERVAL_CODE_WINDOW = 1,
+  PHY_INTERVAL_CODE_INTERVAL,
+  PHY_INTERVAL_CODE_OFFSET,
+  PHY_INTERVAL_CODE_SLIDING,
+  PHY_INTERVAL_CODE_INTERVAL_UNIT,
+  PHY_INTERVAL_CODE_SLIDING_UNIT
+};
+
+// Serializes an interval-window node: embedded window base first, then the
+// interval/offset/sliding values and their unit codes. Stops at first error.
+static int32_t physiIntervalNodeToMsg(const void* pObj, STlvEncoder* pEncoder) {
+  const SIntervalPhysiNode* pNode = (const SIntervalPhysiNode*)pObj;
+
+  int32_t code = tlvEncodeObj(pEncoder, PHY_INTERVAL_CODE_WINDOW, physiWindowNodeToMsg, &pNode->window);
+  if (TSDB_CODE_SUCCESS == code) {
+    code = tlvEncodeI64(pEncoder, PHY_INTERVAL_CODE_INTERVAL, pNode->interval);
+  }
+  if (TSDB_CODE_SUCCESS == code) {
+    code = tlvEncodeI64(pEncoder, PHY_INTERVAL_CODE_OFFSET, pNode->offset);
+  }
+  if (TSDB_CODE_SUCCESS == code) {
+    code = tlvEncodeI64(pEncoder, PHY_INTERVAL_CODE_SLIDING, pNode->sliding);
+  }
+  if (TSDB_CODE_SUCCESS == code) {
+    code = tlvEncodeI8(pEncoder, PHY_INTERVAL_CODE_INTERVAL_UNIT, pNode->intervalUnit);
+  }
+  if (TSDB_CODE_SUCCESS == code) {
+    code = tlvEncodeI8(pEncoder, PHY_INTERVAL_CODE_SLIDING_UNIT, pNode->slidingUnit);
+  }
+
+  return code;
+}
+
+// Deserializes an interval-window node; unrecognized tags are ignored.
+static int32_t msgToPhysiIntervalNode(STlvDecoder* pDecoder, void* pObj) {
+  SIntervalPhysiNode* pNode = (SIntervalPhysiNode*)pObj;
+
+  int32_t code = TSDB_CODE_SUCCESS;
+  STlv* pTlv = NULL;
+  tlvForEach(pDecoder, pTlv, code) {
+    switch (pTlv->type) {
+      case PHY_INTERVAL_CODE_WINDOW:
+        code = tlvDecodeObjFromTlv(pTlv, msgToPhysiWindowNode, &pNode->window);
+        break;
+      case PHY_INTERVAL_CODE_INTERVAL:
+        code = tlvDecodeI64(pTlv, &pNode->interval);
+        break;
+      case PHY_INTERVAL_CODE_OFFSET:
+        code = tlvDecodeI64(pTlv, &pNode->offset);
+        break;
+      case PHY_INTERVAL_CODE_SLIDING:
+        code = tlvDecodeI64(pTlv, &pNode->sliding);
+        break;
+      case PHY_INTERVAL_CODE_INTERVAL_UNIT:
+        code = tlvDecodeI8(pTlv, &pNode->intervalUnit);
+        break;
+      case PHY_INTERVAL_CODE_SLIDING_UNIT:
+        code = tlvDecodeI8(pTlv, &pNode->slidingUnit);
+        break;
+      default:
+        break;
+    }
+  }
+
+  return code;
+}
+
+// TLV field tags for SFillPhysiNode serialization.
+enum {
+  PHY_FILL_CODE_BASE_NODE = 1,
+  PHY_FILL_CODE_MODE,
+  PHY_FILL_CODE_FILL_EXPRS,
+  PHY_FILL_CODE_NOT_FILL_EXPRS,
+  PHY_FILL_CODE_WSTART,
+  PHY_FILL_CODE_VALUES,
+  PHY_FILL_CODE_TIME_RANGE,
+  PHY_FILL_CODE_INPUT_TS_ORDER
+};
+
+// Serializes a fill node; timeRange is an embedded STimeWindow encoded via
+// timeWindowToMsg. Stops at the first encode error.
+static int32_t physiFillNodeToMsg(const void* pObj, STlvEncoder* pEncoder) {
+  const SFillPhysiNode* pNode = (const SFillPhysiNode*)pObj;
+
+  int32_t code = tlvEncodeObj(pEncoder, PHY_FILL_CODE_BASE_NODE, physiNodeToMsg, &pNode->node);
+  if (TSDB_CODE_SUCCESS == code) {
+    code = tlvEncodeEnum(pEncoder, PHY_FILL_CODE_MODE, pNode->mode);
+  }
+  if (TSDB_CODE_SUCCESS == code) {
+    code = tlvEncodeObj(pEncoder, PHY_FILL_CODE_FILL_EXPRS, nodeListToMsg, pNode->pFillExprs);
+  }
+  if (TSDB_CODE_SUCCESS == code) {
+    code = tlvEncodeObj(pEncoder, PHY_FILL_CODE_NOT_FILL_EXPRS, nodeListToMsg, pNode->pNotFillExprs);
+  }
+  if (TSDB_CODE_SUCCESS == code) {
+    code = tlvEncodeObj(pEncoder, PHY_FILL_CODE_WSTART, nodeToMsg, pNode->pWStartTs);
+  }
+  if (TSDB_CODE_SUCCESS == code) {
+    code = tlvEncodeObj(pEncoder, PHY_FILL_CODE_VALUES, nodeToMsg, pNode->pValues);
+  }
+  if (TSDB_CODE_SUCCESS == code) {
+    code = tlvEncodeObj(pEncoder, PHY_FILL_CODE_TIME_RANGE, timeWindowToMsg, &pNode->timeRange);
+  }
+  if (TSDB_CODE_SUCCESS == code) {
+    code = tlvEncodeEnum(pEncoder, PHY_FILL_CODE_INPUT_TS_ORDER, pNode->inputTsOrder);
+  }
+
+  return code;
+}
+
+// Deserializes a fill node; unrecognized tags are ignored.
+static int32_t msgToPhysiFillNode(STlvDecoder* pDecoder, void* pObj) {
+  SFillPhysiNode* pNode = (SFillPhysiNode*)pObj;
+
+  int32_t code = TSDB_CODE_SUCCESS;
+  STlv* pTlv = NULL;
+  tlvForEach(pDecoder, pTlv, code) {
+    switch (pTlv->type) {
+      case PHY_FILL_CODE_BASE_NODE:
+        code = tlvDecodeObjFromTlv(pTlv, msgToPhysiNode, &pNode->node);
+        break;
+      case PHY_FILL_CODE_MODE:
+        code = tlvDecodeEnum(pTlv, &pNode->mode, sizeof(pNode->mode));
+        break;
+      case PHY_FILL_CODE_FILL_EXPRS:
+        code = msgToNodeListFromTlv(pTlv, (void**)&pNode->pFillExprs);
+        break;
+      case PHY_FILL_CODE_NOT_FILL_EXPRS:
+        code = msgToNodeListFromTlv(pTlv, (void**)&pNode->pNotFillExprs);
+        break;
+      case PHY_FILL_CODE_WSTART:
+        code = msgToNodeFromTlv(pTlv, (void**)&pNode->pWStartTs);
+        break;
+      case PHY_FILL_CODE_VALUES:
+        code = msgToNodeFromTlv(pTlv, (void**)&pNode->pValues);
+        break;
+      case PHY_FILL_CODE_TIME_RANGE:
+        // timeRange is an embedded STimeWindow, not a pointer field: pass its
+        // address directly, matching msgToPhysiInterpFuncNode and
+        // msgToPhysiDeleteNode. (The previous (void**) cast was misleading —
+        // tlvDecodeObjFromTlv takes the object address as void*.)
+        code = tlvDecodeObjFromTlv(pTlv, msgToTimeWindow, &pNode->timeRange);
+        break;
+      case PHY_FILL_CODE_INPUT_TS_ORDER:
+        code = tlvDecodeEnum(pTlv, &pNode->inputTsOrder, sizeof(pNode->inputTsOrder));
+        break;
+      default:
+        break;
+    }
+  }
+
+  return code;
+}
+
+// TLV field tags for SSessionWinodwPhysiNode serialization.
+enum { PHY_SESSION_CODE_WINDOW = 1, PHY_SESSION_CODE_GAP };
+
+// Serializes a session-window node: embedded window base plus the session gap.
+static int32_t physiSessionWindowNodeToMsg(const void* pObj, STlvEncoder* pEncoder) {
+  const SSessionWinodwPhysiNode* pNode = (const SSessionWinodwPhysiNode*)pObj;
+
+  int32_t code = tlvEncodeObj(pEncoder, PHY_SESSION_CODE_WINDOW, physiWindowNodeToMsg, &pNode->window);
+  if (TSDB_CODE_SUCCESS == code) {
+    code = tlvEncodeI64(pEncoder, PHY_SESSION_CODE_GAP, pNode->gap);
+  }
+
+  return code;
+}
+
+// Deserializes a session-window node; unrecognized tags are ignored.
+static int32_t msgToPhysiSessionWindowNode(STlvDecoder* pDecoder, void* pObj) {
+  SSessionWinodwPhysiNode* pNode = (SSessionWinodwPhysiNode*)pObj;
+
+  int32_t code = TSDB_CODE_SUCCESS;
+  STlv* pTlv = NULL;
+  tlvForEach(pDecoder, pTlv, code) {
+    switch (pTlv->type) {
+      case PHY_SESSION_CODE_WINDOW:
+        code = tlvDecodeObjFromTlv(pTlv, msgToPhysiWindowNode, &pNode->window);
+        break;
+      case PHY_SESSION_CODE_GAP:
+        code = tlvDecodeI64(pTlv, &pNode->gap);
+        break;
+      default:
+        break;
+    }
+  }
+
+  return code;
+}
+
+// TLV field tags for SStateWinodwPhysiNode serialization.
+enum { PHY_STATE_CODE_WINDOW = 1, PHY_STATE_CODE_KEY };
+
+// Serializes a state-window node: embedded window base plus the state-key expression.
+static int32_t physiStateWindowNodeToMsg(const void* pObj, STlvEncoder* pEncoder) {
+  const SStateWinodwPhysiNode* pNode = (const SStateWinodwPhysiNode*)pObj;
+
+  int32_t code = tlvEncodeObj(pEncoder, PHY_STATE_CODE_WINDOW, physiWindowNodeToMsg, &pNode->window);
+  if (TSDB_CODE_SUCCESS == code) {
+    code = tlvEncodeObj(pEncoder, PHY_STATE_CODE_KEY, nodeToMsg, pNode->pStateKey);
+  }
+
+  return code;
+}
+
+// Deserializes a state-window node; unrecognized tags are ignored.
+static int32_t msgToPhysiStateWindowNode(STlvDecoder* pDecoder, void* pObj) {
+  SStateWinodwPhysiNode* pNode = (SStateWinodwPhysiNode*)pObj;
+
+  int32_t code = TSDB_CODE_SUCCESS;
+  STlv* pTlv = NULL;
+  tlvForEach(pDecoder, pTlv, code) {
+    switch (pTlv->type) {
+      case PHY_STATE_CODE_WINDOW:
+        code = tlvDecodeObjFromTlv(pTlv, msgToPhysiWindowNode, &pNode->window);
+        break;
+      case PHY_STATE_CODE_KEY:
+        code = msgToNodeFromTlv(pTlv, (void**)&pNode->pStateKey);
+        break;
+      default:
+        break;
+    }
+  }
+
+  return code;
+}
+
+// TLV field tags for SPartitionPhysiNode serialization.
+enum { PHY_PARTITION_CODE_BASE_NODE = 1, PHY_PARTITION_CODE_EXPR, PHY_PARTITION_CODE_KEYS, PHY_PARTITION_CODE_TARGETS };
+
+// Serializes a partition node; stops at the first encode error.
+static int32_t physiPartitionNodeToMsg(const void* pObj, STlvEncoder* pEncoder) {
+  const SPartitionPhysiNode* pNode = (const SPartitionPhysiNode*)pObj;
+
+  int32_t code = tlvEncodeObj(pEncoder, PHY_PARTITION_CODE_BASE_NODE, physiNodeToMsg, &pNode->node);
+  if (TSDB_CODE_SUCCESS == code) {
+    code = tlvEncodeObj(pEncoder, PHY_PARTITION_CODE_EXPR, nodeListToMsg, pNode->pExprs);
+  }
+  if (TSDB_CODE_SUCCESS == code) {
+    code = tlvEncodeObj(pEncoder, PHY_PARTITION_CODE_KEYS, nodeListToMsg, pNode->pPartitionKeys);
+  }
+  if (TSDB_CODE_SUCCESS == code) {
+    code = tlvEncodeObj(pEncoder, PHY_PARTITION_CODE_TARGETS, nodeListToMsg, pNode->pTargets);
+  }
+
+  return code;
+}
+
+// Deserializes a partition node; unrecognized tags are ignored.
+static int32_t msgToPhysiPartitionNode(STlvDecoder* pDecoder, void* pObj) {
+  SPartitionPhysiNode* pNode = (SPartitionPhysiNode*)pObj;
+
+  int32_t code = TSDB_CODE_SUCCESS;
+  STlv* pTlv = NULL;
+  tlvForEach(pDecoder, pTlv, code) {
+    switch (pTlv->type) {
+      case PHY_PARTITION_CODE_BASE_NODE:
+        code = tlvDecodeObjFromTlv(pTlv, msgToPhysiNode, &pNode->node);
+        break;
+      case PHY_PARTITION_CODE_EXPR:
+        code = msgToNodeListFromTlv(pTlv, (void**)&pNode->pExprs);
+        break;
+      case PHY_PARTITION_CODE_KEYS:
+        code = msgToNodeListFromTlv(pTlv, (void**)&pNode->pPartitionKeys);
+        break;
+      case PHY_PARTITION_CODE_TARGETS:
+        code = msgToNodeListFromTlv(pTlv, (void**)&pNode->pTargets);
+        break;
+      default:
+        break;
+    }
+  }
+
+  return code;
+}
+
+// TLV field tags for SIndefRowsFuncPhysiNode serialization.
+enum { PHY_INDEF_ROWS_FUNC_CODE_BASE_NODE = 1, PHY_INDEF_ROWS_FUNC_CODE_EXPRS, PHY_INDEF_ROWS_FUNC_CODE_FUNCS };
+
+// Serializes an indefinite-rows function node; stops at the first encode error.
+static int32_t physiIndefRowsFuncNodeToMsg(const void* pObj, STlvEncoder* pEncoder) {
+  const SIndefRowsFuncPhysiNode* pNode = (const SIndefRowsFuncPhysiNode*)pObj;
+
+  int32_t code = tlvEncodeObj(pEncoder, PHY_INDEF_ROWS_FUNC_CODE_BASE_NODE, physiNodeToMsg, &pNode->node);
+  if (TSDB_CODE_SUCCESS == code) {
+    code = tlvEncodeObj(pEncoder, PHY_INDEF_ROWS_FUNC_CODE_EXPRS, nodeListToMsg, pNode->pExprs);
+  }
+  if (TSDB_CODE_SUCCESS == code) {
+    code = tlvEncodeObj(pEncoder, PHY_INDEF_ROWS_FUNC_CODE_FUNCS, nodeListToMsg, pNode->pFuncs);
+  }
+
+  return code;
+}
+
+// Deserializes an indefinite-rows function node; unrecognized tags are ignored.
+static int32_t msgToPhysiIndefRowsFuncNode(STlvDecoder* pDecoder, void* pObj) {
+  SIndefRowsFuncPhysiNode* pNode = (SIndefRowsFuncPhysiNode*)pObj;
+
+  int32_t code = TSDB_CODE_SUCCESS;
+  STlv* pTlv = NULL;
+  tlvForEach(pDecoder, pTlv, code) {
+    switch (pTlv->type) {
+      case PHY_INDEF_ROWS_FUNC_CODE_BASE_NODE:
+        code = tlvDecodeObjFromTlv(pTlv, msgToPhysiNode, &pNode->node);
+        break;
+      case PHY_INDEF_ROWS_FUNC_CODE_EXPRS:
+        code = msgToNodeListFromTlv(pTlv, (void**)&pNode->pExprs);
+        break;
+      case PHY_INDEF_ROWS_FUNC_CODE_FUNCS:
+        code = msgToNodeListFromTlv(pTlv, (void**)&pNode->pFuncs);
+        break;
+      default:
+        break;
+    }
+  }
+
+  return code;
+}
+
+// TLV field tags for SInterpFuncPhysiNode serialization.
+// NOTE: "INERP" (not "INTERP") is upstream's spelling; kept for consistency.
+enum {
+  PHY_INERP_FUNC_CODE_BASE_NODE = 1,
+  PHY_INERP_FUNC_CODE_EXPR,
+  PHY_INERP_FUNC_CODE_FUNCS,
+  PHY_INERP_FUNC_CODE_TIME_RANGE,
+  PHY_INERP_FUNC_CODE_INTERVAL,
+  PHY_INERP_FUNC_CODE_INTERVAL_UNIT,
+  PHY_INERP_FUNC_CODE_FILL_MODE,
+  PHY_INERP_FUNC_CODE_FILL_VALUES,
+  PHY_INERP_FUNC_CODE_TIME_SERIES
+};
+
+// Serializes an interp-function node; timeRange is an embedded STimeWindow.
+// Stops at the first encode error.
+static int32_t physiInterpFuncNodeToMsg(const void* pObj, STlvEncoder* pEncoder) {
+  const SInterpFuncPhysiNode* pNode = (const SInterpFuncPhysiNode*)pObj;
+
+  int32_t code = tlvEncodeObj(pEncoder, PHY_INERP_FUNC_CODE_BASE_NODE, physiNodeToMsg, &pNode->node);
+  if (TSDB_CODE_SUCCESS == code) {
+    code = tlvEncodeObj(pEncoder, PHY_INERP_FUNC_CODE_EXPR, nodeListToMsg, pNode->pExprs);
+  }
+  if (TSDB_CODE_SUCCESS == code) {
+    code = tlvEncodeObj(pEncoder, PHY_INERP_FUNC_CODE_FUNCS, nodeListToMsg, pNode->pFuncs);
+  }
+  if (TSDB_CODE_SUCCESS == code) {
+    code = tlvEncodeObj(pEncoder, PHY_INERP_FUNC_CODE_TIME_RANGE, timeWindowToMsg, &pNode->timeRange);
+  }
+  if (TSDB_CODE_SUCCESS == code) {
+    code = tlvEncodeI64(pEncoder, PHY_INERP_FUNC_CODE_INTERVAL, pNode->interval);
+  }
+  if (TSDB_CODE_SUCCESS == code) {
+    code = tlvEncodeI8(pEncoder, PHY_INERP_FUNC_CODE_INTERVAL_UNIT, pNode->intervalUnit);
+  }
+  if (TSDB_CODE_SUCCESS == code) {
+    code = tlvEncodeEnum(pEncoder, PHY_INERP_FUNC_CODE_FILL_MODE, pNode->fillMode);
+  }
+  if (TSDB_CODE_SUCCESS == code) {
+    code = tlvEncodeObj(pEncoder, PHY_INERP_FUNC_CODE_FILL_VALUES, nodeToMsg, pNode->pFillValues);
+  }
+  if (TSDB_CODE_SUCCESS == code) {
+    code = tlvEncodeObj(pEncoder, PHY_INERP_FUNC_CODE_TIME_SERIES, nodeToMsg, pNode->pTimeSeries);
+  }
+
+  return code;
+}
+
+// Deserializes an interp-function node; unrecognized tags are ignored.
+static int32_t msgToPhysiInterpFuncNode(STlvDecoder* pDecoder, void* pObj) {
+  SInterpFuncPhysiNode* pNode = (SInterpFuncPhysiNode*)pObj;
+
+  int32_t code = TSDB_CODE_SUCCESS;
+  STlv* pTlv = NULL;
+  tlvForEach(pDecoder, pTlv, code) {
+    switch (pTlv->type) {
+      case PHY_INERP_FUNC_CODE_BASE_NODE:
+        code = tlvDecodeObjFromTlv(pTlv, msgToPhysiNode, &pNode->node);
+        break;
+      case PHY_INERP_FUNC_CODE_EXPR:
+        code = msgToNodeListFromTlv(pTlv, (void**)&pNode->pExprs);
+        break;
+      case PHY_INERP_FUNC_CODE_FUNCS:
+        code = msgToNodeListFromTlv(pTlv, (void**)&pNode->pFuncs);
+        break;
+      case PHY_INERP_FUNC_CODE_TIME_RANGE:
+        code = tlvDecodeObjFromTlv(pTlv, msgToTimeWindow, &pNode->timeRange);
+        break;
+      case PHY_INERP_FUNC_CODE_INTERVAL:
+        code = tlvDecodeI64(pTlv, &pNode->interval);
+        break;
+      case PHY_INERP_FUNC_CODE_INTERVAL_UNIT:
+        code = tlvDecodeI8(pTlv, &pNode->intervalUnit);
+        break;
+      case PHY_INERP_FUNC_CODE_FILL_MODE:
+        code = tlvDecodeEnum(pTlv, &pNode->fillMode, sizeof(pNode->fillMode));
+        break;
+      case PHY_INERP_FUNC_CODE_FILL_VALUES:
+        code = msgToNodeFromTlv(pTlv, (void**)&pNode->pFillValues);
+        break;
+      case PHY_INERP_FUNC_CODE_TIME_SERIES:
+        code = msgToNodeFromTlv(pTlv, (void**)&pNode->pTimeSeries);
+        break;
+      default:
+        break;
+    }
+  }
+
+  return code;
+}
+
+// TLV field tag for the common SDataSinkNode base (only the input descriptor).
+enum { PHY_DATA_SINK_CODE_INPUT_DESC = 1 };
+
+// Serializes the common data-sink base: only the input data-block descriptor
+// is carried on the wire.
+static int32_t physicDataSinkNodeToMsg(const void* pObj, STlvEncoder* pEncoder) {
+  const SDataSinkNode* pSink = (const SDataSinkNode*)pObj;
+  int32_t code = tlvEncodeObj(pEncoder, PHY_DATA_SINK_CODE_INPUT_DESC, nodeToMsg, pSink->pInputDataBlockDesc);
+  return code;
+}
+
+// Deserializes the common data-sink base; only the input descriptor tag is
+// recognized, anything else is silently skipped.
+static int32_t msgToPhysicDataSinkNode(STlvDecoder* pDecoder, void* pObj) {
+  SDataSinkNode* pSink = (SDataSinkNode*)pObj;
+
+  int32_t code = TSDB_CODE_SUCCESS;
+  STlv*   pTlv = NULL;
+  tlvForEach(pDecoder, pTlv, code) {
+    if (PHY_DATA_SINK_CODE_INPUT_DESC == pTlv->type) {
+      code = msgToNodeFromTlv(pTlv, (void**)&pSink->pInputDataBlockDesc);
+    }
+  }
+
+  return code;
+}
+
+// TLV field tag for SDataDispatcherNode (only the embedded sink base).
+enum { PHY_DISPATCH_CODE_SINK = 1 };
+
+// Serializes a dispatcher node, which adds nothing beyond the sink base.
+static int32_t physiDispatchNodeToMsg(const void* pObj, STlvEncoder* pEncoder) {
+  const SDataDispatcherNode* pDispatcher = (const SDataDispatcherNode*)pObj;
+  int32_t code = tlvEncodeObj(pEncoder, PHY_DISPATCH_CODE_SINK, physicDataSinkNodeToMsg, &pDispatcher->sink);
+  return code;
+}
+
+// Deserializes a dispatcher node; only the sink tag is recognized, anything
+// else is silently skipped.
+static int32_t msgToPhysiDispatchNode(STlvDecoder* pDecoder, void* pObj) {
+  SDataDispatcherNode* pDispatcher = (SDataDispatcherNode*)pObj;
+
+  int32_t code = TSDB_CODE_SUCCESS;
+  STlv*   pTlv = NULL;
+  tlvForEach(pDecoder, pTlv, code) {
+    if (PHY_DISPATCH_CODE_SINK == pTlv->type) {
+      code = tlvDecodeObjFromTlv(pTlv, msgToPhysicDataSinkNode, &pDispatcher->sink);
+    }
+  }
+
+  return code;
+}
+
+// TLV field tags for SQueryInserterNode serialization.
+enum {
+  PHY_QUERY_INSERT_CODE_SINK = 1,
+  PHY_QUERY_INSERT_CODE_COLS,
+  PHY_QUERY_INSERT_CODE_TABLE_ID,
+  PHY_QUERY_INSERT_CODE_STABLE_ID,
+  PHY_QUERY_INSERT_CODE_TABLE_TYPE,
+  PHY_QUERY_INSERT_CODE_TABLE_NAME,
+  PHY_QUERY_INSERT_CODE_VG_ID,
+  PHY_QUERY_INSERT_CODE_EP_SET
+};
+
+// Serializes a query-inserter node: sink base, column list, table identity
+// (ids/type/name), target vgroup and its endpoint set. Stops at first error.
+static int32_t physiQueryInsertNodeToMsg(const void* pObj, STlvEncoder* pEncoder) {
+  const SQueryInserterNode* pNode = (const SQueryInserterNode*)pObj;
+
+  int32_t code = tlvEncodeObj(pEncoder, PHY_QUERY_INSERT_CODE_SINK, physicDataSinkNodeToMsg, &pNode->sink);
+  if (TSDB_CODE_SUCCESS == code) {
+    code = tlvEncodeObj(pEncoder, PHY_QUERY_INSERT_CODE_COLS, nodeListToMsg, pNode->pCols);
+  }
+  if (TSDB_CODE_SUCCESS == code) {
+    code = tlvEncodeU64(pEncoder, PHY_QUERY_INSERT_CODE_TABLE_ID, pNode->tableId);
+  }
+  if (TSDB_CODE_SUCCESS == code) {
+    code = tlvEncodeU64(pEncoder, PHY_QUERY_INSERT_CODE_STABLE_ID, pNode->stableId);
+  }
+  if (TSDB_CODE_SUCCESS == code) {
+    code = tlvEncodeI8(pEncoder, PHY_QUERY_INSERT_CODE_TABLE_TYPE, pNode->tableType);
+  }
+  if (TSDB_CODE_SUCCESS == code) {
+    code = tlvEncodeCStr(pEncoder, PHY_QUERY_INSERT_CODE_TABLE_NAME, pNode->tableName);
+  }
+  if (TSDB_CODE_SUCCESS == code) {
+    code = tlvEncodeI32(pEncoder, PHY_QUERY_INSERT_CODE_VG_ID, pNode->vgId);
+  }
+  if (TSDB_CODE_SUCCESS == code) {
+    code = tlvEncodeObj(pEncoder, PHY_QUERY_INSERT_CODE_EP_SET, epSetToMsg, &pNode->epSet);
+  }
+
+  return code;
+}
+
+// Deserializes a query-inserter node; unrecognized tags are ignored.
+static int32_t msgToPhysiQueryInsertNode(STlvDecoder* pDecoder, void* pObj) {
+  SQueryInserterNode* pNode = (SQueryInserterNode*)pObj;
+
+  int32_t code = TSDB_CODE_SUCCESS;
+  STlv* pTlv = NULL;
+  tlvForEach(pDecoder, pTlv, code) {
+    switch (pTlv->type) {
+      case PHY_QUERY_INSERT_CODE_SINK:
+        code = tlvDecodeObjFromTlv(pTlv, msgToPhysicDataSinkNode, &pNode->sink);
+        break;
+      case PHY_QUERY_INSERT_CODE_COLS:
+        code = msgToNodeListFromTlv(pTlv, (void**)&pNode->pCols);
+        break;
+      case PHY_QUERY_INSERT_CODE_TABLE_ID:
+        code = tlvDecodeU64(pTlv, &pNode->tableId);
+        break;
+      case PHY_QUERY_INSERT_CODE_STABLE_ID:
+        code = tlvDecodeU64(pTlv, &pNode->stableId);
+        break;
+      case PHY_QUERY_INSERT_CODE_TABLE_TYPE:
+        code = tlvDecodeI8(pTlv, &pNode->tableType);
+        break;
+      case PHY_QUERY_INSERT_CODE_TABLE_NAME:
+        code = tlvDecodeCStr(pTlv, pNode->tableName);
+        break;
+      case PHY_QUERY_INSERT_CODE_VG_ID:
+        code = tlvDecodeI32(pTlv, &pNode->vgId);
+        break;
+      case PHY_QUERY_INSERT_CODE_EP_SET:
+        code = tlvDecodeObjFromTlv(pTlv, msgToEpSet, &pNode->epSet);
+        break;
+      default:
+        break;
+    }
+  }
+
+  return code;
+}
+
+// TLV field tags for SDataDeleterNode serialization. Values are part of the
+// wire format: only append new tags, never renumber existing ones.
+enum {
+  PHY_DELETER_CODE_SINK = 1,
+  PHY_DELETER_CODE_TABLE_ID,
+  PHY_DELETER_CODE_TABLE_TYPE,
+  PHY_DELETER_CODE_TABLE_FNAME,
+  PHY_DELETER_CODE_TS_COL_NAME,
+  PHY_DELETER_CODE_DELETE_TIME_RANGE,
+  PHY_DELETER_CODE_AFFECTED_ROWS
+};
+
+/* Encode a SDataDeleterNode (physical DELETE data sink) as TLV fields.
+ * Fields are emitted in tag order; encoding stops at the first failure and
+ * that error code is returned. */
+static int32_t physiDeleteNodeToMsg(const void* pObj, STlvEncoder* pEncoder) {
+  const SDataDeleterNode* pNode = (const SDataDeleterNode*)pObj;
+
+  int32_t code = tlvEncodeObj(pEncoder, PHY_DELETER_CODE_SINK, physicDataSinkNodeToMsg, &pNode->sink);
+  if (TSDB_CODE_SUCCESS == code) {
+    code = tlvEncodeU64(pEncoder, PHY_DELETER_CODE_TABLE_ID, pNode->tableId);
+  }
+  if (TSDB_CODE_SUCCESS == code) {
+    code = tlvEncodeI8(pEncoder, PHY_DELETER_CODE_TABLE_TYPE, pNode->tableType);
+  }
+  if (TSDB_CODE_SUCCESS == code) {
+    code = tlvEncodeCStr(pEncoder, PHY_DELETER_CODE_TABLE_FNAME, pNode->tableFName);
+  }
+  if (TSDB_CODE_SUCCESS == code) {
+    code = tlvEncodeCStr(pEncoder, PHY_DELETER_CODE_TS_COL_NAME, pNode->tsColName);
+  }
+  if (TSDB_CODE_SUCCESS == code) {
+    code = tlvEncodeObj(pEncoder, PHY_DELETER_CODE_DELETE_TIME_RANGE, timeWindowToMsg, &pNode->deleteTimeRange);
+  }
+  if (TSDB_CODE_SUCCESS == code) {
+    // pAffectedRows is a generic SNode*; serialized via the polymorphic nodeToMsg
+    code = tlvEncodeObj(pEncoder, PHY_DELETER_CODE_AFFECTED_ROWS, nodeToMsg, pNode->pAffectedRows);
+  }
+
+  return code;
+}
+
+/* Decode a SDataDeleterNode from a TLV stream; inverse of physiDeleteNodeToMsg.
+ * Unknown TLV tags are silently skipped for forward compatibility. */
+static int32_t msgToPhysiDeleteNode(STlvDecoder* pDecoder, void* pObj) {
+  SDataDeleterNode* pNode = (SDataDeleterNode*)pObj;
+
+  int32_t code = TSDB_CODE_SUCCESS;
+  STlv*   pTlv = NULL;
+  tlvForEach(pDecoder, pTlv, code) {
+    switch (pTlv->type) {
+      case PHY_DELETER_CODE_SINK:
+        code = tlvDecodeObjFromTlv(pTlv, msgToPhysicDataSinkNode, &pNode->sink);
+        break;
+      case PHY_DELETER_CODE_TABLE_ID:
+        code = tlvDecodeU64(pTlv, &pNode->tableId);
+        break;
+      case PHY_DELETER_CODE_TABLE_TYPE:
+        code = tlvDecodeI8(pTlv, &pNode->tableType);
+        break;
+      case PHY_DELETER_CODE_TABLE_FNAME:
+        code = tlvDecodeCStr(pTlv, pNode->tableFName);
+        break;
+      case PHY_DELETER_CODE_TS_COL_NAME:
+        code = tlvDecodeCStr(pTlv, pNode->tsColName);
+        break;
+      case PHY_DELETER_CODE_DELETE_TIME_RANGE:
+        code = tlvDecodeObjFromTlv(pTlv, msgToTimeWindow, &pNode->deleteTimeRange);
+        break;
+      case PHY_DELETER_CODE_AFFECTED_ROWS:
+        // allocates a fresh node owned by pNode->pAffectedRows
+        code = msgToNodeFromTlv(pTlv, (void**)&pNode->pAffectedRows);
+        break;
+      default:
+        break;
+    }
+  }
+
+  return code;
+}
+
+// TLV field tags for SSubplanId; wire-format values, do not renumber.
+enum { SUBPLAN_ID_CODE_QUERY_ID = 1, SUBPLAN_ID_CODE_GROUP_ID, SUBPLAN_ID_CODE_SUBPLAN_ID };
+
+/* Encode a SSubplanId (queryId, groupId, subplanId) as TLV fields.
+ * Stops at the first encode failure and returns that error code. */
+static int32_t subplanIdToMsg(const void* pObj, STlvEncoder* pEncoder) {
+  const SSubplanId* pNode = (const SSubplanId*)pObj;
+
+  int32_t code = tlvEncodeU64(pEncoder, SUBPLAN_ID_CODE_QUERY_ID, pNode->queryId);
+  if (TSDB_CODE_SUCCESS == code) {
+    code = tlvEncodeI32(pEncoder, SUBPLAN_ID_CODE_GROUP_ID, pNode->groupId);
+  }
+  if (TSDB_CODE_SUCCESS == code) {
+    code = tlvEncodeI32(pEncoder, SUBPLAN_ID_CODE_SUBPLAN_ID, pNode->subplanId);
+  }
+
+  return code;
+}
+
+/* Decode a SSubplanId from a TLV stream; inverse of subplanIdToMsg.
+ * Unknown TLV tags are skipped for forward compatibility. */
+static int32_t msgToSubplanId(STlvDecoder* pDecoder, void* pObj) {
+  SSubplanId* pNode = (SSubplanId*)pObj;
+
+  int32_t code = TSDB_CODE_SUCCESS;
+  STlv*   pTlv = NULL;
+  tlvForEach(pDecoder, pTlv, code) {
+    switch (pTlv->type) {
+      case SUBPLAN_ID_CODE_QUERY_ID:
+        code = tlvDecodeU64(pTlv, &pNode->queryId);
+        break;
+      case SUBPLAN_ID_CODE_GROUP_ID:
+        code = tlvDecodeI32(pTlv, &pNode->groupId);
+        break;
+      case SUBPLAN_ID_CODE_SUBPLAN_ID:
+        code = tlvDecodeI32(pTlv, &pNode->subplanId);
+        break;
+      default:
+        break;
+    }
+  }
+
+  return code;
+}
+
+// TLV field tags for SQueryNodeAddr; wire-format values, do not renumber.
+enum { QUERY_NODE_ADDR_CODE_NODE_ID = 1, QUERY_NODE_ADDR_CODE_EP_SET };
+
+/* Encode a SQueryNodeAddr (node id + endpoint set) as TLV fields.
+ * Stops at the first encode failure and returns that error code. */
+static int32_t queryNodeAddrToMsg(const void* pObj, STlvEncoder* pEncoder) {
+  const SQueryNodeAddr* pNode = (const SQueryNodeAddr*)pObj;
+
+  int32_t code = tlvEncodeI32(pEncoder, QUERY_NODE_ADDR_CODE_NODE_ID, pNode->nodeId);
+  if (TSDB_CODE_SUCCESS == code) {
+    code = tlvEncodeObj(pEncoder, QUERY_NODE_ADDR_CODE_EP_SET, epSetToMsg, &pNode->epSet);
+  }
+
+  return code;
+}
+
+/* Decode a SQueryNodeAddr from a TLV stream; inverse of queryNodeAddrToMsg.
+ * Unknown TLV tags are ignored (explicit empty default branch, matching every
+ * other msgTo* decoder in this file) so newer encoders stay readable here. */
+static int32_t msgToQueryNodeAddr(STlvDecoder* pDecoder, void* pObj) {
+  SQueryNodeAddr* pNode = (SQueryNodeAddr*)pObj;
+
+  int32_t code = TSDB_CODE_SUCCESS;
+  STlv*   pTlv = NULL;
+  tlvForEach(pDecoder, pTlv, code) {
+    switch (pTlv->type) {
+      case QUERY_NODE_ADDR_CODE_NODE_ID:
+        code = tlvDecodeI32(pTlv, &pNode->nodeId);
+        break;
+      case QUERY_NODE_ADDR_CODE_EP_SET:
+        code = tlvDecodeObjFromTlv(pTlv, msgToEpSet, &pNode->epSet);
+        break;
+      default:
+        break;
+    }
+  }
+
+  return code;
+}
+
+// TLV field tags for SSubplan serialization; wire-format values, append only.
+enum {
+  SUBPLAN_CODE_SUBPLAN_ID = 1,
+  SUBPLAN_CODE_SUBPLAN_TYPE,
+  SUBPLAN_CODE_MSG_TYPE,
+  SUBPLAN_CODE_LEVEL,
+  SUBPLAN_CODE_DBFNAME,
+  SUBPLAN_CODE_USER,
+  SUBPLAN_CODE_EXECNODE,
+  SUBPLAN_CODE_ROOT_NODE,
+  SUBPLAN_CODE_DATA_SINK,
+  SUBPLAN_CODE_TAG_COND,
+  SUBPLAN_CODE_TAG_INDEX_COND
+};
+
+/* Encode a SSubplan (one executable fragment of a query plan) as TLV fields.
+ * Nested nodes (root plan node, data sink, tag conditions) are serialized
+ * recursively through nodeToMsg. Stops at the first encode failure. */
+static int32_t subplanToMsg(const void* pObj, STlvEncoder* pEncoder) {
+  const SSubplan* pNode = (const SSubplan*)pObj;
+
+  int32_t code = tlvEncodeObj(pEncoder, SUBPLAN_CODE_SUBPLAN_ID, subplanIdToMsg, &pNode->id);
+  if (TSDB_CODE_SUCCESS == code) {
+    code = tlvEncodeEnum(pEncoder, SUBPLAN_CODE_SUBPLAN_TYPE, pNode->subplanType);
+  }
+  if (TSDB_CODE_SUCCESS == code) {
+    code = tlvEncodeI32(pEncoder, SUBPLAN_CODE_MSG_TYPE, pNode->msgType);
+  }
+  if (TSDB_CODE_SUCCESS == code) {
+    code = tlvEncodeI32(pEncoder, SUBPLAN_CODE_LEVEL, pNode->level);
+  }
+  if (TSDB_CODE_SUCCESS == code) {
+    code = tlvEncodeCStr(pEncoder, SUBPLAN_CODE_DBFNAME, pNode->dbFName);
+  }
+  if (TSDB_CODE_SUCCESS == code) {
+    code = tlvEncodeCStr(pEncoder, SUBPLAN_CODE_USER, pNode->user);
+  }
+  if (TSDB_CODE_SUCCESS == code) {
+    code = tlvEncodeObj(pEncoder, SUBPLAN_CODE_EXECNODE, queryNodeAddrToMsg, &pNode->execNode);
+  }
+  if (TSDB_CODE_SUCCESS == code) {
+    code = tlvEncodeObj(pEncoder, SUBPLAN_CODE_ROOT_NODE, nodeToMsg, pNode->pNode);
+  }
+  if (TSDB_CODE_SUCCESS == code) {
+    code = tlvEncodeObj(pEncoder, SUBPLAN_CODE_DATA_SINK, nodeToMsg, pNode->pDataSink);
+  }
+  if (TSDB_CODE_SUCCESS == code) {
+    code = tlvEncodeObj(pEncoder, SUBPLAN_CODE_TAG_COND, nodeToMsg, pNode->pTagCond);
+  }
+  if (TSDB_CODE_SUCCESS == code) {
+    code = tlvEncodeObj(pEncoder, SUBPLAN_CODE_TAG_INDEX_COND, nodeToMsg, pNode->pTagIndexCond);
+  }
+
+  return code;
+}
+
+/* Decode a SSubplan from a TLV stream; inverse of subplanToMsg.
+ * Nested nodes are materialized (allocated) via msgToNodeFromTlv; unknown TLV
+ * tags are skipped for forward compatibility. */
+static int32_t msgToSubplan(STlvDecoder* pDecoder, void* pObj) {
+  SSubplan* pNode = (SSubplan*)pObj;
+
+  int32_t code = TSDB_CODE_SUCCESS;
+  STlv*   pTlv = NULL;
+  tlvForEach(pDecoder, pTlv, code) {
+    switch (pTlv->type) {
+      case SUBPLAN_CODE_SUBPLAN_ID:
+        code = tlvDecodeObjFromTlv(pTlv, msgToSubplanId, &pNode->id);
+        break;
+      case SUBPLAN_CODE_SUBPLAN_TYPE:
+        // sizeof guards against enum-width mismatch between encoder and struct
+        code = tlvDecodeEnum(pTlv, &pNode->subplanType, sizeof(pNode->subplanType));
+        break;
+      case SUBPLAN_CODE_MSG_TYPE:
+        code = tlvDecodeI32(pTlv, &pNode->msgType);
+        break;
+      case SUBPLAN_CODE_LEVEL:
+        code = tlvDecodeI32(pTlv, &pNode->level);
+        break;
+      case SUBPLAN_CODE_DBFNAME:
+        code = tlvDecodeCStr(pTlv, pNode->dbFName);
+        break;
+      case SUBPLAN_CODE_USER:
+        code = tlvDecodeCStr(pTlv, pNode->user);
+        break;
+      case SUBPLAN_CODE_EXECNODE:
+        code = tlvDecodeObjFromTlv(pTlv, msgToQueryNodeAddr, &pNode->execNode);
+        break;
+      case SUBPLAN_CODE_ROOT_NODE:
+        code = msgToNodeFromTlv(pTlv, (void**)&pNode->pNode);
+        break;
+      case SUBPLAN_CODE_DATA_SINK:
+        code = msgToNodeFromTlv(pTlv, (void**)&pNode->pDataSink);
+        break;
+      case SUBPLAN_CODE_TAG_COND:
+        code = msgToNodeFromTlv(pTlv, (void**)&pNode->pTagCond);
+        break;
+      case SUBPLAN_CODE_TAG_INDEX_COND:
+        code = msgToNodeFromTlv(pTlv, (void**)&pNode->pTagIndexCond);
+        break;
+      default:
+        break;
+    }
+  }
+
+  return code;
+}
+
+// TLV field tags for SQueryPlan; wire-format values, do not renumber.
+enum { QUERY_PLAN_CODE_QUERY_ID = 1, QUERY_PLAN_CODE_NUM_OF_SUBPLANS, QUERY_PLAN_CODE_SUBPLANS };
+
+/* Encode a SQueryPlan (top-level plan: query id, subplan count, subplan list)
+ * as TLV fields. Stops at the first encode failure. */
+static int32_t queryPlanToMsg(const void* pObj, STlvEncoder* pEncoder) {
+  const SQueryPlan* pNode = (const SQueryPlan*)pObj;
+
+  int32_t code = tlvEncodeU64(pEncoder, QUERY_PLAN_CODE_QUERY_ID, pNode->queryId);
+  if (TSDB_CODE_SUCCESS == code) {
+    code = tlvEncodeI32(pEncoder, QUERY_PLAN_CODE_NUM_OF_SUBPLANS, pNode->numOfSubplans);
+  }
+  if (TSDB_CODE_SUCCESS == code) {
+    code = tlvEncodeObj(pEncoder, QUERY_PLAN_CODE_SUBPLANS, nodeListToMsg, pNode->pSubplans);
+  }
+
+  return code;
+}
+
+/* Decode a SQueryPlan from a TLV stream; inverse of queryPlanToMsg.
+ * Unknown TLV tags are skipped for forward compatibility. */
+static int32_t msgToQueryPlan(STlvDecoder* pDecoder, void* pObj) {
+  SQueryPlan* pNode = (SQueryPlan*)pObj;
+
+  int32_t code = TSDB_CODE_SUCCESS;
+  STlv*   pTlv = NULL;
+  tlvForEach(pDecoder, pTlv, code) {
+    switch (pTlv->type) {
+      case QUERY_PLAN_CODE_QUERY_ID:
+        code = tlvDecodeU64(pTlv, &pNode->queryId);
+        break;
+      case QUERY_PLAN_CODE_NUM_OF_SUBPLANS:
+        code = tlvDecodeI32(pTlv, &pNode->numOfSubplans);
+        break;
+      case QUERY_PLAN_CODE_SUBPLANS:
+        code = msgToNodeListFromTlv(pTlv, (void**)&pNode->pSubplans);
+        break;
+      default:
+        break;
+    }
+  }
+
+  return code;
+}
+
+/* Dispatch serialization by node type: route pObj to the type-specific
+ * *ToMsg encoder. Several physical-plan variants intentionally share one
+ * encoder (e.g. all interval/session/state window flavors) because their
+ * structs share the serialized layout. QUERY_NODE_LEFT_VALUE carries no
+ * payload, so its case is an intentional no-op. Unknown node types are
+ * logged as a warning (not an error) and produce an empty body. */
+static int32_t specificNodeToMsg(const void* pObj, STlvEncoder* pEncoder) {
+  int32_t code = TSDB_CODE_SUCCESS;
+  switch (nodeType(pObj)) {
+    case QUERY_NODE_COLUMN:
+      code = columnNodeToMsg(pObj, pEncoder);
+      break;
+    case QUERY_NODE_VALUE:
+      code = valueNodeToMsg(pObj, pEncoder);
+      break;
+    case QUERY_NODE_OPERATOR:
+      code = operatorNodeToMsg(pObj, pEncoder);
+      break;
+    case QUERY_NODE_LOGIC_CONDITION:
+      code = logicConditionNodeToMsg(pObj, pEncoder);
+      break;
+    case QUERY_NODE_FUNCTION:
+      code = functionNodeToMsg(pObj, pEncoder);
+      break;
+    case QUERY_NODE_ORDER_BY_EXPR:
+      code = orderByExprNodeToMsg(pObj, pEncoder);
+      break;
+    case QUERY_NODE_LIMIT:
+      code = limitNodeToMsg(pObj, pEncoder);
+      break;
+    case QUERY_NODE_NODE_LIST:
+      code = nodeListNodeToMsg(pObj, pEncoder);
+      break;
+    case QUERY_NODE_TARGET:
+      code = targetNodeToMsg(pObj, pEncoder);
+      break;
+    case QUERY_NODE_DATABLOCK_DESC:
+      code = dataBlockDescNodeToMsg(pObj, pEncoder);
+      break;
+    case QUERY_NODE_SLOT_DESC:
+      code = slotDescNodeToMsg(pObj, pEncoder);
+      break;
+    case QUERY_NODE_LEFT_VALUE:
+      break;  // no payload beyond the node type itself
+    case QUERY_NODE_PHYSICAL_PLAN_TAG_SCAN:
+    case QUERY_NODE_PHYSICAL_PLAN_BLOCK_DIST_SCAN:
+      code = physiScanNodeToMsg(pObj, pEncoder);
+      break;
+    case QUERY_NODE_PHYSICAL_PLAN_LAST_ROW_SCAN:
+      code = physiLastRowScanNodeToMsg(pObj, pEncoder);
+      break;
+    case QUERY_NODE_PHYSICAL_PLAN_TABLE_SCAN:
+    case QUERY_NODE_PHYSICAL_PLAN_TABLE_MERGE_SCAN:
+    case QUERY_NODE_PHYSICAL_PLAN_STREAM_SCAN:
+      code = physiTableScanNodeToMsg(pObj, pEncoder);
+      break;
+    case QUERY_NODE_PHYSICAL_PLAN_SYSTABLE_SCAN:
+      code = physiSysTableScanNodeToMsg(pObj, pEncoder);
+      break;
+    case QUERY_NODE_PHYSICAL_PLAN_PROJECT:
+      code = physiProjectNodeToMsg(pObj, pEncoder);
+      break;
+    case QUERY_NODE_PHYSICAL_PLAN_MERGE_JOIN:
+      code = physiJoinNodeToMsg(pObj, pEncoder);
+      break;
+    case QUERY_NODE_PHYSICAL_PLAN_HASH_AGG:
+      code = physiAggNodeToMsg(pObj, pEncoder);
+      break;
+    case QUERY_NODE_PHYSICAL_PLAN_EXCHANGE:
+      code = physiExchangeNodeToMsg(pObj, pEncoder);
+      break;
+    case QUERY_NODE_PHYSICAL_PLAN_MERGE:
+      code = physiMergeNodeToMsg(pObj, pEncoder);
+      break;
+    case QUERY_NODE_PHYSICAL_PLAN_SORT:
+    case QUERY_NODE_PHYSICAL_PLAN_GROUP_SORT:
+      code = physiSortNodeToMsg(pObj, pEncoder);
+      break;
+    case QUERY_NODE_PHYSICAL_PLAN_HASH_INTERVAL:
+    case QUERY_NODE_PHYSICAL_PLAN_MERGE_ALIGNED_INTERVAL:
+    case QUERY_NODE_PHYSICAL_PLAN_STREAM_INTERVAL:
+    case QUERY_NODE_PHYSICAL_PLAN_STREAM_FINAL_INTERVAL:
+    case QUERY_NODE_PHYSICAL_PLAN_STREAM_SEMI_INTERVAL:
+      code = physiIntervalNodeToMsg(pObj, pEncoder);
+      break;
+    case QUERY_NODE_PHYSICAL_PLAN_FILL:
+      code = physiFillNodeToMsg(pObj, pEncoder);
+      break;
+    case QUERY_NODE_PHYSICAL_PLAN_MERGE_SESSION:
+    case QUERY_NODE_PHYSICAL_PLAN_STREAM_SESSION:
+    case QUERY_NODE_PHYSICAL_PLAN_STREAM_SEMI_SESSION:
+    case QUERY_NODE_PHYSICAL_PLAN_STREAM_FINAL_SESSION:
+      code = physiSessionWindowNodeToMsg(pObj, pEncoder);
+      break;
+    case QUERY_NODE_PHYSICAL_PLAN_MERGE_STATE:
+    case QUERY_NODE_PHYSICAL_PLAN_STREAM_STATE:
+      code = physiStateWindowNodeToMsg(pObj, pEncoder);
+      break;
+    case QUERY_NODE_PHYSICAL_PLAN_PARTITION:
+    case QUERY_NODE_PHYSICAL_PLAN_STREAM_PARTITION:
+      code = physiPartitionNodeToMsg(pObj, pEncoder);
+      break;
+    case QUERY_NODE_PHYSICAL_PLAN_INDEF_ROWS_FUNC:
+      code = physiIndefRowsFuncNodeToMsg(pObj, pEncoder);
+      break;
+    case QUERY_NODE_PHYSICAL_PLAN_INTERP_FUNC:
+      code = physiInterpFuncNodeToMsg(pObj, pEncoder);
+      break;
+    case QUERY_NODE_PHYSICAL_PLAN_DISPATCH:
+      code = physiDispatchNodeToMsg(pObj, pEncoder);
+      break;
+    case QUERY_NODE_PHYSICAL_PLAN_QUERY_INSERT:
+      code = physiQueryInsertNodeToMsg(pObj, pEncoder);
+      break;
+    case QUERY_NODE_PHYSICAL_PLAN_DELETE:
+      code = physiDeleteNodeToMsg(pObj, pEncoder);
+      break;
+    case QUERY_NODE_PHYSICAL_SUBPLAN:
+      code = subplanToMsg(pObj, pEncoder);
+      break;
+    case QUERY_NODE_PHYSICAL_PLAN:
+      code = queryPlanToMsg(pObj, pEncoder);
+      break;
+    default:
+      nodesWarn("specificNodeToMsg unknown node = %s", nodesNodeName(nodeType(pObj)));
+      break;
+  }
+  if (TSDB_CODE_SUCCESS != code) {
+    nodesError("specificNodeToMsg error node = %s", nodesNodeName(nodeType(pObj)));
+  }
+  return code;
+}
+
+/* Dispatch deserialization by node type: route the TLV body to the matching
+ * msgTo* decoder. pObj was already allocated with the right size by
+ * nodesMakeNode, so each decoder fills fields in place. Must stay case-for-case
+ * in sync with specificNodeToMsg above; unknown types log a warning only. */
+static int32_t msgToSpecificNode(STlvDecoder* pDecoder, void* pObj) {
+  int32_t code = TSDB_CODE_SUCCESS;
+  switch (nodeType(pObj)) {
+    case QUERY_NODE_COLUMN:
+      code = msgToColumnNode(pDecoder, pObj);
+      break;
+    case QUERY_NODE_VALUE:
+      code = msgToValueNode(pDecoder, pObj);
+      break;
+    case QUERY_NODE_OPERATOR:
+      code = msgToOperatorNode(pDecoder, pObj);
+      break;
+    case QUERY_NODE_LOGIC_CONDITION:
+      code = msgToLogicConditionNode(pDecoder, pObj);
+      break;
+    case QUERY_NODE_FUNCTION:
+      code = msgToFunctionNode(pDecoder, pObj);
+      break;
+    case QUERY_NODE_ORDER_BY_EXPR:
+      code = msgToOrderByExprNode(pDecoder, pObj);
+      break;
+    case QUERY_NODE_LIMIT:
+      code = msgToLimitNode(pDecoder, pObj);
+      break;
+    case QUERY_NODE_NODE_LIST:
+      code = msgToNodeListNode(pDecoder, pObj);
+      break;
+    case QUERY_NODE_TARGET:
+      code = msgToTargetNode(pDecoder, pObj);
+      break;
+    case QUERY_NODE_DATABLOCK_DESC:
+      code = msgToDataBlockDescNode(pDecoder, pObj);
+      break;
+    case QUERY_NODE_SLOT_DESC:
+      code = msgToSlotDescNode(pDecoder, pObj);
+      break;
+    case QUERY_NODE_LEFT_VALUE:
+      break;  // no payload; mirrors the encoder's no-op case
+    case QUERY_NODE_PHYSICAL_PLAN_TAG_SCAN:
+    case QUERY_NODE_PHYSICAL_PLAN_BLOCK_DIST_SCAN:
+      code = msgToPhysiScanNode(pDecoder, pObj);
+      break;
+    case QUERY_NODE_PHYSICAL_PLAN_LAST_ROW_SCAN:
+      code = msgToPhysiLastRowScanNode(pDecoder, pObj);
+      break;
+    case QUERY_NODE_PHYSICAL_PLAN_TABLE_SCAN:
+    case QUERY_NODE_PHYSICAL_PLAN_TABLE_MERGE_SCAN:
+    case QUERY_NODE_PHYSICAL_PLAN_STREAM_SCAN:
+      code = msgToPhysiTableScanNode(pDecoder, pObj);
+      break;
+    case QUERY_NODE_PHYSICAL_PLAN_SYSTABLE_SCAN:
+      code = msgToPhysiSysTableScanNode(pDecoder, pObj);
+      break;
+    case QUERY_NODE_PHYSICAL_PLAN_PROJECT:
+      code = msgToPhysiProjectNode(pDecoder, pObj);
+      break;
+    case QUERY_NODE_PHYSICAL_PLAN_MERGE_JOIN:
+      code = msgToPhysiJoinNode(pDecoder, pObj);
+      break;
+    case QUERY_NODE_PHYSICAL_PLAN_HASH_AGG:
+      code = msgToPhysiAggNode(pDecoder, pObj);
+      break;
+    case QUERY_NODE_PHYSICAL_PLAN_EXCHANGE:
+      code = msgToPhysiExchangeNode(pDecoder, pObj);
+      break;
+    case QUERY_NODE_PHYSICAL_PLAN_MERGE:
+      code = msgToPhysiMergeNode(pDecoder, pObj);
+      break;
+    case QUERY_NODE_PHYSICAL_PLAN_SORT:
+    case QUERY_NODE_PHYSICAL_PLAN_GROUP_SORT:
+      code = msgToPhysiSortNode(pDecoder, pObj);
+      break;
+    case QUERY_NODE_PHYSICAL_PLAN_HASH_INTERVAL:
+    case QUERY_NODE_PHYSICAL_PLAN_MERGE_ALIGNED_INTERVAL:
+    case QUERY_NODE_PHYSICAL_PLAN_STREAM_INTERVAL:
+    case QUERY_NODE_PHYSICAL_PLAN_STREAM_FINAL_INTERVAL:
+    case QUERY_NODE_PHYSICAL_PLAN_STREAM_SEMI_INTERVAL:
+      code = msgToPhysiIntervalNode(pDecoder, pObj);
+      break;
+    case QUERY_NODE_PHYSICAL_PLAN_FILL:
+      code = msgToPhysiFillNode(pDecoder, pObj);
+      break;
+    case QUERY_NODE_PHYSICAL_PLAN_MERGE_SESSION:
+    case QUERY_NODE_PHYSICAL_PLAN_STREAM_SESSION:
+    case QUERY_NODE_PHYSICAL_PLAN_STREAM_SEMI_SESSION:
+    case QUERY_NODE_PHYSICAL_PLAN_STREAM_FINAL_SESSION:
+      code = msgToPhysiSessionWindowNode(pDecoder, pObj);
+      break;
+    case QUERY_NODE_PHYSICAL_PLAN_MERGE_STATE:
+    case QUERY_NODE_PHYSICAL_PLAN_STREAM_STATE:
+      code = msgToPhysiStateWindowNode(pDecoder, pObj);
+      break;
+    case QUERY_NODE_PHYSICAL_PLAN_PARTITION:
+    case QUERY_NODE_PHYSICAL_PLAN_STREAM_PARTITION:
+      code = msgToPhysiPartitionNode(pDecoder, pObj);
+      break;
+    case QUERY_NODE_PHYSICAL_PLAN_INDEF_ROWS_FUNC:
+      code = msgToPhysiIndefRowsFuncNode(pDecoder, pObj);
+      break;
+    case QUERY_NODE_PHYSICAL_PLAN_INTERP_FUNC:
+      code = msgToPhysiInterpFuncNode(pDecoder, pObj);
+      break;
+    case QUERY_NODE_PHYSICAL_PLAN_DISPATCH:
+      code = msgToPhysiDispatchNode(pDecoder, pObj);
+      break;
+    case QUERY_NODE_PHYSICAL_PLAN_QUERY_INSERT:
+      code = msgToPhysiQueryInsertNode(pDecoder, pObj);
+      break;
+    case QUERY_NODE_PHYSICAL_PLAN_DELETE:
+      code = msgToPhysiDeleteNode(pDecoder, pObj);
+      break;
+    case QUERY_NODE_PHYSICAL_SUBPLAN:
+      code = msgToSubplan(pDecoder, pObj);
+      break;
+    case QUERY_NODE_PHYSICAL_PLAN:
+      code = msgToQueryPlan(pDecoder, pObj);
+      break;
+    default:
+      nodesWarn("msgToSpecificNode unknown node = %s", nodesNodeName(nodeType(pObj)));
+      break;
+  }
+  if (TSDB_CODE_SUCCESS != code) {
+    nodesError("msgToSpecificNode error node = %s", nodesNodeName(nodeType(pObj)));
+  }
+  return code;
+}
+
+/* Serialize one node as a TLV object whose tag is the node type, so the
+ * decoder can allocate the right struct before reading the body. */
+static int32_t nodeToMsg(const void* pObj, STlvEncoder* pEncoder) {
+  return tlvEncodeObj(pEncoder, nodeType(pObj), specificNodeToMsg, pObj);
+}
+
+/* Deserialize one node: nodesMakeNode allocates the struct from the TLV tag
+ * (the node type), then msgToSpecificNode fills it. *pObj receives ownership. */
+static int32_t msgToNode(STlvDecoder* pDecoder, void** pObj) {
+  return tlvDecodeDynObj(pDecoder, (FMakeObject)nodesMakeNode, msgToSpecificNode, pObj);
+}
+
+/* Decode a node from the payload of an already-parsed TLV by wrapping the
+ * TLV's value bytes in a fresh sub-decoder. */
+static int32_t msgToNodeFromTlv(STlv* pTlv, void** pObj) {
+  STlvDecoder decoder = {.bufSize = pTlv->len, .offset = 0, .pBuf = pTlv->value};
+  return msgToNode(&decoder, pObj);
+}
+
+/* Serialize every node of a SNodeList back-to-back. No element count or list
+ * header is written: the decoder (msgToNodeList) reads until the buffer ends.
+ * Stops and returns the error of the first node that fails to encode. */
+static int32_t nodeListToMsg(const void* pObj, STlvEncoder* pEncoder) {
+  const SNodeList* pList = (const SNodeList*)pObj;
+
+  SNode* pNode = NULL;
+  FOREACH(pNode, pList) {
+    int32_t code = nodeToMsg(pNode, pEncoder);
+    if (TSDB_CODE_SUCCESS != code) {
+      return code;
+    }
+  }
+
+  return TSDB_CODE_SUCCESS;
+}
+
+/* Decode a back-to-back sequence of serialized nodes (as written by
+ * nodeListToMsg) into a freshly allocated SNodeList. Reads until the decoder
+ * buffer is exhausted. On success *pObj receives ownership of the list; on
+ * any failure the partially built list is destroyed and *pObj is untouched. */
+static int32_t msgToNodeList(STlvDecoder* pDecoder, void** pObj) {
+  SNodeList* pList = nodesMakeList();
+  if (NULL == pList) {
+    // allocation failure: bail out before dereferencing the list
+    return TSDB_CODE_OUT_OF_MEMORY;
+  }
+
+  int32_t code = TSDB_CODE_SUCCESS;
+  while (TSDB_CODE_SUCCESS == code && !tlvDecodeEnd(pDecoder)) {
+    SNode* pNode = NULL;
+    code = msgToNode(pDecoder, (void**)&pNode);
+    if (TSDB_CODE_SUCCESS == code) {
+      code = nodesListAppend(pList, pNode);
+    }
+  }
+  if (TSDB_CODE_SUCCESS == code) {
+    *pObj = pList;
+  } else {
+    nodesDestroyList(pList);
+  }
+  return code;
+}
+
+/* Decode a node list from the payload of an already-parsed TLV by wrapping
+ * its value bytes in a fresh sub-decoder. */
+static int32_t msgToNodeListFromTlv(STlv* pTlv, void** pObj) {
+  STlvDecoder decoder = {.bufSize = pTlv->len, .offset = 0, .pBuf = pTlv->value};
+  return msgToNodeList(&decoder, pObj);
+}
+
+/* Public entry point: serialize a node tree into a newly allocated buffer.
+ * On success *pMsg/*pLen describe the buffer handed to the caller by
+ * endTlvEncode (caller owns it). terrno mirrors the returned code, including
+ * TSDB_CODE_SUCCESS on the happy path. NULL arguments fail fast with
+ * TSDB_CODE_FAILED. clearTlvEncoder releases the encoder's working state in
+ * both the success and failure paths. */
+int32_t nodesNodeToMsg(const SNode* pNode, char** pMsg, int32_t* pLen) {
+  if (NULL == pNode || NULL == pMsg || NULL == pLen) {
+    terrno = TSDB_CODE_FAILED;
+    return TSDB_CODE_FAILED;
+  }
+
+  STlvEncoder encoder;
+  int32_t     code = initTlvEncoder(&encoder);
+  if (TSDB_CODE_SUCCESS == code) {
+    code = nodeToMsg(pNode, &encoder);
+  }
+  if (TSDB_CODE_SUCCESS == code) {
+    endTlvEncode(&encoder, pMsg, pLen);
+  }
+  clearTlvEncoder(&encoder);
+
+  terrno = code;
+  return code;
+}
+
+/* Public entry point: deserialize a buffer produced by nodesNodeToMsg back
+ * into a node tree. On success *pNode receives ownership of the new tree; on
+ * decode failure the partially built tree is destroyed and *pNode is reset
+ * to NULL. terrno mirrors the returned code.
+ * NOTE(review): NULL pMsg/pNode returns TSDB_CODE_SUCCESS without touching
+ * *pNode or terrno — callers apparently treat empty input as a no-op; confirm
+ * this is intended rather than silently masking a caller bug. */
+int32_t nodesMsgToNode(const char* pMsg, int32_t len, SNode** pNode) {
+  if (NULL == pMsg || NULL == pNode) {
+    return TSDB_CODE_SUCCESS;
+  }
+
+  STlvDecoder decoder = {.bufSize = len, .offset = 0, .pBuf = pMsg};
+  int32_t     code = msgToNode(&decoder, (void**)pNode);
+  if (TSDB_CODE_SUCCESS != code) {
+    nodesDestroyNode(*pNode);
+    *pNode = NULL;
+  }
+
+  terrno = code;
+  return code;
+}
diff --git a/source/libs/nodes/src/nodesToSQLFuncs.c b/source/libs/nodes/src/nodesToSQLFuncs.c
index e521c57c3d80eac9455ab9affa5a4b053983ef84..9325d0288636ca7e22fe4fdd3a8e50ff90cdf0de 100644
--- a/source/libs/nodes/src/nodesToSQLFuncs.c
+++ b/source/libs/nodes/src/nodesToSQLFuncs.c
@@ -135,7 +135,12 @@ int32_t nodesNodeToSQL(SNode *pNode, char *buf, int32_t bufSize, int32_t *len) {
NODES_ERR_RET(TSDB_CODE_QRY_APP_ERROR);
}
- *len += snprintf(buf + *len, bufSize - *len, "%s", t);
+ int32_t tlen = strlen(t);
+ if (tlen > 32) {
+ *len += snprintf(buf + *len, bufSize - *len, "%.*s...%s", 32, t, t + tlen - 1);
+ } else {
+ *len += snprintf(buf + *len, bufSize - *len, "%s", t);
+ }
taosMemoryFree(t);
return TSDB_CODE_SUCCESS;
@@ -199,12 +204,17 @@ int32_t nodesNodeToSQL(SNode *pNode, char *buf, int32_t bufSize, int32_t *len) {
SNodeListNode *pListNode = (SNodeListNode *)pNode;
SNode *node = NULL;
bool first = true;
+ int32_t num = 0;
*len += snprintf(buf + *len, bufSize - *len, "(");
FOREACH(node, pListNode->pNodeList) {
if (!first) {
*len += snprintf(buf + *len, bufSize - *len, ", ");
+ if (++num >= 10) {
+ *len += snprintf(buf + *len, bufSize - *len, "...");
+ break;
+ }
}
NODES_ERR_RET(nodesNodeToSQL(node, buf, bufSize, len));
first = false;
diff --git a/source/libs/nodes/src/nodesTraverseFuncs.c b/source/libs/nodes/src/nodesTraverseFuncs.c
index 2e23998aad9463fb7a4a9b6834ceab2f7ea51e55..728e173ff85e87d553d118f0baf0022a99c58f5d 100644
--- a/source/libs/nodes/src/nodesTraverseFuncs.c
+++ b/source/libs/nodes/src/nodesTraverseFuncs.c
@@ -537,7 +537,8 @@ static EDealRes dispatchPhysiPlan(SNode* pNode, ETraversalOrder order, FNodeWalk
}
break;
}
- case QUERY_NODE_PHYSICAL_PLAN_PARTITION: {
+ case QUERY_NODE_PHYSICAL_PLAN_PARTITION:
+ case QUERY_NODE_PHYSICAL_PLAN_STREAM_PARTITION: {
SPartitionPhysiNode* pPart = (SPartitionPhysiNode*)pNode;
res = walkPhysiNode((SPhysiNode*)pNode, order, walker, pContext);
if (DEAL_RES_ERROR != res && DEAL_RES_END != res) {
diff --git a/source/libs/nodes/src/nodesUtilFuncs.c b/source/libs/nodes/src/nodesUtilFuncs.c
index d13057a93e824c2b94d94a006664b4cbc4c2f870..61b2ad954f3c91ed8d3bc8f7b9fb76c3c49cda9c 100644
--- a/source/libs/nodes/src/nodesUtilFuncs.c
+++ b/source/libs/nodes/src/nodesUtilFuncs.c
@@ -322,6 +322,8 @@ SNode* nodesMakeNode(ENodeType type) {
return makeNode(type, sizeof(SStreamStateWinodwPhysiNode));
case QUERY_NODE_PHYSICAL_PLAN_PARTITION:
return makeNode(type, sizeof(SPartitionPhysiNode));
+ case QUERY_NODE_PHYSICAL_PLAN_STREAM_PARTITION:
+ return makeNode(type, sizeof(SStreamPartitionPhysiNode));
case QUERY_NODE_PHYSICAL_PLAN_INDEF_ROWS_FUNC:
return makeNode(type, sizeof(SIndefRowsFuncPhysiNode));
case QUERY_NODE_PHYSICAL_PLAN_INTERP_FUNC:
@@ -951,7 +953,8 @@ void nodesDestroyNode(SNode* pNode) {
nodesDestroyNode(pPhyNode->pStateKey);
break;
}
- case QUERY_NODE_PHYSICAL_PLAN_PARTITION: {
+ case QUERY_NODE_PHYSICAL_PLAN_PARTITION:
+ case QUERY_NODE_PHYSICAL_PLAN_STREAM_PARTITION: {
SPartitionPhysiNode* pPhyNode = (SPartitionPhysiNode*)pNode;
destroyPhysiNode((SPhysiNode*)pPhyNode);
nodesDestroyList(pPhyNode->pExprs);
diff --git a/source/libs/parser/inc/parAst.h b/source/libs/parser/inc/parAst.h
index 2a4f4c194d3e46c9ff4d052187431287dce87b3e..898e4bf7328f873688f1ed6c8c31690de28f74c5 100644
--- a/source/libs/parser/inc/parAst.h
+++ b/source/libs/parser/inc/parAst.h
@@ -48,6 +48,7 @@ typedef enum EDatabaseOptionType {
DB_OPTION_KEEP,
DB_OPTION_PAGES,
DB_OPTION_PAGESIZE,
+ DB_OPTION_TSDB_PAGESIZE,
DB_OPTION_PRECISION,
DB_OPTION_REPLICA,
DB_OPTION_STRICT,
@@ -59,7 +60,10 @@ typedef enum EDatabaseOptionType {
DB_OPTION_WAL_RETENTION_PERIOD,
DB_OPTION_WAL_RETENTION_SIZE,
DB_OPTION_WAL_ROLL_PERIOD,
- DB_OPTION_WAL_SEGMENT_SIZE
+ DB_OPTION_WAL_SEGMENT_SIZE,
+ DB_OPTION_STT_TRIGGER,
+ DB_OPTION_TABLE_PREFIX,
+ DB_OPTION_TABLE_SUFFIX
} EDatabaseOptionType;
typedef enum ETableOptionType {
@@ -170,6 +174,7 @@ SNode* createShowCreateDatabaseStmt(SAstCreateContext* pCxt, SToken* pDbName);
SNode* createShowCreateTableStmt(SAstCreateContext* pCxt, ENodeType type, SNode* pRealTable);
SNode* createShowTableDistributedStmt(SAstCreateContext* pCxt, SNode* pRealTable);
SNode* createShowDnodeVariablesStmt(SAstCreateContext* pCxt, SNode* pDnodeId);
+SNode* createShowVnodesStmt(SAstCreateContext* pCxt, SNode* pDnodeId, SNode* pDnodeEndpoint);
SNode* createCreateUserStmt(SAstCreateContext* pCxt, SToken* pUserName, const SToken* pPassword, int8_t sysinfo);
SNode* createAlterUserStmt(SAstCreateContext* pCxt, SToken* pUserName, int8_t alterType, const SToken* pVal);
SNode* createDropUserStmt(SAstCreateContext* pCxt, SToken* pUserName);
diff --git a/source/libs/parser/inc/sql.y b/source/libs/parser/inc/sql.y
index 56e68d8374518ab7494371151513c099bc37ab80..bef2ed98ec92e07431d765ac8275ab9e6c111a2f 100644
--- a/source/libs/parser/inc/sql.y
+++ b/source/libs/parser/inc/sql.y
@@ -184,6 +184,7 @@ db_options(A) ::= db_options(B) KEEP integer_list(C).
db_options(A) ::= db_options(B) KEEP variable_list(C). { A = setDatabaseOption(pCxt, B, DB_OPTION_KEEP, C); }
db_options(A) ::= db_options(B) PAGES NK_INTEGER(C). { A = setDatabaseOption(pCxt, B, DB_OPTION_PAGES, &C); }
db_options(A) ::= db_options(B) PAGESIZE NK_INTEGER(C). { A = setDatabaseOption(pCxt, B, DB_OPTION_PAGESIZE, &C); }
+db_options(A) ::= db_options(B) TSDB_PAGESIZE NK_INTEGER(C). { A = setDatabaseOption(pCxt, B, DB_OPTION_TSDB_PAGESIZE, &C); }
db_options(A) ::= db_options(B) PRECISION NK_STRING(C). { A = setDatabaseOption(pCxt, B, DB_OPTION_PRECISION, &C); }
db_options(A) ::= db_options(B) REPLICA NK_INTEGER(C). { A = setDatabaseOption(pCxt, B, DB_OPTION_REPLICA, &C); }
db_options(A) ::= db_options(B) STRICT NK_STRING(C). { A = setDatabaseOption(pCxt, B, DB_OPTION_STRICT, &C); }
@@ -207,6 +208,9 @@ db_options(A) ::= db_options(B) WAL_RETENTION_SIZE NK_MINUS(D) NK_INTEGER(C).
}
db_options(A) ::= db_options(B) WAL_ROLL_PERIOD NK_INTEGER(C). { A = setDatabaseOption(pCxt, B, DB_OPTION_WAL_ROLL_PERIOD, &C); }
db_options(A) ::= db_options(B) WAL_SEGMENT_SIZE NK_INTEGER(C). { A = setDatabaseOption(pCxt, B, DB_OPTION_WAL_SEGMENT_SIZE, &C); }
+db_options(A) ::= db_options(B) STT_TRIGGER NK_INTEGER(C). { A = setDatabaseOption(pCxt, B, DB_OPTION_STT_TRIGGER, &C); }
+db_options(A) ::= db_options(B) TABLE_PREFIX NK_INTEGER(C). { A = setDatabaseOption(pCxt, B, DB_OPTION_TABLE_PREFIX, &C); }
+db_options(A) ::= db_options(B) TABLE_SUFFIX NK_INTEGER(C). { A = setDatabaseOption(pCxt, B, DB_OPTION_TABLE_SUFFIX, &C); }
alter_db_options(A) ::= alter_db_option(B). { A = createAlterDatabaseOptions(pCxt); A = setAlterDatabaseOption(pCxt, A, &B); }
alter_db_options(A) ::= alter_db_options(B) alter_db_option(C). { A = setAlterDatabaseOption(pCxt, B, &C); }
@@ -223,6 +227,7 @@ alter_db_option(A) ::= KEEP variable_list(B).
//alter_db_option(A) ::= REPLICA NK_INTEGER(B). { A.type = DB_OPTION_REPLICA; A.val = B; }
//alter_db_option(A) ::= STRICT NK_STRING(B). { A.type = DB_OPTION_STRICT; A.val = B; }
alter_db_option(A) ::= WAL_LEVEL NK_INTEGER(B). { A.type = DB_OPTION_WAL; A.val = B; }
+alter_db_option(A) ::= STT_TRIGGER NK_INTEGER(B). { A.type = DB_OPTION_STT_TRIGGER; A.val = B; }
%type integer_list { SNodeList* }
%destructor integer_list { nodesDestroyList($$); }
@@ -410,6 +415,8 @@ cmd ::= SHOW TABLE DISTRIBUTED full_table_name(A).
cmd ::= SHOW CONSUMERS. { pCxt->pRootNode = createShowStmt(pCxt, QUERY_NODE_SHOW_CONSUMERS_STMT); }
cmd ::= SHOW SUBSCRIPTIONS. { pCxt->pRootNode = createShowStmt(pCxt, QUERY_NODE_SHOW_SUBSCRIPTIONS_STMT); }
cmd ::= SHOW TAGS FROM table_name_cond(A) from_db_opt(B). { pCxt->pRootNode = createShowStmtWithCond(pCxt, QUERY_NODE_SHOW_TAGS_STMT, B, A, OP_TYPE_EQUAL); }
+cmd ::= SHOW VNODES NK_INTEGER(A). { pCxt->pRootNode = createShowVnodesStmt(pCxt, createValueNode(pCxt, TSDB_DATA_TYPE_BIGINT, &A), NULL); }
+cmd ::= SHOW VNODES NK_STRING(A). { pCxt->pRootNode = createShowVnodesStmt(pCxt, NULL, createValueNode(pCxt, TSDB_DATA_TYPE_VARCHAR, &A)); }
db_name_cond_opt(A) ::= . { A = createDefaultDatabaseCondValue(pCxt); }
db_name_cond_opt(A) ::= db_name(B) NK_DOT. { A = createValueNode(pCxt, TSDB_DATA_TYPE_BINARY, &B); }
@@ -495,12 +502,9 @@ bufsize_opt(A) ::= BUFSIZE NK_INTEGER(B).
/************************************************ create/drop stream **************************************************/
cmd ::= CREATE STREAM not_exists_opt(E) stream_name(A)
- stream_options(B) into_opt(C) AS query_expression(D). { pCxt->pRootNode = createCreateStreamStmt(pCxt, E, &A, C, B, D); }
+ stream_options(B) INTO full_table_name(C) AS query_expression(D). { pCxt->pRootNode = createCreateStreamStmt(pCxt, E, &A, C, B, D); }
cmd ::= DROP STREAM exists_opt(A) stream_name(B). { pCxt->pRootNode = createDropStreamStmt(pCxt, A, &B); }
-into_opt(A) ::= . { A = NULL; }
-into_opt(A) ::= INTO full_table_name(B). { A = B; }
-
stream_options(A) ::= . { A = createStreamOptions(pCxt); }
stream_options(A) ::= stream_options(B) TRIGGER AT_ONCE. { ((SStreamOptions*)B)->triggerType = STREAM_TRIGGER_AT_ONCE; A = B; }
stream_options(A) ::= stream_options(B) TRIGGER WINDOW_CLOSE. { ((SStreamOptions*)B)->triggerType = STREAM_TRIGGER_WINDOW_CLOSE; A = B; }
@@ -1000,4 +1004,4 @@ null_ordering_opt(A) ::= NULLS LAST.
%fallback ABORT AFTER ATTACH BEFORE BEGIN BITAND BITNOT BITOR BLOCKS CHANGE COMMA COMPACT CONCAT CONFLICT COPY DEFERRED DELIMITERS DETACH DIVIDE DOT EACH END FAIL
FILE FOR GLOB ID IMMEDIATE IMPORT INITIALLY INSTEAD ISNULL KEY NK_BITNOT NK_SEMI NOTNULL OF PLUS PRIVILEGE RAISE REPLACE RESTRICT ROW SEMI STAR STATEMENT STRING
- TIMES UPDATE VALUES VARIABLE VIEW VNODES WAL.
+ TIMES UPDATE VALUES VARIABLE VIEW WAL.
diff --git a/source/libs/parser/src/parAstCreater.c b/source/libs/parser/src/parAstCreater.c
index 4d0b0bbb2533111fe31d4810c58270bea5b22314..655bb68206975c65022511632c4f8fd56c0699a3 100644
--- a/source/libs/parser/src/parAstCreater.c
+++ b/source/libs/parser/src/parAstCreater.c
@@ -795,6 +795,20 @@ SNode* createSetOperator(SAstCreateContext* pCxt, ESetOperatorType type, SNode*
return (SNode*)setOp;
}
+static void updateWalOptionsDefault(SDatabaseOptions* pOptions) {
+ if (!pOptions->walRetentionPeriodIsSet) {
+ pOptions->walRetentionPeriod =
+ pOptions->replica > 1 ? TSDB_REPS_DEF_DB_WAL_RET_PERIOD : TSDB_REP_DEF_DB_WAL_RET_PERIOD;
+ }
+ if (!pOptions->walRetentionSizeIsSet) {
+ pOptions->walRetentionSize = pOptions->replica > 1 ? TSDB_REPS_DEF_DB_WAL_RET_SIZE : TSDB_REP_DEF_DB_WAL_RET_SIZE;
+ }
+ if (!pOptions->walRollPeriodIsSet) {
+ pOptions->walRollPeriod =
+ pOptions->replica > 1 ? TSDB_REPS_DEF_DB_WAL_ROLL_PERIOD : TSDB_REP_DEF_DB_WAL_ROLL_PERIOD;
+ }
+}
+
SNode* createDefaultDatabaseOptions(SAstCreateContext* pCxt) {
CHECK_PARSER_STATUS(pCxt);
SDatabaseOptions* pOptions = (SDatabaseOptions*)nodesMakeNode(QUERY_NODE_DATABASE_OPTIONS);
@@ -812,6 +826,7 @@ SNode* createDefaultDatabaseOptions(SAstCreateContext* pCxt) {
pOptions->keep[2] = TSDB_DEFAULT_KEEP;
pOptions->pages = TSDB_DEFAULT_PAGES_PER_VNODE;
pOptions->pagesize = TSDB_DEFAULT_PAGESIZE_PER_VNODE;
+ pOptions->tsdbPageSize = TSDB_DEFAULT_TSDB_PAGESIZE;
pOptions->precision = TSDB_DEFAULT_PRECISION;
pOptions->replica = TSDB_DEFAULT_DB_REPLICA;
pOptions->strict = TSDB_DEFAULT_DB_STRICT;
@@ -819,10 +834,11 @@ SNode* createDefaultDatabaseOptions(SAstCreateContext* pCxt) {
pOptions->numOfVgroups = TSDB_DEFAULT_VN_PER_DB;
pOptions->singleStable = TSDB_DEFAULT_DB_SINGLE_STABLE;
pOptions->schemaless = TSDB_DEFAULT_DB_SCHEMALESS;
- pOptions->walRetentionPeriod = TSDB_DEFAULT_DB_WAL_RETENTION_PERIOD;
- pOptions->walRetentionSize = TSDB_DEFAULT_DB_WAL_RETENTION_SIZE;
- pOptions->walRollPeriod = TSDB_DEFAULT_DB_WAL_ROLL_PERIOD;
+ updateWalOptionsDefault(pOptions);
pOptions->walSegmentSize = TSDB_DEFAULT_DB_WAL_SEGMENT_SIZE;
+ pOptions->sstTrigger = TSDB_DEFAULT_SST_TRIGGER;
+ pOptions->tablePrefix = TSDB_DEFAULT_HASH_PREFIX;
+ pOptions->tableSuffix = TSDB_DEFAULT_HASH_SUFFIX;
return (SNode*)pOptions;
}
@@ -843,6 +859,7 @@ SNode* createAlterDatabaseOptions(SAstCreateContext* pCxt) {
pOptions->keep[2] = -1;
pOptions->pages = -1;
pOptions->pagesize = -1;
+ pOptions->tsdbPageSize = -1;
pOptions->precision = -1;
pOptions->replica = -1;
pOptions->strict = -1;
@@ -854,83 +871,103 @@ SNode* createAlterDatabaseOptions(SAstCreateContext* pCxt) {
pOptions->walRetentionSize = -1;
pOptions->walRollPeriod = -1;
pOptions->walSegmentSize = -1;
+ pOptions->sstTrigger = -1;
+ pOptions->tablePrefix = -1;
+ pOptions->tableSuffix = -1;
return (SNode*)pOptions;
}
SNode* setDatabaseOption(SAstCreateContext* pCxt, SNode* pOptions, EDatabaseOptionType type, void* pVal) {
CHECK_PARSER_STATUS(pCxt);
+ SDatabaseOptions* pDbOptions = (SDatabaseOptions*)pOptions;
switch (type) {
case DB_OPTION_BUFFER:
- ((SDatabaseOptions*)pOptions)->buffer = taosStr2Int32(((SToken*)pVal)->z, NULL, 10);
+ pDbOptions->buffer = taosStr2Int32(((SToken*)pVal)->z, NULL, 10);
break;
case DB_OPTION_CACHEMODEL:
- COPY_STRING_FORM_STR_TOKEN(((SDatabaseOptions*)pOptions)->cacheModelStr, (SToken*)pVal);
+ COPY_STRING_FORM_STR_TOKEN(pDbOptions->cacheModelStr, (SToken*)pVal);
break;
case DB_OPTION_CACHESIZE:
- ((SDatabaseOptions*)pOptions)->cacheLastSize = taosStr2Int32(((SToken*)pVal)->z, NULL, 10);
+ pDbOptions->cacheLastSize = taosStr2Int32(((SToken*)pVal)->z, NULL, 10);
break;
case DB_OPTION_COMP:
- ((SDatabaseOptions*)pOptions)->compressionLevel = taosStr2Int8(((SToken*)pVal)->z, NULL, 10);
+ pDbOptions->compressionLevel = taosStr2Int8(((SToken*)pVal)->z, NULL, 10);
break;
case DB_OPTION_DAYS: {
SToken* pToken = pVal;
if (TK_NK_INTEGER == pToken->type) {
- ((SDatabaseOptions*)pOptions)->daysPerFile = taosStr2Int32(pToken->z, NULL, 10) * 1440;
+ pDbOptions->daysPerFile = taosStr2Int32(pToken->z, NULL, 10) * 1440;
} else {
- ((SDatabaseOptions*)pOptions)->pDaysPerFile = (SValueNode*)createDurationValueNode(pCxt, pToken);
+ pDbOptions->pDaysPerFile = (SValueNode*)createDurationValueNode(pCxt, pToken);
}
break;
}
case DB_OPTION_FSYNC:
- ((SDatabaseOptions*)pOptions)->fsyncPeriod = taosStr2Int32(((SToken*)pVal)->z, NULL, 10);
+ pDbOptions->fsyncPeriod = taosStr2Int32(((SToken*)pVal)->z, NULL, 10);
break;
case DB_OPTION_MAXROWS:
- ((SDatabaseOptions*)pOptions)->maxRowsPerBlock = taosStr2Int32(((SToken*)pVal)->z, NULL, 10);
+ pDbOptions->maxRowsPerBlock = taosStr2Int32(((SToken*)pVal)->z, NULL, 10);
break;
case DB_OPTION_MINROWS:
- ((SDatabaseOptions*)pOptions)->minRowsPerBlock = taosStr2Int32(((SToken*)pVal)->z, NULL, 10);
+ pDbOptions->minRowsPerBlock = taosStr2Int32(((SToken*)pVal)->z, NULL, 10);
break;
case DB_OPTION_KEEP:
- ((SDatabaseOptions*)pOptions)->pKeep = pVal;
+ pDbOptions->pKeep = pVal;
break;
case DB_OPTION_PAGES:
- ((SDatabaseOptions*)pOptions)->pages = taosStr2Int32(((SToken*)pVal)->z, NULL, 10);
+ pDbOptions->pages = taosStr2Int32(((SToken*)pVal)->z, NULL, 10);
break;
case DB_OPTION_PAGESIZE:
- ((SDatabaseOptions*)pOptions)->pagesize = taosStr2Int32(((SToken*)pVal)->z, NULL, 10);
+ pDbOptions->pagesize = taosStr2Int32(((SToken*)pVal)->z, NULL, 10);
+ break;
+ case DB_OPTION_TSDB_PAGESIZE:
+ pDbOptions->tsdbPageSize = taosStr2Int32(((SToken*)pVal)->z, NULL, 10);
break;
case DB_OPTION_PRECISION:
- COPY_STRING_FORM_STR_TOKEN(((SDatabaseOptions*)pOptions)->precisionStr, (SToken*)pVal);
+ COPY_STRING_FORM_STR_TOKEN(pDbOptions->precisionStr, (SToken*)pVal);
break;
case DB_OPTION_REPLICA:
- ((SDatabaseOptions*)pOptions)->replica = taosStr2Int8(((SToken*)pVal)->z, NULL, 10);
+ pDbOptions->replica = taosStr2Int8(((SToken*)pVal)->z, NULL, 10);
+ updateWalOptionsDefault(pDbOptions);
break;
case DB_OPTION_STRICT:
- COPY_STRING_FORM_STR_TOKEN(((SDatabaseOptions*)pOptions)->strictStr, (SToken*)pVal);
+ COPY_STRING_FORM_STR_TOKEN(pDbOptions->strictStr, (SToken*)pVal);
break;
case DB_OPTION_WAL:
- ((SDatabaseOptions*)pOptions)->walLevel = taosStr2Int8(((SToken*)pVal)->z, NULL, 10);
+ pDbOptions->walLevel = taosStr2Int8(((SToken*)pVal)->z, NULL, 10);
break;
case DB_OPTION_VGROUPS:
- ((SDatabaseOptions*)pOptions)->numOfVgroups = taosStr2Int32(((SToken*)pVal)->z, NULL, 10);
+ pDbOptions->numOfVgroups = taosStr2Int32(((SToken*)pVal)->z, NULL, 10);
break;
case DB_OPTION_SINGLE_STABLE:
- ((SDatabaseOptions*)pOptions)->singleStable = taosStr2Int8(((SToken*)pVal)->z, NULL, 10);
+ pDbOptions->singleStable = taosStr2Int8(((SToken*)pVal)->z, NULL, 10);
break;
case DB_OPTION_RETENTIONS:
- ((SDatabaseOptions*)pOptions)->pRetentions = pVal;
+ pDbOptions->pRetentions = pVal;
break;
case DB_OPTION_WAL_RETENTION_PERIOD:
- ((SDatabaseOptions*)pOptions)->walRetentionPeriod = taosStr2Int32(((SToken*)pVal)->z, NULL, 10);
+ pDbOptions->walRetentionPeriod = taosStr2Int32(((SToken*)pVal)->z, NULL, 10);
+ pDbOptions->walRetentionPeriodIsSet = true;
break;
case DB_OPTION_WAL_RETENTION_SIZE:
- ((SDatabaseOptions*)pOptions)->walRetentionSize = taosStr2Int32(((SToken*)pVal)->z, NULL, 10);
+ pDbOptions->walRetentionSize = taosStr2Int32(((SToken*)pVal)->z, NULL, 10);
+ pDbOptions->walRetentionSizeIsSet = true;
break;
case DB_OPTION_WAL_ROLL_PERIOD:
- ((SDatabaseOptions*)pOptions)->walRollPeriod = taosStr2Int32(((SToken*)pVal)->z, NULL, 10);
+ pDbOptions->walRollPeriod = taosStr2Int32(((SToken*)pVal)->z, NULL, 10);
+ pDbOptions->walRollPeriodIsSet = true;
break;
case DB_OPTION_WAL_SEGMENT_SIZE:
- ((SDatabaseOptions*)pOptions)->walSegmentSize = taosStr2Int32(((SToken*)pVal)->z, NULL, 10);
+ pDbOptions->walSegmentSize = taosStr2Int32(((SToken*)pVal)->z, NULL, 10);
+ break;
+ case DB_OPTION_STT_TRIGGER:
+ pDbOptions->sstTrigger = taosStr2Int32(((SToken*)pVal)->z, NULL, 10);
+ break;
+ case DB_OPTION_TABLE_PREFIX:
+ pDbOptions->tablePrefix = taosStr2Int32(((SToken*)pVal)->z, NULL, 10);
+ break;
+ case DB_OPTION_TABLE_SUFFIX:
+ pDbOptions->tableSuffix = taosStr2Int32(((SToken*)pVal)->z, NULL, 10);
break;
default:
break;
@@ -1251,7 +1288,8 @@ SNode* createUseDatabaseStmt(SAstCreateContext* pCxt, SToken* pDbName) {
static bool needDbShowStmt(ENodeType type) {
return QUERY_NODE_SHOW_TABLES_STMT == type || QUERY_NODE_SHOW_STABLES_STMT == type ||
- QUERY_NODE_SHOW_VGROUPS_STMT == type;
+ QUERY_NODE_SHOW_VGROUPS_STMT == type || QUERY_NODE_SHOW_INDEXES_STMT == type ||
+ QUERY_NODE_SHOW_TAGS_STMT == type;
}
SNode* createShowStmt(SAstCreateContext* pCxt, ENodeType type) {
@@ -1264,7 +1302,7 @@ SNode* createShowStmt(SAstCreateContext* pCxt, ENodeType type) {
SNode* createShowStmtWithCond(SAstCreateContext* pCxt, ENodeType type, SNode* pDbName, SNode* pTbName,
EOperatorType tableCondType) {
CHECK_PARSER_STATUS(pCxt);
- if (needDbShowStmt(type) && NULL == pDbName && NULL == pCxt->pQueryCxt->db) {
+ if (needDbShowStmt(type) && NULL == pDbName) {
snprintf(pCxt->pQueryCxt->pMsg, pCxt->pQueryCxt->msgLen, "db not specified");
pCxt->errCode = TSDB_CODE_PAR_SYNTAX_ERROR;
return NULL;
@@ -1316,6 +1354,15 @@ SNode* createShowDnodeVariablesStmt(SAstCreateContext* pCxt, SNode* pDnodeId) {
return (SNode*)pStmt;
}
+SNode* createShowVnodesStmt(SAstCreateContext* pCxt, SNode* pDnodeId, SNode* pDnodeEndpoint) {
+ CHECK_PARSER_STATUS(pCxt);
+ SShowVnodesStmt* pStmt = (SShowVnodesStmt*)nodesMakeNode(QUERY_NODE_SHOW_VNODES_STMT);
+ CHECK_OUT_OF_MEM(pStmt);
+ pStmt->pDnodeId = pDnodeId;
+ pStmt->pDnodeEndpoint = pDnodeEndpoint;
+ return (SNode*)pStmt;
+}
+
SNode* createCreateUserStmt(SAstCreateContext* pCxt, SToken* pUserName, const SToken* pPassword, int8_t sysinfo) {
CHECK_PARSER_STATUS(pCxt);
char password[TSDB_USET_PASSWORD_LEN] = {0};
diff --git a/source/libs/parser/src/parAstParser.c b/source/libs/parser/src/parAstParser.c
index ffa7729745021be10cfc22aa66dab7f7b3abccb3..ec5f6c4e570206f1a08d537f735605d0d54d53ef 100644
--- a/source/libs/parser/src/parAstParser.c
+++ b/source/libs/parser/src/parAstParser.c
@@ -97,16 +97,23 @@ typedef struct SCollectMetaKeyCxt {
typedef struct SCollectMetaKeyFromExprCxt {
SCollectMetaKeyCxt* pComCxt;
+ bool hasLastRow;
int32_t errCode;
} SCollectMetaKeyFromExprCxt;
static int32_t collectMetaKeyFromQuery(SCollectMetaKeyCxt* pCxt, SNode* pStmt);
static EDealRes collectMetaKeyFromFunction(SCollectMetaKeyFromExprCxt* pCxt, SFunctionNode* pFunc) {
- if (fmIsBuiltinFunc(pFunc->functionName)) {
- return DEAL_RES_CONTINUE;
+ switch (fmGetFuncType(pFunc->functionName)) {
+ case FUNCTION_TYPE_LAST_ROW:
+ pCxt->hasLastRow = true;
+ break;
+ case FUNCTION_TYPE_UDF:
+ pCxt->errCode = reserveUdfInCache(pFunc->functionName, pCxt->pComCxt->pMetaCache);
+ break;
+ default:
+ break;
}
- pCxt->errCode = reserveUdfInCache(pFunc->functionName, pCxt->pComCxt->pMetaCache);
return TSDB_CODE_SUCCESS == pCxt->errCode ? DEAL_RES_CONTINUE : DEAL_RES_ERROR;
}
@@ -136,9 +143,6 @@ static int32_t collectMetaKeyFromRealTableImpl(SCollectMetaKeyCxt* pCxt, const c
if (TSDB_CODE_SUCCESS == code && (0 == strcmp(pTable, TSDB_INS_TABLE_DNODE_VARIABLES))) {
code = reserveDnodeRequiredInCache(pCxt->pMetaCache);
}
- if (TSDB_CODE_SUCCESS == code) {
- code = reserveDbCfgInCache(pCxt->pParseCxt->acctId, pDb, pCxt->pMetaCache);
- }
return code;
}
@@ -185,9 +189,19 @@ static int32_t collectMetaKeyFromSetOperator(SCollectMetaKeyCxt* pCxt, SSetOpera
return code;
}
+static int32_t reserveDbCfgForLastRow(SCollectMetaKeyCxt* pCxt, SNode* pTable) {
+ if (NULL == pTable || QUERY_NODE_REAL_TABLE != nodeType(pTable)) {
+ return TSDB_CODE_SUCCESS;
+ }
+ return reserveDbCfgInCache(pCxt->pParseCxt->acctId, ((SRealTableNode*)pTable)->table.dbName, pCxt->pMetaCache);
+}
+
static int32_t collectMetaKeyFromSelect(SCollectMetaKeyCxt* pCxt, SSelectStmt* pStmt) {
- SCollectMetaKeyFromExprCxt cxt = {.pComCxt = pCxt, .errCode = TSDB_CODE_SUCCESS};
+ SCollectMetaKeyFromExprCxt cxt = {.pComCxt = pCxt, .hasLastRow = false, .errCode = TSDB_CODE_SUCCESS};
nodesWalkSelectStmt(pStmt, SQL_CLAUSE_FROM, collectMetaKeyFromExprImpl, &cxt);
+ if (TSDB_CODE_SUCCESS == cxt.errCode && cxt.hasLastRow) {
+ cxt.errCode = reserveDbCfgForLastRow(pCxt, pStmt->pFromTable);
+ }
return cxt.errCode;
}
@@ -360,12 +374,17 @@ static int32_t collectMetaKeyFromShowIndexes(SCollectMetaKeyCxt* pCxt, SShowStmt
}
static int32_t collectMetaKeyFromShowStables(SCollectMetaKeyCxt* pCxt, SShowStmt* pStmt) {
- return reserveTableMetaInCache(pCxt->pParseCxt->acctId, TSDB_INFORMATION_SCHEMA_DB, TSDB_INS_TABLE_STABLES,
- pCxt->pMetaCache);
+ int32_t code = reserveTableMetaInCache(pCxt->pParseCxt->acctId, TSDB_INFORMATION_SCHEMA_DB, TSDB_INS_TABLE_STABLES,
+ pCxt->pMetaCache);
+ if (TSDB_CODE_SUCCESS == code) {
+ code = reserveUserAuthInCache(pCxt->pParseCxt->acctId, pCxt->pParseCxt->pUser,
+ ((SValueNode*)pStmt->pDbName)->literal, AUTH_TYPE_READ, pCxt->pMetaCache);
+ }
+ return code;
}
static int32_t collectMetaKeyFromShowStreams(SCollectMetaKeyCxt* pCxt, SShowStmt* pStmt) {
- return reserveTableMetaInCache(pCxt->pParseCxt->acctId, TSDB_PERFORMANCE_SCHEMA_DB, TSDB_PERFS_TABLE_STREAMS,
+ return reserveTableMetaInCache(pCxt->pParseCxt->acctId, TSDB_INFORMATION_SCHEMA_DB, TSDB_INS_TABLE_STREAMS,
pCxt->pMetaCache);
}
@@ -373,11 +392,11 @@ static int32_t collectMetaKeyFromShowTables(SCollectMetaKeyCxt* pCxt, SShowStmt*
int32_t code = reserveTableMetaInCache(pCxt->pParseCxt->acctId, TSDB_INFORMATION_SCHEMA_DB, TSDB_INS_TABLE_TABLES,
pCxt->pMetaCache);
if (TSDB_CODE_SUCCESS == code) {
- if (NULL != pStmt->pDbName) {
- code = reserveDbVgInfoInCache(pCxt->pParseCxt->acctId, ((SValueNode*)pStmt->pDbName)->literal, pCxt->pMetaCache);
- } else {
- code = reserveDbVgInfoInCache(pCxt->pParseCxt->acctId, TSDB_INFORMATION_SCHEMA_DB, pCxt->pMetaCache);
- }
+ code = reserveDbVgInfoInCache(pCxt->pParseCxt->acctId, ((SValueNode*)pStmt->pDbName)->literal, pCxt->pMetaCache);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = reserveUserAuthInCache(pCxt->pParseCxt->acctId, pCxt->pParseCxt->pUser,
+ ((SValueNode*)pStmt->pDbName)->literal, AUTH_TYPE_READ, pCxt->pMetaCache);
}
return code;
}
@@ -386,11 +405,7 @@ static int32_t collectMetaKeyFromShowTags(SCollectMetaKeyCxt* pCxt, SShowStmt* p
int32_t code = reserveTableMetaInCache(pCxt->pParseCxt->acctId, TSDB_INFORMATION_SCHEMA_DB, TSDB_INS_TABLE_TAGS,
pCxt->pMetaCache);
if (TSDB_CODE_SUCCESS == code) {
- if (NULL != pStmt->pDbName) {
- code = reserveDbVgInfoInCache(pCxt->pParseCxt->acctId, ((SValueNode*)pStmt->pDbName)->literal, pCxt->pMetaCache);
- } else {
- code = reserveDbVgInfoInCache(pCxt->pParseCxt->acctId, TSDB_INFORMATION_SCHEMA_DB, pCxt->pMetaCache);
- }
+ code = reserveDbVgInfoInCache(pCxt->pParseCxt->acctId, ((SValueNode*)pStmt->pDbName)->literal, pCxt->pMetaCache);
}
return code;
}
@@ -411,7 +426,7 @@ static int32_t collectMetaKeyFromShowVgroups(SCollectMetaKeyCxt* pCxt, SShowStmt
}
static int32_t collectMetaKeyFromShowTopics(SCollectMetaKeyCxt* pCxt, SShowStmt* pStmt) {
- return reserveTableMetaInCache(pCxt->pParseCxt->acctId, TSDB_PERFORMANCE_SCHEMA_DB, TSDB_PERFS_TABLE_TOPICS,
+ return reserveTableMetaInCache(pCxt->pParseCxt->acctId, TSDB_INFORMATION_SCHEMA_DB, TSDB_INS_TABLE_TOPICS,
pCxt->pMetaCache);
}
@@ -449,6 +464,11 @@ static int32_t collectMetaKeyFromShowDnodeVariables(SCollectMetaKeyCxt* pCxt, SS
return code;
}
+static int32_t collectMetaKeyFromShowVnodes(SCollectMetaKeyCxt* pCxt, SShowVnodesStmt* pStmt) {
+ return reserveTableMetaInCache(pCxt->pParseCxt->acctId, TSDB_INFORMATION_SCHEMA_DB, TSDB_INS_TABLE_VNODES,
+ pCxt->pMetaCache);
+}
+
static int32_t collectMetaKeyFromShowCreateDatabase(SCollectMetaKeyCxt* pCxt, SShowCreateDatabaseStmt* pStmt) {
return reserveDbCfgInCache(pCxt->pParseCxt->acctId, pStmt->dbName, pCxt->pMetaCache);
}
@@ -464,6 +484,9 @@ static int32_t collectMetaKeyFromShowCreateTable(SCollectMetaKeyCxt* pCxt, SShow
if (TSDB_CODE_SUCCESS == code) {
code = reserveDbCfgInCache(pCxt->pParseCxt->acctId, pStmt->dbName, pCxt->pMetaCache);
}
+ if (TSDB_CODE_SUCCESS == code) {
+ code = reserveUserAuthInCacheExt(pCxt->pParseCxt->pUser, &name, AUTH_TYPE_READ, pCxt->pMetaCache);
+ }
return code;
}
@@ -503,7 +526,7 @@ static int32_t collectMetaKeyFromShowBlockDist(SCollectMetaKeyCxt* pCxt, SShowTa
}
static int32_t collectMetaKeyFromShowSubscriptions(SCollectMetaKeyCxt* pCxt, SShowStmt* pStmt) {
- return reserveTableMetaInCache(pCxt->pParseCxt->acctId, TSDB_PERFORMANCE_SCHEMA_DB, TSDB_PERFS_TABLE_SUBSCRIPTIONS,
+ return reserveTableMetaInCache(pCxt->pParseCxt->acctId, TSDB_INFORMATION_SCHEMA_DB, TSDB_INS_TABLE_SUBSCRIPTIONS,
pCxt->pMetaCache);
}
@@ -586,6 +609,8 @@ static int32_t collectMetaKeyFromQuery(SCollectMetaKeyCxt* pCxt, SNode* pStmt) {
return collectMetaKeyFromShowVariables(pCxt, (SShowStmt*)pStmt);
case QUERY_NODE_SHOW_DNODE_VARIABLES_STMT:
return collectMetaKeyFromShowDnodeVariables(pCxt, (SShowDnodeVariablesStmt*)pStmt);
+ case QUERY_NODE_SHOW_VNODES_STMT:
+ return collectMetaKeyFromShowVnodes(pCxt, (SShowVnodesStmt*)pStmt);
case QUERY_NODE_SHOW_CREATE_DATABASE_STMT:
return collectMetaKeyFromShowCreateDatabase(pCxt, (SShowCreateDatabaseStmt*)pStmt);
case QUERY_NODE_SHOW_CREATE_TABLE_STMT:
diff --git a/source/libs/parser/src/parAuthenticator.c b/source/libs/parser/src/parAuthenticator.c
index befc822808c7b50eeaea5753a61bb10ffef81523..9d73be745468c33f6041f5f6bb2a9cd9bfb51b52 100644
--- a/source/libs/parser/src/parAuthenticator.c
+++ b/source/libs/parser/src/parAuthenticator.c
@@ -96,6 +96,14 @@ static int32_t authInsert(SAuthCxt* pCxt, SInsertStmt* pInsert) {
return code;
}
+static int32_t authShowTables(SAuthCxt* pCxt, SShowStmt* pStmt) {
+ return checkAuth(pCxt, ((SValueNode*)pStmt->pDbName)->literal, AUTH_TYPE_READ);
+}
+
+static int32_t authShowCreateTable(SAuthCxt* pCxt, SShowCreateTableStmt* pStmt) {
+ return checkAuth(pCxt, pStmt->dbName, AUTH_TYPE_READ);
+}
+
static int32_t authQuery(SAuthCxt* pCxt, SNode* pStmt) {
switch (nodeType(pStmt)) {
case QUERY_NODE_SET_OPERATOR:
@@ -108,6 +116,27 @@ static int32_t authQuery(SAuthCxt* pCxt, SNode* pStmt) {
return authDelete(pCxt, (SDeleteStmt*)pStmt);
case QUERY_NODE_INSERT_STMT:
return authInsert(pCxt, (SInsertStmt*)pStmt);
+ case QUERY_NODE_SHOW_DNODES_STMT:
+ case QUERY_NODE_SHOW_MNODES_STMT:
+ case QUERY_NODE_SHOW_MODULES_STMT:
+ case QUERY_NODE_SHOW_QNODES_STMT:
+ case QUERY_NODE_SHOW_SNODES_STMT:
+ case QUERY_NODE_SHOW_BNODES_STMT:
+ case QUERY_NODE_SHOW_CLUSTER_STMT:
+ case QUERY_NODE_SHOW_LICENCES_STMT:
+ case QUERY_NODE_SHOW_VGROUPS_STMT:
+ case QUERY_NODE_SHOW_VARIABLES_STMT:
+ case QUERY_NODE_SHOW_CREATE_DATABASE_STMT:
+ case QUERY_NODE_SHOW_TABLE_DISTRIBUTED_STMT:
+ case QUERY_NODE_SHOW_VNODES_STMT:
+ case QUERY_NODE_SHOW_SCORES_STMT:
+ return !pCxt->pParseCxt->enableSysInfo ? TSDB_CODE_PAR_PERMISSION_DENIED : TSDB_CODE_SUCCESS;
+ case QUERY_NODE_SHOW_TABLES_STMT:
+ case QUERY_NODE_SHOW_STABLES_STMT:
+ return authShowTables(pCxt, (SShowStmt*)pStmt);
+ case QUERY_NODE_SHOW_CREATE_TABLE_STMT:
+ case QUERY_NODE_SHOW_CREATE_STABLE_STMT:
+ return authShowCreateTable(pCxt, (SShowCreateTableStmt*)pStmt);
default:
break;
}
diff --git a/source/libs/parser/src/parInsert.c b/source/libs/parser/src/parInsert.c
index 4e32672697f9faac9d25667a5018acac60b6fbb4..74bc68f89e6014256932f70b1d75052c15874481 100644
--- a/source/libs/parser/src/parInsert.c
+++ b/source/libs/parser/src/parInsert.c
@@ -502,6 +502,10 @@ static int32_t parseValueToken(char** end, SToken* pToken, SSchema* pSchema, int
return func(pMsgBuf, NULL, 0, param);
}
+ if (IS_NUMERIC_TYPE(pSchema->type) && pToken->n == 0) {
+ return buildSyntaxErrMsg(pMsgBuf, "invalid numeric data", pToken->z);
+ }
+
switch (pSchema->type) {
case TSDB_DATA_TYPE_BOOL: {
if ((pToken->type == TK_NK_BOOL || pToken->type == TK_NK_STRING) && (pToken->n != 0)) {
@@ -782,6 +786,7 @@ static void buildCreateTbReq(SVCreateTbReq* pTbReq, const char* tname, STag* pTa
if (sname) pTbReq->ctb.name = strdup(sname);
pTbReq->ctb.pTag = (uint8_t*)pTag;
pTbReq->ctb.tagName = taosArrayDup(tagName);
+ pTbReq->ttl = TSDB_DEFAULT_TABLE_TTL;
pTbReq->commentLen = -1;
return;
@@ -1117,6 +1122,43 @@ static int32_t ignoreAutoCreateTableClause(SInsertParseContext* pCxt) {
return code;
}
+static int32_t parseTableOptions(SInsertParseContext* pCxt) {
+ do {
+ int32_t index = 0;
+ SToken sToken;
+ NEXT_TOKEN_KEEP_SQL(pCxt->pSql, sToken, index);
+ if (TK_TTL == sToken.type) {
+ pCxt->pSql += index;
+ NEXT_TOKEN(pCxt->pSql, sToken);
+ if (TK_NK_INTEGER != sToken.type) {
+ return buildSyntaxErrMsg(&pCxt->msg, "Invalid option ttl", sToken.z);
+ }
+ pCxt->createTblReq.ttl = taosStr2Int32(sToken.z, NULL, 10);
+ if (pCxt->createTblReq.ttl < 0) {
+ return buildSyntaxErrMsg(&pCxt->msg, "Invalid option ttl", sToken.z);
+ }
+ } else if (TK_COMMENT == sToken.type) {
+ pCxt->pSql += index;
+ NEXT_TOKEN(pCxt->pSql, sToken);
+ if (TK_NK_STRING != sToken.type) {
+ return buildSyntaxErrMsg(&pCxt->msg, "Invalid option comment", sToken.z);
+ }
+ if (sToken.n >= TSDB_TB_COMMENT_LEN) {
+ return buildSyntaxErrMsg(&pCxt->msg, "comment too long", sToken.z);
+ }
+ int32_t len = trimString(sToken.z, sToken.n, pCxt->tmpTokenBuf, TSDB_TB_COMMENT_LEN);
+ pCxt->createTblReq.comment = strndup(pCxt->tmpTokenBuf, len);
+ if (NULL == pCxt->createTblReq.comment) {
+ return TSDB_CODE_OUT_OF_MEMORY;
+ }
+ pCxt->createTblReq.commentLen = len;
+ } else {
+ break;
+ }
+ } while (1);
+ return TSDB_CODE_SUCCESS;
+}
+
// pSql -> stb_name [(tag1_name, ...)] TAGS (tag1_value, ...)
static int32_t parseUsingClause(SInsertParseContext* pCxt, int32_t tbNo, SName* name, char* tbFName) {
int32_t len = strlen(tbFName);
@@ -1168,7 +1210,7 @@ static int32_t parseUsingClause(SInsertParseContext* pCxt, int32_t tbNo, SName*
return buildSyntaxErrMsg(&pCxt->msg, ") is expected", sToken.z);
}
- return TSDB_CODE_SUCCESS;
+ return parseTableOptions(pCxt);
}
static int parseOneRow(SInsertParseContext* pCxt, STableDataBlocks* pDataBlocks, int16_t timePrec, bool* gotRow,
@@ -1665,6 +1707,10 @@ int32_t parseInsertSql(SParseContext* pContext, SQuery** pQuery, SParseMetaCache
pDb = taosHashIterate(context.pDbFNameHashObj, pDb);
}
}
+ if (pContext->pStmtCb) {
+ context.pVgroupsHashObj = NULL;
+ context.pTableBlockHashObj = NULL;
+ }
destroyInsertParseContext(&context);
return code;
}
@@ -1692,6 +1738,21 @@ static int32_t skipValuesClause(SInsertParseSyntaxCxt* pCxt) {
static int32_t skipTagsClause(SInsertParseSyntaxCxt* pCxt) { return skipParentheses(pCxt); }
+static int32_t skipTableOptions(SInsertParseSyntaxCxt* pCxt) {
+ do {
+ int32_t index = 0;
+ SToken sToken;
+ NEXT_TOKEN_KEEP_SQL(pCxt->pSql, sToken, index);
+ if (TK_TTL == sToken.type || TK_COMMENT == sToken.type) {
+ pCxt->pSql += index;
+ NEXT_TOKEN(pCxt->pSql, sToken);
+ } else {
+ break;
+ }
+ } while (1);
+ return TSDB_CODE_SUCCESS;
+}
+
// pSql -> [(tag1_name, ...)] TAGS (tag1_value, ...)
static int32_t skipUsingClause(SInsertParseSyntaxCxt* pCxt) {
SToken sToken;
@@ -1710,12 +1771,13 @@ static int32_t skipUsingClause(SInsertParseSyntaxCxt* pCxt) {
return buildSyntaxErrMsg(&pCxt->msg, "( is expected", sToken.z);
}
CHECK_CODE(skipTagsClause(pCxt));
+ CHECK_CODE(skipTableOptions(pCxt));
return TSDB_CODE_SUCCESS;
}
static int32_t collectTableMetaKey(SInsertParseSyntaxCxt* pCxt, bool isStable, int32_t tableNo, SToken* pTbToken) {
- SName name;
+ SName name = {0};
CHECK_CODE(createSName(&name, pTbToken, pCxt->pComCxt->acctId, pCxt->pComCxt->db, &pCxt->msg));
CHECK_CODE(reserveTableMetaInCacheForInsert(&name, isStable ? CATALOG_REQ_TYPE_META : CATALOG_REQ_TYPE_BOTH, tableNo,
pCxt->pMetaCache));
@@ -1730,7 +1792,7 @@ static int32_t checkTableName(const char* pTableName, SMsgBuf* pMsgBuf) {
}
static int32_t collectAutoCreateTableMetaKey(SInsertParseSyntaxCxt* pCxt, int32_t tableNo, SToken* pTbToken) {
- SName name;
+ SName name = {0};
CHECK_CODE(createSName(&name, pTbToken, pCxt->pComCxt->acctId, pCxt->pComCxt->db, &pCxt->msg));
CHECK_CODE(checkTableName(name.tname, &pCxt->msg));
CHECK_CODE(reserveTableMetaInCacheForInsert(&name, CATALOG_REQ_TYPE_VGROUP, tableNo, pCxt->pMetaCache));
@@ -2253,7 +2315,7 @@ static int32_t smlBoundColumnData(SArray* cols, SParsedDataColInfo* pColList, SS
SToken sToken = {.n = kv->keyLen, .z = (char*)kv->key};
col_id_t t = lastColIdx + 1;
col_id_t index = ((t == 0 && !isTag) ? 0 : findCol(&sToken, t, nCols, pSchema));
- uDebug("SML, index:%d, t:%d, ncols:%d, kv->name:%s", index, t, nCols, kv->key);
+ uDebug("SML, index:%d, t:%d, ncols:%d", index, t, nCols);
if (index < 0 && t > 0) {
index = findCol(&sToken, 0, t, pSchema);
isOrdered = false;
@@ -2474,9 +2536,7 @@ int32_t smlBindData(void* handle, SArray* tags, SArray* colsSchema, SArray* cols
if (p) kv = *p;
}
- if (!kv || kv->length == 0) {
- MemRowAppend(&pBuf, NULL, 0, ¶m);
- } else {
+ if (kv){
int32_t colLen = kv->length;
if (pColSchema->type == TSDB_DATA_TYPE_TIMESTAMP) {
// uError("SML:data before:%" PRId64 ", precision:%d", kv->i, pTableMeta->tableInfo.precision);
@@ -2489,6 +2549,8 @@ int32_t smlBindData(void* handle, SArray* tags, SArray* colsSchema, SArray* cols
} else {
MemRowAppend(&pBuf, &(kv->value), colLen, ¶m);
}
+ }else{
+ pBuilder->hasNone = true;
}
if (PRIMARYKEY_TIMESTAMP_COL_ID == pColSchema->colId) {
diff --git a/source/libs/parser/src/parTokenizer.c b/source/libs/parser/src/parTokenizer.c
index 80ec447f66157dd00e1081ee87e431a58b5998cd..68eb3e6fb1985dda6a8db9106fc2a6bcc8cd3583 100644
--- a/source/libs/parser/src/parTokenizer.c
+++ b/source/libs/parser/src/parTokenizer.c
@@ -187,6 +187,7 @@ static SKeyword keywordTable[] = {
{"SNODES", TK_SNODES},
{"SOFFSET", TK_SOFFSET},
{"SPLIT", TK_SPLIT},
+ {"STT_TRIGGER", TK_STT_TRIGGER},
{"STABLE", TK_STABLE},
{"STABLES", TK_STABLES},
{"STATE", TK_STATE},
@@ -199,6 +200,8 @@ static SKeyword keywordTable[] = {
{"SYSINFO", TK_SYSINFO},
{"TABLE", TK_TABLE},
{"TABLES", TK_TABLES},
+ {"TABLE_PREFIX", TK_TABLE_PREFIX},
+ {"TABLE_SUFFIX", TK_TABLE_SUFFIX},
{"TAG", TK_TAG},
{"TAGS", TK_TAGS},
{"TBNAME", TK_TBNAME},
@@ -213,6 +216,7 @@ static SKeyword keywordTable[] = {
{"TRANSACTIONS", TK_TRANSACTIONS},
{"TRIGGER", TK_TRIGGER},
{"TRIM", TK_TRIM},
+ {"TSDB_PAGESIZE", TK_TSDB_PAGESIZE},
{"TSERIES", TK_TSERIES},
{"TTL", TK_TTL},
{"UNION", TK_UNION},
@@ -228,6 +232,7 @@ static SKeyword keywordTable[] = {
{"VERBOSE", TK_VERBOSE},
{"VGROUP", TK_VGROUP},
{"VGROUPS", TK_VGROUPS},
+ {"VNODES", TK_VNODES},
{"WAL_FSYNC_PERIOD", TK_WAL_FSYNC_PERIOD},
{"WAL_LEVEL", TK_WAL_LEVEL},
{"WAL_RETENTION_PERIOD", TK_WAL_RETENTION_PERIOD},
diff --git a/source/libs/parser/src/parTranslater.c b/source/libs/parser/src/parTranslater.c
index 8a1d8763bfc129b5c7182ae6e6d582e630bfd284..2af4032fd89829cfada3f84afe7ece36af096ec5 100644
--- a/source/libs/parser/src/parTranslater.c
+++ b/source/libs/parser/src/parTranslater.c
@@ -142,8 +142,8 @@ static const SSysTableShowAdapter sysTableShowAdapter[] = {
},
{
.showType = QUERY_NODE_SHOW_STREAMS_STMT,
- .pDbName = TSDB_PERFORMANCE_SCHEMA_DB,
- .pTableName = TSDB_PERFS_TABLE_STREAMS,
+ .pDbName = TSDB_INFORMATION_SCHEMA_DB,
+ .pTableName = TSDB_INS_TABLE_STREAMS,
.numOfShowCols = 1,
.pShowCols = {"stream_name"}
},
@@ -184,8 +184,8 @@ static const SSysTableShowAdapter sysTableShowAdapter[] = {
},
{
.showType = QUERY_NODE_SHOW_TOPICS_STMT,
- .pDbName = TSDB_PERFORMANCE_SCHEMA_DB,
- .pTableName = TSDB_PERFS_TABLE_TOPICS,
+ .pDbName = TSDB_INFORMATION_SCHEMA_DB,
+ .pTableName = TSDB_INS_TABLE_TOPICS,
.numOfShowCols = 1,
.pShowCols = {"topic_name"}
},
@@ -240,8 +240,14 @@ static const SSysTableShowAdapter sysTableShowAdapter[] = {
},
{
.showType = QUERY_NODE_SHOW_SUBSCRIPTIONS_STMT,
- .pDbName = TSDB_PERFORMANCE_SCHEMA_DB,
- .pTableName = TSDB_PERFS_TABLE_SUBSCRIPTIONS,
+ .pDbName = TSDB_INFORMATION_SCHEMA_DB,
+ .pTableName = TSDB_INS_TABLE_SUBSCRIPTIONS,
+ .numOfShowCols = 1,
+ .pShowCols = {"*"}
+ },
+ { .showType = QUERY_NODE_SHOW_VNODES_STMT,
+ .pDbName = TSDB_INFORMATION_SCHEMA_DB,
+ .pTableName = TSDB_INS_TABLE_VNODES,
.numOfShowCols = 1,
.pShowCols = {"*"}
},
@@ -784,6 +790,9 @@ static int32_t createColumnsByTable(STranslateContext* pCxt, const STableNode* p
int32_t nums = pMeta->tableInfo.numOfColumns +
(igTags ? 0 : ((TSDB_SUPER_TABLE == pMeta->tableType) ? pMeta->tableInfo.numOfTags : 0));
for (int32_t i = 0; i < nums; ++i) {
+ if (invisibleColumn(pCxt->pParseCxt->enableSysInfo, pMeta->tableType, pMeta->schema[i].flags)) {
+ continue;
+ }
SColumnNode* pCol = (SColumnNode*)nodesMakeNode(QUERY_NODE_COLUMN);
if (NULL == pCol) {
return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_OUT_OF_MEMORY);
@@ -826,7 +835,8 @@ static int32_t findAndSetColumn(STranslateContext* pCxt, SColumnNode** pColRef,
}
int32_t nums = pMeta->tableInfo.numOfTags + pMeta->tableInfo.numOfColumns;
for (int32_t i = 0; i < nums; ++i) {
- if (0 == strcmp(pCol->colName, pMeta->schema[i].name)) {
+ if (0 == strcmp(pCol->colName, pMeta->schema[i].name) &&
+ !invisibleColumn(pCxt->pParseCxt->enableSysInfo, pMeta->tableType, pMeta->schema[i].flags)) {
setColumnInfoBySchema((SRealTableNode*)pTable, pMeta->schema + i, (i - pMeta->tableInfo.numOfColumns), pCol);
*pFound = true;
break;
@@ -2156,15 +2166,16 @@ static int32_t setTableIndex(STranslateContext* pCxt, SName* pName, SRealTableNo
return TSDB_CODE_SUCCESS;
}
-static int32_t setTableCacheLastMode(STranslateContext* pCxt, SName* pName, SRealTableNode* pRealTable) {
- if (TSDB_SYSTEM_TABLE == pRealTable->pMeta->tableType) {
+static int32_t setTableCacheLastMode(STranslateContext* pCxt, SSelectStmt* pSelect) {
+ if (!pSelect->hasLastRowFunc || QUERY_NODE_REAL_TABLE != nodeType(pSelect->pFromTable)) {
return TSDB_CODE_SUCCESS;
}
- SDbCfgInfo dbCfg = {0};
- int32_t code = getDBCfg(pCxt, pRealTable->table.dbName, &dbCfg);
+ SRealTableNode* pTable = (SRealTableNode*)pSelect->pFromTable;
+ SDbCfgInfo dbCfg = {0};
+ int32_t code = getDBCfg(pCxt, pTable->table.dbName, &dbCfg);
if (TSDB_CODE_SUCCESS == code) {
- pRealTable->cacheLastMode = dbCfg.cacheLast;
+ pTable->cacheLastMode = dbCfg.cacheLast;
}
return code;
}
@@ -2188,18 +2199,15 @@ static int32_t translateTable(STranslateContext* pCxt, SNode* pTable) {
if (TSDB_CODE_SUCCESS == code) {
code = setTableIndex(pCxt, &name, pRealTable);
}
- if (TSDB_CODE_SUCCESS == code) {
- code = setTableCacheLastMode(pCxt, &name, pRealTable);
- }
}
- pRealTable->table.precision = pRealTable->pMeta->tableInfo.precision;
- pRealTable->table.singleTable = isSingleTable(pRealTable);
if (TSDB_CODE_SUCCESS == code) {
+ pRealTable->table.precision = pRealTable->pMeta->tableInfo.precision;
+ pRealTable->table.singleTable = isSingleTable(pRealTable);
+ if (TSDB_SUPER_TABLE == pRealTable->pMeta->tableType) {
+ pCxt->stableQuery = true;
+ }
code = addNamespace(pCxt, pRealTable);
}
- if (TSDB_SUPER_TABLE == pRealTable->pMeta->tableType) {
- pCxt->stableQuery = true;
- }
break;
}
case QUERY_NODE_TEMP_TABLE: {
@@ -2269,10 +2277,14 @@ static SNode* createMultiResFunc(SFunctionNode* pSrcFunc, SExprNode* pExpr) {
if (QUERY_NODE_COLUMN == nodeType(pExpr)) {
SColumnNode* pCol = (SColumnNode*)pExpr;
len = snprintf(buf, sizeof(buf), "%s(%s.%s)", pSrcFunc->functionName, pCol->tableAlias, pCol->colName);
+ strncpy(pFunc->node.aliasName, buf, TMIN(len, sizeof(pFunc->node.aliasName) - 1));
+ len = snprintf(buf, sizeof(buf), "%s(%s)", pSrcFunc->functionName, pCol->colName);
+ strncpy(pFunc->node.userAlias, buf, TMIN(len, sizeof(pFunc->node.userAlias) - 1));
} else {
len = snprintf(buf, sizeof(buf), "%s(%s)", pSrcFunc->functionName, pExpr->aliasName);
+ strncpy(pFunc->node.aliasName, buf, TMIN(len, sizeof(pFunc->node.aliasName) - 1));
+ strncpy(pFunc->node.userAlias, buf, TMIN(len, sizeof(pFunc->node.userAlias) - 1));
}
- strncpy(pFunc->node.aliasName, buf, TMIN(len, sizeof(pFunc->node.aliasName) - 1));
return (SNode*)pFunc;
}
@@ -2471,13 +2483,65 @@ static int32_t translateOrderBy(STranslateContext* pCxt, SSelectStmt* pSelect) {
return code;
}
+static EDealRes needFillImpl(SNode* pNode, void* pContext) {
+ if (isAggFunc(pNode) && FUNCTION_TYPE_GROUP_KEY != ((SFunctionNode*)pNode)->funcType) {
+ *(bool*)pContext = true;
+ return DEAL_RES_END;
+ }
+ return DEAL_RES_CONTINUE;
+}
+
+static bool needFill(SNode* pNode) {
+ bool hasFillFunc = false;
+ nodesWalkExpr(pNode, needFillImpl, &hasFillFunc);
+ return hasFillFunc;
+}
+
+static bool mismatchFillDataType(SDataType origDt, SDataType fillDt) {
+ if (TSDB_DATA_TYPE_NULL == fillDt.type) {
+ return false;
+ }
+ if (IS_NUMERIC_TYPE(origDt.type) && !IS_NUMERIC_TYPE(fillDt.type)) {
+ return true;
+ }
+ if (IS_VAR_DATA_TYPE(origDt.type) && !IS_VAR_DATA_TYPE(fillDt.type)) {
+ return true;
+ }
+ return false;
+}
+
+static int32_t checkFillValues(STranslateContext* pCxt, SFillNode* pFill, SNodeList* pProjectionList) {
+ if (FILL_MODE_VALUE != pFill->mode) {
+ return TSDB_CODE_SUCCESS;
+ }
+
+ int32_t fillNo = 0;
+ SNodeListNode* pFillValues = (SNodeListNode*)pFill->pValues;
+ SNode* pProject = NULL;
+ FOREACH(pProject, pProjectionList) {
+ if (needFill(pProject)) {
+ if (fillNo >= LIST_LENGTH(pFillValues->pNodeList)) {
+ return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_WRONG_VALUE_TYPE, "Filled values number mismatch");
+ }
+ if (mismatchFillDataType(((SExprNode*)pProject)->resType,
+ ((SExprNode*)nodesListGetNode(pFillValues->pNodeList, fillNo))->resType)) {
+ return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_WRONG_VALUE_TYPE, "Filled data type mismatch");
+ }
+ ++fillNo;
+ }
+ }
+ if (fillNo != LIST_LENGTH(pFillValues->pNodeList)) {
+ return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_WRONG_VALUE_TYPE, "Filled values number mismatch");
+ }
+ return TSDB_CODE_SUCCESS;
+}
+
static int32_t translateFillValues(STranslateContext* pCxt, SSelectStmt* pSelect) {
if (NULL == pSelect->pWindow || QUERY_NODE_INTERVAL_WINDOW != nodeType(pSelect->pWindow) ||
NULL == ((SIntervalWindowNode*)pSelect->pWindow)->pFill) {
return TSDB_CODE_SUCCESS;
}
- SFillNode* pFill = (SFillNode*)((SIntervalWindowNode*)pSelect->pWindow)->pFill;
- return TSDB_CODE_SUCCESS;
+ return checkFillValues(pCxt, (SFillNode*)((SIntervalWindowNode*)pSelect->pWindow)->pFill, pSelect->pProjectionList);
}
static int32_t rewriteProjectAlias(SNodeList* pProjectionList) {
@@ -2594,8 +2658,7 @@ static int32_t getQueryTimeRange(STranslateContext* pCxt, SNode* pWhere, STimeWi
return code;
}
-static int32_t checkFill(STranslateContext* pCxt, SFillNode* pFill, SValueNode* pInterval,
- bool isInterpFill) {
+static int32_t checkFill(STranslateContext* pCxt, SFillNode* pFill, SValueNode* pInterval, bool isInterpFill) {
if (FILL_MODE_NONE == pFill->mode) {
if (isInterpFill) {
return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_WRONG_VALUE_TYPE, "Unsupported fill type");
@@ -3085,6 +3148,9 @@ static int32_t translateSelectFrom(STranslateContext* pCxt, SSelectStmt* pSelect
if (TSDB_CODE_SUCCESS == code) {
code = replaceOrderByAliasForSelect(pCxt, pSelect);
}
+ if (TSDB_CODE_SUCCESS == code) {
+ code = setTableCacheLastMode(pCxt, pSelect);
+ }
return code;
}
@@ -3416,6 +3482,10 @@ static int32_t buildCreateDbReq(STranslateContext* pCxt, SCreateDatabaseStmt* pS
pReq->walRetentionSize = pStmt->pOptions->walRetentionSize;
pReq->walRollPeriod = pStmt->pOptions->walRollPeriod;
pReq->walSegmentSize = pStmt->pOptions->walSegmentSize;
+ pReq->sstTrigger = pStmt->pOptions->sstTrigger;
+ pReq->hashPrefix = pStmt->pOptions->tablePrefix;
+ pReq->hashSuffix = pStmt->pOptions->tableSuffix;
+ pReq->tsdbPageSize = pStmt->pOptions->tsdbPageSize;
pReq->ignoreExist = pStmt->ignoreExists;
return buildCreateDbRetentions(pStmt->pOptions->pRetentions, pReq);
}
@@ -3660,6 +3730,10 @@ static int32_t checkDatabaseOptions(STranslateContext* pCxt, const char* pDbName
code = checkDbRangeOption(pCxt, "pagesize", pOptions->pagesize, TSDB_MIN_PAGESIZE_PER_VNODE,
TSDB_MAX_PAGESIZE_PER_VNODE);
}
+ if (TSDB_CODE_SUCCESS == code) {
+ code = checkDbRangeOption(pCxt, "tsdbPagesize", pOptions->tsdbPageSize, TSDB_MIN_TSDB_PAGESIZE,
+ TSDB_MAX_TSDB_PAGESIZE);
+ }
if (TSDB_CODE_SUCCESS == code) {
code = checkDbPrecisionOption(pCxt, pOptions);
}
@@ -3700,6 +3774,15 @@ static int32_t checkDatabaseOptions(STranslateContext* pCxt, const char* pDbName
code =
checkDbRangeOption(pCxt, "walSegmentSize", pOptions->walSegmentSize, TSDB_DB_MIN_WAL_SEGMENT_SIZE, INT32_MAX);
}
+ if (TSDB_CODE_SUCCESS == code) {
+ code = checkDbRangeOption(pCxt, "sstTrigger", pOptions->sstTrigger, TSDB_MIN_STT_TRIGGER, TSDB_MAX_STT_TRIGGER);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = checkDbRangeOption(pCxt, "tablePrefix", pOptions->tablePrefix, TSDB_MIN_HASH_PREFIX, TSDB_MAX_HASH_PREFIX);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = checkDbRangeOption(pCxt, "tableSuffix", pOptions->tableSuffix, TSDB_MIN_HASH_SUFFIX, TSDB_MAX_HASH_SUFFIX);
+ }
if (TSDB_CODE_SUCCESS == code) {
code = checkOptionsDependency(pCxt, pDbName, pOptions);
}
@@ -3773,6 +3856,7 @@ static void buildAlterDbReq(STranslateContext* pCxt, SAlterDatabaseStmt* pStmt,
pReq->cacheLast = pStmt->pOptions->cacheModel;
pReq->cacheLastSize = pStmt->pOptions->cacheLastSize;
pReq->replications = pStmt->pOptions->replica;
+ pReq->sstTrigger = pStmt->pOptions->sstTrigger;
return;
}
@@ -4998,7 +5082,7 @@ static int32_t checkCreateStream(STranslateContext* pCxt, SCreateStreamStmt* pSt
return TSDB_CODE_SUCCESS;
}
- if (QUERY_NODE_SELECT_STMT != nodeType(pStmt->pQuery) ||
+ if (QUERY_NODE_SELECT_STMT != nodeType(pStmt->pQuery) || NULL == ((SSelectStmt*)pStmt->pQuery)->pFromTable ||
QUERY_NODE_REAL_TABLE != nodeType(((SSelectStmt*)pStmt->pQuery)->pFromTable)) {
return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_STREAM_QUERY, "Unsupported stream query");
}
@@ -5777,6 +5861,25 @@ static int32_t rewriteShowDnodeVariables(STranslateContext* pCxt, SQuery* pQuery
return code;
}
+static int32_t rewriteShowVnodes(STranslateContext* pCxt, SQuery* pQuery) {
+ SShowVnodesStmt* pShow = (SShowVnodesStmt*)(pQuery->pRoot);
+ SSelectStmt* pStmt = NULL;
+ int32_t code = createSelectStmtForShow(QUERY_NODE_SHOW_VNODES_STMT, &pStmt);
+ if (TSDB_CODE_SUCCESS == code) {
+ if (NULL != pShow->pDnodeId) {
+ code = createOperatorNode(OP_TYPE_EQUAL, "dnode_id", pShow->pDnodeId, &pStmt->pWhere);
+ } else {
+ code = createOperatorNode(OP_TYPE_EQUAL, "dnode_ep", pShow->pDnodeEndpoint, &pStmt->pWhere);
+ }
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ pQuery->showRewrite = true;
+ nodesDestroyNode(pQuery->pRoot);
+ pQuery->pRoot = (SNode*)pStmt;
+ }
+ return code;
+}
+
static SNode* createBlockDistInfoFunc() {
SFunctionNode* pFunc = (SFunctionNode*)nodesMakeNode(QUERY_NODE_FUNCTION);
if (NULL == pFunc) {
@@ -6348,8 +6451,9 @@ typedef struct SVgroupDropTableBatch {
char dbName[TSDB_DB_NAME_LEN];
} SVgroupDropTableBatch;
-static void addDropTbReqIntoVgroup(SHashObj* pVgroupHashmap, SDropTableClause* pClause, SVgroupInfo* pVgInfo) {
- SVDropTbReq req = {.name = pClause->tableName, .igNotExists = pClause->ignoreNotExists};
+static void addDropTbReqIntoVgroup(SHashObj* pVgroupHashmap, SDropTableClause* pClause, SVgroupInfo* pVgInfo,
+ uint64_t suid) {
+ SVDropTbReq req = {.name = pClause->tableName, .suid = suid, .igNotExists = pClause->ignoreNotExists};
SVgroupDropTableBatch* pTableBatch = taosHashGet(pVgroupHashmap, &pVgInfo->vgId, sizeof(pVgInfo->vgId));
if (NULL == pTableBatch) {
SVgroupDropTableBatch tBatch = {0};
@@ -6390,7 +6494,7 @@ static int32_t buildDropTableVgroupHashmap(STranslateContext* pCxt, SDropTableCl
code = getTableHashVgroup(pCxt, pClause->dbName, pClause->tableName, &info);
}
if (TSDB_CODE_SUCCESS == code) {
- addDropTbReqIntoVgroup(pVgroupHashmap, pClause, &info);
+ addDropTbReqIntoVgroup(pVgroupHashmap, pClause, &info, pTableMeta->suid);
}
over:
@@ -6505,7 +6609,17 @@ static int32_t buildUpdateTagValReq(STranslateContext* pCxt, SAlterTableStmt* pS
pReq->colId = pSchema->colId;
SDataType targetDt = schemaToDataType(pTableMeta->tableInfo.precision, pSchema);
- if (DEAL_RES_ERROR == translateValueImpl(pCxt, pStmt->pVal, targetDt, true)) {
+
+ if (QUERY_NODE_VALUE != pStmt->pVal->node.type) {
+ SValueNode* pVal = NULL;
+ pCxt->errCode = createTagValFromExpr(pCxt, targetDt, (SNode*)pStmt->pVal, &pVal);
+ if (pCxt->errCode) {
+ return pCxt->errCode;
+ }
+
+ nodesDestroyNode((SNode*)pStmt->pVal);
+ pStmt->pVal = pVal;
+ } else if (DEAL_RES_ERROR == translateValueImpl(pCxt, pStmt->pVal, targetDt, true)) {
return pCxt->errCode;
}
@@ -6886,6 +7000,9 @@ static int32_t rewriteQuery(STranslateContext* pCxt, SQuery* pQuery) {
case QUERY_NODE_SHOW_DNODE_VARIABLES_STMT:
code = rewriteShowDnodeVariables(pCxt, pQuery);
break;
+ case QUERY_NODE_SHOW_VNODES_STMT:
+ code = rewriteShowVnodes(pCxt, pQuery);
+ break;
case QUERY_NODE_SHOW_TABLE_DISTRIBUTED_STMT:
code = rewriteShowTableDist(pCxt, pQuery);
break;
diff --git a/source/libs/parser/src/parser.c b/source/libs/parser/src/parser.c
index 7e27132f3cbc453a5cf09bd487acc75fa546ff7e..7ee6a5b2236b24a676214c3538ed182aa52f427a 100644
--- a/source/libs/parser/src/parser.c
+++ b/source/libs/parser/src/parser.c
@@ -136,8 +136,7 @@ static int32_t setValueByBindParam(SValueNode* pVal, TAOS_MULTI_BIND* pParam) {
}
static EDealRes rewriteQueryExprAliasImpl(SNode* pNode, void* pContext) {
- if (nodesIsExprNode(pNode) && QUERY_NODE_COLUMN != nodeType(pNode) && '\0' == ((SExprNode*)pNode)->userAlias[0]) {
- strcpy(((SExprNode*)pNode)->userAlias, ((SExprNode*)pNode)->aliasName);
+ if (nodesIsExprNode(pNode) && QUERY_NODE_COLUMN != nodeType(pNode)) {
sprintf(((SExprNode*)pNode)->aliasName, "#%d", *(int32_t*)pContext);
++(*(int32_t*)pContext);
}
diff --git a/source/libs/parser/src/sql.c b/source/libs/parser/src/sql.c
index c820e955d78dc9439499d21645c2456884edb318..a2a52aa0e0b507864b1d1c9cd82656392d678aa5 100644
--- a/source/libs/parser/src/sql.c
+++ b/source/libs/parser/src/sql.c
@@ -104,26 +104,26 @@
#endif
/************* Begin control #defines *****************************************/
#define YYCODETYPE unsigned short int
-#define YYNOCODE 427
+#define YYNOCODE 430
#define YYACTIONTYPE unsigned short int
#define ParseTOKENTYPE SToken
typedef union {
int yyinit;
ParseTOKENTYPE yy0;
- SAlterOption yy95;
- EOperatorType yy198;
- EOrder yy204;
- int8_t yy215;
- ENullOrder yy277;
- bool yy313;
- int64_t yy473;
- SNodeList* yy544;
- SToken yy617;
- EJoinType yy708;
- SDataType yy784;
- EFillMode yy816;
- SNode* yy840;
- int32_t yy844;
+ int64_t yy49;
+ SDataType yy84;
+ EFillMode yy134;
+ SToken yy149;
+ EOrder yy158;
+ int32_t yy160;
+ SNode* yy312;
+ EOperatorType yy320;
+ int8_t yy363;
+ SAlterOption yy405;
+ ENullOrder yy417;
+ bool yy497;
+ SNodeList* yy824;
+ EJoinType yy832;
} YYMINORTYPE;
#ifndef YYSTACKDEPTH
#define YYSTACKDEPTH 100
@@ -139,17 +139,17 @@ typedef union {
#define ParseCTX_FETCH
#define ParseCTX_STORE
#define YYFALLBACK 1
-#define YYNSTATE 667
-#define YYNRULE 491
-#define YYNTOKEN 305
-#define YY_MAX_SHIFT 666
-#define YY_MIN_SHIFTREDUCE 973
-#define YY_MAX_SHIFTREDUCE 1463
-#define YY_ERROR_ACTION 1464
-#define YY_ACCEPT_ACTION 1465
-#define YY_NO_ACTION 1466
-#define YY_MIN_REDUCE 1467
-#define YY_MAX_REDUCE 1957
+#define YYNSTATE 675
+#define YYNRULE 496
+#define YYNTOKEN 309
+#define YY_MAX_SHIFT 674
+#define YY_MIN_SHIFTREDUCE 985
+#define YY_MAX_SHIFTREDUCE 1480
+#define YY_ERROR_ACTION 1481
+#define YY_ACCEPT_ACTION 1482
+#define YY_NO_ACTION 1483
+#define YY_MIN_REDUCE 1484
+#define YY_MAX_REDUCE 1979
/************* End control #defines *******************************************/
#define YY_NLOOKAHEAD ((int)(sizeof(yy_lookahead)/sizeof(yy_lookahead[0])))
@@ -216,694 +216,690 @@ typedef union {
** yy_default[] Default action for each state.
**
*********** Begin parsing tables **********************************************/
-#define YY_ACTTAB_COUNT (2548)
+#define YY_ACTTAB_COUNT (2512)
static const YYACTIONTYPE yy_action[] = {
- /* 0 */ 526, 30, 261, 526, 549, 433, 526, 434, 1502, 11,
- /* 10 */ 10, 117, 39, 37, 55, 1653, 1654, 117, 471, 378,
- /* 20 */ 339, 1468, 1264, 1006, 476, 1023, 1290, 1022, 1607, 1791,
- /* 30 */ 1598, 1607, 127, 1340, 1607, 1262, 441, 552, 434, 1502,
- /* 40 */ 469, 1775, 107, 1779, 1290, 106, 105, 104, 103, 102,
- /* 50 */ 101, 100, 99, 98, 1775, 1024, 1335, 1809, 150, 64,
- /* 60 */ 1935, 14, 1567, 1010, 1011, 553, 1771, 1777, 1270, 450,
- /* 70 */ 1761, 125, 577, 165, 39, 37, 1403, 1932, 571, 1771,
- /* 80 */ 1777, 328, 339, 1529, 1264, 551, 161, 1877, 1878, 1,
- /* 90 */ 1882, 571, 1659, 479, 478, 1340, 1823, 1262, 1376, 327,
- /* 100 */ 95, 1792, 580, 1794, 1795, 576, 496, 571, 1657, 344,
- /* 110 */ 1869, 663, 1652, 1654, 330, 1865, 160, 513, 1335, 494,
- /* 120 */ 1935, 492, 1289, 14, 325, 1342, 1343, 1705, 164, 543,
- /* 130 */ 1270, 1161, 1162, 1934, 33, 32, 1895, 1932, 40, 38,
- /* 140 */ 36, 35, 34, 148, 63, 1479, 640, 639, 638, 637,
- /* 150 */ 349, 2, 636, 635, 128, 630, 629, 628, 627, 626,
- /* 160 */ 625, 624, 139, 620, 619, 618, 348, 347, 615, 614,
- /* 170 */ 1265, 107, 1263, 663, 106, 105, 104, 103, 102, 101,
- /* 180 */ 100, 99, 98, 1809, 36, 35, 34, 1342, 1343, 224,
- /* 190 */ 225, 542, 384, 1268, 1269, 613, 1317, 1318, 1320, 1321,
- /* 200 */ 1322, 1323, 1324, 1325, 573, 569, 1333, 1334, 1336, 1337,
- /* 210 */ 1338, 1339, 1341, 1344, 1467, 1288, 1434, 33, 32, 482,
- /* 220 */ 481, 40, 38, 36, 35, 34, 123, 168, 541, 303,
- /* 230 */ 1465, 223, 1265, 84, 1263, 1264, 477, 480, 116, 115,
- /* 240 */ 114, 113, 112, 111, 110, 109, 108, 305, 1262, 1023,
- /* 250 */ 516, 1022, 22, 174, 1600, 1268, 1269, 1490, 1317, 1318,
- /* 260 */ 1320, 1321, 1322, 1323, 1324, 1325, 573, 569, 1333, 1334,
- /* 270 */ 1336, 1337, 1338, 1339, 1341, 1344, 39, 37, 1489, 1024,
- /* 280 */ 538, 1270, 168, 526, 339, 71, 1264, 1488, 70, 354,
- /* 290 */ 1244, 1245, 1708, 1791, 170, 211, 512, 1340, 1761, 1262,
- /* 300 */ 1119, 602, 601, 600, 1123, 599, 1125, 1126, 598, 1128,
- /* 310 */ 595, 1607, 1134, 592, 1136, 1137, 589, 586, 1935, 1761,
- /* 320 */ 1335, 1809, 1584, 1270, 663, 14, 1659, 1935, 1761, 553,
- /* 330 */ 1935, 166, 1270, 343, 1761, 1932, 577, 1935, 39, 37,
- /* 340 */ 1933, 487, 1657, 165, 1932, 552, 339, 1932, 1264, 549,
- /* 350 */ 165, 76, 305, 2, 1932, 516, 497, 544, 539, 1340,
- /* 360 */ 1823, 1262, 1698, 159, 95, 1792, 580, 1794, 1795, 576,
- /* 370 */ 210, 571, 63, 173, 1869, 663, 1646, 127, 330, 1865,
- /* 380 */ 160, 552, 1335, 1265, 490, 1263, 419, 605, 484, 1342,
- /* 390 */ 1343, 33, 32, 209, 1270, 40, 38, 36, 35, 34,
- /* 400 */ 1896, 634, 632, 39, 37, 1345, 1268, 1269, 1487, 91,
- /* 410 */ 622, 339, 1791, 1264, 42, 8, 125, 40, 38, 36,
- /* 420 */ 35, 34, 124, 611, 1340, 58, 1262, 1596, 57, 49,
- /* 430 */ 1599, 162, 1877, 1878, 1265, 1882, 1263, 663, 178, 177,
- /* 440 */ 1809, 352, 137, 136, 608, 607, 606, 1335, 575, 1761,
- /* 450 */ 43, 1342, 1343, 1761, 316, 577, 1486, 1268, 1269, 1270,
- /* 460 */ 1317, 1318, 1320, 1321, 1322, 1323, 1324, 1325, 573, 569,
- /* 470 */ 1333, 1334, 1336, 1337, 1338, 1339, 1341, 1344, 63, 1823,
- /* 480 */ 9, 74, 1935, 294, 1792, 580, 1794, 1795, 576, 574,
- /* 490 */ 571, 568, 1841, 1289, 122, 165, 1265, 1761, 1263, 1932,
- /* 500 */ 33, 32, 663, 1602, 40, 38, 36, 35, 34, 317,
- /* 510 */ 168, 315, 314, 1485, 473, 351, 1342, 1343, 475, 1268,
- /* 520 */ 1269, 1291, 1317, 1318, 1320, 1321, 1322, 1323, 1324, 1325,
- /* 530 */ 573, 569, 1333, 1334, 1336, 1337, 1338, 1339, 1341, 1344,
- /* 540 */ 474, 1010, 1011, 33, 32, 1460, 1364, 40, 38, 36,
- /* 550 */ 35, 34, 168, 168, 1761, 526, 1935, 1592, 377, 146,
- /* 560 */ 376, 1265, 63, 1263, 26, 1532, 382, 168, 1610, 165,
- /* 570 */ 33, 32, 217, 1932, 40, 38, 36, 35, 34, 218,
- /* 580 */ 1484, 1791, 1414, 1607, 1268, 1269, 1594, 1317, 1318, 1320,
- /* 590 */ 1321, 1322, 1323, 1324, 1325, 573, 569, 1333, 1334, 1336,
- /* 600 */ 1337, 1338, 1339, 1341, 1344, 39, 37, 77, 27, 1809,
- /* 610 */ 498, 1884, 63, 339, 78, 1264, 168, 578, 1369, 1483,
- /* 620 */ 505, 1761, 1761, 373, 577, 1302, 1340, 28, 1262, 482,
- /* 630 */ 481, 1482, 1459, 33, 32, 1881, 123, 40, 38, 36,
- /* 640 */ 35, 34, 375, 371, 438, 1590, 477, 480, 1823, 1335,
- /* 650 */ 1287, 1935, 96, 1792, 580, 1794, 1795, 576, 253, 571,
- /* 660 */ 1761, 1270, 1869, 513, 165, 1481, 1868, 1865, 1932, 1081,
- /* 670 */ 33, 32, 1761, 1706, 40, 38, 36, 35, 34, 666,
- /* 680 */ 33, 32, 9, 526, 40, 38, 36, 35, 34, 1478,
- /* 690 */ 1477, 33, 32, 268, 383, 40, 38, 36, 35, 34,
- /* 700 */ 168, 1704, 1083, 300, 663, 432, 1761, 157, 436, 1698,
- /* 710 */ 214, 1607, 656, 652, 648, 644, 266, 1582, 1342, 1343,
- /* 720 */ 176, 33, 32, 307, 572, 40, 38, 36, 35, 34,
- /* 730 */ 1761, 1761, 39, 37, 526, 604, 526, 302, 1476, 1287,
- /* 740 */ 339, 549, 1264, 526, 307, 389, 412, 404, 92, 424,
- /* 750 */ 168, 231, 1302, 1340, 405, 1262, 440, 1585, 74, 436,
- /* 760 */ 1362, 1407, 1607, 1265, 1607, 1263, 397, 1289, 425, 127,
- /* 770 */ 399, 1607, 1475, 1703, 1779, 300, 1335, 1889, 1396, 1761,
- /* 780 */ 1603, 1362, 44, 4, 523, 1775, 1268, 1269, 1270, 1317,
- /* 790 */ 1318, 1320, 1321, 1322, 1323, 1324, 1325, 573, 569, 1333,
- /* 800 */ 1334, 1336, 1337, 1338, 1339, 1341, 1344, 390, 125, 2,
- /* 810 */ 1771, 1777, 334, 1761, 1363, 7, 220, 450, 611, 386,
- /* 820 */ 90, 526, 571, 163, 1877, 1878, 1659, 1882, 1424, 145,
- /* 830 */ 87, 663, 448, 312, 1236, 1363, 213, 137, 136, 608,
- /* 840 */ 607, 606, 1657, 1480, 1884, 1342, 1343, 423, 1474, 1607,
- /* 850 */ 418, 417, 416, 415, 414, 411, 410, 409, 408, 407,
- /* 860 */ 403, 402, 401, 400, 394, 393, 392, 391, 1880, 388,
- /* 870 */ 387, 535, 1422, 1423, 1425, 1426, 29, 337, 1357, 1358,
- /* 880 */ 1359, 1360, 1361, 1365, 1366, 1367, 1368, 1350, 61, 1761,
- /* 890 */ 1265, 609, 1263, 1289, 1650, 1935, 1400, 29, 337, 1357,
- /* 900 */ 1358, 1359, 1360, 1361, 1365, 1366, 1367, 1368, 166, 1583,
- /* 910 */ 1791, 1473, 1932, 1268, 1269, 1472, 1317, 1318, 1320, 1321,
- /* 920 */ 1322, 1323, 1324, 1325, 573, 569, 1333, 1334, 1336, 1337,
- /* 930 */ 1338, 1339, 1341, 1344, 623, 147, 1579, 1791, 1809, 526,
- /* 940 */ 279, 611, 610, 256, 1319, 1650, 578, 1884, 1471, 1470,
- /* 950 */ 449, 1761, 1761, 577, 277, 60, 1761, 475, 59, 1292,
- /* 960 */ 137, 136, 608, 607, 606, 1809, 554, 1607, 1289, 613,
- /* 970 */ 1568, 1879, 135, 578, 181, 429, 427, 1823, 1761, 474,
- /* 980 */ 577, 94, 1792, 580, 1794, 1795, 576, 536, 571, 1761,
- /* 990 */ 1761, 1869, 1780, 554, 468, 306, 1865, 273, 53, 509,
- /* 1000 */ 1637, 1659, 1396, 1775, 1823, 526, 63, 1935, 94, 1792,
- /* 1010 */ 580, 1794, 1795, 576, 526, 571, 1604, 1658, 1869, 54,
- /* 1020 */ 167, 1748, 306, 1865, 1932, 1736, 1519, 202, 1771, 1777,
- /* 1030 */ 200, 336, 335, 1607, 1935, 1462, 1463, 558, 526, 526,
- /* 1040 */ 571, 1278, 1607, 1273, 93, 526, 526, 165, 483, 506,
- /* 1050 */ 510, 1932, 1340, 561, 1271, 326, 228, 522, 526, 204,
- /* 1060 */ 526, 1791, 203, 146, 499, 526, 1607, 1607, 361, 524,
- /* 1070 */ 1319, 525, 1609, 1607, 1607, 1335, 262, 41, 222, 68,
- /* 1080 */ 67, 381, 342, 526, 172, 1272, 1607, 1270, 1607, 1809,
- /* 1090 */ 146, 131, 245, 1607, 346, 206, 233, 578, 205, 1609,
- /* 1100 */ 301, 567, 1761, 369, 577, 367, 363, 359, 356, 353,
- /* 1110 */ 345, 1607, 1782, 208, 134, 135, 207, 1810, 146, 1514,
- /* 1120 */ 1399, 1512, 51, 1791, 1213, 226, 237, 1609, 1823, 556,
- /* 1130 */ 566, 51, 95, 1792, 580, 1794, 1795, 576, 519, 571,
- /* 1140 */ 41, 485, 1869, 488, 168, 1319, 330, 1865, 1948, 11,
- /* 1150 */ 10, 1809, 616, 41, 617, 1784, 350, 1903, 584, 578,
- /* 1160 */ 134, 230, 1112, 1503, 1761, 1647, 577, 135, 119, 1421,
- /* 1170 */ 134, 1899, 550, 240, 1069, 1791, 1067, 255, 1370, 250,
- /* 1180 */ 1276, 258, 260, 3, 5, 355, 313, 1326, 1050, 1279,
- /* 1190 */ 1823, 1274, 360, 1229, 95, 1792, 580, 1794, 1795, 576,
- /* 1200 */ 272, 571, 269, 1809, 1869, 1140, 1508, 1144, 330, 1865,
- /* 1210 */ 1948, 578, 1282, 1284, 1151, 1149, 1761, 138, 577, 1926,
- /* 1220 */ 175, 1051, 1275, 1287, 569, 1333, 1334, 1336, 1337, 1338,
- /* 1230 */ 1339, 1791, 385, 1354, 406, 1700, 413, 421, 420, 1293,
- /* 1240 */ 559, 1791, 1823, 422, 426, 431, 95, 1792, 580, 1794,
- /* 1250 */ 1795, 576, 428, 571, 658, 439, 1869, 430, 562, 1809,
- /* 1260 */ 330, 1865, 1948, 1295, 442, 443, 184, 578, 1294, 1809,
- /* 1270 */ 186, 1888, 1761, 1296, 577, 444, 445, 578, 189, 447,
- /* 1280 */ 191, 72, 1761, 73, 577, 451, 470, 554, 195, 472,
- /* 1290 */ 1791, 304, 1597, 199, 118, 1593, 1741, 554, 1823, 501,
- /* 1300 */ 201, 140, 286, 1792, 580, 1794, 1795, 576, 1823, 571,
- /* 1310 */ 141, 1595, 286, 1792, 580, 1794, 1795, 576, 1809, 571,
- /* 1320 */ 1591, 142, 143, 212, 270, 500, 578, 215, 1935, 507,
- /* 1330 */ 504, 1761, 511, 577, 322, 219, 534, 514, 1935, 132,
- /* 1340 */ 1740, 167, 1710, 520, 517, 1932, 133, 324, 81, 521,
- /* 1350 */ 1791, 165, 1292, 530, 271, 1932, 83, 1823, 1608, 235,
- /* 1360 */ 1791, 96, 1792, 580, 1794, 1795, 576, 1900, 571, 537,
- /* 1370 */ 239, 1869, 532, 1910, 6, 565, 1865, 533, 1809, 546,
- /* 1380 */ 329, 1909, 540, 531, 529, 244, 578, 1891, 1809, 528,
- /* 1390 */ 1396, 1761, 1291, 577, 154, 126, 578, 249, 563, 560,
- /* 1400 */ 246, 1761, 48, 577, 1885, 247, 331, 248, 85, 1791,
- /* 1410 */ 582, 1651, 1580, 265, 274, 659, 660, 1823, 1931, 662,
- /* 1420 */ 52, 149, 1792, 580, 1794, 1795, 576, 1823, 571, 1951,
- /* 1430 */ 153, 96, 1792, 580, 1794, 1795, 576, 1809, 571, 557,
- /* 1440 */ 1755, 1869, 323, 287, 297, 578, 1866, 1850, 296, 254,
- /* 1450 */ 1761, 276, 577, 564, 1754, 278, 257, 259, 65, 1753,
- /* 1460 */ 1791, 1752, 66, 1749, 357, 555, 1949, 358, 1256, 1257,
- /* 1470 */ 171, 362, 1747, 364, 365, 366, 1823, 1746, 1745, 368,
- /* 1480 */ 295, 1792, 580, 1794, 1795, 576, 370, 571, 1809, 1744,
- /* 1490 */ 372, 1743, 374, 527, 1232, 1231, 578, 1721, 1720, 379,
- /* 1500 */ 380, 1761, 1201, 577, 1719, 1718, 1693, 129, 1692, 1691,
- /* 1510 */ 1690, 69, 1791, 1689, 1688, 1687, 1686, 1685, 395, 396,
- /* 1520 */ 1684, 398, 1791, 130, 1669, 1668, 1667, 1823, 1683, 1682,
- /* 1530 */ 1681, 295, 1792, 580, 1794, 1795, 576, 1680, 571, 1791,
- /* 1540 */ 1809, 1679, 1678, 1677, 1676, 1675, 1674, 1673, 578, 1672,
- /* 1550 */ 1809, 1671, 1670, 1761, 1666, 577, 1665, 1664, 578, 1663,
- /* 1560 */ 1203, 1662, 1661, 1761, 1660, 577, 1534, 1809, 179, 1533,
- /* 1570 */ 1531, 1499, 120, 182, 180, 575, 1498, 158, 435, 1823,
- /* 1580 */ 1761, 1013, 577, 290, 1792, 580, 1794, 1795, 576, 1823,
- /* 1590 */ 571, 190, 1012, 149, 1792, 580, 1794, 1795, 576, 1791,
- /* 1600 */ 571, 437, 1734, 183, 121, 1728, 1823, 1717, 1716, 1702,
- /* 1610 */ 294, 1792, 580, 1794, 1795, 576, 1791, 571, 188, 1842,
- /* 1620 */ 1586, 545, 1043, 1530, 1528, 452, 454, 1809, 1526, 453,
- /* 1630 */ 456, 457, 338, 458, 1524, 578, 460, 462, 1950, 461,
- /* 1640 */ 1761, 1522, 577, 465, 1809, 464, 1511, 1510, 1495, 340,
- /* 1650 */ 466, 1588, 578, 1155, 1154, 1587, 50, 1761, 631, 577,
- /* 1660 */ 1080, 1077, 633, 1520, 198, 1076, 1823, 1075, 1515, 1513,
- /* 1670 */ 295, 1792, 580, 1794, 1795, 576, 318, 571, 319, 320,
- /* 1680 */ 486, 1494, 1493, 1823, 1791, 489, 197, 295, 1792, 580,
- /* 1690 */ 1794, 1795, 576, 491, 571, 1492, 493, 495, 97, 1733,
- /* 1700 */ 152, 1238, 1791, 1727, 216, 467, 463, 459, 455, 196,
- /* 1710 */ 56, 502, 1809, 144, 1715, 1713, 1714, 1712, 1711, 221,
- /* 1720 */ 578, 1248, 15, 1709, 227, 1761, 79, 577, 1701, 503,
- /* 1730 */ 1809, 321, 508, 80, 232, 518, 41, 87, 578, 229,
- /* 1740 */ 47, 75, 16, 1761, 194, 577, 243, 242, 82, 25,
- /* 1750 */ 17, 1823, 1436, 23, 234, 280, 1792, 580, 1794, 1795,
- /* 1760 */ 576, 1791, 571, 236, 1418, 515, 238, 1782, 151, 1823,
- /* 1770 */ 1420, 252, 241, 281, 1792, 580, 1794, 1795, 576, 24,
- /* 1780 */ 571, 1413, 1393, 46, 1781, 86, 18, 155, 1392, 1809,
- /* 1790 */ 1448, 1453, 1442, 1447, 332, 1452, 1451, 578, 333, 10,
- /* 1800 */ 45, 1280, 1761, 1330, 577, 1355, 193, 187, 13, 192,
- /* 1810 */ 1791, 19, 1328, 446, 1327, 156, 1826, 169, 570, 31,
- /* 1820 */ 12, 20, 1310, 21, 583, 1141, 341, 1138, 1823, 185,
- /* 1830 */ 587, 1791, 282, 1792, 580, 1794, 1795, 576, 1809, 571,
- /* 1840 */ 585, 588, 581, 1135, 579, 590, 578, 1129, 593, 596,
- /* 1850 */ 1118, 1761, 1127, 577, 591, 594, 597, 1133, 1132, 1809,
- /* 1860 */ 1131, 1130, 88, 89, 263, 603, 1150, 578, 1146, 62,
- /* 1870 */ 1041, 1072, 1761, 612, 577, 1071, 1070, 1823, 1068, 1066,
- /* 1880 */ 1065, 289, 1792, 580, 1794, 1795, 576, 1064, 571, 1791,
- /* 1890 */ 1087, 621, 264, 1062, 1061, 1060, 1059, 1058, 1823, 1791,
- /* 1900 */ 1057, 1056, 291, 1792, 580, 1794, 1795, 576, 1047, 571,
- /* 1910 */ 1084, 1082, 1053, 1052, 1049, 1048, 1046, 1809, 1527, 641,
- /* 1920 */ 1525, 642, 643, 645, 647, 578, 1523, 1809, 649, 646,
- /* 1930 */ 1761, 651, 577, 1521, 650, 578, 653, 655, 654, 1509,
- /* 1940 */ 1761, 657, 577, 1491, 1003, 267, 661, 1466, 1466, 1266,
- /* 1950 */ 275, 1791, 664, 1466, 665, 1466, 1823, 1466, 1466, 1466,
- /* 1960 */ 283, 1792, 580, 1794, 1795, 576, 1823, 571, 1791, 1466,
- /* 1970 */ 292, 1792, 580, 1794, 1795, 576, 1466, 571, 1466, 1809,
- /* 1980 */ 1466, 1466, 1466, 1466, 1466, 1466, 1466, 578, 1466, 1466,
- /* 1990 */ 1466, 1466, 1761, 1466, 577, 1466, 1809, 1466, 1466, 1466,
- /* 2000 */ 1466, 1466, 1466, 1466, 578, 1466, 1466, 1466, 1466, 1761,
- /* 2010 */ 1466, 577, 1466, 1466, 1466, 1466, 1466, 1791, 1823, 1466,
- /* 2020 */ 1466, 1466, 284, 1792, 580, 1794, 1795, 576, 1466, 571,
- /* 2030 */ 1466, 1466, 1466, 1466, 1791, 1823, 1466, 1466, 1466, 293,
- /* 2040 */ 1792, 580, 1794, 1795, 576, 1809, 571, 1466, 1466, 1466,
- /* 2050 */ 1466, 1466, 1466, 578, 1466, 1466, 1466, 1466, 1761, 1466,
- /* 2060 */ 577, 1466, 1809, 1466, 1466, 1466, 1466, 1466, 1466, 1466,
- /* 2070 */ 578, 1466, 1466, 1466, 1466, 1761, 1466, 577, 1466, 1466,
- /* 2080 */ 1466, 1466, 1466, 1791, 1823, 1466, 1466, 1466, 285, 1792,
- /* 2090 */ 580, 1794, 1795, 576, 1466, 571, 1466, 1466, 1466, 1466,
- /* 2100 */ 1466, 1823, 1466, 1466, 1466, 298, 1792, 580, 1794, 1795,
- /* 2110 */ 576, 1809, 571, 1466, 1466, 1466, 1466, 1466, 1466, 578,
- /* 2120 */ 1466, 1466, 1466, 1466, 1761, 1466, 577, 1466, 1466, 1466,
- /* 2130 */ 1466, 1466, 1466, 1466, 1466, 1466, 1466, 1791, 1466, 1466,
- /* 2140 */ 1466, 1466, 1466, 1466, 1466, 1466, 1466, 1791, 1466, 1466,
- /* 2150 */ 1823, 1466, 1466, 1466, 299, 1792, 580, 1794, 1795, 576,
- /* 2160 */ 1466, 571, 1466, 1466, 1466, 1809, 1466, 1466, 1466, 1466,
- /* 2170 */ 1466, 1466, 1466, 578, 1466, 1809, 1466, 1466, 1761, 1466,
- /* 2180 */ 577, 1466, 1466, 578, 1466, 1466, 1466, 1466, 1761, 1466,
- /* 2190 */ 577, 1466, 1466, 1466, 1466, 1466, 1466, 1466, 1791, 1466,
- /* 2200 */ 1466, 1466, 1466, 1466, 1823, 1466, 1466, 1466, 1803, 1792,
- /* 2210 */ 580, 1794, 1795, 576, 1823, 571, 1791, 1466, 1802, 1792,
- /* 2220 */ 580, 1794, 1795, 576, 1466, 571, 1809, 1466, 1466, 1466,
- /* 2230 */ 1466, 1466, 1466, 1466, 578, 1466, 1466, 1466, 1466, 1761,
- /* 2240 */ 1466, 577, 1466, 1466, 1809, 1466, 1466, 1466, 1466, 1466,
- /* 2250 */ 1466, 1466, 578, 1466, 1466, 1466, 1466, 1761, 1466, 577,
- /* 2260 */ 1466, 1466, 1466, 1466, 1466, 1823, 1466, 1466, 1466, 1801,
- /* 2270 */ 1792, 580, 1794, 1795, 576, 1791, 571, 1466, 1466, 1466,
- /* 2280 */ 1466, 1466, 1466, 1823, 1466, 1466, 1466, 310, 1792, 580,
- /* 2290 */ 1794, 1795, 576, 1466, 571, 1466, 1791, 1466, 1466, 1466,
- /* 2300 */ 1466, 1466, 1466, 1809, 1466, 1466, 1466, 1466, 1466, 1466,
- /* 2310 */ 1466, 578, 1466, 1466, 1466, 1466, 1761, 1466, 577, 1466,
- /* 2320 */ 1466, 1466, 1466, 1466, 1809, 1466, 1466, 1466, 1466, 1466,
- /* 2330 */ 1466, 1466, 578, 1466, 1466, 1466, 1466, 1761, 1466, 577,
- /* 2340 */ 1466, 1466, 1823, 1466, 1466, 1466, 309, 1792, 580, 1794,
- /* 2350 */ 1795, 576, 1791, 571, 1466, 1466, 1466, 1466, 1466, 1466,
- /* 2360 */ 1466, 1466, 1791, 1823, 1466, 1466, 1466, 311, 1792, 580,
- /* 2370 */ 1794, 1795, 576, 1466, 571, 1466, 1466, 1466, 1466, 1466,
- /* 2380 */ 1809, 1466, 1466, 1466, 1466, 1466, 1466, 1466, 578, 1466,
- /* 2390 */ 1809, 1466, 1466, 1761, 549, 577, 1466, 1466, 578, 1466,
- /* 2400 */ 1466, 1466, 1466, 1761, 1466, 577, 1466, 1466, 1466, 1466,
- /* 2410 */ 1466, 1466, 1466, 1466, 1466, 1466, 1466, 1466, 1466, 1823,
- /* 2420 */ 1466, 1466, 127, 308, 1792, 580, 1794, 1795, 576, 1823,
- /* 2430 */ 571, 1466, 1466, 288, 1792, 580, 1794, 1795, 576, 1466,
- /* 2440 */ 571, 549, 554, 1466, 1466, 1466, 1466, 1466, 1466, 1466,
- /* 2450 */ 1466, 1466, 1466, 1466, 1466, 1466, 1466, 1466, 1466, 1466,
- /* 2460 */ 1466, 125, 1466, 1466, 1466, 1466, 1466, 1466, 1466, 127,
- /* 2470 */ 1466, 1466, 1466, 1466, 1466, 1466, 251, 1877, 548, 1466,
- /* 2480 */ 547, 1466, 1466, 1935, 1466, 1466, 1466, 1466, 1466, 554,
- /* 2490 */ 1466, 1466, 1466, 1466, 1466, 1466, 167, 1466, 1466, 1466,
- /* 2500 */ 1932, 1466, 1466, 1466, 1466, 1466, 1466, 1466, 125, 1466,
- /* 2510 */ 1466, 1466, 1466, 1466, 1466, 1466, 1466, 1466, 1466, 1466,
- /* 2520 */ 1466, 1466, 1466, 251, 1877, 548, 1466, 547, 1466, 1466,
- /* 2530 */ 1935, 1466, 1466, 1466, 1466, 1466, 1466, 1466, 1466, 1466,
- /* 2540 */ 1466, 1466, 1466, 165, 1466, 1466, 1466, 1932,
+ /* 0 */ 436, 1813, 437, 1519, 444, 345, 437, 1519, 1674, 1676,
+ /* 10 */ 33, 32, 39, 37, 40, 38, 36, 35, 34, 1620,
+ /* 20 */ 340, 530, 1281, 1801, 40, 38, 36, 35, 34, 1831,
+ /* 30 */ 1797, 1681, 117, 1357, 1797, 1279, 1770, 582, 312, 474,
+ /* 40 */ 30, 260, 1783, 1525, 581, 385, 148, 1679, 1496, 1629,
+ /* 50 */ 1035, 1813, 1034, 1801, 1793, 1799, 1352, 558, 1793, 1799,
+ /* 60 */ 329, 14, 1675, 1676, 1797, 472, 575, 1845, 1287, 1546,
+ /* 70 */ 575, 94, 1814, 584, 1816, 1817, 580, 1485, 575, 1831,
+ /* 80 */ 1036, 1891, 303, 362, 1802, 306, 1887, 582, 1793, 1799,
+ /* 90 */ 335, 666, 1783, 1, 581, 1797, 1451, 1957, 107, 1957,
+ /* 100 */ 575, 106, 105, 104, 103, 102, 101, 100, 99, 98,
+ /* 110 */ 166, 1681, 165, 1307, 1954, 671, 1954, 1845, 328, 1793,
+ /* 120 */ 1799, 149, 1814, 584, 1816, 1817, 580, 1679, 575, 1359,
+ /* 130 */ 1360, 575, 648, 647, 646, 645, 350, 1607, 644, 643,
+ /* 140 */ 128, 638, 637, 636, 635, 634, 633, 632, 631, 139,
+ /* 150 */ 627, 626, 625, 349, 348, 622, 621, 620, 619, 618,
+ /* 160 */ 36, 35, 34, 33, 32, 559, 1971, 40, 38, 36,
+ /* 170 */ 35, 34, 556, 556, 1306, 1282, 556, 1280, 1136, 606,
+ /* 180 */ 605, 604, 1140, 603, 1142, 1143, 602, 1145, 599, 63,
+ /* 190 */ 1151, 596, 1153, 1154, 593, 590, 453, 453, 1285, 1286,
+ /* 200 */ 553, 1334, 1335, 1337, 1338, 1339, 1340, 1341, 1342, 577,
+ /* 210 */ 573, 1350, 1351, 1353, 1354, 1355, 1356, 1358, 1361, 39,
+ /* 220 */ 37, 1420, 1178, 1179, 1307, 1831, 500, 340, 127, 1281,
+ /* 230 */ 33, 32, 167, 546, 40, 38, 36, 35, 34, 498,
+ /* 240 */ 1357, 496, 1279, 486, 485, 43, 42, 63, 558, 107,
+ /* 250 */ 123, 379, 106, 105, 104, 103, 102, 101, 100, 99,
+ /* 260 */ 98, 481, 484, 1352, 491, 22, 125, 480, 14, 545,
+ /* 270 */ 378, 84, 377, 1018, 1813, 1287, 39, 37, 530, 501,
+ /* 280 */ 1417, 250, 1899, 552, 340, 551, 1281, 478, 1957, 117,
+ /* 290 */ 1813, 1957, 1622, 209, 7, 327, 479, 1357, 1507, 1279,
+ /* 300 */ 2, 166, 1831, 146, 164, 1954, 1629, 494, 1954, 477,
+ /* 310 */ 582, 488, 1631, 1022, 1023, 1783, 208, 581, 1831, 150,
+ /* 320 */ 1352, 1957, 671, 1588, 435, 14, 579, 439, 167, 1441,
+ /* 330 */ 558, 1783, 1287, 581, 1956, 422, 1359, 1360, 1954, 1783,
+ /* 340 */ 1845, 135, 173, 517, 94, 1814, 584, 1816, 1817, 580,
+ /* 350 */ 326, 575, 58, 1729, 1891, 57, 1845, 2, 306, 1887,
+ /* 360 */ 293, 1814, 584, 1816, 1817, 580, 578, 575, 572, 1863,
+ /* 370 */ 1957, 1484, 539, 1439, 1440, 1442, 1443, 222, 71, 671,
+ /* 380 */ 146, 70, 1282, 164, 1280, 167, 167, 1954, 1506, 1632,
+ /* 390 */ 177, 176, 54, 1359, 1360, 116, 115, 114, 113, 112,
+ /* 400 */ 111, 110, 109, 108, 63, 1285, 1286, 1098, 1334, 1335,
+ /* 410 */ 1337, 1338, 1339, 1340, 1341, 1342, 577, 573, 1350, 1351,
+ /* 420 */ 1353, 1354, 1355, 1356, 1358, 1361, 33, 32, 1681, 1783,
+ /* 430 */ 40, 38, 36, 35, 34, 344, 26, 1261, 1262, 1282,
+ /* 440 */ 1100, 1280, 33, 32, 1679, 28, 40, 38, 36, 35,
+ /* 450 */ 34, 33, 32, 217, 562, 40, 38, 36, 35, 34,
+ /* 460 */ 1813, 317, 1285, 1286, 542, 1334, 1335, 1337, 1338, 1339,
+ /* 470 */ 1340, 1341, 1342, 577, 573, 1350, 1351, 1353, 1354, 1355,
+ /* 480 */ 1356, 1358, 1361, 39, 37, 302, 1413, 1304, 1831, 530,
+ /* 490 */ 63, 340, 78, 1281, 415, 210, 557, 427, 223, 224,
+ /* 500 */ 55, 1783, 1957, 581, 1357, 1308, 1279, 441, 516, 1416,
+ /* 510 */ 353, 1957, 1431, 1304, 400, 165, 428, 1629, 402, 1954,
+ /* 520 */ 318, 49, 316, 315, 1955, 476, 1845, 1352, 1954, 478,
+ /* 530 */ 95, 1814, 584, 1816, 1817, 580, 1722, 575, 1306, 1287,
+ /* 540 */ 1891, 548, 543, 167, 331, 1887, 159, 172, 1957, 1281,
+ /* 550 */ 1957, 477, 76, 305, 674, 64, 520, 305, 163, 393,
+ /* 560 */ 520, 164, 1279, 164, 8, 1954, 1917, 1954, 267, 553,
+ /* 570 */ 547, 389, 158, 33, 32, 343, 346, 40, 38, 36,
+ /* 580 */ 35, 34, 156, 146, 146, 1668, 671, 664, 660, 656,
+ /* 590 */ 652, 265, 1631, 1631, 1482, 1287, 1035, 127, 1034, 426,
+ /* 600 */ 1359, 1360, 421, 420, 419, 418, 417, 414, 413, 412,
+ /* 610 */ 411, 410, 406, 405, 404, 403, 397, 396, 395, 394,
+ /* 620 */ 1505, 391, 390, 314, 617, 1424, 1036, 92, 1367, 167,
+ /* 630 */ 230, 1306, 33, 32, 1306, 125, 40, 38, 36, 35,
+ /* 640 */ 34, 1305, 671, 1381, 63, 615, 1282, 252, 1280, 555,
+ /* 650 */ 160, 1899, 1900, 355, 1904, 352, 11, 10, 483, 482,
+ /* 660 */ 167, 1783, 563, 527, 137, 136, 612, 611, 610, 1285,
+ /* 670 */ 1286, 1732, 1334, 1335, 1337, 1338, 1339, 1340, 1341, 1342,
+ /* 680 */ 577, 573, 1350, 1351, 1353, 1354, 1355, 1356, 1358, 1361,
+ /* 690 */ 39, 37, 1362, 1957, 1287, 1957, 219, 502, 340, 374,
+ /* 700 */ 1281, 74, 1282, 167, 1280, 27, 164, 74, 164, 91,
+ /* 710 */ 1954, 1357, 1954, 1279, 1255, 1386, 212, 1504, 376, 372,
+ /* 720 */ 122, 1605, 124, 1625, 1813, 1285, 1286, 1722, 1503, 1624,
+ /* 730 */ 1621, 1502, 1477, 530, 1352, 33, 32, 1957, 175, 40,
+ /* 740 */ 38, 36, 35, 34, 169, 530, 1287, 39, 37, 1393,
+ /* 750 */ 164, 530, 1831, 1549, 1954, 340, 383, 1281, 1783, 1501,
+ /* 760 */ 582, 1629, 384, 443, 553, 1783, 439, 581, 1357, 1783,
+ /* 770 */ 1279, 9, 1783, 1629, 530, 642, 640, 517, 530, 1629,
+ /* 780 */ 558, 617, 61, 167, 1306, 392, 145, 1730, 1500, 407,
+ /* 790 */ 1845, 1352, 127, 671, 285, 1814, 584, 1816, 1817, 580,
+ /* 800 */ 1783, 575, 1629, 1287, 337, 336, 1629, 1359, 1360, 609,
+ /* 810 */ 1499, 530, 530, 1336, 1295, 530, 1336, 486, 485, 1476,
+ /* 820 */ 1957, 1319, 408, 451, 123, 1357, 452, 1288, 9, 1783,
+ /* 830 */ 125, 530, 1498, 166, 1495, 481, 484, 1954, 629, 1629,
+ /* 840 */ 1629, 480, 1626, 1629, 1309, 161, 1899, 1900, 1352, 1904,
+ /* 850 */ 671, 1783, 1728, 1282, 300, 1280, 630, 1618, 1601, 1629,
+ /* 860 */ 1287, 90, 33, 32, 1359, 1360, 40, 38, 36, 35,
+ /* 870 */ 34, 87, 1727, 1783, 300, 1783, 1285, 1286, 1494, 1334,
+ /* 880 */ 1335, 1337, 1338, 1339, 1340, 1341, 1342, 577, 573, 1350,
+ /* 890 */ 1351, 1353, 1354, 1355, 1356, 1358, 1361, 530, 530, 553,
+ /* 900 */ 530, 201, 530, 530, 199, 1906, 509, 570, 347, 503,
+ /* 910 */ 1282, 510, 1280, 514, 227, 307, 1614, 1493, 613, 1783,
+ /* 920 */ 1492, 1672, 1491, 1022, 1023, 1629, 1629, 127, 1629, 1903,
+ /* 930 */ 1629, 1629, 1490, 1285, 1286, 1489, 1334, 1335, 1337, 1338,
+ /* 940 */ 1339, 1340, 1341, 1342, 577, 573, 1350, 1351, 1353, 1354,
+ /* 950 */ 1355, 1356, 1358, 1361, 39, 37, 1379, 530, 1783, 530,
+ /* 960 */ 1616, 1783, 340, 1783, 1281, 125, 1336, 1296, 526, 1291,
+ /* 970 */ 528, 1911, 1413, 1783, 196, 1357, 1783, 1279, 1488, 1487,
+ /* 980 */ 162, 1899, 1900, 1606, 1904, 1629, 232, 1629, 152, 1813,
+ /* 990 */ 1299, 1301, 1906, 470, 466, 462, 458, 195, 1352, 1681,
+ /* 1000 */ 44, 4, 573, 1350, 1351, 1353, 1354, 1355, 1356, 1604,
+ /* 1010 */ 1287, 1380, 530, 530, 386, 1680, 1902, 1831, 614, 1783,
+ /* 1020 */ 1783, 1672, 324, 529, 261, 582, 1906, 387, 560, 147,
+ /* 1030 */ 1783, 565, 581, 75, 278, 2, 193, 571, 33, 32,
+ /* 1040 */ 1629, 1629, 40, 38, 36, 35, 34, 41, 276, 60,
+ /* 1050 */ 1901, 272, 59, 203, 1659, 1845, 202, 671, 216, 294,
+ /* 1060 */ 1814, 584, 1816, 1817, 580, 221, 575, 131, 180, 432,
+ /* 1070 */ 430, 1359, 1360, 29, 338, 1374, 1375, 1376, 1377, 1378,
+ /* 1080 */ 1382, 1383, 1384, 1385, 205, 1290, 207, 204, 615, 206,
+ /* 1090 */ 1536, 307, 53, 513, 11, 10, 1289, 77, 1232, 192,
+ /* 1100 */ 186, 1531, 191, 1529, 63, 1589, 449, 137, 136, 612,
+ /* 1110 */ 611, 610, 487, 134, 615, 135, 225, 1282, 523, 1280,
+ /* 1120 */ 51, 236, 184, 489, 1319, 492, 1479, 1480, 1804, 1612,
+ /* 1130 */ 51, 41, 1379, 137, 136, 612, 611, 610, 41, 588,
+ /* 1140 */ 1285, 1286, 93, 1334, 1335, 1337, 1338, 1339, 1340, 1341,
+ /* 1150 */ 1342, 577, 573, 1350, 1351, 1353, 1354, 1355, 1356, 1358,
+ /* 1160 */ 1361, 134, 135, 119, 229, 134, 1129, 1062, 213, 623,
+ /* 1170 */ 624, 1438, 239, 576, 1371, 1806, 608, 1497, 68, 67,
+ /* 1180 */ 382, 1387, 1343, 171, 255, 540, 471, 1380, 244, 271,
+ /* 1190 */ 1157, 1082, 1080, 1832, 1813, 351, 1520, 1669, 1921, 301,
+ /* 1200 */ 1063, 554, 370, 254, 368, 364, 360, 357, 354, 249,
+ /* 1210 */ 3, 257, 1161, 1168, 1166, 259, 138, 356, 5, 361,
+ /* 1220 */ 1248, 313, 1831, 174, 1304, 268, 388, 1293, 409, 1724,
+ /* 1230 */ 557, 416, 424, 423, 425, 1783, 429, 581, 1292, 431,
+ /* 1240 */ 433, 566, 1310, 167, 1312, 434, 442, 1813, 183, 29,
+ /* 1250 */ 338, 1374, 1375, 1376, 1377, 1378, 1382, 1383, 1384, 1385,
+ /* 1260 */ 1845, 445, 446, 185, 95, 1814, 584, 1816, 1817, 580,
+ /* 1270 */ 1311, 575, 447, 1313, 1891, 1831, 188, 450, 331, 1887,
+ /* 1280 */ 159, 448, 473, 582, 190, 72, 73, 1813, 1783, 454,
+ /* 1290 */ 581, 194, 269, 475, 1619, 198, 304, 1615, 118, 200,
+ /* 1300 */ 1918, 1763, 211, 140, 141, 1617, 1613, 504, 214, 505,
+ /* 1310 */ 142, 1813, 143, 1845, 511, 1831, 515, 95, 1814, 584,
+ /* 1320 */ 1816, 1817, 580, 582, 575, 218, 508, 1891, 1783, 524,
+ /* 1330 */ 581, 331, 1887, 1970, 518, 81, 538, 132, 1762, 1831,
+ /* 1340 */ 323, 83, 1925, 1734, 521, 1309, 270, 582, 325, 1630,
+ /* 1350 */ 541, 1813, 1783, 1845, 581, 6, 133, 279, 1814, 584,
+ /* 1360 */ 1816, 1817, 580, 1932, 575, 534, 525, 536, 537, 234,
+ /* 1370 */ 238, 330, 550, 544, 535, 533, 532, 1845, 1413, 1831,
+ /* 1380 */ 126, 95, 1814, 584, 1816, 1817, 580, 582, 575, 1922,
+ /* 1390 */ 245, 1891, 1783, 1931, 581, 331, 1887, 1970, 567, 1308,
+ /* 1400 */ 564, 248, 48, 332, 1813, 85, 1948, 1907, 586, 1673,
+ /* 1410 */ 1602, 273, 667, 52, 668, 1913, 243, 1845, 670, 299,
+ /* 1420 */ 264, 95, 1814, 584, 1816, 1817, 580, 277, 575, 246,
+ /* 1430 */ 153, 1891, 1831, 1872, 247, 331, 1887, 1970, 275, 286,
+ /* 1440 */ 582, 296, 295, 1777, 253, 1783, 1910, 581, 1776, 1775,
+ /* 1450 */ 1953, 65, 1774, 561, 1813, 568, 256, 1973, 258, 66,
+ /* 1460 */ 558, 1771, 358, 359, 1273, 1274, 170, 363, 1769, 365,
+ /* 1470 */ 1845, 1813, 366, 367, 285, 1814, 584, 1816, 1817, 580,
+ /* 1480 */ 1768, 575, 1831, 369, 1767, 371, 1766, 373, 1765, 375,
+ /* 1490 */ 582, 1251, 1250, 1745, 1744, 1783, 381, 581, 380, 1831,
+ /* 1500 */ 1957, 1743, 1742, 1220, 1717, 1716, 1715, 582, 129, 1714,
+ /* 1510 */ 1713, 1712, 1783, 164, 581, 69, 1711, 1954, 1710, 1709,
+ /* 1520 */ 1845, 1813, 398, 1706, 96, 1814, 584, 1816, 1817, 580,
+ /* 1530 */ 1708, 575, 1707, 399, 1891, 401, 1705, 1845, 1890, 1887,
+ /* 1540 */ 1704, 96, 1814, 584, 1816, 1817, 580, 1703, 575, 1831,
+ /* 1550 */ 1702, 1891, 1701, 1700, 1699, 569, 1887, 582, 1698, 1697,
+ /* 1560 */ 1696, 1695, 1783, 1694, 581, 1693, 1692, 130, 1691, 1813,
+ /* 1570 */ 1690, 1689, 1688, 1687, 1686, 1685, 1684, 1222, 1683, 1813,
+ /* 1580 */ 178, 1550, 179, 1548, 1516, 181, 182, 1845, 1682, 1551,
+ /* 1590 */ 120, 96, 1814, 584, 1816, 1817, 580, 1831, 575, 157,
+ /* 1600 */ 1025, 1891, 531, 438, 1024, 582, 1888, 1831, 440, 1515,
+ /* 1610 */ 1783, 1758, 581, 1752, 1741, 582, 121, 189, 187, 1740,
+ /* 1620 */ 1783, 1726, 581, 1608, 1547, 1545, 1055, 1543, 455, 456,
+ /* 1630 */ 1813, 459, 460, 457, 461, 1845, 1541, 463, 464, 294,
+ /* 1640 */ 1814, 584, 1816, 1817, 580, 1845, 575, 465, 1539, 289,
+ /* 1650 */ 1814, 584, 1816, 1817, 580, 467, 575, 468, 1831, 1171,
+ /* 1660 */ 1609, 469, 1528, 1527, 1512, 1610, 582, 1172, 1097, 50,
+ /* 1670 */ 1096, 1783, 639, 581, 641, 1093, 1092, 1091, 1537, 319,
+ /* 1680 */ 1532, 1530, 493, 320, 1511, 495, 1510, 549, 490, 321,
+ /* 1690 */ 1813, 497, 1509, 499, 97, 1757, 1845, 197, 1751, 56,
+ /* 1700 */ 149, 1814, 584, 1816, 1817, 580, 144, 575, 1813, 506,
+ /* 1710 */ 1257, 507, 1739, 1737, 1738, 1736, 1735, 1265, 1831, 15,
+ /* 1720 */ 519, 1733, 220, 226, 1725, 231, 579, 79, 215, 80,
+ /* 1730 */ 45, 1783, 41, 581, 47, 82, 1831, 23, 233, 16,
+ /* 1740 */ 1453, 339, 241, 235, 582, 1972, 87, 522, 1435, 1783,
+ /* 1750 */ 242, 581, 1437, 1804, 25, 1465, 1845, 228, 1813, 237,
+ /* 1760 */ 293, 1814, 584, 1816, 1817, 580, 322, 575, 251, 1864,
+ /* 1770 */ 512, 1430, 46, 1803, 1845, 17, 151, 154, 294, 1814,
+ /* 1780 */ 584, 1816, 1817, 580, 1813, 575, 1831, 240, 18, 24,
+ /* 1790 */ 86, 341, 1464, 333, 582, 1410, 1409, 1470, 1469, 1783,
+ /* 1800 */ 1459, 581, 1468, 334, 10, 1297, 155, 19, 1813, 1327,
+ /* 1810 */ 1372, 1848, 1831, 587, 585, 168, 583, 1347, 574, 31,
+ /* 1820 */ 582, 342, 1345, 12, 1845, 1783, 1344, 581, 294, 1814,
+ /* 1830 */ 584, 1816, 1817, 580, 1813, 575, 1831, 13, 1158, 20,
+ /* 1840 */ 21, 589, 591, 1155, 582, 592, 594, 1152, 595, 1783,
+ /* 1850 */ 1845, 581, 597, 600, 280, 1814, 584, 1816, 1817, 580,
+ /* 1860 */ 1146, 575, 1831, 598, 1135, 1144, 601, 1150, 1149, 88,
+ /* 1870 */ 582, 1148, 1167, 1147, 1845, 1783, 262, 581, 281, 1814,
+ /* 1880 */ 584, 1816, 1817, 580, 607, 575, 89, 62, 1163, 1053,
+ /* 1890 */ 616, 1088, 1813, 1087, 1086, 1085, 1084, 1083, 1081, 1079,
+ /* 1900 */ 1845, 628, 1078, 1077, 288, 1814, 584, 1816, 1817, 580,
+ /* 1910 */ 1813, 575, 553, 1104, 263, 1075, 1074, 1073, 1072, 1071,
+ /* 1920 */ 1831, 1070, 1069, 1068, 1099, 1101, 1065, 1064, 582, 1061,
+ /* 1930 */ 1059, 1060, 1544, 1783, 1058, 581, 649, 650, 1831, 651,
+ /* 1940 */ 127, 1542, 653, 655, 1540, 654, 582, 657, 659, 658,
+ /* 1950 */ 1538, 1783, 661, 581, 663, 1526, 1508, 662, 1845, 665,
+ /* 1960 */ 558, 1015, 290, 1814, 584, 1816, 1817, 580, 266, 575,
+ /* 1970 */ 669, 1813, 1483, 1283, 274, 672, 1845, 673, 125, 1483,
+ /* 1980 */ 282, 1814, 584, 1816, 1817, 580, 1483, 575, 1483, 1483,
+ /* 1990 */ 1483, 1483, 1483, 250, 1899, 552, 1483, 551, 1483, 1831,
+ /* 2000 */ 1957, 1483, 1483, 1483, 1483, 1483, 1483, 582, 1483, 1483,
+ /* 2010 */ 1483, 1483, 1783, 164, 581, 1483, 1483, 1954, 1483, 1483,
+ /* 2020 */ 1483, 1813, 1483, 1483, 1483, 1483, 1483, 1483, 1483, 1483,
+ /* 2030 */ 1483, 1813, 1483, 1483, 1483, 1483, 1483, 1845, 1483, 1483,
+ /* 2040 */ 1483, 291, 1814, 584, 1816, 1817, 580, 1483, 575, 1831,
+ /* 2050 */ 1483, 1483, 1483, 1483, 1483, 1483, 1483, 582, 1483, 1831,
+ /* 2060 */ 1483, 1483, 1783, 1483, 581, 1483, 1483, 582, 1483, 1483,
+ /* 2070 */ 1483, 1813, 1783, 1483, 581, 1483, 1483, 1483, 1483, 1483,
+ /* 2080 */ 1483, 1483, 1483, 1483, 1483, 1483, 1483, 1845, 1483, 1483,
+ /* 2090 */ 1483, 283, 1814, 584, 1816, 1817, 580, 1845, 575, 1831,
+ /* 2100 */ 1483, 292, 1814, 584, 1816, 1817, 580, 582, 575, 1483,
+ /* 2110 */ 1483, 1483, 1783, 1483, 581, 1483, 1483, 1483, 1483, 1483,
+ /* 2120 */ 1483, 1813, 1483, 1483, 1483, 1483, 1483, 1483, 1483, 1483,
+ /* 2130 */ 1483, 1483, 1483, 1813, 1483, 1483, 1483, 1845, 1483, 1483,
+ /* 2140 */ 1483, 284, 1814, 584, 1816, 1817, 580, 1813, 575, 1831,
+ /* 2150 */ 1483, 1483, 1483, 1483, 1483, 1483, 1483, 582, 1483, 1483,
+ /* 2160 */ 1483, 1831, 1783, 1483, 581, 1483, 1483, 1483, 1483, 582,
+ /* 2170 */ 1483, 1483, 1483, 1483, 1783, 1831, 581, 1483, 1483, 1483,
+ /* 2180 */ 1483, 1483, 1483, 582, 1483, 1483, 1483, 1845, 1783, 1483,
+ /* 2190 */ 581, 297, 1814, 584, 1816, 1817, 580, 1483, 575, 1845,
+ /* 2200 */ 1483, 1483, 1483, 298, 1814, 584, 1816, 1817, 580, 1483,
+ /* 2210 */ 575, 1483, 1483, 1845, 1483, 1483, 1483, 1825, 1814, 584,
+ /* 2220 */ 1816, 1817, 580, 1813, 575, 1483, 1483, 1483, 1483, 1483,
+ /* 2230 */ 1483, 1483, 1483, 1813, 1483, 1483, 1483, 1483, 1483, 1483,
+ /* 2240 */ 1483, 1483, 1483, 1483, 1483, 1483, 1483, 1483, 1483, 1483,
+ /* 2250 */ 1483, 1831, 1483, 1483, 1483, 1483, 1483, 1483, 1483, 582,
+ /* 2260 */ 1483, 1831, 1483, 1483, 1783, 1483, 581, 1483, 1483, 582,
+ /* 2270 */ 1483, 1483, 1483, 1483, 1783, 1483, 581, 1483, 1483, 1483,
+ /* 2280 */ 1483, 1483, 1483, 1483, 1813, 1483, 1483, 1483, 1483, 1845,
+ /* 2290 */ 1483, 1483, 1483, 1824, 1814, 584, 1816, 1817, 580, 1845,
+ /* 2300 */ 575, 1483, 1483, 1823, 1814, 584, 1816, 1817, 580, 1483,
+ /* 2310 */ 575, 1483, 1831, 1483, 1483, 1483, 1483, 1483, 1483, 1483,
+ /* 2320 */ 582, 1483, 1483, 1483, 1483, 1783, 1483, 581, 1483, 1483,
+ /* 2330 */ 1483, 1483, 1483, 1483, 1813, 1483, 1483, 1483, 1483, 1483,
+ /* 2340 */ 1483, 1483, 1483, 1483, 1813, 1483, 1483, 1483, 1483, 1483,
+ /* 2350 */ 1845, 1483, 1483, 1483, 310, 1814, 584, 1816, 1817, 580,
+ /* 2360 */ 1483, 575, 1831, 1483, 1483, 1483, 1483, 1483, 1483, 1483,
+ /* 2370 */ 582, 1483, 1831, 1483, 1483, 1783, 1483, 581, 1483, 1483,
+ /* 2380 */ 582, 1483, 1483, 1483, 1813, 1783, 1483, 581, 1483, 1483,
+ /* 2390 */ 1483, 1483, 1483, 1483, 1483, 1483, 1483, 1483, 1483, 1483,
+ /* 2400 */ 1845, 1483, 1483, 1483, 309, 1814, 584, 1816, 1817, 580,
+ /* 2410 */ 1845, 575, 1831, 1483, 311, 1814, 584, 1816, 1817, 580,
+ /* 2420 */ 582, 575, 1483, 1483, 1483, 1783, 1483, 581, 1483, 1483,
+ /* 2430 */ 1483, 1483, 1483, 1483, 1813, 1483, 1483, 1483, 1483, 1483,
+ /* 2440 */ 1483, 1483, 1483, 1483, 1483, 1483, 1483, 1483, 1483, 1483,
+ /* 2450 */ 1845, 1483, 1483, 1483, 308, 1814, 584, 1816, 1817, 580,
+ /* 2460 */ 1483, 575, 1831, 1483, 1483, 1483, 1483, 1483, 1483, 1483,
+ /* 2470 */ 582, 1483, 1483, 1483, 1483, 1783, 1483, 581, 1483, 1483,
+ /* 2480 */ 1483, 1483, 1483, 1483, 1483, 1483, 1483, 1483, 1483, 1483,
+ /* 2490 */ 1483, 1483, 1483, 1483, 1483, 1483, 1483, 1483, 1483, 1483,
+ /* 2500 */ 1845, 1483, 1483, 1483, 287, 1814, 584, 1816, 1817, 580,
+ /* 2510 */ 1483, 575,
};
static const YYCODETYPE yy_lookahead[] = {
- /* 0 */ 316, 390, 391, 316, 316, 312, 316, 314, 315, 1,
- /* 10 */ 2, 327, 12, 13, 327, 350, 351, 327, 334, 364,
- /* 20 */ 20, 0, 22, 4, 334, 20, 20, 22, 344, 308,
- /* 30 */ 338, 344, 344, 33, 344, 35, 312, 20, 314, 315,
- /* 40 */ 35, 349, 21, 338, 20, 24, 25, 26, 27, 28,
- /* 50 */ 29, 30, 31, 32, 349, 50, 56, 336, 321, 4,
- /* 60 */ 405, 61, 325, 44, 45, 344, 374, 375, 68, 60,
- /* 70 */ 349, 383, 351, 418, 12, 13, 14, 422, 386, 374,
- /* 80 */ 375, 376, 20, 0, 22, 397, 398, 399, 400, 89,
- /* 90 */ 402, 386, 336, 322, 323, 33, 375, 35, 90, 343,
- /* 100 */ 379, 380, 381, 382, 383, 384, 21, 386, 352, 347,
- /* 110 */ 389, 111, 350, 351, 393, 394, 395, 351, 56, 34,
- /* 120 */ 405, 36, 20, 61, 358, 125, 126, 361, 407, 20,
- /* 130 */ 68, 125, 126, 418, 8, 9, 415, 422, 12, 13,
- /* 140 */ 14, 15, 16, 307, 89, 309, 63, 64, 65, 66,
- /* 150 */ 67, 89, 69, 70, 71, 72, 73, 74, 75, 76,
- /* 160 */ 77, 78, 79, 80, 81, 82, 83, 84, 85, 86,
- /* 170 */ 170, 21, 172, 111, 24, 25, 26, 27, 28, 29,
- /* 180 */ 30, 31, 32, 336, 14, 15, 16, 125, 126, 120,
- /* 190 */ 121, 344, 316, 193, 194, 60, 196, 197, 198, 199,
- /* 200 */ 200, 201, 202, 203, 204, 205, 206, 207, 208, 209,
- /* 210 */ 210, 211, 212, 213, 0, 20, 90, 8, 9, 64,
- /* 220 */ 65, 12, 13, 14, 15, 16, 71, 227, 381, 353,
- /* 230 */ 305, 120, 170, 318, 172, 22, 81, 82, 24, 25,
- /* 240 */ 26, 27, 28, 29, 30, 31, 32, 178, 35, 20,
- /* 250 */ 181, 22, 43, 56, 339, 193, 194, 308, 196, 197,
- /* 260 */ 198, 199, 200, 201, 202, 203, 204, 205, 206, 207,
- /* 270 */ 208, 209, 210, 211, 212, 213, 12, 13, 308, 50,
- /* 280 */ 155, 68, 227, 316, 20, 88, 22, 308, 91, 364,
- /* 290 */ 179, 180, 0, 308, 327, 121, 364, 33, 349, 35,
- /* 300 */ 102, 103, 104, 105, 106, 107, 108, 109, 110, 111,
- /* 310 */ 112, 344, 114, 115, 116, 117, 118, 119, 405, 349,
- /* 320 */ 56, 336, 0, 68, 111, 61, 336, 405, 349, 344,
- /* 330 */ 405, 418, 68, 343, 349, 422, 351, 405, 12, 13,
- /* 340 */ 418, 4, 352, 418, 422, 20, 20, 422, 22, 316,
- /* 350 */ 418, 177, 178, 89, 422, 181, 19, 232, 233, 33,
- /* 360 */ 375, 35, 344, 335, 379, 380, 381, 382, 383, 384,
- /* 370 */ 33, 386, 89, 355, 389, 111, 348, 344, 393, 394,
- /* 380 */ 395, 20, 56, 170, 47, 172, 77, 100, 51, 125,
- /* 390 */ 126, 8, 9, 56, 68, 12, 13, 14, 15, 16,
- /* 400 */ 415, 322, 323, 12, 13, 14, 193, 194, 308, 318,
- /* 410 */ 68, 20, 308, 22, 89, 89, 383, 12, 13, 14,
- /* 420 */ 15, 16, 331, 101, 33, 88, 35, 337, 91, 89,
- /* 430 */ 339, 398, 399, 400, 170, 402, 172, 111, 129, 130,
- /* 440 */ 336, 364, 120, 121, 122, 123, 124, 56, 344, 349,
- /* 450 */ 89, 125, 126, 349, 37, 351, 308, 193, 194, 68,
- /* 460 */ 196, 197, 198, 199, 200, 201, 202, 203, 204, 205,
- /* 470 */ 206, 207, 208, 209, 210, 211, 212, 213, 89, 375,
- /* 480 */ 89, 320, 405, 379, 380, 381, 382, 383, 384, 385,
- /* 490 */ 386, 387, 388, 20, 333, 418, 170, 349, 172, 422,
- /* 500 */ 8, 9, 111, 342, 12, 13, 14, 15, 16, 92,
- /* 510 */ 227, 94, 95, 308, 97, 364, 125, 126, 101, 193,
- /* 520 */ 194, 20, 196, 197, 198, 199, 200, 201, 202, 203,
- /* 530 */ 204, 205, 206, 207, 208, 209, 210, 211, 212, 213,
- /* 540 */ 123, 44, 45, 8, 9, 162, 152, 12, 13, 14,
- /* 550 */ 15, 16, 227, 227, 349, 316, 405, 337, 169, 336,
- /* 560 */ 171, 170, 89, 172, 2, 0, 327, 227, 345, 418,
- /* 570 */ 8, 9, 56, 422, 12, 13, 14, 15, 16, 56,
- /* 580 */ 308, 308, 90, 344, 193, 194, 337, 196, 197, 198,
- /* 590 */ 199, 200, 201, 202, 203, 204, 205, 206, 207, 208,
- /* 600 */ 209, 210, 211, 212, 213, 12, 13, 91, 214, 336,
- /* 610 */ 364, 377, 89, 20, 91, 22, 227, 344, 224, 308,
- /* 620 */ 368, 349, 349, 165, 351, 90, 33, 2, 35, 64,
- /* 630 */ 65, 308, 249, 8, 9, 401, 71, 12, 13, 14,
- /* 640 */ 15, 16, 184, 185, 14, 337, 81, 82, 375, 56,
- /* 650 */ 20, 405, 379, 380, 381, 382, 383, 384, 157, 386,
- /* 660 */ 349, 68, 389, 351, 418, 308, 393, 394, 422, 35,
- /* 670 */ 8, 9, 349, 361, 12, 13, 14, 15, 16, 19,
- /* 680 */ 8, 9, 89, 316, 12, 13, 14, 15, 16, 308,
- /* 690 */ 308, 8, 9, 33, 327, 12, 13, 14, 15, 16,
- /* 700 */ 227, 360, 68, 362, 111, 313, 349, 47, 316, 344,
- /* 710 */ 337, 344, 52, 53, 54, 55, 56, 0, 125, 126,
- /* 720 */ 355, 8, 9, 61, 337, 12, 13, 14, 15, 16,
- /* 730 */ 349, 349, 12, 13, 316, 337, 316, 18, 308, 20,
- /* 740 */ 20, 316, 22, 316, 61, 327, 27, 327, 88, 30,
- /* 750 */ 227, 91, 90, 33, 327, 35, 313, 0, 320, 316,
- /* 760 */ 98, 14, 344, 170, 344, 172, 47, 20, 49, 344,
- /* 770 */ 51, 344, 308, 360, 338, 362, 56, 225, 226, 349,
- /* 780 */ 342, 98, 42, 43, 124, 349, 193, 194, 68, 196,
- /* 790 */ 197, 198, 199, 200, 201, 202, 203, 204, 205, 206,
- /* 800 */ 207, 208, 209, 210, 211, 212, 213, 88, 383, 89,
- /* 810 */ 374, 375, 376, 349, 152, 39, 156, 60, 101, 100,
- /* 820 */ 89, 316, 386, 398, 399, 400, 336, 402, 193, 157,
- /* 830 */ 99, 111, 327, 343, 174, 152, 176, 120, 121, 122,
- /* 840 */ 123, 124, 352, 309, 377, 125, 126, 128, 308, 344,
- /* 850 */ 131, 132, 133, 134, 135, 136, 137, 138, 139, 140,
- /* 860 */ 141, 142, 143, 144, 145, 146, 147, 148, 401, 150,
- /* 870 */ 151, 236, 237, 238, 239, 240, 214, 215, 216, 217,
- /* 880 */ 218, 219, 220, 221, 222, 223, 224, 14, 3, 349,
- /* 890 */ 170, 346, 172, 20, 349, 405, 4, 214, 215, 216,
- /* 900 */ 217, 218, 219, 220, 221, 222, 223, 224, 418, 0,
- /* 910 */ 308, 308, 422, 193, 194, 308, 196, 197, 198, 199,
- /* 920 */ 200, 201, 202, 203, 204, 205, 206, 207, 208, 209,
- /* 930 */ 210, 211, 212, 213, 324, 18, 326, 308, 336, 316,
- /* 940 */ 23, 101, 346, 425, 197, 349, 344, 377, 308, 308,
- /* 950 */ 327, 349, 349, 351, 37, 38, 349, 101, 41, 20,
- /* 960 */ 120, 121, 122, 123, 124, 336, 364, 344, 20, 60,
- /* 970 */ 325, 401, 43, 344, 57, 58, 59, 375, 349, 123,
- /* 980 */ 351, 379, 380, 381, 382, 383, 384, 416, 386, 349,
- /* 990 */ 349, 389, 338, 364, 317, 393, 394, 329, 157, 158,
- /* 1000 */ 332, 336, 226, 349, 375, 316, 89, 405, 379, 380,
- /* 1010 */ 381, 382, 383, 384, 316, 386, 327, 352, 389, 90,
- /* 1020 */ 418, 0, 393, 394, 422, 327, 0, 93, 374, 375,
- /* 1030 */ 96, 12, 13, 344, 405, 125, 126, 43, 316, 316,
- /* 1040 */ 386, 22, 344, 35, 127, 316, 316, 418, 22, 327,
- /* 1050 */ 327, 422, 33, 43, 35, 328, 327, 327, 316, 93,
- /* 1060 */ 316, 308, 96, 336, 371, 316, 344, 344, 47, 327,
- /* 1070 */ 197, 327, 345, 344, 344, 56, 327, 43, 43, 162,
- /* 1080 */ 163, 164, 328, 316, 167, 35, 344, 68, 344, 336,
- /* 1090 */ 336, 43, 412, 344, 327, 93, 157, 344, 96, 345,
- /* 1100 */ 183, 61, 349, 186, 351, 188, 189, 190, 191, 192,
- /* 1110 */ 328, 344, 46, 93, 43, 43, 96, 336, 336, 0,
- /* 1120 */ 228, 0, 43, 308, 90, 90, 43, 345, 375, 244,
- /* 1130 */ 111, 43, 379, 380, 381, 382, 383, 384, 90, 386,
- /* 1140 */ 43, 22, 389, 22, 227, 197, 393, 394, 395, 1,
- /* 1150 */ 2, 336, 13, 43, 13, 89, 317, 404, 43, 344,
- /* 1160 */ 43, 90, 90, 315, 349, 348, 351, 43, 43, 90,
- /* 1170 */ 43, 378, 403, 90, 35, 308, 35, 419, 90, 396,
- /* 1180 */ 172, 419, 419, 406, 229, 373, 372, 90, 35, 170,
- /* 1190 */ 375, 172, 47, 168, 379, 380, 381, 382, 383, 384,
- /* 1200 */ 90, 386, 366, 336, 389, 90, 0, 90, 393, 394,
- /* 1210 */ 395, 344, 193, 194, 90, 90, 349, 90, 351, 404,
- /* 1220 */ 42, 68, 172, 20, 205, 206, 207, 208, 209, 210,
- /* 1230 */ 211, 308, 356, 193, 316, 316, 356, 152, 354, 20,
- /* 1240 */ 246, 308, 375, 354, 316, 310, 379, 380, 381, 382,
- /* 1250 */ 383, 384, 316, 386, 48, 310, 389, 316, 248, 336,
- /* 1260 */ 393, 394, 395, 20, 370, 351, 320, 344, 20, 336,
- /* 1270 */ 320, 404, 349, 20, 351, 363, 365, 344, 320, 363,
- /* 1280 */ 320, 320, 349, 320, 351, 316, 310, 364, 320, 336,
- /* 1290 */ 308, 310, 336, 336, 316, 336, 349, 364, 375, 369,
- /* 1300 */ 336, 336, 379, 380, 381, 382, 383, 384, 375, 386,
- /* 1310 */ 336, 336, 379, 380, 381, 382, 383, 384, 336, 386,
- /* 1320 */ 336, 336, 336, 318, 370, 175, 344, 318, 405, 316,
- /* 1330 */ 351, 349, 316, 351, 363, 318, 234, 349, 405, 359,
- /* 1340 */ 349, 418, 349, 154, 349, 422, 359, 349, 318, 357,
- /* 1350 */ 308, 418, 20, 349, 332, 422, 318, 375, 344, 359,
- /* 1360 */ 308, 379, 380, 381, 382, 383, 384, 378, 386, 235,
- /* 1370 */ 359, 389, 349, 411, 241, 393, 394, 349, 336, 161,
- /* 1380 */ 349, 411, 349, 243, 242, 413, 344, 414, 336, 230,
- /* 1390 */ 226, 349, 20, 351, 411, 344, 344, 373, 247, 245,
- /* 1400 */ 410, 349, 89, 351, 377, 409, 250, 408, 89, 308,
- /* 1410 */ 340, 349, 326, 318, 316, 36, 311, 375, 421, 310,
- /* 1420 */ 367, 379, 380, 381, 382, 383, 384, 375, 386, 426,
- /* 1430 */ 362, 379, 380, 381, 382, 383, 384, 336, 386, 421,
- /* 1440 */ 0, 389, 341, 330, 330, 344, 394, 392, 330, 420,
- /* 1450 */ 349, 319, 351, 421, 0, 306, 420, 420, 177, 0,
- /* 1460 */ 308, 0, 42, 0, 35, 423, 424, 187, 35, 35,
- /* 1470 */ 35, 187, 0, 35, 35, 187, 375, 0, 0, 187,
- /* 1480 */ 379, 380, 381, 382, 383, 384, 35, 386, 336, 0,
- /* 1490 */ 22, 0, 35, 341, 172, 170, 344, 0, 0, 166,
- /* 1500 */ 165, 349, 46, 351, 0, 0, 0, 42, 0, 0,
- /* 1510 */ 0, 149, 308, 0, 0, 0, 0, 0, 144, 35,
- /* 1520 */ 0, 144, 308, 42, 0, 0, 0, 375, 0, 0,
- /* 1530 */ 0, 379, 380, 381, 382, 383, 384, 0, 386, 308,
- /* 1540 */ 336, 0, 0, 0, 0, 0, 0, 0, 344, 0,
- /* 1550 */ 336, 0, 0, 349, 0, 351, 0, 0, 344, 0,
- /* 1560 */ 22, 0, 0, 349, 0, 351, 0, 336, 56, 0,
- /* 1570 */ 0, 0, 39, 42, 56, 344, 0, 43, 46, 375,
- /* 1580 */ 349, 14, 351, 379, 380, 381, 382, 383, 384, 375,
- /* 1590 */ 386, 161, 14, 379, 380, 381, 382, 383, 384, 308,
- /* 1600 */ 386, 46, 0, 40, 39, 0, 375, 0, 0, 0,
- /* 1610 */ 379, 380, 381, 382, 383, 384, 308, 386, 39, 388,
- /* 1620 */ 0, 417, 62, 0, 0, 35, 39, 336, 0, 47,
- /* 1630 */ 35, 47, 341, 39, 0, 344, 35, 39, 424, 47,
- /* 1640 */ 349, 0, 351, 47, 336, 35, 0, 0, 0, 341,
- /* 1650 */ 39, 0, 344, 35, 22, 0, 98, 349, 43, 351,
- /* 1660 */ 35, 35, 43, 0, 96, 35, 375, 22, 0, 0,
- /* 1670 */ 379, 380, 381, 382, 383, 384, 22, 386, 22, 22,
- /* 1680 */ 49, 0, 0, 375, 308, 35, 33, 379, 380, 381,
- /* 1690 */ 382, 383, 384, 35, 386, 0, 35, 22, 20, 0,
- /* 1700 */ 47, 35, 308, 0, 154, 52, 53, 54, 55, 56,
- /* 1710 */ 157, 22, 336, 173, 0, 0, 0, 0, 0, 90,
- /* 1720 */ 344, 35, 89, 0, 89, 349, 89, 351, 0, 157,
- /* 1730 */ 336, 157, 159, 39, 46, 155, 43, 99, 344, 153,
- /* 1740 */ 43, 88, 231, 349, 91, 351, 46, 43, 89, 43,
- /* 1750 */ 231, 375, 90, 89, 89, 379, 380, 381, 382, 383,
- /* 1760 */ 384, 308, 386, 90, 90, 182, 89, 46, 89, 375,
- /* 1770 */ 90, 46, 89, 379, 380, 381, 382, 383, 384, 89,
- /* 1780 */ 386, 90, 90, 43, 46, 89, 43, 46, 90, 336,
- /* 1790 */ 35, 90, 90, 35, 35, 35, 35, 344, 35, 2,
- /* 1800 */ 225, 22, 349, 90, 351, 193, 153, 154, 231, 156,
- /* 1810 */ 308, 43, 90, 160, 90, 46, 89, 46, 89, 89,
- /* 1820 */ 89, 89, 22, 89, 35, 90, 35, 90, 375, 176,
- /* 1830 */ 35, 308, 379, 380, 381, 382, 383, 384, 336, 386,
- /* 1840 */ 89, 89, 100, 90, 195, 35, 344, 90, 35, 35,
- /* 1850 */ 22, 349, 90, 351, 89, 89, 89, 113, 113, 336,
- /* 1860 */ 113, 113, 89, 89, 43, 101, 35, 344, 22, 89,
- /* 1870 */ 62, 35, 349, 61, 351, 35, 35, 375, 35, 35,
- /* 1880 */ 35, 379, 380, 381, 382, 383, 384, 35, 386, 308,
- /* 1890 */ 68, 87, 43, 35, 35, 22, 35, 22, 375, 308,
- /* 1900 */ 35, 35, 379, 380, 381, 382, 383, 384, 22, 386,
- /* 1910 */ 68, 35, 35, 35, 35, 35, 35, 336, 0, 35,
- /* 1920 */ 0, 47, 39, 35, 39, 344, 0, 336, 35, 47,
- /* 1930 */ 349, 39, 351, 0, 47, 344, 35, 39, 47, 0,
- /* 1940 */ 349, 35, 351, 0, 35, 22, 21, 427, 427, 22,
- /* 1950 */ 22, 308, 21, 427, 20, 427, 375, 427, 427, 427,
- /* 1960 */ 379, 380, 381, 382, 383, 384, 375, 386, 308, 427,
- /* 1970 */ 379, 380, 381, 382, 383, 384, 427, 386, 427, 336,
- /* 1980 */ 427, 427, 427, 427, 427, 427, 427, 344, 427, 427,
- /* 1990 */ 427, 427, 349, 427, 351, 427, 336, 427, 427, 427,
- /* 2000 */ 427, 427, 427, 427, 344, 427, 427, 427, 427, 349,
- /* 2010 */ 427, 351, 427, 427, 427, 427, 427, 308, 375, 427,
- /* 2020 */ 427, 427, 379, 380, 381, 382, 383, 384, 427, 386,
- /* 2030 */ 427, 427, 427, 427, 308, 375, 427, 427, 427, 379,
- /* 2040 */ 380, 381, 382, 383, 384, 336, 386, 427, 427, 427,
- /* 2050 */ 427, 427, 427, 344, 427, 427, 427, 427, 349, 427,
- /* 2060 */ 351, 427, 336, 427, 427, 427, 427, 427, 427, 427,
- /* 2070 */ 344, 427, 427, 427, 427, 349, 427, 351, 427, 427,
- /* 2080 */ 427, 427, 427, 308, 375, 427, 427, 427, 379, 380,
- /* 2090 */ 381, 382, 383, 384, 427, 386, 427, 427, 427, 427,
- /* 2100 */ 427, 375, 427, 427, 427, 379, 380, 381, 382, 383,
- /* 2110 */ 384, 336, 386, 427, 427, 427, 427, 427, 427, 344,
- /* 2120 */ 427, 427, 427, 427, 349, 427, 351, 427, 427, 427,
- /* 2130 */ 427, 427, 427, 427, 427, 427, 427, 308, 427, 427,
- /* 2140 */ 427, 427, 427, 427, 427, 427, 427, 308, 427, 427,
- /* 2150 */ 375, 427, 427, 427, 379, 380, 381, 382, 383, 384,
- /* 2160 */ 427, 386, 427, 427, 427, 336, 427, 427, 427, 427,
- /* 2170 */ 427, 427, 427, 344, 427, 336, 427, 427, 349, 427,
- /* 2180 */ 351, 427, 427, 344, 427, 427, 427, 427, 349, 427,
- /* 2190 */ 351, 427, 427, 427, 427, 427, 427, 427, 308, 427,
- /* 2200 */ 427, 427, 427, 427, 375, 427, 427, 427, 379, 380,
- /* 2210 */ 381, 382, 383, 384, 375, 386, 308, 427, 379, 380,
- /* 2220 */ 381, 382, 383, 384, 427, 386, 336, 427, 427, 427,
- /* 2230 */ 427, 427, 427, 427, 344, 427, 427, 427, 427, 349,
- /* 2240 */ 427, 351, 427, 427, 336, 427, 427, 427, 427, 427,
- /* 2250 */ 427, 427, 344, 427, 427, 427, 427, 349, 427, 351,
- /* 2260 */ 427, 427, 427, 427, 427, 375, 427, 427, 427, 379,
- /* 2270 */ 380, 381, 382, 383, 384, 308, 386, 427, 427, 427,
- /* 2280 */ 427, 427, 427, 375, 427, 427, 427, 379, 380, 381,
- /* 2290 */ 382, 383, 384, 427, 386, 427, 308, 427, 427, 427,
- /* 2300 */ 427, 427, 427, 336, 427, 427, 427, 427, 427, 427,
- /* 2310 */ 427, 344, 427, 427, 427, 427, 349, 427, 351, 427,
- /* 2320 */ 427, 427, 427, 427, 336, 427, 427, 427, 427, 427,
- /* 2330 */ 427, 427, 344, 427, 427, 427, 427, 349, 427, 351,
- /* 2340 */ 427, 427, 375, 427, 427, 427, 379, 380, 381, 382,
- /* 2350 */ 383, 384, 308, 386, 427, 427, 427, 427, 427, 427,
- /* 2360 */ 427, 427, 308, 375, 427, 427, 427, 379, 380, 381,
- /* 2370 */ 382, 383, 384, 427, 386, 427, 427, 427, 427, 427,
- /* 2380 */ 336, 427, 427, 427, 427, 427, 427, 427, 344, 427,
- /* 2390 */ 336, 427, 427, 349, 316, 351, 427, 427, 344, 427,
- /* 2400 */ 427, 427, 427, 349, 427, 351, 427, 427, 427, 427,
- /* 2410 */ 427, 427, 427, 427, 427, 427, 427, 427, 427, 375,
- /* 2420 */ 427, 427, 344, 379, 380, 381, 382, 383, 384, 375,
- /* 2430 */ 386, 427, 427, 379, 380, 381, 382, 383, 384, 427,
- /* 2440 */ 386, 316, 364, 427, 427, 427, 427, 427, 427, 427,
- /* 2450 */ 427, 427, 427, 427, 427, 427, 427, 427, 427, 427,
- /* 2460 */ 427, 383, 427, 427, 427, 427, 427, 427, 427, 344,
- /* 2470 */ 427, 427, 427, 427, 427, 427, 398, 399, 400, 427,
- /* 2480 */ 402, 427, 427, 405, 427, 427, 427, 427, 427, 364,
- /* 2490 */ 427, 427, 427, 427, 427, 427, 418, 427, 427, 427,
- /* 2500 */ 422, 427, 427, 427, 427, 427, 427, 427, 383, 427,
- /* 2510 */ 427, 427, 427, 427, 427, 427, 427, 427, 427, 427,
- /* 2520 */ 427, 427, 427, 398, 399, 400, 427, 402, 427, 427,
- /* 2530 */ 405, 427, 427, 427, 427, 427, 427, 427, 427, 427,
- /* 2540 */ 427, 427, 427, 418, 427, 427, 427, 422,
+ /* 0 */ 316, 312, 318, 319, 316, 351, 318, 319, 354, 355,
+ /* 10 */ 8, 9, 12, 13, 12, 13, 14, 15, 16, 342,
+ /* 20 */ 20, 320, 22, 342, 12, 13, 14, 15, 16, 340,
+ /* 30 */ 353, 340, 331, 33, 353, 35, 0, 348, 347, 338,
+ /* 40 */ 393, 394, 353, 0, 355, 320, 311, 356, 313, 348,
+ /* 50 */ 20, 312, 22, 342, 377, 378, 56, 368, 377, 378,
+ /* 60 */ 379, 61, 354, 355, 353, 35, 389, 378, 68, 0,
+ /* 70 */ 389, 382, 383, 384, 385, 386, 387, 0, 389, 340,
+ /* 80 */ 50, 392, 357, 47, 342, 396, 397, 348, 377, 378,
+ /* 90 */ 379, 48, 353, 93, 355, 353, 94, 408, 21, 408,
+ /* 100 */ 389, 24, 25, 26, 27, 28, 29, 30, 31, 32,
+ /* 110 */ 421, 340, 421, 20, 425, 115, 425, 378, 347, 377,
+ /* 120 */ 378, 382, 383, 384, 385, 386, 387, 356, 389, 129,
+ /* 130 */ 130, 389, 63, 64, 65, 66, 67, 0, 69, 70,
+ /* 140 */ 71, 72, 73, 74, 75, 76, 77, 78, 79, 80,
+ /* 150 */ 81, 82, 83, 84, 85, 86, 87, 88, 89, 90,
+ /* 160 */ 14, 15, 16, 8, 9, 426, 427, 12, 13, 14,
+ /* 170 */ 15, 16, 20, 20, 20, 175, 20, 177, 106, 107,
+ /* 180 */ 108, 109, 110, 111, 112, 113, 114, 115, 116, 93,
+ /* 190 */ 118, 119, 120, 121, 122, 123, 60, 60, 198, 199,
+ /* 200 */ 320, 201, 202, 203, 204, 205, 206, 207, 208, 209,
+ /* 210 */ 210, 211, 212, 213, 214, 215, 216, 217, 218, 12,
+ /* 220 */ 13, 14, 129, 130, 20, 340, 21, 20, 348, 22,
+ /* 230 */ 8, 9, 232, 348, 12, 13, 14, 15, 16, 34,
+ /* 240 */ 33, 36, 35, 64, 65, 93, 93, 93, 368, 21,
+ /* 250 */ 71, 368, 24, 25, 26, 27, 28, 29, 30, 31,
+ /* 260 */ 32, 82, 83, 56, 4, 43, 386, 88, 61, 384,
+ /* 270 */ 174, 322, 176, 4, 312, 68, 12, 13, 320, 19,
+ /* 280 */ 4, 401, 402, 403, 20, 405, 22, 105, 408, 331,
+ /* 290 */ 312, 408, 343, 33, 39, 332, 338, 33, 312, 35,
+ /* 300 */ 93, 421, 340, 340, 421, 425, 348, 47, 425, 127,
+ /* 310 */ 348, 51, 349, 44, 45, 353, 56, 355, 340, 325,
+ /* 320 */ 56, 408, 115, 329, 317, 61, 348, 320, 232, 198,
+ /* 330 */ 368, 353, 68, 355, 421, 78, 129, 130, 425, 353,
+ /* 340 */ 378, 43, 56, 355, 382, 383, 384, 385, 386, 387,
+ /* 350 */ 362, 389, 92, 365, 392, 95, 378, 93, 396, 397,
+ /* 360 */ 382, 383, 384, 385, 386, 387, 388, 389, 390, 391,
+ /* 370 */ 408, 0, 241, 242, 243, 244, 245, 124, 92, 115,
+ /* 380 */ 340, 95, 175, 421, 177, 232, 232, 425, 312, 349,
+ /* 390 */ 133, 134, 94, 129, 130, 24, 25, 26, 27, 28,
+ /* 400 */ 29, 30, 31, 32, 93, 198, 199, 35, 201, 202,
+ /* 410 */ 203, 204, 205, 206, 207, 208, 209, 210, 211, 212,
+ /* 420 */ 213, 214, 215, 216, 217, 218, 8, 9, 340, 353,
+ /* 430 */ 12, 13, 14, 15, 16, 347, 2, 184, 185, 175,
+ /* 440 */ 68, 177, 8, 9, 356, 2, 12, 13, 14, 15,
+ /* 450 */ 16, 8, 9, 56, 43, 12, 13, 14, 15, 16,
+ /* 460 */ 312, 37, 198, 199, 160, 201, 202, 203, 204, 205,
+ /* 470 */ 206, 207, 208, 209, 210, 211, 212, 213, 214, 215,
+ /* 480 */ 216, 217, 218, 12, 13, 18, 231, 20, 340, 320,
+ /* 490 */ 93, 20, 95, 22, 27, 125, 348, 30, 124, 125,
+ /* 500 */ 331, 353, 408, 355, 33, 20, 35, 14, 368, 233,
+ /* 510 */ 368, 408, 94, 20, 47, 421, 49, 348, 51, 425,
+ /* 520 */ 96, 93, 98, 99, 421, 101, 378, 56, 425, 105,
+ /* 530 */ 382, 383, 384, 385, 386, 387, 348, 389, 20, 68,
+ /* 540 */ 392, 237, 238, 232, 396, 397, 398, 359, 408, 22,
+ /* 550 */ 408, 127, 182, 183, 19, 4, 186, 183, 410, 92,
+ /* 560 */ 186, 421, 35, 421, 93, 425, 418, 425, 33, 320,
+ /* 570 */ 20, 104, 339, 8, 9, 332, 332, 12, 13, 14,
+ /* 580 */ 15, 16, 47, 340, 340, 352, 115, 52, 53, 54,
+ /* 590 */ 55, 56, 349, 349, 309, 68, 20, 348, 22, 132,
+ /* 600 */ 129, 130, 135, 136, 137, 138, 139, 140, 141, 142,
+ /* 610 */ 143, 144, 145, 146, 147, 148, 149, 150, 151, 152,
+ /* 620 */ 312, 154, 155, 156, 60, 14, 50, 92, 14, 232,
+ /* 630 */ 95, 20, 8, 9, 20, 386, 12, 13, 14, 15,
+ /* 640 */ 16, 20, 115, 157, 93, 105, 175, 162, 177, 400,
+ /* 650 */ 401, 402, 403, 368, 405, 368, 1, 2, 326, 327,
+ /* 660 */ 232, 353, 251, 128, 124, 125, 126, 127, 128, 198,
+ /* 670 */ 199, 0, 201, 202, 203, 204, 205, 206, 207, 208,
+ /* 680 */ 209, 210, 211, 212, 213, 214, 215, 216, 217, 218,
+ /* 690 */ 12, 13, 14, 408, 68, 408, 161, 368, 20, 170,
+ /* 700 */ 22, 324, 175, 232, 177, 219, 421, 324, 421, 322,
+ /* 710 */ 425, 33, 425, 35, 179, 229, 181, 312, 189, 190,
+ /* 720 */ 337, 0, 335, 346, 312, 198, 199, 348, 312, 346,
+ /* 730 */ 343, 312, 167, 320, 56, 8, 9, 408, 359, 12,
+ /* 740 */ 13, 14, 15, 16, 331, 320, 68, 12, 13, 94,
+ /* 750 */ 421, 320, 340, 0, 425, 20, 331, 22, 353, 312,
+ /* 760 */ 348, 348, 331, 317, 320, 353, 320, 355, 33, 353,
+ /* 770 */ 35, 93, 353, 348, 320, 326, 327, 355, 320, 348,
+ /* 780 */ 368, 60, 3, 232, 20, 331, 162, 365, 312, 331,
+ /* 790 */ 378, 56, 348, 115, 382, 383, 384, 385, 386, 387,
+ /* 800 */ 353, 389, 348, 68, 12, 13, 348, 129, 130, 104,
+ /* 810 */ 312, 320, 320, 202, 22, 320, 202, 64, 65, 254,
+ /* 820 */ 408, 94, 331, 331, 71, 33, 331, 35, 93, 353,
+ /* 830 */ 386, 320, 312, 421, 312, 82, 83, 425, 68, 348,
+ /* 840 */ 348, 88, 331, 348, 20, 401, 402, 403, 56, 405,
+ /* 850 */ 115, 353, 364, 175, 366, 177, 328, 341, 330, 348,
+ /* 860 */ 68, 93, 8, 9, 129, 130, 12, 13, 14, 15,
+ /* 870 */ 16, 103, 364, 353, 366, 353, 198, 199, 312, 201,
+ /* 880 */ 202, 203, 204, 205, 206, 207, 208, 209, 210, 211,
+ /* 890 */ 212, 213, 214, 215, 216, 217, 218, 320, 320, 320,
+ /* 900 */ 320, 97, 320, 320, 100, 380, 372, 115, 331, 331,
+ /* 910 */ 175, 331, 177, 331, 331, 61, 341, 312, 350, 353,
+ /* 920 */ 312, 353, 312, 44, 45, 348, 348, 348, 348, 404,
+ /* 930 */ 348, 348, 312, 198, 199, 312, 201, 202, 203, 204,
+ /* 940 */ 205, 206, 207, 208, 209, 210, 211, 212, 213, 214,
+ /* 950 */ 215, 216, 217, 218, 12, 13, 102, 320, 353, 320,
+ /* 960 */ 341, 353, 20, 353, 22, 386, 202, 175, 331, 177,
+ /* 970 */ 331, 230, 231, 353, 33, 33, 353, 35, 312, 312,
+ /* 980 */ 401, 402, 403, 0, 405, 348, 162, 348, 47, 312,
+ /* 990 */ 198, 199, 380, 52, 53, 54, 55, 56, 56, 340,
+ /* 1000 */ 42, 43, 210, 211, 212, 213, 214, 215, 216, 0,
+ /* 1010 */ 68, 157, 320, 320, 22, 356, 404, 340, 350, 353,
+ /* 1020 */ 353, 353, 345, 331, 331, 348, 380, 35, 249, 18,
+ /* 1030 */ 353, 43, 355, 92, 23, 93, 95, 61, 8, 9,
+ /* 1040 */ 348, 348, 12, 13, 14, 15, 16, 43, 37, 38,
+ /* 1050 */ 404, 333, 41, 97, 336, 378, 100, 115, 56, 382,
+ /* 1060 */ 383, 384, 385, 386, 387, 43, 389, 43, 57, 58,
+ /* 1070 */ 59, 129, 130, 219, 220, 221, 222, 223, 224, 225,
+ /* 1080 */ 226, 227, 228, 229, 97, 35, 97, 100, 105, 100,
+ /* 1090 */ 0, 61, 162, 163, 1, 2, 35, 95, 94, 158,
+ /* 1100 */ 159, 0, 161, 0, 93, 329, 165, 124, 125, 126,
+ /* 1110 */ 127, 128, 22, 43, 105, 43, 94, 175, 94, 177,
+ /* 1120 */ 43, 43, 181, 22, 94, 22, 129, 130, 46, 341,
+ /* 1130 */ 43, 43, 102, 124, 125, 126, 127, 128, 43, 43,
+ /* 1140 */ 198, 199, 131, 201, 202, 203, 204, 205, 206, 207,
+ /* 1150 */ 208, 209, 210, 211, 212, 213, 214, 215, 216, 217,
+ /* 1160 */ 218, 43, 43, 43, 94, 43, 94, 35, 341, 13,
+ /* 1170 */ 13, 94, 94, 341, 198, 93, 341, 313, 167, 168,
+ /* 1180 */ 169, 94, 94, 172, 428, 419, 321, 157, 415, 94,
+ /* 1190 */ 94, 35, 35, 340, 312, 321, 319, 352, 381, 188,
+ /* 1200 */ 68, 406, 191, 422, 193, 194, 195, 196, 197, 399,
+ /* 1210 */ 409, 422, 94, 94, 94, 422, 94, 376, 234, 47,
+ /* 1220 */ 173, 375, 340, 42, 20, 370, 360, 177, 320, 320,
+ /* 1230 */ 348, 360, 157, 358, 358, 353, 320, 355, 177, 320,
+ /* 1240 */ 320, 253, 20, 232, 20, 314, 314, 312, 324, 219,
+ /* 1250 */ 220, 221, 222, 223, 224, 225, 226, 227, 228, 229,
+ /* 1260 */ 378, 374, 355, 324, 382, 383, 384, 385, 386, 387,
+ /* 1270 */ 20, 389, 367, 20, 392, 340, 324, 367, 396, 397,
+ /* 1280 */ 398, 369, 314, 348, 324, 324, 324, 312, 353, 320,
+ /* 1290 */ 355, 324, 374, 340, 340, 340, 314, 340, 320, 340,
+ /* 1300 */ 418, 353, 322, 340, 340, 340, 340, 180, 322, 373,
+ /* 1310 */ 340, 312, 340, 378, 320, 340, 320, 382, 383, 384,
+ /* 1320 */ 385, 386, 387, 348, 389, 322, 355, 392, 353, 159,
+ /* 1330 */ 355, 396, 397, 398, 353, 322, 239, 363, 353, 340,
+ /* 1340 */ 367, 322, 407, 353, 353, 20, 336, 348, 353, 348,
+ /* 1350 */ 240, 312, 353, 378, 355, 246, 363, 382, 383, 384,
+ /* 1360 */ 385, 386, 387, 414, 389, 353, 361, 353, 353, 363,
+ /* 1370 */ 363, 353, 166, 353, 248, 247, 235, 378, 231, 340,
+ /* 1380 */ 348, 382, 383, 384, 385, 386, 387, 348, 389, 381,
+ /* 1390 */ 413, 392, 353, 414, 355, 396, 397, 398, 252, 20,
+ /* 1400 */ 250, 376, 93, 255, 312, 93, 407, 380, 344, 353,
+ /* 1410 */ 330, 320, 36, 371, 315, 417, 416, 378, 314, 366,
+ /* 1420 */ 322, 382, 383, 384, 385, 386, 387, 310, 389, 412,
+ /* 1430 */ 414, 392, 340, 395, 411, 396, 397, 398, 323, 334,
+ /* 1440 */ 348, 334, 334, 0, 423, 353, 407, 355, 0, 0,
+ /* 1450 */ 424, 182, 0, 424, 312, 424, 423, 429, 423, 42,
+ /* 1460 */ 368, 0, 35, 192, 35, 35, 35, 192, 0, 35,
+ /* 1470 */ 378, 312, 35, 192, 382, 383, 384, 385, 386, 387,
+ /* 1480 */ 0, 389, 340, 192, 0, 35, 0, 22, 0, 35,
+ /* 1490 */ 348, 177, 175, 0, 0, 353, 170, 355, 171, 340,
+ /* 1500 */ 408, 0, 0, 46, 0, 0, 0, 348, 42, 0,
+ /* 1510 */ 0, 0, 353, 421, 355, 153, 0, 425, 0, 0,
+ /* 1520 */ 378, 312, 148, 0, 382, 383, 384, 385, 386, 387,
+ /* 1530 */ 0, 389, 0, 35, 392, 148, 0, 378, 396, 397,
+ /* 1540 */ 0, 382, 383, 384, 385, 386, 387, 0, 389, 340,
+ /* 1550 */ 0, 392, 0, 0, 0, 396, 397, 348, 0, 0,
+ /* 1560 */ 0, 0, 353, 0, 355, 0, 0, 42, 0, 312,
+ /* 1570 */ 0, 0, 0, 0, 0, 0, 0, 22, 0, 312,
+ /* 1580 */ 56, 0, 56, 0, 0, 42, 40, 378, 0, 0,
+ /* 1590 */ 39, 382, 383, 384, 385, 386, 387, 340, 389, 43,
+ /* 1600 */ 14, 392, 345, 46, 14, 348, 397, 340, 46, 0,
+ /* 1610 */ 353, 0, 355, 0, 0, 348, 39, 166, 39, 0,
+ /* 1620 */ 353, 0, 355, 0, 0, 0, 62, 0, 35, 47,
+ /* 1630 */ 312, 35, 47, 39, 39, 378, 0, 35, 47, 382,
+ /* 1640 */ 383, 384, 385, 386, 387, 378, 389, 39, 0, 382,
+ /* 1650 */ 383, 384, 385, 386, 387, 35, 389, 47, 340, 22,
+ /* 1660 */ 0, 39, 0, 0, 0, 0, 348, 35, 35, 102,
+ /* 1670 */ 35, 353, 43, 355, 43, 35, 35, 22, 0, 22,
+ /* 1680 */ 0, 0, 35, 22, 0, 35, 0, 420, 49, 22,
+ /* 1690 */ 312, 35, 0, 22, 20, 0, 378, 100, 0, 162,
+ /* 1700 */ 382, 383, 384, 385, 386, 387, 178, 389, 312, 22,
+ /* 1710 */ 35, 162, 0, 0, 0, 0, 0, 35, 340, 93,
+ /* 1720 */ 187, 0, 94, 93, 0, 46, 348, 93, 159, 39,
+ /* 1730 */ 230, 353, 43, 355, 43, 93, 340, 93, 93, 236,
+ /* 1740 */ 94, 345, 43, 94, 348, 427, 103, 160, 94, 353,
+ /* 1750 */ 46, 355, 94, 46, 43, 35, 378, 158, 312, 93,
+ /* 1760 */ 382, 383, 384, 385, 386, 387, 162, 389, 46, 391,
+ /* 1770 */ 164, 94, 43, 46, 378, 236, 93, 46, 382, 383,
+ /* 1780 */ 384, 385, 386, 387, 312, 389, 340, 93, 43, 93,
+ /* 1790 */ 93, 345, 35, 35, 348, 94, 94, 94, 35, 353,
+ /* 1800 */ 94, 355, 35, 35, 2, 22, 46, 43, 312, 22,
+ /* 1810 */ 198, 93, 340, 35, 104, 46, 200, 94, 93, 93,
+ /* 1820 */ 348, 35, 94, 93, 378, 353, 94, 355, 382, 383,
+ /* 1830 */ 384, 385, 386, 387, 312, 389, 340, 236, 94, 93,
+ /* 1840 */ 93, 93, 35, 94, 348, 93, 35, 94, 93, 353,
+ /* 1850 */ 378, 355, 35, 35, 382, 383, 384, 385, 386, 387,
+ /* 1860 */ 94, 389, 340, 93, 22, 94, 93, 117, 117, 93,
+ /* 1870 */ 348, 117, 35, 117, 378, 353, 43, 355, 382, 383,
+ /* 1880 */ 384, 385, 386, 387, 105, 389, 93, 93, 22, 62,
+ /* 1890 */ 61, 35, 312, 35, 35, 35, 35, 35, 35, 35,
+ /* 1900 */ 378, 91, 35, 35, 382, 383, 384, 385, 386, 387,
+ /* 1910 */ 312, 389, 320, 68, 43, 35, 35, 22, 35, 22,
+ /* 1920 */ 340, 35, 35, 35, 35, 68, 35, 35, 348, 35,
+ /* 1930 */ 22, 35, 0, 353, 35, 355, 35, 47, 340, 39,
+ /* 1940 */ 348, 0, 35, 39, 0, 47, 348, 35, 39, 47,
+ /* 1950 */ 0, 353, 35, 355, 39, 0, 0, 47, 378, 35,
+ /* 1960 */ 368, 35, 382, 383, 384, 385, 386, 387, 22, 389,
+ /* 1970 */ 21, 312, 430, 22, 22, 21, 378, 20, 386, 430,
+ /* 1980 */ 382, 383, 384, 385, 386, 387, 430, 389, 430, 430,
+ /* 1990 */ 430, 430, 430, 401, 402, 403, 430, 405, 430, 340,
+ /* 2000 */ 408, 430, 430, 430, 430, 430, 430, 348, 430, 430,
+ /* 2010 */ 430, 430, 353, 421, 355, 430, 430, 425, 430, 430,
+ /* 2020 */ 430, 312, 430, 430, 430, 430, 430, 430, 430, 430,
+ /* 2030 */ 430, 312, 430, 430, 430, 430, 430, 378, 430, 430,
+ /* 2040 */ 430, 382, 383, 384, 385, 386, 387, 430, 389, 340,
+ /* 2050 */ 430, 430, 430, 430, 430, 430, 430, 348, 430, 340,
+ /* 2060 */ 430, 430, 353, 430, 355, 430, 430, 348, 430, 430,
+ /* 2070 */ 430, 312, 353, 430, 355, 430, 430, 430, 430, 430,
+ /* 2080 */ 430, 430, 430, 430, 430, 430, 430, 378, 430, 430,
+ /* 2090 */ 430, 382, 383, 384, 385, 386, 387, 378, 389, 340,
+ /* 2100 */ 430, 382, 383, 384, 385, 386, 387, 348, 389, 430,
+ /* 2110 */ 430, 430, 353, 430, 355, 430, 430, 430, 430, 430,
+ /* 2120 */ 430, 312, 430, 430, 430, 430, 430, 430, 430, 430,
+ /* 2130 */ 430, 430, 430, 312, 430, 430, 430, 378, 430, 430,
+ /* 2140 */ 430, 382, 383, 384, 385, 386, 387, 312, 389, 340,
+ /* 2150 */ 430, 430, 430, 430, 430, 430, 430, 348, 430, 430,
+ /* 2160 */ 430, 340, 353, 430, 355, 430, 430, 430, 430, 348,
+ /* 2170 */ 430, 430, 430, 430, 353, 340, 355, 430, 430, 430,
+ /* 2180 */ 430, 430, 430, 348, 430, 430, 430, 378, 353, 430,
+ /* 2190 */ 355, 382, 383, 384, 385, 386, 387, 430, 389, 378,
+ /* 2200 */ 430, 430, 430, 382, 383, 384, 385, 386, 387, 430,
+ /* 2210 */ 389, 430, 430, 378, 430, 430, 430, 382, 383, 384,
+ /* 2220 */ 385, 386, 387, 312, 389, 430, 430, 430, 430, 430,
+ /* 2230 */ 430, 430, 430, 312, 430, 430, 430, 430, 430, 430,
+ /* 2240 */ 430, 430, 430, 430, 430, 430, 430, 430, 430, 430,
+ /* 2250 */ 430, 340, 430, 430, 430, 430, 430, 430, 430, 348,
+ /* 2260 */ 430, 340, 430, 430, 353, 430, 355, 430, 430, 348,
+ /* 2270 */ 430, 430, 430, 430, 353, 430, 355, 430, 430, 430,
+ /* 2280 */ 430, 430, 430, 430, 312, 430, 430, 430, 430, 378,
+ /* 2290 */ 430, 430, 430, 382, 383, 384, 385, 386, 387, 378,
+ /* 2300 */ 389, 430, 430, 382, 383, 384, 385, 386, 387, 430,
+ /* 2310 */ 389, 430, 340, 430, 430, 430, 430, 430, 430, 430,
+ /* 2320 */ 348, 430, 430, 430, 430, 353, 430, 355, 430, 430,
+ /* 2330 */ 430, 430, 430, 430, 312, 430, 430, 430, 430, 430,
+ /* 2340 */ 430, 430, 430, 430, 312, 430, 430, 430, 430, 430,
+ /* 2350 */ 378, 430, 430, 430, 382, 383, 384, 385, 386, 387,
+ /* 2360 */ 430, 389, 340, 430, 430, 430, 430, 430, 430, 430,
+ /* 2370 */ 348, 430, 340, 430, 430, 353, 430, 355, 430, 430,
+ /* 2380 */ 348, 430, 430, 430, 312, 353, 430, 355, 430, 430,
+ /* 2390 */ 430, 430, 430, 430, 430, 430, 430, 430, 430, 430,
+ /* 2400 */ 378, 430, 430, 430, 382, 383, 384, 385, 386, 387,
+ /* 2410 */ 378, 389, 340, 430, 382, 383, 384, 385, 386, 387,
+ /* 2420 */ 348, 389, 430, 430, 430, 353, 430, 355, 430, 430,
+ /* 2430 */ 430, 430, 430, 430, 312, 430, 430, 430, 430, 430,
+ /* 2440 */ 430, 430, 430, 430, 430, 430, 430, 430, 430, 430,
+ /* 2450 */ 378, 430, 430, 430, 382, 383, 384, 385, 386, 387,
+ /* 2460 */ 430, 389, 340, 430, 430, 430, 430, 430, 430, 430,
+ /* 2470 */ 348, 430, 430, 430, 430, 353, 430, 355, 430, 430,
+ /* 2480 */ 430, 430, 430, 430, 430, 430, 430, 430, 430, 430,
+ /* 2490 */ 430, 430, 430, 430, 430, 430, 430, 430, 430, 430,
+ /* 2500 */ 378, 430, 430, 430, 382, 383, 384, 385, 386, 387,
+ /* 2510 */ 430, 389,
};
-#define YY_SHIFT_COUNT (666)
+#define YY_SHIFT_COUNT (674)
#define YY_SHIFT_MIN (0)
-#define YY_SHIFT_MAX (1943)
+#define YY_SHIFT_MAX (1957)
static const unsigned short int yy_shift_ofst[] = {
- /* 0 */ 917, 0, 0, 62, 62, 264, 264, 264, 326, 326,
- /* 10 */ 264, 264, 391, 593, 720, 593, 593, 593, 593, 593,
- /* 20 */ 593, 593, 593, 593, 593, 593, 593, 593, 593, 593,
- /* 30 */ 593, 593, 593, 593, 593, 593, 593, 593, 593, 593,
- /* 40 */ 593, 593, 325, 325, 361, 361, 361, 1019, 1019, 473,
- /* 50 */ 1019, 1019, 389, 523, 283, 340, 283, 17, 17, 19,
- /* 60 */ 19, 55, 6, 283, 283, 17, 17, 17, 17, 17,
- /* 70 */ 17, 17, 17, 17, 17, 9, 17, 17, 17, 24,
- /* 80 */ 17, 17, 102, 17, 17, 102, 109, 17, 102, 102,
- /* 90 */ 102, 17, 135, 719, 662, 683, 683, 150, 213, 213,
- /* 100 */ 213, 213, 213, 213, 213, 213, 213, 213, 213, 213,
- /* 110 */ 213, 213, 213, 213, 213, 213, 213, 417, 155, 6,
- /* 120 */ 630, 630, 757, 634, 909, 501, 501, 501, 634, 195,
- /* 130 */ 195, 24, 292, 292, 102, 102, 255, 255, 287, 342,
- /* 140 */ 198, 198, 198, 198, 198, 198, 198, 660, 21, 383,
- /* 150 */ 565, 635, 5, 174, 125, 747, 873, 229, 497, 856,
- /* 160 */ 939, 552, 776, 552, 740, 885, 885, 885, 892, 948,
- /* 170 */ 955, 1145, 1025, 1178, 1203, 1203, 1178, 1085, 1085, 1203,
- /* 180 */ 1203, 1203, 1219, 1219, 1243, 9, 24, 9, 1248, 1253,
- /* 190 */ 9, 1248, 9, 9, 9, 1203, 9, 1219, 102, 102,
- /* 200 */ 102, 102, 102, 102, 102, 102, 102, 102, 102, 1203,
- /* 210 */ 1219, 255, 1243, 135, 1150, 24, 135, 1203, 1203, 1248,
- /* 220 */ 135, 1102, 255, 255, 255, 255, 1102, 255, 1189, 135,
- /* 230 */ 287, 135, 195, 1332, 255, 1134, 1102, 255, 255, 1134,
- /* 240 */ 1102, 255, 255, 102, 1133, 1218, 1134, 1140, 1142, 1159,
- /* 250 */ 955, 1164, 195, 1372, 1151, 1154, 1156, 1151, 1154, 1151,
- /* 260 */ 1154, 1313, 1319, 255, 342, 1203, 135, 1379, 1219, 2548,
- /* 270 */ 2548, 2548, 2548, 2548, 2548, 2548, 83, 1653, 214, 337,
- /* 280 */ 126, 209, 492, 562, 625, 672, 535, 322, 713, 713,
- /* 290 */ 713, 713, 713, 713, 713, 713, 717, 840, 405, 405,
- /* 300 */ 69, 458, 197, 309, 85, 111, 8, 394, 170, 170,
- /* 310 */ 170, 170, 929, 1021, 934, 966, 1002, 1020, 1026, 1119,
- /* 320 */ 1121, 516, 841, 1034, 1035, 1048, 1071, 1072, 1079, 1083,
- /* 330 */ 1148, 910, 994, 1010, 1088, 1008, 1050, 1040, 1097, 1066,
- /* 340 */ 1110, 1115, 1117, 1124, 1125, 1127, 731, 1139, 1141, 1153,
- /* 350 */ 1206, 1440, 1454, 1281, 1459, 1461, 1420, 1463, 1429, 1280,
- /* 360 */ 1433, 1434, 1435, 1284, 1472, 1438, 1439, 1288, 1477, 1292,
- /* 370 */ 1478, 1451, 1489, 1468, 1491, 1457, 1322, 1325, 1497, 1498,
- /* 380 */ 1333, 1335, 1504, 1505, 1456, 1506, 1465, 1508, 1509, 1510,
- /* 390 */ 1362, 1513, 1514, 1515, 1516, 1517, 1374, 1484, 1520, 1377,
- /* 400 */ 1528, 1529, 1530, 1537, 1541, 1542, 1543, 1544, 1545, 1546,
- /* 410 */ 1547, 1549, 1551, 1552, 1481, 1524, 1525, 1526, 1554, 1556,
- /* 420 */ 1557, 1538, 1559, 1561, 1562, 1564, 1566, 1512, 1569, 1518,
- /* 430 */ 1570, 1571, 1531, 1533, 1534, 1567, 1532, 1578, 1555, 1576,
- /* 440 */ 1563, 1565, 1602, 1605, 1607, 1579, 1430, 1608, 1609, 1620,
- /* 450 */ 1560, 1623, 1624, 1590, 1582, 1587, 1628, 1595, 1584, 1594,
- /* 460 */ 1634, 1601, 1592, 1598, 1641, 1610, 1596, 1611, 1646, 1647,
- /* 470 */ 1648, 1651, 1558, 1568, 1618, 1632, 1655, 1625, 1615, 1619,
- /* 480 */ 1626, 1630, 1645, 1663, 1654, 1668, 1656, 1631, 1669, 1657,
- /* 490 */ 1650, 1681, 1658, 1682, 1661, 1695, 1675, 1678, 1699, 1553,
- /* 500 */ 1666, 1703, 1540, 1689, 1572, 1550, 1714, 1715, 1574, 1573,
- /* 510 */ 1716, 1717, 1718, 1633, 1629, 1686, 1583, 1723, 1635, 1580,
- /* 520 */ 1637, 1728, 1694, 1586, 1659, 1638, 1688, 1693, 1511, 1664,
- /* 530 */ 1662, 1665, 1673, 1674, 1677, 1697, 1680, 1679, 1683, 1690,
- /* 540 */ 1691, 1704, 1700, 1721, 1696, 1706, 1519, 1692, 1698, 1725,
- /* 550 */ 1575, 1740, 1738, 1741, 1701, 1743, 1577, 1702, 1755, 1758,
- /* 560 */ 1759, 1760, 1761, 1763, 1702, 1797, 1779, 1612, 1768, 1727,
- /* 570 */ 1713, 1729, 1722, 1730, 1724, 1769, 1731, 1732, 1771, 1800,
- /* 580 */ 1649, 1734, 1742, 1735, 1789, 1791, 1751, 1737, 1795, 1752,
- /* 590 */ 1753, 1810, 1765, 1757, 1813, 1766, 1762, 1814, 1767, 1744,
- /* 600 */ 1745, 1747, 1748, 1828, 1764, 1773, 1774, 1831, 1780, 1821,
- /* 610 */ 1821, 1846, 1808, 1812, 1836, 1840, 1841, 1843, 1844, 1845,
- /* 620 */ 1852, 1822, 1804, 1849, 1858, 1859, 1873, 1861, 1875, 1865,
- /* 630 */ 1866, 1842, 1615, 1876, 1619, 1877, 1878, 1879, 1880, 1886,
- /* 640 */ 1881, 1918, 1884, 1874, 1883, 1920, 1888, 1882, 1885, 1926,
- /* 650 */ 1893, 1887, 1892, 1933, 1901, 1891, 1898, 1939, 1906, 1909,
- /* 660 */ 1943, 1923, 1925, 1927, 1928, 1931, 1934,
+ /* 0 */ 1011, 0, 0, 207, 207, 264, 264, 264, 471, 471,
+ /* 10 */ 264, 264, 678, 735, 942, 735, 735, 735, 735, 735,
+ /* 20 */ 735, 735, 735, 735, 735, 735, 735, 735, 735, 735,
+ /* 30 */ 735, 735, 735, 735, 735, 735, 735, 735, 735, 735,
+ /* 40 */ 735, 735, 153, 153, 152, 152, 152, 792, 792, 154,
+ /* 50 */ 792, 792, 96, 397, 311, 428, 311, 156, 156, 269,
+ /* 60 */ 269, 551, 93, 311, 311, 156, 156, 156, 156, 156,
+ /* 70 */ 156, 156, 156, 156, 156, 136, 156, 156, 156, 204,
+ /* 80 */ 156, 156, 518, 156, 156, 518, 550, 156, 518, 518,
+ /* 90 */ 518, 156, 564, 467, 1030, 854, 854, 228, 527, 527,
+ /* 100 */ 527, 527, 527, 527, 527, 527, 527, 527, 527, 527,
+ /* 110 */ 527, 527, 527, 527, 527, 527, 527, 424, 179, 93,
+ /* 120 */ 493, 493, 137, 372, 721, 485, 485, 485, 372, 621,
+ /* 130 */ 621, 204, 671, 671, 518, 518, 626, 626, 705, 770,
+ /* 140 */ 72, 72, 72, 72, 72, 72, 72, 535, 77, 565,
+ /* 150 */ 753, 131, 30, 304, 611, 614, 576, 879, 182, 824,
+ /* 160 */ 741, 255, 741, 958, 779, 779, 779, 276, 764, 984,
+ /* 170 */ 1172, 1047, 1181, 1204, 1204, 1181, 1075, 1075, 1204, 1204,
+ /* 180 */ 1204, 1222, 1222, 1224, 136, 204, 136, 1250, 1253, 136,
+ /* 190 */ 1250, 136, 136, 136, 1204, 136, 1222, 518, 518, 518,
+ /* 200 */ 518, 518, 518, 518, 518, 518, 518, 518, 1204, 1222,
+ /* 210 */ 626, 1224, 564, 1127, 204, 564, 1204, 1204, 1250, 564,
+ /* 220 */ 1097, 626, 626, 626, 626, 1097, 626, 1170, 564, 705,
+ /* 230 */ 564, 621, 1325, 626, 1110, 1097, 626, 626, 1110, 1097,
+ /* 240 */ 626, 626, 518, 1109, 1206, 1110, 1126, 1128, 1141, 984,
+ /* 250 */ 1147, 621, 1379, 1146, 1150, 1148, 1146, 1150, 1146, 1150,
+ /* 260 */ 1309, 1312, 626, 770, 1204, 564, 1376, 1222, 2512, 2512,
+ /* 270 */ 2512, 2512, 2512, 2512, 2512, 69, 941, 371, 260, 2,
+ /* 280 */ 222, 418, 434, 443, 624, 727, 983, 155, 155, 155,
+ /* 290 */ 155, 155, 155, 155, 155, 1009, 540, 12, 12, 370,
+ /* 300 */ 374, 529, 286, 257, 205, 253, 655, 486, 146, 146,
+ /* 310 */ 146, 146, 298, 36, 992, 804, 956, 987, 989, 1090,
+ /* 320 */ 1101, 1103, 1002, 930, 1004, 1022, 1024, 1070, 1072, 1077,
+ /* 330 */ 1078, 1093, 997, 411, 988, 1087, 1050, 1061, 976, 1088,
+ /* 340 */ 1082, 1095, 1096, 1118, 1119, 1120, 1122, 768, 1156, 1157,
+ /* 350 */ 1132, 43, 1443, 1448, 1269, 1449, 1452, 1417, 1461, 1427,
+ /* 360 */ 1271, 1429, 1430, 1431, 1275, 1468, 1434, 1437, 1281, 1480,
+ /* 370 */ 1291, 1484, 1450, 1486, 1465, 1488, 1454, 1314, 1317, 1493,
+ /* 380 */ 1494, 1327, 1326, 1501, 1502, 1457, 1504, 1505, 1506, 1466,
+ /* 390 */ 1509, 1510, 1511, 1362, 1516, 1518, 1519, 1530, 1532, 1374,
+ /* 400 */ 1498, 1523, 1387, 1536, 1540, 1547, 1550, 1552, 1553, 1554,
+ /* 410 */ 1558, 1559, 1560, 1561, 1563, 1565, 1566, 1525, 1568, 1570,
+ /* 420 */ 1571, 1572, 1573, 1574, 1555, 1575, 1576, 1578, 1588, 1589,
+ /* 430 */ 1524, 1581, 1526, 1583, 1584, 1543, 1551, 1556, 1586, 1557,
+ /* 440 */ 1590, 1562, 1609, 1546, 1577, 1611, 1613, 1614, 1579, 1451,
+ /* 450 */ 1619, 1621, 1623, 1564, 1624, 1625, 1593, 1582, 1594, 1627,
+ /* 460 */ 1596, 1585, 1595, 1636, 1602, 1591, 1608, 1648, 1620, 1610,
+ /* 470 */ 1622, 1662, 1663, 1664, 1665, 1567, 1597, 1632, 1637, 1660,
+ /* 480 */ 1633, 1635, 1629, 1631, 1640, 1641, 1655, 1678, 1657, 1680,
+ /* 490 */ 1661, 1639, 1681, 1667, 1647, 1684, 1650, 1686, 1656, 1692,
+ /* 500 */ 1671, 1674, 1695, 1537, 1675, 1698, 1528, 1687, 1549, 1569,
+ /* 510 */ 1712, 1713, 1604, 1606, 1714, 1715, 1716, 1626, 1628, 1682,
+ /* 520 */ 1533, 1721, 1630, 1587, 1634, 1724, 1690, 1599, 1642, 1643,
+ /* 530 */ 1679, 1689, 1503, 1644, 1646, 1645, 1649, 1654, 1666, 1691,
+ /* 540 */ 1658, 1683, 1694, 1696, 1677, 1699, 1704, 1707, 1697, 1711,
+ /* 550 */ 1539, 1701, 1702, 1722, 1500, 1729, 1727, 1731, 1703, 1745,
+ /* 560 */ 1601, 1706, 1720, 1757, 1758, 1763, 1767, 1768, 1706, 1802,
+ /* 570 */ 1783, 1612, 1764, 1718, 1723, 1725, 1728, 1726, 1732, 1760,
+ /* 580 */ 1730, 1746, 1769, 1787, 1616, 1747, 1710, 1744, 1778, 1786,
+ /* 590 */ 1748, 1749, 1807, 1752, 1753, 1811, 1755, 1766, 1817, 1770,
+ /* 600 */ 1771, 1818, 1773, 1750, 1751, 1754, 1756, 1842, 1779, 1776,
+ /* 610 */ 1793, 1837, 1794, 1833, 1833, 1866, 1827, 1829, 1856, 1858,
+ /* 620 */ 1859, 1860, 1861, 1862, 1863, 1864, 1867, 1868, 1845, 1810,
+ /* 630 */ 1871, 1880, 1881, 1895, 1883, 1897, 1886, 1887, 1888, 1857,
+ /* 640 */ 1629, 1889, 1631, 1891, 1892, 1894, 1896, 1908, 1899, 1932,
+ /* 650 */ 1901, 1890, 1900, 1941, 1907, 1898, 1904, 1944, 1912, 1902,
+ /* 660 */ 1909, 1950, 1917, 1910, 1915, 1955, 1924, 1926, 1956, 1946,
+ /* 670 */ 1949, 1951, 1952, 1954, 1957,
};
-#define YY_REDUCE_COUNT (275)
-#define YY_REDUCE_MIN (-389)
-#define YY_REDUCE_MAX (2125)
+#define YY_REDUCE_COUNT (274)
+#define YY_REDUCE_MIN (-353)
+#define YY_REDUCE_MAX (2122)
static const short yy_reduce_ofst[] = {
- /* 0 */ -75, 602, 629, -279, -15, 753, 815, 867, 923, 933,
- /* 10 */ 273, 982, 104, 1042, 1052, 1101, 1152, 1204, 1214, 1231,
- /* 20 */ 1291, 1308, 1376, 1394, 1453, 1502, 1523, 1581, 1591, 1643,
- /* 30 */ 1660, 1709, 1726, 1775, 1829, 1839, 1890, 1908, 1967, 1988,
- /* 40 */ 2044, 2054, 2078, 2125, -312, 33, 425, -295, 436, 490,
- /* 50 */ -308, 654, -345, -68, 77, 151, 246, -316, -310, -307,
- /* 60 */ -276, -285, -238, -87, -78, -313, -33, 239, 367, 418,
- /* 70 */ 420, 427, 505, 623, 689, 161, 698, 722, 723, -234,
- /* 80 */ 729, 730, 727, 742, 744, -244, -153, 749, 754, -10,
- /* 90 */ 782, 767, 91, -124, -389, -389, -389, -164, -51, -30,
- /* 100 */ -21, 100, 148, 205, 272, 311, 323, 357, 381, 382,
- /* 110 */ 430, 464, 540, 603, 607, 640, 641, 28, -263, -335,
- /* 120 */ 392, 443, 438, -229, -85, 234, 467, 570, 79, 18,
- /* 130 */ 365, 312, 341, 413, 223, 665, 545, 596, 668, 610,
- /* 140 */ 90, 220, 249, 308, 373, 387, 398, 252, 534, 518,
- /* 150 */ 645, 571, 677, 693, 680, 781, 781, 839, 848, 817,
- /* 160 */ 793, 769, 769, 769, 783, 758, 762, 763, 777, 781,
- /* 170 */ 812, 814, 836, 876, 918, 919, 880, 884, 889, 928,
- /* 180 */ 936, 941, 935, 945, 894, 946, 914, 950, 912, 911,
- /* 190 */ 958, 916, 960, 961, 963, 969, 968, 976, 953, 956,
- /* 200 */ 957, 959, 964, 965, 974, 975, 984, 985, 986, 978,
- /* 210 */ 981, 947, 954, 1005, 930, 979, 1009, 1013, 1016, 971,
- /* 220 */ 1017, 980, 988, 991, 993, 995, 987, 998, 992, 1030,
- /* 230 */ 1022, 1038, 1014, 989, 1004, 962, 1000, 1023, 1028, 970,
- /* 240 */ 1011, 1031, 1033, 781, 973, 972, 983, 990, 996, 999,
- /* 250 */ 1024, 769, 1051, 1027, 997, 1029, 1003, 1018, 1036, 1032,
- /* 260 */ 1037, 1055, 1070, 1062, 1086, 1098, 1095, 1105, 1109, 1053,
- /* 270 */ 1068, 1113, 1114, 1118, 1132, 1149,
+ /* 0 */ 285, -311, -38, 148, 882, 935, 999, 1039, 412, 1092,
+ /* 10 */ 1142, 1159, -22, -261, 1209, 677, 1257, 1267, 1318, 1378,
+ /* 20 */ 1396, 1446, 975, 1472, 1496, 1522, 1580, 1598, 1659, 1709,
+ /* 30 */ 1719, 1759, 1809, 1821, 1835, 1911, 1921, 1972, 2022, 2032,
+ /* 40 */ 2072, 2122, -120, 1592, 249, 444, 579, -319, -289, -309,
+ /* 50 */ -323, -258, -117, 140, 142, 287, 329, -299, -42, -316,
+ /* 60 */ -312, -87, -346, 94, 103, 169, 413, 425, 431, 454,
+ /* 70 */ 458, 491, 492, 495, 511, 383, 578, 580, 582, -12,
+ /* 80 */ 583, 637, -37, 639, 692, -229, -115, 693, 243, 88,
+ /* 90 */ 244, 577, 387, -275, -353, -353, -353, -265, -14, 76,
+ /* 100 */ 308, 405, 416, 419, 447, 476, 498, 520, 522, 566,
+ /* 110 */ 605, 608, 610, 620, 623, 666, 667, 233, -6, -292,
+ /* 120 */ 7, 446, 377, 332, -51, 525, 612, 646, 449, 188,
+ /* 130 */ 379, 422, 488, 508, 40, 659, 568, 668, 718, 528,
+ /* 140 */ 516, 575, 619, 788, 827, 832, 835, 534, 864, 756,
+ /* 150 */ 776, 766, 865, 773, 853, 853, 874, 877, 845, 817,
+ /* 160 */ 795, 795, 795, 810, 781, 789, 793, 801, 853, 841,
+ /* 170 */ 846, 855, 866, 908, 909, 871, 875, 876, 916, 919,
+ /* 180 */ 920, 931, 932, 887, 924, 907, 939, 905, 912, 952,
+ /* 190 */ 910, 960, 961, 962, 969, 967, 968, 953, 954, 955,
+ /* 200 */ 957, 959, 963, 964, 965, 966, 970, 972, 978, 982,
+ /* 210 */ 948, 918, 980, 936, 971, 986, 994, 996, 973, 1003,
+ /* 220 */ 974, 981, 985, 990, 991, 993, 995, 1005, 1013, 1010,
+ /* 230 */ 1019, 1001, 1008, 1012, 949, 1006, 1014, 1015, 979, 1007,
+ /* 240 */ 1018, 1020, 853, 998, 1000, 1016, 977, 1017, 1023, 1025,
+ /* 250 */ 795, 1032, 1027, 1026, 1021, 1028, 1029, 1033, 1031, 1035,
+ /* 260 */ 1038, 1064, 1056, 1080, 1091, 1098, 1099, 1104, 1042, 1053,
+ /* 270 */ 1105, 1107, 1108, 1115, 1117,
};
static const YYACTIONTYPE yy_default[] = {
- /* 0 */ 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1464,
- /* 10 */ 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1464,
- /* 20 */ 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1464,
- /* 30 */ 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1464,
- /* 40 */ 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1464,
- /* 50 */ 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1464,
- /* 60 */ 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1464,
- /* 70 */ 1464, 1464, 1464, 1464, 1464, 1538, 1464, 1464, 1464, 1464,
- /* 80 */ 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1464,
- /* 90 */ 1464, 1464, 1536, 1694, 1464, 1871, 1464, 1464, 1464, 1464,
- /* 100 */ 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1464,
- /* 110 */ 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1464,
- /* 120 */ 1464, 1464, 1538, 1464, 1536, 1883, 1883, 1883, 1464, 1464,
- /* 130 */ 1464, 1464, 1737, 1737, 1464, 1464, 1464, 1464, 1636, 1464,
- /* 140 */ 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1729, 1464, 1952,
- /* 150 */ 1464, 1464, 1464, 1735, 1906, 1464, 1464, 1464, 1464, 1589,
- /* 160 */ 1898, 1875, 1889, 1876, 1873, 1937, 1937, 1937, 1892, 1464,
- /* 170 */ 1902, 1464, 1722, 1699, 1464, 1464, 1699, 1696, 1696, 1464,
- /* 180 */ 1464, 1464, 1464, 1464, 1464, 1538, 1464, 1538, 1464, 1464,
- /* 190 */ 1538, 1464, 1538, 1538, 1538, 1464, 1538, 1464, 1464, 1464,
- /* 200 */ 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1464,
- /* 210 */ 1464, 1464, 1464, 1536, 1731, 1464, 1536, 1464, 1464, 1464,
- /* 220 */ 1536, 1911, 1464, 1464, 1464, 1464, 1911, 1464, 1464, 1536,
- /* 230 */ 1464, 1536, 1464, 1464, 1464, 1913, 1911, 1464, 1464, 1913,
- /* 240 */ 1911, 1464, 1464, 1464, 1925, 1921, 1913, 1929, 1927, 1904,
- /* 250 */ 1902, 1889, 1464, 1464, 1943, 1939, 1955, 1943, 1939, 1943,
- /* 260 */ 1939, 1464, 1605, 1464, 1464, 1464, 1536, 1496, 1464, 1724,
- /* 270 */ 1737, 1639, 1639, 1639, 1539, 1469, 1464, 1464, 1464, 1464,
- /* 280 */ 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1808, 1924,
- /* 290 */ 1923, 1847, 1846, 1845, 1843, 1807, 1464, 1601, 1806, 1805,
- /* 300 */ 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1799, 1800,
- /* 310 */ 1798, 1797, 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1464,
- /* 320 */ 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1464,
- /* 330 */ 1872, 1464, 1940, 1944, 1464, 1464, 1464, 1464, 1464, 1783,
- /* 340 */ 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1464,
- /* 350 */ 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1464,
- /* 360 */ 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1464,
- /* 370 */ 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1464,
- /* 380 */ 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1464,
- /* 390 */ 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1464,
- /* 400 */ 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1464,
- /* 410 */ 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1464,
- /* 420 */ 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1464,
- /* 430 */ 1464, 1464, 1464, 1464, 1501, 1464, 1464, 1464, 1464, 1464,
- /* 440 */ 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1464,
- /* 450 */ 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1464,
- /* 460 */ 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1464,
- /* 470 */ 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1573, 1572,
- /* 480 */ 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1464,
- /* 490 */ 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1464,
- /* 500 */ 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1464,
- /* 510 */ 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1741, 1464, 1464,
- /* 520 */ 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1905, 1464, 1464,
- /* 530 */ 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1464,
- /* 540 */ 1464, 1464, 1464, 1783, 1464, 1922, 1464, 1882, 1878, 1464,
- /* 550 */ 1464, 1874, 1782, 1464, 1464, 1938, 1464, 1464, 1464, 1464,
- /* 560 */ 1464, 1464, 1464, 1464, 1464, 1867, 1464, 1464, 1840, 1825,
- /* 570 */ 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1464,
- /* 580 */ 1793, 1464, 1464, 1464, 1464, 1464, 1633, 1464, 1464, 1464,
- /* 590 */ 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1618,
- /* 600 */ 1616, 1615, 1614, 1464, 1611, 1464, 1464, 1464, 1464, 1642,
- /* 610 */ 1641, 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1464,
- /* 620 */ 1464, 1464, 1464, 1557, 1464, 1464, 1464, 1464, 1464, 1464,
- /* 630 */ 1464, 1464, 1549, 1464, 1548, 1464, 1464, 1464, 1464, 1464,
- /* 640 */ 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1464,
- /* 650 */ 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1464, 1464,
- /* 660 */ 1464, 1464, 1464, 1464, 1464, 1464, 1464,
+ /* 0 */ 1481, 1481, 1481, 1481, 1481, 1481, 1481, 1481, 1481, 1481,
+ /* 10 */ 1481, 1481, 1481, 1481, 1481, 1481, 1481, 1481, 1481, 1481,
+ /* 20 */ 1481, 1481, 1481, 1481, 1481, 1481, 1481, 1481, 1481, 1481,
+ /* 30 */ 1481, 1481, 1481, 1481, 1481, 1481, 1481, 1481, 1481, 1481,
+ /* 40 */ 1481, 1481, 1481, 1481, 1481, 1481, 1481, 1481, 1481, 1481,
+ /* 50 */ 1481, 1481, 1481, 1481, 1481, 1481, 1481, 1481, 1481, 1481,
+ /* 60 */ 1481, 1481, 1481, 1481, 1481, 1481, 1481, 1481, 1481, 1481,
+ /* 70 */ 1481, 1481, 1481, 1481, 1481, 1555, 1481, 1481, 1481, 1481,
+ /* 80 */ 1481, 1481, 1481, 1481, 1481, 1481, 1481, 1481, 1481, 1481,
+ /* 90 */ 1481, 1481, 1553, 1718, 1481, 1893, 1481, 1481, 1481, 1481,
+ /* 100 */ 1481, 1481, 1481, 1481, 1481, 1481, 1481, 1481, 1481, 1481,
+ /* 110 */ 1481, 1481, 1481, 1481, 1481, 1481, 1481, 1481, 1481, 1481,
+ /* 120 */ 1481, 1481, 1555, 1481, 1553, 1905, 1905, 1905, 1481, 1481,
+ /* 130 */ 1481, 1481, 1759, 1759, 1481, 1481, 1481, 1481, 1658, 1481,
+ /* 140 */ 1481, 1481, 1481, 1481, 1481, 1481, 1481, 1753, 1481, 1974,
+ /* 150 */ 1481, 1481, 1481, 1928, 1481, 1481, 1481, 1481, 1611, 1920,
+ /* 160 */ 1897, 1911, 1898, 1895, 1959, 1959, 1959, 1914, 1481, 1924,
+ /* 170 */ 1481, 1746, 1723, 1481, 1481, 1723, 1720, 1720, 1481, 1481,
+ /* 180 */ 1481, 1481, 1481, 1481, 1555, 1481, 1555, 1481, 1481, 1555,
+ /* 190 */ 1481, 1555, 1555, 1555, 1481, 1555, 1481, 1481, 1481, 1481,
+ /* 200 */ 1481, 1481, 1481, 1481, 1481, 1481, 1481, 1481, 1481, 1481,
+ /* 210 */ 1481, 1481, 1553, 1755, 1481, 1553, 1481, 1481, 1481, 1553,
+ /* 220 */ 1933, 1481, 1481, 1481, 1481, 1933, 1481, 1481, 1553, 1481,
+ /* 230 */ 1553, 1481, 1481, 1481, 1935, 1933, 1481, 1481, 1935, 1933,
+ /* 240 */ 1481, 1481, 1481, 1947, 1943, 1935, 1951, 1949, 1926, 1924,
+ /* 250 */ 1911, 1481, 1481, 1965, 1961, 1977, 1965, 1961, 1965, 1961,
+ /* 260 */ 1481, 1627, 1481, 1481, 1481, 1553, 1513, 1481, 1748, 1759,
+ /* 270 */ 1661, 1661, 1661, 1556, 1486, 1481, 1481, 1481, 1481, 1481,
+ /* 280 */ 1481, 1481, 1481, 1481, 1481, 1481, 1481, 1830, 1946, 1945,
+ /* 290 */ 1869, 1868, 1867, 1865, 1829, 1481, 1623, 1828, 1827, 1481,
+ /* 300 */ 1481, 1481, 1481, 1481, 1481, 1481, 1481, 1481, 1821, 1822,
+ /* 310 */ 1820, 1819, 1481, 1481, 1481, 1481, 1481, 1481, 1481, 1481,
+ /* 320 */ 1481, 1481, 1481, 1481, 1481, 1481, 1481, 1481, 1481, 1481,
+ /* 330 */ 1481, 1894, 1481, 1962, 1966, 1481, 1481, 1481, 1481, 1481,
+ /* 340 */ 1805, 1481, 1481, 1481, 1481, 1481, 1481, 1481, 1481, 1481,
+ /* 350 */ 1481, 1481, 1481, 1481, 1481, 1481, 1481, 1481, 1481, 1481,
+ /* 360 */ 1481, 1481, 1481, 1481, 1481, 1481, 1481, 1481, 1481, 1481,
+ /* 370 */ 1481, 1481, 1481, 1481, 1481, 1481, 1481, 1481, 1481, 1481,
+ /* 380 */ 1481, 1481, 1481, 1481, 1481, 1481, 1481, 1481, 1481, 1481,
+ /* 390 */ 1481, 1481, 1481, 1481, 1481, 1481, 1481, 1481, 1481, 1481,
+ /* 400 */ 1481, 1481, 1481, 1481, 1481, 1481, 1481, 1481, 1481, 1481,
+ /* 410 */ 1481, 1481, 1481, 1481, 1481, 1481, 1481, 1481, 1481, 1481,
+ /* 420 */ 1481, 1481, 1481, 1481, 1481, 1481, 1481, 1481, 1481, 1481,
+ /* 430 */ 1481, 1481, 1481, 1481, 1481, 1481, 1481, 1518, 1481, 1481,
+ /* 440 */ 1481, 1481, 1481, 1481, 1481, 1481, 1481, 1481, 1481, 1481,
+ /* 450 */ 1481, 1481, 1481, 1481, 1481, 1481, 1481, 1481, 1481, 1481,
+ /* 460 */ 1481, 1481, 1481, 1481, 1481, 1481, 1481, 1481, 1481, 1481,
+ /* 470 */ 1481, 1481, 1481, 1481, 1481, 1481, 1481, 1481, 1481, 1481,
+ /* 480 */ 1481, 1481, 1594, 1593, 1481, 1481, 1481, 1481, 1481, 1481,
+ /* 490 */ 1481, 1481, 1481, 1481, 1481, 1481, 1481, 1481, 1481, 1481,
+ /* 500 */ 1481, 1481, 1481, 1481, 1481, 1481, 1481, 1481, 1481, 1481,
+ /* 510 */ 1481, 1481, 1481, 1481, 1481, 1481, 1481, 1481, 1481, 1481,
+ /* 520 */ 1481, 1763, 1481, 1481, 1481, 1481, 1481, 1481, 1481, 1481,
+ /* 530 */ 1481, 1927, 1481, 1481, 1481, 1481, 1481, 1481, 1481, 1481,
+ /* 540 */ 1481, 1481, 1481, 1481, 1481, 1481, 1481, 1805, 1481, 1944,
+ /* 550 */ 1481, 1904, 1900, 1481, 1481, 1896, 1804, 1481, 1481, 1960,
+ /* 560 */ 1481, 1481, 1481, 1481, 1481, 1481, 1481, 1481, 1481, 1889,
+ /* 570 */ 1481, 1481, 1862, 1847, 1481, 1481, 1481, 1481, 1481, 1481,
+ /* 580 */ 1481, 1481, 1481, 1481, 1815, 1481, 1481, 1481, 1481, 1481,
+ /* 590 */ 1655, 1481, 1481, 1481, 1481, 1481, 1481, 1481, 1481, 1481,
+ /* 600 */ 1481, 1481, 1481, 1640, 1638, 1637, 1636, 1481, 1633, 1481,
+ /* 610 */ 1481, 1481, 1481, 1664, 1663, 1481, 1481, 1481, 1481, 1481,
+ /* 620 */ 1481, 1481, 1481, 1481, 1481, 1481, 1481, 1481, 1481, 1481,
+ /* 630 */ 1575, 1481, 1481, 1481, 1481, 1481, 1481, 1481, 1481, 1481,
+ /* 640 */ 1566, 1481, 1565, 1481, 1481, 1481, 1481, 1481, 1481, 1481,
+ /* 650 */ 1481, 1481, 1481, 1481, 1481, 1481, 1481, 1481, 1481, 1481,
+ /* 660 */ 1481, 1481, 1481, 1481, 1481, 1481, 1481, 1481, 1481, 1481,
+ /* 670 */ 1481, 1481, 1481, 1481, 1481,
};
/********** End of lemon-generated parsing tables *****************************/
@@ -997,6 +993,7 @@ static const YYCODETYPE yyFallback[] = {
0, /* KEEP => nothing */
0, /* PAGES => nothing */
0, /* PAGESIZE => nothing */
+ 0, /* TSDB_PAGESIZE => nothing */
0, /* PRECISION => nothing */
0, /* REPLICA => nothing */
0, /* STRICT => nothing */
@@ -1010,6 +1007,9 @@ static const YYCODETYPE yyFallback[] = {
0, /* WAL_RETENTION_SIZE => nothing */
0, /* WAL_ROLL_PERIOD => nothing */
0, /* WAL_SEGMENT_SIZE => nothing */
+ 0, /* STT_TRIGGER => nothing */
+ 0, /* TABLE_PREFIX => nothing */
+ 0, /* TABLE_SUFFIX => nothing */
0, /* NK_COLON => nothing */
0, /* TABLE => nothing */
0, /* NK_LP => nothing */
@@ -1075,6 +1075,7 @@ static const YYCODETYPE yyFallback[] = {
0, /* DISTRIBUTED => nothing */
0, /* CONSUMERS => nothing */
0, /* SUBSCRIPTIONS => nothing */
+ 0, /* VNODES => nothing */
0, /* LIKE => nothing */
0, /* INDEX => nothing */
0, /* FUNCTION => nothing */
@@ -1175,59 +1176,58 @@ static const YYCODETYPE yyFallback[] = {
0, /* ASC => nothing */
0, /* NULLS => nothing */
0, /* ABORT => nothing */
- 251, /* AFTER => ABORT */
- 251, /* ATTACH => ABORT */
- 251, /* BEFORE => ABORT */
- 251, /* BEGIN => ABORT */
- 251, /* BITAND => ABORT */
- 251, /* BITNOT => ABORT */
- 251, /* BITOR => ABORT */
- 251, /* BLOCKS => ABORT */
- 251, /* CHANGE => ABORT */
- 251, /* COMMA => ABORT */
- 251, /* COMPACT => ABORT */
- 251, /* CONCAT => ABORT */
- 251, /* CONFLICT => ABORT */
- 251, /* COPY => ABORT */
- 251, /* DEFERRED => ABORT */
- 251, /* DELIMITERS => ABORT */
- 251, /* DETACH => ABORT */
- 251, /* DIVIDE => ABORT */
- 251, /* DOT => ABORT */
- 251, /* EACH => ABORT */
- 251, /* END => ABORT */
- 251, /* FAIL => ABORT */
- 251, /* FILE => ABORT */
- 251, /* FOR => ABORT */
- 251, /* GLOB => ABORT */
- 251, /* ID => ABORT */
- 251, /* IMMEDIATE => ABORT */
- 251, /* IMPORT => ABORT */
- 251, /* INITIALLY => ABORT */
- 251, /* INSTEAD => ABORT */
- 251, /* ISNULL => ABORT */
- 251, /* KEY => ABORT */
- 251, /* NK_BITNOT => ABORT */
- 251, /* NK_SEMI => ABORT */
- 251, /* NOTNULL => ABORT */
- 251, /* OF => ABORT */
- 251, /* PLUS => ABORT */
- 251, /* PRIVILEGE => ABORT */
- 251, /* RAISE => ABORT */
- 251, /* REPLACE => ABORT */
- 251, /* RESTRICT => ABORT */
- 251, /* ROW => ABORT */
- 251, /* SEMI => ABORT */
- 251, /* STAR => ABORT */
- 251, /* STATEMENT => ABORT */
- 251, /* STRING => ABORT */
- 251, /* TIMES => ABORT */
- 251, /* UPDATE => ABORT */
- 251, /* VALUES => ABORT */
- 251, /* VARIABLE => ABORT */
- 251, /* VIEW => ABORT */
- 251, /* VNODES => ABORT */
- 251, /* WAL => ABORT */
+ 256, /* AFTER => ABORT */
+ 256, /* ATTACH => ABORT */
+ 256, /* BEFORE => ABORT */
+ 256, /* BEGIN => ABORT */
+ 256, /* BITAND => ABORT */
+ 256, /* BITNOT => ABORT */
+ 256, /* BITOR => ABORT */
+ 256, /* BLOCKS => ABORT */
+ 256, /* CHANGE => ABORT */
+ 256, /* COMMA => ABORT */
+ 256, /* COMPACT => ABORT */
+ 256, /* CONCAT => ABORT */
+ 256, /* CONFLICT => ABORT */
+ 256, /* COPY => ABORT */
+ 256, /* DEFERRED => ABORT */
+ 256, /* DELIMITERS => ABORT */
+ 256, /* DETACH => ABORT */
+ 256, /* DIVIDE => ABORT */
+ 256, /* DOT => ABORT */
+ 256, /* EACH => ABORT */
+ 256, /* END => ABORT */
+ 256, /* FAIL => ABORT */
+ 256, /* FILE => ABORT */
+ 256, /* FOR => ABORT */
+ 256, /* GLOB => ABORT */
+ 256, /* ID => ABORT */
+ 256, /* IMMEDIATE => ABORT */
+ 256, /* IMPORT => ABORT */
+ 256, /* INITIALLY => ABORT */
+ 256, /* INSTEAD => ABORT */
+ 256, /* ISNULL => ABORT */
+ 256, /* KEY => ABORT */
+ 256, /* NK_BITNOT => ABORT */
+ 256, /* NK_SEMI => ABORT */
+ 256, /* NOTNULL => ABORT */
+ 256, /* OF => ABORT */
+ 256, /* PLUS => ABORT */
+ 256, /* PRIVILEGE => ABORT */
+ 256, /* RAISE => ABORT */
+ 256, /* REPLACE => ABORT */
+ 256, /* RESTRICT => ABORT */
+ 256, /* ROW => ABORT */
+ 256, /* SEMI => ABORT */
+ 256, /* STAR => ABORT */
+ 256, /* STATEMENT => ABORT */
+ 256, /* STRING => ABORT */
+ 256, /* TIMES => ABORT */
+ 256, /* UPDATE => ABORT */
+ 256, /* VALUES => ABORT */
+ 256, /* VARIABLE => ABORT */
+ 256, /* VIEW => ABORT */
+ 256, /* WAL => ABORT */
};
#endif /* YYFALLBACK */
@@ -1389,359 +1389,362 @@ static const char *const yyTokenName[] = {
/* 71 */ "KEEP",
/* 72 */ "PAGES",
/* 73 */ "PAGESIZE",
- /* 74 */ "PRECISION",
- /* 75 */ "REPLICA",
- /* 76 */ "STRICT",
- /* 77 */ "VGROUPS",
- /* 78 */ "SINGLE_STABLE",
- /* 79 */ "RETENTIONS",
- /* 80 */ "SCHEMALESS",
- /* 81 */ "WAL_LEVEL",
- /* 82 */ "WAL_FSYNC_PERIOD",
- /* 83 */ "WAL_RETENTION_PERIOD",
- /* 84 */ "WAL_RETENTION_SIZE",
- /* 85 */ "WAL_ROLL_PERIOD",
- /* 86 */ "WAL_SEGMENT_SIZE",
- /* 87 */ "NK_COLON",
- /* 88 */ "TABLE",
- /* 89 */ "NK_LP",
- /* 90 */ "NK_RP",
- /* 91 */ "STABLE",
- /* 92 */ "ADD",
- /* 93 */ "COLUMN",
- /* 94 */ "MODIFY",
- /* 95 */ "RENAME",
- /* 96 */ "TAG",
- /* 97 */ "SET",
- /* 98 */ "NK_EQ",
- /* 99 */ "USING",
- /* 100 */ "TAGS",
- /* 101 */ "COMMENT",
- /* 102 */ "BOOL",
- /* 103 */ "TINYINT",
- /* 104 */ "SMALLINT",
- /* 105 */ "INT",
- /* 106 */ "INTEGER",
- /* 107 */ "BIGINT",
- /* 108 */ "FLOAT",
- /* 109 */ "DOUBLE",
- /* 110 */ "BINARY",
- /* 111 */ "TIMESTAMP",
- /* 112 */ "NCHAR",
- /* 113 */ "UNSIGNED",
- /* 114 */ "JSON",
- /* 115 */ "VARCHAR",
- /* 116 */ "MEDIUMBLOB",
- /* 117 */ "BLOB",
- /* 118 */ "VARBINARY",
- /* 119 */ "DECIMAL",
- /* 120 */ "MAX_DELAY",
- /* 121 */ "WATERMARK",
- /* 122 */ "ROLLUP",
- /* 123 */ "TTL",
- /* 124 */ "SMA",
- /* 125 */ "FIRST",
- /* 126 */ "LAST",
- /* 127 */ "SHOW",
- /* 128 */ "DATABASES",
- /* 129 */ "TABLES",
- /* 130 */ "STABLES",
- /* 131 */ "MNODES",
- /* 132 */ "MODULES",
- /* 133 */ "QNODES",
- /* 134 */ "FUNCTIONS",
- /* 135 */ "INDEXES",
- /* 136 */ "ACCOUNTS",
- /* 137 */ "APPS",
- /* 138 */ "CONNECTIONS",
- /* 139 */ "LICENCES",
- /* 140 */ "GRANTS",
- /* 141 */ "QUERIES",
- /* 142 */ "SCORES",
- /* 143 */ "TOPICS",
- /* 144 */ "VARIABLES",
- /* 145 */ "BNODES",
- /* 146 */ "SNODES",
- /* 147 */ "CLUSTER",
- /* 148 */ "TRANSACTIONS",
- /* 149 */ "DISTRIBUTED",
- /* 150 */ "CONSUMERS",
- /* 151 */ "SUBSCRIPTIONS",
- /* 152 */ "LIKE",
- /* 153 */ "INDEX",
- /* 154 */ "FUNCTION",
- /* 155 */ "INTERVAL",
- /* 156 */ "TOPIC",
- /* 157 */ "AS",
- /* 158 */ "WITH",
- /* 159 */ "META",
- /* 160 */ "CONSUMER",
- /* 161 */ "GROUP",
- /* 162 */ "DESC",
- /* 163 */ "DESCRIBE",
- /* 164 */ "RESET",
- /* 165 */ "QUERY",
- /* 166 */ "CACHE",
- /* 167 */ "EXPLAIN",
- /* 168 */ "ANALYZE",
- /* 169 */ "VERBOSE",
- /* 170 */ "NK_BOOL",
- /* 171 */ "RATIO",
- /* 172 */ "NK_FLOAT",
- /* 173 */ "OUTPUTTYPE",
- /* 174 */ "AGGREGATE",
- /* 175 */ "BUFSIZE",
- /* 176 */ "STREAM",
- /* 177 */ "INTO",
- /* 178 */ "TRIGGER",
- /* 179 */ "AT_ONCE",
- /* 180 */ "WINDOW_CLOSE",
- /* 181 */ "IGNORE",
- /* 182 */ "EXPIRED",
- /* 183 */ "KILL",
- /* 184 */ "CONNECTION",
- /* 185 */ "TRANSACTION",
- /* 186 */ "BALANCE",
- /* 187 */ "VGROUP",
- /* 188 */ "MERGE",
- /* 189 */ "REDISTRIBUTE",
- /* 190 */ "SPLIT",
- /* 191 */ "DELETE",
- /* 192 */ "INSERT",
- /* 193 */ "NULL",
- /* 194 */ "NK_QUESTION",
- /* 195 */ "NK_ARROW",
- /* 196 */ "ROWTS",
- /* 197 */ "TBNAME",
- /* 198 */ "QSTART",
- /* 199 */ "QEND",
- /* 200 */ "QDURATION",
- /* 201 */ "WSTART",
- /* 202 */ "WEND",
- /* 203 */ "WDURATION",
- /* 204 */ "CAST",
- /* 205 */ "NOW",
- /* 206 */ "TODAY",
- /* 207 */ "TIMEZONE",
- /* 208 */ "CLIENT_VERSION",
- /* 209 */ "SERVER_VERSION",
- /* 210 */ "SERVER_STATUS",
- /* 211 */ "CURRENT_USER",
- /* 212 */ "COUNT",
- /* 213 */ "LAST_ROW",
- /* 214 */ "BETWEEN",
- /* 215 */ "IS",
- /* 216 */ "NK_LT",
- /* 217 */ "NK_GT",
- /* 218 */ "NK_LE",
- /* 219 */ "NK_GE",
- /* 220 */ "NK_NE",
- /* 221 */ "MATCH",
- /* 222 */ "NMATCH",
- /* 223 */ "CONTAINS",
- /* 224 */ "IN",
- /* 225 */ "JOIN",
- /* 226 */ "INNER",
- /* 227 */ "SELECT",
- /* 228 */ "DISTINCT",
- /* 229 */ "WHERE",
- /* 230 */ "PARTITION",
- /* 231 */ "BY",
- /* 232 */ "SESSION",
- /* 233 */ "STATE_WINDOW",
- /* 234 */ "SLIDING",
- /* 235 */ "FILL",
- /* 236 */ "VALUE",
- /* 237 */ "NONE",
- /* 238 */ "PREV",
- /* 239 */ "LINEAR",
- /* 240 */ "NEXT",
- /* 241 */ "HAVING",
- /* 242 */ "RANGE",
- /* 243 */ "EVERY",
- /* 244 */ "ORDER",
- /* 245 */ "SLIMIT",
- /* 246 */ "SOFFSET",
- /* 247 */ "LIMIT",
- /* 248 */ "OFFSET",
- /* 249 */ "ASC",
- /* 250 */ "NULLS",
- /* 251 */ "ABORT",
- /* 252 */ "AFTER",
- /* 253 */ "ATTACH",
- /* 254 */ "BEFORE",
- /* 255 */ "BEGIN",
- /* 256 */ "BITAND",
- /* 257 */ "BITNOT",
- /* 258 */ "BITOR",
- /* 259 */ "BLOCKS",
- /* 260 */ "CHANGE",
- /* 261 */ "COMMA",
- /* 262 */ "COMPACT",
- /* 263 */ "CONCAT",
- /* 264 */ "CONFLICT",
- /* 265 */ "COPY",
- /* 266 */ "DEFERRED",
- /* 267 */ "DELIMITERS",
- /* 268 */ "DETACH",
- /* 269 */ "DIVIDE",
- /* 270 */ "DOT",
- /* 271 */ "EACH",
- /* 272 */ "END",
- /* 273 */ "FAIL",
- /* 274 */ "FILE",
- /* 275 */ "FOR",
- /* 276 */ "GLOB",
- /* 277 */ "ID",
- /* 278 */ "IMMEDIATE",
- /* 279 */ "IMPORT",
- /* 280 */ "INITIALLY",
- /* 281 */ "INSTEAD",
- /* 282 */ "ISNULL",
- /* 283 */ "KEY",
- /* 284 */ "NK_BITNOT",
- /* 285 */ "NK_SEMI",
- /* 286 */ "NOTNULL",
- /* 287 */ "OF",
- /* 288 */ "PLUS",
- /* 289 */ "PRIVILEGE",
- /* 290 */ "RAISE",
- /* 291 */ "REPLACE",
- /* 292 */ "RESTRICT",
- /* 293 */ "ROW",
- /* 294 */ "SEMI",
- /* 295 */ "STAR",
- /* 296 */ "STATEMENT",
- /* 297 */ "STRING",
- /* 298 */ "TIMES",
- /* 299 */ "UPDATE",
- /* 300 */ "VALUES",
- /* 301 */ "VARIABLE",
- /* 302 */ "VIEW",
- /* 303 */ "VNODES",
- /* 304 */ "WAL",
- /* 305 */ "cmd",
- /* 306 */ "account_options",
- /* 307 */ "alter_account_options",
- /* 308 */ "literal",
- /* 309 */ "alter_account_option",
- /* 310 */ "user_name",
- /* 311 */ "sysinfo_opt",
- /* 312 */ "privileges",
- /* 313 */ "priv_level",
- /* 314 */ "priv_type_list",
- /* 315 */ "priv_type",
- /* 316 */ "db_name",
- /* 317 */ "dnode_endpoint",
- /* 318 */ "not_exists_opt",
- /* 319 */ "db_options",
- /* 320 */ "exists_opt",
- /* 321 */ "alter_db_options",
- /* 322 */ "integer_list",
- /* 323 */ "variable_list",
- /* 324 */ "retention_list",
- /* 325 */ "alter_db_option",
- /* 326 */ "retention",
- /* 327 */ "full_table_name",
- /* 328 */ "column_def_list",
- /* 329 */ "tags_def_opt",
- /* 330 */ "table_options",
- /* 331 */ "multi_create_clause",
- /* 332 */ "tags_def",
- /* 333 */ "multi_drop_clause",
- /* 334 */ "alter_table_clause",
- /* 335 */ "alter_table_options",
- /* 336 */ "column_name",
- /* 337 */ "type_name",
- /* 338 */ "signed_literal",
- /* 339 */ "create_subtable_clause",
- /* 340 */ "specific_cols_opt",
- /* 341 */ "expression_list",
- /* 342 */ "drop_table_clause",
- /* 343 */ "col_name_list",
- /* 344 */ "table_name",
- /* 345 */ "column_def",
- /* 346 */ "duration_list",
- /* 347 */ "rollup_func_list",
- /* 348 */ "alter_table_option",
- /* 349 */ "duration_literal",
- /* 350 */ "rollup_func_name",
- /* 351 */ "function_name",
- /* 352 */ "col_name",
- /* 353 */ "db_name_cond_opt",
- /* 354 */ "like_pattern_opt",
- /* 355 */ "table_name_cond",
- /* 356 */ "from_db_opt",
- /* 357 */ "index_options",
- /* 358 */ "func_list",
- /* 359 */ "sliding_opt",
- /* 360 */ "sma_stream_opt",
- /* 361 */ "func",
- /* 362 */ "stream_options",
- /* 363 */ "topic_name",
- /* 364 */ "query_expression",
- /* 365 */ "cgroup_name",
- /* 366 */ "analyze_opt",
- /* 367 */ "explain_options",
- /* 368 */ "agg_func_opt",
- /* 369 */ "bufsize_opt",
- /* 370 */ "stream_name",
- /* 371 */ "into_opt",
- /* 372 */ "dnode_list",
- /* 373 */ "where_clause_opt",
- /* 374 */ "signed",
- /* 375 */ "literal_func",
- /* 376 */ "literal_list",
- /* 377 */ "table_alias",
- /* 378 */ "column_alias",
- /* 379 */ "expression",
- /* 380 */ "pseudo_column",
- /* 381 */ "column_reference",
- /* 382 */ "function_expression",
- /* 383 */ "subquery",
- /* 384 */ "star_func",
- /* 385 */ "star_func_para_list",
- /* 386 */ "noarg_func",
- /* 387 */ "other_para_list",
- /* 388 */ "star_func_para",
- /* 389 */ "predicate",
- /* 390 */ "compare_op",
- /* 391 */ "in_op",
- /* 392 */ "in_predicate_value",
- /* 393 */ "boolean_value_expression",
- /* 394 */ "boolean_primary",
- /* 395 */ "common_expression",
- /* 396 */ "from_clause_opt",
- /* 397 */ "table_reference_list",
- /* 398 */ "table_reference",
- /* 399 */ "table_primary",
- /* 400 */ "joined_table",
- /* 401 */ "alias_opt",
- /* 402 */ "parenthesized_joined_table",
- /* 403 */ "join_type",
- /* 404 */ "search_condition",
- /* 405 */ "query_specification",
- /* 406 */ "set_quantifier_opt",
- /* 407 */ "select_list",
- /* 408 */ "partition_by_clause_opt",
- /* 409 */ "range_opt",
- /* 410 */ "every_opt",
- /* 411 */ "fill_opt",
- /* 412 */ "twindow_clause_opt",
- /* 413 */ "group_by_clause_opt",
- /* 414 */ "having_clause_opt",
- /* 415 */ "select_item",
- /* 416 */ "fill_mode",
- /* 417 */ "group_by_list",
- /* 418 */ "query_expression_body",
- /* 419 */ "order_by_clause_opt",
- /* 420 */ "slimit_clause_opt",
- /* 421 */ "limit_clause_opt",
- /* 422 */ "query_primary",
- /* 423 */ "sort_specification_list",
- /* 424 */ "sort_specification",
- /* 425 */ "ordering_specification_opt",
- /* 426 */ "null_ordering_opt",
+ /* 74 */ "TSDB_PAGESIZE",
+ /* 75 */ "PRECISION",
+ /* 76 */ "REPLICA",
+ /* 77 */ "STRICT",
+ /* 78 */ "VGROUPS",
+ /* 79 */ "SINGLE_STABLE",
+ /* 80 */ "RETENTIONS",
+ /* 81 */ "SCHEMALESS",
+ /* 82 */ "WAL_LEVEL",
+ /* 83 */ "WAL_FSYNC_PERIOD",
+ /* 84 */ "WAL_RETENTION_PERIOD",
+ /* 85 */ "WAL_RETENTION_SIZE",
+ /* 86 */ "WAL_ROLL_PERIOD",
+ /* 87 */ "WAL_SEGMENT_SIZE",
+ /* 88 */ "STT_TRIGGER",
+ /* 89 */ "TABLE_PREFIX",
+ /* 90 */ "TABLE_SUFFIX",
+ /* 91 */ "NK_COLON",
+ /* 92 */ "TABLE",
+ /* 93 */ "NK_LP",
+ /* 94 */ "NK_RP",
+ /* 95 */ "STABLE",
+ /* 96 */ "ADD",
+ /* 97 */ "COLUMN",
+ /* 98 */ "MODIFY",
+ /* 99 */ "RENAME",
+ /* 100 */ "TAG",
+ /* 101 */ "SET",
+ /* 102 */ "NK_EQ",
+ /* 103 */ "USING",
+ /* 104 */ "TAGS",
+ /* 105 */ "COMMENT",
+ /* 106 */ "BOOL",
+ /* 107 */ "TINYINT",
+ /* 108 */ "SMALLINT",
+ /* 109 */ "INT",
+ /* 110 */ "INTEGER",
+ /* 111 */ "BIGINT",
+ /* 112 */ "FLOAT",
+ /* 113 */ "DOUBLE",
+ /* 114 */ "BINARY",
+ /* 115 */ "TIMESTAMP",
+ /* 116 */ "NCHAR",
+ /* 117 */ "UNSIGNED",
+ /* 118 */ "JSON",
+ /* 119 */ "VARCHAR",
+ /* 120 */ "MEDIUMBLOB",
+ /* 121 */ "BLOB",
+ /* 122 */ "VARBINARY",
+ /* 123 */ "DECIMAL",
+ /* 124 */ "MAX_DELAY",
+ /* 125 */ "WATERMARK",
+ /* 126 */ "ROLLUP",
+ /* 127 */ "TTL",
+ /* 128 */ "SMA",
+ /* 129 */ "FIRST",
+ /* 130 */ "LAST",
+ /* 131 */ "SHOW",
+ /* 132 */ "DATABASES",
+ /* 133 */ "TABLES",
+ /* 134 */ "STABLES",
+ /* 135 */ "MNODES",
+ /* 136 */ "MODULES",
+ /* 137 */ "QNODES",
+ /* 138 */ "FUNCTIONS",
+ /* 139 */ "INDEXES",
+ /* 140 */ "ACCOUNTS",
+ /* 141 */ "APPS",
+ /* 142 */ "CONNECTIONS",
+ /* 143 */ "LICENCES",
+ /* 144 */ "GRANTS",
+ /* 145 */ "QUERIES",
+ /* 146 */ "SCORES",
+ /* 147 */ "TOPICS",
+ /* 148 */ "VARIABLES",
+ /* 149 */ "BNODES",
+ /* 150 */ "SNODES",
+ /* 151 */ "CLUSTER",
+ /* 152 */ "TRANSACTIONS",
+ /* 153 */ "DISTRIBUTED",
+ /* 154 */ "CONSUMERS",
+ /* 155 */ "SUBSCRIPTIONS",
+ /* 156 */ "VNODES",
+ /* 157 */ "LIKE",
+ /* 158 */ "INDEX",
+ /* 159 */ "FUNCTION",
+ /* 160 */ "INTERVAL",
+ /* 161 */ "TOPIC",
+ /* 162 */ "AS",
+ /* 163 */ "WITH",
+ /* 164 */ "META",
+ /* 165 */ "CONSUMER",
+ /* 166 */ "GROUP",
+ /* 167 */ "DESC",
+ /* 168 */ "DESCRIBE",
+ /* 169 */ "RESET",
+ /* 170 */ "QUERY",
+ /* 171 */ "CACHE",
+ /* 172 */ "EXPLAIN",
+ /* 173 */ "ANALYZE",
+ /* 174 */ "VERBOSE",
+ /* 175 */ "NK_BOOL",
+ /* 176 */ "RATIO",
+ /* 177 */ "NK_FLOAT",
+ /* 178 */ "OUTPUTTYPE",
+ /* 179 */ "AGGREGATE",
+ /* 180 */ "BUFSIZE",
+ /* 181 */ "STREAM",
+ /* 182 */ "INTO",
+ /* 183 */ "TRIGGER",
+ /* 184 */ "AT_ONCE",
+ /* 185 */ "WINDOW_CLOSE",
+ /* 186 */ "IGNORE",
+ /* 187 */ "EXPIRED",
+ /* 188 */ "KILL",
+ /* 189 */ "CONNECTION",
+ /* 190 */ "TRANSACTION",
+ /* 191 */ "BALANCE",
+ /* 192 */ "VGROUP",
+ /* 193 */ "MERGE",
+ /* 194 */ "REDISTRIBUTE",
+ /* 195 */ "SPLIT",
+ /* 196 */ "DELETE",
+ /* 197 */ "INSERT",
+ /* 198 */ "NULL",
+ /* 199 */ "NK_QUESTION",
+ /* 200 */ "NK_ARROW",
+ /* 201 */ "ROWTS",
+ /* 202 */ "TBNAME",
+ /* 203 */ "QSTART",
+ /* 204 */ "QEND",
+ /* 205 */ "QDURATION",
+ /* 206 */ "WSTART",
+ /* 207 */ "WEND",
+ /* 208 */ "WDURATION",
+ /* 209 */ "CAST",
+ /* 210 */ "NOW",
+ /* 211 */ "TODAY",
+ /* 212 */ "TIMEZONE",
+ /* 213 */ "CLIENT_VERSION",
+ /* 214 */ "SERVER_VERSION",
+ /* 215 */ "SERVER_STATUS",
+ /* 216 */ "CURRENT_USER",
+ /* 217 */ "COUNT",
+ /* 218 */ "LAST_ROW",
+ /* 219 */ "BETWEEN",
+ /* 220 */ "IS",
+ /* 221 */ "NK_LT",
+ /* 222 */ "NK_GT",
+ /* 223 */ "NK_LE",
+ /* 224 */ "NK_GE",
+ /* 225 */ "NK_NE",
+ /* 226 */ "MATCH",
+ /* 227 */ "NMATCH",
+ /* 228 */ "CONTAINS",
+ /* 229 */ "IN",
+ /* 230 */ "JOIN",
+ /* 231 */ "INNER",
+ /* 232 */ "SELECT",
+ /* 233 */ "DISTINCT",
+ /* 234 */ "WHERE",
+ /* 235 */ "PARTITION",
+ /* 236 */ "BY",
+ /* 237 */ "SESSION",
+ /* 238 */ "STATE_WINDOW",
+ /* 239 */ "SLIDING",
+ /* 240 */ "FILL",
+ /* 241 */ "VALUE",
+ /* 242 */ "NONE",
+ /* 243 */ "PREV",
+ /* 244 */ "LINEAR",
+ /* 245 */ "NEXT",
+ /* 246 */ "HAVING",
+ /* 247 */ "RANGE",
+ /* 248 */ "EVERY",
+ /* 249 */ "ORDER",
+ /* 250 */ "SLIMIT",
+ /* 251 */ "SOFFSET",
+ /* 252 */ "LIMIT",
+ /* 253 */ "OFFSET",
+ /* 254 */ "ASC",
+ /* 255 */ "NULLS",
+ /* 256 */ "ABORT",
+ /* 257 */ "AFTER",
+ /* 258 */ "ATTACH",
+ /* 259 */ "BEFORE",
+ /* 260 */ "BEGIN",
+ /* 261 */ "BITAND",
+ /* 262 */ "BITNOT",
+ /* 263 */ "BITOR",
+ /* 264 */ "BLOCKS",
+ /* 265 */ "CHANGE",
+ /* 266 */ "COMMA",
+ /* 267 */ "COMPACT",
+ /* 268 */ "CONCAT",
+ /* 269 */ "CONFLICT",
+ /* 270 */ "COPY",
+ /* 271 */ "DEFERRED",
+ /* 272 */ "DELIMITERS",
+ /* 273 */ "DETACH",
+ /* 274 */ "DIVIDE",
+ /* 275 */ "DOT",
+ /* 276 */ "EACH",
+ /* 277 */ "END",
+ /* 278 */ "FAIL",
+ /* 279 */ "FILE",
+ /* 280 */ "FOR",
+ /* 281 */ "GLOB",
+ /* 282 */ "ID",
+ /* 283 */ "IMMEDIATE",
+ /* 284 */ "IMPORT",
+ /* 285 */ "INITIALLY",
+ /* 286 */ "INSTEAD",
+ /* 287 */ "ISNULL",
+ /* 288 */ "KEY",
+ /* 289 */ "NK_BITNOT",
+ /* 290 */ "NK_SEMI",
+ /* 291 */ "NOTNULL",
+ /* 292 */ "OF",
+ /* 293 */ "PLUS",
+ /* 294 */ "PRIVILEGE",
+ /* 295 */ "RAISE",
+ /* 296 */ "REPLACE",
+ /* 297 */ "RESTRICT",
+ /* 298 */ "ROW",
+ /* 299 */ "SEMI",
+ /* 300 */ "STAR",
+ /* 301 */ "STATEMENT",
+ /* 302 */ "STRING",
+ /* 303 */ "TIMES",
+ /* 304 */ "UPDATE",
+ /* 305 */ "VALUES",
+ /* 306 */ "VARIABLE",
+ /* 307 */ "VIEW",
+ /* 308 */ "WAL",
+ /* 309 */ "cmd",
+ /* 310 */ "account_options",
+ /* 311 */ "alter_account_options",
+ /* 312 */ "literal",
+ /* 313 */ "alter_account_option",
+ /* 314 */ "user_name",
+ /* 315 */ "sysinfo_opt",
+ /* 316 */ "privileges",
+ /* 317 */ "priv_level",
+ /* 318 */ "priv_type_list",
+ /* 319 */ "priv_type",
+ /* 320 */ "db_name",
+ /* 321 */ "dnode_endpoint",
+ /* 322 */ "not_exists_opt",
+ /* 323 */ "db_options",
+ /* 324 */ "exists_opt",
+ /* 325 */ "alter_db_options",
+ /* 326 */ "integer_list",
+ /* 327 */ "variable_list",
+ /* 328 */ "retention_list",
+ /* 329 */ "alter_db_option",
+ /* 330 */ "retention",
+ /* 331 */ "full_table_name",
+ /* 332 */ "column_def_list",
+ /* 333 */ "tags_def_opt",
+ /* 334 */ "table_options",
+ /* 335 */ "multi_create_clause",
+ /* 336 */ "tags_def",
+ /* 337 */ "multi_drop_clause",
+ /* 338 */ "alter_table_clause",
+ /* 339 */ "alter_table_options",
+ /* 340 */ "column_name",
+ /* 341 */ "type_name",
+ /* 342 */ "signed_literal",
+ /* 343 */ "create_subtable_clause",
+ /* 344 */ "specific_cols_opt",
+ /* 345 */ "expression_list",
+ /* 346 */ "drop_table_clause",
+ /* 347 */ "col_name_list",
+ /* 348 */ "table_name",
+ /* 349 */ "column_def",
+ /* 350 */ "duration_list",
+ /* 351 */ "rollup_func_list",
+ /* 352 */ "alter_table_option",
+ /* 353 */ "duration_literal",
+ /* 354 */ "rollup_func_name",
+ /* 355 */ "function_name",
+ /* 356 */ "col_name",
+ /* 357 */ "db_name_cond_opt",
+ /* 358 */ "like_pattern_opt",
+ /* 359 */ "table_name_cond",
+ /* 360 */ "from_db_opt",
+ /* 361 */ "index_options",
+ /* 362 */ "func_list",
+ /* 363 */ "sliding_opt",
+ /* 364 */ "sma_stream_opt",
+ /* 365 */ "func",
+ /* 366 */ "stream_options",
+ /* 367 */ "topic_name",
+ /* 368 */ "query_expression",
+ /* 369 */ "cgroup_name",
+ /* 370 */ "analyze_opt",
+ /* 371 */ "explain_options",
+ /* 372 */ "agg_func_opt",
+ /* 373 */ "bufsize_opt",
+ /* 374 */ "stream_name",
+ /* 375 */ "dnode_list",
+ /* 376 */ "where_clause_opt",
+ /* 377 */ "signed",
+ /* 378 */ "literal_func",
+ /* 379 */ "literal_list",
+ /* 380 */ "table_alias",
+ /* 381 */ "column_alias",
+ /* 382 */ "expression",
+ /* 383 */ "pseudo_column",
+ /* 384 */ "column_reference",
+ /* 385 */ "function_expression",
+ /* 386 */ "subquery",
+ /* 387 */ "star_func",
+ /* 388 */ "star_func_para_list",
+ /* 389 */ "noarg_func",
+ /* 390 */ "other_para_list",
+ /* 391 */ "star_func_para",
+ /* 392 */ "predicate",
+ /* 393 */ "compare_op",
+ /* 394 */ "in_op",
+ /* 395 */ "in_predicate_value",
+ /* 396 */ "boolean_value_expression",
+ /* 397 */ "boolean_primary",
+ /* 398 */ "common_expression",
+ /* 399 */ "from_clause_opt",
+ /* 400 */ "table_reference_list",
+ /* 401 */ "table_reference",
+ /* 402 */ "table_primary",
+ /* 403 */ "joined_table",
+ /* 404 */ "alias_opt",
+ /* 405 */ "parenthesized_joined_table",
+ /* 406 */ "join_type",
+ /* 407 */ "search_condition",
+ /* 408 */ "query_specification",
+ /* 409 */ "set_quantifier_opt",
+ /* 410 */ "select_list",
+ /* 411 */ "partition_by_clause_opt",
+ /* 412 */ "range_opt",
+ /* 413 */ "every_opt",
+ /* 414 */ "fill_opt",
+ /* 415 */ "twindow_clause_opt",
+ /* 416 */ "group_by_clause_opt",
+ /* 417 */ "having_clause_opt",
+ /* 418 */ "select_item",
+ /* 419 */ "fill_mode",
+ /* 420 */ "group_by_list",
+ /* 421 */ "query_expression_body",
+ /* 422 */ "order_by_clause_opt",
+ /* 423 */ "slimit_clause_opt",
+ /* 424 */ "limit_clause_opt",
+ /* 425 */ "query_primary",
+ /* 426 */ "sort_specification_list",
+ /* 427 */ "sort_specification",
+ /* 428 */ "ordering_specification_opt",
+ /* 429 */ "null_ordering_opt",
};
#endif /* defined(YYCOVERAGE) || !defined(NDEBUG) */
@@ -1834,412 +1837,417 @@ static const char *const yyRuleName[] = {
/* 82 */ "db_options ::= db_options KEEP variable_list",
/* 83 */ "db_options ::= db_options PAGES NK_INTEGER",
/* 84 */ "db_options ::= db_options PAGESIZE NK_INTEGER",
- /* 85 */ "db_options ::= db_options PRECISION NK_STRING",
- /* 86 */ "db_options ::= db_options REPLICA NK_INTEGER",
- /* 87 */ "db_options ::= db_options STRICT NK_STRING",
- /* 88 */ "db_options ::= db_options VGROUPS NK_INTEGER",
- /* 89 */ "db_options ::= db_options SINGLE_STABLE NK_INTEGER",
- /* 90 */ "db_options ::= db_options RETENTIONS retention_list",
- /* 91 */ "db_options ::= db_options SCHEMALESS NK_INTEGER",
- /* 92 */ "db_options ::= db_options WAL_LEVEL NK_INTEGER",
- /* 93 */ "db_options ::= db_options WAL_FSYNC_PERIOD NK_INTEGER",
- /* 94 */ "db_options ::= db_options WAL_RETENTION_PERIOD NK_INTEGER",
- /* 95 */ "db_options ::= db_options WAL_RETENTION_PERIOD NK_MINUS NK_INTEGER",
- /* 96 */ "db_options ::= db_options WAL_RETENTION_SIZE NK_INTEGER",
- /* 97 */ "db_options ::= db_options WAL_RETENTION_SIZE NK_MINUS NK_INTEGER",
- /* 98 */ "db_options ::= db_options WAL_ROLL_PERIOD NK_INTEGER",
- /* 99 */ "db_options ::= db_options WAL_SEGMENT_SIZE NK_INTEGER",
- /* 100 */ "alter_db_options ::= alter_db_option",
- /* 101 */ "alter_db_options ::= alter_db_options alter_db_option",
- /* 102 */ "alter_db_option ::= CACHEMODEL NK_STRING",
- /* 103 */ "alter_db_option ::= CACHESIZE NK_INTEGER",
- /* 104 */ "alter_db_option ::= WAL_FSYNC_PERIOD NK_INTEGER",
- /* 105 */ "alter_db_option ::= KEEP integer_list",
- /* 106 */ "alter_db_option ::= KEEP variable_list",
- /* 107 */ "alter_db_option ::= WAL_LEVEL NK_INTEGER",
- /* 108 */ "integer_list ::= NK_INTEGER",
- /* 109 */ "integer_list ::= integer_list NK_COMMA NK_INTEGER",
- /* 110 */ "variable_list ::= NK_VARIABLE",
- /* 111 */ "variable_list ::= variable_list NK_COMMA NK_VARIABLE",
- /* 112 */ "retention_list ::= retention",
- /* 113 */ "retention_list ::= retention_list NK_COMMA retention",
- /* 114 */ "retention ::= NK_VARIABLE NK_COLON NK_VARIABLE",
- /* 115 */ "cmd ::= CREATE TABLE not_exists_opt full_table_name NK_LP column_def_list NK_RP tags_def_opt table_options",
- /* 116 */ "cmd ::= CREATE TABLE multi_create_clause",
- /* 117 */ "cmd ::= CREATE STABLE not_exists_opt full_table_name NK_LP column_def_list NK_RP tags_def table_options",
- /* 118 */ "cmd ::= DROP TABLE multi_drop_clause",
- /* 119 */ "cmd ::= DROP STABLE exists_opt full_table_name",
- /* 120 */ "cmd ::= ALTER TABLE alter_table_clause",
- /* 121 */ "cmd ::= ALTER STABLE alter_table_clause",
- /* 122 */ "alter_table_clause ::= full_table_name alter_table_options",
- /* 123 */ "alter_table_clause ::= full_table_name ADD COLUMN column_name type_name",
- /* 124 */ "alter_table_clause ::= full_table_name DROP COLUMN column_name",
- /* 125 */ "alter_table_clause ::= full_table_name MODIFY COLUMN column_name type_name",
- /* 126 */ "alter_table_clause ::= full_table_name RENAME COLUMN column_name column_name",
- /* 127 */ "alter_table_clause ::= full_table_name ADD TAG column_name type_name",
- /* 128 */ "alter_table_clause ::= full_table_name DROP TAG column_name",
- /* 129 */ "alter_table_clause ::= full_table_name MODIFY TAG column_name type_name",
- /* 130 */ "alter_table_clause ::= full_table_name RENAME TAG column_name column_name",
- /* 131 */ "alter_table_clause ::= full_table_name SET TAG column_name NK_EQ signed_literal",
- /* 132 */ "multi_create_clause ::= create_subtable_clause",
- /* 133 */ "multi_create_clause ::= multi_create_clause create_subtable_clause",
- /* 134 */ "create_subtable_clause ::= not_exists_opt full_table_name USING full_table_name specific_cols_opt TAGS NK_LP expression_list NK_RP table_options",
- /* 135 */ "multi_drop_clause ::= drop_table_clause",
- /* 136 */ "multi_drop_clause ::= multi_drop_clause drop_table_clause",
- /* 137 */ "drop_table_clause ::= exists_opt full_table_name",
- /* 138 */ "specific_cols_opt ::=",
- /* 139 */ "specific_cols_opt ::= NK_LP col_name_list NK_RP",
- /* 140 */ "full_table_name ::= table_name",
- /* 141 */ "full_table_name ::= db_name NK_DOT table_name",
- /* 142 */ "column_def_list ::= column_def",
- /* 143 */ "column_def_list ::= column_def_list NK_COMMA column_def",
- /* 144 */ "column_def ::= column_name type_name",
- /* 145 */ "column_def ::= column_name type_name COMMENT NK_STRING",
- /* 146 */ "type_name ::= BOOL",
- /* 147 */ "type_name ::= TINYINT",
- /* 148 */ "type_name ::= SMALLINT",
- /* 149 */ "type_name ::= INT",
- /* 150 */ "type_name ::= INTEGER",
- /* 151 */ "type_name ::= BIGINT",
- /* 152 */ "type_name ::= FLOAT",
- /* 153 */ "type_name ::= DOUBLE",
- /* 154 */ "type_name ::= BINARY NK_LP NK_INTEGER NK_RP",
- /* 155 */ "type_name ::= TIMESTAMP",
- /* 156 */ "type_name ::= NCHAR NK_LP NK_INTEGER NK_RP",
- /* 157 */ "type_name ::= TINYINT UNSIGNED",
- /* 158 */ "type_name ::= SMALLINT UNSIGNED",
- /* 159 */ "type_name ::= INT UNSIGNED",
- /* 160 */ "type_name ::= BIGINT UNSIGNED",
- /* 161 */ "type_name ::= JSON",
- /* 162 */ "type_name ::= VARCHAR NK_LP NK_INTEGER NK_RP",
- /* 163 */ "type_name ::= MEDIUMBLOB",
- /* 164 */ "type_name ::= BLOB",
- /* 165 */ "type_name ::= VARBINARY NK_LP NK_INTEGER NK_RP",
- /* 166 */ "type_name ::= DECIMAL",
- /* 167 */ "type_name ::= DECIMAL NK_LP NK_INTEGER NK_RP",
- /* 168 */ "type_name ::= DECIMAL NK_LP NK_INTEGER NK_COMMA NK_INTEGER NK_RP",
- /* 169 */ "tags_def_opt ::=",
- /* 170 */ "tags_def_opt ::= tags_def",
- /* 171 */ "tags_def ::= TAGS NK_LP column_def_list NK_RP",
- /* 172 */ "table_options ::=",
- /* 173 */ "table_options ::= table_options COMMENT NK_STRING",
- /* 174 */ "table_options ::= table_options MAX_DELAY duration_list",
- /* 175 */ "table_options ::= table_options WATERMARK duration_list",
- /* 176 */ "table_options ::= table_options ROLLUP NK_LP rollup_func_list NK_RP",
- /* 177 */ "table_options ::= table_options TTL NK_INTEGER",
- /* 178 */ "table_options ::= table_options SMA NK_LP col_name_list NK_RP",
- /* 179 */ "alter_table_options ::= alter_table_option",
- /* 180 */ "alter_table_options ::= alter_table_options alter_table_option",
- /* 181 */ "alter_table_option ::= COMMENT NK_STRING",
- /* 182 */ "alter_table_option ::= TTL NK_INTEGER",
- /* 183 */ "duration_list ::= duration_literal",
- /* 184 */ "duration_list ::= duration_list NK_COMMA duration_literal",
- /* 185 */ "rollup_func_list ::= rollup_func_name",
- /* 186 */ "rollup_func_list ::= rollup_func_list NK_COMMA rollup_func_name",
- /* 187 */ "rollup_func_name ::= function_name",
- /* 188 */ "rollup_func_name ::= FIRST",
- /* 189 */ "rollup_func_name ::= LAST",
- /* 190 */ "col_name_list ::= col_name",
- /* 191 */ "col_name_list ::= col_name_list NK_COMMA col_name",
- /* 192 */ "col_name ::= column_name",
- /* 193 */ "cmd ::= SHOW DNODES",
- /* 194 */ "cmd ::= SHOW USERS",
- /* 195 */ "cmd ::= SHOW DATABASES",
- /* 196 */ "cmd ::= SHOW db_name_cond_opt TABLES like_pattern_opt",
- /* 197 */ "cmd ::= SHOW db_name_cond_opt STABLES like_pattern_opt",
- /* 198 */ "cmd ::= SHOW db_name_cond_opt VGROUPS",
- /* 199 */ "cmd ::= SHOW MNODES",
- /* 200 */ "cmd ::= SHOW MODULES",
- /* 201 */ "cmd ::= SHOW QNODES",
- /* 202 */ "cmd ::= SHOW FUNCTIONS",
- /* 203 */ "cmd ::= SHOW INDEXES FROM table_name_cond from_db_opt",
- /* 204 */ "cmd ::= SHOW STREAMS",
- /* 205 */ "cmd ::= SHOW ACCOUNTS",
- /* 206 */ "cmd ::= SHOW APPS",
- /* 207 */ "cmd ::= SHOW CONNECTIONS",
- /* 208 */ "cmd ::= SHOW LICENCES",
- /* 209 */ "cmd ::= SHOW GRANTS",
- /* 210 */ "cmd ::= SHOW CREATE DATABASE db_name",
- /* 211 */ "cmd ::= SHOW CREATE TABLE full_table_name",
- /* 212 */ "cmd ::= SHOW CREATE STABLE full_table_name",
- /* 213 */ "cmd ::= SHOW QUERIES",
- /* 214 */ "cmd ::= SHOW SCORES",
- /* 215 */ "cmd ::= SHOW TOPICS",
- /* 216 */ "cmd ::= SHOW VARIABLES",
- /* 217 */ "cmd ::= SHOW LOCAL VARIABLES",
- /* 218 */ "cmd ::= SHOW DNODE NK_INTEGER VARIABLES",
- /* 219 */ "cmd ::= SHOW BNODES",
- /* 220 */ "cmd ::= SHOW SNODES",
- /* 221 */ "cmd ::= SHOW CLUSTER",
- /* 222 */ "cmd ::= SHOW TRANSACTIONS",
- /* 223 */ "cmd ::= SHOW TABLE DISTRIBUTED full_table_name",
- /* 224 */ "cmd ::= SHOW CONSUMERS",
- /* 225 */ "cmd ::= SHOW SUBSCRIPTIONS",
- /* 226 */ "cmd ::= SHOW TAGS FROM table_name_cond from_db_opt",
- /* 227 */ "db_name_cond_opt ::=",
- /* 228 */ "db_name_cond_opt ::= db_name NK_DOT",
- /* 229 */ "like_pattern_opt ::=",
- /* 230 */ "like_pattern_opt ::= LIKE NK_STRING",
- /* 231 */ "table_name_cond ::= table_name",
- /* 232 */ "from_db_opt ::=",
- /* 233 */ "from_db_opt ::= FROM db_name",
- /* 234 */ "cmd ::= CREATE SMA INDEX not_exists_opt full_table_name ON full_table_name index_options",
- /* 235 */ "cmd ::= DROP INDEX exists_opt full_table_name",
- /* 236 */ "index_options ::= FUNCTION NK_LP func_list NK_RP INTERVAL NK_LP duration_literal NK_RP sliding_opt sma_stream_opt",
- /* 237 */ "index_options ::= FUNCTION NK_LP func_list NK_RP INTERVAL NK_LP duration_literal NK_COMMA duration_literal NK_RP sliding_opt sma_stream_opt",
- /* 238 */ "func_list ::= func",
- /* 239 */ "func_list ::= func_list NK_COMMA func",
- /* 240 */ "func ::= function_name NK_LP expression_list NK_RP",
- /* 241 */ "sma_stream_opt ::=",
- /* 242 */ "sma_stream_opt ::= stream_options WATERMARK duration_literal",
- /* 243 */ "sma_stream_opt ::= stream_options MAX_DELAY duration_literal",
- /* 244 */ "cmd ::= CREATE TOPIC not_exists_opt topic_name AS query_expression",
- /* 245 */ "cmd ::= CREATE TOPIC not_exists_opt topic_name AS DATABASE db_name",
- /* 246 */ "cmd ::= CREATE TOPIC not_exists_opt topic_name WITH META AS DATABASE db_name",
- /* 247 */ "cmd ::= CREATE TOPIC not_exists_opt topic_name AS STABLE full_table_name",
- /* 248 */ "cmd ::= CREATE TOPIC not_exists_opt topic_name WITH META AS STABLE full_table_name",
- /* 249 */ "cmd ::= DROP TOPIC exists_opt topic_name",
- /* 250 */ "cmd ::= DROP CONSUMER GROUP exists_opt cgroup_name ON topic_name",
- /* 251 */ "cmd ::= DESC full_table_name",
- /* 252 */ "cmd ::= DESCRIBE full_table_name",
- /* 253 */ "cmd ::= RESET QUERY CACHE",
- /* 254 */ "cmd ::= EXPLAIN analyze_opt explain_options query_expression",
- /* 255 */ "analyze_opt ::=",
- /* 256 */ "analyze_opt ::= ANALYZE",
- /* 257 */ "explain_options ::=",
- /* 258 */ "explain_options ::= explain_options VERBOSE NK_BOOL",
- /* 259 */ "explain_options ::= explain_options RATIO NK_FLOAT",
- /* 260 */ "cmd ::= CREATE agg_func_opt FUNCTION not_exists_opt function_name AS NK_STRING OUTPUTTYPE type_name bufsize_opt",
- /* 261 */ "cmd ::= DROP FUNCTION exists_opt function_name",
- /* 262 */ "agg_func_opt ::=",
- /* 263 */ "agg_func_opt ::= AGGREGATE",
- /* 264 */ "bufsize_opt ::=",
- /* 265 */ "bufsize_opt ::= BUFSIZE NK_INTEGER",
- /* 266 */ "cmd ::= CREATE STREAM not_exists_opt stream_name stream_options into_opt AS query_expression",
- /* 267 */ "cmd ::= DROP STREAM exists_opt stream_name",
- /* 268 */ "into_opt ::=",
- /* 269 */ "into_opt ::= INTO full_table_name",
- /* 270 */ "stream_options ::=",
- /* 271 */ "stream_options ::= stream_options TRIGGER AT_ONCE",
- /* 272 */ "stream_options ::= stream_options TRIGGER WINDOW_CLOSE",
- /* 273 */ "stream_options ::= stream_options TRIGGER MAX_DELAY duration_literal",
- /* 274 */ "stream_options ::= stream_options WATERMARK duration_literal",
- /* 275 */ "stream_options ::= stream_options IGNORE EXPIRED NK_INTEGER",
- /* 276 */ "cmd ::= KILL CONNECTION NK_INTEGER",
- /* 277 */ "cmd ::= KILL QUERY NK_STRING",
- /* 278 */ "cmd ::= KILL TRANSACTION NK_INTEGER",
- /* 279 */ "cmd ::= BALANCE VGROUP",
- /* 280 */ "cmd ::= MERGE VGROUP NK_INTEGER NK_INTEGER",
- /* 281 */ "cmd ::= REDISTRIBUTE VGROUP NK_INTEGER dnode_list",
- /* 282 */ "cmd ::= SPLIT VGROUP NK_INTEGER",
- /* 283 */ "dnode_list ::= DNODE NK_INTEGER",
- /* 284 */ "dnode_list ::= dnode_list DNODE NK_INTEGER",
- /* 285 */ "cmd ::= DELETE FROM full_table_name where_clause_opt",
- /* 286 */ "cmd ::= query_expression",
- /* 287 */ "cmd ::= INSERT INTO full_table_name NK_LP col_name_list NK_RP query_expression",
- /* 288 */ "cmd ::= INSERT INTO full_table_name query_expression",
- /* 289 */ "literal ::= NK_INTEGER",
- /* 290 */ "literal ::= NK_FLOAT",
- /* 291 */ "literal ::= NK_STRING",
- /* 292 */ "literal ::= NK_BOOL",
- /* 293 */ "literal ::= TIMESTAMP NK_STRING",
- /* 294 */ "literal ::= duration_literal",
- /* 295 */ "literal ::= NULL",
- /* 296 */ "literal ::= NK_QUESTION",
- /* 297 */ "duration_literal ::= NK_VARIABLE",
- /* 298 */ "signed ::= NK_INTEGER",
- /* 299 */ "signed ::= NK_PLUS NK_INTEGER",
- /* 300 */ "signed ::= NK_MINUS NK_INTEGER",
- /* 301 */ "signed ::= NK_FLOAT",
- /* 302 */ "signed ::= NK_PLUS NK_FLOAT",
- /* 303 */ "signed ::= NK_MINUS NK_FLOAT",
- /* 304 */ "signed_literal ::= signed",
- /* 305 */ "signed_literal ::= NK_STRING",
- /* 306 */ "signed_literal ::= NK_BOOL",
- /* 307 */ "signed_literal ::= TIMESTAMP NK_STRING",
- /* 308 */ "signed_literal ::= duration_literal",
- /* 309 */ "signed_literal ::= NULL",
- /* 310 */ "signed_literal ::= literal_func",
- /* 311 */ "signed_literal ::= NK_QUESTION",
- /* 312 */ "literal_list ::= signed_literal",
- /* 313 */ "literal_list ::= literal_list NK_COMMA signed_literal",
- /* 314 */ "db_name ::= NK_ID",
- /* 315 */ "table_name ::= NK_ID",
- /* 316 */ "column_name ::= NK_ID",
- /* 317 */ "function_name ::= NK_ID",
- /* 318 */ "table_alias ::= NK_ID",
- /* 319 */ "column_alias ::= NK_ID",
- /* 320 */ "user_name ::= NK_ID",
- /* 321 */ "topic_name ::= NK_ID",
- /* 322 */ "stream_name ::= NK_ID",
- /* 323 */ "cgroup_name ::= NK_ID",
- /* 324 */ "expression ::= literal",
- /* 325 */ "expression ::= pseudo_column",
- /* 326 */ "expression ::= column_reference",
- /* 327 */ "expression ::= function_expression",
- /* 328 */ "expression ::= subquery",
- /* 329 */ "expression ::= NK_LP expression NK_RP",
- /* 330 */ "expression ::= NK_PLUS expression",
- /* 331 */ "expression ::= NK_MINUS expression",
- /* 332 */ "expression ::= expression NK_PLUS expression",
- /* 333 */ "expression ::= expression NK_MINUS expression",
- /* 334 */ "expression ::= expression NK_STAR expression",
- /* 335 */ "expression ::= expression NK_SLASH expression",
- /* 336 */ "expression ::= expression NK_REM expression",
- /* 337 */ "expression ::= column_reference NK_ARROW NK_STRING",
- /* 338 */ "expression ::= expression NK_BITAND expression",
- /* 339 */ "expression ::= expression NK_BITOR expression",
- /* 340 */ "expression_list ::= expression",
- /* 341 */ "expression_list ::= expression_list NK_COMMA expression",
- /* 342 */ "column_reference ::= column_name",
- /* 343 */ "column_reference ::= table_name NK_DOT column_name",
- /* 344 */ "pseudo_column ::= ROWTS",
- /* 345 */ "pseudo_column ::= TBNAME",
- /* 346 */ "pseudo_column ::= table_name NK_DOT TBNAME",
- /* 347 */ "pseudo_column ::= QSTART",
- /* 348 */ "pseudo_column ::= QEND",
- /* 349 */ "pseudo_column ::= QDURATION",
- /* 350 */ "pseudo_column ::= WSTART",
- /* 351 */ "pseudo_column ::= WEND",
- /* 352 */ "pseudo_column ::= WDURATION",
- /* 353 */ "function_expression ::= function_name NK_LP expression_list NK_RP",
- /* 354 */ "function_expression ::= star_func NK_LP star_func_para_list NK_RP",
- /* 355 */ "function_expression ::= CAST NK_LP expression AS type_name NK_RP",
- /* 356 */ "function_expression ::= literal_func",
- /* 357 */ "literal_func ::= noarg_func NK_LP NK_RP",
- /* 358 */ "literal_func ::= NOW",
- /* 359 */ "noarg_func ::= NOW",
- /* 360 */ "noarg_func ::= TODAY",
- /* 361 */ "noarg_func ::= TIMEZONE",
- /* 362 */ "noarg_func ::= DATABASE",
- /* 363 */ "noarg_func ::= CLIENT_VERSION",
- /* 364 */ "noarg_func ::= SERVER_VERSION",
- /* 365 */ "noarg_func ::= SERVER_STATUS",
- /* 366 */ "noarg_func ::= CURRENT_USER",
- /* 367 */ "noarg_func ::= USER",
- /* 368 */ "star_func ::= COUNT",
- /* 369 */ "star_func ::= FIRST",
- /* 370 */ "star_func ::= LAST",
- /* 371 */ "star_func ::= LAST_ROW",
- /* 372 */ "star_func_para_list ::= NK_STAR",
- /* 373 */ "star_func_para_list ::= other_para_list",
- /* 374 */ "other_para_list ::= star_func_para",
- /* 375 */ "other_para_list ::= other_para_list NK_COMMA star_func_para",
- /* 376 */ "star_func_para ::= expression",
- /* 377 */ "star_func_para ::= table_name NK_DOT NK_STAR",
- /* 378 */ "predicate ::= expression compare_op expression",
- /* 379 */ "predicate ::= expression BETWEEN expression AND expression",
- /* 380 */ "predicate ::= expression NOT BETWEEN expression AND expression",
- /* 381 */ "predicate ::= expression IS NULL",
- /* 382 */ "predicate ::= expression IS NOT NULL",
- /* 383 */ "predicate ::= expression in_op in_predicate_value",
- /* 384 */ "compare_op ::= NK_LT",
- /* 385 */ "compare_op ::= NK_GT",
- /* 386 */ "compare_op ::= NK_LE",
- /* 387 */ "compare_op ::= NK_GE",
- /* 388 */ "compare_op ::= NK_NE",
- /* 389 */ "compare_op ::= NK_EQ",
- /* 390 */ "compare_op ::= LIKE",
- /* 391 */ "compare_op ::= NOT LIKE",
- /* 392 */ "compare_op ::= MATCH",
- /* 393 */ "compare_op ::= NMATCH",
- /* 394 */ "compare_op ::= CONTAINS",
- /* 395 */ "in_op ::= IN",
- /* 396 */ "in_op ::= NOT IN",
- /* 397 */ "in_predicate_value ::= NK_LP literal_list NK_RP",
- /* 398 */ "boolean_value_expression ::= boolean_primary",
- /* 399 */ "boolean_value_expression ::= NOT boolean_primary",
- /* 400 */ "boolean_value_expression ::= boolean_value_expression OR boolean_value_expression",
- /* 401 */ "boolean_value_expression ::= boolean_value_expression AND boolean_value_expression",
- /* 402 */ "boolean_primary ::= predicate",
- /* 403 */ "boolean_primary ::= NK_LP boolean_value_expression NK_RP",
- /* 404 */ "common_expression ::= expression",
- /* 405 */ "common_expression ::= boolean_value_expression",
- /* 406 */ "from_clause_opt ::=",
- /* 407 */ "from_clause_opt ::= FROM table_reference_list",
- /* 408 */ "table_reference_list ::= table_reference",
- /* 409 */ "table_reference_list ::= table_reference_list NK_COMMA table_reference",
- /* 410 */ "table_reference ::= table_primary",
- /* 411 */ "table_reference ::= joined_table",
- /* 412 */ "table_primary ::= table_name alias_opt",
- /* 413 */ "table_primary ::= db_name NK_DOT table_name alias_opt",
- /* 414 */ "table_primary ::= subquery alias_opt",
- /* 415 */ "table_primary ::= parenthesized_joined_table",
- /* 416 */ "alias_opt ::=",
- /* 417 */ "alias_opt ::= table_alias",
- /* 418 */ "alias_opt ::= AS table_alias",
- /* 419 */ "parenthesized_joined_table ::= NK_LP joined_table NK_RP",
- /* 420 */ "parenthesized_joined_table ::= NK_LP parenthesized_joined_table NK_RP",
- /* 421 */ "joined_table ::= table_reference join_type JOIN table_reference ON search_condition",
- /* 422 */ "join_type ::=",
- /* 423 */ "join_type ::= INNER",
- /* 424 */ "query_specification ::= SELECT set_quantifier_opt select_list from_clause_opt where_clause_opt partition_by_clause_opt range_opt every_opt fill_opt twindow_clause_opt group_by_clause_opt having_clause_opt",
- /* 425 */ "set_quantifier_opt ::=",
- /* 426 */ "set_quantifier_opt ::= DISTINCT",
- /* 427 */ "set_quantifier_opt ::= ALL",
- /* 428 */ "select_list ::= select_item",
- /* 429 */ "select_list ::= select_list NK_COMMA select_item",
- /* 430 */ "select_item ::= NK_STAR",
- /* 431 */ "select_item ::= common_expression",
- /* 432 */ "select_item ::= common_expression column_alias",
- /* 433 */ "select_item ::= common_expression AS column_alias",
- /* 434 */ "select_item ::= table_name NK_DOT NK_STAR",
- /* 435 */ "where_clause_opt ::=",
- /* 436 */ "where_clause_opt ::= WHERE search_condition",
- /* 437 */ "partition_by_clause_opt ::=",
- /* 438 */ "partition_by_clause_opt ::= PARTITION BY expression_list",
- /* 439 */ "twindow_clause_opt ::=",
- /* 440 */ "twindow_clause_opt ::= SESSION NK_LP column_reference NK_COMMA duration_literal NK_RP",
- /* 441 */ "twindow_clause_opt ::= STATE_WINDOW NK_LP expression NK_RP",
- /* 442 */ "twindow_clause_opt ::= INTERVAL NK_LP duration_literal NK_RP sliding_opt fill_opt",
- /* 443 */ "twindow_clause_opt ::= INTERVAL NK_LP duration_literal NK_COMMA duration_literal NK_RP sliding_opt fill_opt",
- /* 444 */ "sliding_opt ::=",
- /* 445 */ "sliding_opt ::= SLIDING NK_LP duration_literal NK_RP",
- /* 446 */ "fill_opt ::=",
- /* 447 */ "fill_opt ::= FILL NK_LP fill_mode NK_RP",
- /* 448 */ "fill_opt ::= FILL NK_LP VALUE NK_COMMA literal_list NK_RP",
- /* 449 */ "fill_mode ::= NONE",
- /* 450 */ "fill_mode ::= PREV",
- /* 451 */ "fill_mode ::= NULL",
- /* 452 */ "fill_mode ::= LINEAR",
- /* 453 */ "fill_mode ::= NEXT",
- /* 454 */ "group_by_clause_opt ::=",
- /* 455 */ "group_by_clause_opt ::= GROUP BY group_by_list",
- /* 456 */ "group_by_list ::= expression",
- /* 457 */ "group_by_list ::= group_by_list NK_COMMA expression",
- /* 458 */ "having_clause_opt ::=",
- /* 459 */ "having_clause_opt ::= HAVING search_condition",
- /* 460 */ "range_opt ::=",
- /* 461 */ "range_opt ::= RANGE NK_LP expression NK_COMMA expression NK_RP",
- /* 462 */ "every_opt ::=",
- /* 463 */ "every_opt ::= EVERY NK_LP duration_literal NK_RP",
- /* 464 */ "query_expression ::= query_expression_body order_by_clause_opt slimit_clause_opt limit_clause_opt",
- /* 465 */ "query_expression_body ::= query_primary",
- /* 466 */ "query_expression_body ::= query_expression_body UNION ALL query_expression_body",
- /* 467 */ "query_expression_body ::= query_expression_body UNION query_expression_body",
- /* 468 */ "query_primary ::= query_specification",
- /* 469 */ "query_primary ::= NK_LP query_expression_body order_by_clause_opt slimit_clause_opt limit_clause_opt NK_RP",
- /* 470 */ "order_by_clause_opt ::=",
- /* 471 */ "order_by_clause_opt ::= ORDER BY sort_specification_list",
- /* 472 */ "slimit_clause_opt ::=",
- /* 473 */ "slimit_clause_opt ::= SLIMIT NK_INTEGER",
- /* 474 */ "slimit_clause_opt ::= SLIMIT NK_INTEGER SOFFSET NK_INTEGER",
- /* 475 */ "slimit_clause_opt ::= SLIMIT NK_INTEGER NK_COMMA NK_INTEGER",
- /* 476 */ "limit_clause_opt ::=",
- /* 477 */ "limit_clause_opt ::= LIMIT NK_INTEGER",
- /* 478 */ "limit_clause_opt ::= LIMIT NK_INTEGER OFFSET NK_INTEGER",
- /* 479 */ "limit_clause_opt ::= LIMIT NK_INTEGER NK_COMMA NK_INTEGER",
- /* 480 */ "subquery ::= NK_LP query_expression NK_RP",
- /* 481 */ "search_condition ::= common_expression",
- /* 482 */ "sort_specification_list ::= sort_specification",
- /* 483 */ "sort_specification_list ::= sort_specification_list NK_COMMA sort_specification",
- /* 484 */ "sort_specification ::= expression ordering_specification_opt null_ordering_opt",
- /* 485 */ "ordering_specification_opt ::=",
- /* 486 */ "ordering_specification_opt ::= ASC",
- /* 487 */ "ordering_specification_opt ::= DESC",
- /* 488 */ "null_ordering_opt ::=",
- /* 489 */ "null_ordering_opt ::= NULLS FIRST",
- /* 490 */ "null_ordering_opt ::= NULLS LAST",
+ /* 85 */ "db_options ::= db_options TSDB_PAGESIZE NK_INTEGER",
+ /* 86 */ "db_options ::= db_options PRECISION NK_STRING",
+ /* 87 */ "db_options ::= db_options REPLICA NK_INTEGER",
+ /* 88 */ "db_options ::= db_options STRICT NK_STRING",
+ /* 89 */ "db_options ::= db_options VGROUPS NK_INTEGER",
+ /* 90 */ "db_options ::= db_options SINGLE_STABLE NK_INTEGER",
+ /* 91 */ "db_options ::= db_options RETENTIONS retention_list",
+ /* 92 */ "db_options ::= db_options SCHEMALESS NK_INTEGER",
+ /* 93 */ "db_options ::= db_options WAL_LEVEL NK_INTEGER",
+ /* 94 */ "db_options ::= db_options WAL_FSYNC_PERIOD NK_INTEGER",
+ /* 95 */ "db_options ::= db_options WAL_RETENTION_PERIOD NK_INTEGER",
+ /* 96 */ "db_options ::= db_options WAL_RETENTION_PERIOD NK_MINUS NK_INTEGER",
+ /* 97 */ "db_options ::= db_options WAL_RETENTION_SIZE NK_INTEGER",
+ /* 98 */ "db_options ::= db_options WAL_RETENTION_SIZE NK_MINUS NK_INTEGER",
+ /* 99 */ "db_options ::= db_options WAL_ROLL_PERIOD NK_INTEGER",
+ /* 100 */ "db_options ::= db_options WAL_SEGMENT_SIZE NK_INTEGER",
+ /* 101 */ "db_options ::= db_options STT_TRIGGER NK_INTEGER",
+ /* 102 */ "db_options ::= db_options TABLE_PREFIX NK_INTEGER",
+ /* 103 */ "db_options ::= db_options TABLE_SUFFIX NK_INTEGER",
+ /* 104 */ "alter_db_options ::= alter_db_option",
+ /* 105 */ "alter_db_options ::= alter_db_options alter_db_option",
+ /* 106 */ "alter_db_option ::= CACHEMODEL NK_STRING",
+ /* 107 */ "alter_db_option ::= CACHESIZE NK_INTEGER",
+ /* 108 */ "alter_db_option ::= WAL_FSYNC_PERIOD NK_INTEGER",
+ /* 109 */ "alter_db_option ::= KEEP integer_list",
+ /* 110 */ "alter_db_option ::= KEEP variable_list",
+ /* 111 */ "alter_db_option ::= WAL_LEVEL NK_INTEGER",
+ /* 112 */ "alter_db_option ::= STT_TRIGGER NK_INTEGER",
+ /* 113 */ "integer_list ::= NK_INTEGER",
+ /* 114 */ "integer_list ::= integer_list NK_COMMA NK_INTEGER",
+ /* 115 */ "variable_list ::= NK_VARIABLE",
+ /* 116 */ "variable_list ::= variable_list NK_COMMA NK_VARIABLE",
+ /* 117 */ "retention_list ::= retention",
+ /* 118 */ "retention_list ::= retention_list NK_COMMA retention",
+ /* 119 */ "retention ::= NK_VARIABLE NK_COLON NK_VARIABLE",
+ /* 120 */ "cmd ::= CREATE TABLE not_exists_opt full_table_name NK_LP column_def_list NK_RP tags_def_opt table_options",
+ /* 121 */ "cmd ::= CREATE TABLE multi_create_clause",
+ /* 122 */ "cmd ::= CREATE STABLE not_exists_opt full_table_name NK_LP column_def_list NK_RP tags_def table_options",
+ /* 123 */ "cmd ::= DROP TABLE multi_drop_clause",
+ /* 124 */ "cmd ::= DROP STABLE exists_opt full_table_name",
+ /* 125 */ "cmd ::= ALTER TABLE alter_table_clause",
+ /* 126 */ "cmd ::= ALTER STABLE alter_table_clause",
+ /* 127 */ "alter_table_clause ::= full_table_name alter_table_options",
+ /* 128 */ "alter_table_clause ::= full_table_name ADD COLUMN column_name type_name",
+ /* 129 */ "alter_table_clause ::= full_table_name DROP COLUMN column_name",
+ /* 130 */ "alter_table_clause ::= full_table_name MODIFY COLUMN column_name type_name",
+ /* 131 */ "alter_table_clause ::= full_table_name RENAME COLUMN column_name column_name",
+ /* 132 */ "alter_table_clause ::= full_table_name ADD TAG column_name type_name",
+ /* 133 */ "alter_table_clause ::= full_table_name DROP TAG column_name",
+ /* 134 */ "alter_table_clause ::= full_table_name MODIFY TAG column_name type_name",
+ /* 135 */ "alter_table_clause ::= full_table_name RENAME TAG column_name column_name",
+ /* 136 */ "alter_table_clause ::= full_table_name SET TAG column_name NK_EQ signed_literal",
+ /* 137 */ "multi_create_clause ::= create_subtable_clause",
+ /* 138 */ "multi_create_clause ::= multi_create_clause create_subtable_clause",
+ /* 139 */ "create_subtable_clause ::= not_exists_opt full_table_name USING full_table_name specific_cols_opt TAGS NK_LP expression_list NK_RP table_options",
+ /* 140 */ "multi_drop_clause ::= drop_table_clause",
+ /* 141 */ "multi_drop_clause ::= multi_drop_clause drop_table_clause",
+ /* 142 */ "drop_table_clause ::= exists_opt full_table_name",
+ /* 143 */ "specific_cols_opt ::=",
+ /* 144 */ "specific_cols_opt ::= NK_LP col_name_list NK_RP",
+ /* 145 */ "full_table_name ::= table_name",
+ /* 146 */ "full_table_name ::= db_name NK_DOT table_name",
+ /* 147 */ "column_def_list ::= column_def",
+ /* 148 */ "column_def_list ::= column_def_list NK_COMMA column_def",
+ /* 149 */ "column_def ::= column_name type_name",
+ /* 150 */ "column_def ::= column_name type_name COMMENT NK_STRING",
+ /* 151 */ "type_name ::= BOOL",
+ /* 152 */ "type_name ::= TINYINT",
+ /* 153 */ "type_name ::= SMALLINT",
+ /* 154 */ "type_name ::= INT",
+ /* 155 */ "type_name ::= INTEGER",
+ /* 156 */ "type_name ::= BIGINT",
+ /* 157 */ "type_name ::= FLOAT",
+ /* 158 */ "type_name ::= DOUBLE",
+ /* 159 */ "type_name ::= BINARY NK_LP NK_INTEGER NK_RP",
+ /* 160 */ "type_name ::= TIMESTAMP",
+ /* 161 */ "type_name ::= NCHAR NK_LP NK_INTEGER NK_RP",
+ /* 162 */ "type_name ::= TINYINT UNSIGNED",
+ /* 163 */ "type_name ::= SMALLINT UNSIGNED",
+ /* 164 */ "type_name ::= INT UNSIGNED",
+ /* 165 */ "type_name ::= BIGINT UNSIGNED",
+ /* 166 */ "type_name ::= JSON",
+ /* 167 */ "type_name ::= VARCHAR NK_LP NK_INTEGER NK_RP",
+ /* 168 */ "type_name ::= MEDIUMBLOB",
+ /* 169 */ "type_name ::= BLOB",
+ /* 170 */ "type_name ::= VARBINARY NK_LP NK_INTEGER NK_RP",
+ /* 171 */ "type_name ::= DECIMAL",
+ /* 172 */ "type_name ::= DECIMAL NK_LP NK_INTEGER NK_RP",
+ /* 173 */ "type_name ::= DECIMAL NK_LP NK_INTEGER NK_COMMA NK_INTEGER NK_RP",
+ /* 174 */ "tags_def_opt ::=",
+ /* 175 */ "tags_def_opt ::= tags_def",
+ /* 176 */ "tags_def ::= TAGS NK_LP column_def_list NK_RP",
+ /* 177 */ "table_options ::=",
+ /* 178 */ "table_options ::= table_options COMMENT NK_STRING",
+ /* 179 */ "table_options ::= table_options MAX_DELAY duration_list",
+ /* 180 */ "table_options ::= table_options WATERMARK duration_list",
+ /* 181 */ "table_options ::= table_options ROLLUP NK_LP rollup_func_list NK_RP",
+ /* 182 */ "table_options ::= table_options TTL NK_INTEGER",
+ /* 183 */ "table_options ::= table_options SMA NK_LP col_name_list NK_RP",
+ /* 184 */ "alter_table_options ::= alter_table_option",
+ /* 185 */ "alter_table_options ::= alter_table_options alter_table_option",
+ /* 186 */ "alter_table_option ::= COMMENT NK_STRING",
+ /* 187 */ "alter_table_option ::= TTL NK_INTEGER",
+ /* 188 */ "duration_list ::= duration_literal",
+ /* 189 */ "duration_list ::= duration_list NK_COMMA duration_literal",
+ /* 190 */ "rollup_func_list ::= rollup_func_name",
+ /* 191 */ "rollup_func_list ::= rollup_func_list NK_COMMA rollup_func_name",
+ /* 192 */ "rollup_func_name ::= function_name",
+ /* 193 */ "rollup_func_name ::= FIRST",
+ /* 194 */ "rollup_func_name ::= LAST",
+ /* 195 */ "col_name_list ::= col_name",
+ /* 196 */ "col_name_list ::= col_name_list NK_COMMA col_name",
+ /* 197 */ "col_name ::= column_name",
+ /* 198 */ "cmd ::= SHOW DNODES",
+ /* 199 */ "cmd ::= SHOW USERS",
+ /* 200 */ "cmd ::= SHOW DATABASES",
+ /* 201 */ "cmd ::= SHOW db_name_cond_opt TABLES like_pattern_opt",
+ /* 202 */ "cmd ::= SHOW db_name_cond_opt STABLES like_pattern_opt",
+ /* 203 */ "cmd ::= SHOW db_name_cond_opt VGROUPS",
+ /* 204 */ "cmd ::= SHOW MNODES",
+ /* 205 */ "cmd ::= SHOW MODULES",
+ /* 206 */ "cmd ::= SHOW QNODES",
+ /* 207 */ "cmd ::= SHOW FUNCTIONS",
+ /* 208 */ "cmd ::= SHOW INDEXES FROM table_name_cond from_db_opt",
+ /* 209 */ "cmd ::= SHOW STREAMS",
+ /* 210 */ "cmd ::= SHOW ACCOUNTS",
+ /* 211 */ "cmd ::= SHOW APPS",
+ /* 212 */ "cmd ::= SHOW CONNECTIONS",
+ /* 213 */ "cmd ::= SHOW LICENCES",
+ /* 214 */ "cmd ::= SHOW GRANTS",
+ /* 215 */ "cmd ::= SHOW CREATE DATABASE db_name",
+ /* 216 */ "cmd ::= SHOW CREATE TABLE full_table_name",
+ /* 217 */ "cmd ::= SHOW CREATE STABLE full_table_name",
+ /* 218 */ "cmd ::= SHOW QUERIES",
+ /* 219 */ "cmd ::= SHOW SCORES",
+ /* 220 */ "cmd ::= SHOW TOPICS",
+ /* 221 */ "cmd ::= SHOW VARIABLES",
+ /* 222 */ "cmd ::= SHOW LOCAL VARIABLES",
+ /* 223 */ "cmd ::= SHOW DNODE NK_INTEGER VARIABLES",
+ /* 224 */ "cmd ::= SHOW BNODES",
+ /* 225 */ "cmd ::= SHOW SNODES",
+ /* 226 */ "cmd ::= SHOW CLUSTER",
+ /* 227 */ "cmd ::= SHOW TRANSACTIONS",
+ /* 228 */ "cmd ::= SHOW TABLE DISTRIBUTED full_table_name",
+ /* 229 */ "cmd ::= SHOW CONSUMERS",
+ /* 230 */ "cmd ::= SHOW SUBSCRIPTIONS",
+ /* 231 */ "cmd ::= SHOW TAGS FROM table_name_cond from_db_opt",
+ /* 232 */ "cmd ::= SHOW VNODES NK_INTEGER",
+ /* 233 */ "cmd ::= SHOW VNODES NK_STRING",
+ /* 234 */ "db_name_cond_opt ::=",
+ /* 235 */ "db_name_cond_opt ::= db_name NK_DOT",
+ /* 236 */ "like_pattern_opt ::=",
+ /* 237 */ "like_pattern_opt ::= LIKE NK_STRING",
+ /* 238 */ "table_name_cond ::= table_name",
+ /* 239 */ "from_db_opt ::=",
+ /* 240 */ "from_db_opt ::= FROM db_name",
+ /* 241 */ "cmd ::= CREATE SMA INDEX not_exists_opt full_table_name ON full_table_name index_options",
+ /* 242 */ "cmd ::= DROP INDEX exists_opt full_table_name",
+ /* 243 */ "index_options ::= FUNCTION NK_LP func_list NK_RP INTERVAL NK_LP duration_literal NK_RP sliding_opt sma_stream_opt",
+ /* 244 */ "index_options ::= FUNCTION NK_LP func_list NK_RP INTERVAL NK_LP duration_literal NK_COMMA duration_literal NK_RP sliding_opt sma_stream_opt",
+ /* 245 */ "func_list ::= func",
+ /* 246 */ "func_list ::= func_list NK_COMMA func",
+ /* 247 */ "func ::= function_name NK_LP expression_list NK_RP",
+ /* 248 */ "sma_stream_opt ::=",
+ /* 249 */ "sma_stream_opt ::= stream_options WATERMARK duration_literal",
+ /* 250 */ "sma_stream_opt ::= stream_options MAX_DELAY duration_literal",
+ /* 251 */ "cmd ::= CREATE TOPIC not_exists_opt topic_name AS query_expression",
+ /* 252 */ "cmd ::= CREATE TOPIC not_exists_opt topic_name AS DATABASE db_name",
+ /* 253 */ "cmd ::= CREATE TOPIC not_exists_opt topic_name WITH META AS DATABASE db_name",
+ /* 254 */ "cmd ::= CREATE TOPIC not_exists_opt topic_name AS STABLE full_table_name",
+ /* 255 */ "cmd ::= CREATE TOPIC not_exists_opt topic_name WITH META AS STABLE full_table_name",
+ /* 256 */ "cmd ::= DROP TOPIC exists_opt topic_name",
+ /* 257 */ "cmd ::= DROP CONSUMER GROUP exists_opt cgroup_name ON topic_name",
+ /* 258 */ "cmd ::= DESC full_table_name",
+ /* 259 */ "cmd ::= DESCRIBE full_table_name",
+ /* 260 */ "cmd ::= RESET QUERY CACHE",
+ /* 261 */ "cmd ::= EXPLAIN analyze_opt explain_options query_expression",
+ /* 262 */ "analyze_opt ::=",
+ /* 263 */ "analyze_opt ::= ANALYZE",
+ /* 264 */ "explain_options ::=",
+ /* 265 */ "explain_options ::= explain_options VERBOSE NK_BOOL",
+ /* 266 */ "explain_options ::= explain_options RATIO NK_FLOAT",
+ /* 267 */ "cmd ::= CREATE agg_func_opt FUNCTION not_exists_opt function_name AS NK_STRING OUTPUTTYPE type_name bufsize_opt",
+ /* 268 */ "cmd ::= DROP FUNCTION exists_opt function_name",
+ /* 269 */ "agg_func_opt ::=",
+ /* 270 */ "agg_func_opt ::= AGGREGATE",
+ /* 271 */ "bufsize_opt ::=",
+ /* 272 */ "bufsize_opt ::= BUFSIZE NK_INTEGER",
+ /* 273 */ "cmd ::= CREATE STREAM not_exists_opt stream_name stream_options INTO full_table_name AS query_expression",
+ /* 274 */ "cmd ::= DROP STREAM exists_opt stream_name",
+ /* 275 */ "stream_options ::=",
+ /* 276 */ "stream_options ::= stream_options TRIGGER AT_ONCE",
+ /* 277 */ "stream_options ::= stream_options TRIGGER WINDOW_CLOSE",
+ /* 278 */ "stream_options ::= stream_options TRIGGER MAX_DELAY duration_literal",
+ /* 279 */ "stream_options ::= stream_options WATERMARK duration_literal",
+ /* 280 */ "stream_options ::= stream_options IGNORE EXPIRED NK_INTEGER",
+ /* 281 */ "cmd ::= KILL CONNECTION NK_INTEGER",
+ /* 282 */ "cmd ::= KILL QUERY NK_STRING",
+ /* 283 */ "cmd ::= KILL TRANSACTION NK_INTEGER",
+ /* 284 */ "cmd ::= BALANCE VGROUP",
+ /* 285 */ "cmd ::= MERGE VGROUP NK_INTEGER NK_INTEGER",
+ /* 286 */ "cmd ::= REDISTRIBUTE VGROUP NK_INTEGER dnode_list",
+ /* 287 */ "cmd ::= SPLIT VGROUP NK_INTEGER",
+ /* 288 */ "dnode_list ::= DNODE NK_INTEGER",
+ /* 289 */ "dnode_list ::= dnode_list DNODE NK_INTEGER",
+ /* 290 */ "cmd ::= DELETE FROM full_table_name where_clause_opt",
+ /* 291 */ "cmd ::= query_expression",
+ /* 292 */ "cmd ::= INSERT INTO full_table_name NK_LP col_name_list NK_RP query_expression",
+ /* 293 */ "cmd ::= INSERT INTO full_table_name query_expression",
+ /* 294 */ "literal ::= NK_INTEGER",
+ /* 295 */ "literal ::= NK_FLOAT",
+ /* 296 */ "literal ::= NK_STRING",
+ /* 297 */ "literal ::= NK_BOOL",
+ /* 298 */ "literal ::= TIMESTAMP NK_STRING",
+ /* 299 */ "literal ::= duration_literal",
+ /* 300 */ "literal ::= NULL",
+ /* 301 */ "literal ::= NK_QUESTION",
+ /* 302 */ "duration_literal ::= NK_VARIABLE",
+ /* 303 */ "signed ::= NK_INTEGER",
+ /* 304 */ "signed ::= NK_PLUS NK_INTEGER",
+ /* 305 */ "signed ::= NK_MINUS NK_INTEGER",
+ /* 306 */ "signed ::= NK_FLOAT",
+ /* 307 */ "signed ::= NK_PLUS NK_FLOAT",
+ /* 308 */ "signed ::= NK_MINUS NK_FLOAT",
+ /* 309 */ "signed_literal ::= signed",
+ /* 310 */ "signed_literal ::= NK_STRING",
+ /* 311 */ "signed_literal ::= NK_BOOL",
+ /* 312 */ "signed_literal ::= TIMESTAMP NK_STRING",
+ /* 313 */ "signed_literal ::= duration_literal",
+ /* 314 */ "signed_literal ::= NULL",
+ /* 315 */ "signed_literal ::= literal_func",
+ /* 316 */ "signed_literal ::= NK_QUESTION",
+ /* 317 */ "literal_list ::= signed_literal",
+ /* 318 */ "literal_list ::= literal_list NK_COMMA signed_literal",
+ /* 319 */ "db_name ::= NK_ID",
+ /* 320 */ "table_name ::= NK_ID",
+ /* 321 */ "column_name ::= NK_ID",
+ /* 322 */ "function_name ::= NK_ID",
+ /* 323 */ "table_alias ::= NK_ID",
+ /* 324 */ "column_alias ::= NK_ID",
+ /* 325 */ "user_name ::= NK_ID",
+ /* 326 */ "topic_name ::= NK_ID",
+ /* 327 */ "stream_name ::= NK_ID",
+ /* 328 */ "cgroup_name ::= NK_ID",
+ /* 329 */ "expression ::= literal",
+ /* 330 */ "expression ::= pseudo_column",
+ /* 331 */ "expression ::= column_reference",
+ /* 332 */ "expression ::= function_expression",
+ /* 333 */ "expression ::= subquery",
+ /* 334 */ "expression ::= NK_LP expression NK_RP",
+ /* 335 */ "expression ::= NK_PLUS expression",
+ /* 336 */ "expression ::= NK_MINUS expression",
+ /* 337 */ "expression ::= expression NK_PLUS expression",
+ /* 338 */ "expression ::= expression NK_MINUS expression",
+ /* 339 */ "expression ::= expression NK_STAR expression",
+ /* 340 */ "expression ::= expression NK_SLASH expression",
+ /* 341 */ "expression ::= expression NK_REM expression",
+ /* 342 */ "expression ::= column_reference NK_ARROW NK_STRING",
+ /* 343 */ "expression ::= expression NK_BITAND expression",
+ /* 344 */ "expression ::= expression NK_BITOR expression",
+ /* 345 */ "expression_list ::= expression",
+ /* 346 */ "expression_list ::= expression_list NK_COMMA expression",
+ /* 347 */ "column_reference ::= column_name",
+ /* 348 */ "column_reference ::= table_name NK_DOT column_name",
+ /* 349 */ "pseudo_column ::= ROWTS",
+ /* 350 */ "pseudo_column ::= TBNAME",
+ /* 351 */ "pseudo_column ::= table_name NK_DOT TBNAME",
+ /* 352 */ "pseudo_column ::= QSTART",
+ /* 353 */ "pseudo_column ::= QEND",
+ /* 354 */ "pseudo_column ::= QDURATION",
+ /* 355 */ "pseudo_column ::= WSTART",
+ /* 356 */ "pseudo_column ::= WEND",
+ /* 357 */ "pseudo_column ::= WDURATION",
+ /* 358 */ "function_expression ::= function_name NK_LP expression_list NK_RP",
+ /* 359 */ "function_expression ::= star_func NK_LP star_func_para_list NK_RP",
+ /* 360 */ "function_expression ::= CAST NK_LP expression AS type_name NK_RP",
+ /* 361 */ "function_expression ::= literal_func",
+ /* 362 */ "literal_func ::= noarg_func NK_LP NK_RP",
+ /* 363 */ "literal_func ::= NOW",
+ /* 364 */ "noarg_func ::= NOW",
+ /* 365 */ "noarg_func ::= TODAY",
+ /* 366 */ "noarg_func ::= TIMEZONE",
+ /* 367 */ "noarg_func ::= DATABASE",
+ /* 368 */ "noarg_func ::= CLIENT_VERSION",
+ /* 369 */ "noarg_func ::= SERVER_VERSION",
+ /* 370 */ "noarg_func ::= SERVER_STATUS",
+ /* 371 */ "noarg_func ::= CURRENT_USER",
+ /* 372 */ "noarg_func ::= USER",
+ /* 373 */ "star_func ::= COUNT",
+ /* 374 */ "star_func ::= FIRST",
+ /* 375 */ "star_func ::= LAST",
+ /* 376 */ "star_func ::= LAST_ROW",
+ /* 377 */ "star_func_para_list ::= NK_STAR",
+ /* 378 */ "star_func_para_list ::= other_para_list",
+ /* 379 */ "other_para_list ::= star_func_para",
+ /* 380 */ "other_para_list ::= other_para_list NK_COMMA star_func_para",
+ /* 381 */ "star_func_para ::= expression",
+ /* 382 */ "star_func_para ::= table_name NK_DOT NK_STAR",
+ /* 383 */ "predicate ::= expression compare_op expression",
+ /* 384 */ "predicate ::= expression BETWEEN expression AND expression",
+ /* 385 */ "predicate ::= expression NOT BETWEEN expression AND expression",
+ /* 386 */ "predicate ::= expression IS NULL",
+ /* 387 */ "predicate ::= expression IS NOT NULL",
+ /* 388 */ "predicate ::= expression in_op in_predicate_value",
+ /* 389 */ "compare_op ::= NK_LT",
+ /* 390 */ "compare_op ::= NK_GT",
+ /* 391 */ "compare_op ::= NK_LE",
+ /* 392 */ "compare_op ::= NK_GE",
+ /* 393 */ "compare_op ::= NK_NE",
+ /* 394 */ "compare_op ::= NK_EQ",
+ /* 395 */ "compare_op ::= LIKE",
+ /* 396 */ "compare_op ::= NOT LIKE",
+ /* 397 */ "compare_op ::= MATCH",
+ /* 398 */ "compare_op ::= NMATCH",
+ /* 399 */ "compare_op ::= CONTAINS",
+ /* 400 */ "in_op ::= IN",
+ /* 401 */ "in_op ::= NOT IN",
+ /* 402 */ "in_predicate_value ::= NK_LP literal_list NK_RP",
+ /* 403 */ "boolean_value_expression ::= boolean_primary",
+ /* 404 */ "boolean_value_expression ::= NOT boolean_primary",
+ /* 405 */ "boolean_value_expression ::= boolean_value_expression OR boolean_value_expression",
+ /* 406 */ "boolean_value_expression ::= boolean_value_expression AND boolean_value_expression",
+ /* 407 */ "boolean_primary ::= predicate",
+ /* 408 */ "boolean_primary ::= NK_LP boolean_value_expression NK_RP",
+ /* 409 */ "common_expression ::= expression",
+ /* 410 */ "common_expression ::= boolean_value_expression",
+ /* 411 */ "from_clause_opt ::=",
+ /* 412 */ "from_clause_opt ::= FROM table_reference_list",
+ /* 413 */ "table_reference_list ::= table_reference",
+ /* 414 */ "table_reference_list ::= table_reference_list NK_COMMA table_reference",
+ /* 415 */ "table_reference ::= table_primary",
+ /* 416 */ "table_reference ::= joined_table",
+ /* 417 */ "table_primary ::= table_name alias_opt",
+ /* 418 */ "table_primary ::= db_name NK_DOT table_name alias_opt",
+ /* 419 */ "table_primary ::= subquery alias_opt",
+ /* 420 */ "table_primary ::= parenthesized_joined_table",
+ /* 421 */ "alias_opt ::=",
+ /* 422 */ "alias_opt ::= table_alias",
+ /* 423 */ "alias_opt ::= AS table_alias",
+ /* 424 */ "parenthesized_joined_table ::= NK_LP joined_table NK_RP",
+ /* 425 */ "parenthesized_joined_table ::= NK_LP parenthesized_joined_table NK_RP",
+ /* 426 */ "joined_table ::= table_reference join_type JOIN table_reference ON search_condition",
+ /* 427 */ "join_type ::=",
+ /* 428 */ "join_type ::= INNER",
+ /* 429 */ "query_specification ::= SELECT set_quantifier_opt select_list from_clause_opt where_clause_opt partition_by_clause_opt range_opt every_opt fill_opt twindow_clause_opt group_by_clause_opt having_clause_opt",
+ /* 430 */ "set_quantifier_opt ::=",
+ /* 431 */ "set_quantifier_opt ::= DISTINCT",
+ /* 432 */ "set_quantifier_opt ::= ALL",
+ /* 433 */ "select_list ::= select_item",
+ /* 434 */ "select_list ::= select_list NK_COMMA select_item",
+ /* 435 */ "select_item ::= NK_STAR",
+ /* 436 */ "select_item ::= common_expression",
+ /* 437 */ "select_item ::= common_expression column_alias",
+ /* 438 */ "select_item ::= common_expression AS column_alias",
+ /* 439 */ "select_item ::= table_name NK_DOT NK_STAR",
+ /* 440 */ "where_clause_opt ::=",
+ /* 441 */ "where_clause_opt ::= WHERE search_condition",
+ /* 442 */ "partition_by_clause_opt ::=",
+ /* 443 */ "partition_by_clause_opt ::= PARTITION BY expression_list",
+ /* 444 */ "twindow_clause_opt ::=",
+ /* 445 */ "twindow_clause_opt ::= SESSION NK_LP column_reference NK_COMMA duration_literal NK_RP",
+ /* 446 */ "twindow_clause_opt ::= STATE_WINDOW NK_LP expression NK_RP",
+ /* 447 */ "twindow_clause_opt ::= INTERVAL NK_LP duration_literal NK_RP sliding_opt fill_opt",
+ /* 448 */ "twindow_clause_opt ::= INTERVAL NK_LP duration_literal NK_COMMA duration_literal NK_RP sliding_opt fill_opt",
+ /* 449 */ "sliding_opt ::=",
+ /* 450 */ "sliding_opt ::= SLIDING NK_LP duration_literal NK_RP",
+ /* 451 */ "fill_opt ::=",
+ /* 452 */ "fill_opt ::= FILL NK_LP fill_mode NK_RP",
+ /* 453 */ "fill_opt ::= FILL NK_LP VALUE NK_COMMA literal_list NK_RP",
+ /* 454 */ "fill_mode ::= NONE",
+ /* 455 */ "fill_mode ::= PREV",
+ /* 456 */ "fill_mode ::= NULL",
+ /* 457 */ "fill_mode ::= LINEAR",
+ /* 458 */ "fill_mode ::= NEXT",
+ /* 459 */ "group_by_clause_opt ::=",
+ /* 460 */ "group_by_clause_opt ::= GROUP BY group_by_list",
+ /* 461 */ "group_by_list ::= expression",
+ /* 462 */ "group_by_list ::= group_by_list NK_COMMA expression",
+ /* 463 */ "having_clause_opt ::=",
+ /* 464 */ "having_clause_opt ::= HAVING search_condition",
+ /* 465 */ "range_opt ::=",
+ /* 466 */ "range_opt ::= RANGE NK_LP expression NK_COMMA expression NK_RP",
+ /* 467 */ "every_opt ::=",
+ /* 468 */ "every_opt ::= EVERY NK_LP duration_literal NK_RP",
+ /* 469 */ "query_expression ::= query_expression_body order_by_clause_opt slimit_clause_opt limit_clause_opt",
+ /* 470 */ "query_expression_body ::= query_primary",
+ /* 471 */ "query_expression_body ::= query_expression_body UNION ALL query_expression_body",
+ /* 472 */ "query_expression_body ::= query_expression_body UNION query_expression_body",
+ /* 473 */ "query_primary ::= query_specification",
+ /* 474 */ "query_primary ::= NK_LP query_expression_body order_by_clause_opt slimit_clause_opt limit_clause_opt NK_RP",
+ /* 475 */ "order_by_clause_opt ::=",
+ /* 476 */ "order_by_clause_opt ::= ORDER BY sort_specification_list",
+ /* 477 */ "slimit_clause_opt ::=",
+ /* 478 */ "slimit_clause_opt ::= SLIMIT NK_INTEGER",
+ /* 479 */ "slimit_clause_opt ::= SLIMIT NK_INTEGER SOFFSET NK_INTEGER",
+ /* 480 */ "slimit_clause_opt ::= SLIMIT NK_INTEGER NK_COMMA NK_INTEGER",
+ /* 481 */ "limit_clause_opt ::=",
+ /* 482 */ "limit_clause_opt ::= LIMIT NK_INTEGER",
+ /* 483 */ "limit_clause_opt ::= LIMIT NK_INTEGER OFFSET NK_INTEGER",
+ /* 484 */ "limit_clause_opt ::= LIMIT NK_INTEGER NK_COMMA NK_INTEGER",
+ /* 485 */ "subquery ::= NK_LP query_expression NK_RP",
+ /* 486 */ "search_condition ::= common_expression",
+ /* 487 */ "sort_specification_list ::= sort_specification",
+ /* 488 */ "sort_specification_list ::= sort_specification_list NK_COMMA sort_specification",
+ /* 489 */ "sort_specification ::= expression ordering_specification_opt null_ordering_opt",
+ /* 490 */ "ordering_specification_opt ::=",
+ /* 491 */ "ordering_specification_opt ::= ASC",
+ /* 492 */ "ordering_specification_opt ::= DESC",
+ /* 493 */ "null_ordering_opt ::=",
+ /* 494 */ "null_ordering_opt ::= NULLS FIRST",
+ /* 495 */ "null_ordering_opt ::= NULLS LAST",
};
#endif /* NDEBUG */
@@ -2366,180 +2374,179 @@ static void yy_destructor(
*/
/********* Begin destructor definitions ***************************************/
/* Default NON-TERMINAL Destructor */
- case 305: /* cmd */
- case 308: /* literal */
- case 319: /* db_options */
- case 321: /* alter_db_options */
- case 326: /* retention */
- case 327: /* full_table_name */
- case 330: /* table_options */
- case 334: /* alter_table_clause */
- case 335: /* alter_table_options */
- case 338: /* signed_literal */
- case 339: /* create_subtable_clause */
- case 342: /* drop_table_clause */
- case 345: /* column_def */
- case 349: /* duration_literal */
- case 350: /* rollup_func_name */
- case 352: /* col_name */
- case 353: /* db_name_cond_opt */
- case 354: /* like_pattern_opt */
- case 355: /* table_name_cond */
- case 356: /* from_db_opt */
- case 357: /* index_options */
- case 359: /* sliding_opt */
- case 360: /* sma_stream_opt */
- case 361: /* func */
- case 362: /* stream_options */
- case 364: /* query_expression */
- case 367: /* explain_options */
- case 371: /* into_opt */
- case 373: /* where_clause_opt */
- case 374: /* signed */
- case 375: /* literal_func */
- case 379: /* expression */
- case 380: /* pseudo_column */
- case 381: /* column_reference */
- case 382: /* function_expression */
- case 383: /* subquery */
- case 388: /* star_func_para */
- case 389: /* predicate */
- case 392: /* in_predicate_value */
- case 393: /* boolean_value_expression */
- case 394: /* boolean_primary */
- case 395: /* common_expression */
- case 396: /* from_clause_opt */
- case 397: /* table_reference_list */
- case 398: /* table_reference */
- case 399: /* table_primary */
- case 400: /* joined_table */
- case 402: /* parenthesized_joined_table */
- case 404: /* search_condition */
- case 405: /* query_specification */
- case 409: /* range_opt */
- case 410: /* every_opt */
- case 411: /* fill_opt */
- case 412: /* twindow_clause_opt */
- case 414: /* having_clause_opt */
- case 415: /* select_item */
- case 418: /* query_expression_body */
- case 420: /* slimit_clause_opt */
- case 421: /* limit_clause_opt */
- case 422: /* query_primary */
- case 424: /* sort_specification */
+ case 309: /* cmd */
+ case 312: /* literal */
+ case 323: /* db_options */
+ case 325: /* alter_db_options */
+ case 330: /* retention */
+ case 331: /* full_table_name */
+ case 334: /* table_options */
+ case 338: /* alter_table_clause */
+ case 339: /* alter_table_options */
+ case 342: /* signed_literal */
+ case 343: /* create_subtable_clause */
+ case 346: /* drop_table_clause */
+ case 349: /* column_def */
+ case 353: /* duration_literal */
+ case 354: /* rollup_func_name */
+ case 356: /* col_name */
+ case 357: /* db_name_cond_opt */
+ case 358: /* like_pattern_opt */
+ case 359: /* table_name_cond */
+ case 360: /* from_db_opt */
+ case 361: /* index_options */
+ case 363: /* sliding_opt */
+ case 364: /* sma_stream_opt */
+ case 365: /* func */
+ case 366: /* stream_options */
+ case 368: /* query_expression */
+ case 371: /* explain_options */
+ case 376: /* where_clause_opt */
+ case 377: /* signed */
+ case 378: /* literal_func */
+ case 382: /* expression */
+ case 383: /* pseudo_column */
+ case 384: /* column_reference */
+ case 385: /* function_expression */
+ case 386: /* subquery */
+ case 391: /* star_func_para */
+ case 392: /* predicate */
+ case 395: /* in_predicate_value */
+ case 396: /* boolean_value_expression */
+ case 397: /* boolean_primary */
+ case 398: /* common_expression */
+ case 399: /* from_clause_opt */
+ case 400: /* table_reference_list */
+ case 401: /* table_reference */
+ case 402: /* table_primary */
+ case 403: /* joined_table */
+ case 405: /* parenthesized_joined_table */
+ case 407: /* search_condition */
+ case 408: /* query_specification */
+ case 412: /* range_opt */
+ case 413: /* every_opt */
+ case 414: /* fill_opt */
+ case 415: /* twindow_clause_opt */
+ case 417: /* having_clause_opt */
+ case 418: /* select_item */
+ case 421: /* query_expression_body */
+ case 423: /* slimit_clause_opt */
+ case 424: /* limit_clause_opt */
+ case 425: /* query_primary */
+ case 427: /* sort_specification */
{
- nodesDestroyNode((yypminor->yy840));
+ nodesDestroyNode((yypminor->yy312));
}
break;
- case 306: /* account_options */
- case 307: /* alter_account_options */
- case 309: /* alter_account_option */
- case 369: /* bufsize_opt */
+ case 310: /* account_options */
+ case 311: /* alter_account_options */
+ case 313: /* alter_account_option */
+ case 373: /* bufsize_opt */
{
}
break;
- case 310: /* user_name */
- case 313: /* priv_level */
- case 316: /* db_name */
- case 317: /* dnode_endpoint */
- case 336: /* column_name */
- case 344: /* table_name */
- case 351: /* function_name */
- case 363: /* topic_name */
- case 365: /* cgroup_name */
- case 370: /* stream_name */
- case 377: /* table_alias */
- case 378: /* column_alias */
- case 384: /* star_func */
- case 386: /* noarg_func */
- case 401: /* alias_opt */
+ case 314: /* user_name */
+ case 317: /* priv_level */
+ case 320: /* db_name */
+ case 321: /* dnode_endpoint */
+ case 340: /* column_name */
+ case 348: /* table_name */
+ case 355: /* function_name */
+ case 367: /* topic_name */
+ case 369: /* cgroup_name */
+ case 374: /* stream_name */
+ case 380: /* table_alias */
+ case 381: /* column_alias */
+ case 387: /* star_func */
+ case 389: /* noarg_func */
+ case 404: /* alias_opt */
{
}
break;
- case 311: /* sysinfo_opt */
+ case 315: /* sysinfo_opt */
{
}
break;
- case 312: /* privileges */
- case 314: /* priv_type_list */
- case 315: /* priv_type */
+ case 316: /* privileges */
+ case 318: /* priv_type_list */
+ case 319: /* priv_type */
{
}
break;
- case 318: /* not_exists_opt */
- case 320: /* exists_opt */
- case 366: /* analyze_opt */
- case 368: /* agg_func_opt */
- case 406: /* set_quantifier_opt */
+ case 322: /* not_exists_opt */
+ case 324: /* exists_opt */
+ case 370: /* analyze_opt */
+ case 372: /* agg_func_opt */
+ case 409: /* set_quantifier_opt */
{
}
break;
- case 322: /* integer_list */
- case 323: /* variable_list */
- case 324: /* retention_list */
- case 328: /* column_def_list */
- case 329: /* tags_def_opt */
- case 331: /* multi_create_clause */
- case 332: /* tags_def */
- case 333: /* multi_drop_clause */
- case 340: /* specific_cols_opt */
- case 341: /* expression_list */
- case 343: /* col_name_list */
- case 346: /* duration_list */
- case 347: /* rollup_func_list */
- case 358: /* func_list */
- case 372: /* dnode_list */
- case 376: /* literal_list */
- case 385: /* star_func_para_list */
- case 387: /* other_para_list */
- case 407: /* select_list */
- case 408: /* partition_by_clause_opt */
- case 413: /* group_by_clause_opt */
- case 417: /* group_by_list */
- case 419: /* order_by_clause_opt */
- case 423: /* sort_specification_list */
+ case 326: /* integer_list */
+ case 327: /* variable_list */
+ case 328: /* retention_list */
+ case 332: /* column_def_list */
+ case 333: /* tags_def_opt */
+ case 335: /* multi_create_clause */
+ case 336: /* tags_def */
+ case 337: /* multi_drop_clause */
+ case 344: /* specific_cols_opt */
+ case 345: /* expression_list */
+ case 347: /* col_name_list */
+ case 350: /* duration_list */
+ case 351: /* rollup_func_list */
+ case 362: /* func_list */
+ case 375: /* dnode_list */
+ case 379: /* literal_list */
+ case 388: /* star_func_para_list */
+ case 390: /* other_para_list */
+ case 410: /* select_list */
+ case 411: /* partition_by_clause_opt */
+ case 416: /* group_by_clause_opt */
+ case 420: /* group_by_list */
+ case 422: /* order_by_clause_opt */
+ case 426: /* sort_specification_list */
{
- nodesDestroyList((yypminor->yy544));
+ nodesDestroyList((yypminor->yy824));
}
break;
- case 325: /* alter_db_option */
- case 348: /* alter_table_option */
+ case 329: /* alter_db_option */
+ case 352: /* alter_table_option */
{
}
break;
- case 337: /* type_name */
+ case 341: /* type_name */
{
}
break;
- case 390: /* compare_op */
- case 391: /* in_op */
+ case 393: /* compare_op */
+ case 394: /* in_op */
{
}
break;
- case 403: /* join_type */
+ case 406: /* join_type */
{
}
break;
- case 416: /* fill_mode */
+ case 419: /* fill_mode */
{
}
break;
- case 425: /* ordering_specification_opt */
+ case 428: /* ordering_specification_opt */
{
}
break;
- case 426: /* null_ordering_opt */
+ case 429: /* null_ordering_opt */
{
}
@@ -2838,497 +2845,502 @@ static const struct {
YYCODETYPE lhs; /* Symbol on the left-hand side of the rule */
signed char nrhs; /* Negative of the number of RHS symbols in the rule */
} yyRuleInfo[] = {
- { 305, -6 }, /* (0) cmd ::= CREATE ACCOUNT NK_ID PASS NK_STRING account_options */
- { 305, -4 }, /* (1) cmd ::= ALTER ACCOUNT NK_ID alter_account_options */
- { 306, 0 }, /* (2) account_options ::= */
- { 306, -3 }, /* (3) account_options ::= account_options PPS literal */
- { 306, -3 }, /* (4) account_options ::= account_options TSERIES literal */
- { 306, -3 }, /* (5) account_options ::= account_options STORAGE literal */
- { 306, -3 }, /* (6) account_options ::= account_options STREAMS literal */
- { 306, -3 }, /* (7) account_options ::= account_options QTIME literal */
- { 306, -3 }, /* (8) account_options ::= account_options DBS literal */
- { 306, -3 }, /* (9) account_options ::= account_options USERS literal */
- { 306, -3 }, /* (10) account_options ::= account_options CONNS literal */
- { 306, -3 }, /* (11) account_options ::= account_options STATE literal */
- { 307, -1 }, /* (12) alter_account_options ::= alter_account_option */
- { 307, -2 }, /* (13) alter_account_options ::= alter_account_options alter_account_option */
- { 309, -2 }, /* (14) alter_account_option ::= PASS literal */
- { 309, -2 }, /* (15) alter_account_option ::= PPS literal */
- { 309, -2 }, /* (16) alter_account_option ::= TSERIES literal */
- { 309, -2 }, /* (17) alter_account_option ::= STORAGE literal */
- { 309, -2 }, /* (18) alter_account_option ::= STREAMS literal */
- { 309, -2 }, /* (19) alter_account_option ::= QTIME literal */
- { 309, -2 }, /* (20) alter_account_option ::= DBS literal */
- { 309, -2 }, /* (21) alter_account_option ::= USERS literal */
- { 309, -2 }, /* (22) alter_account_option ::= CONNS literal */
- { 309, -2 }, /* (23) alter_account_option ::= STATE literal */
- { 305, -6 }, /* (24) cmd ::= CREATE USER user_name PASS NK_STRING sysinfo_opt */
- { 305, -5 }, /* (25) cmd ::= ALTER USER user_name PASS NK_STRING */
- { 305, -5 }, /* (26) cmd ::= ALTER USER user_name ENABLE NK_INTEGER */
- { 305, -5 }, /* (27) cmd ::= ALTER USER user_name SYSINFO NK_INTEGER */
- { 305, -3 }, /* (28) cmd ::= DROP USER user_name */
- { 311, 0 }, /* (29) sysinfo_opt ::= */
- { 311, -2 }, /* (30) sysinfo_opt ::= SYSINFO NK_INTEGER */
- { 305, -6 }, /* (31) cmd ::= GRANT privileges ON priv_level TO user_name */
- { 305, -6 }, /* (32) cmd ::= REVOKE privileges ON priv_level FROM user_name */
- { 312, -1 }, /* (33) privileges ::= ALL */
- { 312, -1 }, /* (34) privileges ::= priv_type_list */
- { 314, -1 }, /* (35) priv_type_list ::= priv_type */
- { 314, -3 }, /* (36) priv_type_list ::= priv_type_list NK_COMMA priv_type */
- { 315, -1 }, /* (37) priv_type ::= READ */
- { 315, -1 }, /* (38) priv_type ::= WRITE */
- { 313, -3 }, /* (39) priv_level ::= NK_STAR NK_DOT NK_STAR */
- { 313, -3 }, /* (40) priv_level ::= db_name NK_DOT NK_STAR */
- { 305, -3 }, /* (41) cmd ::= CREATE DNODE dnode_endpoint */
- { 305, -5 }, /* (42) cmd ::= CREATE DNODE dnode_endpoint PORT NK_INTEGER */
- { 305, -3 }, /* (43) cmd ::= DROP DNODE NK_INTEGER */
- { 305, -3 }, /* (44) cmd ::= DROP DNODE dnode_endpoint */
- { 305, -4 }, /* (45) cmd ::= ALTER DNODE NK_INTEGER NK_STRING */
- { 305, -5 }, /* (46) cmd ::= ALTER DNODE NK_INTEGER NK_STRING NK_STRING */
- { 305, -4 }, /* (47) cmd ::= ALTER ALL DNODES NK_STRING */
- { 305, -5 }, /* (48) cmd ::= ALTER ALL DNODES NK_STRING NK_STRING */
- { 317, -1 }, /* (49) dnode_endpoint ::= NK_STRING */
- { 317, -1 }, /* (50) dnode_endpoint ::= NK_ID */
- { 317, -1 }, /* (51) dnode_endpoint ::= NK_IPTOKEN */
- { 305, -3 }, /* (52) cmd ::= ALTER LOCAL NK_STRING */
- { 305, -4 }, /* (53) cmd ::= ALTER LOCAL NK_STRING NK_STRING */
- { 305, -5 }, /* (54) cmd ::= CREATE QNODE ON DNODE NK_INTEGER */
- { 305, -5 }, /* (55) cmd ::= DROP QNODE ON DNODE NK_INTEGER */
- { 305, -5 }, /* (56) cmd ::= CREATE BNODE ON DNODE NK_INTEGER */
- { 305, -5 }, /* (57) cmd ::= DROP BNODE ON DNODE NK_INTEGER */
- { 305, -5 }, /* (58) cmd ::= CREATE SNODE ON DNODE NK_INTEGER */
- { 305, -5 }, /* (59) cmd ::= DROP SNODE ON DNODE NK_INTEGER */
- { 305, -5 }, /* (60) cmd ::= CREATE MNODE ON DNODE NK_INTEGER */
- { 305, -5 }, /* (61) cmd ::= DROP MNODE ON DNODE NK_INTEGER */
- { 305, -5 }, /* (62) cmd ::= CREATE DATABASE not_exists_opt db_name db_options */
- { 305, -4 }, /* (63) cmd ::= DROP DATABASE exists_opt db_name */
- { 305, -2 }, /* (64) cmd ::= USE db_name */
- { 305, -4 }, /* (65) cmd ::= ALTER DATABASE db_name alter_db_options */
- { 305, -3 }, /* (66) cmd ::= FLUSH DATABASE db_name */
- { 305, -3 }, /* (67) cmd ::= TRIM DATABASE db_name */
- { 318, -3 }, /* (68) not_exists_opt ::= IF NOT EXISTS */
- { 318, 0 }, /* (69) not_exists_opt ::= */
- { 320, -2 }, /* (70) exists_opt ::= IF EXISTS */
- { 320, 0 }, /* (71) exists_opt ::= */
- { 319, 0 }, /* (72) db_options ::= */
- { 319, -3 }, /* (73) db_options ::= db_options BUFFER NK_INTEGER */
- { 319, -3 }, /* (74) db_options ::= db_options CACHEMODEL NK_STRING */
- { 319, -3 }, /* (75) db_options ::= db_options CACHESIZE NK_INTEGER */
- { 319, -3 }, /* (76) db_options ::= db_options COMP NK_INTEGER */
- { 319, -3 }, /* (77) db_options ::= db_options DURATION NK_INTEGER */
- { 319, -3 }, /* (78) db_options ::= db_options DURATION NK_VARIABLE */
- { 319, -3 }, /* (79) db_options ::= db_options MAXROWS NK_INTEGER */
- { 319, -3 }, /* (80) db_options ::= db_options MINROWS NK_INTEGER */
- { 319, -3 }, /* (81) db_options ::= db_options KEEP integer_list */
- { 319, -3 }, /* (82) db_options ::= db_options KEEP variable_list */
- { 319, -3 }, /* (83) db_options ::= db_options PAGES NK_INTEGER */
- { 319, -3 }, /* (84) db_options ::= db_options PAGESIZE NK_INTEGER */
- { 319, -3 }, /* (85) db_options ::= db_options PRECISION NK_STRING */
- { 319, -3 }, /* (86) db_options ::= db_options REPLICA NK_INTEGER */
- { 319, -3 }, /* (87) db_options ::= db_options STRICT NK_STRING */
- { 319, -3 }, /* (88) db_options ::= db_options VGROUPS NK_INTEGER */
- { 319, -3 }, /* (89) db_options ::= db_options SINGLE_STABLE NK_INTEGER */
- { 319, -3 }, /* (90) db_options ::= db_options RETENTIONS retention_list */
- { 319, -3 }, /* (91) db_options ::= db_options SCHEMALESS NK_INTEGER */
- { 319, -3 }, /* (92) db_options ::= db_options WAL_LEVEL NK_INTEGER */
- { 319, -3 }, /* (93) db_options ::= db_options WAL_FSYNC_PERIOD NK_INTEGER */
- { 319, -3 }, /* (94) db_options ::= db_options WAL_RETENTION_PERIOD NK_INTEGER */
- { 319, -4 }, /* (95) db_options ::= db_options WAL_RETENTION_PERIOD NK_MINUS NK_INTEGER */
- { 319, -3 }, /* (96) db_options ::= db_options WAL_RETENTION_SIZE NK_INTEGER */
- { 319, -4 }, /* (97) db_options ::= db_options WAL_RETENTION_SIZE NK_MINUS NK_INTEGER */
- { 319, -3 }, /* (98) db_options ::= db_options WAL_ROLL_PERIOD NK_INTEGER */
- { 319, -3 }, /* (99) db_options ::= db_options WAL_SEGMENT_SIZE NK_INTEGER */
- { 321, -1 }, /* (100) alter_db_options ::= alter_db_option */
- { 321, -2 }, /* (101) alter_db_options ::= alter_db_options alter_db_option */
- { 325, -2 }, /* (102) alter_db_option ::= CACHEMODEL NK_STRING */
- { 325, -2 }, /* (103) alter_db_option ::= CACHESIZE NK_INTEGER */
- { 325, -2 }, /* (104) alter_db_option ::= WAL_FSYNC_PERIOD NK_INTEGER */
- { 325, -2 }, /* (105) alter_db_option ::= KEEP integer_list */
- { 325, -2 }, /* (106) alter_db_option ::= KEEP variable_list */
- { 325, -2 }, /* (107) alter_db_option ::= WAL_LEVEL NK_INTEGER */
- { 322, -1 }, /* (108) integer_list ::= NK_INTEGER */
- { 322, -3 }, /* (109) integer_list ::= integer_list NK_COMMA NK_INTEGER */
- { 323, -1 }, /* (110) variable_list ::= NK_VARIABLE */
- { 323, -3 }, /* (111) variable_list ::= variable_list NK_COMMA NK_VARIABLE */
- { 324, -1 }, /* (112) retention_list ::= retention */
- { 324, -3 }, /* (113) retention_list ::= retention_list NK_COMMA retention */
- { 326, -3 }, /* (114) retention ::= NK_VARIABLE NK_COLON NK_VARIABLE */
- { 305, -9 }, /* (115) cmd ::= CREATE TABLE not_exists_opt full_table_name NK_LP column_def_list NK_RP tags_def_opt table_options */
- { 305, -3 }, /* (116) cmd ::= CREATE TABLE multi_create_clause */
- { 305, -9 }, /* (117) cmd ::= CREATE STABLE not_exists_opt full_table_name NK_LP column_def_list NK_RP tags_def table_options */
- { 305, -3 }, /* (118) cmd ::= DROP TABLE multi_drop_clause */
- { 305, -4 }, /* (119) cmd ::= DROP STABLE exists_opt full_table_name */
- { 305, -3 }, /* (120) cmd ::= ALTER TABLE alter_table_clause */
- { 305, -3 }, /* (121) cmd ::= ALTER STABLE alter_table_clause */
- { 334, -2 }, /* (122) alter_table_clause ::= full_table_name alter_table_options */
- { 334, -5 }, /* (123) alter_table_clause ::= full_table_name ADD COLUMN column_name type_name */
- { 334, -4 }, /* (124) alter_table_clause ::= full_table_name DROP COLUMN column_name */
- { 334, -5 }, /* (125) alter_table_clause ::= full_table_name MODIFY COLUMN column_name type_name */
- { 334, -5 }, /* (126) alter_table_clause ::= full_table_name RENAME COLUMN column_name column_name */
- { 334, -5 }, /* (127) alter_table_clause ::= full_table_name ADD TAG column_name type_name */
- { 334, -4 }, /* (128) alter_table_clause ::= full_table_name DROP TAG column_name */
- { 334, -5 }, /* (129) alter_table_clause ::= full_table_name MODIFY TAG column_name type_name */
- { 334, -5 }, /* (130) alter_table_clause ::= full_table_name RENAME TAG column_name column_name */
- { 334, -6 }, /* (131) alter_table_clause ::= full_table_name SET TAG column_name NK_EQ signed_literal */
- { 331, -1 }, /* (132) multi_create_clause ::= create_subtable_clause */
- { 331, -2 }, /* (133) multi_create_clause ::= multi_create_clause create_subtable_clause */
- { 339, -10 }, /* (134) create_subtable_clause ::= not_exists_opt full_table_name USING full_table_name specific_cols_opt TAGS NK_LP expression_list NK_RP table_options */
- { 333, -1 }, /* (135) multi_drop_clause ::= drop_table_clause */
- { 333, -2 }, /* (136) multi_drop_clause ::= multi_drop_clause drop_table_clause */
- { 342, -2 }, /* (137) drop_table_clause ::= exists_opt full_table_name */
- { 340, 0 }, /* (138) specific_cols_opt ::= */
- { 340, -3 }, /* (139) specific_cols_opt ::= NK_LP col_name_list NK_RP */
- { 327, -1 }, /* (140) full_table_name ::= table_name */
- { 327, -3 }, /* (141) full_table_name ::= db_name NK_DOT table_name */
- { 328, -1 }, /* (142) column_def_list ::= column_def */
- { 328, -3 }, /* (143) column_def_list ::= column_def_list NK_COMMA column_def */
- { 345, -2 }, /* (144) column_def ::= column_name type_name */
- { 345, -4 }, /* (145) column_def ::= column_name type_name COMMENT NK_STRING */
- { 337, -1 }, /* (146) type_name ::= BOOL */
- { 337, -1 }, /* (147) type_name ::= TINYINT */
- { 337, -1 }, /* (148) type_name ::= SMALLINT */
- { 337, -1 }, /* (149) type_name ::= INT */
- { 337, -1 }, /* (150) type_name ::= INTEGER */
- { 337, -1 }, /* (151) type_name ::= BIGINT */
- { 337, -1 }, /* (152) type_name ::= FLOAT */
- { 337, -1 }, /* (153) type_name ::= DOUBLE */
- { 337, -4 }, /* (154) type_name ::= BINARY NK_LP NK_INTEGER NK_RP */
- { 337, -1 }, /* (155) type_name ::= TIMESTAMP */
- { 337, -4 }, /* (156) type_name ::= NCHAR NK_LP NK_INTEGER NK_RP */
- { 337, -2 }, /* (157) type_name ::= TINYINT UNSIGNED */
- { 337, -2 }, /* (158) type_name ::= SMALLINT UNSIGNED */
- { 337, -2 }, /* (159) type_name ::= INT UNSIGNED */
- { 337, -2 }, /* (160) type_name ::= BIGINT UNSIGNED */
- { 337, -1 }, /* (161) type_name ::= JSON */
- { 337, -4 }, /* (162) type_name ::= VARCHAR NK_LP NK_INTEGER NK_RP */
- { 337, -1 }, /* (163) type_name ::= MEDIUMBLOB */
- { 337, -1 }, /* (164) type_name ::= BLOB */
- { 337, -4 }, /* (165) type_name ::= VARBINARY NK_LP NK_INTEGER NK_RP */
- { 337, -1 }, /* (166) type_name ::= DECIMAL */
- { 337, -4 }, /* (167) type_name ::= DECIMAL NK_LP NK_INTEGER NK_RP */
- { 337, -6 }, /* (168) type_name ::= DECIMAL NK_LP NK_INTEGER NK_COMMA NK_INTEGER NK_RP */
- { 329, 0 }, /* (169) tags_def_opt ::= */
- { 329, -1 }, /* (170) tags_def_opt ::= tags_def */
- { 332, -4 }, /* (171) tags_def ::= TAGS NK_LP column_def_list NK_RP */
- { 330, 0 }, /* (172) table_options ::= */
- { 330, -3 }, /* (173) table_options ::= table_options COMMENT NK_STRING */
- { 330, -3 }, /* (174) table_options ::= table_options MAX_DELAY duration_list */
- { 330, -3 }, /* (175) table_options ::= table_options WATERMARK duration_list */
- { 330, -5 }, /* (176) table_options ::= table_options ROLLUP NK_LP rollup_func_list NK_RP */
- { 330, -3 }, /* (177) table_options ::= table_options TTL NK_INTEGER */
- { 330, -5 }, /* (178) table_options ::= table_options SMA NK_LP col_name_list NK_RP */
- { 335, -1 }, /* (179) alter_table_options ::= alter_table_option */
- { 335, -2 }, /* (180) alter_table_options ::= alter_table_options alter_table_option */
- { 348, -2 }, /* (181) alter_table_option ::= COMMENT NK_STRING */
- { 348, -2 }, /* (182) alter_table_option ::= TTL NK_INTEGER */
- { 346, -1 }, /* (183) duration_list ::= duration_literal */
- { 346, -3 }, /* (184) duration_list ::= duration_list NK_COMMA duration_literal */
- { 347, -1 }, /* (185) rollup_func_list ::= rollup_func_name */
- { 347, -3 }, /* (186) rollup_func_list ::= rollup_func_list NK_COMMA rollup_func_name */
- { 350, -1 }, /* (187) rollup_func_name ::= function_name */
- { 350, -1 }, /* (188) rollup_func_name ::= FIRST */
- { 350, -1 }, /* (189) rollup_func_name ::= LAST */
- { 343, -1 }, /* (190) col_name_list ::= col_name */
- { 343, -3 }, /* (191) col_name_list ::= col_name_list NK_COMMA col_name */
- { 352, -1 }, /* (192) col_name ::= column_name */
- { 305, -2 }, /* (193) cmd ::= SHOW DNODES */
- { 305, -2 }, /* (194) cmd ::= SHOW USERS */
- { 305, -2 }, /* (195) cmd ::= SHOW DATABASES */
- { 305, -4 }, /* (196) cmd ::= SHOW db_name_cond_opt TABLES like_pattern_opt */
- { 305, -4 }, /* (197) cmd ::= SHOW db_name_cond_opt STABLES like_pattern_opt */
- { 305, -3 }, /* (198) cmd ::= SHOW db_name_cond_opt VGROUPS */
- { 305, -2 }, /* (199) cmd ::= SHOW MNODES */
- { 305, -2 }, /* (200) cmd ::= SHOW MODULES */
- { 305, -2 }, /* (201) cmd ::= SHOW QNODES */
- { 305, -2 }, /* (202) cmd ::= SHOW FUNCTIONS */
- { 305, -5 }, /* (203) cmd ::= SHOW INDEXES FROM table_name_cond from_db_opt */
- { 305, -2 }, /* (204) cmd ::= SHOW STREAMS */
- { 305, -2 }, /* (205) cmd ::= SHOW ACCOUNTS */
- { 305, -2 }, /* (206) cmd ::= SHOW APPS */
- { 305, -2 }, /* (207) cmd ::= SHOW CONNECTIONS */
- { 305, -2 }, /* (208) cmd ::= SHOW LICENCES */
- { 305, -2 }, /* (209) cmd ::= SHOW GRANTS */
- { 305, -4 }, /* (210) cmd ::= SHOW CREATE DATABASE db_name */
- { 305, -4 }, /* (211) cmd ::= SHOW CREATE TABLE full_table_name */
- { 305, -4 }, /* (212) cmd ::= SHOW CREATE STABLE full_table_name */
- { 305, -2 }, /* (213) cmd ::= SHOW QUERIES */
- { 305, -2 }, /* (214) cmd ::= SHOW SCORES */
- { 305, -2 }, /* (215) cmd ::= SHOW TOPICS */
- { 305, -2 }, /* (216) cmd ::= SHOW VARIABLES */
- { 305, -3 }, /* (217) cmd ::= SHOW LOCAL VARIABLES */
- { 305, -4 }, /* (218) cmd ::= SHOW DNODE NK_INTEGER VARIABLES */
- { 305, -2 }, /* (219) cmd ::= SHOW BNODES */
- { 305, -2 }, /* (220) cmd ::= SHOW SNODES */
- { 305, -2 }, /* (221) cmd ::= SHOW CLUSTER */
- { 305, -2 }, /* (222) cmd ::= SHOW TRANSACTIONS */
- { 305, -4 }, /* (223) cmd ::= SHOW TABLE DISTRIBUTED full_table_name */
- { 305, -2 }, /* (224) cmd ::= SHOW CONSUMERS */
- { 305, -2 }, /* (225) cmd ::= SHOW SUBSCRIPTIONS */
- { 305, -5 }, /* (226) cmd ::= SHOW TAGS FROM table_name_cond from_db_opt */
- { 353, 0 }, /* (227) db_name_cond_opt ::= */
- { 353, -2 }, /* (228) db_name_cond_opt ::= db_name NK_DOT */
- { 354, 0 }, /* (229) like_pattern_opt ::= */
- { 354, -2 }, /* (230) like_pattern_opt ::= LIKE NK_STRING */
- { 355, -1 }, /* (231) table_name_cond ::= table_name */
- { 356, 0 }, /* (232) from_db_opt ::= */
- { 356, -2 }, /* (233) from_db_opt ::= FROM db_name */
- { 305, -8 }, /* (234) cmd ::= CREATE SMA INDEX not_exists_opt full_table_name ON full_table_name index_options */
- { 305, -4 }, /* (235) cmd ::= DROP INDEX exists_opt full_table_name */
- { 357, -10 }, /* (236) index_options ::= FUNCTION NK_LP func_list NK_RP INTERVAL NK_LP duration_literal NK_RP sliding_opt sma_stream_opt */
- { 357, -12 }, /* (237) index_options ::= FUNCTION NK_LP func_list NK_RP INTERVAL NK_LP duration_literal NK_COMMA duration_literal NK_RP sliding_opt sma_stream_opt */
- { 358, -1 }, /* (238) func_list ::= func */
- { 358, -3 }, /* (239) func_list ::= func_list NK_COMMA func */
- { 361, -4 }, /* (240) func ::= function_name NK_LP expression_list NK_RP */
- { 360, 0 }, /* (241) sma_stream_opt ::= */
- { 360, -3 }, /* (242) sma_stream_opt ::= stream_options WATERMARK duration_literal */
- { 360, -3 }, /* (243) sma_stream_opt ::= stream_options MAX_DELAY duration_literal */
- { 305, -6 }, /* (244) cmd ::= CREATE TOPIC not_exists_opt topic_name AS query_expression */
- { 305, -7 }, /* (245) cmd ::= CREATE TOPIC not_exists_opt topic_name AS DATABASE db_name */
- { 305, -9 }, /* (246) cmd ::= CREATE TOPIC not_exists_opt topic_name WITH META AS DATABASE db_name */
- { 305, -7 }, /* (247) cmd ::= CREATE TOPIC not_exists_opt topic_name AS STABLE full_table_name */
- { 305, -9 }, /* (248) cmd ::= CREATE TOPIC not_exists_opt topic_name WITH META AS STABLE full_table_name */
- { 305, -4 }, /* (249) cmd ::= DROP TOPIC exists_opt topic_name */
- { 305, -7 }, /* (250) cmd ::= DROP CONSUMER GROUP exists_opt cgroup_name ON topic_name */
- { 305, -2 }, /* (251) cmd ::= DESC full_table_name */
- { 305, -2 }, /* (252) cmd ::= DESCRIBE full_table_name */
- { 305, -3 }, /* (253) cmd ::= RESET QUERY CACHE */
- { 305, -4 }, /* (254) cmd ::= EXPLAIN analyze_opt explain_options query_expression */
- { 366, 0 }, /* (255) analyze_opt ::= */
- { 366, -1 }, /* (256) analyze_opt ::= ANALYZE */
- { 367, 0 }, /* (257) explain_options ::= */
- { 367, -3 }, /* (258) explain_options ::= explain_options VERBOSE NK_BOOL */
- { 367, -3 }, /* (259) explain_options ::= explain_options RATIO NK_FLOAT */
- { 305, -10 }, /* (260) cmd ::= CREATE agg_func_opt FUNCTION not_exists_opt function_name AS NK_STRING OUTPUTTYPE type_name bufsize_opt */
- { 305, -4 }, /* (261) cmd ::= DROP FUNCTION exists_opt function_name */
- { 368, 0 }, /* (262) agg_func_opt ::= */
- { 368, -1 }, /* (263) agg_func_opt ::= AGGREGATE */
- { 369, 0 }, /* (264) bufsize_opt ::= */
- { 369, -2 }, /* (265) bufsize_opt ::= BUFSIZE NK_INTEGER */
- { 305, -8 }, /* (266) cmd ::= CREATE STREAM not_exists_opt stream_name stream_options into_opt AS query_expression */
- { 305, -4 }, /* (267) cmd ::= DROP STREAM exists_opt stream_name */
- { 371, 0 }, /* (268) into_opt ::= */
- { 371, -2 }, /* (269) into_opt ::= INTO full_table_name */
- { 362, 0 }, /* (270) stream_options ::= */
- { 362, -3 }, /* (271) stream_options ::= stream_options TRIGGER AT_ONCE */
- { 362, -3 }, /* (272) stream_options ::= stream_options TRIGGER WINDOW_CLOSE */
- { 362, -4 }, /* (273) stream_options ::= stream_options TRIGGER MAX_DELAY duration_literal */
- { 362, -3 }, /* (274) stream_options ::= stream_options WATERMARK duration_literal */
- { 362, -4 }, /* (275) stream_options ::= stream_options IGNORE EXPIRED NK_INTEGER */
- { 305, -3 }, /* (276) cmd ::= KILL CONNECTION NK_INTEGER */
- { 305, -3 }, /* (277) cmd ::= KILL QUERY NK_STRING */
- { 305, -3 }, /* (278) cmd ::= KILL TRANSACTION NK_INTEGER */
- { 305, -2 }, /* (279) cmd ::= BALANCE VGROUP */
- { 305, -4 }, /* (280) cmd ::= MERGE VGROUP NK_INTEGER NK_INTEGER */
- { 305, -4 }, /* (281) cmd ::= REDISTRIBUTE VGROUP NK_INTEGER dnode_list */
- { 305, -3 }, /* (282) cmd ::= SPLIT VGROUP NK_INTEGER */
- { 372, -2 }, /* (283) dnode_list ::= DNODE NK_INTEGER */
- { 372, -3 }, /* (284) dnode_list ::= dnode_list DNODE NK_INTEGER */
- { 305, -4 }, /* (285) cmd ::= DELETE FROM full_table_name where_clause_opt */
- { 305, -1 }, /* (286) cmd ::= query_expression */
- { 305, -7 }, /* (287) cmd ::= INSERT INTO full_table_name NK_LP col_name_list NK_RP query_expression */
- { 305, -4 }, /* (288) cmd ::= INSERT INTO full_table_name query_expression */
- { 308, -1 }, /* (289) literal ::= NK_INTEGER */
- { 308, -1 }, /* (290) literal ::= NK_FLOAT */
- { 308, -1 }, /* (291) literal ::= NK_STRING */
- { 308, -1 }, /* (292) literal ::= NK_BOOL */
- { 308, -2 }, /* (293) literal ::= TIMESTAMP NK_STRING */
- { 308, -1 }, /* (294) literal ::= duration_literal */
- { 308, -1 }, /* (295) literal ::= NULL */
- { 308, -1 }, /* (296) literal ::= NK_QUESTION */
- { 349, -1 }, /* (297) duration_literal ::= NK_VARIABLE */
- { 374, -1 }, /* (298) signed ::= NK_INTEGER */
- { 374, -2 }, /* (299) signed ::= NK_PLUS NK_INTEGER */
- { 374, -2 }, /* (300) signed ::= NK_MINUS NK_INTEGER */
- { 374, -1 }, /* (301) signed ::= NK_FLOAT */
- { 374, -2 }, /* (302) signed ::= NK_PLUS NK_FLOAT */
- { 374, -2 }, /* (303) signed ::= NK_MINUS NK_FLOAT */
- { 338, -1 }, /* (304) signed_literal ::= signed */
- { 338, -1 }, /* (305) signed_literal ::= NK_STRING */
- { 338, -1 }, /* (306) signed_literal ::= NK_BOOL */
- { 338, -2 }, /* (307) signed_literal ::= TIMESTAMP NK_STRING */
- { 338, -1 }, /* (308) signed_literal ::= duration_literal */
- { 338, -1 }, /* (309) signed_literal ::= NULL */
- { 338, -1 }, /* (310) signed_literal ::= literal_func */
- { 338, -1 }, /* (311) signed_literal ::= NK_QUESTION */
- { 376, -1 }, /* (312) literal_list ::= signed_literal */
- { 376, -3 }, /* (313) literal_list ::= literal_list NK_COMMA signed_literal */
- { 316, -1 }, /* (314) db_name ::= NK_ID */
- { 344, -1 }, /* (315) table_name ::= NK_ID */
- { 336, -1 }, /* (316) column_name ::= NK_ID */
- { 351, -1 }, /* (317) function_name ::= NK_ID */
- { 377, -1 }, /* (318) table_alias ::= NK_ID */
- { 378, -1 }, /* (319) column_alias ::= NK_ID */
- { 310, -1 }, /* (320) user_name ::= NK_ID */
- { 363, -1 }, /* (321) topic_name ::= NK_ID */
- { 370, -1 }, /* (322) stream_name ::= NK_ID */
- { 365, -1 }, /* (323) cgroup_name ::= NK_ID */
- { 379, -1 }, /* (324) expression ::= literal */
- { 379, -1 }, /* (325) expression ::= pseudo_column */
- { 379, -1 }, /* (326) expression ::= column_reference */
- { 379, -1 }, /* (327) expression ::= function_expression */
- { 379, -1 }, /* (328) expression ::= subquery */
- { 379, -3 }, /* (329) expression ::= NK_LP expression NK_RP */
- { 379, -2 }, /* (330) expression ::= NK_PLUS expression */
- { 379, -2 }, /* (331) expression ::= NK_MINUS expression */
- { 379, -3 }, /* (332) expression ::= expression NK_PLUS expression */
- { 379, -3 }, /* (333) expression ::= expression NK_MINUS expression */
- { 379, -3 }, /* (334) expression ::= expression NK_STAR expression */
- { 379, -3 }, /* (335) expression ::= expression NK_SLASH expression */
- { 379, -3 }, /* (336) expression ::= expression NK_REM expression */
- { 379, -3 }, /* (337) expression ::= column_reference NK_ARROW NK_STRING */
- { 379, -3 }, /* (338) expression ::= expression NK_BITAND expression */
- { 379, -3 }, /* (339) expression ::= expression NK_BITOR expression */
- { 341, -1 }, /* (340) expression_list ::= expression */
- { 341, -3 }, /* (341) expression_list ::= expression_list NK_COMMA expression */
- { 381, -1 }, /* (342) column_reference ::= column_name */
- { 381, -3 }, /* (343) column_reference ::= table_name NK_DOT column_name */
- { 380, -1 }, /* (344) pseudo_column ::= ROWTS */
- { 380, -1 }, /* (345) pseudo_column ::= TBNAME */
- { 380, -3 }, /* (346) pseudo_column ::= table_name NK_DOT TBNAME */
- { 380, -1 }, /* (347) pseudo_column ::= QSTART */
- { 380, -1 }, /* (348) pseudo_column ::= QEND */
- { 380, -1 }, /* (349) pseudo_column ::= QDURATION */
- { 380, -1 }, /* (350) pseudo_column ::= WSTART */
- { 380, -1 }, /* (351) pseudo_column ::= WEND */
- { 380, -1 }, /* (352) pseudo_column ::= WDURATION */
- { 382, -4 }, /* (353) function_expression ::= function_name NK_LP expression_list NK_RP */
- { 382, -4 }, /* (354) function_expression ::= star_func NK_LP star_func_para_list NK_RP */
- { 382, -6 }, /* (355) function_expression ::= CAST NK_LP expression AS type_name NK_RP */
- { 382, -1 }, /* (356) function_expression ::= literal_func */
- { 375, -3 }, /* (357) literal_func ::= noarg_func NK_LP NK_RP */
- { 375, -1 }, /* (358) literal_func ::= NOW */
- { 386, -1 }, /* (359) noarg_func ::= NOW */
- { 386, -1 }, /* (360) noarg_func ::= TODAY */
- { 386, -1 }, /* (361) noarg_func ::= TIMEZONE */
- { 386, -1 }, /* (362) noarg_func ::= DATABASE */
- { 386, -1 }, /* (363) noarg_func ::= CLIENT_VERSION */
- { 386, -1 }, /* (364) noarg_func ::= SERVER_VERSION */
- { 386, -1 }, /* (365) noarg_func ::= SERVER_STATUS */
- { 386, -1 }, /* (366) noarg_func ::= CURRENT_USER */
- { 386, -1 }, /* (367) noarg_func ::= USER */
- { 384, -1 }, /* (368) star_func ::= COUNT */
- { 384, -1 }, /* (369) star_func ::= FIRST */
- { 384, -1 }, /* (370) star_func ::= LAST */
- { 384, -1 }, /* (371) star_func ::= LAST_ROW */
- { 385, -1 }, /* (372) star_func_para_list ::= NK_STAR */
- { 385, -1 }, /* (373) star_func_para_list ::= other_para_list */
- { 387, -1 }, /* (374) other_para_list ::= star_func_para */
- { 387, -3 }, /* (375) other_para_list ::= other_para_list NK_COMMA star_func_para */
- { 388, -1 }, /* (376) star_func_para ::= expression */
- { 388, -3 }, /* (377) star_func_para ::= table_name NK_DOT NK_STAR */
- { 389, -3 }, /* (378) predicate ::= expression compare_op expression */
- { 389, -5 }, /* (379) predicate ::= expression BETWEEN expression AND expression */
- { 389, -6 }, /* (380) predicate ::= expression NOT BETWEEN expression AND expression */
- { 389, -3 }, /* (381) predicate ::= expression IS NULL */
- { 389, -4 }, /* (382) predicate ::= expression IS NOT NULL */
- { 389, -3 }, /* (383) predicate ::= expression in_op in_predicate_value */
- { 390, -1 }, /* (384) compare_op ::= NK_LT */
- { 390, -1 }, /* (385) compare_op ::= NK_GT */
- { 390, -1 }, /* (386) compare_op ::= NK_LE */
- { 390, -1 }, /* (387) compare_op ::= NK_GE */
- { 390, -1 }, /* (388) compare_op ::= NK_NE */
- { 390, -1 }, /* (389) compare_op ::= NK_EQ */
- { 390, -1 }, /* (390) compare_op ::= LIKE */
- { 390, -2 }, /* (391) compare_op ::= NOT LIKE */
- { 390, -1 }, /* (392) compare_op ::= MATCH */
- { 390, -1 }, /* (393) compare_op ::= NMATCH */
- { 390, -1 }, /* (394) compare_op ::= CONTAINS */
- { 391, -1 }, /* (395) in_op ::= IN */
- { 391, -2 }, /* (396) in_op ::= NOT IN */
- { 392, -3 }, /* (397) in_predicate_value ::= NK_LP literal_list NK_RP */
- { 393, -1 }, /* (398) boolean_value_expression ::= boolean_primary */
- { 393, -2 }, /* (399) boolean_value_expression ::= NOT boolean_primary */
- { 393, -3 }, /* (400) boolean_value_expression ::= boolean_value_expression OR boolean_value_expression */
- { 393, -3 }, /* (401) boolean_value_expression ::= boolean_value_expression AND boolean_value_expression */
- { 394, -1 }, /* (402) boolean_primary ::= predicate */
- { 394, -3 }, /* (403) boolean_primary ::= NK_LP boolean_value_expression NK_RP */
- { 395, -1 }, /* (404) common_expression ::= expression */
- { 395, -1 }, /* (405) common_expression ::= boolean_value_expression */
- { 396, 0 }, /* (406) from_clause_opt ::= */
- { 396, -2 }, /* (407) from_clause_opt ::= FROM table_reference_list */
- { 397, -1 }, /* (408) table_reference_list ::= table_reference */
- { 397, -3 }, /* (409) table_reference_list ::= table_reference_list NK_COMMA table_reference */
- { 398, -1 }, /* (410) table_reference ::= table_primary */
- { 398, -1 }, /* (411) table_reference ::= joined_table */
- { 399, -2 }, /* (412) table_primary ::= table_name alias_opt */
- { 399, -4 }, /* (413) table_primary ::= db_name NK_DOT table_name alias_opt */
- { 399, -2 }, /* (414) table_primary ::= subquery alias_opt */
- { 399, -1 }, /* (415) table_primary ::= parenthesized_joined_table */
- { 401, 0 }, /* (416) alias_opt ::= */
- { 401, -1 }, /* (417) alias_opt ::= table_alias */
- { 401, -2 }, /* (418) alias_opt ::= AS table_alias */
- { 402, -3 }, /* (419) parenthesized_joined_table ::= NK_LP joined_table NK_RP */
- { 402, -3 }, /* (420) parenthesized_joined_table ::= NK_LP parenthesized_joined_table NK_RP */
- { 400, -6 }, /* (421) joined_table ::= table_reference join_type JOIN table_reference ON search_condition */
- { 403, 0 }, /* (422) join_type ::= */
- { 403, -1 }, /* (423) join_type ::= INNER */
- { 405, -12 }, /* (424) query_specification ::= SELECT set_quantifier_opt select_list from_clause_opt where_clause_opt partition_by_clause_opt range_opt every_opt fill_opt twindow_clause_opt group_by_clause_opt having_clause_opt */
- { 406, 0 }, /* (425) set_quantifier_opt ::= */
- { 406, -1 }, /* (426) set_quantifier_opt ::= DISTINCT */
- { 406, -1 }, /* (427) set_quantifier_opt ::= ALL */
- { 407, -1 }, /* (428) select_list ::= select_item */
- { 407, -3 }, /* (429) select_list ::= select_list NK_COMMA select_item */
- { 415, -1 }, /* (430) select_item ::= NK_STAR */
- { 415, -1 }, /* (431) select_item ::= common_expression */
- { 415, -2 }, /* (432) select_item ::= common_expression column_alias */
- { 415, -3 }, /* (433) select_item ::= common_expression AS column_alias */
- { 415, -3 }, /* (434) select_item ::= table_name NK_DOT NK_STAR */
- { 373, 0 }, /* (435) where_clause_opt ::= */
- { 373, -2 }, /* (436) where_clause_opt ::= WHERE search_condition */
- { 408, 0 }, /* (437) partition_by_clause_opt ::= */
- { 408, -3 }, /* (438) partition_by_clause_opt ::= PARTITION BY expression_list */
- { 412, 0 }, /* (439) twindow_clause_opt ::= */
- { 412, -6 }, /* (440) twindow_clause_opt ::= SESSION NK_LP column_reference NK_COMMA duration_literal NK_RP */
- { 412, -4 }, /* (441) twindow_clause_opt ::= STATE_WINDOW NK_LP expression NK_RP */
- { 412, -6 }, /* (442) twindow_clause_opt ::= INTERVAL NK_LP duration_literal NK_RP sliding_opt fill_opt */
- { 412, -8 }, /* (443) twindow_clause_opt ::= INTERVAL NK_LP duration_literal NK_COMMA duration_literal NK_RP sliding_opt fill_opt */
- { 359, 0 }, /* (444) sliding_opt ::= */
- { 359, -4 }, /* (445) sliding_opt ::= SLIDING NK_LP duration_literal NK_RP */
- { 411, 0 }, /* (446) fill_opt ::= */
- { 411, -4 }, /* (447) fill_opt ::= FILL NK_LP fill_mode NK_RP */
- { 411, -6 }, /* (448) fill_opt ::= FILL NK_LP VALUE NK_COMMA literal_list NK_RP */
- { 416, -1 }, /* (449) fill_mode ::= NONE */
- { 416, -1 }, /* (450) fill_mode ::= PREV */
- { 416, -1 }, /* (451) fill_mode ::= NULL */
- { 416, -1 }, /* (452) fill_mode ::= LINEAR */
- { 416, -1 }, /* (453) fill_mode ::= NEXT */
- { 413, 0 }, /* (454) group_by_clause_opt ::= */
- { 413, -3 }, /* (455) group_by_clause_opt ::= GROUP BY group_by_list */
- { 417, -1 }, /* (456) group_by_list ::= expression */
- { 417, -3 }, /* (457) group_by_list ::= group_by_list NK_COMMA expression */
- { 414, 0 }, /* (458) having_clause_opt ::= */
- { 414, -2 }, /* (459) having_clause_opt ::= HAVING search_condition */
- { 409, 0 }, /* (460) range_opt ::= */
- { 409, -6 }, /* (461) range_opt ::= RANGE NK_LP expression NK_COMMA expression NK_RP */
- { 410, 0 }, /* (462) every_opt ::= */
- { 410, -4 }, /* (463) every_opt ::= EVERY NK_LP duration_literal NK_RP */
- { 364, -4 }, /* (464) query_expression ::= query_expression_body order_by_clause_opt slimit_clause_opt limit_clause_opt */
- { 418, -1 }, /* (465) query_expression_body ::= query_primary */
- { 418, -4 }, /* (466) query_expression_body ::= query_expression_body UNION ALL query_expression_body */
- { 418, -3 }, /* (467) query_expression_body ::= query_expression_body UNION query_expression_body */
- { 422, -1 }, /* (468) query_primary ::= query_specification */
- { 422, -6 }, /* (469) query_primary ::= NK_LP query_expression_body order_by_clause_opt slimit_clause_opt limit_clause_opt NK_RP */
- { 419, 0 }, /* (470) order_by_clause_opt ::= */
- { 419, -3 }, /* (471) order_by_clause_opt ::= ORDER BY sort_specification_list */
- { 420, 0 }, /* (472) slimit_clause_opt ::= */
- { 420, -2 }, /* (473) slimit_clause_opt ::= SLIMIT NK_INTEGER */
- { 420, -4 }, /* (474) slimit_clause_opt ::= SLIMIT NK_INTEGER SOFFSET NK_INTEGER */
- { 420, -4 }, /* (475) slimit_clause_opt ::= SLIMIT NK_INTEGER NK_COMMA NK_INTEGER */
- { 421, 0 }, /* (476) limit_clause_opt ::= */
- { 421, -2 }, /* (477) limit_clause_opt ::= LIMIT NK_INTEGER */
- { 421, -4 }, /* (478) limit_clause_opt ::= LIMIT NK_INTEGER OFFSET NK_INTEGER */
- { 421, -4 }, /* (479) limit_clause_opt ::= LIMIT NK_INTEGER NK_COMMA NK_INTEGER */
- { 383, -3 }, /* (480) subquery ::= NK_LP query_expression NK_RP */
- { 404, -1 }, /* (481) search_condition ::= common_expression */
- { 423, -1 }, /* (482) sort_specification_list ::= sort_specification */
- { 423, -3 }, /* (483) sort_specification_list ::= sort_specification_list NK_COMMA sort_specification */
- { 424, -3 }, /* (484) sort_specification ::= expression ordering_specification_opt null_ordering_opt */
- { 425, 0 }, /* (485) ordering_specification_opt ::= */
- { 425, -1 }, /* (486) ordering_specification_opt ::= ASC */
- { 425, -1 }, /* (487) ordering_specification_opt ::= DESC */
- { 426, 0 }, /* (488) null_ordering_opt ::= */
- { 426, -2 }, /* (489) null_ordering_opt ::= NULLS FIRST */
- { 426, -2 }, /* (490) null_ordering_opt ::= NULLS LAST */
+ { 309, -6 }, /* (0) cmd ::= CREATE ACCOUNT NK_ID PASS NK_STRING account_options */
+ { 309, -4 }, /* (1) cmd ::= ALTER ACCOUNT NK_ID alter_account_options */
+ { 310, 0 }, /* (2) account_options ::= */
+ { 310, -3 }, /* (3) account_options ::= account_options PPS literal */
+ { 310, -3 }, /* (4) account_options ::= account_options TSERIES literal */
+ { 310, -3 }, /* (5) account_options ::= account_options STORAGE literal */
+ { 310, -3 }, /* (6) account_options ::= account_options STREAMS literal */
+ { 310, -3 }, /* (7) account_options ::= account_options QTIME literal */
+ { 310, -3 }, /* (8) account_options ::= account_options DBS literal */
+ { 310, -3 }, /* (9) account_options ::= account_options USERS literal */
+ { 310, -3 }, /* (10) account_options ::= account_options CONNS literal */
+ { 310, -3 }, /* (11) account_options ::= account_options STATE literal */
+ { 311, -1 }, /* (12) alter_account_options ::= alter_account_option */
+ { 311, -2 }, /* (13) alter_account_options ::= alter_account_options alter_account_option */
+ { 313, -2 }, /* (14) alter_account_option ::= PASS literal */
+ { 313, -2 }, /* (15) alter_account_option ::= PPS literal */
+ { 313, -2 }, /* (16) alter_account_option ::= TSERIES literal */
+ { 313, -2 }, /* (17) alter_account_option ::= STORAGE literal */
+ { 313, -2 }, /* (18) alter_account_option ::= STREAMS literal */
+ { 313, -2 }, /* (19) alter_account_option ::= QTIME literal */
+ { 313, -2 }, /* (20) alter_account_option ::= DBS literal */
+ { 313, -2 }, /* (21) alter_account_option ::= USERS literal */
+ { 313, -2 }, /* (22) alter_account_option ::= CONNS literal */
+ { 313, -2 }, /* (23) alter_account_option ::= STATE literal */
+ { 309, -6 }, /* (24) cmd ::= CREATE USER user_name PASS NK_STRING sysinfo_opt */
+ { 309, -5 }, /* (25) cmd ::= ALTER USER user_name PASS NK_STRING */
+ { 309, -5 }, /* (26) cmd ::= ALTER USER user_name ENABLE NK_INTEGER */
+ { 309, -5 }, /* (27) cmd ::= ALTER USER user_name SYSINFO NK_INTEGER */
+ { 309, -3 }, /* (28) cmd ::= DROP USER user_name */
+ { 315, 0 }, /* (29) sysinfo_opt ::= */
+ { 315, -2 }, /* (30) sysinfo_opt ::= SYSINFO NK_INTEGER */
+ { 309, -6 }, /* (31) cmd ::= GRANT privileges ON priv_level TO user_name */
+ { 309, -6 }, /* (32) cmd ::= REVOKE privileges ON priv_level FROM user_name */
+ { 316, -1 }, /* (33) privileges ::= ALL */
+ { 316, -1 }, /* (34) privileges ::= priv_type_list */
+ { 318, -1 }, /* (35) priv_type_list ::= priv_type */
+ { 318, -3 }, /* (36) priv_type_list ::= priv_type_list NK_COMMA priv_type */
+ { 319, -1 }, /* (37) priv_type ::= READ */
+ { 319, -1 }, /* (38) priv_type ::= WRITE */
+ { 317, -3 }, /* (39) priv_level ::= NK_STAR NK_DOT NK_STAR */
+ { 317, -3 }, /* (40) priv_level ::= db_name NK_DOT NK_STAR */
+ { 309, -3 }, /* (41) cmd ::= CREATE DNODE dnode_endpoint */
+ { 309, -5 }, /* (42) cmd ::= CREATE DNODE dnode_endpoint PORT NK_INTEGER */
+ { 309, -3 }, /* (43) cmd ::= DROP DNODE NK_INTEGER */
+ { 309, -3 }, /* (44) cmd ::= DROP DNODE dnode_endpoint */
+ { 309, -4 }, /* (45) cmd ::= ALTER DNODE NK_INTEGER NK_STRING */
+ { 309, -5 }, /* (46) cmd ::= ALTER DNODE NK_INTEGER NK_STRING NK_STRING */
+ { 309, -4 }, /* (47) cmd ::= ALTER ALL DNODES NK_STRING */
+ { 309, -5 }, /* (48) cmd ::= ALTER ALL DNODES NK_STRING NK_STRING */
+ { 321, -1 }, /* (49) dnode_endpoint ::= NK_STRING */
+ { 321, -1 }, /* (50) dnode_endpoint ::= NK_ID */
+ { 321, -1 }, /* (51) dnode_endpoint ::= NK_IPTOKEN */
+ { 309, -3 }, /* (52) cmd ::= ALTER LOCAL NK_STRING */
+ { 309, -4 }, /* (53) cmd ::= ALTER LOCAL NK_STRING NK_STRING */
+ { 309, -5 }, /* (54) cmd ::= CREATE QNODE ON DNODE NK_INTEGER */
+ { 309, -5 }, /* (55) cmd ::= DROP QNODE ON DNODE NK_INTEGER */
+ { 309, -5 }, /* (56) cmd ::= CREATE BNODE ON DNODE NK_INTEGER */
+ { 309, -5 }, /* (57) cmd ::= DROP BNODE ON DNODE NK_INTEGER */
+ { 309, -5 }, /* (58) cmd ::= CREATE SNODE ON DNODE NK_INTEGER */
+ { 309, -5 }, /* (59) cmd ::= DROP SNODE ON DNODE NK_INTEGER */
+ { 309, -5 }, /* (60) cmd ::= CREATE MNODE ON DNODE NK_INTEGER */
+ { 309, -5 }, /* (61) cmd ::= DROP MNODE ON DNODE NK_INTEGER */
+ { 309, -5 }, /* (62) cmd ::= CREATE DATABASE not_exists_opt db_name db_options */
+ { 309, -4 }, /* (63) cmd ::= DROP DATABASE exists_opt db_name */
+ { 309, -2 }, /* (64) cmd ::= USE db_name */
+ { 309, -4 }, /* (65) cmd ::= ALTER DATABASE db_name alter_db_options */
+ { 309, -3 }, /* (66) cmd ::= FLUSH DATABASE db_name */
+ { 309, -3 }, /* (67) cmd ::= TRIM DATABASE db_name */
+ { 322, -3 }, /* (68) not_exists_opt ::= IF NOT EXISTS */
+ { 322, 0 }, /* (69) not_exists_opt ::= */
+ { 324, -2 }, /* (70) exists_opt ::= IF EXISTS */
+ { 324, 0 }, /* (71) exists_opt ::= */
+ { 323, 0 }, /* (72) db_options ::= */
+ { 323, -3 }, /* (73) db_options ::= db_options BUFFER NK_INTEGER */
+ { 323, -3 }, /* (74) db_options ::= db_options CACHEMODEL NK_STRING */
+ { 323, -3 }, /* (75) db_options ::= db_options CACHESIZE NK_INTEGER */
+ { 323, -3 }, /* (76) db_options ::= db_options COMP NK_INTEGER */
+ { 323, -3 }, /* (77) db_options ::= db_options DURATION NK_INTEGER */
+ { 323, -3 }, /* (78) db_options ::= db_options DURATION NK_VARIABLE */
+ { 323, -3 }, /* (79) db_options ::= db_options MAXROWS NK_INTEGER */
+ { 323, -3 }, /* (80) db_options ::= db_options MINROWS NK_INTEGER */
+ { 323, -3 }, /* (81) db_options ::= db_options KEEP integer_list */
+ { 323, -3 }, /* (82) db_options ::= db_options KEEP variable_list */
+ { 323, -3 }, /* (83) db_options ::= db_options PAGES NK_INTEGER */
+ { 323, -3 }, /* (84) db_options ::= db_options PAGESIZE NK_INTEGER */
+ { 323, -3 }, /* (85) db_options ::= db_options TSDB_PAGESIZE NK_INTEGER */
+ { 323, -3 }, /* (86) db_options ::= db_options PRECISION NK_STRING */
+ { 323, -3 }, /* (87) db_options ::= db_options REPLICA NK_INTEGER */
+ { 323, -3 }, /* (88) db_options ::= db_options STRICT NK_STRING */
+ { 323, -3 }, /* (89) db_options ::= db_options VGROUPS NK_INTEGER */
+ { 323, -3 }, /* (90) db_options ::= db_options SINGLE_STABLE NK_INTEGER */
+ { 323, -3 }, /* (91) db_options ::= db_options RETENTIONS retention_list */
+ { 323, -3 }, /* (92) db_options ::= db_options SCHEMALESS NK_INTEGER */
+ { 323, -3 }, /* (93) db_options ::= db_options WAL_LEVEL NK_INTEGER */
+ { 323, -3 }, /* (94) db_options ::= db_options WAL_FSYNC_PERIOD NK_INTEGER */
+ { 323, -3 }, /* (95) db_options ::= db_options WAL_RETENTION_PERIOD NK_INTEGER */
+ { 323, -4 }, /* (96) db_options ::= db_options WAL_RETENTION_PERIOD NK_MINUS NK_INTEGER */
+ { 323, -3 }, /* (97) db_options ::= db_options WAL_RETENTION_SIZE NK_INTEGER */
+ { 323, -4 }, /* (98) db_options ::= db_options WAL_RETENTION_SIZE NK_MINUS NK_INTEGER */
+ { 323, -3 }, /* (99) db_options ::= db_options WAL_ROLL_PERIOD NK_INTEGER */
+ { 323, -3 }, /* (100) db_options ::= db_options WAL_SEGMENT_SIZE NK_INTEGER */
+ { 323, -3 }, /* (101) db_options ::= db_options STT_TRIGGER NK_INTEGER */
+ { 323, -3 }, /* (102) db_options ::= db_options TABLE_PREFIX NK_INTEGER */
+ { 323, -3 }, /* (103) db_options ::= db_options TABLE_SUFFIX NK_INTEGER */
+ { 325, -1 }, /* (104) alter_db_options ::= alter_db_option */
+ { 325, -2 }, /* (105) alter_db_options ::= alter_db_options alter_db_option */
+ { 329, -2 }, /* (106) alter_db_option ::= CACHEMODEL NK_STRING */
+ { 329, -2 }, /* (107) alter_db_option ::= CACHESIZE NK_INTEGER */
+ { 329, -2 }, /* (108) alter_db_option ::= WAL_FSYNC_PERIOD NK_INTEGER */
+ { 329, -2 }, /* (109) alter_db_option ::= KEEP integer_list */
+ { 329, -2 }, /* (110) alter_db_option ::= KEEP variable_list */
+ { 329, -2 }, /* (111) alter_db_option ::= WAL_LEVEL NK_INTEGER */
+ { 329, -2 }, /* (112) alter_db_option ::= STT_TRIGGER NK_INTEGER */
+ { 326, -1 }, /* (113) integer_list ::= NK_INTEGER */
+ { 326, -3 }, /* (114) integer_list ::= integer_list NK_COMMA NK_INTEGER */
+ { 327, -1 }, /* (115) variable_list ::= NK_VARIABLE */
+ { 327, -3 }, /* (116) variable_list ::= variable_list NK_COMMA NK_VARIABLE */
+ { 328, -1 }, /* (117) retention_list ::= retention */
+ { 328, -3 }, /* (118) retention_list ::= retention_list NK_COMMA retention */
+ { 330, -3 }, /* (119) retention ::= NK_VARIABLE NK_COLON NK_VARIABLE */
+ { 309, -9 }, /* (120) cmd ::= CREATE TABLE not_exists_opt full_table_name NK_LP column_def_list NK_RP tags_def_opt table_options */
+ { 309, -3 }, /* (121) cmd ::= CREATE TABLE multi_create_clause */
+ { 309, -9 }, /* (122) cmd ::= CREATE STABLE not_exists_opt full_table_name NK_LP column_def_list NK_RP tags_def table_options */
+ { 309, -3 }, /* (123) cmd ::= DROP TABLE multi_drop_clause */
+ { 309, -4 }, /* (124) cmd ::= DROP STABLE exists_opt full_table_name */
+ { 309, -3 }, /* (125) cmd ::= ALTER TABLE alter_table_clause */
+ { 309, -3 }, /* (126) cmd ::= ALTER STABLE alter_table_clause */
+ { 338, -2 }, /* (127) alter_table_clause ::= full_table_name alter_table_options */
+ { 338, -5 }, /* (128) alter_table_clause ::= full_table_name ADD COLUMN column_name type_name */
+ { 338, -4 }, /* (129) alter_table_clause ::= full_table_name DROP COLUMN column_name */
+ { 338, -5 }, /* (130) alter_table_clause ::= full_table_name MODIFY COLUMN column_name type_name */
+ { 338, -5 }, /* (131) alter_table_clause ::= full_table_name RENAME COLUMN column_name column_name */
+ { 338, -5 }, /* (132) alter_table_clause ::= full_table_name ADD TAG column_name type_name */
+ { 338, -4 }, /* (133) alter_table_clause ::= full_table_name DROP TAG column_name */
+ { 338, -5 }, /* (134) alter_table_clause ::= full_table_name MODIFY TAG column_name type_name */
+ { 338, -5 }, /* (135) alter_table_clause ::= full_table_name RENAME TAG column_name column_name */
+ { 338, -6 }, /* (136) alter_table_clause ::= full_table_name SET TAG column_name NK_EQ signed_literal */
+ { 335, -1 }, /* (137) multi_create_clause ::= create_subtable_clause */
+ { 335, -2 }, /* (138) multi_create_clause ::= multi_create_clause create_subtable_clause */
+ { 343, -10 }, /* (139) create_subtable_clause ::= not_exists_opt full_table_name USING full_table_name specific_cols_opt TAGS NK_LP expression_list NK_RP table_options */
+ { 337, -1 }, /* (140) multi_drop_clause ::= drop_table_clause */
+ { 337, -2 }, /* (141) multi_drop_clause ::= multi_drop_clause drop_table_clause */
+ { 346, -2 }, /* (142) drop_table_clause ::= exists_opt full_table_name */
+ { 344, 0 }, /* (143) specific_cols_opt ::= */
+ { 344, -3 }, /* (144) specific_cols_opt ::= NK_LP col_name_list NK_RP */
+ { 331, -1 }, /* (145) full_table_name ::= table_name */
+ { 331, -3 }, /* (146) full_table_name ::= db_name NK_DOT table_name */
+ { 332, -1 }, /* (147) column_def_list ::= column_def */
+ { 332, -3 }, /* (148) column_def_list ::= column_def_list NK_COMMA column_def */
+ { 349, -2 }, /* (149) column_def ::= column_name type_name */
+ { 349, -4 }, /* (150) column_def ::= column_name type_name COMMENT NK_STRING */
+ { 341, -1 }, /* (151) type_name ::= BOOL */
+ { 341, -1 }, /* (152) type_name ::= TINYINT */
+ { 341, -1 }, /* (153) type_name ::= SMALLINT */
+ { 341, -1 }, /* (154) type_name ::= INT */
+ { 341, -1 }, /* (155) type_name ::= INTEGER */
+ { 341, -1 }, /* (156) type_name ::= BIGINT */
+ { 341, -1 }, /* (157) type_name ::= FLOAT */
+ { 341, -1 }, /* (158) type_name ::= DOUBLE */
+ { 341, -4 }, /* (159) type_name ::= BINARY NK_LP NK_INTEGER NK_RP */
+ { 341, -1 }, /* (160) type_name ::= TIMESTAMP */
+ { 341, -4 }, /* (161) type_name ::= NCHAR NK_LP NK_INTEGER NK_RP */
+ { 341, -2 }, /* (162) type_name ::= TINYINT UNSIGNED */
+ { 341, -2 }, /* (163) type_name ::= SMALLINT UNSIGNED */
+ { 341, -2 }, /* (164) type_name ::= INT UNSIGNED */
+ { 341, -2 }, /* (165) type_name ::= BIGINT UNSIGNED */
+ { 341, -1 }, /* (166) type_name ::= JSON */
+ { 341, -4 }, /* (167) type_name ::= VARCHAR NK_LP NK_INTEGER NK_RP */
+ { 341, -1 }, /* (168) type_name ::= MEDIUMBLOB */
+ { 341, -1 }, /* (169) type_name ::= BLOB */
+ { 341, -4 }, /* (170) type_name ::= VARBINARY NK_LP NK_INTEGER NK_RP */
+ { 341, -1 }, /* (171) type_name ::= DECIMAL */
+ { 341, -4 }, /* (172) type_name ::= DECIMAL NK_LP NK_INTEGER NK_RP */
+ { 341, -6 }, /* (173) type_name ::= DECIMAL NK_LP NK_INTEGER NK_COMMA NK_INTEGER NK_RP */
+ { 333, 0 }, /* (174) tags_def_opt ::= */
+ { 333, -1 }, /* (175) tags_def_opt ::= tags_def */
+ { 336, -4 }, /* (176) tags_def ::= TAGS NK_LP column_def_list NK_RP */
+ { 334, 0 }, /* (177) table_options ::= */
+ { 334, -3 }, /* (178) table_options ::= table_options COMMENT NK_STRING */
+ { 334, -3 }, /* (179) table_options ::= table_options MAX_DELAY duration_list */
+ { 334, -3 }, /* (180) table_options ::= table_options WATERMARK duration_list */
+ { 334, -5 }, /* (181) table_options ::= table_options ROLLUP NK_LP rollup_func_list NK_RP */
+ { 334, -3 }, /* (182) table_options ::= table_options TTL NK_INTEGER */
+ { 334, -5 }, /* (183) table_options ::= table_options SMA NK_LP col_name_list NK_RP */
+ { 339, -1 }, /* (184) alter_table_options ::= alter_table_option */
+ { 339, -2 }, /* (185) alter_table_options ::= alter_table_options alter_table_option */
+ { 352, -2 }, /* (186) alter_table_option ::= COMMENT NK_STRING */
+ { 352, -2 }, /* (187) alter_table_option ::= TTL NK_INTEGER */
+ { 350, -1 }, /* (188) duration_list ::= duration_literal */
+ { 350, -3 }, /* (189) duration_list ::= duration_list NK_COMMA duration_literal */
+ { 351, -1 }, /* (190) rollup_func_list ::= rollup_func_name */
+ { 351, -3 }, /* (191) rollup_func_list ::= rollup_func_list NK_COMMA rollup_func_name */
+ { 354, -1 }, /* (192) rollup_func_name ::= function_name */
+ { 354, -1 }, /* (193) rollup_func_name ::= FIRST */
+ { 354, -1 }, /* (194) rollup_func_name ::= LAST */
+ { 347, -1 }, /* (195) col_name_list ::= col_name */
+ { 347, -3 }, /* (196) col_name_list ::= col_name_list NK_COMMA col_name */
+ { 356, -1 }, /* (197) col_name ::= column_name */
+ { 309, -2 }, /* (198) cmd ::= SHOW DNODES */
+ { 309, -2 }, /* (199) cmd ::= SHOW USERS */
+ { 309, -2 }, /* (200) cmd ::= SHOW DATABASES */
+ { 309, -4 }, /* (201) cmd ::= SHOW db_name_cond_opt TABLES like_pattern_opt */
+ { 309, -4 }, /* (202) cmd ::= SHOW db_name_cond_opt STABLES like_pattern_opt */
+ { 309, -3 }, /* (203) cmd ::= SHOW db_name_cond_opt VGROUPS */
+ { 309, -2 }, /* (204) cmd ::= SHOW MNODES */
+ { 309, -2 }, /* (205) cmd ::= SHOW MODULES */
+ { 309, -2 }, /* (206) cmd ::= SHOW QNODES */
+ { 309, -2 }, /* (207) cmd ::= SHOW FUNCTIONS */
+ { 309, -5 }, /* (208) cmd ::= SHOW INDEXES FROM table_name_cond from_db_opt */
+ { 309, -2 }, /* (209) cmd ::= SHOW STREAMS */
+ { 309, -2 }, /* (210) cmd ::= SHOW ACCOUNTS */
+ { 309, -2 }, /* (211) cmd ::= SHOW APPS */
+ { 309, -2 }, /* (212) cmd ::= SHOW CONNECTIONS */
+ { 309, -2 }, /* (213) cmd ::= SHOW LICENCES */
+ { 309, -2 }, /* (214) cmd ::= SHOW GRANTS */
+ { 309, -4 }, /* (215) cmd ::= SHOW CREATE DATABASE db_name */
+ { 309, -4 }, /* (216) cmd ::= SHOW CREATE TABLE full_table_name */
+ { 309, -4 }, /* (217) cmd ::= SHOW CREATE STABLE full_table_name */
+ { 309, -2 }, /* (218) cmd ::= SHOW QUERIES */
+ { 309, -2 }, /* (219) cmd ::= SHOW SCORES */
+ { 309, -2 }, /* (220) cmd ::= SHOW TOPICS */
+ { 309, -2 }, /* (221) cmd ::= SHOW VARIABLES */
+ { 309, -3 }, /* (222) cmd ::= SHOW LOCAL VARIABLES */
+ { 309, -4 }, /* (223) cmd ::= SHOW DNODE NK_INTEGER VARIABLES */
+ { 309, -2 }, /* (224) cmd ::= SHOW BNODES */
+ { 309, -2 }, /* (225) cmd ::= SHOW SNODES */
+ { 309, -2 }, /* (226) cmd ::= SHOW CLUSTER */
+ { 309, -2 }, /* (227) cmd ::= SHOW TRANSACTIONS */
+ { 309, -4 }, /* (228) cmd ::= SHOW TABLE DISTRIBUTED full_table_name */
+ { 309, -2 }, /* (229) cmd ::= SHOW CONSUMERS */
+ { 309, -2 }, /* (230) cmd ::= SHOW SUBSCRIPTIONS */
+ { 309, -5 }, /* (231) cmd ::= SHOW TAGS FROM table_name_cond from_db_opt */
+ { 309, -3 }, /* (232) cmd ::= SHOW VNODES NK_INTEGER */
+ { 309, -3 }, /* (233) cmd ::= SHOW VNODES NK_STRING */
+ { 357, 0 }, /* (234) db_name_cond_opt ::= */
+ { 357, -2 }, /* (235) db_name_cond_opt ::= db_name NK_DOT */
+ { 358, 0 }, /* (236) like_pattern_opt ::= */
+ { 358, -2 }, /* (237) like_pattern_opt ::= LIKE NK_STRING */
+ { 359, -1 }, /* (238) table_name_cond ::= table_name */
+ { 360, 0 }, /* (239) from_db_opt ::= */
+ { 360, -2 }, /* (240) from_db_opt ::= FROM db_name */
+ { 309, -8 }, /* (241) cmd ::= CREATE SMA INDEX not_exists_opt full_table_name ON full_table_name index_options */
+ { 309, -4 }, /* (242) cmd ::= DROP INDEX exists_opt full_table_name */
+ { 361, -10 }, /* (243) index_options ::= FUNCTION NK_LP func_list NK_RP INTERVAL NK_LP duration_literal NK_RP sliding_opt sma_stream_opt */
+ { 361, -12 }, /* (244) index_options ::= FUNCTION NK_LP func_list NK_RP INTERVAL NK_LP duration_literal NK_COMMA duration_literal NK_RP sliding_opt sma_stream_opt */
+ { 362, -1 }, /* (245) func_list ::= func */
+ { 362, -3 }, /* (246) func_list ::= func_list NK_COMMA func */
+ { 365, -4 }, /* (247) func ::= function_name NK_LP expression_list NK_RP */
+ { 364, 0 }, /* (248) sma_stream_opt ::= */
+ { 364, -3 }, /* (249) sma_stream_opt ::= stream_options WATERMARK duration_literal */
+ { 364, -3 }, /* (250) sma_stream_opt ::= stream_options MAX_DELAY duration_literal */
+ { 309, -6 }, /* (251) cmd ::= CREATE TOPIC not_exists_opt topic_name AS query_expression */
+ { 309, -7 }, /* (252) cmd ::= CREATE TOPIC not_exists_opt topic_name AS DATABASE db_name */
+ { 309, -9 }, /* (253) cmd ::= CREATE TOPIC not_exists_opt topic_name WITH META AS DATABASE db_name */
+ { 309, -7 }, /* (254) cmd ::= CREATE TOPIC not_exists_opt topic_name AS STABLE full_table_name */
+ { 309, -9 }, /* (255) cmd ::= CREATE TOPIC not_exists_opt topic_name WITH META AS STABLE full_table_name */
+ { 309, -4 }, /* (256) cmd ::= DROP TOPIC exists_opt topic_name */
+ { 309, -7 }, /* (257) cmd ::= DROP CONSUMER GROUP exists_opt cgroup_name ON topic_name */
+ { 309, -2 }, /* (258) cmd ::= DESC full_table_name */
+ { 309, -2 }, /* (259) cmd ::= DESCRIBE full_table_name */
+ { 309, -3 }, /* (260) cmd ::= RESET QUERY CACHE */
+ { 309, -4 }, /* (261) cmd ::= EXPLAIN analyze_opt explain_options query_expression */
+ { 370, 0 }, /* (262) analyze_opt ::= */
+ { 370, -1 }, /* (263) analyze_opt ::= ANALYZE */
+ { 371, 0 }, /* (264) explain_options ::= */
+ { 371, -3 }, /* (265) explain_options ::= explain_options VERBOSE NK_BOOL */
+ { 371, -3 }, /* (266) explain_options ::= explain_options RATIO NK_FLOAT */
+ { 309, -10 }, /* (267) cmd ::= CREATE agg_func_opt FUNCTION not_exists_opt function_name AS NK_STRING OUTPUTTYPE type_name bufsize_opt */
+ { 309, -4 }, /* (268) cmd ::= DROP FUNCTION exists_opt function_name */
+ { 372, 0 }, /* (269) agg_func_opt ::= */
+ { 372, -1 }, /* (270) agg_func_opt ::= AGGREGATE */
+ { 373, 0 }, /* (271) bufsize_opt ::= */
+ { 373, -2 }, /* (272) bufsize_opt ::= BUFSIZE NK_INTEGER */
+ { 309, -9 }, /* (273) cmd ::= CREATE STREAM not_exists_opt stream_name stream_options INTO full_table_name AS query_expression */
+ { 309, -4 }, /* (274) cmd ::= DROP STREAM exists_opt stream_name */
+ { 366, 0 }, /* (275) stream_options ::= */
+ { 366, -3 }, /* (276) stream_options ::= stream_options TRIGGER AT_ONCE */
+ { 366, -3 }, /* (277) stream_options ::= stream_options TRIGGER WINDOW_CLOSE */
+ { 366, -4 }, /* (278) stream_options ::= stream_options TRIGGER MAX_DELAY duration_literal */
+ { 366, -3 }, /* (279) stream_options ::= stream_options WATERMARK duration_literal */
+ { 366, -4 }, /* (280) stream_options ::= stream_options IGNORE EXPIRED NK_INTEGER */
+ { 309, -3 }, /* (281) cmd ::= KILL CONNECTION NK_INTEGER */
+ { 309, -3 }, /* (282) cmd ::= KILL QUERY NK_STRING */
+ { 309, -3 }, /* (283) cmd ::= KILL TRANSACTION NK_INTEGER */
+ { 309, -2 }, /* (284) cmd ::= BALANCE VGROUP */
+ { 309, -4 }, /* (285) cmd ::= MERGE VGROUP NK_INTEGER NK_INTEGER */
+ { 309, -4 }, /* (286) cmd ::= REDISTRIBUTE VGROUP NK_INTEGER dnode_list */
+ { 309, -3 }, /* (287) cmd ::= SPLIT VGROUP NK_INTEGER */
+ { 375, -2 }, /* (288) dnode_list ::= DNODE NK_INTEGER */
+ { 375, -3 }, /* (289) dnode_list ::= dnode_list DNODE NK_INTEGER */
+ { 309, -4 }, /* (290) cmd ::= DELETE FROM full_table_name where_clause_opt */
+ { 309, -1 }, /* (291) cmd ::= query_expression */
+ { 309, -7 }, /* (292) cmd ::= INSERT INTO full_table_name NK_LP col_name_list NK_RP query_expression */
+ { 309, -4 }, /* (293) cmd ::= INSERT INTO full_table_name query_expression */
+ { 312, -1 }, /* (294) literal ::= NK_INTEGER */
+ { 312, -1 }, /* (295) literal ::= NK_FLOAT */
+ { 312, -1 }, /* (296) literal ::= NK_STRING */
+ { 312, -1 }, /* (297) literal ::= NK_BOOL */
+ { 312, -2 }, /* (298) literal ::= TIMESTAMP NK_STRING */
+ { 312, -1 }, /* (299) literal ::= duration_literal */
+ { 312, -1 }, /* (300) literal ::= NULL */
+ { 312, -1 }, /* (301) literal ::= NK_QUESTION */
+ { 353, -1 }, /* (302) duration_literal ::= NK_VARIABLE */
+ { 377, -1 }, /* (303) signed ::= NK_INTEGER */
+ { 377, -2 }, /* (304) signed ::= NK_PLUS NK_INTEGER */
+ { 377, -2 }, /* (305) signed ::= NK_MINUS NK_INTEGER */
+ { 377, -1 }, /* (306) signed ::= NK_FLOAT */
+ { 377, -2 }, /* (307) signed ::= NK_PLUS NK_FLOAT */
+ { 377, -2 }, /* (308) signed ::= NK_MINUS NK_FLOAT */
+ { 342, -1 }, /* (309) signed_literal ::= signed */
+ { 342, -1 }, /* (310) signed_literal ::= NK_STRING */
+ { 342, -1 }, /* (311) signed_literal ::= NK_BOOL */
+ { 342, -2 }, /* (312) signed_literal ::= TIMESTAMP NK_STRING */
+ { 342, -1 }, /* (313) signed_literal ::= duration_literal */
+ { 342, -1 }, /* (314) signed_literal ::= NULL */
+ { 342, -1 }, /* (315) signed_literal ::= literal_func */
+ { 342, -1 }, /* (316) signed_literal ::= NK_QUESTION */
+ { 379, -1 }, /* (317) literal_list ::= signed_literal */
+ { 379, -3 }, /* (318) literal_list ::= literal_list NK_COMMA signed_literal */
+ { 320, -1 }, /* (319) db_name ::= NK_ID */
+ { 348, -1 }, /* (320) table_name ::= NK_ID */
+ { 340, -1 }, /* (321) column_name ::= NK_ID */
+ { 355, -1 }, /* (322) function_name ::= NK_ID */
+ { 380, -1 }, /* (323) table_alias ::= NK_ID */
+ { 381, -1 }, /* (324) column_alias ::= NK_ID */
+ { 314, -1 }, /* (325) user_name ::= NK_ID */
+ { 367, -1 }, /* (326) topic_name ::= NK_ID */
+ { 374, -1 }, /* (327) stream_name ::= NK_ID */
+ { 369, -1 }, /* (328) cgroup_name ::= NK_ID */
+ { 382, -1 }, /* (329) expression ::= literal */
+ { 382, -1 }, /* (330) expression ::= pseudo_column */
+ { 382, -1 }, /* (331) expression ::= column_reference */
+ { 382, -1 }, /* (332) expression ::= function_expression */
+ { 382, -1 }, /* (333) expression ::= subquery */
+ { 382, -3 }, /* (334) expression ::= NK_LP expression NK_RP */
+ { 382, -2 }, /* (335) expression ::= NK_PLUS expression */
+ { 382, -2 }, /* (336) expression ::= NK_MINUS expression */
+ { 382, -3 }, /* (337) expression ::= expression NK_PLUS expression */
+ { 382, -3 }, /* (338) expression ::= expression NK_MINUS expression */
+ { 382, -3 }, /* (339) expression ::= expression NK_STAR expression */
+ { 382, -3 }, /* (340) expression ::= expression NK_SLASH expression */
+ { 382, -3 }, /* (341) expression ::= expression NK_REM expression */
+ { 382, -3 }, /* (342) expression ::= column_reference NK_ARROW NK_STRING */
+ { 382, -3 }, /* (343) expression ::= expression NK_BITAND expression */
+ { 382, -3 }, /* (344) expression ::= expression NK_BITOR expression */
+ { 345, -1 }, /* (345) expression_list ::= expression */
+ { 345, -3 }, /* (346) expression_list ::= expression_list NK_COMMA expression */
+ { 384, -1 }, /* (347) column_reference ::= column_name */
+ { 384, -3 }, /* (348) column_reference ::= table_name NK_DOT column_name */
+ { 383, -1 }, /* (349) pseudo_column ::= ROWTS */
+ { 383, -1 }, /* (350) pseudo_column ::= TBNAME */
+ { 383, -3 }, /* (351) pseudo_column ::= table_name NK_DOT TBNAME */
+ { 383, -1 }, /* (352) pseudo_column ::= QSTART */
+ { 383, -1 }, /* (353) pseudo_column ::= QEND */
+ { 383, -1 }, /* (354) pseudo_column ::= QDURATION */
+ { 383, -1 }, /* (355) pseudo_column ::= WSTART */
+ { 383, -1 }, /* (356) pseudo_column ::= WEND */
+ { 383, -1 }, /* (357) pseudo_column ::= WDURATION */
+ { 385, -4 }, /* (358) function_expression ::= function_name NK_LP expression_list NK_RP */
+ { 385, -4 }, /* (359) function_expression ::= star_func NK_LP star_func_para_list NK_RP */
+ { 385, -6 }, /* (360) function_expression ::= CAST NK_LP expression AS type_name NK_RP */
+ { 385, -1 }, /* (361) function_expression ::= literal_func */
+ { 378, -3 }, /* (362) literal_func ::= noarg_func NK_LP NK_RP */
+ { 378, -1 }, /* (363) literal_func ::= NOW */
+ { 389, -1 }, /* (364) noarg_func ::= NOW */
+ { 389, -1 }, /* (365) noarg_func ::= TODAY */
+ { 389, -1 }, /* (366) noarg_func ::= TIMEZONE */
+ { 389, -1 }, /* (367) noarg_func ::= DATABASE */
+ { 389, -1 }, /* (368) noarg_func ::= CLIENT_VERSION */
+ { 389, -1 }, /* (369) noarg_func ::= SERVER_VERSION */
+ { 389, -1 }, /* (370) noarg_func ::= SERVER_STATUS */
+ { 389, -1 }, /* (371) noarg_func ::= CURRENT_USER */
+ { 389, -1 }, /* (372) noarg_func ::= USER */
+ { 387, -1 }, /* (373) star_func ::= COUNT */
+ { 387, -1 }, /* (374) star_func ::= FIRST */
+ { 387, -1 }, /* (375) star_func ::= LAST */
+ { 387, -1 }, /* (376) star_func ::= LAST_ROW */
+ { 388, -1 }, /* (377) star_func_para_list ::= NK_STAR */
+ { 388, -1 }, /* (378) star_func_para_list ::= other_para_list */
+ { 390, -1 }, /* (379) other_para_list ::= star_func_para */
+ { 390, -3 }, /* (380) other_para_list ::= other_para_list NK_COMMA star_func_para */
+ { 391, -1 }, /* (381) star_func_para ::= expression */
+ { 391, -3 }, /* (382) star_func_para ::= table_name NK_DOT NK_STAR */
+ { 392, -3 }, /* (383) predicate ::= expression compare_op expression */
+ { 392, -5 }, /* (384) predicate ::= expression BETWEEN expression AND expression */
+ { 392, -6 }, /* (385) predicate ::= expression NOT BETWEEN expression AND expression */
+ { 392, -3 }, /* (386) predicate ::= expression IS NULL */
+ { 392, -4 }, /* (387) predicate ::= expression IS NOT NULL */
+ { 392, -3 }, /* (388) predicate ::= expression in_op in_predicate_value */
+ { 393, -1 }, /* (389) compare_op ::= NK_LT */
+ { 393, -1 }, /* (390) compare_op ::= NK_GT */
+ { 393, -1 }, /* (391) compare_op ::= NK_LE */
+ { 393, -1 }, /* (392) compare_op ::= NK_GE */
+ { 393, -1 }, /* (393) compare_op ::= NK_NE */
+ { 393, -1 }, /* (394) compare_op ::= NK_EQ */
+ { 393, -1 }, /* (395) compare_op ::= LIKE */
+ { 393, -2 }, /* (396) compare_op ::= NOT LIKE */
+ { 393, -1 }, /* (397) compare_op ::= MATCH */
+ { 393, -1 }, /* (398) compare_op ::= NMATCH */
+ { 393, -1 }, /* (399) compare_op ::= CONTAINS */
+ { 394, -1 }, /* (400) in_op ::= IN */
+ { 394, -2 }, /* (401) in_op ::= NOT IN */
+ { 395, -3 }, /* (402) in_predicate_value ::= NK_LP literal_list NK_RP */
+ { 396, -1 }, /* (403) boolean_value_expression ::= boolean_primary */
+ { 396, -2 }, /* (404) boolean_value_expression ::= NOT boolean_primary */
+ { 396, -3 }, /* (405) boolean_value_expression ::= boolean_value_expression OR boolean_value_expression */
+ { 396, -3 }, /* (406) boolean_value_expression ::= boolean_value_expression AND boolean_value_expression */
+ { 397, -1 }, /* (407) boolean_primary ::= predicate */
+ { 397, -3 }, /* (408) boolean_primary ::= NK_LP boolean_value_expression NK_RP */
+ { 398, -1 }, /* (409) common_expression ::= expression */
+ { 398, -1 }, /* (410) common_expression ::= boolean_value_expression */
+ { 399, 0 }, /* (411) from_clause_opt ::= */
+ { 399, -2 }, /* (412) from_clause_opt ::= FROM table_reference_list */
+ { 400, -1 }, /* (413) table_reference_list ::= table_reference */
+ { 400, -3 }, /* (414) table_reference_list ::= table_reference_list NK_COMMA table_reference */
+ { 401, -1 }, /* (415) table_reference ::= table_primary */
+ { 401, -1 }, /* (416) table_reference ::= joined_table */
+ { 402, -2 }, /* (417) table_primary ::= table_name alias_opt */
+ { 402, -4 }, /* (418) table_primary ::= db_name NK_DOT table_name alias_opt */
+ { 402, -2 }, /* (419) table_primary ::= subquery alias_opt */
+ { 402, -1 }, /* (420) table_primary ::= parenthesized_joined_table */
+ { 404, 0 }, /* (421) alias_opt ::= */
+ { 404, -1 }, /* (422) alias_opt ::= table_alias */
+ { 404, -2 }, /* (423) alias_opt ::= AS table_alias */
+ { 405, -3 }, /* (424) parenthesized_joined_table ::= NK_LP joined_table NK_RP */
+ { 405, -3 }, /* (425) parenthesized_joined_table ::= NK_LP parenthesized_joined_table NK_RP */
+ { 403, -6 }, /* (426) joined_table ::= table_reference join_type JOIN table_reference ON search_condition */
+ { 406, 0 }, /* (427) join_type ::= */
+ { 406, -1 }, /* (428) join_type ::= INNER */
+ { 408, -12 }, /* (429) query_specification ::= SELECT set_quantifier_opt select_list from_clause_opt where_clause_opt partition_by_clause_opt range_opt every_opt fill_opt twindow_clause_opt group_by_clause_opt having_clause_opt */
+ { 409, 0 }, /* (430) set_quantifier_opt ::= */
+ { 409, -1 }, /* (431) set_quantifier_opt ::= DISTINCT */
+ { 409, -1 }, /* (432) set_quantifier_opt ::= ALL */
+ { 410, -1 }, /* (433) select_list ::= select_item */
+ { 410, -3 }, /* (434) select_list ::= select_list NK_COMMA select_item */
+ { 418, -1 }, /* (435) select_item ::= NK_STAR */
+ { 418, -1 }, /* (436) select_item ::= common_expression */
+ { 418, -2 }, /* (437) select_item ::= common_expression column_alias */
+ { 418, -3 }, /* (438) select_item ::= common_expression AS column_alias */
+ { 418, -3 }, /* (439) select_item ::= table_name NK_DOT NK_STAR */
+ { 376, 0 }, /* (440) where_clause_opt ::= */
+ { 376, -2 }, /* (441) where_clause_opt ::= WHERE search_condition */
+ { 411, 0 }, /* (442) partition_by_clause_opt ::= */
+ { 411, -3 }, /* (443) partition_by_clause_opt ::= PARTITION BY expression_list */
+ { 415, 0 }, /* (444) twindow_clause_opt ::= */
+ { 415, -6 }, /* (445) twindow_clause_opt ::= SESSION NK_LP column_reference NK_COMMA duration_literal NK_RP */
+ { 415, -4 }, /* (446) twindow_clause_opt ::= STATE_WINDOW NK_LP expression NK_RP */
+ { 415, -6 }, /* (447) twindow_clause_opt ::= INTERVAL NK_LP duration_literal NK_RP sliding_opt fill_opt */
+ { 415, -8 }, /* (448) twindow_clause_opt ::= INTERVAL NK_LP duration_literal NK_COMMA duration_literal NK_RP sliding_opt fill_opt */
+ { 363, 0 }, /* (449) sliding_opt ::= */
+ { 363, -4 }, /* (450) sliding_opt ::= SLIDING NK_LP duration_literal NK_RP */
+ { 414, 0 }, /* (451) fill_opt ::= */
+ { 414, -4 }, /* (452) fill_opt ::= FILL NK_LP fill_mode NK_RP */
+ { 414, -6 }, /* (453) fill_opt ::= FILL NK_LP VALUE NK_COMMA literal_list NK_RP */
+ { 419, -1 }, /* (454) fill_mode ::= NONE */
+ { 419, -1 }, /* (455) fill_mode ::= PREV */
+ { 419, -1 }, /* (456) fill_mode ::= NULL */
+ { 419, -1 }, /* (457) fill_mode ::= LINEAR */
+ { 419, -1 }, /* (458) fill_mode ::= NEXT */
+ { 416, 0 }, /* (459) group_by_clause_opt ::= */
+ { 416, -3 }, /* (460) group_by_clause_opt ::= GROUP BY group_by_list */
+ { 420, -1 }, /* (461) group_by_list ::= expression */
+ { 420, -3 }, /* (462) group_by_list ::= group_by_list NK_COMMA expression */
+ { 417, 0 }, /* (463) having_clause_opt ::= */
+ { 417, -2 }, /* (464) having_clause_opt ::= HAVING search_condition */
+ { 412, 0 }, /* (465) range_opt ::= */
+ { 412, -6 }, /* (466) range_opt ::= RANGE NK_LP expression NK_COMMA expression NK_RP */
+ { 413, 0 }, /* (467) every_opt ::= */
+ { 413, -4 }, /* (468) every_opt ::= EVERY NK_LP duration_literal NK_RP */
+ { 368, -4 }, /* (469) query_expression ::= query_expression_body order_by_clause_opt slimit_clause_opt limit_clause_opt */
+ { 421, -1 }, /* (470) query_expression_body ::= query_primary */
+ { 421, -4 }, /* (471) query_expression_body ::= query_expression_body UNION ALL query_expression_body */
+ { 421, -3 }, /* (472) query_expression_body ::= query_expression_body UNION query_expression_body */
+ { 425, -1 }, /* (473) query_primary ::= query_specification */
+ { 425, -6 }, /* (474) query_primary ::= NK_LP query_expression_body order_by_clause_opt slimit_clause_opt limit_clause_opt NK_RP */
+ { 422, 0 }, /* (475) order_by_clause_opt ::= */
+ { 422, -3 }, /* (476) order_by_clause_opt ::= ORDER BY sort_specification_list */
+ { 423, 0 }, /* (477) slimit_clause_opt ::= */
+ { 423, -2 }, /* (478) slimit_clause_opt ::= SLIMIT NK_INTEGER */
+ { 423, -4 }, /* (479) slimit_clause_opt ::= SLIMIT NK_INTEGER SOFFSET NK_INTEGER */
+ { 423, -4 }, /* (480) slimit_clause_opt ::= SLIMIT NK_INTEGER NK_COMMA NK_INTEGER */
+ { 424, 0 }, /* (481) limit_clause_opt ::= */
+ { 424, -2 }, /* (482) limit_clause_opt ::= LIMIT NK_INTEGER */
+ { 424, -4 }, /* (483) limit_clause_opt ::= LIMIT NK_INTEGER OFFSET NK_INTEGER */
+ { 424, -4 }, /* (484) limit_clause_opt ::= LIMIT NK_INTEGER NK_COMMA NK_INTEGER */
+ { 386, -3 }, /* (485) subquery ::= NK_LP query_expression NK_RP */
+ { 407, -1 }, /* (486) search_condition ::= common_expression */
+ { 426, -1 }, /* (487) sort_specification_list ::= sort_specification */
+ { 426, -3 }, /* (488) sort_specification_list ::= sort_specification_list NK_COMMA sort_specification */
+ { 427, -3 }, /* (489) sort_specification ::= expression ordering_specification_opt null_ordering_opt */
+ { 428, 0 }, /* (490) ordering_specification_opt ::= */
+ { 428, -1 }, /* (491) ordering_specification_opt ::= ASC */
+ { 428, -1 }, /* (492) ordering_specification_opt ::= DESC */
+ { 429, 0 }, /* (493) null_ordering_opt ::= */
+ { 429, -2 }, /* (494) null_ordering_opt ::= NULLS FIRST */
+ { 429, -2 }, /* (495) null_ordering_opt ::= NULLS LAST */
};
static void yy_accept(yyParser*); /* Forward Declaration */
@@ -3417,11 +3429,11 @@ static YYACTIONTYPE yy_reduce(
YYMINORTYPE yylhsminor;
case 0: /* cmd ::= CREATE ACCOUNT NK_ID PASS NK_STRING account_options */
{ pCxt->errCode = generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_EXPRIE_STATEMENT); }
- yy_destructor(yypParser,306,&yymsp[0].minor);
+ yy_destructor(yypParser,310,&yymsp[0].minor);
break;
case 1: /* cmd ::= ALTER ACCOUNT NK_ID alter_account_options */
{ pCxt->errCode = generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_EXPRIE_STATEMENT); }
- yy_destructor(yypParser,307,&yymsp[0].minor);
+ yy_destructor(yypParser,311,&yymsp[0].minor);
break;
case 2: /* account_options ::= */
{ }
@@ -3435,20 +3447,20 @@ static YYACTIONTYPE yy_reduce(
case 9: /* account_options ::= account_options USERS literal */ yytestcase(yyruleno==9);
case 10: /* account_options ::= account_options CONNS literal */ yytestcase(yyruleno==10);
case 11: /* account_options ::= account_options STATE literal */ yytestcase(yyruleno==11);
-{ yy_destructor(yypParser,306,&yymsp[-2].minor);
+{ yy_destructor(yypParser,310,&yymsp[-2].minor);
{ }
- yy_destructor(yypParser,308,&yymsp[0].minor);
+ yy_destructor(yypParser,312,&yymsp[0].minor);
}
break;
case 12: /* alter_account_options ::= alter_account_option */
-{ yy_destructor(yypParser,309,&yymsp[0].minor);
+{ yy_destructor(yypParser,313,&yymsp[0].minor);
{ }
}
break;
case 13: /* alter_account_options ::= alter_account_options alter_account_option */
-{ yy_destructor(yypParser,307,&yymsp[-1].minor);
+{ yy_destructor(yypParser,311,&yymsp[-1].minor);
{ }
- yy_destructor(yypParser,309,&yymsp[0].minor);
+ yy_destructor(yypParser,313,&yymsp[0].minor);
}
break;
case 14: /* alter_account_option ::= PASS literal */
@@ -3462,72 +3474,72 @@ static YYACTIONTYPE yy_reduce(
case 22: /* alter_account_option ::= CONNS literal */ yytestcase(yyruleno==22);
case 23: /* alter_account_option ::= STATE literal */ yytestcase(yyruleno==23);
{ }
- yy_destructor(yypParser,308,&yymsp[0].minor);
+ yy_destructor(yypParser,312,&yymsp[0].minor);
break;
case 24: /* cmd ::= CREATE USER user_name PASS NK_STRING sysinfo_opt */
-{ pCxt->pRootNode = createCreateUserStmt(pCxt, &yymsp[-3].minor.yy617, &yymsp[-1].minor.yy0, yymsp[0].minor.yy215); }
+{ pCxt->pRootNode = createCreateUserStmt(pCxt, &yymsp[-3].minor.yy149, &yymsp[-1].minor.yy0, yymsp[0].minor.yy363); }
break;
case 25: /* cmd ::= ALTER USER user_name PASS NK_STRING */
-{ pCxt->pRootNode = createAlterUserStmt(pCxt, &yymsp[-2].minor.yy617, TSDB_ALTER_USER_PASSWD, &yymsp[0].minor.yy0); }
+{ pCxt->pRootNode = createAlterUserStmt(pCxt, &yymsp[-2].minor.yy149, TSDB_ALTER_USER_PASSWD, &yymsp[0].minor.yy0); }
break;
case 26: /* cmd ::= ALTER USER user_name ENABLE NK_INTEGER */
-{ pCxt->pRootNode = createAlterUserStmt(pCxt, &yymsp[-2].minor.yy617, TSDB_ALTER_USER_ENABLE, &yymsp[0].minor.yy0); }
+{ pCxt->pRootNode = createAlterUserStmt(pCxt, &yymsp[-2].minor.yy149, TSDB_ALTER_USER_ENABLE, &yymsp[0].minor.yy0); }
break;
case 27: /* cmd ::= ALTER USER user_name SYSINFO NK_INTEGER */
-{ pCxt->pRootNode = createAlterUserStmt(pCxt, &yymsp[-2].minor.yy617, TSDB_ALTER_USER_SYSINFO, &yymsp[0].minor.yy0); }
+{ pCxt->pRootNode = createAlterUserStmt(pCxt, &yymsp[-2].minor.yy149, TSDB_ALTER_USER_SYSINFO, &yymsp[0].minor.yy0); }
break;
case 28: /* cmd ::= DROP USER user_name */
-{ pCxt->pRootNode = createDropUserStmt(pCxt, &yymsp[0].minor.yy617); }
+{ pCxt->pRootNode = createDropUserStmt(pCxt, &yymsp[0].minor.yy149); }
break;
case 29: /* sysinfo_opt ::= */
-{ yymsp[1].minor.yy215 = 1; }
+{ yymsp[1].minor.yy363 = 1; }
break;
case 30: /* sysinfo_opt ::= SYSINFO NK_INTEGER */
-{ yymsp[-1].minor.yy215 = taosStr2Int8(yymsp[0].minor.yy0.z, NULL, 10); }
+{ yymsp[-1].minor.yy363 = taosStr2Int8(yymsp[0].minor.yy0.z, NULL, 10); }
break;
case 31: /* cmd ::= GRANT privileges ON priv_level TO user_name */
-{ pCxt->pRootNode = createGrantStmt(pCxt, yymsp[-4].minor.yy473, &yymsp[-2].minor.yy617, &yymsp[0].minor.yy617); }
+{ pCxt->pRootNode = createGrantStmt(pCxt, yymsp[-4].minor.yy49, &yymsp[-2].minor.yy149, &yymsp[0].minor.yy149); }
break;
case 32: /* cmd ::= REVOKE privileges ON priv_level FROM user_name */
-{ pCxt->pRootNode = createRevokeStmt(pCxt, yymsp[-4].minor.yy473, &yymsp[-2].minor.yy617, &yymsp[0].minor.yy617); }
+{ pCxt->pRootNode = createRevokeStmt(pCxt, yymsp[-4].minor.yy49, &yymsp[-2].minor.yy149, &yymsp[0].minor.yy149); }
break;
case 33: /* privileges ::= ALL */
-{ yymsp[0].minor.yy473 = PRIVILEGE_TYPE_ALL; }
+{ yymsp[0].minor.yy49 = PRIVILEGE_TYPE_ALL; }
break;
case 34: /* privileges ::= priv_type_list */
case 35: /* priv_type_list ::= priv_type */ yytestcase(yyruleno==35);
-{ yylhsminor.yy473 = yymsp[0].minor.yy473; }
- yymsp[0].minor.yy473 = yylhsminor.yy473;
+{ yylhsminor.yy49 = yymsp[0].minor.yy49; }
+ yymsp[0].minor.yy49 = yylhsminor.yy49;
break;
case 36: /* priv_type_list ::= priv_type_list NK_COMMA priv_type */
-{ yylhsminor.yy473 = yymsp[-2].minor.yy473 | yymsp[0].minor.yy473; }
- yymsp[-2].minor.yy473 = yylhsminor.yy473;
+{ yylhsminor.yy49 = yymsp[-2].minor.yy49 | yymsp[0].minor.yy49; }
+ yymsp[-2].minor.yy49 = yylhsminor.yy49;
break;
case 37: /* priv_type ::= READ */
-{ yymsp[0].minor.yy473 = PRIVILEGE_TYPE_READ; }
+{ yymsp[0].minor.yy49 = PRIVILEGE_TYPE_READ; }
break;
case 38: /* priv_type ::= WRITE */
-{ yymsp[0].minor.yy473 = PRIVILEGE_TYPE_WRITE; }
+{ yymsp[0].minor.yy49 = PRIVILEGE_TYPE_WRITE; }
break;
case 39: /* priv_level ::= NK_STAR NK_DOT NK_STAR */
-{ yylhsminor.yy617 = yymsp[-2].minor.yy0; }
- yymsp[-2].minor.yy617 = yylhsminor.yy617;
+{ yylhsminor.yy149 = yymsp[-2].minor.yy0; }
+ yymsp[-2].minor.yy149 = yylhsminor.yy149;
break;
case 40: /* priv_level ::= db_name NK_DOT NK_STAR */
-{ yylhsminor.yy617 = yymsp[-2].minor.yy617; }
- yymsp[-2].minor.yy617 = yylhsminor.yy617;
+{ yylhsminor.yy149 = yymsp[-2].minor.yy149; }
+ yymsp[-2].minor.yy149 = yylhsminor.yy149;
break;
case 41: /* cmd ::= CREATE DNODE dnode_endpoint */
-{ pCxt->pRootNode = createCreateDnodeStmt(pCxt, &yymsp[0].minor.yy617, NULL); }
+{ pCxt->pRootNode = createCreateDnodeStmt(pCxt, &yymsp[0].minor.yy149, NULL); }
break;
case 42: /* cmd ::= CREATE DNODE dnode_endpoint PORT NK_INTEGER */
-{ pCxt->pRootNode = createCreateDnodeStmt(pCxt, &yymsp[-2].minor.yy617, &yymsp[0].minor.yy0); }
+{ pCxt->pRootNode = createCreateDnodeStmt(pCxt, &yymsp[-2].minor.yy149, &yymsp[0].minor.yy0); }
break;
case 43: /* cmd ::= DROP DNODE NK_INTEGER */
{ pCxt->pRootNode = createDropDnodeStmt(pCxt, &yymsp[0].minor.yy0); }
break;
case 44: /* cmd ::= DROP DNODE dnode_endpoint */
-{ pCxt->pRootNode = createDropDnodeStmt(pCxt, &yymsp[0].minor.yy617); }
+{ pCxt->pRootNode = createDropDnodeStmt(pCxt, &yymsp[0].minor.yy149); }
break;
case 45: /* cmd ::= ALTER DNODE NK_INTEGER NK_STRING */
{ pCxt->pRootNode = createAlterDnodeStmt(pCxt, &yymsp[-1].minor.yy0, &yymsp[0].minor.yy0, NULL); }
@@ -3544,31 +3556,31 @@ static YYACTIONTYPE yy_reduce(
case 49: /* dnode_endpoint ::= NK_STRING */
case 50: /* dnode_endpoint ::= NK_ID */ yytestcase(yyruleno==50);
case 51: /* dnode_endpoint ::= NK_IPTOKEN */ yytestcase(yyruleno==51);
- case 314: /* db_name ::= NK_ID */ yytestcase(yyruleno==314);
- case 315: /* table_name ::= NK_ID */ yytestcase(yyruleno==315);
- case 316: /* column_name ::= NK_ID */ yytestcase(yyruleno==316);
- case 317: /* function_name ::= NK_ID */ yytestcase(yyruleno==317);
- case 318: /* table_alias ::= NK_ID */ yytestcase(yyruleno==318);
- case 319: /* column_alias ::= NK_ID */ yytestcase(yyruleno==319);
- case 320: /* user_name ::= NK_ID */ yytestcase(yyruleno==320);
- case 321: /* topic_name ::= NK_ID */ yytestcase(yyruleno==321);
- case 322: /* stream_name ::= NK_ID */ yytestcase(yyruleno==322);
- case 323: /* cgroup_name ::= NK_ID */ yytestcase(yyruleno==323);
- case 359: /* noarg_func ::= NOW */ yytestcase(yyruleno==359);
- case 360: /* noarg_func ::= TODAY */ yytestcase(yyruleno==360);
- case 361: /* noarg_func ::= TIMEZONE */ yytestcase(yyruleno==361);
- case 362: /* noarg_func ::= DATABASE */ yytestcase(yyruleno==362);
- case 363: /* noarg_func ::= CLIENT_VERSION */ yytestcase(yyruleno==363);
- case 364: /* noarg_func ::= SERVER_VERSION */ yytestcase(yyruleno==364);
- case 365: /* noarg_func ::= SERVER_STATUS */ yytestcase(yyruleno==365);
- case 366: /* noarg_func ::= CURRENT_USER */ yytestcase(yyruleno==366);
- case 367: /* noarg_func ::= USER */ yytestcase(yyruleno==367);
- case 368: /* star_func ::= COUNT */ yytestcase(yyruleno==368);
- case 369: /* star_func ::= FIRST */ yytestcase(yyruleno==369);
- case 370: /* star_func ::= LAST */ yytestcase(yyruleno==370);
- case 371: /* star_func ::= LAST_ROW */ yytestcase(yyruleno==371);
-{ yylhsminor.yy617 = yymsp[0].minor.yy0; }
- yymsp[0].minor.yy617 = yylhsminor.yy617;
+ case 319: /* db_name ::= NK_ID */ yytestcase(yyruleno==319);
+ case 320: /* table_name ::= NK_ID */ yytestcase(yyruleno==320);
+ case 321: /* column_name ::= NK_ID */ yytestcase(yyruleno==321);
+ case 322: /* function_name ::= NK_ID */ yytestcase(yyruleno==322);
+ case 323: /* table_alias ::= NK_ID */ yytestcase(yyruleno==323);
+ case 324: /* column_alias ::= NK_ID */ yytestcase(yyruleno==324);
+ case 325: /* user_name ::= NK_ID */ yytestcase(yyruleno==325);
+ case 326: /* topic_name ::= NK_ID */ yytestcase(yyruleno==326);
+ case 327: /* stream_name ::= NK_ID */ yytestcase(yyruleno==327);
+ case 328: /* cgroup_name ::= NK_ID */ yytestcase(yyruleno==328);
+ case 364: /* noarg_func ::= NOW */ yytestcase(yyruleno==364);
+ case 365: /* noarg_func ::= TODAY */ yytestcase(yyruleno==365);
+ case 366: /* noarg_func ::= TIMEZONE */ yytestcase(yyruleno==366);
+ case 367: /* noarg_func ::= DATABASE */ yytestcase(yyruleno==367);
+ case 368: /* noarg_func ::= CLIENT_VERSION */ yytestcase(yyruleno==368);
+ case 369: /* noarg_func ::= SERVER_VERSION */ yytestcase(yyruleno==369);
+ case 370: /* noarg_func ::= SERVER_STATUS */ yytestcase(yyruleno==370);
+ case 371: /* noarg_func ::= CURRENT_USER */ yytestcase(yyruleno==371);
+ case 372: /* noarg_func ::= USER */ yytestcase(yyruleno==372);
+ case 373: /* star_func ::= COUNT */ yytestcase(yyruleno==373);
+ case 374: /* star_func ::= FIRST */ yytestcase(yyruleno==374);
+ case 375: /* star_func ::= LAST */ yytestcase(yyruleno==375);
+ case 376: /* star_func ::= LAST_ROW */ yytestcase(yyruleno==376);
+{ yylhsminor.yy149 = yymsp[0].minor.yy0; }
+ yymsp[0].minor.yy149 = yylhsminor.yy149;
break;
case 52: /* cmd ::= ALTER LOCAL NK_STRING */
{ pCxt->pRootNode = createAlterLocalStmt(pCxt, &yymsp[0].minor.yy0, NULL); }
@@ -3601,1263 +3613,1286 @@ static YYACTIONTYPE yy_reduce(
{ pCxt->pRootNode = createDropComponentNodeStmt(pCxt, QUERY_NODE_DROP_MNODE_STMT, &yymsp[0].minor.yy0); }
break;
case 62: /* cmd ::= CREATE DATABASE not_exists_opt db_name db_options */
-{ pCxt->pRootNode = createCreateDatabaseStmt(pCxt, yymsp[-2].minor.yy313, &yymsp[-1].minor.yy617, yymsp[0].minor.yy840); }
+{ pCxt->pRootNode = createCreateDatabaseStmt(pCxt, yymsp[-2].minor.yy497, &yymsp[-1].minor.yy149, yymsp[0].minor.yy312); }
break;
case 63: /* cmd ::= DROP DATABASE exists_opt db_name */
-{ pCxt->pRootNode = createDropDatabaseStmt(pCxt, yymsp[-1].minor.yy313, &yymsp[0].minor.yy617); }
+{ pCxt->pRootNode = createDropDatabaseStmt(pCxt, yymsp[-1].minor.yy497, &yymsp[0].minor.yy149); }
break;
case 64: /* cmd ::= USE db_name */
-{ pCxt->pRootNode = createUseDatabaseStmt(pCxt, &yymsp[0].minor.yy617); }
+{ pCxt->pRootNode = createUseDatabaseStmt(pCxt, &yymsp[0].minor.yy149); }
break;
case 65: /* cmd ::= ALTER DATABASE db_name alter_db_options */
-{ pCxt->pRootNode = createAlterDatabaseStmt(pCxt, &yymsp[-1].minor.yy617, yymsp[0].minor.yy840); }
+{ pCxt->pRootNode = createAlterDatabaseStmt(pCxt, &yymsp[-1].minor.yy149, yymsp[0].minor.yy312); }
break;
case 66: /* cmd ::= FLUSH DATABASE db_name */
-{ pCxt->pRootNode = createFlushDatabaseStmt(pCxt, &yymsp[0].minor.yy617); }
+{ pCxt->pRootNode = createFlushDatabaseStmt(pCxt, &yymsp[0].minor.yy149); }
break;
case 67: /* cmd ::= TRIM DATABASE db_name */
-{ pCxt->pRootNode = createTrimDatabaseStmt(pCxt, &yymsp[0].minor.yy617); }
+{ pCxt->pRootNode = createTrimDatabaseStmt(pCxt, &yymsp[0].minor.yy149); }
break;
case 68: /* not_exists_opt ::= IF NOT EXISTS */
-{ yymsp[-2].minor.yy313 = true; }
+{ yymsp[-2].minor.yy497 = true; }
break;
case 69: /* not_exists_opt ::= */
case 71: /* exists_opt ::= */ yytestcase(yyruleno==71);
- case 255: /* analyze_opt ::= */ yytestcase(yyruleno==255);
- case 262: /* agg_func_opt ::= */ yytestcase(yyruleno==262);
- case 425: /* set_quantifier_opt ::= */ yytestcase(yyruleno==425);
-{ yymsp[1].minor.yy313 = false; }
+ case 262: /* analyze_opt ::= */ yytestcase(yyruleno==262);
+ case 269: /* agg_func_opt ::= */ yytestcase(yyruleno==269);
+ case 430: /* set_quantifier_opt ::= */ yytestcase(yyruleno==430);
+{ yymsp[1].minor.yy497 = false; }
break;
case 70: /* exists_opt ::= IF EXISTS */
-{ yymsp[-1].minor.yy313 = true; }
+{ yymsp[-1].minor.yy497 = true; }
break;
case 72: /* db_options ::= */
-{ yymsp[1].minor.yy840 = createDefaultDatabaseOptions(pCxt); }
+{ yymsp[1].minor.yy312 = createDefaultDatabaseOptions(pCxt); }
break;
case 73: /* db_options ::= db_options BUFFER NK_INTEGER */
-{ yylhsminor.yy840 = setDatabaseOption(pCxt, yymsp[-2].minor.yy840, DB_OPTION_BUFFER, &yymsp[0].minor.yy0); }
- yymsp[-2].minor.yy840 = yylhsminor.yy840;
+{ yylhsminor.yy312 = setDatabaseOption(pCxt, yymsp[-2].minor.yy312, DB_OPTION_BUFFER, &yymsp[0].minor.yy0); }
+ yymsp[-2].minor.yy312 = yylhsminor.yy312;
break;
case 74: /* db_options ::= db_options CACHEMODEL NK_STRING */
-{ yylhsminor.yy840 = setDatabaseOption(pCxt, yymsp[-2].minor.yy840, DB_OPTION_CACHEMODEL, &yymsp[0].minor.yy0); }
- yymsp[-2].minor.yy840 = yylhsminor.yy840;
+{ yylhsminor.yy312 = setDatabaseOption(pCxt, yymsp[-2].minor.yy312, DB_OPTION_CACHEMODEL, &yymsp[0].minor.yy0); }
+ yymsp[-2].minor.yy312 = yylhsminor.yy312;
break;
case 75: /* db_options ::= db_options CACHESIZE NK_INTEGER */
-{ yylhsminor.yy840 = setDatabaseOption(pCxt, yymsp[-2].minor.yy840, DB_OPTION_CACHESIZE, &yymsp[0].minor.yy0); }
- yymsp[-2].minor.yy840 = yylhsminor.yy840;
+{ yylhsminor.yy312 = setDatabaseOption(pCxt, yymsp[-2].minor.yy312, DB_OPTION_CACHESIZE, &yymsp[0].minor.yy0); }
+ yymsp[-2].minor.yy312 = yylhsminor.yy312;
break;
case 76: /* db_options ::= db_options COMP NK_INTEGER */
-{ yylhsminor.yy840 = setDatabaseOption(pCxt, yymsp[-2].minor.yy840, DB_OPTION_COMP, &yymsp[0].minor.yy0); }
- yymsp[-2].minor.yy840 = yylhsminor.yy840;
+{ yylhsminor.yy312 = setDatabaseOption(pCxt, yymsp[-2].minor.yy312, DB_OPTION_COMP, &yymsp[0].minor.yy0); }
+ yymsp[-2].minor.yy312 = yylhsminor.yy312;
break;
case 77: /* db_options ::= db_options DURATION NK_INTEGER */
case 78: /* db_options ::= db_options DURATION NK_VARIABLE */ yytestcase(yyruleno==78);
-{ yylhsminor.yy840 = setDatabaseOption(pCxt, yymsp[-2].minor.yy840, DB_OPTION_DAYS, &yymsp[0].minor.yy0); }
- yymsp[-2].minor.yy840 = yylhsminor.yy840;
+{ yylhsminor.yy312 = setDatabaseOption(pCxt, yymsp[-2].minor.yy312, DB_OPTION_DAYS, &yymsp[0].minor.yy0); }
+ yymsp[-2].minor.yy312 = yylhsminor.yy312;
break;
case 79: /* db_options ::= db_options MAXROWS NK_INTEGER */
-{ yylhsminor.yy840 = setDatabaseOption(pCxt, yymsp[-2].minor.yy840, DB_OPTION_MAXROWS, &yymsp[0].minor.yy0); }
- yymsp[-2].minor.yy840 = yylhsminor.yy840;
+{ yylhsminor.yy312 = setDatabaseOption(pCxt, yymsp[-2].minor.yy312, DB_OPTION_MAXROWS, &yymsp[0].minor.yy0); }
+ yymsp[-2].minor.yy312 = yylhsminor.yy312;
break;
case 80: /* db_options ::= db_options MINROWS NK_INTEGER */
-{ yylhsminor.yy840 = setDatabaseOption(pCxt, yymsp[-2].minor.yy840, DB_OPTION_MINROWS, &yymsp[0].minor.yy0); }
- yymsp[-2].minor.yy840 = yylhsminor.yy840;
+{ yylhsminor.yy312 = setDatabaseOption(pCxt, yymsp[-2].minor.yy312, DB_OPTION_MINROWS, &yymsp[0].minor.yy0); }
+ yymsp[-2].minor.yy312 = yylhsminor.yy312;
break;
case 81: /* db_options ::= db_options KEEP integer_list */
case 82: /* db_options ::= db_options KEEP variable_list */ yytestcase(yyruleno==82);
-{ yylhsminor.yy840 = setDatabaseOption(pCxt, yymsp[-2].minor.yy840, DB_OPTION_KEEP, yymsp[0].minor.yy544); }
- yymsp[-2].minor.yy840 = yylhsminor.yy840;
+{ yylhsminor.yy312 = setDatabaseOption(pCxt, yymsp[-2].minor.yy312, DB_OPTION_KEEP, yymsp[0].minor.yy824); }
+ yymsp[-2].minor.yy312 = yylhsminor.yy312;
break;
case 83: /* db_options ::= db_options PAGES NK_INTEGER */
-{ yylhsminor.yy840 = setDatabaseOption(pCxt, yymsp[-2].minor.yy840, DB_OPTION_PAGES, &yymsp[0].minor.yy0); }
- yymsp[-2].minor.yy840 = yylhsminor.yy840;
+{ yylhsminor.yy312 = setDatabaseOption(pCxt, yymsp[-2].minor.yy312, DB_OPTION_PAGES, &yymsp[0].minor.yy0); }
+ yymsp[-2].minor.yy312 = yylhsminor.yy312;
break;
case 84: /* db_options ::= db_options PAGESIZE NK_INTEGER */
-{ yylhsminor.yy840 = setDatabaseOption(pCxt, yymsp[-2].minor.yy840, DB_OPTION_PAGESIZE, &yymsp[0].minor.yy0); }
- yymsp[-2].minor.yy840 = yylhsminor.yy840;
+{ yylhsminor.yy312 = setDatabaseOption(pCxt, yymsp[-2].minor.yy312, DB_OPTION_PAGESIZE, &yymsp[0].minor.yy0); }
+ yymsp[-2].minor.yy312 = yylhsminor.yy312;
break;
- case 85: /* db_options ::= db_options PRECISION NK_STRING */
-{ yylhsminor.yy840 = setDatabaseOption(pCxt, yymsp[-2].minor.yy840, DB_OPTION_PRECISION, &yymsp[0].minor.yy0); }
- yymsp[-2].minor.yy840 = yylhsminor.yy840;
+ case 85: /* db_options ::= db_options TSDB_PAGESIZE NK_INTEGER */
+{ yylhsminor.yy312 = setDatabaseOption(pCxt, yymsp[-2].minor.yy312, DB_OPTION_TSDB_PAGESIZE, &yymsp[0].minor.yy0); }
+ yymsp[-2].minor.yy312 = yylhsminor.yy312;
break;
- case 86: /* db_options ::= db_options REPLICA NK_INTEGER */
-{ yylhsminor.yy840 = setDatabaseOption(pCxt, yymsp[-2].minor.yy840, DB_OPTION_REPLICA, &yymsp[0].minor.yy0); }
- yymsp[-2].minor.yy840 = yylhsminor.yy840;
+ case 86: /* db_options ::= db_options PRECISION NK_STRING */
+{ yylhsminor.yy312 = setDatabaseOption(pCxt, yymsp[-2].minor.yy312, DB_OPTION_PRECISION, &yymsp[0].minor.yy0); }
+ yymsp[-2].minor.yy312 = yylhsminor.yy312;
break;
- case 87: /* db_options ::= db_options STRICT NK_STRING */
-{ yylhsminor.yy840 = setDatabaseOption(pCxt, yymsp[-2].minor.yy840, DB_OPTION_STRICT, &yymsp[0].minor.yy0); }
- yymsp[-2].minor.yy840 = yylhsminor.yy840;
+ case 87: /* db_options ::= db_options REPLICA NK_INTEGER */
+{ yylhsminor.yy312 = setDatabaseOption(pCxt, yymsp[-2].minor.yy312, DB_OPTION_REPLICA, &yymsp[0].minor.yy0); }
+ yymsp[-2].minor.yy312 = yylhsminor.yy312;
break;
- case 88: /* db_options ::= db_options VGROUPS NK_INTEGER */
-{ yylhsminor.yy840 = setDatabaseOption(pCxt, yymsp[-2].minor.yy840, DB_OPTION_VGROUPS, &yymsp[0].minor.yy0); }
- yymsp[-2].minor.yy840 = yylhsminor.yy840;
+ case 88: /* db_options ::= db_options STRICT NK_STRING */
+{ yylhsminor.yy312 = setDatabaseOption(pCxt, yymsp[-2].minor.yy312, DB_OPTION_STRICT, &yymsp[0].minor.yy0); }
+ yymsp[-2].minor.yy312 = yylhsminor.yy312;
break;
- case 89: /* db_options ::= db_options SINGLE_STABLE NK_INTEGER */
-{ yylhsminor.yy840 = setDatabaseOption(pCxt, yymsp[-2].minor.yy840, DB_OPTION_SINGLE_STABLE, &yymsp[0].minor.yy0); }
- yymsp[-2].minor.yy840 = yylhsminor.yy840;
+ case 89: /* db_options ::= db_options VGROUPS NK_INTEGER */
+{ yylhsminor.yy312 = setDatabaseOption(pCxt, yymsp[-2].minor.yy312, DB_OPTION_VGROUPS, &yymsp[0].minor.yy0); }
+ yymsp[-2].minor.yy312 = yylhsminor.yy312;
break;
- case 90: /* db_options ::= db_options RETENTIONS retention_list */
-{ yylhsminor.yy840 = setDatabaseOption(pCxt, yymsp[-2].minor.yy840, DB_OPTION_RETENTIONS, yymsp[0].minor.yy544); }
- yymsp[-2].minor.yy840 = yylhsminor.yy840;
+ case 90: /* db_options ::= db_options SINGLE_STABLE NK_INTEGER */
+{ yylhsminor.yy312 = setDatabaseOption(pCxt, yymsp[-2].minor.yy312, DB_OPTION_SINGLE_STABLE, &yymsp[0].minor.yy0); }
+ yymsp[-2].minor.yy312 = yylhsminor.yy312;
break;
- case 91: /* db_options ::= db_options SCHEMALESS NK_INTEGER */
-{ yylhsminor.yy840 = setDatabaseOption(pCxt, yymsp[-2].minor.yy840, DB_OPTION_SCHEMALESS, &yymsp[0].minor.yy0); }
- yymsp[-2].minor.yy840 = yylhsminor.yy840;
+ case 91: /* db_options ::= db_options RETENTIONS retention_list */
+{ yylhsminor.yy312 = setDatabaseOption(pCxt, yymsp[-2].minor.yy312, DB_OPTION_RETENTIONS, yymsp[0].minor.yy824); }
+ yymsp[-2].minor.yy312 = yylhsminor.yy312;
break;
- case 92: /* db_options ::= db_options WAL_LEVEL NK_INTEGER */
-{ yylhsminor.yy840 = setDatabaseOption(pCxt, yymsp[-2].minor.yy840, DB_OPTION_WAL, &yymsp[0].minor.yy0); }
- yymsp[-2].minor.yy840 = yylhsminor.yy840;
+ case 92: /* db_options ::= db_options SCHEMALESS NK_INTEGER */
+{ yylhsminor.yy312 = setDatabaseOption(pCxt, yymsp[-2].minor.yy312, DB_OPTION_SCHEMALESS, &yymsp[0].minor.yy0); }
+ yymsp[-2].minor.yy312 = yylhsminor.yy312;
break;
- case 93: /* db_options ::= db_options WAL_FSYNC_PERIOD NK_INTEGER */
-{ yylhsminor.yy840 = setDatabaseOption(pCxt, yymsp[-2].minor.yy840, DB_OPTION_FSYNC, &yymsp[0].minor.yy0); }
- yymsp[-2].minor.yy840 = yylhsminor.yy840;
+ case 93: /* db_options ::= db_options WAL_LEVEL NK_INTEGER */
+{ yylhsminor.yy312 = setDatabaseOption(pCxt, yymsp[-2].minor.yy312, DB_OPTION_WAL, &yymsp[0].minor.yy0); }
+ yymsp[-2].minor.yy312 = yylhsminor.yy312;
break;
- case 94: /* db_options ::= db_options WAL_RETENTION_PERIOD NK_INTEGER */
-{ yylhsminor.yy840 = setDatabaseOption(pCxt, yymsp[-2].minor.yy840, DB_OPTION_WAL_RETENTION_PERIOD, &yymsp[0].minor.yy0); }
- yymsp[-2].minor.yy840 = yylhsminor.yy840;
+ case 94: /* db_options ::= db_options WAL_FSYNC_PERIOD NK_INTEGER */
+{ yylhsminor.yy312 = setDatabaseOption(pCxt, yymsp[-2].minor.yy312, DB_OPTION_FSYNC, &yymsp[0].minor.yy0); }
+ yymsp[-2].minor.yy312 = yylhsminor.yy312;
break;
- case 95: /* db_options ::= db_options WAL_RETENTION_PERIOD NK_MINUS NK_INTEGER */
+ case 95: /* db_options ::= db_options WAL_RETENTION_PERIOD NK_INTEGER */
+{ yylhsminor.yy312 = setDatabaseOption(pCxt, yymsp[-2].minor.yy312, DB_OPTION_WAL_RETENTION_PERIOD, &yymsp[0].minor.yy0); }
+ yymsp[-2].minor.yy312 = yylhsminor.yy312;
+ break;
+ case 96: /* db_options ::= db_options WAL_RETENTION_PERIOD NK_MINUS NK_INTEGER */
{
SToken t = yymsp[-1].minor.yy0;
t.n = (yymsp[0].minor.yy0.z + yymsp[0].minor.yy0.n) - yymsp[-1].minor.yy0.z;
- yylhsminor.yy840 = setDatabaseOption(pCxt, yymsp[-3].minor.yy840, DB_OPTION_WAL_RETENTION_PERIOD, &t);
+ yylhsminor.yy312 = setDatabaseOption(pCxt, yymsp[-3].minor.yy312, DB_OPTION_WAL_RETENTION_PERIOD, &t);
}
- yymsp[-3].minor.yy840 = yylhsminor.yy840;
+ yymsp[-3].minor.yy312 = yylhsminor.yy312;
break;
- case 96: /* db_options ::= db_options WAL_RETENTION_SIZE NK_INTEGER */
-{ yylhsminor.yy840 = setDatabaseOption(pCxt, yymsp[-2].minor.yy840, DB_OPTION_WAL_RETENTION_SIZE, &yymsp[0].minor.yy0); }
- yymsp[-2].minor.yy840 = yylhsminor.yy840;
+ case 97: /* db_options ::= db_options WAL_RETENTION_SIZE NK_INTEGER */
+{ yylhsminor.yy312 = setDatabaseOption(pCxt, yymsp[-2].minor.yy312, DB_OPTION_WAL_RETENTION_SIZE, &yymsp[0].minor.yy0); }
+ yymsp[-2].minor.yy312 = yylhsminor.yy312;
break;
- case 97: /* db_options ::= db_options WAL_RETENTION_SIZE NK_MINUS NK_INTEGER */
+ case 98: /* db_options ::= db_options WAL_RETENTION_SIZE NK_MINUS NK_INTEGER */
{
SToken t = yymsp[-1].minor.yy0;
t.n = (yymsp[0].minor.yy0.z + yymsp[0].minor.yy0.n) - yymsp[-1].minor.yy0.z;
- yylhsminor.yy840 = setDatabaseOption(pCxt, yymsp[-3].minor.yy840, DB_OPTION_WAL_RETENTION_SIZE, &t);
+ yylhsminor.yy312 = setDatabaseOption(pCxt, yymsp[-3].minor.yy312, DB_OPTION_WAL_RETENTION_SIZE, &t);
}
- yymsp[-3].minor.yy840 = yylhsminor.yy840;
- break;
- case 98: /* db_options ::= db_options WAL_ROLL_PERIOD NK_INTEGER */
-{ yylhsminor.yy840 = setDatabaseOption(pCxt, yymsp[-2].minor.yy840, DB_OPTION_WAL_ROLL_PERIOD, &yymsp[0].minor.yy0); }
- yymsp[-2].minor.yy840 = yylhsminor.yy840;
- break;
- case 99: /* db_options ::= db_options WAL_SEGMENT_SIZE NK_INTEGER */
-{ yylhsminor.yy840 = setDatabaseOption(pCxt, yymsp[-2].minor.yy840, DB_OPTION_WAL_SEGMENT_SIZE, &yymsp[0].minor.yy0); }
- yymsp[-2].minor.yy840 = yylhsminor.yy840;
- break;
- case 100: /* alter_db_options ::= alter_db_option */
-{ yylhsminor.yy840 = createAlterDatabaseOptions(pCxt); yylhsminor.yy840 = setAlterDatabaseOption(pCxt, yylhsminor.yy840, &yymsp[0].minor.yy95); }
- yymsp[0].minor.yy840 = yylhsminor.yy840;
- break;
- case 101: /* alter_db_options ::= alter_db_options alter_db_option */
-{ yylhsminor.yy840 = setAlterDatabaseOption(pCxt, yymsp[-1].minor.yy840, &yymsp[0].minor.yy95); }
- yymsp[-1].minor.yy840 = yylhsminor.yy840;
- break;
- case 102: /* alter_db_option ::= CACHEMODEL NK_STRING */
-{ yymsp[-1].minor.yy95.type = DB_OPTION_CACHEMODEL; yymsp[-1].minor.yy95.val = yymsp[0].minor.yy0; }
- break;
- case 103: /* alter_db_option ::= CACHESIZE NK_INTEGER */
-{ yymsp[-1].minor.yy95.type = DB_OPTION_CACHESIZE; yymsp[-1].minor.yy95.val = yymsp[0].minor.yy0; }
- break;
- case 104: /* alter_db_option ::= WAL_FSYNC_PERIOD NK_INTEGER */
-{ yymsp[-1].minor.yy95.type = DB_OPTION_FSYNC; yymsp[-1].minor.yy95.val = yymsp[0].minor.yy0; }
- break;
- case 105: /* alter_db_option ::= KEEP integer_list */
- case 106: /* alter_db_option ::= KEEP variable_list */ yytestcase(yyruleno==106);
-{ yymsp[-1].minor.yy95.type = DB_OPTION_KEEP; yymsp[-1].minor.yy95.pList = yymsp[0].minor.yy544; }
- break;
- case 107: /* alter_db_option ::= WAL_LEVEL NK_INTEGER */
-{ yymsp[-1].minor.yy95.type = DB_OPTION_WAL; yymsp[-1].minor.yy95.val = yymsp[0].minor.yy0; }
- break;
- case 108: /* integer_list ::= NK_INTEGER */
-{ yylhsminor.yy544 = createNodeList(pCxt, createValueNode(pCxt, TSDB_DATA_TYPE_BIGINT, &yymsp[0].minor.yy0)); }
- yymsp[0].minor.yy544 = yylhsminor.yy544;
- break;
- case 109: /* integer_list ::= integer_list NK_COMMA NK_INTEGER */
- case 284: /* dnode_list ::= dnode_list DNODE NK_INTEGER */ yytestcase(yyruleno==284);
-{ yylhsminor.yy544 = addNodeToList(pCxt, yymsp[-2].minor.yy544, createValueNode(pCxt, TSDB_DATA_TYPE_BIGINT, &yymsp[0].minor.yy0)); }
- yymsp[-2].minor.yy544 = yylhsminor.yy544;
- break;
- case 110: /* variable_list ::= NK_VARIABLE */
-{ yylhsminor.yy544 = createNodeList(pCxt, createDurationValueNode(pCxt, &yymsp[0].minor.yy0)); }
- yymsp[0].minor.yy544 = yylhsminor.yy544;
- break;
- case 111: /* variable_list ::= variable_list NK_COMMA NK_VARIABLE */
-{ yylhsminor.yy544 = addNodeToList(pCxt, yymsp[-2].minor.yy544, createDurationValueNode(pCxt, &yymsp[0].minor.yy0)); }
- yymsp[-2].minor.yy544 = yylhsminor.yy544;
- break;
- case 112: /* retention_list ::= retention */
- case 132: /* multi_create_clause ::= create_subtable_clause */ yytestcase(yyruleno==132);
- case 135: /* multi_drop_clause ::= drop_table_clause */ yytestcase(yyruleno==135);
- case 142: /* column_def_list ::= column_def */ yytestcase(yyruleno==142);
- case 185: /* rollup_func_list ::= rollup_func_name */ yytestcase(yyruleno==185);
- case 190: /* col_name_list ::= col_name */ yytestcase(yyruleno==190);
- case 238: /* func_list ::= func */ yytestcase(yyruleno==238);
- case 312: /* literal_list ::= signed_literal */ yytestcase(yyruleno==312);
- case 374: /* other_para_list ::= star_func_para */ yytestcase(yyruleno==374);
- case 428: /* select_list ::= select_item */ yytestcase(yyruleno==428);
- case 482: /* sort_specification_list ::= sort_specification */ yytestcase(yyruleno==482);
-{ yylhsminor.yy544 = createNodeList(pCxt, yymsp[0].minor.yy840); }
- yymsp[0].minor.yy544 = yylhsminor.yy544;
- break;
- case 113: /* retention_list ::= retention_list NK_COMMA retention */
- case 143: /* column_def_list ::= column_def_list NK_COMMA column_def */ yytestcase(yyruleno==143);
- case 186: /* rollup_func_list ::= rollup_func_list NK_COMMA rollup_func_name */ yytestcase(yyruleno==186);
- case 191: /* col_name_list ::= col_name_list NK_COMMA col_name */ yytestcase(yyruleno==191);
- case 239: /* func_list ::= func_list NK_COMMA func */ yytestcase(yyruleno==239);
- case 313: /* literal_list ::= literal_list NK_COMMA signed_literal */ yytestcase(yyruleno==313);
- case 375: /* other_para_list ::= other_para_list NK_COMMA star_func_para */ yytestcase(yyruleno==375);
- case 429: /* select_list ::= select_list NK_COMMA select_item */ yytestcase(yyruleno==429);
- case 483: /* sort_specification_list ::= sort_specification_list NK_COMMA sort_specification */ yytestcase(yyruleno==483);
-{ yylhsminor.yy544 = addNodeToList(pCxt, yymsp[-2].minor.yy544, yymsp[0].minor.yy840); }
- yymsp[-2].minor.yy544 = yylhsminor.yy544;
- break;
- case 114: /* retention ::= NK_VARIABLE NK_COLON NK_VARIABLE */
-{ yylhsminor.yy840 = createNodeListNodeEx(pCxt, createDurationValueNode(pCxt, &yymsp[-2].minor.yy0), createDurationValueNode(pCxt, &yymsp[0].minor.yy0)); }
- yymsp[-2].minor.yy840 = yylhsminor.yy840;
- break;
- case 115: /* cmd ::= CREATE TABLE not_exists_opt full_table_name NK_LP column_def_list NK_RP tags_def_opt table_options */
- case 117: /* cmd ::= CREATE STABLE not_exists_opt full_table_name NK_LP column_def_list NK_RP tags_def table_options */ yytestcase(yyruleno==117);
-{ pCxt->pRootNode = createCreateTableStmt(pCxt, yymsp[-6].minor.yy313, yymsp[-5].minor.yy840, yymsp[-3].minor.yy544, yymsp[-1].minor.yy544, yymsp[0].minor.yy840); }
- break;
- case 116: /* cmd ::= CREATE TABLE multi_create_clause */
-{ pCxt->pRootNode = createCreateMultiTableStmt(pCxt, yymsp[0].minor.yy544); }
- break;
- case 118: /* cmd ::= DROP TABLE multi_drop_clause */
-{ pCxt->pRootNode = createDropTableStmt(pCxt, yymsp[0].minor.yy544); }
- break;
- case 119: /* cmd ::= DROP STABLE exists_opt full_table_name */
-{ pCxt->pRootNode = createDropSuperTableStmt(pCxt, yymsp[-1].minor.yy313, yymsp[0].minor.yy840); }
- break;
- case 120: /* cmd ::= ALTER TABLE alter_table_clause */
- case 286: /* cmd ::= query_expression */ yytestcase(yyruleno==286);
-{ pCxt->pRootNode = yymsp[0].minor.yy840; }
- break;
- case 121: /* cmd ::= ALTER STABLE alter_table_clause */
-{ pCxt->pRootNode = setAlterSuperTableType(yymsp[0].minor.yy840); }
- break;
- case 122: /* alter_table_clause ::= full_table_name alter_table_options */
-{ yylhsminor.yy840 = createAlterTableModifyOptions(pCxt, yymsp[-1].minor.yy840, yymsp[0].minor.yy840); }
- yymsp[-1].minor.yy840 = yylhsminor.yy840;
- break;
- case 123: /* alter_table_clause ::= full_table_name ADD COLUMN column_name type_name */
-{ yylhsminor.yy840 = createAlterTableAddModifyCol(pCxt, yymsp[-4].minor.yy840, TSDB_ALTER_TABLE_ADD_COLUMN, &yymsp[-1].minor.yy617, yymsp[0].minor.yy784); }
- yymsp[-4].minor.yy840 = yylhsminor.yy840;
- break;
- case 124: /* alter_table_clause ::= full_table_name DROP COLUMN column_name */
-{ yylhsminor.yy840 = createAlterTableDropCol(pCxt, yymsp[-3].minor.yy840, TSDB_ALTER_TABLE_DROP_COLUMN, &yymsp[0].minor.yy617); }
- yymsp[-3].minor.yy840 = yylhsminor.yy840;
- break;
- case 125: /* alter_table_clause ::= full_table_name MODIFY COLUMN column_name type_name */
-{ yylhsminor.yy840 = createAlterTableAddModifyCol(pCxt, yymsp[-4].minor.yy840, TSDB_ALTER_TABLE_UPDATE_COLUMN_BYTES, &yymsp[-1].minor.yy617, yymsp[0].minor.yy784); }
- yymsp[-4].minor.yy840 = yylhsminor.yy840;
+ yymsp[-3].minor.yy312 = yylhsminor.yy312;
+ break;
+ case 99: /* db_options ::= db_options WAL_ROLL_PERIOD NK_INTEGER */
+{ yylhsminor.yy312 = setDatabaseOption(pCxt, yymsp[-2].minor.yy312, DB_OPTION_WAL_ROLL_PERIOD, &yymsp[0].minor.yy0); }
+ yymsp[-2].minor.yy312 = yylhsminor.yy312;
+ break;
+ case 100: /* db_options ::= db_options WAL_SEGMENT_SIZE NK_INTEGER */
+{ yylhsminor.yy312 = setDatabaseOption(pCxt, yymsp[-2].minor.yy312, DB_OPTION_WAL_SEGMENT_SIZE, &yymsp[0].minor.yy0); }
+ yymsp[-2].minor.yy312 = yylhsminor.yy312;
+ break;
+ case 101: /* db_options ::= db_options STT_TRIGGER NK_INTEGER */
+{ yylhsminor.yy312 = setDatabaseOption(pCxt, yymsp[-2].minor.yy312, DB_OPTION_STT_TRIGGER, &yymsp[0].minor.yy0); }
+ yymsp[-2].minor.yy312 = yylhsminor.yy312;
+ break;
+ case 102: /* db_options ::= db_options TABLE_PREFIX NK_INTEGER */
+{ yylhsminor.yy312 = setDatabaseOption(pCxt, yymsp[-2].minor.yy312, DB_OPTION_TABLE_PREFIX, &yymsp[0].minor.yy0); }
+ yymsp[-2].minor.yy312 = yylhsminor.yy312;
+ break;
+ case 103: /* db_options ::= db_options TABLE_SUFFIX NK_INTEGER */
+{ yylhsminor.yy312 = setDatabaseOption(pCxt, yymsp[-2].minor.yy312, DB_OPTION_TABLE_SUFFIX, &yymsp[0].minor.yy0); }
+ yymsp[-2].minor.yy312 = yylhsminor.yy312;
+ break;
+ case 104: /* alter_db_options ::= alter_db_option */
+{ yylhsminor.yy312 = createAlterDatabaseOptions(pCxt); yylhsminor.yy312 = setAlterDatabaseOption(pCxt, yylhsminor.yy312, &yymsp[0].minor.yy405); }
+ yymsp[0].minor.yy312 = yylhsminor.yy312;
+ break;
+ case 105: /* alter_db_options ::= alter_db_options alter_db_option */
+{ yylhsminor.yy312 = setAlterDatabaseOption(pCxt, yymsp[-1].minor.yy312, &yymsp[0].minor.yy405); }
+ yymsp[-1].minor.yy312 = yylhsminor.yy312;
+ break;
+ case 106: /* alter_db_option ::= CACHEMODEL NK_STRING */
+{ yymsp[-1].minor.yy405.type = DB_OPTION_CACHEMODEL; yymsp[-1].minor.yy405.val = yymsp[0].minor.yy0; }
+ break;
+ case 107: /* alter_db_option ::= CACHESIZE NK_INTEGER */
+{ yymsp[-1].minor.yy405.type = DB_OPTION_CACHESIZE; yymsp[-1].minor.yy405.val = yymsp[0].minor.yy0; }
+ break;
+ case 108: /* alter_db_option ::= WAL_FSYNC_PERIOD NK_INTEGER */
+{ yymsp[-1].minor.yy405.type = DB_OPTION_FSYNC; yymsp[-1].minor.yy405.val = yymsp[0].minor.yy0; }
+ break;
+ case 109: /* alter_db_option ::= KEEP integer_list */
+ case 110: /* alter_db_option ::= KEEP variable_list */ yytestcase(yyruleno==110);
+{ yymsp[-1].minor.yy405.type = DB_OPTION_KEEP; yymsp[-1].minor.yy405.pList = yymsp[0].minor.yy824; }
+ break;
+ case 111: /* alter_db_option ::= WAL_LEVEL NK_INTEGER */
+{ yymsp[-1].minor.yy405.type = DB_OPTION_WAL; yymsp[-1].minor.yy405.val = yymsp[0].minor.yy0; }
+ break;
+ case 112: /* alter_db_option ::= STT_TRIGGER NK_INTEGER */
+{ yymsp[-1].minor.yy405.type = DB_OPTION_STT_TRIGGER; yymsp[-1].minor.yy405.val = yymsp[0].minor.yy0; }
+ break;
+ case 113: /* integer_list ::= NK_INTEGER */
+{ yylhsminor.yy824 = createNodeList(pCxt, createValueNode(pCxt, TSDB_DATA_TYPE_BIGINT, &yymsp[0].minor.yy0)); }
+ yymsp[0].minor.yy824 = yylhsminor.yy824;
+ break;
+ case 114: /* integer_list ::= integer_list NK_COMMA NK_INTEGER */
+ case 289: /* dnode_list ::= dnode_list DNODE NK_INTEGER */ yytestcase(yyruleno==289);
+{ yylhsminor.yy824 = addNodeToList(pCxt, yymsp[-2].minor.yy824, createValueNode(pCxt, TSDB_DATA_TYPE_BIGINT, &yymsp[0].minor.yy0)); }
+ yymsp[-2].minor.yy824 = yylhsminor.yy824;
+ break;
+ case 115: /* variable_list ::= NK_VARIABLE */
+{ yylhsminor.yy824 = createNodeList(pCxt, createDurationValueNode(pCxt, &yymsp[0].minor.yy0)); }
+ yymsp[0].minor.yy824 = yylhsminor.yy824;
+ break;
+ case 116: /* variable_list ::= variable_list NK_COMMA NK_VARIABLE */
+{ yylhsminor.yy824 = addNodeToList(pCxt, yymsp[-2].minor.yy824, createDurationValueNode(pCxt, &yymsp[0].minor.yy0)); }
+ yymsp[-2].minor.yy824 = yylhsminor.yy824;
+ break;
+ case 117: /* retention_list ::= retention */
+ case 137: /* multi_create_clause ::= create_subtable_clause */ yytestcase(yyruleno==137);
+ case 140: /* multi_drop_clause ::= drop_table_clause */ yytestcase(yyruleno==140);
+ case 147: /* column_def_list ::= column_def */ yytestcase(yyruleno==147);
+ case 190: /* rollup_func_list ::= rollup_func_name */ yytestcase(yyruleno==190);
+ case 195: /* col_name_list ::= col_name */ yytestcase(yyruleno==195);
+ case 245: /* func_list ::= func */ yytestcase(yyruleno==245);
+ case 317: /* literal_list ::= signed_literal */ yytestcase(yyruleno==317);
+ case 379: /* other_para_list ::= star_func_para */ yytestcase(yyruleno==379);
+ case 433: /* select_list ::= select_item */ yytestcase(yyruleno==433);
+ case 487: /* sort_specification_list ::= sort_specification */ yytestcase(yyruleno==487);
+{ yylhsminor.yy824 = createNodeList(pCxt, yymsp[0].minor.yy312); }
+ yymsp[0].minor.yy824 = yylhsminor.yy824;
+ break;
+ case 118: /* retention_list ::= retention_list NK_COMMA retention */
+ case 148: /* column_def_list ::= column_def_list NK_COMMA column_def */ yytestcase(yyruleno==148);
+ case 191: /* rollup_func_list ::= rollup_func_list NK_COMMA rollup_func_name */ yytestcase(yyruleno==191);
+ case 196: /* col_name_list ::= col_name_list NK_COMMA col_name */ yytestcase(yyruleno==196);
+ case 246: /* func_list ::= func_list NK_COMMA func */ yytestcase(yyruleno==246);
+ case 318: /* literal_list ::= literal_list NK_COMMA signed_literal */ yytestcase(yyruleno==318);
+ case 380: /* other_para_list ::= other_para_list NK_COMMA star_func_para */ yytestcase(yyruleno==380);
+ case 434: /* select_list ::= select_list NK_COMMA select_item */ yytestcase(yyruleno==434);
+ case 488: /* sort_specification_list ::= sort_specification_list NK_COMMA sort_specification */ yytestcase(yyruleno==488);
+{ yylhsminor.yy824 = addNodeToList(pCxt, yymsp[-2].minor.yy824, yymsp[0].minor.yy312); }
+ yymsp[-2].minor.yy824 = yylhsminor.yy824;
+ break;
+ case 119: /* retention ::= NK_VARIABLE NK_COLON NK_VARIABLE */
+{ yylhsminor.yy312 = createNodeListNodeEx(pCxt, createDurationValueNode(pCxt, &yymsp[-2].minor.yy0), createDurationValueNode(pCxt, &yymsp[0].minor.yy0)); }
+ yymsp[-2].minor.yy312 = yylhsminor.yy312;
+ break;
+ case 120: /* cmd ::= CREATE TABLE not_exists_opt full_table_name NK_LP column_def_list NK_RP tags_def_opt table_options */
+ case 122: /* cmd ::= CREATE STABLE not_exists_opt full_table_name NK_LP column_def_list NK_RP tags_def table_options */ yytestcase(yyruleno==122);
+{ pCxt->pRootNode = createCreateTableStmt(pCxt, yymsp[-6].minor.yy497, yymsp[-5].minor.yy312, yymsp[-3].minor.yy824, yymsp[-1].minor.yy824, yymsp[0].minor.yy312); }
+ break;
+ case 121: /* cmd ::= CREATE TABLE multi_create_clause */
+{ pCxt->pRootNode = createCreateMultiTableStmt(pCxt, yymsp[0].minor.yy824); }
+ break;
+ case 123: /* cmd ::= DROP TABLE multi_drop_clause */
+{ pCxt->pRootNode = createDropTableStmt(pCxt, yymsp[0].minor.yy824); }
+ break;
+ case 124: /* cmd ::= DROP STABLE exists_opt full_table_name */
+{ pCxt->pRootNode = createDropSuperTableStmt(pCxt, yymsp[-1].minor.yy497, yymsp[0].minor.yy312); }
+ break;
+ case 125: /* cmd ::= ALTER TABLE alter_table_clause */
+ case 291: /* cmd ::= query_expression */ yytestcase(yyruleno==291);
+{ pCxt->pRootNode = yymsp[0].minor.yy312; }
+ break;
+ case 126: /* cmd ::= ALTER STABLE alter_table_clause */
+{ pCxt->pRootNode = setAlterSuperTableType(yymsp[0].minor.yy312); }
+ break;
+ case 127: /* alter_table_clause ::= full_table_name alter_table_options */
+{ yylhsminor.yy312 = createAlterTableModifyOptions(pCxt, yymsp[-1].minor.yy312, yymsp[0].minor.yy312); }
+ yymsp[-1].minor.yy312 = yylhsminor.yy312;
+ break;
+ case 128: /* alter_table_clause ::= full_table_name ADD COLUMN column_name type_name */
+{ yylhsminor.yy312 = createAlterTableAddModifyCol(pCxt, yymsp[-4].minor.yy312, TSDB_ALTER_TABLE_ADD_COLUMN, &yymsp[-1].minor.yy149, yymsp[0].minor.yy84); }
+ yymsp[-4].minor.yy312 = yylhsminor.yy312;
+ break;
+ case 129: /* alter_table_clause ::= full_table_name DROP COLUMN column_name */
+{ yylhsminor.yy312 = createAlterTableDropCol(pCxt, yymsp[-3].minor.yy312, TSDB_ALTER_TABLE_DROP_COLUMN, &yymsp[0].minor.yy149); }
+ yymsp[-3].minor.yy312 = yylhsminor.yy312;
+ break;
+ case 130: /* alter_table_clause ::= full_table_name MODIFY COLUMN column_name type_name */
+{ yylhsminor.yy312 = createAlterTableAddModifyCol(pCxt, yymsp[-4].minor.yy312, TSDB_ALTER_TABLE_UPDATE_COLUMN_BYTES, &yymsp[-1].minor.yy149, yymsp[0].minor.yy84); }
+ yymsp[-4].minor.yy312 = yylhsminor.yy312;
break;
- case 126: /* alter_table_clause ::= full_table_name RENAME COLUMN column_name column_name */
-{ yylhsminor.yy840 = createAlterTableRenameCol(pCxt, yymsp[-4].minor.yy840, TSDB_ALTER_TABLE_UPDATE_COLUMN_NAME, &yymsp[-1].minor.yy617, &yymsp[0].minor.yy617); }
- yymsp[-4].minor.yy840 = yylhsminor.yy840;
+ case 131: /* alter_table_clause ::= full_table_name RENAME COLUMN column_name column_name */
+{ yylhsminor.yy312 = createAlterTableRenameCol(pCxt, yymsp[-4].minor.yy312, TSDB_ALTER_TABLE_UPDATE_COLUMN_NAME, &yymsp[-1].minor.yy149, &yymsp[0].minor.yy149); }
+ yymsp[-4].minor.yy312 = yylhsminor.yy312;
break;
- case 127: /* alter_table_clause ::= full_table_name ADD TAG column_name type_name */
-{ yylhsminor.yy840 = createAlterTableAddModifyCol(pCxt, yymsp[-4].minor.yy840, TSDB_ALTER_TABLE_ADD_TAG, &yymsp[-1].minor.yy617, yymsp[0].minor.yy784); }
- yymsp[-4].minor.yy840 = yylhsminor.yy840;
- break;
- case 128: /* alter_table_clause ::= full_table_name DROP TAG column_name */
-{ yylhsminor.yy840 = createAlterTableDropCol(pCxt, yymsp[-3].minor.yy840, TSDB_ALTER_TABLE_DROP_TAG, &yymsp[0].minor.yy617); }
- yymsp[-3].minor.yy840 = yylhsminor.yy840;
+ case 132: /* alter_table_clause ::= full_table_name ADD TAG column_name type_name */
+{ yylhsminor.yy312 = createAlterTableAddModifyCol(pCxt, yymsp[-4].minor.yy312, TSDB_ALTER_TABLE_ADD_TAG, &yymsp[-1].minor.yy149, yymsp[0].minor.yy84); }
+ yymsp[-4].minor.yy312 = yylhsminor.yy312;
+ break;
+ case 133: /* alter_table_clause ::= full_table_name DROP TAG column_name */
+{ yylhsminor.yy312 = createAlterTableDropCol(pCxt, yymsp[-3].minor.yy312, TSDB_ALTER_TABLE_DROP_TAG, &yymsp[0].minor.yy149); }
+ yymsp[-3].minor.yy312 = yylhsminor.yy312;
break;
- case 129: /* alter_table_clause ::= full_table_name MODIFY TAG column_name type_name */
-{ yylhsminor.yy840 = createAlterTableAddModifyCol(pCxt, yymsp[-4].minor.yy840, TSDB_ALTER_TABLE_UPDATE_TAG_BYTES, &yymsp[-1].minor.yy617, yymsp[0].minor.yy784); }
- yymsp[-4].minor.yy840 = yylhsminor.yy840;
+ case 134: /* alter_table_clause ::= full_table_name MODIFY TAG column_name type_name */
+{ yylhsminor.yy312 = createAlterTableAddModifyCol(pCxt, yymsp[-4].minor.yy312, TSDB_ALTER_TABLE_UPDATE_TAG_BYTES, &yymsp[-1].minor.yy149, yymsp[0].minor.yy84); }
+ yymsp[-4].minor.yy312 = yylhsminor.yy312;
break;
- case 130: /* alter_table_clause ::= full_table_name RENAME TAG column_name column_name */
-{ yylhsminor.yy840 = createAlterTableRenameCol(pCxt, yymsp[-4].minor.yy840, TSDB_ALTER_TABLE_UPDATE_TAG_NAME, &yymsp[-1].minor.yy617, &yymsp[0].minor.yy617); }
- yymsp[-4].minor.yy840 = yylhsminor.yy840;
+ case 135: /* alter_table_clause ::= full_table_name RENAME TAG column_name column_name */
+{ yylhsminor.yy312 = createAlterTableRenameCol(pCxt, yymsp[-4].minor.yy312, TSDB_ALTER_TABLE_UPDATE_TAG_NAME, &yymsp[-1].minor.yy149, &yymsp[0].minor.yy149); }
+ yymsp[-4].minor.yy312 = yylhsminor.yy312;
break;
- case 131: /* alter_table_clause ::= full_table_name SET TAG column_name NK_EQ signed_literal */
-{ yylhsminor.yy840 = createAlterTableSetTag(pCxt, yymsp[-5].minor.yy840, &yymsp[-2].minor.yy617, yymsp[0].minor.yy840); }
- yymsp[-5].minor.yy840 = yylhsminor.yy840;
+ case 136: /* alter_table_clause ::= full_table_name SET TAG column_name NK_EQ signed_literal */
+{ yylhsminor.yy312 = createAlterTableSetTag(pCxt, yymsp[-5].minor.yy312, &yymsp[-2].minor.yy149, yymsp[0].minor.yy312); }
+ yymsp[-5].minor.yy312 = yylhsminor.yy312;
break;
- case 133: /* multi_create_clause ::= multi_create_clause create_subtable_clause */
- case 136: /* multi_drop_clause ::= multi_drop_clause drop_table_clause */ yytestcase(yyruleno==136);
-{ yylhsminor.yy544 = addNodeToList(pCxt, yymsp[-1].minor.yy544, yymsp[0].minor.yy840); }
- yymsp[-1].minor.yy544 = yylhsminor.yy544;
+ case 138: /* multi_create_clause ::= multi_create_clause create_subtable_clause */
+ case 141: /* multi_drop_clause ::= multi_drop_clause drop_table_clause */ yytestcase(yyruleno==141);
+{ yylhsminor.yy824 = addNodeToList(pCxt, yymsp[-1].minor.yy824, yymsp[0].minor.yy312); }
+ yymsp[-1].minor.yy824 = yylhsminor.yy824;
break;
- case 134: /* create_subtable_clause ::= not_exists_opt full_table_name USING full_table_name specific_cols_opt TAGS NK_LP expression_list NK_RP table_options */
-{ yylhsminor.yy840 = createCreateSubTableClause(pCxt, yymsp[-9].minor.yy313, yymsp[-8].minor.yy840, yymsp[-6].minor.yy840, yymsp[-5].minor.yy544, yymsp[-2].minor.yy544, yymsp[0].minor.yy840); }
- yymsp[-9].minor.yy840 = yylhsminor.yy840;
+ case 139: /* create_subtable_clause ::= not_exists_opt full_table_name USING full_table_name specific_cols_opt TAGS NK_LP expression_list NK_RP table_options */
+{ yylhsminor.yy312 = createCreateSubTableClause(pCxt, yymsp[-9].minor.yy497, yymsp[-8].minor.yy312, yymsp[-6].minor.yy312, yymsp[-5].minor.yy824, yymsp[-2].minor.yy824, yymsp[0].minor.yy312); }
+ yymsp[-9].minor.yy312 = yylhsminor.yy312;
break;
- case 137: /* drop_table_clause ::= exists_opt full_table_name */
-{ yylhsminor.yy840 = createDropTableClause(pCxt, yymsp[-1].minor.yy313, yymsp[0].minor.yy840); }
- yymsp[-1].minor.yy840 = yylhsminor.yy840;
+ case 142: /* drop_table_clause ::= exists_opt full_table_name */
+{ yylhsminor.yy312 = createDropTableClause(pCxt, yymsp[-1].minor.yy497, yymsp[0].minor.yy312); }
+ yymsp[-1].minor.yy312 = yylhsminor.yy312;
break;
- case 138: /* specific_cols_opt ::= */
- case 169: /* tags_def_opt ::= */ yytestcase(yyruleno==169);
- case 437: /* partition_by_clause_opt ::= */ yytestcase(yyruleno==437);
- case 454: /* group_by_clause_opt ::= */ yytestcase(yyruleno==454);
- case 470: /* order_by_clause_opt ::= */ yytestcase(yyruleno==470);
-{ yymsp[1].minor.yy544 = NULL; }
+ case 143: /* specific_cols_opt ::= */
+ case 174: /* tags_def_opt ::= */ yytestcase(yyruleno==174);
+ case 442: /* partition_by_clause_opt ::= */ yytestcase(yyruleno==442);
+ case 459: /* group_by_clause_opt ::= */ yytestcase(yyruleno==459);
+ case 475: /* order_by_clause_opt ::= */ yytestcase(yyruleno==475);
+{ yymsp[1].minor.yy824 = NULL; }
break;
- case 139: /* specific_cols_opt ::= NK_LP col_name_list NK_RP */
-{ yymsp[-2].minor.yy544 = yymsp[-1].minor.yy544; }
+ case 144: /* specific_cols_opt ::= NK_LP col_name_list NK_RP */
+{ yymsp[-2].minor.yy824 = yymsp[-1].minor.yy824; }
break;
- case 140: /* full_table_name ::= table_name */
-{ yylhsminor.yy840 = createRealTableNode(pCxt, NULL, &yymsp[0].minor.yy617, NULL); }
- yymsp[0].minor.yy840 = yylhsminor.yy840;
+ case 145: /* full_table_name ::= table_name */
+{ yylhsminor.yy312 = createRealTableNode(pCxt, NULL, &yymsp[0].minor.yy149, NULL); }
+ yymsp[0].minor.yy312 = yylhsminor.yy312;
break;
- case 141: /* full_table_name ::= db_name NK_DOT table_name */
-{ yylhsminor.yy840 = createRealTableNode(pCxt, &yymsp[-2].minor.yy617, &yymsp[0].minor.yy617, NULL); }
- yymsp[-2].minor.yy840 = yylhsminor.yy840;
+ case 146: /* full_table_name ::= db_name NK_DOT table_name */
+{ yylhsminor.yy312 = createRealTableNode(pCxt, &yymsp[-2].minor.yy149, &yymsp[0].minor.yy149, NULL); }
+ yymsp[-2].minor.yy312 = yylhsminor.yy312;
break;
- case 144: /* column_def ::= column_name type_name */
-{ yylhsminor.yy840 = createColumnDefNode(pCxt, &yymsp[-1].minor.yy617, yymsp[0].minor.yy784, NULL); }
- yymsp[-1].minor.yy840 = yylhsminor.yy840;
+ case 149: /* column_def ::= column_name type_name */
+{ yylhsminor.yy312 = createColumnDefNode(pCxt, &yymsp[-1].minor.yy149, yymsp[0].minor.yy84, NULL); }
+ yymsp[-1].minor.yy312 = yylhsminor.yy312;
break;
- case 145: /* column_def ::= column_name type_name COMMENT NK_STRING */
-{ yylhsminor.yy840 = createColumnDefNode(pCxt, &yymsp[-3].minor.yy617, yymsp[-2].minor.yy784, &yymsp[0].minor.yy0); }
- yymsp[-3].minor.yy840 = yylhsminor.yy840;
+ case 150: /* column_def ::= column_name type_name COMMENT NK_STRING */
+{ yylhsminor.yy312 = createColumnDefNode(pCxt, &yymsp[-3].minor.yy149, yymsp[-2].minor.yy84, &yymsp[0].minor.yy0); }
+ yymsp[-3].minor.yy312 = yylhsminor.yy312;
break;
- case 146: /* type_name ::= BOOL */
-{ yymsp[0].minor.yy784 = createDataType(TSDB_DATA_TYPE_BOOL); }
+ case 151: /* type_name ::= BOOL */
+{ yymsp[0].minor.yy84 = createDataType(TSDB_DATA_TYPE_BOOL); }
break;
- case 147: /* type_name ::= TINYINT */
-{ yymsp[0].minor.yy784 = createDataType(TSDB_DATA_TYPE_TINYINT); }
+ case 152: /* type_name ::= TINYINT */
+{ yymsp[0].minor.yy84 = createDataType(TSDB_DATA_TYPE_TINYINT); }
break;
- case 148: /* type_name ::= SMALLINT */
-{ yymsp[0].minor.yy784 = createDataType(TSDB_DATA_TYPE_SMALLINT); }
+ case 153: /* type_name ::= SMALLINT */
+{ yymsp[0].minor.yy84 = createDataType(TSDB_DATA_TYPE_SMALLINT); }
break;
- case 149: /* type_name ::= INT */
- case 150: /* type_name ::= INTEGER */ yytestcase(yyruleno==150);
-{ yymsp[0].minor.yy784 = createDataType(TSDB_DATA_TYPE_INT); }
+ case 154: /* type_name ::= INT */
+ case 155: /* type_name ::= INTEGER */ yytestcase(yyruleno==155);
+{ yymsp[0].minor.yy84 = createDataType(TSDB_DATA_TYPE_INT); }
break;
- case 151: /* type_name ::= BIGINT */
-{ yymsp[0].minor.yy784 = createDataType(TSDB_DATA_TYPE_BIGINT); }
+ case 156: /* type_name ::= BIGINT */
+{ yymsp[0].minor.yy84 = createDataType(TSDB_DATA_TYPE_BIGINT); }
break;
- case 152: /* type_name ::= FLOAT */
-{ yymsp[0].minor.yy784 = createDataType(TSDB_DATA_TYPE_FLOAT); }
+ case 157: /* type_name ::= FLOAT */
+{ yymsp[0].minor.yy84 = createDataType(TSDB_DATA_TYPE_FLOAT); }
break;
- case 153: /* type_name ::= DOUBLE */
-{ yymsp[0].minor.yy784 = createDataType(TSDB_DATA_TYPE_DOUBLE); }
+ case 158: /* type_name ::= DOUBLE */
+{ yymsp[0].minor.yy84 = createDataType(TSDB_DATA_TYPE_DOUBLE); }
break;
- case 154: /* type_name ::= BINARY NK_LP NK_INTEGER NK_RP */
-{ yymsp[-3].minor.yy784 = createVarLenDataType(TSDB_DATA_TYPE_BINARY, &yymsp[-1].minor.yy0); }
+ case 159: /* type_name ::= BINARY NK_LP NK_INTEGER NK_RP */
+{ yymsp[-3].minor.yy84 = createVarLenDataType(TSDB_DATA_TYPE_BINARY, &yymsp[-1].minor.yy0); }
break;
- case 155: /* type_name ::= TIMESTAMP */
-{ yymsp[0].minor.yy784 = createDataType(TSDB_DATA_TYPE_TIMESTAMP); }
+ case 160: /* type_name ::= TIMESTAMP */
+{ yymsp[0].minor.yy84 = createDataType(TSDB_DATA_TYPE_TIMESTAMP); }
break;
- case 156: /* type_name ::= NCHAR NK_LP NK_INTEGER NK_RP */
-{ yymsp[-3].minor.yy784 = createVarLenDataType(TSDB_DATA_TYPE_NCHAR, &yymsp[-1].minor.yy0); }
+ case 161: /* type_name ::= NCHAR NK_LP NK_INTEGER NK_RP */
+{ yymsp[-3].minor.yy84 = createVarLenDataType(TSDB_DATA_TYPE_NCHAR, &yymsp[-1].minor.yy0); }
break;
- case 157: /* type_name ::= TINYINT UNSIGNED */
-{ yymsp[-1].minor.yy784 = createDataType(TSDB_DATA_TYPE_UTINYINT); }
+ case 162: /* type_name ::= TINYINT UNSIGNED */
+{ yymsp[-1].minor.yy84 = createDataType(TSDB_DATA_TYPE_UTINYINT); }
break;
- case 158: /* type_name ::= SMALLINT UNSIGNED */
-{ yymsp[-1].minor.yy784 = createDataType(TSDB_DATA_TYPE_USMALLINT); }
+ case 163: /* type_name ::= SMALLINT UNSIGNED */
+{ yymsp[-1].minor.yy84 = createDataType(TSDB_DATA_TYPE_USMALLINT); }
break;
- case 159: /* type_name ::= INT UNSIGNED */
-{ yymsp[-1].minor.yy784 = createDataType(TSDB_DATA_TYPE_UINT); }
+ case 164: /* type_name ::= INT UNSIGNED */
+{ yymsp[-1].minor.yy84 = createDataType(TSDB_DATA_TYPE_UINT); }
break;
- case 160: /* type_name ::= BIGINT UNSIGNED */
-{ yymsp[-1].minor.yy784 = createDataType(TSDB_DATA_TYPE_UBIGINT); }
+ case 165: /* type_name ::= BIGINT UNSIGNED */
+{ yymsp[-1].minor.yy84 = createDataType(TSDB_DATA_TYPE_UBIGINT); }
break;
- case 161: /* type_name ::= JSON */
-{ yymsp[0].minor.yy784 = createDataType(TSDB_DATA_TYPE_JSON); }
+ case 166: /* type_name ::= JSON */
+{ yymsp[0].minor.yy84 = createDataType(TSDB_DATA_TYPE_JSON); }
break;
- case 162: /* type_name ::= VARCHAR NK_LP NK_INTEGER NK_RP */
-{ yymsp[-3].minor.yy784 = createVarLenDataType(TSDB_DATA_TYPE_VARCHAR, &yymsp[-1].minor.yy0); }
+ case 167: /* type_name ::= VARCHAR NK_LP NK_INTEGER NK_RP */
+{ yymsp[-3].minor.yy84 = createVarLenDataType(TSDB_DATA_TYPE_VARCHAR, &yymsp[-1].minor.yy0); }
break;
- case 163: /* type_name ::= MEDIUMBLOB */
-{ yymsp[0].minor.yy784 = createDataType(TSDB_DATA_TYPE_MEDIUMBLOB); }
+ case 168: /* type_name ::= MEDIUMBLOB */
+{ yymsp[0].minor.yy84 = createDataType(TSDB_DATA_TYPE_MEDIUMBLOB); }
break;
- case 164: /* type_name ::= BLOB */
-{ yymsp[0].minor.yy784 = createDataType(TSDB_DATA_TYPE_BLOB); }
+ case 169: /* type_name ::= BLOB */
+{ yymsp[0].minor.yy84 = createDataType(TSDB_DATA_TYPE_BLOB); }
break;
- case 165: /* type_name ::= VARBINARY NK_LP NK_INTEGER NK_RP */
-{ yymsp[-3].minor.yy784 = createVarLenDataType(TSDB_DATA_TYPE_VARBINARY, &yymsp[-1].minor.yy0); }
+ case 170: /* type_name ::= VARBINARY NK_LP NK_INTEGER NK_RP */
+{ yymsp[-3].minor.yy84 = createVarLenDataType(TSDB_DATA_TYPE_VARBINARY, &yymsp[-1].minor.yy0); }
break;
- case 166: /* type_name ::= DECIMAL */
-{ yymsp[0].minor.yy784 = createDataType(TSDB_DATA_TYPE_DECIMAL); }
+ case 171: /* type_name ::= DECIMAL */
+{ yymsp[0].minor.yy84 = createDataType(TSDB_DATA_TYPE_DECIMAL); }
break;
- case 167: /* type_name ::= DECIMAL NK_LP NK_INTEGER NK_RP */
-{ yymsp[-3].minor.yy784 = createDataType(TSDB_DATA_TYPE_DECIMAL); }
+ case 172: /* type_name ::= DECIMAL NK_LP NK_INTEGER NK_RP */
+{ yymsp[-3].minor.yy84 = createDataType(TSDB_DATA_TYPE_DECIMAL); }
break;
- case 168: /* type_name ::= DECIMAL NK_LP NK_INTEGER NK_COMMA NK_INTEGER NK_RP */
-{ yymsp[-5].minor.yy784 = createDataType(TSDB_DATA_TYPE_DECIMAL); }
+ case 173: /* type_name ::= DECIMAL NK_LP NK_INTEGER NK_COMMA NK_INTEGER NK_RP */
+{ yymsp[-5].minor.yy84 = createDataType(TSDB_DATA_TYPE_DECIMAL); }
break;
- case 170: /* tags_def_opt ::= tags_def */
- case 373: /* star_func_para_list ::= other_para_list */ yytestcase(yyruleno==373);
-{ yylhsminor.yy544 = yymsp[0].minor.yy544; }
- yymsp[0].minor.yy544 = yylhsminor.yy544;
+ case 175: /* tags_def_opt ::= tags_def */
+ case 378: /* star_func_para_list ::= other_para_list */ yytestcase(yyruleno==378);
+{ yylhsminor.yy824 = yymsp[0].minor.yy824; }
+ yymsp[0].minor.yy824 = yylhsminor.yy824;
break;
- case 171: /* tags_def ::= TAGS NK_LP column_def_list NK_RP */
-{ yymsp[-3].minor.yy544 = yymsp[-1].minor.yy544; }
+ case 176: /* tags_def ::= TAGS NK_LP column_def_list NK_RP */
+{ yymsp[-3].minor.yy824 = yymsp[-1].minor.yy824; }
break;
- case 172: /* table_options ::= */
-{ yymsp[1].minor.yy840 = createDefaultTableOptions(pCxt); }
+ case 177: /* table_options ::= */
+{ yymsp[1].minor.yy312 = createDefaultTableOptions(pCxt); }
break;
- case 173: /* table_options ::= table_options COMMENT NK_STRING */
-{ yylhsminor.yy840 = setTableOption(pCxt, yymsp[-2].minor.yy840, TABLE_OPTION_COMMENT, &yymsp[0].minor.yy0); }
- yymsp[-2].minor.yy840 = yylhsminor.yy840;
+ case 178: /* table_options ::= table_options COMMENT NK_STRING */
+{ yylhsminor.yy312 = setTableOption(pCxt, yymsp[-2].minor.yy312, TABLE_OPTION_COMMENT, &yymsp[0].minor.yy0); }
+ yymsp[-2].minor.yy312 = yylhsminor.yy312;
break;
- case 174: /* table_options ::= table_options MAX_DELAY duration_list */
-{ yylhsminor.yy840 = setTableOption(pCxt, yymsp[-2].minor.yy840, TABLE_OPTION_MAXDELAY, yymsp[0].minor.yy544); }
- yymsp[-2].minor.yy840 = yylhsminor.yy840;
+ case 179: /* table_options ::= table_options MAX_DELAY duration_list */
+{ yylhsminor.yy312 = setTableOption(pCxt, yymsp[-2].minor.yy312, TABLE_OPTION_MAXDELAY, yymsp[0].minor.yy824); }
+ yymsp[-2].minor.yy312 = yylhsminor.yy312;
break;
- case 175: /* table_options ::= table_options WATERMARK duration_list */
-{ yylhsminor.yy840 = setTableOption(pCxt, yymsp[-2].minor.yy840, TABLE_OPTION_WATERMARK, yymsp[0].minor.yy544); }
- yymsp[-2].minor.yy840 = yylhsminor.yy840;
+ case 180: /* table_options ::= table_options WATERMARK duration_list */
+{ yylhsminor.yy312 = setTableOption(pCxt, yymsp[-2].minor.yy312, TABLE_OPTION_WATERMARK, yymsp[0].minor.yy824); }
+ yymsp[-2].minor.yy312 = yylhsminor.yy312;
break;
- case 176: /* table_options ::= table_options ROLLUP NK_LP rollup_func_list NK_RP */
-{ yylhsminor.yy840 = setTableOption(pCxt, yymsp[-4].minor.yy840, TABLE_OPTION_ROLLUP, yymsp[-1].minor.yy544); }
- yymsp[-4].minor.yy840 = yylhsminor.yy840;
+ case 181: /* table_options ::= table_options ROLLUP NK_LP rollup_func_list NK_RP */
+{ yylhsminor.yy312 = setTableOption(pCxt, yymsp[-4].minor.yy312, TABLE_OPTION_ROLLUP, yymsp[-1].minor.yy824); }
+ yymsp[-4].minor.yy312 = yylhsminor.yy312;
break;
- case 177: /* table_options ::= table_options TTL NK_INTEGER */
-{ yylhsminor.yy840 = setTableOption(pCxt, yymsp[-2].minor.yy840, TABLE_OPTION_TTL, &yymsp[0].minor.yy0); }
- yymsp[-2].minor.yy840 = yylhsminor.yy840;
+ case 182: /* table_options ::= table_options TTL NK_INTEGER */
+{ yylhsminor.yy312 = setTableOption(pCxt, yymsp[-2].minor.yy312, TABLE_OPTION_TTL, &yymsp[0].minor.yy0); }
+ yymsp[-2].minor.yy312 = yylhsminor.yy312;
break;
- case 178: /* table_options ::= table_options SMA NK_LP col_name_list NK_RP */
-{ yylhsminor.yy840 = setTableOption(pCxt, yymsp[-4].minor.yy840, TABLE_OPTION_SMA, yymsp[-1].minor.yy544); }
- yymsp[-4].minor.yy840 = yylhsminor.yy840;
+ case 183: /* table_options ::= table_options SMA NK_LP col_name_list NK_RP */
+{ yylhsminor.yy312 = setTableOption(pCxt, yymsp[-4].minor.yy312, TABLE_OPTION_SMA, yymsp[-1].minor.yy824); }
+ yymsp[-4].minor.yy312 = yylhsminor.yy312;
break;
- case 179: /* alter_table_options ::= alter_table_option */
-{ yylhsminor.yy840 = createAlterTableOptions(pCxt); yylhsminor.yy840 = setTableOption(pCxt, yylhsminor.yy840, yymsp[0].minor.yy95.type, &yymsp[0].minor.yy95.val); }
- yymsp[0].minor.yy840 = yylhsminor.yy840;
+ case 184: /* alter_table_options ::= alter_table_option */
+{ yylhsminor.yy312 = createAlterTableOptions(pCxt); yylhsminor.yy312 = setTableOption(pCxt, yylhsminor.yy312, yymsp[0].minor.yy405.type, &yymsp[0].minor.yy405.val); }
+ yymsp[0].minor.yy312 = yylhsminor.yy312;
break;
- case 180: /* alter_table_options ::= alter_table_options alter_table_option */
-{ yylhsminor.yy840 = setTableOption(pCxt, yymsp[-1].minor.yy840, yymsp[0].minor.yy95.type, &yymsp[0].minor.yy95.val); }
- yymsp[-1].minor.yy840 = yylhsminor.yy840;
+ case 185: /* alter_table_options ::= alter_table_options alter_table_option */
+{ yylhsminor.yy312 = setTableOption(pCxt, yymsp[-1].minor.yy312, yymsp[0].minor.yy405.type, &yymsp[0].minor.yy405.val); }
+ yymsp[-1].minor.yy312 = yylhsminor.yy312;
break;
- case 181: /* alter_table_option ::= COMMENT NK_STRING */
-{ yymsp[-1].minor.yy95.type = TABLE_OPTION_COMMENT; yymsp[-1].minor.yy95.val = yymsp[0].minor.yy0; }
+ case 186: /* alter_table_option ::= COMMENT NK_STRING */
+{ yymsp[-1].minor.yy405.type = TABLE_OPTION_COMMENT; yymsp[-1].minor.yy405.val = yymsp[0].minor.yy0; }
break;
- case 182: /* alter_table_option ::= TTL NK_INTEGER */
-{ yymsp[-1].minor.yy95.type = TABLE_OPTION_TTL; yymsp[-1].minor.yy95.val = yymsp[0].minor.yy0; }
+ case 187: /* alter_table_option ::= TTL NK_INTEGER */
+{ yymsp[-1].minor.yy405.type = TABLE_OPTION_TTL; yymsp[-1].minor.yy405.val = yymsp[0].minor.yy0; }
break;
- case 183: /* duration_list ::= duration_literal */
- case 340: /* expression_list ::= expression */ yytestcase(yyruleno==340);
-{ yylhsminor.yy544 = createNodeList(pCxt, releaseRawExprNode(pCxt, yymsp[0].minor.yy840)); }
- yymsp[0].minor.yy544 = yylhsminor.yy544;
+ case 188: /* duration_list ::= duration_literal */
+ case 345: /* expression_list ::= expression */ yytestcase(yyruleno==345);
+{ yylhsminor.yy824 = createNodeList(pCxt, releaseRawExprNode(pCxt, yymsp[0].minor.yy312)); }
+ yymsp[0].minor.yy824 = yylhsminor.yy824;
break;
- case 184: /* duration_list ::= duration_list NK_COMMA duration_literal */
- case 341: /* expression_list ::= expression_list NK_COMMA expression */ yytestcase(yyruleno==341);
-{ yylhsminor.yy544 = addNodeToList(pCxt, yymsp[-2].minor.yy544, releaseRawExprNode(pCxt, yymsp[0].minor.yy840)); }
- yymsp[-2].minor.yy544 = yylhsminor.yy544;
+ case 189: /* duration_list ::= duration_list NK_COMMA duration_literal */
+ case 346: /* expression_list ::= expression_list NK_COMMA expression */ yytestcase(yyruleno==346);
+{ yylhsminor.yy824 = addNodeToList(pCxt, yymsp[-2].minor.yy824, releaseRawExprNode(pCxt, yymsp[0].minor.yy312)); }
+ yymsp[-2].minor.yy824 = yylhsminor.yy824;
break;
- case 187: /* rollup_func_name ::= function_name */
-{ yylhsminor.yy840 = createFunctionNode(pCxt, &yymsp[0].minor.yy617, NULL); }
- yymsp[0].minor.yy840 = yylhsminor.yy840;
+ case 192: /* rollup_func_name ::= function_name */
+{ yylhsminor.yy312 = createFunctionNode(pCxt, &yymsp[0].minor.yy149, NULL); }
+ yymsp[0].minor.yy312 = yylhsminor.yy312;
break;
- case 188: /* rollup_func_name ::= FIRST */
- case 189: /* rollup_func_name ::= LAST */ yytestcase(yyruleno==189);
-{ yylhsminor.yy840 = createFunctionNode(pCxt, &yymsp[0].minor.yy0, NULL); }
- yymsp[0].minor.yy840 = yylhsminor.yy840;
+ case 193: /* rollup_func_name ::= FIRST */
+ case 194: /* rollup_func_name ::= LAST */ yytestcase(yyruleno==194);
+{ yylhsminor.yy312 = createFunctionNode(pCxt, &yymsp[0].minor.yy0, NULL); }
+ yymsp[0].minor.yy312 = yylhsminor.yy312;
break;
- case 192: /* col_name ::= column_name */
-{ yylhsminor.yy840 = createColumnNode(pCxt, NULL, &yymsp[0].minor.yy617); }
- yymsp[0].minor.yy840 = yylhsminor.yy840;
+ case 197: /* col_name ::= column_name */
+{ yylhsminor.yy312 = createColumnNode(pCxt, NULL, &yymsp[0].minor.yy149); }
+ yymsp[0].minor.yy312 = yylhsminor.yy312;
break;
- case 193: /* cmd ::= SHOW DNODES */
+ case 198: /* cmd ::= SHOW DNODES */
{ pCxt->pRootNode = createShowStmt(pCxt, QUERY_NODE_SHOW_DNODES_STMT); }
break;
- case 194: /* cmd ::= SHOW USERS */
+ case 199: /* cmd ::= SHOW USERS */
{ pCxt->pRootNode = createShowStmt(pCxt, QUERY_NODE_SHOW_USERS_STMT); }
break;
- case 195: /* cmd ::= SHOW DATABASES */
+ case 200: /* cmd ::= SHOW DATABASES */
{ pCxt->pRootNode = createShowStmt(pCxt, QUERY_NODE_SHOW_DATABASES_STMT); }
break;
- case 196: /* cmd ::= SHOW db_name_cond_opt TABLES like_pattern_opt */
-{ pCxt->pRootNode = createShowStmtWithCond(pCxt, QUERY_NODE_SHOW_TABLES_STMT, yymsp[-2].minor.yy840, yymsp[0].minor.yy840, OP_TYPE_LIKE); }
+ case 201: /* cmd ::= SHOW db_name_cond_opt TABLES like_pattern_opt */
+{ pCxt->pRootNode = createShowStmtWithCond(pCxt, QUERY_NODE_SHOW_TABLES_STMT, yymsp[-2].minor.yy312, yymsp[0].minor.yy312, OP_TYPE_LIKE); }
break;
- case 197: /* cmd ::= SHOW db_name_cond_opt STABLES like_pattern_opt */
-{ pCxt->pRootNode = createShowStmtWithCond(pCxt, QUERY_NODE_SHOW_STABLES_STMT, yymsp[-2].minor.yy840, yymsp[0].minor.yy840, OP_TYPE_LIKE); }
+ case 202: /* cmd ::= SHOW db_name_cond_opt STABLES like_pattern_opt */
+{ pCxt->pRootNode = createShowStmtWithCond(pCxt, QUERY_NODE_SHOW_STABLES_STMT, yymsp[-2].minor.yy312, yymsp[0].minor.yy312, OP_TYPE_LIKE); }
break;
- case 198: /* cmd ::= SHOW db_name_cond_opt VGROUPS */
-{ pCxt->pRootNode = createShowStmtWithCond(pCxt, QUERY_NODE_SHOW_VGROUPS_STMT, yymsp[-1].minor.yy840, NULL, OP_TYPE_LIKE); }
+ case 203: /* cmd ::= SHOW db_name_cond_opt VGROUPS */
+{ pCxt->pRootNode = createShowStmtWithCond(pCxt, QUERY_NODE_SHOW_VGROUPS_STMT, yymsp[-1].minor.yy312, NULL, OP_TYPE_LIKE); }
break;
- case 199: /* cmd ::= SHOW MNODES */
+ case 204: /* cmd ::= SHOW MNODES */
{ pCxt->pRootNode = createShowStmt(pCxt, QUERY_NODE_SHOW_MNODES_STMT); }
break;
- case 200: /* cmd ::= SHOW MODULES */
+ case 205: /* cmd ::= SHOW MODULES */
{ pCxt->pRootNode = createShowStmt(pCxt, QUERY_NODE_SHOW_MODULES_STMT); }
break;
- case 201: /* cmd ::= SHOW QNODES */
+ case 206: /* cmd ::= SHOW QNODES */
{ pCxt->pRootNode = createShowStmt(pCxt, QUERY_NODE_SHOW_QNODES_STMT); }
break;
- case 202: /* cmd ::= SHOW FUNCTIONS */
+ case 207: /* cmd ::= SHOW FUNCTIONS */
{ pCxt->pRootNode = createShowStmt(pCxt, QUERY_NODE_SHOW_FUNCTIONS_STMT); }
break;
- case 203: /* cmd ::= SHOW INDEXES FROM table_name_cond from_db_opt */
-{ pCxt->pRootNode = createShowStmtWithCond(pCxt, QUERY_NODE_SHOW_INDEXES_STMT, yymsp[0].minor.yy840, yymsp[-1].minor.yy840, OP_TYPE_EQUAL); }
+ case 208: /* cmd ::= SHOW INDEXES FROM table_name_cond from_db_opt */
+{ pCxt->pRootNode = createShowStmtWithCond(pCxt, QUERY_NODE_SHOW_INDEXES_STMT, yymsp[0].minor.yy312, yymsp[-1].minor.yy312, OP_TYPE_EQUAL); }
break;
- case 204: /* cmd ::= SHOW STREAMS */
+ case 209: /* cmd ::= SHOW STREAMS */
{ pCxt->pRootNode = createShowStmt(pCxt, QUERY_NODE_SHOW_STREAMS_STMT); }
break;
- case 205: /* cmd ::= SHOW ACCOUNTS */
+ case 210: /* cmd ::= SHOW ACCOUNTS */
{ pCxt->errCode = generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_EXPRIE_STATEMENT); }
break;
- case 206: /* cmd ::= SHOW APPS */
+ case 211: /* cmd ::= SHOW APPS */
{ pCxt->pRootNode = createShowStmt(pCxt, QUERY_NODE_SHOW_APPS_STMT); }
break;
- case 207: /* cmd ::= SHOW CONNECTIONS */
+ case 212: /* cmd ::= SHOW CONNECTIONS */
{ pCxt->pRootNode = createShowStmt(pCxt, QUERY_NODE_SHOW_CONNECTIONS_STMT); }
break;
- case 208: /* cmd ::= SHOW LICENCES */
- case 209: /* cmd ::= SHOW GRANTS */ yytestcase(yyruleno==209);
+ case 213: /* cmd ::= SHOW LICENCES */
+ case 214: /* cmd ::= SHOW GRANTS */ yytestcase(yyruleno==214);
{ pCxt->pRootNode = createShowStmt(pCxt, QUERY_NODE_SHOW_LICENCES_STMT); }
break;
- case 210: /* cmd ::= SHOW CREATE DATABASE db_name */
-{ pCxt->pRootNode = createShowCreateDatabaseStmt(pCxt, &yymsp[0].minor.yy617); }
+ case 215: /* cmd ::= SHOW CREATE DATABASE db_name */
+{ pCxt->pRootNode = createShowCreateDatabaseStmt(pCxt, &yymsp[0].minor.yy149); }
break;
- case 211: /* cmd ::= SHOW CREATE TABLE full_table_name */
-{ pCxt->pRootNode = createShowCreateTableStmt(pCxt, QUERY_NODE_SHOW_CREATE_TABLE_STMT, yymsp[0].minor.yy840); }
+ case 216: /* cmd ::= SHOW CREATE TABLE full_table_name */
+{ pCxt->pRootNode = createShowCreateTableStmt(pCxt, QUERY_NODE_SHOW_CREATE_TABLE_STMT, yymsp[0].minor.yy312); }
break;
- case 212: /* cmd ::= SHOW CREATE STABLE full_table_name */
-{ pCxt->pRootNode = createShowCreateTableStmt(pCxt, QUERY_NODE_SHOW_CREATE_STABLE_STMT, yymsp[0].minor.yy840); }
+ case 217: /* cmd ::= SHOW CREATE STABLE full_table_name */
+{ pCxt->pRootNode = createShowCreateTableStmt(pCxt, QUERY_NODE_SHOW_CREATE_STABLE_STMT, yymsp[0].minor.yy312); }
break;
- case 213: /* cmd ::= SHOW QUERIES */
+ case 218: /* cmd ::= SHOW QUERIES */
{ pCxt->pRootNode = createShowStmt(pCxt, QUERY_NODE_SHOW_QUERIES_STMT); }
break;
- case 214: /* cmd ::= SHOW SCORES */
+ case 219: /* cmd ::= SHOW SCORES */
{ pCxt->pRootNode = createShowStmt(pCxt, QUERY_NODE_SHOW_SCORES_STMT); }
break;
- case 215: /* cmd ::= SHOW TOPICS */
+ case 220: /* cmd ::= SHOW TOPICS */
{ pCxt->pRootNode = createShowStmt(pCxt, QUERY_NODE_SHOW_TOPICS_STMT); }
break;
- case 216: /* cmd ::= SHOW VARIABLES */
+ case 221: /* cmd ::= SHOW VARIABLES */
{ pCxt->pRootNode = createShowStmt(pCxt, QUERY_NODE_SHOW_VARIABLES_STMT); }
break;
- case 217: /* cmd ::= SHOW LOCAL VARIABLES */
+ case 222: /* cmd ::= SHOW LOCAL VARIABLES */
{ pCxt->pRootNode = createShowStmt(pCxt, QUERY_NODE_SHOW_LOCAL_VARIABLES_STMT); }
break;
- case 218: /* cmd ::= SHOW DNODE NK_INTEGER VARIABLES */
+ case 223: /* cmd ::= SHOW DNODE NK_INTEGER VARIABLES */
{ pCxt->pRootNode = createShowDnodeVariablesStmt(pCxt, createValueNode(pCxt, TSDB_DATA_TYPE_BIGINT, &yymsp[-1].minor.yy0)); }
break;
- case 219: /* cmd ::= SHOW BNODES */
+ case 224: /* cmd ::= SHOW BNODES */
{ pCxt->pRootNode = createShowStmt(pCxt, QUERY_NODE_SHOW_BNODES_STMT); }
break;
- case 220: /* cmd ::= SHOW SNODES */
+ case 225: /* cmd ::= SHOW SNODES */
{ pCxt->pRootNode = createShowStmt(pCxt, QUERY_NODE_SHOW_SNODES_STMT); }
break;
- case 221: /* cmd ::= SHOW CLUSTER */
+ case 226: /* cmd ::= SHOW CLUSTER */
{ pCxt->pRootNode = createShowStmt(pCxt, QUERY_NODE_SHOW_CLUSTER_STMT); }
break;
- case 222: /* cmd ::= SHOW TRANSACTIONS */
+ case 227: /* cmd ::= SHOW TRANSACTIONS */
{ pCxt->pRootNode = createShowStmt(pCxt, QUERY_NODE_SHOW_TRANSACTIONS_STMT); }
break;
- case 223: /* cmd ::= SHOW TABLE DISTRIBUTED full_table_name */
-{ pCxt->pRootNode = createShowTableDistributedStmt(pCxt, yymsp[0].minor.yy840); }
+ case 228: /* cmd ::= SHOW TABLE DISTRIBUTED full_table_name */
+{ pCxt->pRootNode = createShowTableDistributedStmt(pCxt, yymsp[0].minor.yy312); }
break;
- case 224: /* cmd ::= SHOW CONSUMERS */
+ case 229: /* cmd ::= SHOW CONSUMERS */
{ pCxt->pRootNode = createShowStmt(pCxt, QUERY_NODE_SHOW_CONSUMERS_STMT); }
break;
- case 225: /* cmd ::= SHOW SUBSCRIPTIONS */
+ case 230: /* cmd ::= SHOW SUBSCRIPTIONS */
{ pCxt->pRootNode = createShowStmt(pCxt, QUERY_NODE_SHOW_SUBSCRIPTIONS_STMT); }
break;
- case 226: /* cmd ::= SHOW TAGS FROM table_name_cond from_db_opt */
-{ pCxt->pRootNode = createShowStmtWithCond(pCxt, QUERY_NODE_SHOW_TAGS_STMT, yymsp[0].minor.yy840, yymsp[-1].minor.yy840, OP_TYPE_EQUAL); }
+ case 231: /* cmd ::= SHOW TAGS FROM table_name_cond from_db_opt */
+{ pCxt->pRootNode = createShowStmtWithCond(pCxt, QUERY_NODE_SHOW_TAGS_STMT, yymsp[0].minor.yy312, yymsp[-1].minor.yy312, OP_TYPE_EQUAL); }
break;
- case 227: /* db_name_cond_opt ::= */
- case 232: /* from_db_opt ::= */ yytestcase(yyruleno==232);
-{ yymsp[1].minor.yy840 = createDefaultDatabaseCondValue(pCxt); }
+ case 232: /* cmd ::= SHOW VNODES NK_INTEGER */
+{ pCxt->pRootNode = createShowVnodesStmt(pCxt, createValueNode(pCxt, TSDB_DATA_TYPE_BIGINT, &yymsp[0].minor.yy0), NULL); }
break;
- case 228: /* db_name_cond_opt ::= db_name NK_DOT */
-{ yylhsminor.yy840 = createValueNode(pCxt, TSDB_DATA_TYPE_BINARY, &yymsp[-1].minor.yy617); }
- yymsp[-1].minor.yy840 = yylhsminor.yy840;
+ case 233: /* cmd ::= SHOW VNODES NK_STRING */
+{ pCxt->pRootNode = createShowVnodesStmt(pCxt, NULL, createValueNode(pCxt, TSDB_DATA_TYPE_VARCHAR, &yymsp[0].minor.yy0)); }
break;
- case 229: /* like_pattern_opt ::= */
- case 268: /* into_opt ::= */ yytestcase(yyruleno==268);
- case 406: /* from_clause_opt ::= */ yytestcase(yyruleno==406);
- case 435: /* where_clause_opt ::= */ yytestcase(yyruleno==435);
- case 439: /* twindow_clause_opt ::= */ yytestcase(yyruleno==439);
- case 444: /* sliding_opt ::= */ yytestcase(yyruleno==444);
- case 446: /* fill_opt ::= */ yytestcase(yyruleno==446);
- case 458: /* having_clause_opt ::= */ yytestcase(yyruleno==458);
- case 460: /* range_opt ::= */ yytestcase(yyruleno==460);
- case 462: /* every_opt ::= */ yytestcase(yyruleno==462);
- case 472: /* slimit_clause_opt ::= */ yytestcase(yyruleno==472);
- case 476: /* limit_clause_opt ::= */ yytestcase(yyruleno==476);
-{ yymsp[1].minor.yy840 = NULL; }
+ case 234: /* db_name_cond_opt ::= */
+ case 239: /* from_db_opt ::= */ yytestcase(yyruleno==239);
+{ yymsp[1].minor.yy312 = createDefaultDatabaseCondValue(pCxt); }
break;
- case 230: /* like_pattern_opt ::= LIKE NK_STRING */
-{ yymsp[-1].minor.yy840 = createValueNode(pCxt, TSDB_DATA_TYPE_BINARY, &yymsp[0].minor.yy0); }
+ case 235: /* db_name_cond_opt ::= db_name NK_DOT */
+{ yylhsminor.yy312 = createValueNode(pCxt, TSDB_DATA_TYPE_BINARY, &yymsp[-1].minor.yy149); }
+ yymsp[-1].minor.yy312 = yylhsminor.yy312;
break;
- case 231: /* table_name_cond ::= table_name */
-{ yylhsminor.yy840 = createValueNode(pCxt, TSDB_DATA_TYPE_BINARY, &yymsp[0].minor.yy617); }
- yymsp[0].minor.yy840 = yylhsminor.yy840;
+ case 236: /* like_pattern_opt ::= */
+ case 411: /* from_clause_opt ::= */ yytestcase(yyruleno==411);
+ case 440: /* where_clause_opt ::= */ yytestcase(yyruleno==440);
+ case 444: /* twindow_clause_opt ::= */ yytestcase(yyruleno==444);
+ case 449: /* sliding_opt ::= */ yytestcase(yyruleno==449);
+ case 451: /* fill_opt ::= */ yytestcase(yyruleno==451);
+ case 463: /* having_clause_opt ::= */ yytestcase(yyruleno==463);
+ case 465: /* range_opt ::= */ yytestcase(yyruleno==465);
+ case 467: /* every_opt ::= */ yytestcase(yyruleno==467);
+ case 477: /* slimit_clause_opt ::= */ yytestcase(yyruleno==477);
+ case 481: /* limit_clause_opt ::= */ yytestcase(yyruleno==481);
+{ yymsp[1].minor.yy312 = NULL; }
break;
- case 233: /* from_db_opt ::= FROM db_name */
-{ yymsp[-1].minor.yy840 = createValueNode(pCxt, TSDB_DATA_TYPE_BINARY, &yymsp[0].minor.yy617); }
+ case 237: /* like_pattern_opt ::= LIKE NK_STRING */
+{ yymsp[-1].minor.yy312 = createValueNode(pCxt, TSDB_DATA_TYPE_BINARY, &yymsp[0].minor.yy0); }
break;
- case 234: /* cmd ::= CREATE SMA INDEX not_exists_opt full_table_name ON full_table_name index_options */
-{ pCxt->pRootNode = createCreateIndexStmt(pCxt, INDEX_TYPE_SMA, yymsp[-4].minor.yy313, yymsp[-3].minor.yy840, yymsp[-1].minor.yy840, NULL, yymsp[0].minor.yy840); }
+ case 238: /* table_name_cond ::= table_name */
+{ yylhsminor.yy312 = createValueNode(pCxt, TSDB_DATA_TYPE_BINARY, &yymsp[0].minor.yy149); }
+ yymsp[0].minor.yy312 = yylhsminor.yy312;
break;
- case 235: /* cmd ::= DROP INDEX exists_opt full_table_name */
-{ pCxt->pRootNode = createDropIndexStmt(pCxt, yymsp[-1].minor.yy313, yymsp[0].minor.yy840); }
+ case 240: /* from_db_opt ::= FROM db_name */
+{ yymsp[-1].minor.yy312 = createValueNode(pCxt, TSDB_DATA_TYPE_BINARY, &yymsp[0].minor.yy149); }
break;
- case 236: /* index_options ::= FUNCTION NK_LP func_list NK_RP INTERVAL NK_LP duration_literal NK_RP sliding_opt sma_stream_opt */
-{ yymsp[-9].minor.yy840 = createIndexOption(pCxt, yymsp[-7].minor.yy544, releaseRawExprNode(pCxt, yymsp[-3].minor.yy840), NULL, yymsp[-1].minor.yy840, yymsp[0].minor.yy840); }
+ case 241: /* cmd ::= CREATE SMA INDEX not_exists_opt full_table_name ON full_table_name index_options */
+{ pCxt->pRootNode = createCreateIndexStmt(pCxt, INDEX_TYPE_SMA, yymsp[-4].minor.yy497, yymsp[-3].minor.yy312, yymsp[-1].minor.yy312, NULL, yymsp[0].minor.yy312); }
break;
- case 237: /* index_options ::= FUNCTION NK_LP func_list NK_RP INTERVAL NK_LP duration_literal NK_COMMA duration_literal NK_RP sliding_opt sma_stream_opt */
-{ yymsp[-11].minor.yy840 = createIndexOption(pCxt, yymsp[-9].minor.yy544, releaseRawExprNode(pCxt, yymsp[-5].minor.yy840), releaseRawExprNode(pCxt, yymsp[-3].minor.yy840), yymsp[-1].minor.yy840, yymsp[0].minor.yy840); }
+ case 242: /* cmd ::= DROP INDEX exists_opt full_table_name */
+{ pCxt->pRootNode = createDropIndexStmt(pCxt, yymsp[-1].minor.yy497, yymsp[0].minor.yy312); }
break;
- case 240: /* func ::= function_name NK_LP expression_list NK_RP */
-{ yylhsminor.yy840 = createFunctionNode(pCxt, &yymsp[-3].minor.yy617, yymsp[-1].minor.yy544); }
- yymsp[-3].minor.yy840 = yylhsminor.yy840;
+ case 243: /* index_options ::= FUNCTION NK_LP func_list NK_RP INTERVAL NK_LP duration_literal NK_RP sliding_opt sma_stream_opt */
+{ yymsp[-9].minor.yy312 = createIndexOption(pCxt, yymsp[-7].minor.yy824, releaseRawExprNode(pCxt, yymsp[-3].minor.yy312), NULL, yymsp[-1].minor.yy312, yymsp[0].minor.yy312); }
break;
- case 241: /* sma_stream_opt ::= */
- case 270: /* stream_options ::= */ yytestcase(yyruleno==270);
-{ yymsp[1].minor.yy840 = createStreamOptions(pCxt); }
+ case 244: /* index_options ::= FUNCTION NK_LP func_list NK_RP INTERVAL NK_LP duration_literal NK_COMMA duration_literal NK_RP sliding_opt sma_stream_opt */
+{ yymsp[-11].minor.yy312 = createIndexOption(pCxt, yymsp[-9].minor.yy824, releaseRawExprNode(pCxt, yymsp[-5].minor.yy312), releaseRawExprNode(pCxt, yymsp[-3].minor.yy312), yymsp[-1].minor.yy312, yymsp[0].minor.yy312); }
break;
- case 242: /* sma_stream_opt ::= stream_options WATERMARK duration_literal */
- case 274: /* stream_options ::= stream_options WATERMARK duration_literal */ yytestcase(yyruleno==274);
-{ ((SStreamOptions*)yymsp[-2].minor.yy840)->pWatermark = releaseRawExprNode(pCxt, yymsp[0].minor.yy840); yylhsminor.yy840 = yymsp[-2].minor.yy840; }
- yymsp[-2].minor.yy840 = yylhsminor.yy840;
+ case 247: /* func ::= function_name NK_LP expression_list NK_RP */
+{ yylhsminor.yy312 = createFunctionNode(pCxt, &yymsp[-3].minor.yy149, yymsp[-1].minor.yy824); }
+ yymsp[-3].minor.yy312 = yylhsminor.yy312;
break;
- case 243: /* sma_stream_opt ::= stream_options MAX_DELAY duration_literal */
-{ ((SStreamOptions*)yymsp[-2].minor.yy840)->pDelay = releaseRawExprNode(pCxt, yymsp[0].minor.yy840); yylhsminor.yy840 = yymsp[-2].minor.yy840; }
- yymsp[-2].minor.yy840 = yylhsminor.yy840;
+ case 248: /* sma_stream_opt ::= */
+ case 275: /* stream_options ::= */ yytestcase(yyruleno==275);
+{ yymsp[1].minor.yy312 = createStreamOptions(pCxt); }
break;
- case 244: /* cmd ::= CREATE TOPIC not_exists_opt topic_name AS query_expression */
-{ pCxt->pRootNode = createCreateTopicStmtUseQuery(pCxt, yymsp[-3].minor.yy313, &yymsp[-2].minor.yy617, yymsp[0].minor.yy840); }
+ case 249: /* sma_stream_opt ::= stream_options WATERMARK duration_literal */
+ case 279: /* stream_options ::= stream_options WATERMARK duration_literal */ yytestcase(yyruleno==279);
+{ ((SStreamOptions*)yymsp[-2].minor.yy312)->pWatermark = releaseRawExprNode(pCxt, yymsp[0].minor.yy312); yylhsminor.yy312 = yymsp[-2].minor.yy312; }
+ yymsp[-2].minor.yy312 = yylhsminor.yy312;
break;
- case 245: /* cmd ::= CREATE TOPIC not_exists_opt topic_name AS DATABASE db_name */
-{ pCxt->pRootNode = createCreateTopicStmtUseDb(pCxt, yymsp[-4].minor.yy313, &yymsp[-3].minor.yy617, &yymsp[0].minor.yy617, false); }
+ case 250: /* sma_stream_opt ::= stream_options MAX_DELAY duration_literal */
+{ ((SStreamOptions*)yymsp[-2].minor.yy312)->pDelay = releaseRawExprNode(pCxt, yymsp[0].minor.yy312); yylhsminor.yy312 = yymsp[-2].minor.yy312; }
+ yymsp[-2].minor.yy312 = yylhsminor.yy312;
break;
- case 246: /* cmd ::= CREATE TOPIC not_exists_opt topic_name WITH META AS DATABASE db_name */
-{ pCxt->pRootNode = createCreateTopicStmtUseDb(pCxt, yymsp[-6].minor.yy313, &yymsp[-5].minor.yy617, &yymsp[0].minor.yy617, true); }
+ case 251: /* cmd ::= CREATE TOPIC not_exists_opt topic_name AS query_expression */
+{ pCxt->pRootNode = createCreateTopicStmtUseQuery(pCxt, yymsp[-3].minor.yy497, &yymsp[-2].minor.yy149, yymsp[0].minor.yy312); }
break;
- case 247: /* cmd ::= CREATE TOPIC not_exists_opt topic_name AS STABLE full_table_name */
-{ pCxt->pRootNode = createCreateTopicStmtUseTable(pCxt, yymsp[-4].minor.yy313, &yymsp[-3].minor.yy617, yymsp[0].minor.yy840, false); }
+ case 252: /* cmd ::= CREATE TOPIC not_exists_opt topic_name AS DATABASE db_name */
+{ pCxt->pRootNode = createCreateTopicStmtUseDb(pCxt, yymsp[-4].minor.yy497, &yymsp[-3].minor.yy149, &yymsp[0].minor.yy149, false); }
break;
- case 248: /* cmd ::= CREATE TOPIC not_exists_opt topic_name WITH META AS STABLE full_table_name */
-{ pCxt->pRootNode = createCreateTopicStmtUseTable(pCxt, yymsp[-6].minor.yy313, &yymsp[-5].minor.yy617, yymsp[0].minor.yy840, true); }
+ case 253: /* cmd ::= CREATE TOPIC not_exists_opt topic_name WITH META AS DATABASE db_name */
+{ pCxt->pRootNode = createCreateTopicStmtUseDb(pCxt, yymsp[-6].minor.yy497, &yymsp[-5].minor.yy149, &yymsp[0].minor.yy149, true); }
break;
- case 249: /* cmd ::= DROP TOPIC exists_opt topic_name */
-{ pCxt->pRootNode = createDropTopicStmt(pCxt, yymsp[-1].minor.yy313, &yymsp[0].minor.yy617); }
+ case 254: /* cmd ::= CREATE TOPIC not_exists_opt topic_name AS STABLE full_table_name */
+{ pCxt->pRootNode = createCreateTopicStmtUseTable(pCxt, yymsp[-4].minor.yy497, &yymsp[-3].minor.yy149, yymsp[0].minor.yy312, false); }
break;
- case 250: /* cmd ::= DROP CONSUMER GROUP exists_opt cgroup_name ON topic_name */
-{ pCxt->pRootNode = createDropCGroupStmt(pCxt, yymsp[-3].minor.yy313, &yymsp[-2].minor.yy617, &yymsp[0].minor.yy617); }
+ case 255: /* cmd ::= CREATE TOPIC not_exists_opt topic_name WITH META AS STABLE full_table_name */
+{ pCxt->pRootNode = createCreateTopicStmtUseTable(pCxt, yymsp[-6].minor.yy497, &yymsp[-5].minor.yy149, yymsp[0].minor.yy312, true); }
break;
- case 251: /* cmd ::= DESC full_table_name */
- case 252: /* cmd ::= DESCRIBE full_table_name */ yytestcase(yyruleno==252);
-{ pCxt->pRootNode = createDescribeStmt(pCxt, yymsp[0].minor.yy840); }
+ case 256: /* cmd ::= DROP TOPIC exists_opt topic_name */
+{ pCxt->pRootNode = createDropTopicStmt(pCxt, yymsp[-1].minor.yy497, &yymsp[0].minor.yy149); }
break;
- case 253: /* cmd ::= RESET QUERY CACHE */
-{ pCxt->pRootNode = createResetQueryCacheStmt(pCxt); }
+ case 257: /* cmd ::= DROP CONSUMER GROUP exists_opt cgroup_name ON topic_name */
+{ pCxt->pRootNode = createDropCGroupStmt(pCxt, yymsp[-3].minor.yy497, &yymsp[-2].minor.yy149, &yymsp[0].minor.yy149); }
break;
- case 254: /* cmd ::= EXPLAIN analyze_opt explain_options query_expression */
-{ pCxt->pRootNode = createExplainStmt(pCxt, yymsp[-2].minor.yy313, yymsp[-1].minor.yy840, yymsp[0].minor.yy840); }
+ case 258: /* cmd ::= DESC full_table_name */
+ case 259: /* cmd ::= DESCRIBE full_table_name */ yytestcase(yyruleno==259);
+{ pCxt->pRootNode = createDescribeStmt(pCxt, yymsp[0].minor.yy312); }
break;
- case 256: /* analyze_opt ::= ANALYZE */
- case 263: /* agg_func_opt ::= AGGREGATE */ yytestcase(yyruleno==263);
- case 426: /* set_quantifier_opt ::= DISTINCT */ yytestcase(yyruleno==426);
-{ yymsp[0].minor.yy313 = true; }
+ case 260: /* cmd ::= RESET QUERY CACHE */
+{ pCxt->pRootNode = createResetQueryCacheStmt(pCxt); }
break;
- case 257: /* explain_options ::= */
-{ yymsp[1].minor.yy840 = createDefaultExplainOptions(pCxt); }
+ case 261: /* cmd ::= EXPLAIN analyze_opt explain_options query_expression */
+{ pCxt->pRootNode = createExplainStmt(pCxt, yymsp[-2].minor.yy497, yymsp[-1].minor.yy312, yymsp[0].minor.yy312); }
break;
- case 258: /* explain_options ::= explain_options VERBOSE NK_BOOL */
-{ yylhsminor.yy840 = setExplainVerbose(pCxt, yymsp[-2].minor.yy840, &yymsp[0].minor.yy0); }
- yymsp[-2].minor.yy840 = yylhsminor.yy840;
+ case 263: /* analyze_opt ::= ANALYZE */
+ case 270: /* agg_func_opt ::= AGGREGATE */ yytestcase(yyruleno==270);
+ case 431: /* set_quantifier_opt ::= DISTINCT */ yytestcase(yyruleno==431);
+{ yymsp[0].minor.yy497 = true; }
break;
- case 259: /* explain_options ::= explain_options RATIO NK_FLOAT */
-{ yylhsminor.yy840 = setExplainRatio(pCxt, yymsp[-2].minor.yy840, &yymsp[0].minor.yy0); }
- yymsp[-2].minor.yy840 = yylhsminor.yy840;
+ case 264: /* explain_options ::= */
+{ yymsp[1].minor.yy312 = createDefaultExplainOptions(pCxt); }
break;
- case 260: /* cmd ::= CREATE agg_func_opt FUNCTION not_exists_opt function_name AS NK_STRING OUTPUTTYPE type_name bufsize_opt */
-{ pCxt->pRootNode = createCreateFunctionStmt(pCxt, yymsp[-6].minor.yy313, yymsp[-8].minor.yy313, &yymsp[-5].minor.yy617, &yymsp[-3].minor.yy0, yymsp[-1].minor.yy784, yymsp[0].minor.yy844); }
+ case 265: /* explain_options ::= explain_options VERBOSE NK_BOOL */
+{ yylhsminor.yy312 = setExplainVerbose(pCxt, yymsp[-2].minor.yy312, &yymsp[0].minor.yy0); }
+ yymsp[-2].minor.yy312 = yylhsminor.yy312;
break;
- case 261: /* cmd ::= DROP FUNCTION exists_opt function_name */
-{ pCxt->pRootNode = createDropFunctionStmt(pCxt, yymsp[-1].minor.yy313, &yymsp[0].minor.yy617); }
+ case 266: /* explain_options ::= explain_options RATIO NK_FLOAT */
+{ yylhsminor.yy312 = setExplainRatio(pCxt, yymsp[-2].minor.yy312, &yymsp[0].minor.yy0); }
+ yymsp[-2].minor.yy312 = yylhsminor.yy312;
break;
- case 264: /* bufsize_opt ::= */
-{ yymsp[1].minor.yy844 = 0; }
+ case 267: /* cmd ::= CREATE agg_func_opt FUNCTION not_exists_opt function_name AS NK_STRING OUTPUTTYPE type_name bufsize_opt */
+{ pCxt->pRootNode = createCreateFunctionStmt(pCxt, yymsp[-6].minor.yy497, yymsp[-8].minor.yy497, &yymsp[-5].minor.yy149, &yymsp[-3].minor.yy0, yymsp[-1].minor.yy84, yymsp[0].minor.yy160); }
break;
- case 265: /* bufsize_opt ::= BUFSIZE NK_INTEGER */
-{ yymsp[-1].minor.yy844 = taosStr2Int32(yymsp[0].minor.yy0.z, NULL, 10); }
+ case 268: /* cmd ::= DROP FUNCTION exists_opt function_name */
+{ pCxt->pRootNode = createDropFunctionStmt(pCxt, yymsp[-1].minor.yy497, &yymsp[0].minor.yy149); }
break;
- case 266: /* cmd ::= CREATE STREAM not_exists_opt stream_name stream_options into_opt AS query_expression */
-{ pCxt->pRootNode = createCreateStreamStmt(pCxt, yymsp[-5].minor.yy313, &yymsp[-4].minor.yy617, yymsp[-2].minor.yy840, yymsp[-3].minor.yy840, yymsp[0].minor.yy840); }
+ case 271: /* bufsize_opt ::= */
+{ yymsp[1].minor.yy160 = 0; }
break;
- case 267: /* cmd ::= DROP STREAM exists_opt stream_name */
-{ pCxt->pRootNode = createDropStreamStmt(pCxt, yymsp[-1].minor.yy313, &yymsp[0].minor.yy617); }
+ case 272: /* bufsize_opt ::= BUFSIZE NK_INTEGER */
+{ yymsp[-1].minor.yy160 = taosStr2Int32(yymsp[0].minor.yy0.z, NULL, 10); }
break;
- case 269: /* into_opt ::= INTO full_table_name */
- case 407: /* from_clause_opt ::= FROM table_reference_list */ yytestcase(yyruleno==407);
- case 436: /* where_clause_opt ::= WHERE search_condition */ yytestcase(yyruleno==436);
- case 459: /* having_clause_opt ::= HAVING search_condition */ yytestcase(yyruleno==459);
-{ yymsp[-1].minor.yy840 = yymsp[0].minor.yy840; }
+ case 273: /* cmd ::= CREATE STREAM not_exists_opt stream_name stream_options INTO full_table_name AS query_expression */
+{ pCxt->pRootNode = createCreateStreamStmt(pCxt, yymsp[-6].minor.yy497, &yymsp[-5].minor.yy149, yymsp[-2].minor.yy312, yymsp[-4].minor.yy312, yymsp[0].minor.yy312); }
break;
- case 271: /* stream_options ::= stream_options TRIGGER AT_ONCE */
-{ ((SStreamOptions*)yymsp[-2].minor.yy840)->triggerType = STREAM_TRIGGER_AT_ONCE; yylhsminor.yy840 = yymsp[-2].minor.yy840; }
- yymsp[-2].minor.yy840 = yylhsminor.yy840;
+ case 274: /* cmd ::= DROP STREAM exists_opt stream_name */
+{ pCxt->pRootNode = createDropStreamStmt(pCxt, yymsp[-1].minor.yy497, &yymsp[0].minor.yy149); }
break;
- case 272: /* stream_options ::= stream_options TRIGGER WINDOW_CLOSE */
-{ ((SStreamOptions*)yymsp[-2].minor.yy840)->triggerType = STREAM_TRIGGER_WINDOW_CLOSE; yylhsminor.yy840 = yymsp[-2].minor.yy840; }
- yymsp[-2].minor.yy840 = yylhsminor.yy840;
+ case 276: /* stream_options ::= stream_options TRIGGER AT_ONCE */
+{ ((SStreamOptions*)yymsp[-2].minor.yy312)->triggerType = STREAM_TRIGGER_AT_ONCE; yylhsminor.yy312 = yymsp[-2].minor.yy312; }
+ yymsp[-2].minor.yy312 = yylhsminor.yy312;
break;
- case 273: /* stream_options ::= stream_options TRIGGER MAX_DELAY duration_literal */
-{ ((SStreamOptions*)yymsp[-3].minor.yy840)->triggerType = STREAM_TRIGGER_MAX_DELAY; ((SStreamOptions*)yymsp[-3].minor.yy840)->pDelay = releaseRawExprNode(pCxt, yymsp[0].minor.yy840); yylhsminor.yy840 = yymsp[-3].minor.yy840; }
- yymsp[-3].minor.yy840 = yylhsminor.yy840;
+ case 277: /* stream_options ::= stream_options TRIGGER WINDOW_CLOSE */
+{ ((SStreamOptions*)yymsp[-2].minor.yy312)->triggerType = STREAM_TRIGGER_WINDOW_CLOSE; yylhsminor.yy312 = yymsp[-2].minor.yy312; }
+ yymsp[-2].minor.yy312 = yylhsminor.yy312;
break;
- case 275: /* stream_options ::= stream_options IGNORE EXPIRED NK_INTEGER */
-{ ((SStreamOptions*)yymsp[-3].minor.yy840)->ignoreExpired = taosStr2Int8(yymsp[0].minor.yy0.z, NULL, 10); yylhsminor.yy840 = yymsp[-3].minor.yy840; }
- yymsp[-3].minor.yy840 = yylhsminor.yy840;
+ case 278: /* stream_options ::= stream_options TRIGGER MAX_DELAY duration_literal */
+{ ((SStreamOptions*)yymsp[-3].minor.yy312)->triggerType = STREAM_TRIGGER_MAX_DELAY; ((SStreamOptions*)yymsp[-3].minor.yy312)->pDelay = releaseRawExprNode(pCxt, yymsp[0].minor.yy312); yylhsminor.yy312 = yymsp[-3].minor.yy312; }
+ yymsp[-3].minor.yy312 = yylhsminor.yy312;
break;
- case 276: /* cmd ::= KILL CONNECTION NK_INTEGER */
+ case 280: /* stream_options ::= stream_options IGNORE EXPIRED NK_INTEGER */
+{ ((SStreamOptions*)yymsp[-3].minor.yy312)->ignoreExpired = taosStr2Int8(yymsp[0].minor.yy0.z, NULL, 10); yylhsminor.yy312 = yymsp[-3].minor.yy312; }
+ yymsp[-3].minor.yy312 = yylhsminor.yy312;
+ break;
+ case 281: /* cmd ::= KILL CONNECTION NK_INTEGER */
{ pCxt->pRootNode = createKillStmt(pCxt, QUERY_NODE_KILL_CONNECTION_STMT, &yymsp[0].minor.yy0); }
break;
- case 277: /* cmd ::= KILL QUERY NK_STRING */
+ case 282: /* cmd ::= KILL QUERY NK_STRING */
{ pCxt->pRootNode = createKillQueryStmt(pCxt, &yymsp[0].minor.yy0); }
break;
- case 278: /* cmd ::= KILL TRANSACTION NK_INTEGER */
+ case 283: /* cmd ::= KILL TRANSACTION NK_INTEGER */
{ pCxt->pRootNode = createKillStmt(pCxt, QUERY_NODE_KILL_TRANSACTION_STMT, &yymsp[0].minor.yy0); }
break;
- case 279: /* cmd ::= BALANCE VGROUP */
+ case 284: /* cmd ::= BALANCE VGROUP */
{ pCxt->pRootNode = createBalanceVgroupStmt(pCxt); }
break;
- case 280: /* cmd ::= MERGE VGROUP NK_INTEGER NK_INTEGER */
+ case 285: /* cmd ::= MERGE VGROUP NK_INTEGER NK_INTEGER */
{ pCxt->pRootNode = createMergeVgroupStmt(pCxt, &yymsp[-1].minor.yy0, &yymsp[0].minor.yy0); }
break;
- case 281: /* cmd ::= REDISTRIBUTE VGROUP NK_INTEGER dnode_list */
-{ pCxt->pRootNode = createRedistributeVgroupStmt(pCxt, &yymsp[-1].minor.yy0, yymsp[0].minor.yy544); }
+ case 286: /* cmd ::= REDISTRIBUTE VGROUP NK_INTEGER dnode_list */
+{ pCxt->pRootNode = createRedistributeVgroupStmt(pCxt, &yymsp[-1].minor.yy0, yymsp[0].minor.yy824); }
break;
- case 282: /* cmd ::= SPLIT VGROUP NK_INTEGER */
+ case 287: /* cmd ::= SPLIT VGROUP NK_INTEGER */
{ pCxt->pRootNode = createSplitVgroupStmt(pCxt, &yymsp[0].minor.yy0); }
break;
- case 283: /* dnode_list ::= DNODE NK_INTEGER */
-{ yymsp[-1].minor.yy544 = createNodeList(pCxt, createValueNode(pCxt, TSDB_DATA_TYPE_BIGINT, &yymsp[0].minor.yy0)); }
- break;
- case 285: /* cmd ::= DELETE FROM full_table_name where_clause_opt */
-{ pCxt->pRootNode = createDeleteStmt(pCxt, yymsp[-1].minor.yy840, yymsp[0].minor.yy840); }
- break;
- case 287: /* cmd ::= INSERT INTO full_table_name NK_LP col_name_list NK_RP query_expression */
-{ pCxt->pRootNode = createInsertStmt(pCxt, yymsp[-4].minor.yy840, yymsp[-2].minor.yy544, yymsp[0].minor.yy840); }
- break;
- case 288: /* cmd ::= INSERT INTO full_table_name query_expression */
-{ pCxt->pRootNode = createInsertStmt(pCxt, yymsp[-1].minor.yy840, NULL, yymsp[0].minor.yy840); }
- break;
- case 289: /* literal ::= NK_INTEGER */
-{ yylhsminor.yy840 = createRawExprNode(pCxt, &yymsp[0].minor.yy0, createValueNode(pCxt, TSDB_DATA_TYPE_UBIGINT, &yymsp[0].minor.yy0)); }
- yymsp[0].minor.yy840 = yylhsminor.yy840;
- break;
- case 290: /* literal ::= NK_FLOAT */
-{ yylhsminor.yy840 = createRawExprNode(pCxt, &yymsp[0].minor.yy0, createValueNode(pCxt, TSDB_DATA_TYPE_DOUBLE, &yymsp[0].minor.yy0)); }
- yymsp[0].minor.yy840 = yylhsminor.yy840;
- break;
- case 291: /* literal ::= NK_STRING */
-{ yylhsminor.yy840 = createRawExprNode(pCxt, &yymsp[0].minor.yy0, createValueNode(pCxt, TSDB_DATA_TYPE_BINARY, &yymsp[0].minor.yy0)); }
- yymsp[0].minor.yy840 = yylhsminor.yy840;
- break;
- case 292: /* literal ::= NK_BOOL */
-{ yylhsminor.yy840 = createRawExprNode(pCxt, &yymsp[0].minor.yy0, createValueNode(pCxt, TSDB_DATA_TYPE_BOOL, &yymsp[0].minor.yy0)); }
- yymsp[0].minor.yy840 = yylhsminor.yy840;
- break;
- case 293: /* literal ::= TIMESTAMP NK_STRING */
-{ yylhsminor.yy840 = createRawExprNodeExt(pCxt, &yymsp[-1].minor.yy0, &yymsp[0].minor.yy0, createValueNode(pCxt, TSDB_DATA_TYPE_TIMESTAMP, &yymsp[0].minor.yy0)); }
- yymsp[-1].minor.yy840 = yylhsminor.yy840;
- break;
- case 294: /* literal ::= duration_literal */
- case 304: /* signed_literal ::= signed */ yytestcase(yyruleno==304);
- case 324: /* expression ::= literal */ yytestcase(yyruleno==324);
- case 325: /* expression ::= pseudo_column */ yytestcase(yyruleno==325);
- case 326: /* expression ::= column_reference */ yytestcase(yyruleno==326);
- case 327: /* expression ::= function_expression */ yytestcase(yyruleno==327);
- case 328: /* expression ::= subquery */ yytestcase(yyruleno==328);
- case 356: /* function_expression ::= literal_func */ yytestcase(yyruleno==356);
- case 398: /* boolean_value_expression ::= boolean_primary */ yytestcase(yyruleno==398);
- case 402: /* boolean_primary ::= predicate */ yytestcase(yyruleno==402);
- case 404: /* common_expression ::= expression */ yytestcase(yyruleno==404);
- case 405: /* common_expression ::= boolean_value_expression */ yytestcase(yyruleno==405);
- case 408: /* table_reference_list ::= table_reference */ yytestcase(yyruleno==408);
- case 410: /* table_reference ::= table_primary */ yytestcase(yyruleno==410);
- case 411: /* table_reference ::= joined_table */ yytestcase(yyruleno==411);
- case 415: /* table_primary ::= parenthesized_joined_table */ yytestcase(yyruleno==415);
- case 465: /* query_expression_body ::= query_primary */ yytestcase(yyruleno==465);
- case 468: /* query_primary ::= query_specification */ yytestcase(yyruleno==468);
-{ yylhsminor.yy840 = yymsp[0].minor.yy840; }
- yymsp[0].minor.yy840 = yylhsminor.yy840;
- break;
- case 295: /* literal ::= NULL */
-{ yylhsminor.yy840 = createRawExprNode(pCxt, &yymsp[0].minor.yy0, createValueNode(pCxt, TSDB_DATA_TYPE_NULL, &yymsp[0].minor.yy0)); }
- yymsp[0].minor.yy840 = yylhsminor.yy840;
- break;
- case 296: /* literal ::= NK_QUESTION */
-{ yylhsminor.yy840 = createRawExprNode(pCxt, &yymsp[0].minor.yy0, createPlaceholderValueNode(pCxt, &yymsp[0].minor.yy0)); }
- yymsp[0].minor.yy840 = yylhsminor.yy840;
- break;
- case 297: /* duration_literal ::= NK_VARIABLE */
-{ yylhsminor.yy840 = createRawExprNode(pCxt, &yymsp[0].minor.yy0, createDurationValueNode(pCxt, &yymsp[0].minor.yy0)); }
- yymsp[0].minor.yy840 = yylhsminor.yy840;
- break;
- case 298: /* signed ::= NK_INTEGER */
-{ yylhsminor.yy840 = createValueNode(pCxt, TSDB_DATA_TYPE_UBIGINT, &yymsp[0].minor.yy0); }
- yymsp[0].minor.yy840 = yylhsminor.yy840;
- break;
- case 299: /* signed ::= NK_PLUS NK_INTEGER */
-{ yymsp[-1].minor.yy840 = createValueNode(pCxt, TSDB_DATA_TYPE_UBIGINT, &yymsp[0].minor.yy0); }
- break;
- case 300: /* signed ::= NK_MINUS NK_INTEGER */
+ case 288: /* dnode_list ::= DNODE NK_INTEGER */
+{ yymsp[-1].minor.yy824 = createNodeList(pCxt, createValueNode(pCxt, TSDB_DATA_TYPE_BIGINT, &yymsp[0].minor.yy0)); }
+ break;
+ case 290: /* cmd ::= DELETE FROM full_table_name where_clause_opt */
+{ pCxt->pRootNode = createDeleteStmt(pCxt, yymsp[-1].minor.yy312, yymsp[0].minor.yy312); }
+ break;
+ case 292: /* cmd ::= INSERT INTO full_table_name NK_LP col_name_list NK_RP query_expression */
+{ pCxt->pRootNode = createInsertStmt(pCxt, yymsp[-4].minor.yy312, yymsp[-2].minor.yy824, yymsp[0].minor.yy312); }
+ break;
+ case 293: /* cmd ::= INSERT INTO full_table_name query_expression */
+{ pCxt->pRootNode = createInsertStmt(pCxt, yymsp[-1].minor.yy312, NULL, yymsp[0].minor.yy312); }
+ break;
+ case 294: /* literal ::= NK_INTEGER */
+{ yylhsminor.yy312 = createRawExprNode(pCxt, &yymsp[0].minor.yy0, createValueNode(pCxt, TSDB_DATA_TYPE_UBIGINT, &yymsp[0].minor.yy0)); }
+ yymsp[0].minor.yy312 = yylhsminor.yy312;
+ break;
+ case 295: /* literal ::= NK_FLOAT */
+{ yylhsminor.yy312 = createRawExprNode(pCxt, &yymsp[0].minor.yy0, createValueNode(pCxt, TSDB_DATA_TYPE_DOUBLE, &yymsp[0].minor.yy0)); }
+ yymsp[0].minor.yy312 = yylhsminor.yy312;
+ break;
+ case 296: /* literal ::= NK_STRING */
+{ yylhsminor.yy312 = createRawExprNode(pCxt, &yymsp[0].minor.yy0, createValueNode(pCxt, TSDB_DATA_TYPE_BINARY, &yymsp[0].minor.yy0)); }
+ yymsp[0].minor.yy312 = yylhsminor.yy312;
+ break;
+ case 297: /* literal ::= NK_BOOL */
+{ yylhsminor.yy312 = createRawExprNode(pCxt, &yymsp[0].minor.yy0, createValueNode(pCxt, TSDB_DATA_TYPE_BOOL, &yymsp[0].minor.yy0)); }
+ yymsp[0].minor.yy312 = yylhsminor.yy312;
+ break;
+ case 298: /* literal ::= TIMESTAMP NK_STRING */
+{ yylhsminor.yy312 = createRawExprNodeExt(pCxt, &yymsp[-1].minor.yy0, &yymsp[0].minor.yy0, createValueNode(pCxt, TSDB_DATA_TYPE_TIMESTAMP, &yymsp[0].minor.yy0)); }
+ yymsp[-1].minor.yy312 = yylhsminor.yy312;
+ break;
+ case 299: /* literal ::= duration_literal */
+ case 309: /* signed_literal ::= signed */ yytestcase(yyruleno==309);
+ case 329: /* expression ::= literal */ yytestcase(yyruleno==329);
+ case 330: /* expression ::= pseudo_column */ yytestcase(yyruleno==330);
+ case 331: /* expression ::= column_reference */ yytestcase(yyruleno==331);
+ case 332: /* expression ::= function_expression */ yytestcase(yyruleno==332);
+ case 333: /* expression ::= subquery */ yytestcase(yyruleno==333);
+ case 361: /* function_expression ::= literal_func */ yytestcase(yyruleno==361);
+ case 403: /* boolean_value_expression ::= boolean_primary */ yytestcase(yyruleno==403);
+ case 407: /* boolean_primary ::= predicate */ yytestcase(yyruleno==407);
+ case 409: /* common_expression ::= expression */ yytestcase(yyruleno==409);
+ case 410: /* common_expression ::= boolean_value_expression */ yytestcase(yyruleno==410);
+ case 413: /* table_reference_list ::= table_reference */ yytestcase(yyruleno==413);
+ case 415: /* table_reference ::= table_primary */ yytestcase(yyruleno==415);
+ case 416: /* table_reference ::= joined_table */ yytestcase(yyruleno==416);
+ case 420: /* table_primary ::= parenthesized_joined_table */ yytestcase(yyruleno==420);
+ case 470: /* query_expression_body ::= query_primary */ yytestcase(yyruleno==470);
+ case 473: /* query_primary ::= query_specification */ yytestcase(yyruleno==473);
+{ yylhsminor.yy312 = yymsp[0].minor.yy312; }
+ yymsp[0].minor.yy312 = yylhsminor.yy312;
+ break;
+ case 300: /* literal ::= NULL */
+{ yylhsminor.yy312 = createRawExprNode(pCxt, &yymsp[0].minor.yy0, createValueNode(pCxt, TSDB_DATA_TYPE_NULL, &yymsp[0].minor.yy0)); }
+ yymsp[0].minor.yy312 = yylhsminor.yy312;
+ break;
+ case 301: /* literal ::= NK_QUESTION */
+{ yylhsminor.yy312 = createRawExprNode(pCxt, &yymsp[0].minor.yy0, createPlaceholderValueNode(pCxt, &yymsp[0].minor.yy0)); }
+ yymsp[0].minor.yy312 = yylhsminor.yy312;
+ break;
+ case 302: /* duration_literal ::= NK_VARIABLE */
+{ yylhsminor.yy312 = createRawExprNode(pCxt, &yymsp[0].minor.yy0, createDurationValueNode(pCxt, &yymsp[0].minor.yy0)); }
+ yymsp[0].minor.yy312 = yylhsminor.yy312;
+ break;
+ case 303: /* signed ::= NK_INTEGER */
+{ yylhsminor.yy312 = createValueNode(pCxt, TSDB_DATA_TYPE_UBIGINT, &yymsp[0].minor.yy0); }
+ yymsp[0].minor.yy312 = yylhsminor.yy312;
+ break;
+ case 304: /* signed ::= NK_PLUS NK_INTEGER */
+{ yymsp[-1].minor.yy312 = createValueNode(pCxt, TSDB_DATA_TYPE_UBIGINT, &yymsp[0].minor.yy0); }
+ break;
+ case 305: /* signed ::= NK_MINUS NK_INTEGER */
{
SToken t = yymsp[-1].minor.yy0;
t.n = (yymsp[0].minor.yy0.z + yymsp[0].minor.yy0.n) - yymsp[-1].minor.yy0.z;
- yylhsminor.yy840 = createValueNode(pCxt, TSDB_DATA_TYPE_BIGINT, &t);
+ yylhsminor.yy312 = createValueNode(pCxt, TSDB_DATA_TYPE_BIGINT, &t);
}
- yymsp[-1].minor.yy840 = yylhsminor.yy840;
+ yymsp[-1].minor.yy312 = yylhsminor.yy312;
break;
- case 301: /* signed ::= NK_FLOAT */
-{ yylhsminor.yy840 = createValueNode(pCxt, TSDB_DATA_TYPE_DOUBLE, &yymsp[0].minor.yy0); }
- yymsp[0].minor.yy840 = yylhsminor.yy840;
+ case 306: /* signed ::= NK_FLOAT */
+{ yylhsminor.yy312 = createValueNode(pCxt, TSDB_DATA_TYPE_DOUBLE, &yymsp[0].minor.yy0); }
+ yymsp[0].minor.yy312 = yylhsminor.yy312;
break;
- case 302: /* signed ::= NK_PLUS NK_FLOAT */
-{ yymsp[-1].minor.yy840 = createValueNode(pCxt, TSDB_DATA_TYPE_DOUBLE, &yymsp[0].minor.yy0); }
+ case 307: /* signed ::= NK_PLUS NK_FLOAT */
+{ yymsp[-1].minor.yy312 = createValueNode(pCxt, TSDB_DATA_TYPE_DOUBLE, &yymsp[0].minor.yy0); }
break;
- case 303: /* signed ::= NK_MINUS NK_FLOAT */
+ case 308: /* signed ::= NK_MINUS NK_FLOAT */
{
SToken t = yymsp[-1].minor.yy0;
t.n = (yymsp[0].minor.yy0.z + yymsp[0].minor.yy0.n) - yymsp[-1].minor.yy0.z;
- yylhsminor.yy840 = createValueNode(pCxt, TSDB_DATA_TYPE_DOUBLE, &t);
+ yylhsminor.yy312 = createValueNode(pCxt, TSDB_DATA_TYPE_DOUBLE, &t);
}
- yymsp[-1].minor.yy840 = yylhsminor.yy840;
+ yymsp[-1].minor.yy312 = yylhsminor.yy312;
break;
- case 305: /* signed_literal ::= NK_STRING */
-{ yylhsminor.yy840 = createValueNode(pCxt, TSDB_DATA_TYPE_BINARY, &yymsp[0].minor.yy0); }
- yymsp[0].minor.yy840 = yylhsminor.yy840;
+ case 310: /* signed_literal ::= NK_STRING */
+{ yylhsminor.yy312 = createValueNode(pCxt, TSDB_DATA_TYPE_BINARY, &yymsp[0].minor.yy0); }
+ yymsp[0].minor.yy312 = yylhsminor.yy312;
break;
- case 306: /* signed_literal ::= NK_BOOL */
-{ yylhsminor.yy840 = createValueNode(pCxt, TSDB_DATA_TYPE_BOOL, &yymsp[0].minor.yy0); }
- yymsp[0].minor.yy840 = yylhsminor.yy840;
+ case 311: /* signed_literal ::= NK_BOOL */
+{ yylhsminor.yy312 = createValueNode(pCxt, TSDB_DATA_TYPE_BOOL, &yymsp[0].minor.yy0); }
+ yymsp[0].minor.yy312 = yylhsminor.yy312;
break;
- case 307: /* signed_literal ::= TIMESTAMP NK_STRING */
-{ yymsp[-1].minor.yy840 = createValueNode(pCxt, TSDB_DATA_TYPE_TIMESTAMP, &yymsp[0].minor.yy0); }
+ case 312: /* signed_literal ::= TIMESTAMP NK_STRING */
+{ yymsp[-1].minor.yy312 = createValueNode(pCxt, TSDB_DATA_TYPE_TIMESTAMP, &yymsp[0].minor.yy0); }
break;
- case 308: /* signed_literal ::= duration_literal */
- case 310: /* signed_literal ::= literal_func */ yytestcase(yyruleno==310);
- case 376: /* star_func_para ::= expression */ yytestcase(yyruleno==376);
- case 431: /* select_item ::= common_expression */ yytestcase(yyruleno==431);
- case 481: /* search_condition ::= common_expression */ yytestcase(yyruleno==481);
-{ yylhsminor.yy840 = releaseRawExprNode(pCxt, yymsp[0].minor.yy840); }
- yymsp[0].minor.yy840 = yylhsminor.yy840;
+ case 313: /* signed_literal ::= duration_literal */
+ case 315: /* signed_literal ::= literal_func */ yytestcase(yyruleno==315);
+ case 381: /* star_func_para ::= expression */ yytestcase(yyruleno==381);
+ case 436: /* select_item ::= common_expression */ yytestcase(yyruleno==436);
+ case 486: /* search_condition ::= common_expression */ yytestcase(yyruleno==486);
+{ yylhsminor.yy312 = releaseRawExprNode(pCxt, yymsp[0].minor.yy312); }
+ yymsp[0].minor.yy312 = yylhsminor.yy312;
break;
- case 309: /* signed_literal ::= NULL */
-{ yylhsminor.yy840 = createValueNode(pCxt, TSDB_DATA_TYPE_NULL, &yymsp[0].minor.yy0); }
- yymsp[0].minor.yy840 = yylhsminor.yy840;
+ case 314: /* signed_literal ::= NULL */
+{ yylhsminor.yy312 = createValueNode(pCxt, TSDB_DATA_TYPE_NULL, &yymsp[0].minor.yy0); }
+ yymsp[0].minor.yy312 = yylhsminor.yy312;
break;
- case 311: /* signed_literal ::= NK_QUESTION */
-{ yylhsminor.yy840 = createPlaceholderValueNode(pCxt, &yymsp[0].minor.yy0); }
- yymsp[0].minor.yy840 = yylhsminor.yy840;
+ case 316: /* signed_literal ::= NK_QUESTION */
+{ yylhsminor.yy312 = createPlaceholderValueNode(pCxt, &yymsp[0].minor.yy0); }
+ yymsp[0].minor.yy312 = yylhsminor.yy312;
break;
- case 329: /* expression ::= NK_LP expression NK_RP */
- case 403: /* boolean_primary ::= NK_LP boolean_value_expression NK_RP */ yytestcase(yyruleno==403);
-{ yylhsminor.yy840 = createRawExprNodeExt(pCxt, &yymsp[-2].minor.yy0, &yymsp[0].minor.yy0, releaseRawExprNode(pCxt, yymsp[-1].minor.yy840)); }
- yymsp[-2].minor.yy840 = yylhsminor.yy840;
+ case 334: /* expression ::= NK_LP expression NK_RP */
+ case 408: /* boolean_primary ::= NK_LP boolean_value_expression NK_RP */ yytestcase(yyruleno==408);
+{ yylhsminor.yy312 = createRawExprNodeExt(pCxt, &yymsp[-2].minor.yy0, &yymsp[0].minor.yy0, releaseRawExprNode(pCxt, yymsp[-1].minor.yy312)); }
+ yymsp[-2].minor.yy312 = yylhsminor.yy312;
break;
- case 330: /* expression ::= NK_PLUS expression */
+ case 335: /* expression ::= NK_PLUS expression */
{
- SToken t = getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy840);
- yylhsminor.yy840 = createRawExprNodeExt(pCxt, &yymsp[-1].minor.yy0, &t, releaseRawExprNode(pCxt, yymsp[0].minor.yy840));
+ SToken t = getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy312);
+ yylhsminor.yy312 = createRawExprNodeExt(pCxt, &yymsp[-1].minor.yy0, &t, releaseRawExprNode(pCxt, yymsp[0].minor.yy312));
}
- yymsp[-1].minor.yy840 = yylhsminor.yy840;
+ yymsp[-1].minor.yy312 = yylhsminor.yy312;
break;
- case 331: /* expression ::= NK_MINUS expression */
+ case 336: /* expression ::= NK_MINUS expression */
{
- SToken t = getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy840);
- yylhsminor.yy840 = createRawExprNodeExt(pCxt, &yymsp[-1].minor.yy0, &t, createOperatorNode(pCxt, OP_TYPE_MINUS, releaseRawExprNode(pCxt, yymsp[0].minor.yy840), NULL));
+ SToken t = getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy312);
+ yylhsminor.yy312 = createRawExprNodeExt(pCxt, &yymsp[-1].minor.yy0, &t, createOperatorNode(pCxt, OP_TYPE_MINUS, releaseRawExprNode(pCxt, yymsp[0].minor.yy312), NULL));
}
- yymsp[-1].minor.yy840 = yylhsminor.yy840;
+ yymsp[-1].minor.yy312 = yylhsminor.yy312;
break;
- case 332: /* expression ::= expression NK_PLUS expression */
+ case 337: /* expression ::= expression NK_PLUS expression */
{
- SToken s = getTokenFromRawExprNode(pCxt, yymsp[-2].minor.yy840);
- SToken e = getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy840);
- yylhsminor.yy840 = createRawExprNodeExt(pCxt, &s, &e, createOperatorNode(pCxt, OP_TYPE_ADD, releaseRawExprNode(pCxt, yymsp[-2].minor.yy840), releaseRawExprNode(pCxt, yymsp[0].minor.yy840)));
+ SToken s = getTokenFromRawExprNode(pCxt, yymsp[-2].minor.yy312);
+ SToken e = getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy312);
+ yylhsminor.yy312 = createRawExprNodeExt(pCxt, &s, &e, createOperatorNode(pCxt, OP_TYPE_ADD, releaseRawExprNode(pCxt, yymsp[-2].minor.yy312), releaseRawExprNode(pCxt, yymsp[0].minor.yy312)));
}
- yymsp[-2].minor.yy840 = yylhsminor.yy840;
+ yymsp[-2].minor.yy312 = yylhsminor.yy312;
break;
- case 333: /* expression ::= expression NK_MINUS expression */
+ case 338: /* expression ::= expression NK_MINUS expression */
{
- SToken s = getTokenFromRawExprNode(pCxt, yymsp[-2].minor.yy840);
- SToken e = getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy840);
- yylhsminor.yy840 = createRawExprNodeExt(pCxt, &s, &e, createOperatorNode(pCxt, OP_TYPE_SUB, releaseRawExprNode(pCxt, yymsp[-2].minor.yy840), releaseRawExprNode(pCxt, yymsp[0].minor.yy840)));
+ SToken s = getTokenFromRawExprNode(pCxt, yymsp[-2].minor.yy312);
+ SToken e = getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy312);
+ yylhsminor.yy312 = createRawExprNodeExt(pCxt, &s, &e, createOperatorNode(pCxt, OP_TYPE_SUB, releaseRawExprNode(pCxt, yymsp[-2].minor.yy312), releaseRawExprNode(pCxt, yymsp[0].minor.yy312)));
}
- yymsp[-2].minor.yy840 = yylhsminor.yy840;
+ yymsp[-2].minor.yy312 = yylhsminor.yy312;
break;
- case 334: /* expression ::= expression NK_STAR expression */
+ case 339: /* expression ::= expression NK_STAR expression */
{
- SToken s = getTokenFromRawExprNode(pCxt, yymsp[-2].minor.yy840);
- SToken e = getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy840);
- yylhsminor.yy840 = createRawExprNodeExt(pCxt, &s, &e, createOperatorNode(pCxt, OP_TYPE_MULTI, releaseRawExprNode(pCxt, yymsp[-2].minor.yy840), releaseRawExprNode(pCxt, yymsp[0].minor.yy840)));
+ SToken s = getTokenFromRawExprNode(pCxt, yymsp[-2].minor.yy312);
+ SToken e = getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy312);
+ yylhsminor.yy312 = createRawExprNodeExt(pCxt, &s, &e, createOperatorNode(pCxt, OP_TYPE_MULTI, releaseRawExprNode(pCxt, yymsp[-2].minor.yy312), releaseRawExprNode(pCxt, yymsp[0].minor.yy312)));
}
- yymsp[-2].minor.yy840 = yylhsminor.yy840;
+ yymsp[-2].minor.yy312 = yylhsminor.yy312;
break;
- case 335: /* expression ::= expression NK_SLASH expression */
+ case 340: /* expression ::= expression NK_SLASH expression */
{
- SToken s = getTokenFromRawExprNode(pCxt, yymsp[-2].minor.yy840);
- SToken e = getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy840);
- yylhsminor.yy840 = createRawExprNodeExt(pCxt, &s, &e, createOperatorNode(pCxt, OP_TYPE_DIV, releaseRawExprNode(pCxt, yymsp[-2].minor.yy840), releaseRawExprNode(pCxt, yymsp[0].minor.yy840)));
+ SToken s = getTokenFromRawExprNode(pCxt, yymsp[-2].minor.yy312);
+ SToken e = getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy312);
+ yylhsminor.yy312 = createRawExprNodeExt(pCxt, &s, &e, createOperatorNode(pCxt, OP_TYPE_DIV, releaseRawExprNode(pCxt, yymsp[-2].minor.yy312), releaseRawExprNode(pCxt, yymsp[0].minor.yy312)));
}
- yymsp[-2].minor.yy840 = yylhsminor.yy840;
+ yymsp[-2].minor.yy312 = yylhsminor.yy312;
break;
- case 336: /* expression ::= expression NK_REM expression */
+ case 341: /* expression ::= expression NK_REM expression */
{
- SToken s = getTokenFromRawExprNode(pCxt, yymsp[-2].minor.yy840);
- SToken e = getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy840);
- yylhsminor.yy840 = createRawExprNodeExt(pCxt, &s, &e, createOperatorNode(pCxt, OP_TYPE_REM, releaseRawExprNode(pCxt, yymsp[-2].minor.yy840), releaseRawExprNode(pCxt, yymsp[0].minor.yy840)));
+ SToken s = getTokenFromRawExprNode(pCxt, yymsp[-2].minor.yy312);
+ SToken e = getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy312);
+ yylhsminor.yy312 = createRawExprNodeExt(pCxt, &s, &e, createOperatorNode(pCxt, OP_TYPE_REM, releaseRawExprNode(pCxt, yymsp[-2].minor.yy312), releaseRawExprNode(pCxt, yymsp[0].minor.yy312)));
}
- yymsp[-2].minor.yy840 = yylhsminor.yy840;
+ yymsp[-2].minor.yy312 = yylhsminor.yy312;
break;
- case 337: /* expression ::= column_reference NK_ARROW NK_STRING */
+ case 342: /* expression ::= column_reference NK_ARROW NK_STRING */
{
- SToken s = getTokenFromRawExprNode(pCxt, yymsp[-2].minor.yy840);
- yylhsminor.yy840 = createRawExprNodeExt(pCxt, &s, &yymsp[0].minor.yy0, createOperatorNode(pCxt, OP_TYPE_JSON_GET_VALUE, releaseRawExprNode(pCxt, yymsp[-2].minor.yy840), createValueNode(pCxt, TSDB_DATA_TYPE_BINARY, &yymsp[0].minor.yy0)));
+ SToken s = getTokenFromRawExprNode(pCxt, yymsp[-2].minor.yy312);
+ yylhsminor.yy312 = createRawExprNodeExt(pCxt, &s, &yymsp[0].minor.yy0, createOperatorNode(pCxt, OP_TYPE_JSON_GET_VALUE, releaseRawExprNode(pCxt, yymsp[-2].minor.yy312), createValueNode(pCxt, TSDB_DATA_TYPE_BINARY, &yymsp[0].minor.yy0)));
}
- yymsp[-2].minor.yy840 = yylhsminor.yy840;
+ yymsp[-2].minor.yy312 = yylhsminor.yy312;
break;
- case 338: /* expression ::= expression NK_BITAND expression */
+ case 343: /* expression ::= expression NK_BITAND expression */
{
- SToken s = getTokenFromRawExprNode(pCxt, yymsp[-2].minor.yy840);
- SToken e = getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy840);
- yylhsminor.yy840 = createRawExprNodeExt(pCxt, &s, &e, createOperatorNode(pCxt, OP_TYPE_BIT_AND, releaseRawExprNode(pCxt, yymsp[-2].minor.yy840), releaseRawExprNode(pCxt, yymsp[0].minor.yy840)));
+ SToken s = getTokenFromRawExprNode(pCxt, yymsp[-2].minor.yy312);
+ SToken e = getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy312);
+ yylhsminor.yy312 = createRawExprNodeExt(pCxt, &s, &e, createOperatorNode(pCxt, OP_TYPE_BIT_AND, releaseRawExprNode(pCxt, yymsp[-2].minor.yy312), releaseRawExprNode(pCxt, yymsp[0].minor.yy312)));
}
- yymsp[-2].minor.yy840 = yylhsminor.yy840;
+ yymsp[-2].minor.yy312 = yylhsminor.yy312;
break;
- case 339: /* expression ::= expression NK_BITOR expression */
+ case 344: /* expression ::= expression NK_BITOR expression */
{
- SToken s = getTokenFromRawExprNode(pCxt, yymsp[-2].minor.yy840);
- SToken e = getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy840);
- yylhsminor.yy840 = createRawExprNodeExt(pCxt, &s, &e, createOperatorNode(pCxt, OP_TYPE_BIT_OR, releaseRawExprNode(pCxt, yymsp[-2].minor.yy840), releaseRawExprNode(pCxt, yymsp[0].minor.yy840)));
+ SToken s = getTokenFromRawExprNode(pCxt, yymsp[-2].minor.yy312);
+ SToken e = getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy312);
+ yylhsminor.yy312 = createRawExprNodeExt(pCxt, &s, &e, createOperatorNode(pCxt, OP_TYPE_BIT_OR, releaseRawExprNode(pCxt, yymsp[-2].minor.yy312), releaseRawExprNode(pCxt, yymsp[0].minor.yy312)));
}
- yymsp[-2].minor.yy840 = yylhsminor.yy840;
- break;
- case 342: /* column_reference ::= column_name */
-{ yylhsminor.yy840 = createRawExprNode(pCxt, &yymsp[0].minor.yy617, createColumnNode(pCxt, NULL, &yymsp[0].minor.yy617)); }
- yymsp[0].minor.yy840 = yylhsminor.yy840;
- break;
- case 343: /* column_reference ::= table_name NK_DOT column_name */
-{ yylhsminor.yy840 = createRawExprNodeExt(pCxt, &yymsp[-2].minor.yy617, &yymsp[0].minor.yy617, createColumnNode(pCxt, &yymsp[-2].minor.yy617, &yymsp[0].minor.yy617)); }
- yymsp[-2].minor.yy840 = yylhsminor.yy840;
- break;
- case 344: /* pseudo_column ::= ROWTS */
- case 345: /* pseudo_column ::= TBNAME */ yytestcase(yyruleno==345);
- case 347: /* pseudo_column ::= QSTART */ yytestcase(yyruleno==347);
- case 348: /* pseudo_column ::= QEND */ yytestcase(yyruleno==348);
- case 349: /* pseudo_column ::= QDURATION */ yytestcase(yyruleno==349);
- case 350: /* pseudo_column ::= WSTART */ yytestcase(yyruleno==350);
- case 351: /* pseudo_column ::= WEND */ yytestcase(yyruleno==351);
- case 352: /* pseudo_column ::= WDURATION */ yytestcase(yyruleno==352);
- case 358: /* literal_func ::= NOW */ yytestcase(yyruleno==358);
-{ yylhsminor.yy840 = createRawExprNode(pCxt, &yymsp[0].minor.yy0, createFunctionNode(pCxt, &yymsp[0].minor.yy0, NULL)); }
- yymsp[0].minor.yy840 = yylhsminor.yy840;
- break;
- case 346: /* pseudo_column ::= table_name NK_DOT TBNAME */
-{ yylhsminor.yy840 = createRawExprNodeExt(pCxt, &yymsp[-2].minor.yy617, &yymsp[0].minor.yy0, createFunctionNode(pCxt, &yymsp[0].minor.yy0, createNodeList(pCxt, createValueNode(pCxt, TSDB_DATA_TYPE_BINARY, &yymsp[-2].minor.yy617)))); }
- yymsp[-2].minor.yy840 = yylhsminor.yy840;
- break;
- case 353: /* function_expression ::= function_name NK_LP expression_list NK_RP */
- case 354: /* function_expression ::= star_func NK_LP star_func_para_list NK_RP */ yytestcase(yyruleno==354);
-{ yylhsminor.yy840 = createRawExprNodeExt(pCxt, &yymsp[-3].minor.yy617, &yymsp[0].minor.yy0, createFunctionNode(pCxt, &yymsp[-3].minor.yy617, yymsp[-1].minor.yy544)); }
- yymsp[-3].minor.yy840 = yylhsminor.yy840;
- break;
- case 355: /* function_expression ::= CAST NK_LP expression AS type_name NK_RP */
-{ yylhsminor.yy840 = createRawExprNodeExt(pCxt, &yymsp[-5].minor.yy0, &yymsp[0].minor.yy0, createCastFunctionNode(pCxt, releaseRawExprNode(pCxt, yymsp[-3].minor.yy840), yymsp[-1].minor.yy784)); }
- yymsp[-5].minor.yy840 = yylhsminor.yy840;
- break;
- case 357: /* literal_func ::= noarg_func NK_LP NK_RP */
-{ yylhsminor.yy840 = createRawExprNodeExt(pCxt, &yymsp[-2].minor.yy617, &yymsp[0].minor.yy0, createFunctionNode(pCxt, &yymsp[-2].minor.yy617, NULL)); }
- yymsp[-2].minor.yy840 = yylhsminor.yy840;
- break;
- case 372: /* star_func_para_list ::= NK_STAR */
-{ yylhsminor.yy544 = createNodeList(pCxt, createColumnNode(pCxt, NULL, &yymsp[0].minor.yy0)); }
- yymsp[0].minor.yy544 = yylhsminor.yy544;
- break;
- case 377: /* star_func_para ::= table_name NK_DOT NK_STAR */
- case 434: /* select_item ::= table_name NK_DOT NK_STAR */ yytestcase(yyruleno==434);
-{ yylhsminor.yy840 = createColumnNode(pCxt, &yymsp[-2].minor.yy617, &yymsp[0].minor.yy0); }
- yymsp[-2].minor.yy840 = yylhsminor.yy840;
- break;
- case 378: /* predicate ::= expression compare_op expression */
- case 383: /* predicate ::= expression in_op in_predicate_value */ yytestcase(yyruleno==383);
+ yymsp[-2].minor.yy312 = yylhsminor.yy312;
+ break;
+ case 347: /* column_reference ::= column_name */
+{ yylhsminor.yy312 = createRawExprNode(pCxt, &yymsp[0].minor.yy149, createColumnNode(pCxt, NULL, &yymsp[0].minor.yy149)); }
+ yymsp[0].minor.yy312 = yylhsminor.yy312;
+ break;
+ case 348: /* column_reference ::= table_name NK_DOT column_name */
+{ yylhsminor.yy312 = createRawExprNodeExt(pCxt, &yymsp[-2].minor.yy149, &yymsp[0].minor.yy149, createColumnNode(pCxt, &yymsp[-2].minor.yy149, &yymsp[0].minor.yy149)); }
+ yymsp[-2].minor.yy312 = yylhsminor.yy312;
+ break;
+ case 349: /* pseudo_column ::= ROWTS */
+ case 350: /* pseudo_column ::= TBNAME */ yytestcase(yyruleno==350);
+ case 352: /* pseudo_column ::= QSTART */ yytestcase(yyruleno==352);
+ case 353: /* pseudo_column ::= QEND */ yytestcase(yyruleno==353);
+ case 354: /* pseudo_column ::= QDURATION */ yytestcase(yyruleno==354);
+ case 355: /* pseudo_column ::= WSTART */ yytestcase(yyruleno==355);
+ case 356: /* pseudo_column ::= WEND */ yytestcase(yyruleno==356);
+ case 357: /* pseudo_column ::= WDURATION */ yytestcase(yyruleno==357);
+ case 363: /* literal_func ::= NOW */ yytestcase(yyruleno==363);
+{ yylhsminor.yy312 = createRawExprNode(pCxt, &yymsp[0].minor.yy0, createFunctionNode(pCxt, &yymsp[0].minor.yy0, NULL)); }
+ yymsp[0].minor.yy312 = yylhsminor.yy312;
+ break;
+ case 351: /* pseudo_column ::= table_name NK_DOT TBNAME */
+{ yylhsminor.yy312 = createRawExprNodeExt(pCxt, &yymsp[-2].minor.yy149, &yymsp[0].minor.yy0, createFunctionNode(pCxt, &yymsp[0].minor.yy0, createNodeList(pCxt, createValueNode(pCxt, TSDB_DATA_TYPE_BINARY, &yymsp[-2].minor.yy149)))); }
+ yymsp[-2].minor.yy312 = yylhsminor.yy312;
+ break;
+ case 358: /* function_expression ::= function_name NK_LP expression_list NK_RP */
+ case 359: /* function_expression ::= star_func NK_LP star_func_para_list NK_RP */ yytestcase(yyruleno==359);
+{ yylhsminor.yy312 = createRawExprNodeExt(pCxt, &yymsp[-3].minor.yy149, &yymsp[0].minor.yy0, createFunctionNode(pCxt, &yymsp[-3].minor.yy149, yymsp[-1].minor.yy824)); }
+ yymsp[-3].minor.yy312 = yylhsminor.yy312;
+ break;
+ case 360: /* function_expression ::= CAST NK_LP expression AS type_name NK_RP */
+{ yylhsminor.yy312 = createRawExprNodeExt(pCxt, &yymsp[-5].minor.yy0, &yymsp[0].minor.yy0, createCastFunctionNode(pCxt, releaseRawExprNode(pCxt, yymsp[-3].minor.yy312), yymsp[-1].minor.yy84)); }
+ yymsp[-5].minor.yy312 = yylhsminor.yy312;
+ break;
+ case 362: /* literal_func ::= noarg_func NK_LP NK_RP */
+{ yylhsminor.yy312 = createRawExprNodeExt(pCxt, &yymsp[-2].minor.yy149, &yymsp[0].minor.yy0, createFunctionNode(pCxt, &yymsp[-2].minor.yy149, NULL)); }
+ yymsp[-2].minor.yy312 = yylhsminor.yy312;
+ break;
+ case 377: /* star_func_para_list ::= NK_STAR */
+{ yylhsminor.yy824 = createNodeList(pCxt, createColumnNode(pCxt, NULL, &yymsp[0].minor.yy0)); }
+ yymsp[0].minor.yy824 = yylhsminor.yy824;
+ break;
+ case 382: /* star_func_para ::= table_name NK_DOT NK_STAR */
+ case 439: /* select_item ::= table_name NK_DOT NK_STAR */ yytestcase(yyruleno==439);
+{ yylhsminor.yy312 = createColumnNode(pCxt, &yymsp[-2].minor.yy149, &yymsp[0].minor.yy0); }
+ yymsp[-2].minor.yy312 = yylhsminor.yy312;
+ break;
+ case 383: /* predicate ::= expression compare_op expression */
+ case 388: /* predicate ::= expression in_op in_predicate_value */ yytestcase(yyruleno==388);
{
- SToken s = getTokenFromRawExprNode(pCxt, yymsp[-2].minor.yy840);
- SToken e = getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy840);
- yylhsminor.yy840 = createRawExprNodeExt(pCxt, &s, &e, createOperatorNode(pCxt, yymsp[-1].minor.yy198, releaseRawExprNode(pCxt, yymsp[-2].minor.yy840), releaseRawExprNode(pCxt, yymsp[0].minor.yy840)));
+ SToken s = getTokenFromRawExprNode(pCxt, yymsp[-2].minor.yy312);
+ SToken e = getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy312);
+ yylhsminor.yy312 = createRawExprNodeExt(pCxt, &s, &e, createOperatorNode(pCxt, yymsp[-1].minor.yy320, releaseRawExprNode(pCxt, yymsp[-2].minor.yy312), releaseRawExprNode(pCxt, yymsp[0].minor.yy312)));
}
- yymsp[-2].minor.yy840 = yylhsminor.yy840;
+ yymsp[-2].minor.yy312 = yylhsminor.yy312;
break;
- case 379: /* predicate ::= expression BETWEEN expression AND expression */
+ case 384: /* predicate ::= expression BETWEEN expression AND expression */
{
- SToken s = getTokenFromRawExprNode(pCxt, yymsp[-4].minor.yy840);
- SToken e = getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy840);
- yylhsminor.yy840 = createRawExprNodeExt(pCxt, &s, &e, createBetweenAnd(pCxt, releaseRawExprNode(pCxt, yymsp[-4].minor.yy840), releaseRawExprNode(pCxt, yymsp[-2].minor.yy840), releaseRawExprNode(pCxt, yymsp[0].minor.yy840)));
+ SToken s = getTokenFromRawExprNode(pCxt, yymsp[-4].minor.yy312);
+ SToken e = getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy312);
+ yylhsminor.yy312 = createRawExprNodeExt(pCxt, &s, &e, createBetweenAnd(pCxt, releaseRawExprNode(pCxt, yymsp[-4].minor.yy312), releaseRawExprNode(pCxt, yymsp[-2].minor.yy312), releaseRawExprNode(pCxt, yymsp[0].minor.yy312)));
}
- yymsp[-4].minor.yy840 = yylhsminor.yy840;
+ yymsp[-4].minor.yy312 = yylhsminor.yy312;
break;
- case 380: /* predicate ::= expression NOT BETWEEN expression AND expression */
+ case 385: /* predicate ::= expression NOT BETWEEN expression AND expression */
{
- SToken s = getTokenFromRawExprNode(pCxt, yymsp[-5].minor.yy840);
- SToken e = getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy840);
- yylhsminor.yy840 = createRawExprNodeExt(pCxt, &s, &e, createNotBetweenAnd(pCxt, releaseRawExprNode(pCxt, yymsp[-5].minor.yy840), releaseRawExprNode(pCxt, yymsp[-2].minor.yy840), releaseRawExprNode(pCxt, yymsp[0].minor.yy840)));
+ SToken s = getTokenFromRawExprNode(pCxt, yymsp[-5].minor.yy312);
+ SToken e = getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy312);
+ yylhsminor.yy312 = createRawExprNodeExt(pCxt, &s, &e, createNotBetweenAnd(pCxt, releaseRawExprNode(pCxt, yymsp[-5].minor.yy312), releaseRawExprNode(pCxt, yymsp[-2].minor.yy312), releaseRawExprNode(pCxt, yymsp[0].minor.yy312)));
}
- yymsp[-5].minor.yy840 = yylhsminor.yy840;
+ yymsp[-5].minor.yy312 = yylhsminor.yy312;
break;
- case 381: /* predicate ::= expression IS NULL */
+ case 386: /* predicate ::= expression IS NULL */
{
- SToken s = getTokenFromRawExprNode(pCxt, yymsp[-2].minor.yy840);
- yylhsminor.yy840 = createRawExprNodeExt(pCxt, &s, &yymsp[0].minor.yy0, createOperatorNode(pCxt, OP_TYPE_IS_NULL, releaseRawExprNode(pCxt, yymsp[-2].minor.yy840), NULL));
+ SToken s = getTokenFromRawExprNode(pCxt, yymsp[-2].minor.yy312);
+ yylhsminor.yy312 = createRawExprNodeExt(pCxt, &s, &yymsp[0].minor.yy0, createOperatorNode(pCxt, OP_TYPE_IS_NULL, releaseRawExprNode(pCxt, yymsp[-2].minor.yy312), NULL));
}
- yymsp[-2].minor.yy840 = yylhsminor.yy840;
+ yymsp[-2].minor.yy312 = yylhsminor.yy312;
break;
- case 382: /* predicate ::= expression IS NOT NULL */
+ case 387: /* predicate ::= expression IS NOT NULL */
{
- SToken s = getTokenFromRawExprNode(pCxt, yymsp[-3].minor.yy840);
- yylhsminor.yy840 = createRawExprNodeExt(pCxt, &s, &yymsp[0].minor.yy0, createOperatorNode(pCxt, OP_TYPE_IS_NOT_NULL, releaseRawExprNode(pCxt, yymsp[-3].minor.yy840), NULL));
+ SToken s = getTokenFromRawExprNode(pCxt, yymsp[-3].minor.yy312);
+ yylhsminor.yy312 = createRawExprNodeExt(pCxt, &s, &yymsp[0].minor.yy0, createOperatorNode(pCxt, OP_TYPE_IS_NOT_NULL, releaseRawExprNode(pCxt, yymsp[-3].minor.yy312), NULL));
}
- yymsp[-3].minor.yy840 = yylhsminor.yy840;
+ yymsp[-3].minor.yy312 = yylhsminor.yy312;
break;
- case 384: /* compare_op ::= NK_LT */
-{ yymsp[0].minor.yy198 = OP_TYPE_LOWER_THAN; }
+ case 389: /* compare_op ::= NK_LT */
+{ yymsp[0].minor.yy320 = OP_TYPE_LOWER_THAN; }
break;
- case 385: /* compare_op ::= NK_GT */
-{ yymsp[0].minor.yy198 = OP_TYPE_GREATER_THAN; }
+ case 390: /* compare_op ::= NK_GT */
+{ yymsp[0].minor.yy320 = OP_TYPE_GREATER_THAN; }
break;
- case 386: /* compare_op ::= NK_LE */
-{ yymsp[0].minor.yy198 = OP_TYPE_LOWER_EQUAL; }
+ case 391: /* compare_op ::= NK_LE */
+{ yymsp[0].minor.yy320 = OP_TYPE_LOWER_EQUAL; }
break;
- case 387: /* compare_op ::= NK_GE */
-{ yymsp[0].minor.yy198 = OP_TYPE_GREATER_EQUAL; }
+ case 392: /* compare_op ::= NK_GE */
+{ yymsp[0].minor.yy320 = OP_TYPE_GREATER_EQUAL; }
break;
- case 388: /* compare_op ::= NK_NE */
-{ yymsp[0].minor.yy198 = OP_TYPE_NOT_EQUAL; }
+ case 393: /* compare_op ::= NK_NE */
+{ yymsp[0].minor.yy320 = OP_TYPE_NOT_EQUAL; }
break;
- case 389: /* compare_op ::= NK_EQ */
-{ yymsp[0].minor.yy198 = OP_TYPE_EQUAL; }
+ case 394: /* compare_op ::= NK_EQ */
+{ yymsp[0].minor.yy320 = OP_TYPE_EQUAL; }
break;
- case 390: /* compare_op ::= LIKE */
-{ yymsp[0].minor.yy198 = OP_TYPE_LIKE; }
+ case 395: /* compare_op ::= LIKE */
+{ yymsp[0].minor.yy320 = OP_TYPE_LIKE; }
break;
- case 391: /* compare_op ::= NOT LIKE */
-{ yymsp[-1].minor.yy198 = OP_TYPE_NOT_LIKE; }
+ case 396: /* compare_op ::= NOT LIKE */
+{ yymsp[-1].minor.yy320 = OP_TYPE_NOT_LIKE; }
break;
- case 392: /* compare_op ::= MATCH */
-{ yymsp[0].minor.yy198 = OP_TYPE_MATCH; }
+ case 397: /* compare_op ::= MATCH */
+{ yymsp[0].minor.yy320 = OP_TYPE_MATCH; }
break;
- case 393: /* compare_op ::= NMATCH */
-{ yymsp[0].minor.yy198 = OP_TYPE_NMATCH; }
+ case 398: /* compare_op ::= NMATCH */
+{ yymsp[0].minor.yy320 = OP_TYPE_NMATCH; }
break;
- case 394: /* compare_op ::= CONTAINS */
-{ yymsp[0].minor.yy198 = OP_TYPE_JSON_CONTAINS; }
+ case 399: /* compare_op ::= CONTAINS */
+{ yymsp[0].minor.yy320 = OP_TYPE_JSON_CONTAINS; }
break;
- case 395: /* in_op ::= IN */
-{ yymsp[0].minor.yy198 = OP_TYPE_IN; }
+ case 400: /* in_op ::= IN */
+{ yymsp[0].minor.yy320 = OP_TYPE_IN; }
break;
- case 396: /* in_op ::= NOT IN */
-{ yymsp[-1].minor.yy198 = OP_TYPE_NOT_IN; }
+ case 401: /* in_op ::= NOT IN */
+{ yymsp[-1].minor.yy320 = OP_TYPE_NOT_IN; }
break;
- case 397: /* in_predicate_value ::= NK_LP literal_list NK_RP */
-{ yylhsminor.yy840 = createRawExprNodeExt(pCxt, &yymsp[-2].minor.yy0, &yymsp[0].minor.yy0, createNodeListNode(pCxt, yymsp[-1].minor.yy544)); }
- yymsp[-2].minor.yy840 = yylhsminor.yy840;
+ case 402: /* in_predicate_value ::= NK_LP literal_list NK_RP */
+{ yylhsminor.yy312 = createRawExprNodeExt(pCxt, &yymsp[-2].minor.yy0, &yymsp[0].minor.yy0, createNodeListNode(pCxt, yymsp[-1].minor.yy824)); }
+ yymsp[-2].minor.yy312 = yylhsminor.yy312;
break;
- case 399: /* boolean_value_expression ::= NOT boolean_primary */
+ case 404: /* boolean_value_expression ::= NOT boolean_primary */
{
- SToken e = getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy840);
- yylhsminor.yy840 = createRawExprNodeExt(pCxt, &yymsp[-1].minor.yy0, &e, createLogicConditionNode(pCxt, LOGIC_COND_TYPE_NOT, releaseRawExprNode(pCxt, yymsp[0].minor.yy840), NULL));
+ SToken e = getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy312);
+ yylhsminor.yy312 = createRawExprNodeExt(pCxt, &yymsp[-1].minor.yy0, &e, createLogicConditionNode(pCxt, LOGIC_COND_TYPE_NOT, releaseRawExprNode(pCxt, yymsp[0].minor.yy312), NULL));
}
- yymsp[-1].minor.yy840 = yylhsminor.yy840;
+ yymsp[-1].minor.yy312 = yylhsminor.yy312;
break;
- case 400: /* boolean_value_expression ::= boolean_value_expression OR boolean_value_expression */
+ case 405: /* boolean_value_expression ::= boolean_value_expression OR boolean_value_expression */
{
- SToken s = getTokenFromRawExprNode(pCxt, yymsp[-2].minor.yy840);
- SToken e = getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy840);
- yylhsminor.yy840 = createRawExprNodeExt(pCxt, &s, &e, createLogicConditionNode(pCxt, LOGIC_COND_TYPE_OR, releaseRawExprNode(pCxt, yymsp[-2].minor.yy840), releaseRawExprNode(pCxt, yymsp[0].minor.yy840)));
+ SToken s = getTokenFromRawExprNode(pCxt, yymsp[-2].minor.yy312);
+ SToken e = getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy312);
+ yylhsminor.yy312 = createRawExprNodeExt(pCxt, &s, &e, createLogicConditionNode(pCxt, LOGIC_COND_TYPE_OR, releaseRawExprNode(pCxt, yymsp[-2].minor.yy312), releaseRawExprNode(pCxt, yymsp[0].minor.yy312)));
}
- yymsp[-2].minor.yy840 = yylhsminor.yy840;
+ yymsp[-2].minor.yy312 = yylhsminor.yy312;
break;
- case 401: /* boolean_value_expression ::= boolean_value_expression AND boolean_value_expression */
+ case 406: /* boolean_value_expression ::= boolean_value_expression AND boolean_value_expression */
{
- SToken s = getTokenFromRawExprNode(pCxt, yymsp[-2].minor.yy840);
- SToken e = getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy840);
- yylhsminor.yy840 = createRawExprNodeExt(pCxt, &s, &e, createLogicConditionNode(pCxt, LOGIC_COND_TYPE_AND, releaseRawExprNode(pCxt, yymsp[-2].minor.yy840), releaseRawExprNode(pCxt, yymsp[0].minor.yy840)));
+ SToken s = getTokenFromRawExprNode(pCxt, yymsp[-2].minor.yy312);
+ SToken e = getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy312);
+ yylhsminor.yy312 = createRawExprNodeExt(pCxt, &s, &e, createLogicConditionNode(pCxt, LOGIC_COND_TYPE_AND, releaseRawExprNode(pCxt, yymsp[-2].minor.yy312), releaseRawExprNode(pCxt, yymsp[0].minor.yy312)));
}
- yymsp[-2].minor.yy840 = yylhsminor.yy840;
+ yymsp[-2].minor.yy312 = yylhsminor.yy312;
+ break;
+ case 412: /* from_clause_opt ::= FROM table_reference_list */
+ case 441: /* where_clause_opt ::= WHERE search_condition */ yytestcase(yyruleno==441);
+ case 464: /* having_clause_opt ::= HAVING search_condition */ yytestcase(yyruleno==464);
+{ yymsp[-1].minor.yy312 = yymsp[0].minor.yy312; }
break;
- case 409: /* table_reference_list ::= table_reference_list NK_COMMA table_reference */
-{ yylhsminor.yy840 = createJoinTableNode(pCxt, JOIN_TYPE_INNER, yymsp[-2].minor.yy840, yymsp[0].minor.yy840, NULL); }
- yymsp[-2].minor.yy840 = yylhsminor.yy840;
+ case 414: /* table_reference_list ::= table_reference_list NK_COMMA table_reference */
+{ yylhsminor.yy312 = createJoinTableNode(pCxt, JOIN_TYPE_INNER, yymsp[-2].minor.yy312, yymsp[0].minor.yy312, NULL); }
+ yymsp[-2].minor.yy312 = yylhsminor.yy312;
break;
- case 412: /* table_primary ::= table_name alias_opt */
-{ yylhsminor.yy840 = createRealTableNode(pCxt, NULL, &yymsp[-1].minor.yy617, &yymsp[0].minor.yy617); }
- yymsp[-1].minor.yy840 = yylhsminor.yy840;
+ case 417: /* table_primary ::= table_name alias_opt */
+{ yylhsminor.yy312 = createRealTableNode(pCxt, NULL, &yymsp[-1].minor.yy149, &yymsp[0].minor.yy149); }
+ yymsp[-1].minor.yy312 = yylhsminor.yy312;
break;
- case 413: /* table_primary ::= db_name NK_DOT table_name alias_opt */
-{ yylhsminor.yy840 = createRealTableNode(pCxt, &yymsp[-3].minor.yy617, &yymsp[-1].minor.yy617, &yymsp[0].minor.yy617); }
- yymsp[-3].minor.yy840 = yylhsminor.yy840;
+ case 418: /* table_primary ::= db_name NK_DOT table_name alias_opt */
+{ yylhsminor.yy312 = createRealTableNode(pCxt, &yymsp[-3].minor.yy149, &yymsp[-1].minor.yy149, &yymsp[0].minor.yy149); }
+ yymsp[-3].minor.yy312 = yylhsminor.yy312;
break;
- case 414: /* table_primary ::= subquery alias_opt */
-{ yylhsminor.yy840 = createTempTableNode(pCxt, releaseRawExprNode(pCxt, yymsp[-1].minor.yy840), &yymsp[0].minor.yy617); }
- yymsp[-1].minor.yy840 = yylhsminor.yy840;
+ case 419: /* table_primary ::= subquery alias_opt */
+{ yylhsminor.yy312 = createTempTableNode(pCxt, releaseRawExprNode(pCxt, yymsp[-1].minor.yy312), &yymsp[0].minor.yy149); }
+ yymsp[-1].minor.yy312 = yylhsminor.yy312;
break;
- case 416: /* alias_opt ::= */
-{ yymsp[1].minor.yy617 = nil_token; }
+ case 421: /* alias_opt ::= */
+{ yymsp[1].minor.yy149 = nil_token; }
break;
- case 417: /* alias_opt ::= table_alias */
-{ yylhsminor.yy617 = yymsp[0].minor.yy617; }
- yymsp[0].minor.yy617 = yylhsminor.yy617;
+ case 422: /* alias_opt ::= table_alias */
+{ yylhsminor.yy149 = yymsp[0].minor.yy149; }
+ yymsp[0].minor.yy149 = yylhsminor.yy149;
break;
- case 418: /* alias_opt ::= AS table_alias */
-{ yymsp[-1].minor.yy617 = yymsp[0].minor.yy617; }
+ case 423: /* alias_opt ::= AS table_alias */
+{ yymsp[-1].minor.yy149 = yymsp[0].minor.yy149; }
break;
- case 419: /* parenthesized_joined_table ::= NK_LP joined_table NK_RP */
- case 420: /* parenthesized_joined_table ::= NK_LP parenthesized_joined_table NK_RP */ yytestcase(yyruleno==420);
-{ yymsp[-2].minor.yy840 = yymsp[-1].minor.yy840; }
+ case 424: /* parenthesized_joined_table ::= NK_LP joined_table NK_RP */
+ case 425: /* parenthesized_joined_table ::= NK_LP parenthesized_joined_table NK_RP */ yytestcase(yyruleno==425);
+{ yymsp[-2].minor.yy312 = yymsp[-1].minor.yy312; }
break;
- case 421: /* joined_table ::= table_reference join_type JOIN table_reference ON search_condition */
-{ yylhsminor.yy840 = createJoinTableNode(pCxt, yymsp[-4].minor.yy708, yymsp[-5].minor.yy840, yymsp[-2].minor.yy840, yymsp[0].minor.yy840); }
- yymsp[-5].minor.yy840 = yylhsminor.yy840;
+ case 426: /* joined_table ::= table_reference join_type JOIN table_reference ON search_condition */
+{ yylhsminor.yy312 = createJoinTableNode(pCxt, yymsp[-4].minor.yy832, yymsp[-5].minor.yy312, yymsp[-2].minor.yy312, yymsp[0].minor.yy312); }
+ yymsp[-5].minor.yy312 = yylhsminor.yy312;
break;
- case 422: /* join_type ::= */
-{ yymsp[1].minor.yy708 = JOIN_TYPE_INNER; }
+ case 427: /* join_type ::= */
+{ yymsp[1].minor.yy832 = JOIN_TYPE_INNER; }
break;
- case 423: /* join_type ::= INNER */
-{ yymsp[0].minor.yy708 = JOIN_TYPE_INNER; }
+ case 428: /* join_type ::= INNER */
+{ yymsp[0].minor.yy832 = JOIN_TYPE_INNER; }
break;
- case 424: /* query_specification ::= SELECT set_quantifier_opt select_list from_clause_opt where_clause_opt partition_by_clause_opt range_opt every_opt fill_opt twindow_clause_opt group_by_clause_opt having_clause_opt */
+ case 429: /* query_specification ::= SELECT set_quantifier_opt select_list from_clause_opt where_clause_opt partition_by_clause_opt range_opt every_opt fill_opt twindow_clause_opt group_by_clause_opt having_clause_opt */
{
- yymsp[-11].minor.yy840 = createSelectStmt(pCxt, yymsp[-10].minor.yy313, yymsp[-9].minor.yy544, yymsp[-8].minor.yy840);
- yymsp[-11].minor.yy840 = addWhereClause(pCxt, yymsp[-11].minor.yy840, yymsp[-7].minor.yy840);
- yymsp[-11].minor.yy840 = addPartitionByClause(pCxt, yymsp[-11].minor.yy840, yymsp[-6].minor.yy544);
- yymsp[-11].minor.yy840 = addWindowClauseClause(pCxt, yymsp[-11].minor.yy840, yymsp[-2].minor.yy840);
- yymsp[-11].minor.yy840 = addGroupByClause(pCxt, yymsp[-11].minor.yy840, yymsp[-1].minor.yy544);
- yymsp[-11].minor.yy840 = addHavingClause(pCxt, yymsp[-11].minor.yy840, yymsp[0].minor.yy840);
- yymsp[-11].minor.yy840 = addRangeClause(pCxt, yymsp[-11].minor.yy840, yymsp[-5].minor.yy840);
- yymsp[-11].minor.yy840 = addEveryClause(pCxt, yymsp[-11].minor.yy840, yymsp[-4].minor.yy840);
- yymsp[-11].minor.yy840 = addFillClause(pCxt, yymsp[-11].minor.yy840, yymsp[-3].minor.yy840);
+ yymsp[-11].minor.yy312 = createSelectStmt(pCxt, yymsp[-10].minor.yy497, yymsp[-9].minor.yy824, yymsp[-8].minor.yy312);
+ yymsp[-11].minor.yy312 = addWhereClause(pCxt, yymsp[-11].minor.yy312, yymsp[-7].minor.yy312);
+ yymsp[-11].minor.yy312 = addPartitionByClause(pCxt, yymsp[-11].minor.yy312, yymsp[-6].minor.yy824);
+ yymsp[-11].minor.yy312 = addWindowClauseClause(pCxt, yymsp[-11].minor.yy312, yymsp[-2].minor.yy312);
+ yymsp[-11].minor.yy312 = addGroupByClause(pCxt, yymsp[-11].minor.yy312, yymsp[-1].minor.yy824);
+ yymsp[-11].minor.yy312 = addHavingClause(pCxt, yymsp[-11].minor.yy312, yymsp[0].minor.yy312);
+ yymsp[-11].minor.yy312 = addRangeClause(pCxt, yymsp[-11].minor.yy312, yymsp[-5].minor.yy312);
+ yymsp[-11].minor.yy312 = addEveryClause(pCxt, yymsp[-11].minor.yy312, yymsp[-4].minor.yy312);
+ yymsp[-11].minor.yy312 = addFillClause(pCxt, yymsp[-11].minor.yy312, yymsp[-3].minor.yy312);
}
break;
- case 427: /* set_quantifier_opt ::= ALL */
-{ yymsp[0].minor.yy313 = false; }
+ case 432: /* set_quantifier_opt ::= ALL */
+{ yymsp[0].minor.yy497 = false; }
break;
- case 430: /* select_item ::= NK_STAR */
-{ yylhsminor.yy840 = createColumnNode(pCxt, NULL, &yymsp[0].minor.yy0); }
- yymsp[0].minor.yy840 = yylhsminor.yy840;
+ case 435: /* select_item ::= NK_STAR */
+{ yylhsminor.yy312 = createColumnNode(pCxt, NULL, &yymsp[0].minor.yy0); }
+ yymsp[0].minor.yy312 = yylhsminor.yy312;
break;
- case 432: /* select_item ::= common_expression column_alias */
-{ yylhsminor.yy840 = setProjectionAlias(pCxt, releaseRawExprNode(pCxt, yymsp[-1].minor.yy840), &yymsp[0].minor.yy617); }
- yymsp[-1].minor.yy840 = yylhsminor.yy840;
+ case 437: /* select_item ::= common_expression column_alias */
+{ yylhsminor.yy312 = setProjectionAlias(pCxt, releaseRawExprNode(pCxt, yymsp[-1].minor.yy312), &yymsp[0].minor.yy149); }
+ yymsp[-1].minor.yy312 = yylhsminor.yy312;
break;
- case 433: /* select_item ::= common_expression AS column_alias */
-{ yylhsminor.yy840 = setProjectionAlias(pCxt, releaseRawExprNode(pCxt, yymsp[-2].minor.yy840), &yymsp[0].minor.yy617); }
- yymsp[-2].minor.yy840 = yylhsminor.yy840;
+ case 438: /* select_item ::= common_expression AS column_alias */
+{ yylhsminor.yy312 = setProjectionAlias(pCxt, releaseRawExprNode(pCxt, yymsp[-2].minor.yy312), &yymsp[0].minor.yy149); }
+ yymsp[-2].minor.yy312 = yylhsminor.yy312;
break;
- case 438: /* partition_by_clause_opt ::= PARTITION BY expression_list */
- case 455: /* group_by_clause_opt ::= GROUP BY group_by_list */ yytestcase(yyruleno==455);
- case 471: /* order_by_clause_opt ::= ORDER BY sort_specification_list */ yytestcase(yyruleno==471);
-{ yymsp[-2].minor.yy544 = yymsp[0].minor.yy544; }
+ case 443: /* partition_by_clause_opt ::= PARTITION BY expression_list */
+ case 460: /* group_by_clause_opt ::= GROUP BY group_by_list */ yytestcase(yyruleno==460);
+ case 476: /* order_by_clause_opt ::= ORDER BY sort_specification_list */ yytestcase(yyruleno==476);
+{ yymsp[-2].minor.yy824 = yymsp[0].minor.yy824; }
break;
- case 440: /* twindow_clause_opt ::= SESSION NK_LP column_reference NK_COMMA duration_literal NK_RP */
-{ yymsp[-5].minor.yy840 = createSessionWindowNode(pCxt, releaseRawExprNode(pCxt, yymsp[-3].minor.yy840), releaseRawExprNode(pCxt, yymsp[-1].minor.yy840)); }
+ case 445: /* twindow_clause_opt ::= SESSION NK_LP column_reference NK_COMMA duration_literal NK_RP */
+{ yymsp[-5].minor.yy312 = createSessionWindowNode(pCxt, releaseRawExprNode(pCxt, yymsp[-3].minor.yy312), releaseRawExprNode(pCxt, yymsp[-1].minor.yy312)); }
break;
- case 441: /* twindow_clause_opt ::= STATE_WINDOW NK_LP expression NK_RP */
-{ yymsp[-3].minor.yy840 = createStateWindowNode(pCxt, releaseRawExprNode(pCxt, yymsp[-1].minor.yy840)); }
+ case 446: /* twindow_clause_opt ::= STATE_WINDOW NK_LP expression NK_RP */
+{ yymsp[-3].minor.yy312 = createStateWindowNode(pCxt, releaseRawExprNode(pCxt, yymsp[-1].minor.yy312)); }
break;
- case 442: /* twindow_clause_opt ::= INTERVAL NK_LP duration_literal NK_RP sliding_opt fill_opt */
-{ yymsp[-5].minor.yy840 = createIntervalWindowNode(pCxt, releaseRawExprNode(pCxt, yymsp[-3].minor.yy840), NULL, yymsp[-1].minor.yy840, yymsp[0].minor.yy840); }
+ case 447: /* twindow_clause_opt ::= INTERVAL NK_LP duration_literal NK_RP sliding_opt fill_opt */
+{ yymsp[-5].minor.yy312 = createIntervalWindowNode(pCxt, releaseRawExprNode(pCxt, yymsp[-3].minor.yy312), NULL, yymsp[-1].minor.yy312, yymsp[0].minor.yy312); }
break;
- case 443: /* twindow_clause_opt ::= INTERVAL NK_LP duration_literal NK_COMMA duration_literal NK_RP sliding_opt fill_opt */
-{ yymsp[-7].minor.yy840 = createIntervalWindowNode(pCxt, releaseRawExprNode(pCxt, yymsp[-5].minor.yy840), releaseRawExprNode(pCxt, yymsp[-3].minor.yy840), yymsp[-1].minor.yy840, yymsp[0].minor.yy840); }
+ case 448: /* twindow_clause_opt ::= INTERVAL NK_LP duration_literal NK_COMMA duration_literal NK_RP sliding_opt fill_opt */
+{ yymsp[-7].minor.yy312 = createIntervalWindowNode(pCxt, releaseRawExprNode(pCxt, yymsp[-5].minor.yy312), releaseRawExprNode(pCxt, yymsp[-3].minor.yy312), yymsp[-1].minor.yy312, yymsp[0].minor.yy312); }
break;
- case 445: /* sliding_opt ::= SLIDING NK_LP duration_literal NK_RP */
- case 463: /* every_opt ::= EVERY NK_LP duration_literal NK_RP */ yytestcase(yyruleno==463);
-{ yymsp[-3].minor.yy840 = releaseRawExprNode(pCxt, yymsp[-1].minor.yy840); }
+ case 450: /* sliding_opt ::= SLIDING NK_LP duration_literal NK_RP */
+ case 468: /* every_opt ::= EVERY NK_LP duration_literal NK_RP */ yytestcase(yyruleno==468);
+{ yymsp[-3].minor.yy312 = releaseRawExprNode(pCxt, yymsp[-1].minor.yy312); }
break;
- case 447: /* fill_opt ::= FILL NK_LP fill_mode NK_RP */
-{ yymsp[-3].minor.yy840 = createFillNode(pCxt, yymsp[-1].minor.yy816, NULL); }
+ case 452: /* fill_opt ::= FILL NK_LP fill_mode NK_RP */
+{ yymsp[-3].minor.yy312 = createFillNode(pCxt, yymsp[-1].minor.yy134, NULL); }
break;
- case 448: /* fill_opt ::= FILL NK_LP VALUE NK_COMMA literal_list NK_RP */
-{ yymsp[-5].minor.yy840 = createFillNode(pCxt, FILL_MODE_VALUE, createNodeListNode(pCxt, yymsp[-1].minor.yy544)); }
+ case 453: /* fill_opt ::= FILL NK_LP VALUE NK_COMMA literal_list NK_RP */
+{ yymsp[-5].minor.yy312 = createFillNode(pCxt, FILL_MODE_VALUE, createNodeListNode(pCxt, yymsp[-1].minor.yy824)); }
break;
- case 449: /* fill_mode ::= NONE */
-{ yymsp[0].minor.yy816 = FILL_MODE_NONE; }
+ case 454: /* fill_mode ::= NONE */
+{ yymsp[0].minor.yy134 = FILL_MODE_NONE; }
break;
- case 450: /* fill_mode ::= PREV */
-{ yymsp[0].minor.yy816 = FILL_MODE_PREV; }
+ case 455: /* fill_mode ::= PREV */
+{ yymsp[0].minor.yy134 = FILL_MODE_PREV; }
break;
- case 451: /* fill_mode ::= NULL */
-{ yymsp[0].minor.yy816 = FILL_MODE_NULL; }
+ case 456: /* fill_mode ::= NULL */
+{ yymsp[0].minor.yy134 = FILL_MODE_NULL; }
break;
- case 452: /* fill_mode ::= LINEAR */
-{ yymsp[0].minor.yy816 = FILL_MODE_LINEAR; }
+ case 457: /* fill_mode ::= LINEAR */
+{ yymsp[0].minor.yy134 = FILL_MODE_LINEAR; }
break;
- case 453: /* fill_mode ::= NEXT */
-{ yymsp[0].minor.yy816 = FILL_MODE_NEXT; }
+ case 458: /* fill_mode ::= NEXT */
+{ yymsp[0].minor.yy134 = FILL_MODE_NEXT; }
break;
- case 456: /* group_by_list ::= expression */
-{ yylhsminor.yy544 = createNodeList(pCxt, createGroupingSetNode(pCxt, releaseRawExprNode(pCxt, yymsp[0].minor.yy840))); }
- yymsp[0].minor.yy544 = yylhsminor.yy544;
+ case 461: /* group_by_list ::= expression */
+{ yylhsminor.yy824 = createNodeList(pCxt, createGroupingSetNode(pCxt, releaseRawExprNode(pCxt, yymsp[0].minor.yy312))); }
+ yymsp[0].minor.yy824 = yylhsminor.yy824;
break;
- case 457: /* group_by_list ::= group_by_list NK_COMMA expression */
-{ yylhsminor.yy544 = addNodeToList(pCxt, yymsp[-2].minor.yy544, createGroupingSetNode(pCxt, releaseRawExprNode(pCxt, yymsp[0].minor.yy840))); }
- yymsp[-2].minor.yy544 = yylhsminor.yy544;
+ case 462: /* group_by_list ::= group_by_list NK_COMMA expression */
+{ yylhsminor.yy824 = addNodeToList(pCxt, yymsp[-2].minor.yy824, createGroupingSetNode(pCxt, releaseRawExprNode(pCxt, yymsp[0].minor.yy312))); }
+ yymsp[-2].minor.yy824 = yylhsminor.yy824;
break;
- case 461: /* range_opt ::= RANGE NK_LP expression NK_COMMA expression NK_RP */
-{ yymsp[-5].minor.yy840 = createInterpTimeRange(pCxt, releaseRawExprNode(pCxt, yymsp[-3].minor.yy840), releaseRawExprNode(pCxt, yymsp[-1].minor.yy840)); }
+ case 466: /* range_opt ::= RANGE NK_LP expression NK_COMMA expression NK_RP */
+{ yymsp[-5].minor.yy312 = createInterpTimeRange(pCxt, releaseRawExprNode(pCxt, yymsp[-3].minor.yy312), releaseRawExprNode(pCxt, yymsp[-1].minor.yy312)); }
break;
- case 464: /* query_expression ::= query_expression_body order_by_clause_opt slimit_clause_opt limit_clause_opt */
+ case 469: /* query_expression ::= query_expression_body order_by_clause_opt slimit_clause_opt limit_clause_opt */
{
- yylhsminor.yy840 = addOrderByClause(pCxt, yymsp[-3].minor.yy840, yymsp[-2].minor.yy544);
- yylhsminor.yy840 = addSlimitClause(pCxt, yylhsminor.yy840, yymsp[-1].minor.yy840);
- yylhsminor.yy840 = addLimitClause(pCxt, yylhsminor.yy840, yymsp[0].minor.yy840);
+ yylhsminor.yy312 = addOrderByClause(pCxt, yymsp[-3].minor.yy312, yymsp[-2].minor.yy824);
+ yylhsminor.yy312 = addSlimitClause(pCxt, yylhsminor.yy312, yymsp[-1].minor.yy312);
+ yylhsminor.yy312 = addLimitClause(pCxt, yylhsminor.yy312, yymsp[0].minor.yy312);
}
- yymsp[-3].minor.yy840 = yylhsminor.yy840;
+ yymsp[-3].minor.yy312 = yylhsminor.yy312;
break;
- case 466: /* query_expression_body ::= query_expression_body UNION ALL query_expression_body */
-{ yylhsminor.yy840 = createSetOperator(pCxt, SET_OP_TYPE_UNION_ALL, yymsp[-3].minor.yy840, yymsp[0].minor.yy840); }
- yymsp[-3].minor.yy840 = yylhsminor.yy840;
+ case 471: /* query_expression_body ::= query_expression_body UNION ALL query_expression_body */
+{ yylhsminor.yy312 = createSetOperator(pCxt, SET_OP_TYPE_UNION_ALL, yymsp[-3].minor.yy312, yymsp[0].minor.yy312); }
+ yymsp[-3].minor.yy312 = yylhsminor.yy312;
break;
- case 467: /* query_expression_body ::= query_expression_body UNION query_expression_body */
-{ yylhsminor.yy840 = createSetOperator(pCxt, SET_OP_TYPE_UNION, yymsp[-2].minor.yy840, yymsp[0].minor.yy840); }
- yymsp[-2].minor.yy840 = yylhsminor.yy840;
+ case 472: /* query_expression_body ::= query_expression_body UNION query_expression_body */
+{ yylhsminor.yy312 = createSetOperator(pCxt, SET_OP_TYPE_UNION, yymsp[-2].minor.yy312, yymsp[0].minor.yy312); }
+ yymsp[-2].minor.yy312 = yylhsminor.yy312;
break;
- case 469: /* query_primary ::= NK_LP query_expression_body order_by_clause_opt slimit_clause_opt limit_clause_opt NK_RP */
+ case 474: /* query_primary ::= NK_LP query_expression_body order_by_clause_opt slimit_clause_opt limit_clause_opt NK_RP */
{
- yymsp[-5].minor.yy840 = addOrderByClause(pCxt, yymsp[-4].minor.yy840, yymsp[-3].minor.yy544);
- yymsp[-5].minor.yy840 = addSlimitClause(pCxt, yymsp[-5].minor.yy840, yymsp[-2].minor.yy840);
- yymsp[-5].minor.yy840 = addLimitClause(pCxt, yymsp[-5].minor.yy840, yymsp[-1].minor.yy840);
+ yymsp[-5].minor.yy312 = addOrderByClause(pCxt, yymsp[-4].minor.yy312, yymsp[-3].minor.yy824);
+ yymsp[-5].minor.yy312 = addSlimitClause(pCxt, yymsp[-5].minor.yy312, yymsp[-2].minor.yy312);
+ yymsp[-5].minor.yy312 = addLimitClause(pCxt, yymsp[-5].minor.yy312, yymsp[-1].minor.yy312);
}
break;
- case 473: /* slimit_clause_opt ::= SLIMIT NK_INTEGER */
- case 477: /* limit_clause_opt ::= LIMIT NK_INTEGER */ yytestcase(yyruleno==477);
-{ yymsp[-1].minor.yy840 = createLimitNode(pCxt, &yymsp[0].minor.yy0, NULL); }
+ case 478: /* slimit_clause_opt ::= SLIMIT NK_INTEGER */
+ case 482: /* limit_clause_opt ::= LIMIT NK_INTEGER */ yytestcase(yyruleno==482);
+{ yymsp[-1].minor.yy312 = createLimitNode(pCxt, &yymsp[0].minor.yy0, NULL); }
break;
- case 474: /* slimit_clause_opt ::= SLIMIT NK_INTEGER SOFFSET NK_INTEGER */
- case 478: /* limit_clause_opt ::= LIMIT NK_INTEGER OFFSET NK_INTEGER */ yytestcase(yyruleno==478);
-{ yymsp[-3].minor.yy840 = createLimitNode(pCxt, &yymsp[-2].minor.yy0, &yymsp[0].minor.yy0); }
+ case 479: /* slimit_clause_opt ::= SLIMIT NK_INTEGER SOFFSET NK_INTEGER */
+ case 483: /* limit_clause_opt ::= LIMIT NK_INTEGER OFFSET NK_INTEGER */ yytestcase(yyruleno==483);
+{ yymsp[-3].minor.yy312 = createLimitNode(pCxt, &yymsp[-2].minor.yy0, &yymsp[0].minor.yy0); }
break;
- case 475: /* slimit_clause_opt ::= SLIMIT NK_INTEGER NK_COMMA NK_INTEGER */
- case 479: /* limit_clause_opt ::= LIMIT NK_INTEGER NK_COMMA NK_INTEGER */ yytestcase(yyruleno==479);
-{ yymsp[-3].minor.yy840 = createLimitNode(pCxt, &yymsp[0].minor.yy0, &yymsp[-2].minor.yy0); }
+ case 480: /* slimit_clause_opt ::= SLIMIT NK_INTEGER NK_COMMA NK_INTEGER */
+ case 484: /* limit_clause_opt ::= LIMIT NK_INTEGER NK_COMMA NK_INTEGER */ yytestcase(yyruleno==484);
+{ yymsp[-3].minor.yy312 = createLimitNode(pCxt, &yymsp[0].minor.yy0, &yymsp[-2].minor.yy0); }
break;
- case 480: /* subquery ::= NK_LP query_expression NK_RP */
-{ yylhsminor.yy840 = createRawExprNodeExt(pCxt, &yymsp[-2].minor.yy0, &yymsp[0].minor.yy0, yymsp[-1].minor.yy840); }
- yymsp[-2].minor.yy840 = yylhsminor.yy840;
+ case 485: /* subquery ::= NK_LP query_expression NK_RP */
+{ yylhsminor.yy312 = createRawExprNodeExt(pCxt, &yymsp[-2].minor.yy0, &yymsp[0].minor.yy0, yymsp[-1].minor.yy312); }
+ yymsp[-2].minor.yy312 = yylhsminor.yy312;
break;
- case 484: /* sort_specification ::= expression ordering_specification_opt null_ordering_opt */
-{ yylhsminor.yy840 = createOrderByExprNode(pCxt, releaseRawExprNode(pCxt, yymsp[-2].minor.yy840), yymsp[-1].minor.yy204, yymsp[0].minor.yy277); }
- yymsp[-2].minor.yy840 = yylhsminor.yy840;
+ case 489: /* sort_specification ::= expression ordering_specification_opt null_ordering_opt */
+{ yylhsminor.yy312 = createOrderByExprNode(pCxt, releaseRawExprNode(pCxt, yymsp[-2].minor.yy312), yymsp[-1].minor.yy158, yymsp[0].minor.yy417); }
+ yymsp[-2].minor.yy312 = yylhsminor.yy312;
break;
- case 485: /* ordering_specification_opt ::= */
-{ yymsp[1].minor.yy204 = ORDER_ASC; }
+ case 490: /* ordering_specification_opt ::= */
+{ yymsp[1].minor.yy158 = ORDER_ASC; }
break;
- case 486: /* ordering_specification_opt ::= ASC */
-{ yymsp[0].minor.yy204 = ORDER_ASC; }
+ case 491: /* ordering_specification_opt ::= ASC */
+{ yymsp[0].minor.yy158 = ORDER_ASC; }
break;
- case 487: /* ordering_specification_opt ::= DESC */
-{ yymsp[0].minor.yy204 = ORDER_DESC; }
+ case 492: /* ordering_specification_opt ::= DESC */
+{ yymsp[0].minor.yy158 = ORDER_DESC; }
break;
- case 488: /* null_ordering_opt ::= */
-{ yymsp[1].minor.yy277 = NULL_ORDER_DEFAULT; }
+ case 493: /* null_ordering_opt ::= */
+{ yymsp[1].minor.yy417 = NULL_ORDER_DEFAULT; }
break;
- case 489: /* null_ordering_opt ::= NULLS FIRST */
-{ yymsp[-1].minor.yy277 = NULL_ORDER_FIRST; }
+ case 494: /* null_ordering_opt ::= NULLS FIRST */
+{ yymsp[-1].minor.yy417 = NULL_ORDER_FIRST; }
break;
- case 490: /* null_ordering_opt ::= NULLS LAST */
-{ yymsp[-1].minor.yy277 = NULL_ORDER_LAST; }
+ case 495: /* null_ordering_opt ::= NULLS LAST */
+{ yymsp[-1].minor.yy417 = NULL_ORDER_LAST; }
break;
default:
break;
diff --git a/source/libs/parser/test/mockCatalog.cpp b/source/libs/parser/test/mockCatalog.cpp
index b376c33d1aca8951ed31297cd12a1843ebf47462..7725674200c9e9c931f4640a1eecbed7e16b55f2 100644
--- a/source/libs/parser/test/mockCatalog.cpp
+++ b/source/libs/parser/test/mockCatalog.cpp
@@ -32,100 +32,59 @@
namespace {
void generateInformationSchema(MockCatalogService* mcs) {
- {
- ITableBuilder& builder =
- mcs->createTableBuilder(TSDB_INFORMATION_SCHEMA_DB, TSDB_INS_TABLE_DNODES, TSDB_SYSTEM_TABLE, 1)
- .addColumn("endpoint", TSDB_DATA_TYPE_BINARY, TSDB_EP_LEN);
- builder.done();
- }
- {
- ITableBuilder& builder =
- mcs->createTableBuilder(TSDB_INFORMATION_SCHEMA_DB, TSDB_INS_TABLE_MNODES, TSDB_SYSTEM_TABLE, 1)
- .addColumn("endpoint", TSDB_DATA_TYPE_BINARY, TSDB_EP_LEN);
- builder.done();
- }
- {
- ITableBuilder& builder =
- mcs->createTableBuilder(TSDB_INFORMATION_SCHEMA_DB, TSDB_INS_TABLE_MODULES, TSDB_SYSTEM_TABLE, 1)
- .addColumn("endpoint", TSDB_DATA_TYPE_BINARY, TSDB_EP_LEN);
- builder.done();
- }
- {
- ITableBuilder& builder =
- mcs->createTableBuilder(TSDB_INFORMATION_SCHEMA_DB, TSDB_INS_TABLE_QNODES, TSDB_SYSTEM_TABLE, 1)
- .addColumn("endpoint", TSDB_DATA_TYPE_BINARY, TSDB_EP_LEN);
- builder.done();
- }
- {
- ITableBuilder& builder =
- mcs->createTableBuilder(TSDB_INFORMATION_SCHEMA_DB, TSDB_INS_TABLE_DATABASES, TSDB_SYSTEM_TABLE, 1)
- .addColumn("name", TSDB_DATA_TYPE_BINARY, TSDB_DB_NAME_LEN);
- builder.done();
- }
- {
- ITableBuilder& builder =
- mcs->createTableBuilder(TSDB_INFORMATION_SCHEMA_DB, TSDB_INS_TABLE_FUNCTIONS, TSDB_SYSTEM_TABLE, 1)
- .addColumn("name", TSDB_DATA_TYPE_BINARY, TSDB_FUNC_NAME_LEN);
- builder.done();
- }
- {
- ITableBuilder& builder =
- mcs->createTableBuilder(TSDB_INFORMATION_SCHEMA_DB, TSDB_INS_TABLE_INDEXES, TSDB_SYSTEM_TABLE, 3)
- .addColumn("index_name", TSDB_DATA_TYPE_BINARY, TSDB_TABLE_NAME_LEN)
- .addColumn("db_name", TSDB_DATA_TYPE_BINARY, TSDB_DB_NAME_LEN)
- .addColumn("table_name", TSDB_DATA_TYPE_BINARY, TSDB_TABLE_NAME_LEN);
- builder.done();
- }
- {
- ITableBuilder& builder =
- mcs->createTableBuilder(TSDB_INFORMATION_SCHEMA_DB, TSDB_INS_TABLE_STABLES, TSDB_SYSTEM_TABLE, 2)
- .addColumn("db_name", TSDB_DATA_TYPE_BINARY, TSDB_DB_NAME_LEN)
- .addColumn("stable_name", TSDB_DATA_TYPE_BINARY, TSDB_TABLE_NAME_LEN);
- builder.done();
- }
- {
- ITableBuilder& builder =
- mcs->createTableBuilder(TSDB_INFORMATION_SCHEMA_DB, TSDB_INS_TABLE_TABLES, TSDB_SYSTEM_TABLE, 2)
- .addColumn("db_name", TSDB_DATA_TYPE_BINARY, TSDB_DB_NAME_LEN)
- .addColumn("table_name", TSDB_DATA_TYPE_BINARY, TSDB_TABLE_NAME_LEN);
- builder.done();
- }
- {
- ITableBuilder& builder =
- mcs->createTableBuilder(TSDB_INFORMATION_SCHEMA_DB, TSDB_INS_TABLE_TABLE_DISTRIBUTED, TSDB_SYSTEM_TABLE, 1)
- .addColumn("db_name", TSDB_DATA_TYPE_BINARY, TSDB_DB_NAME_LEN);
- builder.done();
- }
- {
- ITableBuilder& builder =
- mcs->createTableBuilder(TSDB_INFORMATION_SCHEMA_DB, TSDB_INS_TABLE_USERS, TSDB_SYSTEM_TABLE, 1)
- .addColumn("name", TSDB_DATA_TYPE_BINARY, TSDB_USER_LEN);
- builder.done();
- }
- {
- ITableBuilder& builder =
- mcs->createTableBuilder(TSDB_INFORMATION_SCHEMA_DB, TSDB_INS_TABLE_VGROUPS, TSDB_SYSTEM_TABLE, 1)
- .addColumn("db_name", TSDB_DATA_TYPE_BINARY, TSDB_DB_NAME_LEN);
- builder.done();
- }
- {
- ITableBuilder& builder =
- mcs->createTableBuilder(TSDB_INFORMATION_SCHEMA_DB, TSDB_INS_TABLE_CONFIGS, TSDB_SYSTEM_TABLE, 1)
- .addColumn("name", TSDB_DATA_TYPE_BINARY, TSDB_CONFIG_OPTION_LEN);
- builder.done();
- }
- {
- ITableBuilder& builder =
- mcs->createTableBuilder(TSDB_INFORMATION_SCHEMA_DB, TSDB_INS_TABLE_DNODE_VARIABLES, TSDB_SYSTEM_TABLE, 1)
- .addColumn("dnode_id", TSDB_DATA_TYPE_INT);
- builder.done();
- }
- {
- ITableBuilder& builder =
- mcs->createTableBuilder(TSDB_INFORMATION_SCHEMA_DB, TSDB_INS_TABLE_CLUSTER, TSDB_SYSTEM_TABLE, 1)
- .addColumn("name", TSDB_DATA_TYPE_BINARY, TSDB_CLUSTER_ID_LEN);
- builder.done();
- }
+ mcs->createTableBuilder(TSDB_INFORMATION_SCHEMA_DB, TSDB_INS_TABLE_DNODES, TSDB_SYSTEM_TABLE, 1)
+ .addColumn("endpoint", TSDB_DATA_TYPE_BINARY, TSDB_EP_LEN)
+ .done();
+ mcs->createTableBuilder(TSDB_INFORMATION_SCHEMA_DB, TSDB_INS_TABLE_MNODES, TSDB_SYSTEM_TABLE, 1)
+ .addColumn("endpoint", TSDB_DATA_TYPE_BINARY, TSDB_EP_LEN)
+ .done();
+ mcs->createTableBuilder(TSDB_INFORMATION_SCHEMA_DB, TSDB_INS_TABLE_MODULES, TSDB_SYSTEM_TABLE, 1)
+ .addColumn("endpoint", TSDB_DATA_TYPE_BINARY, TSDB_EP_LEN)
+ .done();
+ mcs->createTableBuilder(TSDB_INFORMATION_SCHEMA_DB, TSDB_INS_TABLE_QNODES, TSDB_SYSTEM_TABLE, 1)
+ .addColumn("endpoint", TSDB_DATA_TYPE_BINARY, TSDB_EP_LEN)
+ .done();
+ mcs->createTableBuilder(TSDB_INFORMATION_SCHEMA_DB, TSDB_INS_TABLE_DATABASES, TSDB_SYSTEM_TABLE, 1)
+ .addColumn("name", TSDB_DATA_TYPE_BINARY, TSDB_DB_NAME_LEN)
+ .done();
+ mcs->createTableBuilder(TSDB_INFORMATION_SCHEMA_DB, TSDB_INS_TABLE_FUNCTIONS, TSDB_SYSTEM_TABLE, 1)
+ .addColumn("name", TSDB_DATA_TYPE_BINARY, TSDB_FUNC_NAME_LEN)
+ .done();
+ mcs->createTableBuilder(TSDB_INFORMATION_SCHEMA_DB, TSDB_INS_TABLE_INDEXES, TSDB_SYSTEM_TABLE, 3)
+ .addColumn("index_name", TSDB_DATA_TYPE_BINARY, TSDB_TABLE_NAME_LEN)
+ .addColumn("db_name", TSDB_DATA_TYPE_BINARY, TSDB_DB_NAME_LEN)
+ .addColumn("table_name", TSDB_DATA_TYPE_BINARY, TSDB_TABLE_NAME_LEN)
+ .done();
+ mcs->createTableBuilder(TSDB_INFORMATION_SCHEMA_DB, TSDB_INS_TABLE_STABLES, TSDB_SYSTEM_TABLE, 2)
+ .addColumn("db_name", TSDB_DATA_TYPE_BINARY, TSDB_DB_NAME_LEN)
+ .addColumn("stable_name", TSDB_DATA_TYPE_BINARY, TSDB_TABLE_NAME_LEN)
+ .done();
+ mcs->createTableBuilder(TSDB_INFORMATION_SCHEMA_DB, TSDB_INS_TABLE_TABLES, TSDB_SYSTEM_TABLE, 2)
+ .addColumn("db_name", TSDB_DATA_TYPE_BINARY, TSDB_DB_NAME_LEN)
+ .addColumn("table_name", TSDB_DATA_TYPE_BINARY, TSDB_TABLE_NAME_LEN)
+ .done();
+ mcs->createTableBuilder(TSDB_INFORMATION_SCHEMA_DB, TSDB_INS_TABLE_TABLE_DISTRIBUTED, TSDB_SYSTEM_TABLE, 1)
+ .addColumn("db_name", TSDB_DATA_TYPE_BINARY, TSDB_DB_NAME_LEN)
+ .done();
+ mcs->createTableBuilder(TSDB_INFORMATION_SCHEMA_DB, TSDB_INS_TABLE_USERS, TSDB_SYSTEM_TABLE, 1)
+ .addColumn("name", TSDB_DATA_TYPE_BINARY, TSDB_USER_LEN)
+ .done();
+ mcs->createTableBuilder(TSDB_INFORMATION_SCHEMA_DB, TSDB_INS_TABLE_VGROUPS, TSDB_SYSTEM_TABLE, 1)
+ .addColumn("db_name", TSDB_DATA_TYPE_BINARY, TSDB_DB_NAME_LEN)
+ .done();
+ mcs->createTableBuilder(TSDB_INFORMATION_SCHEMA_DB, TSDB_INS_TABLE_CONFIGS, TSDB_SYSTEM_TABLE, 1)
+ .addColumn("name", TSDB_DATA_TYPE_BINARY, TSDB_CONFIG_OPTION_LEN)
+ .done();
+ mcs->createTableBuilder(TSDB_INFORMATION_SCHEMA_DB, TSDB_INS_TABLE_DNODE_VARIABLES, TSDB_SYSTEM_TABLE, 1)
+ .addColumn("dnode_id", TSDB_DATA_TYPE_INT)
+ .done();
+ mcs->createTableBuilder(TSDB_INFORMATION_SCHEMA_DB, TSDB_INS_TABLE_CLUSTER, TSDB_SYSTEM_TABLE, 1)
+ .addColumn("name", TSDB_DATA_TYPE_BINARY, TSDB_CLUSTER_ID_LEN)
+ .done();
+ mcs->createTableBuilder(TSDB_INFORMATION_SCHEMA_DB, TSDB_INS_TABLE_VNODES, TSDB_SYSTEM_TABLE, 2)
+ .addColumn("dnode_id", TSDB_DATA_TYPE_INT)
+ .addColumn("dnode_ep", TSDB_DATA_TYPE_BINARY, TSDB_EP_LEN)
+ .done();
}
void generatePerformanceSchema(MockCatalogService* mcs) {
@@ -137,7 +96,7 @@ void generatePerformanceSchema(MockCatalogService* mcs) {
}
{
ITableBuilder& builder =
- mcs->createTableBuilder(TSDB_PERFORMANCE_SCHEMA_DB, TSDB_PERFS_TABLE_STREAMS, TSDB_SYSTEM_TABLE, 1)
+ mcs->createTableBuilder(TSDB_INFORMATION_SCHEMA_DB, TSDB_INS_TABLE_STREAMS, TSDB_SYSTEM_TABLE, 1)
.addColumn("stream_name", TSDB_DATA_TYPE_BINARY, TSDB_TABLE_NAME_LEN);
builder.done();
}
@@ -149,7 +108,7 @@ void generatePerformanceSchema(MockCatalogService* mcs) {
}
{
ITableBuilder& builder =
- mcs->createTableBuilder(TSDB_PERFORMANCE_SCHEMA_DB, TSDB_PERFS_TABLE_SUBSCRIPTIONS, TSDB_SYSTEM_TABLE, 1)
+ mcs->createTableBuilder(TSDB_INFORMATION_SCHEMA_DB, TSDB_INS_TABLE_SUBSCRIPTIONS, TSDB_SYSTEM_TABLE, 1)
.addColumn("stream_name", TSDB_DATA_TYPE_BINARY, TSDB_TABLE_NAME_LEN);
builder.done();
}
diff --git a/source/libs/parser/test/parAlterToBalanceTest.cpp b/source/libs/parser/test/parAlterToBalanceTest.cpp
index 1caba6eab0384019b01f0c4957a6b9b9ffa1a5d1..3a08ef97564c426c834e26ed0569f77a29184d76 100644
--- a/source/libs/parser/test/parAlterToBalanceTest.cpp
+++ b/source/libs/parser/test/parAlterToBalanceTest.cpp
@@ -88,6 +88,7 @@ TEST_F(ParserInitialATest, alterDnode) {
* | REPLICA int_value -- todo: enum 1, 3, default 1, unit replica
* | STRICT {'off' | 'on'} -- todo: default 'off'
* | WAL_LEVEL int_value -- enum 1, 2, default 1
+ * | SST_TRIGGER int_value -- rang [1, 16], default 8
* }
*/
TEST_F(ParserInitialATest, alterDatabase) {
@@ -112,6 +113,7 @@ TEST_F(ParserInitialATest, alterDatabase) {
expect.cacheLast = -1;
expect.cacheLastSize = -1;
expect.replications = -1;
+ expect.sstTrigger = -1;
};
auto setAlterDbBuffer = [&](int32_t buffer) { expect.buffer = buffer; };
auto setAlterDbPageSize = [&](int32_t pageSize) { expect.pageSize = pageSize; };
@@ -128,6 +130,7 @@ TEST_F(ParserInitialATest, alterDatabase) {
auto setAlterDbStrict = [&](int8_t strict) { expect.strict = strict; };
auto setAlterDbCacheModel = [&](int8_t cacheModel) { expect.cacheLast = cacheModel; };
auto setAlterDbReplica = [&](int8_t replications) { expect.replications = replications; };
+ auto setAlterDbSstTrigger = [&](int8_t sstTrigger) { expect.sstTrigger = sstTrigger; };
setCheckDdlFunc([&](const SQuery* pQuery, ParserStage stage) {
ASSERT_EQ(nodeType(pQuery->pRoot), QUERY_NODE_ALTER_DATABASE_STMT);
@@ -146,6 +149,7 @@ TEST_F(ParserInitialATest, alterDatabase) {
ASSERT_EQ(req.strict, expect.strict);
ASSERT_EQ(req.cacheLast, expect.cacheLast);
ASSERT_EQ(req.replications, expect.replications);
+ ASSERT_EQ(req.sstTrigger, expect.sstTrigger);
});
const int32_t MINUTE_PER_DAY = MILLISECOND_PER_DAY / MILLISECOND_PER_MINUTE;
@@ -157,7 +161,8 @@ TEST_F(ParserInitialATest, alterDatabase) {
setAlterDbFsync(200);
setAlterDbWal(1);
setAlterDbCacheModel(TSDB_CACHE_MODEL_LAST_ROW);
- run("ALTER DATABASE test CACHEMODEL 'last_row' CACHESIZE 32 WAL_FSYNC_PERIOD 200 KEEP 10 WAL_LEVEL 1");
+ setAlterDbSstTrigger(16);
+ run("ALTER DATABASE test CACHEMODEL 'last_row' CACHESIZE 32 WAL_FSYNC_PERIOD 200 KEEP 10 WAL_LEVEL 1 STT_TRIGGER 16");
clearAlterDbReq();
initAlterDb("test");
@@ -231,6 +236,8 @@ TEST_F(ParserInitialATest, alterDatabaseSemanticCheck) {
run("ALTER DATABASE test KEEP 1w", TSDB_CODE_PAR_INVALID_DB_OPTION);
run("ALTER DATABASE test WAL_LEVEL 0", TSDB_CODE_PAR_INVALID_DB_OPTION);
run("ALTER DATABASE test WAL_LEVEL 3", TSDB_CODE_PAR_INVALID_DB_OPTION);
+ run("ALTER DATABASE test STT_TRIGGER 0", TSDB_CODE_PAR_INVALID_DB_OPTION);
+ run("ALTER DATABASE test STT_TRIGGER 17", TSDB_CODE_PAR_INVALID_DB_OPTION);
// Regardless of the specific sentence
run("ALTER DATABASE db WAL_LEVEL 0 # td-14436", TSDB_CODE_PAR_SYNTAX_ERROR, PARSER_STAGE_PARSE);
}
diff --git a/source/libs/parser/test/parInitialCTest.cpp b/source/libs/parser/test/parInitialCTest.cpp
index 9bca6cae0a41a145237b1035c5dd1edb4fdf0cd9..a159d797faed1fccf1194c54ecbe5506531ff0cc 100644
--- a/source/libs/parser/test/parInitialCTest.cpp
+++ b/source/libs/parser/test/parInitialCTest.cpp
@@ -111,10 +111,14 @@ TEST_F(ParserInitialCTest, createDatabase) {
expect.numOfVgroups = TSDB_DEFAULT_VN_PER_DB;
expect.numOfStables = TSDB_DEFAULT_DB_SINGLE_STABLE;
expect.schemaless = TSDB_DEFAULT_DB_SCHEMALESS;
- expect.walRetentionPeriod = TSDB_DEFAULT_DB_WAL_RETENTION_PERIOD;
- expect.walRetentionSize = TSDB_DEFAULT_DB_WAL_RETENTION_SIZE;
- expect.walRollPeriod = TSDB_DEFAULT_DB_WAL_ROLL_PERIOD;
+ expect.walRetentionPeriod = TSDB_REP_DEF_DB_WAL_RET_PERIOD;
+ expect.walRetentionSize = TSDB_REP_DEF_DB_WAL_RET_SIZE;
+ expect.walRollPeriod = TSDB_REP_DEF_DB_WAL_ROLL_PERIOD;
expect.walSegmentSize = TSDB_DEFAULT_DB_WAL_SEGMENT_SIZE;
+ expect.sstTrigger = TSDB_DEFAULT_SST_TRIGGER;
+ expect.hashPrefix = TSDB_DEFAULT_HASH_PREFIX;
+ expect.hashSuffix = TSDB_DEFAULT_HASH_SUFFIX;
+ expect.tsdbPageSize = TSDB_DEFAULT_TSDB_PAGESIZE;
};
auto setDbBufferFunc = [&](int32_t buffer) { expect.buffer = buffer; };
@@ -155,6 +159,10 @@ TEST_F(ParserInitialCTest, createDatabase) {
auto setDbWalRetentionSize = [&](int32_t walRetentionSize) { expect.walRetentionSize = walRetentionSize; };
auto setDbWalRollPeriod = [&](int32_t walRollPeriod) { expect.walRollPeriod = walRollPeriod; };
auto setDbWalSegmentSize = [&](int32_t walSegmentSize) { expect.walSegmentSize = walSegmentSize; };
+ auto setDbSstTrigger = [&](int32_t sstTrigger) { expect.sstTrigger = sstTrigger; };
+ auto setDbHashPrefix = [&](int32_t hashPrefix) { expect.hashPrefix = hashPrefix; };
+ auto setDbHashSuffix = [&](int32_t hashSuffix) { expect.hashSuffix = hashSuffix; };
+ auto setDbTsdbPageSize = [&](int32_t tsdbPageSize) { expect.tsdbPageSize = tsdbPageSize; };
setCheckDdlFunc([&](const SQuery* pQuery, ParserStage stage) {
ASSERT_EQ(nodeType(pQuery->pRoot), QUERY_NODE_CREATE_DATABASE_STMT);
@@ -185,7 +193,10 @@ TEST_F(ParserInitialCTest, createDatabase) {
ASSERT_EQ(req.walRetentionSize, expect.walRetentionSize);
ASSERT_EQ(req.walRollPeriod, expect.walRollPeriod);
ASSERT_EQ(req.walSegmentSize, expect.walSegmentSize);
- // ASSERT_EQ(req.schemaless, expect.schemaless);
+ ASSERT_EQ(req.sstTrigger, expect.sstTrigger);
+ ASSERT_EQ(req.hashPrefix, expect.hashPrefix);
+ ASSERT_EQ(req.hashSuffix, expect.hashSuffix);
+ ASSERT_EQ(req.tsdbPageSize, expect.tsdbPageSize);
ASSERT_EQ(req.ignoreExist, expect.ignoreExist);
ASSERT_EQ(req.numOfRetensions, expect.numOfRetensions);
if (expect.numOfRetensions > 0) {
@@ -233,6 +244,10 @@ TEST_F(ParserInitialCTest, createDatabase) {
setDbWalRetentionSize(-1);
setDbWalRollPeriod(10);
setDbWalSegmentSize(20);
+ setDbSstTrigger(16);
+ setDbHashPrefix(3);
+ setDbHashSuffix(4);
+ setDbTsdbPageSize(32);
run("CREATE DATABASE IF NOT EXISTS wxy_db "
"BUFFER 64 "
"CACHEMODEL 'last_value' "
@@ -256,7 +271,11 @@ TEST_F(ParserInitialCTest, createDatabase) {
"WAL_RETENTION_PERIOD -1 "
"WAL_RETENTION_SIZE -1 "
"WAL_ROLL_PERIOD 10 "
- "WAL_SEGMENT_SIZE 20");
+ "WAL_SEGMENT_SIZE 20 "
+ "STT_TRIGGER 16 "
+ "TABLE_PREFIX 3 "
+ "TABLE_SUFFIX 4 "
+ "TSDB_PAGESIZE 32");
clearCreateDbReq();
setCreateDbReqFunc("wxy_db", 1);
@@ -266,6 +285,14 @@ TEST_F(ParserInitialCTest, createDatabase) {
"DURATION 100m "
"KEEP 1440m,300h,400d ");
clearCreateDbReq();
+
+ setCreateDbReqFunc("wxy_db", 1);
+ setDbReplicaFunc(3);
+ setDbWalRetentionPeriod(TSDB_REPS_DEF_DB_WAL_RET_PERIOD);
+ setDbWalRetentionSize(TSDB_REPS_DEF_DB_WAL_RET_SIZE);
+ setDbWalRollPeriod(TSDB_REPS_DEF_DB_WAL_ROLL_PERIOD);
+ run("CREATE DATABASE IF NOT EXISTS wxy_db REPLICA 3");
+ clearCreateDbReq();
}
TEST_F(ParserInitialCTest, createDatabaseSemanticCheck) {
@@ -568,15 +595,13 @@ TEST_F(ParserInitialCTest, createStream) {
memset(&expect, 0, sizeof(SCMCreateStreamReq));
};
- auto setCreateStreamReqFunc = [&](const char* pStream, const char* pSrcDb, const char* pSql,
- const char* pDstStb = nullptr, int8_t igExists = 0,
- int8_t triggerType = STREAM_TRIGGER_AT_ONCE, int64_t maxDelay = 0,
- int64_t watermark = 0, int8_t igExpired = STREAM_DEFAULT_IGNORE_EXPIRED) {
+ auto setCreateStreamReqFunc = [&](const char* pStream, const char* pSrcDb, const char* pSql, const char* pDstStb,
+ int8_t igExists = 0, int8_t triggerType = STREAM_TRIGGER_AT_ONCE,
+ int64_t maxDelay = 0, int64_t watermark = 0,
+ int8_t igExpired = STREAM_DEFAULT_IGNORE_EXPIRED) {
snprintf(expect.name, sizeof(expect.name), "0.%s", pStream);
snprintf(expect.sourceDB, sizeof(expect.sourceDB), "0.%s", pSrcDb);
- if (NULL != pDstStb) {
- snprintf(expect.targetStbFullName, sizeof(expect.targetStbFullName), "0.test.%s", pDstStb);
- }
+ snprintf(expect.targetStbFullName, sizeof(expect.targetStbFullName), "0.test.%s", pDstStb);
expect.igExists = igExists;
expect.sql = strdup(pSql);
expect.triggerType = triggerType;
@@ -603,15 +628,6 @@ TEST_F(ParserInitialCTest, createStream) {
tFreeSCMCreateStreamReq(&req);
});
- setCreateStreamReqFunc("s1", "test", "create stream s1 as select count(*) from t1 interval(10s)");
- run("CREATE STREAM s1 AS SELECT COUNT(*) FROM t1 INTERVAL(10S)");
- clearCreateStreamReq();
-
- setCreateStreamReqFunc("s1", "test", "create stream if not exists s1 as select count(*) from t1 interval(10s)",
- nullptr, 1);
- run("CREATE STREAM IF NOT EXISTS s1 AS SELECT COUNT(*) FROM t1 INTERVAL(10S)");
- clearCreateStreamReq();
-
setCreateStreamReqFunc("s1", "test", "create stream s1 into st1 as select count(*) from t1 interval(10s)", "st1");
run("CREATE STREAM s1 INTO st1 AS SELECT COUNT(*) FROM t1 INTERVAL(10S)");
clearCreateStreamReq();
@@ -629,7 +645,8 @@ TEST_F(ParserInitialCTest, createStream) {
TEST_F(ParserInitialCTest, createStreamSemanticCheck) {
useDb("root", "test");
- run("CREATE STREAM s1 AS SELECT PERCENTILE(c1, 30) FROM t1 INTERVAL(10S)", TSDB_CODE_PAR_STREAM_NOT_ALLOWED_FUNC);
+ run("CREATE STREAM s1 INTO st1 AS SELECT PERCENTILE(c1, 30) FROM t1 INTERVAL(10S)",
+ TSDB_CODE_PAR_STREAM_NOT_ALLOWED_FUNC);
}
TEST_F(ParserInitialCTest, createTable) {
diff --git a/source/libs/parser/test/parShowToUse.cpp b/source/libs/parser/test/parShowToUse.cpp
index 6590378565849e8b39bab100a324823e2d665848..e33252c072fb1b34e5801098e27c2a51bef51c68 100644
--- a/source/libs/parser/test/parShowToUse.cpp
+++ b/source/libs/parser/test/parShowToUse.cpp
@@ -218,7 +218,13 @@ TEST_F(ParserShowToUseTest, showVgroups) {
run("SHOW test.vgroups");
}
-// todo SHOW vnodes
+TEST_F(ParserShowToUseTest, showVnodes) {
+ useDb("root", "test");
+
+ run("SHOW VNODES 1");
+
+ run("SHOW VNODES 'node1:7030'");
+}
TEST_F(ParserShowToUseTest, splitVgroup) {
useDb("root", "test");
diff --git a/source/libs/parser/test/parTestUtil.cpp b/source/libs/parser/test/parTestUtil.cpp
index 98281b7bf070095b4bb23326b156d5e8764690de..360b904c170e50682b17d9c99a8ec1cd679a6db0 100644
--- a/source/libs/parser/test/parTestUtil.cpp
+++ b/source/libs/parser/test/parTestUtil.cpp
@@ -207,6 +207,7 @@ class ParserTestBaseImpl {
pCxt->db = caseEnv_.db_.c_str();
pCxt->pUser = caseEnv_.user_.c_str();
pCxt->isSuperUser = caseEnv_.user_ == "root";
+ pCxt->enableSysInfo = true;
pCxt->pSql = stmtEnv_.sql_.c_str();
pCxt->sqlLen = stmtEnv_.sql_.length();
pCxt->pMsg = stmtEnv_.msgBuf_.data();
diff --git a/source/libs/planner/src/planLogicCreater.c b/source/libs/planner/src/planLogicCreater.c
index 71f084d41226fee17bd0b8c0d63f69ad07ca3a20..bf72f5210577d6f43f8ae97d098091b3020aeb16 100644
--- a/source/libs/planner/src/planLogicCreater.c
+++ b/source/libs/planner/src/planLogicCreater.c
@@ -44,12 +44,15 @@ static void setColumnInfo(SFunctionNode* pFunc, SColumnNode* pCol) {
pCol->colType = COLUMN_TYPE_TBNAME;
break;
case FUNCTION_TYPE_WSTART:
+ pCol->colId = PRIMARYKEY_TIMESTAMP_COL_ID;
+ pCol->colType = COLUMN_TYPE_WINDOW_START;
+ break;
case FUNCTION_TYPE_WEND:
pCol->colId = PRIMARYKEY_TIMESTAMP_COL_ID;
- pCol->colType = COLUMN_TYPE_WINDOW_PC;
+ pCol->colType = COLUMN_TYPE_WINDOW_END;
break;
case FUNCTION_TYPE_WDURATION:
- pCol->colType = COLUMN_TYPE_WINDOW_PC;
+ pCol->colType = COLUMN_TYPE_WINDOW_DURATION;
break;
case FUNCTION_TYPE_GROUP_KEY:
pCol->colType = COLUMN_TYPE_GROUP_KEY;
@@ -194,28 +197,21 @@ static EScanType getScanType(SLogicPlanContext* pCxt, SNodeList* pScanPseudoCols
return SCAN_TYPE_TABLE;
}
-static SNode* createPrimaryKeyCol(uint64_t tableId) {
+static SNode* createFirstCol(uint64_t tableId, const SSchema* pSchema) {
SColumnNode* pCol = (SColumnNode*)nodesMakeNode(QUERY_NODE_COLUMN);
if (NULL == pCol) {
return NULL;
}
- pCol->node.resType.type = TSDB_DATA_TYPE_TIMESTAMP;
- pCol->node.resType.bytes = tDataTypes[TSDB_DATA_TYPE_TIMESTAMP].bytes;
+ pCol->node.resType.type = pSchema->type;
+ pCol->node.resType.bytes = pSchema->bytes;
pCol->tableId = tableId;
- pCol->colId = PRIMARYKEY_TIMESTAMP_COL_ID;
+ pCol->colId = pSchema->colId;
pCol->colType = COLUMN_TYPE_COLUMN;
- strcpy(pCol->colName, "#primarykey");
+ strcpy(pCol->colName, pSchema->name);
return (SNode*)pCol;
}
-static int32_t addPrimaryKeyCol(uint64_t tableId, SNodeList** pCols) {
- if (NULL == *pCols) {
- *pCols = nodesMakeList();
- if (NULL == *pCols) {
- return TSDB_CODE_OUT_OF_MEMORY;
- }
- }
-
+static int32_t addPrimaryKeyCol(uint64_t tableId, const SSchema* pSchema, SNodeList** pCols) {
bool found = false;
SNode* pCol = NULL;
FOREACH(pCol, *pCols) {
@@ -226,13 +222,25 @@ static int32_t addPrimaryKeyCol(uint64_t tableId, SNodeList** pCols) {
}
if (!found) {
- if (TSDB_CODE_SUCCESS != nodesListStrictAppend(*pCols, createPrimaryKeyCol(tableId))) {
- return TSDB_CODE_OUT_OF_MEMORY;
- }
+ return nodesListMakeStrictAppend(pCols, createFirstCol(tableId, pSchema));
}
return TSDB_CODE_SUCCESS;
}
+static int32_t addSystableFirstCol(uint64_t tableId, const SSchema* pSchema, SNodeList** pCols) {
+ if (LIST_LENGTH(*pCols) > 0) {
+ return TSDB_CODE_SUCCESS;
+ }
+ return nodesListMakeStrictAppend(pCols, createFirstCol(tableId, pSchema));
+}
+
+static int32_t addDefaultScanCol(const STableMeta* pMeta, SNodeList** pCols) {
+ if (TSDB_SYSTEM_TABLE == pMeta->tableType) {
+ return addSystableFirstCol(pMeta->uid, pMeta->schema, pCols);
+ }
+ return addPrimaryKeyCol(pMeta->uid, pMeta->schema, pCols);
+}
+
static int32_t makeScanLogicNode(SLogicPlanContext* pCxt, SRealTableNode* pRealTable, bool hasRepeatScanFuncs,
SLogicNode** pLogicNode) {
SScanLogicNode* pScan = (SScanLogicNode*)nodesMakeNode(QUERY_NODE_LOGIC_PLAN_SCAN);
@@ -296,8 +304,8 @@ static int32_t createScanLogicNode(SLogicPlanContext* pCxt, SSelectStmt* pSelect
pScan->hasNormalCols = true;
}
- if (TSDB_CODE_SUCCESS == code && SCAN_TYPE_SYSTEM_TABLE != pScan->scanType) {
- code = addPrimaryKeyCol(pScan->tableId, &pScan->pScanCols);
+ if (TSDB_CODE_SUCCESS == code) {
+ code = addDefaultScanCol(pRealTable->pMeta, &pScan->pScanCols);
}
// set output
@@ -784,7 +792,8 @@ static int32_t createWindowLogicNode(SLogicPlanContext* pCxt, SSelectStmt* pSele
static EDealRes needFillValueImpl(SNode* pNode, void* pContext) {
if (QUERY_NODE_COLUMN == nodeType(pNode)) {
SColumnNode* pCol = (SColumnNode*)pNode;
- if (COLUMN_TYPE_WINDOW_PC != pCol->colType && COLUMN_TYPE_GROUP_KEY != pCol->colType) {
+ if (COLUMN_TYPE_WINDOW_START != pCol->colType && COLUMN_TYPE_WINDOW_END != pCol->colType &&
+ COLUMN_TYPE_WINDOW_DURATION != pCol->colType && COLUMN_TYPE_GROUP_KEY != pCol->colType) {
*(bool*)pContext = true;
return DEAL_RES_END;
}
@@ -1002,7 +1011,8 @@ static int32_t createPartitionLogicNode(SLogicPlanContext* pCxt, SSelectStmt* pS
int32_t code =
nodesCollectColumns(pSelect, SQL_CLAUSE_PARTITION_BY, NULL, COLLECT_COL_TYPE_ALL, &pPartition->node.pTargets);
if (TSDB_CODE_SUCCESS == code && NULL == pPartition->node.pTargets) {
- code = nodesListMakeStrictAppend(&pPartition->node.pTargets, nodesCloneNode(nodesListGetNode(pCxt->pCurrRoot->pTargets, 0)));
+ code = nodesListMakeStrictAppend(&pPartition->node.pTargets,
+ nodesCloneNode(nodesListGetNode(pCxt->pCurrRoot->pTargets, 0)));
}
if (TSDB_CODE_SUCCESS == code) {
diff --git a/source/libs/planner/src/planOptimizer.c b/source/libs/planner/src/planOptimizer.c
index 45ab3903a9e9eb6df844244b6fc7cd8d009ebd47..b160f45479a8e11a160fc092b7af536c4165436a 100644
--- a/source/libs/planner/src/planOptimizer.c
+++ b/source/libs/planner/src/planOptimizer.c
@@ -16,6 +16,7 @@
#include "filter.h"
#include "functionMgt.h"
#include "planInt.h"
+#include "tglobal.h"
#include "ttime.h"
#define OPTIMIZE_FLAG_MASK(n) (1 << n)
@@ -1084,7 +1085,7 @@ static int32_t sortPriKeyOptGetSequencingNodesImpl(SLogicNode* pNode, bool* pNot
switch (nodeType(pNode)) {
case QUERY_NODE_LOGIC_PLAN_SCAN: {
SScanLogicNode* pScan = (SScanLogicNode*)pNode;
- if (NULL != pScan->pGroupTags) {
+ if (NULL != pScan->pGroupTags || TSDB_SYSTEM_TABLE == pScan->tableType) {
*pNotOptimize = true;
return TSDB_CODE_SUCCESS;
}
@@ -1665,7 +1666,10 @@ static bool eliminateProjOptMayBeOptimized(SLogicNode* pNode) {
return false;
}
- if (QUERY_NODE_LOGIC_PLAN_PROJECT != nodeType(pNode) || 1 != LIST_LENGTH(pNode->pChildren)) {
+ // Super table scan requires project operator to merge packets to improve performance.
+ if (QUERY_NODE_LOGIC_PLAN_PROJECT != nodeType(pNode) || 1 != LIST_LENGTH(pNode->pChildren) ||
+ (QUERY_NODE_LOGIC_PLAN_SCAN == nodeType(nodesListGetNode(pNode->pChildren, 0)) &&
+ TSDB_SUPER_TABLE == ((SScanLogicNode*)nodesListGetNode(pNode->pChildren, 0))->tableType)) {
return false;
}
@@ -2407,7 +2411,7 @@ static const SOptimizeRule optimizeRuleSet[] = {
static const int32_t optimizeRuleNum = (sizeof(optimizeRuleSet) / sizeof(SOptimizeRule));
static void dumpLogicSubplan(const char* pRuleName, SLogicSubplan* pSubplan) {
- if (0 == (qDebugFlag & DEBUG_DEBUG)) {
+ if (!tsQueryPlannerTrace) {
return;
}
char* pStr = NULL;
diff --git a/source/libs/planner/src/planPhysiCreater.c b/source/libs/planner/src/planPhysiCreater.c
index c7eb6f7b5e61fdd1d4c29cc88a8b980bc1efdf79..0cbb833a4d4506b5123b45a0184bbc6023b53c2a 100644
--- a/source/libs/planner/src/planPhysiCreater.c
+++ b/source/libs/planner/src/planPhysiCreater.c
@@ -576,6 +576,7 @@ static int32_t createSystemTableScanPhysiNode(SPhysiPlanContext* pCxt, SSubplan*
pScan->showRewrite = pScanLogicNode->showRewrite;
pScan->accountId = pCxt->pPlanCxt->acctId;
+ pScan->sysInfo = pCxt->pPlanCxt->sysInfo;
if (0 == strcmp(pScanLogicNode->tableName.tname, TSDB_INS_TABLE_TABLES) ||
0 == strcmp(pScanLogicNode->tableName.tname, TSDB_INS_TABLE_TABLE_DISTRIBUTED) ||
0 == strcmp(pScanLogicNode->tableName.tname, TSDB_INS_TABLE_TAGS)) {
@@ -1323,7 +1324,8 @@ static int32_t createSortPhysiNode(SPhysiPlanContext* pCxt, SNodeList* pChildren
static int32_t createPartitionPhysiNode(SPhysiPlanContext* pCxt, SNodeList* pChildren,
SPartitionLogicNode* pPartLogicNode, SPhysiNode** pPhyNode) {
SPartitionPhysiNode* pPart =
- (SPartitionPhysiNode*)makePhysiNode(pCxt, (SLogicNode*)pPartLogicNode, QUERY_NODE_PHYSICAL_PLAN_PARTITION);
+ (SPartitionPhysiNode*)makePhysiNode(pCxt, (SLogicNode*)pPartLogicNode,
+ pCxt->pPlanCxt->streamQuery ? QUERY_NODE_PHYSICAL_PLAN_STREAM_PARTITION : QUERY_NODE_PHYSICAL_PLAN_PARTITION);
if (NULL == pPart) {
return TSDB_CODE_OUT_OF_MEMORY;
}
diff --git a/source/libs/planner/src/planSpliter.c b/source/libs/planner/src/planSpliter.c
index c582994b7c319778477238ab26b99ce844cb8c1c..beb938b161ca1656f09d15c559351aa2e081df2a 100644
--- a/source/libs/planner/src/planSpliter.c
+++ b/source/libs/planner/src/planSpliter.c
@@ -1427,7 +1427,7 @@ static const SSplitRule splitRuleSet[] = {
static const int32_t splitRuleNum = (sizeof(splitRuleSet) / sizeof(SSplitRule));
static void dumpLogicSubplan(const char* pRuleName, SLogicSubplan* pSubplan) {
- if (0 == (qDebugFlag & DEBUG_DEBUG)) {
+ if (!tsQueryPlannerTrace) {
return;
}
char* pStr = NULL;
diff --git a/source/libs/planner/src/planner.c b/source/libs/planner/src/planner.c
index c1296982e0217ae9b3c2e67b210f1922492cf547..baa1d1074c7d4bea0df280649777db4a659247cb 100644
--- a/source/libs/planner/src/planner.c
+++ b/source/libs/planner/src/planner.c
@@ -17,9 +17,10 @@
#include "planInt.h"
#include "scalar.h"
+#include "tglobal.h"
static void dumpQueryPlan(SQueryPlan* pPlan) {
- if (0 == (qDebugFlag & DEBUG_DEBUG)) {
+ if (!tsQueryPlannerTrace) {
return;
}
char* pStr = NULL;
diff --git a/source/libs/planner/test/planOtherTest.cpp b/source/libs/planner/test/planOtherTest.cpp
index 7107f8b3c94c616ae9db90132a59f2804b542aca..350ccd0d927c9773059cfb2c027a0ca2292e4d13 100644
--- a/source/libs/planner/test/planOtherTest.cpp
+++ b/source/libs/planner/test/planOtherTest.cpp
@@ -37,9 +37,9 @@ TEST_F(PlanOtherTest, createStream) {
TEST_F(PlanOtherTest, createStreamUseSTable) {
useDb("root", "test");
- run("CREATE STREAM IF NOT EXISTS s1 as SELECT COUNT(*) FROM st1 INTERVAL(10s)");
+ run("CREATE STREAM IF NOT EXISTS s1 into st1 as SELECT COUNT(*) FROM st1 INTERVAL(10s)");
- run("CREATE STREAM IF NOT EXISTS s1 as SELECT COUNT(*) FROM st1 PARTITION BY TBNAME INTERVAL(10s)");
+ run("CREATE STREAM IF NOT EXISTS s1 into st1 as SELECT COUNT(*) FROM st1 PARTITION BY TBNAME INTERVAL(10s)");
}
TEST_F(PlanOtherTest, createSmaIndex) {
diff --git a/source/libs/planner/test/planSysTbTest.cpp b/source/libs/planner/test/planSysTbTest.cpp
index 921f86f09a41d36448ab0d435ab6a439645b9bfc..6b40e381cc18cb75cc9271352cd654d31a74242b 100644
--- a/source/libs/planner/test/planSysTbTest.cpp
+++ b/source/libs/planner/test/planSysTbTest.cpp
@@ -32,3 +32,9 @@ TEST_F(PlanSysTableTest, informationSchema) {
run("SELECT * FROM information_schema.ins_databases WHERE name = 'information_schema'");
}
+
+TEST_F(PlanSysTableTest, withAgg) {
+ useDb("root", "information_schema");
+
+ run("SELECT COUNT(1) FROM ins_users");
+}
diff --git a/source/libs/planner/test/planTestUtil.cpp b/source/libs/planner/test/planTestUtil.cpp
index 5fc8b3cf302612c9b8528a8380475b32a79a8824..b280b32a94f7d824bcd475573f3f62744c3e3d26 100644
--- a/source/libs/planner/test/planTestUtil.cpp
+++ b/source/libs/planner/test/planTestUtil.cpp
@@ -19,6 +19,7 @@
#include
#include
+#include
#include "cmdnodes.h"
#include "mockCatalogService.h"
@@ -251,6 +252,7 @@ class PlannerTestBaseImpl {
string splitLogicPlan_;
string scaledLogicPlan_;
string physiPlan_;
+ string physiPlanMsg_;
vector physiSubplans_;
};
@@ -274,16 +276,17 @@ class PlannerTestBaseImpl {
res_.splitLogicPlan_.clear();
res_.scaledLogicPlan_.clear();
res_.physiPlan_.clear();
+ res_.physiPlanMsg_.clear();
res_.physiSubplans_.clear();
}
void dump(DumpModule module) {
+ cout << "========================================== " << sqlNo_ << " sql : [" << stmtEnv_.sql_ << "]" << endl;
+
if (DUMP_MODULE_NOTHING == module) {
return;
}
- cout << "========================================== " << sqlNo_ << " sql : [" << stmtEnv_.sql_ << "]" << endl;
-
if (DUMP_MODULE_ALL == module || DUMP_MODULE_PARSER == module) {
if (res_.prepareAst_.empty()) {
cout << "+++++++++++++++++++++syntax tree : " << endl;
@@ -343,6 +346,7 @@ class PlannerTestBaseImpl {
cxt.pMsg = stmtEnv_.msgBuf_.data();
cxt.msgLen = stmtEnv_.msgBuf_.max_size();
cxt.svrVer = "3.0.0.0";
+ cxt.enableSysInfo = true;
if (prepare) {
SStmtCallback stmtCb = {0};
cxt.pStmtCb = &stmtCb;
@@ -407,6 +411,8 @@ class PlannerTestBaseImpl {
SNode* pSubplan;
FOREACH(pSubplan, ((SNodeListNode*)pNode)->pNodeList) { res_.physiSubplans_.push_back(toString(pSubplan)); }
}
+ res_.physiPlanMsg_ = toMsg((SNode*)(*pPlan));
+ cout << "json len: " << res_.physiPlan_.length() << ", msg len: " << res_.physiPlanMsg_.length() << endl;
}
void setPlanContext(SQuery* pQuery, SPlanContext* pCxt) {
@@ -445,12 +451,45 @@ class PlannerTestBaseImpl {
string toString(const SNode* pRoot) {
char* pStr = NULL;
int32_t len = 0;
+
+ auto start = chrono::steady_clock::now();
DO_WITH_THROW(nodesNodeToString, pRoot, false, &pStr, &len)
+ if (QUERY_NODE_PHYSICAL_PLAN == nodeType(pRoot)) {
+ cout << "nodesNodeToString: "
+ << chrono::duration_cast(chrono::steady_clock::now() - start).count() << "us" << endl;
+ }
+
string str(pStr);
taosMemoryFreeClear(pStr);
return str;
}
+ string toMsg(const SNode* pRoot) {
+ char* pStr = NULL;
+ int32_t len = 0;
+
+ auto start = chrono::steady_clock::now();
+ DO_WITH_THROW(nodesNodeToMsg, pRoot, &pStr, &len)
+ cout << "nodesNodeToMsg: "
+ << chrono::duration_cast(chrono::steady_clock::now() - start).count() << "us" << endl;
+
+ SNode* pNode = NULL;
+ char* pNewStr = NULL;
+ int32_t newlen = 0;
+ DO_WITH_THROW(nodesMsgToNode, pStr, len, &pNode)
+ DO_WITH_THROW(nodesNodeToMsg, pNode, &pNewStr, &newlen)
+ if (newlen != len || 0 != memcmp(pStr, pNewStr, len)) {
+ cout << "nodesNodeToMsg error!!!!!!!!!!!!!! len = " << len << ", newlen = " << newlen << endl;
+ DO_WITH_THROW(nodesNodeToString, pNode, false, &pNewStr, &newlen)
+ cout << "nodesNodeToString " << pNewStr << endl;
+ }
+ taosMemoryFreeClear(pNewStr);
+
+ string str(pStr, len);
+ taosMemoryFreeClear(pStr);
+ return str;
+ }
+
caseEnv caseEnv_;
stmtEnv stmtEnv_;
stmtRes res_;
diff --git a/source/libs/qcom/src/queryUtil.c b/source/libs/qcom/src/queryUtil.c
index 5143aa4af1f90ba0e7a0ac2f37af6648ed68c685..d848016e46482614972d5e85469e4297136d6cc0 100644
--- a/source/libs/qcom/src/queryUtil.c
+++ b/source/libs/qcom/src/queryUtil.c
@@ -213,15 +213,25 @@ SSchema createSchema(int8_t type, int32_t bytes, col_id_t colId, const char* nam
return s;
}
+void freeSTableMetaRspPointer(void *p) {
+ tFreeSTableMetaRsp(*(void**)p);
+ taosMemoryFreeClear(*(void**)p);
+}
+
void destroyQueryExecRes(SExecResult* pRes) {
if (NULL == pRes || NULL == pRes->res) {
return;
}
switch (pRes->msgType) {
+ case TDMT_VND_CREATE_TABLE: {
+ taosArrayDestroyEx((SArray*)pRes->res, freeSTableMetaRspPointer);
+ break;
+ }
+ case TDMT_MND_CREATE_STB:
case TDMT_VND_ALTER_TABLE:
case TDMT_MND_ALTER_STB: {
- tFreeSTableMetaRsp((STableMetaRsp*)pRes->res);
+ tFreeSTableMetaRsp(pRes->res);
taosMemoryFreeClear(pRes->res);
break;
}
diff --git a/source/libs/qcom/src/querymsg.c b/source/libs/qcom/src/querymsg.c
index ed8786170d0e37f677d1b731d08eafb511875023..e54937114cd1bc0e011e8dc9d8ed44a710bc1807 100644
--- a/source/libs/qcom/src/querymsg.c
+++ b/source/libs/qcom/src/querymsg.c
@@ -38,6 +38,8 @@ int32_t queryBuildUseDbOutput(SUseDbOutput *pOut, SUseDbRsp *usedbRsp) {
pOut->dbVgroup->vgVersion = usedbRsp->vgVersion;
pOut->dbVgroup->hashMethod = usedbRsp->hashMethod;
+ pOut->dbVgroup->hashPrefix = usedbRsp->hashPrefix;
+ pOut->dbVgroup->hashSuffix = usedbRsp->hashSuffix;
qDebug("Got %d vgroup for db %s", usedbRsp->vgNum, usedbRsp->db);
@@ -354,6 +356,19 @@ static int32_t queryConvertTableMetaMsg(STableMetaRsp *pMetaMsg) {
return TSDB_CODE_SUCCESS;
}
+int32_t queryCreateCTableMetaFromMsg(STableMetaRsp *msg, SCTableMeta *pMeta) {
+ pMeta->vgId = msg->vgId;
+ pMeta->tableType = msg->tableType;
+ pMeta->uid = msg->tuid;
+ pMeta->suid = msg->suid;
+
+ qDebug("ctable %s uid %" PRIx64 " meta returned, type %d vgId:%d db %s suid %" PRIx64 ,
+ msg->tbName, pMeta->uid, pMeta->tableType, pMeta->vgId, msg->dbFName, pMeta->suid);
+
+ return TSDB_CODE_SUCCESS;
+}
+
+
int32_t queryCreateTableMetaFromMsg(STableMetaRsp *msg, bool isStb, STableMeta **pMeta) {
int32_t total = msg->numOfColumns + msg->numOfTags;
int32_t metaSize = sizeof(STableMeta) + sizeof(SSchema) * total;
diff --git a/source/libs/scalar/inc/filterInt.h b/source/libs/scalar/inc/filterInt.h
index 23693c785aa17921e5ba4420fe6477fa72b27392..e7695b2f04ea4fed2ebf9a77bf00717e1978003e 100644
--- a/source/libs/scalar/inc/filterInt.h
+++ b/source/libs/scalar/inc/filterInt.h
@@ -276,7 +276,7 @@ struct SFilterInfo {
#define FILTER_CLR_FLAG(st, f) st &= (~f)
#define SIMPLE_COPY_VALUES(dst, src) *((int64_t *)dst) = *((int64_t *)src)
-#define FILTER_PACKAGE_UNIT_HASH_KEY(v, optr, idx1, idx2) do { char *_t = (char *)v; _t[0] = optr; *(uint32_t *)(_t + 1) = idx1; *(uint32_t *)(_t + 3) = idx2; } while (0)
+#define FLT_PACKAGE_UNIT_HASH_KEY(v, op1, op2, lidx, ridx, ridx2) do { char *_t = (char *)(v); _t[0] = (op1); _t[1] = (op2); *(uint32_t *)(_t + 2) = (lidx); *(uint32_t *)(_t + 2 + sizeof(uint32_t)) = (ridx); } while (0)
#define FILTER_GREATER(cr,sflag,eflag) ((cr > 0) || ((cr == 0) && (FILTER_GET_FLAG(sflag,RANGE_FLG_EXCLUDE) || FILTER_GET_FLAG(eflag,RANGE_FLG_EXCLUDE))))
#define FILTER_COPY_RA(dst, src) do { (dst)->sflag = (src)->sflag; (dst)->eflag = (src)->eflag; (dst)->s = (src)->s; (dst)->e = (src)->e; } while (0)
@@ -350,6 +350,7 @@ struct SFilterInfo {
extern bool filterDoCompare(__compar_fn_t func, uint8_t optr, void *left, void *right);
extern __compar_fn_t filterGetCompFunc(int32_t type, int32_t optr);
+extern __compar_fn_t filterGetCompFuncEx(int32_t lType, int32_t rType, int32_t optr);
#ifdef __cplusplus
}
diff --git a/source/libs/scalar/inc/sclInt.h b/source/libs/scalar/inc/sclInt.h
index d423b92da7e83589aacc6d384c0e2cafa0949038..15e9026ddbc2eea8ad4e066519dd4bbea9767b7e 100644
--- a/source/libs/scalar/inc/sclInt.h
+++ b/source/libs/scalar/inc/sclInt.h
@@ -45,6 +45,9 @@ typedef struct SScalarCtx {
#define SCL_IS_CONST_CALC(_ctx) (NULL == (_ctx)->pBlockList)
//#define SCL_IS_NULL_VALUE_NODE(_node) ((QUERY_NODE_VALUE == nodeType(_node)) && (TSDB_DATA_TYPE_NULL == ((SValueNode *)_node)->node.resType.type) && (((SValueNode *)_node)->placeholderNo <= 0))
#define SCL_IS_NULL_VALUE_NODE(_node) ((QUERY_NODE_VALUE == nodeType(_node)) && (TSDB_DATA_TYPE_NULL == ((SValueNode *)_node)->node.resType.type))
+#define SCL_IS_COMPARISON_OPERATOR(_opType) ((_opType) >= OP_TYPE_GREATER_THAN && (_opType) < OP_TYPE_IS_NOT_UNKNOWN)
+#define SCL_DOWNGRADE_DATETYPE(_type) ((_type) == TSDB_DATA_TYPE_BIGINT || TSDB_DATA_TYPE_DOUBLE == (_type) || (_type) == TSDB_DATA_TYPE_UBIGINT)
+#define SCL_NO_NEED_CONVERT_COMPARISION(_ltype, _rtype, _optr) (IS_NUMERIC_TYPE(_ltype) && IS_NUMERIC_TYPE(_rtype) && ((_optr) >= OP_TYPE_GREATER_THAN && (_optr) <= OP_TYPE_NOT_EQUAL))
#define sclFatal(...) qFatal(__VA_ARGS__)
#define sclError(...) qError(__VA_ARGS__)
diff --git a/source/libs/scalar/src/filter.c b/source/libs/scalar/src/filter.c
index 4377dbf14ec55dae53d41859af8480886f4cce51..9e676354374fce6c2e733ac8d42c45baef9bada8 100644
--- a/source/libs/scalar/src/filter.c
+++ b/source/libs/scalar/src/filter.c
@@ -132,6 +132,77 @@ __compar_fn_t gDataCompare[] = {compareInt32Val, compareInt8Val, compareInt16Val
compareChkNotInString, compareStrPatternNotMatch, compareWStrPatternNotMatch
};
+__compar_fn_t gInt8SignCompare[] = {
+ compareInt8Val, compareInt8Int16, compareInt8Int32, compareInt8Int64, compareInt8Float, compareInt8Double
+};
+__compar_fn_t gInt8UsignCompare[] = {
+ compareInt8Uint8, compareInt8Uint16, compareInt8Uint32, compareInt8Uint64
+};
+
+__compar_fn_t gInt16SignCompare[] = {
+ compareInt16Int8, compareInt16Val, compareInt16Int32, compareInt16Int64, compareInt16Float, compareInt16Double
+};
+__compar_fn_t gInt16UsignCompare[] = {
+ compareInt16Uint8, compareInt16Uint16, compareInt16Uint32, compareInt16Uint64
+};
+
+__compar_fn_t gInt32SignCompare[] = {
+ compareInt32Int8, compareInt32Int16, compareInt32Val, compareInt32Int64, compareInt32Float, compareInt32Double
+};
+__compar_fn_t gInt32UsignCompare[] = {
+ compareInt32Uint8, compareInt32Uint16, compareInt32Uint32, compareInt32Uint64
+};
+
+__compar_fn_t gInt64SignCompare[] = {
+ compareInt64Int8, compareInt64Int16, compareInt64Int32, compareInt64Val, compareInt64Float, compareInt64Double
+};
+__compar_fn_t gInt64UsignCompare[] = {
+ compareInt64Uint8, compareInt64Uint16, compareInt64Uint32, compareInt64Uint64
+};
+
+__compar_fn_t gFloatSignCompare[] = {
+ compareFloatInt8, compareFloatInt16, compareFloatInt32, compareFloatInt64, compareFloatVal, compareFloatDouble
+};
+__compar_fn_t gFloatUsignCompare[] = {
+ compareFloatUint8, compareFloatUint16, compareFloatUint32, compareFloatUint64
+};
+
+__compar_fn_t gDoubleSignCompare[] = {
+ compareDoubleInt8, compareDoubleInt16, compareDoubleInt32, compareDoubleInt64, compareDoubleFloat, compareDoubleVal
+};
+__compar_fn_t gDoubleUsignCompare[] = {
+ compareDoubleUint8, compareDoubleUint16, compareDoubleUint32, compareDoubleUint64
+};
+
+__compar_fn_t gUint8SignCompare[] = {
+ compareUint8Int8, compareUint8Int16, compareUint8Int32, compareUint8Int64, compareUint8Float, compareUint8Double
+};
+__compar_fn_t gUint8UsignCompare[] = {
+ compareUint8Val, compareUint8Uint16, compareUint8Uint32, compareUint8Uint64
+};
+
+__compar_fn_t gUint16SignCompare[] = {
+ compareUint16Int8, compareUint16Int16, compareUint16Int32, compareUint16Int64, compareUint16Float, compareUint16Double
+};
+__compar_fn_t gUint16UsignCompare[] = {
+ compareUint16Uint8, compareUint16Val, compareUint16Uint32, compareUint16Uint64
+};
+
+__compar_fn_t gUint32SignCompare[] = {
+ compareUint32Int8, compareUint32Int16, compareUint32Int32, compareUint32Int64, compareUint32Float, compareUint32Double
+};
+__compar_fn_t gUint32UsignCompare[] = {
+ compareUint32Uint8, compareUint32Uint16, compareUint32Val, compareUint32Uint64
+};
+
+__compar_fn_t gUint64SignCompare[] = {
+ compareUint64Int8, compareUint64Int16, compareUint64Int32, compareUint64Int64, compareUint64Float, compareUint64Double
+};
+__compar_fn_t gUint64UsignCompare[] = {
+ compareUint64Uint8, compareUint64Uint16, compareUint64Uint32, compareUint64Val
+};
+
+
int8_t filterGetCompFuncIdx(int32_t type, int32_t optr) {
int8_t comparFn = 0;
@@ -257,6 +328,93 @@ __compar_fn_t filterGetCompFunc(int32_t type, int32_t optr) {
return gDataCompare[filterGetCompFuncIdx(type, optr)];
}
+__compar_fn_t filterGetCompFuncEx(int32_t lType, int32_t rType, int32_t optr) {
+ switch (lType) {
+ case TSDB_DATA_TYPE_TINYINT: {
+ if (IS_SIGNED_NUMERIC_TYPE(rType) || IS_FLOAT_TYPE(rType)) {
+ return gInt8SignCompare[rType - TSDB_DATA_TYPE_TINYINT];
+ } else {
+ return gInt8UsignCompare[rType - TSDB_DATA_TYPE_UTINYINT];
+ }
+ break;
+ }
+ case TSDB_DATA_TYPE_SMALLINT: {
+ if (IS_SIGNED_NUMERIC_TYPE(rType) || IS_FLOAT_TYPE(rType)) {
+ return gInt16SignCompare[rType - TSDB_DATA_TYPE_TINYINT];
+ } else {
+ return gInt16UsignCompare[rType - TSDB_DATA_TYPE_UTINYINT];
+ }
+ break;
+ }
+ case TSDB_DATA_TYPE_INT: {
+ if (IS_SIGNED_NUMERIC_TYPE(rType) || IS_FLOAT_TYPE(rType)) {
+ return gInt32SignCompare[rType - TSDB_DATA_TYPE_TINYINT];
+ } else {
+ return gInt32UsignCompare[rType - TSDB_DATA_TYPE_UTINYINT];
+ }
+ break;
+ }
+ case TSDB_DATA_TYPE_BIGINT: {
+ if (IS_SIGNED_NUMERIC_TYPE(rType) || IS_FLOAT_TYPE(rType)) {
+ return gInt64SignCompare[rType - TSDB_DATA_TYPE_TINYINT];
+ } else {
+ return gInt64UsignCompare[rType - TSDB_DATA_TYPE_UTINYINT];
+ }
+ break;
+ }
+ case TSDB_DATA_TYPE_FLOAT: {
+ if (IS_SIGNED_NUMERIC_TYPE(rType) || IS_FLOAT_TYPE(rType)) {
+ return gFloatSignCompare[rType - TSDB_DATA_TYPE_TINYINT];
+ } else {
+ return gFloatUsignCompare[rType - TSDB_DATA_TYPE_UTINYINT];
+ }
+ break;
+ }
+ case TSDB_DATA_TYPE_DOUBLE: {
+ if (IS_SIGNED_NUMERIC_TYPE(rType) || IS_FLOAT_TYPE(rType)) {
+ return gDoubleSignCompare[rType - TSDB_DATA_TYPE_TINYINT];
+ } else {
+ return gDoubleUsignCompare[rType - TSDB_DATA_TYPE_UTINYINT];
+ }
+ break;
+ }
+ case TSDB_DATA_TYPE_UTINYINT: {
+ if (IS_SIGNED_NUMERIC_TYPE(rType) || IS_FLOAT_TYPE(rType)) {
+ return gUint8SignCompare[rType - TSDB_DATA_TYPE_TINYINT];
+ } else {
+ return gUint8UsignCompare[rType - TSDB_DATA_TYPE_UTINYINT];
+ }
+ break;
+ }
+ case TSDB_DATA_TYPE_USMALLINT: {
+ if (IS_SIGNED_NUMERIC_TYPE(rType) || IS_FLOAT_TYPE(rType)) {
+ return gUint16SignCompare[rType - TSDB_DATA_TYPE_TINYINT];
+ } else {
+ return gUint16UsignCompare[rType - TSDB_DATA_TYPE_UTINYINT];
+ }
+ break;
+ }
+ case TSDB_DATA_TYPE_UINT: {
+ if (IS_SIGNED_NUMERIC_TYPE(rType) || IS_FLOAT_TYPE(rType)) {
+ return gUint32SignCompare[rType - TSDB_DATA_TYPE_TINYINT];
+ } else {
+ return gUint32UsignCompare[rType - TSDB_DATA_TYPE_UTINYINT];
+ }
+ break;
+ }
+ case TSDB_DATA_TYPE_UBIGINT: {
+ if (IS_SIGNED_NUMERIC_TYPE(rType) || IS_FLOAT_TYPE(rType)) {
+ return gUint64SignCompare[rType - TSDB_DATA_TYPE_TINYINT];
+ } else {
+ return gUint64UsignCompare[rType - TSDB_DATA_TYPE_UTINYINT];
+ }
+ break;
+ }
+ default:
+ break;
+ }
+ return NULL;
+}
static FORCE_INLINE int32_t filterCompareGroupCtx(const void *pLeft, const void *pRight) {
SFilterGroupCtx *left = *((SFilterGroupCtx**)pLeft), *right = *((SFilterGroupCtx**)pRight);
@@ -910,14 +1068,14 @@ int32_t filterAddFieldFromNode(SFilterInfo *info, SNode *node, SFilterFieldId *f
return TSDB_CODE_SUCCESS;
}
-int32_t filterAddUnit(SFilterInfo *info, uint8_t optr, SFilterFieldId *left, SFilterFieldId *right, uint32_t *uidx) {
+int32_t filterAddUnitImpl(SFilterInfo *info, uint8_t optr, SFilterFieldId *left, SFilterFieldId *right, uint8_t optr2, SFilterFieldId *right2, uint32_t *uidx) {
if (FILTER_GET_FLAG(info->options, FLT_OPTION_NEED_UNIQE)) {
if (info->pctx.unitHash == NULL) {
info->pctx.unitHash = taosHashInit(FILTER_DEFAULT_GROUP_SIZE * FILTER_DEFAULT_UNIT_SIZE, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), false, false);
} else {
- int64_t v = 0;
- FILTER_PACKAGE_UNIT_HASH_KEY(&v, optr, left->idx, right ? right->idx : -1);
- void *hu = taosHashGet(info->pctx.unitHash, &v, sizeof(v));
+ char v[14] = {0};
+ FLT_PACKAGE_UNIT_HASH_KEY(&v, optr, optr2, left->idx, (right ? right->idx : -1), (right2 ? right2->idx : -1));
+ void *hu = taosHashGet(info->pctx.unitHash, v, sizeof(v));
if (hu) {
*uidx = *(uint32_t *)hu;
return TSDB_CODE_SUCCESS;
@@ -939,7 +1097,11 @@ int32_t filterAddUnit(SFilterInfo *info, uint8_t optr, SFilterFieldId *left, SFi
if (right) {
u->right = *right;
}
-
+ u->compare.optr2 = optr2;
+ if (right2) {
+ u->right2 = *right2;
+ }
+
if (u->right.type == FLD_TYPE_VALUE) {
SFilterField *val = FILTER_UNIT_RIGHT_FIELD(info, u);
assert(FILTER_GET_FLAG(val->flag, FLD_TYPE_VALUE));
@@ -960,9 +1122,9 @@ int32_t filterAddUnit(SFilterInfo *info, uint8_t optr, SFilterFieldId *left, SFi
*uidx = info->unitNum;
if (FILTER_GET_FLAG(info->options, FLT_OPTION_NEED_UNIQE)) {
- int64_t v = 0;
- FILTER_PACKAGE_UNIT_HASH_KEY(&v, optr, left->idx, right ? right->idx : -1);
- taosHashPut(info->pctx.unitHash, &v, sizeof(v), uidx, sizeof(*uidx));
+ char v[14] = {0};
+ FLT_PACKAGE_UNIT_HASH_KEY(&v, optr, optr2, left->idx, (right ? right->idx : -1), (right2 ? right2->idx : -1));
+ taosHashPut(info->pctx.unitHash, v, sizeof(v), uidx, sizeof(*uidx));
}
++info->unitNum;
@@ -971,6 +1133,9 @@ int32_t filterAddUnit(SFilterInfo *info, uint8_t optr, SFilterFieldId *left, SFi
}
+int32_t filterAddUnit(SFilterInfo *info, uint8_t optr, SFilterFieldId *left, SFilterFieldId *right, uint32_t *uidx) {
+ return filterAddUnitImpl(info, optr, left, right, 0, NULL, uidx);
+}
int32_t filterAddUnitToGroup(SFilterGroup *group, uint32_t unitIdx) {
if (group->unitNum >= group->unitSize) {
@@ -1147,8 +1312,8 @@ int32_t filterAddGroupUnitFromCtx(SFilterInfo *dst, SFilterInfo *src, SFilterRan
SIMPLE_COPY_VALUES(data2, &ra->e);
filterAddField(dst, NULL, &data2, FLD_TYPE_VALUE, &right2, tDataTypes[type].bytes, true);
- filterAddUnit(dst, FILTER_GET_FLAG(ra->sflag, RANGE_FLG_EXCLUDE) ? OP_TYPE_GREATER_THAN : OP_TYPE_GREATER_EQUAL, &left, &right, &uidx);
- filterAddUnitRight(dst, FILTER_GET_FLAG(ra->eflag, RANGE_FLG_EXCLUDE) ? OP_TYPE_LOWER_THAN : OP_TYPE_LOWER_EQUAL, &right2, uidx);
+ filterAddUnitImpl(dst, FILTER_GET_FLAG(ra->sflag, RANGE_FLG_EXCLUDE) ? OP_TYPE_GREATER_THAN : OP_TYPE_GREATER_EQUAL, &left, &right,
+ FILTER_GET_FLAG(ra->eflag, RANGE_FLG_EXCLUDE) ? OP_TYPE_LOWER_THAN : OP_TYPE_LOWER_EQUAL, &right2, &uidx);
filterAddUnitToGroup(g, uidx);
return TSDB_CODE_SUCCESS;
}
@@ -1222,8 +1387,8 @@ int32_t filterAddGroupUnitFromCtx(SFilterInfo *dst, SFilterInfo *src, SFilterRan
SIMPLE_COPY_VALUES(data2, &r->ra.e);
filterAddField(dst, NULL, &data2, FLD_TYPE_VALUE, &right2, tDataTypes[type].bytes, true);
- filterAddUnit(dst, FILTER_GET_FLAG(r->ra.sflag, RANGE_FLG_EXCLUDE) ? OP_TYPE_GREATER_THAN : OP_TYPE_GREATER_EQUAL, &left, &right, &uidx);
- filterAddUnitRight(dst, FILTER_GET_FLAG(r->ra.eflag, RANGE_FLG_EXCLUDE) ? OP_TYPE_LOWER_THAN : OP_TYPE_LOWER_EQUAL, &right2, uidx);
+ filterAddUnitImpl(dst, FILTER_GET_FLAG(r->ra.sflag, RANGE_FLG_EXCLUDE) ? OP_TYPE_GREATER_THAN : OP_TYPE_GREATER_EQUAL, &left, &right,
+ FILTER_GET_FLAG(r->ra.eflag, RANGE_FLG_EXCLUDE) ? OP_TYPE_LOWER_THAN : OP_TYPE_LOWER_EQUAL, &right2, &uidx);
filterAddUnitToGroup(g, uidx);
}
@@ -2073,6 +2238,44 @@ int32_t filterMergeGroupUnits(SFilterInfo *info, SFilterGroupCtx** gRes, int32_t
return TSDB_CODE_SUCCESS;
}
+bool filterIsSameUnits(SFilterColInfo* pCol1, SFilterColInfo* pCol2) {
+ if (pCol1->type != pCol2->type) {
+ return false;
+ }
+
+ if (RANGE_TYPE_MR_CTX == pCol1->type) {
+ SFilterRangeCtx* pCtx1 = (SFilterRangeCtx*)pCol1->info;
+ SFilterRangeCtx* pCtx2 = (SFilterRangeCtx*)pCol2->info;
+
+ if ((pCtx1->isnull != pCtx2->isnull) || (pCtx1->notnull != pCtx2->notnull) || (pCtx1->isrange != pCtx2->isrange)) {
+ return false;
+ }
+
+
+ SFilterRangeNode* pNode1 = pCtx1->rs;
+ SFilterRangeNode* pNode2 = pCtx2->rs;
+
+ while (true) {
+ if (NULL == pNode1 && NULL == pNode2) {
+ break;
+ }
+
+ if (NULL == pNode1 || NULL == pNode2) {
+ return false;
+ }
+
+ if (pNode1->ra.s != pNode2->ra.s || pNode1->ra.e != pNode2->ra.e || pNode1->ra.sflag != pNode2->ra.sflag || pNode1->ra.eflag != pNode2->ra.eflag) {
+ return false;
+ }
+
+ pNode1 = pNode1->next;
+ pNode2 = pNode2->next;
+ }
+ }
+
+ return true;
+}
+
void filterCheckColConflict(SFilterGroupCtx* gRes1, SFilterGroupCtx* gRes2, bool *conflict) {
uint32_t idx1 = 0, idx2 = 0, m = 0, n = 0;
bool equal = false;
@@ -2098,6 +2301,11 @@ void filterCheckColConflict(SFilterGroupCtx* gRes1, SFilterGroupCtx* gRes2, bool
return;
}
+ if (!filterIsSameUnits(&gRes1->colInfo[idx1], &gRes2->colInfo[idx2])) {
+ *conflict = true;
+ return;
+ }
+
// for long in operation
if (gRes1->colInfo[idx1].optr == OP_TYPE_EQUAL && gRes2->colInfo[idx2].optr == OP_TYPE_EQUAL) {
SFilterRangeCtx* ctx = gRes1->colInfo[idx1].info;
@@ -2711,17 +2919,22 @@ int32_t filterRmUnitByRange(SFilterInfo *info, SColumnDataAgg *pDataStatis, int3
for (uint32_t g = 0; g < info->groupNum; ++g) {
SFilterGroup *group = &info->groups[g];
+    // first is block unit num for a group, following append unitNum blkUnitIdx for this group
*unitNum = group->unitNum;
all = 0;
empty = 0;
+ // save group idx start pointer
+ uint32_t * pGroupIdx = unitIdx;
for (uint32_t u = 0; u < group->unitNum; ++u) {
uint32_t uidx = group->unitIdxs[u];
if (info->blkUnitRes[uidx] == 1) {
+        // blkUnitRes == 1 means always true, so no need to compare every time; delete this unit from the group
--(*unitNum);
all = 1;
continue;
} else if (info->blkUnitRes[uidx] == -1) {
+        // blkUnitRes == -1 means always false, so the group is always false; delete this group from blkGroupNum
*unitNum = 0;
empty = 1;
break;
@@ -2731,6 +2944,9 @@ int32_t filterRmUnitByRange(SFilterInfo *info, SColumnDataAgg *pDataStatis, int3
}
if (*unitNum == 0) {
+ // if unit num is zero, reset unitIdx to start on this group
+ unitIdx = pGroupIdx;
+
--info->blkGroupNum;
assert(empty || all);
diff --git a/source/libs/scalar/src/scalar.c b/source/libs/scalar/src/scalar.c
index 6634a29f4091773c89988940c9ab6ed5de2487da..cd1f6624bdf83e4fe143c1a648e5e30947bcdd65 100644
--- a/source/libs/scalar/src/scalar.c
+++ b/source/libs/scalar/src/scalar.c
@@ -9,6 +9,7 @@
#include "scalar.h"
#include "tudf.h"
#include "ttime.h"
+#include "tcompare.h"
int32_t scalarGetOperatorParamNum(EOperatorType type) {
if (OP_TYPE_IS_NULL == type || OP_TYPE_IS_NOT_NULL == type || OP_TYPE_IS_TRUE == type || OP_TYPE_IS_NOT_TRUE == type
@@ -219,6 +220,82 @@ void sclFreeParamList(SScalarParam *param, int32_t paramNum) {
taosMemoryFree(param);
}
+void sclDowngradeValueType(SValueNode *valueNode) {
+ switch (valueNode->node.resType.type) {
+ case TSDB_DATA_TYPE_BIGINT: {
+ int8_t i8 = valueNode->datum.i;
+ if (i8 == valueNode->datum.i) {
+ valueNode->node.resType.type = TSDB_DATA_TYPE_TINYINT;
+ *(int8_t*)&valueNode->typeData = i8;
+ break;
+ }
+ int16_t i16 = valueNode->datum.i;
+ if (i16 == valueNode->datum.i) {
+ valueNode->node.resType.type = TSDB_DATA_TYPE_SMALLINT;
+ *(int16_t*)&valueNode->typeData = i16;
+ break;
+ }
+ int32_t i32 = valueNode->datum.i;
+ if (i32 == valueNode->datum.i) {
+ valueNode->node.resType.type = TSDB_DATA_TYPE_INT;
+ *(int32_t*)&valueNode->typeData = i32;
+ break;
+ }
+ break;
+ }
+ case TSDB_DATA_TYPE_UBIGINT:{
+ uint8_t u8 = valueNode->datum.i;
+ if (u8 == valueNode->datum.i) {
+ int8_t i8 = valueNode->datum.i;
+ if (i8 == valueNode->datum.i) {
+ valueNode->node.resType.type = TSDB_DATA_TYPE_TINYINT;
+ *(int8_t*)&valueNode->typeData = i8;
+ } else {
+ valueNode->node.resType.type = TSDB_DATA_TYPE_UTINYINT;
+ *(uint8_t*)&valueNode->typeData = u8;
+ }
+ break;
+ }
+ uint16_t u16 = valueNode->datum.i;
+ if (u16 == valueNode->datum.i) {
+ int16_t i16 = valueNode->datum.i;
+ if (i16 == valueNode->datum.i) {
+ valueNode->node.resType.type = TSDB_DATA_TYPE_SMALLINT;
+ *(int16_t*)&valueNode->typeData = i16;
+ } else {
+ valueNode->node.resType.type = TSDB_DATA_TYPE_USMALLINT;
+ *(uint16_t*)&valueNode->typeData = u16;
+ }
+ break;
+ }
+ uint32_t u32 = valueNode->datum.i;
+ if (u32 == valueNode->datum.i) {
+ int32_t i32 = valueNode->datum.i;
+ if (i32 == valueNode->datum.i) {
+ valueNode->node.resType.type = TSDB_DATA_TYPE_INT;
+ *(int32_t*)&valueNode->typeData = i32;
+ } else {
+ valueNode->node.resType.type = TSDB_DATA_TYPE_UINT;
+ *(uint32_t*)&valueNode->typeData = u32;
+ }
+ break;
+ }
+ break;
+ }
+ case TSDB_DATA_TYPE_DOUBLE: {
+ float f = valueNode->datum.d;
+ if (FLT_EQUAL(f, valueNode->datum.d)) {
+ valueNode->node.resType.type = TSDB_DATA_TYPE_FLOAT;
+ *(float*)&valueNode->typeData = f;
+ break;
+ }
+ break;
+ }
+ default:
+ break;
+ }
+}
+
int32_t sclInitParam(SNode* node, SScalarParam *param, SScalarCtx *ctx, int32_t *rowNum) {
switch (nodeType(node)) {
case QUERY_NODE_LEFT_VALUE: {
@@ -675,6 +752,10 @@ EDealRes sclRewriteNonConstOperator(SNode** pNode, SScalarCtx *ctx) {
return DEAL_RES_ERROR;
}
}
+
+ if (SCL_IS_COMPARISON_OPERATOR(node->opType) && SCL_DOWNGRADE_DATETYPE(valueNode->node.resType.type)) {
+ sclDowngradeValueType(valueNode);
+ }
}
if (node->pRight && (QUERY_NODE_VALUE == nodeType(node->pRight))) {
@@ -692,6 +773,10 @@ EDealRes sclRewriteNonConstOperator(SNode** pNode, SScalarCtx *ctx) {
return DEAL_RES_ERROR;
}
}
+
+ if (SCL_IS_COMPARISON_OPERATOR(node->opType) && SCL_DOWNGRADE_DATETYPE(valueNode->node.resType.type)) {
+ sclDowngradeValueType(valueNode);
+ }
}
if (node->pRight && (QUERY_NODE_NODE_LIST == nodeType(node->pRight))) {
diff --git a/source/libs/scalar/src/sclvector.c b/source/libs/scalar/src/sclvector.c
index aaa70ef5ae5f8ab00ce88b56433885cd00004893..a003315fcabeab38f49ae3a6056e25dff10e4e16 100644
--- a/source/libs/scalar/src/sclvector.c
+++ b/source/libs/scalar/src/sclvector.c
@@ -909,11 +909,11 @@ int32_t vectorConvertImpl(const SScalarParam* pIn, SScalarParam* pOut, int32_t*
int8_t gConvertTypes[TSDB_DATA_TYPE_BLOB+1][TSDB_DATA_TYPE_BLOB+1] = {
/* NULL BOOL TINY SMAL INT BIG FLOA DOUB VARC TIME NCHA UTIN USMA UINT UBIG JSON VARB DECI BLOB */
/*NULL*/ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-/*BOOL*/ 0, 0, 0, 3, 4, 5, 6, 7, 7, 9, 7, 0, 12, 13, 14, 0, 7, 0, 0,
+/*BOOL*/ 0, 0, 2, 3, 4, 5, 6, 7, 7, 9, 7, 11, 12, 13, 14, 0, 7, 0, 0,
/*TINY*/ 0, 0, 0, 3, 4, 5, 6, 7, 7, 9, 7, 3, 4, 5, 7, 0, 7, 0, 0,
/*SMAL*/ 0, 0, 0, 0, 4, 5, 6, 7, 7, 9, 7, 3, 4, 5, 7, 0, 7, 0, 0,
/*INT */ 0, 0, 0, 0, 0, 5, 6, 7, 7, 9, 7, 4, 4, 5, 7, 0, 7, 0, 0,
-/*BIGI*/ 0, 0, 0, 0, 0, 0, 6, 7, 7, 0, 7, 5, 5, 5, 7, 0, 7, 0, 0,
+/*BIGI*/ 0, 0, 0, 0, 0, 0, 6, 7, 7, 9, 7, 5, 5, 5, 7, 0, 7, 0, 0,
/*FLOA*/ 0, 0, 0, 0, 0, 0, 0, 7, 7, 6, 7, 6, 6, 6, 6, 0, 7, 0, 0,
/*DOUB*/ 0, 0, 0, 0, 0, 0, 0, 0, 7, 7, 7, 7, 7, 7, 7, 0, 7, 0, 0,
/*VARC*/ 0, 0, 0, 0, 0, 0, 0, 0, 0, 9, 8, 7, 7, 7, 7, 0, 0, 0, 0,
@@ -1681,10 +1681,14 @@ void vectorBitOr(SScalarParam* pLeft, SScalarParam* pRight, SScalarParam *pOut,
void vectorCompareImpl(SScalarParam* pLeft, SScalarParam* pRight, SScalarParam *pOut, int32_t _ord, int32_t optr) {
int32_t i = ((_ord) == TSDB_ORDER_ASC) ? 0 : TMAX(pLeft->numOfRows, pRight->numOfRows) - 1;
int32_t step = ((_ord) == TSDB_ORDER_ASC) ? 1 : -1;
-
- __compar_fn_t fp = filterGetCompFunc(GET_PARAM_TYPE(pLeft), optr);
- if(terrno != TSDB_CODE_SUCCESS){
- return;
+ int32_t lType = GET_PARAM_TYPE(pLeft);
+ int32_t rType = GET_PARAM_TYPE(pRight);
+ __compar_fn_t fp = NULL;
+
+ if (lType == rType) {
+ fp = filterGetCompFunc(lType, optr);
+ } else {
+ fp = filterGetCompFuncEx(lType, rType, optr);
}
pOut->numOfRows = TMAX(pLeft->numOfRows, pRight->numOfRows);
@@ -1716,22 +1720,26 @@ void vectorCompareImpl(SScalarParam* pLeft, SScalarParam* pRight, SScalarParam *
void vectorCompare(SScalarParam* pLeft, SScalarParam* pRight, SScalarParam *pOut, int32_t _ord, int32_t optr) {
SScalarParam pLeftOut = {0};
SScalarParam pRightOut = {0};
-
- vectorConvert(pLeft, pRight, &pLeftOut, &pRightOut);
-
SScalarParam *param1 = NULL;
SScalarParam *param2 = NULL;
- if (pLeftOut.columnData != NULL) {
- param1 = &pLeftOut;
- } else {
+ if (SCL_NO_NEED_CONVERT_COMPARISION(GET_PARAM_TYPE(pLeft), GET_PARAM_TYPE(pRight), optr)) {
param1 = pLeft;
- }
-
- if (pRightOut.columnData != NULL) {
- param2 = &pRightOut;
- } else {
param2 = pRight;
+ } else {
+ vectorConvert(pLeft, pRight, &pLeftOut, &pRightOut);
+
+ if (pLeftOut.columnData != NULL) {
+ param1 = &pLeftOut;
+ } else {
+ param1 = pLeft;
+ }
+
+ if (pRightOut.columnData != NULL) {
+ param2 = &pRightOut;
+ } else {
+ param2 = pRight;
+ }
}
vectorCompareImpl(param1, param2, pOut, _ord, optr);
diff --git a/source/libs/scheduler/inc/schInt.h b/source/libs/scheduler/inc/schInt.h
index ce841ed83cc527849021517a6ed9c7a9c3a56f0c..957fd46ba5a767858a3bb5bbe50142b4f1c1ce47 100644
--- a/source/libs/scheduler/inc/schInt.h
+++ b/source/libs/scheduler/inc/schInt.h
@@ -283,7 +283,7 @@ typedef struct SSchJob {
} SSchJob;
typedef struct SSchTaskCtx {
- SSchJob *pJob;
+ int64_t jobRid;
SSchTask *pTask;
} SSchTaskCtx;
diff --git a/source/libs/scheduler/src/schRemote.c b/source/libs/scheduler/src/schRemote.c
index ecd9daf1bcd3b83803754017aec27c1ebe62becf..5a64aaaebb3860d2c6729ac8eb1e00be0cc9cda1 100644
--- a/source/libs/scheduler/src/schRemote.c
+++ b/source/libs/scheduler/src/schRemote.c
@@ -102,15 +102,30 @@ int32_t schHandleResponseMsg(SSchJob *pJob, SSchTask *pTask, int32_t execId, SDa
tDecoderInit(&coder, msg, msgSize);
code = tDecodeSVCreateTbBatchRsp(&coder, &batchRsp);
if (TSDB_CODE_SUCCESS == code && batchRsp.nRsps > 0) {
+ SCH_LOCK(SCH_WRITE, &pJob->resLock);
+ if (NULL == pJob->execRes.res) {
+ pJob->execRes.res = taosArrayInit(batchRsp.nRsps, POINTER_BYTES);
+ pJob->execRes.msgType = TDMT_VND_CREATE_TABLE;
+ }
+
for (int32_t i = 0; i < batchRsp.nRsps; ++i) {
SVCreateTbRsp *rsp = batchRsp.pRsps + i;
+ if (rsp->pMeta) {
+ taosArrayPush((SArray*)pJob->execRes.res, &rsp->pMeta);
+ }
+
if (TSDB_CODE_SUCCESS != rsp->code) {
code = rsp->code;
- tDecoderClear(&coder);
- SCH_ERR_JRET(code);
}
}
+ SCH_UNLOCK(SCH_WRITE, &pJob->resLock);
+
+ if (taosArrayGetSize((SArray*)pJob->execRes.res) <= 0) {
+ taosArrayDestroy((SArray*)pJob->execRes.res);
+ pJob->execRes.res = NULL;
+ }
}
+
tDecoderClear(&coder);
SCH_ERR_JRET(code);
}
diff --git a/source/libs/scheduler/src/schTask.c b/source/libs/scheduler/src/schTask.c
index d16d15c1191a3360be19e6c485f13bc3ce66d0ef..c5f161b66a8312a8c5919efca8fe8b1b2d61308c 100644
--- a/source/libs/scheduler/src/schTask.c
+++ b/source/libs/scheduler/src/schTask.c
@@ -52,7 +52,7 @@ void schInitTaskRetryTimes(SSchJob *pJob, SSchTask *pTask, SSchLevel *pLevel) {
int32_t nodeNum = taosArrayGetSize(pJob->nodeList);
pTask->maxRetryTimes = TMAX(nodeNum, SCH_DEFAULT_MAX_RETRY_NUM);
}
-
+
pTask->maxExecTimes = pTask->maxRetryTimes * (pLevel->level + 1);
}
@@ -139,13 +139,15 @@ int32_t schUpdateTaskExecNode(SSchJob *pJob, SSchTask *pTask, void *handle, int3
}
if ((execId != pTask->execId) || pTask->waitRetry) { // ignore it
- SCH_TASK_DLOG("handle not updated since execId %d is already not current execId %d, waitRetry %d", execId, pTask->execId, pTask->waitRetry);
+ SCH_TASK_DLOG("handle not updated since execId %d is already not current execId %d, waitRetry %d", execId,
+ pTask->execId, pTask->waitRetry);
return TSDB_CODE_SUCCESS;
}
SSchNodeInfo *nodeInfo = taosHashGet(pTask->execNodes, &execId, sizeof(execId));
if (NULL == nodeInfo) { // ignore it
- SCH_TASK_DLOG("handle not updated since execId %d already not exist, current execId %d, waitRetry %d", execId, pTask->execId, pTask->waitRetry);
+ SCH_TASK_DLOG("handle not updated since execId %d already not exist, current execId %d, waitRetry %d", execId,
+ pTask->execId, pTask->waitRetry);
return TSDB_CODE_SUCCESS;
}
@@ -314,7 +316,7 @@ int32_t schRescheduleTask(SSchJob *pJob, SSchTask *pTask) {
if (!schMgmt.cfg.enableReSchedule) {
return TSDB_CODE_SUCCESS;
}
-
+
if (SCH_IS_DATA_BIND_TASK(pTask)) {
return TSDB_CODE_SUCCESS;
}
@@ -341,7 +343,8 @@ int32_t schDoTaskRedirect(SSchJob *pJob, SSchTask *pTask, SDataBuf *pData, int32
}
if (((pTask->execId + 1) >= pTask->maxExecTimes) || ((pTask->retryTimes + 1) > pTask->maxRetryTimes)) {
- SCH_TASK_DLOG("task no more retry since reach max times %d:%d, execId %d", pTask->maxRetryTimes, pTask->maxExecTimes, pTask->execId);
+ SCH_TASK_DLOG("task no more retry since reach max times %d:%d, execId %d", pTask->maxRetryTimes,
+ pTask->maxExecTimes, pTask->execId);
schHandleJobFailure(pJob, rspCode);
return TSDB_CODE_SUCCESS;
}
@@ -548,7 +551,8 @@ int32_t schTaskCheckSetRetry(SSchJob *pJob, SSchTask *pTask, int32_t errCode, bo
if ((pTask->retryTimes + 1) > pTask->maxRetryTimes) {
*needRetry = false;
- SCH_TASK_DLOG("task no more retry since reach max retry times, retryTimes:%d/%d", pTask->retryTimes, pTask->maxRetryTimes);
+ SCH_TASK_DLOG("task no more retry since reach max retry times, retryTimes:%d/%d", pTask->retryTimes,
+ pTask->maxRetryTimes);
return TSDB_CODE_SUCCESS;
}
@@ -564,25 +568,25 @@ int32_t schTaskCheckSetRetry(SSchJob *pJob, SSchTask *pTask, int32_t errCode, bo
return TSDB_CODE_SUCCESS;
}
-/*
- if (SCH_IS_DATA_BIND_TASK(pTask)) {
- if ((pTask->execId + 1) >= SCH_TASK_NUM_OF_EPS(&pTask->plan->execNode)) {
- *needRetry = false;
- SCH_TASK_DLOG("task no more retry since all ep tried, execId:%d, epNum:%d", pTask->execId,
- SCH_TASK_NUM_OF_EPS(&pTask->plan->execNode));
- return TSDB_CODE_SUCCESS;
- }
- } else {
- int32_t candidateNum = taosArrayGetSize(pTask->candidateAddrs);
+ /*
+ if (SCH_IS_DATA_BIND_TASK(pTask)) {
+ if ((pTask->execId + 1) >= SCH_TASK_NUM_OF_EPS(&pTask->plan->execNode)) {
+ *needRetry = false;
+ SCH_TASK_DLOG("task no more retry since all ep tried, execId:%d, epNum:%d", pTask->execId,
+ SCH_TASK_NUM_OF_EPS(&pTask->plan->execNode));
+ return TSDB_CODE_SUCCESS;
+ }
+ } else {
+ int32_t candidateNum = taosArrayGetSize(pTask->candidateAddrs);
- if ((pTask->candidateIdx + 1) >= candidateNum && (TSDB_CODE_SCH_TIMEOUT_ERROR != errCode)) {
- *needRetry = false;
- SCH_TASK_DLOG("task no more retry since all candiates tried, candidateIdx:%d, candidateNum:%d",
- pTask->candidateIdx, candidateNum);
- return TSDB_CODE_SUCCESS;
+ if ((pTask->candidateIdx + 1) >= candidateNum && (TSDB_CODE_SCH_TIMEOUT_ERROR != errCode)) {
+ *needRetry = false;
+ SCH_TASK_DLOG("task no more retry since all candiates tried, candidateIdx:%d, candidateNum:%d",
+ pTask->candidateIdx, candidateNum);
+ return TSDB_CODE_SUCCESS;
+ }
}
- }
-*/
+ */
*needRetry = true;
SCH_TASK_DLOG("task need the %dth retry, errCode:%x - %s", pTask->execId + 1, errCode, tstrerror(errCode));
@@ -630,8 +634,9 @@ int32_t schSetAddrsFromNodeList(SSchJob *pJob, SSchTask *pTask) {
SCH_ERR_RET(TSDB_CODE_QRY_OUT_OF_MEMORY);
}
- SCH_TASK_TLOG("set %dth candidate addr, id %d, inUse:%d/%d, fqdn:%s, port:%d", i, naddr->nodeId, naddr->epSet.inUse, naddr->epSet.numOfEps,
- SCH_GET_CUR_EP(naddr)->fqdn, SCH_GET_CUR_EP(naddr)->port);
+ SCH_TASK_TLOG("set %dth candidate addr, id %d, inUse:%d/%d, fqdn:%s, port:%d", i, naddr->nodeId,
+ naddr->epSet.inUse, naddr->epSet.numOfEps, SCH_GET_CUR_EP(naddr)->fqdn,
+ SCH_GET_CUR_EP(naddr)->port);
++addNum;
}
@@ -711,10 +716,10 @@ int32_t schSwitchTaskCandidateAddr(SSchJob *pJob, SSchTask *pTask) {
if (candidateNum <= 1) {
goto _return;
}
-
+
switch (schMgmt.cfg.schPolicy) {
case SCH_LOAD_SEQ:
- case SCH_ALL:
+ case SCH_ALL:
default:
if (++pTask->candidateIdx >= candidateNum) {
pTask->candidateIdx = 0;
@@ -732,7 +737,7 @@ int32_t schSwitchTaskCandidateAddr(SSchJob *pJob, SSchTask *pTask) {
_return:
SCH_TASK_DLOG("switch task candiateIdx to %d/%d", pTask->candidateIdx, candidateNum);
-
+
return TSDB_CODE_SUCCESS;
}
@@ -759,7 +764,7 @@ void schDropTaskOnExecNode(SSchJob *pJob, SSchTask *pTask) {
return;
}
- int32_t i = 0;
+ int32_t i = 0;
SSchNodeInfo *nodeInfo = taosHashIterate(pTask->execNodes, NULL);
while (nodeInfo) {
if (nodeInfo->handle) {
@@ -821,10 +826,16 @@ int32_t schProcessOnTaskStatusRsp(SQueryNodeEpId *pEpId, SArray *pStatusList) {
int32_t schLaunchTaskImpl(void *param) {
SSchTaskCtx *pCtx = (SSchTaskCtx *)param;
- SSchJob *pJob = pCtx->pJob;
+ SSchJob *pJob = schAcquireJob(pCtx->jobRid);
+ if (NULL == pJob) {
+ qDebug("job refId 0x%" PRIx64 " already not exist", pCtx->jobRid);
+ taosMemoryFree(param);
+ SCH_RET(TSDB_CODE_SCH_JOB_IS_DROPPING);
+ }
+
SSchTask *pTask = pCtx->pTask;
- int8_t status = 0;
- int32_t code = 0;
+ int8_t status = 0;
+ int32_t code = 0;
atomic_add_fetch_32(&pTask->level->taskLaunchedNum, 1);
pTask->execId++;
@@ -880,17 +891,18 @@ _return:
}
}
+ schReleaseJob(pJob->refId);
+
SCH_RET(code);
}
-int32_t schAsyncLaunchTaskImpl(SSchJob *pJob, SSchTask *pTask) {
-
+int32_t schAsyncLaunchTaskImpl(SSchJob *pJob, SSchTask *pTask) {
SSchTaskCtx *param = taosMemoryCalloc(1, sizeof(SSchTaskCtx));
if (NULL == param) {
SCH_ERR_RET(TSDB_CODE_OUT_OF_MEMORY);
}
-
- param->pJob = pJob;
+
+ param->jobRid = pJob->refId;
param->pTask = pTask;
if (pJob->taskNum >= SCH_MIN_AYSNC_EXEC_NUM) {
@@ -898,7 +910,7 @@ int32_t schAsyncLaunchTaskImpl(SSchJob *pJob, SSchTask *pTask) {
} else {
SCH_ERR_RET(schLaunchTaskImpl(param));
}
-
+
return TSDB_CODE_SUCCESS;
}
diff --git a/source/libs/stream/src/streamDispatch.c b/source/libs/stream/src/streamDispatch.c
index c78ff0756f22e50e1f64a3f02526a04376eb9b08..7cdb7c0db95cd582fad03174d0fa6927cb1fd668 100644
--- a/source/libs/stream/src/streamDispatch.c
+++ b/source/libs/stream/src/streamDispatch.c
@@ -243,6 +243,39 @@ FAIL:
return 0;
}
+int32_t streamSearchAndAddBlock(SStreamTask* pTask, SStreamDispatchReq* pReqs, SSDataBlock* pDataBlock, int32_t vgSz,
+ int64_t groupId) {
+ char* ctbName = buildCtbNameByGroupId(pTask->shuffleDispatcher.stbFullName, groupId);
+ SArray* vgInfo = pTask->shuffleDispatcher.dbInfo.pVgroupInfos;
+
+ /*uint32_t hashValue = MurmurHash3_32(ctbName, strlen(ctbName));*/
+ SUseDbRsp* pDbInfo = &pTask->shuffleDispatcher.dbInfo;
+ uint32_t hashValue =
+ taosGetTbHashVal(ctbName, strlen(ctbName), pDbInfo->hashMethod, pDbInfo->hashPrefix, pDbInfo->hashSuffix);
+ taosMemoryFree(ctbName);
+
+ bool found = false;
+ // TODO: optimize search
+ int32_t j;
+ for (j = 0; j < vgSz; j++) {
+ SVgroupInfo* pVgInfo = taosArrayGet(vgInfo, j);
+ ASSERT(pVgInfo->vgId > 0);
+ if (hashValue >= pVgInfo->hashBegin && hashValue <= pVgInfo->hashEnd) {
+ if (streamAddBlockToDispatchMsg(pDataBlock, &pReqs[j]) < 0) {
+ return -1;
+ }
+ if (pReqs[j].blockNum == 0) {
+ atomic_add_fetch_32(&pTask->shuffleDispatcher.waitingRspCnt, 1);
+ }
+ pReqs[j].blockNum++;
+ found = true;
+ break;
+ }
+ }
+ ASSERT(found);
+ return 0;
+}
+
int32_t streamDispatchAllBlocks(SStreamTask* pTask, const SStreamDataBlock* pData) {
int32_t code = -1;
int32_t blockNum = taosArrayGetSize(pData->blocks);
@@ -317,20 +350,10 @@ int32_t streamDispatchAllBlocks(SStreamTask* pTask, const SStreamDataBlock* pDat
for (int32_t i = 0; i < blockNum; i++) {
SSDataBlock* pDataBlock = taosArrayGet(pData->blocks, i);
- char* ctbName = buildCtbNameByGroupId(pTask->shuffleDispatcher.stbFullName, pDataBlock->info.groupId);
-
- // TODO: get hash function by hashMethod
- uint32_t hashValue = MurmurHash3_32(ctbName, strlen(ctbName));
-
- taosMemoryFree(ctbName);
- bool found = false;
- // TODO: optimize search
- int32_t j;
- for (j = 0; j < vgSz; j++) {
- SVgroupInfo* pVgInfo = taosArrayGet(vgInfo, j);
- ASSERT(pVgInfo->vgId > 0);
- if (hashValue >= pVgInfo->hashBegin && hashValue <= pVgInfo->hashEnd) {
+ // TODO: do not use broadcast
+ if (pDataBlock->info.type == STREAM_DELETE_RESULT) {
+ for (int32_t j = 0; j < vgSz; j++) {
if (streamAddBlockToDispatchMsg(pDataBlock, &pReqs[j]) < 0) {
goto FAIL_SHUFFLE_DISPATCH;
}
@@ -338,11 +361,13 @@ int32_t streamDispatchAllBlocks(SStreamTask* pTask, const SStreamDataBlock* pDat
atomic_add_fetch_32(&pTask->shuffleDispatcher.waitingRspCnt, 1);
}
pReqs[j].blockNum++;
- found = true;
- break;
}
+ continue;
+ }
+
+ if (streamSearchAndAddBlock(pTask, pReqs, pDataBlock, vgSz, pDataBlock->info.groupId) < 0) {
+ goto FAIL_SHUFFLE_DISPATCH;
}
- ASSERT(found);
}
for (int32_t i = 0; i < vgSz; i++) {
@@ -358,7 +383,7 @@ int32_t streamDispatchAllBlocks(SStreamTask* pTask, const SStreamDataBlock* pDat
FAIL_SHUFFLE_DISPATCH:
if (pReqs) {
for (int32_t i = 0; i < vgSz; i++) {
- taosArrayDestroy(pReqs[i].data);
+ taosArrayDestroyP(pReqs[i].data, taosMemoryFree);
taosArrayDestroy(pReqs[i].dataLen);
}
taosMemoryFree(pReqs);
diff --git a/source/libs/stream/src/streamExec.c b/source/libs/stream/src/streamExec.c
index 06ca26f0292df2447fa7c267a0d43e65f4117964..102bad742652005df440b5d4d7a87bcef34ba636 100644
--- a/source/libs/stream/src/streamExec.c
+++ b/source/libs/stream/src/streamExec.c
@@ -140,7 +140,6 @@ int32_t streamPipelineExec(SStreamTask* pTask, int32_t batchNum, bool dispatch)
return 0;
}
-// TODO: handle version
int32_t streamExecForAll(SStreamTask* pTask) {
while (1) {
int32_t batchCnt = 1;
diff --git a/source/libs/stream/src/streamMeta.c b/source/libs/stream/src/streamMeta.c
index 5ff700546cf63acd3e0c4d0798383d6724947de1..1442ed2e0509e37d8b21806dc05343adcaa0f32c 100644
--- a/source/libs/stream/src/streamMeta.c
+++ b/source/libs/stream/src/streamMeta.c
@@ -14,7 +14,7 @@
*/
#include "executor.h"
-#include "tstream.h"
+#include "streamInc.h"
#include "ttimer.h"
SStreamMeta* streamMetaOpen(const char* path, void* ahandle, FTaskExpand expandFunc) {
@@ -23,17 +23,23 @@ SStreamMeta* streamMetaOpen(const char* path, void* ahandle, FTaskExpand expandF
terrno = TSDB_CODE_OUT_OF_MEMORY;
return NULL;
}
- pMeta->path = strdup(path);
+ int32_t len = strlen(path) + 20;
+ char* streamPath = taosMemoryCalloc(1, len);
+ sprintf(streamPath, "%s/%s", path, "stream");
+ pMeta->path = strdup(streamPath);
if (tdbOpen(pMeta->path, 16 * 1024, 1, &pMeta->db) < 0) {
goto _err;
}
+ sprintf(streamPath, "%s/%s", pMeta->path, "checkpoints");
+ mkdir(streamPath, 0755);
+ taosMemoryFree(streamPath);
+
if (tdbTbOpen("task.db", sizeof(int32_t), -1, NULL, pMeta->db, &pMeta->pTaskDb) < 0) {
goto _err;
}
- // open state storage backend
- if (tdbTbOpen("state.db", sizeof(int32_t), -1, NULL, pMeta->db, &pMeta->pStateDb) < 0) {
+ if (tdbTbOpen("checkpoint.db", sizeof(int32_t), -1, NULL, pMeta->db, &pMeta->pCheckpointDb) < 0) {
goto _err;
}
@@ -49,16 +55,13 @@ SStreamMeta* streamMetaOpen(const char* path, void* ahandle, FTaskExpand expandF
pMeta->ahandle = ahandle;
pMeta->expandFunc = expandFunc;
- if (streamLoadTasks(pMeta) < 0) {
- goto _err;
- }
return pMeta;
_err:
if (pMeta->path) taosMemoryFree(pMeta->path);
if (pMeta->pTasks) taosHashCleanup(pMeta->pTasks);
- if (pMeta->pStateDb) tdbTbClose(pMeta->pStateDb);
if (pMeta->pTaskDb) tdbTbClose(pMeta->pTaskDb);
+ if (pMeta->pCheckpointDb) tdbTbClose(pMeta->pCheckpointDb);
if (pMeta->db) tdbClose(pMeta->db);
taosMemoryFree(pMeta);
return NULL;
@@ -67,7 +70,7 @@ _err:
void streamMetaClose(SStreamMeta* pMeta) {
tdbCommit(pMeta->db, &pMeta->txn);
tdbTbClose(pMeta->pTaskDb);
- tdbTbClose(pMeta->pStateDb);
+ tdbTbClose(pMeta->pCheckpointDb);
tdbClose(pMeta->db);
void* pIter = NULL;
@@ -262,6 +265,8 @@ int32_t streamLoadTasks(SStreamMeta* pMeta) {
}
}
+ tdbFree(pKey);
+ tdbFree(pVal);
if (tdbTbcClose(pCur) < 0) {
return -1;
}
diff --git a/source/libs/stream/src/streamRecover.c b/source/libs/stream/src/streamRecover.c
index 263053778b1ae94de5a5353edf158e37604baf98..0505c3edd6dd8211792679b7164bcc001bde6c4e 100644
--- a/source/libs/stream/src/streamRecover.c
+++ b/source/libs/stream/src/streamRecover.c
@@ -176,6 +176,7 @@ int32_t tDecodeSStreamTaskRecoverRsp(SDecoder* pDecoder, SStreamRecoverDownstrea
}
int32_t streamSaveStateInfo(SStreamMeta* pMeta, SStreamTask* pTask) {
+#if 0
void* buf = NULL;
ASSERT(pTask->taskLevel == TASK_LEVEL__SINK);
@@ -224,10 +225,12 @@ int32_t streamSaveStateInfo(SStreamMeta* pMeta, SStreamTask* pTask) {
FAIL:
if (buf) taosMemoryFree(buf);
return -1;
+#endif
return 0;
}
int32_t streamLoadStateInfo(SStreamMeta* pMeta, SStreamTask* pTask) {
+#if 0
void* pVal = NULL;
int32_t vLen = 0;
if (tdbTbGet(pMeta->pStateDb, &pTask->taskId, sizeof(void*), &pVal, &vLen) < 0) {
@@ -241,7 +244,7 @@ int32_t streamLoadStateInfo(SStreamMeta* pMeta, SStreamTask* pTask) {
pTask->nextCheckId = aggCheckpoint.checkpointId + 1;
pTask->checkpointInfo = aggCheckpoint.checkpointVer;
-
+#endif
return 0;
}
diff --git a/source/libs/stream/src/streamState.c b/source/libs/stream/src/streamState.c
new file mode 100644
index 0000000000000000000000000000000000000000..5efdbb46795e52550e51c57caba18a8662b8d99a
--- /dev/null
+++ b/source/libs/stream/src/streamState.c
@@ -0,0 +1,228 @@
+/*
+ * Copyright (c) 2019 TAOS Data, Inc.
+ *
+ * This program is free software: you can use, redistribute, and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3
+ * or later ("AGPL"), as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see .
+ */
+
+#include "executor.h"
+#include "streamInc.h"
+#include "tcommon.h"
+#include "ttimer.h"
+
+SStreamState* streamStateOpen(char* path, SStreamTask* pTask) {
+ SStreamState* pState = taosMemoryCalloc(1, sizeof(SStreamState));
+ if (pState == NULL) {
+ terrno = TSDB_CODE_OUT_OF_MEMORY;
+ return NULL;
+ }
+ char statePath[300];
+ sprintf(statePath, "%s/%d", path, pTask->taskId);
+ if (tdbOpen(statePath, 4096, 256, &pState->db) < 0) {
+ goto _err;
+ }
+
+ // open state storage backend
+ if (tdbTbOpen("state.db", sizeof(SWinKey), -1, SWinKeyCmpr, pState->db, &pState->pStateDb) < 0) {
+ goto _err;
+ }
+
+ if (streamStateBegin(pState) < 0) {
+ goto _err;
+ }
+
+ pState->pOwner = pTask;
+
+ return pState;
+
+_err:
+ if (pState->pStateDb) tdbTbClose(pState->pStateDb);
+ if (pState->db) tdbClose(pState->db);
+ taosMemoryFree(pState);
+ return NULL;
+}
+
+void streamStateClose(SStreamState* pState) {
+ tdbCommit(pState->db, &pState->txn);
+ tdbTbClose(pState->pStateDb);
+ tdbClose(pState->db);
+
+ taosMemoryFree(pState);
+}
+
+int32_t streamStateBegin(SStreamState* pState) {
+ if (tdbTxnOpen(&pState->txn, 0, tdbDefaultMalloc, tdbDefaultFree, NULL, TDB_TXN_WRITE | TDB_TXN_READ_UNCOMMITTED) <
+ 0) {
+ return -1;
+ }
+
+ if (tdbBegin(pState->db, &pState->txn) < 0) {
+ tdbTxnClose(&pState->txn);
+ return -1;
+ }
+ return 0;
+}
+
+int32_t streamStateCommit(SStreamState* pState) {
+ if (tdbCommit(pState->db, &pState->txn) < 0) {
+ return -1;
+ }
+ memset(&pState->txn, 0, sizeof(TXN));
+ if (tdbTxnOpen(&pState->txn, 0, tdbDefaultMalloc, tdbDefaultFree, NULL, TDB_TXN_WRITE | TDB_TXN_READ_UNCOMMITTED) <
+ 0) {
+ return -1;
+ }
+ if (tdbBegin(pState->db, &pState->txn) < 0) {
+ return -1;
+ }
+ return 0;
+}
+
+int32_t streamStateAbort(SStreamState* pState) {
+ if (tdbAbort(pState->db, &pState->txn) < 0) {
+ return -1;
+ }
+ memset(&pState->txn, 0, sizeof(TXN));
+ if (tdbTxnOpen(&pState->txn, 0, tdbDefaultMalloc, tdbDefaultFree, NULL, TDB_TXN_WRITE | TDB_TXN_READ_UNCOMMITTED) <
+ 0) {
+ return -1;
+ }
+ if (tdbBegin(pState->db, &pState->txn) < 0) {
+ return -1;
+ }
+ return 0;
+}
+
+int32_t streamStatePut(SStreamState* pState, const SWinKey* key, const void* value, int32_t vLen) {
+ return tdbTbUpsert(pState->pStateDb, key, sizeof(SWinKey), value, vLen, &pState->txn);
+}
+int32_t streamStateGet(SStreamState* pState, const SWinKey* key, void** pVal, int32_t* pVLen) {
+ return tdbTbGet(pState->pStateDb, key, sizeof(SWinKey), pVal, pVLen);
+}
+
+int32_t streamStateDel(SStreamState* pState, const SWinKey* key) {
+ return tdbTbDelete(pState->pStateDb, key, sizeof(SWinKey), &pState->txn);
+}
+
+int32_t streamStateAddIfNotExist(SStreamState* pState, const SWinKey* key, void** pVal, int32_t* pVLen) {
+ // todo refactor
+ int32_t size = *pVLen;
+ if (streamStateGet(pState, key, pVal, pVLen) == 0) {
+ return 0;
+ }
+ void* tmp = taosMemoryCalloc(1, size);
+ if (streamStatePut(pState, key, &tmp, size) == 0) {
+ taosMemoryFree(tmp);
+ int32_t code = streamStateGet(pState, key, pVal, pVLen);
+ ASSERT(code == 0);
+ return code;
+ }
+ taosMemoryFree(tmp);
+ return -1;
+}
+
+int32_t streamStateReleaseBuf(SStreamState* pState, const SWinKey* key, void* pVal) {
+ // todo refactor
+ streamFreeVal(pVal);
+ return 0;
+}
+
+SStreamStateCur* streamStateGetCur(SStreamState* pState, const SWinKey* key) {
+ SStreamStateCur* pCur = taosMemoryCalloc(1, sizeof(SStreamStateCur));
+ if (pCur == NULL) return NULL;
+ tdbTbcOpen(pState->pStateDb, &pCur->pCur, NULL);
+
+ int32_t c;
+ tdbTbcMoveTo(pCur->pCur, key, sizeof(SWinKey), &c);
+ if (c != 0) {
+ taosMemoryFree(pCur);
+ return NULL;
+ }
+ return pCur;
+}
+
+int32_t streamStateGetKVByCur(SStreamStateCur* pCur, SWinKey* pKey, const void** pVal, int32_t* pVLen) {
+ const SWinKey* pKTmp = NULL;
+ int32_t kLen;
+ if (tdbTbcGet(pCur->pCur, (const void**)&pKTmp, &kLen, pVal, pVLen) < 0) {
+ return -1;
+ }
+ *pKey = *pKTmp;
+ return 0;
+}
+
+int32_t streamStateSeekFirst(SStreamState* pState, SStreamStateCur* pCur) {
+ //
+ return tdbTbcMoveToFirst(pCur->pCur);
+}
+
+int32_t streamStateSeekLast(SStreamState* pState, SStreamStateCur* pCur) {
+ //
+ return tdbTbcMoveToLast(pCur->pCur);
+}
+
+SStreamStateCur* streamStateSeekKeyNext(SStreamState* pState, const SWinKey* key) {
+ SStreamStateCur* pCur = taosMemoryCalloc(1, sizeof(SStreamStateCur));
+ if (pCur == NULL) {
+ return NULL;
+ }
+
+ int32_t c;
+ if (tdbTbcMoveTo(pCur->pCur, key, sizeof(SWinKey), &c) < 0) {
+ taosMemoryFree(pCur);
+ return NULL;
+ }
+ if (c > 0) return pCur;
+
+ if (tdbTbcMoveToNext(pCur->pCur) < 0) {
+ taosMemoryFree(pCur);
+ return NULL;
+ }
+
+ return pCur;
+}
+
+SStreamStateCur* streamStateSeekKeyPrev(SStreamState* pState, const SWinKey* key) {
+ SStreamStateCur* pCur = taosMemoryCalloc(1, sizeof(SStreamStateCur));
+ if (pCur == NULL) {
+ return NULL;
+ }
+
+ int32_t c;
+ if (tdbTbcMoveTo(pCur->pCur, key, sizeof(SWinKey), &c) < 0) {
+ taosMemoryFree(pCur);
+ return NULL;
+ }
+ if (c < 0) return pCur;
+
+ if (tdbTbcMoveToPrev(pCur->pCur) < 0) {
+ taosMemoryFree(pCur);
+ return NULL;
+ }
+
+ return pCur;
+}
+
+int32_t streamStateCurNext(SStreamState* pState, SStreamStateCur* pCur) {
+ //
+ return tdbTbcMoveToNext(pCur->pCur);
+}
+
+int32_t streamStateCurPrev(SStreamState* pState, SStreamStateCur* pCur) {
+ //
+ return tdbTbcMoveToPrev(pCur->pCur);
+}
+void streamStateFreeCur(SStreamStateCur* pCur) {
+ tdbTbcClose(pCur->pCur);
+ taosMemoryFree(pCur);
+}
+
+void streamFreeVal(void* val) { tdbFree(val); }
diff --git a/source/libs/stream/src/streamTask.c b/source/libs/stream/src/streamTask.c
index 4009a47c65af469bc8a6f4fe5443411306e4ec2b..ce5917de296c317f739e79cb78cda21660769aa8 100644
--- a/source/libs/stream/src/streamTask.c
+++ b/source/libs/stream/src/streamTask.c
@@ -165,5 +165,8 @@ void tFreeSStreamTask(SStreamTask* pTask) {
if (pTask->outputType == TASK_OUTPUT__SHUFFLE_DISPATCH) {
taosArrayDestroy(pTask->shuffleDispatcher.dbInfo.pVgroupInfos);
}
+
+ if (pTask->pState) streamStateClose(pTask->pState);
+
taosMemoryFree(pTask);
}
diff --git a/source/libs/stream/src/streamUpdate.c b/source/libs/stream/src/streamUpdate.c
index d053662bd30287d5d9589a3881c8588fd3eb82ec..332f7ad2fd7be60f532b1394eb2d72adf985b82a 100644
--- a/source/libs/stream/src/streamUpdate.c
+++ b/source/libs/stream/src/streamUpdate.c
@@ -170,8 +170,17 @@ bool updateInfoIsUpdated(SUpdateInfo *pInfo, uint64_t tableId, TSKEY ts) {
if (ts < maxTs - pInfo->watermark) {
// this window has been closed.
if (pInfo->pCloseWinSBF) {
- return tScalableBfPut(pInfo->pCloseWinSBF, &ts, sizeof(TSKEY));
+ res = tScalableBfPut(pInfo->pCloseWinSBF, &ts, sizeof(TSKEY));
+ if (res == TSDB_CODE_SUCCESS) {
+ return false;
+ } else {
+ qDebug("===stream===Update close window sbf. tableId:%" PRIu64 ", maxTs:%" PRIu64 ", mapMaxTs:%" PRIu64 ", ts:%" PRIu64, tableId,
+ maxTs, *pMapMaxTs, ts);
+ return true;
+ }
}
+ qDebug("===stream===Update close window. tableId:%" PRIu64 ", maxTs:%" PRIu64 ", mapMaxTs:%" PRIu64 ", ts:%" PRIu64, tableId,
+ maxTs, *pMapMaxTs, ts);
return true;
}
@@ -193,7 +202,7 @@ bool updateInfoIsUpdated(SUpdateInfo *pInfo, uint64_t tableId, TSKEY ts) {
}
if (ts < pInfo->minTS) {
- qDebug("===stream===Update. tableId:%" PRIu64 ", maxTs:%" PRIu64 ", mapMaxTs:%" PRIu64 ", ts:%" PRIu64, tableId,
+ qDebug("===stream===Update min ts. tableId:%" PRIu64 ", maxTs:%" PRIu64 ", mapMaxTs:%" PRIu64 ", ts:%" PRIu64, tableId,
maxTs, *pMapMaxTs, ts);
return true;
} else if (res == TSDB_CODE_SUCCESS) {
diff --git a/source/libs/sync/src/syncIndexMgr.c b/source/libs/sync/src/syncIndexMgr.c
index 07c4fa8429dc539609d3ae788caab3352b0a3e60..3bda9bcd51a1fe41fbeb09a1e4a39c3a53f1cd74 100644
--- a/source/libs/sync/src/syncIndexMgr.c
+++ b/source/libs/sync/src/syncIndexMgr.c
@@ -163,6 +163,7 @@ int64_t syncIndexMgrGetStartTime(SSyncIndexMgr *pSyncIndexMgr, const SRaftId *pR
}
}
ASSERT(0);
+ return -1;
}
void syncIndexMgrSetRecvTime(SSyncIndexMgr *pSyncIndexMgr, const SRaftId *pRaftId, int64_t recvTime) {
@@ -190,6 +191,7 @@ int64_t syncIndexMgrGetRecvTime(SSyncIndexMgr *pSyncIndexMgr, const SRaftId *pRa
}
}
ASSERT(0);
+ return -1;
}
// for debug -------------------
@@ -245,4 +247,5 @@ SyncTerm syncIndexMgrGetTerm(SSyncIndexMgr *pSyncIndexMgr, const SRaftId *pRaftI
}
}
ASSERT(0);
+ return -1;
}
\ No newline at end of file
diff --git a/source/libs/sync/src/syncMain.c b/source/libs/sync/src/syncMain.c
index 51098374b03531142c9c12443fa5b02efddc3aca..6f29b54f806f1113ec69dface7bdcbb4b0c42afc 100644
--- a/source/libs/sync/src/syncMain.c
+++ b/source/libs/sync/src/syncMain.c
@@ -2766,8 +2766,6 @@ const char* syncStr(ESyncState state) {
}
int32_t syncDoLeaderTransfer(SSyncNode* ths, SRpcMsg* pRpcMsg, SSyncRaftEntry* pEntry) {
- SyncLeaderTransfer* pSyncLeaderTransfer = syncLeaderTransferFromRpcMsg2(pRpcMsg);
-
if (ths->state != TAOS_SYNC_STATE_FOLLOWER) {
syncNodeEventLog(ths, "I am not follower, can not do leader transfer");
return 0;
@@ -2799,6 +2797,8 @@ int32_t syncDoLeaderTransfer(SSyncNode* ths, SRpcMsg* pRpcMsg, SSyncRaftEntry* p
}
*/
+ SyncLeaderTransfer* pSyncLeaderTransfer = syncLeaderTransferFromRpcMsg2(pRpcMsg);
+
do {
char logBuf[128];
snprintf(logBuf, sizeof(logBuf), "do leader transfer, index:%ld", pEntry->index);
diff --git a/source/libs/sync/src/syncMessage.c b/source/libs/sync/src/syncMessage.c
index b42aba560fa1c26ef9426b55729c1d39cafa8a24..faebe5bbecb16012831ed103de520c14accc81d5 100644
--- a/source/libs/sync/src/syncMessage.c
+++ b/source/libs/sync/src/syncMessage.c
@@ -1992,6 +1992,313 @@ void syncAppendEntriesReplyLog2(char* s, const SyncAppendEntriesReply* pMsg) {
}
}
+// ---- message process SyncHeartbeat----
+SyncHeartbeat* syncHeartbeatBuild(int32_t vgId) {
+ uint32_t bytes = sizeof(SyncHeartbeat);
+ SyncHeartbeat* pMsg = taosMemoryMalloc(bytes);
+ memset(pMsg, 0, bytes);
+ pMsg->bytes = bytes;
+ pMsg->vgId = vgId;
+ pMsg->msgType = TDMT_SYNC_HEARTBEAT;
+ return pMsg;
+}
+
+void syncHeartbeatDestroy(SyncHeartbeat* pMsg) {
+ if (pMsg != NULL) {
+ taosMemoryFree(pMsg);
+ }
+}
+
+void syncHeartbeatSerialize(const SyncHeartbeat* pMsg, char* buf, uint32_t bufLen) {
+ ASSERT(pMsg->bytes <= bufLen);
+ memcpy(buf, pMsg, pMsg->bytes);
+}
+
+void syncHeartbeatDeserialize(const char* buf, uint32_t len, SyncHeartbeat* pMsg) {
+ memcpy(pMsg, buf, len);
+ ASSERT(len == pMsg->bytes);
+}
+
+char* syncHeartbeatSerialize2(const SyncHeartbeat* pMsg, uint32_t* len) {
+ char* buf = taosMemoryMalloc(pMsg->bytes);
+ ASSERT(buf != NULL);
+ syncHeartbeatSerialize(pMsg, buf, pMsg->bytes);
+ if (len != NULL) {
+ *len = pMsg->bytes;
+ }
+ return buf;
+}
+
+SyncHeartbeat* syncHeartbeatDeserialize2(const char* buf, uint32_t len) {
+ uint32_t bytes = *((uint32_t*)buf);
+ SyncHeartbeat* pMsg = taosMemoryMalloc(bytes);
+ ASSERT(pMsg != NULL);
+ syncHeartbeatDeserialize(buf, len, pMsg);
+ ASSERT(len == pMsg->bytes);
+ return pMsg;
+}
+
+void syncHeartbeat2RpcMsg(const SyncHeartbeat* pMsg, SRpcMsg* pRpcMsg) {
+ memset(pRpcMsg, 0, sizeof(*pRpcMsg));
+ pRpcMsg->msgType = pMsg->msgType;
+ pRpcMsg->contLen = pMsg->bytes;
+ pRpcMsg->pCont = rpcMallocCont(pRpcMsg->contLen);
+ syncHeartbeatSerialize(pMsg, pRpcMsg->pCont, pRpcMsg->contLen);
+}
+
+void syncHeartbeatFromRpcMsg(const SRpcMsg* pRpcMsg, SyncHeartbeat* pMsg) {
+ syncHeartbeatDeserialize(pRpcMsg->pCont, pRpcMsg->contLen, pMsg);
+}
+
+SyncHeartbeat* syncHeartbeatFromRpcMsg2(const SRpcMsg* pRpcMsg) {
+ SyncHeartbeat* pMsg = syncHeartbeatDeserialize2(pRpcMsg->pCont, pRpcMsg->contLen);
+ ASSERT(pMsg != NULL);
+ return pMsg;
+}
+
+cJSON* syncHeartbeat2Json(const SyncHeartbeat* pMsg) {
+ char u64buf[128] = {0};
+ cJSON* pRoot = cJSON_CreateObject();
+
+ if (pMsg != NULL) {
+ cJSON_AddNumberToObject(pRoot, "bytes", pMsg->bytes);
+ cJSON_AddNumberToObject(pRoot, "vgId", pMsg->vgId);
+ cJSON_AddNumberToObject(pRoot, "msgType", pMsg->msgType);
+
+ cJSON* pSrcId = cJSON_CreateObject();
+ snprintf(u64buf, sizeof(u64buf), "%" PRIu64, pMsg->srcId.addr);
+ cJSON_AddStringToObject(pSrcId, "addr", u64buf);
+ {
+ uint64_t u64 = pMsg->srcId.addr;
+ cJSON* pTmp = pSrcId;
+ char host[128] = {0};
+ uint16_t port;
+ syncUtilU642Addr(u64, host, sizeof(host), &port);
+ cJSON_AddStringToObject(pTmp, "addr_host", host);
+ cJSON_AddNumberToObject(pTmp, "addr_port", port);
+ }
+ cJSON_AddNumberToObject(pSrcId, "vgId", pMsg->srcId.vgId);
+ cJSON_AddItemToObject(pRoot, "srcId", pSrcId);
+
+ cJSON* pDestId = cJSON_CreateObject();
+ snprintf(u64buf, sizeof(u64buf), "%" PRIu64, pMsg->destId.addr);
+ cJSON_AddStringToObject(pDestId, "addr", u64buf);
+ {
+ uint64_t u64 = pMsg->destId.addr;
+ cJSON* pTmp = pDestId;
+ char host[128] = {0};
+ uint16_t port;
+ syncUtilU642Addr(u64, host, sizeof(host), &port);
+ cJSON_AddStringToObject(pTmp, "addr_host", host);
+ cJSON_AddNumberToObject(pTmp, "addr_port", port);
+ }
+ cJSON_AddNumberToObject(pDestId, "vgId", pMsg->destId.vgId);
+ cJSON_AddItemToObject(pRoot, "destId", pDestId);
+
+ snprintf(u64buf, sizeof(u64buf), "%" PRIu64, pMsg->term);
+ cJSON_AddStringToObject(pRoot, "term", u64buf);
+
+ snprintf(u64buf, sizeof(u64buf), "%" PRIu64, pMsg->privateTerm);
+ cJSON_AddStringToObject(pRoot, "privateTerm", u64buf);
+
+ snprintf(u64buf, sizeof(u64buf), "%" PRId64, pMsg->commitIndex);
+ cJSON_AddStringToObject(pRoot, "commitIndex", u64buf);
+ }
+
+ cJSON* pJson = cJSON_CreateObject();
+ cJSON_AddItemToObject(pJson, "SyncHeartbeat", pRoot);
+ return pJson;
+}
+
+char* syncHeartbeat2Str(const SyncHeartbeat* pMsg) {
+ cJSON* pJson = syncHeartbeat2Json(pMsg);
+ char* serialized = cJSON_Print(pJson);
+ cJSON_Delete(pJson);
+ return serialized;
+}
+
+void syncHeartbeatPrint(const SyncHeartbeat* pMsg) {
+ char* serialized = syncHeartbeat2Str(pMsg);
+ printf("syncHeartbeatPrint | len:%" PRIu64 " | %s \n", strlen(serialized), serialized);
+ fflush(NULL);
+ taosMemoryFree(serialized);
+}
+
+void syncHeartbeatPrint2(char* s, const SyncHeartbeat* pMsg) {
+ char* serialized = syncHeartbeat2Str(pMsg);
+ printf("syncHeartbeatPrint2 | len:%" PRIu64 " | %s | %s \n", strlen(serialized), s, serialized);
+ fflush(NULL);
+ taosMemoryFree(serialized);
+}
+
+void syncHeartbeatLog(const SyncHeartbeat* pMsg) {
+ char* serialized = syncHeartbeat2Str(pMsg);
+ sTrace("syncHeartbeatLog | len:%" PRIu64 " | %s", strlen(serialized), serialized);
+ taosMemoryFree(serialized);
+}
+
+void syncHeartbeatLog2(char* s, const SyncHeartbeat* pMsg) {
+ if (gRaftDetailLog) {
+ char* serialized = syncHeartbeat2Str(pMsg);
+ sTrace("syncHeartbeatLog2 | len:%" PRIu64 " | %s | %s", strlen(serialized), s, serialized);
+ taosMemoryFree(serialized);
+ }
+}
+
+// ---- message process SyncHeartbeatReply----
+SyncHeartbeatReply* syncHeartbeatReplyBuild(int32_t vgId) {
+ uint32_t bytes = sizeof(SyncHeartbeatReply);
+ SyncHeartbeatReply* pMsg = taosMemoryMalloc(bytes);
+ memset(pMsg, 0, bytes);
+ pMsg->bytes = bytes;
+ pMsg->vgId = vgId;
+ pMsg->msgType = TDMT_SYNC_HEARTBEAT_REPLY;
+ return pMsg;
+}
+
+void syncHeartbeatReplyDestroy(SyncHeartbeatReply* pMsg) {
+ if (pMsg != NULL) {
+ taosMemoryFree(pMsg);
+ }
+}
+
+void syncHeartbeatReplySerialize(const SyncHeartbeatReply* pMsg, char* buf, uint32_t bufLen) {
+ ASSERT(pMsg->bytes <= bufLen);
+ memcpy(buf, pMsg, pMsg->bytes);
+}
+
+void syncHeartbeatReplyDeserialize(const char* buf, uint32_t len, SyncHeartbeatReply* pMsg) {
+ memcpy(pMsg, buf, len);
+ ASSERT(len == pMsg->bytes);
+}
+
+char* syncHeartbeatReplySerialize2(const SyncHeartbeatReply* pMsg, uint32_t* len) {
+ char* buf = taosMemoryMalloc(pMsg->bytes);
+ ASSERT(buf != NULL);
+ syncHeartbeatReplySerialize(pMsg, buf, pMsg->bytes);
+ if (len != NULL) {
+ *len = pMsg->bytes;
+ }
+ return buf;
+}
+
+SyncHeartbeatReply* syncHeartbeatReplyDeserialize2(const char* buf, uint32_t len) {
+ uint32_t bytes = *((uint32_t*)buf);
+ SyncHeartbeatReply* pMsg = taosMemoryMalloc(bytes);
+ ASSERT(pMsg != NULL);
+ syncHeartbeatReplyDeserialize(buf, len, pMsg);
+ ASSERT(len == pMsg->bytes);
+ return pMsg;
+}
+
+void syncHeartbeatReply2RpcMsg(const SyncHeartbeatReply* pMsg, SRpcMsg* pRpcMsg) {
+ memset(pRpcMsg, 0, sizeof(*pRpcMsg));
+ pRpcMsg->msgType = pMsg->msgType;
+ pRpcMsg->contLen = pMsg->bytes;
+ pRpcMsg->pCont = rpcMallocCont(pRpcMsg->contLen);
+ syncHeartbeatReplySerialize(pMsg, pRpcMsg->pCont, pRpcMsg->contLen);
+}
+
+void syncHeartbeatReplyFromRpcMsg(const SRpcMsg* pRpcMsg, SyncHeartbeatReply* pMsg) {
+ syncHeartbeatReplyDeserialize(pRpcMsg->pCont, pRpcMsg->contLen, pMsg);
+}
+
+SyncHeartbeatReply* syncHeartbeatReplyFromRpcMsg2(const SRpcMsg* pRpcMsg) {
+ SyncHeartbeatReply* pMsg = syncHeartbeatReplyDeserialize2(pRpcMsg->pCont, pRpcMsg->contLen);
+ ASSERT(pMsg != NULL);
+ return pMsg;
+}
+
+cJSON* syncHeartbeatReply2Json(const SyncHeartbeatReply* pMsg) {
+ char u64buf[128] = {0};
+ cJSON* pRoot = cJSON_CreateObject();
+
+ if (pMsg != NULL) {
+ cJSON_AddNumberToObject(pRoot, "bytes", pMsg->bytes);
+ cJSON_AddNumberToObject(pRoot, "vgId", pMsg->vgId);
+ cJSON_AddNumberToObject(pRoot, "msgType", pMsg->msgType);
+
+ cJSON* pSrcId = cJSON_CreateObject();
+ snprintf(u64buf, sizeof(u64buf), "%" PRIu64, pMsg->srcId.addr);
+ cJSON_AddStringToObject(pSrcId, "addr", u64buf);
+ {
+ uint64_t u64 = pMsg->srcId.addr;
+ cJSON* pTmp = pSrcId;
+ char host[128] = {0};
+ uint16_t port;
+ syncUtilU642Addr(u64, host, sizeof(host), &port);
+ cJSON_AddStringToObject(pTmp, "addr_host", host);
+ cJSON_AddNumberToObject(pTmp, "addr_port", port);
+ }
+ cJSON_AddNumberToObject(pSrcId, "vgId", pMsg->srcId.vgId);
+ cJSON_AddItemToObject(pRoot, "srcId", pSrcId);
+
+ cJSON* pDestId = cJSON_CreateObject();
+ snprintf(u64buf, sizeof(u64buf), "%" PRIu64, pMsg->destId.addr);
+ cJSON_AddStringToObject(pDestId, "addr", u64buf);
+ {
+ uint64_t u64 = pMsg->destId.addr;
+ cJSON* pTmp = pDestId;
+ char host[128] = {0};
+ uint16_t port;
+ syncUtilU642Addr(u64, host, sizeof(host), &port);
+ cJSON_AddStringToObject(pTmp, "addr_host", host);
+ cJSON_AddNumberToObject(pTmp, "addr_port", port);
+ }
+ cJSON_AddNumberToObject(pDestId, "vgId", pMsg->destId.vgId);
+ cJSON_AddItemToObject(pRoot, "destId", pDestId);
+
+ snprintf(u64buf, sizeof(u64buf), "%" PRIu64, pMsg->privateTerm);
+ cJSON_AddStringToObject(pRoot, "privateTerm", u64buf);
+
+ snprintf(u64buf, sizeof(u64buf), "%" PRIu64, pMsg->term);
+ cJSON_AddStringToObject(pRoot, "term", u64buf);
+
+ cJSON_AddStringToObject(pRoot, "matchIndex", u64buf);
+ snprintf(u64buf, sizeof(u64buf), "%" PRId64, pMsg->startTime);
+ cJSON_AddStringToObject(pRoot, "startTime", u64buf);
+ }
+
+ cJSON* pJson = cJSON_CreateObject();
+ cJSON_AddItemToObject(pJson, "SyncHeartbeatReply", pRoot);
+ return pJson;
+}
+
+char* syncHeartbeatReply2Str(const SyncHeartbeatReply* pMsg) {
+ cJSON* pJson = syncHeartbeatReply2Json(pMsg);
+ char* serialized = cJSON_Print(pJson);
+ cJSON_Delete(pJson);
+ return serialized;
+}
+
+void syncHeartbeatReplyPrint(const SyncHeartbeatReply* pMsg) {
+ char* serialized = syncHeartbeatReply2Str(pMsg);
+ printf("syncHeartbeatReplyPrint | len:%" PRIu64 " | %s \n", strlen(serialized), serialized);
+ fflush(NULL);
+ taosMemoryFree(serialized);
+}
+
+void syncHeartbeatReplyPrint2(char* s, const SyncHeartbeatReply* pMsg) {
+ char* serialized = syncHeartbeatReply2Str(pMsg);
+ printf("syncHeartbeatReplyPrint2 | len:%" PRIu64 " | %s | %s \n", strlen(serialized), s, serialized);
+ fflush(NULL);
+ taosMemoryFree(serialized);
+}
+
+void syncHeartbeatReplyLog(const SyncHeartbeatReply* pMsg) {
+ char* serialized = syncHeartbeatReply2Str(pMsg);
+ sTrace("syncHeartbeatReplyLog | len:%" PRIu64 " | %s", strlen(serialized), serialized);
+ taosMemoryFree(serialized);
+}
+
+void syncHeartbeatReplyLog2(char* s, const SyncHeartbeatReply* pMsg) {
+ if (gRaftDetailLog) {
+ char* serialized = syncHeartbeatReply2Str(pMsg);
+ sTrace("syncHeartbeatReplyLog2 | len:%" PRIu64 " | %s | %s", strlen(serialized), s, serialized);
+ taosMemoryFree(serialized);
+ }
+}
+
// ---- message process SyncApplyMsg----
SyncApplyMsg* syncApplyMsgBuild(uint32_t dataLen) {
uint32_t bytes = sizeof(SyncApplyMsg) + dataLen;
diff --git a/source/libs/sync/src/syncSnapshot.c b/source/libs/sync/src/syncSnapshot.c
index 5489a107e76082106961a0ed107413e5ec9b4a64..0be3392a9a52b69e29cbcedcb910cdbc0f9a6234 100644
--- a/source/libs/sync/src/syncSnapshot.c
+++ b/source/libs/sync/src/syncSnapshot.c
@@ -583,7 +583,7 @@ static int32_t snapshotReceiverFinish(SSyncSnapshotReceiver *pReceiver, SyncSnap
&(pReceiver->snapshot));
if (code != 0) {
syncNodeErrorLog(pReceiver->pSyncNode, "snapshot stop writer true error");
- ASSERT(0);
+ // ASSERT(0);
return -1;
}
pReceiver->pWriter = NULL;
diff --git a/source/libs/sync/src/syncTimeout.c b/source/libs/sync/src/syncTimeout.c
index af15c377fbc36ae523776824962f282462ff2bc9..c3c8131cbb31c3d4ac0b9fb59afc7bc751096329 100644
--- a/source/libs/sync/src/syncTimeout.c
+++ b/source/libs/sync/src/syncTimeout.c
@@ -91,16 +91,16 @@ int32_t syncNodeOnTimeoutCb(SSyncNode* ths, SyncTimeout* pMsg) {
} else if (pMsg->timeoutType == SYNC_TIMEOUT_ELECTION) {
if (atomic_load_64(&ths->electTimerLogicClockUser) <= pMsg->logicClock) {
++(ths->electTimerCounter);
- sInfo("vgId:%d, sync timeout, type:election count:%d, electTimerLogicClockUser:%ld", ths->vgId,
- ths->electTimerCounter, ths->electTimerLogicClockUser);
+ sTrace("vgId:%d, sync timer, type:election count:%d, electTimerLogicClockUser:%ld", ths->vgId,
+ ths->electTimerCounter, ths->electTimerLogicClockUser);
syncNodeElect(ths);
}
} else if (pMsg->timeoutType == SYNC_TIMEOUT_HEARTBEAT) {
if (atomic_load_64(&ths->heartbeatTimerLogicClockUser) <= pMsg->logicClock) {
++(ths->heartbeatTimerCounter);
- sInfo("vgId:%d, sync timeout, type:replicate count:%d, heartbeatTimerLogicClockUser:%ld", ths->vgId,
- ths->heartbeatTimerCounter, ths->heartbeatTimerLogicClockUser);
+ sTrace("vgId:%d, sync timer, type:replicate count:%d, heartbeatTimerLogicClockUser:%ld", ths->vgId,
+ ths->heartbeatTimerCounter, ths->heartbeatTimerLogicClockUser);
syncNodeReplicate(ths, true);
}
} else {
diff --git a/source/libs/sync/test/CMakeLists.txt b/source/libs/sync/test/CMakeLists.txt
index 72845d0c1d1a9378a3a189f4037c6fc646c8a536..b9cc7a391dde35e2569f30000752b3ef175fc824 100644
--- a/source/libs/sync/test/CMakeLists.txt
+++ b/source/libs/sync/test/CMakeLists.txt
@@ -57,6 +57,8 @@ add_executable(syncLeaderTransferTest "")
add_executable(syncReconfigFinishTest "")
add_executable(syncRestoreFromSnapshot "")
add_executable(syncRaftCfgIndexTest "")
+add_executable(syncHeartbeatTest "")
+add_executable(syncHeartbeatReplyTest "")
target_sources(syncTest
@@ -295,6 +297,14 @@ target_sources(syncRaftCfgIndexTest
PRIVATE
"syncRaftCfgIndexTest.cpp"
)
+target_sources(syncHeartbeatTest
+ PRIVATE
+ "syncHeartbeatTest.cpp"
+)
+target_sources(syncHeartbeatReplyTest
+ PRIVATE
+ "syncHeartbeatReplyTest.cpp"
+)
target_include_directories(syncTest
@@ -592,6 +602,16 @@ target_include_directories(syncRaftCfgIndexTest
"${TD_SOURCE_DIR}/include/libs/sync"
"${CMAKE_CURRENT_SOURCE_DIR}/../inc"
)
+target_include_directories(syncHeartbeatTest
+ PUBLIC
+ "${TD_SOURCE_DIR}/include/libs/sync"
+ "${CMAKE_CURRENT_SOURCE_DIR}/../inc"
+)
+target_include_directories(syncHeartbeatReplyTest
+ PUBLIC
+ "${TD_SOURCE_DIR}/include/libs/sync"
+ "${CMAKE_CURRENT_SOURCE_DIR}/../inc"
+)
target_link_libraries(syncTest
@@ -830,6 +850,14 @@ target_link_libraries(syncRaftCfgIndexTest
sync
gtest_main
)
+target_link_libraries(syncHeartbeatTest
+ sync
+ gtest_main
+)
+target_link_libraries(syncHeartbeatReplyTest
+ sync
+ gtest_main
+)
enable_testing()
diff --git a/source/libs/sync/test/syncHeartbeatReplyTest.cpp b/source/libs/sync/test/syncHeartbeatReplyTest.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..0ccd7b70bb8f552fa14c0a615993abbab678a8a0
--- /dev/null
+++ b/source/libs/sync/test/syncHeartbeatReplyTest.cpp
@@ -0,0 +1,105 @@
+#include
+#include
+#include "syncIO.h"
+#include "syncInt.h"
+#include "syncMessage.h"
+#include "syncUtil.h"
+
+void logTest() {
+ sTrace("--- sync log test: trace");
+ sDebug("--- sync log test: debug");
+ sInfo("--- sync log test: info");
+ sWarn("--- sync log test: warn");
+ sError("--- sync log test: error");
+ sFatal("--- sync log test: fatal");
+}
+
+SyncHeartbeatReply *createMsg() {
+ SyncHeartbeatReply *pMsg = syncHeartbeatReplyBuild(1000);
+ pMsg->srcId.addr = syncUtilAddr2U64("127.0.0.1", 1234);
+ pMsg->srcId.vgId = 100;
+ pMsg->destId.addr = syncUtilAddr2U64("127.0.0.1", 5678);
+ pMsg->destId.vgId = 100;
+
+ pMsg->term = 33;
+ pMsg->privateTerm = 44;
+ pMsg->startTime = taosGetTimestampMs();
+ return pMsg;
+}
+
+void test1() {
+ SyncHeartbeatReply *pMsg = createMsg();
+ syncHeartbeatReplyLog2((char *)"test1:", pMsg);
+ syncHeartbeatReplyDestroy(pMsg);
+}
+
+void test2() {
+ SyncHeartbeatReply *pMsg = createMsg();
+ uint32_t len = pMsg->bytes;
+ char * serialized = (char *)taosMemoryMalloc(len);
+ syncHeartbeatReplySerialize(pMsg, serialized, len);
+ SyncHeartbeatReply *pMsg2 = syncHeartbeatReplyBuild(1000);
+ syncHeartbeatReplyDeserialize(serialized, len, pMsg2);
+ syncHeartbeatReplyLog2((char *)"test2: syncHeartbeatReplySerialize -> syncHeartbeatReplyDeserialize ",
+ pMsg2);
+
+ taosMemoryFree(serialized);
+ syncHeartbeatReplyDestroy(pMsg);
+ syncHeartbeatReplyDestroy(pMsg2);
+}
+
+void test3() {
+ SyncHeartbeatReply *pMsg = createMsg();
+ uint32_t len;
+ char * serialized = syncHeartbeatReplySerialize2(pMsg, &len);
+ SyncHeartbeatReply *pMsg2 = syncHeartbeatReplyDeserialize2(serialized, len);
+ syncHeartbeatReplyLog2((char *)"test3: syncHeartbeatReplySerialize3 -> syncHeartbeatReplyDeserialize2 ",
+ pMsg2);
+
+ taosMemoryFree(serialized);
+ syncHeartbeatReplyDestroy(pMsg);
+ syncHeartbeatReplyDestroy(pMsg2);
+}
+
+void test4() {
+ SyncHeartbeatReply *pMsg = createMsg();
+ SRpcMsg rpcMsg;
+ syncHeartbeatReply2RpcMsg(pMsg, &rpcMsg);
+ SyncHeartbeatReply *pMsg2 = syncHeartbeatReplyBuild(1000);
+ syncHeartbeatReplyFromRpcMsg(&rpcMsg, pMsg2);
+ syncHeartbeatReplyLog2((char *)"test4: syncHeartbeatReply2RpcMsg -> syncHeartbeatReplyFromRpcMsg ",
+ pMsg2);
+
+ rpcFreeCont(rpcMsg.pCont);
+ syncHeartbeatReplyDestroy(pMsg);
+ syncHeartbeatReplyDestroy(pMsg2);
+}
+
+void test5() {
+ SyncHeartbeatReply *pMsg = createMsg();
+ SRpcMsg rpcMsg;
+ syncHeartbeatReply2RpcMsg(pMsg, &rpcMsg);
+ SyncHeartbeatReply *pMsg2 = syncHeartbeatReplyFromRpcMsg2(&rpcMsg);
+ syncHeartbeatReplyLog2((char *)"test5: syncHeartbeatReply2RpcMsg -> syncHeartbeatReplyFromRpcMsg2 ",
+ pMsg2);
+
+ rpcFreeCont(rpcMsg.pCont);
+ syncHeartbeatReplyDestroy(pMsg);
+ syncHeartbeatReplyDestroy(pMsg2);
+}
+
+int main() {
+ gRaftDetailLog = true;
+
+ tsAsyncLog = 0;
+ sDebugFlag = DEBUG_TRACE + DEBUG_SCREEN + DEBUG_FILE;
+ logTest();
+
+ test1();
+ test2();
+ test3();
+ test4();
+ test5();
+
+ return 0;
+}
diff --git a/source/libs/sync/test/syncHeartbeatTest.cpp b/source/libs/sync/test/syncHeartbeatTest.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..d910c828f1e13f4e6484c7b8ad7ea6758f626750
--- /dev/null
+++ b/source/libs/sync/test/syncHeartbeatTest.cpp
@@ -0,0 +1,99 @@
+#include
+#include
+#include "syncIO.h"
+#include "syncInt.h"
+#include "syncMessage.h"
+#include "syncUtil.h"
+
+void logTest() {
+ sTrace("--- sync log test: trace");
+ sDebug("--- sync log test: debug");
+ sInfo("--- sync log test: info");
+ sWarn("--- sync log test: warn");
+ sError("--- sync log test: error");
+ sFatal("--- sync log test: fatal");
+}
+
+SyncHeartbeat *createMsg() {
+ SyncHeartbeat *pMsg = syncHeartbeatBuild(789);
+ pMsg->srcId.addr = syncUtilAddr2U64("127.0.0.1", 1234);
+ pMsg->srcId.vgId = 100;
+ pMsg->destId.addr = syncUtilAddr2U64("127.0.0.1", 5678);
+ pMsg->destId.vgId = 100;
+ pMsg->term = 8;
+ pMsg->commitIndex = 33;
+ pMsg->privateTerm = 44;
+ return pMsg;
+}
+
+void test1() {
+ SyncHeartbeat *pMsg = createMsg();
+ syncHeartbeatLog2((char *)"test1:", pMsg);
+ syncHeartbeatDestroy(pMsg);
+}
+
+void test2() {
+ SyncHeartbeat *pMsg = createMsg();
+ uint32_t len = pMsg->bytes;
+ char * serialized = (char *)taosMemoryMalloc(len);
+ syncHeartbeatSerialize(pMsg, serialized, len);
+ SyncHeartbeat *pMsg2 = syncHeartbeatBuild(789);
+ syncHeartbeatDeserialize(serialized, len, pMsg2);
+ syncHeartbeatLog2((char *)"test2: syncHeartbeatSerialize -> syncHeartbeatDeserialize ", pMsg2);
+
+ taosMemoryFree(serialized);
+ syncHeartbeatDestroy(pMsg);
+ syncHeartbeatDestroy(pMsg2);
+}
+
+void test3() {
+ SyncHeartbeat *pMsg = createMsg();
+ uint32_t len;
+ char * serialized = syncHeartbeatSerialize2(pMsg, &len);
+ SyncHeartbeat *pMsg2 = syncHeartbeatDeserialize2(serialized, len);
+ syncHeartbeatLog2((char *)"test3: syncHeartbeatSerialize2 -> syncHeartbeatDeserialize2 ", pMsg2);
+
+ taosMemoryFree(serialized);
+ syncHeartbeatDestroy(pMsg);
+ syncHeartbeatDestroy(pMsg2);
+}
+
+void test4() {
+ SyncHeartbeat *pMsg = createMsg();
+ SRpcMsg rpcMsg;
+ syncHeartbeat2RpcMsg(pMsg, &rpcMsg);
+ SyncHeartbeat *pMsg2 = (SyncHeartbeat *)taosMemoryMalloc(rpcMsg.contLen);
+ syncHeartbeatFromRpcMsg(&rpcMsg, pMsg2);
+ syncHeartbeatLog2((char *)"test4: syncHeartbeat2RpcMsg -> syncHeartbeatFromRpcMsg ", pMsg2);
+
+ rpcFreeCont(rpcMsg.pCont);
+ syncHeartbeatDestroy(pMsg);
+ syncHeartbeatDestroy(pMsg2);
+}
+
+void test5() {
+ SyncHeartbeat *pMsg = createMsg();
+ SRpcMsg rpcMsg;
+ syncHeartbeat2RpcMsg(pMsg, &rpcMsg);
+ SyncHeartbeat *pMsg2 =syncHeartbeatFromRpcMsg2(&rpcMsg);
+ syncHeartbeatLog2((char *)"test5: syncHeartbeat2RpcMsg -> syncHeartbeatFromRpcMsg2 ", pMsg2);
+
+ rpcFreeCont(rpcMsg.pCont);
+ syncHeartbeatDestroy(pMsg);
+ syncHeartbeatDestroy(pMsg2);
+}
+
+int main() {
+ tsAsyncLog = 0;
+ sDebugFlag = DEBUG_TRACE + DEBUG_SCREEN + DEBUG_FILE;
+ gRaftDetailLog = true;
+ logTest();
+
+ test1();
+ test2();
+ test3();
+ test4();
+ test5();
+
+ return 0;
+}
diff --git a/source/libs/tdb/src/db/tdbBtree.c b/source/libs/tdb/src/db/tdbBtree.c
index 7a44edb12cddf5a386e3b77031920559d8b0a5e9..1480920f908e34bb7be5b95affe64619ac042289 100644
--- a/source/libs/tdb/src/db/tdbBtree.c
+++ b/source/libs/tdb/src/db/tdbBtree.c
@@ -509,7 +509,7 @@ static int tdbBtreeBalanceDeeper(SBTree *pBt, SPage *pRoot, SPage **ppChild, TXN
static int tdbBtreeBalanceNonRoot(SBTree *pBt, SPage *pParent, int idx, TXN *pTxn) {
int ret;
- int nOlds;
+ int nOlds, pageIdx;
SPage *pOlds[3] = {0};
SCell *pDivCell[3] = {0};
int szDivCell[3];
@@ -782,6 +782,11 @@ static int tdbBtreeBalanceNonRoot(SBTree *pBt, SPage *pParent, int idx, TXN *pTx
pBt);
tdbPageInsertCell(pParent, sIdx++, pNewCell, szNewCell, 0);
tdbOsFree(pNewCell);
+
+ if (TDB_CELLDECODER_FREE_VAL(&cd)) {
+ tdbFree(cd.pVal);
+ cd.pVal = NULL;
+ }
}
// move to next new page
@@ -844,13 +849,11 @@ static int tdbBtreeBalanceNonRoot(SBTree *pBt, SPage *pParent, int idx, TXN *pTx
}
}
- // TODO: here is not corrent for drop case
- for (int i = 0; i < nNews; i++) {
- if (i < nOlds) {
- tdbPagerReturnPage(pBt->pPager, pOlds[i], pTxn);
- } else {
- tdbPagerReturnPage(pBt->pPager, pNews[i], pTxn);
- }
+ for (pageIdx = 0; pageIdx < nOlds; ++pageIdx) {
+ tdbPagerReturnPage(pBt->pPager, pOlds[pageIdx], pTxn);
+ }
+ for (; pageIdx < nNews; ++pageIdx) {
+ tdbPagerReturnPage(pBt->pPager, pNews[pageIdx], pTxn);
}
return 0;
@@ -934,6 +937,8 @@ static int tdbFetchOvflPage(SPgno *pPgno, SPage **ppOfp, TXN *pTxn, SBTree *pBt)
return -1;
}
+ tdbPCacheRelease(pBt->pPager->pCache, *ppOfp, pTxn);
+
return ret;
}
@@ -1277,6 +1282,8 @@ static int tdbBtreeDecodePayload(SPage *pPage, const SCell *pCell, int nHeader,
nLeft -= bytes;
memcpy(&pgno, ofpCell + bytes, sizeof(pgno));
+
+ tdbPCacheRelease(pBt->pPager->pCache, ofp, pTxn);
}
} else {
int nLeftKey = kLen;
@@ -1336,6 +1343,8 @@ static int tdbBtreeDecodePayload(SPage *pPage, const SCell *pCell, int nHeader,
memcpy(&pgno, ofpCell + bytes, sizeof(pgno));
+ tdbPCacheRelease(pBt->pPager->pCache, ofp, pTxn);
+
nLeftKey -= bytes;
nLeft -= bytes;
}
@@ -1374,6 +1383,8 @@ static int tdbBtreeDecodePayload(SPage *pPage, const SCell *pCell, int nHeader,
memcpy(&pgno, ofpCell + vLen - nLeft + bytes, sizeof(pgno));
+ tdbPCacheRelease(pBt->pPager->pCache, ofp, pTxn);
+
nLeft -= bytes;
}
}
@@ -1401,7 +1412,7 @@ static int tdbBtreeDecodeCell(SPage *pPage, const SCell *pCell, SCellDecoder *pD
pDecoder->pgno = 0;
TDB_CELLDECODER_SET_FREE_NIL(pDecoder);
- tdbDebug("tdb btc decoder set nil: %p/0x%x ", pDecoder, pDecoder->freeKV);
+ // tdbTrace("tdb btc decoder set nil: %p/0x%x ", pDecoder, pDecoder->freeKV);
// 1. Decode header part
if (!leaf) {
diff --git a/source/libs/tdb/src/db/tdbPCache.c b/source/libs/tdb/src/db/tdbPCache.c
index ab9b21dc3fc01158ef5c504d69530b30eab6d79a..62541585911a5dfdc84c0d2fb84724c83efc5475 100644
--- a/source/libs/tdb/src/db/tdbPCache.c
+++ b/source/libs/tdb/src/db/tdbPCache.c
@@ -98,6 +98,7 @@ SPage *tdbPCacheFetch(SPCache *pCache, const SPgid *pPgid, TXN *pTxn) {
// printf("thread %" PRId64 " fetch page %d pgno %d pPage %p nRef %d\n", taosGetSelfPthreadId(), pPage->id,
// TDB_PAGE_PGNO(pPage), pPage, nRef);
+ tdbDebug("pcache/fetch page %p/%d/%d/%d", pPage, TDB_PAGE_PGNO(pPage), pPage->id, nRef);
return pPage;
}
@@ -111,6 +112,7 @@ void tdbPCacheRelease(SPCache *pCache, SPage *pPage, TXN *pTxn) {
tdbPCacheLock(pCache);
nRef = tdbUnrefPage(pPage);
+ tdbDebug("pcache/release page %p/%d/%d/%d", pPage, TDB_PAGE_PGNO(pPage), pPage->id, nRef);
if (nRef == 0) {
// test the nRef again to make sure
// it is safe th handle the page
@@ -145,7 +147,7 @@ static SPage *tdbPCacheFetchImpl(SPCache *pCache, const SPgid *pPgid, TXN *pTxn)
// 1. Search the hash table
pPage = pCache->pgHash[tdbPCachePageHash(pPgid) % pCache->nHash];
while (pPage) {
- if (memcmp(pPage->pgid.fileid, pPgid->fileid, TDB_FILE_ID_LEN) == 0 && pPage->pgid.pgno == pPgid->pgno) break;
+ if (pPage->pgid.pgno == pPgid->pgno && memcmp(pPage->pgid.fileid, pPgid->fileid, TDB_FILE_ID_LEN) == 0) break;
pPage = pPage->pHashNext;
}
@@ -212,7 +214,8 @@ static SPage *tdbPCacheFetchImpl(SPCache *pCache, const SPgid *pPgid, TXN *pTxn)
pPage->pPager = pPageH->pPager;
memcpy(pPage->pData, pPageH->pData, pPage->pageSize);
- tdbDebug("pcache/pPageH: %p %d %p %p", pPageH, pPageH->pPageHdr - pPageH->pData, pPageH->xCellSize, pPage);
+ tdbDebug("pcache/pPageH: %p %d %p %p %d", pPageH, pPageH->pPageHdr - pPageH->pData, pPageH->xCellSize, pPage,
+ TDB_PAGE_PGNO(pPageH));
tdbPageInit(pPage, pPageH->pPageHdr - pPageH->pData, pPageH->xCellSize);
pPage->kLen = pPageH->kLen;
pPage->vLen = pPageH->vLen;
@@ -243,7 +246,7 @@ static void tdbPCachePinPage(SPCache *pCache, SPage *pPage) {
pCache->nRecyclable--;
// printf("pin page %d pgno %d pPage %p\n", pPage->id, TDB_PAGE_PGNO(pPage), pPage);
- tdbTrace("pin page %d", pPage->id);
+ tdbDebug("pcache/pin page %p/%d/%d", pPage, TDB_PAGE_PGNO(pPage), pPage->id);
}
}
@@ -264,15 +267,14 @@ static void tdbPCacheUnpinPage(SPCache *pCache, SPage *pPage) {
pCache->nRecyclable++;
// printf("unpin page %d pgno %d pPage %p\n", pPage->id, TDB_PAGE_PGNO(pPage), pPage);
- tdbTrace("unpin page %d", pPage->id);
+ tdbDebug("pcache/unpin page %p/%d/%d", pPage, TDB_PAGE_PGNO(pPage), pPage->id);
}
static void tdbPCacheRemovePageFromHash(SPCache *pCache, SPage *pPage) {
- SPage **ppPage;
- uint32_t h;
+ uint32_t h = tdbPCachePageHash(&(pPage->pgid)) % pCache->nHash;
- h = tdbPCachePageHash(&(pPage->pgid));
- for (ppPage = &(pCache->pgHash[h % pCache->nHash]); (*ppPage) && *ppPage != pPage; ppPage = &((*ppPage)->pHashNext))
+ SPage **ppPage = &(pCache->pgHash[h]);
+ for (; (*ppPage) && *ppPage != pPage; ppPage = &((*ppPage)->pHashNext))
;
if (*ppPage) {
@@ -281,13 +283,11 @@ static void tdbPCacheRemovePageFromHash(SPCache *pCache, SPage *pPage) {
// printf("rmv page %d to hash, pgno %d, pPage %p\n", pPage->id, TDB_PAGE_PGNO(pPage), pPage);
}
- tdbTrace("remove page %d to hash", pPage->id);
+ tdbDebug("pcache/remove page %p/%d/%d from hash %" PRIu32, pPage, TDB_PAGE_PGNO(pPage), pPage->id, h);
}
static void tdbPCacheAddPageToHash(SPCache *pCache, SPage *pPage) {
- int h;
-
- h = tdbPCachePageHash(&(pPage->pgid)) % pCache->nHash;
+ uint32_t h = tdbPCachePageHash(&(pPage->pgid)) % pCache->nHash;
pPage->pHashNext = pCache->pgHash[h];
pCache->pgHash[h] = pPage;
@@ -295,7 +295,7 @@ static void tdbPCacheAddPageToHash(SPCache *pCache, SPage *pPage) {
pCache->nPage++;
// printf("add page %d to hash, pgno %d, pPage %p\n", pPage->id, TDB_PAGE_PGNO(pPage), pPage);
- tdbTrace("add page %d to hash", pPage->id);
+ tdbDebug("pcache/add page %p/%d/%d to hash %" PRIu32, pPage, TDB_PAGE_PGNO(pPage), pPage->id, h);
}
static int tdbPCacheOpenImpl(SPCache *pCache) {
diff --git a/source/libs/tdb/src/db/tdbPage.c b/source/libs/tdb/src/db/tdbPage.c
index 276b06b147586bbf18fe73f94cdb2592032d97e2..a3f376b929291780bdd57cbf99f5db6035e70aff 100644
--- a/source/libs/tdb/src/db/tdbPage.c
+++ b/source/libs/tdb/src/db/tdbPage.c
@@ -68,12 +68,15 @@ int tdbPageCreate(int pageSize, SPage **ppPage, void *(*xMalloc)(void *, size_t)
}
*ppPage = pPage;
+
+ tdbDebug("page/create: %p %p", pPage, xMalloc);
return 0;
}
int tdbPageDestroy(SPage *pPage, void (*xFree)(void *arg, void *ptr), void *arg) {
u8 *ptr;
+ tdbDebug("page/destroy: %p %p", pPage, xFree);
ASSERT(xFree);
for (int iOvfl = 0; iOvfl < pPage->nOverflow; iOvfl++) {
@@ -87,6 +90,7 @@ int tdbPageDestroy(SPage *pPage, void (*xFree)(void *arg, void *ptr), void *arg)
}
void tdbPageZero(SPage *pPage, u8 szAmHdr, int (*xCellSize)(const SPage *, SCell *, int, TXN *, SBTree *pBt)) {
+ tdbDebug("page/zero: %p %" PRIu8 " %p", pPage, szAmHdr, xCellSize);
pPage->pPageHdr = pPage->pData + szAmHdr;
TDB_PAGE_NCELLS_SET(pPage, 0);
TDB_PAGE_CCELLS_SET(pPage, pPage->pageSize - sizeof(SPageFtr));
@@ -103,6 +107,7 @@ void tdbPageZero(SPage *pPage, u8 szAmHdr, int (*xCellSize)(const SPage *, SCell
}
void tdbPageInit(SPage *pPage, u8 szAmHdr, int (*xCellSize)(const SPage *, SCell *, int, TXN *, SBTree *pBt)) {
+ tdbDebug("page/init: %p %" PRIu8 " %p", pPage, szAmHdr, xCellSize);
pPage->pPageHdr = pPage->pData + szAmHdr;
pPage->pCellIdx = pPage->pPageHdr + TDB_PAGE_HDR_SIZE(pPage);
pPage->pFreeStart = pPage->pCellIdx + TDB_PAGE_OFFSET_SIZE(pPage) * TDB_PAGE_NCELLS(pPage);
diff --git a/source/libs/tdb/src/db/tdbPager.c b/source/libs/tdb/src/db/tdbPager.c
index 4de99e8b1bde34c7f6583d0aedc205074d7c1cca..bb2151ed9ded3728aee58f2994c8a163a354adc0 100644
--- a/source/libs/tdb/src/db/tdbPager.c
+++ b/source/libs/tdb/src/db/tdbPager.c
@@ -34,6 +34,22 @@ static int tdbPagerInitPage(SPager *pPager, SPage *pPage, int (*initPage)(SPage
static int tdbPagerWritePageToJournal(SPager *pPager, SPage *pPage);
static int tdbPagerWritePageToDB(SPager *pPager, SPage *pPage);
+static FORCE_INLINE int32_t pageCmpFn(const void *lhs, const void *rhs) {
+ SPage *pPageL = (SPage *)(((uint8_t *)lhs) - sizeof(SRBTreeNode));
+ SPage *pPageR = (SPage *)(((uint8_t *)rhs) - sizeof(SRBTreeNode));
+
+ SPgno pgnoL = TDB_PAGE_PGNO(pPageL);
+ SPgno pgnoR = TDB_PAGE_PGNO(pPageR);
+
+ if (pgnoL < pgnoR) {
+ return -1;
+ } else if (pgnoL > pgnoR) {
+ return 1;
+ } else {
+ return 0;
+ }
+}
+
int tdbPagerOpen(SPCache *pCache, const char *fileName, SPager **ppPager) {
uint8_t *pPtr;
SPager *pPager;
@@ -83,6 +99,8 @@ int tdbPagerOpen(SPCache *pCache, const char *fileName, SPager **ppPager) {
ret = tdbGetFileSize(pPager->fd, pPager->pageSize, &(pPager->dbOrigSize));
pPager->dbFileSize = pPager->dbOrigSize;
+ tRBTreeCreate(&pPager->rbt, pageCmpFn);
+
*ppPager = pPager;
return 0;
}
@@ -166,7 +184,8 @@ int tdbPagerWrite(SPager *pPager, SPage *pPage) {
// ref page one more time so the page will not be release
tdbRefPage(pPage);
-
+ tdbDebug("pcache/mdirty page %p/%d/%d", pPage, TDB_PAGE_PGNO(pPage), pPage->id);
+ /*
// Set page as dirty
pPage->isDirty = 1;
@@ -184,6 +203,8 @@ int tdbPagerWrite(SPager *pPager, SPage *pPage) {
ASSERT(*ppPage == NULL || TDB_PAGE_PGNO(*ppPage) > TDB_PAGE_PGNO(pPage));
pPage->pDirtyNext = *ppPage;
*ppPage = pPage;
+ */
+ tRBTreePut(&pPager->rbt, (SRBTreeNode *)pPage);
// Write page to journal if neccessary
if (TDB_PAGE_PGNO(pPage) <= pPager->dbOrigSize) {
@@ -227,6 +248,23 @@ int tdbPagerCommit(SPager *pPager, TXN *pTxn) {
return 0;
}
+ SRBTreeIter iter = tRBTreeIterCreate(&pPager->rbt, 1);
+ SRBTreeNode *pNode = NULL;
+ while ((pNode = tRBTreeIterNext(&iter)) != NULL) {
+ pPage = (SPage *)pNode;
+ ret = tdbPagerWritePageToDB(pPager, pPage);
+ if (ret < 0) {
+ ASSERT(0);
+ return -1;
+ }
+
+ pPage->isDirty = 0;
+
+ tdbPCacheRelease(pPager->pCache, pPage, pTxn);
+ }
+
+ tRBTreeCreate(&pPager->rbt, pageCmpFn);
+ /*
// loop to write the dirty pages to file
for (pPage = pPager->pDirty; pPage; pPage = pPage->pDirtyNext) {
// TODO: update the page footer
@@ -237,9 +275,6 @@ int tdbPagerCommit(SPager *pPager, TXN *pTxn) {
}
}
- tdbTrace("tdbttl commit:%p, %d", pPager, pPager->dbOrigSize);
- pPager->dbOrigSize = pPager->dbFileSize;
-
// release the page
for (pPage = pPager->pDirty; pPage; pPage = pPager->pDirty) {
pPager->pDirty = pPage->pDirtyNext;
@@ -249,6 +284,9 @@ int tdbPagerCommit(SPager *pPager, TXN *pTxn) {
tdbPCacheRelease(pPager->pCache, pPage, pTxn);
}
+ */
+ tdbTrace("tdbttl commit:%p, %d", pPager, pPager->dbOrigSize);
+ pPager->dbOrigSize = pPager->dbFileSize;
// sync the db file
tdbOsFSync(pPager->fd);
@@ -496,12 +534,19 @@ static int tdbPagerWritePageToJournal(SPager *pPager, SPage *pPage) {
return 0;
}
-
+/*
+struct TdFile {
+ TdThreadRwlock rwlock;
+ int refId;
+ int fd;
+ FILE *fp;
+} TdFile;
+*/
static int tdbPagerWritePageToDB(SPager *pPager, SPage *pPage) {
i64 offset;
int ret;
- offset = pPage->pageSize * (TDB_PAGE_PGNO(pPage) - 1);
+ offset = (i64)pPage->pageSize * (TDB_PAGE_PGNO(pPage) - 1);
if (tdbOsLSeek(pPager->fd, offset, SEEK_SET) < 0) {
ASSERT(0);
return -1;
@@ -513,6 +558,7 @@ static int tdbPagerWritePageToDB(SPager *pPager, SPage *pPage) {
return -1;
}
+ // pwrite(pPager->fd->fd, pPage->pData, pPage->pageSize, offset);
return 0;
}
diff --git a/source/libs/tdb/src/inc/tdbInt.h b/source/libs/tdb/src/inc/tdbInt.h
index 49126b80b6e5dd11f30a7cddf581f42994db7bec..df6ba8b35f18c6937688276c3c0c268b9efd33ce 100644
--- a/source/libs/tdb/src/inc/tdbInt.h
+++ b/source/libs/tdb/src/inc/tdbInt.h
@@ -19,6 +19,7 @@
#include "tdb.h"
#include "tlog.h"
+#include "trbtree.h"
#ifdef __cplusplus
extern "C" {
@@ -256,6 +257,7 @@ typedef struct {
#pragma pack(pop)
struct SPage {
+ SRBTreeNode node; // must be the first field for pageCmpFn to work
tdb_spinlock_t lock;
int pageSize;
u8 *pData;
@@ -280,13 +282,13 @@ struct SPage {
static inline i32 tdbRefPage(SPage *pPage) {
i32 nRef = atomic_add_fetch_32(&((pPage)->nRef), 1);
- tdbTrace("ref page %d, nRef %d", pPage->id, nRef);
+ // tdbTrace("ref page %p/%d, nRef %d", pPage, pPage->id, nRef);
return nRef;
}
static inline i32 tdbUnrefPage(SPage *pPage) {
i32 nRef = atomic_sub_fetch_32(&((pPage)->nRef), 1);
- tdbTrace("unref page %d, nRef %d", pPage->id, nRef);
+ // tdbTrace("unref page %p/%d, nRef %d", pPage, pPage->id, nRef);
return nRef;
}
@@ -389,6 +391,7 @@ struct SPager {
SPgno dbFileSize;
SPgno dbOrigSize;
SPage *pDirty;
+ SRBTree rbt;
u8 inTran;
SPager *pNext; // used by TDB
SPager *pHashNext; // used by TDB
diff --git a/source/libs/transport/src/thttp.c b/source/libs/transport/src/thttp.c
index 935f536a9021cc26ea950dd161655a6b6b1e9bef..1559c85e23d59fec376890433f924522df8dc761 100644
--- a/source/libs/transport/src/thttp.c
+++ b/source/libs/transport/src/thttp.c
@@ -21,14 +21,16 @@
#include "taoserror.h"
#include "tlog.h"
+// clang-format on
#define HTTP_RECV_BUF_SIZE 1024
+
typedef struct SHttpClient {
uv_connect_t conn;
uv_tcp_t tcp;
uv_write_t req;
uv_buf_t* wbuf;
- char *rbuf;
+ char* rbuf;
char* addr;
uint16_t port;
} SHttpClient;
@@ -124,40 +126,41 @@ _OVER:
return code;
}
-static void destroyHttpClient(SHttpClient* cli) {
+static FORCE_INLINE void destroyHttpClient(SHttpClient* cli) {
taosMemoryFree(cli->wbuf);
taosMemoryFree(cli->rbuf);
taosMemoryFree(cli->addr);
taosMemoryFree(cli);
-
}
-static void clientCloseCb(uv_handle_t* handle) {
+static FORCE_INLINE void clientCloseCb(uv_handle_t* handle) {
SHttpClient* cli = handle->data;
destroyHttpClient(cli);
}
-static void clientAllocBuffCb(uv_handle_t *handle, size_t suggested_size, uv_buf_t *buf) {
- SHttpClient* cli = handle->data;
- buf->base = cli->rbuf;
- buf->len = HTTP_RECV_BUF_SIZE;
+static FORCE_INLINE void clientAllocBuffCb(uv_handle_t* handle, size_t suggested_size, uv_buf_t* buf) {
+ SHttpClient* cli = handle->data;
+ buf->base = cli->rbuf;
+ buf->len = HTTP_RECV_BUF_SIZE;
}
-static void clientRecvCb(uv_stream_t* handle, ssize_t nread, const uv_buf_t *buf) {
- SHttpClient* cli = handle->data;
+static FORCE_INLINE void clientRecvCb(uv_stream_t* handle, ssize_t nread, const uv_buf_t* buf) {
+ SHttpClient* cli = handle->data;
if (nread < 0) {
- uError("http-report read error:%s", uv_err_name(nread));
+ uError("http-report recv error:%s", uv_err_name(nread));
} else {
- uTrace("http-report succ to read %d bytes, just ignore it", nread);
+ uTrace("http-report succ to recv %d bytes, just ignore it", nread);
}
uv_close((uv_handle_t*)&cli->tcp, clientCloseCb);
-}
+}
static void clientSentCb(uv_write_t* req, int32_t status) {
SHttpClient* cli = req->data;
if (status != 0) {
terrno = TAOS_SYSTEM_ERROR(status);
uError("http-report failed to send data %s", uv_strerror(status));
+ uv_close((uv_handle_t*)&cli->tcp, clientCloseCb);
+ return;
} else {
uTrace("http-report succ to send data");
}
- uv_read_start((uv_stream_t *)&cli->tcp, clientAllocBuffCb, clientRecvCb);
+ uv_read_start((uv_stream_t*)&cli->tcp, clientAllocBuffCb, clientRecvCb);
}
static void clientConnCb(uv_connect_t* req, int32_t status) {
SHttpClient* cli = req->data;
@@ -170,11 +173,11 @@ static void clientConnCb(uv_connect_t* req, int32_t status) {
uv_write(&cli->req, (uv_stream_t*)&cli->tcp, cli->wbuf, 2, clientSentCb);
}
-static int32_t taosBuildDstAddr(const char* server, uint16_t port, struct sockaddr_in* dest) {
+static FORCE_INLINE int32_t taosBuildDstAddr(const char* server, uint16_t port, struct sockaddr_in* dest) {
uint32_t ip = taosGetIpv4FromFqdn(server);
if (ip == 0xffffffff) {
terrno = TAOS_SYSTEM_ERROR(errno);
- uError("http-report failed to get http server:%s ip since %s", server, terrstr());
+ uError("http-report failed to get http server:%s since %s", server, errno == 0 ? "invalid http server" : terrstr());
return -1;
}
char buf[128] = {0};
@@ -209,7 +212,7 @@ int32_t taosSendHttpReport(const char* server, uint16_t port, char* pCont, int32
cli->tcp.data = cli;
cli->req.data = cli;
cli->wbuf = wb;
- cli->rbuf = taosMemoryCalloc(1, HTTP_RECV_BUF_SIZE);
+ cli->rbuf = taosMemoryCalloc(1, HTTP_RECV_BUF_SIZE);
cli->addr = tstrdup(server);
cli->port = port;
@@ -223,10 +226,10 @@ int32_t taosSendHttpReport(const char* server, uint16_t port, char* pCont, int32
if (ret != 0) {
uError("http-report failed to connect to server, reason:%s, dst:%s:%d", uv_strerror(ret), cli->addr, cli->port);
destroyHttpClient(cli);
+ uv_stop(loop);
}
uv_run(loop, UV_RUN_DEFAULT);
uv_loop_close(loop);
return terrno;
}
-// clang-format on
diff --git a/source/libs/transport/src/trans.c b/source/libs/transport/src/trans.c
index 0a0dcef378bde92a18b9455b203774a3c28aa428..9e0a8f2a10c282cc8ef20e59f89aed477d5c1eef 100644
--- a/source/libs/transport/src/trans.c
+++ b/source/libs/transport/src/trans.c
@@ -43,7 +43,7 @@ void* rpcOpen(const SRpcInit* pInit) {
return NULL;
}
if (pInit->label) {
- tstrncpy(pRpc->label, pInit->label, strlen(pInit->label) + 1);
+ tstrncpy(pRpc->label, pInit->label, TSDB_LABEL_LEN);
}
// register callback handle
pRpc->cfp = pInit->cfp;
diff --git a/source/libs/transport/src/transCli.c b/source/libs/transport/src/transCli.c
index 7052b0b915137678d6aff528a26540a973cd74f5..4a0008b5ff22283b17e263d9e5168fc4ff0f16fd 100644
--- a/source/libs/transport/src/transCli.c
+++ b/source/libs/transport/src/transCli.c
@@ -16,7 +16,7 @@
#include "transComm.h"
typedef struct SConnList {
- queue conn;
+ queue conns;
int32_t size;
} SConnList;
@@ -69,11 +69,9 @@ typedef struct SCliThrd {
SAsyncPool* asyncPool;
uv_prepare_t* prepare;
void* pool; // conn pool
-
+ // timer handles
SArray* timerList;
-
// msg queue
-
queue msg;
TdThreadMutex msgMtx;
SDelayQueue* delayQueue;
@@ -107,11 +105,11 @@ static void doCloseIdleConn(void* param);
static void cliReadTimeoutCb(uv_timer_t* handle);
// register timer in each thread to clear expire conn
// static void cliTimeoutCb(uv_timer_t* handle);
-// alloc buf for recv
-static void cliAllocRecvBufferCb(uv_handle_t* handle, size_t suggested_size, uv_buf_t* buf);
-// callback after read nbytes from socket
+// alloc buffer for recv
+static FORCE_INLINE void cliAllocRecvBufferCb(uv_handle_t* handle, size_t suggested_size, uv_buf_t* buf);
+// callback after recv nbytes from socket
static void cliRecvCb(uv_stream_t* cli, ssize_t nread, const uv_buf_t* buf);
-// callback after write data to socket
+// callback after send data to socket
static void cliSendCb(uv_write_t* req, int status);
// callback after conn to server
static void cliConnCb(uv_connect_t* req, int status);
@@ -129,19 +127,14 @@ static SCliConn* cliCreateConn(SCliThrd* thrd);
static void cliDestroyConn(SCliConn* pConn, bool clear /*clear tcp handle or not*/);
static void cliDestroy(uv_handle_t* handle);
static void cliSend(SCliConn* pConn);
+static void cliDestroyConnMsgs(SCliConn* conn, bool destroy);
-static bool cliIsEpsetUpdated(int32_t code, STransConnCtx* pCtx) {
- if (code != 0) return false;
- if (pCtx->retryCnt == 0) return false;
- if (transEpSetIsEqual(&pCtx->epSet, &pCtx->origEpSet)) return false;
- return true;
-}
+// cli util func
+static FORCE_INLINE bool cliIsEpsetUpdated(int32_t code, STransConnCtx* pCtx);
+static FORCE_INLINE void cliMayCvtFqdnToIp(SEpSet* pEpSet, SCvtAddr* pCvtAddr);
+
+static FORCE_INLINE int32_t cliBuildExceptResp(SCliMsg* pMsg, STransMsg* resp);
-void cliMayCvtFqdnToIp(SEpSet* pEpSet, SCvtAddr* pCvtAddr);
-/*
- * set TCP connection timeout per-socket level
- */
-static int cliCreateSocket();
// process data read from server, add decompress etc later
static void cliHandleResp(SCliConn* conn);
// handle except about conn
@@ -155,13 +148,11 @@ static void cliHandleUpdate(SCliMsg* pMsg, SCliThrd* pThrd);
static void (*cliAsyncHandle[])(SCliMsg* pMsg, SCliThrd* pThrd) = {cliHandleReq, cliHandleQuit, cliHandleRelease, NULL,
cliHandleUpdate};
-static void cliSendQuit(SCliThrd* thrd);
-static void destroyUserdata(STransMsg* userdata);
-
-static int cliRBChoseIdx(STrans* pTransInst);
+static FORCE_INLINE void destroyUserdata(STransMsg* userdata);
+static FORCE_INLINE void destroyCmsg(void* cmsg);
+static FORCE_INLINE int cliRBChoseIdx(STrans* pTransInst);
+static FORCE_INLINE void transDestroyConnCtx(STransConnCtx* ctx);
-static void destroyCmsg(void* cmsg);
-static void transDestroyConnCtx(STransConnCtx* ctx);
// thread obj
static SCliThrd* createThrdObj();
static void destroyThrdObj(SCliThrd* pThrd);
@@ -169,15 +160,14 @@ static void destroyThrdObj(SCliThrd* pThrd);
static void cliWalkCb(uv_handle_t* handle, void* arg);
static void cliReleaseUnfinishedMsg(SCliConn* conn) {
- SCliMsg* pMsg = NULL;
for (int i = 0; i < transQueueSize(&conn->cliMsgs); i++) {
- pMsg = transQueueGet(&conn->cliMsgs, i);
- if (pMsg != NULL && pMsg->ctx != NULL) {
- if (conn->ctx.freeFunc != NULL) {
- conn->ctx.freeFunc(pMsg->ctx->ahandle);
+ SCliMsg* msg = transQueueGet(&conn->cliMsgs, i);
+ if (msg != NULL && msg->ctx != NULL && msg->ctx->ahandle != (void*)0x9527) {
+ if (conn->ctx.freeFunc != NULL && msg->ctx->ahandle != NULL) {
+ conn->ctx.freeFunc(msg->ctx->ahandle);
}
}
- destroyCmsg(pMsg);
+ destroyCmsg(msg);
}
}
#define CLI_RELEASE_UV(loop) \
@@ -206,20 +196,22 @@ static void cliReleaseUnfinishedMsg(SCliConn* conn) {
#define CONN_GET_HOST_THREAD(conn) (conn ? ((SCliConn*)conn)->hostThrd : NULL)
#define CONN_GET_INST_LABEL(conn) (((STrans*)(((SCliThrd*)(conn)->hostThrd)->pTransInst))->label)
-#define CONN_GET_MSGCTX_BY_AHANDLE(conn, ahandle) \
- do { \
- int i = 0, sz = transQueueSize(&conn->cliMsgs); \
- for (; i < sz; i++) { \
- pMsg = transQueueGet(&conn->cliMsgs, i); \
- if (pMsg != NULL && pMsg->ctx != NULL && (uint64_t)pMsg->ctx->ahandle == ahandle) { \
- break; \
- } \
- } \
- if (i == sz) { \
- pMsg = NULL; \
- } else { \
- pMsg = transQueueRm(&conn->cliMsgs, i); \
- } \
+#define CONN_GET_MSGCTX_BY_AHANDLE(conn, ahandle) \
+ do { \
+ int i = 0, sz = transQueueSize(&conn->cliMsgs); \
+ for (; i < sz; i++) { \
+ pMsg = transQueueGet(&conn->cliMsgs, i); \
+ if (pMsg->ctx != NULL && (uint64_t)pMsg->ctx->ahandle == ahandle) { \
+ break; \
+ } \
+ } \
+ if (i == sz) { \
+ pMsg = NULL; \
+ tDebug("msg not found, %" PRIu64 "", ahandle); \
+ } else { \
+ pMsg = transQueueRm(&conn->cliMsgs, i); \
+ tDebug("msg found, %" PRIu64 "", ahandle); \
+ } \
} while (0)
#define CONN_GET_NEXT_SENDMSG(conn) \
do { \
@@ -297,7 +289,12 @@ bool cliMaySendCachedMsg(SCliConn* conn) {
if (!transQueueEmpty(&conn->cliMsgs)) {
SCliMsg* pCliMsg = NULL;
CONN_GET_NEXT_SENDMSG(conn);
- cliSend(conn);
+ if (pCliMsg == NULL)
+ return false;
+ else {
+ cliSend(conn);
+ return true;
+ }
}
return false;
_RETURN:
@@ -384,8 +381,10 @@ void cliHandleResp(SCliConn* conn) {
return;
}
- if (cliAppCb(conn, &transMsg, pMsg) != 0) {
- return;
+ if (pMsg == NULL || (pMsg && pMsg->type != Release)) {
+ if (cliAppCb(conn, &transMsg, pMsg) != 0) {
+ return;
+ }
}
destroyCmsg(pMsg);
@@ -433,17 +432,20 @@ void cliHandleExceptImpl(SCliConn* pConn, int32_t code) {
transMsg.info.ahandle);
}
} else {
- transMsg.info.ahandle = pCtx ? pCtx->ahandle : NULL;
+ transMsg.info.ahandle = (pMsg->type != Release && pCtx) ? pCtx->ahandle : NULL;
}
if (pCtx == NULL || pCtx->pSem == NULL) {
if (transMsg.info.ahandle == NULL) {
+ if (REQUEST_NO_RESP(&pMsg->msg) || pMsg->type == Release) destroyCmsg(pMsg);
once = true;
continue;
}
}
- if (cliAppCb(pConn, &transMsg, pMsg) != 0) {
- return;
+ if (pMsg == NULL || (pMsg && pMsg->type != Release)) {
+ if (cliAppCb(pConn, &transMsg, pMsg) != 0) {
+ return;
+ }
}
destroyCmsg(pMsg);
tTrace("%s conn %p start to destroy, ref:%d", CONN_GET_INST_LABEL(pConn), pConn, T_REF_VAL_GET(pConn));
@@ -470,8 +472,8 @@ void* createConnPool(int size) {
void* destroyConnPool(void* pool) {
SConnList* connList = taosHashIterate((SHashObj*)pool, NULL);
while (connList != NULL) {
- while (!QUEUE_IS_EMPTY(&connList->conn)) {
- queue* h = QUEUE_HEAD(&connList->conn);
+ while (!QUEUE_IS_EMPTY(&connList->conns)) {
+ queue* h = QUEUE_HEAD(&connList->conns);
SCliConn* c = QUEUE_DATA(h, SCliConn, q);
cliDestroyConn(c, true);
}
@@ -484,21 +486,21 @@ void* destroyConnPool(void* pool) {
static SCliConn* getConnFromPool(void* pool, char* ip, uint32_t port) {
char key[32] = {0};
CONN_CONSTRUCT_HASH_KEY(key, ip, port);
- SHashObj* pPool = pool;
- SConnList* plist = taosHashGet(pPool, key, strlen(key));
+
+ SConnList* plist = taosHashGet((SHashObj*)pool, key, strlen(key));
if (plist == NULL) {
SConnList list = {0};
- taosHashPut(pPool, key, strlen(key), (void*)&list, sizeof(list));
- plist = taosHashGet(pPool, key, strlen(key));
- QUEUE_INIT(&plist->conn);
+ taosHashPut((SHashObj*)pool, key, strlen(key), (void*)&list, sizeof(list));
+ plist = taosHashGet((SHashObj*)pool, key, strlen(key));
+ QUEUE_INIT(&plist->conns);
}
- if (QUEUE_IS_EMPTY(&plist->conn)) {
+ if (QUEUE_IS_EMPTY(&plist->conns)) {
return NULL;
}
plist->size -= 1;
- queue* h = QUEUE_HEAD(&plist->conn);
+ queue* h = QUEUE_HEAD(&plist->conns);
SCliConn* conn = QUEUE_DATA(h, SCliConn, q);
conn->status = ConnNormal;
QUEUE_REMOVE(&conn->q);
@@ -514,22 +516,21 @@ static void addConnToPool(void* pool, SCliConn* conn) {
if (conn->status == ConnInPool) {
return;
}
- SCliThrd* thrd = conn->hostThrd;
- CONN_HANDLE_THREAD_QUIT(thrd);
-
allocConnRef(conn, true);
+ SCliThrd* thrd = conn->hostThrd;
if (conn->timer != NULL) {
uv_timer_stop(conn->timer);
taosArrayPush(thrd->timerList, &conn->timer);
conn->timer->data = NULL;
conn->timer = NULL;
}
+ if (T_REF_VAL_GET(conn) > 1) {
+ transUnrefCliHandle(conn);
+ }
+
+ cliDestroyConnMsgs(conn, false);
- STrans* pTransInst = thrd->pTransInst;
- cliReleaseUnfinishedMsg(conn);
- transQueueClear(&conn->cliMsgs);
- transCtxCleanup(&conn->ctx);
conn->status = ConnInPool;
if (conn->list == NULL) {
@@ -540,18 +541,15 @@ static void addConnToPool(void* pool, SCliConn* conn) {
} else {
tTrace("%s conn %p added to conn pool, read buf cap:%d", CONN_GET_INST_LABEL(conn), conn, conn->readBuf.cap);
}
- assert(conn->list != NULL);
- QUEUE_INIT(&conn->q);
- QUEUE_PUSH(&conn->list->conn, &conn->q);
+ QUEUE_PUSH(&conn->list->conns, &conn->q);
conn->list->size += 1;
- conn->task = NULL;
- assert(!QUEUE_IS_EMPTY(&conn->list->conn));
-
if (conn->list->size >= 50) {
STaskArg* arg = taosMemoryCalloc(1, sizeof(STaskArg));
arg->param1 = conn;
arg->param2 = thrd;
+
+ STrans* pTransInst = thrd->pTransInst;
conn->task = transDQSched(thrd->timeoutQueue, doCloseIdleConn, arg, CONN_PERSIST_TIME(pTransInst->idleTime));
}
}
@@ -691,11 +689,10 @@ static void cliDestroy(uv_handle_t* handle) {
transRemoveExHandle(transGetRefMgt(), conn->refId);
taosMemoryFree(conn->ip);
- conn->stream->data = NULL;
taosMemoryFree(conn->stream);
- transCtxCleanup(&conn->ctx);
- cliReleaseUnfinishedMsg(conn);
- transQueueDestroy(&conn->cliMsgs);
+
+ cliDestroyConnMsgs(conn, true);
+
tTrace("%s conn %p destroy successfully", CONN_GET_INST_LABEL(conn), conn);
transReqQueueClear(&conn->wreqQueue);
transDestroyBuffer(&conn->readBuf);
@@ -714,6 +711,9 @@ static bool cliHandleNoResp(SCliConn* conn) {
if (cliMaySendCachedMsg(conn) == false) {
SCliThrd* thrd = conn->hostThrd;
addConnToPool(thrd->pool, conn);
+ res = false;
+ } else {
+ res = true;
}
}
}
@@ -738,8 +738,6 @@ static void cliSendCb(uv_write_t* req, int status) {
}
void cliSend(SCliConn* pConn) {
- CONN_HANDLE_BROKEN(pConn);
-
assert(!transQueueEmpty(&pConn->cliMsgs));
SCliMsg* pCliMsg = NULL;
@@ -756,8 +754,8 @@ void cliSend(SCliConn* pConn) {
pMsg->pCont = (void*)rpcMallocCont(0);
pMsg->contLen = 0;
}
- int msgLen = transMsgLenFromCont(pMsg->contLen);
+ int msgLen = transMsgLenFromCont(pMsg->contLen);
STransMsgHead* pHead = transHeadFromCont(pMsg->pCont);
pHead->ahandle = pCtx != NULL ? (uint64_t)pCtx->ahandle : 0;
pHead->noResp = REQUEST_NO_RESP(pMsg) ? 1 : 0;
@@ -769,8 +767,6 @@ void cliSend(SCliConn* pConn) {
pHead->traceId = pMsg->info.traceId;
pHead->magicNum = htonl(TRANS_MAGIC_NUM);
- uv_buf_t wb = uv_buf_init((char*)pHead, msgLen);
-
STraceId* trace = &pMsg->info.traceId;
tGDebug("%s conn %p %s is sent to %s, local info %s, len:%d", CONN_GET_INST_LABEL(pConn), pConn,
TMSG_INFO(pHead->msgType), pConn->dst, pConn->src, pMsg->contLen);
@@ -792,8 +788,16 @@ void cliSend(SCliConn* pConn) {
tGTrace("%s conn %p start timer for msg:%s", CONN_GET_INST_LABEL(pConn), pConn, TMSG_INFO(pMsg->msgType));
uv_timer_start((uv_timer_t*)pConn->timer, cliReadTimeoutCb, TRANS_READ_TIMEOUT, 0);
}
+
+ uv_buf_t wb = uv_buf_init((char*)pHead, msgLen);
uv_write_t* req = transReqQueuePush(&pConn->wreqQueue);
- uv_write(req, (uv_stream_t*)pConn->stream, &wb, 1, cliSendCb);
+
+ int status = uv_write(req, (uv_stream_t*)pConn->stream, &wb, 1, cliSendCb);
+ if (status != 0) {
+ tGError("%s conn %p failed to sent msg:%s, errmsg:%s", CONN_GET_INST_LABEL(pConn), pConn, TMSG_INFO(pMsg->msgType),
+ uv_err_name(status));
+ cliHandleExcept(pConn);
+ }
return;
_RETURN:
return;
@@ -807,7 +811,6 @@ void cliConnCb(uv_connect_t* req, int status) {
cliHandleExcept(pConn);
return;
}
- // int addrlen = sizeof(pConn->addr);
struct sockaddr peername, sockname;
int addrlen = sizeof(peername);
@@ -840,7 +843,7 @@ static void cliHandleRelease(SCliMsg* pMsg, SCliThrd* pThrd) {
int64_t refId = (int64_t)(pMsg->msg.info.handle);
SExHandle* exh = transAcquireExHandle(transGetRefMgt(), refId);
if (exh == NULL) {
- tDebug("%" PRId64 " already release", refId);
+ tDebug("%" PRId64 " already released", refId);
destroyCmsg(pMsg);
return;
}
@@ -856,6 +859,9 @@ static void cliHandleRelease(SCliMsg* pMsg, SCliThrd* pThrd) {
return;
}
cliSend(conn);
+ } else {
+ tError("%s conn %p already released", CONN_GET_INST_LABEL(conn), conn);
+ destroyCmsg(pMsg);
}
}
static void cliHandleUpdate(SCliMsg* pMsg, SCliThrd* pThrd) {
@@ -894,17 +900,35 @@ SCliConn* cliGetConn(SCliMsg* pMsg, SCliThrd* pThrd, bool* ignore) {
}
return conn;
}
-void cliMayCvtFqdnToIp(SEpSet* pEpSet, SCvtAddr* pCvtAddr) {
+FORCE_INLINE void cliMayCvtFqdnToIp(SEpSet* pEpSet, SCvtAddr* pCvtAddr) {
if (pCvtAddr->cvt == false) {
return;
}
- for (int i = 0; i < pEpSet->numOfEps && pEpSet->numOfEps == 1; i++) {
- if (strncmp(pEpSet->eps[i].fqdn, pCvtAddr->fqdn, TSDB_FQDN_LEN) == 0) {
- memset(pEpSet->eps[i].fqdn, 0, TSDB_FQDN_LEN);
- memcpy(pEpSet->eps[i].fqdn, pCvtAddr->ip, TSDB_FQDN_LEN);
- }
+ if (pEpSet->numOfEps == 1 && strncmp(pEpSet->eps[0].fqdn, pCvtAddr->fqdn, TSDB_FQDN_LEN) == 0) {
+ memset(pEpSet->eps[0].fqdn, 0, TSDB_FQDN_LEN);
+ memcpy(pEpSet->eps[0].fqdn, pCvtAddr->ip, TSDB_FQDN_LEN);
}
}
+
+FORCE_INLINE bool cliIsEpsetUpdated(int32_t code, STransConnCtx* pCtx) {
+ if (code != 0) return false;
+ if (pCtx->retryCnt == 0) return false;
+ if (transEpSetIsEqual(&pCtx->epSet, &pCtx->origEpSet)) return false;
+ return true;
+}
+FORCE_INLINE int32_t cliBuildExceptResp(SCliMsg* pMsg, STransMsg* pResp) {
+ if (pMsg == NULL) return -1;
+
+ memset(pResp, 0, sizeof(STransMsg));
+
+ pResp->code = TSDB_CODE_RPC_BROKEN_LINK;
+ pResp->msgType = pMsg->msg.msgType + 1;
+ pResp->info.ahandle = pMsg->ctx ? pMsg->ctx->ahandle : NULL;
+ pResp->info.traceId = pMsg->msg.info.traceId;
+
+ return 0;
+}
+
void cliHandleReq(SCliMsg* pMsg, SCliThrd* pThrd) {
STrans* pTransInst = pThrd->pTransInst;
STransConnCtx* pCtx = pMsg->ctx;
@@ -920,14 +944,11 @@ void cliHandleReq(SCliMsg* pMsg, SCliThrd* pThrd) {
SCliConn* conn = cliGetConn(pMsg, pThrd, &ignore);
if (ignore == true) {
// persist conn already release by server
- STransMsg resp = {0};
- resp.code = TSDB_CODE_RPC_BROKEN_LINK;
- resp.msgType = pMsg->msg.msgType + 1;
-
- resp.info.ahandle = pMsg && pMsg->ctx ? pMsg->ctx->ahandle : NULL;
- resp.info.traceId = pMsg->msg.info.traceId;
-
- pTransInst->cfp(pTransInst->parent, &resp, NULL);
+ STransMsg resp;
+ cliBuildExceptResp(pMsg, &resp);
+ if (pMsg->type != Release) {
+ pTransInst->cfp(pTransInst->parent, &resp, NULL);
+ }
destroyCmsg(pMsg);
return;
}
@@ -973,6 +994,8 @@ void cliHandleReq(SCliMsg* pMsg, SCliThrd* pThrd) {
return;
}
}
+ STraceId* trace = &pMsg->msg.info.traceId;
+ tGTrace("%s conn %p ready", pTransInst->label, conn);
}
static void cliAsyncCb(uv_async_t* handle) {
SAsyncItem* item = handle->data;
@@ -991,9 +1014,6 @@ static void cliAsyncCb(uv_async_t* handle) {
QUEUE_REMOVE(h);
SCliMsg* pMsg = QUEUE_DATA(h, SCliMsg, q);
- if (pMsg == NULL) {
- continue;
- }
(*cliAsyncHandle[pMsg->type])(pMsg, pThrd);
count++;
}
@@ -1035,24 +1055,58 @@ static void cliPrepareCb(uv_prepare_t* handle) {
if (thrd->stopMsg != NULL) cliHandleQuit(thrd->stopMsg, thrd);
}
+void cliDestroyConnMsgs(SCliConn* conn, bool destroy) {
+ transCtxCleanup(&conn->ctx);
+ cliReleaseUnfinishedMsg(conn);
+ if (destroy == 1) {
+ transQueueDestroy(&conn->cliMsgs);
+ } else {
+ transQueueClear(&conn->cliMsgs);
+ }
+}
+
+void cliIteraConnMsgs(SCliConn* conn) {
+ SCliThrd* pThrd = conn->hostThrd;
+ STrans* pTransInst = pThrd->pTransInst;
+
+ for (int i = 0; i < transQueueSize(&conn->cliMsgs); i++) {
+ SCliMsg* cmsg = transQueueGet(&conn->cliMsgs, i);
+ if (cmsg->type == Release || REQUEST_NO_RESP(&cmsg->msg) || cmsg->msg.msgType == TDMT_SCH_DROP_TASK) {
+ continue;
+ }
+
+ STransMsg resp = {0};
+ if (-1 == cliBuildExceptResp(cmsg, &resp)) {
+ continue;
+ }
+ pTransInst->cfp(pTransInst->parent, &resp, NULL);
+
+ cmsg->ctx->ahandle = NULL;
+ }
+}
bool cliRecvReleaseReq(SCliConn* conn, STransMsgHead* pHead) {
if (pHead->release == 1 && (pHead->msgLen) == sizeof(*pHead)) {
uint64_t ahandle = pHead->ahandle;
+ tDebug("ahandle = %" PRIu64 "", ahandle);
SCliMsg* pMsg = NULL;
CONN_GET_MSGCTX_BY_AHANDLE(conn, ahandle);
+
transClearBuffer(&conn->readBuf);
transFreeMsg(transContFromHead((char*)pHead));
- if (transQueueSize(&conn->cliMsgs) > 0 && ahandle == 0) {
- SCliMsg* cliMsg = transQueueGet(&conn->cliMsgs, 0);
- if (cliMsg->type == Release) return true;
+
+ for (int i = 0; ahandle == 0 && i < transQueueSize(&conn->cliMsgs); i++) {
+ SCliMsg* cliMsg = transQueueGet(&conn->cliMsgs, i);
+ if (cliMsg->type == Release) {
+ assert(pMsg == NULL);
+ return true;
+ }
}
+
+ cliIteraConnMsgs(conn);
+
tDebug("%s conn %p receive release request, refId:%" PRId64 "", CONN_GET_INST_LABEL(conn), conn, conn->refId);
- if (T_REF_VAL_GET(conn) > 1) {
- transUnrefCliHandle(conn);
- }
destroyCmsg(pMsg);
- cliReleaseUnfinishedMsg(conn);
- transQueueClear(&conn->cliMsgs);
+
addConnToPool(((SCliThrd*)conn->hostThrd)->pool, conn);
return true;
}
@@ -1090,14 +1144,15 @@ void* transInitClient(uint32_t ip, uint32_t port, char* label, int numOfThreads,
return cli;
}
-static void destroyUserdata(STransMsg* userdata) {
+static FORCE_INLINE void destroyUserdata(STransMsg* userdata) {
if (userdata->pCont == NULL) {
return;
}
transFreeMsg(userdata->pCont);
userdata->pCont = NULL;
}
-static void destroyCmsg(void* arg) {
+
+static FORCE_INLINE void destroyCmsg(void* arg) {
SCliMsg* pMsg = arg;
if (pMsg == NULL) {
return;
@@ -1163,7 +1218,7 @@ static void destroyThrdObj(SCliThrd* pThrd) {
taosMemoryFree(pThrd);
}
-static void transDestroyConnCtx(STransConnCtx* ctx) {
+static FORCE_INLINE void transDestroyConnCtx(STransConnCtx* ctx) {
//
taosMemoryFree(ctx);
}
@@ -1182,7 +1237,7 @@ void cliWalkCb(uv_handle_t* handle, void* arg) {
}
}
-int cliRBChoseIdx(STrans* pTransInst) {
+FORCE_INLINE int cliRBChoseIdx(STrans* pTransInst) {
int8_t index = pTransInst->index;
if (pTransInst->numOfThreads == 0) {
return -1;
@@ -1192,7 +1247,7 @@ int cliRBChoseIdx(STrans* pTransInst) {
}
return index % pTransInst->numOfThreads;
}
-static void doDelayTask(void* param) {
+static FORCE_INLINE void doDelayTask(void* param) {
STaskArg* arg = param;
SCliMsg* pMsg = arg->param1;
SCliThrd* pThrd = arg->param2;
@@ -1226,13 +1281,13 @@ static void cliSchedMsgToNextNode(SCliMsg* pMsg, SCliThrd* pThrd) {
transDQSched(pThrd->delayQueue, doDelayTask, arg, TRANS_RETRY_INTERVAL);
}
-void cliCompareAndSwap(int8_t* val, int8_t exp, int8_t newVal) {
+FORCE_INLINE void cliCompareAndSwap(int8_t* val, int8_t exp, int8_t newVal) {
if (*val != exp) {
*val = newVal;
}
}
-bool cliTryExtractEpSet(STransMsg* pResp, SEpSet* dst) {
+FORCE_INLINE bool cliTryExtractEpSet(STransMsg* pResp, SEpSet* dst) {
if ((pResp == NULL || pResp->info.hasEpSet == 0)) {
return false;
}
@@ -1262,15 +1317,11 @@ int cliAppCb(SCliConn* pConn, STransMsg* pResp, SCliMsg* pMsg) {
STrans* pTransInst = pThrd->pTransInst;
if (pMsg == NULL || pMsg->ctx == NULL) {
- tTrace("%s conn %p handle resp", pTransInst->label, pConn);
+ tDebug("%s conn %p handle resp", pTransInst->label, pConn);
pTransInst->cfp(pTransInst->parent, pResp, NULL);
return 0;
}
- /*
- * no retry
- * 1. query conn
- * 2. rpc thread already receive quit msg
- */
+
STransConnCtx* pCtx = pMsg->ctx;
int32_t code = pResp->code;
@@ -1368,53 +1419,57 @@ void transUnrefCliHandle(void* handle) {
cliDestroyConn((SCliConn*)handle, true);
}
}
-SCliThrd* transGetWorkThrdFromHandle(int64_t handle, bool* validHandle) {
+static FORCE_INLINE SCliThrd* transGetWorkThrdFromHandle(STrans* trans, int64_t handle) {
SCliThrd* pThrd = NULL;
SExHandle* exh = transAcquireExHandle(transGetRefMgt(), handle);
if (exh == NULL) {
return NULL;
}
- *validHandle = true;
+ if (exh->pThrd == NULL && trans != NULL) {
+ int idx = cliRBChoseIdx(trans);
+ if (idx < 0) return NULL;
+ exh->pThrd = ((SCliObj*)trans->tcphandle)->pThreadObj[idx];
+ }
+
pThrd = exh->pThrd;
transReleaseExHandle(transGetRefMgt(), handle);
return pThrd;
}
-SCliThrd* transGetWorkThrd(STrans* trans, int64_t handle, bool* validHandle) {
+SCliThrd* transGetWorkThrd(STrans* trans, int64_t handle) {
if (handle == 0) {
int idx = cliRBChoseIdx(trans);
if (idx < 0) return NULL;
return ((SCliObj*)trans->tcphandle)->pThreadObj[idx];
}
- SCliThrd* pThrd = transGetWorkThrdFromHandle(handle, validHandle);
- if (*validHandle == true && pThrd == NULL) {
- int idx = cliRBChoseIdx(trans);
- if (idx < 0) return NULL;
- pThrd = ((SCliObj*)trans->tcphandle)->pThreadObj[idx];
- }
+ SCliThrd* pThrd = transGetWorkThrdFromHandle(trans, handle);
return pThrd;
}
int transReleaseCliHandle(void* handle) {
int idx = -1;
bool valid = false;
- SCliThrd* pThrd = transGetWorkThrdFromHandle((int64_t)handle, &valid);
+ SCliThrd* pThrd = transGetWorkThrdFromHandle(NULL, (int64_t)handle);
if (pThrd == NULL) {
return -1;
}
- STransMsg tmsg = {.info.handle = handle};
+ STransMsg tmsg = {.info.handle = handle, .info.ahandle = (void*)0x9527};
TRACE_SET_MSGID(&tmsg.info.traceId, tGenIdPI64());
+ STransConnCtx* pCtx = taosMemoryCalloc(1, sizeof(STransConnCtx));
+ pCtx->ahandle = tmsg.info.ahandle;
+
SCliMsg* cmsg = taosMemoryCalloc(1, sizeof(SCliMsg));
cmsg->msg = tmsg;
cmsg->type = Release;
+ cmsg->ctx = pCtx;
STraceId* trace = &tmsg.info.traceId;
tGDebug("send release request at thread:%08" PRId64 "", pThrd->pid);
if (0 != transAsyncSend(pThrd->asyncPool, &cmsg->q)) {
- taosMemoryFree(cmsg);
+ destroyCmsg(cmsg);
return -1;
}
return 0;
@@ -1427,9 +1482,8 @@ int transSendRequest(void* shandle, const SEpSet* pEpSet, STransMsg* pReq, STran
return -1;
}
- bool valid = false;
- SCliThrd* pThrd = transGetWorkThrd(pTransInst, (int64_t)pReq->info.handle, &valid);
- if (pThrd == NULL && valid == false) {
+ SCliThrd* pThrd = transGetWorkThrd(pTransInst, (int64_t)pReq->info.handle);
+ if (pThrd == NULL) {
transFreeMsg(pReq->pCont);
transReleaseExHandle(transGetInstMgt(), (int64_t)shandle);
return TSDB_CODE_RPC_BROKEN_LINK;
@@ -1472,9 +1526,8 @@ int transSendRecv(void* shandle, const SEpSet* pEpSet, STransMsg* pReq, STransMs
return -1;
}
- bool valid = false;
- SCliThrd* pThrd = transGetWorkThrd(pTransInst, (int64_t)pReq->info.handle, &valid);
- if (pThrd == NULL && valid == false) {
+ SCliThrd* pThrd = transGetWorkThrd(pTransInst, (int64_t)pReq->info.handle);
+ if (pThrd == NULL) {
transFreeMsg(pReq->pCont);
transReleaseExHandle(transGetInstMgt(), (int64_t)shandle);
return TSDB_CODE_RPC_BROKEN_LINK;
@@ -1558,6 +1611,7 @@ int64_t transAllocHandle() {
SExHandle* exh = taosMemoryCalloc(1, sizeof(SExHandle));
exh->refId = transAddExHandle(transGetRefMgt(), exh);
tDebug("pre alloc refId %" PRId64 "", exh->refId);
+
return exh->refId;
}
#endif
diff --git a/source/libs/transport/src/transComm.c b/source/libs/transport/src/transComm.c
index a4d679b281512ff13757eab7c9c42a11e0edb36b..5f3171ee0e840ee7f558a13b4ad315bd9bcfb856 100644
--- a/source/libs/transport/src/transComm.c
+++ b/source/libs/transport/src/transComm.c
@@ -287,10 +287,10 @@ void transCtxMerge(STransCtx* dst, STransCtx* src) {
STransCtxVal* sVal = (STransCtxVal*)iter;
key = taosHashGetKey(sVal, &klen);
- STransCtxVal* dVal = taosHashGet(dst->args, key, klen);
- if (dVal) {
- dst->freeFunc(dVal->val);
- }
+ // STransCtxVal* dVal = taosHashGet(dst->args, key, klen);
+ // if (dVal) {
+ // dst->freeFunc(dVal->val);
+ // }
taosHashPut(dst->args, key, klen, sVal, sizeof(*sVal));
iter = taosHashIterate(src->args, iter);
}
@@ -424,7 +424,7 @@ void transQueueDestroy(STransQueue* queue) {
taosArrayDestroy(queue->q);
}
-static int32_t timeCompare(const HeapNode* a, const HeapNode* b) {
+static FORCE_INLINE int32_t timeCompare(const HeapNode* a, const HeapNode* b) {
SDelayTask* arg1 = container_of(a, SDelayTask, node);
SDelayTask* arg2 = container_of(b, SDelayTask, node);
if (arg1->execTime > arg2->execTime) {
diff --git a/source/libs/transport/src/transSvr.c b/source/libs/transport/src/transSvr.c
index 447db7613656613255369230138979a7596754a9..d3277d1cc1ad740bb9e6f01aba4690d42e07fd38 100644
--- a/source/libs/transport/src/transSvr.c
+++ b/source/libs/transport/src/transSvr.c
@@ -125,17 +125,17 @@ static void uvWorkAfterTask(uv_work_t* req, int status);
static void uvWalkCb(uv_handle_t* handle, void* arg);
static void uvFreeCb(uv_handle_t* handle);
-static void uvStartSendRespImpl(SSvrMsg* smsg);
+static FORCE_INLINE void uvStartSendRespImpl(SSvrMsg* smsg);
+
static void uvPrepareSendData(SSvrMsg* msg, uv_buf_t* wb);
static void uvStartSendResp(SSvrMsg* msg);
static void uvNotifyLinkBrokenToApp(SSvrConn* conn);
-static void destroySmsg(SSvrMsg* smsg);
-// check whether already read complete packet
-static SSvrConn* createConn(void* hThrd);
-static void destroyConn(SSvrConn* conn, bool clear /*clear handle or not*/);
-static void destroyConnRegArg(SSvrConn* conn);
+static FORCE_INLINE void destroySmsg(SSvrMsg* smsg);
+static FORCE_INLINE SSvrConn* createConn(void* hThrd);
+static FORCE_INLINE void destroyConn(SSvrConn* conn, bool clear /*clear handle or not*/);
+static FORCE_INLINE void destroyConnRegArg(SSvrConn* conn);
static int reallocConnRef(SSvrConn* conn);
@@ -276,14 +276,16 @@ void uvOnRecvCb(uv_stream_t* cli, ssize_t nread, const uv_buf_t* buf) {
while (transReadComplete(pBuf)) {
tTrace("%s conn %p alread read complete packet", transLabel(pTransInst), conn);
if (true == pBuf->invalid || false == uvHandleReq(conn)) {
- tError("%s conn %p read invalid packet", transLabel(pTransInst), conn);
+ tError("%s conn %p read invalid packet, received from %s, local info:%s", transLabel(pTransInst), conn,
+ conn->dst, conn->src);
destroyConn(conn, true);
return;
}
}
return;
} else {
- tError("%s conn %p read invalid packet, exceed limit", transLabel(pTransInst), conn);
+ tError("%s conn %p read invalid packet, exceed limit, received from %s, local info:", transLabel(pTransInst),
+ conn, conn->dst, conn->src);
destroyConn(conn, true);
return;
}
@@ -411,7 +413,7 @@ static void uvPrepareSendData(SSvrMsg* smsg, uv_buf_t* wb) {
wb->len = len;
}
-static void uvStartSendRespImpl(SSvrMsg* smsg) {
+static FORCE_INLINE void uvStartSendRespImpl(SSvrMsg* smsg) {
SSvrConn* pConn = smsg->pConn;
if (pConn->broken) {
return;
@@ -445,7 +447,7 @@ static void uvStartSendResp(SSvrMsg* smsg) {
return;
}
-static void destroySmsg(SSvrMsg* smsg) {
+static FORCE_INLINE void destroySmsg(SSvrMsg* smsg) {
if (smsg == NULL) {
return;
}
@@ -490,7 +492,6 @@ void uvWorkerAsyncCb(uv_async_t* handle) {
// release handle to rpc init
if (msg->type == Quit) {
(*transAsyncHandle[msg->type])(msg, pThrd);
- continue;
} else {
STransMsg transMsg = msg->msg;
@@ -649,7 +650,7 @@ void uvOnAcceptCb(uv_stream_t* stream, int status) {
pObj->workerIdx = (pObj->workerIdx + 1) % pObj->numOfThreads;
- tTrace("new conntion accepted by main server, dispatch to %dth worker-thread", pObj->workerIdx);
+ tTrace("new connection accepted by main server, dispatch to %dth worker-thread", pObj->workerIdx);
uv_write2(wr, (uv_stream_t*)&(pObj->pipe[pObj->workerIdx][0]), &buf, 1, (uv_stream_t*)cli, uvOnPipeWriteCb);
} else {
@@ -769,7 +770,7 @@ static bool addHandleToWorkloop(SWorkThrd* pThrd, char* pipeName) {
// conn set
QUEUE_INIT(&pThrd->conn);
- pThrd->asyncPool = transAsyncPoolCreate(pThrd->loop, 1, pThrd, uvWorkerAsyncCb);
+ pThrd->asyncPool = transAsyncPoolCreate(pThrd->loop, 5, pThrd, uvWorkerAsyncCb);
uv_pipe_connect(&pThrd->connect_req, pThrd->pipe, pipeName, uvOnPipeConnectionCb);
// uv_read_start((uv_stream_t*)pThrd->pipe, uvAllocConnBufferCb, uvOnConnectionCb);
return true;
@@ -811,7 +812,7 @@ void* transWorkerThread(void* arg) {
return NULL;
}
-static SSvrConn* createConn(void* hThrd) {
+static FORCE_INLINE SSvrConn* createConn(void* hThrd) {
SWorkThrd* pThrd = hThrd;
SSvrConn* pConn = (SSvrConn*)taosMemoryCalloc(1, sizeof(SSvrConn));
@@ -841,7 +842,7 @@ static SSvrConn* createConn(void* hThrd) {
return pConn;
}
-static void destroyConn(SSvrConn* conn, bool clear) {
+static FORCE_INLINE void destroyConn(SSvrConn* conn, bool clear) {
if (conn == NULL) {
return;
}
@@ -853,7 +854,7 @@ static void destroyConn(SSvrConn* conn, bool clear) {
}
}
}
-static void destroyConnRegArg(SSvrConn* conn) {
+static FORCE_INLINE void destroyConnRegArg(SSvrConn* conn) {
if (conn->regArg.init == 1) {
transFreeMsg(conn->regArg.msg.pCont);
conn->regArg.init = 0;
@@ -905,23 +906,30 @@ static void uvDestroyConn(uv_handle_t* handle) {
}
}
static void uvPipeListenCb(uv_stream_t* handle, int status) {
- ASSERT(status == 0);
+ if (status != 0) {
+ tError("server failed to init pipe, errmsg: %s", uv_err_name(status));
+ return;
+ }
SServerObj* srv = container_of(handle, SServerObj, pipeListen);
uv_pipe_t* pipe = &(srv->pipe[srv->numOfWorkerReady][0]);
- ASSERT(0 == uv_pipe_init(srv->loop, pipe, 1));
- ASSERT(0 == uv_accept((uv_stream_t*)&srv->pipeListen, (uv_stream_t*)pipe));
- ASSERT(1 == uv_is_readable((uv_stream_t*)pipe));
- ASSERT(1 == uv_is_writable((uv_stream_t*)pipe));
- ASSERT(0 == uv_is_closing((uv_handle_t*)pipe));
+ int ret = uv_pipe_init(srv->loop, pipe, 1);
+ assert(ret == 0);
- srv->numOfWorkerReady++;
+ ret = uv_accept((uv_stream_t*)&srv->pipeListen, (uv_stream_t*)pipe);
+ assert(ret == 0);
+
+ ret = uv_is_readable((uv_stream_t*)pipe);
+ assert(ret == 1);
+
+ ret = uv_is_writable((uv_stream_t*)pipe);
+ assert(ret == 1);
- // ASSERT(0 == uv_listen((uv_stream_t*)&ctx.send.tcp, 512, uvOnAcceptCb));
+ ret = uv_is_closing((uv_handle_t*)pipe);
+ assert(ret == 0);
- // r = uv_read_start((uv_stream_t*)&ctx.channel, alloc_cb, read_cb);
- // ASSERT(r == 0);
+ srv->numOfWorkerReady++;
}
void* transInitServer(uint32_t ip, uint32_t port, char* label, int numOfThreads, void* fp, void* shandle) {
@@ -936,7 +944,12 @@ void* transInitServer(uint32_t ip, uint32_t port, char* label, int numOfThreads,
srv->port = port;
uv_loop_init(srv->loop);
- assert(0 == uv_pipe_init(srv->loop, &srv->pipeListen, 0));
+ int ret = uv_pipe_init(srv->loop, &srv->pipeListen, 0);
+ if (ret != 0) {
+ tError("failed to init pipe, errmsg: %s", uv_err_name(ret));
+ goto End;
+ }
+
#ifdef WINDOWS
char pipeName[64];
snprintf(pipeName, sizeof(pipeName), "\\\\?\\pipe\\trans.rpc.%p-" PRIu64, taosSafeRand(), GetCurrentProcessId());
@@ -945,8 +958,17 @@ void* transInitServer(uint32_t ip, uint32_t port, char* label, int numOfThreads,
snprintf(pipeName, sizeof(pipeName), "%s%spipe.trans.rpc.%08X-" PRIu64, tsTempDir, TD_DIRSEP, taosSafeRand(),
taosGetSelfPthreadId());
#endif
- assert(0 == uv_pipe_bind(&srv->pipeListen, pipeName));
- assert(0 == uv_listen((uv_stream_t*)&srv->pipeListen, SOMAXCONN, uvPipeListenCb));
+ ret = uv_pipe_bind(&srv->pipeListen, pipeName);
+ if (ret != 0) {
+ tError("failed to bind pipe, errmsg: %s", uv_err_name(ret));
+ goto End;
+ }
+
+ ret = uv_listen((uv_stream_t*)&srv->pipeListen, SOMAXCONN, uvPipeListenCb);
+ if (ret != 0) {
+ tError("failed to listen pipe, errmsg: %s", uv_err_name(ret));
+ goto End;
+ }
for (int i = 0; i < srv->numOfThreads; i++) {
SWorkThrd* thrd = (SWorkThrd*)taosMemoryCalloc(1, sizeof(SWorkThrd));
@@ -1069,12 +1091,12 @@ void transCloseServer(void* arg) {
if (srv->inited) {
uv_async_send(srv->pAcceptAsync);
taosThreadJoin(srv->thread, NULL);
- }
- SRV_RELEASE_UV(srv->loop);
+ SRV_RELEASE_UV(srv->loop);
- for (int i = 0; i < srv->numOfThreads; i++) {
- sendQuitToWorkThrd(srv->pThreadObj[i]);
- destroyWorkThrd(srv->pThreadObj[i]);
+ for (int i = 0; i < srv->numOfThreads; i++) {
+ sendQuitToWorkThrd(srv->pThreadObj[i]);
+ destroyWorkThrd(srv->pThreadObj[i]);
+ }
}
taosMemoryFree(srv->pThreadObj);
diff --git a/source/libs/wal/src/walMeta.c b/source/libs/wal/src/walMeta.c
index a8da6809100fa5789e5b7e57e051631257782e1e..93ced912f8e2358c2aab6f04957ce060cf61c924 100644
--- a/source/libs/wal/src/walMeta.c
+++ b/source/libs/wal/src/walMeta.c
@@ -121,7 +121,7 @@ static FORCE_INLINE int64_t walScanLogGetLastVer(SWal* pWal) {
if (found == NULL) {
// file corrupted, no complete log
// TODO delete and search in previous files
- ASSERT(0);
+ /*ASSERT(0);*/
terrno = TSDB_CODE_WAL_FILE_CORRUPTED;
return -1;
}
@@ -221,7 +221,6 @@ int walCheckAndRepairMeta(SWal* pWal) {
int code = walSaveMeta(pWal);
if (code < 0) {
- taosArrayDestroy(actualLog);
return -1;
}
}
diff --git a/source/libs/wal/src/walRead.c b/source/libs/wal/src/walRead.c
index a5b5a2b7b4cac113978d8278ecf0a57686a67257..5c437e6f7aeb942e02e79314e4140af0cdfe4323 100644
--- a/source/libs/wal/src/walRead.c
+++ b/source/libs/wal/src/walRead.c
@@ -168,6 +168,9 @@ static int32_t walReadChangeFile(SWalReader *pReader, int64_t fileFirstVer) {
}
pReader->pIdxFile = pIdxFile;
+
+ pReader->curFileFirstVer = fileFirstVer;
+
return 0;
}
@@ -372,7 +375,7 @@ int32_t walFetchHead(SWalReader *pRead, int64_t ver, SWalCkHead *pHead) {
int32_t walSkipFetchBody(SWalReader *pRead, const SWalCkHead *pHead) {
int64_t code;
- ASSERT(pRead->curVersion == pHead->head.version);
+ // ASSERT(pRead->curVersion == pHead->head.version);
code = taosLSeekFile(pRead->pLogFile, pHead->head.bodyLen, SEEK_CUR);
if (code < 0) {
@@ -415,7 +418,8 @@ int32_t walFetchBody(SWalReader *pRead, SWalCkHead **ppHead) {
}
if (walValidBodyCksum(*ppHead) != 0) {
- wError("vgId:%d, wal fetch body error, index:%" PRId64 ", since body checksum not passed", pRead->pWal->cfg.vgId, ver);
+ wError("vgId:%d, wal fetch body error, index:%" PRId64 ", since body checksum not passed", pRead->pWal->cfg.vgId,
+ ver);
pRead->curInvalid = 1;
terrno = TSDB_CODE_WAL_FILE_CORRUPTED;
return -1;
diff --git a/source/os/src/osDir.c b/source/os/src/osDir.c
index b755a35815fb64d6fa11ff3e0c35efc647318b83..3dfb1458ad2fc802af761f68a4fe4407098fff25 100644
--- a/source/os/src/osDir.c
+++ b/source/os/src/osDir.c
@@ -133,6 +133,7 @@ int32_t taosMulMkDir(const char *dirname) {
code = mkdir(temp, 0755);
#endif
if (code < 0 && errno != EEXIST) {
+ terrno = TAOS_SYSTEM_ERROR(errno);
return code;
}
*pos = TD_DIRSEP[0];
@@ -146,6 +147,7 @@ int32_t taosMulMkDir(const char *dirname) {
code = mkdir(temp, 0755);
#endif
if (code < 0 && errno != EEXIST) {
+ terrno = TAOS_SYSTEM_ERROR(errno);
return code;
}
}
@@ -158,6 +160,66 @@ int32_t taosMulMkDir(const char *dirname) {
return code;
}
+int32_t taosMulModeMkDir(const char *dirname, int mode) {
+ if (dirname == NULL) return -1;
+ char temp[1024];
+ char *pos = temp;
+ int32_t code = 0;
+#ifdef WINDOWS
+ taosRealPath(dirname, temp, sizeof(temp));
+ if (temp[1] == ':') pos += 3;
+#else
+ strcpy(temp, dirname);
+#endif
+
+ if (taosDirExist(temp)) {
+ chmod(temp, mode);
+ return code;
+ }
+
+ if (strncmp(temp, TD_DIRSEP, 1) == 0) {
+ pos += 1;
+ } else if (strncmp(temp, "." TD_DIRSEP, 2) == 0) {
+ pos += 2;
+ }
+
+ for (; *pos != '\0'; pos++) {
+ if (*pos == TD_DIRSEP[0]) {
+ *pos = '\0';
+#ifdef WINDOWS
+ code = _mkdir(temp, mode);
+#else
+ code = mkdir(temp, mode);
+#endif
+ if (code < 0 && errno != EEXIST) {
+ terrno = TAOS_SYSTEM_ERROR(errno);
+ return code;
+ }
+ *pos = TD_DIRSEP[0];
+ }
+ }
+
+ if (*(pos - 1) != TD_DIRSEP[0]) {
+#ifdef WINDOWS
+ code = _mkdir(temp, mode);
+#else
+ code = mkdir(temp, mode);
+#endif
+ if (code < 0 && errno != EEXIST) {
+ terrno = TAOS_SYSTEM_ERROR(errno);
+ return code;
+ }
+ }
+
+ if (code < 0 && errno == EEXIST) {
+ chmod(temp, mode);
+ return 0;
+ }
+
+ chmod(temp, mode);
+ return code;
+}
+
void taosRemoveOldFiles(const char *dirname, int32_t keepDays) {
TdDirPtr pDir = taosOpenDir(dirname);
if (pDir == NULL) return;
diff --git a/source/os/src/osFile.c b/source/os/src/osFile.c
index 2d9cfe3246de35910c9fce2a88ba5b62f9968e67..fab933755a73ba23be962cb76b34da002b8a3702 100644
--- a/source/os/src/osFile.c
+++ b/source/os/src/osFile.c
@@ -203,10 +203,11 @@ int32_t taosRenameFile(const char *oldName, const char *newName) {
}
int32_t taosStatFile(const char *path, int64_t *size, int32_t *mtime) {
- struct stat fileStat;
#ifdef WINDOWS
- int32_t code = _stat(path, &fileStat);
+ struct _stati64 fileStat;
+ int32_t code = _stati64(path, &fileStat);
#else
+ struct stat fileStat;
int32_t code = stat(path, &fileStat);
#endif
if (code < 0) {
@@ -312,6 +313,7 @@ TdFilePtr taosOpenFile(const char *path, int32_t tdFileOptions) {
assert(!(tdFileOptions & TD_FILE_EXCL));
fp = fopen(path, mode);
if (fp == NULL) {
+ terrno = TAOS_SYSTEM_ERROR(errno);
return NULL;
}
} else {
@@ -334,6 +336,7 @@ TdFilePtr taosOpenFile(const char *path, int32_t tdFileOptions) {
fd = open(path, access, S_IRWXU | S_IRWXG | S_IRWXO);
#endif
if (fd == -1) {
+ terrno = TAOS_SYSTEM_ERROR(errno);
return NULL;
}
}
diff --git a/source/os/src/osSemaphore.c b/source/os/src/osSemaphore.c
index a7d2ba85311b8f2a9ababbde0f1f5857cb354484..8cc6f0ef2e2b436624cc961315e3ecff6db7691b 100644
--- a/source/os/src/osSemaphore.c
+++ b/source/os/src/osSemaphore.c
@@ -392,179 +392,32 @@ int32_t tsem_timewait(tsem_t* sem, int64_t nanosecs) {
// *sem = NULL;
// return 0;
// }
-typedef struct {
- pthread_mutex_t count_lock;
- pthread_cond_t count_bump;
- unsigned int count;
-} bosal_sem_t;
int tsem_init(tsem_t *psem, int flags, unsigned int count) {
- bosal_sem_t *pnewsem;
- int result;
-
- pnewsem = (bosal_sem_t *)malloc(sizeof(bosal_sem_t));
- if (!pnewsem) {
- return -1;
- }
- result = pthread_mutex_init(&pnewsem->count_lock, NULL);
- if (result) {
- free(pnewsem);
- return result;
- }
- result = pthread_cond_init(&pnewsem->count_bump, NULL);
- if (result) {
- pthread_mutex_destroy(&pnewsem->count_lock);
- free(pnewsem);
- return result;
- }
- pnewsem->count = count;
- *psem = (tsem_t)pnewsem;
+ *psem = dispatch_semaphore_create(count);
+ if (*psem == NULL) return -1;
return 0;
}
int tsem_destroy(tsem_t *psem) {
- bosal_sem_t *poldsem;
-
- if (!psem) {
- return EINVAL;
- }
- poldsem = (bosal_sem_t *)*psem;
-
- pthread_mutex_destroy(&poldsem->count_lock);
- pthread_cond_destroy(&poldsem->count_bump);
- free(poldsem);
return 0;
}
int tsem_post(tsem_t *psem) {
- bosal_sem_t *pxsem;
- int result, xresult;
-
- if (!psem) {
- return EINVAL;
- }
- pxsem = (bosal_sem_t *)*psem;
-
- result = pthread_mutex_lock(&pxsem->count_lock);
- if (result) {
- return result;
- }
- pxsem->count = pxsem->count + 1;
-
- xresult = pthread_cond_signal(&pxsem->count_bump);
-
- result = pthread_mutex_unlock(&pxsem->count_lock);
- if (result) {
- return result;
- }
- if (xresult) {
- errno = xresult;
- return -1;
- }
- return 0;
-}
-
-int tsem_trywait(tsem_t *psem) {
- bosal_sem_t *pxsem;
- int result, xresult;
-
- if (!psem) {
- return EINVAL;
- }
- pxsem = (bosal_sem_t *)*psem;
-
- result = pthread_mutex_lock(&pxsem->count_lock);
- if (result) {
- return result;
- }
- xresult = 0;
-
- if (pxsem->count > 0) {
- pxsem->count--;
- } else {
- xresult = EAGAIN;
- }
- result = pthread_mutex_unlock(&pxsem->count_lock);
- if (result) {
- return result;
- }
- if (xresult) {
- errno = xresult;
- return -1;
- }
+ if (psem == NULL || *psem == NULL) return -1;
+ dispatch_semaphore_signal(*psem);
return 0;
}
int tsem_wait(tsem_t *psem) {
- bosal_sem_t *pxsem;
- int result, xresult;
-
- if (!psem) {
- return EINVAL;
- }
- pxsem = (bosal_sem_t *)*psem;
-
- result = pthread_mutex_lock(&pxsem->count_lock);
- if (result) {
- return result;
- }
- xresult = 0;
-
- if (pxsem->count == 0) {
- xresult = pthread_cond_wait(&pxsem->count_bump, &pxsem->count_lock);
- }
- if (!xresult) {
- if (pxsem->count > 0) {
- pxsem->count--;
- }
- }
- result = pthread_mutex_unlock(&pxsem->count_lock);
- if (result) {
- return result;
- }
- if (xresult) {
- errno = xresult;
- return -1;
- }
+ if (psem == NULL || *psem == NULL) return -1;
+ dispatch_semaphore_wait(*psem, DISPATCH_TIME_FOREVER);
return 0;
}
int tsem_timewait(tsem_t *psem, int64_t nanosecs) {
- struct timespec abstim = {
- .tv_sec = 0,
- .tv_nsec = nanosecs,
- };
-
- bosal_sem_t *pxsem;
- int result, xresult;
-
- if (!psem) {
- return EINVAL;
- }
- pxsem = (bosal_sem_t *)*psem;
-
- result = pthread_mutex_lock(&pxsem->count_lock);
- if (result) {
- return result;
- }
- xresult = 0;
-
- if (pxsem->count == 0) {
- xresult = pthread_cond_timedwait(&pxsem->count_bump, &pxsem->count_lock, &abstim);
- }
- if (!xresult) {
- if (pxsem->count > 0) {
- pxsem->count--;
- }
- }
- result = pthread_mutex_unlock(&pxsem->count_lock);
- if (result) {
- return result;
- }
- if (xresult) {
- errno = xresult;
- return -1;
- }
+ if (psem == NULL || *psem == NULL) return -1;
+ dispatch_semaphore_wait(*psem, nanosecs);
return 0;
}
diff --git a/source/os/src/osSysinfo.c b/source/os/src/osSysinfo.c
index 3a75e18a7f8c1b60a86e8aac75f5c5f624176e5a..19e9568bbebf20a74e5f316bb50056efa4786c1a 100644
--- a/source/os/src/osSysinfo.c
+++ b/source/os/src/osSysinfo.c
@@ -595,6 +595,7 @@ int32_t taosGetDiskSize(char *dataDir, SDiskSize *diskSize) {
#else
struct statvfs info;
if (statvfs(dataDir, &info)) {
+ terrno = TAOS_SYSTEM_ERROR(errno);
return -1;
} else {
diskSize->total = info.f_blocks * info.f_frsize;
@@ -851,13 +852,12 @@ char *taosGetCmdlineByPID(int pid) {
}
void taosSetCoreDump(bool enable) {
+ if (!enable) return;
#ifdef WINDOWS
- // SetUnhandledExceptionFilter(exceptionHandler);
- // SetUnhandledExceptionFilter(&FlCrashDump);
+ SetUnhandledExceptionFilter(exceptionHandler);
+ SetUnhandledExceptionFilter(&FlCrashDump);
#elif defined(_TD_DARWIN_64)
#else
- if (!enable) return;
-
// 1. set ulimit -c unlimited
struct rlimit rlim;
struct rlimit rlim_new;
diff --git a/source/util/src/talgo.c b/source/util/src/talgo.c
index 5353cd9bfec94b460fc1f5c3d9ad657ead6ad76b..699f0db7a193b1e0390efd12de6f639de5b69f86 100644
--- a/source/util/src/talgo.c
+++ b/source/util/src/talgo.c
@@ -201,6 +201,7 @@ void *taosbsearch(const void *key, const void *base, int32_t nmemb, int32_t size
return (c > 0) ? p : (midx > 0 ? p - size : NULL);
} else {
ASSERT(0);
+ return NULL;
}
}
diff --git a/source/util/src/tcompare.c b/source/util/src/tcompare.c
index fe3065b2b78a46a85d6dc04b90fcff4e0fe80f03..cbda4e46557e7931d1ce5dea31c2baa4f2d6ddef 100644
--- a/source/util/src/tcompare.c
+++ b/source/util/src/tcompare.c
@@ -186,15 +186,16 @@ int32_t compareLenPrefixedStr(const void *pLeft, const void *pRight) {
int32_t len1 = varDataLen(pLeft);
int32_t len2 = varDataLen(pRight);
- if (len1 != len2) {
- return len1 > len2 ? 1 : -1;
- } else {
- int32_t ret = strncmp(varDataVal(pLeft), varDataVal(pRight), len1);
- if (ret == 0) {
+ int32_t minLen = TMIN(len1, len2);
+ int32_t ret = strncmp(varDataVal(pLeft), varDataVal(pRight), minLen);
+ if (ret == 0) {
+ if (len1 == len2) {
return 0;
} else {
- return ret > 0 ? 1 : -1;
+ return len1 > len2 ? 1 : -1;
}
+ } else {
+ return ret > 0 ? 1 : -1;
}
}
@@ -243,9 +244,760 @@ int32_t compareJsonVal(const void *pLeft, const void *pRight) {
return 0;
}else{
assert(0);
+ return 0;
}
}
+int32_t compareInt8Int16(const void *pLeft, const void *pRight) {
+  int8_t left = GET_INT8_VAL(pLeft);  // read exactly one byte, not four
+  int16_t right = GET_INT16_VAL(pRight);
+  if (left > right) return 1;
+  if (left < right) return -1;
+  return 0;
+}
+
+int32_t compareInt8Int32(const void *pLeft, const void *pRight) {
+  int8_t left = GET_INT8_VAL(pLeft);  // read exactly one byte, not four
+  int32_t right = GET_INT32_VAL(pRight);
+  if (left > right) return 1;
+  if (left < right) return -1;
+  return 0;
+}
+
+int32_t compareInt8Int64(const void *pLeft, const void *pRight) {
+  int8_t left = GET_INT8_VAL(pLeft);  // read exactly one byte, not four
+  int64_t right = GET_INT64_VAL(pRight);
+  if (left > right) return 1;
+  if (left < right) return -1;
+  return 0;
+}
+
+int32_t compareInt8Float(const void *pLeft, const void *pRight) {
+  int8_t left = GET_INT8_VAL(pLeft);  // read exactly one byte, not four
+  float right = GET_FLOAT_VAL(pRight);
+  if (left > right) return 1;
+  if (left < right) return -1;
+  return 0;
+}
+
+int32_t compareInt8Double(const void *pLeft, const void *pRight) {
+  int8_t left = GET_INT8_VAL(pLeft);  // read exactly one byte, not four
+  double right = GET_DOUBLE_VAL(pRight);
+  if (left > right) return 1;
+  if (left < right) return -1;
+  return 0;
+}
+
+int32_t compareInt8Uint8(const void *pLeft, const void *pRight) {
+  int8_t left = GET_INT8_VAL(pLeft);  // read exactly one byte, not four
+  uint8_t right = GET_UINT8_VAL(pRight);
+  if (left > right) return 1;  // both promote to int: signed compare is correct
+  if (left < right) return -1;
+  return 0;
+}
+
+int32_t compareInt8Uint16(const void *pLeft, const void *pRight) {
+  int8_t left = GET_INT8_VAL(pLeft);  // read exactly one byte, not four
+  uint16_t right = GET_UINT16_VAL(pRight);
+  if (left > right) return 1;  // both promote to int: signed compare is correct
+  if (left < right) return -1;
+  return 0;
+}
+
+int32_t compareInt8Uint32(const void *pLeft, const void *pRight) {
+  int8_t left = GET_INT8_VAL(pLeft);  // read exactly one byte, not four
+  uint32_t right = GET_UINT32_VAL(pRight);
+  if (left < 0 || (uint32_t)left < right) return -1;  // guard sign first: int8 vs uint32 would wrap negatives
+  if ((uint32_t)left > right) return 1;
+  return 0;
+}
+
+int32_t compareInt8Uint64(const void *pLeft, const void *pRight) {
+  int8_t left = GET_INT8_VAL(pLeft);  // read exactly one byte, not four
+  uint64_t right = GET_UINT64_VAL(pRight);
+  if (left < 0 || (uint64_t)left < right) return -1;  // guard sign first: int8 vs uint64 would wrap negatives
+  if ((uint64_t)left > right) return 1;
+  return 0;
+}
+
+int32_t compareInt16Int8(const void *pLeft, const void *pRight) {
+  int16_t left = GET_INT16_VAL(pLeft);  // read exactly two bytes, not four
+  int8_t right = GET_INT8_VAL(pRight);
+  if (left > right) return 1;
+  if (left < right) return -1;
+  return 0;
+}
+
+int32_t compareInt16Int32(const void *pLeft, const void *pRight) {
+  int16_t left = GET_INT16_VAL(pLeft);  // read exactly two bytes, not four
+  int32_t right = GET_INT32_VAL(pRight);
+  if (left > right) return 1;
+  if (left < right) return -1;
+  return 0;
+}
+
+int32_t compareInt16Int64(const void *pLeft, const void *pRight) {
+  int16_t left = GET_INT16_VAL(pLeft);  // read exactly two bytes, not four
+  int64_t right = GET_INT64_VAL(pRight);
+  if (left > right) return 1;
+  if (left < right) return -1;
+  return 0;
+}
+
+int32_t compareInt16Float(const void *pLeft, const void *pRight) {
+  int16_t left = GET_INT16_VAL(pLeft);  // read exactly two bytes, not four
+  float right = GET_FLOAT_VAL(pRight);
+  if (left > right) return 1;
+  if (left < right) return -1;
+  return 0;
+}
+
+int32_t compareInt16Double(const void *pLeft, const void *pRight) {
+  int16_t left = GET_INT16_VAL(pLeft);  // read exactly two bytes, not four
+  double right = GET_DOUBLE_VAL(pRight);
+  if (left > right) return 1;
+  if (left < right) return -1;
+  return 0;
+}
+
+int32_t compareInt16Uint8(const void *pLeft, const void *pRight) {
+  int16_t left = GET_INT16_VAL(pLeft);  // read exactly two bytes, not four
+  uint8_t right = GET_UINT8_VAL(pRight);
+  if (left > right) return 1;  // both promote to int: signed compare is correct
+  if (left < right) return -1;
+  return 0;
+}
+
+int32_t compareInt16Uint16(const void *pLeft, const void *pRight) {
+  int16_t left = GET_INT16_VAL(pLeft);  // read exactly two bytes, not four
+  uint16_t right = GET_UINT16_VAL(pRight);
+  if (left > right) return 1;  // both promote to int: signed compare is correct
+  if (left < right) return -1;
+  return 0;
+}
+
+int32_t compareInt16Uint32(const void *pLeft, const void *pRight) {
+  int16_t left = GET_INT16_VAL(pLeft);  // read exactly two bytes, not four
+  uint32_t right = GET_UINT32_VAL(pRight);
+  if (left < 0 || (uint32_t)left < right) return -1;  // guard sign first: int16 vs uint32 would wrap negatives
+  if ((uint32_t)left > right) return 1;
+  return 0;
+}
+
+int32_t compareInt16Uint64(const void *pLeft, const void *pRight) {
+  int16_t left = GET_INT16_VAL(pLeft);  // read exactly two bytes, not four
+  uint64_t right = GET_UINT64_VAL(pRight);
+  if (left < 0 || (uint64_t)left < right) return -1;  // guard sign first: int16 vs uint64 would wrap negatives
+  if ((uint64_t)left > right) return 1;
+  return 0;
+}
+
+
+int32_t compareInt32Int8(const void *pLeft, const void *pRight) {
+ int32_t left = GET_INT32_VAL(pLeft);
+ int8_t right = GET_INT8_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+int32_t compareInt32Int16(const void *pLeft, const void *pRight) {
+ int32_t left = GET_INT32_VAL(pLeft);
+ int16_t right = GET_INT16_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+int32_t compareInt32Int64(const void *pLeft, const void *pRight) {
+ int32_t left = GET_INT32_VAL(pLeft);
+ int64_t right = GET_INT64_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+int32_t compareInt32Float(const void *pLeft, const void *pRight) {
+ int32_t left = GET_INT32_VAL(pLeft);
+ float right = GET_FLOAT_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+int32_t compareInt32Double(const void *pLeft, const void *pRight) {
+ int32_t left = GET_INT32_VAL(pLeft);
+ double right = GET_DOUBLE_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+int32_t compareInt32Uint8(const void *pLeft, const void *pRight) {
+ int32_t left = GET_INT32_VAL(pLeft);
+ uint8_t right = GET_UINT8_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+int32_t compareInt32Uint16(const void *pLeft, const void *pRight) {
+ int32_t left = GET_INT32_VAL(pLeft);
+ uint16_t right = GET_UINT16_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+int32_t compareInt32Uint32(const void *pLeft, const void *pRight) {
+  int32_t left = GET_INT32_VAL(pLeft);
+  uint32_t right = GET_UINT32_VAL(pRight);
+  if (left < 0 || (uint32_t)left < right) return -1;  // guard sign first: int32 vs uint32 would wrap negatives
+  if ((uint32_t)left > right) return 1;
+  return 0;
+}
+
+int32_t compareInt32Uint64(const void *pLeft, const void *pRight) {
+  int32_t left = GET_INT32_VAL(pLeft);
+  uint64_t right = GET_UINT64_VAL(pRight);
+  if (left < 0 || (uint64_t)left < right) return -1;  // guard sign first: int32 vs uint64 would wrap negatives
+  if ((uint64_t)left > right) return 1;
+  return 0;
+}
+
+int32_t compareInt64Int8(const void *pLeft, const void *pRight) {
+ int64_t left = GET_INT64_VAL(pLeft);
+ int8_t right = GET_INT8_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+int32_t compareInt64Int16(const void *pLeft, const void *pRight) {
+ int64_t left = GET_INT64_VAL(pLeft);
+ int16_t right = GET_INT16_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+int32_t compareInt64Int32(const void *pLeft, const void *pRight) {
+ int64_t left = GET_INT64_VAL(pLeft);
+ int32_t right = GET_INT32_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+int32_t compareInt64Float(const void *pLeft, const void *pRight) {
+ int64_t left = GET_INT64_VAL(pLeft);
+ float right = GET_FLOAT_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+int32_t compareInt64Double(const void *pLeft, const void *pRight) {
+ int64_t left = GET_INT64_VAL(pLeft);
+ double right = GET_DOUBLE_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+int32_t compareInt64Uint8(const void *pLeft, const void *pRight) {
+ int64_t left = GET_INT64_VAL(pLeft);
+ uint8_t right = GET_UINT8_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+int32_t compareInt64Uint16(const void *pLeft, const void *pRight) {
+ int64_t left = GET_INT64_VAL(pLeft);
+ uint16_t right = GET_UINT16_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+int32_t compareInt64Uint32(const void *pLeft, const void *pRight) {
+ int64_t left = GET_INT64_VAL(pLeft);
+ uint32_t right = GET_UINT32_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+int32_t compareInt64Uint64(const void *pLeft, const void *pRight) {
+  int64_t left = GET_INT64_VAL(pLeft);
+  uint64_t right = GET_UINT64_VAL(pRight);
+  if (left < 0 || (uint64_t)left < right) return -1;  // guard sign first: int64 vs uint64 would wrap negatives
+  if ((uint64_t)left > right) return 1;
+  return 0;
+}
+
+int32_t compareFloatInt8(const void *pLeft, const void *pRight) {
+ float left = GET_FLOAT_VAL(pLeft);
+ int8_t right = GET_INT8_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+int32_t compareFloatInt16(const void *pLeft, const void *pRight) {
+ float left = GET_FLOAT_VAL(pLeft);
+ int16_t right = GET_INT16_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+int32_t compareFloatInt32(const void *pLeft, const void *pRight) {
+ float left = GET_FLOAT_VAL(pLeft);
+ int32_t right = GET_INT32_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+int32_t compareFloatInt64(const void *pLeft, const void *pRight) {
+ float left = GET_FLOAT_VAL(pLeft);
+ int64_t right = GET_INT64_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+int32_t compareFloatDouble(const void *pLeft, const void *pRight) {
+ float left = GET_FLOAT_VAL(pLeft);
+ double right = GET_DOUBLE_VAL(pRight);
+
+ if (isnan(left) && isnan(right)) {
+ return 0;
+ }
+
+ if (isnan(left)) {
+ return -1;
+ }
+
+ if (isnan(right)) {
+ return 1;
+ }
+
+ if (FLT_EQUAL(left, right)) {
+ return 0;
+ }
+ return FLT_GREATER(left, right) ? 1 : -1;
+}
+
+int32_t compareFloatUint8(const void *pLeft, const void *pRight) {
+ float left = GET_FLOAT_VAL(pLeft);
+ uint8_t right = GET_UINT8_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+int32_t compareFloatUint16(const void *pLeft, const void *pRight) {
+ float left = GET_FLOAT_VAL(pLeft);
+ uint16_t right = GET_UINT16_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+int32_t compareFloatUint32(const void *pLeft, const void *pRight) {
+ float left = GET_FLOAT_VAL(pLeft);
+ uint32_t right = GET_UINT32_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+int32_t compareFloatUint64(const void *pLeft, const void *pRight) {
+ float left = GET_FLOAT_VAL(pLeft);
+ uint64_t right = GET_UINT64_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+int32_t compareDoubleInt8(const void *pLeft, const void *pRight) {
+ double left = GET_DOUBLE_VAL(pLeft);
+ int8_t right = GET_INT8_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+int32_t compareDoubleInt16(const void *pLeft, const void *pRight) {
+ double left = GET_DOUBLE_VAL(pLeft);
+ int16_t right = GET_INT16_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+int32_t compareDoubleInt32(const void *pLeft, const void *pRight) {
+ double left = GET_DOUBLE_VAL(pLeft);
+ int32_t right = GET_INT32_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+int32_t compareDoubleInt64(const void *pLeft, const void *pRight) {
+ double left = GET_DOUBLE_VAL(pLeft);
+ int64_t right = GET_INT64_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+int32_t compareDoubleFloat(const void *pLeft, const void *pRight) {
+ double left = GET_DOUBLE_VAL(pLeft);
+ float right = GET_FLOAT_VAL(pRight);
+
+ if (isnan(left) && isnan(right)) {
+ return 0;
+ }
+
+ if (isnan(left)) {
+ return -1;
+ }
+
+ if (isnan(right)) {
+ return 1;
+ }
+
+ if (FLT_EQUAL(left, right)) {
+ return 0;
+ }
+ return FLT_GREATER(left, right) ? 1 : -1;
+}
+
+int32_t compareDoubleUint8(const void *pLeft, const void *pRight) {
+ double left = GET_DOUBLE_VAL(pLeft);
+ uint8_t right = GET_UINT8_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+int32_t compareDoubleUint16(const void *pLeft, const void *pRight) {
+ double left = GET_DOUBLE_VAL(pLeft);
+ uint16_t right = GET_UINT16_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+int32_t compareDoubleUint32(const void *pLeft, const void *pRight) {
+ double left = GET_DOUBLE_VAL(pLeft);
+ uint32_t right = GET_UINT32_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+int32_t compareDoubleUint64(const void *pLeft, const void *pRight) {
+ double left = GET_DOUBLE_VAL(pLeft);
+ uint64_t right = GET_UINT64_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+int32_t compareUint8Int8(const void *pLeft, const void *pRight) {
+ uint8_t left = GET_UINT8_VAL(pLeft);
+ int8_t right = GET_INT8_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+int32_t compareUint8Int16(const void *pLeft, const void *pRight) {
+ uint8_t left = GET_UINT8_VAL(pLeft);
+ int16_t right = GET_INT16_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+int32_t compareUint8Int32(const void *pLeft, const void *pRight) {
+ uint8_t left = GET_UINT8_VAL(pLeft);
+ int32_t right = GET_INT32_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+int32_t compareUint8Int64(const void *pLeft, const void *pRight) {
+ uint8_t left = GET_UINT8_VAL(pLeft);
+ int64_t right = GET_INT64_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+int32_t compareUint8Float(const void *pLeft, const void *pRight) {
+ uint8_t left = GET_UINT8_VAL(pLeft);
+ float right = GET_FLOAT_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+int32_t compareUint8Double(const void *pLeft, const void *pRight) {
+ uint8_t left = GET_UINT8_VAL(pLeft);
+ double right = GET_DOUBLE_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+int32_t compareUint8Uint16(const void *pLeft, const void *pRight) {
+ uint8_t left = GET_UINT8_VAL(pLeft);
+ uint16_t right = GET_UINT16_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+int32_t compareUint8Uint32(const void *pLeft, const void *pRight) {
+ uint8_t left = GET_UINT8_VAL(pLeft);
+ uint32_t right = GET_UINT32_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+int32_t compareUint8Uint64(const void *pLeft, const void *pRight) {
+ uint8_t left = GET_UINT8_VAL(pLeft);
+ uint64_t right = GET_UINT64_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+int32_t compareUint16Int8(const void *pLeft, const void *pRight) {
+ uint16_t left = GET_UINT16_VAL(pLeft);
+ int8_t right = GET_INT8_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+int32_t compareUint16Int16(const void *pLeft, const void *pRight) {
+ uint16_t left = GET_UINT16_VAL(pLeft);
+ int16_t right = GET_INT16_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+int32_t compareUint16Int32(const void *pLeft, const void *pRight) {
+ uint16_t left = GET_UINT16_VAL(pLeft);
+ int32_t right = GET_INT32_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+int32_t compareUint16Int64(const void *pLeft, const void *pRight) {
+ uint16_t left = GET_UINT16_VAL(pLeft);
+ int64_t right = GET_INT64_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+int32_t compareUint16Float(const void *pLeft, const void *pRight) {
+ uint16_t left = GET_UINT16_VAL(pLeft);
+ float right = GET_FLOAT_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+int32_t compareUint16Double(const void *pLeft, const void *pRight) {
+ uint16_t left = GET_UINT16_VAL(pLeft);
+ double right = GET_DOUBLE_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+int32_t compareUint16Uint8(const void *pLeft, const void *pRight) {
+ uint16_t left = GET_UINT16_VAL(pLeft);
+ uint8_t right = GET_UINT8_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+int32_t compareUint16Uint32(const void *pLeft, const void *pRight) {
+ uint16_t left = GET_UINT16_VAL(pLeft);
+ uint32_t right = GET_UINT32_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+int32_t compareUint16Uint64(const void *pLeft, const void *pRight) {
+ uint16_t left = GET_UINT16_VAL(pLeft);
+ uint64_t right = GET_UINT64_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+int32_t compareUint32Int8(const void *pLeft, const void *pRight) {
+  uint32_t left = GET_UINT32_VAL(pLeft);
+  int8_t right = GET_INT8_VAL(pRight);
+  if (right < 0 || left > (uint32_t)right) return 1;  // negative right always sorts below any uint32
+  if (left < (uint32_t)right) return -1;
+  return 0;
+}
+
+int32_t compareUint32Int16(const void *pLeft, const void *pRight) {
+  uint32_t left = GET_UINT32_VAL(pLeft);
+  int16_t right = GET_INT16_VAL(pRight);
+  if (right < 0 || left > (uint32_t)right) return 1;  // negative right always sorts below any uint32
+  if (left < (uint32_t)right) return -1;
+  return 0;
+}
+
+int32_t compareUint32Int32(const void *pLeft, const void *pRight) {
+  uint32_t left = GET_UINT32_VAL(pLeft);
+  int32_t right = GET_INT32_VAL(pRight);
+  if (right < 0 || left > (uint32_t)right) return 1;  // negative right always sorts below any uint32
+  if (left < (uint32_t)right) return -1;
+  return 0;
+}
+
+int32_t compareUint32Int64(const void *pLeft, const void *pRight) {
+ uint32_t left = GET_UINT32_VAL(pLeft);
+ int64_t right = GET_INT64_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+int32_t compareUint32Float(const void *pLeft, const void *pRight) {
+ uint32_t left = GET_UINT32_VAL(pLeft);
+ float right = GET_FLOAT_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+int32_t compareUint32Double(const void *pLeft, const void *pRight) {
+ uint32_t left = GET_UINT32_VAL(pLeft);
+ double right = GET_DOUBLE_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+int32_t compareUint32Uint8(const void *pLeft, const void *pRight) {
+ uint32_t left = GET_UINT32_VAL(pLeft);
+ uint8_t right = GET_UINT8_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+int32_t compareUint32Uint16(const void *pLeft, const void *pRight) {
+ uint32_t left = GET_UINT32_VAL(pLeft);
+ uint16_t right = GET_UINT16_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+int32_t compareUint32Uint64(const void *pLeft, const void *pRight) {
+ uint32_t left = GET_UINT32_VAL(pLeft);
+ uint64_t right = GET_UINT64_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+int32_t compareUint64Int8(const void *pLeft, const void *pRight) {
+  uint64_t left = GET_UINT64_VAL(pLeft);
+  int8_t right = GET_INT8_VAL(pRight);
+  if (right < 0 || left > (uint64_t)right) return 1;  // negative right always sorts below any uint64
+  if (left < (uint64_t)right) return -1;
+  return 0;
+}
+
+int32_t compareUint64Int16(const void *pLeft, const void *pRight) {
+  uint64_t left = GET_UINT64_VAL(pLeft);
+  int16_t right = GET_INT16_VAL(pRight);
+  if (right < 0 || left > (uint64_t)right) return 1;  // negative right always sorts below any uint64
+  if (left < (uint64_t)right) return -1;
+  return 0;
+}
+
+int32_t compareUint64Int32(const void *pLeft, const void *pRight) {
+  uint64_t left = GET_UINT64_VAL(pLeft);
+  int32_t right = GET_INT32_VAL(pRight);
+  if (right < 0 || left > (uint64_t)right) return 1;  // negative right always sorts below any uint64
+  if (left < (uint64_t)right) return -1;
+  return 0;
+}
+
+int32_t compareUint64Int64(const void *pLeft, const void *pRight) {
+  uint64_t left = GET_UINT64_VAL(pLeft);
+  int64_t right = GET_INT64_VAL(pRight);
+  if (right < 0 || left > (uint64_t)right) return 1;  // negative right always sorts below any uint64
+  if (left < (uint64_t)right) return -1;
+  return 0;
+}
+
+int32_t compareUint64Float(const void *pLeft, const void *pRight) {
+ uint64_t left = GET_UINT64_VAL(pLeft);
+ float right = GET_FLOAT_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+int32_t compareUint64Double(const void *pLeft, const void *pRight) {
+ uint64_t left = GET_UINT64_VAL(pLeft);
+ double right = GET_DOUBLE_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+int32_t compareUint64Uint8(const void *pLeft, const void *pRight) {
+ uint64_t left = GET_UINT64_VAL(pLeft);
+ uint8_t right = GET_UINT8_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+int32_t compareUint64Uint16(const void *pLeft, const void *pRight) {
+ uint64_t left = GET_UINT64_VAL(pLeft);
+ uint16_t right = GET_UINT16_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+int32_t compareUint64Uint32(const void *pLeft, const void *pRight) {
+ uint64_t left = GET_UINT64_VAL(pLeft);
+ uint32_t right = GET_UINT32_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+
int32_t compareJsonValDesc(const void *pLeft, const void *pRight) {
return compareJsonVal(pRight, pLeft);
}
diff --git a/source/util/src/terror.c b/source/util/src/terror.c
index 662a3f0c88012191f3a7d76c78eb6d06a8b20292..044cdc86b4d91cbb26e00435cb241f39c0442f00 100644
--- a/source/util/src/terror.c
+++ b/source/util/src/terror.c
@@ -121,7 +121,7 @@ TAOS_DEFINE_ERROR(TSDB_CODE_TSC_CONN_KILLED, "Connection killed")
TAOS_DEFINE_ERROR(TSDB_CODE_TSC_SQL_SYNTAX_ERROR, "Syntax error in SQL")
TAOS_DEFINE_ERROR(TSDB_CODE_TSC_DB_NOT_SELECTED, "Database not specified or available")
TAOS_DEFINE_ERROR(TSDB_CODE_TSC_INVALID_TABLE_NAME, "Table does not exist")
-TAOS_DEFINE_ERROR(TSDB_CODE_TSC_EXCEED_SQL_LIMIT, "SQL statement too long, check maxSQLLength config")
+TAOS_DEFINE_ERROR(TSDB_CODE_TSC_EXCEED_SQL_LIMIT, "SQL statement too long")
TAOS_DEFINE_ERROR(TSDB_CODE_TSC_FILE_EMPTY, "File is empty")
TAOS_DEFINE_ERROR(TSDB_CODE_TSC_LINE_SYNTAX_ERROR, "Syntax error in Line")
TAOS_DEFINE_ERROR(TSDB_CODE_TSC_NO_META_CACHED, "No table meta cached")
@@ -618,10 +618,11 @@ TAOS_DEFINE_ERROR(TSDB_CODE_RSMA_REMOVE_EXISTS, "Rsma remove exists"
TAOS_DEFINE_ERROR(TSDB_CODE_RSMA_FETCH_MSG_MSSED_UP, "Rsma fetch msg is messed up")
TAOS_DEFINE_ERROR(TSDB_CODE_RSMA_EMPTY_INFO, "Rsma info is empty")
TAOS_DEFINE_ERROR(TSDB_CODE_RSMA_INVALID_SCHEMA, "Rsma invalid schema")
+TAOS_DEFINE_ERROR(TSDB_CODE_RSMA_REGEX_MATCH, "Rsma regex match")
//index
TAOS_DEFINE_ERROR(TSDB_CODE_INDEX_REBUILDING, "Index is rebuilding")
-TAOS_DEFINE_ERROR(TSDB_CODE_INDEX_REBUILDING, "Invalid index file")
+TAOS_DEFINE_ERROR(TSDB_CODE_INDEX_INVALID_FILE, "Index file is invalid")
//tmq
TAOS_DEFINE_ERROR(TSDB_CODE_TMQ_INVALID_MSG, "Invalid message")
diff --git a/source/util/src/thash.c b/source/util/src/thash.c
index aee84a0d55336c63840d1a5df887da7752592841..b69d8ea52866055668ce4937836c5eb46842f1c2 100644
--- a/source/util/src/thash.c
+++ b/source/util/src/thash.c
@@ -21,7 +21,7 @@
// the add ref count operation may trigger the warning if the reference count is greater than the MAX_WARNING_REF_COUNT
#define MAX_WARNING_REF_COUNT 10000
-#define HASH_MAX_CAPACITY (1024 * 1024 * 16)
+#define HASH_MAX_CAPACITY (1024 * 1024 * 1024)
#define HASH_DEFAULT_LOAD_FACTOR (0.75)
#define HASH_INDEX(v, c) ((v) & ((c)-1))
@@ -67,6 +67,7 @@ struct SHashObj {
bool enableUpdate; // enable update
SArray *pMemBlock; // memory block allocated for SHashEntry
_hash_before_fn_t callbackFp; // function invoked before return the value to caller
+ int64_t compTimes;
};
/*
@@ -146,6 +147,7 @@ static FORCE_INLINE SHashNode *doSearchInEntryList(SHashObj *pHashObj, SHashEntr
uint32_t hashVal) {
SHashNode *pNode = pe->next;
while (pNode) {
+ atomic_add_fetch_64(&pHashObj->compTimes, 1);
if ((pNode->keyLen == keyLen) && ((*(pHashObj->equalFp))(GET_HASH_NODE_KEY(pNode), key, keyLen) == 0) &&
pNode->removed == 0) {
assert(pNode->hashVal == hashVal);
@@ -250,11 +252,15 @@ SHashObj *taosHashInit(size_t capacity, _hash_fn_t fn, bool update, SHashLockTyp
// the max slots is not defined by user
pHashObj->capacity = taosHashCapacity((int32_t)capacity);
+ pHashObj->size = 0;
pHashObj->equalFp = memcmp;
pHashObj->hashFp = fn;
pHashObj->type = type;
+ pHashObj->lock = 0;
pHashObj->enableUpdate = update;
+ pHashObj->freeFp = NULL;
+ pHashObj->callbackFp = NULL;
ASSERT((pHashObj->capacity & (pHashObj->capacity - 1)) == 0);
@@ -327,7 +333,7 @@ int32_t taosHashPut(SHashObj *pHashObj, const void *key, size_t keyLen, const vo
// disable resize
taosHashRLock(pHashObj);
- int32_t slot = HASH_INDEX(hashVal, pHashObj->capacity);
+ uint32_t slot = HASH_INDEX(hashVal, pHashObj->capacity);
SHashEntry *pe = pHashObj->hashList[slot];
taosHashEntryWLock(pHashObj, pe);
@@ -882,3 +888,7 @@ void *taosHashAcquire(SHashObj *pHashObj, const void *key, size_t keyLen) {
}
void taosHashRelease(SHashObj *pHashObj, void *p) { taosHashCancelIterate(pHashObj, p); }
+
+int64_t taosHashGetCompTimes(SHashObj *pHashObj) { return atomic_load_64(&pHashObj->compTimes); }
+
+
diff --git a/source/util/src/tlog.c b/source/util/src/tlog.c
index 2e8239c68f0861486d2d6175d698dc76ed92b128..46203658f15848fffca902606ba0df50647dac86 100644
--- a/source/util/src/tlog.c
+++ b/source/util/src/tlog.c
@@ -97,7 +97,7 @@ int32_t tqDebugFlag = 135;
int32_t fsDebugFlag = 135;
int32_t metaDebugFlag = 135;
int32_t udfDebugFlag = 135;
-int32_t smaDebugFlag = 135;
+int32_t smaDebugFlag = 131;
int32_t idxDebugFlag = 135;
int64_t dbgEmptyW = 0;
@@ -429,7 +429,7 @@ static inline int32_t taosBuildLogHead(char *buffer, const char *flags) {
}
static inline void taosPrintLogImp(ELogLevel level, int32_t dflag, const char *buffer, int32_t len) {
- if ((dflag & DEBUG_FILE) && tsLogObj.logHandle && tsLogObj.logHandle->pFile != NULL) {
+ if ((dflag & DEBUG_FILE) && tsLogObj.logHandle && tsLogObj.logHandle->pFile != NULL && osLogSpaceAvailable()) {
taosUpdateLogNums(level);
if (tsAsyncLog) {
taosPushLogBuffer(tsLogObj.logHandle, buffer, len);
@@ -446,12 +446,14 @@ static inline void taosPrintLogImp(ELogLevel level, int32_t dflag, const char *b
}
if (dflag & DEBUG_SCREEN) {
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-result"
write(1, buffer, (uint32_t)len);
+#pragma GCC diagnostic pop
}
}
void taosPrintLog(const char *flags, ELogLevel level, int32_t dflag, const char *format, ...) {
- if (!osLogSpaceAvailable()) return;
if (!(dflag & DEBUG_FILE) && !(dflag & DEBUG_SCREEN)) return;
char buffer[LOG_MAX_LINE_BUFFER_SIZE];
diff --git a/source/util/src/tpagedbuf.c b/source/util/src/tpagedbuf.c
index 0e608d0da22da836f0a357c7bd4f9b194c11fd13..2767fed9373aa47ebdbea39b07f28c238db14c7d 100644
--- a/source/util/src/tpagedbuf.c
+++ b/source/util/src/tpagedbuf.c
@@ -33,7 +33,7 @@ struct SDiskbasedBuf {
int32_t pageSize; // current used page size
int32_t inMemPages; // numOfPages that are allocated in memory
SList* freePgList; // free page list
- SHashObj* groupSet; // id hash table, todo remove it
+ SArray* pIdList; // page id list
SHashObj* all;
SList* lruList;
void* emptyDummyIdList; // dummy id list
@@ -241,26 +241,7 @@ static int32_t loadPageFromDisk(SDiskbasedBuf* pBuf, SPageInfo* pg) {
return 0;
}
-static SIDList addNewGroup(SDiskbasedBuf* pBuf, int32_t groupId) {
- assert(taosHashGet(pBuf->groupSet, (const char*)&groupId, sizeof(int32_t)) == NULL);
-
- SArray* pa = taosArrayInit(1, POINTER_BYTES);
- int32_t ret = taosHashPut(pBuf->groupSet, (const char*)&groupId, sizeof(int32_t), &pa, POINTER_BYTES);
- assert(ret == 0);
-
- return pa;
-}
-
-static SPageInfo* registerPage(SDiskbasedBuf* pBuf, int32_t groupId, int32_t pageId) {
- SIDList list = NULL;
-
- char** p = taosHashGet(pBuf->groupSet, (const char*)&groupId, sizeof(int32_t));
- if (p == NULL) { // it is a new group id
- list = addNewGroup(pBuf, groupId);
- } else {
- list = (SIDList)(*p);
- }
-
+static SPageInfo* registerPage(SDiskbasedBuf* pBuf, int32_t pageId) {
pBuf->numOfPages += 1;
SPageInfo* ppi = taosMemoryMalloc(sizeof(SPageInfo));
@@ -273,7 +254,7 @@ static SPageInfo* registerPage(SDiskbasedBuf* pBuf, int32_t groupId, int32_t pag
ppi->pn = NULL;
ppi->dirty = false;
- return *(SPageInfo**)taosArrayPush(list, &ppi);
+ return *(SPageInfo**)taosArrayPush(pBuf->pIdList, &ppi);
}
static SListNode* getEldestUnrefedPage(SDiskbasedBuf* pBuf) {
@@ -293,22 +274,13 @@ static SListNode* getEldestUnrefedPage(SDiskbasedBuf* pBuf) {
}
}
- // int32_t pos = listNEles(pBuf->lruList);
- // SListIter iter1 = {0};
- // tdListInitIter(pBuf->lruList, &iter1, TD_LIST_BACKWARD);
- // SListNode* pn1 = NULL;
- // while((pn1 = tdListNext(&iter1)) != NULL) {
- // SPageInfo* pageInfo = *(SPageInfo**) pn1->data;
- // printf("page %d is used, dirty:%d, pos:%d\n", pageInfo->pageId, pageInfo->dirty, pos - 1);
- // pos -= 1;
- // }
-
return pn;
}
static char* evacOneDataPage(SDiskbasedBuf* pBuf) {
char* bufPage = NULL;
SListNode* pn = getEldestUnrefedPage(pBuf);
+ terrno = 0;
// all pages are referenced by user, try to allocate new space
if (pn == NULL) {
@@ -332,6 +304,7 @@ static char* evacOneDataPage(SDiskbasedBuf* pBuf) {
bufPage = flushPageToDisk(pBuf, d);
}
+ ASSERT((bufPage != NULL) || terrno != TSDB_CODE_SUCCESS);
return bufPage;
}
@@ -380,7 +353,8 @@ int32_t createDiskbasedBuf(SDiskbasedBuf** pBuf, int32_t pagesize, int32_t inMem
// init id hash table
_hash_fn_t fn = taosGetDefaultHashFunction(TSDB_DATA_TYPE_INT);
- pPBuf->groupSet = taosHashInit(10, fn, true, false);
+ pPBuf->pIdList = taosArrayInit(4, POINTER_BYTES);
+
pPBuf->assistBuf = taosMemoryMalloc(pPBuf->pageSize + 2); // EXTRA BYTES
pPBuf->all = taosHashInit(10, fn, true, false);
@@ -397,7 +371,7 @@ int32_t createDiskbasedBuf(SDiskbasedBuf** pBuf, int32_t pagesize, int32_t inMem
return TSDB_CODE_SUCCESS;
}
-void* getNewBufPage(SDiskbasedBuf* pBuf, int32_t groupId, int32_t* pageId) {
+void* getNewBufPage(SDiskbasedBuf* pBuf, int32_t* pageId) {
pBuf->statis.getPages += 1;
char* availablePage = NULL;
@@ -423,7 +397,7 @@ void* getNewBufPage(SDiskbasedBuf* pBuf, int32_t groupId, int32_t* pageId) {
*pageId = (++pBuf->allocateId);
// register page id info
- pi = registerPage(pBuf, groupId, *pageId);
+ pi = registerPage(pBuf, *pageId);
// add to hash map
taosHashPut(pBuf->all, pageId, sizeof(int32_t), &pi, POINTER_BYTES);
@@ -524,19 +498,11 @@ void releaseBufPageInfo(SDiskbasedBuf* pBuf, SPageInfo* pi) {
pBuf->statis.releasePages += 1;
}
-size_t getNumOfBufGroupId(const SDiskbasedBuf* pBuf) { return taosHashGetSize(pBuf->groupSet); }
-
size_t getTotalBufSize(const SDiskbasedBuf* pBuf) { return (size_t)pBuf->totalBufSize; }
-SIDList getDataBufPagesIdList(SDiskbasedBuf* pBuf, int32_t groupId) {
- assert(pBuf != NULL);
-
- char** p = taosHashGet(pBuf->groupSet, (const char*)&groupId, sizeof(int32_t));
- if (p == NULL) { // it is a new group id
- return pBuf->emptyDummyIdList;
- } else {
- return (SArray*)(*p);
- }
+SIDList getDataBufPagesIdList(SDiskbasedBuf* pBuf) {
+ ASSERT(pBuf != NULL);
+ return pBuf->pIdList;
}
void destroyDiskbasedBuf(SDiskbasedBuf* pBuf) {
@@ -576,26 +542,21 @@ void destroyDiskbasedBuf(SDiskbasedBuf* pBuf) {
taosRemoveFile(pBuf->path);
taosMemoryFreeClear(pBuf->path);
- SArray** p = taosHashIterate(pBuf->groupSet, NULL);
- while (p) {
- size_t n = taosArrayGetSize(*p);
- for (int32_t i = 0; i < n; ++i) {
- SPageInfo* pi = taosArrayGetP(*p, i);
- taosMemoryFreeClear(pi->pData);
- taosMemoryFreeClear(pi);
- }
-
- taosArrayDestroy(*p);
- p = taosHashIterate(pBuf->groupSet, p);
+ size_t n = taosArrayGetSize(pBuf->pIdList);
+ for (int32_t i = 0; i < n; ++i) {
+ SPageInfo* pi = taosArrayGetP(pBuf->pIdList, i);
+ taosMemoryFreeClear(pi->pData);
+ taosMemoryFreeClear(pi);
}
+ taosArrayDestroy(pBuf->pIdList);
+
tdListFree(pBuf->lruList);
tdListFree(pBuf->freePgList);
taosArrayDestroy(pBuf->emptyDummyIdList);
taosArrayDestroy(pBuf->pFree);
- taosHashCleanup(pBuf->groupSet);
taosHashCleanup(pBuf->all);
taosMemoryFreeClear(pBuf->id);
@@ -659,32 +620,32 @@ void dBufPrintStatis(const SDiskbasedBuf* pBuf) {
pBuf->totalBufSize / 1024.0, pBuf->numOfPages, listNEles(pBuf->lruList) * pBuf->pageSize / 1024.0,
listNEles(pBuf->lruList), pBuf->fileSize / 1024.0, pBuf->pageSize / 1024.0f, pBuf->id);
- printf(
- "Get/Release pages:%d/%d, flushToDisk:%.2f Kb (%d Pages), loadFromDisk:%.2f Kb (%d Pages), avgPageSize:%.2f Kb\n",
- ps->getPages, ps->releasePages, ps->flushBytes / 1024.0f, ps->flushPages, ps->loadBytes / 1024.0f, ps->loadPages,
- ps->loadBytes / (1024.0 * ps->loadPages));
+ if (ps->loadPages > 0) {
+ printf(
+ "Get/Release pages:%d/%d, flushToDisk:%.2f Kb (%d Pages), loadFromDisk:%.2f Kb (%d Pages), avgPageSize:%.2f Kb\n",
+ ps->getPages, ps->releasePages, ps->flushBytes / 1024.0f, ps->flushPages, ps->loadBytes / 1024.0f,
+ ps->loadPages, ps->loadBytes / (1024.0 * ps->loadPages));
+ } else {
+ printf("no page loaded\n");
+ }
}
void clearDiskbasedBuf(SDiskbasedBuf* pBuf) {
- SArray** p = taosHashIterate(pBuf->groupSet, NULL);
- while (p) {
- size_t n = taosArrayGetSize(*p);
- for (int32_t i = 0; i < n; ++i) {
- SPageInfo* pi = taosArrayGetP(*p, i);
- taosMemoryFreeClear(pi->pData);
- taosMemoryFreeClear(pi);
- }
- taosArrayDestroy(*p);
- p = taosHashIterate(pBuf->groupSet, p);
+ size_t n = taosArrayGetSize(pBuf->pIdList);
+ for (int32_t i = 0; i < n; ++i) {
+ SPageInfo* pi = taosArrayGetP(pBuf->pIdList, i);
+ taosMemoryFreeClear(pi->pData);
+ taosMemoryFreeClear(pi);
}
+ taosArrayClear(pBuf->pIdList);
+
tdListEmpty(pBuf->lruList);
tdListEmpty(pBuf->freePgList);
taosArrayClear(pBuf->emptyDummyIdList);
taosArrayClear(pBuf->pFree);
- taosHashClear(pBuf->groupSet);
taosHashClear(pBuf->all);
pBuf->numOfPages = 0; // all pages are in buffer in the first place
diff --git a/source/util/src/trbtree.c b/source/util/src/trbtree.c
index 0970485dade90bb8719a2fa39facb047e07bcfff..65f1bac60aa51c48fe65895dbe0ded241d22590d 100644
--- a/source/util/src/trbtree.c
+++ b/source/util/src/trbtree.c
@@ -13,179 +13,297 @@
* along with this program. If not, see .
*/
-#include "os.h"
+#include "trbtree.h"
-typedef int32_t (*tRBTreeCmprFn)(void *, void *);
-
-typedef struct SRBTree SRBTree;
-typedef struct SRBTreeNode SRBTreeNode;
-typedef struct SRBTreeIter SRBTreeIter;
-
-struct SRBTreeNode {
- enum { RED, BLACK } color;
- SRBTreeNode *parent;
- SRBTreeNode *left;
- SRBTreeNode *right;
- uint8_t payload[];
-};
-
-struct SRBTree {
- tRBTreeCmprFn cmprFn;
- SRBTreeNode *root;
-};
-
-struct SRBTreeIter {
- SRBTree *pTree;
-};
-
-#define RBTREE_NODE_COLOR(N) ((N) ? (N)->color : BLACK)
-
-// APIs ================================================
-static void tRBTreeRotateLeft(SRBTree *pTree, SRBTreeNode *pNode) {
- SRBTreeNode *right = pNode->right;
-
- pNode->right = right->left;
- if (pNode->right) {
- pNode->right->parent = pNode;
+static void tRBTreeRotateLeft(SRBTree *pTree, SRBTreeNode *x) {
+ SRBTreeNode *y = x->right;
+ x->right = y->left;
+ if (y->left != pTree->NIL) {
+ y->left->parent = x;
}
-
- right->parent = pNode->parent;
- if (pNode->parent == NULL) {
- pTree->root = right;
- } else if (pNode == pNode->parent->left) {
- pNode->parent->left = right;
+ y->parent = x->parent;
+ if (x->parent == pTree->NIL) {
+ pTree->root = y;
+ } else if (x == x->parent->left) {
+ x->parent->left = y;
} else {
- pNode->parent->right = right;
+ x->parent->right = y;
}
-
- right->left = pNode;
- pNode->parent = right;
+ y->left = x;
+ x->parent = y;
}
-static void tRBTreeRotateRight(SRBTree *pTree, SRBTreeNode *pNode) {
- SRBTreeNode *left = pNode->left;
-
- pNode->left = left->right;
- if (pNode->left) {
- pNode->left->parent = pNode;
+static void tRBTreeRotateRight(SRBTree *pTree, SRBTreeNode *x) {
+ SRBTreeNode *y = x->left;
+ x->left = y->right;
+ if (y->right != pTree->NIL) {
+ y->right->parent = x;
}
-
- left->parent = pNode->parent;
- if (pNode->parent == NULL) {
- pTree->root = left;
- } else if (pNode == pNode->parent->left) {
- pNode->parent->left = left;
+ y->parent = x->parent;
+ if (x->parent == pTree->NIL) {
+ pTree->root = y;
+ } else if (x == x->parent->right) {
+ x->parent->right = y;
} else {
- pNode->parent->right = left;
+ x->parent->left = y;
}
-
- left->right = pNode;
- pNode->parent = left;
+ y->right = x;
+ x->parent = y;
}
-#define tRBTreeCreate(compare) \
- (SRBTree) { .cmprFn = (compare), .root = NULL }
-
-SRBTreeNode *tRBTreePut(SRBTree *pTree, SRBTreeNode *pNew) {
- pNew->left = NULL;
- pNew->right = NULL;
- pNew->color = RED;
-
- // insert
- if (pTree->root == NULL) {
- pNew->parent = NULL;
- pTree->root = pNew;
- } else {
- SRBTreeNode *pNode = pTree->root;
- while (true) {
- ASSERT(pNode);
-
- int32_t c = pTree->cmprFn(pNew->payload, pNode->payload);
- if (c < 0) {
- if (pNode->left) {
- pNode = pNode->left;
- } else {
- pNew->parent = pNode;
- pNode->left = pNew;
- break;
- }
- } else if (c > 0) {
- if (pNode->right) {
- pNode = pNode->right;
- } else {
- pNew->parent = pNode;
- pNode->right = pNew;
- break;
+static void tRBTreePutFix(SRBTree *pTree, SRBTreeNode *z) {
+ while (z->parent->color == RED) {
+ if (z->parent == z->parent->parent->left) { // z.parent is the left child
+
+ SRBTreeNode *y = z->parent->parent->right; // uncle of z
+
+ if (y->color == RED) { // case 1
+ z->parent->color = BLACK;
+ y->color = BLACK;
+ z->parent->parent->color = RED;
+ z = z->parent->parent;
+ } else { // case2 or case3
+ if (z == z->parent->right) { // case2
+ z = z->parent; // marked z.parent as new z
+ tRBTreeRotateLeft(pTree, z);
}
+ // case3
+ z->parent->color = BLACK; // made parent black
+      z->parent->parent->color = RED;   // made grandparent red
+ tRBTreeRotateRight(pTree, z->parent->parent);
+ }
+ } else { // z.parent is the right child
+ SRBTreeNode *y = z->parent->parent->left; // uncle of z
+
+ if (y->color == RED) {
+ z->parent->color = BLACK;
+ y->color = BLACK;
+ z->parent->parent->color = RED;
+ z = z->parent->parent;
} else {
- return NULL;
+ if (z == z->parent->left) {
+ z = z->parent; // marked z.parent as new z
+ tRBTreeRotateRight(pTree, z);
+ }
+ z->parent->color = BLACK; // made parent black
+        z->parent->parent->color = RED;  // made grandparent red
+ tRBTreeRotateLeft(pTree, z->parent->parent);
}
}
}
+ pTree->root->color = BLACK;
+}
- // fix
- SRBTreeNode *pNode = pNew;
- while (pNode->parent && pNode->parent->color == RED) {
- SRBTreeNode *p = pNode->parent;
- SRBTreeNode *g = p->parent;
-
- if (p == g->left) {
- SRBTreeNode *u = g->right;
-
- if (RBTREE_NODE_COLOR(u) == RED) {
- p->color = BLACK;
- u->color = BLACK;
- g->color = RED;
- pNode = g;
+static void tRBTreeTransplant(SRBTree *pTree, SRBTreeNode *u, SRBTreeNode *v) {
+ if (u->parent == pTree->NIL)
+ pTree->root = v;
+ else if (u == u->parent->left)
+ u->parent->left = v;
+ else
+ u->parent->right = v;
+ v->parent = u->parent;
+}
+
+static void tRBTreeDropFix(SRBTree *pTree, SRBTreeNode *x) {
+ while (x != pTree->root && x->color == BLACK) {
+ if (x == x->parent->left) {
+ SRBTreeNode *w = x->parent->right;
+ if (w->color == RED) {
+ w->color = BLACK;
+ x->parent->color = RED;
+ tRBTreeRotateLeft(pTree, x->parent);
+ w = x->parent->right;
+ }
+ if (w->left->color == BLACK && w->right->color == BLACK) {
+ w->color = RED;
+ x = x->parent;
} else {
- if (pNode == p->right) {
- pNode = p;
- tRBTreeRotateLeft(pTree, pNode);
+ if (w->right->color == BLACK) {
+ w->left->color = BLACK;
+ w->color = RED;
+ tRBTreeRotateRight(pTree, w);
+ w = x->parent->right;
}
- pNode->parent->color = BLACK;
- pNode->parent->parent->color = RED;
- tRBTreeRotateRight(pTree, pNode->parent->parent);
+ w->color = x->parent->color;
+ x->parent->color = BLACK;
+ w->right->color = BLACK;
+ tRBTreeRotateLeft(pTree, x->parent);
+ x = pTree->root;
}
} else {
- SRBTreeNode *u = g->left;
-
- if (RBTREE_NODE_COLOR(u) == RED) {
- p->color = BLACK;
- u->color = BLACK;
- g->color = RED;
+ SRBTreeNode *w = x->parent->left;
+ if (w->color == RED) {
+ w->color = BLACK;
+ x->parent->color = RED;
+ tRBTreeRotateRight(pTree, x->parent);
+ w = x->parent->left;
+ }
+ if (w->right->color == BLACK && w->left->color == BLACK) {
+ w->color = RED;
+ x = x->parent;
} else {
- if (pNode == p->left) {
- pNode = p;
- tRBTreeRotateRight(pTree, pNode);
+ if (w->left->color == BLACK) {
+ w->right->color = BLACK;
+ w->color = RED;
+ tRBTreeRotateLeft(pTree, w);
+ w = x->parent->left;
}
- pNode->parent->color = BLACK;
- pNode->parent->parent->color = RED;
- tRBTreeRotateLeft(pTree, pNode->parent->parent);
+ w->color = x->parent->color;
+ x->parent->color = BLACK;
+ w->left->color = BLACK;
+ tRBTreeRotateRight(pTree, x->parent);
+ x = pTree->root;
}
}
}
+ x->color = BLACK;
+}
- pTree->root->color = BLACK;
- return pNew;
+static SRBTreeNode *tRBTreeSuccessor(SRBTree *pTree, SRBTreeNode *pNode) {
+ if (pNode->right != pTree->NIL) {
+ pNode = pNode->right;
+ while (pNode->left != pTree->NIL) {
+ pNode = pNode->left;
+ }
+ } else {
+ while (true) {
+ if (pNode->parent == pTree->NIL || pNode == pNode->parent->left) {
+ pNode = pNode->parent;
+ break;
+ } else {
+ pNode = pNode->parent;
+ }
+ }
+ }
+
+ return pNode;
}
-SRBTreeNode *tRBTreeDrop(SRBTree *pTree, void *pKey) {
- SRBTreeNode *pNode = pTree->root;
+static SRBTreeNode *tRBTreePredecessor(SRBTree *pTree, SRBTreeNode *pNode) {
+ if (pNode->left != pTree->NIL) {
+ pNode = pNode->left;
+ while (pNode->right != pTree->NIL) {
+ pNode = pNode->right;
+ }
+ } else {
+ while (true) {
+ if (pNode->parent == pTree->NIL || pNode == pNode->parent->right) {
+ pNode = pNode->parent;
+ break;
+ } else {
+ pNode = pNode->parent;
+ }
+ }
+ }
- while (pNode) {
- int32_t c = pTree->cmprFn(pKey, pNode->payload);
+ return pNode;
+}
+void tRBTreeCreate(SRBTree *pTree, tRBTreeCmprFn cmprFn) {
+ pTree->cmprFn = cmprFn;
+ pTree->n = 0;
+ pTree->NIL = &pTree->NILNODE;
+ pTree->NIL->color = BLACK;
+ pTree->NIL->parent = NULL;
+ pTree->NIL->left = NULL;
+ pTree->NIL->right = NULL;
+ pTree->root = pTree->NIL;
+ pTree->min = pTree->NIL;
+ pTree->max = pTree->NIL;
+}
+
+SRBTreeNode *tRBTreePut(SRBTree *pTree, SRBTreeNode *z) {
+ SRBTreeNode *y = pTree->NIL; // variable for the parent of the added node
+ SRBTreeNode *temp = pTree->root;
+
+ while (temp != pTree->NIL) {
+ y = temp;
+
+ int32_t c = pTree->cmprFn(RBTREE_NODE_PAYLOAD(z), RBTREE_NODE_PAYLOAD(temp));
if (c < 0) {
- pNode = pNode->left;
+ temp = temp->left;
} else if (c > 0) {
- pNode = pNode->right;
+ temp = temp->right;
} else {
- break;
+ return NULL;
}
}
+ z->parent = y;
+
+ if (y == pTree->NIL) {
+ pTree->root = z;
+ } else if (pTree->cmprFn(RBTREE_NODE_PAYLOAD(z), RBTREE_NODE_PAYLOAD(y)) < 0) {
+ y->left = z;
+ } else {
+ y->right = z;
+ }
+
+ z->color = RED;
+ z->left = pTree->NIL;
+ z->right = pTree->NIL;
+
+ tRBTreePutFix(pTree, z);
+
+ // update min/max node
+ if (pTree->min == pTree->NIL || pTree->cmprFn(RBTREE_NODE_PAYLOAD(pTree->min), RBTREE_NODE_PAYLOAD(z)) > 0) {
+ pTree->min = z;
+ }
+ if (pTree->max == pTree->NIL || pTree->cmprFn(RBTREE_NODE_PAYLOAD(pTree->max), RBTREE_NODE_PAYLOAD(z)) < 0) {
+ pTree->max = z;
+ }
+ pTree->n++;
+ return z;
+}
+
+void tRBTreeDrop(SRBTree *pTree, SRBTreeNode *z) {
+  SRBTreeNode *y = z;
+  SRBTreeNode *x;
+  ECOLOR       y_original_color = y->color;
+
+  // update min/max node
+  if (pTree->min == z) {
+    pTree->min = tRBTreeSuccessor(pTree, pTree->min);
+  }
+  if (pTree->max == z) {
+    pTree->max = tRBTreePredecessor(pTree, pTree->max);
+  }
+
+  // drop impl
+  if (z->left == pTree->NIL) {
+    x = z->right;
+    tRBTreeTransplant(pTree, z, z->right);
+  } else if (z->right == pTree->NIL) {
+    x = z->left;
+    tRBTreeTransplant(pTree, z, z->left);
+  } else {
+    y = tRBTreeSuccessor(pTree, z);
+    y_original_color = y->color;
+    x = y->right;
+    if (y->parent == z) {
+      x->parent = z;
+    } else {
+      tRBTreeTransplant(pTree, y, y->right);
+      y->right = z->right;
+      y->right->parent = y;
+    }
+    tRBTreeTransplant(pTree, z, y);
+    y->left = z->left;
+    y->left->parent = y;
+    y->color = z->color;
+  }
+
+  // fix
+  if (y_original_color == BLACK) {
+    tRBTreeDropFix(pTree, x);
+  }
+  pTree->n--;
+}
+
+SRBTreeNode *tRBTreeDropByKey(SRBTree *pTree, void *pKey) {
+ SRBTreeNode *pNode = tRBTreeGet(pTree, pKey);
if (pNode) {
- // TODO
+ tRBTreeDrop(pTree, pNode);
}
return pNode;
@@ -194,8 +312,8 @@ SRBTreeNode *tRBTreeDrop(SRBTree *pTree, void *pKey) {
SRBTreeNode *tRBTreeGet(SRBTree *pTree, void *pKey) {
SRBTreeNode *pNode = pTree->root;
- while (pNode) {
- int32_t c = pTree->cmprFn(pKey, pNode->payload);
+ while (pNode != pTree->NIL) {
+ int32_t c = pTree->cmprFn(pKey, RBTREE_NODE_PAYLOAD(pNode));
if (c < 0) {
pNode = pNode->left;
@@ -206,5 +324,23 @@ SRBTreeNode *tRBTreeGet(SRBTree *pTree, void *pKey) {
}
}
- return pNode;
+ return (pNode == pTree->NIL) ? NULL : pNode;
}
+
+// SRBTreeIter ================================================
+SRBTreeNode *tRBTreeIterNext(SRBTreeIter *pIter) {
+ SRBTreeNode *pNode = pIter->pNode;
+
+ if (pIter->pNode != pIter->pTree->NIL) {
+ if (pIter->asc) {
+ // ascend
+ pIter->pNode = tRBTreeSuccessor(pIter->pTree, pIter->pNode);
+ } else {
+ // descend
+ pIter->pNode = tRBTreePredecessor(pIter->pTree, pIter->pNode);
+ }
+ }
+
+_exit:
+ return (pNode == pIter->pTree->NIL) ? NULL : pNode;
+}
\ No newline at end of file
diff --git a/source/util/src/tuuid.c b/source/util/src/tuuid.c
index 9101aec949873eb976517581c6790e1bc2dac3b9..7460ccbc829e7c630d217a39f29a2eafa934cb53 100644
--- a/source/util/src/tuuid.c
+++ b/source/util/src/tuuid.c
@@ -51,11 +51,11 @@ int64_t tGenIdPI64(void) {
int64_t id;
while (true) {
- int64_t ts = taosGetTimestampMs();
+ int64_t ts = taosGetTimestampMs() >> 8;
uint64_t pid = taosGetPId();
int32_t val = atomic_add_fetch_32(&tUUIDSerialNo, 1);
- id = ((tUUIDHashId & 0x07FF) << 52) | ((pid & 0x0FFF) << 40) | ((ts & 0xFFFFFF) << 16) | (val & 0xFFFF);
+ id = ((tUUIDHashId & 0x07FF) << 52) | ((pid & 0x0F) << 48) | ((ts & 0x3FFFFFF) << 20) | (val & 0xFFFFF);
if (id) {
break;
}
diff --git a/source/util/src/version.c.in b/source/util/src/version.c.in
index be1a4a404875739cdef349a901e52e195c2a9cde..cb307b57fce37ba4243aea83995e66612f3c4371 100644
--- a/source/util/src/version.c.in
+++ b/source/util/src/version.c.in
@@ -1,4 +1,4 @@
-char version[12] = "${TD_VER_NUMBER}";
+char version[64] = "${TD_VER_NUMBER}";
char compatible_version[12] = "${TD_VER_COMPATIBLE}";
char gitinfo[48] = "${TD_VER_GIT}";
char buildinfo[64] = "Built at ${TD_VER_DATE}";
diff --git a/source/util/test/CMakeLists.txt b/source/util/test/CMakeLists.txt
index d2a503e6613909c39af8cfb961dd5b51f36120c3..6e42ef7e75eac38a5a072bfef8521152fd74ae06 100644
--- a/source/util/test/CMakeLists.txt
+++ b/source/util/test/CMakeLists.txt
@@ -75,4 +75,12 @@ target_link_libraries(taosbsearchTest os util gtest_main)
add_test(
NAME taosbsearchTest
COMMAND taosbsearchTest
+)
+
+# trbtreeTest
+add_executable(rbtreeTest "trbtreeTest.cpp")
+target_link_libraries(rbtreeTest os util gtest_main)
+add_test(
+ NAME rbtreeTest
+ COMMAND rbtreeTest
)
\ No newline at end of file
diff --git a/source/util/test/hashTest.cpp b/source/util/test/hashTest.cpp
index 99f5a761c5d0d3a489176749883da981c847011d..97e67ea36e7120b5e09f1097b5fb979b6fc12224 100644
--- a/source/util/test/hashTest.cpp
+++ b/source/util/test/hashTest.cpp
@@ -197,6 +197,201 @@ void acquireRleaseTest() {
taosMemoryFreeClear(data.p);
}
+void perfTest() {
+ SHashObj* hash1h = (SHashObj*) taosHashInit(100, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, HASH_NO_LOCK);
+ SHashObj* hash1s = (SHashObj*) taosHashInit(1000, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, HASH_NO_LOCK);
+ SHashObj* hash10s = (SHashObj*) taosHashInit(10000, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, HASH_NO_LOCK);
+ SHashObj* hash100s = (SHashObj*) taosHashInit(100000, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, HASH_NO_LOCK);
+ SHashObj* hash1m = (SHashObj*) taosHashInit(1000000, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, HASH_NO_LOCK);
+ SHashObj* hash10m = (SHashObj*) taosHashInit(10000000, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, HASH_NO_LOCK);
+ SHashObj* hash100m = (SHashObj*) taosHashInit(100000000, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, HASH_NO_LOCK);
+
+ char *name = (char*)taosMemoryCalloc(50000000, 9);
+ for (int64_t i = 0; i < 50000000; ++i) {
+ sprintf(name + i * 9, "t%08d", i);
+ }
+
+ for (int64_t i = 0; i < 50; ++i) {
+ taosHashPut(hash1h, name + i * 9, 9, &i, sizeof(i));
+ }
+
+ for (int64_t i = 0; i < 500; ++i) {
+ taosHashPut(hash1s, name + i * 9, 9, &i, sizeof(i));
+ }
+
+ for (int64_t i = 0; i < 5000; ++i) {
+ taosHashPut(hash10s, name + i * 9, 9, &i, sizeof(i));
+ }
+
+ for (int64_t i = 0; i < 50000; ++i) {
+ taosHashPut(hash100s, name + i * 9, 9, &i, sizeof(i));
+ }
+
+ for (int64_t i = 0; i < 500000; ++i) {
+ taosHashPut(hash1m, name + i * 9, 9, &i, sizeof(i));
+ }
+
+ for (int64_t i = 0; i < 5000000; ++i) {
+ taosHashPut(hash10m, name + i * 9, 9, &i, sizeof(i));
+ }
+
+ for (int64_t i = 0; i < 50000000; ++i) {
+ taosHashPut(hash100m, name + i * 9, 9, &i, sizeof(i));
+ }
+
+ int64_t start1h = taosGetTimestampMs();
+ int64_t start1hCt = taosHashGetCompTimes(hash1h);
+ for (int64_t i = 0; i < 10000000; ++i) {
+ ASSERT(taosHashGet(hash1h, name + (i % 50) * 9, 9));
+ }
+ int64_t end1h = taosGetTimestampMs();
+ int64_t end1hCt = taosHashGetCompTimes(hash1h);
+
+ int64_t start1s = taosGetTimestampMs();
+ int64_t start1sCt = taosHashGetCompTimes(hash1s);
+ for (int64_t i = 0; i < 10000000; ++i) {
+ ASSERT(taosHashGet(hash1s, name + (i % 500) * 9, 9));
+ }
+ int64_t end1s = taosGetTimestampMs();
+ int64_t end1sCt = taosHashGetCompTimes(hash1s);
+
+ int64_t start10s = taosGetTimestampMs();
+ int64_t start10sCt = taosHashGetCompTimes(hash10s);
+ for (int64_t i = 0; i < 10000000; ++i) {
+ ASSERT(taosHashGet(hash10s, name + (i % 5000) * 9, 9));
+ }
+ int64_t end10s = taosGetTimestampMs();
+ int64_t end10sCt = taosHashGetCompTimes(hash10s);
+
+ int64_t start100s = taosGetTimestampMs();
+ int64_t start100sCt = taosHashGetCompTimes(hash100s);
+ for (int64_t i = 0; i < 10000000; ++i) {
+ ASSERT(taosHashGet(hash100s, name + (i % 50000) * 9, 9));
+ }
+ int64_t end100s = taosGetTimestampMs();
+ int64_t end100sCt = taosHashGetCompTimes(hash100s);
+
+ int64_t start1m = taosGetTimestampMs();
+ int64_t start1mCt = taosHashGetCompTimes(hash1m);
+ for (int64_t i = 0; i < 10000000; ++i) {
+ ASSERT(taosHashGet(hash1m, name + (i % 500000) * 9, 9));
+ }
+ int64_t end1m = taosGetTimestampMs();
+ int64_t end1mCt = taosHashGetCompTimes(hash1m);
+
+ int64_t start10m = taosGetTimestampMs();
+ int64_t start10mCt = taosHashGetCompTimes(hash10m);
+ for (int64_t i = 0; i < 10000000; ++i) {
+ ASSERT(taosHashGet(hash10m, name + (i % 5000000) * 9, 9));
+ }
+ int64_t end10m = taosGetTimestampMs();
+ int64_t end10mCt = taosHashGetCompTimes(hash10m);
+
+ int64_t start100m = taosGetTimestampMs();
+ int64_t start100mCt = taosHashGetCompTimes(hash100m);
+ for (int64_t i = 0; i < 10000000; ++i) {
+ ASSERT(taosHashGet(hash100m, name + (i % 50000000) * 9, 9));
+ }
+ int64_t end100m = taosGetTimestampMs();
+ int64_t end100mCt = taosHashGetCompTimes(hash100m);
+
+
+ SArray *sArray[1000] = {0};
+ for (int64_t i = 0; i < 1000; ++i) {
+ sArray[i] = taosArrayInit(100000, 9);
+ }
+ int64_t cap = 4;
+ while (cap < 100000000) cap = (cap << 1u);
+
+ _hash_fn_t hashFp = taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY);
+ int32_t slotR = cap / 1000 + 1;
+ for (int64_t i = 0; i < 10000000; ++i) {
+ char* p = name + (i % 50000000) * 9;
+ uint32_t v = (*hashFp)(p, 9);
+ taosArrayPush(sArray[(v%cap)/slotR], p);
+ }
+ SArray *slArray = taosArrayInit(100000000, 9);
+ for (int64_t i = 0; i < 1000; ++i) {
+ int32_t num = taosArrayGetSize(sArray[i]);
+ SArray* pArray = sArray[i];
+ for (int64_t m = 0; m < num; ++m) {
+ char* p = (char*)taosArrayGet(pArray, m);
+ ASSERT(taosArrayPush(slArray, p));
+ }
+ }
+ int64_t start100mS = taosGetTimestampMs();
+ int64_t start100mSCt = taosHashGetCompTimes(hash100m);
+ int32_t num = taosArrayGetSize(slArray);
+ for (int64_t i = 0; i < num; ++i) {
+ ASSERT(taosHashGet(hash100m, (char*)TARRAY_GET_ELEM(slArray, i), 9));
+ }
+ int64_t end100mS = taosGetTimestampMs();
+ int64_t end100mSCt = taosHashGetCompTimes(hash100m);
+ for (int64_t i = 0; i < 1000; ++i) {
+ taosArrayDestroy(sArray[i]);
+ }
+ taosArrayDestroy(slArray);
+
+ printf("1h \t %" PRId64 "ms,%" PRId64 "\n", end1h - start1h, end1hCt - start1hCt);
+ printf("1s \t %" PRId64 "ms,%" PRId64 "\n", end1s - start1s, end1sCt - start1sCt);
+ printf("10s \t %" PRId64 "ms,%" PRId64 "\n", end10s - start10s, end10sCt - start10sCt);
+ printf("100s \t %" PRId64 "ms,%" PRId64 "\n", end100s - start100s, end100sCt - start100sCt);
+ printf("1m \t %" PRId64 "ms,%" PRId64 "\n", end1m - start1m, end1mCt - start1mCt);
+ printf("10m \t %" PRId64 "ms,%" PRId64 "\n", end10m - start10m, end10mCt - start10mCt);
+ printf("100m \t %" PRId64 "ms,%" PRId64 "\n", end100m - start100m, end100mCt - start100mCt);
+ printf("100mS \t %" PRId64 "ms,%" PRId64 "\n", end100mS - start100mS, end100mSCt - start100mSCt);
+
+ taosHashCleanup(hash1h);
+ taosHashCleanup(hash1s);
+ taosHashCleanup(hash10s);
+ taosHashCleanup(hash100s);
+ taosHashCleanup(hash1m);
+ taosHashCleanup(hash10m);
+ taosHashCleanup(hash100m);
+
+ SHashObj *mhash[1000] = {0};
+ for (int64_t i = 0; i < 1000; ++i) {
+ mhash[i] = (SHashObj*) taosHashInit(100000, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, HASH_NO_LOCK);
+ }
+
+ for (int64_t i = 0; i < 50000000; ++i) {
+#if 0
+ taosHashPut(mhash[i%1000], name + i * 9, 9, &i, sizeof(i));
+#else
+ taosHashPut(mhash[i/50000], name + i * 9, 9, &i, sizeof(i));
+#endif
+ }
+
+ int64_t startMhashCt = 0;
+ for (int64_t i = 0; i < 1000; ++i) {
+ startMhashCt += taosHashGetCompTimes(mhash[i]);
+ }
+
+ int64_t startMhash = taosGetTimestampMs();
+#if 0
+ for (int32_t i = 0; i < 10000000; ++i) {
+ ASSERT(taosHashGet(mhash[i%1000], name + i * 9, 9));
+ }
+#else
+// for (int64_t i = 0; i < 10000000; ++i) {
+ for (int64_t i = 0; i < 50000000; i+=5) {
+ ASSERT(taosHashGet(mhash[i/50000], name + i * 9, 9));
+ }
+#endif
+ int64_t endMhash = taosGetTimestampMs();
+ int64_t endMhashCt = 0;
+ for (int64_t i = 0; i < 1000; ++i) {
+ printf(" %" PRId64 , taosHashGetCompTimes(mhash[i]));
+ endMhashCt += taosHashGetCompTimes(mhash[i]);
+ }
+ printf("\n100m \t %" PRId64 "ms,%" PRId64 "\n", endMhash - startMhash, endMhashCt - startMhashCt);
+
+ for (int64_t i = 0; i < 1000; ++i) {
+ taosHashCleanup(mhash[i]);
+ }
+}
+
+
}
int main(int argc, char** argv) {
@@ -210,4 +405,5 @@ TEST(testCase, hashTest) {
noLockPerformanceTest();
multithreadsTest();
acquireRleaseTest();
+ //perfTest();
}
diff --git a/source/util/test/pageBufferTest.cpp b/source/util/test/pageBufferTest.cpp
index eaf198a483aa5e3e90595d2417516aa53f754331..534c17758714820e9a9f2bf6b81d23ac121fcaf4 100644
--- a/source/util/test/pageBufferTest.cpp
+++ b/source/util/test/pageBufferTest.cpp
@@ -18,37 +18,37 @@ void simpleTest() {
int32_t pageId = 0;
int32_t groupId = 0;
- SFilePage* pBufPage = static_cast(getNewBufPage(pBuf, groupId, &pageId));
+ SFilePage* pBufPage = static_cast(getNewBufPage(pBuf, &pageId));
ASSERT_TRUE(pBufPage != NULL);
ASSERT_EQ(getTotalBufSize(pBuf), 1024);
- SIDList list = getDataBufPagesIdList(pBuf, groupId);
+ SIDList list = getDataBufPagesIdList(pBuf);
ASSERT_EQ(taosArrayGetSize(list), 1);
- ASSERT_EQ(getNumOfBufGroupId(pBuf), 1);
+ //ASSERT_EQ(getNumOfBufGroupId(pBuf), 1);
releaseBufPage(pBuf, pBufPage);
- SFilePage* pBufPage1 = static_cast(getNewBufPage(pBuf, groupId, &pageId));
+ SFilePage* pBufPage1 = static_cast(getNewBufPage(pBuf, &pageId));
SFilePage* t = static_cast(getBufPage(pBuf, pageId));
ASSERT_TRUE(t == pBufPage1);
- SFilePage* pBufPage2 = static_cast(getNewBufPage(pBuf, groupId, &pageId));
+ SFilePage* pBufPage2 = static_cast(getNewBufPage(pBuf, &pageId));
SFilePage* t1 = static_cast(getBufPage(pBuf, pageId));
ASSERT_TRUE(t1 == pBufPage2);
- SFilePage* pBufPage3 = static_cast(getNewBufPage(pBuf, groupId, &pageId));
+ SFilePage* pBufPage3 = static_cast(getNewBufPage(pBuf, &pageId));
SFilePage* t2 = static_cast(getBufPage(pBuf, pageId));
ASSERT_TRUE(t2 == pBufPage3);
- SFilePage* pBufPage4 = static_cast(getNewBufPage(pBuf, groupId, &pageId));
+ SFilePage* pBufPage4 = static_cast(getNewBufPage(pBuf, &pageId));
SFilePage* t3 = static_cast(getBufPage(pBuf, pageId));
ASSERT_TRUE(t3 == pBufPage4);
releaseBufPage(pBuf, pBufPage2);
- SFilePage* pBufPage5 = static_cast(getNewBufPage(pBuf, groupId, &pageId));
+ SFilePage* pBufPage5 = static_cast(getNewBufPage(pBuf, &pageId));
SFilePage* t4 = static_cast(getBufPage(pBuf, pageId));
ASSERT_TRUE(t4 == pBufPage5);
@@ -64,7 +64,7 @@ void writeDownTest() {
int32_t groupId = 0;
int32_t nx = 12345;
- SFilePage* pBufPage = static_cast(getNewBufPage(pBuf, groupId, &pageId));
+ SFilePage* pBufPage = static_cast(getNewBufPage(pBuf, &pageId));
ASSERT_TRUE(pBufPage != NULL);
*(int32_t*)(pBufPage->data) = nx;
@@ -73,22 +73,22 @@ void writeDownTest() {
setBufPageDirty(pBufPage, true);
releaseBufPage(pBuf, pBufPage);
- SFilePage* pBufPage1 = static_cast(getNewBufPage(pBuf, groupId, &pageId));
+ SFilePage* pBufPage1 = static_cast(getNewBufPage(pBuf, &pageId));
SFilePage* t1 = static_cast(getBufPage(pBuf, pageId));
ASSERT_TRUE(t1 == pBufPage1);
ASSERT_TRUE(pageId == 1);
- SFilePage* pBufPage2 = static_cast(getNewBufPage(pBuf, groupId, &pageId));
+ SFilePage* pBufPage2 = static_cast(getNewBufPage(pBuf, &pageId));
SFilePage* t2 = static_cast(getBufPage(pBuf, pageId));
ASSERT_TRUE(t2 == pBufPage2);
ASSERT_TRUE(pageId == 2);
- SFilePage* pBufPage3 = static_cast(getNewBufPage(pBuf, groupId, &pageId));
+ SFilePage* pBufPage3 = static_cast(getNewBufPage(pBuf, &pageId));
SFilePage* t3 = static_cast(getBufPage(pBuf, pageId));
ASSERT_TRUE(t3 == pBufPage3);
ASSERT_TRUE(pageId == 3);
- SFilePage* pBufPage4 = static_cast(getNewBufPage(pBuf, groupId, &pageId));
+ SFilePage* pBufPage4 = static_cast(getNewBufPage(pBuf, &pageId));
SFilePage* t4 = static_cast(getBufPage(pBuf, pageId));
ASSERT_TRUE(t4 == pBufPage4);
ASSERT_TRUE(pageId == 4);
@@ -98,7 +98,7 @@ void writeDownTest() {
SFilePage* pBufPagex = static_cast(getBufPage(pBuf, writePageId));
ASSERT_EQ(*(int32_t*)pBufPagex->data, nx);
- SArray* pa = getDataBufPagesIdList(pBuf, groupId);
+ SArray* pa = getDataBufPagesIdList(pBuf);
ASSERT_EQ(taosArrayGetSize(pa), 5);
destroyDiskbasedBuf(pBuf);
@@ -113,32 +113,32 @@ void recyclePageTest() {
int32_t groupId = 0;
int32_t nx = 12345;
- SFilePage* pBufPage = static_cast(getNewBufPage(pBuf, groupId, &pageId));
+ SFilePage* pBufPage = static_cast(getNewBufPage(pBuf, &pageId));
ASSERT_TRUE(pBufPage != NULL);
releaseBufPage(pBuf, pBufPage);
- SFilePage* pBufPage1 = static_cast(getNewBufPage(pBuf, groupId, &pageId));
+ SFilePage* pBufPage1 = static_cast(getNewBufPage(pBuf, &pageId));
SFilePage* t1 = static_cast(getBufPage(pBuf, pageId));
ASSERT_TRUE(t1 == pBufPage1);
ASSERT_TRUE(pageId == 1);
- SFilePage* pBufPage2 = static_cast(getNewBufPage(pBuf, groupId, &pageId));
+ SFilePage* pBufPage2 = static_cast(getNewBufPage(pBuf, &pageId));
SFilePage* t2 = static_cast(getBufPage(pBuf, pageId));
ASSERT_TRUE(t2 == pBufPage2);
ASSERT_TRUE(pageId == 2);
- SFilePage* pBufPage3 = static_cast(getNewBufPage(pBuf, groupId, &pageId));
+ SFilePage* pBufPage3 = static_cast(getNewBufPage(pBuf, &pageId));
SFilePage* t3 = static_cast(getBufPage(pBuf, pageId));
ASSERT_TRUE(t3 == pBufPage3);
ASSERT_TRUE(pageId == 3);
- SFilePage* pBufPage4 = static_cast(getNewBufPage(pBuf, groupId, &pageId));
+ SFilePage* pBufPage4 = static_cast(getNewBufPage(pBuf, &pageId));
SFilePage* t4 = static_cast(getBufPage(pBuf, pageId));
ASSERT_TRUE(t4 == pBufPage4);
ASSERT_TRUE(pageId == 4);
releaseBufPage(pBuf, t4);
- SFilePage* pBufPage5 = static_cast(getNewBufPage(pBuf, groupId, &pageId));
+ SFilePage* pBufPage5 = static_cast(getNewBufPage(pBuf, &pageId));
SFilePage* t5 = static_cast(getBufPage(pBuf, pageId));
ASSERT_TRUE(t5 == pBufPage5);
ASSERT_TRUE(pageId == 5);
@@ -152,7 +152,7 @@ void recyclePageTest() {
SFilePage* pBufPagex1 = static_cast(getBufPage(pBuf, 1));
- SArray* pa = getDataBufPagesIdList(pBuf, groupId);
+ SArray* pa = getDataBufPagesIdList(pBuf);
ASSERT_EQ(taosArrayGetSize(pa), 6);
destroyDiskbasedBuf(pBuf);
diff --git a/source/util/test/trbtreeTest.cpp b/source/util/test/trbtreeTest.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..cabf315df0a2d32c6f2adcf1ac81a3e1565e6e0c
--- /dev/null
+++ b/source/util/test/trbtreeTest.cpp
@@ -0,0 +1,40 @@
+#include
+
+#include
+#include
+
+#include "trbtree.h"
+
+static int32_t tCmprInteger(const void *p1, const void *p2) {
+ if (*(int *)p1 < *(int *)p2) {
+ return -1;
+ } else if (*(int *)p1 > *(int *)p2) {
+ return 1;
+ }
+ return 0;
+}
+
+TEST(trbtreeTest, rbtree_test1) {
+#if 0
+ SRBTree rt;
+ tRBTreeCreate(&rt, tCmprInteger);
+ int a[] = {1, 3, 4, 2, 7, 5, 8};
+
+ for (int i = 0; i < sizeof(a) / sizeof(a[0]); i++) {
+ SRBTreeNode *pNode = (SRBTreeNode *)taosMemoryMalloc(sizeof(*pNode) + sizeof(int));
+ *(int *)pNode->payload = a[i];
+
+ tRBTreePut(&rt, pNode);
+ }
+
+ SRBTreeIter rti = tRBTreeIterCreate(&rt, 1);
+ SRBTreeNode *pNode = tRBTreeIterNext(&rti);
+ int la = 0;
+ while (pNode) {
+ GTEST_ASSERT_GT(*(int *)pNode->payload, la);
+ la = *(int *)pNode->payload;
+ // printf("%d\n", la);
+ pNode = tRBTreeIterNext(&rti);
+ }
+#endif
+}
\ No newline at end of file
diff --git a/tests/docs-examples-test/jdbc.sh b/tests/docs-examples-test/jdbc.sh
new file mode 100644
index 0000000000000000000000000000000000000000..d71085a40306956ea8d25e9b575c97ae9945df76
--- /dev/null
+++ b/tests/docs-examples-test/jdbc.sh
@@ -0,0 +1,27 @@
+#!/bin/bash
+
+pgrep taosd || taosd >> /dev/null 2>&1 &
+pgrep taosadapter || taosadapter >> /dev/null 2>&1 &
+cd ../../docs/examples/java
+
+mvn clean test > jdbc-out.log 2>&1
+tail -n 20 jdbc-out.log
+
+cases=`grep 'Tests run' jdbc-out.log | awk 'END{print $3}'`
+totalJDBCCases=`echo ${cases/%,}`
+failed=`grep 'Tests run' jdbc-out.log | awk 'END{print $5}'`
+JDBCFailed=`echo ${failed/%,}`
+error=`grep 'Tests run' jdbc-out.log | awk 'END{print $7}'`
+JDBCError=`echo ${error/%,}`
+
+totalJDBCFailed=`expr $JDBCFailed + $JDBCError`
+totalJDBCSuccess=`expr $totalJDBCCases - $totalJDBCFailed`
+
+if [ "$totalJDBCSuccess" -gt "0" ]; then
+ echo -e "\n${GREEN} ### Total $totalJDBCSuccess JDBC case(s) succeed! ### ${NC}"
+fi
+
+if [ "$totalJDBCFailed" -ne "0" ]; then
+ echo -e "\n${RED} ### Total $totalJDBCFailed JDBC case(s) failed! ### ${NC}"
+ exit 8
+fi
\ No newline at end of file
diff --git a/tests/pytest/tools/taosdumpTest2.py b/tests/pytest/tools/taosdumpTest2.py
index a8117ec04c79aff5c00dcfa604c1124854473d30..8a85ce10ed53946abe4f8ecd4a022752e07f94c1 100644
--- a/tests/pytest/tools/taosdumpTest2.py
+++ b/tests/pytest/tools/taosdumpTest2.py
@@ -11,15 +11,19 @@
# -*- coding: utf-8 -*-
+from logging.config import dictConfig
import sys
import os
from util.log import *
from util.cases import *
from util.sql import *
from util.dnodes import *
+import string
+import random
class TDTestCase:
+
def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor(), logSql)
@@ -47,12 +51,19 @@ class TDTestCase:
return ""
return paths[0]
+ def generateString(self, length):
+ chars = string.ascii_uppercase + string.ascii_lowercase
+ v = ""
+ for i in range(length):
+ v += random.choice(chars)
+ return v
+
def run(self):
if not os.path.exists("./taosdumptest/tmp"):
os.makedirs("./taosdumptest/tmp")
else:
- os.system("rm -rf ./taosdumptest/tmp")
- os.makedirs("./taosdumptest/tmp")
+ print("directory exists")
+ os.system("rm -rf ./taosdumptest/tmp/*")
tdSql.prepare()
@@ -76,17 +87,19 @@ class TDTestCase:
tdLog.info("taosdump found in %s" % binPath)
os.system("rm ./taosdumptest/tmp/*.sql")
+ os.system("rm ./taosdumptest/tmp/*.avro*")
+ os.system("rm -rf ./taosdumptest/taosdump.*")
os.system(
- "%s --databases db -o ./taosdumptest/tmp -B 32766 -L 1048576" %
+ "%s --databases db -o ./taosdumptest/tmp " %
binPath)
tdSql.execute("drop database db")
- tdSql.query("select * from information_schema.ins_databases")
+ tdSql.query("show databases")
tdSql.checkRows(2)
- os.system("%s -i ./taosdumptest/tmp" % binPath)
+ os.system("%s -i ./taosdumptest/tmp -y" % binPath)
- tdSql.query("select * from information_schema.ins_databases")
+ tdSql.query("show databases")
tdSql.checkRows(3)
tdSql.checkData(2, 0, 'db')
@@ -105,23 +118,22 @@ class TDTestCase:
"create table stb(ts timestamp, c1 binary(16374), c2 binary(16374), c3 binary(16374)) tags(t1 nchar(256))")
tdSql.execute(
"insert into t1 using stb tags('t1') values(now, '%s', '%s', '%s')" %
- ("16374",
- "16374",
- "16374"))
+ (self.generateString(16374),
+ self.generateString(16374),
+ self.generateString(16374)))
-# sys.exit(0)
os.system("rm ./taosdumptest/tmp/*.sql")
os.system("rm ./taosdumptest/tmp/*.avro*")
+ os.system("rm -rf ./taosdumptest/tmp/taosdump.*")
os.system("%s -D test -o ./taosdumptest/tmp -y" % binPath)
tdSql.execute("drop database test")
- tdSql.query("select * from information_schema.ins_databases")
+ tdSql.query("show databases")
tdSql.checkRows(3)
os.system("%s -i ./taosdumptest/tmp -y" % binPath)
tdSql.execute("use test")
- tdSql.error("show vnodes '' ")
tdSql.query("show stables")
tdSql.checkRows(1)
tdSql.checkData(0, 0, 'stb')
diff --git a/tests/pytest/util/common.py b/tests/pytest/util/common.py
index 33ef92bf735a5211044ebd37c3c8300abd8843a8..9ffebcbdad5f0fa07e26f1bb4d249643ab7bbe42 100644
--- a/tests/pytest/util/common.py
+++ b/tests/pytest/util/common.py
@@ -199,22 +199,22 @@ class TDCom:
res = requests.post(url, sql.encode("utf-8"), headers = self.preDefine()[0])
return res
- def cleanTb(self, type="taosc"):
+ def cleanTb(self, type="taosc", dbname="db"):
'''
type is taosc or restful
'''
- query_sql = "show stables"
+ query_sql = f"show {dbname}.stables"
res_row_list = tdSql.query(query_sql, True)
stb_list = map(lambda x: x[0], res_row_list)
for stb in stb_list:
if type == "taosc":
- tdSql.execute(f'drop table if exists `{stb}`')
+ tdSql.execute(f'drop table if exists {dbname}.`{stb}`')
if not stb[0].isdigit():
- tdSql.execute(f'drop table if exists {stb}')
+ tdSql.execute(f'drop table if exists {dbname}.{stb}')
elif type == "restful":
- self.restApiPost(f"drop table if exists `{stb}`")
+ self.restApiPost(f"drop table if exists {dbname}.`{stb}`")
if not stb[0].isdigit():
- self.restApiPost(f"drop table if exists {stb}")
+ self.restApiPost(f"drop table if exists {dbname}.{stb}")
def dateToTs(self, datetime_input):
return int(time.mktime(time.strptime(datetime_input, "%Y-%m-%d %H:%M:%S.%f")))
diff --git a/tests/pytest/util/dnodes.py b/tests/pytest/util/dnodes.py
index e530695d1e53c4628fb28175b308b67d149c16a3..89b7fe00ebb0cf04b4570643966d553a4bccea9b 100644
--- a/tests/pytest/util/dnodes.py
+++ b/tests/pytest/util/dnodes.py
@@ -36,9 +36,9 @@ class TDSimClient:
"rpcDebugFlag": "143",
"tmrDebugFlag": "131",
"cDebugFlag": "143",
- "udebugFlag": "143",
- "jnidebugFlag": "143",
- "qdebugFlag": "143",
+ "uDebugFlag": "143",
+ "jniDebugFlag": "143",
+ "qDebugFlag": "143",
"supportVnodes": "1024",
"telemetryReporting": "0",
}
@@ -134,7 +134,6 @@ class TDDnode:
"uDebugFlag": "131",
"sDebugFlag": "143",
"wDebugFlag": "143",
- "qdebugFlag": "143",
"numOfLogLines": "100000000",
"statusInterval": "1",
"supportVnodes": "1024",
@@ -484,7 +483,7 @@ class TDDnode:
psCmd = "ps -ef|grep -w %s| grep -v grep | awk '{print $2}'" % toBeKilled
processID = subprocess.check_output(
psCmd, shell=True).decode("utf-8")
-
+
onlyKillOnceWindows = 0
while(processID):
if not platform.system().lower() == 'windows' or (onlyKillOnceWindows == 0 and platform.system().lower() == 'windows'):
diff --git a/tests/pytest/util/sql.py b/tests/pytest/util/sql.py
index 753c41e094701271ca3b49a53eabde1461bd1e08..b320cf5995fd0063352f0da7a2dc04933022a7d2 100644
--- a/tests/pytest/util/sql.py
+++ b/tests/pytest/util/sql.py
@@ -102,7 +102,7 @@ class TDSql:
caller = inspect.getframeinfo(inspect.stack()[1][0])
args = (caller.filename, caller.lineno, sql, repr(e))
tdLog.notice("%s(%d) failed: sql:%s, %s" % args)
- raise Exception(repr(e))
+ raise Exception(repr(e))
i+=1
time.sleep(1)
pass
@@ -225,25 +225,21 @@ class TDSql:
# suppose user want to check nanosecond timestamp if a longer data passed
if (len(data) >= 28):
if pd.to_datetime(self.queryResult[row][col]) == pd.to_datetime(data):
- tdLog.info("sql:%s, row:%d col:%d data:%d == expect:%s" %
- (self.sql, row, col, self.queryResult[row][col], data))
+ tdLog.info(f"sql:{self.sql}, row:{row} col:{col} data:{self.queryResult[row][col]} == expect:{data}")
else:
if self.queryResult[row][col] == _parse_datetime(data):
- tdLog.info("sql:%s, row:%d col:%d data:%s == expect:%s" %
- (self.sql, row, col, self.queryResult[row][col], data))
+ tdLog.info(f"sql:{self.sql}, row:{row} col:{col} data:{self.queryResult[row][col]} == expect:{data}")
return
if str(self.queryResult[row][col]) == str(data):
- tdLog.info("sql:%s, row:%d col:%d data:%s == expect:%s" %
- (self.sql, row, col, self.queryResult[row][col], data))
+ tdLog.info(f"sql:{self.sql}, row:{row} col:{col} data:{self.queryResult[row][col]} == expect:{data}")
return
+
elif isinstance(data, float):
if abs(data) >= 1 and abs((self.queryResult[row][col] - data) / data) <= 0.000001:
- tdLog.info("sql:%s, row:%d col:%d data:%f == expect:%f" %
- (self.sql, row, col, self.queryResult[row][col], data))
+ tdLog.info(f"sql:{self.sql}, row:{row} col:{col} data:{self.queryResult[row][col]} == expect:{data}")
elif abs(data) < 1 and abs(self.queryResult[row][col] - data) <= 0.000001:
- tdLog.info("sql:%s, row:%d col:%d data:%f == expect:%f" %
- (self.sql, row, col, self.queryResult[row][col], data))
+ tdLog.info(f"sql:{self.sql}, row:{row} col:{col} data:{self.queryResult[row][col]} == expect:{data}")
else:
caller = inspect.getframeinfo(inspect.stack()[1][0])
args = (caller.filename, caller.lineno, self.sql, row, col, self.queryResult[row][col], data)
@@ -254,21 +250,7 @@ class TDSql:
args = (caller.filename, caller.lineno, self.sql, row, col, self.queryResult[row][col], data)
tdLog.exit("%s(%d) failed: sql:%s row:%d col:%d data:%s != expect:%s" % args)
- if data is None:
- tdLog.info("sql:%s, row:%d col:%d data:%s == expect:%s" %
- (self.sql, row, col, self.queryResult[row][col], data))
- elif isinstance(data, str):
- tdLog.info("sql:%s, row:%d col:%d data:%s == expect:%s" %
- (self.sql, row, col, self.queryResult[row][col], data))
- elif isinstance(data, datetime.date):
- tdLog.info("sql:%s, row:%d col:%d data:%s == expect:%s" %
- (self.sql, row, col, self.queryResult[row][col], data))
- elif isinstance(data, float):
- tdLog.info("sql:%s, row:%d col:%d data:%s == expect:%s" %
- (self.sql, row, col, self.queryResult[row][col], data))
- else:
- tdLog.info("sql:%s, row:%d col:%d data:%s == expect:%d" %
- (self.sql, row, col, self.queryResult[row][col], data))
+ tdLog.info(f"sql:{self.sql}, row:{row} col:{col} data:{self.queryResult[row][col]} == expect:{data}")
def getData(self, row, col):
self.checkRowCol(row, col)
@@ -307,7 +289,7 @@ class TDSql:
caller = inspect.getframeinfo(inspect.stack()[1][0])
args = (caller.filename, caller.lineno, sql, repr(e))
tdLog.notice("%s(%d) failed: sql:%s, %s" % args)
- raise Exception(repr(e))
+ raise Exception(repr(e))
i+=1
time.sleep(1)
pass
@@ -329,7 +311,7 @@ class TDSql:
tdLog.exit("%s(%d) failed: sql:%s, col_name_list:%s != expect_col_name_list:%s" % args)
def __check_equal(self, elm, expect_elm):
- if not type(elm) in(list, tuple) and elm == expect_elm:
+ if elm == expect_elm:
return True
if type(elm) in(list, tuple) and type(expect_elm) in(list, tuple):
if len(elm) != len(expect_elm):
diff --git a/tests/script/api/batchprepare.c b/tests/script/api/batchprepare.c
index ada2039460b431363555025ec7984f6b2f1b354a..f39d5e6528275900350ffaefbee18d43ce9a9e81 100644
--- a/tests/script/api/batchprepare.c
+++ b/tests/script/api/batchprepare.c
@@ -2598,7 +2598,6 @@ void runAll(TAOS *taos) {
printf("%s Begin\n", gCaseCtrl.caseCatalog);
runCaseList(taos);
-#if 0
strcpy(gCaseCtrl.caseCatalog, "Micro DB precision Test");
printf("%s Begin\n", gCaseCtrl.caseCatalog);
gCaseCtrl.precision = TIME_PRECISION_MICRO;
@@ -2654,7 +2653,6 @@ void runAll(TAOS *taos) {
gCaseCtrl.bindColNum = 6;
runCaseList(taos);
gCaseCtrl.bindColNum = 0;
-#endif
/*
strcpy(gCaseCtrl.caseCatalog, "Bind Col Type Test");
diff --git a/tests/script/jenkins/basic.txt b/tests/script/jenkins/basic.txt
index 97295d75e072c3c561f579bfc8cb2c15489da858..161c87844058ca6852c6649141e37f6cdf6a202f 100644
--- a/tests/script/jenkins/basic.txt
+++ b/tests/script/jenkins/basic.txt
@@ -221,6 +221,7 @@
./test.sh -f tsim/table/describe.sim
./test.sh -f tsim/table/double.sim
./test.sh -f tsim/table/float.sim
+./test.sh -f tsim/table/hash.sim
./test.sh -f tsim/table/int.sim
./test.sh -f tsim/table/limit.sim
./test.sh -f tsim/table/smallint.sim
@@ -248,6 +249,12 @@
./test.sh -f tsim/stream/windowClose.sim
./test.sh -f tsim/stream/ignoreExpiredData.sim
./test.sh -f tsim/stream/sliding.sim
+#./test.sh -f tsim/stream/partitionbyColumnInterval.sim
+#./test.sh -f tsim/stream/partitionbyColumnSession.sim
+#./test.sh -f tsim/stream/partitionbyColumnState.sim
+#./test.sh -f tsim/stream/deleteInterval.sim
+#./test.sh -f tsim/stream/deleteSession.sim
+#./test.sh -f tsim/stream/deleteState.sim
# ---- transaction ----
./test.sh -f tsim/trans/lossdata1.sim
@@ -344,6 +351,7 @@
# --- scalar ----
./test.sh -f tsim/scalar/in.sim
./test.sh -f tsim/scalar/scalar.sim
+./test.sh -f tsim/scalar/filter.sim
# ---- alter ----
./test.sh -f tsim/alter/cached_schema_after_alter.sim
diff --git a/tests/script/sh/abs_max.c b/tests/script/sh/abs_max.c
deleted file mode 100644
index d623adacf941e26d0de74c0c582beb7ca83c9c13..0000000000000000000000000000000000000000
--- a/tests/script/sh/abs_max.c
+++ /dev/null
@@ -1,88 +0,0 @@
-#include
-#include
-#include
-
-typedef struct SUdfInit{
- int maybe_null; /* 1 if function can return NULL */
- int decimals; /* for real functions */
- long long length; /* For string functions */
- char *ptr; /* free pointer for function data */
- int const_item; /* 0 if result is independent of arguments */
-} SUdfInit;
-
-
-#define TSDB_DATA_INT_NULL 0x80000000LL
-#define TSDB_DATA_BIGINT_NULL 0x8000000000000000LL
-
-void abs_max(char* data, short itype, short ibytes, int numOfRows, long long* ts, char* dataOutput, char* interBuf, char* tsOutput,
- int* numOfOutput, short otype, short obytes, SUdfInit* buf) {
- int i;
- int r = 0;
- printf("abs_max input data:%p, type:%d, rows:%d, ts:%p,%lld, dataoutput:%p, tsOutput:%p, numOfOutput:%p, buf:%p\n", data, itype, numOfRows, ts, *ts, dataOutput, tsOutput, numOfOutput, buf);
- if (itype == 5) {
- r=*(long *)dataOutput;
- *numOfOutput=0;
-
- for(i=0;i r) {
- r = v;
- }
- }
-
- *(long *)dataOutput=r;
-
- printf("abs_max out, dataoutput:%ld, numOfOutput:%d\n", *(long *)dataOutput, *numOfOutput);
- }
-}
-
-
-
-void abs_max_finalize(char* dataOutput, char* interBuf, int* numOfOutput, SUdfInit* buf) {
- int i;
- int r = 0;
- printf("abs_max_finalize dataoutput:%p:%d, numOfOutput:%d, buf:%p\n", dataOutput, *dataOutput, *numOfOutput, buf);
- *numOfOutput=1;
- printf("abs_max finalize, dataoutput:%ld, numOfOutput:%d\n", *(long *)dataOutput, *numOfOutput);
-}
-
-void abs_max_merge(char* data, int32_t numOfRows, char* dataOutput, int32_t* numOfOutput, SUdfInit* buf) {
- int r = 0;
-
- if (numOfRows > 0) {
- r = *((long *)data);
- }
- printf("abs_max_merge numOfRows:%d, dataoutput:%p, buf:%p\n", numOfRows, dataOutput, buf);
- for (int i = 1; i < numOfRows; ++i) {
- printf("abs_max_merge %d - %ld\n", i, *((long *)data + i));
- if (*((long*)data + i) > r) {
- r= *((long*)data + i);
- }
- }
-
- *(long*)dataOutput=r;
- if (numOfRows > 0) {
- *numOfOutput=1;
- } else {
- *numOfOutput=0;
- }
-
- printf("abs_max_merge, dataoutput:%ld, numOfOutput:%d\n", *(long *)dataOutput, *numOfOutput);
-}
-
-
-int abs_max_init(SUdfInit* buf) {
- printf("abs_max init\n");
- return 0;
-}
-
-
-void abs_max_destroy(SUdfInit* buf) {
- printf("abs_max destroy\n");
-}
-
diff --git a/tests/script/sh/add_one.c b/tests/script/sh/add_one.c
deleted file mode 100644
index e12cf8f26f6ddad67f9f7b091c033de46a3f6f50..0000000000000000000000000000000000000000
--- a/tests/script/sh/add_one.c
+++ /dev/null
@@ -1,33 +0,0 @@
-#include
-#include
-#include
-
-typedef struct SUdfInit{
- int maybe_null; /* 1 if function can return NULL */
- int decimals; /* for real functions */
- long long length; /* For string functions */
- char *ptr; /* free pointer for function data */
- int const_item; /* 0 if result is independent of arguments */
-} SUdfInit;
-
-void add_one(char* data, short itype, short ibytes, int numOfRows, long long* ts, char* dataOutput, char* interBUf, char* tsOutput,
- int* numOfOutput, short otype, short obytes, SUdfInit* buf) {
- int i;
- int r = 0;
- printf("add_one input data:%p, type:%d, rows:%d, ts:%p,%lld, dataoutput:%p, tsOutput:%p, numOfOutput:%p, buf:%p\n", data, itype, numOfRows, ts, *ts, dataOutput, tsOutput, numOfOutput, buf);
- if (itype == 4) {
- for(i=0;i
-#include
-#include
-
-typedef struct SUdfInit{
- int maybe_null; /* 1 if function can return NULL */
- int decimals; /* for real functions */
- long long length; /* For string functions */
- char *ptr; /* free pointer for function data */
- int const_item; /* 0 if result is independent of arguments */
-} SUdfInit;
-
-typedef struct SDemo{
- double sum;
- int num;
- short otype;
-}SDemo;
-
-#define FLOAT_NULL 0x7FF00000 // it is an NAN
-#define DOUBLE_NULL 0x7FFFFF0000000000LL // it is an NAN
-
-
-void demo(char* data, short itype, short ibytes, int numOfRows, long long* ts, char* dataOutput, char* interBuf, char* tsOutput,
- int* numOfOutput, short otype, short obytes, SUdfInit* buf) {
- int i;
- double r = 0;
- SDemo *p = (SDemo *)interBuf;
- SDemo *q = (SDemo *)dataOutput;
- printf("demo input data:%p, type:%d, rows:%d, ts:%p,%lld, dataoutput:%p, interBUf:%p, tsOutput:%p, numOfOutput:%p, buf:%p\n", data, itype, numOfRows, ts, *ts, dataOutput, interBuf, tsOutput, numOfOutput, buf);
-
- for(i=0;isum += r*r;
- }
-
- p->otype = otype;
- p->num += numOfRows;
-
- q->sum = p->sum;
- q->num = p->num;
- q->otype = p->otype;
-
- *numOfOutput=1;
-
- printf("demo out, sum:%f, num:%d, numOfOutput:%d\n", p->sum, p->num, *numOfOutput);
-}
-
-
-void demo_merge(char* data, int32_t numOfRows, char* dataOutput, int32_t* numOfOutput, SUdfInit* buf) {
- int i;
- SDemo *p = (SDemo *)data;
- SDemo res = {0};
- printf("demo_merge input data:%p, rows:%d, dataoutput:%p, numOfOutput:%p, buf:%p\n", data, numOfRows, dataOutput, numOfOutput, buf);
-
- for(i=0;isum * p->sum;
- res.num += p->num;
- p++;
- }
-
- p->sum = res.sum;
- p->num = res.num;
-
- *numOfOutput=1;
-
- printf("demo out, sum:%f, num:%d, numOfOutput:%d\n", p->sum, p->num, *numOfOutput);
-}
-
-
-
-void demo_finalize(char* dataOutput, char* interBuf, int* numOfOutput, SUdfInit* buf) {
- SDemo *p = (SDemo *)interBuf;
- printf("demo_finalize interbuf:%p, numOfOutput:%p, buf:%p, sum:%f, num:%d\n", interBuf, numOfOutput, buf, p->sum, p->num);
- if (p->otype == 6) {
- if (p->num != 30000) {
- *(unsigned int *)dataOutput = FLOAT_NULL;
- } else {
- *(float *)dataOutput = (float)(p->sum / p->num);
- }
- printf("finalize values:%f\n", *(float *)dataOutput);
- } else if (p->otype == 7) {
- if (p->num != 30000) {
- *(unsigned long long *)dataOutput = DOUBLE_NULL;
- } else {
- *(double *)dataOutput = (double)(p->sum / p->num);
- }
- printf("finalize values:%f\n", *(double *)dataOutput);
- }
-
- *numOfOutput=1;
-
- printf("demo finalize, numOfOutput:%d\n", *numOfOutput);
-}
-
-
-int demo_init(SUdfInit* buf) {
- printf("demo init\n");
- return 0;
-}
-
-
-void demo_destroy(SUdfInit* buf) {
- printf("demo destroy\n");
-}
-
diff --git a/tests/script/sh/demo.lua b/tests/script/sh/demo.lua
deleted file mode 100644
index c5e5582fc30b58db30a5b18faa4ccfd0a5f656d0..0000000000000000000000000000000000000000
--- a/tests/script/sh/demo.lua
+++ /dev/null
@@ -1,43 +0,0 @@
-funcName = "test"
-
-global = {}
-
-function test_init()
- return global
-end
-
-function test_add(rows, ans, key)
- t = {}
- t["sum"] = 0.0
- t["num"] = 0
- for i=1, #rows do
- t["sum"] = t["sum"] + rows[i] * rows[i]
- end
- t["num"] = #rows
-
-
- if (ans[key] ~= nil)
- then
- ans[key]["sum"] = ans[key]["sum"] + t["sum"]
- ans[key]["num"] = ans[key]["num"] + t["num"]
- else
- ans[key] = t
- end
-
- return ans;
-end
-
-function test_finalize(ans, key)
- local ret = 0.0
-
- if (ans[key] ~= nil and ans[key]["num"] == 30000)
- then
- ret = ans[key]["sum"]/ans[key]["num"]
- ans[key]["sum"] = 0.0
- ans[key]["num"] = 0
- else
- ret = inf
- end
-
- return ret, ans
-end
diff --git a/tests/script/sh/sum_double.c b/tests/script/sh/sum_double.c
deleted file mode 100644
index d6eea5d291c4add6bd40ae36492a1366c67f2cfd..0000000000000000000000000000000000000000
--- a/tests/script/sh/sum_double.c
+++ /dev/null
@@ -1,84 +0,0 @@
-#include
-#include
-#include
-
-typedef struct SUdfInit{
- int maybe_null; /* 1 if function can return NULL */
- int decimals; /* for real functions */
- long long length; /* For string functions */
- char *ptr; /* free pointer for function data */
- int const_item; /* 0 if result is independent of arguments */
-} SUdfInit;
-
-#define TSDB_DATA_INT_NULL 0x80000000LL
-
-
-void sum_double(char* data, short itype, short ibytes, int numOfRows, long long* ts, char* dataOutput, char* interBuf, char* tsOutput,
- int* numOfOutput, short otype, short obytes, SUdfInit* buf) {
- int i;
- int r = 0;
- printf("sum_double input data:%p, type:%d, rows:%d, ts:%p,%lld, dataoutput:%p, tsOutput:%p, numOfOutput:%p, buf:%p\n", data, itype, numOfRows, ts, *ts, dataOutput, tsOutput, numOfOutput, buf);
- if (itype == 4) {
- r=*(int *)dataOutput;
- *numOfOutput=0;
-
- for(i=0;iptr)=*(int*)dataOutput*2;
- *(int*)dataOutput=*(int*)(buf->ptr);
- printf("sum_double finalize, dataoutput:%d, numOfOutput:%d\n", *(int *)dataOutput, *numOfOutput);
-}
-
-void sum_double_merge(char* data, int32_t numOfRows, char* dataOutput, int32_t* numOfOutput, SUdfInit* buf) {
- int r = 0;
- int sum = 0;
-
- printf("sum_double_merge numOfRows:%d, dataoutput:%p, buf:%p\n", numOfRows, dataOutput, buf);
- for (int i = 0; i < numOfRows; ++i) {
- printf("sum_double_merge %d - %d\n", i, *((int*)data + i));
- sum +=*((int*)data + i);
- }
-
- *(int*)dataOutput+=sum;
- if (numOfRows > 0) {
- *numOfOutput=1;
- } else {
- *numOfOutput=0;
- }
-
- printf("sum_double_merge, dataoutput:%d, numOfOutput:%d\n", *(int *)dataOutput, *numOfOutput);
-}
-
-
-int sum_double_init(SUdfInit* buf) {
- buf->maybe_null=1;
- buf->ptr = taosMemoryMalloc(sizeof(int));
- printf("sum_double init\n");
- return 0;
-}
-
-
-void sum_double_destroy(SUdfInit* buf) {
- taosMemoryFree(buf->ptr);
- printf("sum_double destroy\n");
-}
-
diff --git a/tests/script/tmp/monitor.sim b/tests/script/tmp/monitor.sim
index c0c1da567c4209a1bb7aae5203036a6974ac114a..b410e1b6ad99e8bd83dcf7dd3cf0f3c4961d0ad4 100644
--- a/tests/script/tmp/monitor.sim
+++ b/tests/script/tmp/monitor.sim
@@ -4,6 +4,7 @@ system sh/cfg.sh -n dnode1 -c monitorfqdn -v localhost
system sh/cfg.sh -n dnode1 -c monitorport -v 80
system sh/cfg.sh -n dnode1 -c monitorInterval -v 1
system sh/cfg.sh -n dnode1 -c monitorComp -v 1
+system sh/cfg.sh -n dnode1 -c uptimeInterval -v 3
#system sh/cfg.sh -n dnode1 -c supportVnodes -v 128
#system sh/cfg.sh -n dnode1 -c telemetryReporting -v 1
@@ -14,7 +15,7 @@ system sh/cfg.sh -n dnode1 -c monitorComp -v 1
system sh/exec.sh -n dnode1 -s start
sql connect
-print =============== select * from information_schema.ins_dnodes
+print =============== create database
sql create database db vgroups 2;
sql use db;
sql create table db.stb (ts timestamp, c1 int, c2 binary(4)) tags(t1 int, t2 binary(16)) comment "abd";
diff --git a/tests/script/tsim/column/table.sim b/tests/script/tsim/column/table.sim
index 4f1d32c373e3712275deec14a54e7efa0e77de61..03c4799681015a63e1e343168037f6cd4d601b8b 100644
--- a/tests/script/tsim/column/table.sim
+++ b/tests/script/tsim/column/table.sim
@@ -159,6 +159,7 @@ if $data01 != 10 then
return -1
endi
if $data02 != 4.500000000 then
+ print expect 4.500000000, actual: $data02
return -1
endi
if $data03 != 4.500000000 then
diff --git a/tests/script/tsim/compute/interval.sim b/tests/script/tsim/compute/interval.sim
index 4e7960ac4ae958f2e594fbad3bb6f7b50b13ed94..dc11c20ec925be39d12d2a7d1e92bbcb1da830b1 100644
--- a/tests/script/tsim/compute/interval.sim
+++ b/tests/script/tsim/compute/interval.sim
@@ -101,7 +101,7 @@ $ms = 1601481600000 + $cc
$cc = 1 * 60000
$ms2 = 1601481600000 - $cc
-sql select count(tbcol), avg(tbcol), max(tbcol), min(tbcol), count(tbcol) from $tb where ts <= $ms and ts > $ms2 interval(1m) fill(value,0)
+sql select count(tbcol), avg(tbcol), max(tbcol), min(tbcol), count(tbcol) from $tb where ts <= $ms and ts > $ms2 interval(1m) fill(value,0,0,0,0,0)
print ===> $rows
if $rows < 30 then
print expect greater than 30, actual: $rows
@@ -180,7 +180,7 @@ $ms1 = 1601481600000 + $cc
$cc = 1 * 60000
$ms2 = 1601481600000 - $cc
-sql select count(tbcol), avg(tbcol), max(tbcol), min(tbcol), count(tbcol) from $mt where ts <= $ms1 and ts > $ms2 interval(1m) fill(value, 0)
+sql select count(tbcol), avg(tbcol), max(tbcol), min(tbcol), count(tbcol) from $mt where ts <= $ms1 and ts > $ms2 interval(1m) fill(value, 0,0,0,0,0)
if $rows < 30 then
return -1
endi
diff --git a/tests/script/tsim/db/basic1.sim b/tests/script/tsim/db/basic1.sim
index 69eeb9347b22b154c7609b2ff89e36aa43f63a82..5d7d0bd9e9a682a761ffb33f486545f7028f5bfc 100644
--- a/tests/script/tsim/db/basic1.sim
+++ b/tests/script/tsim/db/basic1.sim
@@ -107,6 +107,37 @@ if $data30 != 12 then
return -1
endi
+print =============== show vnodes
+sql show vnodes 1
+if $rows != 9 then
+ return -1
+endi
+
+if $data(4)[1] != 1 then
+ return -1
+endi
+
+if $data(4)[2] != leader then
+ return -1
+endi
+
+if $data(4)[3] != d2 then
+ return -1
+endi
+
+if $data(4)[4] != 1 then
+ return -1
+endi
+
+if $data(4)[5] != localhost:7100 then
+ return -1
+endi
+
+sql show vnodes 'localhost:7100'
+if $rows != 9 then
+ return -1
+endi
+
print =============== drop database
sql drop database d2
sql drop database d3
diff --git a/tests/script/tsim/db/basic2.sim b/tests/script/tsim/db/basic2.sim
index b7ac0b5edd8663f653cc9216bceb1eee6054331e..4f0ba4a13c18f29a758a92318c2a66c133fd28f3 100644
--- a/tests/script/tsim/db/basic2.sim
+++ b/tests/script/tsim/db/basic2.sim
@@ -4,7 +4,7 @@ system sh/exec.sh -n dnode1 -s start
sql connect
print =============== conflict stb
-sql create database db vgroups 1;
+sql create database db vgroups 4;
sql use db;
sql create table stb (ts timestamp, i int) tags (j int);
sql_error create table stb using stb tags (1);
@@ -16,6 +16,9 @@ sql_error create table ctb (ts timestamp, i int) tags (j int);
sql create table ntb (ts timestamp, i int);
sql_error create table ntb (ts timestamp, i int) tags (j int);
+sql drop table ntb
+sql create table ntb (ts timestamp, i int) tags (j int);
+
sql drop database db
print =============== create database d1
diff --git a/tests/script/tsim/parser/alter1.sim b/tests/script/tsim/parser/alter1.sim
index 9d0049e45e5437d9d6de814b744d8fce3ccd876e..369419dcd9cd91688f39c27dbd54c33ee0699ae8 100644
--- a/tests/script/tsim/parser/alter1.sim
+++ b/tests/script/tsim/parser/alter1.sim
@@ -130,4 +130,4 @@ endi
# return -1
#endi
-system sh/exec.sh -n dnode1 -s stop -x SIGINT
\ No newline at end of file
+system sh/exec.sh -n dnode1 -s stop -x SIGINT
diff --git a/tests/script/tsim/parser/binary_escapeCharacter.sim b/tests/script/tsim/parser/binary_escapeCharacter.sim
index 0b437d8b04a39a400b25368263f88c2b846c155a..5a9c0e7bb1d2b141639a1408ffcc4ae064dd78f8 100644
--- a/tests/script/tsim/parser/binary_escapeCharacter.sim
+++ b/tests/script/tsim/parser/binary_escapeCharacter.sim
@@ -101,4 +101,4 @@ sql_error insert into tb values(now, '\');
#sql_error insert into tb values(now, '\\\n');
sql insert into tb values(now, '\n');
-system sh/exec.sh -n dnode1 -s stop -x SIGINT
\ No newline at end of file
+system sh/exec.sh -n dnode1 -s stop -x SIGINT
diff --git a/tests/script/tsim/parser/col_arithmetic_operation.sim b/tests/script/tsim/parser/col_arithmetic_operation.sim
index f22beefdf88c3d90bff8554cc44b5768bfef3d1e..9a2ba34c85e552585770bb42913b8c83ddd58131 100644
--- a/tests/script/tsim/parser/col_arithmetic_operation.sim
+++ b/tests/script/tsim/parser/col_arithmetic_operation.sim
@@ -132,4 +132,4 @@ sql_error select max(c1-c2) from $tb
print =====================> td-1764
sql select sum(c1)/count(*), sum(c1) as b, count(*) as b from $stb interval(1y)
-system sh/exec.sh -n dnode1 -s stop -x SIGINT
\ No newline at end of file
+system sh/exec.sh -n dnode1 -s stop -x SIGINT
diff --git a/tests/script/tsim/parser/columnValue_bigint.sim b/tests/script/tsim/parser/columnValue_bigint.sim
index 2cf0151a053f3c62b7ab904156361c8705dc554e..0a024029a534232aa5829d0cf59807f8c65d71f2 100644
--- a/tests/script/tsim/parser/columnValue_bigint.sim
+++ b/tests/script/tsim/parser/columnValue_bigint.sim
@@ -373,7 +373,7 @@ sql_error insert into st_bigint_e7 values (now, "123abc")
sql_error insert into st_bigint_e9 values (now, abc)
sql_error insert into st_bigint_e10 values (now, "abc")
sql_error insert into st_bigint_e11 values (now, " ")
-sql insert into st_bigint_e12 values (now, '')
+sql_error insert into st_bigint_e12 values (now, '')
sql_error insert into st_bigint_e13 using mt_bigint tags (033) values (now, 9223372036854775808)
sql insert into st_bigint_e14 using mt_bigint tags (033) values (now, -9223372036854775808)
@@ -386,7 +386,7 @@ sql_error insert into st_bigint_e20 using mt_bigint tags (033) values (now, "123
sql_error insert into st_bigint_e22 using mt_bigint tags (033) values (now, abc)
sql_error insert into st_bigint_e23 using mt_bigint tags (033) values (now, "abc")
sql_error insert into st_bigint_e24 using mt_bigint tags (033) values (now, " ")
-sql insert into st_bigint_e25 using mt_bigint tags (033) values (now, '')
+sql_error insert into st_bigint_e25 using mt_bigint tags (033) values (now, '')
sql_error insert into st_bigint_e13_0 using mt_bigint tags (9223372036854775808) values (now, -033)
sql insert into st_bigint_e14_0 using mt_bigint tags (-9223372036854775808) values (now, -033)
diff --git a/tests/script/tsim/parser/columnValue_double.sim b/tests/script/tsim/parser/columnValue_double.sim
index da09b77353fc12f237e4fb94dc99b69bd8bec9c1..bfcf338faca3669b18b2e821f10b5e3b4b0f328e 100644
--- a/tests/script/tsim/parser/columnValue_double.sim
+++ b/tests/script/tsim/parser/columnValue_double.sim
@@ -476,7 +476,7 @@ sql_error insert into st_double_e7 values (now, "123abc")
sql_error insert into st_double_e9 values (now, abc)
sql_error insert into st_double_e10 values (now, "abc")
sql_error insert into st_double_e11 values (now, " ")
-sql insert into st_double_e12 values (now, '')
+sql_error insert into st_double_e12 values (now, '')
sql_error insert into st_double_e13 using mt_double tags (033) values (now, 11.7976931348623157e+308)
sql_error insert into st_double_e14 using mt_double tags (033) values (now, -11.7976931348623157e+308)
@@ -489,7 +489,7 @@ sql_error insert into st_double_e20 using mt_double tags (033) values (now, "123
sql_error insert into st_double_e22 using mt_double tags (033) values (now, abc)
sql_error insert into st_double_e23 using mt_double tags (033) values (now, "abc")
sql_error insert into st_double_e24 using mt_double tags (033) values (now, " ")
-sql insert into st_double_e25_1 using mt_double tags (033) values (now, '')
+sql_error insert into st_double_e25_1 using mt_double tags (033) values (now, '')
sql_error insert into st_double_e13 using mt_double tags (31.7976931348623157e+308) values (now, -033)
sql_error insert into st_double_e14 using mt_double tags (-31.7976931348623157e+308) values (now, -033)
diff --git a/tests/script/tsim/parser/columnValue_float.sim b/tests/script/tsim/parser/columnValue_float.sim
index 3e20e178c373b9bf55b4be0e666e27e3d5787447..b2db7dff2b875d8839e1bdf356c40ba4d6d9e916 100644
--- a/tests/script/tsim/parser/columnValue_float.sim
+++ b/tests/script/tsim/parser/columnValue_float.sim
@@ -506,7 +506,7 @@ sql_error insert into st_float_e7 values (now, "123abc")
sql_error insert into st_float_e9 values (now, abc)
sql_error insert into st_float_e10 values (now, "abc")
sql_error insert into st_float_e11 values (now, " ")
-sql insert into st_float_e12 values (now, '')
+sql_error insert into st_float_e12 values (now, '')
sql_error insert into st_float_e13 using mt_float tags (033) values (now, 3.50282347e+38)
sql_error insert into st_float_e14 using mt_float tags (033) values (now, -3.50282347e+38)
@@ -519,7 +519,7 @@ sql_error insert into st_float_e20 using mt_float tags (033) values (now, "123ab
sql_error insert into st_float_e22 using mt_float tags (033) values (now, abc)
sql_error insert into st_float_e23 using mt_float tags (033) values (now, "abc")
sql_error insert into st_float_e24 using mt_float tags (033) values (now, " ")
-sql insert into st_float_e25_1 using mt_float tags (033) values (now, '')
+sql_error insert into st_float_e25_1 using mt_float tags (033) values (now, '')
sql_error insert into st_float_e13 using mt_float tags (3.50282347e+38) values (now, -033)
sql_error insert into st_float_e14 using mt_float tags (-3.50282347e+38) values (now, -033)
diff --git a/tests/script/tsim/parser/columnValue_int.sim b/tests/script/tsim/parser/columnValue_int.sim
index 009fbd1ede718245b6145b9a292d9c325d2a872c..4a3b8ebd0ba46a08ee737de44cf07c1cb10aa555 100644
--- a/tests/script/tsim/parser/columnValue_int.sim
+++ b/tests/script/tsim/parser/columnValue_int.sim
@@ -371,7 +371,7 @@ sql_error insert into st_int_e7 values (now, "123abc")
sql_error insert into st_int_e9 values (now, abc)
sql_error insert into st_int_e10 values (now, "abc")
sql_error insert into st_int_e11 values (now, " ")
-sql insert into st_int_e12 values (now, '')
+sql_error insert into st_int_e12 values (now, '')
sql_error insert into st_int_e13 using mt_int tags (033) values (now, 2147483648)
sql insert into st_int_e14 using mt_int tags (033) values (now, -2147483648)
@@ -384,7 +384,7 @@ sql_error insert into st_int_e20 using mt_int tags (033) values (now, "123abc")
sql_error insert into st_int_e22 using mt_int tags (033) values (now, abc)
sql_error insert into st_int_e23 using mt_int tags (033) values (now, "abc")
sql_error insert into st_int_e24 using mt_int tags (033) values (now, " ")
-sql insert into st_int_e25 using mt_int tags (033) values (now, '')
+sql_error insert into st_int_e25 using mt_int tags (033) values (now, '')
sql_error insert into st_int_e13 using mt_int tags (2147483648) values (now, -033)
sql insert into st_int_e14_1 using mt_int tags (-2147483648) values (now, -033)
diff --git a/tests/script/tsim/parser/columnValue_smallint.sim b/tests/script/tsim/parser/columnValue_smallint.sim
index 0dcb0d85f4f9dad62dc71aa7fd2df916c6fd7a63..eb364f36302df811549a968eaf5bf05a823b84b3 100644
--- a/tests/script/tsim/parser/columnValue_smallint.sim
+++ b/tests/script/tsim/parser/columnValue_smallint.sim
@@ -374,7 +374,7 @@ sql_error insert into st_smallint_e7 values (now, "123abc")
sql_error insert into st_smallint_e9 values (now, abc)
sql_error insert into st_smallint_e10 values (now, "abc")
sql_error insert into st_smallint_e11 values (now, " ")
-sql insert into st_smallint_e12 values (now, '')
+sql_error insert into st_smallint_e12 values (now, '')
sql_error insert into st_smallint_e13 using mt_smallint tags (033) values (now, 32768)
sql insert into st_smallint_e14_1 using mt_smallint tags (033) values (now, -32768)
@@ -387,7 +387,7 @@ sql_error insert into st_smallint_e20 using mt_smallint tags (033) values (now,
sql_error insert into st_smallint_e22 using mt_smallint tags (033) values (now, abc)
sql_error insert into st_smallint_e23 using mt_smallint tags (033) values (now, "abc")
sql_error insert into st_smallint_e24 using mt_smallint tags (033) values (now, " ")
-sql insert into st_smallint_e25_1 using mt_smallint tags (033) values (now, '')
+sql_error insert into st_smallint_e25_1 using mt_smallint tags (033) values (now, '')
sql_error insert into st_smallint_e13 using mt_smallint tags (32768) values (now, -033)
sql insert into st_smallint_e14 using mt_smallint tags (-32768) values (now, -033)
diff --git a/tests/script/tsim/parser/columnValue_tinyint.sim b/tests/script/tsim/parser/columnValue_tinyint.sim
index 62ae4e5228f94e466dd0bc44e681b7b4b940fcdb..d7938aa739fb3584c8fd549005bd02e44ac39bc0 100644
--- a/tests/script/tsim/parser/columnValue_tinyint.sim
+++ b/tests/script/tsim/parser/columnValue_tinyint.sim
@@ -372,7 +372,7 @@ sql_error insert into st_tinyint_e7 values (now, "123abc")
sql_error insert into st_tinyint_e9 values (now, abc)
sql_error insert into st_tinyint_e10 values (now, "abc")
sql_error insert into st_tinyint_e11 values (now, " ")
-sql insert into st_tinyint_e12 values (now, '')
+sql_error insert into st_tinyint_e12 values (now, '')
sql_error insert into st_tinyint_e13 using mt_tinyint tags (033) values (now, 128)
sql insert into st_tinyint_e14_1 using mt_tinyint tags (033) values (now, -128)
@@ -385,7 +385,7 @@ sql_error insert into st_tinyint_e20 using mt_tinyint tags (033) values (now, "1
sql_error insert into st_tinyint_e22 using mt_tinyint tags (033) values (now, abc)
sql_error insert into st_tinyint_e23 using mt_tinyint tags (033) values (now, "abc")
sql_error insert into st_tinyint_e24 using mt_tinyint tags (033) values (now, " ")
-sql insert into st_tinyint_e25_2 using mt_tinyint tags (033) values (now, '')
+sql_error insert into st_tinyint_e25_2 using mt_tinyint tags (033) values (now, '')
sql_error insert into st_tinyint_e13 using mt_tinyint tags (128) values (now, -033)
sql insert into st_tinyint_e14 using mt_tinyint tags (-128) values (now, -033)
diff --git a/tests/script/tsim/parser/columnValue_unsign.sim b/tests/script/tsim/parser/columnValue_unsign.sim
index 85ff490bf4e520cdbbc0ed0008499af4425b2b93..7ae1b20eca18236c71277ae2c94a0976181a271a 100644
--- a/tests/script/tsim/parser/columnValue_unsign.sim
+++ b/tests/script/tsim/parser/columnValue_unsign.sim
@@ -129,4 +129,4 @@ if $rows != 1 then
return -1
endi
-system sh/exec.sh -n dnode1 -s stop -x SIGINT
\ No newline at end of file
+system sh/exec.sh -n dnode1 -s stop -x SIGINT
diff --git a/tests/script/tsim/parser/commit.sim b/tests/script/tsim/parser/commit.sim
index ae19a4803bc6bd0f75c2a307696d7b0fc6f1ecb6..a9bf8b26ebda178ed789f3119eac7203a6889f9b 100644
--- a/tests/script/tsim/parser/commit.sim
+++ b/tests/script/tsim/parser/commit.sim
@@ -97,6 +97,7 @@ while $loop <= $loops
endw
sql select count(*) from $stb
if $data00 != $totalNum then
+ print expect $totalNum , actual: $data00
return -1
endi
$loop = $loop + 1
diff --git a/tests/script/tsim/parser/fill.sim b/tests/script/tsim/parser/fill.sim
index f688d815e79fb76ce536fd75a1312230306dda41..4892345e12ed4b22a1c3d96ae2e6233e7e9fe642 100644
--- a/tests/script/tsim/parser/fill.sim
+++ b/tests/script/tsim/parser/fill.sim
@@ -47,31 +47,10 @@ $tsu = $tsu + $ts0
## fill syntax test
# number of fill values exceeds number of selected columns
-sql select _wstart, max(c1), max(c2), max(c3), max(c4), max(c5) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 6, 6, 6, 6, 6, 6, 6, 6)
-if $data11 != 6 then
- return -1
-endi
-if $data12 != 6 then
- return -1
-endi
-if $data13 != 6.00000 then
- return -1
-endi
-if $data14 != 6.000000000 then
- return -1
-endi
+sql_error select _wstart, max(c1), max(c2), max(c3), max(c4), max(c5) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 6, 6, 6, 6, 6, 6, 6, 6)
# number of fill values is smaller than number of selected columns
-sql select _wstart, max(c1), max(c2), max(c3) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 6, 6)
-if $data11 != 6 then
- return -1
-endi
-if $data12 != 6 then
- return -1
-endi
-if $data13 != 6.00000 then
- return -1
-endi
+sql_error select _wstart, max(c1), max(c2), max(c3) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 6, 6)
# unspecified filling method
sql_error select _wstart, max(c1), max(c2), max(c3), max(c4), max(c5) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill (6, 6, 6, 6, 6)
@@ -182,7 +161,7 @@ endi
# min_with_fill
print min_with_fill
-sql select _wstart, min(c1), min(c2), min(c3), min(c4), min(c5) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 6, 6, 6, 6, 6, 6, 6, 6)
+sql select _wstart, min(c1), min(c2), min(c3), min(c4), min(c5) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 6, 6, 6, 6, 6)
if $rows != 9 then
return -1
endi
@@ -216,7 +195,7 @@ endi
# first_with_fill
print first_with_fill
-sql select _wstart, first(c1), first(c2), first(c3), first(c4), first(c5), first(c6), first(c7), first(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 6, 6, 6, 6, 6, 6, 6, 6)
+sql select _wstart, first(c1), first(c2), first(c3), first(c4), first(c5), first(c6), first(c7), first(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 6, 6, 6, 6, 6, 6, '6', '6')
if $rows != 9 then
return -1
endi
@@ -305,7 +284,7 @@ endi
# last_with_fill
print last_with_fill
-sql select _wstart, last(c1), last(c2), last(c3), last(c4), last(c5), last(c6), last(c7), last(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 6, 6, 6, 6, 6, 6, 6, 6)
+sql select _wstart, last(c1), last(c2), last(c3), last(c4), last(c5), last(c6), last(c7), last(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 6, 6, 6, 6, 6, 6, '6', '6')
if $rows != 9 then
return -1
endi
@@ -351,7 +330,7 @@ if $data11 != -1 then
endi
# fill_char_values_to_arithmetic_fields
-sql select sum(c1), avg(c2), max(c3), min(c4), avg(c4), count(c6), last(c7), last(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 'c', 'c', 'c', 'c', 'c', 'c', 'c', 'c')
+sql_error select sum(c1), avg(c2), max(c3), min(c4), avg(c4), count(c6), last(c7), last(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 'c', 'c', 'c', 'c', 'c', 'c', 'c', 'c')
# fill_multiple_columns
sql_error select sum(c1), avg(c2), min(c3), max(c4), count(c6), first(c7), last(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 99, 99, 99, 99, 99, abc, abc)
@@ -376,37 +355,25 @@ endi
# fill_into_nonarithmetic_fieds
print select _wstart, first(c6), first(c7), first(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 20000000, 20000000, 20000000)
-sql select _wstart, first(c6), first(c7), first(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 20000000, 20000000, 20000000)
-if $data01 != 1 then
- return -1
-endi
-if $data11 != NULL then
- return -1
-endi
+sql_error select _wstart, first(c6), first(c7), first(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 20000000, 20000000, 20000000)
-sql select first(c6), first(c7), first(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 1, 1, 1)
-sql select first(c6), first(c7), first(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 1.1, 1.1, 1.1)
-sql select first(c6), first(c7), first(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 1e1, 1e1, 1e1)
+sql_error select first(c6), first(c7), first(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 1, 1, 1)
+sql_error select first(c6), first(c7), first(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 1.1, 1.1, 1.1)
+sql_error select first(c6), first(c7), first(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 1e1, 1e1, 1e1)
sql select first(c7), first(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, '1e', '1e1')
# fill quoted values into bool column will throw error unless the value is 'true' or 'false' Note:2018-10-24
# fill values into binary or nchar columns will be set to NULL automatically Note:2018-10-24
sql select first(c6), first(c7), first(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, '1e', '1e1','1e1')
-sql select first(c6), first(c7), first(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, true, true, true)
+sql_error select first(c6), first(c7), first(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, true, true, true)
sql select first(c6), first(c7), first(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 'true', 'true','true')
# fill nonarithmetic values into arithmetic fields
sql_error select count(*) where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, abc);
-sql select count(*) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 'true');
+sql_error select count(*) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 'true');
print select _wstart, count(*) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, '1e1');
-sql select _wstart, count(*) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, '1e1');
-if $rows != 9 then
- return -1
-endi
-if $data01 != 1 then
- return -1
-endi
+sql_error select _wstart, count(*) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, '1e1');
sql select _wstart, count(*) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 1e1);
if $rows != 9 then
@@ -416,13 +383,7 @@ if $data01 != 1 then
return -1
endi
-sql select _wstart, count(*) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, '10');
-if $rows != 9 then
- return -1
-endi
-if $data01 != 1 then
- return -1
-endi
+sql_error select _wstart, count(*) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, '10');
## linear fill
# feature currently switched off 2018/09/29
@@ -859,8 +820,8 @@ sql insert into tm0 values('2020-1-1 1:3:8', 8);
sql insert into tm0 values('2020-1-1 1:3:9', 9);
sql insert into tm0 values('2020-1-1 1:4:10', 10);
-print select _wstart, max(k)-min(k),last(k)-first(k),0-spread(k) from tm0 where ts>='2020-1-1 1:1:1' and ts<='2020-1-1 1:2:15' interval(10s) fill(value, 99,91,90,89,88,87,86,85);
-sql select _wstart, max(k)-min(k),last(k)-first(k),0-spread(k) from tm0 where ts>='2020-1-1 1:1:1' and ts<='2020-1-1 1:2:15' interval(10s) fill(value, 99,91,90,89,88,87,86,85);
+print select _wstart, max(k)-min(k),last(k)-first(k),0-spread(k) from tm0 where ts>='2020-1-1 1:1:1' and ts<='2020-1-1 1:2:15' interval(10s) fill(value, 99,91,90);
+sql select _wstart, max(k)-min(k),last(k)-first(k),0-spread(k) from tm0 where ts>='2020-1-1 1:1:1' and ts<='2020-1-1 1:2:15' interval(10s) fill(value, 99,91,90);
if $rows != 8 then
return -1
endi
@@ -958,14 +919,14 @@ if $data12 != NULL then
return -1
endi
-sql select _wstart, max(k)-min(k),last(k)-first(k),0-spread(k) from tm0 where ts>='2020-1-1 1:1:1' and ts<='2020-1-1 4:2:15' interval(500a) fill(value, 99,91,90,89,88,87,86,85) ;
+sql select _wstart, max(k)-min(k),last(k)-first(k),0-spread(k) from tm0 where ts>='2020-1-1 1:1:1' and ts<='2020-1-1 4:2:15' interval(500a) fill(value, 99,91,90) ;
if $rows != 21749 then
print expect 21749, actual: $rows
return -1
endi
-print select _wstart, max(k)-min(k),last(k)-first(k),0-spread(k),count(1) from m1 where ts>='2020-1-1 1:1:1' and ts<='2020-1-1 1:2:15' interval(10s) fill(value, 99,91,90,89,88,87,86,85) ;
-sql select _wstart, max(k)-min(k),last(k)-first(k),0-spread(k),count(1) from m1 where ts>='2020-1-1 1:1:1' and ts<='2020-1-1 1:2:15' interval(10s) fill(value, 99,91,90,89,88,87,86,85) ;
+print select _wstart, max(k)-min(k),last(k)-first(k),0-spread(k),count(1) from m1 where ts>='2020-1-1 1:1:1' and ts<='2020-1-1 1:2:15' interval(10s) fill(value, 99,91,90,89) ;
+sql select _wstart, max(k)-min(k),last(k)-first(k),0-spread(k),count(1) from m1 where ts>='2020-1-1 1:1:1' and ts<='2020-1-1 1:2:15' interval(10s) fill(value, 99,91,90,89) ;
if $rows != 8 then
return -1
endi
diff --git a/tests/script/tsim/parser/fill_stb.sim b/tests/script/tsim/parser/fill_stb.sim
index 656b1ac94e8e0954e98b1d10692afc5d696bfd64..6c61631aa8b3a682b75317943ddeb3642720f588 100644
--- a/tests/script/tsim/parser/fill_stb.sim
+++ b/tests/script/tsim/parser/fill_stb.sim
@@ -279,7 +279,7 @@ endi
#endi
## linear fill
-sql select _wstart, max(c1), min(c2), avg(c3), sum(c4), first(c7), last(c8), first(c9) from $stb where ts >= $ts0 and ts <= $tsu partition by t1 interval(5m) fill(linear)
+sql select _wstart, max(c1), min(c2), avg(c3), sum(c4), first(c7), last(c8), first(c9) from $stb where ts >= $ts0 and ts <= $tsu partition by t1 interval(5m) fill(linear)
$val = $rowNum * 2
$val = $val - 1
$val = $val * $tbNum
diff --git a/tests/script/tsim/parser/fill_us.sim b/tests/script/tsim/parser/fill_us.sim
index 0a45c02f58a039baa22d5c71fff04d8e56a6fed6..f760ba3577281fa358e0da9180624b7de2e69b76 100644
--- a/tests/script/tsim/parser/fill_us.sim
+++ b/tests/script/tsim/parser/fill_us.sim
@@ -48,32 +48,11 @@ $tsu = $tsu + $ts0
## fill syntax test
# number of fill values exceeds number of selected columns
print select _wstart, max(c1), max(c2), max(c3), max(c4), max(c5) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 6, 6, 6, 6, 6, 6, 6, 6)
-sql select _wstart, max(c1), max(c2), max(c3), max(c4), max(c5) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 6, 6, 6, 6, 6, 6, 6, 6)
-if $data11 != 6 then
- return -1
-endi
-if $data12 != 6 then
- return -1
-endi
-if $data13 != 6.00000 then
- return -1
-endi
-if $data14 != 6.000000000 then
- return -1
-endi
+sql_error select _wstart, max(c1), max(c2), max(c3), max(c4), max(c5) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 6, 6, 6, 6, 6, 6, 6, 6)
# number of fill values is smaller than number of selected columns
print sql select _wstart, max(c1), max(c2), max(c3) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 6, 6)
-sql select _wstart, max(c1), max(c2), max(c3) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 6, 6)
-if $data11 != 6 then
- return -1
-endi
-if $data12 != 6 then
- return -1
-endi
-if $data13 != 6.00000 then
- return -1
-endi
+sql_error select _wstart, max(c1), max(c2), max(c3) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 6, 6)
# unspecified filling method
sql_error select max(c1), max(c2), max(c3), max(c4), max(c5) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill (6, 6, 6, 6, 6)
@@ -185,7 +164,7 @@ endi
# min_with_fill
print min_with_fill
-sql select min(c1), min(c2), min(c3), min(c4), min(c5) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 6, 6, 6, 6, 6, 6, 6, 6)
+sql select min(c1), min(c2), min(c3), min(c4), min(c5) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 6, 6, 6, 6, 6)
if $rows != 9 then
return -1
endi
@@ -219,7 +198,7 @@ endi
# first_with_fill
print first_with_fill
-sql select _wstart, first(c1), first(c2), first(c3), first(c4), first(c5), first(c6), first(c7), first(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 6, 6, 6, 6, 6, 6, 6, 6)
+sql select _wstart, first(c1), first(c2), first(c3), first(c4), first(c5), first(c6), first(c7), first(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 6, 6, 6, 6, 6, 6, '6', '6')
if $rows != 9 then
return -1
endi
@@ -308,7 +287,7 @@ endi
# last_with_fill
print last_with_fill
-sql select last(c1), last(c2), last(c3), last(c4), last(c5), last(c6), last(c7), last(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 6, 6, 6, 6, 6, 6, 6, 6)
+sql select last(c1), last(c2), last(c3), last(c4), last(c5), last(c6), last(c7), last(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 6, 6, 6, 6, 6, 6, '6', '6')
if $rows != 9 then
return -1
endi
@@ -353,7 +332,7 @@ if $data11 != -1 then
endi
# fill_char_values_to_arithmetic_fields
-sql select sum(c1), avg(c2), max(c3), min(c4), avg(c4), count(c6), last(c7), last(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 'c', 'c', 'c', 'c', 'c', 'c', 'c', 'c')
+sql_error select sum(c1), avg(c2), max(c3), min(c4), avg(c4), count(c6), last(c7), last(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 'c', 'c', 'c', 'c', 'c', 'c', 'c', 'c')
# fill_multiple_columns
sql_error select _wstart, sum(c1), avg(c2), min(c3), max(c4), count(c6), first(c7), last(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 99, 99, 99, 99, 99, abc, abc)
@@ -379,34 +358,24 @@ endi
# fill_into_nonarithmetic_fieds
-sql select _wstart, first(c6), first(c7), first(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 20000000, 20000000, 20000000)
-#if $data11 != 20000000 then
-if $data11 != NULL then
- return -1
-endi
+sql_error select _wstart, first(c6), first(c7), first(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 20000000, 20000000, 20000000)
-sql select first(c6), first(c7), first(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 1, 1, 1)
-sql select first(c6), first(c7), first(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 1.1, 1.1, 1.1)
-sql select first(c6), first(c7), first(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 1e1, 1e1, 1e1)
+sql_error select first(c6), first(c7), first(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 1, 1, 1)
+sql_error select first(c6), first(c7), first(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 1.1, 1.1, 1.1)
+sql_error select first(c6), first(c7), first(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 1e1, 1e1, 1e1)
sql select first(c7), first(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, '1e', '1e1')
# fill quoted values into bool column will throw error unless the value is 'true' or 'false' Note:2018-10-24
# fill values into binary or nchar columns will be set to null automatically Note:2018-10-24
sql select first(c6), first(c7), first(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, '1e', '1e1','1e1')
-sql select first(c6), first(c7), first(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, true, true, true)
+sql_error select first(c6), first(c7), first(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, true, true, true)
sql select first(c6), first(c7), first(c8) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 'true', 'true','true')
# fill nonarithmetic values into arithmetic fields
sql_error select count(*) where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, abc);
-sql select count(*) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 'true');
+sql_error select count(*) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 'true');
-sql select _wstart, count(*) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, '1e1');
-if $rows != 9 then
- return -1
-endi
-if $data01 != 1 then
- return -1
-endi
+sql_error select _wstart, count(*) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, '1e1');
sql select _wstart, count(*) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, 1e1);
if $rows != 9 then
@@ -416,14 +385,7 @@ if $data01 != 1 then
return -1
endi
-sql select _wstart, count(*) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, '10');
-if $rows != 9 then
- return -1
-endi
-if $data01 != 1 then
- return -1
-endi
-
+sql_error select _wstart, count(*) from $tb where ts >= $ts0 and ts <= $tsu interval(5m) fill(value, '10');
## linear fill
# feature currently switched off 2018/09/29
diff --git a/tests/script/tsim/parser/groupby.sim b/tests/script/tsim/parser/groupby.sim
index 12a698b1ccb2273d10c1831948103ab88f494d54..4ee9c530a79c72ccac12a99922af1eeefc7485ed 100644
--- a/tests/script/tsim/parser/groupby.sim
+++ b/tests/script/tsim/parser/groupby.sim
@@ -557,7 +557,7 @@ if $data10 != @{slop:0.000000, intercept:1.000000}@ then
return -1
endi
-if $data90 != @{slop:0.000000, intercept:9.000000}@ then
+if $data90 != @{slop:0.000000, intercept:17.000000}@ then
return -1
endi
diff --git a/tests/script/tsim/parser/import_file.sim b/tests/script/tsim/parser/import_file.sim
index e031e0249dd5a3b9efec7b9fed2505671f645e2c..37dc0c447623a8ea54f8d0e7228e38749e7a41be 100644
--- a/tests/script/tsim/parser/import_file.sim
+++ b/tests/script/tsim/parser/import_file.sim
@@ -69,4 +69,4 @@ endi
system rm -f $inFileName
-system sh/exec.sh -n dnode1 -s stop -x SIGINT
\ No newline at end of file
+system sh/exec.sh -n dnode1 -s stop -x SIGINT
diff --git a/tests/script/tsim/parser/repeatAlter.sim b/tests/script/tsim/parser/repeatAlter.sim
index d28a03e193a031ee95b5d237481de8ed31651877..b4012048cc314682e6bdb971a8e4a97fb1c2ca65 100644
--- a/tests/script/tsim/parser/repeatAlter.sim
+++ b/tests/script/tsim/parser/repeatAlter.sim
@@ -6,4 +6,4 @@ while $i <= $loops
$i = $i + 1
endw
-system sh/exec.sh -n dnode1 -s stop -x SIGINT
\ No newline at end of file
+system sh/exec.sh -n dnode1 -s stop -x SIGINT
diff --git a/tests/script/tsim/parser/select_from_cache_disk.sim b/tests/script/tsim/parser/select_from_cache_disk.sim
index 0983e36a3a579f88bdb429e9ad62a67c4fe6823b..3c0b13c6388c2386da011b2576262b65a6f018d5 100644
--- a/tests/script/tsim/parser/select_from_cache_disk.sim
+++ b/tests/script/tsim/parser/select_from_cache_disk.sim
@@ -60,4 +60,4 @@ if $data12 != 1 then
return -1
endi
-system sh/exec.sh -n dnode1 -s stop -x SIGINT
\ No newline at end of file
+system sh/exec.sh -n dnode1 -s stop -x SIGINT
diff --git a/tests/script/tsim/parser/single_row_in_tb.sim b/tests/script/tsim/parser/single_row_in_tb.sim
index 1bd53ad24ef17c89bf5bfd1ddec9ed78b969cf9a..e7b4c9a871b4d8409a8a1624ff83b71fb77a77c2 100644
--- a/tests/script/tsim/parser/single_row_in_tb.sim
+++ b/tests/script/tsim/parser/single_row_in_tb.sim
@@ -33,4 +33,4 @@ print ================== server restart completed
run tsim/parser/single_row_in_tb_query.sim
-system sh/exec.sh -n dnode1 -s stop -x SIGINT
\ No newline at end of file
+system sh/exec.sh -n dnode1 -s stop -x SIGINT
diff --git a/tests/script/tsim/parser/single_row_in_tb_query.sim b/tests/script/tsim/parser/single_row_in_tb_query.sim
index 422756b798cbeb1b3e70d14d952457df0e54a202..37e193f9d202c0f94748342f3a67e1565e8490d3 100644
--- a/tests/script/tsim/parser/single_row_in_tb_query.sim
+++ b/tests/script/tsim/parser/single_row_in_tb_query.sim
@@ -195,4 +195,4 @@ endi
print ===============>safty check TD-4927
sql select first(ts, c1) from sr_stb where ts<1 group by t1;
-sql select first(ts, c1) from sr_stb where ts>0 and ts<1;
\ No newline at end of file
+sql select first(ts, c1) from sr_stb where ts>0 and ts<1;
diff --git a/tests/script/tsim/parser/slimit_alter_tags.sim b/tests/script/tsim/parser/slimit_alter_tags.sim
index 3827b14b453132687c254250538d53ae51ab7279..b5afbfa56eea5fb6ba440e08818e861d5a96da3d 100644
--- a/tests/script/tsim/parser/slimit_alter_tags.sim
+++ b/tests/script/tsim/parser/slimit_alter_tags.sim
@@ -128,6 +128,7 @@ if $rows != 5 then
return -1
endi
if $data00 != $rowNum then
+ print expect $rowNum , actual: $data00
return -1
endi
if $data10 != $rowNum then
diff --git a/tests/script/tsim/parser/slimit_query.sim b/tests/script/tsim/parser/slimit_query.sim
index 1e04a31099b0a9d948d1fd5fff229b0db940390c..acf0489d3c667834f630b41977240da86dcf4cfd 100644
--- a/tests/script/tsim/parser/slimit_query.sim
+++ b/tests/script/tsim/parser/slimit_query.sim
@@ -93,25 +93,25 @@ if $rows != 3 then
endi
### slimit + fill
-sql select max(c1), min(c2), avg(c3), sum(c4), sum(c6), count(c7), first(c8), last(c9) from $stb where ts >= $ts0 and ts <= $tsu and t2 >= 2 and t3 <= 5 partition by t1 interval(5m) fill(value, -1, -2) slimit 4 soffset 4 limit 0 offset 0
+sql select max(c1), min(c2), avg(c3), sum(c4), sum(c6), count(c7), first(c8), last(c9) from $stb where ts >= $ts0 and ts <= $tsu and t2 >= 2 and t3 <= 5 partition by t1 interval(5m) fill(linear) slimit 4 soffset 4 limit 0 offset 0
if $rows != 0 then
return -1
endi
-sql select max(c1), min(c2), avg(c3), sum(c4), sum(c6), count(c7), first(c8), last(c9) from $stb where ts >= $ts0 and ts <= $tsu and t2 >= 2 and t3 <= 9 partition by t1 interval(5m) fill(value, -1, -2) slimit 4 soffset 4 limit 2 offset 0
-print select max(c1), min(c2), avg(c3), sum(c4), sum(c6), count(c7), first(c8), last(c9) from $stb where ts >= $ts0 and ts <= $tsu and t2 >= 2 and t3 <= 9 partition by t1 interval(5m) fill(value, -1, -2) slimit 4 soffset 4 limit 2 offset 0
+sql select max(c1), min(c2), avg(c3), sum(c4), sum(c6), count(c7), first(c8), last(c9) from $stb where ts >= $ts0 and ts <= $tsu and t2 >= 2 and t3 <= 9 partition by t1 interval(5m) fill(linear) slimit 4 soffset 4 limit 2 offset 0
+print select max(c1), min(c2), avg(c3), sum(c4), sum(c6), count(c7), first(c8), last(c9) from $stb where ts >= $ts0 and ts <= $tsu and t2 >= 2 and t3 <= 9 partition by t1 interval(5m) fill(linear) slimit 4 soffset 4 limit 2 offset 0
print $rows $data00 $data01 $data02 $data03
if $rows != 8 then
return -1
endi
# desc
-sql select max(c1), min(c2), avg(c3), sum(c4), sum(c6), count(c7), first(c8), last(c9) from $stb where ts >= $ts0 and ts <= $tsu and t2 >= 2 and t3 <= 9 partition by t1 interval(5m) fill(value, -1, -2) slimit 4 soffset 4 limit 2 offset 0
+sql select max(c1), min(c2), avg(c3), sum(c4), sum(c6), count(c7), first(c8), last(c9) from $stb where ts >= $ts0 and ts <= $tsu and t2 >= 2 and t3 <= 9 partition by t1 interval(5m) fill(linear) slimit 4 soffset 4 limit 2 offset 0
if $rows != 8 then
return -1
endi
-sql select max(c1), min(c2), avg(c3), sum(c4), sum(c6), count(c7), first(c8), last(c9) from $stb where ts >= $ts0 and ts <= $tsu and t2 >= 2 and t3 <= 9 partition by t1 interval(5m) fill(value, -1, -2) slimit 4 soffset 4 limit 2 offset 598
+sql select max(c1), min(c2), avg(c3), sum(c4), sum(c6), count(c7), first(c8), last(c9) from $stb where ts >= $ts0 and ts <= $tsu and t2 >= 2 and t3 <= 9 partition by t1 interval(5m) fill(linear) slimit 4 soffset 4 limit 2 offset 598
if $rows != 4 then
return -1
endi
diff --git a/tests/script/tsim/parser/timestamp_query.sim b/tests/script/tsim/parser/timestamp_query.sim
index 6e92dbcb3ab28518dc452e474aee955a3003c596..24058cbc84912033b41f49b3e05ee2fecbe4d221 100644
--- a/tests/script/tsim/parser/timestamp_query.sim
+++ b/tests/script/tsim/parser/timestamp_query.sim
@@ -28,7 +28,7 @@ sql select * from ts_stb0 where ts <> $ts0
##### select from supertable
$tb = $tbPrefix . 0
-sql select _wstart, first(c1), last(c1), (1537325400 - 1537146000)/(5*60) v from $tb where ts >= $ts0 and ts < $tsu interval(5m) fill(value, -1)
+sql select _wstart, first(c1), last(c1), (1537325400 - 1537146000)/(5*60) v from $tb where ts >= $ts0 and ts < $tsu interval(5m) fill(value, -1, -1)
$res = $rowNum * 2
$n = $res - 2
print ============>$n
@@ -47,7 +47,7 @@ if $data13 != 598.000000000 then
return -1
endi
-sql select _wstart, first(c1), last(c1), (1537325400 - 1537146000)/(5*60) v from $tb where ts >= $ts0 and ts < $tsu interval(5m) fill(value, NULL)
+sql select _wstart, first(c1), last(c1), (1537325400 - 1537146000)/(5*60) v from $tb where ts >= $ts0 and ts < $tsu interval(5m) fill(value, NULL, NULL)
if $data13 != 598.000000000 then
print expect 598.000000000, actual $data03
return -1
diff --git a/tests/script/tsim/query/complex_group.sim b/tests/script/tsim/query/complex_group.sim
index 3dad8059cd148504118d56a63f60b25247dc0fb6..d7d14c0ee82b3e10e06f509b4e6a7821be9c901f 100644
--- a/tests/script/tsim/query/complex_group.sim
+++ b/tests/script/tsim/query/complex_group.sim
@@ -454,4 +454,4 @@ if $rows != 1 then
return -1
endi
-system sh/exec.sh -n dnode1 -s stop -x SIGINT
\ No newline at end of file
+system sh/exec.sh -n dnode1 -s stop -x SIGINT
diff --git a/tests/script/tsim/query/complex_having.sim b/tests/script/tsim/query/complex_having.sim
index 9e28c3803e373e1d973b34c39573b4a7ec4f13f3..4c0af6d10c2d796638be619c6092618217b01257 100644
--- a/tests/script/tsim/query/complex_having.sim
+++ b/tests/script/tsim/query/complex_having.sim
@@ -365,4 +365,4 @@ if $rows != 1 then
return -1
endi
-system sh/exec.sh -n dnode1 -s stop -x SIGINT
\ No newline at end of file
+system sh/exec.sh -n dnode1 -s stop -x SIGINT
diff --git a/tests/script/tsim/query/complex_limit.sim b/tests/script/tsim/query/complex_limit.sim
index 2a90e7ff1d1f1a4ba25f79a94339219f3d4f5683..acb133f6504f8076161476cfcf6b8f73493157fc 100644
--- a/tests/script/tsim/query/complex_limit.sim
+++ b/tests/script/tsim/query/complex_limit.sim
@@ -508,4 +508,4 @@ if $rows != 1 then
return -1
endi
-system sh/exec.sh -n dnode1 -s stop -x SIGINT
\ No newline at end of file
+system sh/exec.sh -n dnode1 -s stop -x SIGINT
diff --git a/tests/script/tsim/query/complex_select.sim b/tests/script/tsim/query/complex_select.sim
index f4c9877bfd4c32622238cf21eafac8c35aaafa19..b7697e5cab0e654a40dd16f55f57cfbba4c5653e 100644
--- a/tests/script/tsim/query/complex_select.sim
+++ b/tests/script/tsim/query/complex_select.sim
@@ -558,4 +558,4 @@ if $data00 != 33 then
endi
-system sh/exec.sh -n dnode1 -s stop -x SIGINT
\ No newline at end of file
+system sh/exec.sh -n dnode1 -s stop -x SIGINT
diff --git a/tests/script/tsim/query/complex_where.sim b/tests/script/tsim/query/complex_where.sim
index bda1c036f02ded7953f8049a46318479b5feb106..847f67ed3461a88c16e1697386f8ee0d6f91d438 100644
--- a/tests/script/tsim/query/complex_where.sim
+++ b/tests/script/tsim/query/complex_where.sim
@@ -669,4 +669,4 @@ if $rows != 1 then
return -1
endi
-system sh/exec.sh -n dnode1 -s stop -x SIGINT
\ No newline at end of file
+system sh/exec.sh -n dnode1 -s stop -x SIGINT
diff --git a/tests/script/tsim/query/crash_sql.sim b/tests/script/tsim/query/crash_sql.sim
index 1d20491869db719c84065fb6a765268c7366c80b..79a9165e6602b1e8b1931e0f3ad9bf7d0168450f 100644
--- a/tests/script/tsim/query/crash_sql.sim
+++ b/tests/script/tsim/query/crash_sql.sim
@@ -76,7 +76,7 @@ sql insert into ct4 values ( '2022-05-21 01:01:01.000', NULL, NULL, NULL, NULL,
print ================ start query ======================
-print ================ SQL used to cause taosd or taos shell crash
+print ================ SQL used to cause taosd or TDengine CLI crash
sql_error select sum(c1) ,count(c1) from ct4 group by c1 having sum(c10) between 0 and 1 ;
-#system sh/exec.sh -n dnode1 -s stop -x SIGINT
\ No newline at end of file
+#system sh/exec.sh -n dnode1 -s stop -x SIGINT
diff --git a/tests/script/tsim/query/diff.sim b/tests/script/tsim/query/diff.sim
index f0d82b01e92bdffc06f951a5d3911ae4338037d9..badd139a9f7b25aa4192e3f97b0cefe825efc597 100644
--- a/tests/script/tsim/query/diff.sim
+++ b/tests/script/tsim/query/diff.sim
@@ -25,17 +25,17 @@ $i = 0
while $i < $tbNum
$tb = $tbPrefix . $i
sql create table $tb using $mt tags( $i )
-
+
$x = 0
while $x < $rowNum
$cc = $x * 60000
$ms = 1601481600000 + $cc
- sql insert into $tb values ($ms , $x )
+ sql insert into $tb values ($ms , $x )
$x = $x + 1
- endw
-
+ endw
+
$i = $i + 1
-endw
+endw
sleep 100
@@ -61,7 +61,7 @@ sql select _rowts, diff(tbcol) from $tb where ts > $ms
print ===> rows: $rows
print ===> $data00 $data01 $data02 $data03 $data04 $data05
print ===> $data10 $data11 $data12 $data13 $data14 $data15
-if $data11 != 1 then
+if $data11 != 1 then
return -1
endi
@@ -72,7 +72,7 @@ sql select _rowts, diff(tbcol) from $tb where ts <= $ms
print ===> rows: $rows
print ===> $data00 $data01 $data02 $data03 $data04 $data05
print ===> $data10 $data11 $data12 $data13 $data14 $data15
-if $data11 != 1 then
+if $data11 != 1 then
return -1
endi
@@ -82,7 +82,7 @@ sql select _rowts, diff(tbcol) as b from $tb
print ===> rows: $rows
print ===> $data00 $data01 $data02 $data03 $data04 $data05
print ===> $data10 $data11 $data12 $data13 $data14 $data15
-if $data11 != 1 then
+if $data11 != 1 then
return -1
endi
@@ -107,4 +107,4 @@ if $rows != 2 then
return -1
endi
-system sh/exec.sh -n dnode1 -s stop -x SIGINT
\ No newline at end of file
+system sh/exec.sh -n dnode1 -s stop -x SIGINT
diff --git a/tests/script/tsim/query/explain.sim b/tests/script/tsim/query/explain.sim
index 30a857815ceea75b399c1cf37c351ff80e37189d..2871252d91b822e02911931bf2c8a848472a5e9d 100644
--- a/tests/script/tsim/query/explain.sim
+++ b/tests/script/tsim/query/explain.sim
@@ -3,7 +3,7 @@ system sh/deploy.sh -n dnode1 -i 1
system sh/exec.sh -n dnode1 -s start
sql connect
-print ======== step1
+print ======== step1
sql create database db1 vgroups 3;
sql use db1;
sql select * from information_schema.ins_databases;
@@ -30,7 +30,7 @@ sql insert into tb4 values (now, 4, "Bitmap Heap Scan on tenk1 t1 (cost=5.07..2
#sql insert into tb4 values (now, 4, "Bitmap Heap Scan on tenk1 t1 (cost=5.07..229.20 rows=101 width=244) (actual time=0.080..0.526 rows=100 loops=1)");
-print ======== step2
+print ======== step2
sql explain select * from st1 where -2;
sql explain select ts from tb1;
sql explain select * from st1;
@@ -41,14 +41,14 @@ sql explain select count(*),sum(f1) from st1;
sql explain select count(*),sum(f1) from st1 group by f1;
#sql explain select count(f1) from tb1 interval(10s, 2s) sliding(3s) fill(prev);
-print ======== step3
+print ======== step3
sql explain verbose true select * from st1 where -2;
sql explain verbose true select ts from tb1 where f1 > 0;
sql explain verbose true select * from st1 where f1 > 0 and ts > '2020-10-31 00:00:00' and ts < '2021-10-31 00:00:00';
sql explain verbose true select count(*) from st1 partition by tbname slimit 1 soffset 2 limit 2 offset 1;
sql explain verbose true select * from information_schema.ins_stables where db_name='db2';
-print ======== step4
+print ======== step4
sql explain analyze select ts from st1 where -2;
sql explain analyze select ts from tb1;
sql explain analyze select ts from st1;
@@ -59,7 +59,7 @@ sql explain analyze select count(*),sum(f1) from tb1;
sql explain analyze select count(*),sum(f1) from st1;
sql explain analyze select count(*),sum(f1) from st1 group by f1;
-print ======== step5
+print ======== step5
sql explain analyze verbose true select ts from st1 where -2;
sql explain analyze verbose true select ts from tb1;
sql explain analyze verbose true select ts from st1;
@@ -87,12 +87,12 @@ sql explain analyze verbose true select count(f1) from st1 group by tbname;
#sql explain select * from tb1, tb2 where tb1.ts=tb2.ts;
#sql explain select * from st1, st2 where tb1.ts=tb2.ts;
#sql explain analyze verbose true select sum(a+b) from (select _rowts, min(f1) b,count(*) a from st1 where f1 > 0 interval(1a)) where a < 0 interval(1s);
-#sql explain select min(f1) from st1 interval(1m, 2a) sliding(30s);
+#sql explain select min(f1) from st1 interval(1m, 2a) sliding(30s);
#sql explain verbose true select count(*),sum(f1) from st1 where f1 > 0 and ts > '2021-10-31 00:00:00' group by f1 having sum(f1) > 0;
-#sql explain analyze select min(f1) from st1 interval(3m, 2a) sliding(1m);
+#sql explain analyze select min(f1) from st1 interval(3m, 2a) sliding(1m);
#sql explain analyze select count(f1) from tb1 interval(10s, 2s) sliding(3s) fill(prev);
#sql explain analyze verbose true select count(*),sum(f1) from st1 where f1 > 0 and ts > '2021-10-31 00:00:00' group by f1 having sum(f1) > 0;
-#sql explain analyze verbose true select min(f1) from st1 interval(3m, 2a) sliding(1m);
+#sql explain analyze verbose true select min(f1) from st1 interval(3m, 2a) sliding(1m);
system sh/exec.sh -n dnode1 -s stop -x SIGINT
diff --git a/tests/script/tsim/query/interval.sim b/tests/script/tsim/query/interval.sim
index cc8a73daec1ad54fb1448480b0efd317bbd09be9..833da4a8ba2b3daf495167f06c99d222564a6bf3 100644
--- a/tests/script/tsim/query/interval.sim
+++ b/tests/script/tsim/query/interval.sim
@@ -177,4 +177,4 @@ print =============== clear
# return -1
#endi
-system sh/exec.sh -n dnode1 -s stop -x SIGINT
\ No newline at end of file
+system sh/exec.sh -n dnode1 -s stop -x SIGINT
diff --git a/tests/script/tsim/query/scalarFunction.sim b/tests/script/tsim/query/scalarFunction.sim
index 103e66e54e674c10e3fbe3bd88e044ffe7d0041d..1b8115fec635832116f722ce1fb22810d817a0b7 100644
--- a/tests/script/tsim/query/scalarFunction.sim
+++ b/tests/script/tsim/query/scalarFunction.sim
@@ -33,7 +33,7 @@ print =============== create normal table
sql create table ntb (ts timestamp, c1 int, c2 float, c3 double)
sql show tables
-if $rows != 101 then
+if $rows != 101 then
return -1
endi
@@ -444,7 +444,7 @@ if $loop_test == 0 then
print =============== stop and restart taosd
system sh/exec.sh -n dnode1 -s stop -x SIGINT
system sh/exec.sh -n dnode1 -s start
-
+
$loop_cnt = 0
check_dnode_ready_0:
$loop_cnt = $loop_cnt + 1
@@ -462,7 +462,7 @@ if $loop_test == 0 then
goto check_dnode_ready_0
endi
- $loop_test = 1
+ $loop_test = 1
goto loop_test_pos
endi
diff --git a/tests/script/tsim/query/scalarNull.sim b/tests/script/tsim/query/scalarNull.sim
index ec95c94f23c12babb06b25b06ce140c9a4a5368a..6abe3d62d9b1aaf88872054c5bd040098400debb 100644
--- a/tests/script/tsim/query/scalarNull.sim
+++ b/tests/script/tsim/query/scalarNull.sim
@@ -3,7 +3,7 @@ system sh/deploy.sh -n dnode1 -i 1
system sh/exec.sh -n dnode1 -s start
sql connect
-print ======== step1
+print ======== step1
sql create database db1 vgroups 3;
sql use db1;
sql select * from information_schema.ins_databases;
diff --git a/tests/script/tsim/query/session.sim b/tests/script/tsim/query/session.sim
index 158448d76537947d1f6a0fb8d9569becc33fcdd8..b6eb4ed3aa2aae6873eed4fb0c8056c95ebe6bb6 100644
--- a/tests/script/tsim/query/session.sim
+++ b/tests/script/tsim/query/session.sim
@@ -35,8 +35,8 @@ sql INSERT INTO dev_001 VALUES('2020-05-13 13:00:00.001', 12)
sql INSERT INTO dev_001 VALUES('2020-05-14 13:00:00.001', 13)
sql INSERT INTO dev_001 VALUES('2020-05-15 14:00:00.000', 14)
sql INSERT INTO dev_001 VALUES('2020-05-20 10:00:00.000', 15)
-sql INSERT INTO dev_001 VALUES('2020-05-27 10:00:00.001', 16)
-
+sql INSERT INTO dev_001 VALUES('2020-05-27 10:00:00.001', 16)
+
sql INSERT INTO dev_002 VALUES('2020-05-13 10:00:00.000', 1)
sql INSERT INTO dev_002 VALUES('2020-05-13 10:00:00.005', 2)
sql INSERT INTO dev_002 VALUES('2020-05-13 10:00:00.009', 3)
@@ -46,7 +46,7 @@ sql INSERT INTO dev_002 VALUES('2020-05-13 10:00:00.036', 6)
sql INSERT INTO dev_002 VALUES('2020-05-13 10:00:00.51', 7)
# vnode does not return the precision of the table
-print ====> create database d1 precision 'us'
+print ====> create database d1 precision 'us'
sql create database d1 precision 'us'
sql use d1
sql create table dev_001 (ts timestamp ,i timestamp ,j int)
@@ -54,7 +54,7 @@ sql insert into dev_001 values(1623046993681000,now,1)(1623046993681001,now+1s,2
sql create table secondts(ts timestamp,t2 timestamp,i int)
sql insert into secondts values(1623046993681000,now,1)(1623046993681001,now+1s,2)(1623046993681002,now+2s,3)(1623046993681004,now+5s,4)
-$loop_test = 0
+$loop_test = 0
loop_test_pos:
sql use $dbNamme
@@ -299,7 +299,7 @@ if $loop_test == 0 then
print =============== stop and restart taosd
system sh/exec.sh -n dnode1 -s stop -x SIGINT
system sh/exec.sh -n dnode1 -s start
-
+
$loop_cnt = 0
check_dnode_ready_0:
$loop_cnt = $loop_cnt + 1
@@ -317,7 +317,7 @@ if $loop_test == 0 then
goto check_dnode_ready_0
endi
- $loop_test = 1
+ $loop_test = 1
goto loop_test_pos
endi
diff --git a/tests/script/tsim/query/stddev.sim b/tests/script/tsim/query/stddev.sim
index d61c7273e19ebee84cd0117a9faf163c3a854005..b45c7d80a3edd8319f199e07fd607ab4f474df23 100644
--- a/tests/script/tsim/query/stddev.sim
+++ b/tests/script/tsim/query/stddev.sim
@@ -409,4 +409,4 @@ if $rows != 2 then
return -1
endi
-system sh/exec.sh -n dnode1 -s stop -x SIGINT
\ No newline at end of file
+system sh/exec.sh -n dnode1 -s stop -x SIGINT
diff --git a/tests/script/tsim/query/time_process.sim b/tests/script/tsim/query/time_process.sim
index b3c0e9561f149445a7ae75036736bbf6f8eaf4a4..83a64458465d6d978a38a206b2a7b223cb2bf45d 100644
--- a/tests/script/tsim/query/time_process.sim
+++ b/tests/script/tsim/query/time_process.sim
@@ -111,4 +111,4 @@ if $rows != 2 then
return -1
endi
-system sh/exec.sh -n dnode1 -s stop -x SIGINT
\ No newline at end of file
+system sh/exec.sh -n dnode1 -s stop -x SIGINT
diff --git a/tests/script/tsim/query/udf.sim b/tests/script/tsim/query/udf.sim
index 7cc1403bcb215547209b1c41dcf0351f9fc80bfd..7f8b1044ef528a3a771946f878167b1123ddd9db 100644
--- a/tests/script/tsim/query/udf.sim
+++ b/tests/script/tsim/query/udf.sim
@@ -9,7 +9,7 @@ system sh/cfg.sh -n dnode1 -c udf -v 1
system sh/exec.sh -n dnode1 -s start
sql connect
-print ======== step1 udf
+print ======== step1 udf
system sh/compile_udf.sh
sql create database udf vgroups 3;
sql use udf;
diff --git a/tests/script/tsim/scalar/filter.sim b/tests/script/tsim/scalar/filter.sim
new file mode 100644
index 0000000000000000000000000000000000000000..923166227856189e91848150ed9e848f946b066d
--- /dev/null
+++ b/tests/script/tsim/scalar/filter.sim
@@ -0,0 +1,38 @@
+system sh/stop_dnodes.sh
+system sh/deploy.sh -n dnode1 -i 1
+system sh/exec.sh -n dnode1 -s start
+sql connect
+
+print ======== step1
+sql drop database if exists db1;
+sql create database db1 vgroups 3;
+sql use db1;
+sql create stable st1 (fts timestamp, fbool bool, ftiny tinyint, fsmall smallint, fint int, fbig bigint, futiny tinyint unsigned, fusmall smallint unsigned, fuint int unsigned, fubig bigint unsigned, ffloat float, fdouble double, fbin binary(10), fnchar nchar(10)) tags(tts timestamp, tbool bool, ttiny tinyint, tsmall smallint, tint int, tbig bigint, tutiny tinyint unsigned, tusmall smallint unsigned, tuint int unsigned, tubig bigint unsigned, tfloat float, tdouble double, tbin binary(10), tnchar nchar(10));
+sql create table tb1 using st1 tags('2022-07-10 16:31:00', true, 1, 1, 1, 1, 1, 1, 1, 1, 1.0, 1.0, 'a', 'a');
+sql create table tb2 using st1 tags('2022-07-10 16:32:00', false, 2, 2, 2, 2, 2, 2, 2, 2, 2.0, 2.0, 'b', 'b');
+sql create table tb3 using st1 tags('2022-07-10 16:33:00', true, 3, 3, 3, 3, 3, 3, 3, 3, 3.0, 3.0, 'c', 'c');
+
+sql insert into tb1 values ('2022-07-10 16:31:01', false, 1, 1, 1, 1, 1, 1, 1, 1, 1.0, 1.0, 'a', 'a');
+sql insert into tb1 values ('2022-07-10 16:31:02', true, 2, 2, 2, 2, 2, 2, 2, 2, 2.0, 2.0, 'b', 'b');
+sql insert into tb1 values ('2022-07-10 16:31:03', false, 3, 3, 3, 3, 3, 3, 3, 3, 3.0, 3.0, 'c', 'c');
+sql insert into tb1 values ('2022-07-10 16:31:04', true, 4, 4, 4, 4, 4, 4, 4, 4, 4.0, 4.0, 'd', 'd');
+sql insert into tb1 values ('2022-07-10 16:31:05', false, 5, 5, 5, 5, 5, 5, 5, 5, 5.0, 5.0, 'e', 'e');
+
+sql insert into tb2 values ('2022-07-10 16:32:01', false, 1, 1, 1, 1, 1, 1, 1, 1, 1.0, 1.0, 'a', 'a');
+sql insert into tb2 values ('2022-07-10 16:32:02', true, 2, 2, 2, 2, 2, 2, 2, 2, 2.0, 2.0, 'b', 'b');
+sql insert into tb2 values ('2022-07-10 16:32:03', false, 3, 3, 3, 3, 3, 3, 3, 3, 3.0, 3.0, 'c', 'c');
+sql insert into tb2 values ('2022-07-10 16:32:04', true, 4, 4, 4, 4, 4, 4, 4, 4, 4.0, 4.0, 'd', 'd');
+sql insert into tb2 values ('2022-07-10 16:32:05', false, 5, 5, 5, 5, 5, 5, 5, 5, 5.0, 5.0, 'e', 'e');
+
+sql insert into tb3 values ('2022-07-10 16:33:01', false, 1, 1, 1, 1, 1, 1, 1, 1, 1.0, 1.0, 'a', 'a');
+sql insert into tb3 values ('2022-07-10 16:33:02', true, 2, 2, 2, 2, 2, 2, 2, 2, 2.0, 2.0, 'b', 'b');
+sql insert into tb3 values ('2022-07-10 16:33:03', false, 3, 3, 3, 3, 3, 3, 3, 3, 3.0, 3.0, 'c', 'c');
+sql insert into tb3 values ('2022-07-10 16:33:04', true, 4, 4, 4, 4, 4, 4, 4, 4, 4.0, 4.0, 'd', 'd');
+sql insert into tb3 values ('2022-07-10 16:33:05', false, 5, 5, 5, 5, 5, 5, 5, 5, 5.0, 5.0, 'e', 'e');
+
+sql select * from st1 where (ttiny > 2 or ftiny < 5) and ftiny > 2;
+if $rows != 7 then
+ return -1
+endi
+
+system sh/exec.sh -n dnode1 -s stop -x SIGINT
diff --git a/tests/script/tsim/stable/alter_comment.sim b/tests/script/tsim/stable/alter_comment.sim
index beb049985c90ca7f8c7521c004c950609cc05347..7c2d6edfcbe48e6e4afc55536e893cbc7d0dbc20 100644
--- a/tests/script/tsim/stable/alter_comment.sim
+++ b/tests/script/tsim/stable/alter_comment.sim
@@ -95,7 +95,7 @@ sql_error alter table db.stb add tag t1 int
sql_error alter table db.stb add tag t2 int
sql_error alter table db.stb add tag t3 int
sql alter table db.stb add tag t4 bigint
-sql alter table db.stb add tag c1 int
+sql alter table db.stb add tag c1 int
sql alter table db.stb add tag t5 binary(12)
sql select * from information_schema.ins_stables where db_name = 'db'
diff --git a/tests/script/tsim/stable/alter_count.sim b/tests/script/tsim/stable/alter_count.sim
index 83ea4b14fa733821316814dc6b4f47c7f239e1e8..4a2aeca029175c73a82d622b59777782f27639ab 100644
--- a/tests/script/tsim/stable/alter_count.sim
+++ b/tests/script/tsim/stable/alter_count.sim
@@ -5,8 +5,8 @@ print ========= start dnode1 as master
system sh/exec.sh -n dnode1 -s start
sql connect
-print ======== step1
-sql create database d1 replica 1 duration 7 keep 50
+print ======== step1
+sql create database d1 replica 1 duration 7 keep 50
sql use d1
sql create table tb (ts timestamp, a int)
sql insert into tb values(now-28d, -28)
@@ -83,7 +83,7 @@ if $data00 != 3 then
endi
print ======== step8
-# sql alter table tb(ts timestamp, a int, b smallint, c tinyint, d int, e bigint, f float, g double, h binary(10) )
+# sql alter table tb(ts timestamp, a int, b smallint, c tinyint, d int, e bigint, f float, g double, h binary(10) )
sql alter table tb add column h binary(10)
sql insert into tb values(now-7d, -7, 18, 0, 0, 0, 0, 0, '0')
sql insert into tb values(now-6d, -6, 19, 1, 1, 1, 1, 1, '1')
@@ -260,4 +260,4 @@ if $data00 != 31 then
return -1
endi
-system sh/exec.sh -n dnode1 -s stop -x SIGINT
\ No newline at end of file
+system sh/exec.sh -n dnode1 -s stop -x SIGINT
diff --git a/tests/script/tsim/stable/alter_import.sim b/tests/script/tsim/stable/alter_import.sim
index b968eb6a124a8f8d232f03090e4ce67b06be735e..7431ea698acbe5f504e5d8c6abf64c1877420fd5 100644
--- a/tests/script/tsim/stable/alter_import.sim
+++ b/tests/script/tsim/stable/alter_import.sim
@@ -5,7 +5,7 @@ print ========= start dnode1 as master
system sh/exec.sh -n dnode1 -s start
sql connect
-print ======== step1
+print ======== step1
sql create database d1 replica 1 duration 7 keep 50
sql use d1
sql create table tb (ts timestamp, a int)
@@ -42,4 +42,4 @@ if $data00 != 6 then
return -1
endi
-system sh/exec.sh -n dnode1 -s stop -x SIGINT
\ No newline at end of file
+system sh/exec.sh -n dnode1 -s stop -x SIGINT
diff --git a/tests/script/tsim/stable/alter_insert1.sim b/tests/script/tsim/stable/alter_insert1.sim
index bcea0b48c4032fa1d0ddd56a2c467559a39e8a77..0e5617e92d63a049e288c318e007bfdfd79e7b9b 100644
--- a/tests/script/tsim/stable/alter_insert1.sim
+++ b/tests/script/tsim/stable/alter_insert1.sim
@@ -3,7 +3,7 @@ system sh/deploy.sh -n dnode1 -i 1
system sh/exec.sh -n dnode1 -s start
sql connect
-print ======== step1
+print ======== step1
sql create database d3
sql use d3
sql create table tb (ts timestamp, a int)
@@ -1137,4 +1137,4 @@ if $data79 != null then
return -1
endi
-system sh/exec.sh -n dnode1 -s stop -x SIGINT
\ No newline at end of file
+system sh/exec.sh -n dnode1 -s stop -x SIGINT
diff --git a/tests/script/tsim/stable/alter_insert2.sim b/tests/script/tsim/stable/alter_insert2.sim
index faae24d32aee731b9bee8ca4e5b89816c58cfb1c..a6046f3dda81458c4f760fb0c48b1352e21105fe 100644
--- a/tests/script/tsim/stable/alter_insert2.sim
+++ b/tests/script/tsim/stable/alter_insert2.sim
@@ -3,7 +3,7 @@ system sh/deploy.sh -n dnode1 -i 1
system sh/exec.sh -n dnode1 -s start
sql connect
-print ======== step1
+print ======== step1
sql create database d4
sql use d4
sql create table tb (ts timestamp, a int, b smallint, c tinyint, d int, e bigint, f float, g double, h binary(10))
@@ -662,4 +662,4 @@ if $data62 != null then
return -1
endi
-system sh/exec.sh -n dnode1 -s stop -x SIGINT
\ No newline at end of file
+system sh/exec.sh -n dnode1 -s stop -x SIGINT
diff --git a/tests/script/tsim/stable/alter_metrics.sim b/tests/script/tsim/stable/alter_metrics.sim
index e32250de130210612a8f7bf70df7225da381c1ab..203f41e18bcf3148ab6b954288320b326bb3c07d 100644
--- a/tests/script/tsim/stable/alter_metrics.sim
+++ b/tests/script/tsim/stable/alter_metrics.sim
@@ -3,7 +3,7 @@ system sh/deploy.sh -n dnode1 -i 1
system sh/exec.sh -n dnode1 -s start
sql connect
-print ======== step1
+print ======== step1
sql create database d2
sql use d2
sql create table mt (ts timestamp, a int) TAGS (t int)
@@ -757,8 +757,8 @@ endi
print ======= over
sql drop database d2
sql select * from information_schema.ins_databases
-if $rows != 2 then
+if $rows != 2 then
return -1
endi
-system sh/exec.sh -n dnode1 -s stop -x SIGINT
\ No newline at end of file
+system sh/exec.sh -n dnode1 -s stop -x SIGINT
diff --git a/tests/script/tsim/stable/column_add.sim b/tests/script/tsim/stable/column_add.sim
index c0f3b4f4907402a863e5c4b25bcb8c4ee3d6f46f..05189f6c7d9f6800e7d8229c29c9bcd7284d238f 100644
--- a/tests/script/tsim/stable/column_add.sim
+++ b/tests/script/tsim/stable/column_add.sim
@@ -116,7 +116,7 @@ if $data[0][1] != 1 then
endi
if $data[0][2] != 2 then
return -1
-endi
+endi
if $data[0][3] != NULL then
return -1
endi
@@ -153,7 +153,7 @@ if $data[0][1] != 1 then
endi
if $data[0][2] != 2 then
return -1
-endi
+endi
if $data[0][3] != NULL then
return -1
endi
@@ -299,4 +299,4 @@ if $rows != 10 then
return -1
endi
-system sh/exec.sh -n dnode1 -s stop -x SIGINT
\ No newline at end of file
+system sh/exec.sh -n dnode1 -s stop -x SIGINT
diff --git a/tests/script/tsim/stable/column_modify.sim b/tests/script/tsim/stable/column_modify.sim
index e2752ccf951cef30587aa1f604f92cbbaa265b85..43284ba829ecd662872b24cefc13a51db28d025b 100644
--- a/tests/script/tsim/stable/column_modify.sim
+++ b/tests/script/tsim/stable/column_modify.sim
@@ -31,7 +31,7 @@ if $data[0][1] != 1 then
endi
if $data[0][2] != 1234 then
return -1
-endi
+endi
if $data[0][3] != 101 then
return -1
endi
@@ -92,7 +92,7 @@ if $data[0][1] != 1 then
endi
if $data[0][2] != 1234 then
return -1
-endi
+endi
if $data[0][3] != 101 then
return -1
endi
@@ -106,4 +106,4 @@ if $data[1][3] != 101 then
return -1
endi
-system sh/exec.sh -n dnode1 -s stop -x SIGINT
\ No newline at end of file
+system sh/exec.sh -n dnode1 -s stop -x SIGINT
diff --git a/tests/script/tsim/stable/disk.sim b/tests/script/tsim/stable/disk.sim
index e0e51b2625d5d90640dc846cefa0d151d9e4efb5..8edd0a845ecf7ced9638b32640d3278e73c93835 100644
--- a/tests/script/tsim/stable/disk.sim
+++ b/tests/script/tsim/stable/disk.sim
@@ -188,4 +188,4 @@ if $rows != 2 then
return -1
endi
-system sh/exec.sh -n dnode1 -s stop -x SIGINT
\ No newline at end of file
+system sh/exec.sh -n dnode1 -s stop -x SIGINT
diff --git a/tests/script/tsim/stable/refcount.sim b/tests/script/tsim/stable/refcount.sim
index a83c0ca53f3033513e48dd21252a9db53eab4774..35d8767efd55df3d6a0126c1d9fd0e33c0f41176 100644
--- a/tests/script/tsim/stable/refcount.sim
+++ b/tests/script/tsim/stable/refcount.sim
@@ -123,4 +123,4 @@ if $rows != 2 then
endi
print =============== step6
-system sh/exec.sh -n dnode1 -s stop -x SIGINT
\ No newline at end of file
+system sh/exec.sh -n dnode1 -s stop -x SIGINT
diff --git a/tests/script/tsim/stable/tag_add.sim b/tests/script/tsim/stable/tag_add.sim
index 7ee9aee974681a21ba7b186cd3d84f9492f6523e..4f5f0e745234e39ecce1e24a0e918094db8676f9 100644
--- a/tests/script/tsim/stable/tag_add.sim
+++ b/tests/script/tsim/stable/tag_add.sim
@@ -139,7 +139,7 @@ if $data[0][1] != 1 then
endi
if $data[0][2] != 2 then
return -1
-endi
+endi
if $data[0][3] != 101 then
return -1
endi
@@ -170,7 +170,7 @@ if $data[0][1] != 1 then
endi
if $data[0][2] != 2 then
return -1
-endi
+endi
if $data[0][3] != 101 then
return -1
endi
@@ -190,4 +190,4 @@ if $rows != 7 then
return -1
endi
-system sh/exec.sh -n dnode1 -s stop -x SIGINT
\ No newline at end of file
+system sh/exec.sh -n dnode1 -s stop -x SIGINT
diff --git a/tests/script/tsim/stable/tag_drop.sim b/tests/script/tsim/stable/tag_drop.sim
index 7902358817c1ee9ba6038233a08810504be6fc70..b457bf195b8991721c59a581e6bd252b8f823906 100644
--- a/tests/script/tsim/stable/tag_drop.sim
+++ b/tests/script/tsim/stable/tag_drop.sim
@@ -165,7 +165,7 @@ if $data[0][1] != 1 then
endi
if $data[0][2] != 2 then
return -1
-endi
+endi
if $data[0][3] != 101 then
return -1
endi
@@ -196,7 +196,7 @@ if $data[0][1] != 1 then
endi
if $data[0][2] != 2 then
return -1
-endi
+endi
if $data[0][3] != 201 then
return -1
endi
@@ -229,7 +229,7 @@ if $data[0][1] != 1 then
endi
if $data[0][2] != 2 then
return -1
-endi
+endi
if $data[0][3] != 201 then
return -1
endi
@@ -261,7 +261,7 @@ if $data[0][1] != 1 then
endi
if $data[0][2] != 2 then
return -1
-endi
+endi
if $data[0][3] != 301 then
return -1
endi
@@ -323,7 +323,7 @@ if $data[0][1] != 1 then
endi
if $data[0][2] != 2 then
return -1
-endi
+endi
if $data[0][3] != 302 then
return -1
endi
@@ -334,4 +334,4 @@ if $data[0][5] != 304 then
return -1
endi
-system sh/exec.sh -n dnode1 -s stop -x SIGINT
\ No newline at end of file
+system sh/exec.sh -n dnode1 -s stop -x SIGINT
diff --git a/tests/script/tsim/stable/tag_filter.sim b/tests/script/tsim/stable/tag_filter.sim
index f44142fbbffdb4f19b32afd51f6d75fa2e798e88..de2a87d6c4a710d8fcb2af773d20ac1bfc252f84 100644
--- a/tests/script/tsim/stable/tag_filter.sim
+++ b/tests/script/tsim/stable/tag_filter.sim
@@ -27,47 +27,47 @@ sql create table db.ctb6 using db.stb tags(6, "102")
sql insert into db.ctb6 values(now, 6, "2")
sql select * from db.stb where t1 = 1
-if $rows != 1 then
+if $rows != 1 then
return -1
endi
-sql select * from db.stb where t1 < 1
-if $rows != 0 then
+sql select * from db.stb where t1 < 1
+if $rows != 0 then
return -=1
endi
-sql select * from db.stb where t1 < 2
-if $rows != 1 then
+sql select * from db.stb where t1 < 2
+if $rows != 1 then
return -1
endi
-sql select * from db.stb where t1 <= 2
-if $rows != 2 then
+sql select * from db.stb where t1 <= 2
+if $rows != 2 then
return -1
endi
-sql select * from db.stb where t1 >= 1
-if $rows != 6 then
+sql select * from db.stb where t1 >= 1
+if $rows != 6 then
return -1
endi
-sql select * from db.stb where t1 > 1
-if $rows != 5 then
+sql select * from db.stb where t1 > 1
+if $rows != 5 then
return -1
endi
-sql select * from db.stb where t1 between 1 and 1
-if $rows != 1 then
+sql select * from db.stb where t1 between 1 and 1
+if $rows != 1 then
return -1
endi
-sql select * from db.stb where t1 between 1 and 6
-if $rows != 6 then
+sql select * from db.stb where t1 between 1 and 6
+if $rows != 6 then
return -1
endi
-sql select * from db.stb where t1 between 1 and 7
-if $rows != 6 then
+sql select * from db.stb where t1 between 1 and 7
+if $rows != 6 then
return -1
endi
@@ -88,25 +88,25 @@ sql insert into db.ctbBin2 values(now, 3, "2")
sql create table db.ctbBin3 using db.stbBin tags("d")
sql insert into db.ctbBin3 values(now, 4, "2")
-sql select * from db.stbBin where t1 = "a"
-if $rows != 1 then
+sql select * from db.stbBin where t1 = "a"
+if $rows != 1 then
return -1
endi
-sql select * from db.stbBin where t1 < "a"
-if $rows != 0 then
+sql select * from db.stbBin where t1 < "a"
+if $rows != 0 then
return -=1
endi
-sql select * from db.stbBin where t1 < "b"
-if $rows != 1 then
+sql select * from db.stbBin where t1 < "b"
+if $rows != 1 then
return -1
endi
-sql select * from db.stbBin where t1 between "a" and "e"
-if $rows != 4 then
+sql select * from db.stbBin where t1 between "a" and "e"
+if $rows != 4 then
return -1
endi
@@ -127,25 +127,25 @@ sql insert into db.ctbNc2 values(now, 3, "2")
sql create table db.ctbNc3 using db.stbNc tags("d")
sql insert into db.ctbNc3 values(now, 4, "2")
-sql select * from db.stbNc where t1 = "a"
-if $rows != 1 then
+sql select * from db.stbNc where t1 = "a"
+if $rows != 1 then
return -1
endi
-sql select * from db.stbNc where t1 < "a"
-if $rows != 0 then
+sql select * from db.stbNc where t1 < "a"
+if $rows != 0 then
return -=1
endi
-sql select * from db.stbNc where t1 < "b"
-if $rows != 1 then
+sql select * from db.stbNc where t1 < "b"
+if $rows != 1 then
return -1
endi
-sql select * from db.stbNc where t1 between "a" and "e"
-if $rows != 4 then
+sql select * from db.stbNc where t1 between "a" and "e"
+if $rows != 4 then
return -1
endi
system sh/exec.sh -n dnode1 -s stop -x SIGINT
diff --git a/tests/script/tsim/stable/tag_modify.sim b/tests/script/tsim/stable/tag_modify.sim
index 909ed7935944aa63f7776bbc75f27b7b156bf0fe..53e7227d1b43f32bbd14e719837b15c2b27e3ca5 100644
--- a/tests/script/tsim/stable/tag_modify.sim
+++ b/tests/script/tsim/stable/tag_modify.sim
@@ -28,7 +28,7 @@ if $data[0][1] != 1 then
endi
if $data[0][2] != 1234 then
return -1
-endi
+endi
if $data[0][3] != 101 then
return -1
endi
@@ -55,7 +55,7 @@ if $data[0][1] != 1 then
endi
if $data[0][2] != 1234 then
return -1
-endi
+endi
if $data[0][3] != 101 then
return -1
endi
@@ -120,4 +120,4 @@ if $data[4][2] != 5 then
return -1
endi
-system sh/exec.sh -n dnode1 -s stop -x SIGINT
\ No newline at end of file
+system sh/exec.sh -n dnode1 -s stop -x SIGINT
diff --git a/tests/script/tsim/stable/tag_rename.sim b/tests/script/tsim/stable/tag_rename.sim
index 5bdfa24990d6742d25dee2a45d5aefd94230f648..c85ed183de1946ea7b876f989473189e7834e4e6 100644
--- a/tests/script/tsim/stable/tag_rename.sim
+++ b/tests/script/tsim/stable/tag_rename.sim
@@ -28,7 +28,7 @@ if $data[0][1] != 1 then
endi
if $data[0][2] != 1234 then
return -1
-endi
+endi
if $data[0][3] != 101 then
return -1
endi
@@ -52,7 +52,7 @@ if $data[0][1] != 1 then
endi
if $data[0][2] != 1234 then
return -1
-endi
+endi
if $data[0][3] != 101 then
return -1
endi
@@ -117,4 +117,4 @@ if $data[4][2] != 4 then
return -1
endi
-system sh/exec.sh -n dnode1 -s stop -x SIGINT
\ No newline at end of file
+system sh/exec.sh -n dnode1 -s stop -x SIGINT
diff --git a/tests/script/tsim/stream/basic1.sim b/tests/script/tsim/stream/basic1.sim
index 5392979c0a218884a50d0ebe9ddb39558e82304f..8942f7f702787c9e026e0c37e47ce56765f554bf 100644
--- a/tests/script/tsim/stream/basic1.sim
+++ b/tests/script/tsim/stream/basic1.sim
@@ -5,7 +5,7 @@ sleep 50
sql connect
print =============== create database
-sql create database test vgroups 1
+sql create database test vgroups 1;
sql select * from information_schema.ins_databases
if $rows != 3 then
return -1
@@ -13,7 +13,7 @@ endi
print $data00 $data01 $data02
-sql use test
+sql use test;
sql create table t1(ts timestamp, a int, b int , c int, d double);
@@ -462,10 +462,10 @@ if $data25 != 3 then
return -1
endi
-sql create database test2 vgroups 1
-sql select * from information_schema.ins_databases
+sql create database test2 vgroups 1;
+sql select * from information_schema.ins_databases;
-sql use test2
+sql use test2;
sql create stable st(ts timestamp, a int, b int, c int, d double) tags(ta int,tb int,tc int);
sql create table t1 using st tags(1,1,1);
sql create table t2 using st tags(2,2,2);
@@ -588,4 +588,38 @@ if $data00 != 5 then
goto loop3
endi
+#max,min selectivity
+sql create database test3 vgroups 1;
+sql use test3;
+sql create stable st(ts timestamp, a int, b int , c int) tags(ta int,tb int,tc int);
+sql create table ts1 using st tags(1,1,1);
+sql create stream stream_t3 trigger at_once into streamtST3 as select ts, min(a) c6, a, b, c, ta, tb, tc from ts1 interval(10s) ;
+
+sql insert into ts1 values(1648791211000,1,2,3);
+sleep 50
+sql insert into ts1 values(1648791222001,2,2,3);
+sleep 50
+
+$loop_count = 0
+loop3:
+sql select * from streamtST3;
+
+sleep 300
+$loop_count = $loop_count + 1
+if $loop_count == 10 then
+ return -1
+endi
+
+# row 0
+if $data02 != 1 then
+ print =====data02=$data02
+ goto loop3
+endi
+
+# row 1
+if $data12 != 2 then
+ print =====data12=$data12
+ goto loop3
+endi
+
system sh/exec.sh -n dnode1 -s stop -x SIGINT
diff --git a/tests/script/tsim/stream/deleteInterval.sim b/tests/script/tsim/stream/deleteInterval.sim
new file mode 100644
index 0000000000000000000000000000000000000000..dfd0ddc9d3a203e617bfcdffa3b12e9414c2feb6
--- /dev/null
+++ b/tests/script/tsim/stream/deleteInterval.sim
@@ -0,0 +1,419 @@
+$loop_all = 0
+looptest:
+
+system sh/stop_dnodes.sh
+system sh/deploy.sh -n dnode1 -i 1
+system sh/exec.sh -n dnode1 -s start
+sleep 200
+sql connect
+
+sql drop stream if exists streams0;
+sql drop stream if exists streams1;
+sql drop stream if exists streams2;
+sql drop stream if exists streams3;
+sql drop stream if exists streams4;
+sql drop database if exists test;
+sql create database test vgroups 1;
+sql use test;
+sql create table t1(ts timestamp, a int, b int , c int, d double);
+sql create stream streams0 trigger at_once into streamt as select _wstart c1, count(*) c2, max(a) c3 from t1 interval(10s);
+
+sql insert into t1 values(1648791213000,NULL,NULL,NULL,NULL);
+sleep 200
+sql delete from t1 where ts = 1648791213000;
+
+$loop_count = 0
+
+loop0:
+sleep 200
+sql select * from streamt order by c1, c2, c3;
+
+$loop_count = $loop_count + 1
+if $loop_count == 10 then
+ return -1
+endi
+
+if $rows != 0 then
+ print =====rows=$rows
+ goto loop0
+endi
+
+sql insert into t1 values(1648791213000,NULL,NULL,NULL,NULL);
+
+$loop_count = 0
+
+loop1:
+sleep 200
+sql select * from streamt order by c1, c2, c3;
+
+$loop_count = $loop_count + 1
+if $loop_count == 10 then
+ return -1
+endi
+
+if $data01 != 1 then
+ print =====data01=$data01
+ goto loop1
+endi
+
+if $data02 != NULL then
+ print =====data02=$data02
+ goto loop1
+endi
+
+sql insert into t1 values(1648791213000,1,1,1,1.0);
+sql insert into t1 values(1648791213001,2,2,2,2.0);
+sql insert into t1 values(1648791213002,3,3,3,3.0);
+sql insert into t1 values(1648791213003,4,4,4,4.0);
+
+sleep 200
+sql delete from t1 where ts >= 1648791213001 and ts <= 1648791213002;
+
+$loop_count = 0
+
+loop3:
+sleep 200
+sql select * from streamt order by c1, c2, c3;
+
+$loop_count = $loop_count + 1
+if $loop_count == 10 then
+ return -1
+endi
+
+if $data01 != 2 then
+ print =====data01=$data01
+ goto loop3
+endi
+
+if $data02 != 4 then
+ print =====data02=$data02
+ goto loop3
+endi
+
+sql insert into t1 values(1648791223000,1,2,3,1.0);
+sql insert into t1 values(1648791223001,1,2,3,1.0);
+sql insert into t1 values(1648791223002,3,2,3,1.0);
+sql insert into t1 values(1648791223003,3,2,3,1.0);
+
+$loop_count = 0
+
+loop4:
+sleep 200
+sql select * from streamt order by c1, c2, c3;
+
+$loop_count = $loop_count + 1
+if $loop_count == 10 then
+ return -1
+endi
+
+if $rows != 2 then
+ print =====rows=$rows
+ goto loop4
+endi
+
+sleep 200
+
+sql delete from t1 where ts >= 1648791223000 and ts <= 1648791223003;
+
+$loop_count = 0
+
+loop5:
+sleep 200
+sql select * from streamt order by c1, c2, c3;
+
+$loop_count = $loop_count + 1
+if $loop_count == 10 then
+ return -1
+endi
+
+if $data01 != 2 then
+ print =====data01=$data01
+ goto loop5
+endi
+
+if $data02 != 4 then
+ print =====data02=$data02
+ goto loop5
+endi
+
+sql insert into t1 values(1648791213000,1,1,1,1.0);
+sql insert into t1 values(1648791213005,2,2,2,2.0);
+sql insert into t1 values(1648791213006,3,3,3,3.0);
+sql insert into t1 values(1648791213007,4,4,4,4.0);
+
+sql insert into t1 values(1648791223000,1,1,1,1.0);
+sql insert into t1 values(1648791223001,2,2,2,2.0);
+sql insert into t1 values(1648791223002,3,3,3,3.0);
+sql insert into t1 values(1648791223003,4,4,4,4.0);
+
+sql insert into t1 values(1648791233000,1,1,1,1.0);
+sql insert into t1 values(1648791233001,2,2,2,2.0);
+sql insert into t1 values(1648791233008,3,3,3,3.0);
+sql insert into t1 values(1648791233009,4,4,4,4.0);
+
+sql delete from t1 where ts >= 1648791213001 and ts <= 1648791233005;
+
+$loop_count = 0
+
+loop6:
+sleep 200
+sql select * from streamt order by c1, c2, c3;
+
+$loop_count = $loop_count + 1
+if $loop_count == 10 then
+ return -1
+endi
+
+if $data01 != 1 then
+ print =====data01=$data01
+ goto loop6
+endi
+
+if $data02 != 1 then
+ print =====data02=$data02
+ goto loop6
+endi
+
+if $data11 != 2 then
+ print =====data11=$data11
+ goto loop6
+endi
+
+if $data12 != 4 then
+ print =====data12=$data12
+ goto loop6
+endi
+
+sql drop stream if exists streams2;
+sql drop database if exists test2;
+sql create database test2 vgroups 4;
+sql use test2;
+sql create stable st(ts timestamp, a int, b int, c int, d double) tags(ta int,tb int,tc int);
+sql create table t1 using st tags(1,1,1);
+sql create table t2 using st tags(2,2,2);
+sql create stream streams2 trigger at_once into test.streamt2 as select _wstart c1, count(*) c2, max(a) c3 from st interval(10s);
+
+sql insert into t1 values(1648791213000,NULL,NULL,NULL,NULL);
+sql insert into t2 values(1648791213000,NULL,NULL,NULL,NULL);
+
+$loop_count = 0
+
+loop7:
+sleep 200
+sql select * from test.streamt2 order by c1, c2, c3;
+
+$loop_count = $loop_count + 1
+if $loop_count == 10 then
+ return -1
+endi
+
+if $rows != 1 then
+ print =====rows=$rows
+ goto loop7
+endi
+
+sleep 200
+
+sql delete from t1 where ts = 1648791213000;
+
+$loop_count = 0
+
+loop8:
+sleep 200
+
+sql select * from test.streamt2 order by c1, c2, c3;
+
+$loop_count = $loop_count + 1
+if $loop_count == 10 then
+ return -1
+endi
+
+if $data01 != 1 then
+ print =====data01=$data01
+ goto loop8
+endi
+
+if $data02 != NULL then
+ print =====data02=$data02
+ goto loop8
+endi
+
+sql insert into t1 values(1648791223000,1,2,3,1.0);
+sql insert into t1 values(1648791223001,1,2,3,1.0);
+sql insert into t1 values(1648791223002,3,2,3,1.0);
+sql insert into t1 values(1648791223003,3,2,3,1.0);
+sql insert into t2 values(1648791223000,1,2,3,1.0);
+sql insert into t2 values(1648791223001,1,2,3,1.0);
+sql insert into t2 values(1648791223002,3,2,3,1.0);
+sql insert into t2 values(1648791223003,3,2,3,1.0);
+
+sleep 200
+
+sql delete from t2 where ts >= 1648791223000 and ts <= 1648791223001;
+
+$loop_count = 0
+
+loop11:
+sleep 200
+sql select * from test.streamt2 order by c1, c2, c3;
+
+$loop_count = $loop_count + 1
+if $loop_count == 10 then
+ return -1
+endi
+
+if $data01 != 1 then
+ print =====data01=$data01
+ goto loop11
+endi
+
+if $data02 != NULL then
+ print =====data02=$data02
+ goto loop11
+endi
+
+if $data11 != 6 then
+ print =====data11=$data11
+ goto loop11
+endi
+
+if $data12 != 3 then
+ print =====data12=$data12
+ goto loop11
+endi
+
+sleep 200
+
+sql delete from st where ts >= 1648791223000 and ts <= 1648791223003;
+
+$loop_count = 0
+
+loop12:
+sleep 200
+sql select * from test.streamt2 order by c1, c2, c3;
+
+$loop_count = $loop_count + 1
+if $loop_count == 10 then
+ return -1
+endi
+
+if $rows != 1 then
+ print =====rows=$rows
+ goto loop12
+endi
+
+if $data01 != 1 then
+ print =====data01=$data01
+ goto loop12
+endi
+
+if $data02 != NULL then
+ print =====data02=$data02
+ goto loop12
+endi
+
+sql insert into t1 values(1648791213004,3,2,3,1.0);
+sql insert into t1 values(1648791213005,3,2,3,1.0);
+sql insert into t1 values(1648791213006,3,2,3,1.0);
+sql insert into t1 values(1648791223004,1,2,3,1.0);
+sql insert into t2 values(1648791213004,3,2,3,1.0);
+sql insert into t2 values(1648791213005,3,2,3,1.0);
+sql insert into t2 values(1648791213006,3,2,3,1.0);
+sql insert into t2 values(1648791223004,1,2,3,1.0);
+
+sleep 200
+
+sql delete from t2 where ts >= 1648791213004 and ts <= 1648791213006;
+
+$loop_count = 0
+
+loop13:
+sleep 200
+sql select * from test.streamt2 order by c1, c2, c3;
+
+$loop_count = $loop_count + 1
+if $loop_count == 10 then
+ return -1
+endi
+
+if $rows != 2 then
+ print =====rows=$rows
+ goto loop13
+endi
+
+if $data01 != 4 then
+ print =====data01=$data01
+ goto loop13
+endi
+
+if $data02 != 3 then
+ print =====data02=$data02
+ goto loop13
+endi
+
+if $data11 != 2 then
+ print =====data11=$data11
+ goto loop13
+endi
+
+if $data12 != 1 then
+ print =====data12=$data12
+ goto loop13
+endi
+
+sql insert into t1 values(1648791223005,1,2,3,1.0);
+sql insert into t1 values(1648791223006,1,2,3,1.0);
+sql insert into t2 values(1648791223005,1,2,3,1.0);
+sql insert into t2 values(1648791223006,1,2,3,1.0);
+
+sql insert into t1 values(1648791233005,4,2,3,1.0);
+sql insert into t1 values(1648791233006,2,2,3,1.0);
+sql insert into t2 values(1648791233005,5,2,3,1.0);
+sql insert into t2 values(1648791233006,3,2,3,1.0);
+
+sleep 200
+
+sql delete from st where ts >= 1648791213001 and ts <= 1648791233005;
+
+$loop_count = 0
+
+loop14:
+sleep 200
+sql select * from test.streamt2 order by c1, c2, c3;
+
+$loop_count = $loop_count + 1
+if $loop_count == 10 then
+ return -1
+endi
+
+if $rows != 2 then
+ print =====rows=$rows
+ goto loop14
+endi
+
+if $data01 != 1 then
+ print =====data01=$data01
+ goto loop14
+endi
+
+if $data02 != NULL then
+ print =====data02=$data02
+ goto loop14
+endi
+
+if $data11 != 2 then
+ print =====data11=$data11
+ goto loop14
+endi
+
+if $data12 != 3 then
+ print =====data12=$data12
+ goto loop14
+endi
+
+$loop_all = $loop_all + 1
+print ============loop_all=$loop_all
+
+system sh/stop_dnodes.sh
+
+#goto looptest
\ No newline at end of file
diff --git a/tests/script/tsim/stream/deleteSession.sim b/tests/script/tsim/stream/deleteSession.sim
new file mode 100644
index 0000000000000000000000000000000000000000..541609633b023611815252cde0109cdc01094198
--- /dev/null
+++ b/tests/script/tsim/stream/deleteSession.sim
@@ -0,0 +1,532 @@
+$loop_all = 0
+looptest:
+
+system sh/stop_dnodes.sh
+system sh/deploy.sh -n dnode1 -i 1
+system sh/exec.sh -n dnode1 -s start
+sleep 200
+sql connect
+
+sql drop stream if exists streams0;
+sql drop stream if exists streams1;
+sql drop stream if exists streams2;
+sql drop stream if exists streams3;
+sql drop stream if exists streams4;
+sql drop database if exists test;
+sql create database test vgroups 1;
+sql use test;
+sql create table t1(ts timestamp, a int, b int , c int, d double);
+sql create stream streams0 trigger at_once into streamt as select _wstart c1, count(*) c2, max(a) c3 from t1 session(ts, 5s);
+
+sql insert into t1 values(1648791213000,NULL,NULL,NULL,NULL);
+sleep 200
+sql delete from t1 where ts = 1648791213000;
+
+$loop_count = 0
+
+loop0:
+sleep 200
+sql select * from streamt order by c1, c2, c3;
+
+$loop_count = $loop_count + 1
+if $loop_count == 10 then
+ return -1
+endi
+
+if $rows != 0 then
+ print =====rows=$rows
+ goto loop0
+endi
+
+sql insert into t1 values(1648791213000,NULL,NULL,NULL,NULL);
+
+$loop_count = 0
+
+loop1:
+sleep 200
+sql select * from streamt order by c1, c2, c3;
+
+$loop_count = $loop_count + 1
+if $loop_count == 10 then
+ return -1
+endi
+
+if $data01 != 1 then
+ print =====data01=$data01
+ goto loop1
+endi
+
+if $data02 != NULL then
+ print =====data02=$data02
+ goto loop1
+endi
+
+sql insert into t1 values(1648791213000,1,1,1,1.0);
+sql insert into t1 values(1648791213001,2,2,2,2.0);
+sql insert into t1 values(1648791213002,3,3,3,3.0);
+sql insert into t1 values(1648791213003,4,4,4,4.0);
+
+sleep 200
+sql delete from t1 where ts >= 1648791213001 and ts <= 1648791213002;
+
+$loop_count = 0
+
+loop3:
+sleep 200
+sql select * from streamt order by c1, c2, c3;
+
+$loop_count = $loop_count + 1
+if $loop_count == 10 then
+ return -1
+endi
+
+if $data01 != 2 then
+ print =====data01=$data01
+ goto loop3
+endi
+
+if $data02 != 4 then
+ print =====data02=$data02
+ goto loop3
+endi
+
+sql insert into t1 values(1648791223000,1,2,3,1.0);
+sql insert into t1 values(1648791223001,1,2,3,1.0);
+sql insert into t1 values(1648791223002,3,2,3,1.0);
+sql insert into t1 values(1648791223003,3,2,3,1.0);
+
+$loop_count = 0
+
+loop4:
+sleep 200
+sql select * from streamt order by c1, c2, c3;
+
+$loop_count = $loop_count + 1
+if $loop_count == 10 then
+ return -1
+endi
+
+if $rows != 2 then
+ print =====rows=$rows
+ goto loop4
+endi
+
+sleep 200
+
+sql delete from t1 where ts >= 1648791223000 and ts <= 1648791223003;
+
+$loop_count = 0
+
+loop5:
+sleep 200
+sql select * from streamt order by c1, c2, c3;
+
+$loop_count = $loop_count + 1
+if $loop_count == 10 then
+ return -1
+endi
+
+if $data01 != 2 then
+ print =====data01=$data01
+ goto loop5
+endi
+
+if $data02 != 4 then
+ print =====data02=$data02
+ goto loop5
+endi
+
+sql insert into t1 values(1648791213000,1,1,1,1.0);
+sql insert into t1 values(1648791213005,2,2,2,2.0);
+sql insert into t1 values(1648791213006,3,3,3,3.0);
+sql insert into t1 values(1648791213007,4,4,4,4.0);
+
+sql insert into t1 values(1648791223000,1,1,1,1.0);
+sql insert into t1 values(1648791223001,2,2,2,2.0);
+sql insert into t1 values(1648791223002,3,3,3,3.0);
+sql insert into t1 values(1648791223003,4,4,4,4.0);
+
+sql insert into t1 values(1648791233000,1,1,1,1.0);
+sql insert into t1 values(1648791233001,2,2,2,2.0);
+sql insert into t1 values(1648791233008,3,3,3,3.0);
+sql insert into t1 values(1648791233009,4,4,4,4.0);
+
+sql delete from t1 where ts >= 1648791213001 and ts <= 1648791233005;
+
+$loop_count = 0
+
+loop6:
+sleep 200
+sql select * from streamt order by c1, c2, c3;
+
+$loop_count = $loop_count + 1
+if $loop_count == 10 then
+ return -1
+endi
+
+if $data01 != 1 then
+ print =====data01=$data01
+ goto loop6
+endi
+
+if $data02 != 1 then
+ print =====data02=$data02
+ goto loop6
+endi
+
+if $data11 != 2 then
+ print =====data11=$data11
+ goto loop6
+endi
+
+if $data12 != 4 then
+ print =====data12=$data12
+ goto loop6
+endi
+
+sql drop stream if exists streams2;
+sql drop database if exists test2;
+sql create database test2 vgroups 4;
+sql use test2;
+sql create stable st(ts timestamp, a int, b int, c int, d double) tags(ta int,tb int,tc int);
+sql create table t1 using st tags(1,1,1);
+sql create table t2 using st tags(2,2,2);
+sql create stream streams2 trigger at_once into test.streamt2 as select _wstart c1, count(*) c2, max(a) c3 from st session(ts,5s);
+
+sql insert into t1 values(1648791213000,NULL,NULL,NULL,NULL);
+sql insert into t2 values(1648791213000,NULL,NULL,NULL,NULL);
+
+$loop_count = 0
+
+loop7:
+sleep 200
+sql select * from test.streamt2 order by c1, c2, c3;
+
+$loop_count = $loop_count + 1
+if $loop_count == 10 then
+ return -1
+endi
+
+if $rows != 1 then
+ print =====rows=$rows
+ goto loop7
+endi
+
+sleep 200
+
+sql delete from t1 where ts = 1648791213000;
+
+$loop_count = 0
+
+loop8:
+sleep 200
+
+sql select * from test.streamt2 order by c1, c2, c3;
+
+$loop_count = $loop_count + 1
+if $loop_count == 10 then
+ return -1
+endi
+
+if $data01 != 1 then
+ print =====data01=$data01
+ goto loop8
+endi
+
+if $data02 != NULL then
+ print =====data02=$data02
+ goto loop8
+endi
+
+sql insert into t1 values(1648791223000,1,2,3,1.0);
+sql insert into t1 values(1648791223001,1,2,3,1.0);
+sql insert into t1 values(1648791223002,3,2,3,1.0);
+sql insert into t1 values(1648791223003,3,2,3,1.0);
+sql insert into t2 values(1648791223000,1,2,3,1.0);
+sql insert into t2 values(1648791223001,1,2,3,1.0);
+sql insert into t2 values(1648791223002,3,2,3,1.0);
+sql insert into t2 values(1648791223003,3,2,3,1.0);
+
+sleep 200
+
+sql delete from t2 where ts >= 1648791223000 and ts <= 1648791223001;
+
+$loop_count = 0
+
+loop11:
+sleep 200
+sql select * from test.streamt2 order by c1, c2, c3;
+
+$loop_count = $loop_count + 1
+if $loop_count == 10 then
+ return -1
+endi
+
+if $data01 != 1 then
+ print =====data01=$data01
+ goto loop11
+endi
+
+if $data02 != NULL then
+ print =====data02=$data02
+ goto loop11
+endi
+
+if $data11 != 6 then
+ print =====data11=$data11
+ goto loop11
+endi
+
+if $data12 != 3 then
+ print =====data12=$data12
+ goto loop11
+endi
+
+sleep 200
+
+sql delete from st where ts >= 1648791223000 and ts <= 1648791223003;
+
+$loop_count = 0
+
+loop12:
+sleep 200
+sql select * from test.streamt2 order by c1, c2, c3;
+
+$loop_count = $loop_count + 1
+if $loop_count == 10 then
+ return -1
+endi
+
+if $rows != 1 then
+ print =====rows=$rows
+ goto loop12
+endi
+
+if $data01 != 1 then
+ print =====data01=$data01
+ goto loop12
+endi
+
+if $data02 != NULL then
+ print =====data02=$data02
+ goto loop12
+endi
+
+sql insert into t1 values(1648791213004,3,2,3,1.0);
+sql insert into t1 values(1648791213005,3,2,3,1.0);
+sql insert into t1 values(1648791213006,3,2,3,1.0);
+sql insert into t1 values(1648791223004,1,2,3,1.0);
+sql insert into t2 values(1648791213004,3,2,3,1.0);
+sql insert into t2 values(1648791213005,3,2,3,1.0);
+sql insert into t2 values(1648791213006,3,2,3,1.0);
+sql insert into t2 values(1648791223004,1,2,3,1.0);
+
+sleep 200
+
+sql delete from t2 where ts >= 1648791213004 and ts <= 1648791213006;
+
+$loop_count = 0
+
+loop13:
+sleep 200
+sql select * from test.streamt2 order by c1, c2, c3;
+
+$loop_count = $loop_count + 1
+if $loop_count == 10 then
+ return -1
+endi
+
+if $rows != 2 then
+ print =====rows=$rows
+ goto loop13
+endi
+
+if $data01 != 4 then
+ print =====data01=$data01
+ goto loop13
+endi
+
+if $data02 != 3 then
+ print =====data02=$data02
+ goto loop13
+endi
+
+if $data11 != 2 then
+ print =====data11=$data11
+ goto loop13
+endi
+
+if $data12 != 1 then
+ print =====data12=$data12
+ goto loop13
+endi
+
+sql insert into t1 values(1648791223005,1,2,3,1.0);
+sql insert into t1 values(1648791223006,1,2,3,1.0);
+sql insert into t2 values(1648791223005,1,2,3,1.0);
+sql insert into t2 values(1648791223006,1,2,3,1.0);
+
+sql insert into t1 values(1648791233005,4,2,3,1.0);
+sql insert into t1 values(1648791233006,2,2,3,1.0);
+sql insert into t2 values(1648791233005,5,2,3,1.0);
+sql insert into t2 values(1648791233006,3,2,3,1.0);
+
+sleep 200
+
+sql delete from st where ts >= 1648791213001 and ts <= 1648791233005;
+
+$loop_count = 0
+
+loop14:
+sleep 200
+sql select * from test.streamt2 order by c1, c2, c3;
+
+$loop_count = $loop_count + 1
+if $loop_count == 10 then
+ return -1
+endi
+
+if $rows != 2 then
+ print =====rows=$rows
+ goto loop14
+endi
+
+if $data01 != 1 then
+ print =====data01=$data01
+ goto loop14
+endi
+
+if $data02 != NULL then
+ print =====data02=$data02
+ goto loop14
+endi
+
+if $data11 != 2 then
+ print =====data11=$data11
+ goto loop14
+endi
+
+if $data12 != 3 then
+ print =====data12=$data12
+ goto loop14
+endi
+
+sql drop stream if exists streams1;
+sql drop stream if exists streams2;
+sql drop stream if exists streams3;
+sql drop database if exists test3;
+sql drop database if exists test;
+sql create database test3 vgroups 4;
+sql create database test vgroups 1;
+sql use test3;
+sql create stable st(ts timestamp, a int, b int, c int, d double) tags(ta int,tb int,tc int);
+sql create table t1 using st tags(1,1,1);
+sql create table t2 using st tags(2,2,2);
+sql create stream streams3 trigger at_once into test.streamt3 as select _wstart c1, count(*) c2, max(a) c3 from st session(ts,5s);
+
+sql insert into t1 values(1648791210000,1,1,1,NULL);
+sql insert into t1 values(1648791210001,2,2,2,NULL);
+sql insert into t2 values(1648791213001,3,3,3,NULL);
+sql insert into t2 values(1648791213003,4,4,4,NULL);
+sql insert into t1 values(1648791216000,5,5,5,NULL);
+sql insert into t1 values(1648791216002,6,6,6,NULL);
+sql insert into t1 values(1648791216004,7,7,7,NULL);
+sql insert into t2 values(1648791218001,8,8,8,NULL);
+sql insert into t2 values(1648791218003,9,9,9,NULL);
+sql insert into t1 values(1648791222000,10,10,10,NULL);
+sql insert into t1 values(1648791222003,11,11,11,NULL);
+sql insert into t1 values(1648791222005,12,12,12,NULL);
+
+sql insert into t1 values(1648791232005,13,13,13,NULL);
+sql insert into t2 values(1648791242005,14,14,14,NULL);
+
+$loop_count = 0
+
+loop19:
+sleep 200
+sql select * from test.streamt3 order by c1, c2, c3;
+
+$loop_count = $loop_count + 1
+if $loop_count == 10 then
+ return -1
+endi
+
+if $rows != 3 then
+ print =====rows=$rows
+ goto loop19
+endi
+
+sql delete from t2 where ts >= 1648791213001 and ts <= 1648791218003;
+
+$loop_count = 0
+
+loop20:
+sleep 200
+sql select * from test.streamt3 order by c1, c2, c3;
+
+$loop_count = $loop_count + 1
+if $loop_count == 10 then
+ return -1
+endi
+
+if $rows != 5 then
+ print =====rows=$rows
+ goto loop20
+endi
+
+if $data01 != 2 then
+ print =====data01=$data01
+ goto loop20
+endi
+
+if $data02 != 2 then
+ print =====data02=$data02
+ goto loop20
+endi
+
+if $data11 != 3 then
+ print =====data11=$data11
+ goto loop20
+endi
+
+if $data12 != 7 then
+ print =====data12=$data12
+ goto loop20
+endi
+
+if $data21 != 3 then
+ print =====data21=$data21
+ goto loop20
+endi
+
+if $data22 != 12 then
+ print =====data22=$data22
+ goto loop20
+endi
+
+if $data31 != 1 then
+ print =====data31=$data31
+ goto loop20
+endi
+
+if $data32 != 13 then
+ print =====data32=$data32
+ goto loop20
+endi
+
+if $data41 != 1 then
+ print =====data41=$data41
+ goto loop20
+endi
+
+if $data42 != 14 then
+ print =====data42=$data42
+ goto loop20
+endi
+
+$loop_all = $loop_all + 1
+print ============loop_all=$loop_all
+
+system sh/stop_dnodes.sh
+
+#goto looptest
\ No newline at end of file
diff --git a/tests/script/tsim/stream/deleteState.sim b/tests/script/tsim/stream/deleteState.sim
new file mode 100644
index 0000000000000000000000000000000000000000..ecd9f55340edbc79265255848f5240f0c02fd737
--- /dev/null
+++ b/tests/script/tsim/stream/deleteState.sim
@@ -0,0 +1,198 @@
+$loop_all = 0
+looptest:
+
+system sh/stop_dnodes.sh
+system sh/deploy.sh -n dnode1 -i 1
+system sh/exec.sh -n dnode1 -s start
+sleep 200
+sql connect
+
+sql drop stream if exists streams0;
+sql drop stream if exists streams1;
+sql drop stream if exists streams2;
+sql drop stream if exists streams3;
+sql drop stream if exists streams4;
+sql drop database if exists test;
+sql create database test vgroups 1;
+sql use test;
+sql create table t1(ts timestamp, a int, b int , c int, d double);
+sql create stream streams0 trigger at_once into streamt as select _wstart c1, count(*) c2, max(b) c3 from t1 state_window(a);
+
+sql insert into t1 values(1648791213000,NULL,NULL,NULL,NULL);
+sleep 200
+sql delete from t1 where ts = 1648791213000;
+
+$loop_count = 0
+
+loop0:
+sleep 200
+sql select * from streamt order by c1, c2, c3;
+
+$loop_count = $loop_count + 1
+if $loop_count == 10 then
+ return -1
+endi
+
+if $rows != 0 then
+ print =====rows=$rows
+ goto loop0
+endi
+
+sql insert into t1 values(1648791213000,NULL,NULL,NULL,NULL);
+
+$loop_count = 0
+
+loop1:
+sleep 200
+sql select * from streamt order by c1, c2, c3;
+
+$loop_count = $loop_count + 1
+if $loop_count == 10 then
+ return -1
+endi
+
+if $data01 != 1 then
+ print =====data01=$data01
+ goto loop1
+endi
+
+if $data02 != NULL then
+ print =====data02=$data02
+ goto loop1
+endi
+
+sql insert into t1 values(1648791213000,1,1,1,1.0);
+sql insert into t1 values(1648791213001,1,2,2,2.0);
+sql insert into t1 values(1648791213002,1,3,3,3.0);
+sql insert into t1 values(1648791213003,1,4,4,4.0);
+
+sleep 200
+sql delete from t1 where ts >= 1648791213001 and ts <= 1648791213002;
+
+$loop_count = 0
+
+loop3:
+sleep 200
+sql select * from streamt order by c1, c2, c3;
+
+$loop_count = $loop_count + 1
+if $loop_count == 10 then
+ return -1
+endi
+
+if $data01 != 2 then
+ print =====data01=$data01
+ goto loop3
+endi
+
+if $data02 != 4 then
+ print =====data02=$data02
+ goto loop3
+endi
+
+sql insert into t1 values(1648791223000,2,2,3,1.0);
+sql insert into t1 values(1648791223001,2,2,3,1.0);
+sql insert into t1 values(1648791223002,2,2,3,1.0);
+sql insert into t1 values(1648791223003,2,2,3,1.0);
+
+$loop_count = 0
+
+loop4:
+sleep 200
+sql select * from streamt order by c1, c2, c3;
+
+$loop_count = $loop_count + 1
+if $loop_count == 10 then
+ return -1
+endi
+
+if $rows != 2 then
+ print =====rows=$rows
+ goto loop4
+endi
+
+sleep 200
+
+sql delete from t1 where ts >= 1648791223000 and ts <= 1648791223003;
+
+$loop_count = 0
+
+loop5:
+sleep 200
+sql select * from streamt order by c1, c2, c3;
+
+$loop_count = $loop_count + 1
+if $loop_count == 10 then
+ return -1
+endi
+
+if $data01 != 2 then
+ print =====data01=$data01
+ goto loop5
+endi
+
+if $data02 != 4 then
+ print =====data02=$data02
+ goto loop5
+endi
+
+sql insert into t1 values(1648791213000,1,1,1,1.0);
+sql insert into t1 values(1648791213005,1,2,2,2.0);
+sql insert into t1 values(1648791213006,1,3,3,3.0);
+sql insert into t1 values(1648791213007,1,4,4,4.0);
+
+sql insert into t1 values(1648791223000,2,1,1,1.0);
+sql insert into t1 values(1648791223001,2,2,2,2.0);
+sql insert into t1 values(1648791223002,2,3,3,3.0);
+sql insert into t1 values(1648791223003,2,4,4,4.0);
+
+sql insert into t1 values(1648791233000,3,1,1,1.0);
+sql insert into t1 values(1648791233001,3,2,2,2.0);
+sql insert into t1 values(1648791233008,3,3,3,3.0);
+sql insert into t1 values(1648791233009,3,4,4,4.0);
+
+sql delete from t1 where ts >= 1648791213001 and ts <= 1648791233005;
+
+$loop_count = 0
+
+loop6:
+sleep 200
+sql select * from streamt order by c1, c2, c3;
+
+$loop_count = $loop_count + 1
+if $loop_count == 10 then
+ return -1
+endi
+
+if $rows != 2 then
+ print =====rows=$rows
+ goto loop6
+endi
+
+if $data01 != 1 then
+ print =====data01=$data01
+ goto loop6
+endi
+
+if $data02 != 1 then
+ print =====data02=$data02
+ goto loop6
+endi
+
+if $data11 != 2 then
+ print =====data11=$data11
+ goto loop6
+endi
+
+if $data12 != 4 then
+ print =====data12=$data12
+ goto loop6
+endi
+
+
+$loop_all = $loop_all + 1
+print ============loop_all=$loop_all
+
+system sh/stop_dnodes.sh
+
+#goto looptest
\ No newline at end of file
diff --git a/tests/script/tsim/stream/distributeInterval0.sim b/tests/script/tsim/stream/distributeInterval0.sim
index b6b427343ed8c5c03367aef6b7edc2cd5495d469..9b2e94055672d47ce4d9f0bd24663f7975d824c0 100644
--- a/tests/script/tsim/stream/distributeInterval0.sim
+++ b/tests/script/tsim/stream/distributeInterval0.sim
@@ -198,7 +198,7 @@ endi
sql select _wstart, count(*) c1, count(d) c2 , sum(a) c3 , max(b) c4, min(c) c5, avg(d) from st interval(10s);
-sql create database test1 vgroups 1;
+sql create database test1 vgroups 4;
sql use test1;
sql create stable st(ts timestamp, a int, b int , c int) tags(ta int,tb int,tc int);
sql create table ts1 using st tags(1,1,1);
@@ -232,4 +232,43 @@ if $data11 != 2 then
goto loop2
endi
+#max,min selectivity
+sql create database test3 vgroups 4;
+sql use test3;
+sql create stable st(ts timestamp, a int, b int , c int) tags(ta int,tb int,tc int);
+sql create table ts1 using st tags(1,1,1);
+sql create table ts2 using st tags(2,2,2);
+sql create stream stream_t3 trigger at_once into streamtST3 as select ts, min(a) c6, a, b, c, ta, tb, tc from st interval(10s) ;
+
+sql insert into ts1 values(1648791211000,1,2,3);
+sleep 50
+sql insert into ts1 values(1648791222001,2,2,3);
+sleep 50
+sql insert into ts2 values(1648791211000,1,2,3);
+sleep 50
+sql insert into ts2 values(1648791222001,2,2,3);
+sleep 50
+
+$loop_count = 0
+loop3:
+sql select * from streamtST3;
+
+sleep 300
+$loop_count = $loop_count + 1
+if $loop_count == 10 then
+ return -1
+endi
+
+# row 0
+if $data02 != 1 then
+ print =====data02=$data02
+ goto loop3
+endi
+
+# row 1
+if $data12 != 2 then
+ print =====data12=$data12
+ goto loop3
+endi
+
system sh/stop_dnodes.sh
diff --git a/tests/script/tsim/stream/partitionbyColumnInterval.sim b/tests/script/tsim/stream/partitionbyColumnInterval.sim
new file mode 100644
index 0000000000000000000000000000000000000000..24fdb9c99445864b01e95b21aa2db4c103054223
--- /dev/null
+++ b/tests/script/tsim/stream/partitionbyColumnInterval.sim
@@ -0,0 +1,570 @@
+$loop_all = 0
+looptest:
+
+system sh/stop_dnodes.sh
+system sh/deploy.sh -n dnode1 -i 1
+system sh/exec.sh -n dnode1 -s start
+sleep 50
+sql connect
+
+sql drop stream if exists streams0;
+sql drop stream if exists streams1;
+sql drop stream if exists streams2;
+sql drop stream if exists streams3;
+sql drop stream if exists streams4;
+sql drop database if exists test;
+sql create database test vgroups 1;
+sql use test;
+sql create table t1(ts timestamp, a int, b int , c int, d double);
+sql create stream streams0 trigger at_once into streamt as select _wstart c1, count(*) c2, max(a) c3, _group_key(a) c4 from t1 partition by a interval(10s);
+
+sql insert into t1 values(1648791213000,NULL,NULL,NULL,NULL);
+sql insert into t1 values(1648791213000,NULL,NULL,NULL,NULL);
+
+$loop_count = 0
+
+loop0:
+sleep 50
+sql select * from streamt order by c1, c4, c2, c3;
+
+$loop_count = $loop_count + 1
+if $loop_count == 20 then
+ return -1
+endi
+
+if $data01 != 1 then
+ print =====data01=$data01
+ goto loop0
+endi
+
+if $data02 != NULL then
+ print =====data02=$data02
+ goto loop0
+endi
+
+
+sql insert into t1 values(1648791213000,1,2,3,1.0);
+
+$loop_count = 0
+
+loop1:
+sleep 50
+sql select * from streamt order by c1, c4, c2, c3;
+
+$loop_count = $loop_count + 1
+if $loop_count == 20 then
+ return -1
+endi
+
+if $data01 != 1 then
+ print =====data01=$data01
+ goto loop1
+endi
+
+if $data02 != 1 then
+ print =====data02=$data02
+ goto loop1
+endi
+
+sql insert into t1 values(1648791213000,2,2,3,1.0);
+
+$loop_count = 0
+
+loop2:
+sleep 50
+sql select * from streamt order by c1, c4, c2, c3;
+
+$loop_count = $loop_count + 1
+if $loop_count == 20 then
+ return -1
+endi
+
+if $data01 != 1 then
+ print =====data01=$data01
+ goto loop2
+endi
+
+if $data02 != 2 then
+ print =====data02=$data02
+ goto loop2
+endi
+
+sql insert into t1 values(1648791213000,2,2,3,1.0);
+sql insert into t1 values(1648791213001,2,2,3,1.0);
+sql insert into t1 values(1648791213002,2,2,3,1.0);
+sql insert into t1 values(1648791213002,1,2,3,1.0);
+
+$loop_count = 0
+
+loop3:
+sleep 50
+sql select * from streamt order by c1, c4, c2, c3;
+
+$loop_count = $loop_count + 1
+if $loop_count == 20 then
+ return -1
+endi
+
+if $data01 != 1 then
+ print =====data01=$data01
+ goto loop3
+endi
+
+if $data02 != 1 then
+ print =====data02=$data02
+ goto loop3
+endi
+
+if $data11 != 2 then
+ print =====data11=$data11
+ goto loop3
+endi
+
+if $data12 != 2 then
+ print =====data12=$data12
+ goto loop3
+endi
+
+sql insert into t1 values(1648791223000,1,2,3,1.0);
+sql insert into t1 values(1648791223001,1,2,3,1.0);
+sql insert into t1 values(1648791223002,3,2,3,1.0);
+sql insert into t1 values(1648791223003,3,2,3,1.0);
+sql insert into t1 values(1648791213001,1,2,3,1.0) (1648791223001,2,2,3,1.0) (1648791223003,1,2,3,1.0);
+
+$loop_count = 0
+
+loop4:
+sleep 50
+sql select * from streamt order by c1, c4, c2, c3;
+
+$loop_count = $loop_count + 1
+if $loop_count == 20 then
+ return -1
+endi
+
+if $data01 != 2 then
+ print =====data01=$data01
+ goto loop4
+endi
+
+if $data02 != 1 then
+ print =====data02=$data02
+ goto loop4
+endi
+
+if $data11 != 1 then
+ print =====data11=$data11
+ goto loop4
+endi
+
+if $data12 != 2 then
+ print =====data12=$data12
+ goto loop4
+endi
+
+if $data21 != 2 then
+ print =====data21=$data21
+ goto loop4
+endi
+
+if $data22 != 1 then
+ print =====data22=$data22
+ goto loop4
+endi
+
+if $data31 != 1 then
+ print =====data31=$data31
+ goto loop4
+endi
+
+if $data32 != 2 then
+ print =====data32=$data32
+ goto loop4
+endi
+
+if $data41 != 1 then
+ print =====data41=$data41
+ goto loop4
+endi
+
+if $data42 != 3 then
+ print =====data42=$data42
+ goto loop4
+endi
+
+sql drop stream if exists streams1;
+sql drop database if exists test1;
+sql create database test1 vgroups 1;
+sql use test1;
+sql create table t1(ts timestamp, a int, b int , c int, d double);
+sql create stream streams1 trigger at_once into streamt1 as select _wstart c1, count(*) c2, max(c) c3, _group_key(a+b) c4 from t1 partition by a+b interval(10s);
+
+sql insert into t1 values(1648791213000,NULL,NULL,NULL,NULL);
+sql insert into t1 values(1648791213000,NULL,NULL,NULL,NULL);
+sql insert into t1 values(1648791213000,1,2,1,1.0);
+sql insert into t1 values(1648791213001,2,1,2,2.0);
+sql insert into t1 values(1648791213001,1,2,3,2.0);
+
+$loop_count = 0
+
+loop5:
+sleep 50
+sql select * from streamt1 order by c1, c4, c2, c3;
+
+$loop_count = $loop_count + 1
+if $loop_count == 20 then
+ return -1
+endi
+
+if $data01 != 2 then
+ print =====data01=$data01
+ goto loop5
+endi
+
+sql insert into t1 values(1648791223000,1,2,4,2.0);
+sql insert into t1 values(1648791223001,1,2,5,2.0);
+sql insert into t1 values(1648791223002,1,2,5,2.0);
+sql insert into t1 values(1648791213001,1,1,6,2.0) (1648791223002,1,1,7,2.0);
+
+$loop_count = 0
+
+loop6:
+sleep 50
+sql select * from streamt1 order by c1, c4, c2, c3;
+
+$loop_count = $loop_count + 1
+if $loop_count == 20 then
+ return -1
+endi
+
+if $data01 != 1 then
+ print =====data01=$data01
+ goto loop6
+endi
+
+if $data02 != 6 then
+ print =====data02=$data02
+ goto loop6
+endi
+
+if $data11 != 1 then
+ print =====data11=$data11
+ goto loop6
+endi
+
+if $data12 != 1 then
+ print =====data12=$data12
+ goto loop6
+endi
+
+if $data21 != 1 then
+ print =====data21=$data21
+ goto loop6
+endi
+
+if $data22 != 7 then
+ print =====data22=$data22
+ goto loop6
+endi
+
+if $data31 != 2 then
+ print =====data31=$data31
+ goto loop6
+endi
+
+if $data32 != 5 then
+ print =====data32=$data32
+ goto loop6
+endi
+
+sql drop stream if exists streams2;
+sql drop database if exists test2;
+sql create database test2 vgroups 4;
+sql use test2;
+sql create stable st(ts timestamp, a int, b int, c int, d double) tags(ta int,tb int,tc int);
+sql create table t1 using st tags(1,1,1);
+sql create table t2 using st tags(2,2,2);
+sql create stream streams2 trigger at_once into test.streamt2 as select _wstart c1, count(*) c2, max(a) c3 from st partition by a interval(10s);
+
+sql insert into t1 values(1648791213000,NULL,NULL,NULL,NULL);
+sql insert into t1 values(1648791213000,NULL,NULL,NULL,NULL);
+sql insert into t2 values(1648791213000,NULL,NULL,NULL,NULL);
+sql insert into t2 values(1648791213000,NULL,NULL,NULL,NULL);
+
+$loop_count = 0
+
+loop7:
+sleep 50
+sql select * from test.streamt2 order by c1, c2, c3;
+
+$loop_count = $loop_count + 1
+if $loop_count == 20 then
+ return -1
+endi
+
+if $data01 != 2 then
+ print =====data01=$data01
+ goto loop7
+endi
+
+if $data02 != NULL then
+ print =====data02=$data02
+ goto loop7
+endi
+
+sql insert into t1 values(1648791213000,1,2,3,1.0);
+sql insert into t2 values(1648791213000,1,2,3,1.0);
+
+$loop_count = 0
+
+loop8:
+sleep 50
+sql select * from test.streamt2 order by c1, c2, c3;
+
+$loop_count = $loop_count + 1
+if $loop_count == 20 then
+ return -1
+endi
+
+if $data01 != 2 then
+ print =====data01=$data01
+ goto loop8
+endi
+
+if $data02 != 1 then
+ print =====data02=$data02
+ goto loop8
+endi
+
+sql insert into t1 values(1648791213000,2,2,3,1.0);
+sql insert into t2 values(1648791213000,2,2,3,1.0);
+
+$loop_count = 0
+
+loop9:
+sleep 50
+sql select * from test.streamt2 order by c1, c2, c3;
+
+$loop_count = $loop_count + 1
+if $loop_count == 20 then
+ return -1
+endi
+
+if $data01 != 2 then
+ print =====data01=$data01
+ goto loop9
+endi
+
+if $data02 != 2 then
+ print =====data02=$data02
+ goto loop9
+endi
+
+sql insert into t1 values(1648791213000,2,2,3,1.0);
+sql insert into t1 values(1648791213001,2,2,3,1.0);
+sql insert into t1 values(1648791213002,2,2,3,1.0);
+sql insert into t1 values(1648791213002,1,2,3,1.0);
+sql insert into t2 values(1648791213000,2,2,3,1.0);
+sql insert into t2 values(1648791213001,2,2,3,1.0);
+sql insert into t2 values(1648791213002,2,2,3,1.0);
+sql insert into t2 values(1648791213002,1,2,3,1.0);
+
+$loop_count = 0
+
+loop10:
+sleep 50
+sql select * from test.streamt2 order by c1, c2, c3;
+
+$loop_count = $loop_count + 1
+if $loop_count == 20 then
+ return -1
+endi
+
+if $data01 != 2 then
+ print =====data01=$data01
+ goto loop10
+endi
+
+if $data02 != 1 then
+ print =====data02=$data02
+ goto loop10
+endi
+
+if $data11 != 4 then
+  print =====data11=$data11
+  goto loop10
+endi
+
+if $data12 != 2 then
+ print =====data12=$data12
+ goto loop10
+endi
+
+sql insert into t1 values(1648791223000,1,2,3,1.0);
+sql insert into t1 values(1648791223001,1,2,3,1.0);
+sql insert into t1 values(1648791223002,3,2,3,1.0);
+sql insert into t1 values(1648791223003,3,2,3,1.0);
+sql insert into t1 values(1648791213001,1,2,3,1.0) (1648791223001,2,2,3,1.0) (1648791223003,1,2,3,1.0);
+sql insert into t2 values(1648791223000,1,2,3,1.0);
+sql insert into t2 values(1648791223001,1,2,3,1.0);
+sql insert into t2 values(1648791223002,3,2,3,1.0);
+sql insert into t2 values(1648791223003,3,2,3,1.0);
+sql insert into t2 values(1648791213001,1,2,3,1.0) (1648791223001,2,2,3,1.0) (1648791223003,1,2,3,1.0);
+
+$loop_count = 0
+
+loop11:
+sleep 50
+sql select * from test.streamt2 order by c1, c2, c3;
+
+$loop_count = $loop_count + 1
+if $loop_count == 20 then
+ return -1
+endi
+
+if $data01 != 2 then
+ print =====data01=$data01
+ goto loop11
+endi
+
+if $data02 != 2 then
+ print =====data02=$data02
+ goto loop11
+endi
+
+if $data11 != 4 then
+ print =====data11=$data11
+ goto loop11
+endi
+
+if $data12 != 1 then
+ print =====data12=$data12
+ goto loop11
+endi
+
+if $data21 != 2 then
+ print =====data21=$data21
+ goto loop11
+endi
+
+if $data22 != 2 then
+ print =====data22=$data22
+ goto loop11
+endi
+
+if $data31 != 2 then
+ print =====data31=$data31
+ goto loop11
+endi
+
+if $data32 != 3 then
+ print =====data32=$data32
+ goto loop11
+endi
+
+if $data41 != 4 then
+ print =====data41=$data41
+ goto loop11
+endi
+
+if $data42 != 1 then
+ print =====data42=$data42
+ goto loop11
+endi
+
+sql drop stream if exists streams4;
+sql drop database if exists test4;
+sql create database test4 vgroups 4;
+sql use test4;
+sql create stable st(ts timestamp, a int, b int, c int, d double) tags(ta int,tb int,tc int);
+sql create table t1 using st tags(1,1,1);
+sql create table t2 using st tags(2,2,2);
+sql create table t3 using st tags(2,2,2);
+sql create table t4 using st tags(2,2,2);
+sql create stream streams4 trigger at_once into test.streamt4 as select _wstart c1, count(*) c2, max(a) c3 from st partition by a interval(10s);
+
+sql insert into t1 values(1648791213000,2,2,3,1.0);
+sql insert into t2 values(1648791213000,2,2,3,1.0);
+sql insert into t3 values(1648791213000,2,2,3,1.0);
+sql insert into t4 values(1648791213000,2,2,3,1.0);
+sql insert into t4 values(1648791213000,1,2,3,1.0);
+
+$loop_count = 0
+
+loop13:
+sleep 50
+sql select * from test.streamt4 order by c1, c2, c3;
+
+$loop_count = $loop_count + 1
+if $loop_count == 20 then
+ return -1
+endi
+
+if $rows != 2 then
+ print =====rows=$rows
+ goto loop13
+endi
+
+if $data01 != 1 then
+ print =====data01=$data01
+ goto loop13
+endi
+
+if $data02 != 1 then
+ print =====data02=$data02
+ goto loop13
+endi
+
+if $data11 != 3 then
+ print =====data11=$data11
+ goto loop13
+endi
+
+if $data12 != 2 then
+ print =====data12=$data12
+ goto loop13
+endi
+
+sql insert into t4 values(1648791213000,2,2,3,1.0);
+sql insert into t1 values(1648791233000,2,2,3,1.0);
+
+
+sql insert into t1 values(1648791213000,1,2,3,1.0);
+
+$loop_count = 0
+
+loop14:
+sleep 50
+sql select * from test.streamt4 order by c1, c2, c3;
+
+$loop_count = $loop_count + 1
+if $loop_count == 20 then
+ return -1
+endi
+
+if $rows != 3 then
+ print =====rows=$rows
+ goto loop14
+endi
+
+if $data01 != 1 then
+ print =====data01=$data01
+ goto loop14
+endi
+
+if $data11 != 3 then
+ print =====data11=$data11
+ goto loop14
+endi
+
+if $data21 != 1 then
+ print =====data21=$data21
+ goto loop14
+endi
+
+$loop_all = $loop_all + 1
+print ============loop_all=$loop_all
+
+system sh/stop_dnodes.sh
+
+#goto looptest
\ No newline at end of file
diff --git a/tests/script/tsim/stream/partitionbyColumnSession.sim b/tests/script/tsim/stream/partitionbyColumnSession.sim
new file mode 100644
index 0000000000000000000000000000000000000000..1742d52cf03f9a6d2ec159295495cebfbb39aefc
--- /dev/null
+++ b/tests/script/tsim/stream/partitionbyColumnSession.sim
@@ -0,0 +1,567 @@
+$loop_all = 0
+looptest:
+
+system sh/stop_dnodes.sh
+system sh/deploy.sh -n dnode1 -i 1
+system sh/exec.sh -n dnode1 -s start
+sleep 50
+sql connect
+
+sql drop stream if exists streams0;
+sql drop stream if exists streams1;
+sql drop stream if exists streams2;
+sql drop stream if exists streams3;
+sql drop stream if exists streams4;
+sql drop database if exists test;
+sql create database test vgroups 1;
+sql use test;
+sql create table t1(ts timestamp, a int, b int , c int, d double);
+sql create stream streams0 trigger at_once into streamt as select _wstart c1, count(*) c2, max(a) c3, _group_key(a) c4 from t1 partition by a session(ts, 5s);
+
+sql insert into t1 values(1648791213000,NULL,NULL,NULL,NULL);
+sql insert into t1 values(1648791213000,NULL,NULL,NULL,NULL);
+
+$loop_count = 0
+
+loop0:
+sleep 50
+sql select * from streamt order by c1, c4, c2, c3;
+
+$loop_count = $loop_count + 1
+if $loop_count == 20 then
+ return -1
+endi
+
+if $data01 != 1 then
+ print =====data01=$data01
+ goto loop0
+endi
+
+if $data02 != NULL then
+ print =====data02=$data02
+ goto loop0
+endi
+
+
+sql insert into t1 values(1648791213000,1,2,3,1.0);
+
+$loop_count = 0
+
+loop1:
+sleep 50
+sql select * from streamt order by c1, c4, c2, c3;
+
+$loop_count = $loop_count + 1
+if $loop_count == 20 then
+ return -1
+endi
+
+if $data01 != 1 then
+ print =====data01=$data01
+ goto loop1
+endi
+
+if $data02 != 1 then
+ print =====data02=$data02
+ goto loop1
+endi
+
+sql insert into t1 values(1648791213000,2,2,3,1.0);
+
+$loop_count = 0
+
+loop2:
+sleep 50
+sql select * from streamt order by c1, c4, c2, c3;
+
+$loop_count = $loop_count + 1
+if $loop_count == 20 then
+ return -1
+endi
+
+if $data01 != 1 then
+ print =====data01=$data01
+ goto loop2
+endi
+
+if $data02 != 2 then
+ print =====data02=$data02
+ goto loop2
+endi
+
+sql insert into t1 values(1648791213000,2,2,3,1.0);
+sql insert into t1 values(1648791213001,2,2,3,1.0);
+sql insert into t1 values(1648791213002,2,2,3,1.0);
+sql insert into t1 values(1648791213002,1,2,3,1.0);
+
+$loop_count = 0
+
+loop3:
+sleep 50
+sql select * from streamt order by c1, c4, c2, c3;
+
+$loop_count = $loop_count + 1
+if $loop_count == 20 then
+ return -1
+endi
+
+if $data01 != 2 then
+ print =====data01=$data01
+ goto loop3
+endi
+
+if $data02 != 2 then
+ print =====data02=$data02
+ goto loop3
+endi
+
+if $data11 != 1 then
+ print =====data11=$data11
+ goto loop3
+endi
+
+if $data12 != 1 then
+ print =====data12=$data12
+ goto loop3
+endi
+
+sql insert into t1 values(1648791223000,1,2,3,1.0);
+sql insert into t1 values(1648791223001,1,2,3,1.0);
+sql insert into t1 values(1648791223002,3,2,3,1.0);
+sql insert into t1 values(1648791223003,3,2,3,1.0);
+sql insert into t1 values(1648791213001,1,2,3,1.0) (1648791223001,2,2,3,1.0) (1648791223003,1,2,3,1.0);
+
+$loop_count = 0
+
+loop4:
+sleep 50
+sql select * from streamt order by c1, c4, c2, c3;
+
+$loop_count = $loop_count + 1
+if $loop_count == 20 then
+ return -1
+endi
+
+if $data01 != 1 then
+ print =====data01=$data01
+ goto loop4
+endi
+
+if $data02 != 2 then
+ print =====data02=$data02
+ goto loop4
+endi
+
+if $data11 != 2 then
+ print =====data11=$data11
+ goto loop4
+endi
+
+if $data12 != 1 then
+ print =====data12=$data12
+ goto loop4
+endi
+
+if $data21 != 2 then
+ print =====data21=$data21
+ goto loop4
+endi
+
+if $data22 != 1 then
+ print =====data22=$data22
+ goto loop4
+endi
+
+if $data31 != 1 then
+ print =====data31=$data31
+ goto loop4
+endi
+
+if $data32 != 2 then
+ print =====data32=$data32
+ goto loop4
+endi
+
+if $data41 != 1 then
+ print =====data41=$data41
+ goto loop4
+endi
+
+if $data42 != 3 then
+ print =====data42=$data42
+ goto loop4
+endi
+
+sql drop database if exists test1;
+sql create database test1 vgroups 1;
+sql use test1;
+sql create table t1(ts timestamp, a int, b int , c int, d double);
+sql create stream streams1 trigger at_once into streamt1 as select _wstart c1, count(*) c2, max(c) c3, _group_key(a+b) c4 from t1 partition by a+b session(ts, 5s);
+
+sql insert into t1 values(1648791213000,NULL,NULL,NULL,NULL);
+sql insert into t1 values(1648791213000,NULL,NULL,NULL,NULL);
+sql insert into t1 values(1648791213000,1,2,1,1.0);
+sql insert into t1 values(1648791213001,2,1,2,2.0);
+sql insert into t1 values(1648791213001,1,2,3,2.0);
+
+$loop_count = 0
+
+loop5:
+sleep 50
+sql select * from streamt1 order by c1, c4, c2, c3;
+
+$loop_count = $loop_count + 1
+if $loop_count == 20 then
+ return -1
+endi
+
+if $data01 != 2 then
+ print =====data01=$data01
+ goto loop5
+endi
+
+sql insert into t1 values(1648791223000,1,2,4,2.0);
+sql insert into t1 values(1648791223001,1,2,5,2.0);
+sql insert into t1 values(1648791223002,1,2,5,2.0);
+sql insert into t1 values(1648791213001,1,1,6,2.0) (1648791223002,1,1,7,2.0);
+
+$loop_count = 0
+
+loop6:
+sleep 50
+sql select * from streamt1 order by c1, c4, c2, c3;
+
+$loop_count = $loop_count + 1
+if $loop_count == 20 then
+ return -1
+endi
+
+if $data01 != 1 then
+ print =====data01=$data01
+ goto loop6
+endi
+
+if $data02 != 1 then
+ print =====data02=$data02
+ goto loop6
+endi
+
+if $data11 != 1 then
+ print =====data11=$data11
+ goto loop6
+endi
+
+if $data12 != 6 then
+ print =====data12=$data12
+ goto loop6
+endi
+
+if $data21 != 2 then
+ print =====data21=$data21
+ goto loop6
+endi
+
+if $data22 != 5 then
+ print =====data22=$data22
+ goto loop6
+endi
+
+if $data31 != 1 then
+ print =====data31=$data31
+ goto loop6
+endi
+
+if $data32 != 7 then
+ print =====data32=$data32
+ goto loop6
+endi
+
+sql drop database if exists test2;
+sql create database test2 vgroups 4;
+sql use test2;
+sql create stable st(ts timestamp, a int, b int, c int, d double) tags(ta int,tb int,tc int);
+sql create table t1 using st tags(1,1,1);
+sql create table t2 using st tags(2,2,2);
+sql create stream streams2 trigger at_once into test.streamt2 as select _wstart c1, count(*) c2, max(a) c3 from st partition by a session(ts, 5s);
+
+sql insert into t1 values(1648791213000,NULL,NULL,NULL,NULL);
+sql insert into t1 values(1648791213000,NULL,NULL,NULL,NULL);
+sql insert into t2 values(1648791213000,NULL,NULL,NULL,NULL);
+sql insert into t2 values(1648791213000,NULL,NULL,NULL,NULL);
+
+$loop_count = 0
+
+loop7:
+sleep 50
+sql select * from test.streamt2 order by c1, c2, c3;
+
+$loop_count = $loop_count + 1
+if $loop_count == 20 then
+ return -1
+endi
+
+if $data01 != 2 then
+ print =====data01=$data01
+ goto loop7
+endi
+
+if $data02 != NULL then
+ print =====data02=$data02
+ goto loop7
+endi
+
+sql insert into t1 values(1648791213000,1,2,3,1.0);
+sql insert into t2 values(1648791213000,1,2,3,1.0);
+
+$loop_count = 0
+
+loop8:
+sleep 50
+sql select * from test.streamt2 order by c1, c2, c3;
+
+$loop_count = $loop_count + 1
+if $loop_count == 20 then
+ return -1
+endi
+
+if $data01 != 2 then
+ print =====data01=$data01
+ goto loop8
+endi
+
+if $data02 != 1 then
+ print =====data02=$data02
+ goto loop8
+endi
+
+sql insert into t1 values(1648791213000,2,2,3,1.0);
+sql insert into t2 values(1648791213000,2,2,3,1.0);
+
+$loop_count = 0
+
+loop9:
+sleep 50
+sql select * from test.streamt2 order by c1, c2, c3;
+
+$loop_count = $loop_count + 1
+if $loop_count == 20 then
+ return -1
+endi
+
+if $data01 != 2 then
+ print =====data01=$data01
+ goto loop9
+endi
+
+if $data02 != 2 then
+ print =====data02=$data02
+ goto loop9
+endi
+
+sql insert into t1 values(1648791213000,2,2,3,1.0);
+sql insert into t1 values(1648791213001,2,2,3,1.0);
+sql insert into t1 values(1648791213002,2,2,3,1.0);
+sql insert into t1 values(1648791213002,1,2,3,1.0);
+sql insert into t2 values(1648791213000,2,2,3,1.0);
+sql insert into t2 values(1648791213001,2,2,3,1.0);
+sql insert into t2 values(1648791213002,2,2,3,1.0);
+sql insert into t2 values(1648791213002,1,2,3,1.0);
+
+$loop_count = 0
+
+loop10:
+sleep 50
+sql select * from test.streamt2 order by c1, c2, c3;
+
+$loop_count = $loop_count + 1
+if $loop_count == 20 then
+ return -1
+endi
+
+if $data01 != 4 then
+ print =====data01=$data01
+ goto loop10
+endi
+
+if $data02 != 2 then
+ print =====data02=$data02
+ goto loop10
+endi
+
+if $data11 != 2 then
+  print =====data11=$data11
+  goto loop10
+endi
+
+if $data12 != 1 then
+ print =====data12=$data12
+ goto loop10
+endi
+
+sql insert into t1 values(1648791223000,1,2,3,1.0);
+sql insert into t1 values(1648791223001,1,2,3,1.0);
+sql insert into t1 values(1648791223002,3,2,3,1.0);
+sql insert into t1 values(1648791223003,3,2,3,1.0);
+sql insert into t1 values(1648791213001,1,2,3,1.0) (1648791223001,2,2,3,1.0) (1648791223003,1,2,3,1.0);
+sql insert into t2 values(1648791223000,1,2,3,1.0);
+sql insert into t2 values(1648791223001,1,2,3,1.0);
+sql insert into t2 values(1648791223002,3,2,3,1.0);
+sql insert into t2 values(1648791223003,3,2,3,1.0);
+sql insert into t2 values(1648791213001,1,2,3,1.0) (1648791223001,2,2,3,1.0) (1648791223003,1,2,3,1.0);
+
+$loop_count = 0
+
+loop11:
+sleep 50
+sql select * from test.streamt2 order by c1, c2, c3;
+
+$loop_count = $loop_count + 1
+if $loop_count == 20 then
+ return -1
+endi
+
+if $data01 != 2 then
+ print =====data01=$data01
+ goto loop11
+endi
+
+if $data02 != 2 then
+ print =====data02=$data02
+ goto loop11
+endi
+
+if $data11 != 4 then
+ print =====data11=$data11
+ goto loop11
+endi
+
+if $data12 != 1 then
+ print =====data12=$data12
+ goto loop11
+endi
+
+if $data21 != 4 then
+ print =====data21=$data21
+ goto loop11
+endi
+
+if $data22 != 1 then
+ print =====data22=$data22
+ goto loop11
+endi
+
+if $data31 != 2 then
+ print =====data31=$data31
+ goto loop11
+endi
+
+if $data32 != 2 then
+ print =====data32=$data32
+ goto loop11
+endi
+
+if $data41 != 2 then
+ print =====data41=$data41
+ goto loop11
+endi
+
+if $data42 != 3 then
+ print =====data42=$data42
+ goto loop11
+endi
+
+sql drop database if exists test4;
+sql create database test4 vgroups 4;
+sql use test4;
+sql create stable st(ts timestamp, a int, b int, c int, d double) tags(ta int,tb int,tc int);
+sql create table t1 using st tags(1,1,1);
+sql create table t2 using st tags(2,2,2);
+sql create table t3 using st tags(2,2,2);
+sql create table t4 using st tags(2,2,2);
+sql create stream streams4 trigger at_once into test.streamt4 as select _wstart c1, count(*) c2, max(a) c3 from st partition by a session(ts, 5s);
+
+sql insert into t1 values(1648791213000,2,2,3,1.0);
+sql insert into t2 values(1648791213000,2,2,3,1.0);
+sql insert into t3 values(1648791213000,2,2,3,1.0);
+sql insert into t4 values(1648791213000,2,2,3,1.0);
+sql insert into t4 values(1648791213000,1,2,3,1.0);
+
+$loop_count = 0
+
+loop13:
+sleep 50
+sql select * from test.streamt4 order by c1, c2, c3;
+
+$loop_count = $loop_count + 1
+if $loop_count == 20 then
+ return -1
+endi
+
+if $rows != 2 then
+ print =====rows=$rows
+ goto loop13
+endi
+
+if $data01 != 1 then
+ print =====data01=$data01
+ goto loop13
+endi
+
+if $data02 != 1 then
+ print =====data02=$data02
+ goto loop13
+endi
+
+if $data11 != 3 then
+ print =====data11=$data11
+ goto loop13
+endi
+
+if $data12 != 2 then
+ print =====data12=$data12
+ goto loop13
+endi
+
+sql insert into t4 values(1648791213000,2,2,3,1.0);
+sql insert into t1 values(1648791233000,2,2,3,1.0);
+
+
+sql insert into t1 values(1648791213000,1,2,3,1.0);
+
+$loop_count = 0
+
+loop14:
+sleep 50
+sql select * from test.streamt4 order by c1, c2, c3;
+
+$loop_count = $loop_count + 1
+if $loop_count == 20 then
+ return -1
+endi
+
+if $rows != 3 then
+ print =====rows=$rows
+ goto loop14
+endi
+
+if $data01 != 1 then
+ print =====data01=$data01
+ goto loop14
+endi
+
+if $data11 != 3 then
+ print =====data11=$data11
+ goto loop14
+endi
+
+if $data21 != 1 then
+ print =====data21=$data21
+ goto loop14
+endi
+
+system sh/stop_dnodes.sh
+
+$loop_all = $loop_all + 1
+print ============loop_all=$loop_all
+
+#goto looptest
\ No newline at end of file
diff --git a/tests/script/tsim/stream/partitionbyColumnState.sim b/tests/script/tsim/stream/partitionbyColumnState.sim
new file mode 100644
index 0000000000000000000000000000000000000000..75d01b17ec6495be9644ce7e7db401a0147b3a52
--- /dev/null
+++ b/tests/script/tsim/stream/partitionbyColumnState.sim
@@ -0,0 +1,279 @@
+$loop_all = 0
+looptest:
+
+system sh/stop_dnodes.sh
+system sh/deploy.sh -n dnode1 -i 1
+system sh/exec.sh -n dnode1 -s start
+sleep 50
+sql connect
+
+sql drop database if exists test;
+sql create database test vgroups 1;
+sql use test;
+sql create table t1(ts timestamp, a int, b int , c int, d double);
+sql create stream streams0 trigger at_once into streamt as select _wstart c1, count(*) c2, max(a) c3, _group_key(a) c4 from t1 partition by a state_window(b);
+
+sql insert into t1 values(1648791213000,NULL,NULL,NULL,NULL);
+sql insert into t1 values(1648791213000,NULL,NULL,NULL,NULL);
+
+$loop_count = 0
+
+loop0:
+sleep 300
+sql select * from streamt order by c1, c4, c2, c3;
+
+$loop_count = $loop_count + 1
+if $loop_count == 10 then
+ return -1
+endi
+
+if $data01 != 1 then
+ print =====data01=$data01
+ goto loop0
+endi
+
+if $data02 != NULL then
+ print =====data02=$data02
+ goto loop0
+endi
+
+
+sql insert into t1 values(1648791213000,1,1,3,1.0);
+
+$loop_count = 0
+
+loop1:
+sleep 300
+sql select * from streamt order by c1, c4, c2, c3;
+
+$loop_count = $loop_count + 1
+if $loop_count == 10 then
+ return -1
+endi
+
+if $data01 != 1 then
+ print =====data01=$data01
+ goto loop1
+endi
+
+if $data02 != 1 then
+ print =====data02=$data02
+ goto loop1
+endi
+
+sql insert into t1 values(1648791213000,2,1,3,1.0);
+
+$loop_count = 0
+
+loop2:
+sleep 300
+sql select * from streamt order by c1, c4, c2, c3;
+
+$loop_count = $loop_count + 1
+if $loop_count == 10 then
+ return -1
+endi
+
+if $data01 != 1 then
+ print =====data01=$data01
+ goto loop2
+endi
+
+if $data02 != 2 then
+ print =====data02=$data02
+ goto loop2
+endi
+
+sql insert into t1 values(1648791213000,2,1,3,1.0);
+sql insert into t1 values(1648791213001,2,1,3,1.0);
+sql insert into t1 values(1648791213002,2,1,3,1.0);
+sql insert into t1 values(1648791213002,1,1,3,1.0);
+
+$loop_count = 0
+
+loop3:
+sleep 300
+sql select * from streamt order by c1, c4, c2, c3;
+
+$loop_count = $loop_count + 1
+if $loop_count == 10 then
+ return -1
+endi
+
+if $data01 != 2 then
+ print =====data01=$data01
+ goto loop3
+endi
+
+if $data02 != 2 then
+ print =====data02=$data02
+ goto loop3
+endi
+
+if $data11 != 1 then
+ print =====data11=$data11
+ goto loop3
+endi
+
+if $data12 != 1 then
+ print =====data12=$data12
+ goto loop3
+endi
+
+sql insert into t1 values(1648791223000,1,2,3,1.0);
+sql insert into t1 values(1648791223001,1,2,3,1.0);
+sql insert into t1 values(1648791223002,3,2,3,1.0);
+sql insert into t1 values(1648791223003,3,2,3,1.0);
+sql insert into t1 values(1648791213001,1,1,3,1.0) (1648791223001,2,2,3,1.0) (1648791223003,1,2,3,1.0);
+
+$loop_count = 0
+
+loop4:
+sleep 300
+sql select * from streamt order by c1, c4, c2, c3;
+
+$loop_count = $loop_count + 1
+if $loop_count == 10 then
+ return -1
+endi
+
+if $data01 != 1 then
+ print =====data01=$data01
+ goto loop4
+endi
+
+if $data02 != 2 then
+ print =====data02=$data02
+ goto loop4
+endi
+
+if $data11 != 2 then
+ print =====data11=$data11
+ goto loop4
+endi
+
+if $data12 != 1 then
+ print =====data12=$data12
+ goto loop4
+endi
+
+if $data21 != 2 then
+ print =====data21=$data21
+ goto loop4
+endi
+
+if $data22 != 1 then
+ print =====data22=$data22
+ goto loop4
+endi
+
+if $data31 != 1 then
+ print =====data31=$data31
+ goto loop4
+endi
+
+if $data32 != 2 then
+ print =====data32=$data32
+ goto loop4
+endi
+
+if $data41 != 1 then
+ print =====data41=$data41
+ goto loop4
+endi
+
+if $data42 != 3 then
+ print =====data42=$data42
+ goto loop4
+endi
+
+sql drop database if exists test1;
+sql create database test1 vgroups 1;
+sql use test1;
+sql create table t1(ts timestamp, a int, b int , c int, d int);
+sql create stream streams1 trigger at_once into streamt1 as select _wstart c1, count(*) c2, max(d) c3, _group_key(a+b) c4 from t1 partition by a+b state_window(c);
+
+sql insert into t1 values(1648791213000,NULL,NULL,NULL,NULL);
+sql insert into t1 values(1648791213000,NULL,NULL,NULL,NULL);
+sql insert into t1 values(1648791213000,1,2,1,1);
+sql insert into t1 values(1648791213001,2,1,1,2);
+sql insert into t1 values(1648791213001,1,2,1,3);
+
+$loop_count = 0
+
+loop5:
+sleep 300
+sql select * from streamt1 order by c1, c4, c2, c3;
+
+$loop_count = $loop_count + 1
+if $loop_count == 10 then
+ return -1
+endi
+
+if $data01 != 2 then
+ print =====data01=$data01
+ goto loop5
+endi
+
+sql insert into t1 values(1648791223000,1,2,2,4);
+sql insert into t1 values(1648791223001,1,2,2,5);
+sql insert into t1 values(1648791223002,1,2,2,6);
+sql insert into t1 values(1648791213001,1,1,1,7) (1648791223002,1,1,2,8);
+
+$loop_count = 0
+
+loop6:
+sleep 300
+sql select * from streamt1 order by c1, c4, c2, c3;
+
+$loop_count = $loop_count + 1
+if $loop_count == 10 then
+ return -1
+endi
+
+if $data01 != 1 then
+ print =====data01=$data01
+ goto loop6
+endi
+
+if $data02 != 1 then
+ print =====data02=$data02
+ goto loop6
+endi
+
+if $data11 != 1 then
+ print =====data11=$data11
+ goto loop6
+endi
+
+if $data12 != 7 then
+ print =====data12=$data12
+ goto loop6
+endi
+
+if $data21 != 2 then
+ print =====data21=$data21
+ goto loop6
+endi
+
+if $data22 != 5 then
+ print =====data22=$data22
+ goto loop6
+endi
+
+if $data31 != 1 then
+ print =====data31=$data31
+ goto loop6
+endi
+
+if $data32 != 8 then
+ print =====data32=$data32
+ goto loop6
+endi
+
+system sh/stop_dnodes.sh
+
+$loop_all = $loop_all + 1
+print ============loop_all=$loop_all
+
+#goto looptest
diff --git a/tests/script/tsim/sync/create-mnode.sim b/tests/script/tsim/sync/create-mnode.sim
new file mode 100644
index 0000000000000000000000000000000000000000..cfaafc8208e7e10f7b53be76a6eaa94e718efbbd
--- /dev/null
+++ b/tests/script/tsim/sync/create-mnode.sim
@@ -0,0 +1,20 @@
+system sh/stop_dnodes.sh
+system sh/deploy.sh -n dnode1 -i 1
+system sh/deploy.sh -n dnode2 -i 2
+system sh/deploy.sh -n dnode3 -i 3
+system sh/deploy.sh -n dnode4 -i 4
+
+system sh/cfg.sh -n dnode1 -c supportVnodes -v 0
+
+system sh/exec.sh -n dnode1 -s start
+system sh/exec.sh -n dnode2 -s start
+system sh/exec.sh -n dnode3 -s start
+system sh/exec.sh -n dnode4 -s start
+
+sql connect
+sql create dnode $hostname port 7200
+sql create dnode $hostname port 7300
+sql create dnode $hostname port 7400
+
+sql create mnode on dnode 2
+sql create mnode on dnode 3
diff --git a/tests/script/tsim/table/hash.sim b/tests/script/tsim/table/hash.sim
new file mode 100644
index 0000000000000000000000000000000000000000..664f86713720e1ae1969027508e2931d23397f08
--- /dev/null
+++ b/tests/script/tsim/table/hash.sim
@@ -0,0 +1,84 @@
+system sh/stop_dnodes.sh
+system sh/deploy.sh -n dnode1 -i 1
+system sh/exec.sh -n dnode1 -s start
+sql connect
+
+#=========== prepare
+#sql create database d1 vgroups 2
+sql create database d1 vgroups 2 table_prefix 3 table_suffix 2
+sql select * from information_schema.ins_databases
+print $data(d1)[27] $data(d1)[28]
+if $data(d1)[27] != 3 then
+ return -1
+endi
+if $data(d1)[28] != 2 then
+ return -1
+endi
+
+sql use d1;
+sql create table st (ts timestamp, i int) tags (j int);
+sql create table st_ct_1 using st tags(3) st_ct_2 using st tags(4) st_ct_3 using st tags(5) st_ct_4 using st tags(6) st_ct_5 using st tags(7)
+sql insert into st_ct_1 values(now+1s, 1)
+sql insert into st_ct_1 values(now+2s, 2)
+sql insert into st_ct_1 values(now+3s, 3)
+sql insert into st_ct_2 values(now+1s, 1)
+sql insert into st_ct_2 values(now+2s, 2)
+sql insert into st_ct_2 values(now+3s, 3)
+sql insert into st_ct_3 values(now+1s, 1)
+sql insert into st_ct_3 values(now+2s, 2)
+sql insert into st_ct_3 values(now+3s, 2)
+sql insert into st_ct_4 values(now+1s, 1)
+sql insert into st_ct_4 values(now+2s, 2)
+sql insert into st_ct_4 values(now+3s, 2)
+sql insert into st_ct_5 values(now+1s, 1)
+sql insert into st_ct_5 values(now+2s, 2)
+sql insert into st_ct_5 values(now+3s, 2)
+
+# check query
+sql select * from st
+if $rows != 15 then
+ return -1
+endi
+
+# check table vgroup
+sql select * from information_schema.ins_tables where db_name = 'd1'
+if $data(st_ct_1)[6] != 2 then
+ return -1
+endi
+if $data(st_ct_2)[6] != 2 then
+ return -1
+endi
+if $data(st_ct_3)[6] != 2 then
+ return -1
+endi
+if $data(st_ct_4)[6] != 2 then
+ return -1
+endi
+if $data(st_ct_5)[6] != 2 then
+ return -1
+endi
+
+# check invalid table name
+sql create table c1 using st tags(3)
+sql create table c12 using st tags(3)
+sql create table c123 using st tags(3)
+sql create table c1234 using st tags(3)
+sql create table c12345 using st tags(3)
+sql select * from information_schema.ins_tables where db_name = 'd1'
+if $data(c1)[6] != 2 then
+ return -1
+endi
+if $data(c12)[6] != 3 then
+ return -1
+endi
+if $data(c123)[6] != 2 then
+ return -1
+endi
+if $data(c1234)[6] != 3 then
+ return -1
+endi
+if $data(c12345)[6] != 3 then
+ return -1
+endi
+
+system sh/exec.sh -n dnode1 -s stop -x SIGINT
diff --git a/tests/script/tsim/user/privilege_sysinfo.sim b/tests/script/tsim/user/privilege_sysinfo.sim
index 25c1a84db699a8cdef5678abaf728f4a93690bde..86f95755d09f7e2f1c0654b94f63f471f9a074d0 100644
--- a/tests/script/tsim/user/privilege_sysinfo.sim
+++ b/tests/script/tsim/user/privilege_sysinfo.sim
@@ -8,7 +8,20 @@ sql create user sysinfo0 pass 'taosdata'
sql create user sysinfo1 pass 'taosdata'
sql alter user sysinfo0 sysinfo 0
sql alter user sysinfo1 sysinfo 1
+
sql create database db
+sql use db
+sql create table db.stb (ts timestamp, i int) tags (t int)
+sql create table db.ctb using db.stb tags (1)
+sql create table db.ntb (ts timestamp, i int)
+sql insert into db.ctb values (now, 1);
+sql insert into db.ntb values (now, 1);
+sql select * from db.stb
+sql select * from db.ctb
+sql select * from db.ntb
+
+sql create database d2
+sql GRANT all ON d2.* to sysinfo0;
print user sysinfo0 login
sql close
@@ -17,11 +30,31 @@ sql connect sysinfo0
print =============== check oper
sql_error create user u1 pass 'u1'
sql_error drop user sysinfo1
-sql_error alter user sysinfo1 pass '1'
sql_error alter user sysinfo0 pass '1'
+sql_error alter user sysinfo0 enable 0
+sql_error alter user sysinfo0 enable 1
+sql_error alter user sysinfo1 pass '1'
+sql_error alter user sysinfo1 enable 1
+sql_error alter user sysinfo1 enable 1
+sql_error GRANT read ON db.* to sysinfo0;
+sql_error GRANT read ON *.* to sysinfo0;
+sql_error REVOKE read ON db.* from sysinfo0;
+sql_error REVOKE read ON *.* from sysinfo0;
+sql_error GRANT write ON db.* to sysinfo0;
+sql_error GRANT write ON *.* to sysinfo0;
+sql_error REVOKE write ON db.* from sysinfo0;
+sql_error REVOKE write ON *.* from sysinfo0;
+sql_error REVOKE write ON *.* from sysinfo0;
sql_error create dnode $hostname port 7200
sql_error drop dnode 1
+sql_error alter dnode 1 'debugFlag 135'
+sql_error alter dnode 1 'dDebugFlag 131'
+sql_error alter dnode 1 'resetlog'
+sql_error alter dnode 1 'monitor' '1'
+sql_error alter dnode 1 'monitor' '0'
+sql_error alter dnode 1 'monitor 1'
+sql_error alter dnode 1 'monitor 0'
sql_error create qnode on dnode 1
sql_error drop qnode on dnode 1
@@ -44,20 +77,106 @@ sql_error create database d1
sql_error drop database db
sql_error use db
sql_error alter database db replica 1;
+sql_error alter database db keep 21
sql_error show db.vgroups
-sql select * from information_schema.ins_stables where db_name = 'db'
-sql select * from information_schema.ins_tables where db_name = 'db'
+
+sql_error create table db.stb1 (ts timestamp, i int) tags (t int)
+sql_error create table db.ctb1 using db.stb1 tags (1)
+sql_error create table db.ntb1 (ts timestamp, i int)
+sql_error insert into db.ctb values (now, 1);
+sql_error insert into db.ntb values (now, 1);
+sql_error select * from db.stb
+sql_error select * from db.ctb
+sql_error select * from db.ntb
+
+sql use d2
+sql create table d2.stb2 (ts timestamp, i int) tags (t int)
+sql create table d2.ctb2 using d2.stb2 tags (1)
+sql create table d2.ntb2 (ts timestamp, i int)
+sql insert into d2.ctb2 values (now, 1);
+sql insert into d2.ntb2 values (now, 1);
+sql select * from d2.stb2
+sql select * from d2.ctb2
+sql select * from d2.ntb2
print =============== check show
-sql select * from information_schema.ins_users
+sql_error show users
sql_error show cluster
-sql select * from information_schema.ins_dnodes
-sql select * from information_schema.ins_mnodes
+sql_error select * from information_schema.ins_dnodes
+sql_error select * from information_schema.ins_mnodes
sql_error show snodes
-sql select * from information_schema.ins_qnodes
+sql_error select * from information_schema.ins_qnodes
+sql_error show dnodes
+sql_error show snodes
+sql_error show qnodes
+sql_error show mnodes
sql_error show bnodes
+sql_error show db.vgroups
+sql_error show db.stables
+sql_error show db.tables
+sql_error show indexes from stb from db
+sql show databases
+sql_error show d2.vgroups
+sql show d2.stables
+sql show d2.tables
+sql show indexes from stb2 from d2
+#sql_error show create database db
+sql_error show create table db.stb;
+sql_error show create table db.ctb;
+sql_error show create table db.ntb;
+sql show streams
+sql show consumers
+sql show topics
+sql show subscriptions
+sql show functions
sql_error show grants
+sql show queries
+sql show connections
+sql show apps
+sql show transactions
+sql_error show create database d2
+sql show create table d2.stb2;
+sql show create table d2.ctb2;
+sql show create table d2.ntb2;
+sql_error show variables;
+sql show local variables;
sql_error show dnode 1 variables;
-sql show variables;
+sql_error show variables;
+
+
+print =============== check information_schema
+sql show databases
+if $rows != 3 then
+ return -1
+endi
+
+sql use information_schema;
+sql_error select * from information_schema.ins_dnodes
+sql_error select * from information_schema.ins_mnodes
+sql_error select * from information_schema.ins_modules
+sql_error select * from information_schema.ins_qnodes
+sql_error select * from information_schema.ins_cluster
+sql select * from information_schema.ins_databases
+sql select * from information_schema.ins_functions
+sql select * from information_schema.ins_indexes
+sql select * from information_schema.ins_stables
+sql select * from information_schema.ins_tables
+sql select * from information_schema.ins_tags
+sql select * from information_schema.ins_users
+sql select * from information_schema.ins_topics
+sql select * from information_schema.ins_subscriptions
+sql select * from information_schema.ins_streams
+sql_error select * from information_schema.ins_grants
+sql_error select * from information_schema.ins_vgroups
+sql_error select * from information_schema.ins_configs
+sql_error select * from information_schema.ins_dnode_variables
+
+print =============== check performance_schema
+sql use performance_schema;
+sql select * from performance_schema.perf_connections
+sql select * from performance_schema.perf_queries
+sql select * from performance_schema.perf_consumers
+sql select * from performance_schema.perf_trans
+sql select * from performance_schema.perf_apps
-system sh/exec.sh -n dnode1 -s stop -x SIGINT
+#system sh/exec.sh -n dnode1 -s stop -x SIGINT
diff --git a/tests/script/tsim/valgrind/checkError6.sim b/tests/script/tsim/valgrind/checkError6.sim
index fcc5b04c907852f87c469a3dc9d32c5ba1295327..11a387ed4dffd7258120154adb9d7874ee0b21b5 100644
--- a/tests/script/tsim/valgrind/checkError6.sim
+++ b/tests/script/tsim/valgrind/checkError6.sim
@@ -67,17 +67,17 @@ sql select diff(tbcol) from tb1 where tbcol > 5 and tbcol < 20 order by ts
sql select first(tbcol), last(tbcol) as b from tb1 where ts <= 1601481840000 interval(1m)
sql select count(tbcol), avg(tbcol), max(tbcol), min(tbcol), sum(tbcol), stddev(tbcol) from tb1 where ts <= 1601481840000 partition by tgcol order by tgcol
sql select count(tbcol), avg(tbcol), max(tbcol), min(tbcol), sum(tbcol), stddev(tbcol) from tb1 where ts <= 1601481840000 partition by tgcol interval(1m)
-sql select count(tbcol), avg(tbcol), max(tbcol), min(tbcol), count(tbcol) from tb1 where ts <= 1601481840000 and ts >= 1601481800000 partition by tgcol interval(1m) fill(value, 0)
+sql select count(tbcol), avg(tbcol), max(tbcol), min(tbcol), count(tbcol) from tb1 where ts <= 1601481840000 and ts >= 1601481800000 partition by tgcol interval(1m) fill(value, 0,0,0,0,0)
sql select last_row(*) from tb1 where tbcol > 5 and tbcol < 20
sql select _wstart, _wend, _wduration, _qstart, _qend, count(*) from tb1 interval(10s, 2s) sliding(10s)
-sql select count(tbcol), avg(tbcol), max(tbcol), min(tbcol), count(tbcol) from tb1 where ts <= 1601481840000 and ts >= 1601481800000 partition by tgcol interval(1m) fill(value, 0)
-sql select count(tbcol), avg(tbcol), max(tbcol), min(tbcol), count(tbcol) from tb1 where ts <= 1601481840000 and ts >= 1601481800000 partition by tgcol interval(1m) fill(value, 0) order by tgcol desc
+sql select count(tbcol), avg(tbcol), max(tbcol), min(tbcol), count(tbcol) from tb1 where ts <= 1601481840000 and ts >= 1601481800000 partition by tgcol interval(1m) fill(value, 0,0,0,0,0)
+sql select count(tbcol), avg(tbcol), max(tbcol), min(tbcol), count(tbcol) from tb1 where ts <= 1601481840000 and ts >= 1601481800000 partition by tgcol interval(1m) fill(value, 0,0,0,0,0) order by tgcol desc
sql select log(tbcol), abs(tbcol), pow(tbcol, 2), sqrt(tbcol), sin(tbcol), cos(tbcol), tan(tbcol), asin(tbcol), acos(tbcol), atan(tbcol), ceil(tbcol), floor(tbcol), round(tbcol), atan(tbcol) from tb1
sql select length("abcd1234"), char_length("abcd1234=-+*") from tb1
sql select tbcol4, length(tbcol4), lower(tbcol4), upper(tbcol4), ltrim(tbcol4), rtrim(tbcol4), concat(tbcol4, tbcol5), concat_ws('_', tbcol4, tbcol5), substr(tbcol4, 1, 4) from tb1
sql select * from tb1 where tbcol not in (1,2,3,null);
sql select * from tb1 where tbcol + 3 <> null;
-sql select count(tbcol), avg(tbcol), max(tbcol), min(tbcol), count(tbcol) from tb1 where ts <= 1601481840000 and ts >= 1601481800000 partition by tgcol interval(1m) fill(value, 0)
+sql select count(tbcol), avg(tbcol), max(tbcol), min(tbcol), count(tbcol) from tb1 where ts <= 1601481840000 and ts >= 1601481800000 partition by tgcol interval(1m) fill(value, 0,0,0,0,0)
sql select tbcol5 - tbcol3 from tb1
print =============== step4: stb
@@ -97,8 +97,8 @@ sql select first(tbcol), last(tbcol) as c from stb group by tgcol
sql select first(tbcol), last(tbcol) as b from stb where ts <= 1601481840000 and tbcol2 is null partition by tgcol interval(1m)
sql select first(tbcol), last(tbcol) as b from stb where ts <= 1601481840000 partition by tgcol interval(1m)
sql select count(tbcol), avg(tbcol), max(tbcol), min(tbcol), sum(tbcol), stddev(tbcol) from stb where ts <= 1601481840000 partition by tgcol interval(1m)
-sql select count(tbcol), avg(tbcol), max(tbcol), min(tbcol), count(tbcol) from stb where ts <= 1601481840000 and ts >= 1601481800000 partition by tgcol interval(1m) fill(value, 0)
-sql select count(tbcol), avg(tbcol), max(tbcol), min(tbcol), count(tbcol) from stb where ts <= 1601481840000 and ts >= 1601481800000 and tgcol = 1 partition by tgcol interval(1m) fill(value, 0) order by tgcol desc
+sql select count(tbcol), avg(tbcol), max(tbcol), min(tbcol), count(tbcol) from stb where ts <= 1601481840000 and ts >= 1601481800000 partition by tgcol interval(1m) fill(value, 0,0,0,0,0)
+sql select count(tbcol), avg(tbcol), max(tbcol), min(tbcol), count(tbcol) from stb where ts <= 1601481840000 and ts >= 1601481800000 and tgcol = 1 partition by tgcol interval(1m) fill(value, 0,0,0,0,0) order by tgcol desc
sql select last_row(tbcol), stddev(tbcol) from stb where tbcol > 5 and tbcol < 20 group by tgcol
sql select _wstart, _wend, _wduration, _qstart, _qend, count(*) from stb interval(10s, 2s) sliding(10s)
sql select log(tbcol), abs(tbcol), pow(tbcol, 2), sqrt(tbcol), sin(tbcol), cos(tbcol), tan(tbcol), asin(tbcol), acos(tbcol), atan(tbcol), ceil(tbcol), floor(tbcol), round(tbcol), atan(tbcol) from stb
@@ -108,7 +108,7 @@ sql select * from stb where tbcol not in (1,2,3,null);
sql select * from stb where tbcol + 3 <> null;
sql select count(tbcol), avg(tbcol), sum(tbcol), min(tbcol), max(tbcol), first(tbcol), last(tbcol) from stb where tbcol = 1 and tbcol2 = 1 and tbcol3 = 1 partition by tgcol interval(1d)
sql select _wstart, count(*) from tb1 session(ts, 1m)
-sql select count(tbcol), avg(tbcol), max(tbcol), min(tbcol), count(tbcol) from stb where ts <= 1601481840000 and ts >= 1601481800000 partition by tgcol interval(1m) fill(value, 0)
+sql select count(tbcol), avg(tbcol), max(tbcol), min(tbcol), count(tbcol) from stb where ts <= 1601481840000 and ts >= 1601481800000 partition by tgcol interval(1m) fill(value, 0,0,0,0,0)
sql select tbcol5 - tbcol3 from stb
sql select spread( tbcol2 )/44, spread(tbcol2), 0.204545455 * 44 from stb;
@@ -127,8 +127,8 @@ sql explain analyze select count(*),sum(tbcol) from stb;
sql explain analyze select count(*),sum(tbcol) from stb group by tbcol;
sql explain analyze select * from information_schema.ins_stables;
sql explain analyze verbose true select * from information_schema.ins_stables where db_name='db2';
-sql explain analyze verbose true select count(tbcol), avg(tbcol), max(tbcol), min(tbcol), count(tbcol) from stb where ts <= 1601481840000 and ts >= 1601481800000 partition by tgcol interval(1m) fill(value, 0)
-sql explain select count(tbcol), avg(tbcol), max(tbcol), min(tbcol), count(tbcol) from stb where ts <= 1601481840000 and ts >= 1601481800000 partition by tgcol interval(1m) fill(value, 0)
+sql explain analyze verbose true select count(tbcol), avg(tbcol), max(tbcol), min(tbcol), count(tbcol) from stb where ts <= 1601481840000 and ts >= 1601481800000 partition by tgcol interval(1m) fill(value, 0,0,0,0,0)
+sql explain select count(tbcol), avg(tbcol), max(tbcol), min(tbcol), count(tbcol) from stb where ts <= 1601481840000 and ts >= 1601481800000 partition by tgcol interval(1m) fill(value, 0,0,0,0,0)
print =============== step6: in cast
sql select 1+1n;
@@ -158,6 +158,8 @@ print =============== restart
system sh/exec.sh -n dnode1 -s stop -x SIGINT
system sh/exec.sh -n dnode1 -s start -v
+sleep 1000
+
sql select avg(tbcol) as c from stb
sql select avg(tbcol) as c from stb where ts <= 1601481840000
sql select avg(tbcol) as c from stb where tgcol < 5 and ts <= 1601481840000
diff --git a/tests/system-test/0-others/user_control.py b/tests/system-test/0-others/user_control.py
index 3be59f0adf691f9479cce2c927c9161741bc8130..a20b7b17bccf9a04d52e46d56af2a2d0b0e489fb 100644
--- a/tests/system-test/0-others/user_control.py
+++ b/tests/system-test/0-others/user_control.py
@@ -282,12 +282,12 @@ class TDTestCase:
use.error(f"insert into {DBNAME}.{CTBNAME} (ts) values (now())")
elif check_priv == PRIVILEGES_WRITE:
use.query(f"use {DBNAME}")
- use.query(f"show {DBNAME}.tables")
+ use.error(f"show {DBNAME}.tables")
use.error(f"select * from {DBNAME}.{CTBNAME}")
use.query(f"insert into {DBNAME}.{CTBNAME} (ts) values (now())")
elif check_priv is None:
use.error(f"use {DBNAME}")
- # use.error(f"show {DBNAME}.tables")
+ use.error(f"show {DBNAME}.tables")
use.error(f"show tables")
use.error(f"select * from {DBNAME}.{CTBNAME}")
use.error(f"insert into {DBNAME}.{CTBNAME} (ts) values (now())")
diff --git a/tests/system-test/1-insert/influxdb_line_taosc_insert.py b/tests/system-test/1-insert/influxdb_line_taosc_insert.py
index 25e2378f4611aea030011ed29ecce6b9b96cad84..cae4294bc90c16ad3fed032eff610f5b943d789e 100644
--- a/tests/system-test/1-insert/influxdb_line_taosc_insert.py
+++ b/tests/system-test/1-insert/influxdb_line_taosc_insert.py
@@ -31,7 +31,7 @@ if platform.system().lower() == 'windows':
class TDTestCase:
def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
- tdSql.init(conn.cursor(), logSql)
+ tdSql.init(conn.cursor(), False)
self._conn = conn
def createDb(self, name="test", db_update_tag=0):
@@ -357,7 +357,7 @@ class TDTestCase:
"""
normal tags and cols, one for every elm
"""
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
input_sql, stb_name = self.genFullTypeSql()
self.resCmp(input_sql, stb_name)
@@ -365,7 +365,7 @@ class TDTestCase:
"""
check all normal type
"""
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
full_type_list = ["f", "F", "false", "False", "t", "T", "true", "True"]
for t_type in full_type_list:
input_sql, stb_name = self.genFullTypeSql(c0=t_type, t0=t_type)
@@ -379,7 +379,7 @@ class TDTestCase:
please test :
binary_symbols = '\"abcd`~!@#$%^&*()_-{[}]|:;<.>?lfjal"\'\'"\"'
'''
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
binary_symbols = '"abcd`~!@#$%^&*()_-{[}]|:;<.>?lfjal"'
nchar_symbols = f'L{binary_symbols}'
input_sql, stb_name = self.genFullTypeSql(c7=binary_symbols, c8=nchar_symbols, t7=binary_symbols, t8=nchar_symbols)
@@ -390,7 +390,7 @@ class TDTestCase:
test ts list --> ["1626006833639000000", "1626006833639019us", "1626006833640ms", "1626006834s", "1626006822639022"]
# ! us级时间戳都为0时,数据库中查询显示,但python接口拿到的结果不显示 .000000的情况请确认,目前修改时间处理代码可以通过
"""
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
ts_list = ["1626006833639000000", "1626006833639019us", "1626006833640ms", "1626006834s", "1626006822639022", 0]
for ts in ts_list:
input_sql, stb_name = self.genFullTypeSql(ts=ts)
@@ -401,7 +401,7 @@ class TDTestCase:
check id.index in tags
eg: t0=**,id=**,t1=**
"""
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
input_sql, stb_name = self.genFullTypeSql(id_change_tag=True)
self.resCmp(input_sql, stb_name)
@@ -410,7 +410,7 @@ class TDTestCase:
check id param
eg: id and ID
"""
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
input_sql, stb_name = self.genFullTypeSql(id_upper_tag=True)
self.resCmp(input_sql, stb_name)
input_sql, stb_name = self.genFullTypeSql(id_change_tag=True, id_upper_tag=True)
@@ -420,7 +420,7 @@ class TDTestCase:
"""
id not exist
"""
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
input_sql, stb_name = self.genFullTypeSql(id_noexist_tag=True)
self.resCmp(input_sql, stb_name)
query_sql = f"select tbname from {stb_name}"
@@ -436,10 +436,10 @@ class TDTestCase:
max col count is ??
"""
for input_sql in [self.genLongSql(127, 1)[0], self.genLongSql(1, 4093)[0]]:
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
self._conn.schemaless_insert([input_sql], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value)
for input_sql in [self.genLongSql(129, 1)[0], self.genLongSql(1, 4095)[0]]:
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
try:
self._conn.schemaless_insert([input_sql], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value)
except SchemalessError as err:
@@ -450,7 +450,7 @@ class TDTestCase:
test illegal id name
mix "~!@#$¥%^&*()-+|[]、「」【】;:《》<>?"
"""
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
rstr = list("~!@#$¥%^&*()-+|[]、「」【】;:《》<>?")
for i in rstr:
stb_name=f"aaa{i}bbb"
@@ -462,7 +462,7 @@ class TDTestCase:
"""
id is start with num
"""
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
input_sql = self.genFullTypeSql(tb_name=f"\"1aaabbb\"")[0]
try:
self._conn.schemaless_insert([input_sql], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value)
@@ -473,7 +473,7 @@ class TDTestCase:
"""
check now unsupported
"""
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
input_sql = self.genFullTypeSql(ts="now")[0]
try:
self._conn.schemaless_insert([input_sql], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value)
@@ -484,7 +484,7 @@ class TDTestCase:
"""
check date format ts unsupported
"""
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
input_sql = self.genFullTypeSql(ts="2021-07-21\ 19:01:46.920")[0]
try:
self._conn.schemaless_insert([input_sql], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value)
@@ -495,7 +495,7 @@ class TDTestCase:
"""
check ts format like 16260068336390us19
"""
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
input_sql = self.genFullTypeSql(ts="16260068336390us19")[0]
try:
self._conn.schemaless_insert([input_sql], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value)
@@ -506,7 +506,7 @@ class TDTestCase:
"""
check full type tag value limit
"""
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
# i8
for t1 in ["-128i8", "127i8"]:
input_sql, stb_name = self.genFullTypeSql(t1=t1)
@@ -602,7 +602,7 @@ class TDTestCase:
"""
check full type col value limit
"""
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
# i8
for c1 in ["-128i8", "127i8"]:
input_sql, stb_name = self.genFullTypeSql(c1=c1)
@@ -699,7 +699,7 @@ class TDTestCase:
"""
test illegal tag col value
"""
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
# bool
for i in ["TrUe", "tRue", "trUe", "truE", "FalsE", "fAlse", "faLse", "falSe", "falsE"]:
input_sql1 = self.genFullTypeSql(t0=i)[0]
@@ -758,7 +758,7 @@ class TDTestCase:
"""
check duplicate Id Tag Col
"""
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
input_sql_id = self.genFullTypeSql(id_double_tag=True)[0]
try:
self._conn.schemaless_insert([input_sql_id], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value)
@@ -792,7 +792,7 @@ class TDTestCase:
"""
case no id when stb exist
"""
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
input_sql, stb_name = self.genFullTypeSql(tb_name="sub_table_0123456", t0="f", c0="f")
self.resCmp(input_sql, stb_name)
input_sql, stb_name = self.genFullTypeSql(stb_name=stb_name, id_noexist_tag=True, t0="f", c0="f")
@@ -805,7 +805,7 @@ class TDTestCase:
"""
check duplicate insert when stb exist
"""
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
input_sql, stb_name = self.genFullTypeSql()
self.resCmp(input_sql, stb_name)
self._conn.schemaless_insert([input_sql], TDSmlProtocolType.LINE.value, TDSmlTimestampType.NANO_SECOND.value)
@@ -816,7 +816,7 @@ class TDTestCase:
"""
check length increase
"""
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
input_sql, stb_name = self.genFullTypeSql()
self.resCmp(input_sql, stb_name)
tb_name = tdCom.getLongName(5, "letters")
@@ -833,7 +833,7 @@ class TDTestCase:
* col is added without value when update==0
* col is added with value when update==1
"""
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
tb_name = tdCom.getLongName(7, "letters")
for db_update_tag in [0, 1]:
if db_update_tag == 1 :
@@ -850,7 +850,7 @@ class TDTestCase:
"""
check column and tag count add
"""
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
tb_name = tdCom.getLongName(7, "letters")
input_sql, stb_name = self.genFullTypeSql(tb_name=tb_name, t0="f", c0="f")
self.resCmp(input_sql, stb_name)
@@ -866,7 +866,7 @@ class TDTestCase:
condition: stb not change
insert two table, keep tag unchange, change col
"""
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
input_sql, stb_name = self.genFullTypeSql(t0="f", c0="f", id_noexist_tag=True)
self.resCmp(input_sql, stb_name)
tb_name1 = self.getNoIdTbName(stb_name)
@@ -888,7 +888,7 @@ class TDTestCase:
"""
every binary and nchar must be length+2
"""
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
stb_name = tdCom.getLongName(7, "letters")
tb_name = f'{stb_name}_1'
input_sql = f'{stb_name},id="{tb_name}",t0=t c0=f 1626006833639000000'
@@ -928,7 +928,7 @@ class TDTestCase:
"""
check nchar length limit
"""
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
stb_name = tdCom.getLongName(7, "letters")
tb_name = f'{stb_name}_1'
input_sql = f'{stb_name},id="{tb_name}",t0=t c0=f 1626006833639000000'
@@ -963,7 +963,7 @@ class TDTestCase:
"""
test batch insert
"""
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
stb_name = tdCom.getLongName(8, "letters")
# tdSql.execute(f'create stable {stb_name}(ts timestamp, f int) tags(t1 bigint)')
lines = ["st123456,t1=3i64,t2=4f64,t3=\"t3\" c1=3i64,c3=L\"passit\",c2=false,c4=4f64 1626006833639000000",
@@ -982,7 +982,7 @@ class TDTestCase:
"""
test multi insert
"""
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
sql_list = []
stb_name = tdCom.getLongName(8, "letters")
# tdSql.execute(f'create stable {stb_name}(ts timestamp, f int) tags(t1 bigint)')
@@ -996,7 +996,7 @@ class TDTestCase:
"""
test batch error insert
"""
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
stb_name = tdCom.getLongName(8, "letters")
lines = ["st123456,t1=3i64,t2=4f64,t3=\"t3\" c1=3i64,c3=L\"passit\",c2=false,c4=4f64 1626006833639000000",
f"{stb_name},t2=5f64,t3=L\"ste\" c1=tRue,c2=4i64,c3=\"iam\" 1626056811823316532ns"]
@@ -1068,7 +1068,7 @@ class TDTestCase:
"""
thread input different stb
"""
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
input_sql = self.genSqlList()[0]
self.multiThreadRun(self.genMultiThreadSeq(input_sql))
tdSql.query(f"show tables;")
@@ -1078,7 +1078,7 @@ class TDTestCase:
"""
thread input same stb tb, different data, result keep first data
"""
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
tb_name = tdCom.getLongName(7, "letters")
input_sql, stb_name = self.genFullTypeSql(tb_name=tb_name)
self.resCmp(input_sql, stb_name)
@@ -1095,7 +1095,7 @@ class TDTestCase:
"""
thread input same stb tb, different data, add columes and tags, result keep first data
"""
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
tb_name = tdCom.getLongName(7, "letters")
input_sql, stb_name = self.genFullTypeSql(tb_name=tb_name)
self.resCmp(input_sql, stb_name)
@@ -1112,7 +1112,7 @@ class TDTestCase:
"""
thread input same stb tb, different data, minus columes and tags, result keep first data
"""
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
tb_name = tdCom.getLongName(7, "letters")
input_sql, stb_name = self.genFullTypeSql(tb_name=tb_name)
self.resCmp(input_sql, stb_name)
@@ -1129,7 +1129,7 @@ class TDTestCase:
"""
thread input same stb, different tb, different data
"""
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
input_sql, stb_name = self.genFullTypeSql()
self.resCmp(input_sql, stb_name)
s_stb_d_tb_list = self.genSqlList(stb_name=stb_name)[4]
@@ -1144,7 +1144,7 @@ class TDTestCase:
"""
thread input same stb, different tb, different data, add col, mul tag
"""
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
input_sql, stb_name = self.genFullTypeSql()
self.resCmp(input_sql, stb_name)
s_stb_d_tb_a_col_m_tag_list = self.genSqlList(stb_name=stb_name)[5]
@@ -1159,7 +1159,7 @@ class TDTestCase:
"""
thread input same stb, different tb, different data, add tag, mul col
"""
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
input_sql, stb_name = self.genFullTypeSql()
self.resCmp(input_sql, stb_name)
s_stb_d_tb_a_tag_m_col_list = self.genSqlList(stb_name=stb_name)[6]
@@ -1171,7 +1171,7 @@ class TDTestCase:
"""
thread input same stb tb, different ts
"""
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
tb_name = tdCom.getLongName(7, "letters")
input_sql, stb_name = self.genFullTypeSql(tb_name=tb_name)
self.resCmp(input_sql, stb_name)
@@ -1186,7 +1186,7 @@ class TDTestCase:
"""
thread input same stb tb, different ts, add col, mul tag
"""
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
tb_name = tdCom.getLongName(7, "letters")
input_sql, stb_name = self.genFullTypeSql(tb_name=tb_name)
self.resCmp(input_sql, stb_name)
@@ -1205,7 +1205,7 @@ class TDTestCase:
"""
thread input same stb tb, different ts, add tag, mul col
"""
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
tb_name = tdCom.getLongName(7, "letters")
input_sql, stb_name = self.genFullTypeSql(tb_name=tb_name)
self.resCmp(input_sql, stb_name)
@@ -1226,7 +1226,7 @@ class TDTestCase:
"""
thread input same stb, different tb, data, ts
"""
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
input_sql, stb_name = self.genFullTypeSql()
self.resCmp(input_sql, stb_name)
s_stb_d_tb_d_ts_list = self.genSqlList(stb_name=stb_name)[10]
@@ -1241,7 +1241,7 @@ class TDTestCase:
"""
thread input same stb, different tb, data, ts, add col, mul tag
"""
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
input_sql, stb_name = self.genFullTypeSql()
self.resCmp(input_sql, stb_name)
s_stb_d_tb_d_ts_a_col_m_tag_list = self.genSqlList(stb_name=stb_name)[11]
diff --git a/tests/system-test/1-insert/opentsdb_json_taosc_insert.py b/tests/system-test/1-insert/opentsdb_json_taosc_insert.py
index 003abe9d10346f9b7cce1dbdb6f6f0ed73e3ea55..3b01784000b74c1f6bb072f24e8be36e99d37f4f 100644
--- a/tests/system-test/1-insert/opentsdb_json_taosc_insert.py
+++ b/tests/system-test/1-insert/opentsdb_json_taosc_insert.py
@@ -459,7 +459,7 @@ class TDTestCase:
normal tags and cols, one for every elm
"""
tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
input_json, stb_name = self.genFullTypeJson(value_type=value_type)
self.resCmp(input_json, stb_name)
@@ -468,7 +468,7 @@ class TDTestCase:
check all normal type
"""
tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
full_type_list = ["f", "F", "false", "False", "t", "T", "true", "True"]
for t_type in full_type_list:
input_json_list = [self.genFullTypeJson(tag_value=self.genTagValue(t0_value=t_type))[0],
@@ -489,7 +489,7 @@ class TDTestCase:
binary_symbols = '\"abcd`~!@#$%^&*()_-{[}]|:;<.>?lfjal"\'\'"\"'
'''
tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
binary_symbols = '"abcd`~!@#$%^&*()_-{[}]|:;<.>?lfjal"'
nchar_symbols = binary_symbols
input_sql1, stb_name1 = self.genFullTypeJson(col_value=self.genTsColValue(value=binary_symbols, t_type="binary", value_type=value_type),
@@ -505,7 +505,7 @@ class TDTestCase:
# ! us级时间戳都为0时,数据库中查询显示,但python接口拿到的结果不显示 .000000的情况请确认,目前修改时间处理代码可以通过
"""
tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
ts_list = ["1626006833639000000ns", "1626006833639019us", "1626006833640ms", "1626006834s", "1626006834", 0]
for ts in ts_list:
if "s" in str(ts):
@@ -571,7 +571,7 @@ class TDTestCase:
eg: t0=**,id=**,t1=**
"""
tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
input_json, stb_name = self.genFullTypeJson(id_change_tag=True, value_type=value_type)
self.resCmp(input_json, stb_name)
@@ -581,7 +581,7 @@ class TDTestCase:
eg: id and ID
"""
tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
input_json, stb_name = self.genFullTypeJson(id_upper_tag=True, value_type=value_type)
self.resCmp(input_json, stb_name)
input_json, stb_name = self.genFullTypeJson(id_mixul_tag=True, value_type=value_type)
@@ -594,7 +594,7 @@ class TDTestCase:
id not exist
"""
tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
input_json, stb_name = self.genFullTypeJson(id_noexist_tag=True, value_type=value_type)
self.resCmp(input_json, stb_name)
query_sql = f"select tbname from {stb_name}"
@@ -610,10 +610,10 @@ class TDTestCase:
"""
tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
for input_json in [self.genLongJson(128, value_type)[0]]:
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
self._conn.schemaless_insert([json.dumps(input_json)], TDSmlProtocolType.JSON.value, None)
for input_json in [self.genLongJson(129, value_type)[0]]:
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
try:
self._conn.schemaless_insert([json.dumps(input_json)], TDSmlProtocolType.JSON.value, None)
except SchemalessError as err:
@@ -625,7 +625,7 @@ class TDTestCase:
mix "`~!@#$¥%^&*()-+={}|[]、「」【】\:;《》<>?"
"""
tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
rstr = list("`~!@#$¥%^&*()-+={}|[]、「」【】\:;《》<>?")
for i in rstr:
input_json = self.genFullTypeJson(tb_name=f'aa{i}bb', value_type=value_type)[0]
@@ -639,7 +639,7 @@ class TDTestCase:
id is start with num
"""
tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
input_json = self.genFullTypeJson(tb_name="1aaabbb", value_type=value_type)[0]
try:
self._conn.schemaless_insert([json.dumps(input_json)], TDSmlProtocolType.JSON.value, None)
@@ -651,7 +651,7 @@ class TDTestCase:
check now unsupported
"""
tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
input_json = self.genFullTypeJson(ts_value=self.genTsColValue(value="now", t_type="ns", value_type=value_type))[0]
try:
self._conn.schemaless_insert([json.dumps(input_json)], TDSmlProtocolType.JSON.value, None)
@@ -663,7 +663,7 @@ class TDTestCase:
check date format ts unsupported
"""
tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
input_json = self.genFullTypeJson(ts_value=self.genTsColValue(value="2021-07-21\ 19:01:46.920", t_type="ns", value_type=value_type))[0]
try:
self._conn.schemaless_insert([json.dumps(input_json)], TDSmlProtocolType.JSON.value, None)
@@ -675,7 +675,7 @@ class TDTestCase:
check ts format like 16260068336390us19
"""
tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
input_json = self.genFullTypeJson(ts_value=self.genTsColValue(value="16260068336390us19", t_type="us", value_type=value_type))[0]
try:
self._conn.schemaless_insert([json.dumps(input_json)], TDSmlProtocolType.JSON.value, None)
@@ -690,7 +690,7 @@ class TDTestCase:
length of stb_name tb_name <= 192
"""
tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
tdSql.execute('reset query cache')
stb_name_192 = tdCom.getLongName(len=192, mode="letters")
tb_name_192 = tdCom.getLongName(len=192, mode="letters")
@@ -715,7 +715,7 @@ class TDTestCase:
check tag name limit <= 62
"""
tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
tag_name = tdCom.getLongName(61, "letters")
tag_name = f't{tag_name}'
stb_name = tdCom.getLongName(7, "letters")
@@ -733,7 +733,7 @@ class TDTestCase:
check full type tag value limit
"""
tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
# i8
for t1 in [-127, 127]:
input_json, stb_name = self.genFullTypeJson(tag_value=self.genTagValue(t1_value=t1, value_type=value_type))
@@ -854,12 +854,12 @@ class TDTestCase:
check full type col value limit
"""
tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
# i8
for value in [-128, 127]:
input_json, stb_name = self.genFullTypeJson(col_value=self.genTsColValue(value=value, t_type="tinyint", value_type=value_type))
self.resCmp(input_json, stb_name)
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
for value in [-129, 128]:
input_json = self.genFullTypeJson(col_value=self.genTsColValue(value=value, t_type="tinyint"))[0]
try:
@@ -868,11 +868,11 @@ class TDTestCase:
except SchemalessError as err:
tdSql.checkNotEqual(err.errno, 0)
# i16
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
for value in [-32768]:
input_json, stb_name = self.genFullTypeJson(col_value=self.genTsColValue(value=value, t_type="smallint", value_type=value_type))
self.resCmp(input_json, stb_name)
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
for value in [-32769, 32768]:
input_json = self.genFullTypeJson(col_value=self.genTsColValue(value=value, t_type="smallint"))[0]
try:
@@ -882,11 +882,11 @@ class TDTestCase:
tdSql.checkNotEqual(err.errno, 0)
# i32
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
for value in [-2147483648]:
input_json, stb_name = self.genFullTypeJson(col_value=self.genTsColValue(value=value, t_type="int", value_type=value_type))
self.resCmp(input_json, stb_name)
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
for value in [-2147483649, 2147483648]:
input_json = self.genFullTypeJson(col_value=self.genTsColValue(value=value, t_type="int"))[0]
try:
@@ -896,12 +896,12 @@ class TDTestCase:
tdSql.checkNotEqual(err.errno, 0)
# i64
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
for value in [-9223372036854775808]:
input_json, stb_name = self.genFullTypeJson(col_value=self.genTsColValue(value=value, t_type="bigint", value_type=value_type))
self.resCmp(input_json, stb_name)
# ! bug
- # tdCom.cleanTb()
+ # tdCom.cleanTb(dbname="test")
# for value in [-9223372036854775809, 9223372036854775808]:
# print(value)
# input_json = self.genFullTypeJson(col_value=self.genTsColValue(value=value, t_type="bigint"))[0]
@@ -913,12 +913,12 @@ class TDTestCase:
# tdSql.checkNotEqual(err.errno, 0)
# f32
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
for value in [-3.4028234663852885981170418348451692544*(10**38), 3.4028234663852885981170418348451692544*(10**38)]:
input_json, stb_name = self.genFullTypeJson(col_value=self.genTsColValue(value=value, t_type="float", value_type=value_type))
self.resCmp(input_json, stb_name)
# * limit set to 4028234664*(10**38)
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
for value in [-3.4028234664*(10**38), 3.4028234664*(10**38)]:
input_json = self.genFullTypeJson(col_value=self.genTsColValue(value=value, t_type="float"))[0]
try:
@@ -928,12 +928,12 @@ class TDTestCase:
tdSql.checkNotEqual(err.errno, 0)
# f64
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
for value in [-1.79769313486231570814527423731704356798070567525844996598917476803157260780*(10**308), -1.79769313486231570814527423731704356798070567525844996598917476803157260780*(10**308)]:
input_json, stb_name = self.genFullTypeJson(col_value=self.genTsColValue(value=value, t_type="double", value_type=value_type))
self.resCmp(input_json, stb_name)
# * limit set to 1.797693134862316*(10**308)
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
for value in [-1.797693134862316*(10**308), -1.797693134862316*(10**308)]:
input_json = self.genFullTypeJson(col_value=self.genTsColValue(value=value, t_type="double", value_type=value_type))[0]
try:
@@ -944,12 +944,12 @@ class TDTestCase:
# if value_type == "obj":
# # binary
- # tdCom.cleanTb()
+ # tdCom.cleanTb(dbname="test")
# stb_name = tdCom.getLongName(7, "letters")
# input_json = {"metric": stb_name, "timestamp": {'value': 1626006833639000000, 'type': 'ns'}, "value": {'value': tdCom.getLongName(16374, "letters"), 'type': 'binary'}, "tags": {"t0": {'value': True, 'type': 'bool'}}}
# self._conn.schemaless_insert([json.dumps(input_json)], TDSmlProtocolType.JSON.value, None)
- # tdCom.cleanTb()
+ # tdCom.cleanTb(dbname="test")
# input_json = {"metric": stb_name, "timestamp": {'value': 1626006833639000000, 'type': 'ns'}, "value": {'value': tdCom.getLongName(16375, "letters"), 'type': 'binary'}, "tags": {"t0": {'value': True, 'type': 'bool'}}}
# try:
# self._conn.schemaless_insert([json.dumps(input_json)], TDSmlProtocolType.JSON.value, None)
@@ -959,12 +959,12 @@ class TDTestCase:
# # nchar
# # * legal nchar could not be larger than 16374/4
- # tdCom.cleanTb()
+ # tdCom.cleanTb(dbname="test")
# stb_name = tdCom.getLongName(7, "letters")
# input_json = {"metric": stb_name, "timestamp": {'value': 1626006833639000000, 'type': 'ns'}, "value": {'value': tdCom.getLongName(4093, "letters"), 'type': 'nchar'}, "tags": {"t0": {'value': True, 'type': 'bool'}}}
# self._conn.schemaless_insert([json.dumps(input_json)], TDSmlProtocolType.JSON.value, None)
- # tdCom.cleanTb()
+ # tdCom.cleanTb(dbname="test")
# input_json = {"metric": stb_name, "timestamp": {'value': 1626006833639000000, 'type': 'ns'}, "value": {'value': tdCom.getLongName(4094, "letters"), 'type': 'nchar'}, "tags": {"t0": {'value': True, 'type': 'bool'}}}
# try:
# self._conn.schemaless_insert([json.dumps(input_json)], TDSmlProtocolType.JSON.value, None)
@@ -973,14 +973,14 @@ class TDTestCase:
# tdSql.checkNotEqual(err.errno, 0)
# elif value_type == "default":
# # binary
- # tdCom.cleanTb()
+ # tdCom.cleanTb(dbname="test")
# stb_name = tdCom.getLongName(7, "letters")
# if tdSql.getVariable("defaultJSONStrType")[0].lower() == "binary":
# input_json = {"metric": stb_name, "timestamp": 1626006834, "value": tdCom.getLongName(16374, "letters"), "tags": {"t0": {'value': True, 'type': 'bool'}}}
# elif tdSql.getVariable("defaultJSONStrType")[0].lower() == "nchar":
# input_json = {"metric": stb_name, "timestamp": 1626006834, "value": tdCom.getLongName(4093, "letters"), "tags": {"t0": {'value': True, 'type': 'bool'}}}
# self._conn.schemaless_insert([json.dumps(input_json)], TDSmlProtocolType.JSON.value, None)
- # tdCom.cleanTb()
+ # tdCom.cleanTb(dbname="test")
# if tdSql.getVariable("defaultJSONStrType")[0].lower() == "binary":
# input_json = {"metric": stb_name, "timestamp": 1626006834, "value": tdCom.getLongName(16375, "letters"), "tags": {"t0": {'value': True, 'type': 'bool'}}}
# elif tdSql.getVariable("defaultJSONStrType")[0].lower() == "nchar":
@@ -997,7 +997,7 @@ class TDTestCase:
test illegal tag col value
"""
tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
# bool
for i in ["TrUe", "tRue", "trUe", "truE", "FalsE", "fAlse", "faLse", "falSe", "falsE"]:
try:
@@ -1046,7 +1046,7 @@ class TDTestCase:
check duplicate Id Tag Col
"""
tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
input_json = self.genFullTypeJson(id_double_tag=True, value_type=value_type)[0]
print(input_json)
try:
@@ -1068,7 +1068,7 @@ class TDTestCase:
case no id when stb exist
"""
tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
input_json, stb_name = self.genFullTypeJson(tb_name="sub_table_0123456", col_value=self.genTsColValue(value=True, t_type="bool", value_type=value_type), tag_value=self.genTagValue(t0_value=True, value_type=value_type))
self.resCmp(input_json, stb_name)
input_json, stb_name = self.genFullTypeJson(stb_name=stb_name, id_noexist_tag=True, col_value=self.genTsColValue(value=True, t_type="bool", value_type=value_type), tag_value=self.genTagValue(t0_value=True, value_type=value_type))
@@ -1081,7 +1081,7 @@ class TDTestCase:
check duplicate insert when stb exist
"""
tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
input_json, stb_name = self.genFullTypeJson(value_type=value_type)
self.resCmp(input_json, stb_name)
self._conn.schemaless_insert([json.dumps(input_json)], TDSmlProtocolType.JSON.value, None)
@@ -1091,7 +1091,7 @@ class TDTestCase:
"""
check length increase
"""
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
input_json, stb_name = self.genFullTypeJson(value_type=value_type)
self._conn.schemaless_insert([json.dumps(input_json)], TDSmlProtocolType.JSON.value, None)
self.resCmp(input_json, stb_name)
@@ -1105,7 +1105,7 @@ class TDTestCase:
check length increase
"""
tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
stb_name = "test_crash"
input_json = self.genFullTypeJson(stb_name=stb_name)[0]
self._conn.schemaless_insert([json.dumps(input_json)], TDSmlProtocolType.JSON.value, None)
@@ -1128,7 +1128,7 @@ class TDTestCase:
* col is added with value when update==1
"""
tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
tb_name = tdCom.getLongName(7, "letters")
for db_update_tag in [0, 1]:
if db_update_tag == 1 :
@@ -1154,7 +1154,7 @@ class TDTestCase:
check tag count add
"""
tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
tb_name = tdCom.getLongName(7, "letters")
input_json, stb_name = self.genFullTypeJson(tb_name=tb_name, col_value=self.genTsColValue(value=True, t_type="bool", value_type=value_type), tag_value=self.genTagValue(t0_value=True, value_type=value_type))
self.resCmp(input_json, stb_name)
@@ -1171,7 +1171,7 @@ class TDTestCase:
insert two table, keep tag unchange, change col
"""
tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
input_json, stb_name = self.genFullTypeJson(col_value=self.genTsColValue(value=True, t_type="bool", value_type=value_type), tag_value=self.genTagValue(t0_value=True, value_type=value_type), id_noexist_tag=True)
self.resCmp(input_json, stb_name)
tb_name1 = self.getNoIdTbName(stb_name)
@@ -1194,7 +1194,7 @@ class TDTestCase:
every binary and nchar must be length+2
"""
tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
stb_name = tdCom.getLongName(7, "letters")
tb_name = f'{stb_name}_1'
tag_value = {"t0": {"value": True, "type": "bool"}}
@@ -1240,7 +1240,7 @@ class TDTestCase:
check nchar length limit
"""
tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
stb_name = tdCom.getLongName(7, "letters")
tb_name = f'{stb_name}_1'
tag_value = {"t0": True}
@@ -1284,7 +1284,7 @@ class TDTestCase:
test batch insert
"""
tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
stb_name = "stb_name"
tdSql.execute(f'create stable {stb_name}(ts timestamp, f int) tags(t1 bigint)')
input_json = [{"metric": "st123456", "timestamp": {"value": 1626006833639000000, "type": "ns"}, "value": {"value": 1, "type": "bigint"}, "tags": {"t1": {"value": 3, "type": "bigint"}, "t2": {"value": 4, "type": "double"}, "t3": {"value": "t3", "type": "binary"}}},
@@ -1319,7 +1319,7 @@ class TDTestCase:
test multi insert
"""
tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
sql_list = list()
stb_name = tdCom.getLongName(8, "letters")
tdSql.execute(f'create stable {stb_name}(ts timestamp, f int) tags(t1 bigint)')
@@ -1335,7 +1335,7 @@ class TDTestCase:
test batch error insert
"""
tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
input_json = [{"metric": "st123456", "timestamp": {"value": 1626006833639000000, "type": "ns"}, "value": {"value": "tt", "type": "bool"}, "tags": {"t1": {"value": 3, "type": "bigint"}, "t2": {"value": 4, "type": "double"}, "t3": {"value": "t3", "type": "binary"}}},
{"metric": "st123456", "timestamp": {"value": 1626006933641000000, "type": "ns"}, "value": {"value": 9, "type": "bigint"}, "tags": {"t1": {"value": 4, "type": "bigint"}, "t3": {"value": "t4", "type": "binary"}, "t2": {"value": 5, "type": "double"}, "t4": {"value": 5, "type": "double"}}}]
try:
@@ -1349,7 +1349,7 @@ class TDTestCase:
test multi cols insert
"""
tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
input_json = self.genFullTypeJson(c_multi_tag=True, value_type=value_type)[0]
try:
self._conn.schemaless_insert([json.dumps(input_json)], TDSmlProtocolType.JSON.value, None)
@@ -1362,7 +1362,7 @@ class TDTestCase:
test blank col insert
"""
tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
input_json = self.genFullTypeJson(c_blank_tag=True, value_type=value_type)[0]
try:
self._conn.schemaless_insert([json.dumps(input_json)], TDSmlProtocolType.JSON.value, None)
@@ -1375,7 +1375,7 @@ class TDTestCase:
test blank tag insert
"""
tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
input_json = self.genFullTypeJson(t_blank_tag=True, value_type=value_type)[0]
try:
self._conn.schemaless_insert([json.dumps(input_json)], TDSmlProtocolType.JSON.value, None)
@@ -1388,7 +1388,7 @@ class TDTestCase:
check nchar ---> chinese
"""
tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
input_json, stb_name = self.genFullTypeJson(chinese_tag=True)
self.resCmp(input_json, stb_name)
@@ -1397,7 +1397,7 @@ class TDTestCase:
multi_field
'''
tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
input_json = self.genFullTypeJson(multi_field_tag=True, value_type=value_type)[0]
try:
self._conn.schemaless_insert([json.dumps(input_json)], TDSmlProtocolType.JSON.value, None)
@@ -1407,7 +1407,7 @@ class TDTestCase:
def spellCheckCase(self):
tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
stb_name = tdCom.getLongName(8, "letters")
input_json_list = [{"metric": f'{stb_name}_1', "timestamp": {"value": 1626006833639000000, "type": "Ns"}, "value": {"value": 1, "type": "Bigint"}, "tags": {"t1": {"value": 127, "type": "tinYint"}}},
{"metric": f'{stb_name}_2', "timestamp": {"value": 1626006833639000001, "type": "nS"}, "value": {"value": 32767, "type": "smallInt"}, "tags": {"t1": {"value": 32767, "type": "smallInt"}}},
@@ -1426,7 +1426,7 @@ class TDTestCase:
def tbnameTagsColsNameCheckCase(self):
tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
input_json = {'metric': 'rFa$sta', 'timestamp': {'value': 1626006834, 'type': 's'}, 'value': {'value': True, 'type': 'bool'}, 'tags': {'Tt!0': {'value': False, 'type': 'bool'}, 'tT@1': {'value': 127, 'type': 'tinyint'}, 't@2': {'value': 32767, 'type': 'smallint'}, 't$3': {'value': 2147483647, 'type': 'int'}, 't%4': {'value': 9223372036854775807, 'type': 'bigint'}, 't^5': {'value': 11.12345027923584, 'type': 'float'}, 't&6': {'value': 22.123456789, 'type': 'double'}, 't*7': {'value': 'binaryTagValue', 'type': 'binary'}, 't!@#$%^&*()_+[];:<>?,9': {'value': 'ncharTagValue', 'type': 'nchar'}, 'id': 'rFas$ta_1'}}
self._conn.schemaless_insert([json.dumps(input_json)], TDSmlProtocolType.JSON.value, None)
query_sql = 'select * from `rFa$sta`'
@@ -1441,7 +1441,7 @@ class TDTestCase:
metric value "." trans to "_"
"""
tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
input_json = self.genFullTypeJson(point_trans_tag=True, value_type=value_type)[0]
self._conn.schemaless_insert([json.dumps(input_json)], TDSmlProtocolType.JSON.value, None)
tdSql.execute("drop table `.point.trans.test`")
@@ -1509,7 +1509,7 @@ class TDTestCase:
thread input different stb
"""
tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
input_json = self.genSqlList(value_type=value_type)[0]
self.multiThreadRun(self.genMultiThreadSeq(input_json))
tdSql.query(f"show tables;")
@@ -1520,7 +1520,7 @@ class TDTestCase:
thread input same stb tb, different data, result keep first data
"""
tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
tb_name = tdCom.getLongName(7, "letters")
input_json, stb_name = self.genFullTypeJson(tb_name=tb_name, col_value=self.genTsColValue(value="binaryTagValue", t_type="binary", value_type=value_type))
self.resCmp(input_json, stb_name)
@@ -1538,7 +1538,7 @@ class TDTestCase:
thread input same stb tb, different data, add columes and tags, result keep first data
"""
tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
tb_name = tdCom.getLongName(7, "letters")
input_json, stb_name = self.genFullTypeJson(tb_name=tb_name, col_value=self.genTsColValue(value="binaryTagValue", t_type="binary", value_type=value_type))
self.resCmp(input_json, stb_name)
@@ -1556,7 +1556,7 @@ class TDTestCase:
thread input same stb tb, different data, minus columes and tags, result keep first data
"""
tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
tb_name = tdCom.getLongName(7, "letters")
input_json, stb_name = self.genFullTypeJson(tb_name=tb_name, col_value=self.genTsColValue(value="binaryTagValue", t_type="binary", value_type=value_type))
self.resCmp(input_json, stb_name)
@@ -1574,7 +1574,7 @@ class TDTestCase:
thread input same stb, different tb, different data
"""
tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
input_json, stb_name = self.genFullTypeJson(col_value=self.genTsColValue(value="binaryTagValue", t_type="binary", value_type=value_type))
self.resCmp(input_json, stb_name)
s_stb_d_tb_list = self.genSqlList(stb_name=stb_name, value_type=value_type)[4]
@@ -1587,7 +1587,7 @@ class TDTestCase:
thread input same stb, different tb, different data, add col, mul tag
"""
tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
input_json, stb_name = self.genFullTypeJson(col_value=self.genTsColValue(value="binaryTagValue", t_type="binary"))
self.resCmp(input_json, stb_name)
s_stb_d_tb_m_tag_list = [({"metric": stb_name, "timestamp": {"value": 1626006833639000000, "type": "ns"}, "value": "omfdhyom", "tags": {"t0": {"value": False, "type": "bool"}, "t1": {"value": 127, "type": "tinyint"}, "t2": {"value": 32767, "type": "smallint"}, "t3": {"value": 2147483647, "type": "int"}, "t4": {"value": 9223372036854775807, "type": "bigint"}, "t5": {"value": 11.12345, "type": "float"}, "t6": {"value": 22.123456789, "type": "double"}}}, 'yzwswz'),
@@ -1605,7 +1605,7 @@ class TDTestCase:
thread input same stb, different tb, different data, add tag, mul col
"""
tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
input_json, stb_name = self.genFullTypeJson(col_value=self.genTsColValue(value="binaryTagValue", t_type="binary", value_type=value_type))
self.resCmp(input_json, stb_name)
s_stb_d_tb_a_tag_list = self.genSqlList(stb_name=stb_name, value_type=value_type)[6]
@@ -1618,7 +1618,7 @@ class TDTestCase:
thread input same stb tb, different ts
"""
tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
tb_name = tdCom.getLongName(7, "letters")
input_json, stb_name = self.genFullTypeJson(tb_name=tb_name, col_value=self.genTsColValue(value="binaryTagValue", t_type="binary"))
self.resCmp(input_json, stb_name)
@@ -1638,7 +1638,7 @@ class TDTestCase:
thread input same stb tb, different ts, add col, mul tag
"""
tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
tb_name = tdCom.getLongName(7, "letters")
input_json, stb_name = self.genFullTypeJson(tb_name=tb_name, col_value=self.genTsColValue(value="binaryTagValue", t_type="binary"))
self.resCmp(input_json, stb_name)
@@ -1660,7 +1660,7 @@ class TDTestCase:
thread input same stb tb, different ts, add tag, mul col
"""
tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
tb_name = tdCom.getLongName(7, "letters")
input_json, stb_name = self.genFullTypeJson(tb_name=tb_name, col_value=self.genTsColValue(value="binaryTagValue", t_type="binary"))
self.resCmp(input_json, stb_name)
@@ -1683,7 +1683,7 @@ class TDTestCase:
thread input same stb, different tb, data, ts
"""
tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
input_json, stb_name = self.genFullTypeJson(col_value=self.genTsColValue(value="binaryTagValue", t_type="binary", value_type=value_type))
self.resCmp(input_json, stb_name)
s_stb_d_tb_d_ts_list = self.genSqlList(stb_name=stb_name, value_type=value_type)[10]
@@ -1696,7 +1696,7 @@ class TDTestCase:
thread input same stb, different tb, data, ts, add col, mul tag
"""
tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
input_json, stb_name = self.genFullTypeJson(col_value=self.genTsColValue(value="binaryTagValue", t_type="binary"))
self.resCmp(input_json, stb_name)
s_stb_d_tb_d_ts_m_tag_list = [({'metric': stb_name, 'timestamp': {'value': 0, 'type': 'ns'}, 'value': 'pjndapjb', 'tags': {'t0': {'value': False, 'type': 'bool'}, 't1': {'value': 127, 'type': 'tinyint'}, 't2': {'value': 32767, 'type': 'smallint'}, 't3': {'value': 2147483647, 'type': 'int'}, 't4': {"value": 9223372036854775807, "type": "bigint"}, 't5': {'value': 11.12345027923584, 'type': 'float'}, 't6': {'value': 22.123456789, 'type': 'double'}}}, 'punftb'),
diff --git a/tests/system-test/1-insert/opentsdb_telnet_line_taosc_insert.py b/tests/system-test/1-insert/opentsdb_telnet_line_taosc_insert.py
index 3c47a65746b89c96b77408b6c910c88a8703e147..209cfb724e460207493dc2ca1ab0dd3522eb333b 100644
--- a/tests/system-test/1-insert/opentsdb_telnet_line_taosc_insert.py
+++ b/tests/system-test/1-insert/opentsdb_telnet_line_taosc_insert.py
@@ -30,7 +30,7 @@ if platform.system().lower() == 'windows':
class TDTestCase:
def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
- tdSql.init(conn.cursor(), logSql)
+ tdSql.init(conn.cursor(), False)
self._conn = conn
self.smlChildTableName_value = "id"
@@ -351,7 +351,7 @@ class TDTestCase:
normal tags and cols, one for every elm
"""
tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
input_sql, stb_name = self.genFullTypeSql(protocol=protocol)
self.resCmp(input_sql, stb_name, protocol=protocol)
@@ -360,7 +360,7 @@ class TDTestCase:
check all normal type
"""
tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
full_type_list = ["f", "F", "false", "False", "t", "T", "true", "True"]
for t_type in full_type_list:
input_sql, stb_name = self.genFullTypeSql(t0=t_type, protocol=protocol)
@@ -375,7 +375,7 @@ class TDTestCase:
binary_symbols = '\"abcd`~!@#$%^&*()_-{[}]|:;<.>?lfjal"\'\'"\"'
'''
tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
binary_symbols = '"abcd`~!@#$%^&*()_-{[}]|:;<.>?lfjal"'
nchar_symbols = f'L{binary_symbols}'
input_sql1, stb_name1 = self.genFullTypeSql(value=binary_symbols, t7=binary_symbols, t8=nchar_symbols, protocol=protocol)
@@ -388,7 +388,7 @@ class TDTestCase:
test ts list --> ["1626006833640ms", "1626006834s", "1626006822639022"]
"""
tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
input_sql, stb_name = self.genFullTypeSql(ts=1626006833640)
self.resCmp(input_sql, stb_name, ts_type=TDSmlTimestampType.MILLI_SECOND.value)
input_sql, stb_name = self.genFullTypeSql(ts=1626006833640)
@@ -407,7 +407,7 @@ class TDTestCase:
def openTstbTelnetTsCheckCase(self):
tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
input_sql = f'{tdCom.getLongName(len=10, mode="letters")} 0 127 t0=127 t1=32767I16 t2=2147483647I32 t3=9223372036854775807 t4=11.12345027923584F32 t5=22.123456789F64'
stb_name = input_sql.split(" ")[0]
self.resCmp(input_sql, stb_name, ts=0)
@@ -431,7 +431,7 @@ class TDTestCase:
eg: t0=**,id=**,t1=**
"""
tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
input_sql, stb_name = self.genFullTypeSql(id_change_tag=True, protocol=protocol)
self.resCmp(input_sql, stb_name, protocol=protocol)
@@ -441,7 +441,7 @@ class TDTestCase:
eg: id and ID
"""
tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
input_sql, stb_name = self.genFullTypeSql(id_upper_tag=True, protocol=protocol)
self.resCmp(input_sql, stb_name, protocol=protocol)
input_sql, stb_name = self.genFullTypeSql(id_mixul_tag=True, protocol=protocol)
@@ -454,7 +454,7 @@ class TDTestCase:
id not exist
"""
tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
input_sql, stb_name = self.genFullTypeSql(id_noexist_tag=True, protocol=protocol)
self.resCmp(input_sql, stb_name, protocol=protocol)
query_sql = f"select tbname from {stb_name}"
@@ -470,10 +470,10 @@ class TDTestCase:
"""
tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
for input_sql in [self.genLongSql(128)[0]]:
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
self._conn.schemaless_insert([input_sql], TDSmlProtocolType.TELNET.value, None)
for input_sql in [self.genLongSql(129)[0]]:
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
try:
self._conn.schemaless_insert([input_sql], TDSmlProtocolType.TELNET.value, None)
raise Exception("should not reach here")
@@ -486,7 +486,7 @@ class TDTestCase:
mix "`~!@#$¥%^&*()-+{}|[]、「」【】:;《》<>?"
"""
tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
rstr = list("~!@#$¥%^&*()-+{}|[]、「」【】:;《》<>?")
for i in rstr:
input_sql, stb_name = self.genFullTypeSql(tb_name=f"\"aaa{i}bbb\"", protocol=protocol)
@@ -498,7 +498,7 @@ class TDTestCase:
id is start with num
"""
tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
input_sql, stb_name = self.genFullTypeSql(tb_name="1aaabbb", protocol=protocol)
self.resCmp(input_sql, stb_name, protocol=protocol)
@@ -507,7 +507,7 @@ class TDTestCase:
check now unsupported
"""
tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
input_sql = self.genFullTypeSql(ts="now")[0]
try:
self._conn.schemaless_insert([input_sql], TDSmlProtocolType.TELNET.value, None)
@@ -520,7 +520,7 @@ class TDTestCase:
check date format ts unsupported
"""
tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
input_sql = self.genFullTypeSql(ts="2021-07-21\ 19:01:46.920")[0]
try:
self._conn.schemaless_insert([input_sql], TDSmlProtocolType.TELNET.value, None)
@@ -533,7 +533,7 @@ class TDTestCase:
check ts format like 16260068336390us19
"""
tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
input_sql = self.genFullTypeSql(ts="16260068336390us19")[0]
try:
self._conn.schemaless_insert([input_sql], TDSmlProtocolType.TELNET.value, None)
@@ -551,7 +551,7 @@ class TDTestCase:
tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
stb_name_192 = tdCom.getLongName(len=192, mode="letters")
tb_name_192 = tdCom.getLongName(len=192, mode="letters")
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
input_sql, stb_name = self.genFullTypeSql(stb_name=stb_name_192, tb_name=tb_name_192)
self.resCmp(input_sql, stb_name)
tdSql.query(f'select * from {stb_name}')
@@ -581,7 +581,7 @@ class TDTestCase:
check tag name limit <= 62
"""
tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
tag_name = tdCom.getLongName(61, "letters")
tag_name = f'T{tag_name}'
stb_name = tdCom.getLongName(7, "letters")
@@ -599,7 +599,7 @@ class TDTestCase:
check full type tag value limit
"""
tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
# nchar
# * legal nchar could not be larger than 16374/4
stb_name = tdCom.getLongName(7, "letters")
@@ -618,12 +618,12 @@ class TDTestCase:
check full type col value limit
"""
tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
# i8
for value in ["-128i8", "127i8"]:
input_sql, stb_name = self.genFullTypeSql(value=value)
self.resCmp(input_sql, stb_name)
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
for value in ["-129i8", "128i8"]:
input_sql = self.genFullTypeSql(value=value)[0]
try:
@@ -632,11 +632,11 @@ class TDTestCase:
except SchemalessError as err:
tdSql.checkNotEqual(err.errno, 0)
# i16
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
for value in ["-32768i16"]:
input_sql, stb_name = self.genFullTypeSql(value=value)
self.resCmp(input_sql, stb_name)
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
for value in ["-32769i16", "32768i16"]:
input_sql = self.genFullTypeSql(value=value)[0]
try:
@@ -646,11 +646,11 @@ class TDTestCase:
tdSql.checkNotEqual(err.errno, 0)
# i32
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
for value in ["-2147483648i32"]:
input_sql, stb_name = self.genFullTypeSql(value=value)
self.resCmp(input_sql, stb_name)
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
for value in ["-2147483649i32", "2147483648i32"]:
input_sql = self.genFullTypeSql(value=value)[0]
try:
@@ -660,11 +660,11 @@ class TDTestCase:
tdSql.checkNotEqual(err.errno, 0)
# i64
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
for value in ["-9223372036854775808i64"]:
input_sql, stb_name = self.genFullTypeSql(value=value)
self.resCmp(input_sql, stb_name)
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
for value in ["-9223372036854775809i64", "9223372036854775808i64"]:
input_sql = self.genFullTypeSql(value=value)[0]
try:
@@ -674,12 +674,12 @@ class TDTestCase:
tdSql.checkNotEqual(err.errno, 0)
# f32
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
for value in [f"{-3.4028234663852885981170418348451692544*(10**38)}f32", f"{3.4028234663852885981170418348451692544*(10**38)}f32"]:
input_sql, stb_name = self.genFullTypeSql(value=value)
self.resCmp(input_sql, stb_name)
# * limit set to 4028234664*(10**38)
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
for value in [f"{-3.4028234664*(10**38)}f32", f"{3.4028234664*(10**38)}f32"]:
input_sql = self.genFullTypeSql(value=value)[0]
try:
@@ -689,12 +689,12 @@ class TDTestCase:
tdSql.checkNotEqual(err.errno, 0)
# f64
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
for value in [f'{-1.79769313486231570814527423731704356798070567525844996598917476803157260780*(10**308)}f64', f'{-1.79769313486231570814527423731704356798070567525844996598917476803157260780*(10**308)}f64']:
input_sql, stb_name = self.genFullTypeSql(value=value)
self.resCmp(input_sql, stb_name)
# # * limit set to 1.797693134862316*(10**308)
- # tdCom.cleanTb()
+ # tdCom.cleanTb(dbname="test")
# for value in [f'{-1.797693134862316*(10**308)}f64', f'{-1.797693134862316*(10**308)}f64']:
# input_sql = self.genFullTypeSql(value=value)[0]
# try:
@@ -704,12 +704,12 @@ class TDTestCase:
# tdSql.checkNotEqual(err.errno, 0)
# # # binary
- # tdCom.cleanTb()
+ # tdCom.cleanTb(dbname="test")
# stb_name = tdCom.getLongName(7, "letters")
# input_sql = f'{stb_name} 1626006833640 "{tdCom.getLongName(16374, "letters")}" t0=t'
# self._conn.schemaless_insert([input_sql], TDSmlProtocolType.TELNET.value, None)
- # tdCom.cleanTb()
+ # tdCom.cleanTb(dbname="test")
# input_sql = f'{stb_name} 1626006833640 "{tdCom.getLongName(16375, "letters")}" t0=t'
# try:
# self._conn.schemaless_insert([input_sql], TDSmlProtocolType.TELNET.value, None)
@@ -719,12 +719,12 @@ class TDTestCase:
# # nchar
# # * legal nchar could not be larger than 16374/4
- # tdCom.cleanTb()
+ # tdCom.cleanTb(dbname="test")
# stb_name = tdCom.getLongName(7, "letters")
# input_sql = f'{stb_name} 1626006833640 L"{tdCom.getLongName(4093, "letters")}" t0=t'
# self._conn.schemaless_insert([input_sql], TDSmlProtocolType.TELNET.value, None)
- # tdCom.cleanTb()
+ # tdCom.cleanTb(dbname="test")
# input_sql = f'{stb_name} 1626006833640 L"{tdCom.getLongName(4094, "letters")}" t0=t'
# try:
# self._conn.schemaless_insert([input_sql], TDSmlProtocolType.TELNET.value, None)
@@ -738,7 +738,7 @@ class TDTestCase:
test illegal tag col value
"""
tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
# bool
for i in ["TrUe", "tRue", "trUe", "truE", "FalsE", "fAlse", "faLse", "falSe", "falsE"]:
input_sql1, stb_name = self.genFullTypeSql(t0=i)
@@ -774,7 +774,7 @@ class TDTestCase:
check blank case
'''
tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
# input_sql_list = [f'{tdCom.getLongName(7, "letters")} 1626006833640 "abc aaa" t0=t',
# f'{tdCom.getLongName(7, "letters")} 1626006833640 t t0="abaaa"',
# f'{tdCom.getLongName(7, "letters")} 1626006833640 t t0=L"abaaa"',
@@ -792,7 +792,7 @@ class TDTestCase:
check duplicate Id Tag Col
"""
tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
input_sql_id = self.genFullTypeSql(id_double_tag=True)[0]
try:
self._conn.schemaless_insert([input_sql_id], TDSmlProtocolType.TELNET.value, None)
@@ -815,7 +815,7 @@ class TDTestCase:
case no id when stb exist
"""
tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
input_sql, stb_name = self.genFullTypeSql(tb_name="sub_table_0123456", t0="f", value="f")
self.resCmp(input_sql, stb_name)
input_sql, stb_name = self.genFullTypeSql(stb_name=stb_name, id_noexist_tag=True, t0="f", value="f")
@@ -828,7 +828,7 @@ class TDTestCase:
check duplicate insert when stb exist
"""
tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
input_sql, stb_name = self.genFullTypeSql()
self.resCmp(input_sql, stb_name)
self._conn.schemaless_insert([input_sql], TDSmlProtocolType.TELNET.value, None)
@@ -840,7 +840,7 @@ class TDTestCase:
check length increase
"""
tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
input_sql, stb_name = self.genFullTypeSql()
self.resCmp(input_sql, stb_name)
tb_name = tdCom.getLongName(5, "letters")
@@ -858,7 +858,7 @@ class TDTestCase:
* col is added with value when update==1
"""
tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
tb_name = tdCom.getLongName(7, "letters")
for db_update_tag in [0, 1]:
if db_update_tag == 1 :
@@ -885,7 +885,7 @@ class TDTestCase:
check tag count add
"""
tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
tb_name = tdCom.getLongName(7, "letters")
input_sql, stb_name = self.genFullTypeSql(tb_name=tb_name, t0="f", value="f")
self.resCmp(input_sql, stb_name)
@@ -902,7 +902,7 @@ class TDTestCase:
insert two table, keep tag unchange, change col
"""
tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
input_sql, stb_name = self.genFullTypeSql(t0="f", value="f", id_noexist_tag=True)
self.resCmp(input_sql, stb_name)
tb_name1 = self.getNoIdTbName(stb_name)
@@ -925,7 +925,7 @@ class TDTestCase:
check nchar length limit
"""
tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
stb_name = tdCom.getLongName(7, "letters")
input_sql = f'{stb_name} 1626006833640 f t2={tdCom.getLongName(1, "letters")}'
self._conn.schemaless_insert([input_sql], TDSmlProtocolType.TELNET.value, None)
@@ -949,7 +949,7 @@ class TDTestCase:
test batch insert
"""
tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
stb_name = tdCom.getLongName(8, "letters")
tdSql.execute(f'create stable {stb_name}(ts timestamp, f int) tags(t1 bigint)')
@@ -976,7 +976,7 @@ class TDTestCase:
test multi insert
"""
tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
sql_list = []
stb_name = tdCom.getLongName(8, "letters")
tdSql.execute(f'create stable {stb_name}(ts timestamp, f int) tags(t1 nchar(10))')
@@ -992,7 +992,7 @@ class TDTestCase:
test batch error insert
"""
tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
stb_name = tdCom.getLongName(8, "letters")
lines = ["st123456 1626006833640 3i 64 t1=3i64 t2=4f64 t3=\"t3\"",
f"{stb_name} 1626056811823316532ns tRue t2=5f64 t3=L\"ste\""]
@@ -1007,7 +1007,7 @@ class TDTestCase:
test multi cols insert
"""
tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
input_sql = self.genFullTypeSql(c_multi_tag=True)[0]
try:
self._conn.schemaless_insert([input_sql], TDSmlProtocolType.TELNET.value, None)
@@ -1020,7 +1020,7 @@ class TDTestCase:
test blank col insert
"""
tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
input_sql = self.genFullTypeSql(c_blank_tag=True)[0]
try:
self._conn.schemaless_insert([input_sql], TDSmlProtocolType.TELNET.value, None)
@@ -1033,7 +1033,7 @@ class TDTestCase:
test blank tag insert
"""
tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
input_sql = self.genFullTypeSql(t_blank_tag=True)[0]
try:
self._conn.schemaless_insert([input_sql], TDSmlProtocolType.TELNET.value, None)
@@ -1046,7 +1046,7 @@ class TDTestCase:
check nchar ---> chinese
"""
tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
input_sql, stb_name = self.genFullTypeSql(chinese_tag=True)
self.resCmp(input_sql, stb_name)
@@ -1055,7 +1055,7 @@ class TDTestCase:
multi_field
'''
tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
input_sql = self.genFullTypeSql(multi_field_tag=True)[0]
try:
self._conn.schemaless_insert([input_sql], TDSmlProtocolType.TELNET.value, None)
@@ -1065,7 +1065,7 @@ class TDTestCase:
def spellCheckCase(self):
tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
stb_name = tdCom.getLongName(8, "letters")
input_sql_list = [f'{stb_name}_1 1626006833640 127I8 t0=127I8 t1=32767I16 t2=2147483647I32 t3=9223372036854775807I64 t4=11.12345027923584F32 t5=22.123456789F64',
f'{stb_name}_2 1626006833640 32767I16 t0=127I8 t1=32767I16 t2=2147483647I32 t3=9223372036854775807I64 t4=11.12345027923584F32 t5=22.123456789F64',
@@ -1086,7 +1086,7 @@ class TDTestCase:
metric value "." trans to "_"
"""
tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
input_sql = self.genFullTypeSql(point_trans_tag=True, protocol=protocol)[0]
if protocol == 'telnet-tcp':
stb_name = f'`{input_sql.split(" ")[1]}`'
@@ -1097,7 +1097,7 @@ class TDTestCase:
def defaultTypeCheckCase(self):
tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
stb_name = tdCom.getLongName(8, "letters")
input_sql_list = [f'{stb_name}_1 1626006833640 9223372036854775807 t0=f t1=127 t2=32767i16 t3=2147483647i32 t4=9223372036854775807 t5=11.12345f32 t6=22.123456789f64 t7="vozamcts" t8=L"ncharTagValue"', \
f'{stb_name}_2 1626006833641 22.123456789 t0=f t1=127i8 t2=32767I16 t3=2147483647i32 t4=9223372036854775807i64 t5=11.12345f32 t6=22.123456789 t7="vozamcts" t8=L"ncharTagValue"', \
@@ -1110,7 +1110,7 @@ class TDTestCase:
def tbnameTagsColsNameCheckCase(self):
tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
if self.smlChildTableName_value == "ID":
input_sql = 'rFa$sta 1626006834 9223372036854775807 id=rFas$ta_1 Tt!0=true tT@1=127Ii8 t#2=32767i16 "t$3"=2147483647i32 t%4=9223372036854775807i64 t^5=11.12345f32 t&6=22.123456789f64 t*7=\"ddzhiksj\" t!@#$%^&*()_+[];:<>?,9=L\"ncharTagValue\"'
self._conn.schemaless_insert([input_sql], TDSmlProtocolType.TELNET.value, None)
@@ -1135,7 +1135,7 @@ class TDTestCase:
stb = "put"
"""
tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
input_sql = self.genFullTypeSql(tcp_keyword_tag=True, protocol=protocol)[0]
stb_name = f'`{input_sql.split(" ")[1]}`'
self.resCmp(input_sql, stb_name, protocol=protocol)
@@ -1204,7 +1204,7 @@ class TDTestCase:
thread input different stb
"""
tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
input_sql = self.genSqlList()[0]
print(input_sql)
self.multiThreadRun(self.genMultiThreadSeq(input_sql))
@@ -1216,7 +1216,7 @@ class TDTestCase:
thread input same stb tb, different data, result keep first data
"""
tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
tb_name = tdCom.getLongName(7, "letters")
input_sql, stb_name = self.genFullTypeSql(tb_name=tb_name, value="\"binaryTagValue\"")
self.resCmp(input_sql, stb_name)
@@ -1235,7 +1235,7 @@ class TDTestCase:
thread input same stb tb, different data, add columes and tags, result keep first data
"""
tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
tb_name = tdCom.getLongName(7, "letters")
input_sql, stb_name = self.genFullTypeSql(tb_name=tb_name, value="\"binaryTagValue\"")
self.resCmp(input_sql, stb_name)
@@ -1254,7 +1254,7 @@ class TDTestCase:
thread input same stb tb, different data, minus columes and tags, result keep first data
"""
tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
tb_name = tdCom.getLongName(7, "letters")
input_sql, stb_name = self.genFullTypeSql(tb_name=tb_name, value="\"binaryTagValue\"")
self.resCmp(input_sql, stb_name)
@@ -1273,7 +1273,7 @@ class TDTestCase:
thread input same stb, different tb, different data
"""
tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
input_sql, stb_name = self.genFullTypeSql(value="\"binaryTagValue\"")
self.resCmp(input_sql, stb_name)
s_stb_d_tb_list = self.genSqlList(stb_name=stb_name)[4]
@@ -1286,7 +1286,7 @@ class TDTestCase:
thread input same stb, different tb, different data, add col, mul tag
"""
tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
input_sql, stb_name = self.genFullTypeSql(value="\"binaryTagValue\"")
self.resCmp(input_sql, stb_name)
s_stb_d_tb_m_tag_list = [(f'{stb_name} 1626006833640 "omfdhyom" t0=F t1=127i8 t2=32767i16 t3=2147483647i32 t4=9223372036854775807i64 t5=11.12345f32 t6=22.123456789f64', 'yzwswz'), \
@@ -1303,7 +1303,7 @@ class TDTestCase:
thread input same stb, different tb, different data, add tag, mul col
"""
tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
input_sql, stb_name = self.genFullTypeSql(value="\"binaryTagValue\"")
self.resCmp(input_sql, stb_name)
s_stb_d_tb_a_tag_list = self.genSqlList(stb_name=stb_name)[6]
@@ -1316,7 +1316,7 @@ class TDTestCase:
thread input same stb tb, different ts
"""
tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
tb_name = tdCom.getLongName(7, "letters")
input_sql, stb_name = self.genFullTypeSql(tb_name=tb_name, value="\"binaryTagValue\"")
self.resCmp(input_sql, stb_name)
@@ -1336,7 +1336,7 @@ class TDTestCase:
thread input same stb tb, different ts, add col, mul tag
"""
tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
tb_name = tdCom.getLongName(7, "letters")
input_sql, stb_name = self.genFullTypeSql(tb_name=tb_name, value="\"binaryTagValue\"")
self.resCmp(input_sql, stb_name)
@@ -1354,7 +1354,7 @@ class TDTestCase:
thread input same stb tb, different ts, add tag, mul col
"""
tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
tb_name = tdCom.getLongName(7, "letters")
input_sql, stb_name = self.genFullTypeSql(tb_name=tb_name, value="\"binaryTagValue\"")
self.resCmp(input_sql, stb_name)
@@ -1377,7 +1377,7 @@ class TDTestCase:
thread input same stb, different tb, data, ts
"""
tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
input_sql, stb_name = self.genFullTypeSql(value="\"binaryTagValue\"")
self.resCmp(input_sql, stb_name)
s_stb_d_tb_d_ts_list = self.genSqlList(stb_name=stb_name)[10]
@@ -1390,7 +1390,7 @@ class TDTestCase:
thread input same stb, different tb, data, ts, add col, mul tag
"""
tdLog.info(f'{sys._getframe().f_code.co_name}() function is running')
- tdCom.cleanTb()
+ tdCom.cleanTb(dbname="test")
input_sql, stb_name = self.genFullTypeSql(value="\"binaryTagValue\"")
self.resCmp(input_sql, stb_name)
s_stb_d_tb_d_ts_m_tag_list = [(f'{stb_name} 0 "mnpmtzul" t0=False t1=127i8 t2=32767i16 t3=2147483647i32 t4=9223372036854775807i64 t5=11.12345f32 t6=22.123456789f64', 'pcppkg'), \
diff --git a/tests/system-test/2-query/function_diff.py b/tests/system-test/2-query/function_diff.py
index fd5d6ea1cf1d42623443cbe13eb60aac6b9e80ac..946453bb23137d6ebbb67f8d588a67e054f5c2f1 100644
--- a/tests/system-test/2-query/function_diff.py
+++ b/tests/system-test/2-query/function_diff.py
@@ -193,43 +193,38 @@ class TDTestCase:
# case17: only support normal table join
case17 = {
- "col": "t1.c1",
- "table_expr": "t1, t2",
- "condition": "where t1.ts=t2.ts"
+ "col": "table1.c1 ",
+ "table_expr": "db.t1 as table1, db.t2 as table2",
+ "condition": "where table1.ts=table2.ts"
}
self.checkdiff(**case17)
- # case18~19: with group by
- # case18 = {
- # "table_expr": "db.t1",
- # "condition": "group by c6"
- # }
- # self.checkdiff(**case18)
+ # case18~19: with group by , function diff not support group by
+
case19 = {
- "table_expr": "db.stb1",
+ "table_expr": "db.stb1 where tbname =='t0' ",
"condition": "partition by tbname order by tbname" # partition by tbname
}
self.checkdiff(**case19)
- # # case20~21: with order by
- # case20 = {"condition": "order by ts"}
- # self.checkdiff(**case20)
+ # case20~21: with order by , Not a single-group group function
- # # case22: with union
+ # case22: with union
# case22 = {
- # "condition": "union all select diff(c1) from t2"
+ # "condition": "union all select diff(c1) from db.t2 "
# }
# self.checkdiff(**case22)
+ tdSql.query("select count(c1) from db.t1 union all select count(c1) from db.t2")
# case23: with limit/slimit
case23 = {
"condition": "limit 1"
}
self.checkdiff(**case23)
- # case24 = {
- # "table_expr": "db.stb1",
- # "condition": "group by tbname slimit 1 soffset 1"
- # }
- # self.checkdiff(**case24)
+ case24 = {
+ "table_expr": "db.stb1",
+ "condition": "partition by tbname order by tbname slimit 1 soffset 1"
+ }
+ self.checkdiff(**case24)
pass
@@ -284,9 +279,9 @@ class TDTestCase:
tdSql.query(self.diff_query_form(alias=", c2")) # mix with other 1
# tdSql.error(self.diff_query_form(table_expr="db.stb1")) # select stb directly
stb_join = {
- "col": "stb1.c1",
- "table_expr": "stb1, stb2",
- "condition": "where stb1.ts=stb2.ts and stb1.st1=stb2.st2 order by stb1.ts"
+ "col": "stable1.c1",
+ "table_expr": "db.stb1 as stable1, db.stb2 as stable2",
+ "condition": "where stable1.ts=stable2.ts and stable1.st1=stable2.st2 order by stable1.ts"
}
tdSql.query(self.diff_query_form(**stb_join)) # stb join
interval_sql = {
@@ -315,20 +310,20 @@ class TDTestCase:
for i in range(tbnum):
for j in range(data_row):
tdSql.execute(
- f"insert into t{i} values ("
+ f"insert into db.t{i} values ("
f"{basetime + (j+1)*10}, {random.randint(-200, -1)}, {random.uniform(200, -1)}, {basetime + random.randint(-200, -1)}, "
f"'binary_{j}', {random.uniform(-200, -1)}, {random.choice([0,1])}, {random.randint(-200,-1)}, "
f"{random.randint(-200, -1)}, {random.randint(-127, -1)}, 'nchar_{j}' )"
)
tdSql.execute(
- f"insert into t{i} values ("
+ f"insert into db.t{i} values ("
f"{basetime - (j+1) * 10}, {random.randint(1, 200)}, {random.uniform(1, 200)}, {basetime - random.randint(1, 200)}, "
f"'binary_{j}_1', {random.uniform(1, 200)}, {random.choice([0, 1])}, {random.randint(1,200)}, "
f"{random.randint(1,200)}, {random.randint(1,127)}, 'nchar_{j}_1' )"
)
tdSql.execute(
- f"insert into tt{i} values ( {basetime-(j+1) * 10}, {random.randint(1, 200)} )"
+ f"insert into db.tt{i} values ( {basetime-(j+1) * 10}, {random.randint(1, 200)} )"
)
pass
@@ -349,8 +344,8 @@ class TDTestCase:
"create stable db.stb2 (ts timestamp, c1 int) tags(st2 int)"
)
for i in range(tbnum):
- tdSql.execute(f"create table t{i} using db.stb1 tags({i})")
- tdSql.execute(f"create table tt{i} using db.stb2 tags({i})")
+ tdSql.execute(f"create table db.t{i} using db.stb1 tags({i})")
+ tdSql.execute(f"create table db.tt{i} using db.stb2 tags({i})")
pass
def diff_support_stable(self):
@@ -398,8 +393,8 @@ class TDTestCase:
tdLog.printNoPrefix("######## insert only NULL test:")
for i in range(tbnum):
- tdSql.execute(f"insert into t{i}(ts) values ({nowtime - 5})")
- tdSql.execute(f"insert into t{i}(ts) values ({nowtime + 5})")
+ tdSql.execute(f"insert into db.t{i}(ts) values ({nowtime - 5})")
+ tdSql.execute(f"insert into db.t{i}(ts) values ({nowtime + 5})")
self.diff_current_query()
self.diff_error_query()
@@ -430,9 +425,9 @@ class TDTestCase:
tdLog.printNoPrefix("######## insert data mix with NULL test:")
for i in range(tbnum):
- tdSql.execute(f"insert into t{i}(ts) values ({nowtime})")
- tdSql.execute(f"insert into t{i}(ts) values ({nowtime-(per_table_rows+3)*10})")
- tdSql.execute(f"insert into t{i}(ts) values ({nowtime+(per_table_rows+3)*10})")
+ tdSql.execute(f"insert into db.t{i}(ts) values ({nowtime})")
+ tdSql.execute(f"insert into db.t{i}(ts) values ({nowtime-(per_table_rows+3)*10})")
+ tdSql.execute(f"insert into db.t{i}(ts) values ({nowtime+(per_table_rows+3)*10})")
self.diff_current_query()
self.diff_error_query()
diff --git a/tests/system-test/2-query/interp.py b/tests/system-test/2-query/interp.py
index 934ba9e161c8787dc36cfdafc15044eb9e0ec425..5550519e05249de13d1267dd2a8f5bc1b10fae6d 100644
--- a/tests/system-test/2-query/interp.py
+++ b/tests/system-test/2-query/interp.py
@@ -551,7 +551,57 @@ class TDTestCase:
tdSql.checkData(0, 0, 15)
tdSql.checkData(1, 0, 15)
- tdLog.printNoPrefix("==========step9:test error cases")
+ tdLog.printNoPrefix("==========step9:test multi-interp cases")
+ tdSql.query(f"select interp(c0),interp(c1),interp(c2),interp(c3) from {dbname}.{tbname} range('2020-02-09 00:00:05', '2020-02-13 00:00:05') every(1d) fill(null)")
+ tdSql.checkRows(5)
+ tdSql.checkCols(4)
+
+ for i in range (tdSql.queryCols):
+ tdSql.checkData(0, i, None)
+ tdSql.checkData(1, i, None)
+ tdSql.checkData(2, i, 15)
+ tdSql.checkData(3, i, None)
+ tdSql.checkData(4, i, None)
+
+ tdSql.query(f"select interp(c0),interp(c1),interp(c2),interp(c3) from {dbname}.{tbname} range('2020-02-09 00:00:05', '2020-02-13 00:00:05') every(1d) fill(value, 1)")
+ tdSql.checkRows(5)
+ tdSql.checkCols(4)
+
+ for i in range (tdSql.queryCols):
+ tdSql.checkData(0, i, 1)
+ tdSql.checkData(1, i, 1)
+ tdSql.checkData(2, i, 15)
+ tdSql.checkData(3, i, 1)
+ tdSql.checkData(4, i, 1)
+
+ tdSql.query(f"select interp(c0),interp(c1),interp(c2),interp(c3) from {dbname}.{tbname} range('2020-02-09 00:00:05', '2020-02-13 00:00:05') every(1d) fill(prev)")
+ tdSql.checkRows(5)
+ tdSql.checkCols(4)
+
+ for i in range (tdSql.queryCols):
+ tdSql.checkData(0, i, 5)
+ tdSql.checkData(1, i, 5)
+ tdSql.checkData(2, i, 15)
+ tdSql.checkData(3, i, 15)
+ tdSql.checkData(4, i, 15)
+
+ tdSql.query(f"select interp(c0),interp(c1),interp(c2),interp(c3) from {dbname}.{tbname} range('2020-02-09 00:00:05', '2020-02-13 00:00:05') every(1d) fill(next)")
+ tdSql.checkRows(3)
+ tdSql.checkCols(4)
+
+ for i in range (tdSql.queryCols):
+ tdSql.checkData(0, i, 15)
+ tdSql.checkData(1, i, 15)
+ tdSql.checkData(2, i, 15)
+
+ tdSql.query(f"select interp(c0),interp(c1),interp(c2),interp(c3) from {dbname}.{tbname} range('2020-02-09 00:00:05', '2020-02-13 00:00:05') every(1d) fill(linear)")
+ tdSql.checkRows(1)
+ tdSql.checkCols(4)
+
+ for i in range (tdSql.queryCols):
+ tdSql.checkData(0, i, 15)
+
+ tdLog.printNoPrefix("==========step10:test error cases")
tdSql.error(f"select interp(c0) from {dbname}.{tbname}")
tdSql.error(f"select interp(c0) from {dbname}.{tbname} range('2020-02-10 00:00:05', '2020-02-15 00:00:05')")
diff --git a/tests/system-test/2-query/join2.py b/tests/system-test/2-query/join2.py
index 5533cb840e29d2e0b109687f2aa3189d2c26a381..5c8fe0f0f96e6c28aa1ef70240b3ef4d5b0598fa 100644
--- a/tests/system-test/2-query/join2.py
+++ b/tests/system-test/2-query/join2.py
@@ -52,12 +52,12 @@ class TDTestCase:
return query_condition
- def __join_condition(self, tb_list, filter=PRIMARY_COL, INNER=False):
+ def __join_condition(self, tb_list, filter=PRIMARY_COL, INNER=False, alias_tb1="tb1", alias_tb2="tb2"):
table_reference = tb_list[0]
join_condition = table_reference
join = "inner join" if INNER else "join"
for i in range(len(tb_list[1:])):
- join_condition += f" {join} {tb_list[i+1]} on {table_reference}.{filter}={tb_list[i+1]}.{filter}"
+ join_condition += f" as {alias_tb1} {join} {tb_list[i+1]} as {alias_tb2} on {alias_tb1}.{filter}={alias_tb2}.{filter}"
return join_condition
@@ -123,28 +123,28 @@ class TDTestCase:
sqls = []
__join_tblist = self.__join_tblist
for join_tblist in __join_tblist:
- for join_tb in join_tblist:
- select_claus_list = self.__query_condition(join_tb)
- for select_claus in select_claus_list:
- group_claus = self.__group_condition( col=select_claus)
- where_claus = self.__where_condition( query_conditon=select_claus )
- having_claus = self.__group_condition( col=select_claus, having=f"{select_claus} is not null" )
- sqls.extend(
- (
- # self.__gen_sql(select_claus, self.__join_condition(join_tblist), where_claus, group_claus),
- self.__gen_sql(select_claus, self.__join_condition(join_tblist), where_claus, having_claus),
- self.__gen_sql(select_claus, self.__join_condition(join_tblist), where_claus),
- # self.__gen_sql(select_claus, self.__join_condition(join_tblist), group_claus),
- self.__gen_sql(select_claus, self.__join_condition(join_tblist), having_claus),
- self.__gen_sql(select_claus, self.__join_condition(join_tblist)),
- # self.__gen_sql(select_claus, self.__join_condition(join_tblist, INNER=True), where_claus, group_claus),
- self.__gen_sql(select_claus, self.__join_condition(join_tblist, INNER=True), where_claus, having_claus),
- self.__gen_sql(select_claus, self.__join_condition(join_tblist, INNER=True), where_claus, ),
- self.__gen_sql(select_claus, self.__join_condition(join_tblist, INNER=True), having_claus ),
- # self.__gen_sql(select_claus, self.__join_condition(join_tblist, INNER=True), group_claus ),
- self.__gen_sql(select_claus, self.__join_condition(join_tblist, INNER=True) ),
- )
+ alias_tb = "tb1"
+ select_claus_list = self.__query_condition(alias_tb)
+ for select_claus in select_claus_list:
+ group_claus = self.__group_condition( col=select_claus)
+ where_claus = self.__where_condition( query_conditon=select_claus )
+ having_claus = self.__group_condition( col=select_claus, having=f"{select_claus} is not null" )
+ sqls.extend(
+ (
+ # self.__gen_sql(select_claus, self.__join_condition(join_tblist), where_claus, group_claus),
+ self.__gen_sql(select_claus, self.__join_condition(join_tblist, alias_tb1=alias_tb), where_claus, having_claus),
+ self.__gen_sql(select_claus, self.__join_condition(join_tblist, alias_tb1=alias_tb), where_claus),
+ # self.__gen_sql(select_claus, self.__join_condition(join_tblist), group_claus),
+ self.__gen_sql(select_claus, self.__join_condition(join_tblist, alias_tb1=alias_tb), having_claus),
+ self.__gen_sql(select_claus, self.__join_condition(join_tblist, alias_tb1=alias_tb)),
+ # self.__gen_sql(select_claus, self.__join_condition(join_tblist, INNER=True), where_claus, group_claus),
+ self.__gen_sql(select_claus, self.__join_condition(join_tblist, alias_tb1=alias_tb, INNER=True), where_claus, having_claus),
+ self.__gen_sql(select_claus, self.__join_condition(join_tblist, alias_tb1=alias_tb, INNER=True), where_claus, ),
+ self.__gen_sql(select_claus, self.__join_condition(join_tblist, alias_tb1=alias_tb, INNER=True), having_claus ),
+ # self.__gen_sql(select_claus, self.__join_condition(join_tblist, INNER=True), group_claus ),
+ self.__gen_sql(select_claus, self.__join_condition(join_tblist, alias_tb1=alias_tb, INNER=True) ),
)
+ )
return list(filter(None, sqls))
def __join_check(self,):
@@ -341,10 +341,8 @@ class TDTestCase:
tdLog.printNoPrefix("==========step3:all check")
self.all_test()
- tdDnodes.stop(1)
- tdDnodes.start(1)
+ tdSql.execute(f"flush database db")
- tdSql.execute("use db")
tdLog.printNoPrefix("==========step4:after wal, all check again ")
self.all_test()
diff --git a/tests/system-test/2-query/json_tag.py b/tests/system-test/2-query/json_tag.py
index 856d7647477f8693e0f20f6950e1ab810c47b4d4..d9715579aed4878c1cf17642824718d412a77511 100644
--- a/tests/system-test/2-query/json_tag.py
+++ b/tests/system-test/2-query/json_tag.py
@@ -338,7 +338,7 @@ class TDTestCase:
tdSql.query(f"select * from {dbname}.jsons1 where jtag->'tag1' between 1 and 30")
tdSql.checkRows(3)
tdSql.query(f"select * from {dbname}.jsons1 where jtag->'tag1' between 'femail' and 'beijing'")
- tdSql.checkRows(2)
+ tdSql.checkRows(0)
# test with tbname/normal column
tdSql.query(f"select * from {dbname}.jsons1 where tbname = 'jsons1_1'")
diff --git a/tests/system-test/2-query/last_row.py b/tests/system-test/2-query/last_row.py
index f65744a0b7666a2fc9d0e60332fc20fb53ca6886..5d435b068fb12959fd2bdc6f02968b2a7ffe7c9d 100644
--- a/tests/system-test/2-query/last_row.py
+++ b/tests/system-test/2-query/last_row.py
@@ -638,13 +638,13 @@ class TDTestCase:
tdSql.query(f"select ts , last_row(c1) ,c1 from (select ts , c1 ,t1 from {dbname}.stb1)")
tdSql.checkData(0,1,None)
- tdSql.query(f"select ts , last_row(c1) ,c1 from (select ts , max(c1) c1 ,t1 from {dbname}.stb1 where ts >now -1h and ts now -1h and ts now -1h and ts now -1h and ts now -1h and ts now -1h and ts 0:
- elem = math.log(elem)
- elif elem <=0:
- elem = None
- row_check.append(elem)
- auto_result.append(row_check)
-
- check_status = True
- for row_index , row in enumerate(log_result):
- for col_index , elem in enumerate(row):
- if auto_result[row_index][col_index] != elem:
- check_status = False
- if not check_status:
- tdLog.notice("log function value has not as expected , sql is \"%s\" "%log_query )
- sys.exit(1)
- else:
- tdLog.info("log value check pass , it work as expected ,sql is \"%s\" "%log_query )
-
- def check_result_auto_log2(self ,origin_query , log_query):
-
- log_result = tdSql.getResult(log_query)
- origin_result = tdSql.getResult(origin_query)
-
- auto_result =[]
-
- for row in origin_result:
- row_check = []
- for elem in row:
- if elem == None:
- elem = None
- elif elem >0:
- elem = math.log(elem,2)
- elif elem <=0:
- elem = None
- row_check.append(elem)
- auto_result.append(row_check)
-
- check_status = True
- for row_index , row in enumerate(log_result):
- for col_index , elem in enumerate(row):
- if auto_result[row_index][col_index] != elem:
- check_status = False
- if not check_status:
- tdLog.notice("log function value has not as expected , sql is \"%s\" "%log_query )
- sys.exit(1)
- else:
- tdLog.info("log value check pass , it work as expected ,sql is \"%s\" "%log_query )
-
- def check_result_auto_log1(self ,origin_query , log_query):
- log_result = tdSql.getResult(log_query)
- origin_result = tdSql.getResult(origin_query)
-
- auto_result =[]
-
- for row in origin_result:
- row_check = []
- for elem in row:
- if elem == None:
- elem = None
- elif elem >0:
- elem = None
- elif elem <=0:
- elem = None
- row_check.append(elem)
- auto_result.append(row_check)
-
- check_status = True
- for row_index , row in enumerate(log_result):
- for col_index , elem in enumerate(row):
- if auto_result[row_index][col_index] != elem:
- check_status = False
- if not check_status:
- tdLog.notice("log function value has not as expected , sql is \"%s\" "%log_query )
- sys.exit(1)
- else:
- tdLog.info("log value check pass , it work as expected ,sql is \"%s\" "%log_query )
- def check_result_auto_log__10(self ,origin_query , log_query):
log_result = tdSql.getResult(log_query)
origin_result = tdSql.getResult(origin_query)
@@ -163,26 +75,30 @@ class TDTestCase:
for row in origin_result:
row_check = []
for elem in row:
- if elem == None:
- elem = None
- elif elem >0:
- elem = None
- elif elem <=0:
+ if base ==1:
elem = None
+ else:
+ if elem == None:
+ elem = None
+ elif elem ==1:
+ elem = 0.0
+ elif elem >0 and elem !=1 :
+ if base==None :
+ elem = math.log(elem )
+ else:
+ print(base , elem)
+ elem = math.log(elem , base)
+ elif elem <=0:
+ elem = None
+
row_check.append(elem)
auto_result.append(row_check)
- check_status = True
+ tdSql.query(log_query)
for row_index , row in enumerate(log_result):
for col_index , elem in enumerate(row):
- if auto_result[row_index][col_index] != elem:
- check_status = False
- if not check_status:
- tdLog.notice("log function value has not as expected , sql is \"%s\" "%log_query )
- sys.exit(1)
- else:
- tdLog.info("log value check pass , it work as expected ,sql is \"%s\" "%log_query )
-
+ tdSql.checkData(row_index , col_index ,auto_result[row_index][col_index])
+
def test_errors(self, dbname="db"):
error_sql_lists = [
f"select log from {dbname}.t1",
@@ -328,10 +244,10 @@ class TDTestCase:
tdSql.checkData(3 , 0, 1.098612289)
tdSql.checkData(4 , 0, 1.386294361)
- self.check_result_auto_log( f"select c1, c2, c3 , c4, c5 from {dbname}.t1", f"select log(c1), log(c2) ,log(c3), log(c4), log(c5) from {dbname}.t1")
- self.check_result_auto_log2( f"select c1, c2, c3 , c4, c5 from {dbname}.t1", f"select log(c1 ,2), log(c2 ,2) ,log(c3, 2), log(c4 ,2), log(c5 ,2) from {dbname}.t1")
- self.check_result_auto_log__10( f"select c1, c2, c3 , c4, c5 from {dbname}.t1", f"select log(c1 ,1), log(c2 ,1) ,log(c3, 1), log(c4 ,1), log(c5 ,1) from {dbname}.t1")
- self.check_result_auto_log__10( f"select c1, c2, c3 , c4, c5 from {dbname}.t1", f"select log(c1 ,-10), log(c2 ,-10) ,log(c3, -10), log(c4 ,-10), log(c5 ,-10) from {dbname}.t1")
+ self.check_result_auto_log( None , f"select c1, c2, c3 , c4, c5 from {dbname}.t1", f"select log(c1), log(c2) ,log(c3), log(c4), log(c5) from {dbname}.t1")
+ self.check_result_auto_log( 2 , f"select c1, c2, c3 , c4, c5 from {dbname}.t1", f"select log(c1 ,2), log(c2 ,2) ,log(c3, 2), log(c4 ,2), log(c5 ,2) from {dbname}.t1")
+ self.check_result_auto_log( 1, f"select c1, c2, c3 , c4, c5 from {dbname}.t1", f"select log(c1 ,1), log(c2 ,1) ,log(c3, 1), log(c4 ,1), log(c5 ,1) from {dbname}.t1")
+ self.check_result_auto_log( 10 ,f"select c1, c2, c3 , c4, c5 from {dbname}.t1", f"select log(c1 ,10), log(c2 ,10) ,log(c3, 10), log(c4 ,10), log(c5 ,10) from {dbname}.t1")
# used for sub table
tdSql.query(f"select c1 ,log(c1 ,3) from {dbname}.ct1")
@@ -349,9 +265,9 @@ class TDTestCase:
tdSql.checkData(3 , 2, 0.147315235)
tdSql.checkData(4 , 2, None)
- self.check_result_auto_log( f"select c1, c2, c3 , c4, c5 from {dbname}.ct1", f"select log(c1), log(c2) ,log(c3), log(c4), log(c5) from {dbname}.ct1")
- self.check_result_auto_log2( f"select c1, c2, c3 , c4, c5 from {dbname}.ct1", f"select log(c1,2), log(c2,2) ,log(c3,2), log(c4,2), log(c5,2) from {dbname}.ct1")
- self.check_result_auto_log__10( f"select c1, c2, c3 , c4, c5 from {dbname}.ct1", f"select log(c1,-10), log(c2,-10) ,log(c3,-10), log(c4,-10), log(c5,-10) from {dbname}.ct1")
+ self.check_result_auto_log( None ,f"select c1, c2, c3 , c4, c5 from {dbname}.ct1", f"select log(c1), log(c2) ,log(c3), log(c4), log(c5) from {dbname}.ct1")
+ self.check_result_auto_log( 2, f"select c1, c2, c3 , c4, c5 from {dbname}.ct1", f"select log(c1,2), log(c2,2) ,log(c3,2), log(c4,2), log(c5,2) from {dbname}.ct1")
+ self.check_result_auto_log( 10 , f"select c1, c2, c3 , c4, c5 from {dbname}.ct1", f"select log(c1,10), log(c2,10) ,log(c3,10), log(c4,10), log(c5,10) from {dbname}.ct1")
# nest query for log functions
tdSql.query(f"select c1 , log(c1,3) ,log(log(c1,3),3) , log(log(log(c1,3),3),3) from {dbname}.ct1;")
@@ -585,15 +501,15 @@ class TDTestCase:
tdSql.error(
f"insert into {dbname}.sub1_bound values ( now()+1s, 2147483648, 9223372036854775808, 32768, 128, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
)
- self.check_result_auto_log( f"select c1, c2, c3 , c4, c5 ,c6 from {dbname}.sub1_bound ", f"select log(c1), log(c2) ,log(c3), log(c4), log(c5) ,log(c6) from {dbname}.sub1_bound")
- self.check_result_auto_log2( f"select c1, c2, c3 , c4, c5 ,c6 from {dbname}.sub1_bound ", f"select log(c1,2), log(c2,2) ,log(c3,2), log(c4,2), log(c5,2) ,log(c6,2) from {dbname}.sub1_bound")
- self.check_result_auto_log__10( f"select c1, c2, c3 , c4, c5 ,c6 from {dbname}.sub1_bound ", f"select log(c1,-10), log(c2,-10) ,log(c3,-10), log(c4,-10), log(c5,-10) ,log(c6,-10) from {dbname}.sub1_bound")
+ self.check_result_auto_log(None , f"select c1, c2, c3 , c4, c5 ,c6 from {dbname}.sub1_bound ", f"select log(c1), log(c2) ,log(c3), log(c4), log(c5) ,log(c6) from {dbname}.sub1_bound")
+ self.check_result_auto_log( 2 , f"select c1, c2, c3 , c4, c5 ,c6 from {dbname}.sub1_bound ", f"select log(c1,2), log(c2,2) ,log(c3,2), log(c4,2), log(c5,2) ,log(c6,2) from {dbname}.sub1_bound")
+ self.check_result_auto_log( 10 , f"select c1, c2, c3 , c4, c5 ,c6 from {dbname}.sub1_bound ", f"select log(c1,10), log(c2,10) ,log(c3,10), log(c4,10), log(c5,10) ,log(c6,10) from {dbname}.sub1_bound")
- self.check_result_auto_log2( f"select c1, c2, c3 , c3, c2 ,c1 from {dbname}.sub1_bound ", f"select log(c1,2), log(c2,2) ,log(c3,2), log(c3,2), log(c2,2) ,log(c1,2) from {dbname}.sub1_bound")
- self.check_result_auto_log( f"select c1, c2, c3 , c3, c2 ,c1 from {dbname}.sub1_bound ", f"select log(c1), log(c2) ,log(c3), log(c3), log(c2) ,log(c1) from {dbname}.sub1_bound")
+ self.check_result_auto_log( 2 , f"select c1, c2, c3 , c3, c2 ,c1 from {dbname}.sub1_bound ", f"select log(c1,2), log(c2,2) ,log(c3,2), log(c3,2), log(c2,2) ,log(c1,2) from {dbname}.sub1_bound")
+ self.check_result_auto_log( None , f"select c1, c2, c3 , c3, c2 ,c1 from {dbname}.sub1_bound ", f"select log(c1), log(c2) ,log(c3), log(c3), log(c2) ,log(c1) from {dbname}.sub1_bound")
- self.check_result_auto_log2(f"select abs(abs(abs(abs(abs(abs(abs(abs(abs(c1))))))))) nest_col_func from {dbname}.sub1_bound" , f"select log(abs(c1) ,2) from {dbname}.sub1_bound" )
+ self.check_result_auto_log(2 , f"select abs(abs(abs(abs(abs(abs(abs(abs(abs(c1))))))))) nest_col_func from {dbname}.sub1_bound" , f"select log(abs(c1) ,2) from {dbname}.sub1_bound" )
# check basic elem for table per row
tdSql.query(f"select log(abs(c1),2) ,log(abs(c2),2) , log(abs(c3),2) , log(abs(c4),2), log(abs(c5),2), log(abs(c6),2) from {dbname}.sub1_bound ")
@@ -647,15 +563,15 @@ class TDTestCase:
def support_super_table_test(self, dbname="db"):
- self.check_result_auto_log2( f"select c5 from {dbname}.stb1 order by ts " , f"select log(c5,2) from {dbname}.stb1 order by ts" )
- self.check_result_auto_log2( f"select c5 from {dbname}.stb1 order by tbname " , f"select log(c5,2) from {dbname}.stb1 order by tbname" )
- self.check_result_auto_log2( f"select c5 from {dbname}.stb1 where c1 > 0 order by tbname " , f"select log(c5,2) from {dbname}.stb1 where c1 > 0 order by tbname" )
- self.check_result_auto_log2( f"select c5 from {dbname}.stb1 where c1 > 0 order by tbname " , f"select log(c5,2) from {dbname}.stb1 where c1 > 0 order by tbname" )
+ self.check_result_auto_log( 2 , f"select c5 from {dbname}.stb1 order by ts " , f"select log(c5,2) from {dbname}.stb1 order by ts" )
+ self.check_result_auto_log( 2 ,f"select c5 from {dbname}.stb1 order by tbname " , f"select log(c5,2) from {dbname}.stb1 order by tbname" )
+ self.check_result_auto_log( 2 ,f"select c5 from {dbname}.stb1 where c1 > 0 order by tbname " , f"select log(c5,2) from {dbname}.stb1 where c1 > 0 order by tbname" )
+ self.check_result_auto_log( 2 , f"select c5 from {dbname}.stb1 where c1 > 0 order by tbname " , f"select log(c5,2) from {dbname}.stb1 where c1 > 0 order by tbname" )
- self.check_result_auto_log2( f"select t1,c5 from {dbname}.stb1 order by ts " , f"select log(t1,2), log(c5,2) from {dbname}.stb1 order by ts" )
- self.check_result_auto_log2( f"select t1,c5 from {dbname}.stb1 order by tbname " , f"select log(t1,2) ,log(c5,2) from {dbname}.stb1 order by tbname" )
- self.check_result_auto_log2( f"select t1,c5 from {dbname}.stb1 where c1 > 0 order by tbname " , f"select log(t1,2) ,log(c5,2) from {dbname}.stb1 where c1 > 0 order by tbname" )
- self.check_result_auto_log2( f"select t1,c5 from {dbname}.stb1 where c1 > 0 order by tbname " , f"select log(t1,2) , log(c5,2) from {dbname}.stb1 where c1 > 0 order by tbname" )
+ self.check_result_auto_log( 2 , f"select t1,c5 from {dbname}.stb1 order by ts " , f"select log(t1,2), log(c5,2) from {dbname}.stb1 order by ts" )
+ self.check_result_auto_log( 2 , f"select t1,c5 from {dbname}.stb1 order by tbname " , f"select log(t1,2) ,log(c5,2) from {dbname}.stb1 order by tbname" )
+ self.check_result_auto_log( 2 , f"select t1,c5 from {dbname}.stb1 where c1 > 0 order by tbname " , f"select log(t1,2) ,log(c5,2) from {dbname}.stb1 where c1 > 0 order by tbname" )
+ self.check_result_auto_log( 2 ,f"select t1,c5 from {dbname}.stb1 where c1 > 0 order by tbname " , f"select log(t1,2) , log(c5,2) from {dbname}.stb1 where c1 > 0 order by tbname" )
def run(self): # sourcery skip: extract-duplicate-method, remove-redundant-fstring
tdSql.prepare()
diff --git a/tests/system-test/2-query/lower.py b/tests/system-test/2-query/lower.py
index 0917fb63fc638263849625aec5b907c05260f49f..0e33e3834ec9ecc50470f0793b29a3a4b84d4834 100644
--- a/tests/system-test/2-query/lower.py
+++ b/tests/system-test/2-query/lower.py
@@ -96,16 +96,16 @@ class TDTestCase:
return sqls
- def __test_current(self):
+ def __test_current(self, dbname="db"):
tdLog.printNoPrefix("==========current sql condition check , must return query ok==========")
- tbname = ["ct1", "ct2", "ct4", "t1", "stb1"]
+ tbname = [f"{dbname}.ct1", f"{dbname}.ct2", f"{dbname}.ct4", f"{dbname}.t1", f"{dbname}.stb1"]
for tb in tbname:
self.__lower_current_check(tb)
tdLog.printNoPrefix(f"==========current sql condition check in {tb} over==========")
- def __test_error(self):
+ def __test_error(self, dbname="db"):
tdLog.printNoPrefix("==========err sql condition check , must return error==========")
- tbname = ["ct1", "ct2", "ct4", "t1", "stb1"]
+ tbname = [f"{dbname}.ct1", f"{dbname}.ct2", f"{dbname}.ct4", f"{dbname}.t1", f"{dbname}.stb1"]
for tb in tbname:
for errsql in self.__lower_err_check(tb):
@@ -113,22 +113,20 @@ class TDTestCase:
tdLog.printNoPrefix(f"==========err sql condition check in {tb} over==========")
- def all_test(self):
- self.__test_current()
- self.__test_error()
+ def all_test(self, dbname="db"):
+ self.__test_current(dbname)
+ self.__test_error(dbname)
-
- def __create_tb(self):
- tdSql.prepare()
+ def __create_tb(self, dbname="db"):
tdLog.printNoPrefix("==========step1:create table")
- create_stb_sql = f'''create table stb1(
+ create_stb_sql = f'''create table {dbname}.stb1(
ts timestamp, {INT_COL} int, {BINT_COL} bigint, {SINT_COL} smallint, {TINT_COL} tinyint,
{FLOAT_COL} float, {DOUBLE_COL} double, {BOOL_COL} bool,
{BINARY_COL} binary(16), {NCHAR_COL} nchar(32), {TS_COL} timestamp
- ) tags (t1 int)
+ ) tags (tag1 int)
'''
- create_ntb_sql = f'''create table t1(
+ create_ntb_sql = f'''create table {dbname}.t1(
ts timestamp, {INT_COL} int, {BINT_COL} bigint, {SINT_COL} smallint, {TINT_COL} tinyint,
{FLOAT_COL} float, {DOUBLE_COL} double, {BOOL_COL} bool,
{BINARY_COL} binary(16), {NCHAR_COL} nchar(32), {TS_COL} timestamp
@@ -138,78 +136,78 @@ class TDTestCase:
tdSql.execute(create_ntb_sql)
for i in range(4):
- tdSql.execute(f'create table ct{i+1} using stb1 tags ( {i+1} )')
+ tdSql.execute(f'create table {dbname}.ct{i+1} using {dbname}.stb1 tags ( {i+1} )')
- def __insert_data(self, rows):
+ def __insert_data(self, rows, dbname="db"):
now_time = int(datetime.datetime.timestamp(datetime.datetime.now()) * 1000)
for i in range(rows):
tdSql.execute(
- f"insert into ct1 values ( { now_time - i * 1000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar{i}', { now_time + 1 * i } )"
+ f"insert into {dbname}.ct1 values ( { now_time - i * 1000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )"
)
tdSql.execute(
- f"insert into ct4 values ( { now_time - i * 7776000000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar{i}', { now_time + 1 * i } )"
+ f"insert into {dbname}.ct4 values ( { now_time - i * 7776000000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )"
)
tdSql.execute(
- f"insert into ct2 values ( { now_time - i * 7776000000 }, {-i}, {-11111 * i}, {-111 * i % 32767 }, {-11 * i % 127}, {-1.11*i}, {-1100.0011*i}, {i%2}, 'binary{i}', 'nchar{i}', { now_time + 1 * i } )"
+ f"insert into {dbname}.ct2 values ( { now_time - i * 7776000000 }, {-i}, {-11111 * i}, {-111 * i % 32767 }, {-11 * i % 127}, {-1.11*i}, {-1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )"
)
tdSql.execute(
- f'''insert into ct1 values
- ( { now_time - rows * 5 }, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar0', { now_time + 8 } )
- ( { now_time + 10000 }, { rows }, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar9', { now_time + 9 } )
+ f'''insert into {dbname}.ct1 values
+ ( { now_time - rows * 5 }, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar_测试_0', { now_time + 8 } )
+ ( { now_time + 10000 }, { rows }, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar_测试_9', { now_time + 9 } )
'''
)
tdSql.execute(
- f'''insert into ct4 values
+ f'''insert into {dbname}.ct4 values
( { now_time - rows * 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
- ( { now_time - rows * 3888000000+ 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
+ ( { now_time - rows * 3888000000 + 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( { now_time + 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
(
{ now_time + 5184000000}, {pow(2,31)-pow(2,15)}, {pow(2,63)-pow(2,30)}, 32767, 127,
- { 3.3 * pow(10,38) }, { 1.3 * pow(10,308) }, { rows % 2 }, "binary_limit-1", "nchar_limit-1", { now_time - 86400000}
+ { 3.3 * pow(10,38) }, { 1.3 * pow(10,308) }, { rows % 2 }, "binary_limit-1", "nchar_测试_limit-1", { now_time - 86400000}
)
(
{ now_time + 2592000000 }, {pow(2,31)-pow(2,16)}, {pow(2,63)-pow(2,31)}, 32766, 126,
- { 3.2 * pow(10,38) }, { 1.2 * pow(10,308) }, { (rows-1) % 2 }, "binary_limit-2", "nchar_limit-2", { now_time - 172800000}
+ { 3.2 * pow(10,38) }, { 1.2 * pow(10,308) }, { (rows-1) % 2 }, "binary_limit-2", "nchar_测试_limit-2", { now_time - 172800000}
)
'''
)
tdSql.execute(
- f'''insert into ct2 values
+ f'''insert into {dbname}.ct2 values
( { now_time - rows * 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
- ( { now_time - rows * 3888000000+ 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
+ ( { now_time - rows * 3888000000 + 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( { now_time + 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
(
{ now_time + 5184000000 }, { -1 * pow(2,31) + pow(2,15) }, { -1 * pow(2,63) + pow(2,30) }, -32766, -126,
- { -1 * 3.2 * pow(10,38) }, { -1.2 * pow(10,308) }, { rows % 2 }, "binary_limit-1", "nchar_limit-1", { now_time - 86400000 }
+ { -1 * 3.2 * pow(10,38) }, { -1.2 * pow(10,308) }, { rows % 2 }, "binary_limit-1", "nchar_测试_limit-1", { now_time - 86400000 }
)
(
{ now_time + 2592000000 }, { -1 * pow(2,31) + pow(2,16) }, { -1 * pow(2,63) + pow(2,31) }, -32767, -127,
- { - 3.3 * pow(10,38) }, { -1.3 * pow(10,308) }, { (rows-1) % 2 }, "binary_limit-2", "nchar_limit-2", { now_time - 172800000 }
+ { - 3.3 * pow(10,38) }, { -1.3 * pow(10,308) }, { (rows-1) % 2 }, "binary_limit-2", "nchar_测试_limit-2", { now_time - 172800000 }
)
'''
)
for i in range(rows):
- insert_data = f'''insert into t1 values
+ insert_data = f'''insert into {dbname}.t1 values
( { now_time - i * 3600000 }, {i}, {i * 11111}, { i % 32767 }, { i % 127}, { i * 1.11111 }, { i * 1000.1111 }, { i % 2},
- "binary_{i}", "nchar_{i}", { now_time - 1000 * i } )
+ "binary_{i}", "nchar_测试_{i}", { now_time - 1000 * i } )
'''
tdSql.execute(insert_data)
tdSql.execute(
- f'''insert into t1 values
+ f'''insert into {dbname}.t1 values
( { now_time + 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( { now_time - (( rows // 2 ) * 60 + 30) * 60000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( { now_time - rows * 3600000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( { now_time + 7200000 }, { pow(2,31) - pow(2,15) }, { pow(2,63) - pow(2,30) }, 32767, 127,
{ 3.3 * pow(10,38) }, { 1.3 * pow(10,308) }, { rows % 2 },
- "binary_limit-1", "nchar_limit-1", { now_time - 86400000 }
+ "binary_limit-1", "nchar_测试_limit-1", { now_time - 86400000 }
)
(
{ now_time + 3600000 } , { pow(2,31) - pow(2,16) }, { pow(2,63) - pow(2,31) }, 32766, 126,
{ 3.2 * pow(10,38) }, { 1.2 * pow(10,308) }, { (rows-1) % 2 },
- "binary_limit-2", "nchar_limit-2", { now_time - 172800000 }
+ "binary_limit-2", "nchar_测试_limit-2", { now_time - 172800000 }
)
'''
)
@@ -227,10 +225,7 @@ class TDTestCase:
tdLog.printNoPrefix("==========step3:all check")
self.all_test()
- tdDnodes.stop(1)
- tdDnodes.start(1)
-
- tdSql.execute("use db")
+ tdSql.execute("flush database db")
tdLog.printNoPrefix("==========step4:after wal, all check again ")
self.all_test()
diff --git a/tests/system-test/2-query/ltrim.py b/tests/system-test/2-query/ltrim.py
index 15f40a09c3db67e4324e75768532221f55f2e35f..330f688990d614c1a824fd25741f19966e227581 100644
--- a/tests/system-test/2-query/ltrim.py
+++ b/tests/system-test/2-query/ltrim.py
@@ -23,6 +23,7 @@ CHAR_COL = [ BINARY_COL, NCHAR_COL, ]
BOOLEAN_COL = [ BOOL_COL, ]
TS_TYPE_COL = [ TS_COL, ]
+DBNAME = "db"
class TDTestCase:
@@ -120,16 +121,16 @@ class TDTestCase:
return sqls
- def __test_current(self): # sourcery skip: use-itertools-product
+ def __test_current(self, dbname=DBNAME): # sourcery skip: use-itertools-product
tdLog.printNoPrefix("==========current sql condition check , must return query ok==========")
- tbname = ["ct1", "ct2", "ct4", "t1", "stb1"]
+ tbname = [f"{dbname}.ct1", f"{dbname}.ct2", f"{dbname}.ct4", f"{dbname}.t1", f"{dbname}.stb1"]
for tb in tbname:
self.__ltrim_check(tb)
tdLog.printNoPrefix(f"==========current sql condition check in {tb} over==========")
- def __test_error(self):
+ def __test_error(self, dbname=DBNAME):
tdLog.printNoPrefix("==========err sql condition check , must return error==========")
- tbname = ["ct1", "ct2", "ct4", "t1", "stb1"]
+ tbname = [f"{dbname}.ct1", f"{dbname}.ct2", f"{dbname}.ct4", f"{dbname}.t1", f"{dbname}.stb1"]
for tb in tbname:
for errsql in self.__ltrim_err_check(tb):
@@ -142,17 +143,16 @@ class TDTestCase:
self.__test_error()
- def __create_tb(self):
- tdSql.prepare()
+ def __create_tb(self, dbname=DBNAME):
tdLog.printNoPrefix("==========step1:create table")
- create_stb_sql = f'''create table stb1(
+ create_stb_sql = f'''create table {dbname}.stb1(
ts timestamp, {INT_COL} int, {BINT_COL} bigint, {SINT_COL} smallint, {TINT_COL} tinyint,
{FLOAT_COL} float, {DOUBLE_COL} double, {BOOL_COL} bool,
{BINARY_COL} binary(16), {NCHAR_COL} nchar(32), {TS_COL} timestamp
) tags (t1 int)
'''
- create_ntb_sql = f'''create table t1(
+ create_ntb_sql = f'''create table {dbname}.t1(
ts timestamp, {INT_COL} int, {BINT_COL} bigint, {SINT_COL} smallint, {TINT_COL} tinyint,
{FLOAT_COL} float, {DOUBLE_COL} double, {BOOL_COL} bool,
{BINARY_COL} binary(16), {NCHAR_COL} nchar(32), {TS_COL} timestamp
@@ -162,29 +162,29 @@ class TDTestCase:
tdSql.execute(create_ntb_sql)
for i in range(4):
- tdSql.execute(f'create table ct{i+1} using stb1 tags ( {i+1} )')
+ tdSql.execute(f'create table {dbname}.ct{i+1} using {dbname}.stb1 tags ( {i+1} )')
- def __insert_data(self, rows):
+ def __insert_data(self, rows, dbname=DBNAME):
now_time = int(datetime.datetime.timestamp(datetime.datetime.now()) * 1000)
for i in range(rows):
tdSql.execute(
- f"insert into ct1 values ( { now_time - i * 1000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )"
+ f"insert into {dbname}.ct1 values ( { now_time - i * 1000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )"
)
tdSql.execute(
- f"insert into ct4 values ( { now_time - i * 7776000000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )"
+ f"insert into {dbname}.ct4 values ( { now_time - i * 7776000000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )"
)
tdSql.execute(
- f"insert into ct2 values ( { now_time - i * 7776000000 }, {-i}, {-11111 * i}, {-111 * i % 32767 }, {-11 * i % 127}, {-1.11*i}, {-1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )"
+ f"insert into {dbname}.ct2 values ( { now_time - i * 7776000000 }, {-i}, {-11111 * i}, {-111 * i % 32767 }, {-11 * i % 127}, {-1.11*i}, {-1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )"
)
tdSql.execute(
- f'''insert into ct1 values
+ f'''insert into {dbname}.ct1 values
( { now_time - rows * 5 }, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar_测试_0', { now_time + 8 } )
( { now_time + 10000 }, { rows }, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar_测试_9', { now_time + 9 } )
'''
)
tdSql.execute(
- f'''insert into ct4 values
+ f'''insert into {dbname}.ct4 values
( { now_time - rows * 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( { now_time - rows * 3888000000 + 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( { now_time + 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
@@ -200,7 +200,7 @@ class TDTestCase:
)
tdSql.execute(
- f'''insert into ct2 values
+ f'''insert into {dbname}.ct2 values
( { now_time - rows * 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( { now_time - rows * 3888000000 + 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( { now_time + 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
@@ -216,13 +216,13 @@ class TDTestCase:
)
for i in range(rows):
- insert_data = f'''insert into t1 values
+ insert_data = f'''insert into {dbname}.t1 values
( { now_time - i * 3600000 }, {i}, {i * 11111}, { i % 32767 }, { i % 127}, { i * 1.11111 }, { i * 1000.1111 }, { i % 2},
"binary_{i}", "nchar_测试_{i}", { now_time - 1000 * i } )
'''
tdSql.execute(insert_data)
tdSql.execute(
- f'''insert into t1 values
+ f'''insert into {dbname}.t1 values
( { now_time + 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( { now_time - (( rows // 2 ) * 60 + 30) * 60000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( { now_time - rows * 3600000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
@@ -251,8 +251,7 @@ class TDTestCase:
tdLog.printNoPrefix("==========step3:all check")
self.all_test()
- tdDnodes.stop(1)
- tdDnodes.start(1)
+ tdSql.execute("flush database db")
tdSql.execute("use db")
diff --git a/tests/system-test/2-query/mavg.py b/tests/system-test/2-query/mavg.py
index 0995dfc6ffedb28232c56d6f4826b8b2454249ff..b52217af9ac61e5a3c08d55b11e4219ec826b203 100644
--- a/tests/system-test/2-query/mavg.py
+++ b/tests/system-test/2-query/mavg.py
@@ -307,7 +307,7 @@ class TDTestCase:
pass
- def mavg_current_query(self) :
+ def mavg_current_query(self, dbname="db") :
# table schema :ts timestamp, c1 int, c2 float, c3 timestamp, c4 binary(16), c5 double, c6 bool
# c7 bigint, c8 smallint, c9 tinyint, c10 nchar(16)
@@ -325,17 +325,17 @@ class TDTestCase:
case6 = {"col": "c9"}
self.checkmavg(**case6)
- # # case7~8: nested query
- # case7 = {"table_expr": f"(select c1 from {dbname}.stb1)"}
- # self.checkmavg(**case7)
- # case8 = {"table_expr": f"(select mavg(c1, 1) c1 from {dbname}.stb1 group by tbname)"}
+ # case7~8: nested query
+ case7 = {"table_expr": f"(select c1 from {dbname}.stb1)"}
+ self.checkmavg(**case7)
+ # case8 = {"table_expr": f"(select _c0, mavg(c1, 1) c1 from {dbname}.stb1 group by tbname)"}
# self.checkmavg(**case8)
# case9~10: mix with tbname/ts/tag/col
- # case9 = {"alias": ", tbname"}
- # self.checkmavg(**case9)
- # case10 = {"alias": ", _c0"}
- # self.checkmavg(**case10)
+ case9 = {"alias": ", tbname"}
+ self.checkmavg(**case9)
+ case10 = {"alias": ", _c0"}
+ self.checkmavg(**case10)
# case11 = {"alias": ", st1"}
# self.checkmavg(**case11)
# case12 = {"alias": ", c1"}
@@ -356,7 +356,7 @@ class TDTestCase:
# case17: only support normal table join
case17 = {
"col": "t1.c1",
- "table_expr": "t1, t2",
+ "table_expr": f"{dbname}.t1 t1, {dbname}.t2 t2",
"condition": "where t1.ts=t2.ts"
}
self.checkmavg(**case17)
@@ -367,14 +367,14 @@ class TDTestCase:
# }
# self.checkmavg(**case19)
- # case20~21: with order by
+ # # case20~21: with order by
# case20 = {"condition": "order by ts"}
# self.checkmavg(**case20)
- #case21 = {
- # "table_expr": f"{dbname}.stb1",
- # "condition": "group by tbname order by tbname"
- #}
- #self.checkmavg(**case21)
+ case21 = {
+ "table_expr": f"{dbname}.stb1",
+ "condition": "group by tbname order by tbname"
+ }
+ self.checkmavg(**case21)
# # case22: with union
# case22 = {
@@ -398,7 +398,7 @@ class TDTestCase:
pass
- def mavg_error_query(self) -> None :
+ def mavg_error_query(self, dbname="db") -> None :
# unusual test
# form test
@@ -419,9 +419,9 @@ class TDTestCase:
err8 = {"table_expr": ""}
self.checkmavg(**err8) # no table_expr
- # err9 = {"col": "st1"}
+ err9 = {"col": "st1"}
# self.checkmavg(**err9) # col: tag
- # err10 = {"col": 1}
+ err10 = {"col": 1}
# self.checkmavg(**err10) # col: value
err11 = {"col": "NULL"}
self.checkmavg(**err11) # col: NULL
@@ -496,7 +496,7 @@ class TDTestCase:
# "condition": "where stb1.ts=stb2.ts and stb1.st1=stb2.st2 order by stb1.ts"
# }
# self.checkmavg(**err44) # stb join
- tdSql.query("select mavg( stb1.c1 , 1 ) from stb1, stb2 where stb1.ts=stb2.ts and stb1.st1=stb2.st2 order by stb1.ts;")
+ tdSql.query(f"select mavg( stb1.c1 , 1 ) from {dbname}.stb1 stb1, {dbname}.stb2 stb2 where stb1.ts=stb2.ts and stb1.st1=stb2.st2 order by stb1.ts;")
err45 = {
"condition": "where ts>0 and ts < now interval(1h) fill(next)"
}
diff --git a/tests/system-test/2-query/max.py b/tests/system-test/2-query/max.py
index 34442a3725d69092535c02a509ba8cece4c10ed4..169b1c2c387c7158635483f8ce8868891e42e3c2 100644
--- a/tests/system-test/2-query/max.py
+++ b/tests/system-test/2-query/max.py
@@ -5,10 +5,7 @@ import numpy as np
class TDTestCase:
- updatecfgDict = {'debugFlag': 143 ,"cDebugFlag":143,"uDebugFlag":143 ,"rpcDebugFlag":143 , "tmrDebugFlag":143 ,
- "jniDebugFlag":143 ,"simDebugFlag":143,"dDebugFlag":143, "dDebugFlag":143,"vDebugFlag":143,"mDebugFlag":143,"qDebugFlag":143,
- "wDebugFlag":143,"sDebugFlag":143,"tsdbDebugFlag":143,"tqDebugFlag":143 ,"fsDebugFlag":143 ,"udfDebugFlag":143,
- "maxTablesPerVnode":2 ,"minTablesPerVnode":2,"tableIncStepPerVnode":2 }
+ updatecfgDict = {"maxTablesPerVnode":2 ,"minTablesPerVnode":2,"tableIncStepPerVnode":2 }
def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor())
@@ -17,60 +14,80 @@ class TDTestCase:
self.ts = 1537146000000
self.binary_str = 'taosdata'
self.nchar_str = '涛思数据'
- def max_check_stb_and_tb_base(self):
+ def max_check_stb_and_tb_base(self, dbname="db"):
tdSql.prepare()
intData = []
floatData = []
- tdSql.execute('''create table stb(ts timestamp, col1 tinyint, col2 smallint, col3 int, col4 bigint, col5 tinyint unsigned, col6 smallint unsigned,
+ tdSql.execute(f'''create table {dbname}.stb(ts timestamp, col1 tinyint, col2 smallint, col3 int, col4 bigint, col5 tinyint unsigned, col6 smallint unsigned,
col7 int unsigned, col8 bigint unsigned, col9 float, col10 double, col11 bool, col12 binary(20), col13 nchar(20)) tags(loc nchar(20))''')
- tdSql.execute("create table stb_1 using stb tags('beijing')")
+ tdSql.execute(f"create table {dbname}.stb_1 using {dbname}.stb tags('beijing')")
for i in range(self.rowNum):
- tdSql.execute(f"insert into stb_1 values(%d, %d, %d, %d, %d, %d, %d, %d, %d, %f, %f, %d, '{self.binary_str}%d', '{self.nchar_str}%d')"
+ tdSql.execute(f"insert into {dbname}.stb_1 values(%d, %d, %d, %d, %d, %d, %d, %d, %d, %f, %f, %d, '{self.binary_str}%d', '{self.nchar_str}%d')"
% (self.ts + i, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1, i + 0.1, i + 0.1, i % 2, i + 1, i + 1))
intData.append(i + 1)
floatData.append(i + 0.1)
- for i in ['ts','col11','col12','col13']:
- for j in ['db.stb','stb','db.stb_1','stb_1']:
- tdSql.error(f'select max({i} from {j} )')
+ for i in ['col11','col12','col13']:
+ for j in ['stb','stb_1']:
+ tdSql.error(f'select max({i} from {dbname}.{j} )')
for i in range(1,11):
- for j in ['db.stb','stb','db.stb_1','stb_1']:
- tdSql.query(f"select max(col{i}) from {j}")
+ for j in ['stb', 'stb_1']:
+ tdSql.query(f"select max(col{i}) from {dbname}.{j}")
if i<9:
tdSql.checkData(0, 0, np.max(intData))
elif i>=9:
tdSql.checkData(0, 0, np.max(floatData))
- tdSql.query("select max(col1) from stb_1 where col2<=5")
+
+ tdSql.query(f"select max(now()) from {dbname}.stb_1")
+ tdSql.checkRows(1)
+
+ tdSql.query(f"select last(ts) from {dbname}.stb_1")
+ lastTs = tdSql.getData(0, 0)
+ tdSql.query(f"select max(ts) from {dbname}.stb_1")
+ tdSql.checkData(0, 0, lastTs)
+
+ tdSql.query(f"select last(ts) from {dbname}.stb")
+ lastTs = tdSql.getData(0, 0)
+ tdSql.query(f"select max(ts) from {dbname}.stb")
+ tdSql.checkData(0, 0, lastTs)
+
+ tdSql.query(f"select max(col1) from {dbname}.stb_1 where col2<=5")
tdSql.checkData(0,0,5)
- tdSql.query("select max(col1) from stb where col2<=5")
+ tdSql.query(f"select max(col1) from {dbname}.stb where col2<=5")
tdSql.checkData(0,0,5)
- tdSql.execute('drop database db')
- def max_check_ntb_base(self):
+ def max_check_ntb_base(self, dbname="db"):
tdSql.prepare()
intData = []
floatData = []
- tdSql.execute('''create table ntb(ts timestamp, col1 tinyint, col2 smallint, col3 int, col4 bigint, col5 tinyint unsigned, col6 smallint unsigned,
+ tdSql.execute(f'''create table {dbname}.ntb(ts timestamp, col1 tinyint, col2 smallint, col3 int, col4 bigint, col5 tinyint unsigned, col6 smallint unsigned,
col7 int unsigned, col8 bigint unsigned, col9 float, col10 double, col11 bool, col12 binary(20), col13 nchar(20))''')
for i in range(self.rowNum):
- tdSql.execute(f"insert into ntb values(%d, %d, %d, %d, %d, %d, %d, %d, %d, %f, %f, %d, '{self.binary_str}%d', '{self.nchar_str}%d')"
+ tdSql.execute(f"insert into {dbname}.ntb values(%d, %d, %d, %d, %d, %d, %d, %d, %d, %f, %f, %d, '{self.binary_str}%d', '{self.nchar_str}%d')"
% (self.ts + i, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1, i + 0.1, i + 0.1, i % 2, i + 1, i + 1))
intData.append(i + 1)
floatData.append(i + 0.1)
- for i in ['ts','col11','col12','col13']:
- for j in ['db.ntb','ntb']:
- tdSql.error(f'select max({i} from {j} )')
+ for i in ['col11','col12','col13']:
+ for j in ['ntb']:
+ tdSql.error(f'select max({i} from {dbname}.{j} )')
for i in range(1,11):
- for j in ['db.ntb','ntb']:
- tdSql.query(f"select max(col{i}) from {j}")
+ for j in ['ntb']:
+ tdSql.query(f"select max(col{i}) from {dbname}.{j}")
if i<9:
tdSql.checkData(0, 0, np.max(intData))
elif i>=9:
tdSql.checkData(0, 0, np.max(floatData))
- tdSql.query("select max(col1) from ntb where col2<=5")
- tdSql.checkData(0,0,5)
- tdSql.execute('drop database db')
+ tdSql.query(f"select max(now()) from {dbname}.ntb")
+ tdSql.checkRows(1)
+
+ tdSql.query(f"select last(ts) from {dbname}.ntb")
+ lastTs = tdSql.getData(0, 0)
+ tdSql.query(f"select max(ts) from {dbname}.ntb")
+ tdSql.checkData(0, 0, lastTs)
+
+ tdSql.query(f"select max(col1) from {dbname}.ntb where col2<=5")
+ tdSql.checkData(0,0,5)
def check_max_functions(self, tbname , col_name):
@@ -90,55 +107,55 @@ class TDTestCase:
tdLog.info(" max function work as expected, sql : %s "% max_sql)
- def support_distributed_aggregate(self):
+ def support_distributed_aggregate(self, dbname="testdb"):
# prepate datas for 20 tables distributed at different vgroups
- tdSql.execute("create database if not exists testdb keep 3650 duration 1000 vgroups 5")
- tdSql.execute(" use testdb ")
+ tdSql.execute(f"create database if not exists {dbname} keep 3650 duration 1000 vgroups 5")
+ tdSql.execute(f"use {dbname} ")
tdSql.execute(
- '''create table stb1
+ f'''create table {dbname}.stb1
(ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp)
tags (t0 timestamp, t1 int, t2 bigint, t3 smallint, t4 tinyint, t5 float, t6 double, t7 bool, t8 binary(16),t9 nchar(32))
'''
)
tdSql.execute(
- '''
- create table t1
+ f'''
+ create table {dbname}.t1
(ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp)
'''
)
for i in range(20):
- tdSql.execute(f'create table ct{i+1} using stb1 tags ( now(), {1*i}, {11111*i}, {111*i}, {1*i}, {1.11*i}, {11.11*i}, {i%2}, "binary{i}", "nchar{i}" )')
+ tdSql.execute(f'create table {dbname}.ct{i+1} using {dbname}.stb1 tags ( now(), {1*i}, {11111*i}, {111*i}, {1*i}, {1.11*i}, {11.11*i}, {i%2}, "binary{i}", "nchar{i}" )')
for i in range(9):
tdSql.execute(
- f"insert into ct1 values ( now()-{i*10}s, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )"
+ f"insert into {dbname}.ct1 values ( now()-{i*10}s, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )"
)
tdSql.execute(
- f"insert into ct4 values ( now()-{i*90}d, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )"
+ f"insert into {dbname}.ct4 values ( now()-{i*90}d, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )"
)
for i in range(1,21):
if i ==1 or i == 4:
continue
else:
- tbname = "ct"+f'{i}'
+ tbname = f"{dbname}.ct{i}"
for j in range(9):
tdSql.execute(
f"insert into {tbname} values ( now()-{(i+j)*10}s, {1*(j+i)}, {11111*(j+i)}, {111*(j+i)}, {11*(j)}, {1.11*(j+i)}, {11.11*(j+i)}, {(j+i)%2}, 'binary{j}', 'nchar{j}', now()+{1*j}a )"
)
- tdSql.execute("insert into ct1 values (now()-45s, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar0', now()+8a )")
- tdSql.execute("insert into ct1 values (now()+10s, 9, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )")
- tdSql.execute("insert into ct1 values (now()+15s, 9, -99999, -999, -99, -9.99, NULL, 1, 'binary9', 'nchar9', now()+9a )")
- tdSql.execute("insert into ct1 values (now()+20s, 9, -99999, -999, NULL, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )")
+ tdSql.execute(f"insert into {dbname}.ct1 values (now()-45s, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar0', now()+8a )")
+ tdSql.execute(f"insert into {dbname}.ct1 values (now()+10s, 9, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )")
+ tdSql.execute(f"insert into {dbname}.ct1 values (now()+15s, 9, -99999, -999, -99, -9.99, NULL, 1, 'binary9', 'nchar9', now()+9a )")
+ tdSql.execute(f"insert into {dbname}.ct1 values (now()+20s, 9, -99999, -999, NULL, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )")
- tdSql.execute("insert into ct4 values (now()-810d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
- tdSql.execute("insert into ct4 values (now()-400d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
- tdSql.execute("insert into ct4 values (now()+90d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
+ tdSql.execute(f"insert into {dbname}.ct4 values (now()-810d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
+ tdSql.execute(f"insert into {dbname}.ct4 values (now()-400d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
+ tdSql.execute(f"insert into {dbname}.ct4 values (now()+90d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
tdSql.execute(
- f'''insert into t1 values
+ f'''insert into {dbname}.t1 values
( '2020-04-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( '2020-10-21 01:01:01.000', 1, 11111, 111, 11, 1.11, 11.11, 1, "binary1", "nchar1", now()+1a )
( '2020-12-31 01:01:01.000', 2, 22222, 222, 22, 2.22, 22.22, 0, "binary2", "nchar2", now()+2a )
@@ -157,7 +174,7 @@ class TDTestCase:
tdLog.info(" prepare data for distributed_aggregate done! ")
# get vgroup_ids of all
- tdSql.query("show vgroups ")
+ tdSql.query(f"show {dbname}.vgroups ")
vgroups = tdSql.queryResult
vnode_tables={}
@@ -167,7 +184,7 @@ class TDTestCase:
# check sub_table of per vnode ,make sure sub_table has been distributed
- tdSql.query("select * from information_schema.ins_tables where db_name = 'testdb' and table_name like 'ct%'")
+ tdSql.query(f"select * from information_schema.ins_tables where db_name = '{dbname}' and table_name like 'ct%'")
table_names = tdSql.queryResult
tablenames = []
for table_name in table_names:
@@ -182,13 +199,13 @@ class TDTestCase:
# check max function work status
- tdSql.query("show tables like 'ct%'")
+ tdSql.query(f"show {dbname}.tables like 'ct%'")
table_names = tdSql.queryResult
tablenames = []
for table_name in table_names:
tablenames.append(table_name[0])
- tdSql.query("desc stb1")
+ tdSql.query(f"desc {dbname}.stb1")
col_names = tdSql.queryResult
colnames = []
@@ -198,11 +215,7 @@ class TDTestCase:
for tablename in tablenames:
for colname in colnames:
- self.check_max_functions(tablename,colname)
-
- # max function with basic filter
- print(vnode_tables)
-
+ self.check_max_functions(f"{dbname}.{tablename}", colname)
def run(self):
diff --git a/tests/system-test/2-query/max_partition.py b/tests/system-test/2-query/max_partition.py
index 4b9996d9c3b1d45f52e184f1da4ec8e59714feaa..01c267724210591e639753c3566c4826a5218813 100644
--- a/tests/system-test/2-query/max_partition.py
+++ b/tests/system-test/2-query/max_partition.py
@@ -12,16 +12,15 @@ class TDTestCase:
self.tb_nums = 10
self.ts = 1537146000000
- def prepare_datas(self, stb_name , tb_nums , row_nums ):
- tdSql.execute(" use db ")
- tdSql.execute(f" create stable {stb_name} (ts timestamp , c1 int , c2 bigint , c3 float , c4 double , c5 smallint , c6 tinyint , c7 bool , c8 binary(36) , c9 nchar(36) , uc1 int unsigned,\
+ def prepare_datas(self, stb_name , tb_nums , row_nums, dbname="db" ):
+ tdSql.execute(f" create stable {dbname}.{stb_name} (ts timestamp , c1 int , c2 bigint , c3 float , c4 double , c5 smallint , c6 tinyint , c7 bool , c8 binary(36) , c9 nchar(36) , uc1 int unsigned,\
uc2 bigint unsigned ,uc3 smallint unsigned , uc4 tinyint unsigned ) tags(t1 timestamp , t2 int , t3 bigint , t4 float , t5 double , t6 smallint , t7 tinyint , t8 bool , t9 binary(36)\
, t10 nchar(36) , t11 int unsigned , t12 bigint unsigned ,t13 smallint unsigned , t14 tinyint unsigned ) ")
for i in range(tb_nums):
- tbname = f"sub_{stb_name}_{i}"
+ tbname = f"{dbname}.sub_{stb_name}_{i}"
ts = self.ts + i*10000
- tdSql.execute(f"create table {tbname} using {stb_name} tags ({ts} , {i} , {i}*10 ,{i}*1.0,{i}*1.0 , 1 , 2, 'true', 'binary_{i}' ,'nchar_{i}',{i},{i},10,20 )")
+ tdSql.execute(f"create table {tbname} using {dbname}.{stb_name} tags ({ts} , {i} , {i}*10 ,{i}*1.0,{i}*1.0 , 1 , 2, 'true', 'binary_{i}' ,'nchar_{i}',{i},{i},10,20 )")
for row in range(row_nums):
ts = self.ts + row*1000
@@ -31,191 +30,192 @@ class TDTestCase:
ts = self.ts + row_nums*1000 + null*1000
tdSql.execute(f"insert into {tbname} values({ts} , NULL , NULL , NULL , NULL , NULL , NULL , NULL , NULL , NULL , NULL , NULL , NULL , NULL )")
- def basic_query(self):
- tdSql.query("select count(*) from stb")
+ def basic_query(self, dbname="db"):
+ tdSql.query(f"select count(*) from {dbname}.stb")
tdSql.checkData(0,0,(self.row_nums + 5 )*self.tb_nums)
- tdSql.query("select max(c1) from stb")
+ tdSql.query(f"select max(c1) from {dbname}.stb")
tdSql.checkData(0,0,(self.row_nums -1))
- tdSql.query(" select tbname , max(c1) from stb partition by tbname ")
+ tdSql.query(f"select tbname , max(c1) from {dbname}.stb partition by tbname ")
tdSql.checkRows(self.tb_nums)
- tdSql.query(" select max(c1) from stb group by t1 order by t1 ")
+ tdSql.query(f"select max(c1) from {dbname}.stb group by t1 order by t1 ")
tdSql.checkRows(self.tb_nums)
- tdSql.query(" select max(c1) from stb group by c1 order by t1 ")
- tdSql.query(" select max(t2) from stb group by c1 order by t1 ")
- tdSql.query(" select max(c1) from stb group by tbname order by tbname ")
+ tdSql.query(f"select max(c1) from {dbname}.stb group by c1 order by t1 ")
+ tdSql.query(f"select max(t2) from {dbname}.stb group by c1 order by t1 ")
+ tdSql.query(f"select max(c1) from {dbname}.stb group by tbname order by tbname ")
tdSql.checkRows(self.tb_nums)
# bug need fix
- tdSql.query(" select max(t2) from stb group by t2 order by t2 ")
+ tdSql.query(f"select max(t2) from {dbname}.stb group by t2 order by t2 ")
tdSql.checkRows(self.tb_nums)
- tdSql.query(" select max(c1) from stb group by c1 order by c1 ")
+ tdSql.query(f"select max(c1) from {dbname}.stb group by c1 order by c1 ")
tdSql.checkRows(self.row_nums+1)
- tdSql.query(" select c1 , max(c1) from stb group by c1 order by c1 ")
+ tdSql.query(f"select c1 , max(c1) from {dbname}.stb group by c1 order by c1 ")
tdSql.checkRows(self.row_nums+1)
# support selective functions
- tdSql.query(" select c1 ,c2 ,c3 , max(c1) ,c4 ,c5 ,t11 from stb group by c1 order by c1 desc ")
+ tdSql.query(f"select c1 ,c2 ,c3 , max(c1) ,c4 ,c5 ,t11 from {dbname}.stb group by c1 order by c1 desc ")
tdSql.checkRows(self.row_nums+1)
- tdSql.query(" select c1, tbname , max(c1) ,c4 ,c5 ,t11 from stb group by c1 order by c1 desc ")
+ tdSql.query(f"select c1, tbname , max(c1) ,c4 ,c5 ,t11 from {dbname}.stb group by c1 order by c1 desc ")
tdSql.checkRows(self.row_nums+1)
# bug need fix
- # tdSql.query(" select tbname , max(c1) from sub_stb_1 where c1 is null group by c1 order by c1 desc ")
- # tdSql.checkRows(1)
- # tdSql.checkData(0,0,"sub_stb_1")
+ tdSql.query(f"select tbname , max(c1) from {dbname}.sub_stb_1 where c1 is null group by c1 order by c1 desc ")
+ tdSql.checkRows(1)
+ tdSql.checkData(0,0,"sub_stb_1")
- tdSql.query("select max(c1) ,c2 ,t2,tbname from stb group by abs(c1) order by abs(c1)")
+ tdSql.query(f"select max(c1) ,c2 ,t2,tbname from {dbname}.stb group by abs(c1) order by abs(c1)")
tdSql.checkRows(self.row_nums+1)
- tdSql.query("select abs(c1+c3), count(c1+c3) ,max(c1+t2) from stb group by abs(c1+c3) order by abs(c1+c3)")
+ tdSql.query(f"select abs(c1+c3), count(c1+c3) ,max(c1+t2) from {dbname}.stb group by abs(c1+c3) order by abs(c1+c3)")
tdSql.checkRows(self.row_nums+1)
- tdSql.query("select max(c1+c3)+min(c2) ,abs(c1) from stb group by abs(c1) order by abs(c1)")
+ tdSql.query(f"select max(c1+c3)+min(c2) ,abs(c1) from {dbname}.stb group by abs(c1) order by abs(c1)")
tdSql.checkRows(self.row_nums+1)
- tdSql.error("select count(c1+c3)+max(c2) ,abs(c1) ,abs(t1) from stb group by abs(c1) order by abs(t1)+c2")
- tdSql.error("select count(c1+c3)+max(c2) ,abs(c1) from stb group by abs(c1) order by abs(c1)+c2")
- tdSql.query("select abs(c1+c3)+abs(c2) , count(c1+c3)+max(c2) from stb group by abs(c1+c3)+abs(c2) order by abs(c1+c3)+abs(c2)")
+ tdSql.error(f"select count(c1+c3)+max(c2) ,abs(c1) ,abs(t1) from {dbname}.stb group by abs(c1) order by abs(t1)+c2")
+ tdSql.error(f"select count(c1+c3)+max(c2) ,abs(c1) from {dbname}.stb group by abs(c1) order by abs(c1)+c2")
+ tdSql.query(f"select abs(c1+c3)+abs(c2) , count(c1+c3)+max(c2) from {dbname}.stb group by abs(c1+c3)+abs(c2) order by abs(c1+c3)+abs(c2)")
tdSql.checkRows(self.row_nums+1)
- tdSql.query(" select max(c1) , max(t2) from stb where abs(c1+t2)=1 partition by tbname ")
+ tdSql.query(f"select max(c1) , max(t2) from {dbname}.stb where abs(c1+t2)=1 partition by tbname ")
tdSql.checkRows(2)
- tdSql.query(" select max(c1) from stb where abs(c1+t2)=1 partition by tbname ")
+ tdSql.query(f"select max(c1) from {dbname}.stb where abs(c1+t2)=1 partition by tbname ")
tdSql.checkRows(2)
- tdSql.query(" select tbname , max(c1) from stb partition by tbname order by tbname ")
+ tdSql.query(f"select tbname , max(c1) from {dbname}.stb partition by tbname order by tbname ")
tdSql.checkRows(self.tb_nums)
tdSql.checkData(0,1,self.row_nums-1)
- tdSql.query("select tbname , max(c2) from stb partition by t1 order by t1")
- tdSql.query("select tbname , max(t2) from stb partition by t1 order by t1")
- tdSql.query("select tbname , max(t2) from stb partition by t2 order by t2")
+ tdSql.query(f"select tbname , max(c2) from {dbname}.stb partition by t1 order by t1")
+ tdSql.query(f"select tbname , max(t2) from {dbname}.stb partition by t1 order by t1")
+ tdSql.query(f"select tbname , max(t2) from {dbname}.stb partition by t2 order by t2")
# # bug need fix
- tdSql.query("select t2 , max(t2) from stb partition by t2 order by t2")
+ tdSql.query(f"select t2 , max(t2) from {dbname}.stb partition by t2 order by t2")
tdSql.checkRows(self.tb_nums)
- tdSql.query("select tbname , max(c1) from stb partition by tbname order by tbname")
+ tdSql.query(f"select tbname , max(c1) from {dbname}.stb partition by tbname order by tbname")
tdSql.checkRows(self.tb_nums)
tdSql.checkData(0,1,self.row_nums-1)
- tdSql.query("select tbname , max(c1) from stb partition by t2 order by t2")
+ tdSql.query(f"select tbname , max(c1) from {dbname}.stb partition by t2 order by t2")
- tdSql.query("select c2, max(c1) from stb partition by c2 order by c2 desc")
+ tdSql.query(f"select c2, max(c1) from {dbname}.stb partition by c2 order by c2 desc")
tdSql.checkRows(self.tb_nums+1)
tdSql.checkData(0,1,self.row_nums-1)
- tdSql.query("select tbname , max(c1) from stb partition by c1 order by c2")
+ tdSql.query(f"select tbname , max(c1) from {dbname}.stb partition by c1 order by c2")
- tdSql.query("select tbname , abs(t2) from stb partition by c2 order by t2")
+ tdSql.query(f"select tbname , abs(t2) from {dbname}.stb partition by c2 order by t2")
tdSql.checkRows(self.tb_nums*(self.row_nums+5))
- tdSql.query("select max(c1) , count(t2) from stb partition by c2 ")
+ tdSql.query(f"select max(c1) , count(t2) from {dbname}.stb partition by c2 ")
tdSql.checkRows(self.row_nums+1)
tdSql.checkData(0,1,self.row_nums)
- tdSql.query("select count(c1) , max(t2) ,c2 from stb partition by c2 order by c2")
+ tdSql.query(f"select count(c1) , max(t2) ,c2 from {dbname}.stb partition by c2 order by c2")
tdSql.checkRows(self.row_nums+1)
- tdSql.query("select count(c1) , count(t1) ,max(c2) ,tbname from stb partition by tbname order by tbname")
+ tdSql.query(f"select count(c1) , count(t1) ,max(c2) ,tbname from {dbname}.stb partition by tbname order by tbname")
tdSql.checkRows(self.tb_nums)
tdSql.checkCols(4)
- tdSql.query("select count(c1) , max(t2) ,t1 from stb partition by t1 order by t1")
+ tdSql.query(f"select count(c1) , max(t2) ,t1 from {dbname}.stb partition by t1 order by t1")
tdSql.checkRows(self.tb_nums)
tdSql.checkData(0,0,self.row_nums)
# bug need fix
- tdSql.query("select count(c1) , max(t2) ,abs(c1) from stb partition by abs(c1) order by abs(c1)")
+ tdSql.query(f"select count(c1) , max(t2) ,abs(c1) from {dbname}.stb partition by abs(c1) order by abs(c1)")
tdSql.checkRows(self.row_nums+1)
- tdSql.query("select max(ceil(c2)) , max(floor(t2)) ,max(floor(c2)) from stb partition by abs(c2) order by abs(c2)")
+ tdSql.query(f"select max(ceil(c2)) , max(floor(t2)) ,max(floor(c2)) from {dbname}.stb partition by abs(c2) order by abs(c2)")
tdSql.checkRows(self.row_nums+1)
- tdSql.query("select max(ceil(c1-2)) , max(floor(t2+1)) ,max(c2-c1) from stb partition by abs(floor(c1)) order by abs(floor(c1))")
+ tdSql.query(f"select max(ceil(c1-2)) , max(floor(t2+1)) ,max(c2-c1) from {dbname}.stb partition by abs(floor(c1)) order by abs(floor(c1))")
tdSql.checkRows(self.row_nums+1)
- tdSql.query("select tbname , max(c1) ,c1 from stb partition by tbname order by tbname")
+ tdSql.query(f"select tbname , max(c1) ,c1 from {dbname}.stb partition by tbname order by tbname")
tdSql.checkRows(self.tb_nums)
tdSql.checkData(0,0,'sub_stb_0')
tdSql.checkData(0,1,9)
tdSql.checkData(0,2,9)
- tdSql.query("select tbname ,top(c1,1) ,c1 from stb partition by tbname order by tbname")
+ tdSql.query(f"select tbname ,top(c1,1) ,c1 from {dbname}.stb partition by tbname order by tbname")
tdSql.checkRows(self.tb_nums)
- tdSql.query(" select c1 , sample(c1,2) from stb partition by tbname order by tbname ")
+ tdSql.query(f"select c1 , sample(c1,2) from {dbname}.stb partition by tbname order by tbname ")
tdSql.checkRows(self.tb_nums*2)
# interval
- tdSql.query("select max(c1) from stb interval(2s) sliding(1s)")
+ tdSql.query(f"select max(c1) from {dbname}.stb interval(2s) sliding(1s)")
# bug need fix
- tdSql.query('select max(c1) from stb where ts>="2022-07-06 16:00:00.000 " and ts < "2022-07-06 17:00:00.000 " interval(50s) sliding(30s) fill(NULL)')
+ tdSql.query(f'select max(c1) from {dbname}.stb where ts>="2022-07-06 16:00:00.000 " and ts < "2022-07-06 17:00:00.000 " interval(50s) sliding(30s) fill(NULL)')
- tdSql.query(" select tbname , count(c1) from stb partition by tbname interval(10s) slimit 5 soffset 1 ")
+ tdSql.query(f"select tbname , count(c1) from {dbname}.stb partition by tbname interval(10s) slimit 5 soffset 1 ")
- tdSql.query("select tbname , max(c1) from stb partition by tbname interval(10s)")
+ tdSql.query(f"select tbname , max(c1) from {dbname}.stb partition by tbname interval(10s)")
tdSql.checkRows(self.row_nums*2)
- tdSql.query("select unique(c1) from stb partition by tbname order by tbname")
+ tdSql.query(f"select unique(c1) from {dbname}.stb partition by tbname order by tbname")
- tdSql.query("select tbname , count(c1) from sub_stb_1 partition by tbname interval(10s)")
+ tdSql.query(f"select tbname , count(c1) from {dbname}.sub_stb_1 partition by tbname interval(10s)")
tdSql.checkData(0,0,'sub_stb_1')
tdSql.checkData(0,1,self.row_nums)
- tdSql.query("select c1 , mavg(c1 ,2 ) from stb partition by c1")
+ tdSql.query(f"select c1 , mavg(c1 ,2 ) from {dbname}.stb partition by c1")
tdSql.checkRows(90)
- tdSql.query("select c1 , diff(c1 , 0) from stb partition by c1")
+ tdSql.query(f"select c1 , diff(c1 , 0) from {dbname}.stb partition by c1")
tdSql.checkRows(90)
- tdSql.query("select c1 , csum(c1) from stb partition by c1")
+ tdSql.query(f"select c1 , csum(c1) from {dbname}.stb partition by c1")
tdSql.checkRows(100)
- tdSql.query("select c1 , sample(c1,2) from stb partition by c1 order by c1")
+ tdSql.query(f"select c1 , sample(c1,2) from {dbname}.stb partition by c1 order by c1")
tdSql.checkRows(21)
# bug need fix
- # tdSql.checkData(0,1,None)
+ tdSql.checkData(0,1,None)
- tdSql.query("select c1 , twa(c1) from stb partition by c1 order by c1")
+ tdSql.query(f"select c1 , twa(c1) from {dbname}.stb partition by c1 order by c1")
tdSql.checkRows(11)
tdSql.checkData(0,1,None)
- tdSql.query("select c1 , irate(c1) from stb partition by c1 order by c1")
+ tdSql.query(f"select c1 , irate(c1) from {dbname}.stb partition by c1 order by c1")
tdSql.checkRows(11)
tdSql.checkData(0,1,None)
- tdSql.query("select c1 , DERIVATIVE(c1,2,1) from stb partition by c1 order by c1")
+ tdSql.query(f"select c1 , DERIVATIVE(c1,2,1) from {dbname}.stb partition by c1 order by c1")
tdSql.checkRows(90)
# bug need fix
tdSql.checkData(0,1,None)
- tdSql.query(" select tbname , max(c1) from stb partition by tbname order by tbname slimit 5 soffset 0 ")
+ tdSql.query(f"select tbname , max(c1) from {dbname}.stb partition by tbname order by tbname slimit 5 soffset 0 ")
tdSql.checkRows(10)
- tdSql.query(" select tbname , max(c1) from sub_stb_1 partition by tbname interval(10s) sliding(5s) ")
+ tdSql.query(f"select tbname , max(c1) from {dbname}.sub_stb_1 partition by tbname interval(10s) sliding(5s) ")
- tdSql.query(f'select max(c1) from stb where ts>={self.ts} and ts < {self.ts}+1000 interval(50s) sliding(30s)')
- tdSql.query(f'select tbname , max(c1) from stb where ts>={self.ts} and ts < {self.ts}+1000 interval(50s) sliding(30s)')
+ tdSql.query(f'select max(c1) from {dbname}.stb where ts>={self.ts} and ts < {self.ts}+1000 interval(50s) sliding(30s)')
+ tdSql.query(f'select tbname , max(c1) from {dbname}.stb where ts>={self.ts} and ts < {self.ts}+1000 interval(50s) sliding(30s)')
def run(self):
+ dbname = "db"
tdSql.prepare()
self.prepare_datas("stb",self.tb_nums,self.row_nums)
self.basic_query()
# # coverage case for taosd crash about bug fix
- tdSql.query(" select sum(c1) from stb where t2+10 >1 ")
- tdSql.query(" select count(c1),count(t1) from stb where -t2<1 ")
- tdSql.query(" select tbname ,max(ceil(c1)) from stb group by tbname ")
- tdSql.query(" select avg(abs(c1)) , tbname from stb group by tbname ")
- tdSql.query(" select t1,c1 from stb where abs(t2+c1)=1 ")
+ tdSql.query(f"select sum(c1) from {dbname}.stb where t2+10 >1 ")
+ tdSql.query(f"select count(c1),count(t1) from {dbname}.stb where -t2<1 ")
+ tdSql.query(f"select tbname ,max(ceil(c1)) from {dbname}.stb group by tbname ")
+ tdSql.query(f"select avg(abs(c1)) , tbname from {dbname}.stb group by tbname ")
+ tdSql.query(f"select t1,c1 from {dbname}.stb where abs(t2+c1)=1 ")
def stop(self):
diff --git a/tests/system-test/2-query/min.py b/tests/system-test/2-query/min.py
index c27e9926ff52e178afe230872d70c6ab269d6983..3d46b7b2224f834360c17cdc311dbf1e0d5a4535 100644
--- a/tests/system-test/2-query/min.py
+++ b/tests/system-test/2-query/min.py
@@ -14,198 +14,159 @@ class TDTestCase:
self.ts = 1537146000000
def run(self):
+ dbname = "db"
tdSql.prepare()
intData = []
floatData = []
- tdSql.execute('''create table stb(ts timestamp, col1 tinyint, col2 smallint, col3 int, col4 bigint, col5 float, col6 double,
+ tdSql.execute(f'''create table {dbname}.stb(ts timestamp, col1 tinyint, col2 smallint, col3 int, col4 bigint, col5 float, col6 double,
col7 bool, col8 binary(20), col9 nchar(20), col11 tinyint unsigned, col12 smallint unsigned, col13 int unsigned, col14 bigint unsigned) tags(loc nchar(20))''')
- tdSql.execute("create table stb_1 using stb tags('beijing')")
- tdSql.execute('''create table ntb(ts timestamp, col1 tinyint, col2 smallint, col3 int, col4 bigint, col5 float, col6 double,
+ tdSql.execute(f"create table {dbname}.stb_1 using {dbname}.stb tags('beijing')")
+ tdSql.execute(f'''create table {dbname}.ntb(ts timestamp, col1 tinyint, col2 smallint, col3 int, col4 bigint, col5 float, col6 double,
col7 bool, col8 binary(20), col9 nchar(20), col11 tinyint unsigned, col12 smallint unsigned, col13 int unsigned, col14 bigint unsigned)''')
for i in range(self.rowNum):
- tdSql.execute("insert into ntb values(%d, %d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d', %d, %d, %d, %d)"
+ tdSql.execute(f"insert into {dbname}.ntb values(%d, %d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d', %d, %d, %d, %d)"
% (self.ts + i, i + 1, i + 1, i + 1, i + 1, i + 0.1, i + 0.1, i % 2, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1))
intData.append(i + 1)
floatData.append(i + 0.1)
for i in range(self.rowNum):
- tdSql.execute("insert into stb_1 values(%d, %d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d', %d, %d, %d, %d)"
+ tdSql.execute(f"insert into {dbname}.stb_1 values(%d, %d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d', %d, %d, %d, %d)"
% (self.ts + i, i + 1, i + 1, i + 1, i + 1, i + 0.1, i + 0.1, i % 2, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1))
intData.append(i + 1)
floatData.append(i + 0.1)
# max verifacation
- tdSql.error("select min(ts) from stb_1")
- tdSql.error("select min(ts) from db.stb_1")
- tdSql.error("select min(col7) from stb_1")
- tdSql.error("select min(col7) from db.stb_1")
- tdSql.error("select min(col8) from stb_1")
- tdSql.error("select min(col8) from db.stb_1")
- tdSql.error("select min(col9) from stb_1")
- tdSql.error("select min(col9) from db.stb_1")
- # tdSql.error("select min(a) from stb_1")
- # tdSql.error("select min(1) from stb_1")
- tdSql.error("select min(now()) from stb_1")
- tdSql.error("select min(count(c1),count(c2)) from stb_1")
+ tdSql.error(f"select min(col7) from {dbname}.stb_1")
+ tdSql.error(f"select min(col8) from {dbname}.stb_1")
+ tdSql.error(f"select min(col9) from {dbname}.stb_1")
+ tdSql.error(f"select min(a) from {dbname}.stb_1")
+ tdSql.query(f"select min(1) from {dbname}.stb_1")
+ tdSql.error(f"select min(count(c1),count(c2)) from {dbname}.stb_1")
- tdSql.query("select min(col1) from stb_1")
+ tdSql.query(f"select min(col1) from {dbname}.stb_1")
tdSql.checkData(0, 0, np.min(intData))
- tdSql.query("select min(col1) from db.stb_1")
+ tdSql.query(f"select min(col2) from {dbname}.stb_1")
tdSql.checkData(0, 0, np.min(intData))
- tdSql.query("select min(col2) from stb_1")
+ tdSql.query(f"select min(col3) from {dbname}.stb_1")
tdSql.checkData(0, 0, np.min(intData))
- tdSql.query("select min(col2) from db.stb_1")
+ tdSql.query(f"select min(col4) from {dbname}.stb_1")
tdSql.checkData(0, 0, np.min(intData))
- tdSql.query("select min(col3) from stb_1")
+ tdSql.query(f"select min(col11) from {dbname}.stb_1")
tdSql.checkData(0, 0, np.min(intData))
- tdSql.query("select min(col3) from db.stb_1")
+ tdSql.query(f"select min(col12) from {dbname}.stb_1")
tdSql.checkData(0, 0, np.min(intData))
- tdSql.query("select min(col4) from stb_1")
+ tdSql.query(f"select min(col13) from {dbname}.stb_1")
tdSql.checkData(0, 0, np.min(intData))
- tdSql.query("select min(col4) from db.stb_1")
+ tdSql.query(f"select min(col14) from {dbname}.stb_1")
tdSql.checkData(0, 0, np.min(intData))
- tdSql.query("select min(col11) from stb_1")
- tdSql.checkData(0, 0, np.min(intData))
- tdSql.query("select min(col11) from db.stb_1")
- tdSql.checkData(0, 0, np.min(intData))
- tdSql.query("select min(col12) from stb_1")
- tdSql.checkData(0, 0, np.min(intData))
- tdSql.query("select min(col12) from db.stb_1")
- tdSql.checkData(0, 0, np.min(intData))
- tdSql.query("select min(col13) from stb_1")
- tdSql.checkData(0, 0, np.min(intData))
- tdSql.query("select min(col13) from db.stb_1")
- tdSql.checkData(0, 0, np.min(intData))
- tdSql.query("select min(col14) from stb_1")
- tdSql.checkData(0, 0, np.min(intData))
- tdSql.query("select min(col14) from db.stb_1")
- tdSql.checkData(0, 0, np.min(intData))
- tdSql.query("select min(col5) from stb_1")
- tdSql.checkData(0, 0, np.min(floatData))
- tdSql.query("select min(col5) from db.stb_1")
- tdSql.checkData(0, 0, np.min(floatData))
- tdSql.query("select min(col6) from stb_1")
+ tdSql.query(f"select min(col5) from {dbname}.stb_1")
tdSql.checkData(0, 0, np.min(floatData))
- tdSql.query("select min(col6) from db.stb_1")
+ tdSql.query(f"select min(col6) from {dbname}.stb_1")
tdSql.checkData(0, 0, np.min(floatData))
- tdSql.query("select min(col1) from stb_1 where col2>=5")
+ tdSql.query(f"select min(col1) from {dbname}.stb_1 where col2>=5")
tdSql.checkData(0,0,5)
+ tdSql.query(f"select min(now()) from {dbname}.stb_1")
+ tdSql.checkRows(1)
- tdSql.error("select min(ts) from stb_1")
- tdSql.error("select min(ts) from db.stb_1")
- tdSql.error("select min(col7) from stb_1")
- tdSql.error("select min(col7) from db.stb_1")
- tdSql.error("select min(col8) from stb_1")
- tdSql.error("select min(col8) from db.stb_1")
- tdSql.error("select min(col9) from stb_1")
- tdSql.error("select min(col9) from db.stb_1")
- # tdSql.error("select min(a) from stb_1")
- # tdSql.error("select min(1) from stb_1")
- tdSql.error("select min(now()) from stb_1")
- tdSql.error("select min(count(c1),count(c2)) from stb_1")
+ tdSql.query(f"select first(ts) from {dbname}.stb_1")
+ firstTs = tdSql.getData(0, 0)
+ tdSql.query(f"select min(ts) from {dbname}.stb_1")
+ tdSql.checkData(0, 0, firstTs)
- tdSql.query("select min(col1) from stb")
- tdSql.checkData(0, 0, np.min(intData))
- tdSql.query("select min(col1) from db.stb")
- tdSql.checkData(0, 0, np.min(intData))
- tdSql.query("select min(col2) from stb")
- tdSql.checkData(0, 0, np.min(intData))
- tdSql.query("select min(col2) from db.stb")
- tdSql.checkData(0, 0, np.min(intData))
- tdSql.query("select min(col3) from stb")
- tdSql.checkData(0, 0, np.min(intData))
- tdSql.query("select min(col3) from db.stb")
- tdSql.checkData(0, 0, np.min(intData))
- tdSql.query("select min(col4) from stb")
- tdSql.checkData(0, 0, np.min(intData))
- tdSql.query("select min(col4) from db.stb")
- tdSql.checkData(0, 0, np.min(intData))
- tdSql.query("select min(col11) from stb")
+ tdSql.query(f"select first(ts) from {dbname}.stb_1")
+ firstTs = tdSql.getData(0, 0)
+ tdSql.query(f"select min(ts) from {dbname}.stb_1")
+ tdSql.checkData(0, 0, firstTs)
+
+
+ tdSql.error(f"select min(col7) from {dbname}.stb_1")
+ tdSql.error(f"select min(col8) from {dbname}.stb_1")
+ tdSql.error(f"select min(col9) from {dbname}.stb_1")
+ tdSql.error(f"select min(a) from {dbname}.stb_1")
+ tdSql.query(f"select min(1) from {dbname}.stb_1")
+ tdSql.error(f"select min(count(c1),count(c2)) from {dbname}.stb_1")
+
+ tdSql.query(f"select min(col1) from {dbname}.stb")
tdSql.checkData(0, 0, np.min(intData))
- tdSql.query("select min(col11) from db.stb")
+ tdSql.query(f"select min(col2) from {dbname}.stb")
tdSql.checkData(0, 0, np.min(intData))
- tdSql.query("select min(col12) from stb")
+ tdSql.query(f"select min(col3) from {dbname}.stb")
tdSql.checkData(0, 0, np.min(intData))
- tdSql.query("select min(col12) from db.stb")
+ tdSql.query(f"select min(col4) from {dbname}.stb")
tdSql.checkData(0, 0, np.min(intData))
- tdSql.query("select min(col13) from stb")
+ tdSql.query(f"select min(col11) from {dbname}.stb")
tdSql.checkData(0, 0, np.min(intData))
- tdSql.query("select min(col13) from db.stb")
+ tdSql.query(f"select min(col12) from {dbname}.stb")
tdSql.checkData(0, 0, np.min(intData))
- tdSql.query("select min(col14) from stb")
+ tdSql.query(f"select min(col13) from {dbname}.stb")
tdSql.checkData(0, 0, np.min(intData))
- tdSql.query("select min(col14) from db.stb")
+ tdSql.query(f"select min(col14) from {dbname}.stb")
tdSql.checkData(0, 0, np.min(intData))
- tdSql.query("select min(col5) from stb")
- tdSql.checkData(0, 0, np.min(floatData))
- tdSql.query("select min(col5) from db.stb")
+ tdSql.query(f"select min(col5) from {dbname}.stb")
tdSql.checkData(0, 0, np.min(floatData))
- tdSql.query("select min(col6) from stb")
+ tdSql.query(f"select min(col6) from {dbname}.stb")
tdSql.checkData(0, 0, np.min(floatData))
- tdSql.query("select min(col6) from db.stb")
- tdSql.checkData(0, 0, np.min(floatData))
- tdSql.query("select min(col1) from stb where col2>=5")
+ tdSql.query(f"select min(col1) from {dbname}.stb where col2>=5")
tdSql.checkData(0,0,5)
+ tdSql.query(f"select min(now()) from {dbname}.stb_1")
+ tdSql.checkRows(1)
- tdSql.error("select min(ts) from ntb")
- tdSql.error("select min(ts) from db.ntb")
- tdSql.error("select min(col7) from ntb")
- tdSql.error("select min(col7) from db.ntb")
- tdSql.error("select min(col8) from ntb")
- tdSql.error("select min(col8) from db.ntb")
- tdSql.error("select min(col9) from ntb")
- tdSql.error("select min(col9) from db.ntb")
- # tdSql.error("select min(a) from stb_1")
- # tdSql.error("select min(1) from stb_1")
- tdSql.error("select min(now()) from ntb")
- tdSql.error("select min(count(c1),count(c2)) from ntb")
+ tdSql.query(f"select first(ts) from {dbname}.stb_1")
+ firstTs = tdSql.getData(0, 0)
+ tdSql.query(f"select min(ts) from {dbname}.stb_1")
+ tdSql.checkData(0, 0, firstTs)
- tdSql.query("select min(col1) from ntb")
- tdSql.checkData(0, 0, np.min(intData))
- tdSql.query("select min(col1) from db.ntb")
- tdSql.checkData(0, 0, np.min(intData))
- tdSql.query("select min(col2) from ntb")
- tdSql.checkData(0, 0, np.min(intData))
- tdSql.query("select min(col2) from db.ntb")
- tdSql.checkData(0, 0, np.min(intData))
- tdSql.query("select min(col3) from ntb")
- tdSql.checkData(0, 0, np.min(intData))
- tdSql.query("select min(col3) from db.ntb")
- tdSql.checkData(0, 0, np.min(intData))
- tdSql.query("select min(col4) from ntb")
- tdSql.checkData(0, 0, np.min(intData))
- tdSql.query("select min(col4) from db.ntb")
- tdSql.checkData(0, 0, np.min(intData))
- tdSql.query("select min(col11) from ntb")
+ tdSql.query(f"select first(ts) from {dbname}.stb_1")
+ firstTs = tdSql.getData(0, 0)
+ tdSql.query(f"select min(ts) from {dbname}.stb_1")
+ tdSql.checkData(0, 0, firstTs)
+
+ tdSql.error(f"select min(col7) from {dbname}.ntb")
+ tdSql.error(f"select min(col8) from {dbname}.ntb")
+ tdSql.error(f"select min(col9) from {dbname}.ntb")
+ tdSql.error(f"select min(a) from {dbname}.ntb")
+ tdSql.query(f"select min(1) from {dbname}.ntb")
+ tdSql.error(f"select min(count(c1),count(c2)) from {dbname}.ntb")
+
+ tdSql.query(f"select min(col1) from {dbname}.ntb")
tdSql.checkData(0, 0, np.min(intData))
- tdSql.query("select min(col11) from db.ntb")
+ tdSql.query(f"select min(col2) from {dbname}.ntb")
tdSql.checkData(0, 0, np.min(intData))
- tdSql.query("select min(col12) from ntb")
+ tdSql.query(f"select min(col3) from {dbname}.ntb")
tdSql.checkData(0, 0, np.min(intData))
- tdSql.query("select min(col12) from db.ntb")
+ tdSql.query(f"select min(col4) from {dbname}.ntb")
tdSql.checkData(0, 0, np.min(intData))
- tdSql.query("select min(col13) from ntb")
+ tdSql.query(f"select min(col11) from {dbname}.ntb")
tdSql.checkData(0, 0, np.min(intData))
- tdSql.query("select min(col13) from db.ntb")
+ tdSql.query(f"select min(col12) from {dbname}.ntb")
tdSql.checkData(0, 0, np.min(intData))
- tdSql.query("select min(col14) from ntb")
+ tdSql.query(f"select min(col13) from {dbname}.ntb")
tdSql.checkData(0, 0, np.min(intData))
- tdSql.query("select min(col14) from db.ntb")
+ tdSql.query(f"select min(col14) from {dbname}.ntb")
tdSql.checkData(0, 0, np.min(intData))
- tdSql.query("select min(col5) from ntb")
- tdSql.checkData(0, 0, np.min(floatData))
- tdSql.query("select min(col5) from db.ntb")
+ tdSql.query(f"select min(col5) from {dbname}.ntb")
tdSql.checkData(0, 0, np.min(floatData))
- tdSql.query("select min(col6) from ntb")
+ tdSql.query(f"select min(col6) from {dbname}.ntb")
tdSql.checkData(0, 0, np.min(floatData))
- tdSql.query("select min(col6) from db.ntb")
- tdSql.checkData(0, 0, np.min(floatData))
- tdSql.query("select min(col1) from ntb where col2>=5")
+ tdSql.query(f"select min(col1) from {dbname}.ntb where col2>=5")
tdSql.checkData(0,0,5)
+ tdSql.query(f"select min(now()) from {dbname}.stb_1")
+ tdSql.checkRows(1)
+
+ tdSql.query(f"select first(ts) from {dbname}.stb_1")
+ firstTs = tdSql.getData(0, 0)
+ tdSql.query(f"select min(ts) from {dbname}.stb_1")
+ tdSql.checkData(0, 0, firstTs)
+
+ tdSql.query(f"select first(ts) from {dbname}.stb_1")
+ firstTs = tdSql.getData(0, 0)
+ tdSql.query(f"select min(ts) from {dbname}.stb_1")
+ tdSql.checkData(0, 0, firstTs)
+
def stop(self):
tdSql.close()
diff --git a/tests/system-test/2-query/nestedQuery_str.py b/tests/system-test/2-query/nestedQuery_str.py
index 0d40ef8147eabe133973a15607c340243b69db92..931ff873dcce279d8ddff018549beb648c5cfbc4 100755
--- a/tests/system-test/2-query/nestedQuery_str.py
+++ b/tests/system-test/2-query/nestedQuery_str.py
@@ -24,9 +24,6 @@ from util.dnodes import tdDnodes
from util.dnodes import *
class TDTestCase:
- updatecfgDict = {'maxSQLLength':1048576,'debugFlag': 143 ,"cDebugFlag":143,"uDebugFlag":143 ,"rpcDebugFlag":143 , "tmrDebugFlag":143 ,
- "jniDebugFlag":143 ,"simDebugFlag":143,"dDebugFlag":143, "dDebugFlag":143,"vDebugFlag":143,"mDebugFlag":143,"qDebugFlag":143,
- "wDebugFlag":143,"sDebugFlag":143,"tsdbDebugFlag":143,"tqDebugFlag":143 ,"fsDebugFlag":143 ,"udfDebugFlag":143}
def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
diff --git a/tests/system-test/2-query/pow.py b/tests/system-test/2-query/pow.py
index 1af8bd3839beafe37f690abf14d85f3c0e224cb2..0702d05c0b7bf0989046ab1cfdfaa0d812c78407 100644
--- a/tests/system-test/2-query/pow.py
+++ b/tests/system-test/2-query/pow.py
@@ -9,48 +9,46 @@ from util.cases import *
class TDTestCase:
- updatecfgDict = {'debugFlag': 143 ,"cDebugFlag":143,"uDebugFlag":143 ,"rpcDebugFlag":143 , "tmrDebugFlag":143 ,
- "jniDebugFlag":143 ,"simDebugFlag":143,"dDebugFlag":143, "dDebugFlag":143,"vDebugFlag":143,"mDebugFlag":143,"qDebugFlag":143,
- "wDebugFlag":143,"sDebugFlag":143,"tsdbDebugFlag":143,"tqDebugFlag":143 ,"fsDebugFlag":143 ,"udfDebugFlag":143}
+
def init(self, conn, powSql):
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor())
-
- def prepare_datas(self):
+
+ def prepare_datas(self, dbname="db"):
tdSql.execute(
- '''create table stb1
+ f'''create table {dbname}.stb1
(ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp)
tags (t1 int)
'''
)
-
+
tdSql.execute(
- '''
- create table t1
+ f'''
+ create table {dbname}.t1
(ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp)
'''
)
for i in range(4):
- tdSql.execute(f'create table ct{i+1} using stb1 tags ( {i+1} )')
+ tdSql.execute(f'create table {dbname}.ct{i+1} using {dbname}.stb1 tags ( {i+1} )')
for i in range(9):
tdSql.execute(
- f"insert into ct1 values ( now()-{i*10}s, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )"
+ f"insert into {dbname}.ct1 values ( now()-{i*10}s, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )"
)
tdSql.execute(
- f"insert into ct4 values ( now()-{i*90}d, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )"
+ f"insert into {dbname}.ct4 values ( now()-{i*90}d, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )"
)
- tdSql.execute("insert into ct1 values (now()-45s, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar0', now()+8a )")
- tdSql.execute("insert into ct1 values (now()+10s, 9, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )")
- tdSql.execute("insert into ct1 values (now()+15s, 9, -99999, -999, -99, -9.99, NULL, 1, 'binary9', 'nchar9', now()+9a )")
- tdSql.execute("insert into ct1 values (now()+20s, 9, -99999, -999, NULL, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )")
+ tdSql.execute(f"insert into {dbname}.ct1 values (now()-45s, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar0', now()+8a )")
+ tdSql.execute(f"insert into {dbname}.ct1 values (now()+10s, 9, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )")
+ tdSql.execute(f"insert into {dbname}.ct1 values (now()+15s, 9, -99999, -999, -99, -9.99, NULL, 1, 'binary9', 'nchar9', now()+9a )")
+ tdSql.execute(f"insert into {dbname}.ct1 values (now()+20s, 9, -99999, -999, NULL, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )")
- tdSql.execute("insert into ct4 values (now()-810d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
- tdSql.execute("insert into ct4 values (now()-400d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
- tdSql.execute("insert into ct4 values (now()+90d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
+ tdSql.execute(f"insert into {dbname}.ct4 values (now()-810d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
+ tdSql.execute(f"insert into {dbname}.ct4 values (now()-400d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
+ tdSql.execute(f"insert into {dbname}.ct4 values (now()+90d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
tdSql.execute(
- f'''insert into t1 values
+ f'''insert into {dbname}.t1 values
( '2020-04-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( '2020-10-21 01:01:01.000', 1, 11111, 111, 11, 1.11, 11.11, 1, "binary1", "nchar1", now()+1a )
( '2020-12-31 01:01:01.000', 2, 22222, 222, 22, 2.22, 22.22, 0, "binary2", "nchar2", now()+2a )
@@ -65,257 +63,182 @@ class TDTestCase:
( '2023-02-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
'''
)
-
- def check_result_auto_pow2(self ,origin_query , pow_query):
+ def check_result_auto_pow(self ,base , origin_query , pow_query):
pow_result = tdSql.getResult(pow_query)
origin_result = tdSql.getResult(origin_query)
auto_result =[]
-
- for row in origin_result:
- row_check = []
- for elem in row:
- if elem == None:
- elem = None
- else:
- elem = math.pow(elem,2)
- row_check.append(elem)
- auto_result.append(row_check)
-
- check_status = True
- for row_index , row in enumerate(pow_result):
- for col_index , elem in enumerate(row):
- if auto_result[row_index][col_index] == None and not (auto_result[row_index][col_index] == None and elem == None):
- check_status = False
- elif auto_result[row_index][col_index] != None and (auto_result[row_index][col_index] - elem > 0.00000001):
- check_status = False
- else:
- pass
- if not check_status:
- tdLog.notice("pow function value has not as expected , sql is \"%s\" "%pow_query )
- sys.exit(1)
- else:
- tdLog.info("pow value check pass , it work as expected ,sql is \"%s\" "%pow_query )
-
- def check_result_auto_pow1(self ,origin_query , pow_query):
- pow_result = tdSql.getResult(pow_query)
- origin_result = tdSql.getResult(origin_query)
-
- auto_result =[]
-
for row in origin_result:
row_check = []
for elem in row:
if elem == None:
elem = None
else :
- elem = pow(elem ,1)
+ elem = float(pow(elem ,base))
row_check.append(elem)
auto_result.append(row_check)
- check_status = True
+ tdSql.query(pow_query)
for row_index , row in enumerate(pow_result):
for col_index , elem in enumerate(row):
- if auto_result[row_index][col_index] == None and not (auto_result[row_index][col_index] == None and elem == None):
- check_status = False
- elif auto_result[row_index][col_index] != None and (auto_result[row_index][col_index] - elem > 0.00000001):
- check_status = False
- else:
- pass
- if not check_status:
- tdLog.notice("pow function value has not as expected , sql is \"%s\" "%pow_query )
- sys.exit(1)
- else:
- tdLog.info("pow value check pass , it work as expected ,sql is \"%s\" "%pow_query )
-
- def check_result_auto_pow__10(self ,origin_query , pow_query):
- pow_result = tdSql.getResult(pow_query)
- origin_result = tdSql.getResult(origin_query)
+ tdSql.checkData(row_index,col_index ,auto_result[row_index][col_index])
+
- auto_result =[]
-
- for row in origin_result:
- row_check = []
- for elem in row:
- if elem == None:
- elem = None
- elif elem == 0:
- elem = None
- else:
- elem = pow(elem ,-10)
- row_check.append(elem)
- auto_result.append(row_check)
-
- check_status = True
- for row_index , row in enumerate(pow_result):
- for col_index , elem in enumerate(row):
- if auto_result[row_index][col_index] == None and not (auto_result[row_index][col_index] == None and elem == None):
- check_status = False
- elif auto_result[row_index][col_index] != None and (auto_result[row_index][col_index] - elem > 0.00000001):
- check_status = False
- else:
- pass
- if not check_status:
- tdLog.notice("pow function value has not as expected , sql is \"%s\" "%pow_query )
- sys.exit(1)
- else:
- tdLog.info("pow value check pass , it work as expected ,sql is \"%s\" "%pow_query )
-
- def test_errors(self):
+ def test_errors(self, dbname="db"):
error_sql_lists = [
- "select pow from t1",
- # "select pow(-+--+c1 ,2) from t1",
- # "select +-pow(c1,2) from t1",
- # "select ++-pow(c1,2) from t1",
- # "select ++--pow(c1,2) from t1",
- # "select - -pow(c1,2)*0 from t1",
- # "select pow(tbname+1,2) from t1 ",
- "select pow(123--123,2)==1 from t1",
- "select pow(c1,2) as 'd1' from t1",
- "select pow(c1 ,c2 ,2) from t1",
- "select pow(c1 ,NULL ,2) from t1",
- "select pow(, 2) from t1;",
- "select pow(pow(c1, 2) ab from t1)",
- "select pow(c1 ,2 ) as int from t1",
- "select pow from stb1",
- # "select pow(-+--+c1) from stb1",
- # "select +-pow(c1) from stb1",
- # "select ++-pow(c1) from stb1",
- # "select ++--pow(c1) from stb1",
- # "select - -pow(c1)*0 from stb1",
- # "select pow(tbname+1) from stb1 ",
- "select pow(123--123 ,2)==1 from stb1",
- "select pow(c1 ,2) as 'd1' from stb1",
- "select pow(c1 ,c2 ,2 ) from stb1",
- "select pow(c1 ,NULL,2) from stb1",
- "select pow(,) from stb1;",
- "select pow(pow(c1 , 2) ab from stb1)",
- "select pow(c1 , 2) as int from stb1"
+ f"select pow from {dbname}.t1",
+ # f"select pow(-+--+c1 ,2) from {dbname}.t1",
+ # f"select +-pow(c1,2) from {dbname}.t1",
+ # f"select ++-pow(c1,2) from {dbname}.t1",
+ # f"select ++--pow(c1,2) from {dbname}.t1",
+ # f"select - -pow(c1,2)*0 from {dbname}.t1",
+ # f"select pow(tbname+1,2) from {dbname}.t1 ",
+ f"select pow(123--123,2)==1 from {dbname}.t1",
+ f"select pow(c1,2) as 'd1' from {dbname}.t1",
+ f"select pow(c1 ,c2 ,2) from {dbname}.t1",
+ f"select pow(c1 ,NULL ,2) from {dbname}.t1",
+ f"select pow(, 2) from {dbname}.t1;",
+ f"select pow(pow(c1, 2) ab from {dbname}.t1)",
+ f"select pow(c1 ,2 ) as int from {dbname}.t1",
+ f"select pow from {dbname}.stb1",
+ # f"select pow(-+--+c1) from {dbname}.stb1",
+ # f"select +-pow(c1) from {dbname}.stb1",
+ # f"select ++-pow(c1) from {dbname}.stb1",
+ # f"select ++--pow(c1) from {dbname}.stb1",
+ # f"select - -pow(c1)*0 from {dbname}.stb1",
+ # f"select pow(tbname+1) from {dbname}.stb1 ",
+ f"select pow(123--123 ,2)==1 from {dbname}.stb1",
+ f"select pow(c1 ,2) as 'd1' from {dbname}.stb1",
+ f"select pow(c1 ,c2 ,2 ) from {dbname}.stb1",
+ f"select pow(c1 ,NULL,2) from {dbname}.stb1",
+ f"select pow(,) from {dbname}.stb1;",
+ f"select pow(pow(c1 , 2) ab from {dbname}.stb1)",
+ f"select pow(c1 , 2) as int from {dbname}.stb1"
]
for error_sql in error_sql_lists:
tdSql.error(error_sql)
-
- def support_types(self):
+
+ def support_types(self, dbname="db"):
type_error_sql_lists = [
- "select pow(ts ,2 ) from t1" ,
- "select pow(c7,c1 ) from t1",
- "select pow(c8,c2) from t1",
- "select pow(c9,c3 ) from t1",
- "select pow(ts,c4 ) from ct1" ,
- "select pow(c7,c5 ) from ct1",
- "select pow(c8,c6 ) from ct1",
- "select pow(c9,c8 ) from ct1",
- "select pow(ts,2 ) from ct3" ,
- "select pow(c7,2 ) from ct3",
- "select pow(c8,2 ) from ct3",
- "select pow(c9,2 ) from ct3",
- "select pow(ts,2 ) from ct4" ,
- "select pow(c7,2 ) from ct4",
- "select pow(c8,2 ) from ct4",
- "select pow(c9,2 ) from ct4",
- "select pow(ts,2 ) from stb1" ,
- "select pow(c7,2 ) from stb1",
- "select pow(c8,2 ) from stb1",
- "select pow(c9,2 ) from stb1" ,
-
- "select pow(ts,2 ) from stbbb1" ,
- "select pow(c7,2 ) from stbbb1",
-
- "select pow(ts,2 ) from tbname",
- "select pow(c9,2 ) from tbname"
+ f"select pow(ts ,2 ) from {dbname}.t1" ,
+ f"select pow(c7,c1 ) from {dbname}.t1",
+ f"select pow(c8,c2) from {dbname}.t1",
+ f"select pow(c9,c3 ) from {dbname}.t1",
+ f"select pow(ts,c4 ) from {dbname}.ct1" ,
+ f"select pow(c7,c5 ) from {dbname}.ct1",
+ f"select pow(c8,c6 ) from {dbname}.ct1",
+ f"select pow(c9,c8 ) from {dbname}.ct1",
+ f"select pow(ts,2 ) from {dbname}.ct3" ,
+ f"select pow(c7,2 ) from {dbname}.ct3",
+ f"select pow(c8,2 ) from {dbname}.ct3",
+ f"select pow(c9,2 ) from {dbname}.ct3",
+ f"select pow(ts,2 ) from {dbname}.ct4" ,
+ f"select pow(c7,2 ) from {dbname}.ct4",
+ f"select pow(c8,2 ) from {dbname}.ct4",
+ f"select pow(c9,2 ) from {dbname}.ct4",
+ f"select pow(ts,2 ) from {dbname}.stb1" ,
+ f"select pow(c7,2 ) from {dbname}.stb1",
+ f"select pow(c8,2 ) from {dbname}.stb1",
+ f"select pow(c9,2 ) from {dbname}.stb1" ,
+
+ f"select pow(ts,2 ) from {dbname}.stbbb1" ,
+ f"select pow(c7,2 ) from {dbname}.stbbb1",
+
+ f"select pow(ts,2 ) from {dbname}.tbname",
+ f"select pow(c9,2 ) from {dbname}.tbname"
]
-
+
for type_sql in type_error_sql_lists:
tdSql.error(type_sql)
-
-
+
+
type_sql_lists = [
- "select pow(c1,2 ) from t1",
- "select pow(c2,2 ) from t1",
- "select pow(c3,2 ) from t1",
- "select pow(c4,2 ) from t1",
- "select pow(c5,2 ) from t1",
- "select pow(c6,2 ) from t1",
-
- "select pow(c1,2 ) from ct1",
- "select pow(c2,2 ) from ct1",
- "select pow(c3,2 ) from ct1",
- "select pow(c4,2 ) from ct1",
- "select pow(c5,2 ) from ct1",
- "select pow(c6,2 ) from ct1",
-
- "select pow(c1,2 ) from ct3",
- "select pow(c2,2 ) from ct3",
- "select pow(c3,2 ) from ct3",
- "select pow(c4,2 ) from ct3",
- "select pow(c5,2 ) from ct3",
- "select pow(c6,2 ) from ct3",
-
- "select pow(c1,2 ) from stb1",
- "select pow(c2,2 ) from stb1",
- "select pow(c3,2 ) from stb1",
- "select pow(c4,2 ) from stb1",
- "select pow(c5,2 ) from stb1",
- "select pow(c6,2 ) from stb1",
-
- "select pow(c6,2) as alisb from stb1",
- "select pow(c6,2) alisb from stb1",
+ f"select pow(c1,2 ) from {dbname}.t1",
+ f"select pow(c2,2 ) from {dbname}.t1",
+ f"select pow(c3,2 ) from {dbname}.t1",
+ f"select pow(c4,2 ) from {dbname}.t1",
+ f"select pow(c5,2 ) from {dbname}.t1",
+ f"select pow(c6,2 ) from {dbname}.t1",
+
+ f"select pow(c1,2 ) from {dbname}.ct1",
+ f"select pow(c2,2 ) from {dbname}.ct1",
+ f"select pow(c3,2 ) from {dbname}.ct1",
+ f"select pow(c4,2 ) from {dbname}.ct1",
+ f"select pow(c5,2 ) from {dbname}.ct1",
+ f"select pow(c6,2 ) from {dbname}.ct1",
+
+ f"select pow(c1,2 ) from {dbname}.ct3",
+ f"select pow(c2,2 ) from {dbname}.ct3",
+ f"select pow(c3,2 ) from {dbname}.ct3",
+ f"select pow(c4,2 ) from {dbname}.ct3",
+ f"select pow(c5,2 ) from {dbname}.ct3",
+ f"select pow(c6,2 ) from {dbname}.ct3",
+
+ f"select pow(c1,2 ) from {dbname}.stb1",
+ f"select pow(c2,2 ) from {dbname}.stb1",
+ f"select pow(c3,2 ) from {dbname}.stb1",
+ f"select pow(c4,2 ) from {dbname}.stb1",
+ f"select pow(c5,2 ) from {dbname}.stb1",
+ f"select pow(c6,2 ) from {dbname}.stb1",
+
+ f"select pow(c6,2) as alisb from {dbname}.stb1",
+ f"select pow(c6,2) alisb from {dbname}.stb1",
]
for type_sql in type_sql_lists:
tdSql.query(type_sql)
-
- def basic_pow_function(self):
- # basic query
- tdSql.query("select c1 from ct3")
+ def basic_pow_function(self, dbname="db"):
+
+ # basic query
+ tdSql.query(f"select c1 from {dbname}.ct3")
tdSql.checkRows(0)
- tdSql.query("select c1 from t1")
+ tdSql.query(f"select c1 from {dbname}.t1")
tdSql.checkRows(12)
- tdSql.query("select c1 from stb1")
+ tdSql.query(f"select c1 from {dbname}.stb1")
tdSql.checkRows(25)
# used for empty table , ct3 is empty
- tdSql.query("select pow(c1 ,2) from ct3")
+ tdSql.query(f"select pow(c1 ,2) from {dbname}.ct3")
tdSql.checkRows(0)
- tdSql.query("select pow(c2 ,2) from ct3")
+ tdSql.query(f"select pow(c2 ,2) from {dbname}.ct3")
tdSql.checkRows(0)
- tdSql.query("select pow(c3 ,2) from ct3")
+ tdSql.query(f"select pow(c3 ,2) from {dbname}.ct3")
tdSql.checkRows(0)
- tdSql.query("select pow(c4 ,2) from ct3")
+ tdSql.query(f"select pow(c4 ,2) from {dbname}.ct3")
tdSql.checkRows(0)
- tdSql.query("select pow(c5 ,2) from ct3")
+ tdSql.query(f"select pow(c5 ,2) from {dbname}.ct3")
tdSql.checkRows(0)
- tdSql.query("select pow(c6 ,2) from ct3")
+ tdSql.query(f"select pow(c6 ,2) from {dbname}.ct3")
tdSql.checkRows(0)
# # used for regular table
- tdSql.query("select pow(c1 ,2) from t1")
+ tdSql.query(f"select pow(c1 ,2) from {dbname}.t1")
tdSql.checkData(0, 0, None)
tdSql.checkData(1 , 0, 1.000000000)
tdSql.checkData(3 , 0, 9.000000000)
tdSql.checkData(5 , 0, None)
- tdSql.query("select c1, c2, c3 , c4, c5 from t1")
+ tdSql.query(f"select c1, c2, c3 , c4, c5 from {dbname}.t1")
tdSql.checkData(1, 4, 1.11000)
tdSql.checkData(3, 3, 33)
tdSql.checkData(5, 4, None)
- tdSql.query("select ts,c1, c2, c3 , c4, c5 from t1")
+ tdSql.query(f"select ts,c1, c2, c3 , c4, c5 from {dbname}.t1")
tdSql.checkData(1, 5, 1.11000)
tdSql.checkData(3, 4, 33)
tdSql.checkData(5, 5, None)
- self.check_result_auto_pow2( "select c1, c2, c3 , c4, c5 from t1", "select pow(c1 ,2), pow(c2 ,2) ,pow(c3, 2), pow(c4 ,2), pow(c5 ,2) from t1")
- self.check_result_auto_pow1( "select c1, c2, c3 , c4, c5 from t1", "select pow(c1 ,1), pow(c2 ,1) ,pow(c3, 1), pow(c4 ,1), pow(c5 ,1) from t1")
- self.check_result_auto_pow__10( "select c1, c2, c3 , c4, c5 from t1", "select pow(c1 ,-10), pow(c2 ,-10) ,pow(c3, -10), pow(c4 ,-10), pow(c5 ,-10) from t1")
-
+ self.check_result_auto_pow( 2, f"select c1, c3 , c4, c5 from {dbname}.t1", f"select pow(c1 ,2) , pow(c3, 2), pow(c4 ,2), pow(c5 ,2) from {dbname}.t1")
+ self.check_result_auto_pow( 1,f"select c1, c3 , c4, c5 from {dbname}.t1", f"select pow(c1 ,1) , pow(c3, 1), pow(c4 ,1), pow(c5 ,1) from {dbname}.t1")
+ self.check_result_auto_pow( 10,f"select c1, c3 , c4, c5 from {dbname}.t1", f"select pow(c1 ,10) ,pow(c3, 10), pow(c4 ,10), pow(c5 ,10) from {dbname}.t1")
+
# used for sub table
- tdSql.query("select c1 ,pow(c1 ,2) from ct1")
+ tdSql.query(f"select c1 ,pow(c1 ,2) from {dbname}.ct1")
tdSql.checkData(0, 1, 64.000000000)
tdSql.checkData(1 , 1, 49.000000000)
tdSql.checkData(3 , 1, 25.000000000)
@@ -323,7 +246,7 @@ class TDTestCase:
# # test bug fix for pow(c1,c2)
- tdSql.query("select c1, c5 ,pow(c1,c5) from ct4")
+ tdSql.query(f"select c1, c5 ,pow(c1,c5) from {dbname}.ct4")
tdSql.checkData(0 , 2, None)
tdSql.checkData(1 , 2, 104577724.506799981)
tdSql.checkData(2 , 2, 3684781.623933245)
@@ -331,11 +254,11 @@ class TDTestCase:
tdSql.checkData(4 , 2, 7573.273783071)
- self.check_result_auto_pow2( "select c1, c2, c3 , c4, c5 from ct1", "select pow(c1,2), pow(c2,2) ,pow(c3,2), pow(c4,2), pow(c5,2) from ct1")
- self.check_result_auto_pow__10( "select c1, c2, c3 , c4, c5 from ct1", "select pow(c1,-10), pow(c2,-10) ,pow(c3,-10), pow(c4,-10), pow(c5,-10) from ct1")
+ self.check_result_auto_pow( 2, f"select c1, c3 , c4, c5 from {dbname}.ct1", f"select pow(c1,2), pow(c3,2), pow(c4,2), pow(c5,2) from {dbname}.ct1")
+ self.check_result_auto_pow( 10, f"select c1, c3 , c4, c5 from {dbname}.ct1", f"select pow(c1,10), pow(c3,10), pow(c4,10), pow(c5,10) from {dbname}.ct1")
# nest query for pow functions
- tdSql.query("select c1 , pow(c1,2) ,pow(pow(c1,2),2) , pow(pow(pow(c1,2),2),2) from ct1;")
+ tdSql.query(f"select c1 , pow(c1,2) ,pow(pow(c1,2),2) , pow(pow(pow(c1,2),2),2) from {dbname}.ct1;")
tdSql.checkData(0 , 0 , 8)
tdSql.checkData(0 , 1 , 64.000000000)
tdSql.checkData(0 , 2 , 4096.000000000)
@@ -351,24 +274,24 @@ class TDTestCase:
tdSql.checkData(4 , 2 , 0.000000000)
tdSql.checkData(4 , 3 , 0.000000000)
- # # used for stable table
-
- tdSql.query("select pow(c1, 2) from stb1")
+ # # used for stable table
+
+ tdSql.query(f"select pow(c1, 2) from {dbname}.stb1")
tdSql.checkRows(25)
-
+
# used for not exists table
- tdSql.error("select pow(c1, 2) from stbbb1")
- tdSql.error("select pow(c1, 2) from tbname")
- tdSql.error("select pow(c1, 2) from ct5")
+ tdSql.error(f"select pow(c1, 2) from {dbname}.stbbb1")
+ tdSql.error(f"select pow(c1, 2) from {dbname}.tbname")
+ tdSql.error(f"select pow(c1, 2) from {dbname}.ct5")
- # mix with common col
- tdSql.query("select c1, pow(c1 ,2) from ct1")
+ # mix with common col
+ tdSql.query(f"select c1, pow(c1 ,2) from {dbname}.ct1")
tdSql.checkData(0 , 0 ,8)
tdSql.checkData(0 , 1 ,64.000000000)
tdSql.checkData(4 , 0 ,0)
tdSql.checkData(4 , 1 ,0.000000000)
- tdSql.query("select c1, pow(c1,2) from ct4")
+ tdSql.query(f"select c1, pow(c1,2) from {dbname}.ct4")
tdSql.checkData(0 , 0 , None)
tdSql.checkData(0 , 1 ,None)
tdSql.checkData(4 , 0 ,5)
@@ -377,45 +300,45 @@ class TDTestCase:
tdSql.checkData(5 , 1 ,None)
# mix with common functions
- tdSql.query("select c1, pow(c1 ,2),pow(c1,2), log(pow(c1,2) ,2) from ct4 ")
+ tdSql.query(f"select c1, pow(c1 ,2),pow(c1,2), log(pow(c1,2) ,2) from {dbname}.ct4 ")
tdSql.checkData(0 , 0 ,None)
tdSql.checkData(0 , 1 ,None)
tdSql.checkData(0 , 2 ,None)
tdSql.checkData(0 , 3 ,None)
-
+
tdSql.checkData(3 , 0 , 6)
tdSql.checkData(3 , 1 ,36.000000000)
tdSql.checkData(3 , 2 ,36.000000000)
tdSql.checkData(3 , 3 ,5.169925001)
- tdSql.query("select c1, pow(c1,1),c5, floor(c5 ) from stb1 ")
+ tdSql.query(f"select c1, pow(c1,1),c5, floor(c5 ) from {dbname}.stb1 ")
# # mix with agg functions , not support
- tdSql.error("select c1, pow(c1 ,2),c5, count(c5) from stb1 ")
- tdSql.error("select c1, pow(c1 ,2),c5, count(c5) from ct1 ")
- tdSql.error("select pow(c1 ,2), count(c5) from stb1 ")
- tdSql.error("select pow(c1 ,2), count(c5) from ct1 ")
- tdSql.error("select c1, count(c5) from ct1 ")
- tdSql.error("select c1, count(c5) from stb1 ")
+ tdSql.error(f"select c1, pow(c1 ,2),c5, count(c5) from {dbname}.stb1 ")
+ tdSql.error(f"select c1, pow(c1 ,2),c5, count(c5) from {dbname}.ct1 ")
+ tdSql.error(f"select pow(c1 ,2), count(c5) from {dbname}.stb1 ")
+ tdSql.error(f"select pow(c1 ,2), count(c5) from {dbname}.ct1 ")
+ tdSql.error(f"select c1, count(c5) from {dbname}.ct1 ")
+ tdSql.error(f"select c1, count(c5) from {dbname}.stb1 ")
# agg functions mix with agg functions
- tdSql.query("select max(c5), count(c5) from stb1")
- tdSql.query("select max(c5), count(c5) from ct1")
+ tdSql.query(f"select max(c5), count(c5) from {dbname}.stb1")
+ tdSql.query(f"select max(c5), count(c5) from {dbname}.ct1")
+
-
# bug fix for count
- tdSql.query("select count(c1) from ct4 ")
+ tdSql.query(f"select count(c1) from {dbname}.ct4 ")
tdSql.checkData(0,0,9)
- tdSql.query("select count(*) from ct4 ")
+ tdSql.query(f"select count(*) from {dbname}.ct4 ")
tdSql.checkData(0,0,12)
- tdSql.query("select count(c1) from stb1 ")
+ tdSql.query(f"select count(c1) from {dbname}.stb1 ")
tdSql.checkData(0,0,22)
- tdSql.query("select count(*) from stb1 ")
+ tdSql.query(f"select count(*) from {dbname}.stb1 ")
tdSql.checkData(0,0,25)
- # # bug fix for compute
- tdSql.query("select c1, pow(c1 ,2) -0 ,pow(c1-4 ,2)-0 from ct4 ")
+ # # bug fix for compute
+ tdSql.query(f"select c1, pow(c1 ,2) -0 ,pow(c1-4 ,2)-0 from {dbname}.ct4 ")
tdSql.checkData(0, 0, None)
tdSql.checkData(0, 1, None)
tdSql.checkData(0, 2, None)
@@ -423,7 +346,7 @@ class TDTestCase:
tdSql.checkData(1, 1, 64.000000000)
tdSql.checkData(1, 2, 16.000000000)
- tdSql.query(" select c1, pow(c1 ,2) -0 ,pow(c1-0.1 ,2)-0.1 from ct4")
+ tdSql.query(f"select c1, pow(c1 ,2) -0 ,pow(c1-0.1 ,2)-0.1 from {dbname}.ct4")
tdSql.checkData(0, 0, None)
tdSql.checkData(0, 1, None)
tdSql.checkData(0, 2, None)
@@ -431,87 +354,86 @@ class TDTestCase:
tdSql.checkData(1, 1, 64.000000000)
tdSql.checkData(1, 2, 62.310000000)
- tdSql.query("select c1, pow(c1, -10), c2, pow(c2, -10), c3, pow(c3, -10) from ct1")
+ tdSql.query(f"select c1, pow(c1, -10), c2, pow(c2, -10), c3, pow(c3, -10) from {dbname}.ct1")
- def test_big_number(self):
+ def test_big_number(self, dbname="db"):
- tdSql.query("select c1, pow(c1, 100000000) from ct1") # bigint to double data overflow
+ tdSql.query(f"select c1, pow(c1, 100000000) from {dbname}.ct1") # bigint to double data overflow
tdSql.checkData(0, 1, None)
tdSql.checkData(1, 1, None)
tdSql.checkData(4, 1, 0.000000000)
- tdSql.query("select c1, pow(c1, 10000000000000) from ct1") # bigint to double data overflow
+ tdSql.query(f"select c1, pow(c1, 10000000000000) from {dbname}.ct1") # bigint to double data overflow
tdSql.checkData(0, 1, None)
tdSql.checkData(1, 1, None)
tdSql.checkData(4, 1, 0.000000000)
- tdSql.query("select c1, pow(c1, 10000000000000000000000000) from ct1") # bigint to double data overflow
- tdSql.query("select c1, pow(c1, 10000000000000000000000000.0) from ct1") # 10000000000000000000000000.0 is a double value
+ tdSql.query(f"select c1, pow(c1, 10000000000000000000000000) from {dbname}.ct1") # bigint to double data overflow
+ tdSql.query(f"select c1, pow(c1, 10000000000000000000000000.0) from {dbname}.ct1") # 10000000000000000000000000.0 is a double value
tdSql.checkData(0, 1, None)
tdSql.checkData(1, 1, None)
tdSql.checkData(4, 1, 0.000000000)
- tdSql.query("select c1, pow(c1, 10000000000000000000000000000000000) from ct1") # bigint to double data overflow
- tdSql.query("select c1, pow(c1, 10000000000000000000000000000000000.0) from ct1") # 10000000000000000000000000.0 is a double value
+ tdSql.query(f"select c1, pow(c1, 10000000000000000000000000000000000) from {dbname}.ct1") # bigint to double data overflow
+ tdSql.query(f"select c1, pow(c1, 10000000000000000000000000000000000.0) from {dbname}.ct1") # 10000000000000000000000000.0 is a double value
tdSql.checkData(0, 1, None)
tdSql.checkData(1, 1, None)
tdSql.checkData(4, 1, 0.000000000)
- tdSql.query("select c1, pow(c1, 10000000000000000000000000000000000000000) from ct1") # bigint to double data overflow
- tdSql.query("select c1, pow(c1, 10000000000000000000000000000000000000000.0) from ct1") # 10000000000000000000000000.0 is a double value
+ tdSql.query(f"select c1, pow(c1, 10000000000000000000000000000000000000000) from {dbname}.ct1") # bigint to double data overflow
+ tdSql.query(f"select c1, pow(c1, 10000000000000000000000000000000000000000.0) from {dbname}.ct1") # 10000000000000000000000000.0 is a double value
tdSql.checkData(0, 1, None)
tdSql.checkData(1, 1, None)
tdSql.checkData(4, 1, 0.000000000)
- tdSql.query("select c1, pow(c1, 10000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000) from ct1") # bigint to double data overflow
+ tdSql.query(f"select c1, pow(c1, 10000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000) from {dbname}.ct1") # bigint to double data overflow
- def pow_base_test(self):
+ def pow_base_test(self, dbname="db"):
# base is an regular number ,int or double
- tdSql.query("select c1, pow(c1, 2) from ct1")
+ tdSql.query(f"select c1, pow(c1, 2) from {dbname}.ct1")
tdSql.checkData(0, 1,64.000000000)
- tdSql.query("select c1, pow(c1, 2.0) from ct1")
+ tdSql.query(f"select c1, pow(c1, 2.0) from {dbname}.ct1")
tdSql.checkData(0, 1, 64.000000000)
- tdSql.query("select c1, pow(1, 2.0) from ct1")
+ tdSql.query(f"select c1, pow(1, 2.0) from {dbname}.ct1")
tdSql.checkData(0, 1, 1.000000000)
tdSql.checkRows(13)
# # bug for compute in functions
- # tdSql.query("select c1, abs(1/0) from ct1")
+ # tdSql.query(f"select c1, abs(1/0) from {dbname}.ct1")
# tdSql.checkData(0, 0, 8)
# tdSql.checkData(0, 1, 1)
- tdSql.query("select c1, pow(1, 2.0) from ct1")
+ tdSql.query(f"select c1, pow(1, 2.0) from {dbname}.ct1")
tdSql.checkData(0, 1, 1.000000000)
tdSql.checkRows(13)
# two cols start pow(x,y)
- tdSql.query("select c1,c2, pow(c1,c2) from ct1")
+ tdSql.query(f"select c1,c2, pow(c1,c2) from {dbname}.ct1")
tdSql.checkData(0, 2, None)
tdSql.checkData(1, 2, None)
tdSql.checkData(4, 2, 1.000000000)
- tdSql.query("select c1,c2, pow(c2,c1) from ct1")
+ tdSql.query(f"select c1,c2, pow(c2,c1) from {dbname}.ct1")
tdSql.checkData(0, 2, 3897131646727578700481513520437089271808.000000000)
tdSql.checkData(1, 2, 17217033054561120738612297152331776.000000000)
tdSql.checkData(4, 2, 1.000000000)
- tdSql.query("select c1, pow(2.0 , c1) from ct1")
+ tdSql.query(f"select c1, pow(2.0 , c1) from {dbname}.ct1")
tdSql.checkData(0, 1, 256.000000000)
tdSql.checkData(1, 1, 128.000000000)
tdSql.checkData(4, 1, 1.000000000)
- tdSql.query("select c1, pow(2.0 , c1) from ct1")
+ tdSql.query(f"select c1, pow(2.0 , c1) from {dbname}.ct1")
tdSql.checkData(0, 1, 256.000000000)
tdSql.checkData(1, 1, 128.000000000)
tdSql.checkData(4, 1, 1.000000000)
- def abs_func_filter(self):
- tdSql.execute("use db")
- tdSql.query("select c1, abs(c1) -0 ,ceil(c1-0.1)-0 ,floor(c1+0.1)-0.1 ,ceil(pow(c1,2)-0.5) from ct4 where c1>5 ")
+ def abs_func_filter(self, dbname="db"):
+ tdSql.query(f"select c1, abs(c1) -0 ,ceil(c1-0.1)-0 ,floor(c1+0.1)-0.1 ,ceil(pow(c1,2)-0.5) from {dbname}.ct4 where c1>5 ")
tdSql.checkRows(3)
tdSql.checkData(0,0,8)
tdSql.checkData(0,1,8.000000000)
@@ -519,7 +441,7 @@ class TDTestCase:
tdSql.checkData(0,3,7.900000000)
tdSql.checkData(0,4,64.000000000)
- tdSql.query("select c1, abs(c1) -0 ,ceil(c1-0.1)-0 ,floor(c1+0.1)-0.1 ,ceil(pow(c1,2)-0.5) from ct4 where c1=5 ")
+ tdSql.query(f"select c1, abs(c1) -0 ,ceil(c1-0.1)-0 ,floor(c1+0.1)-0.1 ,ceil(pow(c1,2)-0.5) from {dbname}.ct4 where c1=5 ")
tdSql.checkRows(1)
tdSql.checkData(0,0,5)
tdSql.checkData(0,1,5.000000000)
@@ -527,7 +449,7 @@ class TDTestCase:
tdSql.checkData(0,3,4.900000000)
tdSql.checkData(0,4,25.000000000)
- tdSql.query("select c1, abs(c1) -0 ,ceil(c1-0.1)-0 ,floor(c1+0.1)-0.1 ,ceil(pow(c1,2)-0.5) from ct4 where c1=5 ")
+ tdSql.query(f"select c1, abs(c1) -0 ,ceil(c1-0.1)-0 ,floor(c1+0.1)-0.1 ,ceil(pow(c1,2)-0.5) from {dbname}.ct4 where c1=5 ")
tdSql.checkRows(1)
tdSql.checkData(0,0,5)
tdSql.checkData(0,1,5.000000000)
@@ -535,7 +457,7 @@ class TDTestCase:
tdSql.checkData(0,3,4.900000000)
tdSql.checkData(0,4,25.000000000)
- tdSql.query("select c1,c2 , abs(c1) -0 ,ceil(c1-0.1)-0 ,floor(c1+0.1)-0.1 ,ceil(pow(c1,2)-0.5) from ct4 where c1 0 order by tbname " , "select pow(c5,2) from stb1 where c1 > 0 order by tbname" )
- self.check_result_auto_pow2( " select c5 from stb1 where c1 > 0 order by tbname " , "select pow(c5,2) from stb1 where c1 > 0 order by tbname" )
- self.check_result_auto_pow2( " select t1,c5 from stb1 order by ts " , "select pow(t1,2), pow(c5,2) from stb1 order by ts" )
- self.check_result_auto_pow2( " select t1,c5 from stb1 order by tbname " , "select pow(t1,2) ,pow(c5,2) from stb1 order by tbname" )
- self.check_result_auto_pow2( " select t1,c5 from stb1 where c1 > 0 order by tbname " , "select pow(t1,2) ,pow(c5,2) from stb1 where c1 > 0 order by tbname" )
- self.check_result_auto_pow2( " select t1,c5 from stb1 where c1 > 0 order by tbname " , "select pow(t1,2) , pow(c5,2) from stb1 where c1 > 0 order by tbname" )
- pass
-
-
+ def support_super_table_test(self, dbname="db"):
+ self.check_result_auto_pow(2, f"select c5 from {dbname}.stb1 order by ts " , f"select pow(c5,2) from {dbname}.stb1 order by ts" )
+ self.check_result_auto_pow(2, f"select c5 from {dbname}.stb1 order by tbname " , f"select pow(c5,2) from {dbname}.stb1 order by tbname" )
+ self.check_result_auto_pow(2, f"select c5 from {dbname}.stb1 where c1 > 0 order by tbname " , f"select pow(c5,2) from {dbname}.stb1 where c1 > 0 order by tbname" )
+ self.check_result_auto_pow(2, f"select c5 from {dbname}.stb1 where c1 > 0 order by tbname " , f"select pow(c5,2) from {dbname}.stb1 where c1 > 0 order by tbname" )
+ self.check_result_auto_pow(2, f"select t1,c5 from {dbname}.stb1 order by ts " , f"select pow(t1,2), pow(c5,2) from {dbname}.stb1 order by ts" )
+ self.check_result_auto_pow(2, f"select t1,c5 from {dbname}.stb1 order by tbname " , f"select pow(t1,2) ,pow(c5,2) from {dbname}.stb1 order by tbname" )
+ self.check_result_auto_pow(2, f"select t1,c5 from {dbname}.stb1 where c1 > 0 order by tbname " , f"select pow(t1,2) ,pow(c5,2) from {dbname}.stb1 where c1 > 0 order by tbname" )
+ self.check_result_auto_pow(2, f"select t1,c5 from {dbname}.stb1 where c1 > 0 order by tbname " , f"select pow(t1,2) , pow(c5,2) from {dbname}.stb1 where c1 > 0 order by tbname" )
def run(self): # sourcery skip: extract-duplicate-method, remove-redundant-fstring
tdSql.prepare()
tdLog.printNoPrefix("==========step1:create table ==============")
-
+
self.prepare_datas()
- tdLog.printNoPrefix("==========step2:test errors ==============")
+ tdLog.printNoPrefix("==========step2:test errors ==============")
self.test_errors()
-
- tdLog.printNoPrefix("==========step3:support types ============")
+
+ tdLog.printNoPrefix("==========step3:support types ============")
self.support_types()
- tdLog.printNoPrefix("==========step4: pow basic query ============")
+ tdLog.printNoPrefix("==========step4: pow basic query ============")
self.basic_pow_function()
- tdLog.printNoPrefix("==========step5: big number pow query ============")
+ tdLog.printNoPrefix("==========step5: big number pow query ============")
self.test_big_number()
- tdLog.printNoPrefix("==========step6: base number for pow query ============")
+ tdLog.printNoPrefix("==========step6: base number for pow query ============")
self.pow_base_test()
- tdLog.printNoPrefix("==========step7: pow boundary query ============")
+ tdLog.printNoPrefix("==========step7: pow boundary query ============")
self.check_boundary_values()
- tdLog.printNoPrefix("==========step8: pow filter query ============")
+ tdLog.printNoPrefix("==========step8: pow filter query ============")
self.abs_func_filter()
tdLog.printNoPrefix("==========step9: check pow result of stable query ============")
- self.support_super_table_test()
+ self.support_super_table_test()
def stop(self):
tdSql.close()
diff --git a/tests/system-test/2-query/qnodeCluster.py b/tests/system-test/2-query/qnodeCluster.py
index f68eb58a7a0820333b50258cf7cd29d860153cac..9e49bff9389deeb83839477c98e194c014a2a87f 100644
--- a/tests/system-test/2-query/qnodeCluster.py
+++ b/tests/system-test/2-query/qnodeCluster.py
@@ -13,9 +13,9 @@ from util.common import *
sys.path.append("./6-cluster/")
from clusterCommonCreate import *
-from clusterCommonCheck import clusterComCheck
+from clusterCommonCheck import clusterComCheck
-import threading
+import threading
class TDTestCase:
@@ -28,7 +28,7 @@ class TDTestCase:
def init(self, conn, logSql):
tdLog.debug(f"start to excute {__file__}")
- tdSql.init(conn.cursor(), True)
+ tdSql.init(conn.cursor(), False)
def create_ctable(self,tsql=None, dbName='dbx',stbName='stb',ctbPrefix='ctb',ctbNum=1):
tsql.execute("use %s" %dbName)
@@ -47,7 +47,7 @@ class TDTestCase:
sql = pre_create
if sql != pre_create:
tsql.execute(sql)
-
+
tdLog.debug("complete to create %d child tables in %s.%s" %(ctbNum, dbName, stbName))
return
@@ -55,7 +55,7 @@ class TDTestCase:
dbname="db_tsbs"
stabname1="readings"
stabname2="diagnostics"
- ctbnamePre1="rct"
+ ctbnamePre1="rct"
ctbnamePre2="dct"
ctbNums=40
self.ctbNums=ctbNums
@@ -73,7 +73,7 @@ class TDTestCase:
self.create_ctable(tsql=tdSql,dbName=dbname,stbName=stabname2,ctbPrefix=ctbnamePre2,ctbNum=ctbNums)
- for j in range(ctbNums):
+ for j in range(ctbNums):
for i in range(rowNUms):
tdSql.execute(
f"insert into rct{j} values ( {ts+i*60000}, {80+i}, {90+i}, {85+i}, {30+i*10}, {1.2*i}, {221+i*2}, {20+i*0.2}, {1500+i*20}, {150+i*2},{5+i} )"
@@ -109,19 +109,19 @@ class TDTestCase:
def tsbsIotQuery(self,tdSql):
tdSql.execute("use db_tsbs")
-
+
# test interval and partition
tdSql.query(" SELECT avg(velocity) as mean_velocity ,name,driver,fleet FROM readings WHERE ts > 1451606400000 AND ts <= 1451606460000 partition BY name,driver,fleet; ")
# print(tdSql.queryResult)
parRows=tdSql.queryRows
tdSql.query(" SELECT avg(velocity) as mean_velocity ,name,driver,fleet FROM readings WHERE ts > 1451606400000 AND ts <= 1451606460000 partition BY name,driver,fleet interval(10m); ")
tdSql.checkRows(parRows)
-
-
- # # test insert into
+
+
+ # # test insert into
# tdSql.execute("create table testsnode (ts timestamp, c1 float,c2 binary(30),c3 binary(30),c4 binary(30)) ;")
# tdSql.query("insert into testsnode SELECT ts,avg(velocity) as mean_velocity,name,driver,fleet FROM readings WHERE ts > 1451606400000 AND ts <= 1451606460000 partition BY name,driver,fleet,ts interval(10m);")
-
+
# tdSql.query("insert into testsnode(ts,c1,c2,c3,c4) SELECT ts,avg(velocity) as mean_velocity,name,driver,fleet FROM readings WHERE ts > 1451606400000 AND ts <= 1451606460000 partition BY name,driver,fleet,ts interval(10m);")
@@ -141,7 +141,7 @@ class TDTestCase:
tdSql.query("SELECT ts,name,driver,current_load,load_capacity FROM (SELECT last(ts) as ts,name,driver, current_load,load_capacity FROM diagnostics WHERE fleet = 'South' partition by name,driver) WHERE current_load>= (0.9 * load_capacity) partition by name ORDER BY name ;")
- # 2 stationary-trucks
+ # 2 stationary-trucks
tdSql.query("select name,driver from (SELECT name,driver,fleet ,avg(velocity) as mean_velocity FROM readings WHERE ts > '2016-01-01T15:07:21Z' AND ts <= '2016-01-01T16:17:21Z' partition BY name,driver,fleet interval(10m) LIMIT 1)")
tdSql.query("select name,driver from (SELECT name,driver,fleet ,avg(velocity) as mean_velocity FROM readings WHERE ts > '2016-01-01T15:07:21Z' AND ts <= '2016-01-01T16:17:21Z' partition BY name,driver,fleet interval(10m) LIMIT 1) WHERE fleet = 'West' AND mean_velocity < 1000 partition BY name")
@@ -156,7 +156,7 @@ class TDTestCase:
tdSql.query("select _wstart as ts,fleet,name,driver,count(mv)/6 as hours_driven from ( select _wstart as ts,fleet,name,driver,avg(velocity) as mv from readings where ts > '2016-01-01T00:00:00Z' and ts < '2016-01-05T00:00:01Z' partition by fleet,name,driver interval(10m)) where ts > '2016-01-01T00:00:00Z' and ts < '2016-01-05T00:00:01Z' partition by fleet,name,driver interval(1d) ;")
- # # 6. avg-daily-driving-session
+ # # 6. avg-daily-driving-session
# #taosc core dumped
# tdSql.execute("create table random_measure2_1 (ts timestamp,ela float, name binary(40))")
# tdSql.query("SELECT ts,diff(mv) AS difka FROM (SELECT ts,name,floor(avg(velocity)/10)/floor(avg(velocity)/10) AS mv FROM readings WHERE name!='' AND ts > '2016-01-01T00:00:00Z' AND ts < '2016-01-05T00:00:01Z' partition by name,ts interval(10m) fill(value,0)) GROUP BY name,ts;")
@@ -166,7 +166,7 @@ class TDTestCase:
# 7. avg-load
tdSql.query("SELECT fleet, model,avg(ml) AS mean_load_percentage FROM (SELECT fleet, model,current_load/load_capacity AS ml FROM diagnostics partition BY name, fleet, model) partition BY fleet, model order by fleet ;")
- # 8. daily-activity
+ # 8. daily-activity
tdSql.query(" SELECT model,ms1 FROM (SELECT _wstart as ts1,model, fleet,avg(status) AS ms1 FROM diagnostics WHERE ts >= '2016-01-01T00:00:00Z' AND ts < '2016-01-05T00:00:01Z' partition by model, fleet interval(10m) fill(value,0)) WHERE ts1 >= '2016-01-01T00:00:00Z' AND ts1 < '2016-01-05T00:00:01Z' AND ms1<1;")
tdSql.query(" SELECT model,ms1 FROM (SELECT _wstart as ts1,model, fleet,avg(status) AS ms1 FROM diagnostics WHERE ts >= '2016-01-01T00:00:00Z' AND ts < '2016-01-05T00:00:01Z' partition by model, fleet interval(10m) ) WHERE ts1 >= '2016-01-01T00:00:00Z' AND ts1 < '2016-01-05T00:00:01Z' AND ms1<1;")
@@ -184,7 +184,7 @@ class TDTestCase:
tdSql.query(" SELECT model,state_changed,count(state_changed) FROM (SELECT model,diff(broken_down) AS state_changed FROM (SELECT _wstart,model,cast(cast(floor(2*(sum(nzs)/count(nzs))) as bool) as int) AS broken_down FROM (SELECT ts,model, cast(cast(status as bool) as int) AS nzs FROM diagnostics WHERE ts >= '2016-01-01T00:00:00Z' AND ts < '2016-01-05T00:00:01Z' ) WHERE ts >= '2016-01-01T00:00:00Z' AND ts < '2016-01-05T00:00:01Z' partition BY model interval(10m)) partition BY model) where state_changed =1 partition BY model,state_changed ;")
-
+
#it's already supported:
# last-loc
tdSql.query("SELECT last_row(ts),latitude,longitude,name,driver FROM readings WHERE fleet='South' and name IS NOT NULL partition BY name,driver order by name ;")
@@ -192,7 +192,7 @@ class TDTestCase:
#2. low-fuel
tdSql.query("SELECT last_row(ts),name,driver,fuel_state,driver FROM diagnostics WHERE fuel_state <= 0.1 AND fleet = 'South' and name IS NOT NULL GROUP BY name,driver order by name;")
-
+
# 3. avg-vs-projected-fuel-consumption
tdSql.query("select avg(fuel_consumption) as avg_fuel_consumption,avg(nominal_fuel_consumption) as nominal_fuel_consumption from readings where velocity > 1 group by fleet")
@@ -213,16 +213,16 @@ class TDTestCase:
'ctbPrefix': 'ctb',
'ctbNum': 1,
}
-
+
dnodeNumbers=int(dnodeNumbers)
mnodeNums=int(mnodeNums)
vnodeNumbers = int(dnodeNumbers-mnodeNums)
-
+
tdSql.query("select * from information_schema.ins_dnodes;")
tdLog.debug(tdSql.queryResult)
clusterComCheck.checkDnodes(dnodeNumbers)
- tdLog.info("create database and stable")
+ tdLog.info("create database and stable")
tdDnodes=cluster.dnodes
stopcount =0
threads=[]
@@ -234,7 +234,7 @@ class TDTestCase:
for tr in threads:
tr.start()
- tdLog.info("Take turns stopping %s "%stopRole)
+ tdLog.info("Take turns stopping %s "%stopRole)
while stopcount < restartNumbers:
tdLog.info(" restart loop: %d"%stopcount )
if stopRole == "mnode":
@@ -242,7 +242,7 @@ class TDTestCase:
tdDnodes[i].stoptaosd()
# sleep(10)
tdDnodes[i].starttaosd()
- # sleep(10)
+ # sleep(10)
elif stopRole == "vnode":
for i in range(vnodeNumbers):
tdDnodes[i+mnodeNums].stoptaosd()
@@ -254,7 +254,7 @@ class TDTestCase:
tdDnodes[i].stoptaosd()
# sleep(10)
tdDnodes[i].starttaosd()
- # sleep(10)
+ # sleep(10)
# dnodeNumbers don't include database of schema
if clusterComCheck.checkDnodes(dnodeNumbers):
@@ -265,12 +265,12 @@ class TDTestCase:
tdLog.exit("one or more of dnodes failed to start ")
# self.check3mnode()
stopcount+=1
-
+
for tr in threads:
tr.join()
- def run(self):
+ def run(self):
tdLog.printNoPrefix("==========step1:create database and table,insert data ==============")
self.createCluster()
self.prepareData()
diff --git a/tests/system-test/2-query/query_cols_tags_and_or.py b/tests/system-test/2-query/query_cols_tags_and_or.py
index e0fb986d79d8491bf2bd23e82ccde85914c76541..af3fbb83c070202368f317b119377035ac133e16 100644
--- a/tests/system-test/2-query/query_cols_tags_and_or.py
+++ b/tests/system-test/2-query/query_cols_tags_and_or.py
@@ -19,7 +19,7 @@ class TDTestCase:
def init(self, conn, logSql):
## add for TD-6672
tdLog.debug("start to execute %s" % __file__)
- tdSql.init(conn.cursor(), logSql)
+ tdSql.init(conn.cursor(), False)
def insertData(self, tb_name):
insert_sql_list = [f'insert into {tb_name} values ("2021-01-01 12:00:00", 1, 1, 1, 3, 1.1, 1.1, "binary", "nchar", true, 1, 2, 3, 4)',
@@ -37,17 +37,17 @@ class TDTestCase:
for sql in insert_sql_list:
tdSql.execute(sql)
- def initTb(self):
- tdCom.cleanTb()
- tb_name = tdCom.getLongName(8, "letters")
+ def initTb(self, dbname="db"):
+ tdCom.cleanTb(dbname)
+ tb_name = f'{dbname}.{tdCom.getLongName(8, "letters")}'
tdSql.execute(
f"CREATE TABLE {tb_name} (ts timestamp, c1 tinyint, c2 smallint, c3 int, c4 bigint, c5 float, c6 double, c7 binary(100), c8 nchar(200), c9 bool, c10 tinyint unsigned, c11 smallint unsigned, c12 int unsigned, c13 bigint unsigned)")
self.insertData(tb_name)
return tb_name
- def initStb(self, count=5):
- tdCom.cleanTb()
- tb_name = tdCom.getLongName(8, "letters")
+ def initStb(self, count=5, dbname="db"):
+ tdCom.cleanTb(dbname)
+ tb_name = f'{dbname}.{tdCom.getLongName(8, "letters")}'
tdSql.execute(
f"CREATE TABLE {tb_name} (ts timestamp, c1 tinyint, c2 smallint, c3 int, c4 bigint, c5 float, c6 double, c7 binary(100), c8 nchar(200), c9 bool, c10 tinyint unsigned, c11 smallint unsigned, c12 int unsigned, c13 bigint unsigned) tags (t1 tinyint, t2 smallint, t3 int, t4 bigint, t5 float, t6 double, t7 binary(100), t8 nchar(200), t9 bool, t10 tinyint unsigned, t11 smallint unsigned, t12 int unsigned, t13 bigint unsigned)")
for i in range(1, count+1):
@@ -56,9 +56,10 @@ class TDTestCase:
self.insertData(f'{tb_name}_sub_{i}')
return tb_name
- def initTwoStb(self):
- tdCom.cleanTb()
- tb_name = tdCom.getLongName(8, "letters")
+ def initTwoStb(self, dbname="db"):
+ tdCom.cleanTb(dbname)
+ tb_name = f'{dbname}.{tdCom.getLongName(8, "letters")}'
+ # tb_name = tdCom.getLongName(8, "letters")
tb_name1 = f'{tb_name}1'
tb_name2 = f'{tb_name}2'
tdSql.execute(
diff --git a/tests/system-test/2-query/round.py b/tests/system-test/2-query/round.py
index 551e225a4d02025780b4238e2079b70249dcdd5a..1d69d3c9afa1d7fffc3b8eac80c2b648a54bc74e 100644
--- a/tests/system-test/2-query/round.py
+++ b/tests/system-test/2-query/round.py
@@ -8,49 +8,46 @@ from util.sql import *
from util.cases import *
class TDTestCase:
- updatecfgDict = {'debugFlag': 143 ,"cDebugFlag":143,"uDebugFlag":143 ,"rpcDebugFlag":143 , "tmrDebugFlag":143 ,
- "jniDebugFlag":143 ,"simDebugFlag":143,"dDebugFlag":143, "dDebugFlag":143,"vDebugFlag":143,"mDebugFlag":143,"qDebugFlag":143,
- "wDebugFlag":143,"sDebugFlag":143,"tsdbDebugFlag":143,"tqDebugFlag":143 ,"fsDebugFlag":143 ,"udfDebugFlag":143}
def init(self, conn, logSql):
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor())
- def prepare_datas(self):
+ def prepare_datas(self, dbname="db"):
tdSql.execute(
- '''create table stb1
+ f'''create table {dbname}.stb1
(ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp)
tags (t1 int)
'''
)
tdSql.execute(
- '''
- create table t1
+ f'''
+ create table {dbname}.t1
(ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp)
'''
)
for i in range(4):
- tdSql.execute(f'create table ct{i+1} using stb1 tags ( {i+1} )')
+ tdSql.execute(f'create table {dbname}.ct{i+1} using {dbname}.stb1 tags ( {i+1} )')
for i in range(9):
tdSql.execute(
- f"insert into ct1 values ( now()-{i*10}s, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )"
+ f"insert into {dbname}.ct1 values ( now()-{i*10}s, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )"
)
tdSql.execute(
- f"insert into ct4 values ( now()-{i*90}d, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )"
+ f"insert into {dbname}.ct4 values ( now()-{i*90}d, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )"
)
- tdSql.execute("insert into ct1 values (now()-45s, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar0', now()+8a )")
- tdSql.execute("insert into ct1 values (now()+10s, 9, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )")
- tdSql.execute("insert into ct1 values (now()+15s, 9, -99999, -999, -99, -9.99, NULL, 1, 'binary9', 'nchar9', now()+9a )")
- tdSql.execute("insert into ct1 values (now()+20s, 9, -99999, -999, NULL, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )")
+ tdSql.execute(f"insert into {dbname}.ct1 values (now()-45s, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar0', now()+8a )")
+ tdSql.execute(f"insert into {dbname}.ct1 values (now()+10s, 9, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )")
+ tdSql.execute(f"insert into {dbname}.ct1 values (now()+15s, 9, -99999, -999, -99, -9.99, NULL, 1, 'binary9', 'nchar9', now()+9a )")
+ tdSql.execute(f"insert into {dbname}.ct1 values (now()+20s, 9, -99999, -999, NULL, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )")
- tdSql.execute("insert into ct4 values (now()-810d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
- tdSql.execute("insert into ct4 values (now()-400d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
- tdSql.execute("insert into ct4 values (now()+90d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
+ tdSql.execute(f"insert into {dbname}.ct4 values (now()-810d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
+ tdSql.execute(f"insert into {dbname}.ct4 values (now()-400d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
+ tdSql.execute(f"insert into {dbname}.ct4 values (now()+90d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
tdSql.execute(
- f'''insert into t1 values
+ f'''insert into {dbname}.t1 values
( '2020-04-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( '2020-10-21 01:01:01.000', 1, 11111, 111, 11, 1.11, 11.11, 1, "binary1", "nchar1", now()+1a )
( '2020-12-31 01:01:01.000', 2, 22222, 222, 22, 2.22, 22.22, 0, "binary2", "nchar2", now()+2a )
@@ -94,68 +91,68 @@ class TDTestCase:
else:
tdLog.info("round value check pass , it work as expected ,sql is \"%s\" "%round_query )
- def test_errors(self):
+ def test_errors(self, dbname="db"):
error_sql_lists = [
- "select round from t1",
- # "select round(-+--+c1) from t1",
- # "select +-round(c1) from t1",
- # "select ++-round(c1) from t1",
- # "select ++--round(c1) from t1",
- # "select - -round(c1)*0 from t1",
- # "select round(tbname+1) from t1 ",
- "select round(123--123)==1 from t1",
- "select round(c1) as 'd1' from t1",
- "select round(c1 ,c2 ) from t1",
- "select round(c1 ,NULL) from t1",
- "select round(,) from t1;",
- "select round(round(c1) ab from t1)",
- "select round(c1) as int from t1",
- "select round from stb1",
- # "select round(-+--+c1) from stb1",
- # "select +-round(c1) from stb1",
- # "select ++-round(c1) from stb1",
- # "select ++--round(c1) from stb1",
- # "select - -round(c1)*0 from stb1",
- # "select round(tbname+1) from stb1 ",
- "select round(123--123)==1 from stb1",
- "select round(c1) as 'd1' from stb1",
- "select round(c1 ,c2 ) from stb1",
- "select round(c1 ,NULL) from stb1",
- "select round(,) from stb1;",
- "select round(round(c1) ab from stb1)",
- "select round(c1) as int from stb1"
+ f"select round from {dbname}.t1",
+ # f"select round(-+--+c1) from {dbname}.t1",
+ # f"select +-round(c1) from {dbname}.t1",
+ # f"select ++-round(c1) from {dbname}.t1",
+ # f"select ++--round(c1) from {dbname}.t1",
+ # f"select - -round(c1)*0 from {dbname}.t1",
+ # f"select round(tbname+1) from {dbname}.t1 ",
+ f"select round(123--123)==1 from {dbname}.t1",
+ f"select round(c1) as 'd1' from {dbname}.t1",
+ f"select round(c1 ,c2 ) from {dbname}.t1",
+ f"select round(c1 ,NULL) from {dbname}.t1",
+ f"select round(,) from {dbname}.t1;",
+ f"select round(round(c1) ab from {dbname}.t1)",
+ f"select round(c1) as int from {dbname}.t1",
+ f"select round from {dbname}.stb1",
+ # f"select round(-+--+c1) from {dbname}.stb1",
+ # f"select +-round(c1) from {dbname}.stb1",
+ # f"select ++-round(c1) from {dbname}.stb1",
+ # f"select ++--round(c1) from {dbname}.stb1",
+ # f"select - -round(c1)*0 from {dbname}.stb1",
+ # f"select round(tbname+1) from {dbname}.stb1 ",
+ f"select round(123--123)==1 from {dbname}.stb1",
+ f"select round(c1) as 'd1' from {dbname}.stb1",
+ f"select round(c1 ,c2 ) from {dbname}.stb1",
+ f"select round(c1 ,NULL) from {dbname}.stb1",
+ f"select round(,) from {dbname}.stb1;",
+ f"select round(round(c1) ab from {dbname}.stb1)",
+ f"select round(c1) as int from {dbname}.stb1"
]
for error_sql in error_sql_lists:
tdSql.error(error_sql)
- def support_types(self):
+ def support_types(self, dbname="db"):
type_error_sql_lists = [
- "select round(ts) from t1" ,
- "select round(c7) from t1",
- "select round(c8) from t1",
- "select round(c9) from t1",
- "select round(ts) from ct1" ,
- "select round(c7) from ct1",
- "select round(c8) from ct1",
- "select round(c9) from ct1",
- "select round(ts) from ct3" ,
- "select round(c7) from ct3",
- "select round(c8) from ct3",
- "select round(c9) from ct3",
- "select round(ts) from ct4" ,
- "select round(c7) from ct4",
- "select round(c8) from ct4",
- "select round(c9) from ct4",
- "select round(ts) from stb1" ,
- "select round(c7) from stb1",
- "select round(c8) from stb1",
- "select round(c9) from stb1" ,
-
- "select round(ts) from stbbb1" ,
- "select round(c7) from stbbb1",
-
- "select round(ts) from tbname",
- "select round(c9) from tbname"
+ f"select round(ts) from {dbname}.t1" ,
+ f"select round(c7) from {dbname}.t1",
+ f"select round(c8) from {dbname}.t1",
+ f"select round(c9) from {dbname}.t1",
+ f"select round(ts) from {dbname}.ct1" ,
+ f"select round(c7) from {dbname}.ct1",
+ f"select round(c8) from {dbname}.ct1",
+ f"select round(c9) from {dbname}.ct1",
+ f"select round(ts) from {dbname}.ct3" ,
+ f"select round(c7) from {dbname}.ct3",
+ f"select round(c8) from {dbname}.ct3",
+ f"select round(c9) from {dbname}.ct3",
+ f"select round(ts) from {dbname}.ct4" ,
+ f"select round(c7) from {dbname}.ct4",
+ f"select round(c8) from {dbname}.ct4",
+ f"select round(c9) from {dbname}.ct4",
+ f"select round(ts) from {dbname}.stb1" ,
+ f"select round(c7) from {dbname}.stb1",
+ f"select round(c8) from {dbname}.stb1",
+ f"select round(c9) from {dbname}.stb1" ,
+
+ f"select round(ts) from {dbname}.stbbb1" ,
+ f"select round(c7) from {dbname}.stbbb1",
+
+ f"select round(ts) from {dbname}.tbname",
+ f"select round(c9) from {dbname}.tbname"
]
@@ -164,127 +161,127 @@ class TDTestCase:
type_sql_lists = [
- "select round(c1) from t1",
- "select round(c2) from t1",
- "select round(c3) from t1",
- "select round(c4) from t1",
- "select round(c5) from t1",
- "select round(c6) from t1",
-
- "select round(c1) from ct1",
- "select round(c2) from ct1",
- "select round(c3) from ct1",
- "select round(c4) from ct1",
- "select round(c5) from ct1",
- "select round(c6) from ct1",
-
- "select round(c1) from ct3",
- "select round(c2) from ct3",
- "select round(c3) from ct3",
- "select round(c4) from ct3",
- "select round(c5) from ct3",
- "select round(c6) from ct3",
-
- "select round(c1) from stb1",
- "select round(c2) from stb1",
- "select round(c3) from stb1",
- "select round(c4) from stb1",
- "select round(c5) from stb1",
- "select round(c6) from stb1",
-
- "select round(c6) as alisb from stb1",
- "select round(c6) alisb from stb1",
+ f"select round(c1) from {dbname}.t1",
+ f"select round(c2) from {dbname}.t1",
+ f"select round(c3) from {dbname}.t1",
+ f"select round(c4) from {dbname}.t1",
+ f"select round(c5) from {dbname}.t1",
+ f"select round(c6) from {dbname}.t1",
+
+ f"select round(c1) from {dbname}.ct1",
+ f"select round(c2) from {dbname}.ct1",
+ f"select round(c3) from {dbname}.ct1",
+ f"select round(c4) from {dbname}.ct1",
+ f"select round(c5) from {dbname}.ct1",
+ f"select round(c6) from {dbname}.ct1",
+
+ f"select round(c1) from {dbname}.ct3",
+ f"select round(c2) from {dbname}.ct3",
+ f"select round(c3) from {dbname}.ct3",
+ f"select round(c4) from {dbname}.ct3",
+ f"select round(c5) from {dbname}.ct3",
+ f"select round(c6) from {dbname}.ct3",
+
+ f"select round(c1) from {dbname}.stb1",
+ f"select round(c2) from {dbname}.stb1",
+ f"select round(c3) from {dbname}.stb1",
+ f"select round(c4) from {dbname}.stb1",
+ f"select round(c5) from {dbname}.stb1",
+ f"select round(c6) from {dbname}.stb1",
+
+ f"select round(c6) as alisb from {dbname}.stb1",
+ f"select round(c6) alisb from {dbname}.stb1",
]
for type_sql in type_sql_lists:
tdSql.query(type_sql)
- def basic_round_function(self):
+ def basic_round_function(self, dbname="db"):
# basic query
- tdSql.query("select c1 from ct3")
+ tdSql.query(f"select c1 from {dbname}.ct3")
tdSql.checkRows(0)
- tdSql.query("select c1 from t1")
+ tdSql.query(f"select c1 from {dbname}.t1")
tdSql.checkRows(12)
- tdSql.query("select c1 from stb1")
+ tdSql.query(f"select c1 from {dbname}.stb1")
tdSql.checkRows(25)
# used for empty table , ct3 is empty
- tdSql.query("select round(c1) from ct3")
+ tdSql.query(f"select round(c1) from {dbname}.ct3")
tdSql.checkRows(0)
- tdSql.query("select round(c2) from ct3")
+ tdSql.query(f"select round(c2) from {dbname}.ct3")
tdSql.checkRows(0)
- tdSql.query("select round(c3) from ct3")
+ tdSql.query(f"select round(c3) from {dbname}.ct3")
tdSql.checkRows(0)
- tdSql.query("select round(c4) from ct3")
+ tdSql.query(f"select round(c4) from {dbname}.ct3")
tdSql.checkRows(0)
- tdSql.query("select round(c5) from ct3")
+ tdSql.query(f"select round(c5) from {dbname}.ct3")
tdSql.checkRows(0)
- tdSql.query("select round(c6) from ct3")
+ tdSql.query(f"select round(c6) from {dbname}.ct3")
# used for regular table
- tdSql.query("select round(c1) from t1")
+ tdSql.query(f"select round(c1) from {dbname}.t1")
tdSql.checkData(0, 0, None)
tdSql.checkData(1 , 0, 1)
tdSql.checkData(3 , 0, 3)
tdSql.checkData(5 , 0, None)
- tdSql.query("select c1, c2, c3 , c4, c5 from t1")
+ tdSql.query(f"select c1, c2, c3 , c4, c5 from {dbname}.t1")
tdSql.checkData(1, 4, 1.11000)
tdSql.checkData(3, 3, 33)
tdSql.checkData(5, 4, None)
- tdSql.query("select ts,c1, c2, c3 , c4, c5 from t1")
+ tdSql.query(f"select ts,c1, c2, c3 , c4, c5 from {dbname}.t1")
tdSql.checkData(1, 5, 1.11000)
tdSql.checkData(3, 4, 33)
tdSql.checkData(5, 5, None)
- self.check_result_auto( "select c1, c2, c3 , c4, c5 from t1", "select (c1), round(c2) ,round(c3), round(c4), round(c5) from t1")
+ self.check_result_auto( f"select c1, c2, c3 , c4, c5 from {dbname}.t1", f"select (c1), round(c2) ,round(c3), round(c4), round(c5) from {dbname}.t1")
# used for sub table
- tdSql.query("select round(c1) from ct1")
+ tdSql.query(f"select round(c1) from {dbname}.ct1")
tdSql.checkData(0, 0, 8)
tdSql.checkData(1 , 0, 7)
tdSql.checkData(3 , 0, 5)
tdSql.checkData(5 , 0, 4)
- tdSql.query("select round(c1) from ct1")
- self.check_result_auto( "select c1, c2, c3 , c4, c5 from ct1", "select (c1), round(c2) ,round(c3), round(c4), round(c5) from ct1")
- self.check_result_auto("select round(round(round(round(round(round(round(round(round(round(c1)))))))))) nest_col_func from ct1;","select c1 from ct1" )
+ tdSql.query(f"select round(c1) from {dbname}.ct1")
+ self.check_result_auto( f"select c1, c2, c3 , c4, c5 from {dbname}.ct1", f"select (c1), round(c2) ,round(c3), round(c4), round(c5) from {dbname}.ct1")
+ self.check_result_auto(f"select round(round(round(round(round(round(round(round(round(round(c1)))))))))) nest_col_func from {dbname}.ct1;",f"select c1 from {dbname}.ct1" )
# used for stable table
- tdSql.query("select round(c1) from stb1")
+ tdSql.query(f"select round(c1) from {dbname}.stb1")
tdSql.checkRows(25)
- self.check_result_auto( "select c1, c2, c3 , c4, c5 from ct4 ", "select (c1), round(c2) ,round(c3), round(c4), round(c5) from ct4")
- self.check_result_auto("select round(round(round(round(round(round(round(round(round(round(c1)))))))))) nest_col_func from ct4;" , "select c1 from ct4" )
+ self.check_result_auto( f"select c1, c2, c3 , c4, c5 from {dbname}.ct4 ", f"select (c1), round(c2) ,round(c3), round(c4), round(c5) from {dbname}.ct4")
+ self.check_result_auto(f"select round(round(round(round(round(round(round(round(round(round(c1)))))))))) nest_col_func from {dbname}.ct4;" , f"select c1 from {dbname}.ct4" )
# used for not exists table
- tdSql.error("select round(c1) from stbbb1")
- tdSql.error("select round(c1) from tbname")
- tdSql.error("select round(c1) from ct5")
+ tdSql.error(f"select round(c1) from {dbname}.stbbb1")
+ tdSql.error(f"select round(c1) from {dbname}.tbname")
+ tdSql.error(f"select round(c1) from {dbname}.ct5")
# mix with common col
- tdSql.query("select c1, round(c1) from ct1")
+ tdSql.query(f"select c1, round(c1) from {dbname}.ct1")
tdSql.checkData(0 , 0 ,8)
tdSql.checkData(0 , 1 ,8)
tdSql.checkData(4 , 0 ,0)
tdSql.checkData(4 , 1 ,0)
- tdSql.query("select c1, round(c1) from ct4")
+ tdSql.query(f"select c1, round(c1) from {dbname}.ct4")
tdSql.checkData(0 , 0 , None)
tdSql.checkData(0 , 1 ,None)
tdSql.checkData(4 , 0 ,5)
tdSql.checkData(4 , 1 ,5)
tdSql.checkData(5 , 0 ,None)
tdSql.checkData(5 , 1 ,None)
- tdSql.query("select c1, round(c1) from ct4 ")
+ tdSql.query(f"select c1, round(c1) from {dbname}.ct4 ")
tdSql.checkData(0 , 0 ,None)
tdSql.checkData(0 , 1 ,None)
tdSql.checkData(4 , 0 ,5)
tdSql.checkData(4 , 1 ,5)
# mix with common functions
- tdSql.query("select c1, round(c1),c5, round(c5) from ct4 ")
+ tdSql.query(f"select c1, round(c1),c5, round(c5) from {dbname}.ct4 ")
tdSql.checkData(0 , 0 ,None)
tdSql.checkData(0 , 1 ,None)
tdSql.checkData(0 , 2 ,None)
@@ -300,34 +297,34 @@ class TDTestCase:
tdSql.checkData(6 , 2 ,4.44000)
tdSql.checkData(6 , 3 ,4.00000)
- tdSql.query("select c1, round(c1),c5, round(c5) from stb1 ")
+ tdSql.query(f"select c1, round(c1),c5, round(c5) from {dbname}.stb1 ")
# mix with agg functions , not support
- tdSql.error("select c1, round(c1),c5, count(c5) from stb1 ")
- tdSql.error("select c1, round(c1),c5, count(c5) from ct1 ")
- tdSql.error("select round(c1), count(c5) from stb1 ")
- tdSql.error("select round(c1), count(c5) from ct1 ")
- tdSql.error("select c1, count(c5) from ct1 ")
- tdSql.error("select c1, count(c5) from stb1 ")
+ tdSql.error(f"select c1, round(c1),c5, count(c5) from {dbname}.stb1 ")
+ tdSql.error(f"select c1, round(c1),c5, count(c5) from {dbname}.ct1 ")
+ tdSql.error(f"select round(c1), count(c5) from {dbname}.stb1 ")
+ tdSql.error(f"select round(c1), count(c5) from {dbname}.ct1 ")
+ tdSql.error(f"select c1, count(c5) from {dbname}.ct1 ")
+ tdSql.error(f"select c1, count(c5) from {dbname}.stb1 ")
# agg functions mix with agg functions
- tdSql.query("select max(c5), count(c5) from stb1")
- tdSql.query("select max(c5), count(c5) from ct1")
+ tdSql.query(f"select max(c5), count(c5) from {dbname}.stb1")
+ tdSql.query(f"select max(c5), count(c5) from {dbname}.ct1")
# bug fix for count
- tdSql.query("select count(c1) from ct4 ")
+ tdSql.query(f"select count(c1) from {dbname}.ct4 ")
tdSql.checkData(0,0,9)
- tdSql.query("select count(*) from ct4 ")
+ tdSql.query(f"select count(*) from {dbname}.ct4 ")
tdSql.checkData(0,0,12)
- tdSql.query("select count(c1) from stb1 ")
+ tdSql.query(f"select count(c1) from {dbname}.stb1 ")
tdSql.checkData(0,0,22)
- tdSql.query("select count(*) from stb1 ")
+ tdSql.query(f"select count(*) from {dbname}.stb1 ")
tdSql.checkData(0,0,25)
# bug fix for compute
- tdSql.query("select c1, abs(c1) -0 ,round(c1)-0 from ct4 ")
+ tdSql.query(f"select c1, abs(c1) -0 ,round(c1)-0 from {dbname}.ct4 ")
tdSql.checkData(0, 0, None)
tdSql.checkData(0, 1, None)
tdSql.checkData(0, 2, None)
@@ -335,7 +332,7 @@ class TDTestCase:
tdSql.checkData(1, 1, 8.000000000)
tdSql.checkData(1, 2, 8.000000000)
- tdSql.query(" select c1, abs(c1) -0 ,round(c1-0.1)-0.1 from ct4")
+ tdSql.query(f"select c1, abs(c1) -0 ,round(c1-0.1)-0.1 from {dbname}.ct4")
tdSql.checkData(0, 0, None)
tdSql.checkData(0, 1, None)
tdSql.checkData(0, 2, None)
@@ -343,9 +340,8 @@ class TDTestCase:
tdSql.checkData(1, 1, 8.000000000)
tdSql.checkData(1, 2, 7.900000000)
- def abs_func_filter(self):
- tdSql.execute("use db")
- tdSql.query("select c1, abs(c1) -0 ,ceil(c1-0.1)-0 ,floor(c1+0.1)-0.1 ,ceil(log(c1,2)-0.5) from ct4 where c1>5 ")
+ def abs_func_filter(self, dbname="db"):
+ tdSql.query(f"select c1, abs(c1) -0 ,ceil(c1-0.1)-0 ,floor(c1+0.1)-0.1 ,ceil(log(c1,2)-0.5) from {dbname}.ct4 where c1>5 ")
tdSql.checkRows(3)
tdSql.checkData(0,0,8)
tdSql.checkData(0,1,8.000000000)
@@ -353,7 +349,7 @@ class TDTestCase:
tdSql.checkData(0,3,7.900000000)
tdSql.checkData(0,4,3.000000000)
- tdSql.query("select c1, abs(c1) -0 ,ceil(c1-0.1)-0 ,floor(c1+0.1)-0.1 ,ceil(log(c1,2)-0.5) from ct4 where c1=5 ")
+ tdSql.query(f"select c1, abs(c1) -0 ,ceil(c1-0.1)-0 ,floor(c1+0.1)-0.1 ,ceil(log(c1,2)-0.5) from {dbname}.ct4 where c1=5 ")
tdSql.checkRows(1)
tdSql.checkData(0,0,5)
tdSql.checkData(0,1,5.000000000)
@@ -361,7 +357,7 @@ class TDTestCase:
tdSql.checkData(0,3,4.900000000)
tdSql.checkData(0,4,2.000000000)
- tdSql.query("select c1, abs(c1) -0 ,ceil(c1-0.1)-0 ,floor(c1+0.1)-0.1 ,ceil(log(c1,2)-0.5) from ct4 where c1=5 ")
+ tdSql.query(f"select c1, abs(c1) -0 ,ceil(c1-0.1)-0 ,floor(c1+0.1)-0.1 ,ceil(log(c1,2)-0.5) from {dbname}.ct4 where c1=5 ")
tdSql.checkRows(1)
tdSql.checkData(0,0,5)
tdSql.checkData(0,1,5.000000000)
@@ -369,7 +365,7 @@ class TDTestCase:
tdSql.checkData(0,3,4.900000000)
tdSql.checkData(0,4,2.000000000)
- tdSql.query("select c1,c2 , abs(c1) -0 ,ceil(c1-0.1)-0 ,floor(c1+0.1)-0.1 ,ceil(log(c1,2)-0.5) , round(abs(c1))-0.5 from ct4 where c1>log(c1,2) limit 1 ")
+ tdSql.query(f"select c1,c2 , abs(c1) -0 ,ceil(c1-0.1)-0 ,floor(c1+0.1)-0.1 ,ceil(log(c1,2)-0.5) , round(abs(c1))-0.5 from {dbname}.ct4 where c1>log(c1,2) limit 1 ")
tdSql.checkRows(1)
tdSql.checkData(0,0,8)
tdSql.checkData(0,1,88888)
@@ -382,44 +378,42 @@ class TDTestCase:
def round_Arithmetic(self):
pass
- def check_boundary_values(self):
+ def check_boundary_values(self, dbname="bound_test"):
- tdSql.execute("drop database if exists bound_test")
- tdSql.execute("create database if not exists bound_test")
- time.sleep(3)
- tdSql.execute("use bound_test")
+ tdSql.execute(f"drop database if exists {dbname}")
+ tdSql.execute(f"create database if not exists {dbname}")
tdSql.execute(
- "create table stb_bound (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(32),c9 nchar(32), c10 timestamp) tags (t1 int);"
+ f"create table {dbname}.stb_bound (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(32),c9 nchar(32), c10 timestamp) tags (t1 int);"
)
- tdSql.execute(f'create table sub1_bound using stb_bound tags ( 1 )')
+ tdSql.execute(f'create table {dbname}.sub1_bound using {dbname}.stb_bound tags ( 1 )')
tdSql.execute(
- f"insert into sub1_bound values ( now()-1s, 2147483647, 9223372036854775807, 32767, 127, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
+ f"insert into {dbname}.sub1_bound values ( now()-1s, 2147483647, 9223372036854775807, 32767, 127, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
)
tdSql.execute(
- f"insert into sub1_bound values ( now(), 2147483646, 9223372036854775806, 32766, 126, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
+ f"insert into {dbname}.sub1_bound values ( now(), 2147483646, 9223372036854775806, 32766, 126, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
)
tdSql.execute(
- f"insert into sub1_bound values ( now(), -2147483646, -9223372036854775806, -32766, -126, -3.40E+38, -1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
+ f"insert into {dbname}.sub1_bound values ( now(), -2147483646, -9223372036854775806, -32766, -126, -3.40E+38, -1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
)
tdSql.execute(
- f"insert into sub1_bound values ( now(), 2147483643, 9223372036854775803, 32763, 123, 3.39E+38, 1.69e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
+ f"insert into {dbname}.sub1_bound values ( now(), 2147483643, 9223372036854775803, 32763, 123, 3.39E+38, 1.69e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
)
tdSql.execute(
- f"insert into sub1_bound values ( now(), -2147483643, -9223372036854775803, -32763, -123, -3.39E+38, -1.69e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
+ f"insert into {dbname}.sub1_bound values ( now(), -2147483643, -9223372036854775803, -32763, -123, -3.39E+38, -1.69e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
)
tdSql.error(
- f"insert into sub1_bound values ( now()+1s, 2147483648, 9223372036854775808, 32768, 128, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
+ f"insert into {dbname}.sub1_bound values ( now()+1s, 2147483648, 9223372036854775808, 32768, 128, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
)
- self.check_result_auto( "select c1, c2, c3 , c4, c5 ,c6 from sub1_bound ", "select round(c1), round(c2) ,round(c3), round(c4), round(c5) ,round(c6) from sub1_bound")
- self.check_result_auto( "select c1, c2, c3 , c3, c2 ,c1 from sub1_bound ", "select round(c1), round(c2) ,round(c3), round(c3), round(c2) ,round(c1) from sub1_bound")
- self.check_result_auto("select round(round(round(round(round(round(round(round(round(round(c1)))))))))) nest_col_func from sub1_bound;" , "select round(c1) from sub1_bound" )
+ self.check_result_auto( f"select c1, c2, c3 , c4, c5 ,c6 from {dbname}.sub1_bound ", f"select round(c1), round(c2) ,round(c3), round(c4), round(c5) ,round(c6) from {dbname}.sub1_bound")
+ self.check_result_auto( f"select c1, c2, c3 , c3, c2 ,c1 from {dbname}.sub1_bound ", f"select round(c1), round(c2) ,round(c3), round(c3), round(c2) ,round(c1) from {dbname}.sub1_bound")
+ self.check_result_auto(f"select round(round(round(round(round(round(round(round(round(round(c1)))))))))) nest_col_func from {dbname}.sub1_bound;" , f"select round(c1) from {dbname}.sub1_bound" )
# check basic elem for table per row
- tdSql.query("select round(c1+0.2) ,round(c2) , round(c3+0.3) , round(c4-0.3), round(c5/2), round(c6/2) from sub1_bound ")
+ tdSql.query(f"select round(c1+0.2) ,round(c2) , round(c3+0.3) , round(c4-0.3), round(c5/2), round(c6/2) from {dbname}.sub1_bound ")
tdSql.checkData(0, 0, 2147483647.000000000)
tdSql.checkData(0, 2, 32767.000000000)
tdSql.checkData(0, 3, 127.000000000)
@@ -430,19 +424,18 @@ class TDTestCase:
tdSql.checkData(4, 3, -123.000000000)
tdSql.checkData(4, 4, -169499995645668991474575059260979281920.000000000)
- self.check_result_auto("select c1+1 ,c2 , c3*1 , c4/2, c5/2, c6 from sub1_bound" ,"select round(c1+1) ,round(c2) , round(c3*1) , round(c4/2), round(c5)/2, round(c6) from sub1_bound ")
+ self.check_result_auto(f"select c1+1 ,c2 , c3*1 , c4/2, c5/2, c6 from {dbname}.sub1_bound" ,f"select round(c1+1) ,round(c2) , round(c3*1) , round(c4/2), round(c5)/2, round(c6) from {dbname}.sub1_bound ")
- def support_super_table_test(self):
- tdSql.execute(" use db ")
- self.check_result_auto( " select c5 from stb1 order by ts " , "select round(c5) from stb1 order by ts" )
- self.check_result_auto( " select c5 from stb1 order by tbname " , "select round(c5) from stb1 order by tbname" )
- self.check_result_auto( " select c5 from stb1 where c1 > 0 order by tbname " , "select round(c5) from stb1 where c1 > 0 order by tbname" )
- self.check_result_auto( " select c5 from stb1 where c1 > 0 order by tbname " , "select round(c5) from stb1 where c1 > 0 order by tbname" )
+ def support_super_table_test(self, dbname="db"):
+ self.check_result_auto( f"select c5 from {dbname}.stb1 order by ts " , f"select round(c5) from {dbname}.stb1 order by ts" )
+ self.check_result_auto( f"select c5 from {dbname}.stb1 order by tbname " , f"select round(c5) from {dbname}.stb1 order by tbname" )
+ self.check_result_auto( f"select c5 from {dbname}.stb1 where c1 > 0 order by tbname " , f"select round(c5) from {dbname}.stb1 where c1 > 0 order by tbname" )
+ self.check_result_auto( f"select c5 from {dbname}.stb1 where c1 > 0 order by tbname " , f"select round(c5) from {dbname}.stb1 where c1 > 0 order by tbname" )
- self.check_result_auto( " select t1,c5 from stb1 order by ts " , "select round(t1), round(c5) from stb1 order by ts" )
- self.check_result_auto( " select t1,c5 from stb1 order by tbname " , "select round(t1) ,round(c5) from stb1 order by tbname" )
- self.check_result_auto( " select t1,c5 from stb1 where c1 > 0 order by tbname " , "select round(t1) ,round(c5) from stb1 where c1 > 0 order by tbname" )
- self.check_result_auto( " select t1,c5 from stb1 where c1 > 0 order by tbname " , "select round(t1) , round(c5) from stb1 where c1 > 0 order by tbname" )
+ self.check_result_auto( f"select t1,c5 from {dbname}.stb1 order by ts " , f"select round(t1), round(c5) from {dbname}.stb1 order by ts" )
+ self.check_result_auto( f"select t1,c5 from {dbname}.stb1 order by tbname " , f"select round(t1) ,round(c5) from {dbname}.stb1 order by tbname" )
+ self.check_result_auto( f"select t1,c5 from {dbname}.stb1 where c1 > 0 order by tbname " , f"select round(t1) ,round(c5) from {dbname}.stb1 where c1 > 0 order by tbname" )
+ self.check_result_auto( f"select t1,c5 from {dbname}.stb1 where c1 > 0 order by tbname " , f"select round(t1) , round(c5) from {dbname}.stb1 where c1 > 0 order by tbname" )
pass
diff --git a/tests/system-test/2-query/rtrim.py b/tests/system-test/2-query/rtrim.py
index 30624792cc33866a19c0ec1a31594cdfa438ffcf..80307e8534787889b080baa0c25a32b638c49461 100644
--- a/tests/system-test/2-query/rtrim.py
+++ b/tests/system-test/2-query/rtrim.py
@@ -120,16 +120,16 @@ class TDTestCase:
return sqls
- def __test_current(self): # sourcery skip: use-itertools-product
+ def __test_current(self, dbname="db"): # sourcery skip: use-itertools-product
tdLog.printNoPrefix("==========current sql condition check , must return query ok==========")
- tbname = ["ct1", "ct2", "ct4", "t1", "stb1"]
+ tbname = [f"{dbname}.ct1", f"{dbname}.ct2", f"{dbname}.ct4", f"{dbname}.t1", f"{dbname}.stb1"]
for tb in tbname:
self.__rtrim_check(tb)
tdLog.printNoPrefix(f"==========current sql condition check in {tb} over==========")
- def __test_error(self):
+ def __test_error(self, dbname="db"):
tdLog.printNoPrefix("==========err sql condition check , must return error==========")
- tbname = ["ct1", "ct2", "ct4", "t1", "stb1"]
+ tbname = [f"{dbname}.ct1", f"{dbname}.ct2", f"{dbname}.ct4", f"{dbname}.t1", f"{dbname}.stb1"]
for tb in tbname:
for errsql in self.__rtrim_err_check(tb):
@@ -142,17 +142,15 @@ class TDTestCase:
self.__test_error()
- def __create_tb(self):
- tdSql.prepare()
-
+ def __create_tb(self, dbname="db"):
tdLog.printNoPrefix("==========step1:create table")
- create_stb_sql = f'''create table stb1(
+ create_stb_sql = f'''create table {dbname}.stb1(
ts timestamp, {INT_COL} int, {BINT_COL} bigint, {SINT_COL} smallint, {TINT_COL} tinyint,
{FLOAT_COL} float, {DOUBLE_COL} double, {BOOL_COL} bool,
{BINARY_COL} binary(16), {NCHAR_COL} nchar(32), {TS_COL} timestamp
) tags (t1 int)
'''
- create_ntb_sql = f'''create table t1(
+ create_ntb_sql = f'''create table {dbname}.t1(
ts timestamp, {INT_COL} int, {BINT_COL} bigint, {SINT_COL} smallint, {TINT_COL} tinyint,
{FLOAT_COL} float, {DOUBLE_COL} double, {BOOL_COL} bool,
{BINARY_COL} binary(16), {NCHAR_COL} nchar(32), {TS_COL} timestamp
@@ -162,29 +160,29 @@ class TDTestCase:
tdSql.execute(create_ntb_sql)
for i in range(4):
- tdSql.execute(f'create table ct{i+1} using stb1 tags ( {i+1} )')
+ tdSql.execute(f'create table {dbname}.ct{i+1} using {dbname}.stb1 tags ( {i+1} )')
- def __insert_data(self, rows):
+ def __insert_data(self, rows, dbname="db"):
now_time = int(datetime.datetime.timestamp(datetime.datetime.now()) * 1000)
for i in range(rows):
tdSql.execute(
- f"insert into ct1 values ( { now_time - i * 1000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )"
+ f"insert into {dbname}.ct1 values ( { now_time - i * 1000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )"
)
tdSql.execute(
- f"insert into ct4 values ( { now_time - i * 7776000000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )"
+ f"insert into {dbname}.ct4 values ( { now_time - i * 7776000000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )"
)
tdSql.execute(
- f"insert into ct2 values ( { now_time - i * 7776000000 }, {-i}, {-11111 * i}, {-111 * i % 32767 }, {-11 * i % 127}, {-1.11*i}, {-1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )"
+ f"insert into {dbname}.ct2 values ( { now_time - i * 7776000000 }, {-i}, {-11111 * i}, {-111 * i % 32767 }, {-11 * i % 127}, {-1.11*i}, {-1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )"
)
tdSql.execute(
- f'''insert into ct1 values
+ f'''insert into {dbname}.ct1 values
( { now_time - rows * 5 }, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar_测试_0', { now_time + 8 } )
( { now_time + 10000 }, { rows }, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar_测试_9', { now_time + 9 } )
'''
)
tdSql.execute(
- f'''insert into ct4 values
+ f'''insert into {dbname}.ct4 values
( { now_time - rows * 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( { now_time - rows * 3888000000 + 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( { now_time + 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
@@ -200,7 +198,7 @@ class TDTestCase:
)
tdSql.execute(
- f'''insert into ct2 values
+ f'''insert into {dbname}.ct2 values
( { now_time - rows * 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( { now_time - rows * 3888000000 + 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( { now_time + 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
@@ -216,13 +214,13 @@ class TDTestCase:
)
for i in range(rows):
- insert_data = f'''insert into t1 values
+ insert_data = f'''insert into {dbname}.t1 values
( { now_time - i * 3600000 }, {i}, {i * 11111}, { i % 32767 }, { i % 127}, { i * 1.11111 }, { i * 1000.1111 }, { i % 2},
"binary_{i}", "nchar_测试_{i}", { now_time - 1000 * i } )
'''
tdSql.execute(insert_data)
tdSql.execute(
- f'''insert into t1 values
+ f'''insert into {dbname}.t1 values
( { now_time + 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( { now_time - (( rows // 2 ) * 60 + 30) * 60000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( { now_time - rows * 3600000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
@@ -251,8 +249,7 @@ class TDTestCase:
tdLog.printNoPrefix("==========step3:all check")
self.all_test()
- tdDnodes.stop(1)
- tdDnodes.start(1)
+ tdSql.execute("flush database db")
tdSql.execute("use db")
diff --git a/tests/system-test/2-query/sample.py b/tests/system-test/2-query/sample.py
index 45be0ef8abc8e32c1dbe904fb2dcd225f3db5f94..7f1d7ab8c0d62fb4db7386caf3b4eeca4b3f8cba 100644
--- a/tests/system-test/2-query/sample.py
+++ b/tests/system-test/2-query/sample.py
@@ -11,21 +11,17 @@
# -*- coding: utf-8 -*-
-from pstats import Stats
import sys
-import subprocess
import random
-import math
-import numpy as np
-import inspect
import re
-import taos
from util.log import *
from util.cases import *
from util.sql import *
from util.dnodes import *
+DBNAME = "db"
+
class TDTestCase:
def init(self, conn, logSql):
@@ -33,11 +29,11 @@ class TDTestCase:
tdSql.init(conn.cursor())
self.ts = 1537146000000
- def sample_query_form(self, sel="select", func="sample(", col="c1", m_comm =",", k=1,r_comm=")", alias="", fr="from",table_expr="t1", condition=""):
+ def sample_query_form(self, sel=f"select", func="sample(", col="c1", m_comm =",", k=1,r_comm=")", alias="", fr="from",table_expr="t1", condition=""):
'''
sample function:
- :param sel: string, must be "select", required parameters;
+ :param sel: string, must be f"select", required parameters;
:param func: string, in this case must be "sample(", otherwise return other function, required parameters;
:param col: string, column name, required parameters;
:param m_comm: string, comma between col and k , required parameters;
@@ -47,12 +43,12 @@ class TDTestCase:
:param fr: string, must be "from", required parameters;
:param table_expr: string or expression, data source(eg,table/stable name, result set), required parameters;
:param condition: expression;
- :return: sample query statement,default: select sample(c1, 1) from t1
+ :return: sample query statement,default: select sample(c1, 1) from {dbname}.t1
'''
return f"{sel} {func} {col} {m_comm} {k} {r_comm} {alias} {fr} {table_expr} {condition}"
- def checksample(self,sel="select", func="sample(", col="c1", m_comm =",", k=1,r_comm=")", alias="", fr="from",table_expr="t1", condition=""):
+ def checksample(self,sel=f"select", func="sample(", col="c1", m_comm =",", k=1,r_comm=")", alias="", fr="from",table_expr=f"{DBNAME}.t1", condition=""):
# print(self.sample_query_form(sel=sel, func=func, col=col, m_comm=m_comm, k=k, r_comm=r_comm, alias=alias, fr=fr,
# table_expr=table_expr, condition=condition))
line = sys._getframe().f_back.f_lineno
@@ -65,7 +61,7 @@ class TDTestCase:
))
- sql = "select * from t1"
+ sql = f"select * from {table_expr}"
collist = tdSql.getColNameList(sql)
if not isinstance(col, str):
@@ -125,7 +121,7 @@ class TDTestCase:
# table_expr=table_expr, condition=condition
# ))
- if any( [func != "sample(" , r_comm != ")" , fr != "from", sel != "select"]):
+ if any( [func != "sample(" , r_comm != ")" , fr != "from", sel != f"select"]):
print(f"case in {line}: ", end='')
return tdSql.error(self.sample_query_form(
sel=sel, func=func, col=col, m_comm=m_comm, k=k, r_comm=r_comm, alias=alias, fr=fr,
@@ -286,14 +282,14 @@ class TDTestCase:
return
else:
- if "where" in condition:
- condition = re.sub('where', f"where {col} is not null and ", condition)
- else:
- condition = f"where {col} is not null" + condition
- print(f"select ts, {col} {alias} from {table_expr} {re.sub('limit [0-9]*|offset [0-9]*','',condition)}")
- tdSql.query(f"select ts, {col} {alias} from {table_expr} {re.sub('limit [0-9]*|offset [0-9]*','',condition)}")
+ # if "where" in condition:
+ # condition = re.sub('where', f"where {col} is not null and ", condition)
+ # else:
+ # condition = f"where {col} is not null" + condition
+ # print(f"select ts, {col} {alias} from {table_expr} {re.sub('limit [0-9]*|offset [0-9]*','',condition)}")
+ # tdSql.query(f"select _c0, {col} {alias} from {table_expr} {re.sub('limit [0-9]*|offset [0-9]*','',condition)}")
# offset_val = condition.split("offset")[1].split(" ")[1] if "offset" in condition else 0
- pre_sample = tdSql.queryResult
+ # pre_sample = tdSql.queryResult
# pre_len = tdSql.queryRows
# for i in range(sample_len):
# if sample_result[pre_row:pre_row + step][i] not in pre_sample:
@@ -301,7 +297,7 @@ class TDTestCase:
# else:
# tdLog.info(f"case in {line} is success: sample data is in {group_name}")
- pass
+ pass
def sample_current_query(self) :
@@ -322,24 +318,24 @@ class TDTestCase:
self.checksample(**case6)
# # case7~8: nested query
- # case7 = {"table_expr": "(select c1 from stb1)"}
- # self.checksample(**case7)
- # case8 = {"table_expr": "(select sample(c1, 1) c1 from stb1 group by tbname)"}
- # self.checksample(**case8)
+ case7 = {"table_expr": f"(select c1 from {DBNAME}.stb1)"}
+ self.checksample(**case7)
+ case8 = {"table_expr": f"(select sample(c1, 1) c1 from {DBNAME}.stb1 group by tbname)"}
+ self.checksample(**case8)
# case9~10: mix with tbname/ts/tag/col
- # case9 = {"alias": ", tbname"}
- # self.checksample(**case9)
- # case10 = {"alias": ", _c0"}
- # self.checksample(**case10)
- # case11 = {"alias": ", st1"}
+ case9 = {"alias": ", tbname"}
+ self.checksample(**case9)
+ case10 = {"alias": ", _c0"}
+ self.checksample(**case10)
+ case11 = {"alias": ", st1"}
# self.checksample(**case11)
- tdSql.query("select sample( c1 , 1 ) , st1 from t1")
+ tdSql.query(f"select sample( c1 , 1 ) , st1 from {DBNAME}.t1")
- # case12 = {"alias": ", c1"}
+ case12 = {"alias": ", c1"}
# self.checksample(**case12)
- tdSql.query("select sample( c1 , 1 ) , c1 from t1")
+ tdSql.query(f"select sample( c1 , 1 ) , c1 from {DBNAME}.t1")
# case13~15: with single condition
case13 = {"condition": "where c1 <= 10"}
@@ -353,32 +349,31 @@ class TDTestCase:
case16 = {"condition": "where c6=1 or c6 =0"}
self.checksample(**case16)
- # # case17: only support normal table join
- # case17 = {
- # "col": "t1.c1",
- # "table_expr": "t1, t2",
- # "condition": "where t1.ts=t2.ts"
- # }
- # self.checksample(**case17)
- # # case18~19: with group by
- # case19 = {
- # "table_expr": "stb1",
- # "condition": "partition by tbname"
- # }
+ # case17: only support normal table join
+ case17 = {
+ "col": "t1.c1",
+ "table_expr": f"{DBNAME}.t1 t1 join {DBNAME}.t2 t2 on t1.ts = t2.ts",
+ }
+ self.checksample(**case17)
+ # case18~19: with group by
+ case19 = {
+ "table_expr": f"{DBNAME}.stb1",
+ "condition": "partition by tbname"
+ }
# self.checksample(**case19)
- # # case20~21: with order by
- # case20 = {"condition": "order by ts"}
+ # case20~21: with order by
+ case20 = {"condition": "order by ts"}
# self.checksample(**case20)
- # case21 = {
- # "table_expr": "stb1",
- # "condition": "partition by tbname order by tbname"
- # }
+ case21 = {
+ "table_expr": f"{DBNAME}.stb1",
+ "condition": "partition by tbname order by tbname"
+ }
# self.checksample(**case21)
# case22: with union
case22 = {
- "condition": "union all select sample( c1 , 1 ) from t2"
+ "condition": f"union all select sample( c1 , 1 ) from {DBNAME}.t2"
}
self.checksample(**case22)
@@ -396,12 +391,12 @@ class TDTestCase:
case26 = {"k": 1000}
self.checksample(**case26)
case27 = {
- "table_expr": "stb1",
+ "table_expr": f"{DBNAME}.stb1",
"condition": "group by tbname slimit 1 "
}
self.checksample(**case27) # with slimit
case28 = {
- "table_expr": "stb1",
+ "table_expr": f"{DBNAME}.stb1",
"condition": "group by tbname slimit 1 soffset 1"
}
self.checksample(**case28) # with soffset
@@ -431,7 +426,7 @@ class TDTestCase:
# err9 = {"col": "st1"}
# self.checksample(**err9) # col: tag
- tdSql.query(" select sample(st1 ,1) from t1 ")
+ tdSql.query(f"select sample(st1 ,1) from {DBNAME}.t1 ")
# err10 = {"col": 1}
# self.checksample(**err10) # col: value
# err11 = {"col": "NULL"}
@@ -494,13 +489,13 @@ class TDTestCase:
self.checksample(**err39) # mix with calculation function 2
# err40 = {"alias": "+ 2"}
# self.checksample(**err40) # mix with arithmetic 1
- # tdSql.query(" select sample(c1 , 1) + 2 from t1 ")
+ # tdSql.query(f"select sample(c1 , 1) + 2 from {dbname}.t1 ")
err41 = {"alias": "+ avg(c1)"}
# self.checksample(**err41) # mix with arithmetic 2
# err42 = {"alias": ", c1"}
# self.checksample(**err42)
- tdSql.query("select sample( c1 , 1 ) , c1 from t1")
+ tdSql.query(f"select sample( c1 , 1 ) , c1 from {DBNAME}.t1")
# mix with other col
# err43 = {"table_expr": "stb1"}
# self.checksample(**err43) # select stb directly
@@ -510,14 +505,14 @@ class TDTestCase:
# "condition": "where stb1.ts=stb2.ts and stb1.st1=stb2.st2 order by stb1.ts"
# }
# self.checksample(**err44) # stb join
- tdSql.query("select sample( stb1.c1 , 1 ) from stb1, stb2 where stb1.ts=stb2.ts and stb1.st1=stb2.st2 order by stb1.ts")
+ tdSql.query(f"select sample( stb1.c1 , 1 ) from {DBNAME}.stb1 stb1, {DBNAME}.stb2 stb2 where stb1.ts=stb2.ts and stb1.st1=stb2.st2 order by stb1.ts")
# err45 = {
# "condition": "where ts>0 and ts < now interval(1h) fill(next)"
# }
# self.checksample(**err45) # interval
- tdSql.error("select sample( c1 , 1 ) from t1 where ts>0 and ts < now interval(1h) fill(next)")
+ tdSql.error(f"select sample( c1 , 1 ) from {DBNAME}.t1 where ts>0 and ts < now interval(1h) fill(next)")
err46 = {
- "table_expr": "t1",
+ "table_expr": f"{DBNAME}.t1",
"condition": "group by c6"
}
# self.checksample(**err46) # group by normal col
@@ -563,49 +558,45 @@ class TDTestCase:
pass
- def sample_test_data(self, tbnum:int, data_row:int, basetime:int) -> None :
+ def sample_test_data(self, tbnum:int, data_row:int, basetime:int, dbname="db") -> None :
for i in range(tbnum):
for j in range(data_row):
tdSql.execute(
- f"insert into t{i} values ("
+ f"insert into {dbname}.t{i} values ("
f"{basetime + (j+1)*10}, {random.randint(-200, -1)}, {random.uniform(200, -1)}, {basetime + random.randint(-200, -1)}, "
f"'binary_{j}', {random.uniform(-200, -1)}, {random.choice([0,1])}, {random.randint(-200,-1)}, "
f"{random.randint(-200, -1)}, {random.randint(-127, -1)}, 'nchar_{j}' )"
)
tdSql.execute(
- f"insert into t{i} values ("
+ f"insert into {dbname}.t{i} values ("
f"{basetime - (j+1) * 10}, {random.randint(1, 200)}, {random.uniform(1, 200)}, {basetime - random.randint(1, 200)}, "
f"'binary_{j}_1', {random.uniform(1, 200)}, {random.choice([0, 1])}, {random.randint(1,200)}, "
f"{random.randint(1,200)}, {random.randint(1,127)}, 'nchar_{j}_1' )"
)
tdSql.execute(
- f"insert into tt{i} values ( {basetime-(j+1) * 10}, {random.randint(1, 200)} )"
+ f"insert into {dbname}.tt{i} values ( {basetime-(j+1) * 10}, {random.randint(1, 200)} )"
)
pass
- def sample_test_table(self,tbnum: int) -> None :
- tdSql.execute("drop database if exists db")
- tdSql.execute("create database if not exists db keep 3650")
- tdSql.execute("use db")
+ def sample_test_table(self,tbnum: int, dbname="db") -> None :
+ tdSql.execute(f"drop database if exists {dbname}")
+ tdSql.execute(f"create database if not exists {dbname} keep 3650")
tdSql.execute(
- "create stable db.stb1 (\
+ f"create stable {dbname}.stb1 (\
ts timestamp, c1 int, c2 float, c3 timestamp, c4 binary(16), c5 double, c6 bool, \
c7 bigint, c8 smallint, c9 tinyint, c10 nchar(16)\
) \
tags(st1 int)"
)
tdSql.execute(
- "create stable db.stb2 (ts timestamp, c1 int) tags(st2 int)"
+ f"create stable {dbname}.stb2 (ts timestamp, c1 int) tags(st2 int)"
)
for i in range(tbnum):
- tdSql.execute(f"create table t{i} using stb1 tags({i})")
- tdSql.execute(f"create table tt{i} using stb2 tags({i})")
-
- pass
-
+ tdSql.execute(f"create table {dbname}.t{i} using {dbname}.stb1 tags({i})")
+ tdSql.execute(f"create table {dbname}.tt{i} using {dbname}.stb2 tags({i})")
def check_sample(self , sample_query , origin_query ):
@@ -626,45 +617,43 @@ class TDTestCase:
else:
tdLog.exit(" sample data is not in datas groups ,failed sql is : %s" % sample_query )
-
- def basic_sample_query(self):
- tdSql.execute(" drop database if exists db ")
- tdSql.execute(" create database if not exists db duration 300d ")
- tdSql.execute(" use db ")
+ def basic_sample_query(self, dbname="db"):
+ tdSql.execute(f" drop database if exists {dbname} ")
+ tdSql.execute(f" create database if not exists {dbname} duration 300d ")
tdSql.execute(
- '''create table stb1
+ f'''create table {dbname}.stb1
(ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp)
tags (t1 int)
'''
)
tdSql.execute(
- '''
- create table t1
+ f'''
+ create table {dbname}.t1
(ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp)
'''
)
for i in range(4):
- tdSql.execute(f'create table ct{i+1} using stb1 tags ( {i+1} )')
+ tdSql.execute(f'create table {dbname}.ct{i+1} using {dbname}.stb1 tags ( {i+1} )')
for i in range(9):
tdSql.execute(
- f"insert into ct1 values ( now()-{i*10}s, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )"
+ f"insert into {dbname}.ct1 values ( now()-{i*10}s, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )"
)
tdSql.execute(
- f"insert into ct4 values ( now()-{i*90}d, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )"
+ f"insert into {dbname}.ct4 values ( now()-{i*90}d, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )"
)
- tdSql.execute("insert into ct1 values (now()-45s, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar0', now()+8a )")
- tdSql.execute("insert into ct1 values (now()+10s, 9, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )")
- tdSql.execute("insert into ct1 values (now()+15s, 9, -99999, -999, -99, -9.99, NULL, 1, 'binary9', 'nchar9', now()+9a )")
- tdSql.execute("insert into ct1 values (now()+20s, 9, -99999, -999, NULL, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )")
+ tdSql.execute(f"insert into {dbname}.ct1 values (now()-45s, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar0', now()+8a )")
+ tdSql.execute(f"insert into {dbname}.ct1 values (now()+10s, 9, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )")
+ tdSql.execute(f"insert into {dbname}.ct1 values (now()+15s, 9, -99999, -999, -99, -9.99, NULL, 1, 'binary9', 'nchar9', now()+9a )")
+ tdSql.execute(f"insert into {dbname}.ct1 values (now()+20s, 9, -99999, -999, NULL, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )")
- tdSql.execute("insert into ct4 values (now()-810d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
- tdSql.execute("insert into ct4 values (now()-400d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
- tdSql.execute("insert into ct4 values (now()+90d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
+ tdSql.execute(f"insert into {dbname}.ct4 values (now()-810d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
+ tdSql.execute(f"insert into {dbname}.ct4 values (now()-400d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
+ tdSql.execute(f"insert into {dbname}.ct4 values (now()+90d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
tdSql.execute(
- f'''insert into t1 values
+ f'''insert into {dbname}.t1 values
( '2020-04-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( '2020-10-21 01:01:01.000', 1, 11111, 111, 11, 1.11, 11.11, 1, "binary1", "nchar1", now()+1a )
( '2020-12-31 01:01:01.000', 2, 22222, 222, 22, 2.22, 22.22, 0, "binary2", "nchar2", now()+2a )
@@ -683,116 +672,116 @@ class TDTestCase:
# basic query for sample
# params test for all
- tdSql.error(" select sample(c1,c1) from t1 ")
- tdSql.error(" select sample(c1,now) from t1 ")
- tdSql.error(" select sample(c1,tbname) from t1 ")
- tdSql.error(" select sample(c1,ts) from t1 ")
- tdSql.error(" select sample(c1,false) from t1 ")
- tdSql.query(" select sample(123,1) from t1 ")
-
- tdSql.query(" select sample(c1,2) from t1 ")
+ tdSql.error(f"select sample(c1,c1) from {dbname}.t1 ")
+ tdSql.error(f"select sample(c1,now) from {dbname}.t1 ")
+ tdSql.error(f"select sample(c1,tbname) from {dbname}.t1 ")
+ tdSql.error(f"select sample(c1,ts) from {dbname}.t1 ")
+ tdSql.error(f"select sample(c1,false) from {dbname}.t1 ")
+ tdSql.query(f"select sample(123,1) from {dbname}.t1 ")
+
+ tdSql.query(f"select sample(c1,2) from {dbname}.t1 ")
tdSql.checkRows(2)
- tdSql.query(" select sample(c1,10) from t1 ")
+ tdSql.query(f"select sample(c1,10) from {dbname}.t1 ")
tdSql.checkRows(9)
- tdSql.query(" select sample(c8,10) from t1 ")
+ tdSql.query(f"select sample(c8,10) from {dbname}.t1 ")
tdSql.checkRows(9)
- tdSql.query(" select sample(c1,999) from t1 ")
+ tdSql.query(f"select sample(c1,999) from {dbname}.t1 ")
tdSql.checkRows(9)
- tdSql.query(" select sample(c1,1000) from t1 ")
+ tdSql.query(f"select sample(c1,1000) from {dbname}.t1 ")
tdSql.checkRows(9)
- tdSql.query(" select sample(c8,1000) from t1 ")
+ tdSql.query(f"select sample(c8,1000) from {dbname}.t1 ")
tdSql.checkRows(9)
- tdSql.error(" select sample(c1,-1) from t1 ")
+ tdSql.error(f"select sample(c1,-1) from {dbname}.t1 ")
# bug need fix
- # tdSql.query("select sample(c1 ,2) , 123 from stb1;")
+ # tdSql.query(f"select sample(c1 ,2) , 123 from {dbname}.stb1;")
# all type support
- tdSql.query(" select sample(c1 , 20 ) from ct4 ")
+ tdSql.query(f"select sample(c1 , 20 ) from {dbname}.ct4 ")
tdSql.checkRows(9)
- tdSql.query(" select sample(c2 , 20 ) from ct4 ")
+ tdSql.query(f"select sample(c2 , 20 ) from {dbname}.ct4 ")
tdSql.checkRows(9)
- tdSql.query(" select sample(c3 , 20 ) from ct4 ")
+ tdSql.query(f"select sample(c3 , 20 ) from {dbname}.ct4 ")
tdSql.checkRows(9)
- tdSql.query(" select sample(c4 , 20 ) from ct4 ")
+ tdSql.query(f"select sample(c4 , 20 ) from {dbname}.ct4 ")
tdSql.checkRows(9)
- tdSql.query(" select sample(c5 , 20 ) from ct4 ")
+ tdSql.query(f"select sample(c5 , 20 ) from {dbname}.ct4 ")
tdSql.checkRows(9)
- tdSql.query(" select sample(c6 , 20 ) from ct4 ")
+ tdSql.query(f"select sample(c6 , 20 ) from {dbname}.ct4 ")
tdSql.checkRows(9)
- tdSql.query(" select sample(c7 , 20 ) from ct4 ")
+ tdSql.query(f"select sample(c7 , 20 ) from {dbname}.ct4 ")
tdSql.checkRows(9)
- tdSql.query(" select sample(c8 , 20 ) from ct4 ")
+ tdSql.query(f"select sample(c8 , 20 ) from {dbname}.ct4 ")
tdSql.checkRows(9)
- tdSql.query(" select sample(c9 , 20 ) from ct4 ")
+ tdSql.query(f"select sample(c9 , 20 ) from {dbname}.ct4 ")
tdSql.checkRows(9)
- tdSql.query(" select sample(c10 , 20 ) from ct4 ")
+ tdSql.query(f"select sample(c10 , 20 ) from {dbname}.ct4 ")
tdSql.checkRows(9)
- # tdSql.query(" select sample(t1 , 20 ) from ct1 ")
+ # tdSql.query(f"select sample(t1 , 20 ) from {dbname}.ct1 ")
# tdSql.checkRows(13)
# filter data
- tdSql.query(" select sample(c1, 20 ) from t1 where c1 is null ")
+ tdSql.query(f"select sample(c1, 20 ) from {dbname}.t1 where c1 is null ")
tdSql.checkRows(1)
- tdSql.query(" select sample(c1, 20 ) from t1 where c1 =6 ")
+ tdSql.query(f"select sample(c1, 20 ) from {dbname}.t1 where c1 =6 ")
tdSql.checkRows(1)
- tdSql.query(" select sample(c1, 20 ) from t1 where c1 > 6 ")
+ tdSql.query(f"select sample(c1, 20 ) from {dbname}.t1 where c1 > 6 ")
tdSql.checkRows(3)
- self.check_sample("select sample(c1, 20 ) from t1 where c1 > 6" , "select c1 from t1 where c1 > 6")
+ self.check_sample(f"select sample(c1, 20 ) from {dbname}.t1 where c1 > 6" , f"select c1 from {dbname}.t1 where c1 > 6")
- tdSql.query(" select sample( c1 , 1 ) from t1 where c1 in (0, 1,2) ")
+ tdSql.query(f"select sample( c1 , 1 ) from {dbname}.t1 where c1 in (0, 1,2) ")
tdSql.checkRows(1)
- tdSql.query("select sample( c1 ,3 ) from t1 where c1 between 1 and 10 ")
+ tdSql.query(f"select sample( c1 ,3 ) from {dbname}.t1 where c1 between 1 and 10 ")
tdSql.checkRows(3)
- self.check_sample("select sample( c1 ,3 ) from t1 where c1 between 1 and 10" ,"select c1 from t1 where c1 between 1 and 10")
+ self.check_sample(f"select sample( c1 ,3 ) from {dbname}.t1 where c1 between 1 and 10" ,f"select c1 from {dbname}.t1 where c1 between 1 and 10")
# join
- tdSql.query("select sample( ct4.c1 , 1 ) from ct1, ct4 where ct4.ts=ct1.ts")
+ tdSql.query(f"select sample( ct4.c1 , 1 ) from {dbname}.ct1 ct1, {dbname}.ct4 ct4 where ct4.ts=ct1.ts")
# partition by tbname
- tdSql.query("select sample(c1,2) from stb1 partition by tbname")
+ tdSql.query(f"select sample(c1,2) from {dbname}.stb1 partition by tbname")
tdSql.checkRows(4)
- self.check_sample("select sample(c1,2) from stb1 partition by tbname" , "select c1 from stb1 partition by tbname")
+ self.check_sample(f"select sample(c1,2) from {dbname}.stb1 partition by tbname" , f"select c1 from {dbname}.stb1 partition by tbname")
# nest query
- # tdSql.query("select sample(c1,2) from (select c1 from t1); ")
+ # tdSql.query(f"select sample(c1,2) from (select c1 from {dbname}.t1); ")
# tdSql.checkRows(2)
# union all
- tdSql.query("select sample(c1,2) from t1 union all select sample(c1,3) from t1")
+ tdSql.query(f"select sample(c1,2) from {dbname}.t1 union all select sample(c1,3) from {dbname}.t1")
tdSql.checkRows(5)
# fill interval
# not support mix with other function
- tdSql.error("select top(c1,2) , sample(c1,2) from ct1")
- tdSql.error("select max(c1) , sample(c1,2) from ct1")
- tdSql.query("select c1 , sample(c1,2) from ct1")
+ tdSql.error(f"select top(c1,2) , sample(c1,2) from {dbname}.ct1")
+ tdSql.error(f"select max(c1) , sample(c1,2) from {dbname}.ct1")
+ tdSql.query(f"select c1 , sample(c1,2) from {dbname}.ct1")
# bug for mix with scalar
- tdSql.query("select 123 , sample(c1,100) from ct1")
- tdSql.query("select sample(c1,100)+2 from ct1")
- tdSql.query("select abs(sample(c1,100)) from ct1")
+ tdSql.query(f"select 123 , sample(c1,100) from {dbname}.ct1")
+ tdSql.query(f"select sample(c1,100)+2 from {dbname}.ct1")
+ tdSql.query(f"select abs(sample(c1,100)) from {dbname}.ct1")
- def sample_test_run(self) :
+ def sample_test_run(self, dbname="db") :
tdLog.printNoPrefix("==========support sample function==========")
tbnum = 10
nowtime = int(round(time.time() * 1000))
@@ -805,28 +794,28 @@ class TDTestCase:
tdLog.printNoPrefix("######## insert only NULL test:")
for i in range(tbnum):
- tdSql.execute(f"insert into t{i}(ts) values ({nowtime - 5})")
- tdSql.execute(f"insert into t{i}(ts) values ({nowtime + 5})")
+ tdSql.execute(f"insert into {dbname}.t{i}(ts) values ({nowtime - 5})")
+ tdSql.execute(f"insert into {dbname}.t{i}(ts) values ({nowtime + 5})")
self.sample_current_query()
self.sample_error_query()
tdLog.printNoPrefix("######## insert data in the range near the max(bigint/double):")
- # self.sample_test_table(tbnum)
- # tdSql.execute(f"insert into t1(ts, c1,c2,c5,c7) values "
- # f"({nowtime - (per_table_rows + 1) * 10}, {2**31-1}, {3.4*10**38}, {1.7*10**308}, {2**63-1})")
- # tdSql.execute(f"insert into t1(ts, c1,c2,c5,c7) values "
- # f"({nowtime - (per_table_rows + 2) * 10}, {2**31-1}, {3.4*10**38}, {1.7*10**308}, {2**63-1})")
- # self.sample_current_query()
- # self.sample_error_query()
+ self.sample_test_table(tbnum)
+ tdSql.execute(f"insert into {dbname}.t1(ts, c1,c2,c5,c7) values "
+ f"({nowtime - (per_table_rows + 1) * 10}, {2**31-1}, {3.4*10**38}, {1.7*10**308}, {2**63-1})")
+ tdSql.execute(f"insert into {dbname}.t1(ts, c1,c2,c5,c7) values "
+ f"({nowtime - (per_table_rows + 2) * 10}, {2**31-1}, {3.4*10**38}, {1.7*10**308}, {2**63-1})")
+ self.sample_current_query()
+ self.sample_error_query()
tdLog.printNoPrefix("######## insert data in the range near the min(bigint/double):")
- # self.sample_test_table(tbnum)
- # tdSql.execute(f"insert into t1(ts, c1,c2,c5,c7) values "
- # f"({nowtime - (per_table_rows + 1) * 10}, {1-2**31}, {-3.4*10**38}, {-1.7*10**308}, {1-2**63})")
- # tdSql.execute(f"insert into t1(ts, c1,c2,c5,c7) values "
- # f"({nowtime - (per_table_rows + 2) * 10}, {1-2**31}, {-3.4*10**38}, {-1.7*10**308}, {512-2**63})")
- # self.sample_current_query()
- # self.sample_error_query()
+ self.sample_test_table(tbnum)
+ tdSql.execute(f"insert into {dbname}.t1(ts, c1,c2,c5,c7) values "
+ f"({nowtime - (per_table_rows + 1) * 10}, {1-2**31}, {-3.4*10**38}, {-1.7*10**308}, {1-2**63})")
+ tdSql.execute(f"insert into {dbname}.t1(ts, c1,c2,c5,c7) values "
+ f"({nowtime - (per_table_rows + 2) * 10}, {1-2**31}, {-3.4*10**38}, {-1.7*10**308}, {512-2**63})")
+ self.sample_current_query()
+ self.sample_error_query()
tdLog.printNoPrefix("######## insert data without NULL data test:")
self.sample_test_table(tbnum)
@@ -837,16 +826,16 @@ class TDTestCase:
tdLog.printNoPrefix("######## insert data mix with NULL test:")
for i in range(tbnum):
- tdSql.execute(f"insert into t{i}(ts) values ({nowtime})")
- tdSql.execute(f"insert into t{i}(ts) values ({nowtime-(per_table_rows+3)*10})")
- tdSql.execute(f"insert into t{i}(ts) values ({nowtime+(per_table_rows+3)*10})")
+ tdSql.execute(f"insert into {dbname}.t{i}(ts) values ({nowtime})")
+ tdSql.execute(f"insert into {dbname}.t{i}(ts) values ({nowtime-(per_table_rows+3)*10})")
+ tdSql.execute(f"insert into {dbname}.t{i}(ts) values ({nowtime+(per_table_rows+3)*10})")
self.sample_current_query()
self.sample_error_query()
tdLog.printNoPrefix("######## check after WAL test:")
- tdSql.query("select * from information_schema.ins_dnodes")
+ tdSql.query(f"select * from information_schema.ins_dnodes")
index = tdSql.getData(0, 0)
tdDnodes.stop(index)
tdDnodes.start(index)
@@ -855,19 +844,19 @@ class TDTestCase:
self.basic_sample_query()
- def sample_big_data(self):
- tdSql.execute("create database sample_db")
+ def sample_big_data(self, dbname="sample_db"):
+ tdSql.execute(f"create database {dbname}")
tdSql.execute("use sample_db")
- tdSql.execute("create stable st (ts timestamp ,c1 int ) tags(ind int)" )
- tdSql.execute("create table sub_tb using st tags(1)")
+ tdSql.execute(f"create stable {dbname}.st (ts timestamp ,c1 int ) tags(ind int)" )
+ tdSql.execute(f"create table {dbname}.sub_tb using {dbname}.st tags(1)")
for i in range(2000):
ts = self.ts+i*10
- tdSql.execute(f"insert into sub_tb values({ts} ,{i})")
+ tdSql.execute(f"insert into {dbname}.sub_tb values({ts} ,{i})")
- tdSql.query("select count(*) from st")
+ tdSql.query(f"select count(*) from {dbname}.st")
tdSql.checkData(0,0,2000)
- tdSql.query("select sample(c1 ,1000) from st")
+ tdSql.query(f"select sample(c1 ,1000) from {dbname}.st")
tdSql.checkRows(1000)
# bug need fix
diff --git a/tests/system-test/2-query/sin.py b/tests/system-test/2-query/sin.py
index 7cb559c510f637c25fef6e7573ea44c92a2051bc..a1ba3354879eb9e1e0abe66ae445f7604734ad66 100644
--- a/tests/system-test/2-query/sin.py
+++ b/tests/system-test/2-query/sin.py
@@ -9,48 +9,46 @@ from util.cases import *
class TDTestCase:
- updatecfgDict = {'debugFlag': 143 ,"cDebugFlag":143,"uDebugFlag":143 ,"rpcDebugFlag":143 , "tmrDebugFlag":143 ,
- "jniDebugFlag":143 ,"simDebugFlag":143,"dDebugFlag":143, "dDebugFlag":143,"vDebugFlag":143,"mDebugFlag":143,"qDebugFlag":143,
- "wDebugFlag":143,"sDebugFlag":143,"tsdbDebugFlag":143,"tqDebugFlag":143 ,"fsDebugFlag":143 ,"udfDebugFlag":143}
+
def init(self, conn, powSql):
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor())
-
- def prepare_datas(self):
+
+ def prepare_datas(self, dbname="db"):
tdSql.execute(
- '''create table stb1
+ f'''create table {dbname}.stb1
(ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp)
tags (t1 int)
'''
)
-
+
tdSql.execute(
- '''
- create table t1
+ f'''
+ create table {dbname}.t1
(ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp)
'''
)
for i in range(4):
- tdSql.execute(f'create table ct{i+1} using stb1 tags ( {i+1} )')
+ tdSql.execute(f'create table {dbname}.ct{i+1} using {dbname}.stb1 tags ( {i+1} )')
for i in range(9):
tdSql.execute(
- f"insert into ct1 values ( now()-{i*10}s, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )"
+ f"insert into {dbname}.ct1 values ( now()-{i*10}s, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )"
)
tdSql.execute(
- f"insert into ct4 values ( now()-{i*90}d, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )"
+ f"insert into {dbname}.ct4 values ( now()-{i*90}d, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )"
)
- tdSql.execute("insert into ct1 values (now()-45s, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar0', now()+8a )")
- tdSql.execute("insert into ct1 values (now()+10s, 9, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )")
- tdSql.execute("insert into ct1 values (now()+15s, 9, -99999, -999, -99, -9.99, NULL, 1, 'binary9', 'nchar9', now()+9a )")
- tdSql.execute("insert into ct1 values (now()+20s, 9, -99999, -999, NULL, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )")
+ tdSql.execute(f"insert into {dbname}.ct1 values (now()-45s, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar0', now()+8a )")
+ tdSql.execute(f"insert into {dbname}.ct1 values (now()+10s, 9, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )")
+ tdSql.execute(f"insert into {dbname}.ct1 values (now()+15s, 9, -99999, -999, -99, -9.99, NULL, 1, 'binary9', 'nchar9', now()+9a )")
+ tdSql.execute(f"insert into {dbname}.ct1 values (now()+20s, 9, -99999, -999, NULL, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )")
- tdSql.execute("insert into ct4 values (now()-810d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
- tdSql.execute("insert into ct4 values (now()-400d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
- tdSql.execute("insert into ct4 values (now()+90d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
+ tdSql.execute(f"insert into {dbname}.ct4 values (now()-810d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
+ tdSql.execute(f"insert into {dbname}.ct4 values (now()-400d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
+ tdSql.execute(f"insert into {dbname}.ct4 values (now()+90d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
tdSql.execute(
- f'''insert into t1 values
+ f'''insert into {dbname}.t1 values
( '2020-04-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( '2020-10-21 01:01:01.000', 1, 11111, 111, 11, 1.11, 11.11, 1, "binary1", "nchar1", now()+1a )
( '2020-12-31 01:01:01.000', 2, 22222, 222, 22, 2.22, 22.22, 0, "binary2", "nchar2", now()+2a )
@@ -65,14 +63,15 @@ class TDTestCase:
( '2023-02-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
'''
)
-
+
def check_result_auto_sin(self ,origin_query , pow_query):
pow_result = tdSql.getResult(pow_query)
+
origin_result = tdSql.getResult(origin_query)
auto_result =[]
-
+
for row in origin_result:
row_check = []
for elem in row:
@@ -82,190 +81,179 @@ class TDTestCase:
elem = math.sin(elem)
row_check.append(elem)
auto_result.append(row_check)
-
- check_status = True
-
+ tdSql.query(pow_query)
for row_index , row in enumerate(pow_result):
for col_index , elem in enumerate(row):
- if auto_result[row_index][col_index] == None and not (auto_result[row_index][col_index] == None and elem == None):
- check_status = False
- elif auto_result[row_index][col_index] != None and (auto_result[row_index][col_index] - elem > 0.00000001):
- check_status = False
- else:
- pass
- if not check_status:
- tdLog.notice("sin function value has not as expected , sql is \"%s\" "%pow_query )
- sys.exit(1)
- else:
- tdLog.info("sin value check pass , it work as expected ,sql is \"%s\" "%pow_query )
-
- def test_errors(self):
+ tdSql.checkData(row_index ,col_index ,auto_result[row_index][col_index])
+
+
+ def test_errors(self, dbname="db"):
error_sql_lists = [
- "select sin from t1",
- # "select sin(-+--+c1 ) from t1",
- # "select +-sin(c1) from t1",
- # "select ++-sin(c1) from t1",
- # "select ++--sin(c1) from t1",
- # "select - -sin(c1)*0 from t1",
- # "select sin(tbname+1) from t1 ",
- "select sin(123--123)==1 from t1",
- "select sin(c1) as 'd1' from t1",
- "select sin(c1 ,c2) from t1",
- "select sin(c1 ,NULL ) from t1",
- "select sin(,) from t1;",
- "select sin(sin(c1) ab from t1)",
- "select sin(c1 ) as int from t1",
- "select sin from stb1",
- # "select sin(-+--+c1) from stb1",
- # "select +-sin(c1) from stb1",
- # "select ++-sin(c1) from stb1",
- # "select ++--sin(c1) from stb1",
- # "select - -sin(c1)*0 from stb1",
- # "select sin(tbname+1) from stb1 ",
- "select sin(123--123)==1 from stb1",
- "select sin(c1) as 'd1' from stb1",
- "select sin(c1 ,c2 ) from stb1",
- "select sin(c1 ,NULL) from stb1",
- "select sin(,) from stb1;",
- "select sin(sin(c1) ab from stb1)",
- "select sin(c1) as int from stb1"
+ f"select sin from {dbname}.t1",
+ # f"select sin(-+--+c1 ) from {dbname}.t1",
+ # f"select +-sin(c1) from {dbname}.t1",
+ # f"select ++-sin(c1) from {dbname}.t1",
+ # f"select ++--sin(c1) from {dbname}.t1",
+ # f"select - -sin(c1)*0 from {dbname}.t1",
+ # f"select sin(tbname+1) from {dbname}.t1 ",
+ f"select sin(123--123)==1 from {dbname}.t1",
+ f"select sin(c1) as 'd1' from {dbname}.t1",
+ f"select sin(c1 ,c2) from {dbname}.t1",
+ f"select sin(c1 ,NULL ) from {dbname}.t1",
+ f"select sin(,) from {dbname}.t1;",
+ f"select sin(sin(c1) ab from {dbname}.t1)",
+ f"select sin(c1 ) as int from {dbname}.t1",
+ f"select sin from {dbname}.stb1",
+ # f"select sin(-+--+c1) from {dbname}.stb1",
+ # f"select +-sin(c1) from {dbname}.stb1",
+ # f"select ++-sin(c1) from {dbname}.stb1",
+ # f"select ++--sin(c1) from {dbname}.stb1",
+ # f"select - -sin(c1)*0 from {dbname}.stb1",
+ # f"select sin(tbname+1) from {dbname}.stb1 ",
+ f"select sin(123--123)==1 from {dbname}.stb1",
+ f"select sin(c1) as 'd1' from {dbname}.stb1",
+ f"select sin(c1 ,c2 ) from {dbname}.stb1",
+ f"select sin(c1 ,NULL) from {dbname}.stb1",
+ f"select sin(,) from {dbname}.stb1;",
+ f"select sin(sin(c1) ab from {dbname}.stb1)",
+ f"select sin(c1) as int from {dbname}.stb1"
]
for error_sql in error_sql_lists:
tdSql.error(error_sql)
-
- def support_types(self):
+
+ def support_types(self, dbname="db"):
type_error_sql_lists = [
- "select sin(ts) from t1" ,
- "select sin(c7) from t1",
- "select sin(c8) from t1",
- "select sin(c9) from t1",
- "select sin(ts) from ct1" ,
- "select sin(c7) from ct1",
- "select sin(c8) from ct1",
- "select sin(c9) from ct1",
- "select sin(ts) from ct3" ,
- "select sin(c7) from ct3",
- "select sin(c8) from ct3",
- "select sin(c9) from ct3",
- "select sin(ts) from ct4" ,
- "select sin(c7) from ct4",
- "select sin(c8) from ct4",
- "select sin(c9) from ct4",
- "select sin(ts) from stb1" ,
- "select sin(c7) from stb1",
- "select sin(c8) from stb1",
- "select sin(c9) from stb1" ,
-
- "select sin(ts) from stbbb1" ,
- "select sin(c7) from stbbb1",
-
- "select sin(ts) from tbname",
- "select sin(c9) from tbname"
+ f"select sin(ts) from {dbname}.t1" ,
+ f"select sin(c7) from {dbname}.t1",
+ f"select sin(c8) from {dbname}.t1",
+ f"select sin(c9) from {dbname}.t1",
+ f"select sin(ts) from {dbname}.ct1" ,
+ f"select sin(c7) from {dbname}.ct1",
+ f"select sin(c8) from {dbname}.ct1",
+ f"select sin(c9) from {dbname}.ct1",
+ f"select sin(ts) from {dbname}.ct3" ,
+ f"select sin(c7) from {dbname}.ct3",
+ f"select sin(c8) from {dbname}.ct3",
+ f"select sin(c9) from {dbname}.ct3",
+ f"select sin(ts) from {dbname}.ct4" ,
+ f"select sin(c7) from {dbname}.ct4",
+ f"select sin(c8) from {dbname}.ct4",
+ f"select sin(c9) from {dbname}.ct4",
+ f"select sin(ts) from {dbname}.stb1" ,
+ f"select sin(c7) from {dbname}.stb1",
+ f"select sin(c8) from {dbname}.stb1",
+ f"select sin(c9) from {dbname}.stb1" ,
+
+ f"select sin(ts) from {dbname}.stbbb1" ,
+ f"select sin(c7) from {dbname}.stbbb1",
+
+ f"select sin(ts) from {dbname}.tbname",
+ f"select sin(c9) from {dbname}.tbname"
]
-
+
for type_sql in type_error_sql_lists:
tdSql.error(type_sql)
-
-
+
+
type_sql_lists = [
- "select sin(c1) from t1",
- "select sin(c2) from t1",
- "select sin(c3) from t1",
- "select sin(c4) from t1",
- "select sin(c5) from t1",
- "select sin(c6) from t1",
-
- "select sin(c1) from ct1",
- "select sin(c2) from ct1",
- "select sin(c3) from ct1",
- "select sin(c4) from ct1",
- "select sin(c5) from ct1",
- "select sin(c6) from ct1",
-
- "select sin(c1) from ct3",
- "select sin(c2) from ct3",
- "select sin(c3) from ct3",
- "select sin(c4) from ct3",
- "select sin(c5) from ct3",
- "select sin(c6) from ct3",
-
- "select sin(c1) from stb1",
- "select sin(c2) from stb1",
- "select sin(c3) from stb1",
- "select sin(c4) from stb1",
- "select sin(c5) from stb1",
- "select sin(c6) from stb1",
-
- "select sin(c6) as alisb from stb1",
- "select sin(c6) alisb from stb1",
+ f"select sin(c1) from {dbname}.t1",
+ f"select sin(c2) from {dbname}.t1",
+ f"select sin(c3) from {dbname}.t1",
+ f"select sin(c4) from {dbname}.t1",
+ f"select sin(c5) from {dbname}.t1",
+ f"select sin(c6) from {dbname}.t1",
+
+ f"select sin(c1) from {dbname}.ct1",
+ f"select sin(c2) from {dbname}.ct1",
+ f"select sin(c3) from {dbname}.ct1",
+ f"select sin(c4) from {dbname}.ct1",
+ f"select sin(c5) from {dbname}.ct1",
+ f"select sin(c6) from {dbname}.ct1",
+
+ f"select sin(c1) from {dbname}.ct3",
+ f"select sin(c2) from {dbname}.ct3",
+ f"select sin(c3) from {dbname}.ct3",
+ f"select sin(c4) from {dbname}.ct3",
+ f"select sin(c5) from {dbname}.ct3",
+ f"select sin(c6) from {dbname}.ct3",
+
+ f"select sin(c1) from {dbname}.stb1",
+ f"select sin(c2) from {dbname}.stb1",
+ f"select sin(c3) from {dbname}.stb1",
+ f"select sin(c4) from {dbname}.stb1",
+ f"select sin(c5) from {dbname}.stb1",
+ f"select sin(c6) from {dbname}.stb1",
+
+ f"select sin(c6) as alisb from {dbname}.stb1",
+ f"select sin(c6) alisb from {dbname}.stb1",
]
for type_sql in type_sql_lists:
tdSql.query(type_sql)
-
- def basic_sin_function(self):
- # basic query
- tdSql.query("select c1 from ct3")
+ def basic_sin_function(self, dbname="db"):
+
+ # basic query
+ tdSql.query(f"select c1 from {dbname}.ct3")
tdSql.checkRows(0)
- tdSql.query("select c1 from t1")
+ tdSql.query(f"select c1 from {dbname}.t1")
tdSql.checkRows(12)
- tdSql.query("select c1 from stb1")
+ tdSql.query(f"select c1 from {dbname}.stb1")
tdSql.checkRows(25)
# used for empty table , ct3 is empty
- tdSql.query("select sin(c1) from ct3")
+ tdSql.query(f"select sin(c1) from {dbname}.ct3")
tdSql.checkRows(0)
- tdSql.query("select sin(c2) from ct3")
+ tdSql.query(f"select sin(c2) from {dbname}.ct3")
tdSql.checkRows(0)
- tdSql.query("select sin(c3) from ct3")
+ tdSql.query(f"select sin(c3) from {dbname}.ct3")
tdSql.checkRows(0)
- tdSql.query("select sin(c4) from ct3")
+ tdSql.query(f"select sin(c4) from {dbname}.ct3")
tdSql.checkRows(0)
- tdSql.query("select sin(c5) from ct3")
+ tdSql.query(f"select sin(c5) from {dbname}.ct3")
tdSql.checkRows(0)
- tdSql.query("select sin(c6) from ct3")
+ tdSql.query(f"select sin(c6) from {dbname}.ct3")
tdSql.checkRows(0)
# # used for regular table
- tdSql.query("select sin(c1) from t1")
+ tdSql.query(f"select sin(c1) from {dbname}.t1")
tdSql.checkData(0, 0, None)
tdSql.checkData(1 , 0, 0.841470985)
tdSql.checkData(3 , 0, 0.141120008)
tdSql.checkData(5 , 0, None)
- tdSql.query("select c1, c2, c3 , c4, c5 from t1")
+ tdSql.query(f"select c1, c2, c3 , c4, c5 from {dbname}.t1")
tdSql.checkData(1, 4, 1.11000)
tdSql.checkData(3, 3, 33)
tdSql.checkData(5, 4, None)
- tdSql.query("select ts,c1, c2, c3 , c4, c5 from t1")
+ tdSql.query(f"select ts,c1, c2, c3 , c4, c5 from {dbname}.t1")
tdSql.checkData(1, 5, 1.11000)
tdSql.checkData(3, 4, 33)
tdSql.checkData(5, 5, None)
- self.check_result_auto_sin( "select abs(c1), abs(c2), abs(c3) , abs(c4), abs(c5) from t1", "select sin(abs(c1)), sin(abs(c2)) ,sin(abs(c3)), sin(abs(c4)), sin(abs(c5)) from t1")
-
+ self.check_result_auto_sin( f"select abs(c1), abs(c2), abs(c3) , abs(c4), abs(c5) from {dbname}.t1", f"select sin(abs(c1)), sin(abs(c2)) ,sin(abs(c3)), sin(abs(c4)), sin(abs(c5)) from {dbname}.t1")
+
# used for sub table
- tdSql.query("select c2 ,sin(c2) from ct1")
+ tdSql.query(f"select c2 ,sin(c2) from {dbname}.ct1")
tdSql.checkData(0, 1, -0.220708349)
tdSql.checkData(1 , 1, -0.556921845)
tdSql.checkData(3 , 1, -0.798311364)
tdSql.checkData(4 , 1, 0.000000000)
- tdSql.query("select c1, c5 ,sin(c5) from ct4")
+ tdSql.query(f"select c1, c5 ,sin(c5) from {dbname}.ct4")
tdSql.checkData(0 , 2, None)
tdSql.checkData(1 , 2, 0.518228108)
tdSql.checkData(2 , 2, 0.996475613)
tdSql.checkData(3 , 2, 0.367960369)
tdSql.checkData(5 , 2, None)
- self.check_result_auto_sin( "select c1, c2, c3 , c4, c5 from ct1", "select sin(c1), sin(c2) ,sin(c3), sin(c4), sin(c5) from ct1")
-
+ self.check_result_auto_sin( f"select c1, c2, c3 , c4, c5 from {dbname}.ct1", f"select sin(c1), sin(c2) ,sin(c3), sin(c4), sin(c5) from {dbname}.ct1")
+
# nest query for sin functions
- tdSql.query("select c4 , sin(c4) ,sin(sin(c4)) , sin(sin(sin(c4))) from ct1;")
+ tdSql.query(f"select c4 , sin(c4) ,sin(sin(c4)) , sin(sin(sin(c4))) from {dbname}.ct1;")
tdSql.checkData(0 , 0 , 88)
tdSql.checkData(0 , 1 , 0.035398303)
tdSql.checkData(0 , 2 , 0.035390911)
@@ -281,52 +269,52 @@ class TDTestCase:
tdSql.checkData(11 , 2 , 0.841042171)
tdSql.checkData(11 , 3 , 0.745338326)
- # used for stable table
-
- tdSql.query("select sin(c1) from stb1")
+ # used for stable table
+
+ tdSql.query(f"select sin(c1) from {dbname}.stb1")
tdSql.checkRows(25)
-
+
# used for not exists table
- tdSql.error("select sin(c1) from stbbb1")
- tdSql.error("select sin(c1) from tbname")
- tdSql.error("select sin(c1) from ct5")
+ tdSql.error(f"select sin(c1) from {dbname}.stbbb1")
+ tdSql.error(f"select sin(c1) from {dbname}.tbname")
+ tdSql.error(f"select sin(c1) from {dbname}.ct5")
+
+ # mix with common col
+ tdSql.query(f"select c1, sin(c1) from {dbname}.ct1")
+ tdSql.query(f"select c2, sin(c2) from {dbname}.ct4")
- # mix with common col
- tdSql.query("select c1, sin(c1) from ct1")
- tdSql.query("select c2, sin(c2) from ct4")
-
# mix with common functions
- tdSql.query("select c1, sin(c1),sin(c1), sin(sin(c1)) from ct4 ")
+ tdSql.query(f"select c1, sin(c1),sin(c1), sin(sin(c1)) from {dbname}.ct4 ")
tdSql.checkData(0 , 0 ,None)
tdSql.checkData(0 , 1 ,None)
tdSql.checkData(0 , 2 ,None)
tdSql.checkData(0 , 3 ,None)
-
+
tdSql.checkData(3 , 0 , 6)
tdSql.checkData(3 , 1 ,-0.279415498)
tdSql.checkData(3 , 2 ,-0.279415498)
tdSql.checkData(3 , 3 ,-0.275793863)
- tdSql.query("select c1, sin(c1),c5, floor(c5) from stb1 ")
+ tdSql.query(f"select c1, sin(c1),c5, floor(c5) from {dbname}.stb1 ")
# # mix with agg functions , not support
- tdSql.error("select c1, sin(c1),c5, count(c5) from stb1 ")
- tdSql.error("select c1, sin(c1),c5, count(c5) from ct1 ")
- tdSql.error("select sin(c1), count(c5) from stb1 ")
- tdSql.error("select sin(c1), count(c5) from ct1 ")
- tdSql.error("select c1, count(c5) from ct1 ")
- tdSql.error("select c1, count(c5) from stb1 ")
+ tdSql.error(f"select c1, sin(c1),c5, count(c5) from {dbname}.stb1 ")
+ tdSql.error(f"select c1, sin(c1),c5, count(c5) from {dbname}.ct1 ")
+ tdSql.error(f"select sin(c1), count(c5) from {dbname}.stb1 ")
+ tdSql.error(f"select sin(c1), count(c5) from {dbname}.ct1 ")
+ tdSql.error(f"select c1, count(c5) from {dbname}.ct1 ")
+ tdSql.error(f"select c1, count(c5) from {dbname}.stb1 ")
# agg functions mix with agg functions
- tdSql.query("select max(c5), count(c5) from stb1")
- tdSql.query("select max(c5), count(c5) from ct1")
+ tdSql.query(f"select max(c5), count(c5) from {dbname}.stb1")
+ tdSql.query(f"select max(c5), count(c5) from {dbname}.ct1")
-
- # # bug fix for compute
- tdSql.query("select c1, sin(c1) -0 ,sin(c1-4)-0 from ct4 ")
+
+ # # bug fix for compute
+ tdSql.query(f"select c1, sin(c1) -0 ,sin(c1-4)-0 from {dbname}.ct4 ")
tdSql.checkData(0, 0, None)
tdSql.checkData(0, 1, None)
tdSql.checkData(0, 2, None)
@@ -334,7 +322,7 @@ class TDTestCase:
tdSql.checkData(1, 1, 0.989358247)
tdSql.checkData(1, 2, -0.756802495)
- tdSql.query(" select c1, sin(c1) -0 ,sin(c1-0.1)-0.1 from ct4")
+ tdSql.query(f"select c1, sin(c1) -0 ,sin(c1-0.1)-0.1 from {dbname}.ct4")
tdSql.checkData(0, 0, None)
tdSql.checkData(0, 1, None)
tdSql.checkData(0, 2, None)
@@ -342,35 +330,34 @@ class TDTestCase:
tdSql.checkData(1, 1, 0.989358247)
tdSql.checkData(1, 2, 0.898941342)
- tdSql.query("select c1, sin(c1), c2, sin(c2), c3, sin(c3) from ct1")
+ tdSql.query(f"select c1, sin(c1), c2, sin(c2), c3, sin(c3) from {dbname}.ct1")
- def test_big_number(self):
+ def test_big_number(self, dbname="db"):
- tdSql.query("select c1, sin(100000000) from ct1") # bigint to double data overflow
+ tdSql.query(f"select c1, sin(100000000) from {dbname}.ct1") # bigint to double data overflow
tdSql.checkData(4, 1, math.sin(100000000))
- tdSql.query("select c1, sin(10000000000000) from ct1") # bigint to double data overflow
+ tdSql.query(f"select c1, sin(10000000000000) from {dbname}.ct1") # bigint to double data overflow
tdSql.checkData(4, 1, math.sin(10000000000000))
- tdSql.query("select c1, sin(10000000000000000000000000) from ct1") # bigint to double data overflow
- tdSql.query("select c1, sin(10000000000000000000000000.0) from ct1") # 10000000000000000000000000.0 is a double value
+ tdSql.query(f"select c1, sin(10000000000000000000000000) from {dbname}.ct1") # bigint to double data overflow
+ tdSql.query(f"select c1, sin(10000000000000000000000000.0) from {dbname}.ct1") # 10000000000000000000000000.0 is a double value
tdSql.checkData(1, 1, math.sin(10000000000000000000000000.0))
- tdSql.query("select c1, sin(10000000000000000000000000000000000) from ct1") # bigint to double data overflow
- tdSql.query("select c1, sin(10000000000000000000000000000000000.0) from ct1") # 10000000000000000000000000.0 is a double value
+ tdSql.query(f"select c1, sin(10000000000000000000000000000000000) from {dbname}.ct1") # bigint to double data overflow
+ tdSql.query(f"select c1, sin(10000000000000000000000000000000000.0) from {dbname}.ct1") # 10000000000000000000000000.0 is a double value
tdSql.checkData(4, 1, math.sin(10000000000000000000000000000000000.0))
- tdSql.query("select c1, sin(10000000000000000000000000000000000000000) from ct1") # bigint to double data overflow
- tdSql.query("select c1, sin(10000000000000000000000000000000000000000.0) from ct1") # 10000000000000000000000000.0 is a double value
+ tdSql.query(f"select c1, sin(10000000000000000000000000000000000000000) from {dbname}.ct1") # bigint to double data overflow
+ tdSql.query(f"select c1, sin(10000000000000000000000000000000000000000.0) from {dbname}.ct1") # 10000000000000000000000000.0 is a double value
tdSql.checkData(4, 1, math.sin(10000000000000000000000000000000000000000.0))
- tdSql.query("select c1, sin(10000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000) from ct1") # bigint to double data overflow
+ tdSql.query(f"select c1, sin(10000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000) from {dbname}.ct1") # bigint to double data overflow
- def abs_func_filter(self):
- tdSql.execute("use db")
- tdSql.query("select c1, abs(c1) -0 ,ceil(c1-0.1)-0 ,floor(c1+0.1)-0.1 ,ceil(sin(c1)-0.5) from ct4 where c1>5 ")
+ def abs_func_filter(self, dbname="db"):
+ tdSql.query(f"select c1, abs(c1) -0 ,ceil(c1-0.1)-0 ,floor(c1+0.1)-0.1 ,ceil(sin(c1)-0.5) from {dbname}.ct4 where c1>5 ")
tdSql.checkRows(3)
tdSql.checkData(0,0,8)
tdSql.checkData(0,1,8.000000000)
@@ -378,7 +365,7 @@ class TDTestCase:
tdSql.checkData(0,3,7.900000000)
tdSql.checkData(0,4,1.000000000)
- tdSql.query("select c1, abs(c1) -0 ,ceil(c1-0.1)-0 ,floor(c1+0.1)-0.1 ,ceil(sin(c1)-0.5) from ct4 where c1=5 ")
+ tdSql.query(f"select c1, abs(c1) -0 ,ceil(c1-0.1)-0 ,floor(c1+0.1)-0.1 ,ceil(sin(c1)-0.5) from {dbname}.ct4 where c1=5 ")
tdSql.checkRows(1)
tdSql.checkData(0,0,5)
tdSql.checkData(0,1,5.000000000)
@@ -386,7 +373,7 @@ class TDTestCase:
tdSql.checkData(0,3,4.900000000)
tdSql.checkData(0,4,-1.000000000)
- tdSql.query("select c1,c2 , abs(c1) -0 ,ceil(c1-0.1)-0 ,floor(c1+0.1)-0.1 ,ceil(sin(c1)-0.5) from ct4 where c1=sin(c1) limit 1 ")
+ tdSql.query(f"select c1,c2 , abs(c1) -0 ,ceil(c1-0.1)-0 ,floor(c1+0.1)-0.1 ,ceil(sin(c1)-0.5) from {dbname}.ct4 where c1=sin(c1) limit 1 ")
tdSql.checkRows(1)
tdSql.checkData(0,0,0)
tdSql.checkData(0,1,0)
@@ -394,45 +381,40 @@ class TDTestCase:
tdSql.checkData(0,3,0.000000000)
tdSql.checkData(0,4,-0.100000000)
tdSql.checkData(0,5,0.000000000)
-
- def pow_Arithmetic(self):
- pass
-
- def check_boundary_values(self):
+
+ def check_boundary_values(self, dbname="testdb"):
PI=3.1415926
- tdSql.execute("drop database if exists bound_test")
- tdSql.execute("create database if not exists bound_test")
- time.sleep(3)
- tdSql.execute("use bound_test")
+ tdSql.execute(f"drop database if exists {dbname}")
+ tdSql.execute(f"create database if not exists {dbname}")
tdSql.execute(
- "create table stb_bound (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(32),c9 nchar(32), c10 timestamp) tags (t1 int);"
+ f"create table {dbname}.stb_bound (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(32),c9 nchar(32), c10 timestamp) tags (t1 int);"
)
- tdSql.execute(f'create table sub1_bound using stb_bound tags ( 1 )')
+ tdSql.execute(f'create table {dbname}.sub1_bound using {dbname}.stb_bound tags ( 1 )')
tdSql.execute(
- f"insert into sub1_bound values ( now()-1s, 2147483647, 9223372036854775807, 32767, 127, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
+ f"insert into {dbname}.sub1_bound values ( now()-1s, 2147483647, 9223372036854775807, 32767, 127, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
)
tdSql.execute(
- f"insert into sub1_bound values ( now()-1s, -2147483647, -9223372036854775807, -32767, -127, -3.40E+38, -1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
+ f"insert into {dbname}.sub1_bound values ( now()-1s, -2147483647, -9223372036854775807, -32767, -127, -3.40E+38, -1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
)
tdSql.execute(
- f"insert into sub1_bound values ( now(), 2147483646, 9223372036854775806, 32766, 126, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
+ f"insert into {dbname}.sub1_bound values ( now(), 2147483646, 9223372036854775806, 32766, 126, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
)
tdSql.execute(
- f"insert into sub1_bound values ( now(), -2147483646, -9223372036854775806, -32766, -126, -3.40E+38, -1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
+ f"insert into {dbname}.sub1_bound values ( now(), -2147483646, -9223372036854775806, -32766, -126, -3.40E+38, -1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
)
tdSql.error(
- f"insert into sub1_bound values ( now()+1s, 2147483648, 9223372036854775808, 32768, 128, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
+ f"insert into {dbname}.sub1_bound values ( now()+1s, 2147483648, 9223372036854775808, 32768, 128, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
)
- self.check_result_auto_sin( "select abs(c1), abs(c2), abs(c3) , abs(c4), abs(c5) from sub1_bound ", "select sin(abs(c1)), sin(abs(c2)) ,sin(abs(c3)), sin(abs(c4)), sin(abs(c5)) from sub1_bound")
-
- self.check_result_auto_sin( "select c1, c2, c3 , c3, c2 ,c1 from sub1_bound ", "select sin(c1), sin(c2) ,sin(c3), sin(c3), sin(c2) ,sin(c1) from sub1_bound")
+ self.check_result_auto_sin( f"select abs(c1), abs(c2), abs(c3) , abs(c4) from {dbname}.sub1_bound ", f"select sin(abs(c1)), sin(abs(c2)) ,sin(abs(c3)), sin(abs(c4)) from {dbname}.sub1_bound")
+
+ self.check_result_auto_sin( f"select c1, c2, c3 , c3, c2 ,c1 from {dbname}.sub1_bound ", f"select sin(c1), sin(c2) ,sin(c3), sin(c3), sin(c2) ,sin(c1) from {dbname}.sub1_bound")
+
+ self.check_result_auto_sin(f"select abs(abs(abs(abs(abs(abs(abs(abs(abs(c1))))))))) nest_col_func from {dbname}.sub1_bound" , f"select sin(abs(c1)) from {dbname}.sub1_bound" )
- self.check_result_auto_sin("select abs(abs(abs(abs(abs(abs(abs(abs(abs(c1))))))))) nest_col_func from sub1_bound" , "select sin(abs(c1)) from sub1_bound" )
-
# check basic elem for table per row
- tdSql.query("select sin(abs(c1)) ,sin(abs(c2)) , sin(abs(c3)) , sin(abs(c4)), sin(abs(c5)), sin(abs(c6)) from sub1_bound ")
+ tdSql.query(f"select sin(abs(c1)) ,sin(abs(c2)) , sin(abs(c3)) , sin(abs(c4)), sin(abs(c5)), sin(abs(c6)) from {dbname}.sub1_bound ")
tdSql.checkData(0,0,math.sin(2147483647))
tdSql.checkData(0,1,math.sin(9223372036854775807))
tdSql.checkData(0,2,math.sin(32767))
@@ -450,83 +432,79 @@ class TDTestCase:
tdSql.checkData(3,4,math.sin(339999995214436424907732413799364296704.00000))
# check + - * / in functions
- tdSql.query("select sin(abs(c1+1)) ,sin(abs(c2)) , sin(abs(c3*1)) , sin(abs(c4/2)), sin(abs(c5))/2, sin(abs(c6)) from sub1_bound ")
+ tdSql.query(f"select sin(abs(c1+1)) ,sin(abs(c2)) , sin(abs(c3*1)) , sin(abs(c4/2)), sin(abs(c5))/2, sin(abs(c6)) from {dbname}.sub1_bound ")
tdSql.checkData(0,0,math.sin(2147483648.000000000))
tdSql.checkData(0,1,math.sin(9223372036854775807))
tdSql.checkData(0,2,math.sin(32767.000000000))
tdSql.checkData(0,3,math.sin(63.500000000))
- tdSql.execute("create stable st (ts timestamp, num1 float, num2 double) tags (t1 int);")
- tdSql.execute(f'create table tb1 using st tags (1)')
- tdSql.execute(f'create table tb2 using st tags (2)')
- tdSql.execute(f'create table tb3 using st tags (3)')
- tdSql.execute('insert into tb1 values (now()-40s, {}, {})'.format(PI/2 ,PI/2 ))
- tdSql.execute('insert into tb1 values (now()-30s, {}, {})'.format(PI ,PI ))
- tdSql.execute('insert into tb1 values (now()-20s, {}, {})'.format(PI*1.5 ,PI*1.5))
- tdSql.execute('insert into tb1 values (now()-10s, {}, {})'.format(PI*2 ,PI*2))
- tdSql.execute('insert into tb1 values (now(), {}, {})'.format(PI*2.5 ,PI*2.5))
-
- tdSql.execute('insert into tb2 values (now()-40s, {}, {})'.format(PI/2 ,PI/2 ))
- tdSql.execute('insert into tb2 values (now()-30s, {}, {})'.format(PI ,PI ))
- tdSql.execute('insert into tb2 values (now()-20s, {}, {})'.format(PI*1.5 ,PI*1.5))
- tdSql.execute('insert into tb2 values (now()-10s, {}, {})'.format(PI*2 ,PI*2))
- tdSql.execute('insert into tb2 values (now(), {}, {})'.format(PI*2.5 ,PI*2.5))
-
- for i in range(100):
- tdSql.execute('insert into tb3 values (now()+{}s, {}, {})'.format(i,PI*(5+i)/2 ,PI*(5+i)/2))
-
- self.check_result_auto_sin("select num1,num2 from tb3;" , "select sin(num1),sin(num2) from tb3")
-
- def support_super_table_test(self):
- tdSql.execute(" use db ")
- self.check_result_auto_sin( " select c5 from stb1 order by ts " , "select sin(c5) from stb1 order by ts" )
- self.check_result_auto_sin( " select c5 from stb1 order by tbname " , "select sin(c5) from stb1 order by tbname" )
- self.check_result_auto_sin( " select c5 from stb1 where c1 > 0 order by tbname " , "select sin(c5) from stb1 where c1 > 0 order by tbname" )
- self.check_result_auto_sin( " select c5 from stb1 where c1 > 0 order by tbname " , "select sin(c5) from stb1 where c1 > 0 order by tbname" )
-
- self.check_result_auto_sin( " select t1,c5 from stb1 order by ts " , "select sin(t1), sin(c5) from stb1 order by ts" )
- self.check_result_auto_sin( " select t1,c5 from stb1 order by tbname " , "select sin(t1) ,sin(c5) from stb1 order by tbname" )
- self.check_result_auto_sin( " select t1,c5 from stb1 where c1 > 0 order by tbname " , "select sin(t1) ,sin(c5) from stb1 where c1 > 0 order by tbname" )
- self.check_result_auto_sin( " select t1,c5 from stb1 where c1 > 0 order by tbname " , "select sin(t1) , sin(c5) from stb1 where c1 > 0 order by tbname" )
- pass
-
+ tdSql.execute(f"create stable {dbname}.st (ts timestamp, num1 float, num2 double) tags (t1 int);")
+ tdSql.execute(f'create table {dbname}.tb1 using {dbname}.st tags (1)')
+ tdSql.execute(f'create table {dbname}.tb2 using {dbname}.st tags (2)')
+ tdSql.execute(f'create table {dbname}.tb3 using {dbname}.st tags (3)')
+ tdSql.execute(f'insert into {dbname}.tb1 values (now()-40s, {PI/2}, {PI/2})')
+ tdSql.execute(f'insert into {dbname}.tb1 values (now()-30s, {PI}, {PI})')
+ tdSql.execute(f'insert into {dbname}.tb1 values (now()-20s, {PI*1.5}, {PI*1.5})')
+ tdSql.execute(f'insert into {dbname}.tb1 values (now()-10s, {PI*2}, {PI*2})')
+ tdSql.execute(f'insert into {dbname}.tb1 values (now(), {PI*2.5}, {PI*2.5})')
+
+ tdSql.execute(f'insert into {dbname}.tb2 values (now()-40s, {PI/2}, {PI/2})')
+ tdSql.execute(f'insert into {dbname}.tb2 values (now()-30s, {PI}, {PI})')
+ tdSql.execute(f'insert into {dbname}.tb2 values (now()-20s, {PI*1.5}, {PI*1.5})')
+ tdSql.execute(f'insert into {dbname}.tb2 values (now()-10s, {PI*2}, {PI*2})')
+ tdSql.execute(f'insert into {dbname}.tb2 values (now(), {PI*2.5}, {PI*2.5})')
+
+ self.check_result_auto_sin(f"select num1,num2 from {dbname}.tb3;" , f"select sin(num1),sin(num2) from {dbname}.tb3")
+
+ def support_super_table_test(self, dbname="db"):
+ self.check_result_auto_sin( f"select c5 from {dbname}.stb1 order by ts " , f"select sin(c5) from {dbname}.stb1 order by ts" )
+ self.check_result_auto_sin( f"select c5 from {dbname}.stb1 order by tbname " , f"select sin(c5) from {dbname}.stb1 order by tbname" )
+ self.check_result_auto_sin( f"select c5 from {dbname}.stb1 where c1 > 0 order by tbname " , f"select sin(c5) from {dbname}.stb1 where c1 > 0 order by tbname" )
+ self.check_result_auto_sin( f"select c5 from {dbname}.stb1 where c1 > 0 order by tbname " , f"select sin(c5) from {dbname}.stb1 where c1 > 0 order by tbname" )
+
+ self.check_result_auto_sin( f"select t1,c5 from {dbname}.stb1 order by ts " , f"select sin(t1), sin(c5) from {dbname}.stb1 order by ts" )
+ self.check_result_auto_sin( f"select t1,c5 from {dbname}.stb1 order by tbname " , f"select sin(t1) ,sin(c5) from {dbname}.stb1 order by tbname" )
+ self.check_result_auto_sin( f"select t1,c5 from {dbname}.stb1 where c1 > 0 order by tbname " , f"select sin(t1) ,sin(c5) from {dbname}.stb1 where c1 > 0 order by tbname" )
+ self.check_result_auto_sin( f"select t1,c5 from {dbname}.stb1 where c1 > 0 order by tbname " , f"select sin(t1) , sin(c5) from {dbname}.stb1 where c1 > 0 order by tbname" )
+
def run(self): # sourcery skip: extract-duplicate-method, remove-redundant-fstring
tdSql.prepare()
tdLog.printNoPrefix("==========step1:create table ==============")
-
+
self.prepare_datas()
- tdLog.printNoPrefix("==========step2:test errors ==============")
+ tdLog.printNoPrefix("==========step2:test errors ==============")
self.test_errors()
-
- tdLog.printNoPrefix("==========step3:support types ============")
+
+ tdLog.printNoPrefix("==========step3:support types ============")
self.support_types()
- tdLog.printNoPrefix("==========step4: sin basic query ============")
+ tdLog.printNoPrefix("==========step4: sin basic query ============")
self.basic_sin_function()
- tdLog.printNoPrefix("==========step5: big number sin query ============")
-
- self.test_big_number()
+ tdLog.printNoPrefix("==========step5: sin filter query ============")
+ self.abs_func_filter()
- tdLog.printNoPrefix("==========step6: sin boundary query ============")
+ tdLog.printNoPrefix("==========step6: big number sin query ============")
- self.check_boundary_values()
+ self.test_big_number()
+
- tdLog.printNoPrefix("==========step7: sin filter query ============")
+ tdLog.printNoPrefix("==========step7: sin boundary query ============")
- self.abs_func_filter()
+ self.check_boundary_values()
+
tdLog.printNoPrefix("==========step8: check sin result of stable query ============")
self.support_super_table_test()
-
+
def stop(self):
tdSql.close()
diff --git a/tests/system-test/2-query/smaTest.py b/tests/system-test/2-query/smaTest.py
index 67824cc3a3c372821c5014d48f6d2dbbc9ee9066..0217b6c28c2f44deb87c032957ef749fc329742e 100644
--- a/tests/system-test/2-query/smaTest.py
+++ b/tests/system-test/2-query/smaTest.py
@@ -30,14 +30,6 @@ class TDTestCase:
# updatecfgDict = {'debugFlag': 135}
# updatecfgDict = {'fqdn': 135}
- def caseDescription(self):
- '''
- limit and offset keyword function test cases;
- case1: limit offset base function test
- case2: offset return valid
- '''
- return
-
# init
def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
@@ -47,11 +39,12 @@ class TDTestCase:
self.ts = 1500000000000
- # run case
+ # run case
def run(self):
# insert data
- self.insert_data1("t1", self.ts, 1000*10000)
- self.insert_data1("t4", self.ts, 1000*10000)
+ dbname = "db"
+ self.insert_data1(f"{dbname}.t1", self.ts, 1000*10000)
+ self.insert_data1(f"{dbname}.t4", self.ts, 1000*10000)
# test base case
# self.test_case1()
tdLog.debug(" LIMIT test_case1 ............ [OK]")
@@ -60,7 +53,7 @@ class TDTestCase:
tdLog.debug(" LIMIT test_case2 ............ [OK]")
- # stop
+ # stop
def stop(self):
tdSql.close()
tdLog.success("%s successfully executed" % __file__)
@@ -70,16 +63,16 @@ class TDTestCase:
#
# create table
- def create_tables(self):
+ def create_tables(self, dbname="db"):
# super table
- tdSql.execute("create table st(ts timestamp, i1 int,i2 int) tags(area int)");
+ tdSql.execute(f"create table {dbname}.st(ts timestamp, i1 int,i2 int) tags(area int)")
# child table
- tdSql.execute("create table t1 using st tags(1)");
+ tdSql.execute(f"create table {dbname}.t1 using {dbname}.st tags(1)")
- tdSql.execute("create table st1(ts timestamp, i1 int ,i2 int) tags(area int) sma(i2) ");
- tdSql.execute("create table t4 using st1 tags(1)");
+ tdSql.execute(f"create table {dbname}.st1(ts timestamp, i1 int ,i2 int) tags(area int) sma(i2) ")
+ tdSql.execute(f"create table {dbname}.t4 using {dbname}.st1 tags(1)")
- return
+ return
# insert data1
def insert_data(self, tbname, ts_start, count):
@@ -91,7 +84,7 @@ class TDTestCase:
if i >0 and i%30000 == 0:
tdSql.execute(sql)
sql = pre_insert
- # end sql
+ # end sql
if sql != pre_insert:
tdSql.execute(sql)
@@ -107,16 +100,16 @@ class TDTestCase:
if i >0 and i%30000 == 0:
tdSql.execute(sql)
sql = pre_insert
- # end sql
+ # end sql
if sql != pre_insert:
tdSql.execute(sql)
tdLog.debug("INSERT TABLE DATA ............ [OK]")
return
- # test case1 base
+ # test case1 base
# def test_case1(self):
- # #
+ # #
# # limit base function
# #
# # base no where
diff --git a/tests/system-test/2-query/sml.py b/tests/system-test/2-query/sml.py
index 6cfb9a1dada47949f68a6a79ef05c7c4113a0a2f..4dae2ad6c0ec289e034929e6a949eed8b665c899 100644
--- a/tests/system-test/2-query/sml.py
+++ b/tests/system-test/2-query/sml.py
@@ -20,7 +20,7 @@ class TDTestCase:
tdSql.init(conn.cursor())
#tdSql.init(conn.cursor(), logSql) # output sql.txt file
- def checkFileContent(self):
+ def checkFileContent(self, dbname="sml_db"):
buildPath = tdCom.getBuildPath()
cmdStr = '%s/build/bin/sml_test'%(buildPath)
tdLog.info(cmdStr)
@@ -28,8 +28,8 @@ class TDTestCase:
if ret != 0:
tdLog.exit("sml_test failed")
- tdSql.execute('use sml_db')
- tdSql.query("select * from t_b7d815c9222ca64cdf2614c61de8f211")
+ # tdSql.execute('use sml_db')
+ tdSql.query(f"select * from {dbname}.t_b7d815c9222ca64cdf2614c61de8f211")
tdSql.checkRows(1)
tdSql.checkData(0, 0, '2016-01-01 08:00:07.000')
@@ -44,35 +44,35 @@ class TDTestCase:
tdSql.checkData(0, 9, 0)
tdSql.checkData(0, 10, 25)
- tdSql.query("select * from readings")
+ tdSql.query(f"select * from {dbname}.readings")
tdSql.checkRows(9)
- tdSql.query("select distinct tbname from readings")
+ tdSql.query(f"select distinct tbname from {dbname}.readings")
tdSql.checkRows(4)
- tdSql.query("select * from t_0799064f5487946e5d22164a822acfc8 order by _ts")
+ tdSql.query(f"select * from {dbname}.t_0799064f5487946e5d22164a822acfc8 order by _ts")
tdSql.checkRows(2)
tdSql.checkData(0, 3, "kk")
- tdSql.checkData(1, 3, None)
+ tdSql.checkData(1, 3, "")
- tdSql.query("select distinct tbname from `sys.if.bytes.out`")
+ tdSql.query(f"select distinct tbname from {dbname}.`sys.if.bytes.out`")
tdSql.checkRows(2)
- tdSql.query("select * from t_fc70dec6677d4277c5d9799c4da806da order by _ts")
+ tdSql.query(f"select * from {dbname}.t_fc70dec6677d4277c5d9799c4da806da order by _ts")
tdSql.checkRows(2)
tdSql.checkData(0, 1, 1.300000000)
tdSql.checkData(1, 1,13.000000000)
- tdSql.query("select * from `sys.procs.running`")
+ tdSql.query(f"select * from {dbname}.`sys.procs.running`")
tdSql.checkRows(1)
tdSql.checkData(0, 1, 42.000000000)
tdSql.checkData(0, 2, "web01")
- tdSql.query("select distinct tbname from `sys.cpu.nice`")
+ tdSql.query(f"select distinct tbname from {dbname}.`sys.cpu.nice`")
tdSql.checkRows(2)
- tdSql.query("select * from `sys.cpu.nice` order by _ts")
+ tdSql.query(f"select * from {dbname}.`sys.cpu.nice` order by _ts")
tdSql.checkRows(2)
tdSql.checkData(0, 1, 9.000000000)
tdSql.checkData(0, 2, "lga")
@@ -83,8 +83,11 @@ class TDTestCase:
tdSql.checkData(1, 3, "web01")
tdSql.checkData(1, 4, "t1")
- tdSql.query("select * from macylr")
+ tdSql.query(f"select * from {dbname}.macylr")
tdSql.checkRows(2)
+
+ tdSql.query(f"desc {dbname}.macylr")
+ tdSql.checkRows(25)
return
def run(self):
diff --git a/tests/system-test/2-query/spread.py b/tests/system-test/2-query/spread.py
index 51c569e56567fc7fdf1e2399008eaca5acc4059d..ffe86ff36304224e2d5f776f5088a16b445a5231 100644
--- a/tests/system-test/2-query/spread.py
+++ b/tests/system-test/2-query/spread.py
@@ -26,6 +26,8 @@ TS_TYPE_COL = [ TS_COL, ]
ALL_COL = [ INT_COL, BINT_COL, SINT_COL, TINT_COL, FLOAT_COL, DOUBLE_COL, BOOL_COL, BINARY_COL, NCHAR_COL, TS_COL ]
+DBNAME = "db"
+
class TDTestCase:
def init(self, conn, logSql):
@@ -88,6 +90,7 @@ class TDTestCase:
return join_condition
def __where_condition(self, col=None, tbname=None, query_conditon=None):
+ # tbname = tbname.split(".")[-1] if tbname else None
if query_conditon and isinstance(query_conditon, str):
if query_conditon.startswith("count"):
query_conditon = query_conditon[6:-1]
@@ -129,32 +132,33 @@ class TDTestCase:
return f"select spread({select_clause}) from {from_clause} {where_condition} {group_condition}"
@property
- def __tb_list(self):
+ def __tb_list(self, dbname=DBNAME):
return [
- "ct1",
- "ct4",
- "t1",
- "ct2",
- "stb1",
+ f"{dbname}.ct1",
+ f"{dbname}.ct4",
+ f"{dbname}.t1",
+ f"{dbname}.ct2",
+ f"{dbname}.stb1",
]
def sql_list(self):
sqls = []
__no_join_tblist = self.__tb_list
for tb in __no_join_tblist:
- select_claus_list = self.__query_condition(tb)
- for select_claus in select_claus_list:
- group_claus = self.__group_condition(col=select_claus)
- where_claus = self.__where_condition(query_conditon=select_claus)
- having_claus = self.__group_condition(col=select_claus, having=f"{select_claus} is not null")
- sqls.extend(
- (
- self.__single_sql(select_claus, tb, where_claus, having_claus),
- self.__single_sql(select_claus, tb,),
- self.__single_sql(select_claus, tb, where_condition=where_claus),
- self.__single_sql(select_claus, tb, group_condition=group_claus),
- )
+ tbname = tb.split(".")[-1]
+ select_claus_list = self.__query_condition(tbname)
+ for select_claus in select_claus_list:
+ group_claus = self.__group_condition(col=select_claus)
+ where_claus = self.__where_condition(query_conditon=select_claus)
+ having_claus = self.__group_condition(col=select_claus, having=f"{select_claus} is not null")
+ sqls.extend(
+ (
+ self.__single_sql(select_claus, tb, where_claus, having_claus),
+ self.__single_sql(select_claus, tb,),
+ self.__single_sql(select_claus, tb, where_condition=where_claus),
+ self.__single_sql(select_claus, tb, group_condition=group_claus),
)
+ )
# return filter(None, sqls)
return list(filter(None, sqls))
@@ -166,28 +170,28 @@ class TDTestCase:
tdLog.info(f"sql: {sqls[i]}")
tdSql.query(sqls[i])
- def __test_current(self):
- tdSql.query("select spread(ts) from ct1")
+ def __test_current(self, dbname=DBNAME):
+ tdSql.query(f"select spread(ts) from {dbname}.ct1")
tdSql.checkRows(1)
- tdSql.query("select spread(c1) from ct2")
+ tdSql.query(f"select spread(c1) from {dbname}.ct2")
tdSql.checkRows(1)
- tdSql.query("select spread(c1) from ct4 group by c1")
+ tdSql.query(f"select spread(c1) from {dbname}.ct4 group by c1")
tdSql.checkRows(self.rows + 3)
- tdSql.query("select spread(c1) from ct4 group by c7")
+ tdSql.query(f"select spread(c1) from {dbname}.ct4 group by c7")
tdSql.checkRows(3)
- tdSql.query("select spread(ct2.c1) from ct4 join ct2 on ct4.ts=ct2.ts")
+ tdSql.query(f"select spread(ct2.c1) from {dbname}.ct4 ct4 join {dbname}.ct2 ct2 on ct4.ts=ct2.ts")
tdSql.checkRows(1)
self.spread_check()
- def __test_error(self):
+ def __test_error(self, dbname=DBNAME):
tdLog.printNoPrefix("===step 0: err case, must return err")
- tdSql.error( "select spread() from ct1" )
- tdSql.error( "select spread(1, 2) from ct2" )
- tdSql.error( f"select spread({NUM_COL[0]}, {NUM_COL[1]}) from ct4" )
- tdSql.error( f"select spread({BOOLEAN_COL[0]}) from t1" )
- tdSql.error( f"select spread({CHAR_COL[0]}) from stb1" )
+ tdSql.error( f"select spread() from {dbname}.ct1" )
+ tdSql.error( f"select spread(1, 2) from {dbname}.ct2" )
+ tdSql.error( f"select spread({NUM_COL[0]}, {NUM_COL[1]}) from {dbname}.ct4" )
+ tdSql.error( f"select spread({BOOLEAN_COL[0]}) from {dbname}.t1" )
+ tdSql.error( f"select spread({CHAR_COL[0]}) from {dbname}.stb1" )
# tdSql.error( ''' select spread(['c1 + c1', 'c1 + c2', 'c1 + c3', 'c1 + c4', 'c1 + c5', 'c1 + c6', 'c1 + c7', 'c1 + c8', 'c1 + c9', 'c1 + c10'])
# from ct1
@@ -196,20 +200,20 @@ class TDTestCase:
# having ['c1 + c1', 'c1 + c2', 'c1 + c3', 'c1 + c4', 'c1 + c5', 'c1 + c6', 'c1 + c7', 'c1 + c8', 'c1 + c9', 'c1 + c10'] is not null ''' )
# tdSql.error( "select c1 from ct1 union select c1 from ct2 union select c1 from ct4 ")
- def all_test(self):
- self.__test_error()
- self.__test_current()
+ def all_test(self, dbname=DBNAME):
+ self.__test_error(dbname)
+ self.__test_current(dbname)
- def __create_tb(self):
+ def __create_tb(self, dbname=DBNAME):
tdLog.printNoPrefix("==========step1:create table")
- create_stb_sql = f'''create table stb1(
+ create_stb_sql = f'''create table {dbname}.stb1(
ts timestamp, {INT_COL} int, {BINT_COL} bigint, {SINT_COL} smallint, {TINT_COL} tinyint,
{FLOAT_COL} float, {DOUBLE_COL} double, {BOOL_COL} bool,
{BINARY_COL} binary(16), {NCHAR_COL} nchar(32), {TS_COL} timestamp
) tags (t1 int)
'''
- create_ntb_sql = f'''create table t1(
+ create_ntb_sql = f'''create table {dbname}.t1(
ts timestamp, {INT_COL} int, {BINT_COL} bigint, {SINT_COL} smallint, {TINT_COL} tinyint,
{FLOAT_COL} float, {DOUBLE_COL} double, {BOOL_COL} bool,
{BINARY_COL} binary(16), {NCHAR_COL} nchar(32), {TS_COL} timestamp
@@ -219,30 +223,30 @@ class TDTestCase:
tdSql.execute(create_ntb_sql)
for i in range(4):
- tdSql.execute(f'create table ct{i+1} using stb1 tags ( {i+1} )')
+ tdSql.execute(f'create table {dbname}.ct{i+1} using {dbname}.stb1 tags ( {i+1} )')
{ i % 32767 }, { i % 127}, { i * 1.11111 }, { i * 1000.1111 }, { i % 2}
- def __insert_data(self, rows):
+ def __insert_data(self, rows, dbname=DBNAME):
now_time = int(datetime.datetime.timestamp(datetime.datetime.now()) * 1000)
for i in range(rows):
tdSql.execute(
- f"insert into ct1 values ( { now_time - i * 1000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )"
+ f"insert into {dbname}.ct1 values ( { now_time - i * 1000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )"
)
tdSql.execute(
- f"insert into ct4 values ( { now_time - i * 7776000000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )"
+ f"insert into {dbname}.ct4 values ( { now_time - i * 7776000000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )"
)
tdSql.execute(
- f"insert into ct2 values ( { now_time - i * 7776000000 }, {-i}, {-11111 * i}, {-111 * i % 32767 }, {-11 * i % 127}, {-1.11*i}, {-1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )"
+ f"insert into {dbname}.ct2 values ( { now_time - i * 7776000000 }, {-i}, {-11111 * i}, {-111 * i % 32767 }, {-11 * i % 127}, {-1.11*i}, {-1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )"
)
tdSql.execute(
- f'''insert into ct1 values
+ f'''insert into {dbname}.ct1 values
( { now_time - rows * 5 }, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar_测试_0', { now_time + 8 } )
( { now_time + 10000 }, { rows }, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar_测试_9', { now_time + 9 } )
'''
)
tdSql.execute(
- f'''insert into ct4 values
+ f'''insert into {dbname}.ct4 values
( { now_time - rows * 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( { now_time - rows * 3888000000 + 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( { now_time + 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
@@ -258,7 +262,7 @@ class TDTestCase:
)
tdSql.execute(
- f'''insert into ct2 values
+ f'''insert into {dbname}.ct2 values
( { now_time - rows * 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( { now_time - rows * 3888000000 + 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( { now_time + 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
@@ -274,13 +278,13 @@ class TDTestCase:
)
for i in range(rows):
- insert_data = f'''insert into t1 values
+ insert_data = f'''insert into {dbname}.t1 values
( { now_time - i * 3600000 }, {i}, {i * 11111}, { i % 32767 }, { i % 127}, { i * 1.11111 }, { i * 1000.1111 }, { i % 2},
"binary_{i}", "nchar_测试_{i}", { now_time - 1000 * i } )
'''
tdSql.execute(insert_data)
tdSql.execute(
- f'''insert into t1 values
+ f'''insert into {dbname}.t1 values
( { now_time + 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( { now_time - (( rows // 2 ) * 60 + 30) * 60000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( { now_time - rows * 3600000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
diff --git a/tests/system-test/2-query/sqrt.py b/tests/system-test/2-query/sqrt.py
index 425d59f1186615467f4aac8a085949029422b760..9597375885cf5fdedf1d52a547d7558430cb46e4 100644
--- a/tests/system-test/2-query/sqrt.py
+++ b/tests/system-test/2-query/sqrt.py
@@ -9,48 +9,46 @@ from util.cases import *
class TDTestCase:
- updatecfgDict = {'debugFlag': 143 ,"cDebugFlag":143,"uDebugFlag":143 ,"rpcDebugFlag":143 , "tmrDebugFlag":143 ,
- "jniDebugFlag":143 ,"simDebugFlag":143,"dDebugFlag":143, "dDebugFlag":143,"vDebugFlag":143,"mDebugFlag":143,"qDebugFlag":143,
- "wDebugFlag":143,"sDebugFlag":143,"tsdbDebugFlag":143,"tqDebugFlag":143 ,"fsDebugFlag":143 ,"udfDebugFlag":143}
+
def init(self, conn, powSql):
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor())
- def prepare_datas(self):
+ def prepare_datas(self, dbname="db"):
tdSql.execute(
- '''create table stb1
+ f'''create table {dbname}.stb1
(ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp)
tags (t1 int)
'''
)
tdSql.execute(
- '''
- create table t1
+ f'''
+ create table {dbname}.t1
(ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp)
'''
)
for i in range(4):
- tdSql.execute(f'create table ct{i+1} using stb1 tags ( {i+1} )')
+ tdSql.execute(f'create table {dbname}.ct{i+1} using {dbname}.stb1 tags ( {i+1} )')
for i in range(9):
tdSql.execute(
- f"insert into ct1 values ( now()-{i*10}s, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )"
+ f"insert into {dbname}.ct1 values ( now()-{i*10}s, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )"
)
tdSql.execute(
- f"insert into ct4 values ( now()-{i*90}d, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )"
+ f"insert into {dbname}.ct4 values ( now()-{i*90}d, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )"
)
- tdSql.execute("insert into ct1 values (now()-45s, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar0', now()+8a )")
- tdSql.execute("insert into ct1 values (now()+10s, 9, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )")
- tdSql.execute("insert into ct1 values (now()+15s, 9, -99999, -999, -99, -9.99, NULL, 1, 'binary9', 'nchar9', now()+9a )")
- tdSql.execute("insert into ct1 values (now()+20s, 9, -99999, -999, NULL, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )")
+ tdSql.execute(f"insert into {dbname}.ct1 values (now()-45s, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar0', now()+8a )")
+ tdSql.execute(f"insert into {dbname}.ct1 values (now()+10s, 9, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )")
+ tdSql.execute(f"insert into {dbname}.ct1 values (now()+15s, 9, -99999, -999, -99, -9.99, NULL, 1, 'binary9', 'nchar9', now()+9a )")
+ tdSql.execute(f"insert into {dbname}.ct1 values (now()+20s, 9, -99999, -999, NULL, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )")
- tdSql.execute("insert into ct4 values (now()-810d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
- tdSql.execute("insert into ct4 values (now()-400d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
- tdSql.execute("insert into ct4 values (now()+90d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
+ tdSql.execute(f"insert into {dbname}.ct4 values (now()-810d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
+ tdSql.execute(f"insert into {dbname}.ct4 values (now()-400d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
+ tdSql.execute(f"insert into {dbname}.ct4 values (now()+90d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
tdSql.execute(
- f'''insert into t1 values
+ f'''insert into {dbname}.t1 values
( '2020-04-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( '2020-10-21 01:01:01.000', 1, 11111, 111, 11, 1.11, 11.11, 1, "binary1", "nchar1", now()+1a )
( '2020-12-31 01:01:01.000', 2, 22222, 222, 22, 2.22, 22.22, 0, "binary2", "nchar2", now()+2a )
@@ -85,84 +83,74 @@ class TDTestCase:
row_check.append(elem)
auto_result.append(row_check)
- check_status = True
-
+ tdSql.query(pow_query)
for row_index , row in enumerate(pow_result):
for col_index , elem in enumerate(row):
- if auto_result[row_index][col_index] == None and not (auto_result[row_index][col_index] == None and elem == None):
- check_status = False
- elif auto_result[row_index][col_index] != None and (auto_result[row_index][col_index] - elem > 0.00000001):
- check_status = False
- else:
- pass
- if not check_status:
- tdLog.notice("sqrt function value has not as expected , sql is \"%s\" "%pow_query )
- sys.exit(1)
- else:
- tdLog.info("sqrt value check pass , it work as expected ,sql is \"%s\" "%pow_query )
-
- def test_errors(self):
+ tdSql.checkData(row_index ,col_index ,auto_result[row_index][col_index])
+
+
+ def test_errors(self, dbname="db"):
error_sql_lists = [
- "select sqrt from t1",
- # "select sqrt(-+--+c1 ) from t1",
- # "select +-sqrt(c1) from t1",
- # "select ++-sqrt(c1) from t1",
- # "select ++--sqrt(c1) from t1",
- # "select - -sqrt(c1)*0 from t1",
- # "select sqrt(tbname+1) from t1 ",
- "select sqrt(123--123)==1 from t1",
- "select sqrt(c1) as 'd1' from t1",
- "select sqrt(c1 ,c2) from t1",
- "select sqrt(c1 ,NULL ) from t1",
- "select sqrt(,) from t1;",
- "select sqrt(sqrt(c1) ab from t1)",
- "select sqrt(c1 ) as int from t1",
- "select sqrt from stb1",
- # "select sqrt(-+--+c1) from stb1",
- # "select +-sqrt(c1) from stb1",
- # "select ++-sqrt(c1) from stb1",
- # "select ++--sqrt(c1) from stb1",
- # "select - -sqrt(c1)*0 from stb1",
- # "select sqrt(tbname+1) from stb1 ",
- "select sqrt(123--123)==1 from stb1",
- "select sqrt(c1) as 'd1' from stb1",
- "select sqrt(c1 ,c2 ) from stb1",
- "select sqrt(c1 ,NULL) from stb1",
- "select sqrt(,) from stb1;",
- "select sqrt(sqrt(c1) ab from stb1)",
- "select sqrt(c1) as int from stb1"
+ f"select sqrt from {dbname}.t1",
+ # f"select sqrt(-+--+c1 ) from {dbname}.t1",
+ # f"select +-sqrt(c1) from {dbname}.t1",
+ # f"select ++-sqrt(c1) from {dbname}.t1",
+ # f"select ++--sqrt(c1) from {dbname}.t1",
+ # f"select - -sqrt(c1)*0 from {dbname}.t1",
+ # f"select sqrt(tbname+1) from {dbname}.t1 ",
+ f"select sqrt(123--123)==1 from {dbname}.t1",
+ f"select sqrt(c1) as 'd1' from {dbname}.t1",
+ f"select sqrt(c1 ,c2) from {dbname}.t1",
+ f"select sqrt(c1 ,NULL ) from {dbname}.t1",
+ f"select sqrt(,) from {dbname}.t1;",
+ f"select sqrt(sqrt(c1) ab from {dbname}.t1)",
+ f"select sqrt(c1 ) as int from {dbname}.t1",
+ f"select sqrt from {dbname}.stb1",
+ # f"select sqrt(-+--+c1) from {dbname}.stb1",
+ # f"select +-sqrt(c1) from {dbname}.stb1",
+ # f"select ++-sqrt(c1) from {dbname}.stb1",
+ # f"select ++--sqrt(c1) from {dbname}.stb1",
+ # f"select - -sqrt(c1)*0 from {dbname}.stb1",
+ # f"select sqrt(tbname+1) from {dbname}.stb1 ",
+ f"select sqrt(123--123)==1 from {dbname}.stb1",
+ f"select sqrt(c1) as 'd1' from {dbname}.stb1",
+ f"select sqrt(c1 ,c2 ) from {dbname}.stb1",
+ f"select sqrt(c1 ,NULL) from {dbname}.stb1",
+ f"select sqrt(,) from {dbname}.stb1;",
+ f"select sqrt(sqrt(c1) ab from {dbname}.stb1)",
+ f"select sqrt(c1) as int from {dbname}.stb1"
]
for error_sql in error_sql_lists:
tdSql.error(error_sql)
- def support_types(self):
+ def support_types(self, dbname="db"):
type_error_sql_lists = [
- "select sqrt(ts) from t1" ,
- "select sqrt(c7) from t1",
- "select sqrt(c8) from t1",
- "select sqrt(c9) from t1",
- "select sqrt(ts) from ct1" ,
- "select sqrt(c7) from ct1",
- "select sqrt(c8) from ct1",
- "select sqrt(c9) from ct1",
- "select sqrt(ts) from ct3" ,
- "select sqrt(c7) from ct3",
- "select sqrt(c8) from ct3",
- "select sqrt(c9) from ct3",
- "select sqrt(ts) from ct4" ,
- "select sqrt(c7) from ct4",
- "select sqrt(c8) from ct4",
- "select sqrt(c9) from ct4",
- "select sqrt(ts) from stb1" ,
- "select sqrt(c7) from stb1",
- "select sqrt(c8) from stb1",
- "select sqrt(c9) from stb1" ,
-
- "select sqrt(ts) from stbbb1" ,
- "select sqrt(c7) from stbbb1",
-
- "select sqrt(ts) from tbname",
- "select sqrt(c9) from tbname"
+ f"select sqrt(ts) from {dbname}.t1" ,
+ f"select sqrt(c7) from {dbname}.t1",
+ f"select sqrt(c8) from {dbname}.t1",
+ f"select sqrt(c9) from {dbname}.t1",
+ f"select sqrt(ts) from {dbname}.ct1" ,
+ f"select sqrt(c7) from {dbname}.ct1",
+ f"select sqrt(c8) from {dbname}.ct1",
+ f"select sqrt(c9) from {dbname}.ct1",
+ f"select sqrt(ts) from {dbname}.ct3" ,
+ f"select sqrt(c7) from {dbname}.ct3",
+ f"select sqrt(c8) from {dbname}.ct3",
+ f"select sqrt(c9) from {dbname}.ct3",
+ f"select sqrt(ts) from {dbname}.ct4" ,
+ f"select sqrt(c7) from {dbname}.ct4",
+ f"select sqrt(c8) from {dbname}.ct4",
+ f"select sqrt(c9) from {dbname}.ct4",
+ f"select sqrt(ts) from {dbname}.stb1" ,
+ f"select sqrt(c7) from {dbname}.stb1",
+ f"select sqrt(c8) from {dbname}.stb1",
+ f"select sqrt(c9) from {dbname}.stb1" ,
+
+ f"select sqrt(ts) from {dbname}.stbbb1" ,
+ f"select sqrt(c7) from {dbname}.stbbb1",
+
+ f"select sqrt(ts) from {dbname}.tbname",
+ f"select sqrt(c9) from {dbname}.tbname"
]
@@ -171,103 +159,103 @@ class TDTestCase:
type_sql_lists = [
- "select sqrt(c1) from t1",
- "select sqrt(c2) from t1",
- "select sqrt(c3) from t1",
- "select sqrt(c4) from t1",
- "select sqrt(c5) from t1",
- "select sqrt(c6) from t1",
-
- "select sqrt(c1) from ct1",
- "select sqrt(c2) from ct1",
- "select sqrt(c3) from ct1",
- "select sqrt(c4) from ct1",
- "select sqrt(c5) from ct1",
- "select sqrt(c6) from ct1",
-
- "select sqrt(c1) from ct3",
- "select sqrt(c2) from ct3",
- "select sqrt(c3) from ct3",
- "select sqrt(c4) from ct3",
- "select sqrt(c5) from ct3",
- "select sqrt(c6) from ct3",
-
- "select sqrt(c1) from stb1",
- "select sqrt(c2) from stb1",
- "select sqrt(c3) from stb1",
- "select sqrt(c4) from stb1",
- "select sqrt(c5) from stb1",
- "select sqrt(c6) from stb1",
-
- "select sqrt(c6) as alisb from stb1",
- "select sqrt(c6) alisb from stb1",
+ f"select sqrt(c1) from {dbname}.t1",
+ f"select sqrt(c2) from {dbname}.t1",
+ f"select sqrt(c3) from {dbname}.t1",
+ f"select sqrt(c4) from {dbname}.t1",
+ f"select sqrt(c5) from {dbname}.t1",
+ f"select sqrt(c6) from {dbname}.t1",
+
+ f"select sqrt(c1) from {dbname}.ct1",
+ f"select sqrt(c2) from {dbname}.ct1",
+ f"select sqrt(c3) from {dbname}.ct1",
+ f"select sqrt(c4) from {dbname}.ct1",
+ f"select sqrt(c5) from {dbname}.ct1",
+ f"select sqrt(c6) from {dbname}.ct1",
+
+ f"select sqrt(c1) from {dbname}.ct3",
+ f"select sqrt(c2) from {dbname}.ct3",
+ f"select sqrt(c3) from {dbname}.ct3",
+ f"select sqrt(c4) from {dbname}.ct3",
+ f"select sqrt(c5) from {dbname}.ct3",
+ f"select sqrt(c6) from {dbname}.ct3",
+
+ f"select sqrt(c1) from {dbname}.stb1",
+ f"select sqrt(c2) from {dbname}.stb1",
+ f"select sqrt(c3) from {dbname}.stb1",
+ f"select sqrt(c4) from {dbname}.stb1",
+ f"select sqrt(c5) from {dbname}.stb1",
+ f"select sqrt(c6) from {dbname}.stb1",
+
+ f"select sqrt(c6) as alisb from {dbname}.stb1",
+ f"select sqrt(c6) alisb from {dbname}.stb1",
]
for type_sql in type_sql_lists:
tdSql.query(type_sql)
- def basic_sqrt_function(self):
+ def basic_sqrt_function(self, dbname="db"):
# basic query
- tdSql.query("select c1 from ct3")
+ tdSql.query(f"select c1 from {dbname}.ct3")
tdSql.checkRows(0)
- tdSql.query("select c1 from t1")
+ tdSql.query(f"select c1 from {dbname}.t1")
tdSql.checkRows(12)
- tdSql.query("select c1 from stb1")
+ tdSql.query(f"select c1 from {dbname}.stb1")
tdSql.checkRows(25)
# used for empty table , ct3 is empty
- tdSql.query("select sqrt(c1) from ct3")
+ tdSql.query(f"select sqrt(c1) from {dbname}.ct3")
tdSql.checkRows(0)
- tdSql.query("select sqrt(c2) from ct3")
+ tdSql.query(f"select sqrt(c2) from {dbname}.ct3")
tdSql.checkRows(0)
- tdSql.query("select sqrt(c3) from ct3")
+ tdSql.query(f"select sqrt(c3) from {dbname}.ct3")
tdSql.checkRows(0)
- tdSql.query("select sqrt(c4) from ct3")
+ tdSql.query(f"select sqrt(c4) from {dbname}.ct3")
tdSql.checkRows(0)
- tdSql.query("select sqrt(c5) from ct3")
+ tdSql.query(f"select sqrt(c5) from {dbname}.ct3")
tdSql.checkRows(0)
- tdSql.query("select sqrt(c6) from ct3")
+ tdSql.query(f"select sqrt(c6) from {dbname}.ct3")
tdSql.checkRows(0)
# # used for regular table
- tdSql.query("select sqrt(c1) from t1")
+ tdSql.query(f"select sqrt(c1) from {dbname}.t1")
tdSql.checkData(0, 0, None)
tdSql.checkData(1 , 0, 1.000000000)
tdSql.checkData(3 , 0, 1.732050808)
tdSql.checkData(5 , 0, None)
- tdSql.query("select c1, c2, c3 , c4, c5 from t1")
+ tdSql.query(f"select c1, c2, c3 , c4, c5 from {dbname}.t1")
tdSql.checkData(1, 4, 1.11000)
tdSql.checkData(3, 3, 33)
tdSql.checkData(5, 4, None)
- tdSql.query("select ts,c1, c2, c3 , c4, c5 from t1")
+ tdSql.query(f"select ts,c1, c2, c3 , c4, c5 from {dbname}.t1")
tdSql.checkData(1, 5, 1.11000)
tdSql.checkData(3, 4, 33)
tdSql.checkData(5, 5, None)
- self.check_result_auto_sqrt( "select abs(c1), abs(c2), abs(c3) , abs(c4), abs(c5) from t1", "select sqrt(abs(c1)), sqrt(abs(c2)) ,sqrt(abs(c3)), sqrt(abs(c4)), sqrt(abs(c5)) from t1")
+ self.check_result_auto_sqrt( f"select abs(c1), abs(c2), abs(c3) , abs(c4), abs(c5) from {dbname}.t1", f"select sqrt(abs(c1)), sqrt(abs(c2)) ,sqrt(abs(c3)), sqrt(abs(c4)), sqrt(abs(c5)) from {dbname}.t1")
# used for sub table
- tdSql.query("select c2 ,sqrt(c2) from ct1")
+ tdSql.query(f"select c2 ,sqrt(c2) from {dbname}.ct1")
tdSql.checkData(0, 1, 298.140906284)
tdSql.checkData(1 , 1, 278.885281074)
tdSql.checkData(3 , 1, 235.701081881)
tdSql.checkData(4 , 1, 0.000000000)
- tdSql.query("select c1, c5 ,sqrt(c5) from ct4")
+ tdSql.query(f"select c1, c5 ,sqrt(c5) from {dbname}.ct4")
tdSql.checkData(0 , 2, None)
tdSql.checkData(1 , 2, 2.979932904)
tdSql.checkData(2 , 2, 2.787471970)
tdSql.checkData(3 , 2, 2.580697551)
tdSql.checkData(5 , 2, None)
- self.check_result_auto_sqrt( "select c1, c2, c3 , c4, c5 from ct1", "select sqrt(c1), sqrt(c2) ,sqrt(c3), sqrt(c4), sqrt(c5) from ct1")
+ self.check_result_auto_sqrt( f"select c1, c2, c3 , c4, c5 from {dbname}.ct1", f"select sqrt(c1), sqrt(c2) ,sqrt(c3), sqrt(c4), sqrt(c5) from {dbname}.ct1")
# nest query for sqrt functions
- tdSql.query("select c4 , sqrt(c4) ,sqrt(sqrt(c4)) , sqrt(sqrt(sqrt(c4))) from ct1;")
+ tdSql.query(f"select c4 , sqrt(c4) ,sqrt(sqrt(c4)) , sqrt(sqrt(sqrt(c4))) from {dbname}.ct1;")
tdSql.checkData(0 , 0 , 88)
tdSql.checkData(0 , 1 , 9.380831520)
tdSql.checkData(0 , 2 , 3.062814314)
@@ -285,22 +273,22 @@ class TDTestCase:
# used for stable table
- tdSql.query("select sqrt(c1) from stb1")
+ tdSql.query(f"select sqrt(c1) from {dbname}.stb1")
tdSql.checkRows(25)
# used for not exists table
- tdSql.error("select sqrt(c1) from stbbb1")
- tdSql.error("select sqrt(c1) from tbname")
- tdSql.error("select sqrt(c1) from ct5")
+ tdSql.error(f"select sqrt(c1) from {dbname}.stbbb1")
+ tdSql.error(f"select sqrt(c1) from {dbname}.tbname")
+ tdSql.error(f"select sqrt(c1) from {dbname}.ct5")
# mix with common col
- tdSql.query("select c1, sqrt(c1) from ct1")
+ tdSql.query(f"select c1, sqrt(c1) from {dbname}.ct1")
tdSql.checkData(0 , 0 ,8)
tdSql.checkData(0 , 1 ,2.828427125)
tdSql.checkData(4 , 0 ,0)
tdSql.checkData(4 , 1 ,0.000000000)
- tdSql.query("select c2, sqrt(c2) from ct4")
+ tdSql.query(f"select c2, sqrt(c2) from {dbname}.ct4")
tdSql.checkData(0 , 0 , None)
tdSql.checkData(0 , 1 ,None)
tdSql.checkData(4 , 0 ,55555)
@@ -309,7 +297,7 @@ class TDTestCase:
tdSql.checkData(5 , 1 ,None)
# mix with common functions
- tdSql.query("select c1, sqrt(c1),sqrt(c1), sqrt(sqrt(c1)) from ct4 ")
+ tdSql.query(f"select c1, sqrt(c1),sqrt(c1), sqrt(sqrt(c1)) from {dbname}.ct4 ")
tdSql.checkData(0 , 0 ,None)
tdSql.checkData(0 , 1 ,None)
tdSql.checkData(0 , 2 ,None)
@@ -320,34 +308,34 @@ class TDTestCase:
tdSql.checkData(3 , 2 ,2.449489743)
tdSql.checkData(3 , 3 ,1.565084580)
- tdSql.query("select c1, sqrt(c1),c5, floor(c5) from stb1 ")
+ tdSql.query(f"select c1, sqrt(c1),c5, floor(c5) from {dbname}.stb1 ")
# # mix with agg functions , not support
- tdSql.error("select c1, sqrt(c1),c5, count(c5) from stb1 ")
- tdSql.error("select c1, sqrt(c1),c5, count(c5) from ct1 ")
- tdSql.error("select sqrt(c1), count(c5) from stb1 ")
- tdSql.error("select sqrt(c1), count(c5) from ct1 ")
- tdSql.error("select c1, count(c5) from ct1 ")
- tdSql.error("select c1, count(c5) from stb1 ")
+ tdSql.error(f"select c1, sqrt(c1),c5, count(c5) from {dbname}.stb1 ")
+ tdSql.error(f"select c1, sqrt(c1),c5, count(c5) from {dbname}.ct1 ")
+ tdSql.error(f"select sqrt(c1), count(c5) from {dbname}.stb1 ")
+ tdSql.error(f"select sqrt(c1), count(c5) from {dbname}.ct1 ")
+ tdSql.error(f"select c1, count(c5) from {dbname}.ct1 ")
+ tdSql.error(f"select c1, count(c5) from {dbname}.stb1 ")
# agg functions mix with agg functions
- tdSql.query("select max(c5), count(c5) from stb1")
- tdSql.query("select max(c5), count(c5) from ct1")
+ tdSql.query(f"select max(c5), count(c5) from {dbname}.stb1")
+ tdSql.query(f"select max(c5), count(c5) from {dbname}.ct1")
# bug fix for count
- tdSql.query("select count(c1) from ct4 ")
+ tdSql.query(f"select count(c1) from {dbname}.ct4 ")
tdSql.checkData(0,0,9)
- tdSql.query("select count(*) from ct4 ")
+ tdSql.query(f"select count(*) from {dbname}.ct4 ")
tdSql.checkData(0,0,12)
- tdSql.query("select count(c1) from stb1 ")
+ tdSql.query(f"select count(c1) from {dbname}.stb1 ")
tdSql.checkData(0,0,22)
- tdSql.query("select count(*) from stb1 ")
+ tdSql.query(f"select count(*) from {dbname}.stb1 ")
tdSql.checkData(0,0,25)
# # bug fix for compute
- tdSql.query("select c1, sqrt(c1) -0 ,sqrt(c1-4)-0 from ct4 ")
+ tdSql.query(f"select c1, sqrt(c1) -0 ,sqrt(c1-4)-0 from {dbname}.ct4 ")
tdSql.checkData(0, 0, None)
tdSql.checkData(0, 1, None)
tdSql.checkData(0, 2, None)
@@ -355,7 +343,7 @@ class TDTestCase:
tdSql.checkData(1, 1, 2.828427125)
tdSql.checkData(1, 2, 2.000000000)
- tdSql.query(" select c1, sqrt(c1) -0 ,sqrt(c1-0.1)-0.1 from ct4")
+ tdSql.query(f"select c1, sqrt(c1) -0 ,sqrt(c1-0.1)-0.1 from {dbname}.ct4")
tdSql.checkData(0, 0, None)
tdSql.checkData(0, 1, None)
tdSql.checkData(0, 2, None)
@@ -363,57 +351,56 @@ class TDTestCase:
tdSql.checkData(1, 1, 2.828427125)
tdSql.checkData(1, 2, 2.710693865)
- tdSql.query("select c1, sqrt(c1), c2, sqrt(c2), c3, sqrt(c3) from ct1")
+ tdSql.query(f"select c1, sqrt(c1), c2, sqrt(c2), c3, sqrt(c3) from {dbname}.ct1")
- def test_big_number(self):
+ def test_big_number(self, dbname="db"):
- tdSql.query("select c1, sqrt(100000000) from ct1") # bigint to double data overflow
+ tdSql.query(f"select c1, sqrt(100000000) from {dbname}.ct1") # bigint to double data overflow
tdSql.checkData(4, 1, 10000.000000000)
- tdSql.query("select c1, sqrt(10000000000000) from ct1") # bigint to double data overflow
+ tdSql.query(f"select c1, sqrt(10000000000000) from {dbname}.ct1") # bigint to double data overflow
tdSql.checkData(4, 1, 3162277.660168380)
- tdSql.query("select c1, sqrt(c1) + sqrt(10000000000000000000000000) from ct1") # bigint to double data overflow
- tdSql.query("select c1, sqrt(c1) + sqrt(10000000000000000000000000.0) from ct1") # 10000000000000000000000000.0 is a double value
+ tdSql.query(f"select c1, sqrt(c1) + sqrt(10000000000000000000000000) from {dbname}.ct1") # bigint to double data overflow
+ tdSql.query(f"select c1, sqrt(c1) + sqrt(10000000000000000000000000.0) from {dbname}.ct1") # 10000000000000000000000000.0 is a double value
tdSql.checkData(1, 1, 3162277660171.025390625)
- tdSql.query("select c1, sqrt(10000000000000000000000000000000000) from ct1") # bigint to double data overflow
- tdSql.query("select c1, sqrt(10000000000000000000000000000000000.0) from ct1") # 10000000000000000000000000.0 is a double value
+ tdSql.query(f"select c1, sqrt(10000000000000000000000000000000000) from {dbname}.ct1") # bigint to double data overflow
+        tdSql.query(f"select c1, sqrt(10000000000000000000000000000000000.0) from {dbname}.ct1") # 10000000000000000000000000000000000.0 is a double value
tdSql.checkData(4, 1, 100000000000000000.000000000)
- tdSql.query("select c1, sqrt(10000000000000000000000000000000000000000) from ct1") # bigint to double data overflow
- tdSql.query("select c1, sqrt(10000000000000000000000000000000000000000.0) from ct1") # 10000000000000000000000000.0 is a double value
+ tdSql.query(f"select c1, sqrt(10000000000000000000000000000000000000000) from {dbname}.ct1") # bigint to double data overflow
+        tdSql.query(f"select c1, sqrt(10000000000000000000000000000000000000000.0) from {dbname}.ct1") # 10000000000000000000000000000000000000000.0 is a double value
tdSql.checkData(4, 1, 100000000000000000000.000000000)
- tdSql.query("select c1, sqrt(10000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000) from ct1") # bigint to double data overflow
+ tdSql.query(f"select c1, sqrt(10000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000) from {dbname}.ct1") # bigint to double data overflow
- def pow_base_test(self):
+ def pow_base_test(self, dbname="db"):
# base is an regular number ,int or double
- tdSql.query("select c1, sqrt(c1) from ct1")
+ tdSql.query(f"select c1, sqrt(c1) from {dbname}.ct1")
tdSql.checkData(0, 1,2.828427125)
tdSql.checkRows(13)
# # bug for compute in functions
- # tdSql.query("select c1, abs(1/0) from ct1")
+ # tdSql.query(f"select c1, abs(1/0) from {dbname}.ct1")
# tdSql.checkData(0, 0, 8)
# tdSql.checkData(0, 1, 1)
- tdSql.query("select c1, sqrt(1) from ct1")
+ tdSql.query(f"select c1, sqrt(1) from {dbname}.ct1")
tdSql.checkData(0, 1, 1.000000000)
tdSql.checkRows(13)
# two cols start sqrt(x,y)
- tdSql.query("select c1,c2, sqrt(c2) from ct1")
+ tdSql.query(f"select c1,c2, sqrt(c2) from {dbname}.ct1")
tdSql.checkData(0, 2, 298.140906284)
tdSql.checkData(1, 2, 278.885281074)
tdSql.checkData(4, 2, 0.000000000)
- def abs_func_filter(self):
- tdSql.execute("use db")
- tdSql.query("select c1, abs(c1) -0 ,ceil(c1-0.1)-0 ,floor(c1+0.1)-0.1 ,ceil(sqrt(c1)-0.5) from ct4 where c1>5 ")
+ def abs_func_filter(self, dbname="db"):
+ tdSql.query(f"select c1, abs(c1) -0 ,ceil(c1-0.1)-0 ,floor(c1+0.1)-0.1 ,ceil(sqrt(c1)-0.5) from {dbname}.ct4 where c1>5 ")
tdSql.checkRows(3)
tdSql.checkData(0,0,8)
tdSql.checkData(0,1,8.000000000)
@@ -421,7 +408,7 @@ class TDTestCase:
tdSql.checkData(0,3,7.900000000)
tdSql.checkData(0,4,3.000000000)
- tdSql.query("select c1, abs(c1) -0 ,ceil(c1-0.1)-0 ,floor(c1+0.1)-0.1 ,ceil(sqrt(c1)-0.5) from ct4 where c1=5 ")
+ tdSql.query(f"select c1, abs(c1) -0 ,ceil(c1-0.1)-0 ,floor(c1+0.1)-0.1 ,ceil(sqrt(c1)-0.5) from {dbname}.ct4 where c1=5 ")
tdSql.checkRows(1)
tdSql.checkData(0,0,5)
tdSql.checkData(0,1,5.000000000)
@@ -429,7 +416,7 @@ class TDTestCase:
tdSql.checkData(0,3,4.900000000)
tdSql.checkData(0,4,2.000000000)
- tdSql.query("select c1, abs(c1) -0 ,ceil(c1-0.1)-0 ,floor(c1+0.1)-0.1 ,ceil(sqrt(c1)-0.5) from ct4 where c1=5 ")
+ tdSql.query(f"select c1, abs(c1) -0 ,ceil(c1-0.1)-0 ,floor(c1+0.1)-0.1 ,ceil(sqrt(c1)-0.5) from {dbname}.ct4 where c1=5 ")
tdSql.checkRows(1)
tdSql.checkData(0,0,5)
tdSql.checkData(0,1,5.000000000)
@@ -437,7 +424,7 @@ class TDTestCase:
tdSql.checkData(0,3,4.900000000)
tdSql.checkData(0,4,2.000000000)
- tdSql.query("select c1,c2 , abs(c1) -0 ,ceil(c1-0.1)-0 ,floor(c1+0.1)-0.1 ,ceil(sqrt(c1)-0.5) from ct4 where c1=sqrt(c1) limit 1 ")
+ tdSql.query(f"select c1,c2 , abs(c1) -0 ,ceil(c1-0.1)-0 ,floor(c1+0.1)-0.1 ,ceil(sqrt(c1)-0.5) from {dbname}.ct4 where c1=sqrt(c1) limit 1 ")
tdSql.checkRows(1)
tdSql.checkData(0,0,1)
tdSql.checkData(0,1,11111)
@@ -446,42 +433,37 @@ class TDTestCase:
tdSql.checkData(0,4,0.900000000)
tdSql.checkData(0,5,1.000000000)
- def pow_Arithmetic(self):
- pass
-
- def check_boundary_values(self):
+ def check_boundary_values(self, dbname="bound_test"):
- tdSql.execute("drop database if exists bound_test")
- tdSql.execute("create database if not exists bound_test")
- time.sleep(3)
- tdSql.execute("use bound_test")
+ tdSql.execute(f"drop database if exists {dbname}")
+ tdSql.execute(f"create database if not exists {dbname}")
tdSql.execute(
- "create table stb_bound (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(32),c9 nchar(32), c10 timestamp) tags (t1 int);"
+ f"create table {dbname}.stb_bound (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(32),c9 nchar(32), c10 timestamp) tags (t1 int);"
)
- tdSql.execute(f'create table sub1_bound using stb_bound tags ( 1 )')
+ tdSql.execute(f'create table {dbname}.sub1_bound using {dbname}.stb_bound tags ( 1 )')
tdSql.execute(
- f"insert into sub1_bound values ( now()-1s, 2147483647, 9223372036854775807, 32767, 127, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
+ f"insert into {dbname}.sub1_bound values ( now()-1s, 2147483647, 9223372036854775807, 32767, 127, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
)
tdSql.execute(
- f"insert into sub1_bound values ( now()-1s, -2147483647, -9223372036854775807, -32767, -127, -3.40E+38, -1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
+ f"insert into {dbname}.sub1_bound values ( now()-1s, -2147483647, -9223372036854775807, -32767, -127, -3.40E+38, -1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
)
tdSql.execute(
- f"insert into sub1_bound values ( now(), 2147483646, 9223372036854775806, 32766, 126, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
+ f"insert into {dbname}.sub1_bound values ( now(), 2147483646, 9223372036854775806, 32766, 126, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
)
tdSql.execute(
- f"insert into sub1_bound values ( now(), -2147483646, -9223372036854775806, -32766, -126, -3.40E+38, -1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
+ f"insert into {dbname}.sub1_bound values ( now(), -2147483646, -9223372036854775806, -32766, -126, -3.40E+38, -1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
)
tdSql.error(
- f"insert into sub1_bound values ( now()+1s, 2147483648, 9223372036854775808, 32768, 128, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
+ f"insert into {dbname}.sub1_bound values ( now()+1s, 2147483648, 9223372036854775808, 32768, 128, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
)
- self.check_result_auto_sqrt( "select abs(c1), abs(c2), abs(c3) , abs(c4), abs(c5) from sub1_bound ", "select sqrt(abs(c1)), sqrt(abs(c2)) ,sqrt(abs(c3)), sqrt(abs(c4)), sqrt(abs(c5)) from sub1_bound")
+ self.check_result_auto_sqrt( f"select abs(c1), abs(c2), abs(c3) , abs(c4), abs(c5) from {dbname}.sub1_bound ", f"select sqrt(abs(c1)), sqrt(abs(c2)) ,sqrt(abs(c3)), sqrt(abs(c4)), sqrt(abs(c5)) from {dbname}.sub1_bound")
- self.check_result_auto_sqrt( "select c1, c2, c3 , c3, c2 ,c1 from sub1_bound ", "select sqrt(c1), sqrt(c2) ,sqrt(c3), sqrt(c3), sqrt(c2) ,sqrt(c1) from sub1_bound")
+ self.check_result_auto_sqrt( f"select c1, c2, c3 , c3, c2 ,c1 from {dbname}.sub1_bound ", f"select sqrt(c1), sqrt(c2) ,sqrt(c3), sqrt(c3), sqrt(c2) ,sqrt(c1) from {dbname}.sub1_bound")
- self.check_result_auto_sqrt("select abs(abs(abs(abs(abs(abs(abs(abs(abs(c1))))))))) nest_col_func from sub1_bound" , "select sqrt(abs(c1)) from sub1_bound" )
+ self.check_result_auto_sqrt(f"select abs(abs(abs(abs(abs(abs(abs(abs(abs(c1))))))))) nest_col_func from {dbname}.sub1_bound" , f"select sqrt(abs(c1)) from {dbname}.sub1_bound" )
# check basic elem for table per row
- tdSql.query("select sqrt(abs(c1)) ,sqrt(abs(c2)) , sqrt(abs(c3)) , sqrt(abs(c4)), sqrt(abs(c5)), sqrt(abs(c6)) from sub1_bound ")
+ tdSql.query(f"select sqrt(abs(c1)) ,sqrt(abs(c2)) , sqrt(abs(c3)) , sqrt(abs(c4)), sqrt(abs(c5)), sqrt(abs(c6)) from {dbname}.sub1_bound ")
tdSql.checkData(0,0,math.sqrt(2147483647))
tdSql.checkData(0,1,math.sqrt(9223372036854775807))
tdSql.checkData(0,2,math.sqrt(32767))
@@ -499,23 +481,22 @@ class TDTestCase:
tdSql.checkData(3,4,math.sqrt(339999995214436424907732413799364296704.00000))
# check + - * / in functions
- tdSql.query("select sqrt(abs(c1+1)) ,sqrt(abs(c2)) , sqrt(abs(c3*1)) , sqrt(abs(c4/2)), sqrt(abs(c5))/2, sqrt(abs(c6)) from sub1_bound ")
+ tdSql.query(f"select sqrt(abs(c1+1)) ,sqrt(abs(c2)) , sqrt(abs(c3*1)) , sqrt(abs(c4/2)), sqrt(abs(c5))/2, sqrt(abs(c6)) from {dbname}.sub1_bound ")
tdSql.checkData(0,0,math.sqrt(2147483648.000000000))
tdSql.checkData(0,1,math.sqrt(9223372036854775807))
tdSql.checkData(0,2,math.sqrt(32767.000000000))
tdSql.checkData(0,3,math.sqrt(63.500000000))
- def support_super_table_test(self):
- tdSql.execute(" use db ")
- self.check_result_auto_sqrt( " select c5 from stb1 order by ts " , "select sqrt(c5) from stb1 order by ts" )
- self.check_result_auto_sqrt( " select c5 from stb1 order by tbname " , "select sqrt(c5) from stb1 order by tbname" )
- self.check_result_auto_sqrt( " select c5 from stb1 where c1 > 0 order by tbname " , "select sqrt(c5) from stb1 where c1 > 0 order by tbname" )
- self.check_result_auto_sqrt( " select c5 from stb1 where c1 > 0 order by tbname " , "select sqrt(c5) from stb1 where c1 > 0 order by tbname" )
-
- self.check_result_auto_sqrt( " select t1,c5 from stb1 order by ts " , "select sqrt(t1), sqrt(c5) from stb1 order by ts" )
- self.check_result_auto_sqrt( " select t1,c5 from stb1 order by tbname " , "select sqrt(t1) ,sqrt(c5) from stb1 order by tbname" )
- self.check_result_auto_sqrt( " select t1,c5 from stb1 where c1 > 0 order by tbname " , "select sqrt(t1) ,sqrt(c5) from stb1 where c1 > 0 order by tbname" )
- self.check_result_auto_sqrt( " select t1,c5 from stb1 where c1 > 0 order by tbname " , "select sqrt(t1) , sqrt(c5) from stb1 where c1 > 0 order by tbname" )
+ def support_super_table_test(self, dbname="db"):
+ self.check_result_auto_sqrt( f"select c5 from {dbname}.stb1 order by ts " , f"select sqrt(c5) from {dbname}.stb1 order by ts" )
+ self.check_result_auto_sqrt( f"select c5 from {dbname}.stb1 order by tbname " , f"select sqrt(c5) from {dbname}.stb1 order by tbname" )
+ self.check_result_auto_sqrt( f"select c5 from {dbname}.stb1 where c1 > 0 order by tbname " , f"select sqrt(c5) from {dbname}.stb1 where c1 > 0 order by tbname" )
+ self.check_result_auto_sqrt( f"select c5 from {dbname}.stb1 where c1 > 0 order by tbname " , f"select sqrt(c5) from {dbname}.stb1 where c1 > 0 order by tbname" )
+
+ self.check_result_auto_sqrt( f"select t1,c5 from {dbname}.stb1 order by ts " , f"select sqrt(t1), sqrt(c5) from {dbname}.stb1 order by ts" )
+ self.check_result_auto_sqrt( f"select t1,c5 from {dbname}.stb1 order by tbname " , f"select sqrt(t1) ,sqrt(c5) from {dbname}.stb1 order by tbname" )
+ self.check_result_auto_sqrt( f"select t1,c5 from {dbname}.stb1 where c1 > 0 order by tbname " , f"select sqrt(t1) ,sqrt(c5) from {dbname}.stb1 where c1 > 0 order by tbname" )
+ self.check_result_auto_sqrt( f"select t1,c5 from {dbname}.stb1 where c1 > 0 order by tbname " , f"select sqrt(t1) , sqrt(c5) from {dbname}.stb1 where c1 > 0 order by tbname" )
pass
def run(self): # sourcery skip: extract-duplicate-method, remove-redundant-fstring
diff --git a/tests/system-test/2-query/statecount.py b/tests/system-test/2-query/statecount.py
index a88c4aef9fdad7580d4d10a642093c80750b1c57..c73c955de405ee54e6924c25cd219aa8b8a7f4eb 100644
--- a/tests/system-test/2-query/statecount.py
+++ b/tests/system-test/2-query/statecount.py
@@ -11,50 +11,47 @@ from util.sql import *
from util.cases import *
class TDTestCase:
- updatecfgDict = {'debugFlag': 143 ,"cDebugFlag":143,"uDebugFlag":143 ,"rpcDebugFlag":143 , "tmrDebugFlag":143 ,
- "jniDebugFlag":143 ,"simDebugFlag":143,"dDebugFlag":143, "dDebugFlag":143,"vDebugFlag":143,"mDebugFlag":143,"qDebugFlag":143,
- "wDebugFlag":143,"sDebugFlag":143,"tsdbDebugFlag":143,"tqDebugFlag":143 ,"fsDebugFlag":143 ,"udfDebugFlag":143}
def init(self, conn, logSql):
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor())
self.ts = 1420041600000 # 2015-01-01 00:00:00 this is begin time for first record
- def prepare_datas(self):
+ def prepare_datas(self, dbname="db"):
tdSql.execute(
- '''create table stb1
+ f'''create table {dbname}.stb1
(ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp)
tags (t1 int)
'''
)
tdSql.execute(
- '''
- create table t1
+ f'''
+ create table {dbname}.t1
(ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp)
'''
)
for i in range(4):
- tdSql.execute(f'create table ct{i+1} using stb1 tags ( {i+1} )')
+ tdSql.execute(f'create table {dbname}.ct{i+1} using {dbname}.stb1 tags ( {i+1} )')
for i in range(9):
tdSql.execute(
- f"insert into ct1 values ( now()-{i*10}s, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )"
+ f"insert into {dbname}.ct1 values ( now()-{i*10}s, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )"
)
tdSql.execute(
- f"insert into ct4 values ( now()-{i*90}d, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )"
+ f"insert into {dbname}.ct4 values ( now()-{i*90}d, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )"
)
- tdSql.execute("insert into ct1 values (now()-45s, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar0', now()+8a )")
- tdSql.execute("insert into ct1 values (now()+10s, 9, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )")
- tdSql.execute("insert into ct1 values (now()+15s, 9, -99999, -999, -99, -9.99, NULL, 1, 'binary9', 'nchar9', now()+9a )")
- tdSql.execute("insert into ct1 values (now()+20s, 9, -99999, -999, NULL, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )")
+ tdSql.execute(f"insert into {dbname}.ct1 values (now()-45s, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar0', now()+8a )")
+ tdSql.execute(f"insert into {dbname}.ct1 values (now()+10s, 9, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )")
+ tdSql.execute(f"insert into {dbname}.ct1 values (now()+15s, 9, -99999, -999, -99, -9.99, NULL, 1, 'binary9', 'nchar9', now()+9a )")
+ tdSql.execute(f"insert into {dbname}.ct1 values (now()+20s, 9, -99999, -999, NULL, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )")
- tdSql.execute("insert into ct4 values (now()-810d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
- tdSql.execute("insert into ct4 values (now()-400d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
- tdSql.execute("insert into ct4 values (now()+90d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
+ tdSql.execute(f"insert into {dbname}.ct4 values (now()-810d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
+ tdSql.execute(f"insert into {dbname}.ct4 values (now()-400d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
+ tdSql.execute(f"insert into {dbname}.ct4 values (now()+90d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
tdSql.execute(
- f'''insert into t1 values
+ f'''insert into {dbname}.t1 values
( '2020-04-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( '2020-10-21 01:01:01.000', 1, 11111, 111, 11, 1.11, 11.11, 1, "binary1", "nchar1", now()+1a )
( '2020-12-31 01:01:01.000', 2, 22222, 222, 22, 2.22, 22.22, 0, "binary2", "nchar2", now()+2a )
@@ -70,68 +67,68 @@ class TDTestCase:
'''
)
- def test_errors(self):
+ def test_errors(self, dbname="db"):
error_sql_lists = [
- # "select statecount(c1,'GT',5) from t1"
- "select statecount from t1",
- "select statecount(123--123)==1 from t1",
- "select statecount(123,123) from t1",
- "select statecount(c1,ts) from t1",
- "select statecount(c1,c1,ts) from t1",
- "select statecount(c1 ,c2 ) from t1",
- "select statecount(c1 ,NULL) from t1",
- #"select statecount(c1 ,'NULL',1.0) from t1",
- "select statecount(c1 ,'GT','1') from t1",
- "select statecount(c1 ,'GT','tbname') from t1",
- "select statecount(c1 ,'GT','*') from t1",
- "select statecount(c1 ,'GT',ts) from t1",
- "select statecount(c1 ,'GT',max(c1)) from t1",
- # "select statecount(abs(c1) ,'GT',1) from t1",
- # "select statecount(c1+2 ,'GT',1) from t1",
- "select statecount(c1 ,'GT',1,1u) from t1",
- "select statecount(c1 ,'GT',1,now) from t1",
- "select statecount(c1 ,'GT','1') from t1",
- "select statecount(c1 ,'GT','1',True) from t1",
- "select statecount(statecount(c1) ab from t1)",
- "select statecount(c1 ,'GT',1,,)int from t1",
- "select statecount('c1','GT',1) from t1",
- "select statecount('c1','GT' , NULL) from t1",
- "select statecount('c1','GT', 1 , '') from t1",
- "select statecount('c1','GT', 1 ,c%) from t1",
- "select statecount(c1 ,'GT',1,t1) from t1",
- "select statecount(c1 ,'GT',1,True) from t1",
- "select statecount(c1 ,'GT',1) , count(c1) from t1",
- "select statecount(c1 ,'GT',1) , avg(c1) from t1",
- "select statecount(c1 ,'GT',1) , min(c1) from t1",
- "select statecount(c1 ,'GT',1) , spread(c1) from t1",
- "select statecount(c1 ,'GT',1) , diff(c1) from t1",
+ # f"select statecount(c1,'GT',5) from {dbname}.t1"
+ f"select statecount from {dbname}.t1",
+ f"select statecount(123--123)==1 from {dbname}.t1",
+ f"select statecount(123,123) from {dbname}.t1",
+ f"select statecount(c1,ts) from {dbname}.t1",
+ f"select statecount(c1,c1,ts) from {dbname}.t1",
+ f"select statecount(c1 ,c2 ) from {dbname}.t1",
+ f"select statecount(c1 ,NULL) from {dbname}.t1",
+ #f"select statecount(c1 ,'NULL',1.0) from {dbname}.t1",
+ f"select statecount(c1 ,'GT','1') from {dbname}.t1",
+ f"select statecount(c1 ,'GT','tbname') from {dbname}.t1",
+ f"select statecount(c1 ,'GT','*') from {dbname}.t1",
+ f"select statecount(c1 ,'GT',ts) from {dbname}.t1",
+ f"select statecount(c1 ,'GT',max(c1)) from {dbname}.t1",
+ # f"select statecount(abs(c1) ,'GT',1) from {dbname}.t1",
+ # f"select statecount(c1+2 ,'GT',1) from {dbname}.t1",
+ f"select statecount(c1 ,'GT',1,1u) from {dbname}.t1",
+ f"select statecount(c1 ,'GT',1,now) from {dbname}.t1",
+ f"select statecount(c1 ,'GT','1') from {dbname}.t1",
+ f"select statecount(c1 ,'GT','1',True) from {dbname}.t1",
+ f"select statecount(statecount(c1) ab from {dbname}.t1)",
+ f"select statecount(c1 ,'GT',1,,)int from {dbname}.t1",
+ f"select statecount('c1','GT',1) from {dbname}.t1",
+ f"select statecount('c1','GT' , NULL) from {dbname}.t1",
+ f"select statecount('c1','GT', 1 , '') from {dbname}.t1",
+ f"select statecount('c1','GT', 1 ,c%) from {dbname}.t1",
+ f"select statecount(c1 ,'GT',1,t1) from {dbname}.t1",
+ f"select statecount(c1 ,'GT',1,True) from {dbname}.t1",
+ f"select statecount(c1 ,'GT',1) , count(c1) from {dbname}.t1",
+ f"select statecount(c1 ,'GT',1) , avg(c1) from {dbname}.t1",
+ f"select statecount(c1 ,'GT',1) , min(c1) from {dbname}.t1",
+ f"select statecount(c1 ,'GT',1) , spread(c1) from {dbname}.t1",
+ f"select statecount(c1 ,'GT',1) , diff(c1) from {dbname}.t1",
]
for error_sql in error_sql_lists:
tdSql.error(error_sql)
pass
- def support_types(self):
+ def support_types(self, dbname="db"):
other_no_value_types = [
- "select statecount(ts,'GT',1) from t1" ,
- "select statecount(c7,'GT',1) from t1",
- "select statecount(c8,'GT',1) from t1",
- "select statecount(c9,'GT',1) from t1",
- "select statecount(ts,'GT',1) from ct1" ,
- "select statecount(c7,'GT',1) from ct1",
- "select statecount(c8,'GT',1) from ct1",
- "select statecount(c9,'GT',1) from ct1",
- "select statecount(ts,'GT',1) from ct3" ,
- "select statecount(c7,'GT',1) from ct3",
- "select statecount(c8,'GT',1) from ct3",
- "select statecount(c9,'GT',1) from ct3",
- "select statecount(ts,'GT',1) from ct4" ,
- "select statecount(c7,'GT',1) from ct4",
- "select statecount(c8,'GT',1) from ct4",
- "select statecount(c9,'GT',1) from ct4",
- "select statecount(ts,'GT',1) from stb1 partition by tbname" ,
- "select statecount(c7,'GT',1) from stb1 partition by tbname",
- "select statecount(c8,'GT',1) from stb1 partition by tbname",
- "select statecount(c9,'GT',1) from stb1 partition by tbname"
+ f"select statecount(ts,'GT',1) from {dbname}.t1" ,
+ f"select statecount(c7,'GT',1) from {dbname}.t1",
+ f"select statecount(c8,'GT',1) from {dbname}.t1",
+ f"select statecount(c9,'GT',1) from {dbname}.t1",
+ f"select statecount(ts,'GT',1) from {dbname}.ct1" ,
+ f"select statecount(c7,'GT',1) from {dbname}.ct1",
+ f"select statecount(c8,'GT',1) from {dbname}.ct1",
+ f"select statecount(c9,'GT',1) from {dbname}.ct1",
+ f"select statecount(ts,'GT',1) from {dbname}.ct3" ,
+ f"select statecount(c7,'GT',1) from {dbname}.ct3",
+ f"select statecount(c8,'GT',1) from {dbname}.ct3",
+ f"select statecount(c9,'GT',1) from {dbname}.ct3",
+ f"select statecount(ts,'GT',1) from {dbname}.ct4" ,
+ f"select statecount(c7,'GT',1) from {dbname}.ct4",
+ f"select statecount(c8,'GT',1) from {dbname}.ct4",
+ f"select statecount(c9,'GT',1) from {dbname}.ct4",
+ f"select statecount(ts,'GT',1) from {dbname}.stb1 partition by tbname" ,
+ f"select statecount(c7,'GT',1) from {dbname}.stb1 partition by tbname",
+ f"select statecount(c8,'GT',1) from {dbname}.stb1 partition by tbname",
+ f"select statecount(c9,'GT',1) from {dbname}.stb1 partition by tbname"
]
for type_sql in other_no_value_types:
@@ -139,224 +136,222 @@ class TDTestCase:
tdLog.info("support type ok , sql is : %s"%type_sql)
type_sql_lists = [
- "select statecount(c1,'GT',1) from t1",
- "select statecount(c2,'GT',1) from t1",
- "select statecount(c3,'GT',1) from t1",
- "select statecount(c4,'GT',1) from t1",
- "select statecount(c5,'GT',1) from t1",
- "select statecount(c6,'GT',1) from t1",
-
- "select statecount(c1,'GT',1) from ct1",
- "select statecount(c2,'GT',1) from ct1",
- "select statecount(c3,'GT',1) from ct1",
- "select statecount(c4,'GT',1) from ct1",
- "select statecount(c5,'GT',1) from ct1",
- "select statecount(c6,'GT',1) from ct1",
-
- "select statecount(c1,'GT',1) from ct3",
- "select statecount(c2,'GT',1) from ct3",
- "select statecount(c3,'GT',1) from ct3",
- "select statecount(c4,'GT',1) from ct3",
- "select statecount(c5,'GT',1) from ct3",
- "select statecount(c6,'GT',1) from ct3",
-
- "select statecount(c1,'GT',1) from stb1 partition by tbname",
- "select statecount(c2,'GT',1) from stb1 partition by tbname",
- "select statecount(c3,'GT',1) from stb1 partition by tbname",
- "select statecount(c4,'GT',1) from stb1 partition by tbname",
- "select statecount(c5,'GT',1) from stb1 partition by tbname",
- "select statecount(c6,'GT',1) from stb1 partition by tbname",
-
- "select statecount(c6,'GT',1) as alisb from stb1 partition by tbname",
- "select statecount(c6,'GT',1) alisb from stb1 partition by tbname",
+ f"select statecount(c1,'GT',1) from {dbname}.t1",
+ f"select statecount(c2,'GT',1) from {dbname}.t1",
+ f"select statecount(c3,'GT',1) from {dbname}.t1",
+ f"select statecount(c4,'GT',1) from {dbname}.t1",
+ f"select statecount(c5,'GT',1) from {dbname}.t1",
+ f"select statecount(c6,'GT',1) from {dbname}.t1",
+
+ f"select statecount(c1,'GT',1) from {dbname}.ct1",
+ f"select statecount(c2,'GT',1) from {dbname}.ct1",
+ f"select statecount(c3,'GT',1) from {dbname}.ct1",
+ f"select statecount(c4,'GT',1) from {dbname}.ct1",
+ f"select statecount(c5,'GT',1) from {dbname}.ct1",
+ f"select statecount(c6,'GT',1) from {dbname}.ct1",
+
+ f"select statecount(c1,'GT',1) from {dbname}.ct3",
+ f"select statecount(c2,'GT',1) from {dbname}.ct3",
+ f"select statecount(c3,'GT',1) from {dbname}.ct3",
+ f"select statecount(c4,'GT',1) from {dbname}.ct3",
+ f"select statecount(c5,'GT',1) from {dbname}.ct3",
+ f"select statecount(c6,'GT',1) from {dbname}.ct3",
+
+ f"select statecount(c1,'GT',1) from {dbname}.stb1 partition by tbname",
+ f"select statecount(c2,'GT',1) from {dbname}.stb1 partition by tbname",
+ f"select statecount(c3,'GT',1) from {dbname}.stb1 partition by tbname",
+ f"select statecount(c4,'GT',1) from {dbname}.stb1 partition by tbname",
+ f"select statecount(c5,'GT',1) from {dbname}.stb1 partition by tbname",
+ f"select statecount(c6,'GT',1) from {dbname}.stb1 partition by tbname",
+
+ f"select statecount(c6,'GT',1) as alisb from {dbname}.stb1 partition by tbname",
+ f"select statecount(c6,'GT',1) alisb from {dbname}.stb1 partition by tbname",
]
for type_sql in type_sql_lists:
tdSql.query(type_sql)
- def support_opers(self):
+ def support_opers(self, dbname="db"):
oper_lists = ['LT','lt','Lt','lT','GT','gt','Gt','gT','LE','le','Le','lE','GE','ge','Ge','gE','NE','ne','Ne','nE','EQ','eq','Eq','eQ']
oper_errors = [",","*","NULL","tbname","ts","sum","_c0"]
for oper in oper_lists:
- tdSql.query(f"select statecount(c1 ,'{oper}',1) as col from t1")
+ tdSql.query(f"select statecount(c1 ,'{oper}',1) as col from {dbname}.t1")
tdSql.checkRows(12)
for oper in oper_errors:
- tdSql.error(f"select statecount(c1 ,'{oper}',1) as col from t1")
-
+ tdSql.error(f"select statecount(c1 ,'{oper}',1) as col from {dbname}.t1")
- def basic_statecount_function(self):
+ def basic_statecount_function(self, dbname="db"):
# basic query
- tdSql.query("select c1 from ct3")
+ tdSql.query(f"select c1 from {dbname}.ct3")
tdSql.checkRows(0)
- tdSql.query("select c1 from t1")
+ tdSql.query(f"select c1 from {dbname}.t1")
tdSql.checkRows(12)
- tdSql.query("select c1 from stb1")
+ tdSql.query(f"select c1 from {dbname}.stb1")
tdSql.checkRows(25)
# used for empty table , ct3 is empty
- tdSql.query("select statecount(c6,'GT',1) from ct3")
+ tdSql.query(f"select statecount(c6,'GT',1) from {dbname}.ct3")
tdSql.checkRows(0)
- tdSql.query("select statecount(c6,'GT',1) from ct3")
+ tdSql.query(f"select statecount(c6,'GT',1) from {dbname}.ct3")
tdSql.checkRows(0)
- tdSql.query("select statecount(c6,'GT',1) from ct3")
+ tdSql.query(f"select statecount(c6,'GT',1) from {dbname}.ct3")
tdSql.checkRows(0)
- tdSql.query("select statecount(c6,'GT',1) from ct3")
+ tdSql.query(f"select statecount(c6,'GT',1) from {dbname}.ct3")
tdSql.checkRows(0)
- tdSql.query("select statecount(c6,'GT',1) from ct3")
+ tdSql.query(f"select statecount(c6,'GT',1) from {dbname}.ct3")
tdSql.checkRows(0)
- tdSql.query("select statecount(c6,'GT',1) from ct3")
+ tdSql.query(f"select statecount(c6,'GT',1) from {dbname}.ct3")
# will support _rowts mix with
- # tdSql.query("select (c6,'GT',1),_rowts from ct3")
+ # tdSql.query(f"select (c6,'GT',1),_rowts from {dbname}.ct3")
# auto check for t1 table
# used for regular table
- tdSql.query("select statecount(c6,'GT',1) from t1")
+ tdSql.query(f"select statecount(c6,'GT',1) from {dbname}.t1")
# unique with super tags
- tdSql.query("select statecount(c6,'GT',1) from ct1")
+ tdSql.query(f"select statecount(c6,'GT',1) from {dbname}.ct1")
tdSql.checkRows(13)
- tdSql.query("select statecount(c6,'GT',1) from ct4")
+ tdSql.query(f"select statecount(c6,'GT',1) from {dbname}.ct4")
tdSql.checkRows(12)
- tdSql.query("select statecount(c6,'GT',1),tbname from ct1")
+ tdSql.query(f"select statecount(c6,'GT',1),tbname from {dbname}.ct1")
tdSql.checkRows(13)
- tdSql.query("select statecount(c6,'GT',1),t1 from ct1")
+ tdSql.query(f"select statecount(c6,'GT',1),t1 from {dbname}.ct1")
tdSql.checkRows(13)
# unique with common col
- tdSql.query("select statecount(c6,'GT',1) ,ts from ct1")
+ tdSql.query(f"select statecount(c6,'GT',1) ,ts from {dbname}.ct1")
tdSql.checkRows(13)
- tdSql.query("select ts, statecount(c6,'GT',1) from ct1")
+ tdSql.query(f"select ts, statecount(c6,'GT',1) from {dbname}.ct1")
tdSql.checkRows(13)
- tdSql.query("select statecount(c6,'GT',1) ,c1 from ct1")
+ tdSql.query(f"select statecount(c6,'GT',1) ,c1 from {dbname}.ct1")
tdSql.checkRows(13)
- tdSql.query("select c1, statecount(c6,'GT',1) from ct1")
+ tdSql.query(f"select c1, statecount(c6,'GT',1) from {dbname}.ct1")
tdSql.checkRows(13)
- tdSql.query("select ts, c1, c2, c3, statecount(c6,'GT',1) from ct1")
+ tdSql.query(f"select ts, c1, c2, c3, statecount(c6,'GT',1) from {dbname}.ct1")
tdSql.checkRows(13)
- tdSql.query("select statecount(c6,'GT',1), ts, c1, c2, c3 from ct1")
+ tdSql.query(f"select statecount(c6,'GT',1), ts, c1, c2, c3 from {dbname}.ct1")
tdSql.checkRows(13)
- tdSql.query("select ts, c1, c2, c3, statecount(c6,'GT',1), ts, c4, c5, c6 from ct1")
+ tdSql.query(f"select ts, c1, c2, c3, statecount(c6,'GT',1), ts, c4, c5, c6 from {dbname}.ct1")
tdSql.checkRows(13)
- tdSql.query("select stateduration(c6,'GT',1) ,ts from ct1")
+ tdSql.query(f"select stateduration(c6,'GT',1) ,ts from {dbname}.ct1")
tdSql.checkRows(13)
- tdSql.query("select ts, stateduration(c6,'GT',1) from ct1")
+ tdSql.query(f"select ts, stateduration(c6,'GT',1) from {dbname}.ct1")
tdSql.checkRows(13)
- tdSql.query("select stateduration(c6,'GT',1) ,c1 from ct1")
+ tdSql.query(f"select stateduration(c6,'GT',1) ,c1 from {dbname}.ct1")
tdSql.checkRows(13)
- tdSql.query("select c1, stateduration(c6,'GT',1) from ct1")
+ tdSql.query(f"select c1, stateduration(c6,'GT',1) from {dbname}.ct1")
tdSql.checkRows(13)
- tdSql.query("select ts, c1, c2, c3, stateduration(c6,'GT',1) from ct1")
+ tdSql.query(f"select ts, c1, c2, c3, stateduration(c6,'GT',1) from {dbname}.ct1")
tdSql.checkRows(13)
- tdSql.query("select stateduration(c6,'GT',1), ts, c1, c2, c3 from ct1")
+ tdSql.query(f"select stateduration(c6,'GT',1), ts, c1, c2, c3 from {dbname}.ct1")
tdSql.checkRows(13)
- tdSql.query("select ts, c1, c2, c3, stateduration(c6,'GT',1), ts, c4, c5, c6 from ct1")
+ tdSql.query(f"select ts, c1, c2, c3, stateduration(c6,'GT',1), ts, c4, c5, c6 from {dbname}.ct1")
tdSql.checkRows(13)
# unique with scalar function
- tdSql.query("select statecount(c6,'GT',1) , abs(c1) from ct1")
+ tdSql.query(f"select statecount(c6,'GT',1) , abs(c1) from {dbname}.ct1")
tdSql.checkRows(13)
- tdSql.query("select statecount(c6,'GT',1) , abs(c2)+2 from ct1")
+ tdSql.query(f"select statecount(c6,'GT',1) , abs(c2)+2 from {dbname}.ct1")
tdSql.checkRows(13)
- tdSql.error("select statecount(c6,'GT',1) , unique(c2) from ct1")
+ tdSql.error(f"select statecount(c6,'GT',1) , unique(c2) from {dbname}.ct1")
- tdSql.query("select stateduration(c6,'GT',1) , abs(c1) from ct1")
+ tdSql.query(f"select stateduration(c6,'GT',1) , abs(c1) from {dbname}.ct1")
tdSql.checkRows(13)
- tdSql.query("select stateduration(c6,'GT',1) , abs(c2)+2 from ct1")
+ tdSql.query(f"select stateduration(c6,'GT',1) , abs(c2)+2 from {dbname}.ct1")
tdSql.checkRows(13)
- tdSql.error("select stateduration(c6,'GT',1) , unique(c2) from ct1")
+ tdSql.error(f"select stateduration(c6,'GT',1) , unique(c2) from {dbname}.ct1")
# unique with aggregate function
- tdSql.error("select statecount(c6,'GT',1) ,sum(c1) from ct1")
- tdSql.error("select statecount(c6,'GT',1) ,max(c1) from ct1")
- tdSql.error("select statecount(c6,'GT',1) ,csum(c1) from ct1")
- tdSql.error("select statecount(c6,'GT',1) ,count(c1) from ct1")
+ tdSql.error(f"select statecount(c6,'GT',1) ,sum(c1) from {dbname}.ct1")
+ tdSql.error(f"select statecount(c6,'GT',1) ,max(c1) from {dbname}.ct1")
+ tdSql.error(f"select statecount(c6,'GT',1) ,csum(c1) from {dbname}.ct1")
+ tdSql.error(f"select statecount(c6,'GT',1) ,count(c1) from {dbname}.ct1")
# unique with filter where
- tdSql.query("select statecount(c6,'GT',1) from ct4 where c1 is null")
+ tdSql.query(f"select statecount(c6,'GT',1) from {dbname}.ct4 where c1 is null")
tdSql.checkData(0, 0, None)
tdSql.checkData(1, 0, None)
tdSql.checkData(2, 0, None)
- tdSql.query("select statecount(c1,'GT',1) from t1 where c1 >2 ")
+ tdSql.query(f"select statecount(c1,'GT',1) from {dbname}.t1 where c1 >2 ")
tdSql.checkData(0, 0, 1)
tdSql.checkData(1, 0, 2)
tdSql.checkData(2, 0, 3)
tdSql.checkData(4, 0, 5)
tdSql.checkData(5, 0, 6)
- tdSql.query("select statecount(c2,'GT',1) from t1 where c2 between 0 and 99999")
+ tdSql.query(f"select statecount(c2,'GT',1) from {dbname}.t1 where c2 between 0 and 99999")
tdSql.checkData(0, 0, 1)
tdSql.checkData(1, 0, 2)
tdSql.checkData(6, 0, -1)
# unique with union all
- tdSql.query("select statecount(c1,'GT',1) from ct4 union all select statecount(c1,'GT',1) from ct1")
+ tdSql.query(f"select statecount(c1,'GT',1) from {dbname}.ct4 union all select statecount(c1,'GT',1) from {dbname}.ct1")
tdSql.checkRows(25)
- tdSql.query("select statecount(c1,'GT',1) from ct4 union all select distinct(c1) from ct4")
+ tdSql.query(f"select statecount(c1,'GT',1) from {dbname}.ct4 union all select distinct(c1) from {dbname}.ct4")
tdSql.checkRows(22)
# unique with join
# prepare join datas with same ts
- tdSql.execute(" use db ")
- tdSql.execute(" create stable st1 (ts timestamp , num int) tags(ind int)")
- tdSql.execute(" create table tb1 using st1 tags(1)")
- tdSql.execute(" create table tb2 using st1 tags(2)")
+ tdSql.execute(f"create stable {dbname}.st1 (ts timestamp , num int) tags(ind int)")
+ tdSql.execute(f"create table {dbname}.tb1 using {dbname}.st1 tags(1)")
+ tdSql.execute(f"create table {dbname}.tb2 using {dbname}.st1 tags(2)")
- tdSql.execute(" create stable st2 (ts timestamp , num int) tags(ind int)")
- tdSql.execute(" create table ttb1 using st2 tags(1)")
- tdSql.execute(" create table ttb2 using st2 tags(2)")
+ tdSql.execute(f"create stable {dbname}.st2 (ts timestamp , num int) tags(ind int)")
+ tdSql.execute(f"create table {dbname}.ttb1 using {dbname}.st2 tags(1)")
+ tdSql.execute(f"create table {dbname}.ttb2 using {dbname}.st2 tags(2)")
start_ts = 1622369635000 # 2021-05-30 18:13:55
for i in range(10):
ts_value = start_ts+i*1000
- tdSql.execute(f" insert into tb1 values({ts_value} , {i})")
- tdSql.execute(f" insert into tb2 values({ts_value} , {i})")
+ tdSql.execute(f" insert into {dbname}.tb1 values({ts_value} , {i})")
+ tdSql.execute(f" insert into {dbname}.tb2 values({ts_value} , {i})")
- tdSql.execute(f" insert into ttb1 values({ts_value} , {i})")
- tdSql.execute(f" insert into ttb2 values({ts_value} , {i})")
+ tdSql.execute(f" insert into {dbname}.ttb1 values({ts_value} , {i})")
+ tdSql.execute(f" insert into {dbname}.ttb2 values({ts_value} , {i})")
- tdSql.query("select statecount(tb1.num,'GT',1) from tb1, tb2 where tb1.ts=tb2.ts ")
+ tdSql.query(f"select statecount(tb1.num,'GT',1) from {dbname}.tb1 tb1, {dbname}.tb2 tb2 where tb1.ts=tb2.ts ")
tdSql.checkRows(10)
tdSql.checkData(0,0,-1)
tdSql.checkData(1,0,-1)
tdSql.checkData(2,0,1)
tdSql.checkData(9,0,8)
- tdSql.query("select statecount(tb1.num,'GT',1) from tb1, tb2 where tb1.ts=tb2.ts union all select statecount(tb2.num,'GT',1) from tb1, tb2 where tb1.ts=tb2.ts ")
+ tdSql.query(f"select statecount(tb1.num,'GT',1) from {dbname}.tb1 tb1, {dbname}.tb2 tb2 where tb1.ts=tb2.ts union all select statecount(tb2.num,'GT',1) from {dbname}.tb1 tb1, {dbname}.tb2 tb2 where tb1.ts=tb2.ts ")
tdSql.checkRows(20)
# nest query
- # tdSql.query("select unique(c1) from (select c1 from ct1)")
- tdSql.query("select c1 from (select statecount(c1,'GT',1) c1 from t1)")
+ # tdSql.query(f"select unique(c1) from (select c1 from {dbname}.ct1)")
+ tdSql.query(f"select c1 from (select statecount(c1,'GT',1) c1 from {dbname}.t1)")
tdSql.checkRows(12)
tdSql.checkData(0, 0, None)
tdSql.checkData(1, 0, -1)
tdSql.checkData(2, 0, 1)
tdSql.checkData(10, 0, 8)
- tdSql.query("select sum(c1) from (select statecount(c1,'GT',1) c1 from t1)")
+ tdSql.query(f"select sum(c1) from (select statecount(c1,'GT',1) c1 from {dbname}.t1)")
tdSql.checkRows(1)
tdSql.checkData(0, 0, 35)
- tdSql.query("select sum(c1) from (select distinct(c1) c1 from ct1) union all select sum(c1) from (select statecount(c1,'GT',1) c1 from ct1)")
+ tdSql.query(f"select sum(c1) from (select distinct(c1) c1 from {dbname}.ct1) union all select sum(c1) from (select statecount(c1,'GT',1) c1 from {dbname}.ct1)")
tdSql.checkRows(2)
- tdSql.query("select 1-abs(c1) from (select statecount(c1,'GT',1) c1 from t1)")
+ tdSql.query(f"select 1-abs(c1) from (select statecount(c1,'GT',1) c1 from {dbname}.t1)")
tdSql.checkRows(12)
tdSql.checkData(0, 0, None)
tdSql.checkData(1, 0, 0.000000000)
@@ -365,43 +360,41 @@ class TDTestCase:
# bug for stable
#partition by tbname
- # tdSql.query(" select unique(c1) from stb1 partition by tbname ")
+ # tdSql.query(f"select unique(c1) from {dbname}.stb1 partition by tbname ")
# tdSql.checkRows(21)
- # tdSql.query(" select unique(c1) from stb1 partition by tbname ")
+ # tdSql.query(f"select unique(c1) from {dbname}.stb1 partition by tbname ")
# tdSql.checkRows(21)
# group by
- tdSql.error("select statecount(c1,'GT',1) from ct1 group by c1")
- tdSql.error("select statecount(c1,'GT',1) from ct1 group by tbname")
-
- # super table
-
- def check_unit_time(self):
- tdSql.execute(" use db ")
- tdSql.error("select stateduration(c1,'GT',1,1b) from ct1")
- tdSql.error("select stateduration(c1,'GT',1,1u) from ct1")
- tdSql.error("select stateduration(c1,'GT',1,1000s) from t1")
- tdSql.error("select stateduration(c1,'GT',1,10m) from t1")
- tdSql.error("select stateduration(c1,'GT',1,10d) from t1")
- tdSql.query("select stateduration(c1,'GT',1,1s) from t1")
+ tdSql.error(f"select statecount(c1,'GT',1) from {dbname}.ct1 group by c1")
+ tdSql.error(f"select statecount(c1,'GT',1) from {dbname}.ct1 group by tbname")
+
+ def check_unit_time(self, dbname="db"):
+ tdSql.error(f"select stateduration(c1,'GT',1,1b) from {dbname}.ct1")
+ tdSql.error(f"select stateduration(c1,'GT',1,1u) from {dbname}.ct1")
+ tdSql.error(f"select stateduration(c1,'GT',1,1000s) from {dbname}.t1")
+ tdSql.error(f"select stateduration(c1,'GT',1,10m) from {dbname}.t1")
+ tdSql.error(f"select stateduration(c1,'GT',1,10d) from {dbname}.t1")
+ tdSql.query(f"select stateduration(c1,'GT',1,1s) from {dbname}.t1")
tdSql.checkData(10,0,63072035)
- tdSql.query("select stateduration(c1,'GT',1,1m) from t1")
+ tdSql.query(f"select stateduration(c1,'GT',1,1m) from {dbname}.t1")
tdSql.checkData(10,0,int(63072035/60))
- tdSql.query("select stateduration(c1,'GT',1,1h) from t1")
+ tdSql.query(f"select stateduration(c1,'GT',1,1h) from {dbname}.t1")
tdSql.checkData(10,0,int(63072035/60/60))
- tdSql.query("select stateduration(c1,'GT',1,1d) from t1")
+ tdSql.query(f"select stateduration(c1,'GT',1,1d) from {dbname}.t1")
tdSql.checkData(10,0,int(63072035/60/24/60))
- tdSql.query("select stateduration(c1,'GT',1,1w) from t1")
+ tdSql.query(f"select stateduration(c1,'GT',1,1w) from {dbname}.t1")
tdSql.checkData(10,0,int(63072035/60/7/24/60))
def query_precision(self):
def generate_data(precision="ms"):
- tdSql.execute("create database if not exists db_%s precision '%s';" %(precision, precision))
+ dbname = f"db_{precision}"
+ tdSql.execute(f"create database if not exists db_%s precision '%s';" %(precision, precision))
tdSql.execute("use db_%s;" %precision)
- tdSql.execute("create stable db_%s.st (ts timestamp , id int) tags(ind int);"%precision)
- tdSql.execute("create table db_%s.tb1 using st tags(1);"%precision)
- tdSql.execute("create table db_%s.tb2 using st tags(2);"%precision)
+ tdSql.execute(f"create stable db_%s.st (ts timestamp , id int) tags(ind int);"%precision)
+ tdSql.execute(f"create table db_%s.tb1 using {dbname}.st tags(1);"%precision)
+ tdSql.execute(f"create table db_%s.tb2 using {dbname}.st tags(2);"%precision)
if precision == "ms":
start_ts = self.ts
@@ -432,55 +425,54 @@ class TDTestCase:
if pres == "ms":
if unit in ["1u","1b"]:
- tdSql.error("select stateduration(id,'GT',1,%s) from db_%s.tb1 "%(unit,pres))
+ tdSql.error(f"select stateduration(id,'GT',1,%s) from db_%s.tb1 "%(unit,pres))
pass
else:
- tdSql.query("select stateduration(id,'GT',1,%s) from db_%s.tb1 "%(unit,pres))
+ tdSql.query(f"select stateduration(id,'GT',1,%s) from db_%s.tb1 "%(unit,pres))
elif pres == "us" and unit in ["1b"]:
if unit in ["1b"]:
- tdSql.error("select stateduration(id,'GT',1,%s) from db_%s.tb1 "%(unit,pres))
+ tdSql.error(f"select stateduration(id,'GT',1,%s) from db_%s.tb1 "%(unit,pres))
pass
else:
- tdSql.query("select stateduration(id,'GT',1,%s) from db_%s.tb1 "%(unit,pres))
+ tdSql.query(f"select stateduration(id,'GT',1,%s) from db_%s.tb1 "%(unit,pres))
else:
- tdSql.query("select stateduration(id,'GT',1,%s) from db_%s.tb1 "%(unit,pres))
+ tdSql.query(f"select stateduration(id,'GT',1,%s) from db_%s.tb1 "%(unit,pres))
basic_result = 70
tdSql.checkData(9,0,basic_result*pow(1000,index))
- def check_boundary_values(self):
+ def check_boundary_values(self, dbname="bound_test"):
- tdSql.execute("drop database if exists bound_test")
- tdSql.execute("create database if not exists bound_test")
- tdSql.execute("use bound_test")
+ tdSql.execute(f"drop database if exists {dbname}")
+ tdSql.execute(f"create database if not exists {dbname}")
tdSql.execute(
- "create table stb_bound (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(32),c9 nchar(32), c10 timestamp) tags (t1 int);"
+ f"create table {dbname}.stb_bound (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(32),c9 nchar(32), c10 timestamp) tags (t1 int);"
)
- tdSql.execute(f'create table sub1_bound using stb_bound tags ( 1 )')
+ tdSql.execute(f'create table {dbname}.sub1_bound using {dbname}.stb_bound tags ( 1 )')
tdSql.execute(
- f"insert into sub1_bound values ( now()-1s, 2147483647, 9223372036854775807, 32767, 127, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
+ f"insert into {dbname}.sub1_bound values ( now()-1s, 2147483647, 9223372036854775807, 32767, 127, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
)
tdSql.execute(
- f"insert into sub1_bound values ( now(), 2147483646, 9223372036854775806, 32766, 126, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
+ f"insert into {dbname}.sub1_bound values ( now(), 2147483646, 9223372036854775806, 32766, 126, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
)
tdSql.execute(
- f"insert into sub1_bound values ( now(), -2147483646, -9223372036854775806, -32766, -126, -3.40E+38, -1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
+ f"insert into {dbname}.sub1_bound values ( now(), -2147483646, -9223372036854775806, -32766, -126, -3.40E+38, -1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
)
tdSql.execute(
- f"insert into sub1_bound values ( now(), 2147483643, 9223372036854775803, 32763, 123, 3.39E+38, 1.69e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
+ f"insert into {dbname}.sub1_bound values ( now(), 2147483643, 9223372036854775803, 32763, 123, 3.39E+38, 1.69e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
)
tdSql.execute(
- f"insert into sub1_bound values ( now(), -2147483643, -9223372036854775803, -32763, -123, -3.39E+38, -1.69e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
+ f"insert into {dbname}.sub1_bound values ( now(), -2147483643, -9223372036854775803, -32763, -123, -3.39E+38, -1.69e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
)
tdSql.error(
- f"insert into sub1_bound values ( now()+1s, 2147483648, 9223372036854775808, 32768, 128, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
+ f"insert into {dbname}.sub1_bound values ( now()+1s, 2147483648, 9223372036854775808, 32768, 128, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
)
- tdSql.query("select statecount(c1,'GT',1) from sub1_bound")
+ tdSql.query(f"select statecount(c1,'GT',1) from {dbname}.sub1_bound")
tdSql.checkRows(5)
def run(self): # sourcery skip: extract-duplicate-method, remove-redundant-fstring
diff --git a/tests/system-test/2-query/substr.py b/tests/system-test/2-query/substr.py
index f833a42b574aac2cf8cfcab1bae7035b8273c427..ea55c5e44e780c5acdd86b8be29e8654b8d1251e 100644
--- a/tests/system-test/2-query/substr.py
+++ b/tests/system-test/2-query/substr.py
@@ -127,16 +127,16 @@ class TDTestCase:
return sqls
- def __test_current(self): # sourcery skip: use-itertools-product
+ def __test_current(self, dbname="db"): # sourcery skip: use-itertools-product
tdLog.printNoPrefix("==========current sql condition check , must return query ok==========")
- tbname = ["ct1", "ct2", "ct4", "t1", "stb1"]
+ tbname = [f"{dbname}.ct1", f"{dbname}.ct2", f"{dbname}.ct4", f"{dbname}.t1", f"{dbname}.stb1"]
for tb in tbname:
self.__substr_check(tb, CURRENT_POS, LENS)
tdLog.printNoPrefix(f"==========current sql condition check in {tb} over==========")
- def __test_error(self):
+ def __test_error(self, dbname="db"):
tdLog.printNoPrefix("==========err sql condition check , must return error==========")
- tbname = ["ct1", "ct2", "ct4", "t1", "stb1"]
+ tbname = [f"{dbname}.ct1", f"{dbname}.ct2", f"{dbname}.ct4", f"{dbname}.t1", f"{dbname}.stb1"]
for tb in tbname:
for errsql in self.__substr_err_check(tb):
@@ -145,22 +145,21 @@ class TDTestCase:
tdLog.printNoPrefix(f"==========err sql condition check in {tb} over==========")
- def all_test(self):
- self.__test_current()
- self.__test_error()
+ def all_test(self, dbname="db"):
+ self.__test_current(dbname)
+ self.__test_error(dbname)
- def __create_tb(self):
- tdSql.prepare()
+ def __create_tb(self, dbname="db"):
tdLog.printNoPrefix("==========step1:create table")
- create_stb_sql = f'''create table stb1(
+ create_stb_sql = f'''create table {dbname}.stb1(
ts timestamp, {INT_COL} int, {BINT_COL} bigint, {SINT_COL} smallint, {TINT_COL} tinyint,
{FLOAT_COL} float, {DOUBLE_COL} double, {BOOL_COL} bool,
{BINARY_COL} binary(16), {NCHAR_COL} nchar(32), {TS_COL} timestamp
) tags (tag1 int)
'''
- create_ntb_sql = f'''create table t1(
+ create_ntb_sql = f'''create table {dbname}.t1(
ts timestamp, {INT_COL} int, {BINT_COL} bigint, {SINT_COL} smallint, {TINT_COL} tinyint,
{FLOAT_COL} float, {DOUBLE_COL} double, {BOOL_COL} bool,
{BINARY_COL} binary(16), {NCHAR_COL} nchar(32), {TS_COL} timestamp
@@ -170,29 +169,29 @@ class TDTestCase:
tdSql.execute(create_ntb_sql)
for i in range(4):
- tdSql.execute(f'create table ct{i+1} using stb1 tags ( {i+1} )')
+ tdSql.execute(f'create table {dbname}.ct{i+1} using {dbname}.stb1 tags ( {i+1} )')
- def __insert_data(self, rows):
+ def __insert_data(self, rows, dbname="db"):
now_time = int(datetime.datetime.timestamp(datetime.datetime.now()) * 1000)
for i in range(rows):
tdSql.execute(
- f"insert into ct1 values ( { now_time - i * 1000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )"
+ f"insert into {dbname}.ct1 values ( { now_time - i * 1000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )"
)
tdSql.execute(
- f"insert into ct4 values ( { now_time - i * 7776000000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )"
+ f"insert into {dbname}.ct4 values ( { now_time - i * 7776000000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )"
)
tdSql.execute(
- f"insert into ct2 values ( { now_time - i * 7776000000 }, {-i}, {-11111 * i}, {-111 * i % 32767 }, {-11 * i % 127}, {-1.11*i}, {-1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )"
+ f"insert into {dbname}.ct2 values ( { now_time - i * 7776000000 }, {-i}, {-11111 * i}, {-111 * i % 32767 }, {-11 * i % 127}, {-1.11*i}, {-1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )"
)
tdSql.execute(
- f'''insert into ct1 values
+ f'''insert into {dbname}.ct1 values
( { now_time - rows * 5 }, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar_测试_0', { now_time + 8 } )
( { now_time + 10000 }, { rows }, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar_测试_9', { now_time + 9 } )
'''
)
tdSql.execute(
- f'''insert into ct4 values
+ f'''insert into {dbname}.ct4 values
( { now_time - rows * 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( { now_time - rows * 3888000000 + 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( { now_time + 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
@@ -208,7 +207,7 @@ class TDTestCase:
)
tdSql.execute(
- f'''insert into ct2 values
+ f'''insert into {dbname}.ct2 values
( { now_time - rows * 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( { now_time - rows * 3888000000 + 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( { now_time + 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
@@ -224,13 +223,13 @@ class TDTestCase:
)
for i in range(rows):
- insert_data = f'''insert into t1 values
+ insert_data = f'''insert into {dbname}.t1 values
( { now_time - i * 3600000 }, {i}, {i * 11111}, { i % 32767 }, { i % 127}, { i * 1.11111 }, { i * 1000.1111 }, { i % 2},
"binary_{i}", "nchar_测试_{i}", { now_time - 1000 * i } )
'''
tdSql.execute(insert_data)
tdSql.execute(
- f'''insert into t1 values
+ f'''insert into {dbname}.t1 values
( { now_time + 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( { now_time - (( rows // 2 ) * 60 + 30) * 60000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( { now_time - rows * 3600000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
@@ -259,10 +258,7 @@ class TDTestCase:
tdLog.printNoPrefix("==========step3:all check")
self.all_test()
- tdDnodes.stop(1)
- tdDnodes.start(1)
-
- tdSql.execute("use db")
+ tdSql.execute("flush database db")
tdLog.printNoPrefix("==========step4:after wal, all check again ")
self.all_test()
diff --git a/tests/system-test/2-query/sum.py b/tests/system-test/2-query/sum.py
index 4f5ed34419082d49990f14b6e8518b516c4e7df8..dbc79e25f5ba230723f54507f47da91514698c69 100644
--- a/tests/system-test/2-query/sum.py
+++ b/tests/system-test/2-query/sum.py
@@ -89,14 +89,14 @@ class TDTestCase:
return sqls
- def __test_current(self):
+ def __test_current(self, dbname="db"):
tdLog.printNoPrefix("==========current sql condition check , must return query ok==========")
tbname = ["ct1", "ct2", "ct4", "t1"]
for tb in tbname:
self.__sum_current_check(tb)
tdLog.printNoPrefix(f"==========current sql condition check in {tb} over==========")
- def __test_error(self):
+ def __test_error(self, dbname="db"):
tdLog.printNoPrefix("==========err sql condition check , must return error==========")
tbname = ["ct1", "ct2", "ct4", "t1"]
@@ -106,21 +106,21 @@ class TDTestCase:
tdLog.printNoPrefix(f"==========err sql condition check in {tb} over==========")
- def all_test(self):
- self.__test_current()
- self.__test_error()
+ def all_test(self, dbname="db"):
+ self.__test_current(dbname)
+ self.__test_error(dbname)
- def __create_tb(self):
+ def __create_tb(self, dbname="db"):
tdLog.printNoPrefix("==========step1:create table")
- create_stb_sql = f'''create table {DBNAME}.stb1(
+ create_stb_sql = f'''create table {dbname}.stb1(
ts timestamp, {INT_COL} int, {BINT_COL} bigint, {SINT_COL} smallint, {TINT_COL} tinyint,
{FLOAT_COL} float, {DOUBLE_COL} double, {BOOL_COL} bool,
{BINARY_COL} binary(16), {NCHAR_COL} nchar(32), {TS_COL} timestamp
- ) tags (t1 int)
+ ) tags (tag1 int)
'''
- create_ntb_sql = f'''create table {DBNAME}.t1(
+ create_ntb_sql = f'''create table {dbname}.t1(
ts timestamp, {INT_COL} int, {BINT_COL} bigint, {SINT_COL} smallint, {TINT_COL} tinyint,
{FLOAT_COL} float, {DOUBLE_COL} double, {BOOL_COL} bool,
{BINARY_COL} binary(16), {NCHAR_COL} nchar(32), {TS_COL} timestamp
@@ -130,83 +130,82 @@ class TDTestCase:
tdSql.execute(create_ntb_sql)
for i in range(4):
- tdSql.execute(f'create table {DBNAME}.ct{i+1} using {DBNAME}.stb1 tags ( {i+1} )')
+ tdSql.execute(f'create table {dbname}.ct{i+1} using {dbname}.stb1 tags ( {i+1} )')
- def __insert_data(self, rows):
+ def __insert_data(self, rows, dbname="db"):
now_time = int(datetime.datetime.timestamp(datetime.datetime.now()) * 1000)
for i in range(rows):
tdSql.execute(
- f"insert into {DBNAME}.ct1 values ( { now_time - i * 1000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar{i}', { now_time + 1 * i } )"
+ f"insert into {dbname}.ct1 values ( { now_time - i * 1000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )"
)
tdSql.execute(
- f"insert into {DBNAME}.ct4 values ( { now_time - i * 7776000000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar{i}', { now_time + 1 * i } )"
+ f"insert into {dbname}.ct4 values ( { now_time - i * 7776000000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )"
)
tdSql.execute(
- f"insert into {DBNAME}.ct2 values ( { now_time - i * 7776000000 }, {-i}, {-11111 * i}, {-111 * i % 32767 }, {-11 * i % 127}, {-1.11*i}, {-1100.0011*i}, {i%2}, 'binary{i}', 'nchar{i}', { now_time + 1 * i } )"
+ f"insert into {dbname}.ct2 values ( { now_time - i * 7776000000 }, {-i}, {-11111 * i}, {-111 * i % 32767 }, {-11 * i % 127}, {-1.11*i}, {-1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )"
)
tdSql.execute(
- f'''insert into {DBNAME}.ct1 values
- ( { now_time - rows * 5 }, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar0', { now_time + 8 } )
- ( { now_time + 10000 }, { rows }, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar9', { now_time + 9 } )
+ f'''insert into {dbname}.ct1 values
+ ( { now_time - rows * 5 }, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar_测试_0', { now_time + 8 } )
+ ( { now_time + 10000 }, { rows }, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar_测试_9', { now_time + 9 } )
'''
)
tdSql.execute(
- f'''insert into {DBNAME}.ct4 values
+ f'''insert into {dbname}.ct4 values
( { now_time - rows * 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
- ( { now_time - rows * 3888000000+ 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
+ ( { now_time - rows * 3888000000 + 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( { now_time + 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
(
{ now_time + 5184000000}, {pow(2,31)-pow(2,15)}, {pow(2,63)-pow(2,30)}, 32767, 127,
- { 3.3 * pow(10,38) }, { 1.3 * pow(10,308) }, { rows % 2 }, "binary_limit-1", "nchar_limit-1", { now_time - 86400000}
+ { 3.3 * pow(10,38) }, { 1.3 * pow(10,308) }, { rows % 2 }, "binary_limit-1", "nchar_测试_limit-1", { now_time - 86400000}
)
(
{ now_time + 2592000000 }, {pow(2,31)-pow(2,16)}, {pow(2,63)-pow(2,31)}, 32766, 126,
- { 3.2 * pow(10,38) }, { 1.2 * pow(10,308) }, { (rows-1) % 2 }, "binary_limit-2", "nchar_limit-2", { now_time - 172800000}
+ { 3.2 * pow(10,38) }, { 1.2 * pow(10,308) }, { (rows-1) % 2 }, "binary_limit-2", "nchar_测试_limit-2", { now_time - 172800000}
)
'''
)
tdSql.execute(
- f'''insert into {DBNAME}.ct2 values
+ f'''insert into {dbname}.ct2 values
( { now_time - rows * 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
- ( { now_time - rows * 3888000000+ 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
+ ( { now_time - rows * 3888000000 + 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( { now_time + 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
(
{ now_time + 5184000000 }, { -1 * pow(2,31) + pow(2,15) }, { -1 * pow(2,63) + pow(2,30) }, -32766, -126,
- { -1 * 3.2 * pow(10,38) }, { -1.2 * pow(10,308) }, { rows % 2 }, "binary_limit-1", "nchar_limit-1", { now_time - 86400000 }
+ { -1 * 3.2 * pow(10,38) }, { -1.2 * pow(10,308) }, { rows % 2 }, "binary_limit-1", "nchar_测试_limit-1", { now_time - 86400000 }
)
(
{ now_time + 2592000000 }, { -1 * pow(2,31) + pow(2,16) }, { -1 * pow(2,63) + pow(2,31) }, -32767, -127,
- { - 3.3 * pow(10,38) }, { -1.3 * pow(10,308) }, { (rows-1) % 2 }, "binary_limit-2", "nchar_limit-2", { now_time - 172800000 }
+ { - 3.3 * pow(10,38) }, { -1.3 * pow(10,308) }, { (rows-1) % 2 }, "binary_limit-2", "nchar_测试_limit-2", { now_time - 172800000 }
)
'''
)
for i in range(rows):
- insert_data = f'''insert into {DBNAME}.t1 values
+ insert_data = f'''insert into {dbname}.t1 values
( { now_time - i * 3600000 }, {i}, {i * 11111}, { i % 32767 }, { i % 127}, { i * 1.11111 }, { i * 1000.1111 }, { i % 2},
- "binary_{i}", "nchar_{i}", { now_time - 1000 * i } )
+ "binary_{i}", "nchar_测试_{i}", { now_time - 1000 * i } )
'''
tdSql.execute(insert_data)
tdSql.execute(
- f'''insert into {DBNAME}.t1 values
+ f'''insert into {dbname}.t1 values
( { now_time + 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( { now_time - (( rows // 2 ) * 60 + 30) * 60000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( { now_time - rows * 3600000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( { now_time + 7200000 }, { pow(2,31) - pow(2,15) }, { pow(2,63) - pow(2,30) }, 32767, 127,
{ 3.3 * pow(10,38) }, { 1.3 * pow(10,308) }, { rows % 2 },
- "binary_limit-1", "nchar_limit-1", { now_time - 86400000 }
+ "binary_limit-1", "nchar_测试_limit-1", { now_time - 86400000 }
)
(
{ now_time + 3600000 } , { pow(2,31) - pow(2,16) }, { pow(2,63) - pow(2,31) }, 32766, 126,
{ 3.2 * pow(10,38) }, { 1.2 * pow(10,308) }, { (rows-1) % 2 },
- "binary_limit-2", "nchar_limit-2", { now_time - 172800000 }
+ "binary_limit-2", "nchar_测试_limit-2", { now_time - 172800000 }
)
'''
)
-
def run(self):
tdSql.prepare()
@@ -219,12 +218,8 @@ class TDTestCase:
tdLog.printNoPrefix("==========step3:all check")
self.all_test()
- # tdDnodes.stop(1)
- # tdDnodes.start(1)
-
tdSql.execute("flush database db")
-
tdSql.execute("use db")
tdLog.printNoPrefix("==========step4:after wal, all check again ")
diff --git a/tests/system-test/2-query/tail.py b/tests/system-test/2-query/tail.py
index d708873d6ff608581a64120a054c81f0b3a8da1f..687023f57ec833248c2c7c472b751019a90f930f 100644
--- a/tests/system-test/2-query/tail.py
+++ b/tests/system-test/2-query/tail.py
@@ -10,49 +10,46 @@ from util.sql import *
from util.cases import *
class TDTestCase:
- updatecfgDict = {'debugFlag': 143 ,"cDebugFlag":143,"uDebugFlag":143 ,"rpcDebugFlag":143 , "tmrDebugFlag":143 ,
- "jniDebugFlag":143 ,"simDebugFlag":143,"dDebugFlag":143, "dDebugFlag":143,"vDebugFlag":143,"mDebugFlag":143,"qDebugFlag":143,
- "wDebugFlag":143,"sDebugFlag":143,"tsdbDebugFlag":143,"tqDebugFlag":143 ,"fsDebugFlag":143 ,"udfDebugFlag":143}
def init(self, conn, logSql):
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor())
-
- def prepare_datas(self):
+
+ def prepare_datas(self, dbname="db"):
tdSql.execute(
- '''create table stb1
+ f'''create table {dbname}.stb1
(ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp)
tags (t1 int)
'''
)
-
+
tdSql.execute(
- '''
- create table t1
+ f'''
+ create table {dbname}.t1
(ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp)
'''
)
for i in range(4):
- tdSql.execute(f'create table ct{i+1} using stb1 tags ( {i+1} )')
+ tdSql.execute(f'create table {dbname}.ct{i+1} using {dbname}.stb1 tags ( {i+1} )')
for i in range(9):
tdSql.execute(
- f"insert into ct1 values ( now()-{i*10}s, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )"
+ f"insert into {dbname}.ct1 values ( now()-{i*10}s, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )"
)
tdSql.execute(
- f"insert into ct4 values ( now()-{i*90}d, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )"
+ f"insert into {dbname}.ct4 values ( now()-{i*90}d, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )"
)
- tdSql.execute("insert into ct1 values (now()-45s, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar0', now()+8a )")
- tdSql.execute("insert into ct1 values (now()+10s, 9, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )")
- tdSql.execute("insert into ct1 values (now()+15s, 9, -99999, -999, -99, -9.99, NULL, 1, 'binary9', 'nchar9', now()+9a )")
- tdSql.execute("insert into ct1 values (now()+20s, 9, -99999, -999, NULL, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )")
+ tdSql.execute(f"insert into {dbname}.ct1 values (now()-45s, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar0', now()+8a )")
+ tdSql.execute(f"insert into {dbname}.ct1 values (now()+10s, 9, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )")
+ tdSql.execute(f"insert into {dbname}.ct1 values (now()+15s, 9, -99999, -999, -99, -9.99, NULL, 1, 'binary9', 'nchar9', now()+9a )")
+ tdSql.execute(f"insert into {dbname}.ct1 values (now()+20s, 9, -99999, -999, NULL, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )")
- tdSql.execute("insert into ct4 values (now()-810d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
- tdSql.execute("insert into ct4 values (now()-400d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
- tdSql.execute("insert into ct4 values (now()+90d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
+ tdSql.execute(f"insert into {dbname}.ct4 values (now()-810d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
+ tdSql.execute(f"insert into {dbname}.ct4 values (now()-400d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
+ tdSql.execute(f"insert into {dbname}.ct4 values (now()+90d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
tdSql.execute(
- f'''insert into t1 values
+ f'''insert into {dbname}.t1 values
( '2020-04-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( '2020-10-21 01:01:01.000', 1, 11111, 111, 11, 1.11, 11.11, 1, "binary1", "nchar1", now()+1a )
( '2020-12-31 01:01:01.000', 2, 22222, 222, 22, 2.22, 22.22, 0, "binary2", "nchar2", now()+2a )
@@ -67,115 +64,115 @@ class TDTestCase:
( '2023-02-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
'''
)
-
- def test_errors(self):
+
+ def test_errors(self, dbname="db"):
error_sql_lists = [
- "select tail from t1",
- "select tail(123--123)==1 from t1",
- "select tail(123,123) from t1",
- "select tail(c1,ts) from t1",
- "select tail(c1,c1,ts) from t1",
- "select tail(c1) as 'd1' from t1",
- "select tail(c1 ,c2 ) from t1",
- "select tail(c1 ,NULL) from t1",
- "select tail(,) from t1;",
- "select tail(tail(c1) ab from t1)",
- "select tail(c1) as int from t1",
- "select tail('c1') from t1",
- "select tail(NULL) from t1",
- "select tail('') from t1",
- "select tail(c%) from t1",
- "select tail(t1) from t1",
- "select tail(True) from t1",
- "select tail(c1,1) , count(c1) from t1",
- "select tail(c1,1) , avg(c1) from t1",
- "select tail(c1,1) , min(c1) from t1",
- "select tail(c1,1) , spread(c1) from t1",
- "select tail(c1,1) , diff(c1) from t1",
- "select tail from stb1 partition by tbname",
- "select tail(123--123)==1 from stb1 partition by tbname",
- "select tail(123,123) from stb1 partition by tbname",
- "select tail(c1,ts) from stb1 partition by tbname",
- "select tail(c1,c1,ts) from stb1 partition by tbname",
- "select tail(c1) as 'd1' from stb1 partition by tbname",
- "select tail(c1 ,c2 ) from stb1 partition by tbname",
- "select tail(c1 ,NULL) from stb1 partition by tbname",
- "select tail(,) from stb1 partition by tbname;",
- "select tail(tail(c1) ab from stb1 partition by tbname)",
- "select tail(c1) as int from stb1 partition by tbname",
- "select tail('c1') from stb1 partition by tbname",
- "select tail(NULL) from stb1 partition by tbname",
- "select tail('') from stb1 partition by tbname",
- "select tail(c%) from stb1 partition by tbname",
- "select tail(t1) from stb1 partition by tbname",
- "select tail(True) from stb1 partition by tbname",
- "select tail(c1,1) , count(c1) from stb1 partition by tbname",
- "select tail(c1,1) , avg(c1) from stb1 partition by tbname",
- "select tail(c1,1) , min(c1) from stb1 partition by tbname",
- "select tail(c1,1) , spread(c1) from stb1 partition by tbname",
- "select tail(c1,1) , diff(c1) from stb1 partition by tbname",
+ f"select tail from {dbname}.t1",
+ f"select tail(123--123)==1 from {dbname}.t1",
+ f"select tail(123,123) from {dbname}.t1",
+ f"select tail(c1,ts) from {dbname}.t1",
+ f"select tail(c1,c1,ts) from {dbname}.t1",
+ f"select tail(c1) as 'd1' from {dbname}.t1",
+ f"select tail(c1 ,c2 ) from {dbname}.t1",
+ f"select tail(c1 ,NULL) from {dbname}.t1",
+ f"select tail(,) from {dbname}.t1;",
+ f"select tail(tail(c1) ab from {dbname}.t1)",
+ f"select tail(c1) as int from {dbname}.t1",
+ f"select tail('c1') from {dbname}.t1",
+ f"select tail(NULL) from {dbname}.t1",
+ f"select tail('') from {dbname}.t1",
+ f"select tail(c%) from {dbname}.t1",
+ f"select tail(t1) from {dbname}.t1",
+ f"select tail(True) from {dbname}.t1",
+ f"select tail(c1,1) , count(c1) from {dbname}.t1",
+ f"select tail(c1,1) , avg(c1) from {dbname}.t1",
+ f"select tail(c1,1) , min(c1) from {dbname}.t1",
+ f"select tail(c1,1) , spread(c1) from {dbname}.t1",
+ f"select tail(c1,1) , diff(c1) from {dbname}.t1",
+ f"select tail from {dbname}.stb1 partition by tbname",
+ f"select tail(123--123)==1 from {dbname}.stb1 partition by tbname",
+ f"select tail(123,123) from {dbname}.stb1 partition by tbname",
+ f"select tail(c1,ts) from {dbname}.stb1 partition by tbname",
+ f"select tail(c1,c1,ts) from {dbname}.stb1 partition by tbname",
+ f"select tail(c1) as 'd1' from {dbname}.stb1 partition by tbname",
+ f"select tail(c1 ,c2 ) from {dbname}.stb1 partition by tbname",
+ f"select tail(c1 ,NULL) from {dbname}.stb1 partition by tbname",
+ f"select tail(,) from {dbname}.stb1 partition by tbname;",
+ f"select tail(tail(c1) ab from {dbname}.stb1 partition by tbname)",
+ f"select tail(c1) as int from {dbname}.stb1 partition by tbname",
+ f"select tail('c1') from {dbname}.stb1 partition by tbname",
+ f"select tail(NULL) from {dbname}.stb1 partition by tbname",
+ f"select tail('') from {dbname}.stb1 partition by tbname",
+ f"select tail(c%) from {dbname}.stb1 partition by tbname",
+ f"select tail(t1) from {dbname}.stb1 partition by tbname",
+ f"select tail(True) from {dbname}.stb1 partition by tbname",
+ f"select tail(c1,1) , count(c1) from {dbname}.stb1 partition by tbname",
+ f"select tail(c1,1) , avg(c1) from {dbname}.stb1 partition by tbname",
+ f"select tail(c1,1) , min(c1) from {dbname}.stb1 partition by tbname",
+ f"select tail(c1,1) , spread(c1) from {dbname}.stb1 partition by tbname",
+ f"select tail(c1,1) , diff(c1) from {dbname}.stb1 partition by tbname",
]
for error_sql in error_sql_lists:
tdSql.error(error_sql)
-
- def support_types(self):
+
+ def support_types(self, dbname="db"):
other_no_value_types = [
- "select tail(ts,1) from t1" ,
- "select tail(c7,1) from t1",
- "select tail(c8,1) from t1",
- "select tail(c9,1) from t1",
- "select tail(ts,1) from ct1" ,
- "select tail(c7,1) from ct1",
- "select tail(c8,1) from ct1",
- "select tail(c9,1) from ct1",
- "select tail(ts,1) from ct3" ,
- "select tail(c7,1) from ct3",
- "select tail(c8,1) from ct3",
- "select tail(c9,1) from ct3",
- "select tail(ts,1) from ct4" ,
- "select tail(c7,1) from ct4",
- "select tail(c8,1) from ct4",
- "select tail(c9,1) from ct4",
- "select tail(ts,1) from stb1 partition by tbname" ,
- "select tail(c7,1) from stb1 partition by tbname",
- "select tail(c8,1) from stb1 partition by tbname",
- "select tail(c9,1) from stb1 partition by tbname"
+ f"select tail(ts,1) from {dbname}.t1" ,
+ f"select tail(c7,1) from {dbname}.t1",
+ f"select tail(c8,1) from {dbname}.t1",
+ f"select tail(c9,1) from {dbname}.t1",
+ f"select tail(ts,1) from {dbname}.ct1" ,
+ f"select tail(c7,1) from {dbname}.ct1",
+ f"select tail(c8,1) from {dbname}.ct1",
+ f"select tail(c9,1) from {dbname}.ct1",
+ f"select tail(ts,1) from {dbname}.ct3" ,
+ f"select tail(c7,1) from {dbname}.ct3",
+ f"select tail(c8,1) from {dbname}.ct3",
+ f"select tail(c9,1) from {dbname}.ct3",
+ f"select tail(ts,1) from {dbname}.ct4" ,
+ f"select tail(c7,1) from {dbname}.ct4",
+ f"select tail(c8,1) from {dbname}.ct4",
+ f"select tail(c9,1) from {dbname}.ct4",
+ f"select tail(ts,1) from {dbname}.stb1 partition by tbname" ,
+ f"select tail(c7,1) from {dbname}.stb1 partition by tbname",
+ f"select tail(c8,1) from {dbname}.stb1 partition by tbname",
+ f"select tail(c9,1) from {dbname}.stb1 partition by tbname"
]
-
+
for type_sql in other_no_value_types:
tdSql.query(type_sql)
-
+
type_sql_lists = [
- "select tail(c1,1) from t1",
- "select tail(c2,1) from t1",
- "select tail(c3,1) from t1",
- "select tail(c4,1) from t1",
- "select tail(c5,1) from t1",
- "select tail(c6,1) from t1",
-
- "select tail(c1,1) from ct1",
- "select tail(c2,1) from ct1",
- "select tail(c3,1) from ct1",
- "select tail(c4,1) from ct1",
- "select tail(c5,1) from ct1",
- "select tail(c6,1) from ct1",
-
- "select tail(c1,1) from ct3",
- "select tail(c2,1) from ct3",
- "select tail(c3,1) from ct3",
- "select tail(c4,1) from ct3",
- "select tail(c5,1) from ct3",
- "select tail(c6,1) from ct3",
-
- "select tail(c1,1) from stb1 partition by tbname",
- "select tail(c2,1) from stb1 partition by tbname",
- "select tail(c3,1) from stb1 partition by tbname",
- "select tail(c4,1) from stb1 partition by tbname",
- "select tail(c5,1) from stb1 partition by tbname",
- "select tail(c6,1) from stb1 partition by tbname",
-
- "select tail(c6,1) as alisb from stb1 partition by tbname",
- "select tail(c6,1) alisb from stb1 partition by tbname",
+ f"select tail(c1,1) from {dbname}.t1",
+ f"select tail(c2,1) from {dbname}.t1",
+ f"select tail(c3,1) from {dbname}.t1",
+ f"select tail(c4,1) from {dbname}.t1",
+ f"select tail(c5,1) from {dbname}.t1",
+ f"select tail(c6,1) from {dbname}.t1",
+
+ f"select tail(c1,1) from {dbname}.ct1",
+ f"select tail(c2,1) from {dbname}.ct1",
+ f"select tail(c3,1) from {dbname}.ct1",
+ f"select tail(c4,1) from {dbname}.ct1",
+ f"select tail(c5,1) from {dbname}.ct1",
+ f"select tail(c6,1) from {dbname}.ct1",
+
+ f"select tail(c1,1) from {dbname}.ct3",
+ f"select tail(c2,1) from {dbname}.ct3",
+ f"select tail(c3,1) from {dbname}.ct3",
+ f"select tail(c4,1) from {dbname}.ct3",
+ f"select tail(c5,1) from {dbname}.ct3",
+ f"select tail(c6,1) from {dbname}.ct3",
+
+ f"select tail(c1,1) from {dbname}.stb1 partition by tbname",
+ f"select tail(c2,1) from {dbname}.stb1 partition by tbname",
+ f"select tail(c3,1) from {dbname}.stb1 partition by tbname",
+ f"select tail(c4,1) from {dbname}.stb1 partition by tbname",
+ f"select tail(c5,1) from {dbname}.stb1 partition by tbname",
+ f"select tail(c6,1) from {dbname}.stb1 partition by tbname",
+
+ f"select tail(c6,1) as alisb from {dbname}.stb1 partition by tbname",
+ f"select tail(c6,1) alisb from {dbname}.stb1 partition by tbname",
]
for type_sql in type_sql_lists:
@@ -189,7 +186,6 @@ class TDTestCase:
tail_result = tdSql.queryResult
tdSql.query(equal_sql)
- print(equal_sql)
equal_result = tdSql.queryResult
@@ -198,257 +194,255 @@ class TDTestCase:
else:
tdLog.exit(" tail query check fail , tail sql is: %s " %tail_sql)
- def basic_tail_function(self):
+ def basic_tail_function(self, dbname="db"):
- # basic query
- tdSql.query("select c1 from ct3")
+ # basic query
+ tdSql.query(f"select c1 from {dbname}.ct3")
tdSql.checkRows(0)
- tdSql.query("select c1 from t1")
+ tdSql.query(f"select c1 from {dbname}.t1")
tdSql.checkRows(12)
- tdSql.query("select c1 from stb1")
+ tdSql.query(f"select c1 from {dbname}.stb1")
tdSql.checkRows(25)
# used for empty table , ct3 is empty
- tdSql.query("select tail(c1,1) from ct3")
+ tdSql.query(f"select tail(c1,1) from {dbname}.ct3")
tdSql.checkRows(0)
- tdSql.query("select tail(c2,1) from ct3")
+ tdSql.query(f"select tail(c2,1) from {dbname}.ct3")
tdSql.checkRows(0)
- tdSql.query("select tail(c3,1) from ct3")
+ tdSql.query(f"select tail(c3,1) from {dbname}.ct3")
tdSql.checkRows(0)
- tdSql.query("select tail(c4,1) from ct3")
+ tdSql.query(f"select tail(c4,1) from {dbname}.ct3")
tdSql.checkRows(0)
- tdSql.query("select tail(c5,1) from ct3")
+ tdSql.query(f"select tail(c5,1) from {dbname}.ct3")
tdSql.checkRows(0)
- tdSql.query("select tail(c6,1) from ct3")
-
+ tdSql.query(f"select tail(c6,1) from {dbname}.ct3")
+
# auto check for t1 table
# used for regular table
- tdSql.query("select tail(c1,1) from t1")
-
- tdSql.query("desc t1")
+ tdSql.query(f"select tail(c1,1) from {dbname}.t1")
+
+ tdSql.query(f"desc {dbname}.t1")
col_lists_rows = tdSql.queryResult
col_lists = []
for col_name in col_lists_rows:
if col_name[0] =="ts":
continue
-
+
col_lists.append(col_name[0])
-
+
for col in col_lists:
- for loop in range(100):
+ for loop in range(100):
limit = randint(1,100)
offset = randint(0,100)
- self.check_tail_table("t1" , col , limit , offset)
+ self.check_tail_table(f"{dbname}.t1" , col , limit , offset)
# tail for invalid params
-
- tdSql.error("select tail(c1,-10,10) from ct1")
- tdSql.error("select tail(c1,10,10000) from ct1")
- tdSql.error("select tail(c1,10,-100) from ct1")
- tdSql.error("select tail(c1,100/2,10) from ct1")
- tdSql.error("select tail(c1,5,10*2) from ct1")
- tdSql.query("select tail(c1,100,100) from ct1")
+
+ tdSql.error(f"select tail(c1,-10,10) from {dbname}.ct1")
+ tdSql.error(f"select tail(c1,10,10000) from {dbname}.ct1")
+ tdSql.error(f"select tail(c1,10,-100) from {dbname}.ct1")
+ tdSql.error(f"select tail(c1,100/2,10) from {dbname}.ct1")
+ tdSql.error(f"select tail(c1,5,10*2) from {dbname}.ct1")
+ tdSql.query(f"select tail(c1,100,100) from {dbname}.ct1")
tdSql.checkRows(0)
- tdSql.query("select tail(c1,10,100) from ct1")
+ tdSql.query(f"select tail(c1,10,100) from {dbname}.ct1")
tdSql.checkRows(0)
- tdSql.error("select tail(c1,10,101) from ct1")
- tdSql.query("select tail(c1,10,0) from ct1")
- tdSql.query("select tail(c1,100,10) from ct1")
+ tdSql.error(f"select tail(c1,10,101) from {dbname}.ct1")
+ tdSql.query(f"select tail(c1,10,0) from {dbname}.ct1")
+ tdSql.query(f"select tail(c1,100,10) from {dbname}.ct1")
tdSql.checkRows(3)
-
+
# tail with super tags
- tdSql.query("select tail(c1,10,10) from ct1")
+ tdSql.query(f"select tail(c1,10,10) from {dbname}.ct1")
tdSql.checkRows(3)
- tdSql.query("select tail(c1,10,10),tbname from ct1")
- tdSql.query("select tail(c1,10,10),t1 from ct1")
+ tdSql.query(f"select tail(c1,10,10),tbname from {dbname}.ct1")
+ tdSql.query(f"select tail(c1,10,10),t1 from {dbname}.ct1")
+
+ # tail with common col
+ tdSql.query(f"select tail(c1,10,10) ,ts from {dbname}.ct1")
+ tdSql.query(f"select tail(c1,10,10) ,c1 from {dbname}.ct1")
- # tail with common col
- tdSql.query("select tail(c1,10,10) ,ts from ct1")
- tdSql.query("select tail(c1,10,10) ,c1 from ct1")
+ # tail with scalar function
+ tdSql.query(f"select tail(c1,10,10) ,abs(c1) from {dbname}.ct1")
+ tdSql.error(f"select tail(c1,10,10) , tail(c2,10,10) from {dbname}.ct1")
+ tdSql.query(f"select tail(c1,10,10) , abs(c2)+2 from {dbname}.ct1")
- # tail with scalar function
- tdSql.query("select tail(c1,10,10) ,abs(c1) from ct1")
- tdSql.error("select tail(c1,10,10) , tail(c2,10,10) from ct1")
- tdSql.query("select tail(c1,10,10) , abs(c2)+2 from ct1")
-
# bug need fix for scalar value or compute again
- # tdSql.error(" select tail(c1,10,10) , 123 from ct1")
- # tdSql.error(" select abs(tail(c1,10,10)) from ct1")
- # tdSql.error(" select abs(tail(c1,10,10)) + 2 from ct1")
+ # tdSql.error(f"select tail(c1,10,10) , 123 from {dbname}.ct1")
+ # tdSql.error(f"select abs(tail(c1,10,10)) from {dbname}.ct1")
+ # tdSql.error(f"select abs(tail(c1,10,10)) + 2 from {dbname}.ct1")
- # tail with aggregate function
- tdSql.error("select tail(c1,10,10) ,sum(c1) from ct1")
- tdSql.error("select tail(c1,10,10) ,max(c1) from ct1")
- tdSql.error("select tail(c1,10,10) ,csum(c1) from ct1")
- tdSql.error("select tail(c1,10,10) ,count(c1) from ct1")
+ # tail with aggregate function
+ tdSql.error(f"select tail(c1,10,10) ,sum(c1) from {dbname}.ct1")
+ tdSql.error(f"select tail(c1,10,10) ,max(c1) from {dbname}.ct1")
+ tdSql.error(f"select tail(c1,10,10) ,csum(c1) from {dbname}.ct1")
+ tdSql.error(f"select tail(c1,10,10) ,count(c1) from {dbname}.ct1")
# tail with filter where
- tdSql.query("select tail(c1,3,1) from ct4 where c1 is null")
+ tdSql.query(f"select tail(c1,3,1) from {dbname}.ct4 where c1 is null")
tdSql.checkData(0, 0, None)
tdSql.checkData(1, 0, None)
- tdSql.query("select tail(c1,3,2) from ct4 where c1 >2 order by 1")
+ tdSql.query(f"select tail(c1,3,2) from {dbname}.ct4 where c1 >2 order by 1")
tdSql.checkData(0, 0, 5)
tdSql.checkData(1, 0, 6)
tdSql.checkData(2, 0, 7)
- tdSql.query("select tail(c1,2,1) from ct4 where c2 between 0 and 99999 order by 1")
+ tdSql.query(f"select tail(c1,2,1) from {dbname}.ct4 where c2 between 0 and 99999 order by 1")
tdSql.checkData(0, 0, 1)
tdSql.checkData(1, 0, 2)
- # tail with union all
- tdSql.query("select tail(c1,2,1) from ct4 union all select c1 from ct1")
+ # tail with union all
+ tdSql.query(f"select tail(c1,2,1) from {dbname}.ct4 union all select c1 from {dbname}.ct1")
tdSql.checkRows(15)
- tdSql.query("select tail(c1,2,1) from ct4 union all select c1 from ct2 order by 1")
+ tdSql.query(f"select tail(c1,2,1) from {dbname}.ct4 union all select c1 from {dbname}.ct2 order by 1")
tdSql.checkRows(2)
tdSql.checkData(0, 0, 0)
tdSql.checkData(1, 0, 1)
- tdSql.query("select tail(c2,2,1) from ct4 union all select abs(c2)/2 from ct4")
+ tdSql.query(f"select tail(c2,2,1) from {dbname}.ct4 union all select abs(c2)/2 from {dbname}.ct4")
tdSql.checkRows(14)
- # tail with join
- # prepare join datas with same ts
+ # tail with join
+ # prepare join datas with same ts
- tdSql.execute(" use db ")
- tdSql.execute(" create stable st1 (ts timestamp , num int) tags(ind int)")
- tdSql.execute(" create table tb1 using st1 tags(1)")
- tdSql.execute(" create table tb2 using st1 tags(2)")
+ tdSql.execute(f" create stable {dbname}.st1 (ts timestamp , num int) tags(ind int)")
+ tdSql.execute(f" create table {dbname}.tb1 using {dbname}.st1 tags(1)")
+ tdSql.execute(f" create table {dbname}.tb2 using {dbname}.st1 tags(2)")
- tdSql.execute(" create stable st2 (ts timestamp , num int) tags(ind int)")
- tdSql.execute(" create table ttb1 using st2 tags(1)")
- tdSql.execute(" create table ttb2 using st2 tags(2)")
+ tdSql.execute(f" create stable {dbname}.st2 (ts timestamp , num int) tags(ind int)")
+ tdSql.execute(f" create table {dbname}.ttb1 using {dbname}.st2 tags(1)")
+ tdSql.execute(f" create table {dbname}.ttb2 using {dbname}.st2 tags(2)")
start_ts = 1622369635000 # 2021-05-30 18:13:55
for i in range(10):
ts_value = start_ts+i*1000
- tdSql.execute(f" insert into tb1 values({ts_value} , {i})")
- tdSql.execute(f" insert into tb2 values({ts_value} , {i})")
+ tdSql.execute(f" insert into {dbname}.tb1 values({ts_value} , {i})")
+ tdSql.execute(f" insert into {dbname}.tb2 values({ts_value} , {i})")
- tdSql.execute(f" insert into ttb1 values({ts_value} , {i})")
- tdSql.execute(f" insert into ttb2 values({ts_value} , {i})")
+ tdSql.execute(f" insert into {dbname}.ttb1 values({ts_value} , {i})")
+ tdSql.execute(f" insert into {dbname}.ttb2 values({ts_value} , {i})")
- tdSql.query("select tail(tb2.num,3,2) from tb1, tb2 where tb1.ts=tb2.ts order by 1 desc")
+ tdSql.query(f"select tail(tb2.num,3,2) from {dbname}.tb1 tb1, {dbname}.tb2 tb2 where tb1.ts=tb2.ts order by 1 desc")
tdSql.checkRows(3)
tdSql.checkData(0,0,7)
tdSql.checkData(1,0,6)
tdSql.checkData(2,0,5)
# nest query
- # tdSql.query("select tail(c1,2) from (select _rowts , c1 from ct1)")
- tdSql.query("select c1 from (select tail(c1,2) c1 from ct4) order by 1 nulls first")
+ # tdSql.query(f"select tail(c1,2) from (select _rowts , c1 from {dbname}.ct1)")
+ tdSql.query(f"select c1 from (select tail(c1,2) c1 from {dbname}.ct4) order by 1 nulls first")
tdSql.checkRows(2)
tdSql.checkData(0, 0, None)
tdSql.checkData(1, 0, 0)
- tdSql.query("select sum(c1) from (select tail(c1,2) c1 from ct1)")
+ tdSql.query(f"select sum(c1) from (select tail(c1,2) c1 from {dbname}.ct1)")
tdSql.checkRows(1)
tdSql.checkData(0, 0, 18)
- tdSql.query("select abs(c1) from (select tail(c1,2) c1 from ct1)")
+ tdSql.query(f"select abs(c1) from (select tail(c1,2) c1 from {dbname}.ct1)")
tdSql.checkRows(2)
tdSql.checkData(0, 0, 9)
-
+
#partition by tbname
- tdSql.query(" select tail(c1,5) from stb1 partition by tbname ")
+ tdSql.query(f"select tail(c1,5) from {dbname}.stb1 partition by tbname ")
tdSql.checkRows(10)
- tdSql.query(" select tail(c1,3) from stb1 partition by tbname ")
+ tdSql.query(f"select tail(c1,3) from {dbname}.stb1 partition by tbname ")
tdSql.checkRows(6)
-
- # group by
- tdSql.error("select tail(c1,2) from ct1 group by c1")
- tdSql.error("select tail(c1,2) from ct1 group by tbname")
+
+ # group by
+ tdSql.error(f"select tail(c1,2) from {dbname}.ct1 group by c1")
+ tdSql.error(f"select tail(c1,2) from {dbname}.ct1 group by tbname")
# super table
- tdSql.error("select tbname , tail(c1,2) from stb1 group by tbname")
- tdSql.query("select tail(c1,2) from stb1 partition by tbname")
+ tdSql.error(f"select tbname , tail(c1,2) from {dbname}.stb1 group by tbname")
+ tdSql.query(f"select tail(c1,2) from {dbname}.stb1 partition by tbname")
tdSql.checkRows(4)
- # bug need fix
- # tdSql.query("select tbname , tail(c1,2) from stb1 partition by tbname")
+ # bug need fix
+ # tdSql.query(f"select tbname , tail(c1,2) from {dbname}.stb1 partition by tbname")
# tdSql.checkRows(4)
- # tdSql.query("select tbname , tail(c1,2) from stb1 partition by tbname order by tbname")
+ # tdSql.query(f"select tbname , tail(c1,2) from {dbname}.stb1 partition by tbname order by tbname")
# tdSql.checkRows(4)
- # tdSql.query(" select tbname , count(c1) from stb1 partition by tbname order by tbname ")
+ # tdSql.query(f"select tbname , count(c1) from {dbname}.stb1 partition by tbname order by tbname ")
# tdSql.checkRows(2)
- # tdSql.query(" select tbname , max(c1) ,c1 from stb1 partition by tbname order by tbname ")
+ # tdSql.query(f"select tbname , max(c1) ,c1 from {dbname}.stb1 partition by tbname order by tbname ")
# tdSql.checkRows(2)
- # tdSql.query(" select tbname ,first(c1) from stb1 partition by tbname order by tbname ")
+ # tdSql.query(f"select tbname ,first(c1) from {dbname}.stb1 partition by tbname order by tbname ")
# tdSql.checkRows(2)
- tdSql.query("select tail(c1,2) from stb1 partition by tbname")
+ tdSql.query(f"select tail(c1,2) from {dbname}.stb1 partition by tbname")
tdSql.checkRows(4)
- # # bug need fix
- # tdSql.query(" select tbname , tail(c1,2) from stb1 where t1 = 0 partition by tbname ")
+ # # bug need fix
+ # tdSql.query(f"select tbname , tail(c1,2) from {dbname}.stb1 where t1 = 0 partition by tbname ")
# tdSql.checkRows(2)
- # tdSql.query(" select tbname , tail(c1,2) from stb1 where t1 = 0 partition by tbname order by tbname ")
+ # tdSql.query(f"select tbname , tail(c1,2) from {dbname}.stb1 where t1 = 0 partition by tbname order by tbname ")
# tdSql.checkRows(2)
- # tdSql.query(" select tbname , tail(c1,2) from stb1 where c1 = 0 partition by tbname order by tbname ")
+ # tdSql.query(f"select tbname , tail(c1,2) from {dbname}.stb1 where c1 = 0 partition by tbname order by tbname ")
# tdSql.checkRows(3)
- # tdSql.query(" select tbname , tail(c1,2) from stb1 where c1 = 0 partition by tbname ")
+ # tdSql.query(f"select tbname , tail(c1,2) from {dbname}.stb1 where c1 = 0 partition by tbname ")
# tdSql.checkRows(3)
- # tdSql.query(" select tbname , tail(c1,2) from stb1 where c1 = 0 partition by tbname ")
+ # tdSql.query(f"select tbname , tail(c1,2) from {dbname}.stb1 where c1 = 0 partition by tbname ")
# tdSql.checkRows(3)
- tdSql.query(" select tail(t1,2) from stb1 ")
+ tdSql.query(f"select tail(t1,2) from {dbname}.stb1 ")
tdSql.checkRows(2)
- tdSql.query(" select tail(t1+c1,2) from stb1 ")
+ tdSql.query(f"select tail(t1+c1,2) from {dbname}.stb1 ")
tdSql.checkRows(2)
- tdSql.query(" select tail(t1+c1,2) from stb1 partition by tbname ")
+ tdSql.query(f"select tail(t1+c1,2) from {dbname}.stb1 partition by tbname ")
tdSql.checkRows(4)
- tdSql.query(" select tail(t1,2) from stb1 partition by tbname ")
+ tdSql.query(f"select tail(t1,2) from {dbname}.stb1 partition by tbname ")
tdSql.checkRows(4)
- # nest query
- tdSql.query(" select tail(c1,2) from (select _rowts , t1 ,c1 , tbname from stb1 ) ")
+ # nest query
+ tdSql.query(f"select tail(c1,2) from (select _rowts , t1 ,c1 , tbname from {dbname}.stb1 ) ")
tdSql.checkRows(2)
tdSql.checkData(0,0,None)
tdSql.checkData(1,0,9)
- tdSql.query("select tail(t1,2) from (select _rowts , t1 , tbname from stb1 )")
+ tdSql.query(f"select tail(t1,2) from (select _rowts , t1 , tbname from {dbname}.stb1 )")
tdSql.checkRows(2)
tdSql.checkData(0,0,4)
tdSql.checkData(1,0,1)
- def check_boundary_values(self):
+ def check_boundary_values(self, dbname="bound_test"):
- tdSql.execute("drop database if exists bound_test")
- tdSql.execute("create database if not exists bound_test")
- tdSql.execute("use bound_test")
+ tdSql.execute(f"drop database if exists {dbname}")
+ tdSql.execute(f"create database if not exists {dbname}")
tdSql.execute(
- "create table stb_bound (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(32),c9 nchar(32), c10 timestamp) tags (t1 int);"
+ f"create table {dbname}.stb_bound (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(32),c9 nchar(32), c10 timestamp) tags (t1 int);"
)
- tdSql.execute(f'create table sub1_bound using stb_bound tags ( 1 )')
+ tdSql.execute(f'create table {dbname}.sub1_bound using {dbname}.stb_bound tags ( 1 )')
tdSql.execute(
- f"insert into sub1_bound values ( now()-1s, 2147483647, 9223372036854775807, 32767, 127, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
+ f"insert into {dbname}.sub1_bound values ( now()-1s, 2147483647, 9223372036854775807, 32767, 127, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
)
tdSql.execute(
- f"insert into sub1_bound values ( now(), 2147483646, 9223372036854775806, 32766, 126, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
+ f"insert into {dbname}.sub1_bound values ( now(), 2147483646, 9223372036854775806, 32766, 126, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
)
tdSql.execute(
- f"insert into sub1_bound values ( now(), -2147483646, -9223372036854775806, -32766, -126, -3.40E+38, -1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
+ f"insert into {dbname}.sub1_bound values ( now(), -2147483646, -9223372036854775806, -32766, -126, -3.40E+38, -1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
)
tdSql.execute(
- f"insert into sub1_bound values ( now(), 2147483643, 9223372036854775803, 32763, 123, 3.39E+38, 1.69e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
+ f"insert into {dbname}.sub1_bound values ( now(), 2147483643, 9223372036854775803, 32763, 123, 3.39E+38, 1.69e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
)
tdSql.execute(
- f"insert into sub1_bound values ( now(), -2147483643, -9223372036854775803, -32763, -123, -3.39E+38, -1.69e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
+ f"insert into {dbname}.sub1_bound values ( now(), -2147483643, -9223372036854775803, -32763, -123, -3.39E+38, -1.69e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
)
-
+
tdSql.error(
- f"insert into sub1_bound values ( now()+1s, 2147483648, 9223372036854775808, 32768, 128, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
+ f"insert into {dbname}.sub1_bound values ( now()+1s, 2147483648, 9223372036854775808, 32768, 128, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
)
-
- tdSql.query("select tail(c2,2) from sub1_bound order by 1 desc")
+
+ tdSql.query(f"select tail(c2,2) from {dbname}.sub1_bound order by 1 desc")
tdSql.checkRows(2)
tdSql.checkData(0,0,9223372036854775803)
@@ -456,22 +450,22 @@ class TDTestCase:
tdSql.prepare()
tdLog.printNoPrefix("==========step1:create table ==============")
-
+
self.prepare_datas()
- tdLog.printNoPrefix("==========step2:test errors ==============")
+ tdLog.printNoPrefix("==========step2:test errors ==============")
self.test_errors()
-
- tdLog.printNoPrefix("==========step3:support types ============")
+
+ tdLog.printNoPrefix("==========step3:support types ============")
self.support_types()
- tdLog.printNoPrefix("==========step4: tail basic query ============")
+ tdLog.printNoPrefix("==========step4: tail basic query ============")
self.basic_tail_function()
- tdLog.printNoPrefix("==========step5: tail boundary query ============")
+ tdLog.printNoPrefix("==========step5: tail boundary query ============")
self.check_boundary_values()
diff --git a/tests/system-test/2-query/tan.py b/tests/system-test/2-query/tan.py
index da47c1c2b2560bf617681df10e8788f518b11ac1..683cee37ff7c81ca45b628852134ddbab6e342cf 100644
--- a/tests/system-test/2-query/tan.py
+++ b/tests/system-test/2-query/tan.py
@@ -9,48 +9,46 @@ from util.cases import *
class TDTestCase:
- updatecfgDict = {'debugFlag': 143 ,"cDebugFlag":143,"uDebugFlag":143 ,"rpcDebugFlag":143 , "tmrDebugFlag":143 ,
- "jniDebugFlag":143 ,"simDebugFlag":143,"dDebugFlag":143, "dDebugFlag":143,"vDebugFlag":143,"mDebugFlag":143,"qDebugFlag":143,
- "wDebugFlag":143,"sDebugFlag":143,"tsdbDebugFlag":143,"tqDebugFlag":143 ,"fsDebugFlag":143 ,"udfDebugFlag":143}
+
def init(self, conn, powSql):
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor())
-
- def prepare_datas(self):
+
+ def prepare_datas(self, dbname="db"):
tdSql.execute(
- '''create table stb1
+ f'''create table {dbname}.stb1
(ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp)
tags (t1 int)
'''
)
-
+
tdSql.execute(
- '''
- create table t1
+ f'''
+ create table {dbname}.t1
(ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp)
'''
)
for i in range(4):
- tdSql.execute(f'create table ct{i+1} using stb1 tags ( {i+1} )')
+ tdSql.execute(f'create table {dbname}.ct{i+1} using {dbname}.stb1 tags ( {i+1} )')
for i in range(9):
tdSql.execute(
- f"insert into ct1 values ( now()-{i*10}s, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )"
+ f"insert into {dbname}.ct1 values ( now()-{i*10}s, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )"
)
tdSql.execute(
- f"insert into ct4 values ( now()-{i*90}d, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )"
+ f"insert into {dbname}.ct4 values ( now()-{i*90}d, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )"
)
- tdSql.execute("insert into ct1 values (now()-45s, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar0', now()+8a )")
- tdSql.execute("insert into ct1 values (now()+10s, 9, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )")
- tdSql.execute("insert into ct1 values (now()+15s, 9, -99999, -999, -99, -9.99, NULL, 1, 'binary9', 'nchar9', now()+9a )")
- tdSql.execute("insert into ct1 values (now()+20s, 9, -99999, -999, NULL, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )")
+ tdSql.execute(f"insert into {dbname}.ct1 values (now()-45s, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar0', now()+8a )")
+ tdSql.execute(f"insert into {dbname}.ct1 values (now()+10s, 9, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )")
+ tdSql.execute(f"insert into {dbname}.ct1 values (now()+15s, 9, -99999, -999, -99, -9.99, NULL, 1, 'binary9', 'nchar9', now()+9a )")
+ tdSql.execute(f"insert into {dbname}.ct1 values (now()+20s, 9, -99999, -999, NULL, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )")
- tdSql.execute("insert into ct4 values (now()-810d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
- tdSql.execute("insert into ct4 values (now()-400d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
- tdSql.execute("insert into ct4 values (now()+90d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
+ tdSql.execute(f"insert into {dbname}.ct4 values (now()-810d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
+ tdSql.execute(f"insert into {dbname}.ct4 values (now()-400d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
+ tdSql.execute(f"insert into {dbname}.ct4 values (now()+90d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
tdSql.execute(
- f'''insert into t1 values
+ f'''insert into {dbname}.t1 values
( '2020-04-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( '2020-10-21 01:01:01.000', 1, 11111, 111, 11, 1.11, 11.11, 1, "binary1", "nchar1", now()+1a )
( '2020-12-31 01:01:01.000', 2, 22222, 222, 22, 2.22, 22.22, 0, "binary2", "nchar2", now()+2a )
@@ -61,18 +59,18 @@ class TDTestCase:
( '2022-02-01 01:01:20.000', 6, 66666, 666, 66, 6.66, 66.66, 1, "binary6", "nchar6", now()+6a )
( '2022-10-28 01:01:26.000', 7, 00000, 000, 00, 0.00, 00.00, 1, "binary7", "nchar7", "1970-01-01 08:00:00.000" )
( '2022-12-01 01:01:30.000', 8, -88888, -888, -88, -8.88, -88.88, 0, "binary8", "nchar8", "1969-01-01 01:00:00.000" )
- ( '2022-12-31 01:01:36.000', 9, -99999999999999999, -999, -99, -9.99, -999999999999999999999.99, 1, "binary9", "nchar9", "1900-01-01 00:00:00.000" )
+ ( '2022-12-31 01:01:36.000', 9, -99999, -999, -99, -9.99, -99999.99, 1, "binary9", "nchar9", "1900-01-01 00:00:00.000" )
( '2023-02-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
'''
)
-
+
def check_result_auto_tan(self ,origin_query , pow_query):
pow_result = tdSql.getResult(pow_query)
origin_result = tdSql.getResult(origin_query)
auto_result =[]
-
+
for row in origin_result:
row_check = []
for elem in row:
@@ -82,190 +80,178 @@ class TDTestCase:
elem = math.tan(elem)
row_check.append(elem)
auto_result.append(row_check)
-
- check_status = True
-
+ tdSql.query(pow_query)
for row_index , row in enumerate(pow_result):
for col_index , elem in enumerate(row):
- if auto_result[row_index][col_index] == None and not (auto_result[row_index][col_index] == None and elem == None):
- check_status = False
- elif auto_result[row_index][col_index] != None and (auto_result[row_index][col_index] - elem > 0.00000001):
- check_status = False
- else:
- pass
- if not check_status:
- tdLog.notice("tan function value has not as expected , sql is \"%s\" "%pow_query )
- sys.exit(1)
- else:
- tdLog.info("tan value check pass , it work as expected ,sql is \"%s\" "%pow_query )
-
- def test_errors(self):
+ tdSql.checkData(row_index , col_index ,auto_result[row_index][col_index] )
+
+ def test_errors(self, dbname="db"):
error_sql_lists = [
- "select tan from t1",
- # "select tan(-+--+c1 ) from t1",
- # "select +-tan(c1) from t1",
- # "select ++-tan(c1) from t1",
- # "select ++--tan(c1) from t1",
- # "select - -tan(c1)*0 from t1",
- # "select tan(tbname+1) from t1 ",
- "select tan(123--123)==1 from t1",
- "select tan(c1) as 'd1' from t1",
- "select tan(c1 ,c2) from t1",
- "select tan(c1 ,NULL ) from t1",
- "select tan(,) from t1;",
- "select tan(tan(c1) ab from t1)",
- "select tan(c1 ) as int from t1",
- "select tan from stb1",
- # "select tan(-+--+c1) from stb1",
- # "select +-tan(c1) from stb1",
- # "select ++-tan(c1) from stb1",
- # "select ++--tan(c1) from stb1",
- # "select - -tan(c1)*0 from stb1",
- # "select tan(tbname+1) from stb1 ",
- "select tan(123--123)==1 from stb1",
- "select tan(c1) as 'd1' from stb1",
- "select tan(c1 ,c2 ) from stb1",
- "select tan(c1 ,NULL) from stb1",
- "select tan(,) from stb1;",
- "select tan(tan(c1) ab from stb1)",
- "select tan(c1) as int from stb1"
+ f"select tan from {dbname}.t1",
+ # f"select tan(-+--+c1 ) from {dbname}.t1",
+ # f"select +-tan(c1) from {dbname}.t1",
+ # f"select ++-tan(c1) from {dbname}.t1",
+ # f"select ++--tan(c1) from {dbname}.t1",
+ # f"select - -tan(c1)*0 from {dbname}.t1",
+ # f"select tan(tbname+1) from {dbname}.t1 ",
+ f"select tan(123--123)==1 from {dbname}.t1",
+ f"select tan(c1) as 'd1' from {dbname}.t1",
+ f"select tan(c1 ,c2) from {dbname}.t1",
+ f"select tan(c1 ,NULL ) from {dbname}.t1",
+ f"select tan(,) from {dbname}.t1;",
+ f"select tan(tan(c1) ab from {dbname}.t1)",
+ f"select tan(c1 ) as int from {dbname}.t1",
+ f"select tan from {dbname}.stb1",
+ # f"select tan(-+--+c1) from {dbname}.stb1",
+ # f"select +-tan(c1) from {dbname}.stb1",
+ # f"select ++-tan(c1) from {dbname}.stb1",
+ # f"select ++--tan(c1) from {dbname}.stb1",
+ # f"select - -tan(c1)*0 from {dbname}.stb1",
+ # f"select tan(tbname+1) from {dbname}.stb1 ",
+ f"select tan(123--123)==1 from {dbname}.stb1",
+ f"select tan(c1) as 'd1' from {dbname}.stb1",
+ f"select tan(c1 ,c2 ) from {dbname}.stb1",
+ f"select tan(c1 ,NULL) from {dbname}.stb1",
+ f"select tan(,) from {dbname}.stb1;",
+ f"select tan(tan(c1) ab from {dbname}.stb1)",
+ f"select tan(c1) as int from {dbname}.stb1"
]
for error_sql in error_sql_lists:
tdSql.error(error_sql)
-
- def support_types(self):
+
+ def support_types(self, dbname="db"):
type_error_sql_lists = [
- "select tan(ts) from t1" ,
- "select tan(c7) from t1",
- "select tan(c8) from t1",
- "select tan(c9) from t1",
- "select tan(ts) from ct1" ,
- "select tan(c7) from ct1",
- "select tan(c8) from ct1",
- "select tan(c9) from ct1",
- "select tan(ts) from ct3" ,
- "select tan(c7) from ct3",
- "select tan(c8) from ct3",
- "select tan(c9) from ct3",
- "select tan(ts) from ct4" ,
- "select tan(c7) from ct4",
- "select tan(c8) from ct4",
- "select tan(c9) from ct4",
- "select tan(ts) from stb1" ,
- "select tan(c7) from stb1",
- "select tan(c8) from stb1",
- "select tan(c9) from stb1" ,
-
- "select tan(ts) from stbbb1" ,
- "select tan(c7) from stbbb1",
-
- "select tan(ts) from tbname",
- "select tan(c9) from tbname"
+ f"select tan(ts) from {dbname}.t1" ,
+ f"select tan(c7) from {dbname}.t1",
+ f"select tan(c8) from {dbname}.t1",
+ f"select tan(c9) from {dbname}.t1",
+ f"select tan(ts) from {dbname}.ct1" ,
+ f"select tan(c7) from {dbname}.ct1",
+ f"select tan(c8) from {dbname}.ct1",
+ f"select tan(c9) from {dbname}.ct1",
+ f"select tan(ts) from {dbname}.ct3" ,
+ f"select tan(c7) from {dbname}.ct3",
+ f"select tan(c8) from {dbname}.ct3",
+ f"select tan(c9) from {dbname}.ct3",
+ f"select tan(ts) from {dbname}.ct4" ,
+ f"select tan(c7) from {dbname}.ct4",
+ f"select tan(c8) from {dbname}.ct4",
+ f"select tan(c9) from {dbname}.ct4",
+ f"select tan(ts) from {dbname}.stb1" ,
+ f"select tan(c7) from {dbname}.stb1",
+ f"select tan(c8) from {dbname}.stb1",
+ f"select tan(c9) from {dbname}.stb1" ,
+
+ f"select tan(ts) from {dbname}.stbbb1" ,
+ f"select tan(c7) from {dbname}.stbbb1",
+
+ f"select tan(ts) from {dbname}.tbname",
+ f"select tan(c9) from {dbname}.tbname"
]
-
+
for type_sql in type_error_sql_lists:
tdSql.error(type_sql)
-
-
+
+
type_sql_lists = [
- "select tan(c1) from t1",
- "select tan(c2) from t1",
- "select tan(c3) from t1",
- "select tan(c4) from t1",
- "select tan(c5) from t1",
- "select tan(c6) from t1",
-
- "select tan(c1) from ct1",
- "select tan(c2) from ct1",
- "select tan(c3) from ct1",
- "select tan(c4) from ct1",
- "select tan(c5) from ct1",
- "select tan(c6) from ct1",
-
- "select tan(c1) from ct3",
- "select tan(c2) from ct3",
- "select tan(c3) from ct3",
- "select tan(c4) from ct3",
- "select tan(c5) from ct3",
- "select tan(c6) from ct3",
-
- "select tan(c1) from stb1",
- "select tan(c2) from stb1",
- "select tan(c3) from stb1",
- "select tan(c4) from stb1",
- "select tan(c5) from stb1",
- "select tan(c6) from stb1",
-
- "select tan(c6) as alisb from stb1",
- "select tan(c6) alisb from stb1",
+ f"select tan(c1) from {dbname}.t1",
+ f"select tan(c2) from {dbname}.t1",
+ f"select tan(c3) from {dbname}.t1",
+ f"select tan(c4) from {dbname}.t1",
+ f"select tan(c5) from {dbname}.t1",
+ f"select tan(c6) from {dbname}.t1",
+
+ f"select tan(c1) from {dbname}.ct1",
+ f"select tan(c2) from {dbname}.ct1",
+ f"select tan(c3) from {dbname}.ct1",
+ f"select tan(c4) from {dbname}.ct1",
+ f"select tan(c5) from {dbname}.ct1",
+ f"select tan(c6) from {dbname}.ct1",
+
+ f"select tan(c1) from {dbname}.ct3",
+ f"select tan(c2) from {dbname}.ct3",
+ f"select tan(c3) from {dbname}.ct3",
+ f"select tan(c4) from {dbname}.ct3",
+ f"select tan(c5) from {dbname}.ct3",
+ f"select tan(c6) from {dbname}.ct3",
+
+ f"select tan(c1) from {dbname}.stb1",
+ f"select tan(c2) from {dbname}.stb1",
+ f"select tan(c3) from {dbname}.stb1",
+ f"select tan(c4) from {dbname}.stb1",
+ f"select tan(c5) from {dbname}.stb1",
+ f"select tan(c6) from {dbname}.stb1",
+
+ f"select tan(c6) as alisb from {dbname}.stb1",
+ f"select tan(c6) alisb from {dbname}.stb1",
]
for type_sql in type_sql_lists:
tdSql.query(type_sql)
-
- def basic_tan_function(self):
- # basic query
- tdSql.query("select c1 from ct3")
+ def basic_tan_function(self, dbname="db"):
+
+ # basic query
+ tdSql.query(f"select c1 from {dbname}.ct3")
tdSql.checkRows(0)
- tdSql.query("select c1 from t1")
+ tdSql.query(f"select c1 from {dbname}.t1")
tdSql.checkRows(12)
- tdSql.query("select c1 from stb1")
+ tdSql.query(f"select c1 from {dbname}.stb1")
tdSql.checkRows(25)
# used for empty table , ct3 is empty
- tdSql.query("select tan(c1) from ct3")
+ tdSql.query(f"select tan(c1) from {dbname}.ct3")
tdSql.checkRows(0)
- tdSql.query("select tan(c2) from ct3")
+ tdSql.query(f"select tan(c2) from {dbname}.ct3")
tdSql.checkRows(0)
- tdSql.query("select tan(c3) from ct3")
+ tdSql.query(f"select tan(c3) from {dbname}.ct3")
tdSql.checkRows(0)
- tdSql.query("select tan(c4) from ct3")
+ tdSql.query(f"select tan(c4) from {dbname}.ct3")
tdSql.checkRows(0)
- tdSql.query("select tan(c5) from ct3")
+ tdSql.query(f"select tan(c5) from {dbname}.ct3")
tdSql.checkRows(0)
- tdSql.query("select tan(c6) from ct3")
+ tdSql.query(f"select tan(c6) from {dbname}.ct3")
tdSql.checkRows(0)
# # used for regular table
- tdSql.query("select tan(c1) from t1")
+ tdSql.query(f"select tan(c1) from {dbname}.t1")
tdSql.checkData(0, 0, None)
tdSql.checkData(1 , 0, 1.557407725)
tdSql.checkData(3 , 0, -0.142546543)
tdSql.checkData(5 , 0, None)
- tdSql.query("select c1, c2, c3 , c4, c5 from t1")
+ tdSql.query(f"select c1, c2, c3 , c4, c5 from {dbname}.t1")
tdSql.checkData(1, 4, 1.11000)
tdSql.checkData(3, 3, 33)
tdSql.checkData(5, 4, None)
- tdSql.query("select ts,c1, c2, c3 , c4, c5 from t1")
+ tdSql.query(f"select ts,c1, c2, c3 , c4, c5 from {dbname}.t1")
tdSql.checkData(1, 5, 1.11000)
tdSql.checkData(3, 4, 33)
tdSql.checkData(5, 5, None)
- self.check_result_auto_tan( "select abs(c1), abs(c2), abs(c3) , abs(c4), abs(c5) from t1", "select tan(abs(c1)), tan(abs(c2)) ,tan(abs(c3)), tan(abs(c4)), tan(abs(c5)) from t1")
-
+ self.check_result_auto_tan( f"select abs(c1), abs(c2), abs(c3) , abs(c4) from {dbname}.t1", f"select tan(abs(c1)), tan(abs(c2)) ,tan(abs(c3)), tan(abs(c4)) from {dbname}.t1")
+
# used for sub table
- tdSql.query("select c2 ,tan(c2) from ct1")
+ tdSql.query(f"select c2 ,tan(c2) from {dbname}.ct1")
tdSql.checkData(0, 1, -0.226288661)
tdSql.checkData(1 , 1, 0.670533806)
tdSql.checkData(3 , 1, -1.325559275)
tdSql.checkData(4 , 1, 0.000000000)
- tdSql.query("select c1, c5 ,tan(c5) from ct4")
+ tdSql.query(f"select c1, c5 ,tan(c5) from {dbname}.ct4")
tdSql.checkData(0 , 2, None)
tdSql.checkData(1 , 2, -0.605942929)
tdSql.checkData(2 , 2, 11.879355609)
tdSql.checkData(3 , 2, 0.395723765)
tdSql.checkData(5 , 2, None)
- self.check_result_auto_tan( "select c1, c2, c3 , c4, c5 from ct1", "select tan(c1), tan(c2) ,tan(c3), tan(c4), tan(c5) from ct1")
-
+ self.check_result_auto_tan( f"select c1, c2, c3 , c4, c5 from {dbname}.ct1", f"select tan(c1), tan(c2) ,tan(c3), tan(c4), tan(c5) from {dbname}.ct1")
+
# nest query for tan functions
- tdSql.query("select c4 , tan(c4) ,tan(tan(c4)) , tan(tan(tan(c4))) from ct1;")
+ tdSql.query(f"select c4 , tan(c4) ,tan(tan(c4)) , tan(tan(tan(c4))) from {dbname}.ct1;")
tdSql.checkData(0 , 0 , 88)
tdSql.checkData(0 , 1 , 0.035420501)
tdSql.checkData(0 , 2 , 0.035435322)
@@ -281,52 +267,52 @@ class TDTestCase:
tdSql.checkData(11 , 2 , -0.040227928)
tdSql.checkData(11 , 3 , -0.040249642)
- # used for stable table
-
- tdSql.query("select tan(c1) from stb1")
+ # used for stable table
+
+ tdSql.query(f"select tan(c1) from {dbname}.stb1")
tdSql.checkRows(25)
-
+
# used for not exists table
- tdSql.error("select tan(c1) from stbbb1")
- tdSql.error("select tan(c1) from tbname")
- tdSql.error("select tan(c1) from ct5")
+ tdSql.error(f"select tan(c1) from {dbname}.stbbb1")
+ tdSql.error(f"select tan(c1) from {dbname}.tbname")
+ tdSql.error(f"select tan(c1) from {dbname}.ct5")
+
+ # mix with common col
+ tdSql.query(f"select c1, tan(c1) from {dbname}.ct1")
+ tdSql.query(f"select c2, tan(c2) from {dbname}.ct4")
- # mix with common col
- tdSql.query("select c1, tan(c1) from ct1")
- tdSql.query("select c2, tan(c2) from ct4")
-
# mix with common functions
- tdSql.query("select c1, tan(c1),tan(c1), tan(tan(c1)) from ct4 ")
+ tdSql.query(f"select c1, tan(c1),tan(c1), tan(tan(c1)) from {dbname}.ct4 ")
tdSql.checkData(0 , 0 ,None)
tdSql.checkData(0 , 1 ,None)
tdSql.checkData(0 , 2 ,None)
tdSql.checkData(0 , 3 ,None)
-
+
tdSql.checkData(3 , 0 , 6)
tdSql.checkData(3 , 1 ,-0.291006191)
tdSql.checkData(3 , 2 ,-0.291006191)
tdSql.checkData(3 , 3 ,-0.299508909)
- tdSql.query("select c1, tan(c1),c5, floor(c5) from stb1 ")
+ tdSql.query(f"select c1, tan(c1),c5, floor(c5) from {dbname}.stb1 ")
# # mix with agg functions , not support
- tdSql.error("select c1, tan(c1),c5, count(c5) from stb1 ")
- tdSql.error("select c1, tan(c1),c5, count(c5) from ct1 ")
- tdSql.error("select tan(c1), count(c5) from stb1 ")
- tdSql.error("select tan(c1), count(c5) from ct1 ")
- tdSql.error("select c1, count(c5) from ct1 ")
- tdSql.error("select c1, count(c5) from stb1 ")
+ tdSql.error(f"select c1, tan(c1),c5, count(c5) from {dbname}.stb1 ")
+ tdSql.error(f"select c1, tan(c1),c5, count(c5) from {dbname}.ct1 ")
+ tdSql.error(f"select tan(c1), count(c5) from {dbname}.stb1 ")
+ tdSql.error(f"select tan(c1), count(c5) from {dbname}.ct1 ")
+ tdSql.error(f"select c1, count(c5) from {dbname}.ct1 ")
+ tdSql.error(f"select c1, count(c5) from {dbname}.stb1 ")
# agg functions mix with agg functions
- tdSql.query("select max(c5), count(c5) from stb1")
- tdSql.query("select max(c5), count(c5) from ct1")
+ tdSql.query(f"select max(c5), count(c5) from {dbname}.stb1")
+ tdSql.query(f"select max(c5), count(c5) from {dbname}.ct1")
+
-
- # # bug fix for compute
- tdSql.query("select c1, tan(c1) -0 ,tan(c1-4)-0 from ct4 ")
+ # # bug fix for compute
+ tdSql.query(f"select c1, tan(c1) -0 ,tan(c1-4)-0 from {dbname}.ct4 ")
tdSql.checkData(0, 0, None)
tdSql.checkData(0, 1, None)
tdSql.checkData(0, 2, None)
@@ -334,7 +320,7 @@ class TDTestCase:
tdSql.checkData(1, 1, -6.799711455)
tdSql.checkData(1, 2, 1.157821282)
- tdSql.query(" select c1, tan(c1) -0 ,tan(c1-0.1)-0.1 from ct4")
+ tdSql.query(f"select c1, tan(c1) -0 ,tan(c1-0.1)-0.1 from {dbname}.ct4")
tdSql.checkData(0, 0, None)
tdSql.checkData(0, 1, None)
tdSql.checkData(0, 2, None)
@@ -342,35 +328,33 @@ class TDTestCase:
tdSql.checkData(1, 1, -6.799711455)
tdSql.checkData(1, 2, -21.815112681)
- tdSql.query("select c1, tan(c1), c2, tan(c2), c3, tan(c3) from ct1")
+ tdSql.query(f"select c1, tan(c1), c2, tan(c2), c3, tan(c3) from {dbname}.ct1")
- def test_big_number(self):
+ def test_big_number(self, dbname="db"):
- tdSql.query("select c1, tan(100000000) from ct1") # bigint to double data overflow
+ tdSql.query(f"select c1, tan(100000000) from {dbname}.ct1") # bigint to double data overflow
tdSql.checkData(4, 1, math.tan(100000000))
-
- tdSql.query("select c1, tan(10000000000000) from ct1") # bigint to double data overflow
+ tdSql.query(f"select c1, tan(10000000000000) from {dbname}.ct1") # bigint to double data overflow
tdSql.checkData(4, 1, math.tan(10000000000000))
- tdSql.query("select c1, tan(10000000000000000000000000) from ct1") # bigint to double data overflow
- tdSql.query("select c1, tan(10000000000000000000000000.0) from ct1") # 10000000000000000000000000.0 is a double value
+ tdSql.query(f"select c1, tan(10000000000000000000000000) from {dbname}.ct1") # bigint to double data overflow
+ tdSql.query(f"select c1, tan(10000000000000000000000000.0) from {dbname}.ct1") # 10000000000000000000000000.0 is a double value
tdSql.checkData(1, 1, math.tan(10000000000000000000000000.0))
- tdSql.query("select c1, tan(10000000000000000000000000000000000) from ct1") # bigint to double data overflow
- tdSql.query("select c1, tan(10000000000000000000000000000000000.0) from ct1") # 10000000000000000000000000.0 is a double value
+ tdSql.query(f"select c1, tan(10000000000000000000000000000000000) from {dbname}.ct1") # bigint to double data overflow
+ tdSql.query(f"select c1, tan(10000000000000000000000000000000000.0) from {dbname}.ct1") # 10000000000000000000000000.0 is a double value
tdSql.checkData(4, 1, math.tan(10000000000000000000000000000000000.0))
- tdSql.query("select c1, tan(10000000000000000000000000000000000000000) from ct1") # bigint to double data overflow
- tdSql.query("select c1, tan(10000000000000000000000000000000000000000.0) from ct1") # 10000000000000000000000000.0 is a double value
+ tdSql.query(f"select c1, tan(10000000000000000000000000000000000000000) from {dbname}.ct1") # bigint to double data overflow
+ tdSql.query(f"select c1, tan(10000000000000000000000000000000000000000.0) from {dbname}.ct1") # 10000000000000000000000000.0 is a double value
tdSql.checkData(4, 1, math.tan(10000000000000000000000000000000000000000.0))
- tdSql.query("select c1, tan(10000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000) from ct1") # bigint to double data overflow
+ tdSql.query(f"select c1, tan(10000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000) from {dbname}.ct1") # bigint to double data overflow
- def abs_func_filter(self):
- tdSql.execute("use db")
- tdSql.query("select c1, abs(c1) -0 ,ceil(c1-0.1)-0 ,floor(c1+0.1)-0.1 ,ceil(tan(c1)-0.5) from ct4 where c1>5 ")
+ def abs_func_filter(self, dbname="db"):
+ tdSql.query(f"select c1, abs(c1) -0 ,ceil(c1-0.1)-0 ,floor(c1+0.1)-0.1 ,ceil(tan(c1)-0.5) from {dbname}.ct4 where c1>5 ")
tdSql.checkRows(3)
tdSql.checkData(0,0,8)
tdSql.checkData(0,1,8.000000000)
@@ -378,7 +362,7 @@ class TDTestCase:
tdSql.checkData(0,3,7.900000000)
tdSql.checkData(0,4,-7.000000000)
- tdSql.query("select c1, abs(c1) -0 ,ceil(c1-0.1)-0 ,floor(c1+0.1)-0.1 ,ceil(tan(c1)-0.5) from ct4 where c1=5 ")
+ tdSql.query(f"select c1, abs(c1) -0 ,ceil(c1-0.1)-0 ,floor(c1+0.1)-0.1 ,ceil(tan(c1)-0.5) from {dbname}.ct4 where c1=5 ")
tdSql.checkRows(1)
tdSql.checkData(0,0,5)
tdSql.checkData(0,1,5.000000000)
@@ -386,7 +370,7 @@ class TDTestCase:
tdSql.checkData(0,3,4.900000000)
tdSql.checkData(0,4,-3.000000000)
- tdSql.query("select c1,c2 , abs(c1) -0 ,ceil(c1-0.1)-0 ,floor(c1+0.1)-0.1 ,ceil(tan(c1)-0.5) from ct4 where c1>tan(c1) limit 1 ")
+ tdSql.query(f"select c1,c2 , abs(c1) -0 ,ceil(c1-0.1)-0 ,floor(c1+0.1)-0.1 ,ceil(tan(c1)-0.5) from {dbname}.ct4 where c1>tan(c1) limit 1 ")
tdSql.checkRows(1)
tdSql.checkData(0,0,8)
tdSql.checkData(0,1,88888)
@@ -394,45 +378,40 @@ class TDTestCase:
tdSql.checkData(0,3,8.000000000)
tdSql.checkData(0,4,7.900000000)
tdSql.checkData(0,5,-7.000000000)
-
- def pow_Arithmetic(self):
- pass
-
- def check_boundary_values(self):
+
+ def check_boundary_values(self, dbname="bound_test"):
PI=3.1415926
- tdSql.execute("drop database if exists bound_test")
- tdSql.execute("create database if not exists bound_test")
- time.sleep(3)
- tdSql.execute("use bound_test")
+ tdSql.execute(f"drop database if exists {dbname}")
+ tdSql.execute(f"create database if not exists {dbname}")
tdSql.execute(
- "create table stb_bound (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(32),c9 nchar(32), c10 timestamp) tags (t1 int);"
+ f"create table {dbname}.stb_bound (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(32),c9 nchar(32), c10 timestamp) tags (t1 int);"
)
- tdSql.execute(f'create table sub1_bound using stb_bound tags ( 1 )')
+ tdSql.execute(f'create table {dbname}.sub1_bound using {dbname}.stb_bound tags ( 1 )')
tdSql.execute(
- f"insert into sub1_bound values ( now()-1s, 2147483647, 9223372036854775807, 32767, 127, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
+ f"insert into {dbname}.sub1_bound values ( now()-1s, 2147483647, 9223372036854775807, 32767, 127, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
)
tdSql.execute(
- f"insert into sub1_bound values ( now()-1s, -2147483647, -9223372036854775807, -32767, -127, -3.40E+38, -1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
+ f"insert into {dbname}.sub1_bound values ( now()-1s, -2147483647, -9223372036854775807, -32767, -127, -3.40E+38, -1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
)
tdSql.execute(
- f"insert into sub1_bound values ( now(), 2147483646, 9223372036854775806, 32766, 126, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
+ f"insert into {dbname}.sub1_bound values ( now(), 2147483646, 9223372036854775806, 32766, 126, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
)
tdSql.execute(
- f"insert into sub1_bound values ( now(), -2147483646, -9223372036854775806, -32766, -126, -3.40E+38, -1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
+ f"insert into {dbname}.sub1_bound values ( now(), -2147483646, -9223372036854775806, -32766, -126, -3.40E+38, -1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
)
tdSql.error(
- f"insert into sub1_bound values ( now()+1s, 2147483648, 9223372036854775808, 32768, 128, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
+ f"insert into {dbname}.sub1_bound values ( now()+1s, 2147483648, 9223372036854775808, 32768, 128, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
)
- self.check_result_auto_tan( "select abs(c1), abs(c2), abs(c3) , abs(c4), abs(c5) from sub1_bound ", "select tan(abs(c1)), tan(abs(c2)) ,tan(abs(c3)), tan(abs(c4)), tan(abs(c5)) from sub1_bound")
-
- self.check_result_auto_tan( "select c1, c2, c3 , c3, c2 ,c1 from sub1_bound ", "select tan(c1), tan(c2) ,tan(c3), tan(c3), tan(c2) ,tan(c1) from sub1_bound")
+ self.check_result_auto_tan( f"select abs(c1), abs(c2), abs(c3) , abs(c4) from {dbname}.sub1_bound ", f"select tan(abs(c1)), tan(abs(c2)) ,tan(abs(c3)), tan(abs(c4)) from {dbname}.sub1_bound")
+
+ self.check_result_auto_tan( f"select c1, c2, c3 , c3, c2 ,c1 from {dbname}.sub1_bound ", f"select tan(c1), tan(c2) ,tan(c3), tan(c3), tan(c2) ,tan(c1) from {dbname}.sub1_bound")
+
+ self.check_result_auto_tan(f"select abs(abs(abs(abs(abs(abs(abs(abs(abs(c1))))))))) nest_col_func from {dbname}.sub1_bound" , f"select tan(abs(c1)) from {dbname}.sub1_bound" )
- self.check_result_auto_tan("select abs(abs(abs(abs(abs(abs(abs(abs(abs(c1))))))))) nest_col_func from sub1_bound" , "select tan(abs(c1)) from sub1_bound" )
-
# check basic elem for table per row
- tdSql.query("select tan(abs(c1)) ,tan(abs(c2)) , tan(abs(c3)) , tan(abs(c4)), tan(abs(c5)), tan(abs(c6)) from sub1_bound ")
+ tdSql.query(f"select tan(abs(c1)) ,tan(abs(c2)) , tan(abs(c3)) , tan(abs(c4)), tan(abs(c5)), tan(abs(c6)) from {dbname}.sub1_bound ")
tdSql.checkData(0,0,math.tan(2147483647))
tdSql.checkData(0,1,math.tan(9223372036854775807))
tdSql.checkData(0,2,math.tan(32767))
@@ -450,76 +429,71 @@ class TDTestCase:
tdSql.checkData(3,4,math.tan(339999995214436424907732413799364296704.00000))
# check + - * / in functions
- tdSql.query("select tan(abs(c1+1)) ,tan(abs(c2)) , tan(abs(c3*1)) , tan(abs(c4/2)), tan(abs(c5))/2, tan(abs(c6)) from sub1_bound ")
+ tdSql.query(f"select tan(abs(c1+1)) ,tan(abs(c2)) , tan(abs(c3*1)) , tan(abs(c4/2)), tan(abs(c5))/2, tan(abs(c6)) from {dbname}.sub1_bound ")
tdSql.checkData(0,0,math.tan(2147483648.000000000))
tdSql.checkData(0,1,math.tan(9223372036854775807))
tdSql.checkData(0,2,math.tan(32767.000000000))
tdSql.checkData(0,3,math.tan(63.500000000))
- tdSql.execute("create stable st (ts timestamp, num1 float, num2 double) tags (t1 int);")
- tdSql.execute(f'create table tb1 using st tags (1)')
- tdSql.execute(f'create table tb2 using st tags (2)')
- tdSql.execute(f'create table tb3 using st tags (3)')
- tdSql.execute('insert into tb1 values (now()-40s, {}, {})'.format(PI/2 ,PI/2 ))
- tdSql.execute('insert into tb1 values (now()-30s, {}, {})'.format(PI ,PI ))
- tdSql.execute('insert into tb1 values (now()-20s, {}, {})'.format(PI*1.5 ,PI*1.5))
- tdSql.execute('insert into tb1 values (now()-10s, {}, {})'.format(PI*2 ,PI*2))
- tdSql.execute('insert into tb1 values (now(), {}, {})'.format(PI*2.5 ,PI*2.5))
-
- tdSql.execute('insert into tb2 values (now()-40s, {}, {})'.format(PI/2 ,PI/2 ))
- tdSql.execute('insert into tb2 values (now()-30s, {}, {})'.format(PI ,PI ))
- tdSql.execute('insert into tb2 values (now()-20s, {}, {})'.format(PI*1.5 ,PI*1.5))
- tdSql.execute('insert into tb2 values (now()-10s, {}, {})'.format(PI*2 ,PI*2))
- tdSql.execute('insert into tb2 values (now(), {}, {})'.format(PI*2.5 ,PI*2.5))
-
- for i in range(100):
- tdSql.execute('insert into tb3 values (now()+{}s, {}, {})'.format(i,PI*(5+i)/2 ,PI*(5+i)/2))
-
- self.check_result_auto_tan("select num1,num2 from tb3;" , "select tan(num1),tan(num2) from tb3")
-
- def support_super_table_test(self):
- tdSql.execute(" use db ")
- self.check_result_auto_tan( " select c5 from stb1 order by ts " , "select tan(c5) from stb1 order by ts" )
- self.check_result_auto_tan( " select c5 from stb1 order by tbname " , "select tan(c5) from stb1 order by tbname" )
- self.check_result_auto_tan( " select c5 from stb1 where c1 > 0 order by tbname " , "select tan(c5) from stb1 where c1 > 0 order by tbname" )
- self.check_result_auto_tan( " select c5 from stb1 where c1 > 0 order by tbname " , "select tan(c5) from stb1 where c1 > 0 order by tbname" )
-
- self.check_result_auto_tan( " select t1,c5 from stb1 order by ts " , "select tan(t1), tan(c5) from stb1 order by ts" )
- self.check_result_auto_tan( " select t1,c5 from stb1 order by tbname " , "select tan(t1) ,tan(c5) from stb1 order by tbname" )
- self.check_result_auto_tan( " select t1,c5 from stb1 where c1 > 0 order by tbname " , "select tan(t1) ,tan(c5) from stb1 where c1 > 0 order by tbname" )
- self.check_result_auto_tan( " select t1,c5 from stb1 where c1 > 0 order by tbname " , "select tan(t1) , tan(c5) from stb1 where c1 > 0 order by tbname" )
+ tdSql.execute(f"create stable {dbname}.st (ts timestamp, num1 float, num2 double) tags (t1 int);")
+ tdSql.execute(f'create table {dbname}.tb1 using {dbname}.st tags (1)')
+ tdSql.execute(f'create table {dbname}.tb2 using {dbname}.st tags (2)')
+ tdSql.execute(f'create table {dbname}.tb3 using {dbname}.st tags (3)')
+ tdSql.execute(f'insert into {dbname}.tb1 values (now()-40s, {PI/2}, {PI/2})')
+ tdSql.execute(f'insert into {dbname}.tb1 values (now()-30s, {PI}, {PI})')
+ tdSql.execute(f'insert into {dbname}.tb1 values (now()-20s, {PI*1.5}, {PI*1.5})')
+ tdSql.execute(f'insert into {dbname}.tb1 values (now()-10s, {PI*2}, {PI*2})')
+ tdSql.execute(f'insert into {dbname}.tb1 values (now(), {PI*2.5}, {PI*2.5})')
+
+ tdSql.execute(f'insert into {dbname}.tb2 values (now()-40s, {PI/2}, {PI/2})')
+ tdSql.execute(f'insert into {dbname}.tb2 values (now()-30s, {PI}, {PI})')
+ tdSql.execute(f'insert into {dbname}.tb2 values (now()-20s, {PI*1.5}, {PI*1.5})')
+ tdSql.execute(f'insert into {dbname}.tb2 values (now()-10s, {PI*2}, {PI*2})')
+ tdSql.execute(f'insert into {dbname}.tb2 values (now(), {PI*2.5}, {PI*2.5})')
+
+ self.check_result_auto_tan(f"select num1,num2 from {dbname}.tb3;" , f"select tan(num1),tan(num2) from {dbname}.tb3")
+
+ def support_super_table_test(self, dbname="db"):
+ self.check_result_auto_tan( f"select c5 from {dbname}.stb1 order by ts " , f"select tan(c5) from {dbname}.stb1 order by ts" )
+ self.check_result_auto_tan( f"select c5 from {dbname}.stb1 order by tbname " , f"select tan(c5) from {dbname}.stb1 order by tbname" )
+ self.check_result_auto_tan( f"select c5 from {dbname}.stb1 where c1 > 0 order by tbname " , f"select tan(c5) from {dbname}.stb1 where c1 > 0 order by tbname" )
+ self.check_result_auto_tan( f"select c5 from {dbname}.stb1 where c1 > 0 order by tbname " , f"select tan(c5) from {dbname}.stb1 where c1 > 0 order by tbname" )
+
+ self.check_result_auto_tan( f"select t1,c5 from {dbname}.stb1 order by ts " , f"select tan(t1), tan(c5) from {dbname}.stb1 order by ts" )
+ self.check_result_auto_tan( f"select t1,c5 from {dbname}.stb1 order by tbname " , f"select tan(t1) ,tan(c5) from {dbname}.stb1 order by tbname" )
+ self.check_result_auto_tan( f"select t1,c5 from {dbname}.stb1 where c1 > 0 order by tbname " , f"select tan(t1) ,tan(c5) from {dbname}.stb1 where c1 > 0 order by tbname" )
+ self.check_result_auto_tan( f"select t1,c5 from {dbname}.stb1 where c1 > 0 order by tbname " , f"select tan(t1) , tan(c5) from {dbname}.stb1 where c1 > 0 order by tbname" )
pass
-
-
+
+
def run(self): # sourcery skip: extract-duplicate-method, remove-redundant-fstring
tdSql.prepare()
tdLog.printNoPrefix("==========step1:create table ==============")
-
+
self.prepare_datas()
- tdLog.printNoPrefix("==========step2:test errors ==============")
+ tdLog.printNoPrefix("==========step2:test errors ==============")
self.test_errors()
-
- tdLog.printNoPrefix("==========step3:support types ============")
+
+ tdLog.printNoPrefix("==========step3:support types ============")
self.support_types()
- tdLog.printNoPrefix("==========step4: tan basic query ============")
+ tdLog.printNoPrefix("==========step4: tan basic query ============")
self.basic_tan_function()
- tdLog.printNoPrefix("==========step5: big number tan query ============")
+ tdLog.printNoPrefix("==========step5: big number tan query ============")
self.test_big_number()
-
- tdLog.printNoPrefix("==========step6: tan boundary query ============")
+ tdLog.printNoPrefix("==========step6: tan boundary query ============")
self.check_boundary_values()
- tdLog.printNoPrefix("==========step7: tan filter query ============")
+ tdLog.printNoPrefix("==========step7: tan filter query ============")
self.abs_func_filter()
diff --git a/tests/system-test/2-query/timetruncate.py b/tests/system-test/2-query/timetruncate.py
index 3551d8ee2cfb0669c23ed1754ebcb65c69e48daa..d773114c3c3d84bb6b102852d84223d68e0c0a2f 100644
--- a/tests/system-test/2-query/timetruncate.py
+++ b/tests/system-test/2-query/timetruncate.py
@@ -25,6 +25,7 @@ class TDTestCase:
self.ntbname = f'{self.dbname}.ntb'
self.stbname = f'{self.dbname}.stb'
self.ctbname = f'{self.dbname}.ctb'
+
def check_ms_timestamp(self,unit,date_time):
if unit.lower() == '1a':
for i in range(len(self.ts_str)):
@@ -45,11 +46,12 @@ class TDTestCase:
elif unit.lower() == '1d':
for i in range(len(self.ts_str)):
ts_result = self.get_time.get_ms_timestamp(str(tdSql.queryResult[i][0]))
- tdSql.checkEqual(ts_result,int(date_time[i]/1000/60/60/24)*24*60*60*1000)
+ tdSql.checkEqual(ts_result,int(date_time[i]/1000/60/60/24)*24*60*60*1000)
elif unit.lower() == '1w':
for i in range(len(self.ts_str)):
ts_result = self.get_time.get_ms_timestamp(str(tdSql.queryResult[i][0]))
tdSql.checkEqual(ts_result,int(date_time[i]/1000/60/60/24/7)*7*24*60*60*1000)
+
def check_us_timestamp(self,unit,date_time):
if unit.lower() == '1u':
for i in range(len(self.ts_str)):
@@ -74,11 +76,12 @@ class TDTestCase:
elif unit.lower() == '1d':
for i in range(len(self.ts_str)):
ts_result = self.get_time.get_us_timestamp(str(tdSql.queryResult[i][0]))
- tdSql.checkEqual(ts_result,int(date_time[i]/1000/1000/60/60/24)*24*60*60*1000*1000 )
+ tdSql.checkEqual(ts_result,int(date_time[i]/1000/1000/60/60/24)*24*60*60*1000*1000 )
elif unit.lower() == '1w':
for i in range(len(self.ts_str)):
ts_result = self.get_time.get_us_timestamp(str(tdSql.queryResult[i][0]))
tdSql.checkEqual(ts_result,int(date_time[i]/1000/1000/60/60/24/7)*7*24*60*60*1000*1000)
+
def check_ns_timestamp(self,unit,date_time):
if unit.lower() == '1b':
for i in range(len(self.ts_str)):
@@ -100,21 +103,23 @@ class TDTestCase:
tdSql.checkEqual(tdSql.queryResult[i][0],int(date_time[i]*1000/1000/1000/1000/1000/60/60)*60*60*1000*1000*1000 )
elif unit.lower() == '1d':
for i in range(len(self.ts_str)):
- tdSql.checkEqual(tdSql.queryResult[i][0],int(date_time[i]*1000/1000/1000/1000/1000/60/60/24)*24*60*60*1000*1000*1000 )
+ tdSql.checkEqual(tdSql.queryResult[i][0],int(date_time[i]*1000/1000/1000/1000/1000/60/60/24)*24*60*60*1000*1000*1000 )
elif unit.lower() == '1w':
for i in range(len(self.ts_str)):
tdSql.checkEqual(tdSql.queryResult[i][0],int(date_time[i]*1000/1000/1000/1000/1000/60/60/24/7)*7*24*60*60*1000*1000*1000)
+
def check_tb_type(self,unit,tb_type):
- if tb_type.lower() == 'ntb':
+ if tb_type.lower() == 'ntb':
tdSql.query(f'select timetruncate(ts,{unit}) from {self.ntbname}')
elif tb_type.lower() == 'ctb':
tdSql.query(f'select timetruncate(ts,{unit}) from {self.ctbname}')
elif tb_type.lower() == 'stb':
tdSql.query(f'select timetruncate(ts,{unit}) from {self.stbname}')
+
def data_check(self,date_time,precision,tb_type):
for unit in self.time_unit:
if (unit.lower() == '1u' and precision.lower() == 'ms') or (unit.lower() == '1b' and precision.lower() == 'us') or (unit.lower() == '1b' and precision.lower() == 'ms'):
- if tb_type.lower() == 'ntb':
+ if tb_type.lower() == 'ntb':
tdSql.error(f'select timetruncate(ts,{unit}) from {self.ntbname}')
elif tb_type.lower() == 'ctb':
tdSql.error(f'select timetruncate(ts,{unit}) from {self.ctbname}')
@@ -139,16 +144,19 @@ class TDTestCase:
tdSql.error(f'select timetruncate(ts,{unit}) from {self.ctbname}')
elif tb_type.lower() == 'stb':
tdSql.error(f'select timetruncate(ts,{unit}) from {self.stbname}')
+
def function_check_ntb(self):
for precision in self.db_param_precision:
tdSql.execute(f'drop database if exists {self.dbname}')
tdSql.execute(f'create database {self.dbname} precision "{precision}"')
+ tdLog.info(f"=====now is in a {precision} database=====")
tdSql.execute(f'use {self.dbname}')
tdSql.execute(f'create table {self.ntbname} (ts timestamp,c0 int)')
for ts in self.ts_str:
tdSql.execute(f'insert into {self.ntbname} values("{ts}",1)')
date_time = self.get_time.time_transform(self.ts_str,precision)
self.data_check(date_time,precision,'ntb')
+
def function_check_stb(self):
for precision in self.db_param_precision:
tdSql.execute(f'drop database if exists {self.dbname}')
@@ -161,9 +169,11 @@ class TDTestCase:
date_time = self.get_time.time_transform(self.ts_str,precision)
self.data_check(date_time,precision,'ctb')
self.data_check(date_time,precision,'stb')
+
def run(self):
self.function_check_ntb()
self.function_check_stb()
+
def stop(self):
tdSql.close()
tdLog.success("%s successfully executed" % __file__)
diff --git a/tests/system-test/2-query/tsbsQuery.py b/tests/system-test/2-query/tsbsQuery.py
index 617f7e74643c9b1dbb24834e3535b4bac669e4bb..04a80a74ad2d6ec21a97dc17bba05fb02df3830b 100644
--- a/tests/system-test/2-query/tsbsQuery.py
+++ b/tests/system-test/2-query/tsbsQuery.py
@@ -22,7 +22,7 @@ class TDTestCase:
def init(self, conn, logSql):
tdLog.debug(f"start to excute {__file__}")
- tdSql.init(conn.cursor(), True)
+ tdSql.init(conn.cursor(), False)
def create_ctable(self,tsql=None, dbName='db',stbName='stb',ctbPrefix='ctb',ctbNum=1):
tsql.execute("use %s" %dbName)
@@ -32,16 +32,16 @@ class TDTestCase:
for i in range(ctbNum):
tagValue = 'beijing'
if (i % 10 == 0):
- sql += " %s%d using %s (name,fleet,driver,device_version,load_capacity,fuel_capacity,nominal_fuel_consumption) tags('truck_%d', 'South%d','Trish%d','v2.%d', 1500+%d*20, 150+%d*2, 5+%d)"%(ctbPrefix,i,stbName,i,i,i,i,(1500+i*20),(150+i*2),(5+i))
+ sql += f" {dbName}.%s%d using %s (name,fleet,driver,device_version,load_capacity,fuel_capacity,nominal_fuel_consumption) tags('truck_%d', 'South%d','Trish%d','v2.%d', 1500+%d*20, 150+%d*2, 5+%d)"%(ctbPrefix,i,stbName,i,i,i,i,(1500+i*20),(150+i*2),(5+i))
else:
model = 'H-%d'%i
- sql += " %s%d using %s tags('truck_%d', 'South%d','Trish%d','%s','v2.%d', %d, %d,%d)"%(ctbPrefix,i,stbName,i,i,i,model,i,(1500+i*20),(150+i*2),(5+i))
+ sql += f" {dbName}.%s%d using %s tags('truck_%d', 'South%d','Trish%d','%s','v2.%d', %d, %d,%d)"%(ctbPrefix,i,stbName,i,i,i,model,i,(1500+i*20),(150+i*2),(5+i))
if (i > 0) and (i%1000 == 0):
tsql.execute(sql)
sql = pre_create
if sql != pre_create:
tsql.execute(sql)
-
+
tdLog.debug("complete to create %d child tables in %s.%s" %(ctbNum, dbName, stbName))
return
@@ -54,32 +54,32 @@ class TDTestCase:
startTs = int(round(t * 1000))
for i in range(ctbNum):
- sql += " %s%d values "%(ctbPrefix,i)
+ sql += f" {dbName}.%s%d values "%(ctbPrefix,i)
for j in range(rowsPerTbl):
if(ctbPrefix=="rct"):
sql += f"({startTs+j*60000}, {80+j}, {90+j}, {85+j}, {30+j*10}, {1.2*j}, {221+j*2}, {20+j*0.2}) "
elif ( ctbPrefix=="dct"):
status= random.randint(0,1)
- sql += f"( {startTs+j*60000}, {1+j*0.1},{1400+j*15}, {status} ) "
+ sql += f"( {startTs+j*60000}, {1+j*0.1},{1400+j*15}, {status} ) "
# tdLog.debug("1insert sql:%s"%sql)
if (j > 0) and ((j%batchNum == 0) or (j == rowsPerTbl - 1)):
# tdLog.debug("2insert sql:%s"%sql)
tsql.execute(sql)
if j < rowsPerTbl - 1:
- sql = "insert into %s%d values " %(ctbPrefix,i)
+ sql = f"insert into {dbName}.%s%d values " %(ctbPrefix,i)
else:
sql = "insert into "
if sql != pre_insert:
# tdLog.debug("3insert sql:%s"%sql)
- tsql.execute(sql)
+ tsql.execute(sql)
tdLog.debug("insert data ............ [OK]")
return
def prepareData(self):
dbname="db_tsbs"
- stabname1="readings"
- stabname2="diagnostics"
- ctbnamePre1="rct"
+ stabname1=f"{dbname}.readings"
+ stabname2=f"{dbname}.diagnostics"
+ ctbnamePre1="rct"
ctbnamePre2="dct"
ctbNums=50
self.ctbNums=ctbNums
@@ -107,7 +107,7 @@ class TDTestCase:
# tdSql.execute(f"create table dct{i} using diagnostics (name,fleet,driver,model,device_version) tags ('truck_{i}','South{i}','Trish{i}',NULL ,'v2.3')")
# else:
# tdSql.execute(f"create table dct{i} using diagnostics (name,fleet,driver,model,device_version) tags ('truck_{i}','South{i}','Trish{i}','H-{i}','v2.3')")
- # for j in range(ctbNums):
+ # for j in range(ctbNums):
# for i in range(rowNUms):
# tdSql.execute(
# f"insert into rct{j} values ( {ts+i*60000}, {80+i}, {90+i}, {85+i}, {30+i*10}, {1.2*i}, {221+i*2}, {20+i*0.2}, {1500+i*20}, {150+i*2},{5+i} )"
@@ -133,106 +133,106 @@ class TDTestCase:
# tdLog.info("avg value check pass , it work as expected ,sql is \"%s\" "%check_query )
- def tsbsIotQuery(self,insertinto=True):
-
+ def tsbsIotQuery(self,insertinto=True, dbname="db_tsbs"):
+
tdSql.execute("use db_tsbs")
-
+
# test interval and partition
- tdSql.query(" SELECT avg(velocity) as mean_velocity ,name,driver,fleet FROM readings WHERE ts > 1451606400000 AND ts <= 1451606460000 partition BY name,driver,fleet; ")
+ tdSql.query(f"select avg(velocity) as mean_velocity ,name,driver,fleet from {dbname}.readings WHERE ts > 1451606400000 AND ts <= 1451606460000 partition BY name,driver,fleet; ")
parRows=tdSql.queryRows
- tdSql.query(" SELECT avg(velocity) as mean_velocity ,name,driver,fleet FROM readings WHERE ts > 1451606400000 AND ts <= 1451606460000 partition BY name,driver,fleet interval(10m); ")
+ tdSql.query(f"select avg(velocity) as mean_velocity ,name,driver,fleet from {dbname}.readings WHERE ts > 1451606400000 AND ts <= 1451606460000 partition BY name,driver,fleet interval(10m); ")
tdSql.checkRows(parRows)
-
-
- # test insert into
+
+
+ # test insert into
if insertinto == True :
- tdSql.execute("create table testsnode (ts timestamp, c1 float,c2 binary(30),c3 binary(30),c4 binary(30)) ;")
- tdSql.query("insert into testsnode SELECT ts,avg(velocity) as mean_velocity,name,driver,fleet FROM readings WHERE ts > 1451606400000 AND ts <= 1451606460000 partition BY name,driver,fleet,ts interval(10m);")
-
- tdSql.query("insert into testsnode(ts,c1,c2,c3,c4) SELECT ts,avg(velocity) as mean_velocity,name,driver,fleet FROM readings WHERE ts > 1451606400000 AND ts <= 1451606460000 partition BY name,driver,fleet,ts interval(10m);")
+ tdSql.execute(f"create table {dbname}.testsnode (ts timestamp, c1 float,c2 binary(30),c3 binary(30),c4 binary(30)) ;")
+ tdSql.query(f"insert into {dbname}.testsnode SELECT ts,avg(velocity) as mean_velocity,name,driver,fleet from {dbname}.readings WHERE ts > 1451606400000 AND ts <= 1451606460000 partition BY name,driver,fleet,ts interval(10m);")
+
+ tdSql.query(f"insert into {dbname}.testsnode(ts,c1,c2,c3,c4) SELECT ts,avg(velocity) as mean_velocity,name,driver,fleet from {dbname}.readings WHERE ts > 1451606400000 AND ts <= 1451606460000 partition BY name,driver,fleet,ts interval(10m);")
# test paitition interval fill
- tdSql.query("SELECT name,floor(avg(velocity)/10)/floor(avg(velocity)/10) AS mv FROM readings WHERE name!='' AND ts > '2016-01-01T00:00:00Z' AND ts < '2016-01-05T00:00:01Z' partition by name interval(10m) fill(value,0) ;")
+ tdSql.query(f"select name,floor(avg(velocity)/10)/floor(avg(velocity)/10) AS mv from {dbname}.readings WHERE name!='' AND ts > '2016-01-01T00:00:00Z' AND ts < '2016-01-05T00:00:01Z' partition by name interval(10m) fill(value,0) ;")
# test partition interval limit (PRcore-TD-17410)
- tdSql.query("select name,driver from (SELECT name,driver,fleet ,avg(velocity) as mean_velocity FROM readings partition BY name,driver,fleet interval (10m) limit 1);")
+ tdSql.query(f"select name,driver from (SELECT name,driver,fleet ,avg(velocity) as mean_velocity from {dbname}.readings partition BY name,driver,fleet interval (10m) limit 1);")
tdSql.checkRows(self.ctbNums)
# test partition interval Pseudo time-column
- tdSql.query("SELECT count(ms1)/144 FROM (SELECT _wstart as ts1,model, fleet,avg(status) AS ms1 FROM diagnostics WHERE ts >= '2016-01-01T00:00:00Z' AND ts < '2016-01-05T00:00:01Z' partition by model, fleet interval(10m)) WHERE ts1 >= '2016-01-01T00:00:00Z' AND ts1 < '2016-01-05T00:00:01Z' AND ms1<1;")
+ tdSql.query(f"select count(ms1)/144 FROM (SELECT _wstart as ts1,model, fleet,avg(status) AS ms1 from {dbname}.diagnostics WHERE ts >= '2016-01-01T00:00:00Z' AND ts < '2016-01-05T00:00:01Z' partition by model, fleet interval(10m)) WHERE ts1 >= '2016-01-01T00:00:00Z' AND ts1 < '2016-01-05T00:00:01Z' AND ms1<1;")
# 1 high-load:
- tdSql.query("SELECT ts,name,driver,current_load,load_capacity FROM (SELECT last(ts) as ts,name,driver, current_load,load_capacity FROM diagnostics WHERE fleet = 'South' partition by name,driver) WHERE current_load>= (0.9 * load_capacity) partition by name ORDER BY name desc, ts DESC;")
+ tdSql.query(f"select ts,name,driver,current_load,load_capacity FROM (SELECT last(ts) as ts,name,driver, current_load,load_capacity from {dbname}.diagnostics WHERE fleet = 'South' partition by name,driver) WHERE current_load>= (0.9 * load_capacity) partition by name ORDER BY name desc, ts DESC;")
- tdSql.query("SELECT ts,name,driver,current_load,load_capacity FROM (SELECT last(ts) as ts,name,driver, current_load,load_capacity FROM diagnostics WHERE fleet = 'South' partition by name,driver) WHERE current_load>= (0.9 * load_capacity) partition by name ORDER BY name ;")
+ tdSql.query(f"select ts,name,driver,current_load,load_capacity FROM (SELECT last(ts) as ts,name,driver, current_load,load_capacity from {dbname}.diagnostics WHERE fleet = 'South' partition by name,driver) WHERE current_load>= (0.9 * load_capacity) partition by name ORDER BY name ;")
- # 2 stationary-trucks
- tdSql.query("select name,driver from (SELECT name,driver,fleet ,avg(velocity) as mean_velocity FROM readings WHERE ts > '2016-01-01T15:07:21Z' AND ts <= '2016-01-01T16:17:21Z' partition BY name,driver,fleet interval(10m) LIMIT 1)")
- tdSql.query("select name,driver from (SELECT name,driver,fleet ,avg(velocity) as mean_velocity FROM readings WHERE ts > '2016-01-01T15:07:21Z' AND ts <= '2016-01-01T16:17:21Z' partition BY name,driver,fleet interval(10m) LIMIT 1) WHERE fleet = 'West' AND mean_velocity < 1000 partition BY name")
+ # 2 stationary-trucks
+ tdSql.query(f"select name,driver from (SELECT name,driver,fleet ,avg(velocity) as mean_velocity from {dbname}.readings WHERE ts > '2016-01-01T15:07:21Z' AND ts <= '2016-01-01T16:17:21Z' partition BY name,driver,fleet interval(10m) LIMIT 1)")
+ tdSql.query(f"select name,driver from (SELECT name,driver,fleet ,avg(velocity) as mean_velocity from {dbname}.readings WHERE ts > '2016-01-01T15:07:21Z' AND ts <= '2016-01-01T16:17:21Z' partition BY name,driver,fleet interval(10m) LIMIT 1) WHERE fleet = 'West' AND mean_velocity < 1000 partition BY name")
# 3 long-driving-sessions
- tdSql.query("SELECT name,driver FROM(SELECT name,driver,count(*) AS ten_min FROM(SELECT _wstart as ts,name,driver,avg(velocity) as mean_velocity FROM readings where ts > '2016-01-01T00:00:34Z' AND ts <= '2016-01-01T04:00:34Z' partition BY name,driver interval(10m)) WHERE mean_velocity > 1 GROUP BY name,driver) WHERE ten_min > 22 ;")
+ tdSql.query(f"select name,driver FROM(SELECT name,driver,count(*) AS ten_min FROM(SELECT _wstart as ts,name,driver,avg(velocity) as mean_velocity from {dbname}.readings where ts > '2016-01-01T00:00:34Z' AND ts <= '2016-01-01T04:00:34Z' partition BY name,driver interval(10m)) WHERE mean_velocity > 1 GROUP BY name,driver) WHERE ten_min > 22 ;")
#4 long-daily-sessions
- tdSql.query("SELECT name,driver FROM(SELECT name,driver,count(*) AS ten_min FROM(SELECT name,driver,avg(velocity) as mean_velocity FROM readings WHERE fleet ='West' AND ts > '2016-01-01T12:31:37Z' AND ts <= '2016-01-05T12:31:37Z' partition BY name,driver interval(10m) ) WHERE mean_velocity > 1 GROUP BY name,driver) WHERE ten_min > 60")
+ tdSql.query(f"select name,driver FROM(SELECT name,driver,count(*) AS ten_min FROM(SELECT name,driver,avg(velocity) as mean_velocity from {dbname}.readings WHERE fleet ='West' AND ts > '2016-01-01T12:31:37Z' AND ts <= '2016-01-05T12:31:37Z' partition BY name,driver interval(10m) ) WHERE mean_velocity > 1 GROUP BY name,driver) WHERE ten_min > 60")
# 5. avg-daily-driving-duration
- tdSql.query("select _wstart as ts,fleet,name,driver,count(mv)/6 as hours_driven from ( select _wstart as ts,fleet,name,driver,avg(velocity) as mv from readings where ts > '2016-01-01T00:00:00Z' and ts < '2016-01-05T00:00:01Z' partition by fleet,name,driver interval(10m)) where ts > '2016-01-01T00:00:00Z' and ts < '2016-01-05T00:00:01Z' partition by fleet,name,driver interval(1d) ;")
+ tdSql.query(f"select _wstart as ts,fleet,name,driver,count(mv)/6 as hours_driven from ( select _wstart as ts,fleet,name,driver,avg(velocity) as mv from {dbname}.readings where ts > '2016-01-01T00:00:00Z' and ts < '2016-01-05T00:00:01Z' partition by fleet,name,driver interval(10m)) where ts > '2016-01-01T00:00:00Z' and ts < '2016-01-05T00:00:01Z' partition by fleet,name,driver interval(1d) ;")
- # # 6. avg-daily-driving-session
+ # # 6. avg-daily-driving-session
# #taosc core dumped
- tdSql.query(" SELECT _wstart as ts,name,floor(avg(velocity)/5) AS mv FROM readings WHERE name is not null AND ts > '2016-01-01T00:00:00Z' AND ts < '2016-01-05T00:00:01Z' partition by name interval(10m) fill(value,0);")
- # tdSql.query("select name,diff(mv) AS difka FROM (SELECT ts,name,mv FROM (SELECT _wstart as ts,name,floor(avg(velocity)/10)/floor(avg(velocity)/10) AS mv FROM readings WHERE name!='' AND ts > '2016-01-01T00:00:00Z' AND ts < '2016-01-05T00:00:01Z' partition by name interval(10m) fill(value,0))) group BY name ;")
- # tdSql.query("SELECT _wstart,name,floor(avg(velocity)/10)/floor(avg(velocity)/10) AS mv FROM readings WHERE name!='' AND ts > '2016-01-01T00:00:00Z' AND ts < '2016-01-05T00:00:01Z' partition by name interval(10m) fill(value,0)")
+ tdSql.query(f"select _wstart as ts,name,floor(avg(velocity)/5) AS mv from {dbname}.readings WHERE name is not null AND ts > '2016-01-01T00:00:00Z' AND ts < '2016-01-05T00:00:01Z' partition by name interval(10m) fill(value,0);")
+ # tdSql.query(f"select name,diff(mv) AS difka FROM (SELECT ts,name,mv FROM (SELECT _wstart as ts,name,floor(avg(velocity)/10)/floor(avg(velocity)/10) AS mv from {dbname}.readings WHERE name!='' AND ts > '2016-01-01T00:00:00Z' AND ts < '2016-01-05T00:00:01Z' partition by name interval(10m) fill(value,0))) group BY name ;")
+ # tdSql.query(f"select _wstart,name,floor(avg(velocity)/10)/floor(avg(velocity)/10) AS mv from {dbname}.readings WHERE name!='' AND ts > '2016-01-01T00:00:00Z' AND ts < '2016-01-05T00:00:01Z' partition by name interval(10m) fill(value,0)")
# 7. avg-load
- tdSql.query("SELECT fleet, model,avg(ml) AS mean_load_percentage FROM (SELECT fleet, model,current_load/load_capacity AS ml FROM diagnostics partition BY name, fleet, model) partition BY fleet, model order by fleet ;")
+ tdSql.query(f"select fleet, model,avg(ml) AS mean_load_percentage FROM (SELECT fleet, model,current_load/load_capacity AS ml from {dbname}.diagnostics partition BY name, fleet, model) partition BY fleet, model order by fleet ;")
- # 8. daily-activity
- tdSql.query(" SELECT model,ms1 FROM (SELECT _wstart as ts1,model, fleet,avg(status) AS ms1 FROM diagnostics WHERE ts >= '2016-01-01T00:00:00Z' AND ts < '2016-01-05T00:00:01Z' partition by model, fleet interval(10m) fill(value,0)) WHERE ts1 >= '2016-01-01T00:00:00Z' AND ts1 < '2016-01-05T00:00:01Z' AND ms1<1;")
+ # 8. daily-activity
+ tdSql.query(f"select model,ms1 FROM (SELECT _wstart as ts1,model, fleet,avg(status) AS ms1 from {dbname}.diagnostics WHERE ts >= '2016-01-01T00:00:00Z' AND ts < '2016-01-05T00:00:01Z' partition by model, fleet interval(10m) fill(value,0)) WHERE ts1 >= '2016-01-01T00:00:00Z' AND ts1 < '2016-01-05T00:00:01Z' AND ms1<1;")
- tdSql.query(" SELECT model,ms1 FROM (SELECT _wstart as ts1,model, fleet,avg(status) AS ms1 FROM diagnostics WHERE ts >= '2016-01-01T00:00:00Z' AND ts < '2016-01-05T00:00:01Z' partition by model, fleet interval(10m) ) WHERE ts1 >= '2016-01-01T00:00:00Z' AND ts1 < '2016-01-05T00:00:01Z' AND ms1<1;")
+ tdSql.query(f"select model,ms1 FROM (SELECT _wstart as ts1,model, fleet,avg(status) AS ms1 from {dbname}.diagnostics WHERE ts >= '2016-01-01T00:00:00Z' AND ts < '2016-01-05T00:00:01Z' partition by model, fleet interval(10m) ) WHERE ts1 >= '2016-01-01T00:00:00Z' AND ts1 < '2016-01-05T00:00:01Z' AND ms1<1;")
- tdSql.query("SELECT _wstart,model,fleet,count(ms1)/144 FROM (SELECT _wstart as ts1,model, fleet,avg(status) AS ms1 FROM diagnostics WHERE ts >= '2016-01-01T00:00:00Z' AND ts < '2016-01-05T00:00:01Z' partition by model, fleet interval(10m) fill(value,0)) WHERE ts1 >= '2016-01-01T00:00:00Z' AND ts1 < '2016-01-05T00:00:01Z' AND ms1<1 partition by model, fleet interval(1d) ;")
+ tdSql.query(f"select _wstart,model,fleet,count(ms1)/144 FROM (SELECT _wstart as ts1,model, fleet,avg(status) AS ms1 from {dbname}.diagnostics WHERE ts >= '2016-01-01T00:00:00Z' AND ts < '2016-01-05T00:00:01Z' partition by model, fleet interval(10m) fill(value,0)) WHERE ts1 >= '2016-01-01T00:00:00Z' AND ts1 < '2016-01-05T00:00:01Z' AND ms1<1 partition by model, fleet interval(1d) ;")
- tdSql.query("SELECT _wstart as ts,model,fleet,count(ms1)/144 FROM (SELECT _wstart as ts1,model, fleet,avg(status) AS ms1 FROM diagnostics WHERE ts >= '2016-01-01T00:00:00Z' AND ts < '2016-01-05T00:00:01Z' partition by model, fleet interval(10m) ) WHERE ts1 >= '2016-01-01T00:00:00Z' AND ts1 < '2016-01-05T00:00:01Z' AND ms1<1 partition by model, fleet interval(1d) ;")
+ tdSql.query(f"select _wstart as ts,model,fleet,count(ms1)/144 FROM (SELECT _wstart as ts1,model, fleet,avg(status) AS ms1 from {dbname}.diagnostics WHERE ts >= '2016-01-01T00:00:00Z' AND ts < '2016-01-05T00:00:01Z' partition by model, fleet interval(10m) ) WHERE ts1 >= '2016-01-01T00:00:00Z' AND ts1 < '2016-01-05T00:00:01Z' AND ms1<1 partition by model, fleet interval(1d) ;")
# 9. breakdown-frequency
# NULL ---count(NULL)=0 expect count(NULL)= 100
- tdSql.query("SELECT model,state_changed,count(state_changed) FROM (SELECT model,diff(broken_down) AS state_changed FROM (SELECT _wstart,model,cast(cast(floor(2*(sum(nzs)/count(nzs))) as bool) as int) AS broken_down FROM (SELECT ts,model, cast(cast(status as bool) as int) AS nzs FROM diagnostics WHERE ts >= '2016-01-01T00:00:00Z' AND ts < '2016-01-05T00:00:01Z' ) WHERE ts >= '2016-01-01T00:00:00Z' AND ts < '2016-01-05T00:00:01Z' partition BY model interval(10m)) partition BY model) where model is null partition BY model,state_changed ")
+ tdSql.query(f"select model,state_changed,count(state_changed) FROM (SELECT model,diff(broken_down) AS state_changed FROM (SELECT _wstart,model,cast(cast(floor(2*(sum(nzs)/count(nzs))) as bool) as int) AS broken_down FROM (SELECT ts,model, cast(cast(status as bool) as int) AS nzs from {dbname}.diagnostics WHERE ts >= '2016-01-01T00:00:00Z' AND ts < '2016-01-05T00:00:01Z' ) WHERE ts >= '2016-01-01T00:00:00Z' AND ts < '2016-01-05T00:00:01Z' partition BY model interval(10m)) partition BY model) where model is null partition BY model,state_changed ")
parRows=tdSql.queryRows
assert parRows != 0 , "query result is wrong, query rows %d but expect > 0 " %parRows
- tdSql.query(" SELECT model,state_changed,count(state_changed) FROM (SELECT model,diff(broken_down) AS state_changed FROM (SELECT _wstart,model,cast(cast(floor(2*(sum(nzs)/count(nzs))) as bool) as int) AS broken_down FROM (SELECT ts,model, cast(cast(status as bool) as int) AS nzs FROM diagnostics WHERE ts >= '2016-01-01T00:00:00Z' AND ts < '2016-01-05T00:00:01Z' ) WHERE ts >= '2016-01-01T00:00:00Z' AND ts < '2016-01-05T00:00:01Z' partition BY model interval(10m)) partition BY model) where state_changed =1 partition BY model,state_changed ;")
- sql="select model,ctc from (SELECT model,count(state_changed) as ctc FROM (SELECT model,diff(broken_down) AS state_changed FROM (SELECT model,cast(cast(floor(2*(sum(nzs)/count(nzs))) as bool) as int) AS broken_down FROM (SELECT ts,model, cast(cast(status as bool) as int) AS nzs FROM diagnostics WHERE ts >= 1451606400000 AND ts < 1451952001000 ) WHERE ts >= 1451606400000 AND ts < 1451952001000 partition BY model interval(10m)) partition BY model) WHERE state_changed = 1 partition BY model )where model is null;"
+ tdSql.query(f"select model,state_changed,count(state_changed) FROM (SELECT model,diff(broken_down) AS state_changed FROM (SELECT _wstart,model,cast(cast(floor(2*(sum(nzs)/count(nzs))) as bool) as int) AS broken_down FROM (SELECT ts,model, cast(cast(status as bool) as int) AS nzs from {dbname}.diagnostics WHERE ts >= '2016-01-01T00:00:00Z' AND ts < '2016-01-05T00:00:01Z' ) WHERE ts >= '2016-01-01T00:00:00Z' AND ts < '2016-01-05T00:00:01Z' partition BY model interval(10m)) partition BY model) where state_changed =1 partition BY model,state_changed ;")
+ sql=f"select model,ctc from (SELECT model,count(state_changed) as ctc FROM (SELECT model,diff(broken_down) AS state_changed FROM (SELECT model,cast(cast(floor(2*(sum(nzs)/count(nzs))) as bool) as int) AS broken_down FROM (SELECT ts,model, cast(cast(status as bool) as int) AS nzs from {dbname}.diagnostics WHERE ts >= 1451606400000 AND ts < 1451952001000 ) WHERE ts >= 1451606400000 AND ts < 1451952001000 partition BY model interval(10m)) partition BY model) WHERE state_changed = 1 partition BY model )where model is null;"
# for i in range(2):
# tdSql.query("%s"%sql)
- # quertR1=tdSql.queryResult
+ # quertR1=tdSql.queryResult
# for j in range(50):
# tdSql.query("%s"%sql)
# quertR2=tdSql.queryResult
- # assert quertR1 == quertR2 , "%s != %s ,The results of multiple queries are different" %(quertR1,quertR2)
+ # assert quertR1 == quertR2 , "%s != %s ,The results of multiple queries are different" %(quertR1,quertR2)
+
-
#it's already supported:
# last-loc
- tdSql.query("SELECT last_row(ts),latitude,longitude,name,driver FROM readings WHERE fleet='South' and name IS NOT NULL partition BY name,driver order by name ;")
+ tdSql.query(f"select last_row(ts),latitude,longitude,name,driver from {dbname}.readings WHERE fleet='South' and name IS NOT NULL partition BY name,driver order by name ;")
#2. low-fuel
- tdSql.query("SELECT last_row(ts),name,driver,fuel_state,driver FROM diagnostics WHERE fuel_state <= 0.1 AND fleet = 'South' and name IS NOT NULL GROUP BY name,driver order by name;")
-
+ tdSql.query(f"select last_row(ts),name,driver,fuel_state,driver from {dbname}.diagnostics WHERE fuel_state <= 0.1 AND fleet = 'South' and name IS NOT NULL GROUP BY name,driver order by name;")
+
# 3. avg-vs-projected-fuel-consumption
- tdSql.query("select avg(fuel_consumption) as avg_fuel_consumption,avg(nominal_fuel_consumption) as nominal_fuel_consumption from readings where velocity > 1 group by fleet")
-
- def run(self):
+ tdSql.query(f"select avg(fuel_consumption) as avg_fuel_consumption,avg(nominal_fuel_consumption) as nominal_fuel_consumption from {dbname}.readings where velocity > 1 group by fleet")
+
+ def run(self):
tdLog.printNoPrefix("==========step1:create database and table,insert data ==============")
self.prepareData()
self.tsbsIotQuery()
diff --git a/tests/system-test/2-query/ttl_comment.py b/tests/system-test/2-query/ttl_comment.py
index 33bd61b66c85a2519513b9eee10bfcdaff8e8925..c26393158cefe46fb054d7bd3e28a621cab73199 100644
--- a/tests/system-test/2-query/ttl_comment.py
+++ b/tests/system-test/2-query/ttl_comment.py
@@ -26,20 +26,21 @@ class TDTestCase:
def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
- tdSql.init(conn.cursor(), logSql)
+ tdSql.init(conn.cursor(), False)
def run(self):
+ dbname="db"
tdSql.prepare()
- tdSql.error("create table ttl_table1(ts timestamp, i int) ttl 1.1")
- tdSql.error("create table ttl_table2(ts timestamp, i int) ttl 1e1")
- tdSql.error("create table ttl_table3(ts timestamp, i int) ttl -1")
+ tdSql.error(f"create table {dbname}.ttl_table1(ts timestamp, i int) ttl 1.1")
+ tdSql.error(f"create table {dbname}.ttl_table2(ts timestamp, i int) ttl 1e1")
+ tdSql.error(f"create table {dbname}.ttl_table3(ts timestamp, i int) ttl -1")
print("============== STEP 1 ===== test normal table")
- tdSql.execute("create table normal_table1(ts timestamp, i int)")
- tdSql.execute("create table normal_table2(ts timestamp, i int) comment '' ttl 3")
- tdSql.execute("create table normal_table3(ts timestamp, i int) ttl 2100000000020 comment 'hello'")
+ tdSql.execute(f"create table {dbname}.normal_table1(ts timestamp, i int)")
+ tdSql.execute(f"create table {dbname}.normal_table2(ts timestamp, i int) comment '' ttl 3")
+ tdSql.execute(f"create table {dbname}.normal_table3(ts timestamp, i int) ttl 2100000000020 comment 'hello'")
tdSql.query("select * from information_schema.ins_tables where table_name like 'normal_table1'")
tdSql.checkData(0, 0, 'normal_table1')
@@ -58,32 +59,32 @@ class TDTestCase:
tdSql.checkData(0, 7, 2147483647)
tdSql.checkData(0, 8, 'hello')
- tdSql.execute("alter table normal_table1 comment 'nihao'")
+ tdSql.execute(f"alter table {dbname}.normal_table1 comment 'nihao'")
tdSql.query("select * from information_schema.ins_tables where table_name like 'normal_table1'")
tdSql.checkData(0, 0, 'normal_table1')
tdSql.checkData(0, 8, 'nihao')
- tdSql.execute("alter table normal_table1 comment ''")
+ tdSql.execute(f"alter table {dbname}.normal_table1 comment ''")
tdSql.query("select * from information_schema.ins_tables where table_name like 'normal_table1'")
tdSql.checkData(0, 0, 'normal_table1')
tdSql.checkData(0, 8, '')
- tdSql.execute("alter table normal_table2 comment 'fly'")
+ tdSql.execute(f"alter table {dbname}.normal_table2 comment 'fly'")
tdSql.query("select * from information_schema.ins_tables where table_name like 'normal_table2'")
tdSql.checkData(0, 0, 'normal_table2')
tdSql.checkData(0, 8, 'fly')
- tdSql.execute("alter table normal_table3 comment 'fly'")
+ tdSql.execute(f"alter table {dbname}.normal_table3 comment 'fly'")
tdSql.query("select * from information_schema.ins_tables where table_name like 'normal_table3'")
tdSql.checkData(0, 0, 'normal_table3')
tdSql.checkData(0, 8, 'fly')
- tdSql.execute("alter table normal_table1 ttl 1")
+ tdSql.execute(f"alter table {dbname}.normal_table1 ttl 1")
tdSql.query("select * from information_schema.ins_tables where table_name like 'normal_table1'")
tdSql.checkData(0, 0, 'normal_table1')
tdSql.checkData(0, 7, 1)
- tdSql.execute("alter table normal_table3 ttl 0")
+ tdSql.execute(f"alter table {dbname}.normal_table3 ttl 0")
tdSql.query("select * from information_schema.ins_tables where table_name like 'normal_table3'")
tdSql.checkData(0, 0, 'normal_table3')
tdSql.checkData(0, 7, 0)
@@ -91,9 +92,9 @@ class TDTestCase:
print("============== STEP 2 ===== test super table")
- tdSql.execute("create table super_table1(ts timestamp, i int) tags(t int)")
- tdSql.execute("create table super_table2(ts timestamp, i int) tags(t int) comment ''")
- tdSql.execute("create table super_table3(ts timestamp, i int) tags(t int) comment 'super'")
+ tdSql.execute(f"create table {dbname}.super_table1(ts timestamp, i int) tags(t int)")
+ tdSql.execute(f"create table {dbname}.super_table2(ts timestamp, i int) tags(t int) comment ''")
+ tdSql.execute(f"create table {dbname}.super_table3(ts timestamp, i int) tags(t int) comment 'super'")
tdSql.query("select * from information_schema.ins_stables where stable_name like 'super_table1'")
tdSql.checkData(0, 0, 'super_table1')
@@ -110,32 +111,32 @@ class TDTestCase:
tdSql.checkData(0, 6, 'super')
- tdSql.execute("alter table super_table1 comment 'nihao'")
+ tdSql.execute(f"alter table {dbname}.super_table1 comment 'nihao'")
tdSql.query("select * from information_schema.ins_stables where stable_name like 'super_table1'")
tdSql.checkData(0, 0, 'super_table1')
tdSql.checkData(0, 6, 'nihao')
- tdSql.execute("alter table super_table1 comment ''")
+ tdSql.execute(f"alter table {dbname}.super_table1 comment ''")
tdSql.query("select * from information_schema.ins_stables where stable_name like 'super_table1'")
tdSql.checkData(0, 0, 'super_table1')
tdSql.checkData(0, 6, '')
- tdSql.execute("alter table super_table2 comment 'fly'")
+ tdSql.execute(f"alter table {dbname}.super_table2 comment 'fly'")
tdSql.query("select * from information_schema.ins_stables where stable_name like 'super_table2'")
tdSql.checkData(0, 0, 'super_table2')
tdSql.checkData(0, 6, 'fly')
- tdSql.execute("alter table super_table3 comment 'tdengine'")
+ tdSql.execute(f"alter table {dbname}.super_table3 comment 'tdengine'")
tdSql.query("select * from information_schema.ins_stables where stable_name like 'super_table3'")
tdSql.checkData(0, 0, 'super_table3')
tdSql.checkData(0, 6, 'tdengine')
print("============== STEP 3 ===== test child table")
- tdSql.execute("create table child_table1 using super_table1 tags(1) ttl 10")
- tdSql.execute("create table child_table2 using super_table1 tags(1) comment ''")
- tdSql.execute("create table child_table3 using super_table1 tags(1) comment 'child'")
- tdSql.execute("insert into child_table4 using super_table1 tags(1) values(now, 1)")
+ tdSql.execute(f"create table {dbname}.child_table1 using {dbname}.super_table1 tags(1) ttl 10")
+ tdSql.execute(f"create table {dbname}.child_table2 using {dbname}.super_table1 tags(1) comment ''")
+ tdSql.execute(f"create table {dbname}.child_table3 using {dbname}.super_table1 tags(1) comment 'child'")
+ tdSql.execute(f"insert into {dbname}.child_table4 using {dbname}.super_table1 tags(1) values(now, 1)")
tdSql.query("select * from information_schema.ins_tables where table_name like 'child_table1'")
@@ -160,38 +161,38 @@ class TDTestCase:
tdSql.checkData(0, 8, None)
- tdSql.execute("alter table child_table1 comment 'nihao'")
+ tdSql.execute(f"alter table {dbname}.child_table1 comment 'nihao'")
tdSql.query("select * from information_schema.ins_tables where table_name like 'child_table1'")
tdSql.checkData(0, 0, 'child_table1')
tdSql.checkData(0, 8, 'nihao')
- tdSql.execute("alter table child_table1 comment ''")
+ tdSql.execute(f"alter table {dbname}.child_table1 comment ''")
tdSql.query("select * from information_schema.ins_tables where table_name like 'child_table1'")
tdSql.checkData(0, 0, 'child_table1')
tdSql.checkData(0, 8, '')
- tdSql.execute("alter table child_table2 comment 'fly'")
+ tdSql.execute(f"alter table {dbname}.child_table2 comment 'fly'")
tdSql.query("select * from information_schema.ins_tables where table_name like 'child_table2'")
tdSql.checkData(0, 0, 'child_table2')
tdSql.checkData(0, 8, 'fly')
- tdSql.execute("alter table child_table3 comment 'tdengine'")
+ tdSql.execute(f"alter table {dbname}.child_table3 comment 'tdengine'")
tdSql.query("select * from information_schema.ins_tables where table_name like 'child_table3'")
tdSql.checkData(0, 0, 'child_table3')
tdSql.checkData(0, 8, 'tdengine')
- tdSql.execute("alter table child_table4 comment 'tdengine'")
+ tdSql.execute(f"alter table {dbname}.child_table4 comment 'tdengine'")
tdSql.query("select * from information_schema.ins_tables where table_name like 'child_table4'")
tdSql.checkData(0, 0, 'child_table4')
tdSql.checkData(0, 8, 'tdengine')
- tdSql.execute("alter table child_table4 ttl 9")
+ tdSql.execute(f"alter table {dbname}.child_table4 ttl 9")
tdSql.query("select * from information_schema.ins_tables where table_name like 'child_table4'")
tdSql.checkData(0, 0, 'child_table4')
tdSql.checkData(0, 7, 9)
- tdSql.execute("alter table child_table3 ttl 9")
+ tdSql.execute(f"alter table {dbname}.child_table3 ttl 9")
tdSql.query("select * from information_schema.ins_tables where table_name like 'child_table3'")
tdSql.checkData(0, 0, 'child_table3')
tdSql.checkData(0, 7, 9)
@@ -203,4 +204,3 @@ class TDTestCase:
tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())
-
diff --git a/tests/system-test/2-query/twa.py b/tests/system-test/2-query/twa.py
index 8281527bd46be8f1b14d6ee2098a2888c20a737a..62940477cf701d69e8c8e7568ae4b56d68518d81 100644
--- a/tests/system-test/2-query/twa.py
+++ b/tests/system-test/2-query/twa.py
@@ -7,10 +7,7 @@ import platform
import math
class TDTestCase:
- updatecfgDict = {'debugFlag': 143 ,"cDebugFlag":143,"uDebugFlag":143 ,"rpcDebugFlag":143 , "tmrDebugFlag":143 ,
- "jniDebugFlag":143 ,"simDebugFlag":143,"dDebugFlag":143, "dDebugFlag":143,"vDebugFlag":143,"mDebugFlag":143,"qDebugFlag":143,
- "wDebugFlag":143,"sDebugFlag":143,"tsdbDebugFlag":143,"tqDebugFlag":143 ,"fsDebugFlag":143 ,"udfDebugFlag":143,
- "maxTablesPerVnode":2 ,"minTablesPerVnode":2,"tableIncStepPerVnode":2 }
+ updatecfgDict = {"maxTablesPerVnode":2 ,"minTablesPerVnode":2,"tableIncStepPerVnode":2 }
def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
@@ -21,46 +18,45 @@ class TDTestCase:
self.row_nums = 100
self.time_step = 1000
- def prepare_datas_of_distribute(self):
+ def prepare_datas_of_distribute(self, dbname="testdb"):
# prepate datas for 20 tables distributed at different vgroups
- tdSql.execute("create database if not exists testdb keep 3650 duration 1000 vgroups 5")
- tdSql.execute(" use testdb ")
+ tdSql.execute(f"create database if not exists {dbname} keep 3650 duration 1000 vgroups 5")
tdSql.execute(
- '''create table stb1
+ f'''create table {dbname}.stb1
(ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp)
tags (t0 timestamp, t1 int, t2 bigint, t3 smallint, t4 tinyint, t5 float, t6 double, t7 bool, t8 binary(16),t9 nchar(32))
'''
)
for i in range(self.tb_nums):
- tdSql.execute(f'create table ct{i+1} using stb1 tags ( now(), {1*i}, {11111*i}, {111*i}, {1*i}, {1.11*i}, {11.11*i}, {i%2}, "binary{i}", "nchar{i}" )')
+ tdSql.execute(f'create table {dbname}.ct{i+1} using {dbname}.stb1 tags ( now(), {1*i}, {11111*i}, {111*i}, {1*i}, {1.11*i}, {11.11*i}, {i%2}, "binary{i}", "nchar{i}" )')
ts = self.ts
for j in range(self.row_nums):
ts+=j*self.time_step
tdSql.execute(
- f"insert into ct{i+1} values({ts}, 1, 11111, 111, 1, 1.11, 11.11, 2, 'binary{j}', 'nchar{j}', now()+{1*j}a )"
+ f"insert into {dbname}.ct{i+1} values({ts}, 1, 11111, 111, 1, 1.11, 11.11, 2, 'binary{j}', 'nchar{j}', now()+{1*j}a )"
)
- tdSql.execute("insert into ct1 values (now()-810d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
- tdSql.execute("insert into ct1 values (now()-400d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
- tdSql.execute("insert into ct1 values (now()+90d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
+ tdSql.execute(f"insert into {dbname}.ct1 values (now()-810d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
+ tdSql.execute(f"insert into {dbname}.ct1 values (now()-400d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
+ tdSql.execute(f"insert into {dbname}.ct1 values (now()+90d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
tdLog.info(" prepare data for distributed_aggregate done! ")
- def twa_support_types(self):
- tdSql.query("desc stb1 ")
+ def twa_support_types(self, dbname="testdb"):
+ tdSql.query(f"desc {dbname}.stb1 ")
schema_list = tdSql.queryResult
for col_type in schema_list:
if col_type[1] in ["TINYINT" ,"SMALLINT","BIGINT" ,"INT","FLOAT","DOUBLE"]:
- tdSql.query(f" select twa({col_type[0]}) from stb1 partition by tbname ")
+ tdSql.query(f"select twa({col_type[0]}) from {dbname}.stb1 partition by tbname ")
else:
- tdSql.error(f" select twa({col_type[0]}) from stb1 partition by tbname ")
+ tdSql.error(f"select twa({col_type[0]}) from {dbname}.stb1 partition by tbname ")
- def check_distribute_datas(self):
+ def check_distribute_datas(self, dbname="testdb"):
# get vgroup_ids of all
- tdSql.query("show vgroups ")
+ tdSql.query(f"show {dbname}.vgroups ")
vgroups = tdSql.queryResult
vnode_tables={}
@@ -69,7 +65,7 @@ class TDTestCase:
vnode_tables[vgroup_id[0]]=[]
# check sub_table of per vnode ,make sure sub_table has been distributed
- tdSql.query(f"select * from information_schema.ins_tables where db_name = 'testdb' and table_name like 'ct%'")
+ tdSql.query(f"select * from information_schema.ins_tables where db_name = '{dbname}' and table_name like 'ct%'")
table_names = tdSql.queryResult
tablenames = []
for table_name in table_names:
@@ -83,28 +79,28 @@ class TDTestCase:
if count < 2:
tdLog.exit(" the datas of all not satisfy sub_table has been distributed ")
- def distribute_twa_query(self):
+ def distribute_twa_query(self, dbname="testdb"):
# basic filter
- tdSql.query(" select twa(c1) from ct1 ")
+ tdSql.query(f"select twa(c1) from {dbname}.ct1 ")
tdSql.checkData(0,0,1.000000000)
- tdSql.query(" select twa(c1) from stb1 partition by tbname ")
+ tdSql.query(f"select twa(c1) from {dbname}.stb1 partition by tbname ")
tdSql.checkRows(self.tb_nums)
tdSql.checkData(0,0,1.000000000)
- tdSql.query(" select twa(c2) from stb1 group by tbname ")
+ tdSql.query(f"select twa(c2) from {dbname}.stb1 group by tbname ")
tdSql.checkRows(self.tb_nums)
tdSql.checkData(0,0,11111.000000000)
- tdSql.query("select twa(c1+c2) from stb1 partition by tbname ")
+ tdSql.query(f"select twa(c1+c2) from {dbname}.stb1 partition by tbname ")
tdSql.checkData(0,0,11112.000000000)
- tdSql.query("select twa(c1) from stb1 partition by t1")
+ tdSql.query(f"select twa(c1) from {dbname}.stb1 partition by t1")
tdSql.checkRows(self.tb_nums)
tdSql.checkData(0,0,1.000000000)
# union all
- tdSql.query(" select twa(c1) from stb1 partition by tbname union all select twa(c1) from stb1 partition by tbname ")
+ tdSql.query(f"select twa(c1) from {dbname}.stb1 partition by tbname union all select twa(c1) from {dbname}.stb1 partition by tbname ")
tdSql.checkRows(40)
tdSql.checkData(0,0,1.000000000)
@@ -112,26 +108,23 @@ class TDTestCase:
tdSql.execute(" create database if not exists db ")
tdSql.execute(" use db ")
- tdSql.execute(" create stable st (ts timestamp , c1 int ,c2 float) tags(t1 int) ")
- tdSql.execute(" create table tb1 using st tags(1) ")
- tdSql.execute(" create table tb2 using st tags(2) ")
+ tdSql.execute(" create stable db.st (ts timestamp , c1 int ,c2 float) tags(t1 int) ")
+ tdSql.execute(" create table db.tb1 using db.st tags(1) ")
+ tdSql.execute(" create table db.tb2 using db.st tags(2) ")
for i in range(10):
ts = i*10 + self.ts
- tdSql.execute(f" insert into tb1 values({ts},{i},{i}.0)")
- tdSql.execute(f" insert into tb2 values({ts},{i},{i}.0)")
+ tdSql.execute(f" insert into db.tb1 values({ts},{i},{i}.0)")
+ tdSql.execute(f" insert into db.tb2 values({ts},{i},{i}.0)")
- tdSql.query(" select twa(tb1.c1), twa(tb2.c2) from tb1, tb2 where tb1.ts=tb2.ts ")
+ tdSql.query(f"select twa(tb1.c1), twa(tb2.c2) from db.tb1 tb1, db.tb2 tb2 where tb1.ts=tb2.ts ")
tdSql.checkRows(1)
tdSql.checkData(0,0,4.500000000)
tdSql.checkData(0,1,4.500000000)
- # group by
- tdSql.execute(" use testdb ")
-
# mixup with other functions
- tdSql.query(" select twa(c1),twa(c2),max(c1),elapsed(ts) from stb1 ")
+ tdSql.query(f"select twa(c1),twa(c2),max(c1),elapsed(ts) from {dbname}.stb1 ")
tdSql.checkData(0,0,1.000000000)
tdSql.checkData(0,1,11111.000000000)
tdSql.checkData(0,2,1)
diff --git a/tests/system-test/2-query/union.py b/tests/system-test/2-query/union.py
index 88767ab888c9bfe11c329eecd41f78442436cafb..4040bb71cbb92849dd63d11627c93a2954a4a0d1 100644
--- a/tests/system-test/2-query/union.py
+++ b/tests/system-test/2-query/union.py
@@ -58,10 +58,10 @@ class TDTestCase:
def __join_condition(self, tb_list, filter=PRIMARY_COL, INNER=False):
table_reference = tb_list[0]
- join_condition = table_reference
+ join_condition = f'{table_reference} {table_reference.split(".")[-1]}'
join = "inner join" if INNER else "join"
for i in range(len(tb_list[1:])):
- join_condition += f" {join} {tb_list[i+1]} on {table_reference}.{filter}={tb_list[i+1]}.{filter}"
+ join_condition += f" {join} {tb_list[i+1]} {tb_list[i+1].split('.')[-1]} on {table_reference.split('.')[-1]}.{filter}={tb_list[i+1].split('.')[-1]}.{filter}"
return join_condition
@@ -76,7 +76,6 @@ class TDTestCase:
elif query_conditon.startswith("min"):
query_conditon = query_conditon[4:-1]
-
if query_conditon:
return f" where {query_conditon} is not null"
if col in NUM_COL:
@@ -108,10 +107,10 @@ class TDTestCase:
return f"select {select_clause} from {from_clause} {where_condition} {group_condition}"
@property
- def __join_tblist(self):
+ def __join_tblist(self, dbname="db"):
return [
- ["ct1", "t1"],
- ["ct4", "t1"],
+ [f"{dbname}.ct1", f"{dbname}.t1"],
+ [f"{dbname}.ct4", f"{dbname}.t1"],
# ["ct1", "ct2", "ct4"],
# ["ct1", "ct2", "t1"],
# ["ct1", "ct4", "t1"],
@@ -120,10 +119,10 @@ class TDTestCase:
]
@property
- def __tb_liast(self):
+ def __tb_list(self, dbname="db"):
return [
- "ct1",
- "ct4",
+ f"{dbname}.ct1",
+ f"{dbname}.ct4",
]
def sql_list(self):
@@ -131,7 +130,8 @@ class TDTestCase:
__join_tblist = self.__join_tblist
for join_tblist in __join_tblist:
for join_tb in join_tblist:
- select_claus_list = self.__query_condition(join_tb)
+ join_tb_name = join_tb.split(".")[-1]
+ select_claus_list = self.__query_condition(join_tb_name)
for select_claus in select_claus_list:
group_claus = self.__group_condition( col=select_claus)
where_claus = self.__where_condition(query_conditon=select_claus)
@@ -141,9 +141,10 @@ class TDTestCase:
self.__single_sql(select_claus, self.__join_condition(join_tblist, INNER=True), where_claus, having_claus),
)
)
- __no_join_tblist = self.__tb_liast
+ __no_join_tblist = self.__tb_list
for tb in __no_join_tblist:
- select_claus_list = self.__query_condition(tb)
+ tb_name = join_tb.split(".")[-1]
+ select_claus_list = self.__query_condition(tb_name)
for select_claus in select_claus_list:
group_claus = self.__group_condition(col=select_claus)
where_claus = self.__where_condition(query_conditon=select_claus)
@@ -230,31 +231,29 @@ class TDTestCase:
else:
tdSql.error(f"{sqls[i]} union {sqls[j+i]}")
- def __test_error(self):
+ def __test_error(self, dbname="db"):
- tdSql.error( "show tables union show tables" )
- tdSql.error( "create table errtb1 union all create table errtb2" )
- tdSql.error( "drop table ct1 union all drop table ct3" )
- tdSql.error( "select c1 from ct1 union all drop table ct3" )
- tdSql.error( "select c1 from ct1 union all '' " )
- tdSql.error( " '' union all select c1 from ct1 " )
- # tdSql.error( "select c1 from ct1 union select c1 from ct2 union select c1 from ct4 ")
+ tdSql.error( f"show {dbname}.tables union show {dbname}.tables" )
+ tdSql.error( f"create table {dbname}.errtb1 union all create table {dbname}.errtb2" )
+ tdSql.error( f"drop table {dbname}.ct1 union all drop table {dbname}.ct3" )
+ tdSql.error( f"select c1 from {dbname}.ct1 union all drop table {dbname}.ct3" )
+ tdSql.error( f"select c1 from {dbname}.ct1 union all '' " )
+ tdSql.error( f" '' union all select c1 from{dbname}. ct1 " )
def all_test(self):
self.__test_error()
self.union_check()
-
- def __create_tb(self):
+ def __create_tb(self, dbname="db"):
tdLog.printNoPrefix("==========step1:create table")
- create_stb_sql = f'''create table stb1(
+ create_stb_sql = f'''create table {dbname}.stb1(
ts timestamp, {INT_COL} int, {BINT_COL} bigint, {SINT_COL} smallint, {TINT_COL} tinyint,
{FLOAT_COL} float, {DOUBLE_COL} double, {BOOL_COL} bool,
{BINARY_COL} binary(16), {NCHAR_COL} nchar(32), {TS_COL} timestamp
- ) tags (t1 int)
+ ) tags (tag1 int)
'''
- create_ntb_sql = f'''create table t1(
+ create_ntb_sql = f'''create table {dbname}.t1(
ts timestamp, {INT_COL} int, {BINT_COL} bigint, {SINT_COL} smallint, {TINT_COL} tinyint,
{FLOAT_COL} float, {DOUBLE_COL} double, {BOOL_COL} bool,
{BINARY_COL} binary(16), {NCHAR_COL} nchar(32), {TS_COL} timestamp
@@ -264,30 +263,29 @@ class TDTestCase:
tdSql.execute(create_ntb_sql)
for i in range(4):
- tdSql.execute(f'create table ct{i+1} using stb1 tags ( {i+1} )')
- { i % 32767 }, { i % 127}, { i * 1.11111 }, { i * 1000.1111 }, { i % 2}
+ tdSql.execute(f'create table {dbname}.ct{i+1} using {dbname}.stb1 tags ( {i+1} )')
- def __insert_data(self, rows):
+ def __insert_data(self, rows, dbname="db"):
now_time = int(datetime.datetime.timestamp(datetime.datetime.now()) * 1000)
for i in range(rows):
tdSql.execute(
- f"insert into ct1 values ( { now_time - i * 1000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )"
+ f"insert into {dbname}.ct1 values ( { now_time - i * 1000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )"
)
tdSql.execute(
- f"insert into ct4 values ( { now_time - i * 7776000000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )"
+ f"insert into {dbname}.ct4 values ( { now_time - i * 7776000000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )"
)
tdSql.execute(
- f"insert into ct2 values ( { now_time - i * 7776000000 }, {-i}, {-11111 * i}, {-111 * i % 32767 }, {-11 * i % 127}, {-1.11*i}, {-1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )"
+ f"insert into {dbname}.ct2 values ( { now_time - i * 7776000000 }, {-i}, {-11111 * i}, {-111 * i % 32767 }, {-11 * i % 127}, {-1.11*i}, {-1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )"
)
tdSql.execute(
- f'''insert into ct1 values
+ f'''insert into {dbname}.ct1 values
( { now_time - rows * 5 }, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar_测试_0', { now_time + 8 } )
( { now_time + 10000 }, { rows }, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar_测试_9', { now_time + 9 } )
'''
)
tdSql.execute(
- f'''insert into ct4 values
+ f'''insert into {dbname}.ct4 values
( { now_time - rows * 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( { now_time - rows * 3888000000 + 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( { now_time + 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
@@ -303,7 +301,7 @@ class TDTestCase:
)
tdSql.execute(
- f'''insert into ct2 values
+ f'''insert into {dbname}.ct2 values
( { now_time - rows * 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( { now_time - rows * 3888000000 + 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( { now_time + 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
@@ -319,13 +317,13 @@ class TDTestCase:
)
for i in range(rows):
- insert_data = f'''insert into t1 values
+ insert_data = f'''insert into {dbname}.t1 values
( { now_time - i * 3600000 }, {i}, {i * 11111}, { i % 32767 }, { i % 127}, { i * 1.11111 }, { i * 1000.1111 }, { i % 2},
"binary_{i}", "nchar_测试_{i}", { now_time - 1000 * i } )
'''
tdSql.execute(insert_data)
tdSql.execute(
- f'''insert into t1 values
+ f'''insert into {dbname}.t1 values
( { now_time + 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( { now_time - (( rows // 2 ) * 60 + 30) * 60000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( { now_time - rows * 3600000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
@@ -341,7 +339,6 @@ class TDTestCase:
'''
)
-
def run(self):
tdSql.prepare()
@@ -355,8 +352,7 @@ class TDTestCase:
tdLog.printNoPrefix("==========step3:all check")
self.all_test()
- tdDnodes.stop(1)
- tdDnodes.start(1)
+ tdSql.execute("flush database db")
tdSql.execute("use db")
diff --git a/tests/system-test/2-query/unique.py b/tests/system-test/2-query/unique.py
index ccf7e287e27d7768acedc17b55969d1fab6d30cd..ec77cbbcdc9d83d0a63b54fbe377c14d8645ce52 100644
--- a/tests/system-test/2-query/unique.py
+++ b/tests/system-test/2-query/unique.py
@@ -11,49 +11,46 @@ from util.sql import *
from util.cases import *
class TDTestCase:
- updatecfgDict = {'debugFlag': 143 ,"cDebugFlag":143,"uDebugFlag":143 ,"rpcDebugFlag":143 , "tmrDebugFlag":143 ,
- "jniDebugFlag":143 ,"simDebugFlag":143,"dDebugFlag":143, "dDebugFlag":143,"vDebugFlag":143,"mDebugFlag":143,"qDebugFlag":143,
- "wDebugFlag":143,"sDebugFlag":143,"tsdbDebugFlag":143,"tqDebugFlag":143 ,"fsDebugFlag":143 ,"udfDebugFlag":143}
def init(self, conn, logSql):
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor())
- def prepare_datas(self):
+ def prepare_datas(self, dbname="db"):
tdSql.execute(
- '''create table stb1
+ f'''create table {dbname}.stb1
(ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp)
tags (t1 int)
'''
)
tdSql.execute(
- '''
- create table t1
+ f'''
+ create table {dbname}.t1
(ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp)
'''
)
for i in range(4):
- tdSql.execute(f'create table ct{i+1} using stb1 tags ( {i+1} )')
+ tdSql.execute(f'create table {dbname}.ct{i+1} using {dbname}.stb1 tags ( {i+1} )')
for i in range(9):
tdSql.execute(
- f"insert into ct1 values ( now()-{i*10}s, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )"
+ f"insert into {dbname}.ct1 values ( now()-{i*10}s, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )"
)
tdSql.execute(
- f"insert into ct4 values ( now()-{i*90}d, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )"
+ f"insert into {dbname}.ct4 values ( now()-{i*90}d, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )"
)
- tdSql.execute("insert into ct1 values (now()-45s, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar0', now()+8a )")
- tdSql.execute("insert into ct1 values (now()+10s, 9, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )")
- tdSql.execute("insert into ct1 values (now()+15s, 9, -99999, -999, -99, -9.99, NULL, 1, 'binary9', 'nchar9', now()+9a )")
- tdSql.execute("insert into ct1 values (now()+20s, 9, -99999, -999, NULL, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )")
+ tdSql.execute(f"insert into {dbname}.ct1 values (now()-45s, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar0', now()+8a )")
+ tdSql.execute(f"insert into {dbname}.ct1 values (now()+10s, 9, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )")
+ tdSql.execute(f"insert into {dbname}.ct1 values (now()+15s, 9, -99999, -999, -99, -9.99, NULL, 1, 'binary9', 'nchar9', now()+9a )")
+ tdSql.execute(f"insert into {dbname}.ct1 values (now()+20s, 9, -99999, -999, NULL, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )")
- tdSql.execute("insert into ct4 values (now()-810d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
- tdSql.execute("insert into ct4 values (now()-400d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
- tdSql.execute("insert into ct4 values (now()+90d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
+ tdSql.execute(f"insert into {dbname}.ct4 values (now()-810d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
+ tdSql.execute(f"insert into {dbname}.ct4 values (now()-400d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
+ tdSql.execute(f"insert into {dbname}.ct4 values (now()+90d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
tdSql.execute(
- f'''insert into t1 values
+ f'''insert into {dbname}.t1 values
( '2020-04-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( '2020-10-21 01:01:01.000', 1, 11111, 111, 11, 1.11, 11.11, 1, "binary1", "nchar1", now()+1a )
( '2020-12-31 01:01:01.000', 2, 22222, 222, 22, 2.22, 22.22, 0, "binary2", "nchar2", now()+2a )
@@ -69,84 +66,84 @@ class TDTestCase:
'''
)
- def test_errors(self):
+ def test_errors(self, dbname="db"):
error_sql_lists = [
- "select unique from t1",
- "select unique(123--123)==1 from t1",
- "select unique(123,123) from t1",
- "select unique(c1,ts) from t1",
- "select unique(c1,c1,ts) from t1",
- "select unique(c1) as 'd1' from t1",
- "select unique(c1 ,c2 ) from t1",
- "select unique(c1 ,NULL) from t1",
- "select unique(,) from t1;",
- "select unique(floor(c1) ab from t1)",
- "select unique(c1) as int from t1",
- "select unique('c1') from t1",
- "select unique(NULL) from t1",
- "select unique('') from t1",
- "select unique(c%) from t1",
- "select unique(t1) from t1",
- "select unique(True) from t1",
- "select unique(c1) , count(c1) from t1",
- "select unique(c1) , avg(c1) from t1",
- "select unique(c1) , min(c1) from t1",
- "select unique(c1) , spread(c1) from t1",
- "select unique(c1) , diff(c1) from t1",
- #"select unique(c1) , abs(c1) from t1", # support
- #"select unique(c1) , c1 from t1",
- "select unique from stb1 partition by tbname",
- "select unique(123--123)==1 from stb1 partition by tbname",
- "select unique(123) from stb1 partition by tbname",
- "select unique(c1,ts) from stb1 partition by tbname",
- "select unique(c1,c1,ts) from stb1 partition by tbname",
- "select unique(c1) as 'd1' from stb1 partition by tbname",
- "select unique(c1 ,c2 ) from stb1 partition by tbname",
- "select unique(c1 ,NULL) from stb1 partition by tbname",
- "select unique(,) from stb1 partition by tbname;",
- #"select unique(floor(c1) ab from stb1 partition by tbname)", # support
- #"select unique(c1) as int from stb1 partition by tbname",
- "select unique('c1') from stb1 partition by tbname",
- "select unique(NULL) from stb1 partition by tbname",
- "select unique('') from stb1 partition by tbname",
- "select unique(c%) from stb1 partition by tbname",
- #"select unique(t1) from stb1 partition by tbname", # support
- "select unique(True) from stb1 partition by tbname",
- "select unique(c1) , count(c1) from stb1 partition by tbname",
- "select unique(c1) , avg(c1) from stb1 partition by tbname",
- "select unique(c1) , min(c1) from stb1 partition by tbname",
- "select unique(c1) , spread(c1) from stb1 partition by tbname",
- "select unique(c1) , diff(c1) from stb1 partition by tbname",
- #"select unique(c1) , abs(c1) from stb1 partition by tbname", # support
- #"select unique(c1) , c1 from stb1 partition by tbname" # support
+ f"select unique from {dbname}.t1",
+ f"select unique(123--123)==1 from {dbname}.t1",
+ f"select unique(123,123) from {dbname}.t1",
+ f"select unique(c1,ts) from {dbname}.t1",
+ f"select unique(c1,c1,ts) from {dbname}.t1",
+ f"select unique(c1) as 'd1' from {dbname}.t1",
+ f"select unique(c1 ,c2 ) from {dbname}.t1",
+ f"select unique(c1 ,NULL) from {dbname}.t1",
+ f"select unique(,) from {dbname}.t1;",
+ f"select unique(floor(c1) ab from {dbname}.t1)",
+ f"select unique(c1) as int from {dbname}.t1",
+ f"select unique('c1') from {dbname}.t1",
+ f"select unique(NULL) from {dbname}.t1",
+ f"select unique('') from {dbname}.t1",
+ f"select unique(c%) from {dbname}.t1",
+ f"select unique(t1) from {dbname}.t1",
+ f"select unique(True) from {dbname}.t1",
+ f"select unique(c1) , count(c1) from {dbname}.t1",
+ f"select unique(c1) , avg(c1) from {dbname}.t1",
+ f"select unique(c1) , min(c1) from {dbname}.t1",
+ f"select unique(c1) , spread(c1) from {dbname}.t1",
+ f"select unique(c1) , diff(c1) from {dbname}.t1",
+ #f"select unique(c1) , abs(c1) from {dbname}.t1", # support
+ #f"select unique(c1) , c1 from {dbname}.t1",
+ f"select unique from {dbname}.stb1 partition by tbname",
+ f"select unique(123--123)==1 from {dbname}.stb1 partition by tbname",
+ f"select unique(123) from {dbname}.stb1 partition by tbname",
+ f"select unique(c1,ts) from {dbname}.stb1 partition by tbname",
+ f"select unique(c1,c1,ts) from {dbname}.stb1 partition by tbname",
+ f"select unique(c1) as 'd1' from {dbname}.stb1 partition by tbname",
+ f"select unique(c1 ,c2 ) from {dbname}.stb1 partition by tbname",
+ f"select unique(c1 ,NULL) from {dbname}.stb1 partition by tbname",
+ f"select unique(,) from {dbname}.stb1 partition by tbname;",
+ #f"select unique(floor(c1) ab from {dbname}.stb1 partition by tbname)", # support
+ #f"select unique(c1) as int from {dbname}.stb1 partition by tbname",
+ f"select unique('c1') from {dbname}.stb1 partition by tbname",
+ f"select unique(NULL) from {dbname}.stb1 partition by tbname",
+ f"select unique('') from {dbname}.stb1 partition by tbname",
+ f"select unique(c%) from {dbname}.stb1 partition by tbname",
+ #f"select unique(t1) from {dbname}.stb1 partition by tbname", # support
+ f"select unique(True) from {dbname}.stb1 partition by tbname",
+ f"select unique(c1) , count(c1) from {dbname}.stb1 partition by tbname",
+ f"select unique(c1) , avg(c1) from {dbname}.stb1 partition by tbname",
+ f"select unique(c1) , min(c1) from {dbname}.stb1 partition by tbname",
+ f"select unique(c1) , spread(c1) from {dbname}.stb1 partition by tbname",
+ f"select unique(c1) , diff(c1) from {dbname}.stb1 partition by tbname",
+ #f"select unique(c1) , abs(c1) from {dbname}.stb1 partition by tbname", # support
+ #f"select unique(c1) , c1 from {dbname}.stb1 partition by tbname" # support
]
for error_sql in error_sql_lists:
tdSql.error(error_sql)
pass
- def support_types(self):
+ def support_types(self, dbname="db"):
other_no_value_types = [
- "select unique(ts) from t1" ,
- "select unique(c7) from t1",
- "select unique(c8) from t1",
- "select unique(c9) from t1",
- "select unique(ts) from ct1" ,
- "select unique(c7) from ct1",
- "select unique(c8) from ct1",
- "select unique(c9) from ct1",
- "select unique(ts) from ct3" ,
- "select unique(c7) from ct3",
- "select unique(c8) from ct3",
- "select unique(c9) from ct3",
- "select unique(ts) from ct4" ,
- "select unique(c7) from ct4",
- "select unique(c8) from ct4",
- "select unique(c9) from ct4",
- "select unique(ts) from stb1 partition by tbname" ,
- "select unique(c7) from stb1 partition by tbname",
- "select unique(c8) from stb1 partition by tbname",
- "select unique(c9) from stb1 partition by tbname"
+ f"select unique(ts) from {dbname}.t1" ,
+ f"select unique(c7) from {dbname}.t1",
+ f"select unique(c8) from {dbname}.t1",
+ f"select unique(c9) from {dbname}.t1",
+ f"select unique(ts) from {dbname}.ct1" ,
+ f"select unique(c7) from {dbname}.ct1",
+ f"select unique(c8) from {dbname}.ct1",
+ f"select unique(c9) from {dbname}.ct1",
+ f"select unique(ts) from {dbname}.ct3" ,
+ f"select unique(c7) from {dbname}.ct3",
+ f"select unique(c8) from {dbname}.ct3",
+ f"select unique(c9) from {dbname}.ct3",
+ f"select unique(ts) from {dbname}.ct4" ,
+ f"select unique(c7) from {dbname}.ct4",
+ f"select unique(c8) from {dbname}.ct4",
+ f"select unique(c9) from {dbname}.ct4",
+ f"select unique(ts) from {dbname}.stb1 partition by tbname" ,
+ f"select unique(c7) from {dbname}.stb1 partition by tbname",
+ f"select unique(c8) from {dbname}.stb1 partition by tbname",
+ f"select unique(c9) from {dbname}.stb1 partition by tbname"
]
for type_sql in other_no_value_types:
@@ -154,43 +151,43 @@ class TDTestCase:
tdLog.info("support type ok , sql is : %s"%type_sql)
type_sql_lists = [
- "select unique(c1) from t1",
- "select unique(c2) from t1",
- "select unique(c3) from t1",
- "select unique(c4) from t1",
- "select unique(c5) from t1",
- "select unique(c6) from t1",
-
- "select unique(c1) from ct1",
- "select unique(c2) from ct1",
- "select unique(c3) from ct1",
- "select unique(c4) from ct1",
- "select unique(c5) from ct1",
- "select unique(c6) from ct1",
-
- "select unique(c1) from ct3",
- "select unique(c2) from ct3",
- "select unique(c3) from ct3",
- "select unique(c4) from ct3",
- "select unique(c5) from ct3",
- "select unique(c6) from ct3",
-
- "select unique(c1) from stb1 partition by tbname",
- "select unique(c2) from stb1 partition by tbname",
- "select unique(c3) from stb1 partition by tbname",
- "select unique(c4) from stb1 partition by tbname",
- "select unique(c5) from stb1 partition by tbname",
- "select unique(c6) from stb1 partition by tbname",
-
- "select unique(c6) as alisb from stb1 partition by tbname",
- "select unique(c6) alisb from stb1 partition by tbname",
+ f"select unique(c1) from {dbname}.t1",
+ f"select unique(c2) from {dbname}.t1",
+ f"select unique(c3) from {dbname}.t1",
+ f"select unique(c4) from {dbname}.t1",
+ f"select unique(c5) from {dbname}.t1",
+ f"select unique(c6) from {dbname}.t1",
+
+ f"select unique(c1) from {dbname}.ct1",
+ f"select unique(c2) from {dbname}.ct1",
+ f"select unique(c3) from {dbname}.ct1",
+ f"select unique(c4) from {dbname}.ct1",
+ f"select unique(c5) from {dbname}.ct1",
+ f"select unique(c6) from {dbname}.ct1",
+
+ f"select unique(c1) from {dbname}.ct3",
+ f"select unique(c2) from {dbname}.ct3",
+ f"select unique(c3) from {dbname}.ct3",
+ f"select unique(c4) from {dbname}.ct3",
+ f"select unique(c5) from {dbname}.ct3",
+ f"select unique(c6) from {dbname}.ct3",
+
+ f"select unique(c1) from {dbname}.stb1 partition by tbname",
+ f"select unique(c2) from {dbname}.stb1 partition by tbname",
+ f"select unique(c3) from {dbname}.stb1 partition by tbname",
+ f"select unique(c4) from {dbname}.stb1 partition by tbname",
+ f"select unique(c5) from {dbname}.stb1 partition by tbname",
+ f"select unique(c6) from {dbname}.stb1 partition by tbname",
+
+ f"select unique(c6) as alisb from {dbname}.stb1 partition by tbname",
+ f"select unique(c6) alisb from {dbname}.stb1 partition by tbname",
]
for type_sql in type_sql_lists:
tdSql.query(type_sql)
def check_unique_table(self , unique_sql):
- # unique_sql = "select unique(c1) from ct1"
+ # unique_sql = f"select unique(c1) from {dbname}.ct1"
origin_sql = unique_sql.replace("unique(","").replace(")","")
tdSql.query(unique_sql)
unique_result = tdSql.queryResult
@@ -219,83 +216,83 @@ class TDTestCase:
else:
tdLog.exit(" unique query check fail , unique sql is: %s " %unique_sql)
- def basic_unique_function(self):
+ def basic_unique_function(self, dbname="db"):
# basic query
- tdSql.query("select c1 from ct3")
+ tdSql.query(f"select c1 from {dbname}.ct3")
tdSql.checkRows(0)
- tdSql.query("select c1 from t1")
+ tdSql.query(f"select c1 from {dbname}.t1")
tdSql.checkRows(12)
- tdSql.query("select c1 from stb1")
+ tdSql.query(f"select c1 from {dbname}.stb1")
tdSql.checkRows(25)
# used for empty table , ct3 is empty
- tdSql.query("select unique(c1) from ct3")
+ tdSql.query(f"select unique(c1) from {dbname}.ct3")
tdSql.checkRows(0)
- tdSql.query("select unique(c2) from ct3")
+ tdSql.query(f"select unique(c2) from {dbname}.ct3")
tdSql.checkRows(0)
- tdSql.query("select unique(c3) from ct3")
+ tdSql.query(f"select unique(c3) from {dbname}.ct3")
tdSql.checkRows(0)
- tdSql.query("select unique(c4) from ct3")
+ tdSql.query(f"select unique(c4) from {dbname}.ct3")
tdSql.checkRows(0)
- tdSql.query("select unique(c5) from ct3")
+ tdSql.query(f"select unique(c5) from {dbname}.ct3")
tdSql.checkRows(0)
- tdSql.query("select unique(c6) from ct3")
+ tdSql.query(f"select unique(c6) from {dbname}.ct3")
# will support _rowts mix with
- # tdSql.query("select unique(c6),_rowts from ct3")
+ # tdSql.query(f"select unique(c6),_rowts from {dbname}.ct3")
# auto check for t1 table
# used for regular table
- tdSql.query("select unique(c1) from t1")
+ tdSql.query(f"select unique(c1) from {dbname}.t1")
- tdSql.query("desc t1")
+ tdSql.query(f"desc {dbname}.t1")
col_lists_rows = tdSql.queryResult
col_lists = []
for col_name in col_lists_rows:
col_lists.append(col_name[0])
for col in col_lists:
- self.check_unique_table(f"select unique({col}) from t1")
+ self.check_unique_table(f"select unique({col}) from {dbname}.t1")
# unique with super tags
- tdSql.query("select unique(c1) from ct1")
+ tdSql.query(f"select unique(c1) from {dbname}.ct1")
tdSql.checkRows(10)
- tdSql.query("select unique(c1) from ct4")
+ tdSql.query(f"select unique(c1) from {dbname}.ct4")
tdSql.checkRows(10)
- #tdSql.error("select unique(c1),tbname from ct1") #support
- #tdSql.error("select unique(c1),t1 from ct1") #support
+ #tdSql.error(f"select unique(c1),tbname from {dbname}.ct1") #support
+ #tdSql.error(f"select unique(c1),t1 from {dbname}.ct1") #support
# unique with common col
- #tdSql.error("select unique(c1) ,ts from ct1")
- #tdSql.error("select unique(c1) ,c1 from ct1")
+ #tdSql.error(f"select unique(c1) ,ts from {dbname}.ct1")
+ #tdSql.error(f"select unique(c1) ,c1 from {dbname}.ct1")
# unique with scalar function
- #tdSql.error("select unique(c1) ,abs(c1) from ct1")
- tdSql.error("select unique(c1) , unique(c2) from ct1")
- #tdSql.error("select unique(c1) , abs(c2)+2 from ct1")
+ #tdSql.error(f"select unique(c1) ,abs(c1) from {dbname}.ct1")
+ tdSql.error(f"select unique(c1) , unique(c2) from {dbname}.ct1")
+ #tdSql.error(f"select unique(c1) , abs(c2)+2 from {dbname}.ct1")
# unique with aggregate function
- tdSql.error("select unique(c1) ,sum(c1) from ct1")
- tdSql.error("select unique(c1) ,max(c1) from ct1")
- tdSql.error("select unique(c1) ,csum(c1) from ct1")
- tdSql.error("select unique(c1) ,count(c1) from ct1")
+ tdSql.error(f"select unique(c1) ,sum(c1) from {dbname}.ct1")
+ tdSql.error(f"select unique(c1) ,max(c1) from {dbname}.ct1")
+ tdSql.error(f"select unique(c1) ,csum(c1) from {dbname}.ct1")
+ tdSql.error(f"select unique(c1) ,count(c1) from {dbname}.ct1")
# unique with filter where
- tdSql.query("select unique(c1) from ct4 where c1 is null")
+ tdSql.query(f"select unique(c1) from {dbname}.ct4 where c1 is null")
tdSql.checkData(0, 0, None)
- tdSql.query("select unique(c1) from ct4 where c1 >2 order by 1")
+ tdSql.query(f"select unique(c1) from {dbname}.ct4 where c1 >2 order by 1")
tdSql.checkData(0, 0, 3)
tdSql.checkData(1, 0, 4)
tdSql.checkData(2, 0, 5)
tdSql.checkData(5, 0, 8)
- tdSql.query("select unique(c1) from ct4 where c2 between 0 and 99999 order by 1 desc")
+ tdSql.query(f"select unique(c1) from {dbname}.ct4 where c2 between 0 and 99999 order by 1 desc")
tdSql.checkData(0, 0, 8)
tdSql.checkData(1, 0, 7)
tdSql.checkData(2, 0, 6)
@@ -307,43 +304,43 @@ class TDTestCase:
tdSql.checkData(8, 0, 0)
# unique with union all
- tdSql.query("select unique(c1) from ct4 union all select c1 from ct1")
+ tdSql.query(f"select unique(c1) from {dbname}.ct4 union all select c1 from {dbname}.ct1")
tdSql.checkRows(23)
- tdSql.query("select unique(c1) from ct4 union all select distinct(c1) from ct4")
+ tdSql.query(f"select unique(c1) from {dbname}.ct4 union all select distinct(c1) from {dbname}.ct4")
tdSql.checkRows(20)
- tdSql.query("select unique(c2) from ct4 union all select abs(c2)/2 from ct4")
+ tdSql.query(f"select unique(c2) from {dbname}.ct4 union all select abs(c2)/2 from {dbname}.ct4")
tdSql.checkRows(22)
# unique with join
# prepare join datas with same ts
tdSql.execute(" use db ")
- tdSql.execute(" create stable st1 (ts timestamp , num int) tags(ind int)")
- tdSql.execute(" create table tb1 using st1 tags(1)")
- tdSql.execute(" create table tb2 using st1 tags(2)")
+ tdSql.execute(" create stable db.st1 (ts timestamp , num int) tags(ind int)")
+ tdSql.execute(" create table db.tb1 using db.st1 tags(1)")
+ tdSql.execute(" create table db.tb2 using db.st1 tags(2)")
- tdSql.execute(" create stable st2 (ts timestamp , num int) tags(ind int)")
- tdSql.execute(" create table ttb1 using st2 tags(1)")
- tdSql.execute(" create table ttb2 using st2 tags(2)")
+ tdSql.execute(" create stable db.st2 (ts timestamp , num int) tags(ind int)")
+ tdSql.execute(" create table db.ttb1 using db.st2 tags(1)")
+ tdSql.execute(" create table db.ttb2 using db.st2 tags(2)")
start_ts = 1622369635000 # 2021-05-30 18:13:55
for i in range(10):
ts_value = start_ts+i*1000
- tdSql.execute(f" insert into tb1 values({ts_value} , {i})")
- tdSql.execute(f" insert into tb2 values({ts_value} , {i})")
+ tdSql.execute(f" insert into {dbname}.tb1 values({ts_value} , {i})")
+ tdSql.execute(f" insert into {dbname}.tb2 values({ts_value} , {i})")
- tdSql.execute(f" insert into ttb1 values({ts_value} , {i})")
- tdSql.execute(f" insert into ttb2 values({ts_value} , {i})")
+ tdSql.execute(f" insert into {dbname}.ttb1 values({ts_value} , {i})")
+ tdSql.execute(f" insert into {dbname}.ttb2 values({ts_value} , {i})")
- tdSql.query("select unique(tb2.num) from tb1, tb2 where tb1.ts=tb2.ts order by 1")
+ tdSql.query(f"select unique(tb2.num) from db.tb1 tb1, db.tb2 tb2 where tb1.ts=tb2.ts order by 1")
tdSql.checkRows(10)
tdSql.checkData(0,0,0)
tdSql.checkData(1,0,1)
tdSql.checkData(2,0,2)
tdSql.checkData(9,0,9)
- tdSql.query("select unique(tb2.num) from tb1, tb2 where tb1.ts=tb2.ts union all select unique(tb1.num) from tb1, tb2 where tb1.ts=tb2.ts order by 1")
+ tdSql.query(f"select unique(tb2.num) from db.tb1 tb1, db.tb2 tb2 where tb1.ts=tb2.ts union all select unique(tb1.num) from db.tb1 tb1, db.tb2 tb2 where tb1.ts=tb2.ts order by 1")
tdSql.checkRows(20)
tdSql.checkData(0,0,0)
tdSql.checkData(2,0,1)
@@ -351,23 +348,23 @@ class TDTestCase:
tdSql.checkData(18,0,9)
# nest query
- # tdSql.query("select unique(c1) from (select c1 from ct1)")
- tdSql.query("select c1 from (select unique(c1) c1 from ct4) order by 1 desc nulls first")
+ # tdSql.query(f"select unique(c1) from (select c1 from {dbname}.ct1)")
+ tdSql.query(f"select c1 from (select unique(c1) c1 from {dbname}.ct4) order by 1 desc nulls first")
tdSql.checkRows(10)
tdSql.checkData(0, 0, None)
tdSql.checkData(1, 0, 8)
tdSql.checkData(9, 0, 0)
- tdSql.query("select sum(c1) from (select unique(c1) c1 from ct1)")
+ tdSql.query(f"select sum(c1) from (select unique(c1) c1 from {dbname}.ct1)")
tdSql.checkRows(1)
tdSql.checkData(0, 0, 45)
- tdSql.query("select sum(c1) from (select distinct(c1) c1 from ct1) union all select sum(c1) from (select unique(c1) c1 from ct1)")
+ tdSql.query(f"select sum(c1) from (select distinct(c1) c1 from {dbname}.ct1) union all select sum(c1) from (select unique(c1) c1 from {dbname}.ct1)")
tdSql.checkRows(2)
tdSql.checkData(0, 0, 45)
tdSql.checkData(1, 0, 45)
- tdSql.query("select 1-abs(c1) from (select unique(c1) c1 from ct4) order by 1 nulls first")
+ tdSql.query(f"select 1-abs(c1) from (select unique(c1) c1 from {dbname}.ct4) order by 1 nulls first")
tdSql.checkRows(10)
tdSql.checkData(0, 0, None)
tdSql.checkData(1, 0, -7.000000000)
@@ -375,104 +372,103 @@ class TDTestCase:
# bug for stable
#partition by tbname
- # tdSql.query(" select unique(c1) from stb1 partition by tbname ")
+ # tdSql.query(f"select unique(c1) from {dbname}.stb1 partition by tbname ")
# tdSql.checkRows(21)
- # tdSql.query(" select unique(c1) from stb1 partition by tbname ")
+ # tdSql.query(f"select unique(c1) from {dbname}.stb1 partition by tbname ")
# tdSql.checkRows(21)
# group by
- tdSql.error("select unique(c1) from ct1 group by c1")
- tdSql.error("select unique(c1) from ct1 group by tbname")
+ tdSql.error(f"select unique(c1) from {dbname}.ct1 group by c1")
+ tdSql.error(f"select unique(c1) from {dbname}.ct1 group by tbname")
# super table
# super table
- tdSql.error("select tbname , tail(c1,2) from stb1 group by tbname")
- tdSql.query("select tail(c1,2) from stb1 partition by tbname")
+ tdSql.error(f"select tbname , tail(c1,2) from {dbname}.stb1 group by tbname")
+ tdSql.query(f"select tail(c1,2) from {dbname}.stb1 partition by tbname")
tdSql.checkRows(4)
# bug need fix
- # tdSql.query("select tbname , tail(c1,2) from stb1 partition by tbname")
+ # tdSql.query(f"select tbname , tail(c1,2) from {dbname}.stb1 partition by tbname")
# tdSql.checkRows(4)
- # tdSql.query("select tbname , tail(c1,2) from stb1 partition by tbname order by tbname")
+ # tdSql.query(f"select tbname , tail(c1,2) from {dbname}.stb1 partition by tbname order by tbname")
# tdSql.checkRows(4)
- # tdSql.query(" select tbname , count(c1) from stb1 partition by tbname order by tbname ")
+ # tdSql.query(f"select tbname , count(c1) from {dbname}.stb1 partition by tbname order by tbname ")
# tdSql.checkRows(2)
- # tdSql.query(" select tbname , max(c1) ,c1 from stb1 partition by tbname order by tbname ")
+ # tdSql.query(f"select tbname , max(c1) ,c1 from {dbname}.stb1 partition by tbname order by tbname ")
# tdSql.checkRows(2)
- # tdSql.query(" select tbname ,first(c1) from stb1 partition by tbname order by tbname ")
+ # tdSql.query(f"select tbname ,first(c1) from {dbname}.stb1 partition by tbname order by tbname ")
# tdSql.checkRows(2)
- tdSql.query("select tail(c1,2) from stb1 partition by tbname")
+ tdSql.query(f"select tail(c1,2) from {dbname}.stb1 partition by tbname")
tdSql.checkRows(4)
# # bug need fix
- # tdSql.query(" select tbname , unique(c1) from stb1 where t1 = 0 partition by tbname ")
+ # tdSql.query(f"select tbname , unique(c1) from {dbname}.stb1 where t1 = 0 partition by tbname ")
# tdSql.checkRows(2)
- # tdSql.query(" select tbname , unique(c1) from stb1 where t1 = 0 partition by tbname order by tbname ")
+ # tdSql.query(f"select tbname , unique(c1) from {dbname}.stb1 where t1 = 0 partition by tbname order by tbname ")
# tdSql.checkRows(2)
- # tdSql.query(" select tbname , unique(c1) from stb1 where c1 = 0 partition by tbname order by tbname ")
+ # tdSql.query(f"select tbname , unique(c1) from {dbname}.stb1 where c1 = 0 partition by tbname order by tbname ")
# tdSql.checkRows(3)
- # tdSql.query(" select tbname , unique(c1) from stb1 where c1 = 0 partition by tbname ")
+ # tdSql.query(f"select tbname , unique(c1) from {dbname}.stb1 where c1 = 0 partition by tbname ")
# tdSql.checkRows(3)
- tdSql.query(" select unique(t1) from stb1 ")
+ tdSql.query(f"select unique(t1) from {dbname}.stb1 ")
tdSql.checkRows(2)
- tdSql.query(" select unique(t1+c1) from stb1 ")
+ tdSql.query(f"select unique(t1+c1) from {dbname}.stb1 ")
tdSql.checkRows(13)
- tdSql.query(" select unique(t1+c1) from stb1 partition by tbname ")
+ tdSql.query(f"select unique(t1+c1) from {dbname}.stb1 partition by tbname ")
tdSql.checkRows(20)
- tdSql.query(" select unique(t1) from stb1 partition by tbname ")
+ tdSql.query(f"select unique(t1) from {dbname}.stb1 partition by tbname ")
tdSql.checkRows(2)
# nest query
- tdSql.query(" select unique(c1) from (select _rowts , t1 ,c1 , tbname from stb1 ) ")
+ tdSql.query(f"select unique(c1) from (select _rowts , t1 ,c1 , tbname from {dbname}.stb1 ) ")
tdSql.checkRows(11)
tdSql.checkData(0,0,6)
tdSql.checkData(10,0,3)
- tdSql.query("select unique(t1) from (select _rowts , t1 , tbname from stb1 )")
+ tdSql.query(f"select unique(t1) from (select _rowts , t1 , tbname from {dbname}.stb1 )")
tdSql.checkRows(2)
tdSql.checkData(0,0,4)
tdSql.checkData(1,0,1)
- def check_boundary_values(self):
+ def check_boundary_values(self, dbname="bound_test"):
- tdSql.execute("drop database if exists bound_test")
- tdSql.execute("create database if not exists bound_test")
- tdSql.execute("use bound_test")
+ tdSql.execute(f"drop database if exists {dbname}")
+ tdSql.execute(f"create database if not exists {dbname}")
tdSql.execute(
- "create table stb_bound (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(32),c9 nchar(32), c10 timestamp) tags (t1 int);"
+ f"create table {dbname}.stb_bound (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(32),c9 nchar(32), c10 timestamp) tags (t1 int);"
)
- tdSql.execute(f'create table sub1_bound using stb_bound tags ( 1 )')
+ tdSql.execute(f'create table {dbname}.sub1_bound using {dbname}.stb_bound tags ( 1 )')
tdSql.execute(
- f"insert into sub1_bound values ( now()-1s, 2147483647, 9223372036854775807, 32767, 127, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
+ f"insert into {dbname}.sub1_bound values ( now()-1s, 2147483647, 9223372036854775807, 32767, 127, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
)
tdSql.execute(
- f"insert into sub1_bound values ( now(), 2147483646, 9223372036854775806, 32766, 126, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
+ f"insert into {dbname}.sub1_bound values ( now(), 2147483646, 9223372036854775806, 32766, 126, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
)
tdSql.execute(
- f"insert into sub1_bound values ( now()+1s, -2147483646, -9223372036854775806, -32766, -126, -3.40E+38, -1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
+ f"insert into {dbname}.sub1_bound values ( now()+1s, -2147483646, -9223372036854775806, -32766, -126, -3.40E+38, -1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
)
tdSql.execute(
- f"insert into sub1_bound values ( now()+2s, 2147483643, 9223372036854775803, 32763, 123, 3.39E+38, 1.69e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
+ f"insert into {dbname}.sub1_bound values ( now()+2s, 2147483643, 9223372036854775803, 32763, 123, 3.39E+38, 1.69e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
)
tdSql.execute(
- f"insert into sub1_bound values ( now()+3s, -2147483643, -9223372036854775803, -32763, -123, -3.39E+38, -1.69e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
+ f"insert into {dbname}.sub1_bound values ( now()+3s, -2147483643, -9223372036854775803, -32763, -123, -3.39E+38, -1.69e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
)
tdSql.error(
- f"insert into sub1_bound values ( now()+1s, 2147483648, 9223372036854775808, 32768, 128, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
+ f"insert into {dbname}.sub1_bound values ( now()+1s, 2147483648, 9223372036854775808, 32768, 128, 3.40E+38, 1.7e+308, True, 'binary_tb1', 'nchar_tb1', now() )"
)
- tdSql.query("select unique(c2) from sub1_bound order by 1 desc")
+ tdSql.query(f"select unique(c2) from {dbname}.sub1_bound order by 1 desc")
tdSql.checkRows(5)
tdSql.checkData(0,0,9223372036854775807)
diff --git a/tests/system-test/2-query/upper.py b/tests/system-test/2-query/upper.py
index bb485161dd12885175c470e8b5542b1ab011f186..f15a6f3ba76d3acb5645f443cf068d4cce7d9755 100644
--- a/tests/system-test/2-query/upper.py
+++ b/tests/system-test/2-query/upper.py
@@ -95,16 +95,16 @@ class TDTestCase:
return sqls
- def __test_current(self):
+ def __test_current(self, dbname="db"):
tdLog.printNoPrefix("==========current sql condition check , must return query ok==========")
- tbname = ["ct1", "ct2", "ct4", "t1", "stb1"]
+ tbname = [f"{dbname}.ct1", f"{dbname}.ct2", f"{dbname}.ct4", f"{dbname}.t1", f"{dbname}.stb1"]
for tb in tbname:
self.__upper_current_check(tb)
tdLog.printNoPrefix(f"==========current sql condition check in {tb} over==========")
- def __test_error(self):
+ def __test_error(self, dbname="db"):
tdLog.printNoPrefix("==========err sql condition check , must return error==========")
- tbname = ["ct1", "ct2", "ct4", "t1", "stb1"]
+ tbname = [f"{dbname}.ct1", f"{dbname}.ct2", f"{dbname}.ct4", f"{dbname}.t1", f"{dbname}.stb1"]
for tb in tbname:
for errsql in self.__upper_err_check(tb):
@@ -112,22 +112,20 @@ class TDTestCase:
tdLog.printNoPrefix(f"==========err sql condition check in {tb} over==========")
- def all_test(self):
- self.__test_current()
- self.__test_error()
+ def all_test(self, dbname="db"):
+ self.__test_current(dbname)
+ self.__test_error(dbname)
-
- def __create_tb(self):
- tdSql.prepare()
+ def __create_tb(self, dbname="db"):
tdLog.printNoPrefix("==========step1:create table")
- create_stb_sql = f'''create table stb1(
+ create_stb_sql = f'''create table {dbname}.stb1(
ts timestamp, {INT_COL} int, {BINT_COL} bigint, {SINT_COL} smallint, {TINT_COL} tinyint,
{FLOAT_COL} float, {DOUBLE_COL} double, {BOOL_COL} bool,
{BINARY_COL} binary(16), {NCHAR_COL} nchar(32), {TS_COL} timestamp
- ) tags (t1 int)
+ ) tags (tag1 int)
'''
- create_ntb_sql = f'''create table t1(
+ create_ntb_sql = f'''create table {dbname}.t1(
ts timestamp, {INT_COL} int, {BINT_COL} bigint, {SINT_COL} smallint, {TINT_COL} tinyint,
{FLOAT_COL} float, {DOUBLE_COL} double, {BOOL_COL} bool,
{BINARY_COL} binary(16), {NCHAR_COL} nchar(32), {TS_COL} timestamp
@@ -137,83 +135,82 @@ class TDTestCase:
tdSql.execute(create_ntb_sql)
for i in range(4):
- tdSql.execute(f'create table ct{i+1} using stb1 tags ( {i+1} )')
+ tdSql.execute(f'create table {dbname}.ct{i+1} using {dbname}.stb1 tags ( {i+1} )')
- def __insert_data(self, rows):
+ def __insert_data(self, rows, dbname="db"):
now_time = int(datetime.datetime.timestamp(datetime.datetime.now()) * 1000)
for i in range(rows):
tdSql.execute(
- f"insert into ct1 values ( { now_time - i * 1000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar{i}', { now_time + 1 * i } )"
+ f"insert into {dbname}.ct1 values ( { now_time - i * 1000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )"
)
tdSql.execute(
- f"insert into ct4 values ( { now_time - i * 7776000000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar{i}', { now_time + 1 * i } )"
+ f"insert into {dbname}.ct4 values ( { now_time - i * 7776000000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )"
)
tdSql.execute(
- f"insert into ct2 values ( { now_time - i * 7776000000 }, {-i}, {-11111 * i}, {-111 * i % 32767 }, {-11 * i % 127}, {-1.11*i}, {-1100.0011*i}, {i%2}, 'binary{i}', 'nchar{i}', { now_time + 1 * i } )"
+ f"insert into {dbname}.ct2 values ( { now_time - i * 7776000000 }, {-i}, {-11111 * i}, {-111 * i % 32767 }, {-11 * i % 127}, {-1.11*i}, {-1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )"
)
tdSql.execute(
- f'''insert into ct1 values
- ( { now_time - rows * 5 }, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar0', { now_time + 8 } )
- ( { now_time + 10000 }, { rows }, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar9', { now_time + 9 } )
+ f'''insert into {dbname}.ct1 values
+ ( { now_time - rows * 5 }, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar_测试_0', { now_time + 8 } )
+ ( { now_time + 10000 }, { rows }, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar_测试_9', { now_time + 9 } )
'''
)
tdSql.execute(
- f'''insert into ct4 values
+ f'''insert into {dbname}.ct4 values
( { now_time - rows * 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
- ( { now_time - rows * 3888000000+ 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
+ ( { now_time - rows * 3888000000 + 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( { now_time + 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
(
{ now_time + 5184000000}, {pow(2,31)-pow(2,15)}, {pow(2,63)-pow(2,30)}, 32767, 127,
- { 3.3 * pow(10,38) }, { 1.3 * pow(10,308) }, { rows % 2 }, "binary_limit-1", "nchar_limit-1", { now_time - 86400000}
+ { 3.3 * pow(10,38) }, { 1.3 * pow(10,308) }, { rows % 2 }, "binary_limit-1", "nchar_测试_limit-1", { now_time - 86400000}
)
(
{ now_time + 2592000000 }, {pow(2,31)-pow(2,16)}, {pow(2,63)-pow(2,31)}, 32766, 126,
- { 3.2 * pow(10,38) }, { 1.2 * pow(10,308) }, { (rows-1) % 2 }, "binary_limit-2", "nchar_limit-2", { now_time - 172800000}
+ { 3.2 * pow(10,38) }, { 1.2 * pow(10,308) }, { (rows-1) % 2 }, "binary_limit-2", "nchar_测试_limit-2", { now_time - 172800000}
)
'''
)
tdSql.execute(
- f'''insert into ct2 values
+ f'''insert into {dbname}.ct2 values
( { now_time - rows * 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
- ( { now_time - rows * 3888000000+ 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
+ ( { now_time - rows * 3888000000 + 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( { now_time + 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
(
{ now_time + 5184000000 }, { -1 * pow(2,31) + pow(2,15) }, { -1 * pow(2,63) + pow(2,30) }, -32766, -126,
- { -1 * 3.2 * pow(10,38) }, { -1.2 * pow(10,308) }, { rows % 2 }, "binary_limit-1", "nchar_limit-1", { now_time - 86400000 }
+ { -1 * 3.2 * pow(10,38) }, { -1.2 * pow(10,308) }, { rows % 2 }, "binary_limit-1", "nchar_测试_limit-1", { now_time - 86400000 }
)
(
{ now_time + 2592000000 }, { -1 * pow(2,31) + pow(2,16) }, { -1 * pow(2,63) + pow(2,31) }, -32767, -127,
- { - 3.3 * pow(10,38) }, { -1.3 * pow(10,308) }, { (rows-1) % 2 }, "binary_limit-2", "nchar_limit-2", { now_time - 172800000 }
+ { - 3.3 * pow(10,38) }, { -1.3 * pow(10,308) }, { (rows-1) % 2 }, "binary_limit-2", "nchar_测试_limit-2", { now_time - 172800000 }
)
'''
)
for i in range(rows):
- insert_data = f'''insert into t1 values
+ insert_data = f'''insert into {dbname}.t1 values
( { now_time - i * 3600000 }, {i}, {i * 11111}, { i % 32767 }, { i % 127}, { i * 1.11111 }, { i * 1000.1111 }, { i % 2},
- "binary_{i}", "nchar_{i}", { now_time - 1000 * i } )
+ "binary_{i}", "nchar_测试_{i}", { now_time - 1000 * i } )
'''
tdSql.execute(insert_data)
tdSql.execute(
- f'''insert into t1 values
+ f'''insert into {dbname}.t1 values
( { now_time + 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( { now_time - (( rows // 2 ) * 60 + 30) * 60000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( { now_time - rows * 3600000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( { now_time + 7200000 }, { pow(2,31) - pow(2,15) }, { pow(2,63) - pow(2,30) }, 32767, 127,
{ 3.3 * pow(10,38) }, { 1.3 * pow(10,308) }, { rows % 2 },
- "binary_limit-1", "nchar_limit-1", { now_time - 86400000 }
+ "binary_limit-1", "nchar_测试_limit-1", { now_time - 86400000 }
)
(
{ now_time + 3600000 } , { pow(2,31) - pow(2,16) }, { pow(2,63) - pow(2,31) }, 32766, 126,
{ 3.2 * pow(10,38) }, { 1.2 * pow(10,308) }, { (rows-1) % 2 },
- "binary_limit-2", "nchar_limit-2", { now_time - 172800000 }
+ "binary_limit-2", "nchar_测试_limit-2", { now_time - 172800000 }
)
'''
)
-
def run(self):
tdSql.prepare()
@@ -226,8 +223,7 @@ class TDTestCase:
tdLog.printNoPrefix("==========step3:all check")
self.all_test()
- tdDnodes.stop(1)
- tdDnodes.start(1)
+ tdSql.execute("flush database db")
tdSql.execute("use db")
diff --git a/tests/system-test/2-query/varchar.py b/tests/system-test/2-query/varchar.py
index 5cc6c8e39965453c646cb267774564af1a66f42d..17c3ea633357cf16a8b17e52c180192d07e52a87 100644
--- a/tests/system-test/2-query/varchar.py
+++ b/tests/system-test/2-query/varchar.py
@@ -14,43 +14,44 @@ class TDTestCase:
tdSql.init(conn.cursor())
def run(self): # sourcery skip: extract-duplicate-method, remove-redundant-fstring
+ dbname = "db"
tdSql.prepare()
tdLog.printNoPrefix("==========step1:create table")
tdSql.execute(
- '''create table stb1
+ f'''create table {dbname}.stb1
(ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 varchar(16),c9 nchar(32), c10 timestamp)
tags (t1 int)
'''
)
tdSql.execute(
- '''
- create table t1
+ f'''
+ create table {dbname}.t1
(ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 varchar(16),c9 nchar(32), c10 timestamp)
'''
)
for i in range(4):
- tdSql.execute(f'create table ct{i+1} using stb1 tags ( {i+1} )')
+ tdSql.execute(f'create table {dbname}.ct{i+1} using {dbname}.stb1 tags ( {i+1} )')
tdLog.printNoPrefix("==========step2:insert data")
for i in range(9):
tdSql.execute(
- f"insert into ct1 values ( now()-{i*10}s, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'varchar{i}', 'nchar{i}', now()+{1*i}a )"
+ f"insert into {dbname}.ct1 values ( now()-{i*10}s, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'varchar{i}', 'nchar{i}', now()+{1*i}a )"
)
tdSql.execute(
- f"insert into ct4 values ( now()-{i*90}d, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'varchar{i}', 'nchar{i}', now()+{1*i}a )"
+ f"insert into {dbname}.ct4 values ( now()-{i*90}d, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'varchar{i}', 'nchar{i}', now()+{1*i}a )"
)
- tdSql.execute("insert into ct1 values (now()-45s, 0, 0, 0, 0, 0, 0, 0, 'varchar0', 'nchar0', now()+8a )")
- tdSql.execute("insert into ct1 values (now()+10s, 9, -99999, -999, -99, -9.99, -99.99, 1, 'varchar9', 'nchar9', now()+9a )")
+ tdSql.execute(f"insert into {dbname}.ct1 values (now()-45s, 0, 0, 0, 0, 0, 0, 0, 'varchar0', 'nchar0', now()+8a )")
+ tdSql.execute(f"insert into {dbname}.ct1 values (now()+10s, 9, -99999, -999, -99, -9.99, -99.99, 1, 'varchar9', 'nchar9', now()+9a )")
- tdSql.execute("insert into ct4 values (now()-810d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
- tdSql.execute("insert into ct4 values (now()-400d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
- tdSql.execute("insert into ct4 values (now()+90d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
+ tdSql.execute(f"insert into {dbname}.ct4 values (now()-810d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
+ tdSql.execute(f"insert into {dbname}.ct4 values (now()-400d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
+ tdSql.execute(f"insert into {dbname}.ct4 values (now()+90d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
tdSql.execute(
- f'''insert into t1 values
+ f'''insert into {dbname}.t1 values
( '2020-04-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
( '2020-10-21 01:01:01.000', 1, 11111, 111, 11, 1.11, 11.11, 1, "varchar1", "nchar1", now()+1a )
( '2020-12-31 01:01:01.000', 2, 22222, 222, 22, 2.22, 22.22, 0, "varchar2", "nchar2", now()+2a )
@@ -70,7 +71,7 @@ class TDTestCase:
tdLog.printNoPrefix("==========step3: cast on varchar")
- tdSql.query("select c8 from ct1")
+ tdSql.query(f"select c8 from {dbname}.ct1")
for i in range(tdSql.queryRows):
tdSql.checkData(i,0, data_ct1_c8[i])
diff --git a/tests/system-test/7-tmq/stbTagFilter-1ctb.py b/tests/system-test/7-tmq/stbTagFilter-1ctb.py
index 6a26d2ce1f38774b2d63031c518883641c23f864..6cb152342be5c80b5f755d0b3f2f7e7bf1c7894a 100644
--- a/tests/system-test/7-tmq/stbTagFilter-1ctb.py
+++ b/tests/system-test/7-tmq/stbTagFilter-1ctb.py
@@ -259,7 +259,6 @@ class TDTestCase:
self.tmqCase1()
self.tmqCase2()
-
def stop(self):
tdSql.close()
tdLog.success(f"{__file__} successfully executed")
diff --git a/tests/system-test/7-tmq/tmqDropNtb-snapshot1.py b/tests/system-test/7-tmq/tmqDropNtb-snapshot1.py
index 20e363341f914b66e5ba73f0d5521b393e5743f1..4cb208b616097815ce8dfb099854c5c936fcf08c 100644
--- a/tests/system-test/7-tmq/tmqDropNtb-snapshot1.py
+++ b/tests/system-test/7-tmq/tmqDropNtb-snapshot1.py
@@ -99,8 +99,8 @@ class TDTestCase:
tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt))
- if not ((totalConsumeRows >= expectrowcnt * 3/4) and (totalConsumeRows < expectrowcnt)):
- tdLog.exit("tmq consume rows error with snapshot = 0!")
+ # if not ((totalConsumeRows >= expectrowcnt * 3/4) and (totalConsumeRows < expectrowcnt)):
+ # tdLog.exit("tmq consume rows error with snapshot = 0!")
tdLog.info("wait subscriptions exit ....")
tmqCom.waitSubscriptionExit(tdSql, topicFromDb)
@@ -192,8 +192,8 @@ class TDTestCase:
tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt))
- if not ((totalConsumeRows >= expectrowcnt / 2 * (1 + 3/4)) and (totalConsumeRows < expectrowcnt)):
- tdLog.exit("tmq consume rows error with snapshot = 0!")
+ # if not ((totalConsumeRows >= expectrowcnt / 2 * (1 + 3/4)) and (totalConsumeRows < expectrowcnt)):
+ # tdLog.exit("tmq consume rows error with snapshot = 0!")
tdLog.info("wait subscriptions exit ....")
tmqCom.waitSubscriptionExit(tdSql, topicFromDb)
diff --git a/tests/system-test/7-tmq/tmqDropStbCtb.py b/tests/system-test/7-tmq/tmqDropStbCtb.py
index 992a128ac077a35708a1ef123ba61bf3352feb78..704811d083c47db53592cce8db85c71733a29057 100644
--- a/tests/system-test/7-tmq/tmqDropStbCtb.py
+++ b/tests/system-test/7-tmq/tmqDropStbCtb.py
@@ -155,8 +155,9 @@ class TDTestCase:
tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt))
- if not ((totalConsumeRows > expectrowcnt / 2) and (totalConsumeRows < expectrowcnt)):
- tdLog.exit("tmq consume rows error with snapshot = 0!")
+ if self.snapshot == 0:
+ if not ((totalConsumeRows > expectrowcnt / 2) and (totalConsumeRows < expectrowcnt)):
+ tdLog.exit("tmq consume rows error with snapshot = 0!")
tdLog.info("wait subscriptions exit ....")
tmqCom.waitSubscriptionExit(tdSql, topicFromDb)
@@ -246,8 +247,9 @@ class TDTestCase:
tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt))
- if not ((totalConsumeRows > expectrowcnt / 2) and (totalConsumeRows < expectrowcnt)):
- tdLog.exit("tmq consume rows error with snapshot = 0!")
+ if self.snapshot == 0:
+ if not ((totalConsumeRows > expectrowcnt / 2) and (totalConsumeRows < expectrowcnt)):
+ tdLog.exit("tmq consume rows error with snapshot = 0!")
tdLog.info("wait subscriptions exit ....")
tmqCom.waitSubscriptionExit(tdSql, topicFromDb)
diff --git a/tests/system-test/7-tmq/tmqShow.py b/tests/system-test/7-tmq/tmqShow.py
index 6f8183bf06cfa501f62c22c82c2915638ea7414b..c0f33d92049efe6eceffd01353e3bedc2c406ee9 100644
--- a/tests/system-test/7-tmq/tmqShow.py
+++ b/tests/system-test/7-tmq/tmqShow.py
@@ -19,6 +19,11 @@ class TDTestCase:
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor())
#tdSql.init(conn.cursor(), logSql) # output sql.txt file
+ def insertConsumerInfo(self,consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifmanualcommit,offset=1,cdbName='cdb'):
+ sql = "insert into %s.consumeinfo values "%cdbName
+ sql += "(now+%ds, %d, '%s', '%s', %d, %d, %d)"%(offset,consumerId, topicList, keyList, expectrowcnt, ifcheckdata, ifmanualcommit)
+ tdLog.info("consume info sql: %s"%sql)
+ tdSql.query(sql)
def tmqCase1(self):
tdLog.printNoPrefix("======== test case 1: ")
@@ -95,19 +100,23 @@ class TDTestCase:
ifcheckdata = 0
ifManualCommit = 0
keyList = 'group.id:%s, enable.auto.commit:false, auto.commit.interval.ms:6000, auto.offset.reset:earliest'%consumeGroupIdList[0]
- tmqCom.insertConsumerInfo(consumerIdList[0], expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit)
+ tsOffset=1
+ self.insertConsumerInfo(consumerIdList[0], expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit,tsOffset)
topicList = topicNameList[1]
keyList = 'group.id:%s, enable.auto.commit:false, auto.commit.interval.ms:6000, auto.offset.reset:earliest'%consumeGroupIdList[1]
- tmqCom.insertConsumerInfo(consumerIdList[1], expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit)
+ tsOffset=2
+ self.insertConsumerInfo(consumerIdList[1], expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit,tsOffset)
topicList = topicNameList[2]
keyList = 'group.id:%s, enable.auto.commit:false, auto.commit.interval.ms:6000, auto.offset.reset:earliest'%consumeGroupIdList[2]
- tmqCom.insertConsumerInfo(consumerIdList[2], expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit)
+ tsOffset=3
+ self.insertConsumerInfo(consumerIdList[2], expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit,tsOffset)
topicList = topicNameList[3]
keyList = 'group.id:%s, enable.auto.commit:false, auto.commit.interval.ms:6000, auto.offset.reset:earliest'%consumeGroupIdList[3]
- tmqCom.insertConsumerInfo(consumerIdList[3], expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit)
+ tsOffset=4
+ self.insertConsumerInfo(consumerIdList[3], expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit,tsOffset)
tdLog.info("start consume processor")
tmqCom.startTmqSimProcess(paraDict['pollDelay'],paraDict["dbName"],paraDict['showMsg'], paraDict['showRow'])
diff --git a/tests/system-test/7-tmq/tmq_taosx.py b/tests/system-test/7-tmq/tmq_taosx.py
index cd13535684501d98673923254c7fe83adc432851..07602ec29f69f9fbd0dab90935e0922996c80f80 100644
--- a/tests/system-test/7-tmq/tmq_taosx.py
+++ b/tests/system-test/7-tmq/tmq_taosx.py
@@ -20,15 +20,9 @@ class TDTestCase:
tdSql.init(conn.cursor())
#tdSql.init(conn.cursor(), logSql) # output sql.txt file
- def checkFileContent(self):
- buildPath = tdCom.getBuildPath()
- cfgPath = tdCom.getClientCfgPath()
- cmdStr = '%s/build/bin/tmq_taosx_ci -c %s'%(buildPath, cfgPath)
- tdLog.info(cmdStr)
- os.system(cmdStr)
-
- srcFile = '%s/../log/tmq_taosx_tmp.source'%(cfgPath)
- dstFile = '%s/../log/tmq_taosx_tmp.result'%(cfgPath)
+ def checkJson(self, cfgPath, name):
+ srcFile = '%s/../log/%s.source'%(cfgPath, name)
+ dstFile = '%s/../log/%s.result'%(cfgPath, name)
tdLog.info("compare file: %s, %s"%(srcFile, dstFile))
consumeFile = open(srcFile, mode='r')
@@ -43,7 +37,31 @@ class TDTestCase:
tdLog.exit("compare error: %s != %s"%src, dst)
else:
break
+ return
+ def checkDropData(self):
+ tdSql.execute('use db_taosx')
+ tdSql.query("show tables")
+ tdSql.checkRows(2)
+ tdSql.query("select * from jt order by i")
+ tdSql.checkRows(2)
+ tdSql.checkData(0, 1, 1)
+ tdSql.checkData(1, 1, 11)
+ tdSql.checkData(0, 2, '{"k1":1,"k2":"hello"}')
+ tdSql.checkData(1, 2, None)
+
+ tdSql.execute('use abc1')
+ tdSql.query("show tables")
+ tdSql.checkRows(2)
+ tdSql.query("select * from jt order by i")
+ tdSql.checkRows(2)
+ tdSql.checkData(0, 1, 1)
+ tdSql.checkData(1, 1, 11)
+ tdSql.checkData(0, 2, '{"k1":1,"k2":"hello"}')
+ tdSql.checkData(1, 2, None)
+ return
+
+ def checkData(self):
tdSql.execute('use db_taosx')
tdSql.query("select * from ct3 order by c1 desc")
tdSql.checkRows(2)
@@ -52,6 +70,48 @@ class TDTestCase:
tdSql.checkData(1, 1, 23)
tdSql.checkData(1, 4, None)
+ tdSql.query("select * from st1 order by ts")
+ tdSql.checkRows(8)
+ tdSql.checkData(0, 1, 1)
+ tdSql.checkData(1, 1, 3)
+ tdSql.checkData(4, 1, 4)
+ tdSql.checkData(6, 1, 23)
+
+ tdSql.checkData(0, 2, 2)
+ tdSql.checkData(1, 2, 4)
+ tdSql.checkData(4, 2, 3)
+ tdSql.checkData(6, 2, 32)
+
+ tdSql.checkData(0, 3, 'a')
+ tdSql.checkData(1, 3, 'b')
+ tdSql.checkData(4, 3, 'hwj')
+ tdSql.checkData(6, 3, 's21ds')
+
+ tdSql.checkData(0, 4, None)
+ tdSql.checkData(1, 4, None)
+ tdSql.checkData(5, 4, 940)
+ tdSql.checkData(6, 4, None)
+
+ tdSql.checkData(0, 5, 1000)
+ tdSql.checkData(1, 5, 2000)
+ tdSql.checkData(4, 5, 1000)
+ tdSql.checkData(6, 5, 5000)
+
+ tdSql.checkData(0, 6, 'ttt')
+ tdSql.checkData(1, 6, None)
+ tdSql.checkData(4, 6, 'ttt')
+ tdSql.checkData(6, 6, None)
+
+ tdSql.checkData(0, 7, True)
+ tdSql.checkData(1, 7, None)
+ tdSql.checkData(4, 7, True)
+ tdSql.checkData(6, 7, None)
+
+ tdSql.checkData(0, 8, None)
+ tdSql.checkData(1, 8, None)
+ tdSql.checkData(4, 8, None)
+ tdSql.checkData(6, 8, None)
+
tdSql.query("select * from ct1")
tdSql.checkRows(4)
@@ -74,12 +134,82 @@ class TDTestCase:
tdSql.checkData(0, 2, None)
tdSql.checkData(1, 1, 1)
tdSql.checkData(1, 2, '{"k1":1,"k2":"hello"}')
+ return
+
+ def checkWal1Vgroup(self):
+ buildPath = tdCom.getBuildPath()
+ cfgPath = tdCom.getClientCfgPath()
+ cmdStr = '%s/build/bin/tmq_taosx_ci -c %s -sv 1 -dv 1'%(buildPath, cfgPath)
+ tdLog.info(cmdStr)
+ os.system(cmdStr)
+
+ self.checkJson(cfgPath, "tmq_taosx_tmp")
+ self.checkData()
+
+ return
+
+ def checkWalMultiVgroups(self):
+ buildPath = tdCom.getBuildPath()
+ cmdStr = '%s/build/bin/tmq_taosx_ci -sv 3 -dv 5'%(buildPath)
+ tdLog.info(cmdStr)
+ os.system(cmdStr)
+
+ self.checkData()
+
+ return
+
+ def checkWalMultiVgroupsWithDropTable(self):
+ buildPath = tdCom.getBuildPath()
+ cmdStr = '%s/build/bin/tmq_taosx_ci -sv 3 -dv 5 -d'%(buildPath)
+ tdLog.info(cmdStr)
+ os.system(cmdStr)
+
+ self.checkDropData()
+
+ return
+
+ def checkSnapshot1Vgroup(self):
+ buildPath = tdCom.getBuildPath()
+ cfgPath = tdCom.getClientCfgPath()
+ cmdStr = '%s/build/bin/tmq_taosx_ci -c %s -sv 1 -dv 1 -s'%(buildPath, cfgPath)
+ tdLog.info(cmdStr)
+ os.system(cmdStr)
+
+ self.checkJson(cfgPath, "tmq_taosx_tmp_snapshot")
+ self.checkData()
+
+ return
+
+ def checkSnapshotMultiVgroups(self):
+ buildPath = tdCom.getBuildPath()
+ cmdStr = '%s/build/bin/tmq_taosx_ci -sv 2 -dv 4 -s'%(buildPath)
+ tdLog.info(cmdStr)
+ os.system(cmdStr)
+
+ self.checkData()
+
+ return
+
+ def checkSnapshotMultiVgroupsWithDropTable(self):
+ buildPath = tdCom.getBuildPath()
+ cmdStr = '%s/build/bin/tmq_taosx_ci -sv 2 -dv 4 -s -d'%(buildPath)
+ tdLog.info(cmdStr)
+ os.system(cmdStr)
+
+ self.checkDropData()
return
def run(self):
tdSql.prepare()
- self.checkFileContent()
+ self.checkWal1Vgroup()
+ self.checkSnapshot1Vgroup()
+
+ self.checkWalMultiVgroups()
+ self.checkSnapshotMultiVgroups()
+
+ self.checkWalMultiVgroupsWithDropTable()
+ self.checkSnapshotMultiVgroupsWithDropTable()
def stop(self):
tdSql.close()
diff --git a/tests/system-test/fulltest.sh b/tests/system-test/fulltest.sh
index 1e958bdb298507113f51b4ce717314df634dea17..8987ba3bbdda362e5bfc290b6dd736b9049606ad 100755
--- a/tests/system-test/fulltest.sh
+++ b/tests/system-test/fulltest.sh
@@ -124,44 +124,97 @@ python3 ./test.py -f 2-query/leastsquares.py
python3 ./test.py -f 2-query/leastsquares.py -R
python3 ./test.py -f 2-query/length.py
python3 ./test.py -f 2-query/length.py -R
+python3 ./test.py -f 2-query/log.py
+# python3 ./test.py -f 2-query/log.py -R
+python3 ./test.py -f 2-query/lower.py
+python3 ./test.py -f 2-query/lower.py -R
+python3 ./test.py -f 2-query/ltrim.py
+python3 ./test.py -f 2-query/ltrim.py -R
+python3 ./test.py -f 2-query/mavg.py
+python3 ./test.py -f 2-query/mavg.py -R
+python3 ./test.py -f 2-query/max_partition.py
+python3 ./test.py -f 2-query/max_partition.py -R
+python3 ./test.py -f 2-query/max.py
+python3 ./test.py -f 2-query/max.py -R
+python3 ./test.py -f 2-query/min.py
+python3 ./test.py -f 2-query/min.py -R
+python3 ./test.py -f 2-query/Now.py
+python3 ./test.py -f 2-query/Now.py -R
+python3 ./test.py -f 2-query/percentile.py
+python3 ./test.py -f 2-query/percentile.py -R
+python3 ./test.py -f 2-query/pow.py
+python3 ./test.py -f 2-query/pow.py -R
+python3 ./test.py -f 2-query/query_cols_tags_and_or.py
+python3 ./test.py -f 2-query/query_cols_tags_and_or.py -R
+python3 ./test.py -f 2-query/round.py
+python3 ./test.py -f 2-query/round.py -R
+python3 ./test.py -f 2-query/rtrim.py
+python3 ./test.py -f 2-query/rtrim.py -R
+python3 ./test.py -f 2-query/sample.py
+python3 ./test.py -f 2-query/sample.py -R
+python3 ./test.py -f 2-query/sin.py
+python3 ./test.py -f 2-query/sin.py -R
+python3 ./test.py -f 2-query/smaTest.py
+python3 ./test.py -f 2-query/smaTest.py -R
+#python3 ./test.py -f 2-query/sml.py
+#python3 ./test.py -f 2-query/sml.py -R
+python3 ./test.py -f 2-query/spread.py
+python3 ./test.py -f 2-query/spread.py -R
+python3 ./test.py -f 2-query/sqrt.py
+python3 ./test.py -f 2-query/sqrt.py -R
+python3 ./test.py -f 2-query/statecount.py
+python3 ./test.py -f 2-query/statecount.py -R
+python3 ./test.py -f 2-query/stateduration.py
+python3 ./test.py -f 2-query/stateduration.py -R
+python3 ./test.py -f 2-query/substr.py
+python3 ./test.py -f 2-query/substr.py -R
+python3 ./test.py -f 2-query/sum.py
+python3 ./test.py -f 2-query/sum.py -R
+python3 ./test.py -f 2-query/tail.py
+python3 ./test.py -f 2-query/tail.py -R
+python3 ./test.py -f 2-query/tan.py
+# python3 ./test.py -f 2-query/tan.py -R
+python3 ./test.py -f 2-query/Timediff.py
+python3 ./test.py -f 2-query/Timediff.py -R
+python3 ./test.py -f 2-query/timetruncate.py
+# python3 ./test.py -f 2-query/timetruncate.py -R
+python3 ./test.py -f 2-query/timezone.py
+python3 ./test.py -f 2-query/timezone.py -R
+python3 ./test.py -f 2-query/To_iso8601.py
+python3 ./test.py -f 2-query/To_iso8601.py -R
+python3 ./test.py -f 2-query/To_unixtimestamp.py
+python3 ./test.py -f 2-query/To_unixtimestamp.py -R
+python3 ./test.py -f 2-query/Today.py
+# python3 ./test.py -f 2-query/Today.py -R
+python3 ./test.py -f 2-query/top.py
+python3 ./test.py -f 2-query/top.py -R
+python3 ./test.py -f 2-query/tsbsQuery.py
+python3 ./test.py -f 2-query/tsbsQuery.py -R
+python3 ./test.py -f 2-query/ttl_comment.py
+python3 ./test.py -f 2-query/ttl_comment.py -R
+python3 ./test.py -f 2-query/twa.py
+python3 ./test.py -f 2-query/twa.py -R
+python3 ./test.py -f 2-query/union.py
+python3 ./test.py -f 2-query/union.py -R
+python3 ./test.py -f 2-query/unique.py
+python3 ./test.py -f 2-query/unique.py -R
+python3 ./test.py -f 2-query/upper.py
+python3 ./test.py -f 2-query/upper.py -R
+python3 ./test.py -f 2-query/varchar.py
+python3 ./test.py -f 2-query/varchar.py -R
+
python3 ./test.py -f 1-insert/update_data.py
python3 ./test.py -f 1-insert/delete_data.py
-python3 ./test.py -f 2-query/varchar.py
-python3 ./test.py -f 2-query/ltrim.py
-python3 ./test.py -f 2-query/rtrim.py
-python3 ./test.py -f 2-query/upper.py
-python3 ./test.py -f 2-query/lower.py
python3 ./test.py -f 2-query/join2.py
-python3 ./test.py -f 2-query/substr.py
-python3 ./test.py -f 2-query/union.py
python3 ./test.py -f 2-query/union1.py
python3 ./test.py -f 2-query/concat2.py
-python3 ./test.py -f 2-query/spread.py
-python3 ./test.py -f 2-query/timezone.py
-python3 ./test.py -f 2-query/Now.py
-python3 ./test.py -f 2-query/Today.py
-python3 ./test.py -f 2-query/max.py
-python3 ./test.py -f 2-query/min.py
-python3 ./test.py -f 2-query/To_iso8601.py
-python3 ./test.py -f 2-query/To_unixtimestamp.py
-python3 ./test.py -f 2-query/timetruncate.py
-python3 ./test.py -f 2-query/Timediff.py
python3 ./test.py -f 2-query/json_tag.py
-python3 ./test.py -f 2-query/top.py
-python3 ./test.py -f 2-query/percentile.py
-python3 ./test.py -f 2-query/round.py
-python3 ./test.py -f 2-query/log.py
-python3 ./test.py -f 2-query/pow.py
-python3 ./test.py -f 2-query/sqrt.py
-python3 ./test.py -f 2-query/sin.py
-python3 ./test.py -f 2-query/tan.py
-python3 ./test.py -f 2-query/query_cols_tags_and_or.py
# python3 ./test.py -f 2-query/nestedQuery.py
# TD-15983 subquery output duplicate name column.
# Please Xiangyang Guo modify the following script
@@ -169,18 +222,8 @@ python3 ./test.py -f 2-query/query_cols_tags_and_or.py
python3 ./test.py -f 2-query/elapsed.py
python3 ./test.py -f 2-query/csum.py
-python3 ./test.py -f 2-query/mavg.py
-python3 ./test.py -f 2-query/sample.py
python3 ./test.py -f 2-query/function_diff.py
-python3 ./test.py -f 2-query/unique.py
-python3 ./test.py -f 2-query/stateduration.py
-python3 ./test.py -f 2-query/statecount.py
-python3 ./test.py -f 2-query/tail.py
-python3 ./test.py -f 2-query/ttl_comment.py
-python3 ./test.py -f 2-query/twa.py
python3 ./test.py -f 2-query/queryQnode.py
-python3 ./test.py -f 2-query/max_partition.py
-python3 ./test.py -f 2-query/tsbsQuery.py
python3 ./test.py -f 6-cluster/5dnode1mnode.py
python3 ./test.py -f 6-cluster/5dnode2mnode.py -N 5 -M 3
@@ -271,8 +314,8 @@ python3 ./test.py -f 7-tmq/tmqConsFromTsdb-mutilVg-mutilCtb.py
python3 ./test.py -f 7-tmq/tmqConsFromTsdb1-1ctb-funcNFilter.py
python3 ./test.py -f 7-tmq/tmqConsFromTsdb1-mutilVg-mutilCtb-funcNFilter.py
python3 ./test.py -f 7-tmq/tmqConsFromTsdb1-mutilVg-mutilCtb.py
-#python3 ./test.py -f 7-tmq/tmqAutoCreateTbl.py
-#python3 ./test.py -f 7-tmq/tmqDnodeRestart.py
+python3 ./test.py -f 7-tmq/tmqAutoCreateTbl.py
+python3 ./test.py -f 7-tmq/tmqDnodeRestart.py
python3 ./test.py -f 7-tmq/tmqUpdate-1ctb.py
python3 ./test.py -f 7-tmq/tmqUpdateWithConsume.py
python3 ./test.py -f 7-tmq/tmqUpdate-multiCtb-snapshot0.py
@@ -290,7 +333,7 @@ python3 ./test.py -f 7-tmq/stbTagFilter-1ctb.py
python3 ./test.py -f 7-tmq/dataFromTsdbNWal.py
python3 ./test.py -f 7-tmq/dataFromTsdbNWal-multiCtb.py
python3 ./test.py -f 7-tmq/tmq_taosx.py
-# python3 ./test.py -f 7-tmq/stbTagFilter-multiCtb.py
+python3 ./test.py -f 7-tmq/stbTagFilter-multiCtb.py
#------------querPolicy 2-----------
@@ -358,7 +401,7 @@ python3 ./test.py -f 2-query/interp.py -Q 2
python3 ./test.py -f 2-query/avg.py -Q 2
# python3 ./test.py -f 2-query/elapsed.py -Q 2
python3 ./test.py -f 2-query/csum.py -Q 2
-python3 ./test.py -f 2-query/mavg.py -Q 2
+#python3 ./test.py -f 2-query/mavg.py -Q 2
python3 ./test.py -f 2-query/sample.py -Q 2
python3 ./test.py -f 2-query/function_diff.py -Q 2
python3 ./test.py -f 2-query/unique.py -Q 2
@@ -445,7 +488,7 @@ python3 ./test.py -f 2-query/query_cols_tags_and_or.py -Q 3
# python3 ./test.py -f 2-query/avg.py -Q 3
# python3 ./test.py -f 2-query/elapsed.py -Q 3
python3 ./test.py -f 2-query/csum.py -Q 3
-python3 ./test.py -f 2-query/mavg.py -Q 3
+#python3 ./test.py -f 2-query/mavg.py -Q 3
python3 ./test.py -f 2-query/sample.py -Q 3
python3 ./test.py -f 2-query/function_diff.py -Q 3
python3 ./test.py -f 2-query/unique.py -Q 3
@@ -469,5 +512,6 @@ python3 ./test.py -f 2-query/count_partition.py -Q 3
python3 ./test.py -f 2-query/max_partition.py -Q 3
python3 ./test.py -f 2-query/last_row.py -Q 3
python3 ./test.py -f 2-query/tsbsQuery.py -Q 3
-python3 ./test.py -f 2-query/sml.py -Q 3
+#python3 ./test.py -f 2-query/sml.py -Q 3
python3 ./test.py -f 2-query/interp.py -Q 3
+
diff --git a/tools/CMakeLists.txt b/tools/CMakeLists.txt
index d27fb99ef7cddc4e2c61b60f8fb0da6b4ddc4d0d..03097e31b9d65c48cdc543524d12dc0a80e2c6b3 100644
--- a/tools/CMakeLists.txt
+++ b/tools/CMakeLists.txt
@@ -82,6 +82,7 @@ ELSE ()
COMMAND cd ${CMAKE_CURRENT_SOURCE_DIR}/taosadapter
)
EXECUTE_PROCESS(
+ WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}/taosadapter
COMMAND git rev-parse --short HEAD
RESULT_VARIABLE commit_sha1
OUTPUT_VARIABLE taosadapter_commit_sha1
@@ -118,8 +119,8 @@ ELSE ()
PATCH_COMMAND
COMMAND git clean -f -d
BUILD_COMMAND
- COMMAND CGO_CFLAGS=-I${CMAKE_CURRENT_SOURCE_DIR}/../include/client CGO_LDFLAGS=-L${CMAKE_BINARY_DIR}/build/lib go build -a -ldflags "-s -w -X github.com/taosdata/taosadapter/version.Version=${taos_version} -X github.com/taosdata/taosadapter/version.CommitID=${taosadapter_commit_sha1}"
- COMMAND CGO_CFLAGS=-I${CMAKE_CURRENT_SOURCE_DIR}/../include/client CGO_LDFLAGS=-L${CMAKE_BINARY_DIR}/build/lib go build -a -o taosadapter-debug -ldflags "-X github.com/taosdata/taosadapter/version.Version=${taos_version} -X github.com/taosdata/taosadapter/version.CommitID=${taosadapter_commit_sha1}"
+ COMMAND CGO_CFLAGS=-I${CMAKE_CURRENT_SOURCE_DIR}/../include/client CGO_LDFLAGS=-L${CMAKE_BINARY_DIR}/build/lib go build -a -ldflags "-s -w -X github.com/taosdata/taosadapter/v3/version.Version=${taos_version} -X github.com/taosdata/taosadapter/v3/version.CommitID=${taosadapter_commit_sha1}"
+ COMMAND CGO_CFLAGS=-I${CMAKE_CURRENT_SOURCE_DIR}/../include/client CGO_LDFLAGS=-L${CMAKE_BINARY_DIR}/build/lib go build -a -o taosadapter-debug -ldflags "-X github.com/taosdata/taosadapter/v3/version.Version=${taos_version} -X github.com/taosdata/taosadapter/v3/version.CommitID=${taosadapter_commit_sha1}"
INSTALL_COMMAND
COMMAND ${_upx_prefix}/src/upx/upx taosadapter
COMMAND cmake -E copy taosadapter ${CMAKE_BINARY_DIR}/build/bin
@@ -141,8 +142,8 @@ ELSE ()
PATCH_COMMAND
COMMAND git clean -f -d
BUILD_COMMAND
- COMMAND CGO_CFLAGS=-I${CMAKE_CURRENT_SOURCE_DIR}/../include/client CGO_LDFLAGS=-L${CMAKE_BINARY_DIR}/build/lib go build -a -ldflags "-s -w -X github.com/taosdata/taosadapter/version.Version=${taos_version} -X github.com/taosdata/taosadapter/version.CommitID=${taosadapter_commit_sha1}"
- COMMAND CGO_CFLAGS=-I${CMAKE_CURRENT_SOURCE_DIR}/../include/client CGO_LDFLAGS=-L${CMAKE_BINARY_DIR}/build/lib go build -a -o taosadapter-debug -ldflags "-X github.com/taosdata/taosadapter/version.Version=${taos_version} -X github.com/taosdata/taosadapter/version.CommitID=${taosadapter_commit_sha1}"
+ COMMAND CGO_CFLAGS=-I${CMAKE_CURRENT_SOURCE_DIR}/../include/client CGO_LDFLAGS=-L${CMAKE_BINARY_DIR}/build/lib go build -a -ldflags "-s -w -X github.com/taosdata/taosadapter/v3/version.Version=${taos_version} -X github.com/taosdata/taosadapter/v3/version.CommitID=${taosadapter_commit_sha1}"
+ COMMAND CGO_CFLAGS=-I${CMAKE_CURRENT_SOURCE_DIR}/../include/client CGO_LDFLAGS=-L${CMAKE_BINARY_DIR}/build/lib go build -a -o taosadapter-debug -ldflags "-X github.com/taosdata/taosadapter/v3/version.Version=${taos_version} -X github.com/taosdata/taosadapter/v3/version.CommitID=${taosadapter_commit_sha1}"
INSTALL_COMMAND
COMMAND cmake -E copy taosadapter ${CMAKE_BINARY_DIR}/build/bin
COMMAND cmake -E make_directory ${CMAKE_BINARY_DIR}/test/cfg/
@@ -174,8 +175,8 @@ ELSE ()
BUILD_COMMAND
COMMAND set CGO_CFLAGS=-I${CMAKE_CURRENT_SOURCE_DIR}/../include/client
COMMAND set CGO_LDFLAGS=-L${CMAKE_BINARY_DIR}/build/lib
- COMMAND go build -a -o taosadapter.exe -ldflags "-s -w -X github.com/taosdata/taosadapter/version.Version=${taos_version} -X github.com/taosdata/taosadapter/version.CommitID=${taosadapter_commit_sha1}"
- COMMAND go build -a -o taosadapter-debug.exe -ldflags "-X github.com/taosdata/taosadapter/version.Version=${taos_version} -X github.com/taosdata/taosadapter/version.CommitID=${taosadapter_commit_sha1}"
+ COMMAND go build -a -o taosadapter.exe -ldflags "-s -w -X github.com/taosdata/taosadapter/v3/version.Version=${taos_version} -X github.com/taosdata/taosadapter/v3/version.CommitID=${taosadapter_commit_sha1}"
+ COMMAND go build -a -o taosadapter-debug.exe -ldflags "-X github.com/taosdata/taosadapter/v3/version.Version=${taos_version} -X github.com/taosdata/taosadapter/v3/version.CommitID=${taosadapter_commit_sha1}"
INSTALL_COMMAND
COMMAND ${_upx_prefix}/src/upx/upx taosadapter.exe
COMMAND cmake -E copy taosadapter.exe ${CMAKE_BINARY_DIR}/build/bin
diff --git a/tools/shell/inc/shellInt.h b/tools/shell/inc/shellInt.h
index 26ca6895ace188257ad9b16642cfe1f09bc792b4..15f6f6dc6a362c8c94994727fe19fa090ca94c57 100644
--- a/tools/shell/inc/shellInt.h
+++ b/tools/shell/inc/shellInt.h
@@ -113,7 +113,7 @@ int32_t shellExecute();
int32_t shellCalcColWidth(TAOS_FIELD *field, int32_t precision);
void shellPrintHeader(TAOS_FIELD *fields, int32_t *width, int32_t num_fields);
void shellPrintField(const char *val, TAOS_FIELD *field, int32_t width, int32_t length, int32_t precision);
-void shellDumpFieldToFile(TdFilePtr pFile, const char *val, TAOS_FIELD *field, int32_t length, int32_t precision);
+void shellDumpFieldToFile(TdFilePtr pFile, const char *val, TAOS_FIELD *field, int32_t length, int32_t precision);
// shellUtil.c
int32_t shellCheckIntSize();
void shellPrintVersion();
diff --git a/tools/shell/src/shellCommand.c b/tools/shell/src/shellCommand.c
index d87e10fd0897aaeea8a203d7b3d26e1fa02425cf..b73317e991042f6ce96a470ca9325cc2754fe47a 100644
--- a/tools/shell/src/shellCommand.c
+++ b/tools/shell/src/shellCommand.c
@@ -510,7 +510,10 @@ int32_t shellReadCommand(char *command) {
shellClearLineAfter(&cmd);
break;
case 12: // Ctrl + L;
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-result"
system("clear");
+#pragma GCC diagnostic pop
shellShowOnScreen(&cmd);
break;
case 21: // Ctrl + U;
diff --git a/tools/shell/src/shellEngine.c b/tools/shell/src/shellEngine.c
index 68e3a272c33d2b97da02d5a27561de2b8dd5fa6f..45d5489803fb5a0f7ec5506320d9e21257c8281b 100644
--- a/tools/shell/src/shellEngine.c
+++ b/tools/shell/src/shellEngine.c
@@ -62,7 +62,10 @@ int32_t shellRunSingleCommand(char *command) {
}
if (shellRegexMatch(command, "^[\t ]*clear[ \t;]*$", REG_EXTENDED | REG_ICASE)) {
- system("clear");
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-result"
+ system("clear");
+#pragma GCC diagnostic pop
return 0;
}
@@ -266,7 +269,6 @@ char *shellFormatTimestamp(char *buf, int64_t val, int32_t precision) {
void shellDumpFieldToFile(TdFilePtr pFile, const char *val, TAOS_FIELD *field, int32_t length, int32_t precision) {
if (val == NULL) {
- taosFprintfFile(pFile, "%s", TSDB_DATA_NULL_STR);
return;
}
@@ -314,13 +316,34 @@ void shellDumpFieldToFile(TdFilePtr pFile, const char *val, TAOS_FIELD *field, i
case TSDB_DATA_TYPE_BINARY:
case TSDB_DATA_TYPE_NCHAR:
case TSDB_DATA_TYPE_JSON:
- memcpy(buf, val, length);
- buf[length] = 0;
- taosFprintfFile(pFile, "\'%s\'", buf);
+ {
+ char quotationStr[2];
+ int32_t bufIndex = 0;
+ quotationStr[0] = 0;
+ quotationStr[1] = 0;
+ for (int32_t i = 0; i < length; i++) {
+ buf[bufIndex] = val[i];
+ bufIndex++;
+ if (val[i] == '\"') {
+ buf[bufIndex] = val[i];
+ bufIndex++;
+ quotationStr[0] = '\"';
+ }
+ if (val[i] == ',') {
+ quotationStr[0] = '\"';
+ }
+ }
+ buf[bufIndex] = 0;
+ if (length == 0) {
+ quotationStr[0] = '\"';
+ }
+
+ taosFprintfFile(pFile, "%s%s%s", quotationStr, buf, quotationStr);
+ }
break;
case TSDB_DATA_TYPE_TIMESTAMP:
shellFormatTimestamp(buf, *(int64_t *)val, precision);
- taosFprintfFile(pFile, "'%s'", buf);
+ taosFprintfFile(pFile, "%s", buf);
break;
default:
break;
diff --git a/tools/shell/src/shellWebsocket.c b/tools/shell/src/shellWebsocket.c
index 2dcab04b3f4bfd072d766ff1e25c015cf609466f..b8b8392b961a92263f791ee4b480e61a8c148efd 100644
--- a/tools/shell/src/shellWebsocket.c
+++ b/tools/shell/src/shellWebsocket.c
@@ -18,19 +18,19 @@
#include "shellInt.h"
int shell_conn_ws_server(bool first) {
- shell.ws_conn = ws_connect_with_dsn(shell.args.dsn);
- if (!shell.ws_conn) {
- fprintf(stderr, "failed to connect %s, reason: %s\n",
- shell.args.dsn, ws_errstr(NULL));
- return -1;
- }
- if (first && shell.args.restful) {
- fprintf(stdout, "successfully connect to %s\n\n",
- shell.args.dsn);
- } else if (first && shell.args.cloud) {
- fprintf(stdout, "successfully connect to cloud service\n");
- }
- return 0;
+ shell.ws_conn = ws_connect_with_dsn(shell.args.dsn);
+ if (!shell.ws_conn) {
+ fprintf(stderr, "failed to connect %s, reason: %s\n",
+ shell.args.dsn, ws_errstr(NULL));
+ return -1;
+ }
+ if (first && shell.args.restful) {
+ fprintf(stdout, "successfully connect to %s\n\n",
+ shell.args.dsn);
+ } else if (first && shell.args.cloud) {
+ fprintf(stdout, "successfully connect to cloud service\n");
+ }
+ return 0;
}
static int horizontalPrintWebsocket(WS_RES* wres, double* execute_time) {
@@ -39,7 +39,7 @@ static int horizontalPrintWebsocket(WS_RES* wres, double* execute_time) {
ws_fetch_block(wres, &data, &rows);
*execute_time += (double)(ws_take_timing(wres)/1E6);
if (!rows) {
- return 0;
+ return 0;
}
int num_fields = ws_field_count(wres);
TAOS_FIELD* fields = (TAOS_FIELD*)ws_fetch_fields(wres);
@@ -64,7 +64,7 @@ static int horizontalPrintWebsocket(WS_RES* wres, double* execute_time) {
putchar(' ');
putchar('|');
}
- putchar('\r');
+ putchar('\r');
putchar('\n');
}
numOfRows += rows;
@@ -79,7 +79,7 @@ static int verticalPrintWebsocket(WS_RES* wres, double* pexecute_time) {
ws_fetch_block(wres, &data, &rows);
*pexecute_time += (double)(ws_take_timing(wres)/1E6);
if (!rows) {
- return 0;
+ return 0;
}
int num_fields = ws_field_count(wres);
TAOS_FIELD* fields = (TAOS_FIELD*)ws_fetch_fields(wres);
@@ -98,7 +98,7 @@ static int verticalPrintWebsocket(WS_RES* wres, double* pexecute_time) {
uint32_t len;
for (int i = 0; i < rows; i++) {
printf("*************************** %d.row ***************************\n",
- numOfRows + 1);
+ numOfRows + 1);
for (int j = 0; j < num_fields; j++) {
TAOS_FIELD* field = fields + j;
int padding = (int)(maxColNameLen - strlen(field->name));
@@ -121,7 +121,7 @@ static int dumpWebsocketToFile(const char* fname, WS_RES* wres, double* pexecute
}
TdFilePtr pFile = taosOpenFile(fullname,
- TD_FILE_CREATE | TD_FILE_WRITE | TD_FILE_TRUNC | TD_FILE_STREAM);
+ TD_FILE_CREATE | TD_FILE_WRITE | TD_FILE_TRUNC | TD_FILE_STREAM);
if (pFile == NULL) {
fprintf(stderr, "failed to open file: %s\r\n", fullname);
return -1;
@@ -132,7 +132,7 @@ static int dumpWebsocketToFile(const char* fname, WS_RES* wres, double* pexecute
*pexecute_time += (double)(ws_take_timing(wres)/1E6);
if (!rows) {
taosCloseFile(&pFile);
- return 0;
+ return 0;
}
int numOfRows = 0;
TAOS_FIELD* fields = (TAOS_FIELD*)ws_fetch_fields(wres);
@@ -207,7 +207,7 @@ void shellRunSingleCommandWebsocketImp(char *command) {
}
if (!shell.ws_conn && shell_conn_ws_server(0)) {
- return;
+ return;
}
shell.stop_query = false;
@@ -216,16 +216,16 @@ void shellRunSingleCommandWebsocketImp(char *command) {
WS_RES* res = ws_query_timeout(shell.ws_conn, command, shell.args.timeout);
int code = ws_errno(res);
if (code != 0) {
- et = taosGetTimestampUs();
- fprintf(stderr, "\nDB: error: %s (%.6fs)\n", ws_errstr(res), (et - st)/1E6);
- if (code == TSDB_CODE_WS_SEND_TIMEOUT || code == TSDB_CODE_WS_RECV_TIMEOUT) {
- fprintf(stderr, "Hint: use -t to increase the timeout in seconds\n");
- } else if (code == TSDB_CODE_WS_INTERNAL_ERRO || code == TSDB_CODE_WS_CLOSED) {
- fprintf(stderr, "TDengine server is down, will try to reconnect\n");
- shell.ws_conn = NULL;
- }
- ws_free_result(res);
- return;
+ et = taosGetTimestampUs();
+ fprintf(stderr, "\nDB: error: %s (%.6fs)\n", ws_errstr(res), (et - st)/1E6);
+ if (code == TSDB_CODE_WS_SEND_TIMEOUT || code == TSDB_CODE_WS_RECV_TIMEOUT) {
+ fprintf(stderr, "Hint: use -t to increase the timeout in seconds\n");
+ } else if (code == TSDB_CODE_WS_INTERNAL_ERRO || code == TSDB_CODE_WS_CLOSED) {
+ fprintf(stderr, "TDengine server is down, will try to reconnect\n");
+ shell.ws_conn = NULL;
+ }
+ ws_free_result(res);
+ return;
}
double execute_time = ws_take_timing(res)/1E6;
@@ -233,36 +233,36 @@ void shellRunSingleCommandWebsocketImp(char *command) {
if (shellRegexMatch(command, "^\\s*use\\s+[a-zA-Z0-9_]+\\s*;\\s*$", REG_EXTENDED | REG_ICASE)) {
fprintf(stdout, "Database changed.\r\n\r\n");
fflush(stdout);
- ws_free_result(res);
+ ws_free_result(res);
return;
}
int numOfRows = 0;
if (ws_is_update_query(res)) {
- numOfRows = ws_affected_rows(res);
- et = taosGetTimestampUs();
+ numOfRows = ws_affected_rows(res);
+ et = taosGetTimestampUs();
double total_time = (et - st)/1E3;
double net_time = total_time - (double)execute_time;
- printf("Query Ok, %d of %d row(s) in database\n", numOfRows, numOfRows);
+ printf("Query Ok, %d of %d row(s) in database\n", numOfRows, numOfRows);
printf("Execute: %.2f ms Network: %.2f ms Total: %.2f ms\n", execute_time, net_time, total_time);
} else {
- int error_no = 0;
- numOfRows = shellDumpWebsocket(res, fname, &error_no, printMode, &execute_time);
- if (numOfRows < 0) {
- ws_free_result(res);
- return;
- }
- et = taosGetTimestampUs();
+ int error_no = 0;
+ numOfRows = shellDumpWebsocket(res, fname, &error_no, printMode, &execute_time);
+ if (numOfRows < 0) {
+ ws_free_result(res);
+ return;
+ }
+ et = taosGetTimestampUs();
double total_time = (et - st) / 1E3;
double net_time = total_time - execute_time;
- if (error_no == 0 && !shell.stop_query) {
- printf("Query OK, %d row(s) in set\n", numOfRows);
- printf("Execute: %.2f ms Network: %.2f ms Total: %.2f ms\n", execute_time, net_time, total_time);
- } else {
- printf("Query interrupted, %d row(s) in set (%.6fs)\n", numOfRows,
- (et - st)/1E6);
- printf("Execute: %.2f ms Network: %.2f ms Total: %.2f ms\n", execute_time, net_time, total_time);
- }
+ if (error_no == 0 && !shell.stop_query) {
+ printf("Query OK, %d row(s) in set\n", numOfRows);
+ printf("Execute: %.2f ms Network: %.2f ms Total: %.2f ms\n", execute_time, net_time, total_time);
+ } else {
+ printf("Query interrupted, %d row(s) in set (%.6fs)\n", numOfRows,
+ (et - st)/1E6);
+ printf("Execute: %.2f ms Network: %.2f ms Total: %.2f ms\n", execute_time, net_time, total_time);
+ }
}
printf("\n");
ws_free_result(res);
diff --git a/utils/CMakeLists.txt b/utils/CMakeLists.txt
new file mode 100644
index 0000000000000000000000000000000000000000..59cbbb31471acc377dd614610aa715ae72c9ec31
--- /dev/null
+++ b/utils/CMakeLists.txt
@@ -0,0 +1,4 @@
+#ADD_SUBDIRECTORY(examples/c)
+ADD_SUBDIRECTORY(tsim)
+ADD_SUBDIRECTORY(test/c)
+#ADD_SUBDIRECTORY(comparisonTest/tdengine)
diff --git a/tests/test/c/CMakeLists.txt b/utils/test/c/CMakeLists.txt
similarity index 97%
rename from tests/test/c/CMakeLists.txt
rename to utils/test/c/CMakeLists.txt
index 31331b52651fee79c837a077869bc45ec7acfe6c..839c65b633b17872051075be33ce291fbeaefd5a 100644
--- a/tests/test/c/CMakeLists.txt
+++ b/utils/test/c/CMakeLists.txt
@@ -1,4 +1,5 @@
add_executable(tmq_demo tmqDemo.c)
+add_dependencies(tmq_demo taos)
add_executable(tmq_sim tmqSim.c)
add_executable(create_table createTable.c)
add_executable(tmq_taosx_ci tmq_taosx_ci.c)
diff --git a/tests/test/c/createTable.c b/utils/test/c/createTable.c
similarity index 100%
rename from tests/test/c/createTable.c
rename to utils/test/c/createTable.c
diff --git a/tests/test/c/sdbDump.c b/utils/test/c/sdbDump.c
similarity index 99%
rename from tests/test/c/sdbDump.c
rename to utils/test/c/sdbDump.c
index aef50560316c87ca91d766a99bc7acc0509e0866..b90b781e4469f004df742b047d046ff7574601ba 100644
--- a/tests/test/c/sdbDump.c
+++ b/utils/test/c/sdbDump.c
@@ -20,6 +20,9 @@
#include "tconfig.h"
#include "tjson.h"
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-result"
+
#define TMP_DNODE_DIR TD_TMP_DIR_PATH "dumpsdb"
#define TMP_MNODE_DIR TD_TMP_DIR_PATH "dumpsdb" TD_DIRSEP "mnode"
#define TMP_SDB_DATA_DIR TD_TMP_DIR_PATH "dumpsdb" TD_DIRSEP "mnode" TD_DIRSEP "data"
@@ -429,6 +432,7 @@ int32_t parseArgs(int32_t argc, char *argv[]) {
char cmd[PATH_MAX * 2] = {0};
snprintf(cmd, sizeof(cmd), "rm -rf %s", TMP_DNODE_DIR);
+
system(cmd);
#ifdef WINDOWS
taosMulMkDir(TMP_SDB_DATA_DIR);
@@ -467,3 +471,5 @@ int32_t main(int32_t argc, char *argv[]) {
return dumpSdb();
}
+
+#pragma GCC diagnostic pop
\ No newline at end of file
diff --git a/tests/test/c/sml_test.c b/utils/test/c/sml_test.c
similarity index 96%
rename from tests/test/c/sml_test.c
rename to utils/test/c/sml_test.c
index 50249a5c5621aad4821fd7866950021f240c1c8a..ca3d464da769a9da8c87abd2be466ac2269fabcd 100644
--- a/tests/test/c/sml_test.c
+++ b/utils/test/c/sml_test.c
@@ -63,6 +63,7 @@ int smlProcess_influx_Test() {
printf("%s result:%s\n", __FUNCTION__, taos_errstr(pRes));
int code = taos_errno(pRes);
taos_free_result(pRes);
+
return code;
}
@@ -91,7 +92,7 @@ int smlProcess_telnet_Test() {
int smlProcess_json1_Test() {
TAOS *taos = taos_connect("localhost", "root", "taosdata", NULL, 0);
- TAOS_RES *pRes = taos_query(taos, "create database if not exists sml_db schemaless 1");
+ TAOS_RES *pRes = taos_query(taos, "create database if not exists sml_db");
taos_free_result(pRes);
pRes = taos_query(taos, "use sml_db");
@@ -111,7 +112,7 @@ int smlProcess_json1_Test() {
" },"
" {"
" \"metric\": \"sys.cpu.nice\","
- " \"timestamp\": 1346846400,"
+ " \"timestamp\": 1662344042,"
" \"value\": 9,"
" \"tags\": {"
" \"host\": \"web02\","
@@ -140,7 +141,7 @@ int smlProcess_json2_Test() {
"{"
" \"metric\": \"meter_current0\","
" \"timestamp\": {"
- " \"value\" : 1346846400,"
+ " \"value\" : 1662344042,"
" \"type\" : \"s\""
" },"
" \"value\": {"
@@ -180,7 +181,7 @@ int smlProcess_json3_Test() {
"{"
" \"metric\": \"meter_current1\","
" \"timestamp\": {"
- " \"value\" : 1346846400,"
+ " \"value\" : 1662344042,"
" \"type\" : \"s\""
" },"
" \"value\": {"
@@ -248,7 +249,7 @@ int smlProcess_json4_Test() {
"{"
" \"metric\": \"meter_current2\","
" \"timestamp\": {"
- " \"value\" : 1346846500000,"
+ " \"value\" : 1662344042000,"
" \"type\" : \"ms\""
" },"
" \"value\": \"ni\","
@@ -1089,7 +1090,7 @@ int sml_add_tag_col_Test() {
if (code) return code;
const char *sql1[] = {
- "macylr,id=macylr_17875_1804,t0=f,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"binaryTagValue\",t8=L\"ncharTagValue\",t11=127i8,t10=L\"ncharTagValue\" c0=f,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c7=\"binaryColValue\",c8=L\"ncharColValue\",c9=7u64,c11=L\"ncharColValue\",c10=f 1626006833639000000"
+ "macylr,id=macylr_17875_1804,t1=127i8,t2=32767i16,t3=2147483647i32,t4=9223372036854775807i64,t5=11.12345f32,t6=22.123456789f64,t7=\"binaryTagValue\",t8=L\"ncharTagValue\",t11=127i8,t10=L\"ncharTagValue\" c0=f,c1=127i8,c2=32767i16,c3=2147483647i32,c4=9223372036854775807i64,c5=11.12345f32,c6=22.123456789f64,c8=L\"ncharColValue\",c9=7u64,c11=L\"ncharColValue\",c10=f 1626006833639000000"
};
pRes = taos_schemaless_insert(taos, (char **)sql1, sizeof(sql1) / sizeof(sql1[0]), TSDB_SML_LINE_PROTOCOL, 0);
@@ -1100,34 +1101,91 @@ int sml_add_tag_col_Test() {
return code;
}
+int smlProcess_18784_Test() {
+ TAOS *taos = taos_connect("localhost", "root", "taosdata", NULL, 0);
+
+ TAOS_RES *pRes = taos_query(taos, "create database if not exists sml_db schemaless 1");
+ taos_free_result(pRes);
+
+ pRes = taos_query(taos, "use sml_db");
+ taos_free_result(pRes);
+
+ const char *sql[] = {
+ "disk,device=sdc inodes_used=176059i,total=1081101176832i 1661943960000000000",
+ "disk,device=sdc inodes_free=66932805i 1661943960000000000",
+ };
+ pRes = taos_schemaless_insert(taos, (char **)sql, sizeof(sql) / sizeof(sql[0]), TSDB_SML_LINE_PROTOCOL, 0);
+ printf("%s result:%s, rows:%d\n", __FUNCTION__, taos_errstr(pRes), taos_affected_rows(pRes));
+ int code = taos_errno(pRes);
+ ASSERT(!code);
+ ASSERT(taos_affected_rows(pRes) == 2);
+ taos_free_result(pRes);
+
+ pRes = taos_query(taos, "select * from disk");
+ ASSERT(pRes);
+ int fieldNum = taos_field_count(pRes);
+ ASSERT(fieldNum == 5);
+ printf("fieldNum:%d\n", fieldNum);
+ TAOS_ROW row = NULL;
+ int32_t rowIndex = 0;
+ while((row = taos_fetch_row(pRes)) != NULL) {
+ int64_t ts = *(int64_t*)row[0];
+ int64_t used = *(int64_t*)row[1];
+ int64_t total = *(int64_t*)row[2];
+ int64_t freed = *(int64_t*)row[3];
+ if(rowIndex == 0){
+ ASSERT(ts == 1661943960000);
+ ASSERT(used == 176059);
+ ASSERT(total == 1081101176832);
+ ASSERT(freed == 66932805);
+// ASSERT_EQ(latitude, 24.5208);
+// ASSERT_EQ(longitude, 28.09377);
+// ASSERT_EQ(elevation, 428);
+// ASSERT_EQ(velocity, 0);
+// ASSERT_EQ(heading, 304);
+// ASSERT_EQ(grade, 0);
+// ASSERT_EQ(fuel_consumption, 25);
+ }else{
+// ASSERT(0);
+ }
+ rowIndex++;
+ }
+ taos_free_result(pRes);
+
+ return code;
+}
+
int main(int argc, char *argv[]) {
int ret = 0;
ret = smlProcess_influx_Test();
- if(ret) return ret;
+ ASSERT(!ret);
ret = smlProcess_telnet_Test();
- if(ret) return ret;
+ ASSERT(!ret);
ret = smlProcess_json1_Test();
- if(ret) return ret;
+ ASSERT(!ret);
ret = smlProcess_json2_Test();
- if(ret) return ret;
+ ASSERT(!ret);
ret = smlProcess_json3_Test();
- if(ret) return ret;
+ ASSERT(!ret);
ret = smlProcess_json4_Test();
- if(ret) return ret;
+ ASSERT(!ret);
ret = sml_TD15662_Test();
- if(ret) return ret;
+ ASSERT(!ret);
ret = sml_TD15742_Test();
- if(ret) return ret;
+ ASSERT(!ret);
ret = sml_16384_Test();
- if(ret) return ret;
+ ASSERT(!ret);
ret = sml_oom_Test();
- if(ret) return ret;
+ ASSERT(!ret);
ret = sml_16368_Test();
- if(ret) return ret;
+ ASSERT(!ret);
ret = sml_dup_time_Test();
- if(ret) return ret;
+ ASSERT(!ret);
ret = sml_16960_Test();
- if(ret) return ret;
+ ASSERT(!ret);
ret = sml_add_tag_col_Test();
+ ASSERT(!ret);
+ ret = smlProcess_18784_Test();
+ ASSERT(!ret);
return ret;
}
diff --git a/tests/test/c/tmqDemo.c b/utils/test/c/tmqDemo.c
similarity index 100%
rename from tests/test/c/tmqDemo.c
rename to utils/test/c/tmqDemo.c
diff --git a/tests/test/c/tmqSim.c b/utils/test/c/tmqSim.c
similarity index 95%
rename from tests/test/c/tmqSim.c
rename to utils/test/c/tmqSim.c
index d39ade7e91495d2b3ff1924efdb78103d7b423cc..71b31ba1071c977d9fd3d2ceb046bdff02ca53df 100644
--- a/tests/test/c/tmqSim.c
+++ b/utils/test/c/tmqSim.c
@@ -492,7 +492,6 @@ static char* shellFormatTimestamp(char* buf, int64_t val, int32_t precision) {
static void shellDumpFieldToFile(TdFilePtr pFile, const char* val, TAOS_FIELD* field, int32_t length,
int32_t precision) {
if (val == NULL) {
- taosFprintfFile(pFile, "%s", TSDB_DATA_NULL_STR);
return;
}
@@ -540,13 +539,34 @@ static void shellDumpFieldToFile(TdFilePtr pFile, const char* val, TAOS_FIELD* f
case TSDB_DATA_TYPE_BINARY:
case TSDB_DATA_TYPE_NCHAR:
case TSDB_DATA_TYPE_JSON:
- memcpy(buf, val, length);
- buf[length] = 0;
- taosFprintfFile(pFile, "\'%s\'", buf);
+ {
+ char quotationStr[2];
+ int32_t bufIndex = 0;
+ quotationStr[0] = 0;
+ quotationStr[1] = 0;
+ for (int32_t i = 0; i < length; i++) {
+ buf[bufIndex] = val[i];
+ bufIndex++;
+ if (val[i] == '\"') {
+ buf[bufIndex] = val[i];
+ bufIndex++;
+ quotationStr[0] = '\"';
+ }
+ if (val[i] == ',') {
+ quotationStr[0] = '\"';
+ }
+ }
+ buf[bufIndex] = 0;
+ if (length == 0) {
+ quotationStr[0] = '\"';
+ }
+
+ taosFprintfFile(pFile, "%s%s%s", quotationStr, buf, quotationStr);
+ }
break;
case TSDB_DATA_TYPE_TIMESTAMP:
shellFormatTimestamp(buf, *(int64_t*)val, precision);
- taosFprintfFile(pFile, "'%s'", buf);
+ taosFprintfFile(pFile, "%s", buf);
break;
default:
break;
diff --git a/tests/test/c/tmq_taosx_ci.c b/utils/test/c/tmq_taosx_ci.c
similarity index 53%
rename from tests/test/c/tmq_taosx_ci.c
rename to utils/test/c/tmq_taosx_ci.c
index ece7ad4819f2947cb0a474491255dd296136581b..f917b9159e9914682c277329ddcfa4e269dc4908 100644
--- a/tests/test/c/tmq_taosx_ci.c
+++ b/utils/test/c/tmq_taosx_ci.c
@@ -22,8 +22,17 @@
#include "types.h"
static int running = 1;
-TdFilePtr g_fp = NULL;
-char dir[64]={0};
+TdFilePtr g_fp = NULL;
+typedef struct{
+ bool snapShot;
+ bool dropTable;
+ bool subTable;
+ int srcVgroups;
+ int dstVgroups;
+ char dir[64];
+}Config;
+
+Config g_conf = {0};
static TAOS* use_db(){
TAOS* pConn = taos_connect("localhost", "root", "taosdata", NULL, 0);
@@ -41,7 +50,6 @@ static TAOS* use_db(){
}
static void msg_process(TAOS_RES* msg) {
- /*memset(buf, 0, 1024);*/
printf("-----------topic-------------: %s\n", tmq_get_topic_name(msg));
printf("db: %s\n", tmq_get_db_name(msg));
printf("vg: %d\n", tmq_get_vgroup_id(msg));
@@ -51,8 +59,11 @@ static void msg_process(TAOS_RES* msg) {
if (result) {
printf("meta result: %s\n", result);
}
- taosFprintfFile(g_fp, result);
- taosFprintfFile(g_fp, "\n");
+ if(g_fp){
+ taosFprintfFile(g_fp, result);
+ taosFprintfFile(g_fp, "\n");
+ }
+
tmq_free_json_meta(result);
}
@@ -61,62 +72,10 @@ static void msg_process(TAOS_RES* msg) {
int32_t ret = tmq_write_raw(pConn, raw);
printf("write raw data: %s\n", tmq_err2str(ret));
-// else{
-// while(1){
-// int numOfRows = 0;
-// void *pData = NULL;
-// taos_fetch_raw_block(msg, &numOfRows, &pData);
-// if(numOfRows == 0) break;
-// printf("write data: tbname:%s, numOfRows:%d\n", tmq_get_table_name(msg), numOfRows);
-// int ret = taos_write_raw_block(pConn, numOfRows, pData, tmq_get_table_name(msg));
-// printf("write raw data: %s\n", tmq_err2str(ret));
-// }
-// }
-
taos_close(pConn);
}
-int32_t init_env() {
- TAOS* pConn = taos_connect("localhost", "root", "taosdata", NULL, 0);
- if (pConn == NULL) {
- return -1;
- }
-
- TAOS_RES* pRes = taos_query(pConn, "drop database if exists db_taosx");
- if (taos_errno(pRes) != 0) {
- printf("error in drop db_taosx, reason:%s\n", taos_errstr(pRes));
- return -1;
- }
- taos_free_result(pRes);
-
- pRes = taos_query(pConn, "create database if not exists db_taosx vgroups 1");
- if (taos_errno(pRes) != 0) {
- printf("error in create db_taosx, reason:%s\n", taos_errstr(pRes));
- return -1;
- }
- taos_free_result(pRes);
-
- pRes = taos_query(pConn, "drop database if exists abc1");
- if (taos_errno(pRes) != 0) {
- printf("error in drop db, reason:%s\n", taos_errstr(pRes));
- return -1;
- }
- taos_free_result(pRes);
-
- pRes = taos_query(pConn, "create database if not exists abc1 vgroups 1");
- if (taos_errno(pRes) != 0) {
- printf("error in create db, reason:%s\n", taos_errstr(pRes));
- return -1;
- }
- taos_free_result(pRes);
-
- pRes = taos_query(pConn, "use abc1");
- if (taos_errno(pRes) != 0) {
- printf("error in use db, reason:%s\n", taos_errstr(pRes));
- return -1;
- }
- taos_free_result(pRes);
-
+int buildDatabase(TAOS* pConn, TAOS_RES* pRes){
pRes = taos_query(pConn,
"create stable if not exists st1 (ts timestamp, c1 int, c2 float, c3 binary(16)) tags(t1 int, t3 "
"nchar(8), t4 bool)");
@@ -133,7 +92,7 @@ int32_t init_env() {
}
taos_free_result(pRes);
- pRes = taos_query(pConn, "insert into ct0 values(1626006833600, 1, 2, 'a')");
+ pRes = taos_query(pConn, "insert into ct0 values(1626006833400, 1, 2, 'a')");
if (taos_errno(pRes) != 0) {
printf("failed to insert into ct0, reason:%s\n", taos_errstr(pRes));
return -1;
@@ -168,7 +127,7 @@ int32_t init_env() {
}
taos_free_result(pRes);
- pRes = taos_query(pConn, "insert into ct3 values(1626006833600, 5, 6, 'c') ct1 values(1626006833601, 2, 3, 'sds') (1626006833602, 4, 5, 'ddd') ct0 values(1626006833602, 4, 3, 'hwj') ct1 values(now+5s, 23, 32, 's21ds')");
+ pRes = taos_query(pConn, "insert into ct3 values(1626006833600, 5, 6, 'c') ct1 values(1626006833601, 2, 3, 'sds') (1626006833602, 4, 5, 'ddd') ct0 values(1626006833603, 4, 3, 'hwj') ct1 values(now+5s, 23, 32, 's21ds')");
if (taos_errno(pRes) != 0) {
printf("failed to insert into ct3, reason:%s\n", taos_errstr(pRes));
return -1;
@@ -224,6 +183,22 @@ int32_t init_env() {
}
taos_free_result(pRes);
+ if(g_conf.dropTable){
+ pRes = taos_query(pConn, "drop table ct3 ct1");
+ if (taos_errno(pRes) != 0) {
+ printf("failed to drop child table ct3, reason:%s\n", taos_errstr(pRes));
+ return -1;
+ }
+ taos_free_result(pRes);
+
+ pRes = taos_query(pConn, "drop table st1");
+ if (taos_errno(pRes) != 0) {
+ printf("failed to drop super table st1, reason:%s\n", taos_errstr(pRes));
+ return -1;
+ }
+ taos_free_result(pRes);
+ }
+
pRes = taos_query(pConn, "create table if not exists n1(ts timestamp, c1 int, c2 nchar(4))");
if (taos_errno(pRes) != 0) {
printf("failed to create normal table n1, reason:%s\n", taos_errstr(pRes));
@@ -273,6 +248,15 @@ int32_t init_env() {
}
taos_free_result(pRes);
+ if(g_conf.dropTable){
+ pRes = taos_query(pConn, "drop table n1");
+ if (taos_errno(pRes) != 0) {
+ printf("failed to drop normal table n1, reason:%s\n", taos_errstr(pRes));
+ return -1;
+ }
+ taos_free_result(pRes);
+ }
+
pRes = taos_query(pConn, "create table jt(ts timestamp, i int) tags(t json)");
if (taos_errno(pRes) != 0) {
printf("failed to create super table jt, reason:%s\n", taos_errstr(pRes));
@@ -308,6 +292,129 @@ int32_t init_env() {
}
taos_free_result(pRes);
+ if(g_conf.dropTable){
+ pRes = taos_query(pConn,
+ "create stable if not exists st1 (ts timestamp, c1 int, c2 float, c3 binary(16)) tags(t1 int, t3 "
+ "nchar(8), t4 bool)");
+ if (taos_errno(pRes) != 0) {
+ printf("failed to create super table st1, reason:%s\n", taos_errstr(pRes));
+ return -1;
+ }
+ taos_free_result(pRes);
+
+ pRes = taos_query(pConn, "drop table st1");
+ if (taos_errno(pRes) != 0) {
+ printf("failed to drop super table st1, reason:%s\n", taos_errstr(pRes));
+ return -1;
+ }
+ taos_free_result(pRes);
+ }
+ return 0;
+}
+
+int buildStable(TAOS* pConn, TAOS_RES* pRes){
+ pRes = taos_query(pConn, "CREATE STABLE `meters` (`ts` TIMESTAMP, `current` FLOAT, `voltage` INT, `phase` FLOAT) TAGS (`groupid` INT, `location` VARCHAR(16))");
+ if (taos_errno(pRes) != 0) {
+ printf("failed to create super table meters, reason:%s\n", taos_errstr(pRes));
+ return -1;
+ }
+ taos_free_result(pRes);
+
+ pRes = taos_query(pConn, "create table d0 using meters tags(1, 'San Francisco')");
+ if (taos_errno(pRes) != 0) {
+ printf("failed to create child table d0, reason:%s\n", taos_errstr(pRes));
+ return -1;
+ }
+ taos_free_result(pRes);
+
+ pRes = taos_query(pConn, "create table d1 using meters tags(2, 'Beijing')");
+ if (taos_errno(pRes) != 0) {
+ printf("failed to create child table d1, reason:%s\n", taos_errstr(pRes));
+ return -1;
+ }
+ taos_free_result(pRes);
+
+ pRes = taos_query(pConn, "create stream meters_summary_s into meters_summary as select _wstart, max(current) as current, groupid, location from meters partition by groupid, location interval(10m)");
+ if (taos_errno(pRes) != 0) {
+ printf("failed to create super table meters_summary, reason:%s\n", taos_errstr(pRes));
+ return -1;
+ }
+ taos_free_result(pRes);
+
+ pRes = taos_query(pConn, "insert into d0 (ts, current) values (now, 120)");
+ if (taos_errno(pRes) != 0) {
+ printf("failed to insert into table d0, reason:%s\n", taos_errstr(pRes));
+ return -1;
+ }
+ taos_free_result(pRes);
+
+ return 0;
+}
+
+int32_t init_env() {
+ TAOS* pConn = taos_connect("localhost", "root", "taosdata", NULL, 0);
+ if (pConn == NULL) {
+ return -1;
+ }
+
+ TAOS_RES* pRes = taos_query(pConn, "drop database if exists db_taosx");
+ if (taos_errno(pRes) != 0) {
+ printf("error in drop db_taosx, reason:%s\n", taos_errstr(pRes));
+ return -1;
+ }
+ taos_free_result(pRes);
+
+ char sql[128] = {0};
+ snprintf(sql, 128, "create database if not exists db_taosx vgroups %d", g_conf.dstVgroups);
+ pRes = taos_query(pConn, sql);
+ if (taos_errno(pRes) != 0) {
+ printf("error in create db_taosx, reason:%s\n", taos_errstr(pRes));
+ return -1;
+ }
+ taos_free_result(pRes);
+
+ pRes = taos_query(pConn, "drop topic if exists topic_db");
+ if (taos_errno(pRes) != 0) {
+ printf("error in drop topic, reason:%s\n", taos_errstr(pRes));
+ return -1;
+ }
+ taos_free_result(pRes);
+
+ pRes = taos_query(pConn, "drop topic if exists meters_summary_t1");
+ if (taos_errno(pRes) != 0) {
+ printf("error in drop topic, reason:%s\n", taos_errstr(pRes));
+ return -1;
+ }
+ taos_free_result(pRes);
+
+ pRes = taos_query(pConn, "drop database if exists abc1");
+ if (taos_errno(pRes) != 0) {
+ printf("error in drop db, reason:%s\n", taos_errstr(pRes));
+ return -1;
+ }
+ taos_free_result(pRes);
+
+ snprintf(sql, 128, "create database if not exists abc1 vgroups %d", g_conf.srcVgroups);
+ pRes = taos_query(pConn, sql);
+ if (taos_errno(pRes) != 0) {
+ printf("error in create db, reason:%s\n", taos_errstr(pRes));
+ return -1;
+ }
+ taos_free_result(pRes);
+
+ pRes = taos_query(pConn, "use abc1");
+ if (taos_errno(pRes) != 0) {
+ printf("error in use db, reason:%s\n", taos_errstr(pRes));
+ return -1;
+ }
+ taos_free_result(pRes);
+
+ if(g_conf.subTable){
+ buildStable(pConn, pRes);
+ }else{
+ buildDatabase(pConn, pRes);
+ }
+
taos_close(pConn);
return 0;
}
@@ -327,12 +434,21 @@ int32_t create_topic() {
}
taos_free_result(pRes);
- pRes = taos_query(pConn, "create topic topic_ctb_column with meta as database abc1");
- if (taos_errno(pRes) != 0) {
- printf("failed to create topic topic_ctb_column, reason:%s\n", taos_errstr(pRes));
- return -1;
+ if(g_conf.subTable){
+ pRes = taos_query(pConn, "create topic meters_summary_t1 with meta as stable meters_summary");
+ if (taos_errno(pRes) != 0) {
+ printf("failed to create topic meters_summary_t1, reason:%s\n", taos_errstr(pRes));
+ return -1;
+ }
+ taos_free_result(pRes);
+ }else{
+ pRes = taos_query(pConn, "create topic topic_db with meta as database abc1");
+ if (taos_errno(pRes) != 0) {
+ printf("failed to create topic topic_db, reason:%s\n", taos_errstr(pRes));
+ return -1;
+ }
+ taos_free_result(pRes);
}
- taos_free_result(pRes);
taos_close(pConn);
return 0;
@@ -343,17 +459,6 @@ void tmq_commit_cb_print(tmq_t* tmq, int32_t code, void* param) {
}
tmq_t* build_consumer() {
-#if 0
- TAOS* pConn = taos_connect("localhost", "root", "taosdata", NULL, 0);
- assert(pConn != NULL);
-
- TAOS_RES* pRes = taos_query(pConn, "use abc1");
- if (taos_errno(pRes) != 0) {
- printf("error in use db, reason:%s\n", taos_errstr(pRes));
- }
- taos_free_result(pRes);
-#endif
-
tmq_conf_t* conf = tmq_conf_new();
tmq_conf_set(conf, "group.id", "tg2");
tmq_conf_set(conf, "client.id", "my app 1");
@@ -363,7 +468,9 @@ tmq_t* build_consumer() {
tmq_conf_set(conf, "enable.auto.commit", "true");
tmq_conf_set(conf, "enable.heartbeat.background", "true");
- /*tmq_conf_set(conf, "experimental.snapshot.enable", "true");*/
+ if(g_conf.snapShot){
+ tmq_conf_set(conf, "experimental.snapshot.enable", "true");
+ }
tmq_conf_set_auto_commit_cb(conf, tmq_commit_cb_print, NULL);
tmq_t* tmq = tmq_consumer_new(conf, NULL, 0);
@@ -374,8 +481,11 @@ tmq_t* build_consumer() {
tmq_list_t* build_topic_list() {
tmq_list_t* topic_list = tmq_list_new();
- tmq_list_append(topic_list, "topic_ctb_column");
- /*tmq_list_append(topic_list, "tmq_test_db_multi_insert_topic");*/
+ if(g_conf.subTable){
+ tmq_list_append(topic_list, "meters_summary_t1");
+ }else{
+ tmq_list_append(topic_list, "topic_db");
+ }
return topic_list;
}
@@ -393,12 +503,7 @@ void basic_consume_loop(tmq_t* tmq, tmq_list_t* topics) {
if (tmqmessage) {
cnt++;
msg_process(tmqmessage);
- /*if (cnt >= 2) break;*/
- /*printf("get data\n");*/
taos_free_result(tmqmessage);
- /*} else {*/
- /*break;*/
- /*tmq_commit_sync(tmq, NULL);*/
}else{
break;
}
@@ -411,52 +516,18 @@ void basic_consume_loop(tmq_t* tmq, tmq_list_t* topics) {
fprintf(stderr, "%% Consumer closed\n");
}
-void sync_consume_loop(tmq_t* tmq, tmq_list_t* topics) {
- static const int MIN_COMMIT_COUNT = 1;
-
- int msg_count = 0;
- int32_t code;
-
- if ((code = tmq_subscribe(tmq, topics))) {
- fprintf(stderr, "%% Failed to start consuming topics: %s\n", tmq_err2str(code));
- return;
- }
-
- tmq_list_t* subList = NULL;
- tmq_subscription(tmq, &subList);
- char** subTopics = tmq_list_to_c_array(subList);
- int32_t sz = tmq_list_get_size(subList);
- printf("subscribed topics: ");
- for (int32_t i = 0; i < sz; i++) {
- printf("%s, ", subTopics[i]);
- }
- printf("\n");
- tmq_list_destroy(subList);
-
- while (running) {
- TAOS_RES* tmqmessage = tmq_consumer_poll(tmq, 1000);
- if (tmqmessage) {
- msg_process(tmqmessage);
- taos_free_result(tmqmessage);
-
- /*tmq_commit_sync(tmq, NULL);*/
- /*if ((++msg_count % MIN_COMMIT_COUNT) == 0) tmq_commit(tmq, NULL, 0);*/
- }
- }
-
- code = tmq_consumer_close(tmq);
- if (code)
- fprintf(stderr, "%% Failed to close consumer: %s\n", tmq_err2str(code));
- else
- fprintf(stderr, "%% Consumer closed\n");
-}
-
void initLogFile() {
char f1[256] = {0};
char f2[256] = {0};
- sprintf(f1, "%s/../log/tmq_taosx_tmp.source", dir);
- sprintf(f2, "%s/../log/tmq_taosx_tmp.result", dir);
+ if(g_conf.snapShot){
+ sprintf(f1, "%s/../log/tmq_taosx_tmp_snapshot.source", g_conf.dir);
+ sprintf(f2, "%s/../log/tmq_taosx_tmp_snapshot.result", g_conf.dir);
+ }else{
+ sprintf(f1, "%s/../log/tmq_taosx_tmp.source", g_conf.dir);
+ sprintf(f2, "%s/../log/tmq_taosx_tmp.result", g_conf.dir);
+ }
+
TdFilePtr pFile = taosOpenFile(f1, TD_FILE_TEXT | TD_FILE_TRUNC | TD_FILE_STREAM);
if (NULL == pFile) {
fprintf(stderr, "Failed to open %s for save result\n", f1);
@@ -469,43 +540,76 @@ void initLogFile() {
fprintf(stderr, "Failed to open %s for save result\n", f2);
exit(-1);
}
- char *result[] = {
- "{\"type\":\"create\",\"tableName\":\"st1\",\"tableType\":\"super\",\"columns\":[{\"name\":\"ts\",\"type\":9},{\"name\":\"c1\",\"type\":4},{\"name\":\"c2\",\"type\":6},{\"name\":\"c3\",\"type\":8,\"length\":16}],\"tags\":[{\"name\":\"t1\",\"type\":4},{\"name\":\"t3\",\"type\":10,\"length\":8},{\"name\":\"t4\",\"type\":1}]}",
- "{\"type\":\"create\",\"tableName\":\"ct0\",\"tableType\":\"child\",\"using\":\"st1\",\"tagNum\":3,\"tags\":[{\"name\":\"t1\",\"type\":4,\"value\":1000},{\"name\":\"t3\",\"type\":10,\"value\":\"\\\"ttt\\\"\"},{\"name\":\"t4\",\"type\":1,\"value\":1}]}",
- "{\"type\":\"create\",\"tableName\":\"ct1\",\"tableType\":\"child\",\"using\":\"st1\",\"tagNum\":3,\"tags\":[{\"name\":\"t1\",\"type\":4,\"value\":2000}]}",
- "{\"type\":\"create\",\"tableName\":\"ct2\",\"tableType\":\"child\",\"using\":\"st1\",\"tagNum\":3,\"tags\":[]}",
- "{\"type\":\"create\",\"tableName\":\"ct3\",\"tableType\":\"child\",\"using\":\"st1\",\"tagNum\":3,\"tags\":[{\"name\":\"t1\",\"type\":4,\"value\":3000}]}",
- "{\"type\":\"alter\",\"tableName\":\"st1\",\"tableType\":\"super\",\"alterType\":5,\"colName\":\"c4\",\"colType\":5}",
- "{\"type\":\"alter\",\"tableName\":\"st1\",\"tableType\":\"super\",\"alterType\":7,\"colName\":\"c3\",\"colType\":8,\"colLength\":64}",
- "{\"type\":\"alter\",\"tableName\":\"st1\",\"tableType\":\"super\",\"alterType\":1,\"colName\":\"t2\",\"colType\":8,\"colLength\":64}",
- "{\"type\":\"alter\",\"tableName\":\"ct3\",\"tableType\":\"child\",\"alterType\":4,\"colName\":\"t1\",\"colValue\":\"5000\",\"colValueNull\":false}",
- "{\"type\":\"create\",\"tableName\":\"n1\",\"tableType\":\"normal\",\"columns\":[{\"name\":\"ts\",\"type\":9},{\"name\":\"c1\",\"type\":4},{\"name\":\"c2\",\"type\":10,\"length\":4}],\"tags\":[]}",
- "{\"type\":\"alter\",\"tableName\":\"n1\",\"tableType\":\"normal\",\"alterType\":5,\"colName\":\"c3\",\"colType\":5}",
- "{\"type\":\"alter\",\"tableName\":\"n1\",\"tableType\":\"normal\",\"alterType\":7,\"colName\":\"c2\",\"colType\":10,\"colLength\":8}",
- "{\"type\":\"alter\",\"tableName\":\"n1\",\"tableType\":\"normal\",\"alterType\":10,\"colName\":\"c3\",\"colNewName\":\"cc3\"}",
- "{\"type\":\"alter\",\"tableName\":\"n1\",\"tableType\":\"normal\",\"alterType\":9}",
- "{\"type\":\"alter\",\"tableName\":\"n1\",\"tableType\":\"normal\",\"alterType\":6,\"colName\":\"c1\"}",
- "{\"type\":\"create\",\"tableName\":\"jt\",\"tableType\":\"super\",\"columns\":[{\"name\":\"ts\",\"type\":9},{\"name\":\"i\",\"type\":4}],\"tags\":[{\"name\":\"t\",\"type\":15}]}",
- "{\"type\":\"create\",\"tableName\":\"jt1\",\"tableType\":\"child\",\"using\":\"jt\",\"tagNum\":1,\"tags\":[{\"name\":\"t\",\"type\":15,\"value\":\"{\\\"k1\\\":1,\\\"k2\\\":\\\"hello\\\"}\"}]}",
- "{\"type\":\"create\",\"tableName\":\"jt2\",\"tableType\":\"child\",\"using\":\"jt\",\"tagNum\":1,\"tags\":[]}"
- };
-
- for(int i = 0; i < sizeof(result)/sizeof(result[0]); i++){
- taosFprintfFile(pFile2, result[i]);
- taosFprintfFile(pFile2, "\n");
+
+ if(g_conf.snapShot){
+ char *result[] = {
+ "{\"type\":\"create\",\"tableName\":\"st1\",\"tableType\":\"super\",\"columns\":[{\"name\":\"ts\",\"type\":9},{\"name\":\"c1\",\"type\":4},{\"name\":\"c2\",\"type\":6},{\"name\":\"c3\",\"type\":8,\"length\":64},{\"name\":\"c4\",\"type\":5}],\"tags\":[{\"name\":\"t1\",\"type\":4},{\"name\":\"t3\",\"type\":10,\"length\":8},{\"name\":\"t4\",\"type\":1},{\"name\":\"t2\",\"type\":8,\"length\":64}]}",
+ "{\"type\":\"create\",\"tableName\":\"ct0\",\"tableType\":\"child\",\"using\":\"st1\",\"tagNum\":4,\"tags\":[{\"name\":\"t1\",\"type\":4,\"value\":1000},{\"name\":\"t3\",\"type\":10,\"value\":\"\\\"ttt\\\"\"},{\"name\":\"t4\",\"type\":1,\"value\":1}]}",
+ "{\"type\":\"create\",\"tableName\":\"ct1\",\"tableType\":\"child\",\"using\":\"st1\",\"tagNum\":4,\"tags\":[{\"name\":\"t1\",\"type\":4,\"value\":2000}]}",
+ "{\"type\":\"create\",\"tableName\":\"ct2\",\"tableType\":\"child\",\"using\":\"st1\",\"tagNum\":4,\"tags\":[]}",
+ "{\"type\":\"create\",\"tableName\":\"ct3\",\"tableType\":\"child\",\"using\":\"st1\",\"tagNum\":4,\"tags\":[{\"name\":\"t1\",\"type\":4,\"value\":5000}]}",
+ "{\"type\":\"create\",\"tableName\":\"n1\",\"tableType\":\"normal\",\"columns\":[{\"name\":\"ts\",\"type\":9},{\"name\":\"c2\",\"type\":10,\"length\":8},{\"name\":\"cc3\",\"type\":5}],\"tags\":[]}",
+ "{\"type\":\"create\",\"tableName\":\"jt\",\"tableType\":\"super\",\"columns\":[{\"name\":\"ts\",\"type\":9},{\"name\":\"i\",\"type\":4}],\"tags\":[{\"name\":\"t\",\"type\":15}]}",
+ "{\"type\":\"create\",\"tableName\":\"jt1\",\"tableType\":\"child\",\"using\":\"jt\",\"tagNum\":1,\"tags\":[{\"name\":\"t\",\"type\":15,\"value\":\"{\\\"k1\\\":1,\\\"k2\\\":\\\"hello\\\"}\"}]}",
+ "{\"type\":\"create\",\"tableName\":\"jt2\",\"tableType\":\"child\",\"using\":\"jt\",\"tagNum\":1,\"tags\":[]}",
+ };
+
+ for(int i = 0; i < sizeof(result)/sizeof(result[0]); i++){
+ taosFprintfFile(pFile2, result[i]);
+ taosFprintfFile(pFile2, "\n");
+ }
+ }else{
+ char *result[] = {
+ "{\"type\":\"create\",\"tableName\":\"st1\",\"tableType\":\"super\",\"columns\":[{\"name\":\"ts\",\"type\":9},{\"name\":\"c1\",\"type\":4},{\"name\":\"c2\",\"type\":6},{\"name\":\"c3\",\"type\":8,\"length\":16}],\"tags\":[{\"name\":\"t1\",\"type\":4},{\"name\":\"t3\",\"type\":10,\"length\":8},{\"name\":\"t4\",\"type\":1}]}",
+ "{\"type\":\"create\",\"tableName\":\"ct0\",\"tableType\":\"child\",\"using\":\"st1\",\"tagNum\":3,\"tags\":[{\"name\":\"t1\",\"type\":4,\"value\":1000},{\"name\":\"t3\",\"type\":10,\"value\":\"\\\"ttt\\\"\"},{\"name\":\"t4\",\"type\":1,\"value\":1}]}",
+ "{\"type\":\"create\",\"tableName\":\"ct1\",\"tableType\":\"child\",\"using\":\"st1\",\"tagNum\":3,\"tags\":[{\"name\":\"t1\",\"type\":4,\"value\":2000}]}",
+ "{\"type\":\"create\",\"tableName\":\"ct2\",\"tableType\":\"child\",\"using\":\"st1\",\"tagNum\":3,\"tags\":[]}",
+ "{\"type\":\"create\",\"tableName\":\"ct3\",\"tableType\":\"child\",\"using\":\"st1\",\"tagNum\":3,\"tags\":[{\"name\":\"t1\",\"type\":4,\"value\":3000}]}",
+ "{\"type\":\"alter\",\"tableName\":\"st1\",\"tableType\":\"super\",\"alterType\":5,\"colName\":\"c4\",\"colType\":5}",
+ "{\"type\":\"alter\",\"tableName\":\"st1\",\"tableType\":\"super\",\"alterType\":7,\"colName\":\"c3\",\"colType\":8,\"colLength\":64}",
+ "{\"type\":\"alter\",\"tableName\":\"st1\",\"tableType\":\"super\",\"alterType\":1,\"colName\":\"t2\",\"colType\":8,\"colLength\":64}",
+ "{\"type\":\"alter\",\"tableName\":\"ct3\",\"tableType\":\"child\",\"alterType\":4,\"colName\":\"t1\",\"colValue\":\"5000\",\"colValueNull\":false}",
+ "{\"type\":\"create\",\"tableName\":\"n1\",\"tableType\":\"normal\",\"columns\":[{\"name\":\"ts\",\"type\":9},{\"name\":\"c1\",\"type\":4},{\"name\":\"c2\",\"type\":10,\"length\":4}],\"tags\":[]}",
+ "{\"type\":\"alter\",\"tableName\":\"n1\",\"tableType\":\"normal\",\"alterType\":5,\"colName\":\"c3\",\"colType\":5}",
+ "{\"type\":\"alter\",\"tableName\":\"n1\",\"tableType\":\"normal\",\"alterType\":7,\"colName\":\"c2\",\"colType\":10,\"colLength\":8}",
+ "{\"type\":\"alter\",\"tableName\":\"n1\",\"tableType\":\"normal\",\"alterType\":10,\"colName\":\"c3\",\"colNewName\":\"cc3\"}",
+ "{\"type\":\"alter\",\"tableName\":\"n1\",\"tableType\":\"normal\",\"alterType\":9}",
+ "{\"type\":\"alter\",\"tableName\":\"n1\",\"tableType\":\"normal\",\"alterType\":6,\"colName\":\"c1\"}",
+ "{\"type\":\"create\",\"tableName\":\"jt\",\"tableType\":\"super\",\"columns\":[{\"name\":\"ts\",\"type\":9},{\"name\":\"i\",\"type\":4}],\"tags\":[{\"name\":\"t\",\"type\":15}]}",
+ "{\"type\":\"create\",\"tableName\":\"jt1\",\"tableType\":\"child\",\"using\":\"jt\",\"tagNum\":1,\"tags\":[{\"name\":\"t\",\"type\":15,\"value\":\"{\\\"k1\\\":1,\\\"k2\\\":\\\"hello\\\"}\"}]}",
+ "{\"type\":\"create\",\"tableName\":\"jt2\",\"tableType\":\"child\",\"using\":\"jt\",\"tagNum\":1,\"tags\":[]}"
+ };
+
+ for(int i = 0; i < sizeof(result)/sizeof(result[0]); i++){
+ taosFprintfFile(pFile2, result[i]);
+ taosFprintfFile(pFile2, "\n");
+ }
}
+
taosCloseFile(&pFile2);
}
int main(int argc, char* argv[]) {
- if(argc == 3 && strcmp(argv[1], "-c") == 0) {
- strcpy(dir, argv[2]);
- }else{
- strcpy(dir, "../../../sim/psim/cfg");
+ for (int32_t i = 1; i < argc; i++) {
+ if(strcmp(argv[i], "-c") == 0){
+ strcpy(g_conf.dir, argv[++i]);
+ }else if(strcmp(argv[i], "-s") == 0){
+ g_conf.snapShot = true;
+ }else if(strcmp(argv[i], "-d") == 0){
+ g_conf.dropTable = true;
+ }else if(strcmp(argv[i], "-sv") == 0){
+ g_conf.srcVgroups = atol(argv[++i]);
+ }else if(strcmp(argv[i], "-dv") == 0){
+ g_conf.dstVgroups = atol(argv[++i]);
+ }else if(strcmp(argv[i], "-t") == 0){
+ g_conf.subTable = true;
+ }
}
printf("env init\n");
- initLogFile();
+ if(strlen(g_conf.dir) != 0){
+ initLogFile();
+ }
if (init_env() < 0) {
return -1;
@@ -515,6 +619,5 @@ int main(int argc, char* argv[]) {
tmq_t* tmq = build_consumer();
tmq_list_t* topic_list = build_topic_list();
basic_consume_loop(tmq, topic_list);
- /*sync_consume_loop(tmq, topic_list);*/
taosCloseFile(&g_fp);
}
diff --git a/tests/tsim/CMakeLists.txt b/utils/tsim/CMakeLists.txt
similarity index 100%
rename from tests/tsim/CMakeLists.txt
rename to utils/tsim/CMakeLists.txt
diff --git a/tests/tsim/inc/simInt.h b/utils/tsim/inc/simInt.h
similarity index 100%
rename from tests/tsim/inc/simInt.h
rename to utils/tsim/inc/simInt.h
diff --git a/tests/tsim/inc/simParse.h b/utils/tsim/inc/simParse.h
similarity index 100%
rename from tests/tsim/inc/simParse.h
rename to utils/tsim/inc/simParse.h
diff --git a/tests/tsim/src/simExe.c b/utils/tsim/src/simExe.c
similarity index 99%
rename from tests/tsim/src/simExe.c
rename to utils/tsim/src/simExe.c
index b993a8dbf1377c57af81d767173f5bfd27b688e3..16732ff9a191576d871584253f2c70e9187b6d2f 100644
--- a/tests/tsim/src/simExe.c
+++ b/utils/tsim/src/simExe.c
@@ -464,7 +464,10 @@ void simStoreSystemContentResult(SScript *script, char *filename) {
taosCloseFile(&pFile);
char rmCmd[MAX_FILE_NAME_LEN] = {0};
sprintf(rmCmd, "rm -f %s", filename);
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-result"
system(rmCmd);
+#pragma GCC diagnostic pop
}
}
diff --git a/tests/tsim/src/simMain.c b/utils/tsim/src/simMain.c
similarity index 100%
rename from tests/tsim/src/simMain.c
rename to utils/tsim/src/simMain.c
diff --git a/tests/tsim/src/simParse.c b/utils/tsim/src/simParse.c
similarity index 100%
rename from tests/tsim/src/simParse.c
rename to utils/tsim/src/simParse.c
diff --git a/tests/tsim/src/simSystem.c b/utils/tsim/src/simSystem.c
similarity index 100%
rename from tests/tsim/src/simSystem.c
rename to utils/tsim/src/simSystem.c