diff --git a/.clang-format b/.clang-format
index e58d518b3b8cacdd1e13dd965805fa364a996eb2..56ca83e724ad0b804a10b9be0dd42aa7a05eeaf7 100644
--- a/.clang-format
+++ b/.clang-format
@@ -88,4 +88,3 @@ Standard: Auto
TabWidth: 8
UseTab: Never
...
-
diff --git a/.gitattributes b/.gitattributes
new file mode 100644
index 0000000000000000000000000000000000000000..912b302ad23d47c46708d672175a908f2dbc74e8
--- /dev/null
+++ b/.gitattributes
@@ -0,0 +1 @@
+*.py linguist-detectable=false
diff --git a/.gitignore b/.gitignore
index 76b581b18224c4036c59573900943804aeabe905..5f1e24109df622591bdd31536d0316d2bb2c5450 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,5 +1,6 @@
build/
compile_commands.json
+CMakeSettings.json
.cache
.ycm_extra_conf.py
.tasks
diff --git a/CMakeLists.txt b/CMakeLists.txt
index 0c7e31bfbb891cc23c2cd5e788772d6f33bc329e..90e841d5e04fd72338f38ca11f1dd5a522b61918 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -34,7 +34,7 @@ endif(${BUILD_TEST})
add_subdirectory(source)
add_subdirectory(tools)
-add_subdirectory(tests)
+add_subdirectory(utils)
add_subdirectory(examples/c)
# docs
diff --git a/CMakeSettings.json b/CMakeSettings.json
deleted file mode 100644
index d3f2c27bf6ecb5bb433e468c34577408c7ba6603..0000000000000000000000000000000000000000
--- a/CMakeSettings.json
+++ /dev/null
@@ -1,25 +0,0 @@
-{
- "configurations": [
- {
- "name": "WSL-GCC-Debug",
- "generator": "Unix Makefiles",
- "configurationType": "Debug",
- "buildRoot": "${projectDir}\\build\\",
- "installRoot": "${projectDir}\\build\\",
- "cmakeExecutable": "/usr/bin/cmake",
- "cmakeCommandArgs": "",
- "buildCommandArgs": "",
- "ctestCommandArgs": "",
- "inheritEnvironments": [ "linux_x64" ],
- "wslPath": "${defaultWSLPath}",
- "addressSanitizerRuntimeFlags": "detect_leaks=0",
- "variables": [
- {
- "name": "CMAKE_INSTALL_PREFIX",
- "value": "/mnt/d/TDengine/TDengine/build",
- "type": "PATH"
- }
- ]
- }
- ]
-}
\ No newline at end of file
diff --git a/Jenkinsfile b/Jenkinsfile
index 4b84e1f88e71b3a43bc63df10edffe8a8758052a..0842795f0a496103f23fcee1c59a87c5c2bd4f3b 100644
--- a/Jenkinsfile
+++ b/Jenkinsfile
@@ -79,7 +79,7 @@ def pre_test(){
rm -rf debug
mkdir debug
cd debug
- cmake .. > /dev/null
+ cmake .. -DBUILD_TEST=true > /dev/null
make -j4> /dev/null
'''
diff --git a/Jenkinsfile2 b/Jenkinsfile2
index d7df07f06afd8e1e483455e3ce925a03f28740fd..ac152e2aa4605b570d6db4289c594fcb3d01aa0e 100644
--- a/Jenkinsfile2
+++ b/Jenkinsfile2
@@ -173,7 +173,7 @@ def pre_test_build_mac() {
'''
sh '''
cd ${WK}/debug
- cmake ..
+ cmake .. -DBUILD_TEST=true
make -j8
'''
sh '''
@@ -218,12 +218,12 @@ def pre_test_win(){
if (env.CHANGE_URL =~ /\/TDengine\//) {
bat '''
cd %WIN_INTERNAL_ROOT%
- git pull
+ git pull origin ''' + env.CHANGE_TARGET + '''
'''
bat '''
cd %WIN_COMMUNITY_ROOT%
git remote prune origin
- git pull
+ git pull origin ''' + env.CHANGE_TARGET + '''
'''
bat '''
cd %WIN_COMMUNITY_ROOT%
@@ -236,7 +236,7 @@ def pre_test_win(){
} else if (env.CHANGE_URL =~ /\/TDinternal\//) {
bat '''
cd %WIN_INTERNAL_ROOT%
- git pull
+ git pull origin ''' + env.CHANGE_TARGET + '''
'''
bat '''
cd %WIN_INTERNAL_ROOT%
@@ -302,7 +302,7 @@ def pre_test_build_win() {
set CL=/MP8
echo ">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> cmake"
time /t
- cmake .. -G "NMake Makefiles JOM" || exit 7
+ cmake .. -G "NMake Makefiles JOM" -DBUILD_TEST=true || exit 7
echo ">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> jom -j 6"
time /t
jom -j 6 || exit 8
diff --git a/README.md b/README.md
index 611d97aac9436bdcc732efcf98822f2dd11d74ab..fe9bb49ed8d70afb7b1f3a84933efd62b672229e 100644
--- a/README.md
+++ b/README.md
@@ -15,24 +15,26 @@
[](https://coveralls.io/github/taosdata/TDengine?branch=develop)
[](https://bestpractices.coreinfrastructure.org/projects/4201)
-English | [简体中文](README-CN.md) | We are hiring, check [here](https://tdengine.com/careers)
+English | [简体中文](README-CN.md) | [Learn more about TSDB](https://tdengine.com/tsdb/)
# What is TDengine?
-TDengine is an open source, high-performance, cloud native [time-series database](https://tdengine.com/tsdb/what-is-a-time-series-database/) optimized for Internet of Things (IoT), Connected Cars, and Industrial IoT. It enables efficient, real-time data ingestion, processing, and monitoring of TB and even PB scale data per day, generated by billions of sensors and data collectors. TDengine differentiates itself from other time-seires databases with the following advantages:
+TDengine is an open source, high-performance, cloud native [time-series database](https://tdengine.com/tsdb/) optimized for Internet of Things (IoT), Connected Cars, and Industrial IoT. It enables efficient, real-time data ingestion, processing, and monitoring of TB and even PB scale data per day, generated by billions of sensors and data collectors. TDengine differentiates itself from other time-seires databases with the following advantages:
-- **[High-Performance](https://tdengine.com/tdengine/high-performance-time-series-database/)**: TDengine is the only time-series database to solve the high cardinality issue to support billions of data collection points while out performing other time-series databases for data ingestion, querying and data compression.
+- **[High Performance](https://tdengine.com/tdengine/high-performance-time-series-database/)**: TDengine is the only time-series database to solve the high cardinality issue to support billions of data collection points while out performing other time-series databases for data ingestion, querying and data compression.
- **[Simplified Solution](https://tdengine.com/tdengine/simplified-time-series-data-solution/)**: Through built-in caching, stream processing and data subscription features, TDengine provides a simplified solution for time-series data processing. It reduces system design complexity and operation costs significantly.
- **[Cloud Native](https://tdengine.com/tdengine/cloud-native-time-series-database/)**: Through native distributed design, sharding and partitioning, separation of compute and storage, RAFT, support for kubernetes deployment and full observability, TDengine is a cloud native Time-Series Database and can be deployed on public, private or hybrid clouds.
-- **[Ease of Use](https://docs.tdengine.com/get-started/docker/)**: For administrators, TDengine significantly reduces the effort to deploy and maintain. For developers, it provides a simple interface, simplified solution and seamless integrations for third party tools. For data users, it gives easy data access.
+- **[Ease of Use](https://tdengine.com/tdengine/easy-time-series-data-platform/)**: For administrators, TDengine significantly reduces the effort to deploy and maintain. For developers, it provides a simple interface, simplified solution and seamless integrations for third party tools. For data users, it gives easy data access.
- **[Easy Data Analytics](https://tdengine.com/tdengine/time-series-data-analytics-made-easy/)**: Through super tables, storage and compute separation, data partitioning by time interval, pre-computation and other means, TDengine makes it easy to explore, format, and get access to data in a highly efficient way.
- **[Open Source](https://tdengine.com/tdengine/open-source-time-series-database/)**: TDengine’s core modules, including cluster feature, are all available under open source licenses. It has gathered 18.8k stars on GitHub. There is an active developer community, and over 139k running instances worldwide.
+For a full list of TDengine competitive advantages, please [check here](https://tdengine.com/tdengine/)
+
# Documentation
For user manual, system design and architecture, please refer to [TDengine Documentation](https://docs.tdengine.com) ([TDengine 文档](https://docs.taosdata.com))
@@ -230,9 +232,9 @@ After building successfully, TDengine can be installed by
sudo make install
```
-Users can find more information about directories installed on the system in the [directory and files](https://docs.taosdata.com/reference/directory/) section.
+Users can find more information about directories installed on the system in the [directory and files](https://docs.tdengine.com/reference/directory/) section.
-Installing from source code will also configure service management for TDengine.Users can also choose to [install from packages](https://docs.taosdata.com/get-started/package/) for it.
+Installing from source code will also configure service management for TDengine.Users can also choose to [install from packages](https://docs.tdengine.com/get-started/package/) for it.
To start the service after installation, in a terminal, use:
@@ -319,6 +321,11 @@ TDengine provides abundant developing tools for users to develop on TDengine. Fo
Please follow the [contribution guidelines](CONTRIBUTING.md) to contribute to the project.
-# Join TDengine WeChat Group
+# Join the TDengine Community
+
+For more information about TDengine, you can follow us on social media and join our Discord server:
-Add WeChat “tdengine” to join the group,you can communicate with other users.
+- [Discord](https://discord.com/invite/VZdSuUg4pS)
+- [Twitter](https://twitter.com/TaosData)
+- [LinkedIn](https://www.linkedin.com/company/tdengine/)
+- [YouTube](https://www.youtube.com/channel/UCmp-1U6GS_3V3hjir6Uq5DQ)
diff --git a/TDenginelogo.png b/TDenginelogo.png
deleted file mode 100644
index 19a92592d7e8871778f5f3a6edd6314260d62551..0000000000000000000000000000000000000000
Binary files a/TDenginelogo.png and /dev/null differ
diff --git a/cmake/cmake.define b/cmake/cmake.define
index 376a55d3963932275286821a067039501eecef5a..5d64815a9aa90741a0d6aca7e51518d2263932a2 100644
--- a/cmake/cmake.define
+++ b/cmake/cmake.define
@@ -2,8 +2,6 @@ cmake_minimum_required(VERSION 3.0)
set(CMAKE_VERBOSE_MAKEFILE OFF)
-SET(BUILD_SHARED_LIBS "OFF")
-
#set output directory
SET(LIBRARY_OUTPUT_PATH ${PROJECT_BINARY_DIR}/build/lib)
SET(EXECUTABLE_OUTPUT_PATH ${PROJECT_BINARY_DIR}/build/bin)
diff --git a/cmake/cmake.install b/cmake/cmake.install
index 6dc6864975c0d36a024500d8a09fe3b6f9a6a850..fd1e080ddab1478f73689e7cced405ae8404fbc2 100644
--- a/cmake/cmake.install
+++ b/cmake/cmake.install
@@ -1,3 +1,19 @@
+SET(PREPARE_ENV_CMD "prepare_env_cmd")
+SET(PREPARE_ENV_TARGET "prepare_env_target")
+ADD_CUSTOM_COMMAND(OUTPUT ${PREPARE_ENV_CMD}
+ POST_BUILD
+ COMMAND echo "make test directory"
+ DEPENDS taosd
+ COMMAND ${CMAKE_COMMAND} -E make_directory ${TD_TESTS_OUTPUT_DIR}/cfg/
+ COMMAND ${CMAKE_COMMAND} -E make_directory ${TD_TESTS_OUTPUT_DIR}/log/
+ COMMAND ${CMAKE_COMMAND} -E make_directory ${TD_TESTS_OUTPUT_DIR}/data/
+ COMMAND ${CMAKE_COMMAND} -E echo dataDir ${TD_TESTS_OUTPUT_DIR}/data > ${TD_TESTS_OUTPUT_DIR}/cfg/taos.cfg
+ COMMAND ${CMAKE_COMMAND} -E echo logDir ${TD_TESTS_OUTPUT_DIR}/log >> ${TD_TESTS_OUTPUT_DIR}/cfg/taos.cfg
+ COMMAND ${CMAKE_COMMAND} -E echo charset UTF-8 >> ${TD_TESTS_OUTPUT_DIR}/cfg/taos.cfg
+ COMMAND ${CMAKE_COMMAND} -E echo monitor 0 >> ${TD_TESTS_OUTPUT_DIR}/cfg/taos.cfg
+ COMMENT "prepare taosd environment")
+ADD_CUSTOM_TARGET(${PREPARE_ENV_TARGET} ALL WORKING_DIRECTORY ${TD_EXECUTABLE_OUTPUT_PATH} DEPENDS ${PREPARE_ENV_CMD})
+
IF (TD_LINUX)
SET(TD_MAKE_INSTALL_SH "${TD_SOURCE_DIR}/packaging/tools/make_install.sh")
INSTALL(CODE "MESSAGE(\"make install script: ${TD_MAKE_INSTALL_SH}\")")
diff --git a/cmake/cmake.options b/cmake/cmake.options
index bec64f7bf00cdb0c6fddc713af0801eae08d45ea..60ff00affc01408b084a8993441e4fe7052f4977 100644
--- a/cmake/cmake.options
+++ b/cmake/cmake.options
@@ -2,6 +2,12 @@
# Deps options
# =========================================================
+option(
+ BUILD_TEST
+ "If build unit tests using googletest"
+ OFF
+)
+
IF(${TD_WINDOWS})
MESSAGE("build pthread Win32")
@@ -45,12 +51,6 @@ IF(${TD_WINDOWS})
"If build wingetopt on Windows"
ON
)
-
- option(
- BUILD_TEST
- "If build unit tests using googletest"
- ON
- )
option(
TDENGINE_3
@@ -65,31 +65,17 @@ IF(${TD_WINDOWS})
)
ELSEIF (TD_DARWIN_64)
- add_definitions(-DCOMPILER_SUPPORTS_CXX13)
- option(
- BUILD_TEST
- "If build unit tests using googletest"
- ON
- )
-ELSE ()
- include(CheckCXXCompilerFlag)
- CHECK_CXX_COMPILER_FLAG("-std=c++13" COMPILER_SUPPORTS_CXX13)
- IF(${COMPILER_SUPPORTS_CXX13})
+ IF(${BUILD_TEST})
add_definitions(-DCOMPILER_SUPPORTS_CXX13)
- option(
- BUILD_TEST
- "If build unit tests using googletest"
- ON
- )
- ELSE ()
- option(
- BUILD_TEST
- "If build unit tests using googletest"
- OFF
- )
ENDIF ()
ENDIF ()
+option(
+ BUILD_SHARED_LIBS
+ ""
+ OFF
+ )
+
option(
RUST_BINDINGS
"If build with rust-bindings"
diff --git a/cmake/cmake.platform b/cmake/cmake.platform
index 887fbd86d55d782cdf3c1d7c95dfee2dc2ec446d..e4d440d76edbfcf0d1d6f932cfa598fe0a0f43d2 100644
--- a/cmake/cmake.platform
+++ b/cmake/cmake.platform
@@ -46,7 +46,7 @@ IF (${CMAKE_SYSTEM_NAME} MATCHES "Linux" OR ${CMAKE_SYSTEM_NAME} MATCHES "Darwin
MESSAGE("Current system processor is ${CMAKE_SYSTEM_PROCESSOR}.")
IF (${CMAKE_SYSTEM_PROCESSOR} MATCHES "arm64" OR ${CMAKE_SYSTEM_PROCESSOR} MATCHES "x86_64")
- MESSAGE("Current system arch is arm64")
+ MESSAGE("Current system arch is 64")
SET(TD_DARWIN_64 TRUE)
ADD_DEFINITIONS("-D_TD_DARWIN_64")
ENDIF ()
@@ -87,7 +87,7 @@ IF ("${CPUTYPE}" STREQUAL "")
SET(TD_ARM_32 TRUE)
ADD_DEFINITIONS("-D_TD_ARM_")
ADD_DEFINITIONS("-D_TD_ARM_32")
- ELSEIF (CMAKE_SYSTEM_PROCESSOR MATCHES "aarch64")
+ ELSEIF (CMAKE_SYSTEM_PROCESSOR MATCHES "(aarch64)|(arm64)")
MESSAGE(STATUS "The current platform is aarch64")
SET(PLATFORM_ARCH_STR "arm64")
SET(TD_ARM_64 TRUE)
diff --git a/cmake/cmake.version b/cmake/cmake.version
index db29644b387306ce8f3ee473921dab4c7d05b10a..05094f10cccea02ecb6a9cc4b9283335cb4424b6 100644
--- a/cmake/cmake.version
+++ b/cmake/cmake.version
@@ -2,7 +2,7 @@
IF (DEFINED VERNUMBER)
SET(TD_VER_NUMBER ${VERNUMBER})
ELSE ()
- SET(TD_VER_NUMBER "3.0.0.1")
+ SET(TD_VER_NUMBER "3.0.1.4")
ENDIF ()
IF (DEFINED VERCOMPATIBLE)
diff --git a/cmake/taosadapter_CMakeLists.txt.in b/cmake/taosadapter_CMakeLists.txt.in
index f182beed33c76200649f93d96b68c153ec452b9a..16444c07f24bf97b174bf3bb92d1776ab5906816 100644
--- a/cmake/taosadapter_CMakeLists.txt.in
+++ b/cmake/taosadapter_CMakeLists.txt.in
@@ -2,7 +2,7 @@
# taosadapter
ExternalProject_Add(taosadapter
GIT_REPOSITORY https://github.com/taosdata/taosadapter.git
- GIT_TAG abed566
+ GIT_TAG be729ab
SOURCE_DIR "${TD_SOURCE_DIR}/tools/taosadapter"
BINARY_DIR ""
#BUILD_IN_SOURCE TRUE
diff --git a/cmake/taostools_CMakeLists.txt.in b/cmake/taostools_CMakeLists.txt.in
index 2d9b00eee75c5f3283122cc8a5636f096d90fda6..5d2fcf27b2ea091b1b6dcfe32e474c410d83f2e5 100644
--- a/cmake/taostools_CMakeLists.txt.in
+++ b/cmake/taostools_CMakeLists.txt.in
@@ -2,7 +2,7 @@
# taos-tools
ExternalProject_Add(taos-tools
GIT_REPOSITORY https://github.com/taosdata/taos-tools.git
- GIT_TAG 833b721
+ GIT_TAG 70f5a1c
SOURCE_DIR "${TD_SOURCE_DIR}/tools/taos-tools"
BINARY_DIR ""
#BUILD_IN_SOURCE TRUE
diff --git a/cmake/taosws_CMakeLists.txt.in b/cmake/taosws_CMakeLists.txt.in
index 506559a245ee7a3e506e8481a12a3fe7f01dd5ac..ca8fff8da511ef86baa699af8246822d91982238 100644
--- a/cmake/taosws_CMakeLists.txt.in
+++ b/cmake/taosws_CMakeLists.txt.in
@@ -2,7 +2,7 @@
# taosws-rs
ExternalProject_Add(taosws-rs
GIT_REPOSITORY https://github.com/taosdata/taos-connector-rust.git
- GIT_TAG 7a54d21
+ GIT_TAG 1bdfca3
SOURCE_DIR "${TD_SOURCE_DIR}/tools/taosws-rs"
BINARY_DIR ""
#BUILD_IN_SOURCE TRUE
diff --git a/docs/en/01-index.md b/docs/en/01-index.md
index 22e62bc5e05fe83065b0e101644b01dbba2b5874..13552ea9dcfbae71a368cad9d55b4a0030ac266a 100644
--- a/docs/en/01-index.md
+++ b/docs/en/01-index.md
@@ -4,25 +4,26 @@ sidebar_label: Documentation Home
slug: /
---
-
-TDengine is an [open source](https://tdengine.com/tdengine/open-source-time-series-database/), [cloud native](https://tdengine.com/tdengine/cloud-native-time-series-database/) time-series database optimized for Internet of Things (IoT), Connected Cars, and Industrial IoT. It enables efficient, real-time data ingestion, processing, and monitoring of TB and even PB scale data per day, generated by billions of sensors and data collectors. This document is the TDengine user manual. It introduces the basic, as well as novel concepts, in TDengine, and also talks in detail about installation, features, SQL, APIs, operation, maintenance, kernel design and other topics. It’s written mainly for architects, developers and system administrators.
+TDengine is an [open-source](https://tdengine.com/tdengine/open-source-time-series-database/), [cloud-native](https://tdengine.com/tdengine/cloud-native-time-series-database/) [time-series database](https://tdengine.com/tsdb/) optimized for the Internet of Things (IoT), Connected Cars, and Industrial IoT. It enables efficient, real-time data ingestion, processing, and monitoring of TB and even PB scale data per day, generated by billions of sensors and data collectors. This document is the TDengine user manual. It introduces the basic, as well as novel concepts, in TDengine, and also talks in detail about installation, features, SQL, APIs, operation, maintenance, kernel design, and other topics. It’s written mainly for architects, developers, and system administrators.
To get an overview of TDengine, such as a feature list, benchmarks, and competitive advantages, please browse through the [Introduction](./intro) section.
-TDengine greatly improves the efficiency of data ingestion, querying and storage by exploiting the characteristics of time series data, introducing the novel concepts of "one table for one data collection point" and "super table", and designing an innovative storage engine. To understand the new concepts in TDengine and make full use of the features and capabilities of TDengine, please read [“Concepts”](./concept) thoroughly.
+TDengine greatly improves the efficiency of data ingestion, querying, and storage by exploiting the characteristics of time series data, introducing the novel concepts of "one table for one data collection point" and "super table", and designing an innovative storage engine. To understand the new concepts in TDengine and make full use of the features and capabilities of TDengine, please read [Concepts](./concept) thoroughly.
+
+If you are a developer, please read the [Developer Guide](./develop) carefully. This section introduces the database connection, data modeling, data ingestion, query, continuous query, cache, data subscription, user-defined functions, and other functionality in detail. Sample code is provided for a variety of programming languages. In most cases, you can just copy and paste the sample code, and make a few changes to accommodate your application, and it will work.
-If you are a developer, please read the [“Developer Guide”](./develop) carefully. This section introduces the database connection, data modeling, data ingestion, query, continuous query, cache, data subscription, user-defined functions, and other functionality in detail. Sample code is provided for a variety of programming languages. In most cases, you can just copy and paste the sample code, make a few changes to accommodate your application, and it will work.
+We live in the era of big data, and scale-up is unable to meet the growing needs of the business. Any modern data system must have the ability to scale out, and clustering has become an indispensable feature of big data systems. Not only did the TDengine team develop the cluster feature, but also decided to open source this important feature. To learn how to deploy, manage and maintain a TDengine cluster please refer to [Cluster Deployment](../deployment).
-We live in the era of big data, and scale-up is unable to meet the growing needs of business. Any modern data system must have the ability to scale out, and clustering has become an indispensable feature of big data systems. Not only did the TDengine team develop the cluster feature, but also decided to open source this important feature. To learn how to deploy, manage and maintain a TDengine cluster please refer to ["cluster deployment"](../deployment).
+TDengine uses ubiquitous SQL as its query language, which greatly reduces learning costs and migration costs. In addition to the standard SQL, TDengine has extensions to better support time series data analysis. These extensions include functions such as roll-up, interpolation, and time-weighted average, among many others. The [SQL Reference](./taos-sql) chapter describes the SQL syntax in detail and lists the various supported commands and functions.
-TDengine uses ubiquitious SQL as its query language, which greatly reduces learning costs and migration costs. In addition to the standard SQL, TDengine has extensions to better support time series data analysis. These extensions include functions such as roll up, interpolation and time weighted average, among many others. The ["SQL Reference"](./taos-sql) chapter describes the SQL syntax in detail, and lists the various supported commands and functions.
+If you are a system administrator who cares about installation, upgrade, fault tolerance, disaster recovery, data import, data export, system configuration, how to monitor whether TDengine is running healthily, and how to improve system performance, please refer to, and thoroughly read the [Administration](./operation) section.
-If you are a system administrator who cares about installation, upgrade, fault tolerance, disaster recovery, data import, data export, system configuration, how to monitor whether TDengine is running healthily, and how to improve system performance, please refer to, and thoroughly read the ["Administration"](./operation) section.
+If you want to know more about TDengine tools, the REST API, and connectors for various programming languages, please see the [Reference](./reference) chapter.
-If you want to know more about TDengine tools, the REST API, and connectors for various programming languages, please see the ["Reference"](./reference) chapter.
+If you are very interested in the internal design of TDengine, please read the chapter [Inside TDengine](./tdinternal), which introduces the cluster design, data partitioning, sharding, writing, and reading processes in detail. If you want to study TDengine code or even contribute code, please read this chapter carefully.
-If you are very interested in the internal design of TDengine, please read the chapter ["Inside TDengine”](./tdinternal), which introduces the cluster design, data partitioning, sharding, writing, and reading processes in detail. If you want to study TDengine code or even contribute code, please read this chapter carefully.
+To get more general introduction about time series database, please read through [a series of articles](https://tdengine.com/tsdb/). To lean more competitive advantages about TDengine, please read through [a series of blogs](https://tdengine.com/tdengine/).
-TDengine is an open source database, and we would love for you to be a part of TDengine. If you find any errors in the documentation, or see parts where more clarity or elaboration is needed, please click "Edit this page" at the bottom of each page to edit it directly.
+TDengine is an open-source database, and we would love for you to be a part of TDengine. If you find any errors in the documentation or see parts where more clarity or elaboration is needed, please click "Edit this page" at the bottom of each page to edit it directly.
-Together, we make a difference.
+Together, we make a difference!
diff --git a/docs/en/02-intro/index.md b/docs/en/02-intro/index.md
index b4636e54a676598d0fc513034d972c4b365a620c..20840883523607512bd8c21643d49c5b50d3a528 100644
--- a/docs/en/02-intro/index.md
+++ b/docs/en/02-intro/index.md
@@ -3,7 +3,7 @@ title: Introduction
toc_max_heading_level: 2
---
-TDengine is an open source, high-performance, cloud native [time-series database](https://tdengine.com/tsdb/) optimized for Internet of Things (IoT), Connected Cars, and Industrial IoT. Its code, including its cluster feature is open source under GNU AGPL v3.0. Besides the database engine, it provides [caching](../develop/cache), [stream processing](../develop/stream), [data subscription](../develop/tmq) and other functionalities to reduce the system complexity and cost of development and operation.
+TDengine is an [open source](https://tdengine.com/tdengine/open-source-time-series-database/), [high-performance](https://tdengine.com/tdengine/high-performance-time-series-database/), [cloud native](https://tdengine.com/tdengine/cloud-native-time-series-database/) [time-series database](https://tdengine.com/tsdb/) optimized for Internet of Things (IoT), Connected Cars, and Industrial IoT. Its code, including its cluster feature is open source under GNU AGPL v3.0. Besides the database engine, it provides [caching](../develop/cache), [stream processing](../develop/stream), [data subscription](../develop/tmq) and other functionalities to reduce the system complexity and cost of development and operation.
This section introduces the major features, competitive advantages, typical use-cases and benchmarks to help you get a high level overview of TDengine.
@@ -11,49 +11,69 @@ This section introduces the major features, competitive advantages, typical use-
The major features are listed below:
-1. While TDengine supports [using SQL to insert](/develop/insert-data/sql-writing), it also supports [Schemaless writing](/reference/schemaless/) just like NoSQL databases. TDengine also supports standard protocols like [InfluxDB LINE](/develop/insert-data/influxdb-line),[OpenTSDB Telnet](/develop/insert-data/opentsdb-telnet), [OpenTSDB JSON ](/develop/insert-data/opentsdb-json) among others.
-2. TDengine supports seamless integration with third-party data collection agents like [Telegraf](/third-party/telegraf),[Prometheus](/third-party/prometheus),[StatsD](/third-party/statsd),[collectd](/third-party/collectd),[icinga2](/third-party/icinga2), [TCollector](/third-party/tcollector), [EMQX](/third-party/emq-broker), [HiveMQ](/third-party/hive-mq-broker). These agents can write data into TDengine with simple configuration and without a single line of code.
-3. Support for [all kinds of queries](/develop/query-data), including aggregation, nested query, downsampling, interpolation and others.
-4. Support for [user defined functions](/develop/udf).
-5. Support for [caching](/develop/cache). TDengine always saves the last data point in cache, so Redis is not needed in some scenarios.
-6. Support for [continuous query](../develop/stream).
-7. Support for [data subscription](../develop/tmq) with the capability to specify filter conditions.
-8. Support for [cluster](../deployment/), with the capability of increasing processing power by adding more nodes. High availability is supported by replication.
-9. Provides an interactive [command-line interface](/reference/taos-shell) for management, maintenance and ad-hoc queries.
-10. Provides many ways to [import](/operation/import) and [export](/operation/export) data.
-11. Provides [monitoring](/operation/monitor) on running instances of TDengine.
-12. Provides [connectors](/reference/connector/) for [C/C++](/reference/connector/cpp), [Java](/reference/connector/java), [Python](/reference/connector/python), [Go](/reference/connector/go), [Rust](/reference/connector/rust), [Node.js](/reference/connector/node) and other programming languages.
-13. Provides a [REST API](/reference/rest-api/).
-14. Supports seamless integration with [Grafana](/third-party/grafana) for visualization.
-15. Supports seamless integration with Google Data Studio.
-
-For more details on features, please read through the entire documentation.
+1. Insert data
+ - Supports [using SQL to insert](../develop/insert-data/sql-writing).
+ - Supports [schemaless writing](../reference/schemaless/) just like NoSQL databases. It also supports standard protocols like [InfluxDB Line](../develop/insert-data/influxdb-line), [OpenTSDB Telnet](../develop/insert-data/opentsdb-telnet), [OpenTSDB JSON ](../develop/insert-data/opentsdb-json) among others.
+ - Supports seamless integration with third-party tools like [Telegraf](../third-party/telegraf/), [Prometheus](../third-party/prometheus/), [collectd](../third-party/collectd/), [StatsD](../third-party/statsd/), [TCollector](../third-party/tcollector/), [EMQX](../third-party/emq-broker), [HiveMQ](../third-party/hive-mq-broker), and [Icinga2](../third-party/icinga2/), they can write data into TDengine with simple configuration and without a single line of code.
+2. Query data
+ - Supports standard [SQL](../taos-sql/), including nested query.
+ - Supports [time series specific functions](../taos-sql/function/#time-series-extensions) and [time series specific queries](../taos-sql/distinguished), like downsampling, interpolation, cumulated sum, time weighted average, state window, session window and many others.
+ - Supports [User Defined Functions (UDF)](../taos-sql/udf).
+3. [Caching](../develop/cache/): TDengine always saves the last data point in cache, so Redis is not needed for time-series data processing.
+4. [Stream Processing](../develop/stream/): Not only is the continuous query is supported, but TDengine also supports event driven stream processing, so Flink or Spark is not needed for time-series data processing.
+5. [Data Subscription](../develop/tmq/): Application can subscribe a table or a set of tables. API is the same as Kafka, but you can specify filter conditions.
+6. Visualization
+ - Supports seamless integration with [Grafana](../third-party/grafana/) for visualization.
+ - Supports seamless integration with Google Data Studio.
+7. Cluster
+ - Supports [cluster](../deployment/) with the capability of increasing processing power by adding more nodes.
+ - Supports [deployment on Kubernetes](../deployment/k8s/).
+ - Supports high availability via data replication.
+8. Administration
+ - Provides [monitoring](../operation/monitor) on running instances of TDengine.
+ - Provides many ways to [import](../operation/import) and [export](../operation/export) data.
+9. Tools
+ - Provides an interactive [Command-line Interface (CLI)](../reference/taos-shell) for management, maintenance and ad-hoc queries.
+ - Provides a tool [taosBenchmark](../reference/taosbenchmark/) for testing the performance of TDengine.
+10. Programming
+ - Provides [connectors](../reference/connector/) for [C/C++](../reference/connector/cpp), [Java](../reference/connector/java), [Python](../reference/connector/python), [Go](../reference/connector/go), [Rust](../reference/connector/rust), [Node.js](../reference/connector/node) and other programming languages.
+ - Provides a [REST API](../reference/rest-api/).
+
+For more details on features, please read through the entire documentation.
## Competitive Advantages
-By making full use of [characteristics of time series data](https://tdengine.com/tsdb/characteristics-of-time-series-data/), TDengine differentiates itself from other time series databases, with the following advantages.
+By making full use of [characteristics of time series data](https://tdengine.com/tsdb/characteristics-of-time-series-data/), TDengine differentiates itself from other [time series databases](https://tdengine.com/tsdb), with the following advantages.
- **[High-Performance](https://tdengine.com/tdengine/high-performance-time-series-database/)**: TDengine is the only time-series database to solve the high cardinality issue to support billions of data collection points while out performing other time-series databases for data ingestion, querying and data compression.
- **[Simplified Solution](https://tdengine.com/tdengine/simplified-time-series-data-solution/)**: Through built-in caching, stream processing and data subscription features, TDengine provides a simplified solution for time-series data processing. It reduces system design complexity and operation costs significantly.
-- **[Cloud Native](https://tdengine.com/tdengine/cloud-native-time-series-database/)**: Through native distributed design, sharding and partitioning, separation of compute and storage, RAFT, support for kubernetes deployment and full observability, TDengine is a cloud native Time-Series Database and can be deployed on public, private or hybrid clouds.
+- **[Cloud Native](https://tdengine.com/tdengine/cloud-native-time-series-database/)**: Through native distributed design, sharding and partitioning, separation of compute and storage, RAFT, support for Kubernetes deployment and full observability, TDengine is a cloud native Time-series Database and can be deployed on public, private or hybrid clouds.
- **[Ease of Use](https://tdengine.com/tdengine/easy-time-series-data-platform/)**: For administrators, TDengine significantly reduces the effort to[
-](https://tdengine.com/tdengine/easy-time-series-data-platform/) deploy and maintain. For developers, it provides a simple interface, simplified solution and seamless integrations for third party tools. For data users, it gives easy data access.
+ ](https://tdengine.com/tdengine/easy-time-series-data-platform/) deploy and maintain. For developers, it provides a simple interface, simplified solution and seamless integrations for third party tools. For data users, it gives easy data access.
-- **[Easy Data Analytics](https://tdengine.com/tdengine/time-series-data-analytics-made-easy/)**: Through super tables, storage and compute separation, data partitioning by time interval, pre-computation and other means, TDengine makes it easy to explore, format, and get access to data in a highly efficient way.
+- **[Easy Data Analytics](https://tdengine.com/tdengine/time-series-data-analytics-made-easy/)**: Through super tables, storage and compute separation, data partitioning by time interval, pre-computation and other means, TDengine makes it easy to explore, format, and get access to data in a highly efficient way.
- **[Open Source](https://tdengine.com/tdengine/open-source-time-series-database/)**: TDengine’s core modules, including cluster feature, are all available under open source licenses. It has gathered over 19k stars on GitHub. There is an active developer community, and over 140k running instances worldwide.
-With TDengine, the total cost of ownership of your time-series data platform can be greatly reduced. 1: With its superior performance, the computing and storage resources are reduced significantly;2: With SQL support, it can be seamlessly integrated with many third party tools, and learning costs/migration costs are reduced significantly;3: With its simplified solution and nearly zero management, the operation and maintenance costs are reduced significantly.
+With TDengine, the total cost of ownership of your time-series data platform can be greatly reduced.
+
+1. With its superior performance, the computing and storage resources are reduced significantly.
+2. With SQL support, it can be seamlessly integrated with many third party tools, and learning costs/migration costs are reduced significantly.
+3. With its simplified solution and nearly zero management, the operation and maintenance costs are reduced significantly.
## Technical Ecosystem
+
This is how TDengine would be situated, in a typical time-series data processing platform:
+
+

-Figure 1. TDengine Technical Ecosystem
+Figure 1. TDengine Technical Ecosystem
+
On the left-hand side, there are data collection agents like OPC-UA, MQTT, Telegraf and Kafka. On the right-hand side, visualization/BI tools, HMI, Python/R, and IoT Apps can be connected. TDengine itself provides an interactive command-line interface and a web interface for management and maintenance.
@@ -63,42 +83,42 @@ As a high-performance, scalable and SQL supported time-series database, TDengine
### Characteristics and Requirements of Data Sources
-| **Data Source Characteristics and Requirements** | **Not Applicable** | **Might Be Applicable** | **Very Applicable** | **Description** |
-| -------------------------------------------------------- | ------------------ | ----------------------- | ------------------- | :----------------------------------------------------------- |
-| A massive amount of total data | | | √ | TDengine provides excellent scale-out functions in terms of capacity, and has a storage structure with matching high compression ratio to achieve the best storage efficiency in the industry.|
-| Data input velocity is extremely high | | | √ | TDengine's performance is much higher than that of other similar products. It can continuously process larger amounts of input data in the same hardware environment, and provides a performance evaluation tool that can easily run in the user environment. |
-| A huge number of data sources | | | √ | TDengine is optimized specifically for a huge number of data sources. It is especially suitable for efficiently ingesting, writing and querying data from billions of data sources. |
+| **Data Source Characteristics and Requirements** | **Not Applicable** | **Might Be Applicable** | **Very Applicable** | **Description** |
+| ------------------------------------------------ | ------------------ | ----------------------- | ------------------- | :------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ |
+| A massive amount of total data | | | √ | TDengine provides excellent scale-out functions in terms of capacity, and has a storage structure with matching high compression ratio to achieve the best storage efficiency in the industry. |
+| Data input velocity is extremely high | | | √ | TDengine's performance is much higher than that of other similar products. It can continuously process larger amounts of input data in the same hardware environment, and provides a performance evaluation tool that can easily run in the user environment. |
+| A huge number of data sources | | | √ | TDengine is optimized specifically for a huge number of data sources. It is especially suitable for efficiently ingesting, writing and querying data from billions of data sources. |
### System Architecture Requirements
-| **System Architecture Requirements** | **Not Applicable** | **Might Be Applicable** | **Very Applicable** | **Description** |
-| ------------------------------------------------- | ------------------ | ----------------------- | ------------------- | ------------------------------------------------------------ |
+| **System Architecture Requirements** | **Not Applicable** | **Might Be Applicable** | **Very Applicable** | **Description** |
+| ----------------------------------------- | ------------------ | ----------------------- | ------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
| A simple and reliable system architecture | | | √ | TDengine's system architecture is very simple and reliable, with its own message queue, cache, stream computing, monitoring and other functions. There is no need to integrate any additional third-party products. |
-| Fault-tolerance and high-reliability | | | √ | TDengine has cluster functions to automatically provide high-reliability and high-availability functions such as fault tolerance and disaster recovery. |
-| Standardization support | | | √ | TDengine supports standard SQL and provides SQL extensions for time-series data analysis. |
+| Fault-tolerance and high-reliability | | | √ | TDengine has cluster functions to automatically provide high-reliability and high-availability functions such as fault tolerance and disaster recovery. |
+| Standardization support | | | √ | TDengine supports standard SQL and provides SQL extensions for time-series data analysis. |
### System Function Requirements
-| **System Function Requirements** | **Not Applicable** | **Might Be Applicable** | **Very Applicable** | **Description** |
-| ------------------------------------------------- | ------------------ | ----------------------- | ------------------- | ------------------------------------------------------------ |
-| Complete data processing algorithms built-in | | √ | | While TDengine implements various general data processing algorithms, industry specific algorithms and special types of processing will need to be implemented at the application level.|
-| A large number of crosstab queries | | √ | | This type of processing is better handled by general purpose relational database systems but TDengine can work in concert with relational database systems to provide more complete solutions. |
+| **System Function Requirements** | **Not Applicable** | **Might Be Applicable** | **Very Applicable** | **Description** |
+| -------------------------------------------- | ------------------ | ----------------------- | ------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
+| Complete data processing algorithms built-in | | √ | | While TDengine implements various general data processing algorithms, industry specific algorithms and special types of processing will need to be implemented at the application level. |
+| A large number of crosstab queries | | √ | | This type of processing is better handled by general purpose relational database systems but TDengine can work in concert with relational database systems to provide more complete solutions. |
### System Performance Requirements
-| **System Performance Requirements** | **Not Applicable** | **Might Be Applicable** | **Very Applicable** | **Description** |
-| ------------------------------------------------- | ------------------ | ----------------------- | ------------------- | ------------------------------------------------------------ |
-| Very large total processing capacity | | | √ | TDengine’s cluster functions can easily improve processing capacity via multi-server coordination. |
-| Extremely high-speed data processing | | | √ | TDengine’s storage and data processing are optimized for IoT, and can process data many times faster than similar products.|
-| Extremely fast processing of high resolution data | | | √ | TDengine has achieved the same or better performance than other relational and NoSQL data processing systems. |
+| **System Performance Requirements** | **Not Applicable** | **Might Be Applicable** | **Very Applicable** | **Description** |
+| ------------------------------------------------- | ------------------ | ----------------------- | ------------------- | --------------------------------------------------------------------------------------------------------------------------- |
+| Very large total processing capacity | | | √ | TDengine’s cluster functions can easily improve processing capacity via multi-server coordination. |
+| Extremely high-speed data processing | | | √ | TDengine’s storage and data processing are optimized for IoT, and can process data many times faster than similar products. |
+| Extremely fast processing of high resolution data | | | √ | TDengine has achieved the same or better performance than other relational and NoSQL data processing systems. |
### System Maintenance Requirements
-| **System Maintenance Requirements** | **Not Applicable** | **Might Be Applicable** | **Very Applicable** | **Description** |
-| ------------------------------------------------- | ------------------ | ----------------------- | ------------------- | ------------------------------------------------------------ |
-| Native high-reliability | | | √ | TDengine has a very robust, reliable and easily configurable system architecture to simplify routine operation. Human errors and accidents are eliminated to the greatest extent, with a streamlined experience for operators. |
-| Minimize learning and maintenance costs | | | √ | In addition to being easily configurable, standard SQL support and the TDengine CLI for ad hoc queries makes maintenance simpler, allows reuse and reduces learning costs.|
-| Abundant talent supply | √ | | | Given the above, and given the extensive training and professional services provided by TDengine, it is easy to migrate from existing solutions or create a new and lasting solution based on TDengine.|
+| **System Maintenance Requirements** | **Not Applicable** | **Might Be Applicable** | **Very Applicable** | **Description** |
+| --------------------------------------- | ------------------ | ----------------------- | ------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ |
+| Native high-reliability | | | √ | TDengine has a very robust, reliable and easily configurable system architecture to simplify routine operation. Human errors and accidents are eliminated to the greatest extent, with a streamlined experience for operators. |
+| Minimize learning and maintenance costs | | | √ | In addition to being easily configurable, standard SQL support and the TDengine CLI for ad hoc queries makes maintenance simpler, allows reuse and reduces learning costs. |
+| Abundant talent supply | √ | | | Given the above, and given the extensive training and professional services provided by TDengine, it is easy to migrate from existing solutions or create a new and lasting solution based on TDengine. |
## Comparison with other databases
@@ -107,3 +127,8 @@ As a high-performance, scalable and SQL supported time-series database, TDengine
- [TDengine vs OpenTSDB](https://tdengine.com/2019/09/12/710.html)
- [TDengine vs Cassandra](https://tdengine.com/2019/09/12/708.html)
- [TDengine vs InfluxDB](https://tdengine.com/2019/09/12/706.html)
+
+## More readings
+- [Introduction to Time-Series Database](https://tdengine.com/tsdb/)
+- [Introduction to TDengine competitive advantages](https://tdengine.com/tdengine/)
+
diff --git a/docs/en/04-concept/index.md b/docs/en/04-concept/index.md
index 5a9c55fdd68d7c2f36fd9a718efa07c4388d55a7..0b1b226c17100d56313b5480e26f437841afe8c7 100644
--- a/docs/en/04-concept/index.md
+++ b/docs/en/04-concept/index.md
@@ -6,101 +6,100 @@ In order to explain the basic concepts and provide some sample code, the TDengin
-
- Device ID
- Time Stamp
- Collected Metrics
- Tags
+
+
+ Device ID
+ Timestamp
+ Collected Metrics
+ Tags
-
-Device ID
-Time Stamp
-current
-voltage
-phase
-location
-groupId
-
-
-
-
-d1001
-1538548685000
-10.3
-219
-0.31
-California.SanFrancisco
-2
-
-
-d1002
-1538548684000
-10.2
-220
-0.23
-California.SanFrancisco
-3
-
-
-d1003
-1538548686500
-11.5
-221
-0.35
-California.LosAngeles
-3
-
-
-d1004
-1538548685500
-13.4
-223
-0.29
-California.LosAngeles
-2
-
-
-d1001
-1538548695000
-12.6
-218
-0.33
-California.SanFrancisco
-2
-
-
-d1004
-1538548696600
-11.8
-221
-0.28
-California.LosAngeles
-2
-
-
-d1002
-1538548696650
-10.3
-218
-0.25
-California.SanFrancisco
-3
-
-
-d1001
-1538548696800
-12.3
-221
-0.31
-California.SanFrancisco
-2
-
-
+
+ current
+ voltage
+ phase
+ location
+ groupid
+
+
+
+
+ d1001
+ 1538548685000
+ 10.3
+ 219
+ 0.31
+ California.SanFrancisco
+ 2
+
+
+ d1002
+ 1538548684000
+ 10.2
+ 220
+ 0.23
+ California.SanFrancisco
+ 3
+
+
+ d1003
+ 1538548686500
+ 11.5
+ 221
+ 0.35
+ California.LosAngeles
+ 3
+
+
+ d1004
+ 1538548685500
+ 13.4
+ 223
+ 0.29
+ California.LosAngeles
+ 2
+
+
+ d1001
+ 1538548695000
+ 12.6
+ 218
+ 0.33
+ California.SanFrancisco
+ 2
+
+
+ d1004
+ 1538548696600
+ 11.8
+ 221
+ 0.28
+ California.LosAngeles
+ 2
+
+
+ d1002
+ 1538548696650
+ 10.3
+ 218
+ 0.25
+ California.SanFrancisco
+ 3
+
+
+ d1001
+ 1538548696800
+ 12.3
+ 221
+ 0.31
+ California.SanFrancisco
+ 2
+
+
Table 1: Smart meter example data
-Each row contains the device ID, time stamp, collected metrics (current, voltage, phase as above), and static tags (location and groupId in Table 1) associated with the devices. Each smart meter generates a row (measurement) in a pre-defined time interval or triggered by an external event. The device produces a sequence of measurements with associated time stamps.
+Each row contains the device ID, timestamp, collected metrics (`current`, `voltage`, `phase` as above), and static tags (`location` and `groupid` in Table 1) associated with the devices. Each smart meter generates a row (measurement) in a pre-defined time interval or triggered by an external event. The device produces a sequence of measurements with associated timestamps.
## Metric
@@ -112,22 +111,22 @@ Label/Tag refers to the static properties of sensors, equipment or other types o
## Data Collection Point
-Data Collection Point (DCP) refers to hardware or software that collects metrics based on preset time periods or triggered by events. A data collection point can collect one or multiple metrics, but these metrics are collected at the same time and have the same time stamp. For some complex equipment, there are often multiple data collection points, and the sampling rate of each collection point may be different, and fully independent. For example, for a car, there could be a data collection point to collect GPS position metrics, a data collection point to collect engine status metrics, and a data collection point to collect the environment metrics inside the car. So in this example the car would have three data collection points. In the smart meters example, d1001, d1002, d1003, and d1004 are the data collection points.
+Data Collection Point (DCP) refers to hardware or software that collects metrics based on preset time periods or triggered by events. A data collection point can collect one or multiple metrics, but these metrics are collected at the same time and have the same timestamp. For some complex equipment, there are often multiple data collection points, and the sampling rate of each collection point may be different, and fully independent. For example, for a car, there could be a data collection point to collect GPS position metrics, a data collection point to collect engine status metrics, and a data collection point to collect the environment metrics inside the car. So in this example the car would have three data collection points. In the smart meters example, d1001, d1002, d1003, and d1004 are the data collection points.
## Table
Since time-series data is most likely to be structured data, TDengine adopts the traditional relational database model to process them with a short learning curve. You need to create a database, create tables, then insert data points and execute queries to explore the data.
-To make full use of time-series data characteristics, TDengine adopts a strategy of "**One Table for One Data Collection Point**". TDengine requires the user to create a table for each data collection point (DCP) to store collected time-series data. For example, if there are over 10 million smart meters, it means 10 million tables should be created. For the table above, 4 tables should be created for devices D1001, D1002, D1003, and D1004 to store the data collected. This design has several benefits:
+To make full use of time-series data characteristics, TDengine adopts a strategy of "**One Table for One Data Collection Point**". TDengine requires the user to create a table for each data collection point (DCP) to store collected time-series data. For example, if there are over 10 million smart meters, it means 10 million tables should be created. For the table above, 4 tables should be created for devices d1001, d1002, d1003, and d1004 to store the data collected. This design has several benefits:
1. Since the metric data from different DCP are fully independent, the data source of each DCP is unique, and a table has only one writer. In this way, data points can be written in a lock-free manner, and the writing speed can be greatly improved.
2. For a DCP, the metric data generated by DCP is ordered by timestamp, so the write operation can be implemented by simple appending, which further greatly improves the data writing speed.
3. The metric data from a DCP is continuously stored, block by block. If you read data for a period of time, it can greatly reduce random read operations and improve read and query performance by orders of magnitude.
4. Inside a data block for a DCP, columnar storage is used, and different compression algorithms are used for different data types. Metrics generally don't vary as significantly between themselves over a time range as compared to other metrics, which allows for a higher compression rate.
-If the metric data of multiple DCPs are traditionally written into a single table, due to uncontrollable network delays, the timing of the data from different DCPs arriving at the server cannot be guaranteed, write operations must be protected by locks, and metric data from one DCP cannot be guaranteed to be continuously stored together. ** One table for one data collection point can ensure the best performance of insert and query of a single data collection point to the greatest possible extent.**
+If the metric data of multiple DCPs are traditionally written into a single table, due to uncontrollable network delays, the timing of the data from different DCPs arriving at the server cannot be guaranteed, write operations must be protected by locks, and metric data from one DCP cannot be guaranteed to be continuously stored together. **One table for one data collection point can ensure the best performance of insert and query of a single data collection point to the greatest possible extent.**
-TDengine suggests using DCP ID as the table name (like D1001 in the above table). Each DCP may collect one or multiple metrics (like the current, voltage, phase as above). Each metric has a corresponding column in the table. The data type for a column can be int, float, string and others. In addition, the first column in the table must be a timestamp. TDengine uses the time stamp as the index, and won’t build the index on any metrics stored. Column wise storage is used.
+TDengine suggests using DCP ID as the table name (like d1001 in the above table). Each DCP may collect one or multiple metrics (like the `current`, `voltage`, `phase` as above). Each metric has a corresponding column in the table. The data type for a column can be int, float, string and others. In addition, the first column in the table must be a timestamp. TDengine uses the timestamp as the index, and won’t build the index on any metrics stored. Column wise storage is used.
Complex devices, such as connected cars, may have multiple DCPs. In this case, multiple tables are created for a single device, one table per DCP.
@@ -156,13 +155,20 @@ The relationship between a STable and the subtables created based on this STable
Queries can be executed on both a table (subtable) and a STable. For a query on a STable, TDengine will treat the data in all its subtables as a whole data set for processing. TDengine will first find the subtables that meet the tag filter conditions, then scan the time-series data of these subtables to perform aggregation operation, which reduces the number of data sets to be scanned which in turn greatly improves the performance of data aggregation across multiple DCPs. In essence, querying a supertable is a very efficient aggregate query on multiple DCPs of the same type.
-In TDengine, it is recommended to use a subtable instead of a regular table for a DCP. In the smart meters example, we can create subtables like d1001, d1002, d1003, and d1004 under super table meters.
+In TDengine, it is recommended to use a subtable instead of a regular table for a DCP. In the smart meters example, we can create subtables like d1001, d1002, d1003, and d1004 under super table `meters`.
+
+To better understand the data model using metrics, tags, super table and subtable, please refer to the diagram below which demonstrates the data model of the smart meters example.
+
+
+
+
-To better understand the data model using metri, tags, super table and subtable, please refer to the diagram below which demonstrates the data model of the smart meters example. 
+Figure 1. Meters Data Model Diagram
+
## Database
-A database is a collection of tables. TDengine allows a running instance to have multiple databases, and each database can be configured with different storage policies. The [characteristics of time-series data](https://www.taosdata.com/blog/2019/07/09/86.html) from different data collection points may be different. Characteristics include collection frequency, retention policy and others which determine how you create and configure the database. For e.g. days to keep, number of replicas, data block size, whether data updates are allowed and other configurable parameters would be determined by the characteristics of your data and your business requirements. In order for TDengine to work with maximum efficiency in various scenarios, TDengine recommends that STables with different data characteristics be created in different databases.
+A database is a collection of tables. TDengine allows a running instance to have multiple databases, and each database can be configured with different storage policies. The [characteristics of time-series data](https://tdengine.com/tsdb/characteristics-of-time-series-data/) from different data collection points may be different. Characteristics include collection frequency, retention policy and others which determine how you create and configure the database. For e.g. days to keep, number of replicas, data block size, whether data updates are allowed and other configurable parameters would be determined by the characteristics of your data and your business requirements. In order for TDengine to work with maximum efficiency in various scenarios, TDengine recommends that STables with different data characteristics be created in different databases.
In a database, there can be one or more STables, but a STable belongs to only one database. All tables owned by a STable are stored in only one database.
@@ -172,4 +178,4 @@ FQDN (Fully Qualified Domain Name) is the full domain name of a specific compute
Each node of a TDengine cluster is uniquely identified by an End Point, which consists of an FQDN and a Port, such as h1.tdengine.com:6030. In this way, when the IP changes, we can still use the FQDN to dynamically find the node without changing any configuration of the cluster. In addition, FQDN is used to facilitate unified access to the same cluster from the Intranet and the Internet.
-TDengine does not recommend using an IP address to access the cluster. FQDN is recommended for cluster management.
+TDengine does not recommend using an IP address to access the cluster. FQDN is recommended for cluster management.
diff --git a/docs/en/05-get-started/01-docker.md b/docs/en/05-get-started/01-docker.md
index de5b620a779557a8a3b8422a14caf67b354d1e7a..d0874c331e81c8f174ab2d90c06302f58af03ea6 100644
--- a/docs/en/05-get-started/01-docker.md
+++ b/docs/en/05-get-started/01-docker.md
@@ -3,7 +3,11 @@ sidebar_label: Docker
title: Quick Install on Docker
---
-This document describes how to install TDengine in a Docker container and perform queries and inserts. To get started with TDengine in a non-containerized environment, see [Quick Install](../../get-started/package). If you want to view the source code, build TDengine yourself, or contribute to the project, see the [TDengine GitHub repository](https://github.com/taosdata/TDengine).
+This document describes how to install TDengine in a Docker container and perform queries and inserts.
+
+- The easiest way to explore TDengine is through [TDengine Cloud](http://cloud.tdengine.com).
+- To get started with TDengine in a non-containerized environment, see [Quick Install from Package](../../get-started/package).
+- If you want to view the source code, build TDengine yourself, or contribute to the project, see the [TDengine GitHub repository](https://github.com/taosdata/TDengine).
## Run TDengine
@@ -13,7 +17,7 @@ If Docker is already installed on your computer, run the following command:
docker run -d -p 6030:6030 -p 6041:6041 -p 6043-6049:6043-6049 -p 6043-6049:6043-6049/udp tdengine/tdengine
```
-Note that TDengine Server uses TCP port 6030. Port 6041 is used by taosAdapter for the REST API service. Ports 6043 through 6049 are used by taosAdapter for other connectors. You can open these ports as needed.
+Note that TDengine Server 3.0 uses TCP port 6030. Port 6041 is used by taosAdapter for the REST API service. Ports 6043 through 6049 are used by taosAdapter for other connectors. You can open these ports as needed.
Run the following command to ensure that your container is running:
@@ -21,7 +25,7 @@ Run the following command to ensure that your container is running:
docker ps
```
-Enter the container and open the bash shell:
+Enter the container and open the `bash` shell:
```shell
docker exec -it bash
@@ -31,68 +35,68 @@ You can now access TDengine or run other Linux commands.
Note: For information about installing docker, see the [official documentation](https://docs.docker.com/get-docker/).
-## Insert Data into TDengine
+## Open the TDengine CLI
-You can use the `taosBenchmark` tool included with TDengine to write test data into your deployment.
+On the container, run the following command to open the TDengine CLI:
-To do so, run the following command:
+```
+$ taos
- ```bash
- $ taosBenchmark
-
- ```
+taos>
-This command creates the `meters` supertable in the `test` database. In the `meters` supertable, it then creates 10,000 subtables named `d0` to `d9999`. Each table has 10,000 rows and each row has four columns: `ts`, `current`, `voltage`, and `phase`. The timestamps of the data in these columns range from 2017-07-14 10:40:00 000 to 2017-07-14 10:40:09 999. Each table is randomly assigned a `groupId` tag from 1 to 10 and a `location` tag of either `Campbell`, `Cupertino`, `Los Angeles`, `Mountain View`, `Palo Alto`, `San Diego`, `San Francisco`, `San Jose`, `Santa Clara` or `Sunnyvale`.
+```
- The `taosBenchmark` command creates a deployment with 100 million data points that you can use for testing purposes. The time required depends on the hardware specifications of the local system.
+## Test data insert performance
- You can customize the test deployment that taosBenchmark creates by specifying command-line parameters. For information about command-line parameters, run the `taosBenchmark --help` command. For more information about taosBenchmark, see [taosBenchmark](/reference/taosbenchmark).
+After your TDengine Server is running normally, you can run the taosBenchmark utility to test its performance:
-## Open the TDengine CLI
-
-On the container, run the following command to open the TDengine CLI:
+Start TDengine service and execute `taosBenchmark` (formerly named `taosdemo`) in a Linux or Windows terminal.
+```bash
+taosBenchmark
```
-$ taos
-taos>
+This command creates the `meters` supertable in the `test` database. In the `meters` supertable, it then creates 10,000 subtables named `d0` to `d9999`. Each table has 10,000 rows and each row has four columns: `ts`, `current`, `voltage`, and `phase`. The timestamps of the data in these columns range from 2017-07-14 10:40:00 000 to 2017-07-14 10:40:09 999. Each table is randomly assigned a `groupId` tag from 1 to 10 and a `location` tag of either `California.Campbell`, `California.Cupertino`, `California.LosAngeles`, `California.MountainView`, `California.PaloAlto`, `California.SanDiego`, `California.SanFrancisco`, `California.SanJose`, `California.SantaClara` or `California.Sunnyvale`.
-```
+The `taosBenchmark` command creates a deployment with 100 million data points that you can use for testing purposes. The time required to create the deployment depends on your hardware. On most modern servers, the deployment is created in ten to twenty seconds.
-## Query Data in TDengine
+You can customize the test deployment that taosBenchmark creates by specifying command-line parameters. For information about command-line parameters, run the `taosBenchmark --help` command. For more information about taosBenchmark, see [taosBenchmark](../../reference/taosbenchmark).
-After using taosBenchmark to create your test deployment, you can run queries in the TDengine CLI to test its performance. For example:
+## Test data query performance
-From the TDengine CLI query the number of rows in the `meters` supertable:
+After using `taosBenchmark` to create your test deployment, you can run queries in the TDengine CLI to test its performance:
+
+From the TDengine CLI (taos) query the number of rows in the `meters` supertable:
```sql
-select count(*) from test.meters;
+SELECT COUNT(*) FROM test.meters;
```
Query the average, maximum, and minimum values of all 100 million rows of data:
```sql
-select avg(current), max(voltage), min(phase) from test.meters;
+SELECT AVG(current), MAX(voltage), MIN(phase) FROM test.meters;
```
-Query the number of rows whose `location` tag is `San Francisco`:
+Query the number of rows whose `location` tag is `California.SanFrancisco`:
```sql
-select count(*) from test.meters where location="San Francisco";
+SELECT COUNT(*) FROM test.meters WHERE location = "California.SanFrancisco";
```
Query the average, maximum, and minimum values of all rows whose `groupId` tag is `10`:
```sql
-select avg(current), max(voltage), min(phase) from test.meters where groupId=10;
+SELECT AVG(current), MAX(voltage), MIN(phase) FROM test.meters WHERE groupId = 10;
```
-Query the average, maximum, and minimum values for table `d10` in 1 second intervals:
+Query the average, maximum, and minimum values for table `d10` in 10 second intervals:
```sql
-select first(ts), avg(current), max(voltage), min(phase) from test.d10 interval(1s);
+SELECT FIRST(ts), AVG(current), MAX(voltage), MIN(phase) FROM test.d10 INTERVAL(10s);
```
-In the query above you are selecting the first timestamp (ts) in the interval, another way of selecting this would be _wstart which will give the start of the time window. For more information about windowed queries, see [Time-Series Extensions](../../taos-sql/distinguished/).
+
+In the query above you are selecting the first timestamp (ts) in the interval, another way of selecting this would be `\_wstart` which will give the start of the time window. For more information about windowed queries, see [Time-Series Extensions](../../taos-sql/distinguished/).
## Additional Information
diff --git a/docs/en/05-get-started/03-package.md b/docs/en/05-get-started/03-package.md
index 88096a759c58529d4150c0a750a4354a88da988f..25a92573faa6d6b3a9625f161bc7d0258492b91a 100644
--- a/docs/en/05-get-started/03-package.md
+++ b/docs/en/05-get-started/03-package.md
@@ -7,25 +7,30 @@ import Tabs from "@theme/Tabs";
import TabItem from "@theme/TabItem";
import PkgListV3 from "/components/PkgListV3";
-For information about installing TDengine on Docker, see [Quick Install on Docker](../../get-started/docker). If you want to view the source code, build TDengine yourself, or contribute to the project, see the [TDengine GitHub repository](https://github.com/taosdata/TDengine).
+This document describes how to install TDengine on Linux and Windows and perform queries and inserts.
-The full package of TDengine includes the TDengine Server (`taosd`), TDengine Client (`taosc`), taosAdapter for connecting with third-party systems and providing a RESTful interface, a command-line interface, and some tools. Note that taosAdapter supports Linux only. In addition to connectors for multiple languages, TDengine also provides a [REST API](../../reference/rest-api) through [taosAdapter](../../reference/taosadapter).
+- The easiest way to explore TDengine is through [TDengine Cloud](http://cloud.tdengine.com).
+- To get started with TDengine on Docker, see [Quick Install on Docker](../../get-started/docker).
+- If you want to view the source code, build TDengine yourself, or contribute to the project, see the [TDengine GitHub repository](https://github.com/taosdata/TDengine).
-The standard server installation package includes `taos`, `taosd`, `taosAdapter`, `taosBenchmark`, and sample code. You can also download a lite package that includes only `taosd` and the C/C++ connector.
+The full package of TDengine includes the TDengine Server (`taosd`), TDengine Client (`taosc`), taosAdapter for connecting with third-party systems and providing a RESTful interface, a command-line interface (CLI, taos), and some tools. Note that taosAdapter supports Linux only. In addition to connectors for multiple languages, TDengine also provides a [REST API](../../reference/rest-api) through [taosAdapter](../../reference/taosadapter).
-The TDengine Community Edition is released as .deb and .rpm packages. The .deb package can be installed on Debian, Ubuntu, and derivative systems. The .rpm package can be installed on CentOS, RHEL, SUSE, and derivative systems. A .tar.gz package is also provided for enterprise customers, and you can install TDengine over `apt-get` as well. The .tar.tz package includes `taosdump` and the TDinsight installation script. If you want to use these utilities with the .deb or .rpm package, download and install taosTools separately. TDengine can also be installed on 64-bit Windows servers.
+The standard server installation package includes `taos`, `taosd`, `taosAdapter`, `taosBenchmark`, and sample code. You can also download the Lite package that includes only `taosd` and the C/C++ connector.
+
+The TDengine Community Edition is released as Deb and RPM packages. The Deb package can be installed on Debian, Ubuntu, and derivative systems. The RPM package can be installed on CentOS, RHEL, SUSE, and derivative systems. A .tar.gz package is also provided for enterprise customers, and you can install TDengine over `apt-get` as well. The .tar.tz package includes `taosdump` and the TDinsight installation script. If you want to use these utilities with the Deb or RPM package, download and install taosTools separately. TDengine can also be installed on 64-bit Windows.
## Installation
-1. Download the .deb installation package.
-
+1. Download the Deb installation package.
+
2. In the directory where the package is located, use `dpkg` to install the package:
+> Please replace `` with the corresponding version of the package downloaded
+
```bash
-# Enter the name of the package that you downloaded.
sudo dpkg -i TDengine-server--Linux-x64.deb
```
@@ -34,11 +39,12 @@ sudo dpkg -i TDengine-server--Linux-x64.deb
1. Download the .rpm installation package.
-
+
2. In the directory where the package is located, use rpm to install the package:
+> Please replace `` with the corresponding version of the package downloaded
+
```bash
-# Enter the name of the package that you downloaded.
sudo rpm -ivh TDengine-server--Linux-x64.rpm
```
@@ -47,11 +53,12 @@ sudo rpm -ivh TDengine-server--Linux-x64.rpm
1. Download the .tar.gz installation package.
-
+
2. In the directory where the package is located, use `tar` to decompress the package:
+> Please replace `` with the corresponding version of the package downloaded
+
```bash
-# Enter the name of the package that you downloaded.
tar -zxvf TDengine-server--Linux-x64.tar.gz
```
@@ -96,23 +103,23 @@ sudo apt-get install tdengine
This installation method is supported only for Debian and Ubuntu.
::::
-
+
-Note: TDengine only supports Windows Server 2016/2019 and windows 10/11 system versions on the windows platform.
+Note: TDengine only supports Windows Server 2016/2019 and Windows 10/11 on the Windows platform.
1. Download the Windows installation package.
-
+
2. Run the downloaded package to install TDengine.
:::info
-For information about TDengine releases, see [Release History](../../releases).
+For information about TDengine releases, see [Release History](../../releases/tdengine).
:::
:::note
-On the first node in your TDengine cluster, leave the `Enter FQDN:` prompt blank and press **Enter**. On subsequent nodes, you can enter the end point of the first dnode in the cluster. You can also configure this setting after you have finished installing TDengine.
+On the first node in your TDengine cluster, leave the `Enter FQDN:` prompt blank and press **Enter**. On subsequent nodes, you can enter the endpoint of the first dnode in the cluster. You can also configure this setting after you have finished installing TDengine.
:::
@@ -147,7 +154,7 @@ Active: inactive (dead)
After confirming that TDengine is running, run the `taos` command to access the TDengine CLI.
-The following `systemctl` commands can help you manage TDengine:
+The following `systemctl` commands can help you manage TDengine service:
- Start TDengine Server: `systemctl start taosd`
@@ -159,7 +166,7 @@ The following `systemctl` commands can help you manage TDengine:
:::info
-- The `systemctl` command requires _root_ privileges. If you are not logged in as the `root` user, use the `sudo` command.
+- The `systemctl` command requires _root_ privileges. If you are not logged in as the _root_ user, use the `sudo` command.
- The `systemctl stop taosd` command does not instantly stop TDengine Server. The server is stopped only after all data in memory is flushed to disk. The time required depends on the cache size.
- If your system does not include `systemd`, you can run `/usr/local/taos/bin/taosd` to start TDengine manually.
@@ -174,23 +181,9 @@ After the installation is complete, run `C:\TDengine\taosd.exe` to start TDengin
-## Test data insert performance
-
-After your TDengine Server is running normally, you can run the taosBenchmark utility to test its performance:
-
-```bash
-taosBenchmark
-```
-
-This command creates the `meters` supertable in the `test` database. In the `meters` supertable, it then creates 10,000 subtables named `d0` to `d9999`. Each table has 10,000 rows and each row has four columns: `ts`, `current`, `voltage`, and `phase`. The timestamps of the data in these columns range from 2017-07-14 10:40:00 000 to 2017-07-14 10:40:09 999. Each table is randomly assigned a `groupId` tag from 1 to 10 and a `location` tag of either `Campbell`, `Cupertino`, `Los Angeles`, `Mountain View`, `Palo Alto`, `San Diego`, `San Francisco`, `San Jose`, `Santa Clara` or `Sunnyvale`.
-
-The `taosBenchmark` command creates a deployment with 100 million data points that you can use for testing purposes. The time required to create the deployment depends on your hardware. On most modern servers, the deployment is created in less than a minute.
-
-You can customize the test deployment that taosBenchmark creates by specifying command-line parameters. For information about command-line parameters, run the `taosBenchmark --help` command. For more information about taosBenchmark, see [taosBenchmark](../../reference/taosbenchmark).
-
-## Command Line Interface
+## Command Line Interface (CLI)
-You can use the TDengine CLI to monitor your TDengine deployment and execute ad hoc queries. To open the CLI, run the following command:
+You can use the TDengine CLI to monitor your TDengine deployment and execute ad hoc queries. To open the CLI, you can execute `taos` in the Linux terminal where TDengine is installed, or you can run `taos.exe` in the `C:\TDengine` directory of the Windows terminal where TDengine is installed to start the TDengine command line.
```bash
taos
@@ -205,52 +198,71 @@ taos>
For example, you can create and delete databases and tables and run all types of queries. Each SQL command must be end with a semicolon (;). For example:
```sql
-create database demo;
-use demo;
-create table t (ts timestamp, speed int);
-insert into t values ('2019-07-15 00:00:00', 10);
-insert into t values ('2019-07-15 01:00:00', 20);
-select * from t;
+CREATE DATABASE demo;
+USE demo;
+CREATE TABLE t (ts TIMESTAMP, speed INT);
+INSERT INTO t VALUES ('2019-07-15 00:00:00', 10);
+INSERT INTO t VALUES ('2019-07-15 01:00:00', 20);
+SELECT * FROM t;
+
ts | speed |
========================================
2019-07-15 00:00:00.000 | 10 |
2019-07-15 01:00:00.000 | 20 |
+
Query OK, 2 row(s) in set (0.003128s)
```
You can also can monitor the deployment status, add and remove user accounts, and manage running instances. You can run the TDengine CLI on either Linux or Windows machines. For more information, see [TDengine CLI](../../reference/taos-shell/).
-
+
+## Test data insert performance
+
+After your TDengine Server is running normally, you can run the taosBenchmark utility to test its performance:
+
+Start TDengine service and execute `taosBenchmark` (formerly named `taosdemo`) in a Linux or Windows terminal.
+
+```bash
+taosBenchmark
+```
+
+This command creates the `meters` supertable in the `test` database. In the `meters` supertable, it then creates 10,000 subtables named `d0` to `d9999`. Each table has 10,000 rows and each row has four columns: `ts`, `current`, `voltage`, and `phase`. The timestamps of the data in these columns range from 2017-07-14 10:40:00 000 to 2017-07-14 10:40:09 999. Each table is randomly assigned a `groupId` tag from 1 to 10 and a `location` tag of either `California.Campbell`, `California.Cupertino`, `California.LosAngeles`, `California.MountainView`, `California.PaloAlto`, `California.SanDiego`, `California.SanFrancisco`, `California.SanJose`, `California.SantaClara` or `California.Sunnyvale`.
+
+The `taosBenchmark` command creates a deployment with 100 million data points that you can use for testing purposes. The time required to create the deployment depends on your hardware. On most modern servers, the deployment is created in ten to twenty seconds.
+
+You can customize the test deployment that taosBenchmark creates by specifying command-line parameters. For information about command-line parameters, run the `taosBenchmark --help` command. For more information about taosBenchmark, see [taosBenchmark](../../reference/taosbenchmark).
+
## Test data query performance
-After using taosBenchmark to create your test deployment, you can run queries in the TDengine CLI to test its performance:
+After using `taosBenchmark` to create your test deployment, you can run queries in the TDengine CLI to test its performance:
-From the TDengine CLI query the number of rows in the `meters` supertable:
+From the TDengine CLI (taos) query the number of rows in the `meters` supertable:
```sql
-select count(*) from test.meters;
+SELECT COUNT(*) FROM test.meters;
```
Query the average, maximum, and minimum values of all 100 million rows of data:
```sql
-select avg(current), max(voltage), min(phase) from test.meters;
+SELECT AVG(current), MAX(voltage), MIN(phase) FROM test.meters;
```
-Query the number of rows whose `location` tag is `San Francisco`:
+Query the number of rows whose `location` tag is `California.SanFrancisco`:
```sql
-select count(*) from test.meters where location="San Francisco";
+SELECT COUNT(*) FROM test.meters WHERE location = "California.SanFrancisco";
```
Query the average, maximum, and minimum values of all rows whose `groupId` tag is `10`:
```sql
-select avg(current), max(voltage), min(phase) from test.meters where groupId=10;
+SELECT AVG(current), MAX(voltage), MIN(phase) FROM test.meters WHERE groupId = 10;
```
-Query the average, maximum, and minimum values for table `d10` in 1 second intervals:
+Query the average, maximum, and minimum values for table `d10` in 10 second intervals:
```sql
-select first(ts), avg(current), max(voltage), min(phase) from test.d10 interval(1s);
+SELECT FIRST(ts), AVG(current), MAX(voltage), MIN(phase) FROM test.d10 INTERVAL(10s);
```
-In the query above you are selecting the first timestamp (ts) in the interval, another way of selecting this would be _wstart which will give the start of the time window. For more information about windowed queries, see [Time-Series Extensions](../../taos-sql/distinguished/).
+
+In the query above you are selecting the first timestamp (ts) in the interval, another way of selecting this would be `\_wstart` which will give the start of the time window. For more information about windowed queries, see [Time-Series Extensions](../../taos-sql/distinguished/).
diff --git a/docs/en/05-get-started/index.md b/docs/en/05-get-started/index.md
index cf475a8cd79e15880a4356a89f46c0dd6a8c307d..09875362f422e925b33c2f4143377e9ca8403c91 100644
--- a/docs/en/05-get-started/index.md
+++ b/docs/en/05-get-started/index.md
@@ -3,9 +3,9 @@ title: Get Started
description: This article describes how to install TDengine and test its performance.
---
-The full package of TDengine includes the TDengine Server (`taosd`), TDengine Client (`taosc`), taosAdapter for connecting with third-party systems and providing a RESTful interface, a command-line interface, and some tools. In addition to connectors for multiple languages, TDengine also provides a [RESTful interface](/reference/rest-api) through [taosAdapter](/reference/taosadapter).
+You can install and run TDengine on Linux and Windows machines as well as Docker containers. You can also deploy TDengine as a managed service with TDengine Cloud.
-You can install and run TDengine on Linux and Windows machines as well as Docker containers.
+The full package of TDengine includes the TDengine Server (`taosd`), TDengine Client (`taosc`), taosAdapter for connecting with third-party systems and providing a RESTful interface, a command-line interface, and some tools. In addition to connectors for multiple languages, TDengine also provides a [RESTful interface](/reference/rest-api) through [taosAdapter](/reference/taosadapter).
```mdx-code-block
import DocCardList from '@theme/DocCardList';
diff --git a/docs/en/07-develop/01-connect/_connect_cs.mdx b/docs/en/07-develop/01-connect/_connect_cs.mdx
index f8d8e519fde7fc6d0954bbfe865155221c0b0595..b81f49b2f0593c65ed6b51b6824d3936f00f2993 100644
--- a/docs/en/07-develop/01-connect/_connect_cs.mdx
+++ b/docs/en/07-develop/01-connect/_connect_cs.mdx
@@ -1,8 +1,7 @@
```csharp title="Native Connection"
-{{#include docs/examples/csharp/ConnectExample.cs}}
+{{#include docs/examples/csharp/connect/Program.cs}}
```
-:::info
-C# connector supports only native connection for now.
-
-:::
+```csharp title="WebSocket Connection"
+{{#include docs/examples/csharp/wsConnect/Program.cs}}
+```
diff --git a/docs/en/07-develop/01-connect/index.md b/docs/en/07-develop/01-connect/index.md
index 20537064216f812990414ffd7260dbda64c56251..61eb8f04eb3fb8cea5096b321105fa9e88722bda 100644
--- a/docs/en/07-develop/01-connect/index.md
+++ b/docs/en/07-develop/01-connect/index.md
@@ -1,6 +1,7 @@
---
-title: Connect
-description: "This document explains how to establish connections to TDengine and how to install and use TDengine connectors."
+sidebar_label: Connect
+title: Connect to TDengine
+description: "How to establish connections to TDengine and how to install and use TDengine connectors."
---
import Tabs from "@theme/Tabs";
@@ -279,6 +280,6 @@ Prior to establishing connection, please make sure TDengine is already running a
:::tip
-If the connection fails, in most cases it's caused by improper configuration for FQDN or firewall. Please refer to the section "Unable to establish connection" in [FAQ](https://docs.taosdata.com/train-faq/faq).
+If the connection fails, in most cases it's caused by improper configuration for FQDN or firewall. Please refer to the section "Unable to establish connection" in [FAQ](https://docs.tdengine.com/train-faq/faq).
:::
diff --git a/docs/en/07-develop/03-insert-data/02-influxdb-line.mdx b/docs/en/07-develop/03-insert-data/02-influxdb-line.mdx
index 11db3daeb054b2cac29c6a0ccde2add27774f3da..c91dbba5d2786f8a5a78ed77c105e3661ae8641f 100644
--- a/docs/en/07-develop/03-insert-data/02-influxdb-line.mdx
+++ b/docs/en/07-develop/03-insert-data/02-influxdb-line.mdx
@@ -37,7 +37,8 @@ meters,location=California.LosAngeles,groupid=2 current=13.4,voltage=223,phase=0
- All the data in `tag_set` will be converted to nchar type automatically .
- Each data in `field_set` must be self-descriptive for its data type. For example 1.2f32 means a value 1.2 of float type. Without the "f" type suffix, it will be treated as type double.
- Multiple kinds of precision can be used for the `timestamp` field. Time precision can be from nanosecond (ns) to hour (h).
-
+- You can configure smlChildTableName in taos.cfg to specify table names, for example, `smlChildTableName=tname`. You can insert `st,tname=cpul,t1=4 c1=3 1626006833639000000` and the cpu1 table will be automatically created. Note that if multiple rows have the same tname but different tag_set values, the tag_set of the first row is used to create the table and the others are ignored.
+- It is assumed that the order of field_set in a supertable is consistent, meaning that the first record contains all fields and subsequent records store fields in the same order. If the order is not consistent, set smlDataFormat in taos.cfg to false. Otherwise, data will be written out of order and a database error will occur.(smlDataFormat in taos.cfg default to false after version of 3.0.1.3)
:::
For more details please refer to [InfluxDB Line Protocol](https://docs.influxdata.com/influxdb/v2.0/reference/syntax/line-protocol/) and [TDengine Schemaless](/reference/schemaless/#Schemaless-Line-Protocol)
@@ -64,3 +65,7 @@ For more details please refer to [InfluxDB Line Protocol](https://docs.influxdat
+
+## Query Examples
+If you want query the data of `location=California.LosAngeles,groupid=2`,here is the query sql:
+select * from `meters.voltage` where location="California.LosAngeles" and groupid=2
diff --git a/docs/en/07-develop/03-insert-data/03-opentsdb-telnet.mdx b/docs/en/07-develop/03-insert-data/03-opentsdb-telnet.mdx
index db9bfd73649709cf806ae6499513191db8321107..d88a6335cb52602c371cb677afa1488d746cab95 100644
--- a/docs/en/07-develop/03-insert-data/03-opentsdb-telnet.mdx
+++ b/docs/en/07-develop/03-insert-data/03-opentsdb-telnet.mdx
@@ -31,7 +31,7 @@ For example:
```txt
meters.current 1648432611250 11.3 location=California.LosAngeles groupid=3
```
-
+- The defult child table name is generated by rules.You can configure smlChildTableName in taos.cfg to specify chile table names, for example, `smlChildTableName=tname`. You can insert `meters.current 1648432611250 11.3 tname=cpu1 location=California.LosAngeles groupid=3` and the cpu1 table will be automatically created. Note that if multiple rows have the same tname but different tag_set values, the tag_set of the first row is used to create the table and the others are ignored.
Please refer to [OpenTSDB Telnet API](http://opentsdb.net/docs/build/html/api_telnet/put.html) for more details.
## Examples
@@ -79,3 +79,6 @@ taos> select tbname, * from `meters.current`;
t_7e7b26dd860280242c6492a16... | 2022-03-28 09:56:51.250 | 12.600000000 | 2 | California.SanFrancisco |
Query OK, 4 row(s) in set (0.005399s)
```
+## Query Examples
+If you want query the data of `location=California.LosAngeles groupid=3`,here is the query sql:
+select * from `meters.voltage` where location="California.LosAngeles" and groupid=3
diff --git a/docs/en/07-develop/03-insert-data/04-opentsdb-json.mdx b/docs/en/07-develop/03-insert-data/04-opentsdb-json.mdx
index 23703f4087483373a15e9cf7604bb67ca62888f5..e2e6d6fc9f099485a6bb9c3bf8d8ea580be824de 100644
--- a/docs/en/07-develop/03-insert-data/04-opentsdb-json.mdx
+++ b/docs/en/07-develop/03-insert-data/04-opentsdb-json.mdx
@@ -48,7 +48,7 @@ Please refer to [OpenTSDB HTTP API](http://opentsdb.net/docs/build/html/api_http
- In JSON protocol, strings will be converted to nchar type and numeric values will be converted to double type.
- Only data in array format is accepted and so an array must be used even if there is only one row.
-
+- The defult child table name is generated by rules.You can configure smlChildTableName in taos.cfg to specify chile table names, for example, `smlChildTableName=tname`. You can insert `"tags": { "host": "web02","dc": "lga","tname":"cpu1"}` and the cpu1 table will be automatically created. Note that if multiple rows have the same tname but different tag_set values, the tag_set of the first row is used to create the table and the others are ignored.
:::
## Examples
@@ -94,3 +94,6 @@ taos> select * from `meters.current`;
2022-03-28 09:56:51.250 | 12.600000000 | 2.000000000 | California.SanFrancisco |
Query OK, 2 row(s) in set (0.004076s)
```
+## Query Examples
+If you want query the data of "tags": {"location": "California.LosAngeles", "groupid": 1},here is the query sql:
+select * from `meters.voltage` where location="California.LosAngeles" and groupid=1
diff --git a/docs/en/07-develop/03-insert-data/05-high-volume.md b/docs/en/07-develop/03-insert-data/05-high-volume.md
new file mode 100644
index 0000000000000000000000000000000000000000..9ea0c884473e670d0624cb3be737830f46bedc38
--- /dev/null
+++ b/docs/en/07-develop/03-insert-data/05-high-volume.md
@@ -0,0 +1,441 @@
+---
+sidebar_label: High Performance Writing
+title: High Performance Writing
+---
+
+import Tabs from "@theme/Tabs";
+import TabItem from "@theme/TabItem";
+
+This chapter introduces how to write data into TDengine with high throughput.
+
+## How to achieve high performance data writing
+
+To achieve high performance writing, there are a few aspects to consider. In the following sections we will describe these important factors in achieving high performance writing.
+
+### Application Program
+
+From the perspective of application program, you need to consider:
+
+1. The data size of each single write, also known as batch size. Generally speaking, higher batch size generates better writing performance. However, once the batch size is over a specific value, you will not get any additional benefit anymore. When using SQL to write into TDengine, it's better to put as much as possible data in single SQL. The maximum SQL length supported by TDengine is 1,048,576 bytes, i.e. 1 MB.
+
+2. The number of concurrent connections. Normally more connections can get better result. However, once the number of connections exceeds the processing ability of the server side, the performance may downgrade.
+
+3. The distribution of data to be written across tables or sub-tables. Writing to single table in one batch is more efficient than writing to multiple tables in one batch.
+
+4. Data Writing Protocol.
+ - Prameter binding mode is more efficient than SQL because it doesn't have the cost of parsing SQL.
+ - Writing to known existing tables is more efficient than wirting to uncertain tables in automatic creating mode because the later needs to check whether the table exists or not before actually writing data into it
+ - Writing in SQL is more efficient than writing in schemaless mode because schemaless writing creats table automatically and may alter table schema
+
+Application programs need to take care of the above factors and try to take advantage of them. The application progam should write to single table in each write batch. The batch size needs to be tuned to a proper value on a specific system. The number of concurrent connections needs to be tuned to a proper value too to achieve the best writing throughput.
+
+### Data Source
+
+Application programs need to read data from data source then write into TDengine. If you meet one or more of below situations, you need to setup message queues between the threads for reading from data source and the threads for writing into TDengine.
+
+1. There are multiple data sources, the data generation speed of each data source is much slower than the speed of single writing thread. In this case, the purpose of message queues is to consolidate the data from multiple data sources together to increase the batch size of single write.
+2. The speed of data generation from single data source is much higher than the speed of single writing thread. The purpose of message queue in this case is to provide buffer so that data is not lost and multiple writing threads can get data from the buffer.
+3. The data for single table are from multiple data source. In this case the purpose of message queues is to combine the data for single table together to improve the write efficiency.
+
+If the data source is Kafka, then the appication program is a consumer of Kafka, you can benefit from some kafka features to achieve high performance writing:
+
+1. Put the data for a table in single partition of single topic so that it's easier to put the data for each table together and write in batch
+2. Subscribe multiple topics to accumulate data together.
+3. Add more consumers to gain more concurrency and throughput.
+4. Incrase the size of single fetch to increase the size of write batch.
+
+### Tune TDengine
+
+On the server side, database configuration parameter `vgroups` needs to be set carefully to maximize the system performance. If it's set too low, the system capability can't be utilized fully; if it's set too big, unnecessary resource competition may be produced. A normal recommendation for `vgroups` parameter is 2 times of the number of CPU cores. However, depending on the actual system resources, it may still need to tuned.
+
+For more configuration parameters, please refer to [Database Configuration](../../../taos-sql/database) and [Server Configuration](../../../reference/config)。
+
+## Sample Programs
+
+This section will introduce the sample programs to demonstrate how to write into TDengine with high performance.
+
+### Scenario
+
+Below are the scenario for the sample programs of high performance wrting.
+
+- Application program reads data from data source, the sample program simulates a data source by generating data
+- The speed of single writing thread is much slower than the speed of generating data, so the program starts multiple writing threads while each thread establish a connection to TDengine and each thread has a message queue of fixed size.
+- Application program maps the received data to different writing threads based on table name to make sure all the data for each table is always processed by a specific writing thread.
+- Each writing thread writes the received data into TDengine once the message queue becomes empty or the read data meets a threshold.
+
+
+
+### Sample Programs
+
+The sample programs listed in this section are based on the scenario described previously. If your scenarios is different, please try to adjust the code based on the principles described in this chapter.
+
+The sample programs assume the source data is for all the different sub tables in same super table (meters). The super table has been created before the sample program starts to writing data. Sub tables are created automatically according to received data. If there are multiple super tables in your case, please try to adjust the part of creating table automatically.
+
+
+
+
+**Program Inventory**
+
+| Class | Description |
+| ---------------- | ----------------------------------------------------------------------------------------------------- |
+| FastWriteExample | Main Program |
+| ReadTask | Read data from simulated data source and put into a queue according to the hash value of table name |
+| WriteTask | Read data from Queue, compose a wirte batch and write into TDengine |
+| MockDataSource | Generate data for some sub tables of super table meters |
+| SQLWriter | WriteTask uses this class to compose SQL, create table automatically, check SQL length and write data |
+| StmtWriter | Write in Parameter binding mode (Not finished yet) |
+| DataBaseMonitor | Calculate the writing speed and output on console every 10 seconds |
+
+Below is the list of complete code of the classes in above table and more detailed description.
+
+
+FastWriteExample
+The main Program is responsible for:
+
+1. Create message queues
+2. Start writing threads
+3. Start reading threads
+4. Otuput writing speed every 10 seconds
+
+The main program provides 4 parameters for tuning:
+
+1. The number of reading threads, default value is 1
+2. The number of writing threads, default alue is 2
+3. The total number of tables in the generated data, default value is 1000. These tables are distributed evenly across all writing threads. If the number of tables is very big, it will cost much time to firstly create these tables.
+4. The batch size of single write, default value is 3,000
+
+The capacity of message queue also impacts performance and can be tuned by modifying program. Normally it's always better to have a larger message queue. A larger message queue means lower possibility of being blocked when enqueueing and higher throughput. But a larger message queue consumes more memory space. The default value used in the sample programs is already big enoug.
+
+```java
+{{#include docs/examples/java/src/main/java/com/taos/example/highvolume/FastWriteExample.java}}
+```
+
+
+
+
+ReadTask
+
+ReadTask reads data from data source. Each ReadTask is associated with a simulated data source, each data source generates data for a group of specific tables, and the data of any table is only generated from a single specific data source.
+
+ReadTask puts data in message queue in blocking mode. That means, the putting operation is blocked if the message queue is full.
+
+```java
+{{#include docs/examples/java/src/main/java/com/taos/example/highvolume/ReadTask.java}}
+```
+
+
+
+
+WriteTask
+
+```java
+{{#include docs/examples/java/src/main/java/com/taos/example/highvolume/WriteTask.java}}
+```
+
+
+
+
+
+MockDataSource
+
+```java
+{{#include docs/examples/java/src/main/java/com/taos/example/highvolume/MockDataSource.java}}
+```
+
+
+
+
+
+SQLWriter
+
+SQLWriter class encapsulates the logic of composing SQL and writing data. Please be noted that the tables have not been created before writing, but are created automatically when catching the exception of table doesn't exist. For other exceptions caught, the SQL which caused the exception are logged for you to debug.
+
+```java
+{{#include docs/examples/java/src/main/java/com/taos/example/highvolume/SQLWriter.java}}
+```
+
+
+
+
+
+DataBaseMonitor
+
+```java
+{{#include docs/examples/java/src/main/java/com/taos/example/highvolume/DataBaseMonitor.java}}
+```
+
+
+
+**Steps to Launch**
+
+
+Launch Java Sample Program
+
+You need to set environment variable `TDENGINE_JDBC_URL` before launching the program. If TDengine Server is setup on localhost, then the default value for user name, password and port can be used, like below:
+
+```
+TDENGINE_JDBC_URL="jdbc:TAOS://localhost:6030?user=root&password=taosdata"
+```
+
+**Launch in IDE**
+
+1. Clone TDengine repolitory
+ ```
+ git clone git@github.com:taosdata/TDengine.git --depth 1
+ ```
+2. Use IDE to open `docs/examples/java` directory
+3. Configure environment variable `TDENGINE_JDBC_URL`, you can also configure it before launching the IDE, if so you can skip this step.
+4. Run class `com.taos.example.highvolume.FastWriteExample`
+
+**Launch on server**
+
+If you want to launch the sample program on a remote server, please follow below steps:
+
+1. Package the sample programs. Execute below command under directory `TDengine/docs/examples/java` :
+ ```
+ mvn package
+ ```
+2. Create `examples/java` directory on the server
+ ```
+ mkdir -p examples/java
+ ```
+3. Copy dependencies (below commands assume you are working on a local Windows host and try to launch on a remote Linux host)
+ - Copy dependent packages
+ ```
+ scp -r .\target\lib @:~/examples/java
+ ```
+ - Copy the jar of sample programs
+ ```
+ scp -r .\target\javaexample-1.0.jar @:~/examples/java
+ ```
+4. Configure environment variable
+ Edit `~/.bash_profile` or `~/.bashrc` and add below:
+
+ ```
+ export TDENGINE_JDBC_URL="jdbc:TAOS://localhost:6030?user=root&password=taosdata"
+ ```
+
+ If your TDengine server is not deployed on localhost or doesn't use default port, you need to change the above URL to correct value in your environment.
+
+5. Launch the sample program
+
+ ```
+ java -classpath lib/*:javaexample-1.0.jar com.taos.example.highvolume.FastWriteExample
+ ```
+
+6. The sample program doesn't exit unless you press CTRL + C to terminate it.
+ Below is the output of running on a server of 16 cores, 64GB memory and SSD hard disk.
+
+ ```
+ root@vm85$ java -classpath lib/*:javaexample-1.0.jar com.taos.example.highvolume.FastWriteExample 2 12
+ 18:56:35.896 [main] INFO c.t.e.highvolume.FastWriteExample - readTaskCount=2, writeTaskCount=12 tableCount=1000 maxBatchSize=3000
+ 18:56:36.011 [WriteThread-0] INFO c.taos.example.highvolume.WriteTask - started
+ 18:56:36.015 [WriteThread-0] INFO c.taos.example.highvolume.SQLWriter - maxSQLLength=1048576
+ 18:56:36.021 [WriteThread-1] INFO c.taos.example.highvolume.WriteTask - started
+ 18:56:36.022 [WriteThread-1] INFO c.taos.example.highvolume.SQLWriter - maxSQLLength=1048576
+ 18:56:36.031 [WriteThread-2] INFO c.taos.example.highvolume.WriteTask - started
+ 18:56:36.032 [WriteThread-2] INFO c.taos.example.highvolume.SQLWriter - maxSQLLength=1048576
+ 18:56:36.041 [WriteThread-3] INFO c.taos.example.highvolume.WriteTask - started
+ 18:56:36.042 [WriteThread-3] INFO c.taos.example.highvolume.SQLWriter - maxSQLLength=1048576
+ 18:56:36.093 [WriteThread-4] INFO c.taos.example.highvolume.WriteTask - started
+ 18:56:36.094 [WriteThread-4] INFO c.taos.example.highvolume.SQLWriter - maxSQLLength=1048576
+ 18:56:36.099 [WriteThread-5] INFO c.taos.example.highvolume.WriteTask - started
+ 18:56:36.100 [WriteThread-5] INFO c.taos.example.highvolume.SQLWriter - maxSQLLength=1048576
+ 18:56:36.100 [WriteThread-6] INFO c.taos.example.highvolume.WriteTask - started
+ 18:56:36.101 [WriteThread-6] INFO c.taos.example.highvolume.SQLWriter - maxSQLLength=1048576
+ 18:56:36.103 [WriteThread-7] INFO c.taos.example.highvolume.WriteTask - started
+ 18:56:36.104 [WriteThread-7] INFO c.taos.example.highvolume.SQLWriter - maxSQLLength=1048576
+ 18:56:36.105 [WriteThread-8] INFO c.taos.example.highvolume.WriteTask - started
+ 18:56:36.107 [WriteThread-8] INFO c.taos.example.highvolume.SQLWriter - maxSQLLength=1048576
+ 18:56:36.108 [WriteThread-9] INFO c.taos.example.highvolume.WriteTask - started
+ 18:56:36.109 [WriteThread-9] INFO c.taos.example.highvolume.SQLWriter - maxSQLLength=1048576
+ 18:56:36.156 [WriteThread-10] INFO c.taos.example.highvolume.WriteTask - started
+ 18:56:36.157 [WriteThread-11] INFO c.taos.example.highvolume.WriteTask - started
+ 18:56:36.158 [WriteThread-10] INFO c.taos.example.highvolume.SQLWriter - maxSQLLength=1048576
+ 18:56:36.158 [ReadThread-0] INFO com.taos.example.highvolume.ReadTask - started
+ 18:56:36.158 [ReadThread-1] INFO com.taos.example.highvolume.ReadTask - started
+ 18:56:36.158 [WriteThread-11] INFO c.taos.example.highvolume.SQLWriter - maxSQLLength=1048576
+ 18:56:46.369 [main] INFO c.t.e.highvolume.FastWriteExample - count=18554448 speed=1855444
+ 18:56:56.946 [main] INFO c.t.e.highvolume.FastWriteExample - count=39059660 speed=2050521
+ 18:57:07.322 [main] INFO c.t.e.highvolume.FastWriteExample - count=59403604 speed=2034394
+ 18:57:18.032 [main] INFO c.t.e.highvolume.FastWriteExample - count=80262938 speed=2085933
+ 18:57:28.432 [main] INFO c.t.e.highvolume.FastWriteExample - count=101139906 speed=2087696
+ 18:57:38.921 [main] INFO c.t.e.highvolume.FastWriteExample - count=121807202 speed=2066729
+ 18:57:49.375 [main] INFO c.t.e.highvolume.FastWriteExample - count=142952417 speed=2114521
+ 18:58:00.689 [main] INFO c.t.e.highvolume.FastWriteExample - count=163650306 speed=2069788
+ 18:58:11.646 [main] INFO c.t.e.highvolume.FastWriteExample - count=185019808 speed=2136950
+ ```
+
+
+
+
+
+
+**Program Inventory**
+
+Sample programs in Python uses multi-process and cross-process message queues.
+
+| Function/CLass | Description |
+| ---------------------------- | --------------------------------------------------------------------------- |
+| main Function | Program entry point, create child processes and message queues |
+| run_monitor_process Function | Create database, super table, calculate writing speed and output to console |
+| run_read_task Function | Read data and distribute to message queues |
+| MockDataSource Class | Simulate data source, return next 1,000 rows of each table |
+| run_write_task Function | Read as much as possible data from message queue and write in batch |
+| SQLWriter Class | Write in SQL and create table utomatically |
+| StmtWriter Class | Write in parameter binding mode (not finished yet) |
+
+
+main function
+
+`main` function is responsible for creating message queues and fork child processes, there are 3 kinds of child processes:
+
+1. Monitoring process, initializes database and calculating writing speed
+2. Reading process (n), reads data from data source
+3. Writing process (m), wirtes data into TDengine
+
+`main` function provides 5 parameters:
+
+1. The number of reading tasks, default value is 1
+2. The number of writing tasks, default value is 1
+3. The number of tables, default value is 1,000
+4. The capacity of message queue, default value is 1,000,000 bytes
+5. The batch size in single write, default value is 3000
+
+```python
+{{#include docs/examples/python/fast_write_example.py:main}}
+```
+
+
+
+
+run_monitor_process
+
+Monitoring process initilizes database and monitoring writing speed.
+
+```python
+{{#include docs/examples/python/fast_write_example.py:monitor}}
+```
+
+
+
+
+
+run_read_task function
+
+Reading process reads data from other data system and distributes to the message queue allocated for it.
+
+```python
+{{#include docs/examples/python/fast_write_example.py:read}}
+```
+
+
+
+
+
+MockDataSource
+
+Below is the simulated data source, we assume table name exists in each generated data.
+
+```python
+{{#include docs/examples/python/mockdatasource.py}}
+```
+
+
+
+
+run_write_task function
+
+Writing process tries to read as much as possible data from message queue and writes in batch.
+
+```python
+{{#include docs/examples/python/fast_write_example.py:write}}
+```
+
+
+
+
+
+SQLWriter class encapsulates the logic of composing SQL and writing data. Please be noted that the tables have not been created before writing, but are created automatically when catching the exception of table doesn't exist. For other exceptions caught, the SQL which caused the exception are logged for you to debug. This class also checks the SQL length, and passes the maximum SQL length by parameter maxSQLLength according to actual TDengine limit.
+
+SQLWriter
+
+```python
+{{#include docs/examples/python/sql_writer.py}}
+```
+
+
+
+**Steps to Launch**
+
+
+
+Launch Sample Program in Python
+
+1. Prerequisities
+
+ - TDengine client driver has been installed
+ - Python3 has been installed, the the version >= 3.8
+ - TDengine Python connector `taospy` has been installed
+
+2. Install faster-fifo to replace python builtin multiprocessing.Queue
+
+ ```
+ pip3 install faster-fifo
+ ```
+
+3. Click the "Copy" in the above sample programs to copy `fast_write_example.py` 、 `sql_writer.py` and `mockdatasource.py`.
+
+4. Execute the program
+
+ ```
+ python3 fast_write_example.py
+ ```
+
+ Below is the output of running on a server of 16 cores, 64GB memory and SSD hard disk.
+
+ ```
+ root@vm85$ python3 fast_write_example.py 8 8
+ 2022-07-14 19:13:45,869 [root] - READ_TASK_COUNT=8, WRITE_TASK_COUNT=8, TABLE_COUNT=1000, QUEUE_SIZE=1000000, MAX_BATCH_SIZE=3000
+ 2022-07-14 19:13:48,882 [root] - WriteTask-0 started with pid 718347
+ 2022-07-14 19:13:48,883 [root] - WriteTask-1 started with pid 718348
+ 2022-07-14 19:13:48,884 [root] - WriteTask-2 started with pid 718349
+ 2022-07-14 19:13:48,884 [root] - WriteTask-3 started with pid 718350
+ 2022-07-14 19:13:48,885 [root] - WriteTask-4 started with pid 718351
+ 2022-07-14 19:13:48,885 [root] - WriteTask-5 started with pid 718352
+ 2022-07-14 19:13:48,886 [root] - WriteTask-6 started with pid 718353
+ 2022-07-14 19:13:48,886 [root] - WriteTask-7 started with pid 718354
+ 2022-07-14 19:13:48,887 [root] - ReadTask-0 started with pid 718355
+ 2022-07-14 19:13:48,888 [root] - ReadTask-1 started with pid 718356
+ 2022-07-14 19:13:48,889 [root] - ReadTask-2 started with pid 718357
+ 2022-07-14 19:13:48,889 [root] - ReadTask-3 started with pid 718358
+ 2022-07-14 19:13:48,890 [root] - ReadTask-4 started with pid 718359
+ 2022-07-14 19:13:48,891 [root] - ReadTask-5 started with pid 718361
+ 2022-07-14 19:13:48,892 [root] - ReadTask-6 started with pid 718364
+ 2022-07-14 19:13:48,893 [root] - ReadTask-7 started with pid 718365
+ 2022-07-14 19:13:56,042 [DataBaseMonitor] - count=6676310 speed=667631.0
+ 2022-07-14 19:14:06,196 [DataBaseMonitor] - count=20004310 speed=1332800.0
+ 2022-07-14 19:14:16,366 [DataBaseMonitor] - count=32290310 speed=1228600.0
+ 2022-07-14 19:14:26,527 [DataBaseMonitor] - count=44438310 speed=1214800.0
+ 2022-07-14 19:14:36,673 [DataBaseMonitor] - count=56608310 speed=1217000.0
+ 2022-07-14 19:14:46,834 [DataBaseMonitor] - count=68757310 speed=1214900.0
+ 2022-07-14 19:14:57,280 [DataBaseMonitor] - count=80992310 speed=1223500.0
+ 2022-07-14 19:15:07,689 [DataBaseMonitor] - count=93805310 speed=1281300.0
+ 2022-07-14 19:15:18,020 [DataBaseMonitor] - count=106111310 speed=1230600.0
+ 2022-07-14 19:15:28,356 [DataBaseMonitor] - count=118394310 speed=1228300.0
+ 2022-07-14 19:15:38,690 [DataBaseMonitor] - count=130742310 speed=1234800.0
+ 2022-07-14 19:15:49,000 [DataBaseMonitor] - count=143051310 speed=1230900.0
+ 2022-07-14 19:15:59,323 [DataBaseMonitor] - count=155276310 speed=1222500.0
+ 2022-07-14 19:16:09,649 [DataBaseMonitor] - count=167603310 speed=1232700.0
+ 2022-07-14 19:16:19,995 [DataBaseMonitor] - count=179976310 speed=1237300.0
+ ```
+
+
+
+:::note
+Don't establish connection to TDengine in the parent process if using Python connector in multi-process way, otherwise all the connections in child processes are blocked always. This is a known issue.
+
+:::
+
+
+
diff --git a/docs/en/07-develop/03-insert-data/_cs_line.mdx b/docs/en/07-develop/03-insert-data/_cs_line.mdx
index 71f46c62be3dfe7d771a35b2298e476bed353aba..ae49901c3ac0a34218def4b1e12702e79960d0b6 100644
--- a/docs/en/07-develop/03-insert-data/_cs_line.mdx
+++ b/docs/en/07-develop/03-insert-data/_cs_line.mdx
@@ -1,3 +1,3 @@
```csharp
-{{#include docs/examples/csharp/InfluxDBLineExample.cs}}
+{{#include docs/examples/csharp/influxdbLine/Program.cs}}
```
diff --git a/docs/en/07-develop/03-insert-data/_cs_opts_json.mdx b/docs/en/07-develop/03-insert-data/_cs_opts_json.mdx
index 8d80d042c984c513df5ca91813c0cd0a17b58eb5..2627648616b9ac8c92e0d76097d517c066232ef2 100644
--- a/docs/en/07-develop/03-insert-data/_cs_opts_json.mdx
+++ b/docs/en/07-develop/03-insert-data/_cs_opts_json.mdx
@@ -1,3 +1,3 @@
```csharp
-{{#include docs/examples/csharp/OptsJsonExample.cs}}
+{{#include docs/examples/csharp/optsJSON/Program.cs}}
```
diff --git a/docs/en/07-develop/03-insert-data/_cs_opts_telnet.mdx b/docs/en/07-develop/03-insert-data/_cs_opts_telnet.mdx
index cff32abf1feaf703971111542749fbe40152bc33..660db13fd1816150880883cf801ff50019fbae8d 100644
--- a/docs/en/07-develop/03-insert-data/_cs_opts_telnet.mdx
+++ b/docs/en/07-develop/03-insert-data/_cs_opts_telnet.mdx
@@ -1,3 +1,3 @@
```csharp
-{{#include docs/examples/csharp/OptsTelnetExample.cs}}
+{{#include docs/examples/csharp/optsTelnet/Program.cs}}
```
diff --git a/docs/en/07-develop/03-insert-data/_cs_sql.mdx b/docs/en/07-develop/03-insert-data/_cs_sql.mdx
index 1dc7bb3d1366aa3000212786756506eb5eb280e6..42a6bc4315393de0b2cba37caffbfbc1c07b952b 100644
--- a/docs/en/07-develop/03-insert-data/_cs_sql.mdx
+++ b/docs/en/07-develop/03-insert-data/_cs_sql.mdx
@@ -1,3 +1,3 @@
```csharp
-{{#include docs/examples/csharp/SQLInsertExample.cs}}
+{{#include docs/examples/csharp/sqlInsert/Program.cs}}
```
diff --git a/docs/en/07-develop/03-insert-data/_cs_stmt.mdx b/docs/en/07-develop/03-insert-data/_cs_stmt.mdx
index 229c874ab9f515e7eae66890a3dfe2e59c129e86..d8d73ca15ebdce28a40a6c922293493679491e97 100644
--- a/docs/en/07-develop/03-insert-data/_cs_stmt.mdx
+++ b/docs/en/07-develop/03-insert-data/_cs_stmt.mdx
@@ -1,3 +1,3 @@
```csharp
-{{#include docs/examples/csharp/StmtInsertExample.cs}}
+{{#include docs/examples/csharp/stmtInsert/Program.cs}}
```
diff --git a/docs/en/07-develop/03-insert-data/highvolume.webp b/docs/en/07-develop/03-insert-data/highvolume.webp
new file mode 100644
index 0000000000000000000000000000000000000000..46dfc74ae3b0043c591ff930c62251da49cae7ad
Binary files /dev/null and b/docs/en/07-develop/03-insert-data/highvolume.webp differ
diff --git a/docs/en/07-develop/04-query-data/_cs.mdx b/docs/en/07-develop/04-query-data/_cs.mdx
index 4bb582ecbfaeceac679af975e7752d1caeacb018..745ab368115ca0dfbaff1f3a326abfd9bed02430 100644
--- a/docs/en/07-develop/04-query-data/_cs.mdx
+++ b/docs/en/07-develop/04-query-data/_cs.mdx
@@ -1,3 +1,3 @@
```csharp
-{{#include docs/examples/csharp/QueryExample.cs}}
+{{#include docs/examples/csharp/query/Program.cs}}
```
diff --git a/docs/en/07-develop/04-query-data/_cs_async.mdx b/docs/en/07-develop/04-query-data/_cs_async.mdx
index 3ecf635fd39db402d1db68de6d7336b7b2d9d8e8..19c8e58f32ed3598b5ccb953085b97ef2e4ce067 100644
--- a/docs/en/07-develop/04-query-data/_cs_async.mdx
+++ b/docs/en/07-develop/04-query-data/_cs_async.mdx
@@ -1,3 +1,3 @@
```csharp
-{{#include docs/examples/csharp/AsyncQueryExample.cs}}
+{{#include docs/examples/csharp/asyncQuery/Program.cs}}
```
diff --git a/docs/en/07-develop/_sub_cs.mdx b/docs/en/07-develop/_sub_cs.mdx
index a435ea0273c94cbe75eaf7431e1a9c39d49d92e3..093b617e9bb9c7da7bc9392f91316b9f3342bae6 100644
--- a/docs/en/07-develop/_sub_cs.mdx
+++ b/docs/en/07-develop/_sub_cs.mdx
@@ -1,3 +1,3 @@
```csharp
-{{#include docs/examples/csharp/SubscribeDemo.cs}}
+{{#include docs/examples/csharp/subscribe/Program.cs}}
```
\ No newline at end of file
diff --git a/docs/en/10-deployment/01-deploy.md b/docs/en/10-deployment/01-deploy.md
index bfbb547bd4177cba369ec9d3d2541bceed853ef0..5dfcd3108d8b10cf24cdd5c852c4225ced0f16b2 100644
--- a/docs/en/10-deployment/01-deploy.md
+++ b/docs/en/10-deployment/01-deploy.md
@@ -39,18 +39,18 @@ To get the hostname on any host, the command `hostname -f` can be executed.
On the physical machine running the application, ping the dnode that is running taosd. If the dnode is not accessible, the application cannot connect to taosd. In this case, verify the DNS and hosts settings on the physical node running the application.
-The end point of each dnode is the output hostname and port, such as h1.taosdata.com:6030.
+The end point of each dnode is the output hostname and port, such as h1.tdengine.com:6030.
### Step 5
-Modify the TDengine configuration file `/etc/taos/taos.cfg` on each node. Assuming the first dnode of TDengine cluster is "h1.taosdata.com:6030", its `taos.cfg` is configured as following.
+Modify the TDengine configuration file `/etc/taos/taos.cfg` on each node. Assuming the first dnode of TDengine cluster is "h1.tdengine.com:6030", its `taos.cfg` is configured as following.
```c
// firstEp is the end point to connect to when any dnode starts
-firstEp h1.taosdata.com:6030
+firstEp h1.tdengine.com:6030
// must be configured to the FQDN of the host where the dnode is launched
-fqdn h1.taosdata.com
+fqdn h1.tdengine.com
// the port used by the dnode, default is 6030
serverPort 6030
@@ -76,13 +76,13 @@ The first dnode can be started following the instructions in [Get Started](/get-
taos> show dnodes;
id | endpoint | vnodes | support_vnodes | status | create_time | note |
============================================================================================================================================
-1 | h1.taosdata.com:6030 | 0 | 1024 | ready | 2022-07-16 10:50:42.673 | |
+1 | h1.tdengine.com:6030 | 0 | 1024 | ready | 2022-07-16 10:50:42.673 | |
Query OK, 1 rows affected (0.007984s)
```
-From the above output, it is shown that the end point of the started dnode is "h1.taosdata.com:6030", which is the `firstEp` of the cluster.
+From the above output, it is shown that the end point of the started dnode is "h1.tdengine.com:6030", which is the `firstEp` of the cluster.
## Add DNODE
@@ -90,7 +90,7 @@ There are a few steps necessary to add other dnodes in the cluster.
Second, we can start `taosd` as instructed in [Get Started](/get-started/).
-Then, on the first dnode i.e. h1.taosdata.com in our example, use TDengine CLI `taos` to execute the following command:
+Then, on the first dnode i.e. h1.tdengine.com in our example, use TDengine CLI `taos` to execute the following command:
```sql
CREATE DNODE "h2.taos.com:6030";
@@ -98,7 +98,7 @@ CREATE DNODE "h2.taos.com:6030";
This adds the end point of the new dnode (from Step 4) into the end point list of the cluster. In the command "fqdn:port" should be quoted using double quotes. Change `"h2.taos.com:6030"` to the end point of your new dnode.
-Then on the first dnode h1.taosdata.com, execute `show dnodes` in `taos`
+Then on the first dnode h1.tdengine.com, execute `show dnodes` in `taos`
```sql
SHOW DNODES;
@@ -114,7 +114,9 @@ The above process can be repeated to add more dnodes in the cluster.
Any node that is in the cluster and online can be the firstEp of new nodes.
Nodes use the firstEp parameter only when joining a cluster for the first time. After a node has joined the cluster, it stores the latest mnode in its end point list and no longer makes use of firstEp.
-However, firstEp is used by clients that connect to the cluster. For example, if you run `taos shell` without arguments, it connects to the firstEp by default.
+
+However, firstEp is used by clients that connect to the cluster. For example, if you run TDengine CLI `taos` without arguments, it connects to the firstEp by default.
+
Two dnodes that are launched without a firstEp value operate independently of each other. It is not possible to add one dnode to the other dnode and form a cluster. It is also not possible to form two independent clusters into a new cluster.
:::
diff --git a/docs/en/10-deployment/03-k8s.md b/docs/en/10-deployment/03-k8s.md
index b3f71ed5bd0e0dbaf3108cc40be6b18bdf5fb7e8..b0aa6777130864404e97dc332cf0e5ce830bf8ed 100644
--- a/docs/en/10-deployment/03-k8s.md
+++ b/docs/en/10-deployment/03-k8s.md
@@ -9,6 +9,7 @@ TDengine is a cloud-native time-series database that can be deployed on Kubernet
Before deploying TDengine on Kubernetes, perform the following:
+* Current steps are compatible with Kubernetes v1.5 and later version.
* Install and configure minikube, kubectl, and helm.
* Install and deploy Kubernetes and ensure that it can be accessed and used normally. Update any container registries or other services as necessary.
@@ -100,7 +101,7 @@ spec:
# Must set if you want a cluster.
- name: TAOS_FIRST_EP
value: "$(STS_NAME)-0.$(SERVICE_NAME).$(STS_NAMESPACE).svc.cluster.local:$(TAOS_SERVER_PORT)"
- # TAOS_FQND should always be setted in k8s env.
+ # TAOS_FQDN should always be set in k8s env.
- name: TAOS_FQDN
value: "$(POD_NAME).$(SERVICE_NAME).$(STS_NAMESPACE).svc.cluster.local"
volumeMounts:
diff --git a/docs/en/10-deployment/05-helm.md b/docs/en/10-deployment/05-helm.md
index 302730f1b571ef12a2d523f22c418f2b9f5f99cd..a4fa68100078efe85fff5e1b078ebd07e5337d5a 100644
--- a/docs/en/10-deployment/05-helm.md
+++ b/docs/en/10-deployment/05-helm.md
@@ -152,7 +152,7 @@ clusterDomainSuffix: ""
# converting an upper-snake-cased variable like `TAOS_DEBUG_FLAG`,
# to a camelCase taos config variable `debugFlag`.
#
-# See the variable list at https://www.taosdata.com/cn/documentation/administrator .
+# See the [Configuration Variables](../../reference/config)
#
# Note:
# 1. firstEp/secondEp: should not be setted here, it's auto generated at scale-up.
diff --git a/docs/en/12-taos-sql/01-data-type.md b/docs/en/12-taos-sql/01-data-type.md
index 876de50f35ee3ba533bd7d5916632de853a84c0e..60046629a4c6f89ccfe9b20adcbb2fdba2ffb261 100644
--- a/docs/en/12-taos-sql/01-data-type.md
+++ b/docs/en/12-taos-sql/01-data-type.md
@@ -1,70 +1,70 @@
---
sidebar_label: Data Types
title: Data Types
-description: "TDengine supports a variety of data types including timestamp, float, JSON and many others."
+description: 'TDengine supports a variety of data types including timestamp, float, JSON and many others.'
---
## Timestamp
When using TDengine to store and query data, the most important part of the data is timestamp. Timestamp must be specified when creating and inserting data rows. Timestamp must follow the rules below:
-- The format must be `YYYY-MM-DD HH:mm:ss.MS`, the default time precision is millisecond (ms), for example `2017-08-12 18:25:58.128`
-- Internal function `now` can be used to get the current timestamp on the client side
-- The current timestamp of the client side is applied when `now` is used to insert data
+- The format must be `YYYY-MM-DD HH:mm:ss.MS`, the default time precision is millisecond (ms), for example `2017-08-12 18:25:58.128`.
+- Internal function `NOW` can be used to get the current timestamp on the client side.
+- The current timestamp of the client side is applied when `NOW` is used to insert data.
- Epoch Time:timestamp can also be a long integer number, which means the number of seconds, milliseconds or nanoseconds, depending on the time precision, from UTC 1970-01-01 00:00:00.
-- Add/subtract operations can be carried out on timestamps. For example `now-2h` means 2 hours prior to the time at which query is executed. The units of time in operations can be b(nanosecond), u(microsecond), a(millisecond), s(second), m(minute), h(hour), d(day), or w(week). So `select * from t1 where ts > now-2w and ts <= now-1w` means the data between two weeks ago and one week ago. The time unit can also be n (calendar month) or y (calendar year) when specifying the time window for down sampling operations.
+- Add/subtract operations can be carried out on timestamps. For example `NOW-2h` means 2 hours prior to the time at which query is executed. The units of time in operations can be b(nanosecond), u(microsecond), a(millisecond), s(second), m(minute), h(hour), d(day), or w(week). So `SELECT * FROM t1 WHERE ts > NOW-2w AND ts <= NOW-1w` means the data between two weeks ago and one week ago. The time unit can also be n (calendar month) or y (calendar year) when specifying the time window for down sampling operations.
Time precision in TDengine can be set by the `PRECISION` parameter when executing `CREATE DATABASE`. The default time precision is millisecond. In the statement below, the precision is set to nanonseconds.
```sql
CREATE DATABASE db_name PRECISION 'ns';
```
+
## Data Types
In TDengine, the data types below can be used when specifying a column or tag.
-| # | **type** | **Bytes** | **Description** |
-| --- | :-------: | --------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
-| 1 | TIMESTAMP | 8 | Default precision is millisecond, microsecond and nanosecond are also supported |
-| 2 | INT | 4 | Integer, the value range is [-2^31, 2^31-1] |
-| 3 | INT UNSIGNED| 4| unsigned integer, the value range is [0, 2^32-1]
-| 4 | BIGINT | 8 | Long integer, the value range is [-2^63, 2^63-1] |
-| 5 | BIGINT UNSIGNED | 8 | unsigned long integer, the value range is [0, 2^64-1] |
-| 6 | FLOAT | 4 | Floating point number, the effective number of digits is 6-7, the value range is [-3.4E38, 3.4E38] |
-| 7 | DOUBLE | 8 | Double precision floating point number, the effective number of digits is 15-16, the value range is [-1.7E308, 1.7E308] |
-| 8 | BINARY | User Defined | Single-byte string for ASCII visible characters. Length must be specified when defining a column or tag of binary type. |
-| 9 | SMALLINT | 2 | Short integer, the value range is [-32768, 32767] |
-| 10 | INT UNSIGNED| 2| unsigned integer, the value range is [0, 65535]|
-| 11 | TINYINT | 1 | Single-byte integer, the value range is [-128, 127] |
-| 12 | TINYINT UNSIGNED | 1 | unsigned single-byte integer, the value range is [0, 255] |
-| 13 | BOOL | 1 | Bool, the value range is {true, false} |
-| 14 | NCHAR | User Defined| Multi-Byte string that can include multi byte characters like Chinese characters. Each character of NCHAR type consumes 4 bytes storage. The string value should be quoted with single quotes. Literal single quote inside the string must be preceded with backslash, like `\’`. The length must be specified when defining a column or tag of NCHAR type, for example nchar(10) means it can store at most 10 characters of nchar type and will consume fixed storage of 40 bytes. An error will be reported if the string value exceeds the length defined. |
-| 15 | JSON | | JSON type can only be used on tags. A tag of json type is excluded with any other tags of any other type |
-| 16 | VARCHAR | User-defined | Alias of BINARY |
-
+| # | **type** | **Bytes** | **Description** |
+| --- | :--------------: | ------------ | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
+| 1 | TIMESTAMP | 8 | Default precision is millisecond, microsecond and nanosecond are also supported. |
+| 2 | INT | 4 | Integer, the value range is [-2^31, 2^31-1]. |
+| 3 | INT UNSIGNED | 4 | Unsigned integer, the value range is [0, 2^32-1]. |
+| 4 | BIGINT | 8 | Long integer, the value range is [-2^63, 2^63-1]. |
+| 5 | BIGINT UNSIGNED | 8 | unsigned long integer, the value range is [0, 2^64-1]. |
+| 6 | FLOAT | 4 | Floating point number, the effective number of digits is 6-7, the value range is [-3.4E38, 3.4E38]. |
+| 7 | DOUBLE | 8 | Double precision floating point number, the effective number of digits is 15-16, the value range is [-1.7E308, 1.7E308]. |
+| 8 | BINARY | User Defined | Single-byte string for ASCII visible characters. Length must be specified when defining a column or tag of binary type. |
+| 9 | SMALLINT | 2 | Short integer, the value range is [-32768, 32767]. |
+| 10 | INT UNSIGNED | 2 | unsigned integer, the value range is [0, 65535]. |
+| 11 | TINYINT | 1 | Single-byte integer, the value range is [-128, 127]. |
+| 12 | TINYINT UNSIGNED | 1 | unsigned single-byte integer, the value range is [0, 255]. |
+| 13 | BOOL | 1 | Bool, the value range is {true, false}. |
+| 14 | NCHAR | User Defined | Multi-byte string that can include multi byte characters like Chinese characters. Each character of NCHAR type consumes 4 bytes storage. The string value should be quoted with single quotes. Literal single quote inside the string must be preceded with backslash, like `\'`. The length must be specified when defining a column or tag of NCHAR type, for example nchar(10) means it can store at most 10 characters of nchar type and will consume fixed storage of 40 bytes. An error will be reported if the string value exceeds the length defined. |
+| 15 | JSON | | JSON type can only be used on tags. A tag of json type is excluded with any other tags of any other type. |
+| 16 | VARCHAR | User-defined | Alias of BINARY |
:::note
-- TDengine is case insensitive and treats any characters in the sql command as lower case by default, case sensitive strings must be quoted with single quotes.
-- Only ASCII visible characters are suggested to be used in a column or tag of BINARY type. Multi-byte characters must be stored in NCHAR type.
-- The length of BINARY can be up to 16374 bytes. The string value must be quoted with single quotes. You must specify a length in bytes for a BINARY value, for example binary(20) for up to twenty single-byte characters. If the data exceeds the specified length, an error will occur. The literal single quote inside the string must be preceded with back slash like `\'`
+
+- Only ASCII visible characters are suggested to be used in a column or tag of BINARY type. Multi-byte characters must be stored in NCHAR type.
+- The length of BINARY can be up to 16,374 bytes. The string value must be quoted with single quotes. You must specify a length in bytes for a BINARY value, for example binary(20) for up to twenty single-byte characters. If the data exceeds the specified length, an error will occur. The literal single quote inside the string must be preceded with back slash like `\'`
- Numeric values in SQL statements will be determined as integer or float type according to whether there is decimal point or whether scientific notation is used, so attention must be paid to avoid overflow. For example, 9999999999999999999 will be considered as overflow because it exceeds the upper limit of long integer, but 9999999999999999999.0 will be considered as a legal float number.
:::
-
## Constants
+
TDengine supports a variety of constants:
-| # | **Syntax** | **Type** | **Description** |
-| --- | :-------: | --------- | -------------------------------------- |
-| 1 | [{+ \| -}]123 | BIGINT | Integer literals are of type BIGINT. Data that exceeds the length of the BIGINT type is truncated. |
-| 2 | 123.45 | DOUBLE | Floating-point literals are of type DOUBLE. Numeric values will be determined as integer or float type according to whether there is decimal point or whether scientific notation is used. |
-| 3 | 1.2E3 | DOUBLE | Literals in scientific notation are of type DOUBLE. |
-| 4 | 'abc' | BINARY | Content enclosed in single quotation marks is of type BINARY. The size of a BINARY is the size of the string in bytes. A literal single quote inside the string must be escaped with a backslash (\'). |
-| 5 | 'abc' | BINARY | Content enclosed in double quotation marks is of type BINARY. The size of a BINARY is the size of the string in bytes. A literal double quote inside the string must be escaped with a backslash (\"). |
-| 6 | TIMESTAMP {'literal' \| "literal"} | TIMESTAMP | The TIMESTAMP keyword indicates that the following string literal is interpreted as a timestamp. The string must be in YYYY-MM-DD HH:mm:ss.MS format. The precision is inherited from the database configuration. |
-| 7 | {TRUE \| FALSE} | BOOL | Boolean literals are of type BOOL. |
-| 8 | {'' \| "" \| '\t' \| "\t" \| ' ' \| " " \| NULL } | -- | The preceding characters indicate null literals. These can be used with any data type. |
+| # | **Syntax** | **Type** | **Description** |
+| --- | :-----------------------------------------------: | --------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
+| 1 | [{+ \| -}]123 | BIGINT | Integer literals are of type BIGINT. Data that exceeds the length of the BIGINT type is truncated. |
+| 2 | 123.45 | DOUBLE | Floating-point literals are of type DOUBLE. Numeric values will be determined as integer or float type according to whether there is decimal point or whether scientific notation is used. |
+| 3 | 1.2E3 | DOUBLE | Literals in scientific notation are of type DOUBLE. |
+| 4 | 'abc' | BINARY | Content enclosed in single quotation marks is of type BINARY. The size of a BINARY is the size of the string in bytes. A literal single quote inside the string must be escaped with a backslash `\'`. |
+| 5 | 'abc' | BINARY | Content enclosed in double quotation marks is of type BINARY. The size of a BINARY is the size of the string in bytes. A literal double quote inside the string must be escaped with a backslash `\"`. |
+| 6 | TIMESTAMP {'literal' \| "literal"} | TIMESTAMP | The TIMESTAMP keyword indicates that the following string literal is interpreted as a timestamp. The string must be in YYYY-MM-DD HH:mm:ss.MS format. The precision is inherited from the database configuration. |
+| 7 | {TRUE \| FALSE} | BOOL | Boolean literals are of type BOOL. |
+| 8 | {'' \| "" \| '\t' \| "\t" \| ' ' \| " " \| NULL } | -- | The preceding characters indicate null literals. These can be used with any data type. |
:::note
Numeric values will be determined as integer or float type according to whether there is decimal point or whether scientific notation is used, so attention must be paid to avoid overflow. For example, 9999999999999999999 will be considered as overflow because it exceeds the upper limit of long integer, but 9999999999999999999.0 will be considered as a legal float number.
diff --git a/docs/en/12-taos-sql/02-database.md b/docs/en/12-taos-sql/02-database.md
index d9dadae976bf07bbf6cfb49401d55bb0bf18da49..5a84bbf3709ff2355157409ae11d5f85191a8271 100644
--- a/docs/en/12-taos-sql/02-database.md
+++ b/docs/en/12-taos-sql/02-database.md
@@ -71,9 +71,9 @@ database_option: {
- SINGLE_STABLE: specifies whether the database can contain more than one supertable.
- 0: The database can contain multiple supertables.
- 1: The database can contain only one supertable.
-- WAL_RETENTION_PERIOD: specifies the time after which WAL files are deleted. This parameter is used for data subscription. Enter a time in seconds. The default value is 0. A value of 0 indicates that each WAL file is deleted immediately after its contents are written to disk. -1: WAL files are never deleted.
-- WAL_RETENTION_SIZE: specifies the size at which WAL files are deleted. This parameter is used for data subscription. Enter a size in KB. The default value is 0. A value of 0 indicates that each WAL file is deleted immediately after its contents are written to disk. -1: WAL files are never deleted.
-- WAL_ROLL_PERIOD: specifies the time after which WAL files are rotated. After this period elapses, a new WAL file is created. The default value is 0. A value of 0 indicates that a new WAL file is created only after the previous WAL file was written to disk.
+- WAL_RETENTION_PERIOD: specifies the time after which WAL files are deleted. This parameter is used for data subscription. Enter a time in seconds. The default value of single copy is 0. A value of 0 indicates that each WAL file is deleted immediately after its contents are written to disk. -1: WAL files are never deleted. The default value of multiple copy is 4 days.
+- WAL_RETENTION_SIZE: specifies the size at which WAL files are deleted. This parameter is used for data subscription. Enter a size in KB. The default value of single copy is 0. A value of 0 indicates that each WAL file is deleted immediately after its contents are written to disk. -1: WAL files are never deleted. The default value of multiple copy is -1.
+- WAL_ROLL_PERIOD: specifies the time after which WAL files are rotated. After this period elapses, a new WAL file is created. The default value of single copy is 0. A value of 0 indicates that a new WAL file is created only after the previous WAL file was written to disk. The default values of multiple copy is 1 day.
- WAL_SEGMENT_SIZE: specifies the maximum size of a WAL file. After the current WAL file reaches this size, a new WAL file is created. The default value is 0. A value of 0 indicates that a new WAL file is created only after the previous WAL file was written to disk.
### Example Statement
diff --git a/docs/en/12-taos-sql/03-table.md b/docs/en/12-taos-sql/03-table.md
index bf32cf171bbeea23ada946d5011a73dd70ddd6ca..5a2c8ed6ee4a5ea129023fec68fa97d577832f60 100644
--- a/docs/en/12-taos-sql/03-table.md
+++ b/docs/en/12-taos-sql/03-table.md
@@ -57,7 +57,7 @@ table_option: {
3. MAX_DELAY: specifies the maximum latency for pushing computation results. The default value is 15 minutes or the value of the INTERVAL parameter, whichever is smaller. Enter a value between 0 and 15 minutes in milliseconds, seconds, or minutes. You can enter multiple values separated by commas (,). Note: Retain the default value if possible. Configuring a small MAX_DELAY may cause results to be frequently pushed, affecting storage and query performance. This parameter applies only to supertables and takes effect only when the RETENTIONS parameter has been specified for the database.
4. ROLLUP: specifies aggregate functions to roll up. Rolling up a function provides downsampled results based on multiple axes. This parameter applies only to supertables and takes effect only when the RETENTIONS parameter has been specified for the database. You can specify only one function to roll up. The rollup takes effect on all columns except TS. Enter one of the following values: avg, sum, min, max, last, or first.
5. SMA: specifies functions on which to enable small materialized aggregates (SMA). SMA is user-defined precomputation of aggregates based on data blocks. Enter one of the following values: max, min, or sum This parameter can be used with supertables and standard tables.
-6. TTL: specifies the time to live (TTL) for the table. If the period specified by the TTL parameter elapses without any data being written to the table, TDengine will automatically delete the table. Note: The system may not delete the table at the exact moment that the TTL expires. Enter a value in days. The default value is 0. Note: The TTL parameter has a higher priority than the KEEP parameter. If a table is marked for deletion because the TTL has expired, it will be deleted even if the time specified by the KEEP parameter has not elapsed. This parameter can be used with standard tables and subtables.
+6. TTL: specifies the time to live (TTL) for the table. If TTL is specified when creatinga table, after the time period for which the table has been existing is over TTL, TDengine will automatically delete the table. Please be noted that the system may not delete the table at the exact moment that the TTL expires but guarantee there is such a system and finally the table will be deleted. The unit of TTL is in days. The default value is 0, i.e. never expire.
## Create Subtables
diff --git a/docs/en/12-taos-sql/05-insert.md b/docs/en/12-taos-sql/05-insert.md
index e7d56fb3c734affa92c8c71c190b1132cd89e335..da21896866d74c141b933f85cbe87952f53c7fc4 100644
--- a/docs/en/12-taos-sql/05-insert.md
+++ b/docs/en/12-taos-sql/05-insert.md
@@ -16,6 +16,8 @@ INSERT INTO
[(field1_name, ...)]
VALUES (field1_value, ...) [(field1_value2, ...) ...] | FILE csv_file_path
...];
+
+INSERT INTO tb_name [(field1_name, ...)] subquery
```
**Timestamps**
@@ -37,7 +39,7 @@ INSERT INTO
4. The FILE clause inserts tags or data from a comma-separates values (CSV) file. Do not include headers in your CSV files.
-5. A single INSERT statement can write data to multiple tables.
+5. A single `INSERT ... VALUES` statement and `INSERT ... FILE` statement can write data to multiple tables.
6. The INSERT statement is fully parsed before being executed, so that if any element of the statement fails, the entire statement will fail. For example, the following statement will not create a table because the latter part of the statement is invalid:
@@ -47,6 +49,8 @@ INSERT INTO
7. However, an INSERT statement that writes data to multiple subtables can succeed for some tables and fail for others. This situation is caused because vnodes perform write operations independently of each other. One vnode failing to write data does not affect the ability of other vnodes to write successfully.
+8. Data from TDengine can be inserted into a specified table using the `INSERT ... subquery` statement. Arbitrary query statements are supported. This syntax can only be used for subtables and normal tables, and does not support automatic table creation.
+
## Insert a Record
Single row or multiple rows specified with VALUES can be inserted into a specific table. A single row is inserted using the below statement.
diff --git a/docs/en/12-taos-sql/06-select.md b/docs/en/12-taos-sql/06-select.md
index 1dd0caed38235d3d10813b2cd74fec6446c5ec24..c065245827f8e7edbb0297abef4aa9e5de4a45cc 100644
--- a/docs/en/12-taos-sql/06-select.md
+++ b/docs/en/12-taos-sql/06-select.md
@@ -52,11 +52,6 @@ window_clause: {
| STATE_WINDOW(col)
| INTERVAL(interval_val [, interval_offset]) [SLIDING (sliding_val)] [WATERMARK(watermark_val)] [FILL(fill_mod_and_val)]
-changes_option: {
- DURATION duration_val
- | ROWS rows_val
-}
-
group_by_clause:
GROUP BY expr [, expr] ... HAVING condition
@@ -71,9 +66,9 @@ order_expr:
A query can be performed on some or all columns. Data and tag columns can all be included in the SELECT list.
-## Wildcards
+### Wildcards
-You can use an asterisk (\*) as a wildcard character to indicate all columns. For standard tables, the asterisk indicates only data columns. For supertables and subtables, tag columns are also included.
+You can use an asterisk (\*) as a wildcard character to indicate all columns. For normal tables or sub-tables, the asterisk indicates only data columns. For supertables, tag columns are also included when using asterisk (\*).
```sql
SELECT * FROM d1001;
@@ -126,7 +121,6 @@ SELECT DISTINCT col_name [, col_name ...] FROM tb_name;
1. Configuration parameter `maxNumOfDistinctRes` in `taos.cfg` is used to control the number of rows to output. The minimum configurable value is 100,000, the maximum configurable value is 100,000,000, the default value is 1,000,000. If the actual number of rows exceeds the value of this parameter, only the number of rows specified by this parameter will be output.
2. It can't be guaranteed that the results selected by using `DISTINCT` on columns of `FLOAT` or `DOUBLE` are exactly unique because of the precision errors in floating point numbers.
-3. `DISTINCT` can't be used in the sub-query of a nested query statement, and can't be used together with aggregate functions, `GROUP BY` or `JOIN` in the same SQL statement.
:::
@@ -142,6 +136,8 @@ taos> SELECT ts, ts AS primary_key_ts FROM d1001;
### Pseudocolumns
+**Pseudocolumn:** A pseudo-column behaves like a table column but is not actually stored in the table. You can select from pseudo-columns, but you cannot insert, update, or delete their values. A pseudo-column is also similar to a function without arguments. This section describes these pseudo-columns:
+
**TBNAME**
The TBNAME pseudocolumn in a supertable contains the names of subtables within the supertable.
@@ -185,6 +181,14 @@ In TDengine, the first column of all tables must be a timestamp. This column is
select _rowts, max(current) from meters;
```
+**\_IROWTS**
+
+The \_IROWTS pseudocolumn can only be used with INTERP function. This pseudocolumn can be used to retrieve the corresponding timestamp column associated with the interpolation results.
+
+```sql
+select _irowts, interp(current) from meters range('2020-01-01 10:00:00', '2020-01-01 10:30:00') every(1s) fill(linear);
+```
+
## Query Objects
`FROM` can be followed by a number of tables or super tables, or can be followed by a sub-query.
@@ -354,19 +358,15 @@ SELECT ... FROM (SELECT ... FROM ...) ...;
:::info
-- Only one layer of nesting is allowed, that means no sub query is allowed within a sub query
-- The result set returned by the inner query will be used as a "virtual table" by the outer query. The "virtual table" can be renamed using `AS` keyword for easy reference in the outer query.
-- Sub query is not allowed in continuous query.
+- The result of a nested query is returned as a virtual table used by the outer query. It's recommended to give an alias to this table for the convenience of using it in the outer query.
- JOIN operation is allowed between tables/STables inside both inner and outer queries. Join operation can be performed on the result set of the inner query.
-- UNION operation is not allowed in either inner query or outer query.
-- The functions that can be used in the inner query are the same as those that can be used in a non-nested query.
+- The features that can be used in the inner query are the same as those that can be used in a non-nested query.
- `ORDER BY` inside the inner query is unnecessary and will slow down the query performance significantly. It is best to avoid the use of `ORDER BY` inside the inner query.
- Compared to the non-nested query, the functionality that can be used in the outer query has the following restrictions:
- Functions
- - If the result set returned by the inner query doesn't contain timestamp column, then functions relying on timestamp can't be used in the outer query, like `TOP`, `BOTTOM`, `FIRST`, `LAST`, `DIFF`.
- - Functions that need to scan the data twice can't be used in the outer query, like `STDDEV`, `PERCENTILE`.
- - `IN` operator is not allowed in the outer query but can be used in the inner query.
- - `GROUP BY` is not supported in the outer query.
+ - If the result set returned by the inner query doesn't contain timestamp column, then functions relying on timestamp can't be used in the outer query, like INTERP,DERIVATIVE, IRATE, LAST_ROW, FIRST, LAST, TWA, STATEDURATION, TAIL, UNIQUE.
+ - If the result set returned by the inner query are not sorted in order by timestamp, then functions relying on data ordered by timestamp can't be used in the outer query, like LEASTSQUARES, ELAPSED, INTERP, DERIVATIVE, IRATE, TWA, DIFF, STATECOUNT, STATEDURATION, CSUM, MAVG, TAIL, UNIQUE.
+ - Functions that need to scan the data twice can't be used in the outer query, like PERCENTILE.
:::
diff --git a/docs/en/12-taos-sql/10-function.md b/docs/en/12-taos-sql/10-function.md
index d35fd3109998608475e4e0429265c8ac7274f57d..243ede5fcbe157c933b655fe91e95d1d87084a7e 100644
--- a/docs/en/12-taos-sql/10-function.md
+++ b/docs/en/12-taos-sql/10-function.md
@@ -13,7 +13,7 @@ Single row functions return a result for each row.
#### ABS
```sql
-SELECT ABS(field_name) FROM { tb_name | stb_name } [WHERE clause]
+ABS(expr)
```
**Description**: The absolute value of a specific field.
@@ -31,7 +31,7 @@ SELECT ABS(field_name) FROM { tb_name | stb_name } [WHERE clause]
#### ACOS
```sql
-SELECT ACOS(field_name) FROM { tb_name | stb_name } [WHERE clause]
+ACOS(expr)
```
**Description**: The arc cosine of a specific field.
@@ -49,7 +49,7 @@ SELECT ACOS(field_name) FROM { tb_name | stb_name } [WHERE clause]
#### ASIN
```sql
-SELECT ASIN(field_name) FROM { tb_name | stb_name } [WHERE clause]
+ASIN(expr)
```
**Description**: The arc sine of a specific field.
@@ -68,7 +68,7 @@ SELECT ASIN(field_name) FROM { tb_name | stb_name } [WHERE clause]
#### ATAN
```sql
-SELECT ATAN(field_name) FROM { tb_name | stb_name } [WHERE clause]
+ATAN(expr)
```
**Description**: The arc tangent of a specific field.
@@ -87,7 +87,7 @@ SELECT ATAN(field_name) FROM { tb_name | stb_name } [WHERE clause]
#### CEIL
```sql
-SELECT CEIL(field_name) FROM { tb_name | stb_name } [WHERE clause];
+CEIL(expr)
```
**Description**: The rounded up value of a specific field
@@ -105,7 +105,7 @@ SELECT CEIL(field_name) FROM { tb_name | stb_name } [WHERE clause];
#### COS
```sql
-SELECT COS(field_name) FROM { tb_name | stb_name } [WHERE clause]
+COS(expr)
```
**Description**: The cosine of a specific field.
@@ -123,16 +123,16 @@ SELECT COS(field_name) FROM { tb_name | stb_name } [WHERE clause]
#### FLOOR
```sql
-SELECT FLOOR(field_name) FROM { tb_name | stb_name } [WHERE clause];
+FLOOR(expr)
```
-**Description**: The rounded down value of a specific field
+**Description**: The rounded down value of a specific field
**More explanations**: The restrictions are same as those of the `CEIL` function.
#### LOG
```sql
-SELECT LOG(field_name[, base]) FROM { tb_name | stb_name } [WHERE clause]
+LOG(expr [, base])
```
**Description**: The logarithm of a specific field with `base` as the radix. If you do not enter a base, the natural logarithm of the field is returned.
@@ -151,7 +151,7 @@ SELECT LOG(field_name[, base]) FROM { tb_name | stb_name } [WHERE clause]
#### POW
```sql
-SELECT POW(field_name, power) FROM { tb_name | stb_name } [WHERE clause]
+POW(expr, power)
```
**Description**: The power of a specific field with `power` as the exponent.
@@ -170,17 +170,17 @@ SELECT POW(field_name, power) FROM { tb_name | stb_name } [WHERE clause]
#### ROUND
```sql
-SELECT ROUND(field_name) FROM { tb_name | stb_name } [WHERE clause];
+ROUND(expr)
```
-**Description**: The rounded value of a specific field.
+**Description**: The rounded value of a specific field.
**More explanations**: The restrictions are same as those of the `CEIL` function.
#### SIN
```sql
-SELECT SIN(field_name) FROM { tb_name | stb_name } [WHERE clause]
+SIN(expr)
```
**Description**: The sine of a specific field.
@@ -198,7 +198,7 @@ SELECT SIN(field_name) FROM { tb_name | stb_name } [WHERE clause]
#### SQRT
```sql
-SELECT SQRT(field_name) FROM { tb_name | stb_name } [WHERE clause]
+SQRT(expr)
```
**Description**: The square root of a specific field.
@@ -216,7 +216,7 @@ SELECT SQRT(field_name) FROM { tb_name | stb_name } [WHERE clause]
#### TAN
```sql
-SELECT TAN(field_name) FROM { tb_name | stb_name } [WHERE clause]
+TAN(expr)
```
**Description**: The tangent of a specific field.
@@ -238,7 +238,7 @@ Concatenation functions take strings as input and produce string or numeric valu
#### CHAR_LENGTH
```sql
-SELECT CHAR_LENGTH(str|column) FROM { tb_name | stb_name } [WHERE clause]
+CHAR_LENGTH(expr)
```
**Description**: The length in number of characters of a string
@@ -254,7 +254,7 @@ SELECT CHAR_LENGTH(str|column) FROM { tb_name | stb_name } [WHERE clause]
#### CONCAT
```sql
-SELECT CONCAT(str1|column1, str2|column2, ...) FROM { tb_name | stb_name } [WHERE clause]
+CONCAT(expr1, expr2 [, expr] ...)
```
**Description**: The concatenation result of two or more strings
@@ -271,7 +271,7 @@ SELECT CONCAT(str1|column1, str2|column2, ...) FROM { tb_name | stb_name } [WHER
#### CONCAT_WS
```sql
-SELECT CONCAT_WS(separator, str1|column1, str2|column2, ...) FROM { tb_name | stb_name } [WHERE clause]
+CONCAT_WS(separator_expr, expr1, expr2 [, expr] ...)
```
**Description**: The concatenation result of two or more strings with separator
@@ -288,7 +288,7 @@ SELECT CONCAT_WS(separator, str1|column1, str2|column2, ...) FROM { tb_name | st
#### LENGTH
```sql
-SELECT LENGTH(str|column) FROM { tb_name | stb_name } [WHERE clause]
+LENGTH(expr)
```
**Description**: The length in bytes of a string
@@ -305,7 +305,7 @@ SELECT LENGTH(str|column) FROM { tb_name | stb_name } [WHERE clause]
#### LOWER
```sql
-SELECT LOWER(str|column) FROM { tb_name | stb_name } [WHERE clause]
+LOWER(expr)
```
**Description**: Convert the input string to lower case
@@ -322,7 +322,7 @@ SELECT LOWER(str|column) FROM { tb_name | stb_name } [WHERE clause]
#### LTRIM
```sql
-SELECT LTRIM(str|column) FROM { tb_name | stb_name } [WHERE clause]
+LTRIM(expr)
```
**Description**: Remove the left leading blanks of a string
@@ -339,7 +339,7 @@ SELECT LTRIM(str|column) FROM { tb_name | stb_name } [WHERE clause]
#### RTRIM
```sql
-SELECT LTRIM(str|column) FROM { tb_name | stb_name } [WHERE clause]
+LTRIM(expr)
```
**Description**: Remove the right tailing blanks of a string
@@ -356,7 +356,7 @@ SELECT LTRIM(str|column) FROM { tb_name | stb_name } [WHERE clause]
#### SUBSTR
```sql
-SELECT SUBSTR(str,pos[,len]) FROM { tb_name | stb_name } [WHERE clause]
+SUBSTR(expr, pos [, len])
```
**Description**: The sub-string starting from `pos` with length of `len` from the original string `str` - If `len` is not specified, it means from `pos` to the end.
@@ -373,7 +373,7 @@ SELECT SUBSTR(str,pos[,len]) FROM { tb_name | stb_name } [WHERE clause]
#### UPPER
```sql
-SELECT UPPER(str|column) FROM { tb_name | stb_name } [WHERE clause]
+UPPER(expr)
```
**Description**: Convert the input string to upper case
@@ -394,10 +394,10 @@ Conversion functions change the data type of a value.
#### CAST
```sql
-SELECT CAST(expression AS type_name) FROM { tb_name | stb_name } [WHERE clause]
+CAST(expr AS type_name)
```
-**Description**: Convert the input data `expression` into the type specified by `type_name`. This function can be used only in SELECT statements.
+**Description**: Convert the input data `expr` into the type specified by `type_name`. This function can be used only in SELECT statements.
**Return value type**: The type specified by parameter `type_name`
@@ -418,7 +418,7 @@ SELECT CAST(expression AS type_name) FROM { tb_name | stb_name } [WHERE clause]
#### TO_ISO8601
```sql
-SELECT TO_ISO8601(ts[, timezone]) FROM { tb_name | stb_name } [WHERE clause];
+TO_ISO8601(expr [, timezone])
```
**Description**: The ISO8601 date/time format converted from a UNIX timestamp, plus the timezone. You can specify any time zone with the timezone parameter. If you do not enter this parameter, the time zone on the client is used.
@@ -434,14 +434,14 @@ SELECT TO_ISO8601(ts[, timezone]) FROM { tb_name | stb_name } [WHERE clause];
**More explanations**:
- You can specify a time zone in the following format: [z/Z, +/-hhmm, +/-hh, +/-hh:mm]。 For example, TO_ISO8601(1, "+00:00").
-- If the input is a UNIX timestamp, the precision of the returned value is determined by the digits of the input timestamp
+- If the input is a UNIX timestamp, the precision of the returned value is determined by the digits of the input timestamp
- If the input is a column of TIMESTAMP type, the precision of the returned value is same as the precision set for the current data base in use
#### TO_JSON
```sql
-SELECT TO_JSON(str_literal) FROM { tb_name | stb_name } [WHERE clause];
+TO_JSON(str_literal)
```
**Description**: Converts a string into JSON.
@@ -458,7 +458,7 @@ SELECT TO_JSON(str_literal) FROM { tb_name | stb_name } [WHERE clause];
#### TO_UNIXTIMESTAMP
```sql
-SELECT TO_UNIXTIMESTAMP(datetime_string) FROM { tb_name | stb_name } [WHERE clause];
+TO_UNIXTIMESTAMP(expr)
```
**Description**: UNIX timestamp converted from a string of date/time format
@@ -486,9 +486,7 @@ All functions that return the current time, such as `NOW`, `TODAY`, and `TIMEZON
#### NOW
```sql
-SELECT NOW() FROM { tb_name | stb_name } [WHERE clause];
-SELECT select_expr FROM { tb_name | stb_name } WHERE ts_col cond_operatior NOW();
-INSERT INTO tb_name VALUES (NOW(), ...);
+NOW()
```
**Description**: The current time of the client side system
@@ -511,7 +509,7 @@ INSERT INTO tb_name VALUES (NOW(), ...);
#### TIMEDIFF
```sql
-SELECT TIMEDIFF(ts | datetime_string1, ts | datetime_string2 [, time_unit]) FROM { tb_name | stb_name } [WHERE clause];
+TIMEDIFF(expr1, expr2 [, time_unit])
```
**Description**: The difference between two timestamps, and rounded to the time unit specified by `time_unit`
@@ -534,7 +532,7 @@ SELECT TIMEDIFF(ts | datetime_string1, ts | datetime_string2 [, time_unit]) FROM
#### TIMETRUNCATE
```sql
-SELECT TIMETRUNCATE(ts | datetime_string , time_unit) FROM { tb_name | stb_name } [WHERE clause];
+TIMETRUNCATE(expr, time_unit)
```
**Description**: Truncate the input timestamp with unit specified by `time_unit`
@@ -555,7 +553,7 @@ SELECT TIMETRUNCATE(ts | datetime_string , time_unit) FROM { tb_name | stb_name
#### TIMEZONE
```sql
-SELECT TIMEZONE() FROM { tb_name | stb_name } [WHERE clause];
+TIMEZONE()
```
**Description**: The timezone of the client side system
@@ -570,9 +568,7 @@ SELECT TIMEZONE() FROM { tb_name | stb_name } [WHERE clause];
#### TODAY
```sql
-SELECT TODAY() FROM { tb_name | stb_name } [WHERE clause];
-SELECT select_expr FROM { tb_name | stb_name } WHERE ts_col cond_operatior TODAY()];
-INSERT INTO tb_name VALUES (TODAY(), ...);
+TODAY()
```
**Description**: The timestamp of 00:00:00 of the client side system
@@ -599,7 +595,12 @@ TDengine supports the following aggregate functions:
### APERCENTILE
```sql
-SELECT APERCENTILE(field_name, P[, algo_type]) FROM { tb_name | stb_name } [WHERE clause]
+APERCENTILE(expr, p [, algo_type])
+
+algo_type: {
+ "default"
+ | "t-digest"
+}
```
**Description**: Similar to `PERCENTILE`, but a simulated result is returned
@@ -611,13 +612,14 @@ SELECT APERCENTILE(field_name, P[, algo_type]) FROM { tb_name | stb_name } [WHER
**Applicable table types**: standard tables and supertables
**Explanations**:
-- _P_ is in range [0,100], when _P_ is 0, the result is same as using function MIN; when _P_ is 100, the result is same as function MAX.
+- _p_ is in range [0,100], when _p_ is 0, the result is same as using function MIN; when _p_ is 100, the result is same as function MAX.
- `algo_type` can only be input as `default` or `t-digest` Enter `default` to use a histogram-based algorithm. Enter `t-digest` to use the t-digest algorithm to calculate the approximation of the quantile. `default` is used by default.
+- The approximation result of `t-digest` algorithm is sensitive to input data order. For example, when querying STable with different input data order there might be minor differences in calculated results.
### AVG
```sql
-SELECT AVG(field_name) FROM tb_name [WHERE clause];
+AVG(expr)
```
**Description**: The average value of the specified fields.
@@ -632,7 +634,7 @@ SELECT AVG(field_name) FROM tb_name [WHERE clause];
### COUNT
```sql
-SELECT COUNT([*|field_name]) FROM tb_name [WHERE clause];
+COUNT({* | expr})
```
**Description**: The number of records in the specified fields.
@@ -652,7 +654,7 @@ If you input a specific column, the number of non-null values in the column is r
### ELAPSED
```sql
-SELECT ELAPSED(ts_primary_key [, time_unit]) FROM { tb_name | stb_name } [WHERE clause] [INTERVAL(interval [, offset]) [SLIDING sliding]];
+ELAPSED(ts_primary_key [, time_unit])
```
**Description**:`elapsed` function can be used to calculate the continuous time length in which there is valid data. If it's used with `INTERVAL` clause, the returned result is the calcualted time length within each time window. If it's used without `INTERVAL` caluse, the returned result is the calculated time length within the specified time range. Please be noted that the return value of `elapsed` is the number of `time_unit` in the calculated time length.
@@ -664,7 +666,7 @@ SELECT ELAPSED(ts_primary_key [, time_unit]) FROM { tb_name | stb_name } [WHERE
**Applicable tables**: table, STable, outter in nested query
**Explanations**:
-- `field_name` parameter can only be the first column of a table, i.e. timestamp primary key.
+- `ts_primary_key` parameter can only be the first column of a table, i.e. timestamp primary key.
- The minimum value of `time_unit` is the time precision of the database. If `time_unit` is not specified, the time precision of the database is used as the default time unit. Time unit specified by `time_unit` can be:
1b (nanoseconds), 1u (microseconds), 1a (milliseconds), 1s (seconds), 1m (minutes), 1h (hours), 1d (days), or 1w (weeks)
- It can be used with `INTERVAL` to get the time valid time length of each time window. Please be noted that the return value is same as the time window for all time windows except for the first and the last time window.
@@ -678,7 +680,7 @@ SELECT ELAPSED(ts_primary_key [, time_unit]) FROM { tb_name | stb_name } [WHERE
### LEASTSQUARES
```sql
-SELECT LEASTSQUARES(field_name, start_val, step_val) FROM tb_name [WHERE clause];
+LEASTSQUARES(expr, start_val, step_val)
```
**Description**: The linear regression function of the specified column and the timestamp column (primary key), `start_val` is the initial value and `step_val` is the step value.
@@ -693,7 +695,7 @@ SELECT LEASTSQUARES(field_name, start_val, step_val) FROM tb_name [WHERE clause]
### SPREAD
```sql
-SELECT SPREAD(field_name) FROM { tb_name | stb_name } [WHERE clause];
+SPREAD(expr)
```
**Description**: The difference between the max and the min of a specific column
@@ -708,7 +710,7 @@ SELECT SPREAD(field_name) FROM { tb_name | stb_name } [WHERE clause];
### STDDEV
```sql
-SELECT STDDEV(field_name) FROM tb_name [WHERE clause];
+STDDEV(expr)
```
**Description**: Standard deviation of a specific column in a table or STable
@@ -723,7 +725,7 @@ SELECT STDDEV(field_name) FROM tb_name [WHERE clause];
### SUM
```sql
-SELECT SUM(field_name) FROM tb_name [WHERE clause];
+SUM(expr)
```
**Description**: The sum of a specific column in a table or STable
@@ -738,7 +740,7 @@ SELECT SUM(field_name) FROM tb_name [WHERE clause];
### HYPERLOGLOG
```sql
-SELECT HYPERLOGLOG(field_name) FROM { tb_name | stb_name } [WHERE clause];
+HYPERLOGLOG(expr)
```
**Description**:
@@ -755,7 +757,7 @@ SELECT HYPERLOGLOG(field_name) FROM { tb_name | stb_name } [WHERE clause];
### HISTOGRAM
```sql
-SELECT HISTOGRAM(field_name,bin_type, bin_description, normalized) FROM tb_name [WHERE clause];
+HISTOGRAM(expr,bin_type, bin_description, normalized)
```
**Description**:Returns count of data points in user-specified ranges.
@@ -768,14 +770,14 @@ SELECT HISTOGRAM(field_name,bin_type, bin_description, normalized) FROM tb_nam
**Explanations**:
- bin_type: parameter to indicate the bucket type, valid inputs are: "user_input", "linear_bin", "log_bin"。
-- bin_description: parameter to describe how to generate buckets,can be in the following JSON formats for each bin_type respectively:
- - "user_input": "[1, 3, 5, 7]":
+- bin_description: parameter to describe how to generate buckets,can be in the following JSON formats for each bin_type respectively:
+ - "user_input": "[1, 3, 5, 7]":
User specified bin values.
-
+
- "linear_bin": "{"start": 0.0, "width": 5.0, "count": 5, "infinity": true}"
"start" - bin starting point. "width" - bin offset. "count" - number of bins generated. "infinity" - whether to add(-inf, inf)as start/end point in generated set of bins.
The above "linear_bin" descriptor generates a set of bins: [-inf, 0.0, 5.0, 10.0, 15.0, 20.0, +inf].
-
+
- "log_bin": "{"start":1.0, "factor": 2.0, "count": 5, "infinity": true}"
"start" - bin starting point. "factor" - exponential factor of bin offset. "count" - number of bins generated. "infinity" - whether to add(-inf, inf)as start/end point in generated range of bins.
The above "linear_bin" descriptor generates a set of bins: [-inf, 1.0, 2.0, 4.0, 8.0, 16.0, +inf].
@@ -785,7 +787,7 @@ SELECT HISTOGRAM(field_name,bin_type, bin_description, normalized) FROM tb_nam
### PERCENTILE
```sql
-SELECT PERCENTILE(field_name, P) FROM { tb_name } [WHERE clause];
+PERCENTILE(expr, p)
```
**Description**: The value whose rank in a specific column matches the specified percentage. If such a value matching the specified percentage doesn't exist in the column, an interpolation value will be returned.
@@ -796,7 +798,7 @@ SELECT PERCENTILE(field_name, P) FROM { tb_name } [WHERE clause];
**Applicable table types**: table only
-**More explanations**: _P_ is in range [0,100], when _P_ is 0, the result is same as using function MIN; when _P_ is 100, the result is same as function MAX.
+**More explanations**: _p_ is in range [0,100], when _p_ is 0, the result is same as using function MIN; when _p_ is 100, the result is same as function MAX.
## Selection Functions
@@ -806,7 +808,7 @@ Selection functions return one or more results depending. You can specify the ti
### BOTTOM
```sql
-SELECT BOTTOM(field_name, K) FROM { tb_name | stb_name } [WHERE clause];
+BOTTOM(expr, k)
```
**Description**: The least _k_ values of a specific column in a table or STable. If a value has multiple occurrences in the column but counting all of them in will exceed the upper limit _k_, then a part of them will be returned randomly.
@@ -826,7 +828,7 @@ SELECT BOTTOM(field_name, K) FROM { tb_name | stb_name } [WHERE clause];
### FIRST
```sql
-SELECT FIRST(field_name) FROM { tb_name | stb_name } [WHERE clause];
+FIRST(expr)
```
**Description**: The first non-null value of a specific column in a table or STable
@@ -846,7 +848,7 @@ SELECT FIRST(field_name) FROM { tb_name | stb_name } [WHERE clause];
### INTERP
```sql
-SELECT INTERP(field_name) FROM { tb_name | stb_name } [WHERE where_condition] RANGE(timestamp1,timestamp2) EVERY(interval) FILL({ VALUE | PREV | NULL | LINEAR | NEXT});
+INTERP(expr)
```
**Description**: The value that matches the specified timestamp range is returned, if existing; or an interpolation value is returned.
@@ -861,15 +863,16 @@ SELECT INTERP(field_name) FROM { tb_name | stb_name } [WHERE where_condition] RA
- `INTERP` is used to get the value that matches the specified time slice from a column. If no such value exists an interpolation value will be returned based on `FILL` parameter.
- The input data of `INTERP` is the value of the specified column and a `where` clause can be used to filter the original data. If no `where` condition is specified then all original data is the input.
-- The output time range of `INTERP` is specified by `RANGE(timestamp1,timestamp2)` parameter, with timestamp1<=timestamp2. timestamp1 is the starting point of the output time range and must be specified. timestamp2 is the ending point of the output time range and must be specified.
-- The number of rows in the result set of `INTERP` is determined by the parameter `EVERY`. Starting from timestamp1, one interpolation is performed for every time interval specified `EVERY` parameter.
-- Interpolation is performed based on `FILL` parameter.
+- The output time range of `INTERP` is specified by `RANGE(timestamp1,timestamp2)` parameter, with timestamp1<=timestamp2. timestamp1 is the starting point of the output time range and must be specified. timestamp2 is the ending point of the output time range and must be specified.
+- The number of rows in the result set of `INTERP` is determined by the parameter `EVERY`. Starting from timestamp1, one interpolation is performed for every time interval specified `EVERY` parameter.
+- Interpolation is performed based on `FILL` parameter.
- `INTERP` can only be used to interpolate in single timeline. So it must be used with `partition by tbname` when it's used on a STable.
+- Pseudo column `_irowts` can be used along with `INTERP` to return the timestamps associated with interpolation points(support after version 3.0.1.4).
### LAST
```sql
-SELECT LAST(field_name) FROM { tb_name | stb_name } [WHERE clause];
+LAST(expr)
```
**Description**: The last non-NULL value of a specific column in a table or STable
@@ -890,7 +893,7 @@ SELECT LAST(field_name) FROM { tb_name | stb_name } [WHERE clause];
### LAST_ROW
```sql
-SELECT LAST_ROW(field_name) FROM { tb_name | stb_name };
+LAST_ROW(expr)
```
**Description**: The last row of a table or STable
@@ -909,7 +912,7 @@ SELECT LAST_ROW(field_name) FROM { tb_name | stb_name };
### MAX
```sql
-SELECT MAX(field_name) FROM { tb_name | stb_name } [WHERE clause];
+MAX(expr)
```
**Description**: The maximum value of a specific column of a table or STable
@@ -924,7 +927,7 @@ SELECT MAX(field_name) FROM { tb_name | stb_name } [WHERE clause];
### MIN
```sql
-SELECT MIN(field_name) FROM {tb_name | stb_name} [WHERE clause];
+MIN(expr)
```
**Description**: The minimum value of a specific column in a table or STable
@@ -939,7 +942,7 @@ SELECT MIN(field_name) FROM {tb_name | stb_name} [WHERE clause];
### MODE
```sql
-SELECT MODE(field_name) FROM tb_name [WHERE clause];
+MODE(expr)
```
**Description**:The value which has the highest frequency of occurrence. NULL is returned if there are multiple values which have highest frequency of occurrence.
@@ -954,7 +957,7 @@ SELECT MODE(field_name) FROM tb_name [WHERE clause];
### SAMPLE
```sql
-SELECT SAMPLE(field_name, K) FROM { tb_name | stb_name } [WHERE clause]
+SAMPLE(expr, k)
```
**Description**: _k_ sampling values of a specific column. The applicable range of _k_ is [1,1000].
@@ -967,7 +970,7 @@ SELECT SAMPLE(field_name, K) FROM { tb_name | stb_name } [WHERE clause]
**Applicable table types**: standard tables and supertables
-**More explanations**:
+**More explanations**:
This function cannot be used in expression calculation.
- Must be used with `PARTITION BY tbname` when it's used on a STable to force the result on each single timeline
@@ -976,7 +979,7 @@ This function cannot be used in expression calculation.
### TAIL
```sql
-SELECT TAIL(field_name, k, offset_val) FROM {tb_name | stb_name} [WHERE clause];
+TAIL(expr, k, offset_val)
```
**Description**: The next _k_ rows are returned after skipping the last `offset_val` rows, NULL values are not ignored. `offset_val` is optional parameter. When it's not specified, the last _k_ rows are returned. When `offset_val` is used, the effect is same as `order by ts desc LIMIT k OFFSET offset_val`.
@@ -993,7 +996,7 @@ SELECT TAIL(field_name, k, offset_val) FROM {tb_name | stb_name} [WHERE clause];
### TOP
```sql
-SELECT TOP(field_name, K) FROM { tb_name | stb_name } [WHERE clause];
+TOP(expr, k)
```
**Description**: The greatest _k_ values of a specific column in a table or STable. If a value has multiple occurrences in the column but counting all of them in will exceed the upper limit _k_, then a part of them will be returned randomly.
@@ -1013,7 +1016,7 @@ SELECT TOP(field_name, K) FROM { tb_name | stb_name } [WHERE clause];
### UNIQUE
```sql
-SELECT UNIQUE(field_name) FROM {tb_name | stb_name} [WHERE clause];
+UNIQUE(expr)
```
**Description**: The values that occur the first time in the specified column. The effect is similar to `distinct` keyword, but it can also be used to match tags or timestamp. The first occurrence of a timestamp or tag is used.
@@ -1032,7 +1035,7 @@ TDengine includes extensions to standard SQL that are intended specifically for
### CSUM
```sql
-SELECT CSUM(field_name) FROM { tb_name | stb_name } [WHERE clause]
+CSUM(expr)
```
**Description**: The cumulative sum of each row for a specific column. The number of output rows is same as that of the input rows.
@@ -1045,17 +1048,22 @@ SELECT CSUM(field_name) FROM { tb_name | stb_name } [WHERE clause]
**Applicable table types**: standard tables and supertables
-**More explanations**:
-
+**More explanations**:
+
- Arithmetic operation can't be performed on the result of `csum` function
-- Can only be used with aggregate functions This function can be used with supertables and standard tables.
+- Can only be used with aggregate functions This function can be used with supertables and standard tables.
- Must be used with `PARTITION BY tbname` when it's used on a STable to force the result on each single timeline
### DERIVATIVE
```sql
-SELECT DERIVATIVE(field_name, time_interval, ignore_negative) FROM tb_name [WHERE clause];
+DERIVATIVE(expr, time_inerval, ignore_negative)
+
+ignore_negative: {
+ 0
+ | 1
+}
```
**Description**: The derivative of a specific column. The time rage can be specified by parameter `time_interval`, the minimum allowed time range is 1 second (1s); the value of `ignore_negative` can be 0 or 1, 1 means negative values are ignored.
@@ -1066,15 +1074,20 @@ SELECT DERIVATIVE(field_name, time_interval, ignore_negative) FROM tb_name [WHER
**Applicable table types**: standard tables and supertables
-**More explanation**:
-
+**More explanation**:
+
- It can be used together with `PARTITION BY tbname` against a STable.
- It can be used together with a selected column. For example: select \_rowts, DERIVATIVE() from。
### DIFF
```sql
-SELECT {DIFF(field_name, ignore_negative) | DIFF(field_name)} FROM tb_name [WHERE clause];
+DIFF(expr [, ignore_negative])
+
+ignore_negative: {
+ 0
+ | 1
+}
```
**Description**: The different of each row with its previous row for a specific column. `ignore_negative` can be specified as 0 or 1, the default value is 1 if it's not specified. `1` means negative values are ignored.
@@ -1085,7 +1098,7 @@ SELECT {DIFF(field_name, ignore_negative) | DIFF(field_name)} FROM tb_name [WHER
**Applicable table types**: standard tables and supertables
-**More explanation**:
+**More explanation**:
- The number of result rows is the number of rows subtracted by one, no output for the first row
- It can be used together with a selected column. For example: select \_rowts, DIFF() from。
@@ -1094,7 +1107,7 @@ SELECT {DIFF(field_name, ignore_negative) | DIFF(field_name)} FROM tb_name [WHER
### IRATE
```sql
-SELECT IRATE(field_name) FROM tb_name WHERE clause;
+IRATE(expr)
```
**Description**: instantaneous rate on a specific column. The last two samples in the specified time range are used to calculate instantaneous rate. If the last sample value is smaller, then only the last sample value is used instead of the difference between the last two sample values.
@@ -1109,7 +1122,7 @@ SELECT IRATE(field_name) FROM tb_name WHERE clause;
### MAVG
```sql
-SELECT MAVG(field_name, K) FROM { tb_name | stb_name } [WHERE clause]
+MAVG(expr, k)
```
**Description**: The moving average of continuous _k_ values of a specific column. If the number of input rows is less than _k_, nothing is returned. The applicable range of _k_ is [1,1000].
@@ -1122,9 +1135,9 @@ SELECT MAVG(field_name, K) FROM { tb_name | stb_name } [WHERE clause]
**Applicable table types**: standard tables and supertables
-**More explanations**:
-
-- Arithmetic operation can't be performed on the result of `MAVG`.
+**More explanations**:
+
+- Arithmetic operation can't be performed on the result of `MAVG`.
- Can only be used with data columns, can't be used with tags. - Can't be used with aggregate functions.
- Must be used with `PARTITION BY tbname` when it's used on a STable to force the result on each single timeline
@@ -1132,14 +1145,14 @@ SELECT MAVG(field_name, K) FROM { tb_name | stb_name } [WHERE clause]
### STATECOUNT
```sql
-SELECT STATECOUNT(field_name, oper, val) FROM { tb_name | stb_name } [WHERE clause];
+STATECOUNT(expr, oper, val)
```
**Description**: The number of continuous rows satisfying the specified conditions for a specific column. The result is shown as an extra column for each row. If the specified condition is evaluated as true, the number is increased by 1; otherwise the number is reset to -1. If the input value is NULL, then the corresponding row is skipped.
**Applicable parameter values**:
-- oper : Can be one of `LT` (lower than), `GT` (greater than), `LE` (lower than or equal to), `GE` (greater than or equal to), `NE` (not equal to), `EQ` (equal to), the value is case insensitive
+- oper : Can be one of `'LT'` (lower than), `'GT'` (greater than), `'LE'` (lower than or equal to), `'GE'` (greater than or equal to), `'NE'` (not equal to), `'EQ'` (equal to), the value is case insensitive, the value must be in quotes.
- val : Numeric types
**Return value type**: Integer
@@ -1159,14 +1172,14 @@ SELECT STATECOUNT(field_name, oper, val) FROM { tb_name | stb_name } [WHERE clau
### STATEDURATION
```sql
-SELECT stateDuration(field_name, oper, val, unit) FROM { tb_name | stb_name } [WHERE clause];
+STATEDURATION(expr, oper, val, unit)
```
**Description**: The length of time range in which all rows satisfy the specified condition for a specific column. The result is shown as an extra column for each row. The length for the first row that satisfies the condition is 0. Next, if the condition is evaluated as true for a row, the time interval between current row and its previous row is added up to the time range; otherwise the time range length is reset to -1. If the value of the column is NULL, the corresponding row is skipped.
**Applicable parameter values**:
-- oper : Can be one of `LT` (lower than), `GT` (greater than), `LE` (lower than or equal to), `GE` (greater than or equal to), `NE` (not equal to), `EQ` (equal to), the value is case insensitive
+- oper : Can be one of `'LT'` (lower than), `'GT'` (greater than), `'LE'` (lower than or equal to), `'GE'` (greater than or equal to), `'NE'` (not equal to), `'EQ'` (equal to), the value is case insensitive, the value must be in quotes.
- val : Numeric types
- unit: The unit of time interval. Enter one of the following options: 1b (nanoseconds), 1u (microseconds), 1a (milliseconds), 1s (seconds), 1m (minutes), 1h (hours), 1d (days), or 1w (weeks) If you do not enter a unit of time, the precision of the current database is used by default.
@@ -1187,7 +1200,7 @@ SELECT stateDuration(field_name, oper, val, unit) FROM { tb_name | stb_name } [W
### TWA
```sql
-SELECT TWA(field_name) FROM tb_name WHERE clause;
+TWA(expr)
```
**Description**: Time weighted average on a specific column within a time range
diff --git a/docs/en/12-taos-sql/12-distinguished.md b/docs/en/12-taos-sql/12-distinguished.md
index 707089abe54fc12bb09de47c1c51af1a32b8cbcd..296c2376427b28ea0fa8404517af7215ffa7030f 100644
--- a/docs/en/12-taos-sql/12-distinguished.md
+++ b/docs/en/12-taos-sql/12-distinguished.md
@@ -5,11 +5,11 @@ title: Time-Series Extensions
As a purpose-built database for storing and processing time-series data, TDengine provides time-series-specific extensions to standard SQL.
-These extensions include tag-partitioned queries and windowed queries.
+These extensions include partitioned queries and windowed queries.
-## Tag-Partitioned Queries
+## Partitioned Queries
-When you query a supertable, you may need to partition the supertable by tag and perform additional operations on a specific partition. In this case, you can use the following SQL clause:
+When you query a supertable, you may need to partition the supertable by some dimensions and perform additional operations on a specific partition. In this case, you can use the following SQL clause:
```sql
PARTITION BY part_list
@@ -17,22 +17,24 @@ PARTITION BY part_list
part_list can be any scalar expression, such as a column, constant, scalar function, or a combination of the preceding items.
-A PARTITION BY clause with a tag is processed as follows:
+A PARTITION BY clause is processed as follows:
-- The PARTITION BY clause must occur after the WHERE clause and cannot be used with a JOIN clause.
-- The PARTITION BY clause partitions the super table by the specified tag group, and the specified calculation is performed on each partition. The calculation performed is determined by the rest of the statement - a window clause, GROUP BY clause, or SELECT clause.
-- You can use PARTITION BY together with a window clause or GROUP BY clause. In this case, the window or GROUP BY clause takes effect on every partition. For example, the following statement partitions the table by the location tag, performs downsampling over a 10 minute window, and returns the maximum value:
+- The PARTITION BY clause must occur after the WHERE clause
+- The PARTITION BY caluse partitions the data according to the specified dimentions, then perform computation on each partition. The performed computation is determined by the rest of the statement - a window clause, GROUP BY clause, or SELECT clause.
+- The PARTITION BY clause can be used together with a window clause or GROUP BY clause. In this case, the window or GROUP BY clause takes effect on every partition. For example, the following statement partitions the table by the location tag, performs downsampling over a 10 minute window, and returns the maximum value:
```sql
select max(current) from meters partition by location interval(10m)
```
+The most common usage of PARTITION BY is partitioning the data in subtables by tags then perform computation when querying data in a supertable. More specifically, `PARTITION BY TBNAME` partitions the data of each subtable into a single timeline, and this method facilitates the statistical analysis in many use cases of processing timeseries data.
+
## Windowed Queries
Aggregation by time window is supported in TDengine. For example, in the case where temperature sensors report the temperature every seconds, the average temperature for every 10 minutes can be retrieved by performing a query with a time window. Window related clauses are used to divide the data set to be queried into subsets and then aggregation is performed across the subsets. There are three kinds of windows: time window, status window, and session window. There are two kinds of time windows: sliding window and flip time/tumbling window. The query syntax is as follows:
```sql
-SELECT function_list FROM tb_name
+SELECT select_list FROM tb_name
[WHERE where_condition]
[SESSION(ts_col, tol_val)]
[STATE_WINDOW(col)]
@@ -42,15 +44,9 @@ SELECT function_list FROM tb_name
The following restrictions apply:
-### Restricted Functions
-
-- Aggregate functions and select functions can be used in `function_list`, with each function having only one output. For example COUNT, AVG, SUM, STDDEV, LEASTSQUARES, PERCENTILE, MIN, MAX, FIRST, LAST. Functions having multiple outputs, such as DIFF or arithmetic operations can't be used.
-- `LAST_ROW` can't be used together with window aggregate.
-- Scalar functions, like CEIL/FLOOR, can't be used with window aggregate.
-
### Other Rules
-- The window clause must occur after the PARTITION BY clause and before the GROUP BY clause. It cannot be used with a GROUP BY clause.
+- The window clause must occur after the PARTITION BY clause. It cannot be used with a GROUP BY clause.
- SELECT clauses on windows can contain only the following expressions:
- Constants
- Aggregate functions
@@ -82,7 +78,7 @@ These pseudocolumns occur after the aggregation clause.
1. A huge volume of interpolation output may be returned using `FILL`, so it's recommended to specify the time range when using `FILL`. The maximum number of interpolation values that can be returned in a single query is 10,000,000.
2. The result set is in ascending order of timestamp when you aggregate by time window.
-3. If aggregate by window is used on STable, the aggregate function is performed on all the rows matching the filter conditions. If `GROUP BY` is not used in the query, the result set will be returned in ascending order of timestamp; otherwise the result set is not exactly in the order of ascending timestamp in each group.
+3. If aggregate by window is used on STable, the aggregate function is performed on all the rows matching the filter conditions. If `PARTITION BY` is not used in the query, the result set will be returned in strict ascending order of timestamp; otherwise the result set will be returned in the order of ascending timestamp in each group.
:::
@@ -112,9 +108,9 @@ When using time windows, note the following:
Please note that the `timezone` parameter should be configured to be the same value in the `taos.cfg` configuration file on client side and server side.
- The result set is in ascending order of timestamp when you aggregate by time window.
-### Status Window
+### State Window
-In case of using integer, bool, or string to represent the status of a device at any given moment, continuous rows with the same status belong to a status window. Once the status changes, the status window closes. As shown in the following figure, there are two status windows according to status, [2019-04-28 14:22:07,2019-04-28 14:22:10] and [2019-04-28 14:22:11,2019-04-28 14:22:12]. Status window is not applicable to STable for now.
+In case of using integer, bool, or string to represent the status of a device at any given moment, continuous rows with the same status belong to a status window. Once the status changes, the status window closes. As shown in the following figure, there are two state windows according to status, [2019-04-28 14:22:07,2019-04-28 14:22:10] and [2019-04-28 14:22:11,2019-04-28 14:22:12].

@@ -124,13 +120,19 @@ In case of using integer, bool, or string to represent the status of a device at
SELECT COUNT(*), FIRST(ts), status FROM temp_tb_1 STATE_WINDOW(status);
```
+Only care about the information of the status window when the status is 2. For example:
+
+```
+SELECT * FROM (SELECT COUNT(*) AS cnt, FIRST(ts) AS fst, status FROM temp_tb_1 STATE_WINDOW(status)) t WHERE status = 2;
+```
+
### Session Window
The primary key, i.e. timestamp, is used to determine which session window a row belongs to. As shown in the figure below, if the limit of time interval for the session window is specified as 12 seconds, then the 6 rows in the figure constitutes 2 time windows, [2019-04-28 14:22:10,2019-04-28 14:22:30] and [2019-04-28 14:23:10,2019-04-28 14:23:30] because the time difference between 2019-04-28 14:22:30 and 2019-04-28 14:23:10 is 40 seconds, which exceeds the time interval limit of 12 seconds.

-If the time interval between two continuous rows are within the time interval specified by `tol_value` they belong to the same session window; otherwise a new session window is started automatically. Session window is not supported on STable for now.
+If the time interval between two continuous rows are within the time interval specified by `tol_value` they belong to the same session window; otherwise a new session window is started automatically.
```
diff --git a/docs/en/12-taos-sql/14-stream.md b/docs/en/12-taos-sql/14-stream.md
index fcd78765104af17285b43749969821ceb98da33b..17e4e4d1b0da6d0461c9ab478a9430855379fb12 100644
--- a/docs/en/12-taos-sql/14-stream.md
+++ b/docs/en/12-taos-sql/14-stream.md
@@ -44,13 +44,13 @@ For example, the following SQL statement creates a stream and automatically crea
```sql
CREATE STREAM avg_vol_s INTO avg_vol AS
-SELECT _wstartts, count(*), avg(voltage) FROM meters PARTITION BY tbname INTERVAL(1m) SLIDING(30s);
+SELECT _wstart, count(*), avg(voltage) FROM meters PARTITION BY tbname INTERVAL(1m) SLIDING(30s);
```
## Delete a Stream
```sql
-DROP STREAM [IF NOT EXISTS] stream_name
+DROP STREAM [IF EXISTS] stream_name
```
This statement deletes the stream processing service only. The data generated by the stream is retained.
diff --git a/docs/en/12-taos-sql/19-limit.md b/docs/en/12-taos-sql/19-limit.md
index 0486ea30940cdcb5d034bb730d12c0c120a59cd1..678c38a22ea763187cd0c87dceae3bf6ca03957c 100644
--- a/docs/en/12-taos-sql/19-limit.md
+++ b/docs/en/12-taos-sql/19-limit.md
@@ -30,7 +30,7 @@ The following characters cannot occur in a password: single quotation marks ('),
- Maximum number of columns is 4096. There must be at least 2 columns, and the first column must be timestamp.
- The maximum length of a tag name is 64 bytes
- Maximum number of tags is 128. There must be at least 1 tag. The total length of tag values cannot exceed 16 KB.
-- Maximum length of single SQL statement is 1 MB (1048576 bytes). It can be configured in the parameter `maxSQLLength` in the client side, the applicable range is [65480, 1048576].
+- Maximum length of single SQL statement is 1 MB (1048576 bytes).
- At most 4096 columns can be returned by `SELECT`. Functions in the query statement constitute columns. An error is returned if the limit is exceeded.
- Maximum numbers of databases, STables, tables are dependent only on the system resources.
- The number of replicas can only be 1 or 3.
diff --git a/docs/en/12-taos-sql/20-keywords.md b/docs/en/12-taos-sql/20-keywords.md
index 6f166c8034382b0613845d18470556622106e673..4b479b866b77e1e354d20376ccb869755af76d00 100644
--- a/docs/en/12-taos-sql/20-keywords.md
+++ b/docs/en/12-taos-sql/20-keywords.md
@@ -5,7 +5,9 @@ title: Reserved Keywords
## Keyword List
-There are about 200 keywords reserved by TDengine, they can't be used as the name of database, STable or table with either upper case, lower case or mixed case. The following list shows all reserved keywords:
+There are more than 200 keywords reserved by TDengine, they can't be used as the name of database, table, STable, subtable, column or tag with either upper case, lower case or mixed case. If you need to use these keywords, use the symbol `` ` `` to enclose the keywords, e.g. \`ADD\`.
+
+The following list shows all reserved keywords:
### A
@@ -14,15 +16,20 @@ There are about 200 keywords reserved by TDengine, they can't be used as the nam
- ACCOUNTS
- ADD
- AFTER
+- AGGREGATE
- ALL
- ALTER
+- ANALYZE
- AND
+- APPS
- AS
- ASC
+- AT_ONCE
- ATTACH
### B
+- BALANCE
- BEFORE
- BEGIN
- BETWEEN
@@ -32,19 +39,27 @@ There are about 200 keywords reserved by TDengine, they can't be used as the nam
- BITNOT
- BITOR
- BLOCKS
+- BNODE
+- BNODES
- BOOL
+- BUFFER
+- BUFSIZE
- BY
### C
- CACHE
-- CACHELAST
+- CACHEMODEL
+- CACHESIZE
- CASCADE
+- CAST
- CHANGE
+- CLIENT_VERSION
- CLUSTER
- COLON
- COLUMN
- COMMA
+- COMMENT
- COMP
- COMPACT
- CONCAT
@@ -52,15 +67,18 @@ There are about 200 keywords reserved by TDengine, they can't be used as the nam
- CONNECTION
- CONNECTIONS
- CONNS
+- CONSUMER
+- CONSUMERS
+- CONTAINS
- COPY
+- COUNT
- CREATE
-- CTIME
+- CURRENT_USER
### D
- DATABASE
- DATABASES
-- DAYS
- DBS
- DEFERRED
- DELETE
@@ -69,18 +87,23 @@ There are about 200 keywords reserved by TDengine, they can't be used as the nam
- DESCRIBE
- DETACH
- DISTINCT
+- DISTRIBUTED
- DIVIDE
- DNODE
- DNODES
- DOT
- DOUBLE
- DROP
+- DURATION
### E
+- EACH
+- ENABLE
- END
-- EQ
+- EVERY
- EXISTS
+- EXPIRED
- EXPLAIN
### F
@@ -88,18 +111,20 @@ There are about 200 keywords reserved by TDengine, they can't be used as the nam
- FAIL
- FILE
- FILL
+- FIRST
- FLOAT
+- FLUSH
- FOR
- FROM
-- FSYNC
+- FUNCTION
+- FUNCTIONS
### G
-- GE
- GLOB
+- GRANT
- GRANTS
- GROUP
-- GT
### H
@@ -110,15 +135,18 @@ There are about 200 keywords reserved by TDengine, they can't be used as the nam
- ID
- IF
- IGNORE
-- IMMEDIA
+- IMMEDIATE
- IMPORT
- IN
-- INITIAL
+- INDEX
+- INDEXES
+- INITIALLY
+- INNER
- INSERT
- INSTEAD
- INT
- INTEGER
-- INTERVA
+- INTERVAL
- INTO
- IS
- ISNULL
@@ -126,6 +154,7 @@ There are about 200 keywords reserved by TDengine, they can't be used as the nam
### J
- JOIN
+- JSON
### K
@@ -135,46 +164,57 @@ There are about 200 keywords reserved by TDengine, they can't be used as the nam
### L
-- LE
+- LAST
+- LAST_ROW
+- LICENCES
- LIKE
- LIMIT
- LINEAR
- LOCAL
-- LP
-- LSHIFT
-- LT
### M
- MATCH
+- MAX_DELAY
- MAXROWS
+- MERGE
+- META
- MINROWS
- MINUS
+- MNODE
- MNODES
- MODIFY
- MODULES
### N
-- NE
+- NCHAR
+- NEXT
+- NMATCH
- NONE
- NOT
- NOTNULL
- NOW
- NULL
+- NULLS
### O
- OF
- OFFSET
+- ON
- OR
- ORDER
+- OUTPUTTYPE
### P
-- PARTITION
+- PAGES
+- PAGESIZE
+- PARTITIONS
- PASS
- PLUS
+- PORT
- PPS
- PRECISION
- PREV
@@ -182,47 +222,63 @@ There are about 200 keywords reserved by TDengine, they can't be used as the nam
### Q
+- QNODE
+- QNODES
- QTIME
-- QUERIE
+- QUERIES
- QUERY
-- QUORUM
### R
- RAISE
-- REM
+- RANGE
+- RATIO
+- READ
+- REDISTRIBUTE
+- RENAME
- REPLACE
- REPLICA
- RESET
-- RESTRIC
+- RESTRICT
+- RETENTIONS
+- REVOKE
+- ROLLUP
- ROW
-- RP
-- RSHIFT
### S
+- SCHEMALESS
- SCORES
- SELECT
- SEMI
+- SERVER_STATUS
+- SERVER_VERSION
- SESSION
- SET
- SHOW
-- SLASH
+- SINGLE_STABLE
- SLIDING
- SLIMIT
-- SMALLIN
+- SMA
+- SMALLINT
+- SNODE
+- SNODES
- SOFFSET
-- STable
-- STableS
+- SPLIT
+- STABLE
+- STABLES
- STAR
- STATE
-- STATEMEN
-- STATE_WI
+- STATE_WINDOW
+- STATEMENT
- STORAGE
- STREAM
- STREAMS
+- STRICT
- STRING
+- SUBSCRIPTIONS
- SYNCDB
+- SYSINFO
### T
@@ -233,19 +289,24 @@ There are about 200 keywords reserved by TDengine, they can't be used as the nam
- TBNAME
- TIMES
- TIMESTAMP
+- TIMEZONE
- TINYINT
+- TO
+- TODAY
- TOPIC
- TOPICS
+- TRANSACTION
+- TRANSACTIONS
- TRIGGER
+- TRIM
- TSERIES
+- TTL
### U
-- UMINUS
- UNION
- UNSIGNED
- UPDATE
-- UPLUS
- USE
- USER
- USERS
@@ -253,9 +314,13 @@ There are about 200 keywords reserved by TDengine, they can't be used as the nam
### V
+- VALUE
- VALUES
+- VARCHAR
- VARIABLE
- VARIABLES
+- VERBOSE
+- VGROUP
- VGROUPS
- VIEW
- VNODES
@@ -263,14 +328,26 @@ There are about 200 keywords reserved by TDengine, they can't be used as the nam
### W
- WAL
+- WAL_FSYNC_PERIOD
+- WAL_LEVEL
+- WAL_RETENTION_PERIOD
+- WAL_RETENTION_SIZE
+- WAL_ROLL_PERIOD
+- WAL_SEGMENT_SIZE
+- WATERMARK
- WHERE
+- WINDOW_CLOSE
+- WITH
+- WRITE
### \_
- \_C0
-- \_QSTART
-- \_QSTOP
+- \_IROWTS
- \_QDURATION
-- \_WSTART
-- \_WSTOP
+- \_QEND
+- \_QSTART
+- \_ROWTS
- \_WDURATION
+- \_WEND
+- \_WSTART
diff --git a/docs/en/12-taos-sql/22-meta.md b/docs/en/12-taos-sql/22-meta.md
index 796b25dcb0a425aa0ffd76a6e9b8de45ba069357..0a597779466c3552c0653227e86831668a32fc4f 100644
--- a/docs/en/12-taos-sql/22-meta.md
+++ b/docs/en/12-taos-sql/22-meta.md
@@ -11,7 +11,15 @@ TDengine includes a built-in database named `INFORMATION_SCHEMA` to provide acce
4. Future versions of TDengine can add new columns to INFORMATION_SCHEMA tables without affecting existing business systems.
5. It is easier for users coming from other database management systems. For example, Oracle users can query data dictionary tables.
-Note: SHOW statements are still supported for the convenience of existing users.
+:::info
+
+- SHOW statements are still supported for the convenience of existing users.
+- Some columns in the system table may be keywords, and you need to use the escape character '\`' when querying, for example, to query the VGROUPS in the database `test`:
+```sql
+ select `vgroups` from ins_databases where name = 'test';
+```
+
+:::
This document introduces the tables of INFORMATION_SCHEMA and their structure.
@@ -102,7 +110,11 @@ Provides information about user-created databases. Similar to SHOW DATABASES.
| 24 | wal_retention_period | INT | WAL retention period |
| 25 | wal_retention_size | INT | Maximum WAL size |
| 26 | wal_roll_period | INT | WAL rotation period |
-| 27 | wal_segment_size | WAL file size |
+| 27 | wal_segment_size | BIGINT | WAL file size |
+| 28 | stt_trigger | SMALLINT | The threshold for number of files to trigger file merging |
+| 29 | table_prefix | SMALLINT | The prefix length in the table name that is ignored when distributing table to vnode based on table name |
+| 30 | table_suffix | SMALLINT | The suffix length in the table name that is ignored when distributing table to vnode based on table name |
+| 31 | tsdb_pagesize | INT | The page size for internal storage engine, its unit is KB |
## INS_FUNCTIONS
@@ -245,3 +257,35 @@ Provides dnode configuration information.
| 1 | dnode_id | INT | Dnode ID |
| 2 | name | BINARY(32) | Parameter |
| 3 | value | BINARY(64) | Value |
+
+## INS_TOPICS
+
+| # | **Column** | **Data Type** | **Description** |
+| --- | :---------: | ------------ | ------------------------------ |
+| 1 | topic_name | BINARY(192) | Topic name |
+| 2 | db_name | BINARY(64) | Database for the topic |
+| 3 | create_time | TIMESTAMP | Creation time |
+| 4 | sql | BINARY(1024) | SQL statement used to create the topic |
+
+## INS_SUBSCRIPTIONS
+
+| # | **Column** | **Data Type** | **Description** |
+| --- | :------------: | ------------ | ------------------------ |
+| 1 | topic_name | BINARY(204) | Subscribed topic |
+| 2 | consumer_group | BINARY(193) | Subscribed consumer group |
+| 3 | vgroup_id | INT | Vgroup ID for the consumer |
+| 4 | consumer_id | BIGINT | Consumer ID |
+
+## INS_STREAMS
+
+| # | **Column** | **Data Type** | **Description** |
+| --- | :----------: | ------------ | --------------------------------------- |
+| 1 | stream_name | BINARY(64) | Stream name |
+| 2 | create_time | TIMESTAMP | Creation time |
+| 3 | sql | BINARY(1024) | SQL statement used to create the stream |
+| 4 | status | BIANRY(20) | Current status |
+| 5 | source_db | BINARY(64) | Source database |
+| 6 | target_db | BIANRY(64) | Target database |
+| 7 | target_table | BINARY(192) | Target table |
+| 8 | watermark | BIGINT | Watermark (see stream processing documentation) |
+| 9 | trigger | INT | Method of triggering the result push (see stream processing documentation) |
diff --git a/docs/en/12-taos-sql/23-perf.md b/docs/en/12-taos-sql/23-perf.md
index 10a93380220d357261914066d2fe036b8470e224..29cf3af6abfbbc06e42ae99c78f35f33a3c7c30a 100644
--- a/docs/en/12-taos-sql/23-perf.md
+++ b/docs/en/12-taos-sql/23-perf.md
@@ -61,15 +61,6 @@ Provides information about SQL queries currently running. Similar to SHOW QUERIE
| 12 | sub_status | BINARY(1000) | Subquery status |
| 13 | sql | BINARY(1024) | SQL statement |
-## PERF_TOPICS
-
-| # | **Column** | **Data Type** | **Description** |
-| --- | :---------: | ------------ | ------------------------------ |
-| 1 | topic_name | BINARY(192) | Topic name |
-| 2 | db_name | BINARY(64) | Database for the topic |
-| 3 | create_time | TIMESTAMP | Creation time |
-| 4 | sql | BINARY(1024) | SQL statement used to create the topic |
-
## PERF_CONSUMERS
| # | **Column** | **Data Type** | **Description** |
@@ -83,15 +74,6 @@ Provides information about SQL queries currently running. Similar to SHOW QUERIE
| 7 | subscribe_time | TIMESTAMP | Time of first subscription |
| 8 | rebalance_time | TIMESTAMP | Time of first rebalance triggering |
-## PERF_SUBSCRIPTIONS
-
-| # | **Column** | **Data Type** | **Description** |
-| --- | :------------: | ------------ | ------------------------ |
-| 1 | topic_name | BINARY(204) | Subscribed topic |
-| 2 | consumer_group | BINARY(193) | Subscribed consumer group |
-| 3 | vgroup_id | INT | Vgroup ID for the consumer |
-| 4 | consumer_id | BIGINT | Consumer ID |
-
## PERF_TRANS
| # | **Column** | **Data Type** | **Description** |
@@ -113,17 +95,3 @@ Provides information about SQL queries currently running. Similar to SHOW QUERIE
| 2 | create_time | TIMESTAMP | Creation time |
| 3 | stable_name | BINARY(192) | Supertable name |
| 4 | vgroup_id | INT | Dedicated vgroup name |
-
-## PERF_STREAMS
-
-| # | **Column** | **Data Type** | **Description** |
-| --- | :----------: | ------------ | --------------------------------------- |
-| 1 | stream_name | BINARY(64) | Stream name |
-| 2 | create_time | TIMESTAMP | Creation time |
-| 3 | sql | BINARY(1024) | SQL statement used to create the stream |
-| 4 | status | BIANRY(20) | Current status |
-| 5 | source_db | BINARY(64) | Source database |
-| 6 | target_db | BIANRY(64) | Target database |
-| 7 | target_table | BINARY(192) | Target table |
-| 8 | watermark | BIGINT | Watermark (see stream processing documentation) |
-| 9 | trigger | INT | Method of triggering the result push (see stream processing documentation) |
diff --git a/docs/en/12-taos-sql/24-show.md b/docs/en/12-taos-sql/24-show.md
index 6b56161322ff65d1af4eb9e4b7d7e7e88e569446..5f3bef3546ea05745070268e1f6add25add4773b 100644
--- a/docs/en/12-taos-sql/24-show.md
+++ b/docs/en/12-taos-sql/24-show.md
@@ -5,16 +5,6 @@ title: SHOW Statement for Metadata
`SHOW` command can be used to get brief system information. To get details about metatadata, information, and status in the system, please use `select` to query the tables in database `INFORMATION_SCHEMA`.
-## SHOW ACCOUNTS
-
-```sql
-SHOW ACCOUNTS;
-```
-
-Shows information about tenants on the system.
-
-Note: TDengine Enterprise Edition only.
-
## SHOW APPS
```sql
@@ -194,7 +184,7 @@ Shows information about streams in the system.
SHOW SUBSCRIPTIONS;
```
-Shows all subscriptions in the current database.
+Shows all subscriptions in the system.
## SHOW TABLES
diff --git a/docs/en/12-taos-sql/25-grant.md b/docs/en/12-taos-sql/25-grant.md
index 37438ee780cac17b463e0dbb1b5385d0f3965de7..f895567c621f123778c8a711a68357a889ca0098 100644
--- a/docs/en/12-taos-sql/25-grant.md
+++ b/docs/en/12-taos-sql/25-grant.md
@@ -1,6 +1,7 @@
---
-sidebar_label: Permissions Management
-title: Permissions Management
+sidebar_label: Access Control
+title: User and Access Control
+description: Manage user and user's permission
---
This document describes how to manage permissions in TDengine.
@@ -8,15 +9,54 @@ This document describes how to manage permissions in TDengine.
## Create a User
```sql
-CREATE USER use_name PASS 'password';
+CREATE USER user_name PASS 'password' [SYSINFO {1|0}];
```
This statement creates a user account.
-The maximum length of use_name is 23 bytes.
+The maximum length of user_name is 23 bytes.
The maximum length of password is 128 bytes. The password can include leters, digits, and special characters excluding single quotation marks, double quotation marks, backticks, backslashes, and spaces. The password cannot be empty.
+`SYSINFO` indicates whether the user is allowed to view system information. `1` means allowed, `0` means not allowed. System information includes server configuration, dnode, vnode, storage. The default value is `1`.
+
+For example, we can create a user whose password is `123456` and is able to view system information.
+
+```sql
+taos> create user test pass '123456' sysinfo 1;
+Query OK, 0 of 0 rows affected (0.001254s)
+```
+
+## View Users
+
+To show the users in the system, please use
+
+```sql
+SHOW USERS;
+```
+
+This is an example:
+
+```sql
+taos> show users;
+ name | super | enable | sysinfo | create_time |
+================================================================================
+ test | 0 | 1 | 1 | 2022-08-29 15:10:27.315 |
+ root | 1 | 1 | 1 | 2022-08-29 15:03:34.710 |
+Query OK, 2 rows in database (0.001657s)
+```
+
+Alternatively, you can get the user information by querying a built-in table, INFORMATION_SCHEMA.INS_USERS. For example:
+
+```sql
+taos> select * from information_schema.ins_users;
+ name | super | enable | sysinfo | create_time |
+================================================================================
+ test | 0 | 1 | 1 | 2022-08-29 15:10:27.315 |
+ root | 1 | 1 | 1 | 2022-08-29 15:03:34.710 |
+Query OK, 2 rows in database (0.001953s)
+```
+
## Delete a User
```sql
@@ -39,6 +79,13 @@ alter_user_clause: {
- ENABLE: Specify whether the user is enabled or disabled. 1 indicates enabled and 0 indicates disabled.
- SYSINFO: Specify whether the user can query system information. 1 indicates that the user can query system information and 0 indicates that the user cannot query system information.
+For example, you can use below command to disable user `test`:
+
+```sql
+taos> alter user test enable 0;
+Query OK, 0 of 0 rows affected (0.001160s)
+```
+
## Grant Permissions
@@ -61,7 +108,7 @@ priv_level : {
}
```
-Grant permissions to a user.
+Grant permissions to a user, this feature is only available in enterprise edition.
Permissions are granted on the database level. You can grant read or write permissions.
@@ -91,4 +138,4 @@ priv_level : {
```
-Revoke permissions from a user.
+Revoke permissions from a user, this feature is only available in enterprise edition.
diff --git a/docs/en/12-taos-sql/29-changes.md b/docs/en/12-taos-sql/29-changes.md
index 8532eeac5d599ca2739393c9e38eec52631e407a..78b6d5fc05b9b03e1e8b3af268bc357dfaa401bc 100644
--- a/docs/en/12-taos-sql/29-changes.md
+++ b/docs/en/12-taos-sql/29-changes.md
@@ -11,12 +11,13 @@ description: "This document explains how TDengine SQL has changed in version 3.0
| 1 | VARCHAR | Added | Alias of BINARY.
| 2 | TIMESTAMP literal | Added | TIMESTAMP 'timestamp format' syntax now supported.
| 3 | _ROWTS pseudocolumn | Added | Indicates the primary key. Alias of _C0.
-| 4 | INFORMATION_SCHEMA | Added | Database for system metadata containing all schema definitions
-| 5 | PERFORMANCE_SCHEMA | Added | Database for system performance information.
-| 6 | Connection queries | Deprecated | Connection queries are no longer supported. The syntax and interfaces are deprecated.
-| 7 | Mixed operations | Enhanced | Mixing scalar and vector operations in queries has been enhanced and is supported in all SELECT clauses.
-| 8 | Tag operations | Added | Tag columns can be used in queries and clauses like data columns.
-| 9 | Timeline clauses and time functions in supertables | Enhanced | When PARTITION BY is not used, data in supertables is merged into a single timeline.
+| 4 | _IROWTS pseudocolumn | Added | Used to retrieve timestamps with INTERP function.
+| 5 | INFORMATION_SCHEMA | Added | Database for system metadata containing all schema definitions
+| 6 | PERFORMANCE_SCHEMA | Added | Database for system performance information.
+| 7 | Connection queries | Deprecated | Connection queries are no longer supported. The syntax and interfaces are deprecated.
+| 8 | Mixed operations | Enhanced | Mixing scalar and vector operations in queries has been enhanced and is supported in all SELECT clauses.
+| 9 | Tag operations | Added | Tag columns can be used in queries and clauses like data columns.
+| 10 | Timeline clauses and time functions in supertables | Enhanced | When PARTITION BY is not used, data in supertables is merged into a single timeline.
## SQL Syntax
diff --git a/docs/en/12-taos-sql/index.md b/docs/en/12-taos-sql/index.md
index e243cd23186a6b9286d3297e467567c26c316112..a5ffc9dc8dce158eccc0fa0519f09ba346710c31 100644
--- a/docs/en/12-taos-sql/index.md
+++ b/docs/en/12-taos-sql/index.md
@@ -1,6 +1,6 @@
---
title: TDengine SQL
-description: "The syntax supported by TDengine SQL "
+description: 'The syntax supported by TDengine SQL '
---
This section explains the syntax of SQL to perform operations on databases, tables and STables, insert data, select data and use functions. We also provide some tips that can be used in TDengine SQL. If you have previous experience with SQL this section will be fairly easy to understand. If you do not have previous experience with SQL, you'll come to appreciate the simplicity and power of SQL. TDengine SQL has been enhanced in version 3.0, and the query engine has been rearchitected. For information about how TDengine SQL has changed, see [Changes in TDengine 3.0](../taos-sql/changes).
@@ -15,7 +15,7 @@ Syntax Specifications used in this chapter:
- | means one of a few options, excluding | itself.
- … means the item prior to it can be repeated multiple times.
-To better demonstrate the syntax, usage and rules of TAOS SQL, hereinafter it's assumed that there is a data set of data from electric meters. Each meter collects 3 data measurements: current, voltage, phase. The data model is shown below:
+To better demonstrate the syntax, usage and rules of TDengine SQL, hereinafter it's assumed that there is a data set of data from electric meters. Each meter collects 3 data measurements: current, voltage, phase. The data model is shown below:
```
taos> DESCRIBE meters;
diff --git a/docs/en/13-operation/01-pkg-install.md b/docs/en/13-operation/01-pkg-install.md
index b6cc0582bcfe66890cecb0572b6bcf30cf1af70c..d7713b943f5fe8fbd5e685b8ba03ff8cc8ed4e53 100644
--- a/docs/en/13-operation/01-pkg-install.md
+++ b/docs/en/13-operation/01-pkg-install.md
@@ -1,12 +1,12 @@
---
-title: Install & Uninstall
+title: Install and Uninstall
description: Install, Uninstall, Start, Stop and Upgrade
---
import Tabs from "@theme/Tabs";
import TabItem from "@theme/TabItem";
-TDengine community version provides deb and rpm packages for users to choose from, based on their system environment. The deb package supports Debian, Ubuntu and derivative systems. The rpm package supports CentOS, RHEL, SUSE and derivative systems. Furthermore, a tar.gz package is provided for TDengine Enterprise customers.
+This document gives more information about installing, uninstalling, and upgrading TDengine.
## Install
@@ -35,12 +35,28 @@ TDengine is removed successfully!
```
+Apt-get package of taosTools can be uninstalled as below:
+
+```
+$ sudo apt remove taostools
+Reading package lists... Done
+Building dependency tree
+Reading state information... Done
+The following packages will be REMOVED:
+ taostools
+0 upgraded, 0 newly installed, 1 to remove and 0 not upgraded.
+After this operation, 68.3 MB disk space will be freed.
+Do you want to continue? [Y/n]
+(Reading database ... 147973 files and directories currently installed.)
+Removing taostools (2.1.2) ...
+```
+
Deb package of TDengine can be uninstalled as below:
-```bash
+```
$ sudo dpkg -r tdengine
(Reading database ... 137504 files and directories currently installed.)
Removing tdengine (3.0.0.0) ...
@@ -48,6 +64,14 @@ TDengine is removed successfully!
```
+Deb package of taosTools can be uninstalled as below:
+
+```
+$ sudo dpkg -r taostools
+(Reading database ... 147973 files and directories currently installed.)
+Removing taostools (2.1.2) ...
+```
+
@@ -59,6 +83,13 @@ $ sudo rpm -e tdengine
TDengine is removed successfully!
```
+RPM package of taosTools can be uninstalled as below:
+
+```
+sudo rpm -e taostools
+taosToole is removed successfully!
+```
+
@@ -67,115 +98,69 @@ tar.gz package of TDengine can be uninstalled as below:
```
$ rmtaos
-Nginx for TDengine is running, stopping it...
TDengine is removed successfully!
-
-taosKeeper is removed successfully!
-```
-
-
-
-
-:::note
-
-- We strongly recommend not to use multiple kinds of installation packages on a single host TDengine.
-- After deb package is installed, if the installation directory is removed manually, uninstall or reinstall will not work. This issue can be resolved by using the command below which cleans up TDengine package information. You can then reinstall if needed.
-
-```bash
- $ sudo rm -f /var/lib/dpkg/info/tdengine*
```
-- After rpm package is installed, if the installation directory is removed manually, uninstall or reinstall will not work. This issue can be resolved by using the command below which cleans up TDengine package information. You can then reinstall if needed.
+tar.gz package of taosTools can be uninstalled as below:
-```bash
- $ sudo rpm -e --noscripts tdengine
```
+$ rmtaostools
+Start to uninstall taos tools ...
-:::
-
-## Installation Directory
-
-TDengine is installed at /usr/local/taos if successful.
-
-```bash
-$ cd /usr/local/taos
-$ ll
-$ ll
-total 28
-drwxr-xr-x 7 root root 4096 Feb 22 09:34 ./
-drwxr-xr-x 12 root root 4096 Feb 22 09:34 ../
-drwxr-xr-x 2 root root 4096 Feb 22 09:34 bin/
-drwxr-xr-x 2 root root 4096 Feb 22 09:34 cfg/
-lrwxrwxrwx 1 root root 13 Feb 22 09:34 data -> /var/lib/taos/
-drwxr-xr-x 2 root root 4096 Feb 22 09:34 driver/
-drwxr-xr-x 10 root root 4096 Feb 22 09:34 examples/
-drwxr-xr-x 2 root root 4096 Feb 22 09:34 include/
-lrwxrwxrwx 1 root root 13 Feb 22 09:34 log -> /var/log/taos/
+taos tools is uninstalled successfully!
```
-During the installation process:
+
+
+Run C:\TDengine\unins000.exe to uninstall TDengine on a Windows system.
+
+
-- Configuration directory, data directory, and log directory are created automatically if they don't exist
-- The default configuration file is located at /etc/taos/taos.cfg, which is a copy of /usr/local/taos/cfg/taos.cfg
-- The default data directory is /var/lib/taos, which is a soft link to /usr/local/taos/data
-- The default log directory is /var/log/taos, which is a soft link to /usr/local/taos/log
-- The executables at /usr/local/taos/bin are linked to /usr/bin
-- The DLL files at /usr/local/taos/driver are linked to /usr/lib
-- The header files at /usr/local/taos/include are linked to /usr/include
+:::info
-:::note
+- We strongly recommend not to use multiple kinds of installation packages on a single host TDengine. The packages may affect each other and cause errors.
-- When TDengine is uninstalled, the configuration /etc/taos/taos.cfg, data directory /var/lib/taos, log directory /var/log/taos are kept. They can be deleted manually with caution, because data can't be recovered. Please follow data integrity, security, backup or relevant SOPs before deleting any data.
-- When reinstalling TDengine, if the default configuration file /etc/taos/taos.cfg exists, it will be kept and the configuration file in the installation package will be renamed to taos.cfg.orig and stored at /usr/local/taos/cfg to be used as configuration sample. Otherwise the configuration file in the installation package will be installed to /etc/taos/taos.cfg and used.
+- After deb package is installed, if the installation directory is removed manually, uninstall or reinstall will not work. This issue can be resolved by using the command below which cleans up TDengine package information.
-## Start and Stop
+ ```
+ $ sudo rm -f /var/lib/dpkg/info/tdengine*
+ ```
-Linux system services `systemd`, `systemctl` or `service` are used to start, stop and restart TDengine. The server process of TDengine is `taosd`, which is started automatically after the Linux system is started. System operators can use `systemd`, `systemctl` or `service` to start, stop or restart TDengine server.
+You can then reinstall if needed.
-For example, if using `systemctl` , the commands to start, stop, restart and check TDengine server are below:
+- After rpm package is installed, if the installation directory is removed manually, uninstall or reinstall will not work. This issue can be resolved by using the command below which cleans up TDengine package information.
-- Start server:`systemctl start taosd`
+ ```
+ $ sudo rpm -e --noscripts tdengine
+ ```
-- Stop server:`systemctl stop taosd`
+You can then reinstall if needed.
-- Restart server:`systemctl restart taosd`
-
-- Check server status:`systemctl status taosd`
-
-Another component named as `taosAdapter` is to provide HTTP service for TDengine, it should be started and stopped using `systemctl`.
+:::
-If the server process is OK, the output of `systemctl status` is like below:
+Uninstalling and Modifying Files
-```
-Active: active (running)
-```
+- When TDengine is uninstalled, the configuration /etc/taos/taos.cfg, data directory /var/lib/taos, log directory /var/log/taos are kept. They can be deleted manually with caution, because data can't be recovered. Please follow data integrity, security, backup or relevant SOPs before deleting any data.
-Otherwise, the output is as below:
+- When reinstalling TDengine, if the default configuration file /etc/taos/taos.cfg exists, it will be kept and the configuration file in the installation package will be renamed to taos.cfg.orig and stored at /usr/local/taos/cfg to be used as configuration sample. Otherwise the configuration file in the installation package will be installed to /etc/taos/taos.cfg and used.
-```
-Active: inactive (dead)
-```
## Upgrade
-
There are two aspects in upgrade operation: upgrade installation package and upgrade a running server.
To upgrade a package, follow the steps mentioned previously to first uninstall the old version then install the new version.
Upgrading a running server is much more complex. First please check the version number of the old version and the new version. The version number of TDengine consists of 4 sections, only if the first 3 sections match can the old version be upgraded to the new version. The steps of upgrading a running server are as below:
-
- Stop inserting data
- Make sure all data is persisted to disk
-- Make some simple queries (Such as total rows in stables, tables and so on. Note down the values. Follow best practices and relevant SOPs.)
- Stop the cluster of TDengine
- Uninstall old version and install new version
- Start the cluster of TDengine
-- Execute simple queries, such as the ones executed prior to installing the new package, to make sure there is no data loss
+- Execute simple queries, such as the ones executed prior to installing the new package, to make sure there is no data loss
- Run some simple data insertion statements to make sure the cluster works well
- Restore business services
:::warning
-
TDengine doesn't guarantee any lower version is compatible with the data generated by a higher version, so it's never recommended to downgrade the version.
:::
diff --git a/docs/en/13-operation/02-planning.mdx b/docs/en/13-operation/02-planning.mdx
index c1baf92dbfa8d93f83174c05c2ea631d1a469739..2dffa7bb8747e21e4754740208eafed65d341217 100644
--- a/docs/en/13-operation/02-planning.mdx
+++ b/docs/en/13-operation/02-planning.mdx
@@ -1,40 +1,32 @@
---
+sidebar_label: Resource Planning
title: Resource Planning
---
It is important to plan computing and storage resources if using TDengine to build an IoT, time-series or Big Data platform. How to plan the CPU, memory and disk resources required, will be described in this chapter.
-## Memory Requirement of Server Side
+## Server Memory Requirements
-By default, the number of vgroups created for each database is the same as the number of CPU cores. This can be configured by the parameter `maxVgroupsPerDb`. Each vnode in a vgroup stores one replica. Each vnode consumes a fixed amount of memory, i.e. `blocks` \* `cache`. In addition, some memory is required for tag values associated with each table. A fixed amount of memory is required for each cluster. So, the memory required for each DB can be calculated using the formula below:
+Each database creates a fixed number of vgroups. This number is 2 by default and can be configured with the `vgroups` parameter. The number of replicas can be controlled with the `replica` parameter. Each replica requires one vnode per vgroup. Altogether, the memory required by each database depends on the following configuration options:
-```
-Database Memory Size = maxVgroupsPerDb * replica * (blocks * cache + 10MB) + numOfTables * (tagSizePerTable + 0.5KB)
-```
+- vgroups
+- replica
+- buffer
+- pages
+- pagesize
+- cachesize
-For example, assuming the default value of `maxVgroupPerDB` is 64, the default value of `cache` is 16M, the default value of `blocks` is 6, there are 100,000 tables in a DB, the replica number is 1, total length of tag values is 256 bytes, the total memory required for this DB is: 64 \* 1 \* (16 \* 6 + 10) + 100000 \* (0.25 + 0.5) / 1000 = 6792M.
+For more information, see [Database](../../taos-sql/database).
-In the real operation of TDengine, we are more concerned about the memory used by each TDengine server process `taosd`.
+The memory required by a database is therefore greater than or equal to:
```
- taosd_memory = vnode_memory + mnode_memory + query_memory
+vgroups * replica * (buffer + pages * pagesize + cachesize)
```
-In the above formula:
-
-1. "vnode_memory" of a `taosd` process is the memory used by all vnodes hosted by this `taosd` process. It can be roughly calculated by firstly adding up the total memory of all DBs whose memory usage can be derived according to the formula for Database Memory Size, mentioned above, then dividing by number of dnodes and multiplying the number of replicas.
-
-```
- vnode_memory = (sum(Database Memory Size) / number_of_dnodes) * replica
-```
-
-2. "mnode_memory" of a `taosd` process is the memory consumed by a mnode. If there is one (and only one) mnode hosted in a `taosd` process, the memory consumed by "mnode" is "0.2KB \* the total number of tables in the cluster".
-
-3. "query_memory" is the memory used when processing query requests. Each ongoing query consumes at least "0.2 KB \* total number of involved tables".
-
-Please note that the above formulas can only be used to estimate the minimum memory requirement, instead of maximum memory usage. In a real production environment, it's better to reserve some redundance beyond the estimated minimum memory requirement. If memory is abundant, it's suggested to increase the value of parameter `blocks` to speed up data insertion and data query.
+However, note that this requirement is spread over all dnodes in the cluster, not on a single physical machine. The physical servers that run dnodes meet the requirement together. If a cluster has multiple databases, the memory required increases accordingly. In complex environments where dnodes were added after initial deployment in response to increasing resource requirements, load may not be balanced among the original dnodes and newer dnodes. In this situation, the actual status of your dnodes is more important than theoretical calculations.
-## Memory Requirement of Client Side
+## Client Memory Requirements
For the client programs using TDengine client driver `taosc` to connect to the server side there is a memory requirement as well.
@@ -56,10 +48,10 @@ So, at least 3GB needs to be reserved for such a client.
The CPU resources required depend on two aspects:
-- **Data Insertion** Each dnode of TDengine can process at least 10,000 insertion requests in one second, while each insertion request can have multiple rows. The difference in computing resource consumed, between inserting 1 row at a time, and inserting 10 rows at a time is very small. So, the more the number of rows that can be inserted one time, the higher the efficiency. Inserting in batch also imposes requirements on the client side which needs to cache rows to insert in batch once the number of cached rows reaches a threshold.
+- **Data Insertion** Each dnode of TDengine can process at least 10,000 insertion requests in one second, while each insertion request can have multiple rows. The difference in computing resource consumed, between inserting 1 row at a time, and inserting 10 rows at a time is very small. So, the more the number of rows that can be inserted one time, the higher the efficiency. If each insert request contains more than 200 records, a single core can process more than 1 million records per second. Inserting in batch also imposes requirements on the client side which needs to cache rows to insert in batch once the number of cached rows reaches a threshold.
- **Data Query** High efficiency query is provided in TDengine, but it's hard to estimate the CPU resource required because the queries used in different use cases and the frequency of queries vary significantly. It can only be verified with the query statements, query frequency, data size to be queried, and other requirements provided by users.
-In short, the CPU resource required for data insertion can be estimated but it's hard to do so for query use cases. In real operation, it's suggested to control CPU usage below 50%. If this threshold is exceeded, it's a reminder for system operator to add more nodes in the cluster to expand resources.
+In short, the CPU resource required for data insertion can be estimated but it's hard to do so for query use cases. If possible, ensure that CPU usage remains below 50%. If this threshold is exceeded, it's a reminder for system operator to add more nodes in the cluster to expand resources.
## Disk Requirement
@@ -77,6 +69,6 @@ To increase performance, multiple disks can be setup for parallel data reading o
## Number of Hosts
-A host can be either physical or virtual. The total memory, total CPU, total disk required can be estimated according to the formulae mentioned previously. Then, according to the system resources that a single host can provide, assuming all hosts have the same resources, the number of hosts can be derived easily.
+A host can be either physical or virtual. The total memory, total CPU, total disk required can be estimated according to the formulae mentioned previously. If the number of data replicas is not 1, the required resources are multiplied by the number of replicas.
-**Quick Estimation for CPU, Memory and Disk** Please refer to [Resource Estimate](https://www.taosdata.com/config/config.html).
+Then, according to the system resources that a single host can provide, assuming all hosts have the same resources, the number of hosts can be derived easily.
diff --git a/docs/en/13-operation/03-tolerance.md b/docs/en/13-operation/03-tolerance.md
index d4d48d7fcdc2c990b6ea0821e2347c70a809ed79..21a5a902822d7b85f555114a112686d4e35c64aa 100644
--- a/docs/en/13-operation/03-tolerance.md
+++ b/docs/en/13-operation/03-tolerance.md
@@ -1,6 +1,5 @@
---
-sidebar_label: Fault Tolerance
-title: Fault Tolerance & Disaster Recovery
+title: Fault Tolerance and Disaster Recovery
---
## Fault Tolerance
@@ -11,22 +10,21 @@ When a data block is received by TDengine, the original data block is first writ
There are 2 configuration parameters related to WAL:
-- walLevel:
- - 0:wal is disabled
- - 1:wal is enabled without fsync
- - 2:wal is enabled with fsync
-- fsync:This parameter is only valid when walLevel is set to 2. It specifies the interval, in milliseconds, of invoking fsync. If set to 0, it means fsync is invoked immediately once WAL is written.
+- wal_level: Specifies the WAL level. 1 indicates that WAL is enabled but fsync is disabled. 2 indicates that WAL and fsync are both enabled. The default value is 1.
+- wal_fsync_period: This parameter is only valid when wal_level is set to 2. It specifies the interval, in milliseconds, of invoking fsync. If set to 0, it means fsync is invoked immediately once WAL is written.
-To achieve absolutely no data loss, walLevel should be set to 2 and fsync should be set to 1. There is a performance penalty to the data ingestion rate. However, if the concurrent data insertion threads on the client side can reach a big enough number, for example 50, the data ingestion performance will be still good enough. Our verification shows that the drop is only 30% when fsync is set to 3,000 milliseconds.
+To achieve absolutely no data loss, set wal_level to 2 and wal_fsync_period to 0. There is a performance penalty to the data ingestion rate. However, if the concurrent data insertion threads on the client side can reach a big enough number, for example 50, the data ingestion performance will be still good enough. Our verification shows that the drop is only 30% when wal_fsync_period is set to 3000 milliseconds.
## Disaster Recovery
-TDengine uses replication to provide high availability and disaster recovery capability.
+TDengine uses replication to provide high availability.
-A TDengine cluster is managed by mnode. To ensure the high availability of mnode, multiple replicas can be configured by the system parameter `numOfMnodes`. The data replication between mnode replicas is performed in a synchronous way to guarantee metadata consistency.
+A TDengine cluster is managed by mnodes. You can configure up to three mnodes to ensure high availability. The data replication between mnode replicas is performed in a synchronous way to guarantee metadata consistency.
-The number of replicas for time series data in TDengine is associated with each database. There can be many databases in a cluster and each database can be configured with a different number of replicas. When creating a database, parameter `replica` is used to configure the number of replications. To achieve high availability, `replica` needs to be higher than 1.
+The number of replicas for time series data in TDengine is associated with each database. There can be many databases in a cluster and each database can be configured with a different number of replicas. When creating a database, the parameter `replica` is used to specify the number of replicas. To achieve high availability, set `replica` to 3.
The number of dnodes in a TDengine cluster must NOT be lower than the number of replicas for any database, otherwise it would fail when trying to create a table.
As long as the dnodes of a TDengine cluster are deployed on different physical machines and the replica number is higher than 1, high availability can be achieved without any other assistance. For disaster recovery, dnodes of a TDengine cluster should be deployed in geographically different data centers.
+
+Alternatively, you can use taosX to synchronize the data from one TDengine cluster to another cluster in a remote location. However, taosX is only available in TDengine enterprise version, for more information please contact tdengine.com.
diff --git a/docs/en/13-operation/10-monitor.md b/docs/en/13-operation/10-monitor.md
index a4679983f2bc77bb4e438f5d43fa1b8beb39b120..74a5564a2a7b82009e6b2406290a5c15afbf3c1d 100644
--- a/docs/en/13-operation/10-monitor.md
+++ b/docs/en/13-operation/10-monitor.md
@@ -2,7 +2,7 @@
title: TDengine Monitoring
---
-After TDengine is started, a database named `log` is created automatically to help with monitoring. Information that includes CPU, memory and disk usage, bandwidth, number of requests, disk I/O speed, slow queries, is written into the `log` database at a predefined interval. Additionally, some important system operations, like logon, create user, drop database, and alerts and warnings generated in TDengine are written into the `log` database too. A system operator can view the data in `log` database from TDengine CLI or from a web console.
+After TDengine is started, it automatically writes monitoring data including CPU, memory and disk usage, bandwidth, number of requests, disk I/O speed, slow queries, into a designated database at a predefined interval through taosKeeper. Additionally, some important system operations, like logon, create user, drop database, and alerts and warnings generated in TDengine are written into the `log` database too. A system operator can view the data in `log` database from TDengine CLI or from a web console.
The collection of the monitoring information is enabled by default, but can be disabled by parameter `monitor` in the configuration file.
@@ -10,7 +10,7 @@ The collection of the monitoring information is enabled by default, but can be d
TDinsight is a complete solution which uses the monitoring database `log` mentioned previously, and Grafana, to monitor a TDengine cluster.
-From version 2.3.3.0, more monitoring data has been added in the `log` database. Please refer to [TDinsight Grafana Dashboard](https://grafana.com/grafana/dashboards/15167) to learn more details about using TDinsight to monitor TDengine.
+Please refer to [TDinsight Grafana Dashboard](../../reference/tdinsight) to learn more details about using TDinsight to monitor TDengine.
A script `TDinsight.sh` is provided to deploy TDinsight automatically.
@@ -30,31 +30,14 @@ Prepare:
2. Grafana Alert Notification
-There are two ways to setup Grafana alert notification.
+You can use below command to setup Grafana alert notification.
-- An existing Grafana Notification Channel can be specified with parameter `-E`, the notifier uid of the channel can be obtained by `curl -u admin:admin localhost:3000/api/alert-notifications |jq`
+An existing Grafana Notification Channel can be specified with parameter `-E`, the notifier uid of the channel can be obtained by `curl -u admin:admin localhost:3000/api/alert-notifications |jq`
```bash
sudo ./TDinsight.sh -a http://localhost:6041 -u root -p taosdata -E
```
-- The AliCloud SMS alert built in TDengine data source plugin can be enabled with parameter `-s`, the parameters of enabling this plugin are listed below:
-
- - `-I`: AliCloud SMS Key ID
- - `-K`: AliCloud SMS Key Secret
- - `-S`: AliCloud SMS Signature
- - `-C`: SMS notification template
- - `-T`: Input parameters in JSON format for the SMS notification template, for example`{"alarm_level":"%s","time":"%s","name":"%s","content":"%s"}`
- - `-B`: List of mobile numbers to be notified
-
- Below is an example of the full command using the AliCloud SMS alert.
-
- ```bash
- sudo ./TDinsight.sh -a http://localhost:6041 -u root -p taosdata -s \
- -I XXXXXXX -K XXXXXXXX -S taosdata -C SMS_1111111 -B 18900000000 \
- -T '{"alarm_level":"%s","time":"%s","name":"%s","content":"%s"}'
- ```
-
Launch `TDinsight.sh` with the command above and restart Grafana, then open Dashboard `http://localhost:3000/d/tdinsight`.
For more use cases and restrictions please refer to [TDinsight](/reference/tdinsight/).
diff --git a/docs/en/13-operation/17-diagnose.md b/docs/en/13-operation/17-diagnose.md
index 2b474fddba4af5ba0c29103cd8ab1249d10d055b..d01d12e831956e6a6db654e1f6dbf5072ac6b243 100644
--- a/docs/en/13-operation/17-diagnose.md
+++ b/docs/en/13-operation/17-diagnose.md
@@ -13,110 +13,59 @@ Diagnostic steps:
1. If the port range to be diagnosed is being occupied by a `taosd` server process, please first stop `taosd.
2. On the server side, execute command `taos -n server -P -l ` to monitor the port range starting from the port specified by `-P` parameter with the role of "server".
3. On the client side, execute command `taos -n client -h -P -l ` to send a testing package to the specified server and port.
-
--l : The size of the testing package, in bytes. The value range is [11, 64,000] and default value is 1,000. Please note that the package length must be same in the above 2 commands executed on server side and client side respectively.
+
+-l : The size of the testing package, in bytes. The value range is [11, 64,000] and default value is 1,000.
+Please note that the package length must be same in the above 2 commands executed on server side and client side respectively.
Output of the server side for the example is below:
```bash
-# taos -n server -P 6000
-12/21 14:50:13.522509 0x7f536f455200 UTL work as server, host:172.27.0.7 startPort:6000 endPort:6011 pkgLen:1000
-
-12/21 14:50:13.522659 0x7f5352242700 UTL TCP server at port:6000 is listening
-12/21 14:50:13.522727 0x7f5351240700 UTL TCP server at port:6001 is listening
-...
-...
+# taos -n server -P 6030 -l 1000
+network test server is initialized, port:6030
+request is received, size:1000
+request is received, size:1000
...
-12/21 14:50:13.523954 0x7f5342fed700 UTL TCP server at port:6011 is listening
-12/21 14:50:13.523989 0x7f53437ee700 UTL UDP server at port:6010 is listening
-12/21 14:50:13.524019 0x7f53427ec700 UTL UDP server at port:6011 is listening
-12/21 14:50:22.192849 0x7f5352242700 UTL TCP: read:1000 bytes from 172.27.0.8 at 6000
-12/21 14:50:22.192993 0x7f5352242700 UTL TCP: write:1000 bytes to 172.27.0.8 at 6000
-12/21 14:50:22.237082 0x7f5351a41700 UTL UDP: recv:1000 bytes from 172.27.0.8 at 6000
-12/21 14:50:22.237203 0x7f5351a41700 UTL UDP: send:1000 bytes to 172.27.0.8 at 6000
-12/21 14:50:22.237450 0x7f5351240700 UTL TCP: read:1000 bytes from 172.27.0.8 at 6001
-12/21 14:50:22.237576 0x7f5351240700 UTL TCP: write:1000 bytes to 172.27.0.8 at 6001
-12/21 14:50:22.281038 0x7f5350a3f700 UTL UDP: recv:1000 bytes from 172.27.0.8 at 6001
-12/21 14:50:22.281141 0x7f5350a3f700 UTL UDP: send:1000 bytes to 172.27.0.8 at 6001
...
...
-...
-12/21 14:50:22.677443 0x7f5342fed700 UTL TCP: read:1000 bytes from 172.27.0.8 at 6011
-12/21 14:50:22.677576 0x7f5342fed700 UTL TCP: write:1000 bytes to 172.27.0.8 at 6011
-12/21 14:50:22.721144 0x7f53427ec700 UTL UDP: recv:1000 bytes from 172.27.0.8 at 6011
-12/21 14:50:22.721261 0x7f53427ec700 UTL UDP: send:1000 bytes to 172.27.0.8 at 6011
+request is received, size:1000
+request is received, size:1000
```
Output of the client side for the example is below:
```bash
# taos -n client -h 172.27.0.7 -P 6000
-12/21 14:50:22.192434 0x7fc95d859200 UTL work as client, host:172.27.0.7 startPort:6000 endPort:6011 pkgLen:1000
-
-12/21 14:50:22.192472 0x7fc95d859200 UTL server ip:172.27.0.7 is resolved from host:172.27.0.7
-12/21 14:50:22.236869 0x7fc95d859200 UTL successed to test TCP port:6000
-12/21 14:50:22.237215 0x7fc95d859200 UTL successed to test UDP port:6000
+taos -n client -h v3s2 -P 6030 -l 1000
+network test client is initialized, the server is v3s2:6030
+request is sent, size:1000
+response is received, size:1000
+request is sent, size:1000
+response is received, size:1000
...
...
...
-12/21 14:50:22.676891 0x7fc95d859200 UTL successed to test TCP port:6010
-12/21 14:50:22.677240 0x7fc95d859200 UTL successed to test UDP port:6010
-12/21 14:50:22.720893 0x7fc95d859200 UTL successed to test TCP port:6011
-12/21 14:50:22.721274 0x7fc95d859200 UTL successed to test UDP port:6011
-```
-
-The output needs to be checked carefully for the system operator to find the root cause and resolve the problem.
-
-## Startup Status and RPC Diagnostic
-
-`taos -n startup -h ` can be used to check the startup status of a `taosd` process. This is a common task which should be performed by a system operator, especially in the case of a cluster, to determine whether `taosd` has been started successfully.
-
-`taos -n rpc -h ` can be used to check whether the port of a started `taosd` can be accessed or not. If `taosd` process doesn't respond or is working abnormally, this command can be used to initiate a rpc communication with the specified fqdn to determine whether it's a network problem or whether `taosd` is abnormal.
-
-## Sync and Arbitrator Diagnostic
+request is sent, size:1000
+response is received, size:1000
+request is sent, size:1000
+response is received, size:1000
-```bash
-taos -n sync -P 6040 -h
-taos -n sync -P 6042 -h
+total succ: 100/100 cost: 16.23 ms speed: 5.87 MB/s
```
-The above commands can be executed in a Linux shell to check whether the port for sync is working well and whether the sync module on the server side is working well. Additionally, `-P 6042` is used to check whether the arbitrator is configured properly and is working well.
-
-## Network Speed Diagnostic
-
-`taos -n speed -h -P 6030 -N 10 -l 10000000 -S TCP`
-
-From version 2.2.0.0 onwards, the above command can be executed in a Linux shell to test network speed. The command sends uncompressed packages to a running `taosd` server process or a simulated server process started by `taos -n server` to test the network speed. Parameters can be used when testing network speed are as below:
-
--n:When set to "speed", it means testing network speed.
--h:The FQDN or IP of the server process to be connected to; if not set, the FQDN configured in `taos.cfg` is used.
--P:The port of the server process to connect to, the default value is 6030.
--N:The number of packages that will be sent in the test, range is [1,10000], default value is 100.
--l:The size of each package in bytes, range is [1024, 1024 \* 1024 \* 1024], default value is 1024.
--S:The type of network packages to send, can be either TCP or UDP, default value is TCP.
-
-## FQDN Resolution Diagnostic
-
-`taos -n fqdn -h `
-
-From version 2.2.0.0 onward, the above command can be executed in a Linux shell to test the resolution speed of FQDN. It can be used to try to resolve a FQDN to an IP address and record the time spent in this process. The parameters that can be used for this purpose are as below:
-
--n:When set to "fqdn", it means testing the speed of resolving FQDN.
--h:The FQDN to be resolved. If not set, the `FQDN` parameter in `taos.cfg` is used by default.
+The output needs to be checked carefully for the system operator to find the root cause and resolve the problem.
## Server Log
-The parameter `debugFlag` is used to control the log level of the `taosd` server process. The default value is 131. For debugging and tracing, it needs to be set to either 135 or 143 respectively.
-
-Once this parameter is set to 135 or 143, the log file grows very quickly especially when there is a huge volume of data insertion and data query requests. If all the logs are stored together, some important information may be missed very easily and so on the server side, important information is stored in a different place from other logs.
+The parameter `debugFlag` is used to control the log level of the `taosd` server process. The default value is 131. For debugging and tracing, it needs to be set to either 135 or 143 respectively.
-- The log at level of INFO, WARNING and ERROR is stored in `taosinfo` so that it is easy to find important information
-- The log at level of DEBUG (135) and TRACE (143) and other information not handled by `taosinfo` are stored in `taosdlog`
+Once this parameter is set to 135 or 143, the log file grows very quickly especially when there is a huge volume of data insertion and data query requests. Ensure that the disk drive on which logs are stored has sufficient space.
## Client Log
-An independent log file, named as "taoslog+" is generated for each client program, i.e. a client process. The default value of `debugFlag` is also 131 and only logs at level of INFO/ERROR/WARNING are recorded. As stated above, for debugging and tracing, it needs to be changed to 135 or 143 respectively, so that logs at DEBUG or TRACE level can be recorded.
+An independent log file, named as "taoslog+" is generated for each client program, i.e. a client process. The parameter `debugFlag` is used to control the log level. The default value is 131. For debugging and tracing, it needs to be set to either 135 or 143 respectively.
+
+The default value of `debugFlag` is also 131 and only logs at level of INFO/ERROR/WARNING are recorded. As stated above, for debugging and tracing, it needs to be changed to 135 or 143 respectively, so that logs at DEBUG or TRACE level can be recorded.
The maximum length of a single log file is controlled by parameter `numOfLogLines` and only 2 log files are kept for each `taosd` server process.
-Log files are written in an async way to minimize the workload on disk, but the trade off for performance is that a few log lines may be lost in some extreme conditions.
+Log files are written in an async way to minimize the workload on disk, but the trade off for performance is that a few log lines may be lost in some extreme conditions. You can configure asynclog to 0 when needed for troubleshooting purposes to ensure that no log information is lost.
diff --git a/docs/en/14-reference/02-rest-api/02-rest-api.mdx b/docs/en/14-reference/02-rest-api/02-rest-api.mdx
index 74ba78b7fc1115f074ae6d8c3aa7723bce984f86..da26b34c6f77861a5a2308204dba56964ce05430 100644
--- a/docs/en/14-reference/02-rest-api/02-rest-api.mdx
+++ b/docs/en/14-reference/02-rest-api/02-rest-api.mdx
@@ -18,12 +18,12 @@ If the TDengine server is already installed, it can be verified as follows:
The following example is in an Ubuntu environment and uses the `curl` tool to verify that the REST interface is working. Note that the `curl` tool may need to be installed in your environment.
-The following example lists all databases on the host h1.taosdata.com. To use it in your environment, replace `h1.taosdata.com` and `6041` (the default port) with the actual running TDengine service FQDN and port number.
+The following example lists all databases on the host h1.tdengine.com. To use it in your environment, replace `h1.tdengine.com` and `6041` (the default port) with the actual running TDengine service FQDN and port number.
```bash
curl -L -H "Authorization: Basic cm9vdDp0YW9zZGF0YQ==" \
-d "select name, ntables, status from information_schema.ins_databases;" \
- h1.taosdata.com:6041/rest/sql
+ h1.tdengine.com:6041/rest/sql
```
The following return value results indicate that the verification passed.
@@ -123,7 +123,7 @@ where `TOKEN` is the string after Base64 encoding of `{username}:{password}`, e.
### HTTP body structure
-#### Successful Operation
+#### Successful Insert Operation
Example:
@@ -143,7 +143,7 @@ Description:
- rows: (`int`) Only returns `1`.
- data: (`[][]any`) Returns the number of rows affected.
-#### Successful Query
+#### Successful Query Operation
Example:
diff --git a/docs/en/14-reference/03-connector/04-java.mdx b/docs/en/14-reference/03-connector/04-java.mdx
index afa3200387083a544851c98381f5df7d5486124f..c032687d0ff979aea52faad9cdef2413756280e5 100644
--- a/docs/en/14-reference/03-connector/04-java.mdx
+++ b/docs/en/14-reference/03-connector/04-java.mdx
@@ -133,8 +133,6 @@ The configuration parameters in the URL are as follows:
- batchfetch: true: pulls result sets in batches when executing queries; false: pulls result sets row by row. The default value is true. Enabling batch pulling and obtaining a batch of data can improve query performance when the query data volume is large.
- batchErrorIgnore:true: When executing statement executeBatch, if there is a SQL execution failure in the middle, the following SQL will continue to be executed. false: No more statements after the failed SQL are executed. The default value is: false.
-For more information about JDBC native connections, see [Video Tutorial](https://www.taosdata.com/blog/2020/11/11/1955.html).
-
**Connect using the TDengine client-driven configuration file **
When you use a JDBC native connection to connect to a TDengine cluster, you can use the TDengine client driver configuration file to specify parameters such as `firstEp` and `secondEp` of the cluster in the configuration file as below:
diff --git a/docs/en/14-reference/03-connector/05-go.mdx b/docs/en/14-reference/03-connector/05-go.mdx
index 1bf99d72faad52c858544edad7606b6a1a70d9b4..f00e635af9bdda737f752208f4b34c9ff634d075 100644
--- a/docs/en/14-reference/03-connector/05-go.mdx
+++ b/docs/en/14-reference/03-connector/05-go.mdx
@@ -7,7 +7,7 @@ title: TDengine Go Connector
import Tabs from '@theme/Tabs';
import TabItem from '@theme/TabItem';
-import Preparition from "./_preparition.mdx"
+import Preparition from "./_preparation.mdx"
import GoInsert from "../../07-develop/03-insert-data/_go_sql.mdx"
import GoInfluxLine from "../../07-develop/03-insert-data/_go_line.mdx"
import GoOpenTSDBTelnet from "../../07-develop/03-insert-data/_go_opts_telnet.mdx"
diff --git a/docs/en/14-reference/03-connector/06-rust.mdx b/docs/en/14-reference/03-connector/06-rust.mdx
index 9d2e840e9eb000cd6a66324ed6f99c3e4b88a24b..4e2a7848dc87ac0522b6d5aa5855b5a0998dc789 100644
--- a/docs/en/14-reference/03-connector/06-rust.mdx
+++ b/docs/en/14-reference/03-connector/06-rust.mdx
@@ -7,7 +7,7 @@ title: TDengine Rust Connector
import Tabs from '@theme/Tabs';
import TabItem from '@theme/TabItem';
-import Preparition from "./_preparition.mdx"
+import Preparition from "./_preparation.mdx"
import RustInsert from "../../07-develop/03-insert-data/_rust_sql.mdx"
import RustBind from "../../07-develop/03-insert-data/_rust_stmt.mdx"
import RustQuery from "../../07-develop/04-query-data/_rust.mdx"
@@ -118,7 +118,7 @@ The parameters are described as follows:
- **protocol**: Specify which connection method to use. For example, `taos+ws://localhost:6041` uses Websocket to establish connections.
- **username/password**: Username and password used to create connections.
- **host/port**: Specifies the server and port to establish a connection. If you do not specify a hostname or port, native connections default to `localhost:6030` and Websocket connections default to `localhost:6041`.
-- **database**: Specify the default database to connect to.
+- **database**: Specify the default database to connect to. It's optional.
- **params**:Optional parameters.
A sample DSN description string is as follows:
@@ -157,15 +157,15 @@ async fn demo(taos: &Taos, db: &str) -> Result<(), Error> {
let inserted = taos.exec_many([
// create super table
"CREATE TABLE `meters` (`ts` TIMESTAMP, `current` FLOAT, `voltage` INT, `phase` FLOAT) \
- TAGS (`groupid` INT, `location` BINARY(16))",
+ TAGS (`groupid` INT, `location` BINARY(24))",
// create child table
- "CREATE TABLE `d0` USING `meters` TAGS(0, 'Los Angles')",
+ "CREATE TABLE `d0` USING `meters` TAGS(0, 'California.LosAngles')",
// insert into child table
"INSERT INTO `d0` values(now - 10s, 10, 116, 0.32)",
// insert with NULL values
"INSERT INTO `d0` values(now - 8s, NULL, NULL, NULL)",
// insert and automatically create table with tags if not exists
- "INSERT INTO `d1` USING `meters` TAGS(1, 'San Francisco') values(now - 9s, 10.1, 119, 0.33)",
+ "INSERT INTO `d1` USING `meters` TAGS(1, 'California.SanFrancisco') values(now - 9s, 10.1, 119, 0.33)",
// insert many records in a single sql
"INSERT INTO `d1` values (now-8s, 10, 120, 0.33) (now - 6s, 10, 119, 0.34) (now - 4s, 11.2, 118, 0.322)",
]).await?;
diff --git a/docs/en/14-reference/03-connector/07-python.mdx b/docs/en/14-reference/03-connector/07-python.mdx
index 355ba06ee4718e91ceae37f730b91292e1ee4be9..5fdb42a2cdb3d170e7bd1ee89917814196857a63 100644
--- a/docs/en/14-reference/03-connector/07-python.mdx
+++ b/docs/en/14-reference/03-connector/07-python.mdx
@@ -7,7 +7,7 @@ description: "taospy is the official Python connector for TDengine. taospy provi
import Tabs from "@theme/Tabs";
import TabItem from "@theme/TabItem";
-`taospy is the official Python connector for TDengine. taospy provides a rich API that makes it easy for Python applications to use TDengine. `taospy` wraps both the [native interface](/reference/connector/cpp) and [REST interface](/reference/rest-api) of TDengine, which correspond to the `taos` and `taosrest` modules of the `taospy` package, respectively.
+`taospy` is the official Python connector for TDengine. taospy provides a rich API that makes it easy for Python applications to use TDengine. `taospy` wraps both the [native interface](/reference/connector/cpp) and [REST interface](/reference/rest-api) of TDengine, which correspond to the `taos` and `taosrest` modules of the `taospy` package, respectively.
In addition to wrapping the native and REST interfaces, `taospy` also provides a set of programming interfaces that conforms to the [Python Data Access Specification (PEP 249)](https://peps.python.org/pep-0249/). It is easy to integrate `taospy` with many third-party tools, such as [SQLAlchemy](https://www.sqlalchemy.org/) and [pandas](https://pandas.pydata.org/).
The direct connection to the server using the native interface provided by the client driver is referred to hereinafter as a "native connection"; the connection to the server using the REST interface provided by taosAdapter is referred to hereinafter as a "REST connection".
diff --git a/docs/en/14-reference/03-connector/08-node.mdx b/docs/en/14-reference/03-connector/08-node.mdx
index 259ee590efb7ef2f7f164c89e5a5ba4332b76ed0..a36cf0efc9432425ad16c4d8112cc813a8c528b9 100644
--- a/docs/en/14-reference/03-connector/08-node.mdx
+++ b/docs/en/14-reference/03-connector/08-node.mdx
@@ -7,7 +7,7 @@ title: TDengine Node.js Connector
import Tabs from "@theme/Tabs";
import TabItem from "@theme/TabItem";
-import Preparition from "./_preparition.mdx";
+import Preparition from "./_preparation.mdx";
import NodeInsert from "../../07-develop/03-insert-data/_js_sql.mdx";
import NodeInfluxLine from "../../07-develop/03-insert-data/_js_line.mdx";
import NodeOpenTSDBTelnet from "../../07-develop/03-insert-data/_js_opts_telnet.mdx";
diff --git a/docs/en/14-reference/03-connector/09-csharp.mdx b/docs/en/14-reference/03-connector/09-csharp.mdx
index c745b8dd1ad829171a667eef653d5dfc7490241a..87a10e17cafa2578b76ca768eb51f8d784fc6e7f 100644
--- a/docs/en/14-reference/03-connector/09-csharp.mdx
+++ b/docs/en/14-reference/03-connector/09-csharp.mdx
@@ -7,7 +7,7 @@ title: C# Connector
import Tabs from '@theme/Tabs';
import TabItem from '@theme/TabItem';
-import Preparition from "./_preparition.mdx"
+import Preparition from "./_preparation.mdx"
import CSInsert from "../../07-develop/03-insert-data/_cs_sql.mdx"
import CSInfluxLine from "../../07-develop/03-insert-data/_cs_line.mdx"
import CSOpenTSDBTelnet from "../../07-develop/03-insert-data/_cs_opts_telnet.mdx"
@@ -17,7 +17,7 @@ import CSAsyncQuery from "../../07-develop/04-query-data/_cs_async.mdx"
`TDengine.Connector` is a C# language connector provided by TDengine that allows C# developers to develop C# applications that access TDengine cluster data.
-The `TDengine.Connector` connector supports connect to TDengine instances via the TDengine client driver (taosc), providing data writing, querying, subscription, schemaless writing, bind interface, etc. The `TDengine.Connector` currently does not provide a REST connection interface. Developers can write their RESTful application by referring to the [REST API](/reference/rest-api/) documentation.
+The `TDengine.Connector` connector supports connect to TDengine instances via the TDengine client driver (taosc), providing data writing, querying, subscription, schemaless writing, bind interface, etc.The `TDengine.Connector` also supports WebSocket and developers can build connection through DSN, which supports data writing, querying, and parameter binding, etc.
This article describes how to install `TDengine.Connector` in a Linux or Windows environment and connect to TDengine clusters via `TDengine.Connector` to perform basic operations such as data writing and querying.
@@ -35,6 +35,10 @@ Please refer to [version support list](/reference/connector#version-support)
## Supported features
+
+
+
+
1. Connection Management
2. General Query
3. Continuous Query
@@ -42,6 +46,18 @@ Please refer to [version support list](/reference/connector#version-support)
5. Subscription
6. Schemaless
+
+
+
+
+1. Connection Management
+2. General Query
+3. Continuous Query
+4. Parameter Binding
+
+
+
+
## Installation Steps
### Pre-installation preparation
@@ -74,12 +90,18 @@ cp -r src/ myProject
cd myProject
dotnet add exmaple.csproj reference src/TDengine.csproj
```
+
## Establish a Connection
-``` C#
+
+
+
+
+
+``` csharp
using TDengineDriver;
namespace TDengineExample
@@ -112,14 +134,62 @@ namespace TDengineExample
```
+
+
+
+
+The structure of the DSN description string is as follows:
+
+```text
+[]://[[:@]:][/][?=[&=]]
+|------------|---|-----------|-----------|------|------|------------|-----------------------|
+| protocol | | username | password | host | port | database | params |
+```
+
+The parameters are described as follows:
+
+* **protocol**: Specify which connection method to use (support http/ws). For example, `ws://localhost:6041` uses Websocket to establish connections.
+* **username/password**: Username and password used to create connections.
+* **host/port**: Specifies the server and port to establish a connection. Websocket connections default to `localhost:6041`.
+* **database**: Specify the default database to connect to. It's optional.
+* **params**:Optional parameters.
+
+A sample DSN description string is as follows:
+
+```text
+ws://localhost:6041/test
+```
+
+``` csharp
+{{#include docs/examples/csharp/wsConnect/Program.cs}}
+```
+
+
+
+
## Usage examples
### Write data
#### SQL Write
+
+
+
+
+
+
+
+
+```csharp
+{{#include docs/examples/csharp/wsInsert/Program.cs}}
+```
+
+
+
+
#### InfluxDB line protocol write
@@ -132,12 +202,48 @@ namespace TDengineExample
+#### Parameter Binding
+
+
+
+
+
+``` csharp
+{{#include docs/examples/csharp/stmtInsert/Program.cs}}
+```
+
+
+
+
+
+```csharp
+{{#include docs/examples/csharp/wsStmt/Program.cs}}
+```
+
+
+
+
### Query data
#### Synchronous Query
+
+
+
+
+
+
+
+
+```csharp
+{{#include docs/examples/csharp/wsQuery/Program.cs}}
+```
+
+
+
+
#### Asynchronous query
@@ -145,18 +251,21 @@ namespace TDengineExample
### More sample programs
|Sample program |Sample program description |
-|--------------------------------------------------------------------------------------------------------------------|------------ --------------------------------|
+|--------------------------------------------------------------------------------------------------------------------|--------------------------------------------|
| [CURD](https://github.com/taosdata/taos-connector-dotnet/blob/3.0/examples/Query/Query.cs) | Table creation, data insertion, and query examples with TDengine.Connector |
| [JSON Tag](https://github.com/taosdata/taos-connector-dotnet/blob/3.0/examples/JSONTag) | Writing and querying JSON tag data with TDengine Connector |
| [stmt](https://github.com/taosdata/taos-connector-dotnet/tree/3.0/examples/Stmt) | Parameter binding with TDengine Connector |
| [schemaless](https://github.com/taosdata/taos-connector-dotnet/blob/3.0/examples/schemaless) | Schemaless writes with TDengine Connector |
| [async query](https://github.com/taosdata/taos-connector-dotnet/blob/3.0/examples/AsyncQuery/QueryAsync.cs) | Asynchronous queries with TDengine Connector |
-| [TMQ](https://github.com/taosdata/taos-connector-dotnet/blob/3.0/examples/TMQ/TMQ.cs) | Data subscription with TDengine Connector |
+| [Subscription](https://github.com/taosdata/taos-connector-dotnet/blob/3.0/examples/TMQ/TMQ.cs) | Subscription example with TDengine Connector |
+| [Basic WebSocket Usage](https://github.com/taosdata/taos-connector-dotnet/blob/5a4a7cd0dbcda114447cdc6d0c6dedd8e84a52da/examples/WS/WebSocketSample.cs) | WebSocket basic data in and out with TDengine connector |
+| [WebSocket Parameter Binding](https://github.com/taosdata/taos-connector-dotnet/blob/5a4a7cd0dbcda114447cdc6d0c6dedd8e84a52da/examples/WS/WebSocketSTMT.cs) | WebSocket parameter binding example |
## Important update records
| TDengine.Connector | Description |
|--------------------|--------------------------------|
+| 3.0.1 | Support WebSocket and Cloud,With function query, insert, and parameter binding|
| 3.0.0 | Supports TDengine 3.0.0.0. TDengine 2.x is not supported. Added `TDengine.Impl.GetData()` interface to deserialize query results. |
| 1.0.7 | Fixed TDengine.Query() memory leak. |
| 1.0.6 | Fix schemaless bug in 1.0.4 and 1.0.5. |
@@ -172,7 +281,6 @@ namespace TDengineExample
`Taos` is an ADO.NET connector for TDengine, supporting Linux and Windows platforms. Community contributor `Maikebing@@maikebing contributes the connector`. Please refer to:
* Interface download:
-* Usage notes:
## Frequently Asked Questions
diff --git a/docs/en/14-reference/03-connector/_linux_install.mdx b/docs/en/14-reference/03-connector/_linux_install.mdx
index 07f8fb5c7118d84c53017f44d9811a3357944cfc..97f6cd333961d24bd015a2ebcf7223f89fecca2c 100644
--- a/docs/en/14-reference/03-connector/_linux_install.mdx
+++ b/docs/en/14-reference/03-connector/_linux_install.mdx
@@ -4,7 +4,7 @@ import PkgListV3 from "/components/PkgListV3";
- [All Downloads](../../releases)
+ [All Downloads](../../releases/tdengine)
2. Unzip
diff --git a/docs/en/14-reference/03-connector/_preparation.mdx b/docs/en/14-reference/03-connector/_preparation.mdx
index 07ebdbca3d891ff51a254bc1b83016f1404bb47e..c6e42ce02348595da0fdd75847d6442c285dc10a 100644
--- a/docs/en/14-reference/03-connector/_preparation.mdx
+++ b/docs/en/14-reference/03-connector/_preparation.mdx
@@ -2,7 +2,7 @@
:::info
-Since the TDengine client driver is written in C, using the native connection requires loading the client driver shared library file, which is usually included in the TDengine installer. You can install either standard TDengine server installation package or [TDengine client installation package](/get-started/). For Windows development, you need to install the corresponding [Windows client](https://www.taosdata.com/cn/all-downloads/#TDengine-Windows-Client) for TDengine.
+Since the TDengine client driver is written in C, using the native connection requires loading the client driver shared library file, which is usually included in the TDengine installer. You can install either standard TDengine server installation package or [TDengine client installation package](/get-started/). For Windows development, you need to install the corresponding Windows client, please refer to [Install TDengine](../../get-started/package).
- libtaos.so: After successful installation of TDengine on a Linux system, the dependent Linux version of the client driver `libtaos.so` file will be automatically linked to `/usr/lib/libtaos.so`, which is included in the Linux scannable path and does not need to be specified separately.
- taos.dll: After installing the client on Windows, the dependent Windows version of the client driver taos.dll file will be automatically copied to the system default search path C:/Windows/System32, again without the need to specify it separately.
diff --git a/docs/en/14-reference/03-connector/_preparition.mdx b/docs/en/14-reference/03-connector/_preparition.mdx
deleted file mode 100644
index 87538ebfd8c60507aec90ee86e427d85979dbc4a..0000000000000000000000000000000000000000
--- a/docs/en/14-reference/03-connector/_preparition.mdx
+++ /dev/null
@@ -1,10 +0,0 @@
-- 已安装客户端驱动(使用原生连接必须安装,使用 REST 连接无需安装)
-
-:::info
-
-由于 TDengine 的客户端驱动使用 C 语言编写,使用原生连接时需要加载系统对应安装在本地的客户端驱动共享库文件,通常包含在 TDengine 安装包。TDengine Linux 服务端安装包附带了 TDengine 客户端,也可以单独安装 [Linux 客户端](/get-started/) 。在 Windows 环境开发时需要安装 TDengine 对应的 [Windows 客户端](https://www.taosdata.com/cn/all-downloads/#TDengine-Windows-Client) 。
-
-- libtaos.so: 在 Linux 系统中成功安装 TDengine 后,依赖的 Linux 版客户端驱动 libtaos.so 文件会被自动拷贝至 /usr/lib/libtaos.so,该目录包含在 Linux 自动扫描路径上,无需单独指定。
-- taos.dll: 在 Windows 系统中安装完客户端之后,依赖的 Windows 版客户端驱动 taos.dll 文件会自动拷贝到系统默认搜索路径 C:/Windows/System32 下,同样无需要单独指定。
-
-:::
diff --git a/docs/en/14-reference/03-connector/_windows_install.mdx b/docs/en/14-reference/03-connector/_windows_install.mdx
index ea638ed1ed6c64c3ec4ceaea436f65dd1f09a27e..81fc5573bb85c6fd1b501f1ff4ec072df8f23db1 100644
--- a/docs/en/14-reference/03-connector/_windows_install.mdx
+++ b/docs/en/14-reference/03-connector/_windows_install.mdx
@@ -4,7 +4,7 @@ import PkgListV3 from "/components/PkgListV3";
- [All Downloads](../../releases)
+ [All Downloads](../../releases/tdengine)
2. Execute the installer, select the default value as prompted, and complete the installation
3. Installation path
diff --git a/docs/en/14-reference/03-connector/index.mdx b/docs/en/14-reference/03-connector/index.mdx
index 49e2dceec531cf8449749ea9dbb111079771a788..5dc54f09343853b8c93cab6478a7323c8e617538 100644
--- a/docs/en/14-reference/03-connector/index.mdx
+++ b/docs/en/14-reference/03-connector/index.mdx
@@ -39,14 +39,14 @@ Comparing the connector support for TDengine functional features as follows.
### Using the native interface (taosc)
-| **Functional Features** | **Java** | **Python** | **Go** | **C#** | **Node.js** | **Rust** |
-| -------------- | -------- | ---------- | ------ | ------ | ----------- | -------- |
-| **Connection Management** | Support | Support | Support | Support | Support | Support |
-| **Regular Query** | Support | Support | Support | Support | Support | Support |
-| **Parameter Binding** | Support | Support | Support | Support | Support | Support |
-| ** TMQ ** | Support | Support | Support | Support | Support | Support |
-| **Schemaless** | Support | Support | Support | Support | Support | Support |
-| **DataFrame** | Not Supported | Support | Not Supported | Not Supported | Not Supported | Not Supported |
+| **Functional Features** | **Java** | **Python** | **Go** | **C#** | **Node.js** | **Rust** |
+| ----------------------------- | ------------- | ---------- | ------------- | ------------- | ------------- | ------------- |
+| **Connection Management** | Support | Support | Support | Support | Support | Support |
+| **Regular Query** | Support | Support | Support | Support | Support | Support |
+| **Parameter Binding** | Support | Support | Support | Support | Support | Support |
+| **Subscription (TMQ)** | Support | Support | Support | Support | Support | Support |
+| **Schemaless** | Support | Support | Support | Support | Support | Support |
+| **DataFrame** | Not Supported | Support | Not Supported | Not Supported | Not Supported | Not Supported |
:::info
The different database framework specifications for various programming languages do not mean that all C/C++ interfaces need a wrapper.
@@ -54,16 +54,15 @@ The different database framework specifications for various programming language
### Use HTTP Interfaces (REST or WebSocket)
-| **Functional Features** | **Java** | **Python** | **Go** | **C# (not supported yet)** | **Node.js** | **Rust** |
-| ------------------------------ | -------- | ---------- | -------- | ------------------ | ----------- | -------- |
-| **Connection Management** | Support | Support | Support | N/A | Support | Support |
-| **Regular Query** | Support | Support | Support | N/A | Support | Support |
-| **Continous Query ** | Support | Support | Support | N/A | Support | Support |
-| **Parameter Binding** | Not supported | Not supported | Not supported | N/A | Not supported | Support |
-| ** TMQ ** | Not supported | Not supported | Not supported | N/A | Not supported | Support |
-| **Schemaless** | Not supported | Not supported | Not supported | N/A | Not supported | Not supported |
-| **Bulk Pulling (based on WebSocket) **| Support | Support | Not Supported | N/A | Not Supported | Supported |
-| **DataFrame** | Not supported | Support | Not supported | N/A | Not supported | Not supported |
+| **Functional Features** | **Java** | **Python** | **Go** | **C#** | **Node.js** | **Rust** |
+| -------------------------------------- | ------------- | --------------- | ------------- | ------------- | ------------- | ------------- |
+| **Connection Management** | Support | Support | Support | Support | Support | Support |
+| **Regular Query** | Support | Support | Support | Support | Support | Support |
+| **Parameter Binding** | Not supported | Not supported | Not supported | Support | Not supported | Support |
+| **Subscription (TMQ) ** | Not supported | Not supported | Not supported | Not supported | Not supported | Support |
+| **Schemaless** | Not supported | Not supported | Not supported | Not supported | Not supported | Not supported |
+| **Bulk Pulling (based on WebSocket) ** | Support | Support | Not Supported | support | Not Supported | Supported |
+| **DataFrame** | Not supported | Support | Not supported | Not supported | Not supported | Not supported |
:::warning
diff --git a/docs/en/14-reference/04-taosadapter.md b/docs/en/14-reference/04-taosadapter.md
index e7ea620d0bed3aee3ff0acf8063120acca33c335..78c4febb92f11225ad2aa82630f4892f6e786a4d 100644
--- a/docs/en/14-reference/04-taosadapter.md
+++ b/docs/en/14-reference/04-taosadapter.md
@@ -30,7 +30,7 @@ taosAdapter provides the following features.
### Install taosAdapter
-If you use the TDengine server, you don't need additional steps to install taosAdapter. You can download taosAdapter from [TDengine 3.0 released versions](../../releases) to download the TDengine server installation package. If you need to deploy taosAdapter separately on another server other than the TDengine server, you should install the full TDengine server package on that server to install taosAdapter. If you need to build taosAdapter from source code, you can refer to the [Building taosAdapter]( https://github.com/taosdata/taosadapter/blob/3.0/BUILD.md) documentation.
+If you use the TDengine server, you don't need additional steps to install taosAdapter. You can download taosAdapter from [TDengine 3.0 released versions](../../releases/tdengine) to download the TDengine server installation package. If you need to deploy taosAdapter separately on another server other than the TDengine server, you should install the full TDengine server package on that server to install taosAdapter. If you need to build taosAdapter from source code, you can refer to the [Building taosAdapter]( https://github.com/taosdata/taosadapter/blob/3.0/BUILD.md) documentation.
### Start/Stop taosAdapter
diff --git a/docs/en/14-reference/06-taosdump.md b/docs/en/14-reference/06-taosdump.md
index 2105ba83fad9700674e28609016b07ef6de66833..e73441a96b087062b2e3912ed73010fc3e761bb9 100644
--- a/docs/en/14-reference/06-taosdump.md
+++ b/docs/en/14-reference/06-taosdump.md
@@ -116,5 +116,4 @@ Usage: taosdump [OPTION...] dbname [tbname ...]
Mandatory or optional arguments to long options are also mandatory or optional
for any corresponding short options.
-Report bugs to .
```
diff --git a/docs/en/14-reference/07-tdinsight/assets/15146-tdengine-monitor-dashboard.json b/docs/en/14-reference/07-tdinsight/assets/15146-tdengine-monitor-dashboard.json
index 54dc1062d6440cc0fc7b8c69d9e4c6b53e4cd01e..f651983528ca824b4e6b14586aac5a5bfb4ecab8 100644
--- a/docs/en/14-reference/07-tdinsight/assets/15146-tdengine-monitor-dashboard.json
+++ b/docs/en/14-reference/07-tdinsight/assets/15146-tdengine-monitor-dashboard.json
@@ -211,7 +211,7 @@
],
"timeFrom": null,
"timeShift": null,
- "title": "Leader MNode",
+ "title": "Master MNode",
"transformations": [
{
"id": "filterByValue",
@@ -221,7 +221,7 @@
"config": {
"id": "regex",
"options": {
- "value": "leader"
+ "value": "master"
}
},
"fieldName": "role"
@@ -300,7 +300,7 @@
],
"timeFrom": null,
"timeShift": null,
- "title": "Leader MNode Create Time",
+ "title": "Master MNode Create Time",
"transformations": [
{
"id": "filterByValue",
@@ -310,7 +310,7 @@
"config": {
"id": "regex",
"options": {
- "value": "leader"
+ "value": "master"
}
},
"fieldName": "role"
diff --git a/docs/en/14-reference/07-tdinsight/assets/TDinsight-1-cluster-status.webp b/docs/en/14-reference/07-tdinsight/assets/TDinsight-1-cluster-status.webp
index a78e18028a94c2f6a783b08d992a25c791527407..3bc0d960f1db45ee8d2adcee26de89334e681956 100644
Binary files a/docs/en/14-reference/07-tdinsight/assets/TDinsight-1-cluster-status.webp and b/docs/en/14-reference/07-tdinsight/assets/TDinsight-1-cluster-status.webp differ
diff --git a/docs/en/14-reference/07-tdinsight/assets/TDinsight-2-dnodes.webp b/docs/en/14-reference/07-tdinsight/assets/TDinsight-2-dnodes.webp
index b152418d0902b8ebdf62ebce6705c10dd5ab4fbf..f5a602d3f9dcecb64ded5e1f463ba460daab0024 100644
Binary files a/docs/en/14-reference/07-tdinsight/assets/TDinsight-2-dnodes.webp and b/docs/en/14-reference/07-tdinsight/assets/TDinsight-2-dnodes.webp differ
diff --git a/docs/en/14-reference/07-tdinsight/assets/TDinsight-3-mnodes.webp b/docs/en/14-reference/07-tdinsight/assets/TDinsight-3-mnodes.webp
index f58f48b7f17375cb8e62e7c0126ca3aea56a13f6..f155fa42a0fb5df71ee48c8c65a8c7d8851ddc3e 100644
Binary files a/docs/en/14-reference/07-tdinsight/assets/TDinsight-3-mnodes.webp and b/docs/en/14-reference/07-tdinsight/assets/TDinsight-3-mnodes.webp differ
diff --git a/docs/en/14-reference/07-tdinsight/assets/TDinsight-4-requests.webp b/docs/en/14-reference/07-tdinsight/assets/TDinsight-4-requests.webp
index 00afcce013602dce0da17bfd033f65aaa8e43bb7..dc0b85e262bd4340e986a42105e0ff9838d12fa6 100644
Binary files a/docs/en/14-reference/07-tdinsight/assets/TDinsight-4-requests.webp and b/docs/en/14-reference/07-tdinsight/assets/TDinsight-4-requests.webp differ
diff --git a/docs/en/14-reference/07-tdinsight/assets/TDinsight-5-database.webp b/docs/en/14-reference/07-tdinsight/assets/TDinsight-5-database.webp
index 567e5694f9d7a035a3eb354493d3df8ed64db251..342c8cfc0a8e852e7cd092aff453ed1fd2ec85a2 100644
Binary files a/docs/en/14-reference/07-tdinsight/assets/TDinsight-5-database.webp and b/docs/en/14-reference/07-tdinsight/assets/TDinsight-5-database.webp differ
diff --git a/docs/en/14-reference/07-tdinsight/assets/TDinsight-8-taosadapter.webp b/docs/en/14-reference/07-tdinsight/assets/TDinsight-8-taosadapter.webp
index 8666193f59497180574fd2786266e5baabbe9761..942130d4fabf7944c7add10acb3bb42ca7f51e0f 100644
Binary files a/docs/en/14-reference/07-tdinsight/assets/TDinsight-8-taosadapter.webp and b/docs/en/14-reference/07-tdinsight/assets/TDinsight-8-taosadapter.webp differ
diff --git a/docs/en/14-reference/07-tdinsight/assets/howto-add-datasource.webp b/docs/en/14-reference/07-tdinsight/assets/howto-add-datasource.webp
index 06d0ff6ed50091a6340508bc5b2b3f78b65dcb18..d7fc9e233acd1a4b1bbb940b13bc4296c261a33a 100644
Binary files a/docs/en/14-reference/07-tdinsight/assets/howto-add-datasource.webp and b/docs/en/14-reference/07-tdinsight/assets/howto-add-datasource.webp differ
diff --git a/docs/en/14-reference/07-tdinsight/assets/import_dashboard.webp b/docs/en/14-reference/07-tdinsight/assets/import_dashboard.webp
index fb7958f1b9fbd43c8f63136024842790e711c490..ae2a1e8e9b7b63a68d56dfcd2187eca614da9a3d 100644
Binary files a/docs/en/14-reference/07-tdinsight/assets/import_dashboard.webp and b/docs/en/14-reference/07-tdinsight/assets/import_dashboard.webp differ
diff --git a/docs/en/14-reference/07-tdinsight/assets/import_dashboard_view.webp b/docs/en/14-reference/07-tdinsight/assets/import_dashboard_view.webp
new file mode 100644
index 0000000000000000000000000000000000000000..1b10e41c75fbbb9a30bce4aa8d1adb8216fbe127
Binary files /dev/null and b/docs/en/14-reference/07-tdinsight/assets/import_dashboard_view.webp differ
diff --git a/docs/en/14-reference/07-tdinsight/assets/select_dashboard_db.webp b/docs/en/14-reference/07-tdinsight/assets/select_dashboard_db.webp
new file mode 100644
index 0000000000000000000000000000000000000000..956132e37e9df255d3ff82654fd357bec001e695
Binary files /dev/null and b/docs/en/14-reference/07-tdinsight/assets/select_dashboard_db.webp differ
diff --git a/docs/en/14-reference/07-tdinsight/assets/tdengine-grafana-7.x.json b/docs/en/14-reference/07-tdinsight/assets/tdengine-grafana-7.x.json
index 1add8522a712aa2cfef6187e577c42d205432b66..b4254c428b28a0084e54b5e3c509dd2e0ec651b9 100644
--- a/docs/en/14-reference/07-tdinsight/assets/tdengine-grafana-7.x.json
+++ b/docs/en/14-reference/07-tdinsight/assets/tdengine-grafana-7.x.json
@@ -153,7 +153,7 @@
],
"timeFrom": null,
"timeShift": null,
- "title": "Leader MNode",
+ "title": "Master MNode",
"transformations": [
{
"id": "filterByValue",
@@ -163,7 +163,7 @@
"config": {
"id": "regex",
"options": {
- "value": "leader"
+ "value": "master"
}
},
"fieldName": "role"
@@ -246,7 +246,7 @@
],
"timeFrom": null,
"timeShift": null,
- "title": "Leader MNode Create Time",
+ "title": "Master MNode Create Time",
"transformations": [
{
"id": "filterByValue",
@@ -256,7 +256,7 @@
"config": {
"id": "regex",
"options": {
- "value": "leader"
+ "value": "master"
}
},
"fieldName": "role"
diff --git a/docs/en/14-reference/07-tdinsight/index.md b/docs/en/14-reference/07-tdinsight/index.md
index e74c9de7b2aa71278a99d45f250e0dcaf86d4704..d03c16a8bc25a39fb751ded4b5c12df4b69a8e4e 100644
--- a/docs/en/14-reference/07-tdinsight/index.md
+++ b/docs/en/14-reference/07-tdinsight/index.md
@@ -5,15 +5,23 @@ sidebar_label: TDinsight
TDinsight is a solution for monitoring TDengine using the builtin native monitoring database and [Grafana].
-After TDengine starts, it will automatically create a monitoring database `log`. TDengine will automatically write many metrics in specific intervals into the `log` database. The metrics may include the server's CPU, memory, hard disk space, network bandwidth, number of requests, disk read/write speed, slow queries, other information like important system operations (user login, database creation, database deletion, etc.), and error alarms. With [Grafana] and [TDengine Data Source Plugin](https://github.com/taosdata/grafanaplugin/releases), TDinsight can visualize cluster status, node information, insertion and query requests, resource usage, vnode, dnode, and mnode status, exception alerts and many other metrics. This is very convenient for developers who want to monitor TDengine cluster status in real-time. This article will guide users to install the Grafana server, automatically install the TDengine data source plug-in, and deploy the TDinsight visualization panel using the `TDinsight.sh` installation script.
+After TDengine starts, it automatically writes many metrics in specific intervals into a designated database. The metrics may include the server's CPU, memory, hard disk space, network bandwidth, number of requests, disk read/write speed, slow queries, other information like important system operations (user login, database creation, database deletion, etc.), and error alarms. With [Grafana] and [TDengine Data Source Plugin](https://github.com/taosdata/grafanaplugin/releases), TDinsight can visualize cluster status, node information, insertion and query requests, resource usage, vnode, dnode, and mnode status, exception alerts and many other metrics. This is very convenient for developers who want to monitor TDengine cluster status in real-time. This article will guide users to install the Grafana server, automatically install the TDengine data source plug-in, and deploy the TDinsight visualization panel using the `TDinsight.sh` installation script.
## System Requirements
-To deploy TDinsight, a single-node TDengine server or a multi-node TDengine cluster and a [Grafana] server are required. This dashboard requires TDengine 2.3.3.0 and above, with the `log` database enabled (`monitor = 1`).
+To deploy TDinsight, we need
+- a single-node TDengine server or a multi-node TDengine cluster and a [Grafana] server are required. This dashboard requires TDengine 3.0.1.0 and above, with the monitoring feature enabled. For detailed configuration, please refer to [TDengine monitoring configuration](../config/#monitoring-parameters).
+- taosAdapter has been instaleld and running, please refer to [taosAdapter](../taosadapter).
+- taosKeeper has been installed and running, please refer to [taosKeeper](../taoskeeper).
+
+Please record
+- The endpoint of taosAdapter REST service, for example `http://tdengine.local:6041`
+- Authentication of taosAdapter, e.g. user name and password
+- The database name used by taosKeeper to store monitoring data
## Installing Grafana
-We recommend using the latest [Grafana] version 7 or 8 here. You can install Grafana on any [supported operating system](https://grafana.com/docs/grafana/latest/installation/requirements/#supported-operating-systems) by following the [official Grafana documentation Instructions](https://grafana.com/docs/grafana/latest/installation/) to install [Grafana].
+We recommend using the latest [Grafana] version 8 or 9 here. You can install Grafana on any [supported operating system](https://grafana.com/docs/grafana/latest/installation/requirements/#supported-operating-systems) by following the [official Grafana documentation Instructions](https://grafana.com/docs/grafana/latest/installation/) to install [Grafana].
### Installing Grafana on Debian or Ubuntu
@@ -71,7 +79,7 @@ chmod +x TDinsight.sh
./TDinsight.sh
```
-This script will automatically download the latest [Grafana TDengine data source plugin](https://github.com/taosdata/grafanaplugin/releases/latest) and [TDinsight dashboard](https://grafana.com/grafana/dashboards/15167) with configurable parameters for command-line options to the [Grafana Provisioning](https://grafana.com/docs/grafana/latest/administration/provisioning/) configuration file to automate deployment and updates, etc. With the alert setting options provided by this script, you can also get built-in support for AliCloud SMS alert notifications.
+This script will automatically download the latest [Grafana TDengine data source plugin](https://github.com/taosdata/grafanaplugin/releases/latest) and [TDinsight dashboard](https://github.com/taosdata/grafanaplugin/blob/master/dashboards/TDinsightV3.json) with configurable parameters for command-line options to the [Grafana Provisioning](https://grafana.com/docs/grafana/latest/administration/provisioning/) configuration file to automate deployment and updates, etc. With the alert setting options provided by this script, you can also get built-in support for AliCloud SMS alert notifications.
Assume you use TDengine and Grafana's default services on the same host. Run `. /TDinsight.sh` and open the Grafana browser window to see the TDinsight dashboard.
@@ -106,18 +114,6 @@ Install and configure TDinsight dashboard in Grafana on Ubuntu 18.04/20.04 syste
-E, --external-notifier Apply external notifier uid to TDinsight dashboard.
-Alibaba Cloud SMS as Notifier:
--s, --sms-enabled To enable tdengine-datasource plugin builtin Alibaba Cloud SMS webhook.
--N, --sms-notifier-name Provisioning notifier name.[default: TDinsight Builtin SMS]
--U, --sms-notifier-uid Provisioning notifier uid, use lowercase notifier name by default.
--D, --sms-notifier-is-default Set notifier as default.
--I, --sms-access-key-id Alibaba Cloud SMS access key id
--K, --sms-access-key-secret Alibaba Cloud SMS access key secret
--S, --sms-sign-name Sign name
--C, --sms-template-code Template code
--T, --sms-template-param Template param, a escaped JSON string like '{"alarm_level":"%s","time":"%s","name":"%s","content":"%s"}'
--B, --sms-phone-numbers Comma-separated numbers list, eg "189xxxxxxxx,132xxxxxxxx"
--L, --sms-listen-addr [default: 127.0.0.1:9100]
```
Most command-line options can take effect the same as environment variables.
@@ -136,17 +132,6 @@ Most command-line options can take effect the same as environment variables.
| -t | --tdinsight-title | TDINSIGHT_DASHBOARD_TITLE | TDinsight dashboard title. [Default: TDinsight] | -e | -tdinsight-title
| -e | --tdinsight-editable | TDINSIGHT_DASHBOARD_EDITABLE | If the dashboard is configured to be editable. [Default: false] | -e | --external
| -E | --external-notifier | EXTERNAL_NOTIFIER | Apply the external notifier uid to the TDinsight dashboard. | -s
-| -s | --sms-enabled | SMS_ENABLED | Enable the tdengine-datasource plugin built into Alibaba Cloud SMS webhook. | -s
-| -N | --sms-notifier-name | SMS_NOTIFIER_NAME | The name of the provisioning notifier. [Default: `TDinsight Builtin SMS`] | -U
-| -U | --sms-notifier-uid | SMS_NOTIFIER_UID | "Notification Channel" `uid`, lowercase of the program name is used by default, other characters are replaced by "-". |-sms
-| -D | --sms-notifier-is-default | SMS_NOTIFIER_IS_DEFAULT | Set built-in SMS notification to default value. |-sms-notifier-is-default
-| -I | --sms-access-key-id | SMS_ACCESS_KEY_ID | Alibaba Cloud SMS access key id |
-| -K | --sms-access-key-secret | SMS_ACCESS_KEY_SECRET | AliCloud SMS-access-secret-key |
-| -S | --sms-sign-name | SMS_SIGN_NAME | Signature |
-| -C | --sms-template-code | SMS_TEMPLATE_CODE | Template code |
-| -T | --sms-template-param | SMS_TEMPLATE_PARAM | JSON template for template parameters |
-| -B | --sms-phone-numbers | SMS_PHONE_NUMBERS | A comma-separated list of phone numbers, e.g. `"189xxxxxxxx,132xxxxxxxx"` |
-| -L | --sms-listen-addr | SMS_LISTEN_ADDR | Built-in SMS webhook listener address, default is `127.0.0.1:9100` |
Suppose you start a TDengine database on host `tdengine` with HTTP API port `6041`, user `root1`, and password `pass5ord`. Execute the script.
@@ -166,24 +151,10 @@ Use the `uid` value obtained above as `-E` input.
sudo ./TDinsight.sh -a http://tdengine:6041 -u root1 -p pass5ord -E existing-notifier
```
-If you want to use the [Alibaba Cloud SMS](https://www.aliyun.com/product/sms) service as a notification channel, you should enable it with the `-s` flag add the following parameters.
-
-- `-N`: Notification Channel name, default is `TDinsight Builtin SMS`.
-- `-U`: Channel uid, default is lowercase of `name`, any other character is replaced with -, for the default `-N`, its uid is `tdinsight-builtin-sms`.
-- `-I`: Alibaba Cloud SMS access key id.
-- `-K`: Alibaba Cloud SMS access secret key.
-- `-S`: Alibaba Cloud SMS signature.
-- `-C`: Alibaba Cloud SMS template id.
-- `-T`: Alibaba Cloud SMS template parameters, for JSON format template, example is as follows `'{"alarm_level":"%s", "time":"%s", "name":"%s", "content":"%s"}'`. There are four parameters: alarm level, time, name and alarm content.
-- `-B`: a list of phone numbers, separated by a comma `,`.
-
If you want to monitor multiple TDengine clusters, you need to set up numerous TDinsight dashboards. Setting up non-default TDinsight requires some changes: the `-n` `-i` `-t` options need to be changed to non-default names, and `-N` and `-L` should also be changed if using the built-in SMS alerting feature.
```bash
sudo . /TDengine.sh -n TDengine-Env1 -a http://another:6041 -u root -p taosdata -i tdinsight-env1 -t 'TDinsight Env1'
-# If using built-in SMS notifications
-sudo . /TDengine.sh -n TDengine-Env1 -a http://another:6041 -u root -p taosdata -i tdinsight-env1 -t 'TDinsight Env1' \
- -s -N 'Env1 SMS' -I xx -K xx -S xx -C SMS_XX -T '' -B 00000000000 -L 127.0.0.01:10611
```
Please note that the configuration data source, notification channel, and dashboard are not changeable on the front end. You should update the configuration again via this script or manually change the configuration file in the `/etc/grafana/provisioning` directory (this is the default directory for Grafana, use the `-P` option to change it as needed).
@@ -249,21 +220,23 @@ Save and test. It will report 'TDengine Data source is working' under normal cir
### Importing dashboards
-Point to **+** / **Create** - **import** (or `/dashboard/import` url).
+In the page of configuring data source, click **Dashboards** tab.

-Type the dashboard ID `15167` in the **Import via grafana.com** location and **Load**.
+Choose `TDengine for 3.x` and click `import`.
+
+After the importing is done, `TDinsight for 3.x` dashboard is available on the page of `search dashboards by name`.
-
+
-Once the import is complete, the full page view of TDinsight is shown below.
+In the `TDinsight for 3.x` dashboard, choose the database used by taosKeeper to store monitoring data, you can see the monitoring result.
-
+
## TDinsight dashboard details
-The TDinsight dashboard is designed to provide the usage and status of TDengine-related resources [dnodes, mnodes, vnodes](https://www.taosdata.com/cn/documentation/architecture#cluster) or databases.
+The TDinsight dashboard is designed to provide the usage and status of TDengine-related resources, e.g. dnodes, mnodes, vnodes and databases.
Details of the metrics are as follows.
@@ -285,7 +258,6 @@ This section contains the current information and status of the cluster, the ale
- **Measuring Points Used**: The number of measuring points used to enable the alert rule (no data available in the community version, healthy by default).
- **Grants Expire Time**: the expiration time of the enterprise version of the enabled alert rule (no data available for the community version, healthy by default).
- **Error Rate**: Aggregate error rate (average number of errors per second) for alert-enabled clusters.
-- **Variables**: `show variables` table display.
### DNodes Status
@@ -294,7 +266,6 @@ This section contains the current information and status of the cluster, the ale
- **DNodes Status**: simple table view of `show dnodes`.
- **DNodes Lifetime**: the time elapsed since the dnode was created.
- **DNodes Number**: the number of DNodes changes.
-- **Offline Reason**: if any dnode status is offline, the reason for offline is shown as a pie chart.
### MNode Overview
@@ -309,7 +280,6 @@ This section contains the current information and status of the cluster, the ale
1. **Requests Rate(Inserts per Second)**: average number of inserts per second.
2. **Requests (Selects)**: number of query requests and change rate (count of second).
-3. **Requests (HTTP)**: number of HTTP requests and request rate (count of second).
### Database
@@ -319,9 +289,8 @@ Database usage, repeated for each value of the variable `$database` i.e. multipl
1. **STables**: number of super tables.
2. **Total Tables**: number of all tables.
-3. **Sub Tables**: the number of all super table subtables.
-4. **Tables**: graph of all normal table numbers over time.
-5. **Tables Number Foreach VGroups**: The number of tables contained in each VGroups.
+3. **Tables**: number of normal tables.
+4. **Table number for each vgroup**: number of tables per vgroup.
### DNode Resource Usage
@@ -356,12 +325,11 @@ Currently, only the number of logins per minute is reported.
Support monitoring taosAdapter request statistics and status details. Includes.
-1. **http_request**: contains the total number of requests, the number of failed requests, and the number of requests being processed
-2. **top 3 request endpoint**: data of the top 3 requests by endpoint group
-3. **Memory Used**: taosAdapter memory usage
-4. **latency_quantile(ms)**: quantile of (1, 2, 5, 9, 99) stages
-5. **top 3 failed request endpoint**: data of the top 3 failed requests by endpoint grouping
-6. **CPU Used**: taosAdapter CPU usage
+1. **http_request_inflight**: number of real-time requests.
+2. **http_request_total**: number of total requests.
+3. **http_request_fail**: number of failed requets.
+4. **CPU Used**: CPU usage of taosAdapter.
+5. **Memory Used**: Memory usage of taosAdapter.
## Upgrade
@@ -403,13 +371,6 @@ services:
TDENGINE_API: ${TDENGINE_API}
TDENGINE_USER: ${TDENGINE_USER}
TDENGINE_PASS: ${TDENGINE_PASS}
- SMS_ACCESS_KEY_ID: ${SMS_ACCESS_KEY_ID}
- SMS_ACCESS_KEY_SECRET: ${SMS_ACCESS_KEY_SECRET}
- SMS_SIGN_NAME: ${SMS_SIGN_NAME}
- SMS_TEMPLATE_CODE: ${SMS_TEMPLATE_CODE}
- SMS_TEMPLATE_PARAM: '${SMS_TEMPLATE_PARAM}'
- SMS_PHONE_NUMBERS: $SMS_PHONE_NUMBERS
- SMS_LISTEN_ADDR: ${SMS_LISTEN_ADDR}
ports:
- 3000:3000
volumes:
diff --git a/docs/en/14-reference/11-docker/index.md b/docs/en/14-reference/11-docker/index.md
index b3c3cddd9a9958dcb0bab477128c0339da1f0aa3..7cd1e810dca010d16b0f2e257d47e012c6ef06cc 100644
--- a/docs/en/14-reference/11-docker/index.md
+++ b/docs/en/14-reference/11-docker/index.md
@@ -72,7 +72,7 @@ Next, ensure the hostname "tdengine" is resolvable in `/etc/hosts`.
echo 127.0.0.1 tdengine |sudo tee -a /etc/hosts
```
-Finally, the TDengine service can be accessed from the taos shell or any connector with "tdengine" as the server address.
+Finally, the TDengine service can be accessed from the TDengine CLI or any connector with "tdengine" as the server address.
```shell
taos -h tdengine -P 6030
@@ -116,7 +116,7 @@ If you want to start your application in a container, you need to add the corres
FROM ubuntu:20.04
RUN apt-get update && apt-get install -y wget
ENV TDENGINE_VERSION=3.0.0.0
-RUN wget -c https://www.taosdata.com/assets-download/TDengine-client-${TDENGINE_VERSION}-Linux-x64.tar.gz \
+RUN wget -c https://www.taosdata.com/assets-download/3.0/TDengine-client-${TDENGINE_VERSION}-Linux-x64.tar.gz \
&& tar xvf TDengine-client-${TDENGINE_VERSION}-Linux-x64.tar.gz \
&& cd TDengine-client-${TDENGINE_VERSION} \
&& ./install_client.sh \
@@ -217,7 +217,7 @@ Here is the full Dockerfile:
```docker
FROM golang:1.17.6-buster as builder
ENV TDENGINE_VERSION=3.0.0.0
-RUN wget -c https://www.taosdata.com/assets-download/TDengine-client-${TDENGINE_VERSION}-Linux-x64.tar.gz \
+RUN wget -c https://www.taosdata.com/assets-download/3.0/TDengine-client-${TDENGINE_VERSION}-Linux-x64.tar.gz \
&& tar xvf TDengine-client-${TDENGINE_VERSION}-Linux-x64.tar.gz \
&& cd TDengine-client-${TDENGINE_VERSION} \
&& ./install_client.sh \
@@ -233,7 +233,7 @@ RUN go build
FROM ubuntu:20.04
RUN apt-get update && apt-get install -y wget
ENV TDENGINE_VERSION=3.0.0.0
-RUN wget -c https://www.taosdata.com/assets-download/TDengine-client-${TDENGINE_VERSION}-Linux-x64.tar.gz \
+RUN wget -c https://www.taosdata.com/assets-download/3.0/TDengine-client-${TDENGINE_VERSION}-Linux-x64.tar.gz \
&& tar xvf TDengine-client-${TDENGINE_VERSION}-Linux-x64.tar.gz \
&& cd TDengine-client-${TDENGINE_VERSION} \
&& ./install_client.sh \
diff --git a/docs/en/14-reference/12-config/index.md b/docs/en/14-reference/12-config/index.md
index cb7daf3c476b2117b5de53c683e76ce07de97bc5..726a1ccd69389b48e0d1f9075e316fd58034c012 100644
--- a/docs/en/14-reference/12-config/index.md
+++ b/docs/en/14-reference/12-config/index.md
@@ -1,16 +1,13 @@
---
-sidebar_label: Configuration
title: Configuration Parameters
description: "Configuration parameters for client and server in TDengine"
---
-In this chapter, all the configuration parameters on both server and client side are described thoroughly.
-
## Configuration File on Server Side
On the server side, the actual service of TDengine is provided by an executable `taosd` whose parameters can be configured in file `taos.cfg` to meet the requirements of different use cases. The default location of `taos.cfg` is `/etc/taos`, but can be changed by using `-c` parameter on the CLI of `taosd`. For example, the configuration file can be put under `/home/user` and used like below
-```bash
+```
taosd -c /home/user
```
@@ -24,8 +21,6 @@ taosd -C
TDengine CLI `taos` is the tool for users to interact with TDengine. It can share same configuration file as `taosd` or use a separate configuration file. When launching `taos`, parameter `-c` can be used to specify the location where its configuration file is. For example `taos -c /home/cfg` means `/home/cfg/taos.cfg` will be used. If `-c` is not used, the default location of the configuration file is `/etc/taos`. For more details please use `taos --help` to get.
-From version 2.0.10.0 below commands can be used to show the configuration parameters of the client side.
-
```bash
taos -C
```
@@ -36,6 +31,11 @@ taos --dump-config
# Configuration Parameters
+:::note
+The parameters described in this document by the effect that they have on the system.
+
+:::
+
:::note
`taosd` needs to be restarted for the parameters changed in the configuration file to take effect.
@@ -45,19 +45,19 @@ taos --dump-config
### firstEp
-| Attribute | Description |
-| ------------- | ---------------------------------------------------------------------------------------------------- |
-| Applicable | Server and Client |
+| Attribute | Description |
+| -------- | -------------------------------------------------------------- |
+| Applicable | Server and Client |
| Meaning | The end point of the first dnode in the cluster to be connected to when `taosd` or `taos` is started |
-| Default Value | localhost:6030 |
+| Default | localhost:6030 |
### secondEp
-| Attribute | Description |
-| ------------- | ---------------------------------------------------------------------------------------------------------------------- |
-| Applicable | Server and Client |
+| Attribute | Description |
+| -------- | ------------------------------------------------------------------------------------- |
+| Applicable | Server and Client |
| Meaning | The end point of the second dnode to be connected to if the firstEp is not available when `taosd` or `taos` is started |
-| Default Value | None |
+| Default | None |
### fqdn
@@ -65,35 +65,28 @@ taos --dump-config
| ------------- | ------------------------------------------------------------------------ |
| Applicable | Server Only |
| Meaning | The FQDN of the host where `taosd` will be started. It can be IP address |
-| Default Value | The first hostname configured for the host |
-| Note | It should be within 96 bytes |
+| Default Value | The first hostname configured for the host |
+| Note | It should be within 96 bytes | |
### serverPort
-| Attribute | Description |
-| ------------- | ------------------------------------------------------------------------------------------------------------------------------- |
-| Applicable | Server Only |
+| Attribute | Description |
+| -------- | ----------------------------------------------------------------------------------------------------------------------- |
+| Applicable | Server Only |
| Meaning | The port for external access after `taosd` is started |
| Default Value | 6030 |
:::note
-TDengine uses 13 continuous ports, both TCP and UDP, starting with the port specified by `serverPort`. You should ensure, in your firewall rules, that these ports are kept open. Below table describes the ports used by TDengine in details.
-
+- Ensure that your firewall rules do not block TCP port 6042 on any host in the cluster. Below table describes the ports used by TDengine in details.
:::
-
| Protocol | Default Port | Description | How to configure |
| :------- | :----------- | :----------------------------------------------- | :--------------------------------------------------------------------------------------------- |
-| TCP | 6030 | Communication between client and server | serverPort |
-| TCP | 6035 | Communication among server nodes in cluster | serverPort+5 |
-| TCP | 6040 | Data syncup among server nodes in cluster | serverPort+10 |
-| TCP | 6041 | REST connection between client and server | Please refer to [taosAdapter](../taosadapter/) |
-| TCP | 6042 | Service Port of Arbitrator | The parameter of Arbitrator |
-| TCP | 6043 | Service Port of TaosKeeper | The parameter of TaosKeeper |
-| TCP | 6044 | Data access port for StatsD | refer to [taosAdapter](../taosadapter/) |
-| UDP | 6045 | Data access for statsd | refer to [taosAdapter](../taosadapter/) |
-| TCP | 6060 | Port of Monitoring Service in Enterprise version | |
-| UDP | 6030-6034 | Communication between client and server | serverPort |
-| UDP | 6035-6039 | Communication among server nodes in cluster | serverPort |
+| TCP | 6030 | Communication between client and server. In a multi-node cluster, communication between nodes. serverPort |
+| TCP | 6041 | REST connection between client and server | Prior to 2.4.0.0: serverPort+11; After 2.4.0.0 refer to [taosAdapter](/reference/taosadapter/) |
+| TCP | 6043 | Service Port of TaosKeeper | The parameter of TaosKeeper |
+| TCP | 6044 | Data access port for StatsD | Configurable through taosAdapter parameters.
+| UDP | 6045 | Data access for statsd | Configurable through taosAdapter parameters.
+| TCP | 6060 | Port of Monitoring Service in Enterprise version | |
### maxShellConns
@@ -104,104 +97,118 @@ TDengine uses 13 continuous ports, both TCP and UDP, starting with the port spec
| Value Range | 10-50000000 |
| Default Value | 5000 |
-### maxConnections
+## Monitoring Parameters
-| Attribute | Description |
-| ------------- | ----------------------------------------------------------------------------- |
-| Applicable | Server Only |
-| Meaning | The maximum number of connections allowed by a database |
-| Value Range | 1-100000 |
-| Default Value | 5000 |
-| Note | The maximum number of worker threads on the client side is maxConnections/100 |
+### monitor
-### rpcForceTcp
+| Attribute | Description |
+| -------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
+| Applicable | Server only |
+| Meaning | The switch for monitoring inside server. The main object of monitoring is to collect information about load on physical nodes, including CPU usage, memory usage, disk usage, and network bandwidth. Monitoring information is sent over HTTP to the taosKeeper service specified by `monitorFqdn` and `monitorProt`.
+| Value Range | 0: monitoring disabled, 1: monitoring enabled |
+| Default | 1 |
-| Attribute | Description |
-| ------------- | ------------------------------------------------------------------- |
-| Applicable | Server and Client |
-| Meaning | TCP is used by force |
-| Value Range | 0: disabled 1: enabled |
-| Default Value | 0 |
-| Note | It's suggested to configure to enable if network is not good enough |
+### monitorFqdn
-## Monitoring Parameters
+| Attribute | Description |
+| -------- | -------------------------- |
+| Applicable | Server Only |
+| Meaning | FQDN of taosKeeper monitoring service |
+| Default | None |
-### monitor
+### monitorPort
-| Attribute | Description |
-| ------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
-| Applicable | Server Only |
-| Meaning | The switch for monitoring inside server. The workload of the hosts, including CPU, memory, disk, network, TTP requests, are collected and stored in a system builtin database `LOG` |
-| Value Range | 0: monitoring disabled, 1: monitoring enabled |
-| Default Value | 1 |
+| Attribute | Description |
+| -------- | --------------------------- |
+| Applicable | Server Only |
+| Meaning | Port of taosKeeper monitoring service |
+| Default Value | 6043 |
### monitorInterval
-| Attribute | Description |
-| ------------- | ------------------------------------------ |
-| Applicable | Server Only |
+| Attribute | Description |
+| -------- | -------------------------------------------- |
+| Applicable | Server Only |
| Meaning | The interval of collecting system workload |
| Unit | second |
-| Value Range | 1-600 |
-| Default Value | 30 |
+| Value Range | 1-200000 |
+| Default Value | 30 |
### telemetryReporting
-| Attribute | Description |
-| ------------- | ---------------------------------------------------------------------------- |
-| Applicable | Server Only |
+| Attribute | Description |
+| -------- | ---------------------------------------- |
+| Applicable | Server Only |
| Meaning | Switch for allowing TDengine to collect and report service usage information |
| Value Range | 0: Not allowed; 1: Allowed |
-| Default Value | 1 |
+| Default Value | 1 |
## Query Parameters
-### queryBufferSize
+### queryPolicy
+
+| Attribute | Description |
+| -------- | ----------------------------- |
+| Applicable | Client only |
+| Meaning | Execution policy for query statements |
+| Unit | None |
+| Default | 1 |
+| Notes | 1: Run queries on vnodes and not on qnodes |
-| Attribute | Description |
-| ------------- | ---------------------------------------------------------------------------------------- |
-| Applicable | Server Only |
-| Meaning | The total memory size reserved for all queries |
-| Unit | MB |
-| Default Value | None |
-| Note | It can be estimated by "maximum number of concurrent queries" _ "number of tables" _ 170 |
+2: Run subtasks without scan operators on qnodes and subtasks with scan operators on vnodes.
+
+3: Only run scan operators on vnodes; run all other operators on qnodes.
+
+### querySmaOptimize
+
+| Attribute | Description |
+| -------- | -------------------- |
+| Applicable | Client only |
+| Meaning | SMA index optimization policy |
+| Unit | None |
+| Default Value | 0 |
+| Notes |
-### ratioOfQueryCores
+0: Disable SMA indexing and perform all queries on non-indexed data.
+
+1: Enable SMA indexing and perform queries from suitable statements on precomputation results.|
-| Attribute | Description |
-| ------------- | ----------------------------------------------------------------------------------------------------- |
-| Applicable | Server Only |
-| Meaning | Maximum number of query threads |
-| Default Value | 1 |
-| Note | value range: float number between [0, 2] 0: only 1 query thread; >0: the times of the number of cores |
### maxNumOfDistinctRes
-| Attribute | Description |
-| ------------- | -------------------------------------------- |
-| Applicable | Server Only |
+| Attribute | Description |
+| -------- | -------------------------------- |
+| Applicable | Server Only |
| Meaning | The maximum number of distinct rows returned |
| Value Range | [100,000 - 100,000,000] |
| Default Value | 100,000 |
-| Note | After version 2.3.0.0 |
+
+### keepColumnName
+
+| Attribute | Description |
+| -------- | -------------------------------- |
+| Applicable | Client only |
+| Meaning | When the Last, First, LastRow function is queried, whether the returned column name contains the function name. |
+| Value Range | 0 means including the function name, 1 means not including the function name. |
+| Default Value | 0 |
## Locale Parameters
### timezone
-| Attribute | Description |
-| ------------- | ------------------------------- |
-| Applicable | Server and Client |
+| Attribute | Description |
+| -------- | ------------------------------ |
+| Applicable | Server and Client |
| Meaning | TimeZone |
| Default Value | TimeZone configured in the host |
:::info
-To handle the data insertion and data query from multiple timezones, Unix Timestamp is used and stored in TDengine. The timestamp generated from any timezones at same time is same in Unix timestamp. To make sure the time on client side can be converted to Unix timestamp correctly, the timezone must be set properly.
+To handle the data insertion and data query from multiple timezones, Unix Timestamp is used and stored in TDengine. The timestamp generated from any timezones at same time is same in Unix timestamp. Note that Unix timestamps are converted and recorded on the client side. To make sure the time on client side can be converted to Unix timestamp correctly, the timezone must be set properly.
-On Linux system, TDengine clients automatically obtain timezone from the host. Alternatively, the timezone can be configured explicitly in configuration file `taos.cfg` like below.
+On Linux system, TDengine clients automatically obtain timezone from the host. Alternatively, the timezone can be configured explicitly in configuration file `taos.cfg` like below. For example:
```
-timezone UTC-7
+timezone UTC-8
timezone GMT-8
timezone Asia/Shanghai
```
@@ -239,11 +246,11 @@ To avoid the problems of using time strings, Unix timestamp can be used directly
| Default Value | Locale configured in host |
:::info
-A specific type "nchar" is provided in TDengine to store non-ASCII characters such as Chinese, Japanese, and Korean. The characters to be stored in nchar type are firstly encoded in UCS4-LE before sending to server side. To store non-ASCII characters correctly, the encoding format of the client side needs to be set properly.
+A specific type "nchar" is provided in TDengine to store non-ASCII characters such as Chinese, Japanese, and Korean. The characters to be stored in nchar type are firstly encoded in UCS4-LE before sending to server side. Note that the correct encoding is determined by the user. To store non-ASCII characters correctly, the encoding format of the client side needs to be set properly.
The characters input on the client side are encoded using the default system encoding, which is UTF-8 on Linux, or GB18030 or GBK on some systems in Chinese, POSIX in docker, CP936 on Windows in Chinese. The encoding of the operating system in use must be set correctly so that the characters in nchar type can be converted to UCS4-LE.
-The locale definition standard on Linux is: \_., for example, in "zh_CN.UTF-8", "zh" means Chinese, "CN" means China mainland, "UTF-8" means charset. On Linux and Mac OSX, the charset can be set by locale in the system. On Windows system another configuration parameter `charset` must be used to configure charset because the locale used on Windows is not POSIX standard. Of course, `charset` can also be used on Linux to specify the charset.
+The locale definition standard on Linux is: \_., for example, in "zh_CN.UTF-8", "zh" means Chinese, "CN" means China mainland, "UTF-8" means charset. The charset indicates how to display the characters. On Linux and Mac OSX, the charset can be set by locale in the system. On Windows system another configuration parameter `charset` must be used to configure charset because the locale used on Windows is not POSIX standard. Of course, `charset` can also be used on Linux to specify the charset.
:::
@@ -256,29 +263,37 @@ The locale definition standard on Linux is: \_., f
| Default Value | charset set in the system |
:::info
-On Linux, if `charset` is not set in `taos.cfg`, when `taos` is started, the charset is obtained from system locale. If obtaining charset from system locale fails, `taos` would fail to start. So on Linux system, if system locale is set properly, it's not necessary to set `charset` in `taos.cfg`. For example:
+On Linux, if `charset` is not set in `taos.cfg`, when `taos` is started, the charset is obtained from system locale. If obtaining charset from system locale fails, `taos` would fail to start.
+
+So on Linux system, if system locale is set properly, it's not necessary to set `charset` in `taos.cfg`. For example:
```
locale zh_CN.UTF-8
```
+On Windows system, it's not possible to obtain charset from system locale. If it's not set in configuration file `taos.cfg`, it would be default to CP936, same as set as below in `taos.cfg`. For example
+
+```
+charset CP936
+```
+
+Refer to the documentation for your operating system before changing the charset.
+
On a Linux system, if the charset contained in `locale` is not consistent with that set by `charset`, the later setting in the configuration file takes precedence.
-```title="Effective charset is GBK"
+```
locale zh_CN.UTF-8
charset GBK
```
-```title="Effective charset is UTF-8"
+The charset that takes effect is GBK.
+
+```
charset GBK
locale zh_CN.UTF-8
```
-On Windows system, it's not possible to obtain charset from system locale. If it's not set in configuration file `taos.cfg`, it would be default to CP936, same as set as below in `taos.cfg`. For example
-
-```
-charset CP936
-```
+The charset that takes effect is UTF-8.
:::
@@ -286,429 +301,98 @@ charset CP936
### dataDir
-| Attribute | Description |
-| ------------- | ------------------------------------------- |
+| Attribute | Description |
+| -------- | ------------------------------------------ |
| Applicable | Server Only |
| Meaning | All data files are stored in this directory |
| Default Value | /var/lib/taos |
-### cache
-
-| Attribute | Description |
-| ------------- | ----------------------------- |
-| Applicable | Server Only |
-| Meaning | The size of each memory block |
-| Unit | MB |
-| Default Value | 16 |
-
-### blocks
-
-| Attribute | Description |
-| ------------- | -------------------------------------------------------------- |
-| Applicable | Server Only |
-| Meaning | The number of memory blocks of size `cache` used by each vnode |
-| Default Value | 6 |
-
-### days
-
-| Attribute | Description |
-| ------------- | ----------------------------------------------------- |
-| Applicable | Server Only |
-| Meaning | The time range of the data stored in single data file |
-| Unit | day |
-| Default Value | 10 |
-
-### keep
-
-| Attribute | Description |
-| ------------- | -------------------------------------- |
-| Applicable | Server Only |
-| Meaning | The number of days for data to be kept |
-| Unit | day |
-| Default Value | 3650 |
-
-### minRows
-
-| Attribute | Description |
-| ------------- | ------------------------------------------ |
-| Applicable | Server Only |
-| Meaning | minimum number of rows in single data file |
-| Default Value | 100 |
-
-### maxRows
-
-| Attribute | Description |
-| ------------- | ------------------------------------------ |
-| Applicable | Server Only |
-| Meaning | maximum number of rows in single data file |
-| Default Value | 4096 |
-
-### walLevel
-
-| Attribute | Description |
-| ------------- | ---------------------------------------------------------------------------------- |
-| Applicable | Server Only |
-| Meaning | WAL level |
-| Value Range | 0: wal disabled 1: wal enabled without fsync 2: wal enabled with fsync |
-| Default Value | 1 |
-
-### fsync
-
-| Attribute | Description |
-| ------------- | --------------------------------------------------------------------------------------------------------------------- |
-| Applicable | Server Only |
-| Meaning | The waiting time for invoking fsync when walLevel is 2 |
-| Unit | millisecond |
-| Value Range | 0: no waiting time, fsync is performed immediately once WAL is written; maximum value is 180000, i.e. 3 minutes |
-| Default Value | 3000 |
-
-### update
-
-| Attribute | Description |
-| ------------- | ------------------------------------------------------------------------------------------------------ |
-| Applicable | Server Only |
-| Meaning | If it's allowed to update existing data |
-| Value Range | 0: not allowed 1: a row can only be updated as a whole 2: a part of columns can be updated |
-| Default Value | 0 |
-| Note | Not available from version 2.0.8.0 |
-
-### cacheLast
-
-| Attribute | Description |
-| ------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
-| Applicable | Server Only |
-| Meaning | Whether to cache the latest rows of each sub table in memory |
-| Value Range | 0: not cached 1: the last row of each sub table is cached 2: the last non-null value of each column is cached 3: identical to both 1 and 2 are set |
-| Default Value | 0 |
-
### minimalTmpDirGB
-| Attribute | Description |
-| ------------- | ----------------------------------------------------------------------------------------------- |
-| Applicable | Server and Client |
+| Attribute | Description |
+| -------- | ------------------------------------------------ |
+| Applicable | Server and Client |
| Meaning | When the available disk space in tmpDir is below this threshold, writing to tmpDir is suspended |
-| Unit | GB |
-| Default Value | 1.0 |
+| Unit | GB |
+| Default Value | 1.0 |
### minimalDataDirGB
-| Attribute | Description |
-| ------------- | ------------------------------------------------------------------------------------------------ |
-| Applicable | Server Only |
-| Meaning | hen the available disk space in dataDir is below this threshold, writing to dataDir is suspended |
-| Unit | GB |
-| Default Value | 2.0 |
-
-### vnodeBak
-
-| Attribute | Description |
-| ------------- | --------------------------------------------------------------------------- |
-| Applicable | Server Only |
-| Meaning | Whether to backup the corresponding vnode directory when a vnode is deleted |
-| Value Range | 0: not backed up, 1: backup |
-| Default Value | 1 |
+| Attribute | Description |
+| -------- | ------------------------------------------------ |
+| Applicable | Server Only |
+| Meaning | When the available disk space in dataDir is below this threshold, writing to dataDir is suspended |
+| Unit | GB |
+| Default Value | 2.0 |
## Cluster Parameters
-### numOfMnodes
-
-| Attribute | Description |
-| ------------- | ------------------------------ |
-| Applicable | Server Only |
-| Meaning | The number of management nodes |
-| Default Value | 3 |
-
-### replica
-
-| Attribute | Description |
-| ------------- | -------------------------- |
-| Applicable | Server Only |
-| Meaning | The number of replications |
-| Value Range | 1-3 |
-| Default Value | 1 |
-
-### quorum
-
-| Attribute | Description |
-| ------------- | ------------------------------------------------------------------------------------------ |
-| Applicable | Server Only |
-| Meaning | The number of required confirmations for data replication in case of multiple replications |
-| Value Range | 1,2 |
-| Default Value | 1 |
-
-### role
-
-| Attribute | Description |
-| ------------- | --------------------------------------------------------------- |
-| Applicable | Server Only |
-| Meaning | The role of the dnode |
-| Value Range | 0: both mnode and vnode 1: mnode only 2: dnode only |
-| Default Value | 0 |
-
-### balance
-
-| Attribute | Description |
-| ------------- | ------------------------ |
-| Applicable | Server Only |
-| Meaning | Automatic load balancing |
-| Value Range | 0: disabled, 1: enabled |
-| Default Value | 1 |
+### supportVnodes
-### balanceInterval
-
-| Attribute | Description |
-| ------------- | ----------------------------------------------- |
-| Applicable | Server Only |
-| Meaning | The interval for checking load balance by mnode |
-| Unit | second |
-| Value Range | 1-30000 |
-| Default Value | 300 |
-
-### arbitrator
-
-| Attribute | Description |
-| ------------- | -------------------------------------------------- |
-| Applicable | Server Only |
-| Meaning | End point of arbitrator, format is same as firstEp |
-| Default Value | None |
+| Attribute | Description |
+| -------- | --------------------------- |
+| Applicable | Server Only |
+| Meaning | Maximum number of vnodes per dnode |
+| Value Range | 0-4096 |
+| Default Value | 2x the CPU cores |
## Time Parameters
-### precision
-
-| Attribute | Description |
-| ------------- | ------------------------------------------------- |
-| Applicable | Server only |
-| Meaning | Time precision used for each database |
-| Value Range | ms: millisecond; us: microsecond ; ns: nanosecond |
-| Default Value | ms |
-
-### rpcTimer
-
-| Attribute | Description |
-| ------------- | ------------------ |
-| Applicable | Server and Client |
-| Meaning | rpc retry interval |
-| Unit | milliseconds |
-| Value Range | 100-3000 |
-| Default Value | 300 |
-
-### rpcMaxTime
-
-| Attribute | Description |
-| ------------- | ---------------------------------- |
-| Applicable | Server and Client |
-| Meaning | maximum wait time for rpc response |
-| Unit | second |
-| Value Range | 100-7200 |
-| Default Value | 600 |
-
### statusInterval
-| Attribute | Description |
-| ------------- | ----------------------------------------------- |
-| Applicable | Server Only |
+| Attribute | Description |
+| -------- | --------------------------- |
+| Applicable | Server Only |
| Meaning | the interval of dnode reporting status to mnode |
| Unit | second |
-| Value Range | 1-10 |
-| Default Value | 1 |
+| Value Range | 1-10 |
+| Default Value | 1 |
### shellActivityTimer
-| Attribute | Description |
-| ------------- | ------------------------------------------------------ |
-| Applicable | Server and Client |
-| Meaning | The interval for taos shell to send heartbeat to mnode |
-| Unit | second |
-| Value Range | 1-120 |
-| Default Value | 3 |
-
-### tableMetaKeepTimer
-
-| Attribute | Description |
-| ------------- | -------------------------------------------------------------------------------------------------- |
-| Applicable | Server Only |
-| Meaning | The expiration time for metadata in cache, once it's reached the client would refresh the metadata |
-| Unit | second |
-| Value Range | 1-8640000 |
-| Default Value | 7200 |
-
-### maxTmrCtrl
-
-| Attribute | Description |
-| ------------- | ------------------------ |
-| Applicable | Server and Client |
-| Meaning | Maximum number of timers |
-| Unit | None |
-| Value Range | 8-2048 |
-| Default Value | 512 |
-
-### offlineThreshold
-
-| Attribute | Description |
-| ------------- | ----------------------------------------------------------------------------------------------------------------------------- |
-| Applicable | Server Only |
-| Meaning | The expiration time for dnode online status, once it's reached before receiving status from a node, the dnode becomes offline |
-| Unit | second |
-| Value Range | 5-7200000 |
-| Default Value | 86400\*10 (i.e. 10 days) |
-
-## Performance Optimization Parameters
-
-### numOfThreadsPerCore
-
-| Attribute | Description |
-| ------------- | ------------------------------------------- |
-| Applicable | Server and Client |
-| Meaning | The number of consumer threads per CPU core |
-| Default Value | 1.0 |
-
-### ratioOfQueryThreads
-
-| Attribute | Description |
-| ------------- | --------------------------------------------------------------------------------------------- |
-| Applicable | Server Only |
-| Meaning | Maximum number of query threads |
-| Value Range | 0: Only one query thread 1: Same as number of CPU cores 2: two times of CPU cores |
-| Default Value | 1 |
-| Note | This value can be a float number, 0.5 means half of the CPU cores |
-
-### maxVgroupsPerDb
-
-| Attribute | Description |
-| ------------- | ------------------------------------ |
-| Applicable | Server Only |
-| Meaning | Maximum number of vnodes for each DB |
-| Value Range | 0-8192 |
-| Default Value | |
-
-### maxTablesPerVnode
-
-| Attribute | Description |
-| ------------- | -------------------------------------- |
-| Applicable | Server Only |
-| Meaning | Maximum number of tables in each vnode |
-| Default Value | 1000000 |
-
-### minTablesPerVnode
-
| Attribute | Description |
-| ------------- | -------------------------------------- |
-| Applicable | Server Only |
-| Meaning | Minimum number of tables in each vnode |
-| Default Value | 1000 |
-
-### tableIncStepPerVnode
-
-| Attribute | Description |
-| ------------- | ------------------------------------------------------------------------------------------- |
-| Applicable | Server Only |
-| Meaning | When minTablesPerVnode is reached, the number of tables are allocated for a vnode each time |
-| Default Value | 1000 |
-
-### maxNumOfOrderedRes
-
-| Attribute | Description |
-| ------------- | ------------------------------------------- |
-| Applicable | Server and Client |
-| Meaning | Maximum number of rows ordered for a STable |
-| Default Value | 100,000 |
-
-### mnodeEqualVnodeNum
+| -------- | --------------------------------- |
+| Applicable | Server and Client |
+| Meaning | The interval for TDengine CLI to send heartbeat to mnode |
+| Unit | second |
+| Value Range | 1-120 |
+| Default Value | 3 |
-| Attribute | Description |
-| ------------- | ----------------------------------------------------------------------------------------------- |
-| Applicable | Server Only |
-| Meaning | The number of vnodes whose system resources consumption are considered as equal to single mnode |
-| Default Value | 4 |
+## Performance Optimization Parameters
### numOfCommitThreads
-| Attribute | Description |
-| ------------- | ----------------------------------------- |
-| Applicable | Server Only |
+| Attribute | Description |
+| -------- | ---------------------- |
+| Applicable | Server Only |
| Meaning | Maximum of threads for committing to disk |
-| Default Value | |
+| Default Value | |
## Compression Parameters
-### comp
-
-| Attribute | Description |
-| ------------- | ------------------------------------------------------------------- |
-| Applicable | Server Only |
-| Meaning | Whether data is compressed |
-| Value Range | 0: uncompressed, 1: One phase compression, 2: Two phase compression |
-| Default Value | 2 |
-
-### tsdbMetaCompactRatio
-
-| Attribute | Description |
-| ------------- | ------------------------------------------------------------------------------------------- |
-| Meaning | The threshold for percentage of redundant in meta file to trigger compression for meta file |
-| Value Range | 0: no compression forever, [1-100]: The threshold percentage |
-| Default Value | 0 |
-
### compressMsgSize
-| Attribute | Description |
-| ------------- | -------------------------------------------------------------------------------- |
-| Applicable | Server Only |
-| Meaning | The threshold for message size to compress the message.. |
+| Attribute | Description |
+| ------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
+| Applicable | Server Only |
+| Meaning | The threshold for message size to compress the message. | Set the value to 64330 bytes for good message compression. |
| Unit | bytes |
| Value Range | 0: already compress; >0: compress when message exceeds it; -1: always uncompress |
-| Default Value | -1 |
+| Default Value | -1 |
### compressColData
-| Attribute | Description |
-| ------------- | ------------------------------------------------------------------------------------------------------------------- |
-| Applicable | Server Only |
-| Meaning | The threshold for size of column data to trigger compression for the query result |
+| Attribute | Description |
+| -------- | --------------------------------------------------------------------------------------- |
+| Applicable | Server Only |
+| Meaning | The threshold for size of column data to trigger compression for the query result |
| Unit | bytes |
| Value Range | 0: always compress; >0: only compress when the size of any column data exceeds the threshold; -1: always uncompress |
+| Default Value | -1 |
| Default Value | -1 |
-| Note | available from version 2.3.0.0 |
-
-### lossyColumns
-
-| Attribute | Description |
-| ------------- | ------------------------------------------------------------------------------------------------------------------------------------------- |
-| Applicable | Server Only |
-| Meaning | The floating number types for lossy compression |
-| Value Range | "": lossy compression is disabled float: only for float double: only for double float \| double: for both float and double |
-| Default Value | "" , i.e. disabled |
-
-### fPrecision
-
-| Attribute | Description |
-| ------------- | ----------------------------------------------------------- |
-| Applicable | Server Only |
-| Meaning | Compression precision for float type |
-| Value Range | 0.1 ~ 0.00000001 |
-| Default Value | 0.00000001 |
-| Note | The fractional part lower than this value will be discarded |
-
-### dPrecision
+| Note | available from version 2.3.0.0 | |
-| Attribute | Description |
-| ------------- | ----------------------------------------------------------- |
-| Applicable | Server Only |
-| Meaning | Compression precision for double type |
-| Value Range | 0.1 ~ 0.0000000000000001 |
-| Default Value | 0.0000000000000001 |
-| Note | The fractional part lower than this value will be discarded |
-
-## Continuous Query Parameters
-
-### stream
-
-| Attribute | Description |
-| ------------- | ---------------------------------- |
-| Applicable | Server Only |
-| Meaning | Whether to enable continuous query |
-| Value Range | 0: disabled 1: enabled |
-| Default Value | 1 |
+## Continuous Query Parameters |
### minSlidingTime
@@ -730,375 +414,446 @@ charset CP936
| Value Range | 1-1000000 |
| Default Value | 10 |
-### maxStreamCompDelay
-
-| Attribute | Description |
-| ------------- | ------------------------------------------------ |
-| Applicable | Server Only |
-| Meaning | Maximum delay before starting a continuous query |
-| Unit | millisecond |
-| Value Range | 10-1000000000 |
-| Default Value | 20000 |
-
-### maxFirstStreamCompDelay
-
-| Attribute | Description |
-| ------------- | -------------------------------------------------------------------- |
-| Applicable | Server Only |
-| Meaning | Maximum delay time before starting a continuous query the first time |
-| Unit | millisecond |
-| Value Range | 10-1000000000 |
-| Default Value | 10000 |
-
-### retryStreamCompDelay
-
-| Attribute | Description |
-| ------------- | --------------------------------------------- |
-| Applicable | Server Only |
-| Meaning | Delay time before retrying a continuous query |
-| Unit | millisecond |
-| Value Range | 10-1000000000 |
-| Default Value | 10 |
-
-### streamCompDelayRatio
-
-| Attribute | Description |
-| ------------- | ------------------------------------------------------------------------ |
-| Applicable | Server Only |
-| Meaning | The delay ratio, with time window size as the base, for continuous query |
-| Value Range | 0.1-0.9 |
-| Default Value | 0.1 |
-
:::info
To prevent system resource from being exhausted by multiple concurrent streams, a random delay is applied on each stream automatically. `maxFirstStreamCompDelay` is the maximum delay time before a continuous query is started the first time. `streamCompDelayRatio` is the ratio for calculating delay time, with the size of the time window as base. `maxStreamCompDelay` is the maximum delay time. The actual delay time is a random time not bigger than `maxStreamCompDelay`. If a continuous query fails, `retryStreamComDelay` is the delay time before retrying it, also not bigger than `maxStreamCompDelay`.
:::
-## HTTP Parameters
-
-### http
-
-| Attribute | Description |
-| ------------- | ------------------------------ |
-| Applicable | Server Only |
-| Meaning | Whether to enable http service |
-| Value Range | 0: disabled, 1: enabled |
-| Default Value | 1 |
-
-### httpEnableRecordSql
-
-| Attribute | Description |
-| ------------- | ------------------------------------------------------------------------- |
-| Applicable | Server Only |
-| Meaning | Whether to record the SQL invocation through REST interface |
-| Default Value | 0: false; 1: true |
-| Note | The resulting files, i.e. httpnote.0/httpnote.1, are located under logDir |
-
-### httpMaxThreads
-
-| Attribute | Description |
-| ------------- | -------------------------------------------- |
-| Applicable | Server Only |
-| Meaning | The number of threads for RESTFul interface. |
-| Default Value | 2 |
-
-### restfulRowLimit
-
-| Attribute | Description |
-| ------------- | ------------------------------------------------------------ |
-| Applicable | Server Only |
-| Meaning | Maximum number of rows returned each time by REST interface. |
-| Default Value | 10240 |
-| Note | Maximum value is 10,000,000 |
-
-### httpDBNameMandatory
-
-| Attribute | Description |
-| ------------- | ---------------------------------------- |
-| Applicable | Server Only |
-| Meaning | Whether database name is required in URL |
-| Value Range | 0:not required, 1: required |
-| Default Value | 0 |
-| Note | From version 2.3.0.0 |
-
## Log Parameters
### logDir
-| Attribute | Description |
-| ------------- | ----------------------------------- |
-| Applicable | Server and Client |
+| Attribute | Description |
+| -------- | -------------------------------------------------- |
+| Applicable | Server and Client |
| Meaning | The directory for writing log files |
| Default Value | /var/log/taos |
### minimalLogDirGB
-| Attribute | Description |
-| ------------- | -------------------------------------------------------------------------------------------------- |
-| Applicable | Server and Client |
+| Attribute | Description |
+| -------- | -------------------------------------------- |
+| Applicable | Server and Client |
| Meaning | When the available disk space in logDir is below this threshold, writing to log files is suspended |
-| Unit | GB |
-| Default Value | 1.0 |
+| Unit | GB |
+| Default Value | 1.0 |
### numOfLogLines
-| Attribute | Description |
-| ------------- | ------------------------------------------ |
-| Applicable | Server and Client |
+| Attribute | Description |
+| -------- | ---------------------------- |
+| Applicable | Server and Client |
| Meaning | Maximum number of lines in single log file |
-| Default Value | 10,000,000 |
+| Default Value | 10000000 |
### asyncLog
-| Attribute | Description |
-| ------------- | ---------------------------- |
-| Applicable | Server and Client |
+| Attribute | Description |
+| -------- | -------------------- |
+| Applicable | Server and Client |
| Meaning | The mode of writing log file |
| Value Range | 0: sync way; 1: async way |
-| Default Value | 1 |
+| Default Value | 1 |
### logKeepDays
-| Attribute | Description |
-| ------------- | ------------------------------------------------------------------------------------------------------------------------------------------- |
-| Applicable | Server and Client |
+| Attribute | Description |
+| -------- | ----------------------------------------------------------------------------------- |
+| Applicable | Server and Client |
| Meaning | The number of days for log files to be kept |
-| Unit | day |
-| Default Value | 0 |
+| Unit | day |
+| Default Value | 0 |
| Note | When it's bigger than 0, the log file would be renamed to "taosdlog.xxx" in which "xxx" is the timestamp when the file is changed last time |
### debugFlag
-| Attribute | Description |
-| ------------- | --------------------------------------------------------- |
-| Applicable | Server and Client |
+| Attribute | Description |
+| -------- | ------------------------------------------------------------------------------------------------- |
+| Applicable | Server and Client |
| Meaning | Log level |
| Value Range | 131: INFO/WARNING/ERROR; 135: plus DEBUG; 143: plus TRACE |
| Default Value | 131 or 135, depending on the module |
-### mDebugFlag
+### tmrDebugFlag
-| Attribute | Description |
-| ------------- | ------------------ |
-| Applicable | Server Only |
-| Meaning | Log level of mnode |
+| Attribute | Description |
+| -------- | -------------------- |
+| Applicable | Server and Client |
+| Meaning | Log level of timer module |
| Value Range | same as debugFlag |
-| Default Value | 135 |
+| Default Value | |
-### dDebugFlag
+### uDebugFlag
-| Attribute | Description |
-| ------------- | ------------------ |
-| Applicable | Server and Client |
-| Meaning | Log level of dnode |
+| Attribute | Description |
+| -------- | ---------------------- |
+| Applicable | Server and Client |
+| Meaning | Log level of common module |
| Value Range | same as debugFlag |
-| Default Value | 135 |
-
-### sDebugFlag
-
-| Attribute | Description |
-| ------------- | ------------------------ |
-| Applicable | Server and Client |
-| Meaning | Log level of sync module |
-| Value Range | same as debugFlag |
-| Default Value | 135 |
-
-### wDebugFlag
-
-| Attribute | Description |
-| ------------- | ----------------------- |
-| Applicable | Server and Client |
-| Meaning | Log level of WAL module |
-| Value Range | same as debugFlag |
-| Default Value | 135 |
-
-### sdbDebugFlag
-
-| Attribute | Description |
-| ------------- | ---------------------- |
-| Applicable | Server and Client |
-| Meaning | logLevel of sdb module |
-| Value Range | same as debugFlag |
-| Default Value | 135 |
+| Default Value | |
### rpcDebugFlag
-| Attribute | Description |
-| ------------- | ----------------------- |
-| Applicable | Server and Client |
+| Attribute | Description |
+| -------- | -------------------- |
+| Applicable | Server and Client |
| Meaning | Log level of rpc module |
-| Value Range | Same as debugFlag |
-| Default Value | |
+| Value Range | same as debugFlag |
+| Default Value | |
-### tmrDebugFlag
+### jniDebugFlag
-| Attribute | Description |
-| ------------- | ------------------------- |
+| Attribute | Description |
+| -------- | ------------------ |
+| Applicable | Client Only |
+| Meaning | Log level of jni module |
+| Value Range | same as debugFlag |
+| Default Value | |
+
+### qDebugFlag
+
+| Attribute | Description |
+| -------- | -------------------- |
| Applicable | Server and Client |
-| Meaning | Log level of timer module |
-| Value Range | Same as debugFlag |
-| Default Value | |
+| Meaning | Log level of query module |
+| Value Range | same as debugFlag |
+| Default Value | |
### cDebugFlag
-| Attribute | Description |
-| ------------- | ------------------- |
+| Attribute | Description |
+| -------- | --------------------- |
| Applicable | Client Only |
| Meaning | Log level of Client |
-| Value Range | Same as debugFlag |
-| Default Value | |
-
-### jniDebugFlag
-
-| Attribute | Description |
-| ------------- | ----------------------- |
-| Applicable | Client Only |
-| Meaning | Log level of jni module |
-| Value Range | Same as debugFlag |
-| Default Value | |
+| Value Range | same as debugFlag |
+| Default Value | |
-### odbcDebugFlag
+### dDebugFlag
-| Attribute | Description |
-| ------------- | ------------------------ |
-| Applicable | Client Only |
-| Meaning | Log level of odbc module |
-| Value Range | Same as debugFlag |
-| Default Value | |
+| Attribute | Description |
+| -------- | -------------------- |
+| Applicable | Server Only |
+| Meaning | Log level of dnode |
+| Value Range | same as debugFlag |
+| Default Value | 135 |
-### uDebugFlag
+### vDebugFlag
-| Attribute | Description |
-| ------------- | -------------------------- |
-| Applicable | Server and Client |
-| Meaning | Log level of common module |
-| Value Range | Same as debugFlag |
-| Default Value | | |
+| Attribute | Description |
+| -------- | -------------------- |
+| Applicable | Server Only |
+| Meaning | Log level of vnode |
+| Value Range | same as debugFlag |
+| Default Value | |
-### mqttDebugFlag
+### mDebugFlag
-| Attribute | Description |
-| ------------- | ------------------------ |
-| Applicable | Server Only |
-| Meaning | Log level of mqtt module |
-| Value Range | Same as debugFlag |
-| Default Value | |
+| Attribute | Description |
+| -------- | -------------------- |
+| Applicable | Server Only |
+| Meaning | Log level of mnode module |
+| Value Range | same as debugFlag |
+| Default Value | 135 |
-### monitorDebugFlag
+### wDebugFlag
-| Attribute | Description |
-| ------------- | ------------------------------ |
-| Applicable | Server Only |
-| Meaning | Log level of monitoring module |
-| Value Range | Same as debugFlag |
-| Default Value | |
+| Attribute | Description |
+| -------- | ------------------ |
+| Applicable | Server Only |
+| Meaning | Log level of WAL module |
+| Value Range | same as debugFlag |
+| Default Value | 135 |
-### qDebugFlag
+### sDebugFlag
-| Attribute | Description |
-| ------------- | ------------------------- |
+| Attribute | Description |
+| -------- | -------------------- |
| Applicable | Server and Client |
-| Meaning | Log level of query module |
-| Value Range | Same as debugFlag |
-| Default Value | |
-
-### vDebugFlag
-
-| Attribute | Description |
-| ------------- | ------------------ |
-| Applicable | Server and Client |
-| Meaning | Log level of vnode |
-| Value Range | Same as debugFlag |
-| Default Value | |
+| Meaning | Log level of sync module |
+| Value Range | same as debugFlag |
+| Default Value | 135 |
### tsdbDebugFlag
-| Attribute | Description |
-| ------------- | ------------------------ |
-| Applicable | Server Only |
-| Meaning | Log level of TSDB module |
-| Value Range | Same as debugFlag |
-| Default Value | |
+| Attribute | Description |
+| -------- | ------------------- |
+| Applicable | Server Only |
+| Meaning | Log level of TSDB module |
+| Value Range | same as debugFlag |
+| Default Value | |
-### cqDebugFlag
+### tqDebugFlag
| Attribute | Description |
-| ------------- | ------------------------------------ |
-| Applicable | Server and Client |
-| Meaning | Log level of continuous query module |
-| Value Range | Same as debugFlag |
-| Default Value | |
+| -------- | ----------------- |
+| Applicable | Server only |
+| Meaning | Log level of TQ module |
+| Value Range | same as debugFlag |
+| Default Value | |
-## Client Only
+### fsDebugFlag
-### maxSQLLength
+| Attribute | Description |
+| -------- | ----------------- |
+| Applicable | Server only |
+| Meaning | Log level of FS module |
+| Value Range | same as debugFlag |
+| Default Value | |
+
+### udfDebugFlag
| Attribute | Description |
-| ------------- | -------------------------------------- |
-| Applicable | Client Only |
-| Meaning | Maximum length of single SQL statement |
-| Unit | bytes |
-| Value Range | 65480-1048576 |
-| Default Value | 1048576 |
+| -------- | ------------------ |
+| Applicable | Server Only |
+| Meaning | Log level of UDF module |
+| Value Range | same as debugFlag |
+| Default Value | |
+
+### smaDebugFlag
-### tscEnableRecordSql
+| Attribute | Description |
+| -------- | ------------------ |
+| Applicable | Server Only |
+| Meaning | Log level of SMA module |
+| Value Range | same as debugFlag |
+| Default Value | |
-| Attribute | Description |
-| ------------- | ------------------------------------------------------------------------------------------------------------------------------------------------- |
-| Meaning | Whether to record SQL statements in file |
-| Value Range | 0: false, 1: true |
-| Default Value | 0 |
-| Note | The generated files are named as "tscnote-xxxx.0/tscnote-xxx.1" in which "xxxx" is the pid of the client, and located at same place as client log |
+### idxDebugFlag
-### maxBinaryDisplayWidth
+| Attribute | Description |
+| -------- | -------------------- |
+| Applicable | Server Only |
+| Meaning | Log level of index module |
+| Value Range | same as debugFlag |
+| Default Value | |
-| Attribute | Description |
-| ------------- | --------------------------------------------------------------------------------------------------- |
-| Meaning | Maximum display width of binary and nchar in taos shell. Anything beyond this limit would be hidden |
-| Value Range | 5 - |
-| Default Value | 30 |
+### tdbDebugFlag
-:::info
-If the length of value exceeds `maxBinaryDisplayWidth`, then the actual display width is max(column name, maxBinaryDisplayLength); otherwise the actual display width is max(length of column name, length of column value). This parameter can also be changed dynamically using `set max_binary_display_width ` in TDengine CLI `taos`.
+| Attribute | Description |
+| -------- | ------------------ |
+| Applicable | Server Only |
+| Meaning | Log level of TDB module |
+| Value Range | same as debugFlag |
+| Default Value | |
-:::
+## Schemaless Parameters
-### maxWildCardsLength
+### smlChildTableName
-| Attribute | Description |
-| ------------- | ----------------------------------------------------- |
-| Meaning | The maximum length for wildcard string used with LIKE |
-| Unit | bytes |
-| Value Range | 0-16384 |
-| Default Value | 100 |
-| Note | From version 2.1.6.1 |
+| Attribute | Description |
+| -------- | ------------------------- |
+| Applicable | Client only |
+| Meaning | Custom subtable name for schemaless writes |
+| Type | String |
+| Default Value | None |
-### clientMerge
+### smlTagName
-| Attribute | Description |
-| ------------- | --------------------------------------------------- |
-| Meaning | Whether to filter out duplicate data on client side |
-| Value Range | 0: false; 1: true |
-| Default Value | 0 |
-| Note | From version 2.3.0.0 |
+| Attribute | Description |
+| -------- | ------------------------------------ |
+| Applicable | Client only |
+| Meaning | Default tag for schemaless writes without tag value specified |
+| Type | String |
+| Default Value | _tag_null |
-### maxRegexStringLen
+### smlDataFormat
| Attribute | Description |
-| ------------- | ------------------------------------ |
-| Meaning | Maximum length of regular expression |
-| Value Range | [128, 16384] |
-| Default Value | 128 |
-| Note | From version 2.3.0.0 |
+| -------- | ----------------------------- |
+| Applicable | Client only |
+| Meaning | Whether schemaless columns are consistently ordered |
+| Value Range | 0: not consistent; 1: consistent. |
+| Default | 1 |
## Other Parameters
### enableCoreFile
-| Attribute | Description |
-| ------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
+| Attribute | Description |
+| -------- | ------------------------------------------------------------------------------------------------------------------------------------------ |
| Applicable | Server and Client |
| Meaning | Whether to generate core file when server crashes |
| Value Range | 0: false, 1: true |
| Default Value | 1 |
| Note | The core file is generated under root directory `systemctl start taosd` is used to start, or under the working directory if `taosd` is started directly on Linux Shell. |
+
+### udf
+
+| Attribute | Description |
+| -------- | ------------------ |
+| Applicable | Server Only |
+| Meaning | Whether the UDF service is enabled |
+| Value Range | 0: disable UDF; 1: enabled UDF |
+| Default Value | 1 |
+
+## Parameter Comparison of TDengine 2.x and 3.0
+| # | **Parameter** | **In 2.x** | **In 3.0** |
+| --- | :-----------------: | --------------- | --------------- |
+| 1 | firstEp | Yes | Yes |
+| 2 | secondEp | Yes | Yes |
+| 3 | fqdn | Yes | Yes |
+| 4 | serverPort | Yes | Yes |
+| 5 | maxShellConns | Yes | Yes |
+| 6 | monitor | Yes | Yes |
+| 7 | monitorFqdn | No | Yes |
+| 8 | monitorPort | No | Yes |
+| 9 | monitorInterval | Yes | Yes |
+| 10 | monitorMaxLogs | No | Yes |
+| 11 | monitorComp | No | Yes |
+| 12 | telemetryReporting | Yes | Yes |
+| 13 | telemetryInterval | No | Yes |
+| 14 | telemetryServer | No | Yes |
+| 15 | telemetryPort | No | Yes |
+| 16 | queryPolicy | No | Yes |
+| 17 | querySmaOptimize | No | Yes |
+| 18 | queryRsmaTolerance | No | Yes |
+| 19 | queryBufferSize | Yes | Yes |
+| 20 | maxNumOfDistinctRes | Yes | Yes |
+| 21 | minSlidingTime | Yes | Yes |
+| 22 | minIntervalTime | Yes | Yes |
+| 23 | countAlwaysReturnValue | Yes | Yes |
+| 24 | dataDir | Yes | Yes |
+| 25 | minimalDataDirGB | Yes | Yes |
+| 26 | supportVnodes | No | Yes |
+| 27 | tempDir | Yes | Yes |
+| 28 | minimalTmpDirGB | Yes | Yes |
+| 29 | compressMsgSize | Yes | Yes |
+| 30 | compressColData | Yes | Yes |
+| 31 | smlChildTableName | Yes | Yes |
+| 32 | smlTagName | Yes | Yes |
+| 33 | smlDataFormat | No | Yes |
+| 34 | statusInterval | Yes | Yes |
+| 35 | shellActivityTimer | Yes | Yes |
+| 36 | transPullupInterval | No | Yes |
+| 37 | mqRebalanceInterval | No | Yes |
+| 38 | ttlUnit | No | Yes |
+| 39 | ttlPushInterval | No | Yes |
+| 40 | numOfTaskQueueThreads | No | Yes |
+| 41 | numOfRpcThreads | No | Yes |
+| 42 | numOfCommitThreads | Yes | Yes |
+| 43 | numOfMnodeReadThreads | No | Yes |
+| 44 | numOfVnodeQueryThreads | No | Yes |
+| 45 | numOfVnodeStreamThreads | No | Yes |
+| 46 | numOfVnodeFetchThreads | No | Yes |
+| 47 | numOfVnodeWriteThreads | No | Yes |
+| 48 | numOfVnodeSyncThreads | No | Yes |
+| 49 | numOfVnodeRsmaThreads | No | Yes |
+| 50 | numOfQnodeQueryThreads | No | Yes |
+| 51 | numOfQnodeFetchThreads | No | Yes |
+| 52 | numOfSnodeSharedThreads | No | Yes |
+| 53 | numOfSnodeUniqueThreads | No | Yes |
+| 54 | rpcQueueMemoryAllowed | No | Yes |
+| 55 | logDir | Yes | Yes |
+| 56 | minimalLogDirGB | Yes | Yes |
+| 57 | numOfLogLines | Yes | Yes |
+| 58 | asyncLog | Yes | Yes |
+| 59 | logKeepDays | Yes | Yes |
+| 60 | debugFlag | Yes | Yes |
+| 61 | tmrDebugFlag | Yes | Yes |
+| 62 | uDebugFlag | Yes | Yes |
+| 63 | rpcDebugFlag | Yes | Yes |
+| 64 | jniDebugFlag | Yes | Yes |
+| 65 | qDebugFlag | Yes | Yes |
+| 66 | cDebugFlag | Yes | Yes |
+| 67 | dDebugFlag | Yes | Yes |
+| 68 | vDebugFlag | Yes | Yes |
+| 69 | mDebugFlag | Yes | Yes |
+| 70 | wDebugFlag | Yes | Yes |
+| 71 | sDebugFlag | Yes | Yes |
+| 72 | tsdbDebugFlag | Yes | Yes |
+| 73 | tqDebugFlag | No | Yes |
+| 74 | fsDebugFlag | Yes | Yes |
+| 75 | udfDebugFlag | No | Yes |
+| 76 | smaDebugFlag | No | Yes |
+| 77 | idxDebugFlag | No | Yes |
+| 78 | tdbDebugFlag | No | Yes |
+| 79 | metaDebugFlag | No | Yes |
+| 80 | timezone | Yes | Yes |
+| 81 | locale | Yes | Yes |
+| 82 | charset | Yes | Yes |
+| 83 | udf | Yes | Yes |
+| 84 | enableCoreFile | Yes | Yes |
+| 85 | arbitrator | Yes | No |
+| 86 | numOfThreadsPerCore | Yes | No |
+| 87 | numOfMnodes | Yes | No |
+| 88 | vnodeBak | Yes | No |
+| 89 | balance | Yes | No |
+| 90 | balanceInterval | Yes | No |
+| 91 | offlineThreshold | Yes | No |
+| 92 | role | Yes | No |
+| 93 | dnodeNopLoop | Yes | No |
+| 94 | keepTimeOffset | Yes | No |
+| 95 | rpcTimer | Yes | No |
+| 96 | rpcMaxTime | Yes | No |
+| 97 | rpcForceTcp | Yes | No |
+| 98 | tcpConnTimeout | Yes | No |
+| 99 | syncCheckInterval | Yes | No |
+| 100 | maxTmrCtrl | Yes | No |
+| 101 | monitorReplica | Yes | No |
+| 102 | smlTagNullName | Yes | No |
+| 103 | keepColumnName | Yes | No |
+| 104 | ratioOfQueryCores | Yes | No |
+| 105 | maxStreamCompDelay | Yes | No |
+| 106 | maxFirstStreamCompDelay | Yes | No |
+| 107 | retryStreamCompDelay | Yes | No |
+| 108 | streamCompDelayRatio | Yes | No |
+| 109 | maxVgroupsPerDb | Yes | No |
+| 110 | maxTablesPerVnode | Yes | No |
+| 111 | minTablesPerVnode | Yes | No |
+| 112 | tableIncStepPerVnode | Yes | No |
+| 113 | cache | Yes | No |
+| 114 | blocks | Yes | No |
+| 115 | days | Yes | No |
+| 116 | keep | Yes | No |
+| 117 | minRows | Yes | No |
+| 118 | maxRows | Yes | No |
+| 119 | quorum | Yes | No |
+| 120 | comp | Yes | No |
+| 121 | walLevel | Yes | No |
+| 122 | fsync | Yes | No |
+| 123 | replica | Yes | No |
+| 124 | partitions | Yes | No |
+| 125 | quorum | Yes | No |
+| 126 | update | Yes | No |
+| 127 | cachelast | Yes | No |
+| 128 | maxSQLLength | Yes | No |
+| 129 | maxWildCardsLength | Yes | No |
+| 130 | maxRegexStringLen | Yes | No |
+| 131 | maxNumOfOrderedRes | Yes | No |
+| 132 | maxConnections | Yes | No |
+| 133 | mnodeEqualVnodeNum | Yes | No |
+| 134 | http | Yes | No |
+| 135 | httpEnableRecordSql | Yes | No |
+| 136 | httpMaxThreads | Yes | No |
+| 137 | restfulRowLimit | Yes | No |
+| 138 | httpDbNameMandatory | Yes | No |
+| 139 | httpKeepAlive | Yes | No |
+| 140 | enableRecordSql | Yes | No |
+| 141 | maxBinaryDisplayWidth | Yes | No |
+| 142 | stream | Yes | No |
+| 143 | retrieveBlockingModel | Yes | No |
+| 144 | tsdbMetaCompactRatio | Yes | No |
+| 145 | defaultJSONStrType | Yes | No |
+| 146 | walFlushSize | Yes | No |
+| 147 | keepTimeOffset | Yes | No |
+| 148 | flowctrl | Yes | No |
+| 149 | slaveQuery | Yes | No |
+| 150 | adjustMaster | Yes | No |
+| 151 | topicBinaryLen | Yes | No |
+| 152 | telegrafUseFieldNum | Yes | No |
+| 153 | deadLockKillQuery | Yes | No |
+| 154 | clientMerge | Yes | No |
+| 155 | sdbDebugFlag | Yes | No |
+| 156 | odbcDebugFlag | Yes | No |
+| 157 | httpDebugFlag | Yes | No |
+| 158 | monDebugFlag | Yes | No |
+| 159 | cqDebugFlag | Yes | No |
+| 160 | shortcutFlag | Yes | No |
+| 161 | probeSeconds | Yes | No |
+| 162 | probeKillSeconds | Yes | No |
+| 163 | probeInterval | Yes | No |
+| 164 | lossyColumns | Yes | No |
+| 165 | fPrecision | Yes | No |
+| 166 | dPrecision | Yes | No |
+| 167 | maxRange | Yes | No |
+| 168 | range | Yes | No |
diff --git a/docs/en/14-reference/13-schemaless/13-schemaless.md b/docs/en/14-reference/13-schemaless/13-schemaless.md
index 8b6a26ae52af42e339e2f5a8d0824a9e1be3f386..5b7924ce56b240d34ab139f6160839a56438dc6b 100644
--- a/docs/en/14-reference/13-schemaless/13-schemaless.md
+++ b/docs/en/14-reference/13-schemaless/13-schemaless.md
@@ -1,9 +1,10 @@
---
title: Schemaless Writing
-description: "The Schemaless write method eliminates the need to create super tables/sub tables in advance and automatically creates the storage structure corresponding to the data, as it is written to the interface."
+description: 'The Schemaless write method eliminates the need to create super tables/sub tables in advance and automatically creates the storage structure corresponding to the data, as it is written to the interface.'
---
-In IoT applications, data is collected for many purposes such as intelligent control, business analysis, device monitoring and so on. Due to changes in business or functional requirements or changes in device hardware, the application logic and even the data collected may change. To provide the flexibility needed in such cases and in a rapidly changing IoT landscape, TDengine provides a series of interfaces for the schemaless writing method. These interfaces eliminate the need to create super tables and subtables in advance by automatically creating the storage structure corresponding to the data as the data is written to the interface. When necessary, schemaless writing will automatically add the required columns to ensure that the data written by the user is stored correctly.
+In IoT applications, data is collected for many purposes such as intelligent control, business analysis, device monitoring and so on. Due to changes in business or functional requirements or changes in device hardware, the application logic and even the data collected may change. Schemaless writing automatically creates storage structures for your data as it is being written to TDengine, so that you do not need to create supertables in advance. When necessary, schemaless writing
+will automatically add the required columns to ensure that the data written by the user is stored correctly.
The schemaless writing method creates super tables and their corresponding subtables. These are completely indistinguishable from the super tables and subtables created directly via SQL. You can write data directly to them via SQL statements. Note that the names of tables created by schemaless writing are based on fixed mapping rules for tag values, so they are not explicitly ideographic and they lack readability.
@@ -19,12 +20,12 @@ With the following formatting conventions, schemaless writing uses a single stri
measurement,tag_set field_set timestamp
```
-where :
+where:
- measurement will be used as the data table name. It will be separated from tag_set by a comma.
-- tag_set will be used as tag data in the format `=,=`, i.e. multiple tags' data can be separated by a comma. It is separated from field_set by space.
-- field_set will be used as normal column data in the format of `=,=`, again using a comma to separate multiple normal columns of data. It is separated from the timestamp by a space.
-- The timestamp is the primary key corresponding to the data in this row.
+- `tag_set` will be used as tags, with format like `=,=` Enter a space between `tag_set` and `field_set`.
+- `field_set`will be used as data columns, with format like `=,=` Enter a space between `field_set` and `timestamp`.
+- `timestamp` is the primary key timestamp corresponding to this row of data
All data in tag_set is automatically converted to the NCHAR data type and does not require double quotes (").
@@ -35,18 +36,19 @@ In the schemaless writing data line protocol, each data item in the field_set ne
- Spaces, equal signs (=), commas (,), and double quotes (") need to be escaped with a backslash (\\) in front. (All refer to the ASCII character)
- Numeric types will be distinguished from data types by the suffix.
-| **Serial number** | **Postfix** | **Mapping type** | **Size (bytes)** |
-| -------- | -------- | ------------ | -------------- |
-| 1 | none or f64 | double | 8 |
-| 2 | f32 | float | 4 |
-| 3 | i8/u8 | TinyInt/UTinyInt | 1 |
-| 4 | i16/u16 | SmallInt/USmallInt | 2 |
-| 5 | i32/u32 | Int/UInt | 4 |
-| 6 | i64/i/u64/u | Bigint/Bigint/UBigint/UBigint | 8 |
+| **Serial number** | **Postfix** | **Mapping type** | **Size (bytes)** |
+| ----------------- | ----------- | ----------------------------- | ---------------- |
+| 1 | None or f64 | double | 8 |
+| 2 | f32 | float | 4 |
+| 3 | i8/u8 | TinyInt/UTinyInt | 1 |
+| 4 | i16/u16 | SmallInt/USmallInt | 2 |
+| 5 | i32/u32 | Int/UInt | 4 |
+| 6 | i64/i/u64/u | BigInt/BigInt/UBigInt/UBigInt | 8 |
- `t`, `T`, `true`, `True`, `TRUE`, `f`, `F`, `false`, and `False` will be handled directly as BOOL types.
-For example, the following data rows indicate that the t1 label is "3" (NCHAR), the t2 label is "4" (NCHAR), and the t3 label is "t3" to the super table named `st` labeled "t3" (NCHAR), write c1 column as 3 (BIGINT), c2 column as false (BOOL), c3 column is "passit" (BINARY), c4 column is 4 (DOUBLE), and the primary key timestamp is 1626006833639000000 in one row.
+For example, the following data rows write c1 column as 3 (BIGINT), c2 column as false (BOOL), c3 column
+as "passit" (BINARY), c4 column as 4 (DOUBLE), and the primary key timestamp as 1626006833639000000 to child table with the t1 label as "3" (NCHAR), the t2 label as "4" (NCHAR), and the t3 label as "t3" (NCHAR) and the super table named `st`.
```json
st,t1=3,t2=4,t3=t3 c1=3i64,c3="passit",c2=false,c4=4f64 1626006833639000000
@@ -58,102 +60,105 @@ Note that if the wrong case is used when describing the data type suffix, or if
Schemaless writes process row data according to the following principles.
-1. You can use the following rules to generate the subtable names: first, combine the measurement name and the key and value of the label into the next string:
+1. You can use the following rules to generate the subtable names: first, combine the measurement name and the key and value of the label into the next string:
```json
"measurement,tag_key1=tag_value1,tag_key2=tag_value2"
```
Note that tag_key1, tag_key2 are not the original order of the tags entered by the user but the result of using the tag names in ascending order of the strings. Therefore, tag_key1 is not the first tag entered in the line protocol.
-The string's MD5 hash value "md5_val" is calculated after the ranking is completed. The calculation result is then combined with the string to generate the table name: "t_md5_val". "t*" is a fixed prefix that every table generated by this mapping relationship has.
+The string's MD5 hash value "md5_val" is calculated after the ranking is completed. The calculation result is then combined with the string to generate the table name: "t_md5_val". "t_" is a fixed prefix that every table generated by this mapping relationship has.
+You can configure smlChildTableName in taos.cfg to specify table names, for example, `smlChildTableName=tname`. You can insert `st,tname=cpul,t1=4 c1=3 1626006833639000000` and the cpu1 table will be automatically created. Note that if multiple rows have the same tname but different tag_set values, the tag_set of the first row is used to create the table and the others are ignored.
2. If the super table obtained by parsing the line protocol does not exist, this super table is created.
-If the subtable obtained by the parse line protocol does not exist, Schemaless creates the sub-table according to the subtable name determined in steps 1 or 2.
+3. If the subtable obtained by the parse line protocol does not exist, Schemaless creates the sub-table according to the subtable name determined in steps 1 or 2.
4. If the specified tag or regular column in the data row does not exist, the corresponding tag or regular column is added to the super table (only incremental).
-5. If there are some tag columns or regular columns in the super table that are not specified to take values in a data row, then the values of these columns are set to NULL.
+5. If there are some tag columns or regular columns in the super table that are not specified to take values in a data row, then the values of these columns are set to
+ NULL.
6. For BINARY or NCHAR columns, if the length of the value provided in a data row exceeds the column type limit, the maximum length of characters allowed to be stored in the column is automatically increased (only incremented and not decremented) to ensure complete preservation of the data.
7. Errors encountered throughout the processing will interrupt the writing process and return an error code.
-8. In order to improve the efficiency of writing, it is assumed by default that the order of the fields in the same Super is the same (the first data contains all fields, and the following data is in this order). If the order is different, the parameter smlDataFormat needs to be configured to be false. Otherwise, the data is written in the same order, and the data in the library will be abnormal.
+8. It is assumed that the order of field_set in a supertable is consistent, meaning that the first record contains all fields and subsequent records store fields in the same order. If the order is not consistent, set smlDataFormat in taos.cfg to false. Otherwise, data will be written out of order and a database error will occur.(smlDataFormat in taos.cfg default to false after version of 3.0.1.3)
:::tip
-All processing logic of schemaless will still follow TDengine's underlying restrictions on data structures, such as the total length of each row of data cannot exceed 16k bytes. See [TAOS SQL Boundary Limits](/taos-sql/limit) for specific constraints in this area.
+All processing logic of schemaless will still follow TDengine's underlying restrictions on data structures, such as the total length of each row of data cannot exceed
+16KB. See [TDengine SQL Boundary Limits](/taos-sql/limit) for specific constraints in this area.
+
:::
## Time resolution recognition
Three specified modes are supported in the schemaless writing process, as follows:
-| **Serial** | **Value** | **Description** |
-| -------- | ------------------- | ------------------------------- |
-| 1 | SML_LINE_PROTOCOL | InfluxDB Line Protocol |
-| 2 | SML_TELNET_PROTOCOL | OpenTSDB Text Line Protocol |
-| 3 | SML_JSON_PROTOCOL | JSON protocol format |
-
-In the SML_LINE_PROTOCOL parsing mode, the user is required to specify the time resolution of the input timestamp. The available time resolutions are shown in the following table.
+| **Serial** | **Value** | **Description** |
+| ---------- | ------------------- | ---------------------- |
+| 1 | SML_LINE_PROTOCOL | InfluxDB Line Protocol |
+| 2 | SML_TELNET_PROTOCOL | OpenTSDB file protocol |
+| 3 | SML_JSON_PROTOCOL | OpenTSDB JSON protocol |
-| **Serial Number** | **Time Resolution Definition** | **Meaning** |
-| -------- | --------------------------------- | -------------- |
-| 1 | TSDB_SML_TIMESTAMP_NOT_CONFIGURED | Not defined (invalid) |
-| 2 | TSDB_SML_TIMESTAMP_HOURS | hour |
-| 3 | TSDB_SML_TIMESTAMP_MINUTES | MINUTES
-| 4 | TSDB_SML_TIMESTAMP_SECONDS | SECONDS
-| 5 | TSDB_SML_TIMESTAMP_MILLI_SECONDS | milliseconds
-| 6 | TSDB_SML_TIMESTAMP_MICRO_SECONDS | microseconds
-| 7 | TSDB_SML_TIMESTAMP_NANO_SECONDS | nanoseconds |
+In InfluxDB line protocol mode, you must specify the precision of the input timestamp. Valid precisions are described in the following table.
-In SML_TELNET_PROTOCOL and SML_JSON_PROTOCOL modes, the time precision is determined based on the length of the timestamp (in the same way as the OpenTSDB standard operation), and the user-specified time resolution is ignored at this point.
+| **No.** | **Precision** | **Description** |
+| ------- | --------------------------------- | --------------------- |
+| 1 | TSDB_SML_TIMESTAMP_NOT_CONFIGURED | Not defined (invalid) |
+| 2 | TSDB_SML_TIMESTAMP_HOURS | Hours |
+| 3 | TSDB_SML_TIMESTAMP_MINUTES | Minutes |
+| 4 | TSDB_SML_TIMESTAMP_SECONDS | Seconds |
+| 5 | TSDB_SML_TIMESTAMP_MILLI_SECONDS | Milliseconds |
+| 6 | TSDB_SML_TIMESTAMP_MICRO_SECONDS | Microseconds |
+| 7 | TSDB_SML_TIMESTAMP_NANO_SECONDS | Nanoseconds |
-## Data schema mapping rules
+In OpenTSDB file and JSON protocol modes, the precision of the timestamp is determined from its length in the standard OpenTSDB manner. User input is ignored.
-This section describes how data for line protocols are mapped to data with a schema. The data measurement in each line protocol is mapped as follows:
-- The tag name in tag_set is the name of the tag in the data schema
-- The name in field_set is the column's name.
+## Data Model Mapping
-The following data is used as an example to illustrate the mapping rules.
+This section describes how data in line protocol is mapped to a schema. The data measurement in each line is mapped to a
+supertable name. The tag name in tag_set is the tag name in the schema, and the name in field_set is the column name in the schema. The following example shows how data is mapped:
```json
st,t1=3,t2=4,t3=t3 c1=3i64,c3="passit",c2=false,c4=4f64 1626006833639000000
```
-The row data mapping generates a super table: `st`, which contains three labels of type NCHAR: t1, t2, t3. Five data columns are ts (timestamp), c1 (bigint), c3 (binary), c2 (bool), c4 (bigint). The mapping becomes the following SQL statement.
+This row is mapped to a supertable: `st` contains three NCHAR tags: t1, t2, and t3. Five columns are created: ts (timestamp), c1 (bigint), c3 (binary), c2 (bool), and c4 (bigint). The following SQL statement is generated:
```json
create stable st (_ts timestamp, c1 bigint, c2 bool, c3 binary(6), c4 bigint) tags(t1 nchar(1), t2 nchar(1), t3 nchar(2))
```
-## Data schema change handling
+## Processing Schema Changes
-This section describes the impact on the data schema for different line protocol data writing cases.
+This section describes the impact on the schema caused by different data being written.
-When writing to an explicitly identified field type using the line protocol, subsequent changes to the field's type definition will result in an explicit data schema error, i.e., will trigger a write API report error. As shown below, the
+If you use line protocol to write to a specific tag field and then later change the field type, a schema error will ocur. This triggers an error on the write API. This is shown as follows:
```json
-st,t1=3,t2=4,t3=t3 c1=3i64,c3="passit",c2=false,c4=4 1626006833639000000
-st,t1=3,t2=4,t3=t3 c1=3i64,c3="passit",c2=false,c4=4i 1626006833640000000
+st,t1=3,t2=4,t3=t3 c1=3i64,c3="passit",c2=false,c4=4 1626006833639000000
+st,t1=3,t2=4,t3=t3 c1=3i64,c3="passit",c2=false,c4=4i 1626006833640000000
```
-The data type mapping in the first row defines column c4 as DOUBLE, but the data in the second row is declared as BIGINT by the numeric suffix, which triggers a parsing error with schemaless writing.
+The first row defines c4 as a double. However, in the second row, the suffix indicates that the value of c4 is a bigint. This causes schemaless writing to throw an error.
-If the line protocol before the column declares the data column as BINARY, the subsequent one requires a longer binary length, which triggers a super table schema change.
+An error also occurs if data input into a binary column exceeds the defined length of the column.
```json
-st,t1=3,t2=4,t3=t3 c1=3i64,c5="pass" 1626006833639000000
-st,t1=3,t2=4,t3=t3 c1=3i64,c5="passit" 1626006833640000000
+st,t1=3,t2=4,t3=t3 c1=3i64,c5="pass" 1626006833639000000
+st,t1=3,t2=4,t3=t3 c1=3i64,c5="passit" 1626006833640000000
```
-The first line of the line protocol parsing will declare column c5 is a BINARY(4) field. The second line data write will parse column c5 as a BINARY column. But in the second line, c5's width is 6 so you need to increase the width of the BINARY field to be able to accommodate the new string.
+The first row defines c5 as a binary(4). but the second row writes 6 bytes to it. This means that the length of the binary column must be expanded to contain the data.
```json
-st,t1=3,t2=4,t3=t3 c1=3i64 1626006833639000000
-st,t1=3,t2=4,t3=t3 c1=3i64,c6="passit" 1626006833640000000
+st,t1=3,t2=4,t3=t3 c1=3i64 1626006833639000000
+st,t1=3,t2=4,t3=t3 c1=3i64,c6="passit" 1626006833640000000
```
-The second line of data has an additional column c6 of type BINARY(6) compared to the first row. Then a column c6 of type BINARY(6) is automatically added at this point.
+The preceding data includes a new entry, c6, with type binary(6). When this occurs, a new column c6 with type binary(6) is added automatically.
-## Write integrity
+## Write Integrity
-TDengine provides idempotency guarantees for data writing, i.e., you can repeatedly call the API to write data with errors. However, it does not give atomicity guarantees for writing multiple rows of data. During the process of writing numerous rows of data in one batch, some data will be written successfully, and some data will fail.
+TDengine guarantees the idempotency of data writes. This means that you can repeatedly call the API to perform write operations with bad data. However, TDengine does not guarantee the atomicity of multi-row writes. In a multi-row write, some data may be written successfully and other data unsuccessfully.
-## Error code
+##: Error Codes
-If it is an error in the data itself during the schemaless writing process, the application will get `TSDB_CODE_TSC_LINE_SYNTAX_ERROR` error message, which indicates that the error occurred in writing. The other error codes are consistent with the TDengine and can be obtained via the `taos_errstr()` to get the specific cause of the error.
+The TSDB_CODE_TSC_LINE_SYNTAX_ERROR indicates an error in the schemaless writing component.
+This error occurs when writing text. For other errors, schemaless writing uses the standard TDengine error codes
+found in taos_errstr.
diff --git a/docs/en/14-reference/14-taosKeeper.md b/docs/en/14-reference/14-taosKeeper.md
index 476b5a1fd20b4dce4379026a6300ae8e26db6656..665bc75380d4f59666d792d074fb37c65c810264 100644
--- a/docs/en/14-reference/14-taosKeeper.md
+++ b/docs/en/14-reference/14-taosKeeper.md
@@ -1,7 +1,7 @@
---
sidebar_label: taosKeeper
title: taosKeeper
-description: Instructions and tips for using taosKeeper
+description: exports TDengine monitoring metrics.
---
## Introduction
@@ -22,26 +22,35 @@ You can compile taosKeeper separately and install it. Please refer to the [taosK
### Configuration and running methods
-
-taosKeeper needs to be executed on the terminal of the operating system. To run taosKeeper, see [configuration file](#configuration-file-parameters-in-detail).
+taosKeeper needs to be executed on the terminal of the operating system, it supports three configuration methods: [Command-line arguments](#command-line-arguments-in-detail), [environment variable](#environment-variable-in-detail) and [configuration file](#configuration-file-parameters-in-detail). The precedence of those is Command-line, environment variable and configuration file.
**Make sure that the TDengine cluster is running correctly before running taosKeeper. ** Ensure that the monitoring service in TDengine has been started. For more information, see [TDengine Monitoring Configuration](../config/#monitoring).
-
+### Environment variable
+
+You can use Environment variable to run taosKeeper and control its behavior:
+
+```shell
+$ export TAOS_KEEPER_TDENGINE_HOST=192.168.64.3
+
+$ taoskeeper
+```
+
+you can run `taoskeeper -h` for more detail.
+
### Configuration File
You can quickly launch taosKeeper with the following commands. If you do not specify a configuration file, `/etc/taos/keeper.toml` is used by default. If this file does not specify configurations, the default values are used.
```shell
-taoskeeper -c
+$ taoskeeper -c
```
**Sample configuration files**
@@ -110,7 +119,7 @@ Query OK, 1 rows in database (0.036162s)
#### Export Monitoring Metrics
```shell
-curl http://127.0.0.1:6043/metrics
+$ curl http://127.0.0.1:6043/metrics
```
Sample result set (excerpt):
diff --git a/docs/en/14-reference/_telegraf.mdx b/docs/en/14-reference/_telegraf.mdx
index e32fb256936a5f2c00bbb3f37529e895d260fc5c..bcf1a0893fff7c06127da9e8117a778a76bfb0d9 100644
--- a/docs/en/14-reference/_telegraf.mdx
+++ b/docs/en/14-reference/_telegraf.mdx
@@ -22,5 +22,4 @@ An example is as follows.
username = "root"
password = "taosdata"
data_format = "influx"
- influx_max_line_bytes = 250
```
diff --git a/docs/en/20-third-party/01-grafana.mdx b/docs/en/20-third-party/01-grafana.mdx
index 5dbeb31a231464e48b4f977420f03f0ede81e78e..e0fbefd5a8634d2001f2cc0601afa110aff33632 100644
--- a/docs/en/20-third-party/01-grafana.mdx
+++ b/docs/en/20-third-party/01-grafana.mdx
@@ -6,9 +6,7 @@ title: Grafana
import Tabs from "@theme/Tabs";
import TabItem from "@theme/TabItem";
-TDengine can be quickly integrated with the open-source data visualization system [Grafana](https://www.grafana.com/) to build a data monitoring and alerting system. The whole process does not require any code development. And you can visualize the contents of the data tables in TDengine on a dashboard.
-
-You can learn more about using the TDengine plugin on [GitHub](https://github.com/taosdata/grafanaplugin/blob/master/README.md).
+TDengine can be quickly integrated with the open-source data visualization system [Grafana](https://www.grafana.com/) to build a data monitoring and alerting system. The whole process does not require any code development. And you can visualize the contents of the data tables in TDengine on a dashboard. You can learn more about using the TDengine plugin on [GitHub](https://github.com/taosdata/grafanaplugin/blob/master/README.md).
## Prerequisites
@@ -65,7 +63,6 @@ Restart Grafana service and open Grafana in web-browser, usually
-
Follow the installation steps in [Grafana](https://grafana.com/grafana/plugins/tdengine-datasource/?tab=installation) with the [``grafana-cli`` command-line tool](https://grafana.com/docs/grafana/latest/administration/cli/) for plugin installation.
@@ -76,7 +73,7 @@ grafana-cli plugins install tdengine-datasource
sudo -u grafana grafana-cli plugins install tdengine-datasource
```
-Alternatively, you can manually download the .zip file from [GitHub](https://github.com/taosdata/grafanaplugin/releases/tag/latest) or [Grafana](https://grafana.com/grafana/plugins/tdengine-datasource/?tab=installation) and unpack it into your grafana plugins directory.
+You can also download zip files from [GitHub](https://github.com/taosdata/grafanaplugin/releases/tag/latest) or [Grafana](https://grafana.com/grafana/plugins/tdengine-datasource/?tab=installation) and install manually. The commands are as follows:
```bash
GF_VERSION=3.2.2
@@ -131,7 +128,7 @@ docker run -d \
grafana/grafana
```
-You can setup a zero-configuration stack for TDengine + Grafana by [docker-compose](https://docs.docker.com/compose/) and [Grafana provisioning](https://grafana.com/docs/grafana/latest/administration/provisioning/) file:
+You can setup a zero-configuration stack for TDengine + Grafana by [docker-compose](https://docs.docker.com/compose/) and [Grafana provisioning](https://grafana.com/docs/grafana/latest/administration/provisioning/) file:
1. Save the provisioning configuration file to `tdengine.yml`.
@@ -196,7 +193,7 @@ Go back to the main interface to create a dashboard and click Add Query to enter
As shown above, select the `TDengine` data source in the `Query` and enter the corresponding SQL in the query box below for query.
-- INPUT SQL: enter the statement to be queried (the result set of the SQL statement should be two columns and multiple rows), for example: `select avg(mem_system) from log.dn where ts >= $from and ts < $to interval($interval)`, where, from, to and interval are built-in variables of the TDengine plugin, indicating the range and time interval of queries fetched from the Grafana plugin panel. In addition to the built-in variables, custom template variables are also supported.
+- INPUT SQL: Enter the desired query (the results being two columns and multiple rows), such as `select _wstart, avg(mem_system) from log.dnodes_info where ts >= $from and ts < $to interval($interval)`. In this statement, $from, $to, and $interval are variables that Grafana replaces with the query time range and interval. In addition to the built-in variables, custom template variables are also supported.
- ALIAS BY: This allows you to set the current query alias.
- GENERATE SQL: Clicking this button will automatically replace the corresponding variables and generate the final executed statement.
@@ -208,7 +205,11 @@ Follow the default prompt to query the average system memory usage for the speci
### Importing the Dashboard
-You can install TDinsight dashboard in data source configuration page (like `http://localhost:3000/datasources/edit/1/dashboards`) as a monitoring visualization tool for TDengine cluster. The dashboard is published in Grafana as [Dashboard 15167 - TDinsight](https://grafana.com/grafana/dashboards/15167). Check the [TDinsight User Manual](/reference/tdinsight/) for the details.
+You can install TDinsight dashboard in data source configuration page (like `http://localhost:3000/datasources/edit/1/dashboards`) as a monitoring visualization tool for TDengine cluster. Ensure that you use TDinsight for 3.x.
+
+
+
+A dashboard for TDengine 2.x has been published on Grafana: [Dashboard 15167 - TDinsight](https://grafana.com/grafana/dashboards/15167)) 。 Check the [TDinsight User Manual](/reference/tdinsight/) for the details.
For more dashboards using TDengine data source, [search here in Grafana](https://grafana.com/grafana/dashboards/?dataSource=tdengine-datasource). Here is a sub list:
diff --git a/docs/en/20-third-party/03-telegraf.md b/docs/en/20-third-party/03-telegraf.md
index 6a7aac322f9def880f58d7ed0adcc4a8f3687ed1..e5c6d6f25497d546c958ad104e68636b51d8375d 100644
--- a/docs/en/20-third-party/03-telegraf.md
+++ b/docs/en/20-third-party/03-telegraf.md
@@ -15,6 +15,7 @@ To write Telegraf data to TDengine requires the following preparations.
- The TDengine cluster is deployed and functioning properly
- taosAdapter is installed and running properly. Please refer to the [taosAdapter manual](/reference/taosadapter) for details.
- Telegraf has been installed. Please refer to the [official documentation](https://docs.influxdata.com/telegraf/v1.22/install/) for Telegraf installation.
+- Telegraf collects the running status measurements of current system. You can enable [input plugins](https://docs.influxdata.com/telegraf/v1.22/plugins/) to insert [other formats](https://docs.influxdata.com/telegraf/v1.24/data_formats/input/) data to Telegraf then forward to TDengine.
## Configuration steps
@@ -31,11 +32,12 @@ Use TDengine CLI to verify Telegraf correctly writing data to TDengine and read
```
taos> show databases;
- name | created_time | ntables | vgroups | replica | quorum | days | keep | cache(MB) | blocks | minrows | maxrows | wallevel | fsync | comp | cachelast | precision | update | status |
-====================================================================================================================================================================================================================================================================================
- telegraf | 2022-04-20 08:47:53.488 | 22 | 1 | 1 | 1 | 10 | 3650 | 16 | 6 | 100 | 4096 | 1 | 3000 | 2 | 0 | ns | 2 | ready |
- log | 2022-04-20 07:19:50.260 | 9 | 1 | 1 | 1 | 10 | 3650 | 16 | 6 | 100 | 4096 | 1 | 3000 | 2 | 0 | ms | 0 | ready |
-Query OK, 2 row(s) in set (0.002401s)
+ name |
+=================================
+ information_schema |
+ performance_schema |
+ telegraf |
+Query OK, 3 rows in database (0.010568s)
taos> use telegraf;
Database changed.
@@ -65,3 +67,11 @@ taos> select * from telegraf.system limit 10;
|
Query OK, 3 row(s) in set (0.013269s)
```
+
+:::note
+
+- TDengine take influxdb format data and create unique ID for table names by the rule.
+The user can configure `smlChildTableName` paramter to generate specified table names if he/she needs. And he/she also need to insert data with specified data format.
+For example, Add `smlChildTableName=tname` in the taos.cfg file. Insert data `st,tname=cpu1,t1=4 c1=3 1626006833639000000` then the table name will be cpu1. If there are multiple lines has same tname but different tag_set, the first line's tag_set will be used to automatically creating table and ignore other lines. Please refer to [TDengine Schemaless](/reference/schemaless/#Schemaless-Line-Protocol)
+:::
+
diff --git a/docs/en/20-third-party/06-statsd.md b/docs/en/20-third-party/06-statsd.md
index 1cddbcf63db5bf64c77f40c7a3aa95698362fdac..32b1bbb97acafd2494c7fadb8af3d06cf69219ea 100644
--- a/docs/en/20-third-party/06-statsd.md
+++ b/docs/en/20-third-party/06-statsd.md
@@ -1,6 +1,6 @@
---
sidebar_label: StatsD
-title: StatsD writing
+title: StatsD Writing
---
import StatsD from "../14-reference/_statsd.mdx"
@@ -12,8 +12,8 @@ You can write StatsD data to TDengine by simply modifying the configuration file
## Prerequisites
To write StatsD data to TDengine requires the following preparations.
-- The TDengine cluster has been deployed and is working properly
-- taosAdapter is installed and running properly. Please refer to the [taosAdapter manual](/reference/taosadapter) for details.
+1. The TDengine cluster is deployed and functioning properly
+2. taosAdapter is installed and running properly. Please refer to the taosAdapter manual for details.
- StatsD has been installed. To install StatsD, please refer to [official documentation](https://github.com/statsd/statsd)
## Configuration steps
@@ -39,8 +39,12 @@ $ echo "foo:1|c" | nc -u -w0 127.0.0.1 8125
Use the TDengine CLI to verify that StatsD data is written to TDengine and can read out correctly.
```
-Welcome to the TDengine shell from Linux, Client Version:3.0.0.0
-Copyright (c) 2022 by TAOS Data, Inc. All rights reserved.
+taos> show databases;
+ name | created_time | ntables | vgroups | replica | quorum | days | keep | cache(MB) | blocks | minrows | maxrows | wallevel | fsync | comp | cachelast | precision | update | status |
+====================================================================================================================================================================================================================================================================================
+ log | 2022-04-20 07:19:50.260 | 11 | 1 | 1 | 1 | 10 | 3650 | 16 | 6 | 100 | 4096 | 1 | 3000 | 2 | 0 | ms | 0 | ready |
+ statsd | 2022-04-20 09:54:51.220 | 1 | 1 | 1 | 1 | 10 | 3650 | 16 | 6 | 100 | 4096 | 1 | 3000 | 2 | 0 | ns | 2 | ready |
+Query OK, 2 row(s) in set (0.003142s)
taos> use statsd;
Database changed.
diff --git a/docs/en/20-third-party/09-emq-broker.md b/docs/en/20-third-party/09-emq-broker.md
index 0900dd3d7571dc0ab8d93174aa2d7b5eccf1fbf5..2ead1bbaf40f06fec2a5cbf85e46fdfdcc5216df 100644
--- a/docs/en/20-third-party/09-emq-broker.md
+++ b/docs/en/20-third-party/09-emq-broker.md
@@ -9,7 +9,7 @@ MQTT is a popular IoT data transfer protocol. [EMQX](https://github.com/emqx/emq
The following preparations are required for EMQX to add TDengine data sources correctly.
- The TDengine cluster is deployed and working properly
-- taosAdapter is installed and running properly. Please refer to the [taosAdapter manual](/reference/taosadapter) for details.
+- taosAdapter is installed and running properly. Please refer to the [taosAdapter manual](../../reference/taosadapter) for details.
- If you use the emulated writers described later, you need to install the appropriate version of Node.js. V12 is recommended.
## Install and start EMQX
@@ -28,8 +28,6 @@ USE test;
CREATE TABLE sensor_data (ts TIMESTAMP, temperature FLOAT, humidity FLOAT, volume FLOAT, pm10 FLOAT, pm25 FLOAT, so2 FLOAT, no2 FLOAT, co FLOAT, sensor_id NCHAR(255), area TINYINT, coll_time TIMESTAMP);
```
-Note: The table schema is based on the blog [(In Chinese) Data Transfer, Storage, Presentation, EMQX + TDengine Build MQTT IoT Data Visualization Platform](https://www.taosdata.com/blog/2020/08/04/1722.html) as an example. Subsequent operations are carried out with this blog scenario too. Please modify it according to your actual application scenario.
-
## Configuring EMQX Rules
Since the configuration interface of EMQX differs from version to version, here is v4.4.5 as an example. For other versions, please refer to the corresponding official documentation.
@@ -137,5 +135,5 @@ Use the TDengine CLI program to log in and query the appropriate databases and t

-Please refer to the [TDengine official documentation](https://docs.taosdata.com/) for more details on how to use TDengine.
+Please refer to the [TDengine official documentation](https://docs.tdengine.com/) for more details on how to use TDengine.
EMQX Please refer to the [EMQX official documentation](https://www.emqx.io/docs/en/v4.4/rule/rule-engine.html) for details on how to use EMQX.
diff --git a/docs/en/20-third-party/10-hive-mq-broker.md b/docs/en/20-third-party/10-hive-mq-broker.md
index 333e00fa0e9b724ffbb067a83ad07d0b846b1a23..828a62ac5b336766d5c3770cc42cd3a61cfd8d5d 100644
--- a/docs/en/20-third-party/10-hive-mq-broker.md
+++ b/docs/en/20-third-party/10-hive-mq-broker.md
@@ -1,6 +1,6 @@
---
sidebar_label: HiveMQ Broker
-title: HiveMQ Broker writing
+title: HiveMQ Broker Writing
---
-[HiveMQ](https://www.hivemq.com/) is an MQTT broker that provides community and enterprise editions. HiveMQ is mainly for enterprise emerging machine-to-machine M2M communication and internal transport, meeting scalability, ease of management, and security features. HiveMQ provides an open-source plug-in development kit. MQTT data can be saved to TDengine via TDengine extension for HiveMQ. Please refer to the [HiveMQ extension - TDengine documentation](https://github.com/huskar-t/hivemq-tdengine-extension/blob/b62a26ecc164a310104df57691691b237e091c89/README_EN.md) for details on how to use it.
\ No newline at end of file
+[HiveMQ](https://www.hivemq.com/) is an MQTT broker that provides community and enterprise editions. HiveMQ is mainly for enterprise emerging machine-to-machine M2M communication and internal transport, meeting scalability, ease of management, and security features. HiveMQ provides an open-source plug-in development kit. MQTT data can be saved to TDengine via TDengine extension for HiveMQ. For more information, see [HiveMQ TDengine Extension](https://github.com/huskar-t/hivemq-tdengine-extension/blob/b62a26ecc164a310104df57691691b237e091c89/README_EN.md).
diff --git a/docs/en/20-third-party/12-google-data-studio.md b/docs/en/20-third-party/12-google-data-studio.md
new file mode 100644
index 0000000000000000000000000000000000000000..fc94f98056bbeeeec88ca7ea12a4a6a7e6f15dc5
--- /dev/null
+++ b/docs/en/20-third-party/12-google-data-studio.md
@@ -0,0 +1,36 @@
+---
+sidebar_label: Google Data Studio
+title: Use Google Data Studio to access TDengine
+---
+
+Data Studio is a powerful tool for reporting and visualization, offering a wide variety of charts and connectors and making it easy to generate reports based on predefined templates. Its ease of use and robust ecosystem have made it one of the first choices for people working in data analysis.
+
+TDengine is a high-performance, scalable time-series database that supports SQL. Many businesses and developers in fields spanning from IoT and Industry Internet to IT and finance are using TDengine as their time-series database management solution.
+
+The TDengine team immediately saw the benefits of using TDengine to process time-series data with Data Studio to analyze it, and they got to work to create a connector for Data Studio.
+
+With the release of the TDengine connector in Data Studio, you can now get even more out of your data. To obtain the connector, first go to the Data Studio Connector Gallery, click Connect to Data, and search for “TDengine”.
+
+
+
+Select the TDengine connector and click Authorize.
+
+
+
+Then sign in to your Google Account and click Allow to enable the connection to TDengine.
+
+
+
+In the Enter URL field, type the hostname and port of the server running the TDengine REST service. In the following fields, type your username, password, database name, table name, and the start and end times of your query range. Then, click Connect.
+
+
+
+After the connection is established, you can use Data Studio to process your data and create reports.
+
+
+
+In Data Studio, TDengine timestamps and tags are considered dimensions, and all other items are considered metrics. You can create all kinds of custom charts with your data – some examples are shown below.
+
+
+
+With the ability to process petabytes of data per day and provide monitoring and alerting in real time, TDengine is a great solution for time-series data management. Now, with the Data Studio connector, we’re sure you’ll be able to gain new insights and obtain even more value from your data.
diff --git a/docs/en/20-third-party/13-Jupyter.md b/docs/en/20-third-party/13-Jupyter.md
new file mode 100644
index 0000000000000000000000000000000000000000..fbd7e530f0959740c53e48ce1d73d92ce0d6c5c5
--- /dev/null
+++ b/docs/en/20-third-party/13-Jupyter.md
@@ -0,0 +1,98 @@
+---
+sidebar_label: JupyterLab
+title: Connect JupyterLab to TDengine
+---
+
+JupyterLab is the next generation of the ubiquitous Jupyter Notebook. In this note we show you how to install the TDengine Python connector to connect to TDengine in JupyterLab. You can then insert data and perform queries against the TDengine instance within JupyterLab.
+
+## Install JupyterLab
+Installing JupyterLab is very easy. Installation instructions can be found at:
+
+https://jupyterlab.readthedocs.io/en/stable/getting_started/installation.html.
+
+If you don't feel like clicking on the link here are the instructions.
+Jupyter's preferred Python package manager is pip, so we show the instructions for pip.
+You can also use **conda** or **pipenv** if you are managing Python environments.
+````
+pip install jupyterlab
+````
+
+For **conda** you can run:
+````
+conda install -c conda-forge jupyterlab
+````
+
+For **pipenv** you can run:
+````
+pipenv install jupyterlab
+pipenv shell
+````
+
+## Run JupyterLab
+You can start JupyterLab from the command line by running:
+````
+jupyter lab
+````
+This will automatically launch your default browser and connect to your JupyterLab instance, usually on port 8888.
+
+## Install the TDengine Python connector
+You can now install the TDengine Python connector as follows.
+
+Start a new Python kernel in JupyterLab.
+
+If using **conda** run the following:
+````
+# Install a conda package in the current Jupyter kernel
+import sys
+!conda install --yes --prefix {sys.prefix} taospy
+````
+If using **pip** run the following:
+````
+# Install a pip package in the current Jupyter kernel
+import sys
+!{sys.executable} -m pip install taospy
+````
+
+## Connect to TDengine
+You can find detailed examples to use the Python connector, in the TDengine documentation here.
+Once you have installed the TDengine Python connector in your JupyterLab kernel, the process of connecting to TDengine is the same as that you would use if you weren't using JupyterLab.
+Each TDengine instance, has a database called "log" which has monitoring information about the TDengine instance.
+In the "log" database there is a [supertable](https://docs.tdengine.com/taos-sql/stable/) called "disks_info".
+
+The structure of this table is as follows:
+````
+taos> desc disks_info;
+ Field | Type | Length | Note |
+=================================================================================
+ ts | TIMESTAMP | 8 | |
+ datadir_l0_used | FLOAT | 4 | |
+ datadir_l0_total | FLOAT | 4 | |
+ datadir_l1_used | FLOAT | 4 | |
+ datadir_l1_total | FLOAT | 4 | |
+ datadir_l2_used | FLOAT | 4 | |
+ datadir_l2_total | FLOAT | 4 | |
+ dnode_id | INT | 4 | TAG |
+ dnode_ep | BINARY | 134 | TAG |
+Query OK, 9 row(s) in set (0.000238s)
+````
+
+The code below is used to fetch data from this table into a pandas DataFrame.
+
+````
+import sys
+import taos
+import pandas
+
+def sqlQuery(conn):
+ df: pandas.DataFrame = pandas.read_sql("select * from log.disks_info limit 500", conn)
+ print(df)
+ return df
+
+conn = taos.connect()
+
+result = sqlQuery(conn)
+
+print(result)
+````
+
+TDengine has connectors for various languages including Node.js, Go, PHP and there are kernels for these languages which can be found [here](https://github.com/jupyter/jupyter/wiki/Jupyter-kernels).
diff --git a/docs/en/20-third-party/gds/gds-01.webp b/docs/en/20-third-party/gds/gds-01.webp
new file mode 100644
index 0000000000000000000000000000000000000000..2e5f9e4ff5db1e37718e2397c9a13a9f0e05602d
Binary files /dev/null and b/docs/en/20-third-party/gds/gds-01.webp differ
diff --git a/docs/en/20-third-party/gds/gds-02.png.webp b/docs/en/20-third-party/gds/gds-02.png.webp
new file mode 100644
index 0000000000000000000000000000000000000000..3b3537f5a488019482f94452e70bd1bd79867ab5
Binary files /dev/null and b/docs/en/20-third-party/gds/gds-02.png.webp differ
diff --git a/docs/en/20-third-party/gds/gds-03.png.webp b/docs/en/20-third-party/gds/gds-03.png.webp
new file mode 100644
index 0000000000000000000000000000000000000000..5719436d5b2f21aa861067b966511e4b34d17dce
Binary files /dev/null and b/docs/en/20-third-party/gds/gds-03.png.webp differ
diff --git a/docs/en/20-third-party/gds/gds-04.png.webp b/docs/en/20-third-party/gds/gds-04.png.webp
new file mode 100644
index 0000000000000000000000000000000000000000..ddaae5c1a63b6b4db692e12491df55b88dcaadee
Binary files /dev/null and b/docs/en/20-third-party/gds/gds-04.png.webp differ
diff --git a/docs/en/20-third-party/gds/gds-05.png.webp b/docs/en/20-third-party/gds/gds-05.png.webp
new file mode 100644
index 0000000000000000000000000000000000000000..9a917678fc7e60f0a739fa1e2b0f4fa010d12708
Binary files /dev/null and b/docs/en/20-third-party/gds/gds-05.png.webp differ
diff --git a/docs/en/20-third-party/gds/gds-06.png.webp b/docs/en/20-third-party/gds/gds-06.png.webp
new file mode 100644
index 0000000000000000000000000000000000000000..c76b68d32b5907bd5ba4e4010456f2ca5303448f
Binary files /dev/null and b/docs/en/20-third-party/gds/gds-06.png.webp differ
diff --git a/docs/en/20-third-party/gds/gds-07.png.webp b/docs/en/20-third-party/gds/gds-07.png.webp
new file mode 100644
index 0000000000000000000000000000000000000000..1386ae9c4db4f2465dd071afc5a047658b47031c
Binary files /dev/null and b/docs/en/20-third-party/gds/gds-07.png.webp differ
diff --git a/docs/en/20-third-party/gds/gds-08.png.webp b/docs/en/20-third-party/gds/gds-08.png.webp
new file mode 100644
index 0000000000000000000000000000000000000000..59dcf8b31df8bde8d4073ee0c7b1c7bdd7bd439d
Binary files /dev/null and b/docs/en/20-third-party/gds/gds-08.png.webp differ
diff --git a/docs/en/20-third-party/gds/gds-09.png.webp b/docs/en/20-third-party/gds/gds-09.png.webp
new file mode 100644
index 0000000000000000000000000000000000000000..b94439f211a814f66d41231c9386c57f3ffe8322
Binary files /dev/null and b/docs/en/20-third-party/gds/gds-09.png.webp differ
diff --git a/docs/en/20-third-party/gds/gds-10.png.webp b/docs/en/20-third-party/gds/gds-10.png.webp
new file mode 100644
index 0000000000000000000000000000000000000000..a63cad9e9a3d412b1132359506530498fb1a0e57
Binary files /dev/null and b/docs/en/20-third-party/gds/gds-10.png.webp differ
diff --git a/docs/en/20-third-party/gds/gds-11.png.webp b/docs/en/20-third-party/gds/gds-11.png.webp
new file mode 100644
index 0000000000000000000000000000000000000000..fc38cd9a29c00afa48238741c33b439f737a7b8f
Binary files /dev/null and b/docs/en/20-third-party/gds/gds-11.png.webp differ
diff --git a/docs/en/20-third-party/import_dashboard.webp b/docs/en/20-third-party/import_dashboard.webp
new file mode 100644
index 0000000000000000000000000000000000000000..164e3f4690a5a55f937a3c29e1e8ca026648e6b1
Binary files /dev/null and b/docs/en/20-third-party/import_dashboard.webp differ
diff --git a/docs/en/20-third-party/import_dashboard1.webp b/docs/en/20-third-party/import_dashboard1.webp
new file mode 100644
index 0000000000000000000000000000000000000000..d4fb374ce8bb75c8a0fbdbb9cab5b30eb29ab06d
Binary files /dev/null and b/docs/en/20-third-party/import_dashboard1.webp differ
diff --git a/docs/en/20-third-party/import_dashboard2.webp b/docs/en/20-third-party/import_dashboard2.webp
new file mode 100644
index 0000000000000000000000000000000000000000..9f74dc96be20ab64b5fb555aaccdaa1c1139b35c
Binary files /dev/null and b/docs/en/20-third-party/import_dashboard2.webp differ
diff --git a/docs/en/21-tdinternal/01-arch.md b/docs/en/21-tdinternal/01-arch.md
index 44651c0496481c410640e577aaad5781f846e302..2f876adffc2543bb9f117e5812ccc5241d7a6d99 100644
--- a/docs/en/21-tdinternal/01-arch.md
+++ b/docs/en/21-tdinternal/01-arch.md
@@ -12,6 +12,7 @@ The design of TDengine is based on the assumption that any hardware or software
Logical structure diagram of TDengine's distributed architecture is as follows:

+
Figure 1: TDengine architecture diagram
A complete TDengine system runs on one or more physical nodes. Logically, it includes data node (dnode), TDengine client driver (TAOSC) and application (app). There are one or more data nodes in the system, which form a cluster. The application interacts with the TDengine cluster through TAOSC's API. The following is a brief introduction to each logical unit.
@@ -38,15 +39,16 @@ A complete TDengine system runs on one or more physical nodes. Logically, it inc
**Cluster external connection**: TDengine cluster can accommodate a single, multiple or even thousands of data nodes. The application only needs to initiate a connection to any data node in the cluster. The network parameter required for connection is the End Point (FQDN plus configured port number) of a data node. When starting the application taos through CLI, the FQDN of the data node can be specified through the option `-h`, and the configured port number can be specified through `-p`. If the port is not configured, the system configuration parameter “serverPort” of TDengine will be adopted.
-**Inter-cluster communication**: Data nodes connect with each other through TCP/UDP. When a data node starts, it will obtain the EP information of the dnode where the mnode is located, and then establish a connection with the mnode in the system to exchange information. There are three steps to obtain EP information of the mnode:
+**Inter-cluster communication**: Data nodes connect with each other through TCP/UDP. When a data node starts, it will obtain the EP information of the dnode where the mnode is located, and then establish a connection with the mnode in the system to exchange information. There are three steps to obtain EP information of the mnode:
-1. Check whether the mnodeEpList file exists, if it does not exist or cannot be opened normally to obtain EP information of the mnode, skip to the second step;
+1. Check whether the mnodeEpList file exists, if it does not exist or cannot be opened normally to obtain EP information of the mnode, skip to the second step;
2. Check the system configuration file taos.cfg to obtain node configuration parameters “firstEp” and “secondEp” (the node specified by these two parameters can be a normal node without mnode, in this case, the node will try to redirect to the mnode node when connected). If these two configuration parameters do not exist or do not exist in taos.cfg, or are invalid, skip to the third step;
3. Set your own EP as a mnode EP and run it independently. After obtaining the mnode EP list, the data node initiates the connection. It will successfully join the working cluster after connection. If not successful, it will try the next item in the mnode EP list. If all attempts are made, but the connection still fails, sleep for a few seconds before trying again.
**The choice of MNODE**: TDengine logically has a management node, but there is no separate execution code. The server-side only has one set of execution code, taosd. So which data node will be the management node? This is determined automatically by the system without any manual intervention. The principle is as follows: when a data node starts, it will check its End Point and compare it with the obtained mnode EP List. If its EP exists in it, the data node shall start the mnode module and become a mnode. If your own EP is not in the mnode EP List, the mnode module will not start. During the system operation, due to load balancing, downtime and other reasons, mnode may migrate to the new dnode, totally transparently and without manual intervention. The modification of configuration parameters is the decision made by mnode itself according to resources usage.
-**Add new data nodes:** After the system has a data node, it has become a working system. There are two steps to add a new node into the cluster.
+**Add new data nodes:** After the system has a data node, it has become a working system. There are two steps to add a new node into the cluster.
+
- Step1: Connect to the existing working data node using TDengine CLI, and then add the End Point of the new data node with the command "create dnode"
- Step 2: In the system configuration parameter file taos.cfg of the new data node, set the “firstEp” and “secondEp” parameters to the EP of any two data nodes in the existing cluster. Please refer to the user tutorial for detailed steps. In this way, the cluster will be established step by step.
@@ -57,6 +59,7 @@ A complete TDengine system runs on one or more physical nodes. Logically, it inc
To explain the relationship between vnode, mnode, TAOSC and application and their respective roles, the following is an analysis of a typical data writing process.

+
Figure 2: Typical process of TDengine
1. Application initiates a request to insert data through JDBC, ODBC, or other APIs.
@@ -121,16 +124,17 @@ The load balancing process does not require any manual intervention, and it is t
If a database has N replicas, a virtual node group has N virtual nodes. But only one is the Leader and all others are slaves. When the application writes a new record to system, only the Leader vnode can accept the writing request. If a follower vnode receives a writing request, the system will notifies TAOSC to redirect.
-### Leader vnode Writing Process
+### Leader vnode Writing Process
Leader Vnode uses a writing process as follows:

+
Figure 3: TDengine Leader writing process
1. Leader vnode receives the application data insertion request, verifies, and moves to next step;
2. If the system configuration parameter `“walLevel”` is greater than 0, vnode will write the original request packet into database log file WAL. If walLevel is set to 2 and fsync is set to 0, TDengine will make WAL data written immediately to ensure that even system goes down, all data can be recovered from database log file;
-3. If there are multiple replicas, vnode will forward data packet to follower vnodes in the same virtual node group, and the forwarded packet has a version number with data;
+3. If there are multiple replicas, vnode will forward data packet to follower vnodes in the same virtual node group, and the forwarded packet has a version number with data;
4. Write into memory and add the record to “skip list”;
5. Leader vnode returns a confirmation message to the application, indicating a successful write.
6. If any of Step 2, 3 or 4 fails, the error will directly return to the application.
@@ -140,6 +144,7 @@ Leader Vnode uses a writing process as follows:
For a follower vnode, the write process as follows:

+
Figure 4: TDengine Follower Writing Process
1. Follower vnode receives a data insertion request forwarded by Leader vnode;
@@ -212,6 +217,7 @@ When data is written to disk, the system decideswhether to compress the data bas
By default, TDengine saves all data in /var/lib/taos directory, and the data files of each vnode are saved in a different directory under this directory. In order to expand the storage space, minimize the bottleneck of file reading and improve the data throughput rate, TDengine can configure the system parameter “dataDir” to allow multiple mounted hard disks to be used by system at the same time. In addition, TDengine also provides the function of tiered data storage, i.e. storage on different storage media according to the time stamps of data files. For example, the latest data is stored on SSD, the data older than a week is stored on local hard disk, and data older than four weeks is stored on network storage device. This reduces storage costs and ensures efficient data access. The movement of data on different storage media is automatically done by the system and is completely transparent to applications. Tiered storage of data is also configured through the system parameter “dataDir”.
dataDir format is as follows:
+
```
dataDir data_path [tier_level]
```
@@ -270,6 +276,7 @@ For the data collected by device D1001, the number of records per hour is counte
TDengine creates a separate table for each data collection point, but in practical applications, it is often necessary to aggregate data from different data collection points. In order to perform aggregation operations efficiently, TDengine introduces the concept of STable (super table). STable is used to represent a specific type of data collection point. It is a table set containing multiple tables. The schema of each table in the set is the same, but each table has its own static tag. There can be multiple tags which can be added, deleted and modified at any time. Applications can aggregate or statistically operate on all or a subset of tables under a STABLE by specifying tag filters. This greatly simplifies the development of applications. The process is shown in the following figure:

+
Figure 5: Diagram of multi-table aggregation query
1. Application sends a query condition to system;
@@ -279,9 +286,8 @@ TDengine creates a separate table for each data collection point, but in practic
5. Each vnode first finds the set of tables within its own node that meet the tag filters from memory, then scans the stored time-series data, completes corresponding aggregation calculations, and returns result to TAOSC;
6. TAOSC finally aggregates the results returned by multiple data nodes and send them back to application.
-Since TDengine stores tag data and time-series data separately in vnode, by filtering tag data in memory, the set of tables that need to participate in aggregation operation is first found, which reduces the volume of data to be scanned and improves aggregation speed. At the same time, because the data is distributed in multiple vnodes/dnodes, the aggregation operation is carried out concurrently in multiple vnodes, which further improves the aggregation speed. Aggregation functions for ordinary tables and most operations are applicable to STables. The syntax is exactly the same. Please see TAOS SQL for details.
+Since TDengine stores tag data and time-series data separately in vnode, by filtering tag data in memory, the set of tables that need to participate in aggregation operation is first found, which reduces the volume of data to be scanned and improves aggregation speed. At the same time, because the data is distributed in multiple vnodes/dnodes, the aggregation operation is carried out concurrently in multiple vnodes, which further improves the aggregation speed. Aggregation functions for ordinary tables and most operations are applicable to STables. The syntax is exactly the same. Please see TDengine SQL for details.
### Precomputation
In order to effectively improve the performance of query processing, based-on the unchangeable feature of IoT data, statistical information of data stored in data block is recorded in the head of data block, including max value, min value, and sum. We call it a precomputing unit. If the query processing involves all the data of a whole data block, the pre-calculated results are directly used, and no need to read the data block contents at all. Since the amount of pre-calculated data is much smaller than the actual size of data block stored on disk, for query processing with disk IO as bottleneck, the use of pre-calculated results can greatly reduce the pressure of reading IO and accelerate the query process. The precomputation mechanism is similar to the BRIN (Block Range Index) of PostgreSQL.
-
diff --git a/docs/en/25-application/01-telegraf.md b/docs/en/25-application/01-telegraf.md
index 59491152bcda3e26ec12aaa59ac1041ef23c4e7e..f7003264496e61f33e843a4c8f2ec8227ba571b6 100644
--- a/docs/en/25-application/01-telegraf.md
+++ b/docs/en/25-application/01-telegraf.md
@@ -60,7 +60,6 @@ For the configuration method, add the following text to `/etc/telegraf/telegraf.
username = ""
password = ""
data_format = "influx"
- influx_max_line_bytes = 250
```
Then restart telegraf:
diff --git a/docs/en/25-application/03-immigrate.md b/docs/en/25-application/03-immigrate.md
index 9614574c71b0a28853de413ea6928101da899bf7..1aabaa43e77660d72bca00d7d59cdee69b1a7c92 100644
--- a/docs/en/25-application/03-immigrate.md
+++ b/docs/en/25-application/03-immigrate.md
@@ -41,7 +41,7 @@ The agents deployed in the application nodes are responsible for providing opera
- **TDengine installation and deployment**
-First of all, please install TDengine. Download the latest stable version of TDengine from the official website and install it. For help with using various installation packages, please refer to the blog ["Installation and Uninstallation of TDengine Multiple Installation Packages"](https://www.taosdata.com/blog/2019/08/09/566.html).
+First of all, please install TDengine. Download the latest stable version of TDengine from the official website and install it. For help with using various installation packages, please refer to [Install TDengine](../../get-started/package)
Note that once the installation is complete, do not start the `taosd` service before properly configuring the parameters.
@@ -51,7 +51,7 @@ TDengine version 2.4 and later version includes `taosAdapter`. taosAdapter is a
Users can flexibly deploy taosAdapter instances, based on their requirements, to improve data writing throughput and provide guarantees for data writes in different application scenarios.
-Through taosAdapter, users can directly write the data collected by `collectd` or `StatsD` to TDengine to achieve easy, convenient and seamless migration in application scenarios. taosAdapter also supports Telegraf, Icinga, TCollector, and node_exporter data. For more details, please refer to [taosAdapter](/reference/taosadapter/).
+Through taosAdapter, users can directly write the data collected by `collectd` or `StatsD` to TDengine to achieve easy, convenient and seamless migration in application scenarios. taosAdapter also supports Telegraf, Icinga, TCollector, and node_exporter data. For more details, please refer to [taosAdapter](../../reference/taosadapter/).
If using collectd, modify the configuration file in its default location `/etc/collectd/collectd.conf` to point to the IP address and port of the node where to deploy taosAdapter. For example, assuming the taosAdapter IP address is 192.168.1.130 and port 6046, configure it as follows.
@@ -411,7 +411,7 @@ TDengine provides a wealth of help documents to explain many aspects of cluster
### Cluster Deployment
-The first is TDengine installation. Download the latest stable version of TDengine from the official website, and install it. Please refer to the blog ["Installation and Uninstallation of Various Installation Packages of TDengine"](https://www.taosdata.com/blog/2019/08/09/566.html) for the various installation package formats.
+The first is TDengine installation. Download the latest stable version of TDengine from the official website, and install it. Please refer to [Install TDengine](../../get-started/package) for more details.
Note that once the installation is complete, do not immediately start the `taosd` service, but start it after correctly configuring the parameters.
diff --git a/docs/en/27-train-faq/01-faq.md b/docs/en/27-train-faq/01-faq.md
index c10bca1d05edd8cebe451901b3abb91923618a26..733b4184741ec3bdcea5ae5ef4b236493a03be35 100644
--- a/docs/en/27-train-faq/01-faq.md
+++ b/docs/en/27-train-faq/01-faq.md
@@ -1,114 +1,163 @@
---
-sidebar_label: FAQ
title: Frequently Asked Questions
---
## Submit an Issue
-If the tips in FAQ don't help much, please submit an issue on [GitHub](https://github.com/taosdata/TDengine) to describe your problem. In your description please include the TDengine version, hardware and OS information, the steps to reproduce the problem and any other relevant information. It would be very helpful if you can package the contents in `/var/log/taos` and `/etc/taos` and upload. These two are the default directories used by TDengine. If you have changed the default directories in your configuration, please package the files in your configured directories. We recommended setting `debugFlag` to 135 in `taos.cfg`, restarting `taosd`, then reproducing the problem and collecting the logs. If you don't want to restart, an alternative way of setting `debugFlag` is executing `alter dnode debugFlag 135` command in TDengine CLI `taos`. During normal running, however, please make sure `debugFlag` is set to 131.
+If your issue could not be resolved by reviewing this documentation, you can submit your issue on GitHub and receive support from the TDengine Team. When you submit an issue, attach the following directories from your TDengine deployment:
-## Frequently Asked Questions
+1. The directory containing TDengine logs (`/var/log/taos` by default)
+2. The directory containing TDengine configuration files (`/etc/taos` by default)
-### 1. How to upgrade to TDengine 2.0 from older version?
+In your GitHub issue, provide the version of TDengine and the operating system and environment for your deployment, the operations that you performed when the issue occurred, and the time of occurrence and affected tables.
-version 2.x is not compatible with version 1.x. With regard to the configuration and data files, please perform the following steps before upgrading. Please follow data integrity, security, backup and other relevant SOPs, best practices before removing/deleting any data.
+To obtain more debugging information, open `taos.cfg` and set the `debugFlag` parameter to `135`. Then restart TDengine Server and reproduce the issue. The debug-level logs generated help the TDengine Team to resolve your issue. If it is not possible to restart TDengine Server, you can run the following command in the TDengine CLI to set the debug flag:
-1. Delete configuration files: `sudo rm -rf /etc/taos/taos.cfg`
-2. Delete log files: `sudo rm -rf /var/log/taos/`
-3. Delete data files if the data doesn't need to be kept: `sudo rm -rf /var/lib/taos/`
-4. Install latest 2.x version
-5. If the data needs to be kept and migrated to newer version, please contact professional service at TDengine for assistance.
+```
+ alter dnode 'debugFlag' '135';
+```
-### 2. How to handle "Unable to establish connection"?
+You can run the `SHOW DNODES` command to determine the dnode ID.
-When the client is unable to connect to the server, you can try the following ways to troubleshoot and resolve the problem.
+When debugging information is no longer needed, set `debugFlag` to 131.
-1. Check the network
+## Frequently Asked Questions
- - Check if the hosts where the client and server are running are accessible to each other, for example by `ping` command.
- - Check if the TCP/UDP on port 6030-6042 are open for access if firewall is enabled. If possible, disable the firewall for diagnostics, but please ensure that you are following security and other relevant protocols.
- - Check if the FQDN and serverPort are configured correctly in `taos.cfg` used by the server side.
- - Check if the `firstEp` is set properly in the `taos.cfg` used by the client side.
+### 1. What are the best practices for upgrading a previous version of TDengine to version 3.0?
-2. Make sure the client version and server version are same.
+TDengine 3.0 is not compatible with the configuration and data files from previous versions. Before upgrading, perform the following steps:
-3. On server side, check the running status of `taosd` by executing `systemctl status taosd` . If your server is started using another way instead of `systemctl`, use the proper method to check whether the server process is running normally.
+1. Run `sudo rm -rf /etc/taos/taos.cfg` to delete your configuration file.
+2. Run `sudo rm -rf /var/log/taos/` to delete your log files.
+3. Run `sudo rm -rf /var/lib/taos/` to delete your data files.
+4. Install TDengine 3.0.
+5. For assistance in migrating data to TDengine 3.0, contact [TDengine Support](https://tdengine.com/support).
-4. If using connector of Python, Java, Go, Rust, C#, node.JS on Linux to connect to the server, please make sure `libtaos.so` is in directory `/usr/local/taos/driver` and `/usr/local/taos/driver` is in system lib search environment variable `LD_LIBRARY_PATH`.
+### 4. How can I resolve the "Unable to establish connection" error?
-5. If using connector on Windows, please make sure `C:\TDengine\driver\taos.dll` is in your system lib search path. We recommend putting `taos.dll` under `C:\Windows\System32`.
+This error indicates that the client could not connect to the server. Perform the following troubleshooting steps:
-6. Some advanced network diagnostics tools
+1. Check the network.
- - On Linux system tool `nc` can be used to check whether the TCP/UDP can be accessible on a specified port
- Check whether a UDP port is open: `nc -vuz {hostIP} {port} `
- Check whether a TCP port on server side is open: `nc -l {port}`
- Check whether a TCP port on client side is open: `nc {hostIP} {port}`
+ - For machines deployed in the cloud, verify that your security group can access ports 6030 and 6031 (TCP and UDP).
+ - For virtual machines deployed locally, verify that the hosts where the client and server are running are accessible to each other. Do not use localhost as the hostname.
+ - For machines deployed on a corporate network, verify that your NAT configuration allows the server to respond to the client.
- - On Windows system `Test-NetConnection -ComputerName {fqdn} -Port {port}` on PowerShell can be used to check whether the port on server side is open for access.
+2. Verify that the client and server are running the same version of TDengine.
-7. TDengine CLI `taos` can also be used to check network, please refer to [TDengine CLI](/reference/taos-shell).
+3. On the server, run `systemctl status taosd` to verify that taosd is running normally. If taosd is stopped, run `systemctl start taosd`.
-### 3. How to handle "Unexpected generic error in RPC" or "Unable to resolve FQDN" ?
+4. Verify that the client is configured with the correct FQDN for the server.
-This error is caused because the FQDN can't be resolved. Please try following ways:
+5. If the server cannot be reached with the `ping` command, verify that network and DNS or hosts file settings are correct. For a TDengine cluster, the client must be able to ping the FQDN of every node in the cluster.
-1. Check whether the FQDN is configured properly on the server side
-2. If DSN server is configured in the network, please check whether it works; otherwise, check `/etc/hosts` to see whether the FQDN is configured with correct IP
-3. If the network configuration on the server side is OK, try to ping the server from the client side.
-4. If TDengine has been used before with an old hostname then the hostname has been changed, please check `/var/lib/taos/taos/dnode/dnodeEps.json`. Before setting up a new TDengine cluster, it's better to cleanup the directories configured.
+6. Verify that your firewall settings allow all hosts in the cluster to communicate on ports 6030 and 6041 (TCP and UDP). You can run `ufw status` (Ubuntu) or `firewall-cmd --list-port` (CentOS) to check the configuration.
-### 4. "Invalid SQL" is returned even though the Syntax is correct
+7. If you are using the Python, Java, Go, Rust, C#, or Node.js connector on Linux to connect to the server, verify that `libtaos.so` is in the `/usr/local/taos/driver` directory and `/usr/local/taos/driver` is in the `LD_LIBRARY_PATH` environment variable.
-"Invalid SQL" is returned when the length of SQL statement exceeds maximum allowed length or the syntax is not correct.
+8. If you are using Windows, verify that `C:\TDengine\driver\taos.dll` is in the `PATH` environment variable. If possible, move `taos.dll` to the `C:\Windows\System32` directory.
-### 5. Whether validation queries are supported?
+9. On Linux systems, you can use the `nc` tool to check whether a port is accessible:
+ - To check whether a UDP port is open, run `nc -vuz {hostIP} {port}`.
+ - To check whether a TCP port on the server side is open, run `nc -l {port}`.
+ - To check whether a TCP port on client side is open, run `nc {hostIP} {port}`.
-It's suggested to use a builtin database named as `log` to monitor.
+10. On Windows systems, you can run `Test-NetConnection -ComputerName {fqdn} -Port {port}` in PowerShell to check whether a port on the server side is accessible.
-
+11. You can also use the TDengine CLI to diagnose network issues. For more information, see [Problem Diagnostics](https://docs.tdengine.com/operation/diagnose/).
-### 6. Can I delete a record?
+### 5. How can I resolve the "Unable to resolve FQDN" error?
-From version 2.6.0.0 Enterprise version, deleting data can be supported.
+Clients and dnodes must be able to resolve the FQDN of each required node. You can confirm your configuration as follows:
-### 7. How to create a table of over 1024 columns?
+1. Verify that the FQDN is configured properly on the server.
+2. If your network has a DNS server, verify that it is operational.
+3. If your network does not have a DNS server, verify that the FQDNs in the `hosts` file are correct.
+4. On the client, use the `ping` command to test your connection to the server. If you cannot ping an FQDN, TDengine cannot reach it.
+5. If TDengine has been previously installed and the `hostname` was modified, open `dnode.json` in the `data` folder and verify that the endpoint configuration is correct. The default location of the dnode file is `/var/lib/taos/dnode`. Ensure that you clean up previous installations before reinstalling TDengine.
+6. Confirm whether FQDNs are preconfigured in `/etc/hosts` and `/etc/hostname`.
-From version 2.1.7.0, at most 4096 columns can be defined for a table.
+### 6. What is the most effective way to write data to TDengine?
-### 8. How to improve the efficiency of inserting data?
+Writing data in batches provides higher efficiency in most situations. You can insert one or more data records into one or more tables in a single SQL statement.
-Inserting data in batch is a good practice. Single SQL statement can insert data for one or multiple tables in batch.
+### 9. Why are table names not fully displayed?
-### 9. JDBC Error: the executed SQL is not a DML or a DDL?
+The number of columns in the TDengine CLI terminal display is limited. This can cause table names to be cut off, and if you use an incomplete name in a statement, the "Table does not exist" error will occur. You can increase the display size with the `maxBinaryDisplayWidth` parameter or the SQL statement `set max_binary_display_width`. You can also append `\G` to your SQL statement to bypass this limitation.
-Please upgrade to latest JDBC driver, for details please refer to [Java Connector](/reference/connector/java)
+### 10. How can I migrate data?
-### 10. Failed to connect with error "invalid timestamp"
+In TDengine, the `hostname` uniquely identifies a machine. When you move data files to a new machine, you must configure the new machine to have the same `host name` as the original machine.
-The most common reason is that the time setting is not aligned on the client side and the server side. On Linux system, please use `ntpdate` command. On Windows system, please enable automatic sync in system time setting.
+:::note
-### 11. Table name is not shown in full
+The data structure of previous versions of TDengine is not compatible with version 3.0. To migrate from TDengine 1.x or 2.x to 3.0, you must export data from your older deployment and import it back into TDengine 3.0.
-There is a display width setting in TDengine CLI `taos`. It can be controlled by configuration parameter `maxBinaryDisplayWidth`, or can be set using SQL command `set max_binary_display_width`. A more convenient way is to append `\G` in a SQL command to bypass this limitation.
+:::
-### 12. How to change log level temporarily?
+### 11. How can I temporary change the log level from the TDengine Client?
-Below SQL command can be used to adjust log level temporarily
+To change the log level for debugging purposes, you can use the following command:
```sql
-ALTER LOCAL flag_name flag_value;
+ALTER LOCAL local_option
+
+local_option: {
+ 'resetLog'
+ | 'rpcDebugFlag' value
+ | 'tmrDebugFlag' value
+ | 'cDebugFlag' value
+ | 'uDebugFlag' value
+ | 'debugFlag' value
+}
```
- - flag_name can be: debugFlag,cDebugFlag,tmrDebugFlag,uDebugFlag,rpcDebugFlag
- - flag_value can be: 131 (INFO/WARNING/ERROR), 135 (plus DEBUG), 143 (plus TRACE)
-
+Use `resetlog` to remove all logs generated on the local client. Use the other parameters to specify a log level for a specific component.
-### 13. What to do if go compilation fails?
+For each parameter, you can set the value to `131` (error and warning), `135` (error, warning, and debug), or `143` (error, warning, debug, and trace).
-From version 2.3.0.0, a new component named `taosAdapter` is introduced. Its' developed in Go. If you want to compile from source code and meet go compilation problems, try to do below steps to resolve Go environment problems.
+### Why do TDengine components written in Go fail to compile?
-```sh
-go env -w GO111MODULE=on
-go env -w GOPROXY=https://goproxy.cn,direct
-```
+TDengine includes taosAdapter, an independent component written in Go. This component provides the REST API as well as data access for other products such as Prometheus and Telegraf.
+When using the develop branch, you must run `git submodule update --init --recursive` to download the taosAdapter repository and then compile it.
+
+TDengine Go components require Go version 1.14 or later.
+
+### 13. How can I query the storage space being used by my data?
+
+The TDengine data files are stored in `/var/lib/taos` by default. Log files are stored in `/var/log/taos`.
+
+To see how much space your data files occupy, run `du -sh /var/lib/taos/vnode --exclude='wal'`. This excludes the write-ahead log (WAL) because its size is relatively fixed while writes are occurring, and it is written to disk and cleared when you shut down TDengine.
+
+If you want to see how much space is occupied by a single database, first determine which vgroup is storing the database by running `show vgroups`. Then check `/var/lib/taos/vnode` for the files associated with the vgroup ID.
+
+### 15. How is timezone information processed for timestamps?
+
+TDengine uses the timezone of the client for timestamps. The server timezone does not affect timestamps. The client converts Unix timestamps in SQL statements to UTC before sending them to the server. When you query data on the server, it provides timestamps in UTC to the client, which converts them to its local time.
+
+Timestamps are processed as follows:
+
+1. The client uses its system timezone unless it has been configured otherwise.
+2. A timezone configured in `taos.cfg` takes precedence over the system timezone.
+3. A timezone explicitly specified when establishing a connection to TDengine through a connector takes precedence over `taos.cfg` and the system timezone. For example, the Java connector allows you to specify a timezone in the JDBC URL.
+4. If you use an RFC 3339 timestamp (2013-04-12T15:52:01.123+08:00), or an ISO 8601 timestamp (2013-04-12T15:52:01.123+0800), the timezone specified in the timestamp is used instead of the timestamps configured using any other method.
+
+### 16. Which network ports are required by TDengine?
+
+See [serverPort](https://docs.tdengine.com/reference/config/#serverport) in Configuration Parameters.
+
+Note that ports are specified using 6030 as the default first port. If you change this port, all other ports change as well.
+
+### 17. Why do applications such as Grafana fail to connect to TDengine over the REST API?
+
+In TDengine, the REST API is provided by taosAdapter. Ensure that taosAdapter is running before you connect an application to TDengine over the REST API. You can run `systemctl start taosadapter` to start the service.
+
+Note that the log path for taosAdapter must be configured separately. The default path is `/var/log/taos`. You can choose one of eight log levels. The default is `info`. You can set the log level to `panic` to disable log output. You can modify the taosAdapter configuration file to change these settings. The default location is `/etc/taos/taosadapter.toml`.
+
+For more information, see [taosAdapter](https://docs.tdengine.com/reference/taosadapter/).
+
+### 18. How can I resolve out-of-memory (OOM) errors?
+
+OOM errors are thrown by the operating system when its memory, including swap, becomes insufficient and it needs to terminate processes to remain operational. Most OOM errors in TDengine occur for one of the following reasons: free memory is less than the value of `vm.min_free_kbytes` or free memory is less than the size of the request. If TDengine occupies reserved memory, an OOM error can occur even when free memory is sufficient.
+
+TDengine preallocates memory to each vnode. The number of vnodes per database is determined by the `vgroups` parameter, and the amount of memory per vnode is determined by the `buffer` parameter. To prevent OOM errors from occurring, ensure that you prepare sufficient memory on your hosts to support the number of vnodes that your deployment requires. Configure an appropriately sized swap space. If you continue to receive OOM errors, your SQL statements may be querying too much data for your system. TDengine Enterprise Edition includes optimized memory management that increases stability for enterprise customers.
diff --git a/docs/en/28-releases.md b/docs/en/28-releases.md
deleted file mode 100644
index a0c9eb119999571fb973b5e2243f237b8833b167..0000000000000000000000000000000000000000
--- a/docs/en/28-releases.md
+++ /dev/null
@@ -1,9 +0,0 @@
----
-sidebar_label: Releases
-title: Released Versions
----
-
-import Release from "/components/ReleaseV3";
-
-
-
diff --git a/docs/en/28-releases/01-tdengine.md b/docs/en/28-releases/01-tdengine.md
new file mode 100644
index 0000000000000000000000000000000000000000..a65a2fff632fd176fb236389b1133e9e3625f4b5
--- /dev/null
+++ b/docs/en/28-releases/01-tdengine.md
@@ -0,0 +1,24 @@
+---
+sidebar_label: TDengine
+title: TDengine
+description: TDengine release history, Release Notes and download links.
+---
+
+import Release from "/components/ReleaseV3";
+
+## 3.0.1.3
+
+
+
+## 3.0.1.2
+
+
+
+## 3.0.1.1
+
+
+
+## 3.0.1.0
+
+
+
diff --git a/docs/en/28-releases/02-tools.md b/docs/en/28-releases/02-tools.md
new file mode 100644
index 0000000000000000000000000000000000000000..a83723bff7e98e18c6be9ed3ad8f19f3b91c40da
--- /dev/null
+++ b/docs/en/28-releases/02-tools.md
@@ -0,0 +1,23 @@
+---
+sidebar_label: taosTools
+title: taosTools
+description: taosTools release history, Release Notes, download links.
+---
+
+import Release from "/components/ReleaseV3";
+
+## 2.2.3
+
+
+
+## 2.2.2
+
+
+
+## 2.2.0
+
+
+
+## 2.1.3
+
+
diff --git a/docs/en/28-releases/_category_.yml b/docs/en/28-releases/_category_.yml
new file mode 100644
index 0000000000000000000000000000000000000000..c1638352061f083c3b283d7517466fef5abdc9ea
--- /dev/null
+++ b/docs/en/28-releases/_category_.yml
@@ -0,0 +1 @@
+label: Releases
\ No newline at end of file
diff --git a/docs/examples/csharp/.gitignore b/docs/examples/csharp/.gitignore
index b3aff79f3706e23aa74199a7f521f7912d2b0e45..c228f1be2a4b210c9ec11dc31b12571c24f7f0f5 100644
--- a/docs/examples/csharp/.gitignore
+++ b/docs/examples/csharp/.gitignore
@@ -1,4 +1,27 @@
-bin
-obj
.vs
-*.sln
\ No newline at end of file
+asyncQuery/bin
+connect/bin
+influxdbLine/bin
+optsJSON/bin
+optsTelnet/bin
+query/bin
+sqlInsert/bin
+stmtInsert/bin
+subscribe/bin
+wsConnect/bin
+wsInsert/bin
+wsQuery/bin
+wsStmt/bin
+asyncQuery/obj
+connect/obj
+influxdbLine/obj
+optsJSON/obj
+optsTelnet/obj
+query/obj
+sqlInsert/obj
+stmtInsert/obj
+subscribe/obj
+wsConnect/obj
+wsInsert/obj
+wsQuery/obj
+wsStmt/obj
\ No newline at end of file
diff --git a/docs/examples/csharp/QueryExample.cs b/docs/examples/csharp/QueryExample.cs
deleted file mode 100644
index d75bb8d6611f5b3899485eb1a63a42ed6995847d..0000000000000000000000000000000000000000
--- a/docs/examples/csharp/QueryExample.cs
+++ /dev/null
@@ -1,82 +0,0 @@
-using TDengineDriver;
-using TDengineDriver.Impl;
-using System.Runtime.InteropServices;
-
-namespace TDengineExample
-{
- internal class QueryExample
- {
- static void Main()
- {
- IntPtr conn = GetConnection();
- // run query
- IntPtr res = TDengine.Query(conn, "SELECT * FROM meters LIMIT 2");
- if (TDengine.ErrorNo(res) != 0)
- {
- Console.WriteLine("Failed to query since: " + TDengine.Error(res));
- TDengine.Close(conn);
- TDengine.Cleanup();
- return;
- }
-
- // get filed count
- int fieldCount = TDengine.FieldCount(res);
- Console.WriteLine("fieldCount=" + fieldCount);
-
- // print column names
- List metas = LibTaos.GetMeta(res);
- for (int i = 0; i < metas.Count; i++)
- {
- Console.Write(metas[i].name + "\t");
- }
- Console.WriteLine();
-
- // print values
- List resData = LibTaos.GetData(res);
- for (int i = 0; i < resData.Count; i++)
- {
- Console.Write($"|{resData[i].ToString()} \t");
- if (((i + 1) % metas.Count == 0))
- {
- Console.WriteLine("");
- }
- }
- Console.WriteLine();
-
- if (TDengine.ErrorNo(res) != 0)
- {
- Console.WriteLine($"Query is not complete, Error {TDengine.ErrorNo(res)} {TDengine.Error(res)}");
- }
- // exit
- TDengine.FreeResult(res);
- TDengine.Close(conn);
- TDengine.Cleanup();
- }
- static IntPtr GetConnection()
- {
- string host = "localhost";
- short port = 6030;
- string username = "root";
- string password = "taosdata";
- string dbname = "power";
- var conn = TDengine.Connect(host, username, password, dbname, port);
- if (conn == IntPtr.Zero)
- {
- Console.WriteLine("Connect to TDengine failed");
- System.Environment.Exit(0);
- }
- else
- {
- Console.WriteLine("Connect to TDengine success");
- }
- return conn;
- }
- }
-}
-
-// output:
-// Connect to TDengine success
-// fieldCount=6
-// ts current voltage phase location groupid
-// 1648432611249 10.3 219 0.31 California.SanFrancisco 2
-// 1648432611749 12.6 218 0.33 California.SanFrancisco 2
\ No newline at end of file
diff --git a/docs/examples/csharp/SQLInsertExample.cs b/docs/examples/csharp/SQLInsertExample.cs
deleted file mode 100644
index 192ea96d5713bbf7f37f2208687c41e3e66d473b..0000000000000000000000000000000000000000
--- a/docs/examples/csharp/SQLInsertExample.cs
+++ /dev/null
@@ -1,69 +0,0 @@
-using TDengineDriver;
-
-
-namespace TDengineExample
-{
- internal class SQLInsertExample
- {
-
- static void Main()
- {
- IntPtr conn = GetConnection();
- IntPtr res = TDengine.Query(conn, "CREATE DATABASE power");
- CheckRes(conn, res, "failed to create database");
- res = TDengine.Query(conn, "USE power");
- CheckRes(conn, res, "failed to change database");
- res = TDengine.Query(conn, "CREATE STABLE power.meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS (location BINARY(64), groupId INT)");
- CheckRes(conn, res, "failed to create stable");
- var sql = "INSERT INTO d1001 USING meters TAGS('California.SanFrancisco', 2) VALUES ('2018-10-03 14:38:05.000', 10.30000, 219, 0.31000) ('2018-10-03 14:38:15.000', 12.60000, 218, 0.33000) ('2018-10-03 14:38:16.800', 12.30000, 221, 0.31000) " +
- "d1002 USING power.meters TAGS('California.SanFrancisco', 3) VALUES('2018-10-03 14:38:16.650', 10.30000, 218, 0.25000) " +
- "d1003 USING power.meters TAGS('California.LosAngeles', 2) VALUES('2018-10-03 14:38:05.500', 11.80000, 221, 0.28000)('2018-10-03 14:38:16.600', 13.40000, 223, 0.29000) " +
- "d1004 USING power.meters TAGS('California.LosAngeles', 3) VALUES('2018-10-03 14:38:05.000', 10.80000, 223, 0.29000)('2018-10-03 14:38:06.500', 11.50000, 221, 0.35000)";
- res = TDengine.Query(conn, sql);
- CheckRes(conn, res, "failed to insert data");
- int affectedRows = TDengine.AffectRows(res);
- Console.WriteLine("affectedRows " + affectedRows);
- ExitProgram(conn, 0);
- }
-
- static IntPtr GetConnection()
- {
- string host = "localhost";
- short port = 6030;
- string username = "root";
- string password = "taosdata";
- string dbname = "";
- var conn = TDengine.Connect(host, username, password, dbname, port);
- if (conn == IntPtr.Zero)
- {
- Console.WriteLine("Connect to TDengine failed");
- Environment.Exit(0);
- }
- else
- {
- Console.WriteLine("Connect to TDengine success");
- }
- return conn;
- }
-
- static void CheckRes(IntPtr conn, IntPtr res, String errorMsg)
- {
- if (TDengine.ErrorNo(res) != 0)
- {
- Console.Write(errorMsg + " since: " + TDengine.Error(res));
- ExitProgram(conn, 1);
- }
- }
-
- static void ExitProgram(IntPtr conn, int exitCode)
- {
- TDengine.Close(conn);
- TDengine.Cleanup();
- Environment.Exit(exitCode);
- }
- }
-}
-
-// output:
-// Connect to TDengine success
-// affectedRows 8
diff --git a/docs/examples/csharp/AsyncQueryExample.cs b/docs/examples/csharp/asyncQuery/Program.cs
similarity index 81%
rename from docs/examples/csharp/AsyncQueryExample.cs
rename to docs/examples/csharp/asyncQuery/Program.cs
index 0d47325932e2f01fec8d55cfdb64c636258f4a03..864f06a15e5d7c9fb8fcfb25c81915e3f2e13f9d 100644
--- a/docs/examples/csharp/AsyncQueryExample.cs
+++ b/docs/examples/csharp/asyncQuery/Program.cs
@@ -11,11 +11,17 @@ namespace TDengineExample
static void Main()
{
IntPtr conn = GetConnection();
- QueryAsyncCallback queryAsyncCallback = new QueryAsyncCallback(QueryCallback);
- TDengine.QueryAsync(conn, "select * from meters", queryAsyncCallback, IntPtr.Zero);
- Thread.Sleep(2000);
- TDengine.Close(conn);
- TDengine.Cleanup();
+ try
+ {
+ QueryAsyncCallback queryAsyncCallback = new QueryAsyncCallback(QueryCallback);
+ TDengine.QueryAsync(conn, "select * from meters", queryAsyncCallback, IntPtr.Zero);
+ Thread.Sleep(2000);
+ }
+ finally
+ {
+ TDengine.Close(conn);
+ }
+
}
static void QueryCallback(IntPtr param, IntPtr taosRes, int code)
@@ -27,11 +33,11 @@ namespace TDengineExample
}
else
{
- Console.WriteLine($"async query data failed, failed code {code}");
+ throw new Exception($"async query data failed,code:{code},reason:{TDengine.Error(taosRes)}");
}
}
- // Iteratively call this interface until "numOfRows" is no greater than 0.
+ // Iteratively call this interface until "numOfRows" is no greater than 0.
static void FetchRawBlockCallback(IntPtr param, IntPtr taosRes, int numOfRows)
{
if (numOfRows > 0)
@@ -43,7 +49,7 @@ namespace TDengineExample
for (int i = 0; i < dataList.Count; i++)
{
- if (i != 0 && (i+1) % metaList.Count == 0)
+ if (i != 0 && (i + 1) % metaList.Count == 0)
{
Console.WriteLine("{0}\t|", dataList[i]);
}
@@ -63,7 +69,7 @@ namespace TDengineExample
}
else
{
- Console.WriteLine($"FetchRawBlockCallback callback error, error code {numOfRows}");
+ throw new Exception($"FetchRawBlockCallback callback error, error code {numOfRows}");
}
TDengine.FreeResult(taosRes);
}
@@ -79,8 +85,7 @@ namespace TDengineExample
var conn = TDengine.Connect(host, username, password, dbname, port);
if (conn == IntPtr.Zero)
{
- Console.WriteLine("Connect to TDengine failed");
- Environment.Exit(0);
+ throw new Exception("Connect to TDengine failed");
}
else
{
diff --git a/docs/examples/csharp/asyncquery.csproj b/docs/examples/csharp/asyncQuery/asyncquery.csproj
similarity index 98%
rename from docs/examples/csharp/asyncquery.csproj
rename to docs/examples/csharp/asyncQuery/asyncquery.csproj
index 045969edd7febbd11cc6577c8ba958669a5a7e3b..23e590cd25aa88e58cabf81717a6baf320f447bc 100644
--- a/docs/examples/csharp/asyncquery.csproj
+++ b/docs/examples/csharp/asyncQuery/asyncquery.csproj
@@ -9,7 +9,7 @@
-
+
diff --git a/docs/examples/csharp/ConnectExample.cs b/docs/examples/csharp/connect/Program.cs
similarity index 90%
rename from docs/examples/csharp/ConnectExample.cs
rename to docs/examples/csharp/connect/Program.cs
index f3548ee65daab8a59695499339a8f89b0aa33a10..955db40c7c80e60350f9c0e8c6f50e7eb85246c2 100644
--- a/docs/examples/csharp/ConnectExample.cs
+++ b/docs/examples/csharp/connect/Program.cs
@@ -16,7 +16,7 @@ namespace TDengineExample
var conn = TDengine.Connect(host, username, password, dbname, port);
if (conn == IntPtr.Zero)
{
- Console.WriteLine("Connect to TDengine failed");
+ throw new Exception("Connect to TDengine failed");
}
else
{
diff --git a/docs/examples/csharp/connect.csproj b/docs/examples/csharp/connect/connect.csproj
similarity index 100%
rename from docs/examples/csharp/connect.csproj
rename to docs/examples/csharp/connect/connect.csproj
diff --git a/docs/examples/csharp/csharp.sln b/docs/examples/csharp/csharp.sln
new file mode 100644
index 0000000000000000000000000000000000000000..560dde55cbddd4e7928598e7dd940c2721bd7b9c
--- /dev/null
+++ b/docs/examples/csharp/csharp.sln
@@ -0,0 +1,94 @@
+
+Microsoft Visual Studio Solution File, Format Version 12.00
+# Visual Studio Version 16
+VisualStudioVersion = 16.0.30114.105
+MinimumVisualStudioVersion = 10.0.40219.1
+Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "asyncquery", "asyncQuery\asyncquery.csproj", "{E2A5F00C-14E7-40E1-A2DE-6AB2975616D3}"
+EndProject
+Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "connect", "connect\connect.csproj", "{CCC5042D-93FC-4AE0-B2F6-7E692FD476B7}"
+EndProject
+Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "influxdbline", "influxdbLine\influxdbline.csproj", "{6A24FB80-1E3C-4E2D-A5AB-914FA583874D}"
+EndProject
+Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "optsJSON", "optsJSON\optsJSON.csproj", "{6725A961-0C66-4196-AC98-8D3F3D757D6C}"
+EndProject
+Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "optstelnet", "optsTelnet\optstelnet.csproj", "{B3B50D25-688B-44D4-8683-482ABC52FFCA}"
+EndProject
+Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "query", "query\query.csproj", "{F2B7D13B-FE04-4C5C-BB6D-C12E0A9D9970}"
+EndProject
+Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "stmtinsert", "stmtInsert\stmtinsert.csproj", "{B40D6BED-BE3C-4B44-9B12-28BE441311BA}"
+EndProject
+Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "subscribe", "subscribe\subscribe.csproj", "{C3D45A8E-AFC0-4547-9F3C-467B0B583DED}"
+EndProject
+Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "wsConnect", "wsConnect\wsConnect.csproj", "{51E19494-845E-49ED-97C7-749AE63111BD}"
+EndProject
+Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "wsInsert", "wsInsert\wsInsert.csproj", "{13E2233B-4AFF-40D9-AF42-AB3F01617540}"
+EndProject
+Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "wsQuery", "wsQuery\wsQuery.csproj", "{0F394169-C456-442C-929D-C2D43A0EEC7B}"
+EndProject
+Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "wsStmt", "wsStmt\wsStmt.csproj", "{27B9C9AB-9055-4BF2-8A14-4E59F09D5985}"
+EndProject
+Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "sqlinsert", "sqlInsert\sqlinsert.csproj", "{CD24BD12-8550-4627-A11D-707B446F48C3}"
+EndProject
+Global
+ GlobalSection(SolutionConfigurationPlatforms) = preSolution
+ Debug|Any CPU = Debug|Any CPU
+ Release|Any CPU = Release|Any CPU
+ EndGlobalSection
+ GlobalSection(SolutionProperties) = preSolution
+ HideSolutionNode = FALSE
+ EndGlobalSection
+ GlobalSection(ProjectConfigurationPlatforms) = postSolution
+ {E2A5F00C-14E7-40E1-A2DE-6AB2975616D3}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
+ {E2A5F00C-14E7-40E1-A2DE-6AB2975616D3}.Debug|Any CPU.Build.0 = Debug|Any CPU
+ {E2A5F00C-14E7-40E1-A2DE-6AB2975616D3}.Release|Any CPU.ActiveCfg = Release|Any CPU
+ {E2A5F00C-14E7-40E1-A2DE-6AB2975616D3}.Release|Any CPU.Build.0 = Release|Any CPU
+ {CCC5042D-93FC-4AE0-B2F6-7E692FD476B7}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
+ {CCC5042D-93FC-4AE0-B2F6-7E692FD476B7}.Debug|Any CPU.Build.0 = Debug|Any CPU
+ {CCC5042D-93FC-4AE0-B2F6-7E692FD476B7}.Release|Any CPU.ActiveCfg = Release|Any CPU
+ {CCC5042D-93FC-4AE0-B2F6-7E692FD476B7}.Release|Any CPU.Build.0 = Release|Any CPU
+ {6A24FB80-1E3C-4E2D-A5AB-914FA583874D}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
+ {6A24FB80-1E3C-4E2D-A5AB-914FA583874D}.Debug|Any CPU.Build.0 = Debug|Any CPU
+ {6A24FB80-1E3C-4E2D-A5AB-914FA583874D}.Release|Any CPU.ActiveCfg = Release|Any CPU
+ {6A24FB80-1E3C-4E2D-A5AB-914FA583874D}.Release|Any CPU.Build.0 = Release|Any CPU
+ {6725A961-0C66-4196-AC98-8D3F3D757D6C}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
+ {6725A961-0C66-4196-AC98-8D3F3D757D6C}.Debug|Any CPU.Build.0 = Debug|Any CPU
+ {6725A961-0C66-4196-AC98-8D3F3D757D6C}.Release|Any CPU.ActiveCfg = Release|Any CPU
+ {6725A961-0C66-4196-AC98-8D3F3D757D6C}.Release|Any CPU.Build.0 = Release|Any CPU
+ {B3B50D25-688B-44D4-8683-482ABC52FFCA}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
+ {B3B50D25-688B-44D4-8683-482ABC52FFCA}.Debug|Any CPU.Build.0 = Debug|Any CPU
+ {B3B50D25-688B-44D4-8683-482ABC52FFCA}.Release|Any CPU.ActiveCfg = Release|Any CPU
+ {B3B50D25-688B-44D4-8683-482ABC52FFCA}.Release|Any CPU.Build.0 = Release|Any CPU
+ {F2B7D13B-FE04-4C5C-BB6D-C12E0A9D9970}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
+ {F2B7D13B-FE04-4C5C-BB6D-C12E0A9D9970}.Debug|Any CPU.Build.0 = Debug|Any CPU
+ {F2B7D13B-FE04-4C5C-BB6D-C12E0A9D9970}.Release|Any CPU.ActiveCfg = Release|Any CPU
+ {F2B7D13B-FE04-4C5C-BB6D-C12E0A9D9970}.Release|Any CPU.Build.0 = Release|Any CPU
+ {B40D6BED-BE3C-4B44-9B12-28BE441311BA}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
+ {B40D6BED-BE3C-4B44-9B12-28BE441311BA}.Debug|Any CPU.Build.0 = Debug|Any CPU
+ {B40D6BED-BE3C-4B44-9B12-28BE441311BA}.Release|Any CPU.ActiveCfg = Release|Any CPU
+ {B40D6BED-BE3C-4B44-9B12-28BE441311BA}.Release|Any CPU.Build.0 = Release|Any CPU
+ {C3D45A8E-AFC0-4547-9F3C-467B0B583DED}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
+ {C3D45A8E-AFC0-4547-9F3C-467B0B583DED}.Debug|Any CPU.Build.0 = Debug|Any CPU
+ {C3D45A8E-AFC0-4547-9F3C-467B0B583DED}.Release|Any CPU.ActiveCfg = Release|Any CPU
+ {C3D45A8E-AFC0-4547-9F3C-467B0B583DED}.Release|Any CPU.Build.0 = Release|Any CPU
+ {51E19494-845E-49ED-97C7-749AE63111BD}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
+ {51E19494-845E-49ED-97C7-749AE63111BD}.Debug|Any CPU.Build.0 = Debug|Any CPU
+ {51E19494-845E-49ED-97C7-749AE63111BD}.Release|Any CPU.ActiveCfg = Release|Any CPU
+ {51E19494-845E-49ED-97C7-749AE63111BD}.Release|Any CPU.Build.0 = Release|Any CPU
+ {13E2233B-4AFF-40D9-AF42-AB3F01617540}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
+ {13E2233B-4AFF-40D9-AF42-AB3F01617540}.Debug|Any CPU.Build.0 = Debug|Any CPU
+ {13E2233B-4AFF-40D9-AF42-AB3F01617540}.Release|Any CPU.ActiveCfg = Release|Any CPU
+ {13E2233B-4AFF-40D9-AF42-AB3F01617540}.Release|Any CPU.Build.0 = Release|Any CPU
+ {0F394169-C456-442C-929D-C2D43A0EEC7B}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
+ {0F394169-C456-442C-929D-C2D43A0EEC7B}.Debug|Any CPU.Build.0 = Debug|Any CPU
+ {0F394169-C456-442C-929D-C2D43A0EEC7B}.Release|Any CPU.ActiveCfg = Release|Any CPU
+ {0F394169-C456-442C-929D-C2D43A0EEC7B}.Release|Any CPU.Build.0 = Release|Any CPU
+ {27B9C9AB-9055-4BF2-8A14-4E59F09D5985}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
+ {27B9C9AB-9055-4BF2-8A14-4E59F09D5985}.Debug|Any CPU.Build.0 = Debug|Any CPU
+ {27B9C9AB-9055-4BF2-8A14-4E59F09D5985}.Release|Any CPU.ActiveCfg = Release|Any CPU
+ {27B9C9AB-9055-4BF2-8A14-4E59F09D5985}.Release|Any CPU.Build.0 = Release|Any CPU
+ {CD24BD12-8550-4627-A11D-707B446F48C3}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
+ {CD24BD12-8550-4627-A11D-707B446F48C3}.Debug|Any CPU.Build.0 = Debug|Any CPU
+ {CD24BD12-8550-4627-A11D-707B446F48C3}.Release|Any CPU.ActiveCfg = Release|Any CPU
+ {CD24BD12-8550-4627-A11D-707B446F48C3}.Release|Any CPU.Build.0 = Release|Any CPU
+ EndGlobalSection
+EndGlobal
diff --git a/docs/examples/csharp/InfluxDBLineExample.cs b/docs/examples/csharp/influxdbLine/Program.cs
similarity index 73%
rename from docs/examples/csharp/InfluxDBLineExample.cs
rename to docs/examples/csharp/influxdbLine/Program.cs
index 7b4453f4ac0b14dd76d166e395bdacb46a5d3fbc..fa3cb21fe04977b5081c922d623dee5514056770 100644
--- a/docs/examples/csharp/InfluxDBLineExample.cs
+++ b/docs/examples/csharp/influxdbLine/Program.cs
@@ -17,8 +17,7 @@ namespace TDengineExample
IntPtr res = TDengine.SchemalessInsert(conn, lines, lines.Length, (int)TDengineSchemalessProtocol.TSDB_SML_LINE_PROTOCOL, (int)TDengineSchemalessPrecision.TSDB_SML_TIMESTAMP_MILLI_SECONDS);
if (TDengine.ErrorNo(res) != 0)
{
- Console.WriteLine("SchemalessInsert failed since " + TDengine.Error(res));
- ExitProgram(conn, 1);
+ throw new Exception("SchemalessInsert failed since " + TDengine.Error(res));
}
else
{
@@ -26,7 +25,6 @@ namespace TDengineExample
Console.WriteLine($"SchemalessInsert success, affected {affectedRows} rows");
}
TDengine.FreeResult(res);
- ExitProgram(conn, 0);
}
static IntPtr GetConnection()
@@ -39,9 +37,7 @@ namespace TDengineExample
var conn = TDengine.Connect(host, username, password, dbname, port);
if (conn == IntPtr.Zero)
{
- Console.WriteLine("Connect to TDengine failed");
- TDengine.Cleanup();
- Environment.Exit(1);
+ throw new Exception("Connect to TDengine failed");
}
else
{
@@ -55,23 +51,15 @@ namespace TDengineExample
IntPtr res = TDengine.Query(conn, "CREATE DATABASE test");
if (TDengine.ErrorNo(res) != 0)
{
- Console.WriteLine("failed to create database, reason: " + TDengine.Error(res));
- ExitProgram(conn, 1);
+ throw new Exception("failed to create database, reason: " + TDengine.Error(res));
}
res = TDengine.Query(conn, "USE test");
if (TDengine.ErrorNo(res) != 0)
{
- Console.WriteLine("failed to change database, reason: " + TDengine.Error(res));
- ExitProgram(conn, 1);
+ throw new Exception("failed to change database, reason: " + TDengine.Error(res));
}
}
- static void ExitProgram(IntPtr conn, int exitCode)
- {
- TDengine.Close(conn);
- TDengine.Cleanup();
- Environment.Exit(exitCode);
- }
}
}
diff --git a/docs/examples/csharp/influxdbline.csproj b/docs/examples/csharp/influxdbLine/influxdbline.csproj
similarity index 100%
rename from docs/examples/csharp/influxdbline.csproj
rename to docs/examples/csharp/influxdbLine/influxdbline.csproj
diff --git a/docs/examples/csharp/OptsJsonExample.cs b/docs/examples/csharp/optsJSON/Program.cs
similarity index 53%
rename from docs/examples/csharp/OptsJsonExample.cs
rename to docs/examples/csharp/optsJSON/Program.cs
index 2c41acc5c9628befda7eb4ad5c30af5b921de948..b67b5af62bf0a1fd9028125da0b665f723f2e4ec 100644
--- a/docs/examples/csharp/OptsJsonExample.cs
+++ b/docs/examples/csharp/optsJSON/Program.cs
@@ -7,27 +7,31 @@ namespace TDengineExample
static void Main()
{
IntPtr conn = GetConnection();
- PrepareDatabase(conn);
- string[] lines = { "[{\"metric\": \"meters.current\", \"timestamp\": 1648432611249, \"value\": 10.3, \"tags\": {\"location\": \"California.SanFrancisco\", \"groupid\": 2}}," +
+ try
+ {
+ PrepareDatabase(conn);
+ string[] lines = { "[{\"metric\": \"meters.current\", \"timestamp\": 1648432611249, \"value\": 10.3, \"tags\": {\"location\": \"California.SanFrancisco\", \"groupid\": 2}}," +
" {\"metric\": \"meters.voltage\", \"timestamp\": 1648432611249, \"value\": 219, \"tags\": {\"location\": \"California.LosAngeles\", \"groupid\": 1}}, " +
"{\"metric\": \"meters.current\", \"timestamp\": 1648432611250, \"value\": 12.6, \"tags\": {\"location\": \"California.SanFrancisco\", \"groupid\": 2}}," +
" {\"metric\": \"meters.voltage\", \"timestamp\": 1648432611250, \"value\": 221, \"tags\": {\"location\": \"California.LosAngeles\", \"groupid\": 1}}]"
};
- IntPtr res = TDengine.SchemalessInsert(conn, lines, 1, (int)TDengineSchemalessProtocol.TSDB_SML_JSON_PROTOCOL, (int)TDengineSchemalessPrecision.TSDB_SML_TIMESTAMP_NOT_CONFIGURED);
- if (TDengine.ErrorNo(res) != 0)
- {
- Console.WriteLine("SchemalessInsert failed since " + TDengine.Error(res));
- ExitProgram(conn, 1);
+ IntPtr res = TDengine.SchemalessInsert(conn, lines, 1, (int)TDengineSchemalessProtocol.TSDB_SML_JSON_PROTOCOL, (int)TDengineSchemalessPrecision.TSDB_SML_TIMESTAMP_NOT_CONFIGURED);
+ if (TDengine.ErrorNo(res) != 0)
+ {
+ throw new Exception("SchemalessInsert failed since " + TDengine.Error(res));
+ }
+ else
+ {
+ int affectedRows = TDengine.AffectRows(res);
+ Console.WriteLine($"SchemalessInsert success, affected {affectedRows} rows");
+ }
+ TDengine.FreeResult(res);
}
- else
+ finally
{
- int affectedRows = TDengine.AffectRows(res);
- Console.WriteLine($"SchemalessInsert success, affected {affectedRows} rows");
+ TDengine.Close(conn);
}
- TDengine.FreeResult(res);
- ExitProgram(conn, 0);
-
}
static IntPtr GetConnection()
{
@@ -39,9 +43,7 @@ namespace TDengineExample
var conn = TDengine.Connect(host, username, password, dbname, port);
if (conn == IntPtr.Zero)
{
- Console.WriteLine("Connect to TDengine failed");
- TDengine.Cleanup();
- Environment.Exit(1);
+ throw new Exception("Connect to TDengine failed");
}
else
{
@@ -55,22 +57,13 @@ namespace TDengineExample
IntPtr res = TDengine.Query(conn, "CREATE DATABASE test");
if (TDengine.ErrorNo(res) != 0)
{
- Console.WriteLine("failed to create database, reason: " + TDengine.Error(res));
- ExitProgram(conn, 1);
+ throw new Exception("failed to create database, reason: " + TDengine.Error(res));
}
res = TDengine.Query(conn, "USE test");
if (TDengine.ErrorNo(res) != 0)
{
- Console.WriteLine("failed to change database, reason: " + TDengine.Error(res));
- ExitProgram(conn, 1);
+ throw new Exception("failed to change database, reason: " + TDengine.Error(res));
}
}
-
- static void ExitProgram(IntPtr conn, int exitCode)
- {
- TDengine.Close(conn);
- TDengine.Cleanup();
- Environment.Exit(exitCode);
- }
}
}
diff --git a/docs/examples/csharp/optsjson.csproj b/docs/examples/csharp/optsJSON/optsJSON.csproj
similarity index 100%
rename from docs/examples/csharp/optsjson.csproj
rename to docs/examples/csharp/optsJSON/optsJSON.csproj
diff --git a/docs/examples/csharp/OptsTelnetExample.cs b/docs/examples/csharp/optsTelnet/Program.cs
similarity index 59%
rename from docs/examples/csharp/OptsTelnetExample.cs
rename to docs/examples/csharp/optsTelnet/Program.cs
index bb752db1afbbb2ef68df9ca25314c8b91cd9a266..e73ceb041accf88222176342d46fe1a669584211 100644
--- a/docs/examples/csharp/OptsTelnetExample.cs
+++ b/docs/examples/csharp/optsTelnet/Program.cs
@@ -7,8 +7,10 @@ namespace TDengineExample
static void Main()
{
IntPtr conn = GetConnection();
- PrepareDatabase(conn);
- string[] lines = {
+ try
+ {
+ PrepareDatabase(conn);
+ string[] lines = {
"meters.current 1648432611249 10.3 location=California.SanFrancisco groupid=2",
"meters.current 1648432611250 12.6 location=California.SanFrancisco groupid=2",
"meters.current 1648432611249 10.8 location=California.LosAngeles groupid=3",
@@ -18,20 +20,22 @@ namespace TDengineExample
"meters.voltage 1648432611249 221 location=California.LosAngeles groupid=3",
"meters.voltage 1648432611250 217 location=California.LosAngeles groupid=3",
};
- IntPtr res = TDengine.SchemalessInsert(conn, lines, lines.Length, (int)TDengineSchemalessProtocol.TSDB_SML_TELNET_PROTOCOL, (int)TDengineSchemalessPrecision.TSDB_SML_TIMESTAMP_NOT_CONFIGURED);
- if (TDengine.ErrorNo(res) != 0)
- {
- Console.WriteLine("SchemalessInsert failed since " + TDengine.Error(res));
- ExitProgram(conn, 1);
+ IntPtr res = TDengine.SchemalessInsert(conn, lines, lines.Length, (int)TDengineSchemalessProtocol.TSDB_SML_TELNET_PROTOCOL, (int)TDengineSchemalessPrecision.TSDB_SML_TIMESTAMP_NOT_CONFIGURED);
+ if (TDengine.ErrorNo(res) != 0)
+ {
+ throw new Exception("SchemalessInsert failed since " + TDengine.Error(res));
+ }
+ else
+ {
+ int affectedRows = TDengine.AffectRows(res);
+ Console.WriteLine($"SchemalessInsert success, affected {affectedRows} rows");
+ }
+ TDengine.FreeResult(res);
}
- else
+ catch
{
- int affectedRows = TDengine.AffectRows(res);
- Console.WriteLine($"SchemalessInsert success, affected {affectedRows} rows");
+ TDengine.Close(conn);
}
- TDengine.FreeResult(res);
- ExitProgram(conn, 0);
-
}
static IntPtr GetConnection()
{
@@ -43,9 +47,7 @@ namespace TDengineExample
var conn = TDengine.Connect(host, username, password, dbname, port);
if (conn == IntPtr.Zero)
{
- Console.WriteLine("Connect to TDengine failed");
- TDengine.Cleanup();
- Environment.Exit(1);
+ throw new Exception("Connect to TDengine failed");
}
else
{
@@ -59,22 +61,13 @@ namespace TDengineExample
IntPtr res = TDengine.Query(conn, "CREATE DATABASE test");
if (TDengine.ErrorNo(res) != 0)
{
- Console.WriteLine("failed to create database, reason: " + TDengine.Error(res));
- ExitProgram(conn, 1);
+ throw new Exception("failed to create database, reason: " + TDengine.Error(res));
}
res = TDengine.Query(conn, "USE test");
if (TDengine.ErrorNo(res) != 0)
{
- Console.WriteLine("failed to change database, reason: " + TDengine.Error(res));
- ExitProgram(conn, 1);
+ throw new Exception("failed to change database, reason: " + TDengine.Error(res));
}
}
-
- static void ExitProgram(IntPtr conn, int exitCode)
- {
- TDengine.Close(conn);
- TDengine.Cleanup();
- Environment.Exit(exitCode);
- }
}
}
diff --git a/docs/examples/csharp/optstelnet.csproj b/docs/examples/csharp/optsTelnet/optstelnet.csproj
similarity index 100%
rename from docs/examples/csharp/optstelnet.csproj
rename to docs/examples/csharp/optsTelnet/optstelnet.csproj
diff --git a/docs/examples/csharp/query/Program.cs b/docs/examples/csharp/query/Program.cs
new file mode 100644
index 0000000000000000000000000000000000000000..84c7f9db1f8a87289b73662c72c63c0078b45678
--- /dev/null
+++ b/docs/examples/csharp/query/Program.cs
@@ -0,0 +1,80 @@
+using TDengineDriver;
+using TDengineDriver.Impl;
+using System.Runtime.InteropServices;
+
+namespace TDengineExample
+{
+ internal class QueryExample
+ {
+ static void Main()
+ {
+ IntPtr conn = GetConnection();
+ try
+ {
+ // run query
+ IntPtr res = TDengine.Query(conn, "SELECT * FROM meters LIMIT 2");
+ if (TDengine.ErrorNo(res) != 0)
+ {
+ throw new Exception("Failed to query since: " + TDengine.Error(res));
+ }
+
+ // get filed count
+ int fieldCount = TDengine.FieldCount(res);
+ Console.WriteLine("fieldCount=" + fieldCount);
+
+ // print column names
+ List metas = LibTaos.GetMeta(res);
+ for (int i = 0; i < metas.Count; i++)
+ {
+ Console.Write(metas[i].name + "\t");
+ }
+ Console.WriteLine();
+
+ // print values
+ List resData = LibTaos.GetData(res);
+ for (int i = 0; i < resData.Count; i++)
+ {
+ Console.Write($"|{resData[i].ToString()} \t");
+ if (((i + 1) % metas.Count == 0))
+ {
+ Console.WriteLine("");
+ }
+ }
+ Console.WriteLine();
+
+ // Free result after use
+ TDengine.FreeResult(res);
+ }
+ finally
+ {
+ TDengine.Close(conn);
+ }
+
+ }
+ static IntPtr GetConnection()
+ {
+ string host = "localhost";
+ short port = 6030;
+ string username = "root";
+ string password = "taosdata";
+ string dbname = "power";
+ var conn = TDengine.Connect(host, username, password, dbname, port);
+ if (conn == IntPtr.Zero)
+ {
+ throw new Exception("Connect to TDengine failed");
+ }
+ else
+ {
+ Console.WriteLine("Connect to TDengine success");
+ }
+ return conn;
+ }
+ }
+}
+
+// output:
+// Connect to TDengine success
+// fieldCount=6
+// ts current voltage phase location groupid
+// 1648432611249 10.3 219 0.31 California.SanFrancisco 2
+// 1648432611749 12.6 218 0.33 California.SanFrancisco 2
\ No newline at end of file
diff --git a/docs/examples/csharp/query.csproj b/docs/examples/csharp/query/query.csproj
similarity index 98%
rename from docs/examples/csharp/query.csproj
rename to docs/examples/csharp/query/query.csproj
index 39fc135d5ab9f5a8397b412e2307a2306abd4f2a..c97dbd3051e1a415b192e73d6753266b0b41b07d 100644
--- a/docs/examples/csharp/query.csproj
+++ b/docs/examples/csharp/query/query.csproj
@@ -9,7 +9,7 @@
-
+
diff --git a/docs/examples/csharp/sqlInsert/Program.cs b/docs/examples/csharp/sqlInsert/Program.cs
new file mode 100644
index 0000000000000000000000000000000000000000..f23a6e1663023d1d2fafb3e92e0b605f8ac55e52
--- /dev/null
+++ b/docs/examples/csharp/sqlInsert/Program.cs
@@ -0,0 +1,69 @@
+using TDengineDriver;
+
+
+namespace TDengineExample
+{
+ internal class SQLInsertExample
+ {
+
+ static void Main()
+ {
+ IntPtr conn = GetConnection();
+ try
+ {
+ IntPtr res = TDengine.Query(conn, "CREATE DATABASE power");
+ CheckRes(conn, res, "failed to create database");
+ res = TDengine.Query(conn, "USE power");
+ CheckRes(conn, res, "failed to change database");
+ res = TDengine.Query(conn, "CREATE STABLE power.meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS (location BINARY(64), groupId INT)");
+ CheckRes(conn, res, "failed to create stable");
+ var sql = "INSERT INTO d1001 USING meters TAGS('California.SanFrancisco', 2) VALUES ('2018-10-03 14:38:05.000', 10.30000, 219, 0.31000) ('2018-10-03 14:38:15.000', 12.60000, 218, 0.33000) ('2018-10-03 14:38:16.800', 12.30000, 221, 0.31000) " +
+ "d1002 USING power.meters TAGS('California.SanFrancisco', 3) VALUES('2018-10-03 14:38:16.650', 10.30000, 218, 0.25000) " +
+ "d1003 USING power.meters TAGS('California.LosAngeles', 2) VALUES('2018-10-03 14:38:05.500', 11.80000, 221, 0.28000)('2018-10-03 14:38:16.600', 13.40000, 223, 0.29000) " +
+ "d1004 USING power.meters TAGS('California.LosAngeles', 3) VALUES('2018-10-03 14:38:05.000', 10.80000, 223, 0.29000)('2018-10-03 14:38:06.500', 11.50000, 221, 0.35000)";
+ res = TDengine.Query(conn, sql);
+ CheckRes(conn, res, "failed to insert data");
+ int affectedRows = TDengine.AffectRows(res);
+ Console.WriteLine("affectedRows " + affectedRows);
+ TDengine.FreeResult(res);
+ }
+ finally
+ {
+ TDengine.Close(conn);
+ }
+
+ }
+
+ static IntPtr GetConnection()
+ {
+ string host = "localhost";
+ short port = 6030;
+ string username = "root";
+ string password = "taosdata";
+ string dbname = "";
+ var conn = TDengine.Connect(host, username, password, dbname, port);
+ if (conn == IntPtr.Zero)
+ {
+ throw new Exception("Connect to TDengine failed");
+ }
+ else
+ {
+ Console.WriteLine("Connect to TDengine success");
+ }
+ return conn;
+ }
+
+ static void CheckRes(IntPtr conn, IntPtr res, String errorMsg)
+ {
+ if (TDengine.ErrorNo(res) != 0)
+ {
+ throw new Exception($"{errorMsg} since: {TDengine.Error(res)}");
+ }
+ }
+
+ }
+}
+
+// output:
+// Connect to TDengine success
+// affectedRows 8
diff --git a/docs/examples/csharp/sqlinsert.csproj b/docs/examples/csharp/sqlInsert/sqlinsert.csproj
similarity index 100%
rename from docs/examples/csharp/sqlinsert.csproj
rename to docs/examples/csharp/sqlInsert/sqlinsert.csproj
diff --git a/docs/examples/csharp/StmtInsertExample.cs b/docs/examples/csharp/stmtInsert/Program.cs
similarity index 52%
rename from docs/examples/csharp/StmtInsertExample.cs
rename to docs/examples/csharp/stmtInsert/Program.cs
index 0a4098091f6371a674eee6f158e1c57bff2b6862..87e1971feb8499c515206f05a1e916070ac57f4c 100644
--- a/docs/examples/csharp/StmtInsertExample.cs
+++ b/docs/examples/csharp/stmtInsert/Program.cs
@@ -9,45 +9,50 @@ namespace TDengineExample
static void Main()
{
conn = GetConnection();
- PrepareSTable();
- // 1. init and prepare
- stmt = TDengine.StmtInit(conn);
- if (stmt == IntPtr.Zero)
+ try
{
- Console.WriteLine("failed to init stmt, " + TDengine.Error(stmt));
- ExitProgram();
- }
- int res = TDengine.StmtPrepare(stmt, "INSERT INTO ? USING meters TAGS(?, ?) VALUES(?, ?, ?, ?)");
- CheckStmtRes(res, "failed to prepare stmt");
+ PrepareSTable();
+ // 1. init and prepare
+ stmt = TDengine.StmtInit(conn);
+ if (stmt == IntPtr.Zero)
+ {
+ throw new Exception("failed to init stmt.");
+ }
+ int res = TDengine.StmtPrepare(stmt, "INSERT INTO ? USING meters TAGS(?, ?) VALUES(?, ?, ?, ?)");
+ CheckStmtRes(res, "failed to prepare stmt");
- // 2. bind table name and tags
- TAOS_MULTI_BIND[] tags = new TAOS_MULTI_BIND[2] { TaosMultiBind.MultiBindBinary(new string[]{"California.SanFrancisco"}), TaosMultiBind.MultiBindInt(new int?[] {2}) };
- res = TDengine.StmtSetTbnameTags(stmt, "d1001", tags);
- CheckStmtRes(res, "failed to bind table name and tags");
+ // 2. bind table name and tags
+ TAOS_MULTI_BIND[] tags = new TAOS_MULTI_BIND[2] { TaosMultiBind.MultiBindBinary(new string[] { "California.SanFrancisco" }), TaosMultiBind.MultiBindInt(new int?[] { 2 }) };
+ res = TDengine.StmtSetTbnameTags(stmt, "d1001", tags);
+ CheckStmtRes(res, "failed to bind table name and tags");
- // 3. bind values
- TAOS_MULTI_BIND[] values = new TAOS_MULTI_BIND[4] {
+ // 3. bind values
+ TAOS_MULTI_BIND[] values = new TAOS_MULTI_BIND[4] {
TaosMultiBind.MultiBindTimestamp(new long[2] { 1648432611249, 1648432611749}),
TaosMultiBind.MultiBindFloat(new float?[2] { 10.3f, 12.6f}),
TaosMultiBind.MultiBindInt(new int?[2] { 219, 218}),
TaosMultiBind.MultiBindFloat(new float?[2]{ 0.31f, 0.33f})
};
- res = TDengine.StmtBindParamBatch(stmt, values);
- CheckStmtRes(res, "failed to bind params");
+ res = TDengine.StmtBindParamBatch(stmt, values);
+ CheckStmtRes(res, "failed to bind params");
- // 4. add batch
- res = TDengine.StmtAddBatch(stmt);
- CheckStmtRes(res, "failed to add batch");
+ // 4. add batch
+ res = TDengine.StmtAddBatch(stmt);
+ CheckStmtRes(res, "failed to add batch");
- // 5. execute
- res = TDengine.StmtExecute(stmt);
- CheckStmtRes(res, "faild to execute");
+ // 5. execute
+ res = TDengine.StmtExecute(stmt);
+ CheckStmtRes(res, "faild to execute");
+
+ // 6. free
+ TaosMultiBind.FreeTaosBind(tags);
+ TaosMultiBind.FreeTaosBind(values);
+ }
+ finally
+ {
+ TDengine.Close(conn);
+ }
- // 6. free
- TaosMultiBind.FreeTaosBind(tags);
- TaosMultiBind.FreeTaosBind(values);
- TDengine.Close(conn);
- TDengine.Cleanup();
}
static IntPtr GetConnection()
@@ -60,8 +65,7 @@ namespace TDengineExample
var conn = TDengine.Connect(host, username, password, dbname, port);
if (conn == IntPtr.Zero)
{
- Console.WriteLine("Connect to TDengine failed");
- Environment.Exit(0);
+ throw new Exception("Connect to TDengine failed");
}
else
{
@@ -70,8 +74,6 @@ namespace TDengineExample
return conn;
}
-
-
static void PrepareSTable()
{
IntPtr res = TDengine.Query(conn, "CREATE DATABASE power");
@@ -90,9 +92,8 @@ namespace TDengineExample
int code = TDengine.StmtClose(stmt);
if (code != 0)
{
- Console.WriteLine($"falied to close stmt, {code} reason: {TDengine.StmtErrorStr(stmt)} ");
+ throw new Exception($"falied to close stmt, {code} reason: {TDengine.StmtErrorStr(stmt)} ");
}
- ExitProgram();
}
}
@@ -100,16 +101,9 @@ namespace TDengineExample
{
if (TDengine.ErrorNo(res) != 0)
{
- Console.WriteLine(errorMsg + " since:" + TDengine.Error(res));
- ExitProgram();
+ throw new Exception(errorMsg + " since:" + TDengine.Error(res));
}
}
- static void ExitProgram()
- {
- TDengine.Close(conn);
- TDengine.Cleanup();
- Environment.Exit(1);
- }
}
}
diff --git a/docs/examples/csharp/stmtinsert.csproj b/docs/examples/csharp/stmtInsert/stmtinsert.csproj
similarity index 100%
rename from docs/examples/csharp/stmtinsert.csproj
rename to docs/examples/csharp/stmtInsert/stmtinsert.csproj
diff --git a/docs/examples/csharp/SubscribeDemo.cs b/docs/examples/csharp/subscribe/Program.cs
similarity index 90%
rename from docs/examples/csharp/SubscribeDemo.cs
rename to docs/examples/csharp/subscribe/Program.cs
index b62ff12e5ea38eb27ae5de8e8027aa41b1873d23..1fba209f22740e4efe5efb6996902159b2809035 100644
--- a/docs/examples/csharp/SubscribeDemo.cs
+++ b/docs/examples/csharp/subscribe/Program.cs
@@ -11,11 +11,10 @@ namespace TMQExample
{
IntPtr conn = GetConnection();
string topic = "topic_example";
- Console.WriteLine($"create topic if not exist {topic} as select * from meters");
//create topic
IntPtr res = TDengine.Query(conn, $"create topic if not exists {topic} as select * from meters");
-
- if (res == IntPtr.Zero)
+
+ if (TDengine.ErrorNo(res) != 0 )
{
throw new Exception($"create topic failed, reason:{TDengine.Error(res)}");
}
@@ -26,7 +25,7 @@ namespace TMQExample
TDConnectUser = "root",
TDConnectPasswd = "taosdata",
MsgWithTableName = "true",
- TDConnectIp = "127.0.0.1",
+ TDConnectIp = "127.0.0.1",
};
// create consumer
@@ -65,7 +64,6 @@ namespace TMQExample
List topics = consumer.Subscription();
topics.ForEach(t => Console.WriteLine("topic name:{0}", t));
-
// unsubscribe
consumer.Unsubscribe();
@@ -73,7 +71,6 @@ namespace TMQExample
consumer.Close();
TDengine.Close(conn);
-
}
static IntPtr GetConnection()
@@ -86,8 +83,7 @@ namespace TMQExample
var conn = TDengine.Connect(host, username, password, dbname, port);
if (conn == IntPtr.Zero)
{
- Console.WriteLine("Connect to TDengine failed");
- System.Environment.Exit(0);
+ throw new Exception("Connect to TDengine failed");
}
else
{
diff --git a/docs/examples/csharp/subscribe.csproj b/docs/examples/csharp/subscribe/subscribe.csproj
similarity index 98%
rename from docs/examples/csharp/subscribe.csproj
rename to docs/examples/csharp/subscribe/subscribe.csproj
index eff29b3bf42bde521aae70bfd1ed555ac72bfce9..8ae1cf6bc6023558c28797a0d9fcccb2f2e87653 100644
--- a/docs/examples/csharp/subscribe.csproj
+++ b/docs/examples/csharp/subscribe/subscribe.csproj
@@ -9,7 +9,7 @@
-
+
diff --git a/docs/examples/csharp/wsConnect/Program.cs b/docs/examples/csharp/wsConnect/Program.cs
new file mode 100644
index 0000000000000000000000000000000000000000..2e89372c3e3dd23c16bad0362f494b2c64191cbc
--- /dev/null
+++ b/docs/examples/csharp/wsConnect/Program.cs
@@ -0,0 +1,25 @@
+using System;
+using TDengineWS.Impl;
+
+namespace Examples
+{
+ public class WSConnExample
+ {
+ static void Main(string[] args)
+ {
+ string DSN = "ws://root:taosdata@127.0.0.1:6041/test";
+ IntPtr wsConn = LibTaosWS.WSConnectWithDSN(DSN);
+ if (wsConn == IntPtr.Zero)
+ {
+ throw new Exception($"get WS connection failed,reason:{LibTaosWS.WSErrorStr(IntPtr.Zero)} code:{LibTaosWS.WSErrorNo(IntPtr.Zero)}");
+ }
+ else
+ {
+ Console.WriteLine("Establish connect success.");
+ }
+
+ // close connection.
+ LibTaosWS.WSClose(wsConn);
+ }
+ }
+}
\ No newline at end of file
diff --git a/docs/examples/csharp/wsConnect/wsConnect.csproj b/docs/examples/csharp/wsConnect/wsConnect.csproj
new file mode 100644
index 0000000000000000000000000000000000000000..34951dc761903e5a4b7a4bec5dfe55a965ab88be
--- /dev/null
+++ b/docs/examples/csharp/wsConnect/wsConnect.csproj
@@ -0,0 +1,13 @@
+
+
+
+ Exe
+ net5.0
+ enable
+
+
+
+
+
+
+
diff --git a/docs/examples/csharp/wsInsert/Program.cs b/docs/examples/csharp/wsInsert/Program.cs
new file mode 100644
index 0000000000000000000000000000000000000000..4cd812cda916308f80f7bf2001d38046434056c1
--- /dev/null
+++ b/docs/examples/csharp/wsInsert/Program.cs
@@ -0,0 +1,58 @@
+using System;
+using TDengineWS.Impl;
+
+namespace Examples
+{
+ public class WSInsertExample
+ {
+ static void Main(string[] args)
+ {
+ string DSN = "ws://root:taosdata@127.0.0.1:6041/test";
+ IntPtr wsConn = LibTaosWS.WSConnectWithDSN(DSN);
+
+ // Assert if connection is validate
+ if (wsConn == IntPtr.Zero)
+ {
+ throw new Exception($"get WS connection failed,reason:{LibTaosWS.WSErrorStr(IntPtr.Zero)} code:{LibTaosWS.WSErrorNo(IntPtr.Zero)}");
+ }
+ else
+ {
+ Console.WriteLine("Establish connect success.");
+ }
+
+ string createTable = "CREATE STABLE test.meters (ts timestamp, current float, voltage int, phase float) TAGS (location binary(64), groupId int);";
+ string insert = "INSERT INTO test.d1001 USING test.meters TAGS('California.SanFrancisco', 2) VALUES ('2018-10-03 14:38:05.000', 10.30000, 219, 0.31000) ('2018-10-03 14:38:15.000', 12.60000, 218, 0.33000) ('2018-10-03 14:38:16.800', 12.30000, 221, 0.31000)" +
+ "test.d1002 USING test.meters TAGS('California.SanFrancisco', 3) VALUES('2018-10-03 14:38:16.650', 10.30000, 218, 0.25000)" +
+ "test.d1003 USING test.meters TAGS('California.LosAngeles', 2) VALUES('2018-10-03 14:38:05.500', 11.80000, 221, 0.28000)('2018-10-03 14:38:16.600', 13.40000, 223, 0.29000) " +
+ "test.d1004 USING test.meters TAGS('California.LosAngeles', 3) VALUES('2018-10-03 14:38:05.000', 10.80000, 223, 0.29000)('2018-10-03 14:38:06.500', 11.50000, 221, 0.35000)";
+
+ IntPtr wsRes = LibTaosWS.WSQuery(wsConn, createTable);
+ ValidInsert("create table", wsRes);
+ LibTaosWS.WSFreeResult(wsRes);
+
+ wsRes = LibTaosWS.WSQuery(wsConn, insert);
+ ValidInsert("insert data", wsRes);
+ LibTaosWS.WSFreeResult(wsRes);
+
+ // close connection.
+ LibTaosWS.WSClose(wsConn);
+ }
+
+ static void ValidInsert(string desc, IntPtr wsRes)
+ {
+ int code = LibTaosWS.WSErrorNo(wsRes);
+ if (code != 0)
+ {
+ throw new Exception($"execute SQL failed: reason: {LibTaosWS.WSErrorStr(wsRes)}, code:{code}");
+ }
+ else
+ {
+ Console.WriteLine("{0} success affect {2} rows, cost {1} nanoseconds", desc, LibTaosWS.WSTakeTiming(wsRes), LibTaosWS.WSAffectRows(wsRes));
+ }
+ }
+ }
+
+}
+// Establish connect success.
+// create table success affect 0 rows, cost 3717542 nanoseconds
+// insert data success affect 8 rows, cost 2613637 nanoseconds
\ No newline at end of file
diff --git a/docs/examples/csharp/wsInsert/wsInsert.csproj b/docs/examples/csharp/wsInsert/wsInsert.csproj
new file mode 100644
index 0000000000000000000000000000000000000000..34951dc761903e5a4b7a4bec5dfe55a965ab88be
--- /dev/null
+++ b/docs/examples/csharp/wsInsert/wsInsert.csproj
@@ -0,0 +1,13 @@
+
+
+
+ Exe
+ net5.0
+ enable
+
+
+
+
+
+
+
diff --git a/docs/examples/csharp/wsQuery/Program.cs b/docs/examples/csharp/wsQuery/Program.cs
new file mode 100644
index 0000000000000000000000000000000000000000..de5591aa536655606547cf8998145025642989cd
--- /dev/null
+++ b/docs/examples/csharp/wsQuery/Program.cs
@@ -0,0 +1,74 @@
+using System;
+using TDengineWS.Impl;
+using System.Collections.Generic;
+using TDengineDriver;
+
+namespace Examples
+{
+ public class WSQueryExample
+ {
+ static void Main(string[] args)
+ {
+ string DSN = "ws://root:taosdata@127.0.0.1:6041/test";
+ IntPtr wsConn = LibTaosWS.WSConnectWithDSN(DSN);
+ if (wsConn == IntPtr.Zero)
+ {
+ throw new Exception($"get WS connection failed,reason:{LibTaosWS.WSErrorStr(IntPtr.Zero)} code:{LibTaosWS.WSErrorNo(IntPtr.Zero)}");
+ }
+ else
+ {
+ Console.WriteLine("Establish connect success.");
+ }
+
+ string select = "select * from test.meters";
+
+ // optional:wsRes = LibTaosWS.WSQuery(wsConn, select);
+ IntPtr wsRes = LibTaosWS.WSQueryTimeout(wsConn, select, 1);
+ // Assert if query execute success.
+ int code = LibTaosWS.WSErrorNo(wsRes);
+ if (code != 0)
+ {
+ throw new Exception($"execute SQL failed: reason: {LibTaosWS.WSErrorStr(wsRes)}, code:{code}");
+ }
+
+ // get meta data
+ List metas = LibTaosWS.WSGetFields(wsRes);
+ // get retrieved data
+ List dataSet = LibTaosWS.WSGetData(wsRes);
+
+ // do something with result.
+ foreach (var meta in metas)
+ {
+ Console.Write("{0} {1}({2}) \t|\t", meta.name, meta.TypeName(), meta.size);
+ }
+ Console.WriteLine("");
+
+ for (int i = 0; i < dataSet.Count;)
+ {
+ for (int j = 0; j < metas.Count; j++)
+ {
+ Console.Write("{0}\t|\t", dataSet[i]);
+ i++;
+ }
+ Console.WriteLine("");
+ }
+
+ // Free result after use.
+ LibTaosWS.WSFreeResult(wsRes);
+
+ // close connection.
+ LibTaosWS.WSClose(wsConn);
+ }
+ }
+}
+
+// Establish connect success.
+// ts TIMESTAMP(8) | current FLOAT(4) | voltage INT(4) | phase FLOAT(4) | location BINARY(64) | groupid INT(4) |
+// 1538548685000 | 10.8 | 223 | 0.29 | California.LosAngeles | 3 |
+// 1538548686500 | 11.5 | 221 | 0.35 | California.LosAngeles | 3 |
+// 1538548685500 | 11.8 | 221 | 0.28 | California.LosAngeles | 2 |
+// 1538548696600 | 13.4 | 223 | 0.29 | California.LosAngeles | 2 |
+// 1538548685000 | 10.3 | 219 | 0.31 | California.SanFrancisco | 2 |
+// 1538548695000 | 12.6 | 218 | 0.33 | California.SanFrancisco | 2 |
+// 1538548696800 | 12.3 | 221 | 0.31 | California.SanFrancisco | 2 |
+// 1538548696650 | 10.3 | 218 | 0.25 | California.SanFrancisco | 3 |
\ No newline at end of file
diff --git a/docs/examples/csharp/wsQuery/wsQuery.csproj b/docs/examples/csharp/wsQuery/wsQuery.csproj
new file mode 100644
index 0000000000000000000000000000000000000000..34951dc761903e5a4b7a4bec5dfe55a965ab88be
--- /dev/null
+++ b/docs/examples/csharp/wsQuery/wsQuery.csproj
@@ -0,0 +1,13 @@
+
+
+
+ Exe
+ net5.0
+ enable
+
+
+
+
+
+
+
diff --git a/docs/examples/csharp/wsStmt/Program.cs b/docs/examples/csharp/wsStmt/Program.cs
new file mode 100644
index 0000000000000000000000000000000000000000..54de77ec1f98deaf14fd2ad9ca0acd57e6b38f63
--- /dev/null
+++ b/docs/examples/csharp/wsStmt/Program.cs
@@ -0,0 +1,95 @@
+using System;
+using TDengineWS.Impl;
+using TDengineDriver;
+using System.Runtime.InteropServices;
+
+namespace Examples
+{
+ public class WSStmtExample
+ {
+ static void Main(string[] args)
+ {
+ const string DSN = "ws://root:taosdata@127.0.0.1:6041/test";
+ const string table = "meters";
+ const string database = "test";
+ const string childTable = "d1005";
+ string insert = $"insert into ? using {database}.{table} tags(?,?) values(?,?,?,?)";
+ const int numOfTags = 2;
+ const int numOfColumns = 4;
+
+ // Establish connection
+ IntPtr wsConn = LibTaosWS.WSConnectWithDSN(DSN);
+ if (wsConn == IntPtr.Zero)
+ {
+ throw new Exception($"get WS connection failed,reason:{LibTaosWS.WSErrorStr(IntPtr.Zero)} code:{LibTaosWS.WSErrorNo(IntPtr.Zero)}");
+ }
+ else
+ {
+ Console.WriteLine("Establish connect success...");
+ }
+
+ // init stmt
+ IntPtr wsStmt = LibTaosWS.WSStmtInit(wsConn);
+ if (wsStmt != IntPtr.Zero)
+ {
+ int code = LibTaosWS.WSStmtPrepare(wsStmt, insert);
+ ValidStmtStep(code, wsStmt, "WSStmtPrepare");
+
+ TAOS_MULTI_BIND[] wsTags = new TAOS_MULTI_BIND[] { WSMultiBind.WSBindNchar(new string[] { "California.SanDiego" }), WSMultiBind.WSBindInt(new int?[] { 4 }) };
+ code = LibTaosWS.WSStmtSetTbnameTags(wsStmt, $"{database}.{childTable}", wsTags, numOfTags);
+ ValidStmtStep(code, wsStmt, "WSStmtSetTbnameTags");
+
+ TAOS_MULTI_BIND[] data = new TAOS_MULTI_BIND[4];
+ data[0] = WSMultiBind.WSBindTimestamp(new long[] { 1538548687000, 1538548688000, 1538548689000, 1538548690000, 1538548691000 });
+ data[1] = WSMultiBind.WSBindFloat(new float?[] { 10.30F, 10.40F, 10.50F, 10.60F, 10.70F });
+ data[2] = WSMultiBind.WSBindInt(new int?[] { 223, 221, 222, 220, 219 });
+ data[3] = WSMultiBind.WSBindFloat(new float?[] { 0.31F, 0.32F, 0.33F, 0.35F, 0.28F });
+ code = LibTaosWS.WSStmtBindParamBatch(wsStmt, data, numOfColumns);
+ ValidStmtStep(code, wsStmt, "WSStmtBindParamBatch");
+
+ code = LibTaosWS.WSStmtAddBatch(wsStmt);
+ ValidStmtStep(code, wsStmt, "WSStmtAddBatch");
+
+ IntPtr stmtAffectRowPtr = Marshal.AllocHGlobal(Marshal.SizeOf(typeof(Int32)));
+ code = LibTaosWS.WSStmtExecute(wsStmt, stmtAffectRowPtr);
+ ValidStmtStep(code, wsStmt, "WSStmtExecute");
+ Console.WriteLine("WS STMT insert {0} rows...", Marshal.ReadInt32(stmtAffectRowPtr));
+ Marshal.FreeHGlobal(stmtAffectRowPtr);
+
+ LibTaosWS.WSStmtClose(wsStmt);
+
+ // Free unmanaged memory
+ WSMultiBind.WSFreeTaosBind(wsTags);
+ WSMultiBind.WSFreeTaosBind(data);
+
+ //check result with SQL "SELECT * FROM test.d1005;"
+ }
+ else
+ {
+ throw new Exception("Init STMT failed...");
+ }
+
+ // close connection.
+ LibTaosWS.WSClose(wsConn);
+ }
+
+ static void ValidStmtStep(int code, IntPtr wsStmt, string desc)
+ {
+ if (code != 0)
+ {
+ throw new Exception($"{desc} failed,reason: {LibTaosWS.WSErrorStr(wsStmt)}, code: {code}");
+ }
+ else
+ {
+ Console.WriteLine("{0} success...", desc);
+ }
+ }
+ }
+}
+
+// WSStmtPrepare success...
+// WSStmtSetTbnameTags success...
+// WSStmtBindParamBatch success...
+// WSStmtAddBatch success...
+// WSStmtExecute success...
+// WS STMT insert 5 rows...
\ No newline at end of file
diff --git a/docs/examples/csharp/wsStmt/wsStmt.csproj b/docs/examples/csharp/wsStmt/wsStmt.csproj
new file mode 100644
index 0000000000000000000000000000000000000000..34951dc761903e5a4b7a4bec5dfe55a965ab88be
--- /dev/null
+++ b/docs/examples/csharp/wsStmt/wsStmt.csproj
@@ -0,0 +1,13 @@
+
+
+
+ Exe
+ net5.0
+ enable
+
+
+
+
+
+
+
diff --git a/docs/examples/java/src/main/java/com/taos/example/RestInsertExample.java b/docs/examples/java/src/main/java/com/taos/example/RestInsertExample.java
index af97fe4373ca964260e5614f133f359e229b0e15..9d85bf2a94abda71bcdab89d46008b70e52ce437 100644
--- a/docs/examples/java/src/main/java/com/taos/example/RestInsertExample.java
+++ b/docs/examples/java/src/main/java/com/taos/example/RestInsertExample.java
@@ -16,14 +16,14 @@ public class RestInsertExample {
private static List getRawData() {
return Arrays.asList(
- "d1001,2018-10-03 14:38:05.000,10.30000,219,0.31000,California.SanFrancisco,2",
- "d1001,2018-10-03 14:38:15.000,12.60000,218,0.33000,California.SanFrancisco,2",
- "d1001,2018-10-03 14:38:16.800,12.30000,221,0.31000,California.SanFrancisco,2",
- "d1002,2018-10-03 14:38:16.650,10.30000,218,0.25000,California.SanFrancisco,3",
- "d1003,2018-10-03 14:38:05.500,11.80000,221,0.28000,California.LosAngeles,2",
- "d1003,2018-10-03 14:38:16.600,13.40000,223,0.29000,California.LosAngeles,2",
- "d1004,2018-10-03 14:38:05.000,10.80000,223,0.29000,California.LosAngeles,3",
- "d1004,2018-10-03 14:38:06.500,11.50000,221,0.35000,California.LosAngeles,3"
+ "d1001,2018-10-03 14:38:05.000,10.30000,219,0.31000,'California.SanFrancisco',2",
+ "d1001,2018-10-03 14:38:15.000,12.60000,218,0.33000,'California.SanFrancisco',2",
+ "d1001,2018-10-03 14:38:16.800,12.30000,221,0.31000,'California.SanFrancisco',2",
+ "d1002,2018-10-03 14:38:16.650,10.30000,218,0.25000,'California.SanFrancisco',3",
+ "d1003,2018-10-03 14:38:05.500,11.80000,221,0.28000,'California.LosAngeles',2",
+ "d1003,2018-10-03 14:38:16.600,13.40000,223,0.29000,'California.LosAngeles',2",
+ "d1004,2018-10-03 14:38:05.000,10.80000,223,0.29000,'California.LosAngeles',3",
+ "d1004,2018-10-03 14:38:06.500,11.50000,221,0.35000,'California.LosAngeles',3"
);
}
diff --git a/docs/examples/java/src/main/java/com/taos/example/SubscribeDemo.java b/docs/examples/java/src/main/java/com/taos/example/SubscribeDemo.java
index 50e8b357719fc6d1f4707e474afdf58fb4531970..e9af5e9ce0c0473f4513cbb949dcbd9f433c0c92 100644
--- a/docs/examples/java/src/main/java/com/taos/example/SubscribeDemo.java
+++ b/docs/examples/java/src/main/java/com/taos/example/SubscribeDemo.java
@@ -38,12 +38,12 @@ public class SubscribeDemo {
statement.executeUpdate("create database " + DB_NAME);
statement.executeUpdate("use " + DB_NAME);
statement.executeUpdate(
- "CREATE TABLE `meters` (`ts` TIMESTAMP, `current` FLOAT, `voltage` INT) TAGS (`groupid` INT, `location` BINARY(16))");
- statement.executeUpdate("CREATE TABLE `d0` USING `meters` TAGS(0, 'Los Angles')");
+ "CREATE TABLE `meters` (`ts` TIMESTAMP, `current` FLOAT, `voltage` INT) TAGS (`groupid` INT, `location` BINARY(24))");
+ statement.executeUpdate("CREATE TABLE `d0` USING `meters` TAGS(0, 'California.LosAngles')");
statement.executeUpdate("INSERT INTO `d0` values(now - 10s, 0.32, 116)");
statement.executeUpdate("INSERT INTO `d0` values(now - 8s, NULL, NULL)");
statement.executeUpdate(
- "INSERT INTO `d1` USING `meters` TAGS(1, 'San Francisco') values(now - 9s, 10.1, 119)");
+ "INSERT INTO `d1` USING `meters` TAGS(1, 'California.SanFrancisco') values(now - 9s, 10.1, 119)");
statement.executeUpdate(
"INSERT INTO `d1` values (now-8s, 10, 120) (now - 6s, 10, 119) (now - 4s, 11.2, 118)");
// create topic
@@ -57,7 +57,7 @@ public class SubscribeDemo {
properties.setProperty(TMQConstants.ENABLE_AUTO_COMMIT, "true");
properties.setProperty(TMQConstants.GROUP_ID, "test");
properties.setProperty(TMQConstants.VALUE_DESERIALIZER,
- "com.taosdata.jdbc.MetersDeserializer");
+ "com.taos.example.MetersDeserializer");
// poll data
try (TaosConsumer consumer = new TaosConsumer<>(properties)) {
@@ -75,4 +75,4 @@ public class SubscribeDemo {
}
timer.cancel();
}
-}
\ No newline at end of file
+}
diff --git a/docs/examples/java/src/main/java/com/taos/example/highvolume/DataBaseMonitor.java b/docs/examples/java/src/main/java/com/taos/example/highvolume/DataBaseMonitor.java
new file mode 100644
index 0000000000000000000000000000000000000000..04b149a4b96441ecfd1b0bdde54c9ed71349cab2
--- /dev/null
+++ b/docs/examples/java/src/main/java/com/taos/example/highvolume/DataBaseMonitor.java
@@ -0,0 +1,63 @@
+package com.taos.example.highvolume;
+
+import java.sql.*;
+
+/**
+ * Prepare target database.
+ * Count total records in database periodically so that we can estimate the writing speed.
+ */
+public class DataBaseMonitor {
+ private Connection conn;
+ private Statement stmt;
+
+ public DataBaseMonitor init() throws SQLException {
+ if (conn == null) {
+ String jdbcURL = System.getenv("TDENGINE_JDBC_URL");
+ conn = DriverManager.getConnection(jdbcURL);
+ stmt = conn.createStatement();
+ }
+ return this;
+ }
+
+ public void close() {
+ try {
+ stmt.close();
+ } catch (SQLException e) {
+ }
+ try {
+ conn.close();
+ } catch (SQLException e) {
+ }
+ }
+
+ public void prepareDatabase() throws SQLException {
+ stmt.execute("DROP DATABASE IF EXISTS test");
+ stmt.execute("CREATE DATABASE test");
+ stmt.execute("CREATE STABLE test.meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS (location BINARY(64), groupId INT)");
+ }
+
+ public Long count() throws SQLException {
+ if (!stmt.isClosed()) {
+ ResultSet result = stmt.executeQuery("SELECT count(*) from test.meters");
+ result.next();
+ return result.getLong(1);
+ }
+ return null;
+ }
+
+ /**
+ * show test.stables;
+ *
+ * name | created_time | columns | tags | tables |
+ * ============================================================================================
+ * meters | 2022-07-20 08:39:30.902 | 4 | 2 | 620000 |
+ */
+ public Long getTableCount() throws SQLException {
+ if (!stmt.isClosed()) {
+ ResultSet result = stmt.executeQuery("show test.stables");
+ result.next();
+ return result.getLong(5);
+ }
+ return null;
+ }
+}
\ No newline at end of file
diff --git a/docs/examples/java/src/main/java/com/taos/example/highvolume/FastWriteExample.java b/docs/examples/java/src/main/java/com/taos/example/highvolume/FastWriteExample.java
new file mode 100644
index 0000000000000000000000000000000000000000..41b59551ca69a4056c2f2b572d169bd08dc4fcfe
--- /dev/null
+++ b/docs/examples/java/src/main/java/com/taos/example/highvolume/FastWriteExample.java
@@ -0,0 +1,70 @@
+package com.taos.example.highvolume;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.sql.*;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.concurrent.ArrayBlockingQueue;
+import java.util.concurrent.BlockingQueue;
+
+
+public class FastWriteExample {
+ final static Logger logger = LoggerFactory.getLogger(FastWriteExample.class);
+
+ final static int taskQueueCapacity = 1000000;
+ final static List> taskQueues = new ArrayList<>();
+ final static List readTasks = new ArrayList<>();
+ final static List writeTasks = new ArrayList<>();
+ final static DataBaseMonitor databaseMonitor = new DataBaseMonitor();
+
+ public static void stopAll() {
+ logger.info("shutting down");
+ readTasks.forEach(task -> task.stop());
+ writeTasks.forEach(task -> task.stop());
+ databaseMonitor.close();
+ }
+
+ public static void main(String[] args) throws InterruptedException, SQLException {
+ int readTaskCount = args.length > 0 ? Integer.parseInt(args[0]) : 1;
+ int writeTaskCount = args.length > 1 ? Integer.parseInt(args[1]) : 3;
+ int tableCount = args.length > 2 ? Integer.parseInt(args[2]) : 1000;
+ int maxBatchSize = args.length > 3 ? Integer.parseInt(args[3]) : 3000;
+
+ logger.info("readTaskCount={}, writeTaskCount={} tableCount={} maxBatchSize={}",
+ readTaskCount, writeTaskCount, tableCount, maxBatchSize);
+
+ databaseMonitor.init().prepareDatabase();
+
+ // Create task queues, whiting tasks and start writing threads.
+ for (int i = 0; i < writeTaskCount; ++i) {
+ BlockingQueue queue = new ArrayBlockingQueue<>(taskQueueCapacity);
+ taskQueues.add(queue);
+ WriteTask task = new WriteTask(queue, maxBatchSize);
+ Thread t = new Thread(task);
+ t.setName("WriteThread-" + i);
+ t.start();
+ }
+
+ // create reading tasks and start reading threads
+ int tableCountPerTask = tableCount / readTaskCount;
+ for (int i = 0; i < readTaskCount; ++i) {
+ ReadTask task = new ReadTask(i, taskQueues, tableCountPerTask);
+ Thread t = new Thread(task);
+ t.setName("ReadThread-" + i);
+ t.start();
+ }
+
+ Runtime.getRuntime().addShutdownHook(new Thread(FastWriteExample::stopAll));
+
+ long lastCount = 0;
+ while (true) {
+ Thread.sleep(10000);
+ long numberOfTable = databaseMonitor.getTableCount();
+ long count = databaseMonitor.count();
+ logger.info("numberOfTable={} count={} speed={}", numberOfTable, count, (count - lastCount) / 10);
+ lastCount = count;
+ }
+ }
+}
\ No newline at end of file
diff --git a/docs/examples/java/src/main/java/com/taos/example/highvolume/MockDataSource.java b/docs/examples/java/src/main/java/com/taos/example/highvolume/MockDataSource.java
new file mode 100644
index 0000000000000000000000000000000000000000..f0ebc53b4b9a588ac4a23461553dd5c9f1a9f00b
--- /dev/null
+++ b/docs/examples/java/src/main/java/com/taos/example/highvolume/MockDataSource.java
@@ -0,0 +1,53 @@
+package com.taos.example.highvolume;
+
+import java.util.Iterator;
+
+/**
+ * Generate test data
+ */
+class MockDataSource implements Iterator {
+ private String tbNamePrefix;
+ private int tableCount;
+ private long maxRowsPerTable = 1000000000L;
+
+ // 100 milliseconds between two neighbouring rows.
+ long startMs = System.currentTimeMillis() - maxRowsPerTable * 100;
+ private int currentRow = 0;
+ private int currentTbId = -1;
+
+ // mock values
+ String[] location = {"California.LosAngeles", "California.SanDiego", "California.SanJose", "California.Campbell", "California.SanFrancisco"};
+ float[] current = {8.8f, 10.7f, 9.9f, 8.9f, 9.4f};
+ int[] voltage = {119, 116, 111, 113, 118};
+ float[] phase = {0.32f, 0.34f, 0.33f, 0.329f, 0.141f};
+
+ public MockDataSource(String tbNamePrefix, int tableCount) {
+ this.tbNamePrefix = tbNamePrefix;
+ this.tableCount = tableCount;
+ }
+
+ @Override
+ public boolean hasNext() {
+ currentTbId += 1;
+ if (currentTbId == tableCount) {
+ currentTbId = 0;
+ currentRow += 1;
+ }
+ return currentRow < maxRowsPerTable;
+ }
+
+ @Override
+ public String next() {
+ long ts = startMs + 100 * currentRow;
+ int groupId = currentTbId % 5 == 0 ? currentTbId / 5 : currentTbId / 5 + 1;
+ StringBuilder sb = new StringBuilder(tbNamePrefix + "_" + currentTbId + ","); // tbName
+ sb.append(ts).append(','); // ts
+ sb.append(current[currentRow % 5]).append(','); // current
+ sb.append(voltage[currentRow % 5]).append(','); // voltage
+ sb.append(phase[currentRow % 5]).append(','); // phase
+ sb.append(location[currentRow % 5]).append(','); // location
+ sb.append(groupId); // groupID
+
+ return sb.toString();
+ }
+}
diff --git a/docs/examples/java/src/main/java/com/taos/example/highvolume/ReadTask.java b/docs/examples/java/src/main/java/com/taos/example/highvolume/ReadTask.java
new file mode 100644
index 0000000000000000000000000000000000000000..a6fcfed1d28281d46aff493ef9783972858ebe62
--- /dev/null
+++ b/docs/examples/java/src/main/java/com/taos/example/highvolume/ReadTask.java
@@ -0,0 +1,58 @@
+package com.taos.example.highvolume;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.util.Iterator;
+import java.util.List;
+import java.util.concurrent.BlockingQueue;
+
+class ReadTask implements Runnable {
+ private final static Logger logger = LoggerFactory.getLogger(ReadTask.class);
+ private final int taskId;
+ private final List> taskQueues;
+ private final int queueCount;
+ private final int tableCount;
+ private boolean active = true;
+
+ public ReadTask(int readTaskId, List> queues, int tableCount) {
+ this.taskId = readTaskId;
+ this.taskQueues = queues;
+ this.queueCount = queues.size();
+ this.tableCount = tableCount;
+ }
+
+ /**
+ * Assign data received to different queues.
+ * Here we use the suffix number in table name.
+ * You are expected to define your own rule in practice.
+ *
+ * @param line record received
+ * @return which queue to use
+ */
+ public int getQueueId(String line) {
+ String tbName = line.substring(0, line.indexOf(',')); // For example: tb1_101
+ String suffixNumber = tbName.split("_")[1];
+ return Integer.parseInt(suffixNumber) % this.queueCount;
+ }
+
+ @Override
+ public void run() {
+ logger.info("started");
+ Iterator it = new MockDataSource("tb" + this.taskId, tableCount);
+ try {
+ while (it.hasNext() && active) {
+ String line = it.next();
+ int queueId = getQueueId(line);
+ taskQueues.get(queueId).put(line);
+ }
+ } catch (Exception e) {
+ logger.error("Read Task Error", e);
+ }
+ }
+
+ public void stop() {
+ logger.info("stop");
+ this.active = false;
+ }
+}
\ No newline at end of file
diff --git a/docs/examples/java/src/main/java/com/taos/example/highvolume/SQLWriter.java b/docs/examples/java/src/main/java/com/taos/example/highvolume/SQLWriter.java
new file mode 100644
index 0000000000000000000000000000000000000000..c2989acdbe3d0f56d7451ac86051a55955ce14de
--- /dev/null
+++ b/docs/examples/java/src/main/java/com/taos/example/highvolume/SQLWriter.java
@@ -0,0 +1,205 @@
+package com.taos.example.highvolume;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.sql.*;
+import java.util.HashMap;
+import java.util.Map;
+
+/**
+ * A helper class encapsulate the logic of writing using SQL.
+ *
+ * The main interfaces are two methods:
+ *
+ * {@link SQLWriter#processLine}, which receive raw lines from WriteTask and group them by table names.
+ * {@link SQLWriter#flush}, which assemble INSERT statement and execute it.
+ *
+ *
+ * There is a technical skill worth mentioning: we create table as needed when "table does not exist" error occur instead of creating table automatically using syntax "INSET INTO tb USING stb".
+ * This ensure that checking table existence is a one-time-only operation.
+ *
+ *
+ *
+ */
+public class SQLWriter {
+ final static Logger logger = LoggerFactory.getLogger(SQLWriter.class);
+
+ private Connection conn;
+ private Statement stmt;
+
+ /**
+ * current number of buffered records
+ */
+ private int bufferedCount = 0;
+ /**
+ * Maximum number of buffered records.
+ * Flush action will be triggered if bufferedCount reached this value,
+ */
+ private int maxBatchSize;
+
+
+ /**
+ * Maximum SQL length.
+ */
+ private int maxSQLLength;
+
+ /**
+ * Map from table name to column values. For example:
+ * "tb001" -> "(1648432611249,2.1,114,0.09) (1648432611250,2.2,135,0.2)"
+ */
+ private Map tbValues = new HashMap<>();
+
+ /**
+ * Map from table name to tag values in the same order as creating stable.
+ * Used for creating table.
+ */
+ private Map tbTags = new HashMap<>();
+
+ public SQLWriter(int maxBatchSize) {
+ this.maxBatchSize = maxBatchSize;
+ }
+
+
+ /**
+ * Get Database Connection
+ *
+ * @return Connection
+ * @throws SQLException
+ */
+ private static Connection getConnection() throws SQLException {
+ String jdbcURL = System.getenv("TDENGINE_JDBC_URL");
+ return DriverManager.getConnection(jdbcURL);
+ }
+
+ /**
+ * Create Connection and Statement
+ *
+ * @throws SQLException
+ */
+ public void init() throws SQLException {
+ conn = getConnection();
+ stmt = conn.createStatement();
+ stmt.execute("use test");
+ ResultSet rs = stmt.executeQuery("show variables");
+ while (rs.next()) {
+ String configName = rs.getString(1);
+ if ("maxSQLLength".equals(configName)) {
+ maxSQLLength = Integer.parseInt(rs.getString(2));
+ logger.info("maxSQLLength={}", maxSQLLength);
+ }
+ }
+ }
+
+ /**
+ * Convert raw data to SQL fragments, group them by table name and cache them in a HashMap.
+ * Trigger writing when number of buffered records reached maxBachSize.
+ *
+ * @param line raw data get from task queue in format: tbName,ts,current,voltage,phase,location,groupId
+ */
+ public void processLine(String line) throws SQLException {
+ bufferedCount += 1;
+ int firstComma = line.indexOf(',');
+ String tbName = line.substring(0, firstComma);
+ int lastComma = line.lastIndexOf(',');
+ int secondLastComma = line.lastIndexOf(',', lastComma - 1);
+ String value = "(" + line.substring(firstComma + 1, secondLastComma) + ") ";
+ if (tbValues.containsKey(tbName)) {
+ tbValues.put(tbName, tbValues.get(tbName) + value);
+ } else {
+ tbValues.put(tbName, value);
+ }
+ if (!tbTags.containsKey(tbName)) {
+ String location = line.substring(secondLastComma + 1, lastComma);
+ String groupId = line.substring(lastComma + 1);
+ String tagValues = "('" + location + "'," + groupId + ')';
+ tbTags.put(tbName, tagValues);
+ }
+ if (bufferedCount == maxBatchSize) {
+ flush();
+ }
+ }
+
+
+ /**
+ * Assemble INSERT statement using buffered SQL fragments in Map {@link SQLWriter#tbValues} and execute it.
+ * In case of "Table does not exit" exception, create all tables in the sql and retry the sql.
+ */
+ public void flush() throws SQLException {
+ StringBuilder sb = new StringBuilder("INSERT INTO ");
+ for (Map.Entry entry : tbValues.entrySet()) {
+ String tableName = entry.getKey();
+ String values = entry.getValue();
+ String q = tableName + " values " + values + " ";
+ if (sb.length() + q.length() > maxSQLLength) {
+ executeSQL(sb.toString());
+ logger.warn("increase maxSQLLength or decrease maxBatchSize to gain better performance");
+ sb = new StringBuilder("INSERT INTO ");
+ }
+ sb.append(q);
+ }
+ executeSQL(sb.toString());
+ tbValues.clear();
+ bufferedCount = 0;
+ }
+
+ private void executeSQL(String sql) throws SQLException {
+ try {
+ stmt.executeUpdate(sql);
+ } catch (SQLException e) {
+ // convert to error code defined in taoserror.h
+ int errorCode = e.getErrorCode() & 0xffff;
+ if (errorCode == 0x362 || errorCode == 0x218) {
+ // Table does not exist
+ createTables();
+ executeSQL(sql);
+ } else {
+ logger.error("Execute SQL: {}", sql);
+ throw e;
+ }
+ } catch (Throwable throwable) {
+ logger.error("Execute SQL: {}", sql);
+ throw throwable;
+ }
+ }
+
+ /**
+ * Create tables in batch using syntax:
+ *
+ * CREATE TABLE [IF NOT EXISTS] tb_name1 USING stb_name TAGS (tag_value1, ...) [IF NOT EXISTS] tb_name2 USING stb_name TAGS (tag_value2, ...) ...;
+ *
+ */
+ private void createTables() throws SQLException {
+ StringBuilder sb = new StringBuilder("CREATE TABLE ");
+ for (String tbName : tbValues.keySet()) {
+ String tagValues = tbTags.get(tbName);
+ sb.append("IF NOT EXISTS ").append(tbName).append(" USING meters TAGS ").append(tagValues).append(" ");
+ }
+ String sql = sb.toString();
+ try {
+ stmt.executeUpdate(sql);
+ } catch (Throwable throwable) {
+ logger.error("Execute SQL: {}", sql);
+ throw throwable;
+ }
+ }
+
+ public boolean hasBufferedValues() {
+ return bufferedCount > 0;
+ }
+
+ public int getBufferedCount() {
+ return bufferedCount;
+ }
+
+ public void close() {
+ try {
+ stmt.close();
+ } catch (SQLException e) {
+ }
+ try {
+ conn.close();
+ } catch (SQLException e) {
+ }
+ }
+}
\ No newline at end of file
diff --git a/docs/examples/java/src/main/java/com/taos/example/highvolume/StmtWriter.java b/docs/examples/java/src/main/java/com/taos/example/highvolume/StmtWriter.java
new file mode 100644
index 0000000000000000000000000000000000000000..8ade06625d708a112c85d5657aa00bcd0e605ff4
--- /dev/null
+++ b/docs/examples/java/src/main/java/com/taos/example/highvolume/StmtWriter.java
@@ -0,0 +1,4 @@
+package com.taos.example.highvolume;
+
+public class StmtWriter {
+}
diff --git a/docs/examples/java/src/main/java/com/taos/example/highvolume/WriteTask.java b/docs/examples/java/src/main/java/com/taos/example/highvolume/WriteTask.java
new file mode 100644
index 0000000000000000000000000000000000000000..de9e5463d7dc59478f991e4783aacaae527b4c4b
--- /dev/null
+++ b/docs/examples/java/src/main/java/com/taos/example/highvolume/WriteTask.java
@@ -0,0 +1,58 @@
+package com.taos.example.highvolume;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.util.concurrent.BlockingQueue;
+
+class WriteTask implements Runnable {
+ private final static Logger logger = LoggerFactory.getLogger(WriteTask.class);
+ private final int maxBatchSize;
+
+ // the queue from which this writing task get raw data.
+ private final BlockingQueue queue;
+
+ // A flag indicate whether to continue.
+ private boolean active = true;
+
+ public WriteTask(BlockingQueue taskQueue, int maxBatchSize) {
+ this.queue = taskQueue;
+ this.maxBatchSize = maxBatchSize;
+ }
+
+ @Override
+ public void run() {
+ logger.info("started");
+ String line = null; // data getting from the queue just now.
+ SQLWriter writer = new SQLWriter(maxBatchSize);
+ try {
+ writer.init();
+ while (active) {
+ line = queue.poll();
+ if (line != null) {
+ // parse raw data and buffer the data.
+ writer.processLine(line);
+ } else if (writer.hasBufferedValues()) {
+ // write data immediately if no more data in the queue
+ writer.flush();
+ } else {
+ // sleep a while to avoid high CPU usage if no more data in the queue and no buffered records, .
+ Thread.sleep(100);
+ }
+ }
+ if (writer.hasBufferedValues()) {
+ writer.flush();
+ }
+ } catch (Exception e) {
+ String msg = String.format("line=%s, bufferedCount=%s", line, writer.getBufferedCount());
+ logger.error(msg, e);
+ } finally {
+ writer.close();
+ }
+ }
+
+ public void stop() {
+ logger.info("stop");
+ this.active = false;
+ }
+}
\ No newline at end of file
diff --git a/docs/examples/java/src/test/java/com/taos/test/TestAll.java b/docs/examples/java/src/test/java/com/taos/test/TestAll.java
index 42db24485afec05298159f7b0c3a4e15835d98ed..8d201da0745e1d2d36220c9d78383fc37d4a813a 100644
--- a/docs/examples/java/src/test/java/com/taos/test/TestAll.java
+++ b/docs/examples/java/src/test/java/com/taos/test/TestAll.java
@@ -23,16 +23,16 @@ public class TestAll {
String jdbcUrl = "jdbc:TAOS://localhost:6030?user=root&password=taosdata";
try (Connection conn = DriverManager.getConnection(jdbcUrl)) {
try (Statement stmt = conn.createStatement()) {
- String sql = "INSERT INTO power.d1001 USING power.meters TAGS(California.SanFrancisco, 2) VALUES('2018-10-03 14:38:05.000',10.30000,219,0.31000)\n" +
- " power.d1001 USING power.meters TAGS(California.SanFrancisco, 2) VALUES('2018-10-03 15:38:15.000',12.60000,218,0.33000)\n" +
- " power.d1001 USING power.meters TAGS(California.SanFrancisco, 2) VALUES('2018-10-03 15:38:16.800',12.30000,221,0.31000)\n" +
- " power.d1002 USING power.meters TAGS(California.SanFrancisco, 3) VALUES('2018-10-03 15:38:16.650',10.30000,218,0.25000)\n" +
- " power.d1003 USING power.meters TAGS(California.LosAngeles, 2) VALUES('2018-10-03 15:38:05.500',11.80000,221,0.28000)\n" +
- " power.d1003 USING power.meters TAGS(California.LosAngeles, 2) VALUES('2018-10-03 15:38:16.600',13.40000,223,0.29000)\n" +
- " power.d1004 USING power.meters TAGS(California.LosAngeles, 3) VALUES('2018-10-03 15:38:05.000',10.80000,223,0.29000)\n" +
- " power.d1004 USING power.meters TAGS(California.LosAngeles, 3) VALUES('2018-10-03 15:38:06.000',10.80000,223,0.29000)\n" +
- " power.d1004 USING power.meters TAGS(California.LosAngeles, 3) VALUES('2018-10-03 15:38:07.000',10.80000,223,0.29000)\n" +
- " power.d1004 USING power.meters TAGS(California.LosAngeles, 3) VALUES('2018-10-03 15:38:08.500',11.50000,221,0.35000)";
+ String sql = "INSERT INTO power.d1001 USING power.meters TAGS('California.SanFrancisco', 2) VALUES('2018-10-03 14:38:05.000',10.30000,219,0.31000)\n" +
+ " power.d1001 USING power.meters TAGS('California.SanFrancisco', 2) VALUES('2018-10-03 15:38:15.000',12.60000,218,0.33000)\n" +
+ " power.d1001 USING power.meters TAGS('California.SanFrancisco', 2) VALUES('2018-10-03 15:38:16.800',12.30000,221,0.31000)\n" +
+ " power.d1002 USING power.meters TAGS('California.SanFrancisco', 3) VALUES('2018-10-03 15:38:16.650',10.30000,218,0.25000)\n" +
+ " power.d1003 USING power.meters TAGS('California.LosAngeles', 2) VALUES('2018-10-03 15:38:05.500',11.80000,221,0.28000)\n" +
+ " power.d1003 USING power.meters TAGS('California.LosAngeles', 2) VALUES('2018-10-03 15:38:16.600',13.40000,223,0.29000)\n" +
+ " power.d1004 USING power.meters TAGS('California.LosAngeles', 3) VALUES('2018-10-03 15:38:05.000',10.80000,223,0.29000)\n" +
+ " power.d1004 USING power.meters TAGS('California.LosAngeles', 3) VALUES('2018-10-03 15:38:06.000',10.80000,223,0.29000)\n" +
+ " power.d1004 USING power.meters TAGS('California.LosAngeles', 3) VALUES('2018-10-03 15:38:07.000',10.80000,223,0.29000)\n" +
+ " power.d1004 USING power.meters TAGS('California.LosAngeles', 3) VALUES('2018-10-03 15:38:08.500',11.50000,221,0.35000)";
stmt.execute(sql);
}
diff --git a/docs/examples/python/fast_write_example.py b/docs/examples/python/fast_write_example.py
new file mode 100644
index 0000000000000000000000000000000000000000..c9d606388fdecd85f1468f24cc497ecc5941f035
--- /dev/null
+++ b/docs/examples/python/fast_write_example.py
@@ -0,0 +1,180 @@
+# install dependencies:
+# recommend python >= 3.8
+# pip3 install faster-fifo
+#
+
+import logging
+import math
+import sys
+import time
+import os
+from multiprocessing import Process
+from faster_fifo import Queue
+from mockdatasource import MockDataSource
+from queue import Empty
+from typing import List
+
+logging.basicConfig(stream=sys.stdout, level=logging.DEBUG, format="%(asctime)s [%(name)s] - %(message)s")
+
+READ_TASK_COUNT = 1
+WRITE_TASK_COUNT = 1
+TABLE_COUNT = 1000
+QUEUE_SIZE = 1000000
+MAX_BATCH_SIZE = 3000
+
+read_processes = []
+write_processes = []
+
+
+def get_connection():
+ """
+ If variable TDENGINE_FIRST_EP is provided then it will be used. If not, firstEP in /etc/taos/taos.cfg will be used.
+ You can also override the default username and password by supply variable TDENGINE_USER and TDENGINE_PASSWORD
+ """
+ import taos
+ firstEP = os.environ.get("TDENGINE_FIRST_EP")
+ if firstEP:
+ host, port = firstEP.split(":")
+ else:
+ host, port = None, 0
+ user = os.environ.get("TDENGINE_USER", "root")
+ password = os.environ.get("TDENGINE_PASSWORD", "taosdata")
+ return taos.connect(host=host, port=int(port), user=user, password=password)
+
+
+# ANCHOR: read
+
+def run_read_task(task_id: int, task_queues: List[Queue]):
+ table_count_per_task = TABLE_COUNT // READ_TASK_COUNT
+ data_source = MockDataSource(f"tb{task_id}", table_count_per_task)
+ try:
+ for batch in data_source:
+ for table_id, rows in batch:
+ # hash data to different queue
+ i = table_id % len(task_queues)
+ # block putting forever when the queue is full
+ task_queues[i].put_many(rows, block=True, timeout=-1)
+ except KeyboardInterrupt:
+ pass
+
+
+# ANCHOR_END: read
+
+# ANCHOR: write
+def run_write_task(task_id: int, queue: Queue):
+ from sql_writer import SQLWriter
+ log = logging.getLogger(f"WriteTask-{task_id}")
+ writer = SQLWriter(get_connection)
+ lines = None
+ try:
+ while True:
+ try:
+ # get as many as possible
+ lines = queue.get_many(block=False, max_messages_to_get=MAX_BATCH_SIZE)
+ writer.process_lines(lines)
+ except Empty:
+ time.sleep(0.01)
+ except KeyboardInterrupt:
+ pass
+ except BaseException as e:
+ log.debug(f"lines={lines}")
+ raise e
+
+
+# ANCHOR_END: write
+
+def set_global_config():
+ argc = len(sys.argv)
+ if argc > 1:
+ global READ_TASK_COUNT
+ READ_TASK_COUNT = int(sys.argv[1])
+ if argc > 2:
+ global WRITE_TASK_COUNT
+ WRITE_TASK_COUNT = int(sys.argv[2])
+ if argc > 3:
+ global TABLE_COUNT
+ TABLE_COUNT = int(sys.argv[3])
+ if argc > 4:
+ global QUEUE_SIZE
+ QUEUE_SIZE = int(sys.argv[4])
+ if argc > 5:
+ global MAX_BATCH_SIZE
+ MAX_BATCH_SIZE = int(sys.argv[5])
+
+
+# ANCHOR: monitor
+def run_monitor_process():
+ log = logging.getLogger("DataBaseMonitor")
+ conn = get_connection()
+ conn.execute("DROP DATABASE IF EXISTS test")
+ conn.execute("CREATE DATABASE test")
+ conn.execute("CREATE STABLE test.meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) "
+ "TAGS (location BINARY(64), groupId INT)")
+
+ def get_count():
+ res = conn.query("SELECT count(*) FROM test.meters")
+ rows = res.fetch_all()
+ return rows[0][0] if rows else 0
+
+ last_count = 0
+ while True:
+ time.sleep(10)
+ count = get_count()
+ log.info(f"count={count} speed={(count - last_count) / 10}")
+ last_count = count
+
+
+# ANCHOR_END: monitor
+# ANCHOR: main
+def main():
+ set_global_config()
+ logging.info(f"READ_TASK_COUNT={READ_TASK_COUNT}, WRITE_TASK_COUNT={WRITE_TASK_COUNT}, "
+ f"TABLE_COUNT={TABLE_COUNT}, QUEUE_SIZE={QUEUE_SIZE}, MAX_BATCH_SIZE={MAX_BATCH_SIZE}")
+
+ monitor_process = Process(target=run_monitor_process)
+ monitor_process.start()
+ time.sleep(3) # waiting for database ready.
+
+ task_queues: List[Queue] = []
+ # create task queues
+ for i in range(WRITE_TASK_COUNT):
+ queue = Queue(max_size_bytes=QUEUE_SIZE)
+ task_queues.append(queue)
+
+ # create write processes
+ for i in range(WRITE_TASK_COUNT):
+ p = Process(target=run_write_task, args=(i, task_queues[i]))
+ p.start()
+ logging.debug(f"WriteTask-{i} started with pid {p.pid}")
+ write_processes.append(p)
+
+ # create read processes
+ for i in range(READ_TASK_COUNT):
+ queues = assign_queues(i, task_queues)
+ p = Process(target=run_read_task, args=(i, queues))
+ p.start()
+ logging.debug(f"ReadTask-{i} started with pid {p.pid}")
+ read_processes.append(p)
+
+ try:
+ monitor_process.join()
+ except KeyboardInterrupt:
+ monitor_process.terminate()
+ [p.terminate() for p in read_processes]
+ [p.terminate() for p in write_processes]
+ [q.close() for q in task_queues]
+
+
+def assign_queues(read_task_id, task_queues):
+ """
+ Compute target queues for a specific read task.
+ """
+ ratio = WRITE_TASK_COUNT / READ_TASK_COUNT
+ from_index = math.floor(read_task_id * ratio)
+ end_index = math.ceil((read_task_id + 1) * ratio)
+ return task_queues[from_index:end_index]
+
+
+if __name__ == '__main__':
+ main()
+# ANCHOR_END: main
diff --git a/docs/examples/python/mockdatasource.py b/docs/examples/python/mockdatasource.py
new file mode 100644
index 0000000000000000000000000000000000000000..1c516a800e007934f8e6815f82024a53fea70073
--- /dev/null
+++ b/docs/examples/python/mockdatasource.py
@@ -0,0 +1,49 @@
+import time
+
+
+class MockDataSource:
+ samples = [
+ "8.8,119,0.32,California.LosAngeles,0",
+ "10.7,116,0.34,California.SanDiego,1",
+ "9.9,111,0.33,California.SanJose,2",
+ "8.9,113,0.329,California.Campbell,3",
+ "9.4,118,0.141,California.SanFrancisco,4"
+ ]
+
+ def __init__(self, tb_name_prefix, table_count):
+ self.table_name_prefix = tb_name_prefix + "_"
+ self.table_count = table_count
+ self.max_rows = 10000000
+ self.current_ts = round(time.time() * 1000) - self.max_rows * 100
+ # [(tableId, tableName, values),]
+ self.data = self._init_data()
+
+ def _init_data(self):
+ lines = self.samples * (self.table_count // 5 + 1)
+ data = []
+ for i in range(self.table_count):
+ table_name = self.table_name_prefix + str(i)
+ data.append((i, table_name, lines[i])) # tableId, row
+ return data
+
+ def __iter__(self):
+ self.row = 0
+ return self
+
+ def __next__(self):
+ """
+ next 1000 rows for each table.
+ return: {tableId:[row,...]}
+ """
+ # generate 1000 timestamps
+ ts = []
+ for _ in range(1000):
+ self.current_ts += 100
+ ts.append(str(self.current_ts))
+ # add timestamp to each row
+ # [(tableId, ["tableName,ts,current,voltage,phase,location,groupId"])]
+ result = []
+ for table_id, table_name, values in self.data:
+ rows = [table_name + ',' + t + ',' + values for t in ts]
+ result.append((table_id, rows))
+ return result
diff --git a/docs/examples/python/sql_writer.py b/docs/examples/python/sql_writer.py
new file mode 100644
index 0000000000000000000000000000000000000000..758167376b009f21afc701be7d89c1bfbabdeb9f
--- /dev/null
+++ b/docs/examples/python/sql_writer.py
@@ -0,0 +1,90 @@
+import logging
+import taos
+
+
+class SQLWriter:
+ log = logging.getLogger("SQLWriter")
+
+ def __init__(self, get_connection_func):
+ self._tb_values = {}
+ self._tb_tags = {}
+ self._conn = get_connection_func()
+ self._max_sql_length = self.get_max_sql_length()
+ self._conn.execute("USE test")
+
+ def get_max_sql_length(self):
+ rows = self._conn.query("SHOW variables").fetch_all()
+ for r in rows:
+ name = r[0]
+ if name == "maxSQLLength":
+ return int(r[1])
+ return 1024 * 1024
+
+ def process_lines(self, lines: str):
+ """
+ :param lines: [[tbName,ts,current,voltage,phase,location,groupId]]
+ """
+ for line in lines:
+ ps = line.split(",")
+ table_name = ps[0]
+ value = '(' + ",".join(ps[1:-2]) + ') '
+ if table_name in self._tb_values:
+ self._tb_values[table_name] += value
+ else:
+ self._tb_values[table_name] = value
+
+ if table_name not in self._tb_tags:
+ location = ps[-2]
+ group_id = ps[-1]
+ tag_value = f"('{location}',{group_id})"
+ self._tb_tags[table_name] = tag_value
+ self.flush()
+
+ def flush(self):
+ """
+ Assemble INSERT statement and execute it.
+ When the sql length grows close to MAX_SQL_LENGTH, the sql will be executed immediately, and a new INSERT statement will be created.
+ In case of "Table does not exit" exception, tables in the sql will be created and the sql will be re-executed.
+ """
+ sql = "INSERT INTO "
+ sql_len = len(sql)
+ buf = []
+ for tb_name, values in self._tb_values.items():
+ q = tb_name + " VALUES " + values
+ if sql_len + len(q) >= self._max_sql_length:
+ sql += " ".join(buf)
+ self.execute_sql(sql)
+ sql = "INSERT INTO "
+ sql_len = len(sql)
+ buf = []
+ buf.append(q)
+ sql_len += len(q)
+ sql += " ".join(buf)
+ self.execute_sql(sql)
+ self._tb_values.clear()
+
+ def execute_sql(self, sql):
+ try:
+ self._conn.execute(sql)
+ except taos.Error as e:
+ error_code = e.errno & 0xffff
+ # Table does not exit
+ if error_code == 9731:
+ self.create_tables()
+ else:
+ self.log.error("Execute SQL: %s", sql)
+ raise e
+ except BaseException as baseException:
+ self.log.error("Execute SQL: %s", sql)
+ raise baseException
+
+ def create_tables(self):
+ sql = "CREATE TABLE "
+ for tb in self._tb_values.keys():
+ tag_values = self._tb_tags[tb]
+ sql += "IF NOT EXISTS " + tb + " USING meters TAGS " + tag_values + " "
+ try:
+ self._conn.execute(sql)
+ except BaseException as e:
+ self.log.error("Execute SQL: %s", sql)
+ raise e
diff --git a/docs/examples/rust/nativeexample/examples/stmt_example.rs b/docs/examples/rust/nativeexample/examples/stmt_example.rs
index 26084746f20a3662383b417eb98016f09ad0913e..9cf8e8e1fc2526206486fa9a61c01f6320564131 100644
--- a/docs/examples/rust/nativeexample/examples/stmt_example.rs
+++ b/docs/examples/rust/nativeexample/examples/stmt_example.rs
@@ -12,7 +12,7 @@ async fn main() -> anyhow::Result<()> {
// bind table name and tags
stmt.set_tbname_tags(
"d1001",
- &[Value::VarChar("San Fransico".into()), Value::Int(2)],
+ &[Value::VarChar("California.SanFransico".into()), Value::Int(2)],
)?;
// bind values.
let values = vec![
diff --git a/docs/examples/rust/nativeexample/examples/subscribe_demo.rs b/docs/examples/rust/nativeexample/examples/subscribe_demo.rs
index 7e0a347948fc8450dead0babbbdd1eace2f06d1e..11d6d4e0043fddeff73c09d86c0fce0abc903a08 100644
--- a/docs/examples/rust/nativeexample/examples/subscribe_demo.rs
+++ b/docs/examples/rust/nativeexample/examples/subscribe_demo.rs
@@ -19,13 +19,13 @@ struct Record {
async fn prepare(taos: Taos) -> anyhow::Result<()> {
let inserted = taos.exec_many([
// create child table
- "CREATE TABLE `d0` USING `meters` TAGS(0, 'Los Angles')",
+ "CREATE TABLE `d0` USING `meters` TAGS(0, 'California.LosAngles')",
// insert into child table
"INSERT INTO `d0` values(now - 10s, 10, 116, 0.32)",
// insert with NULL values
"INSERT INTO `d0` values(now - 8s, NULL, NULL, NULL)",
// insert and automatically create table with tags if not exists
- "INSERT INTO `d1` USING `meters` TAGS(1, 'San Francisco') values(now - 9s, 10.1, 119, 0.33)",
+ "INSERT INTO `d1` USING `meters` TAGS(1, 'California.SanFrancisco') values(now - 9s, 10.1, 119, 0.33)",
// insert many records in a single sql
"INSERT INTO `d1` values (now-8s, 10, 120, 0.33) (now - 6s, 10, 119, 0.34) (now - 4s, 11.2, 118, 0.322)",
]).await?;
@@ -48,7 +48,7 @@ async fn main() -> anyhow::Result<()> {
format!("CREATE DATABASE `{db}`"),
format!("USE `{db}`"),
// create super table
- format!("CREATE TABLE `meters` (`ts` TIMESTAMP, `current` FLOAT, `voltage` INT, `phase` FLOAT) TAGS (`groupid` INT, `location` BINARY(16))"),
+ format!("CREATE TABLE `meters` (`ts` TIMESTAMP, `current` FLOAT, `voltage` INT, `phase` FLOAT) TAGS (`groupid` INT, `location` BINARY(24))"),
// create topic for subscription
format!("CREATE TOPIC tmq_meters with META AS DATABASE {db}")
])
diff --git a/docs/examples/rust/restexample/examples/insert_example.rs b/docs/examples/rust/restexample/examples/insert_example.rs
index 27b2bb4788615810d097b88f0dd616b96885538c..11a84f166103eba03b43549d4db77100a92a58e6 100644
--- a/docs/examples/rust/restexample/examples/insert_example.rs
+++ b/docs/examples/rust/restexample/examples/insert_example.rs
@@ -14,14 +14,14 @@ async fn main() -> anyhow::Result<()> {
]).await?;
let inserted = taos.exec("INSERT INTO
- power.d1001 USING power.meters TAGS('San Francisco', 2)
+ power.d1001 USING power.meters TAGS('California.SanFrancisco', 2)
VALUES ('2018-10-03 14:38:05.000', 10.30000, 219, 0.31000)
('2018-10-03 14:38:15.000', 12.60000, 218, 0.33000) ('2018-10-03 14:38:16.800', 12.30000, 221, 0.31000)
- power.d1002 USING power.meters TAGS('San Francisco', 3)
+ power.d1002 USING power.meters TAGS('California.SanFrancisco', 3)
VALUES ('2018-10-03 14:38:16.650', 10.30000, 218, 0.25000)
- power.d1003 USING power.meters TAGS('Los Angeles', 2)
+ power.d1003 USING power.meters TAGS('California.LosAngeles', 2)
VALUES ('2018-10-03 14:38:05.500', 11.80000, 221, 0.28000) ('2018-10-03 14:38:16.600', 13.40000, 223, 0.29000)
- power.d1004 USING power.meters TAGS('Los Angeles', 3)
+ power.d1004 USING power.meters TAGS('California.LosAngeles', 3)
VALUES ('2018-10-03 14:38:05.000', 10.80000, 223, 0.29000) ('2018-10-03 14:38:06.500', 11.50000, 221, 0.35000)").await?;
assert_eq!(inserted, 8);
diff --git a/docs/zh/01-index.md b/docs/zh/01-index.md
index 79d5424ac2e67e05c346e546847c743595d7a82b..f9127121f35c8cdb9d28e121c20b9b7bb9101625 100644
--- a/docs/zh/01-index.md
+++ b/docs/zh/01-index.md
@@ -4,22 +4,22 @@ sidebar_label: 文档首页
slug: /
---
-TDengine是一款[开源](https://www.taosdata.com/tdengine/open_source_time-series_database)、[高性能](https://www.taosdata.com/fast)、[云原生](https://www.taosdata.com/tdengine/cloud_native_time-series_database)的时序数据库 (Time Series Database , TSDB ), 它专为物联网、工业互联网、金融等场景优化设计。同时它还带有内建的缓存、流式计算、数据订阅等系统功能,能大幅减少系统设计的复杂度,降低研发和运营成本,是一极简的时序数据处理平台。本文档是 TDengine 用户手册,主要是介绍 TDengine 的基本概念、安装、使用、功能、开发接口、运营维护、TDengine 内核设计等等,它主要是面向架构师、开发者与系统管理员的。
+TDengine 是一款[开源](https://www.taosdata.com/tdengine/open_source_time-series_database)、[高性能](https://www.taosdata.com/fast)、[云原生](https://www.taosdata.com/tdengine/cloud_native_time-series_database)的时序数据库 (Time Series Database , TSDB ), 它专为物联网、车联网、工业互联网、金融、IT 运维等场景优化设计。同时它还带有内建的缓存、流式计算、数据订阅等系统功能,能大幅减少系统设计的复杂度,降低研发和运营成本,是一款极简的时序数据处理平台。本文档是 TDengine 的用户手册,主要是介绍 TDengine 的基本概念、安装、使用、功能、开发接口、运营维护、TDengine 内核设计等等,它主要是面向架构师、开发工程师与系统管理员的。
-TDengine 充分利用了时序数据的特点,提出了“一个数据采集点一张表”与“超级表”的概念,设计了创新的存储引擎,让数据的写入、查询和存储效率都得到极大的提升。为正确理解并使用TDengine, 无论如何,请您仔细阅读[基本概念](./concept)一章。
+TDengine 充分利用了时序数据的特点,提出了“一个数据采集点一张表”与“超级表”的概念,设计了创新的存储引擎,让数据的写入、查询和存储效率都得到极大的提升。为正确理解并使用 TDengine,无论如何,请您仔细阅读[基本概念](./concept)一章。
-如果你是开发者,请一定仔细阅读[开发指南](./develop)一章,该部分对数据库连接、建模、插入数据、查询、流式计算、缓存、数据订阅、用户自定义函数等功能都做了详细介绍,并配有各种编程语言的示例代码。大部分情况下,你只要把示例代码拷贝粘贴,针对自己的应用稍作改动,就能跑起来。
+如果你是开发工程师,请一定仔细阅读[开发指南](./develop)一章,该部分对数据库连接、建模、插入数据、查询、流式计算、缓存、数据订阅、用户自定义函数等功能都做了详细介绍,并配有各种编程语言的示例代码。大部分情况下,你只要复制粘贴示例代码,针对自己的应用稍作改动,就能跑起来。
-我们已经生活在大数据的时代,纵向扩展已经无法满足日益增长的业务需求,任何系统都必须具有水平扩展的能力,集群成为大数据以及 database 系统的不可缺失功能。TDengine 团队不仅实现了集群功能,而且将这一重要核心功能开源。怎么部署、管理和维护 TDengine 集群,请参考[部署集群](./deployment)一章。
+我们已经生活在大数据时代,纵向扩展已经无法满足日益增长的业务需求,任何系统都必须具有水平扩展的能力,集群成为大数据以及 Database 系统的不可缺失功能。TDengine 团队不仅实现了集群功能,而且将这一重要核心功能开源。怎么部署、管理和维护 TDengine 集群,请仔细参考[部署集群](./deployment)一章。
-TDengine 采用 SQL 作为其查询语言,大大降低学习成本、降低迁移成本,但同时针对时序数据场景,又做了一些扩展,以支持插值、降采样、时间加权平均等操作。[SQL 手册](./taos-sql)一章详细描述了 SQL 语法、详细列出了各种支持的命令和函数。
+TDengine 采用 SQL 作为查询语言,大大降低学习成本、降低迁移成本,但同时针对时序数据场景,又做了一些扩展,以支持插值、降采样、时间加权平均等操作。[SQL 手册](./taos-sql)一章详细描述了 SQL 语法、详细列出了各种支持的命令和函数。
-如果你是系统管理员,关心安装、升级、容错灾备、关心数据导入、导出,配置参数,怎么监测 TDengine 是否健康运行,怎么提升系统运行的性能,那么请仔细参考[运维指南](./operation)一章。
+如果你是系统管理员,关心安装、升级、容错灾备、关心数据导入、导出、配置参数,如何监测 TDengine 是否健康运行,如何提升系统运行的性能,请仔细参考[运维指南](./operation)一章。
-如果你对 TDengine 外围工具,REST API, 各种编程语言的连接器想做更多详细了解,请看[参考指南](./reference)一章。
+如果你对 TDengine 的外围工具、REST API、各种编程语言的连接器(Connector)想做更多详细了解,请看[参考指南](./reference)一章。
-如果你对 TDengine 内部的架构设计很有兴趣,欢迎仔细阅读[技术内幕](./tdinternal)一章,里面对集群的设计、数据分区、分片、写入、读出、查询、聚合查询的流程都做了详细的介绍。如果你想研读 TDengine 代码甚至贡献代码,请一定仔细读完这一章。
+如果你对 TDengine 的内部架构设计很有兴趣,欢迎仔细阅读[技术内幕](./tdinternal)一章,里面对集群的设计、数据分区、分片、写入、读出、查询、聚合查询的流程都做了详细的介绍。如果你想研读 TDengine 代码甚至贡献代码,请一定仔细读完这一章。
-最后,作为一个开源软件,欢迎大家的参与。如果发现文档的任何错误,描述不清晰的地方,都请在每个页面的最下方,点击“编辑本文档“直接进行修改。
+最后,作为一个开源软件,欢迎大家的参与。如果发现文档有任何错误、描述不清晰的地方,请在每个页面的最下方,点击“编辑本文档”直接进行修改。
Together, we make a difference!
diff --git a/docs/zh/02-intro.md b/docs/zh/02-intro.md
index f6779b8776af308d4cf14168c528ddc60c47ac13..47bfd3f96b6fdbb27d3f3e326e14a6b22108d508 100644
--- a/docs/zh/02-intro.md
+++ b/docs/zh/02-intro.md
@@ -4,72 +4,95 @@ description: 简要介绍 TDengine 的主要功能
toc_max_heading_level: 2
---
-TDengine 是一款[开源](https://www.taosdata.com/tdengine/open_source_time-series_database)、[高性能](https://www.taosdata.com/tdengine/fast)、[云原生](https://www.taosdata.com/tdengine/cloud_native_time-series_database)的时序数据库 (Time Series Database , TSDB )。TDengine 能被广泛运用于物联网、工业互联网、车联网、IT 运维、金融等领域。除核心的时序数据库功能外,TDengine 还提供[缓存](../develop/cache/)、[数据订阅](../develop/tmq)、[流式计算](../develop/stream)等功能,是一极简的时序数据处理平台,最大程度的减小系统设计的复杂度,降低研发和运营成本。
+TDengine 是一款开源、高性能、云原生的[时序数据库](https://tdengine.com/tsdb/),且针对物联网、车联网、工业互联网、金融、IT 运维等场景进行了优化。TDengine 的代码,包括集群功能,都在 GNU AGPL v3.0 下开源。除核心的时序数据库功能外,TDengine 还提供[缓存](../develop/cache/)、[数据订阅](../develop/tmq)、[流式计算](../develop/stream)等其它功能以降低系统复杂度及研发和运维成本。
-本章节介绍TDengine的主要功能、竞争优势、适用场景、与其他数据库的对比测试等等,让大家对TDengine有个整体的了解。
+本章节介绍 TDengine 的主要功能、竞争优势、适用场景、与其他数据库的对比测试等等,让大家对 TDengine 有个整体的了解。
## 主要功能
-TDengine的主要功能如下:
-
-1. 高速数据写入,除 [SQL 写入](../develop/insert-data/sql-writing)外,还支持 [Schemaless 写入](../reference/schemaless/),支持 [InfluxDB LINE 协议](../develop/insert-data/influxdb-line),[OpenTSDB Telnet](../develop/insert-data/opentsdb-telnet), [OpenTSDB JSON ](../develop/insert-data/opentsdb-json)等协议写入;
-2. 第三方数据采集工具 [Telegraf](../third-party/telegraf),[Prometheus](../third-party/prometheus),[StatsD](../third-party/statsd),[collectd](../third-party/collectd),[icinga2](../third-party/icinga2), [TCollector](../third-party/tcollector), [EMQ](../third-party/emq-broker), [HiveMQ](../third-party/hive-mq-broker) 等都可以进行配置后,不用任何代码,即可将数据写入;
-3. 支持[各种查询](../develop/query-data),包括聚合查询、嵌套查询、降采样查询、插值等
-4. 支持[用户自定义函数](../develop/udf)
-5. 支持[缓存](../develop/cache),将每张表的最后一条记录缓存起来,这样无需 Redis
-6. 支持[流式计算](../develop/stream)(Stream Processing)
-7. 支持[数据订阅](../develop/tmq),而且可以指定过滤条件
-8. 支持[集群](../deployment/),可以通过多节点进行水平扩展,并通过多副本实现高可靠
-9. 提供[命令行程序](../reference/taos-shell),便于管理集群,检查系统状态,做即席查询
-10. 提供多种数据的[导入](../operation/import)、[导出](../operation/export)
-11. 支持对[TDengine 集群本身的监控](../operation/monitor)
-12. 提供各种语言的[连接器](../connector): 如 C/C++, Java, Go, Node.JS, Rust, Python, C# 等
-13. 支持 [REST 接口](../connector/rest-api/)
-14. 支持与[ Grafana 无缝集成](../third-party/grafana)
-15. 支持与 Google Data Studio 无缝集成
-16. 支持 [Kubernetes 部署](../deployment/k8s)
-
-更多细小的功能,请阅读整个文档。
+TDengine 的主要功能如下:
+
+1. 写入数据,支持
+ - [SQL 写入](../develop/insert-data/sql-writing)
+ - [无模式(Schemaless)写入](../reference/schemaless/),支持多种标准写入协议
+ - [InfluxDB Line 协议](../develop/insert-data/influxdb-line)
+ - [OpenTSDB Telnet 协议](../develop/insert-data/opentsdb-telnet)
+ - [OpenTSDB JSON 协议](../develop/insert-data/opentsdb-json)
+ - 与多种第三方工具的无缝集成,它们都可以仅通过配置而无需任何代码即可将数据写入 TDengine
+ - [Telegraf](../third-party/telegraf)
+ - [Prometheus](../third-party/prometheus)
+ - [StatsD](../third-party/statsd)
+ - [collectd](../third-party/collectd)
+ - [Icinga2](../third-party/icinga2)
+ - [TCollector](../third-party/tcollector)
+ - [EMQX](../third-party/emq-broker)
+ - [HiveMQ](../third-party/hive-mq-broker)
+2. 查询数据,支持
+ - [标准 SQL](../taos-sql),含嵌套查询
+ - [时序数据特色函数](../taos-sql/function/#time-series-extensions)
+ - [时序数据特色查询](../taos-sql/distinguished),例如降采样、插值、累加和、时间加权平均、状态窗口、会话窗口等
+ - [用户自定义函数(UDF)](../taos-sql/udf)
+3. [缓存](../develop/cache),将每张表的最后一条记录缓存起来,这样无需 Redis 就能对时序数据进行高效处理
+4. [流式计算(Stream Processing)](../develop/stream),TDengine 不仅支持连续查询,还支持事件驱动的流式计算,这样在处理时序数据时就无需 Flink 或 Spark 这样流式计算组件
+5. [数据订阅](../develop/tmq),应用程序可以订阅一张表或一组表的数据,提供与 Kafka 相同的 API,而且可以指定过滤条件
+6. 可视化
+ - 支持与 [Grafana](../third-party/grafana/) 的无缝集成
+ - 支持与 Google Data Studio 的无缝集成
+7. 集群
+ - [集群部署](../deployment/),可以通过增加节点进行水平扩展以提升处理能力
+ - 可以通过 [Kubernetes 部署 TDengine](../deployment/k8s/)
+ - 通过多副本提供高可用能力
+8. 管理
+ - [监控](../operation/monitor)运行中的 TDengine 实例
+ - 多种[数据导入](../operation/import)方式
+ - 多种[数据导出](../operation/export)方式
+9. 工具
+ - 提供[交互式命令行程序(CLI)](../reference/taos-shell),便于管理集群,检查系统状态,做即席查询
+ - 提供压力测试工具 [taosBenchmark](../reference/taosbenchmark),用于测试 TDengine 的性能
+10. 编程
+ - 提供各种语言的[连接器(Connector)](../connector): 如 [C/C++](../connector/cpp)、[Java](../connector/java)、[Go](../connector/go)、[Node.js](../connector/node)、[Rust](../connector/rust)、[Python](../connector/python)、[C#](../connector/csharp) 等
+ - 支持 [REST 接口](../connector/rest-api/)
+
+更多细节功能,请阅读整个文档。
## 竞争优势
-由于 TDengine 充分利用了[时序数据特点](https://www.taosdata.com/blog/2019/07/09/105.html),比如结构化、无需事务、很少删除或更新、写多读少等等,设计了全新的针对时序数据的存储引擎和计算引擎,因此与其他时序数据库相比,TDengine 有以下特点:
+由于 TDengine 充分利用了[时序数据特点](https://www.taosdata.com/blog/2019/07/09/105.html),比如结构化、无需事务、很少删除或更新、写多读少等等,因此与其他时序数据库相比,TDengine 有以下特点:
-- **[高性能](https://www.taosdata.com/tdengine/fast)**:通过创新的存储引擎设计,无论是数据写入还是查询,TDengine 的性能比通用数据库快 10 倍以上,也远超其他时序数据库,存储空间不及通用数据库的1/10。
+- **[高性能](https://www.taosdata.com/tdengine/fast)**:TDengine 是唯一一个解决了时序数据存储的高基数难题的时序数据库,支持上亿数据采集点,并在数据插入、查询和数据压缩上远胜其它时序数据库。
-- **[云原生](https://www.taosdata.com/tdengine/cloud_native_time-series_database)**:通过原生分布式的设计,充分利用云平台的优势,TDengine 提供了水平扩展能力,具备弹性、韧性和可观测性,支持k8s部署,可运行在公有云、私有云和混合云上。
+- **[极简时序数据平台](https://www.taosdata.com/tdengine/simplified_solution_for_time-series_data_processing)**:TDengine 内建缓存、流式计算和数据订阅等功能,为时序数据的处理提供了极简的解决方案,从而大幅降低了业务系统的设计复杂度和运维成本。
-- **[极简时序数据平台](https://www.taosdata.com/tdengine/simplified_solution_for_time-series_data_processing)**:TDengine 内建消息队列、缓存、流式计算等功能,应用无需再集成 Kafka/Redis/HBase/Spark 等软件,大幅降低系统的复杂度,降低应用开发和运营成本。
+- **[云原生](https://www.taosdata.com/tdengine/cloud_native_time-series_database)**:通过原生的分布式设计、数据分片和分区、存算分离、RAFT 协议、Kubernetes 部署和完整的可观测性,TDengine 是一款云原生时序数据库并且能够部署在公有云、私有云和混合云上。
-- **[分析能力](https://www.taosdata.com/tdengine/easy_data_analytics)**:支持 SQL,同时为时序数据特有的分析提供SQL扩展。通过超级表、存储计算分离、分区分片、预计算、自定义函数等技术,TDengine 具备强大的分析能力。
+- **[简单易用](https://www.taosdata.com/tdengine/ease_of_use)**:对系统管理员来说,TDengine 大幅降低了管理和维护的代价。对开发者来说, TDengine 提供了简单的接口、极简的解决方案和与第三方工具的无缝集成。对数据分析专家来说,TDengine 提供了便捷的数据访问能力。
-- **[简单易用](https://www.taosdata.com/tdengine/ease_of_use)**:无任何依赖,安装、集群几秒搞定;提供REST以及各种语言连接器,与众多第三方工具无缝集成;提供命令行程序,便于管理和即席查询;提供各种运维工具。
+- **[分析能力](https://www.taosdata.com/tdengine/easy_data_analytics)**:通过超级表、存储计算分离、分区分片、预计算和其它技术,TDengine 能够高效地浏览、格式化和访问数据。
-- **[核心开源](https://www.taosdata.com/tdengine/open_source_time-series_database)**:TDengine 的核心代码包括集群功能全部开源,截止到2022年8月1日,全球超过 135.9k 个运行实例,GitHub Star 18.7k,Fork 4.4k,社区活跃。
+- **[核心开源](https://www.taosdata.com/tdengine/open_source_time-series_database)**:TDengine 的核心代码包括集群功能全部在开源协议下公开。全球超过 140k 个运行实例,GitHub Star 19k,且拥有一个活跃的开发者社区。
采用 TDengine,可将典型的物联网、车联网、工业互联网大数据平台的总拥有成本大幅降低。表现在几个方面:
-1. 由于其超强性能,它能将系统需要的计算资源和存储资源大幅降低
+1. 由于其超强性能,它能将系统所需的计算资源和存储资源大幅降低
2. 因为支持 SQL,能与众多第三方软件无缝集成,学习迁移成本大幅下降
-3. 因为是一极简的时序数据平台,系统复杂度、研发和运营成本大幅降低
-4. 因为维护简单,运营维护成本能大幅降低
+3. 因为是一款极简的时序数据平台,系统复杂度、研发和运营成本大幅降低
## 技术生态
-在整个时序大数据平台中,TDengine 在其中扮演的角色如下:
+在整个时序大数据平台中,TDengine 扮演的角色如下:

+图 1. TDengine 技术生态图
-图 1. TDengine技术生态图
-上图中,左侧是各种数据采集或消息队列,包括 OPC-UA、MQTT、Telegraf、也包括 Kafka, 他们的数据将被源源不断的写入到 TDengine。右侧则是可视化、BI 工具、组态软件、应用程序。下侧则是 TDengine 自身提供的命令行程序 (CLI) 以及可视化管理管理。
+上图中,左侧是各种数据采集或消息队列,包括 OPC-UA、MQTT、Telegraf、也包括 Kafka,他们的数据将被源源不断的写入到 TDengine。右侧则是可视化、BI 工具、组态软件、应用程序。下侧则是 TDengine 自身提供的命令行程序(CLI)以及可视化管理工具。
-## 总体适用场景
+## 典型适用场景
-作为一个高性能、分布式、支持 SQL 的时序数据库 (Database),TDengine 的典型适用场景包括但不限于 IoT、工业互联网、车联网、IT 运维、能源、金融证券等领域。需要指出的是,TDengine 是针对时序数据场景设计的专用数据库和专用大数据处理工具,因充分利用了时序大数据的特点,它无法用来处理网络爬虫、微博、微信、电商、ERP、CRM 等通用型数据。本文对适用场景做更多详细的分析。
+作为一个高性能、分布式、支持 SQL 的时序数据库(Database),TDengine 的典型适用场景包括但不限于 IoT、工业互联网、车联网、IT 运维、能源、金融证券等领域。需要指出的是,TDengine 是针对时序数据场景设计的专用数据库和专用大数据处理工具,因其充分利用了时序大数据的特点,它无法用来处理网络爬虫、微博、微信、电商、ERP、CRM 等通用型数据。下面本文将对适用场景做更多详细的分析。
### 数据源特点和需求
@@ -91,18 +114,18 @@ TDengine的主要功能如下:
### 系统功能需求
-| 系统功能需求 | 不适用 | 可能适用 | 非常适用 | 简单说明 |
-| -------------------------- | ------ | -------- | -------- | --------------------------------------------------------------------------------------------------------------------- |
-| 要求完整的内置数据处理算法 | | √ | | TDengine 的实现了通用的数据处理算法,但是还没有做到妥善处理各行各业的所有要求,因此特殊类型的处理还需要应用层面处理。 |
-| 需要大量的交叉查询处理 | | √ | | 这种类型的处理更多应该用关系型数据系统处理,或者应该考虑 TDengine 和关系型数据系统配合实现系统功能。 |
+| 系统功能需求 | 不适用 | 可能适用 | 非常适用 | 简单说明 |
+| -------------------------- | ------ | -------- | -------- | ------------------------------------------------------------------------------------------------------------------------- |
+| 要求完整的内置数据处理算法 | | √ | | TDengine 实现了通用的数据处理算法,但是还没有做到妥善处理各行各业的所有需求,因此特殊类型的处理需求还需要在应用层面解决。 |
+| 需要大量的交叉查询处理 | | √ | | 这种类型的处理更多应该用关系型数据库处理,或者应该考虑 TDengine 和关系型数据库配合实现系统功能。 |
### 系统性能需求
-| 系统性能需求 | 不适用 | 可能适用 | 非常适用 | 简单说明 |
-| ---------------------- | ------ | -------- | -------- | ------------------------------------------------------------------------------------------------------ |
-| 要求较大的总体处理能力 | | | √ | TDengine 的集群功能可以轻松地让多服务器配合达成处理能力的提升。 |
-| 要求高速处理数据 | | | √ | TDengine 的专门为 IoT 优化的存储和数据处理的设计,一般可以让系统得到超出同类产品多倍数的处理速度提升。 |
-| 要求快速处理小粒度数据 | | | √ | 这方面 TDengine 性能可以完全对标关系型和 NoSQL 型数据处理系统。 |
+| 系统性能需求 | 不适用 | 可能适用 | 非常适用 | 简单说明 |
+| ---------------------- | ------ | -------- | -------- | -------------------------------------------------------------------------------------------------- |
+| 要求较大的总体处理能力 | | | √ | TDengine 的集群功能可以轻松地让多服务器配合达成处理能力的提升。 |
+| 要求高速处理数据 | | | √ | TDengine 专门为 IoT 优化的存储和数据处理设计,一般可以让系统得到超出同类产品多倍数的处理速度提升。 |
+| 要求快速处理小粒度数据 | | | √ | 这方面 TDengine 性能可以完全对标关系型和 NoSQL 型数据处理系统。 |
### 系统维护需求
diff --git a/docs/zh/04-concept/index.md b/docs/zh/04-concept/index.md
index 89d3df9c973d9a319397285599e6b2e6be3785de..2cba68edcd152f5059845b9e25342b3f335f3b8b 100644
--- a/docs/zh/04-concept/index.md
+++ b/docs/zh/04-concept/index.md
@@ -4,119 +4,118 @@ title: 数据模型和基本概念
description: TDengine 的数据模型和基本概念
---
-为了便于解释基本概念,便于撰写示例程序,整个 TDengine 文档以智能电表作为典型时序数据场景。假设每个智能电表采集电流、电压、相位三个量,有多个智能电表,每个电表有位置 location 和分组 group ID 的静态属性. 其采集的数据类似如下的表格:
+为了便于解释基本概念,便于撰写示例程序,整个 TDengine 文档以智能电表作为典型时序数据场景。假设每个智能电表采集电流、电压、相位三个量,有多个智能电表,每个电表有位置 Location 和分组 Group ID 的静态属性. 其采集的数据类似如下的表格:
-
- Device ID
- Time Stamp
- Collected Metrics
- Tags
+
+
+ Device ID
+ Timestamp
+ Collected Metrics
+ Tags
-
-Device ID
-Time Stamp
-current
-voltage
-phase
-location
-groupId
-
-
-
-
-d1001
-1538548685000
-10.3
-219
-0.31
-California.SanFrancisco
-2
-
-
-d1002
-1538548684000
-10.2
-220
-0.23
-California.SanFrancisco
-3
-
-
-d1003
-1538548686500
-11.5
-221
-0.35
-California.LosAngeles
-3
-
-
-d1004
-1538548685500
-13.4
-223
-0.29
-California.LosAngeles
-2
-
-
-d1001
-1538548695000
-12.6
-218
-0.33
-California.SanFrancisco
-2
-
-
-d1004
-1538548696600
-11.8
-221
-0.28
-California.LosAngeles
-2
-
-
-d1002
-1538548696650
-10.3
-218
-0.25
-California.SanFrancisco
-3
-
-
-d1001
-1538548696800
-12.3
-221
-0.31
-California.SanFrancisco
-2
-
-
+
+ current
+ voltage
+ phase
+ location
+ groupid
+
+
+
+
+ d1001
+ 1538548685000
+ 10.3
+ 219
+ 0.31
+ California.SanFrancisco
+ 2
+
+
+ d1002
+ 1538548684000
+ 10.2
+ 220
+ 0.23
+ California.SanFrancisco
+ 3
+
+
+ d1003
+ 1538548686500
+ 11.5
+ 221
+ 0.35
+ California.LosAngeles
+ 3
+
+
+ d1004
+ 1538548685500
+ 13.4
+ 223
+ 0.29
+ California.LosAngeles
+ 2
+
+
+ d1001
+ 1538548695000
+ 12.6
+ 218
+ 0.33
+ California.SanFrancisco
+ 2
+
+
+ d1004
+ 1538548696600
+ 11.8
+ 221
+ 0.28
+ California.LosAngeles
+ 2
+
+
+ d1002
+ 1538548696650
+ 10.3
+ 218
+ 0.25
+ California.SanFrancisco
+ 3
+
+
+ d1001
+ 1538548696800
+ 12.3
+ 221
+ 0.31
+ California.SanFrancisco
+ 2
+
+
-
表 1:智能电表数据示例
+
表 1. 智能电表数据示例
-每一条记录都有设备 ID,时间戳,采集的物理量以及每个设备相关的静态标签。每个设备是受外界的触发,或按照设定的周期采集数据。采集的数据点是时序的,是一个数据流。
+每一条记录都有设备 ID、时间戳、采集的物理量(如上表中的 `current`、`voltage` 和 `phase`)以及每个设备相关的静态标签(`location` 和 `groupid`)。每个设备是受外界的触发,或按照设定的周期采集数据。采集的数据点是时序的,是一个数据流。
-## 采集量 (Metric)
+## 采集量(Metric)
采集量是指传感器、设备或其他类型采集点采集的物理量,比如电流、电压、温度、压力、GPS 位置等,是随时间变化的,数据类型可以是整型、浮点型、布尔型,也可是字符串。随着时间的推移,存储的采集量的数据量越来越大。智能电表示例中的电流、电压、相位就是采集量。
-## 标签 (Label/Tag)
+## 标签(Label/Tag)
-标签是指传感器、设备或其他类型采集点的静态属性,不是随时间变化的,比如设备型号、颜色、设备的所在地等,数据类型可以是任何类型。虽然是静态的,但 TDengine 容许用户修改、删除或增加标签值。与采集量不一样的是,随时间的推移,存储的标签的数据量不会有什么变化。智能电表示例中的location与groupId就是标签。
+标签是指传感器、设备或其他类型采集点的静态属性,不是随时间变化的,比如设备型号、颜色、设备的所在地等,数据类型可以是任何类型。虽然是静态的,但 TDengine 容许用户修改、删除或增加标签值。与采集量不一样的是,随时间的推移,存储的标签的数据量不会有什么变化。智能电表示例中的 `location` 与 `groupid` 就是标签。
-## 数据采集点 (Data Collection Point)
+## 数据采集点(Data Collection Point)
-数据采集点是指按照预设时间周期或受事件触发采集物理量的硬件或软件。一个数据采集点可以采集一个或多个采集量,**但这些采集量都是同一时刻采集的,具有相同的时间戳**。对于复杂的设备,往往有多个数据采集点,每个数据采集点采集的周期都可能不一样,而且完全独立,不同步。比如对于一台汽车,有数据采集点专门采集 GPS 位置,有数据采集点专门采集发动机状态,有数据采集点专门采集车内的环境,这样一台汽车就有三个数据采集点。智能电表示例中的d1001, d1002, d1003, d1004等就是数据采集点。
+数据采集点是指按照预设时间周期或受事件触发采集物理量的硬件或软件。一个数据采集点可以采集一个或多个采集量,**但这些采集量都是同一时刻采集的,具有相同的时间戳**。对于复杂的设备,往往有多个数据采集点,每个数据采集点采集的周期都可能不一样,而且完全独立,不同步。比如对于一台汽车,有数据采集点专门采集 GPS 位置,有数据采集点专门采集发动机状态,有数据采集点专门采集车内的环境,这样一台汽车就有三个数据采集点。智能电表示例中的 d1001、d1002、d1003、d1004 等就是数据采集点。
-## 表 (Table)
+## 表(Table)
因为采集量一般是结构化数据,同时为降低学习门槛,TDengine 采用传统的关系型数据库模型管理数据。用户需要先创建库,然后创建表,之后才能插入或查询数据。
@@ -129,50 +128,56 @@ description: TDengine 的数据模型和基本概念
如果采用传统的方式,将多个数据采集点的数据写入一张表,由于网络延时不可控,不同数据采集点的数据到达服务器的时序是无法保证的,写入操作是要有锁保护的,而且一个数据采集点的数据是难以保证连续存储在一起的。**采用一个数据采集点一张表的方式,能最大程度的保证单个数据采集点的插入和查询的性能是最优的。**
-TDengine 建议用数据采集点的名字(如上表中的 D1001)来做表名。每个数据采集点可能同时采集多个采集量(如上表中的 current,voltage,phase),每个采集量对应一张表中的一列,数据类型可以是整型、浮点型、字符串等。除此之外,表的第一列必须是时间戳,即数据类型为 timestamp。对采集量,TDengine 将自动按照时间戳建立索引,但对采集量本身不建任何索引。数据用列式存储方式保存。
-
-对于复杂的设备,比如汽车,它有多个数据采集点,那么就需要为一台汽车建立多张表。
+TDengine 建议用数据采集点的名字(如上表中的 d1001)来做表名。每个数据采集点可能同时采集多个采集量(如上表中的 `current`、`voltage` 和 `phase`),每个采集量对应一张表中的一列,数据类型可以是整型、浮点型、字符串等。除此之外,表的第一列必须是时间戳,即数据类型为 Timestamp。对采集量,TDengine 将自动按照时间戳建立索引,但对采集量本身不建任何索引。数据用列式存储方式保存。
+对于复杂的设备,比如汽车,它有多个数据采集点,那么就需要为一辆汽车建立多张表。
-## 超级表 (STable)
+## 超级表(STable)
由于一个数据采集点一张表,导致表的数量巨增,难以管理,而且应用经常需要做采集点之间的聚合操作,聚合的操作也变得复杂起来。为解决这个问题,TDengine 引入超级表(Super Table,简称为 STable)的概念。
-超级表是指某一特定类型的数据采集点的集合。同一类型的数据采集点,其表的结构是完全一样的,但每个表(数据采集点)的静态属性(标签)是不一样的。描述一个超级表(某一特定类型的数据采集点的集合),除需要定义采集量的表结构之外,还需要定义其标签的 schema,标签的数据类型可以是整数、浮点数、字符串,标签可以有多个,可以事后增加、删除或修改。如果整个系统有 N 个不同类型的数据采集点,就需要建立 N 个超级表。
+超级表是指某一特定类型的数据采集点的集合。同一类型的数据采集点,其表的结构是完全一样的,但每个表(数据采集点)的静态属性(标签)是不一样的。描述一个超级表(某一特定类型的数据采集点的集合),除需要定义采集量的表结构之外,还需要定义其标签的 Schema,标签的数据类型可以是整数、浮点数、字符串、JSON,标签可以有多个,可以事后增加、删除或修改。如果整个系统有 N 个不同类型的数据采集点,就需要建立 N 个超级表。
-在 TDengine 的设计里,**表用来代表一个具体的数据采集点,超级表用来代表一组相同类型的数据采集点集合**。智能电表示例中,我们可以创建一个超级表meters.
+在 TDengine 的设计里,**表用来代表一个具体的数据采集点,超级表用来代表一组相同类型的数据采集点集合**。智能电表示例中,我们可以创建一个超级表 `meters`.
-## 子表 (Subtable)
+## 子表(Subtable)
当为某个具体数据采集点创建表时,用户可以使用超级表的定义做模板,同时指定该具体采集点(表)的具体标签值来创建该表。**通过超级表创建的表称之为子表**。正常的表与子表的差异在于:
-1. 子表就是表,因此所有正常表的SQL操作都可以在子表上执行。
+1. 子表就是表,因此所有正常表的 SQL 操作都可以在子表上执行。
2. 子表在正常表的基础上有扩展,它是带有静态标签的,而且这些标签可以事后增加、删除、修改,而正常的表没有。
3. 子表一定属于一张超级表,但普通表不属于任何超级表
4. 普通表无法转为子表,子表也无法转为普通表。
超级表与与基于超级表建立的子表之间的关系表现在:
-1. 一张超级表包含有多张子表,这些子表具有相同的采集量 schema,但带有不同的标签值。
+1. 一张超级表包含有多张子表,这些子表具有相同的采集量 Schema,但带有不同的标签值。
2. 不能通过子表调整数据或标签的模式,对于超级表的数据模式修改立即对所有的子表生效。
3. 超级表只定义一个模板,自身不存储任何数据或标签信息。因此,不能向一个超级表写入数据,只能将数据写入子表中。
查询既可以在表上进行,也可以在超级表上进行。针对超级表的查询,TDengine 将把所有子表中的数据视为一个整体数据集进行处理,会先把满足标签过滤条件的表从超级表中找出来,然后再扫描这些表的时序数据,进行聚合操作,这样需要扫描的数据集会大幅减少,从而显著提高查询的性能。本质上,TDengine 通过对超级表查询的支持,实现了多个同类数据采集点的高效聚合。
-TDengine系统建议给一个数据采集点建表,需要通过超级表建表,而不是建普通表。在智能电表的示例中,我们可以通过超级表meters创建子表d1001, d1002, d1003, d1004等。
+TDengine 系统建议给一个数据采集点建表,需要通过超级表建表,而不是建普通表。在智能电表的示例中,我们可以通过超级表 meters 创建子表 d1001、d1002、d1003、d1004 等。
+
+为了更好地理解采集量、标签、超级与子表的关系,可以参考下面关于智能电表数据模型的示意图。
+
+
+
+
-为了更好地理解超级与子表的关系,可以参考下面关于智能电表数据模型的示意图。 
+图 1. 智能电表数据模型示意图
+
-## 库 (database)
+## 库(Database)
库是指一组表的集合。TDengine 容许一个运行实例有多个库,而且每个库可以配置不同的存储策略。不同类型的数据采集点往往具有不同的数据特征,包括数据采集频率的高低,数据保留时间的长短,副本的数目,数据块的大小,是否允许更新数据等等。为了在各种场景下 TDengine 都能最大效率的工作,TDengine 建议将不同数据特征的超级表创建在不同的库里。
一个库里,可以有一到多个超级表,但一个超级表只属于一个库。一个超级表所拥有的子表全部存在一个库里。
-## FQDN & End Point
+## FQDN & Endpoint
-FQDN (fully qualified domain name, 完全限定域名)是 Internet 上特定计算机或主机的完整域名。FQDN 由两部分组成:主机名和域名。例如,假设邮件服务器的 FQDN 可能是 mail.tdengine.com。主机名是 mail,主机位于域名 tdengine.com 中。DNS(Domain Name System),负责将 FQDN 翻译成 IP,是互联网应用的寻址方式。对于没有 DNS 的系统,可以通过配置 hosts 文件来解决。
+FQDN(Fully Qualified Domain Name,完全限定域名)是 Internet 上特定计算机或主机的完整域名。FQDN 由两部分组成:主机名和域名。例如,假设邮件服务器的 FQDN 可能是 mail.tdengine.com。主机名是 mail,主机位于域名 tdengine.com 中。DNS(Domain Name System),负责将 FQDN 翻译成 IP,是互联网应用的寻址方式。对于没有 DNS 的系统,可以通过配置 hosts 文件来解决。
-TDengine 集群的每个节点是由 End Point 来唯一标识的,End Point 是由 FQDN 外加 Port 组成,比如 h1.tdengine.com:6030。这样当 IP 发生变化的时候,我们依然可以使用 FQDN 来动态找到节点,不需要更改集群的任何配置。而且采用 FQDN,便于内网和外网对同一个集群的统一访问。
+TDengine 集群的每个节点是由 Endpoint 来唯一标识的,Endpoint 是由 FQDN 外加 Port 组成,比如 h1.tdengine.com:6030。这样当 IP 发生变化的时候,我们依然可以使用 FQDN 来动态找到节点,不需要更改集群的任何配置。而且采用 FQDN,便于内网和外网对同一个集群的统一访问。
TDengine 不建议采用直接的 IP 地址访问集群,不利于管理。不了解 FQDN 概念,请看博文[《一篇文章说清楚 TDengine 的 FQDN》](https://www.taosdata.com/blog/2020/09/11/1824.html)。
diff --git a/docs/zh/05-get-started/01-docker.md b/docs/zh/05-get-started/01-docker.md
index e2be4195176a3f1ac7712a036d04b60b2fb77718..0f004581b5be470f3c99f48e1aaecb3578c442ab 100644
--- a/docs/zh/05-get-started/01-docker.md
+++ b/docs/zh/05-get-started/01-docker.md
@@ -4,11 +4,11 @@ title: 通过 Docker 快速体验 TDengine
description: 使用 Docker 快速体验 TDengine 的高效写入和查询
---
-本节首先介绍如何通过 Docker 快速体验 TDengine,然后介绍如何在 Docker 环境下体验 TDengine 的写入和查询功能。如果你不熟悉 Docker,请使用[安装包的方式快速体验](../../get-started/package/)。如果您希望为 TDengine 贡献代码或对内部技术实现感兴趣,请参考 [TDengine GitHub 主页](https://github.com/taosdata/TDengine) 下载源码构建和安装.
+本节首先介绍如何通过 Docker 快速体验 TDengine,然后介绍如何在 Docker 环境下体验 TDengine 的写入和查询功能。如果你不熟悉 Docker,请使用[安装包的方式快速体验](../../get-started/package/)。如果您希望为 TDengine 贡献代码或对内部技术实现感兴趣,请参考 [TDengine GitHub 主页](https://github.com/taosdata/TDengine)下载源码构建和安装。
## 启动 TDengine
-如果已经安装了 docker, 只需执行下面的命令。
+如果已经安装了 Docker,只需执行下面的命令:
```shell
docker run -d -p 6030:6030 -p 6041:6041 -p 6043-6049:6043-6049 -p 6043-6049:6043-6049/udp tdengine/tdengine
@@ -16,84 +16,84 @@ docker run -d -p 6030:6030 -p 6041:6041 -p 6043-6049:6043-6049 -p 6043-6049:6043
注意:TDengine 3.0 服务端仅使用 6030 TCP 端口。6041 为 taosAdapter 所使用提供 REST 服务端口。6043-6049 为 taosAdapter 提供第三方应用接入所使用端口,可根据需要选择是否打开。
-确定该容器已经启动并且在正常运行
+确定该容器已经启动并且在正常运行。
```shell
docker ps
```
-进入该容器并执行 bash
+进入该容器并执行 `bash`
```shell
docker exec -it bash
```
-然后就可以执行相关的 Linux 命令操作和访问 TDengine
+然后就可以执行相关的 Linux 命令操作和访问 TDengine。
-注: Docker 工具自身的下载和使用请参考 [Docker 官网文档](https://docs.docker.com/get-docker/)。
+注:Docker 工具自身的下载和使用请参考 [Docker 官网文档](https://docs.docker.com/get-docker/)。
## 运行 TDengine CLI
-进入容器,执行 taos
+进入容器,执行 `taos`:
```
$ taos
-taos>
-
+taos>
```
-## 写入数据
+## 使用 taosBenchmark 体验写入速度
-可以使用 TDengine 的自带工具 taosBenchmark 快速体验 TDengine 的写入。
+可以使用 TDengine 的自带工具 taosBenchmark 快速体验 TDengine 的写入速度。
-进入容器,启动 taosBenchmark:
+启动 TDengine 的服务,在 Linux 或 Windows 终端执行 `taosBenchmark`(曾命名为 `taosdemo`):
- ```bash
- $ taosBenchmark
-
- ```
+```bash
+$ taosBenchmark
+```
- 该命令将在数据库 test 下面自动创建一张超级表 meters,该超级表下有 1 万张表,表名为 "d0" 到 "d9999",每张表有 1 万条记录,每条记录有 (ts, current, voltage, phase) 四个字段,时间戳从 "2017-07-14 10:40:00 000" 到 "2017-07-14 10:40:09 999",每张表带有标签 location 和 groupId,groupId 被设置为 1 到 10, location 被设置为 "San Francisco" 或者 "Los Angeles"等城市名称。
+该命令将在数据库 `test` 下面自动创建一张超级表 `meters`,该超级表下有 1 万张表,表名为 `d0` 到 `d9999`,每张表有 1 万条记录,每条记录有 `ts`、`current`、`voltage`、`phase` 四个字段,时间戳从 2017-07-14 10:40:00 000 到 2017-07-14 10:40:09 999,每张表带有标签 `location` 和 `groupId`,groupId 被设置为 1 到 10,location 被设置为 `California.Campbell`、`California.Cupertino`、`California.LosAngeles`、`California.MountainView`、`California.PaloAlto`、`California.SanDiego`、`California.SanFrancisco`、`California.SanJose`、`California.SantaClara` 或者 `California.Sunnyvale`。
- 这条命令很快完成 1 亿条记录的插入。具体时间取决于硬件性能。
+这条命令很快完成 1 亿条记录的插入。具体时间取决于硬件性能,即使在一台普通的 PC 服务器往往也仅需十几秒。
- taosBenchmark 命令本身带有很多选项,配置表的数目、记录条数等等,您可以设置不同参数进行体验,请执行 `taosBenchmark --help` 详细列出。taosBenchmark 详细使用方法请参照 [taosBenchmark 参考手册](../../reference/taosbenchmark)。
+taosBenchmark 命令本身带有很多选项,配置表的数目、记录条数等等,您可以设置不同参数进行体验,请执行 `taosBenchmark --help` 详细列出。taosBenchmark 详细使用方法请参照[如何使用 taosBenchmark 对 TDengine 进行性能测试](https://www.taosdata.com/2021/10/09/3111.html)和 [taosBenchmark 参考手册](../../reference/taosbenchmark)。
-## 体验查询
+## 使用 TDengine CLI 体验查询速度
-使用上述 taosBenchmark 插入数据后,可以在 TDengine CLI 输入查询命令,体验查询速度。。
+使用上述 `taosBenchmark` 插入数据后,可以在 TDengine CLI(taos)输入查询命令,体验查询速度。
-查询超级表下记录总条数:
+查询超级表 `meters` 下的记录总条数:
```sql
-taos> select count(*) from test.meters;
+SELECT COUNT(*) FROM test.meters;
```
查询 1 亿条记录的平均值、最大值、最小值等:
```sql
-taos> select avg(current), max(voltage), min(phase) from test.meters;
+SELECT AVG(current), MAX(voltage), MIN(phase) FROM test.meters;
```
-查询 location="San Francisco" 的记录总条数:
+查询 location = "California.SanFrancisco" 的记录总条数:
```sql
-taos> select count(*) from test.meters where location="San Francisco";
+SELECT COUNT(*) FROM test.meters WHERE location = "California.SanFrancisco";
```
-查询 groupId=10 的所有记录的平均值、最大值、最小值等:
+查询 groupId = 10 的所有记录的平均值、最大值、最小值等:
```sql
-taos> select avg(current), max(voltage), min(phase) from test.meters where groupId=10;
+SELECT AVG(current), MAX(voltage), MIN(phase) FROM test.meters WHERE groupId = 10;
```
-对表 d10 按 10s 进行平均值、最大值和最小值聚合统计:
+对表 `d10` 按 10 每秒进行平均值、最大值和最小值聚合统计:
```sql
-taos> select avg(current), max(voltage), min(phase) from test.d10 interval(10s);
+SELECT FIRST(ts), AVG(current), MAX(voltage), MIN(phase) FROM test.d10 INTERVAL(10s);
```
+在上面的查询中,你选择的是区间内的第一个时间戳(ts),另一种选择方式是 `\_wstart`,它将给出时间窗口的开始。关于窗口查询的更多信息,参见[特色查询](../../taos-sql/distinguished/)。
+
## 其它
-更多关于在 Docker 环境下使用 TDengine 的细节,请参考 [在 Docker 下使用 TDengine](../../reference/docker)
+更多关于在 Docker 环境下使用 TDengine 的细节,请参考 [在 Docker 下使用 TDengine](../../reference/docker)。
diff --git a/docs/zh/05-get-started/03-package.md b/docs/zh/05-get-started/03-package.md
index 3e0fb056a5913d3a82a473bf879a79e398176075..2c857d37f3a8591316949f9a7440a115740a8e2d 100644
--- a/docs/zh/05-get-started/03-package.md
+++ b/docs/zh/05-get-started/03-package.md
@@ -10,23 +10,24 @@ import PkgListV3 from "/components/PkgListV3";
您可以[用 Docker 立即体验](../../get-started/docker/) TDengine。如果您希望对 TDengine 贡献代码或对内部实现感兴趣,请参考我们的 [TDengine GitHub 主页](https://github.com/taosdata/TDengine) 下载源码构建和安装.
-TDengine 完整的软件包包括服务端(taosd)、用于与第三方系统对接并提供 RESTful 接口的 taosAdapter、应用驱动(taosc)、命令行程序 (CLI,taos) 和一些工具软件。目前 taosAdapter 仅在 Linux 系统上安装和运行,后续将支持 Windows、macOS 等系统。TDengine 除了提供多种语言的连接器之外,还通过 [taosAdapter](../../reference/taosadapter/) 提供 [RESTful 接口](../../connector/rest-api/)。
+TDengine 完整的软件包包括服务端(taosd)、应用驱动(taosc)、用于与第三方系统对接并提供 RESTful 接口的 taosAdapter、命令行程序(CLI,taos)和一些工具软件。目前 taosAdapter 仅在 Linux 系统上安装和运行,后续将支持 Windows、macOS 等系统。TDengine 除了提供多种语言的连接器之外,还通过 [taosAdapter](../../reference/taosadapter/) 提供 [RESTful 接口](../../connector/rest-api/)。
-为方便使用,标准的服务端安装包包含了 taosd、taosAdapter、taosc、taos、taosdump、taosBenchmark、TDinsight 安装脚本和示例代码;如果您只需要用到服务端程序和客户端连接的 C/C++ 语言支持,也可以仅下载 lite 版本的安装包。
+为方便使用,标准的服务端安装包包含了 taosd、taosAdapter、taosc、taos、taosdump、taosBenchmark、TDinsight 安装脚本和示例代码;如果您只需要用到服务端程序和客户端连接的 C/C++ 语言支持,也可以仅下载 Lite 版本的安装包。
-在 Linux 系统上,TDengine 开源版本提供 deb 和 rpm 格式安装包,用户可以根据自己的运行环境选择合适的安装包。其中 deb 支持 Debian/Ubuntu 及衍生系统,rpm 支持 CentOS/RHEL/SUSE 及衍生系统。同时我们也为企业用户提供 tar.gz 格式安装包,也支持通过 `apt-get` 工具从线上进行安装。需要注意的是,rpm 和 deb 包不含 taosdump 和 TDinsight 安装脚本,这些工具需要通过安装 taosTool 包获得。TDengine 也提供 Windows x64 平台的安装包。
+在 Linux 系统上,TDengine 社区版提供 Deb 和 RPM 格式安装包,用户可以根据自己的运行环境选择合适的安装包。其中 Deb 支持 Debian/Ubuntu 及其衍生系统,RPM 支持 CentOS/RHEL/SUSE 及其衍生系统。同时我们也为企业用户提供 tar.gz 格式安装包,也支持通过 `apt-get` 工具从线上进行安装。需要注意的是,RPM 和 Deb 包不含 `taosdump` 和 TDinsight 安装脚本,这些工具需要通过安装 taosTool 包获得。TDengine 也提供 Windows x64 平台的安装包。
## 安装
-1. 从列表中下载获得 deb 安装包;
-
+1. 从列表中下载获得 Deb 安装包;
+
2. 进入到安装包所在目录,执行如下的安装命令:
+> 请将 `` 替换为下载的安装包版本
+
```bash
-# 替换为下载的安装包版本
sudo dpkg -i TDengine-server--Linux-x64.deb
```
@@ -34,12 +35,13 @@ sudo dpkg -i TDengine-server--Linux-x64.deb
-1. 从列表中下载获得 rpm 安装包;
-
+1. 从列表中下载获得 RPM 安装包;
+
2. 进入到安装包所在目录,执行如下的安装命令:
+> 请将 `` 替换为下载的安装包版本
+
```bash
-# 替换为下载的安装包版本
sudo rpm -ivh TDengine-server--Linux-x64.rpm
```
@@ -48,44 +50,47 @@ sudo rpm -ivh TDengine-server--Linux-x64.rpm
1. 从列表中下载获得 tar.gz 安装包;
-
-2. 进入到安装包所在目录,先解压文件后,进入子目录,执行其中的 install.sh 安装脚本:
+
+2. 进入到安装包所在目录,使用 `tar` 解压安装包;
+3. 进入到安装包所在目录,先解压文件后,进入子目录,执行其中的 install.sh 安装脚本。
+
+> 请将 `` 替换为下载的安装包版本
```bash
-# 替换为下载的安装包版本
tar -zxvf TDengine-server--Linux-x64.tar.gz
```
-解压后进入相应路径,执行
+解压文件后,进入相应子目录,执行其中的 `install.sh` 安装脚本:
```bash
sudo ./install.sh
```
:::info
-install.sh 安装脚本在执行过程中,会通过命令行交互界面询问一些配置信息。如果希望采取无交互安装方式,那么可以用 -e no 参数来执行 install.sh 脚本。运行 `./install.sh -h` 指令可以查看所有参数的详细说明信息。
+install.sh 安装脚本在执行过程中,会通过命令行交互界面询问一些配置信息。如果希望采取无交互安装方式,那么可以运行 `./install.sh -e no`。运行 `./install.sh -h` 指令可以查看所有参数的详细说明信息。
:::
-可以使用 apt-get 工具从官方仓库安装。
-**安装包仓库**
+可以使用 `apt-get` 工具从官方仓库安装。
+
+**配置包仓库**
```bash
wget -qO - http://repos.taosdata.com/tdengine.key | sudo apt-key add -
echo "deb [arch=amd64] http://repos.taosdata.com/tdengine-stable stable main" | sudo tee /etc/apt/sources.list.d/tdengine-stable.list
```
-如果安装 Beta 版需要安装包仓库
+如果安装 Beta 版需要安装包仓库:
```bash
wget -qO - http://repos.taosdata.com/tdengine.key | sudo apt-key add -
echo "deb [arch=amd64] http://repos.taosdata.com/tdengine-beta beta main" | sudo tee /etc/apt/sources.list.d/tdengine-beta.list
```
-**使用 apt-get 命令安装**
+**使用 `apt-get` 命令安装**
```bash
sudo apt-get update
@@ -94,26 +99,26 @@ sudo apt-get install tdengine
```
:::tip
-apt-get 方式只适用于 Debian 或 Ubuntu 系统
+apt-get 方式只适用于 Debian 或 Ubuntu 系统。
::::
-
+
-注意:目前 TDengine 在 Windows 平台上只支持 Windows server 2016/2019 和 Windows 10/11 系统版本。
+注意:目前 TDengine 在 Windows 平台上只支持 Windows Server 2016/2019 和 Windows 10/11。
1. 从列表中下载获得 exe 安装程序;
-
+
2. 运行可执行程序来安装 TDengine。
:::info
-下载其他组件、最新 Beta 版及之前版本的安装包,请点击[发布历史页面](../../releases/tdengine)
+下载其他组件、最新 Beta 版及之前版本的安装包,请点击[发布历史页面](../../releases/tdengine)。
:::
:::note
-当安装第一个节点时,出现 Enter FQDN:提示的时候,不需要输入任何内容。只有当安装第二个或以后更多的节点时,才需要输入已有集群中任何一个可用节点的 FQDN,支持该新节点加入集群。当然也可以不输入,而是在新节点启动前,配置到新节点的配置文件中。
+当安装第一个节点时,出现 `Enter FQDN:` 提示的时候,不需要输入任何内容。只有当安装第二个或以后更多的节点时,才需要输入已有集群中任何一个可用节点的 FQDN,支持该新节点加入集群。当然也可以不输入,而是在新节点启动前,配置到新节点的配置文件中。
:::
@@ -148,7 +153,7 @@ Active: inactive (dead)
如果 TDengine 服务正常工作,那么您可以通过 TDengine 的命令行程序 `taos` 来访问并体验 TDengine。
-systemctl 命令汇总:
+如下 `systemctl` 命令可以帮助你管理 TDengine 服务:
- 启动服务进程:`systemctl start taosd`
@@ -160,7 +165,7 @@ systemctl 命令汇总:
:::info
-- systemctl 命令需要 _root_ 权限来运行,如果您非 _root_ 用户,请在命令前添加 sudo 。
+- `systemctl` 命令需要 _root_ 权限来运行,如果您非 _root_ 用户,请在命令前添加 `sudo`。
- `systemctl stop taosd` 指令在执行后并不会马上停止 TDengine 服务,而是会等待系统中必要的落盘工作正常完成。在数据量很大的情况下,这可能会消耗较长时间。
- 如果系统中不支持 `systemd`,也可以用手动运行 `/usr/local/taos/bin/taosd` 方式启动 TDengine 服务。
@@ -170,87 +175,93 @@ systemctl 命令汇总:
-安装后,在 C:\TDengine 目录下,运行 taosd.exe 来启动 TDengine 服务进程。
+安装后,在 `C:\TDengine` 目录下,运行 `taosd.exe` 来启动 TDengine 服务进程。
-## TDengine 命令行 (CLI)
+## TDengine 命令行(CLI)
-为便于检查 TDengine 的状态,执行数据库 (Database) 的各种即席(Ad Hoc)查询,TDengine 提供一命令行应用程序(以下简称为 TDengine CLI) taos。要进入 TDengine 命令行,您只要在安装有 TDengine 的 Linux 终端执行 `taos` 即可,也可以在安装有 TDengine 的 Windows 终端的 C:\TDengine 目录下,运行 taos.exe 来启动 TDengine 命令行。
+为便于检查 TDengine 的状态,执行数据库(Database)的各种即席(Ad Hoc)查询,TDengine 提供一命令行应用程序(以下简称为 TDengine CLI)taos。要进入 TDengine 命令行,您只要在安装有 TDengine 的 Linux 终端执行 `taos` 即可,也可以在安装有 TDengine 的 Windows 终端的 C:\TDengine 目录下,运行 taos.exe 来启动 TDengine 命令行。
```bash
taos
```
-如果连接服务成功,将会打印出欢迎消息和版本信息。如果失败,则会打印错误消息出来(请参考 [FAQ](/train-faq/faq) 来解决终端连接服务端失败的问题)。 TDengine CLI 的提示符号如下:
+如果连接服务成功,将会打印出欢迎消息和版本信息。如果失败,则会打印错误消息出来(请参考 [FAQ](/train-faq/faq) 来解决终端连接服务端失败的问题)。TDengine CLI 的提示符号如下:
```cmd
taos>
```
-在 TDengine CLI 中,用户可以通过 SQL 命令来创建/删除数据库、表等,并进行数据库(database)插入查询操作。在终端中运行的 SQL 语句需要以分号结束来运行。示例:
+在 TDengine CLI 中,用户可以通过 SQL 命令来创建/删除数据库、表等,并进行数据库(Database)插入查询操作。在终端中运行的 SQL 语句需要以分号(;)结束来运行。示例:
```sql
-create database demo;
-use demo;
-create table t (ts timestamp, speed int);
-insert into t values ('2019-07-15 00:00:00', 10);
-insert into t values ('2019-07-15 01:00:00', 20);
-select * from t;
+CREATE DATABASE demo;
+USE demo;
+CREATE TABLE t (ts TIMESTAMP, speed INT);
+INSERT INTO t VALUES ('2019-07-15 00:00:00', 10);
+INSERT INTO t VALUES ('2019-07-15 01:00:00', 20);
+SELECT * FROM t;
+
ts | speed |
========================================
2019-07-15 00:00:00.000 | 10 |
2019-07-15 01:00:00.000 | 20 |
+
Query OK, 2 row(s) in set (0.003128s)
```
-除执行 SQL 语句外,系统管理员还可以从 TDengine CLI 进行检查系统运行状态、添加删除用户账号等操作。TDengine CLI 连同应用驱动也可以独立安装在 Linux 或 Windows 机器上运行,更多细节请参考 [这里](../../reference/taos-shell/)
+除执行 SQL 语句外,系统管理员还可以从 TDengine CLI 进行检查系统运行状态、添加删除用户账号等操作。TDengine CLI 连同应用驱动也可以独立安装在 Linux 或 Windows 机器上运行,更多细节请参考 [TDengine 命令行](../../reference/taos-shell/)。
## 使用 taosBenchmark 体验写入速度
-启动 TDengine 的服务,在 Linux 或 windows 终端执行 `taosBenchmark` (曾命名为 `taosdemo`):
+可以使用 TDengine 的自带工具 taosBenchmark 快速体验 TDengine 的写入速度。
+
+启动 TDengine 的服务,在 Linux 或 Windows 终端执行 `taosBenchmark`(曾命名为 `taosdemo`):
```bash
-taosBenchmark
+$ taosBenchmark
```
-该命令将在数据库 test 下面自动创建一张超级表 meters,该超级表下有 1 万张表,表名为 "d0" 到 "d9999",每张表有 1 万条记录,每条记录有 (ts, current, voltage, phase) 四个字段,时间戳从 "2017-07-14 10:40:00 000" 到 "2017-07-14 10:40:09 999",每张表带有标签 location 和 groupId,groupId 被设置为 1 到 10, location 被设置为 "California.SanFrancisco" 或者 "California.LosAngeles"。
+该命令将在数据库 `test` 下面自动创建一张超级表 `meters`,该超级表下有 1 万张表,表名为 `d0` 到 `d9999`,每张表有 1 万条记录,每条记录有 `ts`、`current`、`voltage`、`phase` 四个字段,时间戳从 2017-07-14 10:40:00 000 到 2017-07-14 10:40:09 999,每张表带有标签 `location` 和 `groupId`,groupId 被设置为 1 到 10,location 被设置为 `California.Campbell`、`California.Cupertino`、`California.LosAngeles`、`California.MountainView`、`California.PaloAlto`、`California.SanDiego`、`California.SanFrancisco`、`California.SanJose`、`California.SantaClara` 或者 `California.Sunnyvale`。
这条命令很快完成 1 亿条记录的插入。具体时间取决于硬件性能,即使在一台普通的 PC 服务器往往也仅需十几秒。
-taosBenchmark 命令本身带有很多选项,配置表的数目、记录条数等等,您可以设置不同参数进行体验,请执行 `taosBenchmark --help` 详细列出。taosBenchmark 详细使用方法请参照 [如何使用 taosBenchmark 对 TDengine 进行性能测试](https://www.taosdata.com/2021/10/09/3111.html)。
+taosBenchmark 命令本身带有很多选项,配置表的数目、记录条数等等,您可以设置不同参数进行体验,请执行 `taosBenchmark --help` 详细列出。taosBenchmark 详细使用方法请参照[如何使用 taosBenchmark 对 TDengine 进行性能测试](https://www.taosdata.com/2021/10/09/3111.html)和 [taosBenchmark 参考手册](../../reference/taosbenchmark)。
## 使用 TDengine CLI 体验查询速度
-使用上述 taosBenchmark 插入数据后,可以在 TDengine CLI 输入查询命令,体验查询速度。
+使用上述 `taosBenchmark` 插入数据后,可以在 TDengine CLI(taos)输入查询命令,体验查询速度。
-查询超级表下记录总条数:
+查询超级表 `meters` 下的记录总条数:
```sql
-taos> select count(*) from test.meters;
+SELECT COUNT(*) FROM test.meters;
```
查询 1 亿条记录的平均值、最大值、最小值等:
```sql
-taos> select avg(current), max(voltage), min(phase) from test.meters;
+SELECT AVG(current), MAX(voltage), MIN(phase) FROM test.meters;
```
-查询 location="California.SanFrancisco" 的记录总条数:
+查询 location = "California.SanFrancisco" 的记录总条数:
```sql
-taos> select count(*) from test.meters where location="California.SanFrancisco";
+SELECT COUNT(*) FROM test.meters WHERE location = "Calaifornia.SanFrancisco";
```
-查询 groupId=10 的所有记录的平均值、最大值、最小值等:
+查询 groupId = 10 的所有记录的平均值、最大值、最小值等:
```sql
-taos> select avg(current), max(voltage), min(phase) from test.meters where groupId=10;
+SELECT AVG(current), MAX(voltage), MIN(phase) FROM test.meters WHERE groupId = 10;
```
-对表 d10 按 10s 进行平均值、最大值和最小值聚合统计:
+对表 `d10` 按 10 每秒进行平均值、最大值和最小值聚合统计:
```sql
-taos> select avg(current), max(voltage), min(phase) from test.d10 interval(10s);
+SELECT FIRST(ts), AVG(current), MAX(voltage), MIN(phase) FROM test.d10 INTERVAL(10s);
```
+
+在上面的查询中,你选择的是区间内的第一个时间戳(ts),另一种选择方式是 `\_wstart`,它将给出时间窗口的开始。关于窗口查询的更多信息,参见[特色查询](../../taos-sql/distinguished/)。
diff --git a/docs/zh/07-develop/01-connect/_connect_cs.mdx b/docs/zh/07-develop/01-connect/_connect_cs.mdx
index 13b8a5dff250e6143fbed3090ba1f35e74adb9a0..169bf37a636276a4d94c7b7faba4f7896c42e007 100644
--- a/docs/zh/07-develop/01-connect/_connect_cs.mdx
+++ b/docs/zh/07-develop/01-connect/_connect_cs.mdx
@@ -1,8 +1,8 @@
```csharp title="原生连接"
-{{#include docs/examples/csharp/ConnectExample.cs}}
+{{#include docs/examples/csharp/connect/Program.cs}}
```
-:::info
-C# 连接器目前只支持原生连接。
+```csharp title="WebSocket 连接"
+{{#include docs/examples/csharp/wsConnect/Program.cs}}
+```
-:::
diff --git a/docs/zh/07-develop/02-model/index.mdx b/docs/zh/07-develop/02-model/index.mdx
index 634c8a98d4756253fbb39ffacb6f20888b66721d..d66059c2cda2a0e4629b16ca44cee036dc67546f 100644
--- a/docs/zh/07-develop/02-model/index.mdx
+++ b/docs/zh/07-develop/02-model/index.mdx
@@ -41,7 +41,7 @@ USE power;
CREATE STABLE meters (ts timestamp, current float, voltage int, phase float) TAGS (location binary(64), groupId int);
```
-与创建普通表一样,创建超级表时,需要提供表名(示例中为 meters),表结构 Schema,即数据列的定义。第一列必须为时间戳(示例中为 ts),其他列为采集的物理量(示例中为 current, voltage, phase),数据类型可以为整型、浮点型、字符串等。除此之外,还需要提供标签的 schema (示例中为 location, groupId),标签的数据类型可以为整型、浮点型、字符串等。采集点的静态属性往往可以作为标签,比如采集点的地理位置、设备型号、设备组 ID、管理员 ID 等等。标签的 schema 可以事后增加、删除、修改。具体定义以及细节请见 [TAOS SQL 的超级表管理](/taos-sql/stable) 章节。
+与创建普通表一样,创建超级表时,需要提供表名(示例中为 meters),表结构 Schema,即数据列的定义。第一列必须为时间戳(示例中为 ts),其他列为采集的物理量(示例中为 current, voltage, phase),数据类型可以为整型、浮点型、字符串等。除此之外,还需要提供标签的 schema (示例中为 location, groupId),标签的数据类型可以为整型、浮点型、字符串等。采集点的静态属性往往可以作为标签,比如采集点的地理位置、设备型号、设备组 ID、管理员 ID 等等。标签的 schema 可以事后增加、删除、修改。具体定义以及细节请见 [TDengine SQL 的超级表管理](/taos-sql/stable) 章节。
每一种类型的数据采集点需要建立一个超级表,因此一个物联网系统,往往会有多个超级表。对于电网,我们就需要对智能电表、变压器、母线、开关等都建立一个超级表。在物联网中,一个设备就可能有多个数据采集点(比如一台风力发电的风机,有的采集点采集电流、电压等电参数,有的采集点采集温度、湿度、风向等环境参数),这个时候,对这一类型的设备,需要建立多张超级表。
@@ -55,7 +55,7 @@ TDengine 对每个数据采集点需要独立建表。与标准的关系型数
CREATE TABLE d1001 USING meters TAGS ("California.SanFrancisco", 2);
```
-其中 d1001 是表名,meters 是超级表的表名,后面紧跟标签 Location 的具体标签值 "California.SanFrancisco",标签 groupId 的具体标签值 2。虽然在创建表时,需要指定标签值,但可以事后修改。详细细则请见 [TAOS SQL 的表管理](/taos-sql/table) 章节。
+其中 d1001 是表名,meters 是超级表的表名,后面紧跟标签 Location 的具体标签值 "California.SanFrancisco",标签 groupId 的具体标签值 2。虽然在创建表时,需要指定标签值,但可以事后修改。详细细则请见 [TDengine SQL 的表管理](/taos-sql/table) 章节。
TDengine 建议将数据采集点的全局唯一 ID 作为表名(比如设备序列号)。但对于有的场景,并没有唯一的 ID,可以将多个 ID 组合成一个唯一的 ID。不建议将具有唯一性的 ID 作为标签值。
diff --git a/docs/zh/07-develop/03-insert-data/01-sql-writing.mdx b/docs/zh/07-develop/03-insert-data/01-sql-writing.mdx
index 214cbdaa96d02e0cd1251eeda97c6a897887cc7e..8818eaae3dc1806a00e73d9846fbd1dfe15e0c8a 100644
--- a/docs/zh/07-develop/03-insert-data/01-sql-writing.mdx
+++ b/docs/zh/07-develop/03-insert-data/01-sql-writing.mdx
@@ -23,9 +23,10 @@ import PhpStmt from "./_php_stmt.mdx";
## SQL 写入简介
-应用通过连接器执行 INSERT 语句来插入数据,用户还可以通过 TAOS Shell,手动输入 INSERT 语句插入数据。
+应用通过连接器执行 INSERT 语句来插入数据,用户还可以通过 TDengine CLI,手动输入 INSERT 语句插入数据。
### 一次写入一条
+
下面这条 INSERT 就将一条记录写入到表 d1001 中:
```sql
@@ -48,7 +49,7 @@ TDengine 也支持一次向多个表写入数据,比如下面这条命令就
INSERT INTO d1001 VALUES (1538548685000, 10.3, 219, 0.31) (1538548695000, 12.6, 218, 0.33) d1002 VALUES (1538548696800, 12.3, 221, 0.31);
```
-详细的 SQL INSERT 语法规则参考 [TAOS SQL 的数据写入](/taos-sql/insert)。
+详细的 SQL INSERT 语法规则参考 [TDengine SQL 的数据写入](/taos-sql/insert)。
:::info
@@ -134,4 +135,3 @@ TDengine 也提供了支持参数绑定的 Prepare API,与 MySQL 类似,这
-
diff --git a/docs/zh/07-develop/03-insert-data/02-influxdb-line.mdx b/docs/zh/07-develop/03-insert-data/02-influxdb-line.mdx
index f88284ad676edaae1ff9424ae7a7dfe93aaebba2..a107ffb1b6de6bcf04f559bf4223363a2a40bc5b 100644
--- a/docs/zh/07-develop/03-insert-data/02-influxdb-line.mdx
+++ b/docs/zh/07-develop/03-insert-data/02-influxdb-line.mdx
@@ -37,7 +37,9 @@ meters,location=California.LosAngeles,groupid=2 current=13.4,voltage=223,phase=0
- tag_set 中的所有的数据自动转化为 nchar 数据类型;
- field_set 中的每个数据项都需要对自身的数据类型进行描述, 比如 1.2f32 代表 float 类型的数值 1.2, 如果不带类型后缀会被当作 double 处理;
- timestamp 支持多种时间精度。写入数据的时候需要用参数指定时间精度,支持从小时到纳秒的 6 种时间精度。
-
+- 为了提高写入的效率,默认假设同一个超级表中 field_set 的顺序是一样的(第一条数据包含所有的 field,后面的数据按照这个顺序),如果顺序不一样,需要配置参数 smlDataFormat 为 false,否则,数据写入按照相同顺序写入,库中数据会异常。(3.0.1.3之后的版本 smlDataFormat 默认为 false) [TDengine 无模式写入参考指南](/reference/schemaless/#无模式写入行协议)
+- 默认生产的子表名是根据规则生成的唯一ID值。为了让用户可以指定生成的表名,可以通过在taos.cfg里配置 smlChildTableName 参数来指定。
+举例如下:配置 smlChildTableName=tname 插入数据为 st,tname=cpu1,t1=4 c1=3 1626006833639000000 则创建的表名为 cpu1,注意如果多行数据 tname 相同,但是后面的 tag_set 不同,则使用第一行自动建表时指定的 tag_set,其他的行会忽略)。[TDengine 无模式写入参考指南](/reference/schemaless/#无模式写入行协议)
:::
要了解更多可参考:[InfluxDB Line 协议官方文档](https://docs.influxdata.com/influxdb/v2.0/reference/syntax/line-protocol/) 和 [TDengine 无模式写入参考指南](/reference/schemaless/#无模式写入行协议)
@@ -64,3 +66,7 @@ meters,location=California.LosAngeles,groupid=2 current=13.4,voltage=223,phase=0
+
+## 查询示例
+比如查询 location=California.LosAngeles,groupid=2 子表的数据可以通过如下sql:
+select * from meters where location=California.LosAngeles and groupid=2
diff --git a/docs/zh/07-develop/03-insert-data/03-opentsdb-telnet.mdx b/docs/zh/07-develop/03-insert-data/03-opentsdb-telnet.mdx
index 4f63e17635a713f1f91785cc0fced89fe9340a95..58bca7f8430e68056c10d9df5184f1b104b48bac 100644
--- a/docs/zh/07-develop/03-insert-data/03-opentsdb-telnet.mdx
+++ b/docs/zh/07-develop/03-insert-data/03-opentsdb-telnet.mdx
@@ -32,6 +32,8 @@ OpenTSDB 行协议同样采用一行字符串来表示一行数据。OpenTSDB
meters.current 1648432611250 11.3 location=California.LosAngeles groupid=3
```
+- 默认生产的子表名是根据规则生成的唯一ID值。为了让用户可以指定生成的表名,可以通过在taos.cfg里配置 smlChildTableName 参数来指定。
+举例如下:配置 smlChildTableName=tname 插入数据为 meters.current 1648432611250 11.3 tname=cpu1 location=California.LosAngeles groupid=3 则创建的表名为 cpu1,注意如果多行数据 tname 相同,但是后面的 tag_set 不同,则使用第一行自动建表时指定的 tag_set,其他的行会忽略)。
参考[OpenTSDB Telnet API 文档](http://opentsdb.net/docs/build/html/api_telnet/put.html)。
## 示例代码
@@ -79,3 +81,6 @@ taos> select tbname, * from `meters.current`;
t_7e7b26dd860280242c6492a16... | 2022-03-28 09:56:51.250 | 12.600000000 | 2 | California.SanFrancisco |
Query OK, 4 row(s) in set (0.005399s)
```
+## 查询示例:
+想要查询 location=California.LosAngeles groupid=3 的数据,可以通过如下sql:
+select * from `meters.voltage` where location="California.LosAngeles" and groupid=3
diff --git a/docs/zh/07-develop/03-insert-data/04-opentsdb-json.mdx b/docs/zh/07-develop/03-insert-data/04-opentsdb-json.mdx
index b0257b9cb71ad7aafbadd29d8b6d574e4e024796..aa3e5980cf76a06ecedd966d231ca6437fdd7e1b 100644
--- a/docs/zh/07-develop/03-insert-data/04-opentsdb-json.mdx
+++ b/docs/zh/07-develop/03-insert-data/04-opentsdb-json.mdx
@@ -48,7 +48,8 @@ OpenTSDB JSON 格式协议采用一个 JSON 字符串表示一行或多行数据
- 对于 JSON 格式协议,TDengine 并不会自动把所有标签转成 nchar 类型, 字符串将将转为 nchar 类型, 数值将同样转换为 double 类型。
- TDengine 只接收 JSON **数组格式**的字符串,即使一行数据也需要转换成数组形式。
-
+- 默认生产的子表名是根据规则生成的唯一ID值。为了让用户可以指定生成的表名,可以通过在taos.cfg里配置 smlChildTableName 参数来指定。
+举例如下:配置 smlChildTableName=tname 插入数据为 "tags": { "host": "web02","dc": "lga","tname":"cpu1"} 则创建的表名为 cpu1,注意如果多行数据 tname 相同,但是后面的 tag_set 不同,则使用第一行自动建表时指定的 tag_set,其他的行会忽略)。
:::
## 示例代码
@@ -94,3 +95,7 @@ taos> select * from `meters.current`;
2022-03-28 09:56:51.250 | 12.600000000 | 2.000000000 | California.SanFrancisco |
Query OK, 2 row(s) in set (0.004076s)
```
+
+## 查询示例
+想要查询"tags": {"location": "California.LosAngeles", "groupid": 1} 的数据,可以通过如下sql:
+select * from `meters.voltage` where location="California.LosAngeles" and groupid=1
diff --git a/docs/zh/07-develop/03-insert-data/05-high-volume.md b/docs/zh/07-develop/03-insert-data/05-high-volume.md
new file mode 100644
index 0000000000000000000000000000000000000000..d7581467ae0315442d89de395d35bbd677f75d3a
--- /dev/null
+++ b/docs/zh/07-develop/03-insert-data/05-high-volume.md
@@ -0,0 +1,436 @@
+import Tabs from "@theme/Tabs";
+import TabItem from "@theme/TabItem";
+
+# 高效写入
+
+本节介绍如何高效地向 TDengine 写入数据。
+
+## 高效写入原理 {#principle}
+
+### 客户端程序的角度 {#application-view}
+
+从客户端程序的角度来说,高效写入数据要考虑以下几个因素:
+
+1. 单次写入的数据量。一般来讲,每批次写入的数据量越大越高效(但超过一定阈值其优势会消失)。使用 SQL 写入 TDengine 时,尽量在一条 SQL 中拼接更多数据。目前,TDengine 支持的一条 SQL 的最大长度为 1,048,576(1M)个字符。
+2. 并发连接数。一般来讲,同时写入数据的并发连接数越多写入越高效(但超过一定阈值反而会下降,取决于服务端处理能力)。
+3. 数据在不同表(或子表)之间的分布,即要写入数据的相邻性。一般来说,每批次只向同一张表(或子表)写入数据比向多张表(或子表)写入数据要更高效;
+4. 写入方式。一般来讲:
+ - 参数绑定写入比 SQL 写入更高效。因参数绑定方式避免了 SQL 解析。(但增加了 C 接口的调用次数,对于连接器也有性能损耗)。
+ - SQL 写入不自动建表比自动建表更高效。因自动建表要频繁检查表是否存在
+ - SQL 写入比无模式写入更高效。因无模式写入会自动建表且支持动态更改表结构
+
+客户端程序要充分且恰当地利用以上几个因素。在单次写入中尽量只向同一张表(或子表)写入数据,每批次写入的数据量经过测试和调优设定为一个最适合当前系统处理能力的数值,并发写入的连接数同样经过测试和调优后设定为一个最适合当前系统处理能力的数值,以实现在当前系统中的最佳写入速度。
+
+### 数据源的角度 {#datasource-view}
+
+客户端程序通常需要从数据源读数据再写入 TDengine。从数据源角度来说,以下几种情况需要在读线程和写线程之间增加队列:
+
+1. 有多个数据源,单个数据源生成数据的速度远小于单线程写入的速度,但数据量整体比较大。此时队列的作用是把多个数据源的数据汇聚到一起,增加单次写入的数据量。
+2. 单个数据源生成数据的速度远大于单线程写入的速度。此时队列的作用是增加写入的并发度。
+3. 单张表的数据分散在多个数据源。此时队列的作用是将同一张表的数据提前汇聚到一起,提高写入时数据的相邻性。
+
+如果写应用的数据源是 Kafka, 写应用本身即 Kafka 的消费者,则可利用 Kafka 的特性实现高效写入。比如:
+
+1. 将同一张表的数据写到同一个 Topic 的同一个 Partition,增加数据的相邻性
+2. 通过订阅多个 Topic 实现数据汇聚
+3. 通过增加 Consumer 线程数增加写入的并发度
+4. 通过增加每次 fetch 的最大数据量来增加单次写入的最大数据量
+
+### 服务器配置的角度 {#setting-view}
+
+从服务端配置的角度,要根据系统中磁盘的数量,磁盘的 I/O 能力,以及处理器能力在创建数据库时设置适当的 vgroups 数量以充分发挥系统性能。如果 vgroups 过少,则系统性能无法发挥;如果 vgroups 过多,会造成无谓的资源竞争。常规推荐 vgroups 数量为 CPU 核数的 2 倍,但仍然要结合具体的系统资源配置进行调优。
+
+更多调优参数,请参考 [数据库管理](../../../taos-sql/database) 和 [服务端配置](../../../reference/config)。
+
+## 高效写入示例 {#sample-code}
+
+### 场景设计 {#scenario}
+
+下面的示例程序展示了如何高效写入数据,场景设计如下:
+
+- TDengine 客户端程序从其它数据源不断读入数据,在示例程序中采用生成模拟数据的方式来模拟读取数据源
+- 单个连接向 TDengine 写入的速度无法与读数据的速度相匹配,因此客户端程序启动多个线程,每个线程都建立了与 TDengine 的连接,每个线程都有一个独占的固定大小的消息队列
+- 客户端程序将接收到的数据根据所属的表名(或子表名)HASH 到不同的线程,即写入该线程所对应的消息队列,以此确保属于某个表(或子表)的数据一定会被一个固定的线程处理
+- 各个子线程在将所关联的消息队列中的数据读空后或者读取数据量达到一个预定的阈值后将该批数据写入 TDengine,并继续处理后面接收到的数据
+
+
+
+### 示例代码 {#code}
+
+这一部分是针对以上场景的示例代码。对于其它场景高效写入原理相同,不过代码需要适当修改。
+
+本示例代码假设源数据属于同一张超级表(meters)的不同子表。程序在开始写入数据之前已经在 test 库创建了这个超级表。对于子表,将根据收到的数据,由应用程序自动创建。如果实际场景是多个超级表,只需修改写任务自动建表的代码。
+
+
+
+
+**程序清单**
+
+| 类名 | 功能说明 |
+| ---------------- | --------------------------------------------------------------------------- |
+| FastWriteExample | 主程序 |
+| ReadTask | 从模拟源中读取数据,将表名经过 hash 后得到 Queue 的 index,写入对应的 Queue |
+| WriteTask | 从 Queue 中获取数据,组成一个 Batch,写入 TDengine |
+| MockDataSource | 模拟生成一定数量 meters 子表的数据 |
+| SQLWriter | WriteTask 依赖这个类完成 SQL 拼接、自动建表、 SQL 写入、SQL 长度检查 |
+| StmtWriter | 实现参数绑定方式批量写入(暂未完成) |
+| DataBaseMonitor | 统计写入速度,并每隔 10 秒把当前写入速度打印到控制台 |
+
+
+以下是各类的完整代码和更详细的功能说明。
+
+
+FastWriteExample
+主程序负责:
+
+1. 创建消息队列
+2. 启动写线程
+3. 启动读线程
+4. 每隔 10 秒统计一次写入速度
+
+主程序默认暴露了 4 个参数,每次启动程序都可调节,用于测试和调优:
+
+1. 读线程个数。默认为 1。
+2. 写线程个数。默认为 3。
+3. 模拟生成的总表数。默认为 1000。将会平分给各个读线程。如果总表数较大,建表需要花费较长,开始统计的写入速度可能较慢。
+4. 每批最多写入记录数量。默认为 3000。
+
+队列容量(taskQueueCapacity)也是与性能有关的参数,可通过修改程序调节。一般来讲,队列容量越大,入队被阻塞的概率越小,队列的吞吐量越大,但是内存占用也会越大。 示例程序默认值已经设置地足够大。
+
+```java
+{{#include docs/examples/java/src/main/java/com/taos/example/highvolume/FastWriteExample.java}}
+```
+
+
+
+
+ReadTask
+
+读任务负责从数据源读数据。每个读任务都关联了一个模拟数据源。每个模拟数据源可生成一点数量表的数据。不同的模拟数据源生成不同表的数据。
+
+读任务采用阻塞的方式写消息队列。也就是说,一旦队列满了,写操作就会阻塞。
+
+```java
+{{#include docs/examples/java/src/main/java/com/taos/example/highvolume/ReadTask.java}}
+```
+
+
+
+
+WriteTask
+
+```java
+{{#include docs/examples/java/src/main/java/com/taos/example/highvolume/WriteTask.java}}
+```
+
+
+
+
+
+MockDataSource
+
+```java
+{{#include docs/examples/java/src/main/java/com/taos/example/highvolume/MockDataSource.java}}
+```
+
+
+
+
+
+SQLWriter
+
+SQLWriter 类封装了拼 SQL 和写数据的逻辑。注意,所有的表都没有提前创建,而是在 catch 到表不存在异常的时候,再以超级表为模板批量建表,然后重新执行 INSERT 语句。对于其它异常,这里简单地记录当时执行的 SQL 语句到日志中,你也可以记录更多线索到日志,已便排查错误和故障恢复。
+
+```java
+{{#include docs/examples/java/src/main/java/com/taos/example/highvolume/SQLWriter.java}}
+```
+
+
+
+
+
+DataBaseMonitor
+
+```java
+{{#include docs/examples/java/src/main/java/com/taos/example/highvolume/DataBaseMonitor.java}}
+```
+
+
+
+**执行步骤**
+
+
+执行 Java 示例程序
+
+执行程序前需配置环境变量 `TDENGINE_JDBC_URL`。如果 TDengine Server 部署在本机,且用户名、密码和端口都是默认值,那么可配置:
+
+```
+TDENGINE_JDBC_URL="jdbc:TAOS://localhost:6030?user=root&password=taosdata"
+```
+
+**本地集成开发环境执行示例程序**
+
+1. clone TDengine 仓库
+ ```
+ git clone git@github.com:taosdata/TDengine.git --depth 1
+ ```
+2. 用集成开发环境打开 `docs/examples/java` 目录。
+3. 在开发环境中配置环境变量 `TDENGINE_JDBC_URL`。如果已配置了全局的环境变量 `TDENGINE_JDBC_URL` 可跳过这一步。
+4. 运行类 `com.taos.example.highvolume.FastWriteExample`。
+
+**远程服务器上执行示例程序**
+
+若要在服务器上执行示例程序,可按照下面的步骤操作:
+
+1. 打包示例代码。在目录 TDengine/docs/examples/java 下执行:
+ ```
+ mvn package
+ ```
+2. 远程服务器上创建 examples 目录:
+ ```
+ mkdir -p examples/java
+ ```
+3. 复制依赖到服务器指定目录:
+ - 复制依赖包,只用复制一次
+ ```
+ scp -r .\target\lib @:~/examples/java
+ ```
+ - 复制本程序的 jar 包,每次更新代码都需要复制
+ ```
+ scp -r .\target\javaexample-1.0.jar @:~/examples/java
+ ```
+4. 配置环境变量。
+ 编辑 `~/.bash_profile` 或 `~/.bashrc` 添加如下内容例如:
+
+ ```
+ export TDENGINE_JDBC_URL="jdbc:TAOS://localhost:6030?user=root&password=taosdata"
+ ```
+
+ 以上使用的是本地部署 TDengine Server 时默认的 JDBC URL。你需要根据自己的实际情况更改。
+
+5. 用 java 命令启动示例程序,命令模板:
+
+ ```
+ java -classpath lib/*:javaexample-1.0.jar com.taos.example.highvolume.FastWriteExample
+ ```
+
+6. 结束测试程序。测试程序不会自动结束,在获取到当前配置下稳定的写入速度后,按 CTRL + C 结束程序。
+ 下面是一次实际运行的日志输出,机器配置 16核 + 64G + 固态硬盘。
+
+ ```
+ root@vm85$ java -classpath lib/*:javaexample-1.0.jar com.taos.example.highvolume.FastWriteExample 2 12
+ 18:56:35.896 [main] INFO c.t.e.highvolume.FastWriteExample - readTaskCount=2, writeTaskCount=12 tableCount=1000 maxBatchSize=3000
+ 18:56:36.011 [WriteThread-0] INFO c.taos.example.highvolume.WriteTask - started
+ 18:56:36.015 [WriteThread-0] INFO c.taos.example.highvolume.SQLWriter - maxSQLLength=1048576
+ 18:56:36.021 [WriteThread-1] INFO c.taos.example.highvolume.WriteTask - started
+ 18:56:36.022 [WriteThread-1] INFO c.taos.example.highvolume.SQLWriter - maxSQLLength=1048576
+ 18:56:36.031 [WriteThread-2] INFO c.taos.example.highvolume.WriteTask - started
+ 18:56:36.032 [WriteThread-2] INFO c.taos.example.highvolume.SQLWriter - maxSQLLength=1048576
+ 18:56:36.041 [WriteThread-3] INFO c.taos.example.highvolume.WriteTask - started
+ 18:56:36.042 [WriteThread-3] INFO c.taos.example.highvolume.SQLWriter - maxSQLLength=1048576
+ 18:56:36.093 [WriteThread-4] INFO c.taos.example.highvolume.WriteTask - started
+ 18:56:36.094 [WriteThread-4] INFO c.taos.example.highvolume.SQLWriter - maxSQLLength=1048576
+ 18:56:36.099 [WriteThread-5] INFO c.taos.example.highvolume.WriteTask - started
+ 18:56:36.100 [WriteThread-5] INFO c.taos.example.highvolume.SQLWriter - maxSQLLength=1048576
+ 18:56:36.100 [WriteThread-6] INFO c.taos.example.highvolume.WriteTask - started
+ 18:56:36.101 [WriteThread-6] INFO c.taos.example.highvolume.SQLWriter - maxSQLLength=1048576
+ 18:56:36.103 [WriteThread-7] INFO c.taos.example.highvolume.WriteTask - started
+ 18:56:36.104 [WriteThread-7] INFO c.taos.example.highvolume.SQLWriter - maxSQLLength=1048576
+ 18:56:36.105 [WriteThread-8] INFO c.taos.example.highvolume.WriteTask - started
+ 18:56:36.107 [WriteThread-8] INFO c.taos.example.highvolume.SQLWriter - maxSQLLength=1048576
+ 18:56:36.108 [WriteThread-9] INFO c.taos.example.highvolume.WriteTask - started
+ 18:56:36.109 [WriteThread-9] INFO c.taos.example.highvolume.SQLWriter - maxSQLLength=1048576
+ 18:56:36.156 [WriteThread-10] INFO c.taos.example.highvolume.WriteTask - started
+ 18:56:36.157 [WriteThread-11] INFO c.taos.example.highvolume.WriteTask - started
+ 18:56:36.158 [WriteThread-10] INFO c.taos.example.highvolume.SQLWriter - maxSQLLength=1048576
+ 18:56:36.158 [ReadThread-0] INFO com.taos.example.highvolume.ReadTask - started
+ 18:56:36.158 [ReadThread-1] INFO com.taos.example.highvolume.ReadTask - started
+ 18:56:36.158 [WriteThread-11] INFO c.taos.example.highvolume.SQLWriter - maxSQLLength=1048576
+ 18:56:46.369 [main] INFO c.t.e.highvolume.FastWriteExample - count=18554448 speed=1855444
+ 18:56:56.946 [main] INFO c.t.e.highvolume.FastWriteExample - count=39059660 speed=2050521
+ 18:57:07.322 [main] INFO c.t.e.highvolume.FastWriteExample - count=59403604 speed=2034394
+ 18:57:18.032 [main] INFO c.t.e.highvolume.FastWriteExample - count=80262938 speed=2085933
+ 18:57:28.432 [main] INFO c.t.e.highvolume.FastWriteExample - count=101139906 speed=2087696
+ 18:57:38.921 [main] INFO c.t.e.highvolume.FastWriteExample - count=121807202 speed=2066729
+ 18:57:49.375 [main] INFO c.t.e.highvolume.FastWriteExample - count=142952417 speed=2114521
+ 18:58:00.689 [main] INFO c.t.e.highvolume.FastWriteExample - count=163650306 speed=2069788
+ 18:58:11.646 [main] INFO c.t.e.highvolume.FastWriteExample - count=185019808 speed=2136950
+ ```
+
+
+
+
+
+
+**程序清单**
+
+Python 示例程序中采用了多进程的架构,并使用了跨进程的消息队列。
+
+| 函数或类 | 功能说明 |
+| ------------------------ | -------------------------------------------------------------------- |
+| main 函数 | 程序入口, 创建各个子进程和消息队列 |
+| run_monitor_process 函数 | 创建数据库,超级表,统计写入速度并定时打印到控制台 |
+| run_read_task 函数 | 读进程主要逻辑,负责从其它数据系统读数据,并分发数据到为之分配的队列 |
+| MockDataSource 类 | 模拟数据源, 实现迭代器接口,每次批量返回每张表的接下来 1000 条数据 |
+| run_write_task 函数 | 写进程主要逻辑。每次从队列中取出尽量多的数据,并批量写入 |
+| SQLWriter类 | SQL 写入和自动建表 |
+| StmtWriter 类 | 实现参数绑定方式批量写入(暂未完成) |
+
+
+
+main 函数
+
+main 函数负责创建消息队列和启动子进程,子进程有 3 类:
+
+1. 1 个监控进程,负责数据库初始化和统计写入速度
+2. n 个读进程,负责从其它数据系统读数据
+3. m 个写进程,负责写数据库
+
+main 函数可以接收 5 个启动参数,依次是:
+
+1. 读任务(进程)数, 默认为 1
+2. 写任务(进程)数, 默认为 1
+3. 模拟生成的总表数,默认为 1000
+4. 队列大小(单位字节),默认为 1000000
+5. 每批最多写入记录数量, 默认为 3000
+
+```python
+{{#include docs/examples/python/fast_write_example.py:main}}
+```
+
+
+
+
+run_monitor_process
+
+监控进程负责初始化数据库,并监控当前的写入速度。
+
+```python
+{{#include docs/examples/python/fast_write_example.py:monitor}}
+```
+
+
+
+
+
+run_read_task 函数
+
+读进程,负责从其它数据系统读数据,并分发数据到为之分配的队列。
+
+```python
+{{#include docs/examples/python/fast_write_example.py:read}}
+```
+
+
+
+
+
+MockDataSource
+
+以下是模拟数据源的实现,我们假设数据源生成的每一条数据都带有目标表名信息。实际中你可能需要一定的规则确定目标表名。
+
+```python
+{{#include docs/examples/python/mockdatasource.py}}
+```
+
+
+
+
+run_write_task 函数
+
+写进程每次从队列中取出尽量多的数据,并批量写入。
+
+```python
+{{#include docs/examples/python/fast_write_example.py:write}}
+```
+
+
+
+
+
+SQLWriter 类封装了拼 SQL 和写数据的逻辑。所有的表都没有提前创建,而是在发生表不存在错误的时候,再以超级表为模板批量建表,然后重新执行 INSERT 语句。对于其它错误会记录当时执行的 SQL, 以便排查错误和故障恢复。这个类也对 SQL 是否超过最大长度限制做了检查,根据 TDengine 3.0 的限制由输入参数 maxSQLLength 传入了支持的最大 SQL 长度,即 1048576 。
+
+SQLWriter
+
+```python
+{{#include docs/examples/python/sql_writer.py}}
+```
+
+
+
+**执行步骤**
+
+
+
+执行 Python 示例程序
+
+1. 前提条件
+
+ - 已安装 TDengine 客户端驱动
+ - 已安装 Python3, 推荐版本 >= 3.8
+ - 已安装 taospy
+
+2. 安装 faster-fifo 代替 python 内置的 multiprocessing.Queue
+
+ ```
+ pip3 install faster-fifo
+ ```
+
+3. 点击上面的“查看源码”链接复制 `fast_write_example.py` 、 `sql_writer.py` 和 `mockdatasource.py` 三个文件。
+
+4. 执行示例程序
+
+ ```
+ python3 fast_write_example.py
+ ```
+
+ 下面是一次实际运行的输出, 机器配置 16核 + 64G + 固态硬盘。
+
+ ```
+ root@vm85$ python3 fast_write_example.py 8 8
+ 2022-07-14 19:13:45,869 [root] - READ_TASK_COUNT=8, WRITE_TASK_COUNT=8, TABLE_COUNT=1000, QUEUE_SIZE=1000000, MAX_BATCH_SIZE=3000
+ 2022-07-14 19:13:48,882 [root] - WriteTask-0 started with pid 718347
+ 2022-07-14 19:13:48,883 [root] - WriteTask-1 started with pid 718348
+ 2022-07-14 19:13:48,884 [root] - WriteTask-2 started with pid 718349
+ 2022-07-14 19:13:48,884 [root] - WriteTask-3 started with pid 718350
+ 2022-07-14 19:13:48,885 [root] - WriteTask-4 started with pid 718351
+ 2022-07-14 19:13:48,885 [root] - WriteTask-5 started with pid 718352
+ 2022-07-14 19:13:48,886 [root] - WriteTask-6 started with pid 718353
+ 2022-07-14 19:13:48,886 [root] - WriteTask-7 started with pid 718354
+ 2022-07-14 19:13:48,887 [root] - ReadTask-0 started with pid 718355
+ 2022-07-14 19:13:48,888 [root] - ReadTask-1 started with pid 718356
+ 2022-07-14 19:13:48,889 [root] - ReadTask-2 started with pid 718357
+ 2022-07-14 19:13:48,889 [root] - ReadTask-3 started with pid 718358
+ 2022-07-14 19:13:48,890 [root] - ReadTask-4 started with pid 718359
+ 2022-07-14 19:13:48,891 [root] - ReadTask-5 started with pid 718361
+ 2022-07-14 19:13:48,892 [root] - ReadTask-6 started with pid 718364
+ 2022-07-14 19:13:48,893 [root] - ReadTask-7 started with pid 718365
+ 2022-07-14 19:13:56,042 [DataBaseMonitor] - count=6676310 speed=667631.0
+ 2022-07-14 19:14:06,196 [DataBaseMonitor] - count=20004310 speed=1332800.0
+ 2022-07-14 19:14:16,366 [DataBaseMonitor] - count=32290310 speed=1228600.0
+ 2022-07-14 19:14:26,527 [DataBaseMonitor] - count=44438310 speed=1214800.0
+ 2022-07-14 19:14:36,673 [DataBaseMonitor] - count=56608310 speed=1217000.0
+ 2022-07-14 19:14:46,834 [DataBaseMonitor] - count=68757310 speed=1214900.0
+ 2022-07-14 19:14:57,280 [DataBaseMonitor] - count=80992310 speed=1223500.0
+ 2022-07-14 19:15:07,689 [DataBaseMonitor] - count=93805310 speed=1281300.0
+ 2022-07-14 19:15:18,020 [DataBaseMonitor] - count=106111310 speed=1230600.0
+ 2022-07-14 19:15:28,356 [DataBaseMonitor] - count=118394310 speed=1228300.0
+ 2022-07-14 19:15:38,690 [DataBaseMonitor] - count=130742310 speed=1234800.0
+ 2022-07-14 19:15:49,000 [DataBaseMonitor] - count=143051310 speed=1230900.0
+ 2022-07-14 19:15:59,323 [DataBaseMonitor] - count=155276310 speed=1222500.0
+ 2022-07-14 19:16:09,649 [DataBaseMonitor] - count=167603310 speed=1232700.0
+ 2022-07-14 19:16:19,995 [DataBaseMonitor] - count=179976310 speed=1237300.0
+ ```
+
+
+
+:::note
+使用 Python 连接器多进程连接 TDengine 的时候,有一个限制:不能在父进程中建立连接,所有连接只能在子进程中创建。
+如果在父进程中创建连接,子进程再创建连接就会一直阻塞。这是个已知问题。
+
+:::
+
+
+
+
+
diff --git a/docs/zh/07-develop/03-insert-data/_cs_line.mdx b/docs/zh/07-develop/03-insert-data/_cs_line.mdx
index 71f46c62be3dfe7d771a35b2298e476bed353aba..ae49901c3ac0a34218def4b1e12702e79960d0b6 100644
--- a/docs/zh/07-develop/03-insert-data/_cs_line.mdx
+++ b/docs/zh/07-develop/03-insert-data/_cs_line.mdx
@@ -1,3 +1,3 @@
```csharp
-{{#include docs/examples/csharp/InfluxDBLineExample.cs}}
+{{#include docs/examples/csharp/influxdbLine/Program.cs}}
```
diff --git a/docs/zh/07-develop/03-insert-data/_cs_opts_json.mdx b/docs/zh/07-develop/03-insert-data/_cs_opts_json.mdx
index 8d80d042c984c513df5ca91813c0cd0a17b58eb5..2627648616b9ac8c92e0d76097d517c066232ef2 100644
--- a/docs/zh/07-develop/03-insert-data/_cs_opts_json.mdx
+++ b/docs/zh/07-develop/03-insert-data/_cs_opts_json.mdx
@@ -1,3 +1,3 @@
```csharp
-{{#include docs/examples/csharp/OptsJsonExample.cs}}
+{{#include docs/examples/csharp/optsJSON/Program.cs}}
```
diff --git a/docs/zh/07-develop/03-insert-data/_cs_opts_telnet.mdx b/docs/zh/07-develop/03-insert-data/_cs_opts_telnet.mdx
index cff32abf1feaf703971111542749fbe40152bc33..660db13fd1816150880883cf801ff50019fbae8d 100644
--- a/docs/zh/07-develop/03-insert-data/_cs_opts_telnet.mdx
+++ b/docs/zh/07-develop/03-insert-data/_cs_opts_telnet.mdx
@@ -1,3 +1,3 @@
```csharp
-{{#include docs/examples/csharp/OptsTelnetExample.cs}}
+{{#include docs/examples/csharp/optsTelnet/Program.cs}}
```
diff --git a/docs/zh/07-develop/03-insert-data/_cs_sql.mdx b/docs/zh/07-develop/03-insert-data/_cs_sql.mdx
index 1dc7bb3d1366aa3000212786756506eb5eb280e6..42a6bc4315393de0b2cba37caffbfbc1c07b952b 100644
--- a/docs/zh/07-develop/03-insert-data/_cs_sql.mdx
+++ b/docs/zh/07-develop/03-insert-data/_cs_sql.mdx
@@ -1,3 +1,3 @@
```csharp
-{{#include docs/examples/csharp/SQLInsertExample.cs}}
+{{#include docs/examples/csharp/sqlInsert/Program.cs}}
```
diff --git a/docs/zh/07-develop/03-insert-data/_cs_stmt.mdx b/docs/zh/07-develop/03-insert-data/_cs_stmt.mdx
index 229c874ab9f515e7eae66890a3dfe2e59c129e86..d8d73ca15ebdce28a40a6c922293493679491e97 100644
--- a/docs/zh/07-develop/03-insert-data/_cs_stmt.mdx
+++ b/docs/zh/07-develop/03-insert-data/_cs_stmt.mdx
@@ -1,3 +1,3 @@
```csharp
-{{#include docs/examples/csharp/StmtInsertExample.cs}}
+{{#include docs/examples/csharp/stmtInsert/Program.cs}}
```
diff --git a/docs/zh/07-develop/03-insert-data/highvolume.webp b/docs/zh/07-develop/03-insert-data/highvolume.webp
new file mode 100644
index 0000000000000000000000000000000000000000..46dfc74ae3b0043c591ff930c62251da49cae7ad
Binary files /dev/null and b/docs/zh/07-develop/03-insert-data/highvolume.webp differ
diff --git a/docs/zh/07-develop/04-query-data/_cs.mdx b/docs/zh/07-develop/04-query-data/_cs.mdx
index 4bb582ecbfaeceac679af975e7752d1caeacb018..745ab368115ca0dfbaff1f3a326abfd9bed02430 100644
--- a/docs/zh/07-develop/04-query-data/_cs.mdx
+++ b/docs/zh/07-develop/04-query-data/_cs.mdx
@@ -1,3 +1,3 @@
```csharp
-{{#include docs/examples/csharp/QueryExample.cs}}
+{{#include docs/examples/csharp/query/Program.cs}}
```
diff --git a/docs/zh/07-develop/04-query-data/_cs_async.mdx b/docs/zh/07-develop/04-query-data/_cs_async.mdx
index 3ecf635fd39db402d1db68de6d7336b7b2d9d8e8..19c8e58f32ed3598b5ccb953085b97ef2e4ce067 100644
--- a/docs/zh/07-develop/04-query-data/_cs_async.mdx
+++ b/docs/zh/07-develop/04-query-data/_cs_async.mdx
@@ -1,3 +1,3 @@
```csharp
-{{#include docs/examples/csharp/AsyncQueryExample.cs}}
+{{#include docs/examples/csharp/asyncQuery/Program.cs}}
```
diff --git a/docs/zh/07-develop/04-query-data/index.mdx b/docs/zh/07-develop/04-query-data/index.mdx
index c083c30c2c26f8ecff96a36f3f4151e103ea1052..d6156c8a59a70af80f2632cdf3801ef7281b69d5 100644
--- a/docs/zh/07-develop/04-query-data/index.mdx
+++ b/docs/zh/07-develop/04-query-data/index.mdx
@@ -44,7 +44,7 @@ Query OK, 2 row(s) in set (0.001100s)
为满足物联网场景的需求,TDengine 支持几个特殊的函数,比如 twa(时间加权平均),spread (最大值与最小值的差),last_row(最后一条记录)等,更多与物联网场景相关的函数将添加进来。
-具体的查询语法请看 [TAOS SQL 的数据查询](../../taos-sql/select) 章节。
+具体的查询语法请看 [TDengine SQL 的数据查询](../../taos-sql/select) 章节。
## 多表聚合查询
@@ -52,7 +52,7 @@ Query OK, 2 row(s) in set (0.001100s)
### 示例一
-在 TAOS Shell,查找加利福尼亚州所有智能电表采集的电压平均值,并按照 location 分组。
+在 TDengine CLI,查找加利福尼亚州所有智能电表采集的电压平均值,并按照 location 分组。
```
taos> SELECT AVG(voltage), location FROM meters GROUP BY location;
@@ -65,7 +65,7 @@ Query OK, 2 rows in database (0.005995s)
### 示例二
-在 TAOS shell, 查找 groupId 为 2 的所有智能电表的记录条数,电流的最大值。
+在 TDengine CLI, 查找 groupId 为 2 的所有智能电表的记录条数,电流的最大值。
```
taos> SELECT count(*), max(current) FROM meters where groupId = 2;
@@ -75,7 +75,7 @@ taos> SELECT count(*), max(current) FROM meters where groupId = 2;
Query OK, 1 row(s) in set (0.002136s)
```
-在 [TAOS SQL 的数据查询](../../taos-sql/select) 一章,查询类操作都会注明是否支持超级表。
+在 [TDengine SQL 的数据查询](../../taos-sql/select) 一章,查询类操作都会注明是否支持超级表。
## 降采样查询、插值
@@ -123,7 +123,7 @@ Query OK, 6 rows in database (0.005515s)
如果一个时间间隔里,没有采集的数据,TDengine 还提供插值计算的功能。
-语法规则细节请见 [TAOS SQL 的按时间窗口切分聚合](../../taos-sql/distinguished) 章节。
+语法规则细节请见 [TDengine SQL 的按时间窗口切分聚合](../../taos-sql/distinguished) 章节。
## 示例代码
diff --git a/docs/zh/07-develop/06-stream.md b/docs/zh/07-develop/06-stream.md
index d5296582d500e3271130bc1bfc6de34492133a8a..c9f1b1d43a3b8666e747db80b5e34473f45f0675 100644
--- a/docs/zh/07-develop/06-stream.md
+++ b/docs/zh/07-develop/06-stream.md
@@ -52,7 +52,7 @@ CREATE TABLE d1004 USING meters TAGS ("California.LosAngeles", 3);
### 创建流
```sql
-create stream current_stream into current_stream_output_stb as select _wstart as start, _wend as end, max(current) as max_current from meters where voltage <= 220 interval (5s);
+create stream current_stream into current_stream_output_stb as select _wstart as start, _wend as wend, max(current) as max_current from meters where voltage <= 220 interval (5s);
```
### 写入数据
@@ -71,7 +71,7 @@ insert into d1004 values("2018-10-03 14:38:06.500", 11.50000, 221, 0.35000);
```sql
taos> select start, end, max_current from current_stream_output_stb;
- start | end | max_current |
+ start | wend | max_current |
===========================================================================
2018-10-03 14:38:05.000 | 2018-10-03 14:38:10.000 | 10.30000 |
2018-10-03 14:38:15.000 | 2018-10-03 14:38:20.000 | 12.60000 |
diff --git a/docs/zh/07-develop/07-tmq.mdx b/docs/zh/07-develop/07-tmq.mdx
index 2f5c13d9b0bc0e3940fb99b45c693e2ae80c8f47..d9a35ab4eb545872a0f9a8e10e6e40dcf6ee4ec7 100644
--- a/docs/zh/07-develop/07-tmq.mdx
+++ b/docs/zh/07-develop/07-tmq.mdx
@@ -218,7 +218,7 @@ void Close()
```sql
DROP DATABASE IF EXISTS tmqdb;
CREATE DATABASE tmqdb;
-CREATE TABLE tmqdb.stb (ts TIMESTAMP, c1 INT, c2 FLOAT, c3 VARCHAR(16) TAGS(t1 INT, t3 VARCHAR(16));
+CREATE TABLE tmqdb.stb (ts TIMESTAMP, c1 INT, c2 FLOAT, c3 VARCHAR(16)) TAGS(t1 INT, t3 VARCHAR(16));
CREATE TABLE tmqdb.ctb0 USING tmqdb.stb TAGS(0, "subtable0");
CREATE TABLE tmqdb.ctb1 USING tmqdb.stb TAGS(1, "subtable1");
INSERT INTO tmqdb.ctb0 VALUES(now, 0, 0, 'a0')(now+1s, 0, 0, 'a00');
diff --git a/docs/zh/07-develop/09-udf.md b/docs/zh/07-develop/09-udf.md
index ef1cd4797a217b601b1b8e3eaa0e74b8c2907c88..3239eae49b05180c4a0dba5850de9f1c5e08a4f3 100644
--- a/docs/zh/07-develop/09-udf.md
+++ b/docs/zh/07-develop/09-udf.md
@@ -116,7 +116,7 @@ aggfn为函数名的占位符,需要修改为自己的函数名,如l2norm。
参数的具体含义是:
- inputDataBlock: 输入的数据块
- - resultColumn: 输出列。输出列
+ - resultColumn: 输出列
### 聚合接口函数
diff --git a/docs/zh/07-develop/_sub_cs.mdx b/docs/zh/07-develop/_sub_cs.mdx
index a435ea0273c94cbe75eaf7431e1a9c39d49d92e3..093b617e9bb9c7da7bc9392f91316b9f3342bae6 100644
--- a/docs/zh/07-develop/_sub_cs.mdx
+++ b/docs/zh/07-develop/_sub_cs.mdx
@@ -1,3 +1,3 @@
```csharp
-{{#include docs/examples/csharp/SubscribeDemo.cs}}
+{{#include docs/examples/csharp/subscribe/Program.cs}}
```
\ No newline at end of file
diff --git a/docs/zh/07-develop/_sub_java.mdx b/docs/zh/07-develop/_sub_java.mdx
index d14b5fd6095dd90f89dd2c2e828858585cfddff9..e7de158cc8d2b0b686b25bbe96e7a092c2a68e51 100644
--- a/docs/zh/07-develop/_sub_java.mdx
+++ b/docs/zh/07-develop/_sub_java.mdx
@@ -1,7 +1,5 @@
```java
{{#include docs/examples/java/src/main/java/com/taos/example/SubscribeDemo.java}}
-{{#include docs/examples/java/src/main/java/com/taos/example/MetersDeserializer.java}}
-{{#include docs/examples/java/src/main/java/com/taos/example/Meters.java}}
```
```java
{{#include docs/examples/java/src/main/java/com/taos/example/MetersDeserializer.java}}
diff --git a/docs/zh/08-connector/02-rest-api.mdx b/docs/zh/08-connector/02-rest-api.mdx
index e254244657b457e10bc2daab020b230c9a8bb2cc..a8e16823015a11b883edd3ec81b0efcfcc2bfb8f 100644
--- a/docs/zh/08-connector/02-rest-api.mdx
+++ b/docs/zh/08-connector/02-rest-api.mdx
@@ -4,7 +4,7 @@ sidebar_label: REST API
description: 详细介绍 TDengine 提供的 RESTful API.
---
-为支持各种不同类型平台的开发,TDengine 提供符合 REST 设计标准的 API,即 REST API。为最大程度降低学习成本,不同于其他数据库 REST API 的设计方法,TDengine 直接通过 HTTP POST 请求 BODY 中包含的 SQL 语句来操作数据库,仅需要一个 URL。REST 连接器的使用参见 [视频教程](https://www.taosdata.com/blog/2020/11/11/1965.html)。
+为支持各种不同类型平台的开发,TDengine 提供符合 RESTful 设计标准的 API,即 REST API。为最大程度降低学习成本,不同于其他数据库 REST API 的设计方法,TDengine 直接通过 HTTP POST 请求 BODY 中包含的 SQL 语句来操作数据库,仅需要一个 URL。REST API 的使用参见 [视频教程](https://www.taosdata.com/blog/2020/11/11/1965.html)。
:::note
与原生连接器的一个区别是,RESTful 接口是无状态的,因此 `USE db_name` 指令没有效果,所有对表名、超级表名的引用都需要指定数据库名前缀。支持在 RESTful URL 中指定 db_name,这时如果 SQL 语句中没有指定数据库名前缀的话,会使用 URL 中指定的这个 db_name。
@@ -18,7 +18,7 @@ RESTful 接口不依赖于任何 TDengine 的库,因此客户端不需要安
在已经安装 TDengine 服务器端的情况下,可以按照如下方式进行验证。
-下面以 Ubuntu 环境中使用 curl 工具(确认已经安装)来验证 RESTful 接口的正常,验证前请确认 taosAdapter 服务已开启,在 Linux 系统上此服务默认由 systemd 管理,使用命令 `systemctl start taosadapter` 启动。
+下面以 Ubuntu 环境中使用 `curl` 工具(请确认已经安装)来验证 RESTful 接口是否工作正常,验证前请确认 taosAdapter 服务已开启,在 Linux 系统上此服务默认由 systemd 管理,使用命令 `systemctl start taosadapter` 启动。
下面示例是列出所有的数据库,请把 h1.taosdata.com 和 6041(缺省值)替换为实际运行的 TDengine 服务 FQDN 和端口号:
@@ -125,7 +125,7 @@ curl -L -u username:password -d "" :/rest/sql/[db_name]
### HTTP body 结构
-#### 正确执行
+#### 正确执行插入
样例:
@@ -145,7 +145,7 @@ curl -L -u username:password -d "" :/rest/sql/[db_name]
- rows:(`int`)只返回 `1`。
- data:(`[][]any`)返回受影响行数。
-#### 正确查询
+#### 正确执行查询
样例:
diff --git a/docs/zh/08-connector/05-schemaless-api.mdx b/docs/zh/08-connector/05-schemaless-api.mdx
new file mode 100644
index 0000000000000000000000000000000000000000..f6d7e09212c71ed7b142eeabf2a2d69c5f79d439
--- /dev/null
+++ b/docs/zh/08-connector/05-schemaless-api.mdx
@@ -0,0 +1,38 @@
+---
+title: Schemaless API
+sidebar_label: Schemaless API
+description: 详细介绍 TDengine 提供的 Schemaless API.
+---
+
+TDengine 提供了兼容 InfluxDB (v1) 和 OpenTSDB 行协议的 Schemaless API。支持 InfluxDB(v1) 或 OpenTSDB 行协议写入数据的第三方软件无需修改代码,只要修改配置的 EndPoint URL 就可以直接把数据写入 TDengine 数据库。
+
+### 兼容 InfluxDB 行协议写入的方法
+
+您可以配置任何支持使用 InfluxDB(v1) 行协议的应用访问地址 `http://:6041/` 来写入 InfluxDB 兼容格式的数据到 TDengine。EndPoint 如下:
+```text
+/influxdb/v1/write??...
+```
+
+支持 InfluxDB 查询参数如下:
+
+- `db` 指定 TDengine 使用的数据库名
+- `precision` TDengine 使用的时间精度
+- `u` TDengine 用户名
+- `p` TDengine 密码
+
+注意: 目前不支持 InfluxDB 的 token 验证方式,仅支持 Basic 验证和查询参数验证。
+
+参考链接:[InfluxDB v1 写接口](https://docs.influxdata.com/influxdb/v2.0/reference/api/influxdb-1x/write/)
+
+### 兼容 OpenTSDB 行协议写入的方法
+
+您可以配置任何支持 OpenTSDB 行协议的应用访问地址 `http://:6041/` 来写入 OpenTSDB 兼容格式的数据到 TDengine。EndPoint 如下:
+
+```text
+/opentsdb/v1/put/json/
+/opentsdb/v1/put/telnet/
+```
+
+参考链接:
+ - [OpenTSDB JSON](http://opentsdb.net/docs/build/html/api_http/put.html)
+ - [OpenTSDB Telnet](http://opentsdb.net/docs/build/html/api_telnet/put.html)
diff --git a/docs/zh/08-connector/03-cpp.mdx b/docs/zh/08-connector/10-cpp.mdx
similarity index 100%
rename from docs/zh/08-connector/03-cpp.mdx
rename to docs/zh/08-connector/10-cpp.mdx
diff --git a/docs/zh/08-connector/04-java.mdx b/docs/zh/08-connector/14-java.mdx
similarity index 100%
rename from docs/zh/08-connector/04-java.mdx
rename to docs/zh/08-connector/14-java.mdx
diff --git a/docs/zh/08-connector/05-go.mdx b/docs/zh/08-connector/20-go.mdx
similarity index 100%
rename from docs/zh/08-connector/05-go.mdx
rename to docs/zh/08-connector/20-go.mdx
diff --git a/docs/zh/08-connector/06-rust.mdx b/docs/zh/08-connector/26-rust.mdx
similarity index 98%
rename from docs/zh/08-connector/06-rust.mdx
rename to docs/zh/08-connector/26-rust.mdx
index 42cd5205cdd5e49784e921aba75804320ee0b7d5..63dce4b69b7a66c0888f306b72fd87b45e4c5bf3 100644
--- a/docs/zh/08-connector/06-rust.mdx
+++ b/docs/zh/08-connector/26-rust.mdx
@@ -117,7 +117,7 @@ DSN 描述字符串基本结构如下:
- **protocol**: 显示指定以何种方式建立连接,例如:`taos+ws://localhost:6041` 指定以 Websocket 方式建立连接。
- **username/password**: 用于创建连接的用户名及密码。
- **host/port**: 指定创建连接的服务器及端口,当不指定服务器地址及端口时(`taos://`),原生连接默认为 `localhost:6030`,Websocket 连接默认为 `localhost:6041` 。
-- **database**: 指定默认连接的数据库名。
+- **database**: 指定默认连接的数据库名,可选参数。
- **params**:其他可选参数。
一个完整的 DSN 描述字符串示例如下:
@@ -156,15 +156,15 @@ async fn demo(taos: &Taos, db: &str) -> Result<(), Error> {
let inserted = taos.exec_many([
// create super table
"CREATE TABLE `meters` (`ts` TIMESTAMP, `current` FLOAT, `voltage` INT, `phase` FLOAT) \
- TAGS (`groupid` INT, `location` BINARY(16))",
+ TAGS (`groupid` INT, `location` BINARY(24))",
// create child table
- "CREATE TABLE `d0` USING `meters` TAGS(0, 'Los Angles')",
+ "CREATE TABLE `d0` USING `meters` TAGS(0, 'California.LosAngles')",
// insert into child table
"INSERT INTO `d0` values(now - 10s, 10, 116, 0.32)",
// insert with NULL values
"INSERT INTO `d0` values(now - 8s, NULL, NULL, NULL)",
// insert and automatically create table with tags if not exists
- "INSERT INTO `d1` USING `meters` TAGS(1, 'San Francisco') values(now - 9s, 10.1, 119, 0.33)",
+ "INSERT INTO `d1` USING `meters` TAGS(1, 'California.SanFrancisco') values(now - 9s, 10.1, 119, 0.33)",
// insert many records in a single sql
"INSERT INTO `d1` values (now-8s, 10, 120, 0.33) (now - 6s, 10, 119, 0.34) (now - 4s, 11.2, 118, 0.322)",
]).await?;
diff --git a/docs/zh/08-connector/07-python.mdx b/docs/zh/08-connector/30-python.mdx
similarity index 100%
rename from docs/zh/08-connector/07-python.mdx
rename to docs/zh/08-connector/30-python.mdx
diff --git a/docs/zh/08-connector/08-node.mdx b/docs/zh/08-connector/35-node.mdx
similarity index 100%
rename from docs/zh/08-connector/08-node.mdx
rename to docs/zh/08-connector/35-node.mdx
diff --git a/docs/zh/08-connector/09-csharp.mdx b/docs/zh/08-connector/40-csharp.mdx
similarity index 64%
rename from docs/zh/08-connector/09-csharp.mdx
rename to docs/zh/08-connector/40-csharp.mdx
index be27bfb685d5890813aa65199813f021f7e92066..e99f41ae9cb0a97426878e3efb8cb85d66af4929 100644
--- a/docs/zh/08-connector/09-csharp.mdx
+++ b/docs/zh/08-connector/40-csharp.mdx
@@ -17,7 +17,7 @@ import CSAsyncQuery from "../07-develop/04-query-data/_cs_async.mdx"
`TDengine.Connector` 是 TDengine 提供的 C# 语言连接器。C# 开发人员可以通过它开发存取 TDengine 集群数据的 C# 应用软件。
-`TDengine.Connector` 连接器支持通过 TDengine 客户端驱动(taosc)建立与 TDengine 运行实例的连接,提供数据写入、查询、订阅、schemaless 数据写入、参数绑定接口数据写入等功能 `TDengine.Connector` 目前暂未提供 REST 连接方式,用户可以参考 [REST API](../rest-api/) 文档自行编写。
+`TDengine.Connector` 连接器支持通过 TDengine 客户端驱动(taosc)建立与 TDengine 运行实例的连接,提供数据写入、查询、数据订阅、schemaless 数据写入、参数绑定接口数据写入等功能。 `TDengine.Connector` 还支持 WebSocket,通过 DSN 建立 WebSocket 连接,提供数据写入、查询、参数绑定接口数据写入等功能。
本文介绍如何在 Linux 或 Windows 环境中安装 `TDengine.Connector`,并通过 `TDengine.Connector` 连接 TDengine 集群,进行数据写入、查询等基本操作。
@@ -35,12 +35,29 @@ import CSAsyncQuery from "../07-develop/04-query-data/_cs_async.mdx"
## 支持的功能特性
+
+
+
+
1. 连接管理
2. 普通查询
3. 连续查询
4. 参数绑定
-5. 订阅功能
+5. 数据订阅(TMQ)
6. Schemaless
+
+
+
+
+
+1. 连接管理
+2. 普通查询
+3. 连续查询
+4. 参数绑定
+
+
+
+
## 安装步骤
@@ -79,7 +96,13 @@ dotnet add exmaple.csproj reference src/TDengine.csproj
## 建立连接
-``` C#
+
+
+
+
+使用 host、username、password、port 等信息建立连接。
+
+``` csharp
using TDengineDriver;
namespace TDengineExample
@@ -109,17 +132,63 @@ namespace TDengineExample
}
}
}
+```
+
+
+
+
+使用 DSN 建立 WebSocket 连接 DSN 连接。 描述字符串基本结构如下:
+
+```text
+[]://[[:@]:][/][?=[&=]]
+|------------|---|-----------|-----------|------|------|------------|-----------------------|
+| protocol | | username | password | host | port | database | params |
+```
+
+各部分意义见下表:
+
+* **protocol**: 显示指定以何种方式建立连接,例如:`ws://localhost:6041` 指定以 Websocket 方式建立连接(支持http/ws)。
+
+* **username/password**: 用于创建连接的用户名及密码(默认`root/taosdata`)。
+
+* **host/port**: 指定创建连接的服务器及端口,WebSocket 连接默认为 `localhost:6041` 。
+
+* **database**: 指定默认连接的数据库名,可选参数。
+
+* **params**:其他可选参数。
+
+``` csharp
+{{#include docs/examples/csharp/wsConnect/Program.cs}}
```
+
+
+
## 使用示例
### 写入数据
#### SQL 写入
+
+
+
+
+
+
+
+
+```csharp
+{{#include docs/examples/csharp/wsInsert/Program.cs}}
+```
+
+
+
+
+
#### InfluxDB 行协议写入
@@ -132,12 +201,50 @@ namespace TDengineExample
+#### 参数绑定
+
+
+
+
+
+``` csharp
+{{#include docs/examples/csharp/stmtInsert/Program.cs}}
+```
+
+
+
+
+
+```csharp
+{{#include docs/examples/csharp/wsStmt/Program.cs}}
+```
+
+
+
+
+
### 查询数据
#### 同步查询
+
+
+
+
+
+
+
+
+```csharp
+{{#include docs/examples/csharp/wsQuery/Program.cs}}
+```
+
+
+
+
+
#### 异步查询
@@ -151,12 +258,15 @@ namespace TDengineExample
| [stmt](https://github.com/taosdata/taos-connector-dotnet/tree/3.0/examples/Stmt) | 使用 TDengine.Connector 实现的参数绑定插入和查询的示例 |
| [schemaless](https://github.com/taosdata/taos-connector-dotnet/blob/3.0/examples/schemaless) | 使用 TDengine.Connector 实现的使用 schemaless 写入的示例 |
| [async query](https://github.com/taosdata/taos-connector-dotnet/blob/3.0/examples/AsyncQuery/QueryAsync.cs) | 使用 TDengine.Connector 实现的异步查询的示例 |
-| [TMQ](https://github.com/taosdata/taos-connector-dotnet/blob/3.0/examples/TMQ/TMQ.cs) | 使用 TDengine.Connector 实现的订阅数据的示例 |
+| [数据订阅(TMQ)](https://github.com/taosdata/taos-connector-dotnet/blob/3.0/examples/TMQ/TMQ.cs) | 使用 TDengine.Connector 实现的订阅数据的示例 |
+| [Basic WebSocket Usage](https://github.com/taosdata/taos-connector-dotnet/blob/5a4a7cd0dbcda114447cdc6d0c6dedd8e84a52da/examples/WS/WebSocketSample.cs) | 使用 TDengine.Connector 的 WebSocket 基本的示例 |
+| [Basic WebSocket STMT](https://github.com/taosdata/taos-connector-dotnet/blob/5a4a7cd0dbcda114447cdc6d0c6dedd8e84a52da/examples/WS/WebSocketSTMT.cs) | 使用 TDengine.Connector 的 WebSocket STMT 基本的示例 |
## 重要更新记录
| TDengine.Connector | 说明 |
|--------------------|--------------------------------|
+| 3.0.1 | 支持 WebSocket 和 Cloud,查询,插入,参数绑定。 |
| 3.0.0 | 支持 TDengine 3.0.0.0,不兼容 2.x。新增接口TDengine.Impl.GetData(),解析查询结果。 |
| 1.0.7 | 修复 TDengine.Query()内存泄露。 |
| 1.0.6 | 修复 schemaless 在 1.0.4 和 1.0.5 中失效 bug。 |
@@ -169,9 +279,9 @@ namespace TDengineExample
### 第三方驱动
-`Maikebing.Data.Taos` 是一个 TDengine 的 ADO.NET 连接器,支持 Linux,Windows 平台。该连接器由社区贡献者`麦壳饼@@maikebing` 提供,具体请参考:
+[`IoTSharp.Data.Taos`](https://github.com/IoTSharp/EntityFrameworkCore.Taos) 是一个 TDengine 的 ADO.NET 连接器,其中包含了用于EntityFrameworkCore 的提供程序 IoTSharp.EntityFrameworkCore.Taos 和健康检查组件 IoTSharp.HealthChecks.Taos ,支持 Linux,Windows 平台。该连接器由社区贡献者`麦壳饼@@maikebing` 提供,具体请参考:
-* 接口下载:
+* 接口下载:
* 用法说明:
## 常见问题
diff --git a/docs/zh/08-connector/10-php.mdx b/docs/zh/08-connector/45-php.mdx
similarity index 100%
rename from docs/zh/08-connector/10-php.mdx
rename to docs/zh/08-connector/45-php.mdx
diff --git a/docs/zh/08-connector/index.md b/docs/zh/08-connector/index.md
index 17de8e926cd9a3633dc8746b0fb49c38ff8ca61f..f54470f7420ada71c2cd283eff52c5fc6e9ada1a 100644
--- a/docs/zh/08-connector/index.md
+++ b/docs/zh/08-connector/index.md
@@ -41,14 +41,14 @@ TDengine 版本更新往往会增加新的功能特性,列表中的连接器
### 使用原生接口(taosc)
-| **功能特性** | **Java** | **Python** | **Go** | **C#** | **Node.js** | **Rust** |
-| -------------- | -------- | ---------- | ------ | ------ | ----------- | -------- |
-| **连接管理** | 支持 | 支持 | 支持 | 支持 | 支持 | 支持 |
-| **普通查询** | 支持 | 支持 | 支持 | 支持 | 支持 | 支持 |
-| **参数绑定** | 支持 | 支持 | 支持 | 支持 | 支持 | 支持 |
-| ** TMQ ** | 支持 | 支持 | 支持 | 支持 | 支持 | 支持 |
-| **Schemaless** | 支持 | 支持 | 支持 | 支持 | 支持 | 支持 |
-| **DataFrame** | 不支持 | 支持 | 不支持 | 不支持 | 不支持 | 不支持 |
+| **功能特性** | **Java** | **Python** | **Go** | **C#** | **Node.js** | **Rust** |
+| ------------------- | -------- | ---------- | ------ | ------ | ----------- | -------- |
+| **连接管理** | 支持 | 支持 | 支持 | 支持 | 支持 | 支持 |
+| **普通查询** | 支持 | 支持 | 支持 | 支持 | 支持 | 支持 |
+| **参数绑定** | 支持 | 支持 | 支持 | 支持 | 支持 | 支持 |
+| **数据订阅(TMQ)** | 支持 | 支持 | 支持 | 支持 | 支持 | 支持 |
+| **Schemaless** | 支持 | 支持 | 支持 | 支持 | 支持 | 支持 |
+| **DataFrame** | 不支持 | 支持 | 不支持 | 不支持 | 不支持 | 不支持 |
:::info
由于不同编程语言数据库框架规范不同,并不意味着所有 C/C++ 接口都需要对应封装支持。
@@ -56,16 +56,15 @@ TDengine 版本更新往往会增加新的功能特性,列表中的连接器
### 使用 http (REST 或 WebSocket) 接口
-| **功能特性** | **Java** | **Python** | **Go** | **C#(暂不支持)** | **Node.js** | **Rust** |
-| ------------------------------ | -------- | ---------- | -------- | ------------------ | ----------- | -------- |
-| **连接管理** | 支持 | 支持 | 支持 | N/A | 支持 | 支持 |
-| **普通查询** | 支持 | 支持 | 支持 | N/A | 支持 | 支持 |
-| **连续查询** | 支持 | 支持 | 支持 | N/A | 支持 | 支持 |
-| **参数绑定** | 不支持 | 暂不支持 | 暂不支持 | N/A | 不支持 | 支持 |
-| ** TMQ ** | 不支持 | 暂不支持 | 暂不支持 | N/A | 不支持 | 支持 |
-| **Schemaless** | 暂不支持 | 暂不支持 | 暂不支持 | N/A | 不支持 | 暂不支持 |
-| **批量拉取(基于 WebSocket)** | 支持 | 支持 | 暂不支持 | N/A | 不支持 | 支持 |
-| **DataFrame** | 不支持 | 支持 | 不支持 | N/A | 不支持 | 不支持 |
+| **功能特性** | **Java** | **Python** | **Go** | **C# ** | **Node.js** | **Rust** |
+| ------------------------------ | -------- | ---------- | -------- | -------- | ----------- | -------- |
+| **连接管理** | 支持 | 支持 | 支持 | 支持 | 支持 | 支持 |
+| **普通查询** | 支持 | 支持 | 支持 | 支持 | 支持 | 支持 |
+| **参数绑定** | 暂不支持 | 暂不支持 | 暂不支持 | 支持 | 暂不支持 | 支持 |
+| **数据订阅(TMQ)** | 暂不支持 | 暂不支持 | 暂不支持 | 暂不支持 | 暂不支持 | 支持 |
+| **Schemaless** | 暂不支持 | 暂不支持 | 暂不支持 | 暂不支持 | 暂不支持 | 暂不支持 |
+| **批量拉取(基于 WebSocket)** | 支持 | 支持 | 暂不支持 | 支持 | 暂不支持 | 支持 |
+| **DataFrame** | 不支持 | 支持 | 不支持 | 不支持 | 不支持 | 不支持 |
:::warning
diff --git a/docs/zh/10-deployment/01-deploy.md b/docs/zh/10-deployment/01-deploy.md
index 8d8a2eb6d864b2fb05ae6e0a2833d850b62d68a8..eecb86ce415b6ad0a24a7fcd0d6d9d9ce1357a33 100644
--- a/docs/zh/10-deployment/01-deploy.md
+++ b/docs/zh/10-deployment/01-deploy.md
@@ -71,7 +71,7 @@ serverPort 6030
## 启动集群
-按照《立即开始》里的步骤,启动第一个数据节点,例如 h1.taosdata.com,然后执行 taos,启动 taos shell,从 shell 里执行命令“SHOW DNODES”,如下所示:
+按照《立即开始》里的步骤,启动第一个数据节点,例如 h1.taosdata.com,然后执行 taos,启动 TDengine CLI,在其中执行命令 “SHOW DNODES”,如下所示:
```
taos> show dnodes;
@@ -115,7 +115,7 @@ SHOW DNODES;
任何已经加入集群在线的数据节点,都可以作为后续待加入节点的 firstEp。
firstEp 这个参数仅仅在该数据节点首次加入集群时有作用,加入集群后,该数据节点会保存最新的 mnode 的 End Point 列表,不再依赖这个参数。
-接下来,配置文件中的 firstEp 参数就主要在客户端连接的时候使用了,例如 taos shell 如果不加参数,会默认连接由 firstEp 指定的节点。
+接下来,配置文件中的 firstEp 参数就主要在客户端连接的时候使用了,例如 TDengine CLI 如果不加参数,会默认连接由 firstEp 指定的节点。
两个没有配置 firstEp 参数的数据节点 dnode 启动后,会独立运行起来。这个时候,无法将其中一个数据节点加入到另外一个数据节点,形成集群。无法将两个独立的集群合并成为新的集群。
:::
@@ -168,7 +168,7 @@ Query OK, 8 row(s) in set (0.001154s)
## 删除数据节点
-先停止要删除的数据节点的 taosd 进程,然后启动 CLI 程序 taos,执行:
+启动 CLI 程序 taos,执行:
```sql
DROP DNODE "fqdn:port";
diff --git a/docs/zh/10-deployment/03-k8s.md b/docs/zh/10-deployment/03-k8s.md
index 5d512700b6506893dc3af049b655f4021f753463..0cae59657c2a0199d3452bc37d36f2c537944d21 100644
--- a/docs/zh/10-deployment/03-k8s.md
+++ b/docs/zh/10-deployment/03-k8s.md
@@ -10,6 +10,7 @@ description: 利用 Kubernetes 部署 TDengine 集群的详细指南
要使用 Kubernetes 部署管理 TDengine 集群,需要做好如下准备工作。
+* 本文适用 Kubernetes v1.5 以上版本
* 本文和下一章使用 minikube、kubectl 和 helm 等工具进行安装部署,请提前安装好相应软件
* Kubernetes 已经安装部署并能正常访问使用或更新必要的容器仓库或其他服务
@@ -366,7 +367,7 @@ kubectl scale statefulsets tdengine --replicas=1
```
-在 taos shell 中的所有数据库操作将无法成功。
+在 TDengine CLI 中的所有数据库操作将无法成功。
```
taos> show dnodes;
diff --git a/docs/zh/12-taos-sql/01-data-type.md b/docs/zh/12-taos-sql/01-data-type.md
index b8ef050fb79fce5e5d2d65753480a6b156cfbc40..128fa20930d1b94b905a20fd1dde853d63e2b0c4 100644
--- a/docs/zh/12-taos-sql/01-data-type.md
+++ b/docs/zh/12-taos-sql/01-data-type.md
@@ -1,7 +1,7 @@
---
-sidebar_label: 支持的数据类型
-title: 支持的数据类型
-description: "TDengine 支持的数据类型: 时间戳、浮点型、JSON 类型等"
+sidebar_label: 数据类型
+title: 数据类型
+description: 'TDengine 支持的数据类型: 时间戳、浮点型、JSON 类型等'
---
## 时间戳
@@ -9,64 +9,65 @@ description: "TDengine 支持的数据类型: 时间戳、浮点型、JSON 类
使用 TDengine,最重要的是时间戳。创建并插入记录、查询历史记录的时候,均需要指定时间戳。时间戳有如下规则:
- 时间格式为 `YYYY-MM-DD HH:mm:ss.MS`,默认时间分辨率为毫秒。比如:`2017-08-12 18:25:58.128`
-- 内部函数 now 是客户端的当前时间
-- 插入记录时,如果时间戳为 now,插入数据时使用提交这条记录的客户端的当前时间
-- Epoch Time:时间戳也可以是一个长整数,表示从 UTC 时间 1970-01-01 00:00:00 开始的毫秒数。相应地,如果所在 Database 的时间精度设置为“微秒”,则长整型格式的时间戳含义也就对应于从 UTC 时间 1970-01-01 00:00:00 开始的微秒数;纳秒精度逻辑类似。
-- 时间可以加减,比如 now-2h,表明查询时刻向前推 2 个小时(最近 2 小时)。数字后面的时间单位可以是 b(纳秒)、u(微秒)、a(毫秒)、s(秒)、m(分)、h(小时)、d(天)、w(周)。 比如 `select * from t1 where ts > now-2w and ts <= now-1w`,表示查询两周前整整一周的数据。在指定降采样操作(down sampling)的时间窗口(interval)时,时间单位还可以使用 n (自然月) 和 y (自然年)。
+- 内部函数 NOW 是客户端的当前时间
+- 插入记录时,如果时间戳为 NOW,插入数据时使用提交这条记录的客户端的当前时间
+- Epoch Time:时间戳也可以是一个长整数,表示从 UTC 时间 1970-01-01 00:00:00 开始的毫秒数。相应地,如果所在 Database 的时间精度设置为“微秒”,则长整型格式的时间戳含义也就对应于从 UTC 时间 1970-01-01 00:00:00 开始的微秒数;纳秒精度逻辑相同。
+- 时间可以加减,比如 NOW-2h,表明查询时刻向前推 2 个小时(最近 2 小时)。数字后面的时间单位可以是 b(纳秒)、u(微秒)、a(毫秒)、s(秒)、m(分)、h(小时)、d(天)、w(周)。 比如 `SELECT * FROM t1 WHERE ts > NOW-2w AND ts <= NOW-1w`,表示查询两周前整整一周的数据。在指定降采样操作(Down Sampling)的时间窗口(Interval)时,时间单位还可以使用 n(自然月)和 y(自然年)。
-TDengine 缺省的时间戳精度是毫秒,但通过在 `CREATE DATABASE` 时传递的 PRECISION 参数也可以支持微秒和纳秒。
+TDengine 缺省的时间戳精度是毫秒,但通过在 `CREATE DATABASE` 时传递的 `PRECISION` 参数也可以支持微秒和纳秒。
```sql
CREATE DATABASE db_name PRECISION 'ns';
```
+
## 数据类型
在 TDengine 中,普通表的数据模型中可使用以下数据类型。
-| # | **类型** | **Bytes** | **说明** |
-| --- | :-------: | --------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
-| 1 | TIMESTAMP | 8 | 时间戳。缺省精度毫秒,可支持微秒和纳秒,详细说明见上节。 |
-| 2 | INT | 4 | 整型,范围 [-2^31, 2^31-1] |
-| 3 | INT UNSIGNED| 4| 无符号整数,[0, 2^32-1]
-| 4 | BIGINT | 8 | 长整型,范围 [-2^63, 2^63-1] |
-| 5 | BIGINT UNSIGNED | 8 | 长整型,范围 [0, 2^64-1] |
-| 6 | FLOAT | 4 | 浮点型,有效位数 6-7,范围 [-3.4E38, 3.4E38] |
-| 7 | DOUBLE | 8 | 双精度浮点型,有效位数 15-16,范围 [-1.7E308, 1.7E308] |
-| 8 | BINARY | 自定义 | 记录单字节字符串,建议只用于处理 ASCII 可见字符,中文等多字节字符需使用 nchar。 |
-| 9 | SMALLINT | 2 | 短整型, 范围 [-32768, 32767] |
-| 10 | SMALLINT UNSIGNED | 2| 无符号短整型,范围 [0, 65535] |
-| 11 | TINYINT | 1 | 单字节整型,范围 [-128, 127] |
-| 12 | TINYINT UNSIGNED | 1 | 无符号单字节整型,范围 [0, 255] |
-| 13 | BOOL | 1 | 布尔型,{true, false} |
-| 14 | NCHAR | 自定义 | 记录包含多字节字符在内的字符串,如中文字符。每个 nchar 字符占用 4 bytes 的存储空间。字符串两端使用单引号引用,字符串内的单引号需用转义字符 `\’`。nchar 使用时须指定字符串大小,类型为 nchar(10) 的列表示此列的字符串最多存储 10 个 nchar 字符,会固定占用 40 bytes 的空间。如果用户字符串长度超出声明长度,将会报错。 |
-| 15 | JSON | | json 数据类型, 只有 tag 可以是 json 格式 |
-| 16 | VARCHAR | 自定义 | BINARY类型的别名 |
-
+| # | **类型** | **Bytes** | **说明** |
+| --- | :---------------: | --------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
+| 1 | TIMESTAMP | 8 | 时间戳。缺省精度毫秒,可支持微秒和纳秒,详细说明见上节。 |
+| 2 | INT | 4 | 整型,范围 [-2^31, 2^31-1] |
+| 3 | INT UNSIGNED | 4 | 无符号整数,[0, 2^32-1] |
+| 4 | BIGINT | 8 | 长整型,范围 [-2^63, 2^63-1] |
+| 5 | BIGINT UNSIGNED | 8 | 长整型,范围 [0, 2^64-1] |
+| 6 | FLOAT | 4 | 浮点型,有效位数 6-7,范围 [-3.4E38, 3.4E38] |
+| 7 | DOUBLE | 8 | 双精度浮点型,有效位数 15-16,范围 [-1.7E308, 1.7E308] |
+| 8 | BINARY | 自定义 | 记录单字节字符串,建议只用于处理 ASCII 可见字符,中文等多字节字符需使用 NCHAR |
+| 9 | SMALLINT | 2 | 短整型, 范围 [-32768, 32767] |
+| 10 | SMALLINT UNSIGNED | 2 | 无符号短整型,范围 [0, 65535] |
+| 11 | TINYINT | 1 | 单字节整型,范围 [-128, 127] |
+| 12 | TINYINT UNSIGNED | 1 | 无符号单字节整型,范围 [0, 255] |
+| 13 | BOOL | 1 | 布尔型,{true, false} |
+| 14 | NCHAR | 自定义 | 记录包含多字节字符在内的字符串,如中文字符。每个 NCHAR 字符占用 4 字节的存储空间。字符串两端使用单引号引用,字符串内的单引号需用转义字符 `\'`。NCHAR 使用时须指定字符串大小,类型为 NCHAR(10) 的列表示此列的字符串最多存储 10 个 NCHAR 字符,会固定占用 40 字节的空间。如果用户字符串长度超出声明长度,将会报错。 |
+| 15 | JSON | | JSON 数据类型, 只有 Tag 可以是 JSON 格式 |
+| 16 | VARCHAR | 自定义 | BINARY 类型的别名 |
:::note
-- TDengine 对 SQL 语句中的英文字符不区分大小写,自动转化为小写执行。因此用户大小写敏感的字符串及密码,需要使用单引号将字符串引起来。
+
- 虽然 BINARY 类型在底层存储上支持字节型的二进制字符,但不同编程语言对二进制数据的处理方式并不保证一致,因此建议在 BINARY 类型中只存储 ASCII 可见字符,而避免存储不可见字符。多字节的数据,例如中文字符,则需要使用 NCHAR 类型进行保存。如果强行使用 BINARY 类型保存中文字符,虽然有时也能正常读写,但并不带有字符集信息,很容易出现数据乱码甚至数据损坏等情况。
-- BINARY 类型理论上最长可以有 16374 字节。binary 仅支持字符串输入,字符串两端需使用单引号引用。使用时须指定大小,如 binary(20) 定义了最长为 20 个单字节字符的字符串,每个字符占 1 byte 的存储空间,总共固定占用 20 bytes 的空间,此时如果用户字符串超出 20 字节将会报错。对于字符串内的单引号,可以用转义字符反斜线加单引号来表示,即 `\’`。
+- BINARY 类型理论上最长可以有 16,374 字节。BINARY 仅支持字符串输入,字符串两端需使用单引号引用。使用时须指定大小,如 BINARY(20) 定义了最长为 20 个单字节字符的字符串,每个字符占 1 字节的存储空间,总共固定占用 20 字节的空间,此时如果用户字符串超出 20 字节将会报错。对于字符串内的单引号,可以用转义字符反斜线加单引号来表示,即 `\'`。
- SQL 语句中的数值类型将依据是否存在小数点,或使用科学计数法表示,来判断数值类型是否为整型或者浮点型,因此在使用时要注意相应类型越界的情况。例如,9999999999999999999 会认为超过长整型的上边界而溢出,而 9999999999999999999.0 会被认为是有效的浮点数。
:::
-
## 常量
-TDengine支持多个类型的常量,细节如下表:
-
-| # | **语法** | **类型** | **说明** |
-| --- | :-------: | --------- | -------------------------------------- |
-| 1 | [{+ \| -}]123 | BIGINT | 整型数值的字面量的类型均为BIGINT。如果用户输入超过了BIGINT的表示范围,TDengine 按BIGINT对数值进行截断。|
-| 2 | 123.45 | DOUBLE | 浮点数值的字面量的类型均为DOUBLE。TDengine依据是否存在小数点,或使用科学计数法表示,来判断数值类型是否为整型或者浮点型。|
-| 3 | 1.2E3 | DOUBLE | 科学计数法的字面量的类型为DOUBLE。|
-| 4 | 'abc' | BINARY | 单引号括住的内容为字符串字面值,其类型为BINARY,BINARY的size为实际的字符个数。对于字符串内的单引号,可以用转义字符反斜线加单引号来表示,即 \'。|
-| 5 | "abc" | BINARY | 双引号括住的内容为字符串字面值,其类型为BINARY,BINARY的size为实际的字符个数。对于字符串内的双引号,可以用转义字符反斜线加单引号来表示,即 \"。 |
-| 6 | TIMESTAMP {'literal' \| "literal"} | TIMESTAMP | TIMESTAMP关键字表示后面的字符串字面量需要被解释为TIMESTAMP类型。字符串需要满足YYYY-MM-DD HH:mm:ss.MS格式,其时间分辨率为当前数据库的时间分辨率。 |
-| 7 | {TRUE \| FALSE} | BOOL | 布尔类型字面量。 |
-| 8 | {'' \| "" \| '\t' \| "\t" \| ' ' \| " " \| NULL } | -- | 空值字面量。可以用于任意类型。|
+
+TDengine 支持多个类型的常量,细节如下表:
+
+| # | **语法** | **类型** | **说明** |
+| --- | :-----------------------------------------------: | --------- | ----------------------------------------------------------------------------------------------------------------------------------------------------- |
+| 1 | [{+ \| -}]123 | BIGINT | 整型数值的字面量的类型均为 BIGINT。如果用户输入超过了 BIGINT 的表示范围,TDengine 按 BIGINT 对数值进行截断。 |
+| 2 | 123.45 | DOUBLE | 浮点数值的字面量的类型均为 DOUBLE。TDengine 依据是否存在小数点,或使用科学计数法表示,来判断数值类型是否为整型或者浮点型。 |
+| 3 | 1.2E3 | DOUBLE | 科学计数法的字面量的类型为 DOUBLE。 |
+| 4 | 'abc' | BINARY | 单引号括住的内容为字符串字面值,其类型为 BINARY,BINARY 的 Size 为实际的字符个数。对于字符串内的单引号,可以用转义字符反斜线加单引号来表示,即 `\'`。 |
+| 5 | "abc" | BINARY | 双引号括住的内容为字符串字面值,其类型为 BINARY,BINARY 的 Size 为实际的字符个数。对于字符串内的双引号,可以用转义字符反斜线加单引号来表示,即 `\"`。 |
+| 6 | TIMESTAMP {'literal' \| "literal"} | TIMESTAMP | TIMESTAMP 关键字表示后面的字符串字面量需要被解释为 TIMESTAMP 类型。字符串需要满足 YYYY-MM-DD HH:mm:ss.MS 格式,其时间分辨率为当前数据库的时间分辨率。 |
+| 7 | {TRUE \| FALSE} | BOOL | 布尔类型字面量。 |
+| 8 | {'' \| "" \| '\t' \| "\t" \| ' ' \| " " \| NULL } | -- | 空值字面量。可以用于任意类型。 |
:::note
-- TDengine依据是否存在小数点,或使用科学计数法表示,来判断数值类型是否为整型或者浮点型,因此在使用时要注意相应类型越界的情况。例如,9999999999999999999会认为超过长整型的上边界而溢出,而9999999999999999999.0会被认为是有效的浮点数。
+
+- TDengine 依据是否存在小数点,或使用科学计数法表示,来判断数值类型是否为整型或者浮点型,因此在使用时要注意相应类型越界的情况。例如,9999999999999999999 会认为超过长整型的上边界而溢出,而 9999999999999999999.0 会被认为是有效的浮点数。
:::
diff --git a/docs/zh/12-taos-sql/02-database.md b/docs/zh/12-taos-sql/02-database.md
index 1675356c49c3435d6f9dad3ccc6b868da929f08f..c76311f008433f36259b08acaf56cafa729550b7 100644
--- a/docs/zh/12-taos-sql/02-database.md
+++ b/docs/zh/12-taos-sql/02-database.md
@@ -1,6 +1,6 @@
---
-sidebar_label: 数据库管理
-title: 数据库管理
+sidebar_label: 数据库
+title: 数据库
description: "创建、删除数据库,查看、修改数据库参数"
---
@@ -71,9 +71,9 @@ database_option: {
- SINGLE_STABLE:表示此数据库中是否只可以创建一个超级表,用于超级表列非常多的情况。
- 0:表示可以创建多张超级表。
- 1:表示只可以创建一张超级表。
-- WAL_RETENTION_PERIOD:wal 文件的额外保留策略,用于数据订阅。wal 的保存时长,单位为 s。默认为 0,即落盘后立即删除。-1 表示不删除。
-- WAL_RETENTION_SIZE:wal 文件的额外保留策略,用于数据订阅。wal 的保存的最大上限,单位为 KB。默认为 0,即落盘后立即删除。-1 表示不删除。
-- WAL_ROLL_PERIOD:wal 文件切换时长,单位为 s。当 wal 文件创建并写入后,经过该时间,会自动创建一个新的 wal 文件。默认为 0,即仅在落盘时创建新文件。
+- WAL_RETENTION_PERIOD:wal 文件的额外保留策略,用于数据订阅。wal 的保存时长,单位为 s。单副本默认为 0,即落盘后立即删除。-1 表示不删除。多副本默认为 4 天。
+- WAL_RETENTION_SIZE:wal 文件的额外保留策略,用于数据订阅。wal 的保存的最大上限,单位为 KB。单副本默认为 0,即落盘后立即删除。多副本默认为-1,表示不删除。
+- WAL_ROLL_PERIOD:wal 文件切换时长,单位为 s。当 wal 文件创建并写入后,经过该时间,会自动创建一个新的 wal 文件。单副本默认为 0,即仅在落盘时创建新文件。多副本默认为 1 天。
- WAL_SEGMENT_SIZE:wal 单个文件大小,单位为 KB。当前写入文件大小超过上限后会自动创建一个新的 wal 文件。默认为 0,即仅在落盘时创建新文件。
### 创建数据库示例
diff --git a/docs/zh/12-taos-sql/03-table.md b/docs/zh/12-taos-sql/03-table.md
index a93b010c4c1af5349930225803e6714011d47912..f6790e3c692b815c1031413933c47eb7ad203204 100644
--- a/docs/zh/12-taos-sql/03-table.md
+++ b/docs/zh/12-taos-sql/03-table.md
@@ -1,5 +1,5 @@
---
-title: 表管理
+title: 表
sidebar_label: 表
description: 对表的各种管理操作
---
@@ -10,27 +10,24 @@ description: 对表的各种管理操作
```sql
CREATE TABLE [IF NOT EXISTS] [db_name.]tb_name (create_definition [, create_definitionn] ...) [table_options]
-
+
CREATE TABLE create_subtable_clause
-
+
CREATE TABLE [IF NOT EXISTS] [db_name.]tb_name (create_definition [, create_definitionn] ...)
[TAGS (create_definition [, create_definitionn] ...)]
[table_options]
-
+
create_subtable_clause: {
create_subtable_clause [create_subtable_clause] ...
| [IF NOT EXISTS] [db_name.]tb_name USING [db_name.]stb_name [(tag_name [, tag_name] ...)] TAGS (tag_value [, tag_value] ...)
}
-
+
create_definition:
- col_name column_definition
-
-column_definition:
- type_name [comment 'string_value']
-
+ col_name column_type
+
table_options:
table_option ...
-
+
table_option: {
COMMENT 'string_value'
| WATERMARK duration[,duration]
@@ -54,12 +51,13 @@ table_option: {
需要注意的是转义字符中的内容必须是可打印字符。
**参数说明**
+
1. COMMENT:表注释。可用于超级表、子表和普通表。
-2. WATERMARK:指定窗口的关闭时间,默认值为 5 秒,最小单位毫秒,范围为0到15分钟,多个以逗号分隔。只可用于超级表,且只有当数据库使用了RETENTIONS参数时,才可以使用此表参数。
-3. MAX_DELAY:用于控制推送计算结果的最大延迟,默认值为 interval 的值(但不能超过最大值),最小单位毫秒,范围为1毫秒到15分钟,多个以逗号分隔。注:不建议 MAX_DELAY 设置太小,否则会过于频繁的推送结果,影响存储和查询性能,如无特殊需求,取默认值即可。只可用于超级表,且只有当数据库使用了RETENTIONS参数时,才可以使用此表参数。
-4. ROLLUP:Rollup 指定的聚合函数,提供基于多层级的降采样聚合结果。只可用于超级表。只有当数据库使用了RETENTIONS参数时,才可以使用此表参数。作用于超级表除TS列外的其它所有列,但是只能定义一个聚合函数。 聚合函数支持 avg, sum, min, max, last, first。
-5. SMA:Small Materialized Aggregates,提供基于数据块的自定义预计算功能。预计算类型包括MAX、MIN和SUM。可用于超级表/普通表。
-6. TTL:Time to Live,是用户用来指定表的生命周期的参数。如果在持续的TTL时间内,都没有数据写入该表,则TDengine系统会自动删除该表。这个TTL的时间只是一个大概时间,我们系统不保证到了时间一定会将其删除,而只保证存在这样一个机制。TTL单位是天,默认为0,表示不限制。用户需要注意,TTL优先级高于KEEP,即TTL时间满足删除机制时,即使当前数据的存在时间小于KEEP,此表也会被删除。只可用于子表和普通表。
+2. WATERMARK:指定窗口的关闭时间,默认值为 5 秒,最小单位毫秒,范围为 0 到 15 分钟,多个以逗号分隔。只可用于超级表,且只有当数据库使用了 RETENTIONS 参数时,才可以使用此表参数。
+3. MAX_DELAY:用于控制推送计算结果的最大延迟,默认值为 interval 的值(但不能超过最大值),最小单位毫秒,范围为 1 毫秒到 15 分钟,多个以逗号分隔。注:不建议 MAX_DELAY 设置太小,否则会过于频繁的推送结果,影响存储和查询性能,如无特殊需求,取默认值即可。只可用于超级表,且只有当数据库使用了 RETENTIONS 参数时,才可以使用此表参数。
+4. ROLLUP:Rollup 指定的聚合函数,提供基于多层级的降采样聚合结果。只可用于超级表。只有当数据库使用了 RETENTIONS 参数时,才可以使用此表参数。作用于超级表除 TS 列外的其它所有列,但是只能定义一个聚合函数。 聚合函数支持 avg, sum, min, max, last, first。
+5. SMA:Small Materialized Aggregates,提供基于数据块的自定义预计算功能。预计算类型包括 MAX、MIN 和 SUM。可用于超级表/普通表。
+6. TTL:Time to Live,是用户用来指定表的生命周期的参数。如果创建表时指定了这个参数,当该表的存在时间超过 TTL 指定的时间后,TDengine 自动删除该表。这个 TTL 的时间只是一个大概时间,系统不保证到了时间一定会将其删除,而只保证存在这样一个机制且最终一定会删除。TTL 单位是天,默认为 0,表示不限制,到期时间为表创建时间加上 TTL 时间。
## 创建子表
@@ -89,7 +87,7 @@ CREATE TABLE [IF NOT EXISTS] tb_name1 USING stb_name TAGS (tag_value1, ...) [IF
```sql
ALTER TABLE [db_name.]tb_name alter_table_clause
-
+
alter_table_clause: {
alter_table_options
| ADD COLUMN col_name column_type
@@ -97,10 +95,10 @@ alter_table_clause: {
| MODIFY COLUMN col_name column_type
| RENAME COLUMN old_col_name new_col_name
}
-
+
alter_table_options:
alter_table_option ...
-
+
alter_table_option: {
TTL value
| COMMENT 'string_value'
@@ -110,6 +108,7 @@ alter_table_option: {
**使用说明**
对普通表可以进行如下修改操作
+
1. ADD COLUMN:添加列。
2. DROP COLUMN:删除列。
3. MODIFY COLUMN:修改列定义,如果数据列的类型是可变长类型,那么可以使用此指令修改其宽度,只能改大,不能改小。
@@ -143,15 +142,15 @@ ALTER TABLE tb_name RENAME COLUMN old_col_name new_col_name
```sql
ALTER TABLE [db_name.]tb_name alter_table_clause
-
+
alter_table_clause: {
alter_table_options
| SET TAG tag_name = new_tag_value
}
-
+
alter_table_options:
alter_table_option ...
-
+
alter_table_option: {
TTL value
| COMMENT 'string_value'
@@ -159,6 +158,7 @@ alter_table_option: {
```
**使用说明**
+
1. 对子表的列和标签的修改,除了更改标签值以外,都要通过超级表才能进行。
### 修改子表标签值
@@ -169,7 +169,7 @@ ALTER TABLE tb_name SET TAG tag_name=new_tag_value;
## 删除表
-可以在一条SQL语句中删除一个或多个普通表或子表。
+可以在一条 SQL 语句中删除一个或多个普通表或子表。
```sql
DROP TABLE [IF EXISTS] [db_name.]tb_name [, [IF EXISTS] [db_name.]tb_name] ...
@@ -179,7 +179,7 @@ DROP TABLE [IF EXISTS] [db_name.]tb_name [, [IF EXISTS] [db_name.]tb_name] ...
### 显示所有表
-如下SQL语句可以列出当前数据库中的所有表名。
+如下 SQL 语句可以列出当前数据库中的所有表名。
```sql
SHOW TABLES [LIKE tb_name_wildchar];
diff --git a/docs/zh/12-taos-sql/04-stable.md b/docs/zh/12-taos-sql/04-stable.md
index 450ff07fd8eb636b3ee185e5594d77d645195c56..95ef405fa780e831628e21766e1b3c3b18265059 100644
--- a/docs/zh/12-taos-sql/04-stable.md
+++ b/docs/zh/12-taos-sql/04-stable.md
@@ -1,6 +1,6 @@
---
-sidebar_label: 超级表管理
-title: 超级表 STable 管理
+sidebar_label: 超级表
+title: 超级表
description: 对超级表的各种管理操作
---
diff --git a/docs/zh/12-taos-sql/05-insert.md b/docs/zh/12-taos-sql/05-insert.md
index 59af9c55ed076fb23814a24a5d2429e51d5fc051..5e64827a8f13cd9fd5de1f27129ad4859ad3cd27 100644
--- a/docs/zh/12-taos-sql/05-insert.md
+++ b/docs/zh/12-taos-sql/05-insert.md
@@ -17,6 +17,8 @@ INSERT INTO
[(field1_name, ...)]
VALUES (field1_value, ...) [(field1_value2, ...) ...] | FILE csv_file_path
...];
+
+INSERT INTO tb_name [(field1_name, ...)] subquery
```
**关于时间戳**
@@ -38,7 +40,7 @@ INSERT INTO
4. FILE 语法表示数据来自于 CSV 文件(英文逗号分隔、英文单引号括住每个值),CSV 文件无需表头。
-5. 无论使用哪种语法,均可以在一条 INSERT 语句中同时向多个表插入数据。
+5. `INSERT ... VALUES` 语句和 `INSERT ... FILE` 语句均可以在一条 INSERT 语句中同时向多个表插入数据。
6. INSERT 语句是完整解析后再执行的,对如下语句,不会再出现数据错误但建表成功的情况:
@@ -48,6 +50,8 @@ INSERT INTO
7. 对于向多个子表插入数据的情况,依然会有部分数据写入失败,部分数据写入成功的情况。这是因为多个子表可能分布在不同的 VNODE 上,客户端将 INSERT 语句完整解析后,将数据发往各个涉及的 VNODE 上,每个 VNODE 独立进行写入操作。如果某个 VNODE 因为某些原因(比如网络问题或磁盘故障)导致写入失败,并不会影响其他 VNODE 节点的写入。
+8. 可以使用 `INSERT ... subquery` 语句将 TDengine 中的数据插入到指定表中。subquery 可以是任意的查询语句。此语法只能用于子表和普通表,且不支持自动建表。
+
## 插入一条记录
指定已经创建好的数据子表的表名,并通过 VALUES 关键字提供一行或多行数据,即可向数据库写入这些数据。例如,执行如下语句可以写入一行记录:
diff --git a/docs/zh/12-taos-sql/06-select.md b/docs/zh/12-taos-sql/06-select.md
index 0c305231e03e3a4fa4ea9ce51f95ea241e43d20a..b3b8ef38873d20acaefa13515ff0d5785c5acc86 100644
--- a/docs/zh/12-taos-sql/06-select.md
+++ b/docs/zh/12-taos-sql/06-select.md
@@ -53,11 +53,6 @@ window_clause: {
| STATE_WINDOW(col)
| INTERVAL(interval_val [, interval_offset]) [SLIDING (sliding_val)] [WATERMARK(watermark_val)] [FILL(fill_mod_and_val)]
-changes_option: {
- DURATION duration_val
- | ROWS rows_val
-}
-
group_by_clause:
GROUP BY expr [, expr] ... HAVING condition
@@ -74,7 +69,7 @@ order_expr:
### 通配符
-通配符 \* 可以用于代指全部列。对于普通表,结果中只有普通列。对于超级表和子表,还包含了 TAG 列。
+通配符 \* 可以用于代指全部列。对于普通表和子表,结果中只有普通列。对于超级表,还包含了 TAG 列。
```sql
SELECT * FROM d1001;
@@ -109,7 +104,7 @@ SELECT location, groupid, current FROM d1001 LIMIT 2;
### 结果去重
-`DISINTCT` 关键字可以对结果集中的一列或多列进行去重,去除的列既可以是标签列也可以是数据列。
+`DISTINCT` 关键字可以对结果集中的一列或多列进行去重,去除的列既可以是标签列也可以是数据列。
对标签列去重:
@@ -127,7 +122,6 @@ SELECT DISTINCT col_name [, col_name ...] FROM tb_name;
1. cfg 文件中的配置参数 maxNumOfDistinctRes 将对 DISTINCT 能够输出的数据行数进行限制。其最小值是 100000,最大值是 100000000,默认值是 10000000。如果实际计算结果超出了这个限制,那么会仅输出这个数量范围内的部分。
2. 由于浮点数天然的精度机制原因,在特定情况下,对 FLOAT 和 DOUBLE 列使用 DISTINCT 并不能保证输出值的完全唯一性。
-3. 在当前版本下,DISTINCT 不能在嵌套查询的子查询中使用,也不能与聚合函数、GROUP BY、或 JOIN 在同一条语句中混用。
:::
@@ -143,6 +137,8 @@ taos> SELECT ts, ts AS primary_key_ts FROM d1001;
### 伪列
+**伪列**: 伪列的行为表现与普通数据列相似但其并不实际存储在表中。可以查询伪列,但不能对其做插入、更新和删除的操作。伪列有点像没有参数的函数。下面介绍是可用的伪列:
+
**TBNAME**
`TBNAME` 可以视为超级表中一个特殊的标签,代表子表的表名。
@@ -186,6 +182,14 @@ TDengine 中,所有表的第一列都必须是时间戳类型,且为其主
select _rowts, max(current) from meters;
```
+**\_IROWTS**
+
+\_irowts 伪列只能与 interp 函数一起使用,用于返回 interp 函数插值结果对应的时间戳列。
+
+```sql
+select _irowts, interp(current) from meters range('2020-01-01 10:00:00', '2020-01-01 10:30:00') every(1s) fill(linear);
+```
+
## 查询对象
FROM 关键字后面可以是若干个表(超级表)列表,也可以是子查询的结果。
@@ -355,19 +359,15 @@ SELECT ... FROM (SELECT ... FROM ...) ...;
:::info
-- 目前仅支持一层嵌套,也即不能在子查询中再嵌入子查询。
-- 内层查询的返回结果将作为“虚拟表”供外层查询使用,此虚拟表可以使用 AS 语法做重命名,以便于外层查询中方便引用。
-- 目前不能在“连续查询”功能中使用子查询。
+- 内层查询的返回结果将作为“虚拟表”供外层查询使用,此虚拟表建议起别名,以便于外层查询中方便引用。
- 在内层和外层查询中,都支持普通的表间/超级表间 JOIN。内层查询的计算结果也可以再参与数据子表的 JOIN 操作。
-- 目前内层查询、外层查询均不支持 UNION 操作。
- 内层查询支持的功能特性与非嵌套的查询语句能力是一致的。
- 内层查询的 ORDER BY 子句一般没有意义,建议避免这样的写法以免无谓的资源消耗。
- 与非嵌套的查询语句相比,外层查询所能支持的功能特性存在如下限制:
- 计算函数部分:
- - 如果内层查询的结果数据未提供时间戳,那么计算过程依赖时间戳的函数在外层会无法正常工作。例如:TOP, BOTTOM, FIRST, LAST, DIFF。
- - 计算过程需要两遍扫描的函数,在外层查询中无法正常工作。例如:此类函数包括:STDDEV, PERCENTILE。
- - 外层查询中不支持 IN 算子,但在内层中可以使用。
- - 外层查询不支持 GROUP BY。
+ - 如果内层查询的结果数据未提供时间戳,那么计算过程隐式依赖时间戳的函数在外层会无法正常工作。例如:INTERP, DERIVATIVE, IRATE, LAST_ROW, FIRST, LAST, TWA, STATEDURATION, TAIL, UNIQUE。
+ - 如果内层查询的结果数据不是按时间戳有序,那么计算过程依赖数据按时间有序的函数在外层会无法正常工作。例如:LEASTSQUARES, ELAPSED, INTERP, DERIVATIVE, IRATE, TWA, DIFF, STATECOUNT, STATEDURATION, CSUM, MAVG, TAIL, UNIQUE。
+ - 计算过程需要两遍扫描的函数,在外层查询中无法正常工作。例如:此类函数包括:PERCENTILE。
:::
diff --git a/docs/zh/12-taos-sql/10-function.md b/docs/zh/12-taos-sql/10-function.md
index af31a1d4bdecce839cb072b440293ab7219f14d2..4c33b5232ac7d7511d1ff4bd121c389ce84d9a41 100644
--- a/docs/zh/12-taos-sql/10-function.md
+++ b/docs/zh/12-taos-sql/10-function.md
@@ -14,7 +14,7 @@ toc_max_heading_level: 4
#### ABS
```sql
-SELECT ABS(field_name) FROM { tb_name | stb_name } [WHERE clause]
+ABS(expr)
```
**功能说明**:获得指定字段的绝对值。
@@ -32,7 +32,7 @@ SELECT ABS(field_name) FROM { tb_name | stb_name } [WHERE clause]
#### ACOS
```sql
-SELECT ACOS(field_name) FROM { tb_name | stb_name } [WHERE clause]
+ACOS(expr)
```
**功能说明**:获得指定字段的反余弦结果。
@@ -50,7 +50,7 @@ SELECT ACOS(field_name) FROM { tb_name | stb_name } [WHERE clause]
#### ASIN
```sql
-SELECT ASIN(field_name) FROM { tb_name | stb_name } [WHERE clause]
+ASIN(expr)
```
**功能说明**:获得指定字段的反正弦结果。
@@ -69,7 +69,7 @@ SELECT ASIN(field_name) FROM { tb_name | stb_name } [WHERE clause]
#### ATAN
```sql
-SELECT ATAN(field_name) FROM { tb_name | stb_name } [WHERE clause]
+ATAN(expr)
```
**功能说明**:获得指定字段的反正切结果。
@@ -88,7 +88,7 @@ SELECT ATAN(field_name) FROM { tb_name | stb_name } [WHERE clause]
#### CEIL
```sql
-SELECT CEIL(field_name) FROM { tb_name | stb_name } [WHERE clause];
+CEIL(expr)
```
**功能说明**:获得指定字段的向上取整数的结果。
@@ -106,7 +106,7 @@ SELECT CEIL(field_name) FROM { tb_name | stb_name } [WHERE clause];
#### COS
```sql
-SELECT COS(field_name) FROM { tb_name | stb_name } [WHERE clause]
+COS(expr)
```
**功能说明**:获得指定字段的余弦结果。
@@ -124,19 +124,19 @@ SELECT COS(field_name) FROM { tb_name | stb_name } [WHERE clause]
#### FLOOR
```sql
-SELECT FLOOR(field_name) FROM { tb_name | stb_name } [WHERE clause];
+FLOOR(expr)
```
-**功能说明**:获得指定字段的向下取整数的结果。
+**功能说明**:获得指定字段的向下取整数的结果。
其他使用说明参见 CEIL 函数描述。
#### LOG
```sql
-SELECT LOG(field_name[, base]) FROM { tb_name | stb_name } [WHERE clause]
+LOG(expr1[, expr2])
```
-**功能说明**:获得指定字段对于底数 base 的对数。如果 base 参数省略,则返回指定字段的自然对数值。
+**功能说明**:获得 expr1 对于底数 expr2 的对数。如果 expr2 参数省略,则返回指定字段的自然对数值。
**返回结果类型**:DOUBLE。
@@ -152,10 +152,10 @@ SELECT LOG(field_name[, base]) FROM { tb_name | stb_name } [WHERE clause]
#### POW
```sql
-SELECT POW(field_name, power) FROM { tb_name | stb_name } [WHERE clause]
+POW(expr1, expr2)
```
-**功能说明**:获得指定字段的指数为 power 的幂。
+**功能说明**:获得 expr1 的指数为 expr2 的幂。
**返回结果类型**:DOUBLE。
@@ -171,17 +171,17 @@ SELECT POW(field_name, power) FROM { tb_name | stb_name } [WHERE clause]
#### ROUND
```sql
-SELECT ROUND(field_name) FROM { tb_name | stb_name } [WHERE clause];
+ROUND(expr)
```
-**功能说明**:获得指定字段的四舍五入的结果。
+**功能说明**:获得指定字段的四舍五入的结果。
其他使用说明参见 CEIL 函数描述。
#### SIN
```sql
-SELECT SIN(field_name) FROM { tb_name | stb_name } [WHERE clause]
+SIN(expr)
```
**功能说明**:获得指定字段的正弦结果。
@@ -199,7 +199,7 @@ SELECT SIN(field_name) FROM { tb_name | stb_name } [WHERE clause]
#### SQRT
```sql
-SELECT SQRT(field_name) FROM { tb_name | stb_name } [WHERE clause]
+SQRT(expr)
```
**功能说明**:获得指定字段的平方根。
@@ -217,7 +217,7 @@ SELECT SQRT(field_name) FROM { tb_name | stb_name } [WHERE clause]
#### TAN
```sql
-SELECT TAN(field_name) FROM { tb_name | stb_name } [WHERE clause]
+TAN(expr)
```
**功能说明**:获得指定字段的正切结果。
@@ -239,7 +239,7 @@ SELECT TAN(field_name) FROM { tb_name | stb_name } [WHERE clause]
#### CHAR_LENGTH
```sql
-SELECT CHAR_LENGTH(str|column) FROM { tb_name | stb_name } [WHERE clause]
+CHAR_LENGTH(expr)
```
**功能说明**:以字符计数的字符串长度。
@@ -255,7 +255,7 @@ SELECT CHAR_LENGTH(str|column) FROM { tb_name | stb_name } [WHERE clause]
#### CONCAT
```sql
-SELECT CONCAT(str1|column1, str2|column2, ...) FROM { tb_name | stb_name } [WHERE clause]
+CONCAT(expr1, expr2 [, expr] ... )
```
**功能说明**:字符串连接函数。
@@ -272,7 +272,7 @@ SELECT CONCAT(str1|column1, str2|column2, ...) FROM { tb_name | stb_name } [WHER
#### CONCAT_WS
```sql
-SELECT CONCAT_WS(separator, str1|column1, str2|column2, ...) FROM { tb_name | stb_name } [WHERE clause]
+CONCAT_WS(separator_expr, expr1, expr2 [, expr] ...)
```
**功能说明**:带分隔符的字符串连接函数。
@@ -289,7 +289,7 @@ SELECT CONCAT_WS(separator, str1|column1, str2|column2, ...) FROM { tb_name | st
#### LENGTH
```sql
-SELECT LENGTH(str|column) FROM { tb_name | stb_name } [WHERE clause]
+LENGTH(expr)
```
**功能说明**:以字节计数的字符串长度。
@@ -306,7 +306,7 @@ SELECT LENGTH(str|column) FROM { tb_name | stb_name } [WHERE clause]
#### LOWER
```sql
-SELECT LOWER(str|column) FROM { tb_name | stb_name } [WHERE clause]
+LOWER(expr)
```
**功能说明**:将字符串参数值转换为全小写字母。
@@ -323,7 +323,7 @@ SELECT LOWER(str|column) FROM { tb_name | stb_name } [WHERE clause]
#### LTRIM
```sql
-SELECT LTRIM(str|column) FROM { tb_name | stb_name } [WHERE clause]
+LTRIM(expr)
```
**功能说明**:返回清除左边空格后的字符串。
@@ -340,7 +340,7 @@ SELECT LTRIM(str|column) FROM { tb_name | stb_name } [WHERE clause]
#### RTRIM
```sql
-SELECT LTRIM(str|column) FROM { tb_name | stb_name } [WHERE clause]
+LTRIM(expr)
```
**功能说明**:返回清除右边空格后的字符串。
@@ -357,7 +357,7 @@ SELECT LTRIM(str|column) FROM { tb_name | stb_name } [WHERE clause]
#### SUBSTR
```sql
-SELECT SUBSTR(str,pos[,len]) FROM { tb_name | stb_name } [WHERE clause]
+SUBSTR(expr, pos [,len])
```
**功能说明**:从源字符串 str 中的指定位置 pos 开始取一个长度为 len 的子串并返回。如果输入参数 len 被忽略,返回的子串包含从 pos 开始的整个字串。
@@ -374,7 +374,7 @@ SELECT SUBSTR(str,pos[,len]) FROM { tb_name | stb_name } [WHERE clause]
#### UPPER
```sql
-SELECT UPPER(str|column) FROM { tb_name | stb_name } [WHERE clause]
+UPPER(expr)
```
**功能说明**:将字符串参数值转换为全大写字母。
@@ -395,10 +395,10 @@ SELECT UPPER(str|column) FROM { tb_name | stb_name } [WHERE clause]
#### CAST
```sql
-SELECT CAST(expression AS type_name) FROM { tb_name | stb_name } [WHERE clause]
+CAST(expr AS type_name)
```
-**功能说明**:数据类型转换函数,返回 expression 转换为 type_name 指定的类型后的结果。只适用于 select 子句中。
+**功能说明**:数据类型转换函数,返回 expr 转换为 type_name 指定的类型后的结果。只适用于 select 子句中。
**返回结果类型**:CAST 中指定的类型(type_name)。
@@ -419,7 +419,7 @@ SELECT CAST(expression AS type_name) FROM { tb_name | stb_name } [WHERE clause]
#### TO_ISO8601
```sql
-SELECT TO_ISO8601(ts[, timezone]) FROM { tb_name | stb_name } [WHERE clause];
+TO_ISO8601(expr [, timezone])
```
**功能说明**:将 UNIX 时间戳转换成为 ISO8601 标准的日期时间格式,并附加时区信息。timezone 参数允许用户为输出结果指定附带任意时区信息。如果 timezone 参数省略,输出结果则附带当前客户端的系统时区信息。
@@ -435,14 +435,14 @@ SELECT TO_ISO8601(ts[, timezone]) FROM { tb_name | stb_name } [WHERE clause];
**使用说明**:
- timezone 参数允许输入的时区格式为: [z/Z, +/-hhmm, +/-hh, +/-hh:mm]。例如,TO_ISO8601(1, "+00:00")。
-- 如果输入是表示 UNIX 时间戳的整形,返回格式精度由时间戳的位数决定;
+- 如果输入是表示 UNIX 时间戳的整形,返回格式精度由时间戳的位数决定;
- 如果输入是 TIMESTAMP 类型的列,返回格式的时间戳精度与当前 DATABASE 设置的时间精度一致。
#### TO_JSON
```sql
-SELECT TO_JSON(str_literal) FROM { tb_name | stb_name } [WHERE clause];
+TO_JSON(str_literal)
```
**功能说明**: 将字符串常量转换为 JSON 类型。
@@ -459,7 +459,7 @@ SELECT TO_JSON(str_literal) FROM { tb_name | stb_name } [WHERE clause];
#### TO_UNIXTIMESTAMP
```sql
-SELECT TO_UNIXTIMESTAMP(datetime_string) FROM { tb_name | stb_name } [WHERE clause];
+TO_UNIXTIMESTAMP(expr)
```
**功能说明**:将日期时间格式的字符串转换成为 UNIX 时间戳。
@@ -487,9 +487,7 @@ SELECT TO_UNIXTIMESTAMP(datetime_string) FROM { tb_name | stb_name } [WHERE clau
#### NOW
```sql
-SELECT NOW() FROM { tb_name | stb_name } [WHERE clause];
-SELECT select_expr FROM { tb_name | stb_name } WHERE ts_col cond_operatior NOW();
-INSERT INTO tb_name VALUES (NOW(), ...);
+NOW()
```
**功能说明**:返回客户端当前系统时间。
@@ -512,7 +510,7 @@ INSERT INTO tb_name VALUES (NOW(), ...);
#### TIMEDIFF
```sql
-SELECT TIMEDIFF(ts | datetime_string1, ts | datetime_string2 [, time_unit]) FROM { tb_name | stb_name } [WHERE clause];
+TIMEDIFF(expr1, expr2 [, time_unit])
```
**功能说明**:计算两个时间戳之间的差值,并近似到时间单位 time_unit 指定的精度。
@@ -535,7 +533,7 @@ SELECT TIMEDIFF(ts | datetime_string1, ts | datetime_string2 [, time_unit]) FROM
#### TIMETRUNCATE
```sql
-SELECT TIMETRUNCATE(ts | datetime_string , time_unit) FROM { tb_name | stb_name } [WHERE clause];
+TIMETRUNCATE(expr, time_unit)
```
**功能说明**:将时间戳按照指定时间单位 time_unit 进行截断。
@@ -556,7 +554,7 @@ SELECT TIMETRUNCATE(ts | datetime_string , time_unit) FROM { tb_name | stb_name
#### TIMEZONE
```sql
-SELECT TIMEZONE() FROM { tb_name | stb_name } [WHERE clause];
+TIMEZONE()
```
**功能说明**:返回客户端当前时区信息。
@@ -571,9 +569,7 @@ SELECT TIMEZONE() FROM { tb_name | stb_name } [WHERE clause];
#### TODAY
```sql
-SELECT TODAY() FROM { tb_name | stb_name } [WHERE clause];
-SELECT select_expr FROM { tb_name | stb_name } WHERE ts_col cond_operatior TODAY()];
-INSERT INTO tb_name VALUES (TODAY(), ...);
+TODAY()
```
**功能说明**:返回客户端当日零时的系统时间。
@@ -600,7 +596,12 @@ TDengine 支持针对数据的聚合查询。提供如下聚合函数。
### APERCENTILE
```sql
-SELECT APERCENTILE(field_name, P[, algo_type]) FROM { tb_name | stb_name } [WHERE clause]
+APERCENTILE(expr, p [, algo_type])
+
+algo_type: {
+ "default"
+ | "t-digest"
+}
```
**功能说明**:统计表/超级表中指定列的值的近似百分比分位数,与 PERCENTILE 函数相似,但是返回近似结果。
@@ -612,13 +613,14 @@ SELECT APERCENTILE(field_name, P[, algo_type]) FROM { tb_name | stb_name } [WHER
**适用于**:表和超级表。
**说明**:
-- P值范围是[0,100],当为0时等同于MIN,为100时等同于MAX。
+- p值范围是[0,100],当为0时等同于MIN,为100时等同于MAX。
- algo_type 取值为 "default" 或 "t-digest"。 输入为 "default" 时函数使用基于直方图算法进行计算。输入为 "t-digest" 时使用t-digest算法计算分位数的近似结果。如果不指定 algo_type 则使用 "default" 算法。
+- "t-digest"算法的近似结果对于输入数据顺序敏感,对超级表查询时不同的输入排序结果可能会有微小的误差。
### AVG
```sql
-SELECT AVG(field_name) FROM tb_name [WHERE clause];
+AVG(expr)
```
**功能说明**:统计指定字段的平均值。
@@ -633,7 +635,7 @@ SELECT AVG(field_name) FROM tb_name [WHERE clause];
### COUNT
```sql
-SELECT COUNT([*|field_name]) FROM tb_name [WHERE clause];
+COUNT({* | expr})
```
**功能说明**:统计指定字段的记录行数。
@@ -653,7 +655,7 @@ SELECT COUNT([*|field_name]) FROM tb_name [WHERE clause];
### ELAPSED
```sql
-SELECT ELAPSED(ts_primary_key [, time_unit]) FROM { tb_name | stb_name } [WHERE clause] [INTERVAL(interval [, offset]) [SLIDING sliding]];
+ELAPSED(ts_primary_key [, time_unit])
```
**功能说明**:elapsed函数表达了统计周期内连续的时间长度,和twa函数配合使用可以计算统计曲线下的面积。在通过INTERVAL子句指定窗口的情况下,统计在给定时间范围内的每个窗口内有数据覆盖的时间范围;如果没有INTERVAL子句,则返回整个给定时间范围内的有数据覆盖的时间范围。注意,ELAPSED返回的并不是时间范围的绝对值,而是绝对值除以time_unit所得到的单位个数。
@@ -665,7 +667,7 @@ SELECT ELAPSED(ts_primary_key [, time_unit]) FROM { tb_name | stb_name } [WHERE
**适用于**: 表,超级表,嵌套查询的外层查询
**说明**:
-- field_name参数只能是表的第一列,即 TIMESTAMP 类型的主键列。
+- ts_primary_key参数只能是表的第一列,即 TIMESTAMP 类型的主键列。
- 按time_unit参数指定的时间单位返回,最小是数据库的时间分辨率。time_unit 参数未指定时,以数据库的时间分辨率为时间单位。支持的时间单位 time_unit 如下:
1b(纳秒), 1u(微秒),1a(毫秒),1s(秒),1m(分),1h(小时),1d(天), 1w(周)。
- 可以和interval组合使用,返回每个时间窗口的时间戳差值。需要特别注意的是,除第一个时间窗口和最后一个时间窗口外,中间窗口的时间戳差值均为窗口长度。
@@ -679,14 +681,14 @@ SELECT ELAPSED(ts_primary_key [, time_unit]) FROM { tb_name | stb_name } [WHERE
### LEASTSQUARES
```sql
-SELECT LEASTSQUARES(field_name, start_val, step_val) FROM tb_name [WHERE clause];
+LEASTSQUARES(expr, start_val, step_val)
```
**功能说明**:统计表中某列的值是主键(时间戳)的拟合直线方程。start_val 是自变量初始值,step_val 是自变量的步长值。
**返回数据类型**:字符串表达式(斜率, 截距)。
-**适用数据类型**:field_name 必须是数值类型。
+**适用数据类型**:expr 必须是数值类型。
**适用于**:表。
@@ -694,7 +696,7 @@ SELECT LEASTSQUARES(field_name, start_val, step_val) FROM tb_name [WHERE clause]
### SPREAD
```sql
-SELECT SPREAD(field_name) FROM { tb_name | stb_name } [WHERE clause];
+SPREAD(expr)
```
**功能说明**:统计表中某列的最大值和最小值之差。
@@ -709,7 +711,7 @@ SELECT SPREAD(field_name) FROM { tb_name | stb_name } [WHERE clause];
### STDDEV
```sql
-SELECT STDDEV(field_name) FROM tb_name [WHERE clause];
+STDDEV(expr)
```
**功能说明**:统计表中某列的均方差。
@@ -724,7 +726,7 @@ SELECT STDDEV(field_name) FROM tb_name [WHERE clause];
### SUM
```sql
-SELECT SUM(field_name) FROM tb_name [WHERE clause];
+SUM(expr)
```
**功能说明**:统计表/超级表中某列的和。
@@ -739,7 +741,7 @@ SELECT SUM(field_name) FROM tb_name [WHERE clause];
### HYPERLOGLOG
```sql
-SELECT HYPERLOGLOG(field_name) FROM { tb_name | stb_name } [WHERE clause];
+HYPERLOGLOG(expr)
```
**功能说明**:
@@ -756,7 +758,7 @@ SELECT HYPERLOGLOG(field_name) FROM { tb_name | stb_name } [WHERE clause];
### HISTOGRAM
```sql
-SELECT HISTOGRAM(field_name,bin_type, bin_description, normalized) FROM tb_name [WHERE clause];
+HISTOGRAM(expr,bin_type, bin_description, normalized)
```
**功能说明**:统计数据按照用户指定区间的分布。
@@ -769,14 +771,14 @@ SELECT HISTOGRAM(field_name,bin_type, bin_description, normalized) FROM tb_nam
**详细说明**:
- bin_type 用户指定的分桶类型, 有效输入类型为"user_input“, ”linear_bin", "log_bin"。
-- bin_description 描述如何生成分桶区间,针对三种桶类型,分别为以下描述格式(均为 JSON 格式字符串):
- - "user_input": "[1, 3, 5, 7]"
+- bin_description 描述如何生成分桶区间,针对三种桶类型,分别为以下描述格式(均为 JSON 格式字符串):
+ - "user_input": "[1, 3, 5, 7]"
用户指定 bin 的具体数值。
-
+
- "linear_bin": "{"start": 0.0, "width": 5.0, "count": 5, "infinity": true}"
"start" 表示数据起始点,"width" 表示每次 bin 偏移量, "count" 为 bin 的总数,"infinity" 表示是否添加(-inf, inf)作为区间起点和终点,
生成区间为[-inf, 0.0, 5.0, 10.0, 15.0, 20.0, +inf]。
-
+
- "log_bin": "{"start":1.0, "factor": 2.0, "count": 5, "infinity": true}"
"start" 表示数据起始点,"factor" 表示按指数递增的因子,"count" 为 bin 的总数,"infinity" 表示是否添加(-inf, inf)作为区间起点和终点,
生成区间为[-inf, 1.0, 2.0, 4.0, 8.0, 16.0, +inf]。
@@ -786,7 +788,7 @@ SELECT HISTOGRAM(field_name,bin_type, bin_description, normalized) FROM tb_nam
### PERCENTILE
```sql
-SELECT PERCENTILE(field_name, P) FROM { tb_name } [WHERE clause];
+PERCENTILE(expr, p)
```
**功能说明**:统计表中某列的值百分比分位数。
@@ -807,7 +809,7 @@ SELECT PERCENTILE(field_name, P) FROM { tb_name } [WHERE clause];
### BOTTOM
```sql
-SELECT BOTTOM(field_name, K) FROM { tb_name | stb_name } [WHERE clause];
+BOTTOM(expr, k)
```
**功能说明**:统计表/超级表中某列的值最小 _k_ 个非 NULL 值。如果多条数据取值一样,全部取用又会超出 k 条限制时,系统会从相同值中随机选取符合要求的数量返回。
@@ -827,7 +829,7 @@ SELECT BOTTOM(field_name, K) FROM { tb_name | stb_name } [WHERE clause];
### FIRST
```sql
-SELECT FIRST(field_name) FROM { tb_name | stb_name } [WHERE clause];
+FIRST(expr)
```
**功能说明**:统计表/超级表中某列的值最先写入的非 NULL 值。
@@ -847,7 +849,7 @@ SELECT FIRST(field_name) FROM { tb_name | stb_name } [WHERE clause];
### INTERP
```sql
-SELECT INTERP(field_name) FROM { tb_name | stb_name } [WHERE where_condition] RANGE(timestamp1,timestamp2) EVERY(interval) FILL({ VALUE | PREV | NULL | LINEAR | NEXT});
+INTERP(expr)
```
**功能说明**:返回指定时间截面指定列的记录值或插值。
@@ -866,11 +868,12 @@ SELECT INTERP(field_name) FROM { tb_name | stb_name } [WHERE where_condition] RA
- INTERP 根据 EVERY 字段来确定输出时间范围内的结果条数,即从 timestamp1 开始每隔固定长度的时间(EVERY 值)进行插值。如果没有指定 EVERY,则默认窗口大小为无穷大,即从 timestamp1 开始只有一个窗口。
- INTERP 根据 FILL 字段来决定在每个符合输出条件的时刻如何进行插值。
- INTERP 只能在一个时间序列内进行插值,因此当作用于超级表时必须跟 partition by tbname 一起使用。
+- INTERP 可以与伪列 _irowts 一起使用,返回插值点所对应的时间戳(3.0.1.4版本以后支持)。
### LAST
```sql
-SELECT LAST(field_name) FROM { tb_name | stb_name } [WHERE clause];
+LAST(expr)
```
**功能说明**:统计表/超级表中某列的值最后写入的非 NULL 值。
@@ -891,7 +894,7 @@ SELECT LAST(field_name) FROM { tb_name | stb_name } [WHERE clause];
### LAST_ROW
```sql
-SELECT LAST_ROW(field_name) FROM { tb_name | stb_name };
+LAST_ROW(expr)
```
**功能说明**:返回表/超级表的最后一条记录。
@@ -910,7 +913,7 @@ SELECT LAST_ROW(field_name) FROM { tb_name | stb_name };
### MAX
```sql
-SELECT MAX(field_name) FROM { tb_name | stb_name } [WHERE clause];
+MAX(expr)
```
**功能说明**:统计表/超级表中某列的值最大值。
@@ -925,7 +928,7 @@ SELECT MAX(field_name) FROM { tb_name | stb_name } [WHERE clause];
### MIN
```sql
-SELECT MIN(field_name) FROM {tb_name | stb_name} [WHERE clause];
+MIN(expr)
```
**功能说明**:统计表/超级表中某列的值最小值。
@@ -940,7 +943,7 @@ SELECT MIN(field_name) FROM {tb_name | stb_name} [WHERE clause];
### MODE
```sql
-SELECT MODE(field_name) FROM tb_name [WHERE clause];
+MODE(expr)
```
**功能说明**:返回出现频率最高的值,若存在多个频率相同的最高值,输出NULL。
@@ -955,7 +958,7 @@ SELECT MODE(field_name) FROM tb_name [WHERE clause];
### SAMPLE
```sql
-SELECT SAMPLE(field_name, K) FROM { tb_name | stb_name } [WHERE clause]
+SAMPLE(expr, k)
```
**功能说明**: 获取数据的 k 个采样值。参数 k 的合法输入范围是 1≤ k ≤ 1000。
@@ -968,7 +971,7 @@ SELECT SAMPLE(field_name, K) FROM { tb_name | stb_name } [WHERE clause]
**适用于**:表和超级表。
-**使用说明**:
+**使用说明**:
- 不能参与表达式计算;该函数可以应用在普通表和超级表上;
- 使用在超级表上的时候,需要搭配 PARTITION by tbname 使用,将结果强制规约到单个时间线。
@@ -977,7 +980,7 @@ SELECT SAMPLE(field_name, K) FROM { tb_name | stb_name } [WHERE clause]
### TAIL
```sql
-SELECT TAIL(field_name, k, offset_val) FROM {tb_name | stb_name} [WHERE clause];
+TAIL(expr, k [, offset_rows])
```
**功能说明**:返回跳过最后 offset_val 个,然后取连续 k 个记录,不忽略 NULL 值。offset_val 可以不输入。此时返回最后的 k 个记录。当有 offset_val 输入的情况下,该函数功能等效于 `order by ts desc LIMIT k OFFSET offset_val`。
@@ -994,7 +997,7 @@ SELECT TAIL(field_name, k, offset_val) FROM {tb_name | stb_name} [WHERE clause];
### TOP
```sql
-SELECT TOP(field_name, K) FROM { tb_name | stb_name } [WHERE clause];
+TOP(expr, k)
```
**功能说明**: 统计表/超级表中某列的值最大 _k_ 个非 NULL 值。如果多条数据取值一样,全部取用又会超出 k 条限制时,系统会从相同值中随机选取符合要求的数量返回。
@@ -1014,7 +1017,7 @@ SELECT TOP(field_name, K) FROM { tb_name | stb_name } [WHERE clause];
### UNIQUE
```sql
-SELECT UNIQUE(field_name) FROM {tb_name | stb_name} [WHERE clause];
+UNIQUE(expr)
```
**功能说明**:返回该列的数值首次出现的值。该函数功能与 distinct 相似,但是可以匹配标签和时间戳信息。可以针对除时间列以外的字段进行查询,可以匹配标签和时间戳,其中的标签和时间戳是第一次出现时刻的标签和时间戳。
@@ -1033,7 +1036,7 @@ SELECT UNIQUE(field_name) FROM {tb_name | stb_name} [WHERE clause];
### CSUM
```sql
-SELECT CSUM(field_name) FROM { tb_name | stb_name } [WHERE clause]
+CSUM(expr)
```
**功能说明**:累加和(Cumulative sum),输出行与输入行数相同。
@@ -1046,17 +1049,22 @@ SELECT CSUM(field_name) FROM { tb_name | stb_name } [WHERE clause]
**适用于**:表和超级表。
-**使用说明**:
-
+**使用说明**:
+
- 不支持 +、-、*、/ 运算,如 csum(col1) + csum(col2)。
-- 只能与聚合(Aggregation)函数一起使用。 该函数可以应用在普通表和超级表上。
+- 只能与聚合(Aggregation)函数一起使用。 该函数可以应用在普通表和超级表上。
- 使用在超级表上的时候,需要搭配 PARTITION BY tbname使用,将结果强制规约到单个时间线。
### DERIVATIVE
```sql
-SELECT DERIVATIVE(field_name, time_interval, ignore_negative) FROM tb_name [WHERE clause];
+DERIVATIVE(expr, time_interval, ignore_negative)
+
+ignore_negative: {
+ 0
+ | 1
+}
```
**功能说明**:统计表中某列数值的单位变化率。其中单位时间区间的长度可以通过 time_interval 参数指定,最小可以是 1 秒(1s);ignore_negative 参数的值可以是 0 或 1,为 1 时表示忽略负值。
@@ -1067,15 +1075,20 @@ SELECT DERIVATIVE(field_name, time_interval, ignore_negative) FROM tb_name [WHER
**适用于**:表和超级表。
-**使用说明**:
-
+**使用说明**:
+
- DERIVATIVE 函数可以在由 PARTITION BY 划分出单独时间线的情况下用于超级表(也即 PARTITION BY tbname)。
- 可以与选择相关联的列一起使用。 例如: select \_rowts, DERIVATIVE() from。
### DIFF
```sql
-SELECT {DIFF(field_name, ignore_negative) | DIFF(field_name)} FROM tb_name [WHERE clause];
+DIFF(expr [, ignore_negative])
+
+ignore_negative: {
+ 0
+ | 1
+}
```
**功能说明**:统计表中某列的值与前一行对应值的差。 ignore_negative 取值为 0|1 , 可以不填,默认值为 0. 不忽略负值。ignore_negative 为 1 时表示忽略负数。
@@ -1086,7 +1099,7 @@ SELECT {DIFF(field_name, ignore_negative) | DIFF(field_name)} FROM tb_name [WHER
**适用于**:表和超级表。
-**使用说明**:
+**使用说明**:
- 输出结果行数是范围内总行数减一,第一行没有结果输出。
- 可以与选择相关联的列一起使用。 例如: select \_rowts, DIFF() from。
@@ -1095,7 +1108,7 @@ SELECT {DIFF(field_name, ignore_negative) | DIFF(field_name)} FROM tb_name [WHER
### IRATE
```sql
-SELECT IRATE(field_name) FROM tb_name WHERE clause;
+IRATE(expr)
```
**功能说明**:计算瞬时增长率。使用时间区间中最后两个样本数据来计算瞬时增长速率;如果这两个值呈递减关系,那么只取最后一个数用于计算,而不是使用二者差值。
@@ -1110,7 +1123,7 @@ SELECT IRATE(field_name) FROM tb_name WHERE clause;
### MAVG
```sql
-SELECT MAVG(field_name, K) FROM { tb_name | stb_name } [WHERE clause]
+MAVG(expr, k)
```
**功能说明**: 计算连续 k 个值的移动平均数(moving average)。如果输入行数小于 k,则无结果输出。参数 k 的合法输入范围是 1≤ k ≤ 1000。
@@ -1123,9 +1136,9 @@ SELECT MAVG(field_name, K) FROM { tb_name | stb_name } [WHERE clause]
**适用于**:表和超级表。
-**使用说明**:
-
-- 不支持 +、-、*、/ 运算,如 mavg(col1, k1) + mavg(col2, k1);
+**使用说明**:
+
+- 不支持 +、-、*、/ 运算,如 mavg(col1, k1) + mavg(col2, k1);
- 只能与普通列,选择(Selection)、投影(Projection)函数一起使用,不能与聚合(Aggregation)函数一起使用;
- 使用在超级表上的时候,需要搭配 PARTITION BY tbname使用,将结果强制规约到单个时间线。
@@ -1133,7 +1146,7 @@ SELECT MAVG(field_name, K) FROM { tb_name | stb_name } [WHERE clause]
### STATECOUNT
```sql
-SELECT STATECOUNT(field_name, oper, val) FROM { tb_name | stb_name } [WHERE clause];
+STATECOUNT(expr, oper, val)
```
**功能说明**:返回满足某个条件的连续记录的个数,结果作为新的一列追加在每行后面。条件根据参数计算,如果条件为 true 则加 1,条件为 false 则重置为-1,如果数据为 NULL,跳过该条数据。
@@ -1160,14 +1173,14 @@ SELECT STATECOUNT(field_name, oper, val) FROM { tb_name | stb_name } [WHERE clau
### STATEDURATION
```sql
-SELECT stateDuration(field_name, oper, val, unit) FROM { tb_name | stb_name } [WHERE clause];
+STATEDURATION(expr, oper, val, unit)
```
**功能说明**:返回满足某个条件的连续记录的时间长度,结果作为新的一列追加在每行后面。条件根据参数计算,如果条件为 true 则加上两个记录之间的时间长度(第一个满足条件的记录时间长度记为 0),条件为 false 则重置为-1,如果数据为 NULL,跳过该条数据。
**参数范围**:
-- oper : "LT" (小于)、"GT"(大于)、"LE"(小于等于)、"GE"(大于等于)、"NE"(不等于)、"EQ"(等于),不区分大小写。
+- oper : `'LT'` (小于)、`'GT'`(大于)、`'LE'`(小于等于)、`'GE'`(大于等于)、`'NE'`(不等于)、`'EQ'`(等于),不区分大小写,但需要用`''`包括。
- val : 数值型
- unit : 时间长度的单位,可取值时间单位: 1b(纳秒), 1u(微秒),1a(毫秒),1s(秒),1m(分),1h(小时),1d(天), 1w(周)。如果省略,默认为当前数据库精度。
@@ -1188,7 +1201,7 @@ SELECT stateDuration(field_name, oper, val, unit) FROM { tb_name | stb_name } [W
### TWA
```sql
-SELECT TWA(field_name) FROM tb_name WHERE clause;
+TWA(expr)
```
**功能说明**:时间加权平均函数。统计表中某列在一段时间内的时间加权平均。
@@ -1232,7 +1245,7 @@ SELECT SERVER_VERSION();
### SERVER_STATUS
```sql
-SELECT SERVER_VERSION();
+SELECT SERVER_STATUS();
```
**说明**:返回服务端当前的状态。
diff --git a/docs/zh/12-taos-sql/12-distinguished.md b/docs/zh/12-taos-sql/12-distinguished.md
index b9e06033d6ae26daef8f30e21f341315d9e5edb2..861ef4ebb7f307f6653c72066b5cae7548c14855 100644
--- a/docs/zh/12-taos-sql/12-distinguished.md
+++ b/docs/zh/12-taos-sql/12-distinguished.md
@@ -1,16 +1,16 @@
---
-sidebar_label: 时序数据特色查询
-title: 时序数据特色查询
+sidebar_label: 特色查询
+title: 特色查询
description: TDengine 提供的时序数据特有的查询功能
---
TDengine 是专为时序数据而研发的大数据平台,存储和计算都针对时序数据的特定进行了量身定制,在支持标准 SQL 的基础之上,还提供了一系列贴合时序业务场景的特色查询语法,极大的方便时序场景的应用开发。
-TDengine 提供的特色查询包括标签切分查询和窗口切分查询。
+TDengine 提供的特色查询包括数据切分查询和窗口切分查询。
-## 标签切分查询
+## 数据切分查询
-超级表查询中,当需要针对标签进行数据切分然后在切分出的数据空间内再进行一系列的计算时使用标签切分子句,标签切分的语句如下:
+当需要按一定的维度对数据进行切分然后在切分出的数据空间内再进行一系列的计算时使用数据切分子句,数据切分语句的语法如下:
```sql
PARTITION BY part_list
@@ -18,22 +18,23 @@ PARTITION BY part_list
part_list 可以是任意的标量表达式,包括列、常量、标量函数和它们的组合。
-当 PARTITION BY 和标签一起使用时,TDengine 按如下方式处理标签切分子句:
+TDengine 按如下方式处理数据切分子句:
-- 标签切分子句位于 WHERE 子句之后,且不能和 JOIN 子句一起使用。
-- 标签切分子句将超级表数据按指定的标签组合进行切分,每个切分的分片进行指定的计算。计算由之后的子句定义(窗口子句、GROUP BY 子句或 SELECT 子句)。
-- 标签切分子句可以和窗口切分子句(或 GROUP BY 子句)一起使用,此时后面的子句作用在每个切分的分片上。例如,将数据按标签 location 进行分组,并对每个组按 10 分钟进行降采样,取其最大值。
+- 数据切分子句位于 WHERE 子句之后。
+- 数据切分子句将表数据按指定的维度进行切分,每个切分的分片进行指定的计算。计算由之后的子句定义(窗口子句、GROUP BY 子句或 SELECT 子句)。
+- 数据切分子句可以和窗口切分子句(或 GROUP BY 子句)一起使用,此时后面的子句作用在每个切分的分片上。例如,将数据按标签 location 进行分组,并对每个组按 10 分钟进行降采样,取其最大值。
```sql
select max(current) from meters partition by location interval(10m)
```
+数据切分子句最常见的用法就是在超级表查询中,按标签将子表数据进行切分,然后分别进行计算。特别是 PARTITION BY TBNAME 用法,它将每个子表的数据独立出来,形成一条条独立的时间序列,极大的方便了各种时序场景的统计分析。
## 窗口切分查询
TDengine 支持按时间段窗口切分方式进行聚合结果查询,比如温度传感器每秒采集一次数据,但需查询每隔 10 分钟的温度平均值。这种场景下可以使用窗口子句来获得需要的查询结果。窗口子句用于针对查询的数据集合按照窗口切分成为查询子集并进行聚合,窗口包含时间窗口(time window)、状态窗口(status window)、会话窗口(session window)三种窗口。其中时间窗口又可划分为滑动时间窗口和翻转时间窗口。窗口切分查询语法如下:
```sql
-SELECT function_list FROM tb_name
+SELECT select_list FROM tb_name
[WHERE where_condition]
[SESSION(ts_col, tol_val)]
[STATE_WINDOW(col)]
@@ -43,19 +44,15 @@ SELECT function_list FROM tb_name
在上述语法中的具体限制如下
-### 窗口切分查询中使用函数的限制
-
-- 在聚合查询中,function_list 位置允许使用聚合和选择函数,并要求每个函数仅输出单个结果(例如:COUNT、AVG、SUM、STDDEV、LEASTSQUARES、PERCENTILE、MIN、MAX、FIRST、LAST),而不能使用具有多行输出结果的函数(例如:DIFF 以及四则运算)。
-- 此外 LAST_ROW 查询也不能与窗口聚合同时出现。
-- 标量函数(如:CEIL/FLOOR 等)也不能使用在窗口聚合查询中。
-
### 窗口子句的规则
-- 窗口子句位于标签切分子句之后,GROUP BY 子句之前,且不可以和 GROUP BY 子句一起使用。
+- 窗口子句位于数据切分子句之后,不可以和 GROUP BY 子句一起使用。
- 窗口子句将数据按窗口进行切分,对每个窗口进行 SELECT 列表中的表达式的计算,SELECT 列表中的表达式只能包含:
- 常量。
- - 聚集函数。
+ - _wstart伪列、_wend伪列和_wduration伪列。
+ - 聚集函数(包括选择函数和可以由参数确定输出行数的时序特有函数)。
- 包含上面表达式的表达式。
+ - 且至少包含一个聚集函数。
- 窗口子句不可以和 GROUP BY 子句一起使用。
- WHERE 语句可以指定查询的起止时间和其他过滤条件。
@@ -74,7 +71,7 @@ FILL 语句指定某一窗口区间数据缺失的情况下的填充模式。填
1. 使用 FILL 语句的时候可能生成大量的填充输出,务必指定查询的时间区间。针对每次查询,系统可返回不超过 1 千万条具有插值的结果。
2. 在时间维度聚合中,返回的结果中时间序列严格单调递增。
-3. 如果查询对象是超级表,则聚合函数会作用于该超级表下满足值过滤条件的所有表的数据。如果查询中没有使用 GROUP BY 语句,则返回的结果按照时间序列严格单调递增;如果查询中使用了 GROUP BY 语句分组,则返回结果中每个 GROUP 内不按照时间序列严格单调递增。
+3. 如果查询对象是超级表,则聚合函数会作用于该超级表下满足值过滤条件的所有表的数据。如果查询中没有使用 PARTITION BY 语句,则返回的结果按照时间序列严格单调递增;如果查询中使用了 PARTITION BY 语句分组,则返回结果中每个 PARTITION 内按照时间序列严格单调递增。
:::
@@ -106,7 +103,7 @@ SELECT COUNT(*) FROM temp_tb_1 INTERVAL(1m) SLIDING(2m);
### 状态窗口
-使用整数(布尔值)或字符串来标识产生记录时候设备的状态量。产生的记录如果具有相同的状态量数值则归属于同一个状态窗口,数值改变后该窗口关闭。如下图所示,根据状态量确定的状态窗口分别是[2019-04-28 14:22:07,2019-04-28 14:22:10]和[2019-04-28 14:22:11,2019-04-28 14:22:12]两个。(状态窗口暂不支持对超级表使用)
+使用整数(布尔值)或字符串来标识产生记录时候设备的状态量。产生的记录如果具有相同的状态量数值则归属于同一个状态窗口,数值改变后该窗口关闭。如下图所示,根据状态量确定的状态窗口分别是[2019-04-28 14:22:07,2019-04-28 14:22:10]和[2019-04-28 14:22:11,2019-04-28 14:22:12]两个。

@@ -116,13 +113,19 @@ SELECT COUNT(*) FROM temp_tb_1 INTERVAL(1m) SLIDING(2m);
SELECT COUNT(*), FIRST(ts), status FROM temp_tb_1 STATE_WINDOW(status);
```
+仅关心 status 为 2 时的状态窗口的信息。例如:
+
+```
+SELECT * FROM (SELECT COUNT(*) AS cnt, FIRST(ts) AS fst, status FROM temp_tb_1 STATE_WINDOW(status)) t WHERE status = 2;
+```
+
### 会话窗口
会话窗口根据记录的时间戳主键的值来确定是否属于同一个会话。如下图所示,如果设置时间戳的连续的间隔小于等于 12 秒,则以下 6 条记录构成 2 个会话窗口,分别是:[2019-04-28 14:22:10,2019-04-28 14:22:30]和[2019-04-28 14:23:10,2019-04-28 14:23:30]。因为 2019-04-28 14:22:30 与 2019-04-28 14:23:10 之间的时间间隔是 40 秒,超过了连续时间间隔(12 秒)。

-在 tol_value 时间间隔范围内的结果都认为归属于同一个窗口,如果连续的两条记录的时间超过 tol_val,则自动开启下一个窗口。(会话窗口暂不支持对超级表使用)
+在 tol_value 时间间隔范围内的结果都认为归属于同一个窗口,如果连续的两条记录的时间超过 tol_val,则自动开启下一个窗口。
```
diff --git a/docs/zh/12-taos-sql/14-stream.md b/docs/zh/12-taos-sql/14-stream.md
index 70b062a6ca28549347f78f8eea21c54b1e3bcb81..cd726e0a0ea644f575e16c656eeb4bb2cabf425d 100644
--- a/docs/zh/12-taos-sql/14-stream.md
+++ b/docs/zh/12-taos-sql/14-stream.md
@@ -44,7 +44,7 @@ window_clause: {
```sql
CREATE STREAM avg_vol_s INTO avg_vol AS
-SELECT _wstartts, count(*), avg(voltage) FROM meters PARTITION BY tbname INTERVAL(1m) SLIDING(30s);
+SELECT _wstart, count(*), avg(voltage) FROM meters PARTITION BY tbname INTERVAL(1m) SLIDING(30s);
```
## 流式计算的 partition
@@ -58,7 +58,7 @@ SELECT _wstartts, count(*), avg(voltage) FROM meters PARTITION BY tbname INTERVA
## 删除流式计算
```sql
-DROP STREAM [IF NOT EXISTS] stream_name;
+DROP STREAM [IF EXISTS] stream_name;
```
仅删除流式计算任务,由流式计算写入的数据不会被删除。
diff --git a/docs/zh/12-taos-sql/17-json.md b/docs/zh/12-taos-sql/17-json.md
index 4cbd8eef364b1ea4e4285a34bb419a8ab3c7fc1d..18c25cfe230f81bf0b0e421634c1a768ae8e4628 100644
--- a/docs/zh/12-taos-sql/17-json.md
+++ b/docs/zh/12-taos-sql/17-json.md
@@ -1,6 +1,6 @@
---
-sidebar_label: JSON 类型使用说明
-title: JSON 类型使用说明
+sidebar_label: JSON 类型
+title: JSON 类型
description: 对 JSON 类型如何使用的详细说明
---
diff --git a/docs/zh/12-taos-sql/18-escape.md b/docs/zh/12-taos-sql/18-escape.md
index 7e543743a30aeaa125375b14ad8baf49b634d248..5e0d292d396fdb54bd3df553544353a900415283 100644
--- a/docs/zh/12-taos-sql/18-escape.md
+++ b/docs/zh/12-taos-sql/18-escape.md
@@ -1,5 +1,5 @@
---
-title: 转义字符说明
+title: 转义字符
sidebar_label: 转义字符
description: TDengine 中使用转义字符的详细规则
---
diff --git a/docs/zh/12-taos-sql/19-limit.md b/docs/zh/12-taos-sql/19-limit.md
index 473bb29c1cd62770e3efc313417a50901a696550..a9743adddabe96440ffca8c8585787081d29398f 100644
--- a/docs/zh/12-taos-sql/19-limit.md
+++ b/docs/zh/12-taos-sql/19-limit.md
@@ -1,6 +1,6 @@
---
-sidebar_label: 命名与边界限制
-title: 命名与边界限制
+sidebar_label: 命名与边界
+title: 命名与边界
description: 合法字符集和命名中的限制规则
---
@@ -31,7 +31,7 @@ description: 合法字符集和命名中的限制规则
- 最多允许 4096 列,最少需要 2 列,第一列必须是时间戳。
- 标签名最大长度为 64
- 最多允许 128 个,至少要有 1 个标签,一个表中标签值的总长度不超过 16KB
-- SQL 语句最大长度 1048576 个字符,也可通过客户端配置参数 maxSQLLength 修改,取值范围 65480 ~ 1048576
+- SQL 语句最大长度 1048576 个字符
- SELECT 语句的查询结果,最多允许返回 4096 列(语句中的函数调用可能也会占用一些列空间),超限时需要显式指定较少的返回数据列,以避免语句执行报错
- 库的数目,超级表的数目、表的数目,系统不做限制,仅受系统资源限制
- 数据库的副本数只能设置为 1 或 3
diff --git a/docs/zh/12-taos-sql/20-keywords.md b/docs/zh/12-taos-sql/20-keywords.md
index 047c6b08c9646927fc8ec16a2fd390569e4404fb..8013698fced2f31ca21dfa220066b027e71cb856 100644
--- a/docs/zh/12-taos-sql/20-keywords.md
+++ b/docs/zh/12-taos-sql/20-keywords.md
@@ -1,12 +1,14 @@
---
sidebar_label: 保留关键字
-title: TDengine 保留关键字
+title: 保留关键字
description: TDengine 保留关键字的详细列表
---
## 保留关键字
-目前 TDengine 有将近 200 个内部保留关键字,这些关键字无论大小写均不可以用作库名、表名、STable 名、数据列名及标签列名等。这些关键字列表如下:
+目前 TDengine 有 200 多个内部保留关键字,这些关键字如果需要用作库名、表名、超级表名、子表名、数据列名及标签列名等,无论大小写,需要使用符号 `` ` `` 将关键字括起来使用,例如 \`ADD\`。
+
+关键字列表如下:
### A
@@ -15,15 +17,20 @@ description: TDengine 保留关键字的详细列表
- ACCOUNTS
- ADD
- AFTER
+- AGGREGATE
- ALL
- ALTER
+- ANALYZE
- AND
+- APPS
- AS
- ASC
+- AT_ONCE
- ATTACH
### B
+- BALANCE
- BEFORE
- BEGIN
- BETWEEN
@@ -33,19 +40,27 @@ description: TDengine 保留关键字的详细列表
- BITNOT
- BITOR
- BLOCKS
+- BNODE
+- BNODES
- BOOL
+- BUFFER
+- BUFSIZE
- BY
### C
- CACHE
-- CACHELAST
+- CACHEMODEL
+- CACHESIZE
- CASCADE
+- CAST
- CHANGE
+- CLIENT_VERSION
- CLUSTER
- COLON
- COLUMN
- COMMA
+- COMMENT
- COMP
- COMPACT
- CONCAT
@@ -53,15 +68,18 @@ description: TDengine 保留关键字的详细列表
- CONNECTION
- CONNECTIONS
- CONNS
+- CONSUMER
+- CONSUMERS
+- CONTAINS
- COPY
+- COUNT
- CREATE
-- CTIME
+- CURRENT_USER
### D
- DATABASE
- DATABASES
-- DAYS
- DBS
- DEFERRED
- DELETE
@@ -70,18 +88,23 @@ description: TDengine 保留关键字的详细列表
- DESCRIBE
- DETACH
- DISTINCT
+- DISTRIBUTED
- DIVIDE
- DNODE
- DNODES
- DOT
- DOUBLE
- DROP
+- DURATION
### E
+- EACH
+- ENABLE
- END
-- EQ
+- EVERY
- EXISTS
+- EXPIRED
- EXPLAIN
### F
@@ -89,18 +112,20 @@ description: TDengine 保留关键字的详细列表
- FAIL
- FILE
- FILL
+- FIRST
- FLOAT
+- FLUSH
- FOR
- FROM
-- FSYNC
+- FUNCTION
+- FUNCTIONS
### G
-- GE
- GLOB
+- GRANT
- GRANTS
- GROUP
-- GT
### H
@@ -111,15 +136,18 @@ description: TDengine 保留关键字的详细列表
- ID
- IF
- IGNORE
-- IMMEDIA
+- IMMEDIATE
- IMPORT
- IN
-- INITIAL
+- INDEX
+- INDEXES
+- INITIALLY
+- INNER
- INSERT
- INSTEAD
- INT
- INTEGER
-- INTERVA
+- INTERVAL
- INTO
- IS
- ISNULL
@@ -127,6 +155,7 @@ description: TDengine 保留关键字的详细列表
### J
- JOIN
+- JSON
### K
@@ -136,46 +165,57 @@ description: TDengine 保留关键字的详细列表
### L
-- LE
+- LAST
+- LAST_ROW
+- LICENCES
- LIKE
- LIMIT
- LINEAR
- LOCAL
-- LP
-- LSHIFT
-- LT
### M
- MATCH
+- MAX_DELAY
- MAXROWS
+- MERGE
+- META
- MINROWS
- MINUS
+- MNODE
- MNODES
- MODIFY
- MODULES
### N
-- NE
+- NCHAR
+- NEXT
+- NMATCH
- NONE
- NOT
- NOTNULL
- NOW
- NULL
+- NULLS
### O
- OF
- OFFSET
+- ON
- OR
- ORDER
+- OUTPUTTYPE
### P
-- PARTITION
+- PAGES
+- PAGESIZE
+- PARTITIONS
- PASS
- PLUS
+- PORT
- PPS
- PRECISION
- PREV
@@ -183,47 +223,63 @@ description: TDengine 保留关键字的详细列表
### Q
+- QNODE
+- QNODES
- QTIME
-- QUERIE
+- QUERIES
- QUERY
-- QUORUM
### R
- RAISE
-- REM
+- RANGE
+- RATIO
+- READ
+- REDISTRIBUTE
+- RENAME
- REPLACE
- REPLICA
- RESET
-- RESTRIC
+- RESTRICT
+- RETENTIONS
+- REVOKE
+- ROLLUP
- ROW
-- RP
-- RSHIFT
### S
+- SCHEMALESS
- SCORES
- SELECT
- SEMI
+- SERVER_STATUS
+- SERVER_VERSION
- SESSION
- SET
- SHOW
-- SLASH
+- SINGLE_STABLE
- SLIDING
- SLIMIT
-- SMALLIN
+- SMA
+- SMALLINT
+- SNODE
+- SNODES
- SOFFSET
-- STable
-- STableS
+- SPLIT
+- STABLE
+- STABLES
- STAR
- STATE
-- STATEMEN
-- STATE_WI
+- STATE_WINDOW
+- STATEMENT
- STORAGE
- STREAM
- STREAMS
+- STRICT
- STRING
+- SUBSCRIPTIONS
- SYNCDB
+- SYSINFO
### T
@@ -234,19 +290,24 @@ description: TDengine 保留关键字的详细列表
- TBNAME
- TIMES
- TIMESTAMP
+- TIMEZONE
- TINYINT
+- TO
+- TODAY
- TOPIC
- TOPICS
+- TRANSACTION
+- TRANSACTIONS
- TRIGGER
+- TRIM
- TSERIES
+- TTL
### U
-- UMINUS
- UNION
- UNSIGNED
- UPDATE
-- UPLUS
- USE
- USER
- USERS
@@ -254,9 +315,13 @@ description: TDengine 保留关键字的详细列表
### V
+- VALUE
- VALUES
+- VARCHAR
- VARIABLE
- VARIABLES
+- VERBOSE
+- VGROUP
- VGROUPS
- VIEW
- VNODES
@@ -264,14 +329,26 @@ description: TDengine 保留关键字的详细列表
### W
- WAL
+- WAL_FSYNC_PERIOD
+- WAL_LEVEL
+- WAL_RETENTION_PERIOD
+- WAL_RETENTION_SIZE
+- WAL_ROLL_PERIOD
+- WAL_SEGMENT_SIZE
+- WATERMARK
- WHERE
+- WINDOW_CLOSE
+- WITH
+- WRITE
### \_
- \_C0
-- \_QSTART
-- \_QSTOP
+- \_IROWTS
- \_QDURATION
-- \_WSTART
-- \_WSTOP
+- \_QEND
+- \_QSTART
+- \_ROWTS
- \_WDURATION
+- \_WEND
+- \_WSTART
diff --git a/docs/zh/12-taos-sql/22-meta.md b/docs/zh/12-taos-sql/22-meta.md
index e9cda45b0fa85319234572b236c936d907311796..c192d0e5e83ab856a891137c96e9bd778bfc8e1c 100644
--- a/docs/zh/12-taos-sql/22-meta.md
+++ b/docs/zh/12-taos-sql/22-meta.md
@@ -1,6 +1,6 @@
---
sidebar_label: 元数据
-title: 存储元数据的 Information_Schema 数据库
+title: 元数据
description: Information_Schema 数据库中存储了系统中所有的元数据信息
---
@@ -12,7 +12,15 @@ TDengine 内置了一个名为 `INFORMATION_SCHEMA` 的数据库,提供对数
4. TDengine 在后续演进中可以灵活的添加已有 INFORMATION_SCHEMA 中表的列,而不用担心对既有业务系统造成影响
5. 与其他数据库系统更具互操作性。例如,Oracle 数据库用户熟悉查询 Oracle 数据字典中的表
-Note: 由于 SHOW 语句已经被开发者熟悉和广泛使用,所以它们仍然被保留。
+:::info
+
+- 由于 SHOW 语句已经被开发者熟悉和广泛使用,所以它们仍然被保留。
+- 系统表中的一些列可能是关键字,在查询时需要使用转义符'\`',例如查询数据库 test 有几个 VGROUP:
+```sql
+ select `vgroups` from ins_databases where name = 'test';
+```
+
+:::
本章将详细介绍 `INFORMATION_SCHEMA` 这个内置元数据库中的表和表结构。
@@ -103,7 +111,11 @@ Note: 由于 SHOW 语句已经被开发者熟悉和广泛使用,所以它们
| 24 | wal_retention_period | INT | WAL 的保存时长 |
| 25 | wal_retention_size | INT | WAL 的保存上限 |
| 26 | wal_roll_period | INT | wal 文件切换时长 |
-| 27 | wal_segment_size | wal 单个文件大小 |
+| 27 | wal_segment_size | BIGINT | wal 单个文件大小 |
+| 28 | stt_trigger | SMALLINT | 触发文件合并的落盘文件的个数 |
+| 29 | table_prefix | SMALLINT | 内部存储引擎根据表名分配存储该表数据的 VNODE 时要忽略的前缀的长度 |
+| 30 | table_suffix | SMALLINT | 内部存储引擎根据表名分配存储该表数据的 VNODE 时要忽略的后缀的长度 |
+| 31 | tsdb_pagesize | INT | 时序数据存储引擎中的页大小 |
## INS_FUNCTIONS
@@ -246,3 +258,35 @@ Note: 由于 SHOW 语句已经被开发者熟悉和广泛使用,所以它们
| 1 | dnode_id | INT | dnode 的 ID |
| 2 | name | BINARY(32) | 配置项名称 |
| 3 | value | BINARY(64) | 该配置项的值 |
+
+## INS_TOPICS
+
+| # | **列名** | **数据类型** | **说明** |
+| --- | :---------: | ------------ | ------------------------------ |
+| 1 | topic_name | BINARY(192) | topic 名称 |
+| 2 | db_name | BINARY(64) | topic 相关的 DB |
+| 3 | create_time | TIMESTAMP | topic 的 创建时间 |
+| 4 | sql | BINARY(1024) | 创建该 topic 时所用的 SQL 语句 |
+
+## INS_SUBSCRIPTIONS
+
+| # | **列名** | **数据类型** | **说明** |
+| --- | :------------: | ------------ | ------------------------ |
+| 1 | topic_name | BINARY(204) | 被订阅的 topic |
+| 2 | consumer_group | BINARY(193) | 订阅者的消费者组 |
+| 3 | vgroup_id | INT | 消费者被分配的 vgroup id |
+| 4 | consumer_id | BIGINT | 消费者的唯一 id |
+
+## INS_STREAMS
+
+| # | **列名** | **数据类型** | **说明** |
+| --- | :----------: | ------------ | --------------------------------------- |
+| 1 | stream_name | BINARY(64) | 流计算名称 |
+| 2 | create_time | TIMESTAMP | 创建时间 |
+| 3 | sql | BINARY(1024) | 创建流计算时提供的 SQL 语句 |
+| 4 | status | BIANRY(20) | 流当前状态 |
+| 5 | source_db | BINARY(64) | 源数据库 |
+| 6 | target_db | BIANRY(64) | 目的数据库 |
+| 7 | target_table | BINARY(192) | 流计算写入的目标表 |
+| 8 | watermark | BIGINT | watermark,详见 SQL 手册流式计算 |
+| 9 | trigger | INT | 计算结果推送模式,详见 SQL 手册流式计算 |
diff --git a/docs/zh/12-taos-sql/23-perf.md b/docs/zh/12-taos-sql/23-perf.md
index e6ff4960a7c0148eb7b65d06dc37a6355a6e289c..d4ee0e178c02e65eb3f1ceaa73e170893f65cc88 100644
--- a/docs/zh/12-taos-sql/23-perf.md
+++ b/docs/zh/12-taos-sql/23-perf.md
@@ -1,6 +1,6 @@
---
sidebar_label: 统计数据
-title: 存储统计数据的 Performance_Schema 数据库
+title: 统计数据
description: Performance_Schema 数据库中存储了系统中的各种统计信息
---
@@ -62,15 +62,6 @@ TDengine 3.0 版本开始提供一个内置数据库 `performance_schema`,其
| 12 | sub_status | BINARY(1000) | 子查询状态 |
| 13 | sql | BINARY(1024) | SQL 语句 |
-## PERF_TOPICS
-
-| # | **列名** | **数据类型** | **说明** |
-| --- | :---------: | ------------ | ------------------------------ |
-| 1 | topic_name | BINARY(192) | topic 名称 |
-| 2 | db_name | BINARY(64) | topic 相关的 DB |
-| 3 | create_time | TIMESTAMP | topic 的 创建时间 |
-| 4 | sql | BINARY(1024) | 创建该 topic 时所用的 SQL 语句 |
-
## PERF_CONSUMERS
| # | **列名** | **数据类型** | **说明** |
@@ -84,15 +75,6 @@ TDengine 3.0 版本开始提供一个内置数据库 `performance_schema`,其
| 7 | subscribe_time | TIMESTAMP | 上一次发起订阅的时间 |
| 8 | rebalance_time | TIMESTAMP | 上一次触发 rebalance 的时间 |
-## PERF_SUBSCRIPTIONS
-
-| # | **列名** | **数据类型** | **说明** |
-| --- | :------------: | ------------ | ------------------------ |
-| 1 | topic_name | BINARY(204) | 被订阅的 topic |
-| 2 | consumer_group | BINARY(193) | 订阅者的消费者组 |
-| 3 | vgroup_id | INT | 消费者被分配的 vgroup id |
-| 4 | consumer_id | BIGINT | 消费者的唯一 id |
-
## PERF_TRANS
| # | **列名** | **数据类型** | **说明** |
@@ -114,17 +96,3 @@ TDengine 3.0 版本开始提供一个内置数据库 `performance_schema`,其
| 2 | create_time | TIMESTAMP | sma 创建时间 |
| 3 | stable_name | BINARY(192) | sma 所属的超级表名称 |
| 4 | vgroup_id | INT | sma 专属的 vgroup 名称 |
-
-## PERF_STREAMS
-
-| # | **列名** | **数据类型** | **说明** |
-| --- | :----------: | ------------ | --------------------------------------- |
-| 1 | stream_name | BINARY(64) | 流计算名称 |
-| 2 | create_time | TIMESTAMP | 创建时间 |
-| 3 | sql | BINARY(1024) | 创建流计算时提供的 SQL 语句 |
-| 4 | status | BIANRY(20) | 流当前状态 |
-| 5 | source_db | BINARY(64) | 源数据库 |
-| 6 | target_db | BIANRY(64) | 目的数据库 |
-| 7 | target_table | BINARY(192) | 流计算写入的目标表 |
-| 8 | watermark | BIGINT | watermark,详见 SQL 手册流式计算 |
-| 9 | trigger | INT | 计算结果推送模式,详见 SQL 手册流式计算 |
diff --git a/docs/zh/12-taos-sql/24-show.md b/docs/zh/12-taos-sql/24-show.md
index 14b51fb4c1414a93032c33384750f0334cb12eab..31b7c085a1ba97630223c16e06022ec9dfd9ea50 100644
--- a/docs/zh/12-taos-sql/24-show.md
+++ b/docs/zh/12-taos-sql/24-show.md
@@ -1,21 +1,11 @@
---
sidebar_label: SHOW 命令
-title: 使用 SHOW 命令查看系统元数据
+title: SHOW 命令
description: SHOW 命令的完整列表
---
SHOW 命令可以用来获取简要的系统信息。若想获取系统中详细的各种元数据、系统信息和状态,请使用 select 语句查询 INFORMATION_SCHEMA 数据库中的表。
-## SHOW ACCOUNTS
-
-```sql
-SHOW ACCOUNTS;
-```
-
-显示当前系统中所有租户的信息。
-
-注:企业版独有
-
## SHOW APPS
```sql
@@ -195,7 +185,7 @@ SHOW STREAMS;
SHOW SUBSCRIPTIONS;
```
-显示当前数据库下的所有的订阅关系
+显示当前系统内所有的订阅关系
## SHOW TABLES
diff --git a/docs/zh/12-taos-sql/25-grant.md b/docs/zh/12-taos-sql/25-grant.md
index 6f7024d32eb6514d8025aa7c50b6bd5b1c5603ee..7fb944710125de6fe4d6efcedbb0677b33e1fd0f 100644
--- a/docs/zh/12-taos-sql/25-grant.md
+++ b/docs/zh/12-taos-sql/25-grant.md
@@ -9,14 +9,51 @@ description: 企业版中才具有的权限管理功能
## 创建用户
```sql
-CREATE USER use_name PASS 'password';
+CREATE USER use_name PASS 'password' [SYSINFO {1|0}];
```
创建用户。
-use_name最长为23字节。
+use_name 最长为 23 字节。
-password最长为128字节,合法字符包括"a-zA-Z0-9!?$%^&*()_–+={[}]:;@~#|<,>.?/",不可以出现单双引号、撇号、反斜杠和空格,且不可以为空。
+password 最长为 128 字节,合法字符包括"a-zA-Z0-9!?$%^&*()_–+={[}]:;@~#|<,>.?/",不可以出现单双引号、撇号、反斜杠和空格,且不可以为空。
+
+SYSINFO 表示用户是否可以查看系统信息。1 表示可以查看,0 表示不可以查看。系统信息包括服务端配置信息、服务端各种节点信息(如 DNODE、QNODE等)、存储相关的信息等。默认为可以查看系统信息。
+
+例如,创建密码为123456且可以查看系统信息的用户test如下:
+
+```sql
+taos> create user test pass '123456' sysinfo 1;
+Query OK, 0 of 0 rows affected (0.001254s)
+```
+
+## 查看用户
+
+```sql
+SHOW USERS;
+```
+
+查看用户信息。
+
+```sql
+taos> show users;
+ name | super | enable | sysinfo | create_time |
+================================================================================
+ test | 0 | 1 | 1 | 2022-08-29 15:10:27.315 |
+ root | 1 | 1 | 1 | 2022-08-29 15:03:34.710 |
+Query OK, 2 rows in database (0.001657s)
+```
+
+也可以通过查询INFORMATION_SCHEMA.INS_USERS系统表来查看用户信息,例如:
+
+```sql
+taos> select * from information_schema.ins_users;
+ name | super | enable | sysinfo | create_time |
+================================================================================
+ test | 0 | 1 | 1 | 2022-08-29 15:10:27.315 |
+ root | 1 | 1 | 1 | 2022-08-29 15:03:34.710 |
+Query OK, 2 rows in database (0.001953s)
+```
## 删除用户
@@ -37,9 +74,15 @@ alter_user_clause: {
```
- PASS:修改用户密码。
-- ENABLE:修改用户是否启用。1表示启用此用户,0表示禁用此用户。
-- SYSINFO:修改用户是否可查看系统信息。1表示可以查看系统信息,0表示不可以查看系统信息。
+- ENABLE:修改用户是否启用。1 表示启用此用户,0 表示禁用此用户。
+- SYSINFO:修改用户是否可查看系统信息。1 表示可以查看系统信息,0 表示不可以查看系统信息。
+
+例如,禁用 test 用户:
+```sql
+taos> alter user test enable 0;
+Query OK, 0 of 0 rows affected (0.001160s)
+```
## 授权
@@ -62,7 +105,7 @@ priv_level : {
}
```
-对用户授权。
+对用户授权。授权功能只包含在企业版中。
授权级别支持到DATABASE,权限有READ和WRITE两种。
@@ -92,4 +135,4 @@ priv_level : {
```
-收回对用户的授权。
+收回对用户的授权。授权功能只包含在企业版中。
diff --git a/docs/zh/12-taos-sql/26-udf.md b/docs/zh/12-taos-sql/26-udf.md
index 764fde6e1f2e8aa38b90b4b8bc0131c9eaf44da6..6dc1b6eb5fbe346ae65993e4e290566179b0e6ee 100644
--- a/docs/zh/12-taos-sql/26-udf.md
+++ b/docs/zh/12-taos-sql/26-udf.md
@@ -1,6 +1,6 @@
---
sidebar_label: 自定义函数
-title: 用户自定义函数
+title: 自定义函数
description: 使用 UDF 的详细指南
---
diff --git a/docs/zh/12-taos-sql/27-index.md b/docs/zh/12-taos-sql/27-index.md
index f88c6cf4ffe53ae19926e09c760bedd2997a952d..aa84140296832f79a6498d0da2b5a8f500cd1e90 100644
--- a/docs/zh/12-taos-sql/27-index.md
+++ b/docs/zh/12-taos-sql/27-index.md
@@ -1,6 +1,6 @@
---
sidebar_label: 索引
-title: 使用索引
+title: 索引
description: 索引功能的使用细节
---
diff --git a/docs/zh/12-taos-sql/29-changes.md b/docs/zh/12-taos-sql/29-changes.md
index d653c59a5cd1309fbdcd6ef7e3706e33c4a43dee..7da8e9f331bd1d9c81e9bde441e35b03900422b0 100644
--- a/docs/zh/12-taos-sql/29-changes.md
+++ b/docs/zh/12-taos-sql/29-changes.md
@@ -1,6 +1,6 @@
---
-sidebar_label: 3.0 版本语法变更
-title: 3.0 版本语法变更
+sidebar_label: 语法变更
+title: 语法变更
description: "TDengine 3.0 版本的语法变更说明"
---
@@ -11,12 +11,13 @@ description: "TDengine 3.0 版本的语法变更说明"
| 1 | VARCHAR | 新增 | BINARY类型的别名。
| 2 | TIMESTAMP字面量 | 新增 | 新增支持 TIMESTAMP 'timestamp format' 语法。
| 3 | _ROWTS伪列 | 新增 | 表示时间戳主键。是_C0伪列的别名。
-| 4 | INFORMATION_SCHEMA | 新增 | 包含各种SCHEMA定义的系统数据库。
-| 5 | PERFORMANCE_SCHEMA | 新增 | 包含运行信息的系统数据库。
-| 6 | 连续查询 | 废除 | 不再支持连续查询。相关的各种语法和接口废除。
-| 7 | 混合运算 | 增强 | 查询中的混合运算(标量运算和矢量运算混合)全面增强,SELECT的各个子句均全面支持符合语法语义的混合运算。
-| 8 | 标签运算 | 新增 |在查询中,标签列可以像普通列一样参与各种运算,用于各种子句。
-| 9 | 时间线子句和时间函数用于超级表查询 | 增强 |没有PARTITION BY时,超级表的数据会被合并成一条时间线。
+| 4 | _IROWTS伪列 | 新增 | 用于返回 interp 函数插值结果对应的时间戳列。
+| 5 | INFORMATION_SCHEMA | 新增 | 包含各种SCHEMA定义的系统数据库。
+| 6 | PERFORMANCE_SCHEMA | 新增 | 包含运行信息的系统数据库。
+| 7 | 连续查询 | 废除 | 不再支持连续查询。相关的各种语法和接口废除。
+| 8 | 混合运算 | 增强 | 查询中的混合运算(标量运算和矢量运算混合)全面增强,SELECT的各个子句均全面支持符合语法语义的混合运算。
+| 9 | 标签运算 | 新增 |在查询中,标签列可以像普通列一样参与各种运算,用于各种子句。
+| 10 | 时间线子句和时间函数用于超级表查询 | 增强 |没有PARTITION BY时,超级表的数据会被合并成一条时间线。
## SQL 语句变更
diff --git a/docs/zh/12-taos-sql/index.md b/docs/zh/12-taos-sql/index.md
index 821679551c453b1a3f2937ac5d2409dd733cd593..739d26b2240ddfcf32a269015f5c8915f4854f33 100644
--- a/docs/zh/12-taos-sql/index.md
+++ b/docs/zh/12-taos-sql/index.md
@@ -1,11 +1,11 @@
---
-title: TAOS SQL
-description: "TAOS SQL 支持的语法规则、主要查询功能、支持的 SQL 查询函数,以及常用技巧等内容"
+title: TDengine SQL
+description: 'TDengine SQL 支持的语法规则、主要查询功能、支持的 SQL 查询函数,以及常用技巧等内容'
---
-本文档说明 TAOS SQL 支持的语法规则、主要查询功能、支持的 SQL 查询函数,以及常用技巧等内容。阅读本文档需要读者具有基本的 SQL 语言的基础。TDengine 3.0 版本相比 2.x 版本做了大量改进和优化,特别是查询引擎进行了彻底的重构,因此 SQL 语法相比 2.x 版本有很多变更。详细的变更内容请见 [3.0 版本语法变更](/taos-sql/changes) 章节
+本文档说明 TDengine SQL 支持的语法规则、主要查询功能、支持的 SQL 查询函数,以及常用技巧等内容。阅读本文档需要读者具有基本的 SQL 语言的基础。TDengine 3.0 版本相比 2.x 版本做了大量改进和优化,特别是查询引擎进行了彻底的重构,因此 SQL 语法相比 2.x 版本有很多变更。详细的变更内容请见 [3.0 版本语法变更](/taos-sql/changes) 章节
-TAOS SQL 是用户对 TDengine 进行数据写入和查询的主要工具。TAOS SQL 提供标准的 SQL 语法,并针对时序数据和业务的特点优化和新增了许多语法和功能。TAOS SQL 语句的最大长度为 1M。TAOS SQL 不支持关键字的缩写,例如 DELETE 不能缩写为 DEL。
+TDengine SQL 是用户对 TDengine 进行数据写入和查询的主要工具。TDengine SQL 提供标准的 SQL 语法,并针对时序数据和业务的特点优化和新增了许多语法和功能。TDengine SQL 语句的最大长度为 1M。TDengine SQL 不支持关键字的缩写,例如 DELETE 不能缩写为 DEL。
本章节 SQL 语法遵循如下约定:
diff --git a/docs/zh/14-reference/04-taosadapter.md b/docs/zh/14-reference/04-taosadapter.md
index 71bf5f4223ae97cf2c1153aaea3b8f946e213522..82efcab06fbe8107583e49485770842c77ff4850 100644
--- a/docs/zh/14-reference/04-taosadapter.md
+++ b/docs/zh/14-reference/04-taosadapter.md
@@ -196,7 +196,7 @@ AllowWebSockets
- `u` TDengine 用户名
- `p` TDengine 密码
-注意: 目前不支持 InfluxDB 的 token 验证方式只支持 Basic 验证和查询参数验证。
+注意: 目前不支持 InfluxDB 的 token 验证方式,仅支持 Basic 验证和查询参数验证。
### OpenTSDB
diff --git a/docs/zh/14-reference/05-taosbenchmark.md b/docs/zh/14-reference/05-taosbenchmark.md
index f84ec65b4c8574c0812567a65213d7605b306c99..0d6aad62401daf76737caf803461c187189cb76f 100644
--- a/docs/zh/14-reference/05-taosbenchmark.md
+++ b/docs/zh/14-reference/05-taosbenchmark.md
@@ -405,37 +405,7 @@ taosBenchmark -A INT,DOUBLE,NCHAR,BINARY\(16\)
订阅子表或者普通表的配置参数在 `specified_table_query` 中设置。
-- **threads** : 执行 SQL 的线程数,默认为 1。
-
-- **interval** : 执行订阅的时间间隔,单位为秒,默认为 0。
-
-- **restart** : "yes" 表示开始新的订阅,"no" 表示继续之前的订阅,默认值为 "no"。
-
-- **keepProgress** : "yes" 表示保留订阅进度,"no" 表示不保留,默认值为 "no"。
-
-- **resubAfterConsume** : "yes" 表示取消之前的订阅然后再次订阅, "no" 表示继续之前的订阅,默认值为 "no"。
+- **threads/concurrent** : 执行 SQL 的线程数,默认为 1。
- **sqls** :
- **sql** : 执行的 SQL 命令,必填。
- - **result** : 保存查询结果的文件,未指定则不保存。
-
-#### 订阅超级表的配置参数
-
-订阅超级表的配置参数在 `super_table_query` 中设置。
-
-- **stblname** : 要订阅的超级表名称,必填。
-
-- **threads** : 执行 SQL 的线程数,默认为 1。
-
-- **interval** : 执行订阅的时间间隔,单位为秒,默认为 0。
-
-- **restart** : "yes" 表示开始新的订阅,"no" 表示继续之前的订阅,默认值为 "no"。
-
-- **keepProgress** : "yes" 表示保留订阅进度,"no" 表示不保留,默认值为 "no"。
-
-- **resubAfterConsume** : "yes" 表示取消之前的订阅然后再次订阅, "no" 表示继续之前的订阅,默认值为 "no"。
-
-- **sqls** :
- - **sql** : 执行的 SQL 命令,必填;对于超级表的查询 SQL,在 SQL 命令中保留 "xxxx",程序会自动将其替换为超级表的所有子表名。
- 替换为超级表中所有的子表名。
- - **result** : 保存查询结果的文件,未指定则不保存。
diff --git a/docs/zh/14-reference/11-docker/index.md b/docs/zh/14-reference/11-docker/index.md
index 743fc2d32f82778fb97e7879972cd23db1159c8e..58bbe1e1178fbb1a1aa649508b0e36b331964753 100644
--- a/docs/zh/14-reference/11-docker/index.md
+++ b/docs/zh/14-reference/11-docker/index.md
@@ -32,7 +32,7 @@ taos> show databases;
Query OK, 2 rows in database (0.033802s)
```
-因为运行在容器中的 TDengine 服务端使用容器的 hostname 建立连接,使用 taos shell 或者各种连接器(例如 JDBC-JNI)从容器外访问容器内的 TDengine 比较复杂,所以上述方式是访问容器中 TDengine 服务的最简单的方法,适用于一些简单场景。如果在一些复杂场景下想要从容器化使用 taos shell 或者各种连接器访问容器中的 TDengine 服务,请参考下一节。
+因为运行在容器中的 TDengine 服务端使用容器的 hostname 建立连接,使用 TDengine CLI 或者各种连接器(例如 JDBC-JNI)从容器外访问容器内的 TDengine 比较复杂,所以上述方式是访问容器中 TDengine 服务的最简单的方法,适用于一些简单场景。如果在一些复杂场景下想要从容器化使用 TDengine CLI 或者各种连接器访问容器中的 TDengine 服务,请参考下一节。
## 在 host 网络上启动 TDengine
@@ -75,7 +75,7 @@ docker run -d \
echo 127.0.0.1 tdengine |sudo tee -a /etc/hosts
```
-最后,可以从 taos shell 或者任意连接器以 "tdengine" 为服务端地址访问 TDengine 服务。
+最后,可以从 TDengine CLI 或者任意连接器以 "tdengine" 为服务端地址访问 TDengine 服务。
```shell
taos -h tdengine -P 6030
@@ -119,7 +119,7 @@ taos -h tdengine -P 6030
FROM ubuntu:20.04
RUN apt-get update && apt-get install -y wget
ENV TDENGINE_VERSION=3.0.0.0
-RUN wget -c https://www.taosdata.com/assets-download/TDengine-client-${TDENGINE_VERSION}-Linux-x64.tar.gz \
+RUN wget -c https://www.tdengine.com/assets-download/TDengine-client-${TDENGINE_VERSION}-Linux-x64.tar.gz \
&& tar xvf TDengine-client-${TDENGINE_VERSION}-Linux-x64.tar.gz \
&& cd TDengine-client-${TDENGINE_VERSION} \
&& ./install_client.sh \
@@ -234,7 +234,7 @@ go mod tidy
```dockerfile
FROM golang:1.19.0-buster as builder
ENV TDENGINE_VERSION=3.0.0.0
-RUN wget -c https://www.taosdata.com/assets-download/3.0/TDengine-client-${TDENGINE_VERSION}-Linux-x64.tar.gz \
+RUN wget -c https://www.tdengine.com/assets-download/3.0/TDengine-client-${TDENGINE_VERSION}-Linux-x64.tar.gz \
&& tar xvf TDengine-client-${TDENGINE_VERSION}-Linux-x64.tar.gz \
&& cd TDengine-client-${TDENGINE_VERSION} \
&& ./install_client.sh \
@@ -250,7 +250,7 @@ RUN go build
FROM ubuntu:20.04
RUN apt-get update && apt-get install -y wget
ENV TDENGINE_VERSION=3.0.0.0
-RUN wget -c https://www.taosdata.com/assets-download/3.0/TDengine-client-${TDENGINE_VERSION}-Linux-x64.tar.gz \
+RUN wget -c https://www.tdengine.com/assets-download/3.0/TDengine-client-${TDENGINE_VERSION}-Linux-x64.tar.gz \
&& tar xvf TDengine-client-${TDENGINE_VERSION}-Linux-x64.tar.gz \
&& cd TDengine-client-${TDENGINE_VERSION} \
&& ./install_client.sh \
@@ -354,7 +354,7 @@ test-docker_td-2_1 /tini -- /usr/bin/entrypoi ... Up
test-docker_td-3_1 /tini -- /usr/bin/entrypoi ... Up
```
-4. 用 taos shell 查看 dnodes
+4. 用 TDengine CLI 查看 dnodes
```shell
diff --git a/docs/zh/14-reference/12-config/index.md b/docs/zh/14-reference/12-config/index.md
index 7b31e10572c4a6bafd088e7b7c14853ee0d32df1..6f26878cdd4c558db11465db804e2eef92b143fa 100644
--- a/docs/zh/14-reference/12-config/index.md
+++ b/docs/zh/14-reference/12-config/index.md
@@ -177,12 +177,21 @@ taos --dump-config
### maxNumOfDistinctRes
| 属性 | 说明 |
-| -------- | -------------------------------- | --- |
+| -------- | -------------------------------- |
| 适用范围 | 仅服务端适用 |
| 含义 | 允许返回的 distinct 结果最大行数 |
| 取值范围 | 默认值为 10 万,最大值 1 亿 |
| 缺省值 | 10 万 |
+### keepColumnName
+
+| 属性 | 说明 |
+| -------- | -------------------------------- |
+| 适用范围 | 仅客户端适用 |
+| 含义 | Last、First、LastRow 函数查询时,返回的列名是否包含函数名。 |
+| 取值范围 | 0 表示包含函数名,1 表示不包含函数名。 |
+| 缺省值 | 0 |
+
## 区域相关
### timezone
@@ -325,7 +334,7 @@ charset 的有效值是 UTF-8。
| 适用范围 | 仅服务端适用 |
| 含义 | dnode 支持的最大 vnode 数目 |
| 取值范围 | 0-4096 |
-| 缺省值 | 256 |
+| 缺省值 | CPU 核数的 2 倍 |
## 时间相关
@@ -668,153 +677,154 @@ charset 的有效值是 UTF-8。
| 15 | telemetryPort | 否 | 是 |
| 16 | queryPolicy | 否 | 是 |
| 17 | querySmaOptimize | 否 | 是 |
-| 18 | queryBufferSize | 是 | 是 |
-| 19 | maxNumOfDistinctRes | 是 | 是 |
-| 20 | minSlidingTime | 是 | 是 |
-| 21 | minIntervalTime | 是 | 是 |
-| 22 | countAlwaysReturnValue | 是 | 是 |
-| 23 | dataDir | 是 | 是 |
-| 24 | minimalDataDirGB | 是 | 是 |
-| 25 | supportVnodes | 否 | 是 |
-| 26 | tempDir | 是 | 是 |
-| 27 | minimalTmpDirGB | 是 | 是 |
-| 28 | compressMsgSize | 是 | 是 |
-| 29 | compressColData | 是 | 是 |
-| 30 | smlChildTableName | 是 | 是 |
-| 31 | smlTagName | 是 | 是 |
-| 32 | smlDataFormat | 否 | 是 |
-| 33 | statusInterval | 是 | 是 |
-| 34 | shellActivityTimer | 是 | 是 |
-| 35 | transPullupInterval | 否 | 是 |
-| 36 | mqRebalanceInterval | 否 | 是 |
-| 37 | ttlUnit | 否 | 是 |
-| 38 | ttlPushInterval | 否 | 是 |
-| 39 | numOfTaskQueueThreads | 否 | 是 |
-| 40 | numOfRpcThreads | 否 | 是 |
-| 41 | numOfCommitThreads | 是 | 是 |
-| 42 | numOfMnodeReadThreads | 否 | 是 |
-| 43 | numOfVnodeQueryThreads | 否 | 是 |
-| 44 | numOfVnodeStreamThreads | 否 | 是 |
-| 45 | numOfVnodeFetchThreads | 否 | 是 |
-| 46 | numOfVnodeWriteThreads | 否 | 是 |
-| 47 | numOfVnodeSyncThreads | 否 | 是 |
-| 48 | numOfVnodeRsmaThreads | 否 | 是 |
-| 49 | numOfQnodeQueryThreads | 否 | 是 |
-| 50 | numOfQnodeFetchThreads | 否 | 是 |
-| 51 | numOfSnodeSharedThreads | 否 | 是 |
-| 52 | numOfSnodeUniqueThreads | 否 | 是 |
-| 53 | rpcQueueMemoryAllowed | 否 | 是 |
-| 54 | logDir | 是 | 是 |
-| 55 | minimalLogDirGB | 是 | 是 |
-| 56 | numOfLogLines | 是 | 是 |
-| 57 | asyncLog | 是 | 是 |
-| 58 | logKeepDays | 是 | 是 |
-| 59 | debugFlag | 是 | 是 |
-| 60 | tmrDebugFlag | 是 | 是 |
-| 61 | uDebugFlag | 是 | 是 |
-| 62 | rpcDebugFlag | 是 | 是 |
-| 63 | jniDebugFlag | 是 | 是 |
-| 64 | qDebugFlag | 是 | 是 |
-| 65 | cDebugFlag | 是 | 是 |
-| 66 | dDebugFlag | 是 | 是 |
-| 67 | vDebugFlag | 是 | 是 |
-| 68 | mDebugFlag | 是 | 是 |
-| 69 | wDebugFlag | 是 | 是 |
-| 70 | sDebugFlag | 是 | 是 |
-| 71 | tsdbDebugFlag | 是 | 是 |
-| 72 | tqDebugFlag | 否 | 是 |
-| 73 | fsDebugFlag | 是 | 是 |
-| 74 | udfDebugFlag | 否 | 是 |
-| 75 | smaDebugFlag | 否 | 是 |
-| 76 | idxDebugFlag | 否 | 是 |
-| 77 | tdbDebugFlag | 否 | 是 |
-| 78 | metaDebugFlag | 否 | 是 |
-| 79 | timezone | 是 | 是 |
-| 80 | locale | 是 | 是 |
-| 81 | charset | 是 | 是 |
-| 82 | udf | 是 | 是 |
-| 83 | enableCoreFile | 是 | 是 |
-| 84 | arbitrator | 是 | 否 |
-| 85 | numOfThreadsPerCore | 是 | 否 |
-| 86 | numOfMnodes | 是 | 否 |
-| 87 | vnodeBak | 是 | 否 |
-| 88 | balance | 是 | 否 |
-| 89 | balanceInterval | 是 | 否 |
-| 90 | offlineThreshold | 是 | 否 |
-| 91 | role | 是 | 否 |
-| 92 | dnodeNopLoop | 是 | 否 |
-| 93 | keepTimeOffset | 是 | 否 |
-| 94 | rpcTimer | 是 | 否 |
-| 95 | rpcMaxTime | 是 | 否 |
-| 96 | rpcForceTcp | 是 | 否 |
-| 97 | tcpConnTimeout | 是 | 否 |
-| 98 | syncCheckInterval | 是 | 否 |
-| 99 | maxTmrCtrl | 是 | 否 |
-| 100 | monitorReplica | 是 | 否 |
-| 101 | smlTagNullName | 是 | 否 |
-| 102 | keepColumnName | 是 | 否 |
-| 103 | ratioOfQueryCores | 是 | 否 |
-| 104 | maxStreamCompDelay | 是 | 否 |
-| 105 | maxFirstStreamCompDelay | 是 | 否 |
-| 106 | retryStreamCompDelay | 是 | 否 |
-| 107 | streamCompDelayRatio | 是 | 否 |
-| 108 | maxVgroupsPerDb | 是 | 否 |
-| 109 | maxTablesPerVnode | 是 | 否 |
-| 110 | minTablesPerVnode | 是 | 否 |
-| 111 | tableIncStepPerVnode | 是 | 否 |
-| 112 | cache | 是 | 否 |
-| 113 | blocks | 是 | 否 |
-| 114 | days | 是 | 否 |
-| 115 | keep | 是 | 否 |
-| 116 | minRows | 是 | 否 |
-| 117 | maxRows | 是 | 否 |
-| 118 | quorum | 是 | 否 |
-| 119 | comp | 是 | 否 |
-| 120 | walLevel | 是 | 否 |
-| 121 | fsync | 是 | 否 |
-| 122 | replica | 是 | 否 |
-| 123 | partitions | 是 | 否 |
-| 124 | quorum | 是 | 否 |
-| 125 | update | 是 | 否 |
-| 126 | cachelast | 是 | 否 |
-| 127 | maxSQLLength | 是 | 否 |
-| 128 | maxWildCardsLength | 是 | 否 |
-| 129 | maxRegexStringLen | 是 | 否 |
-| 130 | maxNumOfOrderedRes | 是 | 否 |
-| 131 | maxConnections | 是 | 否 |
-| 132 | mnodeEqualVnodeNum | 是 | 否 |
-| 133 | http | 是 | 否 |
-| 134 | httpEnableRecordSql | 是 | 否 |
-| 135 | httpMaxThreads | 是 | 否 |
-| 136 | restfulRowLimit | 是 | 否 |
-| 137 | httpDbNameMandatory | 是 | 否 |
-| 138 | httpKeepAlive | 是 | 否 |
-| 139 | enableRecordSql | 是 | 否 |
-| 140 | maxBinaryDisplayWidth | 是 | 否 |
-| 141 | stream | 是 | 否 |
-| 142 | retrieveBlockingModel | 是 | 否 |
-| 143 | tsdbMetaCompactRatio | 是 | 否 |
-| 144 | defaultJSONStrType | 是 | 否 |
-| 145 | walFlushSize | 是 | 否 |
-| 146 | keepTimeOffset | 是 | 否 |
-| 147 | flowctrl | 是 | 否 |
-| 148 | slaveQuery | 是 | 否 |
-| 149 | adjustMaster | 是 | 否 |
-| 150 | topicBinaryLen | 是 | 否 |
-| 151 | telegrafUseFieldNum | 是 | 否 |
-| 152 | deadLockKillQuery | 是 | 否 |
-| 153 | clientMerge | 是 | 否 |
-| 154 | sdbDebugFlag | 是 | 否 |
-| 155 | odbcDebugFlag | 是 | 否 |
-| 156 | httpDebugFlag | 是 | 否 |
-| 157 | monDebugFlag | 是 | 否 |
-| 158 | cqDebugFlag | 是 | 否 |
-| 159 | shortcutFlag | 是 | 否 |
-| 160 | probeSeconds | 是 | 否 |
-| 161 | probeKillSeconds | 是 | 否 |
-| 162 | probeInterval | 是 | 否 |
-| 163 | lossyColumns | 是 | 否 |
-| 164 | fPrecision | 是 | 否 |
-| 165 | dPrecision | 是 | 否 |
-| 166 | maxRange | 是 | 否 |
-| 167 | range | 是 | 否 |
+| 18 | queryRsmaTolerance | 否 | 是 |
+| 19 | queryBufferSize | 是 | 是 |
+| 20 | maxNumOfDistinctRes | 是 | 是 |
+| 21 | minSlidingTime | 是 | 是 |
+| 22 | minIntervalTime | 是 | 是 |
+| 23 | countAlwaysReturnValue | 是 | 是 |
+| 24 | dataDir | 是 | 是 |
+| 25 | minimalDataDirGB | 是 | 是 |
+| 26 | supportVnodes | 否 | 是 |
+| 27 | tempDir | 是 | 是 |
+| 28 | minimalTmpDirGB | 是 | 是 |
+| 29 | compressMsgSize | 是 | 是 |
+| 30 | compressColData | 是 | 是 |
+| 31 | smlChildTableName | 是 | 是 |
+| 32 | smlTagName | 是 | 是 |
+| 33 | smlDataFormat | 否 | 是 |
+| 34 | statusInterval | 是 | 是 |
+| 35 | shellActivityTimer | 是 | 是 |
+| 36 | transPullupInterval | 否 | 是 |
+| 37 | mqRebalanceInterval | 否 | 是 |
+| 38 | ttlUnit | 否 | 是 |
+| 39 | ttlPushInterval | 否 | 是 |
+| 40 | numOfTaskQueueThreads | 否 | 是 |
+| 41 | numOfRpcThreads | 否 | 是 |
+| 42 | numOfCommitThreads | 是 | 是 |
+| 43 | numOfMnodeReadThreads | 否 | 是 |
+| 44 | numOfVnodeQueryThreads | 否 | 是 |
+| 45 | numOfVnodeStreamThreads | 否 | 是 |
+| 46 | numOfVnodeFetchThreads | 否 | 是 |
+| 47 | numOfVnodeWriteThreads | 否 | 是 |
+| 48 | numOfVnodeSyncThreads | 否 | 是 |
+| 49 | numOfVnodeRsmaThreads | 否 | 是 |
+| 50 | numOfQnodeQueryThreads | 否 | 是 |
+| 51 | numOfQnodeFetchThreads | 否 | 是 |
+| 52 | numOfSnodeSharedThreads | 否 | 是 |
+| 53 | numOfSnodeUniqueThreads | 否 | 是 |
+| 54 | rpcQueueMemoryAllowed | 否 | 是 |
+| 55 | logDir | 是 | 是 |
+| 56 | minimalLogDirGB | 是 | 是 |
+| 57 | numOfLogLines | 是 | 是 |
+| 58 | asyncLog | 是 | 是 |
+| 59 | logKeepDays | 是 | 是 |
+| 60 | debugFlag | 是 | 是 |
+| 61 | tmrDebugFlag | 是 | 是 |
+| 62 | uDebugFlag | 是 | 是 |
+| 63 | rpcDebugFlag | 是 | 是 |
+| 64 | jniDebugFlag | 是 | 是 |
+| 65 | qDebugFlag | 是 | 是 |
+| 66 | cDebugFlag | 是 | 是 |
+| 67 | dDebugFlag | 是 | 是 |
+| 68 | vDebugFlag | 是 | 是 |
+| 69 | mDebugFlag | 是 | 是 |
+| 70 | wDebugFlag | 是 | 是 |
+| 71 | sDebugFlag | 是 | 是 |
+| 72 | tsdbDebugFlag | 是 | 是 |
+| 73 | tqDebugFlag | 否 | 是 |
+| 74 | fsDebugFlag | 是 | 是 |
+| 75 | udfDebugFlag | 否 | 是 |
+| 76 | smaDebugFlag | 否 | 是 |
+| 77 | idxDebugFlag | 否 | 是 |
+| 78 | tdbDebugFlag | 否 | 是 |
+| 79 | metaDebugFlag | 否 | 是 |
+| 80 | timezone | 是 | 是 |
+| 81 | locale | 是 | 是 |
+| 82 | charset | 是 | 是 |
+| 83 | udf | 是 | 是 |
+| 84 | enableCoreFile | 是 | 是 |
+| 85 | arbitrator | 是 | 否 |
+| 86 | numOfThreadsPerCore | 是 | 否 |
+| 87 | numOfMnodes | 是 | 否 |
+| 88 | vnodeBak | 是 | 否 |
+| 89 | balance | 是 | 否 |
+| 90 | balanceInterval | 是 | 否 |
+| 91 | offlineThreshold | 是 | 否 |
+| 92 | role | 是 | 否 |
+| 93 | dnodeNopLoop | 是 | 否 |
+| 94 | keepTimeOffset | 是 | 否 |
+| 95 | rpcTimer | 是 | 否 |
+| 96 | rpcMaxTime | 是 | 否 |
+| 97 | rpcForceTcp | 是 | 否 |
+| 98 | tcpConnTimeout | 是 | 否 |
+| 99 | syncCheckInterval | 是 | 否 |
+| 100 | maxTmrCtrl | 是 | 否 |
+| 101 | monitorReplica | 是 | 否 |
+| 102 | smlTagNullName | 是 | 否 |
+| 103 | keepColumnName | 是 | 否 |
+| 104 | ratioOfQueryCores | 是 | 否 |
+| 105 | maxStreamCompDelay | 是 | 否 |
+| 106 | maxFirstStreamCompDelay | 是 | 否 |
+| 107 | retryStreamCompDelay | 是 | 否 |
+| 108 | streamCompDelayRatio | 是 | 否 |
+| 109 | maxVgroupsPerDb | 是 | 否 |
+| 110 | maxTablesPerVnode | 是 | 否 |
+| 111 | minTablesPerVnode | 是 | 否 |
+| 112 | tableIncStepPerVnode | 是 | 否 |
+| 113 | cache | 是 | 否 |
+| 114 | blocks | 是 | 否 |
+| 115 | days | 是 | 否 |
+| 116 | keep | 是 | 否 |
+| 117 | minRows | 是 | 否 |
+| 118 | maxRows | 是 | 否 |
+| 119 | quorum | 是 | 否 |
+| 120 | comp | 是 | 否 |
+| 121 | walLevel | 是 | 否 |
+| 122 | fsync | 是 | 否 |
+| 123 | replica | 是 | 否 |
+| 124 | partitions | 是 | 否 |
+| 125 | quorum | 是 | 否 |
+| 126 | update | 是 | 否 |
+| 127 | cachelast | 是 | 否 |
+| 128 | maxSQLLength | 是 | 否 |
+| 129 | maxWildCardsLength | 是 | 否 |
+| 130 | maxRegexStringLen | 是 | 否 |
+| 131 | maxNumOfOrderedRes | 是 | 否 |
+| 132 | maxConnections | 是 | 否 |
+| 133 | mnodeEqualVnodeNum | 是 | 否 |
+| 134 | http | 是 | 否 |
+| 135 | httpEnableRecordSql | 是 | 否 |
+| 136 | httpMaxThreads | 是 | 否 |
+| 137 | restfulRowLimit | 是 | 否 |
+| 138 | httpDbNameMandatory | 是 | 否 |
+| 139 | httpKeepAlive | 是 | 否 |
+| 140 | enableRecordSql | 是 | 否 |
+| 141 | maxBinaryDisplayWidth | 是 | 否 |
+| 142 | stream | 是 | 否 |
+| 143 | retrieveBlockingModel | 是 | 否 |
+| 144 | tsdbMetaCompactRatio | 是 | 否 |
+| 145 | defaultJSONStrType | 是 | 否 |
+| 146 | walFlushSize | 是 | 否 |
+| 147 | keepTimeOffset | 是 | 否 |
+| 148 | flowctrl | 是 | 否 |
+| 149 | slaveQuery | 是 | 否 |
+| 150 | adjustMaster | 是 | 否 |
+| 151 | topicBinaryLen | 是 | 否 |
+| 152 | telegrafUseFieldNum | 是 | 否 |
+| 153 | deadLockKillQuery | 是 | 否 |
+| 154 | clientMerge | 是 | 否 |
+| 155 | sdbDebugFlag | 是 | 否 |
+| 156 | odbcDebugFlag | 是 | 否 |
+| 157 | httpDebugFlag | 是 | 否 |
+| 158 | monDebugFlag | 是 | 否 |
+| 159 | cqDebugFlag | 是 | 否 |
+| 160 | shortcutFlag | 是 | 否 |
+| 161 | probeSeconds | 是 | 否 |
+| 162 | probeKillSeconds | 是 | 否 |
+| 163 | probeInterval | 是 | 否 |
+| 164 | lossyColumns | 是 | 否 |
+| 165 | fPrecision | 是 | 否 |
+| 166 | dPrecision | 是 | 否 |
+| 167 | maxRange | 是 | 否 |
+| 168 | range | 是 | 否 |
diff --git a/docs/zh/14-reference/13-schemaless/13-schemaless.md b/docs/zh/14-reference/13-schemaless/13-schemaless.md
index ae4280e26a64e2d10534a0faaf70ca0704cf58a6..07d5b0692bc9ac53d8c46cc0542c9512c2aa536e 100644
--- a/docs/zh/14-reference/13-schemaless/13-schemaless.md
+++ b/docs/zh/14-reference/13-schemaless/13-schemaless.md
@@ -3,7 +3,7 @@ title: Schemaless 写入
description: 'Schemaless 写入方式,可以免于预先创建超级表/子表的步骤,随着数据写入接口能够自动创建与数据对应的存储结构'
---
-在物联网应用中,常会采集比较多的数据项,用于实现智能控制、业务分析、设备监控等。由于应用逻辑的版本升级,或者设备自身的硬件调整等原因,数据采集项就有可能比较频繁地出现变动。为了在这种情况下方便地完成数据记录工作,TDengine提供调用 Schemaless 写入方式,可以免于预先创建超级表/子表的步骤,随着数据写入接口能够自动创建与数据对应的存储结构。并且在必要时,Schemaless
+在物联网应用中,常会采集比较多的数据项,用于实现智能控制、业务分析、设备监控等。由于应用逻辑的版本升级,或者设备自身的硬件调整等原因,数据采集项就有可能比较频繁地出现变动。为了在这种情况下方便地完成数据记录工作,TDengine 提供调用 Schemaless 写入方式,可以免于预先创建超级表/子表的步骤,随着数据写入接口能够自动创建与数据对应的存储结构。并且在必要时,Schemaless
将自动增加必要的数据列,保证用户写入的数据可以被正确存储。
无模式写入方式建立的超级表及其对应的子表与通过 SQL 直接建立的超级表和子表完全没有区别,你也可以通过,SQL 语句直接向其中写入数据。需要注意的是,通过无模式写入方式建立的表,其表名是基于标签值按照固定的映射规则生成,所以无法明确地进行表意,缺乏可读性。
@@ -36,14 +36,14 @@ tag_set 中的所有的数据自动转化为 nchar 数据类型,并不需要
- 对空格、等号(=)、逗号(,)、双引号("),前面需要使用反斜杠(\)进行转义。(都指的是英文半角符号)
- 数值类型将通过后缀来区分数据类型:
-| **序号** | **后缀** | **映射类型** | **大小(字节)** |
-| -------- | -------- | ------------ | -------------- |
-| 1 | 无或 f64 | double | 8 |
-| 2 | f32 | float | 4 |
-| 3 | i8/u8 | TinyInt/UTinyInt | 1 |
-| 4 | i16/u16 | SmallInt/USmallInt | 2 |
-| 5 | i32/u32 | Int/UInt | 4 |
-| 6 | i64/i/u64/u | BigInt/BigInt/UBigInt/UBigInt | 8 |
+| **序号** | **后缀** | **映射类型** | **大小(字节)** |
+| -------- | ----------- | ----------------------------- | -------------- |
+| 1 | 无或 f64 | double | 8 |
+| 2 | f32 | float | 4 |
+| 3 | i8/u8 | TinyInt/UTinyInt | 1 |
+| 4 | i16/u16 | SmallInt/USmallInt | 2 |
+| 5 | i32/u32 | Int/UInt | 4 |
+| 6 | i64/i/u64/u | BigInt/BigInt/UBigInt/UBigInt | 8 |
- t, T, true, True, TRUE, f, F, false, False 将直接作为 BOOL 型来处理。
@@ -67,9 +67,12 @@ st,t1=3,t2=4,t3=t3 c1=3i64,c3="passit",c2=false,c4=4f64 1626006833639000000
"measurement,tag_key1=tag_value1,tag_key2=tag_value2"
```
+:::tip
需要注意的是,这里的 tag_key1, tag_key2 并不是用户输入的标签的原始顺序,而是使用了标签名称按照字符串升序排列后的结果。所以,tag_key1 并不是在行协议中输入的第一个标签。
排列完成以后计算该字符串的 MD5 散列值 "md5_val"。然后将计算的结果与字符串组合生成表名:“t_md5_val”。其中的 “t_” 是固定的前缀,每个通过该映射关系自动生成的表都具有该前缀。
-为了让用户可以指定生成的表名,可以通过配置smlChildTableName来指定(比如 配置smlChildTableName=tname 插入数据为st,tname=cpu1,t1=4 c1=3 1626006833639000000 则创建的表名为cpu1,注意如果多行数据tname相同,但是后面的tag_set不同,则使用第一次自动建表时指定的tag_set,其他的会忽略)。
+:::tip
+为了让用户可以指定生成的表名,可以通过在taos.cfg里配置 smlChildTableName 参数来指定。
+举例如下:配置 smlChildTableName=tname 插入数据为 st,tname=cpu1,t1=4 c1=3 1626006833639000000 则创建的表名为 cpu1,注意如果多行数据 tname 相同,但是后面的 tag_set 不同,则使用第一行自动建表时指定的 tag_set,其他的行会忽略)。
2. 如果解析行协议获得的超级表不存在,则会创建这个超级表(不建议手动创建超级表,不然插入数据可能异常)。
3. 如果解析行协议获得子表不存在,则 Schemaless 会按照步骤 1 或 2 确定的子表名来创建子表。
@@ -78,11 +81,11 @@ st,t1=3,t2=4,t3=t3 c1=3i64,c3="passit",c2=false,c4=4f64 1626006833639000000
NULL。
6. 对 BINARY 或 NCHAR 列,如果数据行中所提供值的长度超出了列类型的限制,自动增加该列允许存储的字符长度上限(只增不减),以保证数据的完整保存。
7. 整个处理过程中遇到的错误会中断写入过程,并返回错误代码。
-8. 为了提高写入的效率,默认假设同一个超级表中field_set的顺序是一样的(第一条数据包含所有的field,后面的数据按照这个顺序),如果顺序不一样,需要配置参数smlDataFormat为false,否则,数据写入按照相同顺序写入,库中数据会异常。
+8. 为了提高写入的效率,默认假设同一个超级表中 field_set 的顺序是一样的(第一条数据包含所有的 field,后面的数据按照这个顺序),如果顺序不一样,需要配置参数 smlDataFormat 为 false,否则,数据写入按照相同顺序写入,库中数据会异常。
:::tip
无模式所有的处理逻辑,仍会遵循 TDengine 对数据结构的底层限制,例如每行数据的总长度不能超过
-16KB。这方面的具体限制约束请参见 [TAOS SQL 边界限制](/taos-sql/limit)
+16KB。这方面的具体限制约束请参见 [TDengine SQL 边界限制](/taos-sql/limit)
:::
diff --git a/docs/zh/14-reference/14-taosKeeper.md b/docs/zh/14-reference/14-taosKeeper.md
index f1165c9d0f01b6812c261c6f095f38fca55c44d8..7780dc2fe9a5742ce9d6855599ca080804a493b5 100644
--- a/docs/zh/14-reference/14-taosKeeper.md
+++ b/docs/zh/14-reference/14-taosKeeper.md
@@ -1,7 +1,7 @@
---
sidebar_label: taosKeeper
title: taosKeeper
-description: TDengine taosKeeper 使用说明
+description: TDengine 3.0 版本监控指标的导出工具
---
## 简介
@@ -22,26 +22,36 @@ taosKeeper 安装方式:
### 配置和运行方式
-
-taosKeeper 需要在操作系统终端执行,该工具支持 [配置文件启动](#配置文件启动)。
+taosKeeper 需要在操作系统终端执行,该工具支持三种配置方式:[命令行参数](#命令行参数启动)、[环境变量](#环境变量启动) 和 [配置文件](#配置文件启动)。优先级为:命令行参数、环境变量、配置文件参数。
**在运行 taosKeeper 之前要确保 TDengine 集群与 taosAdapter 已经在正确运行。** 并且 TDengine 已经开启监控服务,具体请参考:[TDengine 监控配置](../config/#监控相关)。
-
+
+### 环境变量启动
+
+通过设置环境变量达到控制启动参数的目的,通常在容器中运行时使用。
+
+```shell
+$ export TAOS_KEEPER_TDENGINE_HOST=192.168.64.3
+
+$ taoskeeper
+```
+
+具体参数列表请参照 `taoskeeper -h` 输入结果。
+
### 配置文件启动
执行以下命令即可快速体验 taosKeeper。当不指定 taosKeeper 配置文件时,优先使用 `/etc/taos/keeper.toml` 配置,否则将使用默认配置。
```shell
-taoskeeper -c
+$ taoskeeper -c
```
**下面是配置文件的示例:**
@@ -69,7 +79,7 @@ password = "taosdata"
# 需要被监控的 taosAdapter
[taosAdapter]
-address = ["127.0.0.1:6041","192.168.1.95:6041"]
+address = ["127.0.0.1:6041"]
[metrics]
# 监控指标前缀
@@ -82,7 +92,7 @@ cluster = "production"
database = "log"
# 指定需要监控的普通表
-tables = ["normal_table"]
+tables = []
```
### 获取监控指标
@@ -110,7 +120,7 @@ Query OK, 1 rows in database (0.036162s)
#### 导出监控指标
```shell
-curl http://127.0.0.1:6043/metrics
+$ curl http://127.0.0.1:6043/metrics
```
部分结果集:
@@ -131,4 +141,4 @@ taos_cluster_info_dnodes_total{cluster_id="5981392874047724755"} 1
# HELP taos_cluster_info_first_ep
# TYPE taos_cluster_info_first_ep gauge
taos_cluster_info_first_ep{cluster_id="5981392874047724755",value="hlb:6030"} 1
-```
\ No newline at end of file
+```
diff --git a/docs/zh/14-reference/_telegraf.mdx b/docs/zh/14-reference/_telegraf.mdx
index bae46d66062ad7d5bfc2487900c02bd7a19ce4f6..3f92e5dde03ef2d59d4e14cb47e82fbd71f290d2 100644
--- a/docs/zh/14-reference/_telegraf.mdx
+++ b/docs/zh/14-reference/_telegraf.mdx
@@ -22,6 +22,5 @@
username = "root"
password = "taosdata"
data_format = "influx"
- influx_max_line_bytes = 250
```
diff --git a/docs/zh/17-operation/01-pkg-install.md b/docs/zh/17-operation/01-pkg-install.md
index 671dc00cee070b4743453727d661f8b086cd0261..6d93c1697b1e0936b3f6539d3b1fb95db0baa956 100644
--- a/docs/zh/17-operation/01-pkg-install.md
+++ b/docs/zh/17-operation/01-pkg-install.md
@@ -47,7 +47,7 @@ lrwxrwxrwx 1 root root 13 Feb 22 09:34 log -> /var/log/taos/
-卸载命令如下:
+TDengine 卸载命令如下:
```
$ sudo apt-get remove tdengine
@@ -65,10 +65,26 @@ TDengine is removed successfully!
```
+taosTools 卸载命令如下:
+
+```
+$ sudo apt remove taostools
+Reading package lists... Done
+Building dependency tree
+Reading state information... Done
+The following packages will be REMOVED:
+ taostools
+0 upgraded, 0 newly installed, 1 to remove and 0 not upgraded.
+After this operation, 68.3 MB disk space will be freed.
+Do you want to continue? [Y/n]
+(Reading database ... 147973 files and directories currently installed.)
+Removing taostools (2.1.2) ...
+```
+
-卸载命令如下:
+TDengine 卸载命令如下:
```
$ sudo dpkg -r tdengine
@@ -78,28 +94,52 @@ TDengine is removed successfully!
```
+taosTools 卸载命令如下:
+
+```
+$ sudo dpkg -r taostools
+(Reading database ... 147973 files and directories currently installed.)
+Removing taostools (2.1.2) ...
+```
+
-卸载命令如下:
+卸载 TDengine 命令如下:
```
$ sudo rpm -e tdengine
TDengine is removed successfully!
```
+卸载 taosTools 命令如下:
+
+```
+sudo rpm -e taostools
+taosToole is removed successfully!
+```
+
-卸载命令如下:
+卸载 TDengine 命令如下:
```
$ rmtaos
TDengine is removed successfully!
```
+卸载 taosTools 命令如下:
+
+```
+$ rmtaostools
+Start to uninstall taos tools ...
+
+taos tools is uninstalled successfully!
+```
+
在 C:\TDengine 目录下,通过运行 unins000.exe 卸载程序来卸载 TDengine。
diff --git a/docs/zh/17-operation/08-export.md b/docs/zh/17-operation/08-export.md
index ecc3b2f1105b6ce37c19e747e2afc4cfc145f0d4..44247e28bdf5ec48ccd05ab6f7e4d3558cf23103 100644
--- a/docs/zh/17-operation/08-export.md
+++ b/docs/zh/17-operation/08-export.md
@@ -7,7 +7,7 @@ description: 如何导出 TDengine 中的数据
## 按表导出 CSV 文件
-如果用户需要导出一个表或一个 STable 中的数据,可在 taos shell 中运行:
+如果用户需要导出一个表或一个 STable 中的数据,可在 TDengine CLI 中运行:
```sql
select * from >> data.csv;
diff --git a/docs/zh/20-third-party/03-telegraf.md b/docs/zh/20-third-party/03-telegraf.md
index 84883e665a84db89d564314a0e47f9caab04d6ff..71bb1b3885d51055ac98e7bc8fa99ad970c2ce40 100644
--- a/docs/zh/20-third-party/03-telegraf.md
+++ b/docs/zh/20-third-party/03-telegraf.md
@@ -16,6 +16,7 @@ Telegraf 是一款十分流行的指标采集开源软件。在数据采集和
- TDengine 集群已经部署并正常运行
- taosAdapter 已经安装并正常运行。具体细节请参考 [taosAdapter 的使用手册](/reference/taosadapter)
- Telegraf 已经安装。安装 Telegraf 请参考[官方文档](https://docs.influxdata.com/telegraf/v1.22/install/)
+- Telegraf 默认采集系统运行状态数据。通过使能[输入插件](https://docs.influxdata.com/telegraf/v1.22/plugins/)方式可以输出[其他格式](https://docs.influxdata.com/telegraf/v1.24/data_formats/input/)的数据到 Telegraf 再写入到 TDengine中。
## 配置步骤
@@ -32,11 +33,12 @@ sudo systemctl restart telegraf
```
taos> show databases;
- name | created_time | ntables | vgroups | replica | quorum | days | keep | cache(MB) | blocks | minrows | maxrows | wallevel | fsync | comp | cachelast | precision | update | status |
-====================================================================================================================================================================================================================================================================================
- telegraf | 2022-04-20 08:47:53.488 | 22 | 1 | 1 | 1 | 10 | 3650 | 16 | 6 | 100 | 4096 | 1 | 3000 | 2 | 0 | ns | 2 | ready |
- log | 2022-04-20 07:19:50.260 | 9 | 1 | 1 | 1 | 10 | 3650 | 16 | 6 | 100 | 4096 | 1 | 3000 | 2 | 0 | ms | 0 | ready |
-Query OK, 2 row(s) in set (0.002401s)
+ name |
+=================================
+ information_schema |
+ performance_schema |
+ telegraf |
+Query OK, 3 rows in database (0.010568s)
taos> use telegraf;
Database changed.
@@ -66,3 +68,11 @@ taos> select * from telegraf.system limit 10;
|
Query OK, 3 row(s) in set (0.013269s)
```
+
+:::note
+
+- TDengine 接收 influxdb 格式数据默认生成的子表名是根据规则生成的唯一 ID 值。
+用户如需指定生成的表名,可以通过在 taos.cfg 里配置 smlChildTableName 参数来指定。如果通过控制输入数据格式,即可利用 TDengine 这个功能指定生成的表名。
+举例如下:配置 smlChildTableName=tname 插入数据为 st,tname=cpu1,t1=4 c1=3 1626006833639000000 则创建的表名为 cpu1。如果多行数据 tname 相同,但是后面的 tag_set 不同,则使用第一行自动建表时指定的 tag_set,其他的行会忽略)。[TDengine 无模式写入参考指南](/reference/schemaless/#无模式写入行协议)
+:::
+
diff --git a/docs/zh/20-third-party/12-google-data-studio.md b/docs/zh/20-third-party/12-google-data-studio.md
new file mode 100644
index 0000000000000000000000000000000000000000..bc06f0ea3261bcd93247e0c7b8e1d6c3628f3121
--- /dev/null
+++ b/docs/zh/20-third-party/12-google-data-studio.md
@@ -0,0 +1,39 @@
+---
+sidebar_label: Google Data Studio
+title: TDengine Google Data Studio Connector
+description: 使用 Google Data Studio 存取 TDengine 数据的详细指南
+---
+
+Google Data Studio 是一个强大的报表可视化工具,它提供了丰富的数据图表和数据连接,可以非常方便地按照既定模板生成报表。因其简便易用和生态丰富而在数据分析领域得到一众数据科学家的青睐。
+
+Data Studio 可以支持多种数据来源,除了诸如 Google Analytics、Google AdWords、Search Console、BigQuery 等 Google 自己的服务之外,用户也可以直接将离线文件上传至 Google Cloud Storage,或是通过连接器来接入其它数据源。
+
+
+
+目前 TDengine 连接器已经发布到 Google Data Studio 应用商店,你可以在 “Connect to Data” 页面下直接搜索 TDengine,将其选作数据源。
+
+
+
+接下来选择 AUTHORIZE 按钮。
+
+
+
+设置允许连接自己的账号到外部服务。
+
+
+
+在接下来的页面选择运行 TDengine REST 服务的 URL,并输入用户名、密码、数据库名称、表名称以及查询时间范围,并点击右上角的 CONNECT 按钮。
+
+
+
+连接成功后,就可以使用 GDS 方便地进行数据处理并创建报表了。
+
+
+
+目前的维度和指标规则是:timestamp 类型的字段和 tag 字段会被连接器定义为维度,而其他类型的字段是指标。用户还可以根据自己的需求创建不同的表。
+
+
+
+
+
+
diff --git a/docs/zh/20-third-party/gds/gds-01.webp b/docs/zh/20-third-party/gds/gds-01.webp
new file mode 100644
index 0000000000000000000000000000000000000000..2e5f9e4ff5db1e37718e2397c9a13a9f0e05602d
Binary files /dev/null and b/docs/zh/20-third-party/gds/gds-01.webp differ
diff --git a/docs/zh/20-third-party/gds/gds-02.png.webp b/docs/zh/20-third-party/gds/gds-02.png.webp
new file mode 100644
index 0000000000000000000000000000000000000000..3b3537f5a488019482f94452e70bd1bd79867ab5
Binary files /dev/null and b/docs/zh/20-third-party/gds/gds-02.png.webp differ
diff --git a/docs/zh/20-third-party/gds/gds-03.png.webp b/docs/zh/20-third-party/gds/gds-03.png.webp
new file mode 100644
index 0000000000000000000000000000000000000000..5719436d5b2f21aa861067b966511e4b34d17dce
Binary files /dev/null and b/docs/zh/20-third-party/gds/gds-03.png.webp differ
diff --git a/docs/zh/20-third-party/gds/gds-04.png.webp b/docs/zh/20-third-party/gds/gds-04.png.webp
new file mode 100644
index 0000000000000000000000000000000000000000..ddaae5c1a63b6b4db692e12491df55b88dcaadee
Binary files /dev/null and b/docs/zh/20-third-party/gds/gds-04.png.webp differ
diff --git a/docs/zh/20-third-party/gds/gds-05.png.webp b/docs/zh/20-third-party/gds/gds-05.png.webp
new file mode 100644
index 0000000000000000000000000000000000000000..9a917678fc7e60f0a739fa1e2b0f4fa010d12708
Binary files /dev/null and b/docs/zh/20-third-party/gds/gds-05.png.webp differ
diff --git a/docs/zh/20-third-party/gds/gds-06.png.webp b/docs/zh/20-third-party/gds/gds-06.png.webp
new file mode 100644
index 0000000000000000000000000000000000000000..c76b68d32b5907bd5ba4e4010456f2ca5303448f
Binary files /dev/null and b/docs/zh/20-third-party/gds/gds-06.png.webp differ
diff --git a/docs/zh/20-third-party/gds/gds-07.png.webp b/docs/zh/20-third-party/gds/gds-07.png.webp
new file mode 100644
index 0000000000000000000000000000000000000000..1386ae9c4db4f2465dd071afc5a047658b47031c
Binary files /dev/null and b/docs/zh/20-third-party/gds/gds-07.png.webp differ
diff --git a/docs/zh/20-third-party/gds/gds-08.png.webp b/docs/zh/20-third-party/gds/gds-08.png.webp
new file mode 100644
index 0000000000000000000000000000000000000000..59dcf8b31df8bde8d4073ee0c7b1c7bdd7bd439d
Binary files /dev/null and b/docs/zh/20-third-party/gds/gds-08.png.webp differ
diff --git a/docs/zh/20-third-party/gds/gds-09.png.webp b/docs/zh/20-third-party/gds/gds-09.png.webp
new file mode 100644
index 0000000000000000000000000000000000000000..b94439f211a814f66d41231c9386c57f3ffe8322
Binary files /dev/null and b/docs/zh/20-third-party/gds/gds-09.png.webp differ
diff --git a/docs/zh/20-third-party/gds/gds-10.png.webp b/docs/zh/20-third-party/gds/gds-10.png.webp
new file mode 100644
index 0000000000000000000000000000000000000000..a63cad9e9a3d412b1132359506530498fb1a0e57
Binary files /dev/null and b/docs/zh/20-third-party/gds/gds-10.png.webp differ
diff --git a/docs/zh/20-third-party/gds/gds-11.png.webp b/docs/zh/20-third-party/gds/gds-11.png.webp
new file mode 100644
index 0000000000000000000000000000000000000000..fc38cd9a29c00afa48238741c33b439f737a7b8f
Binary files /dev/null and b/docs/zh/20-third-party/gds/gds-11.png.webp differ
diff --git a/docs/zh/21-tdinternal/01-arch.md b/docs/zh/21-tdinternal/01-arch.md
index d74366d129d2c8fd69f2e44e1868a382b3b236c0..b128b9d438371f22763b0ccea605e19d2dc9aa7b 100644
--- a/docs/zh/21-tdinternal/01-arch.md
+++ b/docs/zh/21-tdinternal/01-arch.md
@@ -26,7 +26,7 @@ TDengine 分布式架构的逻辑结构图如下:
**管理节点(mnode):** 一个虚拟的逻辑单元,负责所有数据节点运行状态的监控和维护,以及节点之间的负载均衡(图中 M)。同时,管理节点也负责元数据(包括用户、数据库、超级表等)的存储和管理,因此也称为 Meta Node。TDengine 集群中可配置多个(最多不超过 3 个)mnode,它们自动构建成为一个虚拟管理节点组(图中 M1,M2,M3)。mnode 支持多副本,采用 RAFT 一致性协议,保证系统的高可用与高可靠,任何数据更新操作只能在 Leader 上进行。mnode 集群的第一个节点在集群部署时自动完成,其他节点的创建与删除由用户通过 SQL 命令完成。每个 dnode 上至多有一个 mnode,由所属的数据节点的 EP 来唯一标识。每个 dnode 通过内部消息交互自动获取整个集群中所有 mnode 所在的 dnode 的 EP。
-**弹性计算节点(qnode):** 一个虚拟的逻辑单元,运行查询计算任务,也包括基于系统表来实现的 show 命令(图中 Q)。集群中可配置多个 qnode,在整个集群内部共享使用(图中 Q1,Q2,Q3)。qnode 不与具体的 DB 绑定,即一个 qnode 可以同时执行多个 DB 的查询任务。每个 dnode 上至多有一个 qnode,由所属的数据节点的 EP 来唯一标识。客户端通过与 mnode 交互,获取可用的 qnode 列表,当没有可用的 qnode 时,计算任务在 vnode 中执行。
+**计算节点(qnode):** 一个虚拟的逻辑单元,运行查询计算任务,也包括基于系统表来实现的 show 命令(图中 Q)。集群中可配置多个 qnode,在整个集群内部共享使用(图中 Q1,Q2,Q3)。qnode 不与具体的 DB 绑定,即一个 qnode 可以同时执行多个 DB 的查询任务。每个 dnode 上至多有一个 qnode,由所属的数据节点的 EP 来唯一标识。客户端通过与 mnode 交互,获取可用的 qnode 列表,当没有可用的 qnode 时,计算任务在 vnode 中执行。当一个查询执行时,依赖执行计划,调度器会安排一个或多个 qnode 来一起执行。qnode 能从 vnode 获取数据,也可以将自己的计算结果发给其他 qnode 做进一步的处理。通过引入独立的计算节点,TDengine 实现了存储和计算分离。
**流计算节点(snode):** 一个虚拟的逻辑单元,只运行流计算任务(图中 S)。集群中可配置多个 snode,在整个集群内部共享使用(图中 S1,S2,S3)。snode 不与具体的 stream 绑定,即一个 snode 可以同时执行多个 stream 的计算任务。每个 dnode 上至多有一个 snode,由所属的数据节点的 EP 来唯一标识。由 mnode 调度可用的 snode 完成流计算任务,当没有可用的 snode 时,流计算任务在 vnode 中执行。
@@ -288,7 +288,7 @@ TDengine 对每个数据采集点单独建表,但在实际应用中经常需
7. vnode 返回本节点的查询计算结果;
8. qnode 完成多节点数据聚合后将最终查询结果返回给客户端;
-由于 TDengine 在 vnode 内将标签数据与时序数据分离存储,通过在内存里过滤标签数据,先找到需要参与聚合操作的表的集合,将需要扫描的数据集大幅减少,大幅提升聚合计算速度。同时,由于数据分布在多个 vnode/dnode,聚合计算操作在多个 vnode 里并发进行,又进一步提升了聚合的速度。 对普通表的聚合函数以及绝大部分操作都适用于超级表,语法完全一样,细节请看 TAOS SQL。
+由于 TDengine 在 vnode 内将标签数据与时序数据分离存储,通过在内存里过滤标签数据,先找到需要参与聚合操作的表的集合,将需要扫描的数据集大幅减少,大幅提升聚合计算速度。同时,由于数据分布在多个 vnode/dnode,聚合计算操作在多个 vnode 里并发进行,又进一步提升了聚合的速度。 对普通表的聚合函数以及绝大部分操作都适用于超级表,语法完全一样,细节请看 TDengine SQL。
### 预计算
diff --git a/docs/zh/25-application/01-telegraf.md b/docs/zh/25-application/01-telegraf.md
index 4e9597f96454730ebcdee5adeebf55439923e8e7..6338264d171b9246b2e99d418035e061d1068a4b 100644
--- a/docs/zh/25-application/01-telegraf.md
+++ b/docs/zh/25-application/01-telegraf.md
@@ -61,7 +61,6 @@ IT 运维监测数据通常都是对时间特性比较敏感的数据,例如
username = ""
password = ""
data_format = "influx"
- influx_max_line_bytes = 250
```
然后重启 Telegraf:
diff --git a/docs/zh/27-train-faq/01-faq.md b/docs/zh/27-train-faq/01-faq.md
index 2fd9dff80b2e1f580b7d66cf94b34a6781698b24..0a46db4a28862e07dd86e427e320e8b2d1276034 100644
--- a/docs/zh/27-train-faq/01-faq.md
+++ b/docs/zh/27-train-faq/01-faq.md
@@ -116,7 +116,7 @@ charset UTF-8
### 9. 表名显示不全
-由于 taos shell 在终端中显示宽度有限,有可能比较长的表名显示不全,如果按照显示的不全的表名进行相关操作会发生 Table does not exist 错误。解决方法可以是通过修改 taos.cfg 文件中的设置项 maxBinaryDisplayWidth, 或者直接输入命令 set max_binary_display_width 100。或者在命令结尾使用 \G 参数来调整结果的显示方式。
+由于 TDengine CLI 在终端中显示宽度有限,有可能比较长的表名显示不全,如果按照显示的不全的表名进行相关操作会发生 Table does not exist 错误。解决方法可以是通过修改 taos.cfg 文件中的设置项 maxBinaryDisplayWidth, 或者直接输入命令 set max_binary_display_width 100。或者在命令结尾使用 \G 参数来调整结果的显示方式。
### 10. 如何进行数据迁移?
diff --git a/docs/zh/28-releases/01-tdengine.md b/docs/zh/28-releases/01-tdengine.md
index e3e146313115fee12e539a161792234c2df671a5..597e98238b7a7f4e379d9cc2af961087c707cabb 100644
--- a/docs/zh/28-releases/01-tdengine.md
+++ b/docs/zh/28-releases/01-tdengine.md
@@ -6,11 +6,19 @@ description: TDengine 发布历史、Release Notes 及下载链接
import Release from "/components/ReleaseV3";
-## 3.0.0.1
+## 3.0.1.3
-
+
-
+
+
+## 3.0.1.1
+
+
+
+## 3.0.1.0
+
+
diff --git a/docs/zh/28-releases/02-tools.md b/docs/zh/28-releases/02-tools.md
index 61129d74e57504286660a178f757cb816b75dbb5..8604885d3c94d8f03772af0a47ae3610f0b9f565 100644
--- a/docs/zh/28-releases/02-tools.md
+++ b/docs/zh/28-releases/02-tools.md
@@ -6,6 +6,18 @@ description: taosTools 的发布历史、Release Notes 和下载链接
import Release from "/components/ReleaseV3";
-## 2.1.2
+## 2.2.3
-
\ No newline at end of file
+
+
+## 2.2.2
+
+
+
+## 2.2.0
+
+
+
+## 2.1.3
+
+
diff --git a/examples/JDBC/JDBCDemo/README-jdbc-windows.md b/examples/JDBC/JDBCDemo/README-jdbc-windows.md
index 17c5c8df00ab8727d1adfe493d3fbbd32891a676..5a781f40f730218286edb9f6a7f184ee79e7a5fc 100644
--- a/examples/JDBC/JDBCDemo/README-jdbc-windows.md
+++ b/examples/JDBC/JDBCDemo/README-jdbc-windows.md
@@ -129,7 +129,7 @@ https://www.taosdata.com/cn/all-downloads/
192.168.236.136 td01
```
-配置完成后,在命令行内使用taos shell连接server端
+配置完成后,在命令行内使用TDengine CLI连接server端
```shell
C:\TDengine>taos -h td01
diff --git a/examples/c/CMakeLists.txt b/examples/c/CMakeLists.txt
index 9d06dbac6dc3ba9d4dcafe6d8316b52e1b3daeca..4a9007acecaa679dc716c5665eea7f0cd1e34dbb 100644
--- a/examples/c/CMakeLists.txt
+++ b/examples/c/CMakeLists.txt
@@ -13,15 +13,9 @@ IF (TD_LINUX)
#TARGET_LINK_LIBRARIES(epoll taos_static trpc tutil pthread lua)
add_executable(tmq "")
- add_executable(tmq_taosx "")
add_executable(stream_demo "")
add_executable(demoapi "")
- target_sources(tmq_taosx
- PRIVATE
- "tmq_taosx.c"
- )
-
target_sources(tmq
PRIVATE
"tmq.c"
@@ -41,10 +35,6 @@ IF (TD_LINUX)
taos_static
)
- target_link_libraries(tmq_taosx
- taos_static
- )
-
target_link_libraries(stream_demo
taos_static
)
@@ -57,10 +47,6 @@ IF (TD_LINUX)
PUBLIC "${TD_SOURCE_DIR}/include/os"
PRIVATE "${CMAKE_CURRENT_SOURCE_DIR}/inc"
)
- target_include_directories(tmq_taosx
- PUBLIC "${TD_SOURCE_DIR}/include/os"
- PRIVATE "${CMAKE_CURRENT_SOURCE_DIR}/inc"
- )
target_include_directories(stream_demo
PRIVATE "${CMAKE_CURRENT_SOURCE_DIR}/inc"
@@ -73,7 +59,6 @@ IF (TD_LINUX)
)
SET_TARGET_PROPERTIES(tmq PROPERTIES OUTPUT_NAME tmq)
- SET_TARGET_PROPERTIES(tmq_taosx PROPERTIES OUTPUT_NAME tmq_taosx)
SET_TARGET_PROPERTIES(stream_demo PROPERTIES OUTPUT_NAME stream_demo)
SET_TARGET_PROPERTIES(demoapi PROPERTIES OUTPUT_NAME demoapi)
ENDIF ()
diff --git a/examples/c/tmq_taosx.c b/examples/c/tmq_taosx.c
deleted file mode 100644
index d0def4426905b773db948b0cf6f0d22c8733d5da..0000000000000000000000000000000000000000
--- a/examples/c/tmq_taosx.c
+++ /dev/null
@@ -1,480 +0,0 @@
-/*
- * Copyright (c) 2019 TAOS Data, Inc.
- *
- * This program is free software: you can use, redistribute, and/or modify
- * it under the terms of the GNU Affero General Public License, version 3
- * or later ("AGPL"), as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.
- *
- * You should have received a copy of the GNU Affero General Public License
- * along with this program. If not, see .
- */
-
-#include
-#include
-#include
-#include
-#include
-#include "taos.h"
-
-static int running = 1;
-
-static TAOS* use_db(){
- TAOS* pConn = taos_connect("localhost", "root", "taosdata", NULL, 0);
- if (pConn == NULL) {
- return NULL;
- }
-
- TAOS_RES* pRes = taos_query(pConn, "use db_taosx");
- if (taos_errno(pRes) != 0) {
- printf("error in use db_taosx, reason:%s\n", taos_errstr(pRes));
- return NULL;
- }
- taos_free_result(pRes);
- return pConn;
-}
-
-static void msg_process(TAOS_RES* msg) {
- /*memset(buf, 0, 1024);*/
- printf("-----------topic-------------: %s\n", tmq_get_topic_name(msg));
- printf("db: %s\n", tmq_get_db_name(msg));
- printf("vg: %d\n", tmq_get_vgroup_id(msg));
- TAOS *pConn = use_db();
- if (tmq_get_res_type(msg) == TMQ_RES_TABLE_META) {
- char* result = tmq_get_json_meta(msg);
- if (result) {
- printf("meta result: %s\n", result);
- }
- tmq_free_json_meta(result);
- }
-
- tmq_raw_data raw = {0};
- tmq_get_raw(msg, &raw);
- int32_t ret = tmq_write_raw(pConn, raw);
- printf("write raw data: %s\n", tmq_err2str(ret));
-
-// else{
-// while(1){
-// int numOfRows = 0;
-// void *pData = NULL;
-// taos_fetch_raw_block(msg, &numOfRows, &pData);
-// if(numOfRows == 0) break;
-// printf("write data: tbname:%s, numOfRows:%d\n", tmq_get_table_name(msg), numOfRows);
-// int ret = taos_write_raw_block(pConn, numOfRows, pData, tmq_get_table_name(msg));
-// printf("write raw data: %s\n", tmq_err2str(ret));
-// }
-// }
-
- taos_close(pConn);
-}
-
-int32_t init_env() {
- TAOS* pConn = taos_connect("localhost", "root", "taosdata", NULL, 0);
- if (pConn == NULL) {
- return -1;
- }
-
- TAOS_RES* pRes = taos_query(pConn, "drop database if exists db_taosx");
- if (taos_errno(pRes) != 0) {
- printf("error in drop db_taosx, reason:%s\n", taos_errstr(pRes));
- return -1;
- }
- taos_free_result(pRes);
-
- pRes = taos_query(pConn, "create database if not exists db_taosx vgroups 4");
- if (taos_errno(pRes) != 0) {
- printf("error in create db_taosx, reason:%s\n", taos_errstr(pRes));
- return -1;
- }
- taos_free_result(pRes);
-
- pRes = taos_query(pConn, "drop database if exists abc1");
- if (taos_errno(pRes) != 0) {
- printf("error in drop db, reason:%s\n", taos_errstr(pRes));
- return -1;
- }
- taos_free_result(pRes);
-
- pRes = taos_query(pConn, "create database if not exists abc1 vgroups 3");
- if (taos_errno(pRes) != 0) {
- printf("error in create db, reason:%s\n", taos_errstr(pRes));
- return -1;
- }
- taos_free_result(pRes);
-
- pRes = taos_query(pConn, "use abc1");
- if (taos_errno(pRes) != 0) {
- printf("error in use db, reason:%s\n", taos_errstr(pRes));
- return -1;
- }
- taos_free_result(pRes);
-
- pRes = taos_query(pConn,
- "create stable if not exists st1 (ts timestamp, c1 int, c2 float, c3 binary(16)) tags(t1 int, t3 "
- "nchar(8), t4 bool)");
- if (taos_errno(pRes) != 0) {
- printf("failed to create super table st1, reason:%s\n", taos_errstr(pRes));
- return -1;
- }
- taos_free_result(pRes);
-
- pRes = taos_query(pConn, "create table if not exists ct0 using st1 tags(1000, \"ttt\", true)");
- if (taos_errno(pRes) != 0) {
- printf("failed to create child table tu1, reason:%s\n", taos_errstr(pRes));
- return -1;
- }
- taos_free_result(pRes);
-
- pRes = taos_query(pConn, "insert into ct0 values(1626006833600, 1, 2, 'a')");
- if (taos_errno(pRes) != 0) {
- printf("failed to insert into ct0, reason:%s\n", taos_errstr(pRes));
- return -1;
- }
- taos_free_result(pRes);
-
- pRes = taos_query(pConn, "create table if not exists ct1 using st1(t1) tags(2000)");
- if (taos_errno(pRes) != 0) {
- printf("failed to create child table ct1, reason:%s\n", taos_errstr(pRes));
- return -1;
- }
- taos_free_result(pRes);
-
- pRes = taos_query(pConn, "create table if not exists ct2 using st1(t1) tags(NULL)");
- if (taos_errno(pRes) != 0) {
- printf("failed to create child table ct2, reason:%s\n", taos_errstr(pRes));
- return -1;
- }
- taos_free_result(pRes);
-
- pRes = taos_query(pConn, "insert into ct1 values(1626006833600, 3, 4, 'b')");
- if (taos_errno(pRes) != 0) {
- printf("failed to insert into ct1, reason:%s\n", taos_errstr(pRes));
- return -1;
- }
- taos_free_result(pRes);
-
- pRes = taos_query(pConn, "create table if not exists ct3 using st1(t1) tags(3000)");
- if (taos_errno(pRes) != 0) {
- printf("failed to create child table ct3, reason:%s\n", taos_errstr(pRes));
- return -1;
- }
- taos_free_result(pRes);
-
- pRes = taos_query(pConn, "insert into ct3 values(1626006833600, 5, 6, 'c') ct1 values(1626006833601, 2, 3, 'sds') (1626006833602, 4, 5, 'ddd') ct0 values(1626006833602, 4, 3, 'hwj') ct1 values(now+5s, 23, 32, 's21ds')");
- if (taos_errno(pRes) != 0) {
- printf("failed to insert into ct3, reason:%s\n", taos_errstr(pRes));
- return -1;
- }
- taos_free_result(pRes);
-
- pRes = taos_query(pConn, "alter table st1 add column c4 bigint");
- if (taos_errno(pRes) != 0) {
- printf("failed to alter super table st1, reason:%s\n", taos_errstr(pRes));
- return -1;
- }
- taos_free_result(pRes);
-
- pRes = taos_query(pConn, "alter table st1 modify column c3 binary(64)");
- if (taos_errno(pRes) != 0) {
- printf("failed to alter super table st1, reason:%s\n", taos_errstr(pRes));
- return -1;
- }
- taos_free_result(pRes);
-
- pRes = taos_query(pConn, "insert into ct3 values(1626006833605, 53, 63, 'cffffffffffffffffffffffffffff', 8989898899999) (1626006833609, 51, 62, 'c333', 940)");
- if (taos_errno(pRes) != 0) {
- printf("failed to insert into ct3, reason:%s\n", taos_errstr(pRes));
- return -1;
- }
- taos_free_result(pRes);
-
- pRes = taos_query(pConn, "insert into ct3 select * from ct1");
- if (taos_errno(pRes) != 0) {
- printf("failed to insert into ct3, reason:%s\n", taos_errstr(pRes));
- return -1;
- }
- taos_free_result(pRes);
-
- pRes = taos_query(pConn, "alter table st1 add tag t2 binary(64)");
- if (taos_errno(pRes) != 0) {
- printf("failed to alter super table st1, reason:%s\n", taos_errstr(pRes));
- return -1;
- }
- taos_free_result(pRes);
-
- pRes = taos_query(pConn, "alter table ct3 set tag t1=5000");
- if (taos_errno(pRes) != 0) {
- printf("failed to slter child table ct3, reason:%s\n", taos_errstr(pRes));
- return -1;
- }
- taos_free_result(pRes);
-
- pRes = taos_query(pConn, "delete from abc1 .ct3 where ts < 1626006833606");
- if (taos_errno(pRes) != 0) {
- printf("failed to insert into ct3, reason:%s\n", taos_errstr(pRes));
- return -1;
- }
- taos_free_result(pRes);
-
- pRes = taos_query(pConn, "drop table ct3 ct1");
- if (taos_errno(pRes) != 0) {
- printf("failed to drop child table ct3, reason:%s\n", taos_errstr(pRes));
- return -1;
- }
- taos_free_result(pRes);
-
- pRes = taos_query(pConn, "drop table st1");
- if (taos_errno(pRes) != 0) {
- printf("failed to drop super table st1, reason:%s\n", taos_errstr(pRes));
- return -1;
- }
- taos_free_result(pRes);
-
- pRes = taos_query(pConn, "create table if not exists n1(ts timestamp, c1 int, c2 nchar(4))");
- if (taos_errno(pRes) != 0) {
- printf("failed to create normal table n1, reason:%s\n", taos_errstr(pRes));
- return -1;
- }
- taos_free_result(pRes);
-
- pRes = taos_query(pConn, "alter table n1 add column c3 bigint");
- if (taos_errno(pRes) != 0) {
- printf("failed to alter normal table n1, reason:%s\n", taos_errstr(pRes));
- return -1;
- }
- taos_free_result(pRes);
-
- pRes = taos_query(pConn, "alter table n1 modify column c2 nchar(8)");
- if (taos_errno(pRes) != 0) {
- printf("failed to alter normal table n1, reason:%s\n", taos_errstr(pRes));
- return -1;
- }
- taos_free_result(pRes);
-
- pRes = taos_query(pConn, "alter table n1 rename column c3 cc3");
- if (taos_errno(pRes) != 0) {
- printf("failed to alter normal table n1, reason:%s\n", taos_errstr(pRes));
- return -1;
- }
- taos_free_result(pRes);
-
- pRes = taos_query(pConn, "alter table n1 comment 'hello'");
- if (taos_errno(pRes) != 0) {
- printf("failed to alter normal table n1, reason:%s\n", taos_errstr(pRes));
- return -1;
- }
- taos_free_result(pRes);
-
- pRes = taos_query(pConn, "alter table n1 drop column c1");
- if (taos_errno(pRes) != 0) {
- printf("failed to alter normal table n1, reason:%s\n", taos_errstr(pRes));
- return -1;
- }
- taos_free_result(pRes);
-
- pRes = taos_query(pConn, "insert into n1 values(now, 'eeee', 8989898899999) (now+9s, 'c333', 940)");
- if (taos_errno(pRes) != 0) {
- printf("failed to insert into n1, reason:%s\n", taos_errstr(pRes));
- return -1;
- }
- taos_free_result(pRes);
-
- pRes = taos_query(pConn, "drop table n1");
- if (taos_errno(pRes) != 0) {
- printf("failed to drop normal table n1, reason:%s\n", taos_errstr(pRes));
- return -1;
- }
- taos_free_result(pRes);
-
- pRes = taos_query(pConn, "create table jt(ts timestamp, i int) tags(t json)");
- if (taos_errno(pRes) != 0) {
- printf("failed to create super table jt, reason:%s\n", taos_errstr(pRes));
- return -1;
- }
- taos_free_result(pRes);
-
- pRes = taos_query(pConn, "create table jt1 using jt tags('{\"k1\":1, \"k2\":\"hello\"}')");
- if (taos_errno(pRes) != 0) {
- printf("failed to create super table jt, reason:%s\n", taos_errstr(pRes));
- return -1;
- }
- taos_free_result(pRes);
-
- pRes = taos_query(pConn, "create table jt2 using jt tags('')");
- if (taos_errno(pRes) != 0) {
- printf("failed to create super table jt2, reason:%s\n", taos_errstr(pRes));
- return -1;
- }
- taos_free_result(pRes);
-
- pRes = taos_query(pConn,
- "create stable if not exists st1 (ts timestamp, c1 int, c2 float, c3 binary(16)) tags(t1 int, t3 "
- "nchar(8), t4 bool)");
- if (taos_errno(pRes) != 0) {
- printf("failed to create super table st1, reason:%s\n", taos_errstr(pRes));
- return -1;
- }
- taos_free_result(pRes);
-
- pRes = taos_query(pConn, "drop table st1");
- if (taos_errno(pRes) != 0) {
- printf("failed to drop super table st1, reason:%s\n", taos_errstr(pRes));
- return -1;
- }
- taos_free_result(pRes);
-
- taos_close(pConn);
- return 0;
-}
-
-int32_t create_topic() {
- printf("create topic\n");
- TAOS_RES* pRes;
- TAOS* pConn = taos_connect("localhost", "root", "taosdata", NULL, 0);
- if (pConn == NULL) {
- return -1;
- }
-
- pRes = taos_query(pConn, "use abc1");
- if (taos_errno(pRes) != 0) {
- printf("error in use db, reason:%s\n", taos_errstr(pRes));
- return -1;
- }
- taos_free_result(pRes);
-
- pRes = taos_query(pConn, "create topic topic_ctb_column with meta as database abc1");
- if (taos_errno(pRes) != 0) {
- printf("failed to create topic topic_ctb_column, reason:%s\n", taos_errstr(pRes));
- return -1;
- }
- taos_free_result(pRes);
-
- taos_close(pConn);
- return 0;
-}
-
-void tmq_commit_cb_print(tmq_t* tmq, int32_t code, void* param) {
- printf("commit %d tmq %p param %p\n", code, tmq, param);
-}
-
-tmq_t* build_consumer() {
-#if 0
- TAOS* pConn = taos_connect("localhost", "root", "taosdata", NULL, 0);
- assert(pConn != NULL);
-
- TAOS_RES* pRes = taos_query(pConn, "use abc1");
- if (taos_errno(pRes) != 0) {
- printf("error in use db, reason:%s\n", taos_errstr(pRes));
- }
- taos_free_result(pRes);
-#endif
-
- tmq_conf_t* conf = tmq_conf_new();
- tmq_conf_set(conf, "group.id", "tg2");
- tmq_conf_set(conf, "client.id", "my app 1");
- tmq_conf_set(conf, "td.connect.user", "root");
- tmq_conf_set(conf, "td.connect.pass", "taosdata");
- tmq_conf_set(conf, "msg.with.table.name", "true");
- tmq_conf_set(conf, "enable.auto.commit", "true");
-
- /*tmq_conf_set(conf, "experimental.snapshot.enable", "true");*/
-
- tmq_conf_set_auto_commit_cb(conf, tmq_commit_cb_print, NULL);
- tmq_t* tmq = tmq_consumer_new(conf, NULL, 0);
- assert(tmq);
- tmq_conf_destroy(conf);
- return tmq;
-}
-
-tmq_list_t* build_topic_list() {
- tmq_list_t* topic_list = tmq_list_new();
- tmq_list_append(topic_list, "topic_ctb_column");
- /*tmq_list_append(topic_list, "tmq_test_db_multi_insert_topic");*/
- return topic_list;
-}
-
-void basic_consume_loop(tmq_t* tmq, tmq_list_t* topics) {
- int32_t code;
-
- if ((code = tmq_subscribe(tmq, topics))) {
- fprintf(stderr, "%% Failed to start consuming topics: %s\n", tmq_err2str(code));
- printf("subscribe err\n");
- return;
- }
- int32_t cnt = 0;
- while (running) {
- TAOS_RES* tmqmessage = tmq_consumer_poll(tmq, -1);
- if (tmqmessage) {
- cnt++;
- msg_process(tmqmessage);
- /*if (cnt >= 2) break;*/
- /*printf("get data\n");*/
- taos_free_result(tmqmessage);
- /*} else {*/
- /*break;*/
- /*tmq_commit_sync(tmq, NULL);*/
- }
- }
-
- code = tmq_consumer_close(tmq);
- if (code)
- fprintf(stderr, "%% Failed to close consumer: %s\n", tmq_err2str(code));
- else
- fprintf(stderr, "%% Consumer closed\n");
-}
-
-void sync_consume_loop(tmq_t* tmq, tmq_list_t* topics) {
- static const int MIN_COMMIT_COUNT = 1;
-
- int msg_count = 0;
- int32_t code;
-
- if ((code = tmq_subscribe(tmq, topics))) {
- fprintf(stderr, "%% Failed to start consuming topics: %s\n", tmq_err2str(code));
- return;
- }
-
- tmq_list_t* subList = NULL;
- tmq_subscription(tmq, &subList);
- char** subTopics = tmq_list_to_c_array(subList);
- int32_t sz = tmq_list_get_size(subList);
- printf("subscribed topics: ");
- for (int32_t i = 0; i < sz; i++) {
- printf("%s, ", subTopics[i]);
- }
- printf("\n");
- tmq_list_destroy(subList);
-
- while (running) {
- TAOS_RES* tmqmessage = tmq_consumer_poll(tmq, 1000);
- if (tmqmessage) {
- msg_process(tmqmessage);
- taos_free_result(tmqmessage);
-
- /*tmq_commit_sync(tmq, NULL);*/
- /*if ((++msg_count % MIN_COMMIT_COUNT) == 0) tmq_commit(tmq, NULL, 0);*/
- }
- }
-
- code = tmq_consumer_close(tmq);
- if (code)
- fprintf(stderr, "%% Failed to close consumer: %s\n", tmq_err2str(code));
- else
- fprintf(stderr, "%% Consumer closed\n");
-}
-
-int main(int argc, char* argv[]) {
- printf("env init\n");
- if (init_env() < 0) {
- return -1;
- }
- create_topic();
-
- tmq_t* tmq = build_consumer();
- tmq_list_t* topic_list = build_topic_list();
- basic_consume_loop(tmq, topic_list);
- /*sync_consume_loop(tmq, topic_list);*/
-}
diff --git a/examples/lua/OpenResty/so/luaconnector51.so b/examples/lua/OpenResty/so/luaconnector51.so
index 442de6e39f909e1aeb869988722b84795c048855..168d3a9d2406680ceec3c12f29a8157d19aca2ff 100755
Binary files a/examples/lua/OpenResty/so/luaconnector51.so and b/examples/lua/OpenResty/so/luaconnector51.so differ
diff --git a/examples/lua/README.md b/examples/lua/README.md
index 32d6a4cace9bd0bf66238ff32af1d3ecf0285046..5abf0c1aab9bf7f53daf2b95827169e3aaa23fc8 100644
--- a/examples/lua/README.md
+++ b/examples/lua/README.md
@@ -1,6 +1,12 @@
# TDengine driver connector for Lua
It's a Lua implementation for [TDengine](https://github.com/taosdata/TDengine), an open-sourced big data platform designed and optimized for the Internet of Things (IoT), Connected Cars, Industrial IoT, and IT Infrastructure and Application Monitoring. You may need to install Lua5.3 .
+As TDengine is built with lua-enable with default configure, the built-in lua lib conflicts with external lua lib. The following commands require TDengine built with lua-disable.
+To disable built-in lua:
+```
+mkdir debug && cd debug
+cmake .. -DBUILD_LUA=false && cmake --build .
+```
## Lua Dependencies
- Lua:
diff --git a/examples/lua/lua51/lua_connector51.c b/examples/lua/lua51/lua_connector51.c
index 578622bf1fb50f428a4ba44b3b02c4eeed2508b2..8a9051dd0cb3da0fd82e752132d499a5935be758 100644
--- a/examples/lua/lua51/lua_connector51.c
+++ b/examples/lua/lua51/lua_connector51.c
@@ -2,7 +2,7 @@
#include
#include
#include
-#include "../../../../include/client/taos.h"
+#include "taos.h"
#include "lauxlib.h"
#include "lua.h"
#include "lualib.h"
@@ -29,13 +29,13 @@ static int l_connect(lua_State *L){
luaL_checktype(L, 1, LUA_TTABLE);
lua_getfield(L, 1,"host");
- if (lua_isstring(L,-1)){
+ if (lua_isstring(L, -1)){
host = lua_tostring(L, -1);
// printf("host = %s\n", host);
}
lua_getfield(L, 1, "port");
- if (lua_isnumber(L,-1)){
+ if (lua_isnumber(L, -1)){
port = lua_tonumber(L, -1);
//printf("port = %d\n", port);
}
@@ -58,7 +58,7 @@ static int l_connect(lua_State *L){
//printf("password = %s\n", password);
}
- lua_settop(L,0);
+ lua_settop(L, 0);
taos_init();
@@ -102,7 +102,7 @@ static int l_query(lua_State *L){
printf("failed, reason:%s\n", taos_errstr(result));
lua_pushinteger(L, -1);
lua_setfield(L, table_index, "code");
- lua_pushstring(L, taos_errstr(taos));
+ lua_pushstring(L, taos_errstr(result));
lua_setfield(L, table_index, "error");
return 1;
@@ -113,7 +113,6 @@ static int l_query(lua_State *L){
int rows = 0;
int num_fields = taos_field_count(result);
const TAOS_FIELD *fields = taos_fetch_fields(result);
- //char temp[256];
const int affectRows = taos_affected_rows(result);
// printf(" affect rows:%d\r\n", affectRows);
@@ -122,12 +121,12 @@ static int l_query(lua_State *L){
lua_pushinteger(L, affectRows);
lua_setfield(L, table_index, "affected");
lua_newtable(L);
-
+
while ((row = taos_fetch_row(result))) {
//printf("row index:%d\n",rows);
rows++;
- lua_pushnumber(L,rows);
+ lua_pushnumber(L, rows);
lua_newtable(L);
for (int i = 0; i < num_fields; ++i) {
@@ -136,17 +135,21 @@ static int l_query(lua_State *L){
}
lua_pushstring(L,fields[i].name);
-
+ int32_t* length = taos_fetch_lengths(result);
switch (fields[i].type) {
+ case TSDB_DATA_TYPE_UTINYINT:
case TSDB_DATA_TYPE_TINYINT:
lua_pushinteger(L,*((char *)row[i]));
break;
+ case TSDB_DATA_TYPE_USMALLINT:
case TSDB_DATA_TYPE_SMALLINT:
lua_pushinteger(L,*((short *)row[i]));
break;
+ case TSDB_DATA_TYPE_UINT:
case TSDB_DATA_TYPE_INT:
lua_pushinteger(L,*((int *)row[i]));
break;
+ case TSDB_DATA_TYPE_UBIGINT:
case TSDB_DATA_TYPE_BIGINT:
lua_pushinteger(L,*((int64_t *)row[i]));
break;
@@ -156,9 +159,11 @@ static int l_query(lua_State *L){
case TSDB_DATA_TYPE_DOUBLE:
lua_pushnumber(L,*((double *)row[i]));
break;
+ case TSDB_DATA_TYPE_JSON:
case TSDB_DATA_TYPE_BINARY:
case TSDB_DATA_TYPE_NCHAR:
- lua_pushstring(L,(char *)row[i]);
+ //printf("type:%d, max len:%d, current len:%d\n",fields[i].type, fields[i].bytes, length[i]);
+ lua_pushlstring(L,(char *)row[i], length[i]);
break;
case TSDB_DATA_TYPE_TIMESTAMP:
lua_pushinteger(L,*((int64_t *)row[i]));
@@ -166,6 +171,7 @@ static int l_query(lua_State *L){
case TSDB_DATA_TYPE_BOOL:
lua_pushinteger(L,*((char *)row[i]));
break;
+ case TSDB_DATA_TYPE_NULL:
default:
lua_pushnil(L);
break;
@@ -197,7 +203,7 @@ void async_query_callback(void *param, TAOS_RES *result, int code){
printf("failed, reason:%s\n", taos_errstr(result));
lua_pushinteger(L, -1);
lua_setfield(L, table_index, "code");
- lua_pushstring(L,"something is wrong");// taos_errstr(taos));
+ lua_pushstring(L, taos_errstr(result));
lua_setfield(L, table_index, "error");
}else{
//printf("success to async query.\n");
@@ -214,9 +220,9 @@ void async_query_callback(void *param, TAOS_RES *result, int code){
static int l_async_query(lua_State *L){
int r = luaL_ref(L, LUA_REGISTRYINDEX);
- TAOS * taos = (TAOS*)lua_topointer(L,1);
- const char * sqlstr = lua_tostring(L,2);
- // int stime = luaL_checknumber(L,3);
+ TAOS * taos = (TAOS*)lua_topointer(L, 1);
+ const char * sqlstr = lua_tostring(L, 2);
+ // int stime = luaL_checknumber(L, 3);
lua_newtable(L);
int table_index = lua_gettop(L);
@@ -224,7 +230,7 @@ static int l_async_query(lua_State *L){
struct async_query_callback_param *p = malloc(sizeof(struct async_query_callback_param));
p->state = L;
p->callback=r;
- // printf("r:%d, L:%d\n",r,L);
+ // printf("r:%d, L:%d\n", r, L);
taos_query_a(taos,sqlstr,async_query_callback,p);
lua_pushnumber(L, 0);
@@ -235,112 +241,6 @@ static int l_async_query(lua_State *L){
return 1;
}
-void stream_cb(void *param, TAOS_RES *result, TAOS_ROW row){
- struct cb_param* p = (struct cb_param*) param;
- TAOS_FIELD *fields = taos_fetch_fields(result);
- int numFields = taos_num_fields(result);
-
- // printf("\nnumfields:%d\n", numFields);
- //printf("\n\r-----------------------------------------------------------------------------------\n");
-
- lua_State *L = p->state;
- lua_rawgeti(L, LUA_REGISTRYINDEX, p->callback);
-
- lua_newtable(L);
-
- for (int i = 0; i < numFields; ++i) {
- if (row[i] == NULL) {
- continue;
- }
-
- lua_pushstring(L,fields[i].name);
-
- switch (fields[i].type) {
- case TSDB_DATA_TYPE_TINYINT:
- lua_pushinteger(L,*((char *)row[i]));
- break;
- case TSDB_DATA_TYPE_SMALLINT:
- lua_pushinteger(L,*((short *)row[i]));
- break;
- case TSDB_DATA_TYPE_INT:
- lua_pushinteger(L,*((int *)row[i]));
- break;
- case TSDB_DATA_TYPE_BIGINT:
- lua_pushinteger(L,*((int64_t *)row[i]));
- break;
- case TSDB_DATA_TYPE_FLOAT:
- lua_pushnumber(L,*((float *)row[i]));
- break;
- case TSDB_DATA_TYPE_DOUBLE:
- lua_pushnumber(L,*((double *)row[i]));
- break;
- case TSDB_DATA_TYPE_BINARY:
- case TSDB_DATA_TYPE_NCHAR:
- lua_pushstring(L,(char *)row[i]);
- break;
- case TSDB_DATA_TYPE_TIMESTAMP:
- lua_pushinteger(L,*((int64_t *)row[i]));
- break;
- case TSDB_DATA_TYPE_BOOL:
- lua_pushinteger(L,*((char *)row[i]));
- break;
- default:
- lua_pushnil(L);
- break;
- }
-
- lua_settable(L, -3);
- }
-
- lua_call(L, 1, 0);
-
- // printf("-----------------------------------------------------------------------------------\n\r");
-}
-
-static int l_open_stream(lua_State *L){
- int r = luaL_ref(L, LUA_REGISTRYINDEX);
- TAOS * taos = (TAOS*)lua_topointer(L,1);
- const char * sqlstr = lua_tostring(L,2);
- int stime = luaL_checknumber(L,3);
-
- lua_newtable(L);
- int table_index = lua_gettop(L);
-
- struct cb_param *p = malloc(sizeof(struct cb_param));
- p->state = L;
- p->callback=r;
- // printf("r:%d, L:%d\n",r,L);
- void * s = taos_open_stream(taos,sqlstr,stream_cb,stime,p,NULL);
- if (s == NULL) {
- printf("failed to open stream, reason:%s\n", taos_errstr(taos));
- free(p);
- lua_pushnumber(L, -1);
- lua_setfield(L, table_index, "code");
- lua_pushstring(L, taos_errstr(taos));
- lua_setfield(L, table_index, "error");
- lua_pushlightuserdata(L,NULL);
- lua_setfield(L, table_index, "stream");
- }else{
- // printf("success to open stream\n");
- lua_pushnumber(L, 0);
- lua_setfield(L, table_index, "code");
- lua_pushstring(L, taos_errstr(taos));
- lua_setfield(L, table_index, "error");
- p->stream = s;
- lua_pushlightuserdata(L,p);
- lua_setfield(L, table_index, "stream");//stream has different content in lua and c.
- }
-
- return 1;
-}
-
-static int l_close_stream(lua_State *L){
- //TODO:get stream and free cb_param
- struct cb_param *p = lua_touserdata(L,1);
- taos_close_stream(p->stream);
- free(p);
- return 0;
-}
static int l_close(lua_State *L){
TAOS *taos= (TAOS*)lua_topointer(L,1);
@@ -367,15 +267,13 @@ static const struct luaL_Reg lib[] = {
{"query", l_query},
{"query_a",l_async_query},
{"close", l_close},
- {"open_stream", l_open_stream},
- {"close_stream", l_close_stream},
{NULL, NULL}
};
extern int luaopen_luaconnector51(lua_State* L)
{
// luaL_register(L, "luaconnector51", lib);
- lua_newtable (L);
+ lua_newtable(L);
luaL_setfuncs(L,lib,0);
return 1;
}
diff --git a/examples/lua/lua_connector.c b/examples/lua/lua_connector.c
index 3c13b196b991a6bdf18493c111d37028fcb5de9a..c3d8bcb99548c787953cd27bcbb90b6c63ee5a4a 100644
--- a/examples/lua/lua_connector.c
+++ b/examples/lua/lua_connector.c
@@ -5,7 +5,7 @@
#include
#include
#include
-#include "taos.h"
+#include
struct cb_param{
lua_State* state;
@@ -29,7 +29,7 @@ static int l_connect(lua_State *L){
luaL_checktype(L, 1, LUA_TTABLE);
lua_getfield(L, 1,"host");
- if (lua_isstring(L,-1)){
+ if (lua_isstring(L, -1)){
host = lua_tostring(L, -1);
// printf("host = %s\n", host);
}
@@ -58,8 +58,10 @@ static int l_connect(lua_State *L){
//printf("password = %s\n", password);
}
- lua_settop(L,0);
+ lua_settop(L, 0);
+ taos_init();
+
lua_newtable(L);
int table_index = lua_gettop(L);
@@ -124,7 +126,7 @@ static int l_query(lua_State *L){
//printf("row index:%d\n",rows);
rows++;
- lua_pushnumber(L,rows);
+ lua_pushnumber(L, rows);
lua_newtable(L);
for (int i = 0; i < num_fields; ++i) {
@@ -201,7 +203,7 @@ void async_query_callback(void *param, TAOS_RES *result, int code){
printf("failed, reason:%s\n", taos_errstr(result));
lua_pushinteger(L, -1);
lua_setfield(L, table_index, "code");
- lua_pushstring(L,"something is wrong");// taos_errstr(taos));
+ lua_pushstring(L, taos_errstr(result));
lua_setfield(L, table_index, "error");
}else{
//printf("success to async query.\n");
@@ -218,9 +220,9 @@ void async_query_callback(void *param, TAOS_RES *result, int code){
static int l_async_query(lua_State *L){
int r = luaL_ref(L, LUA_REGISTRYINDEX);
- TAOS * taos = (TAOS*)lua_topointer(L,1);
- const char * sqlstr = lua_tostring(L,2);
- // int stime = luaL_checknumber(L,3);
+ TAOS * taos = (TAOS*)lua_topointer(L, 1);
+ const char * sqlstr = lua_tostring(L, 2);
+ // int stime = luaL_checknumber(L, 3);
lua_newtable(L);
int table_index = lua_gettop(L);
@@ -228,7 +230,7 @@ static int l_async_query(lua_State *L){
struct async_query_callback_param *p = malloc(sizeof(struct async_query_callback_param));
p->state = L;
p->callback=r;
- // printf("r:%d, L:%d\n",r,L);
+ // printf("r:%d, L:%d\n", r, L);
taos_query_a(taos,sqlstr,async_query_callback,p);
lua_pushnumber(L, 0);
diff --git a/examples/lua/test.lua b/examples/lua/test.lua
index 89c0904c6a04ecec79a95cb1f710136e93a4a00b..94415982e7ceaa75498acc8b8bfa523d953ec401 100644
--- a/examples/lua/test.lua
+++ b/examples/lua/test.lua
@@ -9,6 +9,50 @@ local config = {
max_packet_size = 1024 * 1024
}
+function dump(obj)
+ local getIndent, quoteStr, wrapKey, wrapVal, dumpObj
+ getIndent = function(level)
+ return string.rep("\t", level)
+ end
+ quoteStr = function(str)
+ return '"' .. string.gsub(str, '"', '\\"') .. '"'
+ end
+ wrapKey = function(val)
+ if type(val) == "number" then
+ return "[" .. val .. "]"
+ elseif type(val) == "string" then
+ return "[" .. quoteStr(val) .. "]"
+ else
+ return "[" .. tostring(val) .. "]"
+ end
+ end
+ wrapVal = function(val, level)
+ if type(val) == "table" then
+ return dumpObj(val, level)
+ elseif type(val) == "number" then
+ return val
+ elseif type(val) == "string" then
+ return quoteStr(val)
+ else
+ return tostring(val)
+ end
+ end
+ dumpObj = function(obj, level)
+ if type(obj) ~= "table" then
+ return wrapVal(obj)
+ end
+ level = level + 1
+ local tokens = {}
+ tokens[#tokens + 1] = "{"
+ for k, v in pairs(obj) do
+ tokens[#tokens + 1] = getIndent(level) .. wrapKey(k) .. " = " .. wrapVal(v, level) .. ","
+ end
+ tokens[#tokens + 1] = getIndent(level - 1) .. "}"
+ return table.concat(tokens, "\n")
+ end
+ return dumpObj(obj, 0)
+end
+
local conn
local res = driver.connect(config)
if res.code ~=0 then
@@ -37,7 +81,7 @@ else
print("select db--- pass.")
end
-res = driver.query(conn,"create table m1 (ts timestamp, speed int,owner binary(20))")
+res = driver.query(conn,"create table m1 (ts timestamp, speed int, owner binary(20), mark nchar(30))")
if res.code ~=0 then
print("create table---failed: "..res.error)
return
@@ -45,7 +89,7 @@ else
print("create table--- pass.")
end
-res = driver.query(conn,"insert into m1 values ('2019-09-01 00:00:00.001',0,'robotspace'), ('2019-09-01 00:00:00.002',1,'Hilink'),('2019-09-01 00:00:00.003',2,'Harmony')")
+res = driver.query(conn,"insert into m1 values ('2019-09-01 00:00:00.001', 0, 'robotspace', '世界人民大团结万岁'), ('2019-09-01 00:00:00.002', 1, 'Hilink', '⾾⾿⿀⿁⿂⿃⿄⿅⿆⿇⿈⿉⿊⿋⿌⿍⿎⿏⿐⿑⿒⿓⿔⿕'),('2019-09-01 00:00:00.003', 2, 'Harmony', '₠₡₢₣₤₥₦₧₨₩₪₫€₭₮₯₰₱₲₳₴₵')")
if res.code ~=0 then
print("insert records failed: "..res.error)
return
@@ -64,21 +108,25 @@ if res.code ~=0 then
return
else
if (#(res.item) == 3) then
- print("select--- pass")
+ print("select--- pass")
+ print(res.item[1].mark)
+ print(res.item[2].mark)
+ print(res.item[3].mark)
+
else
print("select--- failed: expect 3 affected records, actually received "..#(res.item))
end
end
-res = driver.query(conn,"CREATE TABLE thermometer (ts timestamp, degree double) TAGS(location binary(20), type int)")
+res = driver.query(conn,"create table thermometer (ts timestamp, degree double) tags(location binary(20), type int)")
if res.code ~=0 then
print(res.error)
return
else
print("create super table--- pass")
end
-res = driver.query(conn,"CREATE TABLE therm1 USING thermometer TAGS ('beijing', 1)")
+res = driver.query(conn,"create table therm1 using thermometer tags ('beijing', 1)")
if res.code ~=0 then
print(res.error)
return
@@ -86,7 +134,7 @@ else
print("create table--- pass")
end
-res = driver.query(conn,"INSERT INTO therm1 VALUES ('2019-09-01 00:00:00.001', 20),('2019-09-01 00:00:00.002', 21)")
+res = driver.query(conn,"insert into therm1 values ('2019-09-01 00:00:00.001', 20),('2019-09-01 00:00:00.002', 21)")
if res.code ~=0 then
print(res.error)
@@ -99,14 +147,14 @@ else
end
end
-res = driver.query(conn,"SELECT COUNT(*) count, AVG(degree) AS av, MAX(degree), MIN(degree) FROM thermometer WHERE location='beijing' or location='tianjin' GROUP BY location, type")
+res = driver.query(conn,"select count(*) as cnt, avg(degree) as av, max(degree), min(degree) from thermometer where location='beijing' or location='tianjin' group by location, type")
if res.code ~=0 then
print("select from super table--- failed:"..res.error)
return
else
print("select from super table--- pass")
for i = 1, #(res.item) do
- print("res:"..res.item[i].count)
+ print("res:"..res.item[i].cnt)
end
end
@@ -127,30 +175,19 @@ end
driver.query_a(conn,"INSERT INTO therm1 VALUES ('2019-09-01 00:00:00.005', 100),('2019-09-01 00:00:00.006', 101),('2019-09-01 00:00:00.007', 102)", async_query_callback)
-
-function stream_callback(t)
- print("------------------------")
- print("continuous query result:")
- for key, value in pairs(t) do
- print("key:"..key..", value:"..value)
- end
-end
-
-local stream
-res = driver.open_stream(conn,"SELECT COUNT(*) as count, AVG(degree) as avg, MAX(degree) as max, MIN(degree) as min FROM thermometer interval(2s) sliding(2s);)",0, stream_callback)
+res = driver.query(conn, "create stream stream_avg_degree into avg_degree as select avg(degree) from thermometer interval(5s) sliding(1s)")
if res.code ~=0 then
- print("open stream--- failed:"..res.error)
+ print("create stream--- failed:"..res.error)
return
else
- print("open stream--- pass")
- stream = res.stream
+ print("create stream--- pass")
end
-print("From now on we start continous insert in an definite (infinite if you want) loop.")
+print("From now on we start continous insertion in an definite loop, please wait for about 10 seconds and check stream table avg_degree for result.")
local loop_index = 0
-while loop_index < 30 do
+while loop_index < 10 do
local t = os.time()*1000
- local v = loop_index
+ local v = math.random(20)
res = driver.query(conn,string.format("INSERT INTO therm1 VALUES (%d, %d)",t,v))
if res.code ~=0 then
@@ -162,7 +199,5 @@ while loop_index < 30 do
os.execute("sleep " .. 1)
loop_index = loop_index + 1
end
-
-driver.close_stream(stream)
-
+driver.query(conn,"DROP STREAM IF EXISTS stream_avg_degree")
driver.close(conn)
diff --git a/examples/nodejs/README-win.md b/examples/nodejs/README-win.md
index 75fec69413af2bb49498118ec7235c9947e2f89e..e496be2f87e3ff0fcc01359f23888734669b0c22 100644
--- a/examples/nodejs/README-win.md
+++ b/examples/nodejs/README-win.md
@@ -35,7 +35,7 @@ Python 2.7.18
下载地址:https://www.taosdata.com/cn/all-downloads/,选择一个合适的windows-client下载(client应该尽量与server端的版本保持一致)
-使用client的taos shell连接server
+使用client的TDengine CLI连接server
```shell
>taos -h node5
diff --git a/include/client/taos.h b/include/client/taos.h
index f260b84f4aaf238badb1de3a6446b639b5681fa9..270b647a77d18fd10e97954e435000799bdd6007 100644
--- a/include/client/taos.h
+++ b/include/client/taos.h
@@ -254,6 +254,7 @@ enum tmq_res_t {
TMQ_RES_INVALID = -1,
TMQ_RES_DATA = 1,
TMQ_RES_TABLE_META = 2,
+ TMQ_RES_METADATA = 3,
};
typedef struct tmq_raw_data {
diff --git a/include/common/systable.h b/include/common/systable.h
index 01c9807627c9e0ead401ffe873a4e7f7f08b8282..882c54de952dc044ed30aa6a1aed66145c0db804 100644
--- a/include/common/systable.h
+++ b/include/common/systable.h
@@ -43,17 +43,17 @@ extern "C" {
#define TSDB_INS_TABLE_VNODES "ins_vnodes"
#define TSDB_INS_TABLE_CONFIGS "ins_configs"
#define TSDB_INS_TABLE_DNODE_VARIABLES "ins_dnode_variables"
+#define TSDB_INS_TABLE_SUBSCRIPTIONS "ins_subscriptions"
+#define TSDB_INS_TABLE_TOPICS "ins_topics"
+#define TSDB_INS_TABLE_STREAMS "ins_streams"
#define TSDB_PERFORMANCE_SCHEMA_DB "performance_schema"
#define TSDB_PERFS_TABLE_SMAS "perf_smas"
#define TSDB_PERFS_TABLE_CONNECTIONS "perf_connections"
#define TSDB_PERFS_TABLE_QUERIES "perf_queries"
-#define TSDB_PERFS_TABLE_TOPICS "perf_topics"
#define TSDB_PERFS_TABLE_CONSUMERS "perf_consumers"
-#define TSDB_PERFS_TABLE_SUBSCRIPTIONS "perf_subscriptions"
#define TSDB_PERFS_TABLE_OFFSETS "perf_offsets"
#define TSDB_PERFS_TABLE_TRANS "perf_trans"
-#define TSDB_PERFS_TABLE_STREAMS "perf_streams"
#define TSDB_PERFS_TABLE_APPS "perf_apps"
typedef struct SSysDbTableSchema {
diff --git a/include/common/taosdef.h b/include/common/taosdef.h
index 9bfee56e2974832593578c9c2b1c984373763088..bf4de9d4ded1d0955bef05b1e3000be0bf34d8aa 100644
--- a/include/common/taosdef.h
+++ b/include/common/taosdef.h
@@ -65,13 +65,6 @@ typedef enum {
TSDB_STATIS_NONE = 1, // statis part not exist
} ETsdbStatisStatus;
-typedef enum {
- TSDB_SMA_STAT_UNKNOWN = -1, // unknown
- TSDB_SMA_STAT_OK = 0, // ready to provide service
- TSDB_SMA_STAT_EXPIRED = 1, // not ready or expired
- TSDB_SMA_STAT_DROPPED = 2, // sma dropped
-} ETsdbSmaStat; // bit operation
-
typedef enum {
TSDB_SMA_TYPE_BLOCK = 0, // Block-wise SMA
TSDB_SMA_TYPE_TIME_RANGE = 1, // Time-range-wise SMA
diff --git a/include/common/tcommon.h b/include/common/tcommon.h
index a071516fbfc2e647e78ae00c57842ac92253dea6..2add3332ab28320f52c666141470f30e40b11fe8 100644
--- a/include/common/tcommon.h
+++ b/include/common/tcommon.h
@@ -45,8 +45,8 @@ enum {
// clang-format on
typedef struct {
- TSKEY ts;
uint64_t groupId;
+ TSKEY ts;
} SWinKey;
static inline int SWinKeyCmpr(const void* pKey1, int kLen1, const void* pKey2, int kLen2) {
@@ -68,11 +68,43 @@ static inline int SWinKeyCmpr(const void* pKey1, int kLen1, const void* pKey2, i
return 0;
}
+typedef struct {
+ uint64_t groupId;
+ TSKEY ts;
+ int32_t exprIdx;
+} STupleKey;
+
+static inline int STupleKeyCmpr(const void* pKey1, int kLen1, const void* pKey2, int kLen2) {
+ STupleKey* pTuple1 = (STupleKey*)pKey1;
+ STupleKey* pTuple2 = (STupleKey*)pKey2;
+
+ if (pTuple1->groupId > pTuple2->groupId) {
+ return 1;
+ } else if (pTuple1->groupId < pTuple2->groupId) {
+ return -1;
+ }
+
+ if (pTuple1->ts > pTuple2->ts) {
+ return 1;
+ } else if (pTuple1->ts < pTuple2->ts) {
+ return -1;
+ }
+
+ if (pTuple1->exprIdx > pTuple2->exprIdx) {
+ return 1;
+ } else if (pTuple1->exprIdx < pTuple2->exprIdx) {
+ return -1;
+ }
+
+ return 0;
+}
+
enum {
TMQ_MSG_TYPE__DUMMY = 0,
TMQ_MSG_TYPE__POLL_RSP,
TMQ_MSG_TYPE__POLL_META_RSP,
TMQ_MSG_TYPE__EP_RSP,
+ TMQ_MSG_TYPE__TAOSX_RSP,
TMQ_MSG_TYPE__END_RSP,
};
@@ -84,6 +116,7 @@ enum {
STREAM_INPUT__DATA_RETRIEVE,
STREAM_INPUT__GET_RES,
STREAM_INPUT__CHECKPOINT,
+ STREAM_INPUT__REF_DATA_BLOCK,
STREAM_INPUT__DESTROY,
};
@@ -129,7 +162,6 @@ typedef struct SDataBlockInfo {
uint32_t capacity;
// TODO: optimize and remove following
int64_t version; // used for stream, and need serialization
- int64_t ts; // used for stream, and need serialization
int32_t childId; // used for stream, do not serialize
EStreamType type; // used for stream, do not serialize
STimeWindow calWin; // used for stream, do not serialize
@@ -145,6 +177,7 @@ typedef struct SSDataBlock {
enum {
FETCH_TYPE__DATA = 1,
FETCH_TYPE__META,
+ FETCH_TYPE__SEP,
FETCH_TYPE__NONE,
};
diff --git a/include/common/tdatablock.h b/include/common/tdatablock.h
index 410fa02ded3c16bd0e1fd2c669b5c8c46a7e1801..73d043b2d0ac680d69b517d042b02dfa71167435 100644
--- a/include/common/tdatablock.h
+++ b/include/common/tdatablock.h
@@ -184,7 +184,8 @@ static FORCE_INLINE void colDataAppendDouble(SColumnInfoData* pColumnInfoData, u
int32_t getJsonValueLen(const char* data);
int32_t colDataAppend(SColumnInfoData* pColumnInfoData, uint32_t currentRow, const char* pData, bool isNull);
-int32_t colDataAppendNItems(SColumnInfoData* pColumnInfoData, uint32_t currentRow, const char* pData, uint32_t numOfRows);
+int32_t colDataAppendNItems(SColumnInfoData* pColumnInfoData, uint32_t currentRow, const char* pData,
+ uint32_t numOfRows);
int32_t colDataMergeCol(SColumnInfoData* pColumnInfoData, int32_t numOfRow1, int32_t* capacity,
const SColumnInfoData* pSource, int32_t numOfRow2);
int32_t colDataAssign(SColumnInfoData* pColumnInfoData, const SColumnInfoData* pSource, int32_t numOfRows,
@@ -225,15 +226,16 @@ size_t blockDataGetCapacityInRow(const SSDataBlock* pBlock, size_t pageSize);
int32_t blockDataTrimFirstNRows(SSDataBlock* pBlock, size_t n);
int32_t blockDataKeepFirstNRows(SSDataBlock* pBlock, size_t n);
-int32_t assignOneDataBlock(SSDataBlock* dst, const SSDataBlock* src);
-int32_t copyDataBlock(SSDataBlock* dst, const SSDataBlock* src);
+int32_t assignOneDataBlock(SSDataBlock* dst, const SSDataBlock* src);
+int32_t copyDataBlock(SSDataBlock* dst, const SSDataBlock* src);
SSDataBlock* createDataBlock();
void* blockDataDestroy(SSDataBlock* pBlock);
void blockDataFreeRes(SSDataBlock* pBlock);
SSDataBlock* createOneDataBlock(const SSDataBlock* pDataBlock, bool copyData);
+SSDataBlock* createSpecialDataBlock(EStreamType type);
-int32_t blockDataAppendColInfo(SSDataBlock* pBlock, SColumnInfoData* pColInfoData);
+int32_t blockDataAppendColInfo(SSDataBlock* pBlock, SColumnInfoData* pColInfoData);
SColumnInfoData createColumnInfoData(int16_t type, int32_t bytes, int16_t colId);
SColumnInfoData* bdGetColumnInfoData(const SSDataBlock* pBlock, int32_t index);
@@ -249,7 +251,6 @@ char* dumpBlockData(SSDataBlock* pDataBlock, const char* flag, char** dumpBuf);
int32_t buildSubmitReqFromDataBlock(SSubmitReq** pReq, const SSDataBlock* pDataBlocks, STSchema* pTSchema, int32_t vgId,
tb_uid_t suid);
-
char* buildCtbNameByGroupId(const char* stbName, uint64_t groupId);
static FORCE_INLINE int32_t blockGetEncodeSize(const SSDataBlock* pBlock) {
diff --git a/include/common/tdataformat.h b/include/common/tdataformat.h
index af7c88acded2e151ff730ccb1ade5fdf15f9862a..ac94b0af7d8c2118e65a94f9b06e84c7b5ed4b48 100644
--- a/include/common/tdataformat.h
+++ b/include/common/tdataformat.h
@@ -36,8 +36,13 @@ typedef struct STSRow2 STSRow2;
typedef struct STSRowBuilder STSRowBuilder;
typedef struct STagVal STagVal;
typedef struct STag STag;
+typedef struct SColData SColData;
-// bitmap
+#define HAS_NONE ((uint8_t)0x1)
+#define HAS_NULL ((uint8_t)0x2)
+#define HAS_VALUE ((uint8_t)0x4)
+
+// bitmap ================================
const static uint8_t BIT2_MAP[4][4] = {{0b00000000, 0b00000001, 0b00000010, 0},
{0b00000000, 0b00000100, 0b00001000, 2},
{0b00000000, 0b00010000, 0b00100000, 4},
@@ -51,21 +56,29 @@ const static uint8_t BIT2_MAP[4][4] = {{0b00000000, 0b00000001, 0b00000010, 0},
#define SET_BIT2(p, i, v) ((p)[(i) >> 2] = (p)[(i) >> 2] & N1(BIT2_MAP[(i)&3][3]) | BIT2_MAP[(i)&3][(v)])
#define GET_BIT2(p, i) (((p)[(i) >> 2] >> BIT2_MAP[(i)&3][3]) & ((uint8_t)3))
-// STSchema
+// STSchema ================================
int32_t tTSchemaCreate(int32_t sver, SSchema *pSchema, int32_t nCols, STSchema **ppTSchema);
void tTSchemaDestroy(STSchema *pTSchema);
-// SValue
+// SValue ================================
int32_t tPutValue(uint8_t *p, SValue *pValue, int8_t type);
int32_t tGetValue(uint8_t *p, SValue *pValue, int8_t type);
int tValueCmprFn(const SValue *pValue1, const SValue *pValue2, int8_t type);
-// SColVal
-#define COL_VAL_NONE(CID, TYPE) ((SColVal){.cid = (CID), .type = (TYPE), .isNone = 1})
-#define COL_VAL_NULL(CID, TYPE) ((SColVal){.cid = (CID), .type = (TYPE), .isNull = 1})
+// SColVal ================================
+#define CV_FLAG_VALUE ((int8_t)0x0)
+#define CV_FLAG_NONE ((int8_t)0x1)
+#define CV_FLAG_NULL ((int8_t)0x2)
+
+#define COL_VAL_NONE(CID, TYPE) ((SColVal){.cid = (CID), .type = (TYPE), .flag = CV_FLAG_NONE})
+#define COL_VAL_NULL(CID, TYPE) ((SColVal){.cid = (CID), .type = (TYPE), .flag = CV_FLAG_NULL})
#define COL_VAL_VALUE(CID, TYPE, V) ((SColVal){.cid = (CID), .type = (TYPE), .value = (V)})
-// STSRow2
+#define COL_VAL_IS_NONE(CV) ((CV)->flag == CV_FLAG_NONE)
+#define COL_VAL_IS_NULL(CV) ((CV)->flag == CV_FLAG_NULL)
+#define COL_VAL_IS_VALUE(CV) ((CV)->flag == CV_FLAG_VALUE)
+
+// STSRow2 ================================
#define TSROW_LEN(PROW, V) tGetI32v((uint8_t *)(PROW)->data, (V) ? &(V) : NULL)
#define TSROW_SVER(PROW, V) tGetI32v((PROW)->data + TSROW_LEN(PROW, NULL), (V) ? &(V) : NULL)
@@ -77,7 +90,7 @@ int32_t tTSRowToArray(STSRow2 *pRow, STSchema *pTSchema, SArray **ppArray);
int32_t tPutTSRow(uint8_t *p, STSRow2 *pRow);
int32_t tGetTSRow(uint8_t *p, STSRow2 **ppRow);
-// STSRowBuilder
+// STSRowBuilder ================================
#define tsRowBuilderInit() ((STSRowBuilder){0})
#define tsRowBuilderClear(B) \
do { \
@@ -86,7 +99,7 @@ int32_t tGetTSRow(uint8_t *p, STSRow2 **ppRow);
} \
} while (0)
-// STag
+// STag ================================
int32_t tTagNew(SArray *pArray, int32_t version, int8_t isJson, STag **ppTag);
void tTagFree(STag *pTag);
bool tTagIsJson(const void *pTag);
@@ -96,10 +109,20 @@ char *tTagValToData(const STagVal *pTagVal, bool isJson);
int32_t tEncodeTag(SEncoder *pEncoder, const STag *pTag);
int32_t tDecodeTag(SDecoder *pDecoder, STag **ppTag);
int32_t tTagToValArray(const STag *pTag, SArray **ppArray);
+void tTagSetCid(const STag *pTag, int16_t iTag, int16_t cid);
void debugPrintSTag(STag *pTag, const char *tag, int32_t ln); // TODO: remove
int32_t parseJsontoTagData(const char *json, SArray *pTagVals, STag **ppTag, void *pMsgBuf);
-// STRUCT =================
+// SColData ================================
+void tColDataDestroy(void *ph);
+void tColDataInit(SColData *pColData, int16_t cid, int8_t type, int8_t smaOn);
+void tColDataClear(SColData *pColData);
+int32_t tColDataAppendValue(SColData *pColData, SColVal *pColVal);
+void tColDataGetValue(SColData *pColData, int32_t iVal, SColVal *pColVal);
+uint8_t tColDataGetBitValue(SColData *pColData, int32_t iVal);
+int32_t tColDataCopy(SColData *pColDataSrc, SColData *pColDataDest);
+
+// STRUCT ================================
struct STColumn {
col_id_t colId;
int8_t type;
@@ -160,11 +183,22 @@ struct SValue {
struct SColVal {
int16_t cid;
int8_t type;
- int8_t isNone;
- int8_t isNull;
+ int8_t flag;
SValue value;
};
+struct SColData {
+ int16_t cid;
+ int8_t type;
+ int8_t smaOn;
+ int32_t nVal;
+ uint8_t flag;
+ uint8_t *pBitMap;
+ int32_t *aOffset;
+ int32_t nData;
+ uint8_t *pData;
+};
+
#pragma pack(push, 1)
struct STagVal {
// char colName[TSDB_COL_NAME_LEN]; // only used for tmq_get_meta
diff --git a/include/common/tglobal.h b/include/common/tglobal.h
index 03e15ed8e705c598c444847c94741d0c6b56fdfe..bd5e74387edc4292e3a019b817a8d70f266e081d 100644
--- a/include/common/tglobal.h
+++ b/include/common/tglobal.h
@@ -89,11 +89,16 @@ extern uint16_t tsTelemPort;
// query buffer management
extern int32_t tsQueryBufferSize; // maximum allowed usage buffer size in MB for each data node during query processing
-extern int64_t tsQueryBufferSizeBytes; // maximum allowed usage buffer size in byte for each data node
+extern int64_t tsQueryBufferSizeBytes; // maximum allowed usage buffer size in byte for each data node
// query client
extern int32_t tsQueryPolicy;
extern int32_t tsQuerySmaOptimize;
+extern int32_t tsQueryRsmaTolerance;
+extern bool tsQueryPlannerTrace;
+extern int32_t tsQueryNodeChunkSize;
+extern bool tsQueryUseNodeAllocator;
+extern bool tsKeepColumnName;
// client
extern int32_t tsMinSlidingTime;
@@ -119,6 +124,7 @@ extern SDiskCfg tsDiskCfg[];
// udf
extern bool tsStartUdfd;
+extern char tsUdfdResFuncs[];
// schemaless
extern char tsSmlChildTableName[];
@@ -144,10 +150,10 @@ void taosCfgDynamicOptions(const char *option, const char *value);
struct SConfig *taosGetCfg();
-void taosSetAllDebugFlag(int32_t flag);
-void taosSetDebugFlag(int32_t *pFlagPtr, const char *flagName, int32_t flagVal);
+void taosSetAllDebugFlag(int32_t flag, bool rewrite);
+void taosSetDebugFlag(int32_t *pFlagPtr, const char *flagName, int32_t flagVal, bool rewrite);
int32_t taosSetCfg(SConfig *pCfg, char *name);
-void taosLocalCfgForbiddenToChange(char* name, bool* forbidden);
+void taosLocalCfgForbiddenToChange(char *name, bool *forbidden);
#ifdef __cplusplus
}
diff --git a/include/common/tmsg.h b/include/common/tmsg.h
index 47bd0d0b029e48ea3e6b64107cc0b90291be63a5..7aec00c7c12177b867e2f16576cc96301f9dc973 100644
--- a/include/common/tmsg.h
+++ b/include/common/tmsg.h
@@ -55,11 +55,10 @@ extern int32_t tMsgDict[];
#define TMSG_SEG_CODE(TYPE) (((TYPE)&0xff00) >> 8)
#define TMSG_SEG_SEQ(TYPE) ((TYPE)&0xff)
-#define TMSG_INFO(TYPE) \
- ((TYPE) >= 0 && ((TYPE) < TDMT_DND_MAX_MSG || (TYPE) < TDMT_MND_MAX_MSG || (TYPE) < TDMT_VND_MAX_MSG || \
- (TYPE) < TDMT_SCH_MAX_MSG || (TYPE) < TDMT_STREAM_MAX_MSG || (TYPE) < TDMT_MON_MAX_MSG || \
- (TYPE) < TDMT_SYNC_MAX_MSG)) \
- ? tMsgInfo[tMsgDict[TMSG_SEG_CODE(TYPE)] + TMSG_SEG_SEQ(TYPE)] \
+#define TMSG_INFO(TYPE) \
+ ((TYPE) < TDMT_DND_MAX_MSG || (TYPE) < TDMT_MND_MAX_MSG || (TYPE) < TDMT_VND_MAX_MSG || (TYPE) < TDMT_SCH_MAX_MSG || \
+ (TYPE) < TDMT_STREAM_MAX_MSG || (TYPE) < TDMT_MON_MAX_MSG || (TYPE) < TDMT_SYNC_MAX_MSG) \
+ ? tMsgInfo[tMsgDict[TMSG_SEG_CODE(TYPE)] + TMSG_SEG_SEQ(TYPE)] \
: 0
#define TMSG_INDEX(TYPE) (tMsgDict[TMSG_SEG_CODE(TYPE)] + TMSG_SEG_SEQ(TYPE))
@@ -276,7 +275,6 @@ struct SSchema {
char name[TSDB_COL_NAME_LEN];
};
-
typedef struct {
char tbName[TSDB_TABLE_NAME_LEN];
char stbName[TSDB_TABLE_NAME_LEN];
@@ -295,17 +293,15 @@ typedef struct {
SSchema* pSchemas;
} STableMetaRsp;
-
-
typedef struct {
- int32_t code;
- int8_t hashMeta;
- int64_t uid;
- char* tblFName;
- int32_t numOfRows;
- int32_t affectedRows;
- int64_t sver;
- STableMetaRsp* pMeta;
+ int32_t code;
+ int8_t hashMeta;
+ int64_t uid;
+ char* tblFName;
+ int32_t numOfRows;
+ int32_t affectedRows;
+ int64_t sver;
+ STableMetaRsp* pMeta;
} SSubmitBlkRsp;
typedef struct {
@@ -320,7 +316,7 @@ typedef struct {
int32_t tEncodeSSubmitRsp(SEncoder* pEncoder, const SSubmitRsp* pRsp);
int32_t tDecodeSSubmitRsp(SDecoder* pDecoder, SSubmitRsp* pRsp);
-void tFreeSSubmitBlkRsp(void* param);
+void tFreeSSubmitBlkRsp(void* param);
void tFreeSSubmitRsp(SSubmitRsp* pRsp);
#define COL_SMA_ON ((int8_t)0x1)
@@ -787,6 +783,10 @@ typedef struct {
int64_t walRetentionSize;
int32_t walRollPeriod;
int64_t walSegmentSize;
+ int32_t sstTrigger;
+ int16_t hashPrefix;
+ int16_t hashSuffix;
+ int32_t tsdbPageSize;
} SCreateDbReq;
int32_t tSerializeSCreateDbReq(void* buf, int32_t bufLen, SCreateDbReq* pReq);
@@ -808,6 +808,7 @@ typedef struct {
int8_t strict;
int8_t cacheLast;
int8_t replications;
+ int32_t sstTrigger;
} SAlterDbReq;
int32_t tSerializeSAlterDbReq(void* buf, int32_t bufLen, SAlterDbReq* pReq);
@@ -844,6 +845,8 @@ typedef struct {
int64_t uid;
int32_t vgVersion;
int32_t vgNum;
+ int16_t hashPrefix;
+ int16_t hashSuffix;
int8_t hashMethod;
SArray* pVgroupInfos; // Array of SVgroupInfo
} SUseDbRsp;
@@ -862,7 +865,8 @@ int32_t tSerializeSDbCfgReq(void* buf, int32_t bufLen, SDbCfgReq* pReq);
int32_t tDeserializeSDbCfgReq(void* buf, int32_t bufLen, SDbCfgReq* pReq);
typedef struct {
- char db[TSDB_DB_FNAME_LEN];
+ char db[TSDB_DB_FNAME_LEN];
+ int32_t maxSpeed;
} STrimDbReq;
int32_t tSerializeSTrimDbReq(void* buf, int32_t bufLen, STrimDbReq* pReq);
@@ -1069,6 +1073,7 @@ typedef struct {
typedef struct {
int32_t vgId;
int32_t syncState;
+ int64_t cacheUsage;
int64_t numOfTables;
int64_t numOfTimeSeries;
int64_t totalStorage;
@@ -1193,6 +1198,10 @@ typedef struct {
int64_t walRetentionSize;
int32_t walRollPeriod;
int64_t walSegmentSize;
+ int16_t sstTrigger;
+ int16_t hashPrefix;
+ int16_t hashSuffix;
+ int32_t tsdbPageSize;
} SCreateVnodeReq;
int32_t tSerializeSCreateVnodeReq(void* buf, int32_t bufLen, SCreateVnodeReq* pReq);
@@ -1415,6 +1424,14 @@ typedef struct {
SExplainExecInfo* subplanInfo;
} SExplainRsp;
+typedef struct {
+ SExplainRsp rsp;
+ uint64_t qId;
+ uint64_t tId;
+ int64_t rId;
+ int32_t eId;
+} SExplainLocalRsp;
+
typedef struct STableScanAnalyzeInfo {
uint64_t totalRows;
uint64_t totalCheckedRows;
@@ -1429,6 +1446,7 @@ typedef struct STableScanAnalyzeInfo {
int32_t tSerializeSExplainRsp(void* buf, int32_t bufLen, SExplainRsp* pRsp);
int32_t tDeserializeSExplainRsp(void* buf, int32_t bufLen, SExplainRsp* pRsp);
+void tFreeSExplainRsp(SExplainRsp* pRsp);
typedef struct {
char fqdn[TSDB_FQDN_LEN]; // end point, hostname:port
@@ -1711,6 +1729,8 @@ typedef struct {
int64_t maxDelay;
int64_t watermark;
int8_t igExpired;
+ int32_t numOfTags;
+ SArray* pTags; // array of SField
} SCMCreateStreamReq;
typedef struct {
@@ -2049,8 +2069,8 @@ typedef struct {
STableMetaRsp* pMeta;
} SVCreateTbRsp, SVUpdateTbRsp;
-int tEncodeSVCreateTbRsp(SEncoder* pCoder, const SVCreateTbRsp* pRsp);
-int tDecodeSVCreateTbRsp(SDecoder* pCoder, SVCreateTbRsp* pRsp);
+int tEncodeSVCreateTbRsp(SEncoder* pCoder, const SVCreateTbRsp* pRsp);
+int tDecodeSVCreateTbRsp(SDecoder* pCoder, SVCreateTbRsp* pRsp);
void tFreeSVCreateTbRsp(void* param);
int32_t tSerializeSVCreateTbReq(void** buf, SVCreateTbReq* pReq);
@@ -2072,8 +2092,9 @@ int32_t tDeserializeSVCreateTbBatchRsp(void* buf, int32_t bufLen, SVCreateTbBatc
// TDMT_VND_DROP_TABLE =================
typedef struct {
- char* name;
- int8_t igNotExists;
+ char* name;
+ uint64_t suid; // for tmq in wal format
+ int8_t igNotExists;
} SVDropTbReq;
typedef struct {
@@ -2327,6 +2348,7 @@ int32_t tSerializeSClientHbBatchReq(void* buf, int32_t bufLen, const SClientHbBa
int32_t tDeserializeSClientHbBatchReq(void* buf, int32_t bufLen, SClientHbBatchReq* pReq);
static FORCE_INLINE void tFreeClientHbBatchReq(void* pReq) {
+ if (pReq == NULL) return;
SClientHbBatchReq* req = (SClientHbBatchReq*)pReq;
taosArrayDestroyEx(req->reqs, tFreeClientHbReq);
taosMemoryFree(pReq);
@@ -2617,7 +2639,7 @@ enum {
typedef struct {
int8_t type;
union {
- // snapshot data
+ // snapshot
struct {
int64_t uid;
int64_t ts;
@@ -2629,6 +2651,22 @@ typedef struct {
};
} STqOffsetVal;
+static FORCE_INLINE void tqOffsetResetToData(STqOffsetVal* pOffsetVal, int64_t uid, int64_t ts) {
+ pOffsetVal->type = TMQ_OFFSET__SNAPSHOT_DATA;
+ pOffsetVal->uid = uid;
+ pOffsetVal->ts = ts;
+}
+
+static FORCE_INLINE void tqOffsetResetToMeta(STqOffsetVal* pOffsetVal, int64_t uid) {
+ pOffsetVal->type = TMQ_OFFSET__SNAPSHOT_META;
+ pOffsetVal->uid = uid;
+}
+
+static FORCE_INLINE void tqOffsetResetToLog(STqOffsetVal* pOffsetVal, int64_t ver) {
+ pOffsetVal->type = TMQ_OFFSET__LOG;
+ pOffsetVal->version = ver;
+}
+
int32_t tEncodeSTqOffsetVal(SEncoder* pEncoder, const STqOffsetVal* pOffsetVal);
int32_t tDecodeSTqOffsetVal(SDecoder* pDecoder, STqOffsetVal* pOffsetVal);
int32_t tFormatOffset(char* buf, int32_t maxLen, const STqOffsetVal* pVal);
@@ -2677,15 +2715,6 @@ typedef struct {
int32_t tSerializeSMDropSmaReq(void* buf, int32_t bufLen, SMDropSmaReq* pReq);
int32_t tDeserializeSMDropSmaReq(void* buf, int32_t bufLen, SMDropSmaReq* pReq);
-typedef struct {
- int32_t vgId;
- SEpSet epSet;
-} SVgEpSet;
-
-typedef struct {
- int32_t padding;
-} SRSmaExecMsg;
-
typedef struct {
int8_t version; // for compatibility(default 0)
int8_t intervalUnit; // MACRO: TIME_UNIT_XXX
@@ -2939,39 +2968,20 @@ static FORCE_INLINE void* tDecodeSMqSubTopicEp(void* buf, SMqSubTopicEp* pTopicE
}
static FORCE_INLINE void tDeleteSMqSubTopicEp(SMqSubTopicEp* pSubTopicEp) {
- // taosMemoryFree(pSubTopicEp->schema.pSchema);
+ if (pSubTopicEp->schema.nCols) taosMemoryFreeClear(pSubTopicEp->schema.pSchema);
taosArrayDestroy(pSubTopicEp->vgs);
}
typedef struct {
SMqRspHead head;
- int64_t reqOffset;
- int64_t rspOffset;
- STqOffsetVal reqOffsetNew;
- STqOffsetVal rspOffsetNew;
+ STqOffsetVal rspOffset;
int16_t resMsgType;
int32_t metaRspLen;
void* metaRsp;
} SMqMetaRsp;
-static FORCE_INLINE int32_t tEncodeSMqMetaRsp(void** buf, const SMqMetaRsp* pRsp) {
- int32_t tlen = 0;
- tlen += taosEncodeFixedI64(buf, pRsp->reqOffset);
- tlen += taosEncodeFixedI64(buf, pRsp->rspOffset);
- tlen += taosEncodeFixedI16(buf, pRsp->resMsgType);
- tlen += taosEncodeFixedI32(buf, pRsp->metaRspLen);
- tlen += taosEncodeBinary(buf, pRsp->metaRsp, pRsp->metaRspLen);
- return tlen;
-}
-
-static FORCE_INLINE void* tDecodeSMqMetaRsp(const void* buf, SMqMetaRsp* pRsp) {
- buf = taosDecodeFixedI64(buf, &pRsp->reqOffset);
- buf = taosDecodeFixedI64(buf, &pRsp->rspOffset);
- buf = taosDecodeFixedI16(buf, &pRsp->resMsgType);
- buf = taosDecodeFixedI32(buf, &pRsp->metaRspLen);
- buf = taosDecodeBinary(buf, &pRsp->metaRsp, pRsp->metaRspLen);
- return (void*)buf;
-}
+int32_t tEncodeSMqMetaRsp(SEncoder* pEncoder, const SMqMetaRsp* pRsp);
+int32_t tDecodeSMqMetaRsp(SDecoder* pDecoder, SMqMetaRsp* pRsp);
typedef struct {
SMqRspHead head;
@@ -2988,6 +2998,27 @@ typedef struct {
int32_t tEncodeSMqDataRsp(SEncoder* pEncoder, const SMqDataRsp* pRsp);
int32_t tDecodeSMqDataRsp(SDecoder* pDecoder, SMqDataRsp* pRsp);
+void tDeleteSMqDataRsp(SMqDataRsp* pRsp);
+
+typedef struct {
+ SMqRspHead head;
+ STqOffsetVal reqOffset;
+ STqOffsetVal rspOffset;
+ int32_t blockNum;
+ int8_t withTbName;
+ int8_t withSchema;
+ SArray* blockDataLen;
+ SArray* blockData;
+ SArray* blockTbName;
+ SArray* blockSchema;
+ int32_t createTableNum;
+ SArray* createTableLen;
+ SArray* createTableReq;
+} STaosxRsp;
+
+int32_t tEncodeSTaosxRsp(SEncoder* pEncoder, const STaosxRsp* pRsp);
+int32_t tDecodeSTaosxRsp(SDecoder* pDecoder, STaosxRsp* pRsp);
+void tDeleteSTaosxRsp(STaosxRsp* pRsp);
typedef struct {
SMqRspHead head;
diff --git a/include/common/tmsgdef.h b/include/common/tmsgdef.h
index 006ba7f21bf0177c2b0104a51ef7908785cced2d..3f917ff0d1665d90de079dfc3eae884412ed0e7f 100644
--- a/include/common/tmsgdef.h
+++ b/include/common/tmsgdef.h
@@ -272,6 +272,8 @@ enum {
TD_DEF_MSG_TYPE(TDMT_SYNC_LEADER_TRANSFER, "sync-leader-transfer", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_SYNC_SET_MNODE_STANDBY, "set-mnode-standby", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_SYNC_SET_VNODE_STANDBY, "set-vnode-standby", NULL, NULL)
+ TD_DEF_MSG_TYPE(TDMT_SYNC_HEARTBEAT, "sync-heartbeat", NULL, NULL)
+ TD_DEF_MSG_TYPE(TDMT_SYNC_HEARTBEAT_REPLY, "sync-heartbeat-reply", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_SYNC_MAX_MSG, "sync-max", NULL, NULL)
#if defined(TD_MSG_NUMBER_)
diff --git a/include/common/ttokendef.h b/include/common/ttokendef.h
index b38ec664358d08622c06d8f941873b59e43b9455..94128a4999b932e85efda41df4bd5829fb1076f3 100644
--- a/include/common/ttokendef.h
+++ b/include/common/ttokendef.h
@@ -89,237 +89,248 @@
#define TK_KEEP 71
#define TK_PAGES 72
#define TK_PAGESIZE 73
-#define TK_PRECISION 74
-#define TK_REPLICA 75
-#define TK_STRICT 76
-#define TK_VGROUPS 77
-#define TK_SINGLE_STABLE 78
-#define TK_RETENTIONS 79
-#define TK_SCHEMALESS 80
-#define TK_WAL_LEVEL 81
-#define TK_WAL_FSYNC_PERIOD 82
-#define TK_WAL_RETENTION_PERIOD 83
-#define TK_WAL_RETENTION_SIZE 84
-#define TK_WAL_ROLL_PERIOD 85
-#define TK_WAL_SEGMENT_SIZE 86
-#define TK_NK_COLON 87
-#define TK_TABLE 88
-#define TK_NK_LP 89
-#define TK_NK_RP 90
-#define TK_STABLE 91
-#define TK_ADD 92
-#define TK_COLUMN 93
-#define TK_MODIFY 94
-#define TK_RENAME 95
-#define TK_TAG 96
-#define TK_SET 97
-#define TK_NK_EQ 98
-#define TK_USING 99
-#define TK_TAGS 100
-#define TK_COMMENT 101
-#define TK_BOOL 102
-#define TK_TINYINT 103
-#define TK_SMALLINT 104
-#define TK_INT 105
-#define TK_INTEGER 106
-#define TK_BIGINT 107
-#define TK_FLOAT 108
-#define TK_DOUBLE 109
-#define TK_BINARY 110
-#define TK_TIMESTAMP 111
-#define TK_NCHAR 112
-#define TK_UNSIGNED 113
-#define TK_JSON 114
-#define TK_VARCHAR 115
-#define TK_MEDIUMBLOB 116
-#define TK_BLOB 117
-#define TK_VARBINARY 118
-#define TK_DECIMAL 119
-#define TK_MAX_DELAY 120
-#define TK_WATERMARK 121
-#define TK_ROLLUP 122
-#define TK_TTL 123
-#define TK_SMA 124
-#define TK_FIRST 125
-#define TK_LAST 126
-#define TK_SHOW 127
-#define TK_DATABASES 128
-#define TK_TABLES 129
-#define TK_STABLES 130
-#define TK_MNODES 131
-#define TK_MODULES 132
-#define TK_QNODES 133
-#define TK_FUNCTIONS 134
-#define TK_INDEXES 135
-#define TK_ACCOUNTS 136
-#define TK_APPS 137
-#define TK_CONNECTIONS 138
-#define TK_LICENCES 139
-#define TK_GRANTS 140
-#define TK_QUERIES 141
-#define TK_SCORES 142
-#define TK_TOPICS 143
-#define TK_VARIABLES 144
-#define TK_BNODES 145
-#define TK_SNODES 146
-#define TK_CLUSTER 147
-#define TK_TRANSACTIONS 148
-#define TK_DISTRIBUTED 149
-#define TK_CONSUMERS 150
-#define TK_SUBSCRIPTIONS 151
-#define TK_LIKE 152
-#define TK_INDEX 153
-#define TK_FUNCTION 154
-#define TK_INTERVAL 155
-#define TK_TOPIC 156
-#define TK_AS 157
-#define TK_WITH 158
-#define TK_META 159
-#define TK_CONSUMER 160
-#define TK_GROUP 161
-#define TK_DESC 162
-#define TK_DESCRIBE 163
-#define TK_RESET 164
-#define TK_QUERY 165
-#define TK_CACHE 166
-#define TK_EXPLAIN 167
-#define TK_ANALYZE 168
-#define TK_VERBOSE 169
-#define TK_NK_BOOL 170
-#define TK_RATIO 171
-#define TK_NK_FLOAT 172
-#define TK_OUTPUTTYPE 173
-#define TK_AGGREGATE 174
-#define TK_BUFSIZE 175
-#define TK_STREAM 176
-#define TK_INTO 177
-#define TK_TRIGGER 178
-#define TK_AT_ONCE 179
-#define TK_WINDOW_CLOSE 180
-#define TK_IGNORE 181
-#define TK_EXPIRED 182
-#define TK_KILL 183
-#define TK_CONNECTION 184
-#define TK_TRANSACTION 185
-#define TK_BALANCE 186
-#define TK_VGROUP 187
-#define TK_MERGE 188
-#define TK_REDISTRIBUTE 189
-#define TK_SPLIT 190
-#define TK_DELETE 191
-#define TK_INSERT 192
-#define TK_NULL 193
-#define TK_NK_QUESTION 194
-#define TK_NK_ARROW 195
-#define TK_ROWTS 196
-#define TK_TBNAME 197
-#define TK_QSTART 198
-#define TK_QEND 199
-#define TK_QDURATION 200
-#define TK_WSTART 201
-#define TK_WEND 202
-#define TK_WDURATION 203
-#define TK_CAST 204
-#define TK_NOW 205
-#define TK_TODAY 206
-#define TK_TIMEZONE 207
-#define TK_CLIENT_VERSION 208
-#define TK_SERVER_VERSION 209
-#define TK_SERVER_STATUS 210
-#define TK_CURRENT_USER 211
-#define TK_COUNT 212
-#define TK_LAST_ROW 213
-#define TK_BETWEEN 214
-#define TK_IS 215
-#define TK_NK_LT 216
-#define TK_NK_GT 217
-#define TK_NK_LE 218
-#define TK_NK_GE 219
-#define TK_NK_NE 220
-#define TK_MATCH 221
-#define TK_NMATCH 222
-#define TK_CONTAINS 223
-#define TK_IN 224
-#define TK_JOIN 225
-#define TK_INNER 226
-#define TK_SELECT 227
-#define TK_DISTINCT 228
-#define TK_WHERE 229
-#define TK_PARTITION 230
-#define TK_BY 231
-#define TK_SESSION 232
-#define TK_STATE_WINDOW 233
-#define TK_SLIDING 234
-#define TK_FILL 235
-#define TK_VALUE 236
-#define TK_NONE 237
-#define TK_PREV 238
-#define TK_LINEAR 239
-#define TK_NEXT 240
-#define TK_HAVING 241
-#define TK_RANGE 242
-#define TK_EVERY 243
-#define TK_ORDER 244
-#define TK_SLIMIT 245
-#define TK_SOFFSET 246
-#define TK_LIMIT 247
-#define TK_OFFSET 248
-#define TK_ASC 249
-#define TK_NULLS 250
-#define TK_ABORT 251
-#define TK_AFTER 252
-#define TK_ATTACH 253
-#define TK_BEFORE 254
-#define TK_BEGIN 255
-#define TK_BITAND 256
-#define TK_BITNOT 257
-#define TK_BITOR 258
-#define TK_BLOCKS 259
-#define TK_CHANGE 260
-#define TK_COMMA 261
-#define TK_COMPACT 262
-#define TK_CONCAT 263
-#define TK_CONFLICT 264
-#define TK_COPY 265
-#define TK_DEFERRED 266
-#define TK_DELIMITERS 267
-#define TK_DETACH 268
-#define TK_DIVIDE 269
-#define TK_DOT 270
-#define TK_EACH 271
-#define TK_END 272
-#define TK_FAIL 273
-#define TK_FILE 274
-#define TK_FOR 275
-#define TK_GLOB 276
-#define TK_ID 277
-#define TK_IMMEDIATE 278
-#define TK_IMPORT 279
-#define TK_INITIALLY 280
-#define TK_INSTEAD 281
-#define TK_ISNULL 282
-#define TK_KEY 283
-#define TK_NK_BITNOT 284
-#define TK_NK_SEMI 285
-#define TK_NOTNULL 286
-#define TK_OF 287
-#define TK_PLUS 288
-#define TK_PRIVILEGE 289
-#define TK_RAISE 290
-#define TK_REPLACE 291
-#define TK_RESTRICT 292
-#define TK_ROW 293
-#define TK_SEMI 294
-#define TK_STAR 295
-#define TK_STATEMENT 296
-#define TK_STRING 297
-#define TK_TIMES 298
-#define TK_UPDATE 299
-#define TK_VALUES 300
-#define TK_VARIABLE 301
-#define TK_VIEW 302
-#define TK_VNODES 303
-#define TK_WAL 304
+#define TK_TSDB_PAGESIZE 74
+#define TK_PRECISION 75
+#define TK_REPLICA 76
+#define TK_STRICT 77
+#define TK_VGROUPS 78
+#define TK_SINGLE_STABLE 79
+#define TK_RETENTIONS 80
+#define TK_SCHEMALESS 81
+#define TK_WAL_LEVEL 82
+#define TK_WAL_FSYNC_PERIOD 83
+#define TK_WAL_RETENTION_PERIOD 84
+#define TK_WAL_RETENTION_SIZE 85
+#define TK_WAL_ROLL_PERIOD 86
+#define TK_WAL_SEGMENT_SIZE 87
+#define TK_STT_TRIGGER 88
+#define TK_TABLE_PREFIX 89
+#define TK_TABLE_SUFFIX 90
+#define TK_NK_COLON 91
+#define TK_MAX_SPEED 92
+#define TK_TABLE 93
+#define TK_NK_LP 94
+#define TK_NK_RP 95
+#define TK_STABLE 96
+#define TK_ADD 97
+#define TK_COLUMN 98
+#define TK_MODIFY 99
+#define TK_RENAME 100
+#define TK_TAG 101
+#define TK_SET 102
+#define TK_NK_EQ 103
+#define TK_USING 104
+#define TK_TAGS 105
+#define TK_COMMENT 106
+#define TK_BOOL 107
+#define TK_TINYINT 108
+#define TK_SMALLINT 109
+#define TK_INT 110
+#define TK_INTEGER 111
+#define TK_BIGINT 112
+#define TK_FLOAT 113
+#define TK_DOUBLE 114
+#define TK_BINARY 115
+#define TK_TIMESTAMP 116
+#define TK_NCHAR 117
+#define TK_UNSIGNED 118
+#define TK_JSON 119
+#define TK_VARCHAR 120
+#define TK_MEDIUMBLOB 121
+#define TK_BLOB 122
+#define TK_VARBINARY 123
+#define TK_DECIMAL 124
+#define TK_MAX_DELAY 125
+#define TK_WATERMARK 126
+#define TK_ROLLUP 127
+#define TK_TTL 128
+#define TK_SMA 129
+#define TK_FIRST 130
+#define TK_LAST 131
+#define TK_SHOW 132
+#define TK_DATABASES 133
+#define TK_TABLES 134
+#define TK_STABLES 135
+#define TK_MNODES 136
+#define TK_MODULES 137
+#define TK_QNODES 138
+#define TK_FUNCTIONS 139
+#define TK_INDEXES 140
+#define TK_ACCOUNTS 141
+#define TK_APPS 142
+#define TK_CONNECTIONS 143
+#define TK_LICENCES 144
+#define TK_GRANTS 145
+#define TK_QUERIES 146
+#define TK_SCORES 147
+#define TK_TOPICS 148
+#define TK_VARIABLES 149
+#define TK_BNODES 150
+#define TK_SNODES 151
+#define TK_CLUSTER 152
+#define TK_TRANSACTIONS 153
+#define TK_DISTRIBUTED 154
+#define TK_CONSUMERS 155
+#define TK_SUBSCRIPTIONS 156
+#define TK_VNODES 157
+#define TK_LIKE 158
+#define TK_INDEX 159
+#define TK_FUNCTION 160
+#define TK_INTERVAL 161
+#define TK_TOPIC 162
+#define TK_AS 163
+#define TK_WITH 164
+#define TK_META 165
+#define TK_CONSUMER 166
+#define TK_GROUP 167
+#define TK_DESC 168
+#define TK_DESCRIBE 169
+#define TK_RESET 170
+#define TK_QUERY 171
+#define TK_CACHE 172
+#define TK_EXPLAIN 173
+#define TK_ANALYZE 174
+#define TK_VERBOSE 175
+#define TK_NK_BOOL 176
+#define TK_RATIO 177
+#define TK_NK_FLOAT 178
+#define TK_OUTPUTTYPE 179
+#define TK_AGGREGATE 180
+#define TK_BUFSIZE 181
+#define TK_STREAM 182
+#define TK_INTO 183
+#define TK_TRIGGER 184
+#define TK_AT_ONCE 185
+#define TK_WINDOW_CLOSE 186
+#define TK_IGNORE 187
+#define TK_EXPIRED 188
+#define TK_SUBTABLE 189
+#define TK_KILL 190
+#define TK_CONNECTION 191
+#define TK_TRANSACTION 192
+#define TK_BALANCE 193
+#define TK_VGROUP 194
+#define TK_MERGE 195
+#define TK_REDISTRIBUTE 196
+#define TK_SPLIT 197
+#define TK_DELETE 198
+#define TK_INSERT 199
+#define TK_NULL 200
+#define TK_NK_QUESTION 201
+#define TK_NK_ARROW 202
+#define TK_ROWTS 203
+#define TK_TBNAME 204
+#define TK_QSTART 205
+#define TK_QEND 206
+#define TK_QDURATION 207
+#define TK_WSTART 208
+#define TK_WEND 209
+#define TK_WDURATION 210
+#define TK_IROWTS 211
+#define TK_CAST 212
+#define TK_NOW 213
+#define TK_TODAY 214
+#define TK_TIMEZONE 215
+#define TK_CLIENT_VERSION 216
+#define TK_SERVER_VERSION 217
+#define TK_SERVER_STATUS 218
+#define TK_CURRENT_USER 219
+#define TK_COUNT 220
+#define TK_LAST_ROW 221
+#define TK_CASE 222
+#define TK_END 223
+#define TK_WHEN 224
+#define TK_THEN 225
+#define TK_ELSE 226
+#define TK_BETWEEN 227
+#define TK_IS 228
+#define TK_NK_LT 229
+#define TK_NK_GT 230
+#define TK_NK_LE 231
+#define TK_NK_GE 232
+#define TK_NK_NE 233
+#define TK_MATCH 234
+#define TK_NMATCH 235
+#define TK_CONTAINS 236
+#define TK_IN 237
+#define TK_JOIN 238
+#define TK_INNER 239
+#define TK_SELECT 240
+#define TK_DISTINCT 241
+#define TK_WHERE 242
+#define TK_PARTITION 243
+#define TK_BY 244
+#define TK_SESSION 245
+#define TK_STATE_WINDOW 246
+#define TK_SLIDING 247
+#define TK_FILL 248
+#define TK_VALUE 249
+#define TK_NONE 250
+#define TK_PREV 251
+#define TK_LINEAR 252
+#define TK_NEXT 253
+#define TK_HAVING 254
+#define TK_RANGE 255
+#define TK_EVERY 256
+#define TK_ORDER 257
+#define TK_SLIMIT 258
+#define TK_SOFFSET 259
+#define TK_LIMIT 260
+#define TK_OFFSET 261
+#define TK_ASC 262
+#define TK_NULLS 263
+#define TK_ABORT 264
+#define TK_AFTER 265
+#define TK_ATTACH 266
+#define TK_BEFORE 267
+#define TK_BEGIN 268
+#define TK_BITAND 269
+#define TK_BITNOT 270
+#define TK_BITOR 271
+#define TK_BLOCKS 272
+#define TK_CHANGE 273
+#define TK_COMMA 274
+#define TK_COMPACT 275
+#define TK_CONCAT 276
+#define TK_CONFLICT 277
+#define TK_COPY 278
+#define TK_DEFERRED 279
+#define TK_DELIMITERS 280
+#define TK_DETACH 281
+#define TK_DIVIDE 282
+#define TK_DOT 283
+#define TK_EACH 284
+#define TK_FAIL 285
+#define TK_FILE 286
+#define TK_FOR 287
+#define TK_GLOB 288
+#define TK_ID 289
+#define TK_IMMEDIATE 290
+#define TK_IMPORT 291
+#define TK_INITIALLY 292
+#define TK_INSTEAD 293
+#define TK_ISNULL 294
+#define TK_KEY 295
+#define TK_NK_BITNOT 296
+#define TK_NK_SEMI 297
+#define TK_NOTNULL 298
+#define TK_OF 299
+#define TK_PLUS 300
+#define TK_PRIVILEGE 301
+#define TK_RAISE 302
+#define TK_REPLACE 303
+#define TK_RESTRICT 304
+#define TK_ROW 305
+#define TK_SEMI 306
+#define TK_STAR 307
+#define TK_STATEMENT 308
+#define TK_STRING 309
+#define TK_TIMES 310
+#define TK_UPDATE 311
+#define TK_VALUES 312
+#define TK_VARIABLE 313
+#define TK_VIEW 314
+#define TK_WAL 315
#define TK_NK_SPACE 300
#define TK_NK_COMMENT 301
diff --git a/include/common/ttypes.h b/include/common/ttypes.h
index ceb3eae0338455ab207034fca707473c6c44940d..a88f65f6acf69d552073ab0ede31a0b027b25692 100644
--- a/include/common/ttypes.h
+++ b/include/common/ttypes.h
@@ -49,9 +49,6 @@ typedef struct {
#define varDataCopy(dst, v) memcpy((dst), (void *)(v), varDataTLen(v))
#define varDataLenByData(v) (*(VarDataLenT *)(((char *)(v)) - VARSTR_HEADER_SIZE))
#define varDataSetLen(v, _len) (((VarDataLenT *)(v))[0] = (VarDataLenT)(_len))
-#define IS_VAR_DATA_TYPE(t) \
- (((t) == TSDB_DATA_TYPE_VARCHAR) || ((t) == TSDB_DATA_TYPE_NCHAR) || ((t) == TSDB_DATA_TYPE_JSON))
-#define IS_STR_DATA_TYPE(t) (((t) == TSDB_DATA_TYPE_VARCHAR) || ((t) == TSDB_DATA_TYPE_NCHAR))
#define varDataNetLen(v) (htons(((VarDataLenT *)(v))[0]))
#define varDataNetTLen(v) (sizeof(VarDataLenT) + varDataNetLen(v))
@@ -268,11 +265,16 @@ typedef struct {
#define IS_UNSIGNED_NUMERIC_TYPE(_t) ((_t) >= TSDB_DATA_TYPE_UTINYINT && (_t) <= TSDB_DATA_TYPE_UBIGINT)
#define IS_FLOAT_TYPE(_t) ((_t) == TSDB_DATA_TYPE_FLOAT || (_t) == TSDB_DATA_TYPE_DOUBLE)
#define IS_INTEGER_TYPE(_t) ((IS_SIGNED_NUMERIC_TYPE(_t)) || (IS_UNSIGNED_NUMERIC_TYPE(_t)))
+#define IS_TIMESTAMP_TYPE(_t) ((_t) == TSDB_DATA_TYPE_TIMESTAMP)
#define IS_NUMERIC_TYPE(_t) ((IS_SIGNED_NUMERIC_TYPE(_t)) || (IS_UNSIGNED_NUMERIC_TYPE(_t)) || (IS_FLOAT_TYPE(_t)))
#define IS_MATHABLE_TYPE(_t) \
(IS_NUMERIC_TYPE(_t) || (_t) == (TSDB_DATA_TYPE_BOOL) || (_t) == (TSDB_DATA_TYPE_TIMESTAMP))
+#define IS_VAR_DATA_TYPE(t) \
+ (((t) == TSDB_DATA_TYPE_VARCHAR) || ((t) == TSDB_DATA_TYPE_NCHAR) || ((t) == TSDB_DATA_TYPE_JSON))
+#define IS_STR_DATA_TYPE(t) (((t) == TSDB_DATA_TYPE_VARCHAR) || ((t) == TSDB_DATA_TYPE_NCHAR))
+
#define IS_VALID_TINYINT(_t) ((_t) >= INT8_MIN && (_t) <= INT8_MAX)
#define IS_VALID_SMALLINT(_t) ((_t) >= INT16_MIN && (_t) <= INT16_MAX)
#define IS_VALID_INT(_t) ((_t) >= INT32_MIN && (_t) <= INT32_MAX)
diff --git a/include/dnode/mnode/mnode.h b/include/dnode/mnode/mnode.h
index 3bed77d682f14d3409b02bccd366bd99b3be2a94..0d43539629c170a2dcf22bee395cbc7caa93edc5 100644
--- a/include/dnode/mnode/mnode.h
+++ b/include/dnode/mnode/mnode.h
@@ -106,6 +106,8 @@ void mndPostProcessQueryMsg(SRpcMsg *pMsg);
*/
void mndGenerateMachineCode();
+void mndDumpSdb();
+
#ifdef __cplusplus
}
#endif
diff --git a/include/libs/executor/executor.h b/include/libs/executor/executor.h
index 1ce88905c23e8e6b9db35694ea0a9aa3197d5ba9..78eedaf921fe91fef7c3b8fc337a4d26c4d8d96e 100644
--- a/include/libs/executor/executor.h
+++ b/include/libs/executor/executor.h
@@ -29,6 +29,15 @@ typedef void* DataSinkHandle;
struct SRpcMsg;
struct SSubplan;
+typedef int32_t (*localFetchFp)(void*, uint64_t, uint64_t, uint64_t, int64_t, int32_t, void**, SArray*);
+
+typedef struct {
+ void* handle;
+ bool localExec;
+ localFetchFp fp;
+ SArray* explainRes;
+} SLocalFetch;
+
typedef struct {
void* tqReader;
void* meta;
@@ -41,7 +50,10 @@ typedef struct {
bool initTableReader;
bool initTqReader;
int32_t numOfVgroups;
- void* pStateBackend;
+
+ void* sContext; // SSnapContext*
+
+ void* pStateBackend;
} SReadHandle;
// in queue mode, data streams are seperated by msg
@@ -124,7 +136,8 @@ int32_t qGetQueryTableSchemaVersion(qTaskInfo_t tinfo, char* dbName, char* table
* @param handle
* @return
*/
-int32_t qExecTaskOpt(qTaskInfo_t tinfo, SArray* pResList, uint64_t* useconds);
+
+int32_t qExecTaskOpt(qTaskInfo_t tinfo, SArray* pResList, uint64_t* useconds, bool* hasMore, SLocalFetch *pLocal);
int32_t qExecTask(qTaskInfo_t tinfo, SSDataBlock** pBlock, uint64_t* useconds);
/**
@@ -181,11 +194,19 @@ int32_t qGetStreamScanStatus(qTaskInfo_t tinfo, uint64_t* uid, int64_t* ts);
int32_t qStreamPrepareTsdbScan(qTaskInfo_t tinfo, uint64_t uid, int64_t ts);
-int32_t qStreamPrepareScan(qTaskInfo_t tinfo, const STqOffsetVal* pOffset);
+int32_t qStreamPrepareScan(qTaskInfo_t tinfo, STqOffsetVal* pOffset, int8_t subType);
+
+int32_t qStreamScanMemData(qTaskInfo_t tinfo, const SSubmitReq* pReq);
int32_t qStreamExtractOffset(qTaskInfo_t tinfo, STqOffsetVal* pOffset);
-void* qStreamExtractMetaMsg(qTaskInfo_t tinfo);
+SMqMetaRsp* qStreamExtractMetaMsg(qTaskInfo_t tinfo);
+
+int64_t qStreamExtractPrepareUid(qTaskInfo_t tinfo);
+
+const SSchemaWrapper* qExtractSchemaFromTask(qTaskInfo_t tinfo);
+
+const char* qExtractTbnameFromTask(qTaskInfo_t tinfo);
void* qExtractReaderFromStreamScanner(void* scanner);
diff --git a/include/libs/function/function.h b/include/libs/function/function.h
index d5da306fd297dd49f4753aa01c6423cb9dd82e9c..60c7b18367ea9bc90c441ab005f85931106aecf3 100644
--- a/include/libs/function/function.h
+++ b/include/libs/function/function.h
@@ -34,64 +34,68 @@ typedef struct SFuncExecEnv {
int32_t calcMemSize;
} SFuncExecEnv;
-typedef bool (*FExecGetEnv)(struct SFunctionNode* pFunc, SFuncExecEnv* pEnv);
-typedef bool (*FExecInit)(struct SqlFunctionCtx *pCtx, struct SResultRowEntryInfo* pResultCellInfo);
+typedef bool (*FExecGetEnv)(struct SFunctionNode *pFunc, SFuncExecEnv *pEnv);
+typedef bool (*FExecInit)(struct SqlFunctionCtx *pCtx, struct SResultRowEntryInfo *pResultCellInfo);
typedef int32_t (*FExecProcess)(struct SqlFunctionCtx *pCtx);
-typedef int32_t (*FExecFinalize)(struct SqlFunctionCtx *pCtx, SSDataBlock* pBlock);
+typedef int32_t (*FExecFinalize)(struct SqlFunctionCtx *pCtx, SSDataBlock *pBlock);
typedef int32_t (*FScalarExecProcess)(SScalarParam *pInput, int32_t inputNum, SScalarParam *pOutput);
typedef int32_t (*FExecCombine)(struct SqlFunctionCtx *pDestCtx, struct SqlFunctionCtx *pSourceCtx);
typedef struct SScalarFuncExecFuncs {
- FExecGetEnv getEnv;
+ FExecGetEnv getEnv;
FScalarExecProcess process;
} SScalarFuncExecFuncs;
typedef struct SFuncExecFuncs {
- FExecGetEnv getEnv;
- FExecInit init;
- FExecProcess process;
+ FExecGetEnv getEnv;
+ FExecInit init;
+ FExecProcess process;
FExecFinalize finalize;
- FExecCombine combine;
+ FExecCombine combine;
} SFuncExecFuncs;
-#define MAX_INTERVAL_TIME_WINDOW 1000000 // maximum allowed time windows in final results
+#define MAX_INTERVAL_TIME_WINDOW 1000000 // maximum allowed time windows in final results
#define TOP_BOTTOM_QUERY_LIMIT 100
#define FUNCTIONS_NAME_MAX_LENGTH 16
typedef struct SResultRowEntryInfo {
- bool initialized:1; // output buffer has been initialized
- bool complete:1; // query has completed
- uint8_t isNullRes:6; // the result is null
- uint16_t numOfRes; // num of output result in current buffer. NOT NULL RESULT
+ bool initialized : 1; // output buffer has been initialized
+ bool complete : 1; // query has completed
+ uint8_t isNullRes : 6; // the result is null
+ uint16_t numOfRes; // num of output result in current buffer. NOT NULL RESULT
} SResultRowEntryInfo;
// determine the real data need to calculated the result
enum {
- BLK_DATA_NOT_LOAD = 0x0,
- BLK_DATA_SMA_LOAD = 0x1,
+ BLK_DATA_NOT_LOAD = 0x0,
+ BLK_DATA_SMA_LOAD = 0x1,
BLK_DATA_DATA_LOAD = 0x3,
- BLK_DATA_FILTEROUT = 0x4, // discard current data block since it is not qualified for filter
+ BLK_DATA_FILTEROUT = 0x4, // discard current data block since it is not qualified for filter
};
enum {
- MAIN_SCAN = 0x0u,
- REVERSE_SCAN = 0x1u, // todo remove it
- REPEAT_SCAN = 0x2u, //repeat scan belongs to the master scan
- MERGE_STAGE = 0x20u,
+ MAIN_SCAN = 0x0u,
+ REVERSE_SCAN = 0x1u, // todo remove it
+ REPEAT_SCAN = 0x2u, // repeat scan belongs to the master scan
};
typedef struct SPoint1 {
- int64_t key;
- union{double val; char* ptr;};
+ int64_t key;
+ union {
+ double val;
+ char *ptr;
+ };
} SPoint1;
struct SqlFunctionCtx;
struct SResultRowEntryInfo;
-//for selectivity query, the corresponding tag value is assigned if the data is qualified
+// for selectivity query, the corresponding tag value is assigned if the data is qualified
typedef struct SSubsidiaryResInfo {
- int16_t num;
+ int16_t num;
+ int32_t rowLen;
+ char *buf; // serialize data buffer
struct SqlFunctionCtx **pCtx;
} SSubsidiaryResInfo;
@@ -104,66 +108,65 @@ typedef struct SResultDataInfo {
} SResultDataInfo;
#define GET_RES_INFO(ctx) ((ctx)->resultInfo)
-#define GET_ROWCELL_INTERBUF(_c) ((void*) ((char*)(_c) + sizeof(SResultRowEntryInfo)))
+#define GET_ROWCELL_INTERBUF(_c) ((void *)((char *)(_c) + sizeof(SResultRowEntryInfo)))
typedef struct SInputColumnInfoData {
- int32_t totalRows; // total rows in current columnar data
- int32_t startRowIndex; // handle started row index
- int32_t numOfRows; // the number of rows needs to be handled
- int32_t numOfInputCols; // PTS is not included
- bool colDataAggIsSet;// if agg is set or not
- SColumnInfoData *pPTS; // primary timestamp column
+ int32_t totalRows; // total rows in current columnar data
+ int32_t startRowIndex; // handle started row index
+ int32_t numOfRows; // the number of rows needs to be handled
+ int32_t numOfInputCols; // PTS is not included
+ bool colDataAggIsSet; // if agg is set or not
+ SColumnInfoData *pPTS; // primary timestamp column
SColumnInfoData **pData;
SColumnDataAgg **pColumnDataAgg;
- uint64_t uid; // table uid, used to set the tag value when building the final query result for selectivity functions.
+ uint64_t uid; // table uid, used to set the tag value when building the final query result for selectivity functions.
} SInputColumnInfoData;
+typedef struct SSerializeDataHandle {
+ struct SDiskbasedBuf *pBuf;
+ int32_t currentPage;
+ void *pState;
+} SSerializeDataHandle;
+
// sql function runtime context
typedef struct SqlFunctionCtx {
- SInputColumnInfoData input;
- SResultDataInfo resDataInfo;
- uint32_t order; // data block scanner order: asc|desc
- uint8_t scanFlag; // record current running step, default: 0
- int16_t functionId; // function id
- char *pOutput; // final result output buffer, point to sdata->data
- int32_t numOfParams;
- SFunctParam *param; // input parameter, e.g., top(k, 20), the number of results for top query is kept in param
- SColumnInfoData *pTsOutput; // corresponding output buffer for timestamp of each result, e.g., top/bottom*/
- int32_t offset;
- struct SResultRowEntryInfo *resultInfo;
- SSubsidiaryResInfo subsidiaries;
- SPoint1 start;
- SPoint1 end;
- SFuncExecFuncs fpSet;
- SScalarFuncExecFuncs sfp;
- struct SExprInfo *pExpr;
- struct SDiskbasedBuf *pBuf;
- struct SSDataBlock *pSrcBlock;
- struct SSDataBlock *pDstBlock; // used by indifinite rows function to set selectivity
- int32_t curBufPage;
- bool increase;
- bool isStream;
-
- char udfName[TSDB_FUNC_NAME_LEN];
+ SInputColumnInfoData input;
+ SResultDataInfo resDataInfo;
+ uint32_t order; // data block scanner order: asc|desc
+ uint8_t scanFlag; // record current running step, default: 0
+ int16_t functionId; // function id
+ char *pOutput; // final result output buffer, point to sdata->data
+ int32_t numOfParams;
+ SFunctParam *param; // input parameter, e.g., top(k, 20), the number of results for top query is kept in param
+ SColumnInfoData *pTsOutput; // corresponding output buffer for timestamp of each result, e.g., top/bottom*/
+ int32_t offset;
+ struct SResultRowEntryInfo *resultInfo;
+ SSubsidiaryResInfo subsidiaries;
+ SPoint1 start;
+ SPoint1 end;
+ SFuncExecFuncs fpSet;
+ SScalarFuncExecFuncs sfp;
+ struct SExprInfo *pExpr;
+ struct SSDataBlock *pSrcBlock;
+ struct SSDataBlock *pDstBlock; // used by indefinite rows function to set selectivity
+ SSerializeDataHandle saveHandle;
+ bool isStream;
+
+ char udfName[TSDB_FUNC_NAME_LEN];
} SqlFunctionCtx;
-enum {
- TEXPR_BINARYEXPR_NODE= 0x1,
- TEXPR_UNARYEXPR_NODE = 0x2,
-};
-
typedef struct tExprNode {
int32_t nodeType;
union {
- struct {// function node
- char functionName[FUNCTIONS_NAME_MAX_LENGTH]; // todo refactor
- int32_t functionId;
- int32_t num;
- struct SFunctionNode *pFunctNode;
+ struct { // function node
+ char functionName[FUNCTIONS_NAME_MAX_LENGTH]; // todo refactor
+ int32_t functionId;
+ int32_t num;
+ struct SFunctionNode *pFunctNode;
} _function;
struct {
- struct SNode* pRootNode;
+ struct SNode *pRootNode;
} _optrRoot;
};
} tExprNode;
@@ -173,26 +176,26 @@ struct SScalarParam {
SColumnInfoData *columnData;
SHashObj *pHashFilter;
int32_t hashValueType;
- void *param; // other parameter, such as meta handle from vnode, to extract table name/tag value
+ void *param; // other parameter, such as meta handle from vnode, to extract table name/tag value
int32_t numOfRows;
+ int32_t numOfQualified; // number of qualified elements in the final results
};
-void cleanupResultRowEntry(struct SResultRowEntryInfo* pCell);
-int32_t getNumOfResult(SqlFunctionCtx* pCtx, int32_t num, SSDataBlock* pResBlock);
-bool isRowEntryCompleted(struct SResultRowEntryInfo* pEntry);
-bool isRowEntryInitialized(struct SResultRowEntryInfo* pEntry);
+void cleanupResultRowEntry(struct SResultRowEntryInfo *pCell);
+int32_t getNumOfResult(SqlFunctionCtx *pCtx, int32_t num, SSDataBlock *pResBlock);
+bool isRowEntryCompleted(struct SResultRowEntryInfo *pEntry);
+bool isRowEntryInitialized(struct SResultRowEntryInfo *pEntry);
typedef struct SPoint {
int64_t key;
- void * val;
+ void *val;
} SPoint;
-int32_t taosGetLinearInterpolationVal(SPoint* point, int32_t outputType, SPoint* point1, SPoint* point2, int32_t inputType);
+int32_t taosGetLinearInterpolationVal(SPoint *point, int32_t outputType, SPoint *point1, SPoint *point2,
+ int32_t inputType);
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// udf api
-struct SUdfInfo;
-
/**
* create udfd proxy, called once in process that call doSetupUdf/callUdfxxx/doTeardownUdf
* @return error code
@@ -216,6 +219,7 @@ int32_t udfStartUdfd(int32_t startDnodeId);
* @return
*/
int32_t udfStopUdfd();
+
#ifdef __cplusplus
}
#endif
diff --git a/include/libs/function/functionMgt.h b/include/libs/function/functionMgt.h
index 741b0fddebf36cd1a8f16d0d2265742bcb9ac16c..cb4960707b26fd9d17ea6a8af7250a5d504ff2a2 100644
--- a/include/libs/function/functionMgt.h
+++ b/include/libs/function/functionMgt.h
@@ -119,9 +119,10 @@ typedef enum EFunctionType {
FUNCTION_TYPE_WSTART,
FUNCTION_TYPE_WEND,
FUNCTION_TYPE_WDURATION,
+ FUNCTION_TYPE_IROWTS,
// internal function
- FUNCTION_TYPE_SELECT_VALUE,
+ FUNCTION_TYPE_SELECT_VALUE = 3750,
FUNCTION_TYPE_BLOCK_DIST, // block distribution aggregate function
FUNCTION_TYPE_BLOCK_DIST_INFO, // block distribution pseudo column function
FUNCTION_TYPE_TO_COLUMN,
@@ -176,7 +177,8 @@ int32_t fmGetFuncInfo(SFunctionNode* pFunc, char* pMsg, int32_t msgLen);
EFuncReturnRows fmGetFuncReturnRows(SFunctionNode* pFunc);
-bool fmIsBuiltinFunc(const char* pFunc);
+bool fmIsBuiltinFunc(const char* pFunc);
+EFunctionType fmGetFuncType(const char* pFunc);
bool fmIsAggFunc(int32_t funcId);
bool fmIsScalarFunc(int32_t funcId);
@@ -211,6 +213,7 @@ bool fmIsClientPseudoColumnFunc(int32_t funcId);
bool fmIsMultiRowsFunc(int32_t funcId);
bool fmIsKeepOrderFunc(int32_t funcId);
bool fmIsCumulativeFunc(int32_t funcId);
+bool fmIsInterpPseudoColumnFunc(int32_t funcId);
int32_t fmGetDistMethod(const SFunctionNode* pFunc, SFunctionNode** pPartialFunc, SFunctionNode** pMergeFunc);
diff --git a/include/libs/function/taosudf.h b/include/libs/function/taosudf.h
index 5e84b87a81ec1808dfc368ac285f4dabd2e1d57e..2b2063e3f61e575cd59de099feee3b83ad87ff9c 100644
--- a/include/libs/function/taosudf.h
+++ b/include/libs/function/taosudf.h
@@ -256,8 +256,9 @@ static FORCE_INLINE int32_t udfColDataSet(SUdfColumn* pColumn, uint32_t currentR
typedef int32_t (*TUdfScalarProcFunc)(SUdfDataBlock* block, SUdfColumn *resultCol);
typedef int32_t (*TUdfAggStartFunc)(SUdfInterBuf *buf);
-typedef int32_t (*TUdfAggProcessFunc)(SUdfDataBlock* block, SUdfInterBuf *interBuf, SUdfInterBuf *newInterBuf);
-typedef int32_t (*TUdfAggFinishFunc)(SUdfInterBuf* buf, SUdfInterBuf *resultData);
+typedef int32_t (*TUdfAggProcessFunc)(SUdfDataBlock *block, SUdfInterBuf *interBuf, SUdfInterBuf *newInterBuf);
+typedef int32_t (*TUdfAggMergeFunc)(SUdfInterBuf *inputBuf1, SUdfInterBuf *inputBuf2, SUdfInterBuf *outputBuf);
+typedef int32_t (*TUdfAggFinishFunc)(SUdfInterBuf *buf, SUdfInterBuf *resultData);
#ifdef __cplusplus
}
diff --git a/include/libs/nodes/cmdnodes.h b/include/libs/nodes/cmdnodes.h
index 3223d4cdb8dfd36284f3d36922451166226fdd3e..075251295190c95e80580d122b6a728ce93ced80 100644
--- a/include/libs/nodes/cmdnodes.h
+++ b/include/libs/nodes/cmdnodes.h
@@ -64,6 +64,7 @@ typedef struct SDatabaseOptions {
int64_t keep[3];
int32_t pages;
int32_t pagesize;
+ int32_t tsdbPageSize;
char precisionStr[3];
int8_t precision;
int8_t replica;
@@ -78,6 +79,12 @@ typedef struct SDatabaseOptions {
int32_t walRetentionSize;
int32_t walRollPeriod;
int32_t walSegmentSize;
+ bool walRetentionPeriodIsSet;
+ bool walRetentionSizeIsSet;
+ bool walRollPeriodIsSet;
+ int32_t sstTrigger;
+ int32_t tablePrefix;
+ int32_t tableSuffix;
} SDatabaseOptions;
typedef struct SCreateDatabaseStmt {
@@ -112,6 +119,7 @@ typedef struct SFlushDatabaseStmt {
typedef struct STrimDatabaseStmt {
ENodeType type;
char dbName[TSDB_DB_NAME_LEN];
+ int32_t maxSpeed;
} STrimDatabaseStmt;
typedef struct STableOptions {
@@ -268,6 +276,12 @@ typedef struct SShowDnodeVariablesStmt {
SNode* pDnodeId;
} SShowDnodeVariablesStmt;
+typedef struct SShowVnodesStmt {
+ ENodeType type;
+ SNode* pDnodeId;
+ SNode* pDnodeEndpoint;
+} SShowVnodesStmt;
+
typedef enum EIndexType { INDEX_TYPE_SMA = 1, INDEX_TYPE_FULLTEXT } EIndexType;
typedef struct SIndexOptions {
@@ -370,6 +384,8 @@ typedef struct SCreateStreamStmt {
bool ignoreExists;
SStreamOptions* pOptions;
SNode* pQuery;
+ SNodeList* pTags;
+ SNode* pSubtable;
} SCreateStreamStmt;
typedef struct SDropStreamStmt {
diff --git a/include/libs/nodes/nodes.h b/include/libs/nodes/nodes.h
index 5743d3360857dab460841d89e50360ba53d36b39..560832cd7499d6df1bc723c2bcd0a50af3ed5fc7 100644
--- a/include/libs/nodes/nodes.h
+++ b/include/libs/nodes/nodes.h
@@ -27,9 +27,9 @@ extern "C" {
#define LIST_LENGTH(l) (NULL != (l) ? (l)->length : 0)
-#define FOREACH(node, list) \
- for (SListCell* cell = (NULL != (list) ? (list)->pHead : NULL); \
- (NULL != cell ? (node = cell->pNode, true) : (node = NULL, false)); cell = cell->pNext)
+#define FOREACH(node, list) \
+ for (SListCell *cell = (NULL != (list) ? (list)->pHead : NULL), *pNext; \
+ (NULL != cell ? (node = cell->pNode, pNext = cell->pNext, true) : (node = NULL, pNext = NULL, false)); cell = pNext)
#define REPLACE_NODE(newNode) cell->pNode = (SNode*)(newNode)
@@ -103,6 +103,8 @@ typedef enum ENodeType {
QUERY_NODE_STREAM_OPTIONS,
QUERY_NODE_LEFT_VALUE,
QUERY_NODE_COLUMN_REF,
+ QUERY_NODE_WHEN_THEN,
+ QUERY_NODE_CASE_WHEN,
// Statement nodes are used in parser and planner module.
QUERY_NODE_SET_OPERATOR = 100,
@@ -183,12 +185,12 @@ typedef enum ENodeType {
QUERY_NODE_SHOW_DNODE_VARIABLES_STMT,
QUERY_NODE_SHOW_TRANSACTIONS_STMT,
QUERY_NODE_SHOW_SUBSCRIPTIONS_STMT,
+ QUERY_NODE_SHOW_VNODES_STMT,
QUERY_NODE_SHOW_CREATE_DATABASE_STMT,
QUERY_NODE_SHOW_CREATE_TABLE_STMT,
QUERY_NODE_SHOW_CREATE_STABLE_STMT,
QUERY_NODE_SHOW_TABLE_DISTRIBUTED_STMT,
QUERY_NODE_SHOW_LOCAL_VARIABLES_STMT,
- QUERY_NODE_SHOW_VNODES_STMT,
QUERY_NODE_SHOW_SCORES_STMT,
QUERY_NODE_KILL_CONNECTION_STMT,
QUERY_NODE_KILL_QUERY_STMT,
@@ -237,6 +239,7 @@ typedef enum ENodeType {
QUERY_NODE_PHYSICAL_PLAN_STREAM_FINAL_INTERVAL,
QUERY_NODE_PHYSICAL_PLAN_STREAM_SEMI_INTERVAL,
QUERY_NODE_PHYSICAL_PLAN_FILL,
+ QUERY_NODE_PHYSICAL_PLAN_STREAM_FILL,
QUERY_NODE_PHYSICAL_PLAN_MERGE_SESSION,
QUERY_NODE_PHYSICAL_PLAN_STREAM_SESSION,
QUERY_NODE_PHYSICAL_PLAN_STREAM_SEMI_SESSION,
@@ -244,6 +247,7 @@ typedef enum ENodeType {
QUERY_NODE_PHYSICAL_PLAN_MERGE_STATE,
QUERY_NODE_PHYSICAL_PLAN_STREAM_STATE,
QUERY_NODE_PHYSICAL_PLAN_PARTITION,
+ QUERY_NODE_PHYSICAL_PLAN_STREAM_PARTITION,
QUERY_NODE_PHYSICAL_PLAN_INDEF_ROWS_FUNC,
QUERY_NODE_PHYSICAL_PLAN_INTERP_FUNC,
QUERY_NODE_PHYSICAL_PLAN_DISPATCH,
@@ -274,6 +278,17 @@ typedef struct SNodeList {
SListCell* pTail;
} SNodeList;
+typedef struct SNodeAllocator SNodeAllocator;
+
+int32_t nodesInitAllocatorSet();
+void nodesDestroyAllocatorSet();
+int32_t nodesCreateAllocator(int64_t queryId, int32_t chunkSize, int64_t* pAllocatorId);
+int32_t nodesAcquireAllocator(int64_t allocatorId);
+int32_t nodesReleaseAllocator(int64_t allocatorId);
+int64_t nodesMakeAllocatorWeakRef(int64_t allocatorId);
+int64_t nodesReleaseAllocatorWeakRef(int64_t allocatorId);
+void nodesDestroyAllocator(int64_t allocatorId);
+
SNode* nodesMakeNode(ENodeType type);
void nodesDestroyNode(SNode* pNode);
@@ -319,6 +334,9 @@ int32_t nodesStringToNode(const char* pStr, SNode** pNode);
int32_t nodesListToString(const SNodeList* pList, bool format, char** pStr, int32_t* pLen);
int32_t nodesStringToList(const char* pStr, SNodeList** pList);
+int32_t nodesNodeToMsg(const SNode* pNode, char** pMsg, int32_t* pLen);
+int32_t nodesMsgToNode(const char* pStr, int32_t len, SNode** pNode);
+
int32_t nodesNodeToSQL(SNode* pNode, char* buf, int32_t bufSize, int32_t* len);
char* nodesGetNameFromColumnNode(SNode* pNode);
int32_t nodesGetOutputNumFromSlotList(SNodeList* pSlots);
diff --git a/include/libs/nodes/plannodes.h b/include/libs/nodes/plannodes.h
index 6fd6a316ebd1bd6b0dcdb9b0b222716cfa33203e..25ff18a8fc1a5b6f35e82c2323ed6a4023285d76 100644
--- a/include/libs/nodes/plannodes.h
+++ b/include/libs/nodes/plannodes.h
@@ -94,6 +94,8 @@ typedef struct SScanLogicNode {
SArray* pSmaIndexes;
SNodeList* pGroupTags;
bool groupSort;
+ SNodeList* pTags; // for create stream
+ SNode* pSubtable; // for create stream
int8_t cacheLastMode;
bool hasNormalCols; // neither tag column nor primary key tag column
bool sortPrimaryKey;
@@ -151,6 +153,8 @@ typedef struct SVnodeModifyLogicNode {
SArray* pDataBlocks;
SVgDataBlocks* pVgDataBlocks;
SNode* pAffectedRows; // SColumnNode
+ SNode* pStartTs; // SColumnNode
+ SNode* pEndTs; // SColumnNode
uint64_t tableId;
uint64_t stableId;
int8_t tableType; // table type
@@ -163,7 +167,8 @@ typedef struct SVnodeModifyLogicNode {
typedef struct SExchangeLogicNode {
SLogicNode node;
- int32_t srcGroupId;
+ int32_t srcStartGroupId;
+ int32_t srcEndGroupId;
} SExchangeLogicNode;
typedef struct SMergeLogicNode {
@@ -230,6 +235,8 @@ typedef struct SSortLogicNode {
typedef struct SPartitionLogicNode {
SLogicNode node;
SNodeList* pPartitionKeys;
+ SNodeList* pTags;
+ SNode* pSubtable;
} SPartitionLogicNode;
typedef enum ESubplanType {
@@ -329,6 +336,8 @@ typedef struct STableScanPhysiNode {
SNodeList* pDynamicScanFuncs;
SNodeList* pGroupTags;
bool groupSort;
+ SNodeList* pTags;
+ SNode* pSubtable;
int64_t interval;
int64_t offset;
int64_t sliding;
@@ -393,11 +402,15 @@ typedef struct SDownstreamSourceNode {
uint64_t schedId;
int32_t execId;
int32_t fetchMsgType;
+ bool localExec;
} SDownstreamSourceNode;
typedef struct SExchangePhysiNode {
SPhysiNode node;
- int32_t srcGroupId; // group id of datasource suplans
+ // for set operators, there will be multiple execution groups under one exchange, and the ids of these execution
+ // groups are consecutive
+ int32_t srcStartGroupId;
+ int32_t srcEndGroupId;
bool singleChannel;
SNodeList* pSrcEndPoints; // element is SDownstreamSource, scheduler fill by calling qSetSuplanExecutionNode
} SExchangePhysiNode;
@@ -451,6 +464,8 @@ typedef struct SFillPhysiNode {
EOrder inputTsOrder;
} SFillPhysiNode;
+typedef SFillPhysiNode SStreamFillPhysiNode;
+
typedef struct SMultiTableIntervalPhysiNode {
SIntervalPhysiNode interval;
SNodeList* pPartitionKeys;
@@ -488,6 +503,12 @@ typedef struct SPartitionPhysiNode {
SNodeList* pTargets;
} SPartitionPhysiNode;
+typedef struct SStreamPartitionPhysiNode {
+ SPartitionPhysiNode part;
+ SNodeList* pTags;
+ SNode* pSubtable;
+} SStreamPartitionPhysiNode;
+
typedef struct SDataSinkNode {
ENodeType type;
SDataBlockDescNode* pInputDataBlockDesc;
@@ -523,6 +544,8 @@ typedef struct SDataDeleterNode {
char tsColName[TSDB_COL_NAME_LEN];
STimeWindow deleteTimeRange;
SNode* pAffectedRows;
+ SNode* pStartTs;
+ SNode* pEndTs;
} SDataDeleterNode;
typedef struct SSubplan {
diff --git a/include/libs/nodes/querynodes.h b/include/libs/nodes/querynodes.h
index 3a1eaf289e4ba245544b985e893f746845c37c88..5ee097bd9254970536c61caace0fdde3f6a11d9d 100644
--- a/include/libs/nodes/querynodes.h
+++ b/include/libs/nodes/querynodes.h
@@ -241,6 +241,19 @@ typedef struct SFillNode {
STimeWindow timeRange;
} SFillNode;
+typedef struct SWhenThenNode {
+ SExprNode node; // QUERY_NODE_WHEN_THEN
+ SNode* pWhen;
+ SNode* pThen;
+} SWhenThenNode;
+
+typedef struct SCaseWhenNode {
+ SExprNode node; // QUERY_NODE_CASE_WHEN
+ SNode* pCase;
+ SNode* pElse;
+ SNodeList* pWhenThenList;
+} SCaseWhenNode;
+
typedef struct SSelectStmt {
ENodeType type; // QUERY_NODE_SELECT_STMT
bool isDistinct;
@@ -248,6 +261,8 @@ typedef struct SSelectStmt {
SNode* pFromTable;
SNode* pWhere;
SNodeList* pPartitionByList;
+ SNodeList* pTags; // for create stream
+ SNode* pSubtable; // for create stream
SNode* pWindow;
SNodeList* pGroupByList; // SGroupingSetNode
SNode* pHaving;
@@ -315,6 +330,8 @@ typedef struct SDeleteStmt {
SNode* pFromTable; // FROM clause
SNode* pWhere; // WHERE clause
SNode* pCountFunc; // count the number of rows affected
+ SNode* pFirstFunc; // the start timestamp when the data was actually deleted
+ SNode* pLastFunc; // the end timestamp when the data was actually deleted
SNode* pTagCond; // pWhere divided into pTagCond and timeRange
STimeWindow timeRange;
uint8_t precision;
diff --git a/include/libs/parser/parser.h b/include/libs/parser/parser.h
index 95bde858640b3d4cd5df616bc1d0a5a65795d8f3..b1a937910dfe8defd107ec525afd20edfc639aaf 100644
--- a/include/libs/parser/parser.h
+++ b/include/libs/parser/parser.h
@@ -56,6 +56,7 @@ typedef struct SParseContext {
bool nodeOffline;
SArray* pTableMetaPos; // sql table pos => catalog data pos
SArray* pTableVgroupPos; // sql table pos => catalog data pos
+ int64_t allocatorId;
} SParseContext;
int32_t qParseSql(SParseContext* pCxt, SQuery** pQuery);
diff --git a/include/libs/planner/planner.h b/include/libs/planner/planner.h
index 05caa7a7bb56617ef34c03e3646f85ac98f65a56..e52fe39527dda9aa80ea05c1ffcab487b84cd466 100644
--- a/include/libs/planner/planner.h
+++ b/include/libs/planner/planner.h
@@ -39,6 +39,7 @@ typedef struct SPlanContext {
int32_t msgLen;
const char* pUser;
bool sysInfo;
+ int64_t allocatorId;
} SPlanContext;
// Create the physical plan for the query, according to the AST.
@@ -52,10 +53,14 @@ int32_t qSetSubplanExecutionNode(SSubplan* pSubplan, int32_t groupId, SDownstrea
void qClearSubplanExecutionNode(SSubplan* pSubplan);
-// Convert to subplan to string for the scheduler to send to the executor
+// Convert to subplan to display string for the scheduler to send to the executor
int32_t qSubPlanToString(const SSubplan* pSubplan, char** pStr, int32_t* pLen);
int32_t qStringToSubplan(const char* pStr, SSubplan** pSubplan);
+// Convert to subplan to msg for the scheduler to send to the executor
+int32_t qSubPlanToMsg(const SSubplan* pSubplan, char** pStr, int32_t* pLen);
+int32_t qMsgToSubplan(const char* pStr, int32_t len, SSubplan** pSubplan);
+
char* qQueryPlanToString(const SQueryPlan* pPlan);
SQueryPlan* qStringToQueryPlan(const char* pStr);
diff --git a/include/libs/qcom/query.h b/include/libs/qcom/query.h
index 1fa7dca7dc6ad975e87e18570c8a9a35d990bb7e..e9f3864f6738bc7714db49d8f2373900fe10bfb6 100644
--- a/include/libs/qcom/query.h
+++ b/include/libs/qcom/query.h
@@ -52,6 +52,7 @@ typedef enum {
#define QUERY_POLICY_VNODE 1
#define QUERY_POLICY_HYBRID 2
#define QUERY_POLICY_QNODE 3
+#define QUERY_POLICY_CLIENT 4
typedef struct STableComInfo {
uint8_t numOfTags; // the number of tags in schema
@@ -116,6 +117,8 @@ typedef struct STableMeta {
typedef struct SDBVgInfo {
int32_t vgVersion;
+ int16_t hashPrefix;
+ int16_t hashSuffix;
int8_t hashMethod;
int32_t numOfTable; // DB's table num, unit is TSDB_TABLE_NUM_UNIT
SHashObj* vgHash; // key:vgId, value:SVgroupInfo
@@ -267,43 +270,43 @@ extern int32_t (*queryProcessMsgRsp[TDMT_MAX])(void* output, char* msg, int32_t
#define qFatal(...) \
do { \
if (qDebugFlag & DEBUG_FATAL) { \
- taosPrintLog("QRY FATAL ", DEBUG_FATAL, tsLogEmbedded ? 255 : qDebugFlag, __VA_ARGS__); \
+ taosPrintLog("QRY FATAL ", DEBUG_FATAL, qDebugFlag, __VA_ARGS__); \
} \
} while (0)
#define qError(...) \
do { \
if (qDebugFlag & DEBUG_ERROR) { \
- taosPrintLog("QRY ERROR ", DEBUG_ERROR, tsLogEmbedded ? 255 : qDebugFlag, __VA_ARGS__); \
+ taosPrintLog("QRY ERROR ", DEBUG_ERROR, qDebugFlag, __VA_ARGS__); \
} \
} while (0)
#define qWarn(...) \
do { \
if (qDebugFlag & DEBUG_WARN) { \
- taosPrintLog("QRY WARN ", DEBUG_WARN, tsLogEmbedded ? 255 : qDebugFlag, __VA_ARGS__); \
+ taosPrintLog("QRY WARN ", DEBUG_WARN, qDebugFlag, __VA_ARGS__); \
} \
} while (0)
#define qInfo(...) \
do { \
if (qDebugFlag & DEBUG_INFO) { \
- taosPrintLog("QRY ", DEBUG_INFO, tsLogEmbedded ? 255 : qDebugFlag, __VA_ARGS__); \
+ taosPrintLog("QRY ", DEBUG_INFO, qDebugFlag, __VA_ARGS__); \
} \
} while (0)
#define qDebug(...) \
do { \
if (qDebugFlag & DEBUG_DEBUG) { \
- taosPrintLog("QRY ", DEBUG_DEBUG, tsLogEmbedded ? 255 : qDebugFlag, __VA_ARGS__); \
+ taosPrintLog("QRY ", DEBUG_DEBUG, qDebugFlag, __VA_ARGS__); \
} \
} while (0)
#define qTrace(...) \
do { \
if (qDebugFlag & DEBUG_TRACE) { \
- taosPrintLog("QRY ", DEBUG_TRACE, tsLogEmbedded ? 255 : qDebugFlag, __VA_ARGS__); \
+ taosPrintLog("QRY ", DEBUG_TRACE, qDebugFlag, __VA_ARGS__); \
} \
} while (0)
#define qDebugL(...) \
do { \
if (qDebugFlag & DEBUG_DEBUG) { \
- taosPrintLongString("QRY ", DEBUG_DEBUG, tsLogEmbedded ? 255 : qDebugFlag, __VA_ARGS__); \
+ taosPrintLongString("QRY ", DEBUG_DEBUG, qDebugFlag, __VA_ARGS__); \
} \
} while (0)
diff --git a/include/libs/qworker/qworker.h b/include/libs/qworker/qworker.h
index 87aefe5187ec7ca61a4de5f6f14adbbf26861dfc..99f51892284bb4eb9884b763bef051771d39e1b7 100644
--- a/include/libs/qworker/qworker.h
+++ b/include/libs/qworker/qworker.h
@@ -29,6 +29,7 @@ enum {
NODE_TYPE_QNODE,
NODE_TYPE_SNODE,
NODE_TYPE_MNODE,
+ NODE_TYPE_CLIENT,
};
typedef struct SQWorkerCfg {
@@ -55,7 +56,24 @@ typedef struct {
uint64_t numOfErrors;
} SQWorkerStat;
-int32_t qWorkerInit(int8_t nodeType, int32_t nodeId, SQWorkerCfg *cfg, void **qWorkerMgmt, const SMsgCb *pMsgCb);
+typedef struct SQWMsgInfo {
+ int8_t taskType;
+ int8_t explain;
+ int8_t needFetch;
+} SQWMsgInfo;
+
+typedef struct SQWMsg {
+ void *node;
+ int32_t code;
+ int32_t msgType;
+ void *msg;
+ int32_t msgLen;
+ SQWMsgInfo msgInfo;
+ SRpcHandleInfo connInfo;
+} SQWMsg;
+
+
+int32_t qWorkerInit(int8_t nodeType, int32_t nodeId, void **qWorkerMgmt, const SMsgCb *pMsgCb);
int32_t qWorkerAbortPreprocessQueryMsg(void *qWorkerMgmt, SRpcMsg *pMsg);
@@ -77,10 +95,14 @@ int32_t qWorkerProcessHbMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg, int64_
int32_t qWorkerProcessDeleteMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg, SDeleteRes *pRes);
-void qWorkerDestroy(void **qWorkerMgmt);
+void qWorkerDestroy(void **qWorkerMgmt);
int32_t qWorkerGetStat(SReadHandle *handle, void *qWorkerMgmt, SQWorkerStat *pStat);
+int32_t qWorkerProcessLocalQuery(void *pMgmt, uint64_t sId, uint64_t qId, uint64_t tId, int64_t rId, int32_t eId, SQWMsg *qwMsg, SArray *explainRes);
+
+int32_t qWorkerProcessLocalFetch(void *pMgmt, uint64_t sId, uint64_t qId, uint64_t tId, int64_t rId, int32_t eId, void** pRsp, SArray* explainRes);
+
#ifdef __cplusplus
}
#endif
diff --git a/include/libs/scalar/filter.h b/include/libs/scalar/filter.h
index 1f1d9dea933affc01394111f5d5c3c4082b32cfb..e02b4a617253c7ade77a922568b478eaf62e0d9a 100644
--- a/include/libs/scalar/filter.h
+++ b/include/libs/scalar/filter.h
@@ -31,13 +31,17 @@ enum {
FLT_OPTION_NEED_UNIQE = 4,
};
+#define FILTER_RESULT_ALL_QUALIFIED 0x1
+#define FILTER_RESULT_NONE_QUALIFIED 0x2
+#define FILTER_RESULT_PARTIAL_QUALIFIED 0x3
+
typedef struct SFilterColumnParam {
int32_t numOfCols;
SArray *pDataBlock;
} SFilterColumnParam;
extern int32_t filterInitFromNode(SNode *pNode, SFilterInfo **pinfo, uint32_t options);
-extern bool filterExecute(SFilterInfo *info, SSDataBlock *pSrc, int8_t **p, SColumnDataAgg *statis, int16_t numOfCols);
+extern bool filterExecute(SFilterInfo *info, SSDataBlock *pSrc, SColumnInfoData** p, SColumnDataAgg *statis, int16_t numOfCols, int32_t* pFilterResStatus);
extern int32_t filterSetDataFromSlotId(SFilterInfo *info, void *param);
extern int32_t filterSetDataFromColId(SFilterInfo *info, void *param);
extern int32_t filterGetTimeRange(SNode *pNode, STimeWindow *win, bool *isStrict);
diff --git a/include/libs/scheduler/scheduler.h b/include/libs/scheduler/scheduler.h
index e6973cd390c10ff524f70549d161090582ee56ab..077c23c1b580bb693e0ec581920b9ebea46de751 100644
--- a/include/libs/scheduler/scheduler.h
+++ b/include/libs/scheduler/scheduler.h
@@ -64,9 +64,11 @@ typedef bool (*schedulerChkKillFp)(void* param);
typedef struct SSchedulerReq {
bool syncReq;
+ bool localReq;
SRequestConnInfo *pConn;
SArray *pNodeList;
SQueryPlan *pDag;
+ int64_t allocatorRefId;
const char *sql;
int64_t startTs;
schedulerExecFp execFp;
diff --git a/include/libs/stream/streamState.h b/include/libs/stream/streamState.h
new file mode 100644
index 0000000000000000000000000000000000000000..0adcf976f01d43eaeb37b5d04809d1fcf8e8a79b
--- /dev/null
+++ b/include/libs/stream/streamState.h
@@ -0,0 +1,83 @@
+/*
+ * Copyright (c) 2019 TAOS Data, Inc.
+ *
+ * This program is free software: you can use, redistribute, and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3
+ * or later ("AGPL"), as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see .
+ */
+
+#include "tdatablock.h"
+#include "tdbInt.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#ifndef _STREAM_STATE_H_
+#define _STREAM_STATE_H_
+
+typedef struct SStreamTask SStreamTask;
+
+// incremental state storage
+typedef struct {
+ SStreamTask* pOwner;
+ TDB* db;
+ TTB* pStateDb;
+ TTB* pFuncStateDb;
+ TTB* pFillStateDb; // todo refactor
+ TXN txn;
+} SStreamState;
+
+SStreamState* streamStateOpen(char* path, SStreamTask* pTask, bool specPath);
+void streamStateClose(SStreamState* pState);
+int32_t streamStateBegin(SStreamState* pState);
+int32_t streamStateCommit(SStreamState* pState);
+int32_t streamStateAbort(SStreamState* pState);
+
+typedef struct {
+ TBC* pCur;
+} SStreamStateCur;
+
+int32_t streamStateFuncPut(SStreamState* pState, const STupleKey* key, const void* value, int32_t vLen);
+int32_t streamStateFuncGet(SStreamState* pState, const STupleKey* key, void** pVal, int32_t* pVLen);
+int32_t streamStateFuncDel(SStreamState* pState, const STupleKey* key);
+
+int32_t streamStatePut(SStreamState* pState, const SWinKey* key, const void* value, int32_t vLen);
+int32_t streamStateGet(SStreamState* pState, const SWinKey* key, void** pVal, int32_t* pVLen);
+int32_t streamStateDel(SStreamState* pState, const SWinKey* key);
+
+int32_t streamStateFillPut(SStreamState* pState, const SWinKey* key, const void* value, int32_t vLen);
+int32_t streamStateFillGet(SStreamState* pState, const SWinKey* key, void** pVal, int32_t* pVLen);
+int32_t streamStateFillDel(SStreamState* pState, const SWinKey* key);
+
+int32_t streamStateAddIfNotExist(SStreamState* pState, const SWinKey* key, void** pVal, int32_t* pVLen);
+int32_t streamStateReleaseBuf(SStreamState* pState, const SWinKey* key, void* pVal);
+void streamFreeVal(void* val);
+
+SStreamStateCur* streamStateGetCur(SStreamState* pState, const SWinKey* key);
+SStreamStateCur* streamStateGetAndCheckCur(SStreamState* pState, SWinKey* key);
+SStreamStateCur* streamStateFillSeekKeyNext(SStreamState* pState, const SWinKey* key);
+SStreamStateCur* streamStateFillSeekKeyPrev(SStreamState* pState, const SWinKey* key);
+void streamStateFreeCur(SStreamStateCur* pCur);
+
+int32_t streamStateGetGroupKVByCur(SStreamStateCur* pCur, SWinKey* pKey, const void** pVal, int32_t* pVLen);
+int32_t streamStateGetKVByCur(SStreamStateCur* pCur, SWinKey* pKey, const void** pVal, int32_t* pVLen);
+
+int32_t streamStateSeekFirst(SStreamState* pState, SStreamStateCur* pCur);
+int32_t streamStateSeekLast(SStreamState* pState, SStreamStateCur* pCur);
+
+int32_t streamStateCurNext(SStreamState* pState, SStreamStateCur* pCur);
+int32_t streamStateCurPrev(SStreamState* pState, SStreamStateCur* pCur);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* ifndef _STREAM_STATE_H_ */
diff --git a/include/libs/stream/tstream.h b/include/libs/stream/tstream.h
index 2c275090080f73577cd28b3e10b3f1e102b4556e..bdc12f7e3f1c03c88ea87233f3bf708b86800c5b 100644
--- a/include/libs/stream/tstream.h
+++ b/include/libs/stream/tstream.h
@@ -16,6 +16,7 @@
#include "executor.h"
#include "os.h"
#include "query.h"
+#include "streamState.h"
#include "tdatablock.h"
#include "tdbInt.h"
#include "tmsg.h"
@@ -124,6 +125,14 @@ typedef struct {
SArray* blocks; // SArray
} SStreamDataBlock;
+// ref data block, for delete
+typedef struct {
+ int8_t type;
+ int64_t ver;
+ int32_t* dataRef;
+ SSDataBlock* pBlock;
+} SStreamRefDataBlock;
+
typedef struct {
int8_t type;
} SStreamCheckpoint;
@@ -263,14 +272,6 @@ typedef struct {
SArray* checkpointVer;
} SStreamRecoveringState;
-// incremental state storage
-typedef struct {
- SStreamTask* pOwner;
- TDB* db;
- TTB* pStateDb;
- TXN txn;
-} SStreamState;
-
typedef struct SStreamTask {
int64_t streamId;
int32_t taskId;
@@ -346,7 +347,8 @@ static FORCE_INLINE int32_t streamTaskInput(SStreamTask* pTask, SStreamQueueItem
qDebug("task %d %p submit enqueue %p %p %p", pTask->taskId, pTask, pItem, pSubmitClone, pSubmitClone->data);
taosWriteQitem(pTask->inputQueue->queue, pSubmitClone);
// qStreamInput(pTask->exec.executor, pSubmitClone);
- } else if (pItem->type == STREAM_INPUT__DATA_BLOCK || pItem->type == STREAM_INPUT__DATA_RETRIEVE) {
+ } else if (pItem->type == STREAM_INPUT__DATA_BLOCK || pItem->type == STREAM_INPUT__DATA_RETRIEVE ||
+ pItem->type == STREAM_INPUT__REF_DATA_BLOCK) {
taosWriteQitem(pTask->inputQueue->queue, pItem);
// qStreamInput(pTask->exec.executor, pItem);
} else if (pItem->type == STREAM_INPUT__CHECKPOINT) {
@@ -499,7 +501,9 @@ typedef struct {
int32_t tDecodeStreamDispatchReq(SDecoder* pDecoder, SStreamDispatchReq* pReq);
int32_t tDecodeStreamRetrieveReq(SDecoder* pDecoder, SStreamRetrieveReq* pReq);
-void tFreeStreamDispatchReq(SStreamDispatchReq* pReq);
+void tDeleteStreamRetrieveReq(SStreamRetrieveReq* pReq);
+
+void tDeleteStreamDispatchReq(SStreamDispatchReq* pReq);
int32_t streamSetupTrigger(SStreamTask* pTask);
@@ -540,37 +544,6 @@ int32_t streamMetaCommit(SStreamMeta* pMeta);
int32_t streamMetaRollBack(SStreamMeta* pMeta);
int32_t streamLoadTasks(SStreamMeta* pMeta);
-SStreamState* streamStateOpen(char* path, SStreamTask* pTask);
-void streamStateClose(SStreamState* pState);
-int32_t streamStateBegin(SStreamState* pState);
-int32_t streamStateCommit(SStreamState* pState);
-int32_t streamStateAbort(SStreamState* pState);
-
-typedef struct {
- TBC* pCur;
-} SStreamStateCur;
-
-#if 1
-int32_t streamStatePut(SStreamState* pState, const SWinKey* key, const void* value, int32_t vLen);
-int32_t streamStateGet(SStreamState* pState, const SWinKey* key, void** pVal, int32_t* pVLen);
-int32_t streamStateDel(SStreamState* pState, const SWinKey* key);
-void streamFreeVal(void* val);
-
-SStreamStateCur* streamStateGetCur(SStreamState* pState, const SWinKey* key);
-SStreamStateCur* streamStateSeekKeyNext(SStreamState* pState, const SWinKey* key);
-SStreamStateCur* streamStateSeekKeyPrev(SStreamState* pState, const SWinKey* key);
-void streamStateFreeCur(SStreamStateCur* pCur);
-
-int32_t streamStateGetKVByCur(SStreamStateCur* pCur, SWinKey* pKey, const void** pVal, int32_t* pVLen);
-
-int32_t streamStateSeekFirst(SStreamState* pState, SStreamStateCur* pCur);
-int32_t streamStateSeekLast(SStreamState* pState, SStreamStateCur* pCur);
-
-int32_t streamStateCurNext(SStreamState* pState, SStreamStateCur* pCur);
-int32_t streamStateCurPrev(SStreamState* pState, SStreamStateCur* pCur);
-
-#endif
-
#ifdef __cplusplus
}
#endif
diff --git a/include/libs/sync/sync.h b/include/libs/sync/sync.h
index e6a4dd1d493969a333005a64f515ba35dde34573..285e079b3ec90a066cd70fa3e7576dac3d5c8b8d 100644
--- a/include/libs/sync/sync.h
+++ b/include/libs/sync/sync.h
@@ -22,6 +22,7 @@ extern "C" {
#include "cJSON.h"
#include "tdef.h"
+#include "tlrucache.h"
#include "tmsgcb.h"
extern bool gRaftDetailLog;
@@ -153,7 +154,8 @@ typedef struct SSyncFSM {
// abstract definition of log store in raft
// SWal implements it
typedef struct SSyncLogStore {
- void* data;
+ SLRUCache* pCache;
+ void* data;
// append one log entry
int32_t (*appendEntry)(struct SSyncLogStore* pLogStore, SSyncRaftEntry* pEntry);
diff --git a/include/libs/sync/syncTools.h b/include/libs/sync/syncTools.h
index 6c95c3c6d72929045bd780056811c1938864717b..de2271554d2b9166ec240ea07e91dea9d017ef92 100644
--- a/include/libs/sync/syncTools.h
+++ b/include/libs/sync/syncTools.h
@@ -444,6 +444,70 @@ void syncAppendEntriesReplyPrint2(char* s, const SyncAppendEntriesReply* pMsg);
void syncAppendEntriesReplyLog(const SyncAppendEntriesReply* pMsg);
void syncAppendEntriesReplyLog2(char* s, const SyncAppendEntriesReply* pMsg);
+// ---------------------------------------------
+typedef struct SyncHeartbeat {
+ uint32_t bytes;
+ int32_t vgId;
+ uint32_t msgType;
+ SRaftId srcId;
+ SRaftId destId;
+
+ // private data
+ SyncTerm term;
+ SyncIndex commitIndex;
+ SyncTerm privateTerm;
+} SyncHeartbeat;
+
+SyncHeartbeat* syncHeartbeatBuild(int32_t vgId);
+void syncHeartbeatDestroy(SyncHeartbeat* pMsg);
+void syncHeartbeatSerialize(const SyncHeartbeat* pMsg, char* buf, uint32_t bufLen);
+void syncHeartbeatDeserialize(const char* buf, uint32_t len, SyncHeartbeat* pMsg);
+char* syncHeartbeatSerialize2(const SyncHeartbeat* pMsg, uint32_t* len);
+SyncHeartbeat* syncHeartbeatDeserialize2(const char* buf, uint32_t len);
+void syncHeartbeat2RpcMsg(const SyncHeartbeat* pMsg, SRpcMsg* pRpcMsg);
+void syncHeartbeatFromRpcMsg(const SRpcMsg* pRpcMsg, SyncHeartbeat* pMsg);
+SyncHeartbeat* syncHeartbeatFromRpcMsg2(const SRpcMsg* pRpcMsg);
+cJSON* syncHeartbeat2Json(const SyncHeartbeat* pMsg);
+char* syncHeartbeat2Str(const SyncHeartbeat* pMsg);
+
+// for debug ----------------------
+void syncHeartbeatPrint(const SyncHeartbeat* pMsg);
+void syncHeartbeatPrint2(char* s, const SyncHeartbeat* pMsg);
+void syncHeartbeatLog(const SyncHeartbeat* pMsg);
+void syncHeartbeatLog2(char* s, const SyncHeartbeat* pMsg);
+
+// ---------------------------------------------
+typedef struct SyncHeartbeatReply {
+ uint32_t bytes;
+ int32_t vgId;
+ uint32_t msgType;
+ SRaftId srcId;
+ SRaftId destId;
+
+ // private data
+ SyncTerm term;
+ SyncTerm privateTerm;
+ int64_t startTime;
+} SyncHeartbeatReply;
+
+SyncHeartbeatReply* syncHeartbeatReplyBuild(int32_t vgId);
+void syncHeartbeatReplyDestroy(SyncHeartbeatReply* pMsg);
+void syncHeartbeatReplySerialize(const SyncHeartbeatReply* pMsg, char* buf, uint32_t bufLen);
+void syncHeartbeatReplyDeserialize(const char* buf, uint32_t len, SyncHeartbeatReply* pMsg);
+char* syncHeartbeatReplySerialize2(const SyncHeartbeatReply* pMsg, uint32_t* len);
+SyncHeartbeatReply* syncHeartbeatReplyDeserialize2(const char* buf, uint32_t len);
+void syncHeartbeatReply2RpcMsg(const SyncHeartbeatReply* pMsg, SRpcMsg* pRpcMsg);
+void syncHeartbeatReplyFromRpcMsg(const SRpcMsg* pRpcMsg, SyncHeartbeatReply* pMsg);
+SyncHeartbeatReply* syncHeartbeatReplyFromRpcMsg2(const SRpcMsg* pRpcMsg);
+cJSON* syncHeartbeatReply2Json(const SyncHeartbeatReply* pMsg);
+char* syncHeartbeatReply2Str(const SyncHeartbeatReply* pMsg);
+
+// for debug ----------------------
+void syncHeartbeatReplyPrint(const SyncHeartbeatReply* pMsg);
+void syncHeartbeatReplyPrint2(char* s, const SyncHeartbeatReply* pMsg);
+void syncHeartbeatReplyLog(const SyncHeartbeatReply* pMsg);
+void syncHeartbeatReplyLog2(char* s, const SyncHeartbeatReply* pMsg);
+
// ---------------------------------------------
typedef struct SyncApplyMsg {
uint32_t bytes;
diff --git a/include/libs/tfs/tfs.h b/include/libs/tfs/tfs.h
index 1dc154ce484e1a923852f877bdae4ec37d62b1b4..6f71fd4cd02a241b9d4a43efbfd92bc99b3c91a8 100644
--- a/include/libs/tfs/tfs.h
+++ b/include/libs/tfs/tfs.h
@@ -69,6 +69,14 @@ void tfsUpdateSize(STfs *pTfs);
*/
SDiskSize tfsGetSize(STfs *pTfs);
+/**
+ * @brief Get level of multi-tier storage.
+ *
+ * @param pTfs
+ * @return int32_t
+ */
+int32_t tfsGetLevel(STfs *pTfs);
+
/**
* @brief Allocate an existing available tier level from fs.
*
diff --git a/include/os/os.h b/include/os/os.h
index b036002f8adb5d246db8346112f2189f779f73cd..71966061a19a175d816010ff6425b4004b1f2223 100644
--- a/include/os/os.h
+++ b/include/os/os.h
@@ -79,6 +79,7 @@ extern "C" {
#include
#include
+#include "taoserror.h"
#include "osAtomic.h"
#include "osDef.h"
#include "osDir.h"
diff --git a/include/os/osDir.h b/include/os/osDir.h
index 9019d4f80240b2335824cb5626488bf4d0957f06..95b1a6ee1d00ab18e31522063102ff0ec9a2bab8 100644
--- a/include/os/osDir.h
+++ b/include/os/osDir.h
@@ -56,6 +56,7 @@ void taosRemoveDir(const char *dirname);
bool taosDirExist(const char *dirname);
int32_t taosMkDir(const char *dirname);
int32_t taosMulMkDir(const char *dirname);
+int32_t taosMulModeMkDir(const char *dirname, int mode);
void taosRemoveOldFiles(const char *dirname, int32_t keepDays);
int32_t taosExpandDir(const char *dirname, char *outname, int32_t maxlen);
int32_t taosRealPath(char *dirname, char *realPath, int32_t maxlen);
diff --git a/include/os/osSemaphore.h b/include/os/osSemaphore.h
index 7fca20d75e2eaece441656bc4ae2c707e0b15cd3..e52da96f0170d4d67d9fb8fa3aeff7270223e2d3 100644
--- a/include/os/osSemaphore.h
+++ b/include/os/osSemaphore.h
@@ -23,10 +23,9 @@ extern "C" {
#include
#if defined(_TD_DARWIN_64)
-
+#include
// typedef struct tsem_s *tsem_t;
-typedef struct bosal_sem_t *tsem_t;
-
+typedef dispatch_semaphore_t tsem_t;
int tsem_init(tsem_t *sem, int pshared, unsigned int value);
int tsem_wait(tsem_t *sem);
diff --git a/include/os/osSocket.h b/include/os/osSocket.h
index 4bad51e26322405d9c4187e3bd7a12a75ee011b9..c6729da76af0e59cb3ccda024709b86fe034b039 100644
--- a/include/os/osSocket.h
+++ b/include/os/osSocket.h
@@ -167,7 +167,7 @@ uint32_t ip2uint(const char *const ip_addr);
void taosIgnSIGPIPE();
void taosSetMaskSIGPIPE();
uint32_t taosInetAddr(const char *ipAddr);
-const char *taosInetNtoa(struct in_addr ipInt);
+const char *taosInetNtoa(struct in_addr ipInt, char *dstStr, int32_t len);
#ifdef __cplusplus
}
diff --git a/include/os/osString.h b/include/os/osString.h
index 8eb341faa7bf61e4c2f67f8a21859da94c0dcbf4..8c1885efdfd72c3bdbd7e588e834901c78c41d1f 100644
--- a/include/os/osString.h
+++ b/include/os/osString.h
@@ -77,7 +77,6 @@ int32_t taosWcharsWidth(TdWchar *pWchar, int32_t size);
int32_t taosMbToWchar(TdWchar *pWchar, const char *pStr, int32_t size);
int32_t taosMbsToWchars(TdWchar *pWchars, const char *pStrs, int32_t size);
int32_t taosWcharToMb(char *pStr, TdWchar wchar);
-int32_t taosWcharsToMbs(char *pStrs, TdWchar *pWchars, int32_t size);
char *taosStrCaseStr(const char *str, const char *pattern);
diff --git a/include/util/taoserror.h b/include/util/taoserror.h
index e39172d74e52e852f0fa1812634e494d61ac6213..837d0c630310d723e1baf7715553565408029d0c 100644
--- a/include/util/taoserror.h
+++ b/include/util/taoserror.h
@@ -285,6 +285,7 @@ int32_t* taosGetErrno();
#define TSDB_CODE_MND_TOPIC_SUBSCRIBED TAOS_DEF_ERROR_CODE(0, 0x03EB)
#define TSDB_CODE_MND_CGROUP_USED TAOS_DEF_ERROR_CODE(0, 0x03EC)
#define TSDB_CODE_MND_TOPIC_MUST_BE_DELETED TAOS_DEF_ERROR_CODE(0, 0x03ED)
+#define TSDB_CODE_MND_IN_REBALANCE TAOS_DEF_ERROR_CODE(0, 0x03EF)
// mnode-stream
#define TSDB_CODE_MND_STREAM_ALREADY_EXIST TAOS_DEF_ERROR_CODE(0, 0x03F0)
@@ -551,7 +552,6 @@ int32_t* taosGetErrno();
#define TSDB_CODE_PAR_VALUE_TOO_LONG TAOS_DEF_ERROR_CODE(0, 0x2653)
#define TSDB_CODE_PAR_INVALID_DELETE_WHERE TAOS_DEF_ERROR_CODE(0, 0x2655)
#define TSDB_CODE_PAR_INVALID_REDISTRIBUTE_VG TAOS_DEF_ERROR_CODE(0, 0x2656)
-
#define TSDB_CODE_PAR_FILL_NOT_ALLOWED_FUNC TAOS_DEF_ERROR_CODE(0, 0x2657)
#define TSDB_CODE_PAR_INVALID_WINDOW_PC TAOS_DEF_ERROR_CODE(0, 0x2658)
#define TSDB_CODE_PAR_WINDOW_NOT_ALLOWED_FUNC TAOS_DEF_ERROR_CODE(0, 0x2659)
@@ -564,6 +564,7 @@ int32_t* taosGetErrno();
#define TSDB_CODE_PAR_INVALID_SELECTED_EXPR TAOS_DEF_ERROR_CODE(0, 0x2661)
#define TSDB_CODE_PAR_GET_META_ERROR TAOS_DEF_ERROR_CODE(0, 0x2662)
#define TSDB_CODE_PAR_NOT_UNIQUE_TABLE_ALIAS TAOS_DEF_ERROR_CODE(0, 0x2663)
+#define TSDB_CODE_PAR_NOT_SUPPORT_JOIN TAOS_DEF_ERROR_CODE(0, 0x2664)
#define TSDB_CODE_PAR_INTERNAL_ERROR TAOS_DEF_ERROR_CODE(0, 0x26FF)
//planner
@@ -577,6 +578,7 @@ int32_t* taosGetErrno();
#define TSDB_CODE_FUNC_FUNTION_PARA_TYPE TAOS_DEF_ERROR_CODE(0, 0x2802)
#define TSDB_CODE_FUNC_FUNTION_PARA_VALUE TAOS_DEF_ERROR_CODE(0, 0x2803)
#define TSDB_CODE_FUNC_NOT_BUILTIN_FUNTION TAOS_DEF_ERROR_CODE(0, 0x2804)
+#define TSDB_CODE_FUNC_DUP_TIMESTAMP TAOS_DEF_ERROR_CODE(0, 0x2805)
//udf
#define TSDB_CODE_UDF_STOPPING TAOS_DEF_ERROR_CODE(0, 0x2901)
@@ -616,6 +618,9 @@ int32_t* taosGetErrno();
#define TSDB_CODE_RSMA_FETCH_MSG_MSSED_UP TAOS_DEF_ERROR_CODE(0, 0x3155)
#define TSDB_CODE_RSMA_EMPTY_INFO TAOS_DEF_ERROR_CODE(0, 0x3156)
#define TSDB_CODE_RSMA_INVALID_SCHEMA TAOS_DEF_ERROR_CODE(0, 0x3157)
+#define TSDB_CODE_RSMA_REGEX_MATCH TAOS_DEF_ERROR_CODE(0, 0x3158)
+#define TSDB_CODE_RSMA_STREAM_STATE_OPEN TAOS_DEF_ERROR_CODE(0, 0x3159)
+#define TSDB_CODE_RSMA_STREAM_STATE_COMMIT TAOS_DEF_ERROR_CODE(0, 0x3160)
//index
#define TSDB_CODE_INDEX_REBUILDING TAOS_DEF_ERROR_CODE(0, 0x3200)
diff --git a/include/util/tcompare.h b/include/util/tcompare.h
index cc9e8ae4641138be528830e17467dab7897f0166..c7a3ca20f222c7d919460b31e9f3c55a79325f46 100644
--- a/include/util/tcompare.h
+++ b/include/util/tcompare.h
@@ -105,6 +105,97 @@ int32_t compareStrPatternNotMatch(const void *pLeft, const void *pRight);
int32_t compareWStrPatternMatch(const void *pLeft, const void *pRight);
int32_t compareWStrPatternNotMatch(const void *pLeft, const void *pRight);
+int32_t compareInt8Int16(const void *pLeft, const void *pRight);
+int32_t compareInt8Int32(const void *pLeft, const void *pRight);
+int32_t compareInt8Int64(const void *pLeft, const void *pRight);
+int32_t compareInt8Float(const void *pLeft, const void *pRight);
+int32_t compareInt8Double(const void *pLeft, const void *pRight);
+int32_t compareInt8Uint8(const void *pLeft, const void *pRight);
+int32_t compareInt8Uint16(const void *pLeft, const void *pRight);
+int32_t compareInt8Uint32(const void *pLeft, const void *pRight);
+int32_t compareInt8Uint64(const void *pLeft, const void *pRight);
+int32_t compareInt16Int8(const void *pLeft, const void *pRight);
+int32_t compareInt16Int32(const void *pLeft, const void *pRight);
+int32_t compareInt16Int64(const void *pLeft, const void *pRight);
+int32_t compareInt16Float(const void *pLeft, const void *pRight);
+int32_t compareInt16Double(const void *pLeft, const void *pRight);
+int32_t compareInt16Uint8(const void *pLeft, const void *pRight);
+int32_t compareInt16Uint16(const void *pLeft, const void *pRight);
+int32_t compareInt16Uint32(const void *pLeft, const void *pRight);
+int32_t compareInt16Uint64(const void *pLeft, const void *pRight);
+int32_t compareInt32Int8(const void *pLeft, const void *pRight);
+int32_t compareInt32Int16(const void *pLeft, const void *pRight);
+int32_t compareInt32Int64(const void *pLeft, const void *pRight);
+int32_t compareInt32Float(const void *pLeft, const void *pRight);
+int32_t compareInt32Double(const void *pLeft, const void *pRight);
+int32_t compareInt32Uint8(const void *pLeft, const void *pRight);
+int32_t compareInt32Uint16(const void *pLeft, const void *pRight);
+int32_t compareInt32Uint32(const void *pLeft, const void *pRight);
+int32_t compareInt32Uint64(const void *pLeft, const void *pRight);
+int32_t compareInt64Int8(const void *pLeft, const void *pRight);
+int32_t compareInt64Int16(const void *pLeft, const void *pRight);
+int32_t compareInt64Int32(const void *pLeft, const void *pRight);
+int32_t compareInt64Float(const void *pLeft, const void *pRight);
+int32_t compareInt64Double(const void *pLeft, const void *pRight);
+int32_t compareInt64Uint8(const void *pLeft, const void *pRight);
+int32_t compareInt64Uint16(const void *pLeft, const void *pRight);
+int32_t compareInt64Uint32(const void *pLeft, const void *pRight);
+int32_t compareInt64Uint64(const void *pLeft, const void *pRight);
+int32_t compareFloatInt8(const void *pLeft, const void *pRight);
+int32_t compareFloatInt16(const void *pLeft, const void *pRight);
+int32_t compareFloatInt32(const void *pLeft, const void *pRight);
+int32_t compareFloatInt64(const void *pLeft, const void *pRight);
+int32_t compareFloatDouble(const void *pLeft, const void *pRight);
+int32_t compareFloatUint8(const void *pLeft, const void *pRight);
+int32_t compareFloatUint16(const void *pLeft, const void *pRight);
+int32_t compareFloatUint32(const void *pLeft, const void *pRight);
+int32_t compareFloatUint64(const void *pLeft, const void *pRight);
+int32_t compareDoubleInt8(const void *pLeft, const void *pRight);
+int32_t compareDoubleInt16(const void *pLeft, const void *pRight);
+int32_t compareDoubleInt32(const void *pLeft, const void *pRight);
+int32_t compareDoubleInt64(const void *pLeft, const void *pRight);
+int32_t compareDoubleFloat(const void *pLeft, const void *pRight);
+int32_t compareDoubleUint8(const void *pLeft, const void *pRight);
+int32_t compareDoubleUint16(const void *pLeft, const void *pRight);
+int32_t compareDoubleUint32(const void *pLeft, const void *pRight);
+int32_t compareDoubleUint64(const void *pLeft, const void *pRight);
+int32_t compareUint8Int8(const void *pLeft, const void *pRight);
+int32_t compareUint8Int16(const void *pLeft, const void *pRight);
+int32_t compareUint8Int32(const void *pLeft, const void *pRight);
+int32_t compareUint8Int64(const void *pLeft, const void *pRight);
+int32_t compareUint8Float(const void *pLeft, const void *pRight);
+int32_t compareUint8Double(const void *pLeft, const void *pRight);
+int32_t compareUint8Uint16(const void *pLeft, const void *pRight);
+int32_t compareUint8Uint32(const void *pLeft, const void *pRight);
+int32_t compareUint8Uint64(const void *pLeft, const void *pRight);
+int32_t compareUint16Int8(const void *pLeft, const void *pRight);
+int32_t compareUint16Int16(const void *pLeft, const void *pRight);
+int32_t compareUint16Int32(const void *pLeft, const void *pRight);
+int32_t compareUint16Int64(const void *pLeft, const void *pRight);
+int32_t compareUint16Float(const void *pLeft, const void *pRight);
+int32_t compareUint16Double(const void *pLeft, const void *pRight);
+int32_t compareUint16Uint8(const void *pLeft, const void *pRight);
+int32_t compareUint16Uint32(const void *pLeft, const void *pRight);
+int32_t compareUint16Uint64(const void *pLeft, const void *pRight);
+int32_t compareUint32Int8(const void *pLeft, const void *pRight);
+int32_t compareUint32Int16(const void *pLeft, const void *pRight);
+int32_t compareUint32Int32(const void *pLeft, const void *pRight);
+int32_t compareUint32Int64(const void *pLeft, const void *pRight);
+int32_t compareUint32Float(const void *pLeft, const void *pRight);
+int32_t compareUint32Double(const void *pLeft, const void *pRight);
+int32_t compareUint32Uint8(const void *pLeft, const void *pRight);
+int32_t compareUint32Uint16(const void *pLeft, const void *pRight);
+int32_t compareUint32Uint64(const void *pLeft, const void *pRight);
+int32_t compareUint64Int8(const void *pLeft, const void *pRight);
+int32_t compareUint64Int16(const void *pLeft, const void *pRight);
+int32_t compareUint64Int32(const void *pLeft, const void *pRight);
+int32_t compareUint64Int64(const void *pLeft, const void *pRight);
+int32_t compareUint64Float(const void *pLeft, const void *pRight);
+int32_t compareUint64Double(const void *pLeft, const void *pRight);
+int32_t compareUint64Uint8(const void *pLeft, const void *pRight);
+int32_t compareUint64Uint16(const void *pLeft, const void *pRight);
+int32_t compareUint64Uint32(const void *pLeft, const void *pRight);
+
__compar_fn_t getComparFunc(int32_t type, int32_t optr);
__compar_fn_t getKeyComparFunc(int32_t keyType, int32_t order);
int32_t doCompare(const char *a, const char *b, int32_t type, size_t size);
diff --git a/include/util/tdef.h b/include/util/tdef.h
index 2bc821b8736edf745a30e0e103734e4e7b7b31e4..43fd31afa7cc634e473c93518b78776c182d48d4 100644
--- a/include/util/tdef.h
+++ b/include/util/tdef.h
@@ -225,7 +225,8 @@ typedef enum ELogicConditionType {
#define TSDB_APP_NAME_LEN TSDB_UNI_LEN
#define TSDB_TB_COMMENT_LEN 1025
-#define TSDB_QUERY_ID_LEN 26
+#define TSDB_QUERY_ID_LEN 26
+#define TSDB_TRANS_OPER_LEN 16
/**
* In some scenarios uint16_t (0~65535) is used to store the row len.
@@ -300,6 +301,9 @@ typedef enum ELogicConditionType {
#define TSDB_DEFAULT_PAGES_PER_VNODE 256
#define TSDB_MIN_PAGESIZE_PER_VNODE 1 // unit KB
#define TSDB_MAX_PAGESIZE_PER_VNODE 16384
+#define TSDB_DEFAULT_TSDB_PAGESIZE 4
+#define TSDB_MIN_TSDB_PAGESIZE 1 // unit KB
+#define TSDB_MAX_TSDB_PAGESIZE 16384
#define TSDB_DEFAULT_PAGESIZE_PER_VNODE 4
#define TSDB_MIN_DAYS_PER_FILE 60 // unit minute
#define TSDB_MAX_DAYS_PER_FILE (3650 * 1440)
@@ -359,15 +363,27 @@ typedef enum ELogicConditionType {
#define TSDB_DB_SCHEMALESS_ON 1
#define TSDB_DB_SCHEMALESS_OFF 0
#define TSDB_DEFAULT_DB_SCHEMALESS TSDB_DB_SCHEMALESS_OFF
-
-#define TSDB_DB_MIN_WAL_RETENTION_PERIOD -1
-#define TSDB_DEFAULT_DB_WAL_RETENTION_PERIOD (24 * 60 * 60 * 4)
-#define TSDB_DB_MIN_WAL_RETENTION_SIZE -1
-#define TSDB_DEFAULT_DB_WAL_RETENTION_SIZE -1
-#define TSDB_DB_MIN_WAL_ROLL_PERIOD 0
-#define TSDB_DEFAULT_DB_WAL_ROLL_PERIOD (24 * 60 * 60 * 1)
-#define TSDB_DB_MIN_WAL_SEGMENT_SIZE 0
-#define TSDB_DEFAULT_DB_WAL_SEGMENT_SIZE 0
+#define TSDB_MIN_STT_TRIGGER 1
+#define TSDB_MAX_STT_TRIGGER 16
+#define TSDB_DEFAULT_SST_TRIGGER 8
+#define TSDB_MIN_HASH_PREFIX 0
+#define TSDB_MAX_HASH_PREFIX 128
+#define TSDB_DEFAULT_HASH_PREFIX 0
+#define TSDB_MIN_HASH_SUFFIX 0
+#define TSDB_MAX_HASH_SUFFIX 128
+#define TSDB_DEFAULT_HASH_SUFFIX 0
+
+#define TSDB_DB_MIN_WAL_RETENTION_PERIOD -1
+#define TSDB_REP_DEF_DB_WAL_RET_PERIOD 0
+#define TSDB_REPS_DEF_DB_WAL_RET_PERIOD (24 * 60 * 60 * 4)
+#define TSDB_DB_MIN_WAL_RETENTION_SIZE -1
+#define TSDB_REP_DEF_DB_WAL_RET_SIZE 0
+#define TSDB_REPS_DEF_DB_WAL_RET_SIZE -1
+#define TSDB_DB_MIN_WAL_ROLL_PERIOD 0
+#define TSDB_REP_DEF_DB_WAL_ROLL_PERIOD 0
+#define TSDB_REPS_DEF_DB_WAL_ROLL_PERIOD (24 * 60 * 60 * 1)
+#define TSDB_DB_MIN_WAL_SEGMENT_SIZE 0
+#define TSDB_DEFAULT_DB_WAL_SEGMENT_SIZE 0
#define TSDB_MIN_ROLLUP_MAX_DELAY 1 // unit millisecond
#define TSDB_MAX_ROLLUP_MAX_DELAY (15 * 60 * 1000)
@@ -386,7 +402,7 @@ typedef enum ELogicConditionType {
#define TSDB_DEFAULT_EXPLAIN_VERBOSE false
-#define TSDB_EXPLAIN_RESULT_ROW_SIZE (16*1024)
+#define TSDB_EXPLAIN_RESULT_ROW_SIZE (16 * 1024)
#define TSDB_EXPLAIN_RESULT_COLUMN_NAME "QUERY_PLAN"
#define TSDB_MAX_FIELD_LEN 16384
@@ -467,6 +483,7 @@ enum {
#define SNODE_HANDLE -2
#define VNODE_HANDLE -3
#define BNODE_HANDLE -4
+#define CLIENT_HANDLE -5
#define TSDB_CONFIG_OPTION_LEN 32
#define TSDB_CONFIG_VALUE_LEN 64
diff --git a/include/util/tencode.h b/include/util/tencode.h
index ad642cd612db4d1bb31f57b7a49d977e90978ee5..a6dd58297e8c1dba644d86eb5145b273406fbf9e 100644
--- a/include/util/tencode.h
+++ b/include/util/tencode.h
@@ -264,12 +264,14 @@ static FORCE_INLINE int32_t tEncodeDouble(SEncoder* pCoder, double val) {
static FORCE_INLINE int32_t tEncodeBinary(SEncoder* pCoder, const uint8_t* val, uint32_t len) {
if (tEncodeU32v(pCoder, len) < 0) return -1;
- if (pCoder->data) {
- if (TD_CODER_CHECK_CAPACITY_FAILED(pCoder, len)) return -1;
- memcpy(TD_CODER_CURRENT(pCoder), val, len);
- }
+ if (len) {
+ if (pCoder->data) {
+ if (TD_CODER_CHECK_CAPACITY_FAILED(pCoder, len)) return -1;
+ memcpy(TD_CODER_CURRENT(pCoder), val, len);
+ }
- TD_CODER_MOVE_POS(pCoder, len);
+ TD_CODER_MOVE_POS(pCoder, len);
+ }
return 0;
}
@@ -414,14 +416,18 @@ static int32_t tDecodeCStrTo(SDecoder* pCoder, char* val) {
static FORCE_INLINE int32_t tDecodeBinaryAlloc(SDecoder* pCoder, void** val, uint64_t* len) {
uint64_t length = 0;
if (tDecodeU64v(pCoder, &length) < 0) return -1;
- if (len) *len = length;
+ if (length) {
+ if (len) *len = length;
- if (TD_CODER_CHECK_CAPACITY_FAILED(pCoder, length)) return -1;
- *val = taosMemoryMalloc(length);
- if (*val == NULL) return -1;
- memcpy(*val, TD_CODER_CURRENT(pCoder), length);
+ if (TD_CODER_CHECK_CAPACITY_FAILED(pCoder, length)) return -1;
+ *val = taosMemoryMalloc(length);
+ if (*val == NULL) return -1;
+ memcpy(*val, TD_CODER_CURRENT(pCoder), length);
- TD_CODER_MOVE_POS(pCoder, length);
+ TD_CODER_MOVE_POS(pCoder, length);
+ } else {
+ *val = NULL;
+ }
return 0;
}
diff --git a/include/util/tpagedbuf.h b/include/util/tpagedbuf.h
index ef266068cbaff046ec6ebcf0bf02d0b44ee9d3a2..9ab89273e6895c2ea322fa116c06332a431028bc 100644
--- a/include/util/tpagedbuf.h
+++ b/include/util/tpagedbuf.h
@@ -58,19 +58,17 @@ int32_t createDiskbasedBuf(SDiskbasedBuf** pBuf, int32_t pagesize, int32_t inMem
/**
*
* @param pBuf
- * @param groupId
* @param pageId
* @return
*/
-void* getNewBufPage(SDiskbasedBuf* pBuf, int32_t groupId, int32_t* pageId);
+void* getNewBufPage(SDiskbasedBuf* pBuf, int32_t* pageId);
/**
*
* @param pBuf
- * @param groupId
* @return
*/
-SIDList getDataBufPagesIdList(SDiskbasedBuf* pBuf, int32_t groupId);
+SIDList getDataBufPagesIdList(SDiskbasedBuf* pBuf);
/**
* get the specified buffer page by id
@@ -101,13 +99,6 @@ void releaseBufPageInfo(SDiskbasedBuf* pBuf, struct SPageInfo* pi);
*/
size_t getTotalBufSize(const SDiskbasedBuf* pBuf);
-/**
- * get the number of groups in the result buffer
- * @param pBuf
- * @return
- */
-size_t getNumOfBufGroupId(const SDiskbasedBuf* pBuf);
-
/**
* destroy result buffer
* @param pBuf
diff --git a/include/util/trbtree.h b/include/util/trbtree.h
new file mode 100644
index 0000000000000000000000000000000000000000..f6d37e3d753de71fdf312b795935cb9014149f23
--- /dev/null
+++ b/include/util/trbtree.h
@@ -0,0 +1,78 @@
+/*
+ * Copyright (c) 2019 TAOS Data, Inc.
+ *
+ * This program is free software: you can use, redistribute, and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3
+ * or later ("AGPL"), as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see .
+ */
+
+#ifndef _TD_UTIL_RBTREE_H_
+#define _TD_UTIL_RBTREE_H_
+
+#include "os.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+typedef struct SRBTree SRBTree;
+typedef struct SRBTreeNode SRBTreeNode;
+typedef struct SRBTreeIter SRBTreeIter;
+
+typedef int32_t (*tRBTreeCmprFn)(const void *, const void *);
+
+// SRBTree =============================================
+#define tRBTreeMin(T) ((T)->min == ((T)->NIL) ? NULL : (T)->min)
+#define tRBTreeMax(T) ((T)->max == ((T)->NIL) ? NULL : (T)->max)
+
+void tRBTreeCreate(SRBTree *pTree, tRBTreeCmprFn cmprFn);
+SRBTreeNode *tRBTreePut(SRBTree *pTree, SRBTreeNode *z);
+void tRBTreeDrop(SRBTree *pTree, SRBTreeNode *z);
+SRBTreeNode *tRBTreeDropByKey(SRBTree *pTree, void *pKey);
+SRBTreeNode *tRBTreeGet(SRBTree *pTree, void *pKey);
+
+// SRBTreeIter =============================================
+#define tRBTreeIterCreate(tree, ascend) \
+ (SRBTreeIter) { .asc = (ascend), .pTree = (tree), .pNode = (ascend) ? (tree)->min : (tree)->max }
+
+SRBTreeNode *tRBTreeIterNext(SRBTreeIter *pIter);
+
+// STRUCT =============================================
+typedef enum { RED, BLACK } ECOLOR;
+struct SRBTreeNode {
+ ECOLOR color;
+ SRBTreeNode *parent;
+ SRBTreeNode *left;
+ SRBTreeNode *right;
+};
+
+#define RBTREE_NODE_PAYLOAD(N) ((const void *)&(N)[1])
+
+struct SRBTree {
+ tRBTreeCmprFn cmprFn;
+ int64_t n;
+ SRBTreeNode *root;
+ SRBTreeNode *min;
+ SRBTreeNode *max;
+ SRBTreeNode *NIL;
+ SRBTreeNode NILNODE;
+};
+
+struct SRBTreeIter {
+ int8_t asc;
+ SRBTree *pTree;
+ SRBTreeNode *pNode;
+};
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /*_TD_UTIL_RBTREE_H_*/
\ No newline at end of file
diff --git a/include/util/tsched.h b/include/util/tsched.h
index 347cacd19185b0891deadfdbdc6a0b42b8a71d18..379456afe64ebfd225bcec8dc873a23c54e6e29b 100644
--- a/include/util/tsched.h
+++ b/include/util/tsched.h
@@ -31,7 +31,6 @@ typedef struct SSchedMsg {
void *thandle;
} SSchedMsg;
-
typedef struct {
char label[TSDB_LABEL_LEN];
tsem_t emptySem;
@@ -48,7 +47,6 @@ typedef struct {
void *pTimer;
} SSchedQueue;
-
/**
* Create a thread-safe ring-buffer based task queue and return the instance. A thread
* pool will be created to consume the messages in the queue.
@@ -57,7 +55,7 @@ typedef struct {
* @param label the label of the queue
* @return the created queue scheduler
*/
-void *taosInitScheduler(int32_t capacity, int32_t numOfThreads, const char *label, SSchedQueue* pSched);
+void *taosInitScheduler(int32_t capacity, int32_t numOfThreads, const char *label, SSchedQueue *pSched);
/**
* Create a thread-safe ring-buffer based task queue and return the instance.
@@ -83,7 +81,7 @@ void taosCleanUpScheduler(void *queueScheduler);
* @param queueScheduler the queue scheduler instance
* @param pMsg the message for the task
*/
-void taosScheduleTask(void *queueScheduler, SSchedMsg *pMsg);
+int taosScheduleTask(void *queueScheduler, SSchedMsg *pMsg);
#ifdef __cplusplus
}
diff --git a/include/util/tutil.h b/include/util/tutil.h
index 6a1a40f14ccb865533f117524ffdfef3c84e20ad..c22495b75fdb0fe0a035d7f30809c3a9152d0798 100644
--- a/include/util/tutil.h
+++ b/include/util/tutil.h
@@ -20,6 +20,7 @@
#include "tcrc32c.h"
#include "tdef.h"
#include "tmd5.h"
+#include "thash.h"
#ifdef __cplusplus
extern "C" {
@@ -61,6 +62,7 @@ static FORCE_INLINE void taosEncryptPass_c(uint8_t *inBuf, size_t len, char *tar
tMD5Final(&context);
char buf[TSDB_PASSWORD_LEN + 1];
+ buf[TSDB_PASSWORD_LEN] = 0;
sprintf(buf, "%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x", context.digest[0], context.digest[1],
context.digest[2], context.digest[3], context.digest[4], context.digest[5], context.digest[6],
context.digest[7], context.digest[8], context.digest[9], context.digest[10], context.digest[11],
@@ -68,6 +70,19 @@ static FORCE_INLINE void taosEncryptPass_c(uint8_t *inBuf, size_t len, char *tar
memcpy(target, buf, TSDB_PASSWORD_LEN);
}
+static FORCE_INLINE int32_t taosGetTbHashVal(const char *tbname, int32_t tblen, int32_t method, int32_t prefix,
+ int32_t suffix) {
+ if (prefix == 0 && suffix == 0) {
+ return MurmurHash3_32(tbname, tblen);
+ } else {
+ if (tblen <= (prefix + suffix)) {
+ return MurmurHash3_32(tbname, tblen);
+ } else {
+ return MurmurHash3_32(tbname + prefix, tblen - prefix - suffix);
+ }
+ }
+}
+
#ifdef __cplusplus
}
#endif
diff --git a/packaging/MPtestJenkinsfile b/packaging/MPtestJenkinsfile
new file mode 100644
index 0000000000000000000000000000000000000000..5b793459164baec2791107842bddb3f0bb90b2df
--- /dev/null
+++ b/packaging/MPtestJenkinsfile
@@ -0,0 +1,251 @@
+def sync_source(branch_name) {
+ sh '''
+ hostname
+ ip addr|grep 192|awk '{print $2}'|sed "s/\\/.*//"
+ echo ''' + branch_name + '''
+ '''
+ sh '''
+ cd ${TDENGINE_ROOT_DIR}
+ git reset --hard
+ git fetch || git fetch
+ rm -rf examples/rust/
+ git checkout ''' + branch_name + ''' -f
+ git branch
+ git pull || git pull
+ git log | head -n 20
+ git submodule update --init --recursive
+ '''
+ return 1
+}
+def run_test() {
+ sh '''
+ cd ${TDENGINE_ROOT_DIR}/packaging
+
+ '''
+ sh '''
+ export LD_LIBRARY_PATH=${TDINTERNAL_ROOT_DIR}/debug/build/lib
+ ./fulltest.sh
+ '''
+ sh '''
+ cd ${TDENGINE_ROOT_DIR}/tests
+ ./test-all.sh b1fq
+ '''
+}
+def build_run() {
+ sync_source("${BRANCH_NAME}")
+}
+pipeline {
+ agent none
+ parameters {
+ string (
+ name:'version',
+ defaultValue:'3.0.0.1',
+ description: 'release version number,eg: 3.0.0.1 or 3.0.0.'
+ )
+ string (
+ name:'baseVersion',
+ defaultValue:'3.0.0.1',
+ description: 'This number of baseVerison is generally not modified.Now it is 3.0.0.1'
+ )
+ string (
+ name:'toolsVersion',
+ defaultValue:'2.1.2',
+ description: 'This number of baseVerison is generally not modified.Now it is 3.0.0.1'
+ )
+ string (
+ name:'toolsBaseVersion',
+ defaultValue:'2.1.2',
+ description: 'This number of baseVerison is generally not modified.Now it is 3.0.0.1'
+ )
+ }
+ environment{
+ WORK_DIR = '/var/lib/jenkins/workspace'
+ TDINTERNAL_ROOT_DIR = '/var/lib/jenkins/workspace/TDinternal'
+ TDENGINE_ROOT_DIR = '/var/lib/jenkins/workspace/TDinternal/community'
+ BRANCH_NAME = 'test/chr/TD-14699'
+
+ TD_SERVER_TAR = "TDengine-server-${version}-Linux-x64.tar.gz"
+ BASE_TD_SERVER_TAR = "TDengine-server-${baseVersion}-Linux-x64.tar.gz"
+
+ TD_SERVER_ARM_TAR = "TDengine-server-${version}-Linux-arm64.tar.gz"
+ BASE_TD_SERVER_ARM_TAR = "TDengine-server-${baseVersion}-Linux-arm64.tar.gz"
+
+ TD_SERVER_LITE_TAR = "TDengine-server-${version}-Linux-x64-Lite.tar.gz"
+ BASE_TD_SERVER_LITE_TAR = "TDengine-server-${baseVersion}-Linux-x64-Lite.tar.gz"
+
+ TD_CLIENT_TAR = "TDengine-client-${version}-Linux-x64.tar.gz"
+ BASE_TD_CLIENT_TAR = "TDengine-client-${baseVersion}-Linux-x64.tar.gz"
+
+ TD_CLIENT_ARM_TAR = "TDengine-client-${version}-Linux-arm64.tar.gz"
+ BASE_TD_CLIENT_ARM_TAR = "TDengine-client-${baseVersion}-Linux-arm64.tar.gz"
+
+ TD_CLIENT_LITE_TAR = "TDengine-client-${version}-Linux-x64-Lite.tar.gz"
+ BASE_TD_CLIENT_LITE_TAR = "TDengine-client-${baseVersion}-Linux-x64-Lite.tar.gz"
+
+ TD_SERVER_RPM = "TDengine-server-${version}-Linux-x64.rpm"
+
+ TD_SERVER_DEB = "TDengine-server-${version}-Linux-x64.deb"
+
+ TD_SERVER_EXE = "TDengine-server-${version}-Windows-x64.exe"
+
+ TD_CLIENT_EXE = "TDengine-client-${version}-Windows-x64.exe"
+
+ TD_TOOLS_TAR = "taosTools-${toolsVersion}-Linux-x64.tar.gz"
+
+
+ }
+ stages {
+ stage ('Test Server') {
+ parallel {
+ stage('ubuntu16') {
+ agent{label " ubuntu16 "}
+ steps {
+ timeout(time: 30, unit: 'MINUTES'){
+ sync_source("${BRANCH_NAME}")
+ sh '''
+ cd ${TDENGINE_ROOT_DIR}/packaging
+ bash testpackage.sh ${TD_SERVER_TAR} ${version} ${BASE_TD_SERVER_TAR} ${baseVersion} server
+ python3 checkPackageRuning.py
+ '''
+ sh '''
+ cd ${TDENGINE_ROOT_DIR}/packaging
+ bash testpackage.sh ${TD_SERVER_LITE_TAR} ${version} ${BASE_TD_SERVER_LITE_TAR} ${baseVersion} server
+ python3 checkPackageRuning.py
+ '''
+ sh '''
+ cd ${TDENGINE_ROOT_DIR}/packaging
+ bash testpackage.sh ${TD_SERVER_DEB} ${version} ${BASE_TD_SERVER_TAR} ${baseVersion} server
+ python3 checkPackageRuning.py
+ '''
+ }
+ }
+ }
+ stage('ubuntu18') {
+ agent{label " ubuntu18 "}
+ steps {
+ timeout(time: 30, unit: 'MINUTES'){
+ sync_source("${BRANCH_NAME}")
+ sh '''
+ cd ${TDENGINE_ROOT_DIR}/packaging
+ bash testpackage.sh ${TD_SERVER_TAR} ${version} ${BASE_TD_SERVER_TAR} ${baseVersion} server
+ python3 checkPackageRuning.py
+ '''
+ sh '''
+ cd ${TDENGINE_ROOT_DIR}/packaging
+ bash testpackage.sh ${TD_SERVER_LITE_TAR} ${version} ${BASE_TD_SERVER_LITE_TAR} ${baseVersion} server
+ python3 checkPackageRuning.py
+ '''
+ sh '''
+ cd ${TDENGINE_ROOT_DIR}/packaging
+ bash testpackage.sh ${TD_SERVER_DEB} ${version} ${BASE_TD_SERVER_TAR} ${baseVersion} server
+ python3 checkPackageRuning.py
+ dpkg -r tdengine
+ '''
+
+ }
+ }
+ }
+ stage('centos7') {
+ agent{label " centos7_9 "}
+ steps {
+ timeout(time: 30, unit: 'MINUTES'){
+ sync_source("${BRANCH_NAME}")
+ sh '''
+ cd ${TDENGINE_ROOT_DIR}/packaging
+ bash testpackage.sh ${TD_SERVER_TAR} ${version} ${BASE_TD_SERVER_TAR} ${baseVersion} server
+ python3 checkPackageRuning.py
+ '''
+ sh '''
+ cd ${TDENGINE_ROOT_DIR}/packaging
+ bash testpackage.sh ${TD_SERVER_LITE_TAR} ${version} ${BASE_TD_SERVER_LITE_TAR} ${baseVersion} server
+ python3 checkPackageRuning.py
+ '''
+ sh '''
+ cd ${TDENGINE_ROOT_DIR}/packaging
+ bash testpackage.sh ${TD_SERVER_RPM} ${version} ${BASE_TD_SERVER_TAR} ${baseVersion} server
+ python3 checkPackageRuning.py
+ '''
+ }
+ }
+ }
+ stage('centos8') {
+ agent{label " centos8_3 "}
+ steps {
+ timeout(time: 30, unit: 'MINUTES'){
+ sync_source("${BRANCH_NAME}")
+ sh '''
+ cd ${TDENGINE_ROOT_DIR}/packaging
+ bash testpackage.sh ${TD_SERVER_TAR} ${version} ${BASE_TD_SERVER_TAR} ${baseVersion} server
+ python3 checkPackageRuning.py
+ '''
+ sh '''
+ cd ${TDENGINE_ROOT_DIR}/packaging
+ bash testpackage.sh ${TD_SERVER_LITE_TAR} ${version} ${BASE_TD_SERVER_LITE_TAR} ${baseVersion} server
+ python3 checkPackageRuning.py
+ '''
+ sh '''
+ cd ${TDENGINE_ROOT_DIR}/packaging
+ bash testpackage.sh ${TD_SERVER_RPM} ${version} ${BASE_TD_SERVER_TAR} ${baseVersion} server
+ python3 checkPackageRuning.py
+ sudo rpm -e tdengine
+ '''
+ }
+ }
+ }
+ stage('arm64') {
+ agent{label 'linux_arm64'}
+ steps {
+ timeout(time: 30, unit: 'MINUTES'){
+ sync_source("${BRANCH_NAME}")
+ sh '''
+ cd ${TDENGINE_ROOT_DIR}/packaging
+ bash testpackage.sh ${TD_SERVER_ARM_TAR} ${version} ${BASE_TD_SERVER_ARM_TAR} ${baseVersion} server
+ python3 checkPackageRuning.py
+ '''
+ }
+ }
+ }
+ }
+ }
+ stage ('Test Client') {
+ parallel {
+ stage('ubuntu18') {
+ agent{label " ubuntu18 "}
+ steps {
+ timeout(time: 30, unit: 'MINUTES'){
+ sh '''
+ cd ${TDENGINE_ROOT_DIR}/packaging
+ bash testpackage.sh ${TD_CLIENT_TAR} ${version} ${BASE_TD_CLIENT_TAR} ${baseVersion} client
+ python3 checkPackageRuning.py 192.168.0.21
+ '''
+ }
+ }
+ }
+ stage('centos8') {
+ agent{label " centos8_3 "}
+ steps {
+ timeout(time: 30, unit: 'MINUTES'){
+ sh '''
+ cd ${TDENGINE_ROOT_DIR}/packaging
+ bash testpackage.sh ${TD_CLIENT_LITE_TAR} ${version} ${BASE_TD_CLIENT_LITE_TAR} ${baseVersion} client
+ python3 checkPackageRuning.py 192.168.0.24
+ '''
+ }
+ }
+ }
+ }
+ }
+ stage('arm64-client') {
+ agent{label " linux_arm64 "}
+ steps {
+ timeout(time: 30, unit: 'MINUTES'){
+ sh '''
+ cd ${TDENGINE_ROOT_DIR}/packaging
+ bash testpackage.sh ${TD_CLIENT_ARM_TAR} ${version} ${BASE_TD_CLIENT_ARM_TAR} ${baseVersion} client
+ python3 checkPackageRuning.py 192.168.0.21
+ '''
+ }
+ }
+ }
+ }
+}
\ No newline at end of file
diff --git a/packaging/cfg/nginxd.service b/packaging/cfg/nginxd.service
deleted file mode 100644
index 50bbc1a21de5e6645404ec1d4e9bcd6f177f69d2..0000000000000000000000000000000000000000
--- a/packaging/cfg/nginxd.service
+++ /dev/null
@@ -1,22 +0,0 @@
-[Unit]
-Description=Nginx For TDengine Service
-After=network-online.target
-Wants=network-online.target
-
-[Service]
-Type=forking
-PIDFile=/usr/local/nginxd/logs/nginx.pid
-ExecStart=/usr/local/nginxd/sbin/nginx
-ExecStop=/usr/local/nginxd/sbin/nginx -s stop
-TimeoutStopSec=1000000s
-LimitNOFILE=infinity
-LimitNPROC=infinity
-LimitCORE=infinity
-TimeoutStartSec=0
-StandardOutput=null
-Restart=always
-StartLimitBurst=3
-StartLimitInterval=60s
-
-[Install]
-WantedBy=multi-user.target
diff --git a/packaging/cfg/taos.cfg b/packaging/cfg/taos.cfg
index aae2e7c856ac7ce4747d798acf5852d6cdf21535..87f465fdb93ddbff8973430b11ecadc13878069d 100644
--- a/packaging/cfg/taos.cfg
+++ b/packaging/cfg/taos.cfg
@@ -38,7 +38,7 @@
# The interval of dnode reporting status to mnode
# statusInterval 1
-# The interval for taos shell to send heartbeat to mnode
+# The interval for TDengine CLI to send heartbeat to mnode
# shellActivityTimer 3
# The minimum sliding window time, milli-second
diff --git a/packaging/checkPackageRuning.py b/packaging/checkPackageRuning.py
new file mode 100755
index 0000000000000000000000000000000000000000..2edeeb6dbbb682bb06150e30803a7f05c170a5b1
--- /dev/null
+++ b/packaging/checkPackageRuning.py
@@ -0,0 +1,110 @@
+#!/usr/bin/python
+###################################################################
+# Copyright (c) 2016 by TAOS Technologies, Inc.
+# All rights reserved.
+#
+# This file is proprietary and confidential to TAOS Technologies.
+# No part of this file may be reproduced, stored, transmitted,
+# disclosed or used in any form or by any means other than as
+# expressly provided by the written permission from Jianhui Tao
+#
+###################################################################
+# install pip
+# pip install src/connector/python/
+
+# -*- coding: utf-8 -*-
+import sys , os
+import getopt
+import subprocess
+# from this import d
+import time
+
+
+if( len(sys.argv)>1 ):
+ serverHost=sys.argv[1]
+else:
+ serverHost="localhost"
+
+
+# install taospy
+
+out = subprocess.getoutput("pip3 show taospy|grep Version| awk -F ':' '{print $2}' ")
+print("taospy version %s "%out)
+if (out == "" ):
+ os.system("pip3 install git+https://github.com/taosdata/taos-connector-python.git")
+ print("install taos python connector")
+else:
+ os.system("pip3 install --upgrade taospy ")
+
+
+
+# start taosd prepare
+# os.system("rm -rf /var/lib/taos/*")
+# os.system("systemctl restart taosd ")
+
+# wait a moment ,at least 5 seconds
+time.sleep(5)
+
+# prepare data by taosBenchmark
+
+os.system("taosBenchmark -y -n 100 -t 100 -h %s "%serverHost )
+
+import taos
+
+conn = taos.connect(host="%s"%serverHost,
+ user="root",
+ password="taosdata",
+ database="test",
+ port=6030,
+ config="/etc/taos", # for windows the default value is C:\TDengine\cfg
+ timezone="Asia/Shanghai") # default your host's timezone
+
+server_version = conn.server_info
+print("server_version", server_version)
+client_version = conn.client_info
+print("client_version", client_version) # 3.0.0.0
+
+# Execute a sql and get its result set. It's useful for SELECT statement
+result: taos.TaosResult = conn.query("SELECT count(*) from test.meters")
+
+data = result.fetch_all()
+
+if data[0][0] !=10000:
+ print(" taosBenchmark work not as expected ")
+ sys.exit(1)
+else:
+ print(" taosBenchmark work as expected ")
+
+# test taosdump dump out data and dump in data
+
+# dump out datas
+os.system("taosdump --version")
+os.system("mkdir -p /tmp/dumpdata")
+os.system("rm -rf /tmp/dumpdata/*")
+
+
+
+# dump data out
+print("taosdump dump out data")
+
+os.system("taosdump -o /tmp/dumpdata -D test -y -h %s "%serverHost)
+
+# drop database of test
+print("drop database test")
+os.system(" taos -s ' drop database test ;' -h %s "%serverHost)
+
+# dump data in
+print("taosdump dump data in")
+os.system("taosdump -i /tmp/dumpdata -y -h %s "%serverHost)
+
+result = conn.query("SELECT count(*) from test.meters")
+
+data = result.fetch_all()
+
+if data[0][0] !=10000:
+ print(" taosdump work not as expected ")
+ sys.exit(1)
+else:
+ print(" taosdump work as expected ")
+
+conn.close()
\ No newline at end of file
diff --git a/packaging/check_package.sh b/packaging/check_package.sh
index e728c6455abdb149d43a4e46d2b7730511b8800d..5c3a2f926731ffed236b5b859e483c7fc38c242b 100644
--- a/packaging/check_package.sh
+++ b/packaging/check_package.sh
@@ -38,8 +38,6 @@ temp_version=""
fin_result=""
service_config_dir="/etc/systemd/system"
-nginx_port=6060
-nginx_dir="/usr/local/nginxd"
# Color setting
RED='\033[0;31m'
@@ -132,10 +130,7 @@ function check_main_path() {
check_file ${install_main_dir} $i
done
if [ "$verMode" == "cluster" ]; then
- nginx_main_dir=("admin" "conf" "html" "sbin" "logs")
- for i in "${nginx_main_dir[@]}";do
- check_file ${nginx_dir} $i
- done
+ check_file ${install_main_dir} "share/admin"
fi
echo -e "Check main path:\033[32mOK\033[0m!"
}
@@ -150,9 +145,6 @@ function check_bin_path() {
for i in "${lbin_dir[@]}";do
check_link ${bin_link_dir}/$i
done
- if [ "$verMode" == "cluster" ]; then
- check_file ${nginx_dir}/sbin nginx
- fi
echo -e "Check bin path:\033[32mOK\033[0m!"
}
diff --git a/packaging/deb/DEBIAN/prerm b/packaging/deb/DEBIAN/prerm
index 49531028423142ccc9808b90b772bd97b0b3fc58..65f261db2c6c1ac70b761312af68a5188acea541 100644
--- a/packaging/deb/DEBIAN/prerm
+++ b/packaging/deb/DEBIAN/prerm
@@ -1,6 +1,6 @@
#!/bin/bash
-if [ $1 -eq "abort-upgrade" ]; then
+if [ "$1"x = "abort-upgrade"x ]; then
exit 0
fi
diff --git a/packaging/deb/makedeb.sh b/packaging/deb/makedeb.sh
index 3db9005f95a3027c42dd05b9f28d448ade5852cb..94a24a41487e8d7b82571bcc524392e4335d7fae 100755
--- a/packaging/deb/makedeb.sh
+++ b/packaging/deb/makedeb.sh
@@ -45,6 +45,7 @@ mkdir -p ${pkg_dir}${install_home_path}/include
mkdir -p ${pkg_dir}${install_home_path}/script
cp ${compile_dir}/../packaging/cfg/taos.cfg ${pkg_dir}${install_home_path}/cfg
+cp ${compile_dir}/../packaging/cfg/taosd.service ${pkg_dir}${install_home_path}/cfg
if [ -f "${compile_dir}/test/cfg/taosadapter.toml" ]; then
cp ${compile_dir}/test/cfg/taosadapter.toml ${pkg_dir}${install_home_path}/cfg || :
fi
diff --git a/packaging/debRpmAutoInstall.sh b/packaging/debRpmAutoInstall.sh
new file mode 100755
index 0000000000000000000000000000000000000000..3579f813e5b6ce91f0daa1fd230af14a4bf3d4b9
--- /dev/null
+++ b/packaging/debRpmAutoInstall.sh
@@ -0,0 +1,15 @@
+#!/usr/bin/expect
+set packgeName [lindex $argv 0]
+set packageSuffix [lindex $argv 1]
+set timeout 3
+if { ${packageSuffix} == "deb" } {
+ spawn dpkg -i ${packgeName}
+} elseif { ${packageSuffix} == "rpm"} {
+ spawn rpm -ivh ${packgeName}
+}
+expect "*one:"
+send "\r"
+expect "*skip:"
+send "\r"
+
+expect eof
\ No newline at end of file
diff --git a/packaging/docker/README.md b/packaging/docker/README.md
index e41182f471050af6b4d47b696eb237e319b2dd80..763ab73724587eb4dc231eb399a60937eaba6dca 100644
--- a/packaging/docker/README.md
+++ b/packaging/docker/README.md
@@ -47,7 +47,7 @@ taos> show databases;
Query OK, 1 row(s) in set (0.002843s)
```
-Since TDengine use container hostname to establish connections, it's a bit more complex to use taos shell and native connectors(such as JDBC-JNI) with TDengine container instance. This is the recommended way to expose ports and use TDengine with docker in simple cases. If you want to use taos shell or taosc/connectors smoothly outside the `tdengine` container, see next use cases that match you need.
+Since TDengine use container hostname to establish connections, it's a bit more complex to use TDengine CLI and native connectors(such as JDBC-JNI) with TDengine container instance. This is the recommended way to expose ports and use TDengine with docker in simple cases. If you want to use TDengine CLI or taosc/connectors smoothly outside the `tdengine` container, see next use cases that match you need.
### Start with host network
@@ -87,7 +87,7 @@ docker run -d \
This command starts a docker container with TDengine server running and maps the container's TCP ports from 6030 to 6049 to the host's ports from 6030 to 6049 with TCP protocol and UDP ports range 6030-6039 to the host's UDP ports 6030-6039. If the host is already running TDengine server and occupying the same port(s), you need to map the container's port to a different unused port segment. (Please see TDengine 2.0 Port Description for details). In order to support TDengine clients accessing TDengine server services, both TCP and UDP ports need to be exposed by default(unless `rpcForceTcp` is set to `1`).
-If you want to use taos shell or native connectors([JDBC-JNI](https://www.taosdata.com/cn/documentation/connector/java), or [driver-go](https://github.com/taosdata/driver-go)), you need to make sure the `TAOS_FQDN` is resolvable at `/etc/hosts` or with custom DNS service.
+If you want to use TDengine CLI or native connectors([JDBC-JNI](https://www.taosdata.com/cn/documentation/connector/java), or [driver-go](https://github.com/taosdata/driver-go)), you need to make sure the `TAOS_FQDN` is resolvable at `/etc/hosts` or with custom DNS service.
If you set the `TAOS_FQDN` to host's hostname, it will works as using `hosts` network like previous use case. Otherwise, like in `-e TAOS_FQDN=tdengine`, you can add the hostname record `tdengine` into `/etc/hosts` (use `127.0.0.1` here in host path, if use TDengine client/application in other hosts, you should set the right ip to the host eg. `192.168.10.1`(check the real ip in host with `hostname -i` or `ip route list default`) to make the TDengine endpoint resolvable):
@@ -158,7 +158,7 @@ When you build your application with docker, you should add the TDengine client
FROM ubuntu:20.04
RUN apt-get update && apt-get install -y wget
ENV TDENGINE_VERSION=2.4.0.0
-RUN wget -c https://www.taosdata.com/assets-download/TDengine-client-${TDENGINE_VERSION}-Linux-x64.tar.gz \
+RUN wget -c https://www.taosdata.com/assets-download/3.0/TDengine-client-${TDENGINE_VERSION}-Linux-x64.tar.gz \
&& tar xvf TDengine-client-${TDENGINE_VERSION}-Linux-x64.tar.gz \
&& cd TDengine-client-${TDENGINE_VERSION} \
&& ./install_client.sh \
@@ -265,7 +265,7 @@ Full version of dockerfile could be:
```dockerfile
FROM golang:1.17.6-buster as builder
ENV TDENGINE_VERSION=2.4.0.0
-RUN wget -c https://www.taosdata.com/assets-download/TDengine-client-${TDENGINE_VERSION}-Linux-x64.tar.gz \
+RUN wget -c https://www.taosdata.com/assets-download/3.0/TDengine-client-${TDENGINE_VERSION}-Linux-x64.tar.gz \
&& tar xvf TDengine-client-${TDENGINE_VERSION}-Linux-x64.tar.gz \
&& cd TDengine-client-${TDENGINE_VERSION} \
&& ./install_client.sh \
@@ -279,7 +279,7 @@ RUN go env && go mod tidy && go build
FROM ubuntu:20.04
RUN apt-get update && apt-get install -y wget
ENV TDENGINE_VERSION=2.4.0.0
-RUN wget -c https://www.taosdata.com/assets-download/TDengine-client-${TDENGINE_VERSION}-Linux-x64.tar.gz \
+RUN wget -c https://www.taosdata.com/assets-download/3.0/TDengine-client-${TDENGINE_VERSION}-Linux-x64.tar.gz \
&& tar xvf TDengine-client-${TDENGINE_VERSION}-Linux-x64.tar.gz \
&& cd TDengine-client-${TDENGINE_VERSION} \
&& ./install_client.sh \
@@ -391,7 +391,7 @@ test_td-1_1 /usr/bin/entrypoint.sh taosd Up 6030/tcp, 6031/tcp,
test_td-2_1 /usr/bin/entrypoint.sh taosd Up 6030/tcp, 6031/tcp, 6032/tcp, 6033/tcp, 6034/tcp, 6035/tcp, 6036/tcp, 6037/tcp, 6038/tcp, 6039/tcp, 6040/tcp, 6041/tcp, 6042/tcp
```
-Check dnodes with taos shell:
+Check dnodes with TDengine CLI:
```bash
$ docker-compose exec td-1 taos -s "show dnodes"
diff --git a/packaging/docker/dockerManifest.sh b/packaging/docker/dockerManifest.sh
index 71788423f6e58b2788346ef2804cd4d03ee54b02..8f71e30fbdca1cc9adf8e9b46c652475822e4b08 100755
--- a/packaging/docker/dockerManifest.sh
+++ b/packaging/docker/dockerManifest.sh
@@ -1,6 +1,7 @@
#!/bin/bash
set -e
#set -x
+set -v
# dockerbuild.sh
# -n [version number]
@@ -11,8 +12,9 @@ set -e
version=""
passWord=""
verType=""
+dockerLatest="n"
-while getopts "hn:p:V:" arg
+while getopts "hn:p:V:a:" arg
do
case $arg in
n)
@@ -29,9 +31,15 @@ do
;;
h)
echo "Usage: `basename $0` -n [version number] "
- echo " -p [password for docker hub] "
+ echo " -p [password for docker hub] "
+ echo " -V [stable |beta] "
+ echo " -a [y | n ] "
exit 0
;;
+ a)
+ #echo "dockerLatest=$OPTARG"
+ dockerLatest=$(echo $OPTARG)
+ ;;
?) #unknow option
echo "unkonw argument"
exit 1
@@ -41,42 +49,55 @@ done
echo "version=${version}"
-#docker manifest rm tdengine/tdengine
-#docker manifest rm tdengine/tdengine:${version}
-if [ "$verType" == "beta" ]; then
- docker manifest create -a tdengine/tdengine-beta:${version} tdengine/tdengine-amd64-beta:${version} tdengine/tdengine-aarch64-beta:${version} tdengine/tdengine-aarch32-beta:${version}
- docker manifest create -a tdengine/tdengine-beta:latest tdengine/tdengine-amd64-beta:latest tdengine/tdengine-aarch64-beta:latest tdengine/tdengine-aarch32-beta:latest
- docker manifest rm tdengine/tdengine-beta:${version}
- docker manifest rm tdengine/tdengine-beta:latest
- docker manifest create -a tdengine/tdengine-beta:${version} tdengine/tdengine-amd64-beta:${version} tdengine/tdengine-aarch64-beta:${version} tdengine/tdengine-aarch32-beta:${version}
- docker manifest create -a tdengine/tdengine-beta:latest tdengine/tdengine-amd64-beta:latest tdengine/tdengine-aarch64-beta:latest tdengine/tdengine-aarch32-beta:latest
- docker manifest inspect tdengine/tdengine:latest
- docker manifest inspect tdengine/tdengine:${version}
- docker login -u tdengine -p ${passWord} #replace the docker registry username and password
- docker manifest push tdengine/tdengine-beta:${version}
- docker manifest push tdengine/tdengine-beta:latest
-elif [ "$verType" == "stable" ]; then
- docker manifest create -a tdengine/tdengine:${version} tdengine/tdengine-amd64:${version} tdengine/tdengine-aarch64:${version} tdengine/tdengine-aarch32:${version}
- docker manifest create -a tdengine/tdengine:latest tdengine/tdengine-amd64:latest tdengine/tdengine-aarch64:latest tdengine/tdengine-aarch32:latest
- docker manifest rm tdengine/tdengine:latest
- docker manifest rm tdengine/tdengine:${version}
- docker manifest create -a tdengine/tdengine:${version} tdengine/tdengine-amd64:${version} tdengine/tdengine-aarch64:${version} tdengine/tdengine-aarch32:${version}
- docker manifest create -a tdengine/tdengine:latest tdengine/tdengine-amd64:latest tdengine/tdengine-aarch64:latest tdengine/tdengine-aarch32:latest
- docker manifest inspect tdengine/tdengine:latest
- docker manifest inspect tdengine/tdengine:${version}
- docker login -u tdengine -p ${passWord} #replace the docker registry username and password
- docker manifest push tdengine/tdengine:${version}
- docker manifest push tdengine/tdengine:latest
-else
+if [ "$verType" == "stable" ]; then
+ verType=stable
+ dockerinput=TDengine-server-${version}-Linux-$cpuType.tar.gz
+ dockerinput_x64=TDengine-server-${version}-Linux-amd64.tar.gz
+ dockerim=tdengine/tdengine
+ dockeramd64=tdengine/tdengine-amd64
+ dockeraarch64=tdengine/tdengine-aarch64
+ dockeraarch32=tdengine/tdengine-aarch32
+elif [ "$verType" == "beta" ];then
+ verType=beta
+ tagVal=ver-${version}-beta
+ dockerinput=TDengine-server-${version}-${verType}-Linux-$cpuType.tar.gz
+ dockerinput_x64=TDengine-server-${version}-${verType}-Linux-amd64.tar.gz
+ dockerim=tdengine/tdengine-beta
+ dockeramd64=tdengine/tdengine-amd64-beta
+ dockeraarch64=tdengine/tdengine-aarch64-beta
+ dockeraarch32=tdengine/tdengine-aarch32-beta
+ else
echo "unknow verType, nor stabel or beta"
exit 1
fi
-# docker manifest create -a tdengine/${dockername}:${version} tdengine/tdengine-amd64:${version} tdengine/tdengine-aarch64:${version} tdengine/tdengine-aarch32:${version}
-# docker manifest create -a tdengine/${dockername}:latest tdengine/tdengine-amd64:latest tdengine/tdengine-aarch64:latest tdengine/tdengine-aarch32:latest
+username="tdengine"
-# docker login -u tdengine -p ${passWord} #replace the docker registry username and password
+# generate docker verison
+echo "generate ${dockerim}:${version}"
+docker manifest create -a ${dockerim}:${version} ${dockeramd64}:${version} ${dockeraarch64}:${version}
+docker manifest inspect ${dockerim}:${version}
+docker manifest rm ${dockerim}:${version}
+docker manifest create -a ${dockerim}:${version} ${dockeramd64}:${version} ${dockeraarch64}:${version}
+docker manifest inspect ${dockerim}:${version}
+docker login -u ${username} -p ${passWord}
+docker manifest push ${dockerim}:${version}
+
+
+# generate docker latest
+echo "generate ${dockerim}:latest "
+
+if [ ${dockerLatest} == 'y' ] ;then
+ echo "docker manifest create -a ${dockerim}:latest ${dockeramd64}:latest ${dockeraarch64}:latest"
+ docker manifest create -a ${dockerim}:latest ${dockeramd64}:latest ${dockeraarch64}:latest
+ docker manifest inspect ${dockerim}:latest
+ docker manifest rm ${dockerim}:latest
+ docker manifest create -a ${dockerim}:latest ${dockeramd64}:latest ${dockeraarch64}:latest
+ docker manifest inspect ${dockerim}:latest
+ docker login -u tdengine -p ${passWord} #replace the docker registry username and password
+ docker manifest push ${dockerim}:latest
+ docker pull tdengine/tdengine:latest
+
+fi
-# docker manifest push tdengine/tdengine:latest
-# # how set latest version ???
diff --git a/packaging/docker/dockerbuild.sh b/packaging/docker/dockerbuild.sh
index 541ae6ec1398ae40a450382d25aa53bec18a8ced..8b0b0c190c5acd9352ddc2699db3375e18d5fb9c 100755
--- a/packaging/docker/dockerbuild.sh
+++ b/packaging/docker/dockerbuild.sh
@@ -149,26 +149,4 @@ rm -rf temp1.data
if [ ${dockerLatest} == 'y' ] ;then
docker tag tdengine/tdengine-${dockername}:${version} tdengine/tdengine-${dockername}:latest
docker push tdengine/tdengine-${dockername}:latest
- echo ">>>>>>>>>>>>> check whether tdengine/tdengine-${dockername}:latest has been published correctly"
- docker run -d --name doctestla -p 7030-7049:6030-6049 -p 7030-7049:6030-6049/udp tdengine/tdengine-${dockername}:latest
- sleep 2
- curl -u root:taosdata -d 'show variables;' 127.0.0.1:7041/rest/sql > temp2.data
- version_latest=` cat temp2.data |jq .data| jq '.[]' |grep "version" -A 2 -B 1 | jq ".[1]" `
- echo "${version_latest}"
- if [ "${version_latest}" == "\"${version}\"" ] ; then
- echo "docker version is right "
- else
- echo "docker version is wrong "
- exit 1
- fi
fi
-rm -rf temp2.data
-
-if [ -n "$(docker ps -aq)" ] ;then
- echo "delte docker process"
- docker stop $(docker ps -aq)
- docker rm $(docker ps -aq)
-fi
-
-cd ${scriptDir}
-rm -f ${pkgFile}
diff --git a/packaging/release.bat b/packaging/release.bat
index 591227382f9cec4a2fa1308a9b827994430f7236..b87ae68e2b2f8b0507992206af6ed482e5a9392c 100644
--- a/packaging/release.bat
+++ b/packaging/release.bat
@@ -44,8 +44,6 @@ cmake ../../ -G "NMake Makefiles JOM" -DCMAKE_MAKE_PROGRAM=jom -DBUILD_TOOLS=tru
cmake --build .
rd /s /Q C:\TDengine
cmake --install .
-for /r c:\TDengine %%i in (*.dll) do signtool sign /f D:\\123.pfx /p taosdata %%i
-for /r c:\TDengine %%i in (*.exe) do signtool sign /f D:\\123.pfx /p taosdata %%i
if not %errorlevel% == 0 ( call :RUNFAILED build x64 failed & exit /b 1)
cd %package_dir%
iscc /DMyAppInstallName="%packagServerName_x64%" /DMyAppVersion="%2" /DMyAppExcludeSource="" tools\tdengine.iss /O..\release
@@ -53,7 +51,6 @@ if not %errorlevel% == 0 ( call :RUNFAILED package %packagServerName_x64% faile
iscc /DMyAppInstallName="%packagClientName_x64%" /DMyAppVersion="%2" /DMyAppExcludeSource="taosd.exe" tools\tdengine.iss /O..\release
if not %errorlevel% == 0 ( call :RUNFAILED package %packagClientName_x64% failed & exit /b 1)
-for /r ..\release %%i in (*.exe) do signtool sign /f d:\\123.pfx /p taosdata %%i
goto EXIT0
:USAGE
diff --git a/packaging/testpackage.sh b/packaging/testpackage.sh
new file mode 100755
index 0000000000000000000000000000000000000000..20f93ecaec41045fca38c552e0b150c65b37c0be
--- /dev/null
+++ b/packaging/testpackage.sh
@@ -0,0 +1,277 @@
+#!/bin/sh
+#parameter
+scriptDir=$(dirname $(readlink -f $0))
+packgeName=$1
+version=$2
+originPackageName=$3
+originversion=$4
+testFile=$5
+subFile="taos.tar.gz"
+
+# Color setting
+RED='\033[41;30m'
+GREEN='\033[1;32m'
+YELLOW='\033[1;33m'
+BLUE='\033[1;34m'
+GREEN_DARK='\033[0;32m'
+YELLOW_DARK='\033[0;33m'
+BLUE_DARK='\033[0;34m'
+GREEN_UNDERLINE='\033[4;32m'
+NC='\033[0m'
+
+if [ ${testFile} = "server" ];then
+ tdPath="TDengine-server-${version}"
+ originTdpPath="TDengine-server-${originversion}"
+ installCmd="install.sh"
+elif [ ${testFile} = "client" ];then
+ tdPath="TDengine-client-${version}"
+ originTdpPath="TDengine-client-${originversion}"
+ installCmd="install_client.sh"
+elif [ ${testFile} = "tools" ];then
+ tdPath="taosTools-${version}"
+ originTdpPath="taosTools-${originversion}"
+ installCmd="install-taostools.sh"
+fi
+
+function cmdInstall {
+command=$1
+if command -v ${command} ;then
+ echoColor YD "${command} is already installed"
+else
+ if command -v apt ;then
+ apt-get install ${command} -y
+ elif command -v yum ;then
+ yum -y install ${command}
+ echoColor YD "you should install ${command} manually"
+ fi
+fi
+}
+
+function echoColor {
+color=$1
+command=$2
+
+if [ ${color} = 'Y' ];then
+ echo -e "${YELLOW}${command}${NC}"
+elif [ ${color} = 'YD' ];then
+ echo -e "${YELLOW_DARK}${command}${NC}"
+elif [ ${color} = 'R' ];then
+ echo -e "${RED}${command}${NC}"
+elif [ ${color} = 'G' ];then
+ echo -e "${GREEN}${command}${NC}\r\n"
+elif [ ${color} = 'B' ];then
+ echo -e "${BLUE}${command}${NC}"
+elif [ ${color} = 'BD' ];then
+ echo -e "${BLUE_DARK}${command}${NC}"
+fi
+}
+
+
+function wgetFile {
+
+file=$1
+
+if [ ! -f ${file} ];then
+ echoColor BD "wget https://www.taosdata.com/assets-download/3.0/${file}"
+ wget https://www.taosdata.com/assets-download/3.0/${file}
+else
+ echoColor YD "${file} already exists "
+fi
+}
+
+function newPath {
+
+buildPath=$1
+
+if [ ! -d ${buildPath} ] ;then
+ echoColor BD "mkdir -p ${buildPath}"
+ mkdir -p ${buildPath}
+else
+ echoColor YD "${buildPath} already exists"
+fi
+
+}
+
+
+echoColor G "===== install basesoft ====="
+
+cmdInstall tree
+cmdInstall wget
+cmdInstall expect
+
+echoColor G "===== Uninstall all components of TDeingne ====="
+
+if command -v rmtaos ;then
+ echoColor YD "uninstall all components of TDeingne:rmtaos"
+ rmtaos
+else
+ echoColor YD "os doesn't include TDengine"
+fi
+
+if command -v rmtaostools ;then
+ echoColor YD "uninstall all components of TDeingne:rmtaostools"
+ rmtaostools
+else
+ echoColor YD "os doesn't include rmtaostools "
+fi
+
+
+
+
+echoColor G "===== new workroom path ====="
+installPath="/usr/local/src/packageTest"
+oriInstallPath="/usr/local/src/packageTest/3.1"
+
+newPath ${installPath}
+
+newPath ${oriInstallPath}
+
+
+if [ -d ${oriInstallPath}/${originTdpPath} ] ;then
+ echoColor BD "rm -rf ${oriInstallPath}/${originTdpPath}/*"
+ rm -rf ${oriInstallPath}/${originTdpPath}/*
+fi
+
+if [ -d ${installPath}/${tdPath} ] ;then
+ echoColor BD "rm -rf ${installPath}/${tdPath}/*"
+ rm -rf ${installPath}/${tdPath}/*
+fi
+
+echoColor G "===== download installPackage ====="
+cd ${installPath} && wgetFile ${packgeName}
+cd ${oriInstallPath} && wgetFile ${originPackageName}
+
+cd ${installPath}
+cp -r ${scriptDir}/debRpmAutoInstall.sh .
+
+packageSuffix=$(echo ${packgeName} | awk -F '.' '{print $NF}')
+
+
+if [ ! -f debRpmAutoInstall.sh ];then
+ echo '#!/usr/bin/expect ' > debRpmAutoInstall.sh
+ echo 'set packgeName [lindex $argv 0]' >> debRpmAutoInstall.sh
+ echo 'set packageSuffix [lindex $argv 1]' >> debRpmAutoInstall.sh
+ echo 'set timeout 3 ' >> debRpmAutoInstall.sh
+ echo 'if { ${packageSuffix} == "deb" } {' >> debRpmAutoInstall.sh
+ echo ' spawn dpkg -i ${packgeName} ' >> debRpmAutoInstall.sh
+ echo '} elseif { ${packageSuffix} == "rpm"} {' >> debRpmAutoInstall.sh
+ echo ' spawn rpm -ivh ${packgeName}' >> debRpmAutoInstall.sh
+ echo '}' >> debRpmAutoInstall.sh
+ echo 'expect "*one:"' >> debRpmAutoInstall.sh
+ echo 'send "\r"' >> debRpmAutoInstall.sh
+ echo 'expect "*skip:"' >> debRpmAutoInstall.sh
+ echo 'send "\r" ' >> debRpmAutoInstall.sh
+fi
+
+
+echoColor G "===== instal Package ====="
+
+if [[ ${packgeName} =~ "deb" ]];then
+ cd ${installPath}
+ dpkg -r taostools
+ dpkg -r tdengine
+ if [[ ${packgeName} =~ "TDengine" ]];then
+ echoColor BD "./debRpmAutoInstall.sh ${packgeName} ${packageSuffix}" && chmod 755 debRpmAutoInstall.sh && ./debRpmAutoInstall.sh ${packgeName} ${packageSuffix}
+ else
+ echoColor BD "dpkg -i ${packgeName}" && dpkg -i ${packgeName}
+ fi
+elif [[ ${packgeName} =~ "rpm" ]];then
+ cd ${installPath}
+ sudo rpm -e tdengine
+ sudo rpm -e taostools
+ if [[ ${packgeName} =~ "TDengine" ]];then
+ echoColor BD "./debRpmAutoInstall.sh ${packgeName} ${packageSuffix}" && chmod 755 debRpmAutoInstall.sh && ./debRpmAutoInstall.sh ${packgeName} ${packageSuffix}
+ else
+ echoColor BD "rpm -ivh ${packgeName}" && rpm -ivh ${packgeName}
+ fi
+elif [[ ${packgeName} =~ "tar" ]];then
+ echoColor G "===== check installPackage File of tar ====="
+ cd ${oriInstallPath}
+ if [ ! -f {originPackageName} ];then
+ echoColor YD "download base installPackage"
+ wgetFile ${originPackageName}
+ fi
+ echoColor YD "unzip the base installation package"
+ echoColor BD "tar -xf ${originPackageName}" && tar -xf ${originPackageName}
+ cd ${installPath}
+ echoColor YD "unzip the new installation package"
+ echoColor BD "tar -xf ${packgeName}" && tar -xf ${packgeName}
+
+ if [ ${testFile} != "tools" ] ;then
+ cd ${installPath}/${tdPath} && tar xf ${subFile}
+ cd ${oriInstallPath}/${originTdpPath} && tar xf ${subFile}
+ fi
+
+ cd ${oriInstallPath}/${originTdpPath} && tree -I "driver" > ${installPath}/base_${originversion}_checkfile
+ cd ${installPath}/${tdPath} && tree -I "driver" > ${installPath}/now_${version}_checkfile
+
+ cd ${installPath}
+ diff ${installPath}/base_${originversion}_checkfile ${installPath}/now_${version}_checkfile > ${installPath}/diffFile.log
+ diffNumbers=`cat ${installPath}/diffFile.log |wc -l `
+
+ if [ ${diffNumbers} != 0 ];then
+ echoColor R "The number and names of files is different from the previous installation package"
+ echoColor Y `cat ${installPath}/diffFile.log`
+ exit -1
+ else
+ echoColor G "The number and names of files are the same as previous installation packages"
+ rm -rf ${installPath}/diffFile.log
+ fi
+ echoColor YD "===== install Package of tar ====="
+ cd ${installPath}/${tdPath}
+ if [ ${testFile} = "server" ];then
+ echoColor BD "bash ${installCmd} -e no "
+ bash ${installCmd} -e no
+ else
+ echoColor BD "bash ${installCmd} "
+ bash ${installCmd}
+ fi
+fi
+
+cd ${installPath}
+
+if [[ ${packgeName} =~ "Lite" ]] || ([[ ${packgeName} =~ "x64" ]] && [[ ${packgeName} =~ "client" ]]) || ([[ ${packgeName} =~ "deb" ]] && [[ ${packgeName} =~ "server" ]]) || ([[ ${packgeName} =~ "rpm" ]] && [[ ${packgeName} =~ "server" ]]) ;then
+ echoColor G "===== install taos-tools when package is lite or client ====="
+ cd ${installPath}
+ wgetFile taosTools-2.1.3-Linux-x64.tar.gz .
+ tar xf taosTools-2.1.3-Linux-x64.tar.gz
+ cd taosTools-2.1.3 && bash install-taostools.sh
+elif ([[ ${packgeName} =~ "arm64" ]] && [[ ${packgeName} =~ "client" ]]);then
+ echoColor G "===== install taos-tools arm when package is arm64-client ====="
+ cd ${installPath}
+ wgetFile taosTools-2.1.3-Linux-arm64.tar.gz .
+ tar xf taosTools-2.1.3-Linux-arm64.tar.gz
+ cd taosTools-2.1.3 && bash install-taostools.sh
+fi
+
+echoColor G "===== start TDengine ====="
+
+if [[ ${packgeName} =~ "server" ]] ;then
+ echoColor BD " rm -rf /var/lib/taos/* && systemctl restart taosd "
+ rm -rf /var/lib/taos/*
+ systemctl restart taosd
+fi
+
+rm -rf ${installPath}/${packgeName}
+rm -rf ${installPath}/${tdPath}/
+
+# if ([[ ${packgeName} =~ "Lite" ]] && [[ ${packgeName} =~ "tar" ]]) || [[ ${packgeName} =~ "client" ]] ;then
+# echoColor G "===== install taos-tools when package is lite or client ====="
+# cd ${installPath}
+# wgetFile taosTools-2.1.2-Linux-x64.tar.gz .
+# tar xf taosTools-2.1.2-Linux-x64.tar.gz
+# cd taosTools-2.1.2 && bash install-taostools.sh
+# elif [[ ${packgeName} =~ "Lite" ]] && [[ ${packgeName} =~ "deb" ]] ;then
+# echoColor G "===== install taos-tools when package is lite or client ====="
+# cd ${installPath}
+# wgetFile taosTools-2.1.2-Linux-x64.tar.gz .
+# tar xf taosTools-2.1.2-Linux-x64.tar.gz
+# cd taosTools-2.1.2 && bash install-taostools.sh
+# elif [[ ${packgeName} =~ "Lite" ]] && [[ ${packgeName} =~ "rpm" ]] ;then
+# echoColor G "===== install taos-tools when package is lite or client ====="
+# cd ${installPath}
+# wgetFile taosTools-2.1.2-Linux-x64.tar.gz .
+# tar xf taosTools-2.1.2-Linux-x64.tar.gz
+# cd taosTools-2.1.2 && bash install-taostools.sh
+# fi
+
diff --git a/packaging/tools/install.sh b/packaging/tools/install.sh
index 39606ead300c8c603b9f25360d19e3af49b642ff..9694a89a8f8376e871c1e45434fb64d4e9c4408f 100755
--- a/packaging/tools/install.sh
+++ b/packaging/tools/install.sh
@@ -33,6 +33,7 @@ adapterName="taosadapter"
benchmarkName="taosBenchmark"
dumpName="taosdump"
demoName="taosdemo"
+xname="taosx"
data_dir=${dataDir}
log_dir=${logDir}
@@ -49,8 +50,7 @@ install_main_dir=${installDir}
bin_dir="${installDir}/bin"
service_config_dir="/etc/systemd/system"
-nginx_port=6060
-nginx_dir="/usr/local/nginxd"
+web_port=6041
# Color setting
RED='\033[0;31m'
@@ -181,7 +181,7 @@ function install_main_path() {
${csudo}mkdir -p ${install_main_dir}/include
# ${csudo}mkdir -p ${install_main_dir}/init.d
if [ "$verMode" == "cluster" ]; then
- ${csudo}mkdir -p ${nginx_dir}
+ ${csudo}mkdir -p ${install_main_dir}/share
fi
if [[ -e ${script_dir}/email ]]; then
@@ -199,6 +199,7 @@ function install_bin() {
${csudo}rm -f ${bin_link_dir}/${demoName} || :
${csudo}rm -f ${bin_link_dir}/${benchmarkName} || :
${csudo}rm -f ${bin_link_dir}/${dumpName} || :
+ ${csudo}rm -f ${bin_link_dir}/${xname} || :
${csudo}rm -f ${bin_link_dir}/set_core || :
${csudo}rm -f ${bin_link_dir}/TDinsight.sh || :
@@ -212,15 +213,10 @@ function install_bin() {
[ -x ${install_main_dir}/bin/${benchmarkName} ] && ${csudo}ln -s ${install_main_dir}/bin/${benchmarkName} ${bin_link_dir}/${demoName} || :
[ -x ${install_main_dir}/bin/${benchmarkName} ] && ${csudo}ln -s ${install_main_dir}/bin/${benchmarkName} ${bin_link_dir}/${benchmarkName} || :
[ -x ${install_main_dir}/bin/${dumpName} ] && ${csudo}ln -s ${install_main_dir}/bin/${dumpName} ${bin_link_dir}/${dumpName} || :
+ [ -x ${install_main_dir}/bin/${xname} ] && ${csudo}ln -s ${install_main_dir}/bin/${dumpName} ${bin_link_dir}/${xname} || :
[ -x ${install_main_dir}/bin/TDinsight.sh ] && ${csudo}ln -s ${install_main_dir}/bin/TDinsight.sh ${bin_link_dir}/TDinsight.sh || :
[ -x ${install_main_dir}/bin/remove.sh ] && ${csudo}ln -s ${install_main_dir}/bin/remove.sh ${bin_link_dir}/${uninstallScript} || :
[ -x ${install_main_dir}/bin/set_core.sh ] && ${csudo}ln -s ${install_main_dir}/bin/set_core.sh ${bin_link_dir}/set_core || :
-
- if [ "$verMode" == "cluster" ]; then
- ${csudo}cp -r ${script_dir}/nginxd/* ${nginx_dir} && ${csudo}chmod 0555 ${nginx_dir}/*
- ${csudo}mkdir -p ${nginx_dir}/logs
- ${csudo}chmod 777 ${nginx_dir}/sbin/nginx
- fi
}
function install_lib() {
@@ -571,6 +567,13 @@ function install_examples() {
fi
}
+function install_web() {
+ if [ -d "${script_dir}/share" ]; then
+ ${csudo}cp -rf ${script_dir}/share/* ${install_main_dir}/share
+ fi
+}
+
+
function clean_service_on_sysvinit() {
if pidof ${serverName} &>/dev/null; then
${csudo}service ${serverName} stop || :
@@ -651,16 +654,6 @@ function clean_service_on_systemd() {
fi
${csudo}systemctl disable tarbitratord &>/dev/null || echo &>/dev/null
${csudo}rm -f ${tarbitratord_service_config}
-
- if [ "$verMode" == "cluster" ]; then
- nginx_service_config="${service_config_dir}/nginxd.service"
- if systemctl is-active --quiet nginxd; then
- echo "Nginx for ${productName} is running, stopping it..."
- ${csudo}systemctl stop nginxd &>/dev/null || echo &>/dev/null
- fi
- ${csudo}systemctl disable nginxd &>/dev/null || echo &>/dev/null
- ${csudo}rm -f ${nginx_service_config}
- fi
}
function install_service_on_systemd() {
@@ -674,19 +667,6 @@ function install_service_on_systemd() {
${csudo}systemctl enable ${serverName}
${csudo}systemctl daemon-reload
-
- if [ "$verMode" == "cluster" ]; then
- [ -f ${script_dir}/cfg/nginxd.service ] &&
- ${csudo}cp ${script_dir}/cfg/nginxd.service \
- ${service_config_dir}/ || :
- ${csudo}systemctl daemon-reload
-
- if ! ${csudo}systemctl enable nginxd &>/dev/null; then
- ${csudo}systemctl daemon-reexec
- ${csudo}systemctl enable nginxd
- fi
- ${csudo}systemctl start nginxd
- fi
}
function install_adapter_service() {
@@ -790,19 +770,6 @@ function updateProduct() {
sleep 1
fi
- if [ "$verMode" == "cluster" ]; then
- if pidof nginx &>/dev/null; then
- if ((${service_mod} == 0)); then
- ${csudo}systemctl stop nginxd || :
- elif ((${service_mod} == 1)); then
- ${csudo}service nginxd stop || :
- else
- kill_process nginx
- fi
- sleep 1
- fi
- fi
-
install_main_path
install_log
@@ -814,6 +781,7 @@ function updateProduct() {
fi
install_examples
+ install_web
if [ -z $1 ]; then
install_bin
install_service
@@ -822,33 +790,27 @@ function updateProduct() {
install_adapter_config
openresty_work=false
- if [ "$verMode" == "cluster" ]; then
- # Check if openresty is installed
- # Check if nginx is installed successfully
- if type curl &>/dev/null; then
- if curl -sSf http://127.0.0.1:${nginx_port} &>/dev/null; then
- echo -e "\033[44;32;1mNginx for ${productName} is updated successfully!${NC}"
- openresty_work=true
- else
- echo -e "\033[44;31;5mNginx for ${productName} does not work! Please try again!\033[0m"
- fi
- fi
- fi
echo
echo -e "${GREEN_DARK}To configure ${productName} ${NC}: edit ${cfg_install_dir}/${configFile}"
- echo -e "${GREEN_DARK}To configure Adapter (if has) ${NC}: edit ${cfg_install_dir}/${adapterName}.toml"
+ [ -f ${configDir}/taosadapter.toml ] && [ -f ${installDir}/bin/taosadapter ] && \
+ echo -e "${GREEN_DARK}To configure Taos Adapter ${NC}: edit ${configDir}/taosadapter.toml"
if ((${service_mod} == 0)); then
echo -e "${GREEN_DARK}To start ${productName} ${NC}: ${csudo}systemctl start ${serverName}${NC}"
+ [ -f ${service_config_dir}/taosadapter.service ] && [ -f ${installDir}/bin/taosadapter ] && \
+ echo -e "${GREEN_DARK}To start Taos Adatper ${NC}: ${csudo}systemctl start taosadapter ${NC}"
elif ((${service_mod} == 1)); then
echo -e "${GREEN_DARK}To start ${productName} ${NC}: ${csudo}service ${serverName} start${NC}"
+ [ -f ${service_config_dir}/taosadapter.service ] && [ -f ${installDir}/bin/taosadapter ] && \
+ echo -e "${GREEN_DARK}To start Taos Adapter ${NC}: ${csudo}service taosadapter start${NC}"
else
- echo -e "${GREEN_DARK}To start Adapter (if has)${NC}: ${adapterName} &${NC}"
echo -e "${GREEN_DARK}To start ${productName} ${NC}: ./${serverName}${NC}"
+ [ -f ${installDir}/bin/taosadapter ] && \
+ echo -e "${GREEN_DARK}To start Taos Adapter ${NC}: taosadapter &${NC}"
fi
if [ ${openresty_work} = 'true' ]; then
- echo -e "${GREEN_DARK}To access ${productName} ${NC}: use ${GREEN_UNDERLINE}${clientName} -h $serverFqdn${NC} in shell OR from ${GREEN_UNDERLINE}http://127.0.0.1:${nginx_port}${NC}"
+ echo -e "${GREEN_DARK}To access ${productName} ${NC}: use ${GREEN_UNDERLINE}${clientName} -h $serverFqdn${NC} in shell OR from ${GREEN_UNDERLINE}http://127.0.0.1:${web_port}${NC}"
else
echo -e "${GREEN_DARK}To access ${productName} ${NC}: use ${GREEN_UNDERLINE}${clientName} -h $serverFqdn${NC} in shell${NC}"
fi
@@ -897,6 +859,7 @@ function installProduct() {
install_connector
fi
install_examples
+ install_web
if [ -z $1 ]; then # install service and client
# For installing new
@@ -906,31 +869,26 @@ function installProduct() {
install_adapter_config
openresty_work=false
- if [ "$verMode" == "cluster" ]; then
- # Check if nginx is installed successfully
- if type curl &>/dev/null; then
- if curl -sSf http://127.0.0.1:${nginx_port} &>/dev/null; then
- echo -e "\033[44;32;1mNginx for ${productName} is installed successfully!${NC}"
- openresty_work=true
- else
- echo -e "\033[44;31;5mNginx for ${productName} does not work! Please try again!\033[0m"
- fi
- fi
- fi
install_config
# Ask if to start the service
echo
echo -e "${GREEN_DARK}To configure ${productName} ${NC}: edit ${cfg_install_dir}/${configFile}"
- echo -e "${GREEN_DARK}To configure ${adapterName} (if has) ${NC}: edit ${cfg_install_dir}/${adapterName}.toml"
+ [ -f ${configDir}/taosadapter.toml ] && [ -f ${installDir}/bin/taosadapter ] && \
+ echo -e "${GREEN_DARK}To configure Taos Adapter ${NC}: edit ${configDir}/taosadapter.toml"
if ((${service_mod} == 0)); then
echo -e "${GREEN_DARK}To start ${productName} ${NC}: ${csudo}systemctl start ${serverName}${NC}"
+ [ -f ${service_config_dir}/taosadapter.service ] && [ -f ${installDir}/bin/taosadapter ] && \
+ echo -e "${GREEN_DARK}To start Taos Adatper ${NC}: ${csudo}systemctl start taosadapter ${NC}"
elif ((${service_mod} == 1)); then
echo -e "${GREEN_DARK}To start ${productName} ${NC}: ${csudo}service ${serverName} start${NC}"
+ [ -f ${service_config_dir}/taosadapter.service ] && [ -f ${installDir}/bin/taosadapter ] && \
+ echo -e "${GREEN_DARK}To start Taos Adapter ${NC}: ${csudo}service taosadapter start${NC}"
else
- echo -e "${GREEN_DARK}To start Adapter (if has)${NC}: ${adapterName} &${NC}"
echo -e "${GREEN_DARK}To start ${productName} ${NC}: ${serverName}${NC}"
+ [ -f ${installDir}/bin/taosadapter ] && \
+ echo -e "${GREEN_DARK}To start Taos Adapter ${NC}: taosadapter &${NC}"
fi
if [ ! -z "$firstEp" ]; then
diff --git a/packaging/tools/make_install.sh b/packaging/tools/make_install.sh
index 6a95ace99ee521b9e1baca39d72bf7fa1cabb7d5..58b43850013a27147e76c0eb7a6e4c9a219b3303 100755
--- a/packaging/tools/make_install.sh
+++ b/packaging/tools/make_install.sh
@@ -151,6 +151,7 @@ function install_main_path() {
${csudo}mkdir -p ${install_main_dir}/driver
${csudo}mkdir -p ${install_main_dir}/examples
${csudo}mkdir -p ${install_main_dir}/include
+ ${csudo}mkdir -p ${install_main_dir}/share
# ${csudo}mkdir -p ${install_main_dir}/init.d
else
${csudo}rm -rf ${install_main_dir} || ${csudo}rm -rf ${install_main_2_dir} || :
@@ -161,6 +162,7 @@ function install_main_path() {
${csudo}mkdir -p ${install_main_dir}/driver || ${csudo}mkdir -p ${install_main_2_dir}/driver
${csudo}mkdir -p ${install_main_dir}/examples || ${csudo}mkdir -p ${install_main_2_dir}/examples
${csudo}mkdir -p ${install_main_dir}/include || ${csudo}mkdir -p ${install_main_2_dir}/include
+ ${csudo}mkdir -p ${install_main_dir}/share || ${csudo}mkdir -p ${install_main_2_dir}/share
fi
}
@@ -172,6 +174,7 @@ function install_bin() {
${csudo}rm -f ${bin_link_dir}/udfd || :
${csudo}rm -f ${bin_link_dir}/taosdemo || :
${csudo}rm -f ${bin_link_dir}/taosdump || :
+ ${csudo}rm -f ${bin_link_dir}/taosx || :
if [ "$osType" != "Darwin" ]; then
${csudo}rm -f ${bin_link_dir}/perfMonitor || :
@@ -184,6 +187,7 @@ function install_bin() {
[ -f ${binary_dir}/build/bin/taosdump ] && ${csudo}cp -r ${binary_dir}/build/bin/taosdump ${install_main_dir}/bin || :
[ -f ${binary_dir}/build/bin/taosadapter ] && ${csudo}cp -r ${binary_dir}/build/bin/taosadapter ${install_main_dir}/bin || :
[ -f ${binary_dir}/build/bin/udfd ] && ${csudo}cp -r ${binary_dir}/build/bin/udfd ${install_main_dir}/bin || :
+ [ -f ${binary_dir}/build/bin/taosx ] && ${csudo}cp -r ${binary_dir}/build/bin/taosx ${install_main_dir}/bin || :
${csudo}cp -r ${binary_dir}/build/bin/${serverName} ${install_main_dir}/bin || :
${csudo}cp -r ${script_dir}/taosd-dump-cfg.gdb ${install_main_dir}/bin || :
@@ -199,6 +203,7 @@ function install_bin() {
[ -x ${install_main_dir}/bin/udfd ] && ${csudo}ln -s ${install_main_dir}/bin/udfd ${bin_link_dir}/udfd || :
[ -x ${install_main_dir}/bin/taosdump ] && ${csudo}ln -s ${install_main_dir}/bin/taosdump ${bin_link_dir}/taosdump || :
[ -x ${install_main_dir}/bin/taosdemo ] && ${csudo}ln -s ${install_main_dir}/bin/taosdemo ${bin_link_dir}/taosdemo || :
+ [ -x ${install_main_dir}/bin/taosx ] && ${csudo}ln -s ${install_main_dir}/bin/taosx ${bin_link_dir}/taosx || :
[ -x ${install_main_dir}/bin/perfMonitor ] && ${csudo}ln -s ${install_main_dir}/bin/perfMonitor ${bin_link_dir}/perfMonitor || :
[ -x ${install_main_dir}/set_core.sh ] && ${csudo}ln -s ${install_main_dir}/bin/set_core.sh ${bin_link_dir}/set_core || :
[ -x ${install_main_dir}/bin/remove.sh ] && ${csudo}ln -s ${install_main_dir}/bin/remove.sh ${bin_link_dir}/${uninstallScript} || :
@@ -215,6 +220,7 @@ function install_bin() {
[ -x ${install_main_dir}/bin/udfd ] || [ -x ${install_main_2_dir}/bin/udfd ] && ${csudo}ln -s ${install_main_dir}/bin/udfd ${bin_link_dir}/udfd || ${csudo}ln -s ${install_main_2_dir}/bin/udfd || :
[ -x ${install_main_dir}/bin/taosdump ] || [ -x ${install_main_2_dir}/bin/taosdump ] && ${csudo}ln -s ${install_main_dir}/bin/taosdump ${bin_link_dir}/taosdump || ln -s ${install_main_2_dir}/bin/taosdump ${bin_link_dir}/taosdump || :
[ -x ${install_main_dir}/bin/taosdemo ] || [ -x ${install_main_2_dir}/bin/taosdemo ] && ${csudo}ln -s ${install_main_dir}/bin/taosdemo ${bin_link_dir}/taosdemo || ln -s ${install_main_2_dir}/bin/taosdemo ${bin_link_dir}/taosdemo || :
+ [ -x ${install_main_dir}/bin/taosx ] || [ -x ${install_main_2_dir}/bin/taosx ] && ${csudo}ln -s ${install_main_dir}/bin/taosx ${bin_link_dir}/taosx || ln -s ${install_main_2_dir}/bin/taosx ${bin_link_dir}/taosx || :
fi
}
@@ -381,8 +387,7 @@ function install_header() {
${install_main_dir}/include ||
${csudo}cp -f ${source_dir}/include/client/taos.h ${source_dir}/include/common/taosdef.h ${source_dir}/include/util/taoserror.h ${source_dir}/include/libs/function/taosudf.h \
${install_main_2_dir}/include &&
- ${csudo}chmod 644 ${install_main_dir}/include/* ||:
- ${csudo}chmod 644 ${install_main_2_dir}/include/*
+ ${csudo}chmod 644 ${install_main_dir}/include/* || ${csudo}chmod 644 ${install_main_2_dir}/include/*
fi
}
@@ -466,6 +471,16 @@ function install_examples() {
fi
}
+function install_web() {
+ if [ -d "${binary_dir}/build/share" ]; then
+ if [ "$osType" != "Darwin" ]; then
+ ${csudo}cp -rf ${binary_dir}/build/share/* ${install_main_dir}/share || :
+ else
+ ${csudo}cp -rf ${binary_dir}/build/share/* ${install_main_dir}/share || ${csudo}cp -rf ${binary_dir}/build/share/* ${install_main_2_dir}/share || :
+ fi
+ fi
+}
+
function clean_service_on_sysvinit() {
if pidof ${serverName} &>/dev/null; then
${csudo}service ${serverName} stop || :
@@ -593,6 +608,7 @@ function update_TDengine() {
install_lib
# install_connector
install_examples
+ install_web
install_bin
install_service
@@ -606,14 +622,20 @@ function update_TDengine() {
echo
echo -e "${GREEN_DARK}To configure ${productName} ${NC}: edit ${configDir}/${configFile}"
- echo -e "${GREEN_DARK}To configure Taos Adapter (if has) ${NC}: edit ${configDir}/taosadapter.toml"
+ [ -f ${configDir}/taosadapter.toml ] && [ -f ${installDir}/bin/taosadapter ] && \
+ echo -e "${GREEN_DARK}To configure Taos Adapter ${NC}: edit ${configDir}/taosadapter.toml"
if ((${service_mod} == 0)); then
echo -e "${GREEN_DARK}To start ${productName} ${NC}: ${csudo}systemctl start ${serverName}${NC}"
+ [ -f ${service_config_dir}/taosadapter.service ] && [ -f ${installDir}/bin/taosadapter ] && \
+ echo -e "${GREEN_DARK}To start Taos Adatper ${NC}: ${csudo}systemctl start taosadapter ${NC}"
elif ((${service_mod} == 1)); then
echo -e "${GREEN_DARK}To start ${productName} ${NC}: ${csudo}service ${serverName} start${NC}"
+ [ -f ${service_config_dir}/taosadapter.service ] && [ -f ${installDir}/bin/taosadapter ] && \
+ echo -e "${GREEN_DARK}To start Taos Adapter ${NC}: ${csudo}service taosadapter start${NC}"
else
- echo -e "${GREEN_DARK}To start Taos Adapter (if has)${NC}: taosadapter &${NC}"
echo -e "${GREEN_DARK}To start ${productName} ${NC}: ${serverName}${NC}"
+ [ -f ${installDir}/bin/taosadapter ] && \
+ echo -e "${GREEN_DARK}To start Taos Adapter ${NC}: taosadapter &${NC}"
fi
echo -e "${GREEN_DARK}To access ${productName} ${NC}: use ${GREEN_UNDERLINE}${clientName}${NC} in shell${NC}"
@@ -646,14 +668,20 @@ function install_TDengine() {
echo -e "\033[44;32;1m${productName} is installed successfully!${NC}"
echo
echo -e "${GREEN_DARK}To configure ${productName} ${NC}: edit ${configDir}/${configFile}"
- echo -e "${GREEN_DARK}To configure taosadapter (if has) ${NC}: edit ${configDir}/taosadapter.toml"
+ [ -f ${configDir}/taosadapter.toml ] && [ -f ${installDir}/bin/taosadapter ] && \
+ echo -e "${GREEN_DARK}To configure Taos Adapter ${NC}: edit ${configDir}/taosadapter.toml"
if ((${service_mod} == 0)); then
echo -e "${GREEN_DARK}To start ${productName} ${NC}: ${csudo}systemctl start ${serverName}${NC}"
+ [ -f ${service_config_dir}/taosadapter.service ] && [ -f ${installDir}/bin/taosadapter ] && \
+ echo -e "${GREEN_DARK}To start Taos Adapter ${NC}: ${csudo}systemctl start taosadapter ${NC}"
elif ((${service_mod} == 1)); then
echo -e "${GREEN_DARK}To start ${productName} ${NC}: ${csudo}service ${serverName} start${NC}"
+ [ -f ${service_config_dir}/taosadapter.service ] && [ -f ${installDir}/bin/taosadapter ] && \
+ echo -e "${GREEN_DARK}To start Taos Adapter ${NC}: ${csudo}service taosadapter start${NC}"
else
- echo -e "${GREEN_DARK}To start Taos Adapter (if has)${NC}: taosadapter &${NC}"
echo -e "${GREEN_DARK}To start ${productName} ${NC}: ./${serverName}${NC}"
+ [ -f ${installDir}/bin/taosadapter ] && \
+ echo -e "${GREEN_DARK}To start Taos Adapter ${NC}: taosadapter &${NC}"
fi
echo -e "${GREEN_DARK}To access ${productName} ${NC}: use ${GREEN_UNDERLINE}${clientName}${NC} in shell${NC}"
diff --git a/packaging/tools/makepkg.sh b/packaging/tools/makepkg.sh
index f5e3bf18822676f54ee2f20412b5ebb4ce57fd3a..fb0ef4f9a30f11db261103ae10d8cde3e2b19aab 100755
--- a/packaging/tools/makepkg.sh
+++ b/packaging/tools/makepkg.sh
@@ -80,10 +80,12 @@ else
${build_dir}/bin/taosBenchmark \
${build_dir}/bin/TDinsight.sh \
$tdinsight_caches"
+ [ -f ${build_dir}/bin/taosx ] && taosx_bin="${build_dir}/bin/taosx"
bin_files="${build_dir}/bin/${serverName} \
${build_dir}/bin/${clientName} \
${taostools_bin_files} \
+ ${taosx_bin} \
${build_dir}/bin/taosadapter \
${build_dir}/bin/udfd \
${script_dir}/remove.sh \
@@ -105,7 +107,7 @@ else
fi
install_files="${script_dir}/install.sh"
-nginx_dir="${top_dir}/../enterprise/src/plugins/web"
+web_dir="${top_dir}/../enterprise/src/plugins/web"
init_file_deb=${script_dir}/../deb/taosd
init_file_rpm=${script_dir}/../rpm/taosd
@@ -130,10 +132,6 @@ if [ -f "${cfg_dir}/${serverName}.service" ]; then
cp ${cfg_dir}/${serverName}.service ${install_dir}/cfg || :
fi
-if [ -f "${top_dir}/packaging/cfg/nginxd.service" ]; then
- cp ${top_dir}/packaging/cfg/nginxd.service ${install_dir}/cfg || :
-fi
-
mkdir -p ${install_dir}/bin && cp ${bin_files} ${install_dir}/bin && chmod a+x ${install_dir}/bin/* || :
mkdir -p ${install_dir}/init.d && cp ${init_file_deb} ${install_dir}/init.d/${serverName}.deb
mkdir -p ${install_dir}/init.d && cp ${init_file_rpm} ${install_dir}/init.d/${serverName}.rpm
@@ -220,16 +218,6 @@ if [ "$verMode" == "cluster" ]; then
sed 's/verMode=edge/verMode=cluster/g' ${install_dir}/bin/remove.sh >>remove_temp.sh
mv remove_temp.sh ${install_dir}/bin/remove.sh
- mkdir -p ${install_dir}/nginxd && cp -r ${nginx_dir}/* ${install_dir}/nginxd
- cp ${nginx_dir}/png/taos.png ${install_dir}/nginxd/admin/images/taos.png
- rm -rf ${install_dir}/nginxd/png
-
- if [ "$cpuType" == "aarch64" ]; then
- cp -f ${install_dir}/nginxd/sbin/arm/64bit/nginx ${install_dir}/nginxd/sbin/
- elif [ "$cpuType" == "aarch32" ]; then
- cp -f ${install_dir}/nginxd/sbin/arm/32bit/nginx ${install_dir}/nginxd/sbin/
- fi
- rm -rf ${install_dir}/nginxd/sbin/arm
fi
cd ${install_dir}
@@ -286,6 +274,13 @@ if [[ $dbName == "taos" ]]; then
cp -r ${examples_dir}/C# ${install_dir}/examples
mkdir -p ${install_dir}/examples/taosbenchmark-json && cp ${examples_dir}/../tools/taos-tools/example/* ${install_dir}/examples/taosbenchmark-json
fi
+
+ # Add web files
+ if [ -d "${web_dir}/admin" ]; then
+ mkdir -p ${install_dir}/share/
+ cp ${web_dir}/admin ${install_dir}/share/ -r
+ cp ${web_dir}/png/taos.png ${install_dir}/share/admin/images/taos.png
+ fi
fi
# Copy driver
diff --git a/packaging/tools/remove.sh b/packaging/tools/remove.sh
index bf4ff564f56c3fa1a35418c6a1d3a0a372596e03..a648750904c9102c1786f5d1de1655a54d1e0a9a 100755
--- a/packaging/tools/remove.sh
+++ b/packaging/tools/remove.sh
@@ -27,13 +27,11 @@ local_bin_link_dir="/usr/local/bin"
lib_link_dir="/usr/lib"
lib64_link_dir="/usr/lib64"
inc_link_dir="/usr/include"
-install_nginxd_dir="/usr/local/nginxd"
service_config_dir="/etc/systemd/system"
taos_service_name=${serverName}
taosadapter_service_name="taosadapter"
tarbitrator_service_name="tarbitratord"
-nginx_service_name="nginxd"
csudo=""
if command -v sudo >/dev/null; then
csudo="sudo "
@@ -153,18 +151,6 @@ function clean_service_on_systemd() {
fi
${csudo}systemctl disable ${tarbitrator_service_name} &>/dev/null || echo &>/dev/null
${csudo}rm -f ${tarbitratord_service_config}
-
- if [ "$verMode" == "cluster" ]; then
- nginx_service_config="${service_config_dir}/${nginx_service_name}.service"
- if [ -d ${install_nginxd_dir} ]; then
- if systemctl is-active --quiet ${nginx_service_name}; then
- echo "Nginx for ${productName} is running, stopping it..."
- ${csudo}systemctl stop ${nginx_service_name} &>/dev/null || echo &>/dev/null
- fi
- ${csudo}systemctl disable ${nginx_service_name} &>/dev/null || echo &>/dev/null
- ${csudo}rm -f ${nginx_service_config}
- fi
- fi
}
function clean_service_on_sysvinit() {
@@ -239,7 +225,6 @@ clean_config
${csudo}rm -rf ${data_link_dir} || :
${csudo}rm -rf ${install_main_dir}
-${csudo}rm -rf ${install_nginxd_dir}
if [[ -e /etc/os-release ]]; then
osinfo=$(awk -F= '/^NAME/{print $2}' /etc/os-release)
else
diff --git a/source/client/CMakeLists.txt b/source/client/CMakeLists.txt
index f52edbe71f151398c5ebdcd705eab376f2318aae..e8e3c878496c58631131922cc6de47491d548f06 100644
--- a/source/client/CMakeLists.txt
+++ b/source/client/CMakeLists.txt
@@ -27,11 +27,18 @@ else()
INCLUDE_DIRECTORIES(jni/linux)
endif()
+set_target_properties(
+ taos
+ PROPERTIES
+ CLEAN_DIRECT_OUTPUT
+ 1
+)
+
set_target_properties(
taos
PROPERTIES
VERSION ${TD_VER_NUMBER}
- SOVERSION ${TD_VER_NUMBER}
+ SOVERSION 1
)
add_library(taos_static STATIC ${CLIENT_SRC})
diff --git a/source/client/inc/clientInt.h b/source/client/inc/clientInt.h
index 07e5f75e87904e8b7998717224214d2f15dccce0..535a436c6c17dfd90603f40dc189bcff5b2902d2 100644
--- a/source/client/inc/clientInt.h
+++ b/source/client/inc/clientInt.h
@@ -52,15 +52,17 @@ enum {
RES_TYPE__QUERY = 1,
RES_TYPE__TMQ,
RES_TYPE__TMQ_META,
+ RES_TYPE__TMQ_METADATA,
};
#define SHOW_VARIABLES_RESULT_COLS 2
#define SHOW_VARIABLES_RESULT_FIELD1_LEN (TSDB_CONFIG_OPTION_LEN + VARSTR_HEADER_SIZE)
#define SHOW_VARIABLES_RESULT_FIELD2_LEN (TSDB_CONFIG_VALUE_LEN + VARSTR_HEADER_SIZE)
-#define TD_RES_QUERY(res) (*(int8_t*)res == RES_TYPE__QUERY)
-#define TD_RES_TMQ(res) (*(int8_t*)res == RES_TYPE__TMQ)
-#define TD_RES_TMQ_META(res) (*(int8_t*)res == RES_TYPE__TMQ_META)
+#define TD_RES_QUERY(res) (*(int8_t*)res == RES_TYPE__QUERY)
+#define TD_RES_TMQ(res) (*(int8_t*)res == RES_TYPE__TMQ)
+#define TD_RES_TMQ_META(res) (*(int8_t*)res == RES_TYPE__TMQ_META)
+#define TD_RES_TMQ_METADATA(res) (*(int8_t*)res == RES_TYPE__TMQ_METADATA)
typedef struct SAppInstInfo SAppInstInfo;
@@ -101,6 +103,8 @@ typedef struct SQueryExecMetric {
int64_t ctgStart; // start to parse, us
int64_t ctgEnd; // end to parse, us
int64_t semanticEnd;
+ int64_t planEnd;
+ int64_t resultReady;
int64_t execEnd;
int64_t send; // start to send to server, us
int64_t rsp; // receive response from server, us
@@ -198,8 +202,8 @@ typedef struct {
int32_t vgId;
SSchemaWrapper schema;
int32_t resIter;
- SMqDataRsp rsp;
SReqResultInfo resInfo;
+ SMqDataRsp rsp;
} SMqRspObj;
typedef struct {
@@ -210,6 +214,17 @@ typedef struct {
SMqMetaRsp metaRsp;
} SMqMetaRspObj;
+typedef struct {
+ int8_t resType;
+ char topic[TSDB_TOPIC_FNAME_LEN];
+ char db[TSDB_DB_FNAME_LEN];
+ int32_t vgId;
+ SSchemaWrapper schema;
+ int32_t resIter;
+ SReqResultInfo resInfo;
+ STaosxRsp rsp;
+} SMqTaosxRspObj;
+
typedef struct SRequestObj {
int8_t resType; // query or tmq
uint64_t requestId;
@@ -235,6 +250,8 @@ typedef struct SRequestObj {
bool inRetry;
uint32_t prevCode; // previous error code: todo refactor, add update flag for catalog
uint32_t retry;
+ int64_t allocatorRefId;
+ SQuery* pQuery;
} SRequestObj;
typedef struct SSyncQueryParam {
@@ -369,7 +386,7 @@ void launchAsyncQuery(SRequestObj* pRequest, SQuery* pQuery, SMetaData*
int32_t refreshMeta(STscObj* pTscObj, SRequestObj* pRequest);
int32_t updateQnodeList(SAppInstInfo* pInfo, SArray* pNodeList);
void doAsyncQuery(SRequestObj* pRequest, bool forceUpdateMeta);
-int32_t removeMeta(STscObj* pTscObj, SArray* tbList);
+int32_t removeMeta(STscObj* pTscObj, SArray* tbList);
int32_t handleAlterTbExecRes(void* res, struct SCatalog* pCatalog);
int32_t handleCreateTbExecRes(void* res, SCatalog* pCatalog);
bool qnodeRequired(SRequestObj* pRequest);
diff --git a/source/client/src/TMQConnector.c b/source/client/src/TMQConnector.c
index 17d3a212c482c3462e542721d7d57f516250ff13..26bf55055f76eeade6a192e759560f21c0c2253d 100644
--- a/source/client/src/TMQConnector.c
+++ b/source/client/src/TMQConnector.c
@@ -42,6 +42,7 @@ void commit_cb(tmq_t *tmq, int32_t code, void *param) {
JNIEXPORT jlong JNICALL Java_com_taosdata_jdbc_tmq_TMQConnector_tmqConfNewImp(JNIEnv *env, jobject jobj) {
tmq_conf_t *conf = tmq_conf_new();
+ jniGetGlobalMethod(env);
return (jlong)conf;
}
@@ -211,7 +212,7 @@ JNIEXPORT void JNICALL Java_com_taosdata_jdbc_tmq_TMQConnector_tmqCommitAsync(JN
tmq_commit_async(tmq, res, commit_cb, consumer);
}
-JNIEXPORT int JNICALL Java_com_taosdata_jdbc_tmq_TMQConnector_tmqUnsubscribeImp(JNIEnv *env, jobject jobj, jlong jtmq) {
+JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_tmq_TMQConnector_tmqUnsubscribeImp(JNIEnv *env, jobject jobj, jlong jtmq) {
tmq_t *tmq = (tmq_t *)jtmq;
if (tmq == NULL) {
jniError("jobj:%p, tmq is closed", jobj);
@@ -221,7 +222,7 @@ JNIEXPORT int JNICALL Java_com_taosdata_jdbc_tmq_TMQConnector_tmqUnsubscribeImp(
return tmq_unsubscribe((tmq_t *)tmq);
}
-JNIEXPORT int JNICALL Java_com_taosdata_jdbc_tmq_TMQConnector_tmqConsumerCloseImp(JNIEnv *env, jobject jobj,
+JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_tmq_TMQConnector_tmqConsumerCloseImp(JNIEnv *env, jobject jobj,
jlong jtmq) {
tmq_t *tmq = (tmq_t *)jtmq;
if (tmq == NULL) {
diff --git a/source/client/src/clientEnv.c b/source/client/src/clientEnv.c
index 1342e89b523ac0a2ae2fc1eca7584aff2a340f1d..5792f498efddfdf43a7bab3ce3d47aebaa512a36 100644
--- a/source/client/src/clientEnv.c
+++ b/source/client/src/clientEnv.c
@@ -19,6 +19,7 @@
#include "functionMgt.h"
#include "os.h"
#include "query.h"
+#include "qworker.h"
#include "scheduler.h"
#include "tcache.h"
#include "tglobal.h"
@@ -69,25 +70,26 @@ static void deregisterRequest(SRequestObj *pRequest) {
int32_t currentInst = atomic_sub_fetch_64((int64_t *)&pActivity->currentRequests, 1);
int32_t num = atomic_sub_fetch_32(&pTscObj->numOfReqs, 1);
- int64_t nowUs = taosGetTimestampUs();
- int64_t duration = nowUs - pRequest->metric.start;
- tscDebug("0x%" PRIx64 " free Request from connObj: 0x%" PRIx64 ", reqId:0x%" PRIx64 " elapsed:%" PRIu64
- " ms, current:%d, app current:%d",
- pRequest->self, pTscObj->id, pRequest->requestId, duration / 1000, num, currentInst);
+ int64_t duration = taosGetTimestampUs() - pRequest->metric.start;
+ tscDebug("0x%" PRIx64 " free Request from connObj: 0x%" PRIx64 ", reqId:0x%" PRIx64 " elapsed:%.2f ms, "
+ "current:%d, app current:%d",
+ pRequest->self, pTscObj->id, pRequest->requestId, duration / 1000.0, num, currentInst);
if (QUERY_NODE_VNODE_MODIF_STMT == pRequest->stmtType) {
- tscPerf("insert duration %" PRId64 "us: syntax:%" PRId64 "us, ctg:%" PRId64 "us, semantic:%" PRId64 "us, exec:%" PRId64 "us",
- duration, pRequest->metric.syntaxEnd - pRequest->metric.syntaxStart,
- pRequest->metric.ctgEnd - pRequest->metric.ctgStart,
- pRequest->metric.semanticEnd - pRequest->metric.ctgEnd,
+ tscPerf("insert duration %" PRId64 "us: syntax:%" PRId64 "us, ctg:%" PRId64 "us, semantic:%" PRId64
+ "us, exec:%" PRId64 "us",
+ duration, pRequest->metric.syntaxEnd - pRequest->metric.syntaxStart,
+ pRequest->metric.ctgEnd - pRequest->metric.ctgStart, pRequest->metric.semanticEnd - pRequest->metric.ctgEnd,
pRequest->metric.execEnd - pRequest->metric.semanticEnd);
atomic_add_fetch_64((int64_t *)&pActivity->insertElapsedTime, duration);
} else if (QUERY_NODE_SELECT_STMT == pRequest->stmtType) {
- tscPerf("select duration %" PRId64 "us: syntax:%" PRId64 "us, ctg:%" PRId64 "us, semantic:%" PRId64 "us, exec:%" PRId64 "us",
- duration, pRequest->metric.syntaxEnd - pRequest->metric.syntaxStart,
- pRequest->metric.ctgEnd - pRequest->metric.ctgStart,
- pRequest->metric.semanticEnd - pRequest->metric.ctgEnd,
- pRequest->metric.execEnd - pRequest->metric.semanticEnd);
+ tscPerf("select duration %" PRId64 "us: syntax:%" PRId64 "us, ctg:%" PRId64 "us, semantic:%" PRId64
+ "us, planner:%" PRId64 "us, exec:%" PRId64 "us, reqId:0x%"PRIx64,
+ duration, pRequest->metric.syntaxEnd - pRequest->metric.syntaxStart,
+ pRequest->metric.ctgEnd - pRequest->metric.ctgStart, pRequest->metric.semanticEnd - pRequest->metric.ctgEnd,
+ pRequest->metric.planEnd - pRequest->metric.semanticEnd,
+ pRequest->metric.resultReady - pRequest->metric.planEnd, pRequest->requestId);
+
atomic_add_fetch_64((int64_t *)&pActivity->queryElapsedTime, duration);
}
@@ -287,6 +289,7 @@ void *createRequest(uint64_t connId, int32_t type) {
pRequest->body.resInfo.convertUcs4 = true; // convert ucs4 by default
pRequest->type = type;
+ pRequest->allocatorRefId = -1;
pRequest->pDb = getDbOfConnection(pTscObj);
pRequest->pTscObj = pTscObj;
@@ -348,6 +351,8 @@ void doDestroyRequest(void *p) {
taosArrayDestroy(pRequest->tableList);
taosArrayDestroy(pRequest->dbList);
taosArrayDestroy(pRequest->targetTableList);
+ qDestroyQuery(pRequest->pQuery);
+ nodesDestroyAllocator(pRequest->allocatorRefId);
destroyQueryExecRes(&pRequest->body.resInfo.execRes);
@@ -410,6 +415,7 @@ void taos_init_imp(void) {
initTaskQueue();
fmFuncMgtInit();
+ nodesInitAllocatorSet();
clientConnRefPool = taosOpenRef(200, destroyTscObj);
clientReqRefPool = taosOpenRef(40960, doDestroyRequest);
@@ -487,7 +493,7 @@ int taos_options_imp(TSDB_OPTION option, const char *str) {
*/
uint64_t generateRequestId() {
static uint64_t hashId = 0;
- static int32_t requestSerialId = 0;
+ static uint32_t requestSerialId = 0;
if (hashId == 0) {
char uid[64] = {0};
@@ -506,7 +512,8 @@ uint64_t generateRequestId() {
while (true) {
int64_t ts = taosGetTimestampMs();
uint64_t pid = taosGetPId();
- int32_t val = atomic_add_fetch_32(&requestSerialId, 1);
+ uint32_t val = atomic_add_fetch_32(&requestSerialId, 1);
+ if (val >= 0xFFFF) atomic_store_32(&requestSerialId, 0);
id = ((hashId & 0x0FFF) << 52) | ((pid & 0x0FFF) << 40) | ((ts & 0xFFFFFF) << 16) | (val & 0xFFFF);
if (id) {
diff --git a/source/client/src/clientHb.c b/source/client/src/clientHb.c
index 9475d1b51e51d093bcf7335d1668908e0c039a80..096e2cdac8b892673de7e991f64a60dcb7dc8f6d 100644
--- a/source/client/src/clientHb.c
+++ b/source/client/src/clientHb.c
@@ -73,6 +73,8 @@ static int32_t hbProcessDBInfoRsp(void *value, int32_t valueLen, struct SCatalog
vgInfo->vgVersion = rsp->vgVersion;
vgInfo->hashMethod = rsp->hashMethod;
+ vgInfo->hashPrefix = rsp->hashPrefix;
+ vgInfo->hashSuffix = rsp->hashSuffix;
vgInfo->vgHash = taosHashInit(rsp->vgNum, taosGetDefaultHashFunction(TSDB_DATA_TYPE_INT), true, HASH_ENTRY_LOCK);
if (NULL == vgInfo->vgHash) {
taosMemoryFree(vgInfo);
@@ -145,7 +147,7 @@ static int32_t hbProcessStbInfoRsp(void *value, int32_t valueLen, struct SCatalo
}
static int32_t hbQueryHbRspHandle(SAppHbMgr *pAppHbMgr, SClientHbRsp *pRsp) {
- SClientHbReq *pReq = taosHashGet(pAppHbMgr->activeInfo, &pRsp->connKey, sizeof(SClientHbKey));
+ SClientHbReq *pReq = taosHashAcquire(pAppHbMgr->activeInfo, &pRsp->connKey, sizeof(SClientHbKey));
if (NULL == pReq) {
tscWarn("pReq to get activeInfo, may be dropped, refId:%" PRIx64 ", type:%d", pRsp->connKey.tscRid,
pRsp->connKey.connType);
@@ -171,7 +173,8 @@ static int32_t hbQueryHbRspHandle(SAppHbMgr *pAppHbMgr, SClientHbRsp *pRsp) {
pTscObj->pAppInfo->totalDnodes = pRsp->query->totalDnodes;
pTscObj->pAppInfo->onlineDnodes = pRsp->query->onlineDnodes;
pTscObj->connId = pRsp->query->connId;
- tscTrace("conn %p hb rsp, dnodes %d/%d", pTscObj->connId, pTscObj->pAppInfo->onlineDnodes, pTscObj->pAppInfo->totalDnodes);
+ tscTrace("conn %p hb rsp, dnodes %d/%d", pTscObj->connId, pTscObj->pAppInfo->onlineDnodes,
+ pTscObj->pAppInfo->totalDnodes);
if (pRsp->query->killRid) {
tscDebug("request rid %" PRIx64 " need to be killed now", pRsp->query->killRid);
@@ -260,6 +263,8 @@ static int32_t hbQueryHbRspHandle(SAppHbMgr *pAppHbMgr, SClientHbRsp *pRsp) {
}
}
+ taosHashRelease(pAppHbMgr->activeInfo, pReq);
+
return TSDB_CODE_SUCCESS;
}
@@ -269,13 +274,13 @@ static int32_t hbAsyncCallBack(void *param, SDataBuf *pMsg, int32_t code) {
SClientHbBatchRsp pRsp = {0};
if (TSDB_CODE_SUCCESS == code) {
tDeserializeSClientHbBatchRsp(pMsg->pData, pMsg->len, &pRsp);
- }
- int32_t now = taosGetTimestampSec();
- int32_t delta = abs(now - pRsp.svrTimestamp);
- if (delta > timestampDeltaLimit) {
- code = TSDB_CODE_TIME_UNSYNCED;
- tscError("time diff: %ds is too big", delta);
+ int32_t now = taosGetTimestampSec();
+ int32_t delta = abs(now - pRsp.svrTimestamp);
+ if (delta > timestampDeltaLimit) {
+ code = TSDB_CODE_TIME_UNSYNCED;
+ tscError("time diff: %ds is too big", delta);
+ }
}
int32_t rspNum = taosArrayGetSize(pRsp.rsps);
@@ -293,7 +298,8 @@ static int32_t hbAsyncCallBack(void *param, SDataBuf *pMsg, int32_t code) {
if (code != 0) {
(*pInst)->onlineDnodes = ((*pInst)->totalDnodes ? 0 : -1);
- tscDebug("hb rsp error %s, update server status %d/%d", tstrerror(code), (*pInst)->onlineDnodes, (*pInst)->totalDnodes);
+ tscDebug("hb rsp error %s, update server status %d/%d", tstrerror(code), (*pInst)->onlineDnodes,
+ (*pInst)->totalDnodes);
}
if (rspNum) {
@@ -410,6 +416,9 @@ int32_t hbGetQueryBasicInfo(SClientHbKey *connKey, SClientHbReq *req) {
int32_t code = hbBuildQueryDesc(hbBasic, pTscObj);
if (code) {
releaseTscObj(connKey->tscRid);
+ if (hbBasic->queryDesc) {
+ taosArrayDestroyEx(hbBasic->queryDesc, tFreeClientHbQueryDesc);
+ }
taosMemoryFree(hbBasic);
return code;
}
@@ -597,28 +606,34 @@ SClientHbBatchReq *hbGatherAllInfo(SAppHbMgr *pAppHbMgr) {
int32_t connKeyCnt = atomic_load_32(&pAppHbMgr->connKeyCnt);
pBatchReq->reqs = taosArrayInit(connKeyCnt, sizeof(SClientHbReq));
+ int64_t rid = -1;
int32_t code = 0;
- void *pIter = taosHashIterate(pAppHbMgr->activeInfo, NULL);
- while (pIter != NULL) {
- SClientHbReq *pOneReq = pIter;
- pOneReq = taosArrayPush(pBatchReq->reqs, pOneReq);
+ void *pIter = taosHashIterate(pAppHbMgr->activeInfo, NULL);
+ SClientHbReq *pOneReq = pIter;
+ SClientHbKey *connKey = pOneReq ? &pOneReq->connKey : NULL;
+ if (connKey != NULL) rid = connKey->tscRid;
+
+ STscObj *pTscObj = (STscObj *)acquireTscObj(rid);
+ if (pTscObj == NULL) {
+ tFreeClientHbBatchReq(pBatchReq);
+ return NULL;
+ }
+
+ while (pIter != NULL) {
+ pOneReq = taosArrayPush(pBatchReq->reqs, pOneReq);
code = (*clientHbMgr.reqHandle[pOneReq->connKey.connType])(&pOneReq->connKey, &pOneReq->clusterId, pOneReq);
if (code) {
pIter = taosHashIterate(pAppHbMgr->activeInfo, pIter);
+ pOneReq = pIter;
continue;
}
- // hbClearClientHbReq(pOneReq);
-
pIter = taosHashIterate(pAppHbMgr->activeInfo, pIter);
+ pOneReq = pIter;
}
-
- // if (code) {
- // taosArrayDestroyEx(pBatchReq->reqs, hbFreeReq);
- // taosMemoryFreeClear(pBatchReq);
- // }
+ releaseTscObj(rid);
return pBatchReq;
}
@@ -650,6 +665,8 @@ int32_t hbGatherAppInfo(void) {
for (int32_t i = 0; i < sz; ++i) {
SAppHbMgr *pAppHbMgr = taosArrayGetP(clientHbMgr.appHbMgrs, i);
+ if (pAppHbMgr == NULL) continue;
+
uint64_t clusterId = pAppHbMgr->pAppInstInfo->clusterId;
SAppHbReq *pApp = taosHashGet(clientHbMgr.appSummary, &clusterId, sizeof(clusterId));
if (NULL == pApp) {
@@ -687,15 +704,21 @@ static void *hbThreadFunc(void *param) {
hbGatherAppInfo();
}
+ SArray *mgr = taosArrayInit(sz, sizeof(void *));
for (int i = 0; i < sz; i++) {
SAppHbMgr *pAppHbMgr = taosArrayGetP(clientHbMgr.appHbMgrs, i);
+ if (pAppHbMgr == NULL) {
+ continue;
+ }
int32_t connCnt = atomic_load_32(&pAppHbMgr->connKeyCnt);
if (connCnt == 0) {
+ taosArrayPush(mgr, &pAppHbMgr);
continue;
}
SClientHbBatchReq *pReq = hbGatherAllInfo(pAppHbMgr);
- if (pReq == NULL) {
+ if (pReq == NULL || taosArrayGetP(clientHbMgr.appHbMgrs, i) == NULL) {
+ tFreeClientHbBatchReq(pReq);
continue;
}
int tlen = tSerializeSClientHbBatchReq(NULL, 0, pReq);
@@ -704,6 +727,7 @@ static void *hbThreadFunc(void *param) {
terrno = TSDB_CODE_TSC_OUT_OF_MEMORY;
tFreeClientHbBatchReq(pReq);
// hbClearReqInfo(pAppHbMgr);
+ taosArrayPush(mgr, &pAppHbMgr);
break;
}
@@ -715,6 +739,7 @@ static void *hbThreadFunc(void *param) {
tFreeClientHbBatchReq(pReq);
// hbClearReqInfo(pAppHbMgr);
taosMemoryFree(buf);
+ taosArrayPush(mgr, &pAppHbMgr);
break;
}
pInfo->fp = hbAsyncCallBack;
@@ -722,7 +747,7 @@ static void *hbThreadFunc(void *param) {
pInfo->msgInfo.len = tlen;
pInfo->msgType = TDMT_MND_HEARTBEAT;
pInfo->param = strdup(pAppHbMgr->key);
- pInfo->paramFreeFp = taosMemoryFree;
+ pInfo->paramFreeFp = taosMemoryFree;
pInfo->requestId = generateRequestId();
pInfo->requestObjRefId = 0;
@@ -734,8 +759,12 @@ static void *hbThreadFunc(void *param) {
// hbClearReqInfo(pAppHbMgr);
atomic_add_fetch_32(&pAppHbMgr->reportCnt, 1);
+ taosArrayPush(mgr, &pAppHbMgr);
}
+ taosArrayDestroy(clientHbMgr.appHbMgrs);
+ clientHbMgr.appHbMgrs = mgr;
+
taosThreadMutexUnlock(&clientHbMgr.lock);
taosMsleep(HEARTBEAT_INTERVAL);
@@ -827,7 +856,7 @@ void hbRemoveAppHbMrg(SAppHbMgr **pAppHbMgr) {
if (pItem == *pAppHbMgr) {
hbFreeAppHbMgr(*pAppHbMgr);
*pAppHbMgr = NULL;
- taosArrayRemove(clientHbMgr.appHbMgrs, i);
+ taosArraySet(clientHbMgr.appHbMgrs, i, pAppHbMgr);
break;
}
}
@@ -838,6 +867,7 @@ void appHbMgrCleanup(void) {
int sz = taosArrayGetSize(clientHbMgr.appHbMgrs);
for (int i = 0; i < sz; i++) {
SAppHbMgr *pTarget = taosArrayGetP(clientHbMgr.appHbMgrs, i);
+ if (pTarget == NULL) continue;
hbFreeAppHbMgr(pTarget);
}
}
@@ -852,7 +882,20 @@ int hbMgrInit() {
clientHbMgr.appSummary = taosHashInit(10, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), false, HASH_NO_LOCK);
clientHbMgr.appHbMgrs = taosArrayInit(0, sizeof(void *));
- taosThreadMutexInit(&clientHbMgr.lock, NULL);
+
+ TdThreadMutexAttr attr = {0};
+
+ int ret = taosThreadMutexAttrInit(&attr);
+ assert(ret == 0);
+
+ ret = taosThreadMutexAttrSetType(&attr, PTHREAD_MUTEX_RECURSIVE);
+ assert(ret == 0);
+
+ ret = taosThreadMutexInit(&clientHbMgr.lock, &attr);
+ assert(ret == 0);
+
+ ret = taosThreadMutexAttrDestroy(&attr);
+ assert(ret == 0);
// init handle funcs
hbMgrInitHandle();
@@ -914,10 +957,11 @@ int hbRegisterConn(SAppHbMgr *pAppHbMgr, int64_t tscRefId, int64_t clusterId, in
}
void hbDeregisterConn(SAppHbMgr *pAppHbMgr, SClientHbKey connKey) {
- SClientHbReq *pReq = taosHashGet(pAppHbMgr->activeInfo, &connKey, sizeof(SClientHbKey));
+ SClientHbReq *pReq = taosHashAcquire(pAppHbMgr->activeInfo, &connKey, sizeof(SClientHbKey));
if (pReq) {
tFreeClientHbReq(pReq);
taosHashRemove(pAppHbMgr->activeInfo, &connKey, sizeof(SClientHbKey));
+ taosHashRelease(pAppHbMgr->activeInfo, pReq);
}
if (NULL == pReq) {
diff --git a/source/client/src/clientImpl.c b/source/client/src/clientImpl.c
index 2baca1288fe086388cc3e5a424ab289586a864f5..594200b5fb637e4fc6f72cdea1da92f61821b6f3 100644
--- a/source/client/src/clientImpl.c
+++ b/source/client/src/clientImpl.c
@@ -189,11 +189,25 @@ int32_t buildRequest(uint64_t connId, const char* sql, int sqlLen, void* param,
tscError("%d failed to add to request container, reqId:0x%" PRIx64 ", conn:%d, %s", (*pRequest)->self,
(*pRequest)->requestId, pTscObj->id, sql);
+ taosMemoryFree(param);
destroyRequest(*pRequest);
*pRequest = NULL;
return TSDB_CODE_TSC_OUT_OF_MEMORY;
}
+ (*pRequest)->allocatorRefId = -1;
+ if (tsQueryUseNodeAllocator && !qIsInsertValuesSql((*pRequest)->sqlstr, (*pRequest)->sqlLen)) {
+ if (TSDB_CODE_SUCCESS !=
+ nodesCreateAllocator((*pRequest)->requestId, tsQueryNodeChunkSize, &((*pRequest)->allocatorRefId))) {
+ tscError("%d failed to create node allocator, reqId:0x%" PRIx64 ", conn:%d, %s", (*pRequest)->self,
+ (*pRequest)->requestId, pTscObj->id, sql);
+
+ destroyRequest(*pRequest);
+ *pRequest = NULL;
+ return TSDB_CODE_TSC_OUT_OF_MEMORY;
+ }
+ }
+
tscDebugL("0x%" PRIx64 " SQL: %s, reqId:0x%" PRIx64, (*pRequest)->self, (*pRequest)->sqlstr, (*pRequest)->requestId);
return TSDB_CODE_SUCCESS;
}
@@ -365,7 +379,7 @@ int32_t updateQnodeList(SAppInstInfo* pInfo, SArray* pNodeList) {
}
bool qnodeRequired(SRequestObj* pRequest) {
- if (QUERY_POLICY_VNODE == tsQueryPolicy) {
+ if (QUERY_POLICY_VNODE == tsQueryPolicy || QUERY_POLICY_CLIENT == tsQueryPolicy) {
return false;
}
@@ -438,6 +452,7 @@ void setResSchemaInfo(SReqResultInfo* pResInfo, const SSchema* pSchema, int32_t
}
pResInfo->fields = taosMemoryCalloc(numOfCols, sizeof(TAOS_FIELD));
pResInfo->userFields = taosMemoryCalloc(numOfCols, sizeof(TAOS_FIELD));
+ ASSERT(numOfCols == pResInfo->numOfCols);
for (int32_t i = 0; i < pResInfo->numOfCols; ++i) {
pResInfo->fields[i].bytes = pSchema[i].bytes;
@@ -468,6 +483,7 @@ void setResPrecision(SReqResultInfo* pResInfo, int32_t precision) {
int32_t buildVnodePolicyNodeList(SRequestObj* pRequest, SArray** pNodeList, SArray* pMnodeList, SArray* pDbVgList) {
SArray* nodeList = taosArrayInit(4, sizeof(SQueryNodeLoad));
+ char* policy = (tsQueryPolicy == QUERY_POLICY_VNODE) ? "vnode" : "client";
int32_t dbNum = taosArrayGetSize(pDbVgList);
for (int32_t i = 0; i < dbNum; ++i) {
@@ -489,20 +505,20 @@ int32_t buildVnodePolicyNodeList(SRequestObj* pRequest, SArray** pNodeList, SArr
int32_t vnodeNum = taosArrayGetSize(nodeList);
if (vnodeNum > 0) {
- tscDebug("0x%" PRIx64 " vnode policy, use vnode list, num:%d", pRequest->requestId, vnodeNum);
+ tscDebug("0x%" PRIx64 " %s policy, use vnode list, num:%d", pRequest->requestId, policy, vnodeNum);
goto _return;
}
int32_t mnodeNum = taosArrayGetSize(pMnodeList);
if (mnodeNum <= 0) {
- tscDebug("0x%" PRIx64 " vnode policy, empty node list", pRequest->requestId);
+ tscDebug("0x%" PRIx64 " %s policy, empty node list", pRequest->requestId, policy);
goto _return;
}
void* pData = taosArrayGet(pMnodeList, 0);
taosArrayAddBatch(nodeList, pData, mnodeNum);
- tscDebug("0x%" PRIx64 " vnode policy, use mnode list, num:%d", pRequest->requestId, mnodeNum);
+ tscDebug("0x%" PRIx64 " %s policy, use mnode list, num:%d", pRequest->requestId, policy, mnodeNum);
_return:
@@ -546,7 +562,8 @@ int32_t buildAsyncExecNodeList(SRequestObj* pRequest, SArray** pNodeList, SArray
int32_t code = 0;
switch (tsQueryPolicy) {
- case QUERY_POLICY_VNODE: {
+ case QUERY_POLICY_VNODE:
+ case QUERY_POLICY_CLIENT: {
if (pResultMeta) {
pDbVgList = taosArrayInit(4, POINTER_BYTES);
@@ -607,7 +624,8 @@ int32_t buildSyncExecNodeList(SRequestObj* pRequest, SArray** pNodeList, SArray*
int32_t code = 0;
switch (tsQueryPolicy) {
- case QUERY_POLICY_VNODE: {
+ case QUERY_POLICY_VNODE:
+ case QUERY_POLICY_CLIENT: {
int32_t dbNum = taosArrayGetSize(pRequest->dbList);
if (dbNum > 0) {
SCatalog* pCtg = NULL;
@@ -667,6 +685,7 @@ int32_t scheduleQuery(SRequestObj* pRequest, SQueryPlan* pDag, SArray* pNodeList
.requestObjRefId = pRequest->self};
SSchedulerReq req = {
.syncReq = true,
+ .localReq = (tsQueryPolicy == QUERY_POLICY_CLIENT),
.pConn = &conn,
.pNodeList = pNodeList,
.pDag = pDag,
@@ -680,6 +699,8 @@ int32_t scheduleQuery(SRequestObj* pRequest, SQueryPlan* pDag, SArray* pNodeList
};
int32_t code = schedulerExecJob(&req, &pRequest->body.queryJob);
+
+ destroyQueryExecRes(&pRequest->body.resInfo.execRes);
memcpy(&pRequest->body.resInfo.execRes, &res, sizeof(res));
if (code != TSDB_CODE_SUCCESS) {
@@ -728,7 +749,7 @@ int32_t handleSubmitExecRes(SRequestObj* pRequest, void* res, SCatalog* pCatalog
tFreeSTableMetaRsp(blk->pMeta);
taosMemoryFreeClear(blk->pMeta);
}
-
+
if (NULL == blk->tblFName || 0 == blk->tblFName[0]) {
continue;
}
@@ -794,7 +815,7 @@ int32_t handleCreateTbExecRes(void* res, SCatalog* pCatalog) {
int32_t handleQueryExecRsp(SRequestObj* pRequest) {
if (NULL == pRequest->body.resInfo.execRes.res) {
- return TSDB_CODE_SUCCESS;
+ return pRequest->code;
}
SCatalog* pCatalog = NULL;
@@ -847,41 +868,45 @@ int32_t handleQueryExecRsp(SRequestObj* pRequest) {
return code;
}
+//todo refacto the error code mgmt
void schedulerExecCb(SExecResult* pResult, void* param, int32_t code) {
SRequestObj* pRequest = (SRequestObj*)param;
+ STscObj* pTscObj = pRequest->pTscObj;
+
pRequest->code = code;
+ pRequest->metric.resultReady = taosGetTimestampUs();
if (pResult) {
+ destroyQueryExecRes(&pRequest->body.resInfo.execRes);
memcpy(&pRequest->body.resInfo.execRes, pResult, sizeof(*pResult));
}
- if (TDMT_VND_SUBMIT == pRequest->type || TDMT_VND_DELETE == pRequest->type ||
- TDMT_VND_CREATE_TABLE == pRequest->type) {
+ int32_t type = pRequest->type;
+ if (TDMT_VND_SUBMIT == type || TDMT_VND_DELETE == type || TDMT_VND_CREATE_TABLE == type) {
if (pResult) {
pRequest->body.resInfo.numOfRows = pResult->numOfRows;
- if (TDMT_VND_SUBMIT == pRequest->type) {
- STscObj* pTscObj = pRequest->pTscObj;
+
+ // record the insert rows
+ if (TDMT_VND_SUBMIT == type) {
SAppClusterSummary* pActivity = &pTscObj->pAppInfo->summary;
atomic_add_fetch_64((int64_t*)&pActivity->numOfInsertRows, pResult->numOfRows);
}
}
schedulerFreeJob(&pRequest->body.queryJob, 0);
-
- pRequest->metric.execEnd = taosGetTimestampUs();
}
taosMemoryFree(pResult);
+ tscDebug("0x%" PRIx64 " enter scheduler exec cb, code:%s, reqId:0x%" PRIx64, pRequest->self, tstrerror(code),
+ pRequest->requestId);
- tscDebug("0x%" PRIx64 " enter scheduler exec cb, code:%d - %s, reqId:0x%" PRIx64, pRequest->self, code,
- tstrerror(code), pRequest->requestId);
-
- STscObj* pTscObj = pRequest->pTscObj;
if (code != TSDB_CODE_SUCCESS && NEED_CLIENT_HANDLE_ERROR(code) && pRequest->sqlstr != NULL) {
- tscDebug("0x%" PRIx64 " client retry to handle the error, code:%d - %s, tryCount:%d, reqId:0x%" PRIx64,
- pRequest->self, code, tstrerror(code), pRequest->retry, pRequest->requestId);
+ tscDebug("0x%" PRIx64 " client retry to handle the error, code:%s, tryCount:%d, reqId:0x%" PRIx64,
+ pRequest->self, tstrerror(code), pRequest->retry, pRequest->requestId);
pRequest->prevCode = code;
schedulerFreeJob(&pRequest->body.queryJob, 0);
+ qDestroyQuery(pRequest->pQuery);
+ pRequest->pQuery = NULL;
doAsyncQuery(pRequest, true);
return;
}
@@ -891,7 +916,11 @@ void schedulerExecCb(SExecResult* pResult, void* param, int32_t code) {
removeMeta(pTscObj, pRequest->targetTableList);
}
- handleQueryExecRsp(pRequest);
+ pRequest->metric.execEnd = taosGetTimestampUs();
+ int32_t code1 = handleQueryExecRsp(pRequest);
+ if (pRequest->code == TSDB_CODE_SUCCESS && pRequest->code != code1) {
+ pRequest->code = code1;
+ }
// return to client
pRequest->body.queryFp(pRequest->body.param, pRequest, code);
@@ -1018,7 +1047,8 @@ void launchAsyncQuery(SRequestObj* pRequest, SQuery* pQuery, SMetaData* pResultM
.pMsg = pRequest->msgBuf,
.msgLen = ERROR_MSG_BUF_DEFAULT_SIZE,
.pUser = pRequest->pTscObj->user,
- .sysInfo = pRequest->pTscObj->sysInfo};
+ .sysInfo = pRequest->pTscObj->sysInfo,
+ .allocatorId = pRequest->allocatorRefId};
SAppInstInfo* pAppInfo = getAppInfo(pRequest);
SQueryPlan* pDag = NULL;
@@ -1030,6 +1060,8 @@ void launchAsyncQuery(SRequestObj* pRequest, SQuery* pQuery, SMetaData* pResultM
pRequest->body.subplanNum = pDag->numOfSubplans;
}
+ pRequest->metric.planEnd = taosGetTimestampUs();
+
if (TSDB_CODE_SUCCESS == code && !pRequest->validateOnly) {
SArray* pNodeList = NULL;
buildAsyncExecNodeList(pRequest, &pNodeList, pMnodeList, pResultMeta);
@@ -1038,9 +1070,11 @@ void launchAsyncQuery(SRequestObj* pRequest, SQuery* pQuery, SMetaData* pResultM
.pTrans = pAppInfo->pTransporter, .requestId = pRequest->requestId, .requestObjRefId = pRequest->self};
SSchedulerReq req = {
.syncReq = false,
+ .localReq = (tsQueryPolicy == QUERY_POLICY_CLIENT),
.pConn = &conn,
.pNodeList = pNodeList,
.pDag = pDag,
+ .allocatorRefId = pRequest->allocatorRefId,
.sql = pRequest->sqlstr,
.startTs = pRequest->metric.start,
.execFp = schedulerExecCb,
@@ -1380,6 +1414,7 @@ int32_t doProcessMsgFromServer(void* param) {
pSendInfo->fp(pSendInfo->param, &buf, pMsg->code);
rpcFreeCont(pMsg->pCont);
destroySendMsgInfo(pSendInfo);
+
taosMemoryFree(arg);
return TSDB_CODE_SUCCESS;
}
@@ -1395,7 +1430,12 @@ void processMsgFromServer(void* parent, SRpcMsg* pMsg, SEpSet* pEpSet) {
arg->msg = *pMsg;
arg->pEpset = tEpSet;
- taosAsyncExec(doProcessMsgFromServer, arg, NULL);
+ if (0 != taosAsyncExec(doProcessMsgFromServer, arg, NULL)) {
+ tscError("failed to sched msg to tsc, tsc ready to quit");
+ rpcFreeCont(pMsg->pCont);
+ taosMemoryFree(arg->pEpset);
+ taosMemoryFree(arg);
+ }
}
TAOS* taos_connect_auth(const char* ip, const char* user, const char* auth, const char* db, uint16_t port) {
@@ -1668,7 +1708,12 @@ static int32_t doConvertJson(SReqResultInfo* pResultInfo, int32_t numOfCols, int
break;
}
}
- if (!needConvert) return TSDB_CODE_SUCCESS;
+
+ if (!needConvert) {
+ return TSDB_CODE_SUCCESS;
+ }
+
+ tscDebug("start to convert form json format string");
char* p = (char*)pResultInfo->pData;
int32_t dataLen = estimateJsonLen(pResultInfo, numOfCols, numOfRows);
diff --git a/source/client/src/clientMain.c b/source/client/src/clientMain.c
index 9ceb6e068375a67cb1230200e28aad6a74395661..5ec4ddf2e53dfa77e9f7afb68738b84b1ac5731c 100644
--- a/source/client/src/clientMain.c
+++ b/source/client/src/clientMain.c
@@ -20,6 +20,7 @@
#include "functionMgt.h"
#include "os.h"
#include "query.h"
+#include "qworker.h"
#include "scheduler.h"
#include "tglobal.h"
#include "tmsg.h"
@@ -65,6 +66,7 @@ void taos_cleanup(void) {
fmFuncMgtDestroy();
qCleanupKeywordsTable();
+ nodesDestroyAllocatorSet();
id = clientConnRefPool;
clientConnRefPool = -1;
@@ -148,7 +150,7 @@ int taos_errno(TAOS_RES *res) {
return terrno;
}
- if (TD_RES_TMQ(res)) {
+ if (TD_RES_TMQ(res) || TD_RES_TMQ_METADATA(res)) {
return 0;
}
@@ -162,7 +164,7 @@ const char *taos_errstr(TAOS_RES *res) {
return (const char *)tstrerror(terrno);
}
- if (TD_RES_TMQ(res)) {
+ if (TD_RES_TMQ(res) || TD_RES_TMQ_METADATA(res)) {
return "success";
}
@@ -184,6 +186,19 @@ void taos_free_result(TAOS_RES *res) {
SRequestObj *pRequest = (SRequestObj *)res;
tscDebug("0x%" PRIx64 " taos_free_result start to free query", pRequest->requestId);
destroyRequest(pRequest);
+ } else if (TD_RES_TMQ_METADATA(res)) {
+ SMqTaosxRspObj *pRsp = (SMqTaosxRspObj *)res;
+ if (pRsp->rsp.blockData) taosArrayDestroyP(pRsp->rsp.blockData, taosMemoryFree);
+ if (pRsp->rsp.blockDataLen) taosArrayDestroy(pRsp->rsp.blockDataLen);
+ if (pRsp->rsp.withTbName) taosArrayDestroyP(pRsp->rsp.blockTbName, taosMemoryFree);
+ if (pRsp->rsp.withSchema) taosArrayDestroyP(pRsp->rsp.blockSchema, (FDelete)tDeleteSSchemaWrapper);
+ // taosx
+ taosArrayDestroy(pRsp->rsp.createTableLen);
+ taosArrayDestroyP(pRsp->rsp.createTableReq, taosMemoryFree);
+
+ pRsp->resInfo.pRspMsg = NULL;
+ doFreeReqResultInfo(&pRsp->resInfo);
+ taosMemoryFree(pRsp);
} else if (TD_RES_TMQ(res)) {
SMqRspObj *pRsp = (SMqRspObj *)res;
if (pRsp->rsp.blockData) taosArrayDestroyP(pRsp->rsp.blockData, taosMemoryFree);
@@ -251,7 +266,7 @@ TAOS_ROW taos_fetch_row(TAOS_RES *res) {
return doFetchRows(pRequest, true, true);
#endif
- } else if (TD_RES_TMQ(res)) {
+ } else if (TD_RES_TMQ(res) || TD_RES_TMQ_METADATA(res)) {
SMqRspObj *msg = ((SMqRspObj *)res);
SReqResultInfo *pResultInfo;
if (msg->resIter == -1) {
@@ -424,7 +439,7 @@ const char *taos_data_type(int type) {
const char *taos_get_client_info() { return version; }
int taos_affected_rows(TAOS_RES *res) {
- if (res == NULL || TD_RES_TMQ(res) || TD_RES_TMQ_META(res)) {
+ if (res == NULL || TD_RES_TMQ(res) || TD_RES_TMQ_META(res) || TD_RES_TMQ_METADATA(res)) {
return 0;
}
@@ -441,7 +456,7 @@ int taos_result_precision(TAOS_RES *res) {
if (TD_RES_QUERY(res)) {
SRequestObj *pRequest = (SRequestObj *)res;
return pRequest->body.resInfo.precision;
- } else if (TD_RES_TMQ(res)) {
+ } else if (TD_RES_TMQ(res) || TD_RES_TMQ_METADATA(res)) {
SReqResultInfo *info = tmqGetCurResInfo(res);
return info->precision;
}
@@ -474,7 +489,7 @@ int taos_select_db(TAOS *taos, const char *db) {
}
void taos_stop_query(TAOS_RES *res) {
- if (res == NULL || TD_RES_TMQ(res) || TD_RES_TMQ_META(res)) {
+ if (res == NULL || TD_RES_TMQ(res) || TD_RES_TMQ_META(res) || TD_RES_TMQ_METADATA(res)) {
return;
}
@@ -546,7 +561,7 @@ int taos_fetch_block_s(TAOS_RES *res, int *numOfRows, TAOS_ROW *rows) {
(*rows) = pResultInfo->row;
(*numOfRows) = pResultInfo->numOfRows;
return pRequest->code;
- } else if (TD_RES_TMQ(res)) {
+ } else if (TD_RES_TMQ(res) || TD_RES_TMQ_METADATA(res)) {
SReqResultInfo *pResultInfo = tmqGetNextResInfo(res, true);
if (pResultInfo == NULL) return -1;
@@ -565,7 +580,7 @@ int taos_fetch_raw_block(TAOS_RES *res, int *numOfRows, void **pData) {
return 0;
}
- if (TD_RES_TMQ(res)) {
+ if (TD_RES_TMQ(res) || TD_RES_TMQ_METADATA(res)) {
SReqResultInfo *pResultInfo = tmqGetNextResInfo(res, false);
if (pResultInfo == NULL) {
(*numOfRows) = 0;
@@ -656,7 +671,6 @@ typedef struct SqlParseWrapper {
SParseContext *pCtx;
SCatalogReq catalogReq;
SRequestObj *pRequest;
- SQuery *pQuery;
} SqlParseWrapper;
static void destoryTablesReq(void *p) {
@@ -682,10 +696,11 @@ static void destorySqlParseWrapper(SqlParseWrapper *pWrapper) {
void retrieveMetaCallback(SMetaData *pResultMeta, void *param, int32_t code) {
SqlParseWrapper *pWrapper = (SqlParseWrapper *)param;
- SQuery *pQuery = pWrapper->pQuery;
SRequestObj *pRequest = pWrapper->pRequest;
+ SQuery *pQuery = pRequest->pQuery;
pRequest->metric.ctgEnd = taosGetTimestampUs();
+ qDebug("0x%" PRIx64 " start to semantic analysis, reqId:0x%" PRIx64, pRequest->self, pRequest->requestId);
if (code == TSDB_CODE_SUCCESS) {
code = qAnalyseSqlSemantic(pWrapper->pCtx, &pWrapper->catalogReq, pResultMeta, pQuery);
@@ -709,13 +724,16 @@ void retrieveMetaCallback(SMetaData *pResultMeta, void *param, int32_t code) {
destorySqlParseWrapper(pWrapper);
- tscDebug("0x%" PRIx64 " analysis semantics completed, start async query, reqId:0x%" PRIx64, pRequest->self,
- pRequest->requestId);
+ double el = (pRequest->metric.semanticEnd - pRequest->metric.ctgEnd)/1000.0;
+ tscDebug("0x%" PRIx64 " analysis semantics completed, start async query, elapsed time:%.2f ms, reqId:0x%" PRIx64,
+ pRequest->self, el, pRequest->requestId);
+
launchAsyncQuery(pRequest, pQuery, pResultMeta);
- qDestroyQuery(pQuery);
} else {
destorySqlParseWrapper(pWrapper);
- qDestroyQuery(pQuery);
+ qDestroyQuery(pRequest->pQuery);
+ pRequest->pQuery = NULL;
+
if (NEED_CLIENT_HANDLE_ERROR(code)) {
tscDebug("0x%" PRIx64 " client retry to handle the error, code:%d - %s, tryCount:%d, reqId:0x%" PRIx64,
pRequest->self, code, tstrerror(code), pRequest->retry, pRequest->requestId);
@@ -762,7 +780,8 @@ int32_t createParseContext(const SRequestObj *pRequest, SParseContext **pCxt) {
.enableSysInfo = pTscObj->sysInfo,
.async = true,
.svrVer = pTscObj->sVer,
- .nodeOffline = (pTscObj->pAppInfo->onlineDnodes < pTscObj->pAppInfo->totalDnodes)};
+ .nodeOffline = (pTscObj->pAppInfo->onlineDnodes < pTscObj->pAppInfo->totalDnodes),
+ .allocatorId = pRequest->allocatorRefId};
return TSDB_CODE_SUCCESS;
}
@@ -787,12 +806,10 @@ void doAsyncQuery(SRequestObj *pRequest, bool updateMetaForce) {
goto _error;
}
- SQuery *pQuery = NULL;
-
pRequest->metric.syntaxStart = taosGetTimestampUs();
SCatalogReq catalogReq = {.forceUpdate = updateMetaForce, .qNodeRequired = qnodeRequired(pRequest)};
- code = qParseSqlSyntax(pCxt, &pQuery, &catalogReq);
+ code = qParseSqlSyntax(pCxt, &pRequest->pQuery, &catalogReq);
if (code != TSDB_CODE_SUCCESS) {
goto _error;
}
@@ -802,9 +819,9 @@ void doAsyncQuery(SRequestObj *pRequest, bool updateMetaForce) {
if (!updateMetaForce) {
STscObj *pTscObj = pRequest->pTscObj;
SAppClusterSummary *pActivity = &pTscObj->pAppInfo->summary;
- if (NULL == pQuery->pRoot) {
+ if (NULL == pRequest->pQuery->pRoot) {
atomic_add_fetch_64((int64_t *)&pActivity->numOfInsertsReq, 1);
- } else if (QUERY_NODE_SELECT_STMT == pQuery->pRoot->type) {
+ } else if (QUERY_NODE_SELECT_STMT == pRequest->pQuery->pRoot->type) {
atomic_add_fetch_64((int64_t *)&pActivity->numOfQueryReq, 1);
}
}
@@ -816,7 +833,6 @@ void doAsyncQuery(SRequestObj *pRequest, bool updateMetaForce) {
}
pWrapper->pCtx = pCxt;
- pWrapper->pQuery = pQuery;
pWrapper->pRequest = pRequest;
pWrapper->catalogReq = catalogReq;
@@ -857,11 +873,13 @@ static void fetchCallback(void *pResult, void *param, int32_t code) {
if (code != TSDB_CODE_SUCCESS) {
pRequest->code = code;
+ taosMemoryFreeClear(pResultInfo->pData);
pRequest->body.fetchFp(pRequest->body.param, pRequest, 0);
return;
}
if (pRequest->code != TSDB_CODE_SUCCESS) {
+ taosMemoryFreeClear(pResultInfo->pData);
pRequest->body.fetchFp(pRequest->body.param, pRequest, 0);
return;
}
diff --git a/source/client/src/clientMsgHandler.c b/source/client/src/clientMsgHandler.c
index a7a16d484ca10a8baa65419105f42e46dc3814f3..cdf977bea328a0bdf35789f24edb0addce46e087 100644
--- a/source/client/src/clientMsgHandler.c
+++ b/source/client/src/clientMsgHandler.c
@@ -34,6 +34,7 @@ int32_t genericRspCallback(void* param, SDataBuf* pMsg, int32_t code) {
removeMeta(pRequest->pTscObj, pRequest->targetTableList);
}
+ taosMemoryFree(pMsg->pEpSet);
taosMemoryFree(pMsg->pData);
if (pRequest->body.queryFp != NULL) {
pRequest->body.queryFp(pRequest->body.param, pRequest, code);
@@ -46,6 +47,7 @@ int32_t genericRspCallback(void* param, SDataBuf* pMsg, int32_t code) {
int32_t processConnectRsp(void* param, SDataBuf* pMsg, int32_t code) {
SRequestObj* pRequest = param;
if (code != TSDB_CODE_SUCCESS) {
+ taosMemoryFree(pMsg->pEpSet);
taosMemoryFree(pMsg->pData);
setErrno(pRequest, code);
tsem_post(&pRequest->body.rspSem);
@@ -62,6 +64,7 @@ int32_t processConnectRsp(void* param, SDataBuf* pMsg, int32_t code) {
if (delta > timestampDeltaLimit) {
code = TSDB_CODE_TIME_UNSYNCED;
tscError("time diff:%ds is too big", delta);
+ taosMemoryFree(pMsg->pEpSet);
taosMemoryFree(pMsg->pData);
setErrno(pRequest, code);
tsem_post(&pRequest->body.rspSem);
@@ -70,6 +73,7 @@ int32_t processConnectRsp(void* param, SDataBuf* pMsg, int32_t code) {
/*assert(connectRsp.epSet.numOfEps > 0);*/
if (connectRsp.epSet.numOfEps == 0) {
+ taosMemoryFree(pMsg->pEpSet);
taosMemoryFree(pMsg->pData);
setErrno(pRequest, TSDB_CODE_MND_APP_ERROR);
tsem_post(&pRequest->body.rspSem);
@@ -114,6 +118,7 @@ int32_t processConnectRsp(void* param, SDataBuf* pMsg, int32_t code) {
pTscObj->pAppInfo->numOfConns);
taosMemoryFree(pMsg->pData);
+ taosMemoryFree(pMsg->pEpSet);
tsem_post(&pRequest->body.rspSem);
return 0;
}
@@ -137,6 +142,7 @@ int32_t processCreateDbRsp(void* param, SDataBuf* pMsg, int32_t code) {
// todo rsp with the vnode id list
SRequestObj* pRequest = param;
taosMemoryFree(pMsg->pData);
+ taosMemoryFree(pMsg->pEpSet);
if (code != TSDB_CODE_SUCCESS) {
setErrno(pRequest, code);
}
@@ -173,6 +179,7 @@ int32_t processUseDbRsp(void* param, SDataBuf* pMsg, int32_t code) {
if (code != TSDB_CODE_SUCCESS) {
taosMemoryFree(pMsg->pData);
+ taosMemoryFree(pMsg->pEpSet);
setErrno(pRequest, code);
if (pRequest->body.queryFp != NULL) {
@@ -220,6 +227,7 @@ int32_t processUseDbRsp(void* param, SDataBuf* pMsg, int32_t code) {
setConnectionDB(pRequest->pTscObj, db);
taosMemoryFree(pMsg->pData);
+ taosMemoryFree(pMsg->pEpSet);
if (pRequest->body.queryFp != NULL) {
pRequest->body.queryFp(pRequest->body.param, pRequest, pRequest->code);
@@ -237,7 +245,7 @@ int32_t processCreateSTableRsp(void* param, SDataBuf* pMsg, int32_t code) {
setErrno(pRequest, code);
} else {
SMCreateStbRsp createRsp = {0};
- SDecoder coder = {0};
+ SDecoder coder = {0};
tDecoderInit(&coder, pMsg->pData, pMsg->len);
tDecodeSMCreateStbRsp(&coder, &createRsp);
tDecoderClear(&coder);
@@ -246,6 +254,7 @@ int32_t processCreateSTableRsp(void* param, SDataBuf* pMsg, int32_t code) {
pRequest->body.resInfo.execRes.res = createRsp.pMeta;
}
+ taosMemoryFree(pMsg->pEpSet);
taosMemoryFree(pMsg->pData);
if (pRequest->body.queryFp != NULL) {
@@ -262,7 +271,7 @@ int32_t processCreateSTableRsp(void* param, SDataBuf* pMsg, int32_t code) {
code = ret;
}
}
-
+
pRequest->body.queryFp(pRequest->body.param, pRequest, code);
} else {
tsem_post(&pRequest->body.rspSem);
@@ -284,6 +293,7 @@ int32_t processDropDbRsp(void* param, SDataBuf* pMsg, int32_t code) {
}
taosMemoryFree(pMsg->pData);
+ taosMemoryFree(pMsg->pEpSet);
if (pRequest->body.queryFp != NULL) {
pRequest->body.queryFp(pRequest->body.param, pRequest, code);
@@ -309,6 +319,7 @@ int32_t processAlterStbRsp(void* param, SDataBuf* pMsg, int32_t code) {
}
taosMemoryFree(pMsg->pData);
+ taosMemoryFree(pMsg->pEpSet);
if (pRequest->body.queryFp != NULL) {
SExecResult* pRes = &pRequest->body.resInfo.execRes;
@@ -420,6 +431,7 @@ int32_t processShowVariablesRsp(void* param, SDataBuf* pMsg, int32_t code) {
}
taosMemoryFree(pMsg->pData);
+ taosMemoryFree(pMsg->pEpSet);
if (pRequest->body.queryFp != NULL) {
pRequest->body.queryFp(pRequest->body.param, pRequest, code);
diff --git a/source/client/src/taosx.c b/source/client/src/clientRawBlockWrite.c
similarity index 74%
rename from source/client/src/taosx.c
rename to source/client/src/clientRawBlockWrite.c
index 677567e38ffcecefaa72373ac02a976cb2078676..eb7b45cc05d6158368af00d7dd7bc7397d7f20b3 100644
--- a/source/client/src/taosx.c
+++ b/source/client/src/clientRawBlockWrite.c
@@ -30,7 +30,7 @@ static char* buildCreateTableJson(SSchemaWrapper* schemaRow, SSchemaWrapper* sch
char* string = NULL;
cJSON* json = cJSON_CreateObject();
if (json == NULL) {
- return string;
+ return NULL;
}
cJSON* type = cJSON_CreateString("create");
cJSON_AddItemToObject(json, "type", type);
@@ -39,10 +39,10 @@ static char* buildCreateTableJson(SSchemaWrapper* schemaRow, SSchemaWrapper* sch
// sprintf(uid, "%"PRIi64, id);
// cJSON* id_ = cJSON_CreateString(uid);
// cJSON_AddItemToObject(json, "id", id_);
- cJSON* tableName = cJSON_CreateString(name);
- cJSON_AddItemToObject(json, "tableName", tableName);
cJSON* tableType = cJSON_CreateString(t == TSDB_NORMAL_TABLE ? "normal" : "super");
cJSON_AddItemToObject(json, "tableType", tableType);
+ cJSON* tableName = cJSON_CreateString(name);
+ cJSON_AddItemToObject(json, "tableName", tableName);
// cJSON* version = cJSON_CreateNumber(1);
// cJSON_AddItemToObject(json, "version", version);
@@ -112,10 +112,10 @@ static char* buildAlterSTableJson(void* alterData, int32_t alterDataLen) {
// cJSON_AddItemToObject(json, "uid", uid);
SName name = {0};
tNameFromString(&name, req.name, T_NAME_ACCT | T_NAME_DB | T_NAME_TABLE);
- cJSON* tableName = cJSON_CreateString(name.tname);
- cJSON_AddItemToObject(json, "tableName", tableName);
cJSON* tableType = cJSON_CreateString("super");
cJSON_AddItemToObject(json, "tableType", tableType);
+ cJSON* tableName = cJSON_CreateString(name.tname);
+ cJSON_AddItemToObject(json, "tableName", tableName);
cJSON* alterType = cJSON_CreateNumber(req.alterType);
cJSON_AddItemToObject(json, "alterType", alterType);
@@ -199,8 +199,6 @@ static char* processCreateStb(SMqMetaRsp* metaRsp) {
goto _err;
}
string = buildCreateTableJson(&req.schemaRow, &req.schemaTag, req.name, req.suid, TSDB_SUPER_TABLE);
- tDecoderClear(&coder);
- return string;
_err:
tDecoderClear(&coder);
@@ -221,32 +219,22 @@ static char* processAlterStb(SMqMetaRsp* metaRsp) {
goto _err;
}
string = buildAlterSTableJson(req.alterOriData, req.alterOriDataLen);
- tDecoderClear(&coder);
- return string;
_err:
tDecoderClear(&coder);
return string;
}
-static char* buildCreateCTableJson(STag* pTag, char* sname, char* name, SArray* tagName, int64_t id, uint8_t tagNum) {
- char* string = NULL;
- SArray* pTagVals = NULL;
- cJSON* json = cJSON_CreateObject();
- if (json == NULL) {
- return string;
- }
- cJSON* type = cJSON_CreateString("create");
- cJSON_AddItemToObject(json, "type", type);
- // char cid[32] = {0};
- // sprintf(cid, "%"PRIi64, id);
- // cJSON* cid_ = cJSON_CreateString(cid);
- // cJSON_AddItemToObject(json, "id", cid_);
+static void buildChildElement(cJSON* json, SVCreateTbReq* pCreateReq){
+ STag* pTag = (STag*)pCreateReq->ctb.pTag;
+ char* sname = pCreateReq->ctb.name;
+ char* name = pCreateReq->name;
+ SArray* tagName = pCreateReq->ctb.tagName;
+ int64_t id = pCreateReq->uid;
+ uint8_t tagNum = pCreateReq->ctb.tagNum;
cJSON* tableName = cJSON_CreateString(name);
cJSON_AddItemToObject(json, "tableName", tableName);
- cJSON* tableType = cJSON_CreateString("child");
- cJSON_AddItemToObject(json, "tableType", tableType);
cJSON* using = cJSON_CreateString(sname);
cJSON_AddItemToObject(json, "using", using);
cJSON* tagNumJson = cJSON_CreateNumber(tagNum);
@@ -255,6 +243,7 @@ static char* buildCreateCTableJson(STag* pTag, char* sname, char* name, SArray*
// cJSON_AddItemToObject(json, "version", version);
cJSON* tags = cJSON_CreateArray();
+ SArray* pTagVals = NULL;
int32_t code = tTagToValArray(pTag, &pTagVals);
if (code) {
goto end;
@@ -313,11 +302,37 @@ static char* buildCreateCTableJson(STag* pTag, char* sname, char* name, SArray*
cJSON_AddItemToArray(tags, tag);
}
-end:
+ end:
cJSON_AddItemToObject(json, "tags", tags);
+ taosArrayDestroy(pTagVals);
+}
+
+static char* buildCreateCTableJson(SVCreateTbReq* pCreateReq, int32_t nReqs) {
+ char* string = NULL;
+ cJSON* json = cJSON_CreateObject();
+ if (json == NULL) {
+ return NULL;
+ }
+ cJSON* type = cJSON_CreateString("create");
+ cJSON_AddItemToObject(json, "type", type);
+ // char cid[32] = {0};
+ // sprintf(cid, "%"PRIi64, id);
+ // cJSON* cid_ = cJSON_CreateString(cid);
+ // cJSON_AddItemToObject(json, "id", cid_);
+
+ cJSON* tableType = cJSON_CreateString("child");
+ cJSON_AddItemToObject(json, "tableType", tableType);
+
+ buildChildElement(json, pCreateReq);
+ cJSON* createList = cJSON_CreateArray();
+ for(int i = 0; nReqs > 1 && i < nReqs; i++){
+ cJSON* create = cJSON_CreateObject();
+ buildChildElement(create, pCreateReq + i);
+ cJSON_AddItemToArray(createList, create);
+ }
+ cJSON_AddItemToObject(json, "createList", createList);
string = cJSON_PrintUnformatted(json);
cJSON_Delete(json);
- taosArrayDestroy(pTagVals);
return string;
}
@@ -335,21 +350,58 @@ static char* processCreateTable(SMqMetaRsp* metaRsp) {
}
// loop to create table
- for (int32_t iReq = 0; iReq < req.nReqs; iReq++) {
- pCreateReq = req.pReqs + iReq;
+ if (req.nReqs > 0) {
+ pCreateReq = req.pReqs;
if (pCreateReq->type == TSDB_CHILD_TABLE) {
- string = buildCreateCTableJson((STag*)pCreateReq->ctb.pTag, pCreateReq->ctb.name, pCreateReq->name,
- pCreateReq->ctb.tagName, pCreateReq->uid, pCreateReq->ctb.tagNum);
+ string = buildCreateCTableJson(req.pReqs, req.nReqs);
} else if (pCreateReq->type == TSDB_NORMAL_TABLE) {
- string =
- buildCreateTableJson(&pCreateReq->ntb.schemaRow, NULL, pCreateReq->name, pCreateReq->uid, TSDB_NORMAL_TABLE);
+ string = buildCreateTableJson(&pCreateReq->ntb.schemaRow, NULL, pCreateReq->name, pCreateReq->uid, TSDB_NORMAL_TABLE);
}
}
+_exit:
+ for (int32_t iReq = 0; iReq < req.nReqs; iReq++) {
+ pCreateReq = req.pReqs + iReq;
+ taosMemoryFreeClear(pCreateReq->comment);
+ if (pCreateReq->type == TSDB_CHILD_TABLE) {
+ taosArrayDestroy(pCreateReq->ctb.tagName);
+ }
+ }
tDecoderClear(&decoder);
+ return string;
+}
+
+static char* processAutoCreateTable(STaosxRsp* rsp) {
+ ASSERT(rsp->createTableNum != 0);
+
+ SDecoder* decoder = taosMemoryCalloc(rsp->createTableNum, sizeof(SDecoder));
+ SVCreateTbReq* pCreateReq = taosMemoryCalloc(rsp->createTableNum, sizeof(SVCreateTbReq));
+ char* string = NULL;
+
+ // loop to create table
+ for (int32_t iReq = 0; iReq < rsp->createTableNum; iReq++) {
+ // decode
+ void** data = taosArrayGet(rsp->createTableReq, iReq);
+ int32_t *len = taosArrayGet(rsp->createTableLen, iReq);
+ tDecoderInit(&decoder[iReq], *data, *len);
+ if (tDecodeSVCreateTbReq(&decoder[iReq], pCreateReq + iReq) < 0) {
+ goto _exit;
+ }
+
+ ASSERT(pCreateReq[iReq].type == TSDB_CHILD_TABLE);
+ }
+ string = buildCreateCTableJson(pCreateReq, rsp->createTableNum);
_exit:
- tDecoderClear(&decoder);
+ for(int i = 0; i < rsp->createTableNum; i++){
+ tDecoderClear(&decoder[i]);
+ taosMemoryFreeClear(pCreateReq[i].comment);
+ if (pCreateReq[i].type == TSDB_CHILD_TABLE) {
+ taosArrayDestroy(pCreateReq[i].ctb.tagName);
+ }
+ }
+ taosMemoryFree(decoder);
+ taosMemoryFree(pCreateReq);
return string;
}
@@ -374,10 +426,10 @@ static char* processAlterTable(SMqMetaRsp* metaRsp) {
cJSON_AddItemToObject(json, "type", type);
// cJSON* uid = cJSON_CreateNumber(id);
// cJSON_AddItemToObject(json, "uid", uid);
- cJSON* tableName = cJSON_CreateString(vAlterTbReq.tbName);
- cJSON_AddItemToObject(json, "tableName", tableName);
cJSON* tableType = cJSON_CreateString(vAlterTbReq.action == TSDB_ALTER_TABLE_UPDATE_TAG_VAL ? "child" : "normal");
cJSON_AddItemToObject(json, "tableType", tableType);
+ cJSON* tableName = cJSON_CreateString(vAlterTbReq.tbName);
+ cJSON_AddItemToObject(json, "tableName", tableName);
cJSON* alterType = cJSON_CreateNumber(vAlterTbReq.action);
cJSON_AddItemToObject(json, "alterType", alterType);
@@ -462,6 +514,7 @@ static char* processAlterTable(SMqMetaRsp* metaRsp) {
string = cJSON_PrintUnformatted(json);
_exit:
+ cJSON_Delete(json);
tDecoderClear(&decoder);
return string;
}
@@ -485,14 +538,15 @@ static char* processDropSTable(SMqMetaRsp* metaRsp) {
}
cJSON* type = cJSON_CreateString("drop");
cJSON_AddItemToObject(json, "type", type);
- cJSON* tableName = cJSON_CreateString(req.name);
- cJSON_AddItemToObject(json, "tableName", tableName);
cJSON* tableType = cJSON_CreateString("super");
cJSON_AddItemToObject(json, "tableType", tableType);
+ cJSON* tableName = cJSON_CreateString(req.name);
+ cJSON_AddItemToObject(json, "tableName", tableName);
string = cJSON_PrintUnformatted(json);
_exit:
+ cJSON_Delete(json);
tDecoderClear(&decoder);
return string;
}
@@ -533,6 +587,7 @@ static char* processDropTable(SMqMetaRsp* metaRsp) {
string = cJSON_PrintUnformatted(json);
_exit:
+ cJSON_Delete(json);
tDecoderClear(&decoder);
return string;
}
@@ -549,6 +604,7 @@ static int32_t taosCreateStb(TAOS* taos, void* meta, int32_t metaLen) {
goto end;
}
+ pRequest->syncQuery = true;
if (!pRequest->pDb) {
code = TSDB_CODE_PAR_DB_NOT_SPECIFIED;
goto end;
@@ -637,6 +693,7 @@ static int32_t taosDropStb(TAOS* taos, void* meta, int32_t metaLen) {
goto end;
}
+ pRequest->syncQuery = true;
if (!pRequest->pDb) {
code = TSDB_CODE_PAR_DB_NOT_SPECIFIED;
goto end;
@@ -717,6 +774,7 @@ static int32_t taosCreateTable(TAOS* taos, void* meta, int32_t metaLen) {
goto end;
}
+ pRequest->syncQuery = true;
if (!pRequest->pDb) {
code = TSDB_CODE_PAR_DB_NOT_SPECIFIED;
goto end;
@@ -765,6 +823,31 @@ static int32_t taosCreateTable(TAOS* taos, void* meta, int32_t metaLen) {
}
taosArrayPush(pRequest->tableList, &pName);
+ pCreateReq->flags |= TD_CREATE_IF_NOT_EXISTS;
+ // change tag cid to new cid
+ if (pCreateReq->type == TSDB_CHILD_TABLE) {
+ STableMeta* pTableMeta = NULL;
+ SName sName = {0};
+ toName(pTscObj->acctId, pRequest->pDb, pCreateReq->ctb.name, &sName);
+ code = catalogGetTableMeta(pCatalog, &conn, &sName, &pTableMeta);
+ if (code != TSDB_CODE_SUCCESS) {
+ uError("taosCreateTable:catalogGetTableMeta failed. table name: %s", pCreateReq->ctb.name);
+ goto end;
+ }
+
+ for (int32_t i = 0; i < taosArrayGetSize(pCreateReq->ctb.tagName); i++) {
+ char* tName = taosArrayGet(pCreateReq->ctb.tagName, i);
+ for (int32_t j = pTableMeta->tableInfo.numOfColumns;
+ j < pTableMeta->tableInfo.numOfColumns + pTableMeta->tableInfo.numOfTags; j++) {
+ SSchema* tag = &pTableMeta->schema[j];
+ if (strcmp(tag->name, tName) == 0 && tag->type != TSDB_DATA_TYPE_JSON) {
+ tTagSetCid((STag*)pCreateReq->ctb.pTag, i, tag->colId);
+ }
+ }
+ }
+ taosMemoryFreeClear(pTableMeta);
+ }
+
SVgroupCreateTableBatch* pTableBatch = taosHashGet(pVgroupHashmap, &pInfo.vgId, sizeof(pInfo.vgId));
if (pTableBatch == NULL) {
SVgroupCreateTableBatch tBatch = {0};
@@ -805,6 +888,14 @@ static int32_t taosCreateTable(TAOS* taos, void* meta, int32_t metaLen) {
code = pRequest->code;
end:
+ for (int32_t iReq = 0; iReq < req.nReqs; iReq++) {
+ pCreateReq = req.pReqs + iReq;
+ taosMemoryFreeClear(pCreateReq->comment);
+ if (pCreateReq->type == TSDB_CHILD_TABLE) {
+ taosArrayDestroy(pCreateReq->ctb.tagName);
+ }
+ }
+
taosHashCleanup(pVgroupHashmap);
destroyRequest(pRequest);
tDecoderClear(&coder);
@@ -835,7 +926,7 @@ static int32_t taosDropTable(TAOS* taos, void* meta, int32_t metaLen) {
if (code != TSDB_CODE_SUCCESS) {
goto end;
}
-
+ pRequest->syncQuery = true;
if (!pRequest->pDb) {
code = TSDB_CODE_PAR_DB_NOT_SPECIFIED;
goto end;
@@ -1008,6 +1099,7 @@ static int32_t taosAlterTable(TAOS* taos, void* meta, int32_t metaLen) {
goto end;
}
+ pRequest->syncQuery = true;
if (!pRequest->pDb) {
code = TSDB_CODE_PAR_DB_NOT_SPECIFIED;
goto end;
@@ -1127,6 +1219,7 @@ int taos_write_raw_block(TAOS* taos, int rows, char* pData, const char* tbname)
goto end;
}
+ pRequest->syncQuery = true;
if (!pRequest->pDb) {
uError("WriteRaw:not use db");
code = TSDB_CODE_PAR_DB_NOT_SPECIFIED;
@@ -1299,12 +1392,13 @@ end:
return code;
}
-static int32_t tmqWriteRaw(TAOS* taos, void* data, int32_t dataLen) {
- int32_t code = TSDB_CODE_SUCCESS;
- SHashObj* pVgHash = NULL;
- SQuery* pQuery = NULL;
- SMqRspObj rspObj = {0};
- SDecoder decoder = {0};
+static int32_t tmqWriteRawDataImpl(TAOS* taos, void* data, int32_t dataLen) {
+ int32_t code = TSDB_CODE_SUCCESS;
+ SHashObj* pVgHash = NULL;
+ SQuery* pQuery = NULL;
+ SMqRspObj rspObj = {0};
+ SDecoder decoder = {0};
+ STableMeta* pTableMeta = NULL;
terrno = TSDB_CODE_SUCCESS;
SRequestObj* pRequest = (SRequestObj*)createRequest(*(int64_t*)taos, TSDB_SQL_INSERT);
@@ -1313,6 +1407,7 @@ static int32_t tmqWriteRaw(TAOS* taos, void* data, int32_t dataLen) {
return terrno;
}
+ pRequest->syncQuery = true;
rspObj.resIter = -1;
rspObj.resType = RES_TYPE__TMQ;
@@ -1361,11 +1456,41 @@ static int32_t tmqWriteRaw(TAOS* taos, void* data, int32_t dataLen) {
goto end;
}
+ const char* tbName = (const char*)taosArrayGetP(rspObj.rsp.blockTbName, rspObj.resIter);
+ if (!tbName) {
+ uError("WriteRaw: tbname is null");
+ code = TSDB_CODE_TMQ_INVALID_MSG;
+ goto end;
+ }
+
+ printf("raw data tbname:%s\n", tbName);
+ SName pName = {TSDB_TABLE_NAME_T, pRequest->pTscObj->acctId, {0}, {0}};
+ strcpy(pName.dbname, pRequest->pDb);
+ strcpy(pName.tname, tbName);
+
+ VgData vgData = {0};
+ code = catalogGetTableHashVgroup(pCatalog, &conn, &pName, &(vgData.vg));
+ if (code != TSDB_CODE_SUCCESS) {
+ uError("WriteRaw:catalogGetTableHashVgroup failed. table name: %s", tbName);
+ goto end;
+ }
+
+ code = catalogGetTableMeta(pCatalog, &conn, &pName, &pTableMeta);
+ if (code == TSDB_CODE_PAR_TABLE_NOT_EXIST) {
+ uError("WriteRaw:catalogGetTableMeta table not exist. table name: %s", tbName);
+ code = TSDB_CODE_SUCCESS;
+ continue;
+ }
+ if (code != TSDB_CODE_SUCCESS) {
+ uError("WriteRaw:catalogGetTableMeta failed. table name: %s", tbName);
+ goto end;
+ }
+
uint16_t fLen = 0;
int32_t rowSize = 0;
int16_t nVar = 0;
- for (int i = 0; i < pSW->nCols; i++) {
- SSchema* schema = pSW->pSchema + i;
+ for (int i = 0; i < pTableMeta->tableInfo.numOfColumns; i++) {
+ SSchema* schema = &pTableMeta->schema[i];
fLen += TYPE_BYTES[schema->type];
rowSize += schema->bytes;
if (IS_VAR_DATA_TYPE(schema->type)) {
@@ -1375,10 +1500,230 @@ static int32_t tmqWriteRaw(TAOS* taos, void* data, int32_t dataLen) {
int32_t rows = rspObj.resInfo.numOfRows;
int32_t extendedRowSize = rowSize + TD_ROW_HEAD_LEN - sizeof(TSKEY) + nVar * sizeof(VarDataOffsetT) +
- (int32_t)TD_BITMAP_BYTES(pSW->nCols - 1);
+ (int32_t)TD_BITMAP_BYTES(pTableMeta->tableInfo.numOfColumns - 1);
int32_t schemaLen = 0;
int32_t submitLen = sizeof(SSubmitBlk) + schemaLen + rows * extendedRowSize;
+ SSubmitReq* subReq = NULL;
+ SSubmitBlk* blk = NULL;
+ void* hData = taosHashGet(pVgHash, &vgData.vg.vgId, sizeof(vgData.vg.vgId));
+ if (hData) {
+ vgData = *(VgData*)hData;
+
+ int32_t totalLen = ((SSubmitReq*)(vgData.data))->length + submitLen;
+ void* tmp = taosMemoryRealloc(vgData.data, totalLen);
+ if (tmp == NULL) {
+ code = TSDB_CODE_TSC_OUT_OF_MEMORY;
+ goto end;
+ }
+ vgData.data = tmp;
+ ((VgData*)hData)->data = tmp;
+ subReq = (SSubmitReq*)(vgData.data);
+ blk = POINTER_SHIFT(vgData.data, subReq->length);
+ } else {
+ int32_t totalLen = sizeof(SSubmitReq) + submitLen;
+ void* tmp = taosMemoryCalloc(1, totalLen);
+ if (tmp == NULL) {
+ code = TSDB_CODE_TSC_OUT_OF_MEMORY;
+ goto end;
+ }
+ vgData.data = tmp;
+ taosHashPut(pVgHash, (const char*)&vgData.vg.vgId, sizeof(vgData.vg.vgId), (char*)&vgData, sizeof(vgData));
+ subReq = (SSubmitReq*)(vgData.data);
+ subReq->length = sizeof(SSubmitReq);
+ subReq->numOfBlocks = 0;
+
+ blk = POINTER_SHIFT(vgData.data, sizeof(SSubmitReq));
+ }
+
+ // pSW->pSchema should be same as pTableMeta->schema
+ // ASSERT(pSW->nCols == pTableMeta->tableInfo.numOfColumns);
+ uint64_t suid = (TSDB_NORMAL_TABLE == pTableMeta->tableType ? 0 : pTableMeta->suid);
+ uint64_t uid = pTableMeta->uid;
+ int16_t sver = pTableMeta->sversion;
+
+ void* blkSchema = POINTER_SHIFT(blk, sizeof(SSubmitBlk));
+ STSRow* rowData = POINTER_SHIFT(blkSchema, schemaLen);
+
+ SRowBuilder rb = {0};
+ tdSRowInit(&rb, sver);
+ tdSRowSetTpInfo(&rb, pTableMeta->tableInfo.numOfColumns, fLen);
+ int32_t totalLen = 0;
+
+ SHashObj* schemaHash = taosHashInit(16, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), false, HASH_NO_LOCK);
+ for (int i = 0; i < pSW->nCols; i++) {
+ SSchema* schema = &pSW->pSchema[i];
+ taosHashPut(schemaHash, schema->name, strlen(schema->name), &i, sizeof(int32_t));
+ }
+
+ for (int32_t j = 0; j < rows; j++) {
+ tdSRowResetBuf(&rb, rowData);
+
+ doSetOneRowPtr(&rspObj.resInfo);
+ rspObj.resInfo.current += 1;
+
+ int32_t offset = 0;
+ for (int32_t k = 0; k < pTableMeta->tableInfo.numOfColumns; k++) {
+ const SSchema* pColumn = &pTableMeta->schema[k];
+ int32_t* index = taosHashGet(schemaHash, pColumn->name, strlen(pColumn->name));
+ if (!index) {
+ tdAppendColValToRow(&rb, pColumn->colId, pColumn->type, TD_VTYPE_NULL, NULL, false, offset, k);
+ } else {
+ char* colData = rspObj.resInfo.row[*index];
+ if (!colData) {
+ tdAppendColValToRow(&rb, pColumn->colId, pColumn->type, TD_VTYPE_NULL, NULL, false, offset, k);
+ } else {
+ if (IS_VAR_DATA_TYPE(pColumn->type)) {
+ colData -= VARSTR_HEADER_SIZE;
+ }
+ tdAppendColValToRow(&rb, pColumn->colId, pColumn->type, TD_VTYPE_NORM, colData, true, offset, k);
+ }
+ }
+
+ offset += TYPE_BYTES[pColumn->type];
+ }
+ tdSRowEnd(&rb);
+ int32_t rowLen = TD_ROW_LEN(rowData);
+ rowData = POINTER_SHIFT(rowData, rowLen);
+ totalLen += rowLen;
+ }
+
+ taosHashCleanup(schemaHash);
+ blk->uid = htobe64(uid);
+ blk->suid = htobe64(suid);
+ blk->sversion = htonl(sver);
+ blk->schemaLen = htonl(schemaLen);
+ blk->numOfRows = htonl(rows);
+ blk->dataLen = htonl(totalLen);
+ subReq->length += sizeof(SSubmitBlk) + schemaLen + totalLen;
+ subReq->numOfBlocks++;
+ taosMemoryFreeClear(pTableMeta);
+ rspObj.resInfo.pRspMsg = NULL;
+ doFreeReqResultInfo(&rspObj.resInfo);
+ }
+
+ pQuery = (SQuery*)nodesMakeNode(QUERY_NODE_QUERY);
+ if (NULL == pQuery) {
+ uError("create SQuery error");
+ code = TSDB_CODE_OUT_OF_MEMORY;
+ goto end;
+ }
+ pQuery->execMode = QUERY_EXEC_MODE_SCHEDULE;
+ pQuery->haveResultSet = false;
+ pQuery->msgType = TDMT_VND_SUBMIT;
+ pQuery->pRoot = (SNode*)nodesMakeNode(QUERY_NODE_VNODE_MODIF_STMT);
+ if (NULL == pQuery->pRoot) {
+ uError("create pQuery->pRoot error");
+ code = TSDB_CODE_OUT_OF_MEMORY;
+ goto end;
+ }
+ SVnodeModifOpStmt* nodeStmt = (SVnodeModifOpStmt*)(pQuery->pRoot);
+ nodeStmt->payloadType = PAYLOAD_TYPE_KV;
+
+ int32_t numOfVg = taosHashGetSize(pVgHash);
+ nodeStmt->pDataBlocks = taosArrayInit(numOfVg, POINTER_BYTES);
+
+ VgData* vData = (VgData*)taosHashIterate(pVgHash, NULL);
+ while (vData) {
+ SVgDataBlocks* dst = taosMemoryCalloc(1, sizeof(SVgDataBlocks));
+ if (NULL == dst) {
+ code = TSDB_CODE_TSC_OUT_OF_MEMORY;
+ goto end;
+ }
+ dst->vg = vData->vg;
+ SSubmitReq* subReq = (SSubmitReq*)(vData->data);
+ dst->numOfTables = subReq->numOfBlocks;
+ dst->size = subReq->length;
+ dst->pData = (char*)subReq;
+ vData->data = NULL; // no need free
+ subReq->header.vgId = htonl(dst->vg.vgId);
+ subReq->version = htonl(1);
+ subReq->header.contLen = htonl(subReq->length);
+ subReq->length = htonl(subReq->length);
+ subReq->numOfBlocks = htonl(subReq->numOfBlocks);
+ taosArrayPush(nodeStmt->pDataBlocks, &dst);
+ vData = (VgData*)taosHashIterate(pVgHash, vData);
+ }
+
+ launchQueryImpl(pRequest, pQuery, true, NULL);
+ code = pRequest->code;
+
+end:
+ tDeleteSMqDataRsp(&rspObj.rsp);
+ rspObj.resInfo.pRspMsg = NULL;
+ doFreeReqResultInfo(&rspObj.resInfo);
+ tDecoderClear(&decoder);
+ qDestroyQuery(pQuery);
+ destroyRequest(pRequest);
+ taosHashCleanup(pVgHash);
+ taosMemoryFreeClear(pTableMeta);
+ return code;
+}
+
+static int32_t tmqWriteRawMetaDataImpl(TAOS* taos, void* data, int32_t dataLen) {
+ int32_t code = TSDB_CODE_SUCCESS;
+ SHashObj* pVgHash = NULL;
+ SQuery* pQuery = NULL;
+ SMqTaosxRspObj rspObj = {0};
+ SDecoder decoder = {0};
+ STableMeta* pTableMeta = NULL;
+
+ terrno = TSDB_CODE_SUCCESS;
+ SRequestObj* pRequest = (SRequestObj*)createRequest(*(int64_t*)taos, TSDB_SQL_INSERT);
+ if (!pRequest) {
+ uError("WriteRaw:createRequest error request is null");
+ return terrno;
+ }
+
+ pRequest->syncQuery = true;
+ rspObj.resIter = -1;
+ rspObj.resType = RES_TYPE__TMQ_METADATA;
+
+ tDecoderInit(&decoder, data, dataLen);
+ code = tDecodeSTaosxRsp(&decoder, &rspObj.rsp);
+ if (code != 0) {
+ uError("WriteRaw:decode smqDataRsp error");
+ code = TSDB_CODE_INVALID_MSG;
+ goto end;
+ }
+
+ if (!pRequest->pDb) {
+ uError("WriteRaw:not use db");
+ code = TSDB_CODE_PAR_DB_NOT_SPECIFIED;
+ goto end;
+ }
+
+ pVgHash = taosHashInit(16, taosGetDefaultHashFunction(TSDB_DATA_TYPE_INT), true, HASH_NO_LOCK);
+ taosHashSetFreeFp(pVgHash, destroyVgHash);
+ struct SCatalog* pCatalog = NULL;
+ code = catalogGetHandle(pRequest->pTscObj->pAppInfo->clusterId, &pCatalog);
+ if (code != TSDB_CODE_SUCCESS) {
+ uError("WriteRaw: get gatlog error");
+ goto end;
+ }
+
+ SRequestConnInfo conn = {0};
+ conn.pTrans = pRequest->pTscObj->pAppInfo->pTransporter;
+ conn.requestId = pRequest->requestId;
+ conn.requestObjRefId = pRequest->self;
+ conn.mgmtEps = getEpSet_s(&pRequest->pTscObj->pAppInfo->mgmtEp);
+
+ printf("raw data block num:%d\n", rspObj.rsp.blockNum);
+ while (++rspObj.resIter < rspObj.rsp.blockNum) {
+ SRetrieveTableRsp* pRetrieve = (SRetrieveTableRsp*)taosArrayGetP(rspObj.rsp.blockData, rspObj.resIter);
+ if (!rspObj.rsp.withSchema) {
+ uError("WriteRaw:no schema, iter:%d", rspObj.resIter);
+ goto end;
+ }
+ SSchemaWrapper* pSW = (SSchemaWrapper*)taosArrayGetP(rspObj.rsp.blockSchema, rspObj.resIter);
+ setResSchemaInfo(&rspObj.resInfo, pSW->pSchema, pSW->nCols);
+
+ code = setQueryResultFromRsp(&rspObj.resInfo, pRetrieve, false, false);
+ if (code != TSDB_CODE_SUCCESS) {
+ uError("WriteRaw: setQueryResultFromRsp error");
+ goto end;
+ }
+
const char* tbName = (const char*)taosArrayGetP(rspObj.rsp.blockTbName, rspObj.resIter);
if (!tbName) {
uError("WriteRaw: tbname is null");
@@ -1398,6 +1743,68 @@ static int32_t tmqWriteRaw(TAOS* taos, void* data, int32_t dataLen) {
goto end;
}
+ // find schema data info
+ int32_t schemaLen = 0;
+ void* schemaData = NULL;
+ for(int j = 0; j < rspObj.rsp.createTableNum; j++){
+ void** dataTmp = taosArrayGet(rspObj.rsp.createTableReq, j);
+ int32_t* lenTmp = taosArrayGet(rspObj.rsp.createTableLen, j);
+
+ SDecoder decoderTmp = {0};
+ SVCreateTbReq pCreateReq = {0};
+
+ tDecoderInit(&decoderTmp, *dataTmp, *lenTmp);
+ if (tDecodeSVCreateTbReq(&decoderTmp, &pCreateReq) < 0) {
+ tDecoderClear(&decoderTmp);
+ taosMemoryFreeClear(pCreateReq.comment);
+ taosArrayDestroy(pCreateReq.ctb.tagName);
+ goto end;
+ }
+
+ ASSERT (pCreateReq.type == TSDB_CHILD_TABLE);
+ if(strcmp(tbName, pCreateReq.name) == 0){
+ schemaLen = *lenTmp;
+ schemaData = *dataTmp;
+ strcpy(pName.tname, pCreateReq.ctb.name);
+ tDecoderClear(&decoderTmp);
+ taosMemoryFreeClear(pCreateReq.comment);
+ taosArrayDestroy(pCreateReq.ctb.tagName);
+ break;
+ }
+ tDecoderClear(&decoderTmp);
+ taosMemoryFreeClear(pCreateReq.comment);
+ taosArrayDestroy(pCreateReq.ctb.tagName);
+ }
+
+ code = catalogGetTableMeta(pCatalog, &conn, &pName, &pTableMeta);
+ if (code == TSDB_CODE_PAR_TABLE_NOT_EXIST) {
+ uError("WriteRaw:catalogGetTableMeta table not exist. table name: %s", tbName);
+ code = TSDB_CODE_SUCCESS;
+ continue;
+ }
+ if (code != TSDB_CODE_SUCCESS) {
+ uError("WriteRaw:catalogGetTableMeta failed. table name: %s", tbName);
+ goto end;
+ }
+
+ uint16_t fLen = 0;
+ int32_t rowSize = 0;
+ int16_t nVar = 0;
+ for (int i = 0; i < pTableMeta->tableInfo.numOfColumns; i++) {
+ SSchema* schema = &pTableMeta->schema[i];
+ fLen += TYPE_BYTES[schema->type];
+ rowSize += schema->bytes;
+ if (IS_VAR_DATA_TYPE(schema->type)) {
+ nVar++;
+ }
+ }
+
+ int32_t rows = rspObj.resInfo.numOfRows;
+ int32_t extendedRowSize = rowSize + TD_ROW_HEAD_LEN - sizeof(TSKEY) + nVar * sizeof(VarDataOffsetT) +
+ (int32_t)TD_BITMAP_BYTES(pTableMeta->tableInfo.numOfColumns - 1);
+
+ int32_t submitLen = sizeof(SSubmitBlk) + schemaLen + rows * extendedRowSize;
+
SSubmitReq* subReq = NULL;
SSubmitBlk* blk = NULL;
void* hData = taosHashGet(pVgHash, &vgData.vg.vgId, sizeof(vgData.vg.vgId));
@@ -1430,23 +1837,28 @@ static int32_t tmqWriteRaw(TAOS* taos, void* data, int32_t dataLen) {
blk = POINTER_SHIFT(vgData.data, sizeof(SSubmitReq));
}
- STableMeta* pTableMeta = NULL;
- code = catalogGetTableMeta(pCatalog, &conn, &pName, &pTableMeta);
- if (code != TSDB_CODE_SUCCESS) {
- uError("WriteRaw:catalogGetTableMeta failed. table name: %s", tbName);
- goto end;
- }
+ // pSW->pSchema should be same as pTableMeta->schema
+ // ASSERT(pSW->nCols == pTableMeta->tableInfo.numOfColumns);
uint64_t suid = (TSDB_NORMAL_TABLE == pTableMeta->tableType ? 0 : pTableMeta->suid);
uint64_t uid = pTableMeta->uid;
- taosMemoryFreeClear(pTableMeta);
+ int16_t sver = pTableMeta->sversion;
void* blkSchema = POINTER_SHIFT(blk, sizeof(SSubmitBlk));
+ if(schemaData){
+ memcpy(blkSchema, schemaData, schemaLen);
+ }
STSRow* rowData = POINTER_SHIFT(blkSchema, schemaLen);
SRowBuilder rb = {0};
- tdSRowInit(&rb, pSW->version);
- tdSRowSetTpInfo(&rb, pSW->nCols, fLen);
- int32_t dataLen = 0;
+ tdSRowInit(&rb, sver);
+ tdSRowSetTpInfo(&rb, pTableMeta->tableInfo.numOfColumns, fLen);
+ int32_t totalLen = 0;
+
+ SHashObj* schemaHash = taosHashInit(16, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), false, HASH_NO_LOCK);
+ for (int i = 0; i < pSW->nCols; i++) {
+ SSchema* schema = &pSW->pSchema[i];
+ taosHashPut(schemaHash, schema->name, strlen(schema->name), &i, sizeof(int32_t));
+ }
for (int32_t j = 0; j < rows; j++) {
tdSRowResetBuf(&rb, rowData);
@@ -1455,33 +1867,43 @@ static int32_t tmqWriteRaw(TAOS* taos, void* data, int32_t dataLen) {
rspObj.resInfo.current += 1;
int32_t offset = 0;
- for (int32_t k = 0; k < pSW->nCols; k++) {
- const SSchema* pColumn = &pSW->pSchema[k];
- char* data = rspObj.resInfo.row[k];
- if (!data) {
+ for (int32_t k = 0; k < pTableMeta->tableInfo.numOfColumns; k++) {
+ const SSchema* pColumn = &pTableMeta->schema[k];
+ int32_t* index = taosHashGet(schemaHash, pColumn->name, strlen(pColumn->name));
+ if (!index) {
tdAppendColValToRow(&rb, pColumn->colId, pColumn->type, TD_VTYPE_NULL, NULL, false, offset, k);
} else {
- if (IS_VAR_DATA_TYPE(pColumn->type)) {
- data -= VARSTR_HEADER_SIZE;
+ char* colData = rspObj.resInfo.row[*index];
+ if (!colData) {
+ tdAppendColValToRow(&rb, pColumn->colId, pColumn->type, TD_VTYPE_NULL, NULL, false, offset, k);
+ } else {
+ if (IS_VAR_DATA_TYPE(pColumn->type)) {
+ colData -= VARSTR_HEADER_SIZE;
+ }
+ tdAppendColValToRow(&rb, pColumn->colId, pColumn->type, TD_VTYPE_NORM, colData, true, offset, k);
}
- tdAppendColValToRow(&rb, pColumn->colId, pColumn->type, TD_VTYPE_NORM, data, true, offset, k);
}
+
offset += TYPE_BYTES[pColumn->type];
}
tdSRowEnd(&rb);
int32_t rowLen = TD_ROW_LEN(rowData);
rowData = POINTER_SHIFT(rowData, rowLen);
- dataLen += rowLen;
+ totalLen += rowLen;
}
+ taosHashCleanup(schemaHash);
blk->uid = htobe64(uid);
blk->suid = htobe64(suid);
- blk->sversion = htonl(pSW->version);
+ blk->sversion = htonl(sver);
blk->schemaLen = htonl(schemaLen);
blk->numOfRows = htonl(rows);
- blk->dataLen = htonl(dataLen);
- subReq->length += sizeof(SSubmitBlk) + schemaLen + dataLen;
+ blk->dataLen = htonl(totalLen);
+ subReq->length += sizeof(SSubmitBlk) + schemaLen + totalLen;
subReq->numOfBlocks++;
+ taosMemoryFreeClear(pTableMeta);
+ rspObj.resInfo.pRspMsg = NULL;
+ doFreeReqResultInfo(&rspObj.resInfo);
}
pQuery = (SQuery*)nodesMakeNode(QUERY_NODE_QUERY);
@@ -1530,19 +1952,28 @@ static int32_t tmqWriteRaw(TAOS* taos, void* data, int32_t dataLen) {
launchQueryImpl(pRequest, pQuery, true, NULL);
code = pRequest->code;
-end:
+ end:
+ tDeleteSTaosxRsp(&rspObj.rsp);
+ rspObj.resInfo.pRspMsg = NULL;
+ doFreeReqResultInfo(&rspObj.resInfo);
tDecoderClear(&decoder);
qDestroyQuery(pQuery);
destroyRequest(pRequest);
taosHashCleanup(pVgHash);
+ taosMemoryFreeClear(pTableMeta);
return code;
}
char* tmq_get_json_meta(TAOS_RES* res) {
- if (!TD_RES_TMQ_META(res)) {
+ if (!TD_RES_TMQ_META(res) && !TD_RES_TMQ_METADATA(res)) {
return NULL;
}
+ if(TD_RES_TMQ_METADATA(res)){
+ SMqTaosxRspObj* pMetaDataRspObj = (SMqTaosxRspObj*)res;
+ return processAutoCreateTable(&pMetaDataRspObj->rsp);
+ }
+
SMqMetaRspObj* pMetaRspObj = (SMqMetaRspObj*)res;
if (pMetaRspObj->metaRsp.resMsgType == TDMT_VND_CREATE_STB) {
return processCreateStb(&pMetaRspObj->metaRsp);
@@ -1590,6 +2021,25 @@ int32_t tmq_get_raw(TAOS_RES* res, tmq_raw_data* raw) {
raw->raw = buf;
raw->raw_len = len;
raw->raw_type = RES_TYPE__TMQ;
+ } else if (TD_RES_TMQ_METADATA(res)) {
+ SMqTaosxRspObj* rspObj = ((SMqTaosxRspObj*)res);
+
+ int32_t len = 0;
+ int32_t code = 0;
+ tEncodeSize(tEncodeSTaosxRsp, &rspObj->rsp, len, code);
+ if (code < 0) {
+ return -1;
+ }
+
+ void* buf = taosMemoryCalloc(1, len);
+ SEncoder encoder = {0};
+ tEncoderInit(&encoder, buf, len);
+ tEncodeSTaosxRsp(&encoder, &rspObj->rsp);
+ tEncoderClear(&encoder);
+
+ raw->raw = buf;
+ raw->raw_len = len;
+ raw->raw_type = RES_TYPE__TMQ_METADATA;
} else {
return TSDB_CODE_TMQ_INVALID_MSG;
}
@@ -1597,7 +2047,7 @@ int32_t tmq_get_raw(TAOS_RES* res, tmq_raw_data* raw) {
}
void tmq_free_raw(tmq_raw_data raw) {
- if (raw.raw_type == RES_TYPE__TMQ) {
+ if (raw.raw_type == RES_TYPE__TMQ || raw.raw_type == RES_TYPE__TMQ_METADATA) {
taosMemoryFree(raw.raw);
}
}
@@ -1622,7 +2072,9 @@ int32_t tmq_write_raw(TAOS* taos, tmq_raw_data raw) {
} else if (raw.raw_type == TDMT_VND_DELETE) {
return taosDeleteData(taos, raw.raw, raw.raw_len);
} else if (raw.raw_type == RES_TYPE__TMQ) {
- return tmqWriteRaw(taos, raw.raw, raw.raw_len);
+ return tmqWriteRawDataImpl(taos, raw.raw, raw.raw_len);
+ } else if (raw.raw_type == RES_TYPE__TMQ_METADATA) {
+ return tmqWriteRawMetaDataImpl(taos, raw.raw, raw.raw_len);
}
return TSDB_CODE_INVALID_PARA;
}
diff --git a/source/client/src/clientSml.c b/source/client/src/clientSml.c
index 9f905a835241d54722cf3e15056d1d1019123dcf..195466061d2c19cbd6cedf6fadc08c8f2cc92bdc 100644
--- a/source/client/src/clientSml.c
+++ b/source/client/src/clientSml.c
@@ -79,14 +79,17 @@
#define NCHAR_ADD_LEN 3 // L"nchar" 3 means L" "
#define MAX_RETRY_TIMES 5
-#define LINE_BATCH 20000
+#define LINE_BATCH 2000
//=================================================================================================
typedef TSDB_SML_PROTOCOL_TYPE SMLProtocolType;
typedef enum {
SCHEMA_ACTION_NULL,
- SCHEMA_ACTION_COLUMN,
- SCHEMA_ACTION_TAG
+ SCHEMA_ACTION_CREATE_STABLE,
+ SCHEMA_ACTION_ADD_COLUMN,
+ SCHEMA_ACTION_ADD_TAG,
+ SCHEMA_ACTION_CHANGE_COLUMN_SIZE,
+ SCHEMA_ACTION_CHANGE_TAG_SIZE,
} ESchemaAction;
typedef struct {
@@ -148,13 +151,14 @@ typedef struct {
typedef struct {
SRequestObj *request;
tsem_t sem;
+ int32_t cnt;
+ int32_t total;
TdThreadSpinlock lock;
} Params;
typedef struct {
int64_t id;
Params *params;
- bool isLast;
SMLProtocolType protocol;
int8_t precision;
@@ -219,7 +223,7 @@ static int32_t smlBuildInvalidDataMsg(SSmlMsgBuf *pBuf, const char *msg1, const
static int32_t smlGenerateSchemaAction(SSchema *colField, SHashObj *colHash, SSmlKv *kv, bool isTag,
ESchemaAction *action, SSmlHandle *info) {
- uint16_t *index = (uint16_t *)taosHashGet(colHash, kv->key, kv->keyLen);
+ uint16_t *index = colHash ? (uint16_t *)taosHashGet(colHash, kv->key, kv->keyLen) : NULL;
if (index) {
if (colField[*index].type != kv->type) {
uError("SML:0x%" PRIx64 " point type and db type mismatch. key: %s. point type: %d, db type: %d", info->id,
@@ -232,16 +236,16 @@ static int32_t smlGenerateSchemaAction(SSchema *colField, SHashObj *colHash, SSm
(colField[*index].type == TSDB_DATA_TYPE_NCHAR &&
((colField[*index].bytes - VARSTR_HEADER_SIZE) / TSDB_NCHAR_SIZE < kv->length))) {
if (isTag) {
- *action = SCHEMA_ACTION_TAG;
+ *action = SCHEMA_ACTION_CHANGE_TAG_SIZE;
} else {
- *action = SCHEMA_ACTION_COLUMN;
+ *action = SCHEMA_ACTION_CHANGE_COLUMN_SIZE;
}
}
} else {
if (isTag) {
- *action = SCHEMA_ACTION_TAG;
+ *action = SCHEMA_ACTION_ADD_TAG;
} else {
- *action = SCHEMA_ACTION_COLUMN;
+ *action = SCHEMA_ACTION_ADD_COLUMN;
}
}
return 0;
@@ -310,9 +314,31 @@ static int32_t getBytes(uint8_t type, int32_t length){
}
}
+static int32_t smlBuildFieldsList(SSmlHandle *info, SSchema *schemaField, SHashObj *schemaHash, SArray *cols, SArray* results, int32_t numOfCols, bool isTag) {
+ for (int j = 0; j < taosArrayGetSize(cols); ++j) {
+ SSmlKv *kv = (SSmlKv *)taosArrayGetP(cols, j);
+ ESchemaAction action = SCHEMA_ACTION_NULL;
+ smlGenerateSchemaAction(schemaField, schemaHash, kv, isTag, &action, info);
+ if(action == SCHEMA_ACTION_ADD_COLUMN || action == SCHEMA_ACTION_ADD_TAG){
+ SField field = {0};
+ field.type = kv->type;
+ field.bytes = getBytes(kv->type, kv->length);
+ memcpy(field.name, kv->key, kv->keyLen);
+ taosArrayPush(results, &field);
+ }else if(action == SCHEMA_ACTION_CHANGE_COLUMN_SIZE || action == SCHEMA_ACTION_CHANGE_TAG_SIZE){
+ uint16_t *index = (uint16_t *)taosHashGet(schemaHash, kv->key, kv->keyLen);
+ uint16_t newIndex = *index;
+ if(isTag) newIndex -= numOfCols;
+ SField *field = (SField *)taosArrayGet(results, newIndex);
+ field->bytes = getBytes(kv->type, kv->length);
+ }
+ }
+ return TSDB_CODE_SUCCESS;
+}
+
//static int32_t smlSendMetaMsg(SSmlHandle *info, SName *pName, SSmlSTableMeta *sTableData,
// int32_t colVer, int32_t tagVer, int8_t source, uint64_t suid){
-static int32_t smlSendMetaMsg(SSmlHandle *info, SName *pName, SSmlSTableMeta *sTableData,
+static int32_t smlSendMetaMsg(SSmlHandle *info, SName *pName, SArray* pColumns, SArray* pTags,
STableMeta *pTableMeta, ESchemaAction action){
SRequestObj* pRequest = NULL;
@@ -320,101 +346,58 @@ static int32_t smlSendMetaMsg(SSmlHandle *info, SName *pName, SSmlSTableMeta *s
int32_t code = TSDB_CODE_SUCCESS;
SCmdMsgInfo pCmdMsg = {0};
+ // put front for free
+ pReq.numOfColumns = taosArrayGetSize(pColumns);
+ pReq.pColumns = pColumns;
+ pReq.numOfTags = taosArrayGetSize(pTags);
+ pReq.pTags = pTags;
+
code = buildRequest(info->taos->id, "", 0, NULL, false, &pRequest);
if (code != TSDB_CODE_SUCCESS) {
goto end;
}
+ pRequest->syncQuery = true;
if (!pRequest->pDb) {
code = TSDB_CODE_PAR_DB_NOT_SPECIFIED;
goto end;
}
- if (action == SCHEMA_ACTION_NULL){
+ if (action == SCHEMA_ACTION_CREATE_STABLE){
pReq.colVer = 1;
pReq.tagVer = 1;
pReq.suid = 0;
pReq.source = TD_REQ_FROM_APP;
- } else if (action == SCHEMA_ACTION_TAG){
+ } else if (action == SCHEMA_ACTION_ADD_TAG || action == SCHEMA_ACTION_CHANGE_TAG_SIZE){
pReq.colVer = pTableMeta->sversion;
pReq.tagVer = pTableMeta->tversion + 1;
pReq.suid = pTableMeta->uid;
pReq.source = TD_REQ_FROM_TAOX;
- } else if (action == SCHEMA_ACTION_COLUMN){
+ } else if (action == SCHEMA_ACTION_ADD_COLUMN || action == SCHEMA_ACTION_CHANGE_COLUMN_SIZE){
pReq.colVer = pTableMeta->sversion + 1;
pReq.tagVer = pTableMeta->tversion;
pReq.suid = pTableMeta->uid;
pReq.source = TD_REQ_FROM_TAOX;
}
+ if (pReq.numOfTags == 0){
+ pReq.numOfTags = 1;
+ SField field = {0};
+ field.type = TSDB_DATA_TYPE_NCHAR;
+ field.bytes = 1;
+ strcpy(field.name, tsSmlTagName);
+ taosArrayPush(pReq.pTags, &field);
+ }
+
pReq.commentLen = -1;
pReq.igExists = true;
tNameExtractFullName(pName, pReq.name);
- if(action == SCHEMA_ACTION_NULL || action == SCHEMA_ACTION_COLUMN){
- pReq.numOfColumns = taosArrayGetSize(sTableData->cols);
- pReq.pColumns = taosArrayInit(pReq.numOfColumns, sizeof(SField));
- for (int i = 0; i < pReq.numOfColumns; i++) {
- SSmlKv *kv = (SSmlKv *)taosArrayGetP(sTableData->cols, i);
- SField field = {0};
- field.type = kv->type;
- field.bytes = getBytes(kv->type, kv->length);
- memcpy(field.name, kv->key, kv->keyLen);
- taosArrayPush(pReq.pColumns, &field);
- }
- }else if (action == SCHEMA_ACTION_TAG){
- pReq.numOfColumns = pTableMeta->tableInfo.numOfColumns;
- pReq.pColumns = taosArrayInit(pReq.numOfColumns, sizeof(SField));
- for (int i = 0; i < pReq.numOfColumns; i++) {
- SSchema *s = &pTableMeta->schema[i];
- SField field = {0};
- field.type = s->type;
- field.bytes = s->bytes;
- strcpy(field.name, s->name);
- taosArrayPush(pReq.pColumns, &field);
- }
- }
-
- if(action == SCHEMA_ACTION_NULL || action == SCHEMA_ACTION_TAG){
- pReq.numOfTags = taosArrayGetSize(sTableData->tags);
- if (pReq.numOfTags == 0){
- pReq.numOfTags = 1;
- pReq.pTags = taosArrayInit(pReq.numOfTags, sizeof(SField));
- SField field = {0};
- field.type = TSDB_DATA_TYPE_NCHAR;
- field.bytes = 1;
- strcpy(field.name, tsSmlTagName);
- taosArrayPush(pReq.pTags, &field);
- }else{
- pReq.pTags = taosArrayInit(pReq.numOfTags, sizeof(SField));
- for (int i = 0; i < pReq.numOfTags; i++) {
- SSmlKv *kv = (SSmlKv *)taosArrayGetP(sTableData->tags, i);
- SField field = {0};
- field.type = kv->type;
- field.bytes = getBytes(kv->type, kv->length);
- memcpy(field.name, kv->key, kv->keyLen);
- taosArrayPush(pReq.pTags, &field);
- }
- }
- }else if (action == SCHEMA_ACTION_COLUMN){
- pReq.numOfTags = pTableMeta->tableInfo.numOfTags;
- pReq.pTags = taosArrayInit(pReq.numOfTags, sizeof(SField));
- for (int i = 0; i < pReq.numOfTags; i++) {
- SSchema *s = &pTableMeta->schema[i + pTableMeta->tableInfo.numOfColumns];
- SField field = {0};
- field.type = s->type;
- field.bytes = s->bytes;
- strcpy(field.name, s->name);
- taosArrayPush(pReq.pTags, &field);
- }
- }
-
pCmdMsg.epSet = getEpSet_s(&info->taos->pAppInfo->mgmtEp);
pCmdMsg.msgType = TDMT_MND_CREATE_STB;
pCmdMsg.msgLen = tSerializeSMCreateStbReq(NULL, 0, &pReq);
pCmdMsg.pMsg = taosMemoryMalloc(pCmdMsg.msgLen);
if (NULL == pCmdMsg.pMsg) {
- tFreeSMCreateStbReq(&pReq);
code = TSDB_CODE_OUT_OF_MEMORY;
goto end;
}
@@ -442,7 +425,10 @@ end:
}
static int32_t smlModifyDBSchemas(SSmlHandle *info) {
- int32_t code = 0;
+ int32_t code = 0;
+ SHashObj *hashTmp = NULL;
+ STableMeta *pTableMeta = NULL;
+
SName pName = {TSDB_TABLE_NAME_T, info->taos->acctId, {0}, {0}};
strcpy(pName.dbname, info->pRequest->pDb);
@@ -455,7 +441,6 @@ static int32_t smlModifyDBSchemas(SSmlHandle *info) {
SSmlSTableMeta **tableMetaSml = (SSmlSTableMeta **)taosHashIterate(info->superTables, NULL);
while (tableMetaSml) {
SSmlSTableMeta *sTableData = *tableMetaSml;
- STableMeta *pTableMeta = NULL;
bool needCheckMeta = false; // for multi thread
size_t superTableLen = 0;
@@ -466,14 +451,19 @@ static int32_t smlModifyDBSchemas(SSmlHandle *info) {
code = catalogGetSTableMeta(info->pCatalog, &conn, &pName, &pTableMeta);
if (code == TSDB_CODE_PAR_TABLE_NOT_EXIST || code == TSDB_CODE_MND_STB_NOT_EXIST) {
- code = smlSendMetaMsg(info, &pName, sTableData, NULL, SCHEMA_ACTION_NULL);
+ SArray* pColumns = taosArrayInit(taosArrayGetSize(sTableData->cols), sizeof(SField));
+ SArray* pTags = taosArrayInit(taosArrayGetSize(sTableData->tags), sizeof(SField));
+ smlBuildFieldsList(info, NULL, NULL, sTableData->tags, pTags, 0, true);
+ smlBuildFieldsList(info, NULL, NULL, sTableData->cols, pColumns, 0, false);
+
+ code = smlSendMetaMsg(info, &pName, pColumns, pTags, NULL, SCHEMA_ACTION_CREATE_STABLE);
if (code != TSDB_CODE_SUCCESS) {
- uError("SML:0x%" PRIx64 " smlSendMetaMsg failed. can not create %s", info->id, superTable);
+ uError("SML:0x%" PRIx64 " smlSendMetaMsg failed. can not create %s", info->id, pName.tname);
goto end;
}
info->cost.numOfCreateSTables++;
} else if (code == TSDB_CODE_SUCCESS) {
- SHashObj *hashTmp = taosHashInit(pTableMeta->tableInfo.numOfTags,
+ hashTmp = taosHashInit(pTableMeta->tableInfo.numOfTags,
taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, HASH_NO_LOCK);
for (uint16_t i = pTableMeta->tableInfo.numOfColumns;
i < pTableMeta->tableInfo.numOfColumns + pTableMeta->tableInfo.numOfTags; i++) {
@@ -483,36 +473,72 @@ static int32_t smlModifyDBSchemas(SSmlHandle *info) {
ESchemaAction action = SCHEMA_ACTION_NULL;
code = smlProcessSchemaAction(info, pTableMeta->schema, hashTmp, sTableData->tags, &action, true);
if (code != TSDB_CODE_SUCCESS) {
- taosHashCleanup(hashTmp);
goto end;
}
- if (action == SCHEMA_ACTION_TAG){
- code = smlSendMetaMsg(info, &pName, sTableData, pTableMeta, action);
+ if (action != SCHEMA_ACTION_NULL){
+ SArray* pColumns = taosArrayInit(taosArrayGetSize(sTableData->cols) + pTableMeta->tableInfo.numOfColumns, sizeof(SField));
+ SArray* pTags = taosArrayInit(taosArrayGetSize(sTableData->tags) + pTableMeta->tableInfo.numOfTags, sizeof(SField));
+
+ for (uint16_t i = 0; i < pTableMeta->tableInfo.numOfColumns + pTableMeta->tableInfo.numOfTags; i++) {
+ SField field = {0};
+ field.type = pTableMeta->schema[i].type;
+ field.bytes = pTableMeta->schema[i].bytes;
+ strcpy(field.name, pTableMeta->schema[i].name);
+ if(i < pTableMeta->tableInfo.numOfColumns){
+ taosArrayPush(pColumns, &field);
+ }else{
+ taosArrayPush(pTags, &field);
+ }
+ }
+ smlBuildFieldsList(info, pTableMeta->schema, hashTmp, sTableData->tags, pTags, pTableMeta->tableInfo.numOfColumns, true);
+
+ code = smlSendMetaMsg(info, &pName, pColumns, pTags, pTableMeta, action);
if (code != TSDB_CODE_SUCCESS) {
- uError("SML:0x%" PRIx64 " smlSendMetaMsg failed. can not create %s", info->id, superTable);
+ uError("SML:0x%" PRIx64 " smlSendMetaMsg failed. can not create %s", info->id, pName.tname);
goto end;
}
}
+ taosMemoryFreeClear(pTableMeta);
code = catalogRefreshTableMeta(info->pCatalog, &conn, &pName, -1);
if (code != TSDB_CODE_SUCCESS) {
goto end;
}
+ code = catalogGetSTableMeta(info->pCatalog, &conn, &pName, &pTableMeta);
+ if (code != TSDB_CODE_SUCCESS) {
+ goto end;
+ }
taosHashClear(hashTmp);
- for (uint16_t i = 1; i < pTableMeta->tableInfo.numOfColumns; i++) {
+ for (uint16_t i = 0; i < pTableMeta->tableInfo.numOfColumns; i++) {
taosHashPut(hashTmp, pTableMeta->schema[i].name, strlen(pTableMeta->schema[i].name), &i, SHORT_BYTES);
}
action = SCHEMA_ACTION_NULL;
code = smlProcessSchemaAction(info, pTableMeta->schema, hashTmp, sTableData->cols, &action, false);
- taosHashCleanup(hashTmp);
if (code != TSDB_CODE_SUCCESS) {
goto end;
}
- if (action == SCHEMA_ACTION_COLUMN){
- code = smlSendMetaMsg(info, &pName, sTableData, pTableMeta, action);
+ if (action != SCHEMA_ACTION_NULL){
+ SArray* pColumns = taosArrayInit(taosArrayGetSize(sTableData->cols) + pTableMeta->tableInfo.numOfColumns, sizeof(SField));
+ SArray* pTags = taosArrayInit(taosArrayGetSize(sTableData->tags) + pTableMeta->tableInfo.numOfTags, sizeof(SField));
+
+ for (uint16_t i = 0; i < pTableMeta->tableInfo.numOfColumns + pTableMeta->tableInfo.numOfTags; i++) {
+ SField field = {0};
+ field.type = pTableMeta->schema[i].type;
+ field.bytes = pTableMeta->schema[i].bytes;
+ strcpy(field.name, pTableMeta->schema[i].name);
+ if(i < pTableMeta->tableInfo.numOfColumns){
+ taosArrayPush(pColumns, &field);
+ }else{
+ taosArrayPush(pTags, &field);
+ }
+ }
+
+ smlBuildFieldsList(info, pTableMeta->schema, hashTmp, sTableData->cols, pColumns, pTableMeta->tableInfo.numOfColumns, false);
+
+ code = smlSendMetaMsg(info, &pName, pColumns, pTags, pTableMeta, action);
if (code != TSDB_CODE_SUCCESS) {
- uError("SML:0x%" PRIx64 " smlSendMetaMsg failed. can not create %s", info->id, superTable);
+ uError("SML:0x%" PRIx64 " smlSendMetaMsg failed. can not create %s", info->id, pName.tname);
goto end;
}
}
@@ -522,15 +548,17 @@ static int32_t smlModifyDBSchemas(SSmlHandle *info) {
goto end;
}
needCheckMeta = true;
+ taosHashCleanup(hashTmp);
+ hashTmp = NULL;
} else {
uError("SML:0x%" PRIx64 " load table meta error: %s", info->id, tstrerror(code));
goto end;
}
- if (pTableMeta) taosMemoryFree(pTableMeta);
+ taosMemoryFreeClear(pTableMeta);
code = catalogGetSTableMeta(info->pCatalog, &conn, &pName, &pTableMeta);
if (code != TSDB_CODE_SUCCESS) {
- uError("SML:0x%" PRIx64 " catalogGetSTableMeta failed. super table name %s", info->id, (char *)superTable);
+ uError("SML:0x%" PRIx64 " catalogGetSTableMeta failed. super table name %s", info->id, pName.tname);
goto end;
}
@@ -538,12 +566,12 @@ static int32_t smlModifyDBSchemas(SSmlHandle *info) {
code = smlCheckMeta(&(pTableMeta->schema[pTableMeta->tableInfo.numOfColumns]), pTableMeta->tableInfo.numOfTags,
sTableData->tags, true);
if (code != TSDB_CODE_SUCCESS) {
- uError("SML:0x%" PRIx64 " check tag failed. super table name %s", info->id, (char *)superTable);
+ uError("SML:0x%" PRIx64 " check tag failed. super table name %s", info->id, pName.tname);
goto end;
}
code = smlCheckMeta(&(pTableMeta->schema[0]), pTableMeta->tableInfo.numOfColumns, sTableData->cols, false);
if (code != TSDB_CODE_SUCCESS) {
- uError("SML:0x%" PRIx64 " check cols failed. super table name %s", info->id, (char *)superTable);
+ uError("SML:0x%" PRIx64 " check cols failed. super table name %s", info->id, pName.tname);
goto end;
}
}
@@ -555,6 +583,8 @@ static int32_t smlModifyDBSchemas(SSmlHandle *info) {
return 0;
end:
+ taosHashCleanup(hashTmp);
+ taosMemoryFreeClear(pTableMeta);
catalogRefreshTableMeta(info->pCatalog, &conn, &pName, 1);
return code;
}
@@ -1502,6 +1532,7 @@ static SSmlHandle* smlBuildSmlInfo(STscObj* pTscObj, SRequestObj* request, SMLPr
info->pRequest = request;
info->msgBuf.buf = info->pRequest->msgBuf;
info->msgBuf.len = ERROR_MSG_BUF_DEFAULT_SIZE;
+ info->pRequest->stmtType = info->pQuery->pRoot->type;
}
info->exec = smlInitHandle(info->pQuery);
@@ -1531,7 +1562,7 @@ cleanup:
/************* TSDB_SML_JSON_PROTOCOL function start **************/
static int32_t smlJsonCreateSring(const char **output, char *input, int32_t inputLen) {
- *output = (const char *)taosMemoryMalloc(inputLen);
+ *output = (const char *)taosMemoryCalloc(1, inputLen);
if (*output == NULL) {
return TSDB_CODE_TSC_OUT_OF_MEMORY;
}
@@ -2057,10 +2088,6 @@ static int32_t smlParseInfluxLine(SSmlHandle *info, const char *sql) {
if (info->dataFormat) taosArrayDestroy(cols);
return ret;
}
- if (taosArrayGetSize(cols) > TSDB_MAX_COLUMNS) {
- smlBuildInvalidDataMsg(&info->msgBuf, "too many columns than 4096", NULL);
- return TSDB_CODE_PAR_TOO_MANY_COLUMNS;
- }
bool hasTable = true;
SSmlTableInfo *tinfo = NULL;
@@ -2094,6 +2121,11 @@ static int32_t smlParseInfluxLine(SSmlHandle *info, const char *sql) {
return TSDB_CODE_PAR_INVALID_TAGS_NUM;
}
+ if (taosArrayGetSize(cols) + taosArrayGetSize((*oneTable)->tags) > TSDB_MAX_COLUMNS) {
+ smlBuildInvalidDataMsg(&info->msgBuf, "too many columns than 4096", NULL);
+ return TSDB_CODE_PAR_TOO_MANY_COLUMNS;
+ }
+
(*oneTable)->sTableName = elements.measure;
(*oneTable)->sTableNameLen = elements.measureLen;
if (strlen((*oneTable)->childTableName) == 0) {
@@ -2301,6 +2333,9 @@ static int32_t smlInsertData(SSmlHandle *info) {
// info->affectedRows = taos_affected_rows(info->pRequest);
// return info->pRequest->code;
+ SAppClusterSummary *pActivity = &info->taos->pAppInfo->summary;
+ atomic_add_fetch_64((int64_t *)&pActivity->numOfInsertsReq, 1);
+
launchAsyncQuery(info->pRequest, info->pQuery, NULL);
return TSDB_CODE_SUCCESS;
}
@@ -2419,26 +2454,26 @@ static void smlInsertCallback(void *param, void *res, int32_t code) {
int32_t rows = taos_affected_rows(pRequest);
uDebug("SML:0x%" PRIx64 " result. code:%d, msg:%s", info->id, pRequest->code, pRequest->msgBuf);
+ Params *pParam = info->params;
// lock
- taosThreadSpinLock(&info->params->lock);
- info->params->request->body.resInfo.numOfRows += rows;
+ taosThreadSpinLock(&pParam->lock);
+ pParam->cnt++;
if (code != TSDB_CODE_SUCCESS) {
- info->params->request->code = code;
+ pParam->request->code = code;
+ pParam->request->body.resInfo.numOfRows += rows;
+ }else{
+ pParam->request->body.resInfo.numOfRows += info->affectedRows;
}
- taosThreadSpinUnlock(&info->params->lock);
+ if (pParam->cnt == pParam->total) {
+ tsem_post(&pParam->sem);
+ }
+ taosThreadSpinUnlock(&pParam->lock);
// unlock
-
uDebug("SML:0x%" PRIx64 " insert finished, code: %d, rows: %d, total: %d", info->id, code, rows, info->affectedRows);
- Params *pParam = info->params;
- bool isLast = info->isLast;
info->cost.endTime = taosGetTimestampUs();
info->cost.code = code;
smlPrintStatisticInfo(info);
smlDestroyInfo(info);
-
- if (isLast) {
- tsem_post(&pParam->sem);
- }
}
/**
@@ -2480,7 +2515,7 @@ TAOS_RES* taos_schemaless_insert(TAOS* taos, char* lines[], int numLines, int pr
pTscObj->schemalessType = 1;
SSmlMsgBuf msg = {ERROR_MSG_BUF_DEFAULT_SIZE, request->msgBuf};
- Params params;
+ Params params = {0};
params.request = request;
tsem_init(¶ms.sem, 0, 0);
taosThreadSpinInit(&(params.lock), 0);
@@ -2525,6 +2560,7 @@ TAOS_RES* taos_schemaless_insert(TAOS* taos, char* lines[], int numLines, int pr
}
batchs = ceil(((double)numLines) / LINE_BATCH);
+ params.total = batchs;
for (int i = 0; i < batchs; ++i) {
SRequestObj* req = (SRequestObj*)createRequest(pTscObj->id, TSDB_SQL_INSERT);
if(!req){
@@ -2543,11 +2579,9 @@ TAOS_RES* taos_schemaless_insert(TAOS* taos, char* lines[], int numLines, int pr
if (numLines > perBatch) {
numLines -= perBatch;
- info->isLast = false;
} else {
perBatch = numLines;
numLines = 0;
- info->isLast = true;
}
info->params = ¶ms;
diff --git a/source/client/src/tmq.c b/source/client/src/clientTmq.c
similarity index 91%
rename from source/client/src/tmq.c
rename to source/client/src/clientTmq.c
index 7637ffbc80baa3f4e67b4a4fc27bc57adb8b7d3a..66f992f05f94f1e583118044cea2271ec8fcd7ee 100644
--- a/source/client/src/tmq.c
+++ b/source/client/src/clientTmq.c
@@ -164,6 +164,7 @@ typedef struct {
union {
SMqDataRsp dataRsp;
SMqMetaRsp metaRsp;
+ STaosxRsp taosxRsp;
};
} SMqPollRspWrapper;
@@ -514,6 +515,10 @@ int32_t tmqCommitMsgImpl(tmq_t* tmq, const TAOS_RES* msg, int8_t async, tmq_comm
SMqMetaRspObj* pMetaRspObj = (SMqMetaRspObj*)msg;
topic = pMetaRspObj->topic;
vgId = pMetaRspObj->vgId;
+ } else if (TD_RES_TMQ_METADATA(msg)) {
+ SMqTaosxRspObj* pRspObj = (SMqTaosxRspObj*)msg;
+ topic = pRspObj->topic;
+ vgId = pRspObj->vgId;
} else {
return TSDB_CODE_TMQ_INVALID_MSG;
}
@@ -710,7 +715,7 @@ void tmqSendHbReq(void* param, void* tmrId) {
int32_t epoch = tmq->epoch;
SMqHbReq* pReq = taosMemoryMalloc(sizeof(SMqHbReq));
if (pReq == NULL) goto OVER;
- pReq->consumerId = consumerId;
+ pReq->consumerId = htobe64(consumerId);
pReq->epoch = epoch;
SMsgSendInfo* sendInfo = taosMemoryCalloc(1, sizeof(SMsgSendInfo));
@@ -810,8 +815,19 @@ int32_t tmq_subscription(tmq_t* tmq, tmq_list_t** topics) {
}
int32_t tmq_unsubscribe(tmq_t* tmq) {
+ int32_t rsp;
+ int32_t retryCnt = 0;
tmq_list_t* lst = tmq_list_new();
- int32_t rsp = tmq_subscribe(tmq, lst);
+ while (1) {
+ rsp = tmq_subscribe(tmq, lst);
+ if (rsp != TSDB_CODE_MND_CONSUMER_NOT_READY || retryCnt > 5) {
+ break;
+ } else {
+ retryCnt++;
+ taosMsleep(500);
+ }
+ }
+
tmq_list_destroy(lst);
return rsp;
}
@@ -829,7 +845,7 @@ void tmqFreeImpl(void* handle) {
int32_t sz = taosArrayGetSize(tmq->clientTopics);
for (int32_t i = 0; i < sz; i++) {
SMqClientTopic* pTopic = taosArrayGet(tmq->clientTopics, i);
- if (pTopic->schema.nCols) taosMemoryFree(pTopic->schema.pSchema);
+ if (pTopic->schema.nCols) taosMemoryFreeClear(pTopic->schema.pSchema);
int32_t vgSz = taosArrayGetSize(pTopic->vgs);
taosArrayDestroy(pTopic->vgs);
}
@@ -1065,6 +1081,7 @@ int32_t tmqPollCb(void* param, SDataBuf* pMsg, int32_t code) {
tsem_destroy(&pParam->rspSem);
taosMemoryFree(pParam);
taosMemoryFree(pMsg->pData);
+ taosMemoryFree(pMsg->pEpSet);
terrno = TSDB_CODE_TMQ_CONSUMER_CLOSED;
return -1;
}
@@ -1103,6 +1120,7 @@ int32_t tmqPollCb(void* param, SDataBuf* pMsg, int32_t code) {
tmqEpoch);
tsem_post(&tmq->rspSem);
taosMemoryFree(pMsg->pData);
+ taosMemoryFree(pMsg->pEpSet);
return 0;
}
@@ -1116,6 +1134,7 @@ int32_t tmqPollCb(void* param, SDataBuf* pMsg, int32_t code) {
SMqPollRspWrapper* pRspWrapper = taosAllocateQitem(sizeof(SMqPollRspWrapper), DEF_QITEM);
if (pRspWrapper == NULL) {
taosMemoryFree(pMsg->pData);
+ taosMemoryFree(pMsg->pEpSet);
tscWarn("msg discard from vgId:%d, epoch %d since out of memory", vgId, epoch);
goto CREATE_MSG_FAIL;
}
@@ -1130,17 +1149,29 @@ int32_t tmqPollCb(void* param, SDataBuf* pMsg, int32_t code) {
tDecodeSMqDataRsp(&decoder, &pRspWrapper->dataRsp);
tDecoderClear(&decoder);
memcpy(&pRspWrapper->dataRsp, pMsg->pData, sizeof(SMqRspHead));
- } else {
- ASSERT(rspType == TMQ_MSG_TYPE__POLL_META_RSP);
- tDecodeSMqMetaRsp(POINTER_SHIFT(pMsg->pData, sizeof(SMqRspHead)), &pRspWrapper->metaRsp);
+
+ tscDebug("consumer:%" PRId64 ", recv poll: vgId:%d, req offset %" PRId64 ", rsp offset %" PRId64 " type %d",
+ tmq->consumerId, pVg->vgId, pRspWrapper->dataRsp.reqOffset.version, pRspWrapper->dataRsp.rspOffset.version,
+ rspType);
+
+ } else if (rspType == TMQ_MSG_TYPE__POLL_META_RSP) {
+ SDecoder decoder;
+ tDecoderInit(&decoder, POINTER_SHIFT(pMsg->pData, sizeof(SMqRspHead)), pMsg->len - sizeof(SMqRspHead));
+ tDecodeSMqMetaRsp(&decoder, &pRspWrapper->metaRsp);
+ tDecoderClear(&decoder);
memcpy(&pRspWrapper->metaRsp, pMsg->pData, sizeof(SMqRspHead));
+ } else if (rspType == TMQ_MSG_TYPE__TAOSX_RSP) {
+ SDecoder decoder;
+ tDecoderInit(&decoder, POINTER_SHIFT(pMsg->pData, sizeof(SMqRspHead)), pMsg->len - sizeof(SMqRspHead));
+ tDecodeSTaosxRsp(&decoder, &pRspWrapper->taosxRsp);
+ tDecoderClear(&decoder);
+ memcpy(&pRspWrapper->taosxRsp, pMsg->pData, sizeof(SMqRspHead));
+ } else {
+ ASSERT(0);
}
taosMemoryFree(pMsg->pData);
-
- tscDebug("consumer:%" PRId64 ", recv poll: vgId:%d, req offset %" PRId64 ", rsp offset %" PRId64 " type %d",
- tmq->consumerId, pVg->vgId, pRspWrapper->dataRsp.reqOffset.version, pRspWrapper->dataRsp.rspOffset.version,
- rspType);
+ taosMemoryFree(pMsg->pEpSet);
taosWriteQitem(tmq->mqueue, pRspWrapper);
tsem_post(&tmq->rspSem);
@@ -1195,6 +1226,8 @@ bool tmqUpdateEp(tmq_t* tmq, int32_t epoch, SMqAskEpRsp* pRsp) {
SMqClientTopic topic = {0};
SMqSubTopicEp* pTopicEp = taosArrayGet(pRsp->topics, i);
topic.schema = pTopicEp->schema;
+ pTopicEp->schema.nCols = 0;
+ pTopicEp->schema.pSchema = NULL;
tstrncpy(topic.topicName, pTopicEp->topic, TSDB_TOPIC_FNAME_LEN);
tstrncpy(topic.db, pTopicEp->db, TSDB_DB_FNAME_LEN);
@@ -1228,7 +1261,7 @@ bool tmqUpdateEp(tmq_t* tmq, int32_t epoch, SMqAskEpRsp* pRsp) {
int32_t sz = taosArrayGetSize(tmq->clientTopics);
for (int32_t i = 0; i < sz; i++) {
SMqClientTopic* pTopic = taosArrayGet(tmq->clientTopics, i);
- if (pTopic->schema.nCols) taosMemoryFree(pTopic->schema.pSchema);
+ if (pTopic->schema.nCols) taosMemoryFreeClear(pTopic->schema.pSchema);
int32_t vgSz = taosArrayGetSize(pTopic->vgs);
taosArrayDestroy(pTopic->vgs);
}
@@ -1440,6 +1473,24 @@ SMqRspObj* tmqBuildRspFromWrapper(SMqPollRspWrapper* pWrapper) {
return pRspObj;
}
+SMqTaosxRspObj* tmqBuildTaosxRspFromWrapper(SMqPollRspWrapper* pWrapper) {
+ SMqTaosxRspObj* pRspObj = taosMemoryCalloc(1, sizeof(SMqTaosxRspObj));
+ pRspObj->resType = RES_TYPE__TMQ_METADATA;
+ tstrncpy(pRspObj->topic, pWrapper->topicHandle->topicName, TSDB_TOPIC_FNAME_LEN);
+ tstrncpy(pRspObj->db, pWrapper->topicHandle->db, TSDB_DB_FNAME_LEN);
+ pRspObj->vgId = pWrapper->vgHandle->vgId;
+ pRspObj->resIter = -1;
+ memcpy(&pRspObj->rsp, &pWrapper->taosxRsp, sizeof(STaosxRsp));
+
+ pRspObj->resInfo.totalRows = 0;
+ pRspObj->resInfo.precision = TSDB_TIME_PRECISION_MILLI;
+ if (!pWrapper->taosxRsp.withSchema) {
+ setResSchemaInfo(&pRspObj->resInfo, pWrapper->topicHandle->schema.pSchema, pWrapper->topicHandle->schema.nCols);
+ }
+
+ return pRspObj;
+}
+
int32_t tmqPollImpl(tmq_t* tmq, int64_t timeout) {
/*tscDebug("call poll");*/
for (int i = 0; i < taosArrayGetSize(tmq->clientTopics); i++) {
@@ -1552,6 +1603,7 @@ void* tmqHandleAllRsp(tmq_t* tmq, int64_t timeout, bool pollIfReset) {
return NULL;
} else if (rspWrapper->tmqRspType == TMQ_MSG_TYPE__POLL_RSP) {
SMqPollRspWrapper* pollRspWrapper = (SMqPollRspWrapper*)rspWrapper;
+ tscDebug("consumer %ld actual process poll rsp", tmq->consumerId);
/*atomic_sub_fetch_32(&tmq->readyRequest, 1);*/
int32_t consumerEpoch = atomic_load_32(&tmq->epoch);
if (pollRspWrapper->dataRsp.head.epoch == consumerEpoch) {
@@ -1581,8 +1633,7 @@ void* tmqHandleAllRsp(tmq_t* tmq, int64_t timeout, bool pollIfReset) {
SMqClientVg* pVg = pollRspWrapper->vgHandle;
/*printf("vgId:%d, offset %" PRId64 " up to %" PRId64 "\n", pVg->vgId, pVg->currentOffset,
* rspMsg->msg.rspOffset);*/
- pVg->currentOffset.version = pollRspWrapper->metaRsp.rspOffset;
- pVg->currentOffset.type = TMQ_OFFSET__LOG;
+ pVg->currentOffset = pollRspWrapper->metaRsp.rspOffset;
atomic_store_32(&pVg->vgStatus, TMQ_VG_STATUS__IDLE);
// build rsp
SMqMetaRspObj* pRsp = tmqBuildMetaRspFromWrapper(pollRspWrapper);
@@ -1593,6 +1644,36 @@ void* tmqHandleAllRsp(tmq_t* tmq, int64_t timeout, bool pollIfReset) {
pollRspWrapper->metaRsp.head.epoch, consumerEpoch);
taosFreeQitem(pollRspWrapper);
}
+ } else if (rspWrapper->tmqRspType == TMQ_MSG_TYPE__TAOSX_RSP) {
+ SMqPollRspWrapper* pollRspWrapper = (SMqPollRspWrapper*)rspWrapper;
+ /*atomic_sub_fetch_32(&tmq->readyRequest, 1);*/
+ int32_t consumerEpoch = atomic_load_32(&tmq->epoch);
+ if (pollRspWrapper->taosxRsp.head.epoch == consumerEpoch) {
+ SMqClientVg* pVg = pollRspWrapper->vgHandle;
+ /*printf("vgId:%d, offset %" PRId64 " up to %" PRId64 "\n", pVg->vgId, pVg->currentOffset,
+ * rspMsg->msg.rspOffset);*/
+ pVg->currentOffset = pollRspWrapper->taosxRsp.rspOffset;
+ atomic_store_32(&pVg->vgStatus, TMQ_VG_STATUS__IDLE);
+ if (pollRspWrapper->taosxRsp.blockNum == 0) {
+ taosFreeQitem(pollRspWrapper);
+ rspWrapper = NULL;
+ continue;
+ }
+
+ // build rsp
+ void* pRsp = NULL;
+ if (pollRspWrapper->taosxRsp.createTableNum == 0) {
+ pRsp = tmqBuildRspFromWrapper(pollRspWrapper);
+ } else {
+ pRsp = tmqBuildTaosxRspFromWrapper(pollRspWrapper);
+ }
+ taosFreeQitem(pollRspWrapper);
+ return pRsp;
+ } else {
+ tscDebug("msg discard since epoch mismatch: msg epoch %d, consumer epoch %d\n",
+ pollRspWrapper->taosxRsp.head.epoch, consumerEpoch);
+ taosFreeQitem(pollRspWrapper);
+ }
} else {
/*printf("handle ep rsp %d\n", rspMsg->head.mqMsgType);*/
bool reset = false;
@@ -1638,7 +1719,10 @@ TAOS_RES* tmq_consumer_poll(tmq_t* tmq, int64_t timeout) {
while (1) {
tmqHandleAllDelayedTask(tmq);
- if (tmqPollImpl(tmq, timeout) < 0) return NULL;
+ if (tmqPollImpl(tmq, timeout) < 0) {
+ tscDebug("return since poll err");
+ /*return NULL;*/
+ }
rspObj = tmqHandleAllRsp(tmq, timeout, false);
if (rspObj) {
@@ -1708,6 +1792,8 @@ tmq_res_t tmq_get_res_type(TAOS_RES* res) {
return TMQ_RES_DATA;
}
return TMQ_RES_TABLE_META;
+ } else if (TD_RES_TMQ_METADATA(res)) {
+ return TMQ_RES_METADATA;
} else {
return TMQ_RES_INVALID;
}
@@ -1720,6 +1806,9 @@ const char* tmq_get_topic_name(TAOS_RES* res) {
} else if (TD_RES_TMQ_META(res)) {
SMqMetaRspObj* pMetaRspObj = (SMqMetaRspObj*)res;
return strchr(pMetaRspObj->topic, '.') + 1;
+ } else if (TD_RES_TMQ_METADATA(res)) {
+ SMqTaosxRspObj* pRspObj = (SMqTaosxRspObj*)res;
+ return strchr(pRspObj->topic, '.') + 1;
} else {
return NULL;
}
@@ -1732,6 +1821,9 @@ const char* tmq_get_db_name(TAOS_RES* res) {
} else if (TD_RES_TMQ_META(res)) {
SMqMetaRspObj* pMetaRspObj = (SMqMetaRspObj*)res;
return strchr(pMetaRspObj->db, '.') + 1;
+ } else if (TD_RES_TMQ_METADATA(res)) {
+ SMqTaosxRspObj* pRspObj = (SMqTaosxRspObj*)res;
+ return strchr(pRspObj->db, '.') + 1;
} else {
return NULL;
}
@@ -1744,6 +1836,9 @@ int32_t tmq_get_vgroup_id(TAOS_RES* res) {
} else if (TD_RES_TMQ_META(res)) {
SMqMetaRspObj* pMetaRspObj = (SMqMetaRspObj*)res;
return pMetaRspObj->vgId;
+ } else if (TD_RES_TMQ_METADATA(res)) {
+ SMqTaosxRspObj* pRspObj = (SMqTaosxRspObj*)res;
+ return pRspObj->vgId;
} else {
return -1;
}
@@ -1757,6 +1852,13 @@ const char* tmq_get_table_name(TAOS_RES* res) {
return NULL;
}
return (const char*)taosArrayGetP(pRspObj->rsp.blockTbName, pRspObj->resIter);
+ } else if (TD_RES_TMQ_METADATA(res)) {
+ SMqTaosxRspObj* pRspObj = (SMqTaosxRspObj*)res;
+ if (!pRspObj->rsp.withTbName || pRspObj->rsp.blockTbName == NULL || pRspObj->resIter < 0 ||
+ pRspObj->resIter >= pRspObj->rsp.blockNum) {
+ return NULL;
+ }
+ return (const char*)taosArrayGetP(pRspObj->rsp.blockTbName, pRspObj->resIter);
}
return NULL;
}
diff --git a/source/client/test/smlTest.cpp b/source/client/test/smlTest.cpp
index 68a8b9d336ae49e34c3dab28d3fdad6d3f27e9d4..b62238ccf26c991a516313270889a05a5b87d6ee 100644
--- a/source/client/test/smlTest.cpp
+++ b/source/client/test/smlTest.cpp
@@ -692,3 +692,52 @@ TEST(testCase, smlParseTelnetLine_diff_json_type2_Test) {
ASSERT_NE(ret, 0);
smlDestroyInfo(info);
}
+
+TEST(testCase, sml_col_4096_Test) {
+ SSmlHandle *info = smlBuildSmlInfo(NULL, NULL, TSDB_SML_LINE_PROTOCOL, TSDB_SML_TIMESTAMP_NANO_SECONDS);
+ ASSERT_NE(info, nullptr);
+
+ const char *sql[] = {
+ "spgwgvldxv,id=spgwgvldxv_1,t0=f c0=t,c1=t,c2=t,c3=t,c4=t,c5=t,c6=t,c7=t,c8=t,c9=t,c10=t,c11=t,c12=t,c13=t,c14=t,c15=t,c16=t,c17=t,c18=t,c19=t,c20=t,c21=t,c22=t,c23=t,c24=t,c25=t,c26=t,c27=t,c28=t,c29=t,c30=t,c31=t,c32=t,c33=t,c34=t,c35=t,c36=t,c37=t,c38=t,c39=t,c40=t,c41=t,c42=t,c43=t,c44=t,c45=t,c46=t,c47=t,c48=t,c49=t,c50=t,c51=t,c52=t,c53=t,c54=t,c55=t,c56=t,c57=t,c58=t,c59=t,c60=t,c61=t,c62=t,c63=t,c64=t,c65=t,c66=t,c67=t,c68=t,c69=t,c70=t,c71=t,c72=t,c73=t,c74=t,c75=t,c76=t,c77=t,c78=t,c79=t,c80=t,c81=t,c82=t,c83=t,c84=t,c85=t,c86=t,c87=t,c88=t,c89=t,c90=t,c91=t,c92=t,c93=t,c94=t,c95=t,c96=t,c97=t,c98=t,c99=t,c100=t,"
+ "c101=t,c102=t,c103=t,c104=t,c105=t,c106=t,c107=t,c108=t,c109=t,c110=t,c111=t,c112=t,c113=t,c114=t,c115=t,c116=t,c117=t,c118=t,c119=t,c120=t,c121=t,c122=t,c123=t,c124=t,c125=t,c126=t,c127=t,c128=t,c129=t,c130=t,c131=t,c132=t,c133=t,c134=t,c135=t,c136=t,c137=t,c138=t,c139=t,c140=t,c141=t,c142=t,c143=t,c144=t,c145=t,c146=t,c147=t,c148=t,c149=t,c150=t,c151=t,c152=t,c153=t,c154=t,c155=t,c156=t,c157=t,c158=t,c159=t,c160=t,c161=t,c162=t,c163=t,c164=t,c165=t,c166=t,c167=t,c168=t,c169=t,c170=t,c171=t,c172=t,c173=t,c174=t,c175=t,c176=t,c177=t,c178=t,c179=t,c180=t,c181=t,c182=t,c183=t,c184=t,c185=t,c186=t,c187=t,c188=t,c189=t,"
+ "c190=t,c191=t,c192=t,c193=t,c194=t,c195=t,c196=t,c197=t,c198=t,c199=t,c200=t,c201=t,c202=t,c203=t,c204=t,c205=t,c206=t,c207=t,c208=t,c209=t,c210=t,c211=t,c212=t,c213=t,c214=t,c215=t,c216=t,c217=t,c218=t,c219=t,c220=t,c221=t,c222=t,c223=t,c224=t,c225=t,c226=t,c227=t,c228=t,c229=t,c230=t,c231=t,c232=t,c233=t,c234=t,c235=t,c236=t,c237=t,c238=t,c239=t,c240=t,c241=t,c242=t,c243=t,c244=t,c245=t,c246=t,c247=t,c248=t,c249=t,c250=t,c251=t,c252=t,c253=t,c254=t,c255=t,c256=t,c257=t,c258=t,c259=t,c260=t,c261=t,c262=t,c263=t,c264=t,c265=t,c266=t,c267=t,c268=t,c269=t,c270=t,c271=t,c272=t,c273=t,c274=t,c275=t,c276=t,c277=t,c278=t,"
+ "c279=t,c280=t,c281=t,c282=t,c283=t,c284=t,c285=t,c286=t,c287=t,c288=t,c289=t,c290=t,c291=t,c292=t,c293=t,c294=t,c295=t,c296=t,c297=t,c298=t,c299=t,c300=t,c301=t,c302=t,c303=t,c304=t,c305=t,c306=t,c307=t,c308=t,c309=t,c310=t,c311=t,c312=t,c313=t,c314=t,c315=t,c316=t,c317=t,c318=t,c319=t,c320=t,c321=t,c322=t,c323=t,c324=t,c325=t,c326=t,c327=t,c328=t,c329=t,c330=t,c331=t,c332=t,c333=t,c334=t,c335=t,c336=t,c337=t,c338=t,c339=t,c340=t,c341=t,c342=t,c343=t,c344=t,c345=t,c346=t,c347=t,c348=t,c349=t,c350=t,c351=t,c352=t,c353=t,c354=t,c355=t,c356=t,c357=t,c358=t,c359=t,c360=t,c361=t,c362=t,c363=t,c364=t,c365=t,c366=t,c367=t,c368=t,c369=t,c370=t,c371=t,c372=t,c373=t,c374=t,c375=t,c376=t,c377=t,c378=t,c379=t,c380=t,c381=t,c382=t,c383=t,c384=t,c385=t,c386=t,c387=t,c388=t,c389=t,c390=t,c391=t,c392=t,c393=t,c394=t,c395=t,c396=t,c397=t,c398=t,c399=t,c400=t,c401=t,c402=t,c403=t,c404=t,c405=t,c406=t,c407=t,c408=t,c409=t,c410=t,c411=t,c412=t,c413=t,c414=t,c415=t,c416=t,c417=t,c418=t,c419=t,c420=t,c421=t,c422=t,c423=t,c424=t,c425=t,c426=t,c427=t,c428=t,c429=t,c430=t,c431=t,c432=t,c433=t,c434=t,c435=t,c436=t,c437=t,c438=t,c439=t,c440=t,c441=t,c442=t,c443=t,c444=t,c445=t,c446=t,"
+ "c447=t,c448=t,c449=t,c450=t,c451=t,c452=t,c453=t,c454=t,c455=t,c456=t,c457=t,c458=t,c459=t,c460=t,c461=t,c462=t,c463=t,c464=t,c465=t,c466=t,c467=t,c468=t,c469=t,c470=t,c471=t,c472=t,c473=t,c474=t,c475=t,c476=t,c477=t,c478=t,c479=t,c480=t,c481=t,c482=t,c483=t,c484=t,c485=t,c486=t,c487=t,c488=t,c489=t,c490=t,c491=t,c492=t,c493=t,c494=t,c495=t,c496=t,c497=t,c498=t,c499=t,c500=t,c501=t,c502=t,c503=t,c504=t,c505=t,c506=t,c507=t,c508=t,c509=t,c510=t,c511=t,c512=t,c513=t,c514=t,c515=t,c516=t,c517=t,c518=t,c519=t,c520=t,c521=t,c522=t,c523=t,c524=t,c525=t,c526=t,c527=t,c528=t,c529=t,c530=t,c531=t,c532=t,c533=t,c534=t,c535=t,c536=t,c537=t,c538=t,c539=t,c540=t,c541=t,c542=t,c543=t,c544=t,c545=t,c546=t,c547=t,c548=t,c549=t,c550=t,c551=t,c552=t,c553=t,c554=t,c555=t,c556=t,c557=t,c558=t,c559=t,c560=t,c561=t,c562=t,c563=t,c564=t,c565=t,c566=t,c567=t,c568=t,c569=t,c570=t,c571=t,c572=t,c573=t,c574=t,c575=t,c576=t,c577=t,c578=t,c579=t,c580=t,c581=t,c582=t,c583=t,c584=t,c585=t,c586=t,c587=t,c588=t,c589=t,c590=t,c591=t,c592=t,c593=t,c594=t,c595=t,c596=t,c597=t,c598=t,c599=t,c600=t,c601=t,c602=t,c603=t,c604=t,c605=t,c606=t,c607=t,c608=t,c609=t,c610=t,c611=t,c612=t,c613=t,c614=t,"
+ "c615=t,c616=t,c617=t,c618=t,c619=t,c620=t,c621=t,c622=t,c623=t,c624=t,c625=t,c626=t,c627=t,c628=t,c629=t,c630=t,c631=t,c632=t,c633=t,c634=t,c635=t,c636=t,c637=t,c638=t,c639=t,c640=t,c641=t,c642=t,c643=t,c644=t,c645=t,c646=t,c647=t,c648=t,c649=t,c650=t,c651=t,c652=t,c653=t,c654=t,c655=t,c656=t,c657=t,c658=t,c659=t,c660=t,c661=t,c662=t,c663=t,c664=t,c665=t,c666=t,c667=t,c668=t,c669=t,c670=t,c671=t,c672=t,c673=t,c674=t,c675=t,c676=t,c677=t,c678=t,c679=t,c680=t,c681=t,c682=t,c683=t,c684=t,c685=t,c686=t,c687=t,c688=t,c689=t,c690=t,c691=t,c692=t,c693=t,c694=t,c695=t,c696=t,c697=t,c698=t,c699=t,c700=t,c701=t,c702=t,c703=t,c704=t,c705=t,c706=t,c707=t,c708=t,c709=t,c710=t,c711=t,c712=t,c713=t,c714=t,c715=t,c716=t,c717=t,c718=t,c719=t,c720=t,c721=t,c722=t,c723=t,c724=t,c725=t,c726=t,c727=t,c728=t,c729=t,c730=t,c731=t,c732=t,c733=t,c734=t,c735=t,c736=t,c737=t,c738=t,c739=t,c740=t,c741=t,c742=t,c743=t,c744=t,c745=t,c746=t,c747=t,c748=t,c749=t,c750=t,c751=t,c752=t,c753=t,c754=t,c755=t,c756=t,c757=t,c758=t,c759=t,c760=t,c761=t,c762=t,c763=t,c764=t,c765=t,c766=t,c767=t,c768=t,c769=t,c770=t,c771=t,c772=t,c773=t,c774=t,c775=t,c776=t,c777=t,c778=t,c779=t,c780=t,c781=t,c782=t,"
+ "c783=t,c784=t,c785=t,c786=t,c787=t,c788=t,c789=t,c790=t,c791=t,c792=t,c793=t,c794=t,c795=t,c796=t,c797=t,c798=t,c799=t,c800=t,c801=t,c802=t,c803=t,c804=t,c805=t,c806=t,c807=t,c808=t,c809=t,c810=t,c811=t,c812=t,c813=t,"
+ "c814=t,c815=t,c816=t,c817=t,c818=t,c819=t,c820=t,c821=t,c822=t,c823=t,c824=t,c825=t,c826=t,c827=t,c828=t,c829=t,c830=t,c831=t,c832=t,c833=t,c834=t,c835=t,c836=t,c837=t,c838=t,c839=t,c840=t,c841=t,c842=t,c843=t,c844=t,c845=t,c846=t,c847=t,c848=t,c849=t,c850=t,c851=t,c852=t,c853=t,c854=t,c855=t,c856=t,c857=t,c858=t,c859=t,c860=t,c861=t,c862=t,"
+ "c863=t,c864=t,c865=t,c866=t,c867=t,c868=t,c869=t,c870=t,c871=t,c872=t,c873=t,c874=t,c875=t,c876=t,c877=t,c878=t,c879=t,c880=t,c881=t,c882=t,c883=t,c884=t,c885=t,c886=t,c887=t,c888=t,c889=t,c890=t,c891=t,c892=t,c893=t,c894=t,c895=t,c896=t,c897=t,c898=t,c899=t,c900=t,c901=t,c902=t,c903=t,c904=t,c905=t,c906=t,c907=t,c908=t,c909=t,c910=t,c911=t,c912=t,c913=t,c914=t,c915=t,c916=t,c917=t,c918=t,c919=t,c920=t,c921=t,c922=t,c923=t,c924=t,c925=t,c926=t,c927=t,c928=t,c929=t,c930=t,c931=t,c932=t,c933=t,c934=t,c935=t,c936=t,c937=t,c938=t,c939=t,c940=t,c941=t,c942=t,c943=t,c944=t,c945=t,c946=t,c947=t,c948=t,c949=t,c950=t,c951=t,c952=t,c953=t,c954=t,c955=t,c956=t,c957=t,c958=t,c959=t,c960=t,c961=t,c962=t,c963=t,c964=t,c965=t,c966=t,c967=t,c968=t,c969=t,c970=t,c971=t,c972=t,c973=t,c974=t,c975=t,c976=t,c977=t,c978=t,c979=t,c980=t,c981=t,c982=t,c983=t,c984=t,c985=t,c986=t,c987=t,c988=t,c989=t,c990=t,c991=t,c992=t,c993=t,c994=t,c995=t,c996=t,c997=t,c998=t,c999=t,c1000=t,c1001=t,c1002=t,c1003=t,c1004=t,c1005=t,c1006=t,c1007=t,c1008=t,c1009=t,c1010=t,c1011=t,c1012=t,c1013=t,c1014=t,c1015=t,c1016=t,c1017=t,c1018=t,c1019=t,c1020=t,c1021=t,c1022=t,c1023=t,c1024=t,c1025=t,c1026=t,"
+ "c1027=t,c1028=t,c1029=t,c1030=t,c1031=t,c1032=t,c1033=t,c1034=t,c1035=t,c1036=t,c1037=t,c1038=t,c1039=t,c1040=t,c1041=t,c1042=t,c1043=t,c1044=t,c1045=t,c1046=t,c1047=t,c1048=t,c1049=t,c1050=t,c1051=t,c1052=t,c1053=t,c1054=t,c1055=t,c1056=t,c1057=t,c1058=t,c1059=t,c1060=t,c1061=t,c1062=t,c1063=t,c1064=t,c1065=t,c1066=t,c1067=t,c1068=t,c1069=t,c1070=t,c1071=t,c1072=t,c1073=t,c1074=t,c1075=t,c1076=t,c1077=t,c1078=t,c1079=t,c1080=t,c1081=t,c1082=t,c1083=t,c1084=t,c1085=t,c1086=t,c1087=t,c1088=t,c1089=t,c1090=t,c1091=t,c1092=t,c1093=t,c1094=t,c1095=t,c1096=t,c1097=t,c1098=t,c1099=t,c1100=t,c1101=t,c1102=t,c1103=t,c1104=t,c1105=t,c1106=t,c1107=t,c1108=t,c1109=t,c1110=t,c1111=t,c1112=t,c1113=t,c1114=t,c1115=t,c1116=t,c1117=t,c1118=t,c1119=t,c1120=t,c1121=t,c1122=t,c1123=t,c1124=t,c1125=t,c1126=t,c1127=t,c1128=t,c1129=t,c1130=t,c1131=t,c1132=t,c1133=t,c1134=t,c1135=t,c1136=t,c1137=t,c1138=t,c1139=t,c1140=t,c1141=t,c1142=t,c1143=t,c1144=t,c1145=t,c1146=t,c1147=t,c1148=t,c1149=t,c1150=t,c1151=t,c1152=t,c1153=t,c1154=t,c1155=t,c1156=t,c1157=t,c1158=t,c1159=t,c1160=t,c1161=t,c1162=t,c1163=t,c1164=t,c1165=t,c1166=t,c1167=t,c1168=t,c1169=t,c1170=t,c1171=t,c1172=t,c1173=t,"
+ "c1174=t,c1175=t,c1176=t,c1177=t,c1178=t,c1179=t,c1180=t,c1181=t,c1182=t,c1183=t,c1184=t,c1185=t,c1186=t,c1187=t,c1188=t,c1189=t,c1190=t,c1191=t,c1192=t,c1193=t,c1194=t,c1195=t,c1196=t,c1197=t,c1198=t,c1199=t,c1200=t,c1201=t,c1202=t,c1203=t,c1204=t,c1205=t,c1206=t,c1207=t,c1208=t,c1209=t,c1210=t,c1211=t,c1212=t,c1213=t,c1214=t,c1215=t,c1216=t,c1217=t,c1218=t,c1219=t,c1220=t,c1221=t,c1222=t,c1223=t,c1224=t,c1225=t,c1226=t,c1227=t,c1228=t,c1229=t,c1230=t,c1231=t,c1232=t,c1233=t,c1234=t,c1235=t,c1236=t,c1237=t,c1238=t,c1239=t,c1240=t,c1241=t,c1242=t,c1243=t,c1244=t,c1245=t,c1246=t,c1247=t,c1248=t,c1249=t,c1250=t,c1251=t,c1252=t,c1253=t,c1254=t,c1255=t,c1256=t,c1257=t,c1258=t,c1259=t,c1260=t,c1261=t,c1262=t,c1263=t,c1264=t,c1265=t,c1266=t,c1267=t,c1268=t,c1269=t,c1270=t,c1271=t,c1272=t,c1273=t,c1274=t,c1275=t,c1276=t,c1277=t,c1278=t,c1279=t,c1280=t,c1281=t,c1282=t,c1283=t,c1284=t,c1285=t,c1286=t,c1287=t,c1288=t,c1289=t,c1290=t,c1291=t,c1292=t,c1293=t,c1294=t,c1295=t,c1296=t,c1297=t,c1298=t,c1299=t,c1300=t,c1301=t,c1302=t,c1303=t,c1304=t,c1305=t,c1306=t,c1307=t,c1308=t,c1309=t,c1310=t,c1311=t,c1312=t,c1313=t,c1314=t,c1315=t,c1316=t,c1317=t,c1318=t,c1319=t,c1320=t,"
+ "c1321=t,c1322=t,c1323=t,c1324=t,c1325=t,c1326=t,c1327=t,c1328=t,c1329=t,c1330=t,c1331=t,c1332=t,c1333=t,c1334=t,c1335=t,c1336=t,c1337=t,c1338=t,c1339=t,c1340=t,c1341=t,c1342=t,c1343=t,c1344=t,c1345=t,c1346=t,c1347=t,"
+ "c1348=t,c1349=t,c1350=t,c1351=t,c1352=t,c1353=t,c1354=t,c1355=t,c1356=t,c1357=t,c1358=t,c1359=t,c1360=t,c1361=t,c1362=t,c1363=t,c1364=t,c1365=t,c1366=t,c1367=t,c1368=t,c1369=t,c1370=t,c1371=t,c1372=t,c1373=t,c1374=t,c1375=t,c1376=t,c1377=t,c1378=t,c1379=t,c1380=t,c1381=t,c1382=t,c1383=t,c1384=t,c1385=t,c1386=t,c1387=t,c1388=t,c1389=t,c1390=t,c1391=t,c1392=t,c1393=t,c1394=t,c1395=t,c1396=t,c1397=t,c1398=t,c1399=t,c1400=t,c1401=t,c1402=t,c1403=t,c1404=t,c1405=t,c1406=t,c1407=t,c1408=t,c1409=t,c1410=t,c1411=t,c1412=t,c1413=t,c1414=t,c1415=t,c1416=t,c1417=t,c1418=t,c1419=t,c1420=t,c1421=t,c1422=t,c1423=t,c1424=t,c1425=t,c1426=t,c1427=t,c1428=t,c1429=t,c1430=t,c1431=t,c1432=t,c1433=t,c1434=t,c1435=t,c1436=t,c1437=t,c1438=t,c1439=t,c1440=t,c1441=t,c1442=t,c1443=t,c1444=t,c1445=t,c1446=t,c1447=t,c1448=t,c1449=t,c1450=t,c1451=t,c1452=t,c1453=t,c1454=t,c1455=t,c1456=t,c1457=t,c1458=t,c1459=t,c1460=t,c1461=t,c1462=t,c1463=t,c1464=t,c1465=t,c1466=t,c1467=t,c1468=t,c1469=t,c1470=t,c1471=t,c1472=t,c1473=t,c1474=t,c1475=t,c1476=t,c1477=t,c1478=t,c1479=t,c1480=t,c1481=t,c1482=t,c1483=t,c1484=t,c1485=t,c1486=t,c1487=t,c1488=t,c1489=t,c1490=t,c1491=t,c1492=t,c1493=t,c1494=t,"
+ "c1495=t,c1496=t,c1497=t,c1498=t,c1499=t,c1500=t,c1501=t,c1502=t,c1503=t,c1504=t,c1505=t,c1506=t,c1507=t,c1508=t,c1509=t,c1510=t,c1511=t,c1512=t,c1513=t,c1514=t,c1515=t,c1516=t,c1517=t,c1518=t,c1519=t,c1520=t,c1521=t,c1522=t,c1523=t,c1524=t,c1525=t,c1526=t,c1527=t,c1528=t,c1529=t,c1530=t,c1531=t,c1532=t,c1533=t,c1534=t,c1535=t,c1536=t,c1537=t,c1538=t,c1539=t,c1540=t,c1541=t,c1542=t,c1543=t,c1544=t,c1545=t,c1546=t,c1547=t,c1548=t,c1549=t,c1550=t,c1551=t,c1552=t,c1553=t,c1554=t,c1555=t,c1556=t,c1557=t,c1558=t,c1559=t,c1560=t,c1561=t,c1562=t,c1563=t,c1564=t,c1565=t,c1566=t,c1567=t,c1568=t,c1569=t,c1570=t,c1571=t,c1572=t,c1573=t,c1574=t,c1575=t,c1576=t,c1577=t,c1578=t,c1579=t,c1580=t,c1581=t,c1582=t,c1583=t,c1584=t,c1585=t,c1586=t,c1587=t,c1588=t,c1589=t,c1590=t,c1591=t,c1592=t,c1593=t,c1594=t,c1595=t,c1596=t,c1597=t,c1598=t,c1599=t,c1600=t,c1601=t,c1602=t,c1603=t,c1604=t,c1605=t,c1606=t,c1607=t,c1608=t,c1609=t,c1610=t,c1611=t,c1612=t,c1613=t,c1614=t,c1615=t,c1616=t,c1617=t,c1618=t,c1619=t,c1620=t,c1621=t,c1622=t,c1623=t,c1624=t,c1625=t,c1626=t,c1627=t,c1628=t,c1629=t,c1630=t,c1631=t,c1632=t,c1633=t,c1634=t,c1635=t,c1636=t,c1637=t,c1638=t,c1639=t,c1640=t,c1641=t,"
+ "c1642=t,c1643=t,c1644=t,c1645=t,c1646=t,c1647=t,c1648=t,c1649=t,c1650=t,c1651=t,c1652=t,c1653=t,c1654=t,c1655=t,c1656=t,c1657=t,c1658=t,c1659=t,c1660=t,c1661=t,c1662=t,c1663=t,c1664=t,c1665=t,c1666=t,c1667=t,c1668=t,c1669=t,c1670=t,c1671=t,c1672=t,c1673=t,c1674=t,c1675=t,c1676=t,c1677=t,c1678=t,c1679=t,c1680=t,c1681=t,c1682=t,c1683=t,c1684=t,c1685=t,c1686=t,c1687=t,c1688=t,c1689=t,c1690=t,c1691=t,c1692=t,c1693=t,c1694=t,c1695=t,c1696=t,c1697=t,c1698=t,c1699=t,c1700=t,c1701=t,c1702=t,c1703=t,c1704=t,c1705=t,c1706=t,c1707=t,c1708=t,c1709=t,c1710=t,c1711=t,c1712=t,c1713=t,c1714=t,c1715=t,c1716=t,c1717=t,c1718=t,c1719=t,c1720=t,c1721=t,c1722=t,c1723=t,c1724=t,c1725=t,c1726=t,c1727=t,c1728=t,c1729=t,c1730=t,c1731=t,c1732=t,c1733=t,c1734=t,c1735=t,c1736=t,c1737=t,c1738=t,c1739=t,c1740=t,c1741=t,c1742=t,c1743=t,c1744=t,c1745=t,c1746=t,c1747=t,c1748=t,c1749=t,c1750=t,c1751=t,c1752=t,c1753=t,c1754=t,c1755=t,c1756=t,c1757=t,c1758=t,c1759=t,c1760=t,c1761=t,c1762=t,c1763=t,c1764=t,c1765=t,c1766=t,c1767=t,c1768=t,c1769=t,c1770=t,c1771=t,c1772=t,c1773=t,c1774=t,c1775=t,c1776=t,c1777=t,c1778=t,c1779=t,c1780=t,c1781=t,c1782=t,c1783=t,c1784=t,c1785=t,c1786=t,c1787=t,c1788=t,"
+ "c1789=t,c1790=t,c1791=t,c1792=t,c1793=t,c1794=t,c1795=t,c1796=t,c1797=t,c1798=t,c1799=t,c1800=t,c1801=t,c1802=t,c1803=t,c1804=t,c1805=t,c1806=t,c1807=t,c1808=t,c1809=t,c1810=t,c1811=t,c1812=t,c1813=t,c1814=t,c1815=t,"
+ "c1816=t,c1817=t,c1818=t,c1819=t,c1820=t,c1821=t,c1822=t,c1823=t,c1824=t,c1825=t,c1826=t,c1827=t,c1828=t,c1829=t,c1830=t,c1831=t,c1832=t,c1833=t,c1834=t,c1835=t,c1836=t,c1837=t,c1838=t,c1839=t,c1840=t,c1841=t,c1842=t,c1843=t,c1844=t,c1845=t,c1846=t,c1847=t,c1848=t,c1849=t,c1850=t,c1851=t,c1852=t,c1853=t,c1854=t,c1855=t,c1856=t,c1857=t,c1858=t,c1859=t,c1860=t,c1861=t,c1862=t,c1863=t,c1864=t,c1865=t,c1866=t,c1867=t,c1868=t,c1869=t,c1870=t,c1871=t,c1872=t,c1873=t,c1874=t,c1875=t,c1876=t,c1877=t,c1878=t,c1879=t,c1880=t,c1881=t,c1882=t,c1883=t,c1884=t,c1885=t,c1886=t,c1887=t,c1888=t,c1889=t,c1890=t,c1891=t,c1892=t,c1893=t,c1894=t,c1895=t,c1896=t,c1897=t,c1898=t,c1899=t,c1900=t,c1901=t,c1902=t,c1903=t,c1904=t,c1905=t,c1906=t,c1907=t,c1908=t,c1909=t,c1910=t,c1911=t,c1912=t,c1913=t,c1914=t,c1915=t,c1916=t,c1917=t,c1918=t,c1919=t,c1920=t,c1921=t,c1922=t,c1923=t,c1924=t,c1925=t,c1926=t,c1927=t,c1928=t,c1929=t,c1930=t,c1931=t,c1932=t,c1933=t,c1934=t,c1935=t,c1936=t,c1937=t,c1938=t,c1939=t,c1940=t,c1941=t,c1942=t,c1943=t,c1944=t,c1945=t,c1946=t,c1947=t,c1948=t,c1949=t,c1950=t,c1951=t,c1952=t,c1953=t,c1954=t,c1955=t,c1956=t,c1957=t,c1958=t,c1959=t,c1960=t,c1961=t,c1962=t,"
+ "c1963=t,c1964=t,c1965=t,c1966=t,c1967=t,c1968=t,c1969=t,c1970=t,c1971=t,c1972=t,c1973=t,c1974=t,c1975=t,c1976=t,c1977=t,c1978=t,c1979=t,c1980=t,c1981=t,c1982=t,c1983=t,c1984=t,c1985=t,c1986=t,c1987=t,c1988=t,c1989=t,c1990=t,c1991=t,c1992=t,c1993=t,c1994=t,c1995=t,c1996=t,c1997=t,c1998=t,c1999=t,c2000=t,c2001=t,c2002=t,c2003=t,c2004=t,c2005=t,c2006=t,c2007=t,c2008=t,c2009=t,c2010=t,c2011=t,c2012=t,c2013=t,c2014=t,c2015=t,c2016=t,c2017=t,c2018=t,c2019=t,c2020=t,c2021=t,c2022=t,c2023=t,c2024=t,c2025=t,c2026=t,c2027=t,c2028=t,c2029=t,c2030=t,c2031=t,c2032=t,c2033=t,c2034=t,c2035=t,c2036=t,c2037=t,c2038=t,c2039=t,c2040=t,c2041=t,c2042=t,c2043=t,c2044=t,c2045=t,c2046=t,c2047=t,c2048=t,c2049=t,c2050=t,c2051=t,c2052=t,c2053=t,c2054=t,c2055=t,c2056=t,c2057=t,c2058=t,c2059=t,c2060=t,c2061=t,c2062=t,c2063=t,c2064=t,c2065=t,c2066=t,c2067=t,c2068=t,c2069=t,c2070=t,c2071=t,c2072=t,c2073=t,c2074=t,c2075=t,c2076=t,c2077=t,c2078=t,c2079=t,c2080=t,c2081=t,c2082=t,c2083=t,c2084=t,c2085=t,c2086=t,c2087=t,c2088=t,c2089=t,c2090=t,c2091=t,c2092=t,c2093=t,c2094=t,c2095=t,c2096=t,c2097=t,c2098=t,c2099=t,c2100=t,c2101=t,c2102=t,c2103=t,c2104=t,c2105=t,c2106=t,c2107=t,c2108=t,c2109=t,"
+ "c2110=t,c2111=t,c2112=t,c2113=t,c2114=t,c2115=t,c2116=t,c2117=t,c2118=t,c2119=t,c2120=t,c2121=t,c2122=t,c2123=t,c2124=t,c2125=t,c2126=t,c2127=t,c2128=t,c2129=t,c2130=t,c2131=t,c2132=t,c2133=t,c2134=t,c2135=t,c2136=t,c2137=t,c2138=t,c2139=t,c2140=t,c2141=t,c2142=t,c2143=t,c2144=t,c2145=t,c2146=t,c2147=t,c2148=t,c2149=t,c2150=t,c2151=t,c2152=t,c2153=t,c2154=t,c2155=t,c2156=t,c2157=t,c2158=t,c2159=t,c2160=t,c2161=t,c2162=t,c2163=t,c2164=t,c2165=t,c2166=t,c2167=t,c2168=t,c2169=t,c2170=t,c2171=t,c2172=t,c2173=t,c2174=t,c2175=t,c2176=t,c2177=t,c2178=t,c2179=t,c2180=t,c2181=t,c2182=t,c2183=t,c2184=t,c2185=t,c2186=t,c2187=t,c2188=t,c2189=t,c2190=t,c2191=t,c2192=t,c2193=t,c2194=t,c2195=t,c2196=t,c2197=t,c2198=t,c2199=t,c2200=t,c2201=t,c2202=t,c2203=t,c2204=t,c2205=t,c2206=t,c2207=t,c2208=t,c2209=t,c2210=t,c2211=t,c2212=t,c2213=t,c2214=t,c2215=t,c2216=t,c2217=t,c2218=t,c2219=t,c2220=t,c2221=t,c2222=t,c2223=t,c2224=t,c2225=t,c2226=t,c2227=t,c2228=t,c2229=t,c2230=t,c2231=t,c2232=t,c2233=t,c2234=t,c2235=t,c2236=t,c2237=t,c2238=t,c2239=t,c2240=t,c2241=t,c2242=t,c2243=t,c2244=t,c2245=t,c2246=t,c2247=t,c2248=t,c2249=t,c2250=t,c2251=t,c2252=t,c2253=t,c2254=t,c2255=t,c2256=t,"
+ "c2257=t,c2258=t,c2259=t,c2260=t,c2261=t,c2262=t,c2263=t,c2264=t,c2265=t,c2266=t,c2267=t,c2268=t,c2269=t,c2270=t,c2271=t,c2272=t,c2273=t,c2274=t,c2275=t,c2276=t,c2277=t,c2278=t,c2279=t,c2280=t,c2281=t,c2282=t,c2283=t,"
+ "c2284=t,c2285=t,c2286=t,c2287=t,c2288=t,c2289=t,c2290=t,c2291=t,c2292=t,c2293=t,c2294=t,c2295=t,c2296=t,c2297=t,c2298=t,c2299=t,c2300=t,c2301=t,c2302=t,c2303=t,c2304=t,c2305=t,c2306=t,c2307=t,c2308=t,c2309=t,c2310=t,c2311=t,c2312=t,c2313=t,c2314=t,c2315=t,c2316=t,c2317=t,c2318=t,c2319=t,c2320=t,c2321=t,c2322=t,c2323=t,c2324=t,c2325=t,c2326=t,c2327=t,c2328=t,c2329=t,c2330=t,c2331=t,c2332=t,c2333=t,c2334=t,c2335=t,c2336=t,c2337=t,c2338=t,c2339=t,c2340=t,c2341=t,c2342=t,c2343=t,c2344=t,c2345=t,c2346=t,c2347=t,c2348=t,c2349=t,c2350=t,c2351=t,c2352=t,c2353=t,c2354=t,c2355=t,c2356=t,c2357=t,c2358=t,c2359=t,c2360=t,c2361=t,c2362=t,c2363=t,c2364=t,c2365=t,c2366=t,c2367=t,c2368=t,c2369=t,c2370=t,c2371=t,c2372=t,c2373=t,c2374=t,c2375=t,c2376=t,c2377=t,c2378=t,c2379=t,c2380=t,c2381=t,c2382=t,c2383=t,c2384=t,c2385=t,c2386=t,c2387=t,c2388=t,c2389=t,c2390=t,c2391=t,c2392=t,c2393=t,c2394=t,c2395=t,c2396=t,c2397=t,c2398=t,c2399=t,c2400=t,c2401=t,c2402=t,c2403=t,c2404=t,c2405=t,c2406=t,c2407=t,c2408=t,c2409=t,c2410=t,c2411=t,c2412=t,c2413=t,c2414=t,c2415=t,c2416=t,c2417=t,c2418=t,c2419=t,c2420=t,c2421=t,c2422=t,c2423=t,c2424=t,c2425=t,c2426=t,c2427=t,c2428=t,c2429=t,c2430=t,"
+ "c2431=t,c2432=t,c2433=t,c2434=t,c2435=t,c2436=t,c2437=t,c2438=t,c2439=t,c2440=t,c2441=t,c2442=t,c2443=t,c2444=t,c2445=t,c2446=t,c2447=t,c2448=t,c2449=t,c2450=t,c2451=t,c2452=t,c2453=t,c2454=t,c2455=t,c2456=t,c2457=t,c2458=t,c2459=t,c2460=t,c2461=t,c2462=t,c2463=t,c2464=t,c2465=t,c2466=t,c2467=t,c2468=t,c2469=t,c2470=t,c2471=t,c2472=t,c2473=t,c2474=t,c2475=t,c2476=t,c2477=t,c2478=t,c2479=t,c2480=t,c2481=t,c2482=t,c2483=t,c2484=t,c2485=t,c2486=t,c2487=t,c2488=t,c2489=t,c2490=t,c2491=t,c2492=t,c2493=t,c2494=t,c2495=t,c2496=t,c2497=t,c2498=t,c2499=t,c2500=t,c2501=t,c2502=t,c2503=t,c2504=t,c2505=t,c2506=t,c2507=t,c2508=t,c2509=t,c2510=t,c2511=t,c2512=t,c2513=t,c2514=t,c2515=t,c2516=t,c2517=t,c2518=t,c2519=t,c2520=t,c2521=t,c2522=t,c2523=t,c2524=t,c2525=t,c2526=t,c2527=t,c2528=t,c2529=t,c2530=t,c2531=t,c2532=t,c2533=t,c2534=t,c2535=t,c2536=t,c2537=t,c2538=t,c2539=t,c2540=t,c2541=t,c2542=t,c2543=t,c2544=t,c2545=t,c2546=t,c2547=t,c2548=t,c2549=t,c2550=t,c2551=t,c2552=t,c2553=t,c2554=t,c2555=t,c2556=t,c2557=t,c2558=t,c2559=t,c2560=t,c2561=t,c2562=t,c2563=t,c2564=t,c2565=t,c2566=t,c2567=t,c2568=t,c2569=t,c2570=t,c2571=t,c2572=t,c2573=t,c2574=t,c2575=t,c2576=t,c2577=t,"
+ "c2578=t,c2579=t,c2580=t,c2581=t,c2582=t,c2583=t,c2584=t,c2585=t,c2586=t,c2587=t,c2588=t,c2589=t,c2590=t,c2591=t,c2592=t,c2593=t,c2594=t,c2595=t,c2596=t,c2597=t,c2598=t,c2599=t,c2600=t,c2601=t,c2602=t,c2603=t,c2604=t,c2605=t,c2606=t,c2607=t,c2608=t,c2609=t,c2610=t,c2611=t,c2612=t,c2613=t,c2614=t,c2615=t,c2616=t,c2617=t,c2618=t,c2619=t,c2620=t,c2621=t,c2622=t,c2623=t,c2624=t,c2625=t,c2626=t,c2627=t,c2628=t,c2629=t,c2630=t,c2631=t,c2632=t,c2633=t,c2634=t,c2635=t,c2636=t,c2637=t,c2638=t,c2639=t,c2640=t,c2641=t,c2642=t,c2643=t,c2644=t,c2645=t,c2646=t,c2647=t,c2648=t,c2649=t,c2650=t,c2651=t,c2652=t,c2653=t,c2654=t,c2655=t,c2656=t,c2657=t,c2658=t,c2659=t,c2660=t,c2661=t,c2662=t,c2663=t,c2664=t,c2665=t,c2666=t,c2667=t,c2668=t,c2669=t,c2670=t,c2671=t,c2672=t,c2673=t,c2674=t,c2675=t,c2676=t,c2677=t,c2678=t,c2679=t,c2680=t,c2681=t,c2682=t,c2683=t,c2684=t,c2685=t,c2686=t,c2687=t,c2688=t,c2689=t,c2690=t,c2691=t,c2692=t,c2693=t,c2694=t,c2695=t,c2696=t,c2697=t,c2698=t,c2699=t,c2700=t,c2701=t,c2702=t,c2703=t,c2704=t,c2705=t,c2706=t,c2707=t,c2708=t,c2709=t,c2710=t,c2711=t,c2712=t,c2713=t,c2714=t,c2715=t,c2716=t,c2717=t,c2718=t,c2719=t,c2720=t,c2721=t,c2722=t,c2723=t,c2724=t,"
+ "c2725=t,c2726=t,c2727=t,c2728=t,c2729=t,c2730=t,c2731=t,c2732=t,c2733=t,c2734=t,c2735=t,c2736=t,c2737=t,c2738=t,c2739=t,c2740=t,c2741=t,c2742=t,c2743=t,c2744=t,c2745=t,c2746=t,c2747=t,c2748=t,c2749=t,c2750=t,c2751=t,c2752=t,c2753=t,c2754=t,c2755=t,c2756=t,c2757=t,c2758=t,c2759=t,c2760=t,c2761=t,c2762=t,c2763=t,c2764=t,c2765=t,c2766=t,c2767=t,c2768=t,c2769=t,c2770=t,c2771=t,c2772=t,c2773=t,c2774=t,c2775=t,c2776=t,c2777=t,c2778=t,c2779=t,c2780=t,c2781=t,c2782=t,c2783=t,c2784=t,c2785=t,c2786=t,c2787=t,c2788=t,c2789=t,c2790=t,c2791=t,c2792=t,c2793=t,c2794=t,c2795=t,c2796=t,c2797=t,c2798=t,c2799=t,c2800=t,c2801=t,c2802=t,c2803=t,c2804=t,c2805=t,c2806=t,c2807=t,c2808=t,c2809=t,c2810=t,c2811=t,c2812=t,c2813=t,c2814=t,c2815=t,c2816=t,c2817=t,c2818=t,c2819=t,c2820=t,c2821=t,c2822=t,c2823=t,c2824=t,c2825=t,c2826=t,c2827=t,c2828=t,c2829=t,c2830=t,c2831=t,c2832=t,c2833=t,c2834=t,c2835=t,c2836=t,c2837=t,c2838=t,c2839=t,c2840=t,c2841=t,c2842=t,c2843=t,c2844=t,c2845=t,c2846=t,c2847=t,c2848=t,c2849=t,c2850=t,c2851=t,c2852=t,c2853=t,c2854=t,c2855=t,c2856=t,c2857=t,c2858=t,c2859=t,c2860=t,c2861=t,c2862=t,c2863=t,c2864=t,c2865=t,c2866=t,c2867=t,c2868=t,c2869=t,c2870=t,c2871=t,"
+ "c2872=t,c2873=t,c2874=t,c2875=t,c2876=t,c2877=t,c2878=t,c2879=t,c2880=t,c2881=t,c2882=t,c2883=t,c2884=t,c2885=t,c2886=t,c2887=t,c2888=t,c2889=t,c2890=t,c2891=t,c2892=t,c2893=t,c2894=t,c2895=t,c2896=t,c2897=t,c2898=t,c2899=t,c2900=t,c2901=t,c2902=t,c2903=t,c2904=t,c2905=t,c2906=t,c2907=t,c2908=t,c2909=t,c2910=t,c2911=t,c2912=t,c2913=t,c2914=t,c2915=t,c2916=t,c2917=t,c2918=t,c2919=t,c2920=t,c2921=t,c2922=t,c2923=t,c2924=t,c2925=t,c2926=t,c2927=t,c2928=t,c2929=t,c2930=t,c2931=t,c2932=t,c2933=t,c2934=t,c2935=t,c2936=t,c2937=t,c2938=t,c2939=t,c2940=t,c2941=t,c2942=t,c2943=t,c2944=t,c2945=t,c2946=t,c2947=t,c2948=t,c2949=t,c2950=t,c2951=t,c2952=t,c2953=t,c2954=t,c2955=t,c2956=t,c2957=t,c2958=t,c2959=t,c2960=t,c2961=t,c2962=t,c2963=t,c2964=t,c2965=t,c2966=t,c2967=t,c2968=t,c2969=t,c2970=t,c2971=t,c2972=t,c2973=t,c2974=t,c2975=t,c2976=t,c2977=t,c2978=t,c2979=t,c2980=t,c2981=t,c2982=t,c2983=t,c2984=t,c2985=t,c2986=t,c2987=t,c2988=t,c2989=t,c2990=t,c2991=t,c2992=t,c2993=t,c2994=t,c2995=t,c2996=t,c2997=t,c2998=t,c2999=t,c3000=t,c3001=t,c3002=t,c3003=t,c3004=t,c3005=t,c3006=t,c3007=t,c3008=t,c3009=t,c3010=t,c3011=t,c3012=t,c3013=t,c3014=t,c3015=t,c3016=t,c3017=t,c3018=t,"
+ "c3019=t,c3020=t,c3021=t,c3022=t,c3023=t,c3024=t,c3025=t,c3026=t,c3027=t,c3028=t,c3029=t,c3030=t,c3031=t,c3032=t,c3033=t,c3034=t,c3035=t,c3036=t,c3037=t,c3038=t,c3039=t,c3040=t,c3041=t,c3042=t,c3043=t,c3044=t,c3045=t,c3046=t,c3047=t,c3048=t,c3049=t,c3050=t,c3051=t,c3052=t,c3053=t,c3054=t,c3055=t,c3056=t,c3057=t,c3058=t,c3059=t,c3060=t,c3061=t,c3062=t,c3063=t,c3064=t,c3065=t,c3066=t,c3067=t,c3068=t,c3069=t,c3070=t,c3071=t,c3072=t,c3073=t,c3074=t,c3075=t,c3076=t,c3077=t,c3078=t,c3079=t,c3080=t,c3081=t,c3082=t,c3083=t,c3084=t,c3085=t,c3086=t,c3087=t,c3088=t,c3089=t,c3090=t,c3091=t,c3092=t,c3093=t,c3094=t,c3095=t,c3096=t,c3097=t,c3098=t,c3099=t,c3100=t,c3101=t,c3102=t,c3103=t,c3104=t,c3105=t,c3106=t,c3107=t,c3108=t,c3109=t,c3110=t,c3111=t,c3112=t,c3113=t,c3114=t,c3115=t,c3116=t,c3117=t,c3118=t,c3119=t,c3120=t,c3121=t,c3122=t,c3123=t,c3124=t,c3125=t,c3126=t,c3127=t,c3128=t,c3129=t,c3130=t,c3131=t,c3132=t,c3133=t,c3134=t,c3135=t,c3136=t,c3137=t,c3138=t,c3139=t,c3140=t,c3141=t,c3142=t,c3143=t,c3144=t,c3145=t,c3146=t,c3147=t,c3148=t,c3149=t,c3150=t,c3151=t,c3152=t,c3153=t,c3154=t,c3155=t,c3156=t,c3157=t,c3158=t,c3159=t,c3160=t,c3161=t,c3162=t,c3163=t,c3164=t,c3165=t,"
+ "c3166=t,c3167=t,c3168=t,c3169=t,c3170=t,c3171=t,c3172=t,c3173=t,c3174=t,c3175=t,c3176=t,c3177=t,c3178=t,c3179=t,c3180=t,c3181=t,c3182=t,c3183=t,c3184=t,c3185=t,c3186=t,c3187=t,c3188=t,c3189=t,c3190=t,c3191=t,c3192=t,c3193=t,c3194=t,c3195=t,c3196=t,c3197=t,c3198=t,c3199=t,c3200=t,c3201=t,c3202=t,c3203=t,c3204=t,c3205=t,c3206=t,c3207=t,c3208=t,c3209=t,c3210=t,c3211=t,c3212=t,c3213=t,c3214=t,c3215=t,c3216=t,c3217=t,c3218=t,c3219=t,c3220=t,c3221=t,c3222=t,c3223=t,c3224=t,c3225=t,c3226=t,c3227=t,c3228=t,c3229=t,c3230=t,c3231=t,c3232=t,c3233=t,c3234=t,c3235=t,c3236=t,c3237=t,c3238=t,c3239=t,c3240=t,c3241=t,c3242=t,c3243=t,c3244=t,c3245=t,c3246=t,c3247=t,c3248=t,c3249=t,c3250=t,c3251=t,c3252=t,c3253=t,c3254=t,c3255=t,c3256=t,c3257=t,c3258=t,c3259=t,c3260=t,c3261=t,c3262=t,c3263=t,c3264=t,c3265=t,c3266=t,c3267=t,c3268=t,c3269=t,c3270=t,c3271=t,c3272=t,c3273=t,c3274=t,c3275=t,c3276=t,c3277=t,c3278=t,c3279=t,c3280=t,c3281=t,c3282=t,c3283=t,c3284=t,c3285=t,c3286=t,c3287=t,c3288=t,c3289=t,c3290=t,c3291=t,c3292=t,c3293=t,c3294=t,c3295=t,c3296=t,c3297=t,c3298=t,c3299=t,c3300=t,c3301=t,c3302=t,c3303=t,c3304=t,c3305=t,c3306=t,c3307=t,c3308=t,c3309=t,c3310=t,c3311=t,c3312=t,"
+ "c3313=t,c3314=t,c3315=t,c3316=t,c3317=t,c3318=t,c3319=t,c3320=t,c3321=t,c3322=t,c3323=t,c3324=t,c3325=t,c3326=t,c3327=t,c3328=t,c3329=t,c3330=t,c3331=t,c3332=t,c3333=t,c3334=t,c3335=t,c3336=t,c3337=t,c3338=t,c3339=t,c3340=t,c3341=t,c3342=t,c3343=t,c3344=t,c3345=t,c3346=t,c3347=t,c3348=t,c3349=t,c3350=t,c3351=t,c3352=t,c3353=t,c3354=t,c3355=t,c3356=t,c3357=t,c3358=t,c3359=t,c3360=t,c3361=t,c3362=t,c3363=t,c3364=t,c3365=t,c3366=t,c3367=t,c3368=t,c3369=t,c3370=t,c3371=t,c3372=t,c3373=t,c3374=t,c3375=t,c3376=t,c3377=t,c3378=t,c3379=t,c3380=t,c3381=t,c3382=t,c3383=t,c3384=t,c3385=t,c3386=t,c3387=t,c3388=t,c3389=t,c3390=t,c3391=t,c3392=t,c3393=t,c3394=t,c3395=t,c3396=t,c3397=t,c3398=t,c3399=t,c3400=t,c3401=t,c3402=t,c3403=t,c3404=t,c3405=t,c3406=t,c3407=t,c3408=t,c3409=t,c3410=t,c3411=t,c3412=t,c3413=t,c3414=t,c3415=t,c3416=t,c3417=t,c3418=t,c3419=t,c3420=t,c3421=t,c3422=t,c3423=t,c3424=t,c3425=t,c3426=t,c3427=t,c3428=t,c3429=t,c3430=t,c3431=t,c3432=t,c3433=t,c3434=t,c3435=t,c3436=t,c3437=t,c3438=t,c3439=t,c3440=t,c3441=t,c3442=t,c3443=t,c3444=t,c3445=t,c3446=t,c3447=t,c3448=t,c3449=t,c3450=t,c3451=t,c3452=t,c3453=t,c3454=t,c3455=t,c3456=t,c3457=t,c3458=t,c3459=t,"
+ "c3460=t,c3461=t,c3462=t,c3463=t,c3464=t,c3465=t,c3466=t,c3467=t,c3468=t,c3469=t,c3470=t,c3471=t,c3472=t,c3473=t,c3474=t,c3475=t,c3476=t,c3477=t,c3478=t,c3479=t,c3480=t,c3481=t,c3482=t,c3483=t,c3484=t,c3485=t,c3486=t,c3487=t,c3488=t,c3489=t,c3490=t,c3491=t,c3492=t,c3493=t,c3494=t,c3495=t,c3496=t,c3497=t,c3498=t,c3499=t,c3500=t,c3501=t,c3502=t,c3503=t,c3504=t,c3505=t,c3506=t,c3507=t,c3508=t,c3509=t,c3510=t,c3511=t,c3512=t,c3513=t,"
+ "c3514=t,c3515=t,c3516=t,c3517=t,c3518=t,c3519=t,c3520=t,c3521=t,c3522=t,c3523=t,c3524=t,c3525=t,c3526=t,c3527=t,c3528=t,c3529=t,c3530=t,c3531=t,c3532=t,c3533=t,c3534=t,c3535=t,c3536=t,c3537=t,c3538=t,c3539=t,c3540=t,c3541=t,c3542=t,c3543=t,c3544=t,c3545=t,c3546=t,c3547=t,c3548=t,c3549=t,c3550=t,c3551=t,c3552=t,c3553=t,c3554=t,c3555=t,c3556=t,c3557=t,c3558=t,c3559=t,c3560=t,c3561=t,c3562=t,c3563=t,c3564=t,c3565=t,c3566=t,c3567=t,c3568=t,c3569=t,c3570=t,c3571=t,c3572=t,c3573=t,c3574=t,c3575=t,c3576=t,c3577=t,c3578=t,c3579=t,c3580=t,c3581=t,c3582=t,c3583=t,c3584=t,c3585=t,c3586=t,c3587=t,c3588=t,c3589=t,c3590=t,c3591=t,c3592=t,c3593=t,c3594=t,c3595=t,c3596=t,c3597=t,c3598=t,c3599=t,c3600=t,c3601=t,c3602=t,c3603=t,c3604=t,c3605=t,c3606=t,c3607=t,c3608=t,c3609=t,c3610=t,c3611=t,c3612=t,c3613=t,c3614=t,c3615=t,c3616=t,c3617=t,c3618=t,c3619=t,c3620=t,c3621=t,c3622=t,c3623=t,c3624=t,c3625=t,c3626=t,c3627=t,c3628=t,c3629=t,c3630=t,c3631=t,c3632=t,c3633=t,c3634=t,c3635=t,c3636=t,c3637=t,c3638=t,c3639=t,c3640=t,c3641=t,c3642=t,c3643=t,c3644=t,c3645=t,c3646=t,c3647=t,c3648=t,c3649=t,c3650=t,c3651=t,c3652=t,c3653=t,c3654=t,c3655=t,c3656=t,c3657=t,c3658=t,c3659=t,c3660=t,"
+ "c3661=t,c3662=t,c3663=t,c3664=t,c3665=t,c3666=t,c3667=t,c3668=t,c3669=t,c3670=t,c3671=t,c3672=t,c3673=t,c3674=t,c3675=t,c3676=t,c3677=t,c3678=t,c3679=t,c3680=t,c3681=t,c3682=t,c3683=t,c3684=t,c3685=t,c3686=t,c3687=t,c3688=t,c3689=t,c3690=t,c3691=t,c3692=t,c3693=t,c3694=t,c3695=t,c3696=t,c3697=t,c3698=t,c3699=t,c3700=t,c3701=t,c3702=t,c3703=t,c3704=t,c3705=t,c3706=t,c3707=t,c3708=t,c3709=t,c3710=t,c3711=t,c3712=t,c3713=t,c3714=t,c3715=t,c3716=t,c3717=t,c3718=t,c3719=t,c3720=t,c3721=t,c3722=t,c3723=t,c3724=t,c3725=t,c3726=t,c3727=t,c3728=t,c3729=t,c3730=t,c3731=t,c3732=t,c3733=t,c3734=t,c3735=t,c3736=t,c3737=t,c3738=t,c3739=t,c3740=t,c3741=t,c3742=t,c3743=t,c3744=t,c3745=t,c3746=t,c3747=t,c3748=t,c3749=t,c3750=t,c3751=t,c3752=t,c3753=t,c3754=t,c3755=t,c3756=t,c3757=t,c3758=t,c3759=t,c3760=t,c3761=t,c3762=t,c3763=t,c3764=t,c3765=t,c3766=t,c3767=t,c3768=t,c3769=t,c3770=t,c3771=t,c3772=t,c3773=t,c3774=t,c3775=t,c3776=t,c3777=t,c3778=t,c3779=t,c3780=t,c3781=t,c3782=t,c3783=t,c3784=t,c3785=t,c3786=t,c3787=t,c3788=t,c3789=t,c3790=t,c3791=t,c3792=t,c3793=t,c3794=t,c3795=t,c3796=t,c3797=t,c3798=t,c3799=t,c3800=t,c3801=t,c3802=t,c3803=t,c3804=t,c3805=t,c3806=t,c3807=t,"
+ "c3808=t,c3809=t,c3810=t,c3811=t,c3812=t,c3813=t,c3814=t,c3815=t,c3816=t,c3817=t,c3818=t,c3819=t,c3820=t,c3821=t,c3822=t,c3823=t,c3824=t,c3825=t,c3826=t,c3827=t,c3828=t,c3829=t,c3830=t,c3831=t,c3832=t,c3833=t,c3834=t,c3835=t,c3836=t,c3837=t,c3838=t,c3839=t,c3840=t,c3841=t,c3842=t,c3843=t,c3844=t,c3845=t,c3846=t,c3847=t,c3848=t,c3849=t,c3850=t,c3851=t,c3852=t,c3853=t,c3854=t,c3855=t,c3856=t,c3857=t,c3858=t,c3859=t,c3860=t,c3861=t,c3862=t,c3863=t,c3864=t,c3865=t,c3866=t,c3867=t,c3868=t,c3869=t,c3870=t,c3871=t,c3872=t,c3873=t,c3874=t,c3875=t,c3876=t,c3877=t,c3878=t,c3879=t,c3880=t,c3881=t,c3882=t,c3883=t,c3884=t,c3885=t,c3886=t,c3887=t,c3888=t,c3889=t,c3890=t,c3891=t,c3892=t,c3893=t,c3894=t,c3895=t,c3896=t,c3897=t,c3898=t,c3899=t,c3900=t,c3901=t,c3902=t,c3903=t,c3904=t,c3905=t,c3906=t,c3907=t,c3908=t,c3909=t,c3910=t,c3911=t,c3912=t,c3913=t,c3914=t,c3915=t,c3916=t,c3917=t,c3918=t,c3919=t,c3920=t,c3921=t,c3922=t,c3923=t,c3924=t,c3925=t,c3926=t,c3927=t,c3928=t,c3929=t,c3930=t,c3931=t,c3932=t,c3933=t,c3934=t,c3935=t,c3936=t,c3937=t,c3938=t,c3939=t,c3940=t,c3941=t,c3942=t,c3943=t,c3944=t,c3945=t,c3946=t,c3947=t,c3948=t,c3949=t,c3950=t,c3951=t,c3952=t,c3953=t,c3954=t,"
+ "c3955=t,c3956=t,c3957=t,c3958=t,c3959=t,c3960=t,c3961=t,c3962=t,c3963=t,c3964=t,c3965=t,c3966=t,c3967=t,c3968=t,c3969=t,c3970=t,c3971=t,c3972=t,c3973=t,c3974=t,c3975=t,c3976=t,c3977=t,c3978=t,c3979=t,c3980=t,c3981=t,c3982=t,c3983=t,c3984=t,c3985=t,c3986=t,c3987=t,c3988=t,c3989=t,c3990=t,c3991=t,c3992=t,c3993=t,c3994=t,c3995=t,c3996=t,c3997=t,c3998=t,c3999=t,c4000=t,c4001=t,c4002=t,c4003=t,c4004=t,c4005=t,c4006=t,c4007=t,c4008=t,c4009=t,c4010=t,c4011=t,c4012=t,c4013=t,c4014=t,c4015=t,c4016=t,c4017=t,c4018=t,c4019=t,c4020=t,c4021=t,c4022=t,c4023=t,c4024=t,c4025=t,c4026=t,c4027=t,c4028=t,c4029=t,c4030=t,c4031=t,c4032=t,c4033=t,c4034=t,c4035=t,c4036=t,c4037=t,c4038=t,c4039=t,c4040=t,c4041=t,c4042=t,c4043=t,c4044=t,c4045=t,c4046=t,c4047=t,c4048=t,c4049=t,c4050=t,c4051=t,c4052=t,c4053=t,c4054=t,c4055=t,c4056=t,c4057=t,c4058=t,c4059=t,c4060=t,c4061=t,c4062=t,c4063=t,c4064=t,c4065=t,c4066=t,c4067=t,c4068=t,c4069=t,c4070=t,c4071=t,c4072=t,c4073=t,c4074=t,c4075=t,c4076=t,c4077=t,c4078=t,c4079=t,c4080=t,c4081=t,c4082=t,c4083=t,c4084=t,c4085=t,c4086=t,c4087=t,c4088=t,c4089=t,c4090=t,c4091=t,c4092=t,c4093=t 1626006833640000000"
+ };
+
+ int ret = TSDB_CODE_SUCCESS;
+ for(int i = 0; i < sizeof(sql)/sizeof(sql[0]); i++){
+ ret = smlParseInfluxLine(info, sql[i]);
+ if(ret != TSDB_CODE_SUCCESS) break;
+ }
+ ASSERT_NE(ret, 0);
+ smlDestroyInfo(info);
+}
diff --git a/source/common/src/systable.c b/source/common/src/systable.c
index 9ca896c9ee624740b0602f35544219425db74779..a45f7b2913938ae9884940f45631d0f7c49ed1e8 100644
--- a/source/common/src/systable.c
+++ b/source/common/src/systable.c
@@ -90,7 +90,7 @@ static const SSysDbTableSchema userDBSchema[] = {
{.name = "minrows", .bytes = 4, .type = TSDB_DATA_TYPE_INT, .sysInfo = true},
{.name = "maxrows", .bytes = 4, .type = TSDB_DATA_TYPE_INT, .sysInfo = true},
{.name = "comp", .bytes = 1, .type = TSDB_DATA_TYPE_TINYINT, .sysInfo = true},
- {.name = "precision", .bytes = 2 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = true},
+ {.name = "precision", .bytes = 2 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false},
{.name = "status", .bytes = 10 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false},
{.name = "retentions", .bytes = 60 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = true},
{.name = "single_stable", .bytes = 1, .type = TSDB_DATA_TYPE_BOOL, .sysInfo = true},
@@ -102,6 +102,10 @@ static const SSysDbTableSchema userDBSchema[] = {
{.name = "wal_retention_size", .bytes = 8, .type = TSDB_DATA_TYPE_BIGINT, .sysInfo = true},
{.name = "wal_roll_period", .bytes = 4, .type = TSDB_DATA_TYPE_INT, .sysInfo = true},
{.name = "wal_segment_size", .bytes = 8, .type = TSDB_DATA_TYPE_BIGINT, .sysInfo = true},
+ {.name = "stt_trigger", .bytes = 2, .type = TSDB_DATA_TYPE_SMALLINT, .sysInfo = true},
+ {.name = "table_prefix", .bytes = 2, .type = TSDB_DATA_TYPE_SMALLINT, .sysInfo = true},
+ {.name = "table_suffix", .bytes = 2, .type = TSDB_DATA_TYPE_SMALLINT, .sysInfo = true},
+ {.name = "tsdb_pagesize", .bytes = 4, .type = TSDB_DATA_TYPE_INT, .sysInfo = true},
};
static const SSysDbTableSchema userFuncSchema[] = {
@@ -206,6 +210,7 @@ static const SSysDbTableSchema vgroupsSchema[] = {
{.name = "v3_dnode", .bytes = 4, .type = TSDB_DATA_TYPE_INT, .sysInfo = true},
{.name = "v3_status", .bytes = 10 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = true},
{.name = "status", .bytes = 12 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = true},
+ {.name = "cacheload", .bytes = 4, .type = TSDB_DATA_TYPE_INT, .sysInfo = true},
{.name = "nfiles", .bytes = 4, .type = TSDB_DATA_TYPE_INT, .sysInfo = true},
{.name = "file_size", .bytes = 4, .type = TSDB_DATA_TYPE_INT, .sysInfo = true},
{.name = "tsma", .bytes = 1, .type = TSDB_DATA_TYPE_TINYINT, .sysInfo = true},
@@ -222,8 +227,9 @@ static const SSysDbTableSchema transSchema[] = {
{.name = "id", .bytes = 4, .type = TSDB_DATA_TYPE_INT, .sysInfo = false},
{.name = "create_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP, .sysInfo = false},
{.name = "stage", .bytes = TSDB_TRANS_STAGE_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false},
- {.name = "db1", .bytes = SYSTABLE_SCH_DB_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false},
- {.name = "db2", .bytes = SYSTABLE_SCH_DB_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false},
+ {.name = "oper", .bytes = TSDB_TRANS_OPER_LEN, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false},
+ {.name = "db", .bytes = SYSTABLE_SCH_DB_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false},
+ {.name = "stable", .bytes = SYSTABLE_SCH_TABLE_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false},
{.name = "failed_times", .bytes = 4, .type = TSDB_DATA_TYPE_INT, .sysInfo = false},
{.name = "last_exec_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP, .sysInfo = false},
{.name = "last_action_info", .bytes = (TSDB_TRANS_ERROR_LEN - 1) + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
@@ -240,6 +246,31 @@ static const SSysDbTableSchema variablesSchema[] = {
{.name = "value", .bytes = TSDB_CONFIG_VALUE_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = true},
};
+static const SSysDbTableSchema topicSchema[] = {
+ {.name = "topic_name", .bytes = SYSTABLE_SCH_TABLE_NAME_LEN, .type = TSDB_DATA_TYPE_BINARY, .sysInfo = false},
+ {.name = "db_name", .bytes = SYSTABLE_SCH_DB_NAME_LEN, .type = TSDB_DATA_TYPE_BINARY, .sysInfo = false},
+ {.name = "create_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP, .sysInfo = false},
+ {.name = "sql", .bytes = TSDB_SHOW_SQL_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_BINARY, .sysInfo = false},
+ // TODO config
+};
+
+
+static const SSysDbTableSchema subscriptionSchema[] = {
+ {.name = "topic_name", .bytes = TSDB_TOPIC_FNAME_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_BINARY, .sysInfo = false},
+ {.name = "consumer_group", .bytes = TSDB_CGROUP_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_BINARY, .sysInfo = false},
+ {.name = "vgroup_id", .bytes = 4, .type = TSDB_DATA_TYPE_INT, .sysInfo = false},
+ {.name = "consumer_id", .bytes = 8, .type = TSDB_DATA_TYPE_BIGINT, .sysInfo = false},
+};
+
+static const SSysDbTableSchema vnodesSchema[] = {
+ {.name = "vgroup_id", .bytes = 4, .type = TSDB_DATA_TYPE_INT, .sysInfo = true},
+ {.name = "replica", .bytes = 1, .type = TSDB_DATA_TYPE_TINYINT, .sysInfo = true},
+ {.name = "status", .bytes = 9 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = true},
+ {.name = "db_name", .bytes = SYSTABLE_SCH_DB_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = true},
+ {.name = "dnode_id", .bytes = 4, .type = TSDB_DATA_TYPE_INT, .sysInfo = true},
+ {.name = "dnode_ep", .bytes = TSDB_EP_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = true},
+};
+
static const SSysTableMeta infosMeta[] = {
{TSDB_INS_TABLE_DNODES, dnodesSchema, tListLen(dnodesSchema), true},
{TSDB_INS_TABLE_MNODES, mnodesSchema, tListLen(mnodesSchema), true},
@@ -260,6 +291,10 @@ static const SSysTableMeta infosMeta[] = {
{TSDB_INS_TABLE_VGROUPS, vgroupsSchema, tListLen(vgroupsSchema), true},
{TSDB_INS_TABLE_CONFIGS, configSchema, tListLen(configSchema), true},
{TSDB_INS_TABLE_DNODE_VARIABLES, variablesSchema, tListLen(variablesSchema), true},
+ {TSDB_INS_TABLE_TOPICS, topicSchema, tListLen(topicSchema), false},
+ {TSDB_INS_TABLE_SUBSCRIPTIONS, subscriptionSchema, tListLen(subscriptionSchema), false},
+ {TSDB_INS_TABLE_STREAMS, streamSchema, tListLen(streamSchema), false},
+ {TSDB_INS_TABLE_VNODES, vnodesSchema, tListLen(vnodesSchema), true},
};
static const SSysDbTableSchema connectionsSchema[] = {
@@ -272,13 +307,6 @@ static const SSysDbTableSchema connectionsSchema[] = {
{.name = "last_access", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP, .sysInfo = false},
};
-static const SSysDbTableSchema topicSchema[] = {
- {.name = "topic_name", .bytes = SYSTABLE_SCH_TABLE_NAME_LEN, .type = TSDB_DATA_TYPE_BINARY, .sysInfo = false},
- {.name = "db_name", .bytes = SYSTABLE_SCH_DB_NAME_LEN, .type = TSDB_DATA_TYPE_BINARY, .sysInfo = false},
- {.name = "create_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP, .sysInfo = false},
- {.name = "sql", .bytes = TSDB_SHOW_SQL_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_BINARY, .sysInfo = false},
- // TODO config
-};
static const SSysDbTableSchema consumerSchema[] = {
{.name = "consumer_id", .bytes = 8, .type = TSDB_DATA_TYPE_BIGINT, .sysInfo = false},
@@ -292,13 +320,6 @@ static const SSysDbTableSchema consumerSchema[] = {
{.name = "rebalance_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP, .sysInfo = false},
};
-static const SSysDbTableSchema subscriptionSchema[] = {
- {.name = "topic_name", .bytes = TSDB_TOPIC_FNAME_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_BINARY, .sysInfo = false},
- {.name = "consumer_group", .bytes = TSDB_CGROUP_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_BINARY, .sysInfo = false},
- {.name = "vgroup_id", .bytes = 4, .type = TSDB_DATA_TYPE_INT, .sysInfo = false},
- {.name = "consumer_id", .bytes = 8, .type = TSDB_DATA_TYPE_BIGINT, .sysInfo = false},
-};
-
static const SSysDbTableSchema offsetSchema[] = {
{.name = "topic_name", .bytes = SYSTABLE_SCH_TABLE_NAME_LEN, .type = TSDB_DATA_TYPE_BINARY, .sysInfo = false},
{.name = "group_id", .bytes = SYSTABLE_SCH_TABLE_NAME_LEN, .type = TSDB_DATA_TYPE_BINARY, .sysInfo = false},
@@ -345,13 +366,10 @@ static const SSysDbTableSchema appSchema[] = {
static const SSysTableMeta perfsMeta[] = {
{TSDB_PERFS_TABLE_CONNECTIONS, connectionsSchema, tListLen(connectionsSchema), false},
{TSDB_PERFS_TABLE_QUERIES, querySchema, tListLen(querySchema), false},
- {TSDB_PERFS_TABLE_TOPICS, topicSchema, tListLen(topicSchema), false},
{TSDB_PERFS_TABLE_CONSUMERS, consumerSchema, tListLen(consumerSchema), false},
- {TSDB_PERFS_TABLE_SUBSCRIPTIONS, subscriptionSchema, tListLen(subscriptionSchema), false},
// {TSDB_PERFS_TABLE_OFFSETS, offsetSchema, tListLen(offsetSchema)},
{TSDB_PERFS_TABLE_TRANS, transSchema, tListLen(transSchema), false},
// {TSDB_PERFS_TABLE_SMAS, smaSchema, tListLen(smaSchema), false},
- {TSDB_PERFS_TABLE_STREAMS, streamSchema, tListLen(streamSchema), false},
{TSDB_PERFS_TABLE_APPS, appSchema, tListLen(appSchema), false}};
// clang-format on
diff --git a/source/common/src/tdatablock.c b/source/common/src/tdatablock.c
index c65e966046912edb6f8c0ca77db3f55d24710785..23cc8686587594d8c101a6912923af6a3c20df3f 100644
--- a/source/common/src/tdatablock.c
+++ b/source/common/src/tdatablock.c
@@ -140,7 +140,8 @@ int32_t colDataReserve(SColumnInfoData* pColumnInfoData, size_t newSize) {
return TSDB_CODE_SUCCESS;
}
-static void doCopyNItems(struct SColumnInfoData* pColumnInfoData, int32_t currentRow, const char* pData, int32_t itemLen, int32_t numOfRows) {
+static void doCopyNItems(struct SColumnInfoData* pColumnInfoData, int32_t currentRow, const char* pData,
+ int32_t itemLen, int32_t numOfRows) {
ASSERT(pColumnInfoData->info.bytes >= itemLen);
size_t start = 1;
@@ -148,21 +149,23 @@ static void doCopyNItems(struct SColumnInfoData* pColumnInfoData, int32_t curren
memcpy(pColumnInfoData->pData, pData, itemLen);
int32_t t = 0;
- int32_t count = log(numOfRows)/log(2);
- while(t < count) {
+ int32_t count = log(numOfRows) / log(2);
+ while (t < count) {
int32_t xlen = 1 << t;
- memcpy(pColumnInfoData->pData + start * itemLen + pColumnInfoData->varmeta.length, pColumnInfoData->pData, xlen * itemLen);
+ memcpy(pColumnInfoData->pData + start * itemLen + pColumnInfoData->varmeta.length, pColumnInfoData->pData,
+ xlen * itemLen);
t += 1;
start += xlen;
}
// the tail part
if (numOfRows > start) {
- memcpy(pColumnInfoData->pData + start * itemLen + currentRow * itemLen, pColumnInfoData->pData, (numOfRows - start) * itemLen);
+ memcpy(pColumnInfoData->pData + start * itemLen + currentRow * itemLen, pColumnInfoData->pData,
+ (numOfRows - start) * itemLen);
}
if (IS_VAR_DATA_TYPE(pColumnInfoData->info.type)) {
- for(int32_t i = 0; i < numOfRows; ++i) {
+ for (int32_t i = 0; i < numOfRows; ++i) {
pColumnInfoData->varmeta.offset[i + currentRow] = pColumnInfoData->varmeta.length + i * itemLen;
}
@@ -170,7 +173,8 @@ static void doCopyNItems(struct SColumnInfoData* pColumnInfoData, int32_t curren
}
}
-int32_t colDataAppendNItems(SColumnInfoData* pColumnInfoData, uint32_t currentRow, const char* pData, uint32_t numOfRows) {
+int32_t colDataAppendNItems(SColumnInfoData* pColumnInfoData, uint32_t currentRow, const char* pData,
+ uint32_t numOfRows) {
ASSERT(pData != NULL && pColumnInfoData != NULL);
int32_t len = pColumnInfoData->info.bytes;
@@ -278,7 +282,7 @@ int32_t colDataMergeCol(SColumnInfoData* pColumnInfoData, int32_t numOfRow1, int
} else {
if (finalNumOfRows > *capacity || (numOfRow1 == 0 && pColumnInfoData->info.bytes != 0)) {
// all data may be null, when the pColumnInfoData->info.type == 0, bytes == 0;
-// ASSERT(finalNumOfRows * pColumnInfoData->info.bytes);
+ // ASSERT(finalNumOfRows * pColumnInfoData->info.bytes);
char* tmp = taosMemoryRealloc(pColumnInfoData->pData, finalNumOfRows * pColumnInfoData->info.bytes);
if (tmp == NULL) {
return TSDB_CODE_VND_OUT_OF_MEMORY;
@@ -557,7 +561,7 @@ int32_t blockDataToBuf(char* buf, const SSDataBlock* pBlock) {
}
int32_t blockDataFromBuf(SSDataBlock* pBlock, const char* buf) {
- int32_t numOfRows = *(int32_t*) buf;
+ int32_t numOfRows = *(int32_t*)buf;
blockDataEnsureCapacity(pBlock, numOfRows);
pBlock->info.rows = numOfRows;
@@ -676,7 +680,8 @@ size_t blockDataGetRowSize(SSDataBlock* pBlock) {
* @return
*/
size_t blockDataGetSerialMetaSize(uint32_t numOfCols) {
- // | version | total length | total rows | total columns | flag seg| block group id | column schema | each column length |
+ // | version | total length | total rows | total columns | flag seg| block group id | column schema | each column
+ // length |
return sizeof(int32_t) + sizeof(int32_t) + sizeof(int32_t) + sizeof(int32_t) + sizeof(int32_t) + sizeof(uint64_t) +
numOfCols * (sizeof(int8_t) + sizeof(int32_t)) + numOfCols * sizeof(int32_t);
}
@@ -1228,6 +1233,7 @@ void blockDataFreeRes(SSDataBlock* pBlock) {
}
taosArrayDestroy(pBlock->pDataBlock);
+ pBlock->pDataBlock = NULL;
taosMemoryFreeClear(pBlock->pBlockAgg);
memset(&pBlock->info, 0, sizeof(SDataBlockInfo));
}
@@ -1272,7 +1278,9 @@ int32_t assignOneDataBlock(SSDataBlock* dst, const SSDataBlock* src) {
colDataAssign(pDst, pSrc, src->info.rows, &src->info);
}
+ uint32_t cap = dst->info.capacity;
dst->info = src->info;
+ dst->info.capacity = cap;
return 0;
}
@@ -1296,11 +1304,48 @@ int32_t copyDataBlock(SSDataBlock* dst, const SSDataBlock* src) {
colDataAssign(pDst, pSrc, src->info.rows, &src->info);
}
-
+ uint32_t cap = dst->info.capacity;
dst->info = src->info;
+ dst->info.capacity = cap;
return TSDB_CODE_SUCCESS;
}
+SSDataBlock* createSpecialDataBlock(EStreamType type) {
+ SSDataBlock* pBlock = taosMemoryCalloc(1, sizeof(SSDataBlock));
+ pBlock->info.hasVarCol = false;
+ pBlock->info.groupId = 0;
+ pBlock->info.rows = 0;
+ pBlock->info.type = type;
+ pBlock->info.rowSize =
+ sizeof(TSKEY) + sizeof(TSKEY) + sizeof(uint64_t) + sizeof(uint64_t) + sizeof(TSKEY) + sizeof(TSKEY);
+ pBlock->info.watermark = INT64_MIN;
+
+ pBlock->pDataBlock = taosArrayInit(6, sizeof(SColumnInfoData));
+ SColumnInfoData infoData = {0};
+ infoData.info.type = TSDB_DATA_TYPE_TIMESTAMP;
+ infoData.info.bytes = sizeof(TSKEY);
+ // window start ts
+ taosArrayPush(pBlock->pDataBlock, &infoData);
+ // window end ts
+ taosArrayPush(pBlock->pDataBlock, &infoData);
+
+ infoData.info.type = TSDB_DATA_TYPE_UBIGINT;
+ infoData.info.bytes = sizeof(uint64_t);
+ // uid
+ taosArrayPush(pBlock->pDataBlock, &infoData);
+ // group id
+ taosArrayPush(pBlock->pDataBlock, &infoData);
+
+ infoData.info.type = TSDB_DATA_TYPE_TIMESTAMP;
+ infoData.info.bytes = sizeof(TSKEY);
+ // calculate start ts
+ taosArrayPush(pBlock->pDataBlock, &infoData);
+ // calculate end ts
+ taosArrayPush(pBlock->pDataBlock, &infoData);
+
+ return pBlock;
+}
+
SSDataBlock* createOneDataBlock(const SSDataBlock* pDataBlock, bool copyData) {
if (pDataBlock == NULL) {
return NULL;
@@ -1406,6 +1451,7 @@ size_t blockDataGetCapacityInRow(const SSDataBlock* pBlock, size_t pageSize) {
int32_t payloadSize = pageSize - blockDataGetSerialMetaSize(numOfCols);
int32_t rowSize = pBlock->info.rowSize;
int32_t nRows = payloadSize / rowSize;
+ ASSERT(nRows >= 1);
// the true value must be less than the value of nRows
int32_t additional = 0;
@@ -1425,7 +1471,7 @@ size_t blockDataGetCapacityInRow(const SSDataBlock* pBlock, size_t pageSize) {
}
void colDataDestroy(SColumnInfoData* pColData) {
- if(!pColData) return;
+ if (!pColData) return;
if (IS_VAR_DATA_TYPE(pColData->info.type)) {
taosMemoryFreeClear(pColData->varmeta.offset);
} else {
@@ -1692,7 +1738,7 @@ static char* formatTimestamp(char* buf, int64_t val, int precision) {
}
struct tm ptm = {0};
taosLocalTime(&tt, &ptm);
- size_t pos = strftime(buf, 35, "%Y-%m-%d %H:%M:%S", &ptm);
+ size_t pos = strftime(buf, 35, "%Y-%m-%d %H:%M:%S", &ptm);
if (precision == TSDB_TIME_PRECISION_NANO) {
sprintf(buf + pos, ".%09d", ms);
@@ -1706,8 +1752,8 @@ static char* formatTimestamp(char* buf, int64_t val, int precision) {
}
void blockDebugShowDataBlock(SSDataBlock* pBlock, const char* flag) {
- SArray* dataBlocks = taosArrayInit(1, sizeof(SSDataBlock));
- taosArrayPush(dataBlocks, pBlock);
+ SArray* dataBlocks = taosArrayInit(1, sizeof(SSDataBlock*));
+ taosArrayPush(dataBlocks, &pBlock);
blockDebugShowDataBlocks(dataBlocks, flag);
taosArrayDestroy(dataBlocks);
}
@@ -1846,20 +1892,20 @@ char* dumpBlockData(SSDataBlock* pDataBlock, const char* flag, char** pDataBuf)
break;
case TSDB_DATA_TYPE_VARCHAR: {
memset(pBuf, 0, sizeof(pBuf));
- char* pData = colDataGetVarData(pColInfoData, j);
+ char* pData = colDataGetVarData(pColInfoData, j);
int32_t dataSize = TMIN(sizeof(pBuf), varDataLen(pData));
memcpy(pBuf, varDataVal(pData), dataSize);
len += snprintf(dumpBuf + len, size - len, " %15s |", pBuf);
if (len >= size - 1) return dumpBuf;
- } break;
+ } break;
case TSDB_DATA_TYPE_NCHAR: {
- char* pData = colDataGetVarData(pColInfoData, j);
+ char* pData = colDataGetVarData(pColInfoData, j);
int32_t dataSize = TMIN(sizeof(pBuf), varDataLen(pData));
memset(pBuf, 0, sizeof(pBuf));
- taosUcs4ToMbs((TdUcs4 *)varDataVal(pData), dataSize, pBuf);
+ taosUcs4ToMbs((TdUcs4*)varDataVal(pData), dataSize, pBuf);
len += snprintf(dumpBuf + len, size - len, " %15s |", pBuf);
if (len >= size - 1) return dumpBuf;
- } break;
+ } break;
}
}
len += snprintf(dumpBuf + len, size - len, "\n");
@@ -1876,7 +1922,7 @@ char* dumpBlockData(SSDataBlock* pDataBlock, const char* flag, char** pDataBuf)
* @param pDataBlocks
* @param vgId
* @param suid
- *
+ *
*/
int32_t buildSubmitReqFromDataBlock(SSubmitReq** pReq, const SSDataBlock* pDataBlock, STSchema* pTSchema, int32_t vgId,
tb_uid_t suid) {
@@ -1903,8 +1949,8 @@ int32_t buildSubmitReqFromDataBlock(SSubmitReq** pReq, const SSDataBlock* pDataB
tdSRowInit(&rb, pTSchema->version);
for (int32_t i = 0; i < sz; ++i) {
- int32_t colNum = taosArrayGetSize(pDataBlock->pDataBlock);
- int32_t rows = pDataBlock->info.rows;
+ int32_t colNum = taosArrayGetSize(pDataBlock->pDataBlock);
+ int32_t rows = pDataBlock->info.rows;
// int32_t rowSize = pDataBlock->info.rowSize;
// int64_t groupId = pDataBlock->info.groupId;
@@ -1925,7 +1971,7 @@ int32_t buildSubmitReqFromDataBlock(SSubmitReq** pReq, const SSDataBlock* pDataB
msgLen += sizeof(SSubmitBlk);
int32_t dataLen = 0;
- for (int32_t j = 0; j < rows; ++j) { // iterate by row
+ for (int32_t j = 0; j < rows; ++j) { // iterate by row
tdSRowResetBuf(&rb, POINTER_SHIFT(pDataBuf, msgLen + dataLen)); // set row buf
bool isStartKey = false;
int32_t offset = 0;
@@ -2080,6 +2126,7 @@ void blockEncode(const SSDataBlock* pBlock, char* data, int32_t* dataLen, int32_
int32_t* rows = (int32_t*)data;
*rows = pBlock->info.rows;
data += sizeof(int32_t);
+ ASSERT(*rows > 0);
int32_t* cols = (int32_t*)data;
*cols = numOfCols;
@@ -2088,7 +2135,7 @@ void blockEncode(const SSDataBlock* pBlock, char* data, int32_t* dataLen, int32_
// flag segment.
// the inital bit is for column info
int32_t* flagSegment = (int32_t*)data;
- *flagSegment = (1<<31);
+ *flagSegment = (1 << 31);
data += sizeof(int32_t);
@@ -2143,12 +2190,14 @@ void blockEncode(const SSDataBlock* pBlock, char* data, int32_t* dataLen, int32_
*actualLen = *dataLen;
*groupId = pBlock->info.groupId;
+ ASSERT(*dataLen > 0);
+ uDebug("build data block, actualLen:%d, rows:%d, cols:%d", *dataLen, *rows, *cols);
}
const char* blockDecode(SSDataBlock* pBlock, const char* pData) {
const char* pStart = pData;
- int32_t version = *(int32_t*) pStart;
+ int32_t version = *(int32_t*)pStart;
pStart += sizeof(int32_t);
ASSERT(version == 1);
@@ -2157,7 +2206,7 @@ const char* blockDecode(SSDataBlock* pBlock, const char* pData) {
pStart += sizeof(int32_t);
// total rows sizeof(int32_t)
- int32_t numOfRows = *(int32_t*)pStart;
+ int32_t numOfRows = *(int32_t*)pStart;
pStart += sizeof(int32_t);
// total columns sizeof(int32_t)
@@ -2236,4 +2285,3 @@ const char* blockDecode(SSDataBlock* pBlock, const char* pData) {
ASSERT(pStart - pData == dataLen);
return pStart;
}
-
diff --git a/source/common/src/tdataformat.c b/source/common/src/tdataformat.c
index 8eeab77a157993bd8d89479b221982d3b1e5c336..c0ae99806e2f97fc3e4e4340ddc6a4716452dff1 100644
--- a/source/common/src/tdataformat.c
+++ b/source/common/src/tdataformat.c
@@ -15,6 +15,7 @@
#define _DEFAULT_SOURCE
#include "tdataformat.h"
+#include "tRealloc.h"
#include "tcoding.h"
#include "tdatablock.h"
#include "tlog.h"
@@ -211,9 +212,9 @@ int32_t tTSRowNew(STSRowBuilder *pBuilder, SArray *pArray, STSchema *pTSchema, S
if (pColVal->cid == pTColumn->colId) {
iColVal++;
- if (pColVal->isNone) {
+ if (COL_VAL_IS_NONE(pColVal)) {
flags |= TSROW_HAS_NONE;
- } else if (pColVal->isNull) {
+ } else if (COL_VAL_IS_NULL(pColVal)) {
flags |= TSROW_HAS_NULL;
maxIdx = nkv;
nTag++;
@@ -397,9 +398,9 @@ int32_t tTSRowNew(STSRowBuilder *pBuilder, SArray *pArray, STSchema *pTSchema, S
if (pColVal->cid == pTColumn->colId) {
iColVal++;
- if (pColVal->isNone) {
+ if (COL_VAL_IS_NONE(pColVal)) {
goto _set_none;
- } else if (pColVal->isNull) {
+ } else if (COL_VAL_IS_NULL(pColVal)) {
goto _set_null;
} else {
goto _set_value;
@@ -680,7 +681,7 @@ int32_t tGetTSRow(uint8_t *p, STSRow2 **ppRow) {
return n;
}
-// STSchema
+// STSchema ========================================
int32_t tTSchemaCreate(int32_t sver, SSchema *pSchema, int32_t ncols, STSchema **ppTSchema) {
*ppTSchema = (STSchema *)taosMemoryMalloc(sizeof(STSchema) + sizeof(STColumn) * ncols);
if (*ppTSchema == NULL) {
@@ -720,9 +721,7 @@ void tTSchemaDestroy(STSchema *pTSchema) {
if (pTSchema) taosMemoryFree(pTSchema);
}
-// STSRowBuilder
-
-// STag
+// STag ========================================
static int tTagValCmprFn(const void *p1, const void *p2) {
if (((STagVal *)p1)->cid < ((STagVal *)p2)->cid) {
return -1;
@@ -1064,6 +1063,26 @@ _err:
return code;
}
+void tTagSetCid(const STag *pTag, int16_t iTag, int16_t cid) {
+ uint8_t *p = NULL;
+ int8_t isLarge = pTag->flags & TD_TAG_LARGE;
+ int16_t offset = 0;
+
+ if (isLarge) {
+ p = (uint8_t *)&((int16_t *)pTag->idx)[pTag->nTag];
+ } else {
+ p = (uint8_t *)&pTag->idx[pTag->nTag];
+ }
+
+ if (isLarge) {
+ offset = ((int16_t *)pTag->idx)[iTag];
+ } else {
+ offset = pTag->idx[iTag];
+ }
+
+ tPutI16v(p + offset, cid);
+}
+
#if 1 // ===================================================================================================================
int tdInitTSchemaBuilder(STSchemaBuilder *pBuilder, schema_ver_t version) {
if (pBuilder == NULL) return -1;
@@ -1152,4 +1171,495 @@ STSchema *tdGetSchemaFromBuilder(STSchemaBuilder *pBuilder) {
return pSchema;
}
-#endif
\ No newline at end of file
+#endif
+
+// SColData ========================================
+void tColDataDestroy(void *ph) {
+ SColData *pColData = (SColData *)ph;
+
+ tFree(pColData->pBitMap);
+ tFree((uint8_t *)pColData->aOffset);
+ tFree(pColData->pData);
+}
+
+void tColDataInit(SColData *pColData, int16_t cid, int8_t type, int8_t smaOn) {
+ pColData->cid = cid;
+ pColData->type = type;
+ pColData->smaOn = smaOn;
+ tColDataClear(pColData);
+}
+
+void tColDataClear(SColData *pColData) {
+ pColData->nVal = 0;
+ pColData->flag = 0;
+ pColData->nData = 0;
+}
+
+static FORCE_INLINE int32_t tColDataPutValue(SColData *pColData, SColVal *pColVal) {
+ int32_t code = 0;
+
+ if (IS_VAR_DATA_TYPE(pColData->type)) {
+ code = tRealloc((uint8_t **)(&pColData->aOffset), sizeof(int32_t) * (pColData->nVal + 1));
+ if (code) goto _exit;
+ pColData->aOffset[pColData->nVal] = pColData->nData;
+
+ if (pColVal->value.nData) {
+ code = tRealloc(&pColData->pData, pColData->nData + pColVal->value.nData);
+ if (code) goto _exit;
+ memcpy(pColData->pData + pColData->nData, pColVal->value.pData, pColVal->value.nData);
+ pColData->nData += pColVal->value.nData;
+ }
+ } else {
+ ASSERT(pColData->nData == tDataTypes[pColData->type].bytes * pColData->nVal);
+ code = tRealloc(&pColData->pData, pColData->nData + tDataTypes[pColData->type].bytes);
+ if (code) goto _exit;
+ pColData->nData += tPutValue(pColData->pData + pColData->nData, &pColVal->value, pColVal->type);
+ }
+
+_exit:
+ return code;
+}
+static FORCE_INLINE int32_t tColDataAppendValue0(SColData *pColData, SColVal *pColVal) { // 0
+ int32_t code = 0;
+
+ if (COL_VAL_IS_NONE(pColVal)) {
+ pColData->flag = HAS_NONE;
+ } else if (COL_VAL_IS_NULL(pColVal)) {
+ pColData->flag = HAS_NULL;
+ } else {
+ pColData->flag = HAS_VALUE;
+ code = tColDataPutValue(pColData, pColVal);
+ if (code) goto _exit;
+ }
+ pColData->nVal++;
+
+_exit:
+ return code;
+}
+static FORCE_INLINE int32_t tColDataAppendValue1(SColData *pColData, SColVal *pColVal) { // HAS_NONE
+ int32_t code = 0;
+
+ if (!COL_VAL_IS_NONE(pColVal)) {
+ int32_t nBit = BIT1_SIZE(pColData->nVal + 1);
+
+ code = tRealloc(&pColData->pBitMap, nBit);
+ if (code) goto _exit;
+
+ memset(pColData->pBitMap, 0, nBit);
+ SET_BIT1(pColData->pBitMap, pColData->nVal, 1);
+
+ if (COL_VAL_IS_NULL(pColVal)) {
+ pColData->flag |= HAS_NULL;
+ } else {
+ pColData->flag |= HAS_VALUE;
+
+ if (pColData->nVal) {
+ if (IS_VAR_DATA_TYPE(pColData->type)) {
+ int32_t nOffset = sizeof(int32_t) * pColData->nVal;
+ code = tRealloc((uint8_t **)(&pColData->aOffset), nOffset);
+ if (code) goto _exit;
+ memset(pColData->aOffset, 0, nOffset);
+ } else {
+ pColData->nData = tDataTypes[pColData->type].bytes * pColData->nVal;
+ code = tRealloc(&pColData->pData, pColData->nData);
+ if (code) goto _exit;
+ memset(pColData->pData, 0, pColData->nData);
+ }
+ }
+
+ code = tColDataPutValue(pColData, pColVal);
+ if (code) goto _exit;
+ }
+ }
+ pColData->nVal++;
+
+_exit:
+ return code;
+}
+static FORCE_INLINE int32_t tColDataAppendValue2(SColData *pColData, SColVal *pColVal) { // HAS_NULL
+ int32_t code = 0;
+
+ if (!COL_VAL_IS_NULL(pColVal)) {
+ int32_t nBit = BIT1_SIZE(pColData->nVal + 1);
+ code = tRealloc(&pColData->pBitMap, nBit);
+ if (code) goto _exit;
+
+ if (COL_VAL_IS_NONE(pColVal)) {
+ pColData->flag |= HAS_NONE;
+
+ memset(pColData->pBitMap, 255, nBit);
+ SET_BIT1(pColData->pBitMap, pColData->nVal, 0);
+ } else {
+ pColData->flag |= HAS_VALUE;
+
+ memset(pColData->pBitMap, 0, nBit);
+ SET_BIT1(pColData->pBitMap, pColData->nVal, 1);
+
+ if (pColData->nVal) {
+ if (IS_VAR_DATA_TYPE(pColData->type)) {
+ int32_t nOffset = sizeof(int32_t) * pColData->nVal;
+ code = tRealloc((uint8_t **)(&pColData->aOffset), nOffset);
+ if (code) goto _exit;
+ memset(pColData->aOffset, 0, nOffset);
+ } else {
+ pColData->nData = tDataTypes[pColData->type].bytes * pColData->nVal;
+ code = tRealloc(&pColData->pData, pColData->nData);
+ if (code) goto _exit;
+ memset(pColData->pData, 0, pColData->nData);
+ }
+ }
+
+ code = tColDataPutValue(pColData, pColVal);
+ if (code) goto _exit;
+ }
+ }
+ pColData->nVal++;
+
+_exit:
+ return code;
+}
+static FORCE_INLINE int32_t tColDataAppendValue3(SColData *pColData, SColVal *pColVal) { // HAS_NULL|HAS_NONE
+ int32_t code = 0;
+
+ if (COL_VAL_IS_NONE(pColVal)) {
+ code = tRealloc(&pColData->pBitMap, BIT1_SIZE(pColData->nVal + 1));
+ if (code) goto _exit;
+
+ SET_BIT1(pColData->pBitMap, pColData->nVal, 0);
+ } else if (COL_VAL_IS_NULL(pColVal)) {
+ code = tRealloc(&pColData->pBitMap, BIT1_SIZE(pColData->nVal + 1));
+ if (code) goto _exit;
+
+ SET_BIT1(pColData->pBitMap, pColData->nVal, 1);
+ } else {
+ pColData->flag |= HAS_VALUE;
+
+ uint8_t *pBitMap = NULL;
+ code = tRealloc(&pBitMap, BIT2_SIZE(pColData->nVal + 1));
+ if (code) goto _exit;
+
+ for (int32_t iVal = 0; iVal < pColData->nVal; iVal++) {
+ SET_BIT2(pBitMap, iVal, GET_BIT1(pColData->pBitMap, iVal));
+ }
+ SET_BIT2(pBitMap, pColData->nVal, 2);
+
+ tFree(pColData->pBitMap);
+ pColData->pBitMap = pBitMap;
+
+ if (pColData->nVal) {
+ if (IS_VAR_DATA_TYPE(pColData->type)) {
+ int32_t nOffset = sizeof(int32_t) * pColData->nVal;
+ code = tRealloc((uint8_t **)(&pColData->aOffset), nOffset);
+ if (code) goto _exit;
+ memset(pColData->aOffset, 0, nOffset);
+ } else {
+ pColData->nData = tDataTypes[pColData->type].bytes * pColData->nVal;
+ code = tRealloc(&pColData->pData, pColData->nData);
+ if (code) goto _exit;
+ memset(pColData->pData, 0, pColData->nData);
+ }
+ }
+
+ code = tColDataPutValue(pColData, pColVal);
+ if (code) goto _exit;
+ }
+ pColData->nVal++;
+
+_exit:
+ return code;
+}
+static FORCE_INLINE int32_t tColDataAppendValue4(SColData *pColData, SColVal *pColVal) { // HAS_VALUE
+ int32_t code = 0;
+
+ if (!COL_VAL_IS_VALUE(pColVal)) {
+ if (COL_VAL_IS_NONE(pColVal)) {
+ pColData->flag |= HAS_NONE;
+ } else {
+ pColData->flag |= HAS_NULL;
+ }
+
+ int32_t nBit = BIT1_SIZE(pColData->nVal + 1);
+ code = tRealloc(&pColData->pBitMap, nBit);
+ if (code) goto _exit;
+
+ memset(pColData->pBitMap, 255, nBit);
+ SET_BIT1(pColData->pBitMap, pColData->nVal, 0);
+
+ code = tColDataPutValue(pColData, pColVal);
+ if (code) goto _exit;
+ } else {
+ code = tColDataPutValue(pColData, pColVal);
+ if (code) goto _exit;
+ }
+ pColData->nVal++;
+
+_exit:
+ return code;
+}
+static FORCE_INLINE int32_t tColDataAppendValue5(SColData *pColData, SColVal *pColVal) { // HAS_VALUE|HAS_NONE
+ int32_t code = 0;
+
+ if (COL_VAL_IS_NULL(pColVal)) {
+ pColData->flag |= HAS_NULL;
+
+ uint8_t *pBitMap = NULL;
+ code = tRealloc(&pBitMap, BIT2_SIZE(pColData->nVal + 1));
+ if (code) goto _exit;
+
+ for (int32_t iVal = 0; iVal < pColData->nVal; iVal++) {
+ SET_BIT2(pBitMap, iVal, GET_BIT1(pColData->pBitMap, iVal) ? 2 : 0);
+ }
+ SET_BIT2(pBitMap, pColData->nVal, 1);
+
+ tFree(pColData->pBitMap);
+ pColData->pBitMap = pBitMap;
+ } else {
+ code = tRealloc(&pColData->pBitMap, BIT1_SIZE(pColData->nVal + 1));
+ if (code) goto _exit;
+
+ if (COL_VAL_IS_NONE(pColVal)) {
+ SET_BIT1(pColData->pBitMap, pColData->nVal, 0);
+ } else {
+ SET_BIT1(pColData->pBitMap, pColData->nVal, 1);
+ }
+ }
+ code = tColDataPutValue(pColData, pColVal);
+ if (code) goto _exit;
+
+ pColData->nVal++;
+
+_exit:
+ return code;
+}
+static FORCE_INLINE int32_t tColDataAppendValue6(SColData *pColData, SColVal *pColVal) { // HAS_VALUE|HAS_NULL
+ int32_t code = 0;
+
+ if (COL_VAL_IS_NONE(pColVal)) {
+ pColData->flag |= HAS_NONE;
+
+ uint8_t *pBitMap = NULL;
+ code = tRealloc(&pBitMap, BIT2_SIZE(pColData->nVal + 1));
+ if (code) goto _exit;
+
+ for (int32_t iVal = 0; iVal < pColData->nVal; iVal++) {
+ SET_BIT2(pBitMap, iVal, GET_BIT1(pColData->pBitMap, iVal) ? 2 : 1);
+ }
+ SET_BIT2(pBitMap, pColData->nVal, 0);
+
+ tFree(pColData->pBitMap);
+ pColData->pBitMap = pBitMap;
+ } else {
+ code = tRealloc(&pColData->pBitMap, BIT1_SIZE(pColData->nVal + 1));
+ if (code) goto _exit;
+
+ if (COL_VAL_IS_NULL(pColVal)) {
+ SET_BIT1(pColData->pBitMap, pColData->nVal, 0);
+ } else {
+ SET_BIT1(pColData->pBitMap, pColData->nVal, 1);
+ }
+ }
+ code = tColDataPutValue(pColData, pColVal);
+ if (code) goto _exit;
+
+ pColData->nVal++;
+
+_exit:
+ return code;
+}
+static FORCE_INLINE int32_t tColDataAppendValue7(SColData *pColData,
+ SColVal *pColVal) { // HAS_VALUE|HAS_NULL|HAS_NONE
+ int32_t code = 0;
+
+ code = tRealloc(&pColData->pBitMap, BIT2_SIZE(pColData->nVal + 1));
+ if (code) goto _exit;
+
+ if (COL_VAL_IS_NONE(pColVal)) {
+ SET_BIT2(pColData->pBitMap, pColData->nVal, 0);
+ } else if (COL_VAL_IS_NULL(pColVal)) {
+ SET_BIT2(pColData->pBitMap, pColData->nVal, 1);
+ } else {
+ SET_BIT2(pColData->pBitMap, pColData->nVal, 2);
+ }
+ code = tColDataPutValue(pColData, pColVal);
+ if (code) goto _exit;
+
+ pColData->nVal++;
+
+_exit:
+ return code;
+}
+static int32_t (*tColDataAppendValueImpl[])(SColData *pColData, SColVal *pColVal) = {
+ tColDataAppendValue0, // 0
+ tColDataAppendValue1, // HAS_NONE
+ tColDataAppendValue2, // HAS_NULL
+ tColDataAppendValue3, // HAS_NULL|HAS_NONE
+ tColDataAppendValue4, // HAS_VALUE
+ tColDataAppendValue5, // HAS_VALUE|HAS_NONE
+ tColDataAppendValue6, // HAS_VALUE|HAS_NULL
+ tColDataAppendValue7 // HAS_VALUE|HAS_NULL|HAS_NONE
+};
+int32_t tColDataAppendValue(SColData *pColData, SColVal *pColVal) {
+ ASSERT(pColData->cid == pColVal->cid && pColData->type == pColVal->type);
+ return tColDataAppendValueImpl[pColData->flag](pColData, pColVal);
+}
+
+static FORCE_INLINE void tColDataGetValue1(SColData *pColData, int32_t iVal, SColVal *pColVal) { // HAS_NONE
+ *pColVal = COL_VAL_NONE(pColData->cid, pColData->type);
+}
+static FORCE_INLINE void tColDataGetValue2(SColData *pColData, int32_t iVal, SColVal *pColVal) { // HAS_NULL
+ *pColVal = COL_VAL_NULL(pColData->cid, pColData->type);
+}
+static FORCE_INLINE void tColDataGetValue3(SColData *pColData, int32_t iVal, SColVal *pColVal) { // HAS_NULL|HAS_NONE
+ switch (GET_BIT1(pColData->pBitMap, iVal)) {
+ case 0:
+ *pColVal = COL_VAL_NONE(pColData->cid, pColData->type);
+ break;
+ case 1:
+ *pColVal = COL_VAL_NULL(pColData->cid, pColData->type);
+ break;
+ default:
+ ASSERT(0);
+ }
+}
+static FORCE_INLINE void tColDataGetValue4(SColData *pColData, int32_t iVal, SColVal *pColVal) { // HAS_VALUE
+ SValue value;
+ if (IS_VAR_DATA_TYPE(pColData->type)) {
+ if (iVal + 1 < pColData->nVal) {
+ value.nData = pColData->aOffset[iVal + 1] - pColData->aOffset[iVal];
+ } else {
+ value.nData = pColData->nData - pColData->aOffset[iVal];
+ }
+ value.pData = pColData->pData + pColData->aOffset[iVal];
+ } else {
+ tGetValue(pColData->pData + tDataTypes[pColData->type].bytes * iVal, &value, pColData->type);
+ }
+ *pColVal = COL_VAL_VALUE(pColData->cid, pColData->type, value);
+}
+static FORCE_INLINE void tColDataGetValue5(SColData *pColData, int32_t iVal,
+ SColVal *pColVal) { // HAS_VALUE|HAS_NONE
+ switch (GET_BIT1(pColData->pBitMap, iVal)) {
+ case 0:
+ *pColVal = COL_VAL_NONE(pColData->cid, pColData->type);
+ break;
+ case 1:
+ tColDataGetValue4(pColData, iVal, pColVal);
+ break;
+ default:
+ ASSERT(0);
+ }
+}
+static FORCE_INLINE void tColDataGetValue6(SColData *pColData, int32_t iVal,
+ SColVal *pColVal) { // HAS_VALUE|HAS_NULL
+ switch (GET_BIT1(pColData->pBitMap, iVal)) {
+ case 0:
+ *pColVal = COL_VAL_NULL(pColData->cid, pColData->type);
+ break;
+ case 1:
+ tColDataGetValue4(pColData, iVal, pColVal);
+ break;
+ default:
+ ASSERT(0);
+ }
+}
+static FORCE_INLINE void tColDataGetValue7(SColData *pColData, int32_t iVal,
+ SColVal *pColVal) { // HAS_VALUE|HAS_NULL|HAS_NONE
+ switch (GET_BIT2(pColData->pBitMap, iVal)) {
+ case 0:
+ *pColVal = COL_VAL_NONE(pColData->cid, pColData->type);
+ break;
+ case 1:
+ *pColVal = COL_VAL_NULL(pColData->cid, pColData->type);
+ break;
+ case 2:
+ tColDataGetValue4(pColData, iVal, pColVal);
+ break;
+ default:
+ ASSERT(0);
+ }
+}
+static void (*tColDataGetValueImpl[])(SColData *pColData, int32_t iVal, SColVal *pColVal) = {
+ NULL, // 0
+ tColDataGetValue1, // HAS_NONE
+ tColDataGetValue2, // HAS_NULL
+ tColDataGetValue3, // HAS_NULL | HAS_NONE
+ tColDataGetValue4, // HAS_VALUE
+ tColDataGetValue5, // HAS_VALUE | HAS_NONE
+ tColDataGetValue6, // HAS_VALUE | HAS_NULL
+ tColDataGetValue7 // HAS_VALUE | HAS_NULL | HAS_NONE
+};
+void tColDataGetValue(SColData *pColData, int32_t iVal, SColVal *pColVal) {
+ ASSERT(iVal >= 0 && iVal < pColData->nVal && pColData->flag);
+ tColDataGetValueImpl[pColData->flag](pColData, iVal, pColVal);
+}
+
+uint8_t tColDataGetBitValue(SColData *pColData, int32_t iVal) {
+ uint8_t v;
+ switch (pColData->flag) {
+ case HAS_NONE:
+ v = 0;
+ break;
+ case HAS_NULL:
+ v = 1;
+ break;
+ case (HAS_NULL | HAS_NONE):
+ v = GET_BIT1(pColData->pBitMap, iVal);
+ break;
+ case HAS_VALUE:
+ v = 2;
+ break;
+ case (HAS_VALUE | HAS_NONE):
+ v = GET_BIT1(pColData->pBitMap, iVal);
+ if (v) v = 2;
+ break;
+ case (HAS_VALUE | HAS_NULL):
+ v = GET_BIT1(pColData->pBitMap, iVal) + 1;
+ break;
+ case (HAS_VALUE | HAS_NULL | HAS_NONE):
+ v = GET_BIT2(pColData->pBitMap, iVal);
+ break;
+ default:
+ ASSERT(0);
+ break;
+ }
+ return v;
+}
+
+int32_t tColDataCopy(SColData *pColDataSrc, SColData *pColDataDest) {
+ int32_t code = 0;
+ int32_t size;
+
+ ASSERT(pColDataSrc->nVal > 0);
+ ASSERT(pColDataDest->cid = pColDataSrc->cid);
+ ASSERT(pColDataDest->type = pColDataSrc->type);
+
+ pColDataDest->smaOn = pColDataSrc->smaOn;
+ pColDataDest->nVal = pColDataSrc->nVal;
+ pColDataDest->flag = pColDataSrc->flag;
+
+ // bitmap
+ if (pColDataSrc->flag != HAS_NONE && pColDataSrc->flag != HAS_NULL && pColDataSrc->flag != HAS_VALUE) {
+ size = BIT2_SIZE(pColDataSrc->nVal);
+ code = tRealloc(&pColDataDest->pBitMap, size);
+ if (code) goto _exit;
+ memcpy(pColDataDest->pBitMap, pColDataSrc->pBitMap, size);
+ }
+
+ // offset
+ if (IS_VAR_DATA_TYPE(pColDataDest->type)) {
+ size = sizeof(int32_t) * pColDataSrc->nVal;
+
+ code = tRealloc((uint8_t **)&pColDataDest->aOffset, size);
+ if (code) goto _exit;
+
+ memcpy(pColDataDest->aOffset, pColDataSrc->aOffset, size);
+ }
+
+ // value
+ pColDataDest->nData = pColDataSrc->nData;
+ code = tRealloc(&pColDataDest->pData, pColDataSrc->nData);
+ if (code) goto _exit;
+ memcpy(pColDataDest->pData, pColDataSrc->pData, pColDataDest->nData);
+
+_exit:
+ return code;
+}
\ No newline at end of file
diff --git a/source/common/src/tglobal.c b/source/common/src/tglobal.c
index ee9d75155563a9892ce952e50032b47e5b5c1eb5..c0203dadb82066992510fde7143a82d68afb023b 100644
--- a/source/common/src/tglobal.c
+++ b/source/common/src/tglobal.c
@@ -58,12 +58,12 @@ int32_t tsNumOfMnodeFetchThreads = 1;
int32_t tsNumOfMnodeReadThreads = 1;
int32_t tsNumOfVnodeQueryThreads = 4;
int32_t tsNumOfVnodeStreamThreads = 2;
-int32_t tsNumOfVnodeFetchThreads = 4;
+int32_t tsNumOfVnodeFetchThreads = 1;
int32_t tsNumOfVnodeWriteThreads = 2;
int32_t tsNumOfVnodeSyncThreads = 2;
int32_t tsNumOfVnodeRsmaThreads = 2;
int32_t tsNumOfQnodeQueryThreads = 4;
-int32_t tsNumOfQnodeFetchThreads = 4;
+int32_t tsNumOfQnodeFetchThreads = 1;
int32_t tsNumOfSnodeSharedThreads = 2;
int32_t tsNumOfSnodeUniqueThreads = 2;
@@ -85,12 +85,16 @@ uint16_t tsTelemPort = 80;
char tsSmlTagName[TSDB_COL_NAME_LEN] = "_tag_null";
char tsSmlChildTableName[TSDB_TABLE_NAME_LEN] = ""; // user defined child table name can be specified in tag value.
// If set to empty system will generate table name using MD5 hash.
-bool tsSmlDataFormat =
- true; // true means that the name and order of cols in each line are the same(only for influx protocol)
+bool tsSmlDataFormat = false; // true means that the name and order of cols in each line are the same(only for influx protocol)
// query
int32_t tsQueryPolicy = 1;
int32_t tsQuerySmaOptimize = 0;
+int32_t tsQueryRsmaTolerance = 1000; // the tolerance time (ms) to judge from which level to query rsma data.
+bool tsQueryPlannerTrace = false;
+int32_t tsQueryNodeChunkSize = 32 * 1024;
+bool tsQueryUseNodeAllocator = true;
+bool tsKeepColumnName = false;
/*
* denote if the server needs to compress response message at the application layer to client, including query rsp,
@@ -128,10 +132,6 @@ int32_t tsMinIntervalTime = 1;
int32_t tsQueryBufferSize = -1;
int64_t tsQueryBufferSizeBytes = -1;
-// tsdb config
-// For backward compatibility
-bool tsdbForceKeepFile = false;
-
int32_t tsDiskCfgNum = 0;
SDiskCfg tsDiskCfg[TFS_MAX_DISKS] = {0};
@@ -165,7 +165,8 @@ int32_t tsMqRebalanceInterval = 2;
int32_t tsTtlUnit = 86400;
int32_t tsTtlPushInterval = 86400;
int32_t tsGrantHBInterval = 60;
-int32_t tsUptimeInterval = 300; // seconds
+int32_t tsUptimeInterval = 300; // seconds
+char tsUdfdResFuncs[1024] = ""; // udfd resident funcs that teardown when udfd exits
#ifndef _STORAGE
int32_t taosSetTfsCfg(SConfig *pCfg) {
@@ -204,7 +205,9 @@ static int32_t taosLoadCfg(SConfig *pCfg, const char **envCmd, const char *input
tstrncpy(cfgFile, cfgDir, sizeof(cfgDir));
}
- if (apolloUrl == NULL || apolloUrl[0] == '\0') cfgGetApollUrl(envCmd, envFile, apolloUrl);
+ if (apolloUrl != NULL && apolloUrl[0] == '\0') {
+ cfgGetApollUrl(envCmd, envFile, apolloUrl);
+ }
if (cfgLoad(pCfg, CFG_STYPE_APOLLO_URL, apolloUrl) != 0) {
uError("failed to load from apollo url:%s since %s", apolloUrl, terrstr());
@@ -284,8 +287,12 @@ static int32_t taosAddClientCfg(SConfig *pCfg) {
if (cfgAddInt32(pCfg, "shellActivityTimer", tsShellActivityTimer, 1, 120, 1) != 0) return -1;
if (cfgAddInt32(pCfg, "compressMsgSize", tsCompressMsgSize, -1, 100000000, 1) != 0) return -1;
if (cfgAddInt32(pCfg, "compressColData", tsCompressColData, -1, 100000000, 1) != 0) return -1;
- if (cfgAddInt32(pCfg, "queryPolicy", tsQueryPolicy, 1, 3, 1) != 0) return -1;
+ if (cfgAddInt32(pCfg, "queryPolicy", tsQueryPolicy, 1, 4, 1) != 0) return -1;
if (cfgAddInt32(pCfg, "querySmaOptimize", tsQuerySmaOptimize, 0, 1, 1) != 0) return -1;
+ if (cfgAddBool(pCfg, "queryPlannerTrace", tsQueryPlannerTrace, true) != 0) return -1;
+ if (cfgAddInt32(pCfg, "queryNodeChunkSize", tsQueryNodeChunkSize, 1024, 128 * 1024, true) != 0) return -1;
+ if (cfgAddBool(pCfg, "queryUseNodeAllocator", tsQueryUseNodeAllocator, true) != 0) return -1;
+ if (cfgAddBool(pCfg, "keepColumnName", tsKeepColumnName, true) != 0) return -1;
if (cfgAddString(pCfg, "smlChildTableName", "", 1) != 0) return -1;
if (cfgAddString(pCfg, "smlTagName", tsSmlTagName, 1) != 0) return -1;
if (cfgAddBool(pCfg, "smlDataFormat", tsSmlDataFormat, 1) != 0) return -1;
@@ -367,9 +374,8 @@ static int32_t taosAddServerCfg(SConfig *pCfg) {
tsNumOfVnodeStreamThreads = TMAX(tsNumOfVnodeStreamThreads, 4);
if (cfgAddInt32(pCfg, "numOfVnodeStreamThreads", tsNumOfVnodeStreamThreads, 4, 1024, 0) != 0) return -1;
- tsNumOfVnodeFetchThreads = tsNumOfCores / 4;
- tsNumOfVnodeFetchThreads = TMAX(tsNumOfVnodeFetchThreads, 4);
- if (cfgAddInt32(pCfg, "numOfVnodeFetchThreads", tsNumOfVnodeFetchThreads, 4, 1024, 0) != 0) return -1;
+ tsNumOfVnodeFetchThreads = 1;
+ if (cfgAddInt32(pCfg, "numOfVnodeFetchThreads", tsNumOfVnodeFetchThreads, 1, 1024, 0) != 0) return -1;
tsNumOfVnodeWriteThreads = tsNumOfCores;
tsNumOfVnodeWriteThreads = TMAX(tsNumOfVnodeWriteThreads, 1);
@@ -387,9 +393,9 @@ static int32_t taosAddServerCfg(SConfig *pCfg) {
tsNumOfQnodeQueryThreads = TMAX(tsNumOfQnodeQueryThreads, 4);
if (cfgAddInt32(pCfg, "numOfQnodeQueryThreads", tsNumOfQnodeQueryThreads, 1, 1024, 0) != 0) return -1;
- tsNumOfQnodeFetchThreads = tsNumOfCores / 2;
- tsNumOfQnodeFetchThreads = TMAX(tsNumOfQnodeFetchThreads, 4);
- if (cfgAddInt32(pCfg, "numOfQnodeFetchThreads", tsNumOfQnodeFetchThreads, 1, 1024, 0) != 0) return -1;
+ // tsNumOfQnodeFetchThreads = tsNumOfCores / 2;
+ // tsNumOfQnodeFetchThreads = TMAX(tsNumOfQnodeFetchThreads, 4);
+ // if (cfgAddInt32(pCfg, "numOfQnodeFetchThreads", tsNumOfQnodeFetchThreads, 1, 1024, 0) != 0) return -1;
tsNumOfSnodeSharedThreads = tsNumOfCores / 4;
tsNumOfSnodeSharedThreads = TRANGE(tsNumOfSnodeSharedThreads, 2, 4);
@@ -421,12 +427,161 @@ static int32_t taosAddServerCfg(SConfig *pCfg) {
if (cfgAddInt32(pCfg, "ttlUnit", tsTtlUnit, 1, 86400 * 365, 1) != 0) return -1;
if (cfgAddInt32(pCfg, "ttlPushInterval", tsTtlPushInterval, 1, 100000, 1) != 0) return -1;
if (cfgAddInt32(pCfg, "uptimeInterval", tsUptimeInterval, 1, 100000, 1) != 0) return -1;
+ if (cfgAddInt32(pCfg, "queryRsmaTolerance", tsQueryRsmaTolerance, 0, 900000, 0) != 0) return -1;
if (cfgAddBool(pCfg, "udf", tsStartUdfd, 0) != 0) return -1;
+ if (cfgAddString(pCfg, "udfdResFuncs", tsUdfdResFuncs, 0) != 0) return -1;
GRANT_CFG_ADD;
return 0;
}
+static int32_t taosUpdateServerCfg(SConfig *pCfg) {
+ SConfigItem *pItem;
+ ECfgSrcType stype;
+ int32_t numOfCores;
+ int64_t totalMemoryKB;
+
+ pItem = cfgGetItem(tsCfg, "numOfCores");
+ if (pItem == NULL) {
+ return -1;
+ } else {
+ stype = pItem->stype;
+ numOfCores = pItem->fval;
+ }
+
+ pItem = cfgGetItem(tsCfg, "supportVnodes");
+ if (pItem != NULL && pItem->stype == CFG_STYPE_DEFAULT) {
+ tsNumOfSupportVnodes = numOfCores * 2;
+ tsNumOfSupportVnodes = TMAX(tsNumOfSupportVnodes, 2);
+ pItem->i32 = tsNumOfSupportVnodes;
+ pItem->stype = stype;
+ }
+
+ pItem = cfgGetItem(tsCfg, "numOfRpcThreads");
+ if (pItem != NULL && pItem->stype == CFG_STYPE_DEFAULT) {
+ tsNumOfRpcThreads = numOfCores / 2;
+ tsNumOfRpcThreads = TRANGE(tsNumOfRpcThreads, 1, 4);
+ pItem->i32 = tsNumOfRpcThreads;
+ pItem->stype = stype;
+ }
+
+ pItem = cfgGetItem(tsCfg, "numOfCommitThreads");
+ if (pItem != NULL && pItem->stype == CFG_STYPE_DEFAULT) {
+ tsNumOfCommitThreads = numOfCores / 2;
+ tsNumOfCommitThreads = TRANGE(tsNumOfCommitThreads, 2, 4);
+ pItem->i32 = tsNumOfCommitThreads;
+ pItem->stype = stype;
+ }
+
+ pItem = cfgGetItem(tsCfg, "numOfMnodeReadThreads");
+ if (pItem != NULL && pItem->stype == CFG_STYPE_DEFAULT) {
+ tsNumOfMnodeReadThreads = numOfCores / 8;
+ tsNumOfMnodeReadThreads = TRANGE(tsNumOfMnodeReadThreads, 1, 4);
+ pItem->i32 = tsNumOfMnodeReadThreads;
+ pItem->stype = stype;
+ }
+
+ pItem = cfgGetItem(tsCfg, "numOfVnodeQueryThreads");
+ if (pItem != NULL && pItem->stype == CFG_STYPE_DEFAULT) {
+ tsNumOfVnodeQueryThreads = numOfCores * 2;
+ tsNumOfVnodeQueryThreads = TMAX(tsNumOfVnodeQueryThreads, 4);
+ pItem->i32 = tsNumOfVnodeQueryThreads;
+ pItem->stype = stype;
+ }
+
+ pItem = cfgGetItem(tsCfg, "numOfVnodeStreamThreads");
+ if (pItem != NULL && pItem->stype == CFG_STYPE_DEFAULT) {
+ tsNumOfVnodeStreamThreads = numOfCores / 4;
+ tsNumOfVnodeStreamThreads = TMAX(tsNumOfVnodeStreamThreads, 4);
+ pItem->i32 = tsNumOfVnodeStreamThreads;
+ pItem->stype = stype;
+ }
+
+ pItem = cfgGetItem(tsCfg, "numOfVnodeFetchThreads");
+ if (pItem != NULL && pItem->stype == CFG_STYPE_DEFAULT) {
+ tsNumOfVnodeFetchThreads = numOfCores / 4;
+ tsNumOfVnodeFetchThreads = TMAX(tsNumOfVnodeFetchThreads, 4);
+ pItem->i32 = tsNumOfVnodeFetchThreads;
+ pItem->stype = stype;
+ }
+
+ pItem = cfgGetItem(tsCfg, "numOfVnodeWriteThreads");
+ if (pItem != NULL && pItem->stype == CFG_STYPE_DEFAULT) {
+ tsNumOfVnodeWriteThreads = numOfCores;
+ tsNumOfVnodeWriteThreads = TMAX(tsNumOfVnodeWriteThreads, 1);
+ pItem->i32 = tsNumOfVnodeWriteThreads;
+ pItem->stype = stype;
+ }
+
+ pItem = cfgGetItem(tsCfg, "numOfVnodeSyncThreads");
+ if (pItem != NULL && pItem->stype == CFG_STYPE_DEFAULT) {
+ tsNumOfVnodeSyncThreads = numOfCores * 2;
+ tsNumOfVnodeSyncThreads = TMAX(tsNumOfVnodeSyncThreads, 16);
+ pItem->i32 = tsNumOfVnodeSyncThreads;
+ pItem->stype = stype;
+ }
+
+ pItem = cfgGetItem(tsCfg, "numOfVnodeRsmaThreads");
+ if (pItem != NULL && pItem->stype == CFG_STYPE_DEFAULT) {
+ tsNumOfVnodeRsmaThreads = numOfCores;
+ tsNumOfVnodeRsmaThreads = TMAX(tsNumOfVnodeRsmaThreads, 4);
+ pItem->i32 = tsNumOfVnodeRsmaThreads;
+ pItem->stype = stype;
+ }
+
+ pItem = cfgGetItem(tsCfg, "numOfQnodeQueryThreads");
+ if (pItem != NULL && pItem->stype == CFG_STYPE_DEFAULT) {
+ tsNumOfQnodeQueryThreads = numOfCores * 2;
+ tsNumOfQnodeQueryThreads = TMAX(tsNumOfQnodeQueryThreads, 4);
+ pItem->i32 = tsNumOfQnodeQueryThreads;
+ pItem->stype = stype;
+ }
+
+ /*
+ pItem = cfgGetItem(tsCfg, "numOfQnodeFetchThreads");
+ if (pItem != NULL && pItem->stype == CFG_STYPE_DEFAULT) {
+ tsNumOfQnodeFetchThreads = numOfCores / 2;
+ tsNumOfQnodeFetchThreads = TMAX(tsNumOfQnodeFetchThreads, 4);
+ pItem->i32 = tsNumOfQnodeFetchThreads;
+ pItem->stype = stype;
+ }
+ */
+
+ pItem = cfgGetItem(tsCfg, "numOfSnodeSharedThreads");
+ if (pItem != NULL && pItem->stype == CFG_STYPE_DEFAULT) {
+ tsNumOfSnodeSharedThreads = numOfCores / 4;
+ tsNumOfSnodeSharedThreads = TRANGE(tsNumOfSnodeSharedThreads, 2, 4);
+ pItem->i32 = tsNumOfSnodeSharedThreads;
+ pItem->stype = stype;
+ }
+
+ pItem = cfgGetItem(tsCfg, "numOfSnodeUniqueThreads");
+ if (pItem != NULL && pItem->stype == CFG_STYPE_DEFAULT) {
+ tsNumOfSnodeUniqueThreads = numOfCores / 4;
+ tsNumOfSnodeUniqueThreads = TRANGE(tsNumOfSnodeUniqueThreads, 2, 4);
+ pItem->i32 = tsNumOfSnodeUniqueThreads;
+ pItem->stype = stype;
+ }
+
+ pItem = cfgGetItem(tsCfg, "totalMemoryKB");
+ if (pItem == NULL) {
+ return -1;
+ } else {
+ stype = pItem->stype;
+ totalMemoryKB = pItem->i64;
+ }
+
+ pItem = cfgGetItem(tsCfg, "rpcQueueMemoryAllowed");
+ if (pItem != NULL && pItem->stype == CFG_STYPE_DEFAULT) {
+ tsRpcQueueMemoryAllowed = totalMemoryKB * 1024 * 0.1;
+ tsRpcQueueMemoryAllowed = TRANGE(tsRpcQueueMemoryAllowed, TSDB_MAX_MSG_SIZE * 10LL, TSDB_MAX_MSG_SIZE * 10000LL);
+ pItem->i64 = tsRpcQueueMemoryAllowed;
+ pItem->stype = stype;
+ }
+
+ return 0;
+}
+
static void taosSetClientLogCfg(SConfig *pCfg) {
SConfigItem *pItem = cfgGetItem(pCfg, "logDir");
tstrncpy(tsLogDir, cfgGetItem(pCfg, "logDir")->str, PATH_MAX);
@@ -497,6 +652,10 @@ static int32_t taosSetClientCfg(SConfig *pCfg) {
tsNumOfTaskQueueThreads = cfgGetItem(pCfg, "numOfTaskQueueThreads")->i32;
tsQueryPolicy = cfgGetItem(pCfg, "queryPolicy")->i32;
tsQuerySmaOptimize = cfgGetItem(pCfg, "querySmaOptimize")->i32;
+ tsQueryPlannerTrace = cfgGetItem(pCfg, "queryPlannerTrace")->bval;
+ tsQueryNodeChunkSize = cfgGetItem(pCfg, "queryNodeChunkSize")->i32;
+ tsQueryUseNodeAllocator = cfgGetItem(pCfg, "queryUseNodeAllocator")->bval;
+ tsKeepColumnName = cfgGetItem(pCfg, "keepColumnName")->bval;
return 0;
}
@@ -530,7 +689,9 @@ static int32_t taosSetServerCfg(SConfig *pCfg) {
tsQueryBufferSize = cfgGetItem(pCfg, "queryBufferSize")->i32;
tsPrintAuth = cfgGetItem(pCfg, "printAuth")->bval;
+#if !defined(WINDOWS) && !defined(DARWIN)
tsMultiProcess = cfgGetItem(pCfg, "multiProcess")->bval;
+#endif
tsMnodeShmSize = cfgGetItem(pCfg, "mnodeShmSize")->i32;
tsVnodeShmSize = cfgGetItem(pCfg, "vnodeShmSize")->i32;
tsQnodeShmSize = cfgGetItem(pCfg, "qnodeShmSize")->i32;
@@ -547,7 +708,7 @@ static int32_t taosSetServerCfg(SConfig *pCfg) {
tsNumOfVnodeSyncThreads = cfgGetItem(pCfg, "numOfVnodeSyncThreads")->i32;
tsNumOfVnodeRsmaThreads = cfgGetItem(pCfg, "numOfVnodeRsmaThreads")->i32;
tsNumOfQnodeQueryThreads = cfgGetItem(pCfg, "numOfQnodeQueryThreads")->i32;
- tsNumOfQnodeFetchThreads = cfgGetItem(pCfg, "numOfQnodeFetchThreads")->i32;
+ // tsNumOfQnodeFetchThreads = cfgGetItem(pCfg, "numOfQnodeFetchThreads")->i32;
tsNumOfSnodeSharedThreads = cfgGetItem(pCfg, "numOfSnodeSharedThreads")->i32;
tsNumOfSnodeUniqueThreads = cfgGetItem(pCfg, "numOfSnodeUniqueThreads")->i32;
tsRpcQueueMemoryAllowed = cfgGetItem(pCfg, "rpcQueueMemoryAllowed")->i64;
@@ -569,8 +730,10 @@ static int32_t taosSetServerCfg(SConfig *pCfg) {
tsTtlUnit = cfgGetItem(pCfg, "ttlUnit")->i32;
tsTtlPushInterval = cfgGetItem(pCfg, "ttlPushInterval")->i32;
tsUptimeInterval = cfgGetItem(pCfg, "uptimeInterval")->i32;
+ tsQueryRsmaTolerance = cfgGetItem(pCfg, "queryRsmaTolerance")->i32;
tsStartUdfd = cfgGetItem(pCfg, "udf")->bval;
+ tstrncpy(tsUdfdResFuncs, cfgGetItem(pCfg, "udfdResFuncs")->str, sizeof(tsUdfdResFuncs));
if (tsQueryBufferSize >= 0) {
tsQueryBufferSizeBytes = tsQueryBufferSize * 1048576UL;
@@ -686,6 +849,9 @@ int32_t taosSetCfg(SConfig *pCfg, char *name) {
break;
}
case 'k': {
+ if (strcasecmp("keepColumnName", name) == 0) {
+ tsKeepColumnName = cfgGetItem(pCfg, "keepColumnName")->bval;
+ }
break;
}
case 'l': {
@@ -762,7 +928,9 @@ int32_t taosSetCfg(SConfig *pCfg, char *name) {
}
case 'u': {
if (strcasecmp("multiProcess", name) == 0) {
+#if !defined(WINDOWS) && !defined(DARWIN)
tsMultiProcess = cfgGetItem(pCfg, "multiProcess")->bval;
+#endif
} else if (strcasecmp("udfDebugFlag", name) == 0) {
udfDebugFlag = cfgGetItem(pCfg, "udfDebugFlag")->i32;
}
@@ -795,8 +963,10 @@ int32_t taosSetCfg(SConfig *pCfg, char *name) {
tsNumOfVnodeRsmaThreads = cfgGetItem(pCfg, "numOfVnodeRsmaThreads")->i32;
} else if (strcasecmp("numOfQnodeQueryThreads", name) == 0) {
tsNumOfQnodeQueryThreads = cfgGetItem(pCfg, "numOfQnodeQueryThreads")->i32;
- } else if (strcasecmp("numOfQnodeFetchThreads", name) == 0) {
- tsNumOfQnodeFetchThreads = cfgGetItem(pCfg, "numOfQnodeFetchThreads")->i32;
+ /*
+ } else if (strcasecmp("numOfQnodeFetchThreads", name) == 0) {
+ tsNumOfQnodeFetchThreads = cfgGetItem(pCfg, "numOfQnodeFetchThreads")->i32;
+ */
} else if (strcasecmp("numOfSnodeSharedThreads", name) == 0) {
tsNumOfSnodeSharedThreads = cfgGetItem(pCfg, "numOfSnodeSharedThreads")->i32;
} else if (strcasecmp("numOfSnodeUniqueThreads", name) == 0) {
@@ -826,6 +996,14 @@ int32_t taosSetCfg(SConfig *pCfg, char *name) {
tsQnodeShmSize = cfgGetItem(pCfg, "qnodeShmSize")->i32;
} else if (strcasecmp("qDebugFlag", name) == 0) {
qDebugFlag = cfgGetItem(pCfg, "qDebugFlag")->i32;
+ } else if (strcasecmp("queryPlannerTrace", name) == 0) {
+ tsQueryPlannerTrace = cfgGetItem(pCfg, "queryPlannerTrace")->bval;
+ } else if (strcasecmp("queryNodeChunkSize", name) == 0) {
+ tsQueryNodeChunkSize = cfgGetItem(pCfg, "queryNodeChunkSize")->i32;
+ } else if (strcasecmp("queryUseNodeAllocator", name) == 0) {
+ tsQueryUseNodeAllocator = cfgGetItem(pCfg, "queryUseNodeAllocator")->bval;
+ } else if (strcasecmp("queryRsmaTolerance", name) == 0) {
+ tsQueryRsmaTolerance = cfgGetItem(pCfg, "queryRsmaTolerance")->i32;
}
break;
}
@@ -955,11 +1133,20 @@ int32_t taosCreateLog(const char *logname, int32_t logFileNum, const char *cfgDi
if (tsc) {
tsLogEmbedded = 0;
- if (taosAddClientLogCfg(pCfg) != 0) return -1;
+ if (taosAddClientLogCfg(pCfg) != 0) {
+ cfgCleanup(pCfg);
+ return -1;
+ }
} else {
tsLogEmbedded = 1;
- if (taosAddClientLogCfg(pCfg) != 0) return -1;
- if (taosAddServerLogCfg(pCfg) != 0) return -1;
+ if (taosAddClientLogCfg(pCfg) != 0) {
+ cfgCleanup(pCfg);
+ return -1;
+ }
+ if (taosAddServerLogCfg(pCfg) != 0) {
+ cfgCleanup(pCfg);
+ return -1;
+ }
}
if (taosLoadCfg(pCfg, envCmd, cfgDir, envFile, apolloUrl) != 0) {
@@ -981,9 +1168,9 @@ int32_t taosCreateLog(const char *logname, int32_t logFileNum, const char *cfgDi
taosSetServerLogCfg(pCfg);
}
- taosSetAllDebugFlag(cfgGetItem(pCfg, "debugFlag")->i32);
+ taosSetAllDebugFlag(cfgGetItem(pCfg, "debugFlag")->i32, false);
- if (taosMulMkDir(tsLogDir) != 0) {
+ if (taosMulModeMkDir(tsLogDir, 0777) != 0) {
uError("failed to create dir:%s since %s", tsLogDir, terrstr());
cfgCleanup(pCfg);
return -1;
@@ -1048,6 +1235,7 @@ int32_t taosInitCfg(const char *cfgDir, const char **envCmd, const char *envFile
if (taosSetClientCfg(tsCfg)) return -1;
} else {
if (taosSetClientCfg(tsCfg)) return -1;
+ if (taosUpdateServerCfg(tsCfg)) return -1;
if (taosSetServerCfg(tsCfg)) return -1;
if (taosSetTfsCfg(tsCfg) != 0) return -1;
}
@@ -1072,7 +1260,7 @@ void taosCleanupCfg() {
void taosCfgDynamicOptions(const char *option, const char *value) {
if (strncasecmp(option, "debugFlag", 9) == 0) {
int32_t flag = atoi(value);
- taosSetAllDebugFlag(flag);
+ taosSetAllDebugFlag(flag, true);
return;
}
@@ -1094,14 +1282,14 @@ void taosCfgDynamicOptions(const char *option, const char *value) {
}
const char *options[] = {
- "dDebugFlag", "vDebugFlag", "mDebugFlag", "wDebugFlag", "sDebugFlag", "tsdbDebugFlag",
- "tqDebugFlag", "fsDebugFlag", "udfDebugFlag", "smaDebugFlag", "idxDebugFlag", "tdbDebugFlag",
- "tmrDebugFlag", "uDebugFlag", "smaDebugFlag", "rpcDebugFlag", "qDebugFlag", "metaDebugFlag",
+ "dDebugFlag", "vDebugFlag", "mDebugFlag", "wDebugFlag", "sDebugFlag", "tsdbDebugFlag", "tqDebugFlag",
+ "fsDebugFlag", "udfDebugFlag", "smaDebugFlag", "idxDebugFlag", "tdbDebugFlag", "tmrDebugFlag", "uDebugFlag",
+ "smaDebugFlag", "rpcDebugFlag", "qDebugFlag", "metaDebugFlag", "jniDebugFlag",
};
int32_t *optionVars[] = {
- &dDebugFlag, &vDebugFlag, &mDebugFlag, &wDebugFlag, &sDebugFlag, &tsdbDebugFlag,
- &tqDebugFlag, &fsDebugFlag, &udfDebugFlag, &smaDebugFlag, &idxDebugFlag, &tdbDebugFlag,
- &tmrDebugFlag, &uDebugFlag, &smaDebugFlag, &rpcDebugFlag, &qDebugFlag, &metaDebugFlag,
+ &dDebugFlag, &vDebugFlag, &mDebugFlag, &wDebugFlag, &sDebugFlag, &tsdbDebugFlag, &tqDebugFlag,
+ &fsDebugFlag, &udfDebugFlag, &smaDebugFlag, &idxDebugFlag, &tdbDebugFlag, &tmrDebugFlag, &uDebugFlag,
+ &smaDebugFlag, &rpcDebugFlag, &qDebugFlag, &metaDebugFlag, &jniDebugFlag,
};
int32_t optionSize = tListLen(options);
@@ -1113,41 +1301,42 @@ void taosCfgDynamicOptions(const char *option, const char *value) {
int32_t flag = atoi(value);
uInfo("%s set from %d to %d", optName, *optionVars[d], flag);
*optionVars[d] = flag;
- taosSetDebugFlag(optionVars[d], optName, flag);
+ taosSetDebugFlag(optionVars[d], optName, flag, true);
return;
}
uError("failed to cfg dynamic option:%s value:%s", option, value);
}
-void taosSetDebugFlag(int32_t *pFlagPtr, const char *flagName, int32_t flagVal) {
+void taosSetDebugFlag(int32_t *pFlagPtr, const char *flagName, int32_t flagVal, bool rewrite) {
SConfigItem *pItem = cfgGetItem(tsCfg, flagName);
- if (pItem != NULL) {
+ if (pItem != NULL && (rewrite || pItem->i32 == 0)) {
pItem->i32 = flagVal;
}
*pFlagPtr = flagVal;
}
-void taosSetAllDebugFlag(int32_t flag) {
+void taosSetAllDebugFlag(int32_t flag, bool rewrite) {
if (flag <= 0) return;
- taosSetDebugFlag(&uDebugFlag, "uDebugFlag", flag);
- taosSetDebugFlag(&rpcDebugFlag, "rpcDebugFlag", flag);
- taosSetDebugFlag(&jniDebugFlag, "jniDebugFlag", flag);
- taosSetDebugFlag(&qDebugFlag, "qDebugFlag", flag);
- taosSetDebugFlag(&cDebugFlag, "cDebugFlag", flag);
- taosSetDebugFlag(&dDebugFlag, "dDebugFlag", flag);
- taosSetDebugFlag(&vDebugFlag, "vDebugFlag", flag);
- taosSetDebugFlag(&mDebugFlag, "mDebugFlag", flag);
- taosSetDebugFlag(&wDebugFlag, "wDebugFlag", flag);
- taosSetDebugFlag(&sDebugFlag, "sDebugFlag", flag);
- taosSetDebugFlag(&tsdbDebugFlag, "tsdbDebugFlag", flag);
- taosSetDebugFlag(&tqDebugFlag, "tqDebugFlag", flag);
- taosSetDebugFlag(&fsDebugFlag, "fsDebugFlag", flag);
- taosSetDebugFlag(&udfDebugFlag, "udfDebugFlag", flag);
- taosSetDebugFlag(&smaDebugFlag, "smaDebugFlag", flag);
- taosSetDebugFlag(&idxDebugFlag, "idxDebugFlag", flag);
- taosSetDebugFlag(&tdbDebugFlag, "tdbDebugFlag", flag);
- taosSetDebugFlag(&metaDebugFlag, "metaDebugFlag", flag);
+ taosSetDebugFlag(&uDebugFlag, "uDebugFlag", flag, rewrite);
+ taosSetDebugFlag(&rpcDebugFlag, "rpcDebugFlag", flag, rewrite);
+ taosSetDebugFlag(&jniDebugFlag, "jniDebugFlag", flag, rewrite);
+ taosSetDebugFlag(&qDebugFlag, "qDebugFlag", flag, rewrite);
+ taosSetDebugFlag(&cDebugFlag, "cDebugFlag", flag, rewrite);
+ taosSetDebugFlag(&dDebugFlag, "dDebugFlag", flag, rewrite);
+ taosSetDebugFlag(&vDebugFlag, "vDebugFlag", flag, rewrite);
+ taosSetDebugFlag(&mDebugFlag, "mDebugFlag", flag, rewrite);
+ taosSetDebugFlag(&wDebugFlag, "wDebugFlag", flag, rewrite);
+ taosSetDebugFlag(&sDebugFlag, "sDebugFlag", flag, rewrite);
+ taosSetDebugFlag(&tsdbDebugFlag, "tsdbDebugFlag", flag, rewrite);
+ taosSetDebugFlag(&tqDebugFlag, "tqDebugFlag", flag, rewrite);
+ taosSetDebugFlag(&fsDebugFlag, "fsDebugFlag", flag, rewrite);
+ taosSetDebugFlag(&udfDebugFlag, "udfDebugFlag", flag, rewrite);
+ taosSetDebugFlag(&smaDebugFlag, "smaDebugFlag", flag, rewrite);
+ taosSetDebugFlag(&idxDebugFlag, "idxDebugFlag", flag, rewrite);
+ taosSetDebugFlag(&tdbDebugFlag, "tdbDebugFlag", flag, rewrite);
+ taosSetDebugFlag(&metaDebugFlag, "metaDebugFlag", flag, rewrite);
+ taosSetDebugFlag(&metaDebugFlag, "tmrDebugFlag", flag, rewrite);
uInfo("all debug flag are set to %d", flag);
}
diff --git a/source/common/src/tmsg.c b/source/common/src/tmsg.c
index 058f26d1454a20af7fe5ee480e9b9240ade999af..f4ffc4c996d540a0623319cad50cf25483414ea4 100644
--- a/source/common/src/tmsg.c
+++ b/source/common/src/tmsg.c
@@ -994,6 +994,7 @@ int32_t tSerializeSStatusReq(void *buf, int32_t bufLen, SStatusReq *pReq) {
SVnodeLoad *pload = taosArrayGet(pReq->pVloads, i);
if (tEncodeI32(&encoder, pload->vgId) < 0) return -1;
if (tEncodeI32(&encoder, pload->syncState) < 0) return -1;
+ if (tEncodeI64(&encoder, pload->cacheUsage) < 0) return -1;
if (tEncodeI64(&encoder, pload->numOfTables) < 0) return -1;
if (tEncodeI64(&encoder, pload->numOfTimeSeries) < 0) return -1;
if (tEncodeI64(&encoder, pload->totalStorage) < 0) return -1;
@@ -1063,6 +1064,7 @@ int32_t tDeserializeSStatusReq(void *buf, int32_t bufLen, SStatusReq *pReq) {
SVnodeLoad vload = {0};
if (tDecodeI32(&decoder, &vload.vgId) < 0) return -1;
if (tDecodeI32(&decoder, &vload.syncState) < 0) return -1;
+ if (tDecodeI64(&decoder, &vload.cacheUsage) < 0) return -1;
if (tDecodeI64(&decoder, &vload.numOfTables) < 0) return -1;
if (tDecodeI64(&decoder, &vload.numOfTimeSeries) < 0) return -1;
if (tDecodeI64(&decoder, &vload.totalStorage) < 0) return -1;
@@ -2024,6 +2026,9 @@ int32_t tSerializeSCreateDbReq(void *buf, int32_t bufLen, SCreateDbReq *pReq) {
if (tEncodeI64(&encoder, pReq->walRetentionSize) < 0) return -1;
if (tEncodeI32(&encoder, pReq->walRollPeriod) < 0) return -1;
if (tEncodeI64(&encoder, pReq->walSegmentSize) < 0) return -1;
+ if (tEncodeI32(&encoder, pReq->sstTrigger) < 0) return -1;
+ if (tEncodeI16(&encoder, pReq->hashPrefix) < 0) return -1;
+ if (tEncodeI16(&encoder, pReq->hashSuffix) < 0) return -1;
if (tEncodeI8(&encoder, pReq->ignoreExist) < 0) return -1;
if (tEncodeI32(&encoder, pReq->numOfRetensions) < 0) return -1;
for (int32_t i = 0; i < pReq->numOfRetensions; ++i) {
@@ -2033,6 +2038,7 @@ int32_t tSerializeSCreateDbReq(void *buf, int32_t bufLen, SCreateDbReq *pReq) {
if (tEncodeI8(&encoder, pRetension->freqUnit) < 0) return -1;
if (tEncodeI8(&encoder, pRetension->keepUnit) < 0) return -1;
}
+ if (tEncodeI32(&encoder, pReq->tsdbPageSize) < 0) return -1;
tEndEncode(&encoder);
int32_t tlen = encoder.pos;
@@ -2070,6 +2076,9 @@ int32_t tDeserializeSCreateDbReq(void *buf, int32_t bufLen, SCreateDbReq *pReq)
if (tDecodeI64(&decoder, &pReq->walRetentionSize) < 0) return -1;
if (tDecodeI32(&decoder, &pReq->walRollPeriod) < 0) return -1;
if (tDecodeI64(&decoder, &pReq->walSegmentSize) < 0) return -1;
+ if (tDecodeI32(&decoder, &pReq->sstTrigger) < 0) return -1;
+ if (tDecodeI16(&decoder, &pReq->hashPrefix) < 0) return -1;
+ if (tDecodeI16(&decoder, &pReq->hashSuffix) < 0) return -1;
if (tDecodeI8(&decoder, &pReq->ignoreExist) < 0) return -1;
if (tDecodeI32(&decoder, &pReq->numOfRetensions) < 0) return -1;
pReq->pRetensions = taosArrayInit(pReq->numOfRetensions, sizeof(SRetention));
@@ -2090,6 +2099,8 @@ int32_t tDeserializeSCreateDbReq(void *buf, int32_t bufLen, SCreateDbReq *pReq)
}
}
+ if (tDecodeI32(&decoder, &pReq->tsdbPageSize) < 0) return -1;
+
tEndDecode(&decoder);
tDecoderClear(&decoder);
@@ -2120,6 +2131,7 @@ int32_t tSerializeSAlterDbReq(void *buf, int32_t bufLen, SAlterDbReq *pReq) {
if (tEncodeI8(&encoder, pReq->strict) < 0) return -1;
if (tEncodeI8(&encoder, pReq->cacheLast) < 0) return -1;
if (tEncodeI8(&encoder, pReq->replications) < 0) return -1;
+ if (tEncodeI32(&encoder, pReq->sstTrigger) < 0) return -1;
tEndEncode(&encoder);
int32_t tlen = encoder.pos;
@@ -2146,6 +2158,7 @@ int32_t tDeserializeSAlterDbReq(void *buf, int32_t bufLen, SAlterDbReq *pReq) {
if (tDecodeI8(&decoder, &pReq->strict) < 0) return -1;
if (tDecodeI8(&decoder, &pReq->cacheLast) < 0) return -1;
if (tDecodeI8(&decoder, &pReq->replications) < 0) return -1;
+ if (tDecodeI32(&decoder, &pReq->sstTrigger) < 0) return -1;
tEndDecode(&decoder);
tDecoderClear(&decoder);
@@ -2453,6 +2466,8 @@ int32_t tSerializeSUseDbRspImp(SEncoder *pEncoder, const SUseDbRsp *pRsp) {
if (tEncodeI64(pEncoder, pRsp->uid) < 0) return -1;
if (tEncodeI32(pEncoder, pRsp->vgVersion) < 0) return -1;
if (tEncodeI32(pEncoder, pRsp->vgNum) < 0) return -1;
+ if (tEncodeI16(pEncoder, pRsp->hashPrefix) < 0) return -1;
+ if (tEncodeI16(pEncoder, pRsp->hashSuffix) < 0) return -1;
if (tEncodeI8(pEncoder, pRsp->hashMethod) < 0) return -1;
for (int32_t i = 0; i < pRsp->vgNum; ++i) {
@@ -2504,6 +2519,8 @@ int32_t tDeserializeSUseDbRspImp(SDecoder *pDecoder, SUseDbRsp *pRsp) {
if (tDecodeI64(pDecoder, &pRsp->uid) < 0) return -1;
if (tDecodeI32(pDecoder, &pRsp->vgVersion) < 0) return -1;
if (tDecodeI32(pDecoder, &pRsp->vgNum) < 0) return -1;
+ if (tDecodeI16(pDecoder, &pRsp->hashPrefix) < 0) return -1;
+ if (tDecodeI16(pDecoder, &pRsp->hashSuffix) < 0) return -1;
if (tDecodeI8(pDecoder, &pRsp->hashMethod) < 0) return -1;
if (pRsp->vgNum <= 0) {
@@ -2665,6 +2682,7 @@ int32_t tSerializeSTrimDbReq(void *buf, int32_t bufLen, STrimDbReq *pReq) {
if (tStartEncode(&encoder) < 0) return -1;
if (tEncodeCStr(&encoder, pReq->db) < 0) return -1;
+ if (tEncodeI32(&encoder, pReq->maxSpeed) < 0) return -1;
tEndEncode(&encoder);
int32_t tlen = encoder.pos;
@@ -2678,6 +2696,7 @@ int32_t tDeserializeSTrimDbReq(void *buf, int32_t bufLen, STrimDbReq *pReq) {
if (tStartDecode(&decoder) < 0) return -1;
if (tDecodeCStrTo(&decoder, pReq->db) < 0) return -1;
+ if (tDecodeI32(&decoder, &pReq->maxSpeed) < 0) return -1;
tEndDecode(&decoder);
tDecoderClear(&decoder);
@@ -3330,7 +3349,13 @@ int32_t tDeserializeSSTbHbRsp(void *buf, int32_t bufLen, SSTbHbRsp *pRsp) {
return 0;
}
-void tFreeSTableMetaRsp(void *pRsp) { taosMemoryFreeClear(((STableMetaRsp*)pRsp)->pSchemas); }
+void tFreeSTableMetaRsp(void *pRsp) {
+ if (NULL == pRsp) {
+ return;
+ }
+
+ taosMemoryFreeClear(((STableMetaRsp *)pRsp)->pSchemas);
+}
void tFreeSTableIndexRsp(void *info) {
if (NULL == info) {
@@ -3762,6 +3787,10 @@ int32_t tSerializeSCreateVnodeReq(void *buf, int32_t bufLen, SCreateVnodeReq *pR
if (tEncodeI64(&encoder, pReq->walRetentionSize) < 0) return -1;
if (tEncodeI32(&encoder, pReq->walRollPeriod) < 0) return -1;
if (tEncodeI64(&encoder, pReq->walSegmentSize) < 0) return -1;
+ if (tEncodeI16(&encoder, pReq->sstTrigger) < 0) return -1;
+ if (tEncodeI16(&encoder, pReq->hashPrefix) < 0) return -1;
+ if (tEncodeI16(&encoder, pReq->hashSuffix) < 0) return -1;
+ if (tEncodeI32(&encoder, pReq->tsdbPageSize) < 0) return -1;
tEndEncode(&encoder);
@@ -3834,6 +3863,10 @@ int32_t tDeserializeSCreateVnodeReq(void *buf, int32_t bufLen, SCreateVnodeReq *
if (tDecodeI64(&decoder, &pReq->walRetentionSize) < 0) return -1;
if (tDecodeI32(&decoder, &pReq->walRollPeriod) < 0) return -1;
if (tDecodeI64(&decoder, &pReq->walSegmentSize) < 0) return -1;
+ if (tDecodeI16(&decoder, &pReq->sstTrigger) < 0) return -1;
+ if (tDecodeI16(&decoder, &pReq->hashPrefix) < 0) return -1;
+ if (tDecodeI16(&decoder, &pReq->hashSuffix) < 0) return -1;
+ if (tDecodeI32(&decoder, &pReq->tsdbPageSize) < 0) return -1;
tEndDecode(&decoder);
tDecoderClear(&decoder);
@@ -4334,7 +4367,7 @@ int32_t tDeserializeSExplainRsp(void *buf, int32_t bufLen, SExplainRsp *pRsp) {
if (tStartDecode(&decoder) < 0) return -1;
if (tDecodeI32(&decoder, &pRsp->numOfPlans) < 0) return -1;
if (pRsp->numOfPlans > 0) {
- pRsp->subplanInfo = taosMemoryMalloc(pRsp->numOfPlans * sizeof(SExplainExecInfo));
+ pRsp->subplanInfo = taosMemoryCalloc(pRsp->numOfPlans, sizeof(SExplainExecInfo));
if (pRsp->subplanInfo == NULL) return -1;
}
for (int32_t i = 0; i < pRsp->numOfPlans; ++i) {
@@ -4342,8 +4375,7 @@ int32_t tDeserializeSExplainRsp(void *buf, int32_t bufLen, SExplainRsp *pRsp) {
if (tDecodeDouble(&decoder, &pRsp->subplanInfo[i].totalCost) < 0) return -1;
if (tDecodeU64(&decoder, &pRsp->subplanInfo[i].numOfRows) < 0) return -1;
if (tDecodeU32(&decoder, &pRsp->subplanInfo[i].verboseLen) < 0) return -1;
- if (tDecodeBinary(&decoder, (uint8_t **)&pRsp->subplanInfo[i].verboseInfo, &pRsp->subplanInfo[i].verboseLen) < 0)
- return -1;
+ if (tDecodeBinaryAlloc(&decoder, &pRsp->subplanInfo[i].verboseInfo, NULL) < 0) return -1;
}
tEndDecode(&decoder);
@@ -4352,6 +4384,19 @@ int32_t tDeserializeSExplainRsp(void *buf, int32_t bufLen, SExplainRsp *pRsp) {
return 0;
}
+void tFreeSExplainRsp(SExplainRsp *pRsp) {
+ if (NULL == pRsp) {
+ return;
+ }
+
+ for (int32_t i = 0; i < pRsp->numOfPlans; ++i) {
+ SExplainExecInfo *pExec = pRsp->subplanInfo + i;
+ taosMemoryFree(pExec->verboseInfo);
+ }
+
+ taosMemoryFreeClear(pRsp->subplanInfo);
+}
+
int32_t tSerializeSSchedulerHbReq(void *buf, int32_t bufLen, SSchedulerHbReq *pReq) {
int32_t headLen = sizeof(SMsgHead);
if (buf != NULL) {
@@ -4698,9 +4743,8 @@ int32_t tSerializeSVDeleteReq(void *buf, int32_t bufLen, SVDeleteReq *pReq) {
if (tEncodeU64(&encoder, pReq->queryId) < 0) return -1;
if (tEncodeU64(&encoder, pReq->taskId) < 0) return -1;
if (tEncodeU32(&encoder, pReq->sqlLen) < 0) return -1;
- if (tEncodeU32(&encoder, pReq->phyLen) < 0) return -1;
if (tEncodeCStr(&encoder, pReq->sql) < 0) return -1;
- if (tEncodeCStr(&encoder, pReq->msg) < 0) return -1;
+ if (tEncodeBinary(&encoder, pReq->msg, pReq->phyLen) < 0) return -1;
tEndEncode(&encoder);
int32_t tlen = encoder.pos;
@@ -4730,13 +4774,12 @@ int32_t tDeserializeSVDeleteReq(void *buf, int32_t bufLen, SVDeleteReq *pReq) {
if (tDecodeU64(&decoder, &pReq->queryId) < 0) return -1;
if (tDecodeU64(&decoder, &pReq->taskId) < 0) return -1;
if (tDecodeU32(&decoder, &pReq->sqlLen) < 0) return -1;
- if (tDecodeU32(&decoder, &pReq->phyLen) < 0) return -1;
pReq->sql = taosMemoryCalloc(1, pReq->sqlLen + 1);
if (NULL == pReq->sql) return -1;
- pReq->msg = taosMemoryCalloc(1, pReq->phyLen + 1);
- if (NULL == pReq->msg) return -1;
if (tDecodeCStrTo(&decoder, pReq->sql) < 0) return -1;
- if (tDecodeCStrTo(&decoder, pReq->msg) < 0) return -1;
+ uint64_t msgLen = 0;
+ if (tDecodeBinaryAlloc(&decoder, (void **)&pReq->msg, &msgLen) < 0) return -1;
+ pReq->phyLen = msgLen;
tEndDecode(&decoder);
@@ -4784,6 +4827,14 @@ int32_t tSerializeSCMCreateStreamReq(void *buf, int32_t bufLen, const SCMCreateS
if (tEncodeI8(&encoder, pReq->igExpired) < 0) return -1;
if (sqlLen > 0 && tEncodeCStr(&encoder, pReq->sql) < 0) return -1;
if (astLen > 0 && tEncodeCStr(&encoder, pReq->ast) < 0) return -1;
+ if (tEncodeI32(&encoder, pReq->numOfTags) < 0) return -1;
+ for (int32_t i = 0; i < pReq->numOfTags; ++i) {
+ SField *pField = taosArrayGet(pReq->pTags, i);
+ if (tEncodeI8(&encoder, pField->type) < 0) return -1;
+ if (tEncodeI8(&encoder, pField->flags) < 0) return -1;
+ if (tEncodeI32(&encoder, pField->bytes) < 0) return -1;
+ if (tEncodeCStr(&encoder, pField->name) < 0) return -1;
+ }
tEndEncode(&encoder);
@@ -4822,6 +4873,28 @@ int32_t tDeserializeSCMCreateStreamReq(void *buf, int32_t bufLen, SCMCreateStrea
if (pReq->ast == NULL) return -1;
if (tDecodeCStrTo(&decoder, pReq->ast) < 0) return -1;
}
+
+ if (tDecodeI32(&decoder, &pReq->numOfTags) < 0) return -1;
+ if (pReq->numOfTags > 0) {
+ pReq->pTags = taosArrayInit(pReq->numOfTags, sizeof(SField));
+ if (pReq->pTags == NULL) {
+ terrno = TSDB_CODE_OUT_OF_MEMORY;
+ return -1;
+ }
+
+ for (int32_t i = 0; i < pReq->numOfTags; ++i) {
+ SField field = {0};
+ if (tDecodeI8(&decoder, &field.type) < 0) return -1;
+ if (tDecodeI8(&decoder, &field.flags) < 0) return -1;
+ if (tDecodeI32(&decoder, &field.bytes) < 0) return -1;
+ if (tDecodeCStrTo(&decoder, field.name) < 0) return -1;
+ if (taosArrayPush(pReq->pTags, &field) == NULL) {
+ terrno = TSDB_CODE_OUT_OF_MEMORY;
+ return -1;
+ }
+ }
+ }
+
tEndDecode(&decoder);
tDecoderClear(&decoder);
@@ -5119,17 +5192,17 @@ int tDecodeSVCreateTbRsp(SDecoder *pCoder, SVCreateTbRsp *pRsp) {
} else {
pRsp->pMeta = NULL;
}
-
+
tEndDecode(pCoder);
return 0;
}
-void tFreeSVCreateTbRsp(void* param) {
+void tFreeSVCreateTbRsp(void *param) {
if (NULL == param) {
return;
}
-
- SVCreateTbRsp* pRsp = (SVCreateTbRsp*)param;
+
+ SVCreateTbRsp *pRsp = (SVCreateTbRsp *)param;
if (pRsp->pMeta) {
taosMemoryFree(pRsp->pMeta->pSchemas);
taosMemoryFree(pRsp->pMeta);
@@ -5141,6 +5214,7 @@ static int32_t tEncodeSVDropTbReq(SEncoder *pCoder, const SVDropTbReq *pReq) {
if (tStartEncode(pCoder) < 0) return -1;
if (tEncodeCStr(pCoder, pReq->name) < 0) return -1;
+ if (tEncodeU64(pCoder, pReq->suid) < 0) return -1;
if (tEncodeI8(pCoder, pReq->igNotExists) < 0) return -1;
tEndEncode(pCoder);
@@ -5151,6 +5225,7 @@ static int32_t tDecodeSVDropTbReq(SDecoder *pCoder, SVDropTbReq *pReq) {
if (tStartDecode(pCoder) < 0) return -1;
if (tDecodeCStr(pCoder, &pReq->name) < 0) return -1;
+ if (tDecodeU64(pCoder, &pReq->suid) < 0) return -1;
if (tDecodeI8(pCoder, &pReq->igNotExists) < 0) return -1;
tEndDecode(pCoder);
@@ -5345,7 +5420,7 @@ static int32_t tDecodeSSubmitBlkRsp(SDecoder *pDecoder, SSubmitBlkRsp *pBlock) {
if (tDecodeI32v(pDecoder, &pBlock->numOfRows) < 0) return -1;
if (tDecodeI32v(pDecoder, &pBlock->affectedRows) < 0) return -1;
if (tDecodeI64v(pDecoder, &pBlock->sver) < 0) return -1;
-
+
int32_t meta = 0;
if (tDecodeI32(pDecoder, &meta) < 0) return -1;
if (meta) {
@@ -5393,12 +5468,12 @@ int32_t tDecodeSSubmitRsp(SDecoder *pDecoder, SSubmitRsp *pRsp) {
return 0;
}
-void tFreeSSubmitBlkRsp(void* param) {
+void tFreeSSubmitBlkRsp(void *param) {
if (NULL == param) {
return;
}
-
- SSubmitBlkRsp* pRsp = (SSubmitBlkRsp*)param;
+
+ SSubmitBlkRsp *pRsp = (SSubmitBlkRsp *)param;
taosMemoryFree(pRsp->tblFName);
if (pRsp->pMeta) {
@@ -5407,7 +5482,6 @@ void tFreeSSubmitBlkRsp(void* param) {
}
}
-
void tFreeSSubmitRsp(SSubmitRsp *pRsp) {
if (NULL == pRsp) return;
@@ -5415,6 +5489,8 @@ void tFreeSSubmitRsp(SSubmitRsp *pRsp) {
for (int32_t i = 0; i < pRsp->nBlocks; ++i) {
SSubmitBlkRsp *sRsp = pRsp->pBlocks + i;
taosMemoryFree(sRsp->tblFName);
+ tFreeSTableMetaRsp(sRsp->pMeta);
+ taosMemoryFree(sRsp->pMeta);
}
taosMemoryFree(pRsp->pBlocks);
@@ -5619,7 +5695,6 @@ void tFreeSMAlterStbRsp(SMAlterStbRsp *pRsp) {
}
}
-
int32_t tEncodeSMCreateStbRsp(SEncoder *pEncoder, const SMCreateStbRsp *pRsp) {
if (tStartEncode(pEncoder) < 0) return -1;
if (tEncodeI32(pEncoder, pRsp->pMeta->pSchemas ? 1 : 0) < 0) return -1;
@@ -5671,11 +5746,9 @@ void tFreeSMCreateStbRsp(SMCreateStbRsp *pRsp) {
}
}
-
-
int32_t tEncodeSTqOffsetVal(SEncoder *pEncoder, const STqOffsetVal *pOffsetVal) {
if (tEncodeI8(pEncoder, pOffsetVal->type) < 0) return -1;
- if (pOffsetVal->type == TMQ_OFFSET__SNAPSHOT_DATA) {
+ if (pOffsetVal->type == TMQ_OFFSET__SNAPSHOT_DATA || pOffsetVal->type == TMQ_OFFSET__SNAPSHOT_META) {
if (tEncodeI64(pEncoder, pOffsetVal->uid) < 0) return -1;
if (tEncodeI64(pEncoder, pOffsetVal->ts) < 0) return -1;
} else if (pOffsetVal->type == TMQ_OFFSET__LOG) {
@@ -5690,7 +5763,7 @@ int32_t tEncodeSTqOffsetVal(SEncoder *pEncoder, const STqOffsetVal *pOffsetVal)
int32_t tDecodeSTqOffsetVal(SDecoder *pDecoder, STqOffsetVal *pOffsetVal) {
if (tDecodeI8(pDecoder, &pOffsetVal->type) < 0) return -1;
- if (pOffsetVal->type == TMQ_OFFSET__SNAPSHOT_DATA) {
+ if (pOffsetVal->type == TMQ_OFFSET__SNAPSHOT_DATA || pOffsetVal->type == TMQ_OFFSET__SNAPSHOT_META) {
if (tDecodeI64(pDecoder, &pOffsetVal->uid) < 0) return -1;
if (tDecodeI64(pDecoder, &pOffsetVal->ts) < 0) return -1;
} else if (pOffsetVal->type == TMQ_OFFSET__LOG) {
@@ -5712,10 +5785,8 @@ int32_t tFormatOffset(char *buf, int32_t maxLen, const STqOffsetVal *pVal) {
snprintf(buf, maxLen, "offset(reset to latest)");
} else if (pVal->type == TMQ_OFFSET__LOG) {
snprintf(buf, maxLen, "offset(log) ver:%" PRId64, pVal->version);
- } else if (pVal->type == TMQ_OFFSET__SNAPSHOT_DATA) {
+ } else if (pVal->type == TMQ_OFFSET__SNAPSHOT_DATA || pVal->type == TMQ_OFFSET__SNAPSHOT_META) {
snprintf(buf, maxLen, "offset(ss data) uid:%" PRId64 ", ts:%" PRId64, pVal->uid, pVal->ts);
- } else if (pVal->type == TMQ_OFFSET__SNAPSHOT_META) {
- snprintf(buf, maxLen, "offset(ss meta) uid:%" PRId64 ", ts:%" PRId64, pVal->uid, pVal->ts);
} else {
ASSERT(0);
}
@@ -5729,9 +5800,7 @@ bool tOffsetEqual(const STqOffsetVal *pLeft, const STqOffsetVal *pRight) {
} else if (pLeft->type == TMQ_OFFSET__SNAPSHOT_DATA) {
return pLeft->uid == pRight->uid && pLeft->ts == pRight->ts;
} else if (pLeft->type == TMQ_OFFSET__SNAPSHOT_META) {
- ASSERT(0);
- // TODO
- return pLeft->uid == pRight->uid && pLeft->ts == pRight->ts;
+ return pLeft->uid == pRight->uid;
} else {
ASSERT(0);
/*ASSERT(pLeft->type == TMQ_OFFSET__RESET_NONE || pLeft->type == TMQ_OFFSET__RESET_EARLIEAST ||*/
@@ -5816,6 +5885,21 @@ int32_t tDecodeDeleteRes(SDecoder *pCoder, SDeleteRes *pRes) {
if (tDecodeCStrTo(pCoder, pRes->tsColName) < 0) return -1;
return 0;
}
+
+int32_t tEncodeSMqMetaRsp(SEncoder *pEncoder, const SMqMetaRsp *pRsp) {
+ if (tEncodeSTqOffsetVal(pEncoder, &pRsp->rspOffset) < 0) return -1;
+ if (tEncodeI16(pEncoder, pRsp->resMsgType)) return -1;
+ if (tEncodeBinary(pEncoder, pRsp->metaRsp, pRsp->metaRspLen)) return -1;
+ return 0;
+}
+
+int32_t tDecodeSMqMetaRsp(SDecoder *pDecoder, SMqMetaRsp *pRsp) {
+ if (tDecodeSTqOffsetVal(pDecoder, &pRsp->rspOffset) < 0) return -1;
+ if (tDecodeI16(pDecoder, &pRsp->resMsgType) < 0) return -1;
+ if (tDecodeBinaryAlloc(pDecoder, &pRsp->metaRsp, (uint64_t *)&pRsp->metaRspLen) < 0) return -1;
+ return 0;
+}
+
int32_t tEncodeSMqDataRsp(SEncoder *pEncoder, const SMqDataRsp *pRsp) {
if (tEncodeSTqOffsetVal(pEncoder, &pRsp->reqOffset) < 0) return -1;
if (tEncodeSTqOffsetVal(pEncoder, &pRsp->rspOffset) < 0) return -1;
@@ -5882,6 +5966,116 @@ int32_t tDecodeSMqDataRsp(SDecoder *pDecoder, SMqDataRsp *pRsp) {
return 0;
}
+void tDeleteSMqDataRsp(SMqDataRsp *pRsp) {
+ taosArrayDestroy(pRsp->blockDataLen);
+ taosArrayDestroyP(pRsp->blockData, (FDelete)taosMemoryFree);
+ taosArrayDestroyP(pRsp->blockSchema, (FDelete)tDeleteSSchemaWrapper);
+ taosArrayDestroyP(pRsp->blockTbName, (FDelete)taosMemoryFree);
+}
+
+int32_t tEncodeSTaosxRsp(SEncoder *pEncoder, const STaosxRsp *pRsp) {
+ if (tEncodeSTqOffsetVal(pEncoder, &pRsp->reqOffset) < 0) return -1;
+ if (tEncodeSTqOffsetVal(pEncoder, &pRsp->rspOffset) < 0) return -1;
+ if (tEncodeI32(pEncoder, pRsp->blockNum) < 0) return -1;
+ if (pRsp->blockNum != 0) {
+ if (tEncodeI8(pEncoder, pRsp->withTbName) < 0) return -1;
+ if (tEncodeI8(pEncoder, pRsp->withSchema) < 0) return -1;
+
+ for (int32_t i = 0; i < pRsp->blockNum; i++) {
+ int32_t bLen = *(int32_t *)taosArrayGet(pRsp->blockDataLen, i);
+ void *data = taosArrayGetP(pRsp->blockData, i);
+ if (tEncodeBinary(pEncoder, (const uint8_t *)data, bLen) < 0) return -1;
+ if (pRsp->withSchema) {
+ SSchemaWrapper *pSW = (SSchemaWrapper *)taosArrayGetP(pRsp->blockSchema, i);
+ if (tEncodeSSchemaWrapper(pEncoder, pSW) < 0) return -1;
+ }
+ if (pRsp->withTbName) {
+ char *tbName = (char *)taosArrayGetP(pRsp->blockTbName, i);
+ if (tEncodeCStr(pEncoder, tbName) < 0) return -1;
+ }
+ }
+ }
+ if (tEncodeI32(pEncoder, pRsp->createTableNum) < 0) return -1;
+ if (pRsp->createTableNum) {
+ for (int32_t i = 0; i < pRsp->createTableNum; i++) {
+ void *createTableReq = taosArrayGetP(pRsp->createTableReq, i);
+ int32_t createTableLen = *(int32_t *)taosArrayGet(pRsp->createTableLen, i);
+ if (tEncodeBinary(pEncoder, createTableReq, createTableLen) < 0) return -1;
+ }
+ }
+ return 0;
+}
+
+int32_t tDecodeSTaosxRsp(SDecoder *pDecoder, STaosxRsp *pRsp) {
+ if (tDecodeSTqOffsetVal(pDecoder, &pRsp->reqOffset) < 0) return -1;
+ if (tDecodeSTqOffsetVal(pDecoder, &pRsp->rspOffset) < 0) return -1;
+ if (tDecodeI32(pDecoder, &pRsp->blockNum) < 0) return -1;
+ if (pRsp->blockNum != 0) {
+ pRsp->blockData = taosArrayInit(pRsp->blockNum, sizeof(void *));
+ pRsp->blockDataLen = taosArrayInit(pRsp->blockNum, sizeof(int32_t));
+ if (tDecodeI8(pDecoder, &pRsp->withTbName) < 0) return -1;
+ if (tDecodeI8(pDecoder, &pRsp->withSchema) < 0) return -1;
+ if (pRsp->withTbName) {
+ pRsp->blockTbName = taosArrayInit(pRsp->blockNum, sizeof(void *));
+ }
+ if (pRsp->withSchema) {
+ pRsp->blockSchema = taosArrayInit(pRsp->blockNum, sizeof(void *));
+ }
+
+ for (int32_t i = 0; i < pRsp->blockNum; i++) {
+ void *data;
+ uint64_t bLen;
+ if (tDecodeBinaryAlloc(pDecoder, &data, &bLen) < 0) return -1;
+ taosArrayPush(pRsp->blockData, &data);
+ int32_t len = bLen;
+ taosArrayPush(pRsp->blockDataLen, &len);
+
+ if (pRsp->withSchema) {
+ SSchemaWrapper *pSW = (SSchemaWrapper *)taosMemoryCalloc(1, sizeof(SSchemaWrapper));
+ if (pSW == NULL) return -1;
+ if (tDecodeSSchemaWrapper(pDecoder, pSW) < 0) return -1;
+ taosArrayPush(pRsp->blockSchema, &pSW);
+ }
+
+ if (pRsp->withTbName) {
+ char *tbName;
+ if (tDecodeCStrAlloc(pDecoder, &tbName) < 0) return -1;
+ taosArrayPush(pRsp->blockTbName, &tbName);
+ }
+ }
+ }
+ if (tDecodeI32(pDecoder, &pRsp->createTableNum) < 0) return -1;
+ if (pRsp->createTableNum) {
+ pRsp->createTableLen = taosArrayInit(pRsp->createTableNum, sizeof(int32_t));
+ pRsp->createTableReq = taosArrayInit(pRsp->createTableNum, sizeof(void *));
+ for (int32_t i = 0; i < pRsp->createTableNum; i++) {
+ void *pCreate = NULL;
+ uint64_t len;
+ if (tDecodeBinaryAlloc(pDecoder, &pCreate, &len) < 0) return -1;
+ int32_t l = (int32_t)len;
+ taosArrayPush(pRsp->createTableLen, &l);
+ taosArrayPush(pRsp->createTableReq, &pCreate);
+ }
+ }
+ return 0;
+}
+
+void tDeleteSTaosxRsp(STaosxRsp *pRsp) {
+ taosArrayDestroy(pRsp->blockDataLen);
+ pRsp->blockDataLen = NULL;
+ taosArrayDestroyP(pRsp->blockData, (FDelete)taosMemoryFree);
+ pRsp->blockData = NULL;
+ taosArrayDestroyP(pRsp->blockSchema, (FDelete)tDeleteSSchemaWrapper);
+ pRsp->blockSchema = NULL;
+ taosArrayDestroyP(pRsp->blockTbName, (FDelete)taosMemoryFree);
+ pRsp->blockTbName = NULL;
+
+ taosArrayDestroy(pRsp->createTableLen);
+ pRsp->createTableLen = NULL;
+ taosArrayDestroyP(pRsp->createTableReq, (FDelete)taosMemoryFree);
+ pRsp->createTableReq = NULL;
+}
+
int32_t tEncodeSSingleDeleteReq(SEncoder *pEncoder, const SSingleDeleteReq *pReq) {
if (tEncodeI64(pEncoder, pReq->uid) < 0) return -1;
if (tEncodeI64(pEncoder, pReq->ts) < 0) return -1;
diff --git a/source/common/src/trow.c b/source/common/src/trow.c
index 565498a47bb28bc5e76bb5a33911c86f8f42da5d..f23a2b386e280a185c1af7f4976070ed04d4f3f3 100644
--- a/source/common/src/trow.c
+++ b/source/common/src/trow.c
@@ -538,12 +538,12 @@ bool tdSTSRowIterGetTpVal(STSRowIter *pIter, col_type_t colType, int32_t offset,
} else {
pVal->val = POINTER_SHIFT(TD_ROW_DATA(pRow), offset);
}
- return TSDB_CODE_SUCCESS;
+ return true;
}
if (tdGetBitmapValType(pIter->pBitmap, pIter->colIdx - 1, &pVal->valType, 0) != TSDB_CODE_SUCCESS) {
pVal->valType = TD_VTYPE_NONE;
- return terrno;
+ return true;
}
if (pVal->valType == TD_VTYPE_NORM) {
@@ -620,7 +620,7 @@ int32_t tdSTSRowNew(SArray *pArray, STSchema *pTSchema, STSRow **ppRow) {
ASSERT(pTColumn->colId == PRIMARYKEY_TIMESTAMP_COL_ID);
} else {
if (IS_VAR_DATA_TYPE(pTColumn->type)) {
- if (pColVal && !pColVal->isNone && !pColVal->isNull) {
+ if (pColVal && COL_VAL_IS_VALUE(pColVal)) {
varDataLen += (pColVal->value.nData + sizeof(VarDataLenT));
if (maxVarDataLen < (pColVal->value.nData + sizeof(VarDataLenT))) {
maxVarDataLen = pColVal->value.nData + sizeof(VarDataLenT);
@@ -680,9 +680,9 @@ int32_t tdSTSRowNew(SArray *pArray, STSchema *pTSchema, STSRow **ppRow) {
const void *val = NULL;
if (iColVal < nColVal) {
pColVal = (SColVal *)taosArrayGet(pArray, iColVal);
- if (pColVal->isNone) {
+ if (COL_VAL_IS_NONE(pColVal)) {
valType = TD_VTYPE_NONE;
- } else if (pColVal->isNull) {
+ } else if (COL_VAL_IS_NULL(pColVal)) {
valType = TD_VTYPE_NULL;
} else if (IS_VAR_DATA_TYPE(pTColumn->type)) {
varDataSetLen(varBuf, pColVal->value.nData);
diff --git a/source/common/src/tvariant.c b/source/common/src/tvariant.c
index 0810be149716e58fdac74b67db6946fde7db62e9..a01c393441c0a4b6945226ba2c77ffe1a23ced57 100644
--- a/source/common/src/tvariant.c
+++ b/source/common/src/tvariant.c
@@ -155,8 +155,8 @@ void taosVariantCreateFromBinary(SVariant *pVar, const char *pz, size_t len, uin
void taosVariantDestroy(SVariant *pVar) {
if (pVar == NULL) return;
- if (pVar->nType == TSDB_DATA_TYPE_BINARY || pVar->nType == TSDB_DATA_TYPE_NCHAR
- || pVar->nType == TSDB_DATA_TYPE_JSON) {
+ if (pVar->nType == TSDB_DATA_TYPE_BINARY || pVar->nType == TSDB_DATA_TYPE_NCHAR ||
+ pVar->nType == TSDB_DATA_TYPE_JSON) {
taosMemoryFreeClear(pVar->pz);
pVar->nLen = 0;
}
@@ -185,8 +185,8 @@ void taosVariantAssign(SVariant *pDst, const SVariant *pSrc) {
if (pSrc == NULL || pDst == NULL) return;
pDst->nType = pSrc->nType;
- if (pSrc->nType == TSDB_DATA_TYPE_BINARY || pSrc->nType == TSDB_DATA_TYPE_NCHAR
- || pSrc->nType == TSDB_DATA_TYPE_JSON) {
+ if (pSrc->nType == TSDB_DATA_TYPE_BINARY || pSrc->nType == TSDB_DATA_TYPE_NCHAR ||
+ pSrc->nType == TSDB_DATA_TYPE_JSON) {
int32_t len = pSrc->nLen + TSDB_NCHAR_SIZE;
char *p = taosMemoryRealloc(pDst->pz, len);
assert(p);
diff --git a/source/common/test/dataformatTest.cpp b/source/common/test/dataformatTest.cpp
index 65f21bee402409eab971036db59e61cca8aa746c..6d77180610af6f2d8d13b6d977c4493af026564e 100644
--- a/source/common/test/dataformatTest.cpp
+++ b/source/common/test/dataformatTest.cpp
@@ -133,11 +133,11 @@ static int32_t genTestData(const char **data, int16_t nCols, SArray **pArray) {
SColVal colVal = {0};
colVal.cid = PRIMARYKEY_TIMESTAMP_COL_ID + i;
if (strncasecmp(data[i], NONE_CSTR, NONE_LEN) == 0) {
- colVal.isNone = 1;
+ colVal.flag = CV_FLAG_NONE;
taosArrayPush(*pArray, &colVal);
continue;
} else if (strncasecmp(data[i], NULL_CSTR, NULL_LEN) == 0) {
- colVal.isNull = 1;
+ colVal.flag = CV_FLAG_NULL;
taosArrayPush(*pArray, &colVal);
continue;
}
@@ -204,11 +204,11 @@ static int32_t genTestData(const char **data, int16_t nCols, SArray **pArray) {
}
int32_t debugPrintSColVal(SColVal *cv, int8_t type) {
- if (cv->isNone) {
+ if (COL_VAL_IS_NONE(cv)) {
printf("None ");
return 0;
}
- if (cv->isNull) {
+ if (COL_VAL_IS_NULL(cv)) {
printf("Null ");
return 0;
}
@@ -298,11 +298,11 @@ void debugPrintTSRow(STSRow2 *row, STSchema *pTSchema, const char *tags, int32_t
static int32_t checkSColVal(const char *rawVal, SColVal *cv, int8_t type) {
ASSERT(rawVal);
- if (cv->isNone) {
+ if (COL_VAL_IS_NONE(cv)) {
EXPECT_STRCASEEQ(rawVal, NONE_CSTR);
return 0;
}
- if (cv->isNull) {
+ if (COL_VAL_IS_NULL(cv)) {
EXPECT_STRCASEEQ(rawVal, NULL_CSTR);
return 0;
}
diff --git a/source/dnode/mgmt/exe/dmMain.c b/source/dnode/mgmt/exe/dmMain.c
index 4030eaa6fe2fd32ec5718b6e4f7689f619747366..6406ae560f3619ca5b7019e309430e40246d4d54 100644
--- a/source/dnode/mgmt/exe/dmMain.c
+++ b/source/dnode/mgmt/exe/dmMain.c
@@ -16,10 +16,12 @@
#define _DEFAULT_SOURCE
#include "dmMgmt.h"
#include "tconfig.h"
+#include "mnode.h"
#define DM_APOLLO_URL "The apollo string to use when configuring the server, such as: -a 'jsonFile:./tests/cfg.json', cfg.json text can be '{\"fqdn\":\"td1\"}'."
#define DM_CFG_DIR "Configuration directory."
#define DM_DMP_CFG "Dump configuration."
+#define DM_SDB_INFO "Dump sdb info."
#define DM_ENV_CMD "The env cmd variable string to use when configuring the server, such as: -e 'TAOS_FQDN=td1'."
#define DM_ENV_FILE "The env variable file path to use when configuring the server, default is './.env', .env text can be 'TAOS_FQDN=td1'."
#define DM_NODE_TYPE "Startup type of the node, default is 0."
@@ -31,6 +33,7 @@ static struct {
bool winServiceMode;
#endif
bool dumpConfig;
+ bool dumpSdb;
bool generateGrant;
bool printAuth;
bool printVersion;
@@ -82,6 +85,8 @@ static int32_t dmParseArgs(int32_t argc, char const *argv[]) {
}
} else if (strcmp(argv[i], "-a") == 0) {
tstrncpy(global.apolloUrl, argv[++i], PATH_MAX);
+ } else if (strcmp(argv[i], "-s") == 0) {
+ global.dumpSdb = true;
} else if (strcmp(argv[i], "-E") == 0) {
tstrncpy(global.envFile, argv[++i], PATH_MAX);
} else if (strcmp(argv[i], "-n") == 0) {
@@ -131,6 +136,7 @@ static void dmPrintHelp() {
printf("Usage: taosd [OPTION...] \n\n");
printf("%s%s%s%s\n", indent, "-a,", indent, DM_APOLLO_URL);
printf("%s%s%s%s\n", indent, "-c,", indent, DM_CFG_DIR);
+ printf("%s%s%s%s\n", indent, "-s,", indent, DM_SDB_INFO);
printf("%s%s%s%s\n", indent, "-C,", indent, DM_DMP_CFG);
printf("%s%s%s%s\n", indent, "-e,", indent, DM_ENV_CMD);
printf("%s%s%s%s\n", indent, "-E,", indent, DM_ENV_FILE);
@@ -229,6 +235,14 @@ int mainWindows(int argc,char** argv) {
return 0;
}
+ if (global.dumpSdb) {
+ mndDumpSdb();
+ taosCleanupCfg();
+ taosCloseLog();
+ taosCleanupArgs();
+ return 0;
+ }
+
dmSetProcInfo(argc, (char **)argv);
taosCleanupArgs();
diff --git a/source/dnode/mgmt/mgmt_qnode/src/qmWorker.c b/source/dnode/mgmt/mgmt_qnode/src/qmWorker.c
index 1c7edbe6be23ce5867d395e7071f6886f918220c..66386b0ee0bc7fac510a0e9ceb1d1bab4ac649be 100644
--- a/source/dnode/mgmt/mgmt_qnode/src/qmWorker.c
+++ b/source/dnode/mgmt/mgmt_qnode/src/qmWorker.c
@@ -87,6 +87,7 @@ int32_t qmPutRpcMsgToQueue(SQnodeMgmt *pMgmt, EQueueType qtype, SRpcMsg *pRpc) {
return 0;
default:
terrno = TSDB_CODE_INVALID_PARA;
+ taosFreeQitem(pMsg);
return -1;
}
}
diff --git a/source/dnode/mgmt/mgmt_vnode/inc/vmInt.h b/source/dnode/mgmt/mgmt_vnode/inc/vmInt.h
index ebbb9fa5d4f820cbdc46f92590dabc56161c80b2..30f54831987999daccac93f5b6cbd0ccc2dfbf28 100644
--- a/source/dnode/mgmt/mgmt_vnode/inc/vmInt.h
+++ b/source/dnode/mgmt/mgmt_vnode/inc/vmInt.h
@@ -74,6 +74,7 @@ typedef struct {
TdThread thread;
SVnodeMgmt *pMgmt;
SWrapperCfg *pCfgs;
+ SVnodeObj **ppVnodes;
} SVnodeThread;
// vmInt.c
diff --git a/source/dnode/mgmt/mgmt_vnode/src/vmFile.c b/source/dnode/mgmt/mgmt_vnode/src/vmFile.c
index cbcb541200c899260a265872f625cb306987f7ae..82fc286a949d4a668f66419cf1f7f1f7e6ca2067 100644
--- a/source/dnode/mgmt/mgmt_vnode/src/vmFile.c
+++ b/source/dnode/mgmt/mgmt_vnode/src/vmFile.c
@@ -135,12 +135,14 @@ _OVER:
if (content != NULL) taosMemoryFree(content);
if (root != NULL) cJSON_Delete(root);
if (pFile != NULL) taosCloseFile(&pFile);
+ if (*ppCfgs == NULL && pCfgs != NULL) taosMemoryFree(pCfgs);
terrno = code;
return code;
}
int32_t vmWriteVnodeListToFile(SVnodeMgmt *pMgmt) {
+ int32_t ret = 0;
char file[PATH_MAX] = {0};
char realfile[PATH_MAX] = {0};
snprintf(file, sizeof(file), "%s%svnodes.json.bak", pMgmt->path, TD_DIRSEP);
@@ -161,13 +163,16 @@ int32_t vmWriteVnodeListToFile(SVnodeMgmt *pMgmt) {
char *content = taosMemoryCalloc(1, maxLen + 1);
if (content == NULL) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
- return -1;
+ ret = -1;
+ goto _OVER;
}
len += snprintf(content + len, maxLen - len, "{\n");
len += snprintf(content + len, maxLen - len, " \"vnodes\": [\n");
for (int32_t i = 0; i < numOfVnodes; ++i) {
SVnodeObj *pVnode = pVnodes[i];
+ if (pVnode == NULL) continue;
+
len += snprintf(content + len, maxLen - len, " {\n");
len += snprintf(content + len, maxLen - len, " \"vgId\": %d,\n", pVnode->vgId);
len += snprintf(content + len, maxLen - len, " \"dropped\": %d,\n", pVnode->dropped);
@@ -180,12 +185,13 @@ int32_t vmWriteVnodeListToFile(SVnodeMgmt *pMgmt) {
}
len += snprintf(content + len, maxLen - len, " ]\n");
len += snprintf(content + len, maxLen - len, "}\n");
+ terrno = 0;
+_OVER:
taosWriteFile(pFile, content, len);
taosFsyncFile(pFile);
taosCloseFile(&pFile);
taosMemoryFree(content);
- terrno = 0;
for (int32_t i = 0; i < numOfVnodes; ++i) {
SVnodeObj *pVnode = pVnodes[i];
@@ -196,6 +202,8 @@ int32_t vmWriteVnodeListToFile(SVnodeMgmt *pMgmt) {
taosMemoryFree(pVnodes);
}
+ if (ret != 0) return -1;
+
dDebug("successed to write %s, numOfVnodes:%d", realfile, numOfVnodes);
return taosRenameFile(file, realfile);
}
\ No newline at end of file
diff --git a/source/dnode/mgmt/mgmt_vnode/src/vmHandle.c b/source/dnode/mgmt/mgmt_vnode/src/vmHandle.c
index e610b41a04dc7792638a251fa379bcacb37e0050..4047bc2340de74500f9c675b6c7ee570f6b94b6a 100644
--- a/source/dnode/mgmt/mgmt_vnode/src/vmHandle.c
+++ b/source/dnode/mgmt/mgmt_vnode/src/vmHandle.c
@@ -167,11 +167,15 @@ static void vmGenerateVnodeCfg(SCreateVnodeReq *pCreate, SVnodeCfg *pCfg) {
pCfg->walCfg.segSize = pCreate->walSegmentSize;
pCfg->walCfg.level = pCreate->walLevel;
+ pCfg->sttTrigger = pCreate->sstTrigger;
pCfg->hashBegin = pCreate->hashBegin;
pCfg->hashEnd = pCreate->hashEnd;
pCfg->hashMethod = pCreate->hashMethod;
+ pCfg->hashPrefix = pCreate->hashPrefix;
+ pCfg->hashSuffix = pCreate->hashSuffix;
+ pCfg->tsdbPageSize = pCreate->tsdbPageSize * 1024;
- pCfg->standby = pCfg->standby;
+ pCfg->standby = 0;
pCfg->syncCfg.myIndex = pCreate->selfIndex;
pCfg->syncCfg.replicaNum = pCreate->replica;
memset(&pCfg->syncCfg.nodeInfo, 0, sizeof(pCfg->syncCfg.nodeInfo));
@@ -219,8 +223,13 @@ int32_t vmProcessCreateVnodeReq(SVnodeMgmt *pMgmt, SRpcMsg *pMsg) {
return -1;
}
- dDebug("vgId:%d, start to create vnode, tsma:%d standby:%d cacheLast:%d cacheLastSize:%d", createReq.vgId,
- createReq.isTsma, createReq.standby, createReq.cacheLast, createReq.cacheLastSize);
+ dInfo(
+ "vgId:%d, start to create vnode, tsma:%d standby:%d cacheLast:%d cacheLastSize:%d sstTrigger:%d "
+ "tsdbPageSize:%d",
+ createReq.vgId, createReq.isTsma, createReq.standby, createReq.cacheLast, createReq.cacheLastSize,
+ createReq.sstTrigger, createReq.tsdbPageSize);
+ dInfo("vgId:%d, hashMethod:%d begin:%u end:%u prefix:%d surfix:%d", createReq.vgId, createReq.hashMethod,
+ createReq.hashBegin, createReq.hashEnd, createReq.hashPrefix, createReq.hashSuffix);
vmGenerateVnodeCfg(&createReq, &vnodeCfg);
if (vmTsmaAdjustDays(&vnodeCfg, &createReq) < 0) {
diff --git a/source/dnode/mgmt/mgmt_vnode/src/vmInt.c b/source/dnode/mgmt/mgmt_vnode/src/vmInt.c
index 1f981cc9e0fbc6b119542980e241e20bb06b1fde..19ed2cbc88eb8c283f95857d1f1857592b9e7828 100644
--- a/source/dnode/mgmt/mgmt_vnode/src/vmInt.c
+++ b/source/dnode/mgmt/mgmt_vnode/src/vmInt.c
@@ -58,11 +58,14 @@ int32_t vmOpenVnode(SVnodeMgmt *pMgmt, SWrapperCfg *pCfg, SVnode *pImpl) {
if (pVnode->path == NULL) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
+ taosMemoryFree(pVnode);
return -1;
}
if (vmAllocQueue(pMgmt, pVnode) != 0) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
+ taosMemoryFree(pVnode->path);
+ taosMemoryFree(pVnode);
return -1;
}
@@ -218,14 +221,15 @@ static void vmCloseVnodes(SVnodeMgmt *pMgmt) {
dInfo("start to close all vnodes");
int32_t numOfVnodes = 0;
- SVnodeObj **pVnodes = vmGetVnodeListFromHash(pMgmt, &numOfVnodes);
+ SVnodeObj **ppVnodes = vmGetVnodeListFromHash(pMgmt, &numOfVnodes);
for (int32_t i = 0; i < numOfVnodes; ++i) {
- vmCloseVnode(pMgmt, pVnodes[i]);
+ if (ppVnodes == NULL || ppVnodes[i] == NULL) continue;
+ vmCloseVnode(pMgmt, ppVnodes[i]);
}
- if (pVnodes != NULL) {
- taosMemoryFree(pVnodes);
+ if (ppVnodes != NULL) {
+ taosMemoryFree(ppVnodes);
}
if (pMgmt->hash != NULL) {
@@ -331,22 +335,94 @@ static int32_t vmRequire(const SMgmtInputOpt *pInput, bool *required) {
return 0;
}
-static int32_t vmStart(SVnodeMgmt *pMgmt) {
+static void *vmRestoreVnodeInThread(void *param) {
+ SVnodeThread *pThread = param;
+ SVnodeMgmt *pMgmt = pThread->pMgmt;
+
+ dInfo("thread:%d, start to restore %d vnodes", pThread->threadIndex, pThread->vnodeNum);
+ setThreadName("restore-vnodes");
+
+ for (int32_t v = 0; v < pThread->vnodeNum; ++v) {
+ SVnodeObj *pVnode = pThread->ppVnodes[v];
+
+ char stepDesc[TSDB_STEP_DESC_LEN] = {0};
+ snprintf(stepDesc, TSDB_STEP_DESC_LEN, "vgId:%d, start to restore, %d of %d have been restored", pVnode->vgId,
+ pMgmt->state.openVnodes, pMgmt->state.totalVnodes);
+ tmsgReportStartup("vnode-restore", stepDesc);
+
+ int32_t code = vnodeStart(pVnode->pImpl);
+ if (code != 0) {
+ dError("vgId:%d, failed to restore vnode by thread:%d", pVnode->vgId, pThread->threadIndex);
+ pThread->failed++;
+ } else {
+ dDebug("vgId:%d, is restored by thread:%d", pVnode->vgId, pThread->threadIndex);
+ pThread->opened++;
+ atomic_add_fetch_32(&pMgmt->state.openVnodes, 1);
+ }
+ }
+
+ dInfo("thread:%d, numOfVnodes:%d, restored:%d failed:%d", pThread->threadIndex, pThread->vnodeNum, pThread->opened,
+ pThread->failed);
+ return NULL;
+}
+
+static int32_t vmStartVnodes(SVnodeMgmt *pMgmt) {
int32_t numOfVnodes = 0;
- SVnodeObj **pVnodes = vmGetVnodeListFromHash(pMgmt, &numOfVnodes);
+ SVnodeObj **ppVnodes = vmGetVnodeListFromHash(pMgmt, &numOfVnodes);
- for (int32_t i = 0; i < numOfVnodes; ++i) {
- SVnodeObj *pVnode = pVnodes[i];
- vnodeStart(pVnode->pImpl);
+ int32_t threadNum = tsNumOfCores / 2;
+ if (threadNum < 1) threadNum = 1;
+ int32_t vnodesPerThread = numOfVnodes / threadNum + 1;
+
+ SVnodeThread *threads = taosMemoryCalloc(threadNum, sizeof(SVnodeThread));
+ for (int32_t t = 0; t < threadNum; ++t) {
+ threads[t].threadIndex = t;
+ threads[t].pMgmt = pMgmt;
+ threads[t].ppVnodes = taosMemoryCalloc(vnodesPerThread, sizeof(SVnode *));
}
+ for (int32_t v = 0; v < numOfVnodes; ++v) {
+ int32_t t = v % threadNum;
+ SVnodeThread *pThread = &threads[t];
+ if (pThread->ppVnodes != NULL) {
+ pThread->ppVnodes[pThread->vnodeNum++] = ppVnodes[v];
+ }
+ }
+
+ pMgmt->state.openVnodes = 0;
+ dInfo("restore %d vnodes with %d threads", numOfVnodes, threadNum);
+
+ for (int32_t t = 0; t < threadNum; ++t) {
+ SVnodeThread *pThread = &threads[t];
+ if (pThread->vnodeNum == 0) continue;
+
+ TdThreadAttr thAttr;
+ taosThreadAttrInit(&thAttr);
+ taosThreadAttrSetDetachState(&thAttr, PTHREAD_CREATE_JOINABLE);
+ if (taosThreadCreate(&pThread->thread, &thAttr, vmRestoreVnodeInThread, pThread) != 0) {
+ dError("thread:%d, failed to create thread to restore vnode since %s", pThread->threadIndex, strerror(errno));
+ }
+
+ taosThreadAttrDestroy(&thAttr);
+ }
+
+ for (int32_t t = 0; t < threadNum; ++t) {
+ SVnodeThread *pThread = &threads[t];
+ if (pThread->vnodeNum > 0 && taosCheckPthreadValid(pThread->thread)) {
+ taosThreadJoin(pThread->thread, NULL);
+ taosThreadClear(&pThread->thread);
+ }
+ taosMemoryFree(pThread->ppVnodes);
+ }
+ taosMemoryFree(threads);
+
for (int32_t i = 0; i < numOfVnodes; ++i) {
- SVnodeObj *pVnode = pVnodes[i];
- vmReleaseVnode(pMgmt, pVnode);
+ if (ppVnodes == NULL || ppVnodes[i] == NULL) continue;
+ vmReleaseVnode(pMgmt, ppVnodes[i]);
}
- if (pVnodes != NULL) {
- taosMemoryFree(pVnodes);
+ if (ppVnodes != NULL) {
+ taosMemoryFree(ppVnodes);
}
return 0;
@@ -360,7 +436,7 @@ SMgmtFunc vmGetMgmtFunc() {
SMgmtFunc mgmtFunc = {0};
mgmtFunc.openFp = vmInit;
mgmtFunc.closeFp = (NodeCloseFp)vmCleanup;
- mgmtFunc.startFp = (NodeStartFp)vmStart;
+ mgmtFunc.startFp = (NodeStartFp)vmStartVnodes;
mgmtFunc.stopFp = (NodeStopFp)vmStop;
mgmtFunc.requiredFp = vmRequire;
mgmtFunc.getHandlesFp = vmGetMsgHandles;
diff --git a/source/dnode/mgmt/mgmt_vnode/src/vmWorker.c b/source/dnode/mgmt/mgmt_vnode/src/vmWorker.c
index 0a42f06081fbc75a114badedf0886786f934985d..d7df30bc7587cada18a308176e2fc739c1679afa 100644
--- a/source/dnode/mgmt/mgmt_vnode/src/vmWorker.c
+++ b/source/dnode/mgmt/mgmt_vnode/src/vmWorker.c
@@ -244,7 +244,7 @@ int32_t vmPutMsgToMonitorQueue(SVnodeMgmt *pMgmt, SRpcMsg *pMsg) {
int32_t vmPutRpcMsgToQueue(SVnodeMgmt *pMgmt, EQueueType qtype, SRpcMsg *pRpc) {
SRpcMsg *pMsg = taosAllocateQitem(sizeof(SRpcMsg), RPC_QITEM);
if (pMsg == NULL) {
- rpcFreeCont(pMsg->pCont);
+ rpcFreeCont(pRpc->pCont);
pRpc->pCont = NULL;
return -1;
}
diff --git a/source/dnode/mgmt/node_mgmt/src/dmTransport.c b/source/dnode/mgmt/node_mgmt/src/dmTransport.c
index a059db6b00b7896289346eae0016dedfe95db400..f57943b9ddca9f6e33421e2a675f3e5cdc10cdbd 100644
--- a/source/dnode/mgmt/node_mgmt/src/dmTransport.c
+++ b/source/dnode/mgmt/node_mgmt/src/dmTransport.c
@@ -270,7 +270,7 @@ int32_t dmInitClient(SDnode *pDnode) {
SRpcInit rpcInit = {0};
rpcInit.label = "DND-C";
- rpcInit.numOfThreads = 1;
+ rpcInit.numOfThreads = 4;
rpcInit.cfp = (RpcCfp)dmProcessRpcMsg;
rpcInit.sessions = 1024;
rpcInit.connType = TAOS_CONN_CLIENT;
@@ -301,7 +301,7 @@ int32_t dmInitServer(SDnode *pDnode) {
SDnodeTrans *pTrans = &pDnode->trans;
SRpcInit rpcInit = {0};
- strncpy(rpcInit.localFqdn, tsLocalFqdn, strlen(tsLocalFqdn));
+ strncpy(rpcInit.localFqdn, tsLocalFqdn, TSDB_FQDN_LEN);
rpcInit.localPort = tsServerPort;
rpcInit.label = "DND-S";
rpcInit.numOfThreads = tsNumOfRpcThreads;
diff --git a/source/dnode/mnode/impl/inc/mndDef.h b/source/dnode/mnode/impl/inc/mndDef.h
index ea05215fe90d30708013fe4b1c8fc08d2be8d3d6..c3d03a6c5eb086997fd4920b8c6586e3c20c2649 100644
--- a/source/dnode/mnode/impl/inc/mndDef.h
+++ b/source/dnode/mnode/impl/inc/mndDef.h
@@ -165,12 +165,13 @@ typedef struct {
SEpSet lastEpset;
tmsg_t lastMsgType;
tmsg_t originRpcType;
- char dbname1[TSDB_TABLE_FNAME_LEN];
- char dbname2[TSDB_TABLE_FNAME_LEN];
+ char dbname[TSDB_TABLE_FNAME_LEN];
+ char stbname[TSDB_TABLE_FNAME_LEN];
int32_t startFunc;
int32_t stopFunc;
int32_t paramLen;
void* param;
+ char opername[TSDB_TRANS_OPER_LEN];
SArray* pRpcArray;
} STrans;
@@ -305,11 +306,15 @@ typedef struct {
int8_t hashMethod; // default is 1
int8_t cacheLast;
int8_t schemaless;
+ int16_t hashPrefix;
+ int16_t hashSuffix;
+ int16_t sstTrigger;
+ int32_t tsdbPageSize;
int32_t numOfRetensions;
SArray* pRetensions;
int32_t walRetentionPeriod;
- int64_t walRetentionSize;
int32_t walRollPeriod;
+ int64_t walRetentionSize;
int64_t walSegmentSize;
} SDbCfg;
@@ -340,6 +345,7 @@ typedef struct {
uint32_t hashEnd;
char dbName[TSDB_DB_FNAME_LEN];
int64_t dbUid;
+ int64_t cacheUsage;
int64_t numOfTables;
int64_t numOfTimeSeries;
int64_t totalStorage;
@@ -535,7 +541,7 @@ typedef struct {
} SMqConsumerEp;
SMqConsumerEp* tCloneSMqConsumerEp(const SMqConsumerEp* pEp);
-void tDeleteSMqConsumerEp(SMqConsumerEp* pEp);
+void tDeleteSMqConsumerEp(void* pEp);
int32_t tEncodeSMqConsumerEp(void** buf, const SMqConsumerEp* pEp);
void* tDecodeSMqConsumerEp(const void* buf, SMqConsumerEp* pEp);
diff --git a/source/dnode/mnode/impl/inc/mndStb.h b/source/dnode/mnode/impl/inc/mndStb.h
index 8f0d55e10061ce4517c4305ae7450a7439b91cfd..ba9acdbd8fb7bc7762e26f6580f7eecee49a961c 100644
--- a/source/dnode/mnode/impl/inc/mndStb.h
+++ b/source/dnode/mnode/impl/inc/mndStb.h
@@ -40,6 +40,8 @@ int32_t mndBuildSMCreateStbRsp(SMnode *pMnode, char* dbFName, char* stbFName, vo
void mndExtractDbNameFromStbFullName(const char *stbFullName, char *dst);
void mndExtractTbNameFromStbFullName(const char *stbFullName, char *dst, int32_t dstSize);
+const char *mndGetStbStr(const char *src);
+
#ifdef __cplusplus
}
#endif
diff --git a/source/dnode/mnode/impl/inc/mndTrans.h b/source/dnode/mnode/impl/inc/mndTrans.h
index faf656a25160efd3ce6b221a0efa396484392230..2372fa30e5bfc1e27b32ff10672929a328029865 100644
--- a/source/dnode/mnode/impl/inc/mndTrans.h
+++ b/source/dnode/mnode/impl/inc/mndTrans.h
@@ -61,7 +61,8 @@ void mndCleanupTrans(SMnode *pMnode);
STrans *mndAcquireTrans(SMnode *pMnode, int32_t transId);
void mndReleaseTrans(SMnode *pMnode, STrans *pTrans);
-STrans *mndTransCreate(SMnode *pMnode, ETrnPolicy policy, ETrnConflct conflict, const SRpcMsg *pReq);
+STrans *mndTransCreate(SMnode *pMnode, ETrnPolicy policy, ETrnConflct conflict, const SRpcMsg *pReq,
+ const char *opername);
void mndTransDrop(STrans *pTrans);
int32_t mndTransAppendRedolog(STrans *pTrans, SSdbRaw *pRaw);
int32_t mndTransAppendUndolog(STrans *pTrans, SSdbRaw *pRaw);
@@ -71,7 +72,7 @@ int32_t mndTransAppendRedoAction(STrans *pTrans, STransAction *pAction);
int32_t mndTransAppendUndoAction(STrans *pTrans, STransAction *pAction);
void mndTransSetRpcRsp(STrans *pTrans, void *pCont, int32_t contLen);
void mndTransSetCb(STrans *pTrans, ETrnFunc startFunc, ETrnFunc stopFunc, void *param, int32_t paramLen);
-void mndTransSetDbName(STrans *pTrans, const char *dbname1, const char *dbname2);
+void mndTransSetDbName(STrans *pTrans, const char *dbname, const char *stbname);
void mndTransSetSerial(STrans *pTrans);
void mndTransSetOper(STrans *pTrans, EOperType oper);
diff --git a/source/dnode/mnode/impl/src/mndAcct.c b/source/dnode/mnode/impl/src/mndAcct.c
index 33f0bb7a34d667b8f25fb10b06e6e6a00e669d60..5279a86d83c3070242c8e8280f875005a399a421 100644
--- a/source/dnode/mnode/impl/src/mndAcct.c
+++ b/source/dnode/mnode/impl/src/mndAcct.c
@@ -79,14 +79,14 @@ static int32_t mndCreateDefaultAcct(SMnode *pMnode) {
if (pRaw == NULL) return -1;
sdbSetRawStatus(pRaw, SDB_STATUS_READY);
- mDebug("acct:%s, will be created when deploying, raw:%p", acctObj.acct, pRaw);
+ mInfo("acct:%s, will be created when deploying, raw:%p", acctObj.acct, pRaw);
- STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_RETRY, TRN_CONFLICT_NOTHING, NULL);
+ STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_RETRY, TRN_CONFLICT_NOTHING, NULL, "create-acct");
if (pTrans == NULL) {
mError("acct:%s, failed to create since %s", acctObj.acct, terrstr());
return -1;
}
- mDebug("trans:%d, used to create acct:%s", pTrans->id, acctObj.acct);
+ mInfo("trans:%d, used to create acct:%s", pTrans->id, acctObj.acct);
if (mndTransAppendCommitlog(pTrans, pRaw) != 0) {
mError("trans:%d, failed to commit redo log since %s", pTrans->id, terrstr());
diff --git a/source/dnode/mnode/impl/src/mndBnode.c b/source/dnode/mnode/impl/src/mndBnode.c
index aafcd1999230e71a03c05422cfb538958f4d27c7..9cae83152a7ceba87d13d09ee10befb82c59d9fd 100644
--- a/source/dnode/mnode/impl/src/mndBnode.c
+++ b/source/dnode/mnode/impl/src/mndBnode.c
@@ -246,10 +246,10 @@ static int32_t mndCreateBnode(SMnode *pMnode, SRpcMsg *pReq, SDnodeObj *pDnode,
bnodeObj.createdTime = taosGetTimestampMs();
bnodeObj.updateTime = bnodeObj.createdTime;
- STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_ROLLBACK, TRN_CONFLICT_NOTHING, pReq);
+ STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_ROLLBACK, TRN_CONFLICT_NOTHING, pReq, "create-bnode");
if (pTrans == NULL) goto _OVER;
- mDebug("trans:%d, used to create bnode:%d", pTrans->id, pCreate->dnodeId);
+ mInfo("trans:%d, used to create bnode:%d", pTrans->id, pCreate->dnodeId);
if (mndSetCreateBnodeRedoLogs(pTrans, &bnodeObj) != 0) goto _OVER;
if (mndSetCreateBnodeUndoLogs(pTrans, &bnodeObj) != 0) goto _OVER;
if (mndSetCreateBnodeCommitLogs(pTrans, &bnodeObj) != 0) goto _OVER;
@@ -276,7 +276,7 @@ static int32_t mndProcessCreateBnodeReq(SRpcMsg *pReq) {
goto _OVER;
}
- mDebug("bnode:%d, start to create", createReq.dnodeId);
+ mInfo("bnode:%d, start to create", createReq.dnodeId);
if (mndCheckOperPrivilege(pMnode, pReq->info.conn.user, MND_OPER_CREATE_BNODE) != 0) {
goto _OVER;
}
@@ -354,10 +354,10 @@ static int32_t mndSetDropBnodeRedoActions(STrans *pTrans, SDnodeObj *pDnode, SBn
static int32_t mndDropBnode(SMnode *pMnode, SRpcMsg *pReq, SBnodeObj *pObj) {
int32_t code = -1;
- STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_RETRY, TRN_CONFLICT_NOTHING, pReq);
+ STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_RETRY, TRN_CONFLICT_NOTHING, pReq, "drop-bnode");
if (pTrans == NULL) goto _OVER;
- mDebug("trans:%d, used to drop bnode:%d", pTrans->id, pObj->id);
+ mInfo("trans:%d, used to drop bnode:%d", pTrans->id, pObj->id);
if (mndSetDropBnodeRedoLogs(pTrans, pObj) != 0) goto _OVER;
if (mndSetDropBnodeCommitLogs(pTrans, pObj) != 0) goto _OVER;
if (mndSetDropBnodeRedoActions(pTrans, pObj->pDnode, pObj) != 0) goto _OVER;
@@ -381,7 +381,7 @@ static int32_t mndProcessDropBnodeReq(SRpcMsg *pReq) {
goto _OVER;
}
- mDebug("bnode:%d, start to drop", dropReq.dnodeId);
+ mInfo("bnode:%d, start to drop", dropReq.dnodeId);
if (mndCheckOperPrivilege(pMnode, pReq->info.conn.user, MND_OPER_DROP_BNODE) != 0) {
goto _OVER;
}
diff --git a/source/dnode/mnode/impl/src/mndCluster.c b/source/dnode/mnode/impl/src/mndCluster.c
index 7d633f90bd937c24b82094bdc0fa6d30c30bc250..70c93748210605cb13493b9f8ab11c8dc2e9793f 100644
--- a/source/dnode/mnode/impl/src/mndCluster.c
+++ b/source/dnode/mnode/impl/src/mndCluster.c
@@ -233,14 +233,14 @@ static int32_t mndCreateDefaultCluster(SMnode *pMnode) {
if (pRaw == NULL) return -1;
sdbSetRawStatus(pRaw, SDB_STATUS_READY);
- mDebug("cluster:%" PRId64 ", will be created when deploying, raw:%p", clusterObj.id, pRaw);
+ mInfo("cluster:%" PRId64 ", will be created when deploying, raw:%p", clusterObj.id, pRaw);
- STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_RETRY, TRN_CONFLICT_NOTHING, NULL);
+ STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_RETRY, TRN_CONFLICT_NOTHING, NULL, "create-cluster");
if (pTrans == NULL) {
mError("cluster:%" PRId64 ", failed to create since %s", clusterObj.id, terrstr());
return -1;
}
- mDebug("trans:%d, used to create cluster:%" PRId64, pTrans->id, clusterObj.id);
+ mInfo("trans:%d, used to create cluster:%" PRId64, pTrans->id, clusterObj.id);
if (mndTransAppendCommitlog(pTrans, pRaw) != 0) {
mError("trans:%d, failed to commit redo log since %s", pTrans->id, terrstr());
@@ -315,8 +315,8 @@ static int32_t mndProcessUptimeTimer(SRpcMsg *pReq) {
return 0;
}
- mTrace("update cluster uptime to %" PRId64, clusterObj.upTime);
- STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_ROLLBACK, TRN_CONFLICT_NOTHING, pReq);
+ mInfo("update cluster uptime to %" PRId64, clusterObj.upTime);
+ STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_ROLLBACK, TRN_CONFLICT_NOTHING, pReq, "update-uptime");
if (pTrans == NULL) return -1;
SSdbRaw *pCommitRaw = mndClusterActionEncode(&clusterObj);
diff --git a/source/dnode/mnode/impl/src/mndConsumer.c b/source/dnode/mnode/impl/src/mndConsumer.c
index 614348c2098e9ca447f8077c1bbc1860e6ebb73f..3dfc10e5544bf3c348349ede2e88896721c6174b 100644
--- a/source/dnode/mnode/impl/src/mndConsumer.c
+++ b/source/dnode/mnode/impl/src/mndConsumer.c
@@ -54,13 +54,15 @@ static int32_t mndProcessConsumerLostMsg(SRpcMsg *pMsg);
static int32_t mndProcessConsumerRecoverMsg(SRpcMsg *pMsg);
int32_t mndInitConsumer(SMnode *pMnode) {
- SSdbTable table = {.sdbType = SDB_CONSUMER,
- .keyType = SDB_KEY_INT64,
- .encodeFp = (SdbEncodeFp)mndConsumerActionEncode,
- .decodeFp = (SdbDecodeFp)mndConsumerActionDecode,
- .insertFp = (SdbInsertFp)mndConsumerActionInsert,
- .updateFp = (SdbUpdateFp)mndConsumerActionUpdate,
- .deleteFp = (SdbDeleteFp)mndConsumerActionDelete};
+ SSdbTable table = {
+ .sdbType = SDB_CONSUMER,
+ .keyType = SDB_KEY_INT64,
+ .encodeFp = (SdbEncodeFp)mndConsumerActionEncode,
+ .decodeFp = (SdbDecodeFp)mndConsumerActionDecode,
+ .insertFp = (SdbInsertFp)mndConsumerActionInsert,
+ .updateFp = (SdbUpdateFp)mndConsumerActionUpdate,
+ .deleteFp = (SdbDeleteFp)mndConsumerActionDelete,
+ };
mndSetMsgHandle(pMnode, TDMT_MND_SUBSCRIBE, mndProcessSubscribeReq);
mndSetMsgHandle(pMnode, TDMT_MND_MQ_HB, mndProcessMqHbReq);
@@ -109,7 +111,7 @@ static int32_t mndProcessConsumerLostMsg(SRpcMsg *pMsg) {
mndReleaseConsumer(pMnode, pConsumer);
- STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_ROLLBACK, TRN_CONFLICT_NOTHING, pMsg);
+ STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_ROLLBACK, TRN_CONFLICT_NOTHING, pMsg, "lost-csm");
if (pTrans == NULL) goto FAIL;
if (mndSetConsumerCommitLogs(pMnode, pTrans, pConsumerNew) != 0) goto FAIL;
if (mndTransPrepare(pMnode, pTrans) != 0) goto FAIL;
@@ -142,7 +144,7 @@ static int32_t mndProcessConsumerRecoverMsg(SRpcMsg *pMsg) {
mndReleaseConsumer(pMnode, pConsumer);
- STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_RETRY, TRN_CONFLICT_NOTHING, pMsg);
+ STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_RETRY, TRN_CONFLICT_NOTHING, pMsg, "recover-csm");
if (pTrans == NULL) goto FAIL;
if (mndSetConsumerCommitLogs(pMnode, pTrans, pConsumerNew) != 0) goto FAIL;
if (mndTransPrepare(pMnode, pTrans) != 0) goto FAIL;
@@ -176,6 +178,8 @@ static int32_t mndProcessMqTimerMsg(SRpcMsg *pMsg) {
SMqConsumerObj *pConsumer;
void *pIter = NULL;
+ mTrace("start to process mq timer");
+
// rebalance cannot be parallel
if (!mndRebTryStart()) {
mInfo("mq rebalance already in progress, do nothing");
@@ -197,11 +201,12 @@ static int32_t mndProcessMqTimerMsg(SRpcMsg *pMsg) {
SMqConsumerLostMsg *pLostMsg = rpcMallocCont(sizeof(SMqConsumerLostMsg));
pLostMsg->consumerId = pConsumer->consumerId;
- SRpcMsg *pRpcMsg = taosMemoryCalloc(1, sizeof(SRpcMsg));
- pRpcMsg->msgType = TDMT_MND_MQ_CONSUMER_LOST;
- pRpcMsg->pCont = pLostMsg;
- pRpcMsg->contLen = sizeof(SMqConsumerLostMsg);
- tmsgPutToQueue(&pMnode->msgCb, WRITE_QUEUE, pRpcMsg);
+ SRpcMsg pRpcMsg = {
+ .msgType = TDMT_MND_MQ_CONSUMER_LOST,
+ .pCont = pLostMsg,
+ .contLen = sizeof(SMqConsumerLostMsg),
+ };
+ tmsgPutToQueue(&pMnode->msgCb, WRITE_QUEUE, &pRpcMsg);
}
if (status == MQ_CONSUMER_STATUS__LOST_REBD || status == MQ_CONSUMER_STATUS__READY) {
// do nothing
@@ -267,6 +272,7 @@ static int32_t mndProcessMqHbReq(SRpcMsg *pMsg) {
SMqConsumerObj *pConsumer = mndAcquireConsumer(pMnode, consumerId);
if (pConsumer == NULL) {
+ mError("consumer %ld not exist", consumerId);
terrno = TSDB_CODE_MND_CONSUMER_NOT_EXIST;
return -1;
}
@@ -280,11 +286,12 @@ static int32_t mndProcessMqHbReq(SRpcMsg *pMsg) {
SMqConsumerRecoverMsg *pRecoverMsg = rpcMallocCont(sizeof(SMqConsumerRecoverMsg));
pRecoverMsg->consumerId = consumerId;
- SRpcMsg *pRpcMsg = taosMemoryCalloc(1, sizeof(SRpcMsg));
- pRpcMsg->msgType = TDMT_MND_MQ_CONSUMER_RECOVER;
- pRpcMsg->pCont = pRecoverMsg;
- pRpcMsg->contLen = sizeof(SMqConsumerRecoverMsg);
- tmsgPutToQueue(&pMnode->msgCb, WRITE_QUEUE, pRpcMsg);
+ SRpcMsg pRpcMsg = {
+ .msgType = TDMT_MND_MQ_CONSUMER_RECOVER,
+ .pCont = pRecoverMsg,
+ .contLen = sizeof(SMqConsumerRecoverMsg),
+ };
+ tmsgPutToQueue(&pMnode->msgCb, WRITE_QUEUE, &pRpcMsg);
}
mndReleaseConsumer(pMnode, pConsumer);
@@ -318,11 +325,12 @@ static int32_t mndProcessAskEpReq(SRpcMsg *pMsg) {
SMqConsumerRecoverMsg *pRecoverMsg = rpcMallocCont(sizeof(SMqConsumerRecoverMsg));
pRecoverMsg->consumerId = consumerId;
- SRpcMsg *pRpcMsg = taosMemoryCalloc(1, sizeof(SRpcMsg));
- pRpcMsg->msgType = TDMT_MND_MQ_CONSUMER_RECOVER;
- pRpcMsg->pCont = pRecoverMsg;
- pRpcMsg->contLen = sizeof(SMqConsumerRecoverMsg);
- tmsgPutToQueue(&pMnode->msgCb, WRITE_QUEUE, pRpcMsg);
+ SRpcMsg pRpcMsg = {
+ .msgType = TDMT_MND_MQ_CONSUMER_RECOVER,
+ .pCont = pRecoverMsg,
+ .contLen = sizeof(SMqConsumerRecoverMsg),
+ };
+ tmsgPutToQueue(&pMnode->msgCb, WRITE_QUEUE, &pRpcMsg);
}
#endif
@@ -462,7 +470,7 @@ static int32_t mndProcessSubscribeReq(SRpcMsg *pMsg) {
int32_t newTopicNum = taosArrayGetSize(newSub);
// check topic existance
- STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_RETRY, TRN_CONFLICT_NOTHING, pMsg);
+ STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_RETRY, TRN_CONFLICT_NOTHING, pMsg, "subscribe");
if (pTrans == NULL) goto SUBSCRIBE_OVER;
for (int32_t i = 0; i < newTopicNum; i++) {
@@ -487,6 +495,7 @@ static int32_t mndProcessSubscribeReq(SRpcMsg *pMsg) {
pConsumerNew = tNewSMqConsumerObj(consumerId, cgroup);
tstrncpy(pConsumerNew->clientId, subscribe.clientId, 256);
pConsumerNew->updateType = CONSUMER_UPDATE__MODIFY;
+ taosArrayDestroy(pConsumerNew->rebNewTopics);
pConsumerNew->rebNewTopics = newSub;
subscribe.topicNames = NULL;
diff --git a/source/dnode/mnode/impl/src/mndDb.c b/source/dnode/mnode/impl/src/mndDb.c
index 8c1c3ba8735f21684b5f9577b9fad20beec110a7..a05d8dd73912ee7996239997dd365e5f5550bbcf 100644
--- a/source/dnode/mnode/impl/src/mndDb.c
+++ b/source/dnode/mnode/impl/src/mndDb.c
@@ -15,6 +15,7 @@
#define _DEFAULT_SOURCE
#include "mndDb.h"
+#include "mndCluster.h"
#include "mndDnode.h"
#include "mndOffset.h"
#include "mndPrivilege.h"
@@ -30,7 +31,7 @@
#include "systable.h"
#define DB_VER_NUMBER 1
-#define DB_RESERVE_SIZE 64
+#define DB_RESERVE_SIZE 54
static SSdbRaw *mndDbActionEncode(SDbObj *pDb);
static SSdbRow *mndDbActionDecode(SSdbRaw *pRaw);
@@ -124,6 +125,10 @@ static SSdbRaw *mndDbActionEncode(SDbObj *pDb) {
SDB_SET_INT64(pRaw, dataPos, pDb->cfg.walRetentionSize, _OVER)
SDB_SET_INT32(pRaw, dataPos, pDb->cfg.walRollPeriod, _OVER)
SDB_SET_INT64(pRaw, dataPos, pDb->cfg.walSegmentSize, _OVER)
+ SDB_SET_INT16(pRaw, dataPos, pDb->cfg.sstTrigger, _OVER)
+ SDB_SET_INT16(pRaw, dataPos, pDb->cfg.hashPrefix, _OVER)
+ SDB_SET_INT16(pRaw, dataPos, pDb->cfg.hashSuffix, _OVER)
+ SDB_SET_INT32(pRaw, dataPos, pDb->cfg.tsdbPageSize, _OVER)
SDB_SET_RESERVE(pRaw, dataPos, DB_RESERVE_SIZE, _OVER)
SDB_SET_DATALEN(pRaw, dataPos, _OVER)
@@ -207,10 +212,23 @@ static SSdbRow *mndDbActionDecode(SSdbRaw *pRaw) {
SDB_GET_INT64(pRaw, dataPos, &pDb->cfg.walRetentionSize, _OVER)
SDB_GET_INT32(pRaw, dataPos, &pDb->cfg.walRollPeriod, _OVER)
SDB_GET_INT64(pRaw, dataPos, &pDb->cfg.walSegmentSize, _OVER)
+ SDB_GET_INT16(pRaw, dataPos, &pDb->cfg.sstTrigger, _OVER)
+ SDB_GET_INT16(pRaw, dataPos, &pDb->cfg.hashPrefix, _OVER)
+ SDB_GET_INT16(pRaw, dataPos, &pDb->cfg.hashSuffix, _OVER)
+ SDB_GET_INT32(pRaw, dataPos, &pDb->cfg.tsdbPageSize, _OVER)
SDB_GET_RESERVE(pRaw, dataPos, DB_RESERVE_SIZE, _OVER)
taosInitRWLatch(&pDb->lock);
+ if (pDb->cfg.tsdbPageSize <= TSDB_MIN_TSDB_PAGESIZE) {
+ mInfo("db:%s, tsdbPageSize set from %d to default %d", pDb->name, pDb->cfg.tsdbPageSize,
+ TSDB_DEFAULT_TSDB_PAGESIZE);
+ }
+
+ if (pDb->cfg.sstTrigger <= TSDB_MIN_STT_TRIGGER) {
+ mInfo("db:%s, sstTrigger set from %d to default %d", pDb->name, pDb->cfg.sstTrigger, TSDB_DEFAULT_SST_TRIGGER);
+ }
+
terrno = 0;
_OVER:
@@ -254,6 +272,8 @@ static int32_t mndDbActionUpdate(SSdb *pSdb, SDbObj *pOld, SDbObj *pNew) {
pOld->cfg.strict = pNew->cfg.strict;
pOld->cfg.cacheLast = pNew->cfg.cacheLast;
pOld->cfg.replications = pNew->cfg.replications;
+ pOld->cfg.sstTrigger = pNew->cfg.sstTrigger;
+ pOld->cfg.tsdbPageSize = pNew->cfg.tsdbPageSize;
taosWUnLockLatch(&pOld->lock);
return 0;
}
@@ -330,6 +350,10 @@ static int32_t mndCheckDbCfg(SMnode *pMnode, SDbCfg *pCfg) {
if (pCfg->walRetentionSize < TSDB_DB_MIN_WAL_RETENTION_SIZE) return -1;
if (pCfg->walRollPeriod < TSDB_DB_MIN_WAL_ROLL_PERIOD) return -1;
if (pCfg->walSegmentSize < TSDB_DB_MIN_WAL_SEGMENT_SIZE) return -1;
+ if (pCfg->sstTrigger < TSDB_MIN_STT_TRIGGER || pCfg->sstTrigger > TSDB_MAX_STT_TRIGGER) return -1;
+ if (pCfg->hashPrefix < TSDB_MIN_HASH_PREFIX || pCfg->hashPrefix > TSDB_MAX_HASH_PREFIX) return -1;
+ if (pCfg->hashSuffix < TSDB_MIN_HASH_SUFFIX || pCfg->hashSuffix > TSDB_MAX_HASH_SUFFIX) return -1;
+ if (pCfg->tsdbPageSize < TSDB_MIN_TSDB_PAGESIZE || pCfg->tsdbPageSize > TSDB_MAX_TSDB_PAGESIZE) return -1;
terrno = 0;
return terrno;
@@ -358,11 +382,15 @@ static void mndSetDefaultDbCfg(SDbCfg *pCfg) {
if (pCfg->numOfRetensions < 0) pCfg->numOfRetensions = 0;
if (pCfg->schemaless < 0) pCfg->schemaless = TSDB_DB_SCHEMALESS_OFF;
if (pCfg->walRetentionPeriod < 0 && pCfg->walRetentionPeriod != -1)
- pCfg->walRetentionPeriod = TSDB_DEFAULT_DB_WAL_RETENTION_PERIOD;
+ pCfg->walRetentionPeriod = TSDB_REPS_DEF_DB_WAL_RET_PERIOD;
if (pCfg->walRetentionSize < 0 && pCfg->walRetentionSize != -1)
- pCfg->walRetentionSize = TSDB_DEFAULT_DB_WAL_RETENTION_SIZE;
- if (pCfg->walRollPeriod < 0) pCfg->walRollPeriod = TSDB_DEFAULT_DB_WAL_ROLL_PERIOD;
+ pCfg->walRetentionSize = TSDB_REPS_DEF_DB_WAL_RET_SIZE;
+ if (pCfg->walRollPeriod < 0) pCfg->walRollPeriod = TSDB_REPS_DEF_DB_WAL_ROLL_PERIOD;
if (pCfg->walSegmentSize < 0) pCfg->walSegmentSize = TSDB_DEFAULT_DB_WAL_SEGMENT_SIZE;
+ if (pCfg->sstTrigger <= 0) pCfg->sstTrigger = TSDB_DEFAULT_SST_TRIGGER;
+ if (pCfg->hashPrefix < 0) pCfg->hashPrefix = TSDB_DEFAULT_HASH_PREFIX;
+ if (pCfg->hashSuffix < 0) pCfg->hashSuffix = TSDB_DEFAULT_HASH_SUFFIX;
+ if (pCfg->tsdbPageSize <= 0) pCfg->tsdbPageSize = TSDB_DEFAULT_TSDB_PAGESIZE;
}
static int32_t mndSetCreateDbRedoLogs(SMnode *pMnode, STrans *pTrans, SDbObj *pDb, SVgObj *pVgroups) {
@@ -479,6 +507,10 @@ static int32_t mndCreateDb(SMnode *pMnode, SRpcMsg *pReq, SCreateDbReq *pCreate,
.walRetentionSize = pCreate->walRetentionSize,
.walRollPeriod = pCreate->walRollPeriod,
.walSegmentSize = pCreate->walSegmentSize,
+ .sstTrigger = pCreate->sstTrigger,
+ .hashPrefix = pCreate->hashPrefix,
+ .hashSuffix = pCreate->hashSuffix,
+ .tsdbPageSize = pCreate->tsdbPageSize,
};
dbObj.cfg.numOfRetensions = pCreate->numOfRetensions;
@@ -496,6 +528,12 @@ static int32_t mndCreateDb(SMnode *pMnode, SRpcMsg *pReq, SCreateDbReq *pCreate,
return -1;
}
+ if (dbObj.cfg.hashPrefix > 0) {
+ int32_t dbLen = strlen(dbObj.name) + 1;
+ mInfo("db:%s, hashPrefix adjust from %d to %d", dbObj.name, dbObj.cfg.hashPrefix, dbObj.cfg.hashPrefix + dbLen);
+ dbObj.cfg.hashPrefix += dbLen;
+ }
+
SVgObj *pVgroups = NULL;
if (mndAllocVgroup(pMnode, &dbObj, &pVgroups) != 0) {
mError("db:%s, failed to create since %s", pCreate->db, terrstr());
@@ -503,10 +541,10 @@ static int32_t mndCreateDb(SMnode *pMnode, SRpcMsg *pReq, SCreateDbReq *pCreate,
}
int32_t code = -1;
- STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_RETRY, TRN_CONFLICT_DB, pReq);
+ STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_RETRY, TRN_CONFLICT_DB, pReq, "create-db");
if (pTrans == NULL) goto _OVER;
// mndTransSetSerial(pTrans);
- mDebug("trans:%d, used to create db:%s", pTrans->id, pCreate->db);
+ mInfo("trans:%d, used to create db:%s", pTrans->id, pCreate->db);
mndTransSetDbName(pTrans, dbObj.name, NULL);
mndTransSetOper(pTrans, MND_OPER_CREATE_DB);
@@ -542,7 +580,7 @@ static int32_t mndProcessCreateDbReq(SRpcMsg *pReq) {
goto _OVER;
}
- mDebug("db:%s, start to create, vgroups:%d", createReq.db, createReq.numOfVgroups);
+ mInfo("db:%s, start to create, vgroups:%d", createReq.db, createReq.numOfVgroups);
if (mndCheckDbPrivilege(pMnode, pReq->info.conn.user, MND_OPER_CREATE_DB, NULL) != 0) {
goto _OVER;
}
@@ -550,7 +588,7 @@ static int32_t mndProcessCreateDbReq(SRpcMsg *pReq) {
pDb = mndAcquireDb(pMnode, createReq.db);
if (pDb != NULL) {
if (createReq.ignoreExist) {
- mDebug("db:%s, already exist, ignore exist is set", createReq.db);
+ mInfo("db:%s, already exist, ignore exist is set", createReq.db);
code = 0;
goto _OVER;
} else {
@@ -559,7 +597,7 @@ static int32_t mndProcessCreateDbReq(SRpcMsg *pReq) {
}
} else if (terrno == TSDB_CODE_SDB_OBJ_CREATING) {
if (mndSetRpcInfoForDbTrans(pMnode, pReq, MND_OPER_CREATE_DB, createReq.db) == 0) {
- mDebug("db:%s, is creating and response after trans finished", createReq.db);
+ mInfo("db:%s, is creating and response after trans finished", createReq.db);
code = TSDB_CODE_ACTION_IN_PROGRESS;
goto _OVER;
} else {
@@ -735,9 +773,9 @@ static int32_t mndSetAlterDbRedoActions(SMnode *pMnode, STrans *pTrans, SDbObj *
}
static int32_t mndAlterDb(SMnode *pMnode, SRpcMsg *pReq, SDbObj *pOld, SDbObj *pNew) {
- STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_RETRY, TRN_CONFLICT_DB, pReq);
+ STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_RETRY, TRN_CONFLICT_DB, pReq, "alter-db");
if (pTrans == NULL) return -1;
- mDebug("trans:%d, used to alter db:%s", pTrans->id, pOld->name);
+ mInfo("trans:%d, used to alter db:%s", pTrans->id, pOld->name);
int32_t code = -1;
mndTransSetDbName(pTrans, pOld->name, NULL);
@@ -764,7 +802,7 @@ static int32_t mndProcessAlterDbReq(SRpcMsg *pReq) {
goto _OVER;
}
- mDebug("db:%s, start to alter", alterReq.db);
+ mInfo("db:%s, start to alter", alterReq.db);
pDb = mndAcquireDb(pMnode, alterReq.db);
if (pDb == NULL) {
@@ -989,10 +1027,10 @@ static int32_t mndBuildDropDbRsp(SDbObj *pDb, int32_t *pRspLen, void **ppRsp, bo
static int32_t mndDropDb(SMnode *pMnode, SRpcMsg *pReq, SDbObj *pDb) {
int32_t code = -1;
- STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_RETRY, TRN_CONFLICT_DB, pReq);
+ STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_RETRY, TRN_CONFLICT_DB, pReq, "drop-db");
if (pTrans == NULL) goto _OVER;
- mDebug("trans:%d, used to drop db:%s", pTrans->id, pDb->name);
+ mInfo("trans:%d, used to drop db:%s", pTrans->id, pDb->name);
mndTransSetDbName(pTrans, pDb->name, NULL);
if (mndCheckTopicExist(pMnode, pDb) < 0) goto _OVER;
@@ -1041,7 +1079,7 @@ static int32_t mndProcessDropDbReq(SRpcMsg *pReq) {
goto _OVER;
}
- mDebug("db:%s, start to drop", dropReq.db);
+ mInfo("db:%s, start to drop", dropReq.db);
pDb = mndAcquireDb(pMnode, dropReq.db);
if (pDb == NULL) {
@@ -1155,6 +1193,8 @@ int32_t mndExtractDbInfo(SMnode *pMnode, SDbObj *pDb, SUseDbRsp *pRsp, const SUs
pRsp->vgVersion = pDb->vgVersion;
pRsp->vgNum = taosArrayGetSize(pRsp->pVgroupInfos);
pRsp->hashMethod = pDb->cfg.hashMethod;
+ pRsp->hashPrefix = pDb->cfg.hashPrefix;
+ pRsp->hashSuffix = pDb->cfg.hashSuffix;
return 0;
}
@@ -1266,11 +1306,11 @@ int32_t mndValidateDbInfo(SMnode *pMnode, SDbVgVersion *pDbs, int32_t numOfDbs,
int32_t numOfTable = mndGetDBTableNum(pDb, pMnode);
if (pDbVgVersion->vgVersion >= pDb->vgVersion && numOfTable == pDbVgVersion->numOfTable) {
- mDebug("db:%s, version and numOfTable not changed", pDbVgVersion->dbFName);
+ mInfo("db:%s, version and numOfTable not changed", pDbVgVersion->dbFName);
mndReleaseDb(pMnode, pDb);
continue;
} else {
- mDebug("db:%s, vgroup version changed from %d to %d", pDbVgVersion->dbFName, pDbVgVersion->vgVersion,
+ mInfo("db:%s, vgroup version changed from %d to %d", pDbVgVersion->dbFName, pDbVgVersion->vgVersion,
pDb->vgVersion);
}
@@ -1287,6 +1327,8 @@ int32_t mndValidateDbInfo(SMnode *pMnode, SDbVgVersion *pDbs, int32_t numOfDbs,
usedbRsp.vgVersion = pDb->vgVersion;
usedbRsp.vgNum = (int32_t)taosArrayGetSize(usedbRsp.pVgroupInfos);
usedbRsp.hashMethod = pDb->cfg.hashMethod;
+ usedbRsp.hashPrefix = pDb->cfg.hashPrefix;
+ usedbRsp.hashSuffix = pDb->cfg.hashSuffix;
taosArrayPush(batchUseRsp.pArray, &usedbRsp);
mndReleaseDb(pMnode, pDb);
@@ -1321,7 +1363,7 @@ static int32_t mndProcessCompactDbReq(SRpcMsg *pReq) {
goto _OVER;
}
- mDebug("db:%s, start to compact", compactReq.db);
+ mInfo("db:%s, start to compact", compactReq.db);
pDb = mndAcquireDb(pMnode, compactReq.db);
if (pDb == NULL) {
@@ -1371,7 +1413,7 @@ static int32_t mndTrimDb(SMnode *pMnode, SDbObj *pDb) {
if (code != 0) {
mError("vgId:%d, failed to send vnode-trim request to vnode since 0x%x", pVgroup->vgId, code);
} else {
- mDebug("vgId:%d, send vnode-trim request to vnode, time:%d", pVgroup->vgId, trimReq.timestamp);
+ mInfo("vgId:%d, send vnode-trim request to vnode, time:%d", pVgroup->vgId, trimReq.timestamp);
}
sdbRelease(pSdb, pVgroup);
}
@@ -1390,7 +1432,7 @@ static int32_t mndProcessTrimDbReq(SRpcMsg *pReq) {
goto _OVER;
}
- mDebug("db:%s, start to trim", trimReq.db);
+ mInfo("db:%s, start to trim", trimReq.db);
pDb = mndAcquireDb(pMnode, trimReq.db);
if (pDb == NULL) {
@@ -1536,6 +1578,24 @@ static void mndDumpDbInfoData(SMnode *pMnode, SSDataBlock *pBlock, SDbObj *pDb,
STR_WITH_MAXSIZE_TO_VARSTR(buf, "NULL", bytes);
}
+ const char *precStr = NULL;
+ switch (pDb->cfg.precision) {
+ case TSDB_TIME_PRECISION_MILLI:
+ precStr = TSDB_TIME_PRECISION_MILLI_STR;
+ break;
+ case TSDB_TIME_PRECISION_MICRO:
+ precStr = TSDB_TIME_PRECISION_MICRO_STR;
+ break;
+ case TSDB_TIME_PRECISION_NANO:
+ precStr = TSDB_TIME_PRECISION_NANO_STR;
+ break;
+ default:
+ precStr = "none";
+ break;
+ }
+ char precVstr[10] = {0};
+ STR_WITH_SIZE_TO_VARSTR(precVstr, precStr, 2);
+
char *statusStr = "ready";
if (objStatus == SDB_STATUS_CREATING) {
statusStr = "creating";
@@ -1546,7 +1606,6 @@ static void mndDumpDbInfoData(SMnode *pMnode, SSDataBlock *pBlock, SDbObj *pDb,
statusStr = "unsynced";
}
}
-
char statusVstr[24] = {0};
STR_WITH_SIZE_TO_VARSTR(statusVstr, statusStr, strlen(statusStr));
@@ -1555,8 +1614,12 @@ static void mndDumpDbInfoData(SMnode *pMnode, SSDataBlock *pBlock, SDbObj *pDb,
SColumnInfoData *pColInfo = taosArrayGet(pBlock->pDataBlock, i);
if (i == 0) {
colDataAppend(pColInfo, rows, buf, false);
+ } else if (i == 1) {
+ colDataAppend(pColInfo, rows, (const char *)&pDb->createdTime, false);
} else if (i == 3) {
colDataAppend(pColInfo, rows, (const char *)&numOfTables, false);
+ } else if (i == 14) {
+ colDataAppend(pColInfo, rows, precVstr, false);
} else if (i == 15) {
colDataAppend(pColInfo, rows, statusVstr, false);
} else {
@@ -1621,23 +1684,6 @@ static void mndDumpDbInfoData(SMnode *pMnode, SSDataBlock *pBlock, SDbObj *pDb,
pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
colDataAppend(pColInfo, rows, (const char *)&pDb->cfg.compression, false);
- const char *precStr = NULL;
- switch (pDb->cfg.precision) {
- case TSDB_TIME_PRECISION_MILLI:
- precStr = TSDB_TIME_PRECISION_MILLI_STR;
- break;
- case TSDB_TIME_PRECISION_MICRO:
- precStr = TSDB_TIME_PRECISION_MICRO_STR;
- break;
- case TSDB_TIME_PRECISION_NANO:
- precStr = TSDB_TIME_PRECISION_NANO_STR;
- break;
- default:
- precStr = "none";
- break;
- }
- char precVstr[10] = {0};
- STR_WITH_SIZE_TO_VARSTR(precVstr, precStr, 2);
pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
colDataAppend(pColInfo, rows, (const char *)precVstr, false);
@@ -1682,23 +1728,39 @@ static void mndDumpDbInfoData(SMnode *pMnode, SSDataBlock *pBlock, SDbObj *pDb,
pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
colDataAppend(pColInfo, rows, (const char *)&pDb->cfg.walSegmentSize, false);
+
+ pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
+ colDataAppend(pColInfo, rows, (const char *)&pDb->cfg.sstTrigger, false);
+
+ pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
+ int16_t hashPrefix = pDb->cfg.hashPrefix;
+ if (hashPrefix > 0) {
+ hashPrefix = pDb->cfg.hashPrefix - strlen(pDb->name) - 1;
+ }
+ colDataAppend(pColInfo, rows, (const char *)&hashPrefix, false);
+
+ pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
+ colDataAppend(pColInfo, rows, (const char *)&pDb->cfg.hashSuffix, false);
+
+ pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
+ colDataAppend(pColInfo, rows, (const char *)&pDb->cfg.tsdbPageSize, false);
}
taosMemoryFree(buf);
}
-static void setInformationSchemaDbCfg(SDbObj *pDbObj) {
+static void setInformationSchemaDbCfg(SMnode *pMnode, SDbObj *pDbObj) {
tstrncpy(pDbObj->name, TSDB_INFORMATION_SCHEMA_DB, tListLen(pDbObj->name));
- pDbObj->createdTime = 0;
+ pDbObj->createdTime = mndGetClusterCreateTime(pMnode);
pDbObj->cfg.numOfVgroups = 0;
pDbObj->cfg.strict = 1;
pDbObj->cfg.replications = 1;
pDbObj->cfg.precision = TSDB_TIME_PRECISION_MILLI;
}
-static void setPerfSchemaDbCfg(SDbObj *pDbObj) {
+static void setPerfSchemaDbCfg(SMnode *pMnode, SDbObj *pDbObj) {
tstrncpy(pDbObj->name, TSDB_PERFORMANCE_SCHEMA_DB, tListLen(pDbObj->name));
- pDbObj->createdTime = 0;
+ pDbObj->createdTime = mndGetClusterCreateTime(pMnode);
pDbObj->cfg.numOfVgroups = 0;
pDbObj->cfg.strict = 1;
pDbObj->cfg.replications = 1;
@@ -1729,7 +1791,7 @@ static int32_t mndRetrieveDbs(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock *pBloc
// Append the information_schema database into the result.
if (!pShow->sysDbRsp) {
SDbObj infoschemaDb = {0};
- setInformationSchemaDbCfg(&infoschemaDb);
+ setInformationSchemaDbCfg(pMnode, &infoschemaDb);
size_t numOfTables = 0;
getVisibleInfosTablesNum(sysinfo, &numOfTables);
mndDumpDbInfoData(pMnode, pBlock, &infoschemaDb, pShow, numOfRows, numOfTables, true, 0, 1);
@@ -1737,7 +1799,7 @@ static int32_t mndRetrieveDbs(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock *pBloc
numOfRows += 1;
SDbObj perfschemaDb = {0};
- setPerfSchemaDbCfg(&perfschemaDb);
+ setPerfSchemaDbCfg(pMnode, &perfschemaDb);
numOfTables = 0;
getPerfDbMeta(NULL, &numOfTables);
mndDumpDbInfoData(pMnode, pBlock, &perfschemaDb, pShow, numOfRows, numOfTables, true, 0, 1);
diff --git a/source/dnode/mnode/impl/src/mndDef.c b/source/dnode/mnode/impl/src/mndDef.c
index e6f1a40993fcb7adf2fec121b5e99374c48aae8f..50d26ce9a51dfb8488020a0fd257074f723bbbb6 100644
--- a/source/dnode/mnode/impl/src/mndDef.c
+++ b/source/dnode/mnode/impl/src/mndDef.c
@@ -145,7 +145,10 @@ SMqVgEp *tCloneSMqVgEp(const SMqVgEp *pVgEp) {
}
void tDeleteSMqVgEp(SMqVgEp *pVgEp) {
- if (pVgEp->qmsg) taosMemoryFree(pVgEp->qmsg);
+ if (pVgEp) {
+ taosMemoryFreeClear(pVgEp->qmsg);
+ taosMemoryFree(pVgEp);
+ }
}
int32_t tEncodeSMqVgEp(void **buf, const SMqVgEp *pVgEp) {
@@ -200,18 +203,10 @@ SMqConsumerObj *tNewSMqConsumerObj(int64_t consumerId, char cgroup[TSDB_CGROUP_L
}
void tDeleteSMqConsumerObj(SMqConsumerObj *pConsumer) {
- if (pConsumer->currentTopics) {
- taosArrayDestroyP(pConsumer->currentTopics, (FDelete)taosMemoryFree);
- }
- if (pConsumer->rebNewTopics) {
- taosArrayDestroyP(pConsumer->rebNewTopics, (FDelete)taosMemoryFree);
- }
- if (pConsumer->rebRemovedTopics) {
- taosArrayDestroyP(pConsumer->rebRemovedTopics, (FDelete)taosMemoryFree);
- }
- if (pConsumer->assignedTopics) {
- taosArrayDestroyP(pConsumer->assignedTopics, (FDelete)taosMemoryFree);
- }
+ taosArrayDestroyP(pConsumer->currentTopics, (FDelete)taosMemoryFree);
+ taosArrayDestroyP(pConsumer->rebNewTopics, (FDelete)taosMemoryFree);
+ taosArrayDestroyP(pConsumer->rebRemovedTopics, (FDelete)taosMemoryFree);
+ taosArrayDestroyP(pConsumer->assignedTopics, (FDelete)taosMemoryFree);
}
int32_t tEncodeSMqConsumerObj(void **buf, const SMqConsumerObj *pConsumer) {
@@ -343,8 +338,8 @@ SMqConsumerEp *tCloneSMqConsumerEp(const SMqConsumerEp *pConsumerEpOld) {
return pConsumerEpNew;
}
-void tDeleteSMqConsumerEp(SMqConsumerEp *pConsumerEp) {
- //
+void tDeleteSMqConsumerEp(void *data) {
+ SMqConsumerEp *pConsumerEp = (SMqConsumerEp*)data;
taosArrayDestroyP(pConsumerEp->vgs, (FDelete)tDeleteSMqVgEp);
}
@@ -428,6 +423,13 @@ SMqSubscribeObj *tCloneSubscribeObj(const SMqSubscribeObj *pSub) {
}
void tDeleteSubscribeObj(SMqSubscribeObj *pSub) {
+ void *pIter = NULL;
+ while (1) {
+ pIter = taosHashIterate(pSub->consumerHash, pIter);
+ if (pIter == NULL) break;
+ SMqConsumerEp *pConsumerEp = (SMqConsumerEp *)pIter;
+ taosArrayDestroyP(pConsumerEp->vgs, (FDelete)tDeleteSMqVgEp);
+ }
taosHashCleanup(pSub->consumerHash);
taosArrayDestroyP(pSub->unassignedVgs, (FDelete)tDeleteSMqVgEp);
}
diff --git a/source/dnode/mnode/impl/src/mndDnode.c b/source/dnode/mnode/impl/src/mndDnode.c
index fc5e20ef288d733927499484675acddd042fb3ca..4a8436513fe4c278fe827ae79166b682652dcf61 100644
--- a/source/dnode/mnode/impl/src/mndDnode.c
+++ b/source/dnode/mnode/impl/src/mndDnode.c
@@ -104,9 +104,9 @@ static int32_t mndCreateDefaultDnode(SMnode *pMnode) {
memcpy(&dnodeObj.fqdn, tsLocalFqdn, TSDB_FQDN_LEN);
snprintf(dnodeObj.ep, TSDB_EP_LEN, "%s:%u", dnodeObj.fqdn, dnodeObj.port);
- pTrans = mndTransCreate(pMnode, TRN_POLICY_RETRY, TRN_CONFLICT_GLOBAL, NULL);
+ pTrans = mndTransCreate(pMnode, TRN_POLICY_RETRY, TRN_CONFLICT_GLOBAL, NULL, "create-dnode");
if (pTrans == NULL) goto _OVER;
- mDebug("trans:%d, used to create dnode:%s on first deploy", pTrans->id, dnodeObj.ep);
+ mInfo("trans:%d, used to create dnode:%s on first deploy", pTrans->id, dnodeObj.ep);
pRaw = mndDnodeActionEncode(&dnodeObj);
if (pRaw == NULL || mndTransAppendCommitlog(pTrans, pRaw) != 0) goto _OVER;
@@ -326,7 +326,7 @@ static int32_t mndProcessStatusReq(SRpcMsg *pReq) {
if (statusReq.dnodeId == 0) {
pDnode = mndAcquireDnodeByEp(pMnode, statusReq.dnodeEp);
if (pDnode == NULL) {
- mDebug("dnode:%s, not created yet", statusReq.dnodeEp);
+ mInfo("dnode:%s, not created yet", statusReq.dnodeEp);
goto _OVER;
}
} else {
@@ -347,6 +347,7 @@ static int32_t mndProcessStatusReq(SRpcMsg *pReq) {
SVgObj *pVgroup = mndAcquireVgroup(pMnode, pVload->vgId);
if (pVgroup != NULL) {
if (pVload->syncState == TAOS_SYNC_STATE_LEADER) {
+ pVgroup->cacheUsage = pVload->cacheUsage;
pVgroup->numOfTables = pVload->numOfTables;
pVgroup->numOfTimeSeries = pVload->numOfTimeSeries;
pVgroup->totalStorage = pVload->totalStorage;
@@ -434,7 +435,7 @@ static int32_t mndProcessStatusReq(SRpcMsg *pReq) {
mInfo("dnode:%d, from offline to online, memory avail:%" PRId64 " total:%" PRId64 " cores:%.2f", pDnode->id,
statusReq.memAvail, statusReq.memTotal, statusReq.numOfCores);
} else {
- mDebug("dnode:%d, send dnode epset, online:%d dnodeVer:%" PRId64 ":%" PRId64 " reboot:%d", pDnode->id, online,
+ mInfo("dnode:%d, send dnode epset, online:%d dnodeVer:%" PRId64 ":%" PRId64 " reboot:%d", pDnode->id, online,
statusReq.dnodeVer, dnodeVer, reboot);
}
@@ -487,9 +488,9 @@ static int32_t mndCreateDnode(SMnode *pMnode, SRpcMsg *pReq, SCreateDnodeReq *pC
memcpy(dnodeObj.fqdn, pCreate->fqdn, TSDB_FQDN_LEN);
snprintf(dnodeObj.ep, TSDB_EP_LEN, "%s:%u", dnodeObj.fqdn, dnodeObj.port);
- pTrans = mndTransCreate(pMnode, TRN_POLICY_ROLLBACK, TRN_CONFLICT_GLOBAL, pReq);
+ pTrans = mndTransCreate(pMnode, TRN_POLICY_ROLLBACK, TRN_CONFLICT_GLOBAL, pReq, "create-dnode");
if (pTrans == NULL) goto _OVER;
- mDebug("trans:%d, used to create dnode:%s", pTrans->id, dnodeObj.ep);
+ mInfo("trans:%d, used to create dnode:%s", pTrans->id, dnodeObj.ep);
pRaw = mndDnodeActionEncode(&dnodeObj);
if (pRaw == NULL || mndTransAppendCommitlog(pTrans, pRaw) != 0) goto _OVER;
@@ -666,7 +667,7 @@ static int32_t mndDropDnode(SMnode *pMnode, SRpcMsg *pReq, SDnodeObj *pDnode, SM
SSdbRaw *pRaw = NULL;
STrans *pTrans = NULL;
- pTrans = mndTransCreate(pMnode, TRN_POLICY_RETRY, TRN_CONFLICT_GLOBAL, pReq);
+ pTrans = mndTransCreate(pMnode, TRN_POLICY_RETRY, TRN_CONFLICT_GLOBAL, pReq, "drop-dnode");
if (pTrans == NULL) goto _OVER;
mndTransSetSerial(pTrans);
mInfo("trans:%d, used to drop dnode:%d", pTrans->id, pDnode->id);
@@ -853,8 +854,8 @@ static int32_t mndProcessConfigDnodeReq(SRpcMsg *pReq) {
}
int32_t code = -1;
- SSdb *pSdb = pMnode->pSdb;
- void *pIter = NULL;
+ SSdb *pSdb = pMnode->pSdb;
+ void *pIter = NULL;
while (1) {
SDnodeObj *pDnode = NULL;
pIter = sdbFetch(pSdb, SDB_DNODE, pIter, (void **)&pDnode);
@@ -877,7 +878,7 @@ static int32_t mndProcessConfigDnodeReq(SRpcMsg *pReq) {
sdbRelease(pSdb, pDnode);
}
-
+
if (code == -1) {
terrno = TSDB_CODE_MND_DNODE_NOT_EXIST;
}
diff --git a/source/dnode/mnode/impl/src/mndDump.c b/source/dnode/mnode/impl/src/mndDump.c
new file mode 100644
index 0000000000000000000000000000000000000000..881ebbbf9de161018a3d0965bd61f2ab92666094
--- /dev/null
+++ b/source/dnode/mnode/impl/src/mndDump.c
@@ -0,0 +1,645 @@
+/*
+ * Copyright (c) 2019 TAOS Data, Inc.
+ *
+ * This program is free software: you can use, redistribute, and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3
+ * or later ("AGPL"), as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see .
+ */
+
+#define _DEFAULT_SOURCE
+#include "mndDb.h"
+#include "mndInt.h"
+#include "mndStb.h"
+#include "sdb.h"
+#include "tconfig.h"
+#include "tjson.h"
+#include "ttypes.h"
+
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-result"
+
+void reportStartup(const char *name, const char *desc) {}
+void sendRsp(SRpcMsg *pMsg) { rpcFreeCont(pMsg->pCont); }
+
+int32_t sendReq(const SEpSet *pEpSet, SRpcMsg *pMsg) {
+ terrno = TSDB_CODE_INVALID_PTR;
+ return -1;
+}
+
+char *i642str(int64_t val) {
+ static char str[24] = {0};
+ snprintf(str, sizeof(str), "%" PRId64, val);
+ return str;
+}
+
+void dumpFunc(SSdb *pSdb, SJson *json) {
+ void *pIter = NULL;
+ SJson *items = tjsonAddArrayToObject(json, "funcs");
+
+ while (1) {
+ SFuncObj *pObj = NULL;
+ pIter = sdbFetch(pSdb, SDB_FUNC, pIter, (void **)&pObj);
+ if (pIter == NULL) break;
+
+ SJson *item = tjsonCreateObject();
+ tjsonAddItemToArray(items, item);
+ tjsonAddStringToObject(item, "name", pObj->name);
+ tjsonAddStringToObject(item, "createdTime", i642str(pObj->createdTime));
+ tjsonAddStringToObject(item, "funcType", i642str(pObj->funcType));
+ tjsonAddStringToObject(item, "scriptType", i642str(pObj->scriptType));
+ tjsonAddStringToObject(item, "align", i642str(pObj->align));
+ tjsonAddStringToObject(item, "outputType", i642str(pObj->outputType));
+ tjsonAddStringToObject(item, "outputLen", i642str(pObj->outputLen));
+ tjsonAddStringToObject(item, "bufSize", i642str(pObj->bufSize));
+ tjsonAddStringToObject(item, "signature", i642str(pObj->signature));
+ tjsonAddStringToObject(item, "commentSize", i642str(pObj->commentSize));
+ tjsonAddStringToObject(item, "codeSize", i642str(pObj->codeSize));
+ sdbRelease(pSdb, pObj);
+ }
+}
+
+void dumpDb(SSdb *pSdb, SJson *json) {
+ void *pIter = NULL;
+ SJson *items = tjsonCreateObject();
+ tjsonAddItemToObject(json, "dbs", items);
+
+ while (1) {
+ SDbObj *pObj = NULL;
+ pIter = sdbFetch(pSdb, SDB_DB, pIter, (void **)&pObj);
+ if (pIter == NULL) break;
+
+ SJson *item = tjsonCreateObject();
+ tjsonAddItemToObject(items, "db", item);
+
+ tjsonAddStringToObject(item, "name", mndGetDbStr(pObj->name));
+ tjsonAddStringToObject(item, "acct", pObj->acct);
+ tjsonAddStringToObject(item, "createUser", pObj->createUser);
+ tjsonAddStringToObject(item, "createdTime", i642str(pObj->createdTime));
+ tjsonAddStringToObject(item, "updateTime", i642str(pObj->updateTime));
+ tjsonAddStringToObject(item, "uid", i642str(pObj->uid));
+ tjsonAddStringToObject(item, "cfgVersion", i642str(pObj->cfgVersion));
+ tjsonAddStringToObject(item, "vgVersion", i642str(pObj->vgVersion));
+ tjsonAddStringToObject(item, "numOfVgroups", i642str(pObj->cfg.numOfVgroups));
+ tjsonAddStringToObject(item, "numOfStables", i642str(pObj->cfg.numOfStables));
+ tjsonAddStringToObject(item, "buffer", i642str(pObj->cfg.buffer));
+ tjsonAddStringToObject(item, "pageSize", i642str(pObj->cfg.pageSize));
+ tjsonAddStringToObject(item, "pages", i642str(pObj->cfg.pages));
+ tjsonAddStringToObject(item, "cacheLastSize", i642str(pObj->cfg.cacheLastSize));
+ tjsonAddStringToObject(item, "daysPerFile", i642str(pObj->cfg.daysPerFile));
+ tjsonAddStringToObject(item, "daysToKeep0", i642str(pObj->cfg.daysToKeep0));
+ tjsonAddStringToObject(item, "daysToKeep1", i642str(pObj->cfg.daysToKeep1));
+ tjsonAddStringToObject(item, "daysToKeep2", i642str(pObj->cfg.daysToKeep2));
+ tjsonAddStringToObject(item, "minRows", i642str(pObj->cfg.minRows));
+ tjsonAddStringToObject(item, "maxRows", i642str(pObj->cfg.maxRows));
+ tjsonAddStringToObject(item, "precision", i642str(pObj->cfg.precision));
+ tjsonAddStringToObject(item, "compression", i642str(pObj->cfg.compression));
+ tjsonAddStringToObject(item, "replications", i642str(pObj->cfg.replications));
+ tjsonAddStringToObject(item, "strict", i642str(pObj->cfg.strict));
+ tjsonAddStringToObject(item, "cacheLast",i642str( pObj->cfg.cacheLast));
+ tjsonAddStringToObject(item, "hashMethod", i642str(pObj->cfg.hashMethod));
+ tjsonAddStringToObject(item, "hashPrefix", i642str(pObj->cfg.hashPrefix));
+ tjsonAddStringToObject(item, "hashSuffix", i642str(pObj->cfg.hashSuffix));
+ tjsonAddStringToObject(item, "sstTrigger", i642str(pObj->cfg.sstTrigger));
+ tjsonAddStringToObject(item, "tsdbPageSize",i642str( pObj->cfg.tsdbPageSize));
+ tjsonAddStringToObject(item, "schemaless", i642str(pObj->cfg.schemaless));
+ tjsonAddStringToObject(item, "walLevel",i642str( pObj->cfg.walLevel));
+ tjsonAddStringToObject(item, "walFsyncPeriod", i642str(pObj->cfg.walFsyncPeriod));
+ tjsonAddStringToObject(item, "walRetentionPeriod", i642str(pObj->cfg.walRetentionPeriod));
+ tjsonAddStringToObject(item, "walRetentionSize",i642str( pObj->cfg.walRetentionSize));
+ tjsonAddStringToObject(item, "walRollPeriod", i642str(pObj->cfg.walRollPeriod));
+ tjsonAddStringToObject(item, "walSegmentSize", i642str(pObj->cfg.walSegmentSize));
+
+ tjsonAddStringToObject(item, "numOfRetensions",i642str( pObj->cfg.numOfRetensions));
+ for (int32_t i = 0; i < pObj->cfg.numOfRetensions; ++i) {
+ SJson *rentensions = tjsonAddArrayToObject(item, "rentensions");
+ SJson *rentension = tjsonCreateObject();
+ tjsonAddItemToArray(rentensions, rentension);
+
+ SRetention *pRetension = taosArrayGet(pObj->cfg.pRetensions, i);
+ tjsonAddStringToObject(item, "freq", i642str(pRetension->freq));
+ tjsonAddStringToObject(item, "freqUnit", i642str(pRetension->freqUnit));
+ tjsonAddStringToObject(item, "keep", i642str(pRetension->keep));
+ tjsonAddStringToObject(item, "keepUnit",i642str( pRetension->keepUnit));
+ }
+
+ sdbRelease(pSdb, pObj);
+ }
+}
+
+void dumpStb(SSdb *pSdb, SJson *json) {
+ void *pIter = NULL;
+ SJson *items = tjsonAddArrayToObject(json, "stbs");
+
+ while (1) {
+ SStbObj *pObj = NULL;
+ pIter = sdbFetch(pSdb, SDB_STB, pIter, (void **)&pObj);
+ if (pIter == NULL) break;
+
+ SJson *item = tjsonCreateObject();
+ tjsonAddItemToArray(items, item);
+ tjsonAddStringToObject(item, "name", mndGetStbStr(pObj->name));
+ tjsonAddStringToObject(item, "db", mndGetDbStr(pObj->db));
+ tjsonAddStringToObject(item, "createdTime", i642str(pObj->createdTime));
+ tjsonAddStringToObject(item, "updateTime", i642str(pObj->updateTime));
+ tjsonAddStringToObject(item, "uid", i642str(pObj->uid));
+ tjsonAddStringToObject(item, "dbUid", i642str(pObj->dbUid));
+ tjsonAddStringToObject(item, "tagVer",i642str( pObj->tagVer));
+ tjsonAddStringToObject(item, "colVer", i642str(pObj->colVer));
+ tjsonAddStringToObject(item, "smaVer", i642str(pObj->smaVer));
+ tjsonAddStringToObject(item, "nextColId", i642str(pObj->nextColId));
+ tjsonAddStringToObject(item, "watermark1", i642str(pObj->watermark[0]));
+ tjsonAddStringToObject(item, "watermark2", i642str(pObj->watermark[1]));
+ tjsonAddStringToObject(item, "maxdelay0",i642str( pObj->maxdelay[0]));
+ tjsonAddStringToObject(item, "maxdelay1",i642str( pObj->maxdelay[1]));
+ tjsonAddStringToObject(item, "ttl",i642str( pObj->ttl));
+ tjsonAddStringToObject(item, "numOfFuncs",i642str( pObj->numOfFuncs));
+ tjsonAddStringToObject(item, "commentLen", i642str(pObj->commentLen));
+ tjsonAddStringToObject(item, "ast1Len", i642str(pObj->ast1Len));
+ tjsonAddStringToObject(item, "ast2Len",i642str( pObj->ast2Len));
+
+ tjsonAddStringToObject(item, "numOfColumns",i642str( pObj->numOfColumns));
+ SJson *columns = tjsonAddArrayToObject(item, "columns");
+ for (int32_t i = 0; i < pObj->numOfColumns; ++i) {
+ SJson *column = tjsonCreateObject();
+ tjsonAddItemToArray(columns, column);
+
+ SSchema *pColumn = &pObj->pColumns[i];
+ tjsonAddStringToObject(column, "type", i642str(pColumn->type));
+ tjsonAddStringToObject(column, "typestr", tDataTypes[pColumn->type].name);
+ tjsonAddStringToObject(column, "flags", i642str(pColumn->flags));
+ tjsonAddStringToObject(column, "colId", i642str(pColumn->colId));
+ tjsonAddStringToObject(column, "bytes", i642str(pColumn->bytes));
+ tjsonAddStringToObject(column, "name", pColumn->name);
+ }
+
+ tjsonAddStringToObject(item, "numOfTags", i642str(pObj->numOfTags));
+ SJson *tags = tjsonAddArrayToObject(item, "tags");
+ for (int32_t i = 0; i < pObj->numOfTags; ++i) {
+ SJson *tag = tjsonCreateObject();
+ tjsonAddItemToArray(tags, tag);
+
+ SSchema *pTag = &pObj->pTags[i];
+ tjsonAddStringToObject(tag, "type", i642str(pTag->type));
+ tjsonAddStringToObject(tag, "typestr", tDataTypes[pTag->type].name);
+ tjsonAddStringToObject(tag, "flags",i642str( pTag->flags));
+ tjsonAddStringToObject(tag, "colId", i642str(pTag->colId));
+ tjsonAddStringToObject(tag, "bytes", i642str(pTag->bytes));
+ tjsonAddStringToObject(tag, "name", pTag->name);
+ }
+
+ sdbRelease(pSdb, pObj);
+ }
+}
+
+void dumpSma(SSdb *pSdb, SJson *json) {
+ void *pIter = NULL;
+ SJson *items = tjsonAddArrayToObject(json, "smas");
+
+ while (1) {
+ SSmaObj *pObj = NULL;
+ pIter = sdbFetch(pSdb, SDB_SMA, pIter, (void **)&pObj);
+ if (pIter == NULL) break;
+
+ SJson *item = tjsonCreateObject();
+ tjsonAddItemToArray(items, item);
+ tjsonAddStringToObject(item, "name", mndGetStbStr(pObj->name));
+ tjsonAddStringToObject(item, "stb", mndGetStbStr(pObj->stb));
+ tjsonAddStringToObject(item, "db", mndGetDbStr(pObj->db));
+ tjsonAddStringToObject(item, "dstTbName", mndGetStbStr(pObj->dstTbName));
+ tjsonAddStringToObject(item, "createdTime", i642str(pObj->createdTime));
+ tjsonAddStringToObject(item, "uid", i642str(pObj->uid));
+ tjsonAddStringToObject(item, "stbUid", i642str(pObj->stbUid));
+ tjsonAddStringToObject(item, "dbUid", i642str(pObj->dbUid));
+ tjsonAddStringToObject(item, "dstTbUid", i642str(pObj->dstTbUid));
+ tjsonAddStringToObject(item, "intervalUnit", i642str(pObj->intervalUnit));
+ tjsonAddStringToObject(item, "slidingUnit",i642str( pObj->slidingUnit));
+ tjsonAddStringToObject(item, "timezone", i642str(pObj->timezone));
+ tjsonAddStringToObject(item, "dstVgId",i642str( pObj->dstVgId));
+ tjsonAddStringToObject(item, "interval", i642str(pObj->interval));
+ tjsonAddStringToObject(item, "offset", i642str(pObj->offset));
+ tjsonAddStringToObject(item, "sliding", i642str(pObj->sliding));
+ tjsonAddStringToObject(item, "exprLen",i642str( pObj->exprLen));
+ tjsonAddStringToObject(item, "tagsFilterLen", i642str(pObj->tagsFilterLen));
+ tjsonAddStringToObject(item, "sqlLen",i642str( pObj->sqlLen));
+ tjsonAddStringToObject(item, "astLen",i642str( pObj->astLen));
+ sdbRelease(pSdb, pObj);
+ }
+}
+
+void dumpVgroup(SSdb *pSdb, SJson *json) {
+ void *pIter = NULL;
+ SJson *items = tjsonAddArrayToObject(json, "vgroups");
+
+ while (1) {
+ SVgObj *pObj = NULL;
+ pIter = sdbFetch(pSdb, SDB_VGROUP, pIter, (void **)&pObj);
+ if (pIter == NULL) break;
+
+ SJson *item = tjsonCreateObject();
+ tjsonAddItemToArray(items, item);
+ tjsonAddStringToObject(item, "vgId", i642str(pObj->vgId));
+ tjsonAddStringToObject(item, "createdTime", i642str(pObj->createdTime));
+ tjsonAddStringToObject(item, "updateTime", i642str(pObj->updateTime));
+ tjsonAddStringToObject(item, "version",i642str(pObj->version));
+ tjsonAddStringToObject(item, "hashBegin", i642str(pObj->hashBegin));
+ tjsonAddStringToObject(item, "hashEnd", i642str(pObj->hashEnd));
+ tjsonAddStringToObject(item, "db", mndGetDbStr(pObj->dbName));
+ tjsonAddStringToObject(item, "dbUid", i642str(pObj->dbUid));
+ tjsonAddStringToObject(item, "isTsma", i642str(pObj->isTsma));
+ tjsonAddStringToObject(item, "replica",i642str( pObj->replica));
+ for (int32_t i = 0; i < pObj->replica; ++i) {
+ SJson *replicas = tjsonAddArrayToObject(item, "replicas");
+ SJson *replica = tjsonCreateObject();
+ tjsonAddItemToArray(replicas, replica);
+ tjsonAddStringToObject(replica, "dnodeId", i642str(pObj->vnodeGid[i].dnodeId));
+ }
+ sdbRelease(pSdb, pObj);
+ }
+}
+
+void dumpTopic(SSdb *pSdb, SJson *json) {
+ void *pIter = NULL;
+ SJson *items = tjsonAddArrayToObject(json, "topics");
+
+ while (1) {
+ SMqTopicObj *pObj = NULL;
+ pIter = sdbFetch(pSdb, SDB_TOPIC, pIter, (void **)&pObj);
+ if (pIter == NULL) break;
+
+ SJson *item = tjsonCreateObject();
+ tjsonAddItemToArray(items, item);
+ tjsonAddStringToObject(item, "name", mndGetDbStr(pObj->name));
+ tjsonAddStringToObject(item, "name", mndGetDbStr(pObj->db));
+ tjsonAddStringToObject(item, "createTime", i642str(pObj->createTime));
+ tjsonAddStringToObject(item, "updateTime", i642str(pObj->updateTime));
+ tjsonAddStringToObject(item, "uid", i642str(pObj->uid));
+ tjsonAddStringToObject(item, "dbUid", i642str(pObj->dbUid));
+ tjsonAddStringToObject(item, "version",i642str( pObj->version));
+ tjsonAddStringToObject(item, "subType",i642str( pObj->subType));
+ tjsonAddStringToObject(item, "withMeta", i642str(pObj->withMeta));
+ tjsonAddStringToObject(item, "stbUid", i642str(pObj->stbUid));
+ tjsonAddStringToObject(item, "sqlLen", i642str(pObj->sqlLen));
+ tjsonAddStringToObject(item, "astLen",i642str( pObj->astLen));
+ tjsonAddStringToObject(item, "sqlLen",i642str( pObj->sqlLen));
+ tjsonAddStringToObject(item, "ntbUid", i642str(pObj->ntbUid));
+ tjsonAddStringToObject(item, "ctbStbUid", i642str(pObj->ctbStbUid));
+ sdbRelease(pSdb, pObj);
+ }
+}
+
+void dumpConsumer(SSdb *pSdb, SJson *json) {
+ void *pIter = NULL;
+ SJson *items = tjsonAddArrayToObject(json, "consumers");
+
+ while (1) {
+ SMqConsumerObj *pObj = NULL;
+ pIter = sdbFetch(pSdb, SDB_CONSUMER, pIter, (void **)&pObj);
+ if (pIter == NULL) break;
+
+ SJson *item = tjsonCreateObject();
+ tjsonAddItemToArray(items, item);
+ tjsonAddStringToObject(item, "consumerId", i642str(pObj->consumerId));
+ tjsonAddStringToObject(item, "cgroup", pObj->cgroup);
+ sdbRelease(pSdb, pObj);
+ }
+}
+
+void dumpSubscribe(SSdb *pSdb, SJson *json) {
+ void *pIter = NULL;
+ SJson *items = tjsonAddArrayToObject(json, "subscribes");
+
+ while (1) {
+ SMqSubscribeObj *pObj = NULL;
+ pIter = sdbFetch(pSdb, SDB_SUBSCRIBE, pIter, (void **)&pObj);
+ if (pIter == NULL) break;
+
+ SJson *item = tjsonCreateObject();
+ tjsonAddItemToArray(items, item);
+ tjsonAddStringToObject(item, "key", pObj->key);
+ tjsonAddStringToObject(item, "dbUid", i642str(pObj->dbUid));
+ tjsonAddStringToObject(item, "stbUid", i642str(pObj->stbUid));
+ sdbRelease(pSdb, pObj);
+ }
+}
+
+void dumpOffset(SSdb *pSdb, SJson *json) {
+ void *pIter = NULL;
+ SJson *items = tjsonAddArrayToObject(json, "offsets");
+
+ while (1) {
+ SMqOffsetObj *pObj = NULL;
+ pIter = sdbFetch(pSdb, SDB_OFFSET, pIter, (void **)&pObj);
+ if (pIter == NULL) break;
+
+ SJson *item = tjsonCreateObject();
+ tjsonAddItemToArray(items, item);
+ tjsonAddStringToObject(item, "key", pObj->key);
+ tjsonAddStringToObject(item, "dbUid", i642str(pObj->dbUid));
+ tjsonAddStringToObject(item, "offset", i642str(pObj->offset));
+ sdbRelease(pSdb, pObj);
+ }
+}
+
+void dumpStream(SSdb *pSdb, SJson *json) {
+ void *pIter = NULL;
+ SJson *items = tjsonAddArrayToObject(json, "streams");
+
+ while (1) {
+ SStreamObj *pObj = NULL;
+ pIter = sdbFetch(pSdb, SDB_STREAM, pIter, (void **)&pObj);
+ if (pIter == NULL) break;
+
+ SJson *item = tjsonCreateObject();
+ tjsonAddItemToArray(items, item);
+ tjsonAddStringToObject(item, "name", mndGetDbStr(pObj->name));
+ tjsonAddStringToObject(item, "createTime", i642str(pObj->createTime));
+ tjsonAddStringToObject(item, "updateTime", i642str(pObj->updateTime));
+ tjsonAddStringToObject(item, "version", i642str(pObj->version));
+ tjsonAddStringToObject(item, "totalLevel", i642str(pObj->totalLevel));
+ tjsonAddStringToObject(item, "smaId", i642str(pObj->smaId));
+ tjsonAddStringToObject(item, "uid", i642str(pObj->uid));
+ tjsonAddStringToObject(item, "status",i642str( pObj->status));
+ tjsonAddStringToObject(item, "igExpired",i642str( pObj->igExpired));
+ tjsonAddStringToObject(item, "trigger",i642str( pObj->trigger));
+ tjsonAddStringToObject(item, "triggerParam", i642str(pObj->triggerParam));
+ tjsonAddStringToObject(item, "watermark", i642str(pObj->watermark));
+ tjsonAddStringToObject(item, "sourceDbUid", i642str(pObj->sourceDbUid));
+ tjsonAddStringToObject(item, "targetDbUid", i642str(pObj->targetDbUid));
+ tjsonAddStringToObject(item, "sourceDb", mndGetDbStr(pObj->sourceDb));
+ tjsonAddStringToObject(item, "targetDb", mndGetDbStr(pObj->targetDb));
+ tjsonAddStringToObject(item, "targetSTbName", mndGetStbStr(pObj->targetSTbName));
+ tjsonAddStringToObject(item, "targetStbUid", i642str(pObj->targetStbUid));
+ tjsonAddStringToObject(item, "fixedSinkVgId", i642str(pObj->fixedSinkVgId));
+ sdbRelease(pSdb, pObj);
+ }
+}
+
+void dumpAcct(SSdb *pSdb, SJson *json) {
+ void *pIter = NULL;
+ SJson *items = tjsonAddArrayToObject(json, "accts");
+
+ while (1) {
+ SAcctObj *pObj = NULL;
+ pIter = sdbFetch(pSdb, SDB_ACCT, pIter, (void **)&pObj);
+ if (pIter == NULL) break;
+
+ SJson *item = tjsonCreateObject();
+ tjsonAddItemToArray(items, item);
+ tjsonAddStringToObject(item, "acct", pObj->acct);
+ tjsonAddStringToObject(item, "createdTime", i642str(pObj->createdTime));
+ tjsonAddStringToObject(item, "updateTime", i642str(pObj->updateTime));
+ tjsonAddStringToObject(item, "acctId", i642str(pObj->acctId));
+ sdbRelease(pSdb, pObj);
+ }
+}
+
+void dumpAuth(SSdb *pSdb, SJson *json) {
+ // todo
+}
+
+void dumpUser(SSdb *pSdb, SJson *json) {
+ void *pIter = NULL;
+ SJson *items = tjsonAddArrayToObject(json, "users");
+
+ while (1) {
+ SUserObj *pObj = NULL;
+ pIter = sdbFetch(pSdb, SDB_USER, pIter, (void **)&pObj);
+ if (pIter == NULL) break;
+
+ SJson *item = tjsonCreateObject();
+ tjsonAddItemToArray(items, item);
+ tjsonAddStringToObject(item, "name", pObj->user);
+ tjsonAddStringToObject(item, "acct", pObj->acct);
+ tjsonAddStringToObject(item, "createdTime", i642str(pObj->createdTime));
+ tjsonAddStringToObject(item, "updateTime", i642str(pObj->updateTime));
+ tjsonAddStringToObject(item, "superUser",i642str( pObj->superUser));
+ tjsonAddStringToObject(item, "authVersion", i642str(pObj->authVersion));
+ tjsonAddStringToObject(item, "numOfReadDbs",i642str( taosHashGetSize(pObj->readDbs)));
+ tjsonAddStringToObject(item, "numOfWriteDbs", i642str(taosHashGetSize(pObj->writeDbs)));
+ sdbRelease(pSdb, pObj);
+ }
+}
+
+void dumpDnode(SSdb *pSdb, SJson *json) {
+ void *pIter = NULL;
+ SJson *items = tjsonAddArrayToObject(json, "dnodes");
+
+ while (1) {
+ SDnodeObj *pObj = NULL;
+ pIter = sdbFetch(pSdb, SDB_DNODE, pIter, (void **)&pObj);
+ if (pIter == NULL) break;
+
+ SJson *item = tjsonCreateObject();
+ tjsonAddItemToArray(items, item);
+ tjsonAddStringToObject(item, "id",i642str( pObj->id));
+ tjsonAddStringToObject(item, "createdTime", i642str(pObj->createdTime));
+ tjsonAddStringToObject(item, "updateTime", i642str(pObj->updateTime));
+ tjsonAddStringToObject(item, "port",i642str( pObj->port));
+ tjsonAddStringToObject(item, "fqdn", pObj->fqdn);
+ sdbRelease(pSdb, pObj);
+ }
+}
+
+void dumpBnode(SSdb *pSdb, SJson *json) {
+ // not implemented yet
+}
+
+void dumpSnode(SSdb *pSdb, SJson *json) {
+ void *pIter = NULL;
+ SJson *items = tjsonAddArrayToObject(json, "snodes");
+
+ while (1) {
+ SSnodeObj *pObj = NULL;
+ pIter = sdbFetch(pSdb, SDB_QNODE, pIter, (void **)&pObj);
+ if (pIter == NULL) break;
+
+ SJson *item = tjsonCreateObject();
+ tjsonAddItemToArray(items, item);
+ tjsonAddStringToObject(item, "id",i642str( pObj->id));
+ tjsonAddStringToObject(item, "createdTime", i642str(pObj->createdTime));
+ tjsonAddStringToObject(item, "updateTime", i642str(pObj->updateTime));
+ sdbRelease(pSdb, pObj);
+ }
+}
+
+void dumpQnode(SSdb *pSdb, SJson *json) {
+ void *pIter = NULL;
+ SJson *items = tjsonAddArrayToObject(json, "qnodes");
+
+ while (1) {
+ SQnodeObj *pObj = NULL;
+ pIter = sdbFetch(pSdb, SDB_QNODE, pIter, (void **)&pObj);
+ if (pIter == NULL) break;
+
+ SJson *item = tjsonCreateObject();
+ tjsonAddItemToArray(items, item);
+ tjsonAddStringToObject(item, "id", i642str(pObj->id));
+ tjsonAddStringToObject(item, "createdTime", i642str(pObj->createdTime));
+ tjsonAddStringToObject(item, "updateTime", i642str(pObj->updateTime));
+ sdbRelease(pSdb, pObj);
+ }
+}
+
+void dumpMnode(SSdb *pSdb, SJson *json) {
+ void *pIter = NULL;
+ SJson *items = tjsonAddArrayToObject(json, "mnodes");
+
+ while (1) {
+ SMnodeObj *pObj = NULL;
+ pIter = sdbFetch(pSdb, SDB_MNODE, pIter, (void **)&pObj);
+ if (pIter == NULL) break;
+
+ SJson *item = tjsonCreateObject();
+ tjsonAddItemToArray(items, item);
+ tjsonAddStringToObject(item, "id", i642str(pObj->id));
+ tjsonAddStringToObject(item, "createdTime", i642str(pObj->createdTime));
+ tjsonAddStringToObject(item, "updateTime", i642str(pObj->updateTime));
+ sdbRelease(pSdb, pObj);
+ }
+}
+
+void dumpCluster(SSdb *pSdb, SJson *json) {
+ void *pIter = NULL;
+ SJson *items = tjsonAddArrayToObject(json, "clusters");
+
+ while (1) {
+ SClusterObj *pObj = NULL;
+ pIter = sdbFetch(pSdb, SDB_CLUSTER, pIter, (void **)&pObj);
+ if (pIter == NULL) break;
+
+ SJson *item = tjsonCreateObject();
+ tjsonAddItemToArray(items, item);
+ tjsonAddStringToObject(item, "id", i642str(pObj->id));
+ tjsonAddStringToObject(item, "createdTime", i642str(pObj->createdTime));
+ tjsonAddStringToObject(item, "updateTime", i642str(pObj->updateTime));
+ tjsonAddStringToObject(item, "name", pObj->name);
+ sdbRelease(pSdb, pObj);
+ }
+}
+
+void dumpTrans(SSdb *pSdb, SJson *json) {
+ void *pIter = NULL;
+ SJson *items = tjsonAddArrayToObject(json, "transactions");
+
+ while (1) {
+ STrans *pObj = NULL;
+ pIter = sdbFetch(pSdb, SDB_TRANS, pIter, (void **)&pObj);
+ if (pIter == NULL) break;
+
+ SJson *item = tjsonCreateObject();
+ tjsonAddItemToArray(items, item);
+ tjsonAddStringToObject(item, "id", i642str(pObj->id));
+ tjsonAddStringToObject(item, "stage", i642str(pObj->stage));
+ tjsonAddStringToObject(item, "policy", i642str(pObj->policy));
+ tjsonAddStringToObject(item, "conflict",i642str( pObj->conflict));
+ tjsonAddStringToObject(item, "exec", i642str(pObj->exec));
+ tjsonAddStringToObject(item, "oper", i642str(pObj->oper));
+ tjsonAddStringToObject(item, "createdTime", i642str(pObj->createdTime));
+ tjsonAddStringToObject(item, "dbname", pObj->dbname);
+ tjsonAddStringToObject(item, "stbname", pObj->stbname);
+ tjsonAddStringToObject(item, "opername", pObj->opername);
+ tjsonAddStringToObject(item, "commitLogNum",i642str( taosArrayGetSize(pObj->commitActions)));
+ tjsonAddStringToObject(item, "redoActionNum",i642str(taosArrayGetSize(pObj->redoActions)));
+ tjsonAddStringToObject(item, "undoActionNum", i642str(taosArrayGetSize(pObj->undoActions)));
+ sdbRelease(pSdb, pObj);
+ }
+}
+
+void dumpHeader(SSdb *pSdb, SJson *json) {
+ tjsonAddStringToObject(json, "sver", i642str(1));
+ tjsonAddStringToObject(json, "applyIndex", i642str(pSdb->applyIndex));
+ tjsonAddStringToObject(json, "applyTerm", i642str(pSdb->applyTerm));
+ tjsonAddStringToObject(json, "applyConfig", i642str(pSdb->applyConfig));
+
+ SJson *maxIdsJson = tjsonCreateObject();
+ tjsonAddItemToObject(json, "maxIds", maxIdsJson);
+ for (int32_t i = 0; i < SDB_MAX; ++i) {
+ int64_t maxId = 0;
+ if (i < SDB_MAX) {
+ maxId = pSdb->maxId[i];
+ }
+ tjsonAddStringToObject(maxIdsJson, sdbTableName(i), i642str(maxId));
+ }
+
+ SJson *tableVersJson = tjsonCreateObject();
+ tjsonAddItemToObject(json, "tableVers", tableVersJson);
+ for (int32_t i = 0; i < SDB_MAX; ++i) {
+ int64_t tableVer = 0;
+ if (i < SDB_MAX) {
+ tableVer = pSdb->tableVer[i];
+ }
+ tjsonAddStringToObject(tableVersJson, sdbTableName(i), i642str(tableVer));
+ }
+}
+
+void mndDumpSdb() {
+ mInfo("start to dump sdb info to sdb.json");
+
+ char path[PATH_MAX * 2] = {0};
+ snprintf(path, sizeof(path), "%s%smnode", tsDataDir, TD_DIRSEP);
+
+ SMsgCb msgCb = {0};
+ msgCb.reportStartupFp = reportStartup;
+ msgCb.sendReqFp = sendReq;
+ msgCb.sendRspFp = sendRsp;
+ msgCb.mgmt = (SMgmtWrapper *)(&msgCb); // hack
+ tmsgSetDefault(&msgCb);
+
+ walInit();
+ syncInit();
+
+ SMnodeOpt opt = {.msgCb = msgCb};
+ SMnode *pMnode = mndOpen(path, &opt);
+ if (pMnode == NULL) return;
+
+ SSdb *pSdb = pMnode->pSdb;
+ SJson *json = tjsonCreateObject();
+ dumpHeader(pSdb, json);
+ dumpFunc(pSdb, json);
+ dumpDb(pSdb, json);
+ dumpStb(pSdb, json);
+ dumpSma(pSdb, json);
+ dumpVgroup(pSdb, json);
+ dumpTopic(pSdb, json);
+ dumpConsumer(pSdb, json);
+ dumpSubscribe(pSdb, json);
+ dumpOffset(pSdb, json);
+ dumpStream(pSdb, json);
+ dumpAcct(pSdb, json);
+ dumpAuth(pSdb, json);
+ dumpUser(pSdb, json);
+ dumpDnode(pSdb, json);
+ dumpBnode(pSdb, json);
+ dumpSnode(pSdb, json);
+ dumpQnode(pSdb, json);
+ dumpMnode(pSdb, json);
+ dumpCluster(pSdb, json);
+ dumpTrans(pSdb, json);
+
+ char *pCont = tjsonToString(json);
+ int32_t contLen = strlen(pCont);
+ char file[] = "sdb.json";
+ TdFilePtr pFile = taosOpenFile(file, TD_FILE_CREATE | TD_FILE_WRITE | TD_FILE_TRUNC);
+ if (pFile == NULL) {
+ terrno = TAOS_SYSTEM_ERROR(errno);
+ mError("failed to write %s since %s", file, terrstr());
+ return;
+ }
+ taosWriteFile(pFile, pCont, contLen);
+ taosWriteFile(pFile, "\n", 1);
+ taosFsyncFile(pFile);
+ taosCloseFile(&pFile);
+ tjsonDelete(json);
+ taosMemoryFree(pCont);
+
+ mInfo("dump sdb info success");
+}
+
+#pragma GCC diagnostic pop
diff --git a/source/dnode/mnode/impl/src/mndFunc.c b/source/dnode/mnode/impl/src/mndFunc.c
index e6f4b485242d375fd635f1067acca4a3f2083423..727f7b0cc90f42d6d560fb1f34a5c9a82f70de22 100644
--- a/source/dnode/mnode/impl/src/mndFunc.c
+++ b/source/dnode/mnode/impl/src/mndFunc.c
@@ -38,13 +38,15 @@ static int32_t mndRetrieveFuncs(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock *pB
static void mndCancelGetNextFunc(SMnode *pMnode, void *pIter);
int32_t mndInitFunc(SMnode *pMnode) {
- SSdbTable table = {.sdbType = SDB_FUNC,
- .keyType = SDB_KEY_BINARY,
- .encodeFp = (SdbEncodeFp)mndFuncActionEncode,
- .decodeFp = (SdbDecodeFp)mndFuncActionDecode,
- .insertFp = (SdbInsertFp)mndFuncActionInsert,
- .updateFp = (SdbUpdateFp)mndFuncActionUpdate,
- .deleteFp = (SdbDeleteFp)mndFuncActionDelete};
+ SSdbTable table = {
+ .sdbType = SDB_FUNC,
+ .keyType = SDB_KEY_BINARY,
+ .encodeFp = (SdbEncodeFp)mndFuncActionEncode,
+ .decodeFp = (SdbDecodeFp)mndFuncActionDecode,
+ .insertFp = (SdbInsertFp)mndFuncActionInsert,
+ .updateFp = (SdbUpdateFp)mndFuncActionUpdate,
+ .deleteFp = (SdbDeleteFp)mndFuncActionDelete,
+ };
mndSetMsgHandle(pMnode, TDMT_MND_CREATE_FUNC, mndProcessCreateFuncReq);
mndSetMsgHandle(pMnode, TDMT_MND_DROP_FUNC, mndProcessDropFuncReq);
@@ -219,10 +221,10 @@ static int32_t mndCreateFunc(SMnode *pMnode, SRpcMsg *pReq, SCreateFuncReq *pCre
}
memcpy(func.pCode, pCreate->pCode, func.codeSize);
- pTrans = mndTransCreate(pMnode, TRN_POLICY_ROLLBACK, TRN_CONFLICT_NOTHING, pReq);
+ pTrans = mndTransCreate(pMnode, TRN_POLICY_ROLLBACK, TRN_CONFLICT_NOTHING, pReq, "create-func");
if (pTrans == NULL) goto _OVER;
- mDebug("trans:%d, used to create func:%s", pTrans->id, pCreate->name);
+ mInfo("trans:%d, used to create func:%s", pTrans->id, pCreate->name);
SSdbRaw *pRedoRaw = mndFuncActionEncode(&func);
if (pRedoRaw == NULL || mndTransAppendRedolog(pTrans, pRedoRaw) != 0) goto _OVER;
@@ -249,10 +251,10 @@ _OVER:
static int32_t mndDropFunc(SMnode *pMnode, SRpcMsg *pReq, SFuncObj *pFunc) {
int32_t code = -1;
- STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_ROLLBACK, TRN_CONFLICT_NOTHING, pReq);
+ STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_ROLLBACK, TRN_CONFLICT_NOTHING, pReq, "drop-func");
if (pTrans == NULL) goto _OVER;
- mDebug("trans:%d, used to drop user:%s", pTrans->id, pFunc->name);
+ mInfo("trans:%d, used to drop user:%s", pTrans->id, pFunc->name);
SSdbRaw *pRedoRaw = mndFuncActionEncode(pFunc);
if (pRedoRaw == NULL || mndTransAppendRedolog(pTrans, pRedoRaw) != 0) goto _OVER;
@@ -286,7 +288,7 @@ static int32_t mndProcessCreateFuncReq(SRpcMsg *pReq) {
goto _OVER;
}
- mDebug("func:%s, start to create", createReq.name);
+ mInfo("func:%s, start to create", createReq.name);
if (mndCheckOperPrivilege(pMnode, pReq->info.conn.user, MND_OPER_CREATE_FUNC) != 0) {
goto _OVER;
}
@@ -294,7 +296,7 @@ static int32_t mndProcessCreateFuncReq(SRpcMsg *pReq) {
pFunc = mndAcquireFunc(pMnode, createReq.name);
if (pFunc != NULL) {
if (createReq.igExists) {
- mDebug("func:%s, already exist, ignore exist is set", createReq.name);
+ mInfo("func:%s, already exist, ignore exist is set", createReq.name);
code = 0;
goto _OVER;
} else {
@@ -349,7 +351,7 @@ static int32_t mndProcessDropFuncReq(SRpcMsg *pReq) {
goto _OVER;
}
- mDebug("func:%s, start to drop", dropReq.name);
+ mInfo("func:%s, start to drop", dropReq.name);
if (mndCheckOperPrivilege(pMnode, pReq->info.conn.user, MND_OPER_DROP_FUNC) != 0) {
goto _OVER;
}
@@ -362,7 +364,7 @@ static int32_t mndProcessDropFuncReq(SRpcMsg *pReq) {
pFunc = mndAcquireFunc(pMnode, dropReq.name);
if (pFunc == NULL) {
if (dropReq.igNotExists) {
- mDebug("func:%s, not exist, ignore not exist is set", dropReq.name);
+ mInfo("func:%s, not exist, ignore not exist is set", dropReq.name);
code = 0;
goto _OVER;
} else {
diff --git a/source/dnode/mnode/impl/src/mndMain.c b/source/dnode/mnode/impl/src/mndMain.c
index 2221718023c8d080059736fd811c946618fd948d..a628cefa65bbaf10787d6f92036fb4aaaff439f2 100644
--- a/source/dnode/mnode/impl/src/mndMain.c
+++ b/source/dnode/mnode/impl/src/mndMain.c
@@ -65,7 +65,7 @@ static void mndPullupTrans(SMnode *pMnode) {
}
}
-static void mndTtlTimer(SMnode *pMnode) {
+static void mndPullupTtl(SMnode *pMnode) {
int32_t contLen = 0;
void *pReq = mndBuildTimerMsg(&contLen);
SRpcMsg rpcMsg = {.msgType = TDMT_MND_TTL_TIMER, .pCont = pReq, .contLen = contLen};
@@ -90,7 +90,7 @@ static void mndPullupTelem(SMnode *pMnode) {
}
}
-static void mndGrantHeartBeat(SMnode *pMnode) {
+static void mndPullupGrant(SMnode *pMnode) {
int32_t contLen = 0;
void *pReq = mndBuildTimerMsg(&contLen);
if (pReq != NULL) {
@@ -119,28 +119,30 @@ static void *mndThreadFp(void *param) {
lastTime++;
taosMsleep(100);
if (mndGetStop(pMnode)) break;
+ if (lastTime % 10 != 0) continue;
- if (lastTime % (tsTtlPushInterval * 10) == 1) {
- mndTtlTimer(pMnode);
+ int64_t sec = lastTime / 10;
+ if (sec % tsTtlPushInterval == 0) {
+ mndPullupTtl(pMnode);
}
- if (lastTime % (tsTransPullupInterval * 10) == 0) {
+ if (sec % tsTransPullupInterval == 0) {
mndPullupTrans(pMnode);
}
- if (lastTime % (tsMqRebalanceInterval * 10) == 0) {
+ if (sec % tsMqRebalanceInterval == 0) {
mndCalMqRebalance(pMnode);
}
- if (lastTime % (tsTelemInterval * 10) == ((tsTelemInterval - 1) * 10)) {
+ if (sec % tsTelemInterval == (TMIN(60, (tsTelemInterval - 1)))) {
mndPullupTelem(pMnode);
}
- if (lastTime % (tsGrantHBInterval * 10) == 0) {
- mndGrantHeartBeat(pMnode);
+ if (sec % tsGrantHBInterval == 0) {
+ mndPullupGrant(pMnode);
}
- if ((lastTime % (tsUptimeInterval * 10)) == ((tsUptimeInterval - 1) * 10)) {
+ if (sec % tsUptimeInterval == 0) {
mndIncreaseUpTime(pMnode);
}
}
@@ -300,7 +302,7 @@ static void mndCleanupSteps(SMnode *pMnode, int32_t pos) {
for (int32_t s = pos; s >= 0; s--) {
SMnodeStep *pStep = taosArrayGet(pMnode->pSteps, s);
- mDebug("%s will cleanup", pStep->name);
+ mInfo("%s will cleanup", pStep->name);
if (pStep->cleanupFp != NULL) {
(*pStep->cleanupFp)(pMnode);
}
@@ -324,7 +326,7 @@ static int32_t mndExecSteps(SMnode *pMnode) {
terrno = code;
return -1;
} else {
- mDebug("%s is initialized", pStep->name);
+ mInfo("%s is initialized", pStep->name);
tmsgReportStartup(pStep->name, "initialized");
}
}
@@ -341,7 +343,7 @@ static void mndSetOptions(SMnode *pMnode, const SMnodeOpt *pOption) {
}
SMnode *mndOpen(const char *path, const SMnodeOpt *pOption) {
- mDebug("start to open mnode in %s", path);
+ mInfo("start to open mnode in %s", path);
SMnode *pMnode = taosMemoryCalloc(1, sizeof(SMnode));
if (pMnode == NULL) {
@@ -390,7 +392,7 @@ SMnode *mndOpen(const char *path, const SMnodeOpt *pOption) {
return NULL;
}
- mDebug("mnode open successfully ");
+ mInfo("mnode open successfully ");
return pMnode;
}
@@ -399,25 +401,25 @@ void mndPreClose(SMnode *pMnode) {
atomic_store_8(&(pMnode->syncMgmt.leaderTransferFinish), 0);
syncLeaderTransfer(pMnode->syncMgmt.sync);
- /*
- mDebug("vgId:1, mnode start leader transfer");
- // wait for leader transfer finish
- while (!atomic_load_8(&(pMnode->syncMgmt.leaderTransferFinish))) {
- taosMsleep(10);
- mDebug("vgId:1, mnode waiting for leader transfer");
- }
- mDebug("vgId:1, mnode finish leader transfer");
- */
+#if 0
+ mInfo("vgId:1, mnode start leader transfer");
+ // wait for leader transfer finish
+ while (!atomic_load_8(&(pMnode->syncMgmt.leaderTransferFinish))) {
+ taosMsleep(10);
+ mInfo("vgId:1, mnode waiting for leader transfer");
+ }
+ mInfo("vgId:1, mnode finish leader transfer");
+#endif
}
}
void mndClose(SMnode *pMnode) {
if (pMnode != NULL) {
- mDebug("start to close mnode");
+ mInfo("start to close mnode");
mndCleanupSteps(pMnode, -1);
taosMemoryFreeClear(pMnode->path);
taosMemoryFreeClear(pMnode);
- mDebug("mnode is closed");
+ mInfo("mnode is closed");
}
}
diff --git a/source/dnode/mnode/impl/src/mndMnode.c b/source/dnode/mnode/impl/src/mndMnode.c
index 4f07d9e0143f52da057c40d2e655044da01a6b72..a41f958c0f62490eff3a7f50f495949f06f48925 100644
--- a/source/dnode/mnode/impl/src/mndMnode.c
+++ b/source/dnode/mnode/impl/src/mndMnode.c
@@ -89,14 +89,14 @@ static int32_t mndCreateDefaultMnode(SMnode *pMnode) {
if (pRaw == NULL) return -1;
sdbSetRawStatus(pRaw, SDB_STATUS_READY);
- mDebug("mnode:%d, will be created when deploying, raw:%p", mnodeObj.id, pRaw);
+ mInfo("mnode:%d, will be created when deploying, raw:%p", mnodeObj.id, pRaw);
- STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_RETRY, TRN_CONFLICT_GLOBAL, NULL);
+ STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_RETRY, TRN_CONFLICT_GLOBAL, NULL, "create-mnode");
if (pTrans == NULL) {
mError("mnode:%d, failed to create since %s", mnodeObj.id, terrstr());
return -1;
}
- mDebug("trans:%d, used to create mnode:%d", pTrans->id, mnodeObj.id);
+ mInfo("trans:%d, used to create mnode:%d", pTrans->id, mnodeObj.id);
if (mndTransAppendCommitlog(pTrans, pRaw) != 0) {
mError("trans:%d, failed to append commit log since %s", pTrans->id, terrstr());
@@ -362,10 +362,10 @@ static int32_t mndCreateMnode(SMnode *pMnode, SRpcMsg *pReq, SDnodeObj *pDnode,
mnodeObj.createdTime = taosGetTimestampMs();
mnodeObj.updateTime = mnodeObj.createdTime;
- STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_RETRY, TRN_CONFLICT_GLOBAL, pReq);
+ STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_RETRY, TRN_CONFLICT_GLOBAL, pReq, "create-mnode");
if (pTrans == NULL) goto _OVER;
mndTransSetSerial(pTrans);
- mDebug("trans:%d, used to create mnode:%d", pTrans->id, pCreate->dnodeId);
+ mInfo("trans:%d, used to create mnode:%d", pTrans->id, pCreate->dnodeId);
if (mndSetCreateMnodeRedoLogs(pMnode, pTrans, &mnodeObj) != 0) goto _OVER;
if (mndSetCreateMnodeCommitLogs(pMnode, pTrans, &mnodeObj) != 0) goto _OVER;
@@ -392,7 +392,7 @@ static int32_t mndProcessCreateMnodeReq(SRpcMsg *pReq) {
goto _OVER;
}
- mDebug("mnode:%d, start to create", createReq.dnodeId);
+ mInfo("mnode:%d, start to create", createReq.dnodeId);
if (mndCheckOperPrivilege(pMnode, pReq->info.conn.user, MND_OPER_CREATE_MNODE) != 0) {
goto _OVER;
}
@@ -571,10 +571,10 @@ static int32_t mndDropMnode(SMnode *pMnode, SRpcMsg *pReq, SMnodeObj *pObj) {
int32_t code = -1;
STrans *pTrans = NULL;
- pTrans = mndTransCreate(pMnode, TRN_POLICY_RETRY, TRN_CONFLICT_GLOBAL, pReq);
+ pTrans = mndTransCreate(pMnode, TRN_POLICY_RETRY, TRN_CONFLICT_GLOBAL, pReq, "drop-mnode");
if (pTrans == NULL) goto _OVER;
mndTransSetSerial(pTrans);
- mDebug("trans:%d, used to drop mnode:%d", pTrans->id, pObj->id);
+ mInfo("trans:%d, used to drop mnode:%d", pTrans->id, pObj->id);
if (mndSetDropMnodeInfoToTrans(pMnode, pTrans, pObj) != 0) goto _OVER;
if (mndTransPrepare(pMnode, pTrans) != 0) goto _OVER;
@@ -597,7 +597,7 @@ static int32_t mndProcessDropMnodeReq(SRpcMsg *pReq) {
goto _OVER;
}
- mDebug("mnode:%d, start to drop", dropReq.dnodeId);
+ mInfo("mnode:%d, start to drop", dropReq.dnodeId);
if (mndCheckOperPrivilege(pMnode, pReq->info.conn.user, MND_OPER_DROP_MNODE) != 0) {
goto _OVER;
}
@@ -732,7 +732,7 @@ static int32_t mndProcessAlterMnodeReq(SRpcMsg *pReq) {
}
}
- mTrace("trans:-1, sync reconfig will be proposed");
+ mInfo("trans:-1, sync reconfig will be proposed");
SSyncMgmt *pMgmt = &pMnode->syncMgmt;
pMgmt->standby = 0;
diff --git a/source/dnode/mnode/impl/src/mndOffset.c b/source/dnode/mnode/impl/src/mndOffset.c
index 037a46345ffed6b1205292e513df1c2db9528b3b..797aa88670fc2ac7f83ace7d59851ef9b7874f6c 100644
--- a/source/dnode/mnode/impl/src/mndOffset.c
+++ b/source/dnode/mnode/impl/src/mndOffset.c
@@ -181,7 +181,7 @@ static int32_t mndProcessCommitOffsetReq(SRpcMsg *pMsg) {
tDecodeSMqCMCommitOffsetReq(&decoder, &commitOffsetReq);
- STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_ROLLBACK, TRN_CONFLICT_NOTHING, pMsg);
+ STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_ROLLBACK, TRN_CONFLICT_NOTHING, pMsg, "commit-offset");
for (int32_t i = 0; i < commitOffsetReq.num; i++) {
SMqOffset *pOffset = &commitOffsetReq.offsets[i];
diff --git a/source/dnode/mnode/impl/src/mndQnode.c b/source/dnode/mnode/impl/src/mndQnode.c
index f057f6190defd3c7c8d01e7b8e7d39b37c1f3c6e..cbd398e36181517b19dc5c682badb80a2814f09a 100644
--- a/source/dnode/mnode/impl/src/mndQnode.c
+++ b/source/dnode/mnode/impl/src/mndQnode.c
@@ -248,10 +248,10 @@ static int32_t mndCreateQnode(SMnode *pMnode, SRpcMsg *pReq, SDnodeObj *pDnode,
qnodeObj.createdTime = taosGetTimestampMs();
qnodeObj.updateTime = qnodeObj.createdTime;
- STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_ROLLBACK, TRN_CONFLICT_NOTHING, pReq);
+ STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_ROLLBACK, TRN_CONFLICT_NOTHING, pReq, "create-qnode");
if (pTrans == NULL) goto _OVER;
- mDebug("trans:%d, used to create qnode:%d", pTrans->id, pCreate->dnodeId);
+ mInfo("trans:%d, used to create qnode:%d", pTrans->id, pCreate->dnodeId);
if (mndSetCreateQnodeRedoLogs(pTrans, &qnodeObj) != 0) goto _OVER;
if (mndSetCreateQnodeUndoLogs(pTrans, &qnodeObj) != 0) goto _OVER;
if (mndSetCreateQnodeCommitLogs(pTrans, &qnodeObj) != 0) goto _OVER;
@@ -278,7 +278,7 @@ static int32_t mndProcessCreateQnodeReq(SRpcMsg *pReq) {
goto _OVER;
}
- mDebug("qnode:%d, start to create", createReq.dnodeId);
+ mInfo("qnode:%d, start to create", createReq.dnodeId);
if (mndCheckOperPrivilege(pMnode, pReq->info.conn.user, MND_OPER_CREATE_QNODE) != 0) {
goto _OVER;
}
@@ -364,10 +364,10 @@ int32_t mndSetDropQnodeInfoToTrans(SMnode *pMnode, STrans *pTrans, SQnodeObj *pO
static int32_t mndDropQnode(SMnode *pMnode, SRpcMsg *pReq, SQnodeObj *pObj) {
int32_t code = -1;
- STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_RETRY, TRN_CONFLICT_NOTHING, pReq);
+ STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_RETRY, TRN_CONFLICT_NOTHING, pReq, "drop-qnode");
if (pTrans == NULL) goto _OVER;
- mDebug("trans:%d, used to drop qnode:%d", pTrans->id, pObj->id);
+ mInfo("trans:%d, used to drop qnode:%d", pTrans->id, pObj->id);
if (mndSetDropQnodeInfoToTrans(pMnode, pTrans, pObj) != 0) goto _OVER;
if (mndTransPrepare(pMnode, pTrans) != 0) goto _OVER;
@@ -389,7 +389,7 @@ static int32_t mndProcessDropQnodeReq(SRpcMsg *pReq) {
goto _OVER;
}
- mDebug("qnode:%d, start to drop", dropReq.dnodeId);
+ mInfo("qnode:%d, start to drop", dropReq.dnodeId);
if (mndCheckOperPrivilege(pMnode, pReq->info.conn.user, MND_OPER_DROP_QNODE) != 0) {
goto _OVER;
}
diff --git a/source/dnode/mnode/impl/src/mndQuery.c b/source/dnode/mnode/impl/src/mndQuery.c
index 654f46ec85682a21e8ef0009c3fcb654180c93b1..0e897de4e7bd64cdaffcd0c0f6da339ab8db4851 100644
--- a/source/dnode/mnode/impl/src/mndQuery.c
+++ b/source/dnode/mnode/impl/src/mndQuery.c
@@ -170,7 +170,7 @@ _exit:
}
int32_t mndInitQuery(SMnode *pMnode) {
- if (qWorkerInit(NODE_TYPE_MNODE, MNODE_HANDLE, NULL, (void **)&pMnode->pQuery, &pMnode->msgCb) != 0) {
+ if (qWorkerInit(NODE_TYPE_MNODE, MNODE_HANDLE, (void **)&pMnode->pQuery, &pMnode->msgCb) != 0) {
mError("failed to init qworker in mnode since %s", terrstr());
return -1;
}
diff --git a/source/dnode/mnode/impl/src/mndScheduler.c b/source/dnode/mnode/impl/src/mndScheduler.c
index 3bfd7eb5964a446698556551bbe572f2dc568110..511a84290dd2a311f59cfc9467ff2619d92013cb 100644
--- a/source/dnode/mnode/impl/src/mndScheduler.c
+++ b/source/dnode/mnode/impl/src/mndScheduler.c
@@ -319,7 +319,7 @@ int32_t mndScheduleStream(SMnode* pMnode, SStreamObj* pStream) {
bool multiTarget = pDbObj->cfg.numOfVgroups > 1;
- if (planTotLevel == 2 || externalTargetDB || multiTarget) {
+ if (planTotLevel == 2 || externalTargetDB || multiTarget || pStream->fixedSinkVgId) {
/*if (true) {*/
SArray* taskOneLevel = taosArrayInit(0, sizeof(void*));
taosArrayPush(pStream->tasks, &taskOneLevel);
diff --git a/source/dnode/mnode/impl/src/mndShow.c b/source/dnode/mnode/impl/src/mndShow.c
index 9499c90c57c59e3600c701668dd17671f641d919..5a998dfe986d9f012e066f45810604b7ca9d728f 100644
--- a/source/dnode/mnode/impl/src/mndShow.c
+++ b/source/dnode/mnode/impl/src/mndShow.c
@@ -88,7 +88,7 @@ static int32_t convertToRetrieveType(char *name, int32_t len) {
type = TSDB_MGMT_TABLE_VGROUP;
} else if (strncasecmp(name, TSDB_PERFS_TABLE_CONSUMERS, len) == 0) {
type = TSDB_MGMT_TABLE_CONSUMERS;
- } else if (strncasecmp(name, TSDB_PERFS_TABLE_SUBSCRIPTIONS, len) == 0) {
+ } else if (strncasecmp(name, TSDB_INS_TABLE_SUBSCRIPTIONS, len) == 0) {
type = TSDB_MGMT_TABLE_SUBSCRIPTIONS;
} else if (strncasecmp(name, TSDB_PERFS_TABLE_TRANS, len) == 0) {
type = TSDB_MGMT_TABLE_TRANS;
@@ -102,9 +102,9 @@ static int32_t convertToRetrieveType(char *name, int32_t len) {
type = TSDB_MGMT_TABLE_QUERIES;
} else if (strncasecmp(name, TSDB_INS_TABLE_VNODES, len) == 0) {
type = TSDB_MGMT_TABLE_VNODES;
- } else if (strncasecmp(name, TSDB_PERFS_TABLE_TOPICS, len) == 0) {
+ } else if (strncasecmp(name, TSDB_INS_TABLE_TOPICS, len) == 0) {
type = TSDB_MGMT_TABLE_TOPICS;
- } else if (strncasecmp(name, TSDB_PERFS_TABLE_STREAMS, len) == 0) {
+ } else if (strncasecmp(name, TSDB_INS_TABLE_STREAMS, len) == 0) {
type = TSDB_MGMT_TABLE_STREAMS;
} else if (strncasecmp(name, TSDB_PERFS_TABLE_APPS, len) == 0) {
type = TSDB_MGMT_TABLE_APPS;
diff --git a/source/dnode/mnode/impl/src/mndSma.c b/source/dnode/mnode/impl/src/mndSma.c
index 2fb934aaad735240e1a249447b5d041853819d82..90d05e8f211d98a2b66d0c98a76a01492a3f3749 100644
--- a/source/dnode/mnode/impl/src/mndSma.c
+++ b/source/dnode/mnode/impl/src/mndSma.c
@@ -38,7 +38,6 @@ static SSdbRow *mndSmaActionDecode(SSdbRaw *pRaw);
static int32_t mndSmaActionInsert(SSdb *pSdb, SSmaObj *pSma);
static int32_t mndSmaActionDelete(SSdb *pSdb, SSmaObj *pSpSmatb);
static int32_t mndSmaActionUpdate(SSdb *pSdb, SSmaObj *pOld, SSmaObj *pNew);
-static int32_t mndSmaGetVgEpSet(SMnode *pMnode, SDbObj *pDb, SVgEpSet **ppVgEpSet, int32_t *numOfVgroups);
static int32_t mndProcessCreateSmaReq(SRpcMsg *pReq);
static int32_t mndProcessDropSmaReq(SRpcMsg *pReq);
static int32_t mndProcessGetSmaReq(SRpcMsg *pReq);
@@ -588,11 +587,11 @@ static int32_t mndCreateSma(SMnode *pMnode, SRpcMsg *pReq, SMCreateSmaReq *pCrea
nodesDestroyNode((SNode *)pPlan);
int32_t code = -1;
- STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_RETRY, TRN_CONFLICT_DB, pReq);
+ STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_RETRY, TRN_CONFLICT_DB, pReq, "create-sma");
if (pTrans == NULL) goto _OVER;
mndTransSetDbName(pTrans, pDb->name, NULL);
mndTransSetSerial(pTrans);
- mDebug("trans:%d, used to create sma:%s stream:%s", pTrans->id, pCreate->name, streamObj.name);
+ mInfo("trans:%d, used to create sma:%s stream:%s", pTrans->id, pCreate->name, streamObj.name);
if (mndSetCreateSmaRedoLogs(pMnode, pTrans, &smaObj) != 0) goto _OVER;
if (mndSetCreateSmaVgroupRedoLogs(pMnode, pTrans, &streamObj.fixedSinkVg) != 0) goto _OVER;
@@ -604,8 +603,8 @@ static int32_t mndCreateSma(SMnode *pMnode, SRpcMsg *pReq, SMCreateSmaReq *pCrea
if (mndPersistStream(pMnode, pTrans, &streamObj) != 0) goto _OVER;
if (mndTransPrepare(pMnode, pTrans) != 0) goto _OVER;
- mDebug("mndSma: create sma index %s %" PRIi64 " on stb:%" PRIi64 ", dstSuid:%" PRIi64 " dstTb:%s dstVg:%d",
- pCreate->name, smaObj.uid, smaObj.stbUid, smaObj.dstTbUid, smaObj.dstTbName, smaObj.dstVgId);
+ mInfo("sma:%s, uid:%" PRIi64 " create on stb:%" PRIi64 ", dstSuid:%" PRIi64 " dstTb:%s dstVg:%d", pCreate->name,
+ smaObj.uid, smaObj.stbUid, smaObj.dstTbUid, smaObj.dstTbName, smaObj.dstVgId);
code = 0;
@@ -666,7 +665,7 @@ static int32_t mndProcessCreateSmaReq(SRpcMsg *pReq) {
goto _OVER;
}
- mDebug("sma:%s, start to create", createReq.name);
+ mInfo("sma:%s, start to create", createReq.name);
if (mndCheckCreateSmaReq(&createReq) != 0) {
goto _OVER;
}
@@ -690,7 +689,7 @@ static int32_t mndProcessCreateSmaReq(SRpcMsg *pReq) {
pSma = mndAcquireSma(pMnode, createReq.name);
if (pSma != NULL) {
if (createReq.igExists) {
- mDebug("sma:%s, already exist in sma:%s, ignore exist is set", createReq.name, pSma->name);
+ mInfo("sma:%s, already exist in sma:%s, ignore exist is set", createReq.name, pSma->name);
code = 0;
goto _OVER;
} else {
@@ -800,10 +799,10 @@ static int32_t mndDropSma(SMnode *pMnode, SRpcMsg *pReq, SDbObj *pDb, SSmaObj *p
pStb = mndAcquireStb(pMnode, pSma->stb);
if (pStb == NULL) goto _OVER;
- pTrans = mndTransCreate(pMnode, TRN_POLICY_RETRY, TRN_CONFLICT_DB, pReq);
+ pTrans = mndTransCreate(pMnode, TRN_POLICY_RETRY, TRN_CONFLICT_DB, pReq, "drop-sma");
if (pTrans == NULL) goto _OVER;
- mDebug("trans:%d, used to drop sma:%s", pTrans->id, pSma->name);
+ mInfo("trans:%d, used to drop sma:%s", pTrans->id, pSma->name);
mndTransSetDbName(pTrans, pDb->name, NULL);
mndTransSetSerial(pTrans);
@@ -841,6 +840,7 @@ static int32_t mndDropSma(SMnode *pMnode, SRpcMsg *pReq, SDbObj *pDb, SSmaObj *p
_OVER:
mndTransDrop(pTrans);
+ mndReleaseStream(pMnode, pStream);
mndReleaseVgroup(pMnode, pVgroup);
mndReleaseStb(pMnode, pStb);
return code;
@@ -929,12 +929,12 @@ static int32_t mndProcessDropSmaReq(SRpcMsg *pReq) {
goto _OVER;
}
- mDebug("sma:%s, start to drop", dropReq.name);
+ mInfo("sma:%s, start to drop", dropReq.name);
pSma = mndAcquireSma(pMnode, dropReq.name);
if (pSma == NULL) {
if (dropReq.igNotExists) {
- mDebug("sma:%s, not exist, ignore not exist is set", dropReq.name);
+ mInfo("sma:%s, not exist, ignore not exist is set", dropReq.name);
code = 0;
goto _OVER;
} else {
@@ -961,6 +961,7 @@ _OVER:
mError("sma:%s, failed to drop since %s", dropReq.name, terrstr());
}
+ mndReleaseSma(pMnode, pSma);
mndReleaseDb(pMnode, pDb);
return code;
}
diff --git a/source/dnode/mnode/impl/src/mndSnode.c b/source/dnode/mnode/impl/src/mndSnode.c
index d18a233d29eb6f7c0b72a4fbbfc81f2dd2869560..691270997064f0648fad3c268beac0f34921980c 100644
--- a/source/dnode/mnode/impl/src/mndSnode.c
+++ b/source/dnode/mnode/impl/src/mndSnode.c
@@ -253,10 +253,10 @@ static int32_t mndCreateSnode(SMnode *pMnode, SRpcMsg *pReq, SDnodeObj *pDnode,
snodeObj.createdTime = taosGetTimestampMs();
snodeObj.updateTime = snodeObj.createdTime;
- STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_ROLLBACK, TRN_CONFLICT_NOTHING, pReq);
+ STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_ROLLBACK, TRN_CONFLICT_NOTHING, pReq, "create-snode");
if (pTrans == NULL) goto _OVER;
- mDebug("trans:%d, used to create snode:%d", pTrans->id, pCreate->dnodeId);
+ mInfo("trans:%d, used to create snode:%d", pTrans->id, pCreate->dnodeId);
if (mndSetCreateSnodeRedoLogs(pTrans, &snodeObj) != 0) goto _OVER;
if (mndSetCreateSnodeUndoLogs(pTrans, &snodeObj) != 0) goto _OVER;
@@ -287,7 +287,7 @@ static int32_t mndProcessCreateSnodeReq(SRpcMsg *pReq) {
goto _OVER;
}
- mDebug("snode:%d, start to create", createReq.dnodeId);
+ mInfo("snode:%d, start to create", createReq.dnodeId);
if (mndCheckOperPrivilege(pMnode, pReq->info.conn.user, MND_OPER_CREATE_SNODE) != 0) {
goto _OVER;
}
@@ -375,10 +375,10 @@ int32_t mndSetDropSnodeInfoToTrans(SMnode *pMnode, STrans *pTrans, SSnodeObj *pO
static int32_t mndDropSnode(SMnode *pMnode, SRpcMsg *pReq, SSnodeObj *pObj) {
int32_t code = -1;
- STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_RETRY, TRN_CONFLICT_NOTHING, pReq);
+ STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_RETRY, TRN_CONFLICT_NOTHING, pReq, "drop-snode");
if (pTrans == NULL) goto _OVER;
- mDebug("trans:%d, used to drop snode:%d", pTrans->id, pObj->id);
+ mInfo("trans:%d, used to drop snode:%d", pTrans->id, pObj->id);
if (mndSetDropSnodeInfoToTrans(pMnode, pTrans, pObj) != 0) goto _OVER;
if (mndTransPrepare(pMnode, pTrans) != 0) goto _OVER;
@@ -403,7 +403,7 @@ static int32_t mndProcessDropSnodeReq(SRpcMsg *pReq) {
goto _OVER;
}
- mDebug("snode:%d, start to drop", dropReq.dnodeId);
+ mInfo("snode:%d, start to drop", dropReq.dnodeId);
if (mndCheckOperPrivilege(pMnode, pReq->info.conn.user, MND_OPER_DROP_SNODE) != 0) {
goto _OVER;
}
diff --git a/source/dnode/mnode/impl/src/mndStb.c b/source/dnode/mnode/impl/src/mndStb.c
index 81ede6de90305d6b825e52fb1b1977079b455a7b..38c7850e6c3eb85efcb47185d507816ac1cc497b 100644
--- a/source/dnode/mnode/impl/src/mndStb.c
+++ b/source/dnode/mnode/impl/src/mndStb.c
@@ -536,7 +536,7 @@ int32_t mndCheckCreateStbReq(SMCreateStbReq *pCreate) {
return -1;
}
- if (pCreate->numOfColumns < TSDB_MIN_COLUMNS || pCreate->numOfColumns > TSDB_MAX_COLUMNS) {
+ if (pCreate->numOfColumns < TSDB_MIN_COLUMNS || pCreate->numOfTags + pCreate->numOfColumns > TSDB_MAX_COLUMNS) {
terrno = TSDB_CODE_PAR_INVALID_COLUMNS_NUM;
return -1;
}
@@ -800,10 +800,10 @@ static int32_t mndCreateStb(SMnode *pMnode, SRpcMsg *pReq, SMCreateStbReq *pCrea
SStbObj stbObj = {0};
int32_t code = -1;
- STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_ROLLBACK, TRN_CONFLICT_DB_INSIDE, pReq);
+ STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_ROLLBACK, TRN_CONFLICT_DB_INSIDE, pReq, "create-stb");
if (pTrans == NULL) goto _OVER;
- mDebug("trans:%d, used to create stb:%s", pTrans->id, pCreate->name);
+ mInfo("trans:%d, used to create stb:%s", pTrans->id, pCreate->name);
if (mndBuildStbFromReq(pMnode, &stbObj, pCreate, pDb) != 0) goto _OVER;
if (mndAddStbToTrans(pMnode, pTrans, pDb, &stbObj) < 0) goto _OVER;
if (mndTransPrepare(pMnode, pTrans) != 0) goto _OVER;
@@ -834,6 +834,8 @@ static int32_t mndProcessTtlTimer(SRpcMsg *pReq) {
int32_t reqLen = tSerializeSVDropTtlTableReq(NULL, 0, &ttlReq);
int32_t contLen = reqLen + sizeof(SMsgHead);
+ mInfo("start to process ttl timer");
+
while (1) {
pIter = sdbFetch(pSdb, SDB_VGROUP, pIter, (void **)&pVgroup);
if (pIter == NULL) break;
@@ -854,7 +856,7 @@ static int32_t mndProcessTtlTimer(SRpcMsg *pReq) {
if (code != 0) {
mError("vgId:%d, failed to send drop ttl table request to vnode since 0x%x", pVgroup->vgId, code);
} else {
- mDebug("vgId:%d, send drop ttl table request to vnode, time:%d", pVgroup->vgId, ttlReq.timestamp);
+ mInfo("vgId:%d, send drop ttl table request to vnode, time:%d", pVgroup->vgId, ttlReq.timestamp);
}
sdbRelease(pSdb, pVgroup);
}
@@ -943,7 +945,7 @@ static int32_t mndProcessCreateStbReq(SRpcMsg *pReq) {
goto _OVER;
}
- mDebug("stb:%s, start to create", createReq.name);
+ mInfo("stb:%s, start to create", createReq.name);
if (mndCheckCreateStbReq(&createReq) != 0) {
terrno = TSDB_CODE_INVALID_MSG;
goto _OVER;
@@ -953,7 +955,7 @@ static int32_t mndProcessCreateStbReq(SRpcMsg *pReq) {
if (pStb != NULL) {
if (createReq.igExists) {
if (createReq.source == TD_REQ_FROM_APP) {
- mDebug("stb:%s, already exist, ignore exist is set", createReq.name);
+ mInfo("stb:%s, already exist, ignore exist is set", createReq.name);
code = 0;
goto _OVER;
} else if (pStb->uid != createReq.suid) {
@@ -1144,7 +1146,7 @@ static int32_t mndAddSuperTableTag(const SStbObj *pOld, SStbObj *pNew, SArray *p
pSchema->colId = pNew->nextColId;
pNew->nextColId++;
- mDebug("stb:%s, start to add tag %s", pNew->name, pSchema->name);
+ mInfo("stb:%s, start to add tag %s", pNew->name, pSchema->name);
}
pNew->tagVer++;
@@ -1159,7 +1161,7 @@ static int32_t mndCheckAlterColForTopic(SMnode *pMnode, const char *stbFullName,
pIter = sdbFetch(pSdb, SDB_TOPIC, pIter, (void **)&pTopic);
if (pIter == NULL) break;
- mDebug("topic:%s, check tag and column modifiable, stb:%s suid:%" PRId64 " colId:%d, subType:%d sql:%s",
+ mInfo("topic:%s, check tag and column modifiable, stb:%s suid:%" PRId64 " colId:%d, subType:%d sql:%s",
pTopic->name, stbFullName, suid, colId, pTopic->subType, pTopic->sql);
if (pTopic->subType != TOPIC_SUB_TYPE__COLUMN) {
sdbRelease(pSdb, pTopic);
@@ -1177,26 +1179,28 @@ static int32_t mndCheckAlterColForTopic(SMnode *pMnode, const char *stbFullName,
SNode *pNode = NULL;
FOREACH(pNode, pNodeList) {
SColumnNode *pCol = (SColumnNode *)pNode;
- mDebug("topic:%s, check colId:%d tableId:%" PRId64 " ctbStbUid:%" PRId64, pTopic->name, pCol->colId,
+ mInfo("topic:%s, check colId:%d tableId:%" PRId64 " ctbStbUid:%" PRId64, pTopic->name, pCol->colId,
pCol->tableId, pTopic->ctbStbUid);
if (pCol->tableId != suid && pTopic->ctbStbUid != suid) {
- mDebug("topic:%s, check colId:%d passed", pTopic->name, pCol->colId);
+ mInfo("topic:%s, check colId:%d passed", pTopic->name, pCol->colId);
goto NEXT;
}
if (pCol->colId > 0 && pCol->colId == colId) {
sdbRelease(pSdb, pTopic);
nodesDestroyNode(pAst);
+ nodesDestroyList(pNodeList);
terrno = TSDB_CODE_MND_FIELD_CONFLICT_WITH_TOPIC;
mError("topic:%s, check colId:%d conflicted", pTopic->name, pCol->colId);
return -1;
}
- mDebug("topic:%s, check colId:%d passed", pTopic->name, pCol->colId);
+ mInfo("topic:%s, check colId:%d passed", pTopic->name, pCol->colId);
}
NEXT:
sdbRelease(pSdb, pTopic);
nodesDestroyNode(pAst);
+ nodesDestroyList(pNodeList);
}
return 0;
}
@@ -1222,22 +1226,24 @@ static int32_t mndCheckAlterColForStream(SMnode *pMnode, const char *stbFullName
SColumnNode *pCol = (SColumnNode *)pNode;
if (pCol->tableId != suid) {
- mDebug("stream:%s, check colId:%d passed", pStream->name, pCol->colId);
+ mInfo("stream:%s, check colId:%d passed", pStream->name, pCol->colId);
goto NEXT;
}
if (pCol->colId > 0 && pCol->colId == colId) {
sdbRelease(pSdb, pStream);
nodesDestroyNode(pAst);
+ nodesDestroyList(pNodeList);
terrno = TSDB_CODE_MND_STREAM_MUST_BE_DELETED;
mError("stream:%s, check colId:%d conflicted", pStream->name, pCol->colId);
return -1;
}
- mDebug("stream:%s, check colId:%d passed", pStream->name, pCol->colId);
+ mInfo("stream:%s, check colId:%d passed", pStream->name, pCol->colId);
}
NEXT:
sdbRelease(pSdb, pStream);
nodesDestroyNode(pAst);
+ nodesDestroyList(pNodeList);
}
return 0;
}
@@ -1250,7 +1256,7 @@ static int32_t mndCheckAlterColForTSma(SMnode *pMnode, const char *stbFullName,
pIter = sdbFetch(pSdb, SDB_SMA, pIter, (void **)&pSma);
if (pIter == NULL) break;
- mDebug("tsma:%s, check tag and column modifiable, stb:%s suid:%" PRId64 " colId:%d, sql:%s", pSma->name,
+ mInfo("tsma:%s, check tag and column modifiable, stb:%s suid:%" PRId64 " colId:%d, sql:%s", pSma->name,
stbFullName, suid, colId, pSma->sql);
SNode *pAst = NULL;
@@ -1266,25 +1272,27 @@ static int32_t mndCheckAlterColForTSma(SMnode *pMnode, const char *stbFullName,
SNode *pNode = NULL;
FOREACH(pNode, pNodeList) {
SColumnNode *pCol = (SColumnNode *)pNode;
- mDebug("tsma:%s, check colId:%d tableId:%" PRId64, pSma->name, pCol->colId, pCol->tableId);
+ mInfo("tsma:%s, check colId:%d tableId:%" PRId64, pSma->name, pCol->colId, pCol->tableId);
if ((pCol->tableId != suid) && (pSma->stbUid != suid)) {
- mDebug("tsma:%s, check colId:%d passed", pSma->name, pCol->colId);
+ mInfo("tsma:%s, check colId:%d passed", pSma->name, pCol->colId);
goto NEXT;
}
if ((pCol->colId) > 0 && (pCol->colId == colId)) {
sdbRelease(pSdb, pSma);
nodesDestroyNode(pAst);
+ nodesDestroyList(pNodeList);
terrno = TSDB_CODE_MND_FIELD_CONFLICT_WITH_TSMA;
mError("tsma:%s, check colId:%d conflicted", pSma->name, pCol->colId);
return -1;
}
- mDebug("tsma:%s, check colId:%d passed", pSma->name, pCol->colId);
+ mInfo("tsma:%s, check colId:%d passed", pSma->name, pCol->colId);
}
NEXT:
sdbRelease(pSdb, pSma);
nodesDestroyNode(pAst);
+ nodesDestroyList(pNodeList);
}
return 0;
}
@@ -1323,7 +1331,7 @@ static int32_t mndDropSuperTableTag(SMnode *pMnode, const SStbObj *pOld, SStbObj
pNew->numOfTags--;
pNew->tagVer++;
- mDebug("stb:%s, start to drop tag %s", pNew->name, tagName);
+ mInfo("stb:%s, start to drop tag %s", pNew->name, tagName);
return 0;
}
@@ -1368,7 +1376,7 @@ static int32_t mndAlterStbTagName(SMnode *pMnode, const SStbObj *pOld, SStbObj *
memcpy(pSchema->name, newTagName, TSDB_COL_NAME_LEN);
pNew->tagVer++;
- mDebug("stb:%s, start to modify tag %s to %s", pNew->name, oldTagName, newTagName);
+ mInfo("stb:%s, start to modify tag %s to %s", pNew->name, oldTagName, newTagName);
return 0;
}
@@ -1403,7 +1411,7 @@ static int32_t mndAlterStbTagBytes(SMnode *pMnode, const SStbObj *pOld, SStbObj
pTag->bytes = pField->bytes;
pNew->tagVer++;
- mDebug("stb:%s, start to modify tag len %s to %d", pNew->name, pField->name, pField->bytes);
+ mInfo("stb:%s, start to modify tag len %s to %d", pNew->name, pField->name, pField->bytes);
return 0;
}
@@ -1437,7 +1445,7 @@ static int32_t mndAddSuperTableColumn(const SStbObj *pOld, SStbObj *pNew, SArray
pSchema->colId = pNew->nextColId;
pNew->nextColId++;
- mDebug("stb:%s, start to add column %s", pNew->name, pSchema->name);
+ mInfo("stb:%s, start to add column %s", pNew->name, pSchema->name);
}
pNew->colVer++;
@@ -1474,7 +1482,7 @@ static int32_t mndDropSuperTableColumn(SMnode *pMnode, const SStbObj *pOld, SStb
pNew->numOfColumns--;
pNew->colVer++;
- mDebug("stb:%s, start to drop col %s", pNew->name, colName);
+ mInfo("stb:%s, start to drop col %s", pNew->name, colName);
return 0;
}
@@ -1518,7 +1526,7 @@ static int32_t mndAlterStbColumnBytes(SMnode *pMnode, const SStbObj *pOld, SStbO
pCol->bytes = pField->bytes;
pNew->colVer++;
- mDebug("stb:%s, start to modify col len %s to %d", pNew->name, pField->name, pField->bytes);
+ mInfo("stb:%s, start to modify col len %s to %d", pNew->name, pField->name, pField->bytes);
return 0;
}
@@ -1774,8 +1782,8 @@ static int32_t mndBuildSMAlterStbRsp(SDbObj *pDb, SStbObj *pObj, void **pCont, i
return 0;
}
-int32_t mndBuildSMCreateStbRsp(SMnode *pMnode, char* dbFName, char* stbFName, void **pCont, int32_t *pLen) {
- int32_t ret = -1;
+int32_t mndBuildSMCreateStbRsp(SMnode *pMnode, char *dbFName, char *stbFName, void **pCont, int32_t *pLen) {
+ int32_t ret = -1;
SDbObj *pDb = mndAcquireDb(pMnode, dbFName);
if (NULL == pDb) {
return -1;
@@ -1785,11 +1793,11 @@ int32_t mndBuildSMCreateStbRsp(SMnode *pMnode, char* dbFName, char* stbFName, vo
if (NULL == pObj) {
goto _OVER;
}
-
- SEncoder ec = {0};
- uint32_t contLen = 0;
+
+ SEncoder ec = {0};
+ uint32_t contLen = 0;
SMCreateStbRsp stbRsp = {0};
- SName name = {0};
+ SName name = {0};
tNameFromString(&name, pObj->name, T_NAME_ACCT | T_NAME_DB | T_NAME_TABLE);
stbRsp.pMeta = taosMemoryCalloc(1, sizeof(STableMetaRsp));
@@ -1821,12 +1829,12 @@ int32_t mndBuildSMCreateStbRsp(SMnode *pMnode, char* dbFName, char* stbFName, vo
*pLen = contLen;
ret = 0;
-
+
_OVER:
if (pObj) {
mndReleaseStb(pMnode, pObj);
}
-
+
if (pDb) {
mndReleaseDb(pMnode, pDb);
}
@@ -1834,14 +1842,13 @@ _OVER:
return ret;
}
-
static int32_t mndAlterStbImp(SMnode *pMnode, SRpcMsg *pReq, SDbObj *pDb, SStbObj *pStb, bool needRsp,
void *alterOriData, int32_t alterOriDataLen) {
int32_t code = -1;
- STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_RETRY, TRN_CONFLICT_DB_INSIDE, pReq);
+ STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_RETRY, TRN_CONFLICT_DB_INSIDE, pReq, "alter-stb");
if (pTrans == NULL) goto _OVER;
- mDebug("trans:%d, used to alter stb:%s", pTrans->id, pStb->name);
+ mInfo("trans:%d, used to alter stb:%s", pTrans->id, pStb->name);
mndTransSetDbName(pTrans, pDb->name, pStb->name);
if (needRsp) {
@@ -1937,7 +1944,7 @@ static int32_t mndProcessAlterStbReq(SRpcMsg *pReq) {
goto _OVER;
}
- mDebug("stb:%s, start to alter", alterReq.name);
+ mInfo("stb:%s, start to alter", alterReq.name);
if (mndCheckAlterStbReq(&alterReq) != 0) goto _OVER;
pDb = mndAcquireDbByStb(pMnode, alterReq.name);
@@ -2037,10 +2044,10 @@ static int32_t mndSetDropStbRedoActions(SMnode *pMnode, STrans *pTrans, SDbObj *
static int32_t mndDropStb(SMnode *pMnode, SRpcMsg *pReq, SDbObj *pDb, SStbObj *pStb) {
int32_t code = -1;
- STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_RETRY, TRN_CONFLICT_DB_INSIDE, pReq);
+ STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_RETRY, TRN_CONFLICT_DB_INSIDE, pReq, "drop-stb");
if (pTrans == NULL) goto _OVER;
- mDebug("trans:%d, used to drop stb:%s", pTrans->id, pStb->name);
+ mInfo("trans:%d, used to drop stb:%s", pTrans->id, pStb->name);
mndTransSetDbName(pTrans, pDb->name, pStb->name);
if (mndSetDropStbRedoLogs(pMnode, pTrans, pStb) != 0) goto _OVER;
@@ -2091,6 +2098,7 @@ static int32_t mndCheckDropStbForTopic(SMnode *pMnode, const char *stbFullName,
if (pCol->tableId == suid) {
sdbRelease(pSdb, pTopic);
nodesDestroyNode(pAst);
+ nodesDestroyList(pNodeList);
return -1;
} else {
goto NEXT;
@@ -2099,6 +2107,7 @@ static int32_t mndCheckDropStbForTopic(SMnode *pMnode, const char *stbFullName,
NEXT:
sdbRelease(pSdb, pTopic);
nodesDestroyNode(pAst);
+ nodesDestroyList(pNodeList);
}
return 0;
}
@@ -2136,6 +2145,7 @@ static int32_t mndCheckDropStbForStream(SMnode *pMnode, const char *stbFullName,
if (pCol->tableId == suid) {
sdbRelease(pSdb, pStream);
nodesDestroyNode(pAst);
+ nodesDestroyList(pNodeList);
return -1;
} else {
goto NEXT;
@@ -2144,6 +2154,7 @@ static int32_t mndCheckDropStbForStream(SMnode *pMnode, const char *stbFullName,
NEXT:
sdbRelease(pSdb, pStream);
nodesDestroyNode(pAst);
+ nodesDestroyList(pNodeList);
}
return 0;
}
@@ -2160,12 +2171,12 @@ static int32_t mndProcessDropStbReq(SRpcMsg *pReq) {
goto _OVER;
}
- mDebug("stb:%s, start to drop", dropReq.name);
+ mInfo("stb:%s, start to drop", dropReq.name);
pStb = mndAcquireStb(pMnode, dropReq.name);
if (pStb == NULL) {
if (dropReq.igNotExists) {
- mDebug("stb:%s, not exist, ignore not exist is set", dropReq.name);
+ mInfo("stb:%s, not exist, ignore not exist is set", dropReq.name);
code = 0;
goto _OVER;
} else {
@@ -2228,17 +2239,17 @@ static int32_t mndProcessTableMetaReq(SRpcMsg *pReq) {
}
if (0 == strcmp(infoReq.dbFName, TSDB_INFORMATION_SCHEMA_DB)) {
- mDebug("information_schema table:%s.%s, start to retrieve meta", infoReq.dbFName, infoReq.tbName);
+ mInfo("information_schema table:%s.%s, start to retrieve meta", infoReq.dbFName, infoReq.tbName);
if (mndBuildInsTableSchema(pMnode, infoReq.dbFName, infoReq.tbName, sysinfo, &metaRsp) != 0) {
goto _OVER;
}
} else if (0 == strcmp(infoReq.dbFName, TSDB_PERFORMANCE_SCHEMA_DB)) {
- mDebug("performance_schema table:%s.%s, start to retrieve meta", infoReq.dbFName, infoReq.tbName);
+ mInfo("performance_schema table:%s.%s, start to retrieve meta", infoReq.dbFName, infoReq.tbName);
if (mndBuildPerfsTableSchema(pMnode, infoReq.dbFName, infoReq.tbName, &metaRsp) != 0) {
goto _OVER;
}
} else {
- mDebug("stb:%s.%s, start to retrieve meta", infoReq.dbFName, infoReq.tbName);
+ mInfo("stb:%s.%s, start to retrieve meta", infoReq.dbFName, infoReq.tbName);
if (mndBuildStbSchema(pMnode, infoReq.dbFName, infoReq.tbName, &metaRsp, NULL) != 0) {
goto _OVER;
}
@@ -2285,17 +2296,17 @@ static int32_t mndProcessTableCfgReq(SRpcMsg *pReq) {
}
if (0 == strcmp(cfgReq.dbFName, TSDB_INFORMATION_SCHEMA_DB)) {
- mDebug("information_schema table:%s.%s, start to retrieve cfg", cfgReq.dbFName, cfgReq.tbName);
+ mInfo("information_schema table:%s.%s, start to retrieve cfg", cfgReq.dbFName, cfgReq.tbName);
if (mndBuildInsTableCfg(pMnode, cfgReq.dbFName, cfgReq.tbName, &cfgRsp) != 0) {
goto _OVER;
}
} else if (0 == strcmp(cfgReq.dbFName, TSDB_PERFORMANCE_SCHEMA_DB)) {
- mDebug("performance_schema table:%s.%s, start to retrieve cfg", cfgReq.dbFName, cfgReq.tbName);
+ mInfo("performance_schema table:%s.%s, start to retrieve cfg", cfgReq.dbFName, cfgReq.tbName);
if (mndBuildPerfsTableCfg(pMnode, cfgReq.dbFName, cfgReq.tbName, &cfgRsp) != 0) {
goto _OVER;
}
} else {
- mDebug("stb:%s.%s, start to retrieve cfg", cfgReq.dbFName, cfgReq.tbName);
+ mInfo("stb:%s.%s, start to retrieve cfg", cfgReq.dbFName, cfgReq.tbName);
if (mndBuildStbCfg(pMnode, cfgReq.dbFName, cfgReq.tbName, &cfgRsp) != 0) {
goto _OVER;
}
@@ -2354,7 +2365,7 @@ int32_t mndValidateStbInfo(SMnode *pMnode, SSTableVersion *pStbVersions, int32_t
STableMetaRsp metaRsp = {0};
int32_t smaVer = 0;
- mDebug("stb:%s.%s, start to retrieve meta", pStbVersion->dbFName, pStbVersion->stbName);
+ mInfo("stb:%s.%s, start to retrieve meta", pStbVersion->dbFName, pStbVersion->stbName);
if (mndBuildStbSchema(pMnode, pStbVersion->dbFName, pStbVersion->stbName, &metaRsp, &smaVer) != 0) {
metaRsp.numOfColumns = -1;
metaRsp.suid = pStbVersion->suid;
@@ -2570,3 +2581,14 @@ static void mndCancelGetNextStb(SMnode *pMnode, void *pIter) {
SSdb *pSdb = pMnode->pSdb;
sdbCancelFetch(pSdb, pIter);
}
+
+const char *mndGetStbStr(const char *src) {
+ char *posDb = strstr(src, TS_PATH_DELIMITER);
+ if (posDb != NULL) ++posDb;
+ if (posDb == NULL) return src;
+
+ char *posStb = strstr(posDb, TS_PATH_DELIMITER);
+ if (posStb != NULL) ++posStb;
+ if (posStb == NULL) return posDb;
+ return posStb;
+}
\ No newline at end of file
diff --git a/source/dnode/mnode/impl/src/mndStream.c b/source/dnode/mnode/impl/src/mndStream.c
index dd7a9e71eaa634a5bda506b318c6c4472a48726b..c6f46b28fce98ece39e76222c51e8302952bba8f 100644
--- a/source/dnode/mnode/impl/src/mndStream.c
+++ b/source/dnode/mnode/impl/src/mndStream.c
@@ -266,7 +266,7 @@ static int32_t mndBuildStreamObjFromCreateReq(SMnode *pMnode, SStreamObj *pObj,
SNode *pAst = NULL;
SQueryPlan *pPlan = NULL;
- mDebug("stream:%s to create", pCreate->name);
+ mInfo("stream:%s to create", pCreate->name);
memcpy(pObj->name, pCreate->name, TSDB_STREAM_FNAME_LEN);
pObj->createTime = taosGetTimestampMs();
pObj->updateTime = pObj->createTime;
@@ -285,7 +285,7 @@ static int32_t mndBuildStreamObjFromCreateReq(SMnode *pMnode, SStreamObj *pObj,
SDbObj *pSourceDb = mndAcquireDb(pMnode, pCreate->sourceDB);
if (pSourceDb == NULL) {
/*ASSERT(0);*/
- mDebug("stream:%s failed to create, source db %s not exist", pCreate->name, pObj->sourceDb);
+ mInfo("stream:%s failed to create, source db %s not exist", pCreate->name, pObj->sourceDb);
terrno = TSDB_CODE_MND_DB_NOT_EXIST;
return -1;
}
@@ -295,7 +295,7 @@ static int32_t mndBuildStreamObjFromCreateReq(SMnode *pMnode, SStreamObj *pObj,
SDbObj *pTargetDb = mndAcquireDbByStb(pMnode, pObj->targetSTbName);
if (pTargetDb == NULL) {
- mDebug("stream:%s failed to create, target db %s not exist", pCreate->name, pObj->targetDb);
+ mInfo("stream:%s failed to create, target db %s not exist", pCreate->name, pObj->targetDb);
terrno = TSDB_CODE_MND_DB_NOT_EXIST;
return -1;
}
@@ -638,7 +638,7 @@ static int32_t mndProcessCreateStreamReq(SRpcMsg *pReq) {
goto _OVER;
}
- mDebug("stream:%s, start to create, sql:%s", createStreamReq.name, createStreamReq.sql);
+ mInfo("stream:%s, start to create, sql:%s", createStreamReq.name, createStreamReq.sql);
if (mndCheckCreateStreamReq(&createStreamReq) != 0) {
mError("stream:%s, failed to create since %s", createStreamReq.name, terrstr());
@@ -648,7 +648,7 @@ static int32_t mndProcessCreateStreamReq(SRpcMsg *pReq) {
pStream = mndAcquireStream(pMnode, createStreamReq.name);
if (pStream != NULL) {
if (createStreamReq.igExists) {
- mDebug("stream:%s, already exist, ignore exist is set", createStreamReq.name);
+ mInfo("stream:%s, already exist, ignore exist is set", createStreamReq.name);
code = 0;
goto _OVER;
} else {
@@ -666,13 +666,13 @@ static int32_t mndProcessCreateStreamReq(SRpcMsg *pReq) {
goto _OVER;
}
- STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_ROLLBACK, TRN_CONFLICT_DB_INSIDE, pReq);
+ STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_ROLLBACK, TRN_CONFLICT_DB_INSIDE, pReq, "create-stream");
if (pTrans == NULL) {
mError("stream:%s, failed to create since %s", createStreamReq.name, terrstr());
goto _OVER;
}
- mndTransSetDbName(pTrans, createStreamReq.sourceDB, streamObj.targetDb);
- mDebug("trans:%d, used to create stream:%s", pTrans->id, createStreamReq.name);
+ mndTransSetDbName(pTrans, createStreamReq.sourceDB, streamObj.targetDb); // hack way
+ mInfo("trans:%d, used to create stream:%s", pTrans->id, createStreamReq.name);
// create stb for stream
if (mndCreateStbForStream(pMnode, pTrans, &streamObj, pReq->info.conn.user) < 0) {
@@ -746,7 +746,7 @@ static int32_t mndProcessDropStreamReq(SRpcMsg *pReq) {
if (pStream == NULL) {
if (dropReq.igNotExists) {
- mDebug("stream:%s, not exist, ignore not exist is set", dropReq.name);
+ mInfo("stream:%s, not exist, ignore not exist is set", dropReq.name);
sdbRelease(pMnode->pSdb, pStream);
return 0;
} else {
@@ -759,13 +759,13 @@ static int32_t mndProcessDropStreamReq(SRpcMsg *pReq) {
return -1;
}
- STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_RETRY, TRN_CONFLICT_NOTHING, pReq);
+ STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_RETRY, TRN_CONFLICT_NOTHING, pReq, "drop-stream");
if (pTrans == NULL) {
mError("stream:%s, failed to drop since %s", dropReq.name, terrstr());
sdbRelease(pMnode->pSdb, pStream);
return -1;
}
- mDebug("trans:%d, used to drop stream:%s", pTrans->id, dropReq.name);
+ mInfo("trans:%d, used to drop stream:%s", pTrans->id, dropReq.name);
// drop all tasks
if (mndDropStreamTasks(pMnode, pTrans, pStream) < 0) {
@@ -810,7 +810,7 @@ static int32_t mndProcessRecoverStreamReq(SRpcMsg *pReq) {
if (pStream == NULL) {
if (recoverReq.igNotExists) {
- mDebug("stream:%s, not exist, ignore not exist is set", recoverReq.name);
+ mInfo("stream:%s, not exist, ignore not exist is set", recoverReq.name);
sdbRelease(pMnode->pSdb, pStream);
return 0;
} else {
@@ -829,7 +829,7 @@ static int32_t mndProcessRecoverStreamReq(SRpcMsg *pReq) {
sdbRelease(pMnode->pSdb, pStream);
return -1;
}
- mDebug("trans:%d, used to drop stream:%s", pTrans->id, recoverReq.name);
+ mInfo("trans:%d, used to drop stream:%s", pTrans->id, recoverReq.name);
// broadcast to recover all tasks
if (mndRecoverStreamTasks(pMnode, pTrans, pStream) < 0) {
diff --git a/source/dnode/mnode/impl/src/mndSubscribe.c b/source/dnode/mnode/impl/src/mndSubscribe.c
index 10e520d9ec49a53e5fcedcf668a40732480aa75b..21e45407fd15f862f059213eaaec03df4f990bc0 100644
--- a/source/dnode/mnode/impl/src/mndSubscribe.c
+++ b/source/dnode/mnode/impl/src/mndSubscribe.c
@@ -287,6 +287,7 @@ static int32_t mndDoRebalance(SMnode *pMnode, const SMqRebInputObj *pInput, SMqR
if (consumerVgNum > minVgCnt) {
if (imbCnt < imbConsumerNum) {
if (consumerVgNum == minVgCnt + 1) {
+ imbCnt++;
continue;
} else {
// pop until equal minVg + 1
@@ -440,7 +441,7 @@ static int32_t mndDoRebalance(SMnode *pMnode, const SMqRebInputObj *pInput, SMqR
}
static int32_t mndPersistRebResult(SMnode *pMnode, SRpcMsg *pMsg, const SMqRebOutputObj *pOutput) {
- STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_ROLLBACK, TRN_CONFLICT_DB_INSIDE, pMsg);
+ STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_ROLLBACK, TRN_CONFLICT_DB_INSIDE, pMsg, "persist-reb");
mndTransSetDbName(pTrans, pOutput->pSub->dbName, NULL);
if (pTrans == NULL) return -1;
@@ -489,8 +490,12 @@ static int32_t mndPersistRebResult(SMnode *pMnode, SRpcMsg *pMsg, const SMqRebOu
mndReleaseConsumer(pMnode, pConsumerOld);
if (mndSetConsumerCommitLogs(pMnode, pTrans, pConsumerNew) != 0) {
ASSERT(0);
+ tDeleteSMqConsumerObj(pConsumerNew);
+ taosMemoryFree(pConsumerNew);
goto REB_FAIL;
}
+ tDeleteSMqConsumerObj(pConsumerNew);
+ taosMemoryFree(pConsumerNew);
}
// 3.3 set removed consumer
@@ -508,8 +513,12 @@ static int32_t mndPersistRebResult(SMnode *pMnode, SRpcMsg *pMsg, const SMqRebOu
mndReleaseConsumer(pMnode, pConsumerOld);
if (mndSetConsumerCommitLogs(pMnode, pTrans, pConsumerNew) != 0) {
ASSERT(0);
+ tDeleteSMqConsumerObj(pConsumerNew);
+ taosMemoryFree(pConsumerNew);
goto REB_FAIL;
}
+ tDeleteSMqConsumerObj(pConsumerNew);
+ taosMemoryFree(pConsumerNew);
}
#if 0
if (consumerNum) {
@@ -649,7 +658,7 @@ static int32_t mndProcessDropCgroupReq(SRpcMsg *pReq) {
SMqSubscribeObj *pSub = mndAcquireSubscribe(pMnode, dropReq.cgroup, dropReq.topic);
if (pSub == NULL) {
if (dropReq.igNotExists) {
- mDebug("cgroup:%s on topic:%s, not exist, ignore not exist is set", dropReq.cgroup, dropReq.topic);
+ mInfo("cgroup:%s on topic:%s, not exist, ignore not exist is set", dropReq.cgroup, dropReq.topic);
return 0;
} else {
terrno = TSDB_CODE_MND_SUBSCRIBE_NOT_EXIST;
@@ -665,14 +674,14 @@ static int32_t mndProcessDropCgroupReq(SRpcMsg *pReq) {
return -1;
}
- STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_ROLLBACK, TRN_CONFLICT_NOTHING, pReq);
+ STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_ROLLBACK, TRN_CONFLICT_NOTHING, pReq, "drop-cgroup");
if (pTrans == NULL) {
mError("cgroup: %s on topic:%s, failed to drop since %s", dropReq.cgroup, dropReq.topic, terrstr());
mndReleaseSubscribe(pMnode, pSub);
return -1;
}
- mDebug("trans:%d, used to drop cgroup:%s on topic %s", pTrans->id, dropReq.cgroup, dropReq.topic);
+ mInfo("trans:%d, used to drop cgroup:%s on topic %s", pTrans->id, dropReq.cgroup, dropReq.topic);
if (mndDropOffsetBySubKey(pMnode, pTrans, pSub->key) < 0) {
ASSERT(0);
@@ -899,6 +908,7 @@ int32_t mndDropSubByTopic(SMnode *pMnode, STrans *pTrans, const char *topicName)
// iter all vnode to delete handle
if (taosHashGetSize(pSub->consumerHash) != 0) {
sdbRelease(pSdb, pSub);
+ terrno = TSDB_CODE_MND_IN_REBALANCE;
return -1;
}
int32_t sz = taosArrayGetSize(pSub->unassignedVgs);
diff --git a/source/dnode/mnode/impl/src/mndSync.c b/source/dnode/mnode/impl/src/mndSync.c
index b7129cf56ebd7ece43094d4281a1c5b8f4464969..3655289f99268a469809a50a6704e13077fc08e9 100644
--- a/source/dnode/mnode/impl/src/mndSync.c
+++ b/source/dnode/mnode/impl/src/mndSync.c
@@ -50,7 +50,7 @@ void mndSyncCommitMsg(struct SSyncFSM *pFsm, const SRpcMsg *pMsg, SFsmCbMeta cbM
int32_t transId = sdbGetIdFromRaw(pMnode->pSdb, pRaw);
pMgmt->errCode = cbMeta.code;
- mDebug("trans:%d, is proposed, saved:%d code:0x%x, apply index:%" PRId64 " term:%" PRIu64 " config:%" PRId64
+ mInfo("trans:%d, is proposed, saved:%d code:0x%x, apply index:%" PRId64 " term:%" PRIu64 " config:%" PRId64
" role:%s raw:%p",
transId, pMgmt->transId, cbMeta.code, cbMeta.index, cbMeta.term, cbMeta.lastConfigIndex, syncStr(cbMeta.state),
pRaw);
@@ -68,7 +68,7 @@ void mndSyncCommitMsg(struct SSyncFSM *pFsm, const SRpcMsg *pMsg, SFsmCbMeta cbM
if (pMgmt->errCode != 0) {
mError("trans:%d, failed to propose since %s, post sem", transId, tstrerror(pMgmt->errCode));
} else {
- mDebug("trans:%d, is proposed and post sem", transId, tstrerror(pMgmt->errCode));
+ mInfo("trans:%d, is proposed and post sem", transId, tstrerror(pMgmt->errCode));
}
pMgmt->transId = 0;
taosWUnLockLatch(&pMgmt->lock);
@@ -88,7 +88,7 @@ void mndSyncCommitMsg(struct SSyncFSM *pFsm, const SRpcMsg *pMsg, SFsmCbMeta cbM
}
int32_t mndSyncGetSnapshot(struct SSyncFSM *pFsm, SSnapshot *pSnapshot, void *pReaderParam, void **ppReader) {
- mDebug("start to read snapshot from sdb in atomic way");
+ mInfo("start to read snapshot from sdb in atomic way");
SMnode *pMnode = pFsm->data;
return sdbStartRead(pMnode->pSdb, (SSdbIter **)ppReader, &pSnapshot->lastApplyIndex, &pSnapshot->lastApplyTerm,
&pSnapshot->lastConfigIndex);
@@ -105,11 +105,11 @@ void mndRestoreFinish(struct SSyncFSM *pFsm) {
SMnode *pMnode = pFsm->data;
if (!pMnode->deploy) {
- mInfo("mnode sync restore finished, and will handle outstanding transactions");
+ mInfo("vgId:1, sync restore finished, and will handle outstanding transactions");
mndTransPullup(pMnode);
mndSetRestore(pMnode, true);
} else {
- mInfo("mnode sync restore finished");
+ mInfo("vgId:1, sync restore finished");
}
}
@@ -118,7 +118,7 @@ void mndReConfig(struct SSyncFSM *pFsm, const SRpcMsg *pMsg, SReConfigCbMeta cbM
SSyncMgmt *pMgmt = &pMnode->syncMgmt;
pMgmt->errCode = cbMeta.code;
- mDebug("trans:-1, sync reconfig is proposed, saved:%d code:0x%x, index:%" PRId64 " term:%" PRId64, pMgmt->transId,
+ mInfo("trans:-1, sync reconfig is proposed, saved:%d code:0x%x, index:%" PRId64 " term:%" PRId64, pMgmt->transId,
cbMeta.code, cbMeta.index, cbMeta.term);
taosWLockLatch(&pMgmt->lock);
@@ -126,7 +126,7 @@ void mndReConfig(struct SSyncFSM *pFsm, const SRpcMsg *pMsg, SReConfigCbMeta cbM
if (pMgmt->errCode != 0) {
mError("trans:-1, failed to propose sync reconfig since %s, post sem", tstrerror(pMgmt->errCode));
} else {
- mDebug("trans:-1, sync reconfig is proposed, saved:%d code:0x%x, index:%" PRId64 " term:%" PRId64 " post sem",
+ mInfo("trans:-1, sync reconfig is proposed, saved:%d code:0x%x, index:%" PRId64 " term:%" PRId64 " post sem",
pMgmt->transId, cbMeta.code, cbMeta.index, cbMeta.term);
}
pMgmt->transId = 0;
@@ -136,13 +136,13 @@ void mndReConfig(struct SSyncFSM *pFsm, const SRpcMsg *pMsg, SReConfigCbMeta cbM
}
int32_t mndSnapshotStartRead(struct SSyncFSM *pFsm, void *pParam, void **ppReader) {
- mDebug("start to read snapshot from sdb");
+ mInfo("start to read snapshot from sdb");
SMnode *pMnode = pFsm->data;
return sdbStartRead(pMnode->pSdb, (SSdbIter **)ppReader, NULL, NULL, NULL);
}
int32_t mndSnapshotStopRead(struct SSyncFSM *pFsm, void *pReader) {
- mDebug("stop to read snapshot from sdb");
+ mInfo("stop to read snapshot from sdb");
SMnode *pMnode = pFsm->data;
return sdbStopRead(pMnode->pSdb, pReader);
}
@@ -174,12 +174,12 @@ int32_t mndSnapshotDoWrite(struct SSyncFSM *pFsm, void *pWriter, void *pBuf, int
void mndLeaderTransfer(struct SSyncFSM *pFsm, const SRpcMsg *pMsg, SFsmCbMeta cbMeta) {
SMnode *pMnode = pFsm->data;
atomic_store_8(&(pMnode->syncMgmt.leaderTransferFinish), 1);
- mDebug("vgId:1, mnode leader transfer finish");
+ mInfo("vgId:1, mnode leader transfer finish");
}
static void mndBecomeFollower(struct SSyncFSM *pFsm) {
SMnode *pMnode = pFsm->data;
- mDebug("vgId:1, become follower and post sem");
+ mInfo("vgId:1, become follower and post sem");
taosWLockLatch(&pMnode->syncMgmt.lock);
if (pMnode->syncMgmt.transId != 0) {
@@ -190,7 +190,7 @@ static void mndBecomeFollower(struct SSyncFSM *pFsm) {
}
static void mndBecomeLeader(struct SSyncFSM *pFsm) {
- mDebug("vgId:1, become leader");
+ mInfo("vgId:1, become leader");
SMnode *pMnode = pFsm->data;
}
@@ -228,7 +228,7 @@ int32_t mndInitSync(SMnode *pMnode) {
syncInfo.isStandBy = pMgmt->standby;
syncInfo.snapshotStrategy = SYNC_STRATEGY_STANDARD_SNAPSHOT;
- mDebug("start to open mnode sync, standby:%d", pMgmt->standby);
+ mInfo("vgId:1, start to open sync, standby:%d", pMgmt->standby);
if (pMgmt->standby || pMgmt->replica.id > 0) {
SSyncCfg *pCfg = &syncInfo.syncCfg;
pCfg->replicaNum = 1;
@@ -236,7 +236,7 @@ int32_t mndInitSync(SMnode *pMnode) {
SNodeInfo *pNode = &pCfg->nodeInfo[0];
tstrncpy(pNode->nodeFqdn, pMgmt->replica.fqdn, sizeof(pNode->nodeFqdn));
pNode->nodePort = pMgmt->replica.port;
- mDebug("mnode ep:%s:%u", pNode->nodeFqdn, pNode->nodePort);
+ mInfo("vgId:1, ep:%s:%u", pNode->nodeFqdn, pNode->nodePort);
}
tsem_init(&pMgmt->syncSem, 0, 0);
@@ -255,14 +255,14 @@ int32_t mndInitSync(SMnode *pMnode) {
setHeartbeatTimerMS(pMgmt->sync, 300);
*/
- mDebug("mnode-sync is opened, id:%" PRId64, pMgmt->sync);
+ mInfo("mnode-sync is opened, id:%" PRId64, pMgmt->sync);
return 0;
}
void mndCleanupSync(SMnode *pMnode) {
SSyncMgmt *pMgmt = &pMnode->syncMgmt;
syncStop(pMgmt->sync);
- mDebug("mnode-sync is stopped, id:%" PRId64, pMgmt->sync);
+ mInfo("mnode-sync is stopped, id:%" PRId64, pMgmt->sync);
tsem_destroy(&pMgmt->syncSem);
memset(pMgmt, 0, sizeof(SSyncMgmt));
@@ -284,7 +284,7 @@ int32_t mndSyncPropose(SMnode *pMnode, SSdbRaw *pRaw, int32_t transId) {
return -1;
} else {
pMgmt->transId = transId;
- mDebug("trans:%d, will be proposed", pMgmt->transId);
+ mInfo("trans:%d, will be proposed", pMgmt->transId);
taosWUnLockLatch(&pMgmt->lock);
}
@@ -314,7 +314,7 @@ void mndSyncStart(SMnode *pMnode) {
SSyncMgmt *pMgmt = &pMnode->syncMgmt;
syncSetMsgCb(pMgmt->sync, &pMnode->msgCb);
syncStart(pMgmt->sync);
- mDebug("mnode sync started, id:%" PRId64 " standby:%d", pMgmt->sync, pMgmt->standby);
+ mInfo("vgId:1, sync started, id:%" PRId64 " standby:%d", pMgmt->sync, pMgmt->standby);
}
void mndSyncStop(SMnode *pMnode) {
diff --git a/source/dnode/mnode/impl/src/mndTelem.c b/source/dnode/mnode/impl/src/mndTelem.c
index 93f7531a272860d63351ff1a008fa11f48b5a17c..ff2461b63b41da33df1a6c2479ed8d58e963a484 100644
--- a/source/dnode/mnode/impl/src/mndTelem.c
+++ b/source/dnode/mnode/impl/src/mndTelem.c
@@ -133,7 +133,7 @@ static int32_t mndProcessTelemTimer(SRpcMsg* pReq) {
if (taosSendHttpReport(tsTelemServer, tsTelemPort, pCont, strlen(pCont), HTTP_FLAT) != 0) {
mError("failed to send telemetry report");
} else {
- mTrace("succeed to send telemetry report");
+ mInfo("succeed to send telemetry report");
}
taosMemoryFree(pCont);
}
diff --git a/source/dnode/mnode/impl/src/mndTopic.c b/source/dnode/mnode/impl/src/mndTopic.c
index ff208eae607ab0fa57be7431771f209e18e02ce5..7308dc375ea37cea765b8016b5006797320e27e6 100644
--- a/source/dnode/mnode/impl/src/mndTopic.c
+++ b/source/dnode/mnode/impl/src/mndTopic.c
@@ -224,6 +224,7 @@ SSdbRow *mndTopicActionDecode(SSdbRaw *pRaw) {
if (taosDecodeSSchemaWrapper(buf, &pTopic->schema) == NULL) {
goto TOPIC_DECODE_OVER;
}
+ taosMemoryFree(buf);
} else {
pTopic->schema.nCols = 0;
pTopic->schema.version = 0;
@@ -266,6 +267,11 @@ static int32_t mndTopicActionInsert(SSdb *pSdb, SMqTopicObj *pTopic) {
static int32_t mndTopicActionDelete(SSdb *pSdb, SMqTopicObj *pTopic) {
mTrace("topic:%s, perform delete action", pTopic->name);
+ taosMemoryFreeClear(pTopic->sql);
+ taosMemoryFreeClear(pTopic->ast);
+ taosMemoryFreeClear(pTopic->physicalPlan);
+ if (pTopic->schema.nCols) taosMemoryFreeClear(pTopic->schema.pSchema);
+ taosArrayDestroy(pTopic->ntbColIds);
return 0;
}
@@ -347,11 +353,12 @@ static int32_t extractTopicTbInfo(SNode *pAst, SMqTopicObj *pTopic) {
}
}
}
+ nodesDestroyList(pNodeList);
return 0;
}
static int32_t mndCreateTopic(SMnode *pMnode, SRpcMsg *pReq, SCMCreateTopicReq *pCreate, SDbObj *pDb) {
- mDebug("topic:%s to create", pCreate->name);
+ mInfo("topic:%s to create", pCreate->name);
SMqTopicObj topicObj = {0};
tstrncpy(topicObj.name, pCreate->name, TSDB_TOPIC_FNAME_LEN);
tstrncpy(topicObj.db, pDb->name, TSDB_DB_FNAME_LEN);
@@ -372,6 +379,8 @@ static int32_t mndCreateTopic(SMnode *pMnode, SRpcMsg *pReq, SCMCreateTopicReq *
topicObj.ast = strdup(pCreate->ast);
topicObj.astLen = strlen(pCreate->ast) + 1;
+ qDebugL("ast %s", topicObj.ast);
+
SNode *pAst = NULL;
if (nodesStringToNode(pCreate->ast, &pAst) != 0) {
taosMemoryFree(topicObj.ast);
@@ -416,6 +425,8 @@ static int32_t mndCreateTopic(SMnode *pMnode, SRpcMsg *pReq, SCMCreateTopicReq *
taosMemoryFree(topicObj.sql);
return -1;
}
+ nodesDestroyNode(pAst);
+ nodesDestroyNode((SNode *)pPlan);
} else if (pCreate->subType == TOPIC_SUB_TYPE__TABLE) {
SStbObj *pStb = mndAcquireStb(pMnode, pCreate->subStbName);
if (pStb == NULL) {
@@ -431,7 +442,7 @@ static int32_t mndCreateTopic(SMnode *pMnode, SRpcMsg *pReq, SCMCreateTopicReq *
/*topicObj.withTbName = 1;*/
/*topicObj.withSchema = 1;*/
- STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_ROLLBACK, TRN_CONFLICT_NOTHING, pReq);
+ STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_ROLLBACK, TRN_CONFLICT_NOTHING, pReq, "create-topic");
if (pTrans == NULL) {
mError("topic:%s, failed to create since %s", pCreate->name, terrstr());
taosMemoryFreeClear(topicObj.ast);
@@ -439,7 +450,7 @@ static int32_t mndCreateTopic(SMnode *pMnode, SRpcMsg *pReq, SCMCreateTopicReq *
taosMemoryFreeClear(topicObj.physicalPlan);
return -1;
}
- mDebug("trans:%d, used to create topic:%s", pTrans->id, pCreate->name);
+ mInfo("trans:%d, used to create topic:%s", pTrans->id, pCreate->name);
SSdbRaw *pCommitRaw = mndTopicActionEncode(&topicObj);
if (pCommitRaw == NULL || mndTransAppendCommitlog(pTrans, pCommitRaw) != 0) {
@@ -512,6 +523,10 @@ static int32_t mndCreateTopic(SMnode *pMnode, SRpcMsg *pReq, SCMCreateTopicReq *
}
taosMemoryFreeClear(topicObj.physicalPlan);
+ taosMemoryFreeClear(topicObj.sql);
+ taosMemoryFreeClear(topicObj.ast);
+ taosArrayDestroy(topicObj.ntbColIds);
+ if (topicObj.schema.nCols) taosMemoryFreeClear(topicObj.schema.pSchema);
mndTransDrop(pTrans);
return TSDB_CODE_ACTION_IN_PROGRESS;
}
@@ -528,7 +543,7 @@ static int32_t mndProcessCreateTopicReq(SRpcMsg *pReq) {
goto _OVER;
}
- mDebug("topic:%s, start to create, sql:%s", createTopicReq.name, createTopicReq.sql);
+ mInfo("topic:%s, start to create, sql:%s", createTopicReq.name, createTopicReq.sql);
if (mndCheckCreateTopicReq(&createTopicReq) != 0) {
mError("topic:%s, failed to create since %s", createTopicReq.name, terrstr());
@@ -538,7 +553,7 @@ static int32_t mndProcessCreateTopicReq(SRpcMsg *pReq) {
pTopic = mndAcquireTopic(pMnode, createTopicReq.name);
if (pTopic != NULL) {
if (createTopicReq.igExists) {
- mDebug("topic:%s, already exist, ignore exist is set", createTopicReq.name);
+ mInfo("topic:%s, already exist, ignore exist is set", createTopicReq.name);
code = 0;
goto _OVER;
} else {
@@ -606,7 +621,7 @@ static int32_t mndProcessDropTopicReq(SRpcMsg *pReq) {
SMqTopicObj *pTopic = mndAcquireTopic(pMnode, dropReq.name);
if (pTopic == NULL) {
if (dropReq.igNotExists) {
- mDebug("topic:%s, not exist, ignore not exist is set", dropReq.name);
+ mInfo("topic:%s, not exist, ignore not exist is set", dropReq.name);
return 0;
} else {
terrno = TSDB_CODE_MND_TOPIC_NOT_EXIST;
@@ -650,7 +665,7 @@ static int32_t mndProcessDropTopicReq(SRpcMsg *pReq) {
return -1;
}
- STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_ROLLBACK, TRN_CONFLICT_DB_INSIDE, pReq);
+ STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_ROLLBACK, TRN_CONFLICT_DB_INSIDE, pReq, "drop-topic");
mndTransSetDbName(pTrans, pTopic->db, NULL);
if (pTrans == NULL) {
mError("topic:%s, failed to drop since %s", pTopic->name, terrstr());
@@ -658,7 +673,7 @@ static int32_t mndProcessDropTopicReq(SRpcMsg *pReq) {
return -1;
}
- mDebug("trans:%d, used to drop topic:%s", pTrans->id, pTopic->name);
+ mInfo("trans:%d, used to drop topic:%s", pTrans->id, pTopic->name);
#if 0
if (mndDropOffsetByTopic(pMnode, pTrans, dropReq.name) < 0) {
@@ -713,7 +728,6 @@ static int32_t mndProcessDropTopicReq(SRpcMsg *pReq) {
mndReleaseTopic(pMnode, pTopic);
if (code != 0) {
- terrno = code;
mError("topic:%s, failed to drop since %s", dropReq.name, terrstr());
return -1;
}
@@ -763,8 +777,9 @@ static int32_t mndRetrieveTopic(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock *pBl
int32_t cols = 0;
char topicName[TSDB_TOPIC_NAME_LEN + VARSTR_HEADER_SIZE] = {0};
- tNameFromString(&n, pTopic->name, T_NAME_ACCT | T_NAME_DB);
- tNameGetDbName(&n, varDataVal(topicName));
+ strcpy(varDataVal(topicName), mndGetDbStr(pTopic->name));
+ /*tNameFromString(&n, pTopic->name, T_NAME_ACCT | T_NAME_DB);*/
+ /*tNameGetDbName(&n, varDataVal(topicName));*/
varDataSetLen(topicName, strlen(varDataVal(topicName)));
pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
colDataAppend(pColInfo, numOfRows, (const char *)topicName, false);
diff --git a/source/dnode/mnode/impl/src/mndTrans.c b/source/dnode/mnode/impl/src/mndTrans.c
index 1d8d62e534a10c8ee5cc1c83686302dce91d1b33..9613d924c460a7b3a89fb17accd6abdbb698ed2c 100644
--- a/source/dnode/mnode/impl/src/mndTrans.c
+++ b/source/dnode/mnode/impl/src/mndTrans.c
@@ -25,7 +25,7 @@
#define TRANS_VER_NUMBER 1
#define TRANS_ARRAY_SIZE 8
-#define TRANS_RESERVE_SIZE 64
+#define TRANS_RESERVE_SIZE 48
static SSdbRaw *mndTransActionEncode(STrans *pTrans);
static SSdbRow *mndTransActionDecode(SSdbRaw *pRaw);
@@ -127,8 +127,8 @@ static SSdbRaw *mndTransActionEncode(STrans *pTrans) {
SDB_SET_INT8(pRaw, dataPos, 0, _OVER)
SDB_SET_INT16(pRaw, dataPos, pTrans->originRpcType, _OVER)
SDB_SET_INT64(pRaw, dataPos, pTrans->createdTime, _OVER)
- SDB_SET_BINARY(pRaw, dataPos, pTrans->dbname1, TSDB_TABLE_FNAME_LEN, _OVER)
- SDB_SET_BINARY(pRaw, dataPos, pTrans->dbname2, TSDB_TABLE_FNAME_LEN, _OVER)
+ SDB_SET_BINARY(pRaw, dataPos, pTrans->dbname, TSDB_TABLE_FNAME_LEN, _OVER)
+ SDB_SET_BINARY(pRaw, dataPos, pTrans->stbname, TSDB_TABLE_FNAME_LEN, _OVER)
SDB_SET_INT32(pRaw, dataPos, pTrans->redoActionPos, _OVER)
int32_t redoActionNum = taosArrayGetSize(pTrans->redoActions);
@@ -223,6 +223,7 @@ static SSdbRaw *mndTransActionEncode(STrans *pTrans) {
SDB_SET_BINARY(pRaw, dataPos, pTrans->param, pTrans->paramLen, _OVER)
}
+ SDB_SET_BINARY(pRaw, dataPos, pTrans->opername, TSDB_TRANS_OPER_LEN, _OVER)
SDB_SET_RESERVE(pRaw, dataPos, TRANS_RESERVE_SIZE, _OVER)
SDB_SET_DATALEN(pRaw, dataPos, _OVER)
@@ -289,8 +290,8 @@ static SSdbRow *mndTransActionDecode(SSdbRaw *pRaw) {
pTrans->oper = oper;
SDB_GET_INT16(pRaw, dataPos, &pTrans->originRpcType, _OVER)
SDB_GET_INT64(pRaw, dataPos, &pTrans->createdTime, _OVER)
- SDB_GET_BINARY(pRaw, dataPos, pTrans->dbname1, TSDB_TABLE_FNAME_LEN, _OVER)
- SDB_GET_BINARY(pRaw, dataPos, pTrans->dbname2, TSDB_TABLE_FNAME_LEN, _OVER)
+ SDB_GET_BINARY(pRaw, dataPos, pTrans->dbname, TSDB_TABLE_FNAME_LEN, _OVER)
+ SDB_GET_BINARY(pRaw, dataPos, pTrans->stbname, TSDB_TABLE_FNAME_LEN, _OVER)
SDB_GET_INT32(pRaw, dataPos, &pTrans->redoActionPos, _OVER)
SDB_GET_INT32(pRaw, dataPos, &redoActionNum, _OVER)
SDB_GET_INT32(pRaw, dataPos, &undoActionNum, _OVER)
@@ -305,6 +306,7 @@ static SSdbRow *mndTransActionDecode(SSdbRaw *pRaw) {
if (pTrans->commitActions == NULL) goto _OVER;
for (int32_t i = 0; i < redoActionNum; ++i) {
+ memset(&action, 0, sizeof(action));
SDB_GET_INT32(pRaw, dataPos, &action.id, _OVER)
SDB_GET_INT32(pRaw, dataPos, &action.errCode, _OVER)
SDB_GET_INT32(pRaw, dataPos, &action.acceptableCode, _OVER)
@@ -319,7 +321,7 @@ static SSdbRow *mndTransActionDecode(SSdbRaw *pRaw) {
SDB_GET_INT32(pRaw, dataPos, &dataLen, _OVER)
action.pRaw = taosMemoryMalloc(dataLen);
if (action.pRaw == NULL) goto _OVER;
- // mTrace("raw:%p, is created", pData);
+ mTrace("raw:%p, is created", pData);
SDB_GET_BINARY(pRaw, dataPos, (void *)action.pRaw, dataLen, _OVER);
if (taosArrayPush(pTrans->redoActions, &action) == NULL) goto _OVER;
action.pRaw = NULL;
@@ -340,6 +342,7 @@ static SSdbRow *mndTransActionDecode(SSdbRaw *pRaw) {
}
for (int32_t i = 0; i < undoActionNum; ++i) {
+ memset(&action, 0, sizeof(action));
SDB_GET_INT32(pRaw, dataPos, &action.id, _OVER)
SDB_GET_INT32(pRaw, dataPos, &action.errCode, _OVER)
SDB_GET_INT32(pRaw, dataPos, &action.acceptableCode, _OVER)
@@ -354,7 +357,7 @@ static SSdbRow *mndTransActionDecode(SSdbRaw *pRaw) {
SDB_GET_INT32(pRaw, dataPos, &dataLen, _OVER)
action.pRaw = taosMemoryMalloc(dataLen);
if (action.pRaw == NULL) goto _OVER;
- // mTrace("raw:%p, is created", action.pRaw);
+ mTrace("raw:%p, is created", action.pRaw);
SDB_GET_BINARY(pRaw, dataPos, (void *)action.pRaw, dataLen, _OVER);
if (taosArrayPush(pTrans->undoActions, &action) == NULL) goto _OVER;
action.pRaw = NULL;
@@ -375,6 +378,7 @@ static SSdbRow *mndTransActionDecode(SSdbRaw *pRaw) {
}
for (int32_t i = 0; i < commitActionNum; ++i) {
+ memset(&action, 0, sizeof(action));
SDB_GET_INT32(pRaw, dataPos, &action.id, _OVER)
SDB_GET_INT32(pRaw, dataPos, &action.errCode, _OVER)
SDB_GET_INT32(pRaw, dataPos, &action.acceptableCode, _OVER)
@@ -389,7 +393,7 @@ static SSdbRow *mndTransActionDecode(SSdbRaw *pRaw) {
SDB_GET_INT32(pRaw, dataPos, &dataLen, _OVER)
action.pRaw = taosMemoryMalloc(dataLen);
if (action.pRaw == NULL) goto _OVER;
- // mTrace("raw:%p, is created", action.pRaw);
+ mTrace("raw:%p, is created", action.pRaw);
SDB_GET_BINARY(pRaw, dataPos, (void *)action.pRaw, dataLen, _OVER);
if (taosArrayPush(pTrans->commitActions, &action) == NULL) goto _OVER;
action.pRaw = NULL;
@@ -417,6 +421,7 @@ static SSdbRow *mndTransActionDecode(SSdbRaw *pRaw) {
SDB_GET_BINARY(pRaw, dataPos, pTrans->param, pTrans->paramLen, _OVER);
}
+ SDB_GET_BINARY(pRaw, dataPos, pTrans->opername, TSDB_TRANS_OPER_LEN, _OVER);
SDB_GET_RESERVE(pRaw, dataPos, TRANS_RESERVE_SIZE, _OVER)
terrno = 0;
@@ -455,12 +460,26 @@ static const char *mndTransStr(ETrnStage stage) {
}
}
+static void mndSetTransLastAction(STrans *pTrans, STransAction *pAction) {
+ if (pAction != NULL) {
+ pTrans->lastAction = pAction->id;
+ pTrans->lastMsgType = pAction->msgType;
+ pTrans->lastEpset = pAction->epSet;
+ pTrans->lastErrorNo = pAction->errCode;
+ } else {
+ pTrans->lastAction = 0;
+ pTrans->lastMsgType = 0;
+ memset(&pTrans->lastEpset, 0, sizeof(pTrans->lastEpset));
+ pTrans->lastErrorNo = 0;
+ }
+}
+
static void mndTransTestStartFunc(SMnode *pMnode, void *param, int32_t paramLen) {
- mDebug("test trans start, param:%s, len:%d", (char *)param, paramLen);
+ mInfo("test trans start, param:%s, len:%d", (char *)param, paramLen);
}
static void mndTransTestStopFunc(SMnode *pMnode, void *param, int32_t paramLen) {
- mDebug("test trans stop, param:%s, len:%d", (char *)param, paramLen);
+ mInfo("test trans stop, param:%s, len:%d", (char *)param, paramLen);
}
static TransCbFp mndTransGetCbFp(ETrnFunc ftype) {
@@ -582,7 +601,8 @@ void mndReleaseTrans(SMnode *pMnode, STrans *pTrans) {
sdbRelease(pSdb, pTrans);
}
-STrans *mndTransCreate(SMnode *pMnode, ETrnPolicy policy, ETrnConflct conflict, const SRpcMsg *pReq) {
+STrans *mndTransCreate(SMnode *pMnode, ETrnPolicy policy, ETrnConflct conflict, const SRpcMsg *pReq,
+ const char *opername) {
STrans *pTrans = taosMemoryCalloc(1, sizeof(STrans));
if (pTrans == NULL) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
@@ -590,6 +610,10 @@ STrans *mndTransCreate(SMnode *pMnode, ETrnPolicy policy, ETrnConflct conflict,
return NULL;
}
+ if (opername != NULL) {
+ tstrncpy(pTrans->opername, opername, TSDB_TRANS_OPER_LEN);
+ }
+
pTrans->id = sdbGetMaxId(pMnode->pSdb, SDB_TRANS);
pTrans->stage = TRN_STAGE_PREPARE;
pTrans->policy = policy;
@@ -706,8 +730,8 @@ int32_t mndSetRpcInfoForDbTrans(SMnode *pMnode, SRpcMsg *pMsg, EOperType oper, c
if (pIter == NULL) break;
if (pTrans->oper == oper) {
- if (strcasecmp(dbname, pTrans->dbname1) == 0) {
- mDebug("trans:%d, db:%s oper:%d matched with input", pTrans->id, dbname, oper);
+ if (strcasecmp(dbname, pTrans->dbname) == 0) {
+ mInfo("trans:%d, db:%s oper:%d matched with input", pTrans->id, dbname, oper);
if (pTrans->pRpcArray == NULL) {
pTrans->pRpcArray = taosArrayInit(1, sizeof(SRpcHandleInfo));
}
@@ -725,12 +749,12 @@ int32_t mndSetRpcInfoForDbTrans(SMnode *pMnode, SRpcMsg *pMsg, EOperType oper, c
return code;
}
-void mndTransSetDbName(STrans *pTrans, const char *dbname1, const char *dbname2) {
- if (dbname1 != NULL) {
- tstrncpy(pTrans->dbname1, dbname1, TSDB_TABLE_FNAME_LEN);
+void mndTransSetDbName(STrans *pTrans, const char *dbname, const char *stbname) {
+ if (dbname != NULL) {
+ tstrncpy(pTrans->dbname, dbname, TSDB_TABLE_FNAME_LEN);
}
- if (dbname2 != NULL) {
- tstrncpy(pTrans->dbname2, dbname2, TSDB_TABLE_FNAME_LEN);
+ if (stbname != NULL) {
+ tstrncpy(pTrans->stbname, stbname, TSDB_TABLE_FNAME_LEN);
}
}
@@ -746,7 +770,7 @@ static int32_t mndTransSync(SMnode *pMnode, STrans *pTrans) {
}
sdbSetRawStatus(pRaw, SDB_STATUS_READY);
- mDebug("trans:%d, sync to other mnodes, stage:%s", pTrans->id, mndTransStr(pTrans->stage));
+ mInfo("trans:%d, sync to other mnodes, stage:%s", pTrans->id, mndTransStr(pTrans->stage));
int32_t code = mndSyncPropose(pMnode, pRaw, pTrans->id);
if (code != 0) {
mError("trans:%d, failed to sync since %s", pTrans->id, terrstr());
@@ -755,13 +779,13 @@ static int32_t mndTransSync(SMnode *pMnode, STrans *pTrans) {
}
sdbFreeRaw(pRaw);
- mDebug("trans:%d, sync finished", pTrans->id);
+ mInfo("trans:%d, sync finished", pTrans->id);
return 0;
}
-static bool mndCheckDbConflict(const char *db, STrans *pTrans) {
- if (db[0] == 0) return false;
- if (strcasecmp(db, pTrans->dbname1) == 0 || strcasecmp(db, pTrans->dbname2) == 0) return true;
+static bool mndCheckDbConflict(const char *conflict, STrans *pTrans) {
+ if (conflict[0] == 0) return false;
+ if (strcasecmp(conflict, pTrans->dbname) == 0 || strcasecmp(conflict, pTrans->stbname) == 0) return true;
return false;
}
@@ -780,20 +804,30 @@ static bool mndCheckTransConflict(SMnode *pMnode, STrans *pNew) {
if (pNew->conflict == TRN_CONFLICT_DB) {
if (pTrans->conflict == TRN_CONFLICT_GLOBAL) conflict = true;
if (pTrans->conflict == TRN_CONFLICT_DB || pTrans->conflict == TRN_CONFLICT_DB_INSIDE) {
- if (mndCheckDbConflict(pNew->dbname1, pTrans)) conflict = true;
- if (mndCheckDbConflict(pNew->dbname2, pTrans)) conflict = true;
+ if (mndCheckDbConflict(pNew->dbname, pTrans)) conflict = true;
+ if (mndCheckDbConflict(pNew->stbname, pTrans)) conflict = true;
}
}
if (pNew->conflict == TRN_CONFLICT_DB_INSIDE) {
if (pTrans->conflict == TRN_CONFLICT_GLOBAL) conflict = true;
if (pTrans->conflict == TRN_CONFLICT_DB) {
- if (mndCheckDbConflict(pNew->dbname1, pTrans)) conflict = true;
- if (mndCheckDbConflict(pNew->dbname2, pTrans)) conflict = true;
+ if (mndCheckDbConflict(pNew->dbname, pTrans)) conflict = true;
+ if (mndCheckDbConflict(pNew->stbname, pTrans)) conflict = true;
+ }
+ if (pTrans->conflict == TRN_CONFLICT_DB_INSIDE) {
+ if (mndCheckDbConflict(pNew->stbname, pTrans)) conflict = true; // for stb
}
}
- mError("trans:%d, can't execute since conflict with trans:%d, db1:%s db2:%s", pNew->id, pTrans->id, pTrans->dbname1,
- pTrans->dbname2);
+ if (conflict) {
+ mError("trans:%d, db:%s stb:%s type:%d, can't execute since conflict with trans:%d db:%s stb:%s type:%d",
+ pNew->id, pNew->dbname, pNew->stbname, pNew->conflict, pTrans->id, pTrans->dbname, pTrans->stbname,
+ pTrans->conflict);
+ } else {
+ mInfo("trans:%d, db:%s stb:%s type:%d, not conflict with trans:%d db:%s stb:%s type:%d", pNew->id,
+ pNew->dbname, pNew->stbname, pNew->conflict, pTrans->id, pTrans->dbname, pTrans->stbname,
+ pTrans->conflict);
+ }
sdbRelease(pMnode->pSdb, pTrans);
}
@@ -802,7 +836,7 @@ static bool mndCheckTransConflict(SMnode *pMnode, STrans *pNew) {
int32_t mndTransPrepare(SMnode *pMnode, STrans *pTrans) {
if (pTrans->conflict == TRN_CONFLICT_DB || pTrans->conflict == TRN_CONFLICT_DB_INSIDE) {
- if (strlen(pTrans->dbname1) == 0 && strlen(pTrans->dbname2) == 0) {
+ if (strlen(pTrans->dbname) == 0 && strlen(pTrans->stbname) == 0) {
terrno = TSDB_CODE_MND_TRANS_CONFLICT;
mError("trans:%d, failed to prepare conflict db not set", pTrans->id);
return -1;
@@ -821,12 +855,12 @@ int32_t mndTransPrepare(SMnode *pMnode, STrans *pTrans) {
return -1;
}
- mDebug("trans:%d, prepare transaction", pTrans->id);
+ mInfo("trans:%d, prepare transaction", pTrans->id);
if (mndTransSync(pMnode, pTrans) != 0) {
mError("trans:%d, failed to prepare since %s", pTrans->id, terrstr());
return -1;
}
- mDebug("trans:%d, prepare finished", pTrans->id);
+ mInfo("trans:%d, prepare finished", pTrans->id);
STrans *pNew = mndAcquireTrans(pMnode, pTrans->id);
if (pNew == NULL) {
@@ -847,22 +881,22 @@ int32_t mndTransPrepare(SMnode *pMnode, STrans *pTrans) {
}
static int32_t mndTransCommit(SMnode *pMnode, STrans *pTrans) {
- mDebug("trans:%d, commit transaction", pTrans->id);
+ mInfo("trans:%d, commit transaction", pTrans->id);
if (mndTransSync(pMnode, pTrans) != 0) {
mError("trans:%d, failed to commit since %s", pTrans->id, terrstr());
return -1;
}
- mDebug("trans:%d, commit finished", pTrans->id);
+ mInfo("trans:%d, commit finished", pTrans->id);
return 0;
}
static int32_t mndTransRollback(SMnode *pMnode, STrans *pTrans) {
- mDebug("trans:%d, rollback transaction", pTrans->id);
+ mInfo("trans:%d, rollback transaction", pTrans->id);
if (mndTransSync(pMnode, pTrans) != 0) {
mError("trans:%d, failed to rollback since %s", pTrans->id, terrstr());
return -1;
}
- mDebug("trans:%d, rollback finished", pTrans->id);
+ mInfo("trans:%d, rollback finished", pTrans->id);
return 0;
}
@@ -894,7 +928,7 @@ static void mndTransSendRpcRsp(SMnode *pMnode, STrans *pTrans) {
for (int32_t i = 0; i < size; ++i) {
SRpcHandleInfo *pInfo = taosArrayGet(pTrans->pRpcArray, i);
if (pInfo->handle != NULL) {
- mDebug("trans:%d, send rsp, code:0x%x stage:%s app:%p", pTrans->id, code, mndTransStr(pTrans->stage),
+ mInfo("trans:%d, send rsp, code:0x%x stage:%s app:%p", pTrans->id, code, mndTransStr(pTrans->stage),
pInfo->ahandle);
if (code == TSDB_CODE_RPC_NETWORK_UNAVAIL) {
code = TSDB_CODE_MND_TRANS_NETWORK_UNAVAILL;
@@ -902,13 +936,13 @@ static void mndTransSendRpcRsp(SMnode *pMnode, STrans *pTrans) {
SRpcMsg rspMsg = {.code = code, .info = *pInfo};
if (pTrans->originRpcType == TDMT_MND_CREATE_DB) {
- mDebug("trans:%d, origin msgtype:%s", pTrans->id, TMSG_INFO(pTrans->originRpcType));
- SDbObj *pDb = mndAcquireDb(pMnode, pTrans->dbname1);
+ mInfo("trans:%d, origin msgtype:%s", pTrans->id, TMSG_INFO(pTrans->originRpcType));
+ SDbObj *pDb = mndAcquireDb(pMnode, pTrans->dbname);
if (pDb != NULL) {
for (int32_t j = 0; j < 12; j++) {
bool ready = mndIsDbReady(pMnode, pDb);
if (!ready) {
- mDebug("trans:%d, db:%s not ready yet, wait %d times", pTrans->id, pTrans->dbname1, j);
+ mInfo("trans:%d, db:%s not ready yet, wait %d times", pTrans->id, pTrans->dbname, j);
taosMsleep(1000);
} else {
break;
@@ -919,7 +953,7 @@ static void mndTransSendRpcRsp(SMnode *pMnode, STrans *pTrans) {
} else if (pTrans->originRpcType == TDMT_MND_CREATE_STB) {
void *pCont = NULL;
int32_t contLen = 0;
- if (0 == mndBuildSMCreateStbRsp(pMnode, pTrans->dbname1, pTrans->dbname2, &pCont, &contLen) != 0) {
+ if (0 == mndBuildSMCreateStbRsp(pMnode, pTrans->dbname, pTrans->stbname, &pCont, &contLen) != 0) {
mndTransSetRpcRsp(pTrans, pCont, contLen);
}
}
@@ -978,7 +1012,7 @@ int32_t mndTransProcessRsp(SRpcMsg *pRsp) {
pAction->errCode = pRsp->code;
}
- mDebug("trans:%d, %s:%d response is received, code:0x%x, accept:0x%x retry:0x%x", transId,
+ mInfo("trans:%d, %s:%d response is received, code:0x%x, accept:0x%x retry:0x%x", transId,
mndTransStr(pAction->stage), action, pRsp->code, pAction->acceptableCode, pAction->retryCode);
mndTransExecute(pMnode, pTrans);
@@ -994,10 +1028,10 @@ static void mndTransResetAction(SMnode *pMnode, STrans *pTrans, STransAction *pA
if (pAction->errCode == TSDB_CODE_RPC_REDIRECT || pAction->errCode == TSDB_CODE_SYN_NEW_CONFIG_ERROR ||
pAction->errCode == TSDB_CODE_SYN_INTERNAL_ERROR || pAction->errCode == TSDB_CODE_SYN_NOT_LEADER) {
pAction->epSet.inUse = (pAction->epSet.inUse + 1) % pAction->epSet.numOfEps;
- mDebug("trans:%d, %s:%d execute status is reset and set epset inuse:%d", pTrans->id, mndTransStr(pAction->stage),
+ mInfo("trans:%d, %s:%d execute status is reset and set epset inuse:%d", pTrans->id, mndTransStr(pAction->stage),
pAction->id, pAction->epSet.inUse);
} else {
- mDebug("trans:%d, %s:%d execute status is reset", pTrans->id, mndTransStr(pAction->stage), pAction->id);
+ mInfo("trans:%d, %s:%d execute status is reset", pTrans->id, mndTransStr(pAction->stage), pAction->id);
}
pAction->errCode = 0;
}
@@ -1024,21 +1058,15 @@ static int32_t mndTransWriteSingleLog(SMnode *pMnode, STrans *pTrans, STransActi
pAction->rawWritten = true;
pAction->errCode = 0;
code = 0;
- mDebug("trans:%d, %s:%d write to sdb, type:%s status:%s", pTrans->id, mndTransStr(pAction->stage), pAction->id,
+ mInfo("trans:%d, %s:%d write to sdb, type:%s status:%s", pTrans->id, mndTransStr(pAction->stage), pAction->id,
sdbTableName(pAction->pRaw->type), sdbStatusName(pAction->pRaw->status));
- pTrans->lastAction = pAction->id;
- pTrans->lastMsgType = pAction->msgType;
- pTrans->lastEpset = pAction->epSet;
- pTrans->lastErrorNo = 0;
+ mndSetTransLastAction(pTrans, pAction);
} else {
pAction->errCode = (terrno != 0) ? terrno : code;
mError("trans:%d, %s:%d failed to write sdb since %s, type:%s status:%s", pTrans->id, mndTransStr(pAction->stage),
pAction->id, terrstr(), sdbTableName(pAction->pRaw->type), sdbStatusName(pAction->pRaw->status));
- pTrans->lastAction = pAction->id;
- pTrans->lastMsgType = pAction->msgType;
- pTrans->lastEpset = pAction->epSet;
- pTrans->lastErrorNo = pAction->errCode;
+ mndSetTransLastAction(pTrans, pAction);
}
return code;
@@ -1072,15 +1100,10 @@ static int32_t mndTransSendSingleMsg(SMnode *pMnode, STrans *pTrans, STransActio
if (code == 0) {
pAction->msgSent = 1;
pAction->msgReceived = 0;
- pAction->errCode = 0;
- mDebug("trans:%d, %s:%d is sent, %s", pTrans->id, mndTransStr(pAction->stage), pAction->id, detail);
+ pAction->errCode = TSDB_CODE_ACTION_IN_PROGRESS;
+ mInfo("trans:%d, %s:%d is sent, %s", pTrans->id, mndTransStr(pAction->stage), pAction->id, detail);
- pTrans->lastAction = pAction->id;
- pTrans->lastMsgType = pAction->msgType;
- pTrans->lastEpset = pAction->epSet;
- if (pTrans->lastErrorNo == 0) {
- pTrans->lastErrorNo = TSDB_CODE_ACTION_IN_PROGRESS;
- }
+ mndSetTransLastAction(pTrans, pAction);
} else {
pAction->msgSent = 0;
pAction->msgReceived = 0;
@@ -1088,10 +1111,7 @@ static int32_t mndTransSendSingleMsg(SMnode *pMnode, STrans *pTrans, STransActio
mError("trans:%d, %s:%d not send since %s, %s", pTrans->id, mndTransStr(pAction->stage), pAction->id, terrstr(),
detail);
- pTrans->lastAction = pAction->id;
- pTrans->lastMsgType = pAction->msgType;
- pTrans->lastEpset = pAction->epSet;
- pTrans->lastErrorNo = pAction->errCode;
+ mndSetTransLastAction(pTrans, pAction);
}
return code;
@@ -1100,12 +1120,9 @@ static int32_t mndTransSendSingleMsg(SMnode *pMnode, STrans *pTrans, STransActio
static int32_t mndTransExecNullMsg(SMnode *pMnode, STrans *pTrans, STransAction *pAction) {
pAction->rawWritten = 0;
pAction->errCode = 0;
- mDebug("trans:%d, %s:%d confirm action executed", pTrans->id, mndTransStr(pAction->stage), pAction->id);
+ mInfo("trans:%d, %s:%d confirm action executed", pTrans->id, mndTransStr(pAction->stage), pAction->id);
- pTrans->lastAction = pAction->id;
- pTrans->lastMsgType = pAction->msgType;
- pTrans->lastEpset = pAction->epSet;
- pTrans->lastErrorNo = 0;
+ mndSetTransLastAction(pTrans, pAction);
return 0;
}
@@ -1151,31 +1168,25 @@ static int32_t mndTransExecuteActions(SMnode *pMnode, STrans *pTrans, SArray *pA
errCode = pAction->errCode;
pErrAction = pAction;
}
+ } else {
+ pErrAction = pAction;
}
}
+ mndSetTransLastAction(pTrans, pErrAction);
+
if (numOfExecuted == numOfActions) {
if (errCode == 0) {
- pTrans->lastAction = 0;
- pTrans->lastMsgType = 0;
- memset(&pTrans->lastEpset, 0, sizeof(pTrans->lastEpset));
- pTrans->lastErrorNo = 0;
- mDebug("trans:%d, all %d actions execute successfully", pTrans->id, numOfActions);
+ mInfo("trans:%d, all %d actions execute successfully", pTrans->id, numOfActions);
return 0;
} else {
mError("trans:%d, all %d actions executed, code:0x%x", pTrans->id, numOfActions, errCode & 0XFFFF);
- if (pErrAction != NULL) {
- pTrans->lastAction = pErrAction->id;
- pTrans->lastMsgType = pErrAction->msgType;
- pTrans->lastEpset = pErrAction->epSet;
- pTrans->lastErrorNo = pErrAction->errCode;
- }
mndTransResetActions(pMnode, pTrans, pArray);
terrno = errCode;
return errCode;
}
} else {
- mDebug("trans:%d, %d of %d actions executed", pTrans->id, numOfExecuted, numOfActions);
+ mInfo("trans:%d, %d of %d actions executed", pTrans->id, numOfExecuted, numOfActions);
return TSDB_CODE_ACTION_IN_PROGRESS;
}
}
@@ -1210,6 +1221,8 @@ static int32_t mndTransExecuteRedoActionsSerial(SMnode *pMnode, STrans *pTrans)
if (numOfActions == 0) return code;
if (pTrans->redoActionPos >= numOfActions) return code;
+ mInfo("trans:%d, execute %d actions serial", pTrans->id, numOfActions);
+
for (int32_t action = pTrans->redoActionPos; action < numOfActions; ++action) {
STransAction *pAction = taosArrayGet(pTrans->redoActions, pTrans->redoActionPos);
@@ -1221,7 +1234,7 @@ static int32_t mndTransExecuteRedoActionsSerial(SMnode *pMnode, STrans *pTrans)
code = pAction->errCode;
mndTransResetAction(pMnode, pTrans, pAction);
} else {
- mDebug("trans:%d, %s:%d execute successfully", pTrans->id, mndTransStr(pAction->stage), action);
+ mInfo("trans:%d, %s:%d execute successfully", pTrans->id, mndTransStr(pAction->stage), action);
}
} else {
code = TSDB_CODE_ACTION_IN_PROGRESS;
@@ -1230,7 +1243,7 @@ static int32_t mndTransExecuteRedoActionsSerial(SMnode *pMnode, STrans *pTrans)
if (pAction->errCode != 0 && pAction->errCode != pAction->acceptableCode) {
code = pAction->errCode;
} else {
- mDebug("trans:%d, %s:%d write successfully", pTrans->id, mndTransStr(pAction->stage), action);
+ mInfo("trans:%d, %s:%d write successfully", pTrans->id, mndTransStr(pAction->stage), action);
}
} else {
}
@@ -1238,23 +1251,15 @@ static int32_t mndTransExecuteRedoActionsSerial(SMnode *pMnode, STrans *pTrans)
if (code == 0) {
pTrans->failedTimes = 0;
- pTrans->lastAction = action;
- pTrans->lastMsgType = 0;
- pTrans->lastErrorNo = 0;
- memset(&pTrans->lastEpset, 0, sizeof(pTrans->lastEpset));
- } else {
- pTrans->lastAction = action;
- pTrans->lastMsgType = pAction->msgType;
- pTrans->lastErrorNo = code;
- pTrans->lastEpset = pAction->epSet;
}
+ mndSetTransLastAction(pTrans, pAction);
if (mndCannotExecuteTransAction(pMnode)) break;
if (code == 0) {
pTrans->code = 0;
pTrans->redoActionPos++;
- mDebug("trans:%d, %s:%d is executed and need sync to other mnodes", pTrans->id, mndTransStr(pAction->stage),
+ mInfo("trans:%d, %s:%d is executed and need sync to other mnodes", pTrans->id, mndTransStr(pAction->stage),
pAction->id);
code = mndTransSync(pMnode, pTrans);
if (code != 0) {
@@ -1263,17 +1268,17 @@ static int32_t mndTransExecuteRedoActionsSerial(SMnode *pMnode, STrans *pTrans)
mndTransStr(pAction->stage), pAction->id, terrstr());
}
} else if (code == TSDB_CODE_ACTION_IN_PROGRESS) {
- mDebug("trans:%d, %s:%d is in progress and wait it finish", pTrans->id, mndTransStr(pAction->stage), pAction->id);
+ mInfo("trans:%d, %s:%d is in progress and wait it finish", pTrans->id, mndTransStr(pAction->stage), pAction->id);
break;
} else if (code == pAction->retryCode) {
- mDebug("trans:%d, %s:%d receive code:0x%x and retry", pTrans->id, mndTransStr(pAction->stage), pAction->id, code);
+ mInfo("trans:%d, %s:%d receive code:0x%x and retry", pTrans->id, mndTransStr(pAction->stage), pAction->id, code);
taosMsleep(300);
action--;
continue;
} else {
terrno = code;
pTrans->code = code;
- mDebug("trans:%d, %s:%d receive code:0x%x and wait another schedule, failedTimes:%d", pTrans->id,
+ mInfo("trans:%d, %s:%d receive code:0x%x and wait another schedule, failedTimes:%d", pTrans->id,
mndTransStr(pAction->stage), pAction->id, code, pTrans->failedTimes);
break;
}
@@ -1285,7 +1290,7 @@ static int32_t mndTransExecuteRedoActionsSerial(SMnode *pMnode, STrans *pTrans)
static bool mndTransPerformPrepareStage(SMnode *pMnode, STrans *pTrans) {
bool continueExec = true;
pTrans->stage = TRN_STAGE_REDO_ACTION;
- mDebug("trans:%d, stage from prepare to redoAction", pTrans->id);
+ mInfo("trans:%d, stage from prepare to redoAction", pTrans->id);
return continueExec;
}
@@ -1304,10 +1309,10 @@ static bool mndTransPerformRedoActionStage(SMnode *pMnode, STrans *pTrans) {
if (code == 0) {
pTrans->code = 0;
pTrans->stage = TRN_STAGE_COMMIT;
- mDebug("trans:%d, stage from redoAction to commit", pTrans->id);
+ mInfo("trans:%d, stage from redoAction to commit", pTrans->id);
continueExec = true;
} else if (code == TSDB_CODE_ACTION_IN_PROGRESS) {
- mDebug("trans:%d, stage keep on redoAction since %s", pTrans->id, tstrerror(code));
+ mInfo("trans:%d, stage keep on redoAction since %s", pTrans->id, tstrerror(code));
continueExec = false;
} else {
pTrans->failedTimes++;
@@ -1347,7 +1352,7 @@ static bool mndTransPerformCommitStage(SMnode *pMnode, STrans *pTrans) {
if (code == 0) {
pTrans->code = 0;
pTrans->stage = TRN_STAGE_COMMIT_ACTION;
- mDebug("trans:%d, stage from commit to commitAction", pTrans->id);
+ mInfo("trans:%d, stage from commit to commitAction", pTrans->id);
continueExec = true;
} else {
pTrans->code = terrno;
@@ -1366,7 +1371,7 @@ static bool mndTransPerformCommitActionStage(SMnode *pMnode, STrans *pTrans) {
if (code == 0) {
pTrans->code = 0;
pTrans->stage = TRN_STAGE_FINISHED;
- mDebug("trans:%d, stage from commitAction to finished", pTrans->id);
+ mInfo("trans:%d, stage from commitAction to finished", pTrans->id);
continueExec = true;
} else {
pTrans->code = terrno;
@@ -1384,10 +1389,10 @@ static bool mndTransPerformUndoActionStage(SMnode *pMnode, STrans *pTrans) {
if (code == 0) {
pTrans->stage = TRN_STAGE_FINISHED;
- mDebug("trans:%d, stage from undoAction to finished", pTrans->id);
+ mInfo("trans:%d, stage from undoAction to finished", pTrans->id);
continueExec = true;
} else if (code == TSDB_CODE_ACTION_IN_PROGRESS) {
- mDebug("trans:%d, stage keep on undoAction since %s", pTrans->id, tstrerror(code));
+ mInfo("trans:%d, stage keep on undoAction since %s", pTrans->id, tstrerror(code));
continueExec = false;
} else {
pTrans->failedTimes++;
@@ -1406,7 +1411,7 @@ static bool mndTransPerformRollbackStage(SMnode *pMnode, STrans *pTrans) {
if (code == 0) {
pTrans->stage = TRN_STAGE_UNDO_ACTION;
- mDebug("trans:%d, stage from rollback to undoAction", pTrans->id);
+ mInfo("trans:%d, stage from rollback to undoAction", pTrans->id);
continueExec = true;
} else {
pTrans->failedTimes++;
@@ -1431,7 +1436,7 @@ static bool mndTransPerfromFinishedStage(SMnode *pMnode, STrans *pTrans) {
mError("trans:%d, failed to write sdb since %s", pTrans->id, terrstr());
}
- mDebug("trans:%d, execute finished, code:0x%x, failedTimes:%d", pTrans->id, pTrans->code, pTrans->failedTimes);
+ mInfo("trans:%d, execute finished, code:0x%x, failedTimes:%d", pTrans->id, pTrans->code, pTrans->failedTimes);
return continueExec;
}
@@ -1439,7 +1444,7 @@ void mndTransExecute(SMnode *pMnode, STrans *pTrans) {
bool continueExec = true;
while (continueExec) {
- mDebug("trans:%d, continue to execute, stage:%s", pTrans->id, mndTransStr(pTrans->stage));
+ mInfo("trans:%d, continue to execute, stage:%s", pTrans->id, mndTransStr(pTrans->stage));
pTrans->lastExecTime = taosGetTimestampMs();
switch (pTrans->stage) {
case TRN_STAGE_PREPARE:
@@ -1473,6 +1478,7 @@ void mndTransExecute(SMnode *pMnode, STrans *pTrans) {
}
static int32_t mndProcessTransTimer(SRpcMsg *pReq) {
+ mTrace("start to process trans timer");
mndTransPullup(pReq->info.node);
return 0;
}
@@ -1589,15 +1595,20 @@ static int32_t mndRetrieveTrans(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock *pBl
pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
colDataAppend(pColInfo, numOfRows, (const char *)stage, false);
- char dbname1[TSDB_DB_NAME_LEN + VARSTR_HEADER_SIZE] = {0};
- STR_WITH_MAXSIZE_TO_VARSTR(dbname1, mndGetDbStr(pTrans->dbname1), pShow->pMeta->pSchemas[cols].bytes);
+ char opername[TSDB_TRANS_OPER_LEN + VARSTR_HEADER_SIZE] = {0};
+ STR_WITH_MAXSIZE_TO_VARSTR(opername, pTrans->opername, pShow->pMeta->pSchemas[cols].bytes);
+ pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
+ colDataAppend(pColInfo, numOfRows, (const char *)opername, false);
+
+ char dbname[TSDB_DB_NAME_LEN + VARSTR_HEADER_SIZE] = {0};
+ STR_WITH_MAXSIZE_TO_VARSTR(dbname, mndGetDbStr(pTrans->dbname), pShow->pMeta->pSchemas[cols].bytes);
pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
- colDataAppend(pColInfo, numOfRows, (const char *)dbname1, false);
+ colDataAppend(pColInfo, numOfRows, (const char *)dbname, false);
- char dbname2[TSDB_DB_NAME_LEN + VARSTR_HEADER_SIZE] = {0};
- STR_WITH_MAXSIZE_TO_VARSTR(dbname2, mndGetDbStr(pTrans->dbname2), pShow->pMeta->pSchemas[cols].bytes);
+ char stbname[TSDB_TABLE_NAME_LEN + VARSTR_HEADER_SIZE] = {0};
+ STR_WITH_MAXSIZE_TO_VARSTR(stbname, mndGetDbStr(pTrans->stbname), pShow->pMeta->pSchemas[cols].bytes);
pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
- colDataAppend(pColInfo, numOfRows, (const char *)dbname2, false);
+ colDataAppend(pColInfo, numOfRows, (const char *)stbname, false);
pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
colDataAppend(pColInfo, numOfRows, (const char *)&pTrans->failedTimes, false);
diff --git a/source/dnode/mnode/impl/src/mndUser.c b/source/dnode/mnode/impl/src/mndUser.c
index 5da119bb30af5bb27fb40d1dc42391893fb98c43..5512fa410788a3eaea4adc4e5b08759a4568a999 100644
--- a/source/dnode/mnode/impl/src/mndUser.c
+++ b/source/dnode/mnode/impl/src/mndUser.c
@@ -79,14 +79,14 @@ static int32_t mndCreateDefaultUser(SMnode *pMnode, char *acct, char *user, char
if (pRaw == NULL) return -1;
sdbSetRawStatus(pRaw, SDB_STATUS_READY);
- mDebug("user:%s, will be created when deploying, raw:%p", userObj.user, pRaw);
+ mInfo("user:%s, will be created when deploying, raw:%p", userObj.user, pRaw);
- STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_RETRY, TRN_CONFLICT_NOTHING, NULL);
+ STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_RETRY, TRN_CONFLICT_NOTHING, NULL, "create-user");
if (pTrans == NULL) {
mError("user:%s, failed to create since %s", userObj.user, terrstr());
return -1;
}
- mDebug("trans:%d, used to create user:%s", pTrans->id, userObj.user);
+ mInfo("trans:%d, used to create user:%s", pTrans->id, userObj.user);
if (mndTransAppendCommitlog(pTrans, pRaw) != 0) {
mError("trans:%d, failed to commit redo log since %s", pTrans->id, terrstr());
@@ -299,12 +299,12 @@ static int32_t mndCreateUser(SMnode *pMnode, char *acct, SCreateUserReq *pCreate
userObj.sysInfo = pCreate->sysInfo;
userObj.enable = pCreate->enable;
- STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_ROLLBACK, TRN_CONFLICT_NOTHING, pReq);
+ STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_ROLLBACK, TRN_CONFLICT_NOTHING, pReq, "create-user");
if (pTrans == NULL) {
mError("user:%s, failed to create since %s", pCreate->user, terrstr());
return -1;
}
- mDebug("trans:%d, used to create user:%s", pTrans->id, pCreate->user);
+ mInfo("trans:%d, used to create user:%s", pTrans->id, pCreate->user);
SSdbRaw *pCommitRaw = mndUserActionEncode(&userObj);
if (pCommitRaw == NULL || mndTransAppendCommitlog(pTrans, pCommitRaw) != 0) {
@@ -336,7 +336,7 @@ static int32_t mndProcessCreateUserReq(SRpcMsg *pReq) {
goto _OVER;
}
- mDebug("user:%s, start to create", createReq.user);
+ mInfo("user:%s, start to create", createReq.user);
if (mndCheckOperPrivilege(pMnode, pReq->info.conn.user, MND_OPER_CREATE_USER) != 0) {
goto _OVER;
}
@@ -383,12 +383,12 @@ _OVER:
}
static int32_t mndAlterUser(SMnode *pMnode, SUserObj *pOld, SUserObj *pNew, SRpcMsg *pReq) {
- STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_ROLLBACK, TRN_CONFLICT_NOTHING, pReq);
+ STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_ROLLBACK, TRN_CONFLICT_NOTHING, pReq, "alter-user");
if (pTrans == NULL) {
mError("user:%s, failed to alter since %s", pOld->user, terrstr());
return -1;
}
- mDebug("trans:%d, used to alter user:%s", pTrans->id, pOld->user);
+ mInfo("trans:%d, used to alter user:%s", pTrans->id, pOld->user);
SSdbRaw *pCommitRaw = mndUserActionEncode(pNew);
if (pCommitRaw == NULL || mndTransAppendCommitlog(pTrans, pCommitRaw) != 0) {
@@ -446,7 +446,7 @@ static int32_t mndProcessAlterUserReq(SRpcMsg *pReq) {
goto _OVER;
}
- mDebug("user:%s, start to alter", alterReq.user);
+ mInfo("user:%s, start to alter", alterReq.user);
if (alterReq.user[0] == 0) {
terrno = TSDB_CODE_MND_INVALID_USER_FORMAT;
@@ -598,12 +598,12 @@ _OVER:
}
static int32_t mndDropUser(SMnode *pMnode, SRpcMsg *pReq, SUserObj *pUser) {
- STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_ROLLBACK, TRN_CONFLICT_NOTHING, pReq);
+ STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_ROLLBACK, TRN_CONFLICT_NOTHING, pReq, "drop-user");
if (pTrans == NULL) {
mError("user:%s, failed to drop since %s", pUser->user, terrstr());
return -1;
}
- mDebug("trans:%d, used to drop user:%s", pTrans->id, pUser->user);
+ mInfo("trans:%d, used to drop user:%s", pTrans->id, pUser->user);
SSdbRaw *pCommitRaw = mndUserActionEncode(pUser);
if (pCommitRaw == NULL || mndTransAppendCommitlog(pTrans, pCommitRaw) != 0) {
@@ -634,7 +634,7 @@ static int32_t mndProcessDropUserReq(SRpcMsg *pReq) {
goto _OVER;
}
- mDebug("user:%s, start to drop", dropReq.user);
+ mInfo("user:%s, start to drop", dropReq.user);
if (mndCheckOperPrivilege(pMnode, pReq->info.conn.user, MND_OPER_DROP_USER) != 0) {
goto _OVER;
}
diff --git a/source/dnode/mnode/impl/src/mndVgroup.c b/source/dnode/mnode/impl/src/mndVgroup.c
index 09eed7fb32e8831e6b6c863b44edd3e9e28110a3..c1bc3408199ed92a7a101d04f66aa54f3ef47df2 100644
--- a/source/dnode/mnode/impl/src/mndVgroup.c
+++ b/source/dnode/mnode/impl/src/mndVgroup.c
@@ -234,6 +234,10 @@ void *mndBuildCreateVnodeReq(SMnode *pMnode, SDnodeObj *pDnode, SDbObj *pDb, SVg
createReq.walRetentionSize = pDb->cfg.walRetentionSize;
createReq.walRollPeriod = pDb->cfg.walRollPeriod;
createReq.walSegmentSize = pDb->cfg.walSegmentSize;
+ createReq.sstTrigger = pDb->cfg.sstTrigger;
+ createReq.hashPrefix = pDb->cfg.hashPrefix;
+ createReq.hashSuffix = pDb->cfg.hashSuffix;
+ createReq.tsdbPageSize = pDb->cfg.tsdbPageSize;
for (int32_t v = 0; v < pVgroup->replica; ++v) {
SReplica *pReplica = &createReq.replicas[v];
@@ -400,7 +404,7 @@ static bool mndBuildDnodesArrayFp(SMnode *pMnode, void *pObj, void *p1, void *p2
pDnode->numOfVnodes = mndGetVnodesNum(pMnode, pDnode->id);
pDnode->memUsed = mndGetVnodesMemory(pMnode, pDnode->id);
- mDebug("dnode:%d, vnodes:%d supportVnodes:%d isMnode:%d online:%d memory avail:%" PRId64 " used:%" PRId64, pDnode->id,
+ mInfo("dnode:%d, vnodes:%d supportVnodes:%d isMnode:%d online:%d memory avail:%" PRId64 " used:%" PRId64, pDnode->id,
pDnode->numOfVnodes, pDnode->numOfSupportVnodes, isMnode, online, pDnode->memAvail, pDnode->memUsed);
if (isMnode) {
@@ -693,6 +697,9 @@ static int32_t mndRetrieveVgroups(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock *p
pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
colDataAppendNULL(pColInfo, numOfRows);
+ pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
+ colDataAppend(pColInfo, numOfRows, (const char *)&pVgroup->cacheUsage, false);
+
pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
colDataAppendNULL(pColInfo, numOfRows);
@@ -791,32 +798,43 @@ static int32_t mndRetrieveVnodes(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock *pB
if (pShow->pIter == NULL) break;
for (int32_t i = 0; i < pVgroup->replica && numOfRows < rows; ++i) {
- SVnodeGid *pVgid = &pVgroup->vnodeGid[i];
+ SVnodeGid *pVgid = &pVgroup->vnodeGid[i];
+ SColumnInfoData *pColInfo = NULL;
cols = 0;
- SColumnInfoData *pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
- colDataAppend(pColInfo, numOfRows, (const char *)&pVgroup->vgId, false);
-
- SName name = {0};
- char db[TSDB_DB_NAME_LEN + VARSTR_HEADER_SIZE] = {0};
- tNameFromString(&name, pVgroup->dbName, T_NAME_ACCT | T_NAME_DB);
- tNameGetDbName(&name, varDataVal(db));
- varDataSetLen(db, strlen(varDataVal(db)));
-
pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
- colDataAppend(pColInfo, numOfRows, (const char *)db, false);
+ colDataAppend(pColInfo, numOfRows, (const char *)&pVgroup->vgId, false);
- uint32_t val = 0;
pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
- colDataAppend(pColInfo, numOfRows, (const char *)&val, false);
+ colDataAppend(pColInfo, numOfRows, (const char *)&pVgroup->replica, false);
char buf[20] = {0};
STR_TO_VARSTR(buf, syncStr(pVgid->role));
pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
colDataAppend(pColInfo, numOfRows, (const char *)buf, false);
+ const char *dbname = mndGetDbStr(pVgroup->dbName);
+ char b1[TSDB_DB_NAME_LEN + VARSTR_HEADER_SIZE] = {0};
+ if (dbname != NULL) {
+ STR_WITH_MAXSIZE_TO_VARSTR(b1, dbname, TSDB_DB_NAME_LEN + VARSTR_HEADER_SIZE);
+ } else {
+ STR_WITH_MAXSIZE_TO_VARSTR(b1, "NULL", TSDB_DB_NAME_LEN + VARSTR_HEADER_SIZE);
+ }
+ pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
+ colDataAppend(pColInfo, numOfRows, (const char *)b1, false);
+
+ pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
+ colDataAppend(pColInfo, numOfRows, (const char *)&pVgid->dnodeId, false);
+
+ SDnodeObj *pDnode = mndAcquireDnode(pMnode, pVgid->dnodeId);
+ char b2[TSDB_EP_LEN + VARSTR_HEADER_SIZE] = {0};
+ if (pDnode != NULL) {
+ STR_WITH_MAXSIZE_TO_VARSTR(b2, pDnode->ep, TSDB_EP_LEN + VARSTR_HEADER_SIZE);
+ } else {
+ STR_WITH_MAXSIZE_TO_VARSTR(b2, "NULL", TSDB_EP_LEN + VARSTR_HEADER_SIZE);
+ }
pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
- colDataAppend(pColInfo, numOfRows, (const char *)&pVgroup->replica, false); // onlines
+ colDataAppend(pColInfo, numOfRows, (const char *)b2, false);
numOfRows++;
}
@@ -837,7 +855,7 @@ int32_t mndAddVnodeToVgroup(SMnode *pMnode, SVgObj *pVgroup, SArray *pArray) {
taosArraySort(pArray, (__compar_fn_t)mndCompareDnodeVnodes);
for (int32_t i = 0; i < taosArrayGetSize(pArray); ++i) {
SDnodeObj *pDnode = taosArrayGet(pArray, i);
- mDebug("dnode:%d, equivalent vnodes:%d", pDnode->id, pDnode->numOfVnodes);
+ mInfo("dnode:%d, equivalent vnodes:%d", pDnode->id, pDnode->numOfVnodes);
}
SVnodeGid *pVgid = &pVgroup->vnodeGid[pVgroup->replica];
@@ -887,7 +905,7 @@ int32_t mndRemoveVnodeFromVgroup(SMnode *pMnode, SVgObj *pVgroup, SArray *pArray
taosArraySort(pArray, (__compar_fn_t)mndCompareDnodeVnodes);
for (int32_t i = 0; i < taosArrayGetSize(pArray); ++i) {
SDnodeObj *pDnode = taosArrayGet(pArray, i);
- mDebug("dnode:%d, equivalent vnodes:%d", pDnode->id, pDnode->numOfVnodes);
+ mInfo("dnode:%d, equivalent vnodes:%d", pDnode->id, pDnode->numOfVnodes);
}
int32_t code = -1;
@@ -1156,7 +1174,7 @@ int32_t mndSetMoveVgroupsInfoToTrans(SMnode *pMnode, STrans *pTrans, int32_t del
static int32_t mndAddIncVgroupReplicaToTrans(SMnode *pMnode, STrans *pTrans, SDbObj *pDb, SVgObj *pVgroup,
int32_t newDnodeId) {
- mDebug("vgId:%d, will add 1 vnode, replica:%d dnode:%d", pVgroup->vgId, pVgroup->replica, newDnodeId);
+ mInfo("vgId:%d, will add 1 vnode, replica:%d dnode:%d", pVgroup->vgId, pVgroup->replica, newDnodeId);
SVnodeGid *pGid = &pVgroup->vnodeGid[pVgroup->replica];
pVgroup->replica++;
@@ -1172,7 +1190,7 @@ static int32_t mndAddIncVgroupReplicaToTrans(SMnode *pMnode, STrans *pTrans, SDb
static int32_t mndAddDecVgroupReplicaFromTrans(SMnode *pMnode, STrans *pTrans, SDbObj *pDb, SVgObj *pVgroup,
int32_t delDnodeId) {
- mDebug("vgId:%d, will remove 1 vnode, replica:%d dnode:%d", pVgroup->vgId, pVgroup->replica, delDnodeId);
+ mInfo("vgId:%d, will remove 1 vnode, replica:%d dnode:%d", pVgroup->vgId, pVgroup->replica, delDnodeId);
SVnodeGid *pGid = NULL;
SVnodeGid delGid = {0};
@@ -1205,10 +1223,10 @@ static int32_t mndRedistributeVgroup(SMnode *pMnode, SRpcMsg *pReq, SDbObj *pDb,
SSdbRaw *pRaw = NULL;
STrans *pTrans = NULL;
- pTrans = mndTransCreate(pMnode, TRN_POLICY_RETRY, TRN_CONFLICT_GLOBAL, pReq);
+ pTrans = mndTransCreate(pMnode, TRN_POLICY_RETRY, TRN_CONFLICT_GLOBAL, pReq, "red-vgroup");
if (pTrans == NULL) goto _OVER;
mndTransSetSerial(pTrans);
- mDebug("trans:%d, used to redistribute vgroup, vgId:%d", pTrans->id, pVgroup->vgId);
+ mInfo("trans:%d, used to redistribute vgroup, vgId:%d", pTrans->id, pVgroup->vgId);
SVgObj newVg = {0};
memcpy(&newVg, pVgroup, sizeof(SVgObj));
@@ -1588,10 +1606,10 @@ static int32_t mndSplitVgroup(SMnode *pMnode, SRpcMsg *pReq, SDbObj *pDb, SVgObj
STrans *pTrans = NULL;
SArray *pArray = mndBuildDnodesArray(pMnode, 0);
- pTrans = mndTransCreate(pMnode, TRN_POLICY_RETRY, TRN_CONFLICT_GLOBAL, pReq);
+ pTrans = mndTransCreate(pMnode, TRN_POLICY_RETRY, TRN_CONFLICT_GLOBAL, pReq, "split-vgroup");
if (pTrans == NULL) goto _OVER;
mndTransSetSerial(pTrans);
- mDebug("trans:%d, used to split vgroup, vgId:%d", pTrans->id, pVgroup->vgId);
+ mInfo("trans:%d, used to split vgroup, vgId:%d", pTrans->id, pVgroup->vgId);
SVgObj newVg1 = {0};
memcpy(&newVg1, pVgroup, sizeof(SVgObj));
@@ -1648,7 +1666,7 @@ static int32_t mndProcessSplitVgroupMsg(SRpcMsg *pReq) {
SVgObj *pVgroup = NULL;
SDbObj *pDb = NULL;
- mDebug("vgId:%d, start to split", vgId);
+ mInfo("vgId:%d, start to split", vgId);
if (mndCheckOperPrivilege(pMnode, pReq->info.conn.user, MND_OPER_SPLIT_VGROUP) != 0) {
goto _OVER;
}
@@ -1756,16 +1774,16 @@ static int32_t mndBalanceVgroup(SMnode *pMnode, SRpcMsg *pReq, SArray *pArray) {
pBalancedVgroups = taosHashInit(16, taosGetDefaultHashFunction(TSDB_DATA_TYPE_INT), false, HASH_NO_LOCK);
if (pBalancedVgroups == NULL) goto _OVER;
- pTrans = mndTransCreate(pMnode, TRN_POLICY_RETRY, TRN_CONFLICT_GLOBAL, pReq);
+ pTrans = mndTransCreate(pMnode, TRN_POLICY_RETRY, TRN_CONFLICT_GLOBAL, pReq, "balance-vgroup");
if (pTrans == NULL) goto _OVER;
mndTransSetSerial(pTrans);
- mDebug("trans:%d, used to balance vgroup", pTrans->id);
+ mInfo("trans:%d, used to balance vgroup", pTrans->id);
while (1) {
taosArraySort(pArray, (__compar_fn_t)mndCompareDnodeVnodes);
for (int32_t i = 0; i < taosArrayGetSize(pArray); ++i) {
SDnodeObj *pDnode = taosArrayGet(pArray, i);
- mDebug("dnode:%d, equivalent vnodes:%d support:%d, score:%f", pDnode->id, pDnode->numOfVnodes,
+ mInfo("dnode:%d, equivalent vnodes:%d support:%d, score:%f", pDnode->id, pDnode->numOfVnodes,
pDnode->numOfSupportVnodes, (float)pDnode->numOfVnodes / pDnode->numOfSupportVnodes);
}
@@ -1774,7 +1792,7 @@ static int32_t mndBalanceVgroup(SMnode *pMnode, SRpcMsg *pReq, SArray *pArray) {
float srcScore = (float)(pSrc->numOfVnodes - 1) / pSrc->numOfSupportVnodes;
float dstScore = (float)(pDst->numOfVnodes + 1) / pDst->numOfSupportVnodes;
- mDebug("trans:%d, after balance, src dnode:%d score:%f, dst dnode:%d score:%f", pTrans->id, pSrc->id, srcScore,
+ mInfo("trans:%d, after balance, src dnode:%d score:%f, dst dnode:%d score:%f", pTrans->id, pSrc->id, srcScore,
pDst->id, dstScore);
if (srcScore > dstScore - 0.000001) {
@@ -1785,20 +1803,20 @@ static int32_t mndBalanceVgroup(SMnode *pMnode, SRpcMsg *pReq, SArray *pArray) {
numOfVgroups++;
continue;
} else {
- mDebug("trans:%d, no vgroup need to balance from dnode:%d to dnode:%d", pTrans->id, pSrc->id, pDst->id);
+ mInfo("trans:%d, no vgroup need to balance from dnode:%d to dnode:%d", pTrans->id, pSrc->id, pDst->id);
break;
}
} else {
- mDebug("trans:%d, no vgroup need to balance any more", pTrans->id);
+ mInfo("trans:%d, no vgroup need to balance any more", pTrans->id);
break;
}
}
if (numOfVgroups <= 0) {
- mDebug("no need to balance vgroup");
+ mInfo("no need to balance vgroup");
code = 0;
} else {
- mDebug("start to balance vgroup, numOfVgroups:%d", numOfVgroups);
+ mInfo("start to balance vgroup, numOfVgroups:%d", numOfVgroups);
if (mndTransPrepare(pMnode, pTrans) != 0) goto _OVER;
code = TSDB_CODE_ACTION_IN_PROGRESS;
}
@@ -1847,7 +1865,7 @@ static int32_t mndProcessBalanceVgroupMsg(SRpcMsg *pReq) {
if (pArray == NULL) goto _OVER;
if (taosArrayGetSize(pArray) < 2) {
- mDebug("no need to balance vgroup since dnode num less than 2");
+ mInfo("no need to balance vgroup since dnode num less than 2");
code = 0;
} else {
code = mndBalanceVgroup(pMnode, pReq, pArray);
diff --git a/source/dnode/mnode/impl/test/trans/trans2.cpp b/source/dnode/mnode/impl/test/trans/trans2.cpp
index aee8aa27488da6dc6b8b0cbd06a3b34741e66a18..60be7cfbc03f2e19b1f8fd6f821eef08a7d56ee0 100644
--- a/source/dnode/mnode/impl/test/trans/trans2.cpp
+++ b/source/dnode/mnode/impl/test/trans/trans2.cpp
@@ -115,7 +115,7 @@ class MndTestTrans2 : public ::testing::Test {
userObj.superUser = 1;
SRpcMsg rpcMsg = {0};
- STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_ROLLBACK, conflict, &rpcMsg);
+ STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_ROLLBACK, conflict, &rpcMsg, "");
SSdbRaw *pRedoRaw = mndUserActionEncode(&userObj);
mndTransAppendRedolog(pTrans, pRedoRaw);
sdbSetRawStatus(pRedoRaw, SDB_STATUS_READY);
@@ -148,7 +148,7 @@ class MndTestTrans2 : public ::testing::Test {
userObj.superUser = 1;
SRpcMsg rpcMsg = {0};
- STrans *pTrans = mndTransCreate(pMnode, policy, conflict, &rpcMsg);
+ STrans *pTrans = mndTransCreate(pMnode, policy, conflict, &rpcMsg, "");
SSdbRaw *pRedoRaw = mndUserActionEncode(&userObj);
mndTransAppendRedolog(pTrans, pRedoRaw);
sdbSetRawStatus(pRedoRaw, SDB_STATUS_READY);
@@ -220,7 +220,7 @@ class MndTestTrans2 : public ::testing::Test {
userObj.superUser = 1;
SRpcMsg rpcMsg = {0};
- STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_ROLLBACK, TRN_CONFLICT_NOTHING, &rpcMsg);
+ STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_ROLLBACK, TRN_CONFLICT_NOTHING, &rpcMsg, "");
SSdbRaw *pRedoRaw = mndUserActionEncode(&userObj);
mndTransAppendRedolog(pTrans, pRedoRaw);
sdbSetRawStatus(pRedoRaw, SDB_STATUS_READY);
diff --git a/source/dnode/mnode/sdb/src/sdb.c b/source/dnode/mnode/sdb/src/sdb.c
index 3db0087334c0eb92be39bc9b97d12356f63062ce..5393c42da33a45629d15f02df803135e853185d7 100644
--- a/source/dnode/mnode/sdb/src/sdb.c
+++ b/source/dnode/mnode/sdb/src/sdb.c
@@ -19,7 +19,7 @@
static int32_t sdbCreateDir(SSdb *pSdb);
SSdb *sdbInit(SSdbOpt *pOption) {
- mDebug("start to init sdb in %s", pOption->path);
+ mInfo("start to init sdb in %s", pOption->path);
SSdb *pSdb = taosMemoryCalloc(1, sizeof(SSdb));
if (pSdb == NULL) {
@@ -61,12 +61,12 @@ SSdb *sdbInit(SSdbOpt *pOption) {
pSdb->commitConfig = -1;
pSdb->pMnode = pOption->pMnode;
taosThreadMutexInit(&pSdb->filelock, NULL);
- mDebug("sdb init successfully");
+ mInfo("sdb init success");
return pSdb;
}
void sdbCleanup(SSdb *pSdb) {
- mDebug("start to cleanup sdb");
+ mInfo("start to cleanup sdb");
sdbWriteFile(pSdb, 0);
@@ -103,12 +103,12 @@ void sdbCleanup(SSdb *pSdb) {
pSdb->hashObjs[i] = NULL;
memset(&pSdb->locks[i], 0, sizeof(pSdb->locks[i]));
- mDebug("sdb table:%s is cleaned up", sdbTableName(i));
+ mInfo("sdb table:%s is cleaned up", sdbTableName(i));
}
taosThreadMutexDestroy(&pSdb->filelock);
taosMemoryFree(pSdb);
- mDebug("sdb is cleaned up");
+ mInfo("sdb is cleaned up");
}
int32_t sdbSetTable(SSdb *pSdb, SSdbTable table) {
@@ -139,7 +139,7 @@ int32_t sdbSetTable(SSdb *pSdb, SSdbTable table) {
pSdb->maxId[sdbType] = 0;
pSdb->hashObjs[sdbType] = hash;
- mDebug("sdb table:%s is initialized", sdbTableName(sdbType));
+ mInfo("sdb table:%s is initialized", sdbTableName(sdbType));
return 0;
}
@@ -175,7 +175,7 @@ void sdbGetCommitInfo(SSdb *pSdb, int64_t *index, int64_t *term, int64_t *config
*index = pSdb->commitIndex;
*term = pSdb->commitTerm;
*config = pSdb->commitConfig;
-#if 0
+#if 1
mTrace("mnode current info, apply index:%" PRId64 " term:%" PRId64 " config:%" PRId64 ", commit index:%" PRId64
" term:%" PRId64 " config:%" PRId64,
pSdb->applyIndex, pSdb->applyTerm, pSdb->applyConfig, *index, *term, *config);
diff --git a/source/dnode/mnode/sdb/src/sdbFile.c b/source/dnode/mnode/sdb/src/sdbFile.c
index 00659939e9b1cc2399125bb96edd6bf31d9804fa..b5cfa7b0f68fc4e4b4683f83e0dc72fff373aa8e 100644
--- a/source/dnode/mnode/sdb/src/sdbFile.c
+++ b/source/dnode/mnode/sdb/src/sdbFile.c
@@ -23,25 +23,25 @@
#define SDB_FILE_VER 1
static int32_t sdbDeployData(SSdb *pSdb) {
- mDebug("start to deploy sdb");
+ mInfo("start to deploy sdb");
for (int32_t i = SDB_MAX - 1; i >= 0; --i) {
SdbDeployFp fp = pSdb->deployFps[i];
if (fp == NULL) continue;
- mDebug("start to deploy sdb:%s", sdbTableName(i));
+ mInfo("start to deploy sdb:%s", sdbTableName(i));
if ((*fp)(pSdb->pMnode) != 0) {
mError("failed to deploy sdb:%s since %s", sdbTableName(i), terrstr());
return -1;
}
}
- mDebug("sdb deploy successfully");
+ mInfo("sdb deploy success");
return 0;
}
static void sdbResetData(SSdb *pSdb) {
- mDebug("start to reset sdb");
+ mInfo("start to reset sdb");
for (ESdbType i = 0; i < SDB_MAX; ++i) {
SHashObj *hash = pSdb->hashObjs[i];
@@ -64,7 +64,7 @@ static void sdbResetData(SSdb *pSdb) {
taosHashClear(pSdb->hashObjs[i]);
pSdb->tableVer[i] = 0;
pSdb->maxId[i] = 0;
- mDebug("sdb:%s is reset", sdbTableName(i));
+ mInfo("sdb:%s is reset", sdbTableName(i));
}
pSdb->applyIndex = -1;
@@ -73,7 +73,7 @@ static void sdbResetData(SSdb *pSdb) {
pSdb->commitIndex = -1;
pSdb->commitTerm = -1;
pSdb->commitConfig = -1;
- mDebug("sdb reset successfully");
+ mInfo("sdb reset success");
}
static int32_t sdbReadFileHead(SSdb *pSdb, TdFilePtr pFile) {
@@ -229,7 +229,7 @@ static int32_t sdbReadFileImp(SSdb *pSdb) {
char file[PATH_MAX] = {0};
snprintf(file, sizeof(file), "%s%ssdb.data", pSdb->currDir, TD_DIRSEP);
- mDebug("start to read sdb file:%s", file);
+ mInfo("start to read sdb file:%s", file);
SSdbRaw *pRaw = taosMemoryMalloc(TSDB_MAX_MSG_SIZE + 100);
if (pRaw == NULL) {
@@ -306,7 +306,7 @@ static int32_t sdbReadFileImp(SSdb *pSdb) {
pSdb->commitTerm = pSdb->applyTerm;
pSdb->commitConfig = pSdb->applyConfig;
memcpy(pSdb->tableVer, tableVer, sizeof(tableVer));
- mDebug("read sdb file:%s successfully, commit index:%" PRId64 " term:%" PRId64 " config:%" PRId64, file,
+ mInfo("read sdb file:%s success, commit index:%" PRId64 " term:%" PRId64 " config:%" PRId64, file,
pSdb->commitIndex, pSdb->commitTerm, pSdb->commitConfig);
_OVER:
@@ -339,7 +339,7 @@ static int32_t sdbWriteFileImp(SSdb *pSdb) {
char curfile[PATH_MAX] = {0};
snprintf(curfile, sizeof(curfile), "%s%ssdb.data", pSdb->currDir, TD_DIRSEP);
- mDebug("start to write sdb file, apply index:%" PRId64 " term:%" PRId64 " config:%" PRId64 ", commit index:%" PRId64
+ mInfo("start to write sdb file, apply index:%" PRId64 " term:%" PRId64 " config:%" PRId64 ", commit index:%" PRId64
" term:%" PRId64 " config:%" PRId64 ", file:%s",
pSdb->applyIndex, pSdb->applyTerm, pSdb->applyConfig, pSdb->commitIndex, pSdb->commitTerm, pSdb->commitConfig,
curfile);
@@ -361,7 +361,7 @@ static int32_t sdbWriteFileImp(SSdb *pSdb) {
SdbEncodeFp encodeFp = pSdb->encodeFps[i];
if (encodeFp == NULL) continue;
- mDebug("write %s to sdb file, total %d rows", sdbTableName(i), sdbGetSize(pSdb, i));
+ mInfo("write %s to sdb file, total %d rows", sdbTableName(i), sdbGetSize(pSdb, i));
SHashObj *hash = pSdb->hashObjs[i];
TdThreadRwlock *pLock = &pSdb->locks[i];
@@ -437,7 +437,7 @@ static int32_t sdbWriteFileImp(SSdb *pSdb) {
pSdb->commitIndex = pSdb->applyIndex;
pSdb->commitTerm = pSdb->applyTerm;
pSdb->commitConfig = pSdb->applyConfig;
- mDebug("write sdb file successfully, commit index:%" PRId64 " term:%" PRId64 " config:%" PRId64 " file:%s",
+ mInfo("write sdb file success, commit index:%" PRId64 " term:%" PRId64 " config:%" PRId64 " file:%s",
pSdb->commitIndex, pSdb->commitTerm, pSdb->commitConfig, curfile);
}
@@ -514,12 +514,12 @@ static void sdbCloseIter(SSdbIter *pIter) {
}
if (pIter->name != NULL) {
- taosRemoveFile(pIter->name);
+ (void)taosRemoveFile(pIter->name);
taosMemoryFree(pIter->name);
pIter->name = NULL;
}
- mDebug("sdbiter:%p, is closed, total:%" PRId64, pIter, pIter->total);
+ mInfo("sdbiter:%p, is closed, total:%" PRId64, pIter, pIter->total);
taosMemoryFree(pIter);
}
@@ -556,7 +556,7 @@ int32_t sdbStartRead(SSdb *pSdb, SSdbIter **ppIter, int64_t *index, int64_t *ter
if (term != NULL) *term = commitTerm;
if (config != NULL) *config = commitConfig;
- mDebug("sdbiter:%p, is created to read snapshot, commit index:%" PRId64 " term:%" PRId64 " config:%" PRId64
+ mInfo("sdbiter:%p, is created to read snapshot, commit index:%" PRId64 " term:%" PRId64 " config:%" PRId64
" file:%s",
pIter, commitIndex, commitTerm, commitConfig, pIter->name);
return 0;
@@ -568,7 +568,7 @@ int32_t sdbStopRead(SSdb *pSdb, SSdbIter *pIter) {
}
int32_t sdbDoRead(SSdb *pSdb, SSdbIter *pIter, void **ppBuf, int32_t *len) {
- int32_t maxlen = 100;
+ int32_t maxlen = 4096;
void *pBuf = taosMemoryCalloc(1, maxlen);
if (pBuf == NULL) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
@@ -584,14 +584,14 @@ int32_t sdbDoRead(SSdb *pSdb, SSdbIter *pIter, void **ppBuf, int32_t *len) {
taosMemoryFree(pBuf);
return -1;
} else if (readlen == 0) {
- mDebug("sdbiter:%p, read snapshot to the end, total:%" PRId64, pIter, pIter->total);
+ mInfo("sdbiter:%p, read snapshot to the end, total:%" PRId64, pIter, pIter->total);
*ppBuf = NULL;
*len = 0;
taosMemoryFree(pBuf);
return 0;
} else { // (readlen <= maxlen)
pIter->total += readlen;
- mDebug("sdbiter:%p, read:%d bytes from snapshot, total:%" PRId64, pIter, readlen, pIter->total);
+ mInfo("sdbiter:%p, read:%d bytes from snapshot, total:%" PRId64, pIter, readlen, pIter->total);
*ppBuf = pBuf;
*len = readlen;
return 0;
@@ -606,11 +606,12 @@ int32_t sdbStartWrite(SSdb *pSdb, SSdbIter **ppIter) {
if (pIter->file == NULL) {
terrno = TAOS_SYSTEM_ERROR(errno);
mError("failed to open %s since %s", pIter->name, terrstr());
+ sdbCloseIter(pIter);
return -1;
}
*ppIter = pIter;
- mDebug("sdbiter:%p, is created to write snapshot, file:%s", pIter, pIter->name);
+ mInfo("sdbiter:%p, is created to write snapshot, file:%s", pIter, pIter->name);
return 0;
}
@@ -619,7 +620,7 @@ int32_t sdbStopWrite(SSdb *pSdb, SSdbIter *pIter, bool isApply, int64_t index, i
if (!isApply) {
sdbCloseIter(pIter);
- mDebug("sdbiter:%p, not apply to sdb", pIter);
+ mInfo("sdbiter:%p, not apply to sdb", pIter);
return 0;
}
@@ -636,9 +637,9 @@ int32_t sdbStopWrite(SSdb *pSdb, SSdbIter *pIter, bool isApply, int64_t index, i
return -1;
}
- sdbCloseIter(pIter);
if (sdbReadFile(pSdb) != 0) {
mError("sdbiter:%p, failed to read from %s since %s", pIter, datafile, terrstr());
+ sdbCloseIter(pIter);
return -1;
}
@@ -655,7 +656,8 @@ int32_t sdbStopWrite(SSdb *pSdb, SSdbIter *pIter, bool isApply, int64_t index, i
pSdb->commitIndex = index;
}
- mDebug("sdbiter:%p, successfully applyed to sdb", pIter);
+ mInfo("sdbiter:%p, success applyed to sdb", pIter);
+ sdbCloseIter(pIter);
return 0;
}
@@ -668,6 +670,6 @@ int32_t sdbDoWrite(SSdb *pSdb, SSdbIter *pIter, void *pBuf, int32_t len) {
}
pIter->total += writelen;
- mDebug("sdbiter:%p, write:%d bytes to snapshot, total:%" PRId64, pIter, writelen, pIter->total);
+ mInfo("sdbiter:%p, write:%d bytes to snapshot, total:%" PRId64, pIter, writelen, pIter->total);
return 0;
}
diff --git a/source/dnode/mnode/sdb/src/sdbHash.c b/source/dnode/mnode/sdb/src/sdbHash.c
index c579f82a9d18b73105e9f327d4d974f798fb3f76..ecdf8c71a77d1dde73d85b2b582e626f3197c21a 100644
--- a/source/dnode/mnode/sdb/src/sdbHash.c
+++ b/source/dnode/mnode/sdb/src/sdbHash.c
@@ -40,6 +40,8 @@ const char *sdbTableName(ESdbType type) {
return "auth";
case SDB_ACCT:
return "acct";
+ case SDB_STREAM_CK:
+ return "stream_ck";
case SDB_STREAM:
return "stream";
case SDB_OFFSET:
@@ -83,7 +85,7 @@ const char *sdbStatusName(ESdbStatus status) {
}
void sdbPrintOper(SSdb *pSdb, SSdbRow *pRow, const char *oper) {
-#if 0
+#if 1
EKeyType keyType = pSdb->keyTypes[pRow->type];
if (keyType == SDB_KEY_BINARY) {
@@ -219,14 +221,15 @@ static int32_t sdbDeleteRow(SSdb *pSdb, SHashObj *hash, SSdbRaw *pRaw, SSdbRow *
return terrno;
}
SSdbRow *pOldRow = *ppOldRow;
-
pOldRow->status = pRaw->status;
+
+ atomic_add_fetch_32(&pOldRow->refCount, 1);
sdbPrintOper(pSdb, pOldRow, "delete");
taosHashRemove(hash, pOldRow->pObj, keySize);
+ pSdb->tableVer[pOldRow->type]++;
taosThreadRwlockUnlock(pLock);
- pSdb->tableVer[pOldRow->type]++;
sdbFreeRow(pSdb, pRow, false);
sdbCheckRow(pSdb, pOldRow);
@@ -315,7 +318,7 @@ static void sdbCheckRow(SSdb *pSdb, SSdbRow *pRow) {
TdThreadRwlock *pLock = &pSdb->locks[pRow->type];
taosThreadRwlockWrlock(pLock);
- int32_t ref = atomic_load_32(&pRow->refCount);
+ int32_t ref = atomic_sub_fetch_32(&pRow->refCount, 1);
sdbPrintOper(pSdb, pRow, "check");
if (ref <= 0 && pRow->status == SDB_STATUS_DROPPED) {
sdbFreeRow(pSdb, pRow, true);
diff --git a/source/dnode/mnode/sdb/src/sdbRaw.c b/source/dnode/mnode/sdb/src/sdbRaw.c
index 95985cd3d933efa12dc70cd618288dcca57cfad9..3a16ee3f137d0736136f0439f12dd77163beb72a 100644
--- a/source/dnode/mnode/sdb/src/sdbRaw.c
+++ b/source/dnode/mnode/sdb/src/sdbRaw.c
@@ -37,7 +37,7 @@ SSdbRaw *sdbAllocRaw(ESdbType type, int8_t sver, int32_t dataLen) {
pRaw->sver = sver;
pRaw->dataLen = dataLen;
-#if 0
+#if 1
mTrace("raw:%p, is created, len:%d table:%s", pRaw, dataLen, sdbTableName(type));
#endif
return pRaw;
@@ -45,7 +45,7 @@ SSdbRaw *sdbAllocRaw(ESdbType type, int8_t sver, int32_t dataLen) {
void sdbFreeRaw(SSdbRaw *pRaw) {
if (pRaw != NULL) {
-#if 0
+#if 1
mTrace("raw:%p, is freed", pRaw);
#endif
taosMemoryFree(pRaw);
diff --git a/source/dnode/mnode/sdb/src/sdbRow.c b/source/dnode/mnode/sdb/src/sdbRow.c
index b362ee3a454a10c6296c1f0e8bc480f7446e7ea6..c078e7eb21016e251a0e70acd828631dbf86396e 100644
--- a/source/dnode/mnode/sdb/src/sdbRow.c
+++ b/source/dnode/mnode/sdb/src/sdbRow.c
@@ -23,7 +23,7 @@ SSdbRow *sdbAllocRow(int32_t objSize) {
return NULL;
}
-#if 0
+#if 1
mTrace("row:%p, is created, len:%d", pRow->pObj, objSize);
#endif
return pRow;
@@ -47,7 +47,7 @@ void sdbFreeRow(SSdb *pSdb, SSdbRow *pRow, bool callFunc) {
sdbPrintOper(pSdb, pRow, "free");
-#if 0
+#if 1
mTrace("row:%p, is freed", pRow->pObj);
#endif
taosMemoryFreeClear(pRow);
diff --git a/source/dnode/qnode/src/qnode.c b/source/dnode/qnode/src/qnode.c
index b65189153ea4f0aa36680586e472eac4007a457f..efdc8b46934f9677868312c9310cffe989e73ab1 100644
--- a/source/dnode/qnode/src/qnode.c
+++ b/source/dnode/qnode/src/qnode.c
@@ -26,7 +26,7 @@ SQnode *qndOpen(const SQnodeOpt *pOption) {
return NULL;
}
- if (qWorkerInit(NODE_TYPE_QNODE, pQnode->qndId, NULL, (void **)&pQnode->pQuery, &pOption->msgCb)) {
+ if (qWorkerInit(NODE_TYPE_QNODE, pQnode->qndId, (void **)&pQnode->pQuery, &pOption->msgCb)) {
taosMemoryFreeClear(pQnode);
return NULL;
}
diff --git a/source/dnode/vnode/CMakeLists.txt b/source/dnode/vnode/CMakeLists.txt
index a3e17f53774c82ea9fca1ff0a88943c8e7971725..1f7a059ffc30623ddbdc442a9cbeb770afb57fff 100644
--- a/source/dnode/vnode/CMakeLists.txt
+++ b/source/dnode/vnode/CMakeLists.txt
@@ -29,6 +29,7 @@ target_sources(
# sma
"src/sma/smaEnv.c"
"src/sma/smaUtil.c"
+ "src/sma/smaFS.c"
"src/sma/smaOpen.c"
"src/sma/smaCommit.c"
"src/sma/smaRollup.c"
@@ -49,6 +50,9 @@ target_sources(
"src/tsdb/tsdbSnapshot.c"
"src/tsdb/tsdbCacheRead.c"
"src/tsdb/tsdbRetention.c"
+ "src/tsdb/tsdbDiskData.c"
+ "src/tsdb/tsdbCompact.c"
+ "src/tsdb/tsdbMergeTree.c"
# tq
"src/tq/tq.c"
diff --git a/source/dnode/vnode/inc/vnode.h b/source/dnode/vnode/inc/vnode.h
index a475e5409a4da5261fb9fb8a8febdd7a6cc67f71..f0fb8d4b021da7ace48bf513ff2aabb5d3e920ba 100644
--- a/source/dnode/vnode/inc/vnode.h
+++ b/source/dnode/vnode/inc/vnode.h
@@ -125,11 +125,16 @@ int32_t metaTbCursorNext(SMTbCursor *pTbCur);
// typedef struct STsdb STsdb;
typedef struct STsdbReader STsdbReader;
+#define TSDB_DEFAULT_STT_FILE 8
+#define TSDB_DEFAULT_PAGE_SIZE 4096
+
#define TIMEWINDOW_RANGE_CONTAINED 1
#define TIMEWINDOW_RANGE_EXTERNAL 2
-#define LASTROW_RETRIEVE_TYPE_ALL 0x1
-#define LASTROW_RETRIEVE_TYPE_SINGLE 0x2
+#define CACHESCAN_RETRIEVE_TYPE_ALL 0x1
+#define CACHESCAN_RETRIEVE_TYPE_SINGLE 0x2
+#define CACHESCAN_RETRIEVE_LAST_ROW 0x4
+#define CACHESCAN_RETRIEVE_LAST 0x8
int32_t tsdbSetTableId(STsdbReader *pReader, int64_t uid);
int32_t tsdbReaderOpen(SVnode *pVnode, SQueryTableDataCond *pCond, SArray *pTableList, STsdbReader **ppReader,
@@ -146,15 +151,41 @@ void *tsdbGetIdx(SMeta *pMeta);
void *tsdbGetIvtIdx(SMeta *pMeta);
uint64_t getReaderMaxVersion(STsdbReader *pReader);
-int32_t tsdbLastRowReaderOpen(void *pVnode, int32_t type, SArray *pTableIdList, int32_t numOfCols, void **pReader);
-int32_t tsdbRetrieveLastRow(void *pReader, SSDataBlock *pResBlock, const int32_t *slotIds, SArray *pTableUids);
-int32_t tsdbLastrowReaderClose(void *pReader);
+int32_t tsdbCacherowsReaderOpen(void *pVnode, int32_t type, SArray *pTableIdList, int32_t numOfCols, void **pReader);
+int32_t tsdbRetrieveCacheRows(void *pReader, SSDataBlock *pResBlock, const int32_t *slotIds, SArray *pTableUids);
+int32_t tsdbCacherowsReaderClose(void *pReader);
int32_t tsdbGetTableSchema(SVnode *pVnode, int64_t uid, STSchema **pSchema, int64_t *suid);
void tsdbCacheSetCapacity(SVnode *pVnode, size_t capacity);
size_t tsdbCacheGetCapacity(SVnode *pVnode);
+size_t tsdbCacheGetUsage(SVnode *pVnode);
// tq
+typedef struct SMetaTableInfo {
+ int64_t suid;
+ int64_t uid;
+ SSchemaWrapper *schema;
+ char tbName[TSDB_TABLE_NAME_LEN];
+} SMetaTableInfo;
+
+typedef struct SIdInfo {
+ int64_t version;
+ int32_t index;
+} SIdInfo;
+
+typedef struct SSnapContext {
+ SMeta *pMeta;
+ int64_t snapVersion;
+ TBC *pCur;
+ int64_t suid;
+ int8_t subType;
+ SHashObj *idVersion;
+ SHashObj *suidInfo;
+ SArray *idList;
+ int32_t index;
+ bool withMeta;
+ bool queryMetaOrData; // true-get meta, false-get data
+} SSnapContext;
typedef struct STqReader {
int64_t ver;
@@ -186,7 +217,7 @@ int32_t tqReaderRemoveTbUidList(STqReader *pReader, const SArray *tbUidList);
int32_t tqSeekVer(STqReader *pReader, int64_t ver);
int32_t tqNextBlock(STqReader *pReader, SFetchRet *ret);
-int32_t tqReaderSetDataMsg(STqReader *pReader, SSubmitReq *pMsg, int64_t ver);
+int32_t tqReaderSetDataMsg(STqReader *pReader, const SSubmitReq *pMsg, int64_t ver);
bool tqNextDataBlock(STqReader *pReader);
bool tqNextDataBlockFilterOut(STqReader *pReader, SHashObj *filterOutUids);
int32_t tqRetrieveDataBlock(SSDataBlock *pBlock, STqReader *pReader);
@@ -205,6 +236,13 @@ int32_t vnodeSnapWriterOpen(SVnode *pVnode, int64_t sver, int64_t ever, SVSnapWr
int32_t vnodeSnapWriterClose(SVSnapWriter *pWriter, int8_t rollback, SSnapshot *pSnapshot);
int32_t vnodeSnapWrite(SVSnapWriter *pWriter, uint8_t *pData, uint32_t nData);
+int32_t buildSnapContext(SMeta *pMeta, int64_t snapVersion, int64_t suid, int8_t subType, bool withMeta,
+ SSnapContext **ctxRet);
+int32_t getMetafromSnapShot(SSnapContext *ctx, void **pBuf, int32_t *contLen, int16_t *type, int64_t *uid);
+SMetaTableInfo getUidfromSnapShot(SSnapContext *ctx);
+int32_t setForSnapShot(SSnapContext *ctx, int64_t uid);
+int32_t destroySnapContext(SSnapContext *ctx);
+
// structs
struct STsdbCfg {
int8_t precision;
@@ -226,6 +264,7 @@ typedef struct {
int64_t numOfNTables;
int64_t numOfNTimeSeries;
int64_t numOfTimeSeries;
+ int64_t itvTimeSeries;
int64_t pointsWritten;
int64_t totalStorage;
int64_t compStorage;
@@ -252,6 +291,10 @@ struct SVnodeCfg {
SVnodeStats vndStats;
uint32_t hashBegin;
uint32_t hashEnd;
+ int16_t sttTrigger;
+ int16_t hashPrefix;
+ int16_t hashSuffix;
+ int32_t tsdbPageSize;
};
typedef struct {
diff --git a/source/dnode/vnode/src/inc/sma.h b/source/dnode/vnode/src/inc/sma.h
index abfffc045f3ba5abeac1d171e0751bfeeadcb1cb..dade85b12d2903cbc8e3e089ab5701e1e07a4369 100644
--- a/source/dnode/vnode/src/inc/sma.h
+++ b/source/dnode/vnode/src/inc/sma.h
@@ -38,9 +38,10 @@ typedef struct SSmaEnv SSmaEnv;
typedef struct SSmaStat SSmaStat;
typedef struct STSmaStat STSmaStat;
typedef struct SRSmaStat SRSmaStat;
-typedef struct SSmaKey SSmaKey;
+typedef struct SRSmaRef SRSmaRef;
typedef struct SRSmaInfo SRSmaInfo;
typedef struct SRSmaInfoItem SRSmaInfoItem;
+typedef struct SRSmaFS SRSmaFS;
typedef struct SQTaskFile SQTaskFile;
typedef struct SQTaskFReader SQTaskFReader;
typedef struct SQTaskFWriter SQTaskFWriter;
@@ -54,10 +55,21 @@ struct SSmaEnv {
#define SMA_ENV_FLG_CLOSE ((int8_t)0x1)
+struct SRSmaRef {
+ int64_t refId; // for SRSmaStat
+ int64_t suid;
+};
+
typedef struct {
int8_t inited;
int32_t rsetId;
void *tmrHandle; // shared by all fetch tasks
+ /**
+ * @brief key: void* of SRSmaInfoItem, value: SRSmaRef
+ * N.B. Although there is a very small possibility that "void*" point to different objects while with the same
+ * address after release/renew, the functionality is not affected as it just used to fetch the rsma results.
+ */
+ SHashObj *refHash; // shared by all vgroups
} SSmaMgmt;
#define SMA_ENV_LOCK(env) (&(env)->lock)
@@ -73,20 +85,25 @@ struct STSmaStat {
struct SQTaskFile {
volatile int32_t nRef;
- int64_t commitID;
+ int32_t padding;
+ int64_t version;
int64_t size;
};
struct SQTaskFReader {
- SSma *pSma;
- SQTaskFile fTask;
- TdFilePtr pReadH;
+ SSma *pSma;
+ int64_t version;
+ TdFilePtr pReadH;
};
struct SQTaskFWriter {
- SSma *pSma;
- SQTaskFile fTask;
- TdFilePtr pWriteH;
- char *fname;
+ SSma *pSma;
+ int64_t version;
+ TdFilePtr pWriteH;
+ char *fname;
+};
+
+struct SRSmaFS {
+ SArray *aQTaskInf; // array of SQTaskFile
};
struct SRSmaStat {
@@ -95,9 +112,11 @@ struct SRSmaStat {
int64_t refId; // shared by fetch tasks
volatile int64_t nBufItems; // number of items in queue buffer
SRWLatch lock; // r/w lock for rsma fs(e.g. qtaskinfo)
- int8_t triggerStat; // shared by fetch tasks
- int8_t commitStat; // 0 not in committing, 1 in committing
- SArray *aTaskFile; // qTaskFiles committed recently(for recovery/snapshot r/w)
+ volatile int32_t nFetchAll; // active number of fetch all
+ volatile int8_t triggerStat; // shared by fetch tasks
+ volatile int8_t commitStat; // 0 not in committing, 1 in committing
+ volatile int8_t delFlag; // 0 no deleted SRSmaInfo, 1 has deleted SRSmaInfo
+ SRSmaFS fs; // for recovery/snapshot r/w
SHashObj *infoHash; // key: suid, value: SRSmaInfo
tsem_t notEmpty; // has items in queue buffer
};
@@ -117,21 +136,23 @@ struct SSmaStat {
#define RSMA_TRIGGER_STAT(r) (&(r)->triggerStat)
#define RSMA_COMMIT_STAT(r) (&(r)->commitStat)
#define RSMA_REF_ID(r) ((r)->refId)
+#define RSMA_FS(r) (&(r)->fs)
#define RSMA_FS_LOCK(r) (&(r)->lock)
struct SRSmaInfoItem {
int8_t level : 4;
int8_t fetchLevel : 4;
int8_t triggerStat;
- uint16_t nSkipped;
+ uint16_t nScanned;
int32_t maxDelay; // ms
tmr_h tmrId;
+ void *pStreamState;
};
struct SRSmaInfo {
+ SSma *pSma;
STSchema *pTSchema;
int64_t suid;
- int64_t refId; // refId of SRSmaStat
int64_t lastRecv; // ms
int8_t assigned; // 0 idle, 1 assgined for exec
int8_t delFlag;
@@ -162,14 +183,6 @@ enum {
TASK_TRIGGER_STAT_DROPPED = 5,
};
-enum {
- RSMA_ROLE_CREATE = 0,
- RSMA_ROLE_DROP = 1,
- RSMA_ROLE_SUBMIT = 2,
- RSMA_ROLE_FETCH = 3,
- RSMA_ROLE_ITERATE = 4,
-};
-
enum {
RSMA_RESTORE_REBOOT = 1,
RSMA_RESTORE_SYNC = 2,
@@ -181,89 +194,51 @@ typedef enum {
RSMA_EXEC_COMMIT = 3, // triggered by commit
} ERsmaExecType;
-void tdDestroySmaEnv(SSmaEnv *pSmaEnv);
-void *tdFreeSmaEnv(SSmaEnv *pSmaEnv);
-
-int32_t tdDropTSma(SSma *pSma, char *pMsg);
-int32_t tdDropTSmaData(SSma *pSma, int64_t indexUid);
-int32_t tdInsertRSmaData(SSma *pSma, char *msg);
-
-int32_t tdRefSmaStat(SSma *pSma, SSmaStat *pStat);
-int32_t tdUnRefSmaStat(SSma *pSma, SSmaStat *pStat);
-int32_t tdRefRSmaInfo(SSma *pSma, SRSmaInfo *pRSmaInfo);
-int32_t tdUnRefRSmaInfo(SSma *pSma, SRSmaInfo *pRSmaInfo);
-
-void *tdAcquireSmaRef(int32_t rsetId, int64_t refId);
-int32_t tdReleaseSmaRef(int32_t rsetId, int64_t refId);
-
+// sma
int32_t tdCheckAndInitSmaEnv(SSma *pSma, int8_t smaType);
-
+void tdDestroySmaEnv(SSmaEnv *pSmaEnv);
+void *tdFreeSmaEnv(SSmaEnv *pSmaEnv);
int32_t tdLockSma(SSma *pSma);
int32_t tdUnLockSma(SSma *pSma);
+void *tdAcquireSmaRef(int32_t rsetId, int64_t refId);
+int32_t tdReleaseSmaRef(int32_t rsetId, int64_t refId);
-static FORCE_INLINE int8_t tdSmaStat(STSmaStat *pTStat) {
- if (pTStat) {
- return atomic_load_8(&pTStat->state);
- }
- return TSDB_SMA_STAT_UNKNOWN;
-}
-
-static FORCE_INLINE bool tdSmaStatIsOK(STSmaStat *pTStat, int8_t *state) {
- if (!pTStat) {
- return false;
- }
-
- if (state) {
- *state = atomic_load_8(&pTStat->state);
- return *state == TSDB_SMA_STAT_OK;
- }
- return atomic_load_8(&pTStat->state) == TSDB_SMA_STAT_OK;
-}
-
-static FORCE_INLINE bool tdSmaStatIsExpired(STSmaStat *pTStat) {
- return pTStat ? (atomic_load_8(&pTStat->state) & TSDB_SMA_STAT_EXPIRED) : true;
-}
-
-static FORCE_INLINE bool tdSmaStatIsDropped(STSmaStat *pTStat) {
- return pTStat ? (atomic_load_8(&pTStat->state) & TSDB_SMA_STAT_DROPPED) : true;
+static FORCE_INLINE void tdRefSmaStat(SSma *pSma, SSmaStat *pStat) {
+ int32_t ref = T_REF_INC(pStat);
+ smaDebug("vgId:%d, ref sma stat:%p, val:%d", SMA_VID(pSma), pStat, ref);
}
-
-static FORCE_INLINE void tdSmaStatSetOK(STSmaStat *pTStat) {
- if (pTStat) {
- atomic_store_8(&pTStat->state, TSDB_SMA_STAT_OK);
- }
+static FORCE_INLINE void tdUnRefSmaStat(SSma *pSma, SSmaStat *pStat) {
+ int32_t ref = T_REF_DEC(pStat);
+ smaDebug("vgId:%d, unref sma stat:%p, val:%d", SMA_VID(pSma), pStat, ref);
}
-static FORCE_INLINE void tdSmaStatSetExpired(STSmaStat *pTStat) {
- if (pTStat) {
- atomic_or_fetch_8(&pTStat->state, TSDB_SMA_STAT_EXPIRED);
- }
+// rsma
+void *tdFreeRSmaInfo(SSma *pSma, SRSmaInfo *pInfo, bool isDeepFree);
+int32_t tdRSmaFSOpen(SSma *pSma, int64_t version);
+void tdRSmaFSClose(SRSmaFS *fs);
+int32_t tdRSmaFSRef(SSma *pSma, SRSmaStat *pStat, int64_t version);
+void tdRSmaFSUnRef(SSma *pSma, SRSmaStat *pStat, int64_t version);
+int64_t tdRSmaFSMaxVer(SSma *pSma, SRSmaStat *pStat);
+int32_t tdRSmaFSUpsertQTaskFile(SRSmaFS *pFS, SQTaskFile *qTaskFile);
+int32_t tdRSmaRestore(SSma *pSma, int8_t type, int64_t committedVer);
+int32_t tdRSmaProcessCreateImpl(SSma *pSma, SRSmaParam *param, int64_t suid, const char *tbName);
+int32_t tdRSmaProcessExecImpl(SSma *pSma, ERsmaExecType type);
+int32_t tdRSmaPersistExecImpl(SRSmaStat *pRSmaStat, SHashObj *pInfoHash);
+int32_t tdRSmaProcessRestoreImpl(SSma *pSma, int8_t type, int64_t qtaskFileVer);
+void tdRSmaQTaskInfoGetFileName(int32_t vgId, int64_t version, char *outputName);
+void tdRSmaQTaskInfoGetFullName(int32_t vgId, int64_t version, const char *path, char *outputName);
+void tdRSmaQTaskInfoGetFullPath(int32_t vgId, int8_t level, const char *path, char *outputName);
+void tdRSmaQTaskInfoGetFullPathEx(int32_t vgId, tb_uid_t suid, int8_t level, const char *path, char *outputName);
+
+static FORCE_INLINE void tdRefRSmaInfo(SSma *pSma, SRSmaInfo *pRSmaInfo) {
+ int32_t ref = T_REF_INC(pRSmaInfo);
+ smaDebug("vgId:%d, ref rsma info:%p, val:%d", SMA_VID(pSma), pRSmaInfo, ref);
}
-
-static FORCE_INLINE void tdSmaStatSetDropped(STSmaStat *pTStat) {
- if (pTStat) {
- atomic_or_fetch_8(&pTStat->state, TSDB_SMA_STAT_DROPPED);
- }
+static FORCE_INLINE void tdUnRefRSmaInfo(SSma *pSma, SRSmaInfo *pRSmaInfo) {
+ int32_t ref = T_REF_DEC(pRSmaInfo);
+ smaDebug("vgId:%d, unref rsma info:%p, val:%d", SMA_VID(pSma), pRSmaInfo, ref);
}
-void tdRSmaQTaskInfoGetFileName(int32_t vid, int64_t version, char *outputName);
-void tdRSmaQTaskInfoGetFullName(int32_t vid, int64_t version, const char *path, char *outputName);
-int32_t tdCloneRSmaInfo(SSma *pSma, SRSmaInfo *pInfo);
-void tdFreeQTaskInfo(qTaskInfo_t *taskHandle, int32_t vgId, int32_t level);
-static int32_t tdDestroySmaState(SSmaStat *pSmaStat, int8_t smaType);
-void *tdFreeSmaState(SSmaStat *pSmaStat, int8_t smaType);
-void *tdFreeRSmaInfo(SSma *pSma, SRSmaInfo *pInfo, bool isDeepFree);
-int32_t tdRSmaPersistExecImpl(SRSmaStat *pRSmaStat, SHashObj *pInfoHash);
-int32_t tdRSmaProcessExecImpl(SSma *pSma, ERsmaExecType type);
-
-int32_t tdProcessRSmaCreateImpl(SSma *pSma, SRSmaParam *param, int64_t suid, const char *tbName);
-int32_t tdProcessRSmaRestoreImpl(SSma *pSma, int8_t type, int64_t qtaskFileVer);
-int32_t tdRsmaRestore(SSma *pSma, int8_t type, int64_t committedVer);
-
-int32_t tdProcessTSmaCreateImpl(SSma *pSma, int64_t version, const char *pMsg);
-int32_t tdProcessTSmaInsertImpl(SSma *pSma, int64_t indexUid, const char *msg);
-int32_t tdProcessTSmaGetDaysImpl(SVnodeCfg *pCfg, void *pCont, uint32_t contLen, int32_t *days);
-
// smaFileUtil ================
#define TD_FILE_HEAD_SIZE 512
diff --git a/source/dnode/vnode/src/inc/tq.h b/source/dnode/vnode/src/inc/tq.h
index cb5ec7aabe48363f57b68238be80a6c124af9509..f96afe6fba1b081d9511a3fdd6503be73fa1bdc3 100644
--- a/source/dnode/vnode/src/inc/tq.h
+++ b/source/dnode/vnode/src/inc/tq.h
@@ -67,8 +67,7 @@ typedef struct {
// tqExec
typedef struct {
- char* qmsg;
- qTaskInfo_t task;
+ char* qmsg;
} STqExecCol;
typedef struct {
@@ -82,7 +81,8 @@ typedef struct {
typedef struct {
int8_t subType;
- STqReader* pExecReader;
+ STqReader* pExecReader;
+ qTaskInfo_t task;
union {
STqExecCol execCol;
STqExecTb execTb;
@@ -101,7 +101,6 @@ typedef struct {
int64_t snapshotVer;
- // TODO remove
SWalReader* pWalReader;
SWalRef* pRef;
@@ -114,10 +113,20 @@ typedef struct {
} STqHandle;
+typedef struct {
+ SMqDataRsp dataRsp;
+ SMqRspHead rspHead;
+ char subKey[TSDB_SUBSCRIBE_KEY_LEN];
+ SRpcHandleInfo pInfo;
+} STqPushEntry;
+
struct STQ {
- SVnode* pVnode;
- char* path;
- SHashObj* pPushMgr; // consumerId -> STqHandle*
+ SVnode* pVnode;
+ char* path;
+
+ SRWLatch pushLock;
+
+ SHashObj* pPushMgr; // consumerId -> STqPushEntry
SHashObj* pHandle; // subKey -> STqHandle
SHashObj* pCheckInfo; // topic -> SAlterCheckInfo
@@ -141,12 +150,15 @@ int32_t tEncodeSTqHandle(SEncoder* pEncoder, const STqHandle* pHandle);
int32_t tDecodeSTqHandle(SDecoder* pDecoder, STqHandle* pHandle);
// tqRead
-int64_t tqScan(STQ* pTq, const STqHandle* pHandle, SMqDataRsp* pRsp, STqOffsetVal* offset);
+int32_t tqScan(STQ* pTq, const STqHandle* pHandle, STaosxRsp* pRsp, SMqMetaRsp* pMetaRsp, STqOffsetVal* offset);
+int32_t tqScanData(STQ* pTq, const STqHandle* pHandle, SMqDataRsp* pRsp, STqOffsetVal* pOffset);
int64_t tqFetchLog(STQ* pTq, STqHandle* pHandle, int64_t* fetchOffset, SWalCkHead** pHeadWithCkSum);
// tqExec
-int32_t tqLogScanExec(STQ* pTq, STqExecHandle* pExec, SSubmitReq* pReq, SMqDataRsp* pRsp);
+int32_t tqTaosxScanLog(STQ* pTq, STqHandle* pHandle, SSubmitReq* pReq, STaosxRsp* pRsp);
+int32_t tqAddBlockDataToRsp(const SSDataBlock* pBlock, SMqDataRsp* pRsp, int32_t numOfCols);
int32_t tqSendDataRsp(STQ* pTq, const SRpcMsg* pMsg, const SMqPollReq* pReq, const SMqDataRsp* pRsp);
+int32_t tqPushDataRsp(STQ* pTq, STqPushEntry* pPushEntry);
// tqMeta
int32_t tqMetaOpen(STQ* pTq);
@@ -176,17 +188,6 @@ void tqTableSink(SStreamTask* pTask, void* vnode, int64_t ver, void* data);
char* tqOffsetBuildFName(const char* path, int32_t ver);
int32_t tqOffsetRestoreFromFile(STqOffsetStore* pStore, const char* fname);
-static FORCE_INLINE void tqOffsetResetToData(STqOffsetVal* pOffsetVal, int64_t uid, int64_t ts) {
- pOffsetVal->type = TMQ_OFFSET__SNAPSHOT_DATA;
- pOffsetVal->uid = uid;
- pOffsetVal->ts = ts;
-}
-
-static FORCE_INLINE void tqOffsetResetToLog(STqOffsetVal* pOffsetVal, int64_t ver) {
- pOffsetVal->type = TMQ_OFFSET__LOG;
- pOffsetVal->version = ver;
-}
-
// tqStream
int32_t tqExpandTask(STQ* pTq, SStreamTask* pTask);
diff --git a/source/dnode/vnode/src/inc/tsdb.h b/source/dnode/vnode/src/inc/tsdb.h
index d1f5cfb122d6fdfee2cb8f54911a07a25cbb078c..75b2f7409626343d117b1e5840e7cfdb43e75d40 100644
--- a/source/dnode/vnode/src/inc/tsdb.h
+++ b/source/dnode/vnode/src/inc/tsdb.h
@@ -32,6 +32,12 @@ extern "C" {
#define tsdbTrace(...) do { if (tsdbDebugFlag & DEBUG_TRACE) { taosPrintLog("TSD ", DEBUG_TRACE, tsdbDebugFlag, __VA_ARGS__); }} while(0)
// clang-format on
+#define TSDB_CHECK_CODE(CODE, LINO, LABEL) \
+ if (CODE) { \
+ LINO = __LINE__; \
+ goto LABEL; \
+ }
+
typedef struct TSDBROW TSDBROW;
typedef struct TABLEID TABLEID;
typedef struct TSDBKEY TSDBKEY;
@@ -42,15 +48,14 @@ typedef struct SMemTable SMemTable;
typedef struct STbDataIter STbDataIter;
typedef struct SMapData SMapData;
typedef struct SBlockIdx SBlockIdx;
-typedef struct SBlock SBlock;
-typedef struct SBlockL SBlockL;
-typedef struct SColData SColData;
+typedef struct SDataBlk SDataBlk;
+typedef struct SSttBlk SSttBlk;
typedef struct SDiskDataHdr SDiskDataHdr;
typedef struct SBlockData SBlockData;
typedef struct SDelFile SDelFile;
typedef struct SHeadFile SHeadFile;
typedef struct SDataFile SDataFile;
-typedef struct SLastFile SLastFile;
+typedef struct SSttFile SSttFile;
typedef struct SSmaFile SSmaFile;
typedef struct SDFileSet SDFileSet;
typedef struct SDataFWriter SDataFWriter;
@@ -64,21 +69,39 @@ typedef struct STsdbReadSnap STsdbReadSnap;
typedef struct SBlockInfo SBlockInfo;
typedef struct SSmaInfo SSmaInfo;
typedef struct SBlockCol SBlockCol;
+typedef struct SVersionRange SVersionRange;
+typedef struct SLDataIter SLDataIter;
#define TSDB_FILE_DLMT ((uint32_t)0xF00AFA0F)
#define TSDB_MAX_SUBBLOCKS 8
#define TSDB_FHDR_SIZE 512
-#define HAS_NONE ((int8_t)0x1)
-#define HAS_NULL ((int8_t)0x2)
-#define HAS_VALUE ((int8_t)0x4)
-
#define VERSION_MIN 0
#define VERSION_MAX INT64_MAX
#define TSDBKEY_MIN ((TSDBKEY){.ts = TSKEY_MIN, .version = VERSION_MIN})
#define TSDBKEY_MAX ((TSDBKEY){.ts = TSKEY_MAX, .version = VERSION_MAX})
+#define TABLE_SAME_SCHEMA(SUID1, UID1, SUID2, UID2) ((SUID1) ? (SUID1) == (SUID2) : (UID1) == (UID2))
+
+#define PAGE_CONTENT_SIZE(PAGE) ((PAGE) - sizeof(TSCKSUM))
+#define LOGIC_TO_FILE_OFFSET(LOFFSET, PAGE) \
+ ((LOFFSET) / PAGE_CONTENT_SIZE(PAGE) * (PAGE) + (LOFFSET) % PAGE_CONTENT_SIZE(PAGE))
+#define FILE_TO_LOGIC_OFFSET(OFFSET, PAGE) ((OFFSET) / (PAGE)*PAGE_CONTENT_SIZE(PAGE) + (OFFSET) % (PAGE))
+#define PAGE_OFFSET(PGNO, PAGE) (((PGNO)-1) * (PAGE))
+#define OFFSET_PGNO(OFFSET, PAGE) ((OFFSET) / (PAGE) + 1)
+
+static FORCE_INLINE int64_t tsdbLogicToFileSize(int64_t lSize, int32_t szPage) {
+ int64_t fOffSet = LOGIC_TO_FILE_OFFSET(lSize, szPage);
+ int64_t pgno = OFFSET_PGNO(fOffSet, szPage);
+
+ if (fOffSet % szPage == 0) {
+ pgno--;
+ }
+
+ return pgno * szPage;
+}
+
// tsdbUtil.c ==============================================================================================
// TSDBROW
#define TSDBROW_TS(ROW) (((ROW)->type == 0) ? (ROW)->pTSRow->ts : (ROW)->pBlockData->aTSKEY[(ROW)->iRow])
@@ -111,29 +134,20 @@ int32_t tTABLEIDCmprFn(const void *p1, const void *p2);
int32_t tPutBlockCol(uint8_t *p, void *ph);
int32_t tGetBlockCol(uint8_t *p, void *ph);
int32_t tBlockColCmprFn(const void *p1, const void *p2);
-// SBlock
-void tBlockReset(SBlock *pBlock);
-int32_t tPutBlock(uint8_t *p, void *ph);
-int32_t tGetBlock(uint8_t *p, void *ph);
-int32_t tBlockCmprFn(const void *p1, const void *p2);
-bool tBlockHasSma(SBlock *pBlock);
-// SBlockL
-int32_t tPutBlockL(uint8_t *p, void *ph);
-int32_t tGetBlockL(uint8_t *p, void *ph);
+// SDataBlk
+void tDataBlkReset(SDataBlk *pBlock);
+int32_t tPutDataBlk(uint8_t *p, void *ph);
+int32_t tGetDataBlk(uint8_t *p, void *ph);
+int32_t tDataBlkCmprFn(const void *p1, const void *p2);
+bool tDataBlkHasSma(SDataBlk *pDataBlk);
+// SSttBlk
+int32_t tPutSttBlk(uint8_t *p, void *ph);
+int32_t tGetSttBlk(uint8_t *p, void *ph);
// SBlockIdx
int32_t tPutBlockIdx(uint8_t *p, void *ph);
int32_t tGetBlockIdx(uint8_t *p, void *ph);
int32_t tCmprBlockIdx(void const *lhs, void const *rhs);
int32_t tCmprBlockL(void const *lhs, void const *rhs);
-// SColdata
-void tColDataInit(SColData *pColData, int16_t cid, int8_t type, int8_t smaOn);
-void tColDataReset(SColData *pColData);
-void tColDataClear(void *ph);
-int32_t tColDataAppendValue(SColData *pColData, SColVal *pColVal);
-int32_t tColDataGetValue(SColData *pColData, int32_t iRow, SColVal *pColVal);
-int32_t tColDataCopy(SColData *pColDataSrc, SColData *pColDataDest);
-int32_t tPutColData(uint8_t *p, SColData *pColData);
-int32_t tGetColData(uint8_t *p, SColData *pColData);
// SBlockData
#define tBlockDataFirstRow(PBLOCKDATA) tsdbRowFromBlockData(PBLOCKDATA, 0)
#define tBlockDataLastRow(PBLOCKDATA) tsdbRowFromBlockData(PBLOCKDATA, (PBLOCKDATA)->nRow - 1)
@@ -142,7 +156,7 @@ int32_t tGetColData(uint8_t *p, SColData *pColData);
int32_t tBlockDataCreate(SBlockData *pBlockData);
void tBlockDataDestroy(SBlockData *pBlockData, int8_t deepClear);
-int32_t tBlockDataInit(SBlockData *pBlockData, int64_t suid, int64_t uid, STSchema *pTSchema);
+int32_t tBlockDataInit(SBlockData *pBlockData, TABLEID *pId, STSchema *pTSchema, int16_t *aCid, int32_t nCid);
int32_t tBlockDataInitEx(SBlockData *pBlockData, SBlockData *pBlockDataFrom);
void tBlockDataReset(SBlockData *pBlockData);
int32_t tBlockDataAppendRow(SBlockData *pBlockData, TSDBROW *pRow, STSchema *pTSchema, int64_t uid);
@@ -170,6 +184,7 @@ int32_t tGetDelData(uint8_t *p, void *ph);
void tMapDataReset(SMapData *pMapData);
void tMapDataClear(SMapData *pMapData);
int32_t tMapDataPutItem(SMapData *pMapData, void *pItem, int32_t (*tPutItemFn)(uint8_t *, void *));
+int32_t tMapDataCopy(SMapData *pFrom, SMapData *pTo);
void tMapDataGetItemByIdx(SMapData *pMapData, int32_t idx, void *pItem, int32_t (*tGetItemFn)(uint8_t *, void *));
int32_t tMapDataSearch(SMapData *pMapData, void *pSearchItem, int32_t (*tGetItemFn)(uint8_t *, void *),
int32_t (*tItemCmprFn)(const void *, const void *), void *pItem);
@@ -191,7 +206,6 @@ int32_t tsdbCmprColData(SColData *pColData, int8_t cmprAlg, SBlockCol *pBlockCol
uint8_t **ppBuf);
int32_t tsdbDecmprColData(uint8_t *pIn, SBlockCol *pBlockCol, int8_t cmprAlg, int32_t nVal, SColData *pColData,
uint8_t **ppBuf);
-int32_t tsdbReadAndCheck(TdFilePtr pFD, int64_t offset, uint8_t **ppOut, int32_t size, int8_t toCheck);
// tsdbMemTable ==============================================================================================
// SMemTable
int32_t tsdbMemTableCreate(STsdb *pTsdb, SMemTable **ppMemTable);
@@ -215,7 +229,7 @@ bool tsdbDelFileIsSame(SDelFile *pDelFile1, SDelFile *pDelFile2);
int32_t tsdbDFileRollback(STsdb *pTsdb, SDFileSet *pSet, EDataFileT ftype);
int32_t tPutHeadFile(uint8_t *p, SHeadFile *pHeadFile);
int32_t tPutDataFile(uint8_t *p, SDataFile *pDataFile);
-int32_t tPutLastFile(uint8_t *p, SLastFile *pLastFile);
+int32_t tPutSttFile(uint8_t *p, SSttFile *pSttFile);
int32_t tPutSmaFile(uint8_t *p, SSmaFile *pSmaFile);
int32_t tPutDelFile(uint8_t *p, SDelFile *pDelFile);
int32_t tGetDelFile(uint8_t *p, SDelFile *pDelFile);
@@ -224,7 +238,7 @@ int32_t tGetDFileSet(uint8_t *p, SDFileSet *pSet);
void tsdbHeadFileName(STsdb *pTsdb, SDiskID did, int32_t fid, SHeadFile *pHeadF, char fname[]);
void tsdbDataFileName(STsdb *pTsdb, SDiskID did, int32_t fid, SDataFile *pDataF, char fname[]);
-void tsdbLastFileName(STsdb *pTsdb, SDiskID did, int32_t fid, SLastFile *pLastF, char fname[]);
+void tsdbSttFileName(STsdb *pTsdb, SDiskID did, int32_t fid, SSttFile *pSttF, char fname[]);
void tsdbSmaFileName(STsdb *pTsdb, SDiskID did, int32_t fid, SSmaFile *pSmaF, char fname[]);
// SDelFile
void tsdbDelFileName(STsdb *pTsdb, SDelFile *pFile, char fname[]);
@@ -249,8 +263,8 @@ int32_t tsdbDataFWriterOpen(SDataFWriter **ppWriter, STsdb *pTsdb, SDFileSet *pS
int32_t tsdbDataFWriterClose(SDataFWriter **ppWriter, int8_t sync);
int32_t tsdbUpdateDFileSetHeader(SDataFWriter *pWriter);
int32_t tsdbWriteBlockIdx(SDataFWriter *pWriter, SArray *aBlockIdx);
-int32_t tsdbWriteBlock(SDataFWriter *pWriter, SMapData *pMapData, SBlockIdx *pBlockIdx);
-int32_t tsdbWriteBlockL(SDataFWriter *pWriter, SArray *aBlockL);
+int32_t tsdbWriteDataBlk(SDataFWriter *pWriter, SMapData *mDataBlk, SBlockIdx *pBlockIdx);
+int32_t tsdbWriteSttBlk(SDataFWriter *pWriter, SArray *aSttBlk);
int32_t tsdbWriteBlockData(SDataFWriter *pWriter, SBlockData *pBlockData, SBlockInfo *pBlkInfo, SSmaInfo *pSmaInfo,
int8_t cmprAlg, int8_t toLast);
@@ -259,11 +273,12 @@ int32_t tsdbDFileSetCopy(STsdb *pTsdb, SDFileSet *pSetFrom, SDFileSet *pSetTo);
int32_t tsdbDataFReaderOpen(SDataFReader **ppReader, STsdb *pTsdb, SDFileSet *pSet);
int32_t tsdbDataFReaderClose(SDataFReader **ppReader);
int32_t tsdbReadBlockIdx(SDataFReader *pReader, SArray *aBlockIdx);
-int32_t tsdbReadBlock(SDataFReader *pReader, SBlockIdx *pBlockIdx, SMapData *pMapData);
-int32_t tsdbReadBlockL(SDataFReader *pReader, SArray *aBlockL);
-int32_t tsdbReadBlockSma(SDataFReader *pReader, SBlock *pBlock, SArray *aColumnDataAgg);
-int32_t tsdbReadDataBlock(SDataFReader *pReader, SBlock *pBlock, SBlockData *pBlockData);
-int32_t tsdbReadLastBlock(SDataFReader *pReader, SBlockL *pBlockL, SBlockData *pBlockData);
+int32_t tsdbReadDataBlk(SDataFReader *pReader, SBlockIdx *pBlockIdx, SMapData *mDataBlk);
+int32_t tsdbReadSttBlk(SDataFReader *pReader, int32_t iStt, SArray *aSttBlk);
+int32_t tsdbReadBlockSma(SDataFReader *pReader, SDataBlk *pBlock, SArray *aColumnDataAgg);
+int32_t tsdbReadDataBlock(SDataFReader *pReader, SDataBlk *pBlock, SBlockData *pBlockData);
+int32_t tsdbReadSttBlock(SDataFReader *pReader, int32_t iStt, SSttBlk *pSttBlk, SBlockData *pBlockData);
+int32_t tsdbReadSttBlockEx(SDataFReader *pReader, int32_t iStt, SSttBlk *pSttBlk, SBlockData *pBlockData);
// SDelFWriter
int32_t tsdbDelFWriterOpen(SDelFWriter **ppWriter, SDelFile *pFile, STsdb *pTsdb);
int32_t tsdbDelFWriterClose(SDelFWriter **ppWriter, int8_t sync);
@@ -276,8 +291,10 @@ int32_t tsdbDelFReaderClose(SDelFReader **ppReader);
int32_t tsdbReadDelData(SDelFReader *pReader, SDelIdx *pDelIdx, SArray *aDelData);
int32_t tsdbReadDelIdx(SDelFReader *pReader, SArray *aDelIdx);
// tsdbRead.c ==============================================================================================
-int32_t tsdbTakeReadSnap(STsdb *pTsdb, STsdbReadSnap **ppSnap);
-void tsdbUntakeReadSnap(STsdb *pTsdb, STsdbReadSnap *pSnap);
+int32_t tsdbTakeReadSnap(STsdb *pTsdb, STsdbReadSnap **ppSnap, const char *id);
+void tsdbUntakeReadSnap(STsdb *pTsdb, STsdbReadSnap *pSnap, const char *id);
+// tsdbMerge.c ==============================================================================================
+int32_t tsdbMerge(STsdb *pTsdb);
#define TSDB_CACHE_NO(c) ((c).cacheLast == 0)
#define TSDB_CACHE_LAST_ROW(c) (((c).cacheLast & 1) > 0)
@@ -324,6 +341,11 @@ struct TSDBKEY {
TSKEY ts;
};
+struct SVersionRange {
+ uint64_t minVer;
+ uint64_t maxVer;
+};
+
typedef struct SMemSkipListNode SMemSkipListNode;
struct SMemSkipListNode {
int8_t level;
@@ -416,7 +438,7 @@ struct SSmaInfo {
int32_t size;
};
-struct SBlock {
+struct SDataBlk {
TSDBKEY minKey;
TSDBKEY maxKey;
int64_t minVer;
@@ -428,7 +450,7 @@ struct SBlock {
SSmaInfo smaInfo;
};
-struct SBlockL {
+struct SSttBlk {
int64_t suid;
int64_t minUid;
int64_t maxUid;
@@ -440,18 +462,6 @@ struct SBlockL {
SBlockInfo bInfo;
};
-struct SColData {
- int16_t cid;
- int8_t type;
- int8_t smaOn;
- int32_t nVal;
- uint8_t flag;
- uint8_t *pBitMap;
- int32_t *aOffset;
- int32_t nData;
- uint8_t *pData;
-};
-
// (SBlockData){.suid = 0, .uid = 0}: block data not initialized
// (SBlockData){.suid = suid, .uid = uid}: block data for ONE child table int .data file
// (SBlockData){.suid = suid, .uid = 0}: block data for N child tables int .last file
@@ -467,12 +477,6 @@ struct SBlockData {
SArray *aColData; // SArray
};
-// ================== TSDB global config
-extern bool tsdbForceKeepFile;
-
-#define TSDB_FS_ITER_FORWARD TSDB_ORDER_ASC
-#define TSDB_FS_ITER_BACKWARD TSDB_ORDER_DESC
-
struct TABLEID {
tb_uid_t suid;
tb_uid_t uid;
@@ -536,7 +540,7 @@ struct SDataFile {
int64_t size;
};
-struct SLastFile {
+struct SSttFile {
volatile int32_t nRef;
int64_t commitID;
@@ -556,8 +560,9 @@ struct SDFileSet {
int32_t fid;
SHeadFile *pHeadF;
SDataFile *pDataF;
- SLastFile *pLastF;
SSmaFile *pSmaF;
+ uint8_t nSttF;
+ SSttFile *aSttF[TSDB_MAX_STT_TRIGGER];
};
struct SRowIter {
@@ -572,37 +577,103 @@ struct SRowMerger {
SArray *pArray; // SArray
};
-struct SDelFWriter {
- STsdb *pTsdb;
- SDelFile fDel;
- TdFilePtr pWriteH;
+typedef struct {
+ char *path;
+ int32_t szPage;
+ int32_t flag;
+ TdFilePtr pFD;
+ int64_t pgno;
+ uint8_t *pBuf;
+ int64_t szFile;
+} STsdbFD;
+struct SDelFWriter {
+ STsdb *pTsdb;
+ SDelFile fDel;
+ STsdbFD *pWriteH;
uint8_t *aBuf[1];
};
+struct STsdbReadSnap {
+ SMemTable *pMem;
+ SMemTable *pIMem;
+ STsdbFS fs;
+};
+
struct SDataFWriter {
STsdb *pTsdb;
SDFileSet wSet;
- TdFilePtr pHeadFD;
- TdFilePtr pDataFD;
- TdFilePtr pLastFD;
- TdFilePtr pSmaFD;
+ STsdbFD *pHeadFD;
+ STsdbFD *pDataFD;
+ STsdbFD *pSmaFD;
+ STsdbFD *pSttFD;
SHeadFile fHead;
SDataFile fData;
- SLastFile fLast;
SSmaFile fSma;
+ SSttFile fStt[TSDB_MAX_STT_TRIGGER];
uint8_t *aBuf[4];
};
-struct STsdbReadSnap {
- SMemTable *pMem;
- SMemTable *pIMem;
- STsdbFS fs;
+struct SDataFReader {
+ STsdb *pTsdb;
+ SDFileSet *pSet;
+ STsdbFD *pHeadFD;
+ STsdbFD *pDataFD;
+ STsdbFD *pSmaFD;
+ STsdbFD *aSttFD[TSDB_MAX_STT_TRIGGER];
+ uint8_t *aBuf[3];
};
+typedef struct {
+ int64_t suid;
+ int64_t uid;
+ TSDBROW row;
+} SRowInfo;
+
+typedef struct SSttBlockLoadInfo {
+ SBlockData blockData[2];
+ SArray *aSttBlk;
+ int32_t blockIndex[2]; // to denote the loaded block in the corresponding position.
+ int32_t currentLoadBlockIndex;
+ int32_t loadBlocks;
+ double elapsedTime;
+ STSchema *pSchema;
+ int16_t *colIds;
+ int32_t numOfCols;
+} SSttBlockLoadInfo;
+
+typedef struct SMergeTree {
+ int8_t backward;
+ SRBTree rbt;
+ SArray *pIterList;
+ SLDataIter *pIter;
+ bool destroyLoadInfo;
+ SSttBlockLoadInfo *pLoadInfo;
+ const char *idStr;
+} SMergeTree;
+
+typedef struct {
+ int64_t suid;
+ int64_t uid;
+ STSchema *pTSchema;
+} SSkmInfo;
+
+int32_t tMergeTreeOpen(SMergeTree *pMTree, int8_t backward, SDataFReader *pFReader, uint64_t suid, uint64_t uid,
+ STimeWindow *pTimeWindow, SVersionRange *pVerRange, SSttBlockLoadInfo *pBlockLoadInfo,
+ bool destroyLoadInfo, const char *idStr);
+void tMergeTreeAddIter(SMergeTree *pMTree, SLDataIter *pIter);
+bool tMergeTreeNext(SMergeTree *pMTree);
+TSDBROW tMergeTreeGetRow(SMergeTree *pMTree);
+void tMergeTreeClose(SMergeTree *pMTree);
+
+SSttBlockLoadInfo *tCreateLastBlockLoadInfo(STSchema *pSchema, int16_t *colList, int32_t numOfCols);
+void resetLastBlockLoadInfo(SSttBlockLoadInfo *pLoadInfo);
+void getLastBlockLoadInfo(SSttBlockLoadInfo *pLoadInfo, int64_t *blocks, double *el);
+void *destroyLastBlockLoadInfo(SSttBlockLoadInfo *pLoadInfo);
+
// ========== inline functions ==========
static FORCE_INLINE int32_t tsdbKeyCmprFn(const void *p1, const void *p2) {
TSDBKEY *pKey1 = (TSDBKEY *)p1;
diff --git a/source/dnode/vnode/src/inc/vnd.h b/source/dnode/vnode/src/inc/vnd.h
index 898e79928b8fbb3ceac2272b36e31b47690eee23..900d29b97ef36503ee1103231aff82b929512095 100644
--- a/source/dnode/vnode/src/inc/vnd.h
+++ b/source/dnode/vnode/src/inc/vnd.h
@@ -99,7 +99,6 @@ void vnodeSyncStart(SVnode* pVnode);
void vnodeSyncClose(SVnode* pVnode);
void vnodeRedirectRpcMsg(SVnode* pVnode, SRpcMsg* pMsg);
bool vnodeIsLeader(SVnode* pVnode);
-bool vnodeIsReadyForRead(SVnode* pVnode);
bool vnodeIsRoleLeader(SVnode* pVnode);
#ifdef __cplusplus
diff --git a/source/dnode/vnode/src/inc/vnodeInt.h b/source/dnode/vnode/src/inc/vnodeInt.h
index 9b252df58b2a87c4baf8453ad597d62e50b61a33..4c8045d651b8f895113b3349a7dd39a7958d487e 100644
--- a/source/dnode/vnode/src/inc/vnodeInt.h
+++ b/source/dnode/vnode/src/inc/vnodeInt.h
@@ -36,6 +36,7 @@
#include "tlosertree.h"
#include "tlrucache.h"
#include "tmsgcb.h"
+#include "trbtree.h"
#include "tref.h"
#include "tskiplist.h"
#include "tstream.h"
@@ -70,8 +71,8 @@ typedef struct SStreamTaskReader SStreamTaskReader;
typedef struct SStreamTaskWriter SStreamTaskWriter;
typedef struct SStreamStateReader SStreamStateReader;
typedef struct SStreamStateWriter SStreamStateWriter;
-typedef struct SRsmaSnapReader SRsmaSnapReader;
-typedef struct SRsmaSnapWriter SRsmaSnapWriter;
+typedef struct SRSmaSnapReader SRSmaSnapReader;
+typedef struct SRSmaSnapWriter SRSmaSnapWriter;
typedef struct SSnapDataHdr SSnapDataHdr;
#define VNODE_META_DIR "meta"
@@ -102,8 +103,8 @@ int metaCommit(SMeta* pMeta);
int metaCreateSTable(SMeta* pMeta, int64_t version, SVCreateStbReq* pReq);
int metaAlterSTable(SMeta* pMeta, int64_t version, SVCreateStbReq* pReq);
int metaDropSTable(SMeta* pMeta, int64_t verison, SVDropStbReq* pReq, SArray* tbUidList);
-int metaCreateTable(SMeta* pMeta, int64_t version, SVCreateTbReq* pReq, STableMetaRsp **pMetaRsp);
-int metaDropTable(SMeta* pMeta, int64_t version, SVDropTbReq* pReq, SArray* tbUids);
+int metaCreateTable(SMeta* pMeta, int64_t version, SVCreateTbReq* pReq, STableMetaRsp** pMetaRsp);
+int metaDropTable(SMeta* pMeta, int64_t version, SVDropTbReq* pReq, SArray* tbUids, int64_t* tbUid);
int metaTtlDropTable(SMeta* pMeta, int64_t ttl, SArray* tbUids);
int metaAlterTable(SMeta* pMeta, int64_t version, SVAlterTbReq* pReq, STableMetaRsp* pMetaRsp);
SSchemaWrapper* metaGetTableSchema(SMeta* pMeta, tb_uid_t uid, int32_t sver, bool isinline);
@@ -173,7 +174,8 @@ int32_t tqProcessPollReq(STQ* pTq, SRpcMsg* pMsg);
// tq-stream
int32_t tqProcessTaskDeployReq(STQ* pTq, int64_t version, char* msg, int32_t msgLen);
int32_t tqProcessTaskDropReq(STQ* pTq, int64_t version, char* msg, int32_t msgLen);
-int32_t tqProcessStreamTrigger(STQ* pTq, SSubmitReq* data, int64_t ver);
+int32_t tqProcessSubmitReq(STQ* pTq, SSubmitReq* data, int64_t ver);
+int32_t tqProcessDelReq(STQ* pTq, void* pReq, int32_t len, int64_t ver);
int32_t tqProcessTaskRunReq(STQ* pTq, SRpcMsg* pMsg);
int32_t tqProcessTaskDispatchReq(STQ* pTq, SRpcMsg* pMsg, bool exec);
int32_t tqProcessTaskRecoverReq(STQ* pTq, SRpcMsg* pMsg);
@@ -182,7 +184,7 @@ int32_t tqProcessTaskRecoverRsp(STQ* pTq, SRpcMsg* pMsg);
int32_t tqProcessTaskRetrieveReq(STQ* pTq, SRpcMsg* pMsg);
int32_t tqProcessTaskRetrieveRsp(STQ* pTq, SRpcMsg* pMsg);
-SSubmitReq* tqBlockToSubmit(SVnode* pVnode, const SArray* pBlocks, const STSchema* pSchema, bool createTb, int64_t suid,
+SSubmitReq* tqBlockToSubmit(SVnode* pVnode, const SArray* pBlocks, const STSchema* pSchema, SSchemaWrapper* pTagSchemaWrapper, bool createTb, int64_t suid,
const char* stbFullName, SBatchDeleteReq* pDeleteReq);
// sma
@@ -206,7 +208,7 @@ int32_t tdProcessRSmaCreate(SSma* pSma, SVCreateStbReq* pReq);
int32_t tdProcessRSmaSubmit(SSma* pSma, void* pMsg, int32_t inputType);
int32_t tdProcessRSmaDrop(SSma* pSma, SVDropStbReq* pReq);
int32_t tdFetchTbUidList(SSma* pSma, STbUidStore** ppStore, tb_uid_t suid, tb_uid_t uid);
-int32_t tdUpdateTbUidList(SSma* pSma, STbUidStore* pUidStore);
+int32_t tdUpdateTbUidList(SSma* pSma, STbUidStore* pUidStore, bool isAdd);
void tdUidStoreDestory(STbUidStore* pStore);
void* tdUidStoreFree(STbUidStore* pStore);
@@ -246,14 +248,14 @@ int32_t tqOffsetSnapWrite(STqOffsetWriter* pWriter, uint8_t* pData, uint32_t nDa
// SStreamTaskReader ======================================
// SStreamStateWriter =====================================
// SStreamStateReader =====================================
-// SRsmaSnapReader ========================================
-int32_t rsmaSnapReaderOpen(SSma* pSma, int64_t sver, int64_t ever, SRsmaSnapReader** ppReader);
-int32_t rsmaSnapReaderClose(SRsmaSnapReader** ppReader);
-int32_t rsmaSnapRead(SRsmaSnapReader* pReader, uint8_t** ppData);
-// SRsmaSnapWriter ========================================
-int32_t rsmaSnapWriterOpen(SSma* pSma, int64_t sver, int64_t ever, SRsmaSnapWriter** ppWriter);
-int32_t rsmaSnapWrite(SRsmaSnapWriter* pWriter, uint8_t* pData, uint32_t nData);
-int32_t rsmaSnapWriterClose(SRsmaSnapWriter** ppWriter, int8_t rollback);
+// SRSmaSnapReader ========================================
+int32_t rsmaSnapReaderOpen(SSma* pSma, int64_t sver, int64_t ever, SRSmaSnapReader** ppReader);
+int32_t rsmaSnapReaderClose(SRSmaSnapReader** ppReader);
+int32_t rsmaSnapRead(SRSmaSnapReader* pReader, uint8_t** ppData);
+// SRSmaSnapWriter ========================================
+int32_t rsmaSnapWriterOpen(SSma* pSma, int64_t sver, int64_t ever, SRSmaSnapWriter** ppWriter);
+int32_t rsmaSnapWrite(SRSmaSnapWriter* pWriter, uint8_t* pData, uint32_t nData);
+int32_t rsmaSnapWriterClose(SRSmaSnapWriter** ppWriter, int8_t rollback);
typedef struct {
int8_t streamType; // sma or other
diff --git a/source/dnode/vnode/src/meta/metaQuery.c b/source/dnode/vnode/src/meta/metaQuery.c
index 04e4c52c49d00df4c076b7488a09ff4ed83094ca..2ef4e7510ecfe2be6ba0b1d6f75fa45ac0ff5bf3 100644
--- a/source/dnode/vnode/src/meta/metaQuery.c
+++ b/source/dnode/vnode/src/meta/metaQuery.c
@@ -129,10 +129,16 @@ _err:
bool metaIsTableExist(SMeta *pMeta, tb_uid_t uid) {
// query uid.idx
+ metaRLock(pMeta);
+
if (tdbTbGet(pMeta->pUidIdx, &uid, sizeof(uid), NULL, NULL) < 0) {
+ metaULock(pMeta);
+
return false;
}
+ metaULock(pMeta);
+
return true;
}
@@ -182,9 +188,14 @@ tb_uid_t metaGetTableEntryUidByName(SMeta *pMeta, const char *name) {
}
int metaGetTableNameByUid(void *meta, uint64_t uid, char *tbName) {
+ int code = 0;
SMetaReader mr = {0};
metaReaderInit(&mr, (SMeta *)meta, 0);
- metaGetTableEntryByUid(&mr, uid);
+ code = metaGetTableEntryByUid(&mr, uid);
+ if (code < 0) {
+ metaReaderClear(&mr);
+ return -1;
+ }
STR_TO_VARSTR(tbName, mr.me.name);
metaReaderClear(&mr);
@@ -280,6 +291,42 @@ _query:
tDecoderClear(&dc);
goto _exit;
}
+ { // Traverse to find the previous qualified data
+ TBC *pCur;
+ tdbTbcOpen(pMeta->pTbDb, &pCur, NULL);
+ STbDbKey key = {.version = sver, .uid = INT64_MAX};
+ int c = 0;
+ tdbTbcMoveTo(pCur, &key, sizeof(key), &c);
+ if (c < 0) {
+ tdbTbcMoveToPrev(pCur);
+ }
+
+ void *pKey = NULL;
+ void *pVal = NULL;
+ int vLen = 0, kLen = 0;
+ while (1) {
+ int32_t ret = tdbTbcPrev(pCur, &pKey, &kLen, &pVal, &vLen);
+ if (ret < 0) break;
+
+ STbDbKey *tmp = (STbDbKey *)pKey;
+ if (tmp->uid != uid) {
+ continue;
+ }
+ SDecoder dcNew = {0};
+ SMetaEntry meNew = {0};
+ tDecoderInit(&dcNew, pVal, vLen);
+ metaDecodeEntry(&dcNew, &meNew);
+ pSchema = tCloneSSchemaWrapper(&meNew.stbEntry.schemaRow);
+ tDecoderClear(&dcNew);
+ tdbTbcClose(pCur);
+ tdbFree(pKey);
+ tdbFree(pVal);
+ goto _exit;
+ }
+ tdbFree(pKey);
+ tdbFree(pVal);
+ tdbTbcClose(pCur);
+ }
} else if (me.type == TSDB_CHILD_TABLE) {
uid = me.ctbEntry.suid;
tDecoderClear(&dc);
@@ -304,11 +351,13 @@ _query:
tDecoderClear(&dc);
_exit:
+ tDecoderClear(&dc);
metaULock(pMeta);
tdbFree(pData);
return pSchema;
_err:
+ tDecoderClear(&dc);
metaULock(pMeta);
tdbFree(pData);
return NULL;
@@ -340,10 +389,8 @@ int metaTtlSmaller(SMeta *pMeta, uint64_t ttl, SArray *uidList) {
ttlKey = *(STtlIdxKey *)pKey;
taosArrayPush(uidList, &ttlKey.uid);
}
- tdbTbcClose(pCur);
-
tdbFree(pKey);
-
+ tdbTbcClose(pCur);
return 0;
}
@@ -615,9 +662,14 @@ int64_t metaGetTbNum(SMeta *pMeta) {
// N.B. Called by statusReq per second
int64_t metaGetTimeSeriesNum(SMeta *pMeta) {
// sum of (number of columns of stable - 1) * number of ctables (excluding timestamp column)
- int64_t num = 0;
- vnodeGetTimeSeriesNum(pMeta->pVnode, &num);
- pMeta->pVnode->config.vndStats.numOfTimeSeries = num;
+ if (pMeta->pVnode->config.vndStats.numOfTimeSeries <= 0 ||
+ ++pMeta->pVnode->config.vndStats.itvTimeSeries % (60 * 5) == 0) {
+ int64_t num = 0;
+ vnodeGetTimeSeriesNum(pMeta->pVnode, &num);
+ pMeta->pVnode->config.vndStats.numOfTimeSeries = num;
+
+ pMeta->pVnode->config.vndStats.itvTimeSeries = (TD_VID(pMeta->pVnode) % 100) * 2;
+ }
return pMeta->pVnode->config.vndStats.numOfTimeSeries + pMeta->pVnode->config.vndStats.numOfNTimeSeries;
}
@@ -820,7 +872,7 @@ SArray *metaGetSmaIdsByTable(SMeta *pMeta, tb_uid_t uid) {
pSmaIdxKey = (SSmaIdxKey *)pCur->pKey;
- if (taosArrayPush(pUids, &pSmaIdxKey->smaUid) < 0) {
+ if (!taosArrayPush(pUids, &pSmaIdxKey->smaUid)) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
metaCloseSmaCursor(pCur);
taosArrayDestroy(pUids);
@@ -863,7 +915,7 @@ SArray *metaGetSmaTbUids(SMeta *pMeta) {
}
}
- if (taosArrayPush(pUids, &uid) < 0) {
+ if (!taosArrayPush(pUids, &uid)) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
metaCloseSmaCursor(pCur);
taosArrayDestroy(pUids);
@@ -887,6 +939,37 @@ const void *metaGetTableTagVal(void *pTag, int16_t type, STagVal *val) {
if (!find) {
return NULL;
}
+
+#ifdef TAG_FILTER_DEBUG
+ if (IS_VAR_DATA_TYPE(val->type)) {
+ char *buf = taosMemoryCalloc(val->nData + 1, 1);
+ memcpy(buf, val->pData, val->nData);
+ metaDebug("metaTag table val varchar index:%d cid:%d type:%d value:%s", 1, val->cid, val->type, buf);
+ taosMemoryFree(buf);
+ } else {
+ double dval = 0;
+ GET_TYPED_DATA(dval, double, val->type, &val->i64);
+ metaDebug("metaTag table val number index:%d cid:%d type:%d value:%f", 1, val->cid, val->type, dval);
+ }
+
+ SArray *pTagVals = NULL;
+ tTagToValArray((STag *)pTag, &pTagVals);
+ for (int i = 0; i < taosArrayGetSize(pTagVals); i++) {
+ STagVal *pTagVal = (STagVal *)taosArrayGet(pTagVals, i);
+
+ if (IS_VAR_DATA_TYPE(pTagVal->type)) {
+ char *buf = taosMemoryCalloc(pTagVal->nData + 1, 1);
+ memcpy(buf, pTagVal->pData, pTagVal->nData);
+ metaDebug("metaTag table varchar index:%d cid:%d type:%d value:%s", i, pTagVal->cid, pTagVal->type, buf);
+ taosMemoryFree(buf);
+ } else {
+ double dval = 0;
+ GET_TYPED_DATA(dval, double, pTagVal->type, &pTagVal->i64);
+ metaDebug("metaTag table number index:%d cid:%d type:%d value:%f", i, pTagVal->cid, pTagVal->type, dval);
+ }
+ }
+#endif
+
return val;
}
diff --git a/source/dnode/vnode/src/meta/metaSnapshot.c b/source/dnode/vnode/src/meta/metaSnapshot.c
index 973c3814074685128395bd50243bba8981af4200..9fdbe50f88ab9fbdd2570ea76f51d7cb26772d06 100644
--- a/source/dnode/vnode/src/meta/metaSnapshot.c
+++ b/source/dnode/vnode/src/meta/metaSnapshot.c
@@ -195,3 +195,437 @@ _err:
metaError("vgId:%d, vnode snapshot meta write failed since %s", TD_VID(pMeta->pVnode), tstrerror(code));
return code;
}
+
+typedef struct STableInfoForChildTable{
+ char *tableName;
+ SSchemaWrapper *schemaRow;
+ SSchemaWrapper *tagRow;
+}STableInfoForChildTable;
+
+static void destroySTableInfoForChildTable(void* data) {
+ STableInfoForChildTable* pData = (STableInfoForChildTable*)data;
+ taosMemoryFree(pData->tableName);
+ tDeleteSSchemaWrapper(pData->schemaRow);
+ tDeleteSSchemaWrapper(pData->tagRow);
+}
+
+static void MoveToSnapShotVersion(SSnapContext* ctx){
+ tdbTbcClose(ctx->pCur);
+ tdbTbcOpen(ctx->pMeta->pTbDb, &ctx->pCur, NULL);
+ STbDbKey key = {.version = ctx->snapVersion, .uid = INT64_MAX};
+ int c = 0;
+ tdbTbcMoveTo(ctx->pCur, &key, sizeof(key), &c);
+ if(c < 0){
+ tdbTbcMoveToPrev(ctx->pCur);
+ }
+}
+
+static int32_t MoveToPosition(SSnapContext* ctx, int64_t ver, int64_t uid){
+ tdbTbcClose(ctx->pCur);
+ tdbTbcOpen(ctx->pMeta->pTbDb, &ctx->pCur, NULL);
+ STbDbKey key = {.version = ver, .uid = uid};
+ int c = 0;
+ tdbTbcMoveTo(ctx->pCur, &key, sizeof(key), &c);
+ return c;
+}
+
+static void MoveToFirst(SSnapContext* ctx){
+ tdbTbcClose(ctx->pCur);
+ tdbTbcOpen(ctx->pMeta->pTbDb, &ctx->pCur, NULL);
+ tdbTbcMoveToFirst(ctx->pCur);
+}
+
+static void saveSuperTableInfoForChildTable(SMetaEntry *me, SHashObj *suidInfo){
+ STableInfoForChildTable* data = (STableInfoForChildTable*)taosHashGet(suidInfo, &me->uid, sizeof(tb_uid_t));
+ if(data){
+ return;
+ }
+ STableInfoForChildTable dataTmp = {0};
+ dataTmp.tableName = strdup(me->name);
+
+ dataTmp.schemaRow = tCloneSSchemaWrapper(&me->stbEntry.schemaRow);
+ dataTmp.tagRow = tCloneSSchemaWrapper(&me->stbEntry.schemaTag);
+ taosHashPut(suidInfo, &me->uid, sizeof(tb_uid_t), &dataTmp, sizeof(STableInfoForChildTable));
+}
+
+int32_t buildSnapContext(SMeta* pMeta, int64_t snapVersion, int64_t suid, int8_t subType, bool withMeta, SSnapContext** ctxRet){
+ SSnapContext* ctx = taosMemoryCalloc(1, sizeof(SSnapContext));
+ if(ctx == NULL) return -1;
+ *ctxRet = ctx;
+ ctx->pMeta = pMeta;
+ ctx->snapVersion = snapVersion;
+ ctx->suid = suid;
+ ctx->subType = subType;
+ ctx->queryMetaOrData = withMeta;
+ ctx->withMeta = withMeta;
+ ctx->idVersion = taosHashInit(100, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), true, HASH_NO_LOCK);
+ if(ctx->idVersion == NULL){
+ return -1;
+ }
+
+ ctx->suidInfo = taosHashInit(100, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), true, HASH_NO_LOCK);
+ if(ctx->suidInfo == NULL){
+ return -1;
+ }
+ taosHashSetFreeFp(ctx->suidInfo, destroySTableInfoForChildTable);
+
+ ctx->index = 0;
+ ctx->idList = taosArrayInit(100, sizeof(int64_t));
+ void *pKey = NULL;
+ void *pVal = NULL;
+ int vLen = 0, kLen = 0;
+
+ metaDebug("tmqsnap init snapVersion:%" PRIi64, ctx->snapVersion);
+ MoveToFirst(ctx);
+ while(1){
+ int32_t ret = tdbTbcNext(ctx->pCur, &pKey, &kLen, &pVal, &vLen);
+ if (ret < 0) break;
+ STbDbKey *tmp = (STbDbKey*)pKey;
+ if (tmp->version > ctx->snapVersion) break;
+
+ SIdInfo* idData = (SIdInfo*)taosHashGet(ctx->idVersion, &tmp->uid, sizeof(tb_uid_t));
+ if(idData) {
+ continue;
+ }
+
+ if (tdbTbGet(pMeta->pUidIdx, &tmp->uid, sizeof(tb_uid_t), NULL, NULL) < 0) { // check if table exist for now, need optimize later
+ continue;
+ }
+
+ SDecoder dc = {0};
+ SMetaEntry me = {0};
+ tDecoderInit(&dc, pVal, vLen);
+ metaDecodeEntry(&dc, &me);
+ if(ctx->subType == TOPIC_SUB_TYPE__TABLE){
+ if ((me.uid != ctx->suid && me.type == TSDB_SUPER_TABLE) ||
+ (me.ctbEntry.suid != ctx->suid && me.type == TSDB_CHILD_TABLE)){
+ tDecoderClear(&dc);
+ continue;
+ }
+ }
+
+ taosArrayPush(ctx->idList, &tmp->uid);
+ metaDebug("tmqsnap init idlist name:%s, uid:%" PRIi64, me.name, tmp->uid);
+ SIdInfo info = {0};
+ taosHashPut(ctx->idVersion, &tmp->uid, sizeof(tb_uid_t), &info, sizeof(SIdInfo));
+
+ tDecoderClear(&dc);
+ }
+ taosHashClear(ctx->idVersion);
+
+ MoveToSnapShotVersion(ctx);
+ while(1){
+ int32_t ret = tdbTbcPrev(ctx->pCur, &pKey, &kLen, &pVal, &vLen);
+ if (ret < 0) break;
+
+ STbDbKey *tmp = (STbDbKey*)pKey;
+ SIdInfo* idData = (SIdInfo*)taosHashGet(ctx->idVersion, &tmp->uid, sizeof(tb_uid_t));
+ if(idData){
+ continue;
+ }
+ SIdInfo info = {.version = tmp->version, .index = 0};
+ taosHashPut(ctx->idVersion, &tmp->uid, sizeof(tb_uid_t), &info, sizeof(SIdInfo));
+
+ SDecoder dc = {0};
+ SMetaEntry me = {0};
+ tDecoderInit(&dc, pVal, vLen);
+ metaDecodeEntry(&dc, &me);
+ if(ctx->subType == TOPIC_SUB_TYPE__TABLE){
+ if ((me.uid != ctx->suid && me.type == TSDB_SUPER_TABLE) ||
+ (me.ctbEntry.suid != ctx->suid && me.type == TSDB_CHILD_TABLE)){
+ tDecoderClear(&dc);
+ continue;
+ }
+ }
+
+ if ((ctx->subType == TOPIC_SUB_TYPE__DB && me.type == TSDB_SUPER_TABLE)
+ || (ctx->subType == TOPIC_SUB_TYPE__TABLE && me.uid == ctx->suid)) {
+ saveSuperTableInfoForChildTable(&me, ctx->suidInfo);
+ }
+ tDecoderClear(&dc);
+ }
+
+ for(int i = 0; i < taosArrayGetSize(ctx->idList); i++){
+ int64_t *uid = taosArrayGet(ctx->idList, i);
+ SIdInfo* idData = (SIdInfo*)taosHashGet(ctx->idVersion, uid, sizeof(int64_t));
+ ASSERT(idData);
+ idData->index = i;
+ metaDebug("tmqsnap init idVersion uid:%" PRIi64 " version:%" PRIi64 " index:%d", *uid, idData->version, idData->index);
+ }
+
+ tdbFree(pKey);
+ tdbFree(pVal);
+ return TDB_CODE_SUCCESS;
+}
+
+int32_t destroySnapContext(SSnapContext* ctx){
+ tdbTbcClose(ctx->pCur);
+ taosArrayDestroy(ctx->idList);
+ taosHashCleanup(ctx->idVersion);
+ taosHashCleanup(ctx->suidInfo);
+ taosMemoryFree(ctx);
+ return 0;
+}
+
+static int32_t buildNormalChildTableInfo(SVCreateTbReq *req, void **pBuf, int32_t *contLen){
+ int32_t ret = 0;
+ SVCreateTbBatchReq reqs = {0};
+
+ reqs.pArray = taosArrayInit(1, sizeof(struct SVCreateTbReq));
+ if (NULL == reqs.pArray){
+ ret = -1;
+ goto end;
+ }
+ taosArrayPush(reqs.pArray, req);
+ reqs.nReqs = 1;
+
+ tEncodeSize(tEncodeSVCreateTbBatchReq, &reqs, *contLen, ret);
+ if(ret < 0){
+ ret = -1;
+ goto end;
+ }
+ *contLen += sizeof(SMsgHead);
+ *pBuf = taosMemoryMalloc(*contLen);
+ if (NULL == *pBuf) {
+ ret = -1;
+ goto end;
+ }
+ SEncoder coder = {0};
+ tEncoderInit(&coder, POINTER_SHIFT(*pBuf, sizeof(SMsgHead)), *contLen);
+ if (tEncodeSVCreateTbBatchReq(&coder, &reqs) < 0) {
+ taosMemoryFreeClear(*pBuf);
+ tEncoderClear(&coder);
+ ret = -1;
+ goto end;
+ }
+ tEncoderClear(&coder);
+
+end:
+ taosArrayDestroy(reqs.pArray);
+ return ret;
+}
+
+static int32_t buildSuperTableInfo(SVCreateStbReq *req, void **pBuf, int32_t *contLen){
+ int32_t ret = 0;
+ tEncodeSize(tEncodeSVCreateStbReq, req, *contLen, ret);
+ if (ret < 0) {
+ return -1;
+ }
+
+ *contLen += sizeof(SMsgHead);
+ *pBuf = taosMemoryMalloc(*contLen);
+ if (NULL == *pBuf) {
+ return -1;
+ }
+
+ SEncoder encoder = {0};
+ tEncoderInit(&encoder, POINTER_SHIFT(*pBuf, sizeof(SMsgHead)), *contLen);
+ if (tEncodeSVCreateStbReq(&encoder, req) < 0) {
+ taosMemoryFreeClear(*pBuf);
+ tEncoderClear(&encoder);
+ return -1;
+ }
+ tEncoderClear(&encoder);
+ return 0;
+}
+
+int32_t setForSnapShot(SSnapContext* ctx, int64_t uid){
+ int c = 0;
+
+ if(uid == 0){
+ ctx->index = 0;
+ return c;
+ }
+
+ SIdInfo* idInfo = (SIdInfo*)taosHashGet(ctx->idVersion, &uid, sizeof(tb_uid_t));
+ if(!idInfo){
+ return -1;
+ }
+
+ ctx->index = idInfo->index;
+
+ return c;
+}
+
+int32_t getMetafromSnapShot(SSnapContext* ctx, void **pBuf, int32_t *contLen, int16_t *type, int64_t *uid){
+ int32_t ret = 0;
+ void *pKey = NULL;
+ void *pVal = NULL;
+ int vLen = 0, kLen = 0;
+
+ while(1){
+ if(ctx->index >= taosArrayGetSize(ctx->idList)){
+ metaDebug("tmqsnap get meta end");
+ ctx->index = 0;
+ ctx->queryMetaOrData = false; // change to get data
+ return 0;
+ }
+
+ int64_t* uidTmp = taosArrayGet(ctx->idList, ctx->index);
+ ctx->index++;
+ SIdInfo* idInfo = (SIdInfo*)taosHashGet(ctx->idVersion, uidTmp, sizeof(tb_uid_t));
+ ASSERT(idInfo);
+
+ *uid = *uidTmp;
+ ret = MoveToPosition(ctx, idInfo->version, *uidTmp);
+ if(ret == 0){
+ break;
+ }
+ metaDebug("tmqsnap get meta not exist uid:%" PRIi64 " version:%" PRIi64, *uid, idInfo->version);
+ }
+
+ tdbTbcGet(ctx->pCur, (const void**)&pKey, &kLen, (const void**)&pVal, &vLen);
+ SDecoder dc = {0};
+ SMetaEntry me = {0};
+ tDecoderInit(&dc, pVal, vLen);
+ metaDecodeEntry(&dc, &me);
+ metaDebug("tmqsnap get meta uid:%" PRIi64 " name:%s index:%d", *uid, me.name, ctx->index-1);
+
+ if ((ctx->subType == TOPIC_SUB_TYPE__DB && me.type == TSDB_SUPER_TABLE)
+ || (ctx->subType == TOPIC_SUB_TYPE__TABLE && me.uid == ctx->suid)) {
+ SVCreateStbReq req = {0};
+ req.name = me.name;
+ req.suid = me.uid;
+ req.schemaRow = me.stbEntry.schemaRow;
+ req.schemaTag = me.stbEntry.schemaTag;
+ req.schemaRow.version = 1;
+ req.schemaTag.version = 1;
+
+ ret = buildSuperTableInfo(&req, pBuf, contLen);
+ *type = TDMT_VND_CREATE_STB;
+
+ } else if ((ctx->subType == TOPIC_SUB_TYPE__DB && me.type == TSDB_CHILD_TABLE)
+ || (ctx->subType == TOPIC_SUB_TYPE__TABLE && me.type == TSDB_CHILD_TABLE && me.ctbEntry.suid == ctx->suid)) {
+ STableInfoForChildTable* data = (STableInfoForChildTable*)taosHashGet(ctx->suidInfo, &me.ctbEntry.suid, sizeof(tb_uid_t));
+ ASSERT(data);
+ SVCreateTbReq req = {0};
+
+ req.type = TSDB_CHILD_TABLE;
+ req.name = me.name;
+ req.uid = me.uid;
+ req.commentLen = -1;
+ req.ctb.suid = me.ctbEntry.suid;
+ req.ctb.tagNum = data->tagRow->nCols;
+ req.ctb.name = data->tableName;
+
+ SArray* tagName = taosArrayInit(req.ctb.tagNum, TSDB_COL_NAME_LEN);
+ STag* p = (STag*)me.ctbEntry.pTags;
+ if(tTagIsJson(p)){
+ if (p->nTag != 0) {
+ SSchema* schema = &data->tagRow->pSchema[0];
+ taosArrayPush(tagName, schema->name);
+ }
+ }else{
+ SArray* pTagVals = NULL;
+ if (tTagToValArray((const STag*)p, &pTagVals) != 0) {
+ ASSERT(0);
+ }
+ int16_t nCols = taosArrayGetSize(pTagVals);
+ for (int j = 0; j < nCols; ++j) {
+ STagVal* pTagVal = (STagVal*)taosArrayGet(pTagVals, j);
+ for(int i = 0; i < data->tagRow->nCols; i++){
+ SSchema *schema = &data->tagRow->pSchema[i];
+ if(schema->colId == pTagVal->cid){
+ taosArrayPush(tagName, schema->name);
+ }
+ }
+ }
+ taosArrayDestroy(pTagVals);
+ }
+// SIdInfo* sidInfo = (SIdInfo*)taosHashGet(ctx->idVersion, &me.ctbEntry.suid, sizeof(tb_uid_t));
+// if(sidInfo->version >= idInfo->version){
+// // need parse tag
+// STag* p = (STag*)me.ctbEntry.pTags;
+// SArray* pTagVals = NULL;
+// if (tTagToValArray((const STag*)p, &pTagVals) != 0) {
+// }
+//
+// int16_t nCols = taosArrayGetSize(pTagVals);
+// for (int j = 0; j < nCols; ++j) {
+// STagVal* pTagVal = (STagVal*)taosArrayGet(pTagVals, j);
+// }
+// }else{
+ req.ctb.pTag = me.ctbEntry.pTags;
+// }
+
+ req.ctb.tagName = tagName;
+ ret = buildNormalChildTableInfo(&req, pBuf, contLen);
+ *type = TDMT_VND_CREATE_TABLE;
+ taosArrayDestroy(tagName);
+ } else if(ctx->subType == TOPIC_SUB_TYPE__DB){
+ SVCreateTbReq req = {0};
+ req.type = TSDB_NORMAL_TABLE;
+ req.name = me.name;
+ req.uid = me.uid;
+ req.commentLen = -1;
+ req.ntb.schemaRow = me.ntbEntry.schemaRow;
+ ret = buildNormalChildTableInfo(&req, pBuf, contLen);
+ *type = TDMT_VND_CREATE_TABLE;
+ } else{
+ ASSERT(0);
+ }
+ tDecoderClear(&dc);
+
+ return ret;
+}
+
+SMetaTableInfo getUidfromSnapShot(SSnapContext* ctx){
+ SMetaTableInfo result = {0};
+ void *pKey = NULL;
+ void *pVal = NULL;
+ int vLen, kLen;
+
+ while(1){
+ if(ctx->index >= taosArrayGetSize(ctx->idList)){
+ metaDebug("tmqsnap get uid info end");
+ return result;
+ }
+ int64_t* uidTmp = taosArrayGet(ctx->idList, ctx->index);
+ ctx->index++;
+ SIdInfo* idInfo = (SIdInfo*)taosHashGet(ctx->idVersion, uidTmp, sizeof(tb_uid_t));
+ ASSERT(idInfo);
+
+ int32_t ret = MoveToPosition(ctx, idInfo->version, *uidTmp);
+ if(ret != 0) {
+ metaDebug("tmqsnap getUidfromSnapShot not exist uid:%" PRIi64 " version:%" PRIi64, *uidTmp, idInfo->version);
+ continue;
+ }
+ tdbTbcGet(ctx->pCur, (const void**)&pKey, &kLen, (const void**)&pVal, &vLen);
+ SDecoder dc = {0};
+ SMetaEntry me = {0};
+ tDecoderInit(&dc, pVal, vLen);
+ metaDecodeEntry(&dc, &me);
+ metaDebug("tmqsnap get uid info uid:%" PRIi64 " name:%s index:%d", me.uid, me.name, ctx->index-1);
+
+ if (ctx->subType == TOPIC_SUB_TYPE__DB && me.type == TSDB_CHILD_TABLE){
+ STableInfoForChildTable* data = (STableInfoForChildTable*)taosHashGet(ctx->suidInfo, &me.ctbEntry.suid, sizeof(tb_uid_t));
+ result.uid = me.uid;
+ result.suid = me.ctbEntry.suid;
+ result.schema = tCloneSSchemaWrapper(data->schemaRow);
+ strcpy(result.tbName, me.name);
+ tDecoderClear(&dc);
+ break;
+ } else if (ctx->subType == TOPIC_SUB_TYPE__DB && me.type == TSDB_NORMAL_TABLE) {
+ result.uid = me.uid;
+ result.suid = 0;
+ strcpy(result.tbName, me.name);
+ result.schema = tCloneSSchemaWrapper(&me.ntbEntry.schemaRow);
+ tDecoderClear(&dc);
+ break;
+ } else if(ctx->subType == TOPIC_SUB_TYPE__TABLE && me.type == TSDB_CHILD_TABLE && me.ctbEntry.suid == ctx->suid) {
+ STableInfoForChildTable* data = (STableInfoForChildTable*)taosHashGet(ctx->suidInfo, &me.ctbEntry.suid, sizeof(tb_uid_t));
+ result.uid = me.uid;
+ result.suid = me.ctbEntry.suid;
+ strcpy(result.tbName, me.name);
+ result.schema = tCloneSSchemaWrapper(data->schemaRow);
+ tDecoderClear(&dc);
+ break;
+ } else{
+ metaDebug("tmqsnap get uid continue");
+ tDecoderClear(&dc);
+ continue;
+ }
+ }
+
+ return result;
+}
diff --git a/source/dnode/vnode/src/meta/metaTable.c b/source/dnode/vnode/src/meta/metaTable.c
index 352b6be32195a76a5ef03bd1abdebde8c88e1004..932afe89371433d16b162d9fc52a3c74fb2b0561 100644
--- a/source/dnode/vnode/src/meta/metaTable.c
+++ b/source/dnode/vnode/src/meta/metaTable.c
@@ -99,6 +99,7 @@ static int metaSaveJsonVarToIdx(SMeta *pMeta, const SMetaEntry *pCtbEntry, const
memcpy(val, (uint16_t *)&len, VARSTR_HEADER_SIZE);
type = TSDB_DATA_TYPE_VARCHAR;
term = indexTermCreate(suid, ADD_VALUE, type, key, nKey, val, len);
+ taosMemoryFree(val);
} else if (pTagVal->nData == 0) {
term = indexTermCreate(suid, ADD_VALUE, TSDB_DATA_TYPE_VARCHAR, key, nKey, pTagVal->pData, 0);
}
@@ -115,6 +116,7 @@ static int metaSaveJsonVarToIdx(SMeta *pMeta, const SMetaEntry *pCtbEntry, const
indexMultiTermAdd(terms, term);
}
}
+ taosArrayDestroy(pTagVals);
indexJsonPut(pMeta->pTagIvtIdx, terms, tuid);
indexMultiTermDestroy(terms);
#endif
@@ -242,6 +244,7 @@ int metaDropSTable(SMeta *pMeta, int64_t verison, SVDropStbReq *pReq, SArray *tb
// check if super table exists
rc = tdbTbGet(pMeta->pNameIdx, pReq->name, strlen(pReq->name) + 1, &pData, &nData);
if (rc < 0 || *(tb_uid_t *)pData != pReq->suid) {
+ tdbFree(pData);
terrno = TSDB_CODE_TDB_STB_NOT_EXIST;
return -1;
}
@@ -307,7 +310,7 @@ int metaAlterSTable(SMeta *pMeta, int64_t version, SVCreateStbReq *pReq) {
int64_t oversion;
SDecoder dc = {0};
int32_t ret;
- int32_t c;
+ int32_t c = -2;
tdbTbcOpen(pMeta->pUidIdx, &pUidIdxc, &pMeta->txn);
ret = tdbTbcMoveTo(pUidIdxc, &pReq->suid, sizeof(tb_uid_t), &c);
@@ -359,8 +362,9 @@ int metaAlterSTable(SMeta *pMeta, int64_t version, SVCreateStbReq *pReq) {
// update uid index
metaUpdateUidIdx(pMeta, &nStbEntry);
- if (oStbEntry.pBuf) taosMemoryFree(oStbEntry.pBuf);
metaULock(pMeta);
+
+ if (oStbEntry.pBuf) taosMemoryFree(oStbEntry.pBuf);
tDecoderClear(&dc);
tdbTbcClose(pTbDbc);
tdbTbcClose(pUidIdxc);
@@ -413,6 +417,27 @@ int metaCreateTable(SMeta *pMeta, int64_t version, SVCreateTbReq *pReq, STableMe
me.ctbEntry.suid = pReq->ctb.suid;
me.ctbEntry.pTags = pReq->ctb.pTag;
+#ifdef TAG_FILTER_DEBUG
+ SArray *pTagVals = NULL;
+ int32_t code = tTagToValArray((STag *)pReq->ctb.pTag, &pTagVals);
+ for (int i = 0; i < taosArrayGetSize(pTagVals); i++) {
+ STagVal *pTagVal = (STagVal *)taosArrayGet(pTagVals, i);
+
+ if (IS_VAR_DATA_TYPE(pTagVal->type)) {
+ char *buf = taosMemoryCalloc(pTagVal->nData + 1, 1);
+ memcpy(buf, pTagVal->pData, pTagVal->nData);
+ metaDebug("metaTag table:%s varchar index:%d cid:%d type:%d value:%s", pReq->name, i, pTagVal->cid,
+ pTagVal->type, buf);
+ taosMemoryFree(buf);
+ } else {
+ double val = 0;
+ GET_TYPED_DATA(val, double, pTagVal->type, &pTagVal->i64);
+ metaDebug("metaTag table:%s number index:%d cid:%d type:%d value:%f", pReq->name, i, pTagVal->cid,
+ pTagVal->type, val);
+ }
+ }
+#endif
+
++pMeta->pVnode->config.vndStats.numOfCTables;
} else {
me.ntbEntry.ctime = pReq->ctime;
@@ -453,7 +478,7 @@ _err:
return -1;
}
-int metaDropTable(SMeta *pMeta, int64_t version, SVDropTbReq *pReq, SArray *tbUids) {
+int metaDropTable(SMeta *pMeta, int64_t version, SVDropTbReq *pReq, SArray *tbUids, tb_uid_t *tbUid) {
void *pData = NULL;
int nData = 0;
int rc = 0;
@@ -475,6 +500,10 @@ int metaDropTable(SMeta *pMeta, int64_t version, SVDropTbReq *pReq, SArray *tbUi
taosArrayPush(tbUids, &uid);
}
+ if ((type == TSDB_CHILD_TABLE) && tbUid) {
+ *tbUid = uid;
+ }
+
tdbFree(pData);
return 0;
}
@@ -532,6 +561,9 @@ static int metaDropTableByUid(SMeta *pMeta, tb_uid_t uid, int *type) {
SDecoder dc = {0};
rc = tdbTbGet(pMeta->pUidIdx, &uid, sizeof(uid), &pData, &nData);
+ if (rc < 0) {
+ return -1;
+ }
int64_t version = ((SUidIdxVal *)pData)[0].version;
tdbTbGet(pMeta->pTbDb, &(STbDbKey){.version = version, .uid = uid}, sizeof(STbDbKey), &pData, &nData);
@@ -891,6 +923,8 @@ static int metaUpdateTableTagVal(SMeta *pMeta, int64_t version, SVAlterTbReq *pA
taosArrayDestroy(pTagArray);
}
+ metaWLock(pMeta);
+
// save to table.db
metaSaveToTbDb(pMeta, &ctbEntry);
@@ -905,6 +939,8 @@ static int metaUpdateTableTagVal(SMeta *pMeta, int64_t version, SVAlterTbReq *pA
tdbTbUpsert(pMeta->pCtbIdx, &ctbIdxKey, sizeof(ctbIdxKey), ctbEntry.ctbEntry.pTags,
((STag *)(ctbEntry.ctbEntry.pTags))->len, &pMeta->txn);
+ metaULock(pMeta);
+
tDecoderClear(&dc1);
tDecoderClear(&dc2);
if (ctbEntry.ctbEntry.pTags) taosMemoryFree((void *)ctbEntry.ctbEntry.pTags);
@@ -1161,10 +1197,11 @@ static int metaUpdateTagIdx(SMeta *pMeta, const SMetaEntry *pCtbEntry) {
const void *pTagData = NULL; //
int32_t nTagData = 0;
SDecoder dc = {0};
-
+ int32_t ret = 0;
// get super table
if (tdbTbGet(pMeta->pUidIdx, &pCtbEntry->ctbEntry.suid, sizeof(tb_uid_t), &pData, &nData) != 0) {
- return -1;
+ ret = -1;
+ goto end;
}
tbDbKey.uid = pCtbEntry->ctbEntry.suid;
tbDbKey.version = ((SUidIdxVal *)pData)[0].version;
@@ -1190,17 +1227,20 @@ static int metaUpdateTagIdx(SMeta *pMeta, const SMetaEntry *pCtbEntry) {
// nTagData = ((const STag *)pCtbEntry->ctbEntry.pTags)->len;
pTagData = pCtbEntry->ctbEntry.pTags;
nTagData = ((const STag *)pCtbEntry->ctbEntry.pTags)->len;
- return metaSaveJsonVarToIdx(pMeta, pCtbEntry, pTagColumn);
+ ret = metaSaveJsonVarToIdx(pMeta, pCtbEntry, pTagColumn);
+ goto end;
}
if (metaCreateTagIdxKey(pCtbEntry->ctbEntry.suid, pTagColumn->colId, pTagData, nTagData, pTagColumn->type,
pCtbEntry->uid, &pTagIdxKey, &nTagIdxKey) < 0) {
- return -1;
+ ret = -1;
+ goto end;
}
tdbTbUpsert(pMeta->pTagIdx, pTagIdxKey, nTagIdxKey, NULL, 0, &pMeta->txn);
+end:
metaDestroyTagIdxKey(pTagIdxKey);
tDecoderClear(&dc);
tdbFree(pData);
- return 0;
+ return ret;
}
static int metaSaveToSkmDb(SMeta *pMeta, const SMetaEntry *pME) {
diff --git a/source/dnode/vnode/src/sma/smaCommit.c b/source/dnode/vnode/src/sma/smaCommit.c
index ca5367f39714ed1f3a979068b0a9a7204d385f8c..fb5caad2691fca40e136ad204b0a90ac700d4337 100644
--- a/source/dnode/vnode/src/sma/smaCommit.c
+++ b/source/dnode/vnode/src/sma/smaCommit.c
@@ -15,13 +15,15 @@
#include "sma.h"
+extern SSmaMgmt smaMgmt;
+
static int32_t tdProcessRSmaSyncPreCommitImpl(SSma *pSma);
static int32_t tdProcessRSmaSyncCommitImpl(SSma *pSma);
static int32_t tdProcessRSmaSyncPostCommitImpl(SSma *pSma);
static int32_t tdProcessRSmaAsyncPreCommitImpl(SSma *pSma);
static int32_t tdProcessRSmaAsyncCommitImpl(SSma *pSma);
static int32_t tdProcessRSmaAsyncPostCommitImpl(SSma *pSma);
-static int32_t tdCleanupQTaskInfoFiles(SSma *pSma, SRSmaStat *pRSmaStat);
+static int32_t tdUpdateQTaskInfoFiles(SSma *pSma, SRSmaStat *pRSmaStat);
/**
* @brief Only applicable to Rollup SMA
@@ -166,114 +168,65 @@ static int32_t tdProcessRSmaSyncCommitImpl(SSma *pSma) {
return TSDB_CODE_SUCCESS;
}
-static int32_t tdCleanupQTaskInfoFiles(SSma *pSma, SRSmaStat *pRSmaStat) {
- SVnode *pVnode = pSma->pVnode;
- int64_t committed = pRSmaStat->commitAppliedVer;
- TdDirPtr pDir = NULL;
- TdDirEntryPtr pDirEntry = NULL;
- char dir[TSDB_FILENAME_LEN];
- const char *pattern = "v[0-9]+qtaskinfo\\.ver([0-9]+)?$";
- regex_t regex;
- int code = 0;
-
- tdGetVndDirName(TD_VID(pVnode), tfsGetPrimaryPath(pVnode->pTfs), VNODE_RSMA_DIR, true, dir);
-
- // Resource allocation and init
- if ((code = regcomp(®ex, pattern, REG_EXTENDED)) != 0) {
- char errbuf[128];
- regerror(code, ®ex, errbuf, sizeof(errbuf));
- smaWarn("vgId:%d, rsma post commit, regcomp for %s failed since %s", TD_VID(pVnode), dir, errbuf);
- return TSDB_CODE_FAILED;
- }
-
- if ((pDir = taosOpenDir(dir)) == NULL) {
- regfree(®ex);
- terrno = TAOS_SYSTEM_ERROR(errno);
- smaDebug("vgId:%d, rsma post commit, open dir %s failed since %s", TD_VID(pVnode), dir, terrstr());
- return TSDB_CODE_FAILED;
- }
-
- int32_t dirLen = strlen(dir);
- char *dirEnd = POINTER_SHIFT(dir, dirLen);
- regmatch_t regMatch[2];
- while ((pDirEntry = taosReadDir(pDir)) != NULL) {
- char *entryName = taosGetDirEntryName(pDirEntry);
- if (!entryName) {
- continue;
- }
-
- code = regexec(®ex, entryName, 2, regMatch, 0);
-
- if (code == 0) {
- // match
- int64_t version = -1;
- sscanf((const char *)POINTER_SHIFT(entryName, regMatch[1].rm_so), "%" PRIi64, &version);
- if ((version < committed) && (version > -1)) {
- strncpy(dirEnd, entryName, TSDB_FILENAME_LEN - dirLen);
- if (taosRemoveFile(dir) != 0) {
- terrno = TAOS_SYSTEM_ERROR(errno);
- smaWarn("vgId:%d, committed version:%" PRIi64 ", failed to remove %s since %s", TD_VID(pVnode), committed,
- dir, terrstr());
- } else {
- smaDebug("vgId:%d, committed version:%" PRIi64 ", success to remove %s", TD_VID(pVnode), committed, dir);
- }
- }
- } else if (code == REG_NOMATCH) {
- // not match
- smaTrace("vgId:%d, rsma post commit, not match %s", TD_VID(pVnode), entryName);
- continue;
- } else {
- // has other error
- char errbuf[128];
- regerror(code, ®ex, errbuf, sizeof(errbuf));
- smaWarn("vgId:%d, rsma post commit, regexec failed since %s", TD_VID(pVnode), errbuf);
-
- taosCloseDir(&pDir);
- regfree(®ex);
- return TSDB_CODE_FAILED;
- }
- }
-
- taosCloseDir(&pDir);
- regfree(®ex);
-
- return TSDB_CODE_SUCCESS;
-}
-
// SQTaskFile ======================================================
-// int32_t tCmprQTaskFile(void const *lhs, void const *rhs) {
-// int64_t *lCommitted = *(int64_t *)lhs;
-// SQTaskFile *rQTaskF = (SQTaskFile *)rhs;
-
-// if (lCommitted < rQTaskF->commitID) {
-// return -1;
-// } else if (lCommitted > rQTaskF->commitID) {
-// return 1;
-// }
-
-// return 0;
-// }
-#if 0
/**
* @brief At most time, there is only one qtaskinfo file committed latest in aTaskFile. Sometimes, there would be
* multiple qtaskinfo files supporting snapshot replication.
*
* @param pSma
- * @param pRSmaStat
+ * @param pStat
* @return int32_t
*/
-static int32_t tdCleanupQTaskInfoFiles(SSma *pSma, SRSmaStat *pRSmaStat) {
- SVnode *pVnode = pSma->pVnode;
- int64_t committed = pRSmaStat->commitAppliedVer;
- SArray *aTaskFile = pRSmaStat->aTaskFile;
+static int32_t tdUpdateQTaskInfoFiles(SSma *pSma, SRSmaStat *pStat) {
+ SVnode *pVnode = pSma->pVnode;
+ SRSmaFS *pFS = RSMA_FS(pStat);
+ int64_t committed = pStat->commitAppliedVer;
+ int64_t fsMaxVer = -1;
+ char qTaskInfoFullName[TSDB_FILENAME_LEN];
+
+ taosWLockLatch(RSMA_FS_LOCK(pStat));
+
+ for (int32_t i = 0; i < taosArrayGetSize(pFS->aQTaskInf);) {
+ SQTaskFile *pTaskF = taosArrayGet(pFS->aQTaskInf, i);
+ int32_t oldVal = atomic_fetch_sub_32(&pTaskF->nRef, 1);
+ if ((oldVal <= 1) && (pTaskF->version < committed)) {
+ tdRSmaQTaskInfoGetFullName(TD_VID(pVnode), pTaskF->version, tfsGetPrimaryPath(pVnode->pTfs), qTaskInfoFullName);
+ if (taosRemoveFile(qTaskInfoFullName) < 0) {
+ smaWarn("vgId:%d, cleanup qinf, committed %" PRIi64 ", failed to remove %s since %s", TD_VID(pVnode), committed,
+ qTaskInfoFullName, tstrerror(TAOS_SYSTEM_ERROR(errno)));
+ } else {
+ smaDebug("vgId:%d, cleanup qinf, committed %" PRIi64 ", success to remove %s", TD_VID(pVnode), committed,
+ qTaskInfoFullName);
+ }
+ taosArrayRemove(pFS->aQTaskInf, i);
+ continue;
+ }
+ ++i;
+ }
+
+ if (taosArrayGetSize(pFS->aQTaskInf) > 0) {
+ fsMaxVer = ((SQTaskFile *)taosArrayGetLast(pFS->aQTaskInf))->version;
+ }
- void *qTaskFile = taosArraySearch(aTaskFile, committed, tCmprQTaskFile, TD_LE);
-
+ if (fsMaxVer < committed) {
+ tdRSmaQTaskInfoGetFullName(TD_VID(pVnode), committed, tfsGetPrimaryPath(pVnode->pTfs), qTaskInfoFullName);
+ if (taosCheckExistFile(qTaskInfoFullName)) {
+ SQTaskFile qFile = {.nRef = 1, .padding = 0, .version = committed, .size = 0};
+ if (!taosArrayPush(pFS->aQTaskInf, &qFile)) {
+ taosWUnLockLatch(RSMA_FS_LOCK(pStat));
+ terrno = TSDB_CODE_OUT_OF_MEMORY;
+ return TSDB_CODE_FAILED;
+ }
+ }
+ } else {
+ smaDebug("vgId:%d, update qinf, no need as committed %" PRIi64 " not larger than fsMaxVer %" PRIi64, TD_VID(pVnode),
+ committed, fsMaxVer);
+ }
+ taosWUnLockLatch(RSMA_FS_LOCK(pStat));
return TSDB_CODE_SUCCESS;
}
-#endif
/**
* @brief post-commit for rollup sma
@@ -290,8 +243,7 @@ static int32_t tdProcessRSmaSyncPostCommitImpl(SSma *pSma) {
SRSmaStat *pRSmaStat = SMA_RSMA_STAT(pSma);
- // cleanup outdated qtaskinfo files
- tdCleanupQTaskInfoFiles(pSma, pRSmaStat);
+ tdUpdateQTaskInfoFiles(pSma, pRSmaStat);
return TSDB_CODE_SUCCESS;
}
@@ -312,15 +264,22 @@ static int32_t tdProcessRSmaAsyncPreCommitImpl(SSma *pSma) {
SSmaStat *pStat = SMA_ENV_STAT(pEnv);
SRSmaStat *pRSmaStat = SMA_STAT_RSMA(pStat);
+ int32_t nLoops = 0;
// step 1: set rsma stat
atomic_store_8(RSMA_TRIGGER_STAT(pRSmaStat), TASK_TRIGGER_STAT_PAUSED);
- atomic_store_8(RSMA_COMMIT_STAT(pRSmaStat), 1);
+ while (atomic_val_compare_exchange_8(RSMA_COMMIT_STAT(pRSmaStat), 0, 1) != 0) {
+ ++nLoops;
+ if (nLoops > 1000) {
+ sched_yield();
+ nLoops = 0;
+ }
+ }
pRSmaStat->commitAppliedVer = pSma->pVnode->state.applied;
ASSERT(pRSmaStat->commitAppliedVer > 0);
// step 2: wait for all triggered fetch tasks to finish
- int32_t nLoops = 0;
+ nLoops = 0;
while (1) {
if (T_REF_VAL_GET(pStat) == 0) {
smaDebug("vgId:%d, rsma commit, fetch tasks are all finished", SMA_VID(pSma));
@@ -336,15 +295,21 @@ static int32_t tdProcessRSmaAsyncPreCommitImpl(SSma *pSma) {
}
/**
- * @brief step 3: consume the SubmitReq in buffer
+ * @brief step 3: commit should wait for all SubmitReq in buffer be consumed
* 1) This is high cost task and should not put in asyncPreCommit originally.
* 2) But, if put in asyncCommit, would trigger taskInfo cloning frequently.
*/
- if (tdRSmaProcessExecImpl(pSma, RSMA_EXEC_COMMIT) < 0) {
- return TSDB_CODE_FAILED;
+ nLoops = 0;
+ while (atomic_load_64(&pRSmaStat->nBufItems) > 0) {
+ ++nLoops;
+ if (nLoops > 1000) {
+ sched_yield();
+ nLoops = 0;
+ }
}
- smaInfo("vgId:%d, rsma commit, wait for all items to be consumed, TID:%p", SMA_VID(pSma), (void*)taosGetSelfPthreadId());
+ smaInfo("vgId:%d, rsma commit, wait for all items to be consumed, TID:%p", SMA_VID(pSma),
+ (void *)taosGetSelfPthreadId());
nLoops = 0;
while (atomic_load_64(&pRSmaStat->nBufItems) > 0) {
++nLoops;
@@ -357,12 +322,12 @@ static int32_t tdProcessRSmaAsyncPreCommitImpl(SSma *pSma) {
if (tdRSmaPersistExecImpl(pRSmaStat, RSMA_INFO_HASH(pRSmaStat)) < 0) {
return TSDB_CODE_FAILED;
}
- smaInfo("vgId:%d, rsma commit, operator state commited, TID:%p", SMA_VID(pSma), (void *)taosGetSelfPthreadId());
+ smaInfo("vgId:%d, rsma commit, operator state committed, TID:%p", SMA_VID(pSma), (void *)taosGetSelfPthreadId());
-#if 0 // consuming task of qTaskInfo clone
+#if 0 // consuming task of qTaskInfo clone
// step 4: swap queue/qall and iQueue/iQall
// lock
- // taosWLockLatch(SMA_ENV_LOCK(pEnv));
+ taosWLockLatch(SMA_ENV_LOCK(pEnv));
ASSERT(RSMA_INFO_HASH(pRSmaStat));
@@ -378,7 +343,7 @@ static int32_t tdProcessRSmaAsyncPreCommitImpl(SSma *pSma) {
}
// unlock
- // taosWUnLockLatch(SMA_ENV_LOCK(pEnv));
+ taosWUnLockLatch(SMA_ENV_LOCK(pEnv));
#endif
return TSDB_CODE_SUCCESS;
@@ -420,33 +385,29 @@ static int32_t tdProcessRSmaAsyncPostCommitImpl(SSma *pSma) {
}
SRSmaStat *pRSmaStat = (SRSmaStat *)SMA_ENV_STAT(pEnv);
- SArray *rsmaDeleted = NULL;
// step 1: merge qTaskInfo and iQTaskInfo
// lock
- // taosWLockLatch(SMA_ENV_LOCK(pEnv));
-
- void *pIter = NULL;
- while ((pIter = taosHashIterate(RSMA_INFO_HASH(pRSmaStat), pIter))) {
- tb_uid_t *pSuid = (tb_uid_t *)taosHashGetKey(pIter, NULL);
- SRSmaInfo *pRSmaInfo = *(SRSmaInfo **)pIter;
- if (RSMA_INFO_IS_DEL(pRSmaInfo)) {
- int32_t refVal = T_REF_VAL_GET(pRSmaInfo);
- if (refVal == 0) {
- if (!rsmaDeleted) {
- if ((rsmaDeleted = taosArrayInit(1, sizeof(tb_uid_t)))) {
- taosArrayPush(rsmaDeleted, pSuid);
- }
+ if (1 == atomic_val_compare_exchange_8(&pRSmaStat->delFlag, 1, 0)) {
+ taosWLockLatch(SMA_ENV_LOCK(pEnv));
+
+ void *pIter = NULL;
+ while ((pIter = taosHashIterate(RSMA_INFO_HASH(pRSmaStat), pIter))) {
+ tb_uid_t *pSuid = (tb_uid_t *)taosHashGetKey(pIter, NULL);
+ SRSmaInfo *pRSmaInfo = *(SRSmaInfo **)pIter;
+ if (RSMA_INFO_IS_DEL(pRSmaInfo)) {
+ int32_t refVal = T_REF_VAL_GET(pRSmaInfo);
+ if (refVal == 0) {
+ taosHashRemove(RSMA_INFO_HASH(pRSmaStat), pSuid, sizeof(*pSuid));
+ } else {
+ smaDebug(
+ "vgId:%d, rsma async post commit, not free rsma info since ref is %d although already deleted for "
+ "table:%" PRIi64,
+ SMA_VID(pSma), refVal, *pSuid);
}
- } else {
- smaDebug(
- "vgId:%d, rsma async post commit, not free rsma info since ref is %d although already deleted for "
- "table:%" PRIi64,
- SMA_VID(pSma), refVal, *pSuid);
- }
- continue;
- }
+ continue;
+ }
#if 0
if (pRSmaInfo->taskInfo[0]) {
if (pRSmaInfo->iTaskInfo[0]) {
@@ -461,27 +422,13 @@ static int32_t tdProcessRSmaAsyncPostCommitImpl(SSma *pSma) {
taosHashPut(RSMA_INFO_HASH(pRSmaStat), pSuid, sizeof(tb_uid_t), pIter, sizeof(pIter));
smaDebug("vgId:%d, rsma async post commit, migrated from iRsmaInfoHash for table:%" PRIi64, SMA_VID(pSma), *pSuid);
#endif
- }
-
- for (int32_t i = 0; i < taosArrayGetSize(rsmaDeleted); ++i) {
- tb_uid_t *pSuid = taosArrayGet(rsmaDeleted, i);
- void *pRSmaInfo = taosHashGet(RSMA_INFO_HASH(pRSmaStat), pSuid, sizeof(tb_uid_t));
- if ((pRSmaInfo = *(SRSmaInfo **)pRSmaInfo)) {
- tdFreeRSmaInfo(pSma, pRSmaInfo, true);
- smaDebug(
- "vgId:%d, rsma async post commit, free rsma info since already deleted and ref is 0 for "
- "table:%" PRIi64,
- SMA_VID(pSma), *pSuid);
}
- taosHashRemove(RSMA_INFO_HASH(pRSmaStat), pSuid, sizeof(tb_uid_t));
- }
- taosArrayDestroy(rsmaDeleted);
- // unlock
- // taosWUnLockLatch(SMA_ENV_LOCK(pEnv));
+ // unlock
+ taosWUnLockLatch(SMA_ENV_LOCK(pEnv));
+ }
- // step 2: cleanup outdated qtaskinfo files
- tdCleanupQTaskInfoFiles(pSma, pRSmaStat);
+ tdUpdateQTaskInfoFiles(pSma, pRSmaStat);
atomic_store_8(RSMA_COMMIT_STAT(pRSmaStat), 0);
diff --git a/source/dnode/vnode/src/sma/smaEnv.c b/source/dnode/vnode/src/sma/smaEnv.c
index 32a419022a312f9ab21681b9bc6f819c7792f51e..64e5e49d0b116a7bed6d8ad5be49388db14bbd96 100644
--- a/source/dnode/vnode/src/sma/smaEnv.c
+++ b/source/dnode/vnode/src/sma/smaEnv.c
@@ -28,6 +28,8 @@ static int32_t tdInitSmaEnv(SSma *pSma, int8_t smaType, SSmaEnv **ppEnv);
static int32_t tdInitSmaStat(SSmaStat **pSmaStat, int8_t smaType, const SSma *pSma);
static int32_t tdRsmaStartExecutor(const SSma *pSma);
static int32_t tdRsmaStopExecutor(const SSma *pSma);
+static int32_t tdDestroySmaState(SSmaStat *pSmaStat, int8_t smaType);
+static void *tdFreeSmaState(SSmaStat *pSmaStat, int8_t smaType);
static void *tdFreeTSmaStat(STSmaStat *pStat);
static void tdDestroyRSmaStat(void *pRSmaStat);
@@ -59,12 +61,23 @@ int32_t smaInit() {
return TSDB_CODE_FAILED;
}
+ int32_t type = (8 == POINTER_BYTES) ? TSDB_DATA_TYPE_UBIGINT : TSDB_DATA_TYPE_UINT;
+ smaMgmt.refHash = taosHashInit(64, taosGetDefaultHashFunction(type), true, HASH_ENTRY_LOCK);
+ if (!smaMgmt.refHash) {
+ taosCloseRef(smaMgmt.rsetId);
+ atomic_store_8(&smaMgmt.inited, 0);
+ smaError("failed to init sma tmr hanle since %s", terrstr());
+ return TSDB_CODE_FAILED;
+ }
+
// init fetch timer handle
smaMgmt.tmrHandle = taosTmrInit(10000, 100, 10000, "RSMA");
if (!smaMgmt.tmrHandle) {
taosCloseRef(smaMgmt.rsetId);
+ taosHashCleanup(smaMgmt.refHash);
+ smaMgmt.refHash = NULL;
atomic_store_8(&smaMgmt.inited, 0);
- smaError("failed to init sma tmr hanle since %s", terrstr());
+ smaError("failed to init sma tmr handle since %s", terrstr());
return TSDB_CODE_FAILED;
}
@@ -93,6 +106,8 @@ void smaCleanUp() {
if (old == 1) {
taosCloseRef(smaMgmt.rsetId);
+ taosHashCleanup(smaMgmt.refHash);
+ smaMgmt.refHash = NULL;
taosTmrCleanUp(smaMgmt.tmrHandle);
smaInfo("sma mgmt env is cleaned up, rsetId:%d, tmrHandle:%p", smaMgmt.rsetId, smaMgmt.tmrHandle);
atomic_store_8(&smaMgmt.inited, 0);
@@ -162,37 +177,19 @@ void *tdFreeSmaEnv(SSmaEnv *pSmaEnv) {
return NULL;
}
-int32_t tdRefSmaStat(SSma *pSma, SSmaStat *pStat) {
- if (!pStat) return 0;
+static void tRSmaInfoHashFreeNode(void *data) {
+ SRSmaInfo *pRSmaInfo = NULL;
+ SRSmaInfoItem *pItem = NULL;
- int ref = T_REF_INC(pStat);
- smaDebug("vgId:%d, ref sma stat:%p, val:%d", SMA_VID(pSma), pStat, ref);
- return 0;
-}
-
-int32_t tdUnRefSmaStat(SSma *pSma, SSmaStat *pStat) {
- if (!pStat) return 0;
-
- int ref = T_REF_DEC(pStat);
- smaDebug("vgId:%d, unref sma stat:%p, val:%d", SMA_VID(pSma), pStat, ref);
- return 0;
-}
-
-int32_t tdRefRSmaInfo(SSma *pSma, SRSmaInfo *pRSmaInfo) {
- if (!pRSmaInfo) return 0;
-
- int ref = T_REF_INC(pRSmaInfo);
- smaDebug("vgId:%d, ref rsma info:%p, val:%d", SMA_VID(pSma), pRSmaInfo, ref);
- return 0;
-}
-
-int32_t tdUnRefRSmaInfo(SSma *pSma, SRSmaInfo *pRSmaInfo) {
- if (!pRSmaInfo) return 0;
-
- int ref = T_REF_DEC(pRSmaInfo);
- smaDebug("vgId:%d, unref rsma info:%p, val:%d", SMA_VID(pSma), pRSmaInfo, ref);
-
- return 0;
+ if ((pRSmaInfo = *(SRSmaInfo **)data)) {
+ if ((pItem = RSMA_INFO_ITEM((SRSmaInfo *)pRSmaInfo, 0)) && pItem->level) {
+ taosHashRemove(smaMgmt.refHash, &pItem, POINTER_BYTES);
+ }
+ if ((pItem = RSMA_INFO_ITEM((SRSmaInfo *)pRSmaInfo, 1)) && pItem->level) {
+ taosHashRemove(smaMgmt.refHash, &pItem, POINTER_BYTES);
+ }
+ tdFreeRSmaInfo(pRSmaInfo->pSma, pRSmaInfo, true);
+ }
}
static int32_t tdInitSmaStat(SSmaStat **pSmaStat, int8_t smaType, const SSma *pSma) {
@@ -240,10 +237,16 @@ static int32_t tdInitSmaStat(SSmaStat **pSmaStat, int8_t smaType, const SSma *pS
if (!RSMA_INFO_HASH(pRSmaStat)) {
return TSDB_CODE_FAILED;
}
+ taosHashSetFreeFp(RSMA_INFO_HASH(pRSmaStat), tRSmaInfoHashFreeNode);
if (tdRsmaStartExecutor(pSma) < 0) {
return TSDB_CODE_FAILED;
}
+
+ if (!(RSMA_FS(pRSmaStat)->aQTaskInf = taosArrayInit(1, sizeof(SQTaskFile)))) {
+ terrno = TSDB_CODE_OUT_OF_MEMORY;
+ return TSDB_CODE_FAILED;
+ }
} else if (smaType == TSDB_SMA_TYPE_TIME_RANGE) {
// TODO
} else {
@@ -275,17 +278,8 @@ static void tdDestroyRSmaStat(void *pRSmaStat) {
smaDebug("vgId:%d, destroy rsma stat %p", SMA_VID(pSma), pRSmaStat);
// step 1: set rsma trigger stat cancelled
atomic_store_8(RSMA_TRIGGER_STAT(pStat), TASK_TRIGGER_STAT_CANCELLED);
- tsem_destroy(&(pStat->notEmpty));
// step 2: destroy the rsma info and associated fetch tasks
- if (taosHashGetSize(RSMA_INFO_HASH(pStat)) > 0) {
- void *infoHash = taosHashIterate(RSMA_INFO_HASH(pStat), NULL);
- while (infoHash) {
- SRSmaInfo *pSmaInfo = *(SRSmaInfo **)infoHash;
- tdFreeRSmaInfo(pSma, pSmaInfo, true);
- infoHash = taosHashIterate(RSMA_INFO_HASH(pStat), infoHash);
- }
- }
taosHashCleanup(RSMA_INFO_HASH(pStat));
// step 3: wait for all triggered fetch tasks to finish
@@ -307,12 +301,16 @@ static void tdDestroyRSmaStat(void *pRSmaStat) {
// step 4:
tdRsmaStopExecutor(pSma);
- // step 5: free pStat
+ // step 5:
+ tdRSmaFSClose(RSMA_FS(pStat));
+
+ // step 6: free pStat
+ tsem_destroy(&(pStat->notEmpty));
taosMemoryFreeClear(pStat);
}
}
-void *tdFreeSmaState(SSmaStat *pSmaStat, int8_t smaType) {
+static void *tdFreeSmaState(SSmaStat *pSmaStat, int8_t smaType) {
tdDestroySmaState(pSmaStat, smaType);
if (smaType == TSDB_SMA_TYPE_TIME_RANGE) {
taosMemoryFreeClear(pSmaStat);
@@ -329,7 +327,7 @@ void *tdFreeSmaState(SSmaStat *pSmaStat, int8_t smaType) {
* @return int32_t
*/
-int32_t tdDestroySmaState(SSmaStat *pSmaStat, int8_t smaType) {
+static int32_t tdDestroySmaState(SSmaStat *pSmaStat, int8_t smaType) {
if (pSmaStat) {
if (smaType == TSDB_SMA_TYPE_TIME_RANGE) {
tdDestroyTSmaStat(SMA_STAT_TSMA(pSmaStat));
@@ -337,7 +335,7 @@ int32_t tdDestroySmaState(SSmaStat *pSmaStat, int8_t smaType) {
SRSmaStat *pRSmaStat = &pSmaStat->rsmaStat;
int32_t vid = SMA_VID(pRSmaStat->pSma);
int64_t refId = RSMA_REF_ID(pRSmaStat);
- if (taosRemoveRef(smaMgmt.rsetId, RSMA_REF_ID(pRSmaStat)) < 0) {
+ if (taosRemoveRef(smaMgmt.rsetId, refId) < 0) {
smaError("vgId:%d, remove refId:%" PRIi64 " from rsmaRef:%" PRIi32 " failed since %s", vid, refId,
smaMgmt.rsetId, terrstr());
} else {
@@ -388,7 +386,7 @@ int32_t tdCheckAndInitSmaEnv(SSma *pSma, int8_t smaType) {
}
break;
default:
- smaError("vgId:%d, undefined smaType:%", SMA_VID(pSma), smaType);
+ smaError("vgId:%d, undefined smaType:%" PRIi8, SMA_VID(pSma), smaType);
return TSDB_CODE_FAILED;
}
@@ -461,6 +459,8 @@ static int32_t tdRsmaStopExecutor(const SSma *pSma) {
taosThreadJoin(pthread[i], NULL);
}
}
+
+ smaInfo("vgId:%d, rsma executor stopped, number:%d", SMA_VID(pSma), tsNumOfVnodeRsmaThreads);
}
return 0;
}
\ No newline at end of file
diff --git a/source/dnode/vnode/src/sma/smaFS.c b/source/dnode/vnode/src/sma/smaFS.c
new file mode 100644
index 0000000000000000000000000000000000000000..55378751eb729b046346d5d5aba3e37cc8c7ad4b
--- /dev/null
+++ b/source/dnode/vnode/src/sma/smaFS.c
@@ -0,0 +1,266 @@
+/*
+ * Copyright (c) 2019 TAOS Data, Inc.
+ *
+ * This program is free software: you can use, redistribute, and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3
+ * or later ("AGPL"), as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see .
+ */
+
+#include "sma.h"
+
+// =================================================================================================
+
+static int32_t tdFetchQTaskInfoFiles(SSma *pSma, int64_t version, SArray **output);
+static int32_t tdQTaskInfCmprFn1(const void *p1, const void *p2);
+static int32_t tdQTaskInfCmprFn2(const void *p1, const void *p2);
+/**
+ * @brief Open RSma FS from qTaskInfo files
+ *
+ * @param pSma
+ * @param version
+ * @return int32_t
+ */
+int32_t tdRSmaFSOpen(SSma *pSma, int64_t version) {
+ SVnode *pVnode = pSma->pVnode;
+ int64_t commitID = pVnode->state.commitID;
+ SSmaEnv *pEnv = SMA_RSMA_ENV(pSma);
+ SRSmaStat *pStat = NULL;
+ SArray *output = NULL;
+
+ terrno = TSDB_CODE_SUCCESS;
+
+ if (!pEnv) {
+ return TSDB_CODE_SUCCESS;
+ }
+
+ if (tdFetchQTaskInfoFiles(pSma, version, &output) < 0) {
+ goto _end;
+ }
+
+ pStat = (SRSmaStat *)SMA_ENV_STAT(pEnv);
+
+ for (int32_t i = 0; i < taosArrayGetSize(output); ++i) {
+ int32_t vid = 0;
+ int64_t version = -1;
+ sscanf((const char *)taosArrayGetP(output, i), "v%dqinf.v%" PRIi64, &vid, &version);
+ SQTaskFile qTaskFile = {.version = version, .nRef = 1};
+ if ((terrno = tdRSmaFSUpsertQTaskFile(RSMA_FS(pStat), &qTaskFile)) < 0) {
+ goto _end;
+ }
+ smaInfo("vgId:%d, open fs, version:%" PRIi64 ", ref:%d", TD_VID(pVnode), qTaskFile.version, qTaskFile.nRef);
+ }
+
+_end:
+ for (int32_t i = 0; i < taosArrayGetSize(output); ++i) {
+ void *ptr = taosArrayGetP(output, i);
+ taosMemoryFreeClear(ptr);
+ }
+ taosArrayDestroy(output);
+
+ if (terrno != TSDB_CODE_SUCCESS) {
+ smaError("vgId:%d, open rsma fs failed since %s", TD_VID(pVnode), terrstr());
+ return TSDB_CODE_FAILED;
+ }
+ return TSDB_CODE_SUCCESS;
+}
+
+void tdRSmaFSClose(SRSmaFS *fs) { taosArrayDestroy(fs->aQTaskInf); }
+
+static int32_t tdQTaskInfCmprFn1(const void *p1, const void *p2) {
+ if (*(int64_t *)p1 < ((SQTaskFile *)p2)->version) {
+ return -1;
+ } else if (*(int64_t *)p1 > ((SQTaskFile *)p2)->version) {
+ return 1;
+ }
+ return 0;
+}
+
+int32_t tdRSmaFSRef(SSma *pSma, SRSmaStat *pStat, int64_t version) {
+ SArray *aQTaskInf = RSMA_FS(pStat)->aQTaskInf;
+ SQTaskFile *pTaskF = NULL;
+ int32_t oldVal = 0;
+
+ taosRLockLatch(RSMA_FS_LOCK(pStat));
+ if ((pTaskF = taosArraySearch(aQTaskInf, &version, tdQTaskInfCmprFn1, TD_EQ))) {
+ oldVal = atomic_fetch_add_32(&pTaskF->nRef, 1);
+ ASSERT(oldVal > 0);
+ }
+ taosRUnLockLatch(RSMA_FS_LOCK(pStat));
+ return oldVal;
+}
+
+int64_t tdRSmaFSMaxVer(SSma *pSma, SRSmaStat *pStat) {
+ SArray *aQTaskInf = RSMA_FS(pStat)->aQTaskInf;
+ int64_t version = -1;
+
+ taosRLockLatch(RSMA_FS_LOCK(pStat));
+ if (taosArrayGetSize(aQTaskInf) > 0) {
+ version = ((SQTaskFile *)taosArrayGetLast(aQTaskInf))->version;
+ }
+ taosRUnLockLatch(RSMA_FS_LOCK(pStat));
+ return version;
+}
+
+void tdRSmaFSUnRef(SSma *pSma, SRSmaStat *pStat, int64_t version) {
+ SVnode *pVnode = pSma->pVnode;
+ SArray *aQTaskInf = RSMA_FS(pStat)->aQTaskInf;
+ char qTaskFullName[TSDB_FILENAME_LEN];
+ SQTaskFile *pTaskF = NULL;
+ int32_t idx = -1;
+
+ taosWLockLatch(RSMA_FS_LOCK(pStat));
+ if ((idx = taosArraySearchIdx(aQTaskInf, &version, tdQTaskInfCmprFn1, TD_EQ)) >= 0) {
+ ASSERT(idx < taosArrayGetSize(aQTaskInf));
+ pTaskF = taosArrayGet(aQTaskInf, idx);
+ if (atomic_sub_fetch_32(&pTaskF->nRef, 1) <= 0) {
+ tdRSmaQTaskInfoGetFullName(TD_VID(pVnode), pTaskF->version, tfsGetPrimaryPath(pVnode->pTfs), qTaskFullName);
+ if (taosRemoveFile(qTaskFullName) < 0) {
+ smaWarn("vgId:%d, failed to remove %s since %s", TD_VID(pVnode), qTaskFullName,
+ tstrerror(TAOS_SYSTEM_ERROR(errno)));
+ } else {
+ smaDebug("vgId:%d, success to remove %s", TD_VID(pVnode), qTaskFullName);
+ }
+ taosArrayRemove(aQTaskInf, idx);
+ }
+ }
+ taosWUnLockLatch(RSMA_FS_LOCK(pStat));
+}
+
+/**
+ * @brief Fetch qtaskfiles LE than version
+ *
+ * @param pSma
+ * @param version
+ * @param output
+ * @return int32_t
+ */
+static int32_t tdFetchQTaskInfoFiles(SSma *pSma, int64_t version, SArray **output) {
+ SVnode *pVnode = pSma->pVnode;
+ TdDirPtr pDir = NULL;
+ TdDirEntryPtr pDirEntry = NULL;
+ char dir[TSDB_FILENAME_LEN];
+ const char *pattern = "v[0-9]+qinf\\.v([0-9]+)?$";
+ regex_t regex;
+ int code = 0;
+
+ terrno = TSDB_CODE_SUCCESS;
+
+ tdGetVndDirName(TD_VID(pVnode), tfsGetPrimaryPath(pVnode->pTfs), VNODE_RSMA_DIR, true, dir);
+
+ if (!taosCheckExistFile(dir)) {
+ smaDebug("vgId:%d, fetch qtask files, no need as dir %s not exist", TD_VID(pVnode), dir);
+ return TSDB_CODE_SUCCESS;
+ }
+
+ // Resource allocation and init
+ if ((code = regcomp(®ex, pattern, REG_EXTENDED)) != 0) {
+ terrno = TSDB_CODE_RSMA_REGEX_MATCH;
+ char errbuf[128];
+ regerror(code, ®ex, errbuf, sizeof(errbuf));
+ smaWarn("vgId:%d, fetch qtask files, regcomp for %s failed since %s", TD_VID(pVnode), dir, errbuf);
+ return TSDB_CODE_FAILED;
+ }
+
+ if (!(pDir = taosOpenDir(dir))) {
+ regfree(®ex);
+ terrno = TAOS_SYSTEM_ERROR(errno);
+ smaError("vgId:%d, fetch qtask files, open dir %s failed since %s", TD_VID(pVnode), dir, terrstr());
+ return TSDB_CODE_FAILED;
+ }
+
+ int32_t dirLen = strlen(dir);
+ char *dirEnd = POINTER_SHIFT(dir, dirLen);
+ regmatch_t regMatch[2];
+ while ((pDirEntry = taosReadDir(pDir))) {
+ char *entryName = taosGetDirEntryName(pDirEntry);
+ if (!entryName) {
+ continue;
+ }
+
+ code = regexec(®ex, entryName, 2, regMatch, 0);
+
+ if (code == 0) {
+ // match
+ smaInfo("vgId:%d, fetch qtask files, max ver:%" PRIi64 ", %s found", TD_VID(pVnode), version, entryName);
+
+ int64_t ver = -1;
+ sscanf((const char *)POINTER_SHIFT(entryName, regMatch[1].rm_so), "%" PRIi64, &ver);
+ if ((ver <= version) && (ver > -1)) {
+ if (!(*output)) {
+ if (!(*output = taosArrayInit(1, POINTER_BYTES))) {
+ terrno = TSDB_CODE_OUT_OF_MEMORY;
+ goto _end;
+ }
+ }
+ char *entryDup = strdup(entryName);
+ if (!entryDup) {
+ terrno = TSDB_CODE_OUT_OF_MEMORY;
+ goto _end;
+ }
+ if (!taosArrayPush(*output, &entryDup)) {
+ terrno = TSDB_CODE_OUT_OF_MEMORY;
+ goto _end;
+ }
+ } else {
+ }
+ } else if (code == REG_NOMATCH) {
+ // not match
+ smaTrace("vgId:%d, fetch qtask files, not match %s", TD_VID(pVnode), entryName);
+ continue;
+ } else {
+ // has other error
+ char errbuf[128];
+ regerror(code, ®ex, errbuf, sizeof(errbuf));
+ smaWarn("vgId:%d, fetch qtask files, regexec failed since %s", TD_VID(pVnode), errbuf);
+ terrno = TSDB_CODE_RSMA_REGEX_MATCH;
+ goto _end;
+ }
+ }
+_end:
+ taosCloseDir(&pDir);
+ regfree(®ex);
+ return terrno == 0 ? TSDB_CODE_SUCCESS : TSDB_CODE_FAILED;
+}
+
+static int32_t tdQTaskFileCmprFn2(const void *p1, const void *p2) {
+ if (((SQTaskFile *)p1)->version < ((SQTaskFile *)p2)->version) {
+ return -1;
+ } else if (((SQTaskFile *)p1)->version > ((SQTaskFile *)p2)->version) {
+ return 1;
+ }
+
+ return 0;
+}
+
+int32_t tdRSmaFSUpsertQTaskFile(SRSmaFS *pFS, SQTaskFile *qTaskFile) {
+ int32_t code = 0;
+ int32_t idx = taosArraySearchIdx(pFS->aQTaskInf, qTaskFile, tdQTaskFileCmprFn2, TD_GE);
+
+ if (idx < 0) {
+ idx = taosArrayGetSize(pFS->aQTaskInf);
+ } else {
+ SQTaskFile *pTaskF = (SQTaskFile *)taosArrayGet(pFS->aQTaskInf, idx);
+ int32_t c = tdQTaskFileCmprFn2(pTaskF, qTaskFile);
+ if (c == 0) {
+ pTaskF->nRef = qTaskFile->nRef;
+ pTaskF->version = qTaskFile->version;
+ pTaskF->size = qTaskFile->size;
+ goto _exit;
+ }
+ }
+
+ if (taosArrayInsert(pFS->aQTaskInf, idx, qTaskFile) == NULL) {
+ code = TSDB_CODE_OUT_OF_MEMORY;
+ goto _exit;
+ }
+
+_exit:
+ return code;
+}
\ No newline at end of file
diff --git a/source/dnode/vnode/src/sma/smaOpen.c b/source/dnode/vnode/src/sma/smaOpen.c
index 235fb1f94161256721dcb1f87ad3a2cc6a3e98f8..d9ffda279f16501ac8f39fde7fe14728640db676 100644
--- a/source/dnode/vnode/src/sma/smaOpen.c
+++ b/source/dnode/vnode/src/sma/smaOpen.c
@@ -16,17 +16,17 @@
#include "sma.h"
#include "tsdb.h"
-static int32_t smaEvalDays(SRetention *r, int8_t precision);
-static int32_t smaSetKeepCfg(STsdbKeepCfg *pKeepCfg, STsdbCfg *pCfg, int type);
+static int32_t smaEvalDays(SVnode *pVnode, SRetention *r, int8_t level, int8_t precision, int32_t duration);
+static int32_t smaSetKeepCfg(SVnode *pVnode, STsdbKeepCfg *pKeepCfg, STsdbCfg *pCfg, int type);
static int32_t rsmaRestore(SSma *pSma);
-#define SMA_SET_KEEP_CFG(l) \
+#define SMA_SET_KEEP_CFG(v, l) \
do { \
SRetention *r = &pCfg->retentions[l]; \
pKeepCfg->keep2 = convertTimeFromPrecisionToUnit(r->keep, pCfg->precision, TIME_UNIT_MINUTE); \
pKeepCfg->keep0 = pKeepCfg->keep2; \
pKeepCfg->keep1 = pKeepCfg->keep2; \
- pKeepCfg->days = smaEvalDays(r, pCfg->precision); \
+ pKeepCfg->days = smaEvalDays(v, pCfg->retentions, l, pCfg->precision, pCfg->days); \
} while (0)
#define SMA_OPEN_RSMA_IMPL(v, l) \
@@ -38,51 +38,78 @@ static int32_t rsmaRestore(SSma *pSma);
} \
break; \
} \
- smaSetKeepCfg(&keepCfg, pCfg, TSDB_TYPE_RSMA_L##l); \
+ smaSetKeepCfg(v, &keepCfg, pCfg, TSDB_TYPE_RSMA_L##l); \
if (tsdbOpen(v, &SMA_RSMA_TSDB##l(pSma), VNODE_RSMA##l##_DIR, &keepCfg) < 0) { \
goto _err; \
} \
} while (0)
-#define RETENTION_DAYS_SPLIT_RATIO 10
-#define RETENTION_DAYS_SPLIT_MIN 1
-#define RETENTION_DAYS_SPLIT_MAX 30
+/**
+ * @brief Evaluate days(duration) for rsma level 1/2/3.
+ * 1) level 1: duration from "create database"
+ * 2) level 2/3: duration * (freq/freqL1)
+ * @param pVnode
+ * @param r
+ * @param level
+ * @param precision
+ * @param duration
+ * @return int32_t
+ */
+static int32_t smaEvalDays(SVnode *pVnode, SRetention *r, int8_t level, int8_t precision, int32_t duration) {
+ int32_t freqDuration = convertTimeFromPrecisionToUnit((r + TSDB_RETENTION_L0)->freq, precision, TIME_UNIT_MINUTE);
+ int32_t keepDuration = convertTimeFromPrecisionToUnit((r + TSDB_RETENTION_L0)->keep, precision, TIME_UNIT_MINUTE);
+ int32_t days = duration; // min
-static int32_t smaEvalDays(SRetention *r, int8_t precision) {
- int32_t keepDays = convertTimeFromPrecisionToUnit(r->keep, precision, TIME_UNIT_DAY);
- int32_t freqDays = convertTimeFromPrecisionToUnit(r->freq, precision, TIME_UNIT_DAY);
+ if (days < freqDuration) {
+ days = freqDuration;
+ }
- int32_t days = keepDays / RETENTION_DAYS_SPLIT_RATIO;
- if (days <= RETENTION_DAYS_SPLIT_MIN) {
- days = RETENTION_DAYS_SPLIT_MIN;
- if (days < freqDays) {
- days = freqDays + 1;
- }
- } else {
- if (days > RETENTION_DAYS_SPLIT_MAX) {
- days = RETENTION_DAYS_SPLIT_MAX;
- }
- if (days < freqDays) {
- days = freqDays + 1;
- }
+ if (days > keepDuration) {
+ days = keepDuration;
+ }
+
+ if (level == TSDB_RETENTION_L0) {
+ goto end;
}
- return days * 1440;
+
+ ASSERT(level >= TSDB_RETENTION_L1 && level <= TSDB_RETENTION_L2);
+
+ freqDuration = convertTimeFromPrecisionToUnit((r + level)->freq, precision, TIME_UNIT_MINUTE);
+ keepDuration = convertTimeFromPrecisionToUnit((r + level)->keep, precision, TIME_UNIT_MINUTE);
+
+ int32_t nFreqTimes = (r + level)->freq / (r + TSDB_RETENTION_L0)->freq;
+ days *= (nFreqTimes > 1 ? nFreqTimes : 1);
+
+ if (days > keepDuration) {
+ days = keepDuration;
+ }
+
+ if (days > TSDB_MAX_DURATION_PER_FILE) {
+ days = TSDB_MAX_DURATION_PER_FILE;
+ }
+
+ if (days < freqDuration) {
+ days = freqDuration;
+ }
+end:
+ smaInfo("vgId:%d, evaluated duration for level %" PRIi8 " is %d, raw val:%d", TD_VID(pVnode), level + 1, days, duration);
+ return days;
}
-int smaSetKeepCfg(STsdbKeepCfg *pKeepCfg, STsdbCfg *pCfg, int type) {
+int smaSetKeepCfg(SVnode *pVnode, STsdbKeepCfg *pKeepCfg, STsdbCfg *pCfg, int type) {
pKeepCfg->precision = pCfg->precision;
switch (type) {
case TSDB_TYPE_TSMA:
ASSERT(0);
break;
case TSDB_TYPE_RSMA_L0:
- SMA_SET_KEEP_CFG(0);
+ SMA_SET_KEEP_CFG(pVnode, 0);
break;
case TSDB_TYPE_RSMA_L1:
- SMA_SET_KEEP_CFG(1);
+ SMA_SET_KEEP_CFG(pVnode, 1);
break;
case TSDB_TYPE_RSMA_L2:
- SMA_SET_KEEP_CFG(2);
+ SMA_SET_KEEP_CFG(pVnode, 2);
break;
default:
ASSERT(0);
@@ -123,7 +150,7 @@ int32_t smaOpen(SVnode *pVnode) {
}
// restore the rsma
- if (tdRsmaRestore(pSma, RSMA_RESTORE_REBOOT, pVnode->state.committed) < 0) {
+ if (tdRSmaRestore(pSma, RSMA_RESTORE_REBOOT, pVnode->state.committed) < 0) {
goto _err;
}
}
@@ -148,14 +175,14 @@ int32_t smaClose(SSma *pSma) {
/**
* @brief rsma env restore
- *
- * @param pSma
- * @param type
- * @param committedVer
- * @return int32_t
+ *
+ * @param pSma
+ * @param type
+ * @param committedVer
+ * @return int32_t
*/
-int32_t tdRsmaRestore(SSma *pSma, int8_t type, int64_t committedVer) {
+int32_t tdRSmaRestore(SSma *pSma, int8_t type, int64_t committedVer) {
ASSERT(VND_IS_RSMA(pSma->pVnode));
- return tdProcessRSmaRestoreImpl(pSma, type, committedVer);
+ return tdRSmaProcessRestoreImpl(pSma, type, committedVer);
}
\ No newline at end of file
diff --git a/source/dnode/vnode/src/sma/smaRollup.c b/source/dnode/vnode/src/sma/smaRollup.c
index 426ab521fdd16e3d0907fd1c056da15d7d6937cc..27da9da02c2d6213d92ac4cf268e60a2680df839 100644
--- a/source/dnode/vnode/src/sma/smaRollup.c
+++ b/source/dnode/vnode/src/sma/smaRollup.c
@@ -19,22 +19,22 @@
#define RSMA_QTASKINFO_HEAD_LEN (sizeof(int32_t) + sizeof(int8_t) + sizeof(int64_t)) // len + type + suid
#define RSMA_QTASKEXEC_SMOOTH_SIZE (100) // cnt
#define RSMA_SUBMIT_BATCH_SIZE (1024) // cnt
-#define RSMA_FETCH_DELAY_MAX (900000) // ms
-#define RSMA_FETCH_ACTIVE_MAX (1800) // ms
+#define RSMA_FETCH_DELAY_MAX (120000) // ms
+#define RSMA_FETCH_ACTIVE_MAX (1000) // ms
+#define RSMA_FETCH_INTERVAL (5000) // ms
SSmaMgmt smaMgmt = {
.inited = 0,
.rsetId = -1,
};
-#define TD_QTASKINFO_FNAME_PREFIX "qtaskinfo.ver"
-#define TD_RSMAINFO_DEL_FILE "rsmainfo.del"
+#define TD_QTASKINFO_FNAME_PREFIX "qinf.v"
+
typedef struct SRSmaQTaskInfoItem SRSmaQTaskInfoItem;
typedef struct SRSmaQTaskInfoIter SRSmaQTaskInfoIter;
-typedef struct SRSmaExecQItem SRSmaExecQItem;
static int32_t tdUidStorePut(STbUidStore *pStore, tb_uid_t suid, tb_uid_t *uid);
-static int32_t tdUpdateTbUidListImpl(SSma *pSma, tb_uid_t *suid, SArray *tbUids);
+static int32_t tdUpdateTbUidListImpl(SSma *pSma, tb_uid_t *suid, SArray *tbUids, bool isAdd);
static int32_t tdSetRSmaInfoItemParams(SSma *pSma, SRSmaParam *param, SRSmaStat *pStat, SRSmaInfo *pRSmaInfo,
int8_t idx);
static int32_t tdExecuteRSmaImpl(SSma *pSma, const void *pMsg, int32_t msgSize, int32_t inputType, SRSmaInfo *pInfo,
@@ -42,10 +42,12 @@ static int32_t tdExecuteRSmaImpl(SSma *pSma, const void *pMsg, int32_t msgSiz
static SRSmaInfo *tdAcquireRSmaInfoBySuid(SSma *pSma, int64_t suid);
static void tdReleaseRSmaInfo(SSma *pSma, SRSmaInfo *pInfo);
static void tdFreeRSmaSubmitItems(SArray *pItems);
-static int32_t tdRSmaFetchAllResult(SSma *pSma, SRSmaInfo *pInfo, SArray *pSubmitArr);
+static int32_t tdRSmaFetchAllResult(SSma *pSma, SRSmaInfo *pInfo);
static int32_t tdRSmaExecAndSubmitResult(SSma *pSma, qTaskInfo_t taskInfo, SRSmaInfoItem *pItem, STSchema *pTSchema,
int64_t suid);
static void tdRSmaFetchTrigger(void *param, void *tmrId);
+static int32_t tdRSmaInfoClone(SSma *pSma, SRSmaInfo *pInfo);
+static void tdRSmaQTaskInfoFree(qTaskInfo_t *taskHandle, int32_t vgId, int32_t level);
static int32_t tdRSmaQTaskInfoIterInit(SRSmaQTaskInfoIter *pIter, STFile *pTFile);
static int32_t tdRSmaQTaskInfoIterNextBlock(SRSmaQTaskInfoIter *pIter, bool *isFinish);
static int32_t tdRSmaQTaskInfoRestore(SSma *pSma, int8_t type, SRSmaQTaskInfoIter *pIter);
@@ -82,11 +84,6 @@ struct SRSmaQTaskInfoIter {
int32_t nBufPos;
};
-struct SRSmaExecQItem {
- void *pRSmaInfo;
- void *qall;
-};
-
void tdRSmaQTaskInfoGetFileName(int32_t vgId, int64_t version, char *outputName) {
tdGetVndFileName(vgId, NULL, VNODE_RSMA_DIR, TD_QTASKINFO_FNAME_PREFIX, version, outputName);
}
@@ -95,13 +92,25 @@ void tdRSmaQTaskInfoGetFullName(int32_t vgId, int64_t version, const char *path,
tdGetVndFileName(vgId, path, VNODE_RSMA_DIR, TD_QTASKINFO_FNAME_PREFIX, version, outputName);
}
+void tdRSmaQTaskInfoGetFullPath(int32_t vgId, int8_t level, const char *path, char *outputName) {
+ tdGetVndDirName(vgId, path, VNODE_RSMA_DIR, true, outputName);
+ int32_t rsmaLen = strlen(outputName);
+ snprintf(outputName + rsmaLen, TSDB_FILENAME_LEN - rsmaLen, "%" PRIi8, level);
+}
+
+void tdRSmaQTaskInfoGetFullPathEx(int32_t vgId, tb_uid_t suid, int8_t level, const char *path, char *outputName) {
+ tdGetVndDirName(vgId, path, VNODE_RSMA_DIR, true, outputName);
+ int32_t rsmaLen = strlen(outputName);
+ snprintf(outputName + rsmaLen, TSDB_FILENAME_LEN - rsmaLen, "%" PRIi64 "%s%" PRIi8, suid, TD_DIRSEP, level);
+}
+
static FORCE_INLINE int32_t tdRSmaQTaskInfoContLen(int32_t lenWithHead) {
return lenWithHead - RSMA_QTASKINFO_HEAD_LEN;
}
static FORCE_INLINE void tdRSmaQTaskInfoIterDestroy(SRSmaQTaskInfoIter *pIter) { taosMemoryFreeClear(pIter->pBuf); }
-void tdFreeQTaskInfo(qTaskInfo_t *taskHandle, int32_t vgId, int32_t level) {
+static void tdRSmaQTaskInfoFree(qTaskInfo_t *taskHandle, int32_t vgId, int32_t level) {
// Note: free/kill may in RC
if (!taskHandle || !(*taskHandle)) return;
qTaskInfo_t otaskHandle = atomic_load_ptr(taskHandle);
@@ -128,20 +137,24 @@ void *tdFreeRSmaInfo(SSma *pSma, SRSmaInfo *pInfo, bool isDeepFree) {
SRSmaInfoItem *pItem = &pInfo->items[i];
if (isDeepFree && pItem->tmrId) {
- smaDebug("vgId:%d, stop fetch timer %p for table %" PRIi64 " level %d", SMA_VID(pSma), pInfo->suid,
- pItem->tmrId, i + 1);
+ smaDebug("vgId:%d, stop fetch timer %p for table %" PRIi64 " level %d", SMA_VID(pSma), pItem->tmrId,
+ pInfo->suid, i + 1);
taosTmrStopA(&pItem->tmrId);
}
+ if (isDeepFree && pItem->pStreamState) {
+ streamStateClose(pItem->pStreamState);
+ }
+
if (isDeepFree && pInfo->taskInfo[i]) {
- tdFreeQTaskInfo(&pInfo->taskInfo[i], SMA_VID(pSma), i + 1);
+ tdRSmaQTaskInfoFree(&pInfo->taskInfo[i], SMA_VID(pSma), i + 1);
} else {
smaDebug("vgId:%d, table %" PRIi64 " no need to destroy rsma info level %d since empty taskInfo", SMA_VID(pSma),
pInfo->suid, i + 1);
}
if (pInfo->iTaskInfo[i]) {
- tdFreeQTaskInfo(&pInfo->iTaskInfo[i], SMA_VID(pSma), i + 1);
+ tdRSmaQTaskInfoFree(&pInfo->iTaskInfo[i], SMA_VID(pSma), i + 1);
} else {
smaDebug("vgId:%d, table %" PRIi64 " no need to destroy rsma info level %d since empty iTaskInfo",
SMA_VID(pSma), pInfo->suid, i + 1);
@@ -178,12 +191,13 @@ static FORCE_INLINE int32_t tdUidStoreInit(STbUidStore **pStore) {
return TSDB_CODE_SUCCESS;
}
-static int32_t tdUpdateTbUidListImpl(SSma *pSma, tb_uid_t *suid, SArray *tbUids) {
+static int32_t tdUpdateTbUidListImpl(SSma *pSma, tb_uid_t *suid, SArray *tbUids, bool isAdd) {
SRSmaInfo *pRSmaInfo = NULL;
if (!suid || !tbUids) {
terrno = TSDB_CODE_INVALID_PTR;
- smaError("vgId:%d, failed to get rsma info for uid:%" PRIi64 " since %s", SMA_VID(pSma), *suid, terrstr());
+ smaError("vgId:%d, failed to get rsma info for uid:%" PRIi64 " since %s", SMA_VID(pSma), suid ? *suid : -1,
+ terrstr());
return TSDB_CODE_FAILED;
}
@@ -202,7 +216,7 @@ static int32_t tdUpdateTbUidListImpl(SSma *pSma, tb_uid_t *suid, SArray *tbUids)
for (int32_t i = 0; i < TSDB_RETENTION_L2; ++i) {
if (pRSmaInfo->taskInfo[i]) {
- if (((terrno = qUpdateQualifiedTableId(pRSmaInfo->taskInfo[i], tbUids, true)) < 0)) {
+ if (((terrno = qUpdateQualifiedTableId(pRSmaInfo->taskInfo[i], tbUids, isAdd)) < 0)) {
tdReleaseRSmaInfo(pSma, pRSmaInfo);
smaError("vgId:%d, update tbUidList failed for uid:%" PRIi64 " level %d since %s", SMA_VID(pSma), *suid, i,
terrstr());
@@ -218,12 +232,12 @@ static int32_t tdUpdateTbUidListImpl(SSma *pSma, tb_uid_t *suid, SArray *tbUids)
return TSDB_CODE_SUCCESS;
}
-int32_t tdUpdateTbUidList(SSma *pSma, STbUidStore *pStore) {
+int32_t tdUpdateTbUidList(SSma *pSma, STbUidStore *pStore, bool isAdd) {
if (!pStore || (taosArrayGetSize(pStore->tbUids) == 0)) {
return TSDB_CODE_SUCCESS;
}
- if (tdUpdateTbUidListImpl(pSma, &pStore->suid, pStore->tbUids) != TSDB_CODE_SUCCESS) {
+ if (tdUpdateTbUidListImpl(pSma, &pStore->suid, pStore->tbUids, isAdd) != TSDB_CODE_SUCCESS) {
return TSDB_CODE_FAILED;
}
@@ -232,7 +246,7 @@ int32_t tdUpdateTbUidList(SSma *pSma, STbUidStore *pStore) {
tb_uid_t *pTbSuid = (tb_uid_t *)taosHashGetKey(pIter, NULL);
SArray *pTbUids = *(SArray **)pIter;
- if (tdUpdateTbUidListImpl(pSma, pTbSuid, pTbUids) != TSDB_CODE_SUCCESS) {
+ if (tdUpdateTbUidListImpl(pSma, pTbSuid, pTbUids, isAdd) != TSDB_CODE_SUCCESS) {
taosHashCancelIterate(pStore->uidHash, pIter);
return TSDB_CODE_FAILED;
}
@@ -293,19 +307,41 @@ static int32_t tdSetRSmaInfoItemParams(SSma *pSma, SRSmaParam *param, SRSmaStat
SRetention *pRetention = SMA_RETENTION(pSma);
STsdbCfg *pTsdbCfg = SMA_TSDB_CFG(pSma);
SVnode *pVnode = pSma->pVnode;
+ char taskInfDir[TSDB_FILENAME_LEN] = {0};
+ void *pStreamState = NULL;
+
+ // set the backend of stream state
+ tdRSmaQTaskInfoGetFullPathEx(TD_VID(pVnode), pRSmaInfo->suid, idx + 1, tfsGetPrimaryPath(pVnode->pTfs), taskInfDir);
+ if (!taosCheckExistFile(taskInfDir)) {
+ char *s = strdup(taskInfDir);
+ if (taosMulMkDir(taosDirName(s)) != 0) {
+ terrno = TAOS_SYSTEM_ERROR(errno);
+ taosMemoryFree(s);
+ return TSDB_CODE_FAILED;
+ }
+ taosMemoryFree(s);
+ }
+ pStreamState = streamStateOpen(taskInfDir, NULL, true);
+ if (!pStreamState) {
+ terrno = TSDB_CODE_RSMA_STREAM_STATE_OPEN;
+ return TSDB_CODE_FAILED;
+ }
+
+
SReadHandle handle = {
.meta = pVnode->pMeta,
.vnode = pVnode,
.initTqReader = 1,
+ .pStateBackend = pStreamState,
};
-
pRSmaInfo->taskInfo[idx] = qCreateStreamExecTaskInfo(param->qmsg[idx], &handle);
if (!pRSmaInfo->taskInfo[idx]) {
terrno = TSDB_CODE_RSMA_QTASKINFO_CREATE;
return TSDB_CODE_FAILED;
}
SRSmaInfoItem *pItem = &(pRSmaInfo->items[idx]);
- pItem->triggerStat = TASK_TRIGGER_STAT_INACTIVE;
+ pItem->triggerStat = TASK_TRIGGER_STAT_ACTIVE; // fetch the data when reboot
+ pItem->pStreamState = pStreamState;
if (param->maxdelay[idx] < TSDB_MIN_ROLLUP_MAX_DELAY) {
int64_t msInterval =
convertTimeFromPrecisionToUnit(pRetention[idx + 1].freq, pTsdbCfg->precision, TIME_UNIT_MILLISECOND);
@@ -318,10 +354,18 @@ static int32_t tdSetRSmaInfoItemParams(SSma *pSma, SRSmaParam *param, SRSmaStat
}
pItem->level = idx == 0 ? TSDB_RETENTION_L1 : TSDB_RETENTION_L2;
- taosTmrReset(tdRSmaFetchTrigger, pItem->maxDelay, pItem, smaMgmt.tmrHandle, &pItem->tmrId);
- smaInfo("vgId:%d, table:%" PRIi64 " level:%" PRIi8 " maxdelay:%" PRIi64 " watermark:%" PRIi64
+ ASSERT(pItem->level > 0);
+
+ SRSmaRef rsmaRef = {.refId = pStat->refId, .suid = pRSmaInfo->suid};
+ taosHashPut(smaMgmt.refHash, &pItem, POINTER_BYTES, &rsmaRef, sizeof(rsmaRef));
+
+ pItem->fetchLevel = pItem->level;
+ taosTmrReset(tdRSmaFetchTrigger, RSMA_FETCH_INTERVAL, pItem, smaMgmt.tmrHandle, &pItem->tmrId);
+
+ smaInfo("vgId:%d, item:%p table:%" PRIi64 " level:%" PRIi8 " maxdelay:%" PRIi64 " watermark:%" PRIi64
", finally maxdelay:%" PRIi32,
- TD_VID(pVnode), pRSmaInfo->suid, idx + 1, param->maxdelay[idx], param->watermark[idx], pItem->maxDelay);
+ TD_VID(pVnode), pItem, pRSmaInfo->suid, idx + 1, param->maxdelay[idx], param->watermark[idx],
+ pItem->maxDelay);
}
return TSDB_CODE_SUCCESS;
}
@@ -335,7 +379,7 @@ static int32_t tdSetRSmaInfoItemParams(SSma *pSma, SRSmaParam *param, SRSmaStat
* @param tbName
* @return int32_t
*/
-int32_t tdProcessRSmaCreateImpl(SSma *pSma, SRSmaParam *param, int64_t suid, const char *tbName) {
+int32_t tdRSmaProcessCreateImpl(SSma *pSma, SRSmaParam *param, int64_t suid, const char *tbName) {
if ((param->qmsgLen[0] == 0) && (param->qmsgLen[1] == 0)) {
smaDebug("vgId:%d, no qmsg1/qmsg2 for rollup table %s %" PRIi64, SMA_VID(pSma), tbName, suid);
return TSDB_CODE_SUCCESS;
@@ -373,7 +417,10 @@ int32_t tdProcessRSmaCreateImpl(SSma *pSma, SRSmaParam *param, int64_t suid, con
terrno = TSDB_CODE_TDB_IVD_TB_SCHEMA_VERSION;
goto _err;
}
+ pRSmaInfo->pSma = pSma;
pRSmaInfo->pTSchema = pTSchema;
+ pRSmaInfo->suid = suid;
+ T_REF_INIT_VAL(pRSmaInfo, 1);
if (!(pRSmaInfo->queue = taosOpenQueue())) {
goto _err;
}
@@ -387,9 +434,6 @@ int32_t tdProcessRSmaCreateImpl(SSma *pSma, SRSmaParam *param, int64_t suid, con
if (!(pRSmaInfo->iQall = taosAllocateQall())) {
goto _err;
}
- pRSmaInfo->suid = suid;
- pRSmaInfo->refId = RSMA_REF_ID(pStat);
- T_REF_INIT_VAL(pRSmaInfo, 1);
if (tdSetRSmaInfoItemParams(pSma, param, pStat, pRSmaInfo, 0) < 0) {
goto _err;
@@ -432,7 +476,7 @@ int32_t tdProcessRSmaCreate(SSma *pSma, SVCreateStbReq *pReq) {
return TSDB_CODE_SUCCESS;
}
- return tdProcessRSmaCreateImpl(pSma, &pReq->rsmaParam, pReq->suid, pReq->name);
+ return tdRSmaProcessCreateImpl(pSma, &pReq->rsmaParam, pReq->suid, pReq->name);
}
/**
@@ -466,6 +510,7 @@ int32_t tdProcessRSmaDrop(SSma *pSma, SVDropStbReq *pReq) {
}
// set del flag for data in mem
+ atomic_store_8(&pRSmaStat->delFlag, 1);
RSMA_INFO_SET_DEL(pRSmaInfo);
tdUnRefRSmaInfo(pSma, pRSmaInfo);
@@ -522,6 +567,7 @@ static int32_t tdUidStorePut(STbUidStore *pStore, tb_uid_t suid, tb_uid_t *uid)
}
if (!taosArrayPush(pUidArray, uid)) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
+ taosArrayDestroy(pUidArray);
return TSDB_CODE_FAILED;
}
if (taosHashPut(pStore->uidHash, &suid, sizeof(suid), &pUidArray, sizeof(pUidArray)) < 0) {
@@ -646,7 +692,8 @@ static int32_t tdRSmaExecAndSubmitResult(SSma *pSma, qTaskInfo_t taskInfo, SRSma
while (1) {
uint64_t ts;
- int32_t code = qExecTaskOpt(taskInfo, pResList, &ts);
+ bool hasMore = false;
+ int32_t code = qExecTaskOpt(taskInfo, pResList, &ts, &hasMore, NULL);
if (code < 0) {
if (code == TSDB_CODE_QRY_IN_EXEC) {
break;
@@ -659,15 +706,15 @@ static int32_t tdRSmaExecAndSubmitResult(SSma *pSma, qTaskInfo_t taskInfo, SRSma
if (taosArrayGetSize(pResList) == 0) {
if (terrno == 0) {
- // smaDebug("vgId:%d, no rsma %" PRIi8 " data fetched yet", SMA_VID(pSma), pItem->level);
+ // smaDebug("vgId:%d, no rsma level %" PRIi8 " data fetched yet", SMA_VID(pSma), pItem->level);
} else {
- smaDebug("vgId:%d, no rsma %" PRIi8 " data fetched since %s", SMA_VID(pSma), pItem->level, terrstr());
+ smaDebug("vgId:%d, no rsma level %" PRIi8 " data fetched since %s", SMA_VID(pSma), pItem->level, terrstr());
goto _err;
}
break;
} else {
- smaDebug("vgId:%d, rsma %" PRIi8 " data fetched", SMA_VID(pSma), pItem->level);
+ smaDebug("vgId:%d, rsma level %" PRIi8 " data fetched", SMA_VID(pSma), pItem->level);
}
#if 0
char flag[10] = {0};
@@ -681,21 +728,22 @@ static int32_t tdRSmaExecAndSubmitResult(SSma *pSma, qTaskInfo_t taskInfo, SRSma
// TODO: the schema update should be handled later(TD-17965)
if (buildSubmitReqFromDataBlock(&pReq, output, pTSchema, SMA_VID(pSma), suid) < 0) {
- smaError("vgId:%d, build submit req for rsma stable %" PRIi64 " level %" PRIi8 " failed since %s",
- SMA_VID(pSma), suid, pItem->level, terrstr());
+ smaError("vgId:%d, build submit req for rsma table %" PRIi64 " level %" PRIi8 " failed since %s", SMA_VID(pSma),
+ suid, pItem->level, terrstr());
goto _err;
}
if (pReq && tdProcessSubmitReq(sinkTsdb, output->info.version, pReq) < 0) {
taosMemoryFreeClear(pReq);
- smaError("vgId:%d, process submit req for rsma stable %" PRIi64 " level %" PRIi8 " failed since %s",
+ smaError("vgId:%d, process submit req for rsma table %" PRIi64 " level %" PRIi8 " failed since %s",
SMA_VID(pSma), suid, pItem->level, terrstr());
goto _err;
}
- taosMemoryFreeClear(pReq);
- smaDebug("vgId:%d, process submit req for rsma table %" PRIi64 " level %" PRIi8 " version:%" PRIi64,
- SMA_VID(pSma), suid, pItem->level, output->info.version);
+ smaDebug("vgId:%d, process submit req for rsma table %" PRIi64 " level %" PRIi8 " ver %" PRIi64 " len %" PRIu32,
+ SMA_VID(pSma), suid, pItem->level, output->info.version, htonl(pReq->header.contLen));
+
+ taosMemoryFreeClear(pReq);
}
}
@@ -822,6 +870,95 @@ static int32_t tdExecuteRSmaImpl(SSma *pSma, const void *pMsg, int32_t msgSize,
return TSDB_CODE_SUCCESS;
}
+static int32_t tdCloneQTaskInfo(SSma *pSma, qTaskInfo_t dstTaskInfo, qTaskInfo_t srcTaskInfo, SRSmaParam *param,
+ tb_uid_t suid, int8_t idx) {
+ SVnode *pVnode = pSma->pVnode;
+ char *pOutput = NULL;
+ int32_t len = 0;
+
+ if ((terrno = qSerializeTaskStatus(srcTaskInfo, &pOutput, &len)) < 0) {
+ smaError("vgId:%d, rsma clone, table %" PRIi64 " serialize qTaskInfo failed since %s", TD_VID(pVnode), suid,
+ terrstr());
+ goto _err;
+ }
+
+ SReadHandle handle = {
+ .meta = pVnode->pMeta,
+ .vnode = pVnode,
+ .initTqReader = 1,
+ };
+ ASSERT(!dstTaskInfo);
+ dstTaskInfo = qCreateStreamExecTaskInfo(param->qmsg[idx], &handle);
+ if (!dstTaskInfo) {
+ terrno = TSDB_CODE_RSMA_QTASKINFO_CREATE;
+ goto _err;
+ }
+
+ if (qDeserializeTaskStatus(dstTaskInfo, pOutput, len) < 0) {
+ smaError("vgId:%d, rsma clone, restore rsma task for table:%" PRIi64 " failed since %s", TD_VID(pVnode), suid,
+ terrstr());
+ goto _err;
+ }
+
+ smaDebug("vgId:%d, rsma clone, restore rsma task for table:%" PRIi64 " succeed", TD_VID(pVnode), suid);
+
+ taosMemoryFreeClear(pOutput);
+ return TSDB_CODE_SUCCESS;
+_err:
+ taosMemoryFreeClear(pOutput);
+ tdRSmaQTaskInfoFree(dstTaskInfo, TD_VID(pVnode), idx + 1);
+ smaError("vgId:%d, rsma clone, restore rsma task for table:%" PRIi64 " failed since %s", TD_VID(pVnode), suid,
+ terrstr());
+ return TSDB_CODE_FAILED;
+}
+
+/**
+ * @brief Clone qTaskInfo of SRSmaInfo
+ *
+ * @param pSma
+ * @param pInfo
+ * @return int32_t
+ */
+static int32_t tdRSmaInfoClone(SSma *pSma, SRSmaInfo *pInfo) {
+ SRSmaParam *param = NULL;
+ if (!pInfo) {
+ return TSDB_CODE_SUCCESS;
+ }
+
+ SMetaReader mr = {0};
+ metaReaderInit(&mr, SMA_META(pSma), 0);
+ smaDebug("vgId:%d, rsma clone qTaskInfo for suid:%" PRIi64, SMA_VID(pSma), pInfo->suid);
+ if (metaGetTableEntryByUid(&mr, pInfo->suid) < 0) {
+ smaError("vgId:%d, rsma clone, failed to get table meta for %" PRIi64 " since %s", SMA_VID(pSma), pInfo->suid,
+ terrstr());
+ goto _err;
+ }
+ ASSERT(mr.me.type == TSDB_SUPER_TABLE);
+ ASSERT(mr.me.uid == pInfo->suid);
+ if (TABLE_IS_ROLLUP(mr.me.flags)) {
+ param = &mr.me.stbEntry.rsmaParam;
+ for (int32_t i = 0; i < TSDB_RETENTION_L2; ++i) {
+ if (!pInfo->iTaskInfo[i]) {
+ continue;
+ }
+ if (tdCloneQTaskInfo(pSma, pInfo->taskInfo[i], pInfo->iTaskInfo[i], param, pInfo->suid, i) < 0) {
+ goto _err;
+ }
+ }
+ smaDebug("vgId:%d, rsma clone env success for %" PRIi64, SMA_VID(pSma), pInfo->suid);
+ } else {
+ terrno = TSDB_CODE_RSMA_INVALID_SCHEMA;
+ goto _err;
+ }
+
+ metaReaderClear(&mr);
+ return TSDB_CODE_SUCCESS;
+_err:
+ metaReaderClear(&mr);
+ smaError("vgId:%d, rsma clone env failed for %" PRIi64 " since %s", SMA_VID(pSma), pInfo->suid, terrstr());
+ return TSDB_CODE_FAILED;
+}
+
/**
* @brief During async commit, the SRSmaInfo object would be COW from iRSmaInfoHash and write lock should be applied.
*
@@ -845,25 +982,25 @@ static SRSmaInfo *tdAcquireRSmaInfoBySuid(SSma *pSma, int64_t suid) {
return NULL;
}
- // taosRLockLatch(SMA_ENV_LOCK(pEnv));
+ taosRLockLatch(SMA_ENV_LOCK(pEnv));
pRSmaInfo = taosHashGet(RSMA_INFO_HASH(pStat), &suid, sizeof(tb_uid_t));
if (pRSmaInfo && (pRSmaInfo = *(SRSmaInfo **)pRSmaInfo)) {
if (RSMA_INFO_IS_DEL(pRSmaInfo)) {
- // taosRUnLockLatch(SMA_ENV_LOCK(pEnv));
+ taosRUnLockLatch(SMA_ENV_LOCK(pEnv));
return NULL;
}
if (!pRSmaInfo->taskInfo[0]) {
- if (tdCloneRSmaInfo(pSma, pRSmaInfo) < 0) {
- // taosRUnLockLatch(SMA_ENV_LOCK(pEnv));
+ if (tdRSmaInfoClone(pSma, pRSmaInfo) < 0) {
+ taosRUnLockLatch(SMA_ENV_LOCK(pEnv));
return NULL;
}
}
tdRefRSmaInfo(pSma, pRSmaInfo);
- // taosRUnLockLatch(SMA_ENV_LOCK(pEnv));
+ taosRUnLockLatch(SMA_ENV_LOCK(pEnv));
ASSERT(pRSmaInfo->suid == suid);
return pRSmaInfo;
}
- // taosRUnLockLatch(SMA_ENV_LOCK(pEnv));
+ taosRUnLockLatch(SMA_ENV_LOCK(pEnv));
return NULL;
}
@@ -1011,7 +1148,7 @@ static int32_t tdRSmaRestoreQTaskInfoInit(SSma *pSma, int64_t *nTables) {
" qmsgLen:%" PRIi32,
TD_VID(pVnode), suid, i, param->maxdelay[i], param->watermark[i], param->qmsgLen[i]);
}
- if (tdProcessRSmaCreateImpl(pSma, &mr.me.stbEntry.rsmaParam, suid, mr.me.name) < 0) {
+ if (tdRSmaProcessCreateImpl(pSma, &mr.me.stbEntry.rsmaParam, suid, mr.me.name) < 0) {
smaError("vgId:%d, rsma restore env failed for %" PRIi64 " since %s", TD_VID(pVnode), suid, terrstr());
goto _err;
}
@@ -1024,7 +1161,7 @@ static int32_t tdRSmaRestoreQTaskInfoInit(SSma *pSma, int64_t *nTables) {
goto _err;
}
- if (tdUpdateTbUidList(pVnode->pSma, &uidStore) < 0) {
+ if (tdUpdateTbUidList(pVnode->pSma, &uidStore, true) < 0) {
smaError("vgId:%d, rsma restore, update tb uid list failed for %" PRIi64 " since %s", TD_VID(pVnode), suid,
terrstr());
goto _err;
@@ -1083,9 +1220,6 @@ static int32_t tdRSmaRestoreQTaskInfoReload(SSma *pSma, int8_t type, int64_t qTa
goto _err;
}
- SSmaEnv *pRSmaEnv = pSma->pRSmaEnv;
- SRSmaStat *pRSmaStat = (SRSmaStat *)SMA_ENV_STAT(pRSmaEnv);
-
SRSmaQTaskInfoIter fIter = {0};
if (tdRSmaQTaskInfoIterInit(&fIter, &tFile) < 0) {
tdRSmaQTaskInfoIterDestroy(&fIter);
@@ -1126,27 +1260,34 @@ static int32_t tdRSmaRestoreTSDataReload(SSma *pSma) {
return TSDB_CODE_SUCCESS;
}
-int32_t tdProcessRSmaRestoreImpl(SSma *pSma, int8_t type, int64_t qtaskFileVer) {
+int32_t tdRSmaProcessRestoreImpl(SSma *pSma, int8_t type, int64_t qtaskFileVer) {
// step 1: iterate all stables to restore the rsma env
int64_t nTables = 0;
if (tdRSmaRestoreQTaskInfoInit(pSma, &nTables) < 0) {
goto _err;
}
-
if (nTables <= 0) {
smaDebug("vgId:%d, no need to restore rsma task %" PRIi8 " since no tables", SMA_VID(pSma), type);
return TSDB_CODE_SUCCESS;
}
+#if 0
// step 2: retrieve qtaskinfo items from the persistence file(rsma/qtaskinfo) and restore
if (tdRSmaRestoreQTaskInfoReload(pSma, type, qtaskFileVer) < 0) {
goto _err;
}
+#endif
// step 3: reload ts data from checkpoint
if (tdRSmaRestoreTSDataReload(pSma) < 0) {
goto _err;
}
+
+ // step 4: open SRSmaFS for qTaskFiles
+ if (tdRSmaFSOpen(pSma, qtaskFileVer) < 0) {
+ goto _err;
+ }
+
smaInfo("vgId:%d, restore rsma task %" PRIi8 " from qtaskf %" PRIi64 " succeed", SMA_VID(pSma), type, qtaskFileVer);
return TSDB_CODE_SUCCESS;
_err:
@@ -1245,29 +1386,31 @@ static int32_t tdRSmaQTaskInfoIterNextBlock(SRSmaQTaskInfoIter *pIter, bool *isF
return TSDB_CODE_FAILED;
}
- if (tdReadTFile(pTFile, pIter->qBuf, nBytes) != nBytes) {
+ if (tdReadTFile(pTFile, pIter->pBuf, nBytes) != nBytes) {
return TSDB_CODE_FAILED;
}
int32_t infoLen = 0;
- taosDecodeFixedI32(pIter->qBuf, &infoLen);
+ taosDecodeFixedI32(pIter->pBuf, &infoLen);
if (infoLen > nBytes) {
if (infoLen <= RSMA_QTASKINFO_BUFSIZE) {
terrno = TSDB_CODE_RSMA_FILE_CORRUPTED;
smaError("iterate rsma qtaskinfo file %s failed since %s", TD_TFILE_FULL_NAME(pIter->pTFile), terrstr());
return TSDB_CODE_FAILED;
}
- pIter->nAlloc = infoLen;
- void *pBuf = taosMemoryRealloc(pIter->pBuf, infoLen);
- if (!pBuf) {
- terrno = TSDB_CODE_OUT_OF_MEMORY;
- return TSDB_CODE_FAILED;
+ if (pIter->nAlloc < infoLen) {
+ pIter->nAlloc = infoLen;
+ void *pBuf = taosMemoryRealloc(pIter->pBuf, infoLen);
+ if (!pBuf) {
+ terrno = TSDB_CODE_OUT_OF_MEMORY;
+ return TSDB_CODE_FAILED;
+ }
+ pIter->pBuf = pBuf;
}
- pIter->pBuf = pBuf;
- pIter->qBuf = pIter->pBuf;
+
nBytes = infoLen;
- if (tdSeekTFile(pTFile, pIter->offset, SEEK_SET)) {
+ if (tdSeekTFile(pTFile, pIter->offset, SEEK_SET) < 0) {
return TSDB_CODE_FAILED;
}
@@ -1276,6 +1419,7 @@ static int32_t tdRSmaQTaskInfoIterNextBlock(SRSmaQTaskInfoIter *pIter, bool *isF
}
}
+ pIter->qBuf = pIter->pBuf;
pIter->offset += nBytes;
pIter->nBytes = nBytes;
pIter->nBufPos = 0;
@@ -1337,6 +1481,50 @@ static int32_t tdRSmaQTaskInfoRestore(SSma *pSma, int8_t type, SRSmaQTaskInfoIte
return TSDB_CODE_SUCCESS;
}
+int32_t tdRSmaPersistExecImpl(SRSmaStat *pRSmaStat, SHashObj *pInfoHash) {
+ SSma *pSma = pRSmaStat->pSma;
+ SVnode *pVnode = pSma->pVnode;
+ int32_t vid = SMA_VID(pSma);
+
+ if (taosHashGetSize(pInfoHash) <= 0) {
+ return TSDB_CODE_SUCCESS;
+ }
+
+ int64_t fsMaxVer = tdRSmaFSMaxVer(pSma, pRSmaStat);
+ if (pRSmaStat->commitAppliedVer <= fsMaxVer) {
+ smaDebug("vgId:%d, rsma persist, no need as applied %" PRIi64 " not larger than fsMaxVer %" PRIi64, vid,
+ pRSmaStat->commitAppliedVer, fsMaxVer);
+ return TSDB_CODE_SUCCESS;
+ }
+
+ void *infoHash = NULL;
+ while ((infoHash = taosHashIterate(pInfoHash, infoHash))) {
+ SRSmaInfo *pRSmaInfo = *(SRSmaInfo **)infoHash;
+
+ if (RSMA_INFO_IS_DEL(pRSmaInfo)) {
+ continue;
+ }
+
+ for (int32_t i = 0; i < TSDB_RETENTION_L2; ++i) {
+ SRSmaInfoItem *pItem = RSMA_INFO_ITEM(pRSmaInfo, i);
+ if (pItem && pItem->pStreamState) {
+ if (streamStateCommit(pItem->pStreamState) < 0) {
+ terrno = TSDB_CODE_RSMA_STREAM_STATE_COMMIT;
+ goto _err;
+ }
+ smaDebug("vgId:%d, rsma persist, stream state commit success, table %" PRIi64 " level %d", vid, pRSmaInfo->suid,
+ i + 1);
+ }
+ }
+ }
+
+ return TSDB_CODE_SUCCESS;
+_err:
+ smaError("vgId:%d, rsma persist failed since %s", vid, terrstr());
+ return TSDB_CODE_FAILED;
+}
+
+#if 0
int32_t tdRSmaPersistExecImpl(SRSmaStat *pRSmaStat, SHashObj *pInfoHash) {
SSma *pSma = pRSmaStat->pSma;
SVnode *pVnode = pSma->pVnode;
@@ -1353,17 +1541,24 @@ int32_t tdRSmaPersistExecImpl(SRSmaStat *pRSmaStat, SHashObj *pInfoHash) {
return TSDB_CODE_SUCCESS;
}
+ int64_t fsMaxVer = tdRSmaFSMaxVer(pSma, pRSmaStat);
+ if (pRSmaStat->commitAppliedVer <= fsMaxVer) {
+ smaDebug("vgId:%d, rsma persist, no need as applied %" PRIi64 " not larger than fsMaxVer %" PRIi64, vid,
+ pRSmaStat->commitAppliedVer, fsMaxVer);
+ return TSDB_CODE_SUCCESS;
+ }
+
STFile tFile = {0};
#if 0
if (pRSmaStat->commitAppliedVer > 0) {
char qTaskInfoFName[TSDB_FILENAME_LEN];
tdRSmaQTaskInfoGetFileName(vid, pRSmaStat->commitAppliedVer, qTaskInfoFName);
if (tdInitTFile(&tFile, tfsGetPrimaryPath(pVnode->pTfs), qTaskInfoFName) < 0) {
- smaError("vgId:%d, rsma persit, init %s failed since %s", vid, qTaskInfoFName, terrstr());
+ smaError("vgId:%d, rsma persist, init %s failed since %s", vid, qTaskInfoFName, terrstr());
goto _err;
}
if (tdCreateTFile(&tFile, true, TD_FTYPE_RSMA_QTASKINFO) < 0) {
- smaError("vgId:%d, rsma persit, create %s failed since %s", vid, TD_TFILE_FULL_NAME(&tFile), terrstr());
+ smaError("vgId:%d, rsma persist, create %s failed since %s", vid, TD_TFILE_FULL_NAME(&tFile), terrstr());
goto _err;
}
smaDebug("vgId:%d, rsma, serialize qTaskInfo, file %s created", vid, TD_TFILE_FULL_NAME(&tFile));
@@ -1413,11 +1608,11 @@ int32_t tdRSmaPersistExecImpl(SRSmaStat *pRSmaStat, SHashObj *pInfoHash) {
char qTaskInfoFName[TSDB_FILENAME_LEN];
tdRSmaQTaskInfoGetFileName(vid, pRSmaStat->commitAppliedVer, qTaskInfoFName);
if (tdInitTFile(&tFile, tfsGetPrimaryPath(pVnode->pTfs), qTaskInfoFName) < 0) {
- smaError("vgId:%d, rsma persit, init %s failed since %s", vid, qTaskInfoFName, terrstr());
+ smaError("vgId:%d, rsma persist, init %s failed since %s", vid, qTaskInfoFName, terrstr());
goto _err;
}
if (tdCreateTFile(&tFile, true, TD_FTYPE_RSMA_QTASKINFO) < 0) {
- smaError("vgId:%d, rsma persit, create %s failed since %s", vid, TD_TFILE_FULL_NAME(&tFile), terrstr());
+ smaError("vgId:%d, rsma persist, create %s failed since %s", vid, TD_TFILE_FULL_NAME(&tFile), terrstr());
goto _err;
}
smaDebug("vgId:%d, rsma, table %" PRIi64 " serialize qTaskInfo, file %s created", vid, pRSmaInfo->suid,
@@ -1461,7 +1656,7 @@ int32_t tdRSmaPersistExecImpl(SRSmaStat *pRSmaStat, SHashObj *pInfoHash) {
}
return TSDB_CODE_SUCCESS;
_err:
- smaError("vgId:%d, rsma persit failed since %s", vid, terrstr());
+ smaError("vgId:%d, rsma persist failed since %s", vid, terrstr());
if (isFileCreated) {
tdRemoveTFile(&tFile);
tdDestroyTFile(&tFile);
@@ -1469,6 +1664,8 @@ _err:
return TSDB_CODE_FAILED;
}
+#endif
+
/**
* @brief trigger to get rsma result in async mode
*
@@ -1476,38 +1673,59 @@ _err:
* @param tmrId
*/
static void tdRSmaFetchTrigger(void *param, void *tmrId) {
- SRSmaInfoItem *pItem = param;
+ SRSmaRef *pRSmaRef = NULL;
SSma *pSma = NULL;
- SRSmaInfo *pRSmaInfo = tdGetRSmaInfoByItem(pItem);
+ SRSmaStat *pStat = NULL;
+ SRSmaInfo *pRSmaInfo = NULL;
+ SRSmaInfoItem *pItem = NULL;
- if (RSMA_INFO_IS_DEL(pRSmaInfo)) {
- smaDebug("rsma fetch task not start since rsma info already deleted, rsetId:%" PRIi64 " refId:%d)", smaMgmt.rsetId,
- pRSmaInfo->refId);
+ if (!(pRSmaRef = taosHashGet(smaMgmt.refHash, ¶m, POINTER_BYTES))) {
+ smaDebug("rsma fetch task not start since rsma info item:%p not exist in refHash:%p, rsetId:%d", param,
+ smaMgmt.refHash, smaMgmt.rsetId);
return;
}
- SRSmaStat *pStat = (SRSmaStat *)tdAcquireSmaRef(smaMgmt.rsetId, pRSmaInfo->refId);
-
- if (!pStat) {
- smaDebug("rsma fetch task not start since rsma stat already destroyed, rsetId:%" PRIi64 " refId:%d)",
- smaMgmt.rsetId, pRSmaInfo->refId);
+ if (!(pStat = (SRSmaStat *)tdAcquireSmaRef(smaMgmt.rsetId, pRSmaRef->refId))) {
+ smaDebug("rsma fetch task not start since rsma stat already destroyed, rsetId:%d refId:%" PRIi64 ")",
+ smaMgmt.rsetId, pRSmaRef->refId); // pRSmaRef freed in taosHashRemove
+ taosHashRemove(smaMgmt.refHash, ¶m, POINTER_BYTES);
return;
}
pSma = pStat->pSma;
+ if (!(pRSmaInfo = tdAcquireRSmaInfoBySuid(pSma, pRSmaRef->suid))) {
+ smaDebug("rsma fetch task not start since rsma info not exist, rsetId:%d refId:%" PRIi64 ")", smaMgmt.rsetId,
+ pRSmaRef->refId); // pRSmaRef freed in taosHashRemove
+ tdReleaseSmaRef(smaMgmt.rsetId, pRSmaRef->refId);
+ taosHashRemove(smaMgmt.refHash, ¶m, POINTER_BYTES);
+ return;
+ }
+
+ if (RSMA_INFO_IS_DEL(pRSmaInfo)) {
+ smaDebug("rsma fetch task not start since rsma info already deleted, rsetId:%d refId:%" PRIi64 ")", smaMgmt.rsetId,
+ pRSmaRef->refId); // pRSmaRef freed in taosHashRemove
+ tdReleaseRSmaInfo(pSma, pRSmaInfo);
+ tdReleaseSmaRef(smaMgmt.rsetId, pRSmaRef->refId);
+ taosHashRemove(smaMgmt.refHash, ¶m, POINTER_BYTES);
+ return;
+ }
+
+ pItem = *(SRSmaInfoItem **)¶m;
+
// if rsma trigger stat in paused, cancelled or finished, not start fetch task
int8_t rsmaTriggerStat = atomic_load_8(RSMA_TRIGGER_STAT(pStat));
switch (rsmaTriggerStat) {
case TASK_TRIGGER_STAT_PAUSED:
case TASK_TRIGGER_STAT_CANCELLED: {
- tdReleaseSmaRef(smaMgmt.rsetId, pRSmaInfo->refId);
smaDebug("vgId:%d, rsma fetch task not start for level %" PRIi8 " since stat is %" PRIi8
- ", rsetId rsetId:%" PRIi64 " refId:%d",
- SMA_VID(pSma), pItem->level, rsmaTriggerStat, smaMgmt.rsetId, pRSmaInfo->refId);
+ ", rsetId:%d refId:%" PRIi64,
+ SMA_VID(pSma), pItem->level, rsmaTriggerStat, smaMgmt.rsetId, pRSmaRef->refId);
if (rsmaTriggerStat == TASK_TRIGGER_STAT_PAUSED) {
- taosTmrReset(tdRSmaFetchTrigger, 5000, pItem, smaMgmt.tmrHandle, &pItem->tmrId);
+ taosTmrReset(tdRSmaFetchTrigger, RSMA_FETCH_INTERVAL, pItem, smaMgmt.tmrHandle, &pItem->tmrId);
}
+ tdReleaseRSmaInfo(pSma, pRSmaInfo);
+ tdReleaseSmaRef(smaMgmt.rsetId, pRSmaRef->refId);
return;
}
default:
@@ -1518,7 +1736,7 @@ static void tdRSmaFetchTrigger(void *param, void *tmrId) {
atomic_val_compare_exchange_8(&pItem->triggerStat, TASK_TRIGGER_STAT_ACTIVE, TASK_TRIGGER_STAT_INACTIVE);
switch (fetchTriggerStat) {
case TASK_TRIGGER_STAT_ACTIVE: {
- smaDebug("vgId:%d, rsma fetch task started for level:%" PRIi8 " suid:%" PRIi64 " since stat is active",
+ smaDebug("vgId:%d, rsma fetch task planned for level:%" PRIi8 " suid:%" PRIi64 " since stat is active",
SMA_VID(pSma), pItem->level, pRSmaInfo->suid);
// async process
pItem->fetchLevel = pItem->level;
@@ -1531,8 +1749,6 @@ static void tdRSmaFetchTrigger(void *param, void *tmrId) {
if (atomic_load_8(&pRSmaInfo->assigned) == 0) {
tsem_post(&(pStat->notEmpty));
}
- smaInfo("vgId:%d, rsma fetch task planned for level:%" PRIi8 " suid:%" PRIi64, SMA_VID(pSma), pItem->level,
- pRSmaInfo->suid);
} break;
case TASK_TRIGGER_STAT_PAUSED: {
smaDebug("vgId:%d, rsma fetch task not start for level:%" PRIi8 " suid:%" PRIi64 " since stat is paused",
@@ -1554,11 +1770,11 @@ static void tdRSmaFetchTrigger(void *param, void *tmrId) {
_end:
taosTmrReset(tdRSmaFetchTrigger, pItem->maxDelay, pItem, smaMgmt.tmrHandle, &pItem->tmrId);
- tdReleaseSmaRef(smaMgmt.rsetId, pRSmaInfo->refId);
+ tdReleaseRSmaInfo(pSma, pRSmaInfo);
+ tdReleaseSmaRef(smaMgmt.rsetId, pRSmaRef->refId);
}
static void tdFreeRSmaSubmitItems(SArray *pItems) {
- ASSERT(taosArrayGetSize(pItems) > 0);
for (int32_t i = 0; i < taosArrayGetSize(pItems); ++i) {
taosFreeQitem(*(void **)taosArrayGet(pItems, i));
}
@@ -1570,10 +1786,9 @@ static void tdFreeRSmaSubmitItems(SArray *pItems) {
*
* @param pSma
* @param pInfo
- * @param pSubmitArr
* @return int32_t
*/
-static int32_t tdRSmaFetchAllResult(SSma *pSma, SRSmaInfo *pInfo, SArray *pSubmitArr) {
+static int32_t tdRSmaFetchAllResult(SSma *pSma, SRSmaInfo *pInfo) {
SSDataBlock dataBlock = {.info.type = STREAM_GET_ALL};
for (int8_t i = 1; i <= TSDB_RETENTION_L2; ++i) {
SRSmaInfoItem *pItem = RSMA_INFO_ITEM(pInfo, i - 1);
@@ -1584,21 +1799,23 @@ static int32_t tdRSmaFetchAllResult(SSma *pSma, SRSmaInfo *pInfo, SArray *pSubmi
continue;
}
- int64_t curMs = taosGetTimestampMs();
- if ((pItem->nSkipped * pItem->maxDelay) > RSMA_FETCH_DELAY_MAX) {
- smaInfo("vgId:%d, suid:%" PRIi64 " level:%" PRIi8 " nSkipped:%" PRIi8 " maxDelay:%d, fetch executed",
- SMA_VID(pSma), pInfo->suid, i, pItem->nSkipped, pItem->maxDelay);
- } else if (((curMs - pInfo->lastRecv) < RSMA_FETCH_ACTIVE_MAX)) {
- ++pItem->nSkipped;
- smaDebug("vgId:%d, suid:%" PRIi64 " level:%" PRIi8 " curMs:%" PRIi64 " lastRecv:%" PRIi64 ", fetch skipped ",
- SMA_VID(pSma), pInfo->suid, i, curMs, pInfo->lastRecv);
- continue;
+ if ((++pItem->nScanned * pItem->maxDelay) > RSMA_FETCH_DELAY_MAX) {
+ smaDebug("vgId:%d, suid:%" PRIi64 " level:%" PRIi8 " nScanned:%" PRIi8 " maxDelay:%d, fetch executed",
+ SMA_VID(pSma), pInfo->suid, i, pItem->nScanned, pItem->maxDelay);
} else {
- smaInfo("vgId:%d, suid:%" PRIi64 " level:%" PRIi8 " curMs:%" PRIi64 " lastRecv:%" PRIi64 ", fetch executed ",
- SMA_VID(pSma), pInfo->suid, i, curMs, pInfo->lastRecv);
+ int64_t curMs = taosGetTimestampMs();
+ if ((curMs - pInfo->lastRecv) < RSMA_FETCH_ACTIVE_MAX) {
+ smaTrace("vgId:%d, suid:%" PRIi64 " level:%" PRIi8 " curMs:%" PRIi64 " lastRecv:%" PRIi64 ", fetch skipped ",
+ SMA_VID(pSma), pInfo->suid, i, curMs, pInfo->lastRecv);
+ atomic_store_8(&pItem->triggerStat, TASK_TRIGGER_STAT_ACTIVE); // restore the active stat
+ continue;
+ } else {
+ smaDebug("vgId:%d, suid:%" PRIi64 " level:%" PRIi8 " curMs:%" PRIi64 " lastRecv:%" PRIi64 ", fetch executed ",
+ SMA_VID(pSma), pInfo->suid, i, curMs, pInfo->lastRecv);
+ }
}
- pItem->nSkipped = 0;
+ pItem->nScanned = 0;
if ((terrno = qSetMultiStreamInput(taskInfo, &dataBlock, 1, STREAM_INPUT__DATA_BLOCK)) < 0) {
goto _err;
@@ -1609,20 +1826,18 @@ static int32_t tdRSmaFetchAllResult(SSma *pSma, SRSmaInfo *pInfo, SArray *pSubmi
}
tdCleanupStreamInputDataBlock(taskInfo);
- smaInfo("vgId:%d, suid:%" PRIi64 " level:%" PRIi8 " nSkipped:%" PRIi8 " maxDelay:%d, fetch finished",
- SMA_VID(pSma), pInfo->suid, i, pItem->nSkipped, pItem->maxDelay);
+ smaDebug("vgId:%d, suid:%" PRIi64 " level:%" PRIi8 " nScanned:%" PRIi8 " maxDelay:%d, fetch finished",
+ SMA_VID(pSma), pInfo->suid, i, pItem->nScanned, pItem->maxDelay);
} else {
- smaDebug("vgId:%d, suid:%" PRIi64 " level:%" PRIi8 " nSkipped:%" PRIi8
+ smaDebug("vgId:%d, suid:%" PRIi64 " level:%" PRIi8 " nScanned:%" PRIi8
" maxDelay:%d, fetch not executed as fetch level is %" PRIi8,
- SMA_VID(pSma), pInfo->suid, i, pItem->nSkipped, pItem->maxDelay, pItem->fetchLevel);
+ SMA_VID(pSma), pInfo->suid, i, pItem->nScanned, pItem->maxDelay, pItem->fetchLevel);
}
}
_end:
- tdReleaseRSmaInfo(pSma, pInfo);
return TSDB_CODE_SUCCESS;
_err:
- tdReleaseRSmaInfo(pSma, pInfo);
return TSDB_CODE_FAILED;
}
@@ -1632,7 +1847,7 @@ static int32_t tdRSmaBatchExec(SSma *pSma, SRSmaInfo *pInfo, STaosQall *qall, SA
void *msg = NULL;
taosGetQitem(qall, (void **)&msg);
if (msg) {
- if (taosArrayPush(pSubmitArr, &msg) < 0) {
+ if (!taosArrayPush(pSubmitArr, &msg)) {
tdFreeRSmaSubmitItems(pSubmitArr);
goto _err;
}
@@ -1694,7 +1909,7 @@ int32_t tdRSmaProcessExecImpl(SSma *pSma, ERsmaExecType type) {
while (true) {
// step 1: rsma exec - consume data in buffer queue for all suids
- if (type == RSMA_EXEC_OVERFLOW || type == RSMA_EXEC_COMMIT) {
+ if (type == RSMA_EXEC_OVERFLOW) {
void *pIter = NULL;
while ((pIter = taosHashIterate(infoHash, pIter))) {
SRSmaInfo *pInfo = *(SRSmaInfo **)pIter;
@@ -1706,6 +1921,7 @@ int32_t tdRSmaProcessExecImpl(SSma *pSma, ERsmaExecType type) {
bool occupied = (batchMax <= 1);
if (batchMax > 1) {
batchMax = 100 / batchMax;
+ batchMax = TMAX(batchMax, 4);
}
while (occupied || (++batchCnt < batchMax)) { // greedy mode
taosReadAllQitems(pInfo->queue, pInfo->qall); // queue has mutex lock
@@ -1715,59 +1931,41 @@ int32_t tdRSmaProcessExecImpl(SSma *pSma, ERsmaExecType type) {
smaDebug("vgId:%d, batchSize:%d, execType:%" PRIi8, SMA_VID(pSma), qallItemSize, type);
}
- if (type == RSMA_EXEC_OVERFLOW) {
- tdRSmaFetchAllResult(pSma, pInfo, pSubmitArr);
+ if (RSMA_INFO_ITEM(pInfo, 0)->fetchLevel || RSMA_INFO_ITEM(pInfo, 1)->fetchLevel) {
+ int8_t oldStat = atomic_val_compare_exchange_8(RSMA_COMMIT_STAT(pRSmaStat), 0, 2);
+ if (oldStat == 0 ||
+ ((oldStat == 2) && atomic_load_8(RSMA_TRIGGER_STAT(pRSmaStat)) < TASK_TRIGGER_STAT_PAUSED)) {
+ atomic_fetch_add_32(&pRSmaStat->nFetchAll, 1);
+ tdRSmaFetchAllResult(pSma, pInfo);
+ if (0 == atomic_sub_fetch_32(&pRSmaStat->nFetchAll, 1)) {
+ atomic_store_8(RSMA_COMMIT_STAT(pRSmaStat), 0);
+ }
+ }
}
if (qallItemSize > 0) {
atomic_fetch_sub_64(&pRSmaStat->nBufItems, qallItemSize);
continue;
} else if (RSMA_INFO_ITEM(pInfo, 0)->fetchLevel || RSMA_INFO_ITEM(pInfo, 1)->fetchLevel) {
- continue;
+ if (atomic_load_8(RSMA_COMMIT_STAT(pRSmaStat)) == 0) {
+ continue;
+ }
+ for (int32_t j = 0; j < TSDB_RETENTION_L2; ++j) {
+ SRSmaInfoItem *pItem = RSMA_INFO_ITEM(pInfo, j);
+ if (pItem->fetchLevel) {
+ pItem->fetchLevel = 0;
+ taosTmrReset(tdRSmaFetchTrigger, RSMA_FETCH_INTERVAL, pItem, smaMgmt.tmrHandle, &pItem->tmrId);
+ }
+ }
}
break;
}
}
- ASSERT(1 == atomic_val_compare_exchange_8(&pInfo->assigned, 1, 0));
- }
- }
- if (type == RSMA_EXEC_COMMIT) {
- if (atomic_load_64(&pRSmaStat->nBufItems) <= 0) {
- break;
- } else {
- // commit should wait for all items be consumed
- continue;
- }
- }
- }
-#if 0
- else if (type == RSMA_EXEC_COMMIT) {
- while (pIter) {
- SRSmaInfo *pInfo = *(SRSmaInfo **)pIter;
- if (taosQueueItemSize(pInfo->iQueue)) {
- if (atomic_val_compare_exchange_8(&pInfo->assigned, 0, 1) == 0) {
- taosReadAllQitems(pInfo->iQueue, pInfo->iQall); // queue has mutex lock
- int32_t qallItemSize = taosQallItemSize(pInfo->iQall);
- if (qallItemSize > 0) {
- atomic_fetch_sub_64(&pRSmaStat->nBufItems, qallItemSize);
- nIdle = 0;
-
- // batch exec
- tdRSmaBatchExec(pSma, pInfo, pInfo->qall, pSubmitArr, type);
- }
-
- // tdRSmaFetchAllResult(pSma, pInfo, pSubmitArr);
- ASSERT(1 == atomic_val_compare_exchange_8(&pInfo->assigned, 1, 0));
- }
+ atomic_val_compare_exchange_8(&pInfo->assigned, 1, 0);
}
- ASSERT(taosQueueItemSize(pInfo->iQueue) == 0);
- pIter = taosHashIterate(infoHash, pIter);
}
- break;
- }
-#endif
- else {
+ } else {
ASSERT(0);
}
@@ -1775,12 +1973,12 @@ int32_t tdRSmaProcessExecImpl(SSma *pSma, ERsmaExecType type) {
if (pEnv->flag & SMA_ENV_FLG_CLOSE) {
break;
}
-
+
tsem_wait(&pRSmaStat->notEmpty);
if ((pEnv->flag & SMA_ENV_FLG_CLOSE) && (atomic_load_64(&pRSmaStat->nBufItems) <= 0)) {
- smaInfo("vgId:%d, exec task end, flag:%" PRIi8 ", nBufItems:%" PRIi64, SMA_VID(pSma), pEnv->flag,
- atomic_load_64(&pRSmaStat->nBufItems));
+ smaDebug("vgId:%d, exec task end, flag:%" PRIi8 ", nBufItems:%" PRIi64, SMA_VID(pSma), pEnv->flag,
+ atomic_load_64(&pRSmaStat->nBufItems));
break;
}
}
diff --git a/source/dnode/vnode/src/sma/smaSnapshot.c b/source/dnode/vnode/src/sma/smaSnapshot.c
index 335c15a539ef31d66d83377f90da225e45ffd893..4939fce20c1f672390551a906cac95c46eceef1a 100644
--- a/source/dnode/vnode/src/sma/smaSnapshot.c
+++ b/source/dnode/vnode/src/sma/smaSnapshot.c
@@ -15,11 +15,13 @@
#include "sma.h"
-static int32_t rsmaSnapReadQTaskInfo(SRsmaSnapReader* pReader, uint8_t** ppData);
-static int32_t rsmaSnapWriteQTaskInfo(SRsmaSnapWriter* pWriter, uint8_t* pData, uint32_t nData);
+static int32_t rsmaSnapReadQTaskInfo(SRSmaSnapReader* pReader, uint8_t** ppData);
+static int32_t rsmaSnapWriteQTaskInfo(SRSmaSnapWriter* pWriter, uint8_t* pData, uint32_t nData);
+static int32_t rsmaQTaskInfSnapReaderOpen(SRSmaSnapReader* pReader, int64_t version);
+static int32_t rsmaQTaskInfSnapReaderClose(SQTaskFReader** ppReader);
-// SRsmaSnapReader ========================================
-struct SRsmaSnapReader {
+// SRSmaSnapReader ========================================
+struct SRSmaSnapReader {
SSma* pSma;
int64_t sver;
int64_t ever;
@@ -33,13 +35,13 @@ struct SRsmaSnapReader {
SQTaskFReader* pQTaskFReader;
};
-int32_t rsmaSnapReaderOpen(SSma* pSma, int64_t sver, int64_t ever, SRsmaSnapReader** ppReader) {
+int32_t rsmaSnapReaderOpen(SSma* pSma, int64_t sver, int64_t ever, SRSmaSnapReader** ppReader) {
int32_t code = 0;
SVnode* pVnode = pSma->pVnode;
- SRsmaSnapReader* pReader = NULL;
+ SRSmaSnapReader* pReader = NULL;
// alloc
- pReader = (SRsmaSnapReader*)taosMemoryCalloc(1, sizeof(*pReader));
+ pReader = (SRSmaSnapReader*)taosMemoryCalloc(1, sizeof(*pReader));
if (pReader == NULL) {
code = TSDB_CODE_OUT_OF_MEMORY;
goto _err;
@@ -48,7 +50,7 @@ int32_t rsmaSnapReaderOpen(SSma* pSma, int64_t sver, int64_t ever, SRsmaSnapRead
pReader->sver = sver;
pReader->ever = ever;
- // rsma1/rsma2
+ // open rsma1/rsma2
for (int32_t i = 0; i < TSDB_RETENTION_L2; ++i) {
if (pSma->pRSmaTsdb[i]) {
code = tsdbSnapReaderOpen(pSma->pRSmaTsdb[i], sver, ever, i == 0 ? SNAP_DATA_RSMA1 : SNAP_DATA_RSMA2,
@@ -59,51 +61,114 @@ int32_t rsmaSnapReaderOpen(SSma* pSma, int64_t sver, int64_t ever, SRsmaSnapRead
}
}
- // qtaskinfo
- // 1. add ref to qtaskinfo.v${ever} if exists and then start to replicate
+ // open qtaskinfo
+ if ((code = rsmaQTaskInfSnapReaderOpen(pReader, ever)) < 0) {
+ goto _err;
+ }
+
+ *ppReader = pReader;
+
+ return TSDB_CODE_SUCCESS;
+_err:
+ if (pReader) rsmaSnapReaderClose(&pReader);
+ *ppReader = NULL;
+ smaError("vgId:%d, vnode snapshot rsma reader open failed since %s", TD_VID(pVnode), tstrerror(code));
+ return TSDB_CODE_FAILED;
+}
+
+static int32_t rsmaQTaskInfSnapReaderOpen(SRSmaSnapReader* pReader, int64_t version) {
+ int32_t code = 0;
+ SSma* pSma = pReader->pSma;
+ SVnode* pVnode = pSma->pVnode;
+ SSmaEnv* pEnv = NULL;
+ SRSmaStat* pStat = NULL;
+
+ if (!(pEnv = SMA_RSMA_ENV(pSma))) {
+ smaInfo("vgId:%d, vnode snapshot rsma reader for qtaskinfo version %" PRIi64 " not need as env is NULL",
+ TD_VID(pVnode), version);
+ return TSDB_CODE_SUCCESS;
+ }
+
+ pStat = (SRSmaStat*)SMA_ENV_STAT(pEnv);
+
+ int32_t ref = tdRSmaFSRef(pReader->pSma, pStat, version);
+ if (ref < 1) {
+ smaInfo("vgId:%d, vnode snapshot rsma reader for qtaskinfo version %" PRIi64 " not need as ref is %d",
+ TD_VID(pVnode), version, ref);
+ return TSDB_CODE_SUCCESS;
+ }
+
char qTaskInfoFullName[TSDB_FILENAME_LEN];
- tdRSmaQTaskInfoGetFullName(TD_VID(pVnode), ever, tfsGetPrimaryPath(pVnode->pTfs), qTaskInfoFullName);
+ tdRSmaQTaskInfoGetFullName(TD_VID(pVnode), version, tfsGetPrimaryPath(pVnode->pTfs), qTaskInfoFullName);
if (!taosCheckExistFile(qTaskInfoFullName)) {
- smaInfo("vgId:%d, vnode snapshot rsma reader for qtaskinfo not need as %s not exists", TD_VID(pVnode),
- qTaskInfoFullName);
- } else {
- pReader->pQTaskFReader = taosMemoryCalloc(1, sizeof(SQTaskFReader));
- if (!pReader->pQTaskFReader) {
- code = TSDB_CODE_OUT_OF_MEMORY;
- goto _err;
- }
+ tdRSmaFSUnRef(pSma, pStat, version);
+ smaInfo("vgId:%d, vnode snapshot rsma reader for qtaskinfo version %" PRIi64 " not need as %s not exist",
+ TD_VID(pVnode), version, qTaskInfoFullName);
+ return TSDB_CODE_SUCCESS;
+ }
- TdFilePtr qTaskF = taosOpenFile(qTaskInfoFullName, TD_FILE_READ);
- if (!qTaskF) {
- code = TAOS_SYSTEM_ERROR(errno);
- goto _err;
- }
- pReader->pQTaskFReader->pReadH = qTaskF;
-#if 0
- SQTaskFile* pQTaskF = &pReader->pQTaskFReader->fTask;
- pQTaskF->nRef = 1;
-#endif
+ pReader->pQTaskFReader = taosMemoryCalloc(1, sizeof(SQTaskFReader));
+ if (!pReader->pQTaskFReader) {
+ code = TSDB_CODE_OUT_OF_MEMORY;
+ goto _end;
}
- *ppReader = pReader;
- smaInfo("vgId:%d, vnode snapshot rsma reader opened %s succeed", TD_VID(pVnode), qTaskInfoFullName);
+ TdFilePtr fp = taosOpenFile(qTaskInfoFullName, TD_FILE_READ);
+ if (!fp) {
+ code = TAOS_SYSTEM_ERROR(errno);
+ taosMemoryFreeClear(pReader->pQTaskFReader);
+ goto _end;
+ }
+
+ pReader->pQTaskFReader->pReadH = fp;
+ pReader->pQTaskFReader->pSma = pSma;
+ pReader->pQTaskFReader->version = pReader->ever;
+
+_end:
+ if (code < 0) {
+ tdRSmaFSUnRef(pSma, pStat, version);
+ smaError("vgId:%d, vnode snapshot rsma reader open %s succeed", TD_VID(pVnode), qTaskInfoFullName);
+ return TSDB_CODE_FAILED;
+ }
+
+ smaInfo("vgId:%d, vnode snapshot rsma reader open %s succeed", TD_VID(pVnode), qTaskInfoFullName);
+ return TSDB_CODE_SUCCESS;
+}
+
+static int32_t rsmaQTaskInfSnapReaderClose(SQTaskFReader** ppReader) {
+ if (!(*ppReader)) {
+ return TSDB_CODE_SUCCESS;
+ }
+
+ SSma* pSma = (*ppReader)->pSma;
+ SRSmaStat* pStat = SMA_RSMA_STAT(pSma);
+ int64_t version = (*ppReader)->version;
+
+ taosCloseFile(&(*ppReader)->pReadH);
+ tdRSmaFSUnRef(pSma, pStat, version);
+ taosMemoryFreeClear(*ppReader);
+ smaInfo("vgId:%d, vnode snapshot rsma reader closed for qTaskInfo version %" PRIi64, SMA_VID(pSma), version);
+
return TSDB_CODE_SUCCESS;
-_err:
- smaError("vgId:%d, vnode snapshot rsma reader opened failed since %s", TD_VID(pVnode), tstrerror(code));
- return TSDB_CODE_FAILED;
}
-static int32_t rsmaSnapReadQTaskInfo(SRsmaSnapReader* pReader, uint8_t** ppBuf) {
+static int32_t rsmaSnapReadQTaskInfo(SRSmaSnapReader* pReader, uint8_t** ppBuf) {
int32_t code = 0;
SSma* pSma = pReader->pSma;
int64_t n = 0;
uint8_t* pBuf = NULL;
SQTaskFReader* qReader = pReader->pQTaskFReader;
+ if (!qReader) {
+ *ppBuf = NULL;
+ smaInfo("vgId:%d, vnode snapshot rsma reader qtaskinfo, qTaskReader is NULL", SMA_VID(pSma));
+ return 0;
+ }
+
if (!qReader->pReadH) {
*ppBuf = NULL;
- smaInfo("vgId:%d, vnode snapshot rsma reader qtaskinfo, readh is empty", SMA_VID(pSma));
+ smaInfo("vgId:%d, vnode snapshot rsma reader qtaskinfo, readh is NULL", SMA_VID(pSma));
return 0;
}
@@ -153,7 +218,7 @@ _err:
return code;
}
-int32_t rsmaSnapRead(SRsmaSnapReader* pReader, uint8_t** ppData) {
+int32_t rsmaSnapRead(SRSmaSnapReader* pReader, uint8_t** ppData) {
int32_t code = 0;
*ppData = NULL;
@@ -205,9 +270,9 @@ _err:
return code;
}
-int32_t rsmaSnapReaderClose(SRsmaSnapReader** ppReader) {
+int32_t rsmaSnapReaderClose(SRSmaSnapReader** ppReader) {
int32_t code = 0;
- SRsmaSnapReader* pReader = *ppReader;
+ SRSmaSnapReader* pReader = *ppReader;
for (int32_t i = 0; i < TSDB_RETENTION_L2; ++i) {
if (pReader->pDataReader[i]) {
@@ -215,11 +280,7 @@ int32_t rsmaSnapReaderClose(SRsmaSnapReader** ppReader) {
}
}
- if (pReader->pQTaskFReader) {
- taosCloseFile(&pReader->pQTaskFReader->pReadH);
- taosMemoryFreeClear(pReader->pQTaskFReader);
- smaInfo("vgId:%d, vnode snapshot rsma reader closed for qTaskInfo", SMA_VID(pReader->pSma));
- }
+ rsmaQTaskInfSnapReaderClose(&pReader->pQTaskFReader);
smaInfo("vgId:%d, vnode snapshot rsma reader closed", SMA_VID(pReader->pSma));
@@ -227,8 +288,8 @@ int32_t rsmaSnapReaderClose(SRsmaSnapReader** ppReader) {
return code;
}
-// SRsmaSnapWriter ========================================
-struct SRsmaSnapWriter {
+// SRSmaSnapWriter ========================================
+struct SRSmaSnapWriter {
SSma* pSma;
int64_t sver;
int64_t ever;
@@ -244,13 +305,13 @@ struct SRsmaSnapWriter {
SQTaskFWriter* pQTaskFWriter;
};
-int32_t rsmaSnapWriterOpen(SSma* pSma, int64_t sver, int64_t ever, SRsmaSnapWriter** ppWriter) {
+int32_t rsmaSnapWriterOpen(SSma* pSma, int64_t sver, int64_t ever, SRSmaSnapWriter** ppWriter) {
int32_t code = 0;
- SRsmaSnapWriter* pWriter = NULL;
+ SRSmaSnapWriter* pWriter = NULL;
SVnode* pVnode = pSma->pVnode;
// alloc
- pWriter = (SRsmaSnapWriter*)taosMemoryCalloc(1, sizeof(*pWriter));
+ pWriter = (SRSmaSnapWriter*)taosMemoryCalloc(1, sizeof(*pWriter));
if (pWriter == NULL) {
code = TSDB_CODE_OUT_OF_MEMORY;
goto _err;
@@ -277,6 +338,7 @@ int32_t rsmaSnapWriterOpen(SSma* pSma, int64_t sver, int64_t ever, SRsmaSnapWrit
tdRSmaQTaskInfoGetFullName(TD_VID(pVnode), 0, tfsGetPrimaryPath(pVnode->pTfs), qTaskInfoFullName);
TdFilePtr qTaskF = taosCreateFile(qTaskInfoFullName, TD_FILE_CREATE | TD_FILE_WRITE | TD_FILE_TRUNC);
if (!qTaskF) {
+ taosMemoryFree(qWriter);
code = TAOS_SYSTEM_ERROR(errno);
smaError("vgId:%d, rsma snapshot writer open %s failed since %s", TD_VID(pSma->pVnode), qTaskInfoFullName,
tstrerror(code));
@@ -297,13 +359,14 @@ int32_t rsmaSnapWriterOpen(SSma* pSma, int64_t sver, int64_t ever, SRsmaSnapWrit
_err:
smaError("vgId:%d, rsma snapshot writer open failed since %s", TD_VID(pSma->pVnode), tstrerror(code));
+ if (pWriter) rsmaSnapWriterClose(&pWriter, 0);
*ppWriter = NULL;
return code;
}
-int32_t rsmaSnapWriterClose(SRsmaSnapWriter** ppWriter, int8_t rollback) {
+int32_t rsmaSnapWriterClose(SRSmaSnapWriter** ppWriter, int8_t rollback) {
int32_t code = 0;
- SRsmaSnapWriter* pWriter = *ppWriter;
+ SRSmaSnapWriter* pWriter = *ppWriter;
SVnode* pVnode = pWriter->pSma->pVnode;
if (rollback) {
@@ -332,7 +395,7 @@ int32_t rsmaSnapWriterClose(SRsmaSnapWriter** ppWriter, int8_t rollback) {
pWriter->pQTaskFWriter->fname, qTaskInfoFullName);
// rsma restore
- if ((code = tdRsmaRestore(pWriter->pSma, RSMA_RESTORE_SYNC, pWriter->ever)) < 0) {
+ if ((code = tdRSmaRestore(pWriter->pSma, RSMA_RESTORE_SYNC, pWriter->ever)) < 0) {
goto _err;
}
smaInfo("vgId:%d, vnode snapshot rsma writer restore from %s succeed", SMA_VID(pWriter->pSma), qTaskInfoFullName);
@@ -349,7 +412,7 @@ _err:
return code;
}
-int32_t rsmaSnapWrite(SRsmaSnapWriter* pWriter, uint8_t* pData, uint32_t nData) {
+int32_t rsmaSnapWrite(SRSmaSnapWriter* pWriter, uint8_t* pData, uint32_t nData) {
int32_t code = 0;
SSnapDataHdr* pHdr = (SSnapDataHdr*)pData;
@@ -377,7 +440,7 @@ _err:
return code;
}
-static int32_t rsmaSnapWriteQTaskInfo(SRsmaSnapWriter* pWriter, uint8_t* pData, uint32_t nData) {
+static int32_t rsmaSnapWriteQTaskInfo(SRSmaSnapWriter* pWriter, uint8_t* pData, uint32_t nData) {
int32_t code = 0;
SQTaskFWriter* qWriter = pWriter->pQTaskFWriter;
@@ -390,11 +453,11 @@ static int32_t rsmaSnapWriteQTaskInfo(SRsmaSnapWriter* pWriter, uint8_t* pData,
code = TAOS_SYSTEM_ERROR(errno);
goto _err;
}
+ smaInfo("vgId:%d, vnode snapshot rsma write qtaskinfo %s succeed", SMA_VID(pWriter->pSma), qWriter->fname);
} else {
smaInfo("vgId:%d, vnode snapshot rsma write qtaskinfo is not needed", SMA_VID(pWriter->pSma));
}
- smaInfo("vgId:%d, vnode snapshot rsma write qtaskinfo %s succeed", SMA_VID(pWriter->pSma), qWriter->fname);
_exit:
return code;
diff --git a/source/dnode/vnode/src/sma/smaTimeRange.c b/source/dnode/vnode/src/sma/smaTimeRange.c
index 1687cd46a07a7b0a70107eb825fb06b6f9314441..6c32fbbc847c2cb1d65831c34689cfc6939e7497 100644
--- a/source/dnode/vnode/src/sma/smaTimeRange.c
+++ b/source/dnode/vnode/src/sma/smaTimeRange.c
@@ -20,6 +20,10 @@
#define SMA_STORAGE_MINUTES_DAY 1440
#define SMA_STORAGE_SPLIT_FACTOR 14400 // least records in tsma file
+static int32_t tdProcessTSmaCreateImpl(SSma *pSma, int64_t version, const char *pMsg);
+static int32_t tdProcessTSmaInsertImpl(SSma *pSma, int64_t indexUid, const char *msg);
+static int32_t tdProcessTSmaGetDaysImpl(SVnodeCfg *pCfg, void *pCont, uint32_t contLen, int32_t *days);
+
// TODO: Who is responsible for resource allocate and release?
int32_t tdProcessTSmaInsert(SSma *pSma, int64_t indexUid, const char *msg) {
int32_t code = TSDB_CODE_SUCCESS;
@@ -59,7 +63,7 @@ int32_t smaGetTSmaDays(SVnodeCfg *pCfg, void *pCont, uint32_t contLen, int32_t *
* @param days unit is minute
* @return int32_t
*/
-int32_t tdProcessTSmaGetDaysImpl(SVnodeCfg *pCfg, void *pCont, uint32_t contLen, int32_t *days) {
+static int32_t tdProcessTSmaGetDaysImpl(SVnodeCfg *pCfg, void *pCont, uint32_t contLen, int32_t *days) {
SDecoder coder = {0};
tDecoderInit(&coder, pCont, contLen);
@@ -106,7 +110,7 @@ _err:
* @param pMsg
* @return int32_t
*/
-int32_t tdProcessTSmaCreateImpl(SSma *pSma, int64_t version, const char *pMsg) {
+static int32_t tdProcessTSmaCreateImpl(SSma *pSma, int64_t version, const char *pMsg) {
SSmaCfg *pCfg = (SSmaCfg *)pMsg;
if (TD_VID(pSma->pVnode) == pCfg->dstVgId) {
@@ -145,7 +149,7 @@ int32_t tdProcessTSmaCreateImpl(SSma *pSma, int64_t version, const char *pMsg) {
* @param msg
* @return int32_t
*/
-int32_t tdProcessTSmaInsertImpl(SSma *pSma, int64_t indexUid, const char *msg) {
+static int32_t tdProcessTSmaInsertImpl(SSma *pSma, int64_t indexUid, const char *msg) {
const SArray *pDataBlocks = (const SArray *)msg;
// TODO: destroy SSDataBlocks(msg)
if (!pDataBlocks) {
@@ -174,7 +178,6 @@ int32_t tdProcessTSmaInsertImpl(SSma *pSma, int64_t indexUid, const char *msg) {
return TSDB_CODE_FAILED;
}
- tdRefSmaStat(pSma, pStat);
pTsmaStat = SMA_STAT_TSMA(pStat);
if (!pTsmaStat->pTSma) {
@@ -201,7 +204,7 @@ int32_t tdProcessTSmaInsertImpl(SSma *pSma, int64_t indexUid, const char *msg) {
}
SBatchDeleteReq deleteReq;
- SSubmitReq *pSubmitReq = tqBlockToSubmit(pSma->pVnode, (const SArray *)msg, pTsmaStat->pTSchema, true,
+ SSubmitReq *pSubmitReq = tqBlockToSubmit(pSma->pVnode, (const SArray *)msg, pTsmaStat->pTSchema, &pTsmaStat->pTSma->schemaTag, true,
pTsmaStat->pTSma->dstTbUid, pTsmaStat->pTSma->dstTbName, &deleteReq);
if (!pSubmitReq) {
@@ -226,9 +229,7 @@ int32_t tdProcessTSmaInsertImpl(SSma *pSma, int64_t indexUid, const char *msg) {
goto _err;
}
- tdUnRefSmaStat(pSma, pStat);
return TSDB_CODE_SUCCESS;
_err:
- tdUnRefSmaStat(pSma, pStat);
return TSDB_CODE_FAILED;
}
diff --git a/source/dnode/vnode/src/sma/smaUtil.c b/source/dnode/vnode/src/sma/smaUtil.c
index d771797963a5cd9d242fea1f4d65a5634f12b5e8..6d7b7df1ee4061070328ccf8522b04ecc8bccd49 100644
--- a/source/dnode/vnode/src/sma/smaUtil.c
+++ b/source/dnode/vnode/src/sma/smaUtil.c
@@ -290,108 +290,19 @@ int32_t tdRemoveTFile(STFile *pTFile) {
void *tdAcquireSmaRef(int32_t rsetId, int64_t refId) {
void *pResult = taosAcquireRef(rsetId, refId);
if (!pResult) {
- smaWarn("rsma acquire ref for rsetId:%" PRIi64 " refId:%d failed since %s", rsetId, refId, terrstr());
+ smaWarn("rsma acquire ref for rsetId:%d refId:%" PRIi64 " failed since %s", rsetId, refId, terrstr());
} else {
- smaDebug("rsma acquire ref for rsetId:%" PRIi64 " refId:%d success", rsetId, refId);
+ smaDebug("rsma acquire ref for rsetId:%d refId:%" PRIi64 " success", rsetId, refId);
}
return pResult;
}
int32_t tdReleaseSmaRef(int32_t rsetId, int64_t refId) {
if (taosReleaseRef(rsetId, refId) < 0) {
- smaWarn("rsma release ref for rsetId:%" PRIi64 " refId:%d failed since %s", rsetId, refId, terrstr());
+ smaWarn("rsma release ref for rsetId:%d refId:%" PRIi64 " failed since %s", rsetId, refId, terrstr());
return TSDB_CODE_FAILED;
}
- smaDebug("rsma release ref for rsetId:%" PRIi64 " refId:%d success", rsetId, refId);
+ smaDebug("rsma release ref for rsetId:%d refId:%" PRIi64 " success", rsetId, refId);
return TSDB_CODE_SUCCESS;
-}
-
-static int32_t tdCloneQTaskInfo(SSma *pSma, qTaskInfo_t dstTaskInfo, qTaskInfo_t srcTaskInfo, SRSmaParam *param,
- tb_uid_t suid, int8_t idx) {
- SVnode *pVnode = pSma->pVnode;
- char *pOutput = NULL;
- int32_t len = 0;
-
- if ((terrno = qSerializeTaskStatus(srcTaskInfo, &pOutput, &len)) < 0) {
- smaError("vgId:%d, rsma clone, table %" PRIi64 " serialize qTaskInfo failed since %s", TD_VID(pVnode), suid,
- terrstr());
- goto _err;
- }
-
- SReadHandle handle = {
- .meta = pVnode->pMeta,
- .vnode = pVnode,
- .initTqReader = 1,
- };
- ASSERT(!dstTaskInfo);
- dstTaskInfo = qCreateStreamExecTaskInfo(param->qmsg[idx], &handle);
- if (!dstTaskInfo) {
- terrno = TSDB_CODE_RSMA_QTASKINFO_CREATE;
- goto _err;
- }
-
- if (qDeserializeTaskStatus(dstTaskInfo, pOutput, len) < 0) {
- smaError("vgId:%d, rsma clone, restore rsma task for table:%" PRIi64 " failed since %s", TD_VID(pVnode), suid,
- terrstr());
- goto _err;
- }
-
- smaDebug("vgId:%d, rsma clone, restore rsma task for table:%" PRIi64 " succeed", TD_VID(pVnode), suid);
-
- taosMemoryFreeClear(pOutput);
- return TSDB_CODE_SUCCESS;
-_err:
- taosMemoryFreeClear(pOutput);
- tdFreeQTaskInfo(dstTaskInfo, TD_VID(pVnode), idx + 1);
- smaError("vgId:%d, rsma clone, restore rsma task for table:%" PRIi64 " failed since %s", TD_VID(pVnode), suid,
- terrstr());
- return TSDB_CODE_FAILED;
-}
-
-/**
- * @brief Clone qTaskInfo of SRSmaInfo
- *
- * @param pSma
- * @param pInfo
- * @return int32_t
- */
-int32_t tdCloneRSmaInfo(SSma *pSma, SRSmaInfo *pInfo) {
- SRSmaParam *param = NULL;
- if (!pInfo) {
- return TSDB_CODE_SUCCESS;
- }
-
- SMetaReader mr = {0};
- metaReaderInit(&mr, SMA_META(pSma), 0);
- smaDebug("vgId:%d, rsma clone qTaskInfo for suid:%" PRIi64, SMA_VID(pSma), pInfo->suid);
- if (metaGetTableEntryByUid(&mr, pInfo->suid) < 0) {
- smaError("vgId:%d, rsma clone, failed to get table meta for %" PRIi64 " since %s", SMA_VID(pSma), pInfo->suid,
- terrstr());
- goto _err;
- }
- ASSERT(mr.me.type == TSDB_SUPER_TABLE);
- ASSERT(mr.me.uid == pInfo->suid);
- if (TABLE_IS_ROLLUP(mr.me.flags)) {
- param = &mr.me.stbEntry.rsmaParam;
- for (int32_t i = 0; i < TSDB_RETENTION_L2; ++i) {
- if (!pInfo->iTaskInfo[i]) {
- continue;
- }
- if (tdCloneQTaskInfo(pSma, pInfo->taskInfo[i], pInfo->iTaskInfo[i], param, pInfo->suid, i) < 0) {
- goto _err;
- }
- }
- smaDebug("vgId:%d, rsma clone env success for %" PRIi64, SMA_VID(pSma), pInfo->suid);
- } else {
- terrno = TSDB_CODE_RSMA_INVALID_SCHEMA;
- goto _err;
- }
-
- metaReaderClear(&mr);
- return TSDB_CODE_SUCCESS;
-_err:
- metaReaderClear(&mr);
- smaError("vgId:%d, rsma clone env failed for %" PRIi64 " since %s", SMA_VID(pSma), pInfo->suid, terrstr());
- return TSDB_CODE_FAILED;
}
\ No newline at end of file
diff --git a/source/dnode/vnode/src/tq/tq.c b/source/dnode/vnode/src/tq/tq.c
index 3ff59ac2c01953fbbbd1c88a5c253bcac3c9d045..ed5a89441665d27f868acece7504fecadba998a7 100644
--- a/source/dnode/vnode/src/tq/tq.c
+++ b/source/dnode/vnode/src/tq/tq.c
@@ -51,6 +51,25 @@ void tqCleanUp() {
}
}
+static void destroySTqHandle(void* data) {
+ STqHandle* pData = (STqHandle*)data;
+ qDestroyTask(pData->execHandle.task);
+ if (pData->execHandle.subType == TOPIC_SUB_TYPE__COLUMN) {
+ } else if (pData->execHandle.subType == TOPIC_SUB_TYPE__DB) {
+ tqCloseReader(pData->execHandle.pExecReader);
+ walCloseReader(pData->pWalReader);
+ taosHashCleanup(pData->execHandle.execDb.pFilterOutTbUid);
+ } else if (pData->execHandle.subType == TOPIC_SUB_TYPE__TABLE) {
+ walCloseReader(pData->pWalReader);
+ tqCloseReader(pData->execHandle.pExecReader);
+ }
+}
+
+static void tqPushEntryFree(void* data) {
+ STqPushEntry* p = *(void**)data;
+ taosMemoryFree(p);
+}
+
STQ* tqOpen(const char* path, SVnode* pVnode) {
STQ* pTq = taosMemoryCalloc(1, sizeof(STQ));
if (pTq == NULL) {
@@ -62,7 +81,11 @@ STQ* tqOpen(const char* path, SVnode* pVnode) {
pTq->pHandle = taosHashInit(64, MurmurHash3_32, true, HASH_ENTRY_LOCK);
- pTq->pPushMgr = taosHashInit(64, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), true, HASH_ENTRY_LOCK);
+ taosHashSetFreeFp(pTq->pHandle, destroySTqHandle);
+
+ taosInitRWLatch(&pTq->pushLock);
+ pTq->pPushMgr = taosHashInit(64, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), true, HASH_NO_LOCK);
+ taosHashSetFreeFp(pTq->pPushMgr, tqPushEntryFree);
pTq->pCheckInfo = taosHashInit(64, MurmurHash3_32, true, HASH_ENTRY_LOCK);
@@ -100,7 +123,13 @@ void tqClose(STQ* pTq) {
}
int32_t tqSendMetaPollRsp(STQ* pTq, const SRpcMsg* pMsg, const SMqPollReq* pReq, const SMqMetaRsp* pRsp) {
- int32_t tlen = sizeof(SMqRspHead) + tEncodeSMqMetaRsp(NULL, pRsp);
+ int32_t len = 0;
+ int32_t code = 0;
+ tEncodeSize(tEncodeSMqMetaRsp, pRsp, len, code);
+ if (code < 0) {
+ return -1;
+ }
+ int32_t tlen = sizeof(SMqRspHead) + len;
void* buf = rpcMallocCont(tlen);
if (buf == NULL) {
return -1;
@@ -111,7 +140,11 @@ int32_t tqSendMetaPollRsp(STQ* pTq, const SRpcMsg* pMsg, const SMqPollReq* pReq,
((SMqRspHead*)buf)->consumerId = pReq->consumerId;
void* abuf = POINTER_SHIFT(buf, sizeof(SMqRspHead));
- tEncodeSMqMetaRsp(&abuf, pRsp);
+
+ SEncoder encoder = {0};
+ tEncoderInit(&encoder, abuf, len);
+ tEncodeSMqMetaRsp(&encoder, pRsp);
+ tEncoderClear(&encoder);
SRpcMsg resp = {
.info = pMsg->info,
@@ -121,9 +154,67 @@ int32_t tqSendMetaPollRsp(STQ* pTq, const SRpcMsg* pMsg, const SMqPollReq* pReq,
};
tmsgSendRsp(&resp);
- tqDebug("vgId:%d, from consumer:%" PRId64 ", (epoch %d) send rsp, res msg type %d, reqOffset:%" PRId64
- ", rspOffset:%" PRId64,
- TD_VID(pTq->pVnode), pReq->consumerId, pReq->epoch, pRsp->resMsgType, pRsp->reqOffset, pRsp->rspOffset);
+ tqDebug("vgId:%d, from consumer:%" PRId64 ", (epoch %d) send rsp, res msg type %d, offset type:%d",
+ TD_VID(pTq->pVnode), pReq->consumerId, pReq->epoch, pRsp->resMsgType, pRsp->rspOffset.type);
+
+ return 0;
+}
+
+int32_t tqPushDataRsp(STQ* pTq, STqPushEntry* pPushEntry) {
+ SMqDataRsp* pRsp = &pPushEntry->dataRsp;
+
+ ASSERT(taosArrayGetSize(pRsp->blockData) == pRsp->blockNum);
+ ASSERT(taosArrayGetSize(pRsp->blockDataLen) == pRsp->blockNum);
+
+ ASSERT(!pRsp->withSchema);
+ ASSERT(taosArrayGetSize(pRsp->blockSchema) == 0);
+
+ if (pRsp->reqOffset.type == TMQ_OFFSET__LOG) {
+ if (pRsp->blockNum > 0) {
+ ASSERT(pRsp->rspOffset.version > pRsp->reqOffset.version);
+ } else {
+ ASSERT(pRsp->rspOffset.version >= pRsp->reqOffset.version);
+ }
+ }
+
+ int32_t len = 0;
+ int32_t code = 0;
+ tEncodeSize(tEncodeSMqDataRsp, pRsp, len, code);
+
+ if (code < 0) {
+ return -1;
+ }
+
+ int32_t tlen = sizeof(SMqRspHead) + len;
+ void* buf = rpcMallocCont(tlen);
+ if (buf == NULL) {
+ return -1;
+ }
+
+ memcpy(buf, &pPushEntry->rspHead, sizeof(SMqRspHead));
+
+ void* abuf = POINTER_SHIFT(buf, sizeof(SMqRspHead));
+
+ SEncoder encoder = {0};
+ tEncoderInit(&encoder, abuf, len);
+ tEncodeSMqDataRsp(&encoder, pRsp);
+ tEncoderClear(&encoder);
+
+ SRpcMsg rsp = {
+ .info = pPushEntry->pInfo,
+ .pCont = buf,
+ .contLen = tlen,
+ .code = 0,
+ };
+
+ tmsgSendRsp(&rsp);
+
+ char buf1[80] = {0};
+ char buf2[80] = {0};
+ tFormatOffset(buf1, 80, &pRsp->reqOffset);
+ tFormatOffset(buf2, 80, &pRsp->rspOffset);
+ tqDebug("vgId:%d, from consumer:%" PRId64 ", (epoch %d) push rsp, block num: %d, reqOffset:%s, rspOffset:%s",
+ TD_VID(pTq->pVnode), pPushEntry->rspHead.consumerId, pRsp->head.epoch, pRsp->blockNum, buf1, buf2);
return 0;
}
@@ -132,6 +223,62 @@ int32_t tqSendDataRsp(STQ* pTq, const SRpcMsg* pMsg, const SMqPollReq* pReq, con
ASSERT(taosArrayGetSize(pRsp->blockData) == pRsp->blockNum);
ASSERT(taosArrayGetSize(pRsp->blockDataLen) == pRsp->blockNum);
+ ASSERT(!pRsp->withSchema);
+ ASSERT(taosArrayGetSize(pRsp->blockSchema) == 0);
+
+ if (pRsp->reqOffset.type == TMQ_OFFSET__LOG) {
+ if (pRsp->blockNum > 0) {
+ ASSERT(pRsp->rspOffset.version > pRsp->reqOffset.version);
+ } else {
+ ASSERT(pRsp->rspOffset.version >= pRsp->reqOffset.version);
+ }
+ }
+
+ int32_t len = 0;
+ int32_t code = 0;
+ tEncodeSize(tEncodeSMqDataRsp, pRsp, len, code);
+ if (code < 0) {
+ return -1;
+ }
+ int32_t tlen = sizeof(SMqRspHead) + len;
+ void* buf = rpcMallocCont(tlen);
+ if (buf == NULL) {
+ return -1;
+ }
+
+ ((SMqRspHead*)buf)->mqMsgType = TMQ_MSG_TYPE__POLL_RSP;
+ ((SMqRspHead*)buf)->epoch = pReq->epoch;
+ ((SMqRspHead*)buf)->consumerId = pReq->consumerId;
+
+ void* abuf = POINTER_SHIFT(buf, sizeof(SMqRspHead));
+
+ SEncoder encoder = {0};
+ tEncoderInit(&encoder, abuf, len);
+ tEncodeSMqDataRsp(&encoder, pRsp);
+ tEncoderClear(&encoder);
+
+ SRpcMsg rsp = {
+ .info = pMsg->info,
+ .pCont = buf,
+ .contLen = tlen,
+ .code = 0,
+ };
+ tmsgSendRsp(&rsp);
+
+ char buf1[80] = {0};
+ char buf2[80] = {0};
+ tFormatOffset(buf1, 80, &pRsp->reqOffset);
+ tFormatOffset(buf2, 80, &pRsp->rspOffset);
+ tqDebug("vgId:%d, from consumer:%" PRId64 ", (epoch %d) send rsp, block num: %d, reqOffset:%s, rspOffset:%s",
+ TD_VID(pTq->pVnode), pReq->consumerId, pReq->epoch, pRsp->blockNum, buf1, buf2);
+
+ return 0;
+}
+
+int32_t tqSendTaosxRsp(STQ* pTq, const SRpcMsg* pMsg, const SMqPollReq* pReq, const STaosxRsp* pRsp) {
+ ASSERT(taosArrayGetSize(pRsp->blockData) == pRsp->blockNum);
+ ASSERT(taosArrayGetSize(pRsp->blockDataLen) == pRsp->blockNum);
+
if (pRsp->withSchema) {
ASSERT(taosArrayGetSize(pRsp->blockSchema) == pRsp->blockNum);
} else {
@@ -148,7 +295,7 @@ int32_t tqSendDataRsp(STQ* pTq, const SRpcMsg* pMsg, const SMqPollReq* pReq, con
int32_t len = 0;
int32_t code = 0;
- tEncodeSize(tEncodeSMqDataRsp, pRsp, len, code);
+ tEncodeSize(tEncodeSTaosxRsp, pRsp, len, code);
if (code < 0) {
return -1;
}
@@ -158,7 +305,7 @@ int32_t tqSendDataRsp(STQ* pTq, const SRpcMsg* pMsg, const SMqPollReq* pReq, con
return -1;
}
- ((SMqRspHead*)buf)->mqMsgType = TMQ_MSG_TYPE__POLL_RSP;
+ ((SMqRspHead*)buf)->mqMsgType = TMQ_MSG_TYPE__TAOSX_RSP;
((SMqRspHead*)buf)->epoch = pReq->epoch;
((SMqRspHead*)buf)->consumerId = pReq->consumerId;
@@ -166,7 +313,7 @@ int32_t tqSendDataRsp(STQ* pTq, const SRpcMsg* pMsg, const SMqPollReq* pReq, con
SEncoder encoder = {0};
tEncoderInit(&encoder, abuf, len);
- tEncodeSMqDataRsp(&encoder, pRsp);
+ tEncodeSTaosxRsp(&encoder, pRsp);
tEncoderClear(&encoder);
SRpcMsg rsp = {
@@ -181,7 +328,8 @@ int32_t tqSendDataRsp(STQ* pTq, const SRpcMsg* pMsg, const SMqPollReq* pReq, con
char buf2[80] = {0};
tFormatOffset(buf1, 80, &pRsp->reqOffset);
tFormatOffset(buf2, 80, &pRsp->rspOffset);
- tqDebug("vgId:%d, from consumer:%" PRId64 ", (epoch %d) send rsp, block num: %d, reqOffset:%s, rspOffset:%s",
+ tqDebug("taosx rsp, vgId:%d, from consumer:%" PRId64
+ ", (epoch %d) send rsp, block num: %d, reqOffset:%s, rspOffset:%s",
TD_VID(pTq->pVnode), pReq->consumerId, pReq->epoch, pRsp->blockNum, buf1, buf2);
return 0;
@@ -202,7 +350,7 @@ int32_t tqProcessOffsetCommitReq(STQ* pTq, int64_t version, char* msg, int32_t m
}
tDecoderClear(&decoder);
- if (offset.val.type == TMQ_OFFSET__SNAPSHOT_DATA) {
+ if (offset.val.type == TMQ_OFFSET__SNAPSHOT_DATA || offset.val.type == TMQ_OFFSET__SNAPSHOT_META) {
tqDebug("receive offset commit msg to %s on vgId:%d, offset(type:snapshot) uid:%" PRId64 ", ts:%" PRId64,
offset.subKey, TD_VID(pTq->pVnode), offset.val.uid, offset.val.ts);
} else if (offset.val.type == TMQ_OFFSET__LOG) {
@@ -272,6 +420,8 @@ static int32_t tqInitDataRsp(SMqDataRsp* pRsp, const SMqPollReq* pReq, int8_t su
return -1;
}
+ pRsp->withTbName = 0;
+#if 0
pRsp->withTbName = pReq->withTbName;
if (pRsp->withTbName) {
pRsp->blockTbName = taosArrayInit(0, sizeof(void*));
@@ -280,6 +430,7 @@ static int32_t tqInitDataRsp(SMqDataRsp* pRsp, const SMqPollReq* pReq, int8_t su
return -1;
}
}
+#endif
if (subType == TOPIC_SUB_TYPE__COLUMN) {
pRsp->withSchema = false;
@@ -294,10 +445,25 @@ static int32_t tqInitDataRsp(SMqDataRsp* pRsp, const SMqPollReq* pReq, int8_t su
return 0;
}
+static int32_t tqInitTaosxRsp(STaosxRsp* pRsp, const SMqPollReq* pReq) {
+ pRsp->reqOffset = pReq->reqOffset;
+
+ pRsp->withTbName = 1;
+ pRsp->withSchema = 1;
+ pRsp->blockData = taosArrayInit(0, sizeof(void*));
+ pRsp->blockDataLen = taosArrayInit(0, sizeof(int32_t));
+ pRsp->blockTbName = taosArrayInit(0, sizeof(void*));
+ pRsp->blockSchema = taosArrayInit(0, sizeof(void*));
+
+ if (pRsp->blockData == NULL || pRsp->blockDataLen == NULL || pRsp->blockTbName == NULL || pRsp->blockSchema == NULL) {
+ return -1;
+ }
+ return 0;
+}
+
int32_t tqProcessPollReq(STQ* pTq, SRpcMsg* pMsg) {
SMqPollReq* pReq = pMsg->pCont;
int64_t consumerId = pReq->consumerId;
- int64_t timeout = pReq->timeout;
int32_t reqEpoch = pReq->epoch;
int32_t code = 0;
STqOffsetVal reqOffset = pReq->reqOffset;
@@ -333,9 +499,6 @@ int32_t tqProcessPollReq(STQ* pTq, SRpcMsg* pMsg) {
tqDebug("tmq poll: consumer %" PRId64 " (epoch %d), subkey %s, recv poll req in vg %d, req offset %s", consumerId,
pReq->epoch, pHandle->subKey, TD_VID(pTq->pVnode), buf);
- SMqDataRsp dataRsp = {0};
- tqInitDataRsp(&dataRsp, pReq, pHandle->execHandle.subType);
-
// 2.reset offset if needed
if (reqOffset.type > 0) {
fetchOffsetNew = reqOffset;
@@ -349,62 +512,123 @@ int32_t tqProcessPollReq(STQ* pTq, SRpcMsg* pMsg) {
TD_VID(pTq->pVnode), formatBuf);
} else {
if (reqOffset.type == TMQ_OFFSET__RESET_EARLIEAST) {
- if (pReq->useSnapshot && pHandle->execHandle.subType == TOPIC_SUB_TYPE__COLUMN) {
- if (!pHandle->fetchMeta) {
- tqOffsetResetToData(&fetchOffsetNew, 0, 0);
+ if (pReq->useSnapshot) {
+ if (pHandle->fetchMeta) {
+ tqOffsetResetToMeta(&fetchOffsetNew, 0);
} else {
- // reset to meta
- ASSERT(0);
+ tqOffsetResetToData(&fetchOffsetNew, 0, 0);
}
} else {
tqOffsetResetToLog(&fetchOffsetNew, walGetFirstVer(pTq->pVnode->pWal));
}
} else if (reqOffset.type == TMQ_OFFSET__RESET_LATEST) {
+ SMqDataRsp dataRsp = {0};
+ tqInitDataRsp(&dataRsp, pReq, pHandle->execHandle.subType);
+
tqOffsetResetToLog(&dataRsp.rspOffset, walGetLastVer(pTq->pVnode->pWal));
tqDebug("tmq poll: consumer %" PRId64 ", subkey %s, vg %d, offset reset to %" PRId64, consumerId,
pHandle->subKey, TD_VID(pTq->pVnode), dataRsp.rspOffset.version);
if (tqSendDataRsp(pTq, pMsg, pReq, &dataRsp) < 0) {
code = -1;
}
- goto OVER;
+ tDeleteSMqDataRsp(&dataRsp);
+ return code;
} else if (reqOffset.type == TMQ_OFFSET__RESET_NONE) {
tqError("tmq poll: subkey %s, no offset committed for consumer %" PRId64
" in vg %d, subkey %s, reset none failed",
pHandle->subKey, consumerId, TD_VID(pTq->pVnode), pReq->subKey);
terrno = TSDB_CODE_TQ_NO_COMMITTED_OFFSET;
- code = -1;
- goto OVER;
+ return -1;
}
}
}
- // 3.query
if (pHandle->execHandle.subType == TOPIC_SUB_TYPE__COLUMN) {
- /*if (fetchOffsetNew.type == TMQ_OFFSET__LOG) {*/
- /*fetchOffsetNew.version++;*/
- /*}*/
- if (tqScan(pTq, pHandle, &dataRsp, &fetchOffsetNew) < 0) {
- ASSERT(0);
- code = -1;
- goto OVER;
- }
- if (dataRsp.blockNum == 0) {
- // TODO add to async task pool
- /*dataRsp.rspOffset.version--;*/
+ SMqDataRsp dataRsp = {0};
+ tqInitDataRsp(&dataRsp, pReq, pHandle->execHandle.subType);
+ // lock
+ taosWLockLatch(&pTq->pushLock);
+ tqScanData(pTq, pHandle, &dataRsp, &fetchOffsetNew);
+
+#if 1
+ if (dataRsp.blockNum == 0 && dataRsp.reqOffset.type == TMQ_OFFSET__LOG &&
+ dataRsp.reqOffset.version == dataRsp.rspOffset.version) {
+ STqPushEntry* pPushEntry = taosMemoryCalloc(1, sizeof(STqPushEntry));
+ if (pPushEntry != NULL) {
+ pPushEntry->pInfo = pMsg->info;
+ memcpy(pPushEntry->subKey, pHandle->subKey, TSDB_SUBSCRIBE_KEY_LEN);
+ dataRsp.withTbName = 0;
+ memcpy(&pPushEntry->dataRsp, &dataRsp, sizeof(SMqDataRsp));
+ pPushEntry->rspHead.consumerId = consumerId;
+ pPushEntry->rspHead.epoch = reqEpoch;
+ pPushEntry->rspHead.mqMsgType = TMQ_MSG_TYPE__POLL_RSP;
+ taosHashPut(pTq->pPushMgr, pHandle->subKey, strlen(pHandle->subKey) + 1, &pPushEntry, sizeof(void*));
+ tqDebug("tmq poll: consumer %ld, subkey %s, vg %d save handle to push mgr", consumerId, pHandle->subKey,
+ TD_VID(pTq->pVnode));
+ // unlock
+ taosWUnLockLatch(&pTq->pushLock);
+ return 0;
+ }
}
+ taosWUnLockLatch(&pTq->pushLock);
+#endif
+
if (tqSendDataRsp(pTq, pMsg, pReq, &dataRsp) < 0) {
code = -1;
}
- goto OVER;
+
+ tqDebug("tmq poll: consumer %ld, subkey %s, vg %d, send data blockNum:%d, offset type:%d, uid:%ld, version:%ld",
+ consumerId, pHandle->subKey, TD_VID(pTq->pVnode), dataRsp.blockNum, dataRsp.rspOffset.type,
+ dataRsp.rspOffset.uid, dataRsp.rspOffset.version);
+
+ tDeleteSMqDataRsp(&dataRsp);
+ return code;
+ }
+
+ // for taosx
+ ASSERT(pHandle->execHandle.subType != TOPIC_SUB_TYPE__COLUMN);
+
+ SMqMetaRsp metaRsp = {0};
+
+ STaosxRsp taosxRsp = {0};
+ tqInitTaosxRsp(&taosxRsp, pReq);
+
+ if (fetchOffsetNew.type != TMQ_OFFSET__LOG) {
+ tqScan(pTq, pHandle, &taosxRsp, &metaRsp, &fetchOffsetNew);
+
+ if (metaRsp.metaRspLen > 0) {
+ if (tqSendMetaPollRsp(pTq, pMsg, pReq, &metaRsp) < 0) {
+ code = -1;
+ }
+ tqDebug("tmq poll: consumer %ld, subkey %s, vg %d, send meta offset type:%d,uid:%ld,version:%ld", consumerId,
+ pHandle->subKey, TD_VID(pTq->pVnode), metaRsp.rspOffset.type, metaRsp.rspOffset.uid,
+ metaRsp.rspOffset.version);
+ taosMemoryFree(metaRsp.metaRsp);
+ tDeleteSTaosxRsp(&taosxRsp);
+ return code;
+ }
+
+ if (taosxRsp.blockNum > 0) {
+ if (tqSendTaosxRsp(pTq, pMsg, pReq, &taosxRsp) < 0) {
+ code = -1;
+ }
+ tDeleteSTaosxRsp(&taosxRsp);
+ return code;
+ } else {
+ fetchOffsetNew = taosxRsp.rspOffset;
+ }
+
+ tqDebug("taosx poll: consumer %ld, subkey %s, vg %d, send data blockNum:%d, offset type:%d,uid:%ld,version:%ld",
+ consumerId, pHandle->subKey, TD_VID(pTq->pVnode), taosxRsp.blockNum, taosxRsp.rspOffset.type,
+ taosxRsp.rspOffset.uid, taosxRsp.rspOffset.version);
}
- if (pHandle->execHandle.subType != TOPIC_SUB_TYPE__COLUMN) {
- ASSERT(fetchOffsetNew.type == TMQ_OFFSET__LOG);
+ if (fetchOffsetNew.type == TMQ_OFFSET__LOG) {
int64_t fetchVer = fetchOffsetNew.version + 1;
pCkHead = taosMemoryMalloc(sizeof(SWalCkHead) + 2048);
if (pCkHead == NULL) {
- code = -1;
- goto OVER;
+ tDeleteSTaosxRsp(&taosxRsp);
+ return -1;
}
walSetReaderCapacity(pHandle->pWalReader, 2048);
@@ -419,14 +643,13 @@ int32_t tqProcessPollReq(STQ* pTq, SRpcMsg* pMsg) {
}
if (tqFetchLog(pTq, pHandle, &fetchVer, &pCkHead) < 0) {
- // TODO add push mgr
-
- tqOffsetResetToLog(&dataRsp.rspOffset, fetchVer);
- ASSERT(dataRsp.rspOffset.version >= dataRsp.reqOffset.version);
- if (tqSendDataRsp(pTq, pMsg, pReq, &dataRsp) < 0) {
+ tqOffsetResetToLog(&taosxRsp.rspOffset, fetchVer);
+ if (tqSendTaosxRsp(pTq, pMsg, pReq, &taosxRsp) < 0) {
code = -1;
}
- goto OVER;
+ tDeleteSTaosxRsp(&taosxRsp);
+ if (pCkHead) taosMemoryFree(pCkHead);
+ return code;
}
SWalCont* pHead = &pCkHead->head;
@@ -437,19 +660,19 @@ int32_t tqProcessPollReq(STQ* pTq, SRpcMsg* pMsg) {
if (pHead->msgType == TDMT_VND_SUBMIT) {
SSubmitReq* pCont = (SSubmitReq*)&pHead->body;
- if (tqLogScanExec(pTq, &pHandle->execHandle, pCont, &dataRsp) < 0) {
+ if (tqTaosxScanLog(pTq, pHandle, pCont, &taosxRsp) < 0) {
/*ASSERT(0);*/
}
// TODO batch optimization:
// TODO continue scan until meeting batch requirement
- if (dataRsp.blockNum > 0 /* threshold */) {
- tqOffsetResetToLog(&dataRsp.rspOffset, fetchVer);
- ASSERT(dataRsp.rspOffset.version >= dataRsp.reqOffset.version);
-
- if (tqSendDataRsp(pTq, pMsg, pReq, &dataRsp) < 0) {
+ if (taosxRsp.blockNum > 0 /* threshold */) {
+ tqOffsetResetToLog(&taosxRsp.rspOffset, fetchVer);
+ if (tqSendTaosxRsp(pTq, pMsg, pReq, &taosxRsp) < 0) {
code = -1;
}
- goto OVER;
+ tDeleteSTaosxRsp(&taosxRsp);
+ if (pCkHead) taosMemoryFree(pCkHead);
+ return code;
} else {
fetchVer++;
}
@@ -458,49 +681,46 @@ int32_t tqProcessPollReq(STQ* pTq, SRpcMsg* pMsg) {
ASSERT(pHandle->fetchMeta);
ASSERT(IS_META_MSG(pHead->msgType));
tqDebug("fetch meta msg, ver:%" PRId64 ", type:%d", pHead->version, pHead->msgType);
- SMqMetaRsp metaRsp = {0};
- /*metaRsp.reqOffset = pReq->reqOffset.version;*/
- metaRsp.rspOffset = fetchVer;
- /*metaRsp.rspOffsetNew.version = fetchVer;*/
- tqOffsetResetToLog(&metaRsp.reqOffsetNew, pReq->reqOffset.version);
- tqOffsetResetToLog(&metaRsp.rspOffsetNew, fetchVer);
+ tqOffsetResetToLog(&metaRsp.rspOffset, fetchVer);
metaRsp.resMsgType = pHead->msgType;
metaRsp.metaRspLen = pHead->bodyLen;
metaRsp.metaRsp = pHead->body;
if (tqSendMetaPollRsp(pTq, pMsg, pReq, &metaRsp) < 0) {
code = -1;
- goto OVER;
+ taosMemoryFree(pCkHead);
+ tDeleteSTaosxRsp(&taosxRsp);
+ return code;
}
code = 0;
- goto OVER;
+ if (pCkHead) taosMemoryFree(pCkHead);
+ tDeleteSTaosxRsp(&taosxRsp);
+ return code;
}
}
}
-
-OVER:
- if (pCkHead) taosMemoryFree(pCkHead);
- // TODO wrap in destroy func
- taosArrayDestroy(dataRsp.blockDataLen);
- taosArrayDestroyP(dataRsp.blockData, (FDelete)taosMemoryFree);
-
- if (dataRsp.withSchema) {
- taosArrayDestroyP(dataRsp.blockSchema, (FDelete)tDeleteSSchemaWrapper);
- }
-
- if (dataRsp.withTbName) {
- taosArrayDestroyP(dataRsp.blockTbName, (FDelete)taosMemoryFree);
- }
-
- return code;
+ tDeleteSTaosxRsp(&taosxRsp);
+ return 0;
}
int32_t tqProcessVgDeleteReq(STQ* pTq, int64_t version, char* msg, int32_t msgLen) {
SMqVDeleteReq* pReq = (SMqVDeleteReq*)msg;
- int32_t code = taosHashRemove(pTq->pHandle, pReq->subKey, strlen(pReq->subKey));
- ASSERT(code == 0);
+ taosWLockLatch(&pTq->pushLock);
+ int32_t code = taosHashRemove(pTq->pPushMgr, pReq->subKey, strlen(pReq->subKey));
+ if (code != 0) {
+ tqDebug("vgId:%d, tq remove push handle %s", pTq->pVnode->config.vgId, pReq->subKey);
+ }
+ taosWUnLockLatch(&pTq->pushLock);
- tqOffsetDelete(pTq->pOffsetStore, pReq->subKey);
+ code = taosHashRemove(pTq->pHandle, pReq->subKey, strlen(pReq->subKey));
+ if (code != 0) {
+ tqError("cannot process tq delete req %s, since no such handle", pReq->subKey);
+ }
+
+ code = tqOffsetDelete(pTq->pOffsetStore, pReq->subKey);
+ if (code != 0) {
+ tqError("cannot process tq delete req %s, since no such offset", pReq->subKey);
+ }
if (tqMetaDeleteHandle(pTq, pReq->subKey) < 0) {
ASSERT(0);
@@ -550,7 +770,10 @@ int32_t tqProcessVgChangeReq(STQ* pTq, int64_t version, char* msg, int32_t msgLe
tqError("vgId:%d, build new consumer handle %s for consumer %d, but old consumerId is %ld", req.vgId, req.subKey,
req.newConsumerId, req.oldConsumerId);
}
- ASSERT(req.newConsumerId != -1);
+ if (req.newConsumerId == -1) {
+ tqError("vgId:%d, tq invalid rebalance request, new consumerId %ld", req.vgId, req.newConsumerId);
+ return 0;
+ }
STqHandle tqHandle = {0};
pHandle = &tqHandle;
/*taosInitRWLatch(&pExec->lock);*/
@@ -561,6 +784,7 @@ int32_t tqProcessVgChangeReq(STQ* pTq, int64_t version, char* msg, int32_t msgLe
pHandle->execHandle.subType = req.subType;
pHandle->fetchMeta = req.withMeta;
+
// TODO version should be assigned and refed during preprocess
SWalRef* pRef = walRefCommittedVer(pTq->pVnode->pWal);
if (pRef == NULL) {
@@ -570,36 +794,42 @@ int32_t tqProcessVgChangeReq(STQ* pTq, int64_t version, char* msg, int32_t msgLe
int64_t ver = pRef->refVer;
pHandle->pRef = pRef;
+ SReadHandle handle = {
+ .meta = pTq->pVnode->pMeta,
+ .vnode = pTq->pVnode,
+ .initTableReader = true,
+ .initTqReader = true,
+ .version = ver,
+ };
+ pHandle->snapshotVer = ver;
+
if (pHandle->execHandle.subType == TOPIC_SUB_TYPE__COLUMN) {
pHandle->execHandle.execCol.qmsg = req.qmsg;
- pHandle->snapshotVer = ver;
req.qmsg = NULL;
- SReadHandle handle = {
- .meta = pTq->pVnode->pMeta,
- .vnode = pTq->pVnode,
- .initTableReader = true,
- .initTqReader = true,
- .version = ver,
- };
- pHandle->execHandle.execCol.task =
+
+ pHandle->execHandle.task =
qCreateQueueExecTaskInfo(pHandle->execHandle.execCol.qmsg, &handle, &pHandle->execHandle.numOfCols,
&pHandle->execHandle.pSchemaWrapper);
- ASSERT(pHandle->execHandle.execCol.task);
+ ASSERT(pHandle->execHandle.task);
void* scanner = NULL;
- qExtractStreamScanner(pHandle->execHandle.execCol.task, &scanner);
+ qExtractStreamScanner(pHandle->execHandle.task, &scanner);
ASSERT(scanner);
pHandle->execHandle.pExecReader = qExtractReaderFromStreamScanner(scanner);
ASSERT(pHandle->execHandle.pExecReader);
} else if (pHandle->execHandle.subType == TOPIC_SUB_TYPE__DB) {
pHandle->pWalReader = walOpenReader(pTq->pVnode->pWal, NULL);
-
pHandle->execHandle.pExecReader = tqOpenReader(pTq->pVnode);
pHandle->execHandle.execDb.pFilterOutTbUid =
taosHashInit(64, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), false, HASH_NO_LOCK);
+ buildSnapContext(handle.meta, handle.version, 0, pHandle->execHandle.subType, pHandle->fetchMeta,
+ (SSnapContext**)(&handle.sContext));
+
+ pHandle->execHandle.task = qCreateQueueExecTaskInfo(NULL, &handle, NULL, NULL);
} else if (pHandle->execHandle.subType == TOPIC_SUB_TYPE__TABLE) {
pHandle->pWalReader = walOpenReader(pTq->pVnode->pWal, NULL);
pHandle->execHandle.execTb.suid = req.suid;
+
SArray* tbUidList = taosArrayInit(0, sizeof(int64_t));
vnodeGetCtbIdList(pTq->pVnode, req.suid, tbUidList);
tqDebug("vgId:%d, tq try to get all ctb, suid:%" PRId64, pTq->pVnode->config.vgId, req.suid);
@@ -610,6 +840,10 @@ int32_t tqProcessVgChangeReq(STQ* pTq, int64_t version, char* msg, int32_t msgLe
pHandle->execHandle.pExecReader = tqOpenReader(pTq->pVnode);
tqReaderSetTbUidList(pHandle->execHandle.pExecReader, tbUidList);
taosArrayDestroy(tbUidList);
+
+ buildSnapContext(handle.meta, handle.version, req.suid, pHandle->execHandle.subType, pHandle->fetchMeta,
+ (SSnapContext**)(&handle.sContext));
+ pHandle->execHandle.task = qCreateQueueExecTaskInfo(NULL, &handle, NULL, NULL);
}
taosHashPut(pTq->pHandle, req.subKey, strlen(req.subKey), pHandle, sizeof(STqHandle));
tqDebug("try to persist handle %s consumer %" PRId64, req.subKey, pHandle->consumerId);
@@ -625,7 +859,9 @@ int32_t tqProcessVgChangeReq(STQ* pTq, int64_t version, char* msg, int32_t msgLe
atomic_add_fetch_32(&pHandle->epoch, 1);
if (tqMetaSaveHandle(pTq, req.subKey, pHandle) < 0) {
// TODO
+ ASSERT(0);
}
+ // close handle
}
return 0;
@@ -652,7 +888,7 @@ int32_t tqExpandTask(STQ* pTq, SStreamTask* pTask) {
// expand executor
if (pTask->taskLevel == TASK_LEVEL__SOURCE) {
- pTask->pState = streamStateOpen(pTq->pStreamMeta->path, pTask);
+ pTask->pState = streamStateOpen(pTq->pStreamMeta->path, pTask, false);
if (pTask->pState == NULL) {
return -1;
}
@@ -666,7 +902,7 @@ int32_t tqExpandTask(STQ* pTq, SStreamTask* pTask) {
pTask->exec.executor = qCreateStreamExecTaskInfo(pTask->exec.qmsg, &handle);
ASSERT(pTask->exec.executor);
} else if (pTask->taskLevel == TASK_LEVEL__AGG) {
- pTask->pState = streamStateOpen(pTq->pStreamMeta->path, pTask);
+ pTask->pState = streamStateOpen(pTq->pStreamMeta->path, pTask, false);
if (pTask->pState == NULL) {
return -1;
}
@@ -708,7 +944,117 @@ int32_t tqProcessTaskDeployReq(STQ* pTq, int64_t version, char* msg, int32_t msg
return streamMetaAddSerializedTask(pTq->pStreamMeta, version, msg, msgLen);
}
-int32_t tqProcessStreamTrigger(STQ* pTq, SSubmitReq* pReq, int64_t ver) {
+int32_t tqProcessDelReq(STQ* pTq, void* pReq, int32_t len, int64_t ver) {
+ bool failed = false;
+ SDecoder* pCoder = &(SDecoder){0};
+ SDeleteRes* pRes = &(SDeleteRes){0};
+
+ pRes->uidList = taosArrayInit(0, sizeof(tb_uid_t));
+ if (pRes->uidList == NULL) {
+ terrno = TSDB_CODE_OUT_OF_MEMORY;
+ failed = true;
+ }
+
+ tDecoderInit(pCoder, pReq, len);
+ tDecodeDeleteRes(pCoder, pRes);
+ tDecoderClear(pCoder);
+
+ int32_t sz = taosArrayGetSize(pRes->uidList);
+ if (sz == 0 || pRes->affectedRows == 0) {
+ taosArrayDestroy(pRes->uidList);
+ return 0;
+ }
+ SSDataBlock* pDelBlock = createSpecialDataBlock(STREAM_DELETE_DATA);
+ blockDataEnsureCapacity(pDelBlock, sz);
+ pDelBlock->info.rows = sz;
+ pDelBlock->info.version = ver;
+
+ for (int32_t i = 0; i < sz; i++) {
+ // start key column
+ SColumnInfoData* pStartCol = taosArrayGet(pDelBlock->pDataBlock, START_TS_COLUMN_INDEX);
+ colDataAppend(pStartCol, i, (const char*)&pRes->skey, false); // end key column
+ SColumnInfoData* pEndCol = taosArrayGet(pDelBlock->pDataBlock, END_TS_COLUMN_INDEX);
+ colDataAppend(pEndCol, i, (const char*)&pRes->ekey, false);
+ // uid column
+ SColumnInfoData* pUidCol = taosArrayGet(pDelBlock->pDataBlock, UID_COLUMN_INDEX);
+ int64_t* pUid = taosArrayGet(pRes->uidList, i);
+ colDataAppend(pUidCol, i, (const char*)pUid, false);
+
+ colDataAppendNULL(taosArrayGet(pDelBlock->pDataBlock, GROUPID_COLUMN_INDEX), i);
+ colDataAppendNULL(taosArrayGet(pDelBlock->pDataBlock, CALCULATE_START_TS_COLUMN_INDEX), i);
+ colDataAppendNULL(taosArrayGet(pDelBlock->pDataBlock, CALCULATE_END_TS_COLUMN_INDEX), i);
+ }
+
+ taosArrayDestroy(pRes->uidList);
+
+ int32_t* pRef = taosMemoryMalloc(sizeof(int32_t));
+ *pRef = 1;
+
+ void* pIter = NULL;
+ while (1) {
+ pIter = taosHashIterate(pTq->pStreamMeta->pTasks, pIter);
+ if (pIter == NULL) break;
+ SStreamTask* pTask = *(SStreamTask**)pIter;
+ if (pTask->taskLevel != TASK_LEVEL__SOURCE) continue;
+
+ qDebug("delete req enqueue stream task: %d, ver: %" PRId64, pTask->taskId, ver);
+
+ if (!failed) {
+ SStreamRefDataBlock* pRefBlock = taosAllocateQitem(sizeof(SStreamRefDataBlock), DEF_QITEM);
+ pRefBlock->type = STREAM_INPUT__REF_DATA_BLOCK;
+ pRefBlock->pBlock = pDelBlock;
+ pRefBlock->dataRef = pRef;
+ atomic_add_fetch_32(pRefBlock->dataRef, 1);
+
+ if (streamTaskInput(pTask, (SStreamQueueItem*)pRefBlock) < 0) {
+ qError("stream task input del failed, task id %d", pTask->taskId);
+ continue;
+ }
+ if (streamSchedExec(pTask) < 0) {
+ qError("stream task launch failed, task id %d", pTask->taskId);
+ continue;
+ }
+ } else {
+ streamTaskInputFail(pTask);
+ }
+ }
+ int32_t ref = atomic_sub_fetch_32(pRef, 1);
+ ASSERT(ref >= 0);
+ if (ref == 0) {
+ taosMemoryFree(pDelBlock);
+ taosMemoryFree(pRef);
+ }
+
+#if 0
+ SStreamDataBlock* pStreamBlock = taosAllocateQitem(sizeof(SStreamDataBlock), DEF_QITEM);
+ pStreamBlock->type = STREAM_INPUT__DATA_BLOCK;
+ pStreamBlock->blocks = taosArrayInit(0, sizeof(SSDataBlock));
+ SSDataBlock block = {0};
+ assignOneDataBlock(&block, pDelBlock);
+ block.info.type = STREAM_DELETE_DATA;
+ taosArrayPush(pStreamBlock->blocks, &block);
+
+ if (!failed) {
+ if (streamTaskInput(pTask, (SStreamQueueItem*)pStreamBlock) < 0) {
+ qError("stream task input del failed, task id %d", pTask->taskId);
+ continue;
+ }
+
+ if (streamSchedExec(pTask) < 0) {
+ qError("stream task launch failed, task id %d", pTask->taskId);
+ continue;
+ }
+ } else {
+ streamTaskInputFail(pTask);
+ }
+ }
+ blockDataDestroy(pDelBlock);
+#endif
+
+ return 0;
+}
+
+int32_t tqProcessSubmitReq(STQ* pTq, SSubmitReq* pReq, int64_t ver) {
void* pIter = NULL;
bool failed = false;
SStreamDataSubmit* pSubmit = NULL;
@@ -841,6 +1187,7 @@ int32_t tqProcessTaskRetrieveReq(STQ* pTq, SRpcMsg* pMsg) {
SDecoder decoder;
tDecoderInit(&decoder, msgBody, msgLen);
tDecodeStreamRetrieveReq(&decoder, &req);
+ tDecoderClear(&decoder);
int32_t taskId = req.dstTaskId;
SStreamTask* pTask = streamMetaGetTask(pTq->pStreamMeta, taskId);
if (pTask) {
@@ -849,6 +1196,7 @@ int32_t tqProcessTaskRetrieveReq(STQ* pTq, SRpcMsg* pMsg) {
.code = 0,
};
streamProcessRetrieveReq(pTask, &req, &rsp);
+ tDeleteStreamRetrieveReq(&req);
return 0;
} else {
return -1;
diff --git a/source/dnode/vnode/src/tq/tqExec.c b/source/dnode/vnode/src/tq/tqExec.c
index 435bbb77b8cab0b6c631f98e30444501ae8faf03..58d051bec1aaa1d6d88309eb4ef17f2a9c736d4e 100644
--- a/source/dnode/vnode/src/tq/tqExec.c
+++ b/source/dnode/vnode/src/tq/tqExec.c
@@ -15,7 +15,7 @@
#include "tq.h"
-static int32_t tqAddBlockDataToRsp(const SSDataBlock* pBlock, SMqDataRsp* pRsp, int32_t numOfCols) {
+int32_t tqAddBlockDataToRsp(const SSDataBlock* pBlock, SMqDataRsp* pRsp, int32_t numOfCols) {
int32_t dataStrLen = sizeof(SRetrieveTableRsp) + blockGetEncodeSize(pBlock);
void* buf = taosMemoryCalloc(1, dataStrLen);
if (buf == NULL) return -1;
@@ -60,18 +60,20 @@ static int32_t tqAddTbNameToRsp(const STQ* pTq, int64_t uid, SMqDataRsp* pRsp) {
return 0;
}
-int64_t tqScan(STQ* pTq, const STqHandle* pHandle, SMqDataRsp* pRsp, STqOffsetVal* pOffset) {
+int32_t tqScanData(STQ* pTq, const STqHandle* pHandle, SMqDataRsp* pRsp, STqOffsetVal* pOffset) {
const STqExecHandle* pExec = &pHandle->execHandle;
- qTaskInfo_t task = pExec->execCol.task;
+ ASSERT(pExec->subType == TOPIC_SUB_TYPE__COLUMN);
+
+ qTaskInfo_t task = pExec->task;
- if (qStreamPrepareScan(task, pOffset) < 0) {
+ if (qStreamPrepareScan(task, pOffset, pHandle->execHandle.subType) < 0) {
tqDebug("prepare scan failed, return");
if (pOffset->type == TMQ_OFFSET__LOG) {
pRsp->rspOffset = *pOffset;
return 0;
} else {
tqOffsetResetToLog(pOffset, pHandle->snapshotVer);
- if (qStreamPrepareScan(task, pOffset) < 0) {
+ if (qStreamPrepareScan(task, pOffset, pHandle->execHandle.subType) < 0) {
tqDebug("prepare scan failed, return");
pRsp->rspOffset = *pOffset;
return 0;
@@ -83,124 +85,148 @@ int64_t tqScan(STQ* pTq, const STqHandle* pHandle, SMqDataRsp* pRsp, STqOffsetVa
while (1) {
SSDataBlock* pDataBlock = NULL;
uint64_t ts = 0;
- tqDebug("task start to execute");
+ tqDebug("tmq task start to execute");
if (qExecTask(task, &pDataBlock, &ts) < 0) {
ASSERT(0);
}
- tqDebug("task execute end, get %p", pDataBlock);
+ tqDebug("tmq task executed, get %p", pDataBlock);
- if (pDataBlock != NULL) {
- if (pRsp->withTbName) {
- if (pOffset->type == TMQ_OFFSET__LOG) {
- int64_t uid = pExec->pExecReader->msgIter.uid;
- if (tqAddTbNameToRsp(pTq, uid, pRsp) < 0) {
- continue;
- }
- } else {
- pRsp->withTbName = 0;
- }
- }
- tqAddBlockDataToRsp(pDataBlock, pRsp, pExec->numOfCols);
- pRsp->blockNum++;
- if (pOffset->type == TMQ_OFFSET__LOG) {
- continue;
- } else {
- rowCnt += pDataBlock->info.rows;
- if (rowCnt <= 4096) continue;
- }
+ if (pDataBlock == NULL) {
+ break;
}
- if (pRsp->blockNum == 0 && pOffset->type == TMQ_OFFSET__SNAPSHOT_DATA) {
- tqDebug("vgId: %d, tsdb consume over, switch to wal, ver %" PRId64, TD_VID(pTq->pVnode),
- pHandle->snapshotVer + 1);
- tqOffsetResetToLog(pOffset, pHandle->snapshotVer);
- qStreamPrepareScan(task, pOffset);
- continue;
- }
-
- void* meta = qStreamExtractMetaMsg(task);
- if (meta != NULL) {
- // tq add meta to rsp
- }
+ tqAddBlockDataToRsp(pDataBlock, pRsp, pExec->numOfCols);
+ pRsp->blockNum++;
- if (qStreamExtractOffset(task, &pRsp->rspOffset) < 0) {
- ASSERT(0);
+ if (pOffset->type == TMQ_OFFSET__SNAPSHOT_DATA) {
+ rowCnt += pDataBlock->info.rows;
+ if (rowCnt >= 4096) break;
}
+ }
- ASSERT(pRsp->rspOffset.type != 0);
+ if (qStreamExtractOffset(task, &pRsp->rspOffset) < 0) {
+ ASSERT(0);
+ return -1;
+ }
+ ASSERT(pRsp->rspOffset.type != 0);
-#if 0
- if (pRsp->reqOffset.type == TMQ_OFFSET__LOG) {
- if (pRsp->blockNum > 0) {
- ASSERT(pRsp->rspOffset.version > pRsp->reqOffset.version);
- } else {
- ASSERT(pRsp->rspOffset.version >= pRsp->reqOffset.version);
- }
+ if (pRsp->withTbName) {
+ if (pRsp->rspOffset.type == TMQ_OFFSET__LOG) {
+ int64_t uid = pExec->pExecReader->msgIter.uid;
+ tqAddTbNameToRsp(pTq, uid, pRsp);
+ } else {
+ pRsp->withTbName = false;
}
-#endif
-
- tqDebug("task exec exited");
- break;
}
+ ASSERT(pRsp->withSchema == false);
return 0;
}
-#if 0
-int32_t tqScanSnapshot(STQ* pTq, const STqExecHandle* pExec, SMqDataRsp* pRsp, STqOffsetVal offset, int32_t workerId) {
- ASSERT(pExec->subType == TOPIC_SUB_TYPE__COLUMN);
- qTaskInfo_t task = pExec->execCol.task[workerId];
+int32_t tqScan(STQ* pTq, const STqHandle* pHandle, STaosxRsp* pRsp, SMqMetaRsp* pMetaRsp, STqOffsetVal* pOffset) {
+ const STqExecHandle* pExec = &pHandle->execHandle;
+ qTaskInfo_t task = pExec->task;
- if (qStreamPrepareTsdbScan(task, offset.uid, offset.ts) < 0) {
- ASSERT(0);
+ if (qStreamPrepareScan(task, pOffset, pHandle->execHandle.subType) < 0) {
+ tqDebug("prepare scan failed, return");
+ if (pOffset->type == TMQ_OFFSET__LOG) {
+ pRsp->rspOffset = *pOffset;
+ return 0;
+ } else {
+ tqOffsetResetToLog(pOffset, pHandle->snapshotVer);
+ if (qStreamPrepareScan(task, pOffset, pHandle->execHandle.subType) < 0) {
+ tqDebug("prepare scan failed, return");
+ pRsp->rspOffset = *pOffset;
+ return 0;
+ }
+ }
}
int32_t rowCnt = 0;
while (1) {
SSDataBlock* pDataBlock = NULL;
uint64_t ts = 0;
+ tqDebug("tmqsnap task start to execute");
if (qExecTask(task, &pDataBlock, &ts) < 0) {
ASSERT(0);
}
- if (pDataBlock == NULL) break;
+ tqDebug("tmqsnap task execute end, get %p", pDataBlock);
- ASSERT(pDataBlock->info.rows != 0);
- ASSERT(taosArrayGetSize(pDataBlock->pDataBlock) != 0);
+ if (pDataBlock != NULL) {
+ if (pRsp->withTbName) {
+ int64_t uid = 0;
+ if (pOffset->type == TMQ_OFFSET__LOG) {
+ uid = pExec->pExecReader->msgIter.uid;
+ if (tqAddTbNameToRsp(pTq, uid, (SMqDataRsp*)pRsp) < 0) {
+ continue;
+ }
+ } else {
+ char* tbName = strdup(qExtractTbnameFromTask(task));
+ taosArrayPush(pRsp->blockTbName, &tbName);
+ }
+ }
+ if (pRsp->withSchema) {
+ if (pOffset->type == TMQ_OFFSET__LOG) {
+ tqAddBlockSchemaToRsp(pExec, (SMqDataRsp*)pRsp);
+ } else {
+ SSchemaWrapper* pSW = tCloneSSchemaWrapper(qExtractSchemaFromTask(task));
+ taosArrayPush(pRsp->blockSchema, &pSW);
+ }
+ }
- tqAddBlockDataToRsp(pDataBlock, pRsp);
+ tqAddBlockDataToRsp(pDataBlock, (SMqDataRsp*)pRsp, taosArrayGetSize(pDataBlock->pDataBlock));
+ pRsp->blockNum++;
+ if (pOffset->type == TMQ_OFFSET__LOG) {
+ continue;
+ } else {
+ rowCnt += pDataBlock->info.rows;
+ if (rowCnt <= 4096) continue;
+ }
+ }
- if (pRsp->withTbName) {
- pRsp->withTbName = 0;
-#if 0
- int64_t uid;
- int64_t ts;
- if (qGetStreamScanStatus(task, &uid, &ts) < 0) {
- ASSERT(0);
+ if (pDataBlock == NULL && pOffset->type == TMQ_OFFSET__SNAPSHOT_DATA) {
+ if (qStreamExtractPrepareUid(task) != 0) {
+ continue;
}
- tqAddTbNameToRsp(pTq, uid, pRsp);
-#endif
+ tqDebug("tmqsnap vgId: %d, tsdb consume over, switch to wal, ver %" PRId64, TD_VID(pTq->pVnode),
+ pHandle->snapshotVer + 1);
+ break;
}
- pRsp->blockNum++;
- rowCnt += pDataBlock->info.rows;
- if (rowCnt >= 4096) break;
+ if (pRsp->blockNum > 0) {
+ tqDebug("tmqsnap task exec exited, get data");
+ break;
+ }
+
+ SMqMetaRsp* tmp = qStreamExtractMetaMsg(task);
+ if (tmp->rspOffset.type == TMQ_OFFSET__SNAPSHOT_DATA) {
+ tqOffsetResetToData(pOffset, tmp->rspOffset.uid, tmp->rspOffset.ts);
+ qStreamPrepareScan(task, pOffset, pHandle->execHandle.subType);
+ tmp->rspOffset.type = TMQ_OFFSET__SNAPSHOT_META;
+ tqDebug("tmqsnap task exec change to get data");
+ continue;
+ }
+
+ *pMetaRsp = *tmp;
+ tqDebug("tmqsnap task exec exited, get meta");
+
+ tqDebug("task exec exited");
+ break;
}
- int64_t uid;
- int64_t ts;
- if (qGetStreamScanStatus(task, &uid, &ts) < 0) {
+
+ if (qStreamExtractOffset(task, &pRsp->rspOffset) < 0) {
ASSERT(0);
}
- tqOffsetResetToData(&pRsp->rspOffset, uid, ts);
+ ASSERT(pRsp->rspOffset.type != 0);
return 0;
}
-#endif
-int32_t tqLogScanExec(STQ* pTq, STqExecHandle* pExec, SSubmitReq* pReq, SMqDataRsp* pRsp) {
+int32_t tqTaosxScanLog(STQ* pTq, STqHandle* pHandle, SSubmitReq* pReq, STaosxRsp* pRsp) {
+ STqExecHandle* pExec = &pHandle->execHandle;
ASSERT(pExec->subType != TOPIC_SUB_TYPE__COLUMN);
if (pExec->subType == TOPIC_SUB_TYPE__TABLE) {
- pRsp->withSchema = 1;
STqReader* pReader = pExec->pExecReader;
tqReaderSetDataMsg(pReader, pReq, 0);
while (tqNextDataBlock(pReader)) {
@@ -210,18 +236,32 @@ int32_t tqLogScanExec(STQ* pTq, STqExecHandle* pExec, SSubmitReq* pReq, SMqDataR
}
if (pRsp->withTbName) {
int64_t uid = pExec->pExecReader->msgIter.uid;
- if (tqAddTbNameToRsp(pTq, uid, pRsp) < 0) {
+ if (tqAddTbNameToRsp(pTq, uid, (SMqDataRsp*)pRsp) < 0) {
blockDataFreeRes(&block);
continue;
}
}
- tqAddBlockDataToRsp(&block, pRsp, taosArrayGetSize(block.pDataBlock));
+ if (pHandle->fetchMeta) {
+ SSubmitBlk* pBlk = pReader->pBlock;
+ int32_t schemaLen = htonl(pBlk->schemaLen);
+ if (schemaLen > 0) {
+ if (pRsp->createTableNum == 0) {
+ pRsp->createTableLen = taosArrayInit(0, sizeof(int32_t));
+ pRsp->createTableReq = taosArrayInit(0, sizeof(void*));
+ }
+ void* createReq = taosMemoryCalloc(1, schemaLen);
+ memcpy(createReq, pBlk->data, schemaLen);
+ taosArrayPush(pRsp->createTableLen, &schemaLen);
+ taosArrayPush(pRsp->createTableReq, &createReq);
+ pRsp->createTableNum++;
+ }
+ }
+ tqAddBlockDataToRsp(&block, (SMqDataRsp*)pRsp, taosArrayGetSize(block.pDataBlock));
blockDataFreeRes(&block);
- tqAddBlockSchemaToRsp(pExec, pRsp);
+ tqAddBlockSchemaToRsp(pExec, (SMqDataRsp*)pRsp);
pRsp->blockNum++;
}
} else if (pExec->subType == TOPIC_SUB_TYPE__DB) {
- pRsp->withSchema = 1;
STqReader* pReader = pExec->pExecReader;
tqReaderSetDataMsg(pReader, pReq, 0);
while (tqNextDataBlockFilterOut(pReader, pExec->execDb.pFilterOutTbUid)) {
@@ -231,14 +271,29 @@ int32_t tqLogScanExec(STQ* pTq, STqExecHandle* pExec, SSubmitReq* pReq, SMqDataR
}
if (pRsp->withTbName) {
int64_t uid = pExec->pExecReader->msgIter.uid;
- if (tqAddTbNameToRsp(pTq, uid, pRsp) < 0) {
+ if (tqAddTbNameToRsp(pTq, uid, (SMqDataRsp*)pRsp) < 0) {
blockDataFreeRes(&block);
continue;
}
}
- tqAddBlockDataToRsp(&block, pRsp, taosArrayGetSize(block.pDataBlock));
+ if (pHandle->fetchMeta) {
+ SSubmitBlk* pBlk = pReader->pBlock;
+ int32_t schemaLen = htonl(pBlk->schemaLen);
+ if (schemaLen > 0) {
+ if (pRsp->createTableNum == 0) {
+ pRsp->createTableLen = taosArrayInit(0, sizeof(int32_t));
+ pRsp->createTableReq = taosArrayInit(0, sizeof(void*));
+ }
+ void* createReq = taosMemoryCalloc(1, schemaLen);
+ memcpy(createReq, pBlk->data, schemaLen);
+ taosArrayPush(pRsp->createTableLen, &schemaLen);
+ taosArrayPush(pRsp->createTableReq, &createReq);
+ pRsp->createTableNum++;
+ }
+ }
+ tqAddBlockDataToRsp(&block, (SMqDataRsp*)pRsp, taosArrayGetSize(block.pDataBlock));
blockDataFreeRes(&block);
- tqAddBlockSchemaToRsp(pExec, pRsp);
+ tqAddBlockSchemaToRsp(pExec, (SMqDataRsp*)pRsp);
pRsp->blockNum++;
}
}
diff --git a/source/dnode/vnode/src/tq/tqMeta.c b/source/dnode/vnode/src/tq/tqMeta.c
index 405bc669bd23c27b2b234d2b60be4ef6def8bc80..c55e1059cf8842dccd7d6553d9cff5448dbc5ba5 100644
--- a/source/dnode/vnode/src/tq/tqMeta.c
+++ b/source/dnode/vnode/src/tq/tqMeta.c
@@ -18,12 +18,25 @@
int32_t tEncodeSTqHandle(SEncoder* pEncoder, const STqHandle* pHandle) {
if (tStartEncode(pEncoder) < 0) return -1;
if (tEncodeCStr(pEncoder, pHandle->subKey) < 0) return -1;
+ if (tEncodeI8(pEncoder, pHandle->fetchMeta) < 0) return -1;
if (tEncodeI64(pEncoder, pHandle->consumerId) < 0) return -1;
if (tEncodeI64(pEncoder, pHandle->snapshotVer) < 0) return -1;
if (tEncodeI32(pEncoder, pHandle->epoch) < 0) return -1;
if (tEncodeI8(pEncoder, pHandle->execHandle.subType) < 0) return -1;
if (pHandle->execHandle.subType == TOPIC_SUB_TYPE__COLUMN) {
if (tEncodeCStr(pEncoder, pHandle->execHandle.execCol.qmsg) < 0) return -1;
+ } else if (pHandle->execHandle.subType == TOPIC_SUB_TYPE__DB) {
+ int32_t size = taosHashGetSize(pHandle->execHandle.execDb.pFilterOutTbUid);
+ if (tEncodeI32(pEncoder, size) < 0) return -1;
+ void* pIter = NULL;
+ pIter = taosHashIterate(pHandle->execHandle.execDb.pFilterOutTbUid, pIter);
+ while (pIter) {
+ int64_t* tbUid = (int64_t*)taosHashGetKey(pIter, NULL);
+ if (tEncodeI64(pEncoder, *tbUid) < 0) return -1;
+ pIter = taosHashIterate(pHandle->execHandle.execDb.pFilterOutTbUid, pIter);
+ }
+ } else if (pHandle->execHandle.subType == TOPIC_SUB_TYPE__TABLE) {
+ if (tEncodeI64(pEncoder, pHandle->execHandle.execTb.suid) < 0) return -1;
}
tEndEncode(pEncoder);
return pEncoder->pos;
@@ -32,12 +45,25 @@ int32_t tEncodeSTqHandle(SEncoder* pEncoder, const STqHandle* pHandle) {
int32_t tDecodeSTqHandle(SDecoder* pDecoder, STqHandle* pHandle) {
if (tStartDecode(pDecoder) < 0) return -1;
if (tDecodeCStrTo(pDecoder, pHandle->subKey) < 0) return -1;
+ if (tDecodeI8(pDecoder, &pHandle->fetchMeta) < 0) return -1;
if (tDecodeI64(pDecoder, &pHandle->consumerId) < 0) return -1;
if (tDecodeI64(pDecoder, &pHandle->snapshotVer) < 0) return -1;
if (tDecodeI32(pDecoder, &pHandle->epoch) < 0) return -1;
if (tDecodeI8(pDecoder, &pHandle->execHandle.subType) < 0) return -1;
if (pHandle->execHandle.subType == TOPIC_SUB_TYPE__COLUMN) {
if (tDecodeCStrAlloc(pDecoder, &pHandle->execHandle.execCol.qmsg) < 0) return -1;
+ } else if (pHandle->execHandle.subType == TOPIC_SUB_TYPE__DB) {
+ pHandle->execHandle.execDb.pFilterOutTbUid =
+ taosHashInit(64, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), false, HASH_NO_LOCK);
+ int32_t size = 0;
+ if (tDecodeI32(pDecoder, &size) < 0) return -1;
+ for (int32_t i = 0; i < size; i++) {
+ int64_t tbUid = 0;
+ if (tDecodeI64(pDecoder, &tbUid) < 0) return -1;
+ taosHashPut(pHandle->execHandle.execDb.pFilterOutTbUid, &tbUid, sizeof(int64_t), NULL, 0);
+ }
+ } else if (pHandle->execHandle.subType == TOPIC_SUB_TYPE__TABLE) {
+ if (tDecodeI64(pDecoder, &pHandle->execHandle.execTb.suid) < 0) return -1;
}
tEndDecode(pDecoder);
return 0;
@@ -91,7 +117,7 @@ int32_t tqMetaSaveCheckInfo(STQ* pTq, const char* key, const void* value, int32_
return -1;
}
- if (tdbTbUpsert(pTq->pExecStore, key, strlen(key), value, vLen, &txn) < 0) {
+ if (tdbTbUpsert(pTq->pCheckStore, key, strlen(key), value, vLen, &txn) < 0) {
return -1;
}
@@ -249,27 +275,47 @@ int32_t tqMetaRestoreHandle(STQ* pTq) {
}
walRefVer(handle.pRef, handle.snapshotVer);
+ SReadHandle reader = {
+ .meta = pTq->pVnode->pMeta,
+ .vnode = pTq->pVnode,
+ .initTableReader = true,
+ .initTqReader = true,
+ .version = handle.snapshotVer,
+ };
+
if (handle.execHandle.subType == TOPIC_SUB_TYPE__COLUMN) {
- SReadHandle reader = {
- .meta = pTq->pVnode->pMeta,
- .vnode = pTq->pVnode,
- .initTableReader = true,
- .initTqReader = true,
- .version = handle.snapshotVer,
- };
-
- handle.execHandle.execCol.task = qCreateQueueExecTaskInfo(
+ handle.execHandle.task = qCreateQueueExecTaskInfo(
handle.execHandle.execCol.qmsg, &reader, &handle.execHandle.numOfCols, &handle.execHandle.pSchemaWrapper);
- ASSERT(handle.execHandle.execCol.task);
+ ASSERT(handle.execHandle.task);
void* scanner = NULL;
- qExtractStreamScanner(handle.execHandle.execCol.task, &scanner);
+ qExtractStreamScanner(handle.execHandle.task, &scanner);
ASSERT(scanner);
handle.execHandle.pExecReader = qExtractReaderFromStreamScanner(scanner);
ASSERT(handle.execHandle.pExecReader);
- } else {
+ } else if (handle.execHandle.subType == TOPIC_SUB_TYPE__DB) {
handle.pWalReader = walOpenReader(pTq->pVnode->pWal, NULL);
- handle.execHandle.execDb.pFilterOutTbUid =
- taosHashInit(64, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), false, HASH_NO_LOCK);
+ handle.execHandle.pExecReader = tqOpenReader(pTq->pVnode);
+
+ buildSnapContext(reader.meta, reader.version, 0, handle.execHandle.subType, handle.fetchMeta,
+ (SSnapContext**)(&reader.sContext));
+ handle.execHandle.task = qCreateQueueExecTaskInfo(NULL, &reader, NULL, NULL);
+ } else if (handle.execHandle.subType == TOPIC_SUB_TYPE__TABLE) {
+ handle.pWalReader = walOpenReader(pTq->pVnode->pWal, NULL);
+
+ SArray* tbUidList = taosArrayInit(0, sizeof(int64_t));
+ vnodeGetCtbIdList(pTq->pVnode, handle.execHandle.execTb.suid, tbUidList);
+ tqDebug("vgId:%d, tq try to get all ctb, suid:%" PRId64, pTq->pVnode->config.vgId, handle.execHandle.execTb.suid);
+ for (int32_t i = 0; i < taosArrayGetSize(tbUidList); i++) {
+ int64_t tbUid = *(int64_t*)taosArrayGet(tbUidList, i);
+ tqDebug("vgId:%d, idx %d, uid:%" PRId64, TD_VID(pTq->pVnode), i, tbUid);
+ }
+ handle.execHandle.pExecReader = tqOpenReader(pTq->pVnode);
+ tqReaderSetTbUidList(handle.execHandle.pExecReader, tbUidList);
+ taosArrayDestroy(tbUidList);
+
+ buildSnapContext(reader.meta, reader.version, handle.execHandle.execTb.suid, handle.execHandle.subType,
+ handle.fetchMeta, (SSnapContext**)(&reader.sContext));
+ handle.execHandle.task = qCreateQueueExecTaskInfo(NULL, &reader, NULL, NULL);
}
tqDebug("tq restore %s consumer %" PRId64 " vgId:%d", handle.subKey, handle.consumerId, TD_VID(pTq->pVnode));
taosHashPut(pTq->pHandle, pKey, kLen, &handle, sizeof(STqHandle));
diff --git a/source/dnode/vnode/src/tq/tqOffset.c b/source/dnode/vnode/src/tq/tqOffset.c
index 5c1d5d65b4f74297fcc0db81d16788f15ee58ab7..7097591c35820398547bc6093ee74c4e89a02ad8 100644
--- a/source/dnode/vnode/src/tq/tqOffset.c
+++ b/source/dnode/vnode/src/tq/tqOffset.c
@@ -145,8 +145,10 @@ int32_t tqOffsetCommitFile(STqOffsetStore* pStore) {
ASSERT(0);
tqError("write offset incomplete, len %d, write len %" PRId64, bodyLen, writeLen);
taosHashCancelIterate(pStore->pHash, pIter);
+ taosMemoryFree(buf);
return -1;
}
+ taosMemoryFree(buf);
}
// close and rename file
taosCloseFile(&pFile);
diff --git a/source/dnode/vnode/src/tq/tqPush.c b/source/dnode/vnode/src/tq/tqPush.c
index ed7fa80c476fff2d6436232b0e610f0b6f61f1cd..dcfb07f0ff51f9e820d75ae31d8445ec4e2f630f 100644
--- a/source/dnode/vnode/src/tq/tqPush.c
+++ b/source/dnode/vnode/src/tq/tqPush.c
@@ -213,20 +213,116 @@ int32_t tqPushMsgNew(STQ* pTq, void* msg, int32_t msgLen, tmsg_t msgType, int64_
#endif
int tqPushMsg(STQ* pTq, void* msg, int32_t msgLen, tmsg_t msgType, int64_t ver) {
- if (vnodeIsRoleLeader(pTq->pVnode) && msgType == TDMT_VND_SUBMIT) {
- if (taosHashGetSize(pTq->pStreamMeta->pTasks) == 0) return 0;
+ tqDebug("vgId:%d tq push msg ver %ld, type: %s", pTq->pVnode->config.vgId, ver, TMSG_INFO(msgType));
- void* data = taosMemoryMalloc(msgLen);
- if (data == NULL) {
- terrno = TSDB_CODE_OUT_OF_MEMORY;
- tqError("failed to copy data for stream since out of memory");
- return -1;
+ if (msgType == TDMT_VND_SUBMIT) {
+ // lock push mgr to avoid potential msg lost
+ taosWLockLatch(&pTq->pushLock);
+ tqDebug("vgId:%d push handle num %d", pTq->pVnode->config.vgId, taosHashGetSize(pTq->pPushMgr));
+ if (taosHashGetSize(pTq->pPushMgr) != 0) {
+ SArray* cachedKeys = taosArrayInit(0, sizeof(void*));
+ SArray* cachedKeyLens = taosArrayInit(0, sizeof(size_t));
+ void* data = taosMemoryMalloc(msgLen);
+ if (data == NULL) {
+ terrno = TSDB_CODE_OUT_OF_MEMORY;
+ tqError("failed to copy data for stream since out of memory");
+ return -1;
+ }
+ memcpy(data, msg, msgLen);
+ SSubmitReq* pReq = (SSubmitReq*)data;
+ pReq->version = ver;
+
+ void* pIter = NULL;
+ while (1) {
+ pIter = taosHashIterate(pTq->pPushMgr, pIter);
+ if (pIter == NULL) break;
+ STqPushEntry* pPushEntry = *(STqPushEntry**)pIter;
+
+ STqHandle* pHandle = taosHashGet(pTq->pHandle, pPushEntry->subKey, strlen(pPushEntry->subKey));
+ if (pHandle == NULL) {
+ tqDebug("vgId:%d cannot find handle %s", pTq->pVnode->config.vgId, pPushEntry->subKey);
+ continue;
+ }
+ if (pPushEntry->dataRsp.reqOffset.version > ver) {
+ tqDebug("vgId:%d push entry req version %ld, while push version %ld, skip", pTq->pVnode->config.vgId,
+ pPushEntry->dataRsp.reqOffset.version, ver);
+ continue;
+ }
+ STqExecHandle* pExec = &pHandle->execHandle;
+ qTaskInfo_t task = pExec->task;
+
+ SMqDataRsp* pRsp = &pPushEntry->dataRsp;
+
+ // prepare scan mem data
+ qStreamScanMemData(task, pReq);
+
+ // exec
+ while (1) {
+ SSDataBlock* pDataBlock = NULL;
+ uint64_t ts = 0;
+ if (qExecTask(task, &pDataBlock, &ts) < 0) {
+ ASSERT(0);
+ }
+
+ if (pDataBlock == NULL) {
+ break;
+ }
+
+ tqAddBlockDataToRsp(pDataBlock, pRsp, pExec->numOfCols);
+ pRsp->blockNum++;
+ }
+
+ tqDebug("vgId:%d tq handle push, subkey: %s, block num: %d", pTq->pVnode->config.vgId, pPushEntry->subKey,
+ pRsp->blockNum);
+ if (pRsp->blockNum > 0) {
+ // set offset
+ tqOffsetResetToLog(&pRsp->rspOffset, ver);
+ // remove from hash
+ size_t kLen;
+ void* key = taosHashGetKey(pIter, &kLen);
+ void* keyCopy = taosMemoryMalloc(kLen);
+ memcpy(keyCopy, key, kLen);
+
+ taosArrayPush(cachedKeys, &keyCopy);
+ taosArrayPush(cachedKeyLens, &kLen);
+
+ tqPushDataRsp(pTq, pPushEntry);
+ }
+ }
+ // delete entry
+ for (int32_t i = 0; i < taosArrayGetSize(cachedKeys); i++) {
+ void* key = taosArrayGetP(cachedKeys, i);
+ size_t kLen = *(size_t*)taosArrayGet(cachedKeyLens, i);
+ if (taosHashRemove(pTq->pPushMgr, key, kLen) != 0) {
+ ASSERT(0);
+ }
+ }
+ taosArrayDestroyP(cachedKeys, (FDelete)taosMemoryFree);
+ taosArrayDestroy(cachedKeyLens);
}
- memcpy(data, msg, msgLen);
- SSubmitReq* pReq = (SSubmitReq*)data;
- pReq->version = ver;
+ // unlock
+ taosWUnLockLatch(&pTq->pushLock);
+ }
+
+ if (vnodeIsRoleLeader(pTq->pVnode)) {
+ if (msgType == TDMT_VND_SUBMIT) {
+ if (taosHashGetSize(pTq->pStreamMeta->pTasks) == 0) return 0;
+
+ void* data = taosMemoryMalloc(msgLen);
+ if (data == NULL) {
+ terrno = TSDB_CODE_OUT_OF_MEMORY;
+ tqError("failed to copy data for stream since out of memory");
+ return -1;
+ }
+ memcpy(data, msg, msgLen);
+ SSubmitReq* pReq = (SSubmitReq*)data;
+ pReq->version = ver;
- tqProcessStreamTrigger(pTq, data, ver);
+ tqProcessSubmitReq(pTq, data, ver);
+ }
+ if (msgType == TDMT_VND_DELETE) {
+ tqProcessDelReq(pTq, POINTER_SHIFT(msg, sizeof(SMsgHead)), msgLen - sizeof(SMsgHead), ver);
+ }
}
return 0;
diff --git a/source/dnode/vnode/src/tq/tqRead.c b/source/dnode/vnode/src/tq/tqRead.c
index e6a331f20e1943a3e40b672a0ef214322db09c5c..3bd31e66608a0dcda4aad43edb4e8a885e22b81a 100644
--- a/source/dnode/vnode/src/tq/tqRead.c
+++ b/source/dnode/vnode/src/tq/tqRead.c
@@ -15,6 +15,176 @@
#include "tq.h"
+bool isValValidForTable(STqHandle* pHandle, SWalCont* pHead) {
+ if (pHandle->execHandle.subType != TOPIC_SUB_TYPE__TABLE) {
+ return true;
+ }
+
+ int16_t msgType = pHead->msgType;
+ char* body = pHead->body;
+ int32_t bodyLen = pHead->bodyLen;
+
+ int64_t tbSuid = pHandle->execHandle.execTb.suid;
+ int64_t realTbSuid = 0;
+ SDecoder coder;
+ void* data = POINTER_SHIFT(body, sizeof(SMsgHead));
+ int32_t len = bodyLen - sizeof(SMsgHead);
+ tDecoderInit(&coder, data, len);
+
+ if (msgType == TDMT_VND_CREATE_STB || msgType == TDMT_VND_ALTER_STB) {
+ SVCreateStbReq req = {0};
+ if (tDecodeSVCreateStbReq(&coder, &req) < 0) {
+ goto end;
+ }
+ realTbSuid = req.suid;
+ } else if (msgType == TDMT_VND_DROP_STB) {
+ SVDropStbReq req = {0};
+ if (tDecodeSVDropStbReq(&coder, &req) < 0) {
+ goto end;
+ }
+ realTbSuid = req.suid;
+ } else if (msgType == TDMT_VND_CREATE_TABLE) {
+ SVCreateTbBatchReq req = {0};
+ if (tDecodeSVCreateTbBatchReq(&coder, &req) < 0) {
+ goto end;
+ }
+
+ int32_t needRebuild = 0;
+ SVCreateTbReq* pCreateReq = NULL;
+ for (int32_t iReq = 0; iReq < req.nReqs; iReq++) {
+ pCreateReq = req.pReqs + iReq;
+ if (pCreateReq->type == TSDB_CHILD_TABLE && pCreateReq->ctb.suid == tbSuid) {
+ needRebuild++;
+ }
+ }
+ if (needRebuild == 0) {
+ // do nothing
+ } else if (needRebuild == req.nReqs) {
+ realTbSuid = tbSuid;
+ } else {
+ realTbSuid = tbSuid;
+ SVCreateTbBatchReq reqNew = {0};
+ reqNew.pArray = taosArrayInit(req.nReqs, sizeof(struct SVCreateTbReq));
+ for (int32_t iReq = 0; iReq < req.nReqs; iReq++) {
+ pCreateReq = req.pReqs + iReq;
+ if (pCreateReq->type == TSDB_CHILD_TABLE && pCreateReq->ctb.suid == tbSuid) {
+ reqNew.nReqs++;
+ taosArrayPush(reqNew.pArray, pCreateReq);
+ }
+ }
+
+ int tlen;
+ int32_t ret = 0;
+ tEncodeSize(tEncodeSVCreateTbBatchReq, &reqNew, tlen, ret);
+ void* buf = taosMemoryMalloc(tlen);
+ if (NULL == buf) {
+ taosArrayDestroy(reqNew.pArray);
+ for (int32_t iReq = 0; iReq < req.nReqs; iReq++) {
+ pCreateReq = req.pReqs + iReq;
+ taosMemoryFreeClear(pCreateReq->comment);
+ if (pCreateReq->type == TSDB_CHILD_TABLE) {
+ taosArrayDestroy(pCreateReq->ctb.tagName);
+ }
+ }
+ goto end;
+ }
+ SEncoder coderNew = {0};
+ tEncoderInit(&coderNew, buf, tlen - sizeof(SMsgHead));
+ tEncodeSVCreateTbBatchReq(&coderNew, &reqNew);
+ tEncoderClear(&coderNew);
+ memcpy(pHead->body + sizeof(SMsgHead), buf, tlen);
+ pHead->bodyLen = tlen + sizeof(SMsgHead);
+ taosMemoryFree(buf);
+ taosArrayDestroy(reqNew.pArray);
+ }
+
+ for (int32_t iReq = 0; iReq < req.nReqs; iReq++) {
+ pCreateReq = req.pReqs + iReq;
+ taosMemoryFreeClear(pCreateReq->comment);
+ if (pCreateReq->type == TSDB_CHILD_TABLE) {
+ taosArrayDestroy(pCreateReq->ctb.tagName);
+ }
+ }
+ } else if (msgType == TDMT_VND_ALTER_TABLE) {
+ SVAlterTbReq req = {0};
+
+ if (tDecodeSVAlterTbReq(&coder, &req) < 0) {
+ goto end;
+ }
+
+ SMetaReader mr = {0};
+ metaReaderInit(&mr, pHandle->execHandle.pExecReader->pVnodeMeta, 0);
+
+ if (metaGetTableEntryByName(&mr, req.tbName) < 0) {
+ metaReaderClear(&mr);
+ goto end;
+ }
+ realTbSuid = mr.me.ctbEntry.suid;
+ metaReaderClear(&mr);
+ } else if (msgType == TDMT_VND_DROP_TABLE) {
+ SVDropTbBatchReq req = {0};
+
+ if (tDecodeSVDropTbBatchReq(&coder, &req) < 0) {
+ goto end;
+ }
+
+ int32_t needRebuild = 0;
+ SVDropTbReq* pDropReq = NULL;
+ for (int32_t iReq = 0; iReq < req.nReqs; iReq++) {
+ pDropReq = req.pReqs + iReq;
+
+ if (pDropReq->suid == tbSuid) {
+ needRebuild++;
+ }
+ }
+ if (needRebuild == 0) {
+ // do nothing
+ } else if (needRebuild == req.nReqs) {
+ realTbSuid = tbSuid;
+ } else {
+ realTbSuid = tbSuid;
+ SVDropTbBatchReq reqNew = {0};
+ reqNew.pArray = taosArrayInit(req.nReqs, sizeof(SVDropTbReq));
+ for (int32_t iReq = 0; iReq < req.nReqs; iReq++) {
+ pDropReq = req.pReqs + iReq;
+ if (pDropReq->suid == tbSuid) {
+ reqNew.nReqs++;
+ taosArrayPush(reqNew.pArray, pDropReq);
+ }
+ }
+
+ int tlen;
+ int32_t ret = 0;
+ tEncodeSize(tEncodeSVDropTbBatchReq, &reqNew, tlen, ret);
+ void* buf = taosMemoryMalloc(tlen);
+ if (NULL == buf) {
+ taosArrayDestroy(reqNew.pArray);
+ goto end;
+ }
+ SEncoder coderNew = {0};
+ tEncoderInit(&coderNew, buf, tlen - sizeof(SMsgHead));
+ tEncodeSVDropTbBatchReq(&coderNew, &reqNew);
+ tEncoderClear(&coderNew);
+ memcpy(pHead->body + sizeof(SMsgHead), buf, tlen);
+ pHead->bodyLen = tlen + sizeof(SMsgHead);
+ taosMemoryFree(buf);
+ taosArrayDestroy(reqNew.pArray);
+ }
+ } else if (msgType == TDMT_VND_DELETE) {
+ SDeleteRes req = {0};
+ if (tDecodeDeleteRes(&coder, &req) < 0) {
+ goto end;
+ }
+ realTbSuid = req.suid;
+ } else {
+ ASSERT(0);
+ }
+
+end:
+ tDecoderClear(&coder);
+ return tbSuid == realTbSuid;
+}
+
int64_t tqFetchLog(STQ* pTq, STqHandle* pHandle, int64_t* fetchOffset, SWalCkHead** ppCkHead) {
int32_t code = 0;
taosThreadMutexLock(&pHandle->pWalReader->mutex);
@@ -53,9 +223,11 @@ int64_t tqFetchLog(STQ* pTq, STqHandle* pHandle, int64_t* fetchOffset, SWalCkHea
code = -1;
goto END;
}
- *fetchOffset = offset;
- code = 0;
- goto END;
+ if (isValValidForTable(pHandle, pHead)) {
+ *fetchOffset = offset;
+ code = 0;
+ goto END;
+ }
}
}
code = walSkipFetchBody(pHandle->pWalReader, *ppCkHead);
@@ -142,14 +314,18 @@ int32_t tqNextBlock(STqReader* pReader, SFetchRet* ret) {
return -1;
}
void* body = pReader->pWalReader->pHead->head.body;
+#if 0
if (pReader->pWalReader->pHead->head.msgType != TDMT_VND_SUBMIT) {
// TODO do filter
ret->fetchType = FETCH_TYPE__META;
ret->meta = pReader->pWalReader->pHead->head.body;
return 0;
} else {
- tqReaderSetDataMsg(pReader, body, pReader->pWalReader->pHead->head.version);
+#endif
+ tqReaderSetDataMsg(pReader, body, pReader->pWalReader->pHead->head.version);
+#if 0
}
+#endif
}
while (tqNextDataBlock(pReader)) {
@@ -161,6 +337,7 @@ int32_t tqNextBlock(STqReader* pReader, SFetchRet* ret) {
continue;
}
ret->fetchType = FETCH_TYPE__DATA;
+ tqDebug("return data rows %d", ret->data.info.rows);
return 0;
}
@@ -168,14 +345,14 @@ int32_t tqNextBlock(STqReader* pReader, SFetchRet* ret) {
ret->offset.type = TMQ_OFFSET__LOG;
ret->offset.version = pReader->ver;
ASSERT(pReader->ver >= 0);
- ret->fetchType = FETCH_TYPE__NONE;
+ ret->fetchType = FETCH_TYPE__SEP;
tqDebug("return offset %" PRId64 ", processed finish", ret->offset.version);
return 0;
}
}
}
-int32_t tqReaderSetDataMsg(STqReader* pReader, SSubmitReq* pMsg, int64_t ver) {
+int32_t tqReaderSetDataMsg(STqReader* pReader, const SSubmitReq* pMsg, int64_t ver) {
pReader->pMsg = pMsg;
if (tInitSubmitMsgIter(pMsg, &pReader->msgIter) < 0) return -1;
@@ -398,7 +575,7 @@ int32_t tqUpdateTbUidList(STQ* pTq, const SArray* tbUidList, bool isAdd) {
if (pIter == NULL) break;
STqHandle* pExec = (STqHandle*)pIter;
if (pExec->execHandle.subType == TOPIC_SUB_TYPE__COLUMN) {
- int32_t code = qUpdateQualifiedTableId(pExec->execHandle.execCol.task, tbUidList, isAdd);
+ int32_t code = qUpdateQualifiedTableId(pExec->execHandle.task, tbUidList, isAdd);
ASSERT(code == 0);
} else if (pExec->execHandle.subType == TOPIC_SUB_TYPE__DB) {
if (!isAdd) {
diff --git a/source/dnode/vnode/src/tq/tqSink.c b/source/dnode/vnode/src/tq/tqSink.c
index 522bf46aa1fb9d28225f3118b9b6e1bed7a6cd75..16c2a5d033dc6e38497ba7e949817e421fcd87f6 100644
--- a/source/dnode/vnode/src/tq/tqSink.c
+++ b/source/dnode/vnode/src/tq/tqSink.c
@@ -48,7 +48,7 @@ int32_t tqBuildDeleteReq(SVnode* pVnode, const char* stbFullName, const SSDataBl
return 0;
}
-SSubmitReq* tqBlockToSubmit(SVnode* pVnode, const SArray* pBlocks, const STSchema* pTSchema, bool createTb,
+SSubmitReq* tqBlockToSubmit(SVnode* pVnode, const SArray* pBlocks, const STSchema* pTSchema, SSchemaWrapper* pTagSchemaWrapper, bool createTb,
int64_t suid, const char* stbFullName, SBatchDeleteReq* pDeleteReq) {
SSubmitReq* ret = NULL;
SArray* schemaReqs = NULL;
@@ -89,6 +89,32 @@ SSubmitReq* tqBlockToSubmit(SVnode* pVnode, const SArray* pBlocks, const STSchem
return NULL;
}
+ SArray *tagName = taosArrayInit(1, TSDB_COL_NAME_LEN);
+ char tagNameStr[TSDB_COL_NAME_LEN] = {0};
+ strcpy(tagNameStr, "group_id");
+ taosArrayPush(tagName, tagNameStr);
+
+// STag* pTag = NULL;
+// taosArrayClear(tagArray);
+// SArray *tagName = taosArrayInit(1, TSDB_COL_NAME_LEN);
+// for(int j = 0; j < pTagSchemaWrapper->nCols; j++){
+// STagVal tagVal = {
+// .cid = pTagSchemaWrapper->pSchema[j].colId,
+// .type = pTagSchemaWrapper->pSchema[j].type,
+// .i64 = (int64_t)pDataBlock->info.groupId,
+// };
+// taosArrayPush(tagArray, &tagVal);
+// taosArrayPush(tagName, pTagSchemaWrapper->pSchema[j].name);
+// }
+//
+// tTagNew(tagArray, 1, false, &pTag);
+// if (pTag == NULL) {
+// terrno = TSDB_CODE_OUT_OF_MEMORY;
+// taosArrayDestroy(tagArray);
+// taosArrayDestroy(tagName);
+// return NULL;
+// }
+
SVCreateTbReq createTbReq = {0};
SName name = {0};
tNameFromString(&name, stbFullName, T_NAME_ACCT | T_NAME_DB | T_NAME_TABLE);
@@ -99,6 +125,8 @@ SSubmitReq* tqBlockToSubmit(SVnode* pVnode, const SArray* pBlocks, const STSchem
createTbReq.type = TSDB_CHILD_TABLE;
createTbReq.ctb.suid = suid;
createTbReq.ctb.pTag = (uint8_t*)pTag;
+ createTbReq.ctb.tagNum = taosArrayGetSize(tagArray);
+ createTbReq.ctb.tagName = tagName;
int32_t code;
int32_t schemaLen;
@@ -113,6 +141,7 @@ SSubmitReq* tqBlockToSubmit(SVnode* pVnode, const SArray* pBlocks, const STSchem
void* schemaStr = taosMemoryMalloc(schemaLen);
if (schemaStr == NULL) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
+ tdDestroySVCreateTbReq(&createTbReq);
return NULL;
}
taosArrayPush(schemaReqs, &schemaStr);
@@ -123,6 +152,7 @@ SSubmitReq* tqBlockToSubmit(SVnode* pVnode, const SArray* pBlocks, const STSchem
code = tEncodeSVCreateTbReq(&encoder, &createTbReq);
if (code < 0) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
+ tdDestroySVCreateTbReq(&createTbReq);
return NULL;
}
tEncoderClear(&encoder);
@@ -231,7 +261,7 @@ void tqTableSink(SStreamTask* pTask, void* vnode, int64_t ver, void* data) {
ASSERT(pTask->tbSink.pTSchema);
deleteReq.deleteReqs = taosArrayInit(0, sizeof(SSingleDeleteReq));
- SSubmitReq* submitReq = tqBlockToSubmit(pVnode, pRes, pTask->tbSink.pTSchema, true, pTask->tbSink.stbUid,
+ SSubmitReq* submitReq = tqBlockToSubmit(pVnode, pRes, pTask->tbSink.pTSchema, pTask->tbSink.pSchemaWrapper, true, pTask->tbSink.stbUid,
pTask->tbSink.stbFullName, &deleteReq);
tqDebug("vgId:%d, task %d convert blocks over, put into write-queue", TD_VID(pVnode), pTask->taskId);
@@ -274,7 +304,6 @@ void tqTableSink(SStreamTask* pTask, void* vnode, int64_t ver, void* data) {
};
if (tmsgPutToQueue(&pVnode->msgCb, WRITE_QUEUE, &msg) != 0) {
- rpcFreeCont(submitReq);
tqDebug("failed to put into write-queue since %s", terrstr());
}
}
diff --git a/source/dnode/vnode/src/tsdb/tsdbCache.c b/source/dnode/vnode/src/tsdb/tsdbCache.c
index b9f38976747f7e73f6bc6b40fe9dd968a3b8cabe..3a921349e66ff34de862a882db33f49189246e1c 100644
--- a/source/dnode/vnode/src/tsdb/tsdbCache.c
+++ b/source/dnode/vnode/src/tsdb/tsdbCache.c
@@ -272,8 +272,8 @@ int32_t tsdbCacheInsertLast(SLRUCache *pCache, tb_uid_t uid, STSRow *row, STsdb
SColVal colVal = {0};
tTSRowGetVal(row, pTSchema, iCol, &colVal);
- if (colVal.isNone || colVal.isNull) {
- if (keyTs == tTsVal1->ts && !tColVal->isNone && !tColVal->isNull) {
+ if (!COL_VAL_IS_VALUE(&colVal)) {
+ if (keyTs == tTsVal1->ts && COL_VAL_IS_VALUE(tColVal)) {
invalidate = true;
break;
@@ -418,31 +418,18 @@ typedef enum {
} SFSLASTNEXTROWSTATES;
typedef struct {
- SFSLASTNEXTROWSTATES state; // [input]
- STsdb *pTsdb; // [input]
- SBlockIdx *pBlockIdxExp; // [input]
- STSchema *pTSchema; // [input]
+ SFSLASTNEXTROWSTATES state; // [input]
+ STsdb *pTsdb; // [input]
+ STSchema *pTSchema;// [input]
tb_uid_t suid;
tb_uid_t uid;
int32_t nFileSet;
int32_t iFileSet;
SArray *aDFileSet;
SDataFReader *pDataFReader;
- SArray *aBlockL;
- SBlockL *pBlockL;
- SBlockData *pBlockDataL;
- SBlockData blockDataL;
- int32_t nRow;
- int32_t iRow;
TSDBROW row;
- /*
- SArray *aBlockIdx;
- SBlockIdx *pBlockIdx;
- SMapData blockMap;
- int32_t nBlock;
- int32_t iBlock;
- SBlock block;
- */
+
+ SMergeTree mergeTree;
} SFSLastNextRowIter;
static int32_t getNextRowFromFSLast(void *iter, TSDBROW **ppRow) {
@@ -451,22 +438,16 @@ static int32_t getNextRowFromFSLast(void *iter, TSDBROW **ppRow) {
switch (state->state) {
case SFSLASTNEXTROW_FS:
- // state->aDFileSet = state->pTsdb->pFS->cState->aDFileSet;
state->nFileSet = taosArrayGetSize(state->aDFileSet);
state->iFileSet = state->nFileSet;
- state->pBlockDataL = NULL;
-
case SFSLASTNEXTROW_FILESET: {
SDFileSet *pFileSet = NULL;
_next_fileset:
if (--state->iFileSet >= 0) {
pFileSet = (SDFileSet *)taosArrayGet(state->aDFileSet, state->iFileSet);
} else {
- if (state->pBlockDataL) {
- tBlockDataDestroy(state->pBlockDataL, 1);
- state->pBlockDataL = NULL;
- }
+ // tMergeTreeClose(&state->mergeTree);
*ppRow = NULL;
return code;
@@ -475,68 +456,25 @@ static int32_t getNextRowFromFSLast(void *iter, TSDBROW **ppRow) {
code = tsdbDataFReaderOpen(&state->pDataFReader, state->pTsdb, pFileSet);
if (code) goto _err;
- if (!state->aBlockL) {
- state->aBlockL = taosArrayInit(0, sizeof(SBlockIdx));
- } else {
- taosArrayClear(state->aBlockL);
- }
-
- code = tsdbReadBlockL(state->pDataFReader, state->aBlockL);
- if (code) goto _err;
-
- // SBlockL *pBlockL = (SBlockL *)taosArrayGet(state->aBlockL, state->iBlockL);
-
- state->pBlockL = taosArraySearch(state->aBlockL, state->pBlockIdxExp, tCmprBlockL, TD_EQ);
- if (!state->pBlockL) {
+ SSttBlockLoadInfo* pLoadInfo = tCreateLastBlockLoadInfo(state->pTSchema, NULL, 0);
+ tMergeTreeOpen(&state->mergeTree, 1, state->pDataFReader, state->suid, state->uid,
+ &(STimeWindow){.skey = TSKEY_MIN, .ekey = TSKEY_MAX},
+ &(SVersionRange){.minVer = 0, .maxVer = UINT64_MAX}, pLoadInfo,true, NULL);
+ bool hasVal = tMergeTreeNext(&state->mergeTree);
+ if (!hasVal) {
+ state->state = SFSLASTNEXTROW_FILESET;
+ // tMergeTreeClose(&state->mergeTree);
goto _next_fileset;
}
-
- int64_t suid = state->pBlockL->suid;
- int64_t uid = state->pBlockL->maxUid;
-
- if (!state->pBlockDataL) {
- state->pBlockDataL = &state->blockDataL;
-
- tBlockDataCreate(state->pBlockDataL);
- }
- code = tBlockDataInit(state->pBlockDataL, suid, suid ? 0 : uid, state->pTSchema);
- if (code) goto _err;
- }
- case SFSLASTNEXTROW_BLOCKDATA:
- code = tsdbReadLastBlock(state->pDataFReader, state->pBlockL, state->pBlockDataL);
- if (code) goto _err;
-
- state->nRow = state->blockDataL.nRow;
- state->iRow = state->nRow - 1;
-
- if (!state->pBlockDataL->uid) {
- while (state->pBlockIdxExp->uid != state->pBlockDataL->aUid[state->iRow]) {
- --state->iRow;
- }
- }
-
state->state = SFSLASTNEXTROW_BLOCKROW;
+ }
case SFSLASTNEXTROW_BLOCKROW:
- if (state->pBlockDataL->uid) {
- if (state->iRow >= 0) {
- state->row = tsdbRowFromBlockData(state->pBlockDataL, state->iRow);
- *ppRow = &state->row;
-
- if (--state->iRow < 0) {
- state->state = SFSLASTNEXTROW_FILESET;
- }
- }
- } else {
- if (state->iRow >= 0 && state->pBlockIdxExp->uid == state->pBlockDataL->aUid[state->iRow]) {
- state->row = tsdbRowFromBlockData(state->pBlockDataL, state->iRow);
- *ppRow = &state->row;
-
- if (--state->iRow < 0 || state->pBlockIdxExp->uid != state->pBlockDataL->aUid[state->iRow]) {
- state->state = SFSLASTNEXTROW_FILESET;
- }
- }
+ state->row = tMergeTreeGetRow(&state->mergeTree);
+ *ppRow = &state->row;
+ bool hasVal = tMergeTreeNext(&state->mergeTree);
+ if (!hasVal) {
+ state->state = SFSLASTNEXTROW_FILESET;
}
-
return code;
default:
ASSERT(0);
@@ -548,15 +486,6 @@ _err:
tsdbDataFReaderClose(&state->pDataFReader);
state->pDataFReader = NULL;
}
- if (state->aBlockL) {
- taosArrayDestroy(state->aBlockL);
- state->aBlockL = NULL;
- }
- if (state->pBlockDataL) {
- tBlockDataDestroy(state->pBlockDataL, 1);
- state->pBlockDataL = NULL;
- }
-
*ppRow = NULL;
return code;
@@ -574,14 +503,6 @@ int32_t clearNextRowFromFSLast(void *iter) {
tsdbDataFReaderClose(&state->pDataFReader);
state->pDataFReader = NULL;
}
- if (state->aBlockL) {
- taosArrayDestroy(state->aBlockL);
- state->aBlockL = NULL;
- }
- if (state->pBlockDataL) {
- tBlockDataDestroy(state->pBlockDataL, 1);
- state->pBlockDataL = NULL;
- }
return code;
}
@@ -609,7 +530,7 @@ typedef struct SFSNextRowIter {
SMapData blockMap;
int32_t nBlock;
int32_t iBlock;
- SBlock block;
+ SDataBlk block;
SBlockData blockData;
SBlockData *pBlockData;
int32_t nRow;
@@ -670,7 +591,7 @@ static int32_t getNextRowFromFS(void *iter, TSDBROW **ppRow) {
}
tMapDataReset(&state->blockMap);
- code = tsdbReadBlock(state->pDataFReader, state->pBlockIdx, &state->blockMap);
+ code = tsdbReadDataBlk(state->pDataFReader, state->pBlockIdx, &state->blockMap);
if (code) goto _err;
state->nBlock = state->blockMap.nItem;
@@ -684,16 +605,17 @@ static int32_t getNextRowFromFS(void *iter, TSDBROW **ppRow) {
}
case SFSNEXTROW_BLOCKDATA:
if (state->iBlock >= 0) {
- SBlock block = {0};
+ SDataBlk block = {0};
- tBlockReset(&block);
+ tDataBlkReset(&block);
// tBlockDataReset(&state->blockData);
tBlockDataReset(state->pBlockData);
- tMapDataGetItemByIdx(&state->blockMap, state->iBlock, &block, tGetBlock);
+ tMapDataGetItemByIdx(&state->blockMap, state->iBlock, &block, tGetDataBlk);
/* code = tsdbReadBlockData(state->pDataFReader, &state->blockIdx, &block, &state->blockData, NULL, NULL); */
tBlockDataReset(state->pBlockData);
- code = tBlockDataInit(state->pBlockData, state->suid, state->uid, state->pTSchema);
+ TABLEID tid = {.suid = state->suid, .uid = state->uid};
+ code = tBlockDataInit(state->pBlockData, &tid, state->pTSchema, NULL, 0);
if (code) goto _err;
code = tsdbReadDataBlock(state->pDataFReader, &block, state->pBlockData);
@@ -878,7 +800,7 @@ static bool tsdbKeyDeleted(TSDBKEY *key, SArray *pSkyline, int64_t *iSkyline) {
if (key->ts > pItemBack->ts) {
return false;
} else if (key->ts >= pItemFront->ts && key->ts <= pItemBack->ts) {
- if ((key->version <= pItemFront->version || key->ts == pItemBack->ts && key->version <= pItemBack->version)) {
+ if (key->version <= pItemFront->version || (key->ts == pItemBack->ts && key->version <= pItemBack->version)) {
return true;
} else {
return false;
@@ -928,7 +850,7 @@ static int32_t nextRowIterOpen(CacheNextRowIter *pIter, tb_uid_t uid, STsdb *pTs
tb_uid_t suid = getTableSuidByUid(uid, pTsdb);
- tsdbTakeReadSnap(pTsdb, &pIter->pReadSnap);
+ tsdbTakeReadSnap(pTsdb, &pIter->pReadSnap, NULL);
STbData *pMem = NULL;
if (pIter->pReadSnap->pMem) {
@@ -972,7 +894,6 @@ static int32_t nextRowIterOpen(CacheNextRowIter *pIter, tb_uid_t uid, STsdb *pTs
pIter->fsLastState.state = (SFSLASTNEXTROWSTATES)SFSNEXTROW_FS;
pIter->fsLastState.pTsdb = pTsdb;
pIter->fsLastState.aDFileSet = pIter->pReadSnap->fs.aDFileSet;
- pIter->fsLastState.pBlockIdxExp = &pIter->idx;
pIter->fsLastState.pTSchema = pTSchema;
pIter->fsLastState.suid = suid;
pIter->fsLastState.uid = uid;
@@ -1024,7 +945,7 @@ static int32_t nextRowIterClose(CacheNextRowIter *pIter) {
taosArrayDestroy(pIter->pSkyline);
}
- tsdbUntakeReadSnap(pIter->pTsdb, pIter->pReadSnap);
+ tsdbUntakeReadSnap(pIter->pTsdb, pIter->pReadSnap, NULL);
_err:
return code;
@@ -1145,7 +1066,7 @@ static int32_t mergeLastRow(tb_uid_t uid, STsdb *pTsdb, bool *dup, STSRow **ppRo
goto _err;
}
- if (pColVal->isNone && !setNoneCol) {
+ if (COL_VAL_IS_NONE(pColVal) && !setNoneCol) {
noneCol = iCol;
setNoneCol = true;
}
@@ -1170,9 +1091,9 @@ static int32_t mergeLastRow(tb_uid_t uid, STsdb *pTsdb, bool *dup, STSRow **ppRo
SColVal *tColVal = (SColVal *)taosArrayGet(pColArray, iCol);
tsdbRowGetColVal(pRow, pTSchema, iCol, pColVal);
- if (tColVal->isNone && !pColVal->isNone) {
+ if (COL_VAL_IS_NONE(tColVal) && !COL_VAL_IS_NONE(pColVal)) {
taosArraySet(pColArray, iCol, pColVal);
- } else if (tColVal->isNone && pColVal->isNone && !setNoneCol) {
+ } else if (COL_VAL_IS_NONE(tColVal) && COL_VAL_IS_NONE(pColVal) && !setNoneCol) {
noneCol = iCol;
setNoneCol = true;
}
@@ -1244,7 +1165,7 @@ static int32_t mergeLast(tb_uid_t uid, STsdb *pTsdb, SArray **ppLastArray) {
goto _err;
}
- if ((pColVal->isNone || pColVal->isNull) && !setNoneCol) {
+ if (!COL_VAL_IS_VALUE(pColVal) && !setNoneCol) {
noneCol = iCol;
setNoneCol = true;
}
@@ -1264,9 +1185,9 @@ static int32_t mergeLast(tb_uid_t uid, STsdb *pTsdb, SArray **ppLastArray) {
SColVal *tColVal = (SColVal *)taosArrayGet(pColArray, iCol);
tsdbRowGetColVal(pRow, pTSchema, iCol, pColVal);
- if ((tColVal->isNone || tColVal->isNull) && (!pColVal->isNone && !pColVal->isNull)) {
+ if (!COL_VAL_IS_VALUE(tColVal) && COL_VAL_IS_VALUE(pColVal)) {
taosArraySet(pColArray, iCol, &(SLastCol){.ts = rowTs, .colVal = *pColVal});
- } else if ((tColVal->isNone || tColVal->isNull) && (pColVal->isNone || pColVal->isNull) && !setNoneCol) {
+ } else if (!COL_VAL_IS_VALUE(tColVal) && !COL_VAL_IS_VALUE(pColVal) && !setNoneCol) {
noneCol = iCol;
setNoneCol = true;
}
@@ -1372,25 +1293,33 @@ int32_t tsdbCacheGetLastH(SLRUCache *pCache, tb_uid_t uid, STsdb *pTsdb, LRUHand
// getTableCacheKeyS(uid, "l", key, &keyLen);
getTableCacheKey(uid, 1, key, &keyLen);
LRUHandle *h = taosLRUCacheLookup(pCache, key, keyLen);
- if (h) {
- } else {
- SArray *pLastArray = NULL;
- code = mergeLast(uid, pTsdb, &pLastArray);
- // if table's empty or error, return code of -1
- // if (code < 0 || pRow == NULL) {
- if (code < 0 || pLastArray == NULL) {
- *handle = NULL;
- return 0;
- }
-
- _taos_lru_deleter_t deleter = deleteTableCacheLast;
- LRUStatus status =
- taosLRUCacheInsert(pCache, key, keyLen, pLastArray, pLastArray->capacity, deleter, NULL, TAOS_LRU_PRIORITY_LOW);
- if (status != TAOS_LRU_STATUS_OK) {
- code = -1;
- }
+ if (!h) {
+ taosThreadMutexLock(&pTsdb->lruMutex);
h = taosLRUCacheLookup(pCache, key, keyLen);
+ if (!h) {
+ SArray *pLastArray = NULL;
+ code = mergeLast(uid, pTsdb, &pLastArray);
+ // if table's empty or error, return code of -1
+ // if (code < 0 || pRow == NULL) {
+ if (code < 0 || pLastArray == NULL) {
+ *handle = NULL;
+ return 0;
+ }
+
+ _taos_lru_deleter_t deleter = deleteTableCacheLast;
+ LRUStatus status = taosLRUCacheInsert(pCache, key, keyLen, pLastArray, pLastArray->capacity, deleter, NULL,
+ TAOS_LRU_PRIORITY_LOW);
+ if (status != TAOS_LRU_STATUS_OK) {
+ code = -1;
+ }
+
+ taosThreadMutexUnlock(&pTsdb->lruMutex);
+
+ h = taosLRUCacheLookup(pCache, key, keyLen);
+ } else {
+ taosThreadMutexUnlock(&pTsdb->lruMutex);
+ }
}
*handle = h;
@@ -1411,3 +1340,5 @@ void tsdbCacheSetCapacity(SVnode *pVnode, size_t capacity) {
}
size_t tsdbCacheGetCapacity(SVnode *pVnode) { return taosLRUCacheGetCapacity(pVnode->pTsdb->lruCache); }
+
+size_t tsdbCacheGetUsage(SVnode *pVnode) { return taosLRUCacheGetUsage(pVnode->pTsdb->lruCache); }
diff --git a/source/dnode/vnode/src/tsdb/tsdbCacheRead.c b/source/dnode/vnode/src/tsdb/tsdbCacheRead.c
index 66843d9a2844c44e77e798ab47032ef75370a544..5f981649f355f0384a0e089feb8da27d60630cc8 100644
--- a/source/dnode/vnode/src/tsdb/tsdbCacheRead.c
+++ b/source/dnode/vnode/src/tsdb/tsdbCacheRead.c
@@ -18,7 +18,7 @@
#include "tcommon.h"
#include "tsdb.h"
-typedef struct SLastrowReader {
+typedef struct SCacheRowsReader {
SVnode* pVnode;
STSchema* pSchema;
uint64_t uid;
@@ -27,9 +27,9 @@ typedef struct SLastrowReader {
int32_t type;
int32_t tableIndex; // currently returned result tables
SArray* pTableList; // table id list
-} SLastrowReader;
+} SCacheRowsReader;
-static void saveOneRow(STSRow* pRow, SSDataBlock* pBlock, SLastrowReader* pReader, const int32_t* slotIds) {
+static void saveOneRow(STSRow* pRow, SSDataBlock* pBlock, SCacheRowsReader* pReader, const int32_t* slotIds) {
ASSERT(pReader->numOfCols <= taosArrayGetSize(pBlock->pDataBlock));
int32_t numOfRows = pBlock->info.rows;
@@ -45,7 +45,7 @@ static void saveOneRow(STSRow* pRow, SSDataBlock* pBlock, SLastrowReader* pReade
tTSRowGetVal(pRow, pReader->pSchema, slotId, &colVal);
if (IS_VAR_DATA_TYPE(colVal.type)) {
- if (colVal.isNull || colVal.isNone) {
+ if (!COL_VAL_IS_VALUE(&colVal)) {
colDataAppendNULL(pColInfoData, numOfRows);
} else {
varDataSetLen(pReader->transferBuf[slotId], colVal.value.nData);
@@ -53,7 +53,7 @@ static void saveOneRow(STSRow* pRow, SSDataBlock* pBlock, SLastrowReader* pReade
colDataAppend(pColInfoData, numOfRows, pReader->transferBuf[slotId], false);
}
} else {
- colDataAppend(pColInfoData, numOfRows, (const char*)&colVal.value, colVal.isNull || colVal.isNone);
+ colDataAppend(pColInfoData, numOfRows, (const char*)&colVal.value, !COL_VAL_IS_VALUE(&colVal));
}
}
}
@@ -61,8 +61,10 @@ static void saveOneRow(STSRow* pRow, SSDataBlock* pBlock, SLastrowReader* pReade
pBlock->info.rows += 1;
}
-int32_t tsdbLastRowReaderOpen(void* pVnode, int32_t type, SArray* pTableIdList, int32_t numOfCols, void** pReader) {
- SLastrowReader* p = taosMemoryCalloc(1, sizeof(SLastrowReader));
+int32_t tsdbCacherowsReaderOpen(void* pVnode, int32_t type, SArray* pTableIdList, int32_t numOfCols, void** pReader) {
+ *pReader = NULL;
+
+ SCacheRowsReader* p = taosMemoryCalloc(1, sizeof(SCacheRowsReader));
if (p == NULL) {
return TSDB_CODE_OUT_OF_MEMORY;
}
@@ -81,9 +83,17 @@ int32_t tsdbLastRowReaderOpen(void* pVnode, int32_t type, SArray* pTableIdList,
p->pTableList = pTableIdList;
p->transferBuf = taosMemoryCalloc(p->pSchema->numOfCols, POINTER_BYTES);
+ if (p->transferBuf == NULL) {
+ return TSDB_CODE_OUT_OF_MEMORY;
+ }
+
for (int32_t i = 0; i < p->pSchema->numOfCols; ++i) {
if (IS_VAR_DATA_TYPE(p->pSchema->columns[i].type)) {
p->transferBuf[i] = taosMemoryMalloc(p->pSchema->columns[i].bytes);
+ if (p->transferBuf[i] == NULL) {
+ tsdbCacherowsReaderClose(p);
+ return TSDB_CODE_OUT_OF_MEMORY;
+ }
}
}
@@ -91,8 +101,8 @@ int32_t tsdbLastRowReaderOpen(void* pVnode, int32_t type, SArray* pTableIdList,
return TSDB_CODE_SUCCESS;
}
-int32_t tsdbLastrowReaderClose(void* pReader) {
- SLastrowReader* p = pReader;
+int32_t tsdbCacherowsReaderClose(void* pReader) {
+ SCacheRowsReader* p = pReader;
if (p->pSchema != NULL) {
for (int32_t i = 0; i < p->pSchema->numOfCols; ++i) {
@@ -107,27 +117,56 @@ int32_t tsdbLastrowReaderClose(void* pReader) {
return TSDB_CODE_SUCCESS;
}
-int32_t tsdbRetrieveLastRow(void* pReader, SSDataBlock* pResBlock, const int32_t* slotIds, SArray* pTableUidList) {
+static int32_t doExtractCacheRow(SCacheRowsReader* pr, SLRUCache* lruCache, uint64_t uid, STSRow** pRow,
+ LRUHandle** h) {
+ int32_t code = TSDB_CODE_SUCCESS;
+ if ((pr->type & CACHESCAN_RETRIEVE_LAST_ROW) == CACHESCAN_RETRIEVE_LAST_ROW) {
+ code = tsdbCacheGetLastrowH(lruCache, uid, pr->pVnode->pTsdb, h);
+ if (code != TSDB_CODE_SUCCESS) {
+ return code;
+ }
+
+ // no data in the table of Uid
+ if (*h != NULL) {
+ *pRow = (STSRow*)taosLRUCacheValue(lruCache, *h);
+ }
+ } else {
+ code = tsdbCacheGetLastH(lruCache, uid, pr->pVnode->pTsdb, h);
+ if (code != TSDB_CODE_SUCCESS) {
+ return code;
+ }
+
+ // no data in the table of Uid
+ if (*h != NULL) {
+ SArray* pLast = (SArray*)taosLRUCacheValue(lruCache, *h);
+ tsdbCacheLastArray2Row(pLast, pRow, pr->pSchema);
+ }
+ }
+
+ return code;
+}
+
+int32_t tsdbRetrieveCacheRows(void* pReader, SSDataBlock* pResBlock, const int32_t* slotIds, SArray* pTableUidList) {
if (pReader == NULL || pResBlock == NULL) {
return TSDB_CODE_INVALID_PARA;
}
- SLastrowReader* pr = pReader;
+ SCacheRowsReader* pr = pReader;
+ int32_t code = TSDB_CODE_SUCCESS;
SLRUCache* lruCache = pr->pVnode->pTsdb->lruCache;
LRUHandle* h = NULL;
STSRow* pRow = NULL;
size_t numOfTables = taosArrayGetSize(pr->pTableList);
// retrieve the only one last row of all tables in the uid list.
- if (pr->type == LASTROW_RETRIEVE_TYPE_SINGLE) {
+ if ((pr->type & CACHESCAN_RETRIEVE_TYPE_SINGLE) == CACHESCAN_RETRIEVE_TYPE_SINGLE) {
int64_t lastKey = INT64_MIN;
bool internalResult = false;
for (int32_t i = 0; i < numOfTables; ++i) {
STableKeyInfo* pKeyInfo = taosArrayGet(pr->pTableList, i);
- int32_t code = tsdbCacheGetLastrowH(lruCache, pKeyInfo->uid, pr->pVnode->pTsdb, &h);
- // int32_t code = tsdbCacheGetLastH(lruCache, pKeyInfo->uid, pr->pVnode->pTsdb, &h);
+ code = doExtractCacheRow(pr, lruCache, pKeyInfo->uid, &pRow, &h);
if (code != TSDB_CODE_SUCCESS) {
return code;
}
@@ -136,9 +175,6 @@ int32_t tsdbRetrieveLastRow(void* pReader, SSDataBlock* pResBlock, const int32_t
continue;
}
- pRow = (STSRow*)taosLRUCacheValue(lruCache, h);
- // SArray* pLast = (SArray*)taosLRUCacheValue(lruCache, h);
- // tsdbCacheLastArray2Row(pLast, &pRow, pr->pSchema);
if (pRow->ts > lastKey) {
// Set result row into the same rowIndex repeatly, so we need to check if the internal result row has already
// appended or not.
@@ -155,25 +191,18 @@ int32_t tsdbRetrieveLastRow(void* pReader, SSDataBlock* pResBlock, const int32_t
tsdbCacheRelease(lruCache, h);
}
- } else if (pr->type == LASTROW_RETRIEVE_TYPE_ALL) {
+ } else if ((pr->type & CACHESCAN_RETRIEVE_TYPE_ALL) == CACHESCAN_RETRIEVE_TYPE_ALL) {
for (int32_t i = pr->tableIndex; i < numOfTables; ++i) {
STableKeyInfo* pKeyInfo = taosArrayGet(pr->pTableList, i);
-
- int32_t code = tsdbCacheGetLastrowH(lruCache, pKeyInfo->uid, pr->pVnode->pTsdb, &h);
- // int32_t code = tsdbCacheGetLastH(lruCache, pKeyInfo->uid, pr->pVnode->pTsdb, &h);
+ code = doExtractCacheRow(pr, lruCache, pKeyInfo->uid, &pRow, &h);
if (code != TSDB_CODE_SUCCESS) {
return code;
}
- // no data in the table of Uid
if (h == NULL) {
continue;
}
- pRow = (STSRow*)taosLRUCacheValue(lruCache, h);
- // SArray* pLast = (SArray*)taosLRUCacheValue(lruCache, h);
- // tsdbCacheLastArray2Row(pLast, &pRow, pr->pSchema);
-
saveOneRow(pRow, pResBlock, pr, slotIds);
taosArrayPush(pTableUidList, &pKeyInfo->uid);
diff --git a/source/dnode/vnode/src/tsdb/tsdbCommit.c b/source/dnode/vnode/src/tsdb/tsdbCommit.c
index 04a6de8472ade43b3e7e2b420f8e2d0c0656a2c8..5403395623de41ef448785ceb6f619c9b7e6a6f7 100644
--- a/source/dnode/vnode/src/tsdb/tsdbCommit.c
+++ b/source/dnode/vnode/src/tsdb/tsdbCommit.c
@@ -14,17 +14,27 @@
*/
#include "tsdb.h"
-typedef struct {
- int64_t suid;
- int64_t uid;
- STSchema *pTSchema;
-} SSkmInfo;
+
+typedef enum { MEMORY_DATA_ITER = 0, STT_DATA_ITER } EDataIterT;
typedef struct {
- int64_t suid;
- int64_t uid;
- TSDBROW row;
-} SRowInfo;
+ SRBTreeNode n;
+ SRowInfo r;
+ EDataIterT type;
+ union {
+ struct {
+ int32_t iTbDataP;
+ STbDataIter iter;
+ }; // memory data iter
+ struct {
+ int32_t iStt;
+ SArray *aSttBlk;
+ int32_t iSttBlk;
+ SBlockData bData;
+ int32_t iRow;
+ }; // stt file data iter
+ };
+} SDataIter;
typedef struct {
STsdb *pTsdb;
@@ -35,8 +45,9 @@ typedef struct {
int32_t minRow;
int32_t maxRow;
int8_t cmprAlg;
- SArray *aTbDataP;
- STsdbFS fs;
+ int8_t sttTrigger;
+ SArray *aTbDataP; // memory
+ STsdbFS fs; // disk
// --------------
TSKEY nextKey; // reset by each table commit
int32_t commitFid;
@@ -45,25 +56,24 @@ typedef struct {
// commit file data
struct {
SDataFReader *pReader;
- // data
- SArray *aBlockIdx; // SArray
- int32_t iBlockIdx;
- SBlockIdx *pBlockIdx;
- SMapData mBlock; // SMapData
- SBlockData bData;
- // last
- SArray *aBlockL; // SArray
- int32_t iBlockL;
- SBlockData bDatal;
- int32_t iRow;
- SRowInfo *pRowInfo;
- SRowInfo rowInfo;
+ SArray *aBlockIdx; // SArray
+ int32_t iBlockIdx;
+ SBlockIdx *pBlockIdx;
+ SMapData mBlock; // SMapData
+ SBlockData bData;
} dReader;
+ struct {
+ SDataIter *pIter;
+ SRBTree rbt;
+ SDataIter dataIter;
+ SDataIter aDataIter[TSDB_MAX_STT_TRIGGER];
+ int8_t toLastOnly;
+ };
struct {
SDataFWriter *pWriter;
SArray *aBlockIdx; // SArray
- SArray *aBlockL; // SArray
- SMapData mBlock; // SMapData
+ SArray *aSttBlk; // SArray
+ SMapData mBlock; // SMapData
SBlockData bData;
SBlockData bDatal;
} dWriter;
@@ -82,6 +92,26 @@ static int32_t tsdbCommitData(SCommitter *pCommitter);
static int32_t tsdbCommitDel(SCommitter *pCommitter);
static int32_t tsdbCommitCache(SCommitter *pCommitter);
static int32_t tsdbEndCommit(SCommitter *pCommitter, int32_t eno);
+static int32_t tsdbNextCommitRow(SCommitter *pCommitter);
+
+int32_t tRowInfoCmprFn(const void *p1, const void *p2) {
+ SRowInfo *pInfo1 = (SRowInfo *)p1;
+ SRowInfo *pInfo2 = (SRowInfo *)p2;
+
+ if (pInfo1->suid < pInfo2->suid) {
+ return -1;
+ } else if (pInfo1->suid > pInfo2->suid) {
+ return 1;
+ }
+
+ if (pInfo1->uid < pInfo2->uid) {
+ return -1;
+ } else if (pInfo1->uid > pInfo2->uid) {
+ return 1;
+ }
+
+ return tsdbRowCmprFn(&pInfo1->row, &pInfo2->row);
+}
int32_t tsdbBegin(STsdb *pTsdb) {
int32_t code = 0;
@@ -290,19 +320,22 @@ _err:
return code;
}
-static int32_t tsdbCommitterUpdateTableSchema(SCommitter *pCommitter, int64_t suid, int64_t uid) {
+int32_t tsdbUpdateTableSchema(SMeta *pMeta, int64_t suid, int64_t uid, SSkmInfo *pSkmInfo) {
int32_t code = 0;
if (suid) {
- if (pCommitter->skmTable.suid == suid) goto _exit;
+ if (pSkmInfo->suid == suid) {
+ pSkmInfo->uid = uid;
+ goto _exit;
+ }
} else {
- if (pCommitter->skmTable.uid == uid) goto _exit;
+ if (pSkmInfo->uid == uid) goto _exit;
}
- pCommitter->skmTable.suid = suid;
- pCommitter->skmTable.uid = uid;
- tTSchemaDestroy(pCommitter->skmTable.pTSchema);
- code = metaGetTbTSchemaEx(pCommitter->pTsdb->pVnode->pMeta, suid, uid, -1, &pCommitter->skmTable.pTSchema);
+ pSkmInfo->suid = suid;
+ pSkmInfo->uid = uid;
+ tTSchemaDestroy(pSkmInfo->pTSchema);
+ code = metaGetTbTSchemaEx(pMeta, suid, uid, -1, &pSkmInfo->pTSchema);
if (code) goto _exit;
_exit:
@@ -334,54 +367,6 @@ _exit:
return code;
}
-static int32_t tsdbCommitterNextLastRow(SCommitter *pCommitter) {
- int32_t code = 0;
-
- ASSERT(pCommitter->dReader.pReader);
- ASSERT(pCommitter->dReader.pRowInfo);
-
- SBlockData *pBlockDatal = &pCommitter->dReader.bDatal;
- pCommitter->dReader.iRow++;
- if (pCommitter->dReader.iRow < pBlockDatal->nRow) {
- if (pBlockDatal->uid) {
- pCommitter->dReader.pRowInfo->uid = pBlockDatal->uid;
- } else {
- pCommitter->dReader.pRowInfo->uid = pBlockDatal->aUid[pCommitter->dReader.iRow];
- }
- pCommitter->dReader.pRowInfo->row = tsdbRowFromBlockData(pBlockDatal, pCommitter->dReader.iRow);
- } else {
- pCommitter->dReader.iBlockL++;
- if (pCommitter->dReader.iBlockL < taosArrayGetSize(pCommitter->dReader.aBlockL)) {
- SBlockL *pBlockL = (SBlockL *)taosArrayGet(pCommitter->dReader.aBlockL, pCommitter->dReader.iBlockL);
- int64_t suid = pBlockL->suid;
- int64_t uid = pBlockL->maxUid;
-
- code = tsdbCommitterUpdateTableSchema(pCommitter, suid, uid);
- if (code) goto _exit;
-
- code = tBlockDataInit(pBlockDatal, suid, suid ? 0 : uid, pCommitter->skmTable.pTSchema);
- if (code) goto _exit;
-
- code = tsdbReadLastBlock(pCommitter->dReader.pReader, pBlockL, pBlockDatal);
- if (code) goto _exit;
-
- pCommitter->dReader.iRow = 0;
- pCommitter->dReader.pRowInfo->suid = pBlockDatal->suid;
- if (pBlockDatal->uid) {
- pCommitter->dReader.pRowInfo->uid = pBlockDatal->uid;
- } else {
- pCommitter->dReader.pRowInfo->uid = pBlockDatal->aUid[0];
- }
- pCommitter->dReader.pRowInfo->row = tsdbRowFromBlockData(pBlockDatal, pCommitter->dReader.iRow);
- } else {
- pCommitter->dReader.pRowInfo = NULL;
- }
- }
-
-_exit:
- return code;
-}
-
static int32_t tsdbCommitterNextTableData(SCommitter *pCommitter) {
int32_t code = 0;
@@ -392,7 +377,7 @@ static int32_t tsdbCommitterNextTableData(SCommitter *pCommitter) {
pCommitter->dReader.pBlockIdx =
(SBlockIdx *)taosArrayGet(pCommitter->dReader.aBlockIdx, pCommitter->dReader.iBlockIdx);
- code = tsdbReadBlock(pCommitter->dReader.pReader, pCommitter->dReader.pBlockIdx, &pCommitter->dReader.mBlock);
+ code = tsdbReadDataBlk(pCommitter->dReader.pReader, pCommitter->dReader.pBlockIdx, &pCommitter->dReader.mBlock);
if (code) goto _exit;
ASSERT(pCommitter->dReader.mBlock.nItem > 0);
@@ -404,6 +389,85 @@ _exit:
return code;
}
+static int32_t tsdbOpenCommitIter(SCommitter *pCommitter) {
+ int32_t code = 0;
+
+ pCommitter->pIter = NULL;
+ tRBTreeCreate(&pCommitter->rbt, tRowInfoCmprFn);
+
+ // memory
+ TSDBKEY tKey = {.ts = pCommitter->minKey, .version = VERSION_MIN};
+ SDataIter *pIter = &pCommitter->dataIter;
+ pIter->type = MEMORY_DATA_ITER;
+ pIter->iTbDataP = 0;
+ for (; pIter->iTbDataP < taosArrayGetSize(pCommitter->aTbDataP); pIter->iTbDataP++) {
+ STbData *pTbData = (STbData *)taosArrayGetP(pCommitter->aTbDataP, pIter->iTbDataP);
+ tsdbTbDataIterOpen(pTbData, &tKey, 0, &pIter->iter);
+ TSDBROW *pRow = tsdbTbDataIterGet(&pIter->iter);
+ if (pRow && TSDBROW_TS(pRow) > pCommitter->maxKey) {
+ pCommitter->nextKey = TMIN(pCommitter->nextKey, TSDBROW_TS(pRow));
+ pRow = NULL;
+ }
+
+ if (pRow == NULL) continue;
+
+ pIter->r.suid = pTbData->suid;
+ pIter->r.uid = pTbData->uid;
+ pIter->r.row = *pRow;
+ break;
+ }
+ ASSERT(pIter->iTbDataP < taosArrayGetSize(pCommitter->aTbDataP));
+ tRBTreePut(&pCommitter->rbt, (SRBTreeNode *)pIter);
+
+ // disk
+ pCommitter->toLastOnly = 0;
+ SDataFReader *pReader = pCommitter->dReader.pReader;
+ if (pReader) {
+ if (pReader->pSet->nSttF >= pCommitter->sttTrigger) {
+ int8_t iIter = 0;
+ for (int32_t iStt = 0; iStt < pReader->pSet->nSttF; iStt++) {
+ pIter = &pCommitter->aDataIter[iIter];
+ pIter->type = STT_DATA_ITER;
+ pIter->iStt = iStt;
+
+ code = tsdbReadSttBlk(pCommitter->dReader.pReader, iStt, pIter->aSttBlk);
+ if (code) goto _err;
+
+ if (taosArrayGetSize(pIter->aSttBlk) == 0) continue;
+
+ pIter->iSttBlk = 0;
+ SSttBlk *pSttBlk = (SSttBlk *)taosArrayGet(pIter->aSttBlk, 0);
+ code = tsdbReadSttBlockEx(pCommitter->dReader.pReader, iStt, pSttBlk, &pIter->bData);
+ if (code) goto _err;
+
+ pIter->iRow = 0;
+ pIter->r.suid = pIter->bData.suid;
+ pIter->r.uid = pIter->bData.uid ? pIter->bData.uid : pIter->bData.aUid[0];
+ pIter->r.row = tsdbRowFromBlockData(&pIter->bData, 0);
+
+ tRBTreePut(&pCommitter->rbt, (SRBTreeNode *)pIter);
+ iIter++;
+ }
+ } else {
+ for (int32_t iStt = 0; iStt < pReader->pSet->nSttF; iStt++) {
+ SSttFile *pSttFile = pReader->pSet->aSttF[iStt];
+ if (pSttFile->size > pSttFile->offset) {
+ pCommitter->toLastOnly = 1;
+ break;
+ }
+ }
+ }
+ }
+
+ code = tsdbNextCommitRow(pCommitter);
+ if (code) goto _err;
+
+ return code;
+
+_err:
+ return code;
+}
+
static int32_t tsdbCommitFileDataStart(SCommitter *pCommitter) {
int32_t code = 0;
STsdb *pTsdb = pCommitter->pTsdb;
@@ -416,8 +480,8 @@ static int32_t tsdbCommitFileDataStart(SCommitter *pCommitter) {
pCommitter->nextKey = TSKEY_MAX;
// Reader
- pRSet = (SDFileSet *)taosArraySearch(pCommitter->fs.aDFileSet, &(SDFileSet){.fid = pCommitter->commitFid},
- tDFileSetCmprFn, TD_EQ);
+ SDFileSet tDFileSet = {.fid = pCommitter->commitFid};
+ pRSet = (SDFileSet *)taosArraySearch(pCommitter->fs.aDFileSet, &tDFileSet, tDFileSetCmprFn, TD_EQ);
if (pRSet) {
code = tsdbDataFReaderOpen(&pCommitter->dReader.pReader, pTsdb, pRSet);
if (code) goto _err;
@@ -427,68 +491,58 @@ static int32_t tsdbCommitFileDataStart(SCommitter *pCommitter) {
if (code) goto _err;
pCommitter->dReader.iBlockIdx = 0;
- if (pCommitter->dReader.iBlockIdx < taosArrayGetSize(pCommitter->dReader.aBlockIdx)) {
- pCommitter->dReader.pBlockIdx =
- (SBlockIdx *)taosArrayGet(pCommitter->dReader.aBlockIdx, pCommitter->dReader.iBlockIdx);
-
- code = tsdbReadBlock(pCommitter->dReader.pReader, pCommitter->dReader.pBlockIdx, &pCommitter->dReader.mBlock);
+ if (taosArrayGetSize(pCommitter->dReader.aBlockIdx) > 0) {
+ pCommitter->dReader.pBlockIdx = (SBlockIdx *)taosArrayGet(pCommitter->dReader.aBlockIdx, 0);
+ code = tsdbReadDataBlk(pCommitter->dReader.pReader, pCommitter->dReader.pBlockIdx, &pCommitter->dReader.mBlock);
if (code) goto _err;
} else {
pCommitter->dReader.pBlockIdx = NULL;
}
tBlockDataReset(&pCommitter->dReader.bData);
-
- // last
- code = tsdbReadBlockL(pCommitter->dReader.pReader, pCommitter->dReader.aBlockL);
- if (code) goto _err;
-
- pCommitter->dReader.iBlockL = -1;
- pCommitter->dReader.iRow = -1;
- pCommitter->dReader.pRowInfo = &pCommitter->dReader.rowInfo;
- tBlockDataReset(&pCommitter->dReader.bDatal);
- code = tsdbCommitterNextLastRow(pCommitter);
- if (code) goto _err;
} else {
pCommitter->dReader.pBlockIdx = NULL;
- pCommitter->dReader.pRowInfo = NULL;
}
// Writer
- SHeadFile fHead;
- SDataFile fData;
- SLastFile fLast;
- SSmaFile fSma;
- SDFileSet wSet = {.pHeadF = &fHead, .pDataF = &fData, .pLastF = &fLast, .pSmaF = &fSma};
+ SHeadFile fHead = {.commitID = pCommitter->commitID};
+ SDataFile fData = {.commitID = pCommitter->commitID};
+ SSmaFile fSma = {.commitID = pCommitter->commitID};
+ SSttFile fStt = {.commitID = pCommitter->commitID};
+ SDFileSet wSet = {.fid = pCommitter->commitFid, .pHeadF = &fHead, .pDataF = &fData, .pSmaF = &fSma};
if (pRSet) {
- wSet.diskId = pRSet->diskId;
- wSet.fid = pCommitter->commitFid;
- fHead = (SHeadFile){.commitID = pCommitter->commitID, .size = 0, .offset = 0};
+ ASSERT(pRSet->nSttF <= pCommitter->sttTrigger);
fData = *pRSet->pDataF;
- fLast = (SLastFile){.commitID = pCommitter->commitID, .size = 0, .offset = 0};
fSma = *pRSet->pSmaF;
+ wSet.diskId = pRSet->diskId;
+ if (pRSet->nSttF < pCommitter->sttTrigger) {
+ for (int32_t iStt = 0; iStt < pRSet->nSttF; iStt++) {
+ wSet.aSttF[iStt] = pRSet->aSttF[iStt];
+ }
+ wSet.nSttF = pRSet->nSttF + 1;
+ } else {
+ wSet.nSttF = 1;
+ }
} else {
SDiskID did = {0};
-
tfsAllocDisk(pTsdb->pVnode->pTfs, 0, &did);
-
tfsMkdirRecurAt(pTsdb->pVnode->pTfs, pTsdb->path, did);
-
wSet.diskId = did;
- wSet.fid = pCommitter->commitFid;
- fHead = (SHeadFile){.commitID = pCommitter->commitID, .size = 0, .offset = 0};
- fData = (SDataFile){.commitID = pCommitter->commitID, .size = 0};
- fLast = (SLastFile){.commitID = pCommitter->commitID, .size = 0, .offset = 0};
- fSma = (SSmaFile){.commitID = pCommitter->commitID, .size = 0};
+ wSet.nSttF = 1;
}
+ wSet.aSttF[wSet.nSttF - 1] = &fStt;
code = tsdbDataFWriterOpen(&pCommitter->dWriter.pWriter, pTsdb, &wSet);
if (code) goto _err;
taosArrayClear(pCommitter->dWriter.aBlockIdx);
- taosArrayClear(pCommitter->dWriter.aBlockL);
+ taosArrayClear(pCommitter->dWriter.aSttBlk);
tMapDataReset(&pCommitter->dWriter.mBlock);
tBlockDataReset(&pCommitter->dWriter.bData);
tBlockDataReset(&pCommitter->dWriter.bDatal);
+ // open iter
+ code = tsdbOpenCommitIter(pCommitter);
+ if (code) goto _err;
+
_exit:
return code;
@@ -497,50 +551,45 @@ _err:
return code;
}
-static int32_t tsdbCommitDataBlock(SCommitter *pCommitter, SBlock *pBlock) {
- int32_t code = 0;
- SBlockData *pBlockData = &pCommitter->dWriter.bData;
- SBlock block;
+int32_t tsdbWriteDataBlock(SDataFWriter *pWriter, SBlockData *pBlockData, SMapData *mDataBlk, int8_t cmprAlg) {
+ int32_t code = 0;
- ASSERT(pBlockData->nRow > 0);
+ if (pBlockData->nRow == 0) return code;
- if (pBlock) {
- block = *pBlock; // as a subblock
- } else {
- tBlockReset(&block); // as a new block
- }
+ SDataBlk dataBlk;
+ tDataBlkReset(&dataBlk);
// info
- block.nRow += pBlockData->nRow;
+ dataBlk.nRow += pBlockData->nRow;
for (int32_t iRow = 0; iRow < pBlockData->nRow; iRow++) {
TSDBKEY key = {.ts = pBlockData->aTSKEY[iRow], .version = pBlockData->aVersion[iRow]};
if (iRow == 0) {
- if (tsdbKeyCmprFn(&block.minKey, &key) > 0) {
- block.minKey = key;
+ if (tsdbKeyCmprFn(&dataBlk.minKey, &key) > 0) {
+ dataBlk.minKey = key;
}
} else {
if (pBlockData->aTSKEY[iRow] == pBlockData->aTSKEY[iRow - 1]) {
- block.hasDup = 1;
+ dataBlk.hasDup = 1;
}
}
- if (iRow == pBlockData->nRow - 1 && tsdbKeyCmprFn(&block.maxKey, &key) < 0) {
- block.maxKey = key;
+ if (iRow == pBlockData->nRow - 1 && tsdbKeyCmprFn(&dataBlk.maxKey, &key) < 0) {
+ dataBlk.maxKey = key;
}
- block.minVer = TMIN(block.minVer, key.version);
- block.maxVer = TMAX(block.maxVer, key.version);
+ dataBlk.minVer = TMIN(dataBlk.minVer, key.version);
+ dataBlk.maxVer = TMAX(dataBlk.maxVer, key.version);
}
// write
- block.nSubBlock++;
- code = tsdbWriteBlockData(pCommitter->dWriter.pWriter, pBlockData, &block.aSubBlock[block.nSubBlock - 1],
- ((block.nSubBlock == 1) && !block.hasDup) ? &block.smaInfo : NULL, pCommitter->cmprAlg, 0);
+ dataBlk.nSubBlock++;
+ code = tsdbWriteBlockData(pWriter, pBlockData, &dataBlk.aSubBlock[dataBlk.nSubBlock - 1],
+ ((dataBlk.nSubBlock == 1) && !dataBlk.hasDup) ? &dataBlk.smaInfo : NULL, cmprAlg, 0);
if (code) goto _err;
- // put SBlock
- code = tMapDataPutItem(&pCommitter->dWriter.mBlock, &block, tPutBlock);
+ // put SDataBlk
+ code = tMapDataPutItem(mDataBlk, &dataBlk, tPutDataBlk);
if (code) goto _err;
// clear
@@ -549,39 +598,38 @@ static int32_t tsdbCommitDataBlock(SCommitter *pCommitter, SBlock *pBlock) {
return code;
_err:
- tsdbError("vgId:%d tsdb commit data block failed since %s", TD_VID(pCommitter->pTsdb->pVnode), tstrerror(code));
+ tsdbError("vgId:%d tsdb commit data block failed since %s", TD_VID(pWriter->pTsdb->pVnode), tstrerror(code));
return code;
}
-static int32_t tsdbCommitLastBlock(SCommitter *pCommitter) {
- int32_t code = 0;
- SBlockL blockL;
- SBlockData *pBlockData = &pCommitter->dWriter.bDatal;
+int32_t tsdbWriteSttBlock(SDataFWriter *pWriter, SBlockData *pBlockData, SArray *aSttBlk, int8_t cmprAlg) {
+ int32_t code = 0;
+ SSttBlk sstBlk;
- ASSERT(pBlockData->nRow > 0);
+ if (pBlockData->nRow == 0) return code;
// info
- blockL.suid = pBlockData->suid;
- blockL.nRow = pBlockData->nRow;
- blockL.minKey = TSKEY_MAX;
- blockL.maxKey = TSKEY_MIN;
- blockL.minVer = VERSION_MAX;
- blockL.maxVer = VERSION_MIN;
+ sstBlk.suid = pBlockData->suid;
+ sstBlk.nRow = pBlockData->nRow;
+ sstBlk.minKey = TSKEY_MAX;
+ sstBlk.maxKey = TSKEY_MIN;
+ sstBlk.minVer = VERSION_MAX;
+ sstBlk.maxVer = VERSION_MIN;
for (int32_t iRow = 0; iRow < pBlockData->nRow; iRow++) {
- blockL.minKey = TMIN(blockL.minKey, pBlockData->aTSKEY[iRow]);
- blockL.maxKey = TMAX(blockL.maxKey, pBlockData->aTSKEY[iRow]);
- blockL.minVer = TMIN(blockL.minVer, pBlockData->aVersion[iRow]);
- blockL.maxVer = TMAX(blockL.maxVer, pBlockData->aVersion[iRow]);
+ sstBlk.minKey = TMIN(sstBlk.minKey, pBlockData->aTSKEY[iRow]);
+ sstBlk.maxKey = TMAX(sstBlk.maxKey, pBlockData->aTSKEY[iRow]);
+ sstBlk.minVer = TMIN(sstBlk.minVer, pBlockData->aVersion[iRow]);
+ sstBlk.maxVer = TMAX(sstBlk.maxVer, pBlockData->aVersion[iRow]);
}
- blockL.minUid = pBlockData->uid ? pBlockData->uid : pBlockData->aUid[0];
- blockL.maxUid = pBlockData->uid ? pBlockData->uid : pBlockData->aUid[pBlockData->nRow - 1];
+ sstBlk.minUid = pBlockData->uid ? pBlockData->uid : pBlockData->aUid[0];
+ sstBlk.maxUid = pBlockData->uid ? pBlockData->uid : pBlockData->aUid[pBlockData->nRow - 1];
// write
- code = tsdbWriteBlockData(pCommitter->dWriter.pWriter, pBlockData, &blockL.bInfo, NULL, pCommitter->cmprAlg, 1);
+ code = tsdbWriteBlockData(pWriter, pBlockData, &sstBlk.bInfo, NULL, cmprAlg, 1);
if (code) goto _err;
- // push SBlockL
- if (taosArrayPush(pCommitter->dWriter.aBlockL, &blockL) == NULL) {
+ // push SSttBlk
+ if (taosArrayPush(aSttBlk, &sstBlk) == NULL) {
code = TSDB_CODE_OUT_OF_MEMORY;
goto _err;
}
@@ -592,773 +640,197 @@ static int32_t tsdbCommitLastBlock(SCommitter *pCommitter) {
return code;
_err:
- tsdbError("vgId:%d tsdb commit last block failed since %s", TD_VID(pCommitter->pTsdb->pVnode), tstrerror(code));
+ tsdbError("vgId:%d tsdb commit last block failed since %s", TD_VID(pWriter->pTsdb->pVnode), tstrerror(code));
return code;
}
-static int32_t tsdbMergeCommitData(SCommitter *pCommitter, STbDataIter *pIter, SBlock *pBlock) {
- int32_t code = 0;
- STbData *pTbData = pIter->pTbData;
- SBlockData *pBlockDataR = &pCommitter->dReader.bData;
- SBlockData *pBlockDataW = &pCommitter->dWriter.bData;
+static int32_t tsdbCommitFileDataEnd(SCommitter *pCommitter) {
+ int32_t code = 0;
- code = tsdbReadDataBlock(pCommitter->dReader.pReader, pBlock, pBlockDataR);
+ // write aBlockIdx
+ code = tsdbWriteBlockIdx(pCommitter->dWriter.pWriter, pCommitter->dWriter.aBlockIdx);
if (code) goto _err;
- tBlockDataClear(pBlockDataW);
- int32_t iRow = 0;
- TSDBROW row;
- TSDBROW *pRow1 = tsdbTbDataIterGet(pIter);
- TSDBROW *pRow2 = &row;
- *pRow2 = tsdbRowFromBlockData(pBlockDataR, iRow);
- while (pRow1 && pRow2) {
- int32_t c = tsdbRowCmprFn(pRow1, pRow2);
-
- if (c < 0) {
- code = tsdbCommitterUpdateRowSchema(pCommitter, pTbData->suid, pTbData->uid, TSDBROW_SVERSION(pRow1));
- if (code) goto _err;
-
- code = tBlockDataAppendRow(pBlockDataW, pRow1, pCommitter->skmRow.pTSchema, pTbData->uid);
- if (code) goto _err;
-
- // next
- tsdbTbDataIterNext(pIter);
- pRow1 = tsdbTbDataIterGet(pIter);
- } else if (c > 0) {
- code = tBlockDataAppendRow(pBlockDataW, pRow2, NULL, pTbData->uid);
- if (code) goto _err;
-
- iRow++;
- if (iRow < pBlockDataR->nRow) {
- *pRow2 = tsdbRowFromBlockData(pBlockDataR, iRow);
- } else {
- pRow2 = NULL;
- }
- } else {
- ASSERT(0);
- }
-
- // check
- if (pBlockDataW->nRow >= pCommitter->maxRow * 4 / 5) {
- code = tsdbCommitDataBlock(pCommitter, NULL);
- if (code) goto _err;
- }
- }
+ // write aSttBlk
+ code = tsdbWriteSttBlk(pCommitter->dWriter.pWriter, pCommitter->dWriter.aSttBlk);
+ if (code) goto _err;
- while (pRow2) {
- code = tBlockDataAppendRow(pBlockDataW, pRow2, NULL, pTbData->uid);
- if (code) goto _err;
+ // update file header
+ code = tsdbUpdateDFileSetHeader(pCommitter->dWriter.pWriter);
+ if (code) goto _err;
- iRow++;
- if (iRow < pBlockDataR->nRow) {
- *pRow2 = tsdbRowFromBlockData(pBlockDataR, iRow);
- } else {
- pRow2 = NULL;
- }
+ // upsert SDFileSet
+ code = tsdbFSUpsertFSet(&pCommitter->fs, &pCommitter->dWriter.pWriter->wSet);
+ if (code) goto _err;
- // check
- if (pBlockDataW->nRow >= pCommitter->maxRow * 4 / 5) {
- code = tsdbCommitDataBlock(pCommitter, NULL);
- if (code) goto _err;
- }
- }
+ // close and sync
+ code = tsdbDataFWriterClose(&pCommitter->dWriter.pWriter, 1);
+ if (code) goto _err;
- // check
- if (pBlockDataW->nRow > 0) {
- code = tsdbCommitDataBlock(pCommitter, NULL);
+ if (pCommitter->dReader.pReader) {
+ code = tsdbDataFReaderClose(&pCommitter->dReader.pReader);
if (code) goto _err;
}
+_exit:
return code;
_err:
- tsdbError("vgId:%d, tsdb merge commit data failed since %s", TD_VID(pCommitter->pTsdb->pVnode), tstrerror(code));
+ tsdbError("vgId:%d, commit file data end failed since %s", TD_VID(pCommitter->pTsdb->pVnode), tstrerror(code));
return code;
}
-static int32_t tsdbCommitTableMemData(SCommitter *pCommitter, STbDataIter *pIter, TSDBKEY toKey) {
- int32_t code = 0;
- STbData *pTbData = pIter->pTbData;
- SBlockData *pBlockData = &pCommitter->dWriter.bData;
-
- tBlockDataClear(pBlockData);
- TSDBROW *pRow = tsdbTbDataIterGet(pIter);
- while (true) {
- if (pRow == NULL) {
- if (pBlockData->nRow > 0) {
- goto _write_block;
- } else {
- break;
- }
- }
-
- // update schema
- code = tsdbCommitterUpdateRowSchema(pCommitter, pTbData->suid, pTbData->uid, TSDBROW_SVERSION(pRow));
- if (code) goto _err;
+static int32_t tsdbMoveCommitData(SCommitter *pCommitter, TABLEID toTable) {
+ int32_t code = 0;
- // append
- code = tBlockDataAppendRow(pBlockData, pRow, pCommitter->skmRow.pTSchema, pTbData->uid);
+ while (pCommitter->dReader.pBlockIdx && tTABLEIDCmprFn(pCommitter->dReader.pBlockIdx, &toTable) < 0) {
+ SBlockIdx blockIdx = *pCommitter->dReader.pBlockIdx;
+ code = tsdbWriteDataBlk(pCommitter->dWriter.pWriter, &pCommitter->dReader.mBlock, &blockIdx);
if (code) goto _err;
- tsdbTbDataIterNext(pIter);
- pRow = tsdbTbDataIterGet(pIter);
- if (pRow) {
- TSDBKEY rowKey = TSDBROW_KEY(pRow);
- if (tsdbKeyCmprFn(&rowKey, &toKey) >= 0) {
- pRow = NULL;
- }
+ if (taosArrayPush(pCommitter->dWriter.aBlockIdx, &blockIdx) == NULL) {
+ code = TSDB_CODE_OUT_OF_MEMORY;
+ goto _err;
}
- if (pBlockData->nRow >= pCommitter->maxRow * 4 / 5) {
- _write_block:
- code = tsdbCommitDataBlock(pCommitter, NULL);
- if (code) goto _err;
- }
+ code = tsdbCommitterNextTableData(pCommitter);
+ if (code) goto _err;
}
return code;
_err:
- tsdbError("vgId:%d, tsdb commit table mem data failed since %s", TD_VID(pCommitter->pTsdb->pVnode), tstrerror(code));
+ tsdbError("vgId:%d tsdb move commit data failed since %s", TD_VID(pCommitter->pTsdb->pVnode), tstrerror(code));
return code;
}
-static int32_t tsdbGetNumOfRowsLessThan(STbDataIter *pIter, TSDBKEY key) {
- int32_t nRow = 0;
-
- STbDataIter iter = *pIter;
- while (true) {
- TSDBROW *pRow = tsdbTbDataIterGet(&iter);
- if (pRow == NULL) break;
+static int32_t tsdbCommitFileDataImpl(SCommitter *pCommitter);
+static int32_t tsdbCommitFileData(SCommitter *pCommitter) {
+ int32_t code = 0;
+ STsdb *pTsdb = pCommitter->pTsdb;
+ SMemTable *pMemTable = pTsdb->imem;
- int32_t c = tsdbKeyCmprFn(&TSDBROW_KEY(pRow), &key);
- if (c < 0) {
- nRow++;
- tsdbTbDataIterNext(&iter);
- } else if (c > 0) {
- break;
- } else {
- ASSERT(0);
- }
- }
+ // commit file data start
+ code = tsdbCommitFileDataStart(pCommitter);
+ if (code) goto _err;
- return nRow;
-}
+ // impl
+ code = tsdbCommitFileDataImpl(pCommitter);
+ if (code) goto _err;
-static int32_t tsdbMergeAsSubBlock(SCommitter *pCommitter, STbDataIter *pIter, SBlock *pBlock) {
- int32_t code = 0;
- STbData *pTbData = pIter->pTbData;
- SBlockData *pBlockData = &pCommitter->dWriter.bData;
+ // commit file data end
+ code = tsdbCommitFileDataEnd(pCommitter);
+ if (code) goto _err;
- tBlockDataClear(pBlockData);
- TSDBROW *pRow = tsdbTbDataIterGet(pIter);
- while (true) {
- if (pRow == NULL) break;
+ return code;
- code = tsdbCommitterUpdateRowSchema(pCommitter, pTbData->suid, pTbData->uid, TSDBROW_SVERSION(pRow));
- if (code) goto _err;
+_err:
+ tsdbError("vgId:%d, commit file data failed since %s", TD_VID(pTsdb->pVnode), tstrerror(code));
+ tsdbDataFReaderClose(&pCommitter->dReader.pReader);
+ tsdbDataFWriterClose(&pCommitter->dWriter.pWriter, 0);
+ return code;
+}
- code = tBlockDataAppendRow(pBlockData, pRow, pCommitter->skmRow.pTSchema, pTbData->uid);
- if (code) goto _err;
+// ----------------------------------------------------------------------------
+static int32_t tsdbStartCommit(STsdb *pTsdb, SCommitter *pCommitter) {
+ int32_t code = 0;
- tsdbTbDataIterNext(pIter);
- pRow = tsdbTbDataIterGet(pIter);
- if (pRow) {
- TSDBKEY rowKey = TSDBROW_KEY(pRow);
- if (tsdbKeyCmprFn(&rowKey, &pBlock->maxKey) > 0) {
- pRow = NULL;
- }
- }
- }
+ memset(pCommitter, 0, sizeof(*pCommitter));
+ ASSERT(pTsdb->mem && pTsdb->imem == NULL);
- ASSERT(pBlockData->nRow > 0 && pBlock->nRow + pBlockData->nRow <= pCommitter->maxRow);
+ taosThreadRwlockWrlock(&pTsdb->rwLock);
+ pTsdb->imem = pTsdb->mem;
+ pTsdb->mem = NULL;
+ taosThreadRwlockUnlock(&pTsdb->rwLock);
- code = tsdbCommitDataBlock(pCommitter, pBlock);
+ pCommitter->pTsdb = pTsdb;
+ pCommitter->commitID = pTsdb->pVnode->state.commitID;
+ pCommitter->minutes = pTsdb->keepCfg.days;
+ pCommitter->precision = pTsdb->keepCfg.precision;
+ pCommitter->minRow = pTsdb->pVnode->config.tsdbCfg.minRows;
+ pCommitter->maxRow = pTsdb->pVnode->config.tsdbCfg.maxRows;
+ pCommitter->cmprAlg = pTsdb->pVnode->config.tsdbCfg.compression;
+ pCommitter->sttTrigger = pTsdb->pVnode->config.sttTrigger;
+ pCommitter->aTbDataP = tsdbMemTableGetTbDataArray(pTsdb->imem);
+ if (pCommitter->aTbDataP == NULL) {
+ code = TSDB_CODE_OUT_OF_MEMORY;
+ goto _err;
+ }
+ code = tsdbFSCopy(pTsdb, &pCommitter->fs);
if (code) goto _err;
return code;
_err:
- tsdbError("vgId:%d, tsdb merge as subblock failed since %s", TD_VID(pCommitter->pTsdb->pVnode), tstrerror(code));
+ tsdbError("vgId:%d, tsdb start commit failed since %s", TD_VID(pTsdb->pVnode), tstrerror(code));
return code;
}
-static int32_t tsdbMergeCommitLast(SCommitter *pCommitter, STbDataIter *pIter) {
- int32_t code = 0;
- STbData *pTbData = pIter->pTbData;
- int32_t nRow = tsdbGetNumOfRowsLessThan(pIter, (TSDBKEY){.ts = pCommitter->maxKey + 1, .version = VERSION_MIN});
+static int32_t tsdbCommitDataStart(SCommitter *pCommitter) {
+ int32_t code = 0;
- if (pCommitter->dReader.pRowInfo && tTABLEIDCmprFn(pTbData, pCommitter->dReader.pRowInfo) == 0) {
- if (pCommitter->dReader.pRowInfo->suid) { // super table
- for (int32_t iRow = pCommitter->dReader.iRow; iRow < pCommitter->dReader.bDatal.nRow; iRow++) {
- if (pTbData->uid != pCommitter->dReader.bDatal.aUid[iRow]) break;
- nRow++;
- }
- } else { // normal table
- ASSERT(pCommitter->dReader.iRow == 0);
- nRow += pCommitter->dReader.bDatal.nRow;
- }
+ // reader
+ pCommitter->dReader.aBlockIdx = taosArrayInit(0, sizeof(SBlockIdx));
+ if (pCommitter->dReader.aBlockIdx == NULL) {
+ code = TSDB_CODE_OUT_OF_MEMORY;
+ goto _exit;
}
- if (nRow == 0) goto _exit;
+ code = tBlockDataCreate(&pCommitter->dReader.bData);
+ if (code) goto _exit;
+
+ // merger
+ for (int32_t iStt = 0; iStt < TSDB_MAX_STT_TRIGGER; iStt++) {
+ SDataIter *pIter = &pCommitter->aDataIter[iStt];
+ pIter->aSttBlk = taosArrayInit(0, sizeof(SSttBlk));
+ if (pIter->aSttBlk == NULL) {
+ code = TSDB_CODE_OUT_OF_MEMORY;
+ goto _exit;
+ }
- TSDBROW *pRow = tsdbTbDataIterGet(pIter);
- if (pRow && TSDBROW_TS(pRow) > pCommitter->maxKey) {
- pRow = NULL;
+ code = tBlockDataCreate(&pIter->bData);
+ if (code) goto _exit;
}
- SRowInfo *pRowInfo = pCommitter->dReader.pRowInfo;
- if (pRowInfo && pRowInfo->uid != pTbData->uid) {
- pRowInfo = NULL;
+ // writer
+ pCommitter->dWriter.aBlockIdx = taosArrayInit(0, sizeof(SBlockIdx));
+ if (pCommitter->dWriter.aBlockIdx == NULL) {
+ code = TSDB_CODE_OUT_OF_MEMORY;
+ goto _exit;
}
- while (nRow) {
- SBlockData *pBlockData;
- int8_t toData;
+ pCommitter->dWriter.aSttBlk = taosArrayInit(0, sizeof(SSttBlk));
+ if (pCommitter->dWriter.aSttBlk == NULL) {
+ code = TSDB_CODE_OUT_OF_MEMORY;
+ goto _exit;
+ }
- if (nRow < pCommitter->minRow) { // to .last
- toData = 0;
- pBlockData = &pCommitter->dWriter.bDatal;
+ code = tBlockDataCreate(&pCommitter->dWriter.bData);
+ if (code) goto _exit;
- // commit and reset block data schema if need
- // QUESTION: Is there a case that pBlockData->nRow == 0 but need to change schema ?
- if (pBlockData->suid || pBlockData->uid) {
- if (pBlockData->suid != pTbData->suid || pBlockData->suid == 0) {
- if (pBlockData->nRow > 0) {
- code = tsdbCommitLastBlock(pCommitter);
- if (code) goto _err;
- }
+ code = tBlockDataCreate(&pCommitter->dWriter.bDatal);
+ if (code) goto _exit;
- tBlockDataReset(pBlockData);
- }
- }
+_exit:
+ return code;
+}
- // set block data schema if need
- if (pBlockData->suid == 0 && pBlockData->uid == 0) {
- code = tsdbCommitterUpdateTableSchema(pCommitter, pTbData->suid, pTbData->uid);
- if (code) goto _err;
+static void tsdbCommitDataEnd(SCommitter *pCommitter) {
+ // reader
+ taosArrayDestroy(pCommitter->dReader.aBlockIdx);
+ tMapDataClear(&pCommitter->dReader.mBlock);
+ tBlockDataDestroy(&pCommitter->dReader.bData, 1);
- code =
- tBlockDataInit(pBlockData, pTbData->suid, pTbData->suid ? 0 : pTbData->uid, pCommitter->skmTable.pTSchema);
- if (code) goto _err;
- }
-
- if (pBlockData->nRow + nRow > pCommitter->maxRow) {
- code = tsdbCommitLastBlock(pCommitter);
- if (code) goto _err;
- }
- } else { // to .data
- toData = 1;
- pBlockData = &pCommitter->dWriter.bData;
- ASSERT(pBlockData->nRow == 0);
- }
-
- while (pRow && pRowInfo) {
- int32_t c = tsdbRowCmprFn(pRow, &pRowInfo->row);
- if (c < 0) {
- code = tsdbCommitterUpdateRowSchema(pCommitter, pTbData->suid, pTbData->uid, TSDBROW_SVERSION(pRow));
- if (code) goto _err;
-
- code = tBlockDataAppendRow(pBlockData, pRow, pCommitter->skmRow.pTSchema, pTbData->uid);
- if (code) goto _err;
-
- tsdbTbDataIterNext(pIter);
- pRow = tsdbTbDataIterGet(pIter);
- if (pRow && TSDBROW_TS(pRow) > pCommitter->maxKey) {
- pRow = NULL;
- }
- } else if (c > 0) {
- code = tBlockDataAppendRow(pBlockData, &pRowInfo->row, NULL, pTbData->uid);
- if (code) goto _err;
-
- code = tsdbCommitterNextLastRow(pCommitter);
- if (code) goto _err;
-
- pRowInfo = pCommitter->dReader.pRowInfo;
- if (pRowInfo && pRowInfo->uid != pTbData->uid) {
- pRowInfo = NULL;
- }
- } else {
- ASSERT(0);
- }
-
- nRow--;
- if (toData) {
- if (nRow == 0 || pBlockData->nRow >= pCommitter->maxRow * 4 / 5) {
- code = tsdbCommitDataBlock(pCommitter, NULL);
- if (code) goto _err;
- goto _outer_break;
- }
- }
- }
-
- while (pRow) {
- code = tsdbCommitterUpdateRowSchema(pCommitter, pTbData->suid, pTbData->uid, TSDBROW_SVERSION(pRow));
- if (code) goto _err;
-
- code = tBlockDataAppendRow(pBlockData, pRow, pCommitter->skmRow.pTSchema, pTbData->uid);
- if (code) goto _err;
-
- tsdbTbDataIterNext(pIter);
- pRow = tsdbTbDataIterGet(pIter);
- if (pRow && TSDBROW_TS(pRow) > pCommitter->maxKey) {
- pRow = NULL;
- }
-
- nRow--;
- if (toData) {
- if (nRow == 0 || pBlockData->nRow >= pCommitter->maxRow * 4 / 5) {
- code = tsdbCommitDataBlock(pCommitter, NULL);
- if (code) goto _err;
- goto _outer_break;
- }
- }
- }
-
- while (pRowInfo) {
- code = tBlockDataAppendRow(pBlockData, &pRowInfo->row, NULL, pTbData->uid);
- if (code) goto _err;
-
- code = tsdbCommitterNextLastRow(pCommitter);
- if (code) goto _err;
-
- pRowInfo = pCommitter->dReader.pRowInfo;
- if (pRowInfo && pRowInfo->uid != pTbData->uid) {
- pRowInfo = NULL;
- }
-
- nRow--;
- if (toData) {
- if (nRow == 0 || pBlockData->nRow >= pCommitter->maxRow * 4 / 5) {
- code = tsdbCommitDataBlock(pCommitter, NULL);
- if (code) goto _err;
- goto _outer_break;
- }
- }
- }
-
- _outer_break:
- ASSERT(nRow >= 0);
- }
-
-_exit:
- return code;
-
-_err:
- tsdbError("vgId:%d tsdb merge commit last failed since %s", TD_VID(pCommitter->pTsdb->pVnode), tstrerror(code));
- return code;
-}
-
-static int32_t tsdbCommitTableData(SCommitter *pCommitter, STbData *pTbData) {
- int32_t code = 0;
-
- ASSERT(pCommitter->dReader.pBlockIdx == NULL || tTABLEIDCmprFn(pCommitter->dReader.pBlockIdx, pTbData) >= 0);
- ASSERT(pCommitter->dReader.pRowInfo == NULL || tTABLEIDCmprFn(pCommitter->dReader.pRowInfo, pTbData) >= 0);
-
- // merge commit table data
- STbDataIter iter = {0};
- STbDataIter *pIter = &iter;
- TSDBROW *pRow;
-
- tsdbTbDataIterOpen(pTbData, &(TSDBKEY){.ts = pCommitter->minKey, .version = VERSION_MIN}, 0, pIter);
- pRow = tsdbTbDataIterGet(pIter);
- if (pRow && TSDBROW_TS(pRow) > pCommitter->maxKey) {
- pRow = NULL;
- }
-
- if (pRow == NULL) {
- if (pCommitter->dReader.pBlockIdx && tTABLEIDCmprFn(pCommitter->dReader.pBlockIdx, pTbData) == 0) {
- SBlockIdx blockIdx = {.suid = pTbData->suid, .uid = pTbData->uid};
- code = tsdbWriteBlock(pCommitter->dWriter.pWriter, &pCommitter->dReader.mBlock, &blockIdx);
- if (code) goto _err;
-
- if (taosArrayPush(pCommitter->dWriter.aBlockIdx, &blockIdx) == NULL) {
- code = TSDB_CODE_OUT_OF_MEMORY;
- goto _err;
- }
- }
-
- goto _exit;
- }
-
- int32_t iBlock = 0;
- SBlock block;
- SBlock *pBlock = █
- if (pCommitter->dReader.pBlockIdx && tTABLEIDCmprFn(pTbData, pCommitter->dReader.pBlockIdx) == 0) {
- tMapDataGetItemByIdx(&pCommitter->dReader.mBlock, iBlock, pBlock, tGetBlock);
- } else {
- pBlock = NULL;
- }
-
- code = tsdbCommitterUpdateTableSchema(pCommitter, pTbData->suid, pTbData->uid);
- if (code) goto _err;
-
- tMapDataReset(&pCommitter->dWriter.mBlock);
- code = tBlockDataInit(&pCommitter->dReader.bData, pTbData->suid, pTbData->uid, pCommitter->skmTable.pTSchema);
- if (code) goto _err;
- code = tBlockDataInit(&pCommitter->dWriter.bData, pTbData->suid, pTbData->uid, pCommitter->skmTable.pTSchema);
- if (code) goto _err;
-
- // .data merge
- while (pBlock && pRow) {
- int32_t c = tBlockCmprFn(pBlock, &(SBlock){.minKey = TSDBROW_KEY(pRow), .maxKey = TSDBROW_KEY(pRow)});
- if (c < 0) { // disk
- code = tMapDataPutItem(&pCommitter->dWriter.mBlock, pBlock, tPutBlock);
- if (code) goto _err;
-
- // next
- iBlock++;
- if (iBlock < pCommitter->dReader.mBlock.nItem) {
- tMapDataGetItemByIdx(&pCommitter->dReader.mBlock, iBlock, pBlock, tGetBlock);
- } else {
- pBlock = NULL;
- }
- } else if (c > 0) { // memory
- code = tsdbCommitTableMemData(pCommitter, pIter, pBlock->minKey);
- if (code) goto _err;
-
- // next
- pRow = tsdbTbDataIterGet(pIter);
- if (pRow && TSDBROW_TS(pRow) > pCommitter->maxKey) {
- pRow = NULL;
- }
- } else { // merge
- int32_t nOvlp = tsdbGetNumOfRowsLessThan(pIter, pBlock->maxKey);
-
- ASSERT(nOvlp > 0);
-
- if (pBlock->nRow + nOvlp <= pCommitter->maxRow && pBlock->nSubBlock < TSDB_MAX_SUBBLOCKS) {
- code = tsdbMergeAsSubBlock(pCommitter, pIter, pBlock);
- if (code) goto _err;
- } else {
- code = tsdbMergeCommitData(pCommitter, pIter, pBlock);
- if (code) goto _err;
- }
-
- // next
- pRow = tsdbTbDataIterGet(pIter);
- if (pRow && TSDBROW_TS(pRow) > pCommitter->maxKey) {
- pRow = NULL;
- }
- iBlock++;
- if (iBlock < pCommitter->dReader.mBlock.nItem) {
- tMapDataGetItemByIdx(&pCommitter->dReader.mBlock, iBlock, pBlock, tGetBlock);
- } else {
- pBlock = NULL;
- }
- }
- }
-
- while (pBlock) {
- code = tMapDataPutItem(&pCommitter->dWriter.mBlock, pBlock, tPutBlock);
- if (code) goto _err;
-
- // next
- iBlock++;
- if (iBlock < pCommitter->dReader.mBlock.nItem) {
- tMapDataGetItemByIdx(&pCommitter->dReader.mBlock, iBlock, pBlock, tGetBlock);
- } else {
- pBlock = NULL;
- }
- }
-
- // .data append and .last merge
- code = tsdbMergeCommitLast(pCommitter, pIter);
- if (code) goto _err;
-
- // end
- if (pCommitter->dWriter.mBlock.nItem > 0) {
- SBlockIdx blockIdx = {.suid = pTbData->suid, .uid = pTbData->uid};
- code = tsdbWriteBlock(pCommitter->dWriter.pWriter, &pCommitter->dWriter.mBlock, &blockIdx);
- if (code) goto _err;
-
- if (taosArrayPush(pCommitter->dWriter.aBlockIdx, &blockIdx) == NULL) {
- code = TSDB_CODE_OUT_OF_MEMORY;
- goto _err;
- }
- }
-
-_exit:
- pRow = tsdbTbDataIterGet(pIter);
- if (pRow) {
- pCommitter->nextKey = TMIN(pCommitter->nextKey, TSDBROW_TS(pRow));
- }
-
- return code;
-
-_err:
- tsdbError("vgId:%d tsdb commit table data failed since %s", TD_VID(pCommitter->pTsdb->pVnode), tstrerror(code));
- return code;
-}
-
-static int32_t tsdbCommitFileDataEnd(SCommitter *pCommitter) {
- int32_t code = 0;
-
- // write aBlockIdx
- code = tsdbWriteBlockIdx(pCommitter->dWriter.pWriter, pCommitter->dWriter.aBlockIdx);
- if (code) goto _err;
-
- // write aBlockL
- code = tsdbWriteBlockL(pCommitter->dWriter.pWriter, pCommitter->dWriter.aBlockL);
- if (code) goto _err;
-
- // update file header
- code = tsdbUpdateDFileSetHeader(pCommitter->dWriter.pWriter);
- if (code) goto _err;
-
- // upsert SDFileSet
- code = tsdbFSUpsertFSet(&pCommitter->fs, &pCommitter->dWriter.pWriter->wSet);
- if (code) goto _err;
-
- // close and sync
- code = tsdbDataFWriterClose(&pCommitter->dWriter.pWriter, 1);
- if (code) goto _err;
-
- if (pCommitter->dReader.pReader) {
- code = tsdbDataFReaderClose(&pCommitter->dReader.pReader);
- if (code) goto _err;
- }
-
-_exit:
- return code;
-
-_err:
- tsdbError("vgId:%d, commit file data end failed since %s", TD_VID(pCommitter->pTsdb->pVnode), tstrerror(code));
- return code;
-}
-
-static int32_t tsdbMoveCommitData(SCommitter *pCommitter, TABLEID toTable) {
- int32_t code = 0;
-
- // .data
- while (true) {
- if (pCommitter->dReader.pBlockIdx == NULL || tTABLEIDCmprFn(pCommitter->dReader.pBlockIdx, &toTable) >= 0) break;
-
- SBlockIdx blockIdx = *pCommitter->dReader.pBlockIdx;
- code = tsdbWriteBlock(pCommitter->dWriter.pWriter, &pCommitter->dReader.mBlock, &blockIdx);
- if (code) goto _err;
-
- if (taosArrayPush(pCommitter->dWriter.aBlockIdx, &blockIdx) == NULL) {
- code = TSDB_CODE_OUT_OF_MEMORY;
- goto _err;
- }
-
- code = tsdbCommitterNextTableData(pCommitter);
- if (code) goto _err;
- }
-
- // .last
- while (true) {
- if (pCommitter->dReader.pRowInfo == NULL || tTABLEIDCmprFn(pCommitter->dReader.pRowInfo, &toTable) >= 0) break;
-
- SBlockData *pBlockDataR = &pCommitter->dReader.bDatal;
- SBlockData *pBlockDataW = &pCommitter->dWriter.bDatal;
- tb_uid_t suid = pCommitter->dReader.pRowInfo->suid;
- tb_uid_t uid = pCommitter->dReader.pRowInfo->uid;
-
- ASSERT((pBlockDataR->suid && !pBlockDataR->uid) || (!pBlockDataR->suid && pBlockDataR->uid));
- ASSERT(pBlockDataR->nRow > 0);
-
- // commit and reset block data schema if need
- if (pBlockDataW->suid || pBlockDataW->uid) {
- if (pBlockDataW->suid != suid || pBlockDataW->suid == 0) {
- if (pBlockDataW->nRow > 0) {
- code = tsdbCommitLastBlock(pCommitter);
- if (code) goto _err;
- }
- tBlockDataReset(pBlockDataW);
- }
- }
-
- // set block data schema if need
- if (pBlockDataW->suid == 0 && pBlockDataW->uid == 0) {
- code = tsdbCommitterUpdateTableSchema(pCommitter, suid, uid);
- if (code) goto _err;
-
- code = tBlockDataInit(pBlockDataW, suid, suid ? 0 : uid, pCommitter->skmTable.pTSchema);
- if (code) goto _err;
- }
-
- // check if it can make sure that one table data in one block
- int32_t nRow = 0;
- if (pBlockDataR->suid) {
- int32_t iRow = pCommitter->dReader.iRow;
- while ((iRow < pBlockDataR->nRow) && (pBlockDataR->aUid[iRow] == uid)) {
- nRow++;
- iRow++;
- }
- } else {
- ASSERT(pCommitter->dReader.iRow == 0);
- nRow = pBlockDataR->nRow;
- }
-
- ASSERT(nRow > 0 && nRow < pCommitter->minRow);
-
- if (pBlockDataW->nRow + nRow > pCommitter->maxRow) {
- ASSERT(pBlockDataW->nRow > 0);
-
- code = tsdbCommitLastBlock(pCommitter);
- if (code) goto _err;
- }
-
- while (nRow > 0) {
- code = tBlockDataAppendRow(pBlockDataW, &pCommitter->dReader.pRowInfo->row, NULL, uid);
- if (code) goto _err;
-
- code = tsdbCommitterNextLastRow(pCommitter);
- if (code) goto _err;
-
- nRow--;
- }
- }
-
- return code;
-
-_err:
- tsdbError("vgId:%d tsdb move commit data failed since %s", TD_VID(pCommitter->pTsdb->pVnode), tstrerror(code));
- return code;
-}
-
-static int32_t tsdbCommitFileData(SCommitter *pCommitter) {
- int32_t code = 0;
- STsdb *pTsdb = pCommitter->pTsdb;
- SMemTable *pMemTable = pTsdb->imem;
-
- // commit file data start
- code = tsdbCommitFileDataStart(pCommitter);
- if (code) goto _err;
-
- // commit file data impl
- for (int32_t iTbData = 0; iTbData < taosArrayGetSize(pCommitter->aTbDataP); iTbData++) {
- STbData *pTbData = (STbData *)taosArrayGetP(pCommitter->aTbDataP, iTbData);
-
- // move commit until current (suid, uid)
- code = tsdbMoveCommitData(pCommitter, *(TABLEID *)pTbData);
- if (code) goto _err;
-
- // commit current table data
- code = tsdbCommitTableData(pCommitter, pTbData);
- if (code) goto _err;
-
- // move next reader table data if need
- if (pCommitter->dReader.pBlockIdx && tTABLEIDCmprFn(pTbData, pCommitter->dReader.pBlockIdx) == 0) {
- code = tsdbCommitterNextTableData(pCommitter);
- if (code) goto _err;
- }
- }
-
- code = tsdbMoveCommitData(pCommitter, (TABLEID){.suid = INT64_MAX, .uid = INT64_MAX});
- if (code) goto _err;
-
- if (pCommitter->dWriter.bDatal.nRow > 0) {
- code = tsdbCommitLastBlock(pCommitter);
- if (code) goto _err;
- }
-
- // commit file data end
- code = tsdbCommitFileDataEnd(pCommitter);
- if (code) goto _err;
-
- return code;
-
-_err:
- tsdbError("vgId:%d, commit file data failed since %s", TD_VID(pTsdb->pVnode), tstrerror(code));
- tsdbDataFReaderClose(&pCommitter->dReader.pReader);
- tsdbDataFWriterClose(&pCommitter->dWriter.pWriter, 0);
- return code;
-}
-
-// ----------------------------------------------------------------------------
-static int32_t tsdbStartCommit(STsdb *pTsdb, SCommitter *pCommitter) {
- int32_t code = 0;
-
- memset(pCommitter, 0, sizeof(*pCommitter));
- ASSERT(pTsdb->mem && pTsdb->imem == NULL);
-
- taosThreadRwlockWrlock(&pTsdb->rwLock);
- pTsdb->imem = pTsdb->mem;
- pTsdb->mem = NULL;
- taosThreadRwlockUnlock(&pTsdb->rwLock);
-
- pCommitter->pTsdb = pTsdb;
- pCommitter->commitID = pTsdb->pVnode->state.commitID;
- pCommitter->minutes = pTsdb->keepCfg.days;
- pCommitter->precision = pTsdb->keepCfg.precision;
- pCommitter->minRow = pTsdb->pVnode->config.tsdbCfg.minRows;
- pCommitter->maxRow = pTsdb->pVnode->config.tsdbCfg.maxRows;
- pCommitter->cmprAlg = pTsdb->pVnode->config.tsdbCfg.compression;
- pCommitter->aTbDataP = tsdbMemTableGetTbDataArray(pTsdb->imem);
- if (pCommitter->aTbDataP == NULL) {
- code = TSDB_CODE_OUT_OF_MEMORY;
- goto _err;
- }
-
- code = tsdbFSCopy(pTsdb, &pCommitter->fs);
- if (code) goto _err;
-
- return code;
-
-_err:
- tsdbError("vgId:%d, tsdb start commit failed since %s", TD_VID(pTsdb->pVnode), tstrerror(code));
- return code;
-}
-
-static int32_t tsdbCommitDataStart(SCommitter *pCommitter) {
- int32_t code = 0;
-
- // Reader
- pCommitter->dReader.aBlockIdx = taosArrayInit(0, sizeof(SBlockIdx));
- if (pCommitter->dReader.aBlockIdx == NULL) {
- code = TSDB_CODE_OUT_OF_MEMORY;
- goto _exit;
- }
-
- code = tBlockDataCreate(&pCommitter->dReader.bData);
- if (code) goto _exit;
-
- pCommitter->dReader.aBlockL = taosArrayInit(0, sizeof(SBlockL));
- if (pCommitter->dReader.aBlockL == NULL) {
- code = TSDB_CODE_OUT_OF_MEMORY;
- goto _exit;
- }
-
- code = tBlockDataCreate(&pCommitter->dReader.bDatal);
- if (code) goto _exit;
-
- // Writer
- pCommitter->dWriter.aBlockIdx = taosArrayInit(0, sizeof(SBlockIdx));
- if (pCommitter->dWriter.aBlockIdx == NULL) {
- code = TSDB_CODE_OUT_OF_MEMORY;
- goto _exit;
- }
-
- pCommitter->dWriter.aBlockL = taosArrayInit(0, sizeof(SBlockL));
- if (pCommitter->dWriter.aBlockL == NULL) {
- code = TSDB_CODE_OUT_OF_MEMORY;
- goto _exit;
- }
-
- code = tBlockDataCreate(&pCommitter->dWriter.bData);
- if (code) goto _exit;
-
- code = tBlockDataCreate(&pCommitter->dWriter.bDatal);
- if (code) goto _exit;
-
-_exit:
- return code;
-}
-
-static void tsdbCommitDataEnd(SCommitter *pCommitter) {
- // Reader
- taosArrayDestroy(pCommitter->dReader.aBlockIdx);
- tMapDataClear(&pCommitter->dReader.mBlock);
- tBlockDataDestroy(&pCommitter->dReader.bData, 1);
- taosArrayDestroy(pCommitter->dReader.aBlockL);
- tBlockDataDestroy(&pCommitter->dReader.bDatal, 1);
+ // merger
+ for (int32_t iStt = 0; iStt < TSDB_MAX_STT_TRIGGER; iStt++) {
+ SDataIter *pIter = &pCommitter->aDataIter[iStt];
+ taosArrayDestroy(pIter->aSttBlk);
+ tBlockDataDestroy(&pIter->bData, 1);
+ }
- // Writer
+ // writer
taosArrayDestroy(pCommitter->dWriter.aBlockIdx);
- taosArrayDestroy(pCommitter->dWriter.aBlockL);
+ taosArrayDestroy(pCommitter->dWriter.aSttBlk);
tMapDataClear(&pCommitter->dWriter.mBlock);
tBlockDataDestroy(&pCommitter->dWriter.bData, 1);
tBlockDataDestroy(&pCommitter->dWriter.bDatal, 1);
@@ -1389,7 +861,7 @@ static int32_t tsdbCommitData(SCommitter *pCommitter) {
tsdbCommitDataEnd(pCommitter);
_exit:
- tsdbDebug("vgId:%d, commit data done, nRow:%" PRId64, TD_VID(pTsdb->pVnode), pMemTable->nRow);
+ tsdbInfo("vgId:%d, commit data done, nRow:%" PRId64, TD_VID(pTsdb->pVnode), pMemTable->nRow);
return code;
_err:
@@ -1515,6 +987,11 @@ static int32_t tsdbEndCommit(SCommitter *pCommitter, int32_t eno) {
tsdbFSDestroy(&pCommitter->fs);
taosArrayDestroy(pCommitter->aTbDataP);
+ // if (pCommitter->toMerge) {
+ // code = tsdbMerge(pTsdb);
+ // if (code) goto _err;
+ // }
+
tsdbInfo("vgId:%d, tsdb end commit", TD_VID(pTsdb->pVnode));
return code;
@@ -1522,3 +999,474 @@ _err:
tsdbError("vgId:%d, tsdb end commit failed since %s", TD_VID(pTsdb->pVnode), tstrerror(code));
return code;
}
+
+// ================================================================================
+
+static FORCE_INLINE SRowInfo *tsdbGetCommitRow(SCommitter *pCommitter) {
+ return (pCommitter->pIter) ? &pCommitter->pIter->r : NULL;
+}
+
+static int32_t tsdbNextCommitRow(SCommitter *pCommitter) {
+ int32_t code = 0;
+
+ if (pCommitter->pIter) {
+ SDataIter *pIter = pCommitter->pIter;
+ if (pCommitter->pIter->type == MEMORY_DATA_ITER) { // memory
+ tsdbTbDataIterNext(&pIter->iter);
+ TSDBROW *pRow = tsdbTbDataIterGet(&pIter->iter);
+ while (true) {
+ if (pRow && TSDBROW_TS(pRow) > pCommitter->maxKey) {
+ pCommitter->nextKey = TMIN(pCommitter->nextKey, TSDBROW_TS(pRow));
+ pRow = NULL;
+ }
+
+ if (pRow) {
+ pIter->r.suid = pIter->iter.pTbData->suid;
+ pIter->r.uid = pIter->iter.pTbData->uid;
+ pIter->r.row = *pRow;
+ break;
+ }
+
+ pIter->iTbDataP++;
+ if (pIter->iTbDataP < taosArrayGetSize(pCommitter->aTbDataP)) {
+ STbData *pTbData = (STbData *)taosArrayGetP(pCommitter->aTbDataP, pIter->iTbDataP);
+ TSDBKEY keyFrom = {.ts = pCommitter->minKey, .version = VERSION_MIN};
+ tsdbTbDataIterOpen(pTbData, &keyFrom, 0, &pIter->iter);
+ pRow = tsdbTbDataIterGet(&pIter->iter);
+ continue;
+ } else {
+ pCommitter->pIter = NULL;
+ break;
+ }
+ }
+ } else if (pCommitter->pIter->type == STT_DATA_ITER) { // last file
+ pIter->iRow++;
+ if (pIter->iRow < pIter->bData.nRow) {
+ pIter->r.uid = pIter->bData.uid ? pIter->bData.uid : pIter->bData.aUid[pIter->iRow];
+ pIter->r.row = tsdbRowFromBlockData(&pIter->bData, pIter->iRow);
+ } else {
+ pIter->iSttBlk++;
+ if (pIter->iSttBlk < taosArrayGetSize(pIter->aSttBlk)) {
+ SSttBlk *pSttBlk = (SSttBlk *)taosArrayGet(pIter->aSttBlk, pIter->iSttBlk);
+
+ code = tsdbReadSttBlockEx(pCommitter->dReader.pReader, pIter->iStt, pSttBlk, &pIter->bData);
+ if (code) goto _exit;
+
+ pIter->iRow = 0;
+ pIter->r.suid = pIter->bData.suid;
+ pIter->r.uid = pIter->bData.uid ? pIter->bData.uid : pIter->bData.aUid[0];
+ pIter->r.row = tsdbRowFromBlockData(&pIter->bData, 0);
+ } else {
+ pCommitter->pIter = NULL;
+ }
+ }
+ } else {
+ ASSERT(0);
+ }
+
+ // compare with min in RB Tree
+ pIter = (SDataIter *)tRBTreeMin(&pCommitter->rbt);
+ if (pCommitter->pIter && pIter) {
+ int32_t c = tRowInfoCmprFn(&pCommitter->pIter->r, &pIter->r);
+ if (c > 0) {
+ tRBTreePut(&pCommitter->rbt, (SRBTreeNode *)pCommitter->pIter);
+ pCommitter->pIter = NULL;
+ } else {
+ ASSERT(c);
+ }
+ }
+ }
+
+ if (pCommitter->pIter == NULL) {
+ pCommitter->pIter = (SDataIter *)tRBTreeMin(&pCommitter->rbt);
+ if (pCommitter->pIter) {
+ tRBTreeDrop(&pCommitter->rbt, (SRBTreeNode *)pCommitter->pIter);
+ }
+ }
+
+_exit:
+ return code;
+}
+
+static int32_t tsdbCommitAheadBlock(SCommitter *pCommitter, SDataBlk *pDataBlk) {
+ int32_t code = 0;
+ SBlockData *pBlockData = &pCommitter->dWriter.bData;
+ SRowInfo *pRowInfo = tsdbGetCommitRow(pCommitter);
+ TABLEID id = {.suid = pRowInfo->suid, .uid = pRowInfo->uid};
+
+ tBlockDataClear(pBlockData);
+ while (pRowInfo) {
+ ASSERT(pRowInfo->row.type == 0);
+ code = tsdbCommitterUpdateRowSchema(pCommitter, id.suid, id.uid, TSDBROW_SVERSION(&pRowInfo->row));
+ if (code) goto _err;
+
+ code = tBlockDataAppendRow(pBlockData, &pRowInfo->row, pCommitter->skmRow.pTSchema, id.uid);
+ if (code) goto _err;
+
+ code = tsdbNextCommitRow(pCommitter);
+ if (code) goto _err;
+
+ pRowInfo = tsdbGetCommitRow(pCommitter);
+ if (pRowInfo) {
+ if (pRowInfo->suid != id.suid || pRowInfo->uid != id.uid) {
+ pRowInfo = NULL;
+ } else {
+ TSDBKEY tKey = TSDBROW_KEY(&pRowInfo->row);
+ if (tsdbKeyCmprFn(&tKey, &pDataBlk->minKey) >= 0) pRowInfo = NULL;
+ }
+ }
+
+ if (pBlockData->nRow >= pCommitter->maxRow) {
+ code =
+ tsdbWriteDataBlock(pCommitter->dWriter.pWriter, pBlockData, &pCommitter->dWriter.mBlock, pCommitter->cmprAlg);
+ if (code) goto _err;
+ }
+ }
+
+ code = tsdbWriteDataBlock(pCommitter->dWriter.pWriter, pBlockData, &pCommitter->dWriter.mBlock, pCommitter->cmprAlg);
+ if (code) goto _err;
+
+ return code;
+
+_err:
+ tsdbError("vgId:%d, tsdb commit ahead block failed since %s", TD_VID(pCommitter->pTsdb->pVnode), tstrerror(code));
+ return code;
+}
+
+static int32_t tsdbCommitMergeBlock(SCommitter *pCommitter, SDataBlk *pDataBlk) {
+ int32_t code = 0;
+ SRowInfo *pRowInfo = tsdbGetCommitRow(pCommitter);
+ TABLEID id = {.suid = pRowInfo->suid, .uid = pRowInfo->uid};
+ SBlockData *pBDataR = &pCommitter->dReader.bData;
+ SBlockData *pBDataW = &pCommitter->dWriter.bData;
+
+ code = tsdbReadDataBlock(pCommitter->dReader.pReader, pDataBlk, pBDataR);
+ if (code) goto _err;
+
+ tBlockDataClear(pBDataW);
+ int32_t iRow = 0;
+ TSDBROW row = tsdbRowFromBlockData(pBDataR, 0);
+ TSDBROW *pRow = &row;
+
+ while (pRow && pRowInfo) {
+ int32_t c = tsdbRowCmprFn(pRow, &pRowInfo->row);
+ if (c < 0) {
+ code = tBlockDataAppendRow(pBDataW, pRow, NULL, id.uid);
+ if (code) goto _err;
+
+ iRow++;
+ if (iRow < pBDataR->nRow) {
+ row = tsdbRowFromBlockData(pBDataR, iRow);
+ } else {
+ pRow = NULL;
+ }
+ } else if (c > 0) {
+ ASSERT(pRowInfo->row.type == 0);
+ code = tsdbCommitterUpdateRowSchema(pCommitter, id.suid, id.uid, TSDBROW_SVERSION(&pRowInfo->row));
+ if (code) goto _err;
+
+ code = tBlockDataAppendRow(pBDataW, &pRowInfo->row, pCommitter->skmRow.pTSchema, id.uid);
+ if (code) goto _err;
+
+ code = tsdbNextCommitRow(pCommitter);
+ if (code) goto _err;
+
+ pRowInfo = tsdbGetCommitRow(pCommitter);
+ if (pRowInfo) {
+ if (pRowInfo->suid != id.suid || pRowInfo->uid != id.uid) {
+ pRowInfo = NULL;
+ } else {
+ TSDBKEY tKey = TSDBROW_KEY(&pRowInfo->row);
+ if (tsdbKeyCmprFn(&tKey, &pDataBlk->maxKey) > 0) pRowInfo = NULL;
+ }
+ }
+ } else {
+ ASSERT(0);
+ }
+
+ if (pBDataW->nRow >= pCommitter->maxRow) {
+ code = tsdbWriteDataBlock(pCommitter->dWriter.pWriter, pBDataW, &pCommitter->dWriter.mBlock, pCommitter->cmprAlg);
+ if (code) goto _err;
+ }
+ }
+
+ while (pRow) {
+ code = tBlockDataAppendRow(pBDataW, pRow, NULL, id.uid);
+ if (code) goto _err;
+
+ iRow++;
+ if (iRow < pBDataR->nRow) {
+ row = tsdbRowFromBlockData(pBDataR, iRow);
+ } else {
+ pRow = NULL;
+ }
+
+ if (pBDataW->nRow >= pCommitter->maxRow) {
+ code = tsdbWriteDataBlock(pCommitter->dWriter.pWriter, pBDataW, &pCommitter->dWriter.mBlock, pCommitter->cmprAlg);
+ if (code) goto _err;
+ }
+ }
+
+ code = tsdbWriteDataBlock(pCommitter->dWriter.pWriter, pBDataW, &pCommitter->dWriter.mBlock, pCommitter->cmprAlg);
+ if (code) goto _err;
+
+ return code;
+
+_err:
+ tsdbError("vgId:%d, tsdb commit merge block failed since %s", TD_VID(pCommitter->pTsdb->pVnode), tstrerror(code));
+ return code;
+}
+
+static int32_t tsdbMergeTableData(SCommitter *pCommitter, TABLEID id) {
+ int32_t code = 0;
+ SBlockIdx *pBlockIdx = pCommitter->dReader.pBlockIdx;
+
+ ASSERT(pBlockIdx == NULL || tTABLEIDCmprFn(pBlockIdx, &id) >= 0);
+ if (pBlockIdx && pBlockIdx->suid == id.suid && pBlockIdx->uid == id.uid) {
+ int32_t iBlock = 0;
+ SDataBlk block;
+ SDataBlk *pDataBlk = █
+ SRowInfo *pRowInfo = tsdbGetCommitRow(pCommitter);
+
+ ASSERT(pRowInfo->suid == id.suid && pRowInfo->uid == id.uid);
+
+ tMapDataGetItemByIdx(&pCommitter->dReader.mBlock, iBlock, pDataBlk, tGetDataBlk);
+ while (pDataBlk && pRowInfo) {
+ SDataBlk tBlock = {.minKey = TSDBROW_KEY(&pRowInfo->row), .maxKey = TSDBROW_KEY(&pRowInfo->row)};
+ int32_t c = tDataBlkCmprFn(pDataBlk, &tBlock);
+
+ if (c < 0) {
+ code = tMapDataPutItem(&pCommitter->dWriter.mBlock, pDataBlk, tPutDataBlk);
+ if (code) goto _err;
+
+ iBlock++;
+ if (iBlock < pCommitter->dReader.mBlock.nItem) {
+ tMapDataGetItemByIdx(&pCommitter->dReader.mBlock, iBlock, pDataBlk, tGetDataBlk);
+ } else {
+ pDataBlk = NULL;
+ }
+ } else if (c > 0) {
+ code = tsdbCommitAheadBlock(pCommitter, pDataBlk);
+ if (code) goto _err;
+
+ pRowInfo = tsdbGetCommitRow(pCommitter);
+ if (pRowInfo && (pRowInfo->suid != id.suid || pRowInfo->uid != id.uid)) pRowInfo = NULL;
+ } else {
+ code = tsdbCommitMergeBlock(pCommitter, pDataBlk);
+ if (code) goto _err;
+
+ iBlock++;
+ if (iBlock < pCommitter->dReader.mBlock.nItem) {
+ tMapDataGetItemByIdx(&pCommitter->dReader.mBlock, iBlock, pDataBlk, tGetDataBlk);
+ } else {
+ pDataBlk = NULL;
+ }
+ pRowInfo = tsdbGetCommitRow(pCommitter);
+ if (pRowInfo && (pRowInfo->suid != id.suid || pRowInfo->uid != id.uid)) pRowInfo = NULL;
+ }
+ }
+
+ while (pDataBlk) {
+ code = tMapDataPutItem(&pCommitter->dWriter.mBlock, pDataBlk, tPutDataBlk);
+ if (code) goto _err;
+
+ iBlock++;
+ if (iBlock < pCommitter->dReader.mBlock.nItem) {
+ tMapDataGetItemByIdx(&pCommitter->dReader.mBlock, iBlock, pDataBlk, tGetDataBlk);
+ } else {
+ pDataBlk = NULL;
+ }
+ }
+
+ code = tsdbCommitterNextTableData(pCommitter);
+ if (code) goto _err;
+ }
+
+_exit:
+ return code;
+
+_err:
+ tsdbError("vgId:%d tsdb merge table data failed since %s", TD_VID(pCommitter->pTsdb->pVnode), tstrerror(code));
+ return code;
+}
+
+static int32_t tsdbInitLastBlockIfNeed(SCommitter *pCommitter, TABLEID id) {
+ int32_t code = 0;
+
+ SBlockData *pBDatal = &pCommitter->dWriter.bDatal;
+ if (pBDatal->suid || pBDatal->uid) {
+ if ((pBDatal->suid != id.suid) || (id.suid == 0)) {
+ code = tsdbWriteSttBlock(pCommitter->dWriter.pWriter, pBDatal, pCommitter->dWriter.aSttBlk, pCommitter->cmprAlg);
+ if (code) goto _exit;
+ tBlockDataReset(pBDatal);
+ }
+ }
+
+ if (!pBDatal->suid && !pBDatal->uid) {
+ ASSERT(pCommitter->skmTable.suid == id.suid);
+ ASSERT(pCommitter->skmTable.uid == id.uid);
+ TABLEID tid = {.suid = id.suid, .uid = id.suid ? 0 : id.uid};
+ code = tBlockDataInit(pBDatal, &tid, pCommitter->skmTable.pTSchema, NULL, 0);
+ if (code) goto _exit;
+ }
+
+_exit:
+ return code;
+}
+
+static int32_t tsdbAppendLastBlock(SCommitter *pCommitter) {
+ int32_t code = 0;
+
+ SBlockData *pBData = &pCommitter->dWriter.bData;
+ SBlockData *pBDatal = &pCommitter->dWriter.bDatal;
+
+ TABLEID id = {.suid = pBData->suid, .uid = pBData->uid};
+ code = tsdbInitLastBlockIfNeed(pCommitter, id);
+ if (code) goto _err;
+
+ for (int32_t iRow = 0; iRow < pBData->nRow; iRow++) {
+ TSDBROW row = tsdbRowFromBlockData(pBData, iRow);
+ code = tBlockDataAppendRow(pBDatal, &row, NULL, pBData->uid);
+ if (code) goto _err;
+
+ if (pBDatal->nRow >= pCommitter->maxRow) {
+ code = tsdbWriteSttBlock(pCommitter->dWriter.pWriter, pBDatal, pCommitter->dWriter.aSttBlk, pCommitter->cmprAlg);
+ if (code) goto _err;
+ }
+ }
+
+ return code;
+
+_err:
+ return code;
+}
+
+static int32_t tsdbCommitTableData(SCommitter *pCommitter, TABLEID id) {
+ int32_t code = 0;
+
+ SRowInfo *pRowInfo = tsdbGetCommitRow(pCommitter);
+ if (pRowInfo && (pRowInfo->suid != id.suid || pRowInfo->uid != id.uid)) {
+ pRowInfo = NULL;
+ }
+
+ if (pRowInfo == NULL) goto _exit;
+
+ SBlockData *pBData;
+ if (pCommitter->toLastOnly) {
+ pBData = &pCommitter->dWriter.bDatal;
+ code = tsdbInitLastBlockIfNeed(pCommitter, id);
+ if (code) goto _err;
+ } else {
+ pBData = &pCommitter->dWriter.bData;
+ ASSERT(pBData->nRow == 0);
+ }
+
+ while (pRowInfo) {
+ STSchema *pTSchema = NULL;
+ if (pRowInfo->row.type == 0) {
+ code = tsdbCommitterUpdateRowSchema(pCommitter, id.suid, id.uid, TSDBROW_SVERSION(&pRowInfo->row));
+ if (code) goto _err;
+ pTSchema = pCommitter->skmRow.pTSchema;
+ }
+
+ code = tBlockDataAppendRow(pBData, &pRowInfo->row, pTSchema, id.uid);
+ if (code) goto _err;
+
+ code = tsdbNextCommitRow(pCommitter);
+ if (code) goto _err;
+
+ pRowInfo = tsdbGetCommitRow(pCommitter);
+ if (pRowInfo && (pRowInfo->suid != id.suid || pRowInfo->uid != id.uid)) {
+ pRowInfo = NULL;
+ }
+
+ if (pBData->nRow >= pCommitter->maxRow) {
+ if (pCommitter->toLastOnly) {
+ code = tsdbWriteSttBlock(pCommitter->dWriter.pWriter, pBData, pCommitter->dWriter.aSttBlk, pCommitter->cmprAlg);
+ if (code) goto _err;
+ } else {
+ code =
+ tsdbWriteDataBlock(pCommitter->dWriter.pWriter, pBData, &pCommitter->dWriter.mBlock, pCommitter->cmprAlg);
+ if (code) goto _err;
+ }
+ }
+ }
+
+ if (!pCommitter->toLastOnly && pBData->nRow) {
+ if (pBData->nRow > pCommitter->minRow) {
+ code = tsdbWriteDataBlock(pCommitter->dWriter.pWriter, pBData, &pCommitter->dWriter.mBlock, pCommitter->cmprAlg);
+ if (code) goto _err;
+ } else {
+ code = tsdbAppendLastBlock(pCommitter);
+ if (code) goto _err;
+ }
+ }
+
+_exit:
+ return code;
+
+_err:
+ tsdbError("vgId:%d tsdb commit table data failed since %s", TD_VID(pCommitter->pTsdb->pVnode), tstrerror(code));
+ return code;
+}
+
+static int32_t tsdbCommitFileDataImpl(SCommitter *pCommitter) {
+ int32_t code = 0;
+
+ SRowInfo *pRowInfo;
+ TABLEID id = {0};
+ while ((pRowInfo = tsdbGetCommitRow(pCommitter)) != NULL) {
+ ASSERT(pRowInfo->suid != id.suid || pRowInfo->uid != id.uid);
+ id.suid = pRowInfo->suid;
+ id.uid = pRowInfo->uid;
+
+ code = tsdbMoveCommitData(pCommitter, id);
+ if (code) goto _err;
+
+ // start
+ tMapDataReset(&pCommitter->dWriter.mBlock);
+
+ // impl
+ code = tsdbUpdateTableSchema(pCommitter->pTsdb->pVnode->pMeta, id.suid, id.uid, &pCommitter->skmTable);
+ if (code) goto _err;
+ code = tBlockDataInit(&pCommitter->dReader.bData, &id, pCommitter->skmTable.pTSchema, NULL, 0);
+ if (code) goto _err;
+ code = tBlockDataInit(&pCommitter->dWriter.bData, &id, pCommitter->skmTable.pTSchema, NULL, 0);
+ if (code) goto _err;
+
+ /* merge with data in .data file */
+ code = tsdbMergeTableData(pCommitter, id);
+ if (code) goto _err;
+
+ /* handle remain table data */
+ code = tsdbCommitTableData(pCommitter, id);
+ if (code) goto _err;
+
+ // end
+ if (pCommitter->dWriter.mBlock.nItem > 0) {
+ SBlockIdx blockIdx = {.suid = id.suid, .uid = id.uid};
+ code = tsdbWriteDataBlk(pCommitter->dWriter.pWriter, &pCommitter->dWriter.mBlock, &blockIdx);
+ if (code) goto _err;
+
+ if (taosArrayPush(pCommitter->dWriter.aBlockIdx, &blockIdx) == NULL) {
+ code = TSDB_CODE_OUT_OF_MEMORY;
+ goto _err;
+ }
+ }
+ }
+
+ id.suid = INT64_MAX;
+ id.uid = INT64_MAX;
+ code = tsdbMoveCommitData(pCommitter, id);
+ if (code) goto _err;
+
+ code = tsdbWriteSttBlock(pCommitter->dWriter.pWriter, &pCommitter->dWriter.bDatal, pCommitter->dWriter.aSttBlk,
+ pCommitter->cmprAlg);
+ if (code) goto _err;
+
+ return code;
+
+_err:
+ tsdbError("vgId:%d tsdb commit file data impl failed since %s", TD_VID(pCommitter->pTsdb->pVnode), tstrerror(code));
+ return code;
+}
diff --git a/source/dnode/vnode/src/tsdb/tsdbCompact.c b/source/dnode/vnode/src/tsdb/tsdbCompact.c
new file mode 100644
index 0000000000000000000000000000000000000000..fb3917be64faa058b52f1f13a86ec0034486f279
--- /dev/null
+++ b/source/dnode/vnode/src/tsdb/tsdbCompact.c
@@ -0,0 +1,27 @@
+/*
+ * Copyright (c) 2019 TAOS Data, Inc.
+ *
+ * This program is free software: you can use, redistribute, and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3
+ * or later ("AGPL"), as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see .
+ */
+
+#include "tsdb.h"
+
+typedef struct {
+ STsdb *pTsdb;
+ STsdbFS fs;
+} STsdbCompactor;
+
+int32_t tsdbCompact(STsdb *pTsdb) {
+ int32_t code = 0;
+ // TODO
+ return code;
+}
diff --git a/source/dnode/vnode/src/tsdb/tsdbDiskData.c b/source/dnode/vnode/src/tsdb/tsdbDiskData.c
new file mode 100644
index 0000000000000000000000000000000000000000..3bd71f0ea6465cadcda3924247715a88721be3d8
--- /dev/null
+++ b/source/dnode/vnode/src/tsdb/tsdbDiskData.c
@@ -0,0 +1,84 @@
+/*
+ * Copyright (c) 2019 TAOS Data, Inc.
+ *
+ * This program is free software: you can use, redistribute, and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3
+ * or later ("AGPL"), as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see .
+ */
+
+#include "tsdb.h"
+
+typedef struct SDiskColBuilder SDiskColBuilder;
+struct SDiskColBuilder {
+ uint8_t flags;
+ uint8_t *pBitMap;
+ int32_t *aOffset;
+ int32_t nData;
+ uint8_t *pData;
+};
+
+int32_t tDiskColAddVal(SDiskColBuilder *pBuilder, SColVal *pColVal) {
+ int32_t code = 0;
+ // TODO
+ return code;
+}
+
+// ================================================================
+typedef struct SDiskDataBuilder SDiskDataBuilder;
+struct SDiskDataBuilder {
+ SDiskDataHdr hdr;
+ SArray *aBlockCol; // SArray
+};
+
+int32_t tDiskDataBuilderCreate(SDiskDataBuilder **ppBuilder) {
+ int32_t code = 0;
+ // TODO
+ return code;
+}
+
+void tDiskDataBuilderDestroy(SDiskDataBuilder *pBuilder) {
+ // TODO
+}
+
+void tDiskDataBuilderInit(SDiskDataBuilder *pBuilder, int64_t suid, int64_t uid, STSchema *pTSchema, int8_t cmprAlg) {
+ pBuilder->hdr = (SDiskDataHdr){.delimiter = TSDB_FILE_DLMT, //
+ .fmtVer = 0,
+ .suid = suid,
+ .uid = uid,
+ .cmprAlg = cmprAlg};
+}
+
+void tDiskDataBuilderReset(SDiskDataBuilder *pBuilder) {
+ // TODO
+}
+
+int32_t tDiskDataBuilderAddRow(SDiskDataBuilder *pBuilder, TSDBROW *pRow, STSchema *pTSchema, int64_t uid) {
+ int32_t code = 0;
+
+ // uid (todo)
+
+ // version (todo)
+
+ // TSKEY (todo)
+
+ SRowIter iter = {0};
+ tRowIterInit(&iter, pRow, pTSchema);
+
+ for (int32_t iDiskCol = 0; iDiskCol < 0; iDiskCol++) {
+ }
+
+ return code;
+}
+
+int32_t tDiskDataBuilderGet(SDiskDataBuilder *pBuilder, uint8_t **ppData) {
+ int32_t code = 0;
+ // TODO
+ return code;
+}
\ No newline at end of file
diff --git a/source/dnode/vnode/src/tsdb/tsdbFS.c b/source/dnode/vnode/src/tsdb/tsdbFS.c
index 247de993381d98713fa6a4ca1938c11b044c8cd6..10926ae6ad31ae8d609dd20819ad03bec2e26c57 100644
--- a/source/dnode/vnode/src/tsdb/tsdbFS.c
+++ b/source/dnode/vnode/src/tsdb/tsdbFS.c
@@ -21,6 +21,9 @@ static int32_t tsdbEncodeFS(uint8_t *p, STsdbFS *pFS) {
int8_t hasDel = pFS->pDelFile ? 1 : 0;
uint32_t nSet = taosArrayGetSize(pFS->aDFileSet);
+ // version
+ n += tPutI8(p ? p + n : p, 0);
+
// SDelFile
n += tPutI8(p ? p + n : p, hasDel);
if (hasDel) {
@@ -55,7 +58,7 @@ static int32_t tsdbGnrtCurrent(STsdb *pTsdb, STsdbFS *pFS, char *fname) {
taosCalcChecksumAppend(0, pData, size);
// create and write
- pFD = taosOpenFile(fname, TD_FILE_WRITE | TD_FILE_CREATE);
+ pFD = taosOpenFile(fname, TD_FILE_WRITE | TD_FILE_CREATE | TD_FILE_TRUNC);
if (pFD == NULL) {
code = TAOS_SYSTEM_ERROR(errno);
goto _err;
@@ -110,7 +113,7 @@ _err:
// taosRemoveFile(fname);
// }
-// // last
+// // stt
// if (isSameDisk && pFrom->pLastF->commitID == pTo->pLastF->commitID) {
// if (pFrom->pLastF->size > pTo->pLastF->size) {
// code = tsdbDFileRollback(pFS->pTsdb, pTo, TSDB_LAST_FILE);
@@ -140,7 +143,7 @@ _err:
// tsdbDataFileName(pFS->pTsdb, pFrom->diskId, pFrom->fid, pFrom->pDataF, fname);
// taosRemoveFile(fname);
-// // last
+// // stt
// tsdbLastFileName(pFS->pTsdb, pFrom->diskId, pFrom->fid, pFrom->pLastF, fname);
// taosRemoveFile(fname);
@@ -254,8 +257,10 @@ void tsdbFSDestroy(STsdbFS *pFS) {
SDFileSet *pSet = (SDFileSet *)taosArrayGet(pFS->aDFileSet, iSet);
taosMemoryFree(pSet->pHeadF);
taosMemoryFree(pSet->pDataF);
- taosMemoryFree(pSet->pLastF);
taosMemoryFree(pSet->pSmaF);
+ for (int32_t iStt = 0; iStt < pSet->nSttF; iStt++) {
+ taosMemoryFree(pSet->aSttF[iStt]);
+ }
}
taosArrayDestroy(pFS->aDFileSet);
@@ -274,7 +279,7 @@ static int32_t tsdbScanAndTryFixFS(STsdb *pTsdb) {
goto _err;
}
- if (size != pTsdb->fs.pDelFile->size) {
+ if (size != tsdbLogicToFileSize(pTsdb->fs.pDelFile->size, pTsdb->pVnode->config.tsdbPageSize)) {
code = TSDB_CODE_FILE_CORRUPTED;
goto _err;
}
@@ -290,7 +295,7 @@ static int32_t tsdbScanAndTryFixFS(STsdb *pTsdb) {
code = TAOS_SYSTEM_ERROR(errno);
goto _err;
}
- if (size != pSet->pHeadF->size) {
+ if (size != tsdbLogicToFileSize(pSet->pHeadF->size, pTsdb->pVnode->config.tsdbPageSize)) {
code = TSDB_CODE_FILE_CORRUPTED;
goto _err;
}
@@ -301,38 +306,40 @@ static int32_t tsdbScanAndTryFixFS(STsdb *pTsdb) {
code = TAOS_SYSTEM_ERROR(errno);
goto _err;
}
- if (size < pSet->pDataF->size) {
+ if (size < tsdbLogicToFileSize(pSet->pDataF->size, pTsdb->pVnode->config.tsdbPageSize)) {
code = TSDB_CODE_FILE_CORRUPTED;
goto _err;
- } else if (size > pSet->pDataF->size) {
+ } else if (size > tsdbLogicToFileSize(pSet->pDataF->size, pTsdb->pVnode->config.tsdbPageSize)) {
code = tsdbDFileRollback(pTsdb, pSet, TSDB_DATA_FILE);
if (code) goto _err;
}
- // last ===========
- tsdbLastFileName(pTsdb, pSet->diskId, pSet->fid, pSet->pLastF, fname);
- if (taosStatFile(fname, &size, NULL)) {
- code = TAOS_SYSTEM_ERROR(errno);
- goto _err;
- }
- if (size != pSet->pLastF->size) {
- code = TSDB_CODE_FILE_CORRUPTED;
- goto _err;
- }
-
// sma =============
tsdbSmaFileName(pTsdb, pSet->diskId, pSet->fid, pSet->pSmaF, fname);
if (taosStatFile(fname, &size, NULL)) {
code = TAOS_SYSTEM_ERROR(errno);
goto _err;
}
- if (size < pSet->pSmaF->size) {
+ if (size < tsdbLogicToFileSize(pSet->pSmaF->size, pTsdb->pVnode->config.tsdbPageSize)) {
code = TSDB_CODE_FILE_CORRUPTED;
goto _err;
- } else if (size > pSet->pSmaF->size) {
+ } else if (size > tsdbLogicToFileSize(pSet->pSmaF->size, pTsdb->pVnode->config.tsdbPageSize)) {
code = tsdbDFileRollback(pTsdb, pSet, TSDB_SMA_FILE);
if (code) goto _err;
}
+
+ // stt ===========
+ for (int32_t iStt = 0; iStt < pSet->nSttF; iStt++) {
+ tsdbSttFileName(pTsdb, pSet->diskId, pSet->fid, pSet->aSttF[iStt], fname);
+ if (taosStatFile(fname, &size, NULL)) {
+ code = TAOS_SYSTEM_ERROR(errno);
+ goto _err;
+ }
+ if (size != tsdbLogicToFileSize(pSet->aSttF[iStt]->size, pTsdb->pVnode->config.tsdbPageSize)) {
+ code = TSDB_CODE_FILE_CORRUPTED;
+ goto _err;
+ }
+ }
}
{
@@ -360,10 +367,12 @@ static int32_t tsdbRecoverFS(STsdb *pTsdb, uint8_t *pData, int64_t nData) {
int32_t code = 0;
int8_t hasDel;
uint32_t nSet;
- int32_t n;
+ int32_t n = 0;
+
+ // version
+ n += tGetI8(pData + n, NULL);
// SDelFile
- n = 0;
n += tGetI8(pData + n, &hasDel);
if (hasDel) {
pTsdb->fs.pDelFile = (SDelFile *)taosMemoryMalloc(sizeof(SDelFile));
@@ -382,41 +391,15 @@ static int32_t tsdbRecoverFS(STsdb *pTsdb, uint8_t *pData, int64_t nData) {
taosArrayClear(pTsdb->fs.aDFileSet);
n += tGetU32v(pData + n, &nSet);
for (uint32_t iSet = 0; iSet < nSet; iSet++) {
- SDFileSet fSet;
-
- // head
- fSet.pHeadF = (SHeadFile *)taosMemoryCalloc(1, sizeof(SHeadFile));
- if (fSet.pHeadF == NULL) {
- code = TSDB_CODE_OUT_OF_MEMORY;
- goto _err;
- }
- fSet.pHeadF->nRef = 1;
-
- // data
- fSet.pDataF = (SDataFile *)taosMemoryCalloc(1, sizeof(SDataFile));
- if (fSet.pDataF == NULL) {
- code = TSDB_CODE_OUT_OF_MEMORY;
- goto _err;
- }
- fSet.pDataF->nRef = 1;
-
- // last
- fSet.pLastF = (SLastFile *)taosMemoryCalloc(1, sizeof(SLastFile));
- if (fSet.pLastF == NULL) {
- code = TSDB_CODE_OUT_OF_MEMORY;
- goto _err;
- }
- fSet.pLastF->nRef = 1;
+ SDFileSet fSet = {0};
- // sma
- fSet.pSmaF = (SSmaFile *)taosMemoryCalloc(1, sizeof(SSmaFile));
- if (fSet.pSmaF == NULL) {
+ int32_t nt = tGetDFileSet(pData + n, &fSet);
+ if (nt < 0) {
code = TSDB_CODE_OUT_OF_MEMORY;
goto _err;
}
- fSet.pSmaF->nRef = 1;
- n += tGetDFileSet(pData + n, &fSet);
+ n += nt;
if (taosArrayPush(pTsdb->fs.aDFileSet, &fSet) == NULL) {
code = TSDB_CODE_OUT_OF_MEMORY;
@@ -532,13 +515,15 @@ int32_t tsdbFSClose(STsdb *pTsdb) {
ASSERT(pSet->pDataF->nRef == 1);
taosMemoryFree(pSet->pDataF);
- // last
- ASSERT(pSet->pLastF->nRef == 1);
- taosMemoryFree(pSet->pLastF);
-
// sma
ASSERT(pSet->pSmaF->nRef == 1);
taosMemoryFree(pSet->pSmaF);
+
+ // stt
+ for (int32_t iStt = 0; iStt < pSet->nSttF; iStt++) {
+ ASSERT(pSet->aSttF[iStt]->nRef == 1);
+ taosMemoryFree(pSet->aSttF[iStt]);
+ }
}
taosArrayDestroy(pTsdb->fs.aDFileSet);
@@ -586,15 +571,7 @@ int32_t tsdbFSCopy(STsdb *pTsdb, STsdbFS *pFS) {
}
*fSet.pDataF = *pSet->pDataF;
- // data
- fSet.pLastF = (SLastFile *)taosMemoryMalloc(sizeof(SLastFile));
- if (fSet.pLastF == NULL) {
- code = TSDB_CODE_OUT_OF_MEMORY;
- goto _exit;
- }
- *fSet.pLastF = *pSet->pLastF;
-
- // last
+ // sma
fSet.pSmaF = (SSmaFile *)taosMemoryMalloc(sizeof(SSmaFile));
if (fSet.pSmaF == NULL) {
code = TSDB_CODE_OUT_OF_MEMORY;
@@ -602,6 +579,16 @@ int32_t tsdbFSCopy(STsdb *pTsdb, STsdbFS *pFS) {
}
*fSet.pSmaF = *pSet->pSmaF;
+ // stt
+ for (fSet.nSttF = 0; fSet.nSttF < pSet->nSttF; fSet.nSttF++) {
+ fSet.aSttF[fSet.nSttF] = (SSttFile *)taosMemoryMalloc(sizeof(SSttFile));
+ if (fSet.aSttF[fSet.nSttF] == NULL) {
+ code = TSDB_CODE_OUT_OF_MEMORY;
+ goto _exit;
+ }
+ *fSet.aSttF[fSet.nSttF] = *pSet->aSttF[fSet.nSttF];
+ }
+
if (taosArrayPush(pFS->aDFileSet, &fSet) == NULL) {
code = TSDB_CODE_OUT_OF_MEMORY;
goto _exit;
@@ -651,14 +638,38 @@ int32_t tsdbFSUpsertFSet(STsdbFS *pFS, SDFileSet *pSet) {
if (c == 0) {
*pDFileSet->pHeadF = *pSet->pHeadF;
*pDFileSet->pDataF = *pSet->pDataF;
- *pDFileSet->pLastF = *pSet->pLastF;
*pDFileSet->pSmaF = *pSet->pSmaF;
+ // stt
+ if (pSet->nSttF > pDFileSet->nSttF) {
+ ASSERT(pSet->nSttF == pDFileSet->nSttF + 1);
+
+ pDFileSet->aSttF[pDFileSet->nSttF] = (SSttFile *)taosMemoryMalloc(sizeof(SSttFile));
+ if (pDFileSet->aSttF[pDFileSet->nSttF] == NULL) {
+ code = TSDB_CODE_OUT_OF_MEMORY;
+ goto _exit;
+ }
+ *pDFileSet->aSttF[pDFileSet->nSttF] = *pSet->aSttF[pSet->nSttF - 1];
+ pDFileSet->nSttF++;
+ } else if (pSet->nSttF < pDFileSet->nSttF) {
+ ASSERT(pSet->nSttF == 1);
+ for (int32_t iStt = 1; iStt < pDFileSet->nSttF; iStt++) {
+ taosMemoryFree(pDFileSet->aSttF[iStt]);
+ }
+
+ *pDFileSet->aSttF[0] = *pSet->aSttF[0];
+ pDFileSet->nSttF = 1;
+ } else {
+ for (int32_t iStt = 0; iStt < pSet->nSttF; iStt++) {
+ *pDFileSet->aSttF[iStt] = *pSet->aSttF[iStt];
+ }
+ }
goto _exit;
}
}
- SDFileSet fSet = {.diskId = pSet->diskId, .fid = pSet->fid};
+ ASSERT(pSet->nSttF == 1);
+ SDFileSet fSet = {.diskId = pSet->diskId, .fid = pSet->fid, .nSttF = 1};
// head
fSet.pHeadF = (SHeadFile *)taosMemoryMalloc(sizeof(SHeadFile));
@@ -676,21 +687,21 @@ int32_t tsdbFSUpsertFSet(STsdbFS *pFS, SDFileSet *pSet) {
}
*fSet.pDataF = *pSet->pDataF;
- // data
- fSet.pLastF = (SLastFile *)taosMemoryMalloc(sizeof(SLastFile));
- if (fSet.pLastF == NULL) {
+ // sma
+ fSet.pSmaF = (SSmaFile *)taosMemoryMalloc(sizeof(SSmaFile));
+ if (fSet.pSmaF == NULL) {
code = TSDB_CODE_OUT_OF_MEMORY;
goto _exit;
}
- *fSet.pLastF = *pSet->pLastF;
+ *fSet.pSmaF = *pSet->pSmaF;
- // last
- fSet.pSmaF = (SSmaFile *)taosMemoryMalloc(sizeof(SSmaFile));
- if (fSet.pSmaF == NULL) {
+ // stt
+ fSet.aSttF[0] = (SSttFile *)taosMemoryMalloc(sizeof(SSttFile));
+ if (fSet.aSttF[0] == NULL) {
code = TSDB_CODE_OUT_OF_MEMORY;
goto _exit;
}
- *fSet.pSmaF = *pSet->pSmaF;
+ *fSet.aSttF[0] = *pSet->aSttF[0];
if (taosArrayInsert(pFS->aDFileSet, idx, &fSet) == NULL) {
code = TSDB_CODE_OUT_OF_MEMORY;
@@ -836,27 +847,6 @@ int32_t tsdbFSCommit2(STsdb *pTsdb, STsdbFS *pFSNew) {
pSetOld->pDataF->size = pSetNew->pDataF->size;
}
- // last
- fSet.pLastF = pSetOld->pLastF;
- if ((!sameDisk) || (pSetOld->pLastF->commitID != pSetNew->pLastF->commitID)) {
- pSetOld->pLastF = (SLastFile *)taosMemoryMalloc(sizeof(SLastFile));
- if (pSetOld->pLastF == NULL) {
- code = TSDB_CODE_OUT_OF_MEMORY;
- goto _err;
- }
- *pSetOld->pLastF = *pSetNew->pLastF;
- pSetOld->pLastF->nRef = 1;
-
- nRef = atomic_sub_fetch_32(&fSet.pLastF->nRef, 1);
- if (nRef == 0) {
- tsdbLastFileName(pTsdb, pSetOld->diskId, pSetOld->fid, fSet.pLastF, fname);
- taosRemoveFile(fname);
- taosMemoryFree(fSet.pLastF);
- }
- } else {
- ASSERT(pSetOld->pLastF->size == pSetNew->pLastF->size);
- }
-
// sma
fSet.pSmaF = pSetOld->pSmaF;
if ((!sameDisk) || (pSetOld->pSmaF->commitID != pSetNew->pSmaF->commitID)) {
@@ -879,6 +869,84 @@ int32_t tsdbFSCommit2(STsdb *pTsdb, STsdbFS *pFSNew) {
pSetOld->pSmaF->size = pSetNew->pSmaF->size;
}
+ // stt
+ if (sameDisk) {
+ if (pSetNew->nSttF > pSetOld->nSttF) {
+ ASSERT(pSetNew->nSttF = pSetOld->nSttF + 1);
+ pSetOld->aSttF[pSetOld->nSttF] = (SSttFile *)taosMemoryMalloc(sizeof(SSttFile));
+ if (pSetOld->aSttF[pSetOld->nSttF] == NULL) {
+ code = TSDB_CODE_OUT_OF_MEMORY;
+ goto _err;
+ }
+ *pSetOld->aSttF[pSetOld->nSttF] = *pSetNew->aSttF[pSetOld->nSttF];
+ pSetOld->aSttF[pSetOld->nSttF]->nRef = 1;
+ pSetOld->nSttF++;
+ } else if (pSetNew->nSttF < pSetOld->nSttF) {
+ ASSERT(pSetNew->nSttF == 1);
+ for (int32_t iStt = 0; iStt < pSetOld->nSttF; iStt++) {
+ SSttFile *pSttFile = pSetOld->aSttF[iStt];
+ nRef = atomic_sub_fetch_32(&pSttFile->nRef, 1);
+ if (nRef == 0) {
+ tsdbSttFileName(pTsdb, pSetOld->diskId, pSetOld->fid, pSttFile, fname);
+ taosRemoveFile(fname);
+ taosMemoryFree(pSttFile);
+ }
+ pSetOld->aSttF[iStt] = NULL;
+ }
+
+ pSetOld->nSttF = 1;
+ pSetOld->aSttF[0] = (SSttFile *)taosMemoryMalloc(sizeof(SSttFile));
+ if (pSetOld->aSttF[0] == NULL) {
+ code = TSDB_CODE_OUT_OF_MEMORY;
+ goto _err;
+ }
+ *pSetOld->aSttF[0] = *pSetNew->aSttF[0];
+ pSetOld->aSttF[0]->nRef = 1;
+ } else {
+ for (int32_t iStt = 0; iStt < pSetOld->nSttF; iStt++) {
+ if (pSetOld->aSttF[iStt]->commitID != pSetNew->aSttF[iStt]->commitID) {
+ SSttFile *pSttFile = pSetOld->aSttF[iStt];
+ nRef = atomic_sub_fetch_32(&pSttFile->nRef, 1);
+ if (nRef == 0) {
+ tsdbSttFileName(pTsdb, pSetOld->diskId, pSetOld->fid, pSttFile, fname);
+ taosRemoveFile(fname);
+ taosMemoryFree(pSttFile);
+ }
+
+ pSetOld->aSttF[iStt] = (SSttFile *)taosMemoryMalloc(sizeof(SSttFile));
+ if (pSetOld->aSttF[iStt] == NULL) {
+ code = TSDB_CODE_OUT_OF_MEMORY;
+ goto _err;
+ }
+ *pSetOld->aSttF[iStt] = *pSetNew->aSttF[iStt];
+ pSetOld->aSttF[iStt]->nRef = 1;
+ } else {
+ ASSERT(pSetOld->aSttF[iStt]->size == pSetOld->aSttF[iStt]->size);
+ ASSERT(pSetOld->aSttF[iStt]->offset == pSetOld->aSttF[iStt]->offset);
+ }
+ }
+ }
+ } else {
+ ASSERT(pSetOld->nSttF == pSetNew->nSttF);
+ for (int32_t iStt = 0; iStt < pSetOld->nSttF; iStt++) {
+ SSttFile *pSttFile = pSetOld->aSttF[iStt];
+ nRef = atomic_sub_fetch_32(&pSttFile->nRef, 1);
+ if (nRef == 0) {
+ tsdbSttFileName(pTsdb, pSetOld->diskId, pSetOld->fid, pSttFile, fname);
+ taosRemoveFile(fname);
+ taosMemoryFree(pSttFile);
+ }
+
+ pSetOld->aSttF[iStt] = (SSttFile *)taosMemoryMalloc(sizeof(SSttFile));
+ if (pSetOld->aSttF[iStt] == NULL) {
+ code = TSDB_CODE_OUT_OF_MEMORY;
+ goto _err;
+ }
+ *pSetOld->aSttF[iStt] = *pSetNew->aSttF[iStt];
+ pSetOld->aSttF[iStt]->nRef = 1;
+ }
+ }
+
if (!sameDisk) {
pSetOld->diskId = pSetNew->diskId;
}
@@ -902,13 +970,6 @@ int32_t tsdbFSCommit2(STsdb *pTsdb, STsdbFS *pFSNew) {
taosMemoryFree(pSetOld->pDataF);
}
- nRef = atomic_sub_fetch_32(&pSetOld->pLastF->nRef, 1);
- if (nRef == 0) {
- tsdbLastFileName(pTsdb, pSetOld->diskId, pSetOld->fid, pSetOld->pLastF, fname);
- taosRemoveFile(fname);
- taosMemoryFree(pSetOld->pLastF);
- }
-
nRef = atomic_sub_fetch_32(&pSetOld->pSmaF->nRef, 1);
if (nRef == 0) {
tsdbSmaFileName(pTsdb, pSetOld->diskId, pSetOld->fid, pSetOld->pSmaF, fname);
@@ -916,12 +977,20 @@ int32_t tsdbFSCommit2(STsdb *pTsdb, STsdbFS *pFSNew) {
taosMemoryFree(pSetOld->pSmaF);
}
+ for (int8_t iStt = 0; iStt < pSetOld->nSttF; iStt++) {
+ nRef = atomic_sub_fetch_32(&pSetOld->aSttF[iStt]->nRef, 1);
+ if (nRef == 0) {
+ tsdbSttFileName(pTsdb, pSetOld->diskId, pSetOld->fid, pSetOld->aSttF[iStt], fname);
+ taosRemoveFile(fname);
+ taosMemoryFree(pSetOld->aSttF[iStt]);
+ }
+ }
+
taosArrayRemove(pTsdb->fs.aDFileSet, iOld);
continue;
_add_new:
- fSet.diskId = pSetNew->diskId;
- fSet.fid = pSetNew->fid;
+ fSet = (SDFileSet){.diskId = pSetNew->diskId, .fid = pSetNew->fid, .nSttF = 1};
// head
fSet.pHeadF = (SHeadFile *)taosMemoryMalloc(sizeof(SHeadFile));
@@ -941,15 +1010,6 @@ int32_t tsdbFSCommit2(STsdb *pTsdb, STsdbFS *pFSNew) {
*fSet.pDataF = *pSetNew->pDataF;
fSet.pDataF->nRef = 1;
- // last
- fSet.pLastF = (SLastFile *)taosMemoryMalloc(sizeof(SLastFile));
- if (fSet.pLastF == NULL) {
- code = TSDB_CODE_OUT_OF_MEMORY;
- goto _err;
- }
- *fSet.pLastF = *pSetNew->pLastF;
- fSet.pLastF->nRef = 1;
-
// sma
fSet.pSmaF = (SSmaFile *)taosMemoryMalloc(sizeof(SSmaFile));
if (fSet.pSmaF == NULL) {
@@ -959,6 +1019,16 @@ int32_t tsdbFSCommit2(STsdb *pTsdb, STsdbFS *pFSNew) {
*fSet.pSmaF = *pSetNew->pSmaF;
fSet.pSmaF->nRef = 1;
+ // stt
+ ASSERT(pSetNew->nSttF == 1);
+ fSet.aSttF[0] = (SSttFile *)taosMemoryMalloc(sizeof(SSttFile));
+ if (fSet.aSttF[0] == NULL) {
+ code = TSDB_CODE_OUT_OF_MEMORY;
+ goto _err;
+ }
+ *fSet.aSttF[0] = *pSetNew->aSttF[0];
+ fSet.aSttF[0]->nRef = 1;
+
if (taosArrayInsert(pTsdb->fs.aDFileSet, iOld, &fSet) == NULL) {
code = TSDB_CODE_OUT_OF_MEMORY;
goto _err;
@@ -1002,12 +1072,14 @@ int32_t tsdbFSRef(STsdb *pTsdb, STsdbFS *pFS) {
nRef = atomic_fetch_add_32(&pSet->pDataF->nRef, 1);
ASSERT(nRef > 0);
- nRef = atomic_fetch_add_32(&pSet->pLastF->nRef, 1);
- ASSERT(nRef > 0);
-
nRef = atomic_fetch_add_32(&pSet->pSmaF->nRef, 1);
ASSERT(nRef > 0);
+ for (int32_t iStt = 0; iStt < pSet->nSttF; iStt++) {
+ nRef = atomic_fetch_add_32(&pSet->aSttF[iStt]->nRef, 1);
+ ASSERT(nRef > 0);
+ }
+
if (taosArrayPush(pFS->aDFileSet, &fSet) == NULL) {
code = TSDB_CODE_OUT_OF_MEMORY;
goto _exit;
@@ -1053,15 +1125,6 @@ void tsdbFSUnref(STsdb *pTsdb, STsdbFS *pFS) {
taosMemoryFree(pSet->pDataF);
}
- // last
- nRef = atomic_sub_fetch_32(&pSet->pLastF->nRef, 1);
- ASSERT(nRef >= 0);
- if (nRef == 0) {
- tsdbLastFileName(pTsdb, pSet->diskId, pSet->fid, pSet->pLastF, fname);
- taosRemoveFile(fname);
- taosMemoryFree(pSet->pLastF);
- }
-
// sma
nRef = atomic_sub_fetch_32(&pSet->pSmaF->nRef, 1);
ASSERT(nRef >= 0);
@@ -1070,6 +1133,18 @@ void tsdbFSUnref(STsdb *pTsdb, STsdbFS *pFS) {
taosRemoveFile(fname);
taosMemoryFree(pSet->pSmaF);
}
+
+ // stt
+ for (int32_t iStt = 0; iStt < pSet->nSttF; iStt++) {
+ nRef = atomic_sub_fetch_32(&pSet->aSttF[iStt]->nRef, 1);
+ ASSERT(nRef >= 0);
+ if (nRef == 0) {
+ tsdbSttFileName(pTsdb, pSet->diskId, pSet->fid, pSet->aSttF[iStt], fname);
+ taosRemoveFile(fname);
+ taosMemoryFree(pSet->aSttF[iStt]);
+ /* code */
+ }
+ }
}
taosArrayDestroy(pFS->aDFileSet);
diff --git a/source/dnode/vnode/src/tsdb/tsdbFile.c b/source/dnode/vnode/src/tsdb/tsdbFile.c
index 00d2ac848f6d599fef54d9957047521e27062c89..3c944584de7ae10b21cb75913d1f920198288fe4 100644
--- a/source/dnode/vnode/src/tsdb/tsdbFile.c
+++ b/source/dnode/vnode/src/tsdb/tsdbFile.c
@@ -53,22 +53,22 @@ static int32_t tGetDataFile(uint8_t *p, SDataFile *pDataFile) {
return n;
}
-int32_t tPutLastFile(uint8_t *p, SLastFile *pLastFile) {
+int32_t tPutSttFile(uint8_t *p, SSttFile *pSttFile) {
int32_t n = 0;
- n += tPutI64v(p ? p + n : p, pLastFile->commitID);
- n += tPutI64v(p ? p + n : p, pLastFile->size);
- n += tPutI64v(p ? p + n : p, pLastFile->offset);
+ n += tPutI64v(p ? p + n : p, pSttFile->commitID);
+ n += tPutI64v(p ? p + n : p, pSttFile->size);
+ n += tPutI64v(p ? p + n : p, pSttFile->offset);
return n;
}
-static int32_t tGetLastFile(uint8_t *p, SLastFile *pLastFile) {
+static int32_t tGetSttFile(uint8_t *p, SSttFile *pSttFile) {
int32_t n = 0;
- n += tGetI64v(p + n, &pLastFile->commitID);
- n += tGetI64v(p + n, &pLastFile->size);
- n += tGetI64v(p + n, &pLastFile->offset);
+ n += tGetI64v(p + n, &pSttFile->commitID);
+ n += tGetI64v(p + n, &pSttFile->size);
+ n += tGetI64v(p + n, &pSttFile->offset);
return n;
}
@@ -102,9 +102,9 @@ void tsdbDataFileName(STsdb *pTsdb, SDiskID did, int32_t fid, SDataFile *pDataF,
TD_DIRSEP, pTsdb->path, TD_DIRSEP, TD_VID(pTsdb->pVnode), fid, pDataF->commitID, ".data");
}
-void tsdbLastFileName(STsdb *pTsdb, SDiskID did, int32_t fid, SLastFile *pLastF, char fname[]) {
+void tsdbSttFileName(STsdb *pTsdb, SDiskID did, int32_t fid, SSttFile *pSttF, char fname[]) {
snprintf(fname, TSDB_FILENAME_LEN - 1, "%s%s%s%sv%df%dver%" PRId64 "%s", tfsGetDiskPath(pTsdb->pVnode->pTfs, did),
- TD_DIRSEP, pTsdb->path, TD_DIRSEP, TD_VID(pTsdb->pVnode), fid, pLastF->commitID, ".last");
+ TD_DIRSEP, pTsdb->path, TD_DIRSEP, TD_VID(pTsdb->pVnode), fid, pSttF->commitID, ".stt");
}
void tsdbSmaFileName(STsdb *pTsdb, SDiskID did, int32_t fid, SSmaFile *pSmaF, char fname[]) {
@@ -148,7 +148,7 @@ int32_t tsdbDFileRollback(STsdb *pTsdb, SDFileSet *pSet, EDataFileT ftype) {
}
// ftruncate
- if (taosFtruncateFile(pFD, size) < 0) {
+ if (taosFtruncateFile(pFD, tsdbLogicToFileSize(size, pTsdb->pVnode->config.tsdbPageSize)) < 0) {
code = TAOS_SYSTEM_ERROR(errno);
goto _err;
}
@@ -194,9 +194,11 @@ int32_t tPutDFileSet(uint8_t *p, SDFileSet *pSet) {
n += tPutDataFile(p ? p + n : p, pSet->pDataF);
n += tPutSmaFile(p ? p + n : p, pSet->pSmaF);
- // last
- n += tPutU8(p ? p + n : p, 1); // for future compatibility
- n += tPutLastFile(p ? p + n : p, pSet->pLastF);
+ // stt
+ n += tPutU8(p ? p + n : p, pSet->nSttF);
+ for (int32_t iStt = 0; iStt < pSet->nSttF; iStt++) {
+ n += tPutSttFile(p ? p + n : p, pSet->aSttF[iStt]);
+ }
return n;
}
@@ -208,15 +210,40 @@ int32_t tGetDFileSet(uint8_t *p, SDFileSet *pSet) {
n += tGetI32v(p + n, &pSet->diskId.id);
n += tGetI32v(p + n, &pSet->fid);
- // data
+ // head
+ pSet->pHeadF = (SHeadFile *)taosMemoryCalloc(1, sizeof(SHeadFile));
+ if (pSet->pHeadF == NULL) {
+ return -1;
+ }
+ pSet->pHeadF->nRef = 1;
n += tGetHeadFile(p + n, pSet->pHeadF);
+
+ // data
+ pSet->pDataF = (SDataFile *)taosMemoryCalloc(1, sizeof(SDataFile));
+ if (pSet->pDataF == NULL) {
+ return -1;
+ }
+ pSet->pDataF->nRef = 1;
n += tGetDataFile(p + n, pSet->pDataF);
+
+ // sma
+ pSet->pSmaF = (SSmaFile *)taosMemoryCalloc(1, sizeof(SSmaFile));
+ if (pSet->pSmaF == NULL) {
+ return -1;
+ }
+ pSet->pSmaF->nRef = 1;
n += tGetSmaFile(p + n, pSet->pSmaF);
- // last
- uint8_t nLast;
- n += tGetU8(p + n, &nLast);
- n += tGetLastFile(p + n, pSet->pLastF);
+ // stt
+ n += tGetU8(p + n, &pSet->nSttF);
+ for (int32_t iStt = 0; iStt < pSet->nSttF; iStt++) {
+ pSet->aSttF[iStt] = (SSttFile *)taosMemoryCalloc(1, sizeof(SSttFile));
+ if (pSet->aSttF[iStt] == NULL) {
+ return -1;
+ }
+ pSet->aSttF[iStt]->nRef = 1;
+ n += tGetSttFile(p + n, pSet->aSttF[iStt]);
+ }
return n;
}
diff --git a/source/dnode/vnode/src/tsdb/tsdbMergeTree.c b/source/dnode/vnode/src/tsdb/tsdbMergeTree.c
new file mode 100644
index 0000000000000000000000000000000000000000..7e6a0d04ffc74b9719316df93851527fe2cbbcc6
--- /dev/null
+++ b/source/dnode/vnode/src/tsdb/tsdbMergeTree.c
@@ -0,0 +1,568 @@
+/*
+ * Copyright (c) 2019 TAOS Data, Inc.
+ *
+ * This program is free software: you can use, redistribute, and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3
+ * or later ("AGPL"), as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see .
+ */
+
+#include "tsdb.h"
+
+// SLDataIter =================================================
+struct SLDataIter {
+ SRBTreeNode node;
+ SSttBlk *pSttBlk;
+ SDataFReader *pReader;
+ int32_t iStt;
+ int8_t backward;
+ int32_t iSttBlk;
+ int32_t iRow;
+ SRowInfo rInfo;
+ uint64_t uid;
+ STimeWindow timeWindow;
+ SVersionRange verRange;
+ SSttBlockLoadInfo* pBlockLoadInfo;
+};
+
+SSttBlockLoadInfo* tCreateLastBlockLoadInfo(STSchema* pSchema, int16_t* colList, int32_t numOfCols) {
+ SSttBlockLoadInfo* pLoadInfo = taosMemoryCalloc(TSDB_DEFAULT_STT_FILE, sizeof(SSttBlockLoadInfo));
+ if (pLoadInfo == NULL) {
+ terrno = TSDB_CODE_OUT_OF_MEMORY;
+ return NULL;
+ }
+
+ for(int32_t i = 0; i < TSDB_DEFAULT_STT_FILE; ++i) {
+ pLoadInfo[i].blockIndex[0] = -1;
+ pLoadInfo[i].blockIndex[1] = -1;
+ pLoadInfo[i].currentLoadBlockIndex = 1;
+
+ int32_t code = tBlockDataCreate(&pLoadInfo[i].blockData[0]);
+ if (code) {
+ terrno = code;
+ }
+
+ code = tBlockDataCreate(&pLoadInfo[i].blockData[1]);
+ if (code) {
+ terrno = code;
+ }
+
+ pLoadInfo[i].aSttBlk = taosArrayInit(4, sizeof(SSttBlk));
+ pLoadInfo[i].pSchema = pSchema;
+ pLoadInfo[i].colIds = colList;
+ pLoadInfo[i].numOfCols = numOfCols;
+ }
+
+ return pLoadInfo;
+}
+
+void resetLastBlockLoadInfo(SSttBlockLoadInfo* pLoadInfo) {
+ for(int32_t i = 0; i < TSDB_DEFAULT_STT_FILE; ++i) {
+ pLoadInfo[i].currentLoadBlockIndex = 1;
+ pLoadInfo[i].blockIndex[0] = -1;
+ pLoadInfo[i].blockIndex[1] = -1;
+
+ taosArrayClear(pLoadInfo[i].aSttBlk);
+
+ pLoadInfo[i].elapsedTime = 0;
+ pLoadInfo[i].loadBlocks = 0;
+ }
+}
+
+void getLastBlockLoadInfo(SSttBlockLoadInfo* pLoadInfo, int64_t* blocks, double* el) {
+ for(int32_t i = 0; i < TSDB_DEFAULT_STT_FILE; ++i) {
+ *el += pLoadInfo[i].elapsedTime;
+ *blocks += pLoadInfo[i].loadBlocks;
+ }
+}
+
+void* destroyLastBlockLoadInfo(SSttBlockLoadInfo* pLoadInfo) {
+ for(int32_t i = 0; i < TSDB_DEFAULT_STT_FILE; ++i) {
+ pLoadInfo[i].currentLoadBlockIndex = 1;
+ pLoadInfo[i].blockIndex[0] = -1;
+ pLoadInfo[i].blockIndex[1] = -1;
+
+ tBlockDataDestroy(&pLoadInfo[i].blockData[0], true);
+ tBlockDataDestroy(&pLoadInfo[i].blockData[1], true);
+
+ taosArrayDestroy(pLoadInfo[i].aSttBlk);
+ }
+
+ taosMemoryFree(pLoadInfo);
+ return NULL;
+}
+
+static SBlockData* loadLastBlock(SLDataIter *pIter, const char* idStr) {
+ int32_t code = 0;
+
+ SSttBlockLoadInfo* pInfo = pIter->pBlockLoadInfo;
+ if (pInfo->blockIndex[0] == pIter->iSttBlk) {
+ return &pInfo->blockData[0];
+ }
+
+ if (pInfo->blockIndex[1] == pIter->iSttBlk) {
+ return &pInfo->blockData[1];
+ }
+
+ pInfo->currentLoadBlockIndex ^= 1;
+ if (pIter->pSttBlk != NULL) { // current block not loaded yet
+ int64_t st = taosGetTimestampUs();
+
+ SBlockData* pBlock = &pInfo->blockData[pInfo->currentLoadBlockIndex];
+
+ TABLEID id = {0};
+ if (pIter->pSttBlk->suid != 0) {
+ id.suid = pIter->pSttBlk->suid;
+ } else {
+ id.uid = pIter->uid;
+ }
+
+ tBlockDataInit(pBlock, &id, pInfo->pSchema, pInfo->colIds, pInfo->numOfCols);
+ code = tsdbReadSttBlock(pIter->pReader, pIter->iStt, pIter->pSttBlk, pBlock);
+
+ double el = (taosGetTimestampUs() - st)/ 1000.0;
+ pInfo->elapsedTime += el;
+ pInfo->loadBlocks += 1;
+
+ tsdbDebug("read last block, index:%d, last file index:%d, elapsed time:%.2f ms, %s", pIter->iSttBlk, pIter->iStt, el, idStr);
+ if (code != TSDB_CODE_SUCCESS) {
+ goto _exit;
+ }
+
+ pInfo->blockIndex[pInfo->currentLoadBlockIndex] = pIter->iSttBlk;
+ pIter->iRow = (pIter->backward) ? pInfo->blockData[pInfo->currentLoadBlockIndex].nRow : -1;
+ }
+
+ return &pInfo->blockData[pInfo->currentLoadBlockIndex];
+
+ _exit:
+ if (code != TSDB_CODE_SUCCESS) {
+ terrno = code;
+ }
+
+ return NULL;
+}
+
+// find the earliest block that contains the required records
+static FORCE_INLINE int32_t findEarliestIndex(int32_t index, uint64_t uid, const SSttBlk* pBlockList, int32_t num, int32_t backward) {
+ int32_t i = index;
+ int32_t step = backward? 1:-1;
+ while (i >= 0 && i < num && uid >= pBlockList[i].minUid && uid <= pBlockList[i].maxUid) {
+ i += step;
+ }
+ return i - step;
+}
+
+static int32_t binarySearchForStartBlock(SSttBlk*pBlockList, int32_t num, uint64_t uid, int32_t backward) {
+ int32_t midPos = -1;
+ if (num <= 0) {
+ return -1;
+ }
+
+ int32_t firstPos = 0;
+ int32_t lastPos = num - 1;
+
+ // find the first position which is bigger than the key
+ if ((uid > pBlockList[lastPos].maxUid) || (uid < pBlockList[firstPos].minUid)) {
+ return -1;
+ }
+
+ while (1) {
+ if (uid >= pBlockList[firstPos].minUid && uid <= pBlockList[firstPos].maxUid) {
+ return findEarliestIndex(firstPos, uid, pBlockList, num, backward);
+ }
+
+ if (uid > pBlockList[lastPos].maxUid || uid < pBlockList[firstPos].minUid) {
+ return -1;
+ }
+
+ int32_t numOfRows = lastPos - firstPos + 1;
+ midPos = (numOfRows >> 1u) + firstPos;
+
+ if (uid < pBlockList[midPos].minUid) {
+ lastPos = midPos - 1;
+ } else if (uid > pBlockList[midPos].maxUid) {
+ firstPos = midPos + 1;
+ } else {
+ return findEarliestIndex(midPos, uid, pBlockList, num, backward);
+ }
+ }
+}
+
+static FORCE_INLINE int32_t findEarliestRow(int32_t index, uint64_t uid, const uint64_t* uidList, int32_t num, int32_t backward) {
+ int32_t i = index;
+ int32_t step = backward? 1:-1;
+ while (i >= 0 && i < num && uid == uidList[i]) {
+ i += step;
+ }
+ return i - step;
+}
+
+static int32_t binarySearchForStartRowIndex(uint64_t* uidList, int32_t num, uint64_t uid, int32_t backward) {
+ int32_t firstPos = 0;
+ int32_t lastPos = num - 1;
+
+ // find the first position which is bigger than the key
+ if ((uid > uidList[lastPos]) || (uid < uidList[firstPos])) {
+ return -1;
+ }
+
+ while (1) {
+ if (uid == uidList[firstPos]) {
+ return findEarliestRow(firstPos, uid, uidList, num, backward);
+ }
+
+ if (uid > uidList[lastPos] || uid < uidList[firstPos]) {
+ return -1;
+ }
+
+ int32_t numOfRows = lastPos - firstPos + 1;
+ int32_t midPos = (numOfRows >> 1u) + firstPos;
+
+ if (uid < uidList[midPos]) {
+ lastPos = midPos - 1;
+ } else if (uid > uidList[midPos]) {
+ firstPos = midPos + 1;
+ } else {
+ return findEarliestRow(midPos, uid, uidList, num, backward);
+ }
+ }
+}
+
+int32_t tLDataIterOpen(struct SLDataIter **pIter, SDataFReader *pReader, int32_t iStt, int8_t backward, uint64_t suid,
+ uint64_t uid, STimeWindow *pTimeWindow, SVersionRange *pRange, SSttBlockLoadInfo* pBlockLoadInfo) {
+ int32_t code = 0;
+ *pIter = taosMemoryCalloc(1, sizeof(SLDataIter));
+ if (*pIter == NULL) {
+ code = TSDB_CODE_OUT_OF_MEMORY;
+ goto _exit;
+ }
+
+ (*pIter)->uid = uid;
+ (*pIter)->pReader = pReader;
+ (*pIter)->iStt = iStt;
+ (*pIter)->backward = backward;
+ (*pIter)->verRange = *pRange;
+ (*pIter)->timeWindow = *pTimeWindow;
+
+ (*pIter)->pBlockLoadInfo = pBlockLoadInfo;
+ if (taosArrayGetSize(pBlockLoadInfo->aSttBlk) == 0) {
+ code = tsdbReadSttBlk(pReader, iStt, pBlockLoadInfo->aSttBlk);
+ if (code) {
+ goto _exit;
+ } else {
+ size_t size = taosArrayGetSize(pBlockLoadInfo->aSttBlk);
+ SArray* pTmp = taosArrayInit(size, sizeof(SSttBlk));
+ for(int32_t i = 0; i < size; ++i) {
+ SSttBlk* p = taosArrayGet(pBlockLoadInfo->aSttBlk, i);
+ if (p->suid == suid) {
+ taosArrayPush(pTmp, p);
+ }
+ }
+
+ taosArrayDestroy(pBlockLoadInfo->aSttBlk);
+ pBlockLoadInfo->aSttBlk = pTmp;
+ }
+ }
+
+ size_t size = taosArrayGetSize(pBlockLoadInfo->aSttBlk);
+
+ // find the start block
+ (*pIter)->iSttBlk = binarySearchForStartBlock(pBlockLoadInfo->aSttBlk->pData, size, uid, backward);
+ if ((*pIter)->iSttBlk != -1) {
+ (*pIter)->pSttBlk = taosArrayGet(pBlockLoadInfo->aSttBlk, (*pIter)->iSttBlk);
+ (*pIter)->iRow = ((*pIter)->backward) ? (*pIter)->pSttBlk->nRow : -1;
+ }
+
+_exit:
+ return code;
+}
+
+void tLDataIterClose(SLDataIter *pIter) {
+ taosMemoryFree(pIter);
+}
+
+void tLDataIterNextBlock(SLDataIter *pIter) {
+ int32_t step = pIter->backward ? -1 : 1;
+ pIter->iSttBlk += step;
+
+ int32_t index = -1;
+ size_t size = pIter->pBlockLoadInfo->aSttBlk->size;
+ for (int32_t i = pIter->iSttBlk; i < size && i >= 0; i += step) {
+ SSttBlk *p = taosArrayGet(pIter->pBlockLoadInfo->aSttBlk, i);
+ if ((!pIter->backward) && p->minUid > pIter->uid) {
+ break;
+ }
+
+ if (pIter->backward && p->maxUid < pIter->uid) {
+ break;
+ }
+
+ // check uid firstly
+ if (p->minUid <= pIter->uid && p->maxUid >= pIter->uid) {
+ if ((!pIter->backward) && p->minKey > pIter->timeWindow.ekey) {
+ break;
+ }
+
+ if (pIter->backward && p->maxKey < pIter->timeWindow.skey) {
+ break;
+ }
+
+ // check time range secondly
+ if (p->minKey <= pIter->timeWindow.ekey && p->maxKey >= pIter->timeWindow.skey) {
+ if ((!pIter->backward) && p->minVer > pIter->verRange.maxVer) {
+ break;
+ }
+
+ if (pIter->backward && p->maxVer < pIter->verRange.minVer) {
+ break;
+ }
+
+ if (p->minVer <= pIter->verRange.maxVer && p->maxVer >= pIter->verRange.minVer) {
+ index = i;
+ break;
+ }
+ }
+ }
+ }
+
+ pIter->pSttBlk = NULL;
+ if (index != -1) {
+ pIter->iSttBlk = index;
+ pIter->pSttBlk = (SSttBlk *)taosArrayGet(pIter->pBlockLoadInfo->aSttBlk, pIter->iSttBlk);
+ }
+}
+
+static void findNextValidRow(SLDataIter *pIter, const char* idStr) {
+ int32_t step = pIter->backward ? -1 : 1;
+
+ bool hasVal = false;
+ int32_t i = pIter->iRow;
+
+ SBlockData *pBlockData = loadLastBlock(pIter, idStr);
+
+ // mostly we only need to find the start position for a given table
+ if ((((i == 0) && (!pIter->backward)) || (i == pBlockData->nRow - 1 && pIter->backward)) && pBlockData->aUid != NULL) {
+ i = binarySearchForStartRowIndex((uint64_t*)pBlockData->aUid, pBlockData->nRow, pIter->uid, pIter->backward);
+ if (i == -1) {
+ pIter->iRow = -1;
+ return;
+ }
+ }
+
+ for (; i < pBlockData->nRow && i >= 0; i += step) {
+ if (pBlockData->aUid != NULL) {
+ if (!pIter->backward) {
+ /*if (pBlockData->aUid[i] < pIter->uid) {
+ continue;
+ } else */if (pBlockData->aUid[i] > pIter->uid) {
+ break;
+ }
+ } else {
+ /*if (pBlockData->aUid[i] > pIter->uid) {
+ continue;
+ } else */if (pBlockData->aUid[i] < pIter->uid) {
+ break;
+ }
+ }
+ }
+
+ int64_t ts = pBlockData->aTSKEY[i];
+ if (!pIter->backward) { // asc
+ if (ts > pIter->timeWindow.ekey) { // no more data
+ break;
+ } else if (ts < pIter->timeWindow.skey) {
+ continue;
+ }
+ } else {
+ if (ts < pIter->timeWindow.skey) {
+ break;
+ } else if (ts > pIter->timeWindow.ekey) {
+ continue;
+ }
+ }
+
+ int64_t ver = pBlockData->aVersion[i];
+ if (ver < pIter->verRange.minVer) {
+ continue;
+ }
+
+ // todo opt handle desc case
+ if (ver > pIter->verRange.maxVer) {
+ continue;
+ }
+
+ hasVal = true;
+ break;
+ }
+
+ pIter->iRow = (hasVal) ? i : -1;
+}
+
+bool tLDataIterNextRow(SLDataIter *pIter, const char* idStr) {
+ int32_t code = 0;
+ int32_t step = pIter->backward ? -1 : 1;
+
+ // no qualified last file block in current file, no need to fetch row
+ if (pIter->pSttBlk == NULL) {
+ return false;
+ }
+
+ int32_t iBlockL = pIter->iSttBlk;
+ SBlockData *pBlockData = loadLastBlock(pIter, idStr);
+ pIter->iRow += step;
+
+ while (1) {
+ findNextValidRow(pIter, idStr);
+
+ if (pIter->iRow >= pBlockData->nRow || pIter->iRow < 0) {
+ tLDataIterNextBlock(pIter);
+ if (pIter->pSttBlk == NULL) { // no more data
+ goto _exit;
+ }
+ } else {
+ break;
+ }
+
+ if (iBlockL != pIter->iSttBlk) {
+ pBlockData = loadLastBlock(pIter, idStr);
+ pIter->iRow += step;
+ }
+ }
+
+ pIter->rInfo.suid = pBlockData->suid;
+ pIter->rInfo.uid = pBlockData->uid;
+ pIter->rInfo.row = tsdbRowFromBlockData(pBlockData, pIter->iRow);
+
+_exit:
+ if (code != TSDB_CODE_SUCCESS) {
+ terrno = code;
+ }
+
+ return (code == TSDB_CODE_SUCCESS) && (pIter->pSttBlk != NULL);
+}
+
+SRowInfo *tLDataIterGet(SLDataIter *pIter) { return &pIter->rInfo; }
+
+// SMergeTree =================================================
+static FORCE_INLINE int32_t tLDataIterCmprFn(const void *p1, const void *p2) {
+ SLDataIter *pIter1 = (SLDataIter *)(((uint8_t *)p1) - sizeof(SRBTreeNode));
+ SLDataIter *pIter2 = (SLDataIter *)(((uint8_t *)p2) - sizeof(SRBTreeNode));
+
+ TSDBKEY key1 = TSDBROW_KEY(&pIter1->rInfo.row);
+ TSDBKEY key2 = TSDBROW_KEY(&pIter2->rInfo.row);
+
+ if (key1.ts < key2.ts) {
+ return -1;
+ } else if (key1.ts > key2.ts) {
+ return 1;
+ } else {
+ if (key1.version < key2.version) {
+ return -1;
+ } else if (key1.version > key2.version) {
+ return 1;
+ } else {
+ return 0;
+ }
+ }
+}
+
+int32_t tMergeTreeOpen(SMergeTree *pMTree, int8_t backward, SDataFReader *pFReader, uint64_t suid, uint64_t uid,
+ STimeWindow *pTimeWindow, SVersionRange *pVerRange, SSttBlockLoadInfo *pBlockLoadInfo,
+ bool destroyLoadInfo, const char *idStr) {
+ pMTree->backward = backward;
+ pMTree->pIter = NULL;
+ pMTree->pIterList = taosArrayInit(4, POINTER_BYTES);
+ if (pMTree->pIterList == NULL) {
+ return TSDB_CODE_OUT_OF_MEMORY;
+ }
+
+ pMTree->idStr = idStr;
+
+ tRBTreeCreate(&pMTree->rbt, tLDataIterCmprFn);
+ int32_t code = TSDB_CODE_SUCCESS;
+
+ pMTree->pLoadInfo = pBlockLoadInfo;
+ pMTree->destroyLoadInfo = destroyLoadInfo;
+
+ for (int32_t i = 0; i < pFReader->pSet->nSttF; ++i) { // open all last file
+ struct SLDataIter* pIter = NULL;
+ code = tLDataIterOpen(&pIter, pFReader, i, pMTree->backward, suid, uid, pTimeWindow, pVerRange, &pMTree->pLoadInfo[i]);
+ if (code != TSDB_CODE_SUCCESS) {
+ goto _end;
+ }
+
+ bool hasVal = tLDataIterNextRow(pIter, pMTree->idStr);
+ if (hasVal) {
+ taosArrayPush(pMTree->pIterList, &pIter);
+ tMergeTreeAddIter(pMTree, pIter);
+ } else {
+ tLDataIterClose(pIter);
+ }
+ }
+
+ return code;
+
+_end:
+ tMergeTreeClose(pMTree);
+ return code;
+}
+
+void tMergeTreeAddIter(SMergeTree *pMTree, SLDataIter *pIter) { tRBTreePut(&pMTree->rbt, (SRBTreeNode *)pIter); }
+
+bool tMergeTreeNext(SMergeTree *pMTree) {
+ int32_t code = TSDB_CODE_SUCCESS;
+ if (pMTree->pIter) {
+ SLDataIter *pIter = pMTree->pIter;
+
+ bool hasVal = tLDataIterNextRow(pIter, pMTree->idStr);
+ if (!hasVal) {
+ pMTree->pIter = NULL;
+ }
+
+ // compare with min in RB Tree
+ pIter = (SLDataIter *)tRBTreeMin(&pMTree->rbt);
+ if (pMTree->pIter && pIter) {
+ int32_t c = pMTree->rbt.cmprFn(RBTREE_NODE_PAYLOAD(&pMTree->pIter->node), RBTREE_NODE_PAYLOAD(&pIter->node));
+ if (c > 0) {
+ tRBTreePut(&pMTree->rbt, (SRBTreeNode *)pMTree->pIter);
+ pMTree->pIter = NULL;
+ } else {
+ ASSERT(c);
+ }
+ }
+ }
+
+ if (pMTree->pIter == NULL) {
+ pMTree->pIter = (SLDataIter *)tRBTreeMin(&pMTree->rbt);
+ if (pMTree->pIter) {
+ tRBTreeDrop(&pMTree->rbt, (SRBTreeNode *)pMTree->pIter);
+ }
+ }
+
+ return pMTree->pIter != NULL;
+}
+
+TSDBROW tMergeTreeGetRow(SMergeTree *pMTree) { return pMTree->pIter->rInfo.row; }
+
+void tMergeTreeClose(SMergeTree *pMTree) {
+ size_t size = taosArrayGetSize(pMTree->pIterList);
+ for (int32_t i = 0; i < size; ++i) {
+ SLDataIter *pIter = taosArrayGetP(pMTree->pIterList, i);
+ tLDataIterClose(pIter);
+ }
+
+ pMTree->pIterList = taosArrayDestroy(pMTree->pIterList);
+ pMTree->pIter = NULL;
+
+ if (pMTree->destroyLoadInfo) {
+ pMTree->pLoadInfo = destroyLastBlockLoadInfo(pMTree->pLoadInfo);
+ pMTree->destroyLoadInfo = false;
+ }
+}
diff --git a/source/dnode/vnode/src/tsdb/tsdbOpen.c b/source/dnode/vnode/src/tsdb/tsdbOpen.c
index ec760e3c57c277ed3183bb73729f7a655daa0304..fcbcff924879995639f1a07a2f267506ca72a4af 100644
--- a/source/dnode/vnode/src/tsdb/tsdbOpen.c
+++ b/source/dnode/vnode/src/tsdb/tsdbOpen.c
@@ -85,6 +85,8 @@ _err:
int tsdbClose(STsdb **pTsdb) {
if (*pTsdb) {
taosThreadRwlockDestroy(&(*pTsdb)->rwLock);
+ tsdbMemTableDestroy((*pTsdb)->mem);
+ (*pTsdb)->mem = NULL;
tsdbFSClose(*pTsdb);
tsdbCloseCache(*pTsdb);
taosMemoryFreeClear(*pTsdb);
diff --git a/source/dnode/vnode/src/tsdb/tsdbRead.c b/source/dnode/vnode/src/tsdb/tsdbRead.c
index cd40a9acc21d61a21defd45184d1f955b398d3b5..c3cb5f9eb8ab8317d8f1a9e581819b75cff2811c 100644
--- a/source/dnode/vnode/src/tsdb/tsdbRead.c
+++ b/source/dnode/vnode/src/tsdb/tsdbRead.c
@@ -17,8 +17,6 @@
#include "tsdb.h"
#define ASCENDING_TRAVERSE(o) (o == TSDB_ORDER_ASC)
-#define ALL_ROWS_CHECKED_INDEX (INT16_MIN)
-#define DEFAULT_ROW_INDEX_VAL (-1)
typedef enum {
EXTERNAL_ROWS_PREV = 0x1,
@@ -34,21 +32,20 @@ typedef struct {
typedef struct {
int32_t numOfBlocks;
- int32_t numOfLastBlocks;
+ int32_t numOfLastFiles;
} SBlockNumber;
typedef struct STableBlockScanInfo {
uint64_t uid;
TSKEY lastKey;
- SMapData mapData; // block info (compressed)
- SArray* pBlockList; // block data index list
- SIterInfo iter; // mem buffer skip list iterator
- SIterInfo iiter; // imem buffer skip list iterator
- SArray* delSkyline; // delete info for this table
- int32_t fileDelIndex; // file block delete index
- int32_t lastBlockDelIndex;// delete index for last block
- bool iterInit; // whether to initialize the in-memory skip list iterator or not
- int16_t indexInBlockL;// row position in last block
+ SMapData mapData; // block info (compressed)
+ SArray* pBlockList; // block data index list
+ SIterInfo iter; // mem buffer skip list iterator
+ SIterInfo iiter; // imem buffer skip list iterator
+ SArray* delSkyline; // delete info for this table
+ int32_t fileDelIndex; // file block delete index
+ int32_t lastBlockDelIndex; // delete index for last block
+ bool iterInit; // whether to initialize the in-memory skip list iterator or not
} STableBlockScanInfo;
typedef struct SBlockOrderWrapper {
@@ -73,6 +70,8 @@ typedef struct SIOCostSummary {
double smaLoadTime;
int64_t lastBlockLoad;
double lastBlockLoadTime;
+ int64_t composedBlocks;
+ double buildComposedBlockTime;
} SIOCostSummary;
typedef struct SBlockLoadSuppInfo {
@@ -80,31 +79,25 @@ typedef struct SBlockLoadSuppInfo {
SColumnDataAgg tsColAgg;
SColumnDataAgg** plist;
int16_t* colIds; // column ids for loading file block data
+ int32_t numOfCols;
char** buildBuf; // build string tmp buffer, todo remove it later after all string format being updated.
} SBlockLoadSuppInfo;
-typedef struct SVersionRange {
- uint64_t minVer;
- uint64_t maxVer;
-} SVersionRange;
-
typedef struct SLastBlockReader {
- SArray* pBlockL;
- int32_t currentBlockIndex;
- SBlockData lastBlockData;
- STimeWindow window;
- SVersionRange verRange;
- int32_t order;
- uint64_t uid;
- int16_t* rowIndex; // row index ptr, usually from the STableBlockScanInfo->indexInBlockL
+ STimeWindow window;
+ SVersionRange verRange;
+ int32_t order;
+ uint64_t uid;
+ SMergeTree mergeTree;
+ SSttBlockLoadInfo* pInfo;
} SLastBlockReader;
typedef struct SFilesetIter {
- int32_t numOfFiles; // number of total files
- int32_t index; // current accessed index in the list
- SArray* pFileList; // data file list
+ int32_t numOfFiles; // number of total files
+ int32_t index; // current accessed index in the list
+ SArray* pFileList; // data file list
int32_t order;
- SLastBlockReader* pLastBlockReader; // last file block reader
+ SLastBlockReader* pLastBlockReader; // last file block reader
} SFilesetIter;
typedef struct SFileDataBlockInfo {
@@ -116,9 +109,9 @@ typedef struct SFileDataBlockInfo {
typedef struct SDataBlockIter {
int32_t numOfBlocks;
int32_t index;
- SArray* blockList; // SArray
+ SArray* blockList; // SArray
int32_t order;
- SBlock block; // current SBlock data
+ SDataBlk block; // current SDataBlk data
SHashObj* pTableMap;
} SDataBlockIter;
@@ -129,11 +122,17 @@ typedef struct SFileBlockDumpInfo {
bool allDumped;
} SFileBlockDumpInfo;
+typedef struct SUidOrderCheckInfo {
+ uint64_t* tableUidList; // access table uid list in uid ascending order list
+ int32_t currentIndex; // index in table uid list
+} SUidOrderCheckInfo;
+
typedef struct SReaderStatus {
- bool loadFromFile; // check file stage
+ bool loadFromFile; // check file stage
bool composedDataBlock; // the returned data block is a composed block or not
- SHashObj* pTableMap; // SHash
- STableBlockScanInfo* pTableIter; // table iterator used in building in-memory buffer data blocks.
+ SHashObj* pTableMap; // SHash
+ STableBlockScanInfo* pTableIter; // table iterator used in building in-memory buffer data blocks.
+ SUidOrderCheckInfo uidCheckInfo; // check all table in uid order
SFileBlockDumpInfo fBlockDumpInfo;
SDFileSet* pCurrentFileset; // current opened file set
SBlockData fileBlockData;
@@ -169,35 +168,43 @@ static int buildDataBlockFromBufImpl(STableBlockScanInfo* pBlockScanInfo, i
static TSDBROW* getValidMemRow(SIterInfo* pIter, const SArray* pDelList, STsdbReader* pReader);
static int32_t doMergeRowsInFileBlocks(SBlockData* pBlockData, STableBlockScanInfo* pScanInfo, STsdbReader* pReader,
SRowMerger* pMerger);
-static int32_t doMergeRowsInLastBlock(SLastBlockReader* pLastBlockReader, STableBlockScanInfo* pScanInfo, int64_t ts, SRowMerger* pMerger);
+static int32_t doMergeRowsInLastBlock(SLastBlockReader* pLastBlockReader, STableBlockScanInfo* pScanInfo, int64_t ts,
+ SRowMerger* pMerger);
static int32_t doMergeRowsInBuf(SIterInfo* pIter, uint64_t uid, int64_t ts, SArray* pDelList, SRowMerger* pMerger,
STsdbReader* pReader);
static int32_t doAppendRowFromTSRow(SSDataBlock* pBlock, STsdbReader* pReader, STSRow* pTSRow, uint64_t uid);
static int32_t doAppendRowFromFileBlock(SSDataBlock* pResBlock, STsdbReader* pReader, SBlockData* pBlockData,
- int32_t rowIndex);
+ int32_t rowIndex);
static void setComposedBlockFlag(STsdbReader* pReader, bool composed);
static bool hasBeenDropped(const SArray* pDelList, int32_t* index, TSDBKEY* pKey, int32_t order);
-static void doMergeMemTableMultiRows(TSDBROW* pRow, uint64_t uid, SIterInfo* pIter, SArray* pDelList, STSRow** pTSRow,
- STsdbReader* pReader, bool* freeTSRow);
-static void doMergeMemIMemRows(TSDBROW* pRow, TSDBROW* piRow, STableBlockScanInfo* pBlockScanInfo, STsdbReader* pReader,
- STSRow** pTSRow);
-static int32_t mergeRowsInFileBlocks(SBlockData* pBlockData, STableBlockScanInfo* pBlockScanInfo, int64_t key, STsdbReader* pReader);
+static int32_t doMergeMemTableMultiRows(TSDBROW* pRow, uint64_t uid, SIterInfo* pIter, SArray* pDelList,
+ STSRow** pTSRow, STsdbReader* pReader, bool* freeTSRow);
+static int32_t doMergeMemIMemRows(TSDBROW* pRow, TSDBROW* piRow, STableBlockScanInfo* pBlockScanInfo,
+ STsdbReader* pReader, STSRow** pTSRow);
+static int32_t mergeRowsInFileBlocks(SBlockData* pBlockData, STableBlockScanInfo* pBlockScanInfo, int64_t key,
+ STsdbReader* pReader);
static int32_t initDelSkylineIterator(STableBlockScanInfo* pBlockScanInfo, STsdbReader* pReader, STbData* pMemTbData,
STbData* piMemTbData);
static STsdb* getTsdbByRetentions(SVnode* pVnode, TSKEY winSKey, SRetention* retentions, const char* idstr,
int8_t* pLevel);
static SVersionRange getQueryVerRange(SVnode* pVnode, SQueryTableDataCond* pCond, int8_t level);
-static int64_t getCurrentKeyInLastBlock(SLastBlockReader* pLastBlockReader);
-static bool hasDataInLastBlock(SLastBlockReader* pLastBlockReader);
-static int32_t doBuildDataBlock(STsdbReader* pReader);
+static int64_t getCurrentKeyInLastBlock(SLastBlockReader* pLastBlockReader);
+static bool hasDataInLastBlock(SLastBlockReader* pLastBlockReader);
+static int32_t doBuildDataBlock(STsdbReader* pReader);
+static TSDBKEY getCurrentKeyInBuf(STableBlockScanInfo* pScanInfo, STsdbReader* pReader);
+static bool hasDataInFileBlock(const SBlockData* pBlockData, const SFileBlockDumpInfo* pDumpInfo);
+static bool hasDataInLastBlock(SLastBlockReader* pLastBlockReader);
+
+static bool outOfTimeWindow(int64_t ts, STimeWindow* pWindow) { return (ts > pWindow->ekey) || (ts < pWindow->skey); }
static int32_t setColumnIdSlotList(STsdbReader* pReader, SSDataBlock* pBlock) {
SBlockLoadSuppInfo* pSupInfo = &pReader->suppInfo;
size_t numOfCols = blockDataGetNumOfCols(pBlock);
+ pSupInfo->numOfCols = numOfCols;
pSupInfo->colIds = taosMemoryMalloc(numOfCols * sizeof(int16_t));
pSupInfo->buildBuf = taosMemoryCalloc(numOfCols, POINTER_BYTES);
if (pSupInfo->buildBuf == NULL || pSupInfo->colIds == NULL) {
@@ -228,15 +235,13 @@ static SHashObj* createDataBlockScanInfo(STsdbReader* pTsdbReader, const STableK
}
for (int32_t j = 0; j < numOfTables; ++j) {
- STableBlockScanInfo info = {.lastKey = 0, .uid = idList[j].uid, .indexInBlockL = DEFAULT_ROW_INDEX_VAL};
+ STableBlockScanInfo info = {.lastKey = 0, .uid = idList[j].uid};
if (ASCENDING_TRAVERSE(pTsdbReader->order)) {
- if (info.lastKey == INT64_MIN || info.lastKey < pTsdbReader->window.skey) {
- info.lastKey = pTsdbReader->window.skey;
- }
-
- ASSERT(info.lastKey >= pTsdbReader->window.skey && info.lastKey <= pTsdbReader->window.ekey);
+ int64_t skey = pTsdbReader->window.skey;
+ info.lastKey = (skey > INT64_MIN) ? (skey - 1) : skey;
} else {
- info.lastKey = pTsdbReader->window.skey;
+ int64_t ekey = pTsdbReader->window.ekey;
+ info.lastKey = (ekey < INT64_MAX) ? (ekey + 1) : ekey;
}
taosHashPut(pTableMap, &info.uid, sizeof(uint64_t), &info, sizeof(info));
@@ -250,7 +255,7 @@ static SHashObj* createDataBlockScanInfo(STsdbReader* pTsdbReader, const STableK
return pTableMap;
}
-static void resetDataBlockScanInfo(SHashObj* pTableMap) {
+static void resetDataBlockScanInfo(SHashObj* pTableMap, int64_t ts) {
STableBlockScanInfo* p = NULL;
while ((p = taosHashIterate(pTableMap, p)) != NULL) {
@@ -261,6 +266,7 @@ static void resetDataBlockScanInfo(SHashObj* pTableMap) {
}
p->delSkyline = taosArrayDestroy(p->delSkyline);
+ p->lastKey = ts;
}
}
@@ -322,7 +328,7 @@ static void limitOutputBufferSize(const SQueryTableDataCond* pCond, int32_t* cap
}
// init file iterator
-static int32_t initFilesetIterator(SFilesetIter* pIter, SArray* aDFileSet, STsdbReader* pReader/*int32_t order, const char* idstr*/) {
+static int32_t initFilesetIterator(SFilesetIter* pIter, SArray* aDFileSet, STsdbReader* pReader) {
size_t numOfFileset = taosArrayGetSize(aDFileSet);
pIter->index = ASCENDING_TRAVERSE(pReader->order) ? -1 : numOfFileset;
@@ -337,17 +343,22 @@ static int32_t initFilesetIterator(SFilesetIter* pIter, SArray* aDFileSet, STsdb
tsdbError("failed to prepare the last block iterator, code:%d %s", tstrerror(code), pReader->idStr);
return code;
}
+ }
- SLastBlockReader* pLReader = pIter->pLastBlockReader;
- pLReader->pBlockL = taosArrayInit(4, sizeof(SBlockL));
- pLReader->order = pReader->order;
- pLReader->window = pReader->window;
- pLReader->verRange = pReader->verRange;
- pLReader->currentBlockIndex = -1;
+ SLastBlockReader* pLReader = pIter->pLastBlockReader;
+ pLReader->order = pReader->order;
+ pLReader->window = pReader->window;
+ pLReader->verRange = pReader->verRange;
- int32_t code = tBlockDataCreate(&pLReader->lastBlockData);
- if (code != TSDB_CODE_SUCCESS) {
- return code;
+ pLReader->uid = 0;
+ tMergeTreeClose(&pLReader->mergeTree);
+
+ if (pLReader->pInfo == NULL) {
+ // here we ignore the first column, which is always be the primary timestamp column
+ pLReader->pInfo = tCreateLastBlockLoadInfo(pReader->pSchema, &pReader->suppInfo.colIds[1], pReader->suppInfo.numOfCols - 1);
+ if (pLReader->pInfo == NULL) {
+ tsdbDebug("init fileset iterator failed, code:%s %s", tstrerror(terrno), pReader->idStr);
+ return terrno;
}
}
@@ -364,6 +375,13 @@ static bool filesetIteratorNext(SFilesetIter* pIter, STsdbReader* pReader) {
return false;
}
+ SIOCostSummary* pSum = &pReader->cost;
+ getLastBlockLoadInfo(pIter->pLastBlockReader->pInfo, &pSum->lastBlockLoad, &pReader->cost.lastBlockLoadTime);
+
+ pIter->pLastBlockReader->uid = 0;
+ tMergeTreeClose(&pIter->pLastBlockReader->mergeTree);
+ resetLastBlockLoadInfo(pIter->pLastBlockReader->pInfo);
+
// check file the time range of coverage
STimeWindow win = {0};
@@ -408,7 +426,7 @@ _err:
return false;
}
-static void resetDataBlockIterator(SDataBlockIter* pIter, int32_t order, SHashObj* pTableMap) {
+static void resetDataBlockIterator(SDataBlockIter* pIter, int32_t order) {
pIter->order = order;
pIter->index = -1;
pIter->numOfBlocks = 0;
@@ -417,7 +435,6 @@ static void resetDataBlockIterator(SDataBlockIter* pIter, int32_t order, SHashOb
} else {
taosArrayClear(pIter->blockList);
}
- pIter->pTableMap = pTableMap;
}
static void cleanupDataBlockIterator(SDataBlockIter* pIter) { taosArrayDestroy(pIter->blockList); }
@@ -469,7 +486,7 @@ static int32_t tsdbReaderCreate(SVnode* pVnode, SQueryTableDataCond* pCond, STsd
pReader->pTsdb = getTsdbByRetentions(pVnode, pCond->twindows.skey, pVnode->config.tsdbCfg.retentions, idstr, &level);
pReader->suid = pCond->suid;
pReader->order = pCond->order;
- pReader->capacity = 4096;
+ pReader->capacity = capacity;
pReader->idStr = (idstr != NULL) ? strdup(idstr) : NULL;
pReader->verRange = getQueryVerRange(pVnode, pCond, level);
pReader->type = pCond->type;
@@ -573,14 +590,12 @@ static void cleanupTableScanInfo(SHashObj* pTableMap) {
}
// reset the index in last block when handing a new file
- px->indexInBlockL = -1;
tMapDataClear(&px->mapData);
taosArrayClear(px->pBlockList);
}
}
-static int32_t doLoadFileBlock(STsdbReader* pReader, SArray* pIndexList, SArray* pLastBlockIndex,
- SBlockNumber * pBlockNum, SArray* pQualifiedLastBlock) {
+static int32_t doLoadFileBlock(STsdbReader* pReader, SArray* pIndexList, SBlockNumber* pBlockNum) {
int32_t numOfQTable = 0;
size_t sizeInDisk = 0;
size_t numOfTables = taosArrayGetSize(pIndexList);
@@ -594,12 +609,12 @@ static int32_t doLoadFileBlock(STsdbReader* pReader, SArray* pIndexList, SArray*
STableBlockScanInfo* pScanInfo = taosHashGet(pReader->status.pTableMap, &pBlockIdx->uid, sizeof(int64_t));
tMapDataReset(&pScanInfo->mapData);
- tsdbReadBlock(pReader->pFileReader, pBlockIdx, &pScanInfo->mapData);
+ tsdbReadDataBlk(pReader->pFileReader, pBlockIdx, &pScanInfo->mapData);
sizeInDisk += pScanInfo->mapData.nData;
for (int32_t j = 0; j < pScanInfo->mapData.nItem; ++j) {
- SBlock block = {0};
- tMapDataGetItemByIdx(&pScanInfo->mapData, j, &block, tGetBlock);
+ SDataBlk block = {0};
+ tMapDataGetItemByIdx(&pScanInfo->mapData, j, &block, tGetDataBlk);
// 1. time range check
if (block.minKey.ts > pReader->window.ekey || block.maxKey.ts < pReader->window.skey) {
@@ -625,35 +640,15 @@ static int32_t doLoadFileBlock(STsdbReader* pReader, SArray* pIndexList, SArray*
}
}
- size_t numOfLast = taosArrayGetSize(pLastBlockIndex);
- for(int32_t i = 0; i < numOfLast; ++i) {
- SBlockL* pLastBlock = taosArrayGet(pLastBlockIndex, i);
- if (pLastBlock->suid != pReader->suid) {
- continue;
- }
-
- {
- // 1. time range check
- if (pLastBlock->minKey > pReader->window.ekey || pLastBlock->maxKey < pReader->window.skey) {
- continue;
- }
-
- // 2. version range check
- if (pLastBlock->minVer > pReader->verRange.maxVer || pLastBlock->maxVer < pReader->verRange.minVer) {
- continue;
- }
-
- pBlockNum->numOfLastBlocks += 1;
- taosArrayPush(pQualifiedLastBlock, pLastBlock);
- }
- }
-
- int32_t total = pBlockNum->numOfLastBlocks + pBlockNum->numOfBlocks;
+ pBlockNum->numOfLastFiles = pReader->pFileReader->pSet->nSttF;
+ int32_t total = pBlockNum->numOfLastFiles + pBlockNum->numOfBlocks;
double el = (taosGetTimestampUs() - st) / 1000.0;
- tsdbDebug("load block of %d tables completed, blocks:%d in %d tables, lastBlock:%d, size:%.2f Kb, elapsed time:%.2f ms %s",
- numOfTables, pBlockNum->numOfBlocks, numOfQTable, pBlockNum->numOfLastBlocks, sizeInDisk
- / 1000.0, el, pReader->idStr);
+ tsdbDebug(
+ "load block of %d tables completed, blocks:%d in %d tables, last-files:%d, block-info-size:%.2f Kb, elapsed "
+ "time:%.2f ms %s",
+ numOfTables, pBlockNum->numOfBlocks, numOfQTable, pBlockNum->numOfLastFiles, sizeInDisk / 1000.0, el,
+ pReader->idStr);
pReader->cost.numOfBlocks += total;
pReader->cost.headFileLoadTime += el;
@@ -670,7 +665,7 @@ static void setBlockAllDumped(SFileBlockDumpInfo* pDumpInfo, int64_t maxKey, int
static void doCopyColVal(SColumnInfoData* pColInfoData, int32_t rowIndex, int32_t colIndex, SColVal* pColVal,
SBlockLoadSuppInfo* pSup) {
if (IS_VAR_DATA_TYPE(pColVal->type)) {
- if (pColVal->isNull || pColVal->isNone) {
+ if (!COL_VAL_IS_VALUE(pColVal)) {
colDataAppendNULL(pColInfoData, rowIndex);
} else {
varDataSetLen(pSup->buildBuf[colIndex], pColVal->value.nData);
@@ -679,7 +674,7 @@ static void doCopyColVal(SColumnInfoData* pColInfoData, int32_t rowIndex, int32_
colDataAppend(pColInfoData, rowIndex, pSup->buildBuf[colIndex], false);
}
} else {
- colDataAppend(pColInfoData, rowIndex, (const char*)&pColVal->value, pColVal->isNull || pColVal->isNone);
+ colDataAppend(pColInfoData, rowIndex, (const char*)&pColVal->value, !COL_VAL_IS_VALUE(pColVal));
}
}
@@ -693,15 +688,148 @@ static SFileDataBlockInfo* getCurrentBlockInfo(SDataBlockIter* pBlockIter) {
return pBlockInfo;
}
-static SBlock* getCurrentBlock(SDataBlockIter* pBlockIter) { return &pBlockIter->block; }
+static SDataBlk* getCurrentBlock(SDataBlockIter* pBlockIter) { return &pBlockIter->block; }
+
+int32_t binarySearchForTs(char* pValue, int num, TSKEY key, int order) {
+ int32_t midPos = -1;
+ int32_t numOfRows;
+
+ ASSERT(order == TSDB_ORDER_ASC || order == TSDB_ORDER_DESC);
+
+ TSKEY* keyList = (TSKEY*)pValue;
+ int32_t firstPos = 0;
+ int32_t lastPos = num - 1;
+
+ if (order == TSDB_ORDER_DESC) {
+ // find the first position which is smaller than the key
+ while (1) {
+ if (key >= keyList[firstPos]) return firstPos;
+ if (key == keyList[lastPos]) return lastPos;
+
+ if (key < keyList[lastPos]) {
+ lastPos += 1;
+ if (lastPos >= num) {
+ return -1;
+ } else {
+ return lastPos;
+ }
+ }
+
+ numOfRows = lastPos - firstPos + 1;
+ midPos = (numOfRows >> 1) + firstPos;
+
+ if (key < keyList[midPos]) {
+ firstPos = midPos + 1;
+ } else if (key > keyList[midPos]) {
+ lastPos = midPos - 1;
+ } else {
+ break;
+ }
+ }
+
+ } else {
+ // find the first position which is bigger than the key
+ while (1) {
+ if (key <= keyList[firstPos]) return firstPos;
+ if (key == keyList[lastPos]) return lastPos;
+
+ if (key > keyList[lastPos]) {
+ lastPos = lastPos + 1;
+ if (lastPos >= num)
+ return -1;
+ else
+ return lastPos;
+ }
+
+ numOfRows = lastPos - firstPos + 1;
+ midPos = (numOfRows >> 1u) + firstPos;
+
+ if (key < keyList[midPos]) {
+ lastPos = midPos - 1;
+ } else if (key > keyList[midPos]) {
+ firstPos = midPos + 1;
+ } else {
+ break;
+ }
+ }
+ }
+
+ return midPos;
+}
+
+static int doBinarySearchKey(TSKEY* keyList, int num, int pos, TSKEY key, int order) {
+ // start end position
+ int s, e;
+ s = pos;
+
+ // check
+ assert(pos >= 0 && pos < num);
+ assert(num > 0);
+
+ if (order == TSDB_ORDER_ASC) {
+ // find the first position which is smaller than the key
+ e = num - 1;
+ if (key < keyList[pos]) return -1;
+ while (1) {
+ // check can return
+ if (key >= keyList[e]) return e;
+ if (key <= keyList[s]) return s;
+ if (e - s <= 1) return s;
+
+ // change start or end position
+ int mid = s + (e - s + 1) / 2;
+ if (keyList[mid] > key)
+ e = mid;
+ else if (keyList[mid] < key)
+ s = mid;
+ else
+ return mid;
+ }
+ } else { // DESC
+ // find the first position which is bigger than the key
+ e = 0;
+ if (key > keyList[pos]) return -1;
+ while (1) {
+ // check can return
+ if (key <= keyList[e]) return e;
+ if (key >= keyList[s]) return s;
+ if (s - e <= 1) return s;
+
+ // change start or end position
+ int mid = s - (s - e + 1) / 2;
+ if (keyList[mid] < key)
+ e = mid;
+ else if (keyList[mid] > key)
+ s = mid;
+ else
+ return mid;
+ }
+ }
+}
+
+int32_t getEndPosInDataBlock(STsdbReader* pReader, SBlockData* pBlockData, SDataBlk* pBlock, int32_t pos) {
+ // NOTE: reverse the order to find the end position in data block
+ int32_t endPos = -1;
+ bool asc = ASCENDING_TRAVERSE(pReader->order);
+
+ if (asc && pReader->window.ekey >= pBlock->maxKey.ts) {
+ endPos = pBlock->nRow - 1;
+ } else if (!asc && pReader->window.skey <= pBlock->minKey.ts) {
+ endPos = 0;
+ } else {
+ endPos = doBinarySearchKey(pBlockData->aTSKEY, pBlock->nRow, pos, pReader->window.ekey, pReader->order);
+ }
+
+ return endPos;
+}
static int32_t copyBlockDataToSDataBlock(STsdbReader* pReader, STableBlockScanInfo* pBlockScanInfo) {
SReaderStatus* pStatus = &pReader->status;
SDataBlockIter* pBlockIter = &pStatus->blockIter;
SBlockData* pBlockData = &pStatus->fileBlockData;
- SFileDataBlockInfo* pFBlock = getCurrentBlockInfo(pBlockIter);
- SBlock* pBlock = getCurrentBlock(pBlockIter);
+ SFileDataBlockInfo* pBlockInfo = getCurrentBlockInfo(pBlockIter);
+ SDataBlk* pBlock = getCurrentBlock(pBlockIter);
SSDataBlock* pResBlock = pReader->pResBlock;
int32_t numOfOutputCols = blockDataGetNumOfCols(pResBlock);
@@ -713,23 +841,46 @@ static int32_t copyBlockDataToSDataBlock(STsdbReader* pReader, STableBlockScanIn
bool asc = ASCENDING_TRAVERSE(pReader->order);
int32_t step = asc ? 1 : -1;
- int32_t rowIndex = 0;
- int32_t remain = asc ? (pBlockData->nRow - pDumpInfo->rowIndex) : (pDumpInfo->rowIndex + 1);
- int32_t endIndex = 0;
- if (remain <= pReader->capacity) {
- endIndex = pBlockData->nRow;
- } else {
- endIndex = pDumpInfo->rowIndex + step * pReader->capacity;
+ if ((pDumpInfo->rowIndex == 0 && asc) || (pDumpInfo->rowIndex == pBlock->nRow - 1 && (!asc))) {
+ if (asc && pReader->window.skey <= pBlock->minKey.ts) {
+ //pDumpInfo->rowIndex = 0;
+ } else
+ if (!asc && pReader->window.ekey >= pBlock->maxKey.ts) {
+ //pDumpInfo->rowIndex = pBlock->nRow - 1;
+ } else {
+ int32_t pos = asc ? pBlock->nRow - 1 : 0;
+ int32_t order = (pReader->order == TSDB_ORDER_ASC) ? TSDB_ORDER_DESC : TSDB_ORDER_ASC;
+ pDumpInfo->rowIndex = doBinarySearchKey(pBlockData->aTSKEY, pBlock->nRow, pos, pReader->window.skey, order);
+ }
+ }
+
+ // time window check
+ int32_t endIndex = getEndPosInDataBlock(pReader, pBlockData, pBlock, pDumpInfo->rowIndex);
+ if (endIndex == -1) {
+ setBlockAllDumped(pDumpInfo, pReader->window.ekey, pReader->order);
+ return TSDB_CODE_SUCCESS;
+ }
+
+ endIndex += step;
+ int32_t remain = asc ? (endIndex - pDumpInfo->rowIndex) : (pDumpInfo->rowIndex - endIndex);
+ if (remain > pReader->capacity) { // output buffer check
remain = pReader->capacity;
}
+ int32_t rowIndex = 0;
+
int32_t i = 0;
SColumnInfoData* pColData = taosArrayGet(pResBlock->pDataBlock, i);
if (pColData->info.colId == PRIMARYKEY_TIMESTAMP_COL_ID) {
- for (int32_t j = pDumpInfo->rowIndex; j < endIndex && j >= 0; j += step) {
- colDataAppend(pColData, rowIndex++, (const char*)&pBlockData->aTSKEY[j], false);
+ if (asc) {
+ memcpy(pColData->pData, &pBlockData->aTSKEY[pDumpInfo->rowIndex], remain * sizeof(int64_t));
+ } else {
+ for (int32_t j = pDumpInfo->rowIndex; rowIndex < remain; j += step) {
+ colDataAppendInt64(pColData, rowIndex++, &pBlockData->aTSKEY[j]);
+ }
}
+
i += 1;
}
@@ -743,13 +894,32 @@ static int32_t copyBlockDataToSDataBlock(STsdbReader* pReader, STableBlockScanIn
if (pData->cid < pColData->info.colId) {
colIndex += 1;
} else if (pData->cid == pColData->info.colId) {
- for (int32_t j = pDumpInfo->rowIndex; j < endIndex && j >= 0; j += step) {
- tColDataGetValue(pData, j, &cv);
- doCopyColVal(pColData, rowIndex++, i, &cv, pSupInfo);
+ if (pData->flag == HAS_NONE || pData->flag == HAS_NULL || pData->flag == (HAS_NULL | HAS_NONE)) {
+ colDataAppendNNULL(pColData, 0, remain);
+ } else {
+ if (IS_NUMERIC_TYPE(pColData->info.type) && asc) {
+ uint8_t* p = pData->pData + tDataTypes[pData->type].bytes * pDumpInfo->rowIndex;
+ memcpy(pColData->pData, p, remain * tDataTypes[pData->type].bytes);
+
+ // null value exists, check one-by-one
+ if (pData->flag != HAS_VALUE) {
+ for (int32_t j = pDumpInfo->rowIndex; rowIndex < remain; j += step, rowIndex++) {
+ uint8_t v = tColDataGetBitValue(pData, j);
+ if (v == 0 || v == 1) {
+ colDataSetNull_f(pColData->nullbitmap, rowIndex);
+ }
+ }
+ }
+ } else {
+ for (int32_t j = pDumpInfo->rowIndex; rowIndex < remain; j += step) {
+ tColDataGetValue(pData, j, &cv);
+ doCopyColVal(pColData, rowIndex++, i, &cv, pSupInfo);
+ }
+ }
}
+
colIndex += 1;
i += 1;
- ASSERT(rowIndex == remain);
} else { // the specified column does not exist in file block, fill with null data
colDataAppendNNULL(pColData, 0, remain);
i += 1;
@@ -765,7 +935,13 @@ static int32_t copyBlockDataToSDataBlock(STsdbReader* pReader, STableBlockScanIn
pResBlock->info.rows = remain;
pDumpInfo->rowIndex += step * remain;
- setBlockAllDumped(pDumpInfo, pBlock->maxKey.ts, pReader->order);
+ if (pDumpInfo->rowIndex >= 0 && pDumpInfo->rowIndex < pBlock->nRow) {
+// int64_t ts = pBlockData->aTSKEY[pDumpInfo->rowIndex];
+// setBlockAllDumped(pDumpInfo, ts, pReader->order);
+ } else {
+ int64_t k = asc ? pBlock->maxKey.ts : pBlock->minKey.ts;
+ setBlockAllDumped(pDumpInfo, k, pReader->order);
+ }
double elapsedTime = (taosGetTimestampUs() - st) / 1000.0;
pReader->cost.blockLoadTime += elapsedTime;
@@ -773,21 +949,28 @@ static int32_t copyBlockDataToSDataBlock(STsdbReader* pReader, STableBlockScanIn
int32_t unDumpedRows = asc ? pBlock->nRow - pDumpInfo->rowIndex : pDumpInfo->rowIndex + 1;
tsdbDebug("%p copy file block to sdatablock, global index:%d, table index:%d, brange:%" PRId64 "-%" PRId64
", rows:%d, remain:%d, minVer:%" PRId64 ", maxVer:%" PRId64 ", elapsed time:%.2f ms, %s",
- pReader, pBlockIter->index, pFBlock->tbBlockIdx, pBlock->minKey.ts, pBlock->maxKey.ts, remain, unDumpedRows,
- pBlock->minVer, pBlock->maxVer, elapsedTime, pReader->idStr);
+ pReader, pBlockIter->index, pBlockInfo->tbBlockIdx, pBlock->minKey.ts, pBlock->maxKey.ts, remain,
+ unDumpedRows, pBlock->minVer, pBlock->maxVer, elapsedTime, pReader->idStr);
return TSDB_CODE_SUCCESS;
}
-static int32_t doLoadFileBlockData(STsdbReader* pReader, SDataBlockIter* pBlockIter, SBlockData* pBlockData) {
+static int32_t doLoadFileBlockData(STsdbReader* pReader, SDataBlockIter* pBlockIter, SBlockData* pBlockData, uint64_t uid) {
int64_t st = taosGetTimestampUs();
+ tBlockDataReset(pBlockData);
+ TABLEID tid = {.suid = pReader->suid, .uid = uid};
+ int32_t code = tBlockDataInit(pBlockData, &tid, pReader->pSchema, &pReader->suppInfo.colIds[1], pReader->suppInfo.numOfCols-1);
+ if (code != TSDB_CODE_SUCCESS) {
+ return code;
+ }
+
SFileDataBlockInfo* pBlockInfo = getCurrentBlockInfo(pBlockIter);
SFileBlockDumpInfo* pDumpInfo = &pReader->status.fBlockDumpInfo;
ASSERT(pBlockInfo != NULL);
- SBlock* pBlock = getCurrentBlock(pBlockIter);
- int32_t code = tsdbReadDataBlock(pReader->pFileReader, pBlock, pBlockData);
+ SDataBlk* pBlock = getCurrentBlock(pBlockIter);
+ code = tsdbReadDataBlock(pReader->pFileReader, pBlock, pBlockData);
if (code != TSDB_CODE_SUCCESS) {
tsdbError("%p error occurs in loading file block, global index:%d, table index:%d, brange:%" PRId64 "-%" PRId64
", rows:%d, code:%s %s",
@@ -863,8 +1046,8 @@ static int32_t doSetCurrentBlock(SDataBlockIter* pBlockIter) {
SFileDataBlockInfo* pBlockInfo = getCurrentBlockInfo(pBlockIter);
if (pBlockInfo != NULL) {
STableBlockScanInfo* pScanInfo = taosHashGet(pBlockIter->pTableMap, &pBlockInfo->uid, sizeof(pBlockInfo->uid));
- int32_t* mapDataIndex = taosArrayGet(pScanInfo->pBlockList, pBlockInfo->tbBlockIdx);
- tMapDataGetItemByIdx(&pScanInfo->mapData, *mapDataIndex, &pBlockIter->block, tGetBlock);
+ int32_t* mapDataIndex = taosArrayGet(pScanInfo->pBlockList, pBlockInfo->tbBlockIdx);
+ tMapDataGetItemByIdx(&pScanInfo->mapData, *mapDataIndex, &pBlockIter->block, tGetDataBlk);
}
#if 0
@@ -879,6 +1062,7 @@ static int32_t initBlockIterator(STsdbReader* pReader, SDataBlockIter* pBlockIte
pBlockIter->numOfBlocks = numOfBlocks;
taosArrayClear(pBlockIter->blockList);
+ pBlockIter->pTableMap = pReader->status.pTableMap;
// access data blocks according to the offset of each block in asc/desc order.
int32_t numOfTables = (int32_t)taosHashGetSize(pReader->status.pTableMap);
@@ -914,12 +1098,12 @@ static int32_t initBlockIterator(STsdbReader* pReader, SDataBlockIter* pBlockIte
}
sup.pDataBlockInfo[sup.numOfTables] = (SBlockOrderWrapper*)buf;
- SBlock block = {0};
+ SDataBlk block = {0};
for (int32_t k = 0; k < num; ++k) {
SBlockOrderWrapper wrapper = {0};
int32_t* mapDataIndex = taosArrayGet(pTableScanInfo->pBlockList, k);
- tMapDataGetItemByIdx(&pTableScanInfo->mapData, *mapDataIndex, &block, tGetBlock);
+ tMapDataGetItemByIdx(&pTableScanInfo->mapData, *mapDataIndex, &block, tGetDataBlk);
wrapper.uid = pTableScanInfo->uid;
wrapper.offset = block.aSubBlock[0].offset;
@@ -980,8 +1164,8 @@ static int32_t initBlockIterator(STsdbReader* pReader, SDataBlockIter* pBlockIte
}
int64_t et = taosGetTimestampUs();
- tsdbDebug("%p %d data blocks access order completed, elapsed time:%.2f ms %s", pReader, numOfBlocks, (et - st) / 1000.0,
- pReader->idStr);
+ tsdbDebug("%p %d data blocks access order completed, elapsed time:%.2f ms %s", pReader, numOfBlocks,
+ (et - st) / 1000.0, pReader->idStr);
cleanupBlockOrderSupporter(&sup);
taosMemoryFree(pTree);
@@ -1008,15 +1192,15 @@ static bool blockIteratorNext(SDataBlockIter* pBlockIter) {
/**
* This is an two rectangles overlap cases.
*/
-static int32_t dataBlockPartiallyRequired(STimeWindow* pWindow, SVersionRange* pVerRange, SBlock* pBlock) {
+static int32_t dataBlockPartiallyRequired(STimeWindow* pWindow, SVersionRange* pVerRange, SDataBlk* pBlock) {
return (pWindow->ekey < pBlock->maxKey.ts && pWindow->ekey >= pBlock->minKey.ts) ||
(pWindow->skey > pBlock->minKey.ts && pWindow->skey <= pBlock->maxKey.ts) ||
(pVerRange->minVer > pBlock->minVer && pVerRange->minVer <= pBlock->maxVer) ||
(pVerRange->maxVer < pBlock->maxVer && pVerRange->maxVer >= pBlock->minVer);
}
-static SBlock* getNeighborBlockOfSameTable(SFileDataBlockInfo* pFBlockInfo, STableBlockScanInfo* pTableBlockScanInfo,
- int32_t* nextIndex, int32_t order) {
+static SDataBlk* getNeighborBlockOfSameTable(SFileDataBlockInfo* pFBlockInfo, STableBlockScanInfo* pTableBlockScanInfo,
+ int32_t* nextIndex, int32_t order) {
bool asc = ASCENDING_TRAVERSE(order);
if (asc && pFBlockInfo->tbBlockIdx >= taosArrayGetSize(pTableBlockScanInfo->pBlockList) - 1) {
return NULL;
@@ -1029,10 +1213,10 @@ static SBlock* getNeighborBlockOfSameTable(SFileDataBlockInfo* pFBlockInfo, STab
int32_t step = asc ? 1 : -1;
*nextIndex = pFBlockInfo->tbBlockIdx + step;
- SBlock* pBlock = taosMemoryCalloc(1, sizeof(SBlock));
- int32_t* indexInMapdata = taosArrayGet(pTableBlockScanInfo->pBlockList, *nextIndex);
+ SDataBlk* pBlock = taosMemoryCalloc(1, sizeof(SDataBlk));
+ int32_t* indexInMapdata = taosArrayGet(pTableBlockScanInfo->pBlockList, *nextIndex);
- tMapDataGetItemByIdx(&pTableBlockScanInfo->mapData, *indexInMapdata, pBlock, tGetBlock);
+ tMapDataGetItemByIdx(&pTableBlockScanInfo->mapData, *indexInMapdata, pBlock, tGetDataBlk);
return pBlock;
}
@@ -1075,7 +1259,7 @@ static int32_t setFileBlockActiveInBlockIter(SDataBlockIter* pBlockIter, int32_t
return TSDB_CODE_SUCCESS;
}
-static bool overlapWithNeighborBlock(SBlock* pBlock, SBlock* pNeighbor, int32_t order) {
+static bool overlapWithNeighborBlock(SDataBlk* pBlock, SDataBlk* pNeighbor, int32_t order) {
// it is the last block in current file, no chance to overlap with neighbor blocks.
if (ASCENDING_TRAVERSE(order)) {
return pBlock->maxKey.ts == pNeighbor->minKey.ts;
@@ -1084,19 +1268,19 @@ static bool overlapWithNeighborBlock(SBlock* pBlock, SBlock* pNeighbor, int32_t
}
}
-static bool bufferDataInFileBlockGap(int32_t order, TSDBKEY key, SBlock* pBlock) {
+static bool bufferDataInFileBlockGap(int32_t order, TSDBKEY key, SDataBlk* pBlock) {
bool ascScan = ASCENDING_TRAVERSE(order);
return (ascScan && (key.ts != TSKEY_INITIAL_VAL && key.ts <= pBlock->minKey.ts)) ||
(!ascScan && (key.ts != TSKEY_INITIAL_VAL && key.ts >= pBlock->maxKey.ts));
}
-static bool keyOverlapFileBlock(TSDBKEY key, SBlock* pBlock, SVersionRange* pVerRange) {
+static bool keyOverlapFileBlock(TSDBKEY key, SDataBlk* pBlock, SVersionRange* pVerRange) {
return (key.ts >= pBlock->minKey.ts && key.ts <= pBlock->maxKey.ts) && (pBlock->maxVer >= pVerRange->minVer) &&
(pBlock->minVer <= pVerRange->maxVer);
}
-static bool doCheckforDatablockOverlap(STableBlockScanInfo* pBlockScanInfo, const SBlock* pBlock) {
+static bool doCheckforDatablockOverlap(STableBlockScanInfo* pBlockScanInfo, const SDataBlk* pBlock) {
size_t num = taosArrayGetSize(pBlockScanInfo->delSkyline);
for (int32_t i = pBlockScanInfo->fileDelIndex; i < num; i += 1) {
@@ -1109,14 +1293,8 @@ static bool doCheckforDatablockOverlap(STableBlockScanInfo* pBlockScanInfo, cons
if (p->version >= pBlock->minVer) {
if (i < num - 1) {
TSDBKEY* pnext = taosArrayGet(pBlockScanInfo->delSkyline, i + 1);
- if (i + 1 == num - 1) { // pnext is the last point
- if (pnext->ts >= pBlock->minKey.ts) {
- return true;
- }
- } else {
- if (pnext->ts >= pBlock->minKey.ts && pnext->version >= pBlock->minVer) {
- return true;
- }
+ if (pnext->ts >= pBlock->minKey.ts) {
+ return true;
}
} else { // it must be the last point
ASSERT(p->version == 0);
@@ -1130,7 +1308,7 @@ static bool doCheckforDatablockOverlap(STableBlockScanInfo* pBlockScanInfo, cons
return false;
}
-static bool overlapWithDelSkyline(STableBlockScanInfo* pBlockScanInfo, const SBlock* pBlock, int32_t order) {
+static bool overlapWithDelSkyline(STableBlockScanInfo* pBlockScanInfo, const SDataBlk* pBlock, int32_t order) {
if (pBlockScanInfo->delSkyline == NULL) {
return false;
}
@@ -1160,53 +1338,79 @@ static bool overlapWithDelSkyline(STableBlockScanInfo* pBlockScanInfo, const SBl
}
}
-// 1. the version of all rows should be less than the endVersion
-// 2. current block should not overlap with next neighbor block
-// 3. current timestamp should not be overlap with each other
-// 4. output buffer should be large enough to hold all rows in current block
-// 5. delete info should not overlap with current block data
-static bool fileBlockShouldLoad(STsdbReader* pReader, SFileDataBlockInfo* pFBlock, SBlock* pBlock,
- STableBlockScanInfo* pScanInfo, TSDBKEY key, SLastBlockReader* pLastBlockReader) {
- int32_t neighborIndex = 0;
- SBlock* pNeighbor = getNeighborBlockOfSameTable(pFBlock, pScanInfo, &neighborIndex, pReader->order);
+typedef struct {
+ bool overlapWithNeighborBlock;
+ bool hasDupTs;
+ bool overlapWithDelInfo;
+ bool overlapWithLastBlock;
+ bool overlapWithKeyInBuf;
+ bool partiallyRequired;
+ bool moreThanCapcity;
+} SDataBlockToLoadInfo;
+
+static void getBlockToLoadInfo(SDataBlockToLoadInfo* pInfo, SFileDataBlockInfo* pBlockInfo, SDataBlk* pBlock,
+ STableBlockScanInfo* pScanInfo, TSDBKEY keyInBuf, SLastBlockReader* pLastBlockReader,
+ STsdbReader* pReader) {
+ int32_t neighborIndex = 0;
+ SDataBlk* pNeighbor = getNeighborBlockOfSameTable(pBlockInfo, pScanInfo, &neighborIndex, pReader->order);
// overlap with neighbor
- bool overlapWithNeighbor = false;
if (pNeighbor) {
- overlapWithNeighbor = overlapWithNeighborBlock(pBlock, pNeighbor, pReader->order);
+ pInfo->overlapWithNeighborBlock = overlapWithNeighborBlock(pBlock, pNeighbor, pReader->order);
taosMemoryFree(pNeighbor);
}
// has duplicated ts of different version in this block
- bool hasDup = (pBlock->nSubBlock == 1) ? pBlock->hasDup : true;
- bool overlapWithDel = overlapWithDelSkyline(pScanInfo, pBlock, pReader->order);
+ pInfo->hasDupTs = (pBlock->nSubBlock == 1) ? pBlock->hasDup : true;
+ pInfo->overlapWithDelInfo = overlapWithDelSkyline(pScanInfo, pBlock, pReader->order);
- // todo here we need to each key in the last files to identify if it is really overlapped with last block
- bool overlapWithlastBlock = false;
- if (taosArrayGetSize(pLastBlockReader->pBlockL) > 0 && (pLastBlockReader->currentBlockIndex != -1)) {
- SBlockL *pBlockL = taosArrayGet(pLastBlockReader->pBlockL, pLastBlockReader->currentBlockIndex);
- overlapWithlastBlock = !(pBlock->maxKey.ts < pBlockL->minKey || pBlock->minKey.ts > pBlockL->maxKey);
+ if (hasDataInLastBlock(pLastBlockReader)) {
+ int64_t tsLast = getCurrentKeyInLastBlock(pLastBlockReader);
+ pInfo->overlapWithLastBlock = !(pBlock->maxKey.ts < tsLast || pBlock->minKey.ts > tsLast);
}
- bool moreThanOutputCapacity = pBlock->nRow > pReader->capacity;
- bool partiallyRequired = dataBlockPartiallyRequired(&pReader->window, &pReader->verRange, pBlock);
- bool overlapWithKey = keyOverlapFileBlock(key, pBlock, &pReader->verRange);
+ pInfo->moreThanCapcity = pBlock->nRow > pReader->capacity;
+ pInfo->partiallyRequired = dataBlockPartiallyRequired(&pReader->window, &pReader->verRange, pBlock);
+ pInfo->overlapWithKeyInBuf = keyOverlapFileBlock(keyInBuf, pBlock, &pReader->verRange);
+}
+
+// 1. the version of all rows should be less than the endVersion
+// 2. current block should not overlap with next neighbor block
+// 3. current timestamp should not be overlap with each other
+// 4. output buffer should be large enough to hold all rows in current block
+// 5. delete info should not overlap with current block data
+// 6. current block should not contain the duplicated ts
+static bool fileBlockShouldLoad(STsdbReader* pReader, SFileDataBlockInfo* pBlockInfo, SDataBlk* pBlock,
+ STableBlockScanInfo* pScanInfo, TSDBKEY keyInBuf, SLastBlockReader* pLastBlockReader) {
+ SDataBlockToLoadInfo info = {0};
+ getBlockToLoadInfo(&info, pBlockInfo, pBlock, pScanInfo, keyInBuf, pLastBlockReader, pReader);
- bool loadDataBlock = (overlapWithNeighbor || hasDup || partiallyRequired || overlapWithKey ||
- moreThanOutputCapacity || overlapWithDel || overlapWithlastBlock);
+ bool loadDataBlock =
+ (info.overlapWithNeighborBlock || info.hasDupTs || info.partiallyRequired || info.overlapWithKeyInBuf ||
+ info.moreThanCapcity || info.overlapWithDelInfo || info.overlapWithLastBlock);
// log the reason why load the datablock for profile
if (loadDataBlock) {
tsdbDebug("%p uid:%" PRIu64
" need to load the datablock, overlapwithneighborblock:%d, hasDup:%d, partiallyRequired:%d, "
"overlapWithKey:%d, greaterThanBuf:%d, overlapWithDel:%d, overlapWithlastBlock:%d, %s",
- pReader, pFBlock->uid, overlapWithNeighbor, hasDup, partiallyRequired, overlapWithKey,
- moreThanOutputCapacity, overlapWithDel, overlapWithlastBlock, pReader->idStr);
+ pReader, pBlockInfo->uid, info.overlapWithNeighborBlock, info.hasDupTs, info.partiallyRequired,
+ info.overlapWithKeyInBuf, info.moreThanCapcity, info.overlapWithDelInfo, info.overlapWithLastBlock,
+ pReader->idStr);
}
return loadDataBlock;
}
+static bool isCleanFileDataBlock(STsdbReader* pReader, SFileDataBlockInfo* pBlockInfo, SDataBlk* pBlock,
+ STableBlockScanInfo* pScanInfo, TSDBKEY keyInBuf, SLastBlockReader* pLastBlockReader) {
+ SDataBlockToLoadInfo info = {0};
+ getBlockToLoadInfo(&info, pBlockInfo, pBlock, pScanInfo, keyInBuf, pLastBlockReader, pReader);
+ bool isCleanFileBlock = !(info.overlapWithNeighborBlock || info.hasDupTs || info.overlapWithKeyInBuf ||
+ info.overlapWithDelInfo || info.overlapWithLastBlock);
+ return isCleanFileBlock;
+}
+
static int32_t buildDataBlockFromBuf(STsdbReader* pReader, STableBlockScanInfo* pBlockScanInfo, int64_t endKey) {
if (!(pBlockScanInfo->iiter.hasVal || pBlockScanInfo->iter.hasVal)) {
return TSDB_CODE_SUCCESS;
@@ -1252,6 +1456,38 @@ static bool tryCopyDistinctRowFromFileBlock(STsdbReader* pReader, SBlockData* pB
return false;
}
+static bool nextRowFromLastBlocks(SLastBlockReader* pLastBlockReader, STableBlockScanInfo* pBlockScanInfo) {
+ while (1) {
+ bool hasVal = tMergeTreeNext(&pLastBlockReader->mergeTree);
+ if (!hasVal) {
+ return false;
+ }
+
+ TSDBROW row = tMergeTreeGetRow(&pLastBlockReader->mergeTree);
+ TSDBKEY k = TSDBROW_KEY(&row);
+ if (!hasBeenDropped(pBlockScanInfo->delSkyline, &pBlockScanInfo->lastBlockDelIndex, &k, pLastBlockReader->order)) {
+ return true;
+ }
+ }
+}
+
+static bool tryCopyDistinctRowFromSttBlock(TSDBROW* fRow, SLastBlockReader* pLastBlockReader,
+ STableBlockScanInfo* pScanInfo, int64_t ts, STsdbReader* pReader) {
+ bool hasVal = nextRowFromLastBlocks(pLastBlockReader, pScanInfo);
+ if (hasVal) {
+ int64_t next1 = getCurrentKeyInLastBlock(pLastBlockReader);
+ if (next1 != ts) {
+ doAppendRowFromFileBlock(pReader->pResBlock, pReader, fRow->pBlockData, fRow->iRow);
+ return true;
+ }
+ } else {
+ doAppendRowFromFileBlock(pReader->pResBlock, pReader, fRow->pBlockData, fRow->iRow);
+ return true;
+ }
+
+ return false;
+}
+
static FORCE_INLINE STSchema* doGetSchemaForTSRow(int32_t sversion, STsdbReader* pReader, uint64_t uid) {
// always set the newest schema version in pReader->pSchema
if (pReader->pSchema == NULL) {
@@ -1285,18 +1521,16 @@ static int32_t doMergeBufAndFileRows(STsdbReader* pReader, STableBlockScanInfo*
SFileBlockDumpInfo* pDumpInfo = &pReader->status.fBlockDumpInfo;
int64_t tsLast = INT64_MIN;
- if ((pLastBlockReader->lastBlockData.nRow > 0) && hasDataInLastBlock(pLastBlockReader)) {
+ if (hasDataInLastBlock(pLastBlockReader)) {
tsLast = getCurrentKeyInLastBlock(pLastBlockReader);
}
- TSDBKEY k = TSDBROW_KEY(pRow);
- TSDBROW fRow = tsdbRowFromBlockData(pBlockData, pDumpInfo->rowIndex);
-
- SBlockData* pLastBlockData = &pLastBlockReader->lastBlockData;
+ TSDBKEY k = TSDBROW_KEY(pRow);
+ TSDBROW fRow = tsdbRowFromBlockData(pBlockData, pDumpInfo->rowIndex);
int64_t minKey = 0;
if (pReader->order == TSDB_ORDER_ASC) {
- minKey = INT64_MAX; // chosen the minimum value
+ minKey = INT64_MAX; // chosen the minimum value
if (minKey > tsLast && hasDataInLastBlock(pLastBlockReader)) {
minKey = tsLast;
}
@@ -1305,7 +1539,7 @@ static int32_t doMergeBufAndFileRows(STsdbReader* pReader, STableBlockScanInfo*
minKey = k.ts;
}
- if (minKey > key && pBlockData->nRow > 0) {
+ if (minKey > key && hasDataInFileBlock(pBlockData, pDumpInfo)) {
minKey = key;
}
} else {
@@ -1318,7 +1552,7 @@ static int32_t doMergeBufAndFileRows(STsdbReader* pReader, STableBlockScanInfo*
minKey = k.ts;
}
- if (minKey < key && pBlockData->nRow > 0) {
+ if (minKey < key && hasDataInFileBlock(pBlockData, pDumpInfo)) {
minKey = key;
}
}
@@ -1326,7 +1560,7 @@ static int32_t doMergeBufAndFileRows(STsdbReader* pReader, STableBlockScanInfo*
bool init = false;
// ASC: file block ---> last block -----> imem -----> mem
- //DESC: mem -----> imem -----> last block -----> file block
+ // DESC: mem -----> imem -----> last block -----> file block
if (pReader->order == TSDB_ORDER_ASC) {
if (minKey == key) {
init = true;
@@ -1335,7 +1569,7 @@ static int32_t doMergeBufAndFileRows(STsdbReader* pReader, STableBlockScanInfo*
}
if (minKey == tsLast) {
- TSDBROW fRow1 = tsdbRowFromBlockData(pLastBlockData, *pLastBlockReader->rowIndex);
+ TSDBROW fRow1 = tMergeTreeGetRow(&pLastBlockReader->mergeTree);
if (init) {
tRowMerge(&merge, &fRow1);
} else {
@@ -1364,7 +1598,7 @@ static int32_t doMergeBufAndFileRows(STsdbReader* pReader, STableBlockScanInfo*
}
if (minKey == tsLast) {
- TSDBROW fRow1 = tsdbRowFromBlockData(pLastBlockData, *pLastBlockReader->rowIndex);
+ TSDBROW fRow1 = tMergeTreeGetRow(&pLastBlockReader->mergeTree);
if (init) {
tRowMerge(&merge, &fRow1);
} else {
@@ -1385,7 +1619,11 @@ static int32_t doMergeBufAndFileRows(STsdbReader* pReader, STableBlockScanInfo*
}
}
- tRowMergerGetRow(&merge, &pTSRow);
+ int32_t code = tRowMergerGetRow(&merge, &pTSRow);
+ if (code != TSDB_CODE_SUCCESS) {
+ return code;
+ }
+
doAppendRowFromTSRow(pReader->pResBlock, pReader, pTSRow, pBlockScanInfo->uid);
taosMemoryFree(pTSRow);
@@ -1396,27 +1634,55 @@ static int32_t doMergeBufAndFileRows(STsdbReader* pReader, STableBlockScanInfo*
static int32_t doMergeFileBlockAndLastBlock(SLastBlockReader* pLastBlockReader, STsdbReader* pReader,
STableBlockScanInfo* pBlockScanInfo, SBlockData* pBlockData,
bool mergeBlockData) {
- SBlockData* pLastBlockData = &pLastBlockReader->lastBlockData;
- int64_t tsLastBlock = getCurrentKeyInLastBlock(pLastBlockReader);
+ SFileBlockDumpInfo* pDumpInfo = &pReader->status.fBlockDumpInfo;
+ int64_t tsLastBlock = getCurrentKeyInLastBlock(pLastBlockReader);
STSRow* pTSRow = NULL;
SRowMerger merge = {0};
+ TSDBROW fRow = tMergeTreeGetRow(&pLastBlockReader->mergeTree);
- TSDBROW fRow = tsdbRowFromBlockData(pLastBlockData, *pLastBlockReader->rowIndex);
+ // only last block exists
+ if ((!mergeBlockData) || (tsLastBlock != pBlockData->aTSKEY[pDumpInfo->rowIndex])) {
+ if (tryCopyDistinctRowFromSttBlock(&fRow, pLastBlockReader, pBlockScanInfo, tsLastBlock, pReader)) {
+ return TSDB_CODE_SUCCESS;
+ } else {
+ tRowMergerInit(&merge, &fRow, pReader->pSchema);
- tRowMergerInit(&merge, &fRow, pReader->pSchema);
- doMergeRowsInLastBlock(pLastBlockReader, pBlockScanInfo, tsLastBlock, &merge);
+ TSDBROW fRow1 = tMergeTreeGetRow(&pLastBlockReader->mergeTree);
+ tRowMerge(&merge, &fRow1);
+ doMergeRowsInLastBlock(pLastBlockReader, pBlockScanInfo, tsLastBlock, &merge);
- // merge with block data if ts == key
- if (mergeBlockData) {
- doMergeRowsInFileBlocks(pBlockData, pBlockScanInfo, pReader, &merge);
- }
+ int32_t code = tRowMergerGetRow(&merge, &pTSRow);
+ if (code != TSDB_CODE_SUCCESS) {
+ return code;
+ }
- tRowMergerGetRow(&merge, &pTSRow);
- doAppendRowFromTSRow(pReader->pResBlock, pReader, pTSRow, pBlockScanInfo->uid);
+ doAppendRowFromTSRow(pReader->pResBlock, pReader, pTSRow, pBlockScanInfo->uid);
+
+ taosMemoryFree(pTSRow);
+ tRowMergerClear(&merge);
+ }
+ } else { // not merge block data
+ tRowMergerInit(&merge, &fRow, pReader->pSchema);
+ doMergeRowsInLastBlock(pLastBlockReader, pBlockScanInfo, tsLastBlock, &merge);
+ ASSERT(mergeBlockData);
+
+ // merge with block data if ts == key
+ if (tsLastBlock == pBlockData->aTSKEY[pDumpInfo->rowIndex]) {
+ doMergeRowsInFileBlocks(pBlockData, pBlockScanInfo, pReader, &merge);
+ }
+
+ int32_t code = tRowMergerGetRow(&merge, &pTSRow);
+ if (code != TSDB_CODE_SUCCESS) {
+ return code;
+ }
+
+ doAppendRowFromTSRow(pReader->pResBlock, pReader, pTSRow, pBlockScanInfo->uid);
+
+ taosMemoryFree(pTSRow);
+ tRowMergerClear(&merge);
+ }
- taosMemoryFree(pTSRow);
- tRowMergerClear(&merge);
return TSDB_CODE_SUCCESS;
}
@@ -1424,9 +1690,9 @@ static int32_t mergeFileBlockAndLastBlock(STsdbReader* pReader, SLastBlockReader
STableBlockScanInfo* pBlockScanInfo, SBlockData* pBlockData) {
SFileBlockDumpInfo* pDumpInfo = &pReader->status.fBlockDumpInfo;
- if (pBlockData->nRow > 0) {
+ if (hasDataInFileBlock(pBlockData, pDumpInfo)) {
// no last block available, only data block exists
- if (pLastBlockReader->lastBlockData.nRow == 0 || (!hasDataInLastBlock(pLastBlockReader))) {
+ if (!hasDataInLastBlock(pLastBlockReader)) {
return mergeRowsInFileBlocks(pBlockData, pBlockScanInfo, key, pReader);
}
@@ -1436,7 +1702,7 @@ static int32_t mergeFileBlockAndLastBlock(STsdbReader* pReader, SLastBlockReader
ASSERT(ts >= key);
if (ASCENDING_TRAVERSE(pReader->order)) {
- if (key < ts) { // imem, mem are all empty, file blocks (data blocks and last block) exist
+ if (key < ts) { // imem, mem are all empty, file blocks (data blocks and last block) exist
return mergeRowsInFileBlocks(pBlockData, pBlockScanInfo, key, pReader);
} else if (key == ts) {
STSRow* pTSRow = NULL;
@@ -1444,14 +1710,22 @@ static int32_t mergeFileBlockAndLastBlock(STsdbReader* pReader, SLastBlockReader
tRowMergerInit(&merge, &fRow, pReader->pSchema);
doMergeRowsInFileBlocks(pBlockData, pBlockScanInfo, pReader, &merge);
+
+ TSDBROW fRow1 = tMergeTreeGetRow(&pLastBlockReader->mergeTree);
+ tRowMerge(&merge, &fRow1);
+
doMergeRowsInLastBlock(pLastBlockReader, pBlockScanInfo, ts, &merge);
- tRowMergerGetRow(&merge, &pTSRow);
+ int32_t code = tRowMergerGetRow(&merge, &pTSRow);
+ if (code != TSDB_CODE_SUCCESS) {
+ return code;
+ }
+
doAppendRowFromTSRow(pReader->pResBlock, pReader, pTSRow, pBlockScanInfo->uid);
taosMemoryFree(pTSRow);
tRowMergerClear(&merge);
- return TSDB_CODE_SUCCESS;
+ return code;
} else {
ASSERT(0);
return TSDB_CODE_SUCCESS;
@@ -1476,13 +1750,12 @@ static int32_t doMergeMultiLevelRows(STsdbReader* pReader, STableBlockScanInfo*
TSDBROW* piRow = getValidMemRow(&pBlockScanInfo->iiter, pDelList, pReader);
ASSERT(pRow != NULL && piRow != NULL);
- SBlockData* pLastBlockData = &pLastBlockReader->lastBlockData;
int64_t tsLast = INT64_MIN;
if (hasDataInLastBlock(pLastBlockReader)) {
tsLast = getCurrentKeyInLastBlock(pLastBlockReader);
}
- int64_t key = pBlockData->aTSKEY[pDumpInfo->rowIndex];
+ int64_t key = hasDataInFileBlock(pBlockData, pDumpInfo) ? pBlockData->aTSKEY[pDumpInfo->rowIndex] : INT64_MIN;
TSDBKEY k = TSDBROW_KEY(pRow);
TSDBKEY ik = TSDBROW_KEY(piRow);
@@ -1498,7 +1771,7 @@ static int32_t doMergeMultiLevelRows(STsdbReader* pReader, STableBlockScanInfo*
minKey = ik.ts;
}
- if (minKey > key && pBlockData->nRow > 0) {
+ if (minKey > key && hasDataInFileBlock(pBlockData, pDumpInfo)) {
minKey = key;
}
@@ -1506,7 +1779,7 @@ static int32_t doMergeMultiLevelRows(STsdbReader* pReader, STableBlockScanInfo*
minKey = tsLast;
}
} else {
- minKey = INT64_MIN; // let find the maximum ts value
+ minKey = INT64_MIN; // let find the maximum ts value
if (minKey < k.ts) {
minKey = k.ts;
}
@@ -1515,7 +1788,7 @@ static int32_t doMergeMultiLevelRows(STsdbReader* pReader, STableBlockScanInfo*
minKey = ik.ts;
}
- if (minKey < key && pBlockData->nRow > 0) {
+ if (minKey < key && hasDataInFileBlock(pBlockData, pDumpInfo)) {
minKey = key;
}
@@ -1537,7 +1810,7 @@ static int32_t doMergeMultiLevelRows(STsdbReader* pReader, STableBlockScanInfo*
}
if (minKey == tsLast) {
- TSDBROW fRow1 = tsdbRowFromBlockData(pLastBlockData, *pLastBlockReader->rowIndex);
+ TSDBROW fRow1 = tMergeTreeGetRow(&pLastBlockReader->mergeTree);
if (init) {
tRowMerge(&merge, &fRow1);
} else {
@@ -1587,7 +1860,7 @@ static int32_t doMergeMultiLevelRows(STsdbReader* pReader, STableBlockScanInfo*
}
if (minKey == tsLast) {
- TSDBROW fRow1 = tsdbRowFromBlockData(pLastBlockData, *pLastBlockReader->rowIndex);
+ TSDBROW fRow1 = tMergeTreeGetRow(&pLastBlockReader->mergeTree);
if (init) {
tRowMerge(&merge, &fRow1);
} else {
@@ -1608,158 +1881,81 @@ static int32_t doMergeMultiLevelRows(STsdbReader* pReader, STableBlockScanInfo*
}
}
- tRowMergerGetRow(&merge, &pTSRow);
+ int32_t code = tRowMergerGetRow(&merge, &pTSRow);
+ if (code != TSDB_CODE_SUCCESS) {
+ return code;
+ }
+
doAppendRowFromTSRow(pReader->pResBlock, pReader, pTSRow, pBlockScanInfo->uid);
taosMemoryFree(pTSRow);
tRowMergerClear(&merge);
- return TSDB_CODE_SUCCESS;
+ return code;
}
-#if 0
-static int32_t doMergeThreeLevelRows(STsdbReader* pReader, STableBlockScanInfo* pBlockScanInfo, SBlockData* pBlockData) {
- SRowMerger merge = {0};
- STSRow* pTSRow = NULL;
-
- SFileBlockDumpInfo* pDumpInfo = &pReader->status.fBlockDumpInfo;
- SArray* pDelList = pBlockScanInfo->delSkyline;
-
- TSDBROW* pRow = getValidMemRow(&pBlockScanInfo->iter, pDelList, pReader);
- TSDBROW* piRow = getValidMemRow(&pBlockScanInfo->iiter, pDelList, pReader);
- ASSERT(pRow != NULL && piRow != NULL);
-
- int64_t key = pBlockData->aTSKEY[pDumpInfo->rowIndex];
- bool freeTSRow = false;
+static int32_t initMemDataIterator(STableBlockScanInfo* pBlockScanInfo, STsdbReader* pReader) {
+ if (pBlockScanInfo->iterInit) {
+ return TSDB_CODE_SUCCESS;
+ }
- uint64_t uid = pBlockScanInfo->uid;
+ int32_t code = TSDB_CODE_SUCCESS;
- TSDBKEY k = TSDBROW_KEY(pRow);
- TSDBKEY ik = TSDBROW_KEY(piRow);
+ TSDBKEY startKey = {0};
if (ASCENDING_TRAVERSE(pReader->order)) {
- // [1&2] key <= [k.ts && ik.ts]
- if (key <= k.ts && key <= ik.ts) {
- TSDBROW fRow = tsdbRowFromBlockData(pBlockData, pDumpInfo->rowIndex);
- tRowMergerInit(&merge, &fRow, pReader->pSchema);
-
- doMergeRowsInFileBlocks(pBlockData, pBlockScanInfo, pReader, &merge);
-
- if (ik.ts == key) {
- tRowMerge(&merge, piRow);
- doMergeRowsInBuf(&pBlockScanInfo->iiter, uid, key, pBlockScanInfo->delSkyline, &merge, pReader);
- }
-
- if (k.ts == key) {
- tRowMerge(&merge, pRow);
- doMergeRowsInBuf(&pBlockScanInfo->iter, uid, key, pBlockScanInfo->delSkyline, &merge, pReader);
- }
-
- tRowMergerGetRow(&merge, &pTSRow);
- doAppendRowFromTSRow(pReader->pResBlock, pReader, pTSRow, uid);
- return TSDB_CODE_SUCCESS;
- } else { // key > ik.ts || key > k.ts
- ASSERT(key != ik.ts);
-
- // [3] ik.ts < key <= k.ts
- // [4] ik.ts < k.ts <= key
- if (ik.ts < k.ts) {
- doMergeMemTableMultiRows(piRow, uid, &pBlockScanInfo->iiter, pDelList, &pTSRow, pReader, &freeTSRow);
- doAppendRowFromTSRow(pReader->pResBlock, pReader, pTSRow, uid);
- if (freeTSRow) {
- taosMemoryFree(pTSRow);
- }
- return TSDB_CODE_SUCCESS;
- }
+ startKey = (TSDBKEY){.ts = pReader->window.skey, .version = pReader->verRange.minVer};
+ } else {
+ startKey = (TSDBKEY){.ts = pReader->window.ekey, .version = pReader->verRange.maxVer};
+ }
- // [5] k.ts < key <= ik.ts
- // [6] k.ts < ik.ts <= key
- if (k.ts < ik.ts) {
- doMergeMemTableMultiRows(pRow, uid, &pBlockScanInfo->iter, pDelList, &pTSRow, pReader, &freeTSRow);
- doAppendRowFromTSRow(pReader->pResBlock, pReader, pTSRow, uid);
- if (freeTSRow) {
- taosMemoryFree(pTSRow);
- }
- return TSDB_CODE_SUCCESS;
- }
+ int32_t backward = (!ASCENDING_TRAVERSE(pReader->order));
- // [7] k.ts == ik.ts < key
- if (k.ts == ik.ts) {
- ASSERT(key > ik.ts && key > k.ts);
+ STbData* d = NULL;
+ if (pReader->pReadSnap->pMem != NULL) {
+ d = tsdbGetTbDataFromMemTable(pReader->pReadSnap->pMem, pReader->suid, pBlockScanInfo->uid);
+ if (d != NULL) {
+ code = tsdbTbDataIterCreate(d, &startKey, backward, &pBlockScanInfo->iter.iter);
+ if (code == TSDB_CODE_SUCCESS) {
+ pBlockScanInfo->iter.hasVal = (tsdbTbDataIterGet(pBlockScanInfo->iter.iter) != NULL);
- doMergeMemIMemRows(pRow, piRow, pBlockScanInfo, pReader, &pTSRow);
- doAppendRowFromTSRow(pReader->pResBlock, pReader, pTSRow, uid);
- taosMemoryFree(pTSRow);
- return TSDB_CODE_SUCCESS;
+ tsdbDebug("%p uid:%" PRId64 ", check data in mem from skey:%" PRId64 ", order:%d, ts range in buf:%" PRId64
+ "-%" PRId64 " %s",
+ pReader, pBlockScanInfo->uid, startKey.ts, pReader->order, d->minKey, d->maxKey, pReader->idStr);
+ } else {
+ tsdbError("%p uid:%" PRId64 ", failed to create iterator for imem, code:%s, %s", pReader, pBlockScanInfo->uid,
+ tstrerror(code), pReader->idStr);
+ return code;
}
}
- } else { // descending order scan
- // [1/2] k.ts >= ik.ts && k.ts >= key
- if (k.ts >= ik.ts && k.ts >= key) {
- STSchema* pSchema = doGetSchemaForTSRow(TSDBROW_SVERSION(pRow), pReader, pBlockScanInfo->uid);
-
- tRowMergerInit(&merge, pRow, pSchema);
- doMergeRowsInBuf(&pBlockScanInfo->iter, uid, key, pBlockScanInfo->delSkyline, &merge, pReader);
-
- if (ik.ts == k.ts) {
- tRowMerge(&merge, piRow);
- doMergeRowsInBuf(&pBlockScanInfo->iiter, uid, key, pBlockScanInfo->delSkyline, &merge, pReader);
- }
-
- if (k.ts == key) {
- TSDBROW fRow = tsdbRowFromBlockData(pBlockData, pDumpInfo->rowIndex);
- tRowMerge(&merge, &fRow);
- doMergeRowsInFileBlocks(pBlockData, pBlockScanInfo, pReader, &merge);
- }
-
- tRowMergerGetRow(&merge, &pTSRow);
- doAppendRowFromTSRow(pReader->pResBlock, pReader, pTSRow, uid);
- return TSDB_CODE_SUCCESS;
- } else {
- ASSERT(ik.ts != k.ts); // this case has been included in the previous if branch
-
- // [3] ik.ts > k.ts >= Key
- // [4] ik.ts > key >= k.ts
- if (ik.ts > key) {
- doMergeMemTableMultiRows(piRow, uid, &pBlockScanInfo->iiter, pDelList, &pTSRow, pReader, &freeTSRow);
- doAppendRowFromTSRow(pReader->pResBlock, pReader, pTSRow, uid);
- if (freeTSRow) {
- taosMemoryFree(pTSRow);
- }
- return TSDB_CODE_SUCCESS;
- }
-
- // [5] key > ik.ts > k.ts
- // [6] key > k.ts > ik.ts
- if (key > ik.ts) {
- TSDBROW fRow = tsdbRowFromBlockData(pBlockData, pDumpInfo->rowIndex);
- tRowMergerInit(&merge, &fRow, pReader->pSchema);
-
- doMergeRowsInFileBlocks(pBlockData, pBlockScanInfo, pReader, &merge);
- tRowMergerGetRow(&merge, &pTSRow);
- doAppendRowFromTSRow(pReader->pResBlock, pReader, pTSRow, uid);
- taosMemoryFree(pTSRow);
- return TSDB_CODE_SUCCESS;
- }
-
- //[7] key = ik.ts > k.ts
- if (key == ik.ts) {
- doMergeMemTableMultiRows(piRow, uid, &pBlockScanInfo->iiter, pDelList, &pTSRow, pReader, &freeTSRow);
+ } else {
+ tsdbDebug("%p uid:%" PRId64 ", no data in mem, %s", pReader, pBlockScanInfo->uid, pReader->idStr);
+ }
- TSDBROW fRow = tsdbRowFromBlockData(pBlockData, pDumpInfo->rowIndex);
- tRowMerge(&merge, &fRow);
- doMergeRowsInFileBlocks(pBlockData, pBlockScanInfo, pReader, &merge);
- tRowMergerGetRow(&merge, &pTSRow);
- doAppendRowFromTSRow(pReader->pResBlock, pReader, pTSRow, uid);
+ STbData* di = NULL;
+ if (pReader->pReadSnap->pIMem != NULL) {
+ di = tsdbGetTbDataFromMemTable(pReader->pReadSnap->pIMem, pReader->suid, pBlockScanInfo->uid);
+ if (di != NULL) {
+ code = tsdbTbDataIterCreate(di, &startKey, backward, &pBlockScanInfo->iiter.iter);
+ if (code == TSDB_CODE_SUCCESS) {
+ pBlockScanInfo->iiter.hasVal = (tsdbTbDataIterGet(pBlockScanInfo->iiter.iter) != NULL);
- taosMemoryFree(pTSRow);
- return TSDB_CODE_SUCCESS;
+ tsdbDebug("%p uid:%" PRId64 ", check data in imem from skey:%" PRId64 ", order:%d, ts range in buf:%" PRId64
+ "-%" PRId64 " %s",
+ pReader, pBlockScanInfo->uid, startKey.ts, pReader->order, di->minKey, di->maxKey, pReader->idStr);
+ } else {
+ tsdbError("%p uid:%" PRId64 ", failed to create iterator for mem, code:%s, %s", pReader, pBlockScanInfo->uid,
+ tstrerror(code), pReader->idStr);
+ return code;
}
}
+ } else {
+ tsdbDebug("%p uid:%" PRId64 ", no data in imem, %s", pReader, pBlockScanInfo->uid, pReader->idStr);
}
- ASSERT(0);
- return -1;
+ initDelSkylineIterator(pBlockScanInfo, pReader, d, di);
+
+ pBlockScanInfo->iterInit = true;
+ return TSDB_CODE_SUCCESS;
}
-#endif
static bool isValidFileBlockRow(SBlockData* pBlockData, SFileBlockDumpInfo* pDumpInfo,
STableBlockScanInfo* pBlockScanInfo, STsdbReader* pReader) {
@@ -1790,122 +1986,94 @@ static bool isValidFileBlockRow(SBlockData* pBlockData, SFileBlockDumpInfo* pDum
return true;
}
-static bool outOfTimeWindow(int64_t ts, STimeWindow* pWindow) { return (ts > pWindow->ekey) || (ts < pWindow->skey); }
-
-static void initLastBlockReader(SLastBlockReader* pLastBlockReader, uint64_t uid, int16_t* startPos) {
- pLastBlockReader->uid = uid;
- pLastBlockReader->rowIndex = startPos;
-
- if (*startPos == -1) {
- if (ASCENDING_TRAVERSE(pLastBlockReader->order)) {
- // do nothing
- } else {
- *startPos = pLastBlockReader->lastBlockData.nRow;
- }
+static bool initLastBlockReader(SLastBlockReader* pLBlockReader, STableBlockScanInfo* pScanInfo, STsdbReader* pReader) {
+ // the last block reader has been initialized for this table.
+ if (pLBlockReader->uid == pScanInfo->uid) {
+ return true;
}
-}
-static void setAllRowsChecked(SLastBlockReader *pLastBlockReader) {
- *pLastBlockReader->rowIndex = ALL_ROWS_CHECKED_INDEX;
-}
-
-static bool nextRowInLastBlock(SLastBlockReader *pLastBlockReader, STableBlockScanInfo* pBlockScanInfo) {
- int32_t step = (pLastBlockReader->order == TSDB_ORDER_ASC) ? 1 : -1;
- if (*pLastBlockReader->rowIndex == ALL_ROWS_CHECKED_INDEX) {
- return false;
+ if (pLBlockReader->uid != 0) {
+ tMergeTreeClose(&pLBlockReader->mergeTree);
}
- *(pLastBlockReader->rowIndex) += step;
-
- SBlockData* pBlockData = &pLastBlockReader->lastBlockData;
- for(int32_t i = *(pLastBlockReader->rowIndex); i < pBlockData->nRow && i >= 0; i += step) {
- if (pBlockData->aUid != NULL && pBlockData->aUid[i] != pLastBlockReader->uid) {
- continue;
- }
-
- int64_t ts = pBlockData->aTSKEY[i];
- if (ts < pLastBlockReader->window.skey) {
- continue;
- }
-
- int64_t ver = pBlockData->aVersion[i];
- if (ver < pLastBlockReader->verRange.minVer) {
- continue;
- }
-
- // no data any more, todo opt handle desc case
- if (ts > pLastBlockReader->window.ekey) {
- continue;
- }
-
- // todo opt handle desc case
- if (ver > pLastBlockReader->verRange.maxVer) {
- continue;
- }
+ initMemDataIterator(pScanInfo, pReader);
+ pLBlockReader->uid = pScanInfo->uid;
- TSDBKEY k = {.ts = ts, .version = ver};
- if (hasBeenDropped(pBlockScanInfo->delSkyline, &pBlockScanInfo->lastBlockDelIndex, &k, pLastBlockReader->order)) {
- continue;
- }
+ int32_t step = ASCENDING_TRAVERSE(pLBlockReader->order) ? 1 : -1;
+ STimeWindow w = pLBlockReader->window;
+ if (ASCENDING_TRAVERSE(pLBlockReader->order)) {
+ w.skey = pScanInfo->lastKey + step;
+ } else {
+ w.ekey = pScanInfo->lastKey + step;
+ }
- *(pLastBlockReader->rowIndex) = i;
- return true;
+ int32_t code =
+ tMergeTreeOpen(&pLBlockReader->mergeTree, (pLBlockReader->order == TSDB_ORDER_DESC), pReader->pFileReader,
+ pReader->suid, pScanInfo->uid, &w, &pLBlockReader->verRange, pLBlockReader->pInfo, false, pReader->idStr);
+ if (code != TSDB_CODE_SUCCESS) {
+ return false;
}
- // set all data is consumed in last block
- setAllRowsChecked(pLastBlockReader);
- return false;
+ return nextRowFromLastBlocks(pLBlockReader, pScanInfo);
}
static int64_t getCurrentKeyInLastBlock(SLastBlockReader* pLastBlockReader) {
- SBlockData* pBlockData = &pLastBlockReader->lastBlockData;
- return pBlockData->aTSKEY[*pLastBlockReader->rowIndex];
+ TSDBROW row = tMergeTreeGetRow(&pLastBlockReader->mergeTree);
+ return TSDBROW_TS(&row);
}
-static bool hasDataInLastBlock(SLastBlockReader* pLastBlockReader) {
- if (*pLastBlockReader->rowIndex == ALL_ROWS_CHECKED_INDEX) {
- return false;
+static bool hasDataInLastBlock(SLastBlockReader* pLastBlockReader) { return pLastBlockReader->mergeTree.pIter != NULL; }
+bool hasDataInFileBlock(const SBlockData* pBlockData, const SFileBlockDumpInfo* pDumpInfo) {
+ if (pBlockData->nRow > 0) {
+ ASSERT(pBlockData->nRow == pDumpInfo->totalRows);
}
-
- ASSERT(pLastBlockReader->lastBlockData.nRow > 0);
- return true;
+
+ return pBlockData->nRow > 0 && (!pDumpInfo->allDumped);
}
-int32_t mergeRowsInFileBlocks(SBlockData* pBlockData, STableBlockScanInfo* pBlockScanInfo, int64_t key, STsdbReader* pReader) {
+int32_t mergeRowsInFileBlocks(SBlockData* pBlockData, STableBlockScanInfo* pBlockScanInfo, int64_t key,
+ STsdbReader* pReader) {
SFileBlockDumpInfo* pDumpInfo = &pReader->status.fBlockDumpInfo;
-
- TSDBROW fRow = tsdbRowFromBlockData(pBlockData, pDumpInfo->rowIndex);
-
if (tryCopyDistinctRowFromFileBlock(pReader, pBlockData, key, pDumpInfo)) {
return TSDB_CODE_SUCCESS;
} else {
+ TSDBROW fRow = tsdbRowFromBlockData(pBlockData, pDumpInfo->rowIndex);
+
STSRow* pTSRow = NULL;
SRowMerger merge = {0};
tRowMergerInit(&merge, &fRow, pReader->pSchema);
doMergeRowsInFileBlocks(pBlockData, pBlockScanInfo, pReader, &merge);
- tRowMergerGetRow(&merge, &pTSRow);
+ int32_t code = tRowMergerGetRow(&merge, &pTSRow);
+ if (code != TSDB_CODE_SUCCESS) {
+ return code;
+ }
+
doAppendRowFromTSRow(pReader->pResBlock, pReader, pTSRow, pBlockScanInfo->uid);
taosMemoryFree(pTSRow);
tRowMergerClear(&merge);
return TSDB_CODE_SUCCESS;
}
-
- return TSDB_CODE_SUCCESS;
}
static int32_t buildComposedDataBlockImpl(STsdbReader* pReader, STableBlockScanInfo* pBlockScanInfo,
SBlockData* pBlockData, SLastBlockReader* pLastBlockReader) {
SFileBlockDumpInfo* pDumpInfo = &pReader->status.fBlockDumpInfo;
- int64_t key = (pBlockData->nRow > 0)? pBlockData->aTSKEY[pDumpInfo->rowIndex]:INT64_MIN;
- TSDBROW* pRow = getValidMemRow(&pBlockScanInfo->iter, pBlockScanInfo->delSkyline, pReader);
- TSDBROW* piRow = getValidMemRow(&pBlockScanInfo->iiter, pBlockScanInfo->delSkyline, pReader);
-
+ int64_t key = (pBlockData->nRow > 0 && (!pDumpInfo->allDumped)) ? pBlockData->aTSKEY[pDumpInfo->rowIndex] : INT64_MIN;
if (pBlockScanInfo->iter.hasVal && pBlockScanInfo->iiter.hasVal) {
return doMergeMultiLevelRows(pReader, pBlockScanInfo, pBlockData, pLastBlockReader);
} else {
+ TSDBROW *pRow = NULL, *piRow = NULL;
+ if (pBlockScanInfo->iter.hasVal) {
+ pRow = getValidMemRow(&pBlockScanInfo->iter, pBlockScanInfo->delSkyline, pReader);
+ }
+
+ if (pBlockScanInfo->iiter.hasVal) {
+ piRow = getValidMemRow(&pBlockScanInfo->iiter, pBlockScanInfo->delSkyline, pReader);
+ }
+
// imem + file + last block
if (pBlockScanInfo->iiter.hasVal) {
return doMergeBufAndFileRows(pReader, pBlockScanInfo, piRow, &pBlockScanInfo->iiter, key, pLastBlockReader);
@@ -1924,24 +2092,34 @@ static int32_t buildComposedDataBlockImpl(STsdbReader* pReader, STableBlockScanI
static int32_t buildComposedDataBlock(STsdbReader* pReader) {
SSDataBlock* pResBlock = pReader->pResBlock;
- SFileDataBlockInfo* pBlockInfo = getCurrentBlockInfo(&pReader->status.blockIter);
+ SFileDataBlockInfo* pBlockInfo = getCurrentBlockInfo(&pReader->status.blockIter);
+ SLastBlockReader* pLastBlockReader = pReader->status.fileIter.pLastBlockReader;
+
+ int64_t st = taosGetTimestampUs();
STableBlockScanInfo* pBlockScanInfo = NULL;
if (pBlockInfo != NULL) {
pBlockScanInfo = taosHashGet(pReader->status.pTableMap, &pBlockInfo->uid, sizeof(pBlockInfo->uid));
- } else {
+ SDataBlk* pBlock = getCurrentBlock(&pReader->status.blockIter);
+ TSDBKEY keyInBuf = getCurrentKeyInBuf(pBlockScanInfo, pReader);
+
+ // it is a clean block, load it directly
+ if (isCleanFileDataBlock(pReader, pBlockInfo, pBlock, pBlockScanInfo, keyInBuf, pLastBlockReader)) {
+ if (pReader->order == TSDB_ORDER_ASC ||
+ (pReader->order == TSDB_ORDER_DESC && (!hasDataInLastBlock(pLastBlockReader)))) {
+ copyBlockDataToSDataBlock(pReader, pBlockScanInfo);
+ goto _end;
+ }
+ }
+ } else { // file blocks not exist
pBlockScanInfo = pReader->status.pTableIter;
}
- SLastBlockReader* pLastBlockReader = pReader->status.fileIter.pLastBlockReader;
SFileBlockDumpInfo* pDumpInfo = &pReader->status.fBlockDumpInfo;
SBlockData* pBlockData = &pReader->status.fileBlockData;
int32_t step = ASCENDING_TRAVERSE(pReader->order) ? 1 : -1;
- int64_t st = taosGetTimestampUs();
-
while (1) {
- // todo check the validate of row in file block
bool hasBlockData = false;
{
while (pBlockData->nRow > 0) { // find the first qualified row in data block
@@ -1952,14 +2130,14 @@ static int32_t buildComposedDataBlock(STsdbReader* pReader) {
pDumpInfo->rowIndex += step;
- SBlock* pBlock = getCurrentBlock(&pReader->status.blockIter);
+ SDataBlk* pBlock = getCurrentBlock(&pReader->status.blockIter);
if (pDumpInfo->rowIndex >= pBlock->nRow || pDumpInfo->rowIndex < 0) {
setBlockAllDumped(pDumpInfo, pBlock->maxKey.ts, pReader->order);
break;
}
}
}
-
+
bool hasBlockLData = hasDataInLastBlock(pLastBlockReader);
// no data in last block and block, no need to proceed.
@@ -1971,7 +2149,7 @@ static int32_t buildComposedDataBlock(STsdbReader* pReader) {
// currently loaded file data block is consumed
if ((pBlockData->nRow > 0) && (pDumpInfo->rowIndex >= pBlockData->nRow || pDumpInfo->rowIndex < 0)) {
- SBlock* pBlock = getCurrentBlock(&pReader->status.blockIter);
+ SDataBlk* pBlock = getCurrentBlock(&pReader->status.blockIter);
setBlockAllDumped(pDumpInfo, pBlock->maxKey.ts, pReader->order);
break;
}
@@ -1981,85 +2159,28 @@ static int32_t buildComposedDataBlock(STsdbReader* pReader) {
}
}
+_end:
pResBlock->info.uid = pBlockScanInfo->uid;
blockDataUpdateTsWindow(pResBlock, 0);
setComposedBlockFlag(pReader, true);
- int64_t et = taosGetTimestampUs();
-
- tsdbDebug("%p uid:%" PRIu64 ", composed data block created, brange:%" PRIu64 "-%" PRIu64 " rows:%d, elapsed time:%.2f ms %s",
- pReader, pBlockScanInfo->uid, pResBlock->info.window.skey, pResBlock->info.window.ekey,
- pResBlock->info.rows, (et - st) / 1000.0, pReader->idStr);
-
- return TSDB_CODE_SUCCESS;
-}
-
-void setComposedBlockFlag(STsdbReader* pReader, bool composed) { pReader->status.composedDataBlock = composed; }
-
-static int32_t initMemDataIterator(STableBlockScanInfo* pBlockScanInfo, STsdbReader* pReader) {
- if (pBlockScanInfo->iterInit) {
- return TSDB_CODE_SUCCESS;
- }
-
- int32_t code = TSDB_CODE_SUCCESS;
-
- TSDBKEY startKey = {0};
- if (ASCENDING_TRAVERSE(pReader->order)) {
- startKey = (TSDBKEY){.ts = pReader->window.skey, .version = pReader->verRange.minVer};
- } else {
- startKey = (TSDBKEY){.ts = pReader->window.ekey, .version = pReader->verRange.maxVer};
- }
-
- int32_t backward = (!ASCENDING_TRAVERSE(pReader->order));
-
- STbData* d = NULL;
- if (pReader->pReadSnap->pMem != NULL) {
- d = tsdbGetTbDataFromMemTable(pReader->pReadSnap->pMem, pReader->suid, pBlockScanInfo->uid);
- if (d != NULL) {
- code = tsdbTbDataIterCreate(d, &startKey, backward, &pBlockScanInfo->iter.iter);
- if (code == TSDB_CODE_SUCCESS) {
- pBlockScanInfo->iter.hasVal = (tsdbTbDataIterGet(pBlockScanInfo->iter.iter) != NULL);
-
- tsdbDebug("%p uid:%" PRId64 ", check data in mem from skey:%" PRId64 ", order:%d, ts range in buf:%" PRId64
- "-%" PRId64 " %s",
- pReader, pBlockScanInfo->uid, startKey.ts, pReader->order, d->minKey, d->maxKey, pReader->idStr);
- } else {
- tsdbError("%p uid:%" PRId64 ", failed to create iterator for imem, code:%s, %s", pReader, pBlockScanInfo->uid,
- tstrerror(code), pReader->idStr);
- return code;
- }
- }
- } else {
- tsdbDebug("%p uid:%" PRId64 ", no data in mem, %s", pReader, pBlockScanInfo->uid, pReader->idStr);
- }
+ double el = (taosGetTimestampUs() - st) / 1000.0;
- STbData* di = NULL;
- if (pReader->pReadSnap->pIMem != NULL) {
- di = tsdbGetTbDataFromMemTable(pReader->pReadSnap->pIMem, pReader->suid, pBlockScanInfo->uid);
- if (di != NULL) {
- code = tsdbTbDataIterCreate(di, &startKey, backward, &pBlockScanInfo->iiter.iter);
- if (code == TSDB_CODE_SUCCESS) {
- pBlockScanInfo->iiter.hasVal = (tsdbTbDataIterGet(pBlockScanInfo->iiter.iter) != NULL);
+ pReader->cost.composedBlocks += 1;
+ pReader->cost.buildComposedBlockTime += el;
- tsdbDebug("%p uid:%" PRId64 ", check data in imem from skey:%" PRId64 ", order:%d, ts range in buf:%" PRId64
- "-%" PRId64 " %s",
- pReader, pBlockScanInfo->uid, startKey.ts, pReader->order, di->minKey, di->maxKey, pReader->idStr);
- } else {
- tsdbError("%p uid:%" PRId64 ", failed to create iterator for mem, code:%s, %s", pReader, pBlockScanInfo->uid,
- tstrerror(code), pReader->idStr);
- return code;
- }
- }
- } else {
- tsdbDebug("%p uid:%" PRId64 ", no data in imem, %s", pReader, pBlockScanInfo->uid, pReader->idStr);
+ if (pResBlock->info.rows > 0) {
+ tsdbDebug("%p uid:%" PRIu64 ", composed data block created, brange:%" PRIu64 "-%" PRIu64
+ " rows:%d, elapsed time:%.2f ms %s",
+ pReader, pBlockScanInfo->uid, pResBlock->info.window.skey, pResBlock->info.window.ekey,
+ pResBlock->info.rows, el, pReader->idStr);
}
- initDelSkylineIterator(pBlockScanInfo, pReader, d, di);
-
- pBlockScanInfo->iterInit = true;
return TSDB_CODE_SUCCESS;
}
+void setComposedBlockFlag(STsdbReader* pReader, bool composed) { pReader->status.composedDataBlock = composed; }
+
int32_t initDelSkylineIterator(STableBlockScanInfo* pBlockScanInfo, STsdbReader* pReader, STbData* pMemTbData,
STbData* piMemTbData) {
if (pBlockScanInfo->delSkyline != NULL) {
@@ -2142,10 +2263,8 @@ _err:
return code;
}
-static TSDBKEY getCurrentKeyInBuf(STableBlockScanInfo* pScanInfo, STsdbReader* pReader) {
- TSDBKEY key = {.ts = TSKEY_INITIAL_VAL};
-
- initMemDataIterator(pScanInfo, pReader);
+TSDBKEY getCurrentKeyInBuf(STableBlockScanInfo* pScanInfo, STsdbReader* pReader) {
+ TSDBKEY key = {.ts = TSKEY_INITIAL_VAL};
TSDBROW* pRow = getValidMemRow(&pScanInfo->iter, pScanInfo->delSkyline, pReader);
if (pRow != NULL) {
key = TSDBROW_KEY(pRow);
@@ -2165,12 +2284,10 @@ static TSDBKEY getCurrentKeyInBuf(STableBlockScanInfo* pScanInfo, STsdbReader* p
static int32_t moveToNextFile(STsdbReader* pReader, SBlockNumber* pBlockNum) {
SReaderStatus* pStatus = &pReader->status;
pBlockNum->numOfBlocks = 0;
- pBlockNum->numOfLastBlocks = 0;
+ pBlockNum->numOfLastFiles = 0;
size_t numOfTables = taosHashGetSize(pReader->status.pTableMap);
SArray* pIndexList = taosArrayInit(numOfTables, sizeof(SBlockIdx));
- SArray* pLastBlocks = pStatus->fileIter.pLastBlockReader->pBlockL;
- taosArrayClear(pLastBlocks);
while (1) {
bool hasNext = filesetIteratorNext(&pStatus->fileIter, pReader);
@@ -2185,32 +2302,16 @@ static int32_t moveToNextFile(STsdbReader* pReader, SBlockNumber* pBlockNum) {
return code;
}
- code = tsdbReadBlockL(pReader->pFileReader, pLastBlocks);
- if (code != TSDB_CODE_SUCCESS) {
- taosArrayDestroy(pIndexList);
- return code;
- }
-
- if (taosArrayGetSize(pIndexList) > 0 || taosArrayGetSize(pLastBlocks) > 0) {
- SArray* pQLastBlock = taosArrayInit(4, sizeof(SBlockL));
-
- code = doLoadFileBlock(pReader, pIndexList, pLastBlocks, pBlockNum, pQLastBlock);
+ if (taosArrayGetSize(pIndexList) > 0 || pReader->pFileReader->pSet->nSttF > 0) {
+ code = doLoadFileBlock(pReader, pIndexList, pBlockNum);
if (code != TSDB_CODE_SUCCESS) {
taosArrayDestroy(pIndexList);
- taosArrayDestroy(pQLastBlock);
return code;
}
- if (pBlockNum->numOfBlocks + pBlockNum->numOfLastBlocks > 0) {
- ASSERT(taosArrayGetSize(pQLastBlock) == pBlockNum->numOfLastBlocks);
- taosArrayClear(pLastBlocks);
- taosArrayAddAll(pLastBlocks, pQLastBlock);
-
- taosArrayDestroy(pQLastBlock);
+ if (pBlockNum->numOfBlocks + pBlockNum->numOfLastFiles > 0) {
break;
}
-
- taosArrayDestroy(pQLastBlock);
}
// no blocks in current file, try next files
@@ -2220,101 +2321,101 @@ static int32_t moveToNextFile(STsdbReader* pReader, SBlockNumber* pBlockNum) {
return TSDB_CODE_SUCCESS;
}
-static int32_t doLoadRelatedLastBlock(SLastBlockReader* pLastBlockReader, STableBlockScanInfo *pBlockScanInfo, STsdbReader* pReader) {
- SArray* pBlocks = pLastBlockReader->pBlockL;
- SBlockL* pBlock = NULL;
-
- uint64_t uid = pBlockScanInfo->uid;
- int32_t totalLastBlocks = (int32_t)taosArrayGetSize(pBlocks);
+static int32_t uidComparFunc(const void* p1, const void* p2) {
+ uint64_t pu1 = *(uint64_t*)p1;
+ uint64_t pu2 = *(uint64_t*)p2;
+ if (pu1 == pu2) {
+ return 0;
+ } else {
+ return (pu1 < pu2) ? -1 : 1;
+ }
+}
- initMemDataIterator(pBlockScanInfo, pReader);
+static void extractOrderedTableUidList(SUidOrderCheckInfo* pOrderCheckInfo, SReaderStatus* pStatus) {
+ int32_t index = 0;
+ int32_t total = taosHashGetSize(pStatus->pTableMap);
- // find the correct SBlockL. todo binary search
- int32_t index = -1;
- for (int32_t i = 0; i < totalLastBlocks; ++i) {
- SBlockL* p = taosArrayGet(pBlocks, i);
- if (p->minUid <= uid && p->maxUid >= uid) {
- index = i;
- pBlock = p;
- break;
- }
+ void* p = taosHashIterate(pStatus->pTableMap, NULL);
+ while (p != NULL) {
+ STableBlockScanInfo* pScanInfo = p;
+ pOrderCheckInfo->tableUidList[index++] = pScanInfo->uid;
+ p = taosHashIterate(pStatus->pTableMap, p);
}
- if (index == -1) {
- pLastBlockReader->currentBlockIndex = index;
- tBlockDataReset(&pLastBlockReader->lastBlockData);
- return TSDB_CODE_SUCCESS;
- }
+ taosSort(pOrderCheckInfo->tableUidList, total, sizeof(uint64_t), uidComparFunc);
+}
- // the required last datablock has already loaded
- if (index == pLastBlockReader->currentBlockIndex) {
+static int32_t initOrderCheckInfo(SUidOrderCheckInfo* pOrderCheckInfo, SReaderStatus* pStatus) {
+ int32_t total = taosHashGetSize(pStatus->pTableMap);
+ if (total == 0) {
return TSDB_CODE_SUCCESS;
}
- int64_t st = taosGetTimestampUs();
- int32_t code = tBlockDataInit(&pLastBlockReader->lastBlockData, pReader->suid, pReader->suid ? 0 : uid, pReader->pSchema);
- if (code != TSDB_CODE_SUCCESS) {
- tsdbError("%p init block data failed, code:%s %s", pReader, tstrerror(code), pReader->idStr);
- return code;
- }
-
- code = tsdbReadLastBlock(pReader->pFileReader, pBlock, &pLastBlockReader->lastBlockData);
+ if (pOrderCheckInfo->tableUidList == NULL) {
+ pOrderCheckInfo->currentIndex = 0;
+ pOrderCheckInfo->tableUidList = taosMemoryMalloc(total * sizeof(uint64_t));
+ if (pOrderCheckInfo->tableUidList == NULL) {
+ return TSDB_CODE_OUT_OF_MEMORY;
+ }
- double el = (taosGetTimestampUs() - st) / 1000.0;
- if (code != TSDB_CODE_SUCCESS) {
- tsdbError("%p error occurs in loading last block into buffer, last block index:%d, total:%d code:%s %s", pReader,
- pLastBlockReader->currentBlockIndex, totalLastBlocks, tstrerror(code), pReader->idStr);
+ extractOrderedTableUidList(pOrderCheckInfo, pStatus);
+ uint64_t uid = pOrderCheckInfo->tableUidList[0];
+ pStatus->pTableIter = taosHashGet(pStatus->pTableMap, &uid, sizeof(uid));
} else {
- tsdbDebug("%p load last block completed, uid:%" PRIu64
- " last block index:%d, total:%d rows:%d, minVer:%d, maxVer:%d, brange:%" PRId64 "-%" PRId64
- " elapsed time:%.2f ms, %s",
- pReader, uid, index, totalLastBlocks, pBlock->nRow, pBlock->minVer, pBlock->maxVer, pBlock->minKey,
- pBlock->maxKey, el, pReader->idStr);
- }
+ if (pStatus->pTableIter == NULL) { // it is the last block of a new file
+ pOrderCheckInfo->currentIndex = 0;
+ uint64_t uid = pOrderCheckInfo->tableUidList[pOrderCheckInfo->currentIndex];
+ pStatus->pTableIter = taosHashGet(pStatus->pTableMap, &uid, sizeof(uid));
+
+ // the tableMap has already updated
+ if (pStatus->pTableIter == NULL) {
+ void* p = taosMemoryRealloc(pOrderCheckInfo->tableUidList, total * sizeof(uint64_t));
+ if (p == NULL) {
+ return TSDB_CODE_OUT_OF_MEMORY;
+ }
+
+ pOrderCheckInfo->tableUidList = p;
+ extractOrderedTableUidList(pOrderCheckInfo, pStatus);
- pLastBlockReader->currentBlockIndex = index;
- pReader->cost.lastBlockLoad += 1;
- pReader->cost.lastBlockLoadTime += el;
+ uid = pOrderCheckInfo->tableUidList[0];
+ pStatus->pTableIter = taosHashGet(pStatus->pTableMap, &uid, sizeof(uid));
+ }
+ }
+ }
return TSDB_CODE_SUCCESS;
}
+static bool moveToNextTable(SUidOrderCheckInfo* pOrderedCheckInfo, SReaderStatus* pStatus) {
+ pOrderedCheckInfo->currentIndex += 1;
+ if (pOrderedCheckInfo->currentIndex >= taosHashGetSize(pStatus->pTableMap)) {
+ pStatus->pTableIter = NULL;
+ return false;
+ }
+
+ uint64_t uid = pOrderedCheckInfo->tableUidList[pOrderedCheckInfo->currentIndex];
+ pStatus->pTableIter = taosHashGet(pStatus->pTableMap, &uid, sizeof(uid));
+ ASSERT(pStatus->pTableIter != NULL);
+ return true;
+}
+
static int32_t doLoadLastBlockSequentially(STsdbReader* pReader) {
- SReaderStatus* pStatus = &pReader->status;
+ SReaderStatus* pStatus = &pReader->status;
SLastBlockReader* pLastBlockReader = pStatus->fileIter.pLastBlockReader;
- while(1) {
- if (pStatus->pTableIter == NULL) {
- pStatus->pTableIter = taosHashIterate(pStatus->pTableMap, NULL);
- if (pStatus->pTableIter == NULL) {
- return TSDB_CODE_SUCCESS;
- }
- }
+ SUidOrderCheckInfo* pOrderedCheckInfo = &pStatus->uidCheckInfo;
+ int32_t code = initOrderCheckInfo(pOrderedCheckInfo, pStatus);
+ if (code != TSDB_CODE_SUCCESS || (taosHashGetSize(pStatus->pTableMap) == 0)) {
+ return code;
+ }
+ while (1) {
// load the last data block of current table
- // todo opt perf by avoiding load last block repeatly
STableBlockScanInfo* pScanInfo = pStatus->pTableIter;
- int32_t code = doLoadRelatedLastBlock(pLastBlockReader, pScanInfo, pReader);
- if (code != TSDB_CODE_SUCCESS) {
- return code;
- }
-
- if (pLastBlockReader->currentBlockIndex != -1) {
- initLastBlockReader(pLastBlockReader, pScanInfo->uid, &pScanInfo->indexInBlockL);
- int32_t index = pScanInfo->indexInBlockL;
- if (index == DEFAULT_ROW_INDEX_VAL || index == pLastBlockReader->lastBlockData.nRow) {
- bool hasData = nextRowInLastBlock(pLastBlockReader, pScanInfo);
- if (!hasData) { // current table does not have rows in last block, try next table
- pStatus->pTableIter = taosHashIterate(pStatus->pTableMap, pStatus->pTableIter);
- if (pStatus->pTableIter == NULL) {
- return TSDB_CODE_SUCCESS;
- }
- continue;
- }
- }
- } else { // no data in last block, try next table
- pStatus->pTableIter = taosHashIterate(pStatus->pTableMap, pStatus->pTableIter);
- if (pStatus->pTableIter == NULL) {
+ bool hasVal = initLastBlockReader(pLastBlockReader, pScanInfo, pReader);
+ if (!hasVal) {
+ bool hasNexTable = moveToNextTable(pOrderedCheckInfo, pStatus);
+ if (!hasNexTable) {
return TSDB_CODE_SUCCESS;
}
continue;
@@ -2330,17 +2431,16 @@ static int32_t doLoadLastBlockSequentially(STsdbReader* pReader) {
}
// current table is exhausted, let's try next table
- pStatus->pTableIter = taosHashIterate(pStatus->pTableMap, pStatus->pTableIter);
- if (pStatus->pTableIter == NULL) {
+ bool hasNexTable = moveToNextTable(pOrderedCheckInfo, pStatus);
+ if (!hasNexTable) {
return TSDB_CODE_SUCCESS;
}
}
}
static int32_t doBuildDataBlock(STsdbReader* pReader) {
- TSDBKEY key = {0};
- int32_t code = TSDB_CODE_SUCCESS;
- SBlock* pBlock = NULL;
+ int32_t code = TSDB_CODE_SUCCESS;
+ SDataBlk* pBlock = NULL;
SReaderStatus* pStatus = &pReader->status;
SDataBlockIter* pBlockIter = &pStatus->blockIter;
@@ -2358,53 +2458,34 @@ static int32_t doBuildDataBlock(STsdbReader* pReader) {
pBlock = getCurrentBlock(pBlockIter);
}
- {
- key = getCurrentKeyInBuf(pScanInfo, pReader);
-
- // load the last data block of current table
- code = doLoadRelatedLastBlock(pLastBlockReader, pScanInfo, pReader);
- if (code != TSDB_CODE_SUCCESS) {
- return code;
- }
-
- // note: the lastblock may be null here
- initLastBlockReader(pLastBlockReader, pScanInfo->uid, &pScanInfo->indexInBlockL);
- if (pScanInfo->indexInBlockL == DEFAULT_ROW_INDEX_VAL || pScanInfo->indexInBlockL == pLastBlockReader->lastBlockData.nRow) {
- bool hasData = nextRowInLastBlock(pLastBlockReader, pScanInfo);
- }
- }
+ initLastBlockReader(pLastBlockReader, pScanInfo, pReader);
+ TSDBKEY keyInBuf = getCurrentKeyInBuf(pScanInfo, pReader);
if (pBlockInfo == NULL) { // build data block from last data file
ASSERT(pBlockIter->numOfBlocks == 0);
code = buildComposedDataBlock(pReader);
- } else if (fileBlockShouldLoad(pReader, pBlockInfo, pBlock, pScanInfo, key, pLastBlockReader)) {
- tBlockDataReset(&pStatus->fileBlockData);
- code = tBlockDataInit(&pStatus->fileBlockData, pReader->suid, pScanInfo->uid, pReader->pSchema);
- if (code != TSDB_CODE_SUCCESS) {
- return code;
- }
-
- code = doLoadFileBlockData(pReader, pBlockIter, &pStatus->fileBlockData);
+ } else if (fileBlockShouldLoad(pReader, pBlockInfo, pBlock, pScanInfo, keyInBuf, pLastBlockReader)) {
+ code = doLoadFileBlockData(pReader, pBlockIter, &pStatus->fileBlockData, pScanInfo->uid);
if (code != TSDB_CODE_SUCCESS) {
return code;
}
// build composed data block
code = buildComposedDataBlock(pReader);
- } else if (bufferDataInFileBlockGap(pReader->order, key, pBlock)) {
+ } else if (bufferDataInFileBlockGap(pReader->order, keyInBuf, pBlock)) {
// data in memory that are earlier than current file block
- // todo rows in buffer should be less than the file block in asc, greater than file block in desc
+ // rows in buffer should be less than the file block in asc, greater than file block in desc
int64_t endKey = (ASCENDING_TRAVERSE(pReader->order)) ? pBlock->minKey.ts : pBlock->maxKey.ts;
code = buildDataBlockFromBuf(pReader, pScanInfo, endKey);
} else {
if (hasDataInLastBlock(pLastBlockReader) && !ASCENDING_TRAVERSE(pReader->order)) {
// only return the rows in last block
int64_t tsLast = getCurrentKeyInLastBlock(pLastBlockReader);
- ASSERT (tsLast >= pBlock->maxKey.ts);
+ ASSERT(tsLast >= pBlock->maxKey.ts);
tBlockDataReset(&pReader->status.fileBlockData);
code = buildComposedDataBlock(pReader);
- } else { // whole block is required, return it directly
+ } else { // whole block is required, return it directly
SDataBlockInfo* pInfo = &pReader->pResBlock->info;
pInfo->rows = pBlock->nRow;
pInfo->uid = pScanInfo->uid;
@@ -2451,7 +2532,7 @@ static int32_t buildBlockFromBufferSequentially(STsdbReader* pReader) {
// set the correct start position in case of the first/last file block, according to the query time window
static void initBlockDumpInfo(STsdbReader* pReader, SDataBlockIter* pBlockIter) {
- SBlock* pBlock = getCurrentBlock(pBlockIter);
+ SDataBlk* pBlock = getCurrentBlock(pBlockIter);
SReaderStatus* pStatus = &pReader->status;
@@ -2471,7 +2552,7 @@ static int32_t initForFirstBlockInFile(STsdbReader* pReader, SDataBlockIter* pBl
}
// all data files are consumed, try data in buffer
- if (num.numOfBlocks + num.numOfLastBlocks == 0) {
+ if (num.numOfBlocks + num.numOfLastFiles == 0) {
pReader->status.loadFromFile = false;
return code;
}
@@ -2479,14 +2560,11 @@ static int32_t initForFirstBlockInFile(STsdbReader* pReader, SDataBlockIter* pBl
// initialize the block iterator for a new fileset
if (num.numOfBlocks > 0) {
code = initBlockIterator(pReader, pBlockIter, num.numOfBlocks);
- } else { // no block data, only last block exists
+ } else { // no block data, only last block exists
tBlockDataReset(&pReader->status.fileBlockData);
- resetDataBlockIterator(pBlockIter, pReader->order, pReader->status.pTableMap);
+ resetDataBlockIterator(pBlockIter, pReader->order);
}
- SLastBlockReader* pLReader = pReader->status.fileIter.pLastBlockReader;
- pLReader->currentBlockIndex = -1;
-
// set the correct start position according to the query time window
initBlockDumpInfo(pReader, pBlockIter);
return code;
@@ -2504,7 +2582,7 @@ static int32_t buildBlockFromFiles(STsdbReader* pReader) {
SDataBlockIter* pBlockIter = &pReader->status.blockIter;
if (pBlockIter->numOfBlocks == 0) {
- _begin:
+ _begin:
code = doLoadLastBlockSequentially(pReader);
if (code != TSDB_CODE_SUCCESS) {
return code;
@@ -2551,21 +2629,24 @@ static int32_t buildBlockFromFiles(STsdbReader* pReader) {
bool hasNext = blockIteratorNext(&pReader->status.blockIter);
if (hasNext) { // check for the next block in the block accessed order list
initBlockDumpInfo(pReader, pBlockIter);
- } else if (taosArrayGetSize(pReader->status.fileIter.pLastBlockReader->pBlockL) > 0) { // data blocks in current file are exhausted, let's try the next file now
- tBlockDataReset(&pReader->status.fileBlockData);
- resetDataBlockIterator(pBlockIter, pReader->order, pReader->status.pTableMap);
- goto _begin;
} else {
- code = initForFirstBlockInFile(pReader, pBlockIter);
+ if (pReader->status.pCurrentFileset->nSttF > 0) {
+ // data blocks in current file are exhausted, let's try the next file now
+ tBlockDataReset(&pReader->status.fileBlockData);
+ resetDataBlockIterator(pBlockIter, pReader->order);
+ goto _begin;
+ } else {
+ code = initForFirstBlockInFile(pReader, pBlockIter);
- // error happens or all the data files are completely checked
- if ((code != TSDB_CODE_SUCCESS) || (pReader->status.loadFromFile == false)) {
- return code;
- }
+ // error happens or all the data files are completely checked
+ if ((code != TSDB_CODE_SUCCESS) || (pReader->status.loadFromFile == false)) {
+ return code;
+ }
- // this file does not have blocks, let's start check the last block file
- if (pBlockIter->numOfBlocks == 0) {
- goto _begin;
+ // this file does not have blocks, let's start check the last block file
+ if (pBlockIter->numOfBlocks == 0) {
+ goto _begin;
+ }
}
}
}
@@ -2587,7 +2668,11 @@ static STsdb* getTsdbByRetentions(SVnode* pVnode, TSKEY winSKey, SRetention* ret
int8_t* pLevel) {
if (VND_IS_RSMA(pVnode)) {
int8_t level = 0;
- int64_t now = taosGetTimestamp(pVnode->config.tsdbCfg.precision);
+ int8_t precision = pVnode->config.tsdbCfg.precision;
+ int64_t now = taosGetTimestamp(precision);
+ int64_t offset = tsQueryRsmaTolerance * ((precision == TSDB_TIME_PRECISION_MILLI) ? 1
+ : (precision == TSDB_TIME_PRECISION_MICRO) ? 1000
+ : 1000000);
for (int8_t i = 0; i < TSDB_RETENTION_MAX; ++i) {
SRetention* pRetention = retentions + level;
@@ -2597,7 +2682,7 @@ static STsdb* getTsdbByRetentions(SVnode* pVnode, TSKEY winSKey, SRetention* ret
}
break;
}
- if ((now - pRetention->keep) <= winSKey) {
+ if ((now - pRetention->keep) <= (winSKey + offset)) {
break;
}
++level;
@@ -2824,7 +2909,7 @@ typedef enum {
CHECK_FILEBLOCK_QUIT = 0x2,
} CHECK_FILEBLOCK_STATE;
-static int32_t checkForNeighborFileBlock(STsdbReader* pReader, STableBlockScanInfo* pScanInfo, SBlock* pBlock,
+static int32_t checkForNeighborFileBlock(STsdbReader* pReader, STableBlockScanInfo* pScanInfo, SDataBlk* pBlock,
SFileDataBlockInfo* pFBlock, SRowMerger* pMerger, int64_t key,
CHECK_FILEBLOCK_STATE* state) {
SFileBlockDumpInfo* pDumpInfo = &pReader->status.fBlockDumpInfo;
@@ -2833,8 +2918,8 @@ static int32_t checkForNeighborFileBlock(STsdbReader* pReader, STableBlockScanIn
*state = CHECK_FILEBLOCK_QUIT;
int32_t step = ASCENDING_TRAVERSE(pReader->order) ? 1 : -1;
- int32_t nextIndex = -1;
- SBlock* pNeighborBlock = getNeighborBlockOfSameTable(pFBlock, pScanInfo, &nextIndex, pReader->order);
+ int32_t nextIndex = -1;
+ SDataBlk* pNeighborBlock = getNeighborBlockOfSameTable(pFBlock, pScanInfo, &nextIndex, pReader->order);
if (pNeighborBlock == NULL) { // do nothing
return 0;
}
@@ -2854,13 +2939,7 @@ static int32_t checkForNeighborFileBlock(STsdbReader* pReader, STableBlockScanIn
setFileBlockActiveInBlockIter(pBlockIter, neighborIndex, step);
// 3. load the neighbor block, and set it to be the currently accessed file data block
- tBlockDataReset(&pStatus->fileBlockData);
- int32_t code = tBlockDataInit(&pStatus->fileBlockData, pReader->suid, pFBlock->uid, pReader->pSchema);
- if (code != TSDB_CODE_SUCCESS) {
- return code;
- }
-
- code = doLoadFileBlockData(pReader, pBlockIter, &pStatus->fileBlockData);
+ int32_t code = doLoadFileBlockData(pReader, pBlockIter, &pStatus->fileBlockData, pFBlock->uid);
if (code != TSDB_CODE_SUCCESS) {
return code;
}
@@ -2898,7 +2977,7 @@ int32_t doMergeRowsInFileBlocks(SBlockData* pBlockData, STableBlockScanInfo* pSc
CHECK_FILEBLOCK_STATE st;
SFileDataBlockInfo* pFileBlockInfo = getCurrentBlockInfo(&pReader->status.blockIter);
- SBlock* pCurrentBlock = getCurrentBlock(&pReader->status.blockIter);
+ SDataBlk* pCurrentBlock = getCurrentBlock(&pReader->status.blockIter);
checkForNeighborFileBlock(pReader, pScanInfo, pCurrentBlock, pFileBlockInfo, pMerger, key, &st);
if (st == CHECK_FILEBLOCK_QUIT) {
break;
@@ -2909,11 +2988,13 @@ int32_t doMergeRowsInFileBlocks(SBlockData* pBlockData, STableBlockScanInfo* pSc
return TSDB_CODE_SUCCESS;
}
-int32_t doMergeRowsInLastBlock(SLastBlockReader* pLastBlockReader, STableBlockScanInfo* pScanInfo, int64_t ts, SRowMerger* pMerger) {
- while(nextRowInLastBlock(pLastBlockReader, pScanInfo)) {
+int32_t doMergeRowsInLastBlock(SLastBlockReader* pLastBlockReader, STableBlockScanInfo* pScanInfo, int64_t ts,
+ SRowMerger* pMerger) {
+ pScanInfo->lastKey = ts;
+ while (nextRowFromLastBlocks(pLastBlockReader, pScanInfo)) {
int64_t next1 = getCurrentKeyInLastBlock(pLastBlockReader);
if (next1 == ts) {
- TSDBROW fRow1 = tsdbRowFromBlockData(&pLastBlockReader->lastBlockData, *pLastBlockReader->rowIndex);
+ TSDBROW fRow1 = tMergeTreeGetRow(&pLastBlockReader->mergeTree);
tRowMerge(pMerger, &fRow1);
} else {
break;
@@ -2923,8 +3004,8 @@ int32_t doMergeRowsInLastBlock(SLastBlockReader* pLastBlockReader, STableBlockSc
return TSDB_CODE_SUCCESS;
}
-void doMergeMemTableMultiRows(TSDBROW* pRow, uint64_t uid, SIterInfo* pIter, SArray* pDelList, STSRow** pTSRow,
- STsdbReader* pReader, bool* freeTSRow) {
+int32_t doMergeMemTableMultiRows(TSDBROW* pRow, uint64_t uid, SIterInfo* pIter, SArray* pDelList, STSRow** pTSRow,
+ STsdbReader* pReader, bool* freeTSRow) {
TSDBROW* pNextRow = NULL;
TSDBROW current = *pRow;
@@ -2934,19 +3015,19 @@ void doMergeMemTableMultiRows(TSDBROW* pRow, uint64_t uid, SIterInfo* pIter, SAr
if (!pIter->hasVal) {
*pTSRow = current.pTSRow;
*freeTSRow = false;
- return;
+ return TSDB_CODE_SUCCESS;
} else { // has next point in mem/imem
pNextRow = getValidMemRow(pIter, pDelList, pReader);
if (pNextRow == NULL) {
*pTSRow = current.pTSRow;
*freeTSRow = false;
- return;
+ return TSDB_CODE_SUCCESS;
}
if (current.pTSRow->ts != pNextRow->pTSRow->ts) {
*pTSRow = current.pTSRow;
*freeTSRow = false;
- return;
+ return TSDB_CODE_SUCCESS;
}
}
}
@@ -2966,14 +3047,18 @@ void doMergeMemTableMultiRows(TSDBROW* pRow, uint64_t uid, SIterInfo* pIter, SAr
tRowMergerAdd(&merge, pNextRow, pTSchema1);
doMergeRowsInBuf(pIter, uid, current.pTSRow->ts, pDelList, &merge, pReader);
- tRowMergerGetRow(&merge, pTSRow);
- tRowMergerClear(&merge);
+ int32_t code = tRowMergerGetRow(&merge, pTSRow);
+ if (code != TSDB_CODE_SUCCESS) {
+ return code;
+ }
+ tRowMergerClear(&merge);
*freeTSRow = true;
+ return TSDB_CODE_SUCCESS;
}
-void doMergeMemIMemRows(TSDBROW* pRow, TSDBROW* piRow, STableBlockScanInfo* pBlockScanInfo, STsdbReader* pReader,
- STSRow** pTSRow) {
+int32_t doMergeMemIMemRows(TSDBROW* pRow, TSDBROW* piRow, STableBlockScanInfo* pBlockScanInfo, STsdbReader* pReader,
+ STSRow** pTSRow) {
SRowMerger merge = {0};
TSDBKEY k = TSDBROW_KEY(pRow);
@@ -2997,7 +3082,8 @@ void doMergeMemIMemRows(TSDBROW* pRow, TSDBROW* piRow, STableBlockScanInfo* pBlo
doMergeRowsInBuf(&pBlockScanInfo->iiter, pBlockScanInfo->uid, k.ts, pBlockScanInfo->delSkyline, &merge, pReader);
}
- tRowMergerGetRow(&merge, pTSRow);
+ int32_t code = tRowMergerGetRow(&merge, pTSRow);
+ return code;
}
int32_t tsdbGetNextRowInMem(STableBlockScanInfo* pBlockScanInfo, STsdbReader* pReader, STSRow** pTSRow, int64_t endKey,
@@ -3027,28 +3113,31 @@ int32_t tsdbGetNextRowInMem(STableBlockScanInfo* pBlockScanInfo, STsdbReader* pR
TSDBKEY k = TSDBROW_KEY(pRow);
TSDBKEY ik = TSDBROW_KEY(piRow);
+ int32_t code = TSDB_CODE_SUCCESS;
if (ik.ts != k.ts) {
if (((ik.ts < k.ts) && asc) || ((ik.ts > k.ts) && (!asc))) { // ik.ts < k.ts
- doMergeMemTableMultiRows(piRow, uid, &pBlockScanInfo->iiter, pDelList, pTSRow, pReader, freeTSRow);
+ code = doMergeMemTableMultiRows(piRow, uid, &pBlockScanInfo->iiter, pDelList, pTSRow, pReader, freeTSRow);
} else if (((k.ts < ik.ts) && asc) || ((k.ts > ik.ts) && (!asc))) {
- doMergeMemTableMultiRows(pRow, uid, &pBlockScanInfo->iter, pDelList, pTSRow, pReader, freeTSRow);
+ code = doMergeMemTableMultiRows(pRow, uid, &pBlockScanInfo->iter, pDelList, pTSRow, pReader, freeTSRow);
}
} else { // ik.ts == k.ts
- doMergeMemIMemRows(pRow, piRow, pBlockScanInfo, pReader, pTSRow);
*freeTSRow = true;
+ code = doMergeMemIMemRows(pRow, piRow, pBlockScanInfo, pReader, pTSRow);
+ if (code != TSDB_CODE_SUCCESS) {
+ return code;
+ }
}
- return TSDB_CODE_SUCCESS;
+ return code;
}
if (pBlockScanInfo->iter.hasVal && pRow != NULL) {
- doMergeMemTableMultiRows(pRow, pBlockScanInfo->uid, &pBlockScanInfo->iter, pDelList, pTSRow, pReader, freeTSRow);
- return TSDB_CODE_SUCCESS;
+ return doMergeMemTableMultiRows(pRow, pBlockScanInfo->uid, &pBlockScanInfo->iter, pDelList, pTSRow, pReader,
+ freeTSRow);
}
if (pBlockScanInfo->iiter.hasVal && piRow != NULL) {
- doMergeMemTableMultiRows(piRow, uid, &pBlockScanInfo->iiter, pDelList, pTSRow, pReader, freeTSRow);
- return TSDB_CODE_SUCCESS;
+ return doMergeMemTableMultiRows(piRow, uid, &pBlockScanInfo->iiter, pDelList, pTSRow, pReader, freeTSRow);
}
return TSDB_CODE_SUCCESS;
@@ -3098,7 +3187,8 @@ int32_t doAppendRowFromTSRow(SSDataBlock* pBlock, STsdbReader* pReader, STSRow*
return TSDB_CODE_SUCCESS;
}
-int32_t doAppendRowFromFileBlock(SSDataBlock* pResBlock, STsdbReader* pReader, SBlockData* pBlockData, int32_t rowIndex) {
+int32_t doAppendRowFromFileBlock(SSDataBlock* pResBlock, STsdbReader* pReader, SBlockData* pBlockData,
+ int32_t rowIndex) {
int32_t i = 0, j = 0;
int32_t outputRowIndex = pResBlock->info.rows;
@@ -3111,18 +3201,24 @@ int32_t doAppendRowFromFileBlock(SSDataBlock* pResBlock, STsdbReader* pReader, S
}
SColVal cv = {0};
- int32_t numOfInputCols = taosArrayGetSize(pBlockData->aIdx);
- int32_t numOfOutputCols = blockDataGetNumOfCols(pResBlock);
+ int32_t numOfInputCols = pBlockData->aIdx->size;
+ int32_t numOfOutputCols = pResBlock->pDataBlock->size;
while (i < numOfOutputCols && j < numOfInputCols) {
- SColumnInfoData* pCol = taosArrayGet(pResBlock->pDataBlock, i);
+ SColumnInfoData* pCol = TARRAY_GET_ELEM(pResBlock->pDataBlock, i);
SColData* pData = tBlockDataGetColDataByIdx(pBlockData, j);
+ if (pData->cid < pCol->info.colId) {
+ j += 1;
+ continue;
+ }
+
if (pData->cid == pCol->info.colId) {
tColDataGetValue(pData, rowIndex, &cv);
doCopyColVal(pCol, outputRowIndex, i, &cv, pSupInfo);
j += 1;
- } else { // the specified column does not exist in file block, fill with null data
+ } else if (pData->cid > pCol->info.colId) {
+ // the specified column does not exist in file block, fill with null data
colDataAppendNULL(pCol, outputRowIndex);
}
@@ -3175,7 +3271,7 @@ int32_t tsdbSetTableId(STsdbReader* pReader, int64_t uid) {
ASSERT(pReader != NULL);
taosHashClear(pReader->status.pTableMap);
- STableBlockScanInfo info = {.lastKey = 0, .uid = uid, .indexInBlockL = DEFAULT_ROW_INDEX_VAL};
+ STableBlockScanInfo info = {.lastKey = 0, .uid = uid};
taosHashPut(pReader->status.pTableMap, &info.uid, sizeof(uint64_t), &info, sizeof(info));
return TDB_CODE_SUCCESS;
}
@@ -3196,10 +3292,30 @@ void* tsdbGetIvtIdx(SMeta* pMeta) {
uint64_t getReaderMaxVersion(STsdbReader* pReader) { return pReader->verRange.maxVer; }
+static int32_t doOpenReaderImpl(STsdbReader* pReader) {
+ SDataBlockIter* pBlockIter = &pReader->status.blockIter;
+
+ initFilesetIterator(&pReader->status.fileIter, pReader->pReadSnap->fs.aDFileSet, pReader);
+ resetDataBlockIterator(&pReader->status.blockIter, pReader->order);
+
+ // no data in files, let's try buffer in memory
+ if (pReader->status.fileIter.numOfFiles == 0) {
+ pReader->status.loadFromFile = false;
+ return TSDB_CODE_SUCCESS;
+ } else {
+ return initForFirstBlockInFile(pReader, pBlockIter);
+ }
+}
// ====================================== EXPOSED APIs ======================================
int32_t tsdbReaderOpen(SVnode* pVnode, SQueryTableDataCond* pCond, SArray* pTableList, STsdbReader** ppReader,
const char* idstr) {
+ STimeWindow window = pCond->twindows;
+ if (pCond->type == TIMEWINDOW_RANGE_EXTERNAL) {
+ pCond->twindows.skey += 1;
+ pCond->twindows.ekey -= 1;
+ }
+
int32_t code = tsdbReaderCreate(pVnode, pCond, ppReader, 4096, idstr);
if (code != TSDB_CODE_SUCCESS) {
goto _err;
@@ -3207,21 +3323,20 @@ int32_t tsdbReaderOpen(SVnode* pVnode, SQueryTableDataCond* pCond, SArray* pTabl
// check for query time window
STsdbReader* pReader = *ppReader;
- if (isEmptyQueryTimeWindow(&pReader->window)) {
+ if (isEmptyQueryTimeWindow(&pReader->window) && pCond->type == TIMEWINDOW_RANGE_CONTAINED) {
tsdbDebug("%p query window not overlaps with the data set, no result returned, %s", pReader, pReader->idStr);
return TSDB_CODE_SUCCESS;
}
if (pCond->type == TIMEWINDOW_RANGE_EXTERNAL) {
// update the SQueryTableDataCond to create inner reader
- STimeWindow w = pCond->twindows;
- int32_t order = pCond->order;
+ int32_t order = pCond->order;
if (order == TSDB_ORDER_ASC) {
- pCond->twindows.ekey = pCond->twindows.skey;
+ pCond->twindows.ekey = window.skey;
pCond->twindows.skey = INT64_MIN;
pCond->order = TSDB_ORDER_DESC;
} else {
- pCond->twindows.skey = pCond->twindows.ekey;
+ pCond->twindows.skey = window.ekey;
pCond->twindows.ekey = INT64_MAX;
pCond->order = TSDB_ORDER_ASC;
}
@@ -3233,27 +3348,39 @@ int32_t tsdbReaderOpen(SVnode* pVnode, SQueryTableDataCond* pCond, SArray* pTabl
}
if (order == TSDB_ORDER_ASC) {
- pCond->twindows.skey = w.ekey;
+ pCond->twindows.skey = window.ekey;
pCond->twindows.ekey = INT64_MAX;
} else {
pCond->twindows.skey = INT64_MIN;
- pCond->twindows.ekey = w.ekey;
+ pCond->twindows.ekey = window.ekey;
}
+ pCond->order = order;
+
code = tsdbReaderCreate(pVnode, pCond, &pReader->innerReader[1], 1, idstr);
if (code != TSDB_CODE_SUCCESS) {
goto _err;
}
}
+ // NOTE: the endVersion in pCond is the data version not schema version, so pCond->endVersion is not correct here.
if (pCond->suid != 0) {
pReader->pSchema = metaGetTbTSchema(pReader->pTsdb->pVnode->pMeta, pReader->suid, -1);
+ if (pReader->pSchema == NULL) {
+ tsdbError("failed to get table schema, suid:%" PRIu64 ", ver:%" PRId64 " , %s", pReader->suid, -1,
+ pReader->idStr);
+ }
} else if (taosArrayGetSize(pTableList) > 0) {
STableKeyInfo* pKey = taosArrayGet(pTableList, 0);
pReader->pSchema = metaGetTbTSchema(pReader->pTsdb->pVnode->pMeta, pKey->uid, -1);
+ if (pReader->pSchema == NULL) {
+ tsdbError("failed to get table schema, uid:%" PRIu64 ", ver:%" PRId64 " , %s", pKey->uid, -1, pReader->idStr);
+ }
}
+ STsdbReader* p = pReader->innerReader[0] != NULL ? pReader->innerReader[0] : pReader;
+
int32_t numOfTables = taosArrayGetSize(pTableList);
- pReader->status.pTableMap = createDataBlockScanInfo(pReader, pTableList->pData, numOfTables);
+ pReader->status.pTableMap = createDataBlockScanInfo(p, pTableList->pData, numOfTables);
if (pReader->status.pTableMap == NULL) {
tsdbReaderClose(pReader);
*ppReader = NULL;
@@ -3262,46 +3389,46 @@ int32_t tsdbReaderOpen(SVnode* pVnode, SQueryTableDataCond* pCond, SArray* pTabl
goto _err;
}
- code = tsdbTakeReadSnap(pReader->pTsdb, &pReader->pReadSnap);
+ code = tsdbTakeReadSnap(pReader->pTsdb, &pReader->pReadSnap, pReader->idStr);
if (code != TSDB_CODE_SUCCESS) {
goto _err;
}
if (pReader->type == TIMEWINDOW_RANGE_CONTAINED) {
- SDataBlockIter* pBlockIter = &pReader->status.blockIter;
-
- initFilesetIterator(&pReader->status.fileIter, pReader->pReadSnap->fs.aDFileSet, pReader);
- resetDataBlockIterator(&pReader->status.blockIter, pReader->order, pReader->status.pTableMap);
-
- // no data in files, let's try buffer in memory
- if (pReader->status.fileIter.numOfFiles == 0) {
- pReader->status.loadFromFile = false;
- } else {
- code = initForFirstBlockInFile(pReader, pBlockIter);
- if (code != TSDB_CODE_SUCCESS) {
- return code;
- }
+ code = doOpenReaderImpl(pReader);
+ if (code != TSDB_CODE_SUCCESS) {
+ return code;
}
} else {
- STsdbReader* pPrevReader = pReader->innerReader[0];
- SDataBlockIter* pBlockIter = &pPrevReader->status.blockIter;
-
- code = tsdbTakeReadSnap(pPrevReader->pTsdb, &pPrevReader->pReadSnap);
+ STsdbReader* pPrevReader = pReader->innerReader[0];
+ STsdbReader* pNextReader = pReader->innerReader[1];
+
+ // we need only one row
+ pPrevReader->capacity = 1;
+ pPrevReader->status.pTableMap = pReader->status.pTableMap;
+ pPrevReader->pSchema = pReader->pSchema;
+ pPrevReader->pMemSchema = pReader->pMemSchema;
+ pPrevReader->pReadSnap = pReader->pReadSnap;
+
+ pNextReader->capacity = 1;
+ pNextReader->status.pTableMap = pReader->status.pTableMap;
+ pNextReader->pSchema = pReader->pSchema;
+ pNextReader->pMemSchema = pReader->pMemSchema;
+ pNextReader->pReadSnap = pReader->pReadSnap;
+
+ code = doOpenReaderImpl(pPrevReader);
if (code != TSDB_CODE_SUCCESS) {
- goto _err;
+ return code;
}
- initFilesetIterator(&pPrevReader->status.fileIter, pPrevReader->pReadSnap->fs.aDFileSet, pPrevReader);
- resetDataBlockIterator(&pPrevReader->status.blockIter, pPrevReader->order, pReader->status.pTableMap);
+ code = doOpenReaderImpl(pNextReader);
+ if (code != TSDB_CODE_SUCCESS) {
+ return code;
+ }
- // no data in files, let's try buffer in memory
- if (pPrevReader->status.fileIter.numOfFiles == 0) {
- pPrevReader->status.loadFromFile = false;
- } else {
- code = initForFirstBlockInFile(pPrevReader, pBlockIter);
- if (code != TSDB_CODE_SUCCESS) {
- return code;
- }
+ code = doOpenReaderImpl(pReader);
+ if (code != TSDB_CODE_SUCCESS) {
+ return code;
}
}
@@ -3318,8 +3445,28 @@ void tsdbReaderClose(STsdbReader* pReader) {
return;
}
+ {
+ if (pReader->innerReader[0] != NULL) {
+ STsdbReader* p = pReader->innerReader[0];
+
+ p->status.pTableMap = NULL;
+ p->pReadSnap = NULL;
+ p->pSchema = NULL;
+ p->pMemSchema = NULL;
+
+ p = pReader->innerReader[1];
+
+ p->status.pTableMap = NULL;
+ p->pReadSnap = NULL;
+ p->pSchema = NULL;
+ p->pMemSchema = NULL;
+
+ tsdbReaderClose(pReader->innerReader[0]);
+ tsdbReaderClose(pReader->innerReader[1]);
+ }
+ }
+
SBlockLoadSuppInfo* pSupInfo = &pReader->suppInfo;
- tsdbUntakeReadSnap(pReader->pTsdb, pReader->pReadSnap);
taosMemoryFreeClear(pSupInfo->plist);
taosMemoryFree(pSupInfo->colIds);
@@ -3330,6 +3477,7 @@ void tsdbReaderClose(STsdbReader* pReader) {
taosMemoryFreeClear(pSupInfo->buildBuf[i]);
}
}
+
taosMemoryFree(pSupInfo->buildBuf);
tBlockDataDestroy(&pReader->status.fileBlockData, true);
@@ -3343,23 +3491,31 @@ void tsdbReaderClose(STsdbReader* pReader) {
tsdbDataFReaderClose(&pReader->pFileReader);
}
+ tsdbUntakeReadSnap(pReader->pTsdb, pReader->pReadSnap, pReader->idStr);
+
+ taosMemoryFree(pReader->status.uidCheckInfo.tableUidList);
+ SIOCostSummary* pCost = &pReader->cost;
+
SFilesetIter* pFilesetIter = &pReader->status.fileIter;
if (pFilesetIter->pLastBlockReader != NULL) {
- tBlockDataDestroy(&pFilesetIter->pLastBlockReader->lastBlockData, true);
- taosArrayDestroy(pFilesetIter->pLastBlockReader->pBlockL);
- taosMemoryFree(pFilesetIter->pLastBlockReader);
- }
+ SLastBlockReader* pLReader = pFilesetIter->pLastBlockReader;
+ tMergeTreeClose(&pLReader->mergeTree);
- SIOCostSummary* pCost = &pReader->cost;
+ getLastBlockLoadInfo(pLReader->pInfo, &pCost->lastBlockLoad, &pCost->lastBlockLoadTime);
+
+ pLReader->pInfo = destroyLastBlockLoadInfo(pLReader->pInfo);
+ taosMemoryFree(pLReader);
+ }
- tsdbDebug("%p :io-cost summary: head-file:%" PRIu64 ", head-file time:%.2f ms, SMA:%" PRId64
- " SMA-time:%.2f ms, fileBlocks:%" PRId64
- ", fileBlocks-time:%.2f ms, "
- "build in-memory-block-time:%.2f ms, lastBlocks:%" PRId64
- ", lastBlocks-time:%.2f ms, STableBlockScanInfo size:%.2f Kb %s",
- pReader, pCost->headFileLoad, pCost->headFileLoadTime, pCost->smaDataLoad, pCost->smaLoadTime,
- pCost->numOfBlocks, pCost->blockLoadTime, pCost->buildmemBlock, pCost->lastBlockLoad,
- pCost->lastBlockLoadTime, numOfTables * sizeof(STableBlockScanInfo) / 1000.0, pReader->idStr);
+ tsdbDebug(
+ "%p :io-cost summary: head-file:%" PRIu64 ", head-file time:%.2f ms, SMA:%" PRId64
+ " SMA-time:%.2f ms, fileBlocks:%" PRId64
+ ", fileBlocks-load-time:%.2f ms, "
+ "build in-memory-block-time:%.2f ms, lastBlocks:%" PRId64 ", lastBlocks-time:%.2f ms, composed-blocks:%" PRId64
+ ", composed-blocks-time:%.2fms, STableBlockScanInfo size:%.2f Kb %s",
+ pReader, pCost->headFileLoad, pCost->headFileLoadTime, pCost->smaDataLoad, pCost->smaLoadTime, pCost->numOfBlocks,
+ pCost->blockLoadTime, pCost->buildmemBlock, pCost->lastBlockLoad, pCost->lastBlockLoadTime, pCost->composedBlocks,
+ pCost->buildComposedBlockTime, numOfTables * sizeof(STableBlockScanInfo) / 1000.0, pReader->idStr);
taosMemoryFree(pReader->idStr);
taosMemoryFree(pReader->pSchema);
@@ -3401,32 +3557,32 @@ bool tsdbNextDataBlock(STsdbReader* pReader) {
return false;
}
- if (pReader->innerReader[0] != NULL) {
+ if (pReader->innerReader[0] != NULL && pReader->step == 0) {
bool ret = doTsdbNextDataBlock(pReader->innerReader[0]);
+ resetDataBlockScanInfo(pReader->innerReader[0]->status.pTableMap, pReader->innerReader[0]->window.ekey);
+ pReader->step = EXTERNAL_ROWS_PREV;
+
if (ret) {
- pReader->step = EXTERNAL_ROWS_PREV;
return ret;
}
+ }
- tsdbReaderClose(pReader->innerReader[0]);
- pReader->innerReader[0] = NULL;
+ if (pReader->step == EXTERNAL_ROWS_PREV) {
+ pReader->step = EXTERNAL_ROWS_MAIN;
}
- pReader->step = EXTERNAL_ROWS_MAIN;
bool ret = doTsdbNextDataBlock(pReader);
if (ret) {
return ret;
}
- if (pReader->innerReader[1] != NULL) {
+ if (pReader->innerReader[1] != NULL && pReader->step == EXTERNAL_ROWS_MAIN) {
+ resetDataBlockScanInfo(pReader->innerReader[1]->status.pTableMap, pReader->window.ekey);
bool ret1 = doTsdbNextDataBlock(pReader->innerReader[1]);
+ pReader->step = EXTERNAL_ROWS_NEXT;
if (ret1) {
- pReader->step = EXTERNAL_ROWS_NEXT;
return ret1;
}
-
- tsdbReaderClose(pReader->innerReader[1]);
- pReader->innerReader[1] = NULL;
}
return false;
@@ -3470,12 +3626,12 @@ int32_t tsdbRetrieveDatablockSMA(STsdbReader* pReader, SColumnDataAgg*** pBlockS
SFileDataBlockInfo* pFBlock = getCurrentBlockInfo(&pReader->status.blockIter);
- SBlock* pBlock = getCurrentBlock(&pReader->status.blockIter);
- int64_t stime = taosGetTimestampUs();
+ SDataBlk* pBlock = getCurrentBlock(&pReader->status.blockIter);
+ int64_t stime = taosGetTimestampUs();
SBlockLoadSuppInfo* pSup = &pReader->suppInfo;
- if (tBlockHasSma(pBlock)) {
+ if (tDataBlkHasSma(pBlock)) {
code = tsdbReadBlockSma(pReader->pFileReader, pBlock, pSup->pColAgg);
if (code != TSDB_CODE_SUCCESS) {
tsdbDebug("vgId:%d, failed to load block SMA for uid %" PRIu64 ", code:%s, %s", 0, pFBlock->uid, tstrerror(code),
@@ -3541,14 +3697,7 @@ static SArray* doRetrieveDataBlock(STsdbReader* pReader) {
SFileDataBlockInfo* pFBlock = getCurrentBlockInfo(&pStatus->blockIter);
STableBlockScanInfo* pBlockScanInfo = taosHashGet(pStatus->pTableMap, &pFBlock->uid, sizeof(pFBlock->uid));
- tBlockDataReset(&pStatus->fileBlockData);
- int32_t code = tBlockDataInit(&pStatus->fileBlockData, pReader->suid, pBlockScanInfo->uid, pReader->pSchema);
- if (code != TSDB_CODE_SUCCESS) {
- terrno = code;
- return NULL;
- }
-
- code = doLoadFileBlockData(pReader, &pStatus->blockIter, &pStatus->fileBlockData);
+ int32_t code = doLoadFileBlockData(pReader, &pStatus->blockIter, &pStatus->fileBlockData, pBlockScanInfo->uid);
if (code != TSDB_CODE_SUCCESS) {
tBlockDataDestroy(&pStatus->fileBlockData, 1);
terrno = code;
@@ -3590,11 +3739,12 @@ int32_t tsdbReaderReset(STsdbReader* pReader, SQueryTableDataCond* pCond) {
tsdbDataFReaderClose(&pReader->pFileReader);
int32_t numOfTables = taosHashGetSize(pReader->status.pTableMap);
- tsdbDataFReaderClose(&pReader->pFileReader);
initFilesetIterator(&pReader->status.fileIter, pReader->pReadSnap->fs.aDFileSet, pReader);
- resetDataBlockIterator(&pReader->status.blockIter, pReader->order, pReader->status.pTableMap);
- resetDataBlockScanInfo(pReader->status.pTableMap);
+ resetDataBlockIterator(&pReader->status.blockIter, pReader->order);
+
+ int64_t ts = ASCENDING_TRAVERSE(pReader->order) ? pReader->window.skey - 1 : pReader->window.ekey + 1;
+ resetDataBlockScanInfo(pReader->status.pTableMap, ts);
int32_t code = 0;
SDataBlockIter* pBlockIter = &pReader->status.blockIter;
@@ -3611,8 +3761,10 @@ int32_t tsdbReaderReset(STsdbReader* pReader, SQueryTableDataCond* pCond) {
}
}
- tsdbDebug("%p reset reader, suid:%" PRIu64 ", numOfTables:%d, query range:%" PRId64 " - %" PRId64 " in query %s",
- pReader, pReader->suid, numOfTables, pReader->window.skey, pReader->window.ekey, pReader->idStr);
+ tsdbDebug("%p reset reader, suid:%" PRIu64 ", numOfTables:%d, skey:%" PRId64 ", query range:%" PRId64 " - %" PRId64
+ " in query %s",
+ pReader, pReader->suid, numOfTables, pCond->twindows.skey, pReader->window.skey, pReader->window.ekey,
+ pReader->idStr);
return code;
}
@@ -3652,7 +3804,7 @@ int32_t tsdbGetFileBlocksDistInfo(STsdbReader* pReader, STableBlockDistInfo* pTa
while (true) {
if (hasNext) {
- SBlock* pBlock = getCurrentBlock(pBlockIter);
+ SDataBlk* pBlock = getCurrentBlock(pBlockIter);
int32_t numOfRows = pBlock->nRow;
pTableBlockInfo->totalRows += numOfRows;
@@ -3757,7 +3909,7 @@ int32_t tsdbGetTableSchema(SVnode* pVnode, int64_t uid, STSchema** pSchema, int6
return TSDB_CODE_SUCCESS;
}
-int32_t tsdbTakeReadSnap(STsdb* pTsdb, STsdbReadSnap** ppSnap) {
+int32_t tsdbTakeReadSnap(STsdb* pTsdb, STsdbReadSnap** ppSnap, const char* idStr) {
int32_t code = 0;
// alloc
@@ -3800,12 +3952,12 @@ int32_t tsdbTakeReadSnap(STsdb* pTsdb, STsdbReadSnap** ppSnap) {
goto _exit;
}
- tsdbTrace("vgId:%d, take read snapshot", TD_VID(pTsdb->pVnode));
+ tsdbTrace("vgId:%d, take read snapshot, %s", TD_VID(pTsdb->pVnode), idStr);
_exit:
return code;
}
-void tsdbUntakeReadSnap(STsdb* pTsdb, STsdbReadSnap* pSnap) {
+void tsdbUntakeReadSnap(STsdb* pTsdb, STsdbReadSnap* pSnap, const char* idStr) {
if (pSnap) {
if (pSnap->pMem) {
tsdbUnrefMemTable(pSnap->pMem);
@@ -3818,6 +3970,5 @@ void tsdbUntakeReadSnap(STsdb* pTsdb, STsdbReadSnap* pSnap) {
tsdbFSUnref(pTsdb, &pSnap->fs);
taosMemoryFree(pSnap);
}
-
- tsdbTrace("vgId:%d, untake read snapshot", TD_VID(pTsdb->pVnode));
+ tsdbTrace("vgId:%d, untake read snapshot, %s", TD_VID(pTsdb->pVnode), idStr);
}
diff --git a/source/dnode/vnode/src/tsdb/tsdbReaderWriter.c b/source/dnode/vnode/src/tsdb/tsdbReaderWriter.c
index c8f3862071b3628fdefd26df58ea3cb01e80d302..fc577e39626ff18554938abfcf9c3f8498342c6a 100644
--- a/source/dnode/vnode/src/tsdb/tsdbReaderWriter.c
+++ b/source/dnode/vnode/src/tsdb/tsdbReaderWriter.c
@@ -15,743 +15,934 @@
#include "tsdb.h"
-// SDelFWriter ====================================================
-int32_t tsdbDelFWriterOpen(SDelFWriter **ppWriter, SDelFile *pFile, STsdb *pTsdb) {
- int32_t code = 0;
- char fname[TSDB_FILENAME_LEN];
- char hdr[TSDB_FHDR_SIZE] = {0};
- SDelFWriter *pDelFWriter;
- int64_t n;
+// =============== PAGE-WISE FILE ===============
+static int32_t tsdbOpenFile(const char *path, int32_t szPage, int32_t flag, STsdbFD **ppFD) {
+ int32_t code = 0;
+ STsdbFD *pFD;
- // alloc
- pDelFWriter = (SDelFWriter *)taosMemoryCalloc(1, sizeof(*pDelFWriter));
- if (pDelFWriter == NULL) {
+ *ppFD = NULL;
+
+ pFD = (STsdbFD *)taosMemoryCalloc(1, sizeof(*pFD) + strlen(path) + 1);
+ if (pFD == NULL) {
code = TSDB_CODE_OUT_OF_MEMORY;
- goto _err;
+ goto _exit;
}
- pDelFWriter->pTsdb = pTsdb;
- pDelFWriter->fDel = *pFile;
- tsdbDelFileName(pTsdb, pFile, fname);
- pDelFWriter->pWriteH = taosOpenFile(fname, TD_FILE_WRITE | TD_FILE_CREATE);
- if (pDelFWriter->pWriteH == NULL) {
+ pFD->path = (char *)&pFD[1];
+ strcpy(pFD->path, path);
+ pFD->szPage = szPage;
+ pFD->flag = flag;
+ pFD->pFD = taosOpenFile(path, flag);
+ if (pFD->pFD == NULL) {
code = TAOS_SYSTEM_ERROR(errno);
- goto _err;
+ goto _exit;
}
-
- // update header
- n = taosWriteFile(pDelFWriter->pWriteH, &hdr, TSDB_FHDR_SIZE);
- if (n < 0) {
+ pFD->szPage = szPage;
+ pFD->pgno = 0;
+ pFD->pBuf = taosMemoryCalloc(1, szPage);
+ if (pFD->pBuf == NULL) {
+ code = TSDB_CODE_OUT_OF_MEMORY;
+ taosMemoryFree(pFD);
+ goto _exit;
+ }
+ if (taosStatFile(path, &pFD->szFile, NULL) < 0) {
code = TAOS_SYSTEM_ERROR(errno);
- goto _err;
+ goto _exit;
}
+ ASSERT(pFD->szFile % szPage == 0);
+ pFD->szFile = pFD->szFile / szPage;
+ *ppFD = pFD;
- pDelFWriter->fDel.size = TSDB_FHDR_SIZE;
- pDelFWriter->fDel.offset = 0;
-
- *ppWriter = pDelFWriter;
+_exit:
return code;
+}
-_err:
- tsdbError("vgId:%d, failed to open del file writer since %s", TD_VID(pTsdb->pVnode), tstrerror(code));
- *ppWriter = NULL;
+static void tsdbCloseFile(STsdbFD **ppFD) {
+ STsdbFD *pFD = *ppFD;
+ taosMemoryFree(pFD->pBuf);
+ taosCloseFile(&pFD->pFD);
+ taosMemoryFree(pFD);
+ *ppFD = NULL;
+}
+
+static int32_t tsdbWriteFilePage(STsdbFD *pFD) {
+ int32_t code = 0;
+
+ if (pFD->pgno > 0) {
+ int64_t n = taosLSeekFile(pFD->pFD, PAGE_OFFSET(pFD->pgno, pFD->szPage), SEEK_SET);
+ if (n < 0) {
+ code = TAOS_SYSTEM_ERROR(errno);
+ goto _exit;
+ }
+
+ taosCalcChecksumAppend(0, pFD->pBuf, pFD->szPage);
+
+ n = taosWriteFile(pFD->pFD, pFD->pBuf, pFD->szPage);
+ if (n < 0) {
+ code = TAOS_SYSTEM_ERROR(errno);
+ goto _exit;
+ }
+
+ if (pFD->szFile < pFD->pgno) {
+ pFD->szFile = pFD->pgno;
+ }
+ }
+ pFD->pgno = 0;
+
+_exit:
return code;
}
-int32_t tsdbDelFWriterClose(SDelFWriter **ppWriter, int8_t sync) {
- int32_t code = 0;
- SDelFWriter *pWriter = *ppWriter;
- STsdb *pTsdb = pWriter->pTsdb;
+static int32_t tsdbReadFilePage(STsdbFD *pFD, int64_t pgno) {
+ int32_t code = 0;
- // sync
- if (sync && taosFsyncFile(pWriter->pWriteH) < 0) {
+ ASSERT(pgno <= pFD->szFile);
+
+ // seek
+ int64_t offset = PAGE_OFFSET(pgno, pFD->szPage);
+ int64_t n = taosLSeekFile(pFD->pFD, offset, SEEK_SET);
+ if (n < 0) {
code = TAOS_SYSTEM_ERROR(errno);
- goto _err;
+ goto _exit;
}
- // close
- if (taosCloseFile(&pWriter->pWriteH) < 0) {
+ // read
+ n = taosReadFile(pFD->pFD, pFD->pBuf, pFD->szPage);
+ if (n < 0) {
code = TAOS_SYSTEM_ERROR(errno);
- goto _err;
+ goto _exit;
+ } else if (n < pFD->szPage) {
+ code = TSDB_CODE_FILE_CORRUPTED;
+ goto _exit;
}
- for (int32_t iBuf = 0; iBuf < sizeof(pWriter->aBuf) / sizeof(uint8_t *); iBuf++) {
- tFree(pWriter->aBuf[iBuf]);
+ // check
+ if (!taosCheckChecksumWhole(pFD->pBuf, pFD->szPage)) {
+ code = TSDB_CODE_FILE_CORRUPTED;
+ goto _exit;
}
- taosMemoryFree(pWriter);
- *ppWriter = NULL;
- return code;
+ pFD->pgno = pgno;
-_err:
- tsdbError("vgId:%d, failed to close del file writer since %s", TD_VID(pTsdb->pVnode), tstrerror(code));
+_exit:
return code;
}
-int32_t tsdbWriteDelData(SDelFWriter *pWriter, SArray *aDelData, SDelIdx *pDelIdx) {
+static int32_t tsdbWriteFile(STsdbFD *pFD, int64_t offset, uint8_t *pBuf, int64_t size) {
int32_t code = 0;
- int64_t size;
- int64_t n;
+ int64_t fOffset = LOGIC_TO_FILE_OFFSET(offset, pFD->szPage);
+ int64_t pgno = OFFSET_PGNO(fOffset, pFD->szPage);
+ int64_t bOffset = fOffset % pFD->szPage;
+ int64_t n = 0;
+
+ do {
+ if (pFD->pgno != pgno) {
+ code = tsdbWriteFilePage(pFD);
+ if (code) goto _exit;
+
+ if (pgno <= pFD->szFile) {
+ code = tsdbReadFilePage(pFD, pgno);
+ if (code) goto _exit;
+ } else {
+ pFD->pgno = pgno;
+ }
+ }
- // prepare
- size = sizeof(uint32_t);
- for (int32_t iDelData = 0; iDelData < taosArrayGetSize(aDelData); iDelData++) {
- size += tPutDelData(NULL, taosArrayGet(aDelData, iDelData));
- }
- size += sizeof(TSCKSUM);
+ int64_t nWrite = TMIN(PAGE_CONTENT_SIZE(pFD->szPage) - bOffset, size - n);
+ memcpy(pFD->pBuf + bOffset, pBuf + n, nWrite);
- // alloc
- code = tRealloc(&pWriter->aBuf[0], size);
- if (code) goto _err;
+ pgno++;
+ bOffset = 0;
+ n += nWrite;
+ } while (n < size);
- // build
- n = 0;
- n += tPutU32(pWriter->aBuf[0] + n, TSDB_FILE_DLMT);
- for (int32_t iDelData = 0; iDelData < taosArrayGetSize(aDelData); iDelData++) {
- n += tPutDelData(pWriter->aBuf[0] + n, taosArrayGet(aDelData, iDelData));
- }
- taosCalcChecksumAppend(0, pWriter->aBuf[0], size);
+_exit:
+ return code;
+}
- ASSERT(n + sizeof(TSCKSUM) == size);
+static int32_t tsdbReadFile(STsdbFD *pFD, int64_t offset, uint8_t *pBuf, int64_t size) {
+ int32_t code = 0;
+ int64_t n = 0;
+ int64_t fOffset = LOGIC_TO_FILE_OFFSET(offset, pFD->szPage);
+ int64_t pgno = OFFSET_PGNO(fOffset, pFD->szPage);
+ int32_t szPgCont = PAGE_CONTENT_SIZE(pFD->szPage);
+ int64_t bOffset = fOffset % pFD->szPage;
+
+ ASSERT(pgno && pgno <= pFD->szFile);
+ ASSERT(bOffset < szPgCont);
+
+ while (n < size) {
+ if (pFD->pgno != pgno) {
+ code = tsdbReadFilePage(pFD, pgno);
+ if (code) goto _exit;
+ }
- // write
- n = taosWriteFile(pWriter->pWriteH, pWriter->aBuf[0], size);
- if (n < 0) {
- code = TAOS_SYSTEM_ERROR(errno);
- goto _err;
+ int64_t nRead = TMIN(szPgCont - bOffset, size - n);
+ memcpy(pBuf + n, pFD->pBuf + bOffset, nRead);
+
+ n += nRead;
+ pgno++;
+ bOffset = 0;
}
- ASSERT(n == size);
+_exit:
+ return code;
+}
- // update
- pDelIdx->offset = pWriter->fDel.size;
- pDelIdx->size = size;
- pWriter->fDel.size += size;
+static int32_t tsdbFsyncFile(STsdbFD *pFD) {
+ int32_t code = 0;
- return code;
+ code = tsdbWriteFilePage(pFD);
+ if (code) goto _exit;
-_err:
- tsdbError("vgId:%d, failed to write del data since %s", TD_VID(pWriter->pTsdb->pVnode), tstrerror(code));
+ if (taosFsyncFile(pFD->pFD) < 0) {
+ code = TAOS_SYSTEM_ERROR(errno);
+ goto _exit;
+ }
+
+_exit:
return code;
}
-int32_t tsdbWriteDelIdx(SDelFWriter *pWriter, SArray *aDelIdx) {
- int32_t code = 0;
- int64_t size;
- int64_t n;
- SDelIdx *pDelIdx;
+// SDataFWriter ====================================================
+int32_t tsdbDataFWriterOpen(SDataFWriter **ppWriter, STsdb *pTsdb, SDFileSet *pSet) {
+ int32_t code = 0;
+ int32_t flag;
+ int64_t n;
+ int32_t szPage = pTsdb->pVnode->config.tsdbPageSize;
+ SDataFWriter *pWriter = NULL;
+ char fname[TSDB_FILENAME_LEN];
+ char hdr[TSDB_FHDR_SIZE] = {0};
- // prepare
- size = sizeof(uint32_t);
- for (int32_t iDelIdx = 0; iDelIdx < taosArrayGetSize(aDelIdx); iDelIdx++) {
- size += tPutDelIdx(NULL, taosArrayGet(aDelIdx, iDelIdx));
+ // alloc
+ pWriter = taosMemoryCalloc(1, sizeof(*pWriter));
+ if (pWriter == NULL) {
+ code = TSDB_CODE_OUT_OF_MEMORY;
+ goto _err;
+ }
+ pWriter->pTsdb = pTsdb;
+ pWriter->wSet = (SDFileSet){.diskId = pSet->diskId,
+ .fid = pSet->fid,
+ .pHeadF = &pWriter->fHead,
+ .pDataF = &pWriter->fData,
+ .pSmaF = &pWriter->fSma,
+ .nSttF = pSet->nSttF};
+ pWriter->fHead = *pSet->pHeadF;
+ pWriter->fData = *pSet->pDataF;
+ pWriter->fSma = *pSet->pSmaF;
+ for (int8_t iStt = 0; iStt < pSet->nSttF; iStt++) {
+ pWriter->wSet.aSttF[iStt] = &pWriter->fStt[iStt];
+ pWriter->fStt[iStt] = *pSet->aSttF[iStt];
}
- size += sizeof(TSCKSUM);
- // alloc
- code = tRealloc(&pWriter->aBuf[0], size);
+ // head
+ flag = TD_FILE_READ | TD_FILE_WRITE | TD_FILE_CREATE | TD_FILE_TRUNC;
+ tsdbHeadFileName(pTsdb, pWriter->wSet.diskId, pWriter->wSet.fid, &pWriter->fHead, fname);
+ code = tsdbOpenFile(fname, szPage, flag, &pWriter->pHeadFD);
if (code) goto _err;
- // build
- n = 0;
- n += tPutU32(pWriter->aBuf[0] + n, TSDB_FILE_DLMT);
- for (int32_t iDelIdx = 0; iDelIdx < taosArrayGetSize(aDelIdx); iDelIdx++) {
- n += tPutDelIdx(pWriter->aBuf[0] + n, taosArrayGet(aDelIdx, iDelIdx));
+ code = tsdbWriteFile(pWriter->pHeadFD, 0, hdr, TSDB_FHDR_SIZE);
+ if (code) goto _err;
+ pWriter->fHead.size += TSDB_FHDR_SIZE;
+
+ // data
+ if (pWriter->fData.size == 0) {
+ flag = TD_FILE_READ | TD_FILE_WRITE | TD_FILE_CREATE | TD_FILE_TRUNC;
+ } else {
+ flag = TD_FILE_READ | TD_FILE_WRITE;
+ }
+ tsdbDataFileName(pTsdb, pWriter->wSet.diskId, pWriter->wSet.fid, &pWriter->fData, fname);
+ code = tsdbOpenFile(fname, szPage, flag, &pWriter->pDataFD);
+ if (code) goto _err;
+ if (pWriter->fData.size == 0) {
+ code = tsdbWriteFile(pWriter->pDataFD, 0, hdr, TSDB_FHDR_SIZE);
+ if (code) goto _err;
+ pWriter->fData.size += TSDB_FHDR_SIZE;
}
- taosCalcChecksumAppend(0, pWriter->aBuf[0], size);
- ASSERT(n + sizeof(TSCKSUM) == size);
+ // sma
+ if (pWriter->fSma.size == 0) {
+ flag = TD_FILE_READ | TD_FILE_WRITE | TD_FILE_CREATE | TD_FILE_TRUNC;
+ } else {
+ flag = TD_FILE_READ | TD_FILE_WRITE;
+ }
+ tsdbSmaFileName(pTsdb, pWriter->wSet.diskId, pWriter->wSet.fid, &pWriter->fSma, fname);
+ code = tsdbOpenFile(fname, szPage, flag, &pWriter->pSmaFD);
+ if (code) goto _err;
+ if (pWriter->fSma.size == 0) {
+ code = tsdbWriteFile(pWriter->pSmaFD, 0, hdr, TSDB_FHDR_SIZE);
+ if (code) goto _err;
- // write
- n = taosWriteFile(pWriter->pWriteH, pWriter->aBuf[0], size);
- if (n < 0) {
- code = TAOS_SYSTEM_ERROR(errno);
- goto _err;
+ pWriter->fSma.size += TSDB_FHDR_SIZE;
}
- // update
- pWriter->fDel.offset = pWriter->fDel.size;
- pWriter->fDel.size += size;
+ // stt
+ ASSERT(pWriter->fStt[pSet->nSttF - 1].size == 0);
+ flag = TD_FILE_READ | TD_FILE_WRITE | TD_FILE_CREATE | TD_FILE_TRUNC;
+ tsdbSttFileName(pTsdb, pWriter->wSet.diskId, pWriter->wSet.fid, &pWriter->fStt[pSet->nSttF - 1], fname);
+ code = tsdbOpenFile(fname, szPage, flag, &pWriter->pSttFD);
+ if (code) goto _err;
+ code = tsdbWriteFile(pWriter->pSttFD, 0, hdr, TSDB_FHDR_SIZE);
+ if (code) goto _err;
+ pWriter->fStt[pWriter->wSet.nSttF - 1].size += TSDB_FHDR_SIZE;
+ *ppWriter = pWriter;
return code;
_err:
- tsdbError("vgId:%d, write del idx failed since %s", TD_VID(pWriter->pTsdb->pVnode), tstrerror(code));
+ tsdbError("vgId:%d, tsdb data file writer open failed since %s", TD_VID(pTsdb->pVnode), tstrerror(code));
+ *ppWriter = NULL;
return code;
}
-int32_t tsdbUpdateDelFileHdr(SDelFWriter *pWriter) {
+int32_t tsdbDataFWriterClose(SDataFWriter **ppWriter, int8_t sync) {
int32_t code = 0;
- char hdr[TSDB_FHDR_SIZE];
- int64_t size = TSDB_FHDR_SIZE;
- int64_t n;
+ STsdb *pTsdb = NULL;
- // build
- memset(hdr, 0, size);
- tPutDelFile(hdr, &pWriter->fDel);
- taosCalcChecksumAppend(0, hdr, size);
+ if (*ppWriter == NULL) goto _exit;
- // seek
- if (taosLSeekFile(pWriter->pWriteH, 0, SEEK_SET) < 0) {
- code = TAOS_SYSTEM_ERROR(errno);
- goto _err;
- }
+ pTsdb = (*ppWriter)->pTsdb;
+ if (sync) {
+ code = tsdbFsyncFile((*ppWriter)->pHeadFD);
+ if (code) goto _err;
- // write
- n = taosWriteFile(pWriter->pWriteH, hdr, size);
- if (n < 0) {
- code = TAOS_SYSTEM_ERROR(errno);
- goto _err;
+ code = tsdbFsyncFile((*ppWriter)->pDataFD);
+ if (code) goto _err;
+
+ code = tsdbFsyncFile((*ppWriter)->pSmaFD);
+ if (code) goto _err;
+
+ code = tsdbFsyncFile((*ppWriter)->pSttFD);
+ if (code) goto _err;
}
+ tsdbCloseFile(&(*ppWriter)->pHeadFD);
+ tsdbCloseFile(&(*ppWriter)->pDataFD);
+ tsdbCloseFile(&(*ppWriter)->pSmaFD);
+ tsdbCloseFile(&(*ppWriter)->pSttFD);
+
+ for (int32_t iBuf = 0; iBuf < sizeof((*ppWriter)->aBuf) / sizeof(uint8_t *); iBuf++) {
+ tFree((*ppWriter)->aBuf[iBuf]);
+ }
+ taosMemoryFree(*ppWriter);
+_exit:
+ *ppWriter = NULL;
return code;
_err:
- tsdbError("vgId:%d, update del file hdr failed since %s", TD_VID(pWriter->pTsdb->pVnode), tstrerror(code));
+ tsdbError("vgId:%d, data file writer close failed since %s", TD_VID(pTsdb->pVnode), tstrerror(code));
return code;
}
-// SDelFReader ====================================================
-struct SDelFReader {
- STsdb *pTsdb;
- SDelFile fDel;
- TdFilePtr pReadH;
+int32_t tsdbUpdateDFileSetHeader(SDataFWriter *pWriter) {
+ int32_t code = 0;
+ int64_t n;
+ char hdr[TSDB_FHDR_SIZE];
- uint8_t *aBuf[1];
-};
+ // head ==============
+ memset(hdr, 0, TSDB_FHDR_SIZE);
+ tPutHeadFile(hdr, &pWriter->fHead);
+ code = tsdbWriteFile(pWriter->pHeadFD, 0, hdr, TSDB_FHDR_SIZE);
+ if (code) goto _err;
-int32_t tsdbDelFReaderOpen(SDelFReader **ppReader, SDelFile *pFile, STsdb *pTsdb) {
- int32_t code = 0;
- char fname[TSDB_FILENAME_LEN];
- SDelFReader *pDelFReader;
- int64_t n;
+ // data ==============
+ memset(hdr, 0, TSDB_FHDR_SIZE);
+ tPutDataFile(hdr, &pWriter->fData);
+ code = tsdbWriteFile(pWriter->pDataFD, 0, hdr, TSDB_FHDR_SIZE);
+ if (code) goto _err;
- // alloc
- pDelFReader = (SDelFReader *)taosMemoryCalloc(1, sizeof(*pDelFReader));
- if (pDelFReader == NULL) {
- code = TSDB_CODE_OUT_OF_MEMORY;
- goto _err;
- }
-
- // open impl
- pDelFReader->pTsdb = pTsdb;
- pDelFReader->fDel = *pFile;
+ // sma ==============
+ memset(hdr, 0, TSDB_FHDR_SIZE);
+ tPutSmaFile(hdr, &pWriter->fSma);
+ code = tsdbWriteFile(pWriter->pSmaFD, 0, hdr, TSDB_FHDR_SIZE);
+ if (code) goto _err;
- tsdbDelFileName(pTsdb, pFile, fname);
- pDelFReader->pReadH = taosOpenFile(fname, TD_FILE_READ);
- if (pDelFReader->pReadH == NULL) {
- code = TAOS_SYSTEM_ERROR(errno);
- taosMemoryFree(pDelFReader);
- goto _err;
- }
+ // stt ==============
+ memset(hdr, 0, TSDB_FHDR_SIZE);
+ tPutSttFile(hdr, &pWriter->fStt[pWriter->wSet.nSttF - 1]);
+ code = tsdbWriteFile(pWriter->pSttFD, 0, hdr, TSDB_FHDR_SIZE);
+ if (code) goto _err;
-_exit:
- *ppReader = pDelFReader;
return code;
_err:
- tsdbError("vgId:%d, del file reader open failed since %s", TD_VID(pTsdb->pVnode), tstrerror(code));
- *ppReader = NULL;
+ tsdbError("vgId:%d, update DFileSet header failed since %s", TD_VID(pWriter->pTsdb->pVnode), tstrerror(code));
return code;
}
-int32_t tsdbDelFReaderClose(SDelFReader **ppReader) {
- int32_t code = 0;
- SDelFReader *pReader = *ppReader;
+int32_t tsdbWriteBlockIdx(SDataFWriter *pWriter, SArray *aBlockIdx) {
+ int32_t code = 0;
+ SHeadFile *pHeadFile = &pWriter->fHead;
+ int64_t size;
+ int64_t n;
- if (pReader) {
- if (taosCloseFile(&pReader->pReadH) < 0) {
- code = TAOS_SYSTEM_ERROR(errno);
- goto _exit;
- }
- for (int32_t iBuf = 0; iBuf < sizeof(pReader->aBuf) / sizeof(uint8_t *); iBuf++) {
- tFree(pReader->aBuf[iBuf]);
- }
- taosMemoryFree(pReader);
+ // check
+ if (taosArrayGetSize(aBlockIdx) == 0) {
+ pHeadFile->offset = pHeadFile->size;
+ goto _exit;
}
- *ppReader = NULL;
-
-_exit:
- return code;
-}
-
-int32_t tsdbReadDelData(SDelFReader *pReader, SDelIdx *pDelIdx, SArray *aDelData) {
- int32_t code = 0;
- int64_t offset = pDelIdx->offset;
- int64_t size = pDelIdx->size;
- int64_t n;
-
- taosArrayClear(aDelData);
- // seek
- if (taosLSeekFile(pReader->pReadH, offset, SEEK_SET) < 0) {
- code = TAOS_SYSTEM_ERROR(errno);
- goto _err;
+ // prepare
+ size = 0;
+ for (int32_t iBlockIdx = 0; iBlockIdx < taosArrayGetSize(aBlockIdx); iBlockIdx++) {
+ size += tPutBlockIdx(NULL, taosArrayGet(aBlockIdx, iBlockIdx));
}
// alloc
- code = tRealloc(&pReader->aBuf[0], size);
+ code = tRealloc(&pWriter->aBuf[0], size);
if (code) goto _err;
- // read
- n = taosReadFile(pReader->pReadH, pReader->aBuf[0], size);
- if (n < 0) {
- code = TAOS_SYSTEM_ERROR(errno);
- goto _err;
- } else if (n < size) {
- code = TSDB_CODE_FILE_CORRUPTED;
- goto _err;
- }
-
- // check
- if (!taosCheckChecksumWhole(pReader->aBuf[0], size)) {
- code = TSDB_CODE_FILE_CORRUPTED;
- goto _err;
- }
-
- // // decode
+ // build
n = 0;
-
- uint32_t delimiter;
- n += tGetU32(pReader->aBuf[0] + n, &delimiter);
- while (n < size - sizeof(TSCKSUM)) {
- SDelData delData;
- n += tGetDelData(pReader->aBuf[0] + n, &delData);
-
- if (taosArrayPush(aDelData, &delData) == NULL) {
- code = TSDB_CODE_OUT_OF_MEMORY;
- goto _err;
- }
+ for (int32_t iBlockIdx = 0; iBlockIdx < taosArrayGetSize(aBlockIdx); iBlockIdx++) {
+ n += tPutBlockIdx(pWriter->aBuf[0] + n, taosArrayGet(aBlockIdx, iBlockIdx));
}
+ ASSERT(n == size);
+
+ // write
+ code = tsdbWriteFile(pWriter->pHeadFD, pHeadFile->size, pWriter->aBuf[0], size);
+ if (code) goto _err;
- ASSERT(n == size - sizeof(TSCKSUM));
+ // update
+ pHeadFile->offset = pHeadFile->size;
+ pHeadFile->size += size;
+_exit:
+ // tsdbTrace("vgId:%d write block idx, offset:%" PRId64 " size:%" PRId64 " nBlockIdx:%d",
+ // TD_VID(pWriter->pTsdb->pVnode),
+ // pHeadFile->offset, size, taosArrayGetSize(aBlockIdx));
return code;
_err:
- tsdbError("vgId:%d, read del data failed since %s", TD_VID(pReader->pTsdb->pVnode), tstrerror(code));
+ tsdbError("vgId:%d, write block idx failed since %s", TD_VID(pWriter->pTsdb->pVnode), tstrerror(code));
return code;
}
-int32_t tsdbReadDelIdx(SDelFReader *pReader, SArray *aDelIdx) {
- int32_t code = 0;
- int32_t n;
- int64_t offset = pReader->fDel.offset;
- int64_t size = pReader->fDel.size - offset;
-
- taosArrayClear(aDelIdx);
+int32_t tsdbWriteDataBlk(SDataFWriter *pWriter, SMapData *mDataBlk, SBlockIdx *pBlockIdx) {
+ int32_t code = 0;
+ SHeadFile *pHeadFile = &pWriter->fHead;
+ int64_t size;
+ int64_t n;
- // seek
- if (taosLSeekFile(pReader->pReadH, offset, SEEK_SET) < 0) {
- code = TAOS_SYSTEM_ERROR(errno);
- goto _err;
- }
+ ASSERT(mDataBlk->nItem > 0);
// alloc
- code = tRealloc(&pReader->aBuf[0], size);
+ size = tPutMapData(NULL, mDataBlk);
+ code = tRealloc(&pWriter->aBuf[0], size);
if (code) goto _err;
- // read
- n = taosReadFile(pReader->pReadH, pReader->aBuf[0], size);
- if (n < 0) {
- code = TAOS_SYSTEM_ERROR(errno);
- goto _err;
- } else if (n < size) {
- code = TSDB_CODE_FILE_CORRUPTED;
- goto _err;
- }
-
- // check
- if (!taosCheckChecksumWhole(pReader->aBuf[0], size)) {
- code = TSDB_CODE_FILE_CORRUPTED;
- goto _err;
- }
-
- // decode
- n = 0;
- uint32_t delimiter;
- n += tGetU32(pReader->aBuf[0] + n, &delimiter);
- ASSERT(delimiter == TSDB_FILE_DLMT);
-
- while (n < size - sizeof(TSCKSUM)) {
- SDelIdx delIdx;
-
- n += tGetDelIdx(pReader->aBuf[0] + n, &delIdx);
+ // build
+ n = tPutMapData(pWriter->aBuf[0], mDataBlk);
- if (taosArrayPush(aDelIdx, &delIdx) == NULL) {
- code = TSDB_CODE_OUT_OF_MEMORY;
- goto _err;
- }
- }
+ // write
+ code = tsdbWriteFile(pWriter->pHeadFD, pHeadFile->size, pWriter->aBuf[0], size);
+ if (code) goto _err;
- ASSERT(n == size - sizeof(TSCKSUM));
+ // update
+ pBlockIdx->offset = pHeadFile->size;
+ pBlockIdx->size = size;
+ pHeadFile->size += size;
+ tsdbTrace("vgId:%d, write block, file ID:%d commit ID:%d suid:%" PRId64 " uid:%" PRId64 " offset:%" PRId64
+ " size:%" PRId64 " nItem:%d",
+ TD_VID(pWriter->pTsdb->pVnode), pWriter->wSet.fid, pHeadFile->commitID, pBlockIdx->suid, pBlockIdx->uid,
+ pBlockIdx->offset, pBlockIdx->size, mDataBlk->nItem);
return code;
_err:
- tsdbError("vgId:%d, read del idx failed since %s", TD_VID(pReader->pTsdb->pVnode), tstrerror(code));
+ tsdbError("vgId:%d, write block failed since %s", TD_VID(pWriter->pTsdb->pVnode), tstrerror(code));
return code;
}
-// SDataFReader ====================================================
-struct SDataFReader {
- STsdb *pTsdb;
- SDFileSet *pSet;
- TdFilePtr pHeadFD;
- TdFilePtr pDataFD;
- TdFilePtr pLastFD;
- TdFilePtr pSmaFD;
-
- uint8_t *aBuf[3];
-};
-
-int32_t tsdbDataFReaderOpen(SDataFReader **ppReader, STsdb *pTsdb, SDFileSet *pSet) {
- int32_t code = 0;
- SDataFReader *pReader;
- char fname[TSDB_FILENAME_LEN];
+int32_t tsdbWriteSttBlk(SDataFWriter *pWriter, SArray *aSttBlk) {
+ int32_t code = 0;
+ SSttFile *pSttFile = &pWriter->fStt[pWriter->wSet.nSttF - 1];
+ int64_t size;
+ int64_t n;
- // alloc
- pReader = (SDataFReader *)taosMemoryCalloc(1, sizeof(*pReader));
- if (pReader == NULL) {
- code = TSDB_CODE_OUT_OF_MEMORY;
- goto _err;
+ // check
+ if (taosArrayGetSize(aSttBlk) == 0) {
+ pSttFile->offset = pSttFile->size;
+ goto _exit;
}
- pReader->pTsdb = pTsdb;
- pReader->pSet = pSet;
- // open impl
- // head
- tsdbHeadFileName(pTsdb, pSet->diskId, pSet->fid, pSet->pHeadF, fname);
- pReader->pHeadFD = taosOpenFile(fname, TD_FILE_READ);
- if (pReader->pHeadFD == NULL) {
- code = TAOS_SYSTEM_ERROR(errno);
- goto _err;
+ // size
+ size = 0;
+ for (int32_t iBlockL = 0; iBlockL < taosArrayGetSize(aSttBlk); iBlockL++) {
+ size += tPutSttBlk(NULL, taosArrayGet(aSttBlk, iBlockL));
}
- // data
- tsdbDataFileName(pTsdb, pSet->diskId, pSet->fid, pSet->pDataF, fname);
- pReader->pDataFD = taosOpenFile(fname, TD_FILE_READ);
- if (pReader->pDataFD == NULL) {
- code = TAOS_SYSTEM_ERROR(errno);
- goto _err;
- }
+ // alloc
+ code = tRealloc(&pWriter->aBuf[0], size);
+ if (code) goto _err;
- // last
- tsdbLastFileName(pTsdb, pSet->diskId, pSet->fid, pSet->pLastF, fname);
- pReader->pLastFD = taosOpenFile(fname, TD_FILE_READ);
- if (pReader->pLastFD == NULL) {
- code = TAOS_SYSTEM_ERROR(errno);
- goto _err;
+ // encode
+ n = 0;
+ for (int32_t iBlockL = 0; iBlockL < taosArrayGetSize(aSttBlk); iBlockL++) {
+ n += tPutSttBlk(pWriter->aBuf[0] + n, taosArrayGet(aSttBlk, iBlockL));
}
- // sma
- tsdbSmaFileName(pTsdb, pSet->diskId, pSet->fid, pSet->pSmaF, fname);
- pReader->pSmaFD = taosOpenFile(fname, TD_FILE_READ);
- if (pReader->pSmaFD == NULL) {
- code = TAOS_SYSTEM_ERROR(errno);
- goto _err;
- }
+ // write
+ code = tsdbWriteFile(pWriter->pSttFD, pSttFile->size, pWriter->aBuf[0], size);
+ if (code) goto _err;
- *ppReader = pReader;
+ // update
+ pSttFile->offset = pSttFile->size;
+ pSttFile->size += size;
+
+_exit:
+ tsdbTrace("vgId:%d tsdb write stt block, loffset:%" PRId64 " size:%" PRId64, TD_VID(pWriter->pTsdb->pVnode),
+ pSttFile->offset, size);
return code;
_err:
- tsdbError("vgId:%d, tsdb data file reader open failed since %s", TD_VID(pTsdb->pVnode), tstrerror(code));
- *ppReader = NULL;
+ tsdbError("vgId:%d tsdb write blockl failed since %s", TD_VID(pWriter->pTsdb->pVnode), tstrerror(code));
return code;
}
-int32_t tsdbDataFReaderClose(SDataFReader **ppReader) {
+static int32_t tsdbWriteBlockSma(SDataFWriter *pWriter, SBlockData *pBlockData, SSmaInfo *pSmaInfo) {
int32_t code = 0;
- if (*ppReader == NULL) goto _exit;
- if (taosCloseFile(&(*ppReader)->pHeadFD) < 0) {
- code = TAOS_SYSTEM_ERROR(errno);
- goto _err;
- }
+ pSmaInfo->offset = 0;
+ pSmaInfo->size = 0;
- if (taosCloseFile(&(*ppReader)->pDataFD) < 0) {
- code = TAOS_SYSTEM_ERROR(errno);
- goto _err;
- }
+ // encode
+ for (int32_t iColData = 0; iColData < taosArrayGetSize(pBlockData->aIdx); iColData++) {
+ SColData *pColData = tBlockDataGetColDataByIdx(pBlockData, iColData);
- if (taosCloseFile(&(*ppReader)->pLastFD) < 0) {
- code = TAOS_SYSTEM_ERROR(errno);
- goto _err;
- }
+ if ((!pColData->smaOn) || IS_VAR_DATA_TYPE(pColData->type)) continue;
- if (taosCloseFile(&(*ppReader)->pSmaFD) < 0) {
- code = TAOS_SYSTEM_ERROR(errno);
- goto _err;
- }
+ SColumnDataAgg sma;
+ tsdbCalcColDataSMA(pColData, &sma);
- for (int32_t iBuf = 0; iBuf < sizeof((*ppReader)->aBuf) / sizeof(uint8_t *); iBuf++) {
- tFree((*ppReader)->aBuf[iBuf]);
+ code = tRealloc(&pWriter->aBuf[0], pSmaInfo->size + tPutColumnDataAgg(NULL, &sma));
+ if (code) goto _err;
+ pSmaInfo->size += tPutColumnDataAgg(pWriter->aBuf[0] + pSmaInfo->size, &sma);
}
- taosMemoryFree(*ppReader);
+ // write
+ if (pSmaInfo->size) {
+ code = tRealloc(&pWriter->aBuf[0], pSmaInfo->size);
+ if (code) goto _err;
+
+ code = tsdbWriteFile(pWriter->pSmaFD, pWriter->fSma.size, pWriter->aBuf[0], pSmaInfo->size);
+ if (code) goto _err;
+
+ pSmaInfo->offset = pWriter->fSma.size;
+ pWriter->fSma.size += pSmaInfo->size;
+ }
-_exit:
- *ppReader = NULL;
return code;
_err:
- tsdbError("vgId:%d, data file reader close failed since %s", TD_VID((*ppReader)->pTsdb->pVnode), tstrerror(code));
+ tsdbError("vgId:%d tsdb write block sma failed since %s", TD_VID(pWriter->pTsdb->pVnode), tstrerror(code));
return code;
}
-int32_t tsdbReadBlockIdx(SDataFReader *pReader, SArray *aBlockIdx) {
- int32_t code = 0;
- int64_t offset = pReader->pSet->pHeadF->offset;
- int64_t size = pReader->pSet->pHeadF->size - offset;
- int64_t n;
- uint32_t delimiter;
+int32_t tsdbWriteBlockData(SDataFWriter *pWriter, SBlockData *pBlockData, SBlockInfo *pBlkInfo, SSmaInfo *pSmaInfo,
+ int8_t cmprAlg, int8_t toLast) {
+ int32_t code = 0;
- taosArrayClear(aBlockIdx);
- if (size == 0) {
- goto _exit;
- }
+ ASSERT(pBlockData->nRow > 0);
- // alloc
- code = tRealloc(&pReader->aBuf[0], size);
+ if (toLast) {
+ pBlkInfo->offset = pWriter->fStt[pWriter->wSet.nSttF - 1].size;
+ } else {
+ pBlkInfo->offset = pWriter->fData.size;
+ }
+ pBlkInfo->szBlock = 0;
+ pBlkInfo->szKey = 0;
+
+ int32_t aBufN[4] = {0};
+ code = tCmprBlockData(pBlockData, cmprAlg, NULL, NULL, pWriter->aBuf, aBufN);
if (code) goto _err;
- // seek
- if (taosLSeekFile(pReader->pHeadFD, offset, SEEK_SET) < 0) {
- code = TAOS_SYSTEM_ERROR(errno);
- goto _err;
- }
+ // write =================
+ STsdbFD *pFD = toLast ? pWriter->pSttFD : pWriter->pDataFD;
- // read
- n = taosReadFile(pReader->pHeadFD, pReader->aBuf[0], size);
- if (n < 0) {
- code = TAOS_SYSTEM_ERROR(errno);
- goto _err;
- } else if (n < size) {
- code = TSDB_CODE_FILE_CORRUPTED;
- goto _err;
- }
+ pBlkInfo->szKey = aBufN[3] + aBufN[2];
+ pBlkInfo->szBlock = aBufN[0] + aBufN[1] + aBufN[2] + aBufN[3];
- // check
- if (!taosCheckChecksumWhole(pReader->aBuf[0], size)) {
- code = TSDB_CODE_FILE_CORRUPTED;
- goto _err;
- }
+ int64_t offset = pBlkInfo->offset;
+ code = tsdbWriteFile(pFD, offset, pWriter->aBuf[3], aBufN[3]);
+ if (code) goto _err;
+ offset += aBufN[3];
- // decode
- n = 0;
- n = tGetU32(pReader->aBuf[0] + n, &delimiter);
- ASSERT(delimiter == TSDB_FILE_DLMT);
+ code = tsdbWriteFile(pFD, offset, pWriter->aBuf[2], aBufN[2]);
+ if (code) goto _err;
+ offset += aBufN[2];
- while (n < size - sizeof(TSCKSUM)) {
- SBlockIdx blockIdx;
- n += tGetBlockIdx(pReader->aBuf[0] + n, &blockIdx);
+ if (aBufN[1]) {
+ code = tsdbWriteFile(pFD, offset, pWriter->aBuf[1], aBufN[1]);
+ if (code) goto _err;
+ offset += aBufN[1];
+ }
- if (taosArrayPush(aBlockIdx, &blockIdx) == NULL) {
- code = TSDB_CODE_OUT_OF_MEMORY;
- goto _err;
- }
+ if (aBufN[0]) {
+ code = tsdbWriteFile(pFD, offset, pWriter->aBuf[0], aBufN[0]);
+ if (code) goto _err;
+ }
+
+ // update info
+ if (toLast) {
+ pWriter->fStt[pWriter->wSet.nSttF - 1].size += pBlkInfo->szBlock;
+ } else {
+ pWriter->fData.size += pBlkInfo->szBlock;
}
- ASSERT(n + sizeof(TSCKSUM) == size);
+ // ================= SMA ====================
+ if (pSmaInfo) {
+ code = tsdbWriteBlockSma(pWriter, pBlockData, pSmaInfo);
+ if (code) goto _err;
+ }
_exit:
+ tsdbTrace("vgId:%d tsdb write block data, suid:%" PRId64 " uid:%" PRId64 " nRow:%d, offset:%" PRId64 " size:%d",
+ TD_VID(pWriter->pTsdb->pVnode), pBlockData->suid, pBlockData->uid, pBlockData->nRow, pBlkInfo->offset,
+ pBlkInfo->szBlock);
return code;
_err:
- tsdbError("vgId:%d, read block idx failed since %s", TD_VID(pReader->pTsdb->pVnode), tstrerror(code));
+ tsdbError("vgId:%d tsdb write block data failed since %s", TD_VID(pWriter->pTsdb->pVnode), tstrerror(code));
return code;
}
-int32_t tsdbReadBlockL(SDataFReader *pReader, SArray *aBlockL) {
- int32_t code = 0;
- int64_t offset = pReader->pSet->pLastF->offset;
- int64_t size = pReader->pSet->pLastF->size - offset;
- int64_t n;
- uint32_t delimiter;
+int32_t tsdbDFileSetCopy(STsdb *pTsdb, SDFileSet *pSetFrom, SDFileSet *pSetTo) {
+ int32_t code = 0;
+ int64_t n;
+ int64_t size;
+ TdFilePtr pOutFD = NULL;
+ TdFilePtr PInFD = NULL;
+ int32_t szPage = pTsdb->pVnode->config.szPage;
+ char fNameFrom[TSDB_FILENAME_LEN];
+ char fNameTo[TSDB_FILENAME_LEN];
- taosArrayClear(aBlockL);
- if (size == 0) {
- goto _exit;
+ // head
+ tsdbHeadFileName(pTsdb, pSetFrom->diskId, pSetFrom->fid, pSetFrom->pHeadF, fNameFrom);
+ tsdbHeadFileName(pTsdb, pSetTo->diskId, pSetTo->fid, pSetTo->pHeadF, fNameTo);
+ pOutFD = taosOpenFile(fNameTo, TD_FILE_WRITE | TD_FILE_CREATE | TD_FILE_TRUNC);
+ if (pOutFD == NULL) {
+ code = TAOS_SYSTEM_ERROR(errno);
+ goto _err;
}
-
- // alloc
- code = tRealloc(&pReader->aBuf[0], size);
- if (code) goto _err;
-
- // seek
- if (taosLSeekFile(pReader->pLastFD, offset, SEEK_SET) < 0) {
+ PInFD = taosOpenFile(fNameFrom, TD_FILE_READ);
+ if (PInFD == NULL) {
code = TAOS_SYSTEM_ERROR(errno);
goto _err;
}
-
- // read
- n = taosReadFile(pReader->pLastFD, pReader->aBuf[0], size);
+ n = taosFSendFile(pOutFD, PInFD, 0, tsdbLogicToFileSize(pSetFrom->pHeadF->size, szPage));
if (n < 0) {
code = TAOS_SYSTEM_ERROR(errno);
goto _err;
- } else if (n < size) {
- code = TSDB_CODE_FILE_CORRUPTED;
- goto _err;
}
+ taosCloseFile(&pOutFD);
+ taosCloseFile(&PInFD);
- // check
- if (!taosCheckChecksumWhole(pReader->aBuf[0], size)) {
- code = TSDB_CODE_FILE_CORRUPTED;
+ // data
+ tsdbDataFileName(pTsdb, pSetFrom->diskId, pSetFrom->fid, pSetFrom->pDataF, fNameFrom);
+ tsdbDataFileName(pTsdb, pSetTo->diskId, pSetTo->fid, pSetTo->pDataF, fNameTo);
+ pOutFD = taosOpenFile(fNameTo, TD_FILE_WRITE | TD_FILE_CREATE | TD_FILE_TRUNC);
+ if (pOutFD == NULL) {
+ code = TAOS_SYSTEM_ERROR(errno);
goto _err;
}
+ PInFD = taosOpenFile(fNameFrom, TD_FILE_READ);
+ if (PInFD == NULL) {
+ code = TAOS_SYSTEM_ERROR(errno);
+ goto _err;
+ }
+ n = taosFSendFile(pOutFD, PInFD, 0, LOGIC_TO_FILE_OFFSET(pSetFrom->pDataF->size, szPage));
+ if (n < 0) {
+ code = TAOS_SYSTEM_ERROR(errno);
+ goto _err;
+ }
+ taosCloseFile(&pOutFD);
+ taosCloseFile(&PInFD);
- // decode
- n = 0;
- n = tGetU32(pReader->aBuf[0] + n, &delimiter);
- ASSERT(delimiter == TSDB_FILE_DLMT);
-
- while (n < size - sizeof(TSCKSUM)) {
- SBlockL blockl;
- n += tGetBlockL(pReader->aBuf[0] + n, &blockl);
+ // sma
+ tsdbSmaFileName(pTsdb, pSetFrom->diskId, pSetFrom->fid, pSetFrom->pSmaF, fNameFrom);
+ tsdbSmaFileName(pTsdb, pSetTo->diskId, pSetTo->fid, pSetTo->pSmaF, fNameTo);
+ pOutFD = taosOpenFile(fNameTo, TD_FILE_WRITE | TD_FILE_CREATE | TD_FILE_TRUNC);
+ if (pOutFD == NULL) {
+ code = TAOS_SYSTEM_ERROR(errno);
+ goto _err;
+ }
+ PInFD = taosOpenFile(fNameFrom, TD_FILE_READ);
+ if (PInFD == NULL) {
+ code = TAOS_SYSTEM_ERROR(errno);
+ goto _err;
+ }
+ n = taosFSendFile(pOutFD, PInFD, 0, tsdbLogicToFileSize(pSetFrom->pSmaF->size, szPage));
+ if (n < 0) {
+ code = TAOS_SYSTEM_ERROR(errno);
+ goto _err;
+ }
+ taosCloseFile(&pOutFD);
+ taosCloseFile(&PInFD);
- if (taosArrayPush(aBlockL, &blockl) == NULL) {
- code = TSDB_CODE_OUT_OF_MEMORY;
+ // stt
+ for (int8_t iStt = 0; iStt < pSetFrom->nSttF; iStt++) {
+ tsdbSttFileName(pTsdb, pSetFrom->diskId, pSetFrom->fid, pSetFrom->aSttF[iStt], fNameFrom);
+ tsdbSttFileName(pTsdb, pSetTo->diskId, pSetTo->fid, pSetTo->aSttF[iStt], fNameTo);
+ pOutFD = taosOpenFile(fNameTo, TD_FILE_WRITE | TD_FILE_CREATE | TD_FILE_TRUNC);
+ if (pOutFD == NULL) {
+ code = TAOS_SYSTEM_ERROR(errno);
+ goto _err;
+ }
+ PInFD = taosOpenFile(fNameFrom, TD_FILE_READ);
+ if (PInFD == NULL) {
+ code = TAOS_SYSTEM_ERROR(errno);
+ goto _err;
+ }
+ n = taosFSendFile(pOutFD, PInFD, 0, tsdbLogicToFileSize(pSetFrom->aSttF[iStt]->size, szPage));
+ if (n < 0) {
+ code = TAOS_SYSTEM_ERROR(errno);
goto _err;
}
+ taosCloseFile(&pOutFD);
+ taosCloseFile(&PInFD);
}
- ASSERT(n + sizeof(TSCKSUM) == size);
-
-_exit:
return code;
_err:
- tsdbError("vgId:%d read blockl failed since %s", TD_VID(pReader->pTsdb->pVnode), tstrerror(code));
+ tsdbError("vgId:%d, tsdb DFileSet copy failed since %s", TD_VID(pTsdb->pVnode), tstrerror(code));
return code;
}
-int32_t tsdbReadBlock(SDataFReader *pReader, SBlockIdx *pBlockIdx, SMapData *mBlock) {
- int32_t code = 0;
- int64_t offset = pBlockIdx->offset;
- int64_t size = pBlockIdx->size;
- int64_t n;
- int64_t tn;
+// SDataFReader ====================================================
+int32_t tsdbDataFReaderOpen(SDataFReader **ppReader, STsdb *pTsdb, SDFileSet *pSet) {
+ int32_t code = 0;
+ SDataFReader *pReader;
+ int32_t szPage = pTsdb->pVnode->config.tsdbPageSize;
+ char fname[TSDB_FILENAME_LEN];
// alloc
- code = tRealloc(&pReader->aBuf[0], size);
- if (code) goto _err;
-
- // seek
- if (taosLSeekFile(pReader->pHeadFD, offset, SEEK_SET) < 0) {
- code = TAOS_SYSTEM_ERROR(errno);
+ pReader = (SDataFReader *)taosMemoryCalloc(1, sizeof(*pReader));
+ if (pReader == NULL) {
+ code = TSDB_CODE_OUT_OF_MEMORY;
goto _err;
}
+ pReader->pTsdb = pTsdb;
+ pReader->pSet = pSet;
- // read
- n = taosReadFile(pReader->pHeadFD, pReader->aBuf[0], size);
- if (n < 0) {
- code = TAOS_SYSTEM_ERROR(errno);
- goto _err;
- } else if (n < size) {
- code = TSDB_CODE_FILE_CORRUPTED;
- goto _err;
- }
+ // head
+ tsdbHeadFileName(pTsdb, pSet->diskId, pSet->fid, pSet->pHeadF, fname);
+ code = tsdbOpenFile(fname, szPage, TD_FILE_READ, &pReader->pHeadFD);
+ if (code) goto _err;
- // check
- if (!taosCheckChecksumWhole(pReader->aBuf[0], size)) {
- code = TSDB_CODE_FILE_CORRUPTED;
- goto _err;
+ // data
+ tsdbDataFileName(pTsdb, pSet->diskId, pSet->fid, pSet->pDataF, fname);
+ code = tsdbOpenFile(fname, szPage, TD_FILE_READ, &pReader->pDataFD);
+ if (code) goto _err;
+
+ // sma
+ tsdbSmaFileName(pTsdb, pSet->diskId, pSet->fid, pSet->pSmaF, fname);
+ code = tsdbOpenFile(fname, szPage, TD_FILE_READ, &pReader->pSmaFD);
+ if (code) goto _err;
+
+ // stt
+ for (int32_t iStt = 0; iStt < pSet->nSttF; iStt++) {
+ tsdbSttFileName(pTsdb, pSet->diskId, pSet->fid, pSet->aSttF[iStt], fname);
+ code = tsdbOpenFile(fname, szPage, TD_FILE_READ, &pReader->aSttFD[iStt]);
+ if (code) goto _err;
}
- // decode
- n = 0;
+ *ppReader = pReader;
+ return code;
- uint32_t delimiter;
- n += tGetU32(pReader->aBuf[0] + n, &delimiter);
- ASSERT(delimiter == TSDB_FILE_DLMT);
+_err:
+ tsdbError("vgId:%d, tsdb data file reader open failed since %s", TD_VID(pTsdb->pVnode), tstrerror(code));
+ *ppReader = NULL;
+ return code;
+}
- tn = tGetMapData(pReader->aBuf[0] + n, mBlock);
- if (tn < 0) {
- code = TSDB_CODE_OUT_OF_MEMORY;
- goto _err;
+int32_t tsdbDataFReaderClose(SDataFReader **ppReader) {
+ int32_t code = 0;
+ if (*ppReader == NULL) return code;
+
+ // head
+ tsdbCloseFile(&(*ppReader)->pHeadFD);
+
+ // data
+ tsdbCloseFile(&(*ppReader)->pDataFD);
+
+ // sma
+ tsdbCloseFile(&(*ppReader)->pSmaFD);
+
+ // stt
+ for (int32_t iStt = 0; iStt < TSDB_MAX_STT_TRIGGER; iStt++) {
+ if ((*ppReader)->aSttFD[iStt]) {
+ tsdbCloseFile(&(*ppReader)->aSttFD[iStt]);
+ }
}
- n += tn;
- ASSERT(n + sizeof(TSCKSUM) == size);
+ for (int32_t iBuf = 0; iBuf < sizeof((*ppReader)->aBuf) / sizeof(uint8_t *); iBuf++) {
+ tFree((*ppReader)->aBuf[iBuf]);
+ }
+ taosMemoryFree(*ppReader);
+ *ppReader = NULL;
return code;
_err:
- tsdbError("vgId:%d, read block failed since %s", TD_VID(pReader->pTsdb->pVnode), tstrerror(code));
+ tsdbError("vgId:%d, data file reader close failed since %s", TD_VID((*ppReader)->pTsdb->pVnode), tstrerror(code));
return code;
}
-int32_t tsdbReadBlockSma(SDataFReader *pReader, SBlock *pBlock, SArray *aColumnDataAgg) {
- int32_t code = 0;
- SSmaInfo *pSmaInfo = &pBlock->smaInfo;
-
- ASSERT(pSmaInfo->size > 0);
+int32_t tsdbReadBlockIdx(SDataFReader *pReader, SArray *aBlockIdx) {
+ int32_t code = 0;
+ SHeadFile *pHeadFile = pReader->pSet->pHeadF;
+ int64_t offset = pHeadFile->offset;
+ int64_t size = pHeadFile->size - offset;
- taosArrayClear(aColumnDataAgg);
+ taosArrayClear(aBlockIdx);
+ if (size == 0) return code;
// alloc
- int32_t size = pSmaInfo->size + sizeof(TSCKSUM);
code = tRealloc(&pReader->aBuf[0], size);
if (code) goto _err;
- // seek
- int64_t n = taosLSeekFile(pReader->pSmaFD, pSmaInfo->offset, SEEK_SET);
- if (n < 0) {
- code = TAOS_SYSTEM_ERROR(errno);
- goto _err;
- } else if (n < pSmaInfo->offset) {
- code = TSDB_CODE_FILE_CORRUPTED;
- goto _err;
- }
-
// read
- n = taosReadFile(pReader->pSmaFD, pReader->aBuf[0], size);
- if (n < 0) {
- code = TAOS_SYSTEM_ERROR(errno);
- goto _err;
- } else if (n < size) {
- code = TSDB_CODE_FILE_CORRUPTED;
- goto _err;
- }
-
- // check
- if (!taosCheckChecksumWhole(pReader->aBuf[0], size)) {
- code = TSDB_CODE_FILE_CORRUPTED;
- goto _err;
- }
+ code = tsdbReadFile(pReader->pHeadFD, offset, pReader->aBuf[0], size);
+ if (code) goto _err;
// decode
- n = 0;
- while (n < pSmaInfo->size) {
- SColumnDataAgg sma;
+ int64_t n = 0;
+ while (n < size) {
+ SBlockIdx blockIdx;
+ n += tGetBlockIdx(pReader->aBuf[0] + n, &blockIdx);
- n += tGetColumnDataAgg(pReader->aBuf[0] + n, &sma);
- if (taosArrayPush(aColumnDataAgg, &sma) == NULL) {
+ if (taosArrayPush(aBlockIdx, &blockIdx) == NULL) {
code = TSDB_CODE_OUT_OF_MEMORY;
goto _err;
}
}
+ ASSERT(n == size);
return code;
_err:
- tsdbError("vgId:%d tsdb read block sma failed since %s", TD_VID(pReader->pTsdb->pVnode), tstrerror(code));
+ tsdbError("vgId:%d, read block idx failed since %s", TD_VID(pReader->pTsdb->pVnode), tstrerror(code));
return code;
}
-static int32_t tsdbReadBlockDataImpl(SDataFReader *pReader, SBlockInfo *pBlkInfo, int8_t fromLast,
- SBlockData *pBlockData) {
- int32_t code = 0;
-
- tBlockDataClear(pBlockData);
+int32_t tsdbReadSttBlk(SDataFReader *pReader, int32_t iStt, SArray *aSttBlk) {
+ int32_t code = 0;
+ SSttFile *pSttFile = pReader->pSet->aSttF[iStt];
+ int64_t offset = pSttFile->offset;
+ int64_t size = pSttFile->size - offset;
- TdFilePtr pFD = fromLast ? pReader->pLastFD : pReader->pDataFD;
+ taosArrayClear(aSttBlk);
+ if (size == 0) return code;
- // uid + version + tskey
- code = tsdbReadAndCheck(pFD, pBlkInfo->offset, &pReader->aBuf[0], pBlkInfo->szKey, 1);
+ // alloc
+ code = tRealloc(&pReader->aBuf[0], size);
if (code) goto _err;
- SDiskDataHdr hdr;
- uint8_t *p = pReader->aBuf[0] + tGetDiskDataHdr(pReader->aBuf[0], &hdr);
+
+ // read
+ code = tsdbReadFile(pReader->aSttFD[iStt], offset, pReader->aBuf[0], size);
+ if (code) goto _err;
+
+ // decode
+ int64_t n = 0;
+ while (n < size) {
+ SSttBlk sttBlk;
+ n += tGetSttBlk(pReader->aBuf[0] + n, &sttBlk);
+
+ if (taosArrayPush(aSttBlk, &sttBlk) == NULL) {
+ code = TSDB_CODE_OUT_OF_MEMORY;
+ goto _err;
+ }
+ }
+ ASSERT(n == size);
+
+ return code;
+
+_err:
+ tsdbError("vgId:%d read stt blk failed since %s", TD_VID(pReader->pTsdb->pVnode), tstrerror(code));
+ return code;
+}
+
+int32_t tsdbReadDataBlk(SDataFReader *pReader, SBlockIdx *pBlockIdx, SMapData *mDataBlk) {
+ int32_t code = 0;
+ int64_t offset = pBlockIdx->offset;
+ int64_t size = pBlockIdx->size;
+
+ // alloc
+ code = tRealloc(&pReader->aBuf[0], size);
+ if (code) goto _err;
+
+ // read
+ code = tsdbReadFile(pReader->pHeadFD, offset, pReader->aBuf[0], size);
+ if (code) goto _err;
+
+ // decode
+ int64_t n = tGetMapData(pReader->aBuf[0], mDataBlk);
+ if (n < 0) {
+ code = TSDB_CODE_OUT_OF_MEMORY;
+ goto _err;
+ }
+ ASSERT(n == size);
+
+ return code;
+
+_err:
+ tsdbError("vgId:%d, read block failed since %s", TD_VID(pReader->pTsdb->pVnode), tstrerror(code));
+ return code;
+}
+
+int32_t tsdbReadBlockSma(SDataFReader *pReader, SDataBlk *pDataBlk, SArray *aColumnDataAgg) {
+ int32_t code = 0;
+ SSmaInfo *pSmaInfo = &pDataBlk->smaInfo;
+
+ ASSERT(pSmaInfo->size > 0);
+
+ taosArrayClear(aColumnDataAgg);
+
+ // alloc
+ code = tRealloc(&pReader->aBuf[0], pSmaInfo->size);
+ if (code) goto _err;
+
+ // read
+ code = tsdbReadFile(pReader->pSmaFD, pSmaInfo->offset, pReader->aBuf[0], pSmaInfo->size);
+ if (code) goto _err;
+
+ // decode
+ int32_t n = 0;
+ while (n < pSmaInfo->size) {
+ SColumnDataAgg sma;
+ n += tGetColumnDataAgg(pReader->aBuf[0] + n, &sma);
+
+ if (taosArrayPush(aColumnDataAgg, &sma) == NULL) {
+ code = TSDB_CODE_OUT_OF_MEMORY;
+ goto _err;
+ }
+ }
+ ASSERT(n == pSmaInfo->size);
+ return code;
+
+_err:
+ tsdbError("vgId:%d tsdb read block sma failed since %s", TD_VID(pReader->pTsdb->pVnode), tstrerror(code));
+ return code;
+}
+
+static int32_t tsdbReadBlockDataImpl(SDataFReader *pReader, SBlockInfo *pBlkInfo, SBlockData *pBlockData,
+ int32_t iStt) {
+ int32_t code = 0;
+
+ tBlockDataClear(pBlockData);
+
+ STsdbFD *pFD = (iStt < 0) ? pReader->pDataFD : pReader->aSttFD[iStt];
+
+ // uid + version + tskey
+ code = tRealloc(&pReader->aBuf[0], pBlkInfo->szKey);
+ if (code) goto _err;
+
+ code = tsdbReadFile(pFD, pBlkInfo->offset, pReader->aBuf[0], pBlkInfo->szKey);
+ if (code) goto _err;
+
+ SDiskDataHdr hdr;
+ uint8_t *p = pReader->aBuf[0] + tGetDiskDataHdr(pReader->aBuf[0], &hdr);
ASSERT(hdr.delimiter == TSDB_FILE_DLMT);
ASSERT(pBlockData->suid == hdr.suid);
@@ -782,14 +973,18 @@ static int32_t tsdbReadBlockDataImpl(SDataFReader *pReader, SBlockInfo *pBlkInfo
if (code) goto _err;
p += hdr.szKey;
- ASSERT(p - pReader->aBuf[0] == pBlkInfo->szKey - sizeof(TSCKSUM));
+ ASSERT(p - pReader->aBuf[0] == pBlkInfo->szKey);
// read and decode columns
if (taosArrayGetSize(pBlockData->aIdx) == 0) goto _exit;
if (hdr.szBlkCol > 0) {
int64_t offset = pBlkInfo->offset + pBlkInfo->szKey;
- code = tsdbReadAndCheck(pFD, offset, &pReader->aBuf[0], hdr.szBlkCol + sizeof(TSCKSUM), 1);
+
+ code = tRealloc(&pReader->aBuf[0], hdr.szBlkCol);
+ if (code) goto _err;
+
+ code = tsdbReadFile(pFD, offset, pReader->aBuf[0], hdr.szBlkCol);
if (code) goto _err;
}
@@ -827,10 +1022,13 @@ static int32_t tsdbReadBlockDataImpl(SDataFReader *pReader, SBlockInfo *pBlkInfo
}
} else {
// decode from binary
- int64_t offset = pBlkInfo->offset + pBlkInfo->szKey + hdr.szBlkCol + sizeof(TSCKSUM) + pBlockCol->offset;
- int32_t size = pBlockCol->szBitmap + pBlockCol->szOffset + pBlockCol->szValue + sizeof(TSCKSUM);
+ int64_t offset = pBlkInfo->offset + pBlkInfo->szKey + hdr.szBlkCol + pBlockCol->offset;
+ int32_t size = pBlockCol->szBitmap + pBlockCol->szOffset + pBlockCol->szValue;
+
+ code = tRealloc(&pReader->aBuf[1], size);
+ if (code) goto _err;
- code = tsdbReadAndCheck(pFD, offset, &pReader->aBuf[1], size, 0);
+ code = tsdbReadFile(pFD, offset, pReader->aBuf[1], size);
if (code) goto _err;
code = tsdbDecmprColData(pReader->aBuf[1], pBlockCol, hdr.cmprAlg, hdr.nRow, pColData, &pReader->aBuf[2]);
@@ -847,13 +1045,39 @@ _err:
return code;
}
-int32_t tsdbReadDataBlock(SDataFReader *pReader, SBlock *pBlock, SBlockData *pBlockData) {
+int32_t tsdbReadDataBlockEx(SDataFReader *pReader, SDataBlk *pDataBlk, SBlockData *pBlockData) {
+ int32_t code = 0;
+ SBlockInfo *pBlockInfo = &pDataBlk->aSubBlock[0];
+
+ // alloc
+ code = tRealloc(&pReader->aBuf[0], pBlockInfo->szBlock);
+ if (code) goto _err;
+
+ // read
+ code = tsdbReadFile(pReader->pDataFD, pBlockInfo->offset, pReader->aBuf[0], pBlockInfo->szBlock);
+ if (code) goto _err;
+
+ // decmpr
+ code = tDecmprBlockData(pReader->aBuf[0], pBlockInfo->szBlock, pBlockData, &pReader->aBuf[1]);
+ if (code) goto _err;
+
+ return code;
+
+_err:
+ tsdbError("vgId:%d tsdb read data block ex failed since %s", TD_VID(pReader->pTsdb->pVnode), tstrerror(code));
+ return code;
+}
+
+int32_t tsdbReadDataBlock(SDataFReader *pReader, SDataBlk *pDataBlk, SBlockData *pBlockData) {
int32_t code = 0;
- code = tsdbReadBlockDataImpl(pReader, &pBlock->aSubBlock[0], 0, pBlockData);
+ code = tsdbReadBlockDataImpl(pReader, &pDataBlk->aSubBlock[0], pBlockData, -1);
if (code) goto _err;
- if (pBlock->nSubBlock > 1) {
+ ASSERT(pDataBlk->nSubBlock == 1);
+
+#if 0
+ if (pDataBlk->nSubBlock > 1) {
SBlockData bData1;
SBlockData bData2;
@@ -867,8 +1091,8 @@ int32_t tsdbReadDataBlock(SDataFReader *pReader, SBlock *pBlock, SBlockData *pBl
tBlockDataInitEx(&bData1, pBlockData);
tBlockDataInitEx(&bData2, pBlockData);
- for (int32_t iSubBlock = 1; iSubBlock < pBlock->nSubBlock; iSubBlock++) {
- code = tsdbReadBlockDataImpl(pReader, &pBlock->aSubBlock[iSubBlock], 0, &bData1);
+ for (int32_t iSubBlock = 1; iSubBlock < pDataBlk->nSubBlock; iSubBlock++) {
+ code = tsdbReadBlockDataImpl(pReader, &pDataBlk->aSubBlock[iSubBlock], &bData1);
if (code) {
tBlockDataDestroy(&bData1, 1);
tBlockDataDestroy(&bData2, 1);
@@ -893,6 +1117,7 @@ int32_t tsdbReadDataBlock(SDataFReader *pReader, SBlock *pBlock, SBlockData *pBl
tBlockDataDestroy(&bData1, 1);
tBlockDataDestroy(&bData2, 1);
}
+#endif
return code;
@@ -901,325 +1126,157 @@ _err:
return code;
}
-int32_t tsdbReadLastBlock(SDataFReader *pReader, SBlockL *pBlockL, SBlockData *pBlockData) {
+int32_t tsdbReadSttBlock(SDataFReader *pReader, int32_t iStt, SSttBlk *pSttBlk, SBlockData *pBlockData) {
int32_t code = 0;
+ int32_t lino = 0;
- code = tsdbReadBlockDataImpl(pReader, &pBlockL->bInfo, 1, pBlockData);
- if (code) goto _err;
-
- return code;
+ code = tsdbReadBlockDataImpl(pReader, &pSttBlk->bInfo, pBlockData, iStt);
+ TSDB_CHECK_CODE(code, lino, _exit);
-_err:
- tsdbError("vgId:%d tsdb read last block failed since %s", TD_VID(pReader->pTsdb->pVnode), tstrerror(code));
+_exit:
+ if (code) {
+ tsdbError("vgId:%d %s failed at %d since %s", TD_VID(pReader->pTsdb->pVnode), __func__, lino, tstrerror(code));
+ }
return code;
}
-// SDataFWriter ====================================================
-int32_t tsdbDataFWriterOpen(SDataFWriter **ppWriter, STsdb *pTsdb, SDFileSet *pSet) {
- int32_t code = 0;
- int32_t flag;
- int64_t n;
- SDataFWriter *pWriter = NULL;
- char fname[TSDB_FILENAME_LEN];
- char hdr[TSDB_FHDR_SIZE] = {0};
+int32_t tsdbReadSttBlockEx(SDataFReader *pReader, int32_t iStt, SSttBlk *pSttBlk, SBlockData *pBlockData) {
+ int32_t code = 0;
+ int32_t lino = 0;
// alloc
- pWriter = taosMemoryCalloc(1, sizeof(*pWriter));
- if (pWriter == NULL) {
- code = TSDB_CODE_OUT_OF_MEMORY;
- goto _err;
- }
- if (code) goto _err;
- pWriter->pTsdb = pTsdb;
- pWriter->wSet = (SDFileSet){.diskId = pSet->diskId,
- .fid = pSet->fid,
- .pHeadF = &pWriter->fHead,
- .pDataF = &pWriter->fData,
- .pLastF = &pWriter->fLast,
- .pSmaF = &pWriter->fSma};
- pWriter->fHead = *pSet->pHeadF;
- pWriter->fData = *pSet->pDataF;
- pWriter->fLast = *pSet->pLastF;
- pWriter->fSma = *pSet->pSmaF;
+ code = tRealloc(&pReader->aBuf[0], pSttBlk->bInfo.szBlock);
+ TSDB_CHECK_CODE(code, lino, _exit);
- // head
- flag = TD_FILE_WRITE | TD_FILE_CREATE | TD_FILE_TRUNC;
- tsdbHeadFileName(pTsdb, pWriter->wSet.diskId, pWriter->wSet.fid, &pWriter->fHead, fname);
- pWriter->pHeadFD = taosOpenFile(fname, flag);
- if (pWriter->pHeadFD == NULL) {
- code = TAOS_SYSTEM_ERROR(errno);
- goto _err;
- }
+ // read
+ code = tsdbReadFile(pReader->aSttFD[iStt], pSttBlk->bInfo.offset, pReader->aBuf[0], pSttBlk->bInfo.szBlock);
+ TSDB_CHECK_CODE(code, lino, _exit);
- n = taosWriteFile(pWriter->pHeadFD, hdr, TSDB_FHDR_SIZE);
- if (n < 0) {
- code = TAOS_SYSTEM_ERROR(errno);
- goto _err;
- }
+ // decmpr
+ code = tDecmprBlockData(pReader->aBuf[0], pSttBlk->bInfo.szBlock, pBlockData, &pReader->aBuf[1]);
+ TSDB_CHECK_CODE(code, lino, _exit);
- ASSERT(n == TSDB_FHDR_SIZE);
+_exit:
+ if (code) {
+ tsdbError("vgId:%d %s failed at %d since %s", TD_VID(pReader->pTsdb->pVnode), __func__, lino, tstrerror(code));
+ }
+ return code;
+}
- pWriter->fHead.size += TSDB_FHDR_SIZE;
+// SDelFWriter ====================================================
+int32_t tsdbDelFWriterOpen(SDelFWriter **ppWriter, SDelFile *pFile, STsdb *pTsdb) {
+ int32_t code = 0;
+ char fname[TSDB_FILENAME_LEN];
+ uint8_t hdr[TSDB_FHDR_SIZE] = {0};
+ SDelFWriter *pDelFWriter;
+ int64_t n;
- // data
- if (pWriter->fData.size == 0) {
- flag = TD_FILE_WRITE | TD_FILE_CREATE | TD_FILE_TRUNC;
- } else {
- flag = TD_FILE_WRITE;
- }
- tsdbDataFileName(pTsdb, pWriter->wSet.diskId, pWriter->wSet.fid, &pWriter->fData, fname);
- pWriter->pDataFD = taosOpenFile(fname, flag);
- if (pWriter->pDataFD == NULL) {
- code = TAOS_SYSTEM_ERROR(errno);
+ // alloc
+ pDelFWriter = (SDelFWriter *)taosMemoryCalloc(1, sizeof(*pDelFWriter));
+ if (pDelFWriter == NULL) {
+ code = TSDB_CODE_OUT_OF_MEMORY;
goto _err;
}
- if (pWriter->fData.size == 0) {
- n = taosWriteFile(pWriter->pDataFD, hdr, TSDB_FHDR_SIZE);
- if (n < 0) {
- code = TAOS_SYSTEM_ERROR(errno);
- goto _err;
- }
+ pDelFWriter->pTsdb = pTsdb;
+ pDelFWriter->fDel = *pFile;
- pWriter->fData.size += TSDB_FHDR_SIZE;
- } else {
- n = taosLSeekFile(pWriter->pDataFD, 0, SEEK_END);
- if (n < 0) {
- code = TAOS_SYSTEM_ERROR(errno);
- goto _err;
- }
+ tsdbDelFileName(pTsdb, pFile, fname);
+ code = tsdbOpenFile(fname, pTsdb->pVnode->config.tsdbPageSize, TD_FILE_READ | TD_FILE_WRITE | TD_FILE_CREATE,
+ &pDelFWriter->pWriteH);
+ if (code) goto _err;
- ASSERT(n == pWriter->fData.size);
- }
+ // update header
+ code = tsdbWriteFile(pDelFWriter->pWriteH, 0, hdr, TSDB_FHDR_SIZE);
+ if (code) goto _err;
- // last
- if (pWriter->fLast.size == 0) {
- flag = TD_FILE_WRITE | TD_FILE_CREATE | TD_FILE_TRUNC;
- } else {
- flag = TD_FILE_WRITE;
- }
- tsdbLastFileName(pTsdb, pWriter->wSet.diskId, pWriter->wSet.fid, &pWriter->fLast, fname);
- pWriter->pLastFD = taosOpenFile(fname, flag);
- if (pWriter->pLastFD == NULL) {
- code = TAOS_SYSTEM_ERROR(errno);
- goto _err;
- }
- if (pWriter->fLast.size == 0) {
- n = taosWriteFile(pWriter->pLastFD, hdr, TSDB_FHDR_SIZE);
- if (n < 0) {
- code = TAOS_SYSTEM_ERROR(errno);
- goto _err;
- }
+ pDelFWriter->fDel.size = TSDB_FHDR_SIZE;
+ pDelFWriter->fDel.offset = 0;
- pWriter->fLast.size += TSDB_FHDR_SIZE;
- } else {
- n = taosLSeekFile(pWriter->pLastFD, 0, SEEK_END);
- if (n < 0) {
- code = TAOS_SYSTEM_ERROR(errno);
- goto _err;
- }
+ *ppWriter = pDelFWriter;
+ return code;
- ASSERT(n == pWriter->fLast.size);
- }
+_err:
+ tsdbError("vgId:%d, failed to open del file writer since %s", TD_VID(pTsdb->pVnode), tstrerror(code));
+ *ppWriter = NULL;
+ return code;
+}
- // sma
- if (pWriter->fSma.size == 0) {
- flag = TD_FILE_WRITE | TD_FILE_CREATE | TD_FILE_TRUNC;
- } else {
- flag = TD_FILE_WRITE;
- }
- tsdbSmaFileName(pTsdb, pWriter->wSet.diskId, pWriter->wSet.fid, &pWriter->fSma, fname);
- pWriter->pSmaFD = taosOpenFile(fname, flag);
- if (pWriter->pSmaFD == NULL) {
- code = TAOS_SYSTEM_ERROR(errno);
- goto _err;
+int32_t tsdbDelFWriterClose(SDelFWriter **ppWriter, int8_t sync) {
+ int32_t code = 0;
+ SDelFWriter *pWriter = *ppWriter;
+ STsdb *pTsdb = pWriter->pTsdb;
+
+ // sync
+ if (sync) {
+ code = tsdbFsyncFile(pWriter->pWriteH);
+ if (code) goto _err;
}
- if (pWriter->fSma.size == 0) {
- n = taosWriteFile(pWriter->pSmaFD, hdr, TSDB_FHDR_SIZE);
- if (n < 0) {
- code = TAOS_SYSTEM_ERROR(errno);
- goto _err;
- }
- pWriter->fSma.size += TSDB_FHDR_SIZE;
- } else {
- n = taosLSeekFile(pWriter->pSmaFD, 0, SEEK_END);
- if (n < 0) {
- code = TAOS_SYSTEM_ERROR(errno);
- goto _err;
- }
+ // close
+ tsdbCloseFile(&pWriter->pWriteH);
- ASSERT(n == pWriter->fSma.size);
+ for (int32_t iBuf = 0; iBuf < sizeof(pWriter->aBuf) / sizeof(uint8_t *); iBuf++) {
+ tFree(pWriter->aBuf[iBuf]);
}
+ taosMemoryFree(pWriter);
- *ppWriter = pWriter;
+ *ppWriter = NULL;
return code;
_err:
- tsdbError("vgId:%d, tsdb data file writer open failed since %s", TD_VID(pTsdb->pVnode), tstrerror(code));
- *ppWriter = NULL;
+ tsdbError("vgId:%d, failed to close del file writer since %s", TD_VID(pTsdb->pVnode), tstrerror(code));
return code;
}
-int32_t tsdbDataFWriterClose(SDataFWriter **ppWriter, int8_t sync) {
+int32_t tsdbWriteDelData(SDelFWriter *pWriter, SArray *aDelData, SDelIdx *pDelIdx) {
int32_t code = 0;
- STsdb *pTsdb = NULL;
+ int64_t size;
+ int64_t n;
- if (*ppWriter == NULL) goto _exit;
+ // prepare
+ size = 0;
+ for (int32_t iDelData = 0; iDelData < taosArrayGetSize(aDelData); iDelData++) {
+ size += tPutDelData(NULL, taosArrayGet(aDelData, iDelData));
+ }
- pTsdb = (*ppWriter)->pTsdb;
- if (sync) {
- if (taosFsyncFile((*ppWriter)->pHeadFD) < 0) {
- code = TAOS_SYSTEM_ERROR(errno);
- goto _err;
- }
-
- if (taosFsyncFile((*ppWriter)->pDataFD) < 0) {
- code = TAOS_SYSTEM_ERROR(errno);
- goto _err;
- }
-
- if (taosFsyncFile((*ppWriter)->pLastFD) < 0) {
- code = TAOS_SYSTEM_ERROR(errno);
- goto _err;
- }
-
- if (taosFsyncFile((*ppWriter)->pSmaFD) < 0) {
- code = TAOS_SYSTEM_ERROR(errno);
- goto _err;
- }
- }
-
- if (taosCloseFile(&(*ppWriter)->pHeadFD) < 0) {
- code = TAOS_SYSTEM_ERROR(errno);
- goto _err;
- }
-
- if (taosCloseFile(&(*ppWriter)->pDataFD) < 0) {
- code = TAOS_SYSTEM_ERROR(errno);
- goto _err;
- }
-
- if (taosCloseFile(&(*ppWriter)->pLastFD) < 0) {
- code = TAOS_SYSTEM_ERROR(errno);
- goto _err;
- }
-
- if (taosCloseFile(&(*ppWriter)->pSmaFD) < 0) {
- code = TAOS_SYSTEM_ERROR(errno);
- goto _err;
- }
-
- for (int32_t iBuf = 0; iBuf < sizeof((*ppWriter)->aBuf) / sizeof(uint8_t *); iBuf++) {
- tFree((*ppWriter)->aBuf[iBuf]);
- }
- taosMemoryFree(*ppWriter);
-_exit:
- *ppWriter = NULL;
- return code;
-
-_err:
- tsdbError("vgId:%d, data file writer close failed since %s", TD_VID(pTsdb->pVnode), tstrerror(code));
- return code;
-}
-
-int32_t tsdbUpdateDFileSetHeader(SDataFWriter *pWriter) {
- int32_t code = 0;
- int64_t n;
- char hdr[TSDB_FHDR_SIZE];
-
- // head ==============
- memset(hdr, 0, TSDB_FHDR_SIZE);
- tPutHeadFile(hdr, &pWriter->fHead);
- taosCalcChecksumAppend(0, hdr, TSDB_FHDR_SIZE);
-
- n = taosLSeekFile(pWriter->pHeadFD, 0, SEEK_SET);
- if (n < 0) {
- code = TAOS_SYSTEM_ERROR(errno);
- goto _err;
- }
-
- n = taosWriteFile(pWriter->pHeadFD, hdr, TSDB_FHDR_SIZE);
- if (n < 0) {
- code = TAOS_SYSTEM_ERROR(errno);
- goto _err;
- }
-
- // data ==============
- memset(hdr, 0, TSDB_FHDR_SIZE);
- tPutDataFile(hdr, &pWriter->fData);
- taosCalcChecksumAppend(0, hdr, TSDB_FHDR_SIZE);
-
- n = taosLSeekFile(pWriter->pDataFD, 0, SEEK_SET);
- if (n < 0) {
- code = TAOS_SYSTEM_ERROR(errno);
- goto _err;
- }
-
- n = taosWriteFile(pWriter->pDataFD, hdr, TSDB_FHDR_SIZE);
- if (n < 0) {
- code = TAOS_SYSTEM_ERROR(errno);
- goto _err;
- }
-
- // last ==============
- memset(hdr, 0, TSDB_FHDR_SIZE);
- tPutLastFile(hdr, &pWriter->fLast);
- taosCalcChecksumAppend(0, hdr, TSDB_FHDR_SIZE);
-
- n = taosLSeekFile(pWriter->pLastFD, 0, SEEK_SET);
- if (n < 0) {
- code = TAOS_SYSTEM_ERROR(errno);
- goto _err;
- }
+ // alloc
+ code = tRealloc(&pWriter->aBuf[0], size);
+ if (code) goto _err;
- n = taosWriteFile(pWriter->pLastFD, hdr, TSDB_FHDR_SIZE);
- if (n < 0) {
- code = TAOS_SYSTEM_ERROR(errno);
- goto _err;
+ // build
+ n = 0;
+ for (int32_t iDelData = 0; iDelData < taosArrayGetSize(aDelData); iDelData++) {
+ n += tPutDelData(pWriter->aBuf[0] + n, taosArrayGet(aDelData, iDelData));
}
+ ASSERT(n == size);
- // sma ==============
- memset(hdr, 0, TSDB_FHDR_SIZE);
- tPutSmaFile(hdr, &pWriter->fSma);
- taosCalcChecksumAppend(0, hdr, TSDB_FHDR_SIZE);
-
- n = taosLSeekFile(pWriter->pSmaFD, 0, SEEK_SET);
- if (n < 0) {
- code = TAOS_SYSTEM_ERROR(errno);
- goto _err;
- }
+ // write
+ code = tsdbWriteFile(pWriter->pWriteH, pWriter->fDel.size, pWriter->aBuf[0], size);
+ if (code) goto _err;
- n = taosWriteFile(pWriter->pSmaFD, hdr, TSDB_FHDR_SIZE);
- if (n < 0) {
- code = TAOS_SYSTEM_ERROR(errno);
- goto _err;
- }
+ // update
+ pDelIdx->offset = pWriter->fDel.size;
+ pDelIdx->size = size;
+ pWriter->fDel.size += size;
return code;
_err:
- tsdbError("vgId:%d, update DFileSet header failed since %s", TD_VID(pWriter->pTsdb->pVnode), tstrerror(code));
+ tsdbError("vgId:%d, failed to write del data since %s", TD_VID(pWriter->pTsdb->pVnode), tstrerror(code));
return code;
}
-int32_t tsdbWriteBlockIdx(SDataFWriter *pWriter, SArray *aBlockIdx) {
- int32_t code = 0;
- SHeadFile *pHeadFile = &pWriter->fHead;
- int64_t size = 0;
- int64_t n;
-
- // check
- if (taosArrayGetSize(aBlockIdx) == 0) {
- pHeadFile->offset = pHeadFile->size;
- goto _exit;
- }
+int32_t tsdbWriteDelIdx(SDelFWriter *pWriter, SArray *aDelIdx) {
+ int32_t code = 0;
+ int64_t size;
+ int64_t n;
+ SDelIdx *pDelIdx;
// prepare
- size = sizeof(uint32_t);
- for (int32_t iBlockIdx = 0; iBlockIdx < taosArrayGetSize(aBlockIdx); iBlockIdx++) {
- size += tPutBlockIdx(NULL, taosArrayGet(aBlockIdx, iBlockIdx));
+ size = 0;
+ for (int32_t iDelIdx = 0; iDelIdx < taosArrayGetSize(aDelIdx); iDelIdx++) {
+ size += tPutDelIdx(NULL, taosArrayGet(aDelIdx, iDelIdx));
}
- size += sizeof(TSCKSUM);
// alloc
code = tRealloc(&pWriter->aBuf[0], size);
@@ -1227,383 +1284,170 @@ int32_t tsdbWriteBlockIdx(SDataFWriter *pWriter, SArray *aBlockIdx) {
// build
n = 0;
- n = tPutU32(pWriter->aBuf[0] + n, TSDB_FILE_DLMT);
- for (int32_t iBlockIdx = 0; iBlockIdx < taosArrayGetSize(aBlockIdx); iBlockIdx++) {
- n += tPutBlockIdx(pWriter->aBuf[0] + n, taosArrayGet(aBlockIdx, iBlockIdx));
+ for (int32_t iDelIdx = 0; iDelIdx < taosArrayGetSize(aDelIdx); iDelIdx++) {
+ n += tPutDelIdx(pWriter->aBuf[0] + n, taosArrayGet(aDelIdx, iDelIdx));
}
- taosCalcChecksumAppend(0, pWriter->aBuf[0], size);
-
- ASSERT(n + sizeof(TSCKSUM) == size);
+ ASSERT(n == size);
// write
- n = taosWriteFile(pWriter->pHeadFD, pWriter->aBuf[0], size);
- if (n < 0) {
- code = TAOS_SYSTEM_ERROR(errno);
- goto _err;
- }
+ code = tsdbWriteFile(pWriter->pWriteH, pWriter->fDel.size, pWriter->aBuf[0], size);
+ if (code) goto _err;
// update
- pHeadFile->offset = pHeadFile->size;
- pHeadFile->size += size;
+ pWriter->fDel.offset = pWriter->fDel.size;
+ pWriter->fDel.size += size;
-_exit:
- tsdbTrace("vgId:%d write block idx, offset:%" PRId64 " size:%" PRId64 " nBlockIdx:%d", TD_VID(pWriter->pTsdb->pVnode),
- pHeadFile->offset, size, taosArrayGetSize(aBlockIdx));
return code;
_err:
- tsdbError("vgId:%d, write block idx failed since %s", TD_VID(pWriter->pTsdb->pVnode), tstrerror(code));
+ tsdbError("vgId:%d, write del idx failed since %s", TD_VID(pWriter->pTsdb->pVnode), tstrerror(code));
return code;
}
-int32_t tsdbWriteBlock(SDataFWriter *pWriter, SMapData *mBlock, SBlockIdx *pBlockIdx) {
- int32_t code = 0;
- SHeadFile *pHeadFile = &pWriter->fHead;
- int64_t size;
- int64_t n;
-
- ASSERT(mBlock->nItem > 0);
-
- // alloc
- size = sizeof(uint32_t) + tPutMapData(NULL, mBlock) + sizeof(TSCKSUM);
- code = tRealloc(&pWriter->aBuf[0], size);
- if (code) goto _err;
+int32_t tsdbUpdateDelFileHdr(SDelFWriter *pWriter) {
+ int32_t code = 0;
+ char hdr[TSDB_FHDR_SIZE] = {0};
+ int64_t size = TSDB_FHDR_SIZE;
+ int64_t n;
// build
- n = 0;
- n += tPutU32(pWriter->aBuf[0] + n, TSDB_FILE_DLMT);
- n += tPutMapData(pWriter->aBuf[0] + n, mBlock);
- taosCalcChecksumAppend(0, pWriter->aBuf[0], size);
-
- ASSERT(n + sizeof(TSCKSUM) == size);
+ tPutDelFile(hdr, &pWriter->fDel);
// write
- n = taosWriteFile(pWriter->pHeadFD, pWriter->aBuf[0], size);
- if (n < 0) {
- code = TAOS_SYSTEM_ERROR(errno);
- goto _err;
- }
-
- // update
- pBlockIdx->offset = pHeadFile->size;
- pBlockIdx->size = size;
- pHeadFile->size += size;
+ code = tsdbWriteFile(pWriter->pWriteH, 0, hdr, size);
+ if (code) goto _err;
- tsdbTrace("vgId:%d, write block, file ID:%d commit ID:%d suid:%" PRId64 " uid:%" PRId64 " offset:%" PRId64
- " size:%" PRId64 " nItem:%d",
- TD_VID(pWriter->pTsdb->pVnode), pWriter->wSet.fid, pHeadFile->commitID, pBlockIdx->suid, pBlockIdx->uid,
- pBlockIdx->offset, pBlockIdx->size, mBlock->nItem);
return code;
_err:
- tsdbError("vgId:%d, write block failed since %s", TD_VID(pWriter->pTsdb->pVnode), tstrerror(code));
+ tsdbError("vgId:%d, update del file hdr failed since %s", TD_VID(pWriter->pTsdb->pVnode), tstrerror(code));
return code;
}
+// SDelFReader ====================================================
+struct SDelFReader {
+ STsdb *pTsdb;
+ SDelFile fDel;
+ STsdbFD *pReadH;
+ uint8_t *aBuf[1];
+};
-int32_t tsdbWriteBlockL(SDataFWriter *pWriter, SArray *aBlockL) {
- int32_t code = 0;
- SLastFile *pLastFile = &pWriter->fLast;
- int64_t size;
- int64_t n;
-
- // check
- if (taosArrayGetSize(aBlockL) == 0) {
- pLastFile->offset = pLastFile->size;
- goto _exit;
- }
-
- // size
- size = sizeof(uint32_t); // TSDB_FILE_DLMT
- for (int32_t iBlockL = 0; iBlockL < taosArrayGetSize(aBlockL); iBlockL++) {
- size += tPutBlockL(NULL, taosArrayGet(aBlockL, iBlockL));
- }
- size += sizeof(TSCKSUM);
+int32_t tsdbDelFReaderOpen(SDelFReader **ppReader, SDelFile *pFile, STsdb *pTsdb) {
+ int32_t code = 0;
+ char fname[TSDB_FILENAME_LEN];
+ SDelFReader *pDelFReader;
+ int64_t n;
// alloc
- code = tRealloc(&pWriter->aBuf[0], size);
- if (code) goto _err;
-
- // encode
- n = 0;
- n += tPutU32(pWriter->aBuf[0] + n, TSDB_FILE_DLMT);
- for (int32_t iBlockL = 0; iBlockL < taosArrayGetSize(aBlockL); iBlockL++) {
- n += tPutBlockL(pWriter->aBuf[0] + n, taosArrayGet(aBlockL, iBlockL));
- }
- taosCalcChecksumAppend(0, pWriter->aBuf[0], size);
-
- ASSERT(n + sizeof(TSCKSUM) == size);
-
- // write
- n = taosWriteFile(pWriter->pLastFD, pWriter->aBuf[0], size);
- if (n < 0) {
- code = TAOS_SYSTEM_ERROR(errno);
+ pDelFReader = (SDelFReader *)taosMemoryCalloc(1, sizeof(*pDelFReader));
+ if (pDelFReader == NULL) {
+ code = TSDB_CODE_OUT_OF_MEMORY;
goto _err;
}
- // update
- pLastFile->offset = pLastFile->size;
- pLastFile->size += size;
+ // open impl
+ pDelFReader->pTsdb = pTsdb;
+ pDelFReader->fDel = *pFile;
-_exit:
- tsdbTrace("vgId:%d tsdb write blockl, loffset:%" PRId64 " size:%" PRId64, TD_VID(pWriter->pTsdb->pVnode),
- pLastFile->offset, size);
+ tsdbDelFileName(pTsdb, pFile, fname);
+ code = tsdbOpenFile(fname, pTsdb->pVnode->config.tsdbPageSize, TD_FILE_READ, &pDelFReader->pReadH);
+ if (code) goto _err;
+
+ *ppReader = pDelFReader;
return code;
_err:
- tsdbError("vgId:%d tsdb write blockl failed since %s", TD_VID(pWriter->pTsdb->pVnode), tstrerror(code));
+ tsdbError("vgId:%d, del file reader open failed since %s", TD_VID(pTsdb->pVnode), tstrerror(code));
+ *ppReader = NULL;
return code;
}
-static void tsdbUpdateBlockInfo(SBlockData *pBlockData, SBlock *pBlock) {
- for (int32_t iRow = 0; iRow < pBlockData->nRow; iRow++) {
- TSDBKEY key = {.ts = pBlockData->aTSKEY[iRow], .version = pBlockData->aVersion[iRow]};
-
- if (iRow == 0) {
- if (tsdbKeyCmprFn(&pBlock->minKey, &key) > 0) {
- pBlock->minKey = key;
- }
- } else {
- if (pBlockData->aTSKEY[iRow] == pBlockData->aTSKEY[iRow - 1]) {
- pBlock->hasDup = 1;
- }
- }
+int32_t tsdbDelFReaderClose(SDelFReader **ppReader) {
+ int32_t code = 0;
+ SDelFReader *pReader = *ppReader;
- if (iRow == pBlockData->nRow - 1 && tsdbKeyCmprFn(&pBlock->maxKey, &key) < 0) {
- pBlock->maxKey = key;
+ if (pReader) {
+ tsdbCloseFile(&pReader->pReadH);
+ for (int32_t iBuf = 0; iBuf < sizeof(pReader->aBuf) / sizeof(uint8_t *); iBuf++) {
+ tFree(pReader->aBuf[iBuf]);
}
-
- pBlock->minVer = TMIN(pBlock->minVer, key.version);
- pBlock->maxVer = TMAX(pBlock->maxVer, key.version);
+ taosMemoryFree(pReader);
}
- pBlock->nRow += pBlockData->nRow;
+ *ppReader = NULL;
+
+_exit:
+ return code;
}
-static int32_t tsdbWriteBlockSma(SDataFWriter *pWriter, SBlockData *pBlockData, SSmaInfo *pSmaInfo) {
+int32_t tsdbReadDelData(SDelFReader *pReader, SDelIdx *pDelIdx, SArray *aDelData) {
int32_t code = 0;
+ int64_t offset = pDelIdx->offset;
+ int64_t size = pDelIdx->size;
+ int64_t n;
- pSmaInfo->offset = 0;
- pSmaInfo->size = 0;
-
- // encode
- for (int32_t iColData = 0; iColData < taosArrayGetSize(pBlockData->aIdx); iColData++) {
- SColData *pColData = tBlockDataGetColDataByIdx(pBlockData, iColData);
-
- if ((!pColData->smaOn) || IS_VAR_DATA_TYPE(pColData->type)) continue;
-
- SColumnDataAgg sma;
- tsdbCalcColDataSMA(pColData, &sma);
-
- code = tRealloc(&pWriter->aBuf[0], pSmaInfo->size + tPutColumnDataAgg(NULL, &sma));
- if (code) goto _err;
- pSmaInfo->size += tPutColumnDataAgg(pWriter->aBuf[0] + pSmaInfo->size, &sma);
- }
+ taosArrayClear(aDelData);
- // write
- if (pSmaInfo->size) {
- int32_t size = pSmaInfo->size + sizeof(TSCKSUM);
+ // alloc
+ code = tRealloc(&pReader->aBuf[0], size);
+ if (code) goto _err;
- code = tRealloc(&pWriter->aBuf[0], size);
- if (code) goto _err;
+ // read
+ code = tsdbReadFile(pReader->pReadH, offset, pReader->aBuf[0], size);
+ if (code) goto _err;
- taosCalcChecksumAppend(0, pWriter->aBuf[0], size);
+ // // decode
+ n = 0;
+ while (n < size) {
+ SDelData delData;
+ n += tGetDelData(pReader->aBuf[0] + n, &delData);
- int64_t n = taosWriteFile(pWriter->pSmaFD, pWriter->aBuf[0], size);
- if (n < 0) {
- code = TAOS_SYSTEM_ERROR(errno);
+ if (taosArrayPush(aDelData, &delData) == NULL) {
+ code = TSDB_CODE_OUT_OF_MEMORY;
goto _err;
}
-
- pSmaInfo->offset = pWriter->fSma.size;
- pWriter->fSma.size += size;
}
+ ASSERT(n == size);
return code;
_err:
- tsdbError("vgId:%d tsdb write block sma failed since %s", TD_VID(pWriter->pTsdb->pVnode), tstrerror(code));
+ tsdbError("vgId:%d, read del data failed since %s", TD_VID(pReader->pTsdb->pVnode), tstrerror(code));
return code;
}
-int32_t tsdbWriteBlockData(SDataFWriter *pWriter, SBlockData *pBlockData, SBlockInfo *pBlkInfo, SSmaInfo *pSmaInfo,
- int8_t cmprAlg, int8_t toLast) {
+int32_t tsdbReadDelIdx(SDelFReader *pReader, SArray *aDelIdx) {
int32_t code = 0;
+ int32_t n;
+ int64_t offset = pReader->fDel.offset;
+ int64_t size = pReader->fDel.size - offset;
- ASSERT(pBlockData->nRow > 0);
-
- pBlkInfo->offset = toLast ? pWriter->fLast.size : pWriter->fData.size;
- pBlkInfo->szBlock = 0;
- pBlkInfo->szKey = 0;
+ taosArrayClear(aDelIdx);
- int32_t aBufN[4] = {0};
- code = tCmprBlockData(pBlockData, cmprAlg, NULL, NULL, pWriter->aBuf, aBufN);
+ // alloc
+ code = tRealloc(&pReader->aBuf[0], size);
if (code) goto _err;
- // write =================
- TdFilePtr pFD = toLast ? pWriter->pLastFD : pWriter->pDataFD;
-
- pBlkInfo->szKey = aBufN[3] + aBufN[2];
- pBlkInfo->szBlock = aBufN[0] + aBufN[1] + aBufN[2] + aBufN[3];
-
- int64_t n = taosWriteFile(pFD, pWriter->aBuf[3], aBufN[3]);
- if (n < 0) {
- code = TAOS_SYSTEM_ERROR(errno);
- goto _err;
- }
+ // read
+ code = tsdbReadFile(pReader->pReadH, offset, pReader->aBuf[0], size);
+ if (code) goto _err;
- n = taosWriteFile(pFD, pWriter->aBuf[2], aBufN[2]);
- if (n < 0) {
- code = TAOS_SYSTEM_ERROR(errno);
- goto _err;
- }
+ // decode
+ n = 0;
+ while (n < size) {
+ SDelIdx delIdx;
- if (aBufN[1]) {
- n = taosWriteFile(pFD, pWriter->aBuf[1], aBufN[1]);
- if (n < 0) {
- code = TAOS_SYSTEM_ERROR(errno);
- goto _err;
- }
- }
+ n += tGetDelIdx(pReader->aBuf[0] + n, &delIdx);
- if (aBufN[0]) {
- n = taosWriteFile(pFD, pWriter->aBuf[0], aBufN[0]);
- if (n < 0) {
- code = TAOS_SYSTEM_ERROR(errno);
+ if (taosArrayPush(aDelIdx, &delIdx) == NULL) {
+ code = TSDB_CODE_OUT_OF_MEMORY;
goto _err;
}
}
- // update info
- if (toLast) {
- pWriter->fLast.size += pBlkInfo->szBlock;
- } else {
- pWriter->fData.size += pBlkInfo->szBlock;
- }
-
- // ================= SMA ====================
- if (pSmaInfo) {
- code = tsdbWriteBlockSma(pWriter, pBlockData, pSmaInfo);
- if (code) goto _err;
- }
-
-_exit:
- tsdbTrace("vgId:%d tsdb write block data, suid:%" PRId64 " uid:%" PRId64 " nRow:%d, offset:%" PRId64 " size:%d",
- TD_VID(pWriter->pTsdb->pVnode), pBlockData->suid, pBlockData->uid, pBlockData->nRow, pBlkInfo->offset,
- pBlkInfo->szBlock);
- return code;
-
-_err:
- tsdbError("vgId:%d tsdb write block data failed since %s", TD_VID(pWriter->pTsdb->pVnode), tstrerror(code));
- return code;
-}
-
-int32_t tsdbDFileSetCopy(STsdb *pTsdb, SDFileSet *pSetFrom, SDFileSet *pSetTo) {
- int32_t code = 0;
- int64_t n;
- int64_t size;
- TdFilePtr pOutFD = NULL; // TODO
- TdFilePtr PInFD = NULL; // TODO
- char fNameFrom[TSDB_FILENAME_LEN];
- char fNameTo[TSDB_FILENAME_LEN];
-
- // head
- tsdbHeadFileName(pTsdb, pSetFrom->diskId, pSetFrom->fid, pSetFrom->pHeadF, fNameFrom);
- tsdbHeadFileName(pTsdb, pSetTo->diskId, pSetTo->fid, pSetTo->pHeadF, fNameTo);
-
- pOutFD = taosOpenFile(fNameTo, TD_FILE_WRITE | TD_FILE_CREATE | TD_FILE_TRUNC);
- if (pOutFD == NULL) {
- code = TAOS_SYSTEM_ERROR(errno);
- goto _err;
- }
-
- PInFD = taosOpenFile(fNameFrom, TD_FILE_READ);
- if (PInFD == NULL) {
- code = TAOS_SYSTEM_ERROR(errno);
- goto _err;
- }
-
- n = taosFSendFile(pOutFD, PInFD, 0, pSetFrom->pHeadF->size);
- if (n < 0) {
- code = TAOS_SYSTEM_ERROR(errno);
- goto _err;
- }
- taosCloseFile(&pOutFD);
- taosCloseFile(&PInFD);
-
- // data
- tsdbDataFileName(pTsdb, pSetFrom->diskId, pSetFrom->fid, pSetFrom->pDataF, fNameFrom);
- tsdbDataFileName(pTsdb, pSetTo->diskId, pSetTo->fid, pSetTo->pDataF, fNameTo);
-
- pOutFD = taosOpenFile(fNameTo, TD_FILE_WRITE | TD_FILE_CREATE | TD_FILE_TRUNC);
- if (pOutFD == NULL) {
- code = TAOS_SYSTEM_ERROR(errno);
- goto _err;
- }
-
- PInFD = taosOpenFile(fNameFrom, TD_FILE_READ);
- if (PInFD == NULL) {
- code = TAOS_SYSTEM_ERROR(errno);
- goto _err;
- }
-
- n = taosFSendFile(pOutFD, PInFD, 0, pSetFrom->pDataF->size);
- if (n < 0) {
- code = TAOS_SYSTEM_ERROR(errno);
- goto _err;
- }
- taosCloseFile(&pOutFD);
- taosCloseFile(&PInFD);
-
- // last
- tsdbLastFileName(pTsdb, pSetFrom->diskId, pSetFrom->fid, pSetFrom->pLastF, fNameFrom);
- tsdbLastFileName(pTsdb, pSetTo->diskId, pSetTo->fid, pSetTo->pLastF, fNameTo);
-
- pOutFD = taosOpenFile(fNameTo, TD_FILE_WRITE | TD_FILE_CREATE | TD_FILE_TRUNC);
- if (pOutFD == NULL) {
- code = TAOS_SYSTEM_ERROR(errno);
- goto _err;
- }
-
- PInFD = taosOpenFile(fNameFrom, TD_FILE_READ);
- if (PInFD == NULL) {
- code = TAOS_SYSTEM_ERROR(errno);
- goto _err;
- }
-
- n = taosFSendFile(pOutFD, PInFD, 0, pSetFrom->pLastF->size);
- if (n < 0) {
- code = TAOS_SYSTEM_ERROR(errno);
- goto _err;
- }
- taosCloseFile(&pOutFD);
- taosCloseFile(&PInFD);
-
- // sma
- tsdbSmaFileName(pTsdb, pSetFrom->diskId, pSetFrom->fid, pSetFrom->pSmaF, fNameFrom);
- tsdbSmaFileName(pTsdb, pSetTo->diskId, pSetTo->fid, pSetTo->pSmaF, fNameTo);
-
- pOutFD = taosOpenFile(fNameTo, TD_FILE_WRITE | TD_FILE_CREATE | TD_FILE_TRUNC);
- if (pOutFD == NULL) {
- code = TAOS_SYSTEM_ERROR(errno);
- goto _err;
- }
-
- PInFD = taosOpenFile(fNameFrom, TD_FILE_READ);
- if (PInFD == NULL) {
- code = TAOS_SYSTEM_ERROR(errno);
- goto _err;
- }
-
- n = taosFSendFile(pOutFD, PInFD, 0, pSetFrom->pSmaF->size);
- if (n < 0) {
- code = TAOS_SYSTEM_ERROR(errno);
- goto _err;
- }
- taosCloseFile(&pOutFD);
- taosCloseFile(&PInFD);
+ ASSERT(n == size);
return code;
_err:
- tsdbError("vgId:%d, tsdb DFileSet copy failed since %s", TD_VID(pTsdb->pVnode), tstrerror(code));
+ tsdbError("vgId:%d, read del idx failed since %s", TD_VID(pReader->pTsdb->pVnode), tstrerror(code));
return code;
-}
+}
\ No newline at end of file
diff --git a/source/dnode/vnode/src/tsdb/tsdbRetention.c b/source/dnode/vnode/src/tsdb/tsdbRetention.c
index a30b9154ab07084adc31c65089d223ac728445ae..2c68c571765f063c79898f4b7bc722a675cb1a08 100644
--- a/source/dnode/vnode/src/tsdb/tsdbRetention.c
+++ b/source/dnode/vnode/src/tsdb/tsdbRetention.c
@@ -53,18 +53,19 @@ int32_t tsdbDoRetention(STsdb *pTsdb, int64_t now) {
if (code) goto _err;
for (int32_t iSet = 0; iSet < taosArrayGetSize(fs.aDFileSet); iSet++) {
- SDFileSet *pSet = (SDFileSet *)taosArrayGet(pTsdb->fs.aDFileSet, iSet);
+ SDFileSet *pSet = (SDFileSet *)taosArrayGet(fs.aDFileSet, iSet);
int32_t expLevel = tsdbFidLevel(pSet->fid, &pTsdb->keepCfg, now);
SDiskID did;
if (expLevel < 0) {
taosMemoryFree(pSet->pHeadF);
taosMemoryFree(pSet->pDataF);
- taosMemoryFree(pSet->pLastF);
+ taosMemoryFree(pSet->aSttF[0]);
taosMemoryFree(pSet->pSmaF);
taosArrayRemove(fs.aDFileSet, iSet);
iSet--;
} else {
+ if (expLevel == 0) continue;
if (tfsAllocDisk(pTsdb->pVnode->pTfs, expLevel, &did) < 0) {
code = terrno;
goto _exit;
@@ -82,8 +83,6 @@ int32_t tsdbDoRetention(STsdb *pTsdb, int64_t now) {
code = tsdbFSUpsertFSet(&fs, &fSet);
if (code) goto _err;
}
-
- /* code */
}
// do change fs
diff --git a/source/dnode/vnode/src/tsdb/tsdbSnapshot.c b/source/dnode/vnode/src/tsdb/tsdbSnapshot.c
index ab2b2b617a3d36dbc2c86c2a2207cffac8f087f6..a928dc3484c67461a8b5a49c39d60e3af4336e8b 100644
--- a/source/dnode/vnode/src/tsdb/tsdbSnapshot.c
+++ b/source/dnode/vnode/src/tsdb/tsdbSnapshot.c
@@ -16,6 +16,29 @@
#include "tsdb.h"
// STsdbSnapReader ========================================
+typedef enum { SNAP_DATA_FILE_ITER = 0, SNAP_STT_FILE_ITER } EFIterT;
+typedef struct {
+ SRBTreeNode n;
+ SRowInfo rInfo;
+ EFIterT type;
+ union {
+ struct {
+ SArray* aBlockIdx;
+ int32_t iBlockIdx;
+ SBlockIdx* pBlockIdx;
+ SMapData mBlock;
+ int32_t iBlock;
+ }; // .data file
+ struct {
+ int32_t iStt;
+ SArray* aSttBlk;
+ int32_t iSttBlk;
+ }; // .stt file
+ };
+ SBlockData bData;
+ int32_t iRow;
+} SFDataIter;
+
struct STsdbSnapReader {
STsdb* pTsdb;
int64_t sver;
@@ -26,146 +49,301 @@ struct STsdbSnapReader {
int8_t dataDone;
int32_t fid;
SDataFReader* pDataFReader;
- SArray* aBlockIdx; // SArray
- SArray* aBlockL; // SArray
- SBlockIdx* pBlockIdx;
- SBlockL* pBlockL;
-
- int32_t iBlockIdx;
- int32_t iBlockL;
- SMapData mBlock; // SMapData
- int32_t iBlock;
- SBlockData oBlockData;
- SBlockData nBlockData;
+ SFDataIter* pIter;
+ SRBTree rbt;
+ SFDataIter aFDataIter[TSDB_MAX_STT_TRIGGER + 1];
+ SBlockData bData;
+ SSkmInfo skmTable;
// for del file
int8_t delDone;
SDelFReader* pDelFReader;
SArray* aDelIdx; // SArray
int32_t iDelIdx;
SArray* aDelData; // SArray
+ uint8_t* aBuf[5];
};
-static int32_t tsdbSnapReadData(STsdbSnapReader* pReader, uint8_t** ppData) {
+extern int32_t tRowInfoCmprFn(const void* p1, const void* p2);
+extern int32_t tsdbReadDataBlockEx(SDataFReader* pReader, SDataBlk* pDataBlk, SBlockData* pBlockData);
+extern int32_t tsdbUpdateTableSchema(SMeta* pMeta, int64_t suid, int64_t uid, SSkmInfo* pSkmInfo);
+
+static int32_t tsdbSnapReadOpenFile(STsdbSnapReader* pReader) {
int32_t code = 0;
- STsdb* pTsdb = pReader->pTsdb;
- while (true) {
- if (pReader->pDataFReader == NULL) {
- // next
- SDFileSet dFileSet = {.fid = pReader->fid};
- SDFileSet* pSet = taosArraySearch(pReader->fs.aDFileSet, &dFileSet, tDFileSetCmprFn, TD_GT);
- if (pSet == NULL) goto _exit;
- pReader->fid = pSet->fid;
-
- // load
- code = tsdbDataFReaderOpen(&pReader->pDataFReader, pTsdb, pSet);
- if (code) goto _err;
+ SDFileSet dFileSet = {.fid = pReader->fid};
+ SDFileSet* pSet = taosArraySearch(pReader->fs.aDFileSet, &dFileSet, tDFileSetCmprFn, TD_GT);
+ if (pSet == NULL) return code;
- code = tsdbReadBlockIdx(pReader->pDataFReader, pReader->aBlockIdx);
- if (code) goto _err;
+ pReader->fid = pSet->fid;
+ code = tsdbDataFReaderOpen(&pReader->pDataFReader, pReader->pTsdb, pSet);
+ if (code) goto _err;
+
+ pReader->pIter = NULL;
+ tRBTreeCreate(&pReader->rbt, tRowInfoCmprFn);
+
+ // .data file
+ SFDataIter* pIter = &pReader->aFDataIter[0];
+ pIter->type = SNAP_DATA_FILE_ITER;
+
+ code = tsdbReadBlockIdx(pReader->pDataFReader, pIter->aBlockIdx);
+ if (code) goto _err;
- code = tsdbReadBlockL(pReader->pDataFReader, pReader->aBlockL);
+ for (pIter->iBlockIdx = 0; pIter->iBlockIdx < taosArrayGetSize(pIter->aBlockIdx); pIter->iBlockIdx++) {
+ pIter->pBlockIdx = (SBlockIdx*)taosArrayGet(pIter->aBlockIdx, pIter->iBlockIdx);
+
+ code = tsdbReadDataBlk(pReader->pDataFReader, pIter->pBlockIdx, &pIter->mBlock);
+ if (code) goto _err;
+
+ for (pIter->iBlock = 0; pIter->iBlock < pIter->mBlock.nItem; pIter->iBlock++) {
+ SDataBlk dataBlk;
+ tMapDataGetItemByIdx(&pIter->mBlock, pIter->iBlock, &dataBlk, tGetDataBlk);
+
+ if (dataBlk.minVer > pReader->ever || dataBlk.maxVer < pReader->sver) continue;
+
+ code = tsdbReadDataBlockEx(pReader->pDataFReader, &dataBlk, &pIter->bData);
if (code) goto _err;
- // init
- pReader->iBlockIdx = 0;
- if (pReader->iBlockIdx < taosArrayGetSize(pReader->aBlockIdx)) {
- pReader->pBlockIdx = (SBlockIdx*)taosArrayGet(pReader->aBlockIdx, pReader->iBlockIdx);
+ ASSERT(pIter->pBlockIdx->suid == pIter->bData.suid);
+ ASSERT(pIter->pBlockIdx->uid == pIter->bData.uid);
- code = tsdbReadBlock(pReader->pDataFReader, pReader->pBlockIdx, &pReader->mBlock);
- if (code) goto _err;
+ for (pIter->iRow = 0; pIter->iRow < pIter->bData.nRow; pIter->iRow++) {
+ int64_t rowVer = pIter->bData.aVersion[pIter->iRow];
- pReader->iBlock = 0;
- } else {
- pReader->pBlockIdx = NULL;
+ if (rowVer >= pReader->sver && rowVer <= pReader->ever) {
+ pIter->rInfo.suid = pIter->pBlockIdx->suid;
+ pIter->rInfo.uid = pIter->pBlockIdx->uid;
+ pIter->rInfo.row = tsdbRowFromBlockData(&pIter->bData, pIter->iRow);
+ goto _add_iter_and_break;
+ }
}
+ }
- pReader->iBlockL = 0;
- while (true) {
- if (pReader->iBlockL >= taosArrayGetSize(pReader->aBlockL)) {
- pReader->pBlockL = NULL;
- break;
- }
+ continue;
- pReader->pBlockL = (SBlockL*)taosArrayGet(pReader->aBlockL, pReader->iBlockL);
- if (pReader->pBlockL->minVer <= pReader->ever && pReader->pBlockL->maxVer >= pReader->sver) {
- // TODO
- break;
- }
+ _add_iter_and_break:
+ tRBTreePut(&pReader->rbt, (SRBTreeNode*)pIter);
+ break;
+ }
- pReader->iBlockL++;
- }
+ // .stt file
+ pIter = &pReader->aFDataIter[1];
+ for (int32_t iStt = 0; iStt < pSet->nSttF; iStt++) {
+ pIter->type = SNAP_STT_FILE_ITER;
+ pIter->iStt = iStt;
- tsdbInfo("vgId:%d, vnode snapshot tsdb open data file to read for %s, fid:%d", TD_VID(pTsdb->pVnode), pTsdb->path,
- pReader->fid);
+ code = tsdbReadSttBlk(pReader->pDataFReader, iStt, pIter->aSttBlk);
+ if (code) goto _err;
+
+ for (pIter->iSttBlk = 0; pIter->iSttBlk < taosArrayGetSize(pIter->aSttBlk); pIter->iSttBlk++) {
+ SSttBlk* pSttBlk = (SSttBlk*)taosArrayGet(pIter->aSttBlk, pIter->iSttBlk);
+
+ if (pSttBlk->minVer > pReader->ever) continue;
+ if (pSttBlk->maxVer < pReader->sver) continue;
+
+ code = tsdbReadSttBlockEx(pReader->pDataFReader, iStt, pSttBlk, &pIter->bData);
+ if (code) goto _err;
+
+ for (pIter->iRow = 0; pIter->iRow < pIter->bData.nRow; pIter->iRow++) {
+ int64_t rowVer = pIter->bData.aVersion[pIter->iRow];
+
+ if (rowVer >= pReader->sver && rowVer <= pReader->ever) {
+ pIter->rInfo.suid = pIter->bData.suid;
+ pIter->rInfo.uid = pIter->bData.uid;
+ pIter->rInfo.row = tsdbRowFromBlockData(&pIter->bData, pIter->iRow);
+ goto _add_iter;
+ }
+ }
}
- while (true) {
- if (pReader->pBlockIdx && pReader->pBlockL) {
- TABLEID id = {.suid = pReader->pBlockL->suid, .uid = pReader->pBlockL->minUid};
+ continue;
- ASSERT(0);
+ _add_iter:
+ tRBTreePut(&pReader->rbt, (SRBTreeNode*)pIter);
+ pIter++;
+ }
- // if (tTABLEIDCmprFn(pReader->pBlockIdx, &minId) < 0) {
- // // TODO
- // } else if (tTABLEIDCmprFn(pReader->pBlockIdx, &maxId) < 0) {
- // // TODO
- // } else {
- // // TODO
- // }
- } else if (pReader->pBlockIdx) {
- while (pReader->iBlock < pReader->mBlock.nItem) {
- SBlock block;
- tMapDataGetItemByIdx(&pReader->mBlock, pReader->iBlock, &block, tGetBlock);
-
- if (block.minVer <= pReader->ever && block.maxVer >= pReader->sver) {
- // load data (todo)
- }
+ tsdbInfo("vgId:%d, vnode snapshot tsdb open data file to read for %s, fid:%d", TD_VID(pReader->pTsdb->pVnode),
+ pReader->pTsdb->path, pReader->fid);
+ return code;
- // next
- pReader->iBlock++;
- if (*ppData) break;
+_err:
+ tsdbError("vgId:%d vnode snapshot tsdb snap read open file failed since %s", TD_VID(pReader->pTsdb->pVnode),
+ tstrerror(code));
+ return code;
+}
+
+static SRowInfo* tsdbSnapGetRow(STsdbSnapReader* pReader) { return pReader->pIter ? &pReader->pIter->rInfo : NULL; }
+
+static int32_t tsdbSnapNextRow(STsdbSnapReader* pReader) {
+ int32_t code = 0;
+
+ if (pReader->pIter) {
+ SFDataIter* pIter = pReader->pIter;
+
+ while (true) {
+ _find_row:
+ for (pIter->iRow++; pIter->iRow < pIter->bData.nRow; pIter->iRow++) {
+ int64_t rowVer = pIter->bData.aVersion[pIter->iRow];
+
+ if (rowVer >= pReader->sver && rowVer <= pReader->ever) {
+ pIter->rInfo.uid = pIter->bData.uid ? pIter->bData.uid : pIter->bData.aUid[pIter->iRow];
+ pIter->rInfo.row = tsdbRowFromBlockData(&pIter->bData, pIter->iRow);
+ goto _out;
}
+ }
- if (pReader->iBlock >= pReader->mBlock.nItem) {
- pReader->iBlockIdx++;
- if (pReader->iBlockIdx < taosArrayGetSize(pReader->aBlockIdx)) {
- pReader->pBlockIdx = (SBlockIdx*)taosArrayGet(pReader->aBlockIdx, pReader->iBlockIdx);
+ if (pIter->type == SNAP_DATA_FILE_ITER) {
+ while (true) {
+ for (pIter->iBlock++; pIter->iBlock < pIter->mBlock.nItem; pIter->iBlock++) {
+ SDataBlk dataBlk;
+ tMapDataGetItemByIdx(&pIter->mBlock, pIter->iBlock, &dataBlk, tGetDataBlk);
- code = tsdbReadBlock(pReader->pDataFReader, pReader->pBlockIdx, &pReader->mBlock);
+ if (dataBlk.minVer > pReader->ever || dataBlk.maxVer < pReader->sver) continue;
+
+ code = tsdbReadDataBlockEx(pReader->pDataFReader, &dataBlk, &pIter->bData);
if (code) goto _err;
- pReader->iBlock = 0;
- } else {
- pReader->pBlockIdx = NULL;
+ pIter->iRow = -1;
+ goto _find_row;
}
+
+ pIter->iBlockIdx++;
+ if (pIter->iBlockIdx >= taosArrayGetSize(pIter->aBlockIdx)) break;
+
+ pIter->pBlockIdx = (SBlockIdx*)taosArrayGet(pIter->aBlockIdx, pIter->iBlockIdx);
+ code = tsdbReadDataBlk(pReader->pDataFReader, pIter->pBlockIdx, &pIter->mBlock);
+ if (code) goto _err;
+ pIter->iBlock = -1;
}
- if (*ppData) goto _exit;
- } else if (pReader->pBlockL) {
- while (pReader->pBlockL) {
- if (pReader->pBlockL->minVer <= pReader->ever && pReader->pBlockL->maxVer >= pReader->sver) {
- // load data (todo)
- }
+ pReader->pIter = NULL;
+ } else if (pIter->type == SNAP_STT_FILE_ITER) {
+ for (pIter->iSttBlk++; pIter->iSttBlk < taosArrayGetSize(pIter->aSttBlk); pIter->iSttBlk++) {
+ SSttBlk* pSttBlk = (SSttBlk*)taosArrayGet(pIter->aSttBlk, pIter->iSttBlk);
- // next
- pReader->iBlockL++;
- if (pReader->iBlockL < taosArrayGetSize(pReader->aBlockL)) {
- pReader->pBlockL = (SBlockL*)taosArrayGetSize(pReader->aBlockL);
- } else {
- pReader->pBlockL = NULL;
- }
+ if (pSttBlk->minVer > pReader->ever || pSttBlk->maxVer < pReader->sver) continue;
- if (*ppData) goto _exit;
+ code = tsdbReadSttBlockEx(pReader->pDataFReader, pIter->iStt, pSttBlk, &pIter->bData);
+ if (code) goto _err;
+
+ pIter->iRow = -1;
+ goto _find_row;
}
+
+ pReader->pIter = NULL;
+ } else {
+ ASSERT(0);
+ }
+ }
+
+ _out:
+ pIter = (SFDataIter*)tRBTreeMin(&pReader->rbt);
+ if (pReader->pIter && pIter) {
+ int32_t c = tRowInfoCmprFn(&pReader->pIter->rInfo, &pIter->rInfo);
+ if (c > 0) {
+ tRBTreePut(&pReader->rbt, (SRBTreeNode*)pReader->pIter);
+ pReader->pIter = NULL;
} else {
+ ASSERT(c);
+ }
+ }
+ }
+
+ if (pReader->pIter == NULL) {
+ pReader->pIter = (SFDataIter*)tRBTreeMin(&pReader->rbt);
+ if (pReader->pIter) {
+ tRBTreeDrop(&pReader->rbt, (SRBTreeNode*)pReader->pIter);
+ }
+ }
+
+ return code;
+
+_err:
+ return code;
+}
+
+static int32_t tsdbSnapCmprData(STsdbSnapReader* pReader, uint8_t** ppData) {
+ int32_t code = 0;
+
+ ASSERT(pReader->bData.nRow);
+
+ int32_t aBufN[5] = {0};
+ code = tCmprBlockData(&pReader->bData, TWO_STAGE_COMP, NULL, NULL, pReader->aBuf, aBufN);
+ if (code) goto _exit;
+
+ int32_t size = aBufN[0] + aBufN[1] + aBufN[2] + aBufN[3];
+ *ppData = taosMemoryMalloc(sizeof(SSnapDataHdr) + size);
+ if (*ppData == NULL) {
+ code = TSDB_CODE_OUT_OF_MEMORY;
+ goto _exit;
+ }
+
+ SSnapDataHdr* pHdr = (SSnapDataHdr*)*ppData;
+ pHdr->type = SNAP_DATA_TSDB;
+ pHdr->size = size;
+
+ memcpy(pHdr->data, pReader->aBuf[3], aBufN[3]);
+ memcpy(pHdr->data + aBufN[3], pReader->aBuf[2], aBufN[2]);
+ if (aBufN[1]) {
+ memcpy(pHdr->data + aBufN[3] + aBufN[2], pReader->aBuf[1], aBufN[1]);
+ }
+ if (aBufN[0]) {
+ memcpy(pHdr->data + aBufN[3] + aBufN[2] + aBufN[1], pReader->aBuf[0], aBufN[0]);
+ }
+
+_exit:
+ return code;
+}
+
+static int32_t tsdbSnapReadData(STsdbSnapReader* pReader, uint8_t** ppData) {
+ int32_t code = 0;
+ STsdb* pTsdb = pReader->pTsdb;
+
+ while (true) {
+ if (pReader->pDataFReader == NULL) {
+ code = tsdbSnapReadOpenFile(pReader);
+ if (code) goto _err;
+ }
+
+ if (pReader->pDataFReader == NULL) break;
+
+ SRowInfo* pRowInfo = tsdbSnapGetRow(pReader);
+ if (pRowInfo == NULL) {
+ tsdbDataFReaderClose(&pReader->pDataFReader);
+ continue;
+ }
+
+ TABLEID id = {.suid = pRowInfo->suid, .uid = pRowInfo->uid};
+ SBlockData* pBlockData = &pReader->bData;
+
+ code = tsdbUpdateTableSchema(pTsdb->pVnode->pMeta, id.suid, id.uid, &pReader->skmTable);
+ if (code) goto _err;
+
+ code = tBlockDataInit(pBlockData, &id, pReader->skmTable.pTSchema, NULL, 0);
+ if (code) goto _err;
+
+ while (pRowInfo->suid == id.suid && pRowInfo->uid == id.uid) {
+ code = tBlockDataAppendRow(pBlockData, &pRowInfo->row, NULL, pRowInfo->uid);
+ if (code) goto _err;
+
+ code = tsdbSnapNextRow(pReader);
+ if (code) goto _err;
+
+ pRowInfo = tsdbSnapGetRow(pReader);
+ if (pRowInfo == NULL) {
tsdbDataFReaderClose(&pReader->pDataFReader);
break;
}
+
+ if (pBlockData->nRow >= 4096) break;
}
+
+ code = tsdbSnapCmprData(pReader, ppData);
+ if (code) goto _err;
+
+ break;
}
-_exit:
return code;
_err:
@@ -216,7 +394,6 @@ static int32_t tsdbSnapReadDel(STsdbSnapReader* pReader, uint8_t** ppData) {
size += tPutDelData(NULL, pDelData);
}
}
-
if (size == 0) continue;
// org data
@@ -292,23 +469,33 @@ int32_t tsdbSnapReaderOpen(STsdb* pTsdb, int64_t sver, int64_t ever, int8_t type
goto _err;
}
+ // data
pReader->fid = INT32_MIN;
- pReader->aBlockIdx = taosArrayInit(0, sizeof(SBlockIdx));
- if (pReader->aBlockIdx == NULL) {
- code = TSDB_CODE_OUT_OF_MEMORY;
- goto _err;
- }
- pReader->aBlockL = taosArrayInit(0, sizeof(SBlockL));
- if (pReader->aBlockL == NULL) {
- code = TSDB_CODE_OUT_OF_MEMORY;
- goto _err;
+ for (int32_t iIter = 0; iIter < sizeof(pReader->aFDataIter) / sizeof(pReader->aFDataIter[0]); iIter++) {
+ SFDataIter* pIter = &pReader->aFDataIter[iIter];
+
+ if (iIter == 0) {
+ pIter->aBlockIdx = taosArrayInit(0, sizeof(SBlockIdx));
+ if (pIter->aBlockIdx == NULL) {
+ code = TSDB_CODE_OUT_OF_MEMORY;
+ goto _err;
+ }
+ } else {
+ pIter->aSttBlk = taosArrayInit(0, sizeof(SSttBlk));
+ if (pIter->aSttBlk == NULL) {
+ code = TSDB_CODE_OUT_OF_MEMORY;
+ goto _err;
+ }
+ }
+
+ code = tBlockDataCreate(&pIter->bData);
+ if (code) goto _err;
}
- pReader->mBlock = tMapDataInit();
- code = tBlockDataCreate(&pReader->oBlockData);
- if (code) goto _err;
- code = tBlockDataCreate(&pReader->nBlockData);
+
+ code = tBlockDataCreate(&pReader->bData);
if (code) goto _err;
+ // del
pReader->aDelIdx = taosArrayInit(0, sizeof(SDelIdx));
if (pReader->aDelIdx == NULL) {
code = TSDB_CODE_OUT_OF_MEMORY;
@@ -335,18 +522,26 @@ int32_t tsdbSnapReaderClose(STsdbSnapReader** ppReader) {
int32_t code = 0;
STsdbSnapReader* pReader = *ppReader;
- if (pReader->pDataFReader) {
- tsdbDataFReaderClose(&pReader->pDataFReader);
- }
- taosArrayDestroy(pReader->aBlockL);
- taosArrayDestroy(pReader->aBlockIdx);
- tMapDataClear(&pReader->mBlock);
- tBlockDataDestroy(&pReader->oBlockData, 1);
- tBlockDataDestroy(&pReader->nBlockData, 1);
-
- if (pReader->pDelFReader) {
- tsdbDelFReaderClose(&pReader->pDelFReader);
+ // data
+ if (pReader->pDataFReader) tsdbDataFReaderClose(&pReader->pDataFReader);
+ for (int32_t iIter = 0; iIter < sizeof(pReader->aFDataIter) / sizeof(pReader->aFDataIter[0]); iIter++) {
+ SFDataIter* pIter = &pReader->aFDataIter[iIter];
+
+ if (iIter == 0) {
+ taosArrayDestroy(pIter->aBlockIdx);
+ tMapDataClear(&pIter->mBlock);
+ } else {
+ taosArrayDestroy(pIter->aSttBlk);
+ }
+
+ tBlockDataDestroy(&pIter->bData, 1);
}
+
+ tBlockDataDestroy(&pReader->bData, 1);
+ tTSchemaDestroy(pReader->skmTable.pTSchema);
+
+ // del
+ if (pReader->pDelFReader) tsdbDelFReaderClose(&pReader->pDelFReader);
taosArrayDestroy(pReader->aDelIdx);
taosArrayDestroy(pReader->aDelData);
@@ -354,6 +549,10 @@ int32_t tsdbSnapReaderClose(STsdbSnapReader** ppReader) {
tsdbInfo("vgId:%d, vnode snapshot tsdb reader closed for %s", TD_VID(pReader->pTsdb->pVnode), pReader->pTsdb->path);
+ for (int32_t iBuf = 0; iBuf < sizeof(pReader->aBuf) / sizeof(pReader->aBuf[0]); iBuf++) {
+ tFree(pReader->aBuf[iBuf]);
+ }
+
taosMemoryFree(pReader);
*ppReader = NULL;
return code;
@@ -410,40 +609,37 @@ struct STsdbSnapWriter {
STsdbFS fs;
// config
- int32_t minutes;
- int8_t precision;
- int32_t minRow;
- int32_t maxRow;
- int8_t cmprAlg;
- int64_t commitID;
-
+ int32_t minutes;
+ int8_t precision;
+ int32_t minRow;
+ int32_t maxRow;
+ int8_t cmprAlg;
+ int64_t commitID;
uint8_t* aBuf[5];
+
// for data file
SBlockData bData;
-
- int32_t fid;
- SDataFReader* pDataFReader;
- SArray* aBlockIdx; // SArray
- int32_t iBlockIdx;
- SBlockIdx* pBlockIdx;
- SMapData mBlock; // SMapData
- int32_t iBlock;
- SBlockData* pBlockData;
- int32_t iRow;
- SBlockData bDataR;
- SArray* aBlockL; // SArray
- int32_t iBlockL;
- SBlockData lDataR;
-
- SDataFWriter* pDataFWriter;
- SBlockIdx* pBlockIdxW; // NULL when no committing table
- SBlock blockW;
- SBlockData bDataW;
- SBlockIdx blockIdxW;
-
- SMapData mBlockW; // SMapData
- SArray* aBlockIdxW; // SArray
- SArray* aBlockLW; // SArray
+ int32_t fid;
+ TABLEID id;
+ SSkmInfo skmTable;
+ struct {
+ SDataFReader* pReader;
+ SArray* aBlockIdx;
+ int32_t iBlockIdx;
+ SBlockIdx* pBlockIdx;
+ SMapData mDataBlk;
+ int32_t iDataBlk;
+ SBlockData bData;
+ int32_t iRow;
+ } dReader;
+ struct {
+ SDataFWriter* pWriter;
+ SArray* aBlockIdx;
+ SMapData mDataBlk;
+ SArray* aSttBlk;
+ SBlockData bData;
+ SBlockData sData;
+ } dWriter;
// for del file
SDelFReader* pDelFReader;
@@ -454,518 +650,448 @@ struct STsdbSnapWriter {
SArray* aDelIdxW;
};
-static int32_t tsdbSnapWriteTableDataEnd(STsdbSnapWriter* pWriter) {
+// SNAP_DATA_TSDB
+extern int32_t tsdbWriteDataBlock(SDataFWriter* pWriter, SBlockData* pBlockData, SMapData* mDataBlk, int8_t cmprAlg);
+extern int32_t tsdbWriteSttBlock(SDataFWriter* pWriter, SBlockData* pBlockData, SArray* aSttBlk, int8_t cmprAlg);
+
+static int32_t tsdbSnapNextTableData(STsdbSnapWriter* pWriter) {
int32_t code = 0;
- ASSERT(pWriter->pDataFWriter);
+ ASSERT(pWriter->dReader.iRow >= pWriter->dReader.bData.nRow);
- if (pWriter->pBlockIdxW == NULL) goto _exit;
+ if (pWriter->dReader.iBlockIdx < taosArrayGetSize(pWriter->dReader.aBlockIdx)) {
+ pWriter->dReader.pBlockIdx = (SBlockIdx*)taosArrayGet(pWriter->dReader.aBlockIdx, pWriter->dReader.iBlockIdx);
- // consume remain rows
- if (pWriter->pBlockData) {
- ASSERT(pWriter->iRow < pWriter->pBlockData->nRow);
- while (pWriter->iRow < pWriter->pBlockData->nRow) {
- code = tBlockDataAppendRow(&pWriter->bDataW, &tsdbRowFromBlockData(pWriter->pBlockData, pWriter->iRow), NULL,
- 0); // todo
- if (code) goto _err;
+ code = tsdbReadDataBlk(pWriter->dReader.pReader, pWriter->dReader.pBlockIdx, &pWriter->dReader.mDataBlk);
+ if (code) goto _exit;
- if (pWriter->bDataW.nRow >= pWriter->maxRow * 4 / 5) {
- // pWriter->blockW.last = 0;
- // code = tsdbWriteBlockData(pWriter->pDataFWriter, &pWriter->bDataW, NULL, NULL, pWriter->pBlockIdxW,
- // &pWriter->blockW, pWriter->cmprAlg);
- if (code) goto _err;
+ pWriter->dReader.iBlockIdx++;
+ } else {
+ pWriter->dReader.pBlockIdx = NULL;
+ tMapDataReset(&pWriter->dReader.mDataBlk);
+ }
+ pWriter->dReader.iDataBlk = 0; // point to the next one
+ tBlockDataReset(&pWriter->dReader.bData);
+ pWriter->dReader.iRow = 0;
- code = tMapDataPutItem(&pWriter->mBlockW, &pWriter->blockW, tPutBlock);
- if (code) goto _err;
+_exit:
+ return code;
+}
- tBlockReset(&pWriter->blockW);
- tBlockDataClear(&pWriter->bDataW);
- }
+static int32_t tsdbSnapWriteCopyData(STsdbSnapWriter* pWriter, TABLEID* pId) {
+ int32_t code = 0;
+
+ while (true) {
+ if (pWriter->dReader.pBlockIdx == NULL) break;
+ if (tTABLEIDCmprFn(pWriter->dReader.pBlockIdx, pId) >= 0) break;
+
+ SBlockIdx blkIdx = *pWriter->dReader.pBlockIdx;
+ code = tsdbWriteDataBlk(pWriter->dWriter.pWriter, &pWriter->dReader.mDataBlk, &blkIdx);
+ if (code) goto _exit;
- pWriter->iRow++;
+ if (taosArrayPush(pWriter->dWriter.aBlockIdx, &blkIdx) == NULL) {
+ code = TSDB_CODE_OUT_OF_MEMORY;
+ goto _exit;
}
+
+ code = tsdbSnapNextTableData(pWriter);
+ if (code) goto _exit;
}
- // write remain data if has
- if (pWriter->bDataW.nRow > 0) {
- // pWriter->blockW.last = 0;
- if (pWriter->bDataW.nRow < pWriter->minRow) {
- if (pWriter->iBlock > pWriter->mBlock.nItem) {
- // pWriter->blockW.last = 1;
- }
- }
+_exit:
+ return code;
+}
- // code = tsdbWriteBlockData(pWriter->pDataFWriter, &pWriter->bDataW, NULL, NULL, pWriter->pBlockIdxW,
- // &pWriter->blockW, pWriter->cmprAlg);
- // if (code) goto _err;
+static int32_t tsdbSnapWriteTableDataStart(STsdbSnapWriter* pWriter, TABLEID* pId) {
+ int32_t code = 0;
- code = tMapDataPutItem(&pWriter->mBlockW, &pWriter->blockW, tPutBlock);
- if (code) goto _err;
+ code = tsdbSnapWriteCopyData(pWriter, pId);
+ if (code) goto _err;
+
+ pWriter->id.suid = pId->suid;
+ pWriter->id.uid = pId->uid;
+
+ code = tsdbUpdateTableSchema(pWriter->pTsdb->pVnode->pMeta, pId->suid, pId->uid, &pWriter->skmTable);
+ if (code) goto _err;
+
+ tMapDataReset(&pWriter->dWriter.mDataBlk);
+ code = tBlockDataInit(&pWriter->dWriter.bData, pId, pWriter->skmTable.pTSchema, NULL, 0);
+ if (code) goto _err;
+
+ return code;
+
+_err:
+ tsdbError("vgId:%d %s failed since %s", TD_VID(pWriter->pTsdb->pVnode), __func__, tstrerror(code));
+ return code;
+}
+
+static int32_t tsdbSnapWriteTableDataEnd(STsdbSnapWriter* pWriter) {
+ int32_t code = 0;
+
+ if (pWriter->id.suid == 0 && pWriter->id.uid == 0) return code;
+
+ int32_t c = 1;
+ if (pWriter->dReader.pBlockIdx) {
+ c = tTABLEIDCmprFn(pWriter->dReader.pBlockIdx, &pWriter->id);
+ ASSERT(c >= 0);
}
- while (true) {
- if (pWriter->iBlock >= pWriter->mBlock.nItem) break;
+ if (c == 0) {
+ SBlockData* pBData = &pWriter->dWriter.bData;
- SBlock block;
- tMapDataGetItemByIdx(&pWriter->mBlock, pWriter->iBlock, &block, tGetBlock);
+ for (; pWriter->dReader.iRow < pWriter->dReader.bData.nRow; pWriter->dReader.iRow++) {
+ TSDBROW row = tsdbRowFromBlockData(&pWriter->dReader.bData, pWriter->dReader.iRow);
- // if (block.last) {
- // code = tsdbReadBlockData(pWriter->pDataFReader, pWriter->pBlockIdx, &block, &pWriter->bDataR, NULL, NULL);
- // if (code) goto _err;
+ code = tBlockDataAppendRow(pBData, &row, NULL, pWriter->id.uid);
+ if (code) goto _err;
- // tBlockReset(&block);
- // block.last = 1;
- // code = tsdbWriteBlockData(pWriter->pDataFWriter, &pWriter->bDataR, NULL, NULL, pWriter->pBlockIdxW, &block,
- // pWriter->cmprAlg);
- // if (code) goto _err;
- // }
+ if (pBData->nRow >= pWriter->maxRow) {
+ code = tsdbWriteDataBlock(pWriter->dWriter.pWriter, pBData, &pWriter->dWriter.mDataBlk, pWriter->cmprAlg);
+ if (code) goto _err;
+ }
+ }
- code = tMapDataPutItem(&pWriter->mBlockW, &block, tPutBlock);
+ code = tsdbWriteDataBlock(pWriter->dWriter.pWriter, pBData, &pWriter->dWriter.mDataBlk, pWriter->cmprAlg);
if (code) goto _err;
- pWriter->iBlock++;
+ for (; pWriter->dReader.iDataBlk < pWriter->dReader.mDataBlk.nItem; pWriter->dReader.iDataBlk++) {
+ SDataBlk dataBlk;
+ tMapDataGetItemByIdx(&pWriter->dReader.mDataBlk, pWriter->dReader.iDataBlk, &dataBlk, tGetDataBlk);
+
+ code = tMapDataPutItem(&pWriter->dWriter.mDataBlk, &dataBlk, tPutDataBlk);
+ if (code) goto _err;
+ }
+
+ code = tsdbSnapNextTableData(pWriter);
+ if (code) goto _err;
}
- // SBlock
- // code = tsdbWriteBlock(pWriter->pDataFWriter, &pWriter->mBlockW, NULL, pWriter->pBlockIdxW);
- // if (code) goto _err;
+ if (pWriter->dWriter.mDataBlk.nItem) {
+ SBlockIdx blockIdx = {.suid = pWriter->id.suid, .uid = pWriter->id.uid};
+ code = tsdbWriteDataBlk(pWriter->dWriter.pWriter, &pWriter->dWriter.mDataBlk, &blockIdx);
- // SBlockIdx
- if (taosArrayPush(pWriter->aBlockIdxW, pWriter->pBlockIdxW) == NULL) {
- code = TSDB_CODE_OUT_OF_MEMORY;
- goto _err;
+ if (taosArrayPush(pWriter->dWriter.aBlockIdx, &blockIdx) == NULL) {
+ code = TSDB_CODE_OUT_OF_MEMORY;
+ goto _err;
+ }
}
-_exit:
- tsdbInfo("vgId:%d, tsdb snapshot write table data end for %s", TD_VID(pWriter->pTsdb->pVnode), pWriter->pTsdb->path);
+ pWriter->id.suid = 0;
+ pWriter->id.uid = 0;
+
return code;
_err:
- tsdbError("vgId:%d, tsdb snapshot write table data end for %s failed since %s", TD_VID(pWriter->pTsdb->pVnode),
- pWriter->pTsdb->path, tstrerror(code));
return code;
}
-static int32_t tsdbSnapMoveWriteTableData(STsdbSnapWriter* pWriter, SBlockIdx* pBlockIdx) {
+static int32_t tsdbSnapWriteOpenFile(STsdbSnapWriter* pWriter, int32_t fid) {
int32_t code = 0;
+ STsdb* pTsdb = pWriter->pTsdb;
- code = tsdbReadBlock(pWriter->pDataFReader, pBlockIdx, &pWriter->mBlock);
- if (code) goto _err;
+ ASSERT(pWriter->dWriter.pWriter == NULL);
+
+ pWriter->fid = fid;
+ pWriter->id = (TABLEID){0};
+ SDFileSet* pSet = taosArraySearch(pWriter->fs.aDFileSet, &(SDFileSet){.fid = fid}, tDFileSetCmprFn, TD_EQ);
- // SBlockData
- SBlock block;
- tMapDataReset(&pWriter->mBlockW);
- for (int32_t iBlock = 0; iBlock < pWriter->mBlock.nItem; iBlock++) {
- tMapDataGetItemByIdx(&pWriter->mBlock, iBlock, &block, tGetBlock);
-
- // if (block.last) {
- // code = tsdbReadBlockData(pWriter->pDataFReader, pBlockIdx, &block, &pWriter->bDataR, NULL, NULL);
- // if (code) goto _err;
-
- // tBlockReset(&block);
- // block.last = 1;
- // code =
- // tsdbWriteBlockData(pWriter->pDataFWriter, &pWriter->bDataR, NULL, NULL, pBlockIdx, &block,
- // pWriter->cmprAlg);
- // if (code) goto _err;
- // }
-
- code = tMapDataPutItem(&pWriter->mBlockW, &block, tPutBlock);
+ // Reader
+ if (pSet) {
+ code = tsdbDataFReaderOpen(&pWriter->dReader.pReader, pWriter->pTsdb, pSet);
if (code) goto _err;
- }
- // SBlock
- SBlockIdx blockIdx = {.suid = pBlockIdx->suid, .uid = pBlockIdx->uid};
- code = tsdbWriteBlock(pWriter->pDataFWriter, &pWriter->mBlockW, &blockIdx);
+ code = tsdbReadBlockIdx(pWriter->dReader.pReader, pWriter->dReader.aBlockIdx);
+ if (code) goto _err;
+ } else {
+ ASSERT(pWriter->dReader.pReader == NULL);
+ taosArrayClear(pWriter->dReader.aBlockIdx);
+ }
+ pWriter->dReader.iBlockIdx = 0; // point to the next one
+ code = tsdbSnapNextTableData(pWriter);
if (code) goto _err;
- // SBlockIdx
- if (taosArrayPush(pWriter->aBlockIdxW, &blockIdx) == NULL) {
- code = TSDB_CODE_OUT_OF_MEMORY;
- goto _err;
+ // Writer
+ SHeadFile fHead = {.commitID = pWriter->commitID};
+ SDataFile fData = {.commitID = pWriter->commitID};
+ SSmaFile fSma = {.commitID = pWriter->commitID};
+ SSttFile fStt = {.commitID = pWriter->commitID};
+ SDFileSet wSet = {.fid = pWriter->fid, .pHeadF = &fHead, .pDataF = &fData, .pSmaF = &fSma};
+ if (pSet) {
+ wSet.diskId = pSet->diskId;
+ fData = *pSet->pDataF;
+ fSma = *pSet->pSmaF;
+ for (int32_t iStt = 0; iStt < pSet->nSttF; iStt++) {
+ wSet.aSttF[iStt] = pSet->aSttF[iStt];
+ }
+ wSet.nSttF = pSet->nSttF + 1; // TODO: fix pSet->nSttF == pTsdb->maxFile
+ } else {
+ SDiskID did = {0};
+ tfsAllocDisk(pTsdb->pVnode->pTfs, 0, &did);
+ tfsMkdirRecurAt(pTsdb->pVnode->pTfs, pTsdb->path, did);
+ wSet.diskId = did;
+ wSet.nSttF = 1;
}
+ wSet.aSttF[wSet.nSttF - 1] = &fStt;
+
+ code = tsdbDataFWriterOpen(&pWriter->dWriter.pWriter, pWriter->pTsdb, &wSet);
+ if (code) goto _err;
+ taosArrayClear(pWriter->dWriter.aBlockIdx);
+ tMapDataReset(&pWriter->dWriter.mDataBlk);
+ taosArrayClear(pWriter->dWriter.aSttBlk);
+ tBlockDataReset(&pWriter->dWriter.bData);
+ tBlockDataReset(&pWriter->dWriter.sData);
-_exit:
return code;
_err:
- tsdbError("vgId:%d, tsdb snapshot move write table data for %s failed since %s", TD_VID(pWriter->pTsdb->pVnode),
- pWriter->pTsdb->path, tstrerror(code));
return code;
}
-static int32_t tsdbSnapWriteTableDataImpl(STsdbSnapWriter* pWriter) {
- int32_t code = 0;
- SBlockData* pBlockData = &pWriter->bData;
- int32_t iRow = 0;
- TSDBROW row;
- TSDBROW* pRow = &row;
+static int32_t tsdbSnapWriteCloseFile(STsdbSnapWriter* pWriter) {
+ int32_t code = 0;
- // // correct schema
- // code = tBlockDataCorrectSchema(&pWriter->bDataW, pBlockData);
- // if (code) goto _err;
+ ASSERT(pWriter->dWriter.pWriter);
- // loop to merge
- *pRow = tsdbRowFromBlockData(pBlockData, iRow);
- while (true) {
- if (pRow == NULL) break;
+ code = tsdbSnapWriteTableDataEnd(pWriter);
+ if (code) goto _err;
- if (pWriter->pBlockData) {
- ASSERT(pWriter->iRow < pWriter->pBlockData->nRow);
+ // copy remain table data
+ TABLEID id = {.suid = INT64_MAX, .uid = INT64_MAX};
+ code = tsdbSnapWriteCopyData(pWriter, &id);
+ if (code) goto _err;
- int32_t c = tsdbRowCmprFn(pRow, &tsdbRowFromBlockData(pWriter->pBlockData, pWriter->iRow));
+ code =
+ tsdbWriteSttBlock(pWriter->dWriter.pWriter, &pWriter->dWriter.sData, pWriter->dWriter.aSttBlk, pWriter->cmprAlg);
+ if (code) goto _err;
- ASSERT(c);
+ // Indices
+ code = tsdbWriteBlockIdx(pWriter->dWriter.pWriter, pWriter->dWriter.aBlockIdx);
+ if (code) goto _err;
- if (c < 0) {
- // code = tBlockDataAppendRow(&pWriter->bDataW, pRow, NULL);
- // if (code) goto _err;
-
- iRow++;
- if (iRow < pWriter->pBlockData->nRow) {
- *pRow = tsdbRowFromBlockData(pBlockData, iRow);
- } else {
- pRow = NULL;
- }
- } else if (c > 0) {
- // code = tBlockDataAppendRow(&pWriter->bDataW, &tsdbRowFromBlockData(pWriter->pBlockData, pWriter->iRow),
- // NULL); if (code) goto _err;
+ code = tsdbWriteSttBlk(pWriter->dWriter.pWriter, pWriter->dWriter.aSttBlk);
+ if (code) goto _err;
- pWriter->iRow++;
- if (pWriter->iRow >= pWriter->pBlockData->nRow) {
- pWriter->pBlockData = NULL;
- }
- }
- } else {
- TSDBKEY key = TSDBROW_KEY(pRow);
+ code = tsdbUpdateDFileSetHeader(pWriter->dWriter.pWriter);
+ if (code) goto _err;
- while (true) {
- if (pWriter->iBlock >= pWriter->mBlock.nItem) break;
+ code = tsdbFSUpsertFSet(&pWriter->fs, &pWriter->dWriter.pWriter->wSet);
+ if (code) goto _err;
- SBlock block;
- int32_t c;
+ code = tsdbDataFWriterClose(&pWriter->dWriter.pWriter, 1);
+ if (code) goto _err;
- tMapDataGetItemByIdx(&pWriter->mBlock, pWriter->iBlock, &block, tGetBlock);
+ if (pWriter->dReader.pReader) {
+ code = tsdbDataFReaderClose(&pWriter->dReader.pReader);
+ if (code) goto _err;
+ }
- // if (block.last) {
- // pWriter->pBlockData = &pWriter->bDataR;
+_exit:
+ return code;
- // code = tsdbReadBlockData(pWriter->pDataFReader, pWriter->pBlockIdx, &block, pWriter->pBlockData, NULL,
- // NULL); if (code) goto _err; pWriter->iRow = 0;
+_err:
+ return code;
+}
- // pWriter->iBlock++;
- // break;
- // }
+static int32_t tsdbSnapWriteToDataFile(STsdbSnapWriter* pWriter, int32_t iRow, int8_t* done) {
+ int32_t code = 0;
- c = tsdbKeyCmprFn(&block.maxKey, &key);
+ SBlockData* pBData = &pWriter->bData;
+ TABLEID id = {.suid = pBData->suid, .uid = pBData->uid ? pBData->uid : pBData->aUid[iRow]};
+ TSDBROW row = tsdbRowFromBlockData(pBData, iRow);
+ TSDBKEY key = TSDBROW_KEY(&row);
- ASSERT(c);
+ *done = 0;
+ while (pWriter->dReader.iRow < pWriter->dReader.bData.nRow ||
+ pWriter->dReader.iDataBlk < pWriter->dReader.mDataBlk.nItem) {
+ // Merge row by row
+ for (; pWriter->dReader.iRow < pWriter->dReader.bData.nRow; pWriter->dReader.iRow++) {
+ TSDBROW trow = tsdbRowFromBlockData(&pWriter->dReader.bData, pWriter->dReader.iRow);
+ TSDBKEY tKey = TSDBROW_KEY(&trow);
- if (c < 0) {
- if (pWriter->bDataW.nRow) {
- // pWriter->blockW.last = 0;
- // code = tsdbWriteBlockData(pWriter->pDataFWriter, &pWriter->bDataW, NULL, NULL, pWriter->pBlockIdxW,
- // &pWriter->blockW, pWriter->cmprAlg);
- // if (code) goto _err;
+ ASSERT(pWriter->dReader.bData.suid == id.suid && pWriter->dReader.bData.uid == id.uid);
- code = tMapDataPutItem(&pWriter->mBlockW, &pWriter->blockW, tPutBlock);
- if (code) goto _err;
+ int32_t c = tsdbKeyCmprFn(&key, &tKey);
+ if (c < 0) {
+ code = tBlockDataAppendRow(&pWriter->dWriter.bData, &row, NULL, id.uid);
+ if (code) goto _err;
+ } else if (c > 0) {
+ code = tBlockDataAppendRow(&pWriter->dWriter.bData, &trow, NULL, id.uid);
+ if (code) goto _err;
+ } else {
+ ASSERT(0);
+ }
- tBlockReset(&pWriter->blockW);
- tBlockDataClear(&pWriter->bDataW);
- }
+ if (pWriter->dWriter.bData.nRow >= pWriter->maxRow) {
+ code = tsdbWriteDataBlock(pWriter->dWriter.pWriter, &pWriter->dWriter.bData, &pWriter->dWriter.mDataBlk,
+ pWriter->cmprAlg);
+ if (code) goto _err;
+ }
- code = tMapDataPutItem(&pWriter->mBlockW, &block, tPutBlock);
- if (code) goto _err;
+ if (c < 0) {
+ *done = 1;
+ goto _exit;
+ }
+ }
- pWriter->iBlock++;
- } else {
- c = tsdbKeyCmprFn(&tBlockDataLastKey(pBlockData), &block.minKey);
+ // Merge row by block
+ SDataBlk tDataBlk = {.minKey = key, .maxKey = key};
+ for (; pWriter->dReader.iDataBlk < pWriter->dReader.mDataBlk.nItem; pWriter->dReader.iDataBlk++) {
+ SDataBlk dataBlk;
+ tMapDataGetItemByIdx(&pWriter->dReader.mDataBlk, pWriter->dReader.iDataBlk, &dataBlk, tGetDataBlk);
- ASSERT(c);
+ int32_t c = tDataBlkCmprFn(&dataBlk, &tDataBlk);
+ if (c < 0) {
+ code = tsdbWriteDataBlock(pWriter->dWriter.pWriter, &pWriter->dWriter.bData, &pWriter->dWriter.mDataBlk,
+ pWriter->cmprAlg);
+ if (code) goto _err;
- if (c > 0) {
- pWriter->pBlockData = &pWriter->bDataR;
- // code =
- // tsdbReadBlockData(pWriter->pDataFReader, pWriter->pBlockIdx, &block, pWriter->pBlockData, NULL,
- // NULL);
- // if (code) goto _err;
- pWriter->iRow = 0;
+ code = tMapDataPutItem(&pWriter->dWriter.mDataBlk, &dataBlk, tPutDataBlk);
+ if (code) goto _err;
+ } else if (c > 0) {
+ code = tBlockDataAppendRow(&pWriter->dWriter.bData, &row, NULL, id.uid);
+ if (code) goto _err;
- pWriter->iBlock++;
- }
- break;
+ if (pWriter->dWriter.bData.nRow >= pWriter->maxRow) {
+ code = tsdbWriteDataBlock(pWriter->dWriter.pWriter, &pWriter->dWriter.bData, &pWriter->dWriter.mDataBlk,
+ pWriter->cmprAlg);
+ if (code) goto _err;
}
- }
-
- if (pWriter->pBlockData) continue;
-
- // code = tBlockDataAppendRow(&pWriter->bDataW, pRow, NULL);
- // if (code) goto _err;
- iRow++;
- if (iRow < pBlockData->nRow) {
- *pRow = tsdbRowFromBlockData(pBlockData, iRow);
+ *done = 1;
+ goto _exit;
} else {
- pRow = NULL;
+ code = tsdbReadDataBlockEx(pWriter->dReader.pReader, &dataBlk, &pWriter->dReader.bData);
+ if (code) goto _err;
+ pWriter->dReader.iRow = 0;
+
+ pWriter->dReader.iDataBlk++;
+ break;
}
}
-
- _check_write:
- if (pWriter->bDataW.nRow < pWriter->maxRow * 4 / 5) continue;
-
- _write_block:
- // code = tsdbWriteBlockData(pWriter->pDataFWriter, &pWriter->bDataW, NULL, NULL, pWriter->pBlockIdxW,
- // &pWriter->blockW, pWriter->cmprAlg);
- // if (code) goto _err;
-
- code = tMapDataPutItem(&pWriter->mBlockW, &pWriter->blockW, tPutBlock);
- if (code) goto _err;
-
- tBlockReset(&pWriter->blockW);
- tBlockDataClear(&pWriter->bDataW);
}
+_exit:
return code;
_err:
- tsdbError("vgId:%d, vnode snapshot tsdb write table data impl for %s failed since %s", TD_VID(pWriter->pTsdb->pVnode),
- pWriter->pTsdb->path, tstrerror(code));
+ tsdbError("vgId:%d %s failed since %s", TD_VID(pWriter->pTsdb->pVnode), __func__, tstrerror(code));
return code;
}
-static int32_t tsdbSnapWriteTableData(STsdbSnapWriter* pWriter, TABLEID id) {
- int32_t code = 0;
- SBlockData* pBlockData = &pWriter->bData;
- TSDBKEY keyFirst = tBlockDataFirstKey(pBlockData);
- TSDBKEY keyLast = tBlockDataLastKey(pBlockData);
-
- // end last table write if should
- if (pWriter->pBlockIdxW) {
- int32_t c = tTABLEIDCmprFn(pWriter->pBlockIdxW, &id);
- if (c < 0) {
- // end
- code = tsdbSnapWriteTableDataEnd(pWriter);
- if (code) goto _err;
-
- // reset
- pWriter->pBlockIdxW = NULL;
- } else if (c > 0) {
- ASSERT(0);
- }
- }
-
- // start new table data write if need
- if (pWriter->pBlockIdxW == NULL) {
- // write table data ahead
- while (true) {
- if (pWriter->iBlockIdx >= taosArrayGetSize(pWriter->aBlockIdx)) break;
-
- SBlockIdx* pBlockIdx = (SBlockIdx*)taosArrayGet(pWriter->aBlockIdx, pWriter->iBlockIdx);
- int32_t c = tTABLEIDCmprFn(pBlockIdx, &id);
+static int32_t tsdbSnapWriteToSttFile(STsdbSnapWriter* pWriter, int32_t iRow) {
+ int32_t code = 0;
- if (c >= 0) break;
+ TABLEID id = {.suid = pWriter->bData.suid,
+ .uid = pWriter->bData.uid ? pWriter->bData.uid : pWriter->bData.aUid[iRow]};
+ TSDBROW row = tsdbRowFromBlockData(&pWriter->bData, iRow);
+ SBlockData* pBData = &pWriter->dWriter.sData;
- code = tsdbSnapMoveWriteTableData(pWriter, pBlockIdx);
+ if (pBData->suid || pBData->uid) {
+ if (!TABLE_SAME_SCHEMA(pBData->suid, pBData->uid, id.suid, id.uid)) {
+ code = tsdbWriteSttBlock(pWriter->dWriter.pWriter, pBData, pWriter->dWriter.aSttBlk, pWriter->cmprAlg);
if (code) goto _err;
- pWriter->iBlockIdx++;
+ pBData->suid = 0;
+ pBData->uid = 0;
}
+ }
- // reader
- pWriter->pBlockIdx = NULL;
- if (pWriter->iBlockIdx < taosArrayGetSize(pWriter->aBlockIdx)) {
- ASSERT(pWriter->pDataFReader);
-
- SBlockIdx* pBlockIdx = (SBlockIdx*)taosArrayGet(pWriter->aBlockIdx, pWriter->iBlockIdx);
- int32_t c = tTABLEIDCmprFn(pBlockIdx, &id);
-
- ASSERT(c >= 0);
-
- if (c == 0) {
- pWriter->pBlockIdx = pBlockIdx;
- pWriter->iBlockIdx++;
- }
- }
-
- if (pWriter->pBlockIdx) {
- code = tsdbReadBlock(pWriter->pDataFReader, pWriter->pBlockIdx, &pWriter->mBlock);
- if (code) goto _err;
- } else {
- tMapDataReset(&pWriter->mBlock);
- }
- pWriter->iBlock = 0;
- pWriter->pBlockData = NULL;
- pWriter->iRow = 0;
-
- // writer
- pWriter->pBlockIdxW = &pWriter->blockIdxW;
- pWriter->pBlockIdxW->suid = id.suid;
- pWriter->pBlockIdxW->uid = id.uid;
+ if (pBData->suid == 0 && pBData->uid == 0) {
+ code = tsdbUpdateTableSchema(pWriter->pTsdb->pVnode->pMeta, pWriter->id.suid, pWriter->id.uid, &pWriter->skmTable);
+ if (code) goto _err;
- tBlockReset(&pWriter->blockW);
- tBlockDataReset(&pWriter->bDataW);
- tMapDataReset(&pWriter->mBlockW);
+ TABLEID tid = {.suid = pWriter->id.suid, .uid = pWriter->id.suid ? 0 : pWriter->id.uid};
+ code = tBlockDataInit(pBData, &tid, pWriter->skmTable.pTSchema, NULL, 0);
+ if (code) goto _err;
}
- ASSERT(pWriter->pBlockIdxW && pWriter->pBlockIdxW->suid == id.suid && pWriter->pBlockIdxW->uid == id.uid);
- ASSERT(pWriter->pBlockIdx == NULL || (pWriter->pBlockIdx->suid == id.suid && pWriter->pBlockIdx->uid == id.uid));
-
- code = tsdbSnapWriteTableDataImpl(pWriter);
+ code = tBlockDataAppendRow(pBData, &row, NULL, id.uid);
if (code) goto _err;
+ if (pBData->nRow >= pWriter->maxRow) {
+ code = tsdbWriteSttBlock(pWriter->dWriter.pWriter, pBData, pWriter->dWriter.aSttBlk, pWriter->cmprAlg);
+ if (code) goto _err;
+ }
+
_exit:
- tsdbDebug("vgId:%d, vnode snapshot tsdb write data impl for %s", TD_VID(pWriter->pTsdb->pVnode),
- pWriter->pTsdb->path);
return code;
_err:
- tsdbError("vgId:%d, vnode snapshot tsdb write data impl for %s failed since %s", TD_VID(pWriter->pTsdb->pVnode),
- pWriter->pTsdb->path, tstrerror(code));
return code;
}
-static int32_t tsdbSnapWriteDataEnd(STsdbSnapWriter* pWriter) {
+static int32_t tsdbSnapWriteRowData(STsdbSnapWriter* pWriter, int32_t iRow) {
int32_t code = 0;
- STsdb* pTsdb = pWriter->pTsdb;
-
- if (pWriter->pDataFWriter == NULL) goto _exit;
- // finish current table
- code = tsdbSnapWriteTableDataEnd(pWriter);
- if (code) goto _err;
+ SBlockData* pBlockData = &pWriter->bData;
+ TABLEID id = {.suid = pBlockData->suid, .uid = pBlockData->uid ? pBlockData->uid : pBlockData->aUid[iRow]};
- // move remain table
- while (pWriter->iBlockIdx < taosArrayGetSize(pWriter->aBlockIdx)) {
- code = tsdbSnapMoveWriteTableData(pWriter, (SBlockIdx*)taosArrayGet(pWriter->aBlockIdx, pWriter->iBlockIdx));
+ // End last table data write if need
+ if (tTABLEIDCmprFn(&pWriter->id, &id) != 0) {
+ code = tsdbSnapWriteTableDataEnd(pWriter);
if (code) goto _err;
-
- pWriter->iBlockIdx++;
}
- // write remain stuff
- if (taosArrayGetSize(pWriter->aBlockLW) > 0) {
- code = tsdbWriteBlockL(pWriter->pDataFWriter, pWriter->aBlockIdxW);
+ // Start new table data write if need
+ if (pWriter->id.suid == 0 && pWriter->id.uid == 0) {
+ code = tsdbSnapWriteTableDataStart(pWriter, &id);
if (code) goto _err;
}
- if (taosArrayGetSize(pWriter->aBlockIdx) > 0) {
- code = tsdbWriteBlockIdx(pWriter->pDataFWriter, pWriter->aBlockIdxW);
+ // Merge with .data file data
+ int8_t done = 0;
+ if (pWriter->dReader.pBlockIdx && tTABLEIDCmprFn(pWriter->dReader.pBlockIdx, &id) == 0) {
+ code = tsdbSnapWriteToDataFile(pWriter, iRow, &done);
if (code) goto _err;
}
- code = tsdbFSUpsertFSet(&pWriter->fs, &pWriter->pDataFWriter->wSet);
- if (code) goto _err;
-
- code = tsdbDataFWriterClose(&pWriter->pDataFWriter, 1);
- if (code) goto _err;
-
- if (pWriter->pDataFReader) {
- code = tsdbDataFReaderClose(&pWriter->pDataFReader);
+ // Append to the .stt data block (todo: check if need to set/reload sst block)
+ if (!done) {
+ code = tsdbSnapWriteToSttFile(pWriter, iRow);
if (code) goto _err;
}
_exit:
- tsdbInfo("vgId:%d, vnode snapshot tsdb writer data end for %s", TD_VID(pTsdb->pVnode), pTsdb->path);
return code;
_err:
- tsdbError("vgId:%d, vnode snapshot tsdb writer data end for %s failed since %s", TD_VID(pTsdb->pVnode), pTsdb->path,
- tstrerror(code));
+ tsdbError("vgId:%d %s failed since %s", TD_VID(pWriter->pTsdb->pVnode), __func__, tstrerror(code));
return code;
}
static int32_t tsdbSnapWriteData(STsdbSnapWriter* pWriter, uint8_t* pData, uint32_t nData) {
- int32_t code = 0;
- STsdb* pTsdb = pWriter->pTsdb;
- SSnapDataHdr* pHdr = (SSnapDataHdr*)pData;
- TABLEID id = *(TABLEID*)(pData + sizeof(SSnapDataHdr));
- int64_t n;
-
- // decode
+ int32_t code = 0;
+ STsdb* pTsdb = pWriter->pTsdb;
SBlockData* pBlockData = &pWriter->bData;
- code = tDecmprBlockData(pData + sizeof(SSnapDataHdr) + sizeof(TABLEID), pHdr->size - sizeof(TABLEID), pBlockData,
- pWriter->aBuf);
- if (code) goto _err;
- // open file
- TSDBKEY keyFirst = {.version = pBlockData->aVersion[0], .ts = pBlockData->aTSKEY[0]};
- TSDBKEY keyLast = {.version = pBlockData->aVersion[pBlockData->nRow - 1],
- .ts = pBlockData->aTSKEY[pBlockData->nRow - 1]};
+ // Decode data
+ SSnapDataHdr* pHdr = (SSnapDataHdr*)pData;
+ code = tDecmprBlockData(pHdr->data, pHdr->size, pBlockData, pWriter->aBuf);
+ if (code) goto _err;
- int32_t fid = tsdbKeyFid(keyFirst.ts, pWriter->minutes, pWriter->precision);
- ASSERT(fid == tsdbKeyFid(keyLast.ts, pWriter->minutes, pWriter->precision));
- if (pWriter->pDataFWriter == NULL || pWriter->fid != fid) {
- // end last file data write if need
- code = tsdbSnapWriteDataEnd(pWriter);
- if (code) goto _err;
+ ASSERT(pBlockData->nRow > 0);
- pWriter->fid = fid;
+ // Loop to handle each row
+ for (int32_t iRow = 0; iRow < pBlockData->nRow; iRow++) {
+ TSKEY ts = pBlockData->aTSKEY[iRow];
+ int32_t fid = tsdbKeyFid(ts, pWriter->minutes, pWriter->precision);
- // read
- SDFileSet* pSet = taosArraySearch(pWriter->fs.aDFileSet, &(SDFileSet){.fid = fid}, tDFileSetCmprFn, TD_EQ);
- if (pSet) {
- code = tsdbDataFReaderOpen(&pWriter->pDataFReader, pTsdb, pSet);
- if (code) goto _err;
+ if (pWriter->dWriter.pWriter == NULL || pWriter->fid != fid) {
+ if (pWriter->dWriter.pWriter) {
+ ASSERT(fid > pWriter->fid);
- code = tsdbReadBlockIdx(pWriter->pDataFReader, pWriter->aBlockIdx);
- if (code) goto _err;
+ code = tsdbSnapWriteCloseFile(pWriter);
+ if (code) goto _err;
+ }
- code = tsdbReadBlockL(pWriter->pDataFReader, pWriter->aBlockL);
+ code = tsdbSnapWriteOpenFile(pWriter, fid);
if (code) goto _err;
- } else {
- ASSERT(pWriter->pDataFReader == NULL);
- taosArrayClear(pWriter->aBlockIdx);
- taosArrayClear(pWriter->aBlockL);
- }
- pWriter->iBlockIdx = 0;
- pWriter->pBlockIdx = NULL;
- tMapDataReset(&pWriter->mBlock);
- pWriter->iBlock = 0;
- pWriter->pBlockData = NULL;
- pWriter->iRow = 0;
- pWriter->iBlockL = 0;
- tBlockDataReset(&pWriter->bDataR);
- tBlockDataReset(&pWriter->lDataR);
-
- // write
- SHeadFile fHead;
- SDataFile fData;
- SLastFile fLast;
- SSmaFile fSma;
- SDFileSet wSet = {.pHeadF = &fHead, .pDataF = &fData, .pLastF = &fLast, .pSmaF = &fSma};
-
- if (pSet) {
- wSet.diskId = pSet->diskId;
- wSet.fid = fid;
- fHead = (SHeadFile){.commitID = pWriter->commitID, .offset = 0, .size = 0};
- fData = *pSet->pDataF;
- fLast = (SLastFile){.commitID = pWriter->commitID, .size = 0};
- fSma = *pSet->pSmaF;
- } else {
- wSet.diskId = (SDiskID){.level = 0, .id = 0};
- wSet.fid = fid;
- fHead = (SHeadFile){.commitID = pWriter->commitID, .offset = 0, .size = 0};
- fData = (SDataFile){.commitID = pWriter->commitID, .size = 0};
- fLast = (SLastFile){.commitID = pWriter->commitID, .size = 0, .offset = 0};
- fSma = (SSmaFile){.commitID = pWriter->commitID, .size = 0};
}
- code = tsdbDataFWriterOpen(&pWriter->pDataFWriter, pTsdb, &wSet);
+ code = tsdbSnapWriteRowData(pWriter, iRow);
if (code) goto _err;
-
- taosArrayClear(pWriter->aBlockIdxW);
- taosArrayClear(pWriter->aBlockLW);
- tMapDataReset(&pWriter->mBlockW);
- pWriter->pBlockIdxW = NULL;
- tBlockDataReset(&pWriter->bDataW);
}
- code = tsdbSnapWriteTableData(pWriter, id);
- if (code) goto _err;
-
- tsdbInfo("vgId:%d, vnode snapshot tsdb write data for %s, fid:%d suid:%" PRId64 " uid:%" PRId64 " nRow:%d",
- TD_VID(pTsdb->pVnode), pTsdb->path, fid, id.suid, id.suid, pBlockData->nRow);
return code;
_err:
@@ -974,10 +1100,41 @@ _err:
return code;
}
+// SNAP_DATA_DEL
+static int32_t tsdbSnapMoveWriteDelData(STsdbSnapWriter* pWriter, TABLEID* pId) {
+ int32_t code = 0;
+
+ while (true) {
+ if (pWriter->iDelIdx >= taosArrayGetSize(pWriter->aDelIdxR)) break;
+
+ SDelIdx* pDelIdx = (SDelIdx*)taosArrayGet(pWriter->aDelIdxR, pWriter->iDelIdx);
+
+ if (tTABLEIDCmprFn(pDelIdx, pId) >= 0) break;
+
+ code = tsdbReadDelData(pWriter->pDelFReader, pDelIdx, pWriter->aDelData);
+ if (code) goto _exit;
+
+ SDelIdx delIdx = *pDelIdx;
+ code = tsdbWriteDelData(pWriter->pDelFWriter, pWriter->aDelData, &delIdx);
+ if (code) goto _exit;
+
+ if (taosArrayPush(pWriter->aDelIdxW, &delIdx) == NULL) {
+ code = TSDB_CODE_OUT_OF_MEMORY;
+ goto _exit;
+ }
+
+ pWriter->iDelIdx++;
+ }
+
+_exit:
+ return code;
+}
+
static int32_t tsdbSnapWriteDel(STsdbSnapWriter* pWriter, uint8_t* pData, uint32_t nData) {
int32_t code = 0;
STsdb* pTsdb = pWriter->pTsdb;
+ // Open del file if not opened yet
if (pWriter->pDelFWriter == NULL) {
SDelFile* pDelFile = pWriter->fs.pDelFile;
@@ -988,38 +1145,28 @@ static int32_t tsdbSnapWriteDel(STsdbSnapWriter* pWriter, uint8_t* pData, uint32
code = tsdbReadDelIdx(pWriter->pDelFReader, pWriter->aDelIdxR);
if (code) goto _err;
+ } else {
+ taosArrayClear(pWriter->aDelIdxR);
}
+ pWriter->iDelIdx = 0;
// writer
- SDelFile delFile = {.commitID = pWriter->commitID, .offset = 0, .size = 0};
+ SDelFile delFile = {.commitID = pWriter->commitID};
code = tsdbDelFWriterOpen(&pWriter->pDelFWriter, &delFile, pTsdb);
if (code) goto _err;
+ taosArrayClear(pWriter->aDelIdxW);
}
- // process the del data
- TABLEID id = *(TABLEID*)(pData + sizeof(SSnapDataHdr));
-
- while (true) {
- if (pWriter->iDelIdx >= taosArrayGetSize(pWriter->aDelIdxR)) break;
- if (tTABLEIDCmprFn(taosArrayGet(pWriter->aDelIdxR, pWriter->iDelIdx), &id) >= 0) break;
-
- SDelIdx* pDelIdx = (SDelIdx*)taosArrayGet(pWriter->aDelIdxR, pWriter->iDelIdx);
-
- code = tsdbReadDelData(pWriter->pDelFReader, pDelIdx, pWriter->aDelData);
- if (code) goto _err;
-
- SDelIdx delIdx = *pDelIdx;
- code = tsdbWriteDelData(pWriter->pDelFWriter, pWriter->aDelData, &delIdx);
- if (code) goto _err;
+ SSnapDataHdr* pHdr = (SSnapDataHdr*)pData;
+ TABLEID id = *(TABLEID*)pHdr->data;
- if (taosArrayPush(pWriter->aDelIdxW, &delIdx) == NULL) {
- code = TSDB_CODE_OUT_OF_MEMORY;
- goto _err;
- }
+ ASSERT(pHdr->size + sizeof(SSnapDataHdr) == nData);
- pWriter->iDelIdx++;
- }
+ // Move write data < id
+ code = tsdbSnapMoveWriteDelData(pWriter, &id);
+ if (code) goto _err;
+ // Merge incoming data with current
if (pWriter->iDelIdx < taosArrayGetSize(pWriter->aDelIdxR) &&
tTABLEIDCmprFn(taosArrayGet(pWriter->aDelIdxR, pWriter->iDelIdx), &id) == 0) {
SDelIdx* pDelIdx = (SDelIdx*)taosArrayGet(pWriter->aDelIdxR, pWriter->iDelIdx);
@@ -1053,7 +1200,6 @@ static int32_t tsdbSnapWriteDel(STsdbSnapWriter* pWriter, uint8_t* pData, uint32
goto _err;
}
-_exit:
return code;
_err:
@@ -1066,23 +1212,14 @@ static int32_t tsdbSnapWriteDelEnd(STsdbSnapWriter* pWriter) {
int32_t code = 0;
STsdb* pTsdb = pWriter->pTsdb;
- if (pWriter->pDelFWriter == NULL) goto _exit;
-
- for (; pWriter->iDelIdx < taosArrayGetSize(pWriter->aDelIdxR); pWriter->iDelIdx++) {
- SDelIdx* pDelIdx = (SDelIdx*)taosArrayGet(pWriter->aDelIdxR, pWriter->iDelIdx);
-
- code = tsdbReadDelData(pWriter->pDelFReader, pDelIdx, pWriter->aDelData);
- if (code) goto _err;
+ if (pWriter->pDelFWriter == NULL) return code;
- SDelIdx delIdx = *pDelIdx;
- code = tsdbWriteDelData(pWriter->pDelFWriter, pWriter->aDelData, &delIdx);
- if (code) goto _err;
+ TABLEID id = {.suid = INT64_MAX, .uid = INT64_MAX};
+ code = tsdbSnapMoveWriteDelData(pWriter, &id);
+ if (code) goto _err;
- if (taosArrayPush(pWriter->aDelIdxR, &delIdx) == NULL) {
- code = TSDB_CODE_OUT_OF_MEMORY;
- goto _err;
- }
- }
+ code = tsdbWriteDelIdx(pWriter->pDelFWriter, pWriter->aDelIdxW);
+ if (code) goto _err;
code = tsdbUpdateDelFileHdr(pWriter->pDelFWriter);
if (code) goto _err;
@@ -1098,7 +1235,6 @@ static int32_t tsdbSnapWriteDelEnd(STsdbSnapWriter* pWriter) {
if (code) goto _err;
}
-_exit:
tsdbInfo("vgId:%d, vnode snapshot tsdb write del for %s end", TD_VID(pTsdb->pVnode), pTsdb->path);
return code;
@@ -1108,6 +1244,7 @@ _err:
return code;
}
+// APIs
int32_t tsdbSnapWriterOpen(STsdb* pTsdb, int64_t sver, int64_t ever, STsdbSnapWriter** ppWriter) {
int32_t code = 0;
STsdbSnapWriter* pWriter = NULL;
@@ -1133,39 +1270,38 @@ int32_t tsdbSnapWriterOpen(STsdb* pTsdb, int64_t sver, int64_t ever, STsdbSnapWr
pWriter->cmprAlg = pTsdb->pVnode->config.tsdbCfg.compression;
pWriter->commitID = pTsdb->pVnode->state.commitID;
- // for data file
+ // SNAP_DATA_TSDB
code = tBlockDataCreate(&pWriter->bData);
-
if (code) goto _err;
- pWriter->aBlockIdx = taosArrayInit(0, sizeof(SBlockIdx));
- if (pWriter->aBlockIdx == NULL) {
+
+ pWriter->fid = INT32_MIN;
+ pWriter->id = (TABLEID){0};
+ // Reader
+ pWriter->dReader.aBlockIdx = taosArrayInit(0, sizeof(SBlockIdx));
+ if (pWriter->dReader.aBlockIdx == NULL) {
code = TSDB_CODE_OUT_OF_MEMORY;
goto _err;
}
- code = tBlockDataCreate(&pWriter->bDataR);
+ code = tBlockDataCreate(&pWriter->dReader.bData);
if (code) goto _err;
- pWriter->aBlockL = taosArrayInit(0, sizeof(SBlockL));
- if (pWriter->aBlockL == NULL) {
+ // Writer
+ pWriter->dWriter.aBlockIdx = taosArrayInit(0, sizeof(SBlockIdx));
+ if (pWriter->dWriter.aBlockIdx == NULL) {
code = TSDB_CODE_OUT_OF_MEMORY;
goto _err;
}
-
- pWriter->aBlockIdxW = taosArrayInit(0, sizeof(SBlockIdx));
- if (pWriter->aBlockIdxW == NULL) {
+ pWriter->dWriter.aSttBlk = taosArrayInit(0, sizeof(SSttBlk));
+ if (pWriter->dWriter.aSttBlk == NULL) {
code = TSDB_CODE_OUT_OF_MEMORY;
goto _err;
}
- code = tBlockDataCreate(&pWriter->bDataW);
+ code = tBlockDataCreate(&pWriter->dWriter.bData);
+ if (code) goto _err;
+ code = tBlockDataCreate(&pWriter->dWriter.sData);
if (code) goto _err;
- pWriter->aBlockLW = taosArrayInit(0, sizeof(SBlockL));
- if (pWriter->aBlockLW == NULL) {
- code = TSDB_CODE_OUT_OF_MEMORY;
- goto _err;
- }
-
- // for del file
+ // SNAP_DATA_DEL
pWriter->aDelIdxR = taosArrayInit(0, sizeof(SDelIdx));
if (pWriter->aDelIdxR == NULL) {
code = TSDB_CODE_OUT_OF_MEMORY;
@@ -1186,6 +1322,7 @@ int32_t tsdbSnapWriterOpen(STsdb* pTsdb, int64_t sver, int64_t ever, STsdbSnapWr
tsdbInfo("vgId:%d, tsdb snapshot writer open for %s succeed", TD_VID(pTsdb->pVnode), pTsdb->path);
return code;
+
_err:
tsdbError("vgId:%d, tsdb snapshot writer open for %s failed since %s", TD_VID(pTsdb->pVnode), pTsdb->path,
tstrerror(code));
@@ -1196,14 +1333,17 @@ _err:
int32_t tsdbSnapWriterClose(STsdbSnapWriter** ppWriter, int8_t rollback) {
int32_t code = 0;
STsdbSnapWriter* pWriter = *ppWriter;
+ STsdb* pTsdb = pWriter->pTsdb;
if (rollback) {
ASSERT(0);
// code = tsdbFSRollback(pWriter->pTsdb->pFS);
// if (code) goto _err;
} else {
- code = tsdbSnapWriteDataEnd(pWriter);
- if (code) goto _err;
+ if (pWriter->dWriter.pWriter) {
+ code = tsdbSnapWriteCloseFile(pWriter);
+ if (code) goto _err;
+ }
code = tsdbSnapWriteDelEnd(pWriter);
if (code) goto _err;
@@ -1211,14 +1351,44 @@ int32_t tsdbSnapWriterClose(STsdbSnapWriter** ppWriter, int8_t rollback) {
code = tsdbFSCommit1(pWriter->pTsdb, &pWriter->fs);
if (code) goto _err;
+ // lock
+ taosThreadRwlockWrlock(&pTsdb->rwLock);
+
code = tsdbFSCommit2(pWriter->pTsdb, &pWriter->fs);
- if (code) goto _err;
+ if (code) {
+ taosThreadRwlockUnlock(&pTsdb->rwLock);
+ goto _err;
+ }
+
+ // unlock
+ taosThreadRwlockUnlock(&pTsdb->rwLock);
}
+ // SNAP_DATA_DEL
+ taosArrayDestroy(pWriter->aDelIdxW);
+ taosArrayDestroy(pWriter->aDelData);
+ taosArrayDestroy(pWriter->aDelIdxR);
+
+ // SNAP_DATA_TSDB
+
+ // Writer
+ tBlockDataDestroy(&pWriter->dWriter.sData, 1);
+ tBlockDataDestroy(&pWriter->dWriter.bData, 1);
+ taosArrayDestroy(pWriter->dWriter.aSttBlk);
+ tMapDataClear(&pWriter->dWriter.mDataBlk);
+ taosArrayDestroy(pWriter->dWriter.aBlockIdx);
+
+ // Reader
+ tBlockDataDestroy(&pWriter->dReader.bData, 1);
+ tMapDataClear(&pWriter->dReader.mDataBlk);
+ taosArrayDestroy(pWriter->dReader.aBlockIdx);
+
+ tBlockDataDestroy(&pWriter->bData, 1);
+ tTSchemaDestroy(pWriter->skmTable.pTSchema);
+
for (int32_t iBuf = 0; iBuf < sizeof(pWriter->aBuf) / sizeof(uint8_t*); iBuf++) {
tFree(pWriter->aBuf[iBuf]);
}
-
tsdbInfo("vgId:%d, vnode snapshot tsdb writer close for %s", TD_VID(pWriter->pTsdb->pVnode), pWriter->pTsdb->path);
taosMemoryFree(pWriter);
*ppWriter = NULL;
@@ -1243,8 +1413,8 @@ int32_t tsdbSnapWrite(STsdbSnapWriter* pWriter, uint8_t* pData, uint32_t nData)
goto _exit;
} else {
- if (pWriter->pDataFWriter) {
- code = tsdbSnapWriteDataEnd(pWriter);
+ if (pWriter->dWriter.pWriter) {
+ code = tsdbSnapWriteCloseFile(pWriter);
if (code) goto _err;
}
}
@@ -1257,7 +1427,6 @@ int32_t tsdbSnapWrite(STsdbSnapWriter* pWriter, uint8_t* pData, uint32_t nData)
_exit:
tsdbDebug("vgId:%d, tsdb snapshot write for %s succeed", TD_VID(pWriter->pTsdb->pVnode), pWriter->pTsdb->path);
-
return code;
_err:
diff --git a/source/dnode/vnode/src/tsdb/tsdbUtil.c b/source/dnode/vnode/src/tsdb/tsdbUtil.c
index 6db9d5e6f40c5d35e52d90dd86b28f4cb7a94676..4999e7a49a82764534b316994b38fa9e0c9af895 100644
--- a/source/dnode/vnode/src/tsdb/tsdbUtil.c
+++ b/source/dnode/vnode/src/tsdb/tsdbUtil.c
@@ -51,6 +51,22 @@ _exit:
return code;
}
+int32_t tMapDataCopy(SMapData *pFrom, SMapData *pTo) {
+ int32_t code = 0;
+
+ pTo->nItem = pFrom->nItem;
+ pTo->nData = pFrom->nData;
+ code = tRealloc((uint8_t **)&pTo->aOffset, sizeof(int32_t) * pFrom->nItem);
+ if (code) goto _exit;
+ code = tRealloc(&pTo->pData, pFrom->nData);
+ if (code) goto _exit;
+ memcpy(pTo->aOffset, pFrom->aOffset, sizeof(int32_t) * pFrom->nItem);
+ memcpy(pTo->pData, pFrom->pData, pFrom->nData);
+
+_exit:
+ return code;
+}
+
int32_t tMapDataSearch(SMapData *pMapData, void *pSearchItem, int32_t (*tGetItemFn)(uint8_t *, void *),
int32_t (*tItemCmprFn)(const void *, const void *), void *pItem) {
int32_t code = 0;
@@ -198,7 +214,7 @@ int32_t tCmprBlockIdx(void const *lhs, void const *rhs) {
int32_t tCmprBlockL(void const *lhs, void const *rhs) {
SBlockIdx *lBlockIdx = (SBlockIdx *)lhs;
- SBlockL *rBlockL = (SBlockL *)rhs;
+ SSttBlk *rBlockL = (SSttBlk *)rhs;
if (lBlockIdx->suid < rBlockL->suid) {
return -1;
@@ -215,69 +231,69 @@ int32_t tCmprBlockL(void const *lhs, void const *rhs) {
return 0;
}
-// SBlock ======================================================
-void tBlockReset(SBlock *pBlock) {
- *pBlock = (SBlock){.minKey = TSDBKEY_MAX, .maxKey = TSDBKEY_MIN, .minVer = VERSION_MAX, .maxVer = VERSION_MIN};
+// SDataBlk ======================================================
+void tDataBlkReset(SDataBlk *pDataBlk) {
+ *pDataBlk = (SDataBlk){.minKey = TSDBKEY_MAX, .maxKey = TSDBKEY_MIN, .minVer = VERSION_MAX, .maxVer = VERSION_MIN};
}
-int32_t tPutBlock(uint8_t *p, void *ph) {
- int32_t n = 0;
- SBlock *pBlock = (SBlock *)ph;
-
- n += tPutI64v(p ? p + n : p, pBlock->minKey.version);
- n += tPutI64v(p ? p + n : p, pBlock->minKey.ts);
- n += tPutI64v(p ? p + n : p, pBlock->maxKey.version);
- n += tPutI64v(p ? p + n : p, pBlock->maxKey.ts);
- n += tPutI64v(p ? p + n : p, pBlock->minVer);
- n += tPutI64v(p ? p + n : p, pBlock->maxVer);
- n += tPutI32v(p ? p + n : p, pBlock->nRow);
- n += tPutI8(p ? p + n : p, pBlock->hasDup);
- n += tPutI8(p ? p + n : p, pBlock->nSubBlock);
- for (int8_t iSubBlock = 0; iSubBlock < pBlock->nSubBlock; iSubBlock++) {
- n += tPutI64v(p ? p + n : p, pBlock->aSubBlock[iSubBlock].offset);
- n += tPutI32v(p ? p + n : p, pBlock->aSubBlock[iSubBlock].szBlock);
- n += tPutI32v(p ? p + n : p, pBlock->aSubBlock[iSubBlock].szKey);
- }
- if (pBlock->nSubBlock == 1 && !pBlock->hasDup) {
- n += tPutI64v(p ? p + n : p, pBlock->smaInfo.offset);
- n += tPutI32v(p ? p + n : p, pBlock->smaInfo.size);
+int32_t tPutDataBlk(uint8_t *p, void *ph) {
+ int32_t n = 0;
+ SDataBlk *pDataBlk = (SDataBlk *)ph;
+
+ n += tPutI64v(p ? p + n : p, pDataBlk->minKey.version);
+ n += tPutI64v(p ? p + n : p, pDataBlk->minKey.ts);
+ n += tPutI64v(p ? p + n : p, pDataBlk->maxKey.version);
+ n += tPutI64v(p ? p + n : p, pDataBlk->maxKey.ts);
+ n += tPutI64v(p ? p + n : p, pDataBlk->minVer);
+ n += tPutI64v(p ? p + n : p, pDataBlk->maxVer);
+ n += tPutI32v(p ? p + n : p, pDataBlk->nRow);
+ n += tPutI8(p ? p + n : p, pDataBlk->hasDup);
+ n += tPutI8(p ? p + n : p, pDataBlk->nSubBlock);
+ for (int8_t iSubBlock = 0; iSubBlock < pDataBlk->nSubBlock; iSubBlock++) {
+ n += tPutI64v(p ? p + n : p, pDataBlk->aSubBlock[iSubBlock].offset);
+ n += tPutI32v(p ? p + n : p, pDataBlk->aSubBlock[iSubBlock].szBlock);
+ n += tPutI32v(p ? p + n : p, pDataBlk->aSubBlock[iSubBlock].szKey);
+ }
+ if (pDataBlk->nSubBlock == 1 && !pDataBlk->hasDup) {
+ n += tPutI64v(p ? p + n : p, pDataBlk->smaInfo.offset);
+ n += tPutI32v(p ? p + n : p, pDataBlk->smaInfo.size);
}
return n;
}
-int32_t tGetBlock(uint8_t *p, void *ph) {
- int32_t n = 0;
- SBlock *pBlock = (SBlock *)ph;
-
- n += tGetI64v(p + n, &pBlock->minKey.version);
- n += tGetI64v(p + n, &pBlock->minKey.ts);
- n += tGetI64v(p + n, &pBlock->maxKey.version);
- n += tGetI64v(p + n, &pBlock->maxKey.ts);
- n += tGetI64v(p + n, &pBlock->minVer);
- n += tGetI64v(p + n, &pBlock->maxVer);
- n += tGetI32v(p + n, &pBlock->nRow);
- n += tGetI8(p + n, &pBlock->hasDup);
- n += tGetI8(p + n, &pBlock->nSubBlock);
- for (int8_t iSubBlock = 0; iSubBlock < pBlock->nSubBlock; iSubBlock++) {
- n += tGetI64v(p + n, &pBlock->aSubBlock[iSubBlock].offset);
- n += tGetI32v(p + n, &pBlock->aSubBlock[iSubBlock].szBlock);
- n += tGetI32v(p + n, &pBlock->aSubBlock[iSubBlock].szKey);
- }
- if (pBlock->nSubBlock == 1 && !pBlock->hasDup) {
- n += tGetI64v(p + n, &pBlock->smaInfo.offset);
- n += tGetI32v(p + n, &pBlock->smaInfo.size);
+int32_t tGetDataBlk(uint8_t *p, void *ph) {
+ int32_t n = 0;
+ SDataBlk *pDataBlk = (SDataBlk *)ph;
+
+ n += tGetI64v(p + n, &pDataBlk->minKey.version);
+ n += tGetI64v(p + n, &pDataBlk->minKey.ts);
+ n += tGetI64v(p + n, &pDataBlk->maxKey.version);
+ n += tGetI64v(p + n, &pDataBlk->maxKey.ts);
+ n += tGetI64v(p + n, &pDataBlk->minVer);
+ n += tGetI64v(p + n, &pDataBlk->maxVer);
+ n += tGetI32v(p + n, &pDataBlk->nRow);
+ n += tGetI8(p + n, &pDataBlk->hasDup);
+ n += tGetI8(p + n, &pDataBlk->nSubBlock);
+ for (int8_t iSubBlock = 0; iSubBlock < pDataBlk->nSubBlock; iSubBlock++) {
+ n += tGetI64v(p + n, &pDataBlk->aSubBlock[iSubBlock].offset);
+ n += tGetI32v(p + n, &pDataBlk->aSubBlock[iSubBlock].szBlock);
+ n += tGetI32v(p + n, &pDataBlk->aSubBlock[iSubBlock].szKey);
+ }
+ if (pDataBlk->nSubBlock == 1 && !pDataBlk->hasDup) {
+ n += tGetI64v(p + n, &pDataBlk->smaInfo.offset);
+ n += tGetI32v(p + n, &pDataBlk->smaInfo.size);
} else {
- pBlock->smaInfo.offset = 0;
- pBlock->smaInfo.size = 0;
+ pDataBlk->smaInfo.offset = 0;
+ pDataBlk->smaInfo.size = 0;
}
return n;
}
-int32_t tBlockCmprFn(const void *p1, const void *p2) {
- SBlock *pBlock1 = (SBlock *)p1;
- SBlock *pBlock2 = (SBlock *)p2;
+int32_t tDataBlkCmprFn(const void *p1, const void *p2) {
+ SDataBlk *pBlock1 = (SDataBlk *)p1;
+ SDataBlk *pBlock2 = (SDataBlk *)p2;
if (tsdbKeyCmprFn(&pBlock1->maxKey, &pBlock2->minKey) < 0) {
return -1;
@@ -288,48 +304,48 @@ int32_t tBlockCmprFn(const void *p1, const void *p2) {
return 0;
}
-bool tBlockHasSma(SBlock *pBlock) {
- if (pBlock->nSubBlock > 1) return false;
- if (pBlock->hasDup) return false;
+bool tDataBlkHasSma(SDataBlk *pDataBlk) {
+ if (pDataBlk->nSubBlock > 1) return false;
+ if (pDataBlk->hasDup) return false;
- return pBlock->smaInfo.size > 0;
+ return pDataBlk->smaInfo.size > 0;
}
-// SBlockL ======================================================
-int32_t tPutBlockL(uint8_t *p, void *ph) {
+// SSttBlk ======================================================
+int32_t tPutSttBlk(uint8_t *p, void *ph) {
int32_t n = 0;
- SBlockL *pBlockL = (SBlockL *)ph;
-
- n += tPutI64(p ? p + n : p, pBlockL->suid);
- n += tPutI64(p ? p + n : p, pBlockL->minUid);
- n += tPutI64(p ? p + n : p, pBlockL->maxUid);
- n += tPutI64v(p ? p + n : p, pBlockL->minKey);
- n += tPutI64v(p ? p + n : p, pBlockL->maxKey);
- n += tPutI64v(p ? p + n : p, pBlockL->minVer);
- n += tPutI64v(p ? p + n : p, pBlockL->maxVer);
- n += tPutI32v(p ? p + n : p, pBlockL->nRow);
- n += tPutI64v(p ? p + n : p, pBlockL->bInfo.offset);
- n += tPutI32v(p ? p + n : p, pBlockL->bInfo.szBlock);
- n += tPutI32v(p ? p + n : p, pBlockL->bInfo.szKey);
+ SSttBlk *pSttBlk = (SSttBlk *)ph;
+
+ n += tPutI64(p ? p + n : p, pSttBlk->suid);
+ n += tPutI64(p ? p + n : p, pSttBlk->minUid);
+ n += tPutI64(p ? p + n : p, pSttBlk->maxUid);
+ n += tPutI64v(p ? p + n : p, pSttBlk->minKey);
+ n += tPutI64v(p ? p + n : p, pSttBlk->maxKey);
+ n += tPutI64v(p ? p + n : p, pSttBlk->minVer);
+ n += tPutI64v(p ? p + n : p, pSttBlk->maxVer);
+ n += tPutI32v(p ? p + n : p, pSttBlk->nRow);
+ n += tPutI64v(p ? p + n : p, pSttBlk->bInfo.offset);
+ n += tPutI32v(p ? p + n : p, pSttBlk->bInfo.szBlock);
+ n += tPutI32v(p ? p + n : p, pSttBlk->bInfo.szKey);
return n;
}
-int32_t tGetBlockL(uint8_t *p, void *ph) {
+int32_t tGetSttBlk(uint8_t *p, void *ph) {
int32_t n = 0;
- SBlockL *pBlockL = (SBlockL *)ph;
-
- n += tGetI64(p + n, &pBlockL->suid);
- n += tGetI64(p + n, &pBlockL->minUid);
- n += tGetI64(p + n, &pBlockL->maxUid);
- n += tGetI64v(p + n, &pBlockL->minKey);
- n += tGetI64v(p + n, &pBlockL->maxKey);
- n += tGetI64v(p + n, &pBlockL->minVer);
- n += tGetI64v(p + n, &pBlockL->maxVer);
- n += tGetI32v(p + n, &pBlockL->nRow);
- n += tGetI64v(p + n, &pBlockL->bInfo.offset);
- n += tGetI32v(p + n, &pBlockL->bInfo.szBlock);
- n += tGetI32v(p + n, &pBlockL->bInfo.szKey);
+ SSttBlk *pSttBlk = (SSttBlk *)ph;
+
+ n += tGetI64(p + n, &pSttBlk->suid);
+ n += tGetI64(p + n, &pSttBlk->minUid);
+ n += tGetI64(p + n, &pSttBlk->maxUid);
+ n += tGetI64v(p + n, &pSttBlk->minKey);
+ n += tGetI64v(p + n, &pSttBlk->maxKey);
+ n += tGetI64v(p + n, &pSttBlk->minVer);
+ n += tGetI64v(p + n, &pSttBlk->maxVer);
+ n += tGetI32v(p + n, &pSttBlk->nRow);
+ n += tGetI64v(p + n, &pSttBlk->bInfo.offset);
+ n += tGetI32v(p + n, &pSttBlk->bInfo.szBlock);
+ n += tGetI32v(p + n, &pSttBlk->bInfo.szKey);
return n;
}
@@ -689,12 +705,12 @@ int32_t tRowMergerAdd(SRowMerger *pMerger, TSDBROW *pRow, STSchema *pTSchema) {
tsdbRowGetColVal(pRow, pTSchema, jCol++, pColVal);
if (key.version > pMerger->version) {
- if (!pColVal->isNone) {
+ if (!COL_VAL_IS_NONE(pColVal)) {
taosArraySet(pMerger->pArray, iCol, pColVal);
}
} else if (key.version < pMerger->version) {
SColVal *tColVal = (SColVal *)taosArrayGet(pMerger->pArray, iCol);
- if (tColVal->isNone && !pColVal->isNone) {
+ if (COL_VAL_IS_NONE(tColVal) && !COL_VAL_IS_NONE(pColVal)) {
taosArraySet(pMerger->pArray, iCol, pColVal);
}
} else {
@@ -760,12 +776,12 @@ int32_t tRowMerge(SRowMerger *pMerger, TSDBROW *pRow) {
tsdbRowGetColVal(pRow, pMerger->pTSchema, iCol, pColVal);
if (key.version > pMerger->version) {
- if (!pColVal->isNone) {
+ if (!COL_VAL_IS_NONE(pColVal)) {
taosArraySet(pMerger->pArray, iCol, pColVal);
}
} else if (key.version < pMerger->version) {
SColVal *tColVal = (SColVal *)taosArrayGet(pMerger->pArray, iCol);
- if (tColVal->isNone && !pColVal->isNone) {
+ if (COL_VAL_IS_NONE(tColVal) && !COL_VAL_IS_NONE(pColVal)) {
taosArraySet(pMerger->pArray, iCol, pColVal);
}
} else {
@@ -893,248 +909,6 @@ int32_t tsdbBuildDeleteSkyline(SArray *aDelData, int32_t sidx, int32_t eidx, SAr
return code;
}
-// SColData ========================================
-void tColDataInit(SColData *pColData, int16_t cid, int8_t type, int8_t smaOn) {
- pColData->cid = cid;
- pColData->type = type;
- pColData->smaOn = smaOn;
- tColDataReset(pColData);
-}
-
-void tColDataReset(SColData *pColData) {
- pColData->nVal = 0;
- pColData->flag = 0;
- pColData->nData = 0;
-}
-
-void tColDataClear(void *ph) {
- SColData *pColData = (SColData *)ph;
-
- tFree(pColData->pBitMap);
- tFree((uint8_t *)pColData->aOffset);
- tFree(pColData->pData);
-}
-
-int32_t tColDataAppendValue(SColData *pColData, SColVal *pColVal) {
- int32_t code = 0;
- int64_t size;
- SValue value = {0};
- SValue *pValue = &value;
-
- ASSERT(pColVal->cid == pColData->cid);
- ASSERT(pColVal->type == pColData->type);
-
- // realloc bitmap
- size = BIT2_SIZE(pColData->nVal + 1);
- code = tRealloc(&pColData->pBitMap, size);
- if (code) goto _exit;
- if ((pColData->nVal & 3) == 0) {
- pColData->pBitMap[pColData->nVal >> 2] = 0;
- }
-
- // put value
- if (pColVal->isNone) {
- pColData->flag |= HAS_NONE;
- SET_BIT2(pColData->pBitMap, pColData->nVal, 0);
- } else if (pColVal->isNull) {
- pColData->flag |= HAS_NULL;
- SET_BIT2(pColData->pBitMap, pColData->nVal, 1);
- } else {
- pColData->flag |= HAS_VALUE;
- SET_BIT2(pColData->pBitMap, pColData->nVal, 2);
- pValue = &pColVal->value;
- }
-
- if (IS_VAR_DATA_TYPE(pColData->type)) {
- // offset
- code = tRealloc((uint8_t **)&pColData->aOffset, sizeof(int32_t) * (pColData->nVal + 1));
- if (code) goto _exit;
- pColData->aOffset[pColData->nVal] = pColData->nData;
-
- // value
- if ((!pColVal->isNone) && (!pColVal->isNull)) {
- code = tRealloc(&pColData->pData, pColData->nData + pColVal->value.nData);
- if (code) goto _exit;
- memcpy(pColData->pData + pColData->nData, pColVal->value.pData, pColVal->value.nData);
- pColData->nData += pColVal->value.nData;
- }
- } else {
- code = tRealloc(&pColData->pData, pColData->nData + tPutValue(NULL, pValue, pColVal->type));
- if (code) goto _exit;
- pColData->nData += tPutValue(pColData->pData + pColData->nData, pValue, pColVal->type);
- }
-
- pColData->nVal++;
-
-_exit:
- return code;
-}
-
-int32_t tColDataCopy(SColData *pColDataSrc, SColData *pColDataDest) {
- int32_t code = 0;
- int32_t size;
-
- ASSERT(pColDataSrc->nVal > 0);
- ASSERT(pColDataDest->cid = pColDataSrc->cid);
- ASSERT(pColDataDest->type = pColDataSrc->type);
-
- pColDataDest->smaOn = pColDataSrc->smaOn;
- pColDataDest->nVal = pColDataSrc->nVal;
- pColDataDest->flag = pColDataSrc->flag;
-
- // bitmap
- if (pColDataSrc->flag != HAS_NONE && pColDataSrc->flag != HAS_NULL && pColDataSrc->flag != HAS_VALUE) {
- size = BIT2_SIZE(pColDataSrc->nVal);
- code = tRealloc(&pColDataDest->pBitMap, size);
- if (code) goto _exit;
- memcpy(pColDataDest->pBitMap, pColDataSrc->pBitMap, size);
- }
-
- // offset
- if (IS_VAR_DATA_TYPE(pColDataDest->type)) {
- size = sizeof(int32_t) * pColDataSrc->nVal;
-
- code = tRealloc((uint8_t **)&pColDataDest->aOffset, size);
- if (code) goto _exit;
-
- memcpy(pColDataDest->aOffset, pColDataSrc->aOffset, size);
- }
-
- // value
- pColDataDest->nData = pColDataSrc->nData;
- code = tRealloc(&pColDataDest->pData, pColDataSrc->nData);
- if (code) goto _exit;
- memcpy(pColDataDest->pData, pColDataSrc->pData, pColDataDest->nData);
-
-_exit:
- return code;
-}
-
-int32_t tColDataGetValue(SColData *pColData, int32_t iVal, SColVal *pColVal) {
- int32_t code = 0;
-
- ASSERT(iVal < pColData->nVal);
- ASSERT(pColData->flag);
-
- if (pColData->flag == HAS_NONE) {
- *pColVal = COL_VAL_NONE(pColData->cid, pColData->type);
- goto _exit;
- } else if (pColData->flag == HAS_NULL) {
- *pColVal = COL_VAL_NULL(pColData->cid, pColData->type);
- goto _exit;
- } else if (pColData->flag != HAS_VALUE) {
- uint8_t v = GET_BIT2(pColData->pBitMap, iVal);
- if (v == 0) {
- *pColVal = COL_VAL_NONE(pColData->cid, pColData->type);
- goto _exit;
- } else if (v == 1) {
- *pColVal = COL_VAL_NULL(pColData->cid, pColData->type);
- goto _exit;
- }
- }
-
- // get value
- SValue value;
- if (IS_VAR_DATA_TYPE(pColData->type)) {
- if (iVal + 1 < pColData->nVal) {
- value.nData = pColData->aOffset[iVal + 1] - pColData->aOffset[iVal];
- } else {
- value.nData = pColData->nData - pColData->aOffset[iVal];
- }
-
- value.pData = pColData->pData + pColData->aOffset[iVal];
- } else {
- tGetValue(pColData->pData + tDataTypes[pColData->type].bytes * iVal, &value, pColData->type);
- }
- *pColVal = COL_VAL_VALUE(pColData->cid, pColData->type, value);
-
-_exit:
- return code;
-}
-
-int32_t tPutColData(uint8_t *p, SColData *pColData) {
- int32_t n = 0;
-
- n += tPutI16v(p ? p + n : p, pColData->cid);
- n += tPutI8(p ? p + n : p, pColData->type);
- n += tPutI8(p ? p + n : p, pColData->smaOn);
- n += tPutI32v(p ? p + n : p, pColData->nVal);
- n += tPutU8(p ? p + n : p, pColData->flag);
-
- if (pColData->flag == HAS_NONE || pColData->flag == HAS_NULL) goto _exit;
- if (pColData->flag != HAS_VALUE) {
- // bitmap
-
- int32_t size = BIT2_SIZE(pColData->nVal);
- if (p) {
- memcpy(p + n, pColData->pBitMap, size);
- }
- n += size;
- }
- if (IS_VAR_DATA_TYPE(pColData->type)) {
- // offset
-
- int32_t size = sizeof(int32_t) * pColData->nVal;
- if (p) {
- memcpy(p + n, pColData->aOffset, size);
- }
- n += size;
- }
- n += tPutI32v(p ? p + n : p, pColData->nData);
- if (p) {
- memcpy(p + n, pColData->pData, pColData->nData);
- }
- n += pColData->nData;
-
-_exit:
- return n;
-}
-
-int32_t tGetColData(uint8_t *p, SColData *pColData) {
- int32_t n = 0;
-
- n += tGetI16v(p + n, &pColData->cid);
- n += tGetI8(p + n, &pColData->type);
- n += tGetI8(p + n, &pColData->smaOn);
- n += tGetI32v(p + n, &pColData->nVal);
- n += tGetU8(p + n, &pColData->flag);
-
- if (pColData->flag == HAS_NONE || pColData->flag == HAS_NULL) goto _exit;
- if (pColData->flag != HAS_VALUE) {
- // bitmap
-
- int32_t size = BIT2_SIZE(pColData->nVal);
- pColData->pBitMap = p + n;
- n += size;
- }
- if (IS_VAR_DATA_TYPE(pColData->type)) {
- // offset
-
- int32_t size = sizeof(int32_t) * pColData->nVal;
- pColData->aOffset = (int32_t *)(p + n);
- n += size;
- }
- n += tGetI32v(p + n, &pColData->nData);
- pColData->pData = p + n;
- n += pColData->nData;
-
-_exit:
- return n;
-}
-
-static FORCE_INLINE int32_t tColDataCmprFn(const void *p1, const void *p2) {
- SColData *pColData1 = (SColData *)p1;
- SColData *pColData2 = (SColData *)p2;
-
- if (pColData1->cid < pColData2->cid) {
- return -1;
- } else if (pColData1->cid > pColData2->cid) {
- return 1;
- }
-
- return 0;
-}
-
// SBlockData ======================================================
int32_t tBlockDataCreate(SBlockData *pBlockData) {
int32_t code = 0;
@@ -1166,7 +940,7 @@ void tBlockDataDestroy(SBlockData *pBlockData, int8_t deepClear) {
tFree((uint8_t *)pBlockData->aVersion);
tFree((uint8_t *)pBlockData->aTSKEY);
taosArrayDestroy(pBlockData->aIdx);
- taosArrayDestroyEx(pBlockData->aColData, deepClear ? tColDataClear : NULL);
+ taosArrayDestroyEx(pBlockData->aColData, deepClear ? tColDataDestroy : NULL);
pBlockData->aUid = NULL;
pBlockData->aVersion = NULL;
pBlockData->aTSKEY = NULL;
@@ -1174,24 +948,47 @@ void tBlockDataDestroy(SBlockData *pBlockData, int8_t deepClear) {
pBlockData->aColData = NULL;
}
-int32_t tBlockDataInit(SBlockData *pBlockData, int64_t suid, int64_t uid, STSchema *pTSchema) {
+int32_t tBlockDataInit(SBlockData *pBlockData, TABLEID *pId, STSchema *pTSchema, int16_t *aCid, int32_t nCid) {
int32_t code = 0;
- ASSERT(suid || uid);
+ ASSERT(pId->suid || pId->uid);
- pBlockData->suid = suid;
- pBlockData->uid = uid;
+ pBlockData->suid = pId->suid;
+ pBlockData->uid = pId->uid;
pBlockData->nRow = 0;
taosArrayClear(pBlockData->aIdx);
- for (int32_t iColumn = 1; iColumn < pTSchema->numOfCols; iColumn++) {
+ if (aCid) {
+ int32_t iColumn = 1;
STColumn *pTColumn = &pTSchema->columns[iColumn];
+ for (int32_t iCid = 0; iCid < nCid; iCid++) {
+ while (pTColumn && pTColumn->colId < aCid[iCid]) {
+ iColumn++;
+ pTColumn = (iColumn < pTSchema->numOfCols) ? &pTSchema->columns[iColumn] : NULL;
+ }
- SColData *pColData;
- code = tBlockDataAddColData(pBlockData, iColumn - 1, &pColData);
- if (code) goto _exit;
+ if (pTColumn == NULL) {
+ break;
+ } else if (pTColumn->colId == aCid[iCid]) {
+ SColData *pColData;
+ code = tBlockDataAddColData(pBlockData, taosArrayGetSize(pBlockData->aIdx), &pColData);
+ if (code) goto _exit;
+ tColDataInit(pColData, pTColumn->colId, pTColumn->type, (pTColumn->flags & COL_SMA_ON) ? 1 : 0);
+
+ iColumn++;
+ pTColumn = (iColumn < pTSchema->numOfCols) ? &pTSchema->columns[iColumn] : NULL;
+ }
+ }
+ } else {
+ for (int32_t iColumn = 1; iColumn < pTSchema->numOfCols; iColumn++) {
+ STColumn *pTColumn = &pTSchema->columns[iColumn];
+
+ SColData *pColData;
+ code = tBlockDataAddColData(pBlockData, iColumn - 1, &pColData);
+ if (code) goto _exit;
- tColDataInit(pColData, pTColumn->colId, pTColumn->type, (pTColumn->flags & COL_SMA_ON) ? 1 : 0);
+ tColDataInit(pColData, pTColumn->colId, pTColumn->type, (pTColumn->flags & COL_SMA_ON) ? 1 : 0);
+ }
}
_exit:
@@ -1235,7 +1032,7 @@ void tBlockDataClear(SBlockData *pBlockData) {
pBlockData->nRow = 0;
for (int32_t iColData = 0; iColData < taosArrayGetSize(pBlockData->aIdx); iColData++) {
SColData *pColData = tBlockDataGetColDataByIdx(pBlockData, iColData);
- tColDataReset(pColData);
+ tColDataClear(pColData);
}
}
@@ -1485,7 +1282,7 @@ void tBlockDataGetColData(SBlockData *pBlockData, int16_t cid, SColData **ppColD
while (lidx <= ridx) {
int32_t midx = (lidx + ridx) / 2;
SColData *pColData = tBlockDataGetColDataByIdx(pBlockData, midx);
- int32_t c = tColDataCmprFn(pColData, &(SColData){.cid = cid});
+ int32_t c = (pColData->cid == cid) ? 0 : ((pColData->cid > cid) ? 1 : -1);
if (c == 0) {
*ppColData = pColData;
@@ -1532,7 +1329,7 @@ int32_t tCmprBlockData(SBlockData *pBlockData, int8_t cmprAlg, uint8_t **ppOut,
if (code) goto _exit;
blockCol.offset = aBufN[0];
- aBufN[0] = aBufN[0] + blockCol.szBitmap + blockCol.szOffset + blockCol.szValue + sizeof(TSCKSUM);
+ aBufN[0] = aBufN[0] + blockCol.szBitmap + blockCol.szOffset + blockCol.szValue;
}
code = tRealloc(&aBuf[1], hdr.szBlkCol + tPutBlockCol(NULL, &blockCol));
@@ -1540,15 +1337,8 @@ int32_t tCmprBlockData(SBlockData *pBlockData, int8_t cmprAlg, uint8_t **ppOut,
hdr.szBlkCol += tPutBlockCol(aBuf[1] + hdr.szBlkCol, &blockCol);
}
- aBufN[1] = 0;
- if (hdr.szBlkCol > 0) {
- aBufN[1] = hdr.szBlkCol + sizeof(TSCKSUM);
-
- code = tRealloc(&aBuf[1], aBufN[1]);
- if (code) goto _exit;
-
- taosCalcChecksumAppend(0, aBuf[1], aBufN[1]);
- }
+ // SBlockCol
+ aBufN[1] = hdr.szBlkCol;
// uid + version + tskey
aBufN[2] = 0;
@@ -1569,16 +1359,11 @@ int32_t tCmprBlockData(SBlockData *pBlockData, int8_t cmprAlg, uint8_t **ppOut,
if (code) goto _exit;
aBufN[2] += hdr.szKey;
- aBufN[2] += sizeof(TSCKSUM);
- code = tRealloc(&aBuf[2], aBufN[2]);
- if (code) goto _exit;
-
// hdr
aBufN[3] = tPutDiskDataHdr(NULL, &hdr);
code = tRealloc(&aBuf[3], aBufN[3]);
if (code) goto _exit;
tPutDiskDataHdr(aBuf[3], &hdr);
- taosCalcChecksumAppend(taosCalcChecksum(0, aBuf[3], aBufN[3]), aBuf[2], aBufN[2]);
// aggragate
if (ppOut) {
@@ -1603,17 +1388,13 @@ _exit:
int32_t tDecmprBlockData(uint8_t *pIn, int32_t szIn, SBlockData *pBlockData, uint8_t *aBuf[]) {
int32_t code = 0;
- tBlockDataClear(pBlockData);
+ tBlockDataReset(pBlockData);
int32_t n = 0;
SDiskDataHdr hdr = {0};
// SDiskDataHdr
n += tGetDiskDataHdr(pIn + n, &hdr);
- if (!taosCheckChecksumWhole(pIn, n + hdr.szUid + hdr.szVer + hdr.szKey + sizeof(TSCKSUM))) {
- code = TSDB_CODE_FILE_CORRUPTED;
- goto _exit;
- }
ASSERT(hdr.delimiter == TSDB_FILE_DLMT);
pBlockData->suid = hdr.suid;
@@ -1641,7 +1422,7 @@ int32_t tDecmprBlockData(uint8_t *pIn, int32_t szIn, SBlockData *pBlockData, uin
code = tsdbDecmprData(pIn + n, hdr.szKey, TSDB_DATA_TYPE_TIMESTAMP, hdr.cmprAlg, (uint8_t **)&pBlockData->aTSKEY,
sizeof(TSKEY) * hdr.nRow, &aBuf[0]);
if (code) goto _exit;
- n = n + hdr.szKey + sizeof(TSCKSUM);
+ n += hdr.szKey;
// loop to decode each column data
if (hdr.szBlkCol == 0) goto _exit;
@@ -1663,8 +1444,8 @@ int32_t tDecmprBlockData(uint8_t *pIn, int32_t szIn, SBlockData *pBlockData, uin
if (code) goto _exit;
}
} else {
- code = tsdbDecmprColData(pIn + n + hdr.szBlkCol + sizeof(TSCKSUM) + blockCol.offset, &blockCol, hdr.cmprAlg,
- hdr.nRow, pColData, &aBuf[0]);
+ code = tsdbDecmprColData(pIn + n + hdr.szBlkCol + blockCol.offset, &blockCol, hdr.cmprAlg, hdr.nRow, pColData,
+ &aBuf[0]);
if (code) goto _exit;
}
}
@@ -1747,7 +1528,7 @@ void tsdbCalcColDataSMA(SColData *pColData, SColumnDataAgg *pColAgg) {
for (int32_t iVal = 0; iVal < pColData->nVal; iVal++) {
tColDataGetValue(pColData, iVal, pColVal);
- if (pColVal->isNone || pColVal->isNull) {
+ if (!COL_VAL_IS_VALUE(pColVal)) {
pColAgg->numOfNull++;
} else {
switch (pColData->type) {
@@ -1986,47 +1767,16 @@ int32_t tsdbCmprColData(SColData *pColData, int8_t cmprAlg, SBlockCol *pBlockCol
int32_t size = 0;
// bitmap
if (pColData->flag != HAS_VALUE) {
- uint8_t *pBitMap = pColData->pBitMap;
- int32_t szBitMap = BIT2_SIZE(pColData->nVal);
-
- // BIT2 to BIT1
- if (pColData->flag != (HAS_VALUE | HAS_NULL | HAS_NONE)) {
+ int32_t szBitMap;
+ if (pColData->flag == (HAS_VALUE | HAS_NULL | HAS_NONE)) {
+ szBitMap = BIT2_SIZE(pColData->nVal);
+ } else {
szBitMap = BIT1_SIZE(pColData->nVal);
- pBitMap = taosMemoryCalloc(1, szBitMap);
- if (pBitMap == NULL) {
- code = TSDB_CODE_OUT_OF_MEMORY;
- goto _exit;
- }
-
- for (int32_t iVal = 0; iVal < pColData->nVal; iVal++) {
- uint8_t v = GET_BIT2(pColData->pBitMap, iVal);
- switch (pColData->flag) {
- case (HAS_NULL | HAS_NONE):
- SET_BIT1(pBitMap, iVal, v);
- break;
- case (HAS_VALUE | HAS_NONE):
- if (v) {
- SET_BIT1(pBitMap, iVal, 1);
- } else {
- SET_BIT1(pBitMap, iVal, 0);
- }
- break;
- case (HAS_VALUE | HAS_NULL):
- SET_BIT1(pBitMap, iVal, v - 1);
- break;
- default:
- ASSERT(0);
- }
- }
}
- code = tsdbCmprData(pBitMap, szBitMap, TSDB_DATA_TYPE_TINYINT, cmprAlg, ppOut, nOut + size, &pBlockCol->szBitmap,
- ppBuf);
+ code = tsdbCmprData(pColData->pBitMap, szBitMap, TSDB_DATA_TYPE_TINYINT, cmprAlg, ppOut, nOut + size,
+ &pBlockCol->szBitmap, ppBuf);
if (code) goto _exit;
-
- if (pColData->flag != (HAS_VALUE | HAS_NULL | HAS_NONE)) {
- taosMemoryFree(pBitMap);
- }
}
size += pBlockCol->szBitmap;
@@ -2039,19 +1789,13 @@ int32_t tsdbCmprColData(SColData *pColData, int8_t cmprAlg, SBlockCol *pBlockCol
size += pBlockCol->szOffset;
// value
- if (pColData->flag != (HAS_NULL | HAS_NONE)) {
+ if ((pColData->flag != (HAS_NULL | HAS_NONE)) && pColData->nData) {
code = tsdbCmprData((uint8_t *)pColData->pData, pColData->nData, pColData->type, cmprAlg, ppOut, nOut + size,
&pBlockCol->szValue, ppBuf);
if (code) goto _exit;
}
size += pBlockCol->szValue;
- // checksum
- size += sizeof(TSCKSUM);
- code = tRealloc(ppOut, nOut + size);
- if (code) goto _exit;
- taosCalcChecksumAppend(0, *ppOut + nOut, size);
-
_exit:
return code;
}
@@ -2060,12 +1804,6 @@ int32_t tsdbDecmprColData(uint8_t *pIn, SBlockCol *pBlockCol, int8_t cmprAlg, in
uint8_t **ppBuf) {
int32_t code = 0;
- int32_t size = pBlockCol->szBitmap + pBlockCol->szOffset + pBlockCol->szValue + sizeof(TSCKSUM);
- if (!taosCheckChecksumWhole(pIn, size)) {
- code = TSDB_CODE_FILE_CORRUPTED;
- goto _exit;
- }
-
ASSERT(pColData->cid == pBlockCol->cid);
ASSERT(pColData->type == pBlockCol->type);
pColData->smaOn = pBlockCol->smaOn;
@@ -2076,46 +1814,15 @@ int32_t tsdbDecmprColData(uint8_t *pIn, SBlockCol *pBlockCol, int8_t cmprAlg, in
uint8_t *p = pIn;
// bitmap
if (pBlockCol->szBitmap) {
- if (pBlockCol->flag != (HAS_VALUE | HAS_NULL | HAS_NONE)) {
- uint8_t *pBitMap = NULL;
- code = tsdbDecmprData(p, pBlockCol->szBitmap, TSDB_DATA_TYPE_TINYINT, cmprAlg, &pBitMap,
- BIT1_SIZE(pColData->nVal), ppBuf);
- if (code) goto _exit;
-
- code = tRealloc(&pColData->pBitMap, BIT2_SIZE(pColData->nVal));
- if (code) {
- tFree(pBitMap);
- goto _exit;
- }
-
- // BIT1 to BIT2
- for (int32_t iVal = 0; iVal < nVal; iVal++) {
- uint8_t v = GET_BIT1(pBitMap, iVal);
- switch (pBlockCol->flag) {
- case (HAS_NULL | HAS_NONE):
- SET_BIT2(pColData->pBitMap, iVal, v);
- break;
- case (HAS_VALUE | HAS_NONE):
- if (v) {
- SET_BIT2(pColData->pBitMap, iVal, 2);
- } else {
- SET_BIT2(pColData->pBitMap, iVal, 0);
- }
- break;
- case (HAS_VALUE | HAS_NULL):
- SET_BIT2(pColData->pBitMap, iVal, v + 1);
- break;
- default:
- ASSERT(0);
- }
- }
-
- tFree(pBitMap);
+ int32_t szBitMap;
+ if (pColData->flag == (HAS_VALUE | HAS_NULL | HAS_NONE)) {
+ szBitMap = BIT2_SIZE(pColData->nVal);
} else {
- code = tsdbDecmprData(p, pBlockCol->szBitmap, TSDB_DATA_TYPE_TINYINT, cmprAlg, &pColData->pBitMap,
- BIT2_SIZE(pColData->nVal), ppBuf);
- if (code) goto _exit;
+ szBitMap = BIT1_SIZE(pColData->nVal);
}
+
+ code = tsdbDecmprData(p, pBlockCol->szBitmap, TSDB_DATA_TYPE_TINYINT, cmprAlg, &pColData->pBitMap, szBitMap, ppBuf);
+ if (code) goto _exit;
}
p += pBlockCol->szBitmap;
@@ -2137,37 +1844,3 @@ int32_t tsdbDecmprColData(uint8_t *pIn, SBlockCol *pBlockCol, int8_t cmprAlg, in
_exit:
return code;
}
-
-int32_t tsdbReadAndCheck(TdFilePtr pFD, int64_t offset, uint8_t **ppOut, int32_t size, int8_t toCheck) {
- int32_t code = 0;
-
- // alloc
- code = tRealloc(ppOut, size);
- if (code) goto _exit;
-
- // seek
- int64_t n = taosLSeekFile(pFD, offset, SEEK_SET);
- if (n < 0) {
- code = TAOS_SYSTEM_ERROR(errno);
- goto _exit;
- }
-
- // read
- n = taosReadFile(pFD, *ppOut, size);
- if (n < 0) {
- code = TAOS_SYSTEM_ERROR(errno);
- goto _exit;
- } else if (n < size) {
- code = TSDB_CODE_FILE_CORRUPTED;
- goto _exit;
- }
-
- // check
- if (toCheck && !taosCheckChecksumWhole(*ppOut, size)) {
- code = TSDB_CODE_FILE_CORRUPTED;
- goto _exit;
- }
-
-_exit:
- return code;
-}
diff --git a/source/dnode/vnode/src/tsdb/tsdbWrite.c b/source/dnode/vnode/src/tsdb/tsdbWrite.c
index 383652531e211504983444d9d783ddf9189f5161..0a9fbf92a4bf62326aa9755b827b83d0d510d2f7 100644
--- a/source/dnode/vnode/src/tsdb/tsdbWrite.c
+++ b/source/dnode/vnode/src/tsdb/tsdbWrite.c
@@ -39,7 +39,7 @@ int tsdbInsertData(STsdb *pTsdb, int64_t version, SSubmitReq *pMsg, SSubmitRsp *
SSubmitBlkRsp r = {0};
tGetSubmitMsgNext(&msgIter, &pBlock);
if (pBlock == NULL) break;
- if (tsdbInsertTableData(pTsdb, version, &msgIter, pBlock, &r) < 0) {
+ if ((terrno = tsdbInsertTableData(pTsdb, version, &msgIter, pBlock, &r)) < 0) {
return -1;
}
diff --git a/source/dnode/vnode/src/vnd/vnodeBufPool.c b/source/dnode/vnode/src/vnd/vnodeBufPool.c
index 5a22114ab42206f0f63a4c41a3d4c53c438ff68b..6e02425b55a39463d25638a2190de96f3459d657 100644
--- a/source/dnode/vnode/src/vnd/vnodeBufPool.c
+++ b/source/dnode/vnode/src/vnd/vnodeBufPool.c
@@ -53,6 +53,10 @@ int vnodeCloseBufPool(SVnode *pVnode) {
vnodeBufPoolDestroy(pPool);
}
+ if (pVnode->inUse) {
+ vnodeBufPoolDestroy(pVnode->inUse);
+ pVnode->inUse = NULL;
+ }
vDebug("vgId:%d, vnode buffer pool is closed", TD_VID(pVnode));
return 0;
@@ -177,4 +181,4 @@ void vnodeBufPoolUnRef(SVBufPool *pPool) {
taosThreadMutexUnlock(&pVnode->mutex);
}
-}
\ No newline at end of file
+}
diff --git a/source/dnode/vnode/src/vnd/vnodeCfg.c b/source/dnode/vnode/src/vnd/vnodeCfg.c
index 580ab8bc93cac3a5057821f238cf85fc1011fa38..5adb2eb3592e7bcba7803b5e2972f0bdd3689adb 100644
--- a/source/dnode/vnode/src/vnd/vnodeCfg.c
+++ b/source/dnode/vnode/src/vnd/vnodeCfg.c
@@ -13,6 +13,7 @@
* along with this program. If not, see .
*/
+#include "tutil.h"
#include "vnd.h"
const SVnodeCfg vnodeCfgDefault = {.vgId = -1,
@@ -47,7 +48,9 @@ const SVnodeCfg vnodeCfgDefault = {.vgId = -1,
},
.hashBegin = 0,
.hashEnd = 0,
- .hashMethod = 0};
+ .hashMethod = 0,
+ .sttTrigger = TSDB_DEFAULT_STT_FILE,
+ .tsdbPageSize = TSDB_DEFAULT_PAGE_SIZE};
int vnodeCheckCfg(const SVnodeCfg *pCfg) {
// TODO
@@ -106,9 +109,13 @@ int vnodeEncodeConfig(const void *pObj, SJson *pJson) {
if (tjsonAddIntegerToObject(pJson, "wal.retentionSize", pCfg->walCfg.retentionSize) < 0) return -1;
if (tjsonAddIntegerToObject(pJson, "wal.segSize", pCfg->walCfg.segSize) < 0) return -1;
if (tjsonAddIntegerToObject(pJson, "wal.level", pCfg->walCfg.level) < 0) return -1;
+ if (tjsonAddIntegerToObject(pJson, "sstTrigger", pCfg->sttTrigger) < 0) return -1;
if (tjsonAddIntegerToObject(pJson, "hashBegin", pCfg->hashBegin) < 0) return -1;
if (tjsonAddIntegerToObject(pJson, "hashEnd", pCfg->hashEnd) < 0) return -1;
if (tjsonAddIntegerToObject(pJson, "hashMethod", pCfg->hashMethod) < 0) return -1;
+ if (tjsonAddIntegerToObject(pJson, "hashPrefix", pCfg->hashPrefix) < 0) return -1;
+ if (tjsonAddIntegerToObject(pJson, "hashSuffix", pCfg->hashSuffix) < 0) return -1;
+ if (tjsonAddIntegerToObject(pJson, "tsdbPageSize", pCfg->tsdbPageSize) < 0) return -1;
if (tjsonAddIntegerToObject(pJson, "syncCfg.replicaNum", pCfg->syncCfg.replicaNum) < 0) return -1;
if (tjsonAddIntegerToObject(pJson, "syncCfg.myIndex", pCfg->syncCfg.myIndex) < 0) return -1;
@@ -128,6 +135,9 @@ int vnodeEncodeConfig(const void *pObj, SJson *pJson) {
tjsonAddItemToArray(pNodeInfoArr, pNodeInfo);
}
+ // add tsdb page size config
+ if (tjsonAddIntegerToObject(pJson, "tsdbPageSize", pCfg->tsdbPageSize) < 0) return -1;
+
return 0;
}
@@ -205,12 +215,18 @@ int vnodeDecodeConfig(const SJson *pJson, void *pObj) {
if (code < 0) return -1;
tjsonGetNumberValue(pJson, "wal.level", pCfg->walCfg.level, code);
if (code < 0) return -1;
+ tjsonGetNumberValue(pJson, "sstTrigger", pCfg->sttTrigger, code);
+ if (code < 0) pCfg->sttTrigger = TSDB_DEFAULT_SST_TRIGGER;
tjsonGetNumberValue(pJson, "hashBegin", pCfg->hashBegin, code);
if (code < 0) return -1;
tjsonGetNumberValue(pJson, "hashEnd", pCfg->hashEnd, code);
if (code < 0) return -1;
tjsonGetNumberValue(pJson, "hashMethod", pCfg->hashMethod, code);
if (code < 0) return -1;
+ tjsonGetNumberValue(pJson, "hashPrefix", pCfg->hashPrefix, code);
+ if (code < 0) pCfg->hashPrefix = TSDB_DEFAULT_HASH_PREFIX;
+ tjsonGetNumberValue(pJson, "hashSuffix", pCfg->hashSuffix, code);
+ if (code < 0) pCfg->hashSuffix = TSDB_DEFAULT_HASH_SUFFIX;
tjsonGetNumberValue(pJson, "syncCfg.replicaNum", pCfg->syncCfg.replicaNum, code);
if (code < 0) return -1;
@@ -239,6 +255,9 @@ int vnodeDecodeConfig(const SJson *pJson, void *pObj) {
tjsonGetStringValue(pNodeInfo, "nodeFqdn", (pCfg->syncCfg.nodeInfo)[i].nodeFqdn);
}
+ tjsonGetNumberValue(pJson, "tsdbPageSize", pCfg->tsdbPageSize, code);
+ if (code < 0) pCfg->tsdbPageSize = TSDB_DEFAULT_TSDB_PAGESIZE * 1024;
+
return 0;
}
@@ -247,7 +266,8 @@ int vnodeValidateTableHash(SVnode *pVnode, char *tableFName) {
switch (pVnode->config.hashMethod) {
default:
- hashValue = MurmurHash3_32(tableFName, strlen(tableFName));
+ hashValue = taosGetTbHashVal(tableFName, strlen(tableFName), pVnode->config.hashMethod, pVnode->config.hashPrefix,
+ pVnode->config.hashSuffix);
break;
}
diff --git a/source/dnode/vnode/src/vnd/vnodeOpen.c b/source/dnode/vnode/src/vnd/vnodeOpen.c
index a4fd984fb762a934b48489da254b3af1aa4dc908..b5307cecf29d21bf68773c41ed318341e083d814 100644
--- a/source/dnode/vnode/src/vnd/vnodeOpen.c
+++ b/source/dnode/vnode/src/vnd/vnodeOpen.c
@@ -60,6 +60,8 @@ SVnode *vnodeOpen(const char *path, STfs *pTfs, SMsgCb msgCb) {
snprintf(dir, TSDB_FILENAME_LEN, "%s%s%s", tfsGetPrimaryPath(pTfs), TD_DIRSEP, path);
+ info.config = vnodeCfgDefault;
+
// load vnode info
ret = vnodeLoadInfo(dir, &info);
if (ret < 0) {
@@ -159,7 +161,6 @@ SVnode *vnodeOpen(const char *path, STfs *pTfs, SMsgCb msgCb) {
// open sync
if (vnodeSyncOpen(pVnode, dir)) {
vError("vgId:%d, failed to open sync since %s", TD_VID(pVnode), tstrerror(terrno));
- terrno = TSDB_CODE_OUT_OF_MEMORY;
goto _err;
}
@@ -172,6 +173,7 @@ _err:
if (pVnode->pTsdb) tsdbClose(&pVnode->pTsdb);
if (pVnode->pSma) smaClose(pVnode->pSma);
if (pVnode->pMeta) metaClose(pVnode->pMeta);
+ if (pVnode->pPool) vnodeCloseBufPool(pVnode);
tsem_destroy(&(pVnode->canCommit));
taosMemoryFree(pVnode);
diff --git a/source/dnode/vnode/src/vnd/vnodeQuery.c b/source/dnode/vnode/src/vnd/vnodeQuery.c
index 8d799e919d1e4c06cfec6438d7a4a34fc336993d..825bf8a0a7c0adbf5a19c08c2e2418da3b6e571a 100644
--- a/source/dnode/vnode/src/vnd/vnodeQuery.c
+++ b/source/dnode/vnode/src/vnd/vnodeQuery.c
@@ -16,7 +16,7 @@
#include "vnd.h"
int vnodeQueryOpen(SVnode *pVnode) {
- return qWorkerInit(NODE_TYPE_VNODE, TD_VID(pVnode), NULL, (void **)&pVnode->pQuery, &pVnode->msgCb);
+ return qWorkerInit(NODE_TYPE_VNODE, TD_VID(pVnode), (void **)&pVnode->pQuery, &pVnode->msgCb);
}
void vnodeQueryClose(SVnode *pVnode) { qWorkerDestroy((void **)&pVnode->pQuery); }
@@ -368,6 +368,7 @@ _exit:
int32_t vnodeGetLoad(SVnode *pVnode, SVnodeLoad *pLoad) {
pLoad->vgId = TD_VID(pVnode);
pLoad->syncState = syncGetMyRole(pVnode->sync);
+ pLoad->cacheUsage = tsdbCacheGetUsage(pVnode);
pLoad->numOfTables = metaGetTbNum(pVnode->pMeta);
pLoad->numOfTimeSeries = metaGetTimeSeriesNum(pVnode->pMeta);
pLoad->totalStorage = (int64_t)3 * 1073741824;
@@ -424,8 +425,8 @@ int32_t vnodeGetCtbIdList(SVnode *pVnode, int64_t suid, SArray *list) {
return TSDB_CODE_SUCCESS;
}
-int32_t vnodeGetStbIdList(SVnode* pVnode, int64_t suid, SArray* list) {
- SMStbCursor* pCur = metaOpenStbCursor(pVnode->pMeta, suid);
+int32_t vnodeGetStbIdList(SVnode *pVnode, int64_t suid, SArray *list) {
+ SMStbCursor *pCur = metaOpenStbCursor(pVnode->pMeta, suid);
if (!pCur) {
return TSDB_CODE_FAILED;
}
@@ -467,9 +468,13 @@ static int32_t vnodeGetStbColumnNum(SVnode *pVnode, tb_uid_t suid, int *num) {
STSchema *pTSchema = metaGetTbTSchema(pVnode->pMeta, suid, -1);
// metaGetTbTSchemaEx(pVnode->pMeta, suid, suid, -1, &pTSchema);
- *num = pTSchema->numOfCols;
+ if (pTSchema) {
+ *num = pTSchema->numOfCols;
- taosMemoryFree(pTSchema);
+ taosMemoryFree(pTSchema);
+ } else {
+ *num = 2;
+ }
return TSDB_CODE_SUCCESS;
}
diff --git a/source/dnode/vnode/src/vnd/vnodeSnapshot.c b/source/dnode/vnode/src/vnd/vnodeSnapshot.c
index 5a81f9191920ffac0ae055e140e7e59448dfd406..08c3a34699b6f9f83b366e42f77e42a6094f09ed 100644
--- a/source/dnode/vnode/src/vnd/vnodeSnapshot.c
+++ b/source/dnode/vnode/src/vnd/vnodeSnapshot.c
@@ -39,7 +39,7 @@ struct SVSnapReader {
SStreamStateReader *pStreamStateReader;
// rsma
int8_t rsmaDone;
- SRsmaSnapReader *pRsmaReader;
+ SRSmaSnapReader *pRsmaReader;
};
int32_t vnodeSnapReaderOpen(SVnode *pVnode, int64_t sver, int64_t ever, SVSnapReader **ppReader) {
@@ -241,7 +241,7 @@ struct SVSnapWriter {
SStreamTaskWriter *pStreamTaskWriter;
SStreamStateWriter *pStreamStateWriter;
// rsma
- SRsmaSnapWriter *pRsmaSnapWriter;
+ SRSmaSnapWriter *pRsmaSnapWriter;
};
int32_t vnodeSnapWriterOpen(SVnode *pVnode, int64_t sver, int64_t ever, SVSnapWriter **ppWriter) {
@@ -354,7 +354,8 @@ int32_t vnodeSnapWrite(SVSnapWriter *pWriter, uint8_t *pData, uint32_t nData) {
code = metaSnapWrite(pWriter->pMetaSnapWriter, pData, nData);
if (code) goto _err;
} break;
- case SNAP_DATA_TSDB: {
+ case SNAP_DATA_TSDB:
+ case SNAP_DATA_DEL: {
// tsdb
if (pWriter->pTsdbSnapWriter == NULL) {
code = tsdbSnapWriterOpen(pVnode->pTsdb, pWriter->sver, pWriter->ever, &pWriter->pTsdbSnapWriter);
diff --git a/source/dnode/vnode/src/vnd/vnodeSvr.c b/source/dnode/vnode/src/vnd/vnodeSvr.c
index 85feecff1a0247790eec14b503b7dda6247a6c87..28093dfc709eab590d444ee342a4acf376f379dc 100644
--- a/source/dnode/vnode/src/vnd/vnodeSvr.c
+++ b/source/dnode/vnode/src/vnd/vnodeSvr.c
@@ -289,7 +289,7 @@ int32_t vnodePreprocessQueryMsg(SVnode *pVnode, SRpcMsg *pMsg) {
int32_t vnodeProcessQueryMsg(SVnode *pVnode, SRpcMsg *pMsg) {
vTrace("message in vnode query queue is processing");
- if ((pMsg->msgType == TDMT_SCH_QUERY) && !vnodeIsReadyForRead(pVnode)) {
+ if ((pMsg->msgType == TDMT_SCH_QUERY) && !vnodeIsLeader(pVnode)) {
vnodeRedirectRpcMsg(pVnode, pMsg);
return 0;
}
@@ -311,7 +311,12 @@ int32_t vnodeProcessFetchMsg(SVnode *pVnode, SRpcMsg *pMsg, SQueueInfo *pInfo) {
vTrace("vgId:%d, msg:%p in fetch queue is processing", pVnode->config.vgId, pMsg);
if ((pMsg->msgType == TDMT_SCH_FETCH || pMsg->msgType == TDMT_VND_TABLE_META || pMsg->msgType == TDMT_VND_TABLE_CFG ||
pMsg->msgType == TDMT_VND_BATCH_META) &&
- !vnodeIsReadyForRead(pVnode)) {
+ !vnodeIsLeader(pVnode)) {
+ vnodeRedirectRpcMsg(pVnode, pMsg);
+ return 0;
+ }
+
+ if (pMsg->msgType == TDMT_VND_CONSUME && !pVnode->restored) {
vnodeRedirectRpcMsg(pVnode, pMsg);
return 0;
}
@@ -371,7 +376,7 @@ void vnodeUpdateMetaRsp(SVnode *pVnode, STableMetaRsp *pMetaRsp) {
if (NULL == pMetaRsp) {
return;
}
-
+
strcpy(pMetaRsp->dbFName, pVnode->config.dbname);
pMetaRsp->dbId = pVnode->config.dbId;
pMetaRsp->vgId = TD_VID(pVnode);
@@ -496,6 +501,7 @@ static int32_t vnodeProcessCreateTbReq(SVnode *pVnode, int64_t version, void *pR
// loop to create table
for (int32_t iReq = 0; iReq < req.nReqs; iReq++) {
pCreateReq = req.pReqs + iReq;
+ memset(&cRsp, 0, sizeof(cRsp));
if ((terrno = grantCheck(TSDB_GRANT_TIMESERIES)) < 0) {
rcode = -1;
@@ -526,14 +532,15 @@ static int32_t vnodeProcessCreateTbReq(SVnode *pVnode, int64_t version, void *pR
cRsp.code = TSDB_CODE_SUCCESS;
tdFetchTbUidList(pVnode->pSma, &pStore, pCreateReq->ctb.suid, pCreateReq->uid);
taosArrayPush(tbUids, &pCreateReq->uid);
- vnodeUpdateMetaRsp(pVnode, cRsp.pMeta);
+ vnodeUpdateMetaRsp(pVnode, cRsp.pMeta);
}
taosArrayPush(rsp.pArray, &cRsp);
}
+ vDebug("vgId:%d, add %d new created tables into query table list", TD_VID(pVnode), (int32_t)taosArrayGetSize(tbUids));
tqUpdateTbUidList(pVnode->pTq, tbUids, true);
- if (tdUpdateTbUidList(pVnode->pSma, pStore) < 0) {
+ if (tdUpdateTbUidList(pVnode->pSma, pStore, true) < 0) {
goto _exit;
}
tdUidStoreFree(pStore);
@@ -691,6 +698,7 @@ static int32_t vnodeProcessDropTbReq(SVnode *pVnode, int64_t version, void *pReq
SEncoder encoder = {0};
int32_t ret;
SArray *tbUids = NULL;
+ STbUidStore *pStore = NULL;
pRsp->msgType = TDMT_VND_DROP_TABLE_RSP;
pRsp->pCont = NULL;
@@ -714,9 +722,10 @@ static int32_t vnodeProcessDropTbReq(SVnode *pVnode, int64_t version, void *pReq
for (int32_t iReq = 0; iReq < req.nReqs; iReq++) {
SVDropTbReq *pDropTbReq = req.pReqs + iReq;
SVDropTbRsp dropTbRsp = {0};
+ tb_uid_t tbUid = 0;
/* code */
- ret = metaDropTable(pVnode->pMeta, version, pDropTbReq, tbUids);
+ ret = metaDropTable(pVnode->pMeta, version, pDropTbReq, tbUids, &tbUid);
if (ret < 0) {
if (pDropTbReq->igNotExists && terrno == TSDB_CODE_VND_TABLE_NOT_EXIST) {
dropTbRsp.code = TSDB_CODE_SUCCESS;
@@ -725,15 +734,18 @@ static int32_t vnodeProcessDropTbReq(SVnode *pVnode, int64_t version, void *pReq
}
} else {
dropTbRsp.code = TSDB_CODE_SUCCESS;
+ if (tbUid > 0) tdFetchTbUidList(pVnode->pSma, &pStore, pDropTbReq->suid, tbUid);
}
taosArrayPush(rsp.pArray, &dropTbRsp);
}
tqUpdateTbUidList(pVnode->pTq, tbUids, false);
+ tdUpdateTbUidList(pVnode->pSma, pStore, false);
_exit:
taosArrayDestroy(tbUids);
+ tdUidStoreFree(pStore);
tDecoderClear(&decoder);
tEncodeSize(tEncodeSVDropTbBatchRsp, &rsp, pRsp->contLen, ret);
pRsp->pCont = rpcMallocCont(pRsp->contLen);
@@ -801,7 +813,6 @@ static int32_t vnodeProcessSubmitReq(SVnode *pVnode, int64_t version, void *pReq
SSubmitRsp submitRsp = {0};
SSubmitMsgIter msgIter = {0};
SSubmitBlk *pBlock;
- SSubmitRsp rsp = {0};
SVCreateTbReq createTbReq = {0};
SDecoder decoder = {0};
int32_t nRows;
@@ -879,8 +890,9 @@ static int32_t vnodeProcessSubmitReq(SVnode *pVnode, int64_t version, void *pReq
if (NULL != submitBlkRsp.pMeta) {
vnodeUpdateMetaRsp(pVnode, submitBlkRsp.pMeta);
}
+
+ taosArrayPush(newTbUids, &createTbReq.uid);
}
- taosArrayPush(newTbUids, &createTbReq.uid);
submitBlkRsp.uid = createTbReq.uid;
submitBlkRsp.tblFName = taosMemoryMalloc(strlen(pVnode->config.dbname) + strlen(createTbReq.name) + 2);
@@ -911,6 +923,12 @@ static int32_t vnodeProcessSubmitReq(SVnode *pVnode, int64_t version, void *pReq
submitRsp.affectedRows += submitBlkRsp.affectedRows;
taosArrayPush(submitRsp.pArray, &submitBlkRsp);
}
+
+ if (taosArrayGetSize(newTbUids) > 0) {
+ vDebug("vgId:%d, add %d table into query table list in handling submit", TD_VID(pVnode),
+ (int32_t)taosArrayGetSize(newTbUids));
+ }
+
tqUpdateTbUidList(pVnode->pTq, newTbUids, true);
_exit:
@@ -1106,6 +1124,7 @@ static int32_t vnodeProcessDeleteReq(SVnode *pVnode, int64_t version, void *pReq
tDecoderInit(pCoder, pReq, len);
tDecodeDeleteRes(pCoder, pRes);
+ ASSERT(taosArrayGetSize(pRes->uidList) == 0 || (pRes->skey != 0 && pRes->ekey != 0));
for (int32_t iUid = 0; iUid < taosArrayGetSize(pRes->uidList); iUid++) {
code = tsdbDeleteTableData(pVnode->pTsdb, version, pRes->suid, *(uint64_t *)taosArrayGet(pRes->uidList, iUid),
diff --git a/source/dnode/vnode/src/vnd/vnodeSync.c b/source/dnode/vnode/src/vnd/vnodeSync.c
index 65d4e9aaf10cf9c0ee723eb70b94166cde1fba0d..980761cd145b7f27ae96cc00d21259083cdefba9 100644
--- a/source/dnode/vnode/src/vnd/vnodeSync.c
+++ b/source/dnode/vnode/src/vnd/vnodeSync.c
@@ -240,7 +240,7 @@ void vnodeProposeWriteMsg(SQueueInfo *pInfo, STaosQall *qall, int32_t numOfMsgs)
isWeak, isBlock, msg, numOfMsgs, arrayPos, pMsg->info.handle);
if (!pVnode->restored) {
- vGError("vgId:%d, msg:%p failed to process since not leader", vgId, pMsg);
+ vGError("vgId:%d, msg:%p failed to process since restore not finished", vgId, pMsg);
terrno = TSDB_CODE_APP_NOT_READY;
vnodeHandleProposeError(pVnode, pMsg, TSDB_CODE_APP_NOT_READY);
rpcFreeCont(pMsg->pCont);
@@ -676,6 +676,20 @@ static void vnodeLeaderTransfer(struct SSyncFSM *pFsm, const SRpcMsg *pMsg, SFsm
static void vnodeRestoreFinish(struct SSyncFSM *pFsm) {
SVnode *pVnode = pFsm->data;
+
+ do {
+ int32_t itemSize = tmsgGetQueueSize(&pVnode->msgCb, pVnode->config.vgId, APPLY_QUEUE);
+ if (itemSize == 0) {
+ vInfo("vgId:%d, apply queue is empty, restore finish", pVnode->config.vgId);
+ break;
+ } else {
+ vInfo("vgId:%d, restore not finish since %d items in apply queue", pVnode->config.vgId);
+ taosMsleep(10);
+ }
+ } while (true);
+
+ walApplyVer(pVnode->pWal, pVnode->state.applied);
+
pVnode->restored = true;
vDebug("vgId:%d, sync restore finished", pVnode->config.vgId);
}
@@ -782,16 +796,3 @@ bool vnodeIsLeader(SVnode *pVnode) {
return true;
}
-bool vnodeIsReadyForRead(SVnode *pVnode) {
- if (syncIsReady(pVnode->sync)) {
- return true;
- }
-
- if (syncIsReadyForRead(pVnode->sync)) {
- return true;
- }
-
- vDebug("vgId:%d, vnode not ready for read, state:%s, last:%ld, cmt:%ld", pVnode->config.vgId,
- syncGetMyRoleStr(pVnode->sync), syncGetLastIndex(pVnode->sync), syncGetCommitIndex(pVnode->sync));
- return false;
-}
diff --git a/source/libs/catalog/src/ctgAsync.c b/source/libs/catalog/src/ctgAsync.c
index 585b33930c2cae0332ee77a3933d5a86288c77bc..3c4cc9f7a2d11f10b5303a7769dbfe085f38f6a1 100644
--- a/source/libs/catalog/src/ctgAsync.c
+++ b/source/libs/catalog/src/ctgAsync.c
@@ -13,15 +13,15 @@
* along with this program. If not, see .
*/
-#include "trpc.h"
-#include "query.h"
-#include "tname.h"
#include "catalogInt.h"
+#include "query.h"
#include "systable.h"
+#include "tname.h"
#include "tref.h"
+#include "trpc.h"
-int32_t ctgInitGetTbMetaTask(SCtgJob *pJob, int32_t taskIdx, void* param) {
- SName *name = (SName*)param;
+int32_t ctgInitGetTbMetaTask(SCtgJob* pJob, int32_t taskIdx, void* param) {
+ SName* name = (SName*)param;
SCtgTask task = {0};
task.type = CTG_TASK_GET_TB_META;
@@ -45,13 +45,14 @@ int32_t ctgInitGetTbMetaTask(SCtgJob *pJob, int32_t taskIdx, void* param) {
taosArrayPush(pJob->pTasks, &task);
- qDebug("QID:0x%" PRIx64 " the %dth task type %s initialized, tbName:%s", pJob->queryId, taskIdx, ctgTaskTypeStr(task.type), name->tname);
+ qDebug("QID:0x%" PRIx64 " the %dth task type %s initialized, tbName:%s", pJob->queryId, taskIdx,
+ ctgTaskTypeStr(task.type), name->tname);
return TSDB_CODE_SUCCESS;
}
-int32_t ctgInitGetTbMetasTask(SCtgJob *pJob, int32_t taskIdx, void* param) {
- SName *name = (SName*)param;
+int32_t ctgInitGetTbMetasTask(SCtgJob* pJob, int32_t taskIdx, void* param) {
+ SName* name = (SName*)param;
SCtgTask task = {0};
task.type = CTG_TASK_GET_TB_META_BATCH;
@@ -69,14 +70,14 @@ int32_t ctgInitGetTbMetasTask(SCtgJob *pJob, int32_t taskIdx, void* param) {
taosArrayPush(pJob->pTasks, &task);
- qDebug("QID:0x%" PRIx64 " the %dth task type %s initialized, dbNum:%d, tbNum:%d",
- pJob->queryId, taskIdx, ctgTaskTypeStr(task.type), taosArrayGetSize(ctx->pNames), pJob->tbMetaNum);
+ qDebug("QID:0x%" PRIx64 " the %dth task type %s initialized, dbNum:%d, tbNum:%d", pJob->queryId, taskIdx,
+ ctgTaskTypeStr(task.type), taosArrayGetSize(ctx->pNames), pJob->tbMetaNum);
return TSDB_CODE_SUCCESS;
}
-int32_t ctgInitGetDbVgTask(SCtgJob *pJob, int32_t taskIdx, void* param) {
- char *dbFName = (char*)param;
+int32_t ctgInitGetDbVgTask(SCtgJob* pJob, int32_t taskIdx, void* param) {
+ char* dbFName = (char*)param;
SCtgTask task = {0};
task.type = CTG_TASK_GET_DB_VGROUP;
@@ -94,13 +95,14 @@ int32_t ctgInitGetDbVgTask(SCtgJob *pJob, int32_t taskIdx, void* param) {
taosArrayPush(pJob->pTasks, &task);
- qDebug("QID:0x%" PRIx64 " the %dth task type %s initialized, dbFName:%s", pJob->queryId, taskIdx, ctgTaskTypeStr(task.type), dbFName);
+ qDebug("QID:0x%" PRIx64 " the %dth task type %s initialized, dbFName:%s", pJob->queryId, taskIdx,
+ ctgTaskTypeStr(task.type), dbFName);
return TSDB_CODE_SUCCESS;
}
-int32_t ctgInitGetDbCfgTask(SCtgJob *pJob, int32_t taskIdx, void* param) {
- char *dbFName = (char*)param;
+int32_t ctgInitGetDbCfgTask(SCtgJob* pJob, int32_t taskIdx, void* param) {
+ char* dbFName = (char*)param;
SCtgTask task = {0};
task.type = CTG_TASK_GET_DB_CFG;
@@ -118,13 +120,14 @@ int32_t ctgInitGetDbCfgTask(SCtgJob *pJob, int32_t taskIdx, void* param) {
taosArrayPush(pJob->pTasks, &task);
- qDebug("QID:0x%" PRIx64 " the %dth task type %s initialized, dbFName:%s", pJob->queryId, taskIdx, ctgTaskTypeStr(task.type), dbFName);
+ qDebug("QID:0x%" PRIx64 " the %dth task type %s initialized, dbFName:%s", pJob->queryId, taskIdx,
+ ctgTaskTypeStr(task.type), dbFName);
return TSDB_CODE_SUCCESS;
}
-int32_t ctgInitGetDbInfoTask(SCtgJob *pJob, int32_t taskIdx, void* param) {
- char *dbFName = (char*)param;
+int32_t ctgInitGetDbInfoTask(SCtgJob* pJob, int32_t taskIdx, void* param) {
+ char* dbFName = (char*)param;
SCtgTask task = {0};
task.type = CTG_TASK_GET_DB_INFO;
@@ -142,14 +145,14 @@ int32_t ctgInitGetDbInfoTask(SCtgJob *pJob, int32_t taskIdx, void* param) {
taosArrayPush(pJob->pTasks, &task);
- qDebug("QID:0x%" PRIx64 " the %dth task type %s initialized, dbFName:%s", pJob->queryId, taskIdx, ctgTaskTypeStr(task.type), dbFName);
+ qDebug("QID:0x%" PRIx64 " the %dth task type %s initialized, dbFName:%s", pJob->queryId, taskIdx,
+ ctgTaskTypeStr(task.type), dbFName);
return TSDB_CODE_SUCCESS;
}
-
-int32_t ctgInitGetTbHashTask(SCtgJob *pJob, int32_t taskIdx, void* param) {
- SName *name = (SName*)param;
+int32_t ctgInitGetTbHashTask(SCtgJob* pJob, int32_t taskIdx, void* param) {
+ SName* name = (SName*)param;
SCtgTask task = {0};
task.type = CTG_TASK_GET_TB_HASH;
@@ -173,13 +176,14 @@ int32_t ctgInitGetTbHashTask(SCtgJob *pJob, int32_t taskIdx, void* param) {
taosArrayPush(pJob->pTasks, &task);
- qDebug("QID:0x%" PRIx64 " the %dth task type %s initialized, tableName:%s", pJob->queryId, taskIdx, ctgTaskTypeStr(task.type), name->tname);
+ qDebug("QID:0x%" PRIx64 " the %dth task type %s initialized, tableName:%s", pJob->queryId, taskIdx,
+ ctgTaskTypeStr(task.type), name->tname);
return TSDB_CODE_SUCCESS;
}
-int32_t ctgInitGetTbHashsTask(SCtgJob *pJob, int32_t taskIdx, void* param) {
- SName *name = (SName*)param;
+int32_t ctgInitGetTbHashsTask(SCtgJob* pJob, int32_t taskIdx, void* param) {
+ SName* name = (SName*)param;
SCtgTask task = {0};
task.type = CTG_TASK_GET_TB_HASH_BATCH;
@@ -197,14 +201,13 @@ int32_t ctgInitGetTbHashsTask(SCtgJob *pJob, int32_t taskIdx, void* param) {
taosArrayPush(pJob->pTasks, &task);
- qDebug("QID:0x%" PRIx64 " the %dth task type %s initialized, dbNum:%d, tbNum:%d",
- pJob->queryId, taskIdx, ctgTaskTypeStr(task.type), taosArrayGetSize(ctx->pNames), pJob->tbHashNum);
+ qDebug("QID:0x%" PRIx64 " the %dth task type %s initialized, dbNum:%d, tbNum:%d", pJob->queryId, taskIdx,
+ ctgTaskTypeStr(task.type), taosArrayGetSize(ctx->pNames), pJob->tbHashNum);
return TSDB_CODE_SUCCESS;
}
-
-int32_t ctgInitGetQnodeTask(SCtgJob *pJob, int32_t taskIdx, void* param) {
+int32_t ctgInitGetQnodeTask(SCtgJob* pJob, int32_t taskIdx, void* param) {
SCtgTask task = {0};
task.type = CTG_TASK_GET_QNODE;
@@ -219,7 +222,7 @@ int32_t ctgInitGetQnodeTask(SCtgJob *pJob, int32_t taskIdx, void* param) {
return TSDB_CODE_SUCCESS;
}
-int32_t ctgInitGetDnodeTask(SCtgJob *pJob, int32_t taskIdx, void* param) {
+int32_t ctgInitGetDnodeTask(SCtgJob* pJob, int32_t taskIdx, void* param) {
SCtgTask task = {0};
task.type = CTG_TASK_GET_DNODE;
@@ -234,8 +237,8 @@ int32_t ctgInitGetDnodeTask(SCtgJob *pJob, int32_t taskIdx, void* param) {
return TSDB_CODE_SUCCESS;
}
-int32_t ctgInitGetIndexTask(SCtgJob *pJob, int32_t taskIdx, void* param) {
- char *name = (char*)param;
+int32_t ctgInitGetIndexTask(SCtgJob* pJob, int32_t taskIdx, void* param) {
+ char* name = (char*)param;
SCtgTask task = {0};
task.type = CTG_TASK_GET_INDEX;
@@ -253,13 +256,14 @@ int32_t ctgInitGetIndexTask(SCtgJob *pJob, int32_t taskIdx, void* param) {
taosArrayPush(pJob->pTasks, &task);
- qDebug("QID:0x%" PRIx64 " the %dth task type %s initialized, indexFName:%s", pJob->queryId, taskIdx, ctgTaskTypeStr(task.type), name);
+ qDebug("QID:0x%" PRIx64 " the %dth task type %s initialized, indexFName:%s", pJob->queryId, taskIdx,
+ ctgTaskTypeStr(task.type), name);
return TSDB_CODE_SUCCESS;
}
-int32_t ctgInitGetUdfTask(SCtgJob *pJob, int32_t taskIdx, void* param) {
- char *name = (char*)param;
+int32_t ctgInitGetUdfTask(SCtgJob* pJob, int32_t taskIdx, void* param) {
+ char* name = (char*)param;
SCtgTask task = {0};
task.type = CTG_TASK_GET_UDF;
@@ -277,14 +281,15 @@ int32_t ctgInitGetUdfTask(SCtgJob *pJob, int32_t taskIdx, void* param) {
taosArrayPush(pJob->pTasks, &task);
- qDebug("QID:0x%" PRIx64 " the %dth task type %s initialized, udfName:%s", pJob->queryId, taskIdx, ctgTaskTypeStr(task.type), name);
+ qDebug("QID:0x%" PRIx64 " the %dth task type %s initialized, udfName:%s", pJob->queryId, taskIdx,
+ ctgTaskTypeStr(task.type), name);
return TSDB_CODE_SUCCESS;
}
-int32_t ctgInitGetUserTask(SCtgJob *pJob, int32_t taskIdx, void* param) {
- SUserAuthInfo *user = (SUserAuthInfo*)param;
- SCtgTask task = {0};
+int32_t ctgInitGetUserTask(SCtgJob* pJob, int32_t taskIdx, void* param) {
+ SUserAuthInfo* user = (SUserAuthInfo*)param;
+ SCtgTask task = {0};
task.type = CTG_TASK_GET_USER;
task.taskId = taskIdx;
@@ -301,12 +306,13 @@ int32_t ctgInitGetUserTask(SCtgJob *pJob, int32_t taskIdx, void* param) {
taosArrayPush(pJob->pTasks, &task);
- qDebug("QID:0x%" PRIx64 " the %dth task type %s initialized, user:%s", pJob->queryId, taskIdx, ctgTaskTypeStr(task.type), user->user);
+ qDebug("QID:0x%" PRIx64 " the %dth task type %s initialized, user:%s", pJob->queryId, taskIdx,
+ ctgTaskTypeStr(task.type), user->user);
return TSDB_CODE_SUCCESS;
}
-int32_t ctgInitGetSvrVerTask(SCtgJob *pJob, int32_t taskIdx, void* param) {
+int32_t ctgInitGetSvrVerTask(SCtgJob* pJob, int32_t taskIdx, void* param) {
SCtgTask task = {0};
task.type = CTG_TASK_GET_SVR_VER;
@@ -320,8 +326,8 @@ int32_t ctgInitGetSvrVerTask(SCtgJob *pJob, int32_t taskIdx, void* param) {
return TSDB_CODE_SUCCESS;
}
-int32_t ctgInitGetTbIndexTask(SCtgJob *pJob, int32_t taskIdx, void* param) {
- SName *name = (SName*)param;
+int32_t ctgInitGetTbIndexTask(SCtgJob* pJob, int32_t taskIdx, void* param) {
+ SName* name = (SName*)param;
SCtgTask task = {0};
task.type = CTG_TASK_GET_TB_INDEX;
@@ -344,13 +350,14 @@ int32_t ctgInitGetTbIndexTask(SCtgJob *pJob, int32_t taskIdx, void* param) {
taosArrayPush(pJob->pTasks, &task);
- qDebug("QID:0x%" PRIx64 " the %dth task type %s initialized, tbName:%s", pJob->queryId, taskIdx, ctgTaskTypeStr(task.type), name->tname);
+ qDebug("QID:0x%" PRIx64 " the %dth task type %s initialized, tbName:%s", pJob->queryId, taskIdx,
+ ctgTaskTypeStr(task.type), name->tname);
return TSDB_CODE_SUCCESS;
}
-int32_t ctgInitGetTbCfgTask(SCtgJob *pJob, int32_t taskIdx, void* param) {
- SName *name = (SName*)param;
+int32_t ctgInitGetTbCfgTask(SCtgJob* pJob, int32_t taskIdx, void* param) {
+ SName* name = (SName*)param;
SCtgTask task = {0};
task.type = CTG_TASK_GET_TB_CFG;
@@ -373,13 +380,13 @@ int32_t ctgInitGetTbCfgTask(SCtgJob *pJob, int32_t taskIdx, void* param) {
taosArrayPush(pJob->pTasks, &task);
- qDebug("QID:0x%" PRIx64 " the %dth task type %s initialized, tbName:%s", pJob->queryId, taskIdx, ctgTaskTypeStr(task.type), name->tname);
+ qDebug("QID:0x%" PRIx64 " the %dth task type %s initialized, tbName:%s", pJob->queryId, taskIdx,
+ ctgTaskTypeStr(task.type), name->tname);
return TSDB_CODE_SUCCESS;
}
-
-int32_t ctgHandleForceUpdate(SCatalog* pCtg, int32_t taskNum, SCtgJob *pJob, const SCatalogReq* pReq) {
+int32_t ctgHandleForceUpdate(SCatalog* pCtg, int32_t taskNum, SCtgJob* pJob, const SCatalogReq* pReq) {
SHashObj* pDb = taosHashInit(taskNum, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), false, HASH_NO_LOCK);
SHashObj* pTb = taosHashInit(taskNum, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), false, HASH_NO_LOCK);
if (NULL == pDb || NULL == pTb) {
@@ -427,7 +434,7 @@ int32_t ctgHandleForceUpdate(SCatalog* pCtg, int32_t taskNum, SCtgJob *pJob, con
for (int32_t i = 0; i < pJob->tbCfgNum; ++i) {
SName* name = taosArrayGet(pReq->pTableCfg, i);
- char dbFName[TSDB_DB_FNAME_LEN];
+ char dbFName[TSDB_DB_FNAME_LEN];
tNameGetFullDbName(name, dbFName);
taosHashPut(pDb, dbFName, strlen(dbFName), dbFName, TSDB_DB_FNAME_LEN);
}
@@ -455,7 +462,6 @@ int32_t ctgHandleForceUpdate(SCatalog* pCtg, int32_t taskNum, SCtgJob *pJob, con
taosHashCleanup(pTb);
-
for (int32_t i = 0; i < pJob->tbIndexNum; ++i) {
SName* name = taosArrayGet(pReq->pTableIndex, i);
ctgDropTbIndexEnqueue(pCtg, name, true);
@@ -464,7 +470,7 @@ int32_t ctgHandleForceUpdate(SCatalog* pCtg, int32_t taskNum, SCtgJob *pJob, con
return TSDB_CODE_SUCCESS;
}
-int32_t ctgInitTask(SCtgJob *pJob, CTG_TASK_TYPE type, void* param, int32_t *taskId) {
+int32_t ctgInitTask(SCtgJob* pJob, CTG_TASK_TYPE type, void* param, int32_t* taskId) {
int32_t tid = atomic_fetch_add_32(&pJob->taskIdx, 1);
CTG_LOCK(CTG_WRITE, &pJob->taskLock);
@@ -478,7 +484,8 @@ int32_t ctgInitTask(SCtgJob *pJob, CTG_TASK_TYPE type, void* param, int32_t *tas
return TSDB_CODE_SUCCESS;
}
-int32_t ctgInitJob(SCatalog* pCtg, SRequestConnInfo *pConn, SCtgJob** job, const SCatalogReq* pReq, catalogCallback fp, void* param) {
+int32_t ctgInitJob(SCatalog* pCtg, SRequestConnInfo* pConn, SCtgJob** job, const SCatalogReq* pReq, catalogCallback fp,
+ void* param) {
int32_t code = 0;
int32_t tbMetaNum = (int32_t)ctgGetTablesReqNum(pReq->pTableMeta);
int32_t dbVgNum = (int32_t)taosArrayGetSize(pReq->pDbVgroup);
@@ -494,7 +501,8 @@ int32_t ctgInitJob(SCatalog* pCtg, SRequestConnInfo *pConn, SCtgJob** job, const
int32_t tbIndexNum = (int32_t)taosArrayGetSize(pReq->pTableIndex);
int32_t tbCfgNum = (int32_t)taosArrayGetSize(pReq->pTableCfg);
- int32_t taskNum = tbMetaNum + dbVgNum + udfNum + tbHashNum + qnodeNum + dnodeNum + svrVerNum + dbCfgNum + indexNum + userNum + dbInfoNum + tbIndexNum + tbCfgNum;
+ int32_t taskNum = tbMetaNum + dbVgNum + udfNum + tbHashNum + qnodeNum + dnodeNum + svrVerNum + dbCfgNum + indexNum +
+ userNum + dbInfoNum + tbIndexNum + tbCfgNum;
*job = taosMemoryCalloc(1, sizeof(SCtgJob));
if (NULL == *job) {
@@ -502,13 +510,13 @@ int32_t ctgInitJob(SCatalog* pCtg, SRequestConnInfo *pConn, SCtgJob** job, const
CTG_ERR_RET(TSDB_CODE_OUT_OF_MEMORY);
}
- SCtgJob *pJob = *job;
+ SCtgJob* pJob = *job;
pJob->subTaskNum = taskNum;
pJob->queryId = pConn->requestId;
pJob->userFp = fp;
- pJob->pCtg = pCtg;
- pJob->conn = *pConn;
+ pJob->pCtg = pCtg;
+ pJob->conn = *pConn;
pJob->userParam = param;
pJob->tbMetaNum = tbMetaNum;
@@ -526,7 +534,8 @@ int32_t ctgInitJob(SCatalog* pCtg, SRequestConnInfo *pConn, SCtgJob** job, const
pJob->svrVerNum = svrVerNum;
#if CTG_BATCH_FETCH
- pJob->pBatchs = taosHashInit(CTG_DEFAULT_BATCH_NUM, taosGetDefaultHashFunction(TSDB_DATA_TYPE_INT), false, HASH_NO_LOCK);
+ pJob->pBatchs =
+ taosHashInit(CTG_DEFAULT_BATCH_NUM, taosGetDefaultHashFunction(TSDB_DATA_TYPE_INT), false, HASH_NO_LOCK);
if (NULL == pJob->pBatchs) {
ctgError("taosHashInit %d batch failed", CTG_DEFAULT_BATCH_NUM);
CTG_ERR_JRET(TSDB_CODE_OUT_OF_MEMORY);
@@ -625,10 +634,10 @@ int32_t ctgInitJob(SCatalog* pCtg, SRequestConnInfo *pConn, SCtgJob** job, const
taosAcquireRef(gCtgMgmt.jobPool, pJob->refId);
- qDebug("QID:0x%" PRIx64 ", jobId: 0x%" PRIx64 " initialized, task num %d, forceUpdate %d", pJob->queryId, pJob->refId, taskNum, pReq->forceUpdate);
+ qDebug("QID:0x%" PRIx64 ", jobId: 0x%" PRIx64 " initialized, task num %d, forceUpdate %d", pJob->queryId, pJob->refId,
+ taskNum, pReq->forceUpdate);
return TSDB_CODE_SUCCESS;
-
_return:
ctgFreeJob(*job);
@@ -658,7 +667,6 @@ int32_t ctgDumpTbMetasRes(SCtgTask* pTask) {
return TSDB_CODE_SUCCESS;
}
-
int32_t ctgDumpDbVgRes(SCtgTask* pTask) {
SCtgJob* pJob = pTask->pJob;
if (NULL == pJob->jobRes.pDbVgroup) {
@@ -772,7 +780,6 @@ int32_t ctgDumpDnodeRes(SCtgTask* pTask) {
return TSDB_CODE_SUCCESS;
}
-
int32_t ctgDumpDbCfgRes(SCtgTask* pTask) {
SCtgJob* pJob = pTask->pJob;
if (NULL == pJob->jobRes.pDbCfg) {
@@ -848,15 +855,15 @@ int32_t ctgDumpSvrVer(SCtgTask* pTask) {
return TSDB_CODE_SUCCESS;
}
-int32_t ctgCallSubCb(SCtgTask *pTask) {
+int32_t ctgCallSubCb(SCtgTask* pTask) {
int32_t code = 0;
CTG_LOCK(CTG_WRITE, &pTask->lock);
int32_t parentNum = taosArrayGetSize(pTask->pParents);
for (int32_t i = 0; i < parentNum; ++i) {
- SCtgMsgCtx *pMsgCtx = CTG_GET_TASK_MSGCTX(pTask, -1);
- SCtgTask* pParent = taosArrayGetP(pTask->pParents, i);
+ SCtgMsgCtx* pMsgCtx = CTG_GET_TASK_MSGCTX(pTask, -1);
+ SCtgTask* pParent = taosArrayGetP(pTask->pParents, i);
pParent->subRes.code = pTask->code;
if (TSDB_CODE_SUCCESS == pTask->code) {
@@ -866,7 +873,7 @@ int32_t ctgCallSubCb(SCtgTask *pTask) {
}
}
- SCtgMsgCtx *pParMsgCtx = CTG_GET_TASK_MSGCTX(pParent, -1);
+ SCtgMsgCtx* pParMsgCtx = CTG_GET_TASK_MSGCTX(pParent, -1);
pParMsgCtx->pBatchs = pMsgCtx->pBatchs;
CTG_ERR_JRET(pParent->subRes.fp(pParent));
@@ -895,7 +902,7 @@ int32_t ctgCallUserCb(void* param) {
int32_t ctgHandleTaskEnd(SCtgTask* pTask, int32_t rspCode) {
SCtgJob* pJob = pTask->pJob;
- int32_t code = 0;
+ int32_t code = 0;
if (CTG_TASK_DONE == pTask->status) {
return TSDB_CODE_SUCCESS;
@@ -910,7 +917,8 @@ int32_t ctgHandleTaskEnd(SCtgTask* pTask, int32_t rspCode) {
int32_t taskDone = atomic_add_fetch_32(&pJob->taskDone, 1);
if (taskDone < taosArrayGetSize(pJob->pTasks)) {
- qDebug("QID:0x%" PRIx64 " task done: %d, total: %d", pJob->queryId, taskDone, (int32_t)taosArrayGetSize(pJob->pTasks));
+ qDebug("QID:0x%" PRIx64 " task done: %d, total: %d", pJob->queryId, taskDone,
+ (int32_t)taosArrayGetSize(pJob->pTasks));
return TSDB_CODE_SUCCESS;
}
@@ -920,25 +928,25 @@ _return:
pJob->jobResCode = code;
- //taosSsleep(2);
- //qDebug("QID:0x%" PRIx64 " ctg after sleep", pJob->queryId);
+ // taosSsleep(2);
+ // qDebug("QID:0x%" PRIx64 " ctg after sleep", pJob->queryId);
taosAsyncExec(ctgCallUserCb, pJob, NULL);
CTG_RET(code);
}
-int32_t ctgHandleGetTbMetaRsp(SCtgTaskReq* tReq, int32_t reqType, const SDataBuf *pMsg, int32_t rspCode) {
- int32_t code = 0;
- SCtgDBCache *dbCache = NULL;
- SCtgTask* pTask = tReq->pTask;
- SCatalog* pCtg = pTask->pJob->pCtg;
+int32_t ctgHandleGetTbMetaRsp(SCtgTaskReq* tReq, int32_t reqType, const SDataBuf* pMsg, int32_t rspCode) {
+ int32_t code = 0;
+ SCtgDBCache* dbCache = NULL;
+ SCtgTask* pTask = tReq->pTask;
+ SCatalog* pCtg = pTask->pJob->pCtg;
SRequestConnInfo* pConn = &pTask->pJob->conn;
- SCtgMsgCtx* pMsgCtx = CTG_GET_TASK_MSGCTX(pTask, tReq->msgIdx);
- SCtgTbMetaCtx* ctx = (SCtgTbMetaCtx*)pTask->taskCtx;
- SName* pName = ctx->pName;
- int32_t flag = ctx->flag;
- int32_t* vgId = &ctx->vgId;
+ SCtgMsgCtx* pMsgCtx = CTG_GET_TASK_MSGCTX(pTask, tReq->msgIdx);
+ SCtgTbMetaCtx* ctx = (SCtgTbMetaCtx*)pTask->taskCtx;
+ SName* pName = ctx->pName;
+ int32_t flag = ctx->flag;
+ int32_t* vgId = &ctx->vgId;
CTG_ERR_JRET(ctgProcessRspMsg(pMsgCtx->out, reqType, pMsg->pData, pMsg->len, rspCode, pMsgCtx->target));
@@ -1057,25 +1065,25 @@ int32_t ctgHandleGetTbMetaRsp(SCtgTaskReq* tReq, int32_t reqType, const SDataBuf
memcpy(pOut->tbMeta, &pOut->ctbMeta, sizeof(pOut->ctbMeta));
}
-/*
- else if (CTG_IS_META_CTABLE(pOut->metaType)) {
- SName stbName = *pName;
- strcpy(stbName.tname, pOut->tbName);
- SCtgTbMetaCtx stbCtx = {0};
- stbCtx.flag = flag;
- stbCtx.pName = &stbName;
-
- CTG_ERR_JRET(ctgReadTbMetaFromCache(pCtg, &stbCtx, &pOut->tbMeta));
- if (NULL == pOut->tbMeta) {
- ctgDebug("stb no longer exist, stbName:%s", stbName.tname);
- CTG_ERR_JRET(ctgRelaunchGetTbMetaTask(pTask));
+ /*
+ else if (CTG_IS_META_CTABLE(pOut->metaType)) {
+ SName stbName = *pName;
+ strcpy(stbName.tname, pOut->tbName);
+ SCtgTbMetaCtx stbCtx = {0};
+ stbCtx.flag = flag;
+ stbCtx.pName = &stbName;
- return TSDB_CODE_SUCCESS;
- }
+ CTG_ERR_JRET(ctgReadTbMetaFromCache(pCtg, &stbCtx, &pOut->tbMeta));
+ if (NULL == pOut->tbMeta) {
+ ctgDebug("stb no longer exist, stbName:%s", stbName.tname);
+ CTG_ERR_JRET(ctgRelaunchGetTbMetaTask(pTask));
- memcpy(pOut->tbMeta, &pOut->ctbMeta, sizeof(pOut->ctbMeta));
- }
-*/
+ return TSDB_CODE_SUCCESS;
+ }
+
+ memcpy(pOut->tbMeta, &pOut->ctbMeta, sizeof(pOut->ctbMeta));
+ }
+ */
TSWAP(pTask->res, pOut->tbMeta);
@@ -1092,20 +1100,19 @@ _return:
CTG_RET(code);
}
-
-int32_t ctgHandleGetTbMetasRsp(SCtgTaskReq* tReq, int32_t reqType, const SDataBuf *pMsg, int32_t rspCode) {
- int32_t code = 0;
- SCtgDBCache *dbCache = NULL;
- SCtgTask* pTask = tReq->pTask;
- SCatalog* pCtg = pTask->pJob->pCtg;
+int32_t ctgHandleGetTbMetasRsp(SCtgTaskReq* tReq, int32_t reqType, const SDataBuf* pMsg, int32_t rspCode) {
+ int32_t code = 0;
+ SCtgDBCache* dbCache = NULL;
+ SCtgTask* pTask = tReq->pTask;
+ SCatalog* pCtg = pTask->pJob->pCtg;
SRequestConnInfo* pConn = &pTask->pJob->conn;
- SCtgMsgCtx* pMsgCtx = CTG_GET_TASK_MSGCTX(pTask, tReq->msgIdx);
- SCtgTbMetasCtx* ctx = (SCtgTbMetasCtx*)pTask->taskCtx;
- SCtgFetch* pFetch = taosArrayGet(ctx->pFetchs, tReq->msgIdx);
- SName* pName = ctgGetFetchName(ctx->pNames, pFetch);
- int32_t flag = pFetch->flag;
- int32_t* vgId = &pFetch->vgId;
- bool taskDone = false;
+ SCtgMsgCtx* pMsgCtx = CTG_GET_TASK_MSGCTX(pTask, tReq->msgIdx);
+ SCtgTbMetasCtx* ctx = (SCtgTbMetasCtx*)pTask->taskCtx;
+ SCtgFetch* pFetch = taosArrayGet(ctx->pFetchs, tReq->msgIdx);
+ SName* pName = ctgGetFetchName(ctx->pNames, pFetch);
+ int32_t flag = pFetch->flag;
+ int32_t* vgId = &pFetch->vgId;
+ bool taskDone = false;
CTG_ERR_JRET(ctgProcessRspMsg(pMsgCtx->out, reqType, pMsg->pData, pMsg->len, rspCode, pMsgCtx->target));
@@ -1225,25 +1232,25 @@ int32_t ctgHandleGetTbMetasRsp(SCtgTaskReq* tReq, int32_t reqType, const SDataBu
memcpy(pOut->tbMeta, &pOut->ctbMeta, sizeof(pOut->ctbMeta));
}
-/*
- else if (CTG_IS_META_CTABLE(pOut->metaType)) {
- SName stbName = *pName;
- strcpy(stbName.tname, pOut->tbName);
- SCtgTbMetaCtx stbCtx = {0};
- stbCtx.flag = flag;
- stbCtx.pName = &stbName;
-
- CTG_ERR_JRET(ctgReadTbMetaFromCache(pCtg, &stbCtx, &pOut->tbMeta));
- if (NULL == pOut->tbMeta) {
- ctgDebug("stb no longer exist, stbName:%s", stbName.tname);
- CTG_ERR_JRET(ctgRelaunchGetTbMetaTask(pTask));
+ /*
+ else if (CTG_IS_META_CTABLE(pOut->metaType)) {
+ SName stbName = *pName;
+ strcpy(stbName.tname, pOut->tbName);
+ SCtgTbMetaCtx stbCtx = {0};
+ stbCtx.flag = flag;
+ stbCtx.pName = &stbName;
- return TSDB_CODE_SUCCESS;
- }
+ CTG_ERR_JRET(ctgReadTbMetaFromCache(pCtg, &stbCtx, &pOut->tbMeta));
+ if (NULL == pOut->tbMeta) {
+ ctgDebug("stb no longer exist, stbName:%s", stbName.tname);
+ CTG_ERR_JRET(ctgRelaunchGetTbMetaTask(pTask));
- memcpy(pOut->tbMeta, &pOut->ctbMeta, sizeof(pOut->ctbMeta));
- }
-*/
+ return TSDB_CODE_SUCCESS;
+ }
+
+ memcpy(pOut->tbMeta, &pOut->ctbMeta, sizeof(pOut->ctbMeta));
+ }
+ */
SMetaRes* pRes = taosArrayGet(ctx->pResList, pFetch->resIdx);
pRes->code = 0;
@@ -1277,19 +1284,18 @@ _return:
CTG_RET(code);
}
-
-int32_t ctgHandleGetDbVgRsp(SCtgTaskReq* tReq, int32_t reqType, const SDataBuf *pMsg, int32_t rspCode) {
- int32_t code = 0;
- SCtgTask* pTask = tReq->pTask;
+int32_t ctgHandleGetDbVgRsp(SCtgTaskReq* tReq, int32_t reqType, const SDataBuf* pMsg, int32_t rspCode) {
+ int32_t code = 0;
+ SCtgTask* pTask = tReq->pTask;
SCtgDbVgCtx* ctx = (SCtgDbVgCtx*)pTask->taskCtx;
- SCatalog* pCtg = pTask->pJob->pCtg;
+ SCatalog* pCtg = pTask->pJob->pCtg;
CTG_ERR_JRET(ctgProcessRspMsg(pTask->msgCtx.out, reqType, pMsg->pData, pMsg->len, rspCode, pTask->msgCtx.target));
switch (reqType) {
case TDMT_MND_USE_DB: {
SUseDbOutput* pOut = (SUseDbOutput*)pTask->msgCtx.out;
- SDBVgInfo* pDb = NULL;
+ SDBVgInfo* pDb = NULL;
CTG_ERR_JRET(ctgGenerateVgList(pCtg, pOut->dbVgroup->vgHash, (SArray**)&pTask->res));
@@ -1304,7 +1310,6 @@ int32_t ctgHandleGetDbVgRsp(SCtgTaskReq* tReq, int32_t reqType, const SDataBuf *
break;
}
-
_return:
ctgHandleTaskEnd(pTask, code);
@@ -1312,11 +1317,11 @@ _return:
CTG_RET(code);
}
-int32_t ctgHandleGetTbHashRsp(SCtgTaskReq* tReq, int32_t reqType, const SDataBuf *pMsg, int32_t rspCode) {
- int32_t code = 0;
- SCtgTask* pTask = tReq->pTask;
+int32_t ctgHandleGetTbHashRsp(SCtgTaskReq* tReq, int32_t reqType, const SDataBuf* pMsg, int32_t rspCode) {
+ int32_t code = 0;
+ SCtgTask* pTask = tReq->pTask;
SCtgTbHashCtx* ctx = (SCtgTbHashCtx*)pTask->taskCtx;
- SCatalog* pCtg = pTask->pJob->pCtg;
+ SCatalog* pCtg = pTask->pJob->pCtg;
CTG_ERR_JRET(ctgProcessRspMsg(pTask->msgCtx.out, reqType, pMsg->pData, pMsg->len, rspCode, pTask->msgCtx.target));
@@ -1342,7 +1347,6 @@ int32_t ctgHandleGetTbHashRsp(SCtgTaskReq* tReq, int32_t reqType, const SDataBuf
break;
}
-
_return:
ctgHandleTaskEnd(pTask, code);
@@ -1350,14 +1354,14 @@ _return:
CTG_RET(code);
}
-int32_t ctgHandleGetTbHashsRsp(SCtgTaskReq* tReq, int32_t reqType, const SDataBuf *pMsg, int32_t rspCode) {
- int32_t code = 0;
- SCtgTask* pTask = tReq->pTask;
+int32_t ctgHandleGetTbHashsRsp(SCtgTaskReq* tReq, int32_t reqType, const SDataBuf* pMsg, int32_t rspCode) {
+ int32_t code = 0;
+ SCtgTask* pTask = tReq->pTask;
SCtgTbHashsCtx* ctx = (SCtgTbHashsCtx*)pTask->taskCtx;
- SCatalog* pCtg = pTask->pJob->pCtg;
- SCtgMsgCtx* pMsgCtx = CTG_GET_TASK_MSGCTX(pTask, tReq->msgIdx);
- SCtgFetch* pFetch = taosArrayGet(ctx->pFetchs, tReq->msgIdx);
- bool taskDone = false;
+ SCatalog* pCtg = pTask->pJob->pCtg;
+ SCtgMsgCtx* pMsgCtx = CTG_GET_TASK_MSGCTX(pTask, tReq->msgIdx);
+ SCtgFetch* pFetch = taosArrayGet(ctx->pFetchs, tReq->msgIdx);
+ bool taskDone = false;
CTG_ERR_JRET(ctgProcessRspMsg(pMsgCtx->out, reqType, pMsg->pData, pMsg->len, rspCode, pMsgCtx->target));
@@ -1388,9 +1392,9 @@ _return:
if (code) {
STablesReq* pReq = taosArrayGet(ctx->pNames, pFetch->dbIdx);
- int32_t num = taosArrayGetSize(pReq->pTables);
+ int32_t num = taosArrayGetSize(pReq->pTables);
for (int32_t i = 0; i < num; ++i) {
- SMetaRes *pRes = taosArrayGet(ctx->pResList, pFetch->resIdx + i);
+ SMetaRes* pRes = taosArrayGet(ctx->pResList, pFetch->resIdx + i);
pRes->code = code;
pRes->pRes = NULL;
}
@@ -1408,14 +1412,13 @@ _return:
CTG_RET(code);
}
-
-int32_t ctgHandleGetTbIndexRsp(SCtgTaskReq* tReq, int32_t reqType, const SDataBuf *pMsg, int32_t rspCode) {
- int32_t code = 0;
+int32_t ctgHandleGetTbIndexRsp(SCtgTaskReq* tReq, int32_t reqType, const SDataBuf* pMsg, int32_t rspCode) {
+ int32_t code = 0;
SCtgTask* pTask = tReq->pTask;
CTG_ERR_JRET(ctgProcessRspMsg(pTask->msgCtx.out, reqType, pMsg->pData, pMsg->len, rspCode, pTask->msgCtx.target));
STableIndex* pOut = (STableIndex*)pTask->msgCtx.out;
- SArray* pInfo = NULL;
+ SArray* pInfo = NULL;
CTG_ERR_JRET(ctgCloneTableIndex(pOut->pIndex, &pInfo));
pTask->res = pInfo;
@@ -1432,8 +1435,8 @@ _return:
CTG_RET(code);
}
-int32_t ctgHandleGetTbCfgRsp(SCtgTaskReq* tReq, int32_t reqType, const SDataBuf *pMsg, int32_t rspCode) {
- int32_t code = 0;
+int32_t ctgHandleGetTbCfgRsp(SCtgTaskReq* tReq, int32_t reqType, const SDataBuf* pMsg, int32_t rspCode) {
+ int32_t code = 0;
SCtgTask* pTask = tReq->pTask;
CTG_ERR_JRET(ctgProcessRspMsg(&pTask->msgCtx.out, reqType, pMsg->pData, pMsg->len, rspCode, pTask->msgCtx.target));
@@ -1446,8 +1449,8 @@ _return:
CTG_RET(code);
}
-int32_t ctgHandleGetDbCfgRsp(SCtgTaskReq* tReq, int32_t reqType, const SDataBuf *pMsg, int32_t rspCode) {
- int32_t code = 0;
+int32_t ctgHandleGetDbCfgRsp(SCtgTaskReq* tReq, int32_t reqType, const SDataBuf* pMsg, int32_t rspCode) {
+ int32_t code = 0;
SCtgTask* pTask = tReq->pTask;
CTG_ERR_JRET(ctgProcessRspMsg(pTask->msgCtx.out, reqType, pMsg->pData, pMsg->len, rspCode, pTask->msgCtx.target));
@@ -1460,13 +1463,12 @@ _return:
CTG_RET(code);
}
-int32_t ctgHandleGetDbInfoRsp(SCtgTaskReq* tReq, int32_t reqType, const SDataBuf *pMsg, int32_t rspCode) {
+int32_t ctgHandleGetDbInfoRsp(SCtgTaskReq* tReq, int32_t reqType, const SDataBuf* pMsg, int32_t rspCode) {
CTG_RET(TSDB_CODE_APP_ERROR);
}
-
-int32_t ctgHandleGetQnodeRsp(SCtgTaskReq* tReq, int32_t reqType, const SDataBuf *pMsg, int32_t rspCode) {
- int32_t code = 0;
+int32_t ctgHandleGetQnodeRsp(SCtgTaskReq* tReq, int32_t reqType, const SDataBuf* pMsg, int32_t rspCode) {
+ int32_t code = 0;
SCtgTask* pTask = tReq->pTask;
CTG_ERR_JRET(ctgProcessRspMsg(pTask->msgCtx.out, reqType, pMsg->pData, pMsg->len, rspCode, pTask->msgCtx.target));
@@ -1479,8 +1481,8 @@ _return:
CTG_RET(code);
}
-int32_t ctgHandleGetDnodeRsp(SCtgTaskReq* tReq, int32_t reqType, const SDataBuf *pMsg, int32_t rspCode) {
- int32_t code = 0;
+int32_t ctgHandleGetDnodeRsp(SCtgTaskReq* tReq, int32_t reqType, const SDataBuf* pMsg, int32_t rspCode) {
+ int32_t code = 0;
SCtgTask* pTask = tReq->pTask;
CTG_ERR_JRET(ctgProcessRspMsg(&pTask->msgCtx.out, reqType, pMsg->pData, pMsg->len, rspCode, pTask->msgCtx.target));
@@ -1493,8 +1495,8 @@ _return:
CTG_RET(code);
}
-int32_t ctgHandleGetIndexRsp(SCtgTaskReq* tReq, int32_t reqType, const SDataBuf *pMsg, int32_t rspCode) {
- int32_t code = 0;
+int32_t ctgHandleGetIndexRsp(SCtgTaskReq* tReq, int32_t reqType, const SDataBuf* pMsg, int32_t rspCode) {
+ int32_t code = 0;
SCtgTask* pTask = tReq->pTask;
CTG_ERR_JRET(ctgProcessRspMsg(pTask->msgCtx.out, reqType, pMsg->pData, pMsg->len, rspCode, pTask->msgCtx.target));
@@ -1507,8 +1509,8 @@ _return:
CTG_RET(code);
}
-int32_t ctgHandleGetUdfRsp(SCtgTaskReq* tReq, int32_t reqType, const SDataBuf *pMsg, int32_t rspCode) {
- int32_t code = 0;
+int32_t ctgHandleGetUdfRsp(SCtgTaskReq* tReq, int32_t reqType, const SDataBuf* pMsg, int32_t rspCode) {
+ int32_t code = 0;
SCtgTask* pTask = tReq->pTask;
CTG_ERR_JRET(ctgProcessRspMsg(pTask->msgCtx.out, reqType, pMsg->pData, pMsg->len, rspCode, pTask->msgCtx.target));
@@ -1521,12 +1523,12 @@ _return:
CTG_RET(code);
}
-int32_t ctgHandleGetUserRsp(SCtgTaskReq* tReq, int32_t reqType, const SDataBuf *pMsg, int32_t rspCode) {
- int32_t code = 0;
- SCtgTask* pTask = tReq->pTask;
- SCtgUserCtx* ctx = (SCtgUserCtx*)pTask->taskCtx;
- SCatalog* pCtg = pTask->pJob->pCtg;
- bool pass = false;
+int32_t ctgHandleGetUserRsp(SCtgTaskReq* tReq, int32_t reqType, const SDataBuf* pMsg, int32_t rspCode) {
+ int32_t code = 0;
+ SCtgTask* pTask = tReq->pTask;
+ SCtgUserCtx* ctx = (SCtgUserCtx*)pTask->taskCtx;
+ SCatalog* pCtg = pTask->pJob->pCtg;
+ bool pass = false;
SGetUserAuthRsp* pOut = (SGetUserAuthRsp*)pTask->msgCtx.out;
CTG_ERR_JRET(ctgProcessRspMsg(pTask->msgCtx.out, reqType, pMsg->pData, pMsg->len, rspCode, pTask->msgCtx.target));
@@ -1541,9 +1543,11 @@ int32_t ctgHandleGetUserRsp(SCtgTaskReq* tReq, int32_t reqType, const SDataBuf *
goto _return;
}
- if (ctx->user.type == AUTH_TYPE_READ && pOut->readDbs && taosHashGet(pOut->readDbs, ctx->user.dbFName, strlen(ctx->user.dbFName))) {
+ if (ctx->user.type == AUTH_TYPE_READ && pOut->readDbs &&
+ taosHashGet(pOut->readDbs, ctx->user.dbFName, strlen(ctx->user.dbFName))) {
pass = true;
- } else if (ctx->user.type == AUTH_TYPE_WRITE && pOut->writeDbs && taosHashGet(pOut->writeDbs, ctx->user.dbFName, strlen(ctx->user.dbFName))) {
+ } else if (ctx->user.type == AUTH_TYPE_WRITE && pOut->writeDbs &&
+ taosHashGet(pOut->writeDbs, ctx->user.dbFName, strlen(ctx->user.dbFName))) {
pass = true;
}
@@ -1566,8 +1570,8 @@ _return:
CTG_RET(code);
}
-int32_t ctgHandleGetSvrVerRsp(SCtgTaskReq* tReq, int32_t reqType, const SDataBuf *pMsg, int32_t rspCode) {
- int32_t code = 0;
+int32_t ctgHandleGetSvrVerRsp(SCtgTaskReq* tReq, int32_t reqType, const SDataBuf* pMsg, int32_t rspCode) {
+ int32_t code = 0;
SCtgTask* pTask = tReq->pTask;
CTG_ERR_JRET(ctgProcessRspMsg(&pTask->msgCtx.out, reqType, pMsg->pData, pMsg->len, rspCode, pTask->msgCtx.target));
@@ -1581,16 +1585,16 @@ _return:
CTG_RET(code);
}
-int32_t ctgAsyncRefreshTbMeta(SCtgTaskReq *tReq, int32_t flag, SName* pName, int32_t* vgId) {
- SCtgTask* pTask = tReq->pTask;
- SCatalog* pCtg = pTask->pJob->pCtg;
+int32_t ctgAsyncRefreshTbMeta(SCtgTaskReq* tReq, int32_t flag, SName* pName, int32_t* vgId) {
+ SCtgTask* pTask = tReq->pTask;
+ SCatalog* pCtg = pTask->pJob->pCtg;
SRequestConnInfo* pConn = &pTask->pJob->conn;
- int32_t code = 0;
+ int32_t code = 0;
if (CTG_FLAG_IS_SYS_DB(flag)) {
ctgDebug("will refresh sys db tbmeta, tbName:%s", tNameGetTableName(pName));
- CTG_RET(ctgGetTbMetaFromMnodeImpl(pCtg, pConn, (char *)pName->dbname, (char *)pName->tname, NULL, tReq));
+ CTG_RET(ctgGetTbMetaFromMnodeImpl(pCtg, pConn, (char*)pName->dbname, (char*)pName->tname, NULL, tReq));
}
if (CTG_FLAG_IS_STB(flag)) {
@@ -1600,8 +1604,8 @@ int32_t ctgAsyncRefreshTbMeta(SCtgTaskReq *tReq, int32_t flag, SName* pName, int
CTG_RET(ctgGetTbMetaFromMnode(pCtg, pConn, pName, NULL, tReq));
}
- SCtgDBCache *dbCache = NULL;
- char dbFName[TSDB_DB_FNAME_LEN] = {0};
+ SCtgDBCache* dbCache = NULL;
+ char dbFName[TSDB_DB_FNAME_LEN] = {0};
tNameGetFullDbName(pName, dbFName);
CTG_ERR_RET(ctgAcquireVgInfoFromCache(pCtg, dbFName, &dbCache));
@@ -1631,11 +1635,11 @@ _return:
CTG_RET(code);
}
-int32_t ctgLaunchGetTbMetaTask(SCtgTask *pTask) {
- SCatalog* pCtg = pTask->pJob->pCtg;
+int32_t ctgLaunchGetTbMetaTask(SCtgTask* pTask) {
+ SCatalog* pCtg = pTask->pJob->pCtg;
SRequestConnInfo* pConn = &pTask->pJob->conn;
- SCtgJob* pJob = pTask->pJob;
- SCtgMsgCtx *pMsgCtx = CTG_GET_TASK_MSGCTX(pTask, -1);
+ SCtgJob* pJob = pTask->pJob;
+ SCtgMsgCtx* pMsgCtx = CTG_GET_TASK_MSGCTX(pTask, -1);
if (NULL == pMsgCtx->pBatchs) {
pMsgCtx->pBatchs = pJob->pBatchs;
}
@@ -1647,7 +1651,7 @@ int32_t ctgLaunchGetTbMetaTask(SCtgTask *pTask) {
}
SCtgTbMetaCtx* pCtx = (SCtgTbMetaCtx*)pTask->taskCtx;
- SCtgTaskReq tReq;
+ SCtgTaskReq tReq;
tReq.pTask = pTask;
tReq.msgIdx = -1;
CTG_ERR_RET(ctgAsyncRefreshTbMeta(&tReq, pCtx->flag, pCtx->pName, &pCtx->vgId));
@@ -1655,11 +1659,11 @@ int32_t ctgLaunchGetTbMetaTask(SCtgTask *pTask) {
return TSDB_CODE_SUCCESS;
}
-int32_t ctgLaunchGetTbMetasTask(SCtgTask *pTask) {
- SCatalog* pCtg = pTask->pJob->pCtg;
+int32_t ctgLaunchGetTbMetasTask(SCtgTask* pTask) {
+ SCatalog* pCtg = pTask->pJob->pCtg;
SRequestConnInfo* pConn = &pTask->pJob->conn;
- SCtgTbMetasCtx* pCtx = (SCtgTbMetasCtx*)pTask->taskCtx;
- SCtgJob* pJob = pTask->pJob;
+ SCtgTbMetasCtx* pCtx = (SCtgTbMetasCtx*)pTask->taskCtx;
+ SCtgJob* pJob = pTask->pJob;
int32_t dbNum = taosArrayGetSize(pCtx->pNames);
int32_t fetchIdx = 0;
@@ -1683,9 +1687,9 @@ int32_t ctgLaunchGetTbMetasTask(SCtgTask *pTask) {
taosArraySetSize(pTask->msgCtxs, pCtx->fetchNum);
for (int32_t i = 0; i < pCtx->fetchNum; ++i) {
- SCtgFetch* pFetch = taosArrayGet(pCtx->pFetchs, i);
- SName* pName = ctgGetFetchName(pCtx->pNames, pFetch);
- SCtgMsgCtx *pMsgCtx = CTG_GET_TASK_MSGCTX(pTask, i);
+ SCtgFetch* pFetch = taosArrayGet(pCtx->pFetchs, i);
+ SName* pName = ctgGetFetchName(pCtx->pNames, pFetch);
+ SCtgMsgCtx* pMsgCtx = CTG_GET_TASK_MSGCTX(pTask, i);
if (NULL == pMsgCtx->pBatchs) {
pMsgCtx->pBatchs = pJob->pBatchs;
}
@@ -1699,14 +1703,14 @@ int32_t ctgLaunchGetTbMetasTask(SCtgTask *pTask) {
return TSDB_CODE_SUCCESS;
}
-int32_t ctgLaunchGetDbVgTask(SCtgTask *pTask) {
- int32_t code = 0;
- SCatalog* pCtg = pTask->pJob->pCtg;
+int32_t ctgLaunchGetDbVgTask(SCtgTask* pTask) {
+ int32_t code = 0;
+ SCatalog* pCtg = pTask->pJob->pCtg;
SRequestConnInfo* pConn = &pTask->pJob->conn;
- SCtgDBCache *dbCache = NULL;
- SCtgDbVgCtx* pCtx = (SCtgDbVgCtx*)pTask->taskCtx;
- SCtgJob* pJob = pTask->pJob;
- SCtgMsgCtx *pMsgCtx = CTG_GET_TASK_MSGCTX(pTask, -1);
+ SCtgDBCache* dbCache = NULL;
+ SCtgDbVgCtx* pCtx = (SCtgDbVgCtx*)pTask->taskCtx;
+ SCtgJob* pJob = pTask->pJob;
+ SCtgMsgCtx* pMsgCtx = CTG_GET_TASK_MSGCTX(pTask, -1);
if (NULL == pMsgCtx->pBatchs) {
pMsgCtx->pBatchs = pJob->pBatchs;
}
@@ -1740,14 +1744,14 @@ _return:
CTG_RET(code);
}
-int32_t ctgLaunchGetTbHashTask(SCtgTask *pTask) {
- int32_t code = 0;
- SCatalog* pCtg = pTask->pJob->pCtg;
+int32_t ctgLaunchGetTbHashTask(SCtgTask* pTask) {
+ int32_t code = 0;
+ SCatalog* pCtg = pTask->pJob->pCtg;
SRequestConnInfo* pConn = &pTask->pJob->conn;
- SCtgDBCache *dbCache = NULL;
- SCtgTbHashCtx* pCtx = (SCtgTbHashCtx*)pTask->taskCtx;
- SCtgJob* pJob = pTask->pJob;
- SCtgMsgCtx *pMsgCtx = CTG_GET_TASK_MSGCTX(pTask, -1);
+ SCtgDBCache* dbCache = NULL;
+ SCtgTbHashCtx* pCtx = (SCtgTbHashCtx*)pTask->taskCtx;
+ SCtgJob* pJob = pTask->pJob;
+ SCtgMsgCtx* pMsgCtx = CTG_GET_TASK_MSGCTX(pTask, -1);
if (NULL == pMsgCtx->pBatchs) {
pMsgCtx->pBatchs = pJob->pBatchs;
}
@@ -1785,16 +1789,16 @@ _return:
CTG_RET(code);
}
-int32_t ctgLaunchGetTbHashsTask(SCtgTask *pTask) {
- SCatalog* pCtg = pTask->pJob->pCtg;
+int32_t ctgLaunchGetTbHashsTask(SCtgTask* pTask) {
+ SCatalog* pCtg = pTask->pJob->pCtg;
SRequestConnInfo* pConn = &pTask->pJob->conn;
- SCtgTbHashsCtx* pCtx = (SCtgTbHashsCtx*)pTask->taskCtx;
- SCtgDBCache *dbCache = NULL;
- SCtgJob* pJob = pTask->pJob;
- int32_t dbNum = taosArrayGetSize(pCtx->pNames);
- int32_t fetchIdx = 0;
- int32_t baseResIdx = 0;
- int32_t code = 0;
+ SCtgTbHashsCtx* pCtx = (SCtgTbHashsCtx*)pTask->taskCtx;
+ SCtgDBCache* dbCache = NULL;
+ SCtgJob* pJob = pTask->pJob;
+ int32_t dbNum = taosArrayGetSize(pCtx->pNames);
+ int32_t fetchIdx = 0;
+ int32_t baseResIdx = 0;
+ int32_t code = 0;
for (int32_t i = 0; i < dbNum; ++i) {
STablesReq* pReq = taosArrayGet(pCtx->pNames, i);
@@ -1805,7 +1809,8 @@ int32_t ctgLaunchGetTbHashsTask(SCtgTask *pTask) {
SCtgTaskReq tReq;
tReq.pTask = pTask;
tReq.msgIdx = -1;
- CTG_ERR_JRET(ctgGetVgInfosFromHashValue(pCtg, &tReq, dbCache->vgCache.vgInfo, pCtx, pReq->dbFName, pReq->pTables, false));
+ CTG_ERR_JRET(
+ ctgGetVgInfosFromHashValue(pCtg, &tReq, dbCache->vgCache.vgInfo, pCtx, pReq->dbFName, pReq->pTables, false));
ctgReleaseVgInfoToCache(pCtg, dbCache);
dbCache = NULL;
@@ -1831,9 +1836,9 @@ int32_t ctgLaunchGetTbHashsTask(SCtgTask *pTask) {
taosArraySetSize(pTask->msgCtxs, pCtx->fetchNum);
for (int32_t i = 0; i < pCtx->fetchNum; ++i) {
- SCtgFetch* pFetch = taosArrayGet(pCtx->pFetchs, i);
+ SCtgFetch* pFetch = taosArrayGet(pCtx->pFetchs, i);
STablesReq* pReq = taosArrayGet(pCtx->pNames, pFetch->dbIdx);
- SCtgMsgCtx *pMsgCtx = CTG_GET_TASK_MSGCTX(pTask, i);
+ SCtgMsgCtx* pMsgCtx = CTG_GET_TASK_MSGCTX(pTask, i);
if (NULL == pMsgCtx->pBatchs) {
pMsgCtx->pBatchs = pJob->pBatchs;
}
@@ -1858,15 +1863,14 @@ _return:
return code;
}
-
-int32_t ctgLaunchGetTbIndexTask(SCtgTask *pTask) {
- int32_t code = 0;
- SCatalog* pCtg = pTask->pJob->pCtg;
+int32_t ctgLaunchGetTbIndexTask(SCtgTask* pTask) {
+ int32_t code = 0;
+ SCatalog* pCtg = pTask->pJob->pCtg;
SRequestConnInfo* pConn = &pTask->pJob->conn;
- SCtgTbIndexCtx* pCtx = (SCtgTbIndexCtx*)pTask->taskCtx;
- SArray* pRes = NULL;
- SCtgJob* pJob = pTask->pJob;
- SCtgMsgCtx *pMsgCtx = CTG_GET_TASK_MSGCTX(pTask, -1);
+ SCtgTbIndexCtx* pCtx = (SCtgTbIndexCtx*)pTask->taskCtx;
+ SArray* pRes = NULL;
+ SCtgJob* pJob = pTask->pJob;
+ SCtgMsgCtx* pMsgCtx = CTG_GET_TASK_MSGCTX(pTask, -1);
if (NULL == pMsgCtx->pBatchs) {
pMsgCtx->pBatchs = pJob->pBatchs;
}
@@ -1883,16 +1887,16 @@ int32_t ctgLaunchGetTbIndexTask(SCtgTask *pTask) {
return TSDB_CODE_SUCCESS;
}
-int32_t ctgLaunchGetTbCfgTask(SCtgTask *pTask) {
- int32_t code = 0;
- SCatalog* pCtg = pTask->pJob->pCtg;
+int32_t ctgLaunchGetTbCfgTask(SCtgTask* pTask) {
+ int32_t code = 0;
+ SCatalog* pCtg = pTask->pJob->pCtg;
SRequestConnInfo* pConn = &pTask->pJob->conn;
- SCtgTbCfgCtx* pCtx = (SCtgTbCfgCtx*)pTask->taskCtx;
- SArray* pRes = NULL;
- char dbFName[TSDB_DB_FNAME_LEN];
+ SCtgTbCfgCtx* pCtx = (SCtgTbCfgCtx*)pTask->taskCtx;
+ SArray* pRes = NULL;
+ char dbFName[TSDB_DB_FNAME_LEN];
tNameGetFullDbName(pCtx->pName, dbFName);
- SCtgJob* pJob = pTask->pJob;
- SCtgMsgCtx *pMsgCtx = CTG_GET_TASK_MSGCTX(pTask, -1);
+ SCtgJob* pJob = pTask->pJob;
+ SCtgMsgCtx* pMsgCtx = CTG_GET_TASK_MSGCTX(pTask, -1);
if (NULL == pMsgCtx->pBatchs) {
pMsgCtx->pBatchs = pJob->pBatchs;
}
@@ -1930,12 +1934,11 @@ _return:
CTG_RET(code);
}
-
-int32_t ctgLaunchGetQnodeTask(SCtgTask *pTask) {
- SCatalog* pCtg = pTask->pJob->pCtg;
+int32_t ctgLaunchGetQnodeTask(SCtgTask* pTask) {
+ SCatalog* pCtg = pTask->pJob->pCtg;
SRequestConnInfo* pConn = &pTask->pJob->conn;
- SCtgJob* pJob = pTask->pJob;
- SCtgMsgCtx *pMsgCtx = CTG_GET_TASK_MSGCTX(pTask, -1);
+ SCtgJob* pJob = pTask->pJob;
+ SCtgMsgCtx* pMsgCtx = CTG_GET_TASK_MSGCTX(pTask, -1);
if (NULL == pMsgCtx->pBatchs) {
pMsgCtx->pBatchs = pJob->pBatchs;
}
@@ -1944,11 +1947,11 @@ int32_t ctgLaunchGetQnodeTask(SCtgTask *pTask) {
return TSDB_CODE_SUCCESS;
}
-int32_t ctgLaunchGetDnodeTask(SCtgTask *pTask) {
- SCatalog* pCtg = pTask->pJob->pCtg;
+int32_t ctgLaunchGetDnodeTask(SCtgTask* pTask) {
+ SCatalog* pCtg = pTask->pJob->pCtg;
SRequestConnInfo* pConn = &pTask->pJob->conn;
- SCtgJob* pJob = pTask->pJob;
- SCtgMsgCtx *pMsgCtx = CTG_GET_TASK_MSGCTX(pTask, -1);
+ SCtgJob* pJob = pTask->pJob;
+ SCtgMsgCtx* pMsgCtx = CTG_GET_TASK_MSGCTX(pTask, -1);
if (NULL == pMsgCtx->pBatchs) {
pMsgCtx->pBatchs = pJob->pBatchs;
}
@@ -1957,13 +1960,12 @@ int32_t ctgLaunchGetDnodeTask(SCtgTask *pTask) {
return TSDB_CODE_SUCCESS;
}
-
-int32_t ctgLaunchGetDbCfgTask(SCtgTask *pTask) {
- SCatalog* pCtg = pTask->pJob->pCtg;
+int32_t ctgLaunchGetDbCfgTask(SCtgTask* pTask) {
+ SCatalog* pCtg = pTask->pJob->pCtg;
SRequestConnInfo* pConn = &pTask->pJob->conn;
- SCtgDbCfgCtx* pCtx = (SCtgDbCfgCtx*)pTask->taskCtx;
- SCtgJob* pJob = pTask->pJob;
- SCtgMsgCtx *pMsgCtx = CTG_GET_TASK_MSGCTX(pTask, -1);
+ SCtgDbCfgCtx* pCtx = (SCtgDbCfgCtx*)pTask->taskCtx;
+ SCtgJob* pJob = pTask->pJob;
+ SCtgMsgCtx* pMsgCtx = CTG_GET_TASK_MSGCTX(pTask, -1);
if (NULL == pMsgCtx->pBatchs) {
pMsgCtx->pBatchs = pJob->pBatchs;
}
@@ -1973,13 +1975,13 @@ int32_t ctgLaunchGetDbCfgTask(SCtgTask *pTask) {
return TSDB_CODE_SUCCESS;
}
-int32_t ctgLaunchGetDbInfoTask(SCtgTask *pTask) {
- int32_t code = 0;
- SCatalog* pCtg = pTask->pJob->pCtg;
- SCtgDBCache *dbCache = NULL;
+int32_t ctgLaunchGetDbInfoTask(SCtgTask* pTask) {
+ int32_t code = 0;
+ SCatalog* pCtg = pTask->pJob->pCtg;
+ SCtgDBCache* dbCache = NULL;
SCtgDbInfoCtx* pCtx = (SCtgDbInfoCtx*)pTask->taskCtx;
- SCtgJob* pJob = pTask->pJob;
- SCtgMsgCtx *pMsgCtx = CTG_GET_TASK_MSGCTX(pTask, -1);
+ SCtgJob* pJob = pTask->pJob;
+ SCtgMsgCtx* pMsgCtx = CTG_GET_TASK_MSGCTX(pTask, -1);
if (NULL == pMsgCtx->pBatchs) {
pMsgCtx->pBatchs = pJob->pBatchs;
}
@@ -2013,12 +2015,12 @@ _return:
CTG_RET(code);
}
-int32_t ctgLaunchGetIndexTask(SCtgTask *pTask) {
- SCatalog* pCtg = pTask->pJob->pCtg;
+int32_t ctgLaunchGetIndexTask(SCtgTask* pTask) {
+ SCatalog* pCtg = pTask->pJob->pCtg;
SRequestConnInfo* pConn = &pTask->pJob->conn;
- SCtgIndexCtx* pCtx = (SCtgIndexCtx*)pTask->taskCtx;
- SCtgJob* pJob = pTask->pJob;
- SCtgMsgCtx *pMsgCtx = CTG_GET_TASK_MSGCTX(pTask, -1);
+ SCtgIndexCtx* pCtx = (SCtgIndexCtx*)pTask->taskCtx;
+ SCtgJob* pJob = pTask->pJob;
+ SCtgMsgCtx* pMsgCtx = CTG_GET_TASK_MSGCTX(pTask, -1);
if (NULL == pMsgCtx->pBatchs) {
pMsgCtx->pBatchs = pJob->pBatchs;
}
@@ -2028,12 +2030,12 @@ int32_t ctgLaunchGetIndexTask(SCtgTask *pTask) {
return TSDB_CODE_SUCCESS;
}
-int32_t ctgLaunchGetUdfTask(SCtgTask *pTask) {
- SCatalog* pCtg = pTask->pJob->pCtg;
+int32_t ctgLaunchGetUdfTask(SCtgTask* pTask) {
+ SCatalog* pCtg = pTask->pJob->pCtg;
SRequestConnInfo* pConn = &pTask->pJob->conn;
- SCtgUdfCtx* pCtx = (SCtgUdfCtx*)pTask->taskCtx;
- SCtgJob* pJob = pTask->pJob;
- SCtgMsgCtx *pMsgCtx = CTG_GET_TASK_MSGCTX(pTask, -1);
+ SCtgUdfCtx* pCtx = (SCtgUdfCtx*)pTask->taskCtx;
+ SCtgJob* pJob = pTask->pJob;
+ SCtgMsgCtx* pMsgCtx = CTG_GET_TASK_MSGCTX(pTask, -1);
if (NULL == pMsgCtx->pBatchs) {
pMsgCtx->pBatchs = pJob->pBatchs;
}
@@ -2043,14 +2045,14 @@ int32_t ctgLaunchGetUdfTask(SCtgTask *pTask) {
return TSDB_CODE_SUCCESS;
}
-int32_t ctgLaunchGetUserTask(SCtgTask *pTask) {
- SCatalog* pCtg = pTask->pJob->pCtg;
+int32_t ctgLaunchGetUserTask(SCtgTask* pTask) {
+ SCatalog* pCtg = pTask->pJob->pCtg;
SRequestConnInfo* pConn = &pTask->pJob->conn;
- SCtgUserCtx* pCtx = (SCtgUserCtx*)pTask->taskCtx;
- bool inCache = false;
- bool pass = false;
- SCtgJob* pJob = pTask->pJob;
- SCtgMsgCtx *pMsgCtx = CTG_GET_TASK_MSGCTX(pTask, -1);
+ SCtgUserCtx* pCtx = (SCtgUserCtx*)pTask->taskCtx;
+ bool inCache = false;
+ bool pass = false;
+ SCtgJob* pJob = pTask->pJob;
+ SCtgMsgCtx* pMsgCtx = CTG_GET_TASK_MSGCTX(pTask, -1);
if (NULL == pMsgCtx->pBatchs) {
pMsgCtx->pBatchs = pJob->pBatchs;
}
@@ -2072,11 +2074,11 @@ int32_t ctgLaunchGetUserTask(SCtgTask *pTask) {
return TSDB_CODE_SUCCESS;
}
-int32_t ctgLaunchGetSvrVerTask(SCtgTask *pTask) {
- SCatalog* pCtg = pTask->pJob->pCtg;
+int32_t ctgLaunchGetSvrVerTask(SCtgTask* pTask) {
+ SCatalog* pCtg = pTask->pJob->pCtg;
SRequestConnInfo* pConn = &pTask->pJob->conn;
- SCtgJob* pJob = pTask->pJob;
- SCtgMsgCtx *pMsgCtx = CTG_GET_TASK_MSGCTX(pTask, -1);
+ SCtgJob* pJob = pTask->pJob;
+ SCtgMsgCtx* pMsgCtx = CTG_GET_TASK_MSGCTX(pTask, -1);
if (NULL == pMsgCtx->pBatchs) {
pMsgCtx->pBatchs = pJob->pBatchs;
}
@@ -2086,7 +2088,7 @@ int32_t ctgLaunchGetSvrVerTask(SCtgTask *pTask) {
return TSDB_CODE_SUCCESS;
}
-int32_t ctgRelaunchGetTbMetaTask(SCtgTask *pTask) {
+int32_t ctgRelaunchGetTbMetaTask(SCtgTask* pTask) {
ctgResetTbMetaTask(pTask);
CTG_ERR_RET(ctgLaunchGetTbMetaTask(pTask));
@@ -2094,7 +2096,7 @@ int32_t ctgRelaunchGetTbMetaTask(SCtgTask *pTask) {
return TSDB_CODE_SUCCESS;
}
-int32_t ctgGetTbCfgCb(SCtgTask *pTask) {
+int32_t ctgGetTbCfgCb(SCtgTask* pTask) {
int32_t code = 0;
CTG_ERR_JRET(pTask->subRes.code);
@@ -2124,7 +2126,6 @@ int32_t ctgCompDbVgTasks(SCtgTask* pTask, void* param, bool* equal) {
return TSDB_CODE_SUCCESS;
}
-
int32_t ctgCompTbMetaTasks(SCtgTask* pTask, void* param, bool* equal) {
SCtgTbMetaCtx* ctx = pTask->taskCtx;
@@ -2145,38 +2146,38 @@ int32_t ctgCloneDbVg(SCtgTask* pTask, void** pRes) {
CTG_RET(cloneDbVgInfo(pOut->dbVgroup, (SDBVgInfo**)pRes));
}
-
SCtgAsyncFps gCtgAsyncFps[] = {
- {ctgInitGetQnodeTask, ctgLaunchGetQnodeTask, ctgHandleGetQnodeRsp, ctgDumpQnodeRes, NULL, NULL},
- {ctgInitGetDnodeTask, ctgLaunchGetDnodeTask, ctgHandleGetDnodeRsp, ctgDumpDnodeRes, NULL, NULL},
- {ctgInitGetDbVgTask, ctgLaunchGetDbVgTask, ctgHandleGetDbVgRsp, ctgDumpDbVgRes, ctgCompDbVgTasks, ctgCloneDbVg},
- {ctgInitGetDbCfgTask, ctgLaunchGetDbCfgTask, ctgHandleGetDbCfgRsp, ctgDumpDbCfgRes, NULL, NULL},
- {ctgInitGetDbInfoTask, ctgLaunchGetDbInfoTask, ctgHandleGetDbInfoRsp, ctgDumpDbInfoRes, NULL, NULL},
- {ctgInitGetTbMetaTask, ctgLaunchGetTbMetaTask, ctgHandleGetTbMetaRsp, ctgDumpTbMetaRes, ctgCompTbMetaTasks, ctgCloneTbMeta},
- {ctgInitGetTbHashTask, ctgLaunchGetTbHashTask, ctgHandleGetTbHashRsp, ctgDumpTbHashRes, NULL, NULL},
- {ctgInitGetTbIndexTask, ctgLaunchGetTbIndexTask, ctgHandleGetTbIndexRsp, ctgDumpTbIndexRes, NULL, NULL},
- {ctgInitGetTbCfgTask, ctgLaunchGetTbCfgTask, ctgHandleGetTbCfgRsp, ctgDumpTbCfgRes, NULL, NULL},
- {ctgInitGetIndexTask, ctgLaunchGetIndexTask, ctgHandleGetIndexRsp, ctgDumpIndexRes, NULL, NULL},
- {ctgInitGetUdfTask, ctgLaunchGetUdfTask, ctgHandleGetUdfRsp, ctgDumpUdfRes, NULL, NULL},
- {ctgInitGetUserTask, ctgLaunchGetUserTask, ctgHandleGetUserRsp, ctgDumpUserRes, NULL, NULL},
- {ctgInitGetSvrVerTask, ctgLaunchGetSvrVerTask, ctgHandleGetSvrVerRsp, ctgDumpSvrVer, NULL, NULL},
- {ctgInitGetTbMetasTask, ctgLaunchGetTbMetasTask, ctgHandleGetTbMetasRsp, ctgDumpTbMetasRes, NULL, NULL},
- {ctgInitGetTbHashsTask, ctgLaunchGetTbHashsTask, ctgHandleGetTbHashsRsp, ctgDumpTbHashsRes, NULL, NULL},
+ {ctgInitGetQnodeTask, ctgLaunchGetQnodeTask, ctgHandleGetQnodeRsp, ctgDumpQnodeRes, NULL, NULL},
+ {ctgInitGetDnodeTask, ctgLaunchGetDnodeTask, ctgHandleGetDnodeRsp, ctgDumpDnodeRes, NULL, NULL},
+ {ctgInitGetDbVgTask, ctgLaunchGetDbVgTask, ctgHandleGetDbVgRsp, ctgDumpDbVgRes, ctgCompDbVgTasks, ctgCloneDbVg},
+ {ctgInitGetDbCfgTask, ctgLaunchGetDbCfgTask, ctgHandleGetDbCfgRsp, ctgDumpDbCfgRes, NULL, NULL},
+ {ctgInitGetDbInfoTask, ctgLaunchGetDbInfoTask, ctgHandleGetDbInfoRsp, ctgDumpDbInfoRes, NULL, NULL},
+ {ctgInitGetTbMetaTask, ctgLaunchGetTbMetaTask, ctgHandleGetTbMetaRsp, ctgDumpTbMetaRes, ctgCompTbMetaTasks,
+ ctgCloneTbMeta},
+ {ctgInitGetTbHashTask, ctgLaunchGetTbHashTask, ctgHandleGetTbHashRsp, ctgDumpTbHashRes, NULL, NULL},
+ {ctgInitGetTbIndexTask, ctgLaunchGetTbIndexTask, ctgHandleGetTbIndexRsp, ctgDumpTbIndexRes, NULL, NULL},
+ {ctgInitGetTbCfgTask, ctgLaunchGetTbCfgTask, ctgHandleGetTbCfgRsp, ctgDumpTbCfgRes, NULL, NULL},
+ {ctgInitGetIndexTask, ctgLaunchGetIndexTask, ctgHandleGetIndexRsp, ctgDumpIndexRes, NULL, NULL},
+ {ctgInitGetUdfTask, ctgLaunchGetUdfTask, ctgHandleGetUdfRsp, ctgDumpUdfRes, NULL, NULL},
+ {ctgInitGetUserTask, ctgLaunchGetUserTask, ctgHandleGetUserRsp, ctgDumpUserRes, NULL, NULL},
+ {ctgInitGetSvrVerTask, ctgLaunchGetSvrVerTask, ctgHandleGetSvrVerRsp, ctgDumpSvrVer, NULL, NULL},
+ {ctgInitGetTbMetasTask, ctgLaunchGetTbMetasTask, ctgHandleGetTbMetasRsp, ctgDumpTbMetasRes, NULL, NULL},
+ {ctgInitGetTbHashsTask, ctgLaunchGetTbHashsTask, ctgHandleGetTbHashsRsp, ctgDumpTbHashsRes, NULL, NULL},
};
-int32_t ctgMakeAsyncRes(SCtgJob *pJob) {
+int32_t ctgMakeAsyncRes(SCtgJob* pJob) {
int32_t code = 0;
int32_t taskNum = taosArrayGetSize(pJob->pTasks);
for (int32_t i = 0; i < taskNum; ++i) {
- SCtgTask *pTask = taosArrayGet(pJob->pTasks, i);
+ SCtgTask* pTask = taosArrayGet(pJob->pTasks, i);
CTG_ERR_RET((*gCtgAsyncFps[pTask->type].dumpResFp)(pTask));
}
return TSDB_CODE_SUCCESS;
}
-int32_t ctgSearchExistingTask(SCtgJob *pJob, CTG_TASK_TYPE type, void* param, int32_t* taskId) {
+int32_t ctgSearchExistingTask(SCtgJob* pJob, CTG_TASK_TYPE type, void* param, int32_t* taskId) {
bool equal = false;
SCtgTask* pTask = NULL;
int32_t code = 0;
@@ -2186,7 +2187,7 @@ int32_t ctgSearchExistingTask(SCtgJob *pJob, CTG_TASK_TYPE type, void* param, in
int32_t taskNum = taosArrayGetSize(pJob->pTasks);
for (int32_t i = 0; i < taskNum; ++i) {
pTask = taosArrayGet(pJob->pTasks, i);
- if (type != pTask->type) {
+ if (type != pTask->type) {
continue;
}
@@ -2206,15 +2207,15 @@ _return:
CTG_RET(code);
}
-int32_t ctgSetSubTaskCb(SCtgTask *pSub, SCtgTask *pTask) {
+int32_t ctgSetSubTaskCb(SCtgTask* pSub, SCtgTask* pTask) {
int32_t code = 0;
CTG_LOCK(CTG_WRITE, &pSub->lock);
if (CTG_TASK_DONE == pSub->status) {
pTask->subRes.code = pSub->code;
CTG_ERR_JRET((*gCtgAsyncFps[pTask->type].cloneFp)(pSub, &pTask->subRes.res));
- SCtgMsgCtx *pMsgCtx = CTG_GET_TASK_MSGCTX(pTask, -1);
- SCtgMsgCtx *pSubMsgCtx = CTG_GET_TASK_MSGCTX(pSub, -1);
+ SCtgMsgCtx* pMsgCtx = CTG_GET_TASK_MSGCTX(pTask, -1);
+ SCtgMsgCtx* pSubMsgCtx = CTG_GET_TASK_MSGCTX(pSub, -1);
pMsgCtx->pBatchs = pSubMsgCtx->pBatchs;
CTG_ERR_JRET(pTask->subRes.fp(pTask));
@@ -2233,8 +2234,7 @@ _return:
CTG_RET(code);
}
-
-int32_t ctgLaunchSubTask(SCtgTask *pTask, CTG_TASK_TYPE type, ctgSubTaskCbFp fp, void* param) {
+int32_t ctgLaunchSubTask(SCtgTask* pTask, CTG_TASK_TYPE type, ctgSubTaskCbFp fp, void* param) {
SCtgJob* pJob = pTask->pJob;
int32_t subTaskId = -1;
bool newTask = false;
@@ -2254,8 +2254,8 @@ int32_t ctgLaunchSubTask(SCtgTask *pTask, CTG_TASK_TYPE type, ctgSubTaskCbFp fp,
CTG_ERR_RET(ctgSetSubTaskCb(pSub, pTask));
if (newTask) {
- SCtgMsgCtx *pMsgCtx = CTG_GET_TASK_MSGCTX(pTask, -1);
- SCtgMsgCtx *pSubMsgCtx = CTG_GET_TASK_MSGCTX(pSub, -1);
+ SCtgMsgCtx* pMsgCtx = CTG_GET_TASK_MSGCTX(pTask, -1);
+ SCtgMsgCtx* pSubMsgCtx = CTG_GET_TASK_MSGCTX(pSub, -1);
pSubMsgCtx->pBatchs = pMsgCtx->pBatchs;
CTG_ERR_RET((*gCtgAsyncFps[pSub->type].launchFp)(pSub));
@@ -2265,11 +2265,11 @@ int32_t ctgLaunchSubTask(SCtgTask *pTask, CTG_TASK_TYPE type, ctgSubTaskCbFp fp,
return TSDB_CODE_SUCCESS;
}
-int32_t ctgLaunchJob(SCtgJob *pJob) {
+int32_t ctgLaunchJob(SCtgJob* pJob) {
int32_t taskNum = taosArrayGetSize(pJob->pTasks);
for (int32_t i = 0; i < taskNum; ++i) {
- SCtgTask *pTask = taosArrayGet(pJob->pTasks, i);
+ SCtgTask* pTask = taosArrayGet(pJob->pTasks, i);
qDebug("QID:0x%" PRIx64 " ctg launch [%dth] task", pJob->queryId, pTask->taskId);
CTG_ERR_RET((*gCtgAsyncFps[pTask->type].launchFp)(pTask));
@@ -2289,6 +2289,3 @@ int32_t ctgLaunchJob(SCtgJob *pJob) {
return TSDB_CODE_SUCCESS;
}
-
-
-
diff --git a/source/libs/catalog/src/ctgCache.c b/source/libs/catalog/src/ctgCache.c
index 6935489ff4000aa37aed912769120713102ed4c1..3a774afdcce2fc21334686a39a65fcb83606bab2 100644
--- a/source/libs/catalog/src/ctgCache.c
+++ b/source/libs/catalog/src/ctgCache.c
@@ -13,96 +13,46 @@
* along with this program. If not, see .
*/
-#include "trpc.h"
-#include "query.h"
-#include "tname.h"
#include "catalogInt.h"
+#include "query.h"
#include "systable.h"
+#include "tname.h"
+#include "trpc.h"
-SCtgOperation gCtgCacheOperation[CTG_OP_MAX] = {
- {
- CTG_OP_UPDATE_VGROUP,
- "update vgInfo",
- ctgOpUpdateVgroup
- },
- {
- CTG_OP_UPDATE_TB_META,
- "update tbMeta",
- ctgOpUpdateTbMeta
- },
- {
- CTG_OP_DROP_DB_CACHE,
- "drop DB",
- ctgOpDropDbCache
- },
- {
- CTG_OP_DROP_DB_VGROUP,
- "drop DBVgroup",
- ctgOpDropDbVgroup
- },
- {
- CTG_OP_DROP_STB_META,
- "drop stbMeta",
- ctgOpDropStbMeta
- },
- {
- CTG_OP_DROP_TB_META,
- "drop tbMeta",
- ctgOpDropTbMeta
- },
- {
- CTG_OP_UPDATE_USER,
- "update user",
- ctgOpUpdateUser
- },
- {
- CTG_OP_UPDATE_VG_EPSET,
- "update epset",
- ctgOpUpdateEpset
- },
- {
- CTG_OP_UPDATE_TB_INDEX,
- "update tbIndex",
- ctgOpUpdateTbIndex
- },
- {
- CTG_OP_DROP_TB_INDEX,
- "drop tbIndex",
- ctgOpDropTbIndex
- },
- {
- CTG_OP_CLEAR_CACHE,
- "clear cache",
- ctgOpClearCache
- }
-};
-
-
-
+SCtgOperation gCtgCacheOperation[CTG_OP_MAX] = {{CTG_OP_UPDATE_VGROUP, "update vgInfo", ctgOpUpdateVgroup},
+ {CTG_OP_UPDATE_TB_META, "update tbMeta", ctgOpUpdateTbMeta},
+ {CTG_OP_DROP_DB_CACHE, "drop DB", ctgOpDropDbCache},
+ {CTG_OP_DROP_DB_VGROUP, "drop DBVgroup", ctgOpDropDbVgroup},
+ {CTG_OP_DROP_STB_META, "drop stbMeta", ctgOpDropStbMeta},
+ {CTG_OP_DROP_TB_META, "drop tbMeta", ctgOpDropTbMeta},
+ {CTG_OP_UPDATE_USER, "update user", ctgOpUpdateUser},
+ {CTG_OP_UPDATE_VG_EPSET, "update epset", ctgOpUpdateEpset},
+ {CTG_OP_UPDATE_TB_INDEX, "update tbIndex", ctgOpUpdateTbIndex},
+ {CTG_OP_DROP_TB_INDEX, "drop tbIndex", ctgOpDropTbIndex},
+ {CTG_OP_CLEAR_CACHE, "clear cache", ctgOpClearCache}};
int32_t ctgRLockVgInfo(SCatalog *pCtg, SCtgDBCache *dbCache, bool *inCache) {
CTG_LOCK(CTG_READ, &dbCache->vgCache.vgLock);
-
+
if (dbCache->deleted) {
CTG_UNLOCK(CTG_READ, &dbCache->vgCache.vgLock);
- ctgDebug("db is dropping, dbId:0x%"PRIx64, dbCache->dbId);
-
+ ctgDebug("db is dropping, dbId:0x%" PRIx64, dbCache->dbId);
+
*inCache = false;
return TSDB_CODE_SUCCESS;
}
-
if (NULL == dbCache->vgCache.vgInfo) {
CTG_UNLOCK(CTG_READ, &dbCache->vgCache.vgLock);
*inCache = false;
- ctgDebug("db vgInfo is empty, dbId:0x%"PRIx64, dbCache->dbId);
+ ctgDebug("db vgInfo is empty, dbId:0x%" PRIx64, dbCache->dbId);
return TSDB_CODE_SUCCESS;
}
*inCache = true;
-
+
return TSDB_CODE_SUCCESS;
}
@@ -110,7 +60,7 @@ int32_t ctgWLockVgInfo(SCatalog *pCtg, SCtgDBCache *dbCache) {
CTG_LOCK(CTG_WRITE, &dbCache->vgCache.vgLock);
if (dbCache->deleted) {
- ctgDebug("db is dropping, dbId:0x%"PRIx64, dbCache->dbId);
+ ctgDebug("db is dropping, dbId:0x%" PRIx64, dbCache->dbId);
CTG_UNLOCK(CTG_WRITE, &dbCache->vgCache.vgLock);
CTG_ERR_RET(TSDB_CODE_CTG_DB_DROPPED);
}
@@ -118,19 +68,13 @@ int32_t ctgWLockVgInfo(SCatalog *pCtg, SCtgDBCache *dbCache) {
return TSDB_CODE_SUCCESS;
}
-void ctgRUnlockVgInfo(SCtgDBCache *dbCache) {
- CTG_UNLOCK(CTG_READ, &dbCache->vgCache.vgLock);
-}
+void ctgRUnlockVgInfo(SCtgDBCache *dbCache) { CTG_UNLOCK(CTG_READ, &dbCache->vgCache.vgLock); }
-void ctgWUnlockVgInfo(SCtgDBCache *dbCache) {
- CTG_UNLOCK(CTG_WRITE, &dbCache->vgCache.vgLock);
-}
+void ctgWUnlockVgInfo(SCtgDBCache *dbCache) { CTG_UNLOCK(CTG_WRITE, &dbCache->vgCache.vgLock); }
-void ctgReleaseDBCache(SCatalog *pCtg, SCtgDBCache *dbCache) {
- CTG_UNLOCK(CTG_READ, &dbCache->dbLock);
-}
+void ctgReleaseDBCache(SCatalog *pCtg, SCtgDBCache *dbCache) { CTG_UNLOCK(CTG_READ, &dbCache->dbLock); }
-int32_t ctgAcquireDBCacheImpl(SCatalog* pCtg, const char *dbFName, SCtgDBCache **pCache, bool acquire) {
+int32_t ctgAcquireDBCacheImpl(SCatalog *pCtg, const char *dbFName, SCtgDBCache **pCache, bool acquire) {
char *p = strchr(dbFName, '.');
if (p && IS_SYS_DBNAME(p + 1)) {
dbFName = p + 1;
@@ -150,35 +94,35 @@ int32_t ctgAcquireDBCacheImpl(SCatalog* pCtg, const char *dbFName, SCtgDBCache *
if (dbCache->deleted) {
if (acquire) {
ctgReleaseDBCache(pCtg, dbCache);
- }
-
+ }
+
*pCache = NULL;
ctgDebug("db is removing from cache, dbFName:%s", dbFName);
return TSDB_CODE_SUCCESS;
}
*pCache = dbCache;
-
+
return TSDB_CODE_SUCCESS;
}
-int32_t ctgAcquireDBCache(SCatalog* pCtg, const char *dbFName, SCtgDBCache **pCache) {
+int32_t ctgAcquireDBCache(SCatalog *pCtg, const char *dbFName, SCtgDBCache **pCache) {
CTG_RET(ctgAcquireDBCacheImpl(pCtg, dbFName, pCache, true));
}
-int32_t ctgGetDBCache(SCatalog* pCtg, const char *dbFName, SCtgDBCache **pCache) {
+int32_t ctgGetDBCache(SCatalog *pCtg, const char *dbFName, SCtgDBCache **pCache) {
CTG_RET(ctgAcquireDBCacheImpl(pCtg, dbFName, pCache, false));
}
-void ctgReleaseVgInfoToCache(SCatalog* pCtg, SCtgDBCache *dbCache) {
+void ctgReleaseVgInfoToCache(SCatalog *pCtg, SCtgDBCache *dbCache) {
ctgRUnlockVgInfo(dbCache);
ctgReleaseDBCache(pCtg, dbCache);
}
-void ctgReleaseTbMetaToCache(SCatalog* pCtg, SCtgDBCache *dbCache, SCtgTbCache* pCache) {
+void ctgReleaseTbMetaToCache(SCatalog *pCtg, SCtgDBCache *dbCache, SCtgTbCache *pCache) {
if (pCache) {
CTG_UNLOCK(CTG_READ, &pCache->metaLock);
- taosHashRelease(dbCache->tbCache, pCache);
+ taosHashRelease(dbCache->tbCache, pCache);
}
if (dbCache) {
@@ -186,10 +130,10 @@ void ctgReleaseTbMetaToCache(SCatalog* pCtg, SCtgDBCache *dbCache, SCtgTbCache*
}
}
-void ctgReleaseTbIndexToCache(SCatalog* pCtg, SCtgDBCache *dbCache, SCtgTbCache* pCache) {
+void ctgReleaseTbIndexToCache(SCatalog *pCtg, SCtgDBCache *dbCache, SCtgTbCache *pCache) {
if (pCache) {
CTG_UNLOCK(CTG_READ, &pCache->indexLock);
- taosHashRelease(dbCache->tbCache, pCache);
+ taosHashRelease(dbCache->tbCache, pCache);
}
if (dbCache) {
@@ -197,10 +141,10 @@ void ctgReleaseTbIndexToCache(SCatalog* pCtg, SCtgDBCache *dbCache, SCtgTbCache*
}
}
-int32_t ctgAcquireVgInfoFromCache(SCatalog* pCtg, const char *dbFName, SCtgDBCache **pCache) {
+int32_t ctgAcquireVgInfoFromCache(SCatalog *pCtg, const char *dbFName, SCtgDBCache **pCache) {
SCtgDBCache *dbCache = NULL;
ctgAcquireDBCache(pCtg, dbFName, &dbCache);
- if (NULL == dbCache) {
+ if (NULL == dbCache) {
ctgDebug("db %s not in cache", dbFName);
goto _return;
}
@@ -217,7 +161,7 @@ int32_t ctgAcquireVgInfoFromCache(SCatalog* pCtg, const char *dbFName, SCtgDBCac
CTG_CACHE_STAT_INC(numOfVgHit, 1);
ctgDebug("Got db vgInfo from cache, dbFName:%s", dbFName);
-
+
return TSDB_CODE_SUCCESS;
_return:
@@ -229,19 +173,19 @@ _return:
*pCache = NULL;
CTG_CACHE_STAT_INC(numOfVgMiss, 1);
-
+
return TSDB_CODE_SUCCESS;
}
-int32_t ctgAcquireTbMetaFromCache(SCatalog* pCtg, char *dbFName, char* tbName, SCtgDBCache **pDb, SCtgTbCache** pTb) {
+int32_t ctgAcquireTbMetaFromCache(SCatalog *pCtg, char *dbFName, char *tbName, SCtgDBCache **pDb, SCtgTbCache **pTb) {
SCtgDBCache *dbCache = NULL;
- SCtgTbCache* pCache = NULL;
+ SCtgTbCache *pCache = NULL;
ctgAcquireDBCache(pCtg, dbFName, &dbCache);
if (NULL == dbCache) {
ctgDebug("db %s not in cache", dbFName);
goto _return;
}
-
+
pCache = taosHashAcquire(dbCache->tbCache, tbName, strlen(tbName));
if (NULL == pCache) {
ctgDebug("tb %s not in cache, dbFName:%s", tbName, dbFName);
@@ -258,7 +202,7 @@ int32_t ctgAcquireTbMetaFromCache(SCatalog* pCtg, char *dbFName, char* tbName, S
*pTb = pCache;
ctgDebug("tb %s meta got in cache, dbFName:%s", tbName, dbFName);
-
+
CTG_CACHE_STAT_INC(numOfMetaHit, 1);
return TSDB_CODE_SUCCESS;
@@ -268,20 +212,20 @@ _return:
ctgReleaseTbMetaToCache(pCtg, dbCache, pCache);
CTG_CACHE_STAT_INC(numOfMetaMiss, 1);
-
+
return TSDB_CODE_SUCCESS;
}
-int32_t ctgAcquireStbMetaFromCache(SCatalog* pCtg, char *dbFName, uint64_t suid, SCtgDBCache **pDb, SCtgTbCache** pTb) {
- SCtgDBCache* dbCache = NULL;
- SCtgTbCache* pCache = NULL;
+int32_t ctgAcquireStbMetaFromCache(SCatalog *pCtg, char *dbFName, uint64_t suid, SCtgDBCache **pDb, SCtgTbCache **pTb) {
+ SCtgDBCache *dbCache = NULL;
+ SCtgTbCache *pCache = NULL;
ctgAcquireDBCache(pCtg, dbFName, &dbCache);
if (NULL == dbCache) {
ctgDebug("db %s not in cache", dbFName);
goto _return;
}
-
- char* stName = taosHashAcquire(dbCache->stbCache, &suid, sizeof(suid));
+
+ char *stName = taosHashAcquire(dbCache->stbCache, &suid, sizeof(suid));
if (NULL == stName) {
ctgDebug("stb 0x%" PRIx64 " not in cache, dbFName:%s", suid, dbFName);
goto _return;
@@ -304,7 +248,7 @@ int32_t ctgAcquireStbMetaFromCache(SCatalog* pCtg, char *dbFName, uint64_t suid,
*pTb = pCache;
ctgDebug("stb 0x%" PRIx64 " meta got in cache, dbFName:%s", suid, dbFName);
-
+
CTG_CACHE_STAT_INC(numOfMetaHit, 1);
return TSDB_CODE_SUCCESS;
@@ -317,20 +261,19 @@ _return:
*pDb = NULL;
*pTb = NULL;
-
+
return TSDB_CODE_SUCCESS;
}
-
-int32_t ctgAcquireTbIndexFromCache(SCatalog* pCtg, char *dbFName, char* tbName, SCtgDBCache **pDb, SCtgTbCache** pTb) {
+int32_t ctgAcquireTbIndexFromCache(SCatalog *pCtg, char *dbFName, char *tbName, SCtgDBCache **pDb, SCtgTbCache **pTb) {
SCtgDBCache *dbCache = NULL;
- SCtgTbCache* pCache = NULL;
+ SCtgTbCache *pCache = NULL;
ctgAcquireDBCache(pCtg, dbFName, &dbCache);
if (NULL == dbCache) {
ctgDebug("db %s not in cache", dbFName);
goto _return;
}
-
+
int32_t sz = 0;
pCache = taosHashAcquire(dbCache->tbCache, tbName, strlen(tbName));
if (NULL == pCache) {
@@ -348,7 +291,7 @@ int32_t ctgAcquireTbIndexFromCache(SCatalog* pCtg, char *dbFName, char* tbName,
*pTb = pCache;
ctgDebug("tb %s index got in cache, dbFName:%s", tbName, dbFName);
-
+
CTG_CACHE_STAT_INC(numOfIndexHit, 1);
return TSDB_CODE_SUCCESS;
@@ -358,32 +301,31 @@ _return:
ctgReleaseTbIndexToCache(pCtg, dbCache, pCache);
CTG_CACHE_STAT_INC(numOfIndexMiss, 1);
-
+
return TSDB_CODE_SUCCESS;
}
-
-int32_t ctgTbMetaExistInCache(SCatalog* pCtg, char *dbFName, char* tbName, int32_t *exist) {
+int32_t ctgTbMetaExistInCache(SCatalog *pCtg, char *dbFName, char *tbName, int32_t *exist) {
SCtgDBCache *dbCache = NULL;
SCtgTbCache *tbCache = NULL;
ctgAcquireTbMetaFromCache(pCtg, dbFName, tbName, &dbCache, &tbCache);
if (NULL == tbCache) {
ctgReleaseTbMetaToCache(pCtg, dbCache, tbCache);
-
+
*exist = 0;
return TSDB_CODE_SUCCESS;
}
*exist = 1;
ctgReleaseTbMetaToCache(pCtg, dbCache, tbCache);
-
+
return TSDB_CODE_SUCCESS;
}
-int32_t ctgReadTbMetaFromCache(SCatalog* pCtg, SCtgTbMetaCtx* ctx, STableMeta** pTableMeta) {
- int32_t code = 0;
+int32_t ctgReadTbMetaFromCache(SCatalog *pCtg, SCtgTbMetaCtx *ctx, STableMeta **pTableMeta) {
+ int32_t code = 0;
SCtgDBCache *dbCache = NULL;
- SCtgTbCache *tbCache = NULL;
+ SCtgTbCache *tbCache = NULL;
*pTableMeta = NULL;
char dbFName[TSDB_DB_FNAME_LEN] = {0};
@@ -399,12 +341,12 @@ int32_t ctgReadTbMetaFromCache(SCatalog* pCtg, SCtgTbMetaCtx* ctx, STableMeta**
return TSDB_CODE_SUCCESS;
}
- STableMeta* tbMeta = tbCache->pMeta;
+ STableMeta *tbMeta = tbCache->pMeta;
ctx->tbInfo.inCache = true;
ctx->tbInfo.dbId = dbCache->dbId;
ctx->tbInfo.suid = tbMeta->suid;
ctx->tbInfo.tbType = tbMeta->tableType;
-
+
if (tbMeta->tableType != TSDB_CHILD_TABLE) {
int32_t metaSize = CTG_META_SIZE(tbMeta);
*pTableMeta = taosMemoryCalloc(1, metaSize);
@@ -414,14 +356,14 @@ int32_t ctgReadTbMetaFromCache(SCatalog* pCtg, SCtgTbMetaCtx* ctx, STableMeta**
}
memcpy(*pTableMeta, tbMeta, metaSize);
-
+
ctgReleaseTbMetaToCache(pCtg, dbCache, tbCache);
ctgDebug("Got tb %s meta from cache, type:%d, dbFName:%s", ctx->pName->tname, tbMeta->tableType, dbFName);
return TSDB_CODE_SUCCESS;
}
// PROCESS FOR CHILD TABLE
-
+
int32_t metaSize = sizeof(SCTableMeta);
*pTableMeta = taosMemoryCalloc(1, metaSize);
if (NULL == *pTableMeta) {
@@ -429,10 +371,10 @@ int32_t ctgReadTbMetaFromCache(SCatalog* pCtg, SCtgTbMetaCtx* ctx, STableMeta**
}
memcpy(*pTableMeta, tbMeta, metaSize);
-
+
ctgReleaseTbMetaToCache(pCtg, dbCache, tbCache);
- ctgDebug("Got ctb %s meta from cache, will continue to get its stb meta, type:%d, dbFName:%s",
- ctx->pName->tname, ctx->tbInfo.tbType, dbFName);
+ ctgDebug("Got ctb %s meta from cache, will continue to get its stb meta, type:%d, dbFName:%s", ctx->pName->tname,
+ ctx->tbInfo.tbType, dbFName);
ctgAcquireStbMetaFromCache(pCtg, dbFName, ctx->tbInfo.suid, &dbCache, &tbCache);
if (NULL == tbCache) {
@@ -441,17 +383,17 @@ int32_t ctgReadTbMetaFromCache(SCatalog* pCtg, SCtgTbMetaCtx* ctx, STableMeta**
ctgDebug("stb 0x%" PRIx64 " meta not in cache", ctx->tbInfo.suid);
return TSDB_CODE_SUCCESS;
}
-
- STableMeta* stbMeta = tbCache->pMeta;
- if (stbMeta->suid != ctx->tbInfo.suid) {
+
+ STableMeta *stbMeta = tbCache->pMeta;
+ if (stbMeta->suid != ctx->tbInfo.suid) {
ctgReleaseTbMetaToCache(pCtg, dbCache, tbCache);
- ctgError("stb suid 0x%" PRIx64 " in stbCache mis-match, expected suid 0x%"PRIx64 , stbMeta->suid, ctx->tbInfo.suid);
+ ctgError("stb suid 0x%" PRIx64 " in stbCache mis-match, expected suid 0x%" PRIx64, stbMeta->suid, ctx->tbInfo.suid);
CTG_ERR_JRET(TSDB_CODE_CTG_INTERNAL_ERROR);
}
metaSize = CTG_META_SIZE(stbMeta);
*pTableMeta = taosMemoryRealloc(*pTableMeta, metaSize);
- if (NULL == *pTableMeta) {
+ if (NULL == *pTableMeta) {
ctgReleaseTbMetaToCache(pCtg, dbCache, tbCache);
CTG_ERR_JRET(TSDB_CODE_OUT_OF_MEMORY);
}
@@ -461,24 +403,24 @@ int32_t ctgReadTbMetaFromCache(SCatalog* pCtg, SCtgTbMetaCtx* ctx, STableMeta**
ctgReleaseTbMetaToCache(pCtg, dbCache, tbCache);
ctgDebug("Got tb %s meta from cache, dbFName:%s", ctx->pName->tname, dbFName);
-
+
return TSDB_CODE_SUCCESS;
_return:
ctgReleaseTbMetaToCache(pCtg, dbCache, tbCache);
taosMemoryFreeClear(*pTableMeta);
-
+
CTG_RET(code);
}
-int32_t ctgReadTbVerFromCache(SCatalog *pCtg, SName *pTableName, int32_t *sver, int32_t *tver, int32_t *tbType, uint64_t *suid,
- char *stbName) {
+int32_t ctgReadTbVerFromCache(SCatalog *pCtg, SName *pTableName, int32_t *sver, int32_t *tver, int32_t *tbType,
+ uint64_t *suid, char *stbName) {
*sver = -1;
*tver = -1;
SCtgDBCache *dbCache = NULL;
- SCtgTbCache *tbCache = NULL;
+ SCtgTbCache *tbCache = NULL;
char dbFName[TSDB_DB_FNAME_LEN] = {0};
tNameGetFullDbName(pTableName, dbFName);
@@ -488,7 +430,7 @@ int32_t ctgReadTbVerFromCache(SCatalog *pCtg, SName *pTableName, int32_t *sver,
return TSDB_CODE_SUCCESS;
}
- STableMeta* tbMeta = tbCache->pMeta;
+ STableMeta *tbMeta = tbCache->pMeta;
*tbType = tbMeta->tableType;
*suid = tbMeta->suid;
@@ -496,29 +438,29 @@ int32_t ctgReadTbVerFromCache(SCatalog *pCtg, SName *pTableName, int32_t *sver,
*sver = tbMeta->sversion;
*tver = tbMeta->tversion;
- ctgDebug("Got tb %s ver from cache, dbFName:%s, tbType:%d, sver:%d, tver:%d, suid:0x%" PRIx64,
- pTableName->tname, dbFName, *tbType, *sver, *tver, *suid);
+ ctgDebug("Got tb %s ver from cache, dbFName:%s, tbType:%d, sver:%d, tver:%d, suid:0x%" PRIx64, pTableName->tname,
+ dbFName, *tbType, *sver, *tver, *suid);
ctgReleaseTbMetaToCache(pCtg, dbCache, tbCache);
return TSDB_CODE_SUCCESS;
}
// PROCESS FOR CHILD TABLE
-
+
ctgReleaseTbMetaToCache(pCtg, dbCache, tbCache);
ctgDebug("Got ctb %s ver from cache, will continue to get its stb ver, dbFName:%s", pTableName->tname, dbFName);
-
+
ctgAcquireStbMetaFromCache(pCtg, dbFName, *suid, &dbCache, &tbCache);
if (NULL == tbCache) {
ctgReleaseTbMetaToCache(pCtg, dbCache, tbCache);
ctgDebug("stb 0x%" PRIx64 " meta not in cache", *suid);
return TSDB_CODE_SUCCESS;
}
-
- STableMeta* stbMeta = tbCache->pMeta;
+
+ STableMeta *stbMeta = tbCache->pMeta;
if (stbMeta->suid != *suid) {
ctgReleaseTbMetaToCache(pCtg, dbCache, tbCache);
- ctgError("stb suid 0x%" PRIx64 " in stbCache mis-match, expected suid:0x%" PRIx64 , stbMeta->suid, *suid);
+ ctgError("stb suid 0x%" PRIx64 " in stbCache mis-match, expected suid:0x%" PRIx64, stbMeta->suid, *suid);
CTG_ERR_RET(TSDB_CODE_CTG_INTERNAL_ERROR);
}
@@ -533,15 +475,15 @@ int32_t ctgReadTbVerFromCache(SCatalog *pCtg, SName *pTableName, int32_t *sver,
ctgReleaseTbMetaToCache(pCtg, dbCache, tbCache);
- ctgDebug("Got tb %s sver %d tver %d from cache, type:%d, dbFName:%s", pTableName->tname, *sver, *tver, *tbType, dbFName);
+ ctgDebug("Got tb %s sver %d tver %d from cache, type:%d, dbFName:%s", pTableName->tname, *sver, *tver, *tbType,
+ dbFName);
return TSDB_CODE_SUCCESS;
}
-
-int32_t ctgReadTbTypeFromCache(SCatalog* pCtg, char* dbFName, char *tbName, int32_t *tbType) {
+int32_t ctgReadTbTypeFromCache(SCatalog *pCtg, char *dbFName, char *tbName, int32_t *tbType) {
SCtgDBCache *dbCache = NULL;
- SCtgTbCache *tbCache = NULL;
+ SCtgTbCache *tbCache = NULL;
CTG_ERR_RET(ctgAcquireTbMetaFromCache(pCtg, dbFName, tbName, &dbCache, &tbCache));
if (NULL == tbCache) {
ctgReleaseTbMetaToCache(pCtg, dbCache, tbCache);
@@ -551,15 +493,15 @@ int32_t ctgReadTbTypeFromCache(SCatalog* pCtg, char* dbFName, char *tbName, int3
*tbType = tbCache->pMeta->tableType;
ctgReleaseTbMetaToCache(pCtg, dbCache, tbCache);
- ctgDebug("Got tb %s tbType %d from cache, dbFName:%s", tbName, *tbType, dbFName);
-
+ ctgDebug("Got tb %s tbType %d from cache, dbFName:%s", tbName, *tbType, dbFName);
+
return TSDB_CODE_SUCCESS;
}
-int32_t ctgReadTbIndexFromCache(SCatalog* pCtg, SName* pTableName, SArray** pRes) {
- int32_t code = 0;
+int32_t ctgReadTbIndexFromCache(SCatalog *pCtg, SName *pTableName, SArray **pRes) {
+ int32_t code = 0;
SCtgDBCache *dbCache = NULL;
- SCtgTbCache *tbCache = NULL;
+ SCtgTbCache *tbCache = NULL;
char dbFName[TSDB_DB_FNAME_LEN] = {0};
tNameGetFullDbName(pTableName, dbFName);
@@ -580,14 +522,14 @@ _return:
CTG_RET(code);
}
-int32_t ctgChkAuthFromCache(SCatalog* pCtg, char* user, char* dbFName, AUTH_TYPE type, bool *inCache, bool *pass) {
+int32_t ctgChkAuthFromCache(SCatalog *pCtg, char *user, char *dbFName, AUTH_TYPE type, bool *inCache, bool *pass) {
char *p = strchr(dbFName, '.');
if (p) {
++p;
} else {
p = dbFName;
}
-
+
if (IS_SYS_DBNAME(p)) {
*inCache = true;
*pass = true;
@@ -605,7 +547,7 @@ int32_t ctgChkAuthFromCache(SCatalog* pCtg, char* user, char* dbFName, AUTH_TYPE
ctgDebug("Got user from cache, user:%s", user);
CTG_CACHE_STAT_INC(numOfUserHit, 1);
-
+
if (pUser->superUser) {
*pass = true;
return TSDB_CODE_SUCCESS;
@@ -617,54 +559,53 @@ int32_t ctgChkAuthFromCache(SCatalog* pCtg, char* user, char* dbFName, AUTH_TYPE
CTG_UNLOCK(CTG_READ, &pUser->lock);
return TSDB_CODE_SUCCESS;
}
-
+
if (pUser->readDbs && taosHashGet(pUser->readDbs, dbFName, strlen(dbFName)) && type == AUTH_TYPE_READ) {
*pass = true;
}
-
+
if (pUser->writeDbs && taosHashGet(pUser->writeDbs, dbFName, strlen(dbFName)) && type == AUTH_TYPE_WRITE) {
*pass = true;
}
CTG_UNLOCK(CTG_READ, &pUser->lock);
-
+
return TSDB_CODE_SUCCESS;
_return:
*inCache = false;
CTG_CACHE_STAT_INC(numOfUserMiss, 1);
-
+
return TSDB_CODE_SUCCESS;
}
void ctgDequeue(SCtgCacheOperation **op) {
SCtgQNode *orig = gCtgMgmt.queue.head;
-
+
SCtgQNode *node = gCtgMgmt.queue.head->next;
gCtgMgmt.queue.head = gCtgMgmt.queue.head->next;
CTG_QUEUE_DEC();
-
+
taosMemoryFreeClear(orig);
*op = node->op;
}
-
-int32_t ctgEnqueue(SCatalog* pCtg, SCtgCacheOperation *operation) {
+int32_t ctgEnqueue(SCatalog *pCtg, SCtgCacheOperation *operation) {
SCtgQNode *node = taosMemoryCalloc(1, sizeof(SCtgQNode));
if (NULL == node) {
qError("calloc %d failed", (int32_t)sizeof(SCtgQNode));
CTG_RET(TSDB_CODE_OUT_OF_MEMORY);
}
- bool syncOp = operation->syncOp;
- char* opName = gCtgCacheOperation[operation->opId].name;
+ bool syncOp = operation->syncOp;
+ char *opName = gCtgCacheOperation[operation->opId].name;
if (operation->syncOp) {
tsem_init(&operation->rspSem, 0, 0);
}
-
+
node->op = operation;
CTG_LOCK(CTG_WRITE, &gCtgMgmt.queue.qlock);
@@ -699,12 +640,11 @@ int32_t ctgEnqueue(SCatalog* pCtg, SCtgCacheOperation *operation) {
return TSDB_CODE_SUCCESS;
}
-
-int32_t ctgDropDbCacheEnqueue(SCatalog* pCtg, const char *dbFName, int64_t dbId) {
- int32_t code = 0;
+int32_t ctgDropDbCacheEnqueue(SCatalog *pCtg, const char *dbFName, int64_t dbId) {
+ int32_t code = 0;
SCtgCacheOperation *op = taosMemoryCalloc(1, sizeof(SCtgCacheOperation));
op->opId = CTG_OP_DROP_DB_CACHE;
-
+
SCtgDropDBMsg *msg = taosMemoryMalloc(sizeof(SCtgDropDBMsg));
if (NULL == msg) {
ctgError("malloc %d failed", (int32_t)sizeof(SCtgDropDBMsg));
@@ -732,12 +672,12 @@ _return:
CTG_RET(code);
}
-int32_t ctgDropDbVgroupEnqueue(SCatalog* pCtg, const char *dbFName, bool syncOp) {
- int32_t code = 0;
+int32_t ctgDropDbVgroupEnqueue(SCatalog *pCtg, const char *dbFName, bool syncOp) {
+ int32_t code = 0;
SCtgCacheOperation *op = taosMemoryCalloc(1, sizeof(SCtgCacheOperation));
op->opId = CTG_OP_DROP_DB_VGROUP;
op->syncOp = syncOp;
-
+
SCtgDropDbVgroupMsg *msg = taosMemoryMalloc(sizeof(SCtgDropDbVgroupMsg));
if (NULL == msg) {
ctgError("malloc %d failed", (int32_t)sizeof(SCtgDropDbVgroupMsg));
@@ -764,14 +704,13 @@ _return:
CTG_RET(code);
}
-
-
-int32_t ctgDropStbMetaEnqueue(SCatalog* pCtg, const char *dbFName, int64_t dbId, const char *stbName, uint64_t suid, bool syncOp) {
- int32_t code = 0;
+int32_t ctgDropStbMetaEnqueue(SCatalog *pCtg, const char *dbFName, int64_t dbId, const char *stbName, uint64_t suid,
+ bool syncOp) {
+ int32_t code = 0;
SCtgCacheOperation *op = taosMemoryCalloc(1, sizeof(SCtgCacheOperation));
op->opId = CTG_OP_DROP_STB_META;
op->syncOp = syncOp;
-
+
SCtgDropStbMetaMsg *msg = taosMemoryMalloc(sizeof(SCtgDropStbMetaMsg));
if (NULL == msg) {
ctgError("malloc %d failed", (int32_t)sizeof(SCtgDropStbMetaMsg));
@@ -796,14 +735,12 @@ _return:
CTG_RET(code);
}
-
-
-int32_t ctgDropTbMetaEnqueue(SCatalog* pCtg, const char *dbFName, int64_t dbId, const char *tbName, bool syncOp) {
- int32_t code = 0;
+int32_t ctgDropTbMetaEnqueue(SCatalog *pCtg, const char *dbFName, int64_t dbId, const char *tbName, bool syncOp) {
+ int32_t code = 0;
SCtgCacheOperation *op = taosMemoryCalloc(1, sizeof(SCtgCacheOperation));
op->opId = CTG_OP_DROP_TB_META;
op->syncOp = syncOp;
-
+
SCtgDropTblMetaMsg *msg = taosMemoryMalloc(sizeof(SCtgDropTblMetaMsg));
if (NULL == msg) {
ctgError("malloc %d failed", (int32_t)sizeof(SCtgDropTblMetaMsg));
@@ -827,12 +764,12 @@ _return:
CTG_RET(code);
}
-int32_t ctgUpdateVgroupEnqueue(SCatalog* pCtg, const char *dbFName, int64_t dbId, SDBVgInfo* dbInfo, bool syncOp) {
- int32_t code = 0;
+int32_t ctgUpdateVgroupEnqueue(SCatalog *pCtg, const char *dbFName, int64_t dbId, SDBVgInfo *dbInfo, bool syncOp) {
+ int32_t code = 0;
SCtgCacheOperation *op = taosMemoryCalloc(1, sizeof(SCtgCacheOperation));
op->opId = CTG_OP_UPDATE_VGROUP;
op->syncOp = syncOp;
-
+
SCtgUpdateVgMsg *msg = taosMemoryMalloc(sizeof(SCtgUpdateVgMsg));
if (NULL == msg) {
ctgError("malloc %d failed", (int32_t)sizeof(SCtgUpdateVgMsg));
@@ -864,12 +801,12 @@ _return:
CTG_RET(code);
}
-int32_t ctgUpdateTbMetaEnqueue(SCatalog* pCtg, STableMetaOutput *output, bool syncOp) {
- int32_t code = 0;
+int32_t ctgUpdateTbMetaEnqueue(SCatalog *pCtg, STableMetaOutput *output, bool syncOp) {
+ int32_t code = 0;
SCtgCacheOperation *op = taosMemoryCalloc(1, sizeof(SCtgCacheOperation));
op->opId = CTG_OP_UPDATE_TB_META;
op->syncOp = syncOp;
-
+
SCtgUpdateTbMetaMsg *msg = taosMemoryMalloc(sizeof(SCtgUpdateTbMetaMsg));
if (NULL == msg) {
ctgError("malloc %d failed", (int32_t)sizeof(SCtgUpdateTbMetaMsg));
@@ -889,7 +826,7 @@ int32_t ctgUpdateTbMetaEnqueue(SCatalog* pCtg, STableMetaOutput *output, bool sy
CTG_ERR_JRET(ctgEnqueue(pCtg, op));
return TSDB_CODE_SUCCESS;
-
+
_return:
if (output) {
@@ -898,15 +835,15 @@ _return:
}
taosMemoryFreeClear(msg);
-
+
CTG_RET(code);
}
-int32_t ctgUpdateVgEpsetEnqueue(SCatalog* pCtg, char *dbFName, int32_t vgId, SEpSet* pEpSet) {
- int32_t code = 0;
+int32_t ctgUpdateVgEpsetEnqueue(SCatalog *pCtg, char *dbFName, int32_t vgId, SEpSet *pEpSet) {
+ int32_t code = 0;
SCtgCacheOperation *op = taosMemoryCalloc(1, sizeof(SCtgCacheOperation));
op->opId = CTG_OP_UPDATE_VG_EPSET;
-
+
SCtgUpdateEpsetMsg *msg = taosMemoryMalloc(sizeof(SCtgUpdateEpsetMsg));
if (NULL == msg) {
ctgError("malloc %d failed", (int32_t)sizeof(SCtgUpdateEpsetMsg));
@@ -923,22 +860,20 @@ int32_t ctgUpdateVgEpsetEnqueue(SCatalog* pCtg, char *dbFName, int32_t vgId, SEp
CTG_ERR_JRET(ctgEnqueue(pCtg, op));
return TSDB_CODE_SUCCESS;
-
+
_return:
taosMemoryFreeClear(msg);
-
+
CTG_RET(code);
}
-
-
-int32_t ctgUpdateUserEnqueue(SCatalog* pCtg, SGetUserAuthRsp *pAuth, bool syncOp) {
- int32_t code = 0;
+int32_t ctgUpdateUserEnqueue(SCatalog *pCtg, SGetUserAuthRsp *pAuth, bool syncOp) {
+ int32_t code = 0;
SCtgCacheOperation *op = taosMemoryCalloc(1, sizeof(SCtgCacheOperation));
op->opId = CTG_OP_UPDATE_USER;
op->syncOp = syncOp;
-
+
SCtgUpdateUserMsg *msg = taosMemoryMalloc(sizeof(SCtgUpdateUserMsg));
if (NULL == msg) {
ctgError("malloc %d failed", (int32_t)sizeof(SCtgUpdateUserMsg));
@@ -951,23 +886,23 @@ int32_t ctgUpdateUserEnqueue(SCatalog* pCtg, SGetUserAuthRsp *pAuth, bool syncOp
op->data = msg;
CTG_ERR_JRET(ctgEnqueue(pCtg, op));
-
+
return TSDB_CODE_SUCCESS;
-
+
_return:
tFreeSGetUserAuthRsp(pAuth);
taosMemoryFreeClear(msg);
-
+
CTG_RET(code);
}
-int32_t ctgUpdateTbIndexEnqueue(SCatalog* pCtg, STableIndex **pIndex, bool syncOp) {
- int32_t code = 0;
+int32_t ctgUpdateTbIndexEnqueue(SCatalog *pCtg, STableIndex **pIndex, bool syncOp) {
+ int32_t code = 0;
SCtgCacheOperation *op = taosMemoryCalloc(1, sizeof(SCtgCacheOperation));
op->opId = CTG_OP_UPDATE_TB_INDEX;
op->syncOp = syncOp;
-
+
SCtgUpdateTbIndexMsg *msg = taosMemoryMalloc(sizeof(SCtgUpdateTbIndexMsg));
if (NULL == msg) {
ctgError("malloc %d failed", (int32_t)sizeof(SCtgUpdateTbIndexMsg));
@@ -983,22 +918,22 @@ int32_t ctgUpdateTbIndexEnqueue(SCatalog* pCtg, STableIndex **pIndex, bool syncO
*pIndex = NULL;
return TSDB_CODE_SUCCESS;
-
+
_return:
taosArrayDestroyEx((*pIndex)->pIndex, tFreeSTableIndexInfo);
taosMemoryFreeClear(*pIndex);
taosMemoryFreeClear(msg);
-
+
CTG_RET(code);
}
-int32_t ctgDropTbIndexEnqueue(SCatalog* pCtg, SName* pName, bool syncOp) {
- int32_t code = 0;
+int32_t ctgDropTbIndexEnqueue(SCatalog *pCtg, SName *pName, bool syncOp) {
+ int32_t code = 0;
SCtgCacheOperation *op = taosMemoryCalloc(1, sizeof(SCtgCacheOperation));
op->opId = CTG_OP_DROP_TB_INDEX;
op->syncOp = syncOp;
-
+
SCtgDropTbIndexMsg *msg = taosMemoryMalloc(sizeof(SCtgDropTbIndexMsg));
if (NULL == msg) {
ctgError("malloc %d failed", (int32_t)sizeof(SCtgDropTbIndexMsg));
@@ -1012,25 +947,24 @@ int32_t ctgDropTbIndexEnqueue(SCatalog* pCtg, SName* pName, bool syncOp) {
op->data = msg;
CTG_ERR_JRET(ctgEnqueue(pCtg, op));
-
+
return TSDB_CODE_SUCCESS;
-
+
_return:
taosMemoryFreeClear(msg);
-
+
CTG_RET(code);
}
-
-int32_t ctgClearCacheEnqueue(SCatalog* pCtg, bool freeCtg, bool stopQueue, bool syncOp) {
- int32_t code = 0;
+int32_t ctgClearCacheEnqueue(SCatalog *pCtg, bool freeCtg, bool stopQueue, bool syncOp) {
+ int32_t code = 0;
SCtgCacheOperation *op = taosMemoryCalloc(1, sizeof(SCtgCacheOperation));
op->opId = CTG_OP_CLEAR_CACHE;
op->syncOp = syncOp;
op->stopQueue = stopQueue;
op->unLocked = true;
-
+
SCtgClearCacheMsg *msg = taosMemoryMalloc(sizeof(SCtgClearCacheMsg));
if (NULL == msg) {
ctgError("malloc %d failed", (int32_t)sizeof(SCtgClearCacheMsg));
@@ -1042,24 +976,23 @@ int32_t ctgClearCacheEnqueue(SCatalog* pCtg, bool freeCtg, bool stopQueue, bool
op->data = msg;
CTG_ERR_JRET(ctgEnqueue(pCtg, op));
-
+
return TSDB_CODE_SUCCESS;
-
+
_return:
taosMemoryFreeClear(msg);
-
+
CTG_RET(code);
}
-
int32_t ctgMetaRentInit(SCtgRentMgmt *mgmt, uint32_t rentSec, int8_t type) {
mgmt->slotRIdx = 0;
mgmt->slotNum = rentSec / CTG_RENT_SLOT_SECOND;
mgmt->type = type;
size_t msgSize = sizeof(SCtgRentSlot) * mgmt->slotNum;
-
+
mgmt->slots = taosMemoryCalloc(1, msgSize);
if (NULL == mgmt->slots) {
qError("calloc %d failed", (int32_t)msgSize);
@@ -1067,34 +1000,34 @@ int32_t ctgMetaRentInit(SCtgRentMgmt *mgmt, uint32_t rentSec, int8_t type) {
}
qDebug("meta rent initialized, type:%d, slotNum:%d", type, mgmt->slotNum);
-
+
return TSDB_CODE_SUCCESS;
}
-
int32_t ctgMetaRentAdd(SCtgRentMgmt *mgmt, void *meta, int64_t id, int32_t size) {
int16_t widx = abs((int)(id % mgmt->slotNum));
SCtgRentSlot *slot = &mgmt->slots[widx];
- int32_t code = 0;
-
+ int32_t code = 0;
+
CTG_LOCK(CTG_WRITE, &slot->lock);
if (NULL == slot->meta) {
slot->meta = taosArrayInit(CTG_DEFAULT_RENT_SLOT_SIZE, size);
if (NULL == slot->meta) {
- qError("taosArrayInit %d failed, id:0x%"PRIx64", slot idx:%d, type:%d", CTG_DEFAULT_RENT_SLOT_SIZE, id, widx, mgmt->type);
+ qError("taosArrayInit %d failed, id:0x%" PRIx64 ", slot idx:%d, type:%d", CTG_DEFAULT_RENT_SLOT_SIZE, id, widx,
+ mgmt->type);
CTG_ERR_JRET(TSDB_CODE_OUT_OF_MEMORY);
}
}
if (NULL == taosArrayPush(slot->meta, meta)) {
- qError("taosArrayPush meta to rent failed, id:0x%"PRIx64", slot idx:%d, type:%d", id, widx, mgmt->type);
+ qError("taosArrayPush meta to rent failed, id:0x%" PRIx64 ", slot idx:%d, type:%d", id, widx, mgmt->type);
CTG_ERR_JRET(TSDB_CODE_OUT_OF_MEMORY);
}
slot->needSort = true;
- qDebug("add meta to rent, id:0x%"PRIx64", slot idx:%d, type:%d", id, widx, mgmt->type);
+ qDebug("add meta to rent, id:0x%" PRIx64 ", slot idx:%d, type:%d", id, widx, mgmt->type);
_return:
@@ -1102,20 +1035,22 @@ _return:
CTG_RET(code);
}
-int32_t ctgMetaRentUpdate(SCtgRentMgmt *mgmt, void *meta, int64_t id, int32_t size, __compar_fn_t sortCompare, __compar_fn_t searchCompare) {
+int32_t ctgMetaRentUpdate(SCtgRentMgmt *mgmt, void *meta, int64_t id, int32_t size, __compar_fn_t sortCompare,
+ __compar_fn_t searchCompare) {
int16_t widx = abs((int)(id % mgmt->slotNum));
SCtgRentSlot *slot = &mgmt->slots[widx];
- int32_t code = 0;
+ int32_t code = 0;
CTG_LOCK(CTG_WRITE, &slot->lock);
if (NULL == slot->meta) {
- qDebug("empty meta slot, id:0x%"PRIx64", slot idx:%d, type:%d", id, widx, mgmt->type);
+ qDebug("empty meta slot, id:0x%" PRIx64 ", slot idx:%d, type:%d", id, widx, mgmt->type);
CTG_ERR_JRET(TSDB_CODE_CTG_INTERNAL_ERROR);
}
if (slot->needSort) {
- qDebug("meta slot before sorte, slot idx:%d, type:%d, size:%d", widx, mgmt->type, (int32_t)taosArrayGetSize(slot->meta));
+ qDebug("meta slot before sorte, slot idx:%d, type:%d, size:%d", widx, mgmt->type,
+ (int32_t)taosArrayGetSize(slot->meta));
taosArraySort(slot->meta, sortCompare);
slot->needSort = false;
qDebug("meta slot sorted, slot idx:%d, type:%d, size:%d", widx, mgmt->type, (int32_t)taosArrayGetSize(slot->meta));
@@ -1123,20 +1058,22 @@ int32_t ctgMetaRentUpdate(SCtgRentMgmt *mgmt, void *meta, int64_t id, int32_t si
void *orig = taosArraySearch(slot->meta, &id, searchCompare, TD_EQ);
if (NULL == orig) {
- qDebug("meta not found in slot, id:0x%"PRIx64", slot idx:%d, type:%d, size:%d", id, widx, mgmt->type, (int32_t)taosArrayGetSize(slot->meta));
+ qDebug("meta not found in slot, id:0x%" PRIx64 ", slot idx:%d, type:%d, size:%d", id, widx, mgmt->type,
+ (int32_t)taosArrayGetSize(slot->meta));
CTG_ERR_JRET(TSDB_CODE_CTG_INTERNAL_ERROR);
}
memcpy(orig, meta, size);
- qDebug("meta in rent updated, id:0x%"PRIx64", slot idx:%d, type:%d", id, widx, mgmt->type);
+ qDebug("meta in rent updated, id:0x%" PRIx64 ", slot idx:%d, type:%d", id, widx, mgmt->type);
_return:
CTG_UNLOCK(CTG_WRITE, &slot->lock);
if (code) {
- qDebug("meta in rent update failed, will try to add it, code:%x, id:0x%"PRIx64", slot idx:%d, type:%d", code, id, widx, mgmt->type);
+ qDebug("meta in rent update failed, will try to add it, code:%x, id:0x%" PRIx64 ", slot idx:%d, type:%d", code, id,
+ widx, mgmt->type);
CTG_RET(ctgMetaRentAdd(mgmt, meta, id, size));
}
@@ -1147,11 +1084,11 @@ int32_t ctgMetaRentRemove(SCtgRentMgmt *mgmt, int64_t id, __compar_fn_t sortComp
int16_t widx = abs((int)(id % mgmt->slotNum));
SCtgRentSlot *slot = &mgmt->slots[widx];
- int32_t code = 0;
-
+ int32_t code = 0;
+
CTG_LOCK(CTG_WRITE, &slot->lock);
if (NULL == slot->meta) {
- qError("empty meta slot, id:0x%"PRIx64", slot idx:%d, type:%d", id, widx, mgmt->type);
+ qError("empty meta slot, id:0x%" PRIx64 ", slot idx:%d, type:%d", id, widx, mgmt->type);
CTG_ERR_JRET(TSDB_CODE_CTG_INTERNAL_ERROR);
}
@@ -1163,13 +1100,13 @@ int32_t ctgMetaRentRemove(SCtgRentMgmt *mgmt, int64_t id, __compar_fn_t sortComp
int32_t idx = taosArraySearchIdx(slot->meta, &id, searchCompare, TD_EQ);
if (idx < 0) {
- qError("meta not found in slot, id:0x%"PRIx64", slot idx:%d, type:%d", id, widx, mgmt->type);
+ qError("meta not found in slot, id:0x%" PRIx64 ", slot idx:%d, type:%d", id, widx, mgmt->type);
CTG_ERR_JRET(TSDB_CODE_CTG_INTERNAL_ERROR);
}
taosArrayRemove(slot->meta, idx);
- qDebug("meta in rent removed, id:0x%"PRIx64", slot idx:%d, type:%d", id, widx, mgmt->type);
+ qDebug("meta in rent removed, id:0x%" PRIx64 ", slot idx:%d, type:%d", id, widx, mgmt->type);
_return:
@@ -1178,7 +1115,6 @@ _return:
CTG_RET(code);
}
-
int32_t ctgMetaRentGetImpl(SCtgRentMgmt *mgmt, void **res, uint32_t *num, int32_t size) {
int16_t ridx = atomic_add_fetch_16(&mgmt->slotRIdx, 1);
if (ridx >= mgmt->slotNum) {
@@ -1187,8 +1123,8 @@ int32_t ctgMetaRentGetImpl(SCtgRentMgmt *mgmt, void **res, uint32_t *num, int32_
}
SCtgRentSlot *slot = &mgmt->slots[ridx];
- int32_t code = 0;
-
+ int32_t code = 0;
+
CTG_LOCK(CTG_READ, &slot->lock);
if (NULL == slot->meta) {
qDebug("empty meta in slot:%d, type:%d", ridx, mgmt->type);
@@ -1254,13 +1190,15 @@ int32_t ctgAddNewDBCache(SCatalog *pCtg, const char *dbFName, uint64_t dbId) {
SCtgDBCache newDBCache = {0};
newDBCache.dbId = dbId;
- newDBCache.tbCache = taosHashInit(gCtgMgmt.cfg.maxTblCacheNum, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, HASH_ENTRY_LOCK);
+ newDBCache.tbCache = taosHashInit(gCtgMgmt.cfg.maxTblCacheNum, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY),
+ true, HASH_ENTRY_LOCK);
if (NULL == newDBCache.tbCache) {
ctgError("taosHashInit %d metaCache failed", gCtgMgmt.cfg.maxTblCacheNum);
CTG_ERR_RET(TSDB_CODE_OUT_OF_MEMORY);
}
- newDBCache.stbCache = taosHashInit(gCtgMgmt.cfg.maxTblCacheNum, taosGetDefaultHashFunction(TSDB_DATA_TYPE_UBIGINT), true, HASH_ENTRY_LOCK);
+ newDBCache.stbCache = taosHashInit(gCtgMgmt.cfg.maxTblCacheNum, taosGetDefaultHashFunction(TSDB_DATA_TYPE_UBIGINT),
+ true, HASH_ENTRY_LOCK);
if (NULL == newDBCache.stbCache) {
ctgError("taosHashInit %d stbCache failed", gCtgMgmt.cfg.maxTblCacheNum);
CTG_ERR_JRET(TSDB_CODE_OUT_OF_MEMORY);
@@ -1272,21 +1210,21 @@ int32_t ctgAddNewDBCache(SCatalog *pCtg, const char *dbFName, uint64_t dbId) {
ctgDebug("db already in cache, dbFName:%s", dbFName);
goto _return;
}
-
+
ctgError("taosHashPut db to cache failed, dbFName:%s", dbFName);
CTG_ERR_JRET(TSDB_CODE_OUT_OF_MEMORY);
}
CTG_CACHE_STAT_INC(numOfDb, 1);
-
+
SDbVgVersion vgVersion = {.dbId = newDBCache.dbId, .vgVersion = -1};
strncpy(vgVersion.dbFName, dbFName, sizeof(vgVersion.dbFName));
- ctgDebug("db added to cache, dbFName:%s, dbId:0x%"PRIx64, dbFName, dbId);
+ ctgDebug("db added to cache, dbFName:%s, dbId:0x%" PRIx64, dbFName, dbId);
CTG_ERR_RET(ctgMetaRentAdd(&pCtg->dbRent, &vgVersion, dbId, sizeof(SDbVgVersion)));
- ctgDebug("db added to rent, dbFName:%s, vgVersion:%d, dbId:0x%"PRIx64, dbFName, vgVersion.vgVersion, dbId);
+ ctgDebug("db added to rent, dbFName:%s, vgVersion:%d, dbId:0x%" PRIx64, dbFName, vgVersion.vgVersion, dbId);
return TSDB_CODE_SUCCESS;
@@ -1297,30 +1235,29 @@ _return:
CTG_RET(code);
}
-
-void ctgRemoveStbRent(SCatalog* pCtg, SCtgDBCache *dbCache) {
+void ctgRemoveStbRent(SCatalog *pCtg, SCtgDBCache *dbCache) {
if (NULL == dbCache->stbCache) {
return;
}
-
+
void *pIter = taosHashIterate(dbCache->stbCache, NULL);
while (pIter) {
uint64_t *suid = NULL;
suid = taosHashGetKey(pIter, NULL);
- if (TSDB_CODE_SUCCESS == ctgMetaRentRemove(&pCtg->stbRent, *suid, ctgStbVersionSortCompare, ctgStbVersionSearchCompare)) {
- ctgDebug("stb removed from rent, suid:0x%"PRIx64, *suid);
+ if (TSDB_CODE_SUCCESS ==
+ ctgMetaRentRemove(&pCtg->stbRent, *suid, ctgStbVersionSortCompare, ctgStbVersionSearchCompare)) {
+ ctgDebug("stb removed from rent, suid:0x%" PRIx64, *suid);
}
-
+
pIter = taosHashIterate(dbCache->stbCache, pIter);
}
}
-
-int32_t ctgRemoveDBFromCache(SCatalog* pCtg, SCtgDBCache *dbCache, const char* dbFName) {
+int32_t ctgRemoveDBFromCache(SCatalog *pCtg, SCtgDBCache *dbCache, const char *dbFName) {
uint64_t dbId = dbCache->dbId;
-
- ctgInfo("start to remove db from cache, dbFName:%s, dbId:0x%"PRIx64, dbFName, dbCache->dbId);
+
+ ctgInfo("start to remove db from cache, dbFName:%s, dbId:0x%" PRIx64, dbFName, dbCache->dbId);
CTG_LOCK(CTG_WRITE, &dbCache->dbLock);
@@ -1331,7 +1268,7 @@ int32_t ctgRemoveDBFromCache(SCatalog* pCtg, SCtgDBCache *dbCache, const char* d
CTG_UNLOCK(CTG_WRITE, &dbCache->dbLock);
CTG_ERR_RET(ctgMetaRentRemove(&pCtg->dbRent, dbId, ctgDbVgVersionSortCompare, ctgDbVgVersionSearchCompare));
- ctgDebug("db removed from rent, dbFName:%s, dbId:0x%"PRIx64, dbFName, dbId);
+ ctgDebug("db removed from rent, dbFName:%s, dbId:0x%" PRIx64, dbFName, dbId);
if (taosHashRemove(pCtg->dbCache, dbFName, strlen(dbFName))) {
ctgInfo("taosHashRemove from dbCache failed, may be removed, dbFName:%s", dbFName);
@@ -1339,19 +1276,18 @@ int32_t ctgRemoveDBFromCache(SCatalog* pCtg, SCtgDBCache *dbCache, const char* d
}
CTG_CACHE_STAT_DEC(numOfDb, 1);
- ctgInfo("db removed from cache, dbFName:%s, dbId:0x%"PRIx64, dbFName, dbId);
-
+ ctgInfo("db removed from cache, dbFName:%s, dbId:0x%" PRIx64, dbFName, dbId);
+
return TSDB_CODE_SUCCESS;
}
-
-int32_t ctgGetAddDBCache(SCatalog* pCtg, const char *dbFName, uint64_t dbId, SCtgDBCache **pCache) {
- int32_t code = 0;
+int32_t ctgGetAddDBCache(SCatalog *pCtg, const char *dbFName, uint64_t dbId, SCtgDBCache **pCache) {
+ int32_t code = 0;
SCtgDBCache *dbCache = NULL;
ctgGetDBCache(pCtg, dbFName, &dbCache);
-
+
if (dbCache) {
- // TODO OPEN IT
+ // TODO OPEN IT
#if 0
if (dbCache->dbId == dbId) {
*pCache = dbCache;
@@ -1368,7 +1304,7 @@ int32_t ctgGetAddDBCache(SCatalog* pCtg, const char *dbFName, uint64_t dbId, SCt
*pCache = dbCache;
return TSDB_CODE_SUCCESS;
}
-
+
if (dbCache->dbId == dbId) {
*pCache = dbCache;
return TSDB_CODE_SUCCESS;
@@ -1376,7 +1312,7 @@ int32_t ctgGetAddDBCache(SCatalog* pCtg, const char *dbFName, uint64_t dbId, SCt
#endif
CTG_ERR_RET(ctgRemoveDBFromCache(pCtg, dbCache, dbFName));
}
-
+
CTG_ERR_RET(ctgAddNewDBCache(pCtg, dbFName, dbId));
ctgGetDBCache(pCtg, dbFName, &dbCache);
@@ -1386,7 +1322,8 @@ int32_t ctgGetAddDBCache(SCatalog* pCtg, const char *dbFName, uint64_t dbId, SCt
return TSDB_CODE_SUCCESS;
}
-int32_t ctgUpdateRentStbVersion(SCatalog *pCtg, char* dbFName, char* tbName, uint64_t dbId, uint64_t suid, SCtgTbCache* pCache) {
+int32_t ctgUpdateRentStbVersion(SCatalog *pCtg, char *dbFName, char *tbName, uint64_t dbId, uint64_t suid,
+ SCtgTbCache *pCache) {
SSTableVersion metaRent = {.dbId = dbId, .suid = suid};
if (pCache->pMeta) {
metaRent.sversion = pCache->pMeta->sversion;
@@ -1396,49 +1333,51 @@ int32_t ctgUpdateRentStbVersion(SCatalog *pCtg, char* dbFName, char* tbName, uin
if (pCache->pIndex) {
metaRent.smaVer = pCache->pIndex->version;
}
-
+
strcpy(metaRent.dbFName, dbFName);
strcpy(metaRent.stbName, tbName);
-
- CTG_ERR_RET(ctgMetaRentUpdate(&pCtg->stbRent, &metaRent, metaRent.suid, sizeof(SSTableVersion), ctgStbVersionSortCompare, ctgStbVersionSearchCompare));
- ctgDebug("db %s,0x%" PRIx64 " stb %s,0x%" PRIx64 " sver %d tver %d smaVer %d updated to stbRent",
- dbFName, dbId, tbName, suid, metaRent.sversion, metaRent.tversion, metaRent.smaVer);
+ CTG_ERR_RET(ctgMetaRentUpdate(&pCtg->stbRent, &metaRent, metaRent.suid, sizeof(SSTableVersion),
+ ctgStbVersionSortCompare, ctgStbVersionSearchCompare));
- return TSDB_CODE_SUCCESS;
-}
+ ctgDebug("db %s,0x%" PRIx64 " stb %s,0x%" PRIx64 " sver %d tver %d smaVer %d updated to stbRent", dbFName, dbId,
+ tbName, suid, metaRent.sversion, metaRent.tversion, metaRent.smaVer);
+ return TSDB_CODE_SUCCESS;
+}
-int32_t ctgWriteTbMetaToCache(SCatalog *pCtg, SCtgDBCache *dbCache, char *dbFName, uint64_t dbId, char *tbName, STableMeta *meta, int32_t metaSize) {
+int32_t ctgWriteTbMetaToCache(SCatalog *pCtg, SCtgDBCache *dbCache, char *dbFName, uint64_t dbId, char *tbName,
+ STableMeta *meta, int32_t metaSize) {
if (NULL == dbCache->tbCache || NULL == dbCache->stbCache) {
taosMemoryFree(meta);
- ctgError("db is dropping, dbId:0x%"PRIx64, dbCache->dbId);
+ ctgError("db is dropping, dbId:0x%" PRIx64, dbCache->dbId);
CTG_ERR_RET(TSDB_CODE_CTG_DB_DROPPED);
}
- bool isStb = meta->tableType == TSDB_SUPER_TABLE;
- SCtgTbCache* pCache = taosHashGet(dbCache->tbCache, tbName, strlen(tbName));
- STableMeta *orig = (pCache ? pCache->pMeta : NULL);
- int8_t origType = 0;
- uint64_t origSuid = 0;
-
+ bool isStb = meta->tableType == TSDB_SUPER_TABLE;
+ SCtgTbCache *pCache = taosHashGet(dbCache->tbCache, tbName, strlen(tbName));
+ STableMeta *orig = (pCache ? pCache->pMeta : NULL);
+ int8_t origType = 0;
+ uint64_t origSuid = 0;
+
if (orig) {
origType = orig->tableType;
- if (origType == meta->tableType && orig->uid == meta->uid && (origType == TSDB_CHILD_TABLE || (orig->sversion >= meta->sversion && orig->tversion >= meta->tversion))) {
+ if (origType == meta->tableType && orig->uid == meta->uid &&
+ (origType == TSDB_CHILD_TABLE || (orig->sversion >= meta->sversion && orig->tversion >= meta->tversion))) {
taosMemoryFree(meta);
ctgDebug("ignore table %s meta update", tbName);
return TSDB_CODE_SUCCESS;
}
-
+
if (origType == TSDB_SUPER_TABLE) {
if (taosHashRemove(dbCache->stbCache, &orig->suid, sizeof(orig->suid))) {
- ctgError("stb not exist in stbCache, dbFName:%s, stb:%s, suid:0x%"PRIx64, dbFName, tbName, orig->suid);
+ ctgError("stb not exist in stbCache, dbFName:%s, stb:%s, suid:0x%" PRIx64, dbFName, tbName, orig->suid);
} else {
CTG_CACHE_STAT_DEC(numOfStb, 1);
- ctgDebug("stb removed from stbCache, dbFName:%s, stb:%s, suid:0x%"PRIx64, dbFName, tbName, orig->suid);
+ ctgDebug("stb removed from stbCache, dbFName:%s, stb:%s, suid:0x%" PRIx64, dbFName, tbName, orig->suid);
}
-
+
origSuid = orig->suid;
}
}
@@ -1451,7 +1390,7 @@ int32_t ctgWriteTbMetaToCache(SCatalog *pCtg, SCtgDBCache *dbCache, char *dbFNam
ctgError("taosHashPut new tbCache failed, dbFName:%s, tbName:%s, tbType:%d", dbFName, tbName, meta->tableType);
CTG_ERR_RET(TSDB_CODE_OUT_OF_MEMORY);
}
-
+
pCache = taosHashGet(dbCache->tbCache, tbName, strlen(tbName));
} else {
taosMemoryFree(pCache->pMeta);
@@ -1469,35 +1408,37 @@ int32_t ctgWriteTbMetaToCache(SCatalog *pCtg, SCtgDBCache *dbCache, char *dbFNam
return TSDB_CODE_SUCCESS;
}
- if (origSuid != meta->suid && taosHashPut(dbCache->stbCache, &meta->suid, sizeof(meta->suid), tbName, strlen(tbName) + 1) != 0) {
- ctgError("taosHashPut to stable cache failed, suid:0x%"PRIx64, meta->suid);
+ if (origSuid != meta->suid &&
+ taosHashPut(dbCache->stbCache, &meta->suid, sizeof(meta->suid), tbName, strlen(tbName) + 1) != 0) {
+ ctgError("taosHashPut to stable cache failed, suid:0x%" PRIx64, meta->suid);
CTG_ERR_RET(TSDB_CODE_OUT_OF_MEMORY);
}
CTG_CACHE_STAT_INC(numOfStb, 1);
- ctgDebug("stb 0x%" PRIx64 " updated to cache, dbFName:%s, tbName:%s, tbType:%d", meta->suid, dbFName, tbName, meta->tableType);
+ ctgDebug("stb 0x%" PRIx64 " updated to cache, dbFName:%s, tbName:%s, tbType:%d", meta->suid, dbFName, tbName,
+ meta->tableType);
CTG_ERR_RET(ctgUpdateRentStbVersion(pCtg, dbFName, tbName, dbId, meta->suid, pCache));
-
+
return TSDB_CODE_SUCCESS;
}
-int32_t ctgWriteTbIndexToCache(SCatalog *pCtg, SCtgDBCache *dbCache, char* dbFName, char *tbName, STableIndex **index) {
+int32_t ctgWriteTbIndexToCache(SCatalog *pCtg, SCtgDBCache *dbCache, char *dbFName, char *tbName, STableIndex **index) {
if (NULL == dbCache->tbCache) {
ctgFreeSTableIndex(*index);
taosMemoryFreeClear(*index);
- ctgError("db is dropping, dbId:0x%"PRIx64, dbCache->dbId);
+ ctgError("db is dropping, dbId:0x%" PRIx64, dbCache->dbId);
CTG_ERR_RET(TSDB_CODE_CTG_DB_DROPPED);
}
- STableIndex* pIndex = *index;
- uint64_t suid = pIndex->suid;
- SCtgTbCache* pCache = taosHashGet(dbCache->tbCache, tbName, strlen(tbName));
+ STableIndex *pIndex = *index;
+ uint64_t suid = pIndex->suid;
+ SCtgTbCache *pCache = taosHashGet(dbCache->tbCache, tbName, strlen(tbName));
if (NULL == pCache) {
SCtgTbCache cache = {0};
cache.pIndex = pIndex;
-
+
if (taosHashPut(dbCache->tbCache, tbName, strlen(tbName), &cache, sizeof(cache)) != 0) {
ctgFreeSTableIndex(*index);
taosMemoryFreeClear(*index);
@@ -1506,12 +1447,13 @@ int32_t ctgWriteTbIndexToCache(SCatalog *pCtg, SCtgDBCache *dbCache, char* dbFNa
}
*index = NULL;
- ctgDebug("table %s index updated to cache, ver:%d, num:%d", tbName, pIndex->version, (int32_t)taosArrayGetSize(pIndex->pIndex));
+ ctgDebug("table %s index updated to cache, ver:%d, num:%d", tbName, pIndex->version,
+ (int32_t)taosArrayGetSize(pIndex->pIndex));
if (suid) {
CTG_ERR_RET(ctgUpdateRentStbVersion(pCtg, dbFName, tbName, dbCache->dbId, pIndex->suid, &cache));
}
-
+
return TSDB_CODE_SUCCESS;
}
@@ -1526,24 +1468,25 @@ int32_t ctgWriteTbIndexToCache(SCatalog *pCtg, SCtgDBCache *dbCache, char* dbFNa
pCache->pIndex = pIndex;
*index = NULL;
- ctgDebug("table %s index updated to cache, ver:%d, num:%d", tbName, pIndex->version, (int32_t)taosArrayGetSize(pIndex->pIndex));
+ ctgDebug("table %s index updated to cache, ver:%d, num:%d", tbName, pIndex->version,
+ (int32_t)taosArrayGetSize(pIndex->pIndex));
if (suid) {
CTG_ERR_RET(ctgUpdateRentStbVersion(pCtg, dbFName, tbName, dbCache->dbId, suid, pCache));
}
-
+
return TSDB_CODE_SUCCESS;
}
-int32_t ctgUpdateTbMetaToCache(SCatalog* pCtg, STableMetaOutput* pOut, bool syncReq) {
- STableMetaOutput* pOutput = NULL;
- int32_t code = 0;
-
+int32_t ctgUpdateTbMetaToCache(SCatalog *pCtg, STableMetaOutput *pOut, bool syncReq) {
+ STableMetaOutput *pOutput = NULL;
+ int32_t code = 0;
+
CTG_ERR_RET(ctgCloneMetaOutput(pOut, &pOutput));
CTG_ERR_JRET(ctgUpdateTbMetaEnqueue(pCtg, pOutput, syncReq));
return TSDB_CODE_SUCCESS;
-
+
_return:
ctgFreeSTableMetaOutput(pOutput);
@@ -1551,11 +1494,11 @@ _return:
}
void ctgClearAllInstance(void) {
- SCatalog* pCtg = NULL;
+ SCatalog *pCtg = NULL;
- void* pIter = taosHashIterate(gCtgMgmt.pCluster, NULL);
+ void *pIter = taosHashIterate(gCtgMgmt.pCluster, NULL);
while (pIter) {
- pCtg = *(SCatalog**)pIter;
+ pCtg = *(SCatalog **)pIter;
if (pCtg) {
ctgClearHandle(pCtg);
@@ -1566,11 +1509,11 @@ void ctgClearAllInstance(void) {
}
void ctgFreeAllInstance(void) {
- SCatalog* pCtg = NULL;
+ SCatalog *pCtg = NULL;
- void* pIter = taosHashIterate(gCtgMgmt.pCluster, NULL);
+ void *pIter = taosHashIterate(gCtgMgmt.pCluster, NULL);
while (pIter) {
- pCtg = *(SCatalog**)pIter;
+ pCtg = *(SCatalog **)pIter;
if (pCtg) {
ctgFreeHandle(pCtg);
@@ -1582,51 +1525,51 @@ void ctgFreeAllInstance(void) {
taosHashClear(gCtgMgmt.pCluster);
}
-
int32_t ctgOpUpdateVgroup(SCtgCacheOperation *operation) {
- int32_t code = 0;
+ int32_t code = 0;
SCtgUpdateVgMsg *msg = operation->data;
- SDBVgInfo* dbInfo = msg->dbInfo;
- char* dbFName = msg->dbFName;
- SCatalog* pCtg = msg->pCtg;
-
+ SDBVgInfo *dbInfo = msg->dbInfo;
+ char *dbFName = msg->dbFName;
+ SCatalog *pCtg = msg->pCtg;
+
if (NULL == dbInfo->vgHash) {
goto _return;
}
-
+
if (dbInfo->vgVersion < 0 || taosHashGetSize(dbInfo->vgHash) <= 0) {
- ctgError("invalid db vgInfo, dbFName:%s, vgHash:%p, vgVersion:%d, vgHashSize:%d",
- dbFName, dbInfo->vgHash, dbInfo->vgVersion, taosHashGetSize(dbInfo->vgHash));
+ ctgError("invalid db vgInfo, dbFName:%s, vgHash:%p, vgVersion:%d, vgHashSize:%d", dbFName, dbInfo->vgHash,
+ dbInfo->vgVersion, taosHashGetSize(dbInfo->vgHash));
CTG_ERR_JRET(TSDB_CODE_APP_ERROR);
}
- bool newAdded = false;
+ bool newAdded = false;
SDbVgVersion vgVersion = {.dbId = msg->dbId, .vgVersion = dbInfo->vgVersion, .numOfTable = dbInfo->numOfTable};
SCtgDBCache *dbCache = NULL;
CTG_ERR_JRET(ctgGetAddDBCache(msg->pCtg, dbFName, msg->dbId, &dbCache));
if (NULL == dbCache) {
- ctgInfo("conflict db update, ignore this update, dbFName:%s, dbId:0x%"PRIx64, dbFName, msg->dbId);
+ ctgInfo("conflict db update, ignore this update, dbFName:%s, dbId:0x%" PRIx64, dbFName, msg->dbId);
CTG_ERR_JRET(TSDB_CODE_CTG_INTERNAL_ERROR);
}
SCtgVgCache *vgCache = &dbCache->vgCache;
CTG_ERR_JRET(ctgWLockVgInfo(msg->pCtg, dbCache));
-
+
if (vgCache->vgInfo) {
SDBVgInfo *vgInfo = vgCache->vgInfo;
-
+
if (dbInfo->vgVersion < vgInfo->vgVersion) {
ctgDebug("db vgVer is old, dbFName:%s, vgVer:%d, curVer:%d", dbFName, dbInfo->vgVersion, vgInfo->vgVersion);
ctgWUnlockVgInfo(dbCache);
-
+
goto _return;
}
if (dbInfo->vgVersion == vgInfo->vgVersion && dbInfo->numOfTable == vgInfo->numOfTable) {
- ctgDebug("no new db vgVer or numOfTable, dbFName:%s, vgVer:%d, numOfTable:%d", dbFName, dbInfo->vgVersion, dbInfo->numOfTable);
+ ctgDebug("no new db vgVer or numOfTable, dbFName:%s, vgVer:%d, numOfTable:%d", dbFName, dbInfo->vgVersion,
+ dbInfo->numOfTable);
ctgWUnlockVgInfo(dbCache);
-
+
goto _return;
}
@@ -1636,61 +1579,63 @@ int32_t ctgOpUpdateVgroup(SCtgCacheOperation *operation) {
vgCache->vgInfo = dbInfo;
msg->dbInfo = NULL;
- ctgDebug("db vgInfo updated, dbFName:%s, vgVer:%d, dbId:0x%"PRIx64, dbFName, vgVersion.vgVersion, vgVersion.dbId);
+ ctgDebug("db vgInfo updated, dbFName:%s, vgVer:%d, dbId:0x%" PRIx64, dbFName, vgVersion.vgVersion, vgVersion.dbId);
ctgWUnlockVgInfo(dbCache);
dbCache = NULL;
strncpy(vgVersion.dbFName, dbFName, sizeof(vgVersion.dbFName));
- CTG_ERR_RET(ctgMetaRentUpdate(&msg->pCtg->dbRent, &vgVersion, vgVersion.dbId, sizeof(SDbVgVersion), ctgDbVgVersionSortCompare, ctgDbVgVersionSearchCompare));
+ CTG_ERR_JRET(ctgMetaRentUpdate(&msg->pCtg->dbRent, &vgVersion, vgVersion.dbId, sizeof(SDbVgVersion),
+ ctgDbVgVersionSortCompare, ctgDbVgVersionSearchCompare));
_return:
ctgFreeVgInfo(msg->dbInfo);
taosMemoryFreeClear(msg);
-
+
CTG_RET(code);
}
int32_t ctgOpDropDbCache(SCtgCacheOperation *operation) {
- int32_t code = 0;
+ int32_t code = 0;
SCtgDropDBMsg *msg = operation->data;
- SCatalog* pCtg = msg->pCtg;
+ SCatalog *pCtg = msg->pCtg;
SCtgDBCache *dbCache = NULL;
ctgGetDBCache(msg->pCtg, msg->dbFName, &dbCache);
if (NULL == dbCache) {
goto _return;
}
-
+
if (dbCache->dbId != msg->dbId) {
- ctgInfo("dbId already updated, dbFName:%s, dbId:0x%"PRIx64 ", targetId:0x%"PRIx64, msg->dbFName, dbCache->dbId, msg->dbId);
+ ctgInfo("dbId already updated, dbFName:%s, dbId:0x%" PRIx64 ", targetId:0x%" PRIx64, msg->dbFName, dbCache->dbId,
+ msg->dbId);
goto _return;
}
-
+
CTG_ERR_JRET(ctgRemoveDBFromCache(pCtg, dbCache, msg->dbFName));
_return:
taosMemoryFreeClear(msg);
-
+
CTG_RET(code);
}
int32_t ctgOpDropDbVgroup(SCtgCacheOperation *operation) {
- int32_t code = 0;
+ int32_t code = 0;
SCtgDropDbVgroupMsg *msg = operation->data;
- SCatalog* pCtg = msg->pCtg;
+ SCatalog *pCtg = msg->pCtg;
SCtgDBCache *dbCache = NULL;
ctgGetDBCache(msg->pCtg, msg->dbFName, &dbCache);
if (NULL == dbCache) {
goto _return;
}
-
- CTG_ERR_RET(ctgWLockVgInfo(pCtg, dbCache));
-
+
+ CTG_ERR_JRET(ctgWLockVgInfo(pCtg, dbCache));
+
ctgFreeVgInfo(dbCache->vgCache.vgInfo);
dbCache->vgCache.vgInfo = NULL;
@@ -1701,17 +1646,16 @@ int32_t ctgOpDropDbVgroup(SCtgCacheOperation *operation) {
_return:
taosMemoryFreeClear(msg);
-
+
CTG_RET(code);
}
-
int32_t ctgOpUpdateTbMeta(SCtgCacheOperation *operation) {
- int32_t code = 0;
+ int32_t code = 0;
SCtgUpdateTbMetaMsg *msg = operation->data;
- SCatalog* pCtg = msg->pCtg;
- STableMetaOutput* pMeta = msg->pMeta;
- SCtgDBCache *dbCache = NULL;
+ SCatalog *pCtg = msg->pCtg;
+ STableMetaOutput *pMeta = msg->pMeta;
+ SCtgDBCache *dbCache = NULL;
if ((!CTG_IS_META_CTABLE(pMeta->metaType)) && NULL == pMeta->tbMeta) {
ctgError("no valid tbmeta got from meta rsp, dbFName:%s, tbName:%s", pMeta->dbFName, pMeta->tbName);
@@ -1721,8 +1665,8 @@ int32_t ctgOpUpdateTbMeta(SCtgCacheOperation *operation) {
if (CTG_IS_META_BOTH(pMeta->metaType) && TSDB_SUPER_TABLE != pMeta->tbMeta->tableType) {
ctgError("table type error, expected:%d, actual:%d", TSDB_SUPER_TABLE, pMeta->tbMeta->tableType);
CTG_ERR_JRET(TSDB_CODE_CTG_INTERNAL_ERROR);
- }
-
+ }
+
CTG_ERR_JRET(ctgGetAddDBCache(pCtg, pMeta->dbFName, pMeta->dbId, &dbCache));
if (NULL == dbCache) {
ctgInfo("conflict db update, ignore this update, dbFName:%s, dbId:0x%" PRIx64, pMeta->dbFName, pMeta->dbId);
@@ -1731,17 +1675,19 @@ int32_t ctgOpUpdateTbMeta(SCtgCacheOperation *operation) {
if (CTG_IS_META_TABLE(pMeta->metaType) || CTG_IS_META_BOTH(pMeta->metaType)) {
int32_t metaSize = CTG_META_SIZE(pMeta->tbMeta);
- CTG_ERR_JRET(ctgWriteTbMetaToCache(pCtg, dbCache, pMeta->dbFName, pMeta->dbId, pMeta->tbName, pMeta->tbMeta, metaSize));
+ CTG_ERR_JRET(
+ ctgWriteTbMetaToCache(pCtg, dbCache, pMeta->dbFName, pMeta->dbId, pMeta->tbName, pMeta->tbMeta, metaSize));
pMeta->tbMeta = NULL;
}
if (CTG_IS_META_CTABLE(pMeta->metaType) || CTG_IS_META_BOTH(pMeta->metaType)) {
- SCTableMeta* ctbMeta = taosMemoryMalloc(sizeof(SCTableMeta));
+ SCTableMeta *ctbMeta = taosMemoryMalloc(sizeof(SCTableMeta));
if (NULL == ctbMeta) {
CTG_ERR_JRET(TSDB_CODE_OUT_OF_MEMORY);
}
memcpy(ctbMeta, &pMeta->ctbMeta, sizeof(SCTableMeta));
- CTG_ERR_JRET(ctgWriteTbMetaToCache(pCtg, dbCache, pMeta->dbFName, pMeta->dbId, pMeta->ctbName, (STableMeta *)ctbMeta, sizeof(SCTableMeta)));
+ CTG_ERR_JRET(ctgWriteTbMetaToCache(pCtg, dbCache, pMeta->dbFName, pMeta->dbId, pMeta->ctbName,
+ (STableMeta *)ctbMeta, sizeof(SCTableMeta)));
}
_return:
@@ -1750,37 +1696,37 @@ _return:
taosMemoryFreeClear(pMeta->tbMeta);
taosMemoryFreeClear(pMeta);
}
-
+
taosMemoryFreeClear(msg);
-
+
CTG_RET(code);
}
-
int32_t ctgOpDropStbMeta(SCtgCacheOperation *operation) {
- int32_t code = 0;
+ int32_t code = 0;
SCtgDropStbMetaMsg *msg = operation->data;
- SCatalog* pCtg = msg->pCtg;
+ SCatalog *pCtg = msg->pCtg;
SCtgDBCache *dbCache = NULL;
ctgGetDBCache(pCtg, msg->dbFName, &dbCache);
if (NULL == dbCache) {
- return TSDB_CODE_SUCCESS;
+ goto _return;
}
if (msg->dbId && (dbCache->dbId != msg->dbId)) {
- ctgDebug("dbId already modified, dbFName:%s, current:0x%"PRIx64", dbId:0x%"PRIx64", stb:%s, suid:0x%"PRIx64,
+ ctgDebug("dbId already modified, dbFName:%s, current:0x%" PRIx64 ", dbId:0x%" PRIx64 ", stb:%s, suid:0x%" PRIx64,
msg->dbFName, dbCache->dbId, msg->dbId, msg->stbName, msg->suid);
- return TSDB_CODE_SUCCESS;
+ goto _return;
}
-
+
if (taosHashRemove(dbCache->stbCache, &msg->suid, sizeof(msg->suid))) {
- ctgDebug("stb not exist in stbCache, may be removed, dbFName:%s, stb:%s, suid:0x%"PRIx64, msg->dbFName, msg->stbName, msg->suid);
+ ctgDebug("stb not exist in stbCache, may be removed, dbFName:%s, stb:%s, suid:0x%" PRIx64, msg->dbFName,
+ msg->stbName, msg->suid);
} else {
CTG_CACHE_STAT_DEC(numOfStb, 1);
}
- SCtgTbCache* pTbCache = taosHashGet(dbCache->tbCache, msg->stbName, strlen(msg->stbName));
+ SCtgTbCache *pTbCache = taosHashGet(dbCache->tbCache, msg->stbName, strlen(msg->stbName));
if (NULL == pTbCache) {
ctgDebug("stb %s already not in cache", msg->stbName);
goto _return;
@@ -1790,29 +1736,29 @@ int32_t ctgOpDropStbMeta(SCtgCacheOperation *operation) {
ctgFreeTbCacheImpl(pTbCache);
CTG_UNLOCK(CTG_WRITE, &pTbCache->metaLock);
- if (taosHashRemove(dbCache->tbCache, msg->stbName, strlen(msg->stbName))) {
- ctgError("stb not exist in cache, dbFName:%s, stb:%s, suid:0x%"PRIx64, msg->dbFName, msg->stbName, msg->suid);
+ if (taosHashRemove(dbCache->tbCache, msg->stbName, strlen(msg->stbName))) {
+ ctgError("stb not exist in cache, dbFName:%s, stb:%s, suid:0x%" PRIx64, msg->dbFName, msg->stbName, msg->suid);
} else {
CTG_CACHE_STAT_DEC(numOfTbl, 1);
}
-
- ctgInfo("stb removed from cache, dbFName:%s, stbName:%s, suid:0x%"PRIx64, msg->dbFName, msg->stbName, msg->suid);
+
+ ctgInfo("stb removed from cache, dbFName:%s, stbName:%s, suid:0x%" PRIx64, msg->dbFName, msg->stbName, msg->suid);
CTG_ERR_JRET(ctgMetaRentRemove(&msg->pCtg->stbRent, msg->suid, ctgStbVersionSortCompare, ctgStbVersionSearchCompare));
-
- ctgDebug("stb removed from rent, dbFName:%s, stbName:%s, suid:0x%"PRIx64, msg->dbFName, msg->stbName, msg->suid);
-
+
+ ctgDebug("stb removed from rent, dbFName:%s, stbName:%s, suid:0x%" PRIx64, msg->dbFName, msg->stbName, msg->suid);
+
_return:
taosMemoryFreeClear(msg);
-
+
CTG_RET(code);
}
int32_t ctgOpDropTbMeta(SCtgCacheOperation *operation) {
- int32_t code = 0;
+ int32_t code = 0;
SCtgDropTblMetaMsg *msg = operation->data;
- SCatalog* pCtg = msg->pCtg;
+ SCatalog *pCtg = msg->pCtg;
SCtgDBCache *dbCache = NULL;
ctgGetDBCache(pCtg, msg->dbFName, &dbCache);
@@ -1821,11 +1767,12 @@ int32_t ctgOpDropTbMeta(SCtgCacheOperation *operation) {
}
if (dbCache->dbId != msg->dbId) {
- ctgDebug("dbId 0x%" PRIx64 " not match with curId 0x%"PRIx64", dbFName:%s, tbName:%s", msg->dbId, dbCache->dbId, msg->dbFName, msg->tbName);
+ ctgDebug("dbId 0x%" PRIx64 " not match with curId 0x%" PRIx64 ", dbFName:%s, tbName:%s", msg->dbId, dbCache->dbId,
+ msg->dbFName, msg->tbName);
goto _return;
}
- SCtgTbCache* pTbCache = taosHashGet(dbCache->tbCache, msg->tbName, strlen(msg->tbName));
+ SCtgTbCache *pTbCache = taosHashGet(dbCache->tbCache, msg->tbName, strlen(msg->tbName));
if (NULL == pTbCache) {
ctgDebug("tb %s already not in cache", msg->tbName);
goto _return;
@@ -1834,7 +1781,7 @@ int32_t ctgOpDropTbMeta(SCtgCacheOperation *operation) {
CTG_LOCK(CTG_WRITE, &pTbCache->metaLock);
ctgFreeTbCacheImpl(pTbCache);
CTG_UNLOCK(CTG_WRITE, &pTbCache->metaLock);
-
+
if (taosHashRemove(dbCache->tbCache, msg->tbName, strlen(msg->tbName))) {
ctgError("tb %s not exist in cache, dbFName:%s", msg->tbName, msg->dbFName);
CTG_ERR_JRET(TSDB_CODE_CTG_INTERNAL_ERROR);
@@ -1852,10 +1799,10 @@ _return:
}
int32_t ctgOpUpdateUser(SCtgCacheOperation *operation) {
- int32_t code = 0;
+ int32_t code = 0;
SCtgUpdateUserMsg *msg = operation->data;
- SCatalog* pCtg = msg->pCtg;
-
+ SCatalog *pCtg = msg->pCtg;
+
SCtgUserAuth *pUser = (SCtgUserAuth *)taosHashGet(pCtg->userCache, msg->userAuth.user, strlen(msg->userAuth.user));
if (NULL == pUser) {
SCtgUserAuth userAuth = {0};
@@ -1899,17 +1846,17 @@ _return:
taosHashCleanup(msg->userAuth.createdDbs);
taosHashCleanup(msg->userAuth.readDbs);
taosHashCleanup(msg->userAuth.writeDbs);
-
+
taosMemoryFreeClear(msg);
-
+
CTG_RET(code);
}
int32_t ctgOpUpdateEpset(SCtgCacheOperation *operation) {
- int32_t code = 0;
+ int32_t code = 0;
SCtgUpdateEpsetMsg *msg = operation->data;
- SCatalog* pCtg = msg->pCtg;
-
+ SCatalog *pCtg = msg->pCtg;
+
SCtgDBCache *dbCache = NULL;
CTG_ERR_JRET(ctgGetDBCache(pCtg, msg->dbFName, &dbCache));
if (NULL == dbCache) {
@@ -1919,23 +1866,23 @@ int32_t ctgOpUpdateEpset(SCtgCacheOperation *operation) {
CTG_ERR_JRET(ctgWLockVgInfo(pCtg, dbCache));
- SDBVgInfo *vgInfo = dbCache->vgCache.vgInfo;
+ SDBVgInfo *vgInfo = dbCache->vgCache.vgInfo;
if (NULL == vgInfo) {
ctgDebug("vgroup in db %s not cached, ignore epset update", msg->dbFName);
goto _return;
}
-
- SVgroupInfo* pInfo = taosHashGet(vgInfo->vgHash, &msg->vgId, sizeof(msg->vgId));
+
+ SVgroupInfo *pInfo = taosHashGet(vgInfo->vgHash, &msg->vgId, sizeof(msg->vgId));
if (NULL == pInfo) {
ctgDebug("no vgroup %d in db %s, ignore epset update", msg->vgId, msg->dbFName);
goto _return;
}
- SEp* pOrigEp = &pInfo->epSet.eps[pInfo->epSet.inUse];
- SEp* pNewEp = &msg->epSet.eps[msg->epSet.inUse];
- ctgDebug("vgroup %d epset updated from %d/%d=>%s:%d to %d/%d=>%s:%d, dbFName:%s in ctg",
- pInfo->vgId, pInfo->epSet.inUse, pInfo->epSet.numOfEps, pOrigEp->fqdn, pOrigEp->port,
- msg->epSet.inUse, msg->epSet.numOfEps, pNewEp->fqdn, pNewEp->port, msg->dbFName);
+ SEp *pOrigEp = &pInfo->epSet.eps[pInfo->epSet.inUse];
+ SEp *pNewEp = &msg->epSet.eps[msg->epSet.inUse];
+ ctgDebug("vgroup %d epset updated from %d/%d=>%s:%d to %d/%d=>%s:%d, dbFName:%s in ctg", pInfo->vgId,
+ pInfo->epSet.inUse, pInfo->epSet.numOfEps, pOrigEp->fqdn, pOrigEp->port, msg->epSet.inUse,
+ msg->epSet.numOfEps, pNewEp->fqdn, pNewEp->port, msg->dbFName);
pInfo->epSet = msg->epSet;
@@ -1946,17 +1893,17 @@ _return:
}
taosMemoryFreeClear(msg);
-
+
CTG_RET(code);
}
int32_t ctgOpUpdateTbIndex(SCtgCacheOperation *operation) {
- int32_t code = 0;
+ int32_t code = 0;
SCtgUpdateTbIndexMsg *msg = operation->data;
- SCatalog* pCtg = msg->pCtg;
- STableIndex* pIndex = msg->pIndex;
- SCtgDBCache *dbCache = NULL;
-
+ SCatalog *pCtg = msg->pCtg;
+ STableIndex *pIndex = msg->pIndex;
+ SCtgDBCache *dbCache = NULL;
+
CTG_ERR_JRET(ctgGetAddDBCache(pCtg, pIndex->dbFName, 0, &dbCache));
CTG_ERR_JRET(ctgWriteTbIndexToCache(pCtg, dbCache, pIndex->dbFName, pIndex->tbName, &pIndex));
@@ -1967,24 +1914,24 @@ _return:
taosArrayDestroyEx(pIndex->pIndex, tFreeSTableIndexInfo);
taosMemoryFreeClear(pIndex);
}
-
+
taosMemoryFreeClear(msg);
-
+
CTG_RET(code);
}
int32_t ctgOpDropTbIndex(SCtgCacheOperation *operation) {
- int32_t code = 0;
+ int32_t code = 0;
SCtgDropTbIndexMsg *msg = operation->data;
- SCatalog* pCtg = msg->pCtg;
- SCtgDBCache *dbCache = NULL;
-
+ SCatalog *pCtg = msg->pCtg;
+ SCtgDBCache *dbCache = NULL;
+
CTG_ERR_JRET(ctgGetDBCache(pCtg, msg->dbFName, &dbCache));
if (NULL == dbCache) {
return TSDB_CODE_SUCCESS;
}
- STableIndex* pIndex = taosMemoryCalloc(1, sizeof(STableIndex));
+ STableIndex *pIndex = taosMemoryCalloc(1, sizeof(STableIndex));
if (NULL == pIndex) {
CTG_ERR_JRET(TSDB_CODE_OUT_OF_MEMORY);
}
@@ -2000,17 +1947,16 @@ _return:
taosArrayDestroyEx(pIndex->pIndex, tFreeSTableIndexInfo);
taosMemoryFreeClear(pIndex);
}
-
+
taosMemoryFreeClear(msg);
-
+
CTG_RET(code);
}
-
int32_t ctgOpClearCache(SCtgCacheOperation *operation) {
- int32_t code = 0;
+ int32_t code = 0;
SCtgClearCacheMsg *msg = operation->data;
- SCatalog* pCtg = msg->pCtg;
+ SCatalog *pCtg = msg->pCtg;
CTG_LOCK(CTG_WRITE, &gCtgMgmt.lock);
@@ -2020,7 +1966,7 @@ int32_t ctgOpClearCache(SCtgCacheOperation *operation) {
} else {
ctgClearHandle(pCtg);
}
-
+
goto _return;
}
@@ -2033,17 +1979,17 @@ int32_t ctgOpClearCache(SCtgCacheOperation *operation) {
_return:
CTG_UNLOCK(CTG_WRITE, &gCtgMgmt.lock);
-
+
taosMemoryFreeClear(msg);
-
+
CTG_RET(code);
}
void ctgCleanupCacheQueue(void) {
- SCtgQNode *node = NULL;
- SCtgQNode *nodeNext = NULL;
+ SCtgQNode *node = NULL;
+ SCtgQNode *nodeNext = NULL;
SCtgCacheOperation *op = NULL;
- bool stopQueue = false;
+ bool stopQueue = false;
while (true) {
node = gCtgMgmt.queue.head->next;
@@ -2055,12 +2001,12 @@ void ctgCleanupCacheQueue(void) {
ctgDebug("process [%s] operation", gCtgCacheOperation[op->opId].name);
(*gCtgCacheOperation[op->opId].func)(op);
stopQueue = true;
- CTG_RT_STAT_INC(numOfOpDequeue, 1);
+ CTG_RT_STAT_INC(numOfOpDequeue, 1);
} else {
taosMemoryFree(op->data);
- CTG_RT_STAT_INC(numOfOpAbort, 1);
+ CTG_RT_STAT_INC(numOfOpAbort, 1);
}
-
+
if (op->syncOp) {
tsem_post(&op->rspSem);
} else {
@@ -2070,7 +2016,7 @@ void ctgCleanupCacheQueue(void) {
nodeNext = node->next;
taosMemoryFree(node);
-
+
node = nodeNext;
}
@@ -2085,7 +2031,7 @@ void ctgCleanupCacheQueue(void) {
gCtgMgmt.queue.tail = NULL;
}
-void* ctgUpdateThreadFunc(void* param) {
+void *ctgUpdateThreadFunc(void *param) {
setThreadName("catalog");
qInfo("catalog update thread started");
@@ -2094,8 +2040,8 @@ void* ctgUpdateThreadFunc(void* param) {
if (tsem_wait(&gCtgMgmt.queue.reqSem)) {
qError("ctg tsem_wait failed, error:%s", tstrerror(TAOS_SYSTEM_ERROR(errno)));
}
-
- if (atomic_load_8((int8_t*)&gCtgMgmt.exit)) {
+
+ if (atomic_load_8((int8_t *)&gCtgMgmt.exit)) {
ctgCleanupCacheQueue();
break;
}
@@ -2105,7 +2051,7 @@ void* ctgUpdateThreadFunc(void* param) {
SCatalog *pCtg = ((SCtgUpdateMsgHeader *)operation->data)->pCtg;
ctgDebug("process [%s] operation", gCtgCacheOperation[operation->opId].name);
-
+
(*gCtgCacheOperation[operation->opId].func)(operation);
if (operation->syncOp) {
@@ -2114,18 +2060,17 @@ void* ctgUpdateThreadFunc(void* param) {
taosMemoryFreeClear(operation);
}
- CTG_RT_STAT_INC(numOfOpDequeue, 1);
+ CTG_RT_STAT_INC(numOfOpDequeue, 1);
ctgdShowCacheInfo();
ctgdShowClusterCache(pCtg);
}
qInfo("catalog update thread stopped");
-
+
return NULL;
}
-
int32_t ctgStartUpdateThread() {
TdThreadAttr thAttr;
taosThreadAttrInit(&thAttr);
@@ -2135,13 +2080,12 @@ int32_t ctgStartUpdateThread() {
terrno = TAOS_SYSTEM_ERROR(errno);
CTG_ERR_RET(terrno);
}
-
+
taosThreadAttrDestroy(&thAttr);
return TSDB_CODE_SUCCESS;
}
-
-int32_t ctgGetTbMetaFromCache(SCatalog* pCtg, SRequestConnInfo *pConn, SCtgTbMetaCtx* ctx, STableMeta** pTableMeta) {
+int32_t ctgGetTbMetaFromCache(SCatalog *pCtg, SRequestConnInfo *pConn, SCtgTbMetaCtx *ctx, STableMeta **pTableMeta) {
if (IS_SYS_DBNAME(ctx->pName->dbname)) {
CTG_FLAG_SET_SYS_DB(ctx->flag);
}
@@ -2221,14 +2165,15 @@ int32_t ctgGetTbMetaBFromCache(SCatalog* pCtg, SRequestConnInfo *pConn, SCtgTbMe
}
#endif
-int32_t ctgGetTbMetasFromCache(SCatalog* pCtg, SRequestConnInfo *pConn, SCtgTbMetasCtx* ctx, int32_t dbIdx, int32_t *fetchIdx, int32_t baseResIdx, SArray* pList) {
- int32_t tbNum = taosArrayGetSize(pList);
- SName* pName = taosArrayGet(pList, 0);
- char dbFName[TSDB_DB_FNAME_LEN] = {0};
- int32_t flag = CTG_FLAG_UNKNOWN_STB;
- uint64_t lastSuid = 0;
- STableMeta* lastTableMeta = NULL;
-
+int32_t ctgGetTbMetasFromCache(SCatalog *pCtg, SRequestConnInfo *pConn, SCtgTbMetasCtx *ctx, int32_t dbIdx,
+ int32_t *fetchIdx, int32_t baseResIdx, SArray *pList) {
+ int32_t tbNum = taosArrayGetSize(pList);
+ SName *pName = taosArrayGet(pList, 0);
+ char dbFName[TSDB_DB_FNAME_LEN] = {0};
+ int32_t flag = CTG_FLAG_UNKNOWN_STB;
+ uint64_t lastSuid = 0;
+ STableMeta *lastTableMeta = NULL;
+
if (IS_SYS_DBNAME(pName->dbname)) {
CTG_FLAG_SET_SYS_DB(flag);
strcpy(dbFName, pName->dbname);
@@ -2237,9 +2182,9 @@ int32_t ctgGetTbMetasFromCache(SCatalog* pCtg, SRequestConnInfo *pConn, SCtgTbMe
}
SCtgDBCache *dbCache = NULL;
- SCtgTbCache* pCache = NULL;
+ SCtgTbCache *pCache = NULL;
ctgAcquireDBCache(pCtg, dbFName, &dbCache);
-
+
if (NULL == dbCache) {
ctgDebug("db %s not in cache", dbFName);
for (int32_t i = 0; i < tbNum; ++i) {
@@ -2251,14 +2196,14 @@ int32_t ctgGetTbMetasFromCache(SCatalog* pCtg, SRequestConnInfo *pConn, SCtgTbMe
}
for (int32_t i = 0; i < tbNum; ++i) {
- SName* pName = taosArrayGet(pList, i);
+ SName *pName = taosArrayGet(pList, i);
pCache = taosHashAcquire(dbCache->tbCache, pName->tname, strlen(pName->tname));
if (NULL == pCache) {
ctgDebug("tb %s not in cache, dbFName:%s", pName->tname, dbFName);
ctgAddFetch(&ctx->pFetchs, dbIdx, i, fetchIdx, baseResIdx + i, flag);
taosArraySetSize(ctx->pResList, taosArrayGetSize(ctx->pResList) + 1);
-
+
continue;
}
@@ -2267,11 +2212,11 @@ int32_t ctgGetTbMetasFromCache(SCatalog* pCtg, SRequestConnInfo *pConn, SCtgTbMe
ctgDebug("tb %s meta not in cache, dbFName:%s", pName->tname, dbFName);
ctgAddFetch(&ctx->pFetchs, dbIdx, i, fetchIdx, baseResIdx + i, flag);
taosArraySetSize(ctx->pResList, taosArrayGetSize(ctx->pResList) + 1);
-
+
continue;
}
- STableMeta* tbMeta = pCache->pMeta;
+ STableMeta *tbMeta = pCache->pMeta;
SCtgTbMetaCtx nctx = {0};
nctx.flag = flag;
@@ -2280,8 +2225,8 @@ int32_t ctgGetTbMetasFromCache(SCatalog* pCtg, SRequestConnInfo *pConn, SCtgTbMe
nctx.tbInfo.suid = tbMeta->suid;
nctx.tbInfo.tbType = tbMeta->tableType;
- SMetaRes res = {0};
- STableMeta* pTableMeta = NULL;
+ SMetaRes res = {0};
+ STableMeta *pTableMeta = NULL;
if (tbMeta->tableType != TSDB_CHILD_TABLE) {
int32_t metaSize = CTG_META_SIZE(tbMeta);
pTableMeta = taosMemoryCalloc(1, metaSize);
@@ -2289,20 +2234,20 @@ int32_t ctgGetTbMetasFromCache(SCatalog* pCtg, SRequestConnInfo *pConn, SCtgTbMe
ctgReleaseTbMetaToCache(pCtg, dbCache, pCache);
CTG_ERR_RET(TSDB_CODE_OUT_OF_MEMORY);
}
-
+
memcpy(pTableMeta, tbMeta, metaSize);
-
+
CTG_UNLOCK(CTG_READ, &pCache->metaLock);
- taosHashRelease(dbCache->tbCache, pCache);
-
+ taosHashRelease(dbCache->tbCache, pCache);
+
ctgDebug("Got tb %s meta from cache, type:%d, dbFName:%s", pName->tname, tbMeta->tableType, dbFName);
-
+
res.pRes = pTableMeta;
taosArrayPush(ctx->pResList, &res);
continue;
}
-
+
// PROCESS FOR CHILD TABLE
if (lastSuid && tbMeta->suid == lastSuid && lastTableMeta) {
@@ -2310,32 +2255,32 @@ int32_t ctgGetTbMetasFromCache(SCatalog* pCtg, SRequestConnInfo *pConn, SCtgTbMe
memcpy(pTableMeta, tbMeta, sizeof(SCTableMeta));
CTG_UNLOCK(CTG_READ, &pCache->metaLock);
- taosHashRelease(dbCache->tbCache, pCache);
-
+ taosHashRelease(dbCache->tbCache, pCache);
+
ctgDebug("Got tb %s meta from cache, type:%d, dbFName:%s", pName->tname, tbMeta->tableType, dbFName);
-
+
res.pRes = pTableMeta;
taosArrayPush(ctx->pResList, &res);
-
+
continue;
}
-
+
int32_t metaSize = sizeof(SCTableMeta);
pTableMeta = taosMemoryCalloc(1, metaSize);
if (NULL == pTableMeta) {
- ctgReleaseTbMetaToCache(pCtg, dbCache, pCache);
+ ctgReleaseTbMetaToCache(pCtg, dbCache, pCache);
CTG_ERR_RET(TSDB_CODE_OUT_OF_MEMORY);
}
-
+
memcpy(pTableMeta, tbMeta, metaSize);
-
+
CTG_UNLOCK(CTG_READ, &pCache->metaLock);
- taosHashRelease(dbCache->tbCache, pCache);
-
- ctgDebug("Got ctb %s meta from cache, will continue to get its stb meta, type:%d, dbFName:%s",
- pName->tname, nctx.tbInfo.tbType, dbFName);
-
- char* stName = taosHashAcquire(dbCache->stbCache, &pTableMeta->suid, sizeof(pTableMeta->suid));
+ taosHashRelease(dbCache->tbCache, pCache);
+
+ ctgDebug("Got ctb %s meta from cache, will continue to get its stb meta, type:%d, dbFName:%s", pName->tname,
+ nctx.tbInfo.tbType, dbFName);
+
+ char *stName = taosHashAcquire(dbCache->stbCache, &pTableMeta->suid, sizeof(pTableMeta->suid));
if (NULL == stName) {
ctgDebug("stb 0x%" PRIx64 " not in cache, dbFName:%s", pTableMeta->suid, dbFName);
ctgAddFetch(&ctx->pFetchs, dbIdx, i, fetchIdx, baseResIdx + i, flag);
@@ -2349,11 +2294,11 @@ int32_t ctgGetTbMetasFromCache(SCatalog* pCtg, SRequestConnInfo *pConn, SCtgTbMe
if (NULL == pCache) {
ctgDebug("stb 0x%" PRIx64 " name %s not in cache, dbFName:%s", pTableMeta->suid, stName, dbFName);
taosHashRelease(dbCache->stbCache, stName);
-
+
ctgAddFetch(&ctx->pFetchs, dbIdx, i, fetchIdx, baseResIdx + i, flag);
taosArraySetSize(ctx->pResList, taosArrayGetSize(ctx->pResList) + 1);
- taosMemoryFreeClear(pTableMeta);
+ taosMemoryFreeClear(pTableMeta);
continue;
}
@@ -2363,8 +2308,8 @@ int32_t ctgGetTbMetasFromCache(SCatalog* pCtg, SRequestConnInfo *pConn, SCtgTbMe
if (NULL == pCache->pMeta) {
ctgDebug("stb 0x%" PRIx64 " meta not in cache, dbFName:%s", pTableMeta->suid, dbFName);
CTG_UNLOCK(CTG_READ, &pCache->metaLock);
- taosHashRelease(dbCache->tbCache, pCache);
-
+ taosHashRelease(dbCache->tbCache, pCache);
+
ctgAddFetch(&ctx->pFetchs, dbIdx, i, fetchIdx, baseResIdx + i, flag);
taosArraySetSize(ctx->pResList, taosArrayGetSize(ctx->pResList) + 1);
@@ -2372,14 +2317,15 @@ int32_t ctgGetTbMetasFromCache(SCatalog* pCtg, SRequestConnInfo *pConn, SCtgTbMe
continue;
}
-
- STableMeta* stbMeta = pCache->pMeta;
- if (stbMeta->suid != nctx.tbInfo.suid) {
+
+ STableMeta *stbMeta = pCache->pMeta;
+ if (stbMeta->suid != nctx.tbInfo.suid) {
CTG_UNLOCK(CTG_READ, &pCache->metaLock);
- taosHashRelease(dbCache->tbCache, pCache);
-
- ctgError("stb suid 0x%" PRIx64 " in stbCache mis-match, expected suid 0x%"PRIx64 , stbMeta->suid, nctx.tbInfo.suid);
-
+ taosHashRelease(dbCache->tbCache, pCache);
+
+ ctgError("stb suid 0x%" PRIx64 " in stbCache mis-match, expected suid 0x%" PRIx64, stbMeta->suid,
+ nctx.tbInfo.suid);
+
ctgAddFetch(&ctx->pFetchs, dbIdx, i, fetchIdx, baseResIdx + i, flag);
taosArraySetSize(ctx->pResList, taosArrayGetSize(ctx->pResList) + 1);
@@ -2387,19 +2333,19 @@ int32_t ctgGetTbMetasFromCache(SCatalog* pCtg, SRequestConnInfo *pConn, SCtgTbMe
continue;
}
-
+
metaSize = CTG_META_SIZE(stbMeta);
pTableMeta = taosMemoryRealloc(pTableMeta, metaSize);
- if (NULL == pTableMeta) {
+ if (NULL == pTableMeta) {
ctgReleaseTbMetaToCache(pCtg, dbCache, pCache);
CTG_ERR_RET(TSDB_CODE_OUT_OF_MEMORY);
}
-
+
memcpy(&pTableMeta->sversion, &stbMeta->sversion, metaSize - sizeof(SCTableMeta));
-
+
CTG_UNLOCK(CTG_READ, &pCache->metaLock);
- taosHashRelease(dbCache->tbCache, pCache);
-
+ taosHashRelease(dbCache->tbCache, pCache);
+
res.pRes = pTableMeta;
taosArrayPush(ctx->pResList, &res);
@@ -2408,14 +2354,13 @@ int32_t ctgGetTbMetasFromCache(SCatalog* pCtg, SRequestConnInfo *pConn, SCtgTbMe
}
ctgReleaseDBCache(pCtg, dbCache);
-
+
return TSDB_CODE_SUCCESS;
}
-
-int32_t ctgRemoveTbMetaFromCache(SCatalog* pCtg, SName* pTableName, bool syncReq) {
+int32_t ctgRemoveTbMetaFromCache(SCatalog *pCtg, SName *pTableName, bool syncReq) {
int32_t code = 0;
- STableMeta* tblMeta = NULL;
+ STableMeta *tblMeta = NULL;
SCtgTbMetaCtx tbCtx = {0};
tbCtx.flag = CTG_FLAG_UNKNOWN_STB;
tbCtx.pName = pTableName;
@@ -2449,7 +2394,7 @@ int32_t ctgGetTbHashVgroupFromCache(SCatalog *pCtg, const SName *pTableName, SVg
CTG_ERR_RET(TSDB_CODE_CTG_INVALID_INPUT);
}
- SCtgDBCache* dbCache = NULL;
+ SCtgDBCache *dbCache = NULL;
int32_t code = 0;
char dbFName[TSDB_DB_FNAME_LEN] = {0};
tNameGetFullDbName(pTableName, dbFName);
@@ -2476,5 +2421,3 @@ _return:
CTG_RET(code);
}
-
-
diff --git a/source/libs/catalog/src/ctgDbg.c b/source/libs/catalog/src/ctgDbg.c
index bd3402dc394186b03d116c2c2ebe5e83838bdddb..d21524230736c47fb3c81de0c00e1402f09a585e 100644
--- a/source/libs/catalog/src/ctgDbg.c
+++ b/source/libs/catalog/src/ctgDbg.c
@@ -367,18 +367,22 @@ void ctgdShowDBCache(SCatalog* pCtg, SHashObj *dbHash) {
int32_t stbNum = dbCache->stbCache ? taosHashGetSize(dbCache->stbCache) : 0;
int32_t vgVersion = CTG_DEFAULT_INVALID_VERSION;
int32_t hashMethod = -1;
+ int16_t hashPrefix = 0;
+ int16_t hashSuffix = 0;
int32_t vgNum = 0;
if (dbCache->vgCache.vgInfo) {
vgVersion = dbCache->vgCache.vgInfo->vgVersion;
hashMethod = dbCache->vgCache.vgInfo->hashMethod;
+ hashPrefix = dbCache->vgCache.vgInfo->hashPrefix;
+ hashSuffix = dbCache->vgCache.vgInfo->hashSuffix;
if (dbCache->vgCache.vgInfo->vgHash) {
vgNum = taosHashGetSize(dbCache->vgCache.vgInfo->vgHash);
}
}
- ctgDebug("[%d] db [%.*s][0x%"PRIx64"] %s: metaNum:%d, stbNum:%d, vgVersion:%d, hashMethod:%d, vgNum:%d",
- i, (int32_t)len, dbFName, dbCache->dbId, dbCache->deleted?"deleted":"", metaNum, stbNum, vgVersion, hashMethod, vgNum);
+ ctgDebug("[%d] db [%.*s][0x%"PRIx64"] %s: metaNum:%d, stbNum:%d, vgVersion:%d, hashMethod:%d, prefix:%d, suffix:%d, vgNum:%d",
+ i, (int32_t)len, dbFName, dbCache->dbId, dbCache->deleted?"deleted":"", metaNum, stbNum, vgVersion, hashMethod, hashPrefix, hashSuffix, vgNum);
pIter = taosHashIterate(dbHash, pIter);
}
diff --git a/source/libs/catalog/src/ctgRemote.c b/source/libs/catalog/src/ctgRemote.c
index 1fdf84e1209e847aa86a5da12e3cdeee0bc4cfcb..7b8c66e36889dde2fe0c624c6adb099151455501 100644
--- a/source/libs/catalog/src/ctgRemote.c
+++ b/source/libs/catalog/src/ctgRemote.c
@@ -68,14 +68,15 @@ int32_t ctgHandleBatchRsp(SCtgJob* pJob, SCtgTaskCallbackParam* cbParam, SDataBu
taskMsg.pData = NULL;
taskMsg.len = 0;
}
-
+
SCtgTaskReq tReq;
tReq.pTask = pTask;
- tReq.msgIdx = rsp.msgIdx;
+ tReq.msgIdx = rsp.msgIdx;
SCtgMsgCtx* pMsgCtx = CTG_GET_TASK_MSGCTX(pTask, tReq.msgIdx);
pMsgCtx->pBatchs = pBatchs;
- ctgDebug("QID:0x%" PRIx64 " ctg task %d idx %d start to handle rsp %s, pBatchs: %p", pJob->queryId, pTask->taskId, rsp.msgIdx, TMSG_INFO(taskMsg.msgType + 1), pBatchs);
+ ctgDebug("QID:0x%" PRIx64 " ctg task %d idx %d start to handle rsp %s, pBatchs: %p", pJob->queryId, pTask->taskId,
+ rsp.msgIdx, TMSG_INFO(taskMsg.msgType + 1), pBatchs);
(*gCtgAsyncFps[pTask->type].handleRspFp)(&tReq, rsp.reqType, &taskMsg, (rsp.rspCode ? rsp.rspCode : rspCode));
}
@@ -344,13 +345,14 @@ int32_t ctgHandleMsgCallback(void* param, SDataBuf* pMsg, int32_t rspCode) {
CTG_ERR_JRET(TSDB_CODE_OUT_OF_MEMORY);
}
- SCtgMsgCtx *pMsgCtx = CTG_GET_TASK_MSGCTX(pTask, -1);
+ SCtgMsgCtx* pMsgCtx = CTG_GET_TASK_MSGCTX(pTask, -1);
pMsgCtx->pBatchs = pBatchs;
#endif
SCtgTaskReq tReq;
tReq.pTask = pTask;
tReq.msgIdx = -1;
+
CTG_ERR_JRET((*gCtgAsyncFps[pTask->type].handleRspFp)(&tReq, cbParam->reqType, pMsg, rspCode));
#if CTG_BATCH_FETCH
@@ -361,6 +363,7 @@ int32_t ctgHandleMsgCallback(void* param, SDataBuf* pMsg, int32_t rspCode) {
_return:
taosMemoryFree(pMsg->pData);
+ taosMemoryFree(pMsg->pEpSet);
if (pJob) {
taosReleaseRef(gCtgMgmt.jobPool, cbParam->refId);
@@ -442,17 +445,17 @@ _return:
CTG_RET(code);
}
-int32_t ctgAddBatch(SCatalog* pCtg, int32_t vgId, SRequestConnInfo* pConn, SCtgTaskReq* tReq, int32_t msgType, void* msg,
- uint32_t msgSize) {
- int32_t code = 0;
- SCtgTask* pTask = tReq->pTask;
+int32_t ctgAddBatch(SCatalog* pCtg, int32_t vgId, SRequestConnInfo* pConn, SCtgTaskReq* tReq, int32_t msgType,
+ void* msg, uint32_t msgSize) {
+ int32_t code = 0;
+ SCtgTask* pTask = tReq->pTask;
SCtgMsgCtx* pMsgCtx = CTG_GET_TASK_MSGCTX(pTask, tReq->msgIdx);
- SHashObj* pBatchs = pMsgCtx->pBatchs;
- SCtgJob* pJob = pTask->pJob;
- SCtgBatch* pBatch = taosHashGet(pBatchs, &vgId, sizeof(vgId));
- SCtgBatch newBatch = {0};
- SBatchMsg req = {0};
-
+ SHashObj* pBatchs = pMsgCtx->pBatchs;
+ SCtgJob* pJob = pTask->pJob;
+ SCtgBatch* pBatch = taosHashGet(pBatchs, &vgId, sizeof(vgId));
+ SCtgBatch newBatch = {0};
+ SBatchMsg req = {0};
+
if (NULL == pBatch) {
newBatch.pMsgs = taosArrayInit(pJob->subTaskNum, sizeof(SBatchMsg));
newBatch.pTaskIds = taosArrayInit(pJob->subTaskNum, sizeof(int32_t));
@@ -487,7 +490,7 @@ int32_t ctgAddBatch(SCatalog* pCtg, int32_t vgId, SRequestConnInfo* pConn, SCtgT
} else if (TDMT_VND_TABLE_META == msgType) {
if (CTG_TASK_GET_TB_META_BATCH == pTask->type) {
SCtgTbMetasCtx* ctx = (SCtgTbMetasCtx*)pTask->taskCtx;
- SCtgFetch* fetch = taosArrayGet(ctx->pFetchs, tReq->msgIdx);
+ SCtgFetch* fetch = taosArrayGet(ctx->pFetchs, tReq->msgIdx);
pName = ctgGetFetchName(ctx->pNames, fetch);
} else {
SCtgTbMetaCtx* ctx = (SCtgTbMetaCtx*)pTask->taskCtx;
@@ -521,14 +524,14 @@ int32_t ctgAddBatch(SCatalog* pCtg, int32_t vgId, SRequestConnInfo* pConn, SCtgT
if (NULL == taosArrayPush(pBatch->pMsgs, &req)) {
CTG_ERR_JRET(TSDB_CODE_OUT_OF_MEMORY);
}
- msg = NULL;
+ msg = NULL;
if (NULL == taosArrayPush(pBatch->pTaskIds, &pTask->taskId)) {
CTG_ERR_JRET(TSDB_CODE_OUT_OF_MEMORY);
}
if (NULL == taosArrayPush(pBatch->pMsgIdxs, &req.msgIdx)) {
CTG_ERR_JRET(TSDB_CODE_OUT_OF_MEMORY);
}
-
+
pBatch->msgSize += sizeof(req) + msgSize - POINTER_BYTES;
if (vgId > 0) {
@@ -539,7 +542,7 @@ int32_t ctgAddBatch(SCatalog* pCtg, int32_t vgId, SRequestConnInfo* pConn, SCtgT
} else if (TDMT_VND_TABLE_META == msgType) {
if (CTG_TASK_GET_TB_META_BATCH == pTask->type) {
SCtgTbMetasCtx* ctx = (SCtgTbMetasCtx*)pTask->taskCtx;
- SCtgFetch* fetch = taosArrayGet(ctx->pFetchs, tReq->msgIdx);
+ SCtgFetch* fetch = taosArrayGet(ctx->pFetchs, tReq->msgIdx);
pName = ctgGetFetchName(ctx->pNames, fetch);
} else {
SCtgTbMetaCtx* ctx = (SCtgTbMetaCtx*)pTask->taskCtx;
@@ -550,7 +553,7 @@ int32_t ctgAddBatch(SCatalog* pCtg, int32_t vgId, SRequestConnInfo* pConn, SCtgT
CTG_ERR_JRET(TSDB_CODE_APP_ERROR);
}
- tNameGetFullDbName(pName, pBatch->dbFName);
+ tNameGetFullDbName(pName, pBatch->dbFName);
}
ctgDebug("task %d %s req added to batch %d, target vgId %d", pTask->taskId, TMSG_INFO(msgType), pBatch->batchId,
@@ -583,7 +586,7 @@ int32_t ctgBuildBatchReqMsg(SCtgBatch* pBatch, int32_t vgId, void** msg) {
for (int32_t i = 0; i < num; ++i) {
SBatchMsg* pReq = taosArrayGet(pBatch->pMsgs, i);
*(int32_t*)((char*)(*msg) + offset) = htonl(pReq->msgIdx);
- offset += sizeof(pReq->msgIdx);
+ offset += sizeof(pReq->msgIdx);
*(int32_t*)((char*)(*msg) + offset) = htonl(pReq->msgType);
offset += sizeof(pReq->msgType);
*(int32_t*)((char*)(*msg) + offset) = htonl(pReq->msgLen);
@@ -611,7 +614,7 @@ int32_t ctgLaunchBatchs(SCatalog* pCtg, SCtgJob* pJob, SHashObj* pBatchs) {
ctgDebug("QID:0x%" PRIx64 " ctg start to launch batch %d", pJob->queryId, pBatch->batchId);
CTG_ERR_JRET(ctgBuildBatchReqMsg(pBatch, *vgId, &msg));
- code = ctgAsyncSendMsg(pCtg, &pBatch->conn, pJob, pBatch->pTaskIds, pBatch->batchId, pBatch->pMsgIdxs,
+ code = ctgAsyncSendMsg(pCtg, &pBatch->conn, pJob, pBatch->pTaskIds, pBatch->batchId, pBatch->pMsgIdxs,
pBatch->dbFName, *vgId, pBatch->msgType, msg, pBatch->msgSize);
pBatch->pTaskIds = NULL;
CTG_ERR_JRET(code);
@@ -656,7 +659,7 @@ int32_t ctgGetQnodeListFromMnode(SCatalog* pCtg, SRequestConnInfo* pConn, SArray
#if CTG_BATCH_FETCH
SCtgTaskReq tReq;
tReq.pTask = pTask;
- tReq.msgIdx = -1;
+ tReq.msgIdx = -1;
CTG_RET(ctgAddBatch(pCtg, 0, pConn, &tReq, reqType, msg, msgLen));
#else
SArray* pTaskId = taosArrayInit(1, sizeof(int32_t));
@@ -705,7 +708,7 @@ int32_t ctgGetDnodeListFromMnode(SCatalog* pCtg, SRequestConnInfo* pConn, SArray
#if CTG_BATCH_FETCH
SCtgTaskReq tReq;
tReq.pTask = pTask;
- tReq.msgIdx = -1;
+ tReq.msgIdx = -1;
CTG_RET(ctgAddBatch(pCtg, 0, pConn, &tReq, reqType, msg, msgLen));
#else
SArray* pTaskId = taosArrayInit(1, sizeof(int32_t));
@@ -736,9 +739,9 @@ int32_t ctgGetDnodeListFromMnode(SCatalog* pCtg, SRequestConnInfo* pConn, SArray
int32_t ctgGetDBVgInfoFromMnode(SCatalog* pCtg, SRequestConnInfo* pConn, SBuildUseDBInput* input, SUseDbOutput* out,
SCtgTaskReq* tReq) {
- char* msg = NULL;
- int32_t msgLen = 0;
- int32_t reqType = TDMT_MND_USE_DB;
+ char* msg = NULL;
+ int32_t msgLen = 0;
+ int32_t reqType = TDMT_MND_USE_DB;
SCtgTask* pTask = tReq ? tReq->pTask : NULL;
void* (*mallocFp)(int32_t) = pTask ? taosMemoryMalloc : rpcMallocCont;
@@ -813,7 +816,7 @@ int32_t ctgGetDBCfgFromMnode(SCatalog* pCtg, SRequestConnInfo* pConn, const char
#if CTG_BATCH_FETCH
SCtgTaskReq tReq;
tReq.pTask = pTask;
- tReq.msgIdx = -1;
+ tReq.msgIdx = -1;
CTG_RET(ctgAddBatch(pCtg, 0, pConn, &tReq, reqType, msg, msgLen));
#else
SArray* pTaskId = taosArrayInit(1, sizeof(int32_t));
@@ -868,7 +871,7 @@ int32_t ctgGetIndexInfoFromMnode(SCatalog* pCtg, SRequestConnInfo* pConn, const
#if CTG_BATCH_FETCH
SCtgTaskReq tReq;
tReq.pTask = pTask;
- tReq.msgIdx = -1;
+ tReq.msgIdx = -1;
CTG_RET(ctgAddBatch(pCtg, 0, pConn, &tReq, reqType, msg, msgLen));
#else
SArray* pTaskId = taosArrayInit(1, sizeof(int32_t));
@@ -919,13 +922,13 @@ int32_t ctgGetTbIndexFromMnode(SCatalog* pCtg, SRequestConnInfo* pConn, SName* n
if (NULL == pOut) {
CTG_ERR_RET(TSDB_CODE_OUT_OF_MEMORY);
}
-
+
CTG_ERR_RET(ctgUpdateMsgCtx(CTG_GET_TASK_MSGCTX(pTask, -1), reqType, pOut, (char*)tbFName));
#if CTG_BATCH_FETCH
SCtgTaskReq tReq;
tReq.pTask = pTask;
- tReq.msgIdx = -1;
+ tReq.msgIdx = -1;
CTG_RET(ctgAddBatch(pCtg, 0, pConn, &tReq, reqType, msg, msgLen));
#else
SArray* pTaskId = taosArrayInit(1, sizeof(int32_t));
@@ -980,7 +983,7 @@ int32_t ctgGetUdfInfoFromMnode(SCatalog* pCtg, SRequestConnInfo* pConn, const ch
#if CTG_BATCH_FETCH
SCtgTaskReq tReq;
tReq.pTask = pTask;
- tReq.msgIdx = -1;
+ tReq.msgIdx = -1;
CTG_RET(ctgAddBatch(pCtg, 0, pConn, &tReq, reqType, msg, msgLen));
#else
SArray* pTaskId = taosArrayInit(1, sizeof(int32_t));
@@ -1035,7 +1038,7 @@ int32_t ctgGetUserDbAuthFromMnode(SCatalog* pCtg, SRequestConnInfo* pConn, const
#if CTG_BATCH_FETCH
SCtgTaskReq tReq;
tReq.pTask = pTask;
- tReq.msgIdx = -1;
+ tReq.msgIdx = -1;
CTG_RET(ctgAddBatch(pCtg, 0, pConn, &tReq, reqType, msg, msgLen));
#else
SArray* pTaskId = taosArrayInit(1, sizeof(int32_t));
@@ -1066,7 +1069,7 @@ int32_t ctgGetUserDbAuthFromMnode(SCatalog* pCtg, SRequestConnInfo* pConn, const
int32_t ctgGetTbMetaFromMnodeImpl(SCatalog* pCtg, SRequestConnInfo* pConn, char* dbFName, char* tbName,
STableMetaOutput* out, SCtgTaskReq* tReq) {
- SCtgTask *pTask = tReq ? tReq->pTask : NULL;
+ SCtgTask* pTask = tReq ? tReq->pTask : NULL;
SBuildTableInput bInput = {.vgId = 0, .dbFName = dbFName, .tbName = tbName};
char* msg = NULL;
SEpSet* pVnodeEpSet = NULL;
@@ -1091,7 +1094,7 @@ int32_t ctgGetTbMetaFromMnodeImpl(SCatalog* pCtg, SRequestConnInfo* pConn, char*
}
CTG_ERR_RET(ctgUpdateMsgCtx(CTG_GET_TASK_MSGCTX(pTask, tReq->msgIdx), reqType, pOut, tbFName));
-
+
#if CTG_BATCH_FETCH
CTG_RET(ctgAddBatch(pCtg, 0, pConn, tReq, reqType, msg, msgLen));
#else
@@ -1131,8 +1134,8 @@ int32_t ctgGetTbMetaFromMnode(SCatalog* pCtg, SRequestConnInfo* pConn, const SNa
int32_t ctgGetTbMetaFromVnode(SCatalog* pCtg, SRequestConnInfo* pConn, const SName* pTableName, SVgroupInfo* vgroupInfo,
STableMetaOutput* out, SCtgTaskReq* tReq) {
- SCtgTask *pTask = tReq ? tReq->pTask : NULL;
- char dbFName[TSDB_DB_FNAME_LEN];
+ SCtgTask* pTask = tReq ? tReq->pTask : NULL;
+ char dbFName[TSDB_DB_FNAME_LEN];
tNameGetFullDbName(pTableName, dbFName);
int32_t reqType = TDMT_VND_TABLE_META;
char tbFName[TSDB_TABLE_FNAME_LEN];
@@ -1165,7 +1168,7 @@ int32_t ctgGetTbMetaFromVnode(SCatalog* pCtg, SRequestConnInfo* pConn, const SNa
.requestObjRefId = pConn->requestObjRefId,
.mgmtEps = vgroupInfo->epSet};
- CTG_ERR_RET(ctgUpdateMsgCtx(CTG_GET_TASK_MSGCTX(pTask, tReq->msgIdx), reqType, pOut, tbFName));
+ CTG_ERR_RET(ctgUpdateMsgCtx(CTG_GET_TASK_MSGCTX(pTask, tReq->msgIdx), reqType, pOut, tbFName));
#if CTG_BATCH_FETCH
CTG_RET(ctgAddBatch(pCtg, vgroupInfo->vgId, &vConn, tReq, reqType, msg, msgLen));
@@ -1231,7 +1234,7 @@ int32_t ctgGetTableCfgFromVnode(SCatalog* pCtg, SRequestConnInfo* pConn, const S
#if CTG_BATCH_FETCH
SCtgTaskReq tReq;
tReq.pTask = pTask;
- tReq.msgIdx = -1;
+ tReq.msgIdx = -1;
CTG_RET(ctgAddBatch(pCtg, vgroupInfo->vgId, &vConn, &tReq, reqType, msg, msgLen));
#else
SCtgTbCfgCtx* ctx = (SCtgTbCfgCtx*)pTask->taskCtx;
@@ -1243,7 +1246,8 @@ int32_t ctgGetTableCfgFromVnode(SCatalog* pCtg, SRequestConnInfo* pConn, const S
}
taosArrayPush(pTaskId, &pTask->taskId);
- CTG_RET(ctgAsyncSendMsg(pCtg, &vConn, pTask->pJob, pTaskId, -1, NULL, dbFName, ctx->pVgInfo->vgId, reqType, msg, msgLen));
+ CTG_RET(ctgAsyncSendMsg(pCtg, &vConn, pTask->pJob, pTaskId, -1, NULL, dbFName, ctx->pVgInfo->vgId, reqType, msg,
+ msgLen));
#endif
}
@@ -1289,7 +1293,7 @@ int32_t ctgGetTableCfgFromMnode(SCatalog* pCtg, SRequestConnInfo* pConn, const S
#if CTG_BATCH_FETCH
SCtgTaskReq tReq;
tReq.pTask = pTask;
- tReq.msgIdx = -1;
+ tReq.msgIdx = -1;
CTG_RET(ctgAddBatch(pCtg, 0, pConn, &tReq, reqType, msg, msgLen));
#else
SArray* pTaskId = taosArrayInit(1, sizeof(int32_t));
@@ -1338,7 +1342,7 @@ int32_t ctgGetSvrVerFromMnode(SCatalog* pCtg, SRequestConnInfo* pConn, char** ou
#if CTG_BATCH_FETCH
SCtgTaskReq tReq;
tReq.pTask = pTask;
- tReq.msgIdx = -1;
+ tReq.msgIdx = -1;
CTG_RET(ctgAddBatch(pCtg, 0, pConn, &tReq, reqType, msg, msgLen));
#else
SArray* pTaskId = taosArrayInit(1, sizeof(int32_t));
diff --git a/source/libs/catalog/src/ctgUtil.c b/source/libs/catalog/src/ctgUtil.c
index e28234ab7603248e7261829bcb59e44ba24491fe..97b174de1c7c70553011aae62b9dcf3e779c0efb 100644
--- a/source/libs/catalog/src/ctgUtil.c
+++ b/source/libs/catalog/src/ctgUtil.c
@@ -99,7 +99,16 @@ char *ctgTaskTypeStr(CTG_TASK_TYPE type) {
}
void ctgFreeQNode(SCtgQNode *node) {
- //TODO
+ if (NULL == node) {
+ return;
+ }
+
+ if (node->op) {
+ taosMemoryFree(node->op->data);
+ taosMemoryFree(node->op);
+ }
+
+ taosMemoryFree(node);
}
void ctgFreeSTableIndex(void *info) {
@@ -848,15 +857,11 @@ int32_t ctgGetVgInfoFromHashValue(SCatalog *pCtg, SDBVgInfo *dbInfo, const SName
CTG_ERR_RET(TSDB_CODE_TSC_DB_NOT_SELECTED);
}
- tableNameHashFp fp = NULL;
SVgroupInfo *vgInfo = NULL;
-
- CTG_ERR_RET(ctgGetHashFunction(dbInfo->hashMethod, &fp));
-
char tbFullName[TSDB_TABLE_FNAME_LEN];
tNameExtractFullName(pTableName, tbFullName);
- uint32_t hashValue = (*fp)(tbFullName, (uint32_t)strlen(tbFullName));
+ uint32_t hashValue = taosGetTbHashVal(tbFullName, (uint32_t)strlen(tbFullName), dbInfo->hashMethod, dbInfo->hashPrefix, dbInfo->hashSuffix);
void *pIter = taosHashIterate(dbInfo->vgHash, NULL);
while (pIter) {
@@ -919,11 +924,7 @@ int32_t ctgGetVgInfosFromHashValue(SCatalog *pCtg, SCtgTaskReq* tReq, SDBVgInfo
CTG_ERR_RET(TSDB_CODE_CTG_INTERNAL_ERROR);
}
- tableNameHashFp fp = NULL;
SVgroupInfo *vgInfo = NULL;
-
- CTG_ERR_RET(ctgGetHashFunction(dbInfo->hashMethod, &fp));
-
int32_t tbNum = taosArrayGetSize(pNames);
if (1 == vgNum) {
@@ -975,7 +976,7 @@ int32_t ctgGetVgInfosFromHashValue(SCatalog *pCtg, SCtgTaskReq* tReq, SDBVgInfo
tbNameLen = offset + strlen(pName->tname);
strcpy(tbFullName + offset, pName->tname);
- uint32_t hashValue = (*fp)(tbFullName, (uint32_t)tbNameLen);
+ uint32_t hashValue = taosGetTbHashVal(tbFullName, (uint32_t)strlen(tbFullName), dbInfo->hashMethod, dbInfo->hashPrefix, dbInfo->hashSuffix);
SVgroupInfo **p = taosArraySearch(pVgList, &hashValue, ctgHashValueComp, TD_EQ);
diff --git a/source/libs/catalog/test/catalogTests.cpp b/source/libs/catalog/test/catalogTests.cpp
index 0be85333dca130884516c214e324fbab82e33953..c01c269e643b2a1f2a44817a74807c20db718d4b 100644
--- a/source/libs/catalog/test/catalogTests.cpp
+++ b/source/libs/catalog/test/catalogTests.cpp
@@ -218,6 +218,8 @@ void ctgTestBuildDBVgroup(SDBVgInfo **pdbVgroup) {
ctgTestCurrentVgVersion = dbVgroup->vgVersion;
dbVgroup->hashMethod = 0;
+ dbVgroup->hashPrefix = 0;
+ dbVgroup->hashSuffix = 0;
dbVgroup->vgHash = taosHashInit(ctgTestVgNum, taosGetDefaultHashFunction(TSDB_DATA_TYPE_INT), true, HASH_ENTRY_LOCK);
vgNum = ctgTestGetVgNumFromVgVersion(dbVgroup->vgVersion);
diff --git a/source/libs/command/inc/commandInt.h b/source/libs/command/inc/commandInt.h
index 706985f89468455b49876ac8d20edef634c42ff9..ad677a81ff304a3637c9a24c2c20b4f03cee3ac1 100644
--- a/source/libs/command/inc/commandInt.h
+++ b/source/libs/command/inc/commandInt.h
@@ -99,8 +99,10 @@ extern "C" {
typedef struct SExplainGroup {
int32_t nodeNum;
+ int32_t nodeIdx;
int32_t physiPlanExecNum;
int32_t physiPlanExecIdx;
+ bool singleChannel;
SRWLatch lock;
SSubplan *plan;
SArray *nodeExecInfo; //Array
diff --git a/source/libs/command/src/command.c b/source/libs/command/src/command.c
index 7d259fe06c26bfdd82b595ba49d3fefb7a181598..18d839e1091e3fc5f1be2939a22345efe8ea8579 100644
--- a/source/libs/command/src/command.c
+++ b/source/libs/command/src/command.c
@@ -471,6 +471,7 @@ static int32_t setCreateTBResultIntoDataBlock(SSDataBlock* pBlock, SDbCfgInfo* p
len += sprintf(buf2 + VARSTR_HEADER_SIZE, "CREATE TABLE `%s` (", tbName);
appendColumnFields(buf2, &len, pCfg);
len += sprintf(buf2 + VARSTR_HEADER_SIZE + len, ")");
+ appendTableOptions(buf2, &len, pDbCfg, pCfg);
}
varDataLen(buf2) = len;
diff --git a/source/libs/command/src/explain.c b/source/libs/command/src/explain.c
index 967c682b0bb701502dff90081fba8973a34bd22a..c5de44e1f67d3e4843a0357be88bbac730fc2d99 100644
--- a/source/libs/command/src/explain.c
+++ b/source/libs/command/src/explain.c
@@ -21,14 +21,14 @@
#include "tdatablock.h"
int32_t qExplainGenerateResNode(SPhysiNode *pNode, SExplainGroup *group, SExplainResNode **pRes);
-int32_t qExplainAppendGroupResRows(void *pCtx, int32_t groupId, int32_t level);
+int32_t qExplainAppendGroupResRows(void *pCtx, int32_t groupId, int32_t level, bool singleChannel);
void qExplainFreeResNode(SExplainResNode *resNode) {
if (NULL == resNode) {
return;
}
- taosMemoryFreeClear(resNode->pExecInfo);
+ taosArrayDestroy(resNode->pExecInfo);
SNode *node = NULL;
FOREACH(node, resNode->pChildren) { qExplainFreeResNode((SExplainResNode *)node); }
@@ -56,8 +56,9 @@ void qExplainFreeCtx(SExplainCtx *pCtx) {
int32_t num = taosArrayGetSize(group->nodeExecInfo);
for (int32_t i = 0; i < num; ++i) {
SExplainRsp *rsp = taosArrayGet(group->nodeExecInfo, i);
- taosMemoryFreeClear(rsp->subplanInfo);
+ tFreeSExplainRsp(rsp);
}
+ taosArrayDestroy(group->nodeExecInfo);
}
pIter = taosHashIterate(pCtx->groupHash, pIter);
@@ -66,6 +67,7 @@ void qExplainFreeCtx(SExplainCtx *pCtx) {
taosHashCleanup(pCtx->groupHash);
taosArrayDestroy(pCtx->rows);
+ taosMemoryFreeClear(pCtx->tbuf);
taosMemoryFree(pCtx);
}
@@ -248,7 +250,7 @@ int32_t qExplainGenerateResChildren(SPhysiNode *pNode, SExplainGroup *group, SNo
return TSDB_CODE_SUCCESS;
}
-int32_t qExplainGenerateResNodeExecInfo(SArray **pExecInfo, SExplainGroup *group) {
+int32_t qExplainGenerateResNodeExecInfo(SPhysiNode *pNode, SArray **pExecInfo, SExplainGroup *group) {
*pExecInfo = taosArrayInit(group->nodeNum, sizeof(SExplainExecInfo));
if (NULL == (*pExecInfo)) {
qError("taosArrayInit %d explainExecInfo failed", group->nodeNum);
@@ -256,17 +258,28 @@ int32_t qExplainGenerateResNodeExecInfo(SArray **pExecInfo, SExplainGroup *group
}
SExplainRsp *rsp = NULL;
- for (int32_t i = 0; i < group->nodeNum; ++i) {
- rsp = taosArrayGet(group->nodeExecInfo, i);
- /*
- if (group->physiPlanExecIdx >= rsp->numOfPlans) {
- qError("physiPlanIdx %d exceed plan num %d", group->physiPlanExecIdx, rsp->numOfPlans);
- return TSDB_CODE_QRY_APP_ERROR;
- }
+ if (group->singleChannel) {
+ if (0 == group->physiPlanExecIdx) {
+ group->nodeIdx = 0;
+ }
+
+ rsp = taosArrayGet(group->nodeExecInfo, group->nodeIdx++);
+ if (group->physiPlanExecIdx >= rsp->numOfPlans) {
+ qError("physiPlanIdx %d exceed plan num %d", group->physiPlanExecIdx, rsp->numOfPlans);
+ return TSDB_CODE_QRY_APP_ERROR;
+ }
+
+ taosArrayPush(*pExecInfo, rsp->subplanInfo + group->physiPlanExecIdx);
+ } else {
+ for (int32_t i = 0; i < group->nodeNum; ++i) {
+ rsp = taosArrayGet(group->nodeExecInfo, i);
+ if (group->physiPlanExecIdx >= rsp->numOfPlans) {
+ qError("physiPlanIdx %d exceed plan num %d", group->physiPlanExecIdx, rsp->numOfPlans);
+ return TSDB_CODE_QRY_APP_ERROR;
+ }
- taosArrayPush(*pExecInfo, rsp->subplanInfo + group->physiPlanExecIdx);
- */
- taosArrayPush(*pExecInfo, rsp->subplanInfo);
+ taosArrayPush(*pExecInfo, rsp->subplanInfo + group->physiPlanExecIdx);
+ }
}
++group->physiPlanExecIdx;
@@ -291,7 +304,7 @@ int32_t qExplainGenerateResNode(SPhysiNode *pNode, SExplainGroup *group, SExplai
resNode->pNode = pNode;
if (group->nodeExecInfo) {
- QRY_ERR_JRET(qExplainGenerateResNodeExecInfo(&resNode->pExecInfo, group));
+ QRY_ERR_JRET(qExplainGenerateResNodeExecInfo(pNode, &resNode->pExecInfo, group));
}
QRY_ERR_JRET(qExplainGenerateResChildren(pNode, group, &resNode->pChildren));
@@ -764,9 +777,9 @@ int32_t qExplainResNodeToRowsImpl(SExplainResNode *pResNode, SExplainCtx *ctx, i
}
case QUERY_NODE_PHYSICAL_PLAN_EXCHANGE: {
SExchangePhysiNode *pExchNode = (SExchangePhysiNode *)pNode;
- SExplainGroup *group = taosHashGet(ctx->groupHash, &pExchNode->srcGroupId, sizeof(pExchNode->srcGroupId));
+ SExplainGroup *group = taosHashGet(ctx->groupHash, &pExchNode->srcStartGroupId, sizeof(pExchNode->srcStartGroupId));
if (NULL == group) {
- qError("exchange src group %d not in groupHash", pExchNode->srcGroupId);
+ qError("exchange src group %d not in groupHash", pExchNode->srcStartGroupId);
QRY_ERR_RET(TSDB_CODE_QRY_APP_ERROR);
}
@@ -801,7 +814,7 @@ int32_t qExplainResNodeToRowsImpl(SExplainResNode *pResNode, SExplainCtx *ctx, i
}
}
- QRY_ERR_RET(qExplainAppendGroupResRows(ctx, pExchNode->srcGroupId, level + 1));
+ QRY_ERR_RET(qExplainAppendGroupResRows(ctx, pExchNode->srcStartGroupId, level + 1, pExchNode->singleChannel));
break;
}
case QUERY_NODE_PHYSICAL_PLAN_SORT: {
@@ -1533,7 +1546,7 @@ int32_t qExplainResNodeToRows(SExplainResNode *pResNode, SExplainCtx *ctx, int32
return TSDB_CODE_SUCCESS;
}
-int32_t qExplainAppendGroupResRows(void *pCtx, int32_t groupId, int32_t level) {
+int32_t qExplainAppendGroupResRows(void *pCtx, int32_t groupId, int32_t level, bool singleChannel) {
SExplainResNode *node = NULL;
int32_t code = 0;
SExplainCtx *ctx = (SExplainCtx *)pCtx;
@@ -1544,6 +1557,9 @@ int32_t qExplainAppendGroupResRows(void *pCtx, int32_t groupId, int32_t level) {
QRY_ERR_RET(TSDB_CODE_QRY_APP_ERROR);
}
+ group->singleChannel = singleChannel;
+ group->physiPlanExecIdx = 0;
+
QRY_ERR_RET(qExplainGenerateResNode(group->plan->pNode, group, &node));
QRY_ERR_JRET(qExplainResNodeToRows(node, ctx, level));
@@ -1707,7 +1723,7 @@ int32_t qExplainAppendPlanRows(SExplainCtx *pCtx) {
}
int32_t qExplainGenerateRsp(SExplainCtx *pCtx, SRetrieveTableRsp **pRsp) {
- QRY_ERR_RET(qExplainAppendGroupResRows(pCtx, pCtx->rootGroupId, 0));
+ QRY_ERR_RET(qExplainAppendGroupResRows(pCtx, pCtx->rootGroupId, 0, false));
QRY_ERR_RET(qExplainAppendPlanRows(pCtx));
QRY_ERR_RET(qExplainGetRspFromCtx(pCtx, pRsp));
@@ -1723,7 +1739,7 @@ int32_t qExplainUpdateExecInfo(SExplainCtx *pCtx, SExplainRsp *pRspMsg, int32_t
SExplainGroup *group = taosHashGet(ctx->groupHash, &groupId, sizeof(groupId));
if (NULL == group) {
qError("group %d not in groupHash", groupId);
- taosMemoryFreeClear(pRspMsg->subplanInfo);
+ tFreeSExplainRsp(pRspMsg);
QRY_ERR_RET(TSDB_CODE_QRY_APP_ERROR);
}
@@ -1732,7 +1748,7 @@ int32_t qExplainUpdateExecInfo(SExplainCtx *pCtx, SExplainRsp *pRspMsg, int32_t
group->nodeExecInfo = taosArrayInit(group->nodeNum, sizeof(SExplainRsp));
if (NULL == group->nodeExecInfo) {
qError("taosArrayInit %d explainExecInfo failed", group->nodeNum);
- taosMemoryFreeClear(pRspMsg->subplanInfo);
+ tFreeSExplainRsp(pRspMsg);
taosWUnLockLatch(&group->lock);
QRY_ERR_RET(TSDB_CODE_QRY_OUT_OF_MEMORY);
@@ -1742,7 +1758,7 @@ int32_t qExplainUpdateExecInfo(SExplainCtx *pCtx, SExplainRsp *pRspMsg, int32_t
} else if (taosArrayGetSize(group->nodeExecInfo) >= group->nodeNum) {
qError("group execInfo already full, size:%d, nodeNum:%d", (int32_t)taosArrayGetSize(group->nodeExecInfo),
group->nodeNum);
- taosMemoryFreeClear(pRspMsg->subplanInfo);
+ tFreeSExplainRsp(pRspMsg);
taosWUnLockLatch(&group->lock);
QRY_ERR_RET(TSDB_CODE_QRY_APP_ERROR);
@@ -1751,13 +1767,14 @@ int32_t qExplainUpdateExecInfo(SExplainCtx *pCtx, SExplainRsp *pRspMsg, int32_t
if (group->physiPlanExecNum != pRspMsg->numOfPlans) {
qError("physiPlanExecNum %d mismatch with others %d in group %d", pRspMsg->numOfPlans, group->physiPlanExecNum,
groupId);
- taosMemoryFreeClear(pRspMsg->subplanInfo);
+ tFreeSExplainRsp(pRspMsg);
taosWUnLockLatch(&group->lock);
QRY_ERR_RET(TSDB_CODE_QRY_APP_ERROR);
}
taosArrayPush(group->nodeExecInfo, pRspMsg);
+
groupDone = (taosArrayGetSize(group->nodeExecInfo) >= group->nodeNum);
taosWUnLockLatch(&group->lock);
diff --git a/source/libs/executor/inc/executil.h b/source/libs/executor/inc/executil.h
index a25933d15e9367513bc1ad5d5096972fbf65bc35..baf7d447cc6f0bbb3e8b552acdefd38abafc98cc 100644
--- a/source/libs/executor/inc/executil.h
+++ b/source/libs/executor/inc/executil.h
@@ -22,6 +22,7 @@
#include "tbuffer.h"
#include "tcommon.h"
#include "tpagedbuf.h"
+#include "tsimplehash.h"
#define T_LONG_JMP(_obj, _c) \
do { \
@@ -86,9 +87,8 @@ struct SqlFunctionCtx;
size_t getResultRowSize(struct SqlFunctionCtx* pCtx, int32_t numOfOutput);
void initResultRowInfo(SResultRowInfo* pResultRowInfo);
-
-void initResultRow(SResultRow* pResultRow);
-void closeResultRow(SResultRow* pResultRow);
+void closeResultRow(SResultRow* pResultRow);
+void resetResultRow(SResultRow* pResultRow, size_t entrySize);
struct SResultRowEntryInfo* getResultEntryInfo(const SResultRow* pRow, int32_t index, const int32_t* offset);
@@ -106,7 +106,7 @@ static FORCE_INLINE void setResultBufPageDirty(SDiskbasedBuf* pBuf, SResultRowPo
setBufPageDirty(pPage, true);
}
-void initGroupedResultInfo(SGroupResInfo* pGroupResInfo, SHashObj* pHashmap, int32_t order);
+void initGroupedResultInfo(SGroupResInfo* pGroupResInfo, SSHashObj* pHashmap, int32_t order);
void cleanupGroupResInfo(SGroupResInfo* pGroupResInfo);
void initMultiResInfoFromArrayList(SGroupResInfo* pGroupResInfo, SArray* pArrayList);
@@ -127,6 +127,7 @@ SArray* extractPartitionColInfo(SNodeList* pNodeList);
SArray* extractColMatchInfo(SNodeList* pNodeList, SDataBlockDescNode* pOutputNodeList, int32_t* numOfOutputCols,
int32_t type);
+void createExprFromTargetNode(SExprInfo* pExp, STargetNode* pTargetNode);
SExprInfo* createExprInfo(SNodeList* pNodeList, SNodeList* pGroupKeys, int32_t* numOfExprs);
SqlFunctionCtx* createSqlFunctionCtx(SExprInfo* pExprInfo, int32_t numOfOutput, int32_t** rowEntryInfoOffset);
diff --git a/source/libs/executor/inc/executorimpl.h b/source/libs/executor/inc/executorimpl.h
index 1b3650ad776a608c91dfb401f08a31d45d70235f..56470f066801283eae6a5414b8f6d90d86ce8e86 100644
--- a/source/libs/executor/inc/executorimpl.h
+++ b/source/libs/executor/inc/executorimpl.h
@@ -34,15 +34,16 @@ extern "C" {
#include "scalar.h"
#include "taosdef.h"
#include "tarray.h"
+#include "tfill.h"
#include "thash.h"
#include "tlockfree.h"
#include "tmsg.h"
#include "tpagedbuf.h"
-#include "tstreamUpdate.h"
#include "tstream.h"
+#include "tstreamUpdate.h"
-#include "vnode.h"
#include "executorInt.h"
+#include "vnode.h"
typedef int32_t (*__block_search_fn_t)(char* data, int32_t num, int64_t key, int32_t order);
@@ -139,18 +140,24 @@ enum {
};
typedef struct {
- //TODO remove prepareStatus
- STqOffsetVal prepareStatus; // for tmq
- STqOffsetVal lastStatus; // for tmq
- void* metaBlk; // for tmq fetching meta
- SSDataBlock* pullOverBlk; // for streaming
- SWalFilterCond cond;
- int64_t lastScanUid;
- int8_t recoverStep;
+ // TODO remove prepareStatus
+ STqOffsetVal prepareStatus; // for tmq
+ STqOffsetVal lastStatus; // for tmq
+ SMqMetaRsp metaRsp; // for tmq fetching meta
+ int8_t returned;
+ int64_t snapshotVer;
+ const SSubmitReq* pReq;
+
+ SSchemaWrapper* schema;
+ char tbName[TSDB_TABLE_NAME_LEN];
+ SSDataBlock* pullOverBlk; // for streaming
+ SWalFilterCond cond;
+ int64_t lastScanUid;
+ int8_t recoverStep;
SQueryTableDataCond tableCond;
- int64_t recoverStartVer;
- int64_t recoverEndVer;
- SStreamState* pState;
+ int64_t recoverStartVer;
+ int64_t recoverEndVer;
+ SStreamState* pState;
} SStreamTaskInfo;
typedef struct {
@@ -162,29 +169,31 @@ typedef struct {
} SSchemaInfo;
typedef struct SExecTaskInfo {
- STaskIdInfo id;
- uint32_t status;
- STimeWindow window;
- STaskCostInfo cost;
- int64_t owner; // if it is in execution
- int32_t code;
-
- int64_t version; // used for stream to record wal version
- SStreamTaskInfo streamInfo;
- SSchemaInfo schemaInfo;
- STableListInfo tableqinfoList; // this is a table list
- const char* sql; // query sql string
- jmp_buf env; // jump to this position when error happens.
- EOPTR_EXEC_MODEL execModel; // operator execution model [batch model|stream model]
- SSubplan* pSubplan;
+ STaskIdInfo id;
+ uint32_t status;
+ STimeWindow window;
+ STaskCostInfo cost;
+ int64_t owner; // if it is in execution
+ int32_t code;
+
+ int64_t version; // used for stream to record wal version
+ SStreamTaskInfo streamInfo;
+ SSchemaInfo schemaInfo;
+ STableListInfo tableqinfoList; // this is a table list
+ const char* sql; // query sql string
+ jmp_buf env; // jump to this position when error happens.
+ EOPTR_EXEC_MODEL execModel; // operator execution model [batch model|stream model]
+ SSubplan* pSubplan;
struct SOperatorInfo* pRoot;
+ SLocalFetch localFetch;
} SExecTaskInfo;
enum {
- OP_NOT_OPENED = 0x0,
- OP_OPENED = 0x1,
+ OP_NOT_OPENED = 0x0,
+ OP_OPENED = 0x1,
OP_RES_TO_RETURN = 0x5,
- OP_EXEC_DONE = 0x9,
+ OP_EXEC_DONE = 0x9,
+ OP_EXEC_RECV = 0x11,
};
typedef struct SOperatorFpSet {
@@ -207,6 +216,7 @@ typedef struct SExprSupp {
typedef struct SOperatorInfo {
uint16_t operatorType;
+ int16_t resultDataBlockId;
bool blocking; // block operator or not
uint8_t status; // denote if current operator is completed
char* name; // name, for debug purpose
@@ -218,12 +228,11 @@ typedef struct SOperatorInfo {
struct SOperatorInfo** pDownstream; // downstram pointer list
int32_t numOfDownstream; // number of downstream. The value is always ONE expect for join operator
SOperatorFpSet fpSet;
- int16_t resultDataBlockId;
} SOperatorInfo;
typedef enum {
EX_SOURCE_DATA_NOT_READY = 0x1,
- EX_SOURCE_DATA_READY = 0x2,
+ EX_SOURCE_DATA_READY = 0x2,
EX_SOURCE_DATA_EXHAUSTED = 0x3,
} EX_SOURCE_STATUS;
@@ -246,26 +255,26 @@ typedef struct SLoadRemoteDataInfo {
} SLoadRemoteDataInfo;
typedef struct SLimitInfo {
- SLimit limit;
- SLimit slimit;
- uint64_t currentGroupId;
- int64_t remainGroupOffset;
- int64_t numOfOutputGroups;
- int64_t remainOffset;
- int64_t numOfOutputRows;
+ SLimit limit;
+ SLimit slimit;
+ uint64_t currentGroupId;
+ int64_t remainGroupOffset;
+ int64_t numOfOutputGroups;
+ int64_t remainOffset;
+ int64_t numOfOutputRows;
} SLimitInfo;
typedef struct SExchangeInfo {
- SArray* pSources;
- SArray* pSourceDataInfo;
- tsem_t ready;
- void* pTransporter;
+ SArray* pSources;
+ SArray* pSourceDataInfo;
+ tsem_t ready;
+ void* pTransporter;
// SArray, result block list, used to keep the multi-block that
// passed by downstream operator
SArray* pResultBlockList;
- int32_t rspBlockIndex; // indicate the return block index in pResultBlockList
- SSDataBlock* pDummyBlock; // dummy block, not keep data
- bool seqLoadData; // sequential load data or not, false by default
+ int32_t rspBlockIndex; // indicate the return block index in pResultBlockList
+ SSDataBlock* pDummyBlock; // dummy block, not keep data
+ bool seqLoadData; // sequential load data or not, false by default
int32_t current;
SLoadRemoteDataInfo loadInfo;
uint64_t self;
@@ -273,22 +282,22 @@ typedef struct SExchangeInfo {
} SExchangeInfo;
typedef struct SColMatchInfo {
- int32_t srcSlotId; // source slot id
+ int32_t srcSlotId; // source slot id
int32_t colId;
int32_t targetSlotId;
- bool output; // todo remove this?
+ bool output; // todo remove this?
bool reserved;
- int32_t matchType; // determinate the source according to col id or slot id
+ int32_t matchType; // determinate the source according to col id or slot id
} SColMatchInfo;
typedef struct SScanInfo {
- int32_t numOfAsc;
- int32_t numOfDesc;
+ int32_t numOfAsc;
+ int32_t numOfDesc;
} SScanInfo;
typedef struct SSampleExecInfo {
- double sampleRatio; // data block sample ratio, 1 by default
- uint32_t seed; // random seed value
+ double sampleRatio; // data block sample ratio, 1 by default
+ uint32_t seed; // random seed value
} SSampleExecInfo;
enum {
@@ -297,42 +306,43 @@ enum {
};
typedef struct SAggSupporter {
- SHashObj* pResultRowHashTable; // quick locate the window object for each result
- char* keyBuf; // window key buffer
- SDiskbasedBuf* pResultBuf; // query result buffer based on blocked-wised disk file
- int32_t resultRowSize; // the result buffer size for each result row, with the meta data size for each row
+ SSHashObj* pResultRowHashTable; // quick locate the window object for each result
+ char* keyBuf; // window key buffer
+ SDiskbasedBuf* pResultBuf; // query result buffer based on blocked-wised disk file
+ int32_t resultRowSize; // the result buffer size for each result row, with the meta data size for each row
+ int32_t currentPageId; // current write page id
} SAggSupporter;
typedef struct {
- // if the upstream is an interval operator, the interval info is also kept here to get the time window to check if current data block needs to be loaded.
- SInterval interval;
- SAggSupporter *pAggSup;
- SExprSupp *pExprSup; // expr supporter of aggregate operator
+ // if the upstream is an interval operator, the interval info is also kept here to get the time window to check if
+ // current data block needs to be loaded.
+ SInterval interval;
+ SAggSupporter* pAggSup;
+ SExprSupp* pExprSup; // expr supporter of aggregate operator
} SAggOptrPushDownInfo;
typedef struct STableScanInfo {
- STsdbReader* dataReader;
- SReadHandle readHandle;
+ STsdbReader* dataReader;
+ SReadHandle readHandle;
SFileBlockLoadRecorder readRecorder;
- SScanInfo scanInfo;
- int32_t scanTimes;
- SNode* pFilterNode; // filter info, which is push down by optimizer
+ SScanInfo scanInfo;
+ int32_t scanTimes;
+ SNode* pFilterNode; // filter info, which is push down by optimizer
- SSDataBlock* pResBlock;
- SArray* pColMatchInfo;
- SExprSupp pseudoSup;
- SQueryTableDataCond cond;
- int32_t scanFlag; // table scan flag to denote if it is a repeat/reverse/main scan
- int32_t dataBlockLoadFlag;
-// SInterval interval; // if the upstream is an interval operator, the interval info is also kept here to get the time window to check if current data block needs to be loaded.
- SSampleExecInfo sample; // sample execution info
- int32_t currentGroupId;
- int32_t currentTable;
- int8_t scanMode;
- int8_t noTable;
+ SSDataBlock* pResBlock;
+ SArray* pColMatchInfo;
+ SExprSupp pseudoSup;
+ SQueryTableDataCond cond;
+ int32_t scanFlag; // table scan flag to denote if it is a repeat/reverse/main scan
+ int32_t dataBlockLoadFlag;
+ SSampleExecInfo sample; // sample execution info
+ int32_t currentGroupId;
+ int32_t currentTable;
+ int8_t scanMode;
+ int8_t noTable;
SAggOptrPushDownInfo pdInfo;
- int8_t assignBlockUid;
+ int8_t assignBlockUid;
} STableScanInfo;
typedef struct STableMergeScanInfo {
@@ -365,7 +375,7 @@ typedef struct STableMergeScanInfo {
SArray* pColMatchInfo;
int32_t numOfOutput;
- SExprSupp pseudoSup;
+ SExprSupp pseudoSup;
SQueryTableDataCond cond;
int32_t scanFlag; // table scan flag to denote if it is a repeat/reverse/main scan
@@ -379,32 +389,33 @@ typedef struct STableMergeScanInfo {
} STableMergeScanInfo;
typedef struct STagScanInfo {
- SColumnInfo *pCols;
- SSDataBlock *pRes;
- SArray *pColMatchInfo;
- int32_t curPos;
- SReadHandle readHandle;
- STableListInfo *pTableList;
+ SColumnInfo* pCols;
+ SSDataBlock* pRes;
+ SArray* pColMatchInfo;
+ int32_t curPos;
+ SReadHandle readHandle;
+ STableListInfo* pTableList;
} STagScanInfo;
typedef struct SLastrowScanInfo {
- SSDataBlock *pRes;
- SReadHandle readHandle;
- void *pLastrowReader;
- SArray *pColMatchInfo;
- int32_t *pSlotIds;
- SExprSupp pseudoExprSup;
- int32_t retrieveType;
- int32_t currentGroupIndex;
- SSDataBlock *pBufferredRes;
- SArray *pUidList;
- int32_t indexOfBufferedRes;
+ SSDataBlock* pRes;
+ SReadHandle readHandle;
+ void* pLastrowReader;
+ SArray* pColMatchInfo;
+ int32_t* pSlotIds;
+ SExprSupp pseudoExprSup;
+ int32_t retrieveType;
+ int32_t currentGroupIndex;
+ SSDataBlock* pBufferredRes;
+ SArray* pUidList;
+ int32_t indexOfBufferedRes;
} SLastrowScanInfo;
typedef enum EStreamScanMode {
STREAM_SCAN_FROM_READERHANDLE = 1,
STREAM_SCAN_FROM_RES,
STREAM_SCAN_FROM_UPDATERES,
+ STREAM_SCAN_FROM_DELETE_DATA,
STREAM_SCAN_FROM_DATAREADER_RETRIEVE,
STREAM_SCAN_FROM_DATAREADER_RANGE,
} EStreamScanMode;
@@ -426,24 +437,39 @@ typedef struct SStreamAggSupporter {
SArray* pCurWins;
int32_t valueSize;
int32_t keySize;
- char* pKeyBuf; // window key buffer
- SDiskbasedBuf* pResultBuf; // query result buffer based on blocked-wised disk file
- int32_t resultRowSize; // the result buffer size for each result row, with the meta data size for each row
+ char* pKeyBuf; // window key buffer
+ SDiskbasedBuf* pResultBuf; // query result buffer based on blocked-wised disk file
+ int32_t resultRowSize; // the result buffer size for each result row, with the meta data size for each row
+ int32_t currentPageId; // buffer page that is active
SSDataBlock* pScanBlock;
} SStreamAggSupporter;
-typedef struct SessionWindowSupporter {
+typedef struct SWindowSupporter {
SStreamAggSupporter* pStreamAggSup;
int64_t gap;
uint16_t parentType;
SAggSupporter* pIntervalAggSup;
-} SessionWindowSupporter;
+} SWindowSupporter;
-typedef struct STimeWindowSupp {
- int8_t calTrigger;
- int64_t waterMark;
- TSKEY maxTs;
- SColumnInfoData timeWindowData; // query time window info for scalar function execution.
+typedef struct SPartitionBySupporter {
+ SArray* pGroupCols; // group by columns, SArray
+ SArray* pGroupColVals; // current group column values, SArray
+ char* keyBuf; // group by keys for hash
+ bool needCalc; // partition by column
+} SPartitionBySupporter;
+
+typedef struct SPartitionDataInfo {
+ uint64_t groupId;
+ SArray* rowIds;
+} SPartitionDataInfo;
+
+typedef struct STimeWindowAggSupp {
+ int8_t calTrigger;
+ int64_t waterMark;
+ int64_t deleteMark;
+ TSKEY maxTs;
+ TSKEY minTs;
+ SColumnInfoData timeWindowData; // query time window info for scalar function execution.
} STimeWindowAggSupp;
typedef struct SStreamScanInfo {
@@ -468,27 +494,41 @@ typedef struct SStreamScanInfo {
uint64_t groupId;
SUpdateInfo* pUpdateInfo;
- EStreamScanMode scanMode;
- SOperatorInfo* pStreamScanOp;
- SOperatorInfo* pTableScanOp;
- SArray* childIds;
- SessionWindowSupporter sessionSup;
- bool assignBlockUid; // assign block uid to groupId, temporarily used for generating rollup SMA.
- int32_t scanWinIndex; // for state operator
- int32_t pullDataResIndex;
- SSDataBlock* pPullDataRes; // pull data SSDataBlock
- SSDataBlock* pDeleteDataRes; // delete data SSDataBlock
- int32_t deleteDataIndex;
- STimeWindow updateWin;
- STimeWindowAggSupp twAggSup;
- SSDataBlock* pUpdateDataRes;
+ EStreamScanMode scanMode;
+ SOperatorInfo* pStreamScanOp;
+ SOperatorInfo* pTableScanOp;
+ SArray* childIds;
+ SWindowSupporter windowSup;
+ SPartitionBySupporter partitionSup;
+ SExprSupp* pPartScalarSup;
+ bool assignBlockUid; // assign block uid to groupId, temporarily used for generating rollup SMA.
+ int32_t scanWinIndex; // for state operator
+ int32_t pullDataResIndex;
+ SSDataBlock* pPullDataRes; // pull data SSDataBlock
+ SSDataBlock* pDeleteDataRes; // delete data SSDataBlock
+ int32_t deleteDataIndex;
+ STimeWindow updateWin;
+ STimeWindowAggSupp twAggSup;
+ SSDataBlock* pUpdateDataRes;
// status for tmq
- // SSchemaWrapper schema;
- SNodeList* pGroupTags;
- SNode* pTagCond;
- SNode* pTagIndexCond;
+ SNodeList* pGroupTags;
+ SNode* pTagCond;
+ SNode* pTagIndexCond;
} SStreamScanInfo;
+typedef struct {
+ // int8_t subType;
+ // bool withMeta;
+ // int64_t suid;
+ // int64_t snapVersion;
+ // void *metaInfo;
+ // void *dataInfo;
+ SVnode* vnode;
+ SSDataBlock pRes; // result SSDataBlock
+ STsdbReader* dataReader;
+ SSnapContext* sContext;
+} SStreamRawScanInfo;
+
typedef struct SSysTableScanInfo {
SRetrieveMetaTableRsp* pRsp;
SRetrieveTableReq req;
@@ -512,14 +552,14 @@ typedef struct SBlockDistInfo {
SSDataBlock* pResBlock;
void* pHandle;
SReadHandle readHandle;
- uint64_t uid; // table uid
+ uint64_t uid; // table uid
} SBlockDistInfo;
// todo remove this
typedef struct SOptrBasicInfo {
- SResultRowInfo resultRowInfo;
- SSDataBlock* pRes;
- bool mergeResultBlock;
+ SResultRowInfo resultRowInfo;
+ SSDataBlock* pRes;
+ bool mergeResultBlock;
} SOptrBasicInfo;
typedef struct SIntervalAggOperatorInfo {
@@ -538,26 +578,27 @@ typedef struct SIntervalAggOperatorInfo {
EOPTR_EXEC_MODEL execModel; // operator execution model [batch model|stream model]
STimeWindowAggSupp twAggSup;
bool invertible;
- SArray* pPrevValues; // SArray used to keep the previous not null value for interpolation.
+ SArray* pPrevValues; // SArray used to keep the previous not null value for interpolation.
bool ignoreExpiredData;
SArray* pRecycledPages;
- SArray* pDelWins; // SWinRes
+ SArray* pDelWins; // SWinRes
int32_t delIndex;
SSDataBlock* pDelRes;
SNode* pCondition;
} SIntervalAggOperatorInfo;
typedef struct SMergeAlignedIntervalAggOperatorInfo {
- SIntervalAggOperatorInfo *intervalAggOperatorInfo;
+ SIntervalAggOperatorInfo* intervalAggOperatorInfo;
- bool hasGroupId;
+// bool hasGroupId;
uint64_t groupId; // current groupId
int64_t curTs; // current ts
SSDataBlock* prefetchedBlock;
SNode* pCondition;
+ SResultRow* pResultRow;
} SMergeAlignedIntervalAggOperatorInfo;
-typedef struct SStreamFinalIntervalOperatorInfo {
+typedef struct SStreamIntervalOperatorInfo {
// SOptrBasicInfo should be first, SAggSupporter should be second for stream encode
SOptrBasicInfo binfo; // basic info
SAggSupporter aggSup; // aggregate supporter
@@ -565,57 +606,74 @@ typedef struct SStreamFinalIntervalOperatorInfo {
SGroupResInfo groupResInfo; // multiple results build supporter
SInterval interval; // interval info
int32_t primaryTsIndex; // primary time stamp slot id from result of downstream operator.
- int32_t order; // current SSDataBlock scan order
+ STimeWindowAggSupp twAggSup;
+ bool invertible;
+ bool ignoreExpiredData;
+ SArray* pRecycledPages;
+ SArray* pDelWins; // SWinRes
+ int32_t delIndex;
+ SSDataBlock* pDelRes;
+ bool isFinal;
+} SStreamIntervalOperatorInfo;
+
+typedef struct SStreamFinalIntervalOperatorInfo {
+ // SOptrBasicInfo should be first, SAggSupporter should be second for stream encode
+ SOptrBasicInfo binfo; // basic info
+ SAggSupporter aggSup; // aggregate supporter
+ SExprSupp scalarSupp; // supporter for perform scalar function
+ SGroupResInfo groupResInfo; // multiple results build supporter
+ SInterval interval; // interval info
+ int32_t primaryTsIndex; // primary time stamp slot id from result of downstream operator.
+ int32_t order; // current SSDataBlock scan order
STimeWindowAggSupp twAggSup;
SArray* pChildren;
SSDataBlock* pUpdateRes;
bool returnUpdate;
- SPhysiNode* pPhyNode; // create new child
+ SPhysiNode* pPhyNode; // create new child
bool isFinal;
SHashObj* pPullDataMap;
- SArray* pPullWins; // SPullWindowInfo
+ SArray* pPullWins; // SPullWindowInfo
int32_t pullIndex;
SSDataBlock* pPullDataRes;
bool ignoreExpiredData;
SArray* pRecycledPages;
- SArray* pDelWins; // SWinRes
+ SArray* pDelWins; // SWinRes
int32_t delIndex;
SSDataBlock* pDelRes;
} SStreamFinalIntervalOperatorInfo;
typedef struct SAggOperatorInfo {
// SOptrBasicInfo should be first, SAggSupporter should be second for stream encode
- SOptrBasicInfo binfo;
- SAggSupporter aggSup;
+ SOptrBasicInfo binfo;
+ SAggSupporter aggSup;
- STableQueryInfo *current;
- uint64_t groupId;
- SGroupResInfo groupResInfo;
- SExprSupp scalarExprSup;
- SNode *pCondition;
+ STableQueryInfo* current;
+ uint64_t groupId;
+ SGroupResInfo groupResInfo;
+ SExprSupp scalarExprSup;
+ SNode* pCondition;
} SAggOperatorInfo;
typedef struct SProjectOperatorInfo {
- // SOptrBasicInfo should be first, SAggSupporter should be second for stream encode
- SOptrBasicInfo binfo;
- SAggSupporter aggSup;
- SNode* pFilterNode; // filter info, which is push down by optimizer
- SArray* pPseudoColInfo;
- SLimitInfo limitInfo;
- bool mergeDataBlocks;
- SSDataBlock* pFinalRes;
- SNode* pCondition;
+ SOptrBasicInfo binfo;
+ SAggSupporter aggSup;
+ SNode* pFilterNode; // filter info, which is push down by optimizer
+ SArray* pPseudoColInfo;
+ SLimitInfo limitInfo;
+ bool mergeDataBlocks;
+ SSDataBlock* pFinalRes;
+ SNode* pCondition;
} SProjectOperatorInfo;
typedef struct SIndefOperatorInfo {
- SOptrBasicInfo binfo;
- SAggSupporter aggSup;
- SArray* pPseudoColInfo;
- SExprSupp scalarSup;
- SNode* pCondition;
- uint64_t groupId;
+ SOptrBasicInfo binfo;
+ SAggSupporter aggSup;
+ SArray* pPseudoColInfo;
+ SExprSupp scalarSup;
+ SNode* pCondition;
+ uint64_t groupId;
- SSDataBlock* pNextGroupRes;
+ SSDataBlock* pNextGroupRes;
} SIndefOperatorInfo;
typedef struct SFillOperatorInfo {
@@ -630,7 +688,7 @@ typedef struct SFillOperatorInfo {
SArray* pColMatchColInfo;
int32_t primaryTsCol;
int32_t primarySrcSlotId;
- uint64_t curGroupId; // current handled group id
+ uint64_t curGroupId; // current handled group id
SExprInfo* pExprInfo;
int32_t numOfExpr;
SExprInfo* pNotFillExprInfo;
@@ -638,24 +696,23 @@ typedef struct SFillOperatorInfo {
} SFillOperatorInfo;
typedef struct SGroupbyOperatorInfo {
- // SOptrBasicInfo should be first, SAggSupporter should be second for stream encode
- SOptrBasicInfo binfo;
- SAggSupporter aggSup;
-
- SArray* pGroupCols; // group by columns, SArray
- SArray* pGroupColVals; // current group column values, SArray
- SNode* pCondition;
- bool isInit; // denote if current val is initialized or not
- char* keyBuf; // group by keys for hash
- int32_t groupKeyLen; // total group by column width
- SGroupResInfo groupResInfo;
- SExprSupp scalarSup;
+ SOptrBasicInfo binfo;
+ SAggSupporter aggSup;
+
+ SArray* pGroupCols; // group by columns, SArray
+ SArray* pGroupColVals; // current group column values, SArray
+ SNode* pCondition;
+ bool isInit; // denote if current val is initialized or not
+ char* keyBuf; // group by keys for hash
+ int32_t groupKeyLen; // total group by column width
+ SGroupResInfo groupResInfo;
+ SExprSupp scalarSup;
} SGroupbyOperatorInfo;
typedef struct SDataGroupInfo {
- uint64_t groupId;
- int64_t numOfRows;
- SArray* pPageList;
+ uint64_t groupId;
+ int64_t numOfRows;
+ SArray* pPageList;
} SDataGroupInfo;
// The sort in partition may be needed later.
@@ -667,13 +724,12 @@ typedef struct SPartitionOperatorInfo {
int32_t groupKeyLen; // total group by column width
SHashObj* pGroupSet; // quick locate the window object for each result
- SDiskbasedBuf* pBuf; // query result buffer based on blocked-wised disk file
- int32_t rowCapacity; // maximum number of rows for each buffer page
- int32_t* columnOffset; // start position for each column data
- SArray* sortedGroupArray; // SDataGroupInfo sorted by group id
- int32_t groupIndex; // group index
- int32_t pageIndex; // page index of current group
- SSDataBlock* pUpdateRes;
+ SDiskbasedBuf* pBuf; // query result buffer based on blocked-wised disk file
+ int32_t rowCapacity; // maximum number of rows for each buffer page
+ int32_t* columnOffset; // start position for each column data
+ SArray* sortedGroupArray; // SDataGroupInfo sorted by group id
+ int32_t groupIndex; // group index
+ int32_t pageIndex; // page index of current group
SExprSupp scalarSup;
} SPartitionOperatorInfo;
@@ -686,76 +742,102 @@ typedef struct SWindowRowsSup {
} SWindowRowsSup;
typedef struct SSessionAggOperatorInfo {
- // SOptrBasicInfo should be first, SAggSupporter should be second for stream encode
- SOptrBasicInfo binfo;
- SAggSupporter aggSup;
+ SOptrBasicInfo binfo;
+ SAggSupporter aggSup;
SGroupResInfo groupResInfo;
SWindowRowsSup winSup;
- bool reptScan; // next round scan
- int64_t gap; // session window gap
- int32_t tsSlotId; // primary timestamp slot id
+ bool reptScan; // next round scan
+ int64_t gap; // session window gap
+ int32_t tsSlotId; // primary timestamp slot id
STimeWindowAggSupp twAggSup;
- const SNode* pCondition;
+ const SNode* pCondition;
} SSessionAggOperatorInfo;
typedef struct SResultWindowInfo {
SResultRowPosition pos;
- STimeWindow win;
- uint64_t groupId;
- bool isOutput;
- bool isClosed;
+ STimeWindow win;
+ uint64_t groupId;
+ bool isOutput;
+ bool isClosed;
} SResultWindowInfo;
typedef struct SStateWindowInfo {
SResultWindowInfo winInfo;
- SStateKeys stateKey;
+ SStateKeys stateKey;
} SStateWindowInfo;
typedef struct SStreamSessionAggOperatorInfo {
- SOptrBasicInfo binfo;
- SStreamAggSupporter streamAggSup;
- SExprSupp scalarSupp; // supporter for perform scalar function
- SGroupResInfo groupResInfo;
- int64_t gap; // session window gap
- int32_t primaryTsIndex; // primary timestamp slot id
- int32_t endTsIndex; // window end timestamp slot id
- int32_t order; // current SSDataBlock scan order
- STimeWindowAggSupp twAggSup;
- SSDataBlock* pWinBlock; // window result
- SqlFunctionCtx* pDummyCtx; // for combine
- SSDataBlock* pDelRes; // delete result
- bool returnDelete;
- SSDataBlock* pUpdateRes; // update window
- SHashObj* pStDeleted;
- void* pDelIterator;
- SArray* pChildren; // cache for children's result; final stream operator
- SPhysiNode* pPhyNode; // create new child
- bool isFinal;
- bool ignoreExpiredData;
+ SOptrBasicInfo binfo;
+ SStreamAggSupporter streamAggSup;
+ SExprSupp scalarSupp; // supporter for perform scalar function
+ SGroupResInfo groupResInfo;
+ int64_t gap; // session window gap
+ int32_t primaryTsIndex; // primary timestamp slot id
+ int32_t endTsIndex; // window end timestamp slot id
+ int32_t order; // current SSDataBlock scan order
+ STimeWindowAggSupp twAggSup;
+ SSDataBlock* pWinBlock; // window result
+ SqlFunctionCtx* pDummyCtx; // for combine
+ SSDataBlock* pDelRes; // delete result
+ SSDataBlock* pUpdateRes; // update window
+ bool returnUpdate;
+ SHashObj* pStDeleted;
+ void* pDelIterator;
+ SArray* pChildren; // cache for children's result; final stream operator
+ SPhysiNode* pPhyNode; // create new child
+ bool isFinal;
+ bool ignoreExpiredData;
} SStreamSessionAggOperatorInfo;
+typedef struct SStreamPartitionOperatorInfo {
+ SOptrBasicInfo binfo;
+ SPartitionBySupporter partitionSup;
+ SExprSupp scalarSup;
+ SHashObj* pPartitions;
+ void* parIte;
+ SSDataBlock* pInputDataBlock;
+ int32_t tsColIndex;
+ SSDataBlock* pDelRes;
+} SStreamPartitionOperatorInfo;
+
+typedef struct SStreamFillOperatorInfo {
+ SStreamFillSupporter* pFillSup;
+ SSDataBlock* pRes;
+ SSDataBlock* pSrcBlock;
+ int32_t srcRowIndex;
+ SSDataBlock* pPrevSrcBlock;
+ SSDataBlock* pSrcDelBlock;
+ int32_t srcDelRowIndex;
+ SSDataBlock* pDelRes;
+ SNode* pCondition;
+ SArray* pColMatchColInfo;
+ int32_t primaryTsCol;
+ int32_t primarySrcSlotId;
+ SStreamFillInfo* pFillInfo;
+} SStreamFillOperatorInfo;
+
typedef struct STimeSliceOperatorInfo {
- SSDataBlock* pRes;
- STimeWindow win;
- SInterval interval;
- int64_t current;
- SArray* pPrevRow; // SArray
- SArray* pNextRow; // SArray
- SArray* pLinearInfo; // SArray
- bool fillLastPoint;
- bool isPrevRowSet;
- bool isNextRowSet;
- int32_t fillType; // fill type
- SColumn tsCol; // primary timestamp column
- SExprSupp scalarSup; // scalar calculation
- struct SFillColInfo* pFillColInfo; // fill column info
+ SSDataBlock* pRes;
+ STimeWindow win;
+ SInterval interval;
+ int64_t current;
+ SArray* pPrevRow; // SArray
+ SArray* pNextRow; // SArray
+ SArray* pLinearInfo; // SArray
+ bool fillLastPoint;
+ bool isPrevRowSet;
+ bool isNextRowSet;
+ int32_t fillType; // fill type
+ SColumn tsCol; // primary timestamp column
+ SExprSupp scalarSup; // scalar calculation
+ struct SFillColInfo* pFillColInfo; // fill column info
} STimeSliceOperatorInfo;
typedef struct SStateWindowOperatorInfo {
// SOptrBasicInfo should be first, SAggSupporter should be second for stream encode
- SOptrBasicInfo binfo;
- SAggSupporter aggSup;
+ SOptrBasicInfo binfo;
+ SAggSupporter aggSup;
SGroupResInfo groupResInfo;
SWindowRowsSup winSup;
@@ -764,57 +846,37 @@ typedef struct SStateWindowOperatorInfo {
SStateKeys stateKey;
int32_t tsSlotId; // primary timestamp column slot id
STimeWindowAggSupp twAggSup;
- // bool reptScan;
const SNode* pCondition;
} SStateWindowOperatorInfo;
typedef struct SStreamStateAggOperatorInfo {
- SOptrBasicInfo binfo;
- SStreamAggSupporter streamAggSup;
- SExprSupp scalarSupp; // supporter for perform scalar function
- SGroupResInfo groupResInfo;
- int32_t primaryTsIndex; // primary timestamp slot id
- int32_t order; // current SSDataBlock scan order
- STimeWindowAggSupp twAggSup;
- SColumn stateCol;
- SqlFunctionCtx* pDummyCtx; // for combine
- SSDataBlock* pDelRes;
- SHashObj* pSeDeleted;
- void* pDelIterator;
- SArray* pChildren; // cache for children's result;
- bool ignoreExpiredData;
+ SOptrBasicInfo binfo;
+ SStreamAggSupporter streamAggSup;
+ SExprSupp scalarSupp; // supporter for perform scalar function
+ SGroupResInfo groupResInfo;
+ int32_t primaryTsIndex; // primary timestamp slot id
+ int32_t order; // current SSDataBlock scan order
+ STimeWindowAggSupp twAggSup;
+ SColumn stateCol;
+ SqlFunctionCtx* pDummyCtx; // for combine
+ SSDataBlock* pDelRes;
+ SHashObj* pSeDeleted;
+ void* pDelIterator;
+ SArray* pChildren; // cache for children's result;
+ bool ignoreExpiredData;
} SStreamStateAggOperatorInfo;
-typedef struct SSortedMergeOperatorInfo {
- // SOptrBasicInfo should be first, SAggSupporter should be second for stream encode
- SOptrBasicInfo binfo;
- SAggSupporter aggSup;
-
- SArray* pSortInfo;
- int32_t numOfSources;
- SSortHandle *pSortHandle;
- int32_t bufPageSize;
- uint32_t sortBufSize; // max buffer size for in-memory sort
- int32_t resultRowFactor;
- bool hasGroupVal;
- SDiskbasedBuf *pTupleStore; // keep the final results
- int32_t numOfResPerPage;
- char** groupVal;
- SArray *groupInfo;
-} SSortedMergeOperatorInfo;
-
typedef struct SSortOperatorInfo {
SOptrBasicInfo binfo;
- uint32_t sortBufSize; // max buffer size for in-memory sort
- SArray* pSortInfo;
- SSortHandle* pSortHandle;
- SArray* pColMatchInfo; // for index map from table scan output
- int32_t bufPageSize;
-
- int64_t startTs; // sort start time
- uint64_t sortElapsed; // sort elapsed time, time to flush to disk not included.
- SLimitInfo limitInfo;
- SNode* pCondition;
+ uint32_t sortBufSize; // max buffer size for in-memory sort
+ SArray* pSortInfo;
+ SSortHandle* pSortHandle;
+ SArray* pColMatchInfo; // for index map from table scan output
+ int32_t bufPageSize;
+ int64_t startTs; // sort start time
+ uint64_t sortElapsed; // sort elapsed time, time to flush to disk not included.
+ SLimitInfo limitInfo;
+ SNode* pCondition;
} SSortOperatorInfo;
typedef struct STagFilterOperatorInfo {
@@ -822,18 +884,18 @@ typedef struct STagFilterOperatorInfo {
} STagFilterOperatorInfo;
typedef struct SJoinOperatorInfo {
- SSDataBlock *pRes;
- int32_t joinType;
- int32_t inputOrder;
-
- SSDataBlock *pLeft;
- int32_t leftPos;
- SColumnInfo leftCol;
-
- SSDataBlock *pRight;
- int32_t rightPos;
- SColumnInfo rightCol;
- SNode *pCondAfterMerge;
+ SSDataBlock* pRes;
+ int32_t joinType;
+ int32_t inputOrder;
+
+ SSDataBlock* pLeft;
+ int32_t leftPos;
+ SColumnInfo leftCol;
+
+ SSDataBlock* pRight;
+ int32_t rightPos;
+ SColumnInfo rightCol;
+ SNode* pCondAfterMerge;
} SJoinOperatorInfo;
#define OPTR_IS_OPENED(_optr) (((_optr)->status & OP_OPENED) == OP_OPENED)
@@ -842,11 +904,10 @@ typedef struct SJoinOperatorInfo {
void doDestroyExchangeOperatorInfo(void* param);
SOperatorFpSet createOperatorFpSet(__optr_open_fn_t openFn, __optr_fn_t nextFn, __optr_fn_t streamFn,
- __optr_fn_t cleanup, __optr_close_fn_t closeFn, __optr_encode_fn_t encode,
- __optr_decode_fn_t decode, __optr_explain_fn_t explain);
+ __optr_fn_t cleanup, __optr_close_fn_t closeFn, __optr_encode_fn_t encode,
+ __optr_decode_fn_t decode, __optr_explain_fn_t explain);
int32_t operatorDummyOpenFn(SOperatorInfo* pOperator);
-void operatorDummyCloseFn(void* param, int32_t numOfCols);
int32_t appendDownstream(SOperatorInfo* p, SOperatorInfo** pDownstream, int32_t num);
void initBasicInfo(SOptrBasicInfo* pInfo, SSDataBlock* pBlock);
@@ -854,24 +915,25 @@ void cleanupBasicInfo(SOptrBasicInfo* pInfo);
int32_t initExprSupp(SExprSupp* pSup, SExprInfo* pExprInfo, int32_t numOfExpr);
void cleanupExprSupp(SExprSupp* pSup);
void destroyExprInfo(SExprInfo* pExpr, int32_t numOfExprs);
-int32_t initAggInfo(SExprSupp *pSup, SAggSupporter* pAggSup, SExprInfo* pExprInfo, int32_t numOfCols, size_t keyBufSize,
+int32_t initAggInfo(SExprSupp* pSup, SAggSupporter* pAggSup, SExprInfo* pExprInfo, int32_t numOfCols, size_t keyBufSize,
const char* pkey);
-void initResultSizeInfo(SResultInfo * pResultInfo, int32_t numOfRows);
-void doBuildResultDatablock(SOperatorInfo* pOperator, SOptrBasicInfo* pbInfo, SGroupResInfo* pGroupResInfo, SDiskbasedBuf* pBuf);
-int32_t handleLimitOffset(SOperatorInfo *pOperator, SLimitInfo* pLimitInfo, SSDataBlock* pBlock, bool holdDataInBuf);
+void initResultSizeInfo(SResultInfo* pResultInfo, int32_t numOfRows);
+void doBuildResultDatablock(SOperatorInfo* pOperator, SOptrBasicInfo* pbInfo, SGroupResInfo* pGroupResInfo,
+ SDiskbasedBuf* pBuf);
+int32_t handleLimitOffset(SOperatorInfo* pOperator, SLimitInfo* pLimitInfo, SSDataBlock* pBlock, bool holdDataInBuf);
bool hasLimitOffsetInfo(SLimitInfo* pLimitInfo);
void initLimitInfo(const SNode* pLimit, const SNode* pSLimit, SLimitInfo* pLimitInfo);
-void doApplyFunctions(SExecTaskInfo* taskInfo, SqlFunctionCtx* pCtx, SColumnInfoData* pTimeWindowData, int32_t offset,
- int32_t forwardStep, int32_t numOfTotal, int32_t numOfOutput);
+void doApplyFunctions(SExecTaskInfo* taskInfo, SqlFunctionCtx* pCtx, SColumnInfoData* pTimeWindowData, int32_t offset,
+ int32_t forwardStep, int32_t numOfTotal, int32_t numOfOutput);
-int32_t extractDataBlockFromFetchRsp(SSDataBlock* pRes, char* pData, int32_t numOfOutput, SArray* pColList, char** pNextStart);
-void updateLoadRemoteInfo(SLoadRemoteDataInfo *pInfo, int32_t numOfRows, int32_t dataLen, int64_t startTs,
- SOperatorInfo* pOperator);
+int32_t extractDataBlockFromFetchRsp(SSDataBlock* pRes, char* pData, SArray* pColList, char** pNextStart);
+void updateLoadRemoteInfo(SLoadRemoteDataInfo* pInfo, int32_t numOfRows, int32_t dataLen, int64_t startTs,
+ SOperatorInfo* pOperator);
STimeWindow getFirstQualifiedTimeWindow(int64_t ts, STimeWindow* pWindow, SInterval* pInterval, int32_t order);
-int32_t getTableScanInfo(SOperatorInfo* pOperator, int32_t *order, int32_t* scanFlag);
+int32_t getTableScanInfo(SOperatorInfo* pOperator, int32_t* order, int32_t* scanFlag);
int32_t getBufferPgSize(int32_t rowSize, uint32_t* defaultPgsz, uint32_t* defaultBufsz);
void doSetOperatorCompleted(SOperatorInfo* pOperator);
@@ -879,10 +941,9 @@ void doFilter(const SNode* pFilterNode, SSDataBlock* pBlock, const SArray* pC
int32_t addTagPseudoColumnData(SReadHandle* pHandle, SExprInfo* pPseudoExpr, int32_t numOfPseudoExpr,
SSDataBlock* pBlock, const char* idStr);
-void cleanupAggSup(SAggSupporter* pAggSup);
-void destroyBasicOperatorInfo(void* param, int32_t numOfOutput);
-void appendOneRowToDataBlock(SSDataBlock* pBlock, STupleHandle* pTupleHandle);
-void setTbNameColData(void* pMeta, const SSDataBlock* pBlock, SColumnInfoData* pColInfoData, int32_t functionId);
+void cleanupAggSup(SAggSupporter* pAggSup);
+void appendOneRowToDataBlock(SSDataBlock* pBlock, STupleHandle* pTupleHandle);
+void setTbNameColData(void* pMeta, const SSDataBlock* pBlock, SColumnInfoData* pColInfoData, int32_t functionId);
int32_t doPrepareScan(SOperatorInfo* pOperator, uint64_t uid, int64_t ts);
int32_t doGetScanStatus(SOperatorInfo* pOperator, uint64_t* uid, int64_t* ts);
@@ -891,39 +952,41 @@ SSDataBlock* loadNextDataBlock(void* param);
void setResultRowInitCtx(SResultRow* pResult, SqlFunctionCtx* pCtx, int32_t numOfOutput, int32_t* rowEntryInfoOffset);
-SResultRow* doSetResultOutBufByKey(SDiskbasedBuf* pResultBuf, SResultRowInfo* pResultRowInfo,
- char* pData, int16_t bytes, bool masterscan, uint64_t groupId,
- SExecTaskInfo* pTaskInfo, bool isIntervalQuery, SAggSupporter* pSup);
+SResultRow* doSetResultOutBufByKey(SDiskbasedBuf* pResultBuf, SResultRowInfo* pResultRowInfo, char* pData,
+ int16_t bytes, bool masterscan, uint64_t groupId, SExecTaskInfo* pTaskInfo,
+ bool isIntervalQuery, SAggSupporter* pSup);
SOperatorInfo* createExchangeOperatorInfo(void* pTransporter, SExchangePhysiNode* pExNode, SExecTaskInfo* pTaskInfo);
-SOperatorInfo* createTableScanOperatorInfo(STableScanPhysiNode* pTableScanNode, SReadHandle* pHandle, SExecTaskInfo* pTaskInfo);
+SOperatorInfo* createTableScanOperatorInfo(STableScanPhysiNode* pTableScanNode, SReadHandle* pHandle,
+ SExecTaskInfo* pTaskInfo);
SOperatorInfo* createTagScanOperatorInfo(SReadHandle* pReadHandle, STagScanPhysiNode* pPhyNode,
STableListInfo* pTableListInfo, SExecTaskInfo* pTaskInfo);
-SOperatorInfo* createSysTableScanOperatorInfo(void* readHandle, SSystemTableScanPhysiNode *pScanPhyNode, const char* pUser, SExecTaskInfo* pTaskInfo);
+SOperatorInfo* createSysTableScanOperatorInfo(void* readHandle, SSystemTableScanPhysiNode* pScanPhyNode,
+ const char* pUser, SExecTaskInfo* pTaskInfo);
-SOperatorInfo* createAggregateOperatorInfo(SOperatorInfo* downstream, SExprInfo* pExprInfo, int32_t numOfCols, SSDataBlock* pResultBlock, SNode* pCondition, SExprInfo* pScalarExprInfo,
+SOperatorInfo* createAggregateOperatorInfo(SOperatorInfo* downstream, SExprInfo* pExprInfo, int32_t numOfCols,
+ SSDataBlock* pResultBlock, SNode* pCondition, SExprInfo* pScalarExprInfo,
int32_t numOfScalarExpr, bool mergeResult, SExecTaskInfo* pTaskInfo);
-SOperatorInfo* createIndefinitOutputOperatorInfo(SOperatorInfo* downstream, SPhysiNode *pNode, SExecTaskInfo* pTaskInfo);
-SOperatorInfo* createProjectOperatorInfo(SOperatorInfo* downstream, SProjectPhysiNode* pProjPhyNode, SExecTaskInfo* pTaskInfo);
+SOperatorInfo* createIndefinitOutputOperatorInfo(SOperatorInfo* downstream, SPhysiNode* pNode,
+ SExecTaskInfo* pTaskInfo);
+SOperatorInfo* createProjectOperatorInfo(SOperatorInfo* downstream, SProjectPhysiNode* pProjPhyNode,
+ SExecTaskInfo* pTaskInfo);
SOperatorInfo* createSortOperatorInfo(SOperatorInfo* downstream, SSortPhysiNode* pSortNode, SExecTaskInfo* pTaskInfo);
-SOperatorInfo* createMultiwayMergeOperatorInfo(SOperatorInfo** dowStreams, size_t numStreams, SMergePhysiNode* pMergePhysiNode, SExecTaskInfo* pTaskInfo);
-SOperatorInfo* createSortedMergeOperatorInfo(SOperatorInfo** downstream, int32_t numOfDownstream, SExprInfo* pExprInfo, int32_t num, SArray* pSortInfo, SArray* pGroupInfo, SExecTaskInfo* pTaskInfo);
-SOperatorInfo* createLastrowScanOperator(SLastRowScanPhysiNode* pTableScanNode, SReadHandle* readHandle, SExecTaskInfo* pTaskInfo);
+SOperatorInfo* createMultiwayMergeOperatorInfo(SOperatorInfo** dowStreams, size_t numStreams,
+ SMergePhysiNode* pMergePhysiNode, SExecTaskInfo* pTaskInfo);
+SOperatorInfo* createCacherowsScanOperator(SLastRowScanPhysiNode* pTableScanNode, SReadHandle* readHandle,
+ SExecTaskInfo* pTaskInfo);
SOperatorInfo* createIntervalOperatorInfo(SOperatorInfo* downstream, SExprInfo* pExprInfo, int32_t numOfCols,
SSDataBlock* pResBlock, SInterval* pInterval, int32_t primaryTsSlotId,
- STimeWindowAggSupp* pTwAggSupp, SIntervalPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo, bool isStream);
-
-SOperatorInfo* createMergeIntervalOperatorInfo(SOperatorInfo* downstream, SExprInfo* pExprInfo, int32_t numOfCols,
- SSDataBlock* pResBlock, SInterval* pInterval, int32_t primaryTsSlotId,
- bool mergeResultBlock, SExecTaskInfo* pTaskInfo);
-
-SOperatorInfo* createMergeAlignedIntervalOperatorInfo(SOperatorInfo* downstream, SExprInfo* pExprInfo, int32_t numOfCols,
- SSDataBlock* pResBlock, SInterval* pInterval, int32_t primaryTsSlotId,
- SNode* pCondition, bool mergeResultBlocks, SExecTaskInfo* pTaskInfo);
-
+ STimeWindowAggSupp* pTwAggSupp, SIntervalPhysiNode* pPhyNode,
+ SExecTaskInfo* pTaskInfo, bool isStream);
+SOperatorInfo* createMergeIntervalOperatorInfo(SOperatorInfo* downstream, SMergeIntervalPhysiNode* pIntervalPhyNode,
+ SExecTaskInfo* pTaskInfo);
+SOperatorInfo* createMergeAlignedIntervalOperatorInfo(SOperatorInfo* downstream, SMergeAlignedIntervalPhysiNode* pNode,
+ SExecTaskInfo* pTaskInfo);
SOperatorInfo* createStreamFinalIntervalOperatorInfo(SOperatorInfo* downstream, SPhysiNode* pPhyNode,
SExecTaskInfo* pTaskInfo, int32_t numOfChild);
SOperatorInfo* createSessionAggOperatorInfo(SOperatorInfo* downstream, SSessionWinodwPhysiNode* pSessionNode,
@@ -931,46 +994,51 @@ SOperatorInfo* createSessionAggOperatorInfo(SOperatorInfo* downstream, SSessionW
SOperatorInfo* createGroupOperatorInfo(SOperatorInfo* downstream, SExprInfo* pExprInfo, int32_t numOfCols,
SSDataBlock* pResultBlock, SArray* pGroupColList, SNode* pCondition,
SExprInfo* pScalarExprInfo, int32_t numOfScalarExpr, SExecTaskInfo* pTaskInfo);
-SOperatorInfo* createDataBlockInfoScanOperator(void* dataReader, SReadHandle* readHandle, uint64_t uid, SBlockDistScanPhysiNode* pBlockScanNode,
- SExecTaskInfo* pTaskInfo);
+SOperatorInfo* createDataBlockInfoScanOperator(void* dataReader, SReadHandle* readHandle, uint64_t uid,
+ SBlockDistScanPhysiNode* pBlockScanNode, SExecTaskInfo* pTaskInfo);
SOperatorInfo* createStreamScanOperatorInfo(SReadHandle* pHandle, STableScanPhysiNode* pTableScanNode, SNode* pTagCond,
SExecTaskInfo* pTaskInfo);
-SOperatorInfo* createFillOperatorInfo(SOperatorInfo* downstream, SFillPhysiNode* pPhyFillNode, SExecTaskInfo* pTaskInfo);
-
-SOperatorInfo* createStatewindowOperatorInfo(SOperatorInfo* downstream, SExprInfo* pExpr, int32_t numOfCols,
- SSDataBlock* pResBlock, STimeWindowAggSupp *pTwAggSupp, int32_t tsSlotId,
- SColumn* pStateKeyCol, SNode* pCondition, SExecTaskInfo* pTaskInfo);
-
-SOperatorInfo* createPartitionOperatorInfo(SOperatorInfo* downstream, SPartitionPhysiNode* pPartNode, SExecTaskInfo* pTaskInfo);
-
-SOperatorInfo* createTimeSliceOperatorInfo(SOperatorInfo* downstream, SPhysiNode* pNode, SExecTaskInfo* pTaskInfo);
+SOperatorInfo* createRawScanOperatorInfo(SReadHandle* pHandle, SExecTaskInfo* pTaskInfo);
-SOperatorInfo* createMergeJoinOperatorInfo(SOperatorInfo** pDownstream, int32_t numOfDownstream, SSortMergeJoinPhysiNode* pJoinNode,
+SOperatorInfo* createFillOperatorInfo(SOperatorInfo* downstream, SFillPhysiNode* pPhyFillNode,
+ SExecTaskInfo* pTaskInfo);
+SOperatorInfo* createStatewindowOperatorInfo(SOperatorInfo* downstream, SStateWinodwPhysiNode* pStateNode,
+ SExecTaskInfo* pTaskInfo);
+SOperatorInfo* createPartitionOperatorInfo(SOperatorInfo* downstream, SPartitionPhysiNode* pPartNode,
SExecTaskInfo* pTaskInfo);
-SOperatorInfo* createStreamSessionAggOperatorInfo(SOperatorInfo* downstream,
- SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo);
-SOperatorInfo* createStreamFinalSessionAggOperatorInfo(SOperatorInfo* downstream,
- SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo, int32_t numOfChild);
-
-SOperatorInfo* createStreamStateAggOperatorInfo(SOperatorInfo* downstream, SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo);
+SOperatorInfo* createStreamPartitionOperatorInfo(SOperatorInfo* downstream, SStreamPartitionPhysiNode* pPartNode,
+ SExecTaskInfo* pTaskInfo);
-#if 0
-SOperatorInfo* createTableSeqScanOperatorInfo(void* pTsdbReadHandle, STaskRuntimeEnv* pRuntimeEnv);
-#endif
+SOperatorInfo* createTimeSliceOperatorInfo(SOperatorInfo* downstream, SPhysiNode* pNode, SExecTaskInfo* pTaskInfo);
+SOperatorInfo* createMergeJoinOperatorInfo(SOperatorInfo** pDownstream, int32_t numOfDownstream,
+ SSortMergeJoinPhysiNode* pJoinNode, SExecTaskInfo* pTaskInfo);
+
+SOperatorInfo* createStreamSessionAggOperatorInfo(SOperatorInfo* downstream, SPhysiNode* pPhyNode,
+ SExecTaskInfo* pTaskInfo);
+SOperatorInfo* createStreamFinalSessionAggOperatorInfo(SOperatorInfo* downstream, SPhysiNode* pPhyNode,
+ SExecTaskInfo* pTaskInfo, int32_t numOfChild);
+SOperatorInfo* createStreamIntervalOperatorInfo(SOperatorInfo* downstream,
+ SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo);
+
+SOperatorInfo* createStreamStateAggOperatorInfo(SOperatorInfo* downstream, SPhysiNode* pPhyNode,
+ SExecTaskInfo* pTaskInfo);
+SOperatorInfo* createStreamFillOperatorInfo(SOperatorInfo* downstream, SStreamFillPhysiNode* pPhyFillNode,
+ SExecTaskInfo* pTaskInfo);
int32_t projectApplyFunctions(SExprInfo* pExpr, SSDataBlock* pResult, SSDataBlock* pSrcBlock, SqlFunctionCtx* pCtx,
- int32_t numOfOutput, SArray* pPseudoList);
+ int32_t numOfOutput, SArray* pPseudoList);
-void setInputDataBlock(SOperatorInfo* pOperator, SqlFunctionCtx* pCtx, SSDataBlock* pBlock, int32_t order, int32_t scanFlag, bool createDummyCol);
+void setInputDataBlock(SOperatorInfo* pOperator, SqlFunctionCtx* pCtx, SSDataBlock* pBlock, int32_t order,
+ int32_t scanFlag, bool createDummyCol);
bool isTaskKilled(SExecTaskInfo* pTaskInfo);
int32_t checkForQueryBuf(size_t numOfTables);
-void setTaskKilled(SExecTaskInfo* pTaskInfo);
-void queryCostStatis(SExecTaskInfo* pTaskInfo);
+void setTaskKilled(SExecTaskInfo* pTaskInfo);
+void queryCostStatis(SExecTaskInfo* pTaskInfo);
void doDestroyTask(SExecTaskInfo* pTaskInfo);
int32_t getMaximumIdleDurationSec();
@@ -982,7 +1050,7 @@ int32_t getMaximumIdleDurationSec();
* nOptrWithVal: *nOptrWithVal save the number of optr with value
* return: result code, 0 means success
*/
-int32_t encodeOperator(SOperatorInfo* ops, char** data, int32_t *length, int32_t *nOptrWithVal);
+int32_t encodeOperator(SOperatorInfo* ops, char** data, int32_t* length, int32_t* nOptrWithVal);
/*
* ops: root operator, created by caller
@@ -995,7 +1063,7 @@ int32_t decodeOperator(SOperatorInfo* ops, const char* data, int32_t length);
void setTaskStatus(SExecTaskInfo* pTaskInfo, int8_t status);
int32_t createExecTaskInfoImpl(SSubplan* pPlan, SExecTaskInfo** pTaskInfo, SReadHandle* pHandle, uint64_t taskId,
char* sql, EOPTR_EXEC_MODEL model);
-int32_t createDataSinkParam(SDataSinkNode *pNode, void **pParam, qTaskInfo_t* pTaskInfo, SReadHandle* readHandle);
+int32_t createDataSinkParam(SDataSinkNode* pNode, void** pParam, qTaskInfo_t* pTaskInfo, SReadHandle* readHandle);
int32_t getOperatorExplainExecInfo(SOperatorInfo* operatorInfo, SArray* pExecInfoList);
int32_t aggDecodeResultRow(SOperatorInfo* pOperator, char* result);
@@ -1004,41 +1072,50 @@ int32_t aggEncodeResultRow(SOperatorInfo* pOperator, char** result, int32_t* len
STimeWindow getActiveTimeWindow(SDiskbasedBuf* pBuf, SResultRowInfo* pResultRowInfo, int64_t ts, SInterval* pInterval,
int32_t order);
int32_t getNumOfRowsInTimeWindow(SDataBlockInfo* pDataBlockInfo, TSKEY* pPrimaryColumn, int32_t startPos, TSKEY ekey,
- __block_search_fn_t searchFn, STableQueryInfo* item, int32_t order);
+ __block_search_fn_t searchFn, STableQueryInfo* item, int32_t order);
int32_t binarySearchForKey(char* pValue, int num, TSKEY key, int order);
int32_t initStreamAggSupporter(SStreamAggSupporter* pSup, const char* pKey, SqlFunctionCtx* pCtx, int32_t numOfOutput,
- int32_t size);
-SResultRow* getNewResultRow(SDiskbasedBuf* pResultBuf, int64_t tableGroupId, int32_t interBufSize);
-SResultWindowInfo* getSessionTimeWindow(SStreamAggSupporter* pAggSup, TSKEY startTs,
- TSKEY endTs, uint64_t groupId, int64_t gap, int32_t* pIndex);
-SResultWindowInfo* getCurSessionWindow(SStreamAggSupporter* pAggSup, TSKEY startTs,
- TSKEY endTs, uint64_t groupId, int64_t gap, int32_t* pIndex);
-bool isInTimeWindow(STimeWindow* pWin, TSKEY ts, int64_t gap);
-bool functionNeedToExecute(SqlFunctionCtx* pCtx);
-bool isOverdue(TSKEY ts, STimeWindowAggSupp* pSup);
-bool isCloseWindow(STimeWindow* pWin, STimeWindowAggSupp* pSup);
-bool isDeletedWindow(STimeWindow* pWin, uint64_t groupId, SAggSupporter* pSup);
-void appendOneRow(SSDataBlock* pBlock, TSKEY* pStartTs, TSKEY* pEndTs, int32_t uidCol, uint64_t* pID);
-void printDataBlock(SSDataBlock* pBlock, const char* flag);
-
-int32_t finalizeResultRowIntoResultDataBlock(SDiskbasedBuf* pBuf, SResultRowPosition* resultRowPosition,
- SqlFunctionCtx* pCtx, SExprInfo* pExprInfo, int32_t numOfExprs, const int32_t* rowCellOffset,
- SSDataBlock* pBlock, SExecTaskInfo* pTaskInfo);
-
-int32_t createScanTableListInfo(SScanPhysiNode *pScanNode, SNodeList* pGroupTags, bool groupSort, SReadHandle* pHandle,
- STableListInfo* pTableListInfo, SNode* pTagCond, SNode* pTagIndexCond, const char* idstr);
+ int32_t size);
+SResultRow* getNewResultRow(SDiskbasedBuf* pResultBuf, int32_t* currentPageId, int32_t interBufSize);
+SResultWindowInfo* getSessionTimeWindow(SStreamAggSupporter* pAggSup, TSKEY startTs, TSKEY endTs, uint64_t groupId,
+ int64_t gap, int32_t* pIndex);
+SResultWindowInfo* getCurSessionWindow(SStreamAggSupporter* pAggSup, TSKEY startTs, TSKEY endTs, uint64_t groupId,
+ int64_t gap, int32_t* pIndex);
+bool isInTimeWindow(STimeWindow* pWin, TSKEY ts, int64_t gap);
+bool functionNeedToExecute(SqlFunctionCtx* pCtx);
+bool isOverdue(TSKEY ts, STimeWindowAggSupp* pSup);
+bool isCloseWindow(STimeWindow* pWin, STimeWindowAggSupp* pSup);
+bool isDeletedWindow(STimeWindow* pWin, uint64_t groupId, SAggSupporter* pSup);
+bool isDeletedStreamWindow(STimeWindow* pWin, uint64_t groupId, SOperatorInfo* pOperator, STimeWindowAggSupp* pTwSup);
+void appendOneRow(SSDataBlock* pBlock, TSKEY* pStartTs, TSKEY* pEndTs, uint64_t* pUid, uint64_t* pGp);
+void printDataBlock(SSDataBlock* pBlock, const char* flag);
+uint64_t calGroupIdByData(SPartitionBySupporter* pParSup, SExprSupp* pExprSup, SSDataBlock* pBlock, int32_t rowId);
+
+int32_t finalizeResultRows(SDiskbasedBuf* pBuf, SResultRowPosition* resultRowPosition,
+ SExprSupp* pSup, SSDataBlock* pBlock, SExecTaskInfo* pTaskInfo);
+
+int32_t createScanTableListInfo(SScanPhysiNode* pScanNode, SNodeList* pGroupTags, bool groupSort, SReadHandle* pHandle,
+ STableListInfo* pTableListInfo, SNode* pTagCond, SNode* pTagIndexCond,
+ const char* idstr);
SOperatorInfo* createGroupSortOperatorInfo(SOperatorInfo* downstream, SGroupSortPhysiNode* pSortPhyNode,
SExecTaskInfo* pTaskInfo);
-SOperatorInfo* createTableMergeScanOperatorInfo(STableScanPhysiNode* pTableScanNode, STableListInfo *pTableListInfo,
+SOperatorInfo* createTableMergeScanOperatorInfo(STableScanPhysiNode* pTableScanNode, STableListInfo* pTableListInfo,
SReadHandle* readHandle, SExecTaskInfo* pTaskInfo);
void copyUpdateDataBlock(SSDataBlock* pDest, SSDataBlock* pSource, int32_t tsColIndex);
-bool groupbyTbname(SNodeList* pGroupList);
+bool groupbyTbname(SNodeList* pGroupList);
int32_t generateGroupIdMap(STableListInfo* pTableListInfo, SReadHandle* pHandle, SNodeList* groupKey);
-SSDataBlock* createSpecialDataBlock(EStreamType type);
-void* destroySqlFunctionCtx(SqlFunctionCtx* pCtx, int32_t numOfOutput);
+void* destroySqlFunctionCtx(SqlFunctionCtx* pCtx, int32_t numOfOutput);
+int32_t buildDataBlockFromGroupRes(SExecTaskInfo* pTaskInfo, SSDataBlock* pBlock, SExprSupp* pSup,
+ SGroupResInfo* pGroupResInfo);
+int32_t setOutputBuf(STimeWindow* win, SResultRow** pResult, int64_t tableGroupId, SqlFunctionCtx* pCtx,
+ int32_t numOfOutput, int32_t* rowEntryInfoOffset, SAggSupporter* pAggSup,
+ SExecTaskInfo* pTaskInfo);
+int32_t releaseOutputBuf(SExecTaskInfo* pTaskInfo, SWinKey* pKey, SResultRow* pResult);
+int32_t saveOutputBuf(SExecTaskInfo* pTaskInfo, SWinKey* pKey, SResultRow* pResult, int32_t resSize);
+void getNextIntervalWindow(SInterval* pInterval, STimeWindow* tw, int32_t order);
#ifdef __cplusplus
}
diff --git a/source/libs/executor/inc/tfill.h b/source/libs/executor/inc/tfill.h
index 63abfc019d7e687267ab2766d0da0e054a6a4a3c..ed019be767b63b9ec641ea25b86670d1cdd4974e 100644
--- a/source/libs/executor/inc/tfill.h
+++ b/source/libs/executor/inc/tfill.h
@@ -23,12 +23,13 @@ extern "C" {
#include "os.h"
#include "taosdef.h"
#include "tcommon.h"
+#include "tsimplehash.h"
struct SSDataBlock;
typedef struct SFillColInfo {
- SExprInfo *pExpr;
- bool notFillCol; // denote if this column needs fill operation
+ SExprInfo* pExpr;
+ bool notFillCol; // denote if this column needs fill operation
SVariant fillVal;
} SFillColInfo;
@@ -51,46 +52,96 @@ typedef struct {
} SRowVal;
typedef struct SFillInfo {
- TSKEY start; // start timestamp
- TSKEY end; // endKey for fill
- TSKEY currentKey; // current active timestamp, the value may be changed during the fill procedure.
- int32_t tsSlotId; // primary time stamp slot id
- int32_t srcTsSlotId; // timestamp column id in the source data block.
- int32_t order; // order [TSDB_ORDER_ASC|TSDB_ORDER_DESC]
- int32_t type; // fill type
- int32_t numOfRows; // number of rows in the input data block
- int32_t index; // active row index
- int32_t numOfTotal; // number of filled rows in one round
- int32_t numOfCurrent; // number of filled rows in current results
- int32_t numOfCols; // number of columns, including the tags columns
- SInterval interval;
- SRowVal prev;
- SRowVal next;
- SSDataBlock *pSrcBlock;
- int32_t alloc; // data buffer size in rows
-
- SFillColInfo* pFillCol; // column info for fill operations
- SFillTagColInfo* pTags; // tags value for filling gap
- const char* id;
+ TSKEY start; // start timestamp
+ TSKEY end; // endKey for fill
+ TSKEY currentKey; // current active timestamp, the value may be changed during the fill procedure.
+ int32_t tsSlotId; // primary time stamp slot id
+ int32_t srcTsSlotId; // timestamp column id in the source data block.
+ int32_t order; // order [TSDB_ORDER_ASC|TSDB_ORDER_DESC]
+ int32_t type; // fill type
+ int32_t numOfRows; // number of rows in the input data block
+ int32_t index; // active row index
+ int32_t numOfTotal; // number of filled rows in one round
+ int32_t numOfCurrent; // number of filled rows in current results
+ int32_t numOfCols; // number of columns, including the tags columns
+ SInterval interval;
+ SRowVal prev;
+ SRowVal next;
+ SSDataBlock* pSrcBlock;
+ int32_t alloc; // data buffer size in rows
+
+ SFillColInfo* pFillCol; // column info for fill operations
+ SFillTagColInfo* pTags; // tags value for filling gap
+ const char* id;
} SFillInfo;
-int64_t getNumOfResultsAfterFillGap(SFillInfo* pFillInfo, int64_t ekey, int32_t maxNumOfRows);
+typedef struct SResultCellData {
+ bool isNull;
+ int8_t type;
+ int32_t bytes;
+ char pData[];
+} SResultCellData;
+
+typedef struct SResultRowData {
+ TSKEY key;
+ SResultCellData* pRowVal;
+} SResultRowData;
+
+typedef struct SStreamFillLinearInfo {
+ TSKEY nextEnd;
+ SArray* pDeltaVal; // double. value for Fill(linear).
+ SArray* pNextDeltaVal; // double. value for Fill(linear).
+ int64_t winIndex;
+ bool hasNext;
+} SStreamFillLinearInfo;
+
+typedef struct SStreamFillInfo {
+ TSKEY start; // startKey for fill
+ TSKEY end; // endKey for fill
+ TSKEY current; // current Key for fill
+ TSKEY preRowKey;
+ TSKEY nextRowKey;
+ SResultRowData* pResRow;
+ SStreamFillLinearInfo* pLinearInfo;
+ bool needFill;
+ int32_t type; // fill type
+ int32_t pos;
+ SArray* delRanges;
+ int32_t delIndex;
+} SStreamFillInfo;
+
+typedef struct SStreamFillSupporter {
+ int32_t type; // fill type
+ SInterval interval;
+ SResultRowData prev;
+ SResultRowData cur;
+ SResultRowData next;
+ SResultRowData nextNext;
+ SFillColInfo* pAllColInfo; // fill exprs and not fill exprs
+ int32_t numOfAllCols; // number of all exprs, including the tags columns
+ int32_t numOfFillCols;
+ int32_t numOfNotFillCols;
+ int32_t rowSize;
+ SSHashObj* pResMap;
+ bool hasDelete;
+} SStreamFillSupporter;
+int64_t getNumOfResultsAfterFillGap(SFillInfo* pFillInfo, int64_t ekey, int32_t maxNumOfRows);
-void taosFillSetStartInfo(struct SFillInfo* pFillInfo, int32_t numOfRows, TSKEY endKey);
-void taosResetFillInfo(struct SFillInfo* pFillInfo, TSKEY startTimestamp);
-void taosFillSetInputDataBlock(struct SFillInfo* pFillInfo, const struct SSDataBlock* pInput);
-struct SFillColInfo* createFillColInfo(SExprInfo* pExpr, int32_t numOfFillExpr, SExprInfo* pNotFillExpr, int32_t numOfNotFillCols, const struct SNodeListNode* val);
-bool taosFillHasMoreResults(struct SFillInfo* pFillInfo);
+void taosFillSetStartInfo(struct SFillInfo* pFillInfo, int32_t numOfRows, TSKEY endKey);
+void taosResetFillInfo(struct SFillInfo* pFillInfo, TSKEY startTimestamp);
+void taosFillSetInputDataBlock(struct SFillInfo* pFillInfo, const struct SSDataBlock* pInput);
+struct SFillColInfo* createFillColInfo(SExprInfo* pExpr, int32_t numOfFillExpr, SExprInfo* pNotFillExpr,
+ int32_t numOfNotFillCols, const struct SNodeListNode* val);
+bool taosFillHasMoreResults(struct SFillInfo* pFillInfo);
SFillInfo* taosCreateFillInfo(TSKEY skey, int32_t numOfFillCols, int32_t numOfNotFillCols, int32_t capacity,
SInterval* pInterval, int32_t fillType, struct SFillColInfo* pCol, int32_t slotId,
int32_t order, const char* id);
-void* taosDestroyFillInfo(struct SFillInfo *pFillInfo);
+void* taosDestroyFillInfo(struct SFillInfo* pFillInfo);
int64_t taosFillResultDataBlock(struct SFillInfo* pFillInfo, SSDataBlock* p, int32_t capacity);
-int64_t getFillInfoStart(struct SFillInfo *pFillInfo);
-
+int64_t getFillInfoStart(struct SFillInfo* pFillInfo);
#ifdef __cplusplus
}
diff --git a/source/libs/executor/inc/tsimplehash.h b/source/libs/executor/inc/tsimplehash.h
index 4c5a80e2f1954812a81665954d3dc448467f6ffc..27191e3b7e674df4dcec9dabc7b8cc6fbb35f9f2 100644
--- a/source/libs/executor/inc/tsimplehash.h
+++ b/source/libs/executor/inc/tsimplehash.h
@@ -28,7 +28,7 @@ typedef void (*_hash_free_fn_t)(void *);
/**
* @brief single thread hash
- *
+ *
*/
typedef struct SSHashObj SSHashObj;
@@ -52,13 +52,13 @@ int32_t tSimpleHashPrint(const SSHashObj *pHashObj);
/**
* @brief put element into hash table, if the element with the same key exists, update it
- *
- * @param pHashObj
- * @param key
- * @param keyLen
- * @param data
- * @param dataLen
- * @return int32_t
+ *
+ * @param pHashObj
+ * @param key
+ * @param keyLen
+ * @param data
+ * @param dataLen
+ * @return int32_t
*/
int32_t tSimpleHashPut(SSHashObj *pHashObj, const void *key, size_t keyLen, const void *data, size_t dataLen);
@@ -80,6 +80,18 @@ void *tSimpleHashGet(SSHashObj *pHashObj, const void *key, size_t keyLen);
*/
int32_t tSimpleHashRemove(SSHashObj *pHashObj, const void *key, size_t keyLen);
+/**
+ * remove item with the specified key during hash iterate
+ *
+ * @param pHashObj
+ * @param key
+ * @param keyLen
+ * @param pIter
+ * @param iter
+ * @return int32_t
+ */
+int32_t tSimpleHashIterateRemove(SSHashObj *pHashObj, const void *key, size_t keyLen, void **pIter, int32_t *iter);
+
/**
* Clear the hash table.
* @param pHashObj
@@ -99,13 +111,27 @@ void tSimpleHashCleanup(SSHashObj *pHashObj);
*/
size_t tSimpleHashGetMemSize(const SSHashObj *pHashObj);
+#pragma pack(push, 4)
+typedef struct SHNode{
+ struct SHNode *next;
+ uint32_t keyLen : 20;
+ uint32_t dataLen : 12;
+ char data[];
+} SHNode;
+#pragma pack(pop)
+
/**
* Get the corresponding key information for a given data in hash table
* @param data
* @param keyLen
* @return
*/
-void *tSimpleHashGetKey(void *data, size_t* keyLen);
+static FORCE_INLINE void *tSimpleHashGetKey(void *data, size_t *keyLen) {
+ SHNode *node = (SHNode *)((char *)data - offsetof(SHNode, data));
+ if (keyLen) *keyLen = node->keyLen;
+
+ return POINTER_SHIFT(data, node->dataLen);
+}
/**
* Create the hash table iterator
@@ -116,17 +142,6 @@ void *tSimpleHashGetKey(void *data, size_t* keyLen);
*/
void *tSimpleHashIterate(const SSHashObj *pHashObj, void *data, int32_t *iter);
-/**
- * Create the hash table iterator
- *
- * @param pHashObj
- * @param data
- * @param key
- * @param iter
- * @return void*
- */
-void *tSimpleHashIterateKV(const SSHashObj *pHashObj, void *data, void **key, int32_t *iter);
-
#ifdef __cplusplus
}
#endif
diff --git a/source/libs/executor/src/cachescanoperator.c b/source/libs/executor/src/cachescanoperator.c
index b31fa279e57ad3436d19264071959e91fe4709f7..94d9d0cadbd1cf21ac8303a4bee7b86da9695f3c 100644
--- a/source/libs/executor/src/cachescanoperator.c
+++ b/source/libs/executor/src/cachescanoperator.c
@@ -25,24 +25,27 @@
#include "thash.h"
#include "ttypes.h"
-static SSDataBlock* doScanLastrow(SOperatorInfo* pOperator);
+static SSDataBlock* doScanCache(SOperatorInfo* pOperator);
static void destroyLastrowScanOperator(void* param);
static int32_t extractTargetSlotId(const SArray* pColMatchInfo, SExecTaskInfo* pTaskInfo, int32_t** pSlotIds);
-SOperatorInfo* createLastrowScanOperator(SLastRowScanPhysiNode* pScanNode, SReadHandle* readHandle, SExecTaskInfo* pTaskInfo) {
+SOperatorInfo* createCacherowsScanOperator(SLastRowScanPhysiNode* pScanNode, SReadHandle* readHandle,
+ SExecTaskInfo* pTaskInfo) {
+ int32_t code = TSDB_CODE_SUCCESS;
SLastrowScanInfo* pInfo = taosMemoryCalloc(1, sizeof(SLastrowScanInfo));
SOperatorInfo* pOperator = taosMemoryCalloc(1, sizeof(SOperatorInfo));
if (pInfo == NULL || pOperator == NULL) {
+ code = TSDB_CODE_OUT_OF_MEMORY;
goto _error;
}
pInfo->readHandle = *readHandle;
- pInfo->pRes = createResDataBlock(pScanNode->scan.node.pOutputDataBlockDesc);
+ pInfo->pRes = createResDataBlock(pScanNode->scan.node.pOutputDataBlockDesc);
int32_t numOfCols = 0;
pInfo->pColMatchInfo = extractColMatchInfo(pScanNode->scan.pScanCols, pScanNode->scan.node.pOutputDataBlockDesc, &numOfCols,
COL_MATCH_FROM_COL_ID);
- int32_t code = extractTargetSlotId(pInfo->pColMatchInfo, pTaskInfo, &pInfo->pSlotIds);
+ code = extractTargetSlotId(pInfo->pColMatchInfo, pTaskInfo, &pInfo->pSlotIds);
if (code != TSDB_CODE_SUCCESS) {
goto _error;
}
@@ -55,13 +58,17 @@ SOperatorInfo* createLastrowScanOperator(SLastRowScanPhysiNode* pScanNode, SRead
// partition by tbname
if (taosArrayGetSize(pTableList->pGroupList) == taosArrayGetSize(pTableList->pTableList)) {
- pInfo->retrieveType = LASTROW_RETRIEVE_TYPE_ALL;
- tsdbLastRowReaderOpen(pInfo->readHandle.vnode, pInfo->retrieveType, pTableList->pTableList,
- taosArrayGetSize(pInfo->pColMatchInfo), &pInfo->pLastrowReader);
+ pInfo->retrieveType = CACHESCAN_RETRIEVE_TYPE_ALL|CACHESCAN_RETRIEVE_LAST_ROW;
+ code = tsdbCacherowsReaderOpen(pInfo->readHandle.vnode, pInfo->retrieveType, pTableList->pTableList,
+ taosArrayGetSize(pInfo->pColMatchInfo), &pInfo->pLastrowReader);
+ if (code != TSDB_CODE_SUCCESS) {
+ goto _error;
+ }
+
pInfo->pBufferredRes = createOneDataBlock(pInfo->pRes, false);
blockDataEnsureCapacity(pInfo->pBufferredRes, pOperator->resultInfo.capacity);
} else { // by tags
- pInfo->retrieveType = LASTROW_RETRIEVE_TYPE_SINGLE;
+ pInfo->retrieveType = CACHESCAN_RETRIEVE_TYPE_SINGLE|CACHESCAN_RETRIEVE_LAST_ROW;
}
if (pScanNode->scan.pScanPseudoCols != NULL) {
@@ -80,19 +87,19 @@ SOperatorInfo* createLastrowScanOperator(SLastRowScanPhysiNode* pScanNode, SRead
pOperator->exprSupp.numOfExprs = taosArrayGetSize(pInfo->pRes->pDataBlock);
pOperator->fpSet =
- createOperatorFpSet(operatorDummyOpenFn, doScanLastrow, NULL, NULL, destroyLastrowScanOperator, NULL, NULL, NULL);
+ createOperatorFpSet(operatorDummyOpenFn, doScanCache, NULL, NULL, destroyLastrowScanOperator, NULL, NULL, NULL);
pOperator->cost.openCost = 0;
return pOperator;
_error:
- pTaskInfo->code = TSDB_CODE_OUT_OF_MEMORY;
- taosMemoryFree(pInfo);
+ pTaskInfo->code = code;
+ destroyLastrowScanOperator(pInfo);
taosMemoryFree(pOperator);
return NULL;
}
-SSDataBlock* doScanLastrow(SOperatorInfo* pOperator) {
+SSDataBlock* doScanCache(SOperatorInfo* pOperator) {
if (pOperator->status == OP_EXEC_DONE) {
return NULL;
}
@@ -109,14 +116,14 @@ SSDataBlock* doScanLastrow(SOperatorInfo* pOperator) {
blockDataCleanup(pInfo->pRes);
// check if it is a group by tbname
- if (pInfo->retrieveType == LASTROW_RETRIEVE_TYPE_ALL) {
+ if ((pInfo->retrieveType & CACHESCAN_RETRIEVE_TYPE_ALL) == CACHESCAN_RETRIEVE_TYPE_ALL) {
if (pInfo->indexOfBufferedRes >= pInfo->pBufferredRes->info.rows) {
blockDataCleanup(pInfo->pBufferredRes);
taosArrayClear(pInfo->pUidList);
- int32_t code = tsdbRetrieveLastRow(pInfo->pLastrowReader, pInfo->pBufferredRes, pInfo->pSlotIds, pInfo->pUidList);
+ int32_t code = tsdbRetrieveCacheRows(pInfo->pLastrowReader, pInfo->pBufferredRes, pInfo->pSlotIds, pInfo->pUidList);
if (code != TSDB_CODE_SUCCESS) {
- longjmp(pTaskInfo->env, code);
+ T_LONG_JMP(pTaskInfo->env, code);
}
// check for tag values
@@ -172,11 +179,11 @@ SSDataBlock* doScanLastrow(SOperatorInfo* pOperator) {
while (pInfo->currentGroupIndex < totalGroups) {
SArray* pGroupTableList = taosArrayGetP(pTableList->pGroupList, pInfo->currentGroupIndex);
- tsdbLastRowReaderOpen(pInfo->readHandle.vnode, pInfo->retrieveType, pGroupTableList,
+ tsdbCacherowsReaderOpen(pInfo->readHandle.vnode, pInfo->retrieveType, pGroupTableList,
taosArrayGetSize(pInfo->pColMatchInfo), &pInfo->pLastrowReader);
taosArrayClear(pInfo->pUidList);
- int32_t code = tsdbRetrieveLastRow(pInfo->pLastrowReader, pInfo->pRes, pInfo->pSlotIds, pInfo->pUidList);
+ int32_t code = tsdbRetrieveCacheRows(pInfo->pLastrowReader, pInfo->pRes, pInfo->pSlotIds, pInfo->pUidList);
if (code != TSDB_CODE_SUCCESS) {
longjmp(pTaskInfo->env, code);
}
@@ -200,7 +207,7 @@ SSDataBlock* doScanLastrow(SOperatorInfo* pOperator) {
}
}
- tsdbLastrowReaderClose(pInfo->pLastrowReader);
+ tsdbCacherowsReaderClose(pInfo->pLastrowReader);
return pInfo->pRes;
}
}
diff --git a/source/libs/executor/src/dataDeleter.c b/source/libs/executor/src/dataDeleter.c
index 06b7c13fa2cb52b8255098c5efb652d56ec57974..1ade4b89e63be0a53aa9a3b58e7c36d0631f7860 100644
--- a/source/libs/executor/src/dataDeleter.c
+++ b/source/libs/executor/src/dataDeleter.c
@@ -79,25 +79,36 @@ static void toDataCacheEntry(SDataDeleterHandle* pHandle, const SInputData* pInp
pEntry->dataLen = sizeof(SDeleterRes);
ASSERT(1 == pEntry->numOfRows);
- ASSERT(1 == pEntry->numOfCols);
+ ASSERT(3 == pEntry->numOfCols);
pBuf->useSize = sizeof(SDataCacheEntry);
SColumnInfoData* pColRes = (SColumnInfoData*)taosArrayGet(pInput->pData->pDataBlock, 0);
+ SColumnInfoData* pColSKey = (SColumnInfoData*)taosArrayGet(pInput->pData->pDataBlock, 1);
+ SColumnInfoData* pColEKey = (SColumnInfoData*)taosArrayGet(pInput->pData->pDataBlock, 2);
SDeleterRes* pRes = (SDeleterRes*)pEntry->data;
pRes->suid = pHandle->pParam->suid;
pRes->uidList = pHandle->pParam->pUidList;
- pRes->skey = pHandle->pDeleter->deleteTimeRange.skey;
- pRes->ekey = pHandle->pDeleter->deleteTimeRange.ekey;
strcpy(pRes->tableName, pHandle->pDeleter->tableFName);
strcpy(pRes->tsColName, pHandle->pDeleter->tsColName);
pRes->affectedRows = *(int64_t*)pColRes->pData;
+ if (pRes->affectedRows) {
+ pRes->skey = *(int64_t*)pColSKey->pData;
+ pRes->ekey = *(int64_t*)pColEKey->pData;
+ ASSERT(pRes->skey <= pRes->ekey);
+ } else {
+ pRes->skey = pHandle->pDeleter->deleteTimeRange.skey;
+ pRes->ekey = pHandle->pDeleter->deleteTimeRange.ekey;
+ }
+
+ qDebug("delete %ld rows, from %ld to %ld", pRes->affectedRows, pRes->skey, pRes->ekey);
+
pBuf->useSize += pEntry->dataLen;
-
- atomic_add_fetch_64(&pHandle->cachedSize, pEntry->dataLen);
- atomic_add_fetch_64(&gDataSinkStat.cachedSize, pEntry->dataLen);
+
+ atomic_add_fetch_64(&pHandle->cachedSize, pEntry->dataLen);
+ atomic_add_fetch_64(&gDataSinkStat.cachedSize, pEntry->dataLen);
}
static bool allocBuf(SDataDeleterHandle* pDeleter, const SInputData* pInput, SDataDeleterBuf* pBuf) {
@@ -168,9 +179,12 @@ static void getDataLength(SDataSinkHandle* pHandle, int64_t* pLen, bool* pQueryE
taosReadQitem(pDeleter->pDataBlocks, (void**)&pBuf);
memcpy(&pDeleter->nextOutput, pBuf, sizeof(SDataDeleterBuf));
taosFreeQitem(pBuf);
- *pLen = ((SDataCacheEntry*)(pDeleter->nextOutput.pData))->dataLen;
+
+ SDataCacheEntry* pEntry = (SDataCacheEntry*)pDeleter->nextOutput.pData;
+ *pLen = pEntry->dataLen;
*pQueryEnd = pDeleter->queryEnd;
- qDebug("got data len %" PRId64 ", row num %d in sink", *pLen, ((SDataCacheEntry*)(pDeleter->nextOutput.pData))->numOfRows);
+ qDebug("got data len %" PRId64 ", row num %d in sink", *pLen,
+ ((SDataCacheEntry*)(pDeleter->nextOutput.pData))->numOfRows);
}
static int32_t getDataBlock(SDataSinkHandle* pHandle, SOutputData* pOutput) {
@@ -184,14 +198,14 @@ static int32_t getDataBlock(SDataSinkHandle* pHandle, SOutputData* pOutput) {
return TSDB_CODE_SUCCESS;
}
SDataCacheEntry* pEntry = (SDataCacheEntry*)(pDeleter->nextOutput.pData);
- memcpy(pOutput->pData, pEntry->data, pEntry->dataLen);
+ memcpy(pOutput->pData, pEntry->data, pEntry->dataLen);
pDeleter->pParam->pUidList = NULL;
pOutput->numOfRows = pEntry->numOfRows;
pOutput->numOfCols = pEntry->numOfCols;
pOutput->compressed = pEntry->compressed;
- atomic_sub_fetch_64(&pDeleter->cachedSize, pEntry->dataLen);
- atomic_sub_fetch_64(&gDataSinkStat.cachedSize, pEntry->dataLen);
+ atomic_sub_fetch_64(&pDeleter->cachedSize, pEntry->dataLen);
+ atomic_sub_fetch_64(&gDataSinkStat.cachedSize, pEntry->dataLen);
taosMemoryFreeClear(pDeleter->nextOutput.pData); // todo persistent
pOutput->bufStatus = updateStatus(pDeleter);
@@ -200,7 +214,7 @@ static int32_t getDataBlock(SDataSinkHandle* pHandle, SOutputData* pOutput) {
pOutput->useconds = pDeleter->useconds;
pOutput->precision = pDeleter->pSchema->precision;
taosThreadMutexUnlock(&pDeleter->mutex);
-
+
return TSDB_CODE_SUCCESS;
}
@@ -209,7 +223,7 @@ static int32_t destroyDataSinker(SDataSinkHandle* pHandle) {
atomic_sub_fetch_64(&gDataSinkStat.cachedSize, pDeleter->cachedSize);
taosMemoryFreeClear(pDeleter->nextOutput.pData);
taosArrayDestroy(pDeleter->pParam->pUidList);
- taosMemoryFree(pDeleter->pParam);
+ taosMemoryFree(pDeleter->pParam);
while (!taosQueueEmpty(pDeleter->pDataBlocks)) {
SDataDeleterBuf* pBuf = NULL;
taosReadQitem(pDeleter->pDataBlocks, (void**)&pBuf);
@@ -228,14 +242,15 @@ static int32_t getCacheSize(struct SDataSinkHandle* pHandle, uint64_t* size) {
return TSDB_CODE_SUCCESS;
}
-int32_t createDataDeleter(SDataSinkManager* pManager, const SDataSinkNode* pDataSink, DataSinkHandle* pHandle, void *pParam) {
+int32_t createDataDeleter(SDataSinkManager* pManager, const SDataSinkNode* pDataSink, DataSinkHandle* pHandle,
+ void* pParam) {
SDataDeleterHandle* deleter = taosMemoryCalloc(1, sizeof(SDataDeleterHandle));
if (NULL == deleter) {
terrno = TSDB_CODE_QRY_OUT_OF_MEMORY;
return TSDB_CODE_QRY_OUT_OF_MEMORY;
}
- SDataDeleterNode* pDeleterNode = (SDataDeleterNode *)pDataSink;
+ SDataDeleterNode* pDeleterNode = (SDataDeleterNode*)pDataSink;
deleter->sink.fPut = putDataBlock;
deleter->sink.fEndPut = endPut;
deleter->sink.fGetLen = getDataLength;
diff --git a/source/libs/executor/src/dataDispatcher.c b/source/libs/executor/src/dataDispatcher.c
index 20396046ba5daa34c3caaf76796c2b8a3b06527c..1697ed63fb196aa2a571aa26f8ffe29ee1d6c5d5 100644
--- a/source/libs/executor/src/dataDispatcher.c
+++ b/source/libs/executor/src/dataDispatcher.c
@@ -93,6 +93,8 @@ static void toDataCacheEntry(SDataDispatchHandle* pHandle, const SInputData* pIn
pBuf->useSize = sizeof(SDataCacheEntry);
blockEncode(pInput->pData, pEntry->data, &pEntry->dataLen, numOfCols, pEntry->compressed);
+ ASSERT(pEntry->numOfRows == *(int32_t*)(pEntry->data+8));
+ ASSERT(pEntry->numOfCols == *(int32_t*)(pEntry->data+8+4));
pBuf->useSize += pEntry->dataLen;
@@ -170,7 +172,13 @@ static void getDataLength(SDataSinkHandle* pHandle, int64_t* pLen, bool* pQueryE
taosReadQitem(pDispatcher->pDataBlocks, (void**)&pBuf);
memcpy(&pDispatcher->nextOutput, pBuf, sizeof(SDataDispatchBuf));
taosFreeQitem(pBuf);
- *pLen = ((SDataCacheEntry*)(pDispatcher->nextOutput.pData))->dataLen;
+
+ SDataCacheEntry* pEntry = (SDataCacheEntry*)pDispatcher->nextOutput.pData;
+ *pLen = pEntry->dataLen;
+
+ ASSERT(pEntry->numOfRows == *(int32_t*)(pEntry->data+8));
+ ASSERT(pEntry->numOfCols == *(int32_t*)(pEntry->data+8+4));
+
*pQueryEnd = pDispatcher->queryEnd;
qDebug("got data len %" PRId64 ", row num %d in sink", *pLen, ((SDataCacheEntry*)(pDispatcher->nextOutput.pData))->numOfRows);
}
@@ -191,6 +199,9 @@ static int32_t getDataBlock(SDataSinkHandle* pHandle, SOutputData* pOutput) {
pOutput->numOfCols = pEntry->numOfCols;
pOutput->compressed = pEntry->compressed;
+ ASSERT(pEntry->numOfRows == *(int32_t*)(pEntry->data+8));
+ ASSERT(pEntry->numOfCols == *(int32_t*)(pEntry->data+8+4));
+
atomic_sub_fetch_64(&pDispatcher->cachedSize, pEntry->dataLen);
atomic_sub_fetch_64(&gDataSinkStat.cachedSize, pEntry->dataLen);
diff --git a/source/libs/executor/src/executil.c b/source/libs/executor/src/executil.c
index b89579a0177caa798565e5a90772e71137b2c1f0..b26603b394f1527334f4e4d490edb69e996053e1 100644
--- a/source/libs/executor/src/executil.c
+++ b/source/libs/executor/src/executil.c
@@ -33,6 +33,17 @@ void initResultRowInfo(SResultRowInfo* pResultRowInfo) {
void closeResultRow(SResultRow* pResultRow) { pResultRow->closed = true; }
+void resetResultRow(SResultRow* pResultRow, size_t entrySize) {
+ pResultRow->numOfRows = 0;
+ pResultRow->closed = false;
+ pResultRow->endInterp = false;
+ pResultRow->startInterp = false;
+
+ if (entrySize > 0) {
+ memset(pResultRow->pEntryInfo, 0, entrySize);
+ }
+}
+
// TODO refactor: use macro
SResultRowEntryInfo* getResultEntryInfo(const SResultRow* pRow, int32_t index, const int32_t* offset) {
assert(index >= 0 && offset != NULL);
@@ -46,8 +57,8 @@ size_t getResultRowSize(SqlFunctionCtx* pCtx, int32_t numOfOutput) {
rowSize += pCtx[i].resDataInfo.interBufSize;
}
- rowSize +=
- (numOfOutput * sizeof(bool)); // expand rowSize to mark if col is null for top/bottom result(doSaveTupleData)
+ rowSize += (numOfOutput * sizeof(bool));
+ // expand rowSize to mark if col is null for top/bottom result(saveTupleData)
return rowSize;
}
@@ -83,7 +94,7 @@ int32_t resultrowComparAsc(const void* p1, const void* p2) {
static int32_t resultrowComparDesc(const void* p1, const void* p2) { return resultrowComparAsc(p2, p1); }
-void initGroupedResultInfo(SGroupResInfo* pGroupResInfo, SHashObj* pHashmap, int32_t order) {
+void initGroupedResultInfo(SGroupResInfo* pGroupResInfo, SSHashObj* pHashmap, int32_t order) {
if (pGroupResInfo->pRows != NULL) {
taosArrayDestroy(pGroupResInfo->pRows);
}
@@ -92,9 +103,10 @@ void initGroupedResultInfo(SGroupResInfo* pGroupResInfo, SHashObj* pHashmap, int
void* pData = NULL;
pGroupResInfo->pRows = taosArrayInit(10, POINTER_BYTES);
- size_t keyLen = 0;
- while ((pData = taosHashIterate(pHashmap, pData)) != NULL) {
- void* key = taosHashGetKey(pData, &keyLen);
+ size_t keyLen = 0;
+ int32_t iter = 0;
+ while ((pData = tSimpleHashIterate(pHashmap, pData, &iter)) != NULL) {
+ void* key = tSimpleHashGetKey(pData, &keyLen);
SResKeyPos* p = taosMemoryMalloc(keyLen + sizeof(SResultRowPosition));
@@ -348,7 +360,7 @@ static int32_t createResultData(SDataType* pType, int32_t numOfRows, SScalarPara
int32_t code = colInfoDataEnsureCapacity(pColumnData, numOfRows);
if (code != TSDB_CODE_SUCCESS) {
- terrno = TSDB_CODE_OUT_OF_MEMORY;
+ terrno = code;
taosMemoryFree(pColumnData);
return terrno;
}
@@ -366,6 +378,7 @@ static SColumnInfoData* getColInfoResult(void* metaHandle, uint64_t suid, SArray
SScalarParam output = {0};
tagFilterAssist ctx = {0};
+
ctx.colHash = taosHashInit(4, taosGetDefaultHashFunction(TSDB_DATA_TYPE_SMALLINT), false, HASH_NO_LOCK);
if (ctx.colHash == NULL) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
@@ -797,9 +810,15 @@ int32_t getTableList(void* metaHandle, void* pVnode, SScanPhysiNode* pScanNode,
taosMemoryFreeClear(pColInfoData);
}
- for (int i = 0; i < taosArrayGetSize(res); i++) {
+ size_t numOfTables = taosArrayGetSize(res);
+ for (int i = 0; i < numOfTables; i++) {
STableKeyInfo info = {.uid = *(uint64_t*)taosArrayGet(res, i), .groupId = 0};
- taosArrayPush(pListInfo->pTableList, &info);
+ void* p = taosArrayPush(pListInfo->pTableList, &info);
+ if (p == NULL) {
+ taosArrayDestroy(res);
+ return TSDB_CODE_OUT_OF_MEMORY;
+ }
+
qDebug("tagfilter get uid:%ld", info.uid);
}
@@ -935,15 +954,17 @@ SArray* extractColMatchInfo(SNodeList* pNodeList, SDataBlockDescNode* pOutputNod
for (int32_t i = 0; i < numOfCols; ++i) {
STargetNode* pNode = (STargetNode*)nodesListGetNode(pNodeList, i);
- SColumnNode* pColNode = (SColumnNode*)pNode->pExpr;
-
- SColMatchInfo c = {0};
- c.output = true;
- c.colId = pColNode->colId;
- c.srcSlotId = pColNode->slotId;
- c.matchType = type;
- c.targetSlotId = pNode->slotId;
- taosArrayPush(pList, &c);
+ if (nodeType(pNode->pExpr) == QUERY_NODE_COLUMN) {
+ SColumnNode* pColNode = (SColumnNode*)pNode->pExpr;
+
+ SColMatchInfo c = {0};
+ c.output = true;
+ c.colId = pColNode->colId;
+ c.srcSlotId = pColNode->slotId;
+ c.matchType = type;
+ c.targetSlotId = pNode->slotId;
+ taosArrayPush(pList, &c);
+ }
}
*numOfOutputCols = 0;
@@ -968,7 +989,8 @@ SArray* extractColMatchInfo(SNodeList* pNodeList, SDataBlockDescNode* pOutputNod
if (pNode->output) {
(*numOfOutputCols) += 1;
- } else {
+ } else if (info != NULL) {
+ // select distinct tbname from stb where tbname='abc';
info->output = false;
}
}
@@ -1007,6 +1029,100 @@ static SColumn* createColumn(int32_t blockId, int32_t slotId, int32_t colId, SDa
return pCol;
}
+void createExprFromTargetNode(SExprInfo* pExp, STargetNode* pTargetNode) {
+ pExp->pExpr = taosMemoryCalloc(1, sizeof(tExprNode));
+ pExp->pExpr->_function.num = 1;
+ pExp->pExpr->_function.functionId = -1;
+
+ int32_t type = nodeType(pTargetNode->pExpr);
+ // it is a project query, or group by column
+ if (type == QUERY_NODE_COLUMN) {
+ pExp->pExpr->nodeType = QUERY_NODE_COLUMN;
+ SColumnNode* pColNode = (SColumnNode*)pTargetNode->pExpr;
+
+ pExp->base.pParam = taosMemoryCalloc(1, sizeof(SFunctParam));
+ pExp->base.numOfParams = 1;
+
+ SDataType* pType = &pColNode->node.resType;
+ pExp->base.resSchema = createResSchema(pType->type, pType->bytes, pTargetNode->slotId, pType->scale,
+ pType->precision, pColNode->colName);
+ pExp->base.pParam[0].pCol =
+ createColumn(pColNode->dataBlockId, pColNode->slotId, pColNode->colId, pType, pColNode->colType);
+ pExp->base.pParam[0].type = FUNC_PARAM_TYPE_COLUMN;
+ } else if (type == QUERY_NODE_VALUE) {
+ pExp->pExpr->nodeType = QUERY_NODE_VALUE;
+ SValueNode* pValNode = (SValueNode*)pTargetNode->pExpr;
+
+ pExp->base.pParam = taosMemoryCalloc(1, sizeof(SFunctParam));
+ pExp->base.numOfParams = 1;
+
+ SDataType* pType = &pValNode->node.resType;
+ pExp->base.resSchema = createResSchema(pType->type, pType->bytes, pTargetNode->slotId, pType->scale,
+ pType->precision, pValNode->node.aliasName);
+ pExp->base.pParam[0].type = FUNC_PARAM_TYPE_VALUE;
+ nodesValueNodeToVariant(pValNode, &pExp->base.pParam[0].param);
+ } else if (type == QUERY_NODE_FUNCTION) {
+ pExp->pExpr->nodeType = QUERY_NODE_FUNCTION;
+ SFunctionNode* pFuncNode = (SFunctionNode*)pTargetNode->pExpr;
+
+ SDataType* pType = &pFuncNode->node.resType;
+ pExp->base.resSchema = createResSchema(pType->type, pType->bytes, pTargetNode->slotId, pType->scale,
+ pType->precision, pFuncNode->node.aliasName);
+
+ pExp->pExpr->_function.functionId = pFuncNode->funcId;
+ pExp->pExpr->_function.pFunctNode = pFuncNode;
+
+ strncpy(pExp->pExpr->_function.functionName, pFuncNode->functionName,
+ tListLen(pExp->pExpr->_function.functionName));
+#if 1
+ // todo refactor: add the parameter for tbname function
+ if (!pFuncNode->pParameterList && (strcmp(pExp->pExpr->_function.functionName, "tbname") == 0)) {
+ pFuncNode->pParameterList = nodesMakeList();
+ ASSERT(LIST_LENGTH(pFuncNode->pParameterList) == 0);
+ SValueNode* res = (SValueNode*)nodesMakeNode(QUERY_NODE_VALUE);
+ if (NULL == res) { // todo handle error
+ } else {
+ res->node.resType = (SDataType){.bytes = sizeof(int64_t), .type = TSDB_DATA_TYPE_BIGINT};
+ nodesListAppend(pFuncNode->pParameterList, (SNode*)res);
+ }
+ }
+#endif
+
+ int32_t numOfParam = LIST_LENGTH(pFuncNode->pParameterList);
+
+ pExp->base.pParam = taosMemoryCalloc(numOfParam, sizeof(SFunctParam));
+ pExp->base.numOfParams = numOfParam;
+
+ for (int32_t j = 0; j < numOfParam; ++j) {
+ SNode* p1 = nodesListGetNode(pFuncNode->pParameterList, j);
+ if (p1->type == QUERY_NODE_COLUMN) {
+ SColumnNode* pcn = (SColumnNode*)p1;
+
+ pExp->base.pParam[j].type = FUNC_PARAM_TYPE_COLUMN;
+ pExp->base.pParam[j].pCol =
+ createColumn(pcn->dataBlockId, pcn->slotId, pcn->colId, &pcn->node.resType, pcn->colType);
+ } else if (p1->type == QUERY_NODE_VALUE) {
+ SValueNode* pvn = (SValueNode*)p1;
+ pExp->base.pParam[j].type = FUNC_PARAM_TYPE_VALUE;
+ nodesValueNodeToVariant(pvn, &pExp->base.pParam[j].param);
+ }
+ }
+ } else if (type == QUERY_NODE_OPERATOR) {
+ pExp->pExpr->nodeType = QUERY_NODE_OPERATOR;
+ SOperatorNode* pNode = (SOperatorNode*)pTargetNode->pExpr;
+
+ pExp->base.pParam = taosMemoryCalloc(1, sizeof(SFunctParam));
+ pExp->base.numOfParams = 1;
+
+ SDataType* pType = &pNode->node.resType;
+ pExp->base.resSchema = createResSchema(pType->type, pType->bytes, pTargetNode->slotId, pType->scale,
+ pType->precision, pNode->node.aliasName);
+ pExp->pExpr->_optrRoot.pRootNode = pTargetNode->pExpr;
+ } else {
+ ASSERT(0);
+ }
+}
+
SExprInfo* createExprInfo(SNodeList* pNodeList, SNodeList* pGroupKeys, int32_t* numOfExprs) {
int32_t numOfFuncs = LIST_LENGTH(pNodeList);
int32_t numOfGroupKeys = 0;
@@ -1030,98 +1146,7 @@ SExprInfo* createExprInfo(SNodeList* pNodeList, SNodeList* pGroupKeys, int32_t*
}
SExprInfo* pExp = &pExprs[i];
-
- pExp->pExpr = taosMemoryCalloc(1, sizeof(tExprNode));
- pExp->pExpr->_function.num = 1;
- pExp->pExpr->_function.functionId = -1;
-
- int32_t type = nodeType(pTargetNode->pExpr);
- // it is a project query, or group by column
- if (type == QUERY_NODE_COLUMN) {
- pExp->pExpr->nodeType = QUERY_NODE_COLUMN;
- SColumnNode* pColNode = (SColumnNode*)pTargetNode->pExpr;
-
- pExp->base.pParam = taosMemoryCalloc(1, sizeof(SFunctParam));
- pExp->base.numOfParams = 1;
-
- SDataType* pType = &pColNode->node.resType;
- pExp->base.resSchema = createResSchema(pType->type, pType->bytes, pTargetNode->slotId, pType->scale,
- pType->precision, pColNode->colName);
- pExp->base.pParam[0].pCol =
- createColumn(pColNode->dataBlockId, pColNode->slotId, pColNode->colId, pType, pColNode->colType);
- pExp->base.pParam[0].type = FUNC_PARAM_TYPE_COLUMN;
- } else if (type == QUERY_NODE_VALUE) {
- pExp->pExpr->nodeType = QUERY_NODE_VALUE;
- SValueNode* pValNode = (SValueNode*)pTargetNode->pExpr;
-
- pExp->base.pParam = taosMemoryCalloc(1, sizeof(SFunctParam));
- pExp->base.numOfParams = 1;
-
- SDataType* pType = &pValNode->node.resType;
- pExp->base.resSchema = createResSchema(pType->type, pType->bytes, pTargetNode->slotId, pType->scale,
- pType->precision, pValNode->node.aliasName);
- pExp->base.pParam[0].type = FUNC_PARAM_TYPE_VALUE;
- nodesValueNodeToVariant(pValNode, &pExp->base.pParam[0].param);
- } else if (type == QUERY_NODE_FUNCTION) {
- pExp->pExpr->nodeType = QUERY_NODE_FUNCTION;
- SFunctionNode* pFuncNode = (SFunctionNode*)pTargetNode->pExpr;
-
- SDataType* pType = &pFuncNode->node.resType;
- pExp->base.resSchema = createResSchema(pType->type, pType->bytes, pTargetNode->slotId, pType->scale,
- pType->precision, pFuncNode->node.aliasName);
-
- pExp->pExpr->_function.functionId = pFuncNode->funcId;
- pExp->pExpr->_function.pFunctNode = pFuncNode;
-
- strncpy(pExp->pExpr->_function.functionName, pFuncNode->functionName,
- tListLen(pExp->pExpr->_function.functionName));
-#if 1
- // todo refactor: add the parameter for tbname function
- if (!pFuncNode->pParameterList && (strcmp(pExp->pExpr->_function.functionName, "tbname") == 0)) {
- pFuncNode->pParameterList = nodesMakeList();
- ASSERT(LIST_LENGTH(pFuncNode->pParameterList) == 0);
- SValueNode* res = (SValueNode*)nodesMakeNode(QUERY_NODE_VALUE);
- if (NULL == res) { // todo handle error
- } else {
- res->node.resType = (SDataType){.bytes = sizeof(int64_t), .type = TSDB_DATA_TYPE_BIGINT};
- nodesListAppend(pFuncNode->pParameterList, (SNode*)res);
- }
- }
-#endif
-
- int32_t numOfParam = LIST_LENGTH(pFuncNode->pParameterList);
-
- pExp->base.pParam = taosMemoryCalloc(numOfParam, sizeof(SFunctParam));
- pExp->base.numOfParams = numOfParam;
-
- for (int32_t j = 0; j < numOfParam; ++j) {
- SNode* p1 = nodesListGetNode(pFuncNode->pParameterList, j);
- if (p1->type == QUERY_NODE_COLUMN) {
- SColumnNode* pcn = (SColumnNode*)p1;
-
- pExp->base.pParam[j].type = FUNC_PARAM_TYPE_COLUMN;
- pExp->base.pParam[j].pCol =
- createColumn(pcn->dataBlockId, pcn->slotId, pcn->colId, &pcn->node.resType, pcn->colType);
- } else if (p1->type == QUERY_NODE_VALUE) {
- SValueNode* pvn = (SValueNode*)p1;
- pExp->base.pParam[j].type = FUNC_PARAM_TYPE_VALUE;
- nodesValueNodeToVariant(pvn, &pExp->base.pParam[j].param);
- }
- }
- } else if (type == QUERY_NODE_OPERATOR) {
- pExp->pExpr->nodeType = QUERY_NODE_OPERATOR;
- SOperatorNode* pNode = (SOperatorNode*)pTargetNode->pExpr;
-
- pExp->base.pParam = taosMemoryCalloc(1, sizeof(SFunctParam));
- pExp->base.numOfParams = 1;
-
- SDataType* pType = &pNode->node.resType;
- pExp->base.resSchema = createResSchema(pType->type, pType->bytes, pTargetNode->slotId, pType->scale,
- pType->precision, pNode->node.aliasName);
- pExp->pExpr->_optrRoot.pRootNode = pTargetNode->pExpr;
- } else {
- ASSERT(0);
- }
+ createExprFromTargetNode(pExp, pTargetNode);
}
return pExprs;
@@ -1175,7 +1200,6 @@ SqlFunctionCtx* createSqlFunctionCtx(SExprInfo* pExprInfo, int32_t numOfOutput,
SqlFunctionCtx* pCtx = &pFuncCtx[i];
pCtx->functionId = -1;
- pCtx->curBufPage = -1;
pCtx->pExpr = pExpr;
if (pExpr->pExpr->nodeType == QUERY_NODE_FUNCTION) {
@@ -1188,7 +1212,7 @@ SqlFunctionCtx* createSqlFunctionCtx(SExprInfo* pExprInfo, int32_t numOfOutput,
fmGetFuncExecFuncs(pCtx->functionId, &pCtx->fpSet);
} else {
char* udfName = pExpr->pExpr->_function.pFunctNode->functionName;
- strncpy(pCtx->udfName, udfName, strlen(udfName));
+ strncpy(pCtx->udfName, udfName, TSDB_FUNC_NAME_LEN);
fmGetUdafExecFuncs(pCtx->functionId, &pCtx->fpSet);
}
pCtx->fpSet.getEnv(pExpr->pExpr->_function.pFunctNode, &env);
@@ -1216,10 +1240,10 @@ SqlFunctionCtx* createSqlFunctionCtx(SExprInfo* pExprInfo, int32_t numOfOutput,
pCtx->start.key = INT64_MIN;
pCtx->end.key = INT64_MIN;
pCtx->numOfParams = pExpr->base.numOfParams;
- pCtx->increase = false;
pCtx->isStream = false;
pCtx->param = pFunct->pParam;
+ pCtx->saveHandle.currentPage = -1;
}
for (int32_t i = 1; i < numOfOutput; ++i) {
@@ -1318,7 +1342,7 @@ int32_t initQueryTableDataCond(SQueryTableDataCond* pCond, const STableScanPhysi
return TSDB_CODE_SUCCESS;
}
-void cleanupQueryTableDataCond(SQueryTableDataCond* pCond) { taosMemoryFree(pCond->colList); }
+void cleanupQueryTableDataCond(SQueryTableDataCond* pCond) { taosMemoryFreeClear(pCond->colList); }
int32_t convertFillType(int32_t mode) {
int32_t type = TSDB_FILL_NONE;
diff --git a/source/libs/executor/src/executor.c b/source/libs/executor/src/executor.c
index fe1f4911cae75aa4f972670126b9e94735dd1a56..a8c73f0170a3a888f9ec27c66e19667654bc1abd 100644
--- a/source/libs/executor/src/executor.c
+++ b/source/libs/executor/src/executor.c
@@ -49,10 +49,19 @@ static int32_t doSetStreamBlock(SOperatorInfo* pOperator, void* input, size_t nu
SStreamScanInfo* pInfo = pOperator->info;
+#if 0
// TODO: if a block was set but not consumed,
// prevent setting a different type of block
pInfo->validBlockIndex = 0;
- taosArrayClear(pInfo->pBlockLists);
+ if (pInfo->blockType == STREAM_INPUT__DATA_BLOCK) {
+ taosArrayClearP(pInfo->pBlockLists, taosMemoryFree);
+ } else {
+ taosArrayClear(pInfo->pBlockLists);
+ }
+#endif
+
+ ASSERT(pInfo->validBlockIndex == 0);
+ ASSERT(taosArrayGetSize(pInfo->pBlockLists) == 0);
if (type == STREAM_INPUT__MERGED_SUBMIT) {
// ASSERT(numOfBlocks > 1);
@@ -75,7 +84,9 @@ static int32_t doSetStreamBlock(SOperatorInfo* pOperator, void* input, size_t nu
} else if (type == STREAM_INPUT__DATA_BLOCK) {
for (int32_t i = 0; i < numOfBlocks; ++i) {
SSDataBlock* pDataBlock = &((SSDataBlock*)input)[i];
+ taosArrayPush(pInfo->pBlockLists, &pDataBlock);
+#if 0
// TODO optimize
SSDataBlock* p = createOneDataBlock(pDataBlock, false);
p->info = pDataBlock->info;
@@ -83,6 +94,7 @@ static int32_t doSetStreamBlock(SOperatorInfo* pOperator, void* input, size_t nu
taosArrayClear(p->pDataBlock);
taosArrayAddAll(p->pDataBlock, pDataBlock->pDataBlock);
taosArrayPush(pInfo->pBlockLists, &p);
+#endif
}
pInfo->blockType = STREAM_INPUT__DATA_BLOCK;
} else {
@@ -93,7 +105,10 @@ static int32_t doSetStreamBlock(SOperatorInfo* pOperator, void* input, size_t nu
}
}
+static FORCE_INLINE void streamInputBlockDataDestory(void* pBlock) { blockDataDestroy((SSDataBlock*)pBlock); }
+
void tdCleanupStreamInputDataBlock(qTaskInfo_t tinfo) {
+#if 0
SExecTaskInfo* pTaskInfo = (SExecTaskInfo*)tinfo;
if (!pTaskInfo || !pTaskInfo->pRoot || pTaskInfo->pRoot->numOfDownstream <= 0) {
return;
@@ -103,17 +118,14 @@ void tdCleanupStreamInputDataBlock(qTaskInfo_t tinfo) {
if (pOptrInfo->operatorType == QUERY_NODE_PHYSICAL_PLAN_STREAM_SCAN) {
SStreamScanInfo* pInfo = pOptrInfo->info;
if (pInfo->blockType == STREAM_INPUT__DATA_BLOCK) {
- for (int32_t i = 0; i < taosArrayGetSize(pInfo->pBlockLists); ++i) {
- SSDataBlock* p = *(SSDataBlock**)taosArrayGet(pInfo->pBlockLists, i);
- taosArrayDestroy(p->pDataBlock);
- taosMemoryFreeClear(p);
- }
+ taosArrayClearP(pInfo->pBlockLists, streamInputBlockDataDestory);
} else {
ASSERT(0);
}
} else {
ASSERT(0);
}
+#endif
}
int32_t qSetMultiStreamInput(qTaskInfo_t tinfo, const void* pBlocks, size_t numOfBlocks, int32_t type) {
@@ -139,8 +151,24 @@ int32_t qSetMultiStreamInput(qTaskInfo_t tinfo, const void* pBlocks, size_t numO
qTaskInfo_t qCreateQueueExecTaskInfo(void* msg, SReadHandle* readers, int32_t* numOfCols, SSchemaWrapper** pSchema) {
if (msg == NULL) {
- // TODO create raw scan
- return NULL;
+ // create raw scan
+
+ SExecTaskInfo* pTaskInfo = taosMemoryCalloc(1, sizeof(SExecTaskInfo));
+ if (NULL == pTaskInfo) {
+ terrno = TSDB_CODE_OUT_OF_MEMORY;
+ return NULL;
+ }
+ setTaskStatus(pTaskInfo, TASK_NOT_COMPLETED);
+
+ pTaskInfo->cost.created = taosGetTimestampMs();
+ pTaskInfo->execModel = OPTR_EXEC_MODEL_QUEUE;
+ pTaskInfo->pRoot = createRawScanOperatorInfo(readers, pTaskInfo);
+ if (NULL == pTaskInfo->pRoot) {
+ terrno = TSDB_CODE_OUT_OF_MEMORY;
+ taosMemoryFree(pTaskInfo);
+ return NULL;
+ }
+ return pTaskInfo;
}
struct SSubplan* pPlan = NULL;
@@ -246,7 +274,16 @@ static SArray* filterUnqualifiedTables(const SStreamScanInfo* pScanInfo, const S
}
int32_t qUpdateQualifiedTableId(qTaskInfo_t tinfo, const SArray* tableIdList, bool isAdd) {
- SExecTaskInfo* pTaskInfo = (SExecTaskInfo*)tinfo;
+ SExecTaskInfo* pTaskInfo = (SExecTaskInfo*)tinfo;
+ STableListInfo* pListInfo = &pTaskInfo->tableqinfoList;
+
+ if (isAdd) {
+ qDebug("add %d tables id into query list, %s", (int32_t)taosArrayGetSize(tableIdList), pTaskInfo->id.str);
+ }
+
+ if (pListInfo->map == NULL) {
+ pListInfo->map = taosHashInit(32, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), false, HASH_NO_LOCK);
+ }
// traverse to the stream scanner node to add this table id
SOperatorInfo* pInfo = pTaskInfo->pRoot;
@@ -293,13 +330,21 @@ int32_t qUpdateQualifiedTableId(qTaskInfo_t tinfo, const SArray* tableIdList, bo
}
}
- taosArrayPush(pTaskInfo->tableqinfoList.pTableList, &keyInfo);
- if (pTaskInfo->tableqinfoList.map == NULL) {
- pTaskInfo->tableqinfoList.map =
- taosHashInit(32, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), false, HASH_NO_LOCK);
+ bool exists = false;
+#if 0
+ for (int32_t k = 0; k < taosArrayGetSize(pListInfo->pTableList); ++k) {
+ STableKeyInfo* pKeyInfo = taosArrayGet(pListInfo->pTableList, k);
+ if (pKeyInfo->uid == keyInfo.uid) {
+ qWarn("ignore duplicated query table uid:%" PRIu64 " added, %s", pKeyInfo->uid, pTaskInfo->id.str);
+ exists = true;
+ }
}
+#endif
- taosHashPut(pTaskInfo->tableqinfoList.map, uid, sizeof(uid), &keyInfo.groupId, sizeof(keyInfo.groupId));
+ if (!exists) {
+ taosArrayPush(pTaskInfo->tableqinfoList.pTableList, &keyInfo);
+ taosHashPut(pTaskInfo->tableqinfoList.map, uid, sizeof(uid), &keyInfo.groupId, sizeof(keyInfo.groupId));
+ }
}
if (keyBuf != NULL) {
@@ -434,10 +479,14 @@ static void freeBlock(void* param) {
blockDataDestroy(pBlock);
}
-int32_t qExecTaskOpt(qTaskInfo_t tinfo, SArray* pResList, uint64_t* useconds) {
+int32_t qExecTaskOpt(qTaskInfo_t tinfo, SArray* pResList, uint64_t* useconds, bool* hasMore, SLocalFetch* pLocal) {
SExecTaskInfo* pTaskInfo = (SExecTaskInfo*)tinfo;
int64_t threadId = taosGetSelfPthreadId();
+ if (pLocal) {
+ memcpy(&pTaskInfo->localFetch, pLocal, sizeof(*pLocal));
+ }
+
taosArrayClearEx(pResList, freeBlock);
int64_t curOwner = 0;
@@ -462,6 +511,7 @@ int32_t qExecTaskOpt(qTaskInfo_t tinfo, SArray* pResList, uint64_t* useconds) {
if (ret != TSDB_CODE_SUCCESS) {
pTaskInfo->code = ret;
cleanUpUdfs();
+
qDebug("%s task abort due to error/cancel occurs, code:%s", GET_TASKID(pTaskInfo), tstrerror(pTaskInfo->code));
atomic_store_64(&pTaskInfo->owner, 0);
@@ -486,6 +536,7 @@ int32_t qExecTaskOpt(qTaskInfo_t tinfo, SArray* pResList, uint64_t* useconds) {
}
}
+ *hasMore = (pRes != NULL);
uint64_t el = (taosGetTimestampUs() - st);
pTaskInfo->cost.elapsedTime += el;
@@ -494,8 +545,8 @@ int32_t qExecTaskOpt(qTaskInfo_t tinfo, SArray* pResList, uint64_t* useconds) {
}
cleanUpUdfs();
- uint64_t total = pTaskInfo->pRoot->resultInfo.totalRows;
+ uint64_t total = pTaskInfo->pRoot->resultInfo.totalRows;
qDebug("%s task suspended, %d rows in %d blocks returned, total:%" PRId64 " rows, in sinkNode:%d, elapsed:%.2f ms",
GET_TASKID(pTaskInfo), current, (int32_t)taosArrayGetSize(pResList), total, 0, el / 1000.0);
@@ -669,15 +720,26 @@ void* qExtractReaderFromStreamScanner(void* scanner) {
return (void*)pInfo->tqReader;
}
-const SSchemaWrapper* qExtractSchemaFromStreamScanner(void* scanner) {
- SStreamScanInfo* pInfo = scanner;
- return pInfo->tqReader->pSchemaWrapper;
+const SSchemaWrapper* qExtractSchemaFromTask(qTaskInfo_t tinfo) {
+ SExecTaskInfo* pTaskInfo = (SExecTaskInfo*)tinfo;
+ return pTaskInfo->streamInfo.schema;
+}
+
+const char* qExtractTbnameFromTask(qTaskInfo_t tinfo) {
+ SExecTaskInfo* pTaskInfo = (SExecTaskInfo*)tinfo;
+ return pTaskInfo->streamInfo.tbName;
+}
+
+SMqMetaRsp* qStreamExtractMetaMsg(qTaskInfo_t tinfo) {
+ SExecTaskInfo* pTaskInfo = (SExecTaskInfo*)tinfo;
+ ASSERT(pTaskInfo->execModel == OPTR_EXEC_MODEL_QUEUE);
+ return &pTaskInfo->streamInfo.metaRsp;
}
-void* qStreamExtractMetaMsg(qTaskInfo_t tinfo) {
+int64_t qStreamExtractPrepareUid(qTaskInfo_t tinfo) {
SExecTaskInfo* pTaskInfo = (SExecTaskInfo*)tinfo;
ASSERT(pTaskInfo->execModel == OPTR_EXEC_MODEL_QUEUE);
- return pTaskInfo->streamInfo.metaBlk;
+ return pTaskInfo->streamInfo.prepareStatus.uid;
}
int32_t qStreamExtractOffset(qTaskInfo_t tinfo, STqOffsetVal* pOffset) {
@@ -687,102 +749,175 @@ int32_t qStreamExtractOffset(qTaskInfo_t tinfo, STqOffsetVal* pOffset) {
return 0;
}
-int32_t qStreamPrepareScan(qTaskInfo_t tinfo, const STqOffsetVal* pOffset) {
+int32_t initQueryTableDataCondForTmq(SQueryTableDataCond* pCond, SSnapContext* sContext, SMetaTableInfo mtInfo) {
+ memset(pCond, 0, sizeof(SQueryTableDataCond));
+ pCond->order = TSDB_ORDER_ASC;
+ pCond->numOfCols = mtInfo.schema->nCols;
+ pCond->colList = taosMemoryCalloc(pCond->numOfCols, sizeof(SColumnInfo));
+ if (pCond->colList == NULL) {
+ terrno = TSDB_CODE_QRY_OUT_OF_MEMORY;
+ return terrno;
+ }
+
+ pCond->twindows = (STimeWindow){.skey = INT64_MIN, .ekey = INT64_MAX};
+ pCond->suid = mtInfo.suid;
+ pCond->type = TIMEWINDOW_RANGE_CONTAINED;
+ pCond->startVersion = -1;
+ pCond->endVersion = sContext->snapVersion;
+
+ for (int32_t i = 0; i < pCond->numOfCols; ++i) {
+ pCond->colList[i].type = mtInfo.schema->pSchema[i].type;
+ pCond->colList[i].bytes = mtInfo.schema->pSchema[i].bytes;
+ pCond->colList[i].colId = mtInfo.schema->pSchema[i].colId;
+ }
+
+ return TSDB_CODE_SUCCESS;
+}
+
+int32_t qStreamScanMemData(qTaskInfo_t tinfo, const SSubmitReq* pReq) {
+ SExecTaskInfo* pTaskInfo = (SExecTaskInfo*)tinfo;
+ ASSERT(pTaskInfo->execModel == OPTR_EXEC_MODEL_QUEUE);
+ ASSERT(pTaskInfo->streamInfo.pReq == NULL);
+ pTaskInfo->streamInfo.pReq = pReq;
+ return 0;
+}
+
+int32_t qStreamPrepareScan(qTaskInfo_t tinfo, STqOffsetVal* pOffset, int8_t subType) {
SExecTaskInfo* pTaskInfo = (SExecTaskInfo*)tinfo;
SOperatorInfo* pOperator = pTaskInfo->pRoot;
ASSERT(pTaskInfo->execModel == OPTR_EXEC_MODEL_QUEUE);
pTaskInfo->streamInfo.prepareStatus = *pOffset;
- if (!tOffsetEqual(pOffset, &pTaskInfo->streamInfo.lastStatus)) {
- while (1) {
- uint16_t type = pOperator->operatorType;
- pOperator->status = OP_OPENED;
- // TODO add more check
- if (type != QUERY_NODE_PHYSICAL_PLAN_STREAM_SCAN) {
- ASSERT(pOperator->numOfDownstream == 1);
- pOperator = pOperator->pDownstream[0];
- }
+ pTaskInfo->streamInfo.returned = 0;
+ if (tOffsetEqual(pOffset, &pTaskInfo->streamInfo.lastStatus)) {
+ return 0;
+ }
+ if (subType == TOPIC_SUB_TYPE__COLUMN) {
+ uint16_t type = pOperator->operatorType;
+ pOperator->status = OP_OPENED;
+ // TODO add more check
+ if (type != QUERY_NODE_PHYSICAL_PLAN_STREAM_SCAN) {
+ ASSERT(pOperator->numOfDownstream == 1);
+ pOperator = pOperator->pDownstream[0];
+ }
- SStreamScanInfo* pInfo = pOperator->info;
- if (pOffset->type == TMQ_OFFSET__LOG) {
- STableScanInfo* pTSInfo = pInfo->pTableScanOp->info;
- tsdbReaderClose(pTSInfo->dataReader);
- pTSInfo->dataReader = NULL;
+ SStreamScanInfo* pInfo = pOperator->info;
+ if (pOffset->type == TMQ_OFFSET__LOG) {
+ STableScanInfo* pTSInfo = pInfo->pTableScanOp->info;
+ tsdbReaderClose(pTSInfo->dataReader);
+ pTSInfo->dataReader = NULL;
#if 0
- if (tOffsetEqual(pOffset, &pTaskInfo->streamInfo.lastStatus) &&
- pInfo->tqReader->pWalReader->curVersion != pOffset->version) {
- qError("prepare scan ver %" PRId64 " actual ver %" PRId64 ", last %" PRId64, pOffset->version,
- pInfo->tqReader->pWalReader->curVersion, pTaskInfo->streamInfo.lastStatus.version);
- ASSERT(0);
- }
+ if (tOffsetEqual(pOffset, &pTaskInfo->streamInfo.lastStatus) &&
+ pInfo->tqReader->pWalReader->curVersion != pOffset->version) {
+ qError("prepare scan ver %" PRId64 " actual ver %" PRId64 ", last %" PRId64, pOffset->version,
+ pInfo->tqReader->pWalReader->curVersion, pTaskInfo->streamInfo.lastStatus.version);
+ ASSERT(0);
+ }
#endif
- if (tqSeekVer(pInfo->tqReader, pOffset->version + 1) < 0) {
+ if (tqSeekVer(pInfo->tqReader, pOffset->version + 1) < 0) {
+ return -1;
+ }
+ ASSERT(pInfo->tqReader->pWalReader->curVersion == pOffset->version + 1);
+ } else if (pOffset->type == TMQ_OFFSET__SNAPSHOT_DATA) {
+ /*pInfo->blockType = STREAM_INPUT__TABLE_SCAN;*/
+ int64_t uid = pOffset->uid;
+ int64_t ts = pOffset->ts;
+
+ if (uid == 0) {
+ if (taosArrayGetSize(pTaskInfo->tableqinfoList.pTableList) != 0) {
+ STableKeyInfo* pTableInfo = taosArrayGet(pTaskInfo->tableqinfoList.pTableList, 0);
+ uid = pTableInfo->uid;
+ ts = INT64_MIN;
+ } else {
return -1;
}
- ASSERT(pInfo->tqReader->pWalReader->curVersion == pOffset->version + 1);
- } else if (pOffset->type == TMQ_OFFSET__SNAPSHOT_DATA) {
- /*pInfo->blockType = STREAM_INPUT__TABLE_SCAN;*/
- int64_t uid = pOffset->uid;
- int64_t ts = pOffset->ts;
-
- if (uid == 0) {
- if (taosArrayGetSize(pTaskInfo->tableqinfoList.pTableList) != 0) {
- STableKeyInfo* pTableInfo = taosArrayGet(pTaskInfo->tableqinfoList.pTableList, 0);
- uid = pTableInfo->uid;
- ts = INT64_MIN;
- } else {
- return -1;
- }
- }
+ }
- /*if (pTaskInfo->streamInfo.lastStatus.type != TMQ_OFFSET__SNAPSHOT_DATA ||*/
- /*pTaskInfo->streamInfo.lastStatus.uid != uid || pTaskInfo->streamInfo.lastStatus.ts != ts) {*/
- STableScanInfo* pTableScanInfo = pInfo->pTableScanOp->info;
- int32_t tableSz = taosArrayGetSize(pTaskInfo->tableqinfoList.pTableList);
+ /*if (pTaskInfo->streamInfo.lastStatus.type != TMQ_OFFSET__SNAPSHOT_DATA ||*/
+ /*pTaskInfo->streamInfo.lastStatus.uid != uid || pTaskInfo->streamInfo.lastStatus.ts != ts) {*/
+ STableScanInfo* pTableScanInfo = pInfo->pTableScanOp->info;
+ int32_t tableSz = taosArrayGetSize(pTaskInfo->tableqinfoList.pTableList);
#ifndef NDEBUG
-
- qDebug("switch to next table %" PRId64 " (cursor %d), %" PRId64 " rows returned", uid,
- pTableScanInfo->currentTable, pInfo->pTableScanOp->resultInfo.totalRows);
- pInfo->pTableScanOp->resultInfo.totalRows = 0;
+ qDebug("switch to next table %" PRId64 " (cursor %d), %" PRId64 " rows returned", uid,
+ pTableScanInfo->currentTable, pInfo->pTableScanOp->resultInfo.totalRows);
+ pInfo->pTableScanOp->resultInfo.totalRows = 0;
#endif
- bool found = false;
- for (int32_t i = 0; i < tableSz; i++) {
- STableKeyInfo* pTableInfo = taosArrayGet(pTaskInfo->tableqinfoList.pTableList, i);
- if (pTableInfo->uid == uid) {
- found = true;
- pTableScanInfo->currentTable = i;
- break;
- }
+ bool found = false;
+ for (int32_t i = 0; i < tableSz; i++) {
+ STableKeyInfo* pTableInfo = taosArrayGet(pTaskInfo->tableqinfoList.pTableList, i);
+ if (pTableInfo->uid == uid) {
+ found = true;
+ pTableScanInfo->currentTable = i;
+ break;
}
+ }
- // TODO after dropping table, table may be not found
- ASSERT(found);
+ // TODO after dropping table, table may not found
+ ASSERT(found);
- if (pTableScanInfo->dataReader == NULL) {
- if (tsdbReaderOpen(pTableScanInfo->readHandle.vnode, &pTableScanInfo->cond,
- pTaskInfo->tableqinfoList.pTableList, &pTableScanInfo->dataReader, NULL) < 0 ||
- pTableScanInfo->dataReader == NULL) {
- ASSERT(0);
- }
+ if (pTableScanInfo->dataReader == NULL) {
+ if (tsdbReaderOpen(pTableScanInfo->readHandle.vnode, &pTableScanInfo->cond,
+ pTaskInfo->tableqinfoList.pTableList, &pTableScanInfo->dataReader, NULL) < 0 ||
+ pTableScanInfo->dataReader == NULL) {
+ ASSERT(0);
}
+ }
- tsdbSetTableId(pTableScanInfo->dataReader, uid);
- int64_t oldSkey = pTableScanInfo->cond.twindows.skey;
- pTableScanInfo->cond.twindows.skey = ts + 1;
- tsdbReaderReset(pTableScanInfo->dataReader, &pTableScanInfo->cond);
- pTableScanInfo->cond.twindows.skey = oldSkey;
- pTableScanInfo->scanTimes = 0;
+ tsdbSetTableId(pTableScanInfo->dataReader, uid);
+ int64_t oldSkey = pTableScanInfo->cond.twindows.skey;
+ pTableScanInfo->cond.twindows.skey = ts + 1;
+ tsdbReaderReset(pTableScanInfo->dataReader, &pTableScanInfo->cond);
+ pTableScanInfo->cond.twindows.skey = oldSkey;
+ pTableScanInfo->scanTimes = 0;
- qDebug("tsdb reader offset seek to uid %" PRId64 " ts %" PRId64 ", table cur set to %d , all table num %d", uid,
- ts, pTableScanInfo->currentTable, tableSz);
- /*}*/
+ qDebug("tsdb reader offset seek to uid %" PRId64 " ts %" PRId64 ", table cur set to %d , all table num %d", uid,
+ ts, pTableScanInfo->currentTable, tableSz);
+ /*}*/
+ } else {
+ ASSERT(0);
+ }
+ } else if (pOffset->type == TMQ_OFFSET__SNAPSHOT_DATA) {
+ SStreamRawScanInfo* pInfo = pOperator->info;
+ SSnapContext* sContext = pInfo->sContext;
+ if (setForSnapShot(sContext, pOffset->uid) != 0) {
+ qError("setDataForSnapShot error. uid:%" PRIi64, pOffset->uid);
+ return -1;
+ }
- } else {
- ASSERT(0);
- }
- return 0;
+ SMetaTableInfo mtInfo = getUidfromSnapShot(sContext);
+ tsdbReaderClose(pInfo->dataReader);
+ pInfo->dataReader = NULL;
+ cleanupQueryTableDataCond(&pTaskInfo->streamInfo.tableCond);
+ taosArrayDestroy(pTaskInfo->tableqinfoList.pTableList);
+ if (mtInfo.uid == 0) return 0; // no data
+
+ initQueryTableDataCondForTmq(&pTaskInfo->streamInfo.tableCond, sContext, mtInfo);
+ pTaskInfo->streamInfo.tableCond.twindows.skey = pOffset->ts;
+ pTaskInfo->tableqinfoList.pTableList = taosArrayInit(1, sizeof(STableKeyInfo));
+ taosArrayPush(pTaskInfo->tableqinfoList.pTableList, &(STableKeyInfo){.uid = mtInfo.uid, .groupId = 0});
+ tsdbReaderOpen(pInfo->vnode, &pTaskInfo->streamInfo.tableCond, pTaskInfo->tableqinfoList.pTableList,
+ &pInfo->dataReader, NULL);
+
+ cleanupQueryTableDataCond(&pTaskInfo->streamInfo.tableCond);
+ strcpy(pTaskInfo->streamInfo.tbName, mtInfo.tbName);
+ tDeleteSSchemaWrapper(pTaskInfo->streamInfo.schema);
+ pTaskInfo->streamInfo.schema = mtInfo.schema;
+
+ qDebug("tmqsnap qStreamPrepareScan snapshot data uid %ld ts %ld", mtInfo.uid, pOffset->ts);
+ } else if (pOffset->type == TMQ_OFFSET__SNAPSHOT_META) {
+ SStreamRawScanInfo* pInfo = pOperator->info;
+ SSnapContext* sContext = pInfo->sContext;
+ if (setForSnapShot(sContext, pOffset->uid) != 0) {
+ qError("setForSnapShot error. uid:%" PRIi64 " ,version:%" PRIi64, pOffset->uid);
+ return -1;
}
+ qDebug("tmqsnap qStreamPrepareScan snapshot meta uid %ld ts %ld", pOffset->uid);
+ } else if (pOffset->type == TMQ_OFFSET__LOG) {
+ SStreamRawScanInfo* pInfo = pOperator->info;
+ tsdbReaderClose(pInfo->dataReader);
+ pInfo->dataReader = NULL;
+ qDebug("tmqsnap qStreamPrepareScan snapshot log");
}
return 0;
}
diff --git a/source/libs/executor/src/executorimpl.c b/source/libs/executor/src/executorimpl.c
index 893acf1bbc67764d219bce708be83d48e5a6a49f..99177913126b3da573fb9a006171f1dbae686d6b 100644
--- a/source/libs/executor/src/executorimpl.c
+++ b/source/libs/executor/src/executorimpl.c
@@ -81,11 +81,6 @@ static UNUSED_FUNC void* u_realloc(void* p, size_t __size) {
int32_t getMaximumIdleDurationSec() { return tsShellActivityTimer * 2; }
-static int32_t getExprFunctionId(SExprInfo* pExprInfo) {
- assert(pExprInfo != NULL && pExprInfo->pExpr != NULL && pExprInfo->pExpr->nodeType == TEXPR_UNARYEXPR_NODE);
- return 0;
-}
-
static void setBlockSMAInfo(SqlFunctionCtx* pCtx, SExprInfo* pExpr, SSDataBlock* pBlock);
static void releaseQueryBuf(size_t numOfTables);
@@ -132,8 +127,6 @@ SOperatorFpSet createOperatorFpSet(__optr_open_fn_t openFn, __optr_fn_t nextFn,
return fpSet;
}
-void operatorDummyCloseFn(void* param, int32_t numOfCols) {}
-
static int32_t doCopyToSDataBlock(SExecTaskInfo* pTaskInfo, SSDataBlock* pBlock, SExprSupp* pSup, SDiskbasedBuf* pBuf,
SGroupResInfo* pGroupResInfo);
@@ -179,26 +172,23 @@ static bool chkResultRowFromKey(STaskRuntimeEnv* pRuntimeEnv, SResultRowInfo* pR
}
#endif
-SResultRow* getNewResultRow(SDiskbasedBuf* pResultBuf, int64_t tableGroupId, int32_t interBufSize) {
+SResultRow* getNewResultRow(SDiskbasedBuf* pResultBuf, int32_t* currentPageId, int32_t interBufSize) {
SFilePage* pData = NULL;
// in the first scan, new space needed for results
int32_t pageId = -1;
- SIDList list = getDataBufPagesIdList(pResultBuf, tableGroupId);
-
- if (taosArrayGetSize(list) == 0) {
- pData = getNewBufPage(pResultBuf, tableGroupId, &pageId);
+ if (*currentPageId == -1) {
+ pData = getNewBufPage(pResultBuf, &pageId);
pData->num = sizeof(SFilePage);
} else {
- SPageInfo* pi = getLastPageInfo(list);
- pData = getBufPage(pResultBuf, getPageId(pi));
- pageId = getPageId(pi);
+ pData = getBufPage(pResultBuf, *currentPageId);
+ pageId = *currentPageId;
if (pData->num + interBufSize > getBufPageSize(pResultBuf)) {
// release current page first, and prepare the next one
- releaseBufPageInfo(pResultBuf, pi);
+ releaseBufPage(pResultBuf, pData);
- pData = getNewBufPage(pResultBuf, tableGroupId, &pageId);
+ pData = getNewBufPage(pResultBuf, &pageId);
if (pData != NULL) {
pData->num = sizeof(SFilePage);
}
@@ -215,9 +205,9 @@ SResultRow* getNewResultRow(SDiskbasedBuf* pResultBuf, int64_t tableGroupId, int
SResultRow* pResultRow = (SResultRow*)((char*)pData + pData->num);
pResultRow->pageId = pageId;
pResultRow->offset = (int32_t)pData->num;
+ *currentPageId = pageId;
pData->num += interBufSize;
-
return pResultRow;
}
@@ -234,7 +224,7 @@ SResultRow* doSetResultOutBufByKey(SDiskbasedBuf* pResultBuf, SResultRowInfo* pR
SET_RES_WINDOW_KEY(pSup->keyBuf, pData, bytes, groupId);
SResultRowPosition* p1 =
- (SResultRowPosition*)taosHashGet(pSup->pResultRowHashTable, pSup->keyBuf, GET_RES_WINDOW_KEY_LEN(bytes));
+ (SResultRowPosition*)tSimpleHashGet(pSup->pResultRowHashTable, pSup->keyBuf, GET_RES_WINDOW_KEY_LEN(bytes));
SResultRow* pResult = NULL;
@@ -263,18 +253,13 @@ SResultRow* doSetResultOutBufByKey(SDiskbasedBuf* pResultBuf, SResultRowInfo* pR
// allocate a new buffer page
if (pResult == NULL) {
-#ifdef BUF_PAGE_DEBUG
- qDebug("page_2");
-#endif
ASSERT(pSup->resultRowSize > 0);
- pResult = getNewResultRow(pResultBuf, groupId, pSup->resultRowSize);
-
- initResultRow(pResult);
+ pResult = getNewResultRow(pResultBuf, &pSup->currentPageId, pSup->resultRowSize);
// add a new result set for a new group
SResultRowPosition pos = {.pageId = pResult->pageId, .offset = pResult->offset};
- taosHashPut(pSup->pResultRowHashTable, pSup->keyBuf, GET_RES_WINDOW_KEY_LEN(bytes), &pos,
- sizeof(SResultRowPosition));
+ tSimpleHashPut(pSup->pResultRowHashTable, pSup->keyBuf, GET_RES_WINDOW_KEY_LEN(bytes), &pos,
+ sizeof(SResultRowPosition));
}
// 2. set the new time window to be the new active time window
@@ -282,7 +267,7 @@ SResultRow* doSetResultOutBufByKey(SDiskbasedBuf* pResultBuf, SResultRowInfo* pR
// too many time window in query
if (pTaskInfo->execModel == OPTR_EXEC_MODEL_BATCH &&
- taosHashGetSize(pSup->pResultRowHashTable) > MAX_INTERVAL_TIME_WINDOW) {
+ tSimpleHashGetSize(pSup->pResultRowHashTable) > MAX_INTERVAL_TIME_WINDOW) {
T_LONG_JMP(pTaskInfo->env, TSDB_CODE_QRY_TOO_MANY_TIMEWINDOW);
}
@@ -299,10 +284,10 @@ static int32_t addNewWindowResultBuf(SResultRow* pWindowRes, SDiskbasedBuf* pRes
// in the first scan, new space needed for results
int32_t pageId = -1;
- SIDList list = getDataBufPagesIdList(pResultBuf, tid);
+ SIDList list = getDataBufPagesIdList(pResultBuf);
if (taosArrayGetSize(list) == 0) {
- pData = getNewBufPage(pResultBuf, tid, &pageId);
+ pData = getNewBufPage(pResultBuf, &pageId);
pData->num = sizeof(SFilePage);
} else {
SPageInfo* pi = getLastPageInfo(list);
@@ -313,7 +298,7 @@ static int32_t addNewWindowResultBuf(SResultRow* pWindowRes, SDiskbasedBuf* pRes
// release current page first, and prepare the next one
releaseBufPageInfo(pResultBuf, pi);
- pData = getNewBufPage(pResultBuf, tid, &pageId);
+ pData = getNewBufPage(pResultBuf, &pageId);
if (pData != NULL) {
pData->num = sizeof(SFilePage);
}
@@ -600,7 +585,7 @@ int32_t projectApplyFunctions(SExprInfo* pExpr, SSDataBlock* pResult, SSDataBloc
if (pExpr[k].pExpr->nodeType == QUERY_NODE_COLUMN) { // it is a project query
SColumnInfoData* pColInfoData = taosArrayGet(pResult->pDataBlock, outputSlotId);
if (pResult->info.rows > 0 && !createNewColModel) {
- colDataMergeCol(pColInfoData, pResult->info.rows, &pResult->info.capacity, pInputData->pData[0],
+ colDataMergeCol(pColInfoData, pResult->info.rows, (int32_t*)&pResult->info.capacity, pInputData->pData[0],
pInputData->numOfRows);
} else {
colDataAssign(pColInfoData, pInputData->pData[0], pInputData->numOfRows, &pResult->info);
@@ -638,7 +623,7 @@ int32_t projectApplyFunctions(SExprInfo* pExpr, SSDataBlock* pResult, SSDataBloc
int32_t startOffset = createNewColModel ? 0 : pResult->info.rows;
ASSERT(pResult->info.capacity > 0);
- colDataMergeCol(pResColData, startOffset, &pResult->info.capacity, &idata, dest.numOfRows);
+ colDataMergeCol(pResColData, startOffset, (int32_t*)&pResult->info.capacity, &idata, dest.numOfRows);
colDataDestroy(&idata);
numOfRows = dest.numOfRows;
@@ -703,7 +688,7 @@ int32_t projectApplyFunctions(SExprInfo* pExpr, SSDataBlock* pResult, SSDataBloc
int32_t startOffset = createNewColModel ? 0 : pResult->info.rows;
ASSERT(pResult->info.capacity > 0);
- colDataMergeCol(pResColData, startOffset, &pResult->info.capacity, &idata, dest.numOfRows);
+ colDataMergeCol(pResColData, startOffset, (int32_t*)&pResult->info.capacity, &idata, dest.numOfRows);
colDataDestroy(&idata);
numOfRows = dest.numOfRows;
@@ -823,13 +808,6 @@ void setBlockSMAInfo(SqlFunctionCtx* pCtx, SExprInfo* pExprInfo, SSDataBlock* pB
} else {
pInput->colDataAggIsSet = false;
}
-
- // set the statistics data for primary time stamp column
- // if (pCtx->functionId == FUNCTION_SPREAD && pColumn->colId == PRIMARYKEY_TIMESTAMP_COL_ID) {
- // pCtx->isAggSet = true;
- // pCtx->agg.min = pBlock->info.window.skey;
- // pCtx->agg.max = pBlock->info.window.ekey;
- // }
}
bool isTaskKilled(SExecTaskInfo* pTaskInfo) {
@@ -866,146 +844,6 @@ STimeWindow getAlignQueryTimeWindow(SInterval* pInterval, int32_t precision, int
return win;
}
-#if 0
-static int32_t updateBlockLoadStatus(STaskAttr* pQuery, int32_t status) {
-
- bool hasFirstLastFunc = false;
- bool hasOtherFunc = false;
-
- if (status == BLK_DATA_DATA_LOAD || status == BLK_DATA_FILTEROUT) {
- return status;
- }
-
- for (int32_t i = 0; i < pQuery->numOfOutput; ++i) {
- int32_t functionId = getExprFunctionId(&pQuery->pExpr1[i]);
-
- if (functionId == FUNCTION_TS || functionId == FUNCTION_TS_DUMMY || functionId == FUNCTION_TAG ||
- functionId == FUNCTION_TAG_DUMMY) {
- continue;
- }
-
- if (functionId == FUNCTION_FIRST_DST || functionId == FUNCTION_LAST_DST) {
- hasFirstLastFunc = true;
- } else {
- hasOtherFunc = true;
- }
-
- }
-
- if (hasFirstLastFunc && status == BLK_DATA_NOT_LOAD) {
- if (!hasOtherFunc) {
- return BLK_DATA_FILTEROUT;
- } else {
- return BLK_DATA_DATA_LOAD;
- }
- }
-
- return status;
-}
-
-#endif
-
-// static void updateDataCheckOrder(SQInfo *pQInfo, SQueryTableReq* pQueryMsg, bool stableQuery) {
-// STaskAttr* pQueryAttr = pQInfo->runtimeEnv.pQueryAttr;
-//
-// // in case of point-interpolation query, use asc order scan
-// char msg[] = "QInfo:0x%"PRIx64" scan order changed for %s query, old:%d, new:%d, qrange exchanged, old qrange:%"
-// PRId64
-// "-%" PRId64 ", new qrange:%" PRId64 "-%" PRId64;
-//
-// // todo handle the case the the order irrelevant query type mixed up with order critical query type
-// // descending order query for last_row query
-// if (isFirstLastRowQuery(pQueryAttr)) {
-// //qDebug("QInfo:0x%"PRIx64" scan order changed for last_row query, old:%d, new:%d", pQInfo->qId,
-// pQueryAttr->order.order, TSDB_ORDER_ASC);
-//
-// pQueryAttr->order.order = TSDB_ORDER_ASC;
-// if (pQueryAttr->window.skey > pQueryAttr->window.ekey) {
-// TSWAP(pQueryAttr->window.skey, pQueryAttr->window.ekey);
-// }
-//
-// pQueryAttr->needReverseScan = false;
-// return;
-// }
-//
-// if (pQueryAttr->groupbyColumn && pQueryAttr->order.order == TSDB_ORDER_DESC) {
-// pQueryAttr->order.order = TSDB_ORDER_ASC;
-// if (pQueryAttr->window.skey > pQueryAttr->window.ekey) {
-// TSWAP(pQueryAttr->window.skey, pQueryAttr->window.ekey);
-// }
-//
-// pQueryAttr->needReverseScan = false;
-// doUpdateLastKey(pQueryAttr);
-// return;
-// }
-//
-// if (pQueryAttr->pointInterpQuery && pQueryAttr->interval.interval == 0) {
-// if (!QUERY_IS_ASC_QUERY(pQueryAttr)) {
-// //qDebug(msg, pQInfo->qId, "interp", pQueryAttr->order.order, TSDB_ORDER_ASC, pQueryAttr->window.skey,
-// pQueryAttr->window.ekey, pQueryAttr->window.ekey, pQueryAttr->window.skey); TSWAP(pQueryAttr->window.skey,
-// pQueryAttr->window.ekey, TSKEY);
-// }
-//
-// pQueryAttr->order.order = TSDB_ORDER_ASC;
-// return;
-// }
-//
-// if (pQueryAttr->interval.interval == 0) {
-// if (onlyFirstQuery(pQueryAttr)) {
-// if (!QUERY_IS_ASC_QUERY(pQueryAttr)) {
-// //qDebug(msg, pQInfo->qId, "only-first", pQueryAttr->order.order, TSDB_ORDER_ASC, pQueryAttr->window.skey,
-//// pQueryAttr->window.ekey, pQueryAttr->window.ekey, pQueryAttr->window.skey);
-//
-// TSWAP(pQueryAttr->window.skey, pQueryAttr->window.ekey);
-// doUpdateLastKey(pQueryAttr);
-// }
-//
-// pQueryAttr->order.order = TSDB_ORDER_ASC;
-// pQueryAttr->needReverseScan = false;
-// } else if (onlyLastQuery(pQueryAttr) && notContainSessionOrStateWindow(pQueryAttr)) {
-// if (QUERY_IS_ASC_QUERY(pQueryAttr)) {
-// //qDebug(msg, pQInfo->qId, "only-last", pQueryAttr->order.order, TSDB_ORDER_DESC, pQueryAttr->window.skey,
-//// pQueryAttr->window.ekey, pQueryAttr->window.ekey, pQueryAttr->window.skey);
-//
-// TSWAP(pQueryAttr->window.skey, pQueryAttr->window.ekey);
-// doUpdateLastKey(pQueryAttr);
-// }
-//
-// pQueryAttr->order.order = TSDB_ORDER_DESC;
-// pQueryAttr->needReverseScan = false;
-// }
-//
-// } else { // interval query
-// if (stableQuery) {
-// if (onlyFirstQuery(pQueryAttr)) {
-// if (!QUERY_IS_ASC_QUERY(pQueryAttr)) {
-// //qDebug(msg, pQInfo->qId, "only-first stable", pQueryAttr->order.order, TSDB_ORDER_ASC,
-//// pQueryAttr->window.skey, pQueryAttr->window.ekey, pQueryAttr->window.ekey,
-/// pQueryAttr->window.skey);
-//
-// TSWAP(pQueryAttr->window.skey, pQueryAttr->window.ekey);
-// doUpdateLastKey(pQueryAttr);
-// }
-//
-// pQueryAttr->order.order = TSDB_ORDER_ASC;
-// pQueryAttr->needReverseScan = false;
-// } else if (onlyLastQuery(pQueryAttr)) {
-// if (QUERY_IS_ASC_QUERY(pQueryAttr)) {
-// //qDebug(msg, pQInfo->qId, "only-last stable", pQueryAttr->order.order, TSDB_ORDER_DESC,
-//// pQueryAttr->window.skey, pQueryAttr->window.ekey, pQueryAttr->window.ekey,
-/// pQueryAttr->window.skey);
-//
-// TSWAP(pQueryAttr->window.skey, pQueryAttr->window.ekey);
-// doUpdateLastKey(pQueryAttr);
-// }
-//
-// pQueryAttr->order.order = TSDB_ORDER_DESC;
-// pQueryAttr->needReverseScan = false;
-// }
-// }
-// }
-//}
-
#if 0
static bool overlapWithTimeWindow(STaskAttr* pQueryAttr, SDataBlockInfo* pBlockInfo) {
STimeWindow w = {0};
@@ -1231,24 +1069,6 @@ static void updateTableQueryInfoForReverseScan(STableQueryInfo* pTableQueryInfo)
if (pTableQueryInfo == NULL) {
return;
}
-
- // TSWAP(pTableQueryInfo->win.skey, pTableQueryInfo->win.ekey);
- // pTableQueryInfo->lastKey = pTableQueryInfo->win.skey;
-
- // SWITCH_ORDER(pTableQueryInfo->cur.order);
- // pTableQueryInfo->cur.vgroupIndex = -1;
-
- // set the index to be the end slot of result rows array
- // SResultRowInfo* pResultRowInfo = &pTableQueryInfo->resInfo;
- // if (pResultRowInfo->size > 0) {
- // pResultRowInfo->curPos = pResultRowInfo->size - 1;
- // } else {
- // pResultRowInfo->curPos = -1;
- // }
-}
-
-void initResultRow(SResultRow* pResultRow) {
- // pResultRow->pEntryInfo = (struct SResultRowEntryInfo*)((char*)pResultRow + sizeof(SResultRow));
}
void setTaskStatus(SExecTaskInfo* pTaskInfo, int8_t status) {
@@ -1261,15 +1081,6 @@ void setTaskStatus(SExecTaskInfo* pTaskInfo, int8_t status) {
}
}
-void destroyTableQueryInfoImpl(STableQueryInfo* pTableQueryInfo) {
- if (pTableQueryInfo == NULL) {
- return;
- }
-
- // taosVariantDestroy(&pTableQueryInfo->tag);
- // cleanupResultRowInfo(&pTableQueryInfo->resInfo);
-}
-
void setResultRowInitCtx(SResultRow* pResult, SqlFunctionCtx* pCtx, int32_t numOfOutput, int32_t* rowEntryInfoOffset) {
bool init = false;
for (int32_t i = 0; i < numOfOutput; ++i) {
@@ -1299,7 +1110,8 @@ void setResultRowInitCtx(SResultRow* pResult, SqlFunctionCtx* pCtx, int32_t numO
}
}
-static void extractQualifiedTupleByFilterResult(SSDataBlock* pBlock, const int8_t* rowRes, bool keep);
+static void extractQualifiedTupleByFilterResult(SSDataBlock* pBlock, const SColumnInfoData* p, bool keep,
+ int32_t status);
void doFilter(const SNode* pFilterNode, SSDataBlock* pBlock, const SArray* pColMatchInfo) {
if (pFilterNode == NULL || pBlock->info.rows == 0) {
@@ -1309,19 +1121,18 @@ void doFilter(const SNode* pFilterNode, SSDataBlock* pBlock, const SArray* pColM
SFilterInfo* filter = NULL;
// todo move to the initialization function
- int32_t code = filterInitFromNode((SNode*)pFilterNode, &filter, 0);
-
- size_t numOfCols = taosArrayGetSize(pBlock->pDataBlock);
- SFilterColumnParam param1 = {.numOfCols = numOfCols, .pDataBlock = pBlock->pDataBlock};
+ int32_t code = filterInitFromNode((SNode*)pFilterNode, &filter, 0);
+ SFilterColumnParam param1 = {.numOfCols = taosArrayGetSize(pBlock->pDataBlock), .pDataBlock = pBlock->pDataBlock};
code = filterSetDataFromSlotId(filter, ¶m1);
- int8_t* rowRes = NULL;
+ SColumnInfoData* p = NULL;
+ int32_t status = 0;
// todo the keep seems never to be True??
- bool keep = filterExecute(filter, pBlock, &rowRes, NULL, param1.numOfCols);
+ bool keep = filterExecute(filter, pBlock, &p, NULL, param1.numOfCols, &status);
filterFreeInfo(filter);
- extractQualifiedTupleByFilterResult(pBlock, rowRes, keep);
+ extractQualifiedTupleByFilterResult(pBlock, p, keep, status);
if (pColMatchInfo != NULL) {
for (int32_t i = 0; i < taosArrayGetSize(pColMatchInfo); ++i) {
@@ -1336,16 +1147,22 @@ void doFilter(const SNode* pFilterNode, SSDataBlock* pBlock, const SArray* pColM
}
}
- taosMemoryFree(rowRes);
+ colDataDestroy(p);
+ taosMemoryFree(p);
}
-void extractQualifiedTupleByFilterResult(SSDataBlock* pBlock, const int8_t* rowRes, bool keep) {
+void extractQualifiedTupleByFilterResult(SSDataBlock* pBlock, const SColumnInfoData* p, bool keep, int32_t status) {
if (keep) {
return;
}
- if (rowRes != NULL) {
- int32_t totalRows = pBlock->info.rows;
+ int32_t totalRows = pBlock->info.rows;
+
+ if (status == FILTER_RESULT_ALL_QUALIFIED) {
+ // here nothing needs to be done
+ } else if (status == FILTER_RESULT_NONE_QUALIFIED) {
+ pBlock->info.rows = 0;
+ } else {
SSDataBlock* px = createOneDataBlock(pBlock, true);
size_t numOfCols = taosArrayGetSize(pBlock->pDataBlock);
@@ -1361,7 +1178,7 @@ void extractQualifiedTupleByFilterResult(SSDataBlock* pBlock, const int8_t* rowR
int32_t numOfRows = 0;
for (int32_t j = 0; j < totalRows; ++j) {
- if (rowRes[j] == 0) {
+ if (((int8_t*)p->pData)[j] == 0) {
continue;
}
@@ -1373,6 +1190,7 @@ void extractQualifiedTupleByFilterResult(SSDataBlock* pBlock, const int8_t* rowR
numOfRows += 1;
}
+ // todo this value can be assigned directly
if (pBlock->info.rows == totalRows) {
pBlock->info.rows = numOfRows;
} else {
@@ -1381,9 +1199,6 @@ void extractQualifiedTupleByFilterResult(SSDataBlock* pBlock, const int8_t* rowR
}
blockDataDestroy(px); // fix memory leak
- } else {
- // do nothing
- pBlock->info.rows = 0;
}
}
@@ -1451,33 +1266,12 @@ static void doUpdateNumOfRows(SqlFunctionCtx* pCtx, SResultRow* pRow, int32_t nu
}
}
-// todo extract method with copytoSSDataBlock
-int32_t finalizeResultRowIntoResultDataBlock(SDiskbasedBuf* pBuf, SResultRowPosition* resultRowPosition,
- SqlFunctionCtx* pCtx, SExprInfo* pExprInfo, int32_t numOfExprs,
- const int32_t* rowCellOffset, SSDataBlock* pBlock,
- SExecTaskInfo* pTaskInfo) {
- SFilePage* page = getBufPage(pBuf, resultRowPosition->pageId);
- SResultRow* pRow = (SResultRow*)((char*)page + resultRowPosition->offset);
-
- doUpdateNumOfRows(pCtx, pRow, numOfExprs, rowCellOffset);
- if (pRow->numOfRows == 0) {
- releaseBufPage(pBuf, page);
- return 0;
- }
-
- while (pBlock->info.rows + pRow->numOfRows > pBlock->info.capacity) {
- int32_t code = blockDataEnsureCapacity(pBlock, pBlock->info.capacity * 1.25);
- if (TAOS_FAILED(code)) {
- releaseBufPage(pBuf, page);
- qError("%s ensure result data capacity failed, code %s", GET_TASKID(pTaskInfo), tstrerror(code));
- T_LONG_JMP(pTaskInfo->env, code);
- }
- }
-
+static void doCopyResultToDataBlock(SExprInfo* pExprInfo, int32_t numOfExprs, SResultRow* pRow, SqlFunctionCtx* pCtx,
+ SSDataBlock* pBlock, const int32_t* rowEntryOffset, SExecTaskInfo* pTaskInfo) {
for (int32_t j = 0; j < numOfExprs; ++j) {
int32_t slotId = pExprInfo[j].base.resSchema.slotId;
- pCtx[j].resultInfo = getResultEntryInfo(pRow, j, rowCellOffset);
+ pCtx[j].resultInfo = getResultEntryInfo(pRow, j, rowEntryOffset);
if (pCtx[j].fpSet.finalize) {
int32_t code = pCtx[j].fpSet.finalize(&pCtx[j], pBlock);
if (TAOS_FAILED(code)) {
@@ -1485,7 +1279,7 @@ int32_t finalizeResultRowIntoResultDataBlock(SDiskbasedBuf* pBuf, SResultRowPosi
T_LONG_JMP(pTaskInfo->env, code);
}
} else if (strcmp(pCtx[j].pExpr->pExpr->_function.functionName, "_select_value") == 0) {
- // do nothing, todo refactor
+ // do nothing
} else {
// expand the result into multiple rows. E.g., _wstart, top(k, 20)
// the _wstart needs to copy to 20 following rows, since the results of top-k expands to 20 different rows.
@@ -1496,10 +1290,40 @@ int32_t finalizeResultRowIntoResultDataBlock(SDiskbasedBuf* pBuf, SResultRowPosi
}
}
}
+}
+
+// todo refactor. SResultRow has direct pointer in miainfo
+int32_t finalizeResultRows(SDiskbasedBuf* pBuf, SResultRowPosition* resultRowPosition, SExprSupp* pSup,
+ SSDataBlock* pBlock, SExecTaskInfo* pTaskInfo) {
+ SFilePage* page = getBufPage(pBuf, resultRowPosition->pageId);
+ SResultRow* pRow = (SResultRow*)((char*)page + resultRowPosition->offset);
+
+ SqlFunctionCtx* pCtx = pSup->pCtx;
+ SExprInfo* pExprInfo = pSup->pExprInfo;
+ const int32_t* rowEntryOffset = pSup->rowEntryInfoOffset;
+
+ doUpdateNumOfRows(pCtx, pRow, pSup->numOfExprs, rowEntryOffset);
+ if (pRow->numOfRows == 0) {
+ releaseBufPage(pBuf, page);
+ return 0;
+ }
+
+ int32_t size = pBlock->info.capacity;
+ while (pBlock->info.rows + pRow->numOfRows > size) {
+ size = size * 1.25;
+ }
+
+ int32_t code = blockDataEnsureCapacity(pBlock, size);
+ if (TAOS_FAILED(code)) {
+ releaseBufPage(pBuf, page);
+ qError("%s ensure result data capacity failed, code %s", GET_TASKID(pTaskInfo), tstrerror(code));
+ T_LONG_JMP(pTaskInfo->env, code);
+ }
+
+ doCopyResultToDataBlock(pExprInfo, pSup->numOfExprs, pRow, pCtx, pBlock, rowEntryOffset, pTaskInfo);
releaseBufPage(pBuf, page);
pBlock->info.rows += pRow->numOfRows;
-
return 0;
}
@@ -1544,40 +1368,7 @@ int32_t doCopyToSDataBlock(SExecTaskInfo* pTaskInfo, SSDataBlock* pBlock, SExprS
}
pGroupResInfo->index += 1;
-
- for (int32_t j = 0; j < numOfExprs; ++j) {
- int32_t slotId = pExprInfo[j].base.resSchema.slotId;
-
- pCtx[j].resultInfo = getResultEntryInfo(pRow, j, rowEntryOffset);
- if (pCtx[j].fpSet.finalize) {
-#ifdef BUF_PAGE_DEBUG
- qDebug("\npage_finalize %d", numOfExprs);
-#endif
- int32_t code = pCtx[j].fpSet.finalize(&pCtx[j], pBlock);
- if (TAOS_FAILED(code)) {
- qError("%s build result data block error, code %s", GET_TASKID(pTaskInfo), tstrerror(code));
- T_LONG_JMP(pTaskInfo->env, code);
- }
- } else if (strcmp(pCtx[j].pExpr->pExpr->_function.functionName, "_select_value") == 0) {
- // do nothing, todo refactor
- } else {
- // expand the result into multiple rows. E.g., _wstart, top(k, 20)
- // the _wstart needs to copy to 20 following rows, since the results of top-k expands to 20 different rows.
- SColumnInfoData* pColInfoData = taosArrayGet(pBlock->pDataBlock, slotId);
- char* in = GET_ROWCELL_INTERBUF(pCtx[j].resultInfo);
- if (pCtx[j].increase) {
- int64_t ts = *(int64_t*)in;
- for (int32_t k = 0; k < pRow->numOfRows; ++k) {
- colDataAppend(pColInfoData, pBlock->info.rows + k, (const char*)&ts, pCtx[j].resultInfo->isNullRes);
- ts++;
- }
- } else {
- for (int32_t k = 0; k < pRow->numOfRows; ++k) {
- colDataAppend(pColInfoData, pBlock->info.rows + k, in, pCtx[j].resultInfo->isNullRes);
- }
- }
- }
- }
+ doCopyResultToDataBlock(pExprInfo, numOfExprs, pRow, pCtx, pBlock, rowEntryOffset, pTaskInfo);
releaseBufPage(pBuf, page);
pBlock->info.rows += pRow->numOfRows;
@@ -1917,22 +1708,6 @@ int32_t appendDownstream(SOperatorInfo* p, SOperatorInfo** pDownstream, int32_t
static void doDestroyTableList(STableListInfo* pTableqinfoList);
-static void doTableQueryInfoTimeWindowCheck(SExecTaskInfo* pTaskInfo, STableQueryInfo* pTableQueryInfo, int32_t order) {
-#if 0
- if (order == TSDB_ORDER_ASC) {
- assert(
- (pTableQueryInfo->win.skey <= pTableQueryInfo->win.ekey) &&
- (pTableQueryInfo->lastKey >= pTaskInfo->window.skey) &&
- (pTableQueryInfo->win.skey >= pTaskInfo->window.skey && pTableQueryInfo->win.ekey <= pTaskInfo->window.ekey));
- } else {
- assert(
- (pTableQueryInfo->win.skey >= pTableQueryInfo->win.ekey) &&
- (pTableQueryInfo->lastKey <= pTaskInfo->window.skey) &&
- (pTableQueryInfo->win.skey <= pTaskInfo->window.skey && pTableQueryInfo->win.ekey >= pTaskInfo->window.ekey));
- }
-#endif
-}
-
typedef struct SFetchRspHandleWrapper {
uint32_t exchangeId;
int32_t sourceIndex;
@@ -2002,49 +1777,60 @@ void qProcessRspMsg(void* parent, SRpcMsg* pMsg, SEpSet* pEpSet) {
static int32_t doSendFetchDataRequest(SExchangeInfo* pExchangeInfo, SExecTaskInfo* pTaskInfo, int32_t sourceIndex) {
size_t totalSources = taosArrayGetSize(pExchangeInfo->pSources);
- SResFetchReq* pMsg = taosMemoryCalloc(1, sizeof(SResFetchReq));
- if (NULL == pMsg) {
- pTaskInfo->code = TSDB_CODE_QRY_OUT_OF_MEMORY;
- return pTaskInfo->code;
- }
-
SDownstreamSourceNode* pSource = taosArrayGet(pExchangeInfo->pSources, sourceIndex);
SSourceDataInfo* pDataInfo = taosArrayGet(pExchangeInfo->pSourceDataInfo, sourceIndex);
ASSERT(pDataInfo->status == EX_SOURCE_DATA_NOT_READY);
- qDebug("%s build fetch msg and send to vgId:%d, ep:%s, taskId:0x%" PRIx64 ", execId:%d, %d/%" PRIzu,
- GET_TASKID(pTaskInfo), pSource->addr.nodeId, pSource->addr.epSet.eps[0].fqdn, pSource->taskId, pSource->execId,
- sourceIndex, totalSources);
-
- pMsg->header.vgId = htonl(pSource->addr.nodeId);
- pMsg->sId = htobe64(pSource->schedId);
- pMsg->taskId = htobe64(pSource->taskId);
- pMsg->queryId = htobe64(pTaskInfo->id.queryId);
- pMsg->execId = htonl(pSource->execId);
-
- // send the fetch remote task result reques
- SMsgSendInfo* pMsgSendInfo = taosMemoryCalloc(1, sizeof(SMsgSendInfo));
- if (NULL == pMsgSendInfo) {
- taosMemoryFreeClear(pMsg);
- qError("%s prepare message %d failed", GET_TASKID(pTaskInfo), (int32_t)sizeof(SMsgSendInfo));
- pTaskInfo->code = TSDB_CODE_QRY_OUT_OF_MEMORY;
- return pTaskInfo->code;
- }
-
SFetchRspHandleWrapper* pWrapper = taosMemoryCalloc(1, sizeof(SFetchRspHandleWrapper));
pWrapper->exchangeId = pExchangeInfo->self;
pWrapper->sourceIndex = sourceIndex;
- pMsgSendInfo->param = pWrapper;
- pMsgSendInfo->paramFreeFp = taosMemoryFree;
- pMsgSendInfo->msgInfo.pData = pMsg;
- pMsgSendInfo->msgInfo.len = sizeof(SResFetchReq);
- pMsgSendInfo->msgType = pSource->fetchMsgType;
- pMsgSendInfo->fp = loadRemoteDataCallback;
+ if (pSource->localExec) {
+ SDataBuf pBuf = {0};
+ int32_t code =
+ (*pTaskInfo->localFetch.fp)(pTaskInfo->localFetch.handle, pSource->schedId, pTaskInfo->id.queryId,
+ pSource->taskId, 0, pSource->execId, &pBuf.pData, pTaskInfo->localFetch.explainRes);
+ loadRemoteDataCallback(pWrapper, &pBuf, code);
+ taosMemoryFree(pWrapper);
+ } else {
+ SResFetchReq* pMsg = taosMemoryCalloc(1, sizeof(SResFetchReq));
+ if (NULL == pMsg) {
+ pTaskInfo->code = TSDB_CODE_QRY_OUT_OF_MEMORY;
+ return pTaskInfo->code;
+ }
+
+ qDebug("%s build fetch msg and send to vgId:%d, ep:%s, taskId:0x%" PRIx64 ", execId:%d, %d/%" PRIzu,
+ GET_TASKID(pTaskInfo), pSource->addr.nodeId, pSource->addr.epSet.eps[0].fqdn, pSource->taskId,
+ pSource->execId, sourceIndex, totalSources);
+
+ pMsg->header.vgId = htonl(pSource->addr.nodeId);
+ pMsg->sId = htobe64(pSource->schedId);
+ pMsg->taskId = htobe64(pSource->taskId);
+ pMsg->queryId = htobe64(pTaskInfo->id.queryId);
+ pMsg->execId = htonl(pSource->execId);
+
+ // send the fetch remote task result reques
+ SMsgSendInfo* pMsgSendInfo = taosMemoryCalloc(1, sizeof(SMsgSendInfo));
+ if (NULL == pMsgSendInfo) {
+ taosMemoryFreeClear(pMsg);
+ qError("%s prepare message %d failed", GET_TASKID(pTaskInfo), (int32_t)sizeof(SMsgSendInfo));
+ pTaskInfo->code = TSDB_CODE_QRY_OUT_OF_MEMORY;
+ return pTaskInfo->code;
+ }
+
+ pMsgSendInfo->param = pWrapper;
+ pMsgSendInfo->paramFreeFp = taosMemoryFree;
+ pMsgSendInfo->msgInfo.pData = pMsg;
+ pMsgSendInfo->msgInfo.len = sizeof(SResFetchReq);
+ pMsgSendInfo->msgType = pSource->fetchMsgType;
+ pMsgSendInfo->fp = loadRemoteDataCallback;
+
+ int64_t transporterId = 0;
+ int32_t code =
+ asyncSendMsgToServer(pExchangeInfo->pTransporter, &pSource->addr.epSet, &transporterId, pMsgSendInfo);
+ }
- int64_t transporterId = 0;
- int32_t code = asyncSendMsgToServer(pExchangeInfo->pTransporter, &pSource->addr.epSet, &transporterId, pMsgSendInfo);
return TSDB_CODE_SUCCESS;
}
@@ -2056,13 +1842,11 @@ void updateLoadRemoteInfo(SLoadRemoteDataInfo* pInfo, int32_t numOfRows, int32_t
pOperator->resultInfo.totalRows += numOfRows;
}
-int32_t extractDataBlockFromFetchRsp(SSDataBlock* pRes, char* pData, int32_t numOfOutput, SArray* pColList,
- char** pNextStart) {
+int32_t extractDataBlockFromFetchRsp(SSDataBlock* pRes, char* pData, SArray* pColList, char** pNextStart) {
if (pColList == NULL) { // data from other sources
blockDataCleanup(pRes);
*pNextStart = (char*)blockDecode(pRes, pData);
} else { // extract data according to pColList
- ASSERT(numOfOutput == taosArrayGetSize(pColList));
char* pStart = pData;
int32_t numOfCols = htonl(*(int32_t*)pStart);
@@ -2160,7 +1944,7 @@ static void concurrentlyLoadRemoteDataImpl(SOperatorInfo* pOperator, SExchangeIn
char* pStart = pRetrieveRsp->data;
while (index++ < pRetrieveRsp->numOfBlocks) {
SSDataBlock* pb = createOneDataBlock(pExchangeInfo->pDummyBlock, false);
- code = extractDataBlockFromFetchRsp(pb, pStart, pRetrieveRsp->numOfCols, NULL, &pStart);
+ code = extractDataBlockFromFetchRsp(pb, pStart, NULL, &pStart);
if (code != 0) {
taosMemoryFreeClear(pDataInfo->pRsp);
goto _error;
@@ -2285,7 +2069,7 @@ static int32_t seqLoadRemoteData(SOperatorInfo* pOperator) {
SRetrieveTableRsp* pRetrieveRsp = pDataInfo->pRsp;
char* pStart = pRetrieveRsp->data;
- int32_t code = extractDataBlockFromFetchRsp(NULL, pStart, pRetrieveRsp->numOfCols, NULL, &pStart);
+ int32_t code = extractDataBlockFromFetchRsp(NULL, pStart, NULL, &pStart);
if (pRsp->completed == 1) {
qDebug("%s fetch msg rsp from vgId:%d, taskId:0x%" PRIx64 " execId:%d numOfRows:%d, rowsOfSource:%" PRIu64
@@ -2499,21 +2283,6 @@ _error:
static int32_t doInitAggInfoSup(SAggSupporter* pAggSup, SqlFunctionCtx* pCtx, int32_t numOfOutput, size_t keyBufSize,
const char* pKey);
-static void destroySortedMergeOperatorInfo(void* param, int32_t numOfOutput) {
- SSortedMergeOperatorInfo* pInfo = (SSortedMergeOperatorInfo*)param;
- taosArrayDestroy(pInfo->pSortInfo);
- taosArrayDestroy(pInfo->groupInfo);
-
- if (pInfo->pSortHandle != NULL) {
- tsortDestroySortHandle(pInfo->pSortHandle);
- }
-
- blockDataDestroy(pInfo->binfo.pRes);
- cleanupAggSup(&pInfo->aggSup);
-
- taosMemoryFreeClear(param);
-}
-
static bool needToMerge(SSDataBlock* pBlock, SArray* groupInfo, char** buf, int32_t rowIndex) {
size_t size = taosArrayGetSize(groupInfo);
if (size == 0) {
@@ -2549,41 +2318,6 @@ static bool needToMerge(SSDataBlock* pBlock, SArray* groupInfo, char** buf, int3
return 0;
}
-static void doMergeResultImpl(SSortedMergeOperatorInfo* pInfo, SqlFunctionCtx* pCtx, int32_t numOfExpr,
- int32_t rowIndex) {
- for (int32_t j = 0; j < numOfExpr; ++j) { // TODO set row index
- // pCtx[j].startRow = rowIndex;
- }
-
- for (int32_t j = 0; j < numOfExpr; ++j) {
- int32_t functionId = pCtx[j].functionId;
- // pCtx[j].fpSet->addInput(&pCtx[j]);
-
- // if (functionId < 0) {
- // SUdfInfo* pUdfInfo = taosArrayGet(pInfo->udfInfo, -1 * functionId - 1);
- // doInvokeUdf(pUdfInfo, &pCtx[j], 0, TSDB_UDF_FUNC_MERGE);
- // } else {
- // assert(!TSDB_FUNC_IS_SCALAR(functionId));
- // aAggs[functionId].mergeFunc(&pCtx[j]);
- // }
- }
-}
-
-static void doFinalizeResultImpl(SqlFunctionCtx* pCtx, int32_t numOfExpr) {
- for (int32_t j = 0; j < numOfExpr; ++j) {
- int32_t functionId = pCtx[j].functionId;
- // if (functionId == FUNC_TAG_DUMMY || functionId == FUNC_TS_DUMMY) {
- // continue;
- // }
-
- // if (functionId < 0) {
- // SUdfInfo* pUdfInfo = taosArrayGet(pInfo->udfInfo, -1 * functionId - 1);
- // doInvokeUdf(pUdfInfo, &pCtx[j], 0, TSDB_UDF_FUNC_FINALIZE);
- // } else {
- // pCtx[j].fpSet.finalize(&pCtx[j]);
- }
-}
-
static bool saveCurrentTuple(char** rowColData, SArray* pColumnList, SSDataBlock* pBlock, int32_t rowIndex) {
int32_t size = (int32_t)taosArrayGetSize(pColumnList);
@@ -2598,210 +2332,6 @@ static bool saveCurrentTuple(char** rowColData, SArray* pColumnList, SSDataBlock
return true;
}
-static void doMergeImpl(SOperatorInfo* pOperator, int32_t numOfExpr, SSDataBlock* pBlock) {
- SSortedMergeOperatorInfo* pInfo = pOperator->info;
-
- SqlFunctionCtx* pCtx = pOperator->exprSupp.pCtx;
-
- for (int32_t i = 0; i < pBlock->info.rows; ++i) {
- if (!pInfo->hasGroupVal) {
- ASSERT(i == 0);
- doMergeResultImpl(pInfo, pCtx, numOfExpr, i);
- pInfo->hasGroupVal = saveCurrentTuple(pInfo->groupVal, pInfo->groupInfo, pBlock, i);
- } else {
- if (needToMerge(pBlock, pInfo->groupInfo, pInfo->groupVal, i)) {
- doMergeResultImpl(pInfo, pCtx, numOfExpr, i);
- } else {
- doFinalizeResultImpl(pCtx, numOfExpr);
- int32_t numOfRows = getNumOfResult(pOperator->exprSupp.pCtx, pOperator->exprSupp.numOfExprs, NULL);
- // setTagValueForMultipleRows(pCtx, pOperator->exprSupp.numOfExprs, numOfRows);
-
- // TODO check for available buffer;
-
- // next group info data
- pInfo->binfo.pRes->info.rows += numOfRows;
- for (int32_t j = 0; j < numOfExpr; ++j) {
- if (pCtx[j].functionId < 0) {
- continue;
- }
-
- pCtx[j].fpSet.process(&pCtx[j]);
- }
-
- doMergeResultImpl(pInfo, pCtx, numOfExpr, i);
- pInfo->hasGroupVal = saveCurrentTuple(pInfo->groupVal, pInfo->groupInfo, pBlock, i);
- }
- }
- }
-}
-
-static SSDataBlock* doMerge(SOperatorInfo* pOperator) {
- SSortedMergeOperatorInfo* pInfo = pOperator->info;
- SSortHandle* pHandle = pInfo->pSortHandle;
-
- SSDataBlock* pDataBlock = createOneDataBlock(pInfo->binfo.pRes, false);
- blockDataEnsureCapacity(pDataBlock, pOperator->resultInfo.capacity);
-
- while (1) {
- blockDataCleanup(pDataBlock);
- while (1) {
- STupleHandle* pTupleHandle = tsortNextTuple(pHandle);
- if (pTupleHandle == NULL) {
- break;
- }
-
- // build datablock for merge for one group
- appendOneRowToDataBlock(pDataBlock, pTupleHandle);
- if (pDataBlock->info.rows >= pOperator->resultInfo.capacity) {
- break;
- }
- }
-
- if (pDataBlock->info.rows == 0) {
- break;
- }
-
- setInputDataBlock(pOperator, pOperator->exprSupp.pCtx, pDataBlock, TSDB_ORDER_ASC, MAIN_SCAN, true);
- // updateOutputBuf(&pInfo->binfo, &pAggInfo->bufCapacity, pBlock->info.rows * pAggInfo->resultRowFactor,
- // pOperator->pRuntimeEnv, true);
- doMergeImpl(pOperator, pOperator->exprSupp.numOfExprs, pDataBlock);
- // flush to tuple store, and after all data have been handled, return to upstream node or sink node
- }
-
- doFinalizeResultImpl(pOperator->exprSupp.pCtx, pOperator->exprSupp.numOfExprs);
- int32_t numOfRows = getNumOfResult(pOperator->exprSupp.pCtx, pOperator->exprSupp.numOfExprs, NULL);
- // setTagValueForMultipleRows(pCtx, pOperator->exprSupp.numOfExprs, numOfRows);
-
- // TODO check for available buffer;
-
- // next group info data
- pInfo->binfo.pRes->info.rows += numOfRows;
- return (pInfo->binfo.pRes->info.rows > 0) ? pInfo->binfo.pRes : NULL;
-}
-
-SSDataBlock* getSortedMergeBlockData(SSortHandle* pHandle, SSDataBlock* pDataBlock, int32_t capacity,
- SArray* pColMatchInfo, SSortedMergeOperatorInfo* pInfo) {
- blockDataCleanup(pDataBlock);
-
- SSDataBlock* p = tsortGetSortedDataBlock(pHandle);
- if (p == NULL) {
- return NULL;
- }
-
- blockDataEnsureCapacity(p, capacity);
-
- while (1) {
- STupleHandle* pTupleHandle = tsortNextTuple(pHandle);
- if (pTupleHandle == NULL) {
- break;
- }
-
- appendOneRowToDataBlock(p, pTupleHandle);
- if (p->info.rows >= capacity) {
- break;
- }
- }
-
- if (p->info.rows > 0) {
- int32_t numOfCols = taosArrayGetSize(pColMatchInfo);
- for (int32_t i = 0; i < numOfCols; ++i) {
- SColMatchInfo* pmInfo = taosArrayGet(pColMatchInfo, i);
- ASSERT(pmInfo->matchType == COL_MATCH_FROM_SLOT_ID);
-
- SColumnInfoData* pSrc = taosArrayGet(p->pDataBlock, pmInfo->srcSlotId);
- SColumnInfoData* pDst = taosArrayGet(pDataBlock->pDataBlock, pmInfo->targetSlotId);
- colDataAssign(pDst, pSrc, p->info.rows, &pDataBlock->info);
- }
-
- pDataBlock->info.rows = p->info.rows;
- pDataBlock->info.capacity = p->info.rows;
- }
-
- blockDataDestroy(p);
- return (pDataBlock->info.rows > 0) ? pDataBlock : NULL;
-}
-
-static SSDataBlock* doSortedMerge(SOperatorInfo* pOperator) {
- if (pOperator->status == OP_EXEC_DONE) {
- return NULL;
- }
-
- SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo;
- SSortedMergeOperatorInfo* pInfo = pOperator->info;
- if (pOperator->status == OP_RES_TO_RETURN) {
- return getSortedMergeBlockData(pInfo->pSortHandle, pInfo->binfo.pRes, pOperator->resultInfo.capacity, NULL, pInfo);
- }
-
- int32_t numOfBufPage = pInfo->sortBufSize / pInfo->bufPageSize;
- pInfo->pSortHandle = tsortCreateSortHandle(pInfo->pSortInfo, SORT_MULTISOURCE_MERGE, pInfo->bufPageSize, numOfBufPage,
- pInfo->binfo.pRes, "GET_TASKID(pTaskInfo)");
-
- tsortSetFetchRawDataFp(pInfo->pSortHandle, loadNextDataBlock, NULL, NULL);
-
- for (int32_t i = 0; i < pOperator->numOfDownstream; ++i) {
- SSortSource* ps = taosMemoryCalloc(1, sizeof(SSortSource));
- ps->param = pOperator->pDownstream[i];
- tsortAddSource(pInfo->pSortHandle, ps);
- }
-
- int32_t code = tsortOpen(pInfo->pSortHandle);
- if (code != TSDB_CODE_SUCCESS) {
- T_LONG_JMP(pTaskInfo->env, terrno);
- }
-
- pOperator->status = OP_RES_TO_RETURN;
- return doMerge(pOperator);
-}
-
-static int32_t initGroupCol(SExprInfo* pExprInfo, int32_t numOfCols, SArray* pGroupInfo,
- SSortedMergeOperatorInfo* pInfo) {
- if (pGroupInfo == NULL || taosArrayGetSize(pGroupInfo) == 0) {
- return 0;
- }
-
- int32_t len = 0;
- SArray* plist = taosArrayInit(3, sizeof(SColumn));
- pInfo->groupInfo = taosArrayInit(3, sizeof(int32_t));
-
- if (plist == NULL || pInfo->groupInfo == NULL) {
- return TSDB_CODE_OUT_OF_MEMORY;
- }
-
- size_t numOfGroupCol = taosArrayGetSize(pInfo->groupInfo);
- for (int32_t i = 0; i < numOfGroupCol; ++i) {
- SColumn* pCol = taosArrayGet(pGroupInfo, i);
- for (int32_t j = 0; j < numOfCols; ++j) {
- SExprInfo* pe = &pExprInfo[j];
- if (pe->base.resSchema.slotId == pCol->colId) {
- taosArrayPush(plist, pCol);
- taosArrayPush(pInfo->groupInfo, &j);
- len += pCol->bytes;
- break;
- }
- }
- }
-
- ASSERT(taosArrayGetSize(pGroupInfo) == taosArrayGetSize(plist));
-
- pInfo->groupVal = taosMemoryCalloc(1, (POINTER_BYTES * numOfGroupCol + len));
- if (pInfo->groupVal == NULL) {
- taosArrayDestroy(plist);
- return TSDB_CODE_OUT_OF_MEMORY;
- }
-
- int32_t offset = 0;
- char* start = (char*)(pInfo->groupVal + (POINTER_BYTES * numOfGroupCol));
- for (int32_t i = 0; i < numOfGroupCol; ++i) {
- pInfo->groupVal[i] = start + offset;
- SColumn* pCol = taosArrayGet(plist, i);
- offset += pCol->bytes;
- }
-
- taosArrayDestroy(plist);
-
- return TSDB_CODE_SUCCESS;
-}
-
int32_t getTableScanInfo(SOperatorInfo* pOperator, int32_t* order, int32_t* scanFlag) {
// todo add more information about exchange operation
int32_t type = pOperator->operatorType;
@@ -2829,92 +2359,6 @@ int32_t getTableScanInfo(SOperatorInfo* pOperator, int32_t* order, int32_t* scan
}
}
}
-#if 0
-int32_t doPrepareScan(SOperatorInfo* pOperator, uint64_t uid, int64_t ts) {
- uint8_t type = pOperator->operatorType;
-
- pOperator->status = OP_OPENED;
-
- if (type == QUERY_NODE_PHYSICAL_PLAN_STREAM_SCAN) {
- SStreamScanInfo* pScanInfo = pOperator->info;
- pScanInfo->blockType = STREAM_INPUT__TABLE_SCAN;
-
- pScanInfo->pTableScanOp->status = OP_OPENED;
-
- STableScanInfo* pInfo = pScanInfo->pTableScanOp->info;
- ASSERT(pInfo->scanMode == TABLE_SCAN__TABLE_ORDER);
-
- if (uid == 0) {
- pInfo->noTable = 1;
- return TSDB_CODE_SUCCESS;
- }
-
- /*if (pSnapShotScanInfo->dataReader == NULL) {*/
- /*pSnapShotScanInfo->dataReader = tsdbReaderOpen(pHandle->vnode, &pSTInfo->cond, tableList, 0, 0);*/
- /*pSnapShotScanInfo->scanMode = TABLE_SCAN__TABLE_ORDER;*/
- /*}*/
-
- pInfo->noTable = 0;
-
- if (pInfo->lastStatus.uid != uid || pInfo->lastStatus.ts != ts) {
- SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo;
-
- int32_t tableSz = taosArrayGetSize(pTaskInfo->tableqinfoList.pTableList);
- bool found = false;
- for (int32_t i = 0; i < tableSz; i++) {
- STableKeyInfo* pTableInfo = taosArrayGet(pTaskInfo->tableqinfoList.pTableList, i);
- if (pTableInfo->uid == uid) {
- found = true;
- pInfo->currentTable = i;
- }
- }
- // TODO after processing drop, found can be false
- ASSERT(found);
-
- tsdbSetTableId(pInfo->dataReader, uid);
- int64_t oldSkey = pInfo->cond.twindows.skey;
- pInfo->cond.twindows.skey = ts + 1;
- tsdbReaderReset(pInfo->dataReader, &pInfo->cond);
- pInfo->cond.twindows.skey = oldSkey;
- pInfo->scanTimes = 0;
-
- qDebug("tsdb reader offset seek to uid %" PRId64 " ts %" PRId64 ", table cur set to %d , all table num %d", uid, ts,
- pInfo->currentTable, tableSz);
- }
-
- return TSDB_CODE_SUCCESS;
-
- } else {
- if (pOperator->numOfDownstream == 1) {
- return doPrepareScan(pOperator->pDownstream[0], uid, ts);
- } else if (pOperator->numOfDownstream == 0) {
- qError("failed to find stream scan operator to set the input data block");
- return TSDB_CODE_QRY_APP_ERROR;
- } else {
- qError("join not supported for stream block scan");
- return TSDB_CODE_QRY_APP_ERROR;
- }
- }
-}
-
-int32_t doGetScanStatus(SOperatorInfo* pOperator, uint64_t* uid, int64_t* ts) {
- int32_t type = pOperator->operatorType;
- if (type == QUERY_NODE_PHYSICAL_PLAN_STREAM_SCAN) {
- SStreamScanInfo* pScanInfo = pOperator->info;
- STableScanInfo* pSnapShotScanInfo = pScanInfo->pTableScanOp->info;
- *uid = pSnapShotScanInfo->lastStatus.uid;
- *ts = pSnapShotScanInfo->lastStatus.ts;
- } else {
- if (pOperator->pDownstream[0] == NULL) {
- return TSDB_CODE_INVALID_PARA;
- } else {
- doGetScanStatus(pOperator->pDownstream[0], uid, ts);
- }
- }
-
- return TSDB_CODE_SUCCESS;
-}
-#endif
// this is a blocking operator
static int32_t doOpenAggregateOptr(SOperatorInfo* pOperator) {
@@ -3011,7 +2455,7 @@ int32_t aggEncodeResultRow(SOperatorInfo* pOperator, char** result, int32_t* len
}
SOptrBasicInfo* pInfo = (SOptrBasicInfo*)(pOperator->info);
SAggSupporter* pSup = (SAggSupporter*)POINTER_SHIFT(pOperator->info, sizeof(SOptrBasicInfo));
- int32_t size = taosHashGetSize(pSup->pResultRowHashTable);
+ int32_t size = tSimpleHashGetSize(pSup->pResultRowHashTable);
size_t keyLen = sizeof(uint64_t) * 2; // estimate the key length
int32_t totalSize =
sizeof(int32_t) + sizeof(int32_t) + size * (sizeof(int32_t) + keyLen + sizeof(int32_t) + pSup->resultRowSize);
@@ -3039,9 +2483,10 @@ int32_t aggEncodeResultRow(SOperatorInfo* pOperator, char** result, int32_t* len
setBufPageDirty(pPage, true);
releaseBufPage(pSup->pResultBuf, pPage);
- void* pIter = taosHashIterate(pSup->pResultRowHashTable, NULL);
- while (pIter) {
- void* key = taosHashGetKey(pIter, &keyLen);
+ int32_t iter = 0;
+ void* pIter = NULL;
+ while ((pIter = tSimpleHashIterate(pSup->pResultRowHashTable, pIter, &iter))) {
+ void* key = tSimpleHashGetKey(pIter, &keyLen);
SResultRowPosition* p1 = (SResultRowPosition*)pIter;
pPage = (SFilePage*)getBufPage(pSup->pResultBuf, p1->pageId);
@@ -3072,8 +2517,6 @@ int32_t aggEncodeResultRow(SOperatorInfo* pOperator, char** result, int32_t* len
offset += sizeof(int32_t);
memcpy(*result + offset, pRow, pSup->resultRowSize);
offset += pSup->resultRowSize;
-
- pIter = taosHashIterate(pSup->pResultRowHashTable, pIter);
}
*(int32_t*)(*result) = offset;
@@ -3101,14 +2544,14 @@ int32_t aggDecodeResultRow(SOperatorInfo* pOperator, char* result) {
offset += sizeof(int32_t);
uint64_t tableGroupId = *(uint64_t*)(result + offset);
- SResultRow* resultRow = getNewResultRow(pSup->pResultBuf, tableGroupId, pSup->resultRowSize);
+ SResultRow* resultRow = getNewResultRow(pSup->pResultBuf, &pSup->currentPageId, pSup->resultRowSize);
if (!resultRow) {
return TSDB_CODE_TSC_INVALID_INPUT;
}
// add a new result set for a new group
SResultRowPosition pos = {.pageId = resultRow->pageId, .offset = resultRow->offset};
- taosHashPut(pSup->pResultRowHashTable, result + offset, keyLen, &pos, sizeof(SResultRowPosition));
+ tSimpleHashPut(pSup->pResultRowHashTable, result + offset, keyLen, &pos, sizeof(SResultRowPosition));
offset += keyLen;
int32_t valueLen = *(int32_t*)(result + offset);
@@ -3123,7 +2566,6 @@ int32_t aggDecodeResultRow(SOperatorInfo* pOperator, char* result) {
resultRow->offset = pOffset;
offset += valueLen;
- initResultRow(resultRow);
pInfo->resultRowInfo.cur = (SResultRowPosition){.pageId = resultRow->pageId, .offset = resultRow->offset};
// releaseBufPage(pSup->pResultBuf, getBufPage(pSup->pResultBuf, pageId));
}
@@ -3225,6 +2667,7 @@ static void doHandleRemainBlockForNewGroupImpl(SOperatorInfo* pOperator, SFillOp
Q_STATUS_EQUAL(pTaskInfo->status, TASK_COMPLETED) ? pInfo->win.ekey : pInfo->existNewGroupBlock->info.window.ekey;
taosResetFillInfo(pInfo->pFillInfo, getFillInfoStart(pInfo->pFillInfo));
+ blockDataCleanup(pInfo->pRes);
doApplyScalarCalculation(pOperator, pInfo->existNewGroupBlock, order, scanFlag);
taosFillSetStartInfo(pInfo->pFillInfo, pInfo->pRes->info.rows, ekey);
@@ -3287,7 +2730,6 @@ static SSDataBlock* doFillImpl(SOperatorInfo* pOperator) {
SSDataBlock* pResBlock = pInfo->pFinalRes;
blockDataCleanup(pResBlock);
- blockDataCleanup(pInfo->pRes);
int32_t order = TSDB_ORDER_ASC;
int32_t scanFlag = MAIN_SCAN;
@@ -3311,6 +2753,8 @@ static SSDataBlock* doFillImpl(SOperatorInfo* pOperator) {
taosFillSetStartInfo(pInfo->pFillInfo, 0, pInfo->win.ekey);
} else {
blockDataUpdateTsWindow(pBlock, pInfo->primarySrcSlotId);
+
+ blockDataCleanup(pInfo->pRes);
doApplyScalarCalculation(pOperator, pBlock, order, scanFlag);
if (pInfo->curGroupId == 0 || pInfo->curGroupId == pInfo->pRes->info.groupId) {
@@ -3353,7 +2797,6 @@ static SSDataBlock* doFillImpl(SOperatorInfo* pOperator) {
assert(pBlock != NULL);
blockDataCleanup(pResBlock);
- blockDataCleanup(pInfo->pRes);
doHandleRemainBlockForNewGroupImpl(pOperator, pInfo, pResultInfo, pTaskInfo);
if (pResBlock->info.rows > pResultInfo->threshold) {
@@ -3448,11 +2891,13 @@ int32_t getBufferPgSize(int32_t rowSize, uint32_t* defaultPgsz, uint32_t* defaul
int32_t doInitAggInfoSup(SAggSupporter* pAggSup, SqlFunctionCtx* pCtx, int32_t numOfOutput, size_t keyBufSize,
const char* pKey) {
+ int32_t code = 0;
_hash_fn_t hashFn = taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY);
+ pAggSup->currentPageId = -1;
pAggSup->resultRowSize = getResultRowSize(pCtx, numOfOutput);
pAggSup->keyBuf = taosMemoryCalloc(1, keyBufSize + POINTER_BYTES + sizeof(int64_t));
- pAggSup->pResultRowHashTable = taosHashInit(10, hashFn, true, HASH_NO_LOCK);
+ pAggSup->pResultRowHashTable = tSimpleHashInit(10, hashFn);
if (pAggSup->keyBuf == NULL || pAggSup->pResultRowHashTable == NULL) {
return TSDB_CODE_OUT_OF_MEMORY;
@@ -3463,23 +2908,23 @@ int32_t doInitAggInfoSup(SAggSupporter* pAggSup, SqlFunctionCtx* pCtx, int32_t n
getBufferPgSize(pAggSup->resultRowSize, &defaultPgsz, &defaultBufsz);
if (!osTempSpaceAvailable()) {
- terrno = TSDB_CODE_NO_AVAIL_DISK;
- qError("Init stream agg supporter failed since %s", terrstr(terrno));
- return terrno;
+ code = TSDB_CODE_NO_AVAIL_DISK;
+ qError("Init stream agg supporter failed since %s, %s", terrstr(code), pKey);
+ return code;
}
- int32_t code = createDiskbasedBuf(&pAggSup->pResultBuf, defaultPgsz, defaultBufsz, pKey, tsTempDir);
+ code = createDiskbasedBuf(&pAggSup->pResultBuf, defaultPgsz, defaultBufsz, pKey, tsTempDir);
if (code != TSDB_CODE_SUCCESS) {
- qError("Create agg result buf failed since %s", tstrerror(code));
+ qError("Create agg result buf failed since %s, %s", tstrerror(code), pKey);
return code;
}
- return TSDB_CODE_SUCCESS;
+ return code;
}
void cleanupAggSup(SAggSupporter* pAggSup) {
taosMemoryFreeClear(pAggSup->keyBuf);
- taosHashCleanup(pAggSup->pResultRowHashTable);
+ tSimpleHashCleanup(pAggSup->pResultRowHashTable);
destroyDiskbasedBuf(pAggSup->pResultBuf);
}
@@ -3496,7 +2941,7 @@ int32_t initAggInfo(SExprSupp* pSup, SAggSupporter* pAggSup, SExprInfo* pExprInf
}
for (int32_t i = 0; i < numOfCols; ++i) {
- pSup->pCtx[i].pBuf = pAggSup->pResultBuf;
+ pSup->pCtx[i].saveHandle.pBuf = pAggSup->pResultBuf;
}
return TSDB_CODE_SUCCESS;
@@ -3528,6 +2973,7 @@ void* destroySqlFunctionCtx(SqlFunctionCtx* pCtx, int32_t numOfOutput) {
}
taosMemoryFreeClear(pCtx[i].subsidiaries.pCtx);
+ taosMemoryFreeClear(pCtx[i].subsidiaries.buf);
taosMemoryFree(pCtx[i].input.pData);
taosMemoryFree(pCtx[i].input.pColumnDataAgg);
}
@@ -3618,13 +3064,6 @@ void cleanupBasicInfo(SOptrBasicInfo* pInfo) {
pInfo->pRes = blockDataDestroy(pInfo->pRes);
}
-void destroyBasicOperatorInfo(void* param, int32_t numOfOutput) {
- SOptrBasicInfo* pInfo = (SOptrBasicInfo*)param;
- cleanupBasicInfo(pInfo);
-
- taosMemoryFreeClear(param);
-}
-
static void freeItem(void* pItem) {
void** p = pItem;
if (*p != NULL) {
@@ -3715,6 +3154,44 @@ static int32_t initFillInfo(SFillOperatorInfo* pInfo, SExprInfo* pExpr, int32_t
}
}
+static bool isWstartColumnExist(SFillOperatorInfo* pInfo) {
+ if (pInfo->numOfNotFillExpr == 0) {
+ return false;
+ }
+ for (int32_t i = 0; i < pInfo->numOfNotFillExpr; ++i) {
+ SExprInfo* exprInfo = pInfo->pNotFillExprInfo + i;
+ if (exprInfo->pExpr->nodeType == QUERY_NODE_COLUMN && exprInfo->base.numOfParams == 1 &&
+ exprInfo->base.pParam[0].pCol->colType == COLUMN_TYPE_WINDOW_START) {
+ return true;
+ }
+ }
+ return false;
+}
+
+static int32_t createWStartTsAsNotFillExpr(SFillOperatorInfo* pInfo, SFillPhysiNode* pPhyFillNode) {
+ bool wstartExist = isWstartColumnExist(pInfo);
+ if (wstartExist == false) {
+ if (pPhyFillNode->pWStartTs->type != QUERY_NODE_TARGET) {
+ qError("pWStartTs of fill physical node is not a target node");
+ return TSDB_CODE_QRY_SYS_ERROR;
+ }
+
+ SExprInfo* notFillExprs =
+ taosMemoryRealloc(pInfo->pNotFillExprInfo, (pInfo->numOfNotFillExpr + 1) * sizeof(SExprInfo));
+ if (notFillExprs == NULL) {
+ return TSDB_CODE_OUT_OF_MEMORY;
+ }
+
+ createExprFromTargetNode(notFillExprs + pInfo->numOfNotFillExpr, (STargetNode*)pPhyFillNode->pWStartTs);
+
+ ++pInfo->numOfNotFillExpr;
+ pInfo->pNotFillExprInfo = notFillExprs;
+ return TSDB_CODE_SUCCESS;
+ }
+
+ return TSDB_CODE_SUCCESS;
+}
+
SOperatorInfo* createFillOperatorInfo(SOperatorInfo* downstream, SFillPhysiNode* pPhyFillNode,
SExecTaskInfo* pTaskInfo) {
SFillOperatorInfo* pInfo = taosMemoryCalloc(1, sizeof(SFillOperatorInfo));
@@ -3726,7 +3203,10 @@ SOperatorInfo* createFillOperatorInfo(SOperatorInfo* downstream, SFillPhysiNode*
SSDataBlock* pResBlock = createResDataBlock(pPhyFillNode->node.pOutputDataBlockDesc);
SExprInfo* pExprInfo = createExprInfo(pPhyFillNode->pFillExprs, NULL, &pInfo->numOfExpr);
pInfo->pNotFillExprInfo = createExprInfo(pPhyFillNode->pNotFillExprs, NULL, &pInfo->numOfNotFillExpr);
-
+ int32_t code = createWStartTsAsNotFillExpr(pInfo, pPhyFillNode);
+ if (code != TSDB_CODE_SUCCESS) {
+ goto _error;
+ }
SInterval* pInterval =
QUERY_NODE_PHYSICAL_PLAN_MERGE_ALIGNED_INTERVAL == downstream->operatorType
? &((SMergeAlignedIntervalAggOperatorInfo*)downstream->info)->intervalAggOperatorInfo->interval
@@ -3747,9 +3227,9 @@ SOperatorInfo* createFillOperatorInfo(SOperatorInfo* downstream, SFillPhysiNode*
SArray* pColMatchColInfo = extractColMatchInfo(pPhyFillNode->pFillExprs, pPhyFillNode->node.pOutputDataBlockDesc,
&numOfOutputCols, COL_MATCH_FROM_SLOT_ID);
- int32_t code = initFillInfo(pInfo, pExprInfo, pInfo->numOfExpr, pInfo->pNotFillExprInfo, pInfo->numOfNotFillExpr,
- (SNodeListNode*)pPhyFillNode->pValues, pPhyFillNode->timeRange, pResultInfo->capacity,
- pTaskInfo->id.str, pInterval, type, order);
+ code = initFillInfo(pInfo, pExprInfo, pInfo->numOfExpr, pInfo->pNotFillExprInfo, pInfo->numOfNotFillExpr,
+ (SNodeListNode*)pPhyFillNode->pValues, pPhyFillNode->timeRange, pResultInfo->capacity,
+ pTaskInfo->id.str, pInterval, type, order);
if (code != TSDB_CODE_SUCCESS) {
goto _error;
}
@@ -3880,6 +3360,8 @@ static void cleanupTableSchemaInfo(SSchemaInfo* pSchemaInfo) {
tDeleteSSchemaWrapper(pSchemaInfo->qsw);
}
+static void cleanupStreamInfo(SStreamTaskInfo* pStreamInfo) { tDeleteSSchemaWrapper(pStreamInfo->schema); }
+
static int32_t sortTableGroup(STableListInfo* pTableListInfo) {
taosArrayClear(pTableListInfo->pGroupList);
SArray* sortSupport = taosArrayInit(16, sizeof(uint64_t));
@@ -4024,7 +3506,7 @@ SOperatorInfo* createOperatorTree(SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo
pTableListInfo, pTagCond, pTagIndexCond, GET_TASKID(pTaskInfo));
if (code) {
pTaskInfo->code = code;
- qError("failed to createScanTableListInfo, code: %s", tstrerror(code));
+ qError("failed to createScanTableListInfo, code:%s, %s", tstrerror(code), GET_TASKID(pTaskInfo));
return NULL;
}
@@ -4059,7 +3541,8 @@ SOperatorInfo* createOperatorTree(SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo
STableScanInfo* pScanInfo = pOperator->info;
pTaskInfo->cost.pRecoder = &pScanInfo->readRecorder;
} else if (QUERY_NODE_PHYSICAL_PLAN_EXCHANGE == type) {
- pOperator = createExchangeOperatorInfo(pHandle->pMsgCb->clientRpc, (SExchangePhysiNode*)pPhyNode, pTaskInfo);
+ pOperator = createExchangeOperatorInfo(pHandle ? pHandle->pMsgCb->clientRpc : NULL, (SExchangePhysiNode*)pPhyNode,
+ pTaskInfo);
} else if (QUERY_NODE_PHYSICAL_PLAN_STREAM_SCAN == type) {
STableScanPhysiNode* pTableScanNode = (STableScanPhysiNode*)pPhyNode;
if (pHandle->vnode) {
@@ -4078,8 +3561,8 @@ SOperatorInfo* createOperatorTree(SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo
STableKeyInfo* pKeyInfo = taosArrayGet(pTableListInfo->pTableList, i);
qDebug("creating stream task: add table %" PRId64, pKeyInfo->uid);
}
- }
#endif
+ }
pTaskInfo->schemaInfo.qsw = extractQueriedColumnSchema(&pTableScanNode->scan);
pOperator = createStreamScanOperatorInfo(pHandle, pTableScanNode, pTagCond, pTaskInfo);
@@ -4090,7 +3573,7 @@ SOperatorInfo* createOperatorTree(SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo
STagScanPhysiNode* pScanPhyNode = (STagScanPhysiNode*)pPhyNode;
int32_t code = getTableList(pHandle->meta, pHandle->vnode, pScanPhyNode, pTagCond, pTagIndexCond, pTableListInfo);
if (code != TSDB_CODE_SUCCESS) {
- pTaskInfo->code = terrno;
+ pTaskInfo->code = code;
qError("failed to getTableList, code: %s", tstrerror(code));
return NULL;
}
@@ -4138,7 +3621,7 @@ SOperatorInfo* createOperatorTree(SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo
return NULL;
}
- pOperator = createLastrowScanOperator(pScanNode, pHandle, pTaskInfo);
+ pOperator = createCacherowsScanOperator(pScanNode, pHandle, pTaskInfo);
} else if (QUERY_NODE_PHYSICAL_PLAN_PROJECT == type) {
pOperator = createProjectOperatorInfo(NULL, (SProjectPhysiNode*)pPhyNode, pTaskInfo);
} else {
@@ -4162,9 +3645,9 @@ SOperatorInfo* createOperatorTree(SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo
if (ops[i] == NULL) {
taosMemoryFree(ops);
return NULL;
- } else {
- ops[i]->resultDataBlockId = pChildNode->pOutputDataBlockDesc->dataBlockId;
}
+
+ ops[i]->resultDataBlockId = pChildNode->pOutputDataBlockDesc->dataBlockId;
}
SOperatorInfo* pOptr = NULL;
@@ -4189,7 +3672,7 @@ SOperatorInfo* createOperatorTree(SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo
pOptr = createAggregateOperatorInfo(ops[0], pExprInfo, num, pResBlock, pAggNode->node.pConditions,
pScalarExprInfo, numOfScalarExpr, pAggNode->mergeDataBlock, pTaskInfo);
}
- } else if (QUERY_NODE_PHYSICAL_PLAN_HASH_INTERVAL == type || QUERY_NODE_PHYSICAL_PLAN_STREAM_INTERVAL == type) {
+ } else if (QUERY_NODE_PHYSICAL_PLAN_HASH_INTERVAL == type) {
SIntervalPhysiNode* pIntervalPhyNode = (SIntervalPhysiNode*)pPhyNode;
SExprInfo* pExprInfo = createExprInfo(pIntervalPhyNode->window.pFuncs, NULL, &num);
@@ -4214,39 +3697,14 @@ SOperatorInfo* createOperatorTree(SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo
pOptr = createIntervalOperatorInfo(ops[0], pExprInfo, num, pResBlock, &interval, tsSlotId, &as, pIntervalPhyNode,
pTaskInfo, isStream);
+ } else if (QUERY_NODE_PHYSICAL_PLAN_STREAM_INTERVAL == type) {
+ pOptr = createStreamIntervalOperatorInfo(ops[0], pPhyNode, pTaskInfo);
} else if (QUERY_NODE_PHYSICAL_PLAN_MERGE_ALIGNED_INTERVAL == type) {
SMergeAlignedIntervalPhysiNode* pIntervalPhyNode = (SMergeAlignedIntervalPhysiNode*)pPhyNode;
-
- SExprInfo* pExprInfo = createExprInfo(pIntervalPhyNode->window.pFuncs, NULL, &num);
- SSDataBlock* pResBlock = createResDataBlock(pPhyNode->pOutputDataBlockDesc);
-
- SInterval interval = {.interval = pIntervalPhyNode->interval,
- .sliding = pIntervalPhyNode->sliding,
- .intervalUnit = pIntervalPhyNode->intervalUnit,
- .slidingUnit = pIntervalPhyNode->slidingUnit,
- .offset = pIntervalPhyNode->offset,
- .precision = ((SColumnNode*)pIntervalPhyNode->window.pTspk)->node.resType.precision};
-
- int32_t tsSlotId = ((SColumnNode*)pIntervalPhyNode->window.pTspk)->slotId;
- pOptr = createMergeAlignedIntervalOperatorInfo(ops[0], pExprInfo, num, pResBlock, &interval, tsSlotId,
- pPhyNode->pConditions, pIntervalPhyNode->window.mergeDataBlock,
- pTaskInfo);
+ pOptr = createMergeAlignedIntervalOperatorInfo(ops[0], pIntervalPhyNode, pTaskInfo);
} else if (QUERY_NODE_PHYSICAL_PLAN_MERGE_INTERVAL == type) {
SMergeIntervalPhysiNode* pIntervalPhyNode = (SMergeIntervalPhysiNode*)pPhyNode;
-
- SExprInfo* pExprInfo = createExprInfo(pIntervalPhyNode->window.pFuncs, NULL, &num);
- SSDataBlock* pResBlock = createResDataBlock(pPhyNode->pOutputDataBlockDesc);
-
- SInterval interval = {.interval = pIntervalPhyNode->interval,
- .sliding = pIntervalPhyNode->sliding,
- .intervalUnit = pIntervalPhyNode->intervalUnit,
- .slidingUnit = pIntervalPhyNode->slidingUnit,
- .offset = pIntervalPhyNode->offset,
- .precision = ((SColumnNode*)pIntervalPhyNode->window.pTspk)->node.resType.precision};
-
- int32_t tsSlotId = ((SColumnNode*)pIntervalPhyNode->window.pTspk)->slotId;
- pOptr = createMergeIntervalOperatorInfo(ops[0], pExprInfo, num, pResBlock, &interval, tsSlotId,
- pIntervalPhyNode->window.mergeDataBlock, pTaskInfo);
+ pOptr = createMergeIntervalOperatorInfo(ops[0], pIntervalPhyNode, pTaskInfo);
} else if (QUERY_NODE_PHYSICAL_PLAN_STREAM_SEMI_INTERVAL == type) {
int32_t children = 0;
pOptr = createStreamFinalIntervalOperatorInfo(ops[0], pPhyNode, pTaskInfo, children);
@@ -4273,25 +3731,19 @@ SOperatorInfo* createOperatorTree(SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo
pOptr = createStreamFinalSessionAggOperatorInfo(ops[0], pPhyNode, pTaskInfo, children);
} else if (QUERY_NODE_PHYSICAL_PLAN_PARTITION == type) {
pOptr = createPartitionOperatorInfo(ops[0], (SPartitionPhysiNode*)pPhyNode, pTaskInfo);
+ } else if (QUERY_NODE_PHYSICAL_PLAN_STREAM_PARTITION == type) {
+ pOptr = createStreamPartitionOperatorInfo(ops[0], (SStreamPartitionPhysiNode*)pPhyNode, pTaskInfo);
} else if (QUERY_NODE_PHYSICAL_PLAN_MERGE_STATE == type) {
SStateWinodwPhysiNode* pStateNode = (SStateWinodwPhysiNode*)pPhyNode;
-
- STimeWindowAggSupp as = {.waterMark = pStateNode->window.watermark, .calTrigger = pStateNode->window.triggerType};
-
- SExprInfo* pExprInfo = createExprInfo(pStateNode->window.pFuncs, NULL, &num);
- SSDataBlock* pResBlock = createResDataBlock(pPhyNode->pOutputDataBlockDesc);
- int32_t tsSlotId = ((SColumnNode*)pStateNode->window.pTspk)->slotId;
-
- SColumnNode* pColNode = (SColumnNode*)((STargetNode*)pStateNode->pStateKey)->pExpr;
- SColumn col = extractColumnFromColumnNode(pColNode);
- pOptr = createStatewindowOperatorInfo(ops[0], pExprInfo, num, pResBlock, &as, tsSlotId, &col, pPhyNode->pConditions,
- pTaskInfo);
+ pOptr = createStatewindowOperatorInfo(ops[0], pStateNode, pTaskInfo);
} else if (QUERY_NODE_PHYSICAL_PLAN_STREAM_STATE == type) {
pOptr = createStreamStateAggOperatorInfo(ops[0], pPhyNode, pTaskInfo);
} else if (QUERY_NODE_PHYSICAL_PLAN_MERGE_JOIN == type) {
pOptr = createMergeJoinOperatorInfo(ops, size, (SSortMergeJoinPhysiNode*)pPhyNode, pTaskInfo);
} else if (QUERY_NODE_PHYSICAL_PLAN_FILL == type) {
pOptr = createFillOperatorInfo(ops[0], (SFillPhysiNode*)pPhyNode, pTaskInfo);
+ } else if (QUERY_NODE_PHYSICAL_PLAN_STREAM_FILL == type) {
+ pOptr = createStreamFillOperatorInfo(ops[0], (SStreamFillPhysiNode*)pPhyNode, pTaskInfo);
} else if (QUERY_NODE_PHYSICAL_PLAN_INDEF_ROWS_FUNC == type) {
pOptr = createIndefinitOutputOperatorInfo(ops[0], pPhyNode, pTaskInfo);
} else if (QUERY_NODE_PHYSICAL_PLAN_INTERP_FUNC == type) {
@@ -4299,8 +3751,12 @@ SOperatorInfo* createOperatorTree(SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo
} else {
ASSERT(0);
}
+
taosMemoryFree(ops);
- if (pOptr) pOptr->resultDataBlockId = pPhyNode->pOutputDataBlockDesc->dataBlockId;
+ if (pOptr) {
+ pOptr->resultDataBlockId = pPhyNode->pOutputDataBlockDesc->dataBlockId;
+ }
+
return pOptr;
}
@@ -4337,42 +3793,6 @@ SArray* extractColumnInfo(SNodeList* pNodeList) {
return pList;
}
-#if 0
-STsdbReader* doCreateDataReader(STableScanPhysiNode* pTableScanNode, SReadHandle* pHandle,
- STableListInfo* pTableListInfo, const char* idstr) {
- int32_t code = getTableList(pHandle->meta, pHandle->vnode, &pTableScanNode->scan, pTableListInfo);
- if (code != TSDB_CODE_SUCCESS) {
- goto _error;
- }
-
- if (taosArrayGetSize(pTableListInfo->pTableList) == 0) {
- code = 0;
- qDebug("no table qualified for query, %s", idstr);
- goto _error;
- }
-
- SQueryTableDataCond cond = {0};
- code = initQueryTableDataCond(&cond, pTableScanNode);
- if (code != TSDB_CODE_SUCCESS) {
- goto _error;
- }
-
- STsdbReader* pReader;
- code = tsdbReaderOpen(pHandle->vnode, &cond, pTableListInfo->pTableList, &pReader, idstr);
- if (code != TSDB_CODE_SUCCESS) {
- goto _error;
- }
-
- cleanupQueryTableDataCond(&cond);
-
- return pReader;
-
-_error:
- terrno = code;
- return NULL;
-}
-#endif
-
static int32_t extractTbscanInStreamOpTree(SOperatorInfo* pOperator, STableScanInfo** ppInfo) {
if (pOperator->operatorType != QUERY_NODE_PHYSICAL_PLAN_STREAM_SCAN) {
if (pOperator->numOfDownstream == 0) {
@@ -4614,7 +4034,7 @@ _complete:
return code;
}
-static void doDestroyTableList(STableListInfo* pTableqinfoList) {
+void doDestroyTableList(STableListInfo* pTableqinfoList) {
taosArrayDestroy(pTableqinfoList->pTableList);
taosHashCleanup(pTableqinfoList->map);
if (pTableqinfoList->needSortTableByGroupId) {
@@ -4638,8 +4058,11 @@ void doDestroyTask(SExecTaskInfo* pTaskInfo) {
doDestroyTableList(&pTaskInfo->tableqinfoList);
destroyOperatorInfo(pTaskInfo->pRoot);
cleanupTableSchemaInfo(&pTaskInfo->schemaInfo);
+ cleanupStreamInfo(&pTaskInfo->streamInfo);
- nodesDestroyNode((SNode*)pTaskInfo->pSubplan);
+ if (!pTaskInfo->localFetch.localExec) {
+ nodesDestroyNode((SNode*)pTaskInfo->pSubplan);
+ }
taosMemoryFreeClear(pTaskInfo->sql);
taosMemoryFreeClear(pTaskInfo->id.str);
@@ -4718,6 +4141,7 @@ int32_t getOperatorExplainExecInfo(SOperatorInfo* operatorInfo, SArray* pExecInf
int32_t initStreamAggSupporter(SStreamAggSupporter* pSup, const char* pKey, SqlFunctionCtx* pCtx, int32_t numOfOutput,
int32_t size) {
+ pSup->currentPageId = -1;
pSup->resultRowSize = getResultRowSize(pCtx, numOfOutput);
pSup->keySize = sizeof(int64_t) + sizeof(TSKEY);
pSup->pKeyBuf = taosMemoryCalloc(1, pSup->keySize);
@@ -4745,7 +4169,117 @@ int32_t initStreamAggSupporter(SStreamAggSupporter* pSup, const char* pKey, SqlF
}
int32_t code = createDiskbasedBuf(&pSup->pResultBuf, pageSize, bufSize, pKey, tsTempDir);
for (int32_t i = 0; i < numOfOutput; ++i) {
- pCtx[i].pBuf = pSup->pResultBuf;
+ pCtx[i].saveHandle.pBuf = pSup->pResultBuf;
}
+
return code;
}
+
+int32_t setOutputBuf(STimeWindow* win, SResultRow** pResult, int64_t tableGroupId, SqlFunctionCtx* pCtx,
+ int32_t numOfOutput, int32_t* rowEntryInfoOffset, SAggSupporter* pAggSup,
+ SExecTaskInfo* pTaskInfo) {
+ SWinKey key = {
+ .ts = win->skey,
+ .groupId = tableGroupId,
+ };
+ char* value = NULL;
+ int32_t size = pAggSup->resultRowSize;
+
+ tSimpleHashPut(pAggSup->pResultRowHashTable, &key, sizeof(SWinKey), NULL, 0);
+ if (streamStateAddIfNotExist(pTaskInfo->streamInfo.pState, &key, (void**)&value, &size) < 0) {
+ return TSDB_CODE_QRY_OUT_OF_MEMORY;
+ }
+ *pResult = (SResultRow*)value;
+ ASSERT(*pResult);
+ // set time window for current result
+ (*pResult)->win = (*win);
+ setResultRowInitCtx(*pResult, pCtx, numOfOutput, rowEntryInfoOffset);
+ return TSDB_CODE_SUCCESS;
+}
+
+int32_t releaseOutputBuf(SExecTaskInfo* pTaskInfo, SWinKey* pKey, SResultRow* pResult) {
+ streamStateReleaseBuf(pTaskInfo->streamInfo.pState, pKey, pResult);
+ /*taosMemoryFree((*(void**)pResult));*/
+ return TSDB_CODE_SUCCESS;
+}
+
+int32_t saveOutputBuf(SExecTaskInfo* pTaskInfo, SWinKey* pKey, SResultRow* pResult, int32_t resSize) {
+ streamStatePut(pTaskInfo->streamInfo.pState, pKey, pResult, resSize);
+ return TSDB_CODE_SUCCESS;
+}
+
+int32_t buildDataBlockFromGroupRes(SExecTaskInfo* pTaskInfo, SSDataBlock* pBlock, SExprSupp* pSup,
+ SGroupResInfo* pGroupResInfo) {
+ SExprInfo* pExprInfo = pSup->pExprInfo;
+ int32_t numOfExprs = pSup->numOfExprs;
+ int32_t* rowEntryOffset = pSup->rowEntryInfoOffset;
+ SqlFunctionCtx* pCtx = pSup->pCtx;
+
+ int32_t numOfRows = getNumOfTotalRes(pGroupResInfo);
+
+ for (int32_t i = pGroupResInfo->index; i < numOfRows; i += 1) {
+ SResKeyPos* pPos = taosArrayGetP(pGroupResInfo->pRows, i);
+ int32_t size = 0;
+ void* pVal = NULL;
+ SWinKey key = {
+ .ts = *(TSKEY*)pPos->key,
+ .groupId = pPos->groupId,
+ };
+ int32_t code = streamStateGet(pTaskInfo->streamInfo.pState, &key, &pVal, &size);
+ ASSERT(code == 0);
+ SResultRow* pRow = (SResultRow*)pVal;
+ doUpdateNumOfRows(pCtx, pRow, numOfExprs, rowEntryOffset);
+ // no results, continue to check the next one
+ if (pRow->numOfRows == 0) {
+ pGroupResInfo->index += 1;
+ releaseOutputBuf(pTaskInfo, &key, pRow);
+ continue;
+ }
+
+ if (pBlock->info.groupId == 0) {
+ pBlock->info.groupId = pPos->groupId;
+ } else {
+ // current value belongs to different group, it can't be packed into one datablock
+ if (pBlock->info.groupId != pPos->groupId) {
+ releaseOutputBuf(pTaskInfo, &key, pRow);
+ break;
+ }
+ }
+
+ if (pBlock->info.rows + pRow->numOfRows > pBlock->info.capacity) {
+ ASSERT(pBlock->info.rows > 0);
+ releaseOutputBuf(pTaskInfo, &key, pRow);
+ break;
+ }
+
+ pGroupResInfo->index += 1;
+
+ for (int32_t j = 0; j < numOfExprs; ++j) {
+ int32_t slotId = pExprInfo[j].base.resSchema.slotId;
+
+ pCtx[j].resultInfo = getResultEntryInfo(pRow, j, rowEntryOffset);
+ if (pCtx[j].fpSet.finalize) {
+ int32_t code1 = pCtx[j].fpSet.finalize(&pCtx[j], pBlock);
+ if (TAOS_FAILED(code1)) {
+ qError("%s build result data block error, code %s", GET_TASKID(pTaskInfo), tstrerror(code1));
+ T_LONG_JMP(pTaskInfo->env, code1);
+ }
+ } else if (strcmp(pCtx[j].pExpr->pExpr->_function.functionName, "_select_value") == 0) {
+ // do nothing, todo refactor
+ } else {
+ // expand the result into multiple rows. E.g., _wstart, top(k, 20)
+ // the _wstart needs to copy to 20 following rows, since the results of top-k expands to 20 different rows.
+ SColumnInfoData* pColInfoData = taosArrayGet(pBlock->pDataBlock, slotId);
+ char* in = GET_ROWCELL_INTERBUF(pCtx[j].resultInfo);
+ for (int32_t k = 0; k < pRow->numOfRows; ++k) {
+ colDataAppend(pColInfoData, pBlock->info.rows + k, in, pCtx[j].resultInfo->isNullRes);
+ }
+ }
+ }
+
+ pBlock->info.rows += pRow->numOfRows;
+ releaseOutputBuf(pTaskInfo, &key, pRow);
+ }
+ blockDataUpdateTsWindow(pBlock, 0);
+ return TSDB_CODE_SUCCESS;
+}
diff --git a/source/libs/executor/src/groupoperator.c b/source/libs/executor/src/groupoperator.c
index 53709c7dcc78b380b54afd2a25947a67f5381ecd..7cb641a9432029bc088273812a6b07fe6af102da 100644
--- a/source/libs/executor/src/groupoperator.c
+++ b/source/libs/executor/src/groupoperator.c
@@ -13,26 +13,26 @@
* along with this program. If not, see .
*/
-#include "os.h"
#include "function.h"
+#include "os.h"
#include "tname.h"
#include "tdatablock.h"
#include "tmsg.h"
+#include "executorInt.h"
#include "executorimpl.h"
#include "tcompare.h"
#include "thash.h"
#include "ttypes.h"
-#include "executorInt.h"
static void* getCurrentDataGroupInfo(const SPartitionOperatorInfo* pInfo, SDataGroupInfo** pGroupInfo, int32_t len);
static int32_t* setupColumnOffset(const SSDataBlock* pBlock, int32_t rowCapacity);
-static int32_t setGroupResultOutputBuf(SOperatorInfo* pOperator, SOptrBasicInfo* binfo, int32_t numOfCols, char* pData, int16_t bytes,
- uint64_t groupId, SDiskbasedBuf* pBuf, SAggSupporter* pAggSup);
+static int32_t setGroupResultOutputBuf(SOperatorInfo* pOperator, SOptrBasicInfo* binfo, int32_t numOfCols, char* pData,
+ int16_t bytes, uint64_t groupId, SDiskbasedBuf* pBuf, SAggSupporter* pAggSup);
static void freeGroupKey(void* param) {
- SGroupKeys* pKey = (SGroupKeys*) param;
+ SGroupKeys* pKey = (SGroupKeys*)param;
taosMemoryFree(pKey->pData);
}
@@ -62,13 +62,13 @@ static int32_t initGroupOptrInfo(SArray** pGroupColVals, int32_t* keyLen, char**
int32_t numOfGroupCols = taosArrayGetSize(pGroupColList);
for (int32_t i = 0; i < numOfGroupCols; ++i) {
SColumn* pCol = taosArrayGet(pGroupColList, i);
- (*keyLen) += pCol->bytes; // actual data + null_flag
+ (*keyLen) += pCol->bytes; // actual data + null_flag
SGroupKeys key = {0};
- key.bytes = pCol->bytes;
- key.type = pCol->type;
+ key.bytes = pCol->bytes;
+ key.type = pCol->type;
key.isNull = false;
- key.pData = taosMemoryCalloc(1, pCol->bytes);
+ key.pData = taosMemoryCalloc(1, pCol->bytes);
if (key.pData == NULL) {
return TSDB_CODE_OUT_OF_MEMORY;
}
@@ -87,7 +87,8 @@ static int32_t initGroupOptrInfo(SArray** pGroupColVals, int32_t* keyLen, char**
return TSDB_CODE_SUCCESS;
}
-static bool groupKeyCompare(SArray* pGroupCols, SArray* pGroupColVals, SSDataBlock* pBlock, int32_t rowIndex, int32_t numOfGroupCols) {
+static bool groupKeyCompare(SArray* pGroupCols, SArray* pGroupColVals, SSDataBlock* pBlock, int32_t rowIndex,
+ int32_t numOfGroupCols) {
SColumnDataAgg* pColAgg = NULL;
for (int32_t i = 0; i < numOfGroupCols; ++i) {
SColumn* pCol = taosArrayGet(pGroupCols, i);
@@ -112,7 +113,7 @@ static bool groupKeyCompare(SArray* pGroupCols, SArray* pGroupColVals, SSDataBlo
if (pkey->type == TSDB_DATA_TYPE_JSON) {
int32_t dataLen = getJsonValueLen(val);
- if (memcmp(pkey->pData, val, dataLen) == 0){
+ if (memcmp(pkey->pData, val, dataLen) == 0) {
continue;
} else {
return false;
@@ -154,7 +155,7 @@ static void recordNewGroupKeys(SArray* pGroupCols, SArray* pGroupColVals, SSData
pkey->isNull = false;
char* val = colDataGetData(pColInfoData, rowIndex);
if (pkey->type == TSDB_DATA_TYPE_JSON) {
- if(tTagIsJson(val)){
+ if (tTagIsJson(val)) {
terrno = TSDB_CODE_QRY_JSON_IN_GROUP_ERROR;
return;
}
@@ -198,13 +199,13 @@ static int32_t buildGroupKeys(void* pKey, const SArray* pGroupColVals) {
}
}
- return (int32_t) (pStart - (char*)pKey);
+ return (int32_t)(pStart - (char*)pKey);
}
// assign the group keys or user input constant values if required
static void doAssignGroupKeys(SqlFunctionCtx* pCtx, int32_t numOfOutput, int32_t totalRows, int32_t rowIndex) {
for (int32_t i = 0; i < numOfOutput; ++i) {
- if (pCtx[i].functionId == -1) { // select count(*),key from t group by key.
+ if (pCtx[i].functionId == -1) { // select count(*),key from t group by key.
SResultRowEntryInfo* pEntryInfo = GET_RES_INFO(&pCtx[i]);
SColumnInfoData* pColInfoData = pCtx[i].input.pData[0];
@@ -221,7 +222,7 @@ static void doAssignGroupKeys(SqlFunctionCtx* pCtx, int32_t numOfOutput, int32_t
} else {
memcpy(dest, data, pColInfoData->info.bytes);
}
- } else { // it is a NULL value
+ } else { // it is a NULL value
pEntryInfo->isNullRes = 1;
}
@@ -275,7 +276,8 @@ static void doHashGroupbyAgg(SOperatorInfo* pOperator, SSDataBlock* pBlock) {
}
len = buildGroupKeys(pInfo->keyBuf, pInfo->pGroupColVals);
- int32_t ret = setGroupResultOutputBuf(pOperator, &(pInfo->binfo), pOperator->exprSupp.numOfExprs, pInfo->keyBuf, len, pBlock->info.groupId, pInfo->aggSup.pResultBuf, &pInfo->aggSup);
+ int32_t ret = setGroupResultOutputBuf(pOperator, &(pInfo->binfo), pOperator->exprSupp.numOfExprs, pInfo->keyBuf,
+ len, pBlock->info.groupId, pInfo->aggSup.pResultBuf, &pInfo->aggSup);
if (ret != TSDB_CODE_SUCCESS) { // null data, too many state code
T_LONG_JMP(pTaskInfo->env, TSDB_CODE_QRY_APP_ERROR);
}
@@ -291,9 +293,8 @@ static void doHashGroupbyAgg(SOperatorInfo* pOperator, SSDataBlock* pBlock) {
if (num > 0) {
len = buildGroupKeys(pInfo->keyBuf, pInfo->pGroupColVals);
- int32_t ret =
- setGroupResultOutputBuf(pOperator, &(pInfo->binfo), pOperator->exprSupp.numOfExprs, pInfo->keyBuf, len,
- pBlock->info.groupId, pInfo->aggSup.pResultBuf, &pInfo->aggSup);
+ int32_t ret = setGroupResultOutputBuf(pOperator, &(pInfo->binfo), pOperator->exprSupp.numOfExprs, pInfo->keyBuf,
+ len, pBlock->info.groupId, pInfo->aggSup.pResultBuf, &pInfo->aggSup);
if (ret != TSDB_CODE_SUCCESS) {
T_LONG_JMP(pTaskInfo->env, TSDB_CODE_QRY_APP_ERROR);
}
@@ -308,7 +309,7 @@ static SSDataBlock* buildGroupResultDataBlock(SOperatorInfo* pOperator) {
SGroupbyOperatorInfo* pInfo = pOperator->info;
SSDataBlock* pRes = pInfo->binfo.pRes;
- while(1) {
+ while (1) {
doBuildResultDatablock(pOperator, &pInfo->binfo, &pInfo->groupResInfo, pInfo->aggSup.pResultBuf);
doFilter(pInfo->pCondition, pRes, NULL);
@@ -323,7 +324,7 @@ static SSDataBlock* buildGroupResultDataBlock(SOperatorInfo* pOperator) {
}
pOperator->resultInfo.totalRows += pRes->info.rows;
- return (pRes->info.rows == 0)? NULL:pRes;
+ return (pRes->info.rows == 0) ? NULL : pRes;
}
static SSDataBlock* hashGroupbyAggregate(SOperatorInfo* pOperator) {
@@ -334,7 +335,7 @@ static SSDataBlock* hashGroupbyAggregate(SOperatorInfo* pOperator) {
SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo;
SGroupbyOperatorInfo* pInfo = pOperator->info;
- SSDataBlock* pRes = pInfo->binfo.pRes;
+ SSDataBlock* pRes = pInfo->binfo.pRes;
if (pOperator->status == OP_RES_TO_RETURN) {
return buildGroupResultDataBlock(pOperator);
@@ -343,7 +344,7 @@ static SSDataBlock* hashGroupbyAggregate(SOperatorInfo* pOperator) {
int32_t order = TSDB_ORDER_ASC;
int32_t scanFlag = MAIN_SCAN;
- int64_t st = taosGetTimestampUs();
+ int64_t st = taosGetTimestampUs();
SOperatorInfo* downstream = pOperator->pDownstream[0];
while (1) {
@@ -362,7 +363,8 @@ static SSDataBlock* hashGroupbyAggregate(SOperatorInfo* pOperator) {
// there is an scalar expression that needs to be calculated right before apply the group aggregation.
if (pInfo->scalarSup.pExprInfo != NULL) {
- pTaskInfo->code = projectApplyFunctions(pInfo->scalarSup.pExprInfo, pBlock, pBlock, pInfo->scalarSup.pCtx, pInfo->scalarSup.numOfExprs, NULL);
+ pTaskInfo->code = projectApplyFunctions(pInfo->scalarSup.pExprInfo, pBlock, pBlock, pInfo->scalarSup.pCtx,
+ pInfo->scalarSup.numOfExprs, NULL);
if (pTaskInfo->code != TSDB_CODE_SUCCESS) {
T_LONG_JMP(pTaskInfo->env, pTaskInfo->code);
}
@@ -403,8 +405,8 @@ SOperatorInfo* createGroupOperatorInfo(SOperatorInfo* downstream, SExprInfo* pEx
goto _error;
}
- pInfo->pGroupCols = pGroupColList;
- pInfo->pCondition = pCondition;
+ pInfo->pGroupCols = pGroupColList;
+ pInfo->pCondition = pCondition;
int32_t code = initExprSupp(&pInfo->scalarSup, pScalarExprInfo, numOfScalarExpr);
if (code != TSDB_CODE_SUCCESS) {
@@ -425,14 +427,15 @@ SOperatorInfo* createGroupOperatorInfo(SOperatorInfo* downstream, SExprInfo* pEx
initBasicInfo(&pInfo->binfo, pResultBlock);
initResultRowInfo(&pInfo->binfo.resultRowInfo);
- pOperator->name = "GroupbyAggOperator";
- pOperator->blocking = true;
- pOperator->status = OP_NOT_OPENED;
+ pOperator->name = "GroupbyAggOperator";
+ pOperator->blocking = true;
+ pOperator->status = OP_NOT_OPENED;
// pOperator->operatorType = OP_Groupby;
- pOperator->info = pInfo;
- pOperator->pTaskInfo = pTaskInfo;
+ pOperator->info = pInfo;
+ pOperator->pTaskInfo = pTaskInfo;
- pOperator->fpSet = createOperatorFpSet(operatorDummyOpenFn, hashGroupbyAggregate, NULL, NULL, destroyGroupOperatorInfo, aggEncodeResultRow, aggDecodeResultRow, NULL);
+ pOperator->fpSet = createOperatorFpSet(operatorDummyOpenFn, hashGroupbyAggregate, NULL, NULL,
+ destroyGroupOperatorInfo, aggEncodeResultRow, aggDecodeResultRow, NULL);
code = appendDownstream(pOperator, &downstream, 1);
if (code != TSDB_CODE_SUCCESS) {
goto _error;
@@ -440,7 +443,7 @@ SOperatorInfo* createGroupOperatorInfo(SOperatorInfo* downstream, SExprInfo* pEx
return pOperator;
- _error:
+_error:
pTaskInfo->code = TSDB_CODE_OUT_OF_MEMORY;
destroyGroupOperatorInfo(pInfo);
taosMemoryFreeClear(pOperator);
@@ -448,7 +451,7 @@ SOperatorInfo* createGroupOperatorInfo(SOperatorInfo* downstream, SExprInfo* pEx
}
static void doHashPartition(SOperatorInfo* pOperator, SSDataBlock* pBlock) {
-// SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo;
+ // SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo;
SPartitionOperatorInfo* pInfo = pOperator->info;
@@ -457,7 +460,7 @@ static void doHashPartition(SOperatorInfo* pOperator, SSDataBlock* pBlock) {
int32_t len = buildGroupKeys(pInfo->keyBuf, pInfo->pGroupColVals);
SDataGroupInfo* pGroupInfo = NULL;
- void *pPage = getCurrentDataGroupInfo(pInfo, &pGroupInfo, len);
+ void* pPage = getCurrentDataGroupInfo(pInfo, &pGroupInfo, len);
pGroupInfo->numOfRows += 1;
@@ -467,32 +470,32 @@ static void doHashPartition(SOperatorInfo* pOperator, SSDataBlock* pBlock) {
}
// number of rows
- int32_t* rows = (int32_t*) pPage;
+ int32_t* rows = (int32_t*)pPage;
size_t numOfCols = pOperator->exprSupp.numOfExprs;
- for(int32_t i = 0; i < numOfCols; ++i) {
+ for (int32_t i = 0; i < numOfCols; ++i) {
SExprInfo* pExpr = &pOperator->exprSupp.pExprInfo[i];
- int32_t slotId = pExpr->base.pParam[0].pCol->slotId;
+ int32_t slotId = pExpr->base.pParam[0].pCol->slotId;
SColumnInfoData* pColInfoData = taosArrayGet(pBlock->pDataBlock, slotId);
int32_t bytes = pColInfoData->info.bytes;
int32_t startOffset = pInfo->columnOffset[i];
- int32_t* columnLen = NULL;
+ int32_t* columnLen = NULL;
int32_t contentLen = 0;
if (IS_VAR_DATA_TYPE(pColInfoData->info.type)) {
int32_t* offset = (int32_t*)((char*)pPage + startOffset);
- columnLen = (int32_t*) ((char*)pPage + startOffset + sizeof(int32_t) * pInfo->rowCapacity);
- char* data = (char*)((char*) columnLen + sizeof(int32_t));
+ columnLen = (int32_t*)((char*)pPage + startOffset + sizeof(int32_t) * pInfo->rowCapacity);
+ char* data = (char*)((char*)columnLen + sizeof(int32_t));
if (colDataIsNull_s(pColInfoData, j)) {
offset[(*rows)] = -1;
contentLen = 0;
- } else if(pColInfoData->info.type == TSDB_DATA_TYPE_JSON){
+ } else if (pColInfoData->info.type == TSDB_DATA_TYPE_JSON) {
offset[*rows] = (*columnLen);
- char* src = colDataGetData(pColInfoData, j);
+ char* src = colDataGetData(pColInfoData, j);
int32_t dataLen = getJsonValueLen(src);
memcpy(data + (*columnLen), src, dataLen);
@@ -511,8 +514,8 @@ static void doHashPartition(SOperatorInfo* pOperator, SSDataBlock* pBlock) {
}
} else {
char* bitmap = (char*)pPage + startOffset;
- columnLen = (int32_t*) ((char*)pPage + startOffset + BitmapLen(pInfo->rowCapacity));
- char* data = (char*) columnLen + sizeof(int32_t);
+ columnLen = (int32_t*)((char*)pPage + startOffset + BitmapLen(pInfo->rowCapacity));
+ char* data = (char*)columnLen + sizeof(int32_t);
bool isNull = colDataIsNull_f(pColInfoData->nullbitmap, j);
if (isNull) {
@@ -539,7 +542,7 @@ void* getCurrentDataGroupInfo(const SPartitionOperatorInfo* pInfo, SDataGroupInf
SDataGroupInfo* p = taosHashGet(pInfo->pGroupSet, pInfo->keyBuf, len);
void* pPage = NULL;
- if (p == NULL) { // it is a new group
+ if (p == NULL) { // it is a new group
SDataGroupInfo gi = {0};
gi.pPageList = taosArrayInit(100, sizeof(int32_t));
taosHashPut(pInfo->pGroupSet, pInfo->keyBuf, len, &gi, sizeof(SDataGroupInfo));
@@ -547,22 +550,22 @@ void* getCurrentDataGroupInfo(const SPartitionOperatorInfo* pInfo, SDataGroupInf
p = taosHashGet(pInfo->pGroupSet, pInfo->keyBuf, len);
int32_t pageId = 0;
- pPage = getNewBufPage(pInfo->pBuf, 0, &pageId);
+ pPage = getNewBufPage(pInfo->pBuf, &pageId);
taosArrayPush(p->pPageList, &pageId);
- *(int32_t *) pPage = 0;
+ *(int32_t*)pPage = 0;
} else {
int32_t* curId = taosArrayGetLast(p->pPageList);
pPage = getBufPage(pInfo->pBuf, *curId);
- int32_t *rows = (int32_t*) pPage;
+ int32_t* rows = (int32_t*)pPage;
if (*rows >= pInfo->rowCapacity) {
// release buffer
releaseBufPage(pInfo->pBuf, pPage);
// add a new page for current group
int32_t pageId = 0;
- pPage = getNewBufPage(pInfo->pBuf, 0, &pageId);
+ pPage = getNewBufPage(pInfo->pBuf, &pageId);
taosArrayPush(p->pPageList, &pageId);
memset(pPage, 0, getBufPageSize(pInfo->pBuf));
}
@@ -585,17 +588,18 @@ uint64_t calcGroupId(char* pData, int32_t len) {
}
int32_t* setupColumnOffset(const SSDataBlock* pBlock, int32_t rowCapacity) {
- size_t numOfCols = taosArrayGetSize(pBlock->pDataBlock);
+ size_t numOfCols = taosArrayGetSize(pBlock->pDataBlock);
int32_t* offset = taosMemoryCalloc(numOfCols, sizeof(int32_t));
- offset[0] = sizeof(int32_t) + sizeof(uint64_t); // the number of rows in current page, ref to SSDataBlock paged serialization format
+ offset[0] = sizeof(int32_t) +
+ sizeof(uint64_t); // the number of rows in current page, ref to SSDataBlock paged serialization format
- for(int32_t i = 0; i < numOfCols - 1; ++i) {
+ for (int32_t i = 0; i < numOfCols - 1; ++i) {
SColumnInfoData* pColInfoData = taosArrayGet(pBlock->pDataBlock, i);
int32_t bytes = pColInfoData->info.bytes;
int32_t payloadLen = bytes * rowCapacity;
-
+
if (IS_VAR_DATA_TYPE(pColInfoData->info.type)) {
// offset segment + content length + payload
offset[i + 1] = rowCapacity * sizeof(int32_t) + sizeof(int32_t) + payloadLen + offset[i];
@@ -609,9 +613,9 @@ int32_t* setupColumnOffset(const SSDataBlock* pBlock, int32_t rowCapacity) {
}
static void clearPartitionOperator(SPartitionOperatorInfo* pInfo) {
- void *ite = NULL;
- while( (ite = taosHashIterate(pInfo->pGroupSet, ite)) != NULL ) {
- taosArrayDestroy( ((SDataGroupInfo *)ite)->pPageList);
+ void* ite = NULL;
+ while ((ite = taosHashIterate(pInfo->pGroupSet, ite)) != NULL) {
+ taosArrayDestroy(((SDataGroupInfo*)ite)->pPageList);
}
taosArrayClear(pInfo->sortedGroupArray);
clearDiskbasedBuf(pInfo->pBuf);
@@ -626,13 +630,14 @@ static int compareDataGroupInfo(const void* group1, const void* group2) {
return 0;
}
- return (pGroupInfo1->groupId < pGroupInfo2->groupId)? -1:1;
+ return (pGroupInfo1->groupId < pGroupInfo2->groupId) ? -1 : 1;
}
static SSDataBlock* buildPartitionResult(SOperatorInfo* pOperator) {
SPartitionOperatorInfo* pInfo = pOperator->info;
- SDataGroupInfo* pGroupInfo = (pInfo->groupIndex != -1) ? taosArrayGet(pInfo->sortedGroupArray, pInfo->groupIndex) : NULL;
+ SDataGroupInfo* pGroupInfo =
+ (pInfo->groupIndex != -1) ? taosArrayGet(pInfo->sortedGroupArray, pInfo->groupIndex) : NULL;
if (pInfo->groupIndex == -1 || pInfo->pageIndex >= taosArrayGetSize(pGroupInfo->pPageList)) {
// try next group data
++pInfo->groupIndex;
@@ -647,7 +652,7 @@ static SSDataBlock* buildPartitionResult(SOperatorInfo* pOperator) {
}
int32_t* pageId = taosArrayGet(pGroupInfo->pPageList, pInfo->pageIndex);
- void* page = getBufPage(pInfo->pBuf, *pageId);
+ void* page = getBufPage(pInfo->pBuf, *pageId);
blockDataEnsureCapacity(pInfo->binfo.pRes, pInfo->rowCapacity);
blockDataFromBuf1(pInfo->binfo.pRes, page, pInfo->rowCapacity);
@@ -670,14 +675,14 @@ static SSDataBlock* hashPartition(SOperatorInfo* pOperator) {
SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo;
SPartitionOperatorInfo* pInfo = pOperator->info;
- SSDataBlock* pRes = pInfo->binfo.pRes;
+ SSDataBlock* pRes = pInfo->binfo.pRes;
if (pOperator->status == OP_RES_TO_RETURN) {
blockDataCleanup(pRes);
return buildPartitionResult(pOperator);
}
- int64_t st = taosGetTimestampUs();
+ int64_t st = taosGetTimestampUs();
SOperatorInfo* downstream = pOperator->pDownstream[0];
while (1) {
@@ -688,7 +693,8 @@ static SSDataBlock* hashPartition(SOperatorInfo* pOperator) {
// there is an scalar expression that needs to be calculated right before apply the group aggregation.
if (pInfo->scalarSup.pExprInfo != NULL) {
- pTaskInfo->code = projectApplyFunctions(pInfo->scalarSup.pExprInfo, pBlock, pBlock, pInfo->scalarSup.pCtx, pInfo->scalarSup.numOfExprs, NULL);
+ pTaskInfo->code = projectApplyFunctions(pInfo->scalarSup.pExprInfo, pBlock, pBlock, pInfo->scalarSup.pCtx,
+ pInfo->scalarSup.numOfExprs, NULL);
if (pTaskInfo->code != TSDB_CODE_SUCCESS) {
T_LONG_JMP(pTaskInfo->env, pTaskInfo->code);
}
@@ -702,8 +708,8 @@ static SSDataBlock* hashPartition(SOperatorInfo* pOperator) {
}
SArray* groupArray = taosArrayInit(taosHashGetSize(pInfo->pGroupSet), sizeof(SDataGroupInfo));
- void* pGroupIter = NULL;
- pGroupIter = taosHashIterate(pInfo->pGroupSet, NULL);
+
+ void* pGroupIter = taosHashIterate(pInfo->pGroupSet, NULL);
while (pGroupIter != NULL) {
SDataGroupInfo* pGroupInfo = pGroupIter;
taosArrayPush(groupArray, pGroupInfo);
@@ -727,7 +733,7 @@ static void destroyPartitionOperatorInfo(void* param) {
cleanupBasicInfo(&pInfo->binfo);
taosArrayDestroy(pInfo->pGroupCols);
- for(int i = 0; i < taosArrayGetSize(pInfo->pGroupColVals); i++){
+ for (int i = 0; i < taosArrayGetSize(pInfo->pGroupColVals); i++) {
SGroupKeys key = *(SGroupKeys*)taosArrayGet(pInfo->pGroupColVals, i);
taosMemoryFree(key.pData);
}
@@ -743,24 +749,25 @@ static void destroyPartitionOperatorInfo(void* param) {
taosMemoryFreeClear(param);
}
-SOperatorInfo* createPartitionOperatorInfo(SOperatorInfo* downstream, SPartitionPhysiNode* pPartNode, SExecTaskInfo* pTaskInfo) {
+SOperatorInfo* createPartitionOperatorInfo(SOperatorInfo* downstream, SPartitionPhysiNode* pPartNode,
+ SExecTaskInfo* pTaskInfo) {
SPartitionOperatorInfo* pInfo = taosMemoryCalloc(1, sizeof(SPartitionOperatorInfo));
- SOperatorInfo* pOperator = taosMemoryCalloc(1, sizeof(SOperatorInfo));
+ SOperatorInfo* pOperator = taosMemoryCalloc(1, sizeof(SOperatorInfo));
if (pInfo == NULL || pOperator == NULL) {
goto _error;
}
SSDataBlock* pResBlock = createResDataBlock(pPartNode->node.pOutputDataBlockDesc);
- int32_t numOfCols = 0;
+ int32_t numOfCols = 0;
SExprInfo* pExprInfo = createExprInfo(pPartNode->pTargets, NULL, &numOfCols);
pInfo->pGroupCols = extractPartitionColInfo(pPartNode->pPartitionKeys);
if (pPartNode->pExprs != NULL) {
- int32_t num = 0;
+ int32_t num = 0;
SExprInfo* pExprInfo1 = createExprInfo(pPartNode->pExprs, NULL, &num);
- int32_t code = initExprSupp(&pInfo->scalarSup, pExprInfo1, num);
+ int32_t code = initExprSupp(&pInfo->scalarSup, pExprInfo1, num);
if (code != TSDB_CODE_SUCCESS) {
goto _error;
}
@@ -772,7 +779,7 @@ SOperatorInfo* createPartitionOperatorInfo(SOperatorInfo* downstream, SPartition
goto _error;
}
- uint32_t defaultPgsz = 0;
+ uint32_t defaultPgsz = 0;
uint32_t defaultBufsz = 0;
getBufferPgSize(pResBlock->info.rowSize, &defaultPgsz, &defaultBufsz);
@@ -794,15 +801,15 @@ SOperatorInfo* createPartitionOperatorInfo(SOperatorInfo* downstream, SPartition
goto _error;
}
- pOperator->name = "PartitionOperator";
- pOperator->blocking = true;
- pOperator->status = OP_NOT_OPENED;
+ pOperator->name = "PartitionOperator";
+ pOperator->blocking = true;
+ pOperator->status = OP_NOT_OPENED;
pOperator->operatorType = QUERY_NODE_PHYSICAL_PLAN_PARTITION;
- pInfo->binfo.pRes = pResBlock;
- pOperator->exprSupp.numOfExprs = numOfCols;
- pOperator->exprSupp.pExprInfo = pExprInfo;
- pOperator->info = pInfo;
- pOperator->pTaskInfo = pTaskInfo;
+ pInfo->binfo.pRes = pResBlock;
+ pOperator->exprSupp.numOfExprs = numOfCols;
+ pOperator->exprSupp.pExprInfo = pExprInfo;
+ pOperator->info = pInfo;
+ pOperator->pTaskInfo = pTaskInfo;
pOperator->fpSet = createOperatorFpSet(operatorDummyOpenFn, hashPartition, NULL, NULL, destroyPartitionOperatorInfo,
NULL, NULL, NULL);
@@ -810,16 +817,16 @@ SOperatorInfo* createPartitionOperatorInfo(SOperatorInfo* downstream, SPartition
code = appendDownstream(pOperator, &downstream, 1);
return pOperator;
- _error:
+_error:
pTaskInfo->code = TSDB_CODE_OUT_OF_MEMORY;
taosMemoryFreeClear(pInfo);
taosMemoryFreeClear(pOperator);
return NULL;
}
-int32_t setGroupResultOutputBuf(SOperatorInfo* pOperator, SOptrBasicInfo* binfo, int32_t numOfCols, char* pData, int16_t bytes,
- uint64_t groupId, SDiskbasedBuf* pBuf, SAggSupporter* pAggSup) {
- SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo;
+int32_t setGroupResultOutputBuf(SOperatorInfo* pOperator, SOptrBasicInfo* binfo, int32_t numOfCols, char* pData,
+ int16_t bytes, uint64_t groupId, SDiskbasedBuf* pBuf, SAggSupporter* pAggSup) {
+ SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo;
SResultRowInfo* pResultRowInfo = &binfo->resultRowInfo;
SqlFunctionCtx* pCtx = pOperator->exprSupp.pCtx;
@@ -830,3 +837,211 @@ int32_t setGroupResultOutputBuf(SOperatorInfo* pOperator, SOptrBasicInfo* binfo,
setResultRowInitCtx(pResultRow, pCtx, numOfCols, pOperator->exprSupp.rowEntryInfoOffset);
return TSDB_CODE_SUCCESS;
}
+
+uint64_t calGroupIdByData(SPartitionBySupporter* pParSup, SExprSupp* pExprSup, SSDataBlock* pBlock, int32_t rowId) {
+ if (pExprSup->pExprInfo != NULL) {
+ int32_t code =
+ projectApplyFunctions(pExprSup->pExprInfo, pBlock, pBlock, pExprSup->pCtx, pExprSup->numOfExprs, NULL);
+ if (code != TSDB_CODE_SUCCESS) {
+ qError("calaculate group id error, code:%d", code);
+ }
+ }
+ recordNewGroupKeys(pParSup->pGroupCols, pParSup->pGroupColVals, pBlock, rowId);
+ int32_t len = buildGroupKeys(pParSup->keyBuf, pParSup->pGroupColVals);
+ uint64_t groupId = calcGroupId(pParSup->keyBuf, len);
+ return groupId;
+}
+
+static bool hasRemainPartion(SStreamPartitionOperatorInfo* pInfo) { return pInfo->parIte != NULL; }
+
+static SSDataBlock* buildStreamPartitionResult(SOperatorInfo* pOperator) {
+ SStreamPartitionOperatorInfo* pInfo = pOperator->info;
+ SSDataBlock* pDest = pInfo->binfo.pRes;
+ ASSERT(hasRemainPartion(pInfo));
+ SPartitionDataInfo* pParInfo = (SPartitionDataInfo*)pInfo->parIte;
+ blockDataCleanup(pDest);
+ int32_t rows = taosArrayGetSize(pParInfo->rowIds);
+ SSDataBlock* pSrc = pInfo->pInputDataBlock;
+ for (int32_t i = 0; i < rows; i++) {
+ int32_t rowIndex = *(int32_t*)taosArrayGet(pParInfo->rowIds, i);
+ for (int32_t j = 0; j < pOperator->exprSupp.numOfExprs; j++) {
+ int32_t slotId = pOperator->exprSupp.pExprInfo[j].base.pParam[0].pCol->slotId;
+ SColumnInfoData* pSrcCol = taosArrayGet(pSrc->pDataBlock, slotId);
+ SColumnInfoData* pDestCol = taosArrayGet(pDest->pDataBlock, j);
+ bool isNull = colDataIsNull(pSrcCol, pSrc->info.rows, rowIndex, NULL);
+ char* pSrcData = colDataGetData(pSrcCol, rowIndex);
+ colDataAppend(pDestCol, pDest->info.rows, pSrcData, isNull);
+ }
+ pDest->info.rows++;
+ }
+ blockDataUpdateTsWindow(pDest, pInfo->tsColIndex);
+ pDest->info.groupId = pParInfo->groupId;
+ pOperator->resultInfo.totalRows += pDest->info.rows;
+ pInfo->parIte = taosHashIterate(pInfo->pPartitions, pInfo->parIte);
+ ASSERT(pDest->info.rows > 0);
+ printDataBlock(pDest, "stream partitionby");
+ return pDest;
+}
+
+static void doStreamHashPartitionImpl(SStreamPartitionOperatorInfo* pInfo, SSDataBlock* pBlock) {
+ pInfo->pInputDataBlock = pBlock;
+ for (int32_t i = 0; i < pBlock->info.rows; ++i) {
+ recordNewGroupKeys(pInfo->partitionSup.pGroupCols, pInfo->partitionSup.pGroupColVals, pBlock, i);
+ int32_t keyLen = buildGroupKeys(pInfo->partitionSup.keyBuf, pInfo->partitionSup.pGroupColVals);
+ SPartitionDataInfo* pParData =
+ (SPartitionDataInfo*)taosHashGet(pInfo->pPartitions, pInfo->partitionSup.keyBuf, keyLen);
+ if (pParData) {
+ taosArrayPush(pParData->rowIds, &i);
+ } else {
+ SPartitionDataInfo newParData = {0};
+ newParData.groupId = calcGroupId(pInfo->partitionSup.keyBuf, keyLen);
+ newParData.rowIds = taosArrayInit(64, sizeof(int32_t));
+ taosArrayPush(newParData.rowIds, &i);
+ taosHashPut(pInfo->pPartitions, pInfo->partitionSup.keyBuf, keyLen, &newParData, sizeof(SPartitionDataInfo));
+ }
+ }
+}
+
+static SSDataBlock* doStreamHashPartition(SOperatorInfo* pOperator) {
+ if (pOperator->status == OP_EXEC_DONE) {
+ return NULL;
+ }
+
+ SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo;
+ SStreamPartitionOperatorInfo* pInfo = pOperator->info;
+ if (hasRemainPartion(pInfo)) {
+ return buildStreamPartitionResult(pOperator);
+ }
+
+ int64_t st = taosGetTimestampUs();
+ SOperatorInfo* downstream = pOperator->pDownstream[0];
+ {
+ pInfo->pInputDataBlock = NULL;
+ SSDataBlock* pBlock = downstream->fpSet.getNextFn(downstream);
+ if (pBlock == NULL) {
+ doSetOperatorCompleted(pOperator);
+ return NULL;
+ }
+ printDataBlock(pBlock, "stream partitionby recv");
+ switch (pBlock->info.type) {
+ case STREAM_NORMAL:
+ case STREAM_PULL_DATA:
+ case STREAM_INVALID:
+ pInfo->binfo.pRes->info.type = pBlock->info.type;
+ break;
+ case STREAM_DELETE_DATA: {
+ copyDataBlock(pInfo->pDelRes, pBlock);
+ pInfo->pDelRes->info.type = STREAM_DELETE_RESULT;
+ return pInfo->pDelRes;
+ } break;
+ default:
+ return pBlock;
+ }
+
+ // there is an scalar expression that needs to be calculated right before apply the group aggregation.
+ if (pInfo->scalarSup.pExprInfo != NULL) {
+ pTaskInfo->code = projectApplyFunctions(pInfo->scalarSup.pExprInfo, pBlock, pBlock, pInfo->scalarSup.pCtx,
+ pInfo->scalarSup.numOfExprs, NULL);
+ if (pTaskInfo->code != TSDB_CODE_SUCCESS) {
+ longjmp(pTaskInfo->env, pTaskInfo->code);
+ }
+ }
+ taosHashClear(pInfo->pPartitions);
+ doStreamHashPartitionImpl(pInfo, pBlock);
+ }
+ pOperator->cost.openCost = (taosGetTimestampUs() - st) / 1000.0;
+
+ pInfo->parIte = taosHashIterate(pInfo->pPartitions, NULL);
+ return buildStreamPartitionResult(pOperator);
+}
+
+static void destroyStreamPartitionOperatorInfo(void* param) {
+ SStreamPartitionOperatorInfo* pInfo = (SStreamPartitionOperatorInfo*)param;
+ cleanupBasicInfo(&pInfo->binfo);
+ taosArrayDestroy(pInfo->partitionSup.pGroupCols);
+
+ for (int i = 0; i < taosArrayGetSize(pInfo->partitionSup.pGroupColVals); i++) {
+ SGroupKeys key = *(SGroupKeys*)taosArrayGet(pInfo->partitionSup.pGroupColVals, i);
+ taosMemoryFree(key.pData);
+ }
+ taosArrayDestroy(pInfo->partitionSup.pGroupColVals);
+
+ taosMemoryFree(pInfo->partitionSup.keyBuf);
+ cleanupExprSupp(&pInfo->scalarSup);
+ blockDataDestroy(pInfo->pDelRes);
+ taosMemoryFreeClear(param);
+}
+
+void initParDownStream(SOperatorInfo* downstream, SPartitionBySupporter* pParSup, SExprSupp* pExpr) {
+ if (downstream->operatorType != QUERY_NODE_PHYSICAL_PLAN_STREAM_SCAN) {
+ return;
+ }
+ SStreamScanInfo* pScanInfo = downstream->info;
+ pScanInfo->partitionSup = *pParSup;
+ pScanInfo->pPartScalarSup = pExpr;
+}
+
+SOperatorInfo* createStreamPartitionOperatorInfo(SOperatorInfo* downstream, SStreamPartitionPhysiNode* pPartNode,
+ SExecTaskInfo* pTaskInfo) {
+ SStreamPartitionOperatorInfo* pInfo = taosMemoryCalloc(1, sizeof(SStreamPartitionOperatorInfo));
+ SOperatorInfo* pOperator = taosMemoryCalloc(1, sizeof(SOperatorInfo));
+ if (pInfo == NULL || pOperator == NULL) {
+ goto _error;
+ }
+ int32_t code = TSDB_CODE_SUCCESS;
+ pInfo->partitionSup.pGroupCols = extractPartitionColInfo(pPartNode->part.pPartitionKeys);
+
+ if (pPartNode->part.pExprs != NULL) {
+ int32_t num = 0;
+ SExprInfo* pCalExprInfo = createExprInfo(pPartNode->part.pExprs, NULL, &num);
+ code = initExprSupp(&pInfo->scalarSup, pCalExprInfo, num);
+ if (code != TSDB_CODE_SUCCESS) {
+ goto _error;
+ }
+ }
+
+ int32_t keyLen = 0;
+ code = initGroupOptrInfo(&pInfo->partitionSup.pGroupColVals, &keyLen, &pInfo->partitionSup.keyBuf,
+ pInfo->partitionSup.pGroupCols);
+ if (code != TSDB_CODE_SUCCESS) {
+ goto _error;
+ }
+ pInfo->partitionSup.needCalc = true;
+
+ SSDataBlock* pResBlock = createResDataBlock(pPartNode->part.node.pOutputDataBlockDesc);
+ if (!pResBlock) {
+ goto _error;
+ }
+ blockDataEnsureCapacity(pResBlock, 4096);
+ pInfo->binfo.pRes = pResBlock;
+ pInfo->parIte = NULL;
+ pInfo->pInputDataBlock = NULL;
+ _hash_fn_t hashFn = taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY);
+ pInfo->pPartitions = taosHashInit(1024, hashFn, false, HASH_NO_LOCK);
+ pInfo->tsColIndex = 0;
+ pInfo->pDelRes = createSpecialDataBlock(STREAM_DELETE_RESULT);
+
+ int32_t numOfCols = 0;
+ SExprInfo* pExprInfo = createExprInfo(pPartNode->part.pTargets, NULL, &numOfCols);
+
+ pOperator->name = "StreamPartitionOperator";
+ pOperator->blocking = false;
+ pOperator->status = OP_NOT_OPENED;
+ pOperator->operatorType = QUERY_NODE_PHYSICAL_PLAN_STREAM_PARTITION;
+ pOperator->exprSupp.numOfExprs = numOfCols;
+ pOperator->exprSupp.pExprInfo = pExprInfo;
+ pOperator->info = pInfo;
+ pOperator->pTaskInfo = pTaskInfo;
+ pOperator->fpSet = createOperatorFpSet(operatorDummyOpenFn, doStreamHashPartition, NULL, NULL,
+ destroyStreamPartitionOperatorInfo, NULL, NULL, NULL);
+
+ initParDownStream(downstream, &pInfo->partitionSup, &pInfo->scalarSup);
+ code = appendDownstream(pOperator, &downstream, 1);
+ return pOperator;
+
+_error:
+ pTaskInfo->code = TSDB_CODE_OUT_OF_MEMORY;
+ destroyStreamPartitionOperatorInfo(pInfo);
+ taosMemoryFreeClear(pOperator);
+ return NULL;
+}
diff --git a/source/libs/executor/src/projectoperator.c b/source/libs/executor/src/projectoperator.c
index 0661ccd3902bc0ba653e988cf6a03f91d2c6c68f..e9e6fed66aad43daf71a99613fb966d16461da76 100644
--- a/source/libs/executor/src/projectoperator.c
+++ b/source/libs/executor/src/projectoperator.c
@@ -53,7 +53,7 @@ static void destroyIndefinitOperatorInfo(void* param) {
SOperatorInfo* createProjectOperatorInfo(SOperatorInfo* downstream, SProjectPhysiNode* pProjPhyNode,
SExecTaskInfo* pTaskInfo) {
- int32_t code = TSDB_CODE_SUCCESS;
+ int32_t code = TSDB_CODE_SUCCESS;
SProjectOperatorInfo* pInfo = taosMemoryCalloc(1, sizeof(SProjectOperatorInfo));
SOperatorInfo* pOperator = taosMemoryCalloc(1, sizeof(SOperatorInfo));
if (pInfo == NULL || pOperator == NULL) {
@@ -184,7 +184,7 @@ static int32_t doIngroupLimitOffset(SLimitInfo* pLimitInfo, uint64_t groupId, SS
if (pLimitInfo->limit.limit >= 0 && pLimitInfo->numOfOutputRows + pBlock->info.rows >= pLimitInfo->limit.limit) {
int32_t keepRows = (int32_t)(pLimitInfo->limit.limit - pLimitInfo->numOfOutputRows);
blockDataKeepFirstNRows(pBlock, keepRows);
- //TODO: optimize it later when partition by + limit
+ // TODO: optimize it later when partition by + limit
if ((pLimitInfo->slimit.limit == -1 && pLimitInfo->currentGroupId == 0) ||
(pLimitInfo->slimit.limit > 0 && pLimitInfo->slimit.limit <= pLimitInfo->numOfOutputGroups)) {
doSetOperatorCompleted(pOperator);
@@ -195,16 +195,6 @@ static int32_t doIngroupLimitOffset(SLimitInfo* pLimitInfo, uint64_t groupId, SS
return PROJECT_RETRIEVE_DONE;
}
-void printDataBlock1(SSDataBlock* pBlock, const char* flag) {
- if (!pBlock || pBlock->info.rows == 0) {
- qDebug("===stream===printDataBlock: Block is Null or Empty");
- return;
- }
- char* pBuf = NULL;
- qDebug("%s", dumpBlockData(pBlock, flag, &pBuf));
- taosMemoryFreeClear(pBuf);
-}
-
SSDataBlock* doProjectOperation(SOperatorInfo* pOperator) {
SProjectOperatorInfo* pProjectInfo = pOperator->info;
SOptrBasicInfo* pInfo = &pProjectInfo->binfo;
@@ -216,9 +206,16 @@ SSDataBlock* doProjectOperation(SOperatorInfo* pOperator) {
blockDataCleanup(pFinalRes);
SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo;
+ if (pTaskInfo->streamInfo.pReq) {
+ pOperator->status = OP_OPENED;
+ }
+
+ qDebug("enter project");
+
if (pOperator->status == OP_EXEC_DONE) {
if (pTaskInfo->execModel == OPTR_EXEC_MODEL_QUEUE) {
pOperator->status = OP_OPENED;
+ qDebug("projection in queue model, set status open and return null");
return NULL;
}
@@ -247,9 +244,23 @@ SSDataBlock* doProjectOperation(SOperatorInfo* pOperator) {
// The downstream exec may change the value of the newgroup, so use a local variable instead.
SSDataBlock* pBlock = downstream->fpSet.getNextFn(downstream);
if (pBlock == NULL) {
+ if (pTaskInfo->execModel == OPTR_EXEC_MODEL_QUEUE && pFinalRes->info.rows == 0) {
+ pOperator->status = OP_OPENED;
+ if (pOperator->status == OP_EXEC_RECV) {
+ continue;
+ } else {
+ return NULL;
+ }
+ }
+ qDebug("set op close, exec %d, status %d rows %d", pTaskInfo->execModel, pOperator->status,
+ pFinalRes->info.rows);
doSetOperatorCompleted(pOperator);
break;
}
+ if (pTaskInfo->execModel == OPTR_EXEC_MODEL_QUEUE) {
+ qDebug("set status recv");
+ pOperator->status = OP_EXEC_RECV;
+ }
// for stream interval
if (pBlock->info.type == STREAM_RETRIEVE || pBlock->info.type == STREAM_DELETE_RESULT ||
@@ -308,6 +319,7 @@ SSDataBlock* doProjectOperation(SOperatorInfo* pOperator) {
// when apply the limit/offset for each group, pRes->info.rows may be 0, due to limit constraint.
if (pFinalRes->info.rows > 0 || (pOperator->status == OP_EXEC_DONE)) {
+ qDebug("project return %d rows, status %d", pFinalRes->info.rows, pOperator->status);
break;
}
} else {
diff --git a/source/libs/executor/src/scanoperator.c b/source/libs/executor/src/scanoperator.c
index fc36d740a9b331dbcb338684afb9af24592597c3..927ef2c64d03c050d859a2878765d4cfcddc7f41 100644
--- a/source/libs/executor/src/scanoperator.c
+++ b/source/libs/executor/src/scanoperator.c
@@ -178,8 +178,8 @@ static SResultRow* getTableGroupOutputBuf(SOperatorInfo* pOperator, uint64_t gro
STableScanInfo* pTableScanInfo = pOperator->info;
- SResultRowPosition* p1 = (SResultRowPosition*)taosHashGet(pTableScanInfo->pdInfo.pAggSup->pResultRowHashTable, buf,
- GET_RES_WINDOW_KEY_LEN(sizeof(groupId)));
+ SResultRowPosition* p1 = (SResultRowPosition*)tSimpleHashGet(pTableScanInfo->pdInfo.pAggSup->pResultRowHashTable, buf,
+ GET_RES_WINDOW_KEY_LEN(sizeof(groupId)));
if (p1 == NULL) {
return NULL;
@@ -617,19 +617,29 @@ static SSDataBlock* doTableScan(SOperatorInfo* pOperator) {
// if scan table by table
if (pInfo->scanMode == TABLE_SCAN__TABLE_ORDER) {
- if (pInfo->noTable) return NULL;
+ if (pInfo->noTable) {
+ return NULL;
+ }
+
+ int32_t numOfTables = taosArrayGetSize(pTaskInfo->tableqinfoList.pTableList);
+
while (1) {
SSDataBlock* result = doTableScanGroup(pOperator);
if (result) {
return result;
}
+
// if no data, switch to next table and continue scan
pInfo->currentTable++;
- if (pInfo->currentTable >= taosArrayGetSize(pTaskInfo->tableqinfoList.pTableList)) {
+ if (pInfo->currentTable >= numOfTables) {
return NULL;
}
+
STableKeyInfo* pTableInfo = taosArrayGet(pTaskInfo->tableqinfoList.pTableList, pInfo->currentTable);
tsdbSetTableId(pInfo->dataReader, pTableInfo->uid);
+ qDebug("set uid:%" PRIu64 " into scanner, total tables:%d, index:%d %s", pTableInfo->uid, numOfTables,
+ pInfo->currentTable, pTaskInfo->id.str);
+
tsdbReaderReset(pInfo->dataReader, &pInfo->cond);
pInfo->scanTimes = 0;
}
@@ -695,6 +705,7 @@ static void destroyTableScanOperatorInfo(void* param) {
cleanupQueryTableDataCond(&pTableScanInfo->cond);
tsdbReaderClose(pTableScanInfo->dataReader);
+ pTableScanInfo->dataReader = NULL;
if (pTableScanInfo->pColMatchInfo != NULL) {
taosArrayDestroy(pTableScanInfo->pColMatchInfo);
@@ -909,6 +920,19 @@ _error:
}
static void doClearBufferedBlocks(SStreamScanInfo* pInfo) {
+#if 0
+ if (pInfo->blockType == STREAM_INPUT__DATA_BLOCK) {
+ size_t total = taosArrayGetSize(pInfo->pBlockLists);
+ for (int32_t i = 0; i < total; i++) {
+ SSDataBlock* p = taosArrayGetP(pInfo->pBlockLists, i);
+ taosArrayDestroy(p->pDataBlock);
+ taosMemoryFree(p);
+ }
+ }
+#endif
+ taosArrayClear(pInfo->pBlockLists);
+ pInfo->validBlockIndex = 0;
+#if 0
size_t total = taosArrayGetSize(pInfo->pBlockLists);
pInfo->validBlockIndex = 0;
@@ -917,52 +941,32 @@ static void doClearBufferedBlocks(SStreamScanInfo* pInfo) {
blockDataDestroy(p);
}
taosArrayClear(pInfo->pBlockLists);
+#endif
}
static bool isSessionWindow(SStreamScanInfo* pInfo) {
- return pInfo->sessionSup.parentType == QUERY_NODE_PHYSICAL_PLAN_STREAM_SESSION ||
- pInfo->sessionSup.parentType == QUERY_NODE_PHYSICAL_PLAN_STREAM_SESSION;
+ return pInfo->windowSup.parentType == QUERY_NODE_PHYSICAL_PLAN_STREAM_SESSION ||
+ pInfo->windowSup.parentType == QUERY_NODE_PHYSICAL_PLAN_STREAM_SESSION;
}
static bool isStateWindow(SStreamScanInfo* pInfo) {
- return pInfo->sessionSup.parentType == QUERY_NODE_PHYSICAL_PLAN_STREAM_STATE;
+ return pInfo->windowSup.parentType == QUERY_NODE_PHYSICAL_PLAN_STREAM_STATE;
}
static bool isIntervalWindow(SStreamScanInfo* pInfo) {
- return pInfo->sessionSup.parentType == QUERY_NODE_PHYSICAL_PLAN_STREAM_INTERVAL ||
- pInfo->sessionSup.parentType == QUERY_NODE_PHYSICAL_PLAN_STREAM_SEMI_INTERVAL ||
- pInfo->sessionSup.parentType == QUERY_NODE_PHYSICAL_PLAN_STREAM_FINAL_INTERVAL;
+ return pInfo->windowSup.parentType == QUERY_NODE_PHYSICAL_PLAN_STREAM_INTERVAL ||
+ pInfo->windowSup.parentType == QUERY_NODE_PHYSICAL_PLAN_STREAM_SEMI_INTERVAL ||
+ pInfo->windowSup.parentType == QUERY_NODE_PHYSICAL_PLAN_STREAM_FINAL_INTERVAL;
}
static bool isSignleIntervalWindow(SStreamScanInfo* pInfo) {
- return pInfo->sessionSup.parentType == QUERY_NODE_PHYSICAL_PLAN_STREAM_INTERVAL;
+ return pInfo->windowSup.parentType == QUERY_NODE_PHYSICAL_PLAN_STREAM_INTERVAL;
}
static bool isSlidingWindow(SStreamScanInfo* pInfo) {
return isIntervalWindow(pInfo) && pInfo->interval.interval != pInfo->interval.sliding;
}
-static uint64_t getGroupId(SOperatorInfo* pOperator, uint64_t uid) {
- uint64_t* groupId = taosHashGet(pOperator->pTaskInfo->tableqinfoList.map, &uid, sizeof(int64_t));
- if (groupId) {
- return *groupId;
- }
- return 0;
- /* Todo(liuyao) for partition by column
- recordNewGroupKeys(pTableScanInfo->pGroupCols, pTableScanInfo->pGroupColVals, pBlock, rowId);
- int32_t len = buildGroupKeys(pTableScanInfo->keyBuf, pTableScanInfo->pGroupColVals);
- uint64_t resId = 0;
- uint64_t* groupId = taosHashGet(pTableScanInfo->pGroupSet, pTableScanInfo->keyBuf, len);
- if (groupId) {
- return *groupId;
- } else if (len != 0) {
- resId = calcGroupId(pTableScanInfo->keyBuf, len);
- taosHashPut(pTableScanInfo->pGroupSet, pTableScanInfo->keyBuf, len, &resId, sizeof(uint64_t));
- }
- return resId;
- */
-}
-
static void setGroupId(SStreamScanInfo* pInfo, SSDataBlock* pBlock, int32_t groupColIndex, int32_t rowIndex) {
SColumnInfoData* pColInfo = taosArrayGet(pBlock->pDataBlock, groupColIndex);
uint64_t* groupCol = (uint64_t*)pColInfo->pData;
@@ -976,6 +980,65 @@ void resetTableScanInfo(STableScanInfo* pTableScanInfo, STimeWindow* pWin) {
pTableScanInfo->currentGroupId = -1;
}
+static void freeArray(void* array) { taosArrayDestroy(array); }
+
+static void resetTableScanOperator(SOperatorInfo* pTableScanOp) {
+ STableScanInfo* pTableScanInfo = pTableScanOp->info;
+ pTableScanInfo->cond.startVersion = -1;
+ pTableScanInfo->cond.endVersion = -1;
+ SArray* gpTbls = pTableScanOp->pTaskInfo->tableqinfoList.pGroupList;
+ SArray* allTbls = pTableScanOp->pTaskInfo->tableqinfoList.pTableList;
+ taosArrayClearP(gpTbls, freeArray);
+ taosArrayPush(gpTbls, &allTbls);
+ STimeWindow win = {.skey = INT64_MIN, .ekey = INT64_MAX};
+ resetTableScanInfo(pTableScanOp->info, &win);
+}
+
+static SSDataBlock* readPreVersionData(SOperatorInfo* pTableScanOp, uint64_t tbUid, TSKEY startTs, TSKEY endTs,
+ int64_t maxVersion) {
+ SArray* gpTbls = pTableScanOp->pTaskInfo->tableqinfoList.pGroupList;
+ taosArrayClear(gpTbls);
+ STableKeyInfo tblInfo = {.uid = tbUid, .groupId = 0};
+ SArray* tbls = taosArrayInit(1, sizeof(STableKeyInfo));
+ taosArrayPush(tbls, &tblInfo);
+ taosArrayPush(gpTbls, &tbls);
+
+ STimeWindow win = {.skey = startTs, .ekey = endTs};
+ STableScanInfo* pTableScanInfo = pTableScanOp->info;
+ pTableScanInfo->cond.startVersion = -1;
+ pTableScanInfo->cond.endVersion = maxVersion;
+ resetTableScanInfo(pTableScanOp->info, &win);
+ SSDataBlock* pRes = doTableScan(pTableScanOp);
+ resetTableScanOperator(pTableScanOp);
+ return pRes;
+}
+
+static uint64_t getGroupIdByCol(SStreamScanInfo* pInfo, uint64_t uid, TSKEY ts, int64_t maxVersion) {
+ SSDataBlock* pPreRes = readPreVersionData(pInfo->pTableScanOp, uid, ts, ts, maxVersion);
+ if (!pPreRes || pPreRes->info.rows == 0) {
+ return 0;
+ }
+ ASSERT(pPreRes->info.rows == 1);
+ return calGroupIdByData(&pInfo->partitionSup, pInfo->pPartScalarSup, pPreRes, 0);
+}
+
+static uint64_t getGroupIdByUid(SStreamScanInfo* pInfo, uint64_t uid) {
+ SHashObj* map = pInfo->pTableScanOp->pTaskInfo->tableqinfoList.map;
+ uint64_t* groupId = taosHashGet(map, &uid, sizeof(int64_t));
+ if (groupId) {
+ return *groupId;
+ }
+ return 0;
+}
+
+static uint64_t getGroupIdByData(SStreamScanInfo* pInfo, uint64_t uid, TSKEY ts, int64_t maxVersion) {
+ if (pInfo->partitionSup.needCalc) {
+ return getGroupIdByCol(pInfo, uid, ts, maxVersion);
+ }
+
+ return getGroupIdByUid(pInfo, uid);
+}
+
static bool prepareRangeScan(SStreamScanInfo* pInfo, SSDataBlock* pBlock, int32_t* pRowIndex) {
if ((*pRowIndex) == pBlock->info.rows) {
return false;
@@ -987,6 +1050,9 @@ static bool prepareRangeScan(SStreamScanInfo* pInfo, SSDataBlock* pBlock, int32_
SColumnInfoData* pEndTsCol = taosArrayGet(pBlock->pDataBlock, END_TS_COLUMN_INDEX);
TSKEY* endData = (TSKEY*)pEndTsCol->pData;
STimeWindow win = {.skey = startData[*pRowIndex], .ekey = endData[*pRowIndex]};
+ SColumnInfoData* pGpCol = taosArrayGet(pBlock->pDataBlock, GROUPID_COLUMN_INDEX);
+ uint64_t* gpData = (uint64_t*)pGpCol->pData;
+ uint64_t groupId = gpData[*pRowIndex];
SColumnInfoData* pCalStartTsCol = taosArrayGet(pBlock->pDataBlock, CALCULATE_START_TS_COLUMN_INDEX);
TSKEY* calStartData = (TSKEY*)pCalStartTsCol->pData;
@@ -1001,11 +1067,11 @@ static bool prepareRangeScan(SStreamScanInfo* pInfo, SSDataBlock* pBlock, int32_
(*pRowIndex)++;
for (; *pRowIndex < pBlock->info.rows; (*pRowIndex)++) {
- if (win.skey == startData[*pRowIndex]) {
+ if (win.skey == startData[*pRowIndex] && groupId == gpData[*pRowIndex]) {
win.ekey = TMAX(win.ekey, endData[*pRowIndex]);
continue;
}
- if (win.skey == endData[*pRowIndex]) {
+ if (win.skey == endData[*pRowIndex] && groupId == gpData[*pRowIndex]) {
win.skey = TMIN(win.skey, startData[*pRowIndex]);
continue;
}
@@ -1019,22 +1085,32 @@ static bool prepareRangeScan(SStreamScanInfo* pInfo, SSDataBlock* pBlock, int32_
return true;
}
-static STimeWindow getSlidingWindow(TSKEY* tsCol, SInterval* pInterval, SDataBlockInfo* pDataBlockInfo,
- int32_t* pRowIndex) {
+static STimeWindow getSlidingWindow(TSKEY* startTsCol, TSKEY* endTsCol, uint64_t* gpIdCol, SInterval* pInterval,
+ SDataBlockInfo* pDataBlockInfo, int32_t* pRowIndex, bool hasGroup) {
SResultRowInfo dumyInfo;
dumyInfo.cur.pageId = -1;
- STimeWindow win = getActiveTimeWindow(NULL, &dumyInfo, tsCol[*pRowIndex], pInterval, TSDB_ORDER_ASC);
+ STimeWindow win = getActiveTimeWindow(NULL, &dumyInfo, startTsCol[*pRowIndex], pInterval, TSDB_ORDER_ASC);
STimeWindow endWin = win;
STimeWindow preWin = win;
+ uint64_t groupId = gpIdCol[*pRowIndex];
while (1) {
- (*pRowIndex) += getNumOfRowsInTimeWindow(pDataBlockInfo, tsCol, *pRowIndex, endWin.ekey, binarySearchForKey, NULL,
- TSDB_ORDER_ASC);
+ if (hasGroup) {
+ (*pRowIndex) += 1;
+ } else {
+ while ((groupId == gpIdCol[(*pRowIndex)] && startTsCol[*pRowIndex] < endWin.ekey)) {
+ (*pRowIndex) += 1;
+ if ((*pRowIndex) == pDataBlockInfo->rows) {
+ break;
+ }
+ }
+ }
+
do {
preWin = endWin;
getNextTimeWindow(pInterval, &endWin, TSDB_ORDER_ASC);
- } while (tsCol[(*pRowIndex) - 1] >= endWin.skey);
+ } while (endTsCol[(*pRowIndex) - 1] >= endWin.skey);
endWin = preWin;
- if (win.ekey == endWin.ekey || (*pRowIndex) == pDataBlockInfo->rows) {
+ if (win.ekey == endWin.ekey || (*pRowIndex) == pDataBlockInfo->rows || groupId != gpIdCol[*pRowIndex]) {
win.ekey = endWin.ekey;
return win;
}
@@ -1060,7 +1136,31 @@ static SSDataBlock* doRangeScan(SStreamScanInfo* pInfo, SSDataBlock* pSDB, int32
return NULL;
}
- if (pResult->info.groupId == pInfo->groupId) {
+ doFilter(pInfo->pCondition, pResult, NULL);
+ if (pResult->info.rows == 0) {
+ continue;
+ }
+
+ if (pInfo->partitionSup.needCalc) {
+ SSDataBlock* tmpBlock = createOneDataBlock(pResult, true);
+ blockDataCleanup(pResult);
+ for (int32_t i = 0; i < tmpBlock->info.rows; i++) {
+ if (calGroupIdByData(&pInfo->partitionSup, pInfo->pPartScalarSup, tmpBlock, i) == pInfo->groupId) {
+ for (int32_t j = 0; j < pInfo->pTableScanOp->exprSupp.numOfExprs; j++) {
+ SColumnInfoData* pSrcCol = taosArrayGet(tmpBlock->pDataBlock, j);
+ SColumnInfoData* pDestCol = taosArrayGet(pResult->pDataBlock, j);
+ bool isNull = colDataIsNull(pSrcCol, tmpBlock->info.rows, i, NULL);
+ char* pSrcData = colDataGetData(pSrcCol, i);
+ colDataAppend(pDestCol, pResult->info.rows, pSrcData, isNull);
+ }
+ pResult->info.rows++;
+ }
+ }
+ if (pResult->info.rows > 0) {
+ pResult->info.calWin = pInfo->updateWin;
+ return pResult;
+ }
+ } else if (pResult->info.groupId == pInfo->groupId) {
pResult->info.calWin = pInfo->updateWin;
return pResult;
}
@@ -1091,17 +1191,18 @@ static int32_t generateSessionScanRange(SStreamScanInfo* pInfo, SSDataBlock* pSr
SColumnInfoData* pDestCalStartTsCol = taosArrayGet(pDestBlock->pDataBlock, CALCULATE_START_TS_COLUMN_INDEX);
SColumnInfoData* pDestCalEndTsCol = taosArrayGet(pDestBlock->pDataBlock, CALCULATE_END_TS_COLUMN_INDEX);
int32_t dummy = 0;
+ int64_t version = pSrcBlock->info.version - 1;
for (int32_t i = 0; i < pSrcBlock->info.rows; i++) {
- uint64_t groupId = getGroupId(pInfo->pTableScanOp, uidCol[i]);
+ uint64_t groupId = getGroupIdByData(pInfo, uidCol[i], startData[i], version);
// gap must be 0.
SResultWindowInfo* pStartWin =
- getCurSessionWindow(pInfo->sessionSup.pStreamAggSup, startData[i], endData[i], groupId, 0, &dummy);
+ getCurSessionWindow(pInfo->windowSup.pStreamAggSup, startData[i], endData[i], groupId, 0, &dummy);
if (!pStartWin) {
// window has been closed.
continue;
}
SResultWindowInfo* pEndWin =
- getCurSessionWindow(pInfo->sessionSup.pStreamAggSup, endData[i], endData[i], groupId, 0, &dummy);
+ getCurSessionWindow(pInfo->windowSup.pStreamAggSup, endData[i], endData[i], groupId, 0, &dummy);
ASSERT(pEndWin);
TSKEY ts = INT64_MIN;
colDataAppend(pDestStartCol, i, (const char*)&pStartWin->win.skey, false);
@@ -1121,34 +1222,83 @@ static int32_t generateIntervalScanRange(SStreamScanInfo* pInfo, SSDataBlock* pS
if (rows == 0) {
return TSDB_CODE_SUCCESS;
}
- int32_t code = blockDataEnsureCapacity(pDestBlock, rows);
+ int32_t code = blockDataEnsureCapacity(pDestBlock, rows * 2);
if (code != TSDB_CODE_SUCCESS) {
return code;
}
- SColumnInfoData* pTsCol = (SColumnInfoData*)taosArrayGet(pSrcBlock->pDataBlock, START_TS_COLUMN_INDEX);
- SColumnInfoData* pUidCol = taosArrayGet(pSrcBlock->pDataBlock, UID_COLUMN_INDEX);
- uint64_t* uidCol = (uint64_t*)pUidCol->pData;
- ASSERT(pTsCol->info.type == TSDB_DATA_TYPE_TIMESTAMP);
- TSKEY* tsCol = (TSKEY*)pTsCol->pData;
+ SColumnInfoData* pSrcStartTsCol = (SColumnInfoData*)taosArrayGet(pSrcBlock->pDataBlock, START_TS_COLUMN_INDEX);
+ SColumnInfoData* pSrcEndTsCol = (SColumnInfoData*)taosArrayGet(pSrcBlock->pDataBlock, END_TS_COLUMN_INDEX);
+ SColumnInfoData* pSrcUidCol = taosArrayGet(pSrcBlock->pDataBlock, UID_COLUMN_INDEX);
+ uint64_t* srcUidData = (uint64_t*)pSrcUidCol->pData;
+ SColumnInfoData* pSrcGpCol = taosArrayGet(pSrcBlock->pDataBlock, GROUPID_COLUMN_INDEX);
+ uint64_t* srcGp = (uint64_t*)pSrcGpCol->pData;
+ ASSERT(pSrcStartTsCol->info.type == TSDB_DATA_TYPE_TIMESTAMP);
+ TSKEY* srcStartTsCol = (TSKEY*)pSrcStartTsCol->pData;
+ TSKEY* srcEndTsCol = (TSKEY*)pSrcEndTsCol->pData;
SColumnInfoData* pStartTsCol = taosArrayGet(pDestBlock->pDataBlock, START_TS_COLUMN_INDEX);
SColumnInfoData* pEndTsCol = taosArrayGet(pDestBlock->pDataBlock, END_TS_COLUMN_INDEX);
+ SColumnInfoData* pDeUidCol = taosArrayGet(pDestBlock->pDataBlock, UID_COLUMN_INDEX);
SColumnInfoData* pGpCol = taosArrayGet(pDestBlock->pDataBlock, GROUPID_COLUMN_INDEX);
SColumnInfoData* pCalStartTsCol = taosArrayGet(pDestBlock->pDataBlock, CALCULATE_START_TS_COLUMN_INDEX);
SColumnInfoData* pCalEndTsCol = taosArrayGet(pDestBlock->pDataBlock, CALCULATE_END_TS_COLUMN_INDEX);
- uint64_t groupId = getGroupId(pInfo->pTableScanOp, uidCol[0]);
+ int64_t version = pSrcBlock->info.version - 1;
for (int32_t i = 0; i < rows;) {
- colDataAppend(pCalStartTsCol, pDestBlock->info.rows, (const char*)(tsCol + i), false);
- STimeWindow win = getSlidingWindow(tsCol, &pInfo->interval, &pSrcBlock->info, &i);
- colDataAppend(pCalEndTsCol, pDestBlock->info.rows, (const char*)(tsCol + i - 1), false);
-
+ uint64_t srcUid = srcUidData[i];
+ uint64_t groupId = srcGp[i];
+ if (groupId == 0) {
+ groupId = getGroupIdByData(pInfo, srcUid, srcStartTsCol[i], version);
+ }
+ TSKEY calStartTs = srcStartTsCol[i];
+ colDataAppend(pCalStartTsCol, pDestBlock->info.rows, (const char*)(&calStartTs), false);
+ STimeWindow win = getSlidingWindow(srcStartTsCol, srcEndTsCol, srcGp, &pInfo->interval, &pSrcBlock->info, &i,
+ pInfo->partitionSup.needCalc);
+ TSKEY calEndTs = srcStartTsCol[i - 1];
+ colDataAppend(pCalEndTsCol, pDestBlock->info.rows, (const char*)(&calEndTs), false);
+ colDataAppend(pDeUidCol, pDestBlock->info.rows, (const char*)(&srcUid), false);
colDataAppend(pStartTsCol, pDestBlock->info.rows, (const char*)(&win.skey), false);
colDataAppend(pEndTsCol, pDestBlock->info.rows, (const char*)(&win.ekey), false);
colDataAppend(pGpCol, pDestBlock->info.rows, (const char*)(&groupId), false);
pDestBlock->info.rows++;
}
- // all rows have same group id
- pDestBlock->info.groupId = groupId;
+ return TSDB_CODE_SUCCESS;
+}
+
+static int32_t generateDeleteResultBlock(SStreamScanInfo* pInfo, SSDataBlock* pSrcBlock, SSDataBlock* pDestBlock) {
+ if (pSrcBlock->info.rows == 0) {
+ return TSDB_CODE_SUCCESS;
+ }
+ blockDataCleanup(pDestBlock);
+ int32_t code = blockDataEnsureCapacity(pDestBlock, pSrcBlock->info.rows);
+ if (code != TSDB_CODE_SUCCESS) {
+ return code;
+ }
+ ASSERT(taosArrayGetSize(pSrcBlock->pDataBlock) >= 3);
+ SColumnInfoData* pStartTsCol = taosArrayGet(pSrcBlock->pDataBlock, START_TS_COLUMN_INDEX);
+ TSKEY* startData = (TSKEY*)pStartTsCol->pData;
+ SColumnInfoData* pEndTsCol = taosArrayGet(pSrcBlock->pDataBlock, END_TS_COLUMN_INDEX);
+ TSKEY* endData = (TSKEY*)pEndTsCol->pData;
+ SColumnInfoData* pUidCol = taosArrayGet(pSrcBlock->pDataBlock, UID_COLUMN_INDEX);
+ uint64_t* uidCol = (uint64_t*)pUidCol->pData;
+
+ SColumnInfoData* pDestStartCol = taosArrayGet(pDestBlock->pDataBlock, START_TS_COLUMN_INDEX);
+ SColumnInfoData* pDestEndCol = taosArrayGet(pDestBlock->pDataBlock, END_TS_COLUMN_INDEX);
+ SColumnInfoData* pDestUidCol = taosArrayGet(pDestBlock->pDataBlock, UID_COLUMN_INDEX);
+ SColumnInfoData* pDestGpCol = taosArrayGet(pDestBlock->pDataBlock, GROUPID_COLUMN_INDEX);
+ SColumnInfoData* pDestCalStartTsCol = taosArrayGet(pDestBlock->pDataBlock, CALCULATE_START_TS_COLUMN_INDEX);
+ SColumnInfoData* pDestCalEndTsCol = taosArrayGet(pDestBlock->pDataBlock, CALCULATE_END_TS_COLUMN_INDEX);
+ int32_t dummy = 0;
+ int64_t version = pSrcBlock->info.version - 1;
+ for (int32_t i = 0; i < pSrcBlock->info.rows; i++) {
+ uint64_t groupId = getGroupIdByData(pInfo, uidCol[i], startData[i], version);
+ colDataAppend(pDestStartCol, i, (const char*)(startData + i), false);
+ colDataAppend(pDestEndCol, i, (const char*)(endData + i), false);
+ colDataAppendNULL(pDestUidCol, i);
+ colDataAppend(pDestGpCol, i, (const char*)&groupId, false);
+ colDataAppendNULL(pDestCalStartTsCol, i);
+ colDataAppendNULL(pDestCalEndTsCol, i);
+ pDestBlock->info.rows++;
+ }
return TSDB_CODE_SUCCESS;
}
@@ -1156,28 +1306,35 @@ static int32_t generateScanRange(SStreamScanInfo* pInfo, SSDataBlock* pSrcBlock,
int32_t code = TSDB_CODE_SUCCESS;
if (isIntervalWindow(pInfo)) {
code = generateIntervalScanRange(pInfo, pSrcBlock, pDestBlock);
- } else {
+ } else if (isSessionWindow(pInfo) || isStateWindow(pInfo)) {
code = generateSessionScanRange(pInfo, pSrcBlock, pDestBlock);
}
pDestBlock->info.type = STREAM_CLEAR;
+ pDestBlock->info.version = pSrcBlock->info.version;
blockDataUpdateTsWindow(pDestBlock, 0);
return code;
}
-void appendOneRow(SSDataBlock* pBlock, TSKEY* pStartTs, TSKEY* pEndTs, int32_t uidCol, uint64_t* pID) {
+void appendOneRow(SSDataBlock* pBlock, TSKEY* pStartTs, TSKEY* pEndTs, uint64_t* pUid, uint64_t* pGp) {
SColumnInfoData* pStartTsCol = taosArrayGet(pBlock->pDataBlock, START_TS_COLUMN_INDEX);
SColumnInfoData* pEndTsCol = taosArrayGet(pBlock->pDataBlock, END_TS_COLUMN_INDEX);
- SColumnInfoData* pUidCol = taosArrayGet(pBlock->pDataBlock, uidCol);
+ SColumnInfoData* pUidCol = taosArrayGet(pBlock->pDataBlock, UID_COLUMN_INDEX);
+ SColumnInfoData* pGpCol = taosArrayGet(pBlock->pDataBlock, GROUPID_COLUMN_INDEX);
+ SColumnInfoData* pCalStartCol = taosArrayGet(pBlock->pDataBlock, CALCULATE_START_TS_COLUMN_INDEX);
+ SColumnInfoData* pCalEndCol = taosArrayGet(pBlock->pDataBlock, CALCULATE_END_TS_COLUMN_INDEX);
colDataAppend(pStartTsCol, pBlock->info.rows, (const char*)pStartTs, false);
colDataAppend(pEndTsCol, pBlock->info.rows, (const char*)pEndTs, false);
- colDataAppend(pUidCol, pBlock->info.rows, (const char*)pID, false);
+ colDataAppend(pUidCol, pBlock->info.rows, (const char*)pUid, false);
+ colDataAppend(pGpCol, pBlock->info.rows, (const char*)pGp, false);
+ colDataAppendNULL(pCalStartCol, pBlock->info.rows);
+ colDataAppendNULL(pCalEndCol, pBlock->info.rows);
pBlock->info.rows++;
}
static void checkUpdateData(SStreamScanInfo* pInfo, bool invertible, SSDataBlock* pBlock, bool out) {
if (out) {
blockDataCleanup(pInfo->pUpdateDataRes);
- blockDataEnsureCapacity(pInfo->pUpdateDataRes, pBlock->info.rows);
+ blockDataEnsureCapacity(pInfo->pUpdateDataRes, pBlock->info.rows * 2);
}
SColumnInfoData* pColDataInfo = taosArrayGet(pBlock->pDataBlock, pInfo->primaryTsIndex);
ASSERT(pColDataInfo->info.type == TSDB_DATA_TYPE_TIMESTAMP);
@@ -1195,31 +1352,28 @@ static void checkUpdateData(SStreamScanInfo* pInfo, bool invertible, SSDataBlock
// must check update info first.
bool update = updateInfoIsUpdated(pInfo->pUpdateInfo, pBlock->info.uid, tsCol[rowId]);
bool closedWin = isClosed && isSignleIntervalWindow(pInfo) &&
- isDeletedWindow(&win, pBlock->info.groupId, pInfo->sessionSup.pIntervalAggSup);
+ isDeletedStreamWindow(&win, pBlock->info.groupId, pInfo->pTableScanOp, &pInfo->twAggSup);
if ((update || closedWin) && out) {
- appendOneRow(pInfo->pUpdateDataRes, tsCol + rowId, tsCol + rowId, UID_COLUMN_INDEX, &pBlock->info.uid);
+ qDebug("stream update check not pass, update %d, closedWin %d", update, closedWin);
+ uint64_t gpId = 0;
+ appendOneRow(pInfo->pUpdateDataRes, tsCol + rowId, tsCol + rowId, &pBlock->info.uid, &gpId);
+ if (closedWin && pInfo->partitionSup.needCalc) {
+ gpId = calGroupIdByData(&pInfo->partitionSup, pInfo->pPartScalarSup, pBlock, rowId);
+ appendOneRow(pInfo->pUpdateDataRes, tsCol + rowId, tsCol + rowId, &pBlock->info.uid, &gpId);
+ }
}
}
- if (out) {
+ if (out && pInfo->pUpdateDataRes->info.rows > 0) {
+ pInfo->pUpdateDataRes->info.version = pBlock->info.version;
blockDataUpdateTsWindow(pInfo->pUpdateDataRes, 0);
- pInfo->pUpdateDataRes->info.type = STREAM_CLEAR;
- }
-}
-
-static void setBlockGroupId(SOperatorInfo* pOperator, SSDataBlock* pBlock, int32_t uidColIndex) {
- ASSERT(taosArrayGetSize(pBlock->pDataBlock) >= 3);
- SColumnInfoData* pColDataInfo = taosArrayGet(pBlock->pDataBlock, uidColIndex);
- uint64_t* uidCol = (uint64_t*)pColDataInfo->pData;
- ASSERT(pBlock->info.rows > 0);
- for (int32_t i = 0; i < pBlock->info.rows; i++) {
- uidCol[i] = getGroupId(pOperator, uidCol[i]);
+ pInfo->pUpdateDataRes->info.type = pInfo->partitionSup.needCalc ? STREAM_DELETE_DATA : STREAM_CLEAR;
}
}
static int32_t setBlockIntoRes(SStreamScanInfo* pInfo, const SSDataBlock* pBlock) {
SDataBlockInfo* pBlockInfo = &pInfo->pRes->info;
SOperatorInfo* pOperator = pInfo->pStreamScanOp;
- SExecTaskInfo* pTaskInfo = pInfo->pStreamScanOp->pTaskInfo;
+ SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo;
blockDataEnsureCapacity(pInfo->pRes, pBlock->info.rows);
@@ -1228,7 +1382,7 @@ static int32_t setBlockIntoRes(SStreamScanInfo* pInfo, const SSDataBlock* pBlock
pInfo->pRes->info.type = STREAM_NORMAL;
pInfo->pRes->info.version = pBlock->info.version;
- uint64_t* groupIdPre = taosHashGet(pOperator->pTaskInfo->tableqinfoList.map, &pBlock->info.uid, sizeof(int64_t));
+ uint64_t* groupIdPre = taosHashGet(pTaskInfo->tableqinfoList.map, &pBlock->info.uid, sizeof(int64_t));
if (groupIdPre) {
pInfo->pRes->info.groupId = *groupIdPre;
} else {
@@ -1276,48 +1430,71 @@ static int32_t setBlockIntoRes(SStreamScanInfo* pInfo, const SSDataBlock* pBlock
return 0;
}
-static SSDataBlock* doStreamScan(SOperatorInfo* pOperator) {
- // NOTE: this operator does never check if current status is done or not
+static SSDataBlock* doQueueScan(SOperatorInfo* pOperator) {
SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo;
SStreamScanInfo* pInfo = pOperator->info;
-#if 0
- SStreamState* pState = pTaskInfo->streamInfo.pState;
- if (pState) {
- printf(">>>>>>>> stream write backend\n");
- SWinKey key = {
- .ts = 1,
- .groupId = 2,
- };
- char tmp[100] = "abcdefg1";
- if (streamStatePut(pState, &key, &tmp, strlen(tmp) + 1) < 0) {
- ASSERT(0);
- }
+ qDebug("queue scan called");
- key.ts = 2;
- char tmp2[100] = "abcdefg2";
- if (streamStatePut(pState, &key, &tmp2, strlen(tmp2) + 1) < 0) {
- ASSERT(0);
+ if (pTaskInfo->streamInfo.pReq != NULL) {
+ if (pInfo->tqReader->pMsg == NULL) {
+ pInfo->tqReader->pMsg = pTaskInfo->streamInfo.pReq;
+ const SSubmitReq* pSubmit = pInfo->tqReader->pMsg;
+ if (tqReaderSetDataMsg(pInfo->tqReader, pSubmit, 0) < 0) {
+ qError("submit msg messed up when initing stream submit block %p", pSubmit);
+ pInfo->tqReader->pMsg = NULL;
+ pTaskInfo->streamInfo.pReq = NULL;
+ ASSERT(0);
+ }
}
- key.groupId = 5;
- key.ts = 1;
- char tmp3[100] = "abcdefg3";
- if (streamStatePut(pState, &key, &tmp3, strlen(tmp3) + 1) < 0) {
- ASSERT(0);
+ blockDataCleanup(pInfo->pRes);
+ SDataBlockInfo* pBlockInfo = &pInfo->pRes->info;
+
+ while (tqNextDataBlock(pInfo->tqReader)) {
+ SSDataBlock block = {0};
+
+ int32_t code = tqRetrieveDataBlock(&block, pInfo->tqReader);
+
+ if (code != TSDB_CODE_SUCCESS || block.info.rows == 0) {
+ continue;
+ }
+
+ setBlockIntoRes(pInfo, &block);
+
+ if (pBlockInfo->rows > 0) {
+ return pInfo->pRes;
+ }
}
- char* val2 = NULL;
- int32_t sz;
- if (streamStateGet(pState, &key, (void**)&val2, &sz) < 0) {
- ASSERT(0);
+ pInfo->tqReader->pMsg = NULL;
+ pTaskInfo->streamInfo.pReq = NULL;
+ return NULL;
+ }
+
+ if (pTaskInfo->streamInfo.prepareStatus.type == TMQ_OFFSET__SNAPSHOT_DATA) {
+ SSDataBlock* pResult = doTableScan(pInfo->pTableScanOp);
+ if (pResult && pResult->info.rows > 0) {
+ qDebug("queue scan tsdb return %d rows", pResult->info.rows);
+ pTaskInfo->streamInfo.returned = 1;
+ return pResult;
+ } else {
+ if (!pTaskInfo->streamInfo.returned) {
+ STableScanInfo* pTSInfo = pInfo->pTableScanOp->info;
+ tsdbReaderClose(pTSInfo->dataReader);
+ pTSInfo->dataReader = NULL;
+ tqOffsetResetToLog(&pTaskInfo->streamInfo.prepareStatus, pTaskInfo->streamInfo.snapshotVer);
+ qDebug("queue scan tsdb over, switch to wal ver %d", pTaskInfo->streamInfo.snapshotVer + 1);
+ if (tqSeekVer(pInfo->tqReader, pTaskInfo->streamInfo.snapshotVer + 1) < 0) {
+ return NULL;
+ }
+ ASSERT(pInfo->tqReader->pWalReader->curVersion == pTaskInfo->streamInfo.snapshotVer + 1);
+ } else {
+ return NULL;
+ }
}
- printf("stream read %s %d\n", val2, sz);
- streamFreeVal(val2);
}
-#endif
- qDebug("stream scan called");
if (pTaskInfo->streamInfo.prepareStatus.type == TMQ_OFFSET__LOG) {
while (1) {
SFetchRet ret = {0};
@@ -1327,28 +1504,29 @@ static SSDataBlock* doStreamScan(SOperatorInfo* pOperator) {
if (setBlockIntoRes(pInfo, &ret.data) < 0) {
ASSERT(0);
}
- // TODO clean data block
if (pInfo->pRes->info.rows > 0) {
- qDebug("stream scan log return %d rows", pInfo->pRes->info.rows);
+ pOperator->status = OP_EXEC_RECV;
+ qDebug("queue scan log return %d rows", pInfo->pRes->info.rows);
return pInfo->pRes;
}
} else if (ret.fetchType == FETCH_TYPE__META) {
ASSERT(0);
- pTaskInfo->streamInfo.lastStatus = ret.offset;
- pTaskInfo->streamInfo.metaBlk = ret.meta;
- return NULL;
- } else if (ret.fetchType == FETCH_TYPE__NONE) {
+ // pTaskInfo->streamInfo.lastStatus = ret.offset;
+ // pTaskInfo->streamInfo.metaBlk = ret.meta;
+ // return NULL;
+ } else if (ret.fetchType == FETCH_TYPE__NONE ||
+ (ret.fetchType == FETCH_TYPE__SEP && pOperator->status == OP_EXEC_RECV)) {
pTaskInfo->streamInfo.lastStatus = ret.offset;
ASSERT(pTaskInfo->streamInfo.lastStatus.version >= pTaskInfo->streamInfo.prepareStatus.version);
ASSERT(pTaskInfo->streamInfo.lastStatus.version + 1 == pInfo->tqReader->pWalReader->curVersion);
char formatBuf[80];
tFormatOffset(formatBuf, 80, &ret.offset);
- qDebug("stream scan log return null, offset %s", formatBuf);
+ qDebug("queue scan log return null, offset %s", formatBuf);
+ pOperator->status = OP_OPENED;
return NULL;
- } else {
- ASSERT(0);
}
}
+#if 0
} else if (pTaskInfo->streamInfo.prepareStatus.type == TMQ_OFFSET__SNAPSHOT_DATA) {
SSDataBlock* pResult = doTableScan(pInfo->pTableScanOp);
if (pResult && pResult->info.rows > 0) {
@@ -1357,11 +1535,112 @@ static SSDataBlock* doStreamScan(SOperatorInfo* pOperator) {
}
qDebug("stream scan tsdb return null");
return NULL;
- } else if (pTaskInfo->streamInfo.prepareStatus.type == TMQ_OFFSET__SNAPSHOT_META) {
- // TODO scan meta
+#endif
+ } else {
ASSERT(0);
return NULL;
}
+}
+
+static int32_t filterDelBlockByUid(SSDataBlock* pDst, const SSDataBlock* pSrc, SStreamScanInfo* pInfo) {
+ STqReader* pReader = pInfo->tqReader;
+ int32_t rows = pSrc->info.rows;
+ blockDataEnsureCapacity(pDst, rows);
+
+ SColumnInfoData* pSrcStartCol = taosArrayGet(pSrc->pDataBlock, START_TS_COLUMN_INDEX);
+ uint64_t* startCol = (uint64_t*)pSrcStartCol->pData;
+ SColumnInfoData* pSrcEndCol = taosArrayGet(pSrc->pDataBlock, END_TS_COLUMN_INDEX);
+ uint64_t* endCol = (uint64_t*)pSrcEndCol->pData;
+ SColumnInfoData* pSrcUidCol = taosArrayGet(pSrc->pDataBlock, UID_COLUMN_INDEX);
+ uint64_t* uidCol = (uint64_t*)pSrcUidCol->pData;
+
+ SColumnInfoData* pDstStartCol = taosArrayGet(pDst->pDataBlock, START_TS_COLUMN_INDEX);
+ SColumnInfoData* pDstEndCol = taosArrayGet(pDst->pDataBlock, END_TS_COLUMN_INDEX);
+ SColumnInfoData* pDstUidCol = taosArrayGet(pDst->pDataBlock, UID_COLUMN_INDEX);
+ int32_t j = 0;
+ for (int32_t i = 0; i < rows; i++) {
+ if (taosHashGet(pReader->tbIdHash, &uidCol[i], sizeof(uint64_t))) {
+ colDataAppend(pDstStartCol, j, (const char*)&startCol[i], false);
+ colDataAppend(pDstEndCol, j, (const char*)&endCol[i], false);
+ colDataAppend(pDstUidCol, j, (const char*)&uidCol[i], false);
+
+ colDataAppendNULL(taosArrayGet(pDst->pDataBlock, GROUPID_COLUMN_INDEX), j);
+ colDataAppendNULL(taosArrayGet(pDst->pDataBlock, CALCULATE_START_TS_COLUMN_INDEX), j);
+ colDataAppendNULL(taosArrayGet(pDst->pDataBlock, CALCULATE_END_TS_COLUMN_INDEX), j);
+ j++;
+ }
+ }
+ pDst->info = pSrc->info;
+ pDst->info.rows = j;
+
+ return 0;
+}
+
+// for partition by tag
+static void setBlockGroupIdByUid(SStreamScanInfo* pInfo, SSDataBlock* pBlock) {
+ SColumnInfoData* pStartTsCol = taosArrayGet(pBlock->pDataBlock, START_TS_COLUMN_INDEX);
+ TSKEY* startTsCol = (TSKEY*)pStartTsCol->pData;
+ SColumnInfoData* pGpCol = taosArrayGet(pBlock->pDataBlock, GROUPID_COLUMN_INDEX);
+ uint64_t* gpCol = (uint64_t*)pGpCol->pData;
+ SColumnInfoData* pUidCol = taosArrayGet(pBlock->pDataBlock, UID_COLUMN_INDEX);
+ uint64_t* uidCol = (uint64_t*)pUidCol->pData;
+ int32_t rows = pBlock->info.rows;
+ if (!pInfo->partitionSup.needCalc) {
+ for (int32_t i = 0; i < rows; i++) {
+ uint64_t groupId = getGroupIdByUid(pInfo, uidCol[i]);
+ colDataAppend(pGpCol, i, (const char*)&groupId, false);
+ }
+ } else {
+ // SSDataBlock* pPreRes = readPreVersionData(pInfo->pTableScanOp, uidCol[i], startTsCol, ts, maxVersion);
+ // if (!pPreRes || pPreRes->info.rows == 0) {
+ // return 0;
+ // }
+ // ASSERT(pPreRes->info.rows == 1);
+ // return calGroupIdByData(&pInfo->partitionSup, pInfo->pPartScalarSup, pPreRes, 0);
+ }
+}
+
+static SSDataBlock* doStreamScan(SOperatorInfo* pOperator) {
+ // NOTE: this operator does never check if current status is done or not
+ SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo;
+ SStreamScanInfo* pInfo = pOperator->info;
+
+ qDebug("stream scan called");
+#if 0
+ SStreamState* pState = pTaskInfo->streamInfo.pState;
+ if (pState) {
+ printf(">>>>>>>> stream write backend\n");
+ SWinKey key = {
+ .ts = 1,
+ .groupId = 2,
+ };
+ char tmp[100] = "abcdefg1";
+ if (streamStatePut(pState, &key, &tmp, strlen(tmp) + 1) < 0) {
+ ASSERT(0);
+ }
+
+ key.ts = 2;
+ char tmp2[100] = "abcdefg2";
+ if (streamStatePut(pState, &key, &tmp2, strlen(tmp2) + 1) < 0) {
+ ASSERT(0);
+ }
+
+ key.groupId = 5;
+ key.ts = 1;
+ char tmp3[100] = "abcdefg3";
+ if (streamStatePut(pState, &key, &tmp3, strlen(tmp3) + 1) < 0) {
+ ASSERT(0);
+ }
+
+ char* val2 = NULL;
+ int32_t sz;
+ if (streamStateGet(pState, &key, (void**)&val2, &sz) < 0) {
+ ASSERT(0);
+ }
+ printf("stream read %s %d\n", val2, sz);
+ streamFreeVal(val2);
+ }
+#endif
if (pTaskInfo->streamInfo.recoverStep == STREAM_RECOVER_STEP__PREPARE) {
STableScanInfo* pTSInfo = pInfo->pTableScanOp->info;
@@ -1383,9 +1662,10 @@ static SSDataBlock* doStreamScan(SOperatorInfo* pOperator) {
size_t total = taosArrayGetSize(pInfo->pBlockLists);
// TODO: refactor
+FETCH_NEXT_BLOCK:
if (pInfo->blockType == STREAM_INPUT__DATA_BLOCK) {
if (pInfo->validBlockIndex >= total) {
- /*doClearBufferedBlocks(pInfo);*/
+ doClearBufferedBlocks(pInfo);
/*pOperator->status = OP_EXEC_DONE;*/
return NULL;
}
@@ -1408,17 +1688,48 @@ static SSDataBlock* doStreamScan(SOperatorInfo* pOperator) {
updateInfoAddCloseWindowSBF(pInfo->pUpdateInfo);
} break;
case STREAM_DELETE_DATA: {
- pInfo->blockType = STREAM_INPUT__DATA_SUBMIT;
- pInfo->updateResIndex = 0;
- generateScanRange(pInfo, pBlock, pInfo->pUpdateRes);
- prepareRangeScan(pInfo, pInfo->pUpdateRes, &pInfo->updateResIndex);
- copyDataBlock(pInfo->pDeleteDataRes, pInfo->pUpdateRes);
- pInfo->scanMode = STREAM_SCAN_FROM_DATAREADER_RANGE;
- return pInfo->pDeleteDataRes;
+ printDataBlock(pBlock, "stream scan delete recv");
+ SSDataBlock* pDelBlock = NULL;
+ if (pInfo->tqReader) {
+ pDelBlock = createSpecialDataBlock(STREAM_DELETE_DATA);
+ filterDelBlockByUid(pDelBlock, pBlock, pInfo);
+ } else {
+ pDelBlock = pBlock;
+ }
+ setBlockGroupIdByUid(pInfo, pDelBlock);
+ printDataBlock(pDelBlock, "stream scan delete recv filtered");
+ if (!isIntervalWindow(pInfo) && !isSessionWindow(pInfo) && !isStateWindow(pInfo)) {
+ generateDeleteResultBlock(pInfo, pDelBlock, pInfo->pDeleteDataRes);
+ pInfo->pDeleteDataRes->info.type = STREAM_DELETE_RESULT;
+ printDataBlock(pDelBlock, "stream scan delete result");
+ if (pInfo->pDeleteDataRes->info.rows > 0) {
+ return pInfo->pDeleteDataRes;
+ } else {
+ goto FETCH_NEXT_BLOCK;
+ }
+ } else {
+ pInfo->blockType = STREAM_INPUT__DATA_SUBMIT;
+ pInfo->updateResIndex = 0;
+ generateScanRange(pInfo, pDelBlock, pInfo->pUpdateRes);
+ prepareRangeScan(pInfo, pInfo->pUpdateRes, &pInfo->updateResIndex);
+ copyDataBlock(pInfo->pDeleteDataRes, pInfo->pUpdateRes);
+ pInfo->pDeleteDataRes->info.type = STREAM_DELETE_DATA;
+ pInfo->scanMode = STREAM_SCAN_FROM_DATAREADER_RANGE;
+ printDataBlock(pDelBlock, "stream scan delete data");
+ if (pInfo->tqReader) {
+ blockDataDestroy(pDelBlock);
+ }
+ if (pInfo->pDeleteDataRes->info.rows > 0) {
+ return pInfo->pDeleteDataRes;
+ } else {
+ goto FETCH_NEXT_BLOCK;
+ }
+ }
} break;
default:
break;
}
+ // printDataBlock(pBlock, "stream scan recv");
return pBlock;
} else if (pInfo->blockType == STREAM_INPUT__DATA_SUBMIT) {
qDebug("scan mode %d", pInfo->scanMode);
@@ -1428,6 +1739,14 @@ static SSDataBlock* doStreamScan(SOperatorInfo* pOperator) {
pInfo->scanMode = STREAM_SCAN_FROM_READERHANDLE;
return pInfo->pRes;
} break;
+ case STREAM_SCAN_FROM_DELETE_DATA: {
+ generateScanRange(pInfo, pInfo->pUpdateDataRes, pInfo->pUpdateRes);
+ prepareRangeScan(pInfo, pInfo->pUpdateRes, &pInfo->updateResIndex);
+ pInfo->scanMode = STREAM_SCAN_FROM_DATAREADER_RANGE;
+ copyDataBlock(pInfo->pDeleteDataRes, pInfo->pUpdateRes);
+ pInfo->pDeleteDataRes->info.type = STREAM_DELETE_DATA;
+ return pInfo->pDeleteDataRes;
+ } break;
case STREAM_SCAN_FROM_UPDATERES: {
generateScanRange(pInfo, pInfo->pUpdateDataRes, pInfo->pUpdateRes);
prepareRangeScan(pInfo, pInfo->pUpdateRes, &pInfo->updateResIndex);
@@ -1443,6 +1762,7 @@ static SSDataBlock* doStreamScan(SOperatorInfo* pOperator) {
updateInfoSetScanRange(pInfo->pUpdateInfo, &pTableScanInfo->cond.twindows, pInfo->groupId, version);
pSDB->info.type = pInfo->scanMode == STREAM_SCAN_FROM_DATAREADER_RANGE ? STREAM_NORMAL : STREAM_PULL_DATA;
checkUpdateData(pInfo, true, pSDB, false);
+ // printDataBlock(pSDB, "stream scan update");
return pSDB;
}
pInfo->scanMode = STREAM_SCAN_FROM_READERHANDLE;
@@ -1451,7 +1771,7 @@ static SSDataBlock* doStreamScan(SOperatorInfo* pOperator) {
break;
}
- SStreamAggSupporter* pSup = pInfo->sessionSup.pStreamAggSup;
+ SStreamAggSupporter* pSup = pInfo->windowSup.pStreamAggSup;
if (isStateWindow(pInfo) && pSup->pScanBlock->info.rows > 0) {
pInfo->scanMode = STREAM_SCAN_FROM_DATAREADER_RANGE;
pInfo->updateResIndex = 0;
@@ -1465,9 +1785,12 @@ static SSDataBlock* doStreamScan(SOperatorInfo* pOperator) {
int32_t totBlockNum = taosArrayGetSize(pInfo->pBlockLists);
+ NEXT_SUBMIT_BLK:
while (1) {
if (pInfo->tqReader->pMsg == NULL) {
if (pInfo->validBlockIndex >= totBlockNum) {
+ updateInfoDestoryColseWinSBF(pInfo->pUpdateInfo);
+ doClearBufferedBlocks(pInfo);
return NULL;
}
@@ -1517,7 +1840,7 @@ static SSDataBlock* doStreamScan(SOperatorInfo* pOperator) {
// record the scan action.
pInfo->numOfExec++;
pOperator->resultInfo.totalRows += pBlockInfo->rows;
- printDataBlock(pInfo->pRes, "stream scan");
+ // printDataBlock(pInfo->pRes, "stream scan");
if (pBlockInfo->rows == 0) {
updateInfoDestoryColseWinSBF(pInfo->pUpdateInfo);
@@ -1526,30 +1849,31 @@ static SSDataBlock* doStreamScan(SOperatorInfo* pOperator) {
checkUpdateData(pInfo, true, pInfo->pRes, true);
pInfo->twAggSup.maxTs = TMAX(pInfo->twAggSup.maxTs, pBlockInfo->window.ekey);
if (pInfo->pUpdateDataRes->info.rows > 0) {
+ pInfo->updateResIndex = 0;
if (pInfo->pUpdateDataRes->info.type == STREAM_CLEAR) {
- pInfo->updateResIndex = 0;
pInfo->scanMode = STREAM_SCAN_FROM_UPDATERES;
} else if (pInfo->pUpdateDataRes->info.type == STREAM_INVERT) {
pInfo->scanMode = STREAM_SCAN_FROM_RES;
return pInfo->pUpdateDataRes;
+ } else if (pInfo->pUpdateDataRes->info.type == STREAM_DELETE_DATA) {
+ pInfo->scanMode = STREAM_SCAN_FROM_DELETE_DATA;
}
}
}
qDebug("scan rows: %d", pBlockInfo->rows);
- return (pBlockInfo->rows == 0) ? NULL : pInfo->pRes;
-
+ if (pBlockInfo->rows > 0) {
+ return pInfo->pRes;
+ } else {
+ goto NEXT_SUBMIT_BLK;
+ }
+ /*return (pBlockInfo->rows == 0) ? NULL : pInfo->pRes;*/
} else {
ASSERT(0);
return NULL;
}
}
-static SSDataBlock* doRawScan(SOperatorInfo* pInfo) {
- //
- return NULL;
-}
-
static SArray* extractTableIdList(const STableListInfo* pTableGroupInfo) {
SArray* tableIdList = taosArrayInit(4, sizeof(uint64_t));
@@ -1562,17 +1886,156 @@ static SArray* extractTableIdList(const STableListInfo* pTableGroupInfo) {
return tableIdList;
}
+static SSDataBlock* doRawScan(SOperatorInfo* pOperator) {
+ // NOTE: this operator does never check if current status is done or not
+ SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo;
+ SStreamRawScanInfo* pInfo = pOperator->info;
+ pTaskInfo->streamInfo.metaRsp.metaRspLen = 0; // use metaRspLen !=0 to judge if data is meta
+ pTaskInfo->streamInfo.metaRsp.metaRsp = NULL;
+
+ qDebug("tmqsnap doRawScan called");
+ if (pTaskInfo->streamInfo.prepareStatus.type == TMQ_OFFSET__SNAPSHOT_DATA) {
+ SSDataBlock* pBlock = &pInfo->pRes;
+
+ if (pInfo->dataReader && tsdbNextDataBlock(pInfo->dataReader)) {
+ if (isTaskKilled(pTaskInfo)) {
+ longjmp(pTaskInfo->env, TSDB_CODE_TSC_QUERY_CANCELLED);
+ }
+
+ tsdbRetrieveDataBlockInfo(pInfo->dataReader, &pBlock->info);
+
+ SArray* pCols = tsdbRetrieveDataBlock(pInfo->dataReader, NULL);
+ pBlock->pDataBlock = pCols;
+ if (pCols == NULL) {
+ longjmp(pTaskInfo->env, terrno);
+ }
+
+ qDebug("tmqsnap doRawScan get data uid:%ld", pBlock->info.uid);
+ pTaskInfo->streamInfo.lastStatus.type = TMQ_OFFSET__SNAPSHOT_DATA;
+ pTaskInfo->streamInfo.lastStatus.uid = pBlock->info.uid;
+ pTaskInfo->streamInfo.lastStatus.ts = pBlock->info.window.ekey;
+ return pBlock;
+ }
+
+ SMetaTableInfo mtInfo = getUidfromSnapShot(pInfo->sContext);
+ if (mtInfo.uid == 0) { // read snapshot done, change to get data from wal
+ qDebug("tmqsnap read snapshot done, change to get data from wal");
+ pTaskInfo->streamInfo.prepareStatus.uid = mtInfo.uid;
+ pTaskInfo->streamInfo.lastStatus.type = TMQ_OFFSET__LOG;
+ pTaskInfo->streamInfo.lastStatus.version = pInfo->sContext->snapVersion;
+ } else {
+ pTaskInfo->streamInfo.prepareStatus.uid = mtInfo.uid;
+ pTaskInfo->streamInfo.prepareStatus.ts = INT64_MIN;
+ qDebug("tmqsnap change get data uid:%ld", mtInfo.uid);
+ qStreamPrepareScan(pTaskInfo, &pTaskInfo->streamInfo.prepareStatus, pInfo->sContext->subType);
+ }
+ tDeleteSSchemaWrapper(mtInfo.schema);
+ qDebug("tmqsnap stream scan tsdb return null");
+ return NULL;
+ } else if (pTaskInfo->streamInfo.prepareStatus.type == TMQ_OFFSET__SNAPSHOT_META) {
+ SSnapContext* sContext = pInfo->sContext;
+ void* data = NULL;
+ int32_t dataLen = 0;
+ int16_t type = 0;
+ int64_t uid = 0;
+ if (getMetafromSnapShot(sContext, &data, &dataLen, &type, &uid) < 0) {
+ qError("tmqsnap getMetafromSnapShot error");
+ taosMemoryFreeClear(data);
+ return NULL;
+ }
+
+ if (!sContext->queryMetaOrData) { // change to get data next poll request
+ pTaskInfo->streamInfo.lastStatus.type = TMQ_OFFSET__SNAPSHOT_META;
+ pTaskInfo->streamInfo.lastStatus.uid = uid;
+ pTaskInfo->streamInfo.metaRsp.rspOffset.type = TMQ_OFFSET__SNAPSHOT_DATA;
+ pTaskInfo->streamInfo.metaRsp.rspOffset.uid = 0;
+ pTaskInfo->streamInfo.metaRsp.rspOffset.ts = INT64_MIN;
+ } else {
+ pTaskInfo->streamInfo.lastStatus.type = TMQ_OFFSET__SNAPSHOT_META;
+ pTaskInfo->streamInfo.lastStatus.uid = uid;
+ pTaskInfo->streamInfo.metaRsp.rspOffset = pTaskInfo->streamInfo.lastStatus;
+ pTaskInfo->streamInfo.metaRsp.resMsgType = type;
+ pTaskInfo->streamInfo.metaRsp.metaRspLen = dataLen;
+ pTaskInfo->streamInfo.metaRsp.metaRsp = data;
+ }
+
+ return NULL;
+ }
+ // else if (pTaskInfo->streamInfo.prepareStatus.type == TMQ_OFFSET__LOG) {
+ // int64_t fetchVer = pTaskInfo->streamInfo.prepareStatus.version + 1;
+ //
+ // while(1){
+ // if (tqFetchLog(pInfo->tqReader->pWalReader, pInfo->sContext->withMeta, &fetchVer, &pInfo->pCkHead) < 0) {
+ // qDebug("tmqsnap tmq poll: consumer log end. offset %" PRId64, fetchVer);
+ // pTaskInfo->streamInfo.lastStatus.version = fetchVer;
+ // pTaskInfo->streamInfo.lastStatus.type = TMQ_OFFSET__LOG;
+ // return NULL;
+ // }
+ // SWalCont* pHead = &pInfo->pCkHead->head;
+ // qDebug("tmqsnap tmq poll: consumer log offset %" PRId64 " msgType %d", fetchVer, pHead->msgType);
+ //
+ // if (pHead->msgType == TDMT_VND_SUBMIT) {
+ // SSubmitReq* pCont = (SSubmitReq*)&pHead->body;
+ // tqReaderSetDataMsg(pInfo->tqReader, pCont, 0);
+ // SSDataBlock* block = tqLogScanExec(pInfo->sContext->subType, pInfo->tqReader, pInfo->pFilterOutTbUid,
+ // &pInfo->pRes); if(block){
+ // pTaskInfo->streamInfo.lastStatus.type = TMQ_OFFSET__LOG;
+ // pTaskInfo->streamInfo.lastStatus.version = fetchVer;
+ // qDebug("tmqsnap fetch data msg, ver:%" PRId64 ", type:%d", pHead->version, pHead->msgType);
+ // return block;
+ // }else{
+ // fetchVer++;
+ // }
+ // } else{
+ // ASSERT(pInfo->sContext->withMeta);
+ // ASSERT(IS_META_MSG(pHead->msgType));
+ // qDebug("tmqsnap fetch meta msg, ver:%" PRId64 ", type:%d", pHead->version, pHead->msgType);
+ // pTaskInfo->streamInfo.metaRsp.rspOffset.version = fetchVer;
+ // pTaskInfo->streamInfo.metaRsp.rspOffset.type = TMQ_OFFSET__LOG;
+ // pTaskInfo->streamInfo.metaRsp.resMsgType = pHead->msgType;
+ // pTaskInfo->streamInfo.metaRsp.metaRspLen = pHead->bodyLen;
+ // pTaskInfo->streamInfo.metaRsp.metaRsp = taosMemoryMalloc(pHead->bodyLen);
+ // memcpy(pTaskInfo->streamInfo.metaRsp.metaRsp, pHead->body, pHead->bodyLen);
+ // return NULL;
+ // }
+ // }
+ return NULL;
+}
+
+static void destroyRawScanOperatorInfo(void* param) {
+ SStreamRawScanInfo* pRawScan = (SStreamRawScanInfo*)param;
+ tsdbReaderClose(pRawScan->dataReader);
+ destroySnapContext(pRawScan->sContext);
+ taosMemoryFree(pRawScan);
+}
+
// for subscribing db or stb (not including column),
// if this scan is used, meta data can be return
// and schemas are decided when scanning
-SOperatorInfo* createRawScanOperatorInfo(SReadHandle* pHandle, STableScanPhysiNode* pTableScanNode,
- SExecTaskInfo* pTaskInfo, STimeWindowAggSupp* pTwSup) {
+SOperatorInfo* createRawScanOperatorInfo(SReadHandle* pHandle, SExecTaskInfo* pTaskInfo) {
// create operator
// create tb reader
// create meta reader
// create tq reader
- return NULL;
+ SStreamRawScanInfo* pInfo = taosMemoryCalloc(1, sizeof(SStreamRawScanInfo));
+ SOperatorInfo* pOperator = taosMemoryCalloc(1, sizeof(SOperatorInfo));
+ if (pInfo == NULL || pOperator == NULL) {
+ terrno = TSDB_CODE_QRY_OUT_OF_MEMORY;
+ return NULL;
+ }
+
+ pInfo->vnode = pHandle->vnode;
+
+ pInfo->sContext = pHandle->sContext;
+ pOperator->name = "RawStreamScanOperator";
+ // pOperator->blocking = false;
+ // pOperator->status = OP_NOT_OPENED;
+ pOperator->info = pInfo;
+ pOperator->pTaskInfo = pTaskInfo;
+
+ pOperator->fpSet = createOperatorFpSet(NULL, doRawScan, NULL, NULL, destroyRawScanOperatorInfo, NULL, NULL, NULL);
+ return pOperator;
}
static void destroyStreamScanOperatorInfo(void* param) {
@@ -1590,7 +2053,7 @@ static void destroyStreamScanOperatorInfo(void* param) {
}
if (pStreamScan->pPseudoExpr) {
destroyExprInfo(pStreamScan->pPseudoExpr, pStreamScan->numOfPseudoExpr);
- taosMemoryFreeClear(pStreamScan->pPseudoExpr);
+ taosMemoryFree(pStreamScan->pPseudoExpr);
}
updateInfoDestroy(pStreamScan->pUpdateInfo);
@@ -1618,11 +2081,6 @@ SOperatorInfo* createStreamScanOperatorInfo(SReadHandle* pHandle, STableScanPhys
pInfo->pTagCond = pTagCond;
pInfo->pGroupTags = pTableScanNode->pGroupTags;
- pInfo->twAggSup = (STimeWindowAggSupp){
- .waterMark = pTableScanNode->watermark,
- .calTrigger = pTableScanNode->triggerType,
- .maxTs = INT64_MIN,
- };
int32_t numOfCols = 0;
pInfo->pColMatchInfo = extractColMatchInfo(pScanPhyNode->pScanCols, pDescNode, &numOfCols, COL_MATCH_FROM_COL_ID);
@@ -1670,17 +2128,12 @@ SOperatorInfo* createStreamScanOperatorInfo(SReadHandle* pHandle, STableScanPhys
pInfo->tqReader = pHandle->tqReader;
}
- if (pTSInfo->pdInfo.interval.interval > 0) {
- pInfo->pUpdateInfo = updateInfoInitP(&pTSInfo->pdInfo.interval, pInfo->twAggSup.waterMark);
- } else {
- pInfo->pUpdateInfo = NULL;
- }
-
+ pInfo->pUpdateInfo = NULL;
pInfo->pTableScanOp = pTableScanOp;
- pInfo->interval = pTSInfo->pdInfo.interval;
pInfo->readHandle = *pHandle;
pInfo->tableUid = pScanPhyNode->uid;
+ pTaskInfo->streamInfo.snapshotVer = pHandle->version;
// set the extract column id to streamHandle
tqReaderSetColIdList(pInfo->tqReader, pColIds);
@@ -1705,8 +2158,7 @@ SOperatorInfo* createStreamScanOperatorInfo(SReadHandle* pHandle, STableScanPhys
pInfo->pUpdateRes = createSpecialDataBlock(STREAM_CLEAR);
pInfo->pCondition = pScanPhyNode->node.pConditions;
pInfo->scanMode = STREAM_SCAN_FROM_READERHANDLE;
- pInfo->sessionSup =
- (SessionWindowSupporter){.pStreamAggSup = NULL, .gap = -1, .parentType = QUERY_NODE_PHYSICAL_PLAN};
+ pInfo->windowSup = (SWindowSupporter){.pStreamAggSup = NULL, .gap = -1, .parentType = QUERY_NODE_PHYSICAL_PLAN};
pInfo->groupId = 0;
pInfo->pPullDataRes = createSpecialDataBlock(STREAM_RETRIEVE);
pInfo->pStreamScanOp = pOperator;
@@ -1715,6 +2167,7 @@ SOperatorInfo* createStreamScanOperatorInfo(SReadHandle* pHandle, STableScanPhys
pInfo->updateWin = (STimeWindow){.skey = INT64_MAX, .ekey = INT64_MAX};
pInfo->pUpdateDataRes = createSpecialDataBlock(STREAM_CLEAR);
pInfo->assignBlockUid = pTableScanNode->assignBlockUid;
+ pInfo->partitionSup.needCalc = false;
pOperator->name = "StreamScanOperator";
pOperator->operatorType = QUERY_NODE_PHYSICAL_PLAN_STREAM_SCAN;
@@ -1724,8 +2177,9 @@ SOperatorInfo* createStreamScanOperatorInfo(SReadHandle* pHandle, STableScanPhys
pOperator->exprSupp.numOfExprs = taosArrayGetSize(pInfo->pRes->pDataBlock);
pOperator->pTaskInfo = pTaskInfo;
- pOperator->fpSet = createOperatorFpSet(operatorDummyOpenFn, doStreamScan, NULL, NULL, destroyStreamScanOperatorInfo,
- NULL, NULL, NULL);
+ __optr_fn_t nextFn = pTaskInfo->execModel == OPTR_EXEC_MODEL_STREAM ? doStreamScan : doQueueScan;
+ pOperator->fpSet =
+ createOperatorFpSet(operatorDummyOpenFn, nextFn, NULL, NULL, destroyStreamScanOperatorInfo, NULL, NULL, NULL);
return pOperator;
@@ -2405,7 +2859,7 @@ static SSDataBlock* doSysTableScan(SOperatorInfo* pOperator) {
}
char* pStart = pRsp->data;
- extractDataBlockFromFetchRsp(pInfo->pRes, pRsp->data, pOperator->exprSupp.numOfExprs, pInfo->scanCols, &pStart);
+ extractDataBlockFromFetchRsp(pInfo->pRes, pRsp->data, pInfo->scanCols, &pStart);
updateLoadRemoteInfo(&pInfo->loadInfo, pRsp->numOfRows, pRsp->compLen, startTs, pOperator);
// todo log the filter info
diff --git a/source/libs/executor/src/tfill.c b/source/libs/executor/src/tfill.c
index f23552c5a7b82207ffc368dbae7c1894cb6a8edd..ea0d26f4de167f0158f22a2e438ac16a74e3485f 100644
--- a/source/libs/executor/src/tfill.c
+++ b/source/libs/executor/src/tfill.c
@@ -19,6 +19,7 @@
#include "tmsg.h"
#include "ttypes.h"
+#include "executorimpl.h"
#include "tcommon.h"
#include "thash.h"
#include "ttime.h"
@@ -35,18 +36,30 @@
#define GET_DEST_SLOT_ID(_p) ((_p)->pExpr->base.resSchema.slotId)
+#define FILL_POS_INVALID 0
+#define FILL_POS_START 1
+#define FILL_POS_MID 2
+#define FILL_POS_END 3
+
+typedef struct STimeRange {
+ TSKEY skey;
+ TSKEY ekey;
+ uint64_t groupId;
+} STimeRange;
+
static void doSetVal(SColumnInfoData* pDstColInfoData, int32_t rowIndex, const SGroupKeys* pKey);
-static bool fillIfWindowPseudoColumn(SFillInfo* pFillInfo, SFillColInfo* pCol, SColumnInfoData* pDstColInfoData, int32_t rowIndex);
+static bool fillIfWindowPseudoColumn(SFillInfo* pFillInfo, SFillColInfo* pCol, SColumnInfoData* pDstColInfoData,
+ int32_t rowIndex);
static void setNullRow(SSDataBlock* pBlock, SFillInfo* pFillInfo, int32_t rowIndex) {
- for(int32_t i = 0; i < pFillInfo->numOfCols; ++i) {
- SFillColInfo* pCol = &pFillInfo->pFillCol[i];
- int32_t dstSlotId = GET_DEST_SLOT_ID(pCol);
+ for (int32_t i = 0; i < pFillInfo->numOfCols; ++i) {
+ SFillColInfo* pCol = &pFillInfo->pFillCol[i];
+ int32_t dstSlotId = GET_DEST_SLOT_ID(pCol);
SColumnInfoData* pDstColInfo = taosArrayGet(pBlock->pDataBlock, dstSlotId);
if (pCol->notFillCol) {
bool filled = fillIfWindowPseudoColumn(pFillInfo, pCol, pDstColInfo, rowIndex);
if (!filled) {
- SArray* p = FILL_IS_ASC_FILL(pFillInfo) ? pFillInfo->prev.pRowVal : pFillInfo->next.pRowVal;
+ SArray* p = FILL_IS_ASC_FILL(pFillInfo) ? pFillInfo->prev.pRowVal : pFillInfo->next.pRowVal;
SGroupKeys* pKey = taosArrayGet(p, i);
doSetVal(pDstColInfo, rowIndex, pKey);
}
@@ -76,8 +89,9 @@ static void doSetUserSpecifiedValue(SColumnInfoData* pDst, SVariant* pVar, int32
}
}
-//fill windows pseudo column, _wstart, _wend, _wduration and return true, otherwise return false
-static bool fillIfWindowPseudoColumn(SFillInfo* pFillInfo, SFillColInfo* pCol, SColumnInfoData* pDstColInfoData, int32_t rowIndex) {
+// fill windows pseudo column, _wstart, _wend, _wduration and return true, otherwise return false
+static bool fillIfWindowPseudoColumn(SFillInfo* pFillInfo, SFillColInfo* pCol, SColumnInfoData* pDstColInfoData,
+ int32_t rowIndex) {
if (!pCol->notFillCol) {
return false;
}
@@ -89,15 +103,15 @@ static bool fillIfWindowPseudoColumn(SFillInfo* pFillInfo, SFillColInfo* pCol, S
colDataAppend(pDstColInfoData, rowIndex, (const char*)&pFillInfo->currentKey, false);
return true;
} else if (pCol->pExpr->base.pParam[0].pCol->colType == COLUMN_TYPE_WINDOW_END) {
- //TODO: include endpoint
+ // TODO: include endpoint
SInterval* pInterval = &pFillInfo->interval;
- int32_t step = (pFillInfo->order == TSDB_ORDER_ASC) ? 1 : -1;
- int64_t windowEnd =
+ int32_t step = (pFillInfo->order == TSDB_ORDER_ASC) ? 1 : -1;
+ int64_t windowEnd =
taosTimeAdd(pFillInfo->currentKey, pInterval->sliding * step, pInterval->slidingUnit, pInterval->precision);
colDataAppend(pDstColInfoData, rowIndex, (const char*)&windowEnd, false);
return true;
} else if (pCol->pExpr->base.pParam[0].pCol->colType == COLUMN_TYPE_WINDOW_DURATION) {
- //TODO: include endpoint
+ // TODO: include endpoint
colDataAppend(pDstColInfoData, rowIndex, (const char*)&pFillInfo->interval.sliding, false);
return true;
}
@@ -115,13 +129,13 @@ static void doFillOneRow(SFillInfo* pFillInfo, SSDataBlock* pBlock, SSDataBlock*
// set the other values
if (pFillInfo->type == TSDB_FILL_PREV) {
- SArray* p = FILL_IS_ASC_FILL(pFillInfo)? pFillInfo->prev.pRowVal : pFillInfo->next.pRowVal;
+ SArray* p = FILL_IS_ASC_FILL(pFillInfo) ? pFillInfo->prev.pRowVal : pFillInfo->next.pRowVal;
for (int32_t i = 0; i < pFillInfo->numOfCols; ++i) {
SFillColInfo* pCol = &pFillInfo->pFillCol[i];
SColumnInfoData* pDstColInfoData = taosArrayGet(pBlock->pDataBlock, GET_DEST_SLOT_ID(pCol));
- bool filled = fillIfWindowPseudoColumn(pFillInfo, pCol, pDstColInfoData, index);
+ bool filled = fillIfWindowPseudoColumn(pFillInfo, pCol, pDstColInfoData, index);
if (!filled) {
SGroupKeys* pKey = taosArrayGet(p, i);
doSetVal(pDstColInfoData, index, pKey);
@@ -131,9 +145,9 @@ static void doFillOneRow(SFillInfo* pFillInfo, SSDataBlock* pBlock, SSDataBlock*
SArray* p = FILL_IS_ASC_FILL(pFillInfo) ? pFillInfo->next.pRowVal : pFillInfo->prev.pRowVal;
// todo refactor: start from 0 not 1
for (int32_t i = 0; i < pFillInfo->numOfCols; ++i) {
- SFillColInfo* pCol = &pFillInfo->pFillCol[i];
+ SFillColInfo* pCol = &pFillInfo->pFillCol[i];
SColumnInfoData* pDstColInfoData = taosArrayGet(pBlock->pDataBlock, GET_DEST_SLOT_ID(pCol));
- bool filled = fillIfWindowPseudoColumn(pFillInfo, pCol, pDstColInfoData, index);
+ bool filled = fillIfWindowPseudoColumn(pFillInfo, pCol, pDstColInfoData, index);
if (!filled) {
SGroupKeys* pKey = taosArrayGet(p, i);
doSetVal(pDstColInfoData, index, pKey);
@@ -154,7 +168,7 @@ static void doFillOneRow(SFillInfo* pFillInfo, SSDataBlock* pBlock, SSDataBlock*
if (pCol->notFillCol) {
bool filled = fillIfWindowPseudoColumn(pFillInfo, pCol, pDstCol, index);
if (!filled) {
- SArray* p = FILL_IS_ASC_FILL(pFillInfo) ? pFillInfo->prev.pRowVal : pFillInfo->next.pRowVal;
+ SArray* p = FILL_IS_ASC_FILL(pFillInfo) ? pFillInfo->prev.pRowVal : pFillInfo->next.pRowVal;
SGroupKeys* pKey = taosArrayGet(p, i);
doSetVal(pDstCol, index, pKey);
}
@@ -190,13 +204,13 @@ static void doFillOneRow(SFillInfo* pFillInfo, SSDataBlock* pBlock, SSDataBlock*
for (int32_t i = 0; i < pFillInfo->numOfCols; ++i) {
SFillColInfo* pCol = &pFillInfo->pFillCol[i];
- int32_t slotId = GET_DEST_SLOT_ID(pCol);
+ int32_t slotId = GET_DEST_SLOT_ID(pCol);
SColumnInfoData* pDst = taosArrayGet(pBlock->pDataBlock, slotId);
if (pCol->notFillCol) {
bool filled = fillIfWindowPseudoColumn(pFillInfo, pCol, pDst, index);
if (!filled) {
- SArray* p = FILL_IS_ASC_FILL(pFillInfo) ? pFillInfo->prev.pRowVal : pFillInfo->next.pRowVal;
+ SArray* p = FILL_IS_ASC_FILL(pFillInfo) ? pFillInfo->prev.pRowVal : pFillInfo->next.pRowVal;
SGroupKeys* pKey = taosArrayGet(p, i);
doSetVal(pDst, index, pKey);
}
@@ -261,8 +275,8 @@ static void copyCurrentRowIntoBuf(SFillInfo* pFillInfo, int32_t rowIndex, SArray
} else if (type == QUERY_NODE_OPERATOR) {
SColumnInfoData* pSrcCol = taosArrayGet(pFillInfo->pSrcBlock->pDataBlock, i);
- bool isNull = colDataIsNull_s(pSrcCol, rowIndex);
- char* p = colDataGetData(pSrcCol, rowIndex);
+ bool isNull = colDataIsNull_s(pSrcCol, rowIndex);
+ char* p = colDataGetData(pSrcCol, rowIndex);
saveColData(pRow, i, p, isNull);
} else {
ASSERT(0);
@@ -425,9 +439,9 @@ struct SFillInfo* taosCreateFillInfo(TSKEY skey, int32_t numOfFillCols, int32_t
pFillInfo->order = order;
pFillInfo->srcTsSlotId = primaryTsSlotId;
- for(int32_t i = 0; i < numOfNotFillCols; ++i) {
+ for (int32_t i = 0; i < numOfNotFillCols; ++i) {
SFillColInfo* p = &pCol[i + numOfFillCols];
- int32_t srcSlotId = GET_DEST_SLOT_ID(p);
+ int32_t srcSlotId = GET_DEST_SLOT_ID(p);
if (srcSlotId == primaryTsSlotId) {
pFillInfo->tsSlotId = i + numOfFillCols;
break;
@@ -499,9 +513,9 @@ void* taosDestroyFillInfo(SFillInfo* pFillInfo) {
}
taosArrayDestroy(pFillInfo->next.pRowVal);
-// for (int32_t i = 0; i < pFillInfo->numOfTags; ++i) {
-// taosMemoryFreeClear(pFillInfo->pTags[i].tagVal);
-// }
+ // for (int32_t i = 0; i < pFillInfo->numOfTags; ++i) {
+ // taosMemoryFreeClear(pFillInfo->pTags[i].tagVal);
+ // }
taosMemoryFreeClear(pFillInfo->pTags);
taosMemoryFreeClear(pFillInfo->pFillCol);
@@ -640,7 +654,7 @@ SFillColInfo* createFillColInfo(SExprInfo* pExpr, int32_t numOfFillExpr, SExprIn
}
}
- for(int32_t i = 0; i < numOfNotFillExpr; ++i) {
+ for (int32_t i = 0; i < numOfNotFillExpr; ++i) {
SExprInfo* pExprInfo = &pNotFillExpr[i];
pFillCol[i + numOfFillExpr].pExpr = pExprInfo;
pFillCol[i + numOfFillExpr].notFillCol = true;
@@ -648,3 +662,1050 @@ SFillColInfo* createFillColInfo(SExprInfo* pExpr, int32_t numOfFillExpr, SExprIn
return pFillCol;
}
+
+TSKEY getNextWindowTs(TSKEY ts, SInterval* pInterval) {
+ STimeWindow win = {.skey = ts, .ekey = ts};
+ getNextIntervalWindow(pInterval, &win, TSDB_ORDER_ASC);
+ return win.skey;
+}
+
+TSKEY getPrevWindowTs(TSKEY ts, SInterval* pInterval) {
+ STimeWindow win = {.skey = ts, .ekey = ts};
+ getNextIntervalWindow(pInterval, &win, TSDB_ORDER_DESC);
+ return win.skey;
+}
+
+void setRowCell(SColumnInfoData* pCol, int32_t rowId, const SResultCellData* pCell) {
+ colDataAppend(pCol, rowId, pCell->pData, pCell->isNull);
+}
+
+SResultCellData* getResultCell(SResultRowData* pRaw, int32_t index) {
+ if (!pRaw || !pRaw->pRowVal) {
+ return NULL;
+ }
+ char* pData = (char*)pRaw->pRowVal;
+ SResultCellData* pCell = pRaw->pRowVal;
+ for (int32_t i = 0; i < index; i++) {
+ pData += (pCell->bytes + sizeof(SResultCellData));
+ pCell = (SResultCellData*)pData;
+ }
+ return pCell;
+}
+
+void* destroyFillColumnInfo(SFillColInfo* pFillCol, int32_t start, int32_t end) {
+ for (int32_t i = start; i < end; i++) {
+ destroyExprInfo(pFillCol[i].pExpr, 1);
+ taosMemoryFreeClear(pFillCol[i].pExpr);
+ taosVariantDestroy(&pFillCol[i].fillVal);
+ }
+ taosMemoryFree(pFillCol);
+ return NULL;
+}
+
+void* destroyStreamFillSupporter(SStreamFillSupporter* pFillSup) {
+ pFillSup->pAllColInfo = destroyFillColumnInfo(pFillSup->pAllColInfo, pFillSup->numOfFillCols, pFillSup->numOfAllCols);
+ tSimpleHashCleanup(pFillSup->pResMap);
+ pFillSup->pResMap = NULL;
+ taosMemoryFree(pFillSup);
+ return NULL;
+}
+
+void* destroyStreamFillLinearInfo(SStreamFillLinearInfo* pFillLinear) {
+ taosArrayDestroy(pFillLinear->pDeltaVal);
+ taosArrayDestroy(pFillLinear->pNextDeltaVal);
+ taosMemoryFree(pFillLinear);
+ return NULL;
+}
+void* destroyStreamFillInfo(SStreamFillInfo* pFillInfo) {
+ if (pFillInfo->type == TSDB_FILL_SET_VALUE || pFillInfo->type == TSDB_FILL_NULL) {
+ taosMemoryFreeClear(pFillInfo->pResRow->pRowVal);
+ taosMemoryFreeClear(pFillInfo->pResRow);
+ }
+ pFillInfo->pLinearInfo = destroyStreamFillLinearInfo(pFillInfo->pLinearInfo);
+ taosMemoryFree(pFillInfo);
+ return NULL;
+}
+
+void destroyStreamFillOperatorInfo(void* param) {
+ SStreamFillOperatorInfo* pInfo = (SStreamFillOperatorInfo*)param;
+ pInfo->pFillInfo = destroyStreamFillInfo(pInfo->pFillInfo);
+ pInfo->pFillSup = destroyStreamFillSupporter(pInfo->pFillSup);
+ pInfo->pRes = blockDataDestroy(pInfo->pRes);
+ pInfo->pSrcBlock = blockDataDestroy(pInfo->pSrcBlock);
+ pInfo->pColMatchColInfo = taosArrayDestroy(pInfo->pColMatchColInfo);
+ taosMemoryFree(pInfo);
+}
+
+static void resetFillWindow(SResultRowData* pRowData) {
+ pRowData->key = INT64_MIN;
+ pRowData->pRowVal = NULL;
+}
+
+void resetPrevAndNextWindow(SStreamFillSupporter* pFillSup, SStreamState* pState) {
+ resetFillWindow(&pFillSup->prev);
+ resetFillWindow(&pFillSup->cur);
+ resetFillWindow(&pFillSup->next);
+ resetFillWindow(&pFillSup->nextNext);
+}
+
+void getCurWindowFromDiscBuf(SOperatorInfo* pOperator, TSKEY ts, uint64_t groupId, SStreamFillSupporter* pFillSup) {
+ SStreamState* pState = pOperator->pTaskInfo->streamInfo.pState;
+ resetPrevAndNextWindow(pFillSup, pState);
+
+ SWinKey key = {.ts = ts, .groupId = groupId};
+ void* curVal = NULL;
+ int32_t curVLen = 0;
+ int32_t code = streamStateFillGet(pState, &key, (void**)&curVal, &curVLen);
+ ASSERT(code == TSDB_CODE_SUCCESS);
+ pFillSup->cur.key = key.ts;
+ pFillSup->cur.pRowVal = curVal;
+}
+
+void getWindowFromDiscBuf(SOperatorInfo* pOperator, TSKEY ts, uint64_t groupId, SStreamFillSupporter* pFillSup) {
+ SStreamState* pState = pOperator->pTaskInfo->streamInfo.pState;
+ resetPrevAndNextWindow(pFillSup, pState);
+
+ SWinKey key = {.ts = ts, .groupId = groupId};
+ void* curVal = NULL;
+ int32_t curVLen = 0;
+ int32_t code = streamStateFillGet(pState, &key, (void**)&curVal, &curVLen);
+ ASSERT(code == TSDB_CODE_SUCCESS);
+ pFillSup->cur.key = key.ts;
+ pFillSup->cur.pRowVal = curVal;
+
+ SStreamStateCur* pCur = streamStateFillSeekKeyPrev(pState, &key);
+ SWinKey preKey = {.groupId = groupId};
+ void* preVal = NULL;
+ int32_t preVLen = 0;
+ if (pCur) {
+ code = streamStateGetGroupKVByCur(pCur, &preKey, (const void**)&preVal, &preVLen);
+ }
+
+ if (pCur && code == TSDB_CODE_SUCCESS) {
+ pFillSup->prev.key = preKey.ts;
+ pFillSup->prev.pRowVal = preVal;
+
+ code = streamStateCurNext(pState, pCur);
+ ASSERT(code == TSDB_CODE_SUCCESS);
+
+ code = streamStateCurNext(pState, pCur);
+ if (code != TSDB_CODE_SUCCESS) {
+ pCur = NULL;
+ }
+ } else {
+ pCur = streamStateFillSeekKeyNext(pState, &key);
+ }
+
+ if (pCur) {
+ SWinKey nextKey = {.groupId = groupId};
+ void* nextVal = NULL;
+ int32_t nextVLen = 0;
+ code = streamStateGetGroupKVByCur(pCur, &nextKey, (const void**)&nextVal, &nextVLen);
+ if (code == TSDB_CODE_SUCCESS) {
+ pFillSup->next.key = nextKey.ts;
+ pFillSup->next.pRowVal = nextVal;
+ if (pFillSup->type == TSDB_FILL_PREV || pFillSup->type == TSDB_FILL_NEXT) {
+ code = streamStateCurNext(pState, pCur);
+ if (code == TSDB_CODE_SUCCESS) {
+ SWinKey nextNextKey = {.groupId = groupId};
+ void* nextNextVal = NULL;
+ int32_t nextNextVLen = 0;
+ code = streamStateGetGroupKVByCur(pCur, &nextNextKey, (const void**)&nextNextVal, &nextNextVLen);
+ if (code == TSDB_CODE_SUCCESS) {
+ pFillSup->nextNext.key = nextNextKey.ts;
+ pFillSup->nextNext.pRowVal = nextNextVal;
+ }
+ }
+ }
+ }
+ }
+}
+
+static bool hasPrevWindow(SStreamFillSupporter* pFillSup) { return pFillSup->prev.key != INT64_MIN; }
+static bool hasNextWindow(SStreamFillSupporter* pFillSup) { return pFillSup->next.key != INT64_MIN; }
+static bool hasNextNextWindow(SStreamFillSupporter* pFillSup) {
+ return pFillSup->nextNext.key != INT64_MIN;
+ return false;
+}
+
+static void transBlockToResultRow(const SSDataBlock* pBlock, int32_t rowId, TSKEY ts, SResultRowData* pRowVal) {
+ int32_t numOfCols = taosArrayGetSize(pBlock->pDataBlock);
+ for (int32_t i = 0; i < numOfCols; ++i) {
+ SColumnInfoData* pColData = taosArrayGet(pBlock->pDataBlock, i);
+ SResultCellData* pCell = getResultCell(pRowVal, i);
+ if (!colDataIsNull_s(pColData, rowId)) {
+ pCell->isNull = false;
+ pCell->type = pColData->info.type;
+ pCell->bytes = pColData->info.bytes;
+ char* val = colDataGetData(pColData, rowId);
+ if (IS_VAR_DATA_TYPE(pCell->type)) {
+ memcpy(pCell->pData, val, varDataTLen(val));
+ } else {
+ memcpy(pCell->pData, val, pCell->bytes);
+ }
+ } else {
+ pCell->isNull = true;
+ }
+ }
+ pRowVal->key = ts;
+}
+
+static void calcDeltaData(SSDataBlock* pBlock, int32_t rowId, SResultRowData* pRowVal, SArray* pDelta,
+ SFillColInfo* pFillCol, int32_t numOfCol, int32_t winCount, int32_t order) {
+ for (int32_t i = 0; i < numOfCol; i++) {
+ if (!pFillCol[i].notFillCol) {
+ int32_t slotId = GET_DEST_SLOT_ID(pFillCol + i);
+ SColumnInfoData* pCol = taosArrayGet(pBlock->pDataBlock, slotId);
+ char* var = colDataGetData(pCol, rowId);
+ double start = 0;
+ GET_TYPED_DATA(start, double, pCol->info.type, var);
+ SResultCellData* pCell = getResultCell(pRowVal, slotId);
+ double end = 0;
+ GET_TYPED_DATA(end, double, pCell->type, pCell->pData);
+ double delta = 0;
+ if (order == TSDB_ORDER_ASC) {
+ delta = (end - start) / winCount;
+ } else {
+ delta = (start - end) / winCount;
+ }
+ taosArraySet(pDelta, slotId, &delta);
+ }
+ }
+}
+
+static void calcRowDeltaData(SResultRowData* pStartRow, SResultRowData* pEndRow, SArray* pDelta, SFillColInfo* pFillCol,
+ int32_t numOfCol, int32_t winCount) {
+ for (int32_t i = 0; i < numOfCol; i++) {
+ if (!pFillCol[i].notFillCol) {
+ int32_t slotId = GET_DEST_SLOT_ID(pFillCol + i);
+ SResultCellData* pSCell = getResultCell(pStartRow, slotId);
+ double start = 0.0;
+ GET_TYPED_DATA(start, double, pSCell->type, pSCell->pData);
+ SResultCellData* pECell = getResultCell(pEndRow, slotId);
+ double end = 0.0;
+ GET_TYPED_DATA(end, double, pECell->type, pECell->pData);
+ double delta = (end - start) / winCount;
+ taosArraySet(pDelta, slotId, &delta);
+ }
+ }
+}
+
+static void setFillInfoStart(TSKEY ts, SInterval* pInterval, SStreamFillInfo* pFillInfo) {
+ ts = taosTimeAdd(ts, pInterval->sliding, pInterval->slidingUnit, pInterval->precision);
+ pFillInfo->start = ts;
+}
+
+static void setFillInfoEnd(TSKEY ts, SInterval* pInterval, SStreamFillInfo* pFillInfo) {
+ ts = taosTimeAdd(ts, pInterval->sliding * -1, pInterval->slidingUnit, pInterval->precision);
+ pFillInfo->end = ts;
+}
+
+static void setFillKeyInfo(TSKEY start, TSKEY end, SInterval* pInterval, SStreamFillInfo* pFillInfo) {
+ setFillInfoStart(start, pInterval, pFillInfo);
+ pFillInfo->current = pFillInfo->start;
+ setFillInfoEnd(end, pInterval, pFillInfo);
+}
+
+void setDeleteFillValueInfo(TSKEY start, TSKEY end, SStreamFillSupporter* pFillSup, SStreamFillInfo* pFillInfo) {
+ if (!hasPrevWindow(pFillSup) || !hasNextWindow(pFillSup)) {
+ pFillInfo->needFill = false;
+ return;
+ }
+
+ pFillInfo->needFill = true;
+ pFillInfo->start = start;
+ pFillInfo->current = pFillInfo->start;
+ pFillInfo->end = end;
+ pFillInfo->pos = FILL_POS_INVALID;
+ switch (pFillInfo->type) {
+ case TSDB_FILL_NULL:
+ case TSDB_FILL_SET_VALUE:
+ break;
+ case TSDB_FILL_PREV:
+ pFillInfo->pResRow = &pFillSup->prev;
+ break;
+ case TSDB_FILL_NEXT:
+ pFillInfo->pResRow = &pFillSup->next;
+ break;
+ case TSDB_FILL_LINEAR: {
+ setFillKeyInfo(pFillSup->prev.key, pFillSup->next.key, &pFillSup->interval, pFillInfo);
+ pFillInfo->pLinearInfo->hasNext = false;
+ pFillInfo->pLinearInfo->nextEnd = INT64_MIN;
+ int32_t numOfWins = taosTimeCountInterval(pFillSup->prev.key, pFillSup->next.key, pFillSup->interval.sliding,
+ pFillSup->interval.slidingUnit, pFillSup->interval.precision);
+ calcRowDeltaData(&pFillSup->prev, &pFillSup->next, pFillInfo->pLinearInfo->pDeltaVal, pFillSup->pAllColInfo,
+ pFillSup->numOfAllCols, numOfWins);
+ pFillInfo->pResRow = &pFillSup->prev;
+ pFillInfo->pLinearInfo->winIndex = 0;
+ } break;
+ default:
+ ASSERT(0);
+ break;
+ }
+}
+
+void setFillValueInfo(SSDataBlock* pBlock, TSKEY ts, int32_t rowId, SStreamFillSupporter* pFillSup,
+ SStreamFillInfo* pFillInfo) {
+ pFillInfo->preRowKey = pFillSup->cur.key;
+ if (!hasPrevWindow(pFillSup) && !hasNextWindow(pFillSup)) {
+ pFillInfo->needFill = false;
+ pFillInfo->pos = FILL_POS_START;
+ return;
+ }
+ TSKEY prevWKey = INT64_MIN;
+ TSKEY nextWKey = INT64_MIN;
+ if (hasPrevWindow(pFillSup)) {
+ prevWKey = pFillSup->prev.key;
+ }
+ if (hasNextWindow(pFillSup)) {
+ nextWKey = pFillSup->next.key;
+ }
+
+ pFillInfo->needFill = true;
+ pFillInfo->pos = FILL_POS_INVALID;
+ switch (pFillInfo->type) {
+ case TSDB_FILL_NULL:
+ case TSDB_FILL_SET_VALUE: {
+ if (pFillSup->prev.key == pFillInfo->preRowKey) {
+ resetFillWindow(&pFillSup->prev);
+ }
+ if (hasPrevWindow(pFillSup) && hasNextWindow(pFillSup)) {
+ if (pFillSup->next.key == pFillInfo->nextRowKey) {
+ pFillInfo->preRowKey = INT64_MIN;
+ setFillKeyInfo(prevWKey, ts, &pFillSup->interval, pFillInfo);
+ pFillInfo->pos = FILL_POS_END;
+ } else {
+ pFillInfo->needFill = false;
+ pFillInfo->pos = FILL_POS_START;
+ }
+ } else if (hasPrevWindow(pFillSup)) {
+ setFillKeyInfo(prevWKey, ts, &pFillSup->interval, pFillInfo);
+ pFillInfo->pos = FILL_POS_END;
+ } else {
+ setFillKeyInfo(ts, nextWKey, &pFillSup->interval, pFillInfo);
+ pFillInfo->pos = FILL_POS_START;
+ }
+ } break;
+ case TSDB_FILL_PREV: {
+ if (hasNextWindow(pFillSup) && ((pFillSup->next.key != pFillInfo->nextRowKey) ||
+ (pFillSup->next.key == pFillInfo->nextRowKey && hasNextNextWindow(pFillSup)) ||
+ (pFillSup->next.key == pFillInfo->nextRowKey && !hasPrevWindow(pFillSup)))) {
+ setFillKeyInfo(ts, nextWKey, &pFillSup->interval, pFillInfo);
+ pFillInfo->pos = FILL_POS_START;
+ pFillSup->prev.key = pFillSup->cur.key;
+ pFillSup->prev.pRowVal = pFillSup->cur.pRowVal;
+ } else if (hasPrevWindow(pFillSup)) {
+ setFillKeyInfo(prevWKey, ts, &pFillSup->interval, pFillInfo);
+ pFillInfo->pos = FILL_POS_END;
+ pFillInfo->preRowKey = INT64_MIN;
+ }
+ pFillInfo->pResRow = &pFillSup->prev;
+ } break;
+ case TSDB_FILL_NEXT: {
+ if (hasPrevWindow(pFillSup)) {
+ setFillKeyInfo(prevWKey, ts, &pFillSup->interval, pFillInfo);
+ pFillInfo->pos = FILL_POS_END;
+ pFillSup->next.key = pFillSup->cur.key;
+ pFillSup->next.pRowVal = pFillSup->cur.pRowVal;
+ pFillInfo->preRowKey = INT64_MIN;
+ } else {
+ ASSERT(hasNextWindow(pFillSup));
+ setFillKeyInfo(ts, nextWKey, &pFillSup->interval, pFillInfo);
+ pFillInfo->pos = FILL_POS_START;
+ }
+ pFillInfo->pResRow = &pFillSup->next;
+ } break;
+ case TSDB_FILL_LINEAR: {
+ pFillInfo->pLinearInfo->winIndex = 0;
+ if (hasPrevWindow(pFillSup) && hasNextWindow(pFillSup)) {
+ setFillKeyInfo(prevWKey, ts, &pFillSup->interval, pFillInfo);
+ pFillInfo->pos = FILL_POS_MID;
+ pFillInfo->pLinearInfo->nextEnd = nextWKey;
+ int32_t numOfWins = taosTimeCountInterval(prevWKey, ts, pFillSup->interval.sliding,
+ pFillSup->interval.slidingUnit, pFillSup->interval.precision);
+ calcRowDeltaData(&pFillSup->prev, &pFillSup->cur, pFillInfo->pLinearInfo->pDeltaVal, pFillSup->pAllColInfo,
+ pFillSup->numOfAllCols, numOfWins);
+ pFillInfo->pResRow = &pFillSup->prev;
+
+ numOfWins = taosTimeCountInterval(ts, nextWKey, pFillSup->interval.sliding, pFillSup->interval.slidingUnit,
+ pFillSup->interval.precision);
+ calcRowDeltaData(&pFillSup->cur, &pFillSup->next, pFillInfo->pLinearInfo->pNextDeltaVal, pFillSup->pAllColInfo,
+ pFillSup->numOfAllCols, numOfWins);
+ pFillInfo->pLinearInfo->hasNext = true;
+ } else if (hasPrevWindow(pFillSup)) {
+ setFillKeyInfo(prevWKey, ts, &pFillSup->interval, pFillInfo);
+ pFillInfo->pos = FILL_POS_END;
+ pFillInfo->pLinearInfo->nextEnd = INT64_MIN;
+ int32_t numOfWins = taosTimeCountInterval(prevWKey, ts, pFillSup->interval.sliding,
+ pFillSup->interval.slidingUnit, pFillSup->interval.precision);
+ calcRowDeltaData(&pFillSup->prev, &pFillSup->cur, pFillInfo->pLinearInfo->pDeltaVal, pFillSup->pAllColInfo,
+ pFillSup->numOfAllCols, numOfWins);
+ pFillInfo->pResRow = &pFillSup->prev;
+ pFillInfo->pLinearInfo->hasNext = false;
+ } else {
+ ASSERT(hasNextWindow(pFillSup));
+ setFillKeyInfo(ts, nextWKey, &pFillSup->interval, pFillInfo);
+ pFillInfo->pos = FILL_POS_START;
+ pFillInfo->pLinearInfo->nextEnd = INT64_MIN;
+ int32_t numOfWins = taosTimeCountInterval(ts, nextWKey, pFillSup->interval.sliding,
+ pFillSup->interval.slidingUnit, pFillSup->interval.precision);
+ calcRowDeltaData(&pFillSup->cur, &pFillSup->next, pFillInfo->pLinearInfo->pDeltaVal, pFillSup->pAllColInfo,
+ pFillSup->numOfAllCols, numOfWins);
+ pFillInfo->pResRow = &pFillSup->cur;
+ pFillInfo->pLinearInfo->hasNext = false;
+ }
+ } break;
+ default:
+ ASSERT(0);
+ break;
+ }
+ ASSERT(pFillInfo->pos != FILL_POS_INVALID);
+}
+
+static bool checkResult(SStreamFillSupporter* pFillSup, TSKEY ts, uint64_t groupId) {
+ SWinKey key = {.groupId = groupId, .ts = ts};
+ if (tSimpleHashGet(pFillSup->pResMap, &key, sizeof(SWinKey)) != NULL) {
+ return false;
+ }
+ tSimpleHashPut(pFillSup->pResMap, &key, sizeof(SWinKey), NULL, 0);
+ return true;
+}
+
+static void buildFillResult(SResultRowData* pResRow, SStreamFillSupporter* pFillSup, TSKEY ts, SSDataBlock* pBlock) {
+ uint64_t groupId = pBlock->info.groupId;
+ if (pFillSup->hasDelete && !checkResult(pFillSup, ts, groupId)) {
+ return;
+ }
+ for (int32_t i = 0; i < pFillSup->numOfAllCols; ++i) {
+ SFillColInfo* pFillCol = pFillSup->pAllColInfo + i;
+ int32_t slotId = GET_DEST_SLOT_ID(pFillCol);
+ SColumnInfoData* pColData = taosArrayGet(pBlock->pDataBlock, slotId);
+ SFillInfo tmpInfo = {
+ .currentKey = ts,
+ .order = TSDB_ORDER_ASC,
+ .interval = pFillSup->interval,
+ };
+ bool filled = fillIfWindowPseudoColumn(&tmpInfo, pFillCol, pColData, pBlock->info.rows);
+ if (!filled) {
+ SResultCellData* pCell = getResultCell(pResRow, slotId);
+ setRowCell(pColData, pBlock->info.rows, pCell);
+ }
+ }
+ pBlock->info.rows++;
+}
+
+static bool hasRemainCalc(SStreamFillInfo* pFillInfo) {
+ if (pFillInfo->current != INT64_MIN && pFillInfo->current <= pFillInfo->end) {
+ return true;
+ }
+ return false;
+}
+
+static void doStreamFillNormal(SStreamFillSupporter* pFillSup, SStreamFillInfo* pFillInfo, SSDataBlock* pBlock) {
+ while (hasRemainCalc(pFillInfo) && pBlock->info.rows < pBlock->info.capacity) {
+ buildFillResult(pFillInfo->pResRow, pFillSup, pFillInfo->current, pBlock);
+ pFillInfo->current = taosTimeAdd(pFillInfo->current, pFillSup->interval.sliding, pFillSup->interval.slidingUnit,
+ pFillSup->interval.precision);
+ }
+}
+
+static void doStreamFillLinear(SStreamFillSupporter* pFillSup, SStreamFillInfo* pFillInfo, SSDataBlock* pBlock) {
+ while (hasRemainCalc(pFillInfo) && pBlock->info.rows < pBlock->info.capacity) {
+ uint64_t groupId = pBlock->info.groupId;
+ SWinKey key = {.groupId = groupId, .ts = pFillInfo->current};
+ if (pFillSup->hasDelete && !checkResult(pFillSup, pFillInfo->current, groupId)) {
+ pFillInfo->current = taosTimeAdd(pFillInfo->current, pFillSup->interval.sliding, pFillSup->interval.slidingUnit,
+ pFillSup->interval.precision);
+ pFillInfo->pLinearInfo->winIndex++;
+ continue;
+ }
+ pFillInfo->pLinearInfo->winIndex++;
+ for (int32_t i = 0; i < pFillSup->numOfAllCols; ++i) {
+ SFillColInfo* pFillCol = pFillSup->pAllColInfo + i;
+ SFillInfo tmp = {
+ .currentKey = pFillInfo->current,
+ .order = TSDB_ORDER_ASC,
+ .interval = pFillSup->interval,
+ };
+
+ int32_t slotId = GET_DEST_SLOT_ID(pFillCol);
+ SColumnInfoData* pColData = taosArrayGet(pBlock->pDataBlock, slotId);
+ int16_t type = pColData->info.type;
+ SResultCellData* pCell = getResultCell(pFillInfo->pResRow, slotId);
+ int32_t index = pBlock->info.rows;
+ if (pFillCol->notFillCol) {
+ bool filled = fillIfWindowPseudoColumn(&tmp, pFillCol, pColData, index);
+ if (!filled) {
+ setRowCell(pColData, index, pCell);
+ }
+ } else {
+ if (IS_VAR_DATA_TYPE(type) || type == TSDB_DATA_TYPE_BOOL || pCell->isNull) {
+ colDataAppendNULL(pColData, index);
+ continue;
+ }
+ double* pDelta = taosArrayGet(pFillInfo->pLinearInfo->pDeltaVal, slotId);
+ double vCell = 0;
+ GET_TYPED_DATA(vCell, double, pCell->type, pCell->pData);
+ vCell += (*pDelta) * pFillInfo->pLinearInfo->winIndex;
+ int64_t result = 0;
+ SET_TYPED_DATA(&result, pCell->type, vCell);
+ colDataAppend(pColData, index, (const char*)&result, false);
+ }
+ }
+ pFillInfo->current = taosTimeAdd(pFillInfo->current, pFillSup->interval.sliding, pFillSup->interval.slidingUnit,
+ pFillSup->interval.precision);
+ pBlock->info.rows++;
+ }
+}
+
+static void keepResultInDiscBuf(SOperatorInfo* pOperator, uint64_t groupId, SResultRowData* pRow, int32_t len) {
+ SWinKey key = {.groupId = groupId, .ts = pRow->key};
+ int32_t code = streamStateFillPut(pOperator->pTaskInfo->streamInfo.pState, &key, pRow->pRowVal, len);
+ ASSERT(code == TSDB_CODE_SUCCESS);
+}
+
+static void doStreamFillRange(SStreamFillInfo* pFillInfo, SStreamFillSupporter* pFillSup, SSDataBlock* pRes) {
+ if (pFillInfo->needFill == false) {
+ buildFillResult(&pFillSup->cur, pFillSup, pFillSup->cur.key, pRes);
+ return;
+ }
+
+ if (pFillInfo->pos == FILL_POS_START) {
+ buildFillResult(&pFillSup->cur, pFillSup, pFillSup->cur.key, pRes);
+ }
+ if (pFillInfo->type != TSDB_FILL_LINEAR) {
+ doStreamFillNormal(pFillSup, pFillInfo, pRes);
+ } else {
+ doStreamFillLinear(pFillSup, pFillInfo, pRes);
+
+ if (pFillInfo->pos == FILL_POS_MID) {
+ buildFillResult(&pFillSup->cur, pFillSup, pFillSup->cur.key, pRes);
+ }
+
+ if (pFillInfo->current > pFillInfo->end && pFillInfo->pLinearInfo->hasNext) {
+ pFillInfo->pLinearInfo->hasNext = false;
+ pFillInfo->pLinearInfo->winIndex = 0;
+ taosArrayClear(pFillInfo->pLinearInfo->pDeltaVal);
+ taosArrayAddAll(pFillInfo->pLinearInfo->pDeltaVal, pFillInfo->pLinearInfo->pNextDeltaVal);
+ pFillInfo->pResRow = &pFillSup->cur;
+ setFillKeyInfo(pFillSup->cur.key, pFillInfo->pLinearInfo->nextEnd, &pFillSup->interval, pFillInfo);
+ doStreamFillLinear(pFillSup, pFillInfo, pRes);
+ }
+ }
+ if (pFillInfo->pos == FILL_POS_END) {
+ buildFillResult(&pFillSup->cur, pFillSup, pFillSup->cur.key, pRes);
+ }
+}
+
+void keepBlockRowInDiscBuf(SOperatorInfo* pOperator, SStreamFillInfo* pFillInfo, SSDataBlock* pBlock, TSKEY* tsCol,
+ int32_t rowId, uint64_t groupId, int32_t rowSize) {
+ TSKEY ts = tsCol[rowId];
+ pFillInfo->nextRowKey = ts;
+ SResultRowData tmpNextRow = {.key = ts};
+ tmpNextRow.pRowVal = taosMemoryCalloc(1, rowSize);
+ transBlockToResultRow(pBlock, rowId, ts, &tmpNextRow);
+ keepResultInDiscBuf(pOperator, groupId, &tmpNextRow, rowSize);
+ taosMemoryFreeClear(tmpNextRow.pRowVal);
+}
+
+static void doFillResults(SOperatorInfo* pOperator, SStreamFillSupporter* pFillSup, SStreamFillInfo* pFillInfo,
+ SSDataBlock* pBlock, TSKEY* tsCol, int32_t rowId, SSDataBlock* pRes) {
+ uint64_t groupId = pBlock->info.groupId;
+ getWindowFromDiscBuf(pOperator, tsCol[rowId], groupId, pFillSup);
+ if (pFillSup->prev.key == pFillInfo->preRowKey) {
+ resetFillWindow(&pFillSup->prev);
+ }
+ setFillValueInfo(pBlock, tsCol[rowId], rowId, pFillSup, pFillInfo);
+ doStreamFillRange(pFillInfo, pFillSup, pRes);
+}
+
+static void doStreamFillImpl(SOperatorInfo* pOperator) {
+ SStreamFillOperatorInfo* pInfo = pOperator->info;
+ SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo;
+ SStreamFillSupporter* pFillSup = pInfo->pFillSup;
+ SStreamFillInfo* pFillInfo = pInfo->pFillInfo;
+ SSDataBlock* pBlock = pInfo->pSrcBlock;
+ uint64_t groupId = pBlock->info.groupId;
+ SSDataBlock* pRes = pInfo->pRes;
+ pRes->info.groupId = groupId;
+ if (hasRemainCalc(pFillInfo)) {
+ doStreamFillRange(pFillInfo, pFillSup, pRes);
+ }
+
+ SColumnInfoData* pTsCol = taosArrayGet(pInfo->pSrcBlock->pDataBlock, pInfo->primaryTsCol);
+ TSKEY* tsCol = (TSKEY*)pTsCol->pData;
+
+ if (pInfo->srcRowIndex == 0) {
+ keepBlockRowInDiscBuf(pOperator, pFillInfo, pBlock, tsCol, pInfo->srcRowIndex, groupId, pFillSup->rowSize);
+ SSDataBlock* preBlock = pInfo->pPrevSrcBlock;
+ if (preBlock->info.rows > 0) {
+ int preRowId = preBlock->info.rows - 1;
+ SColumnInfoData* pPreTsCol = taosArrayGet(preBlock->pDataBlock, pInfo->primaryTsCol);
+ doFillResults(pOperator, pFillSup, pFillInfo, preBlock, (TSKEY*)pPreTsCol->pData, preRowId, pRes);
+ }
+ pInfo->srcRowIndex++;
+ }
+
+ while (pInfo->srcRowIndex < pBlock->info.rows) {
+ TSKEY ts = tsCol[pInfo->srcRowIndex];
+ keepBlockRowInDiscBuf(pOperator, pFillInfo, pBlock, tsCol, pInfo->srcRowIndex, groupId, pFillSup->rowSize);
+ doFillResults(pOperator, pFillSup, pFillInfo, pBlock, tsCol, pInfo->srcRowIndex - 1, pRes);
+ if (pInfo->pRes->info.rows == pInfo->pRes->info.capacity) {
+ blockDataUpdateTsWindow(pRes, pInfo->primaryTsCol);
+ return;
+ }
+ pInfo->srcRowIndex++;
+ }
+ blockDataUpdateTsWindow(pRes, pInfo->primaryTsCol);
+ blockDataCleanup(pInfo->pPrevSrcBlock);
+ copyDataBlock(pInfo->pPrevSrcBlock, pInfo->pSrcBlock);
+ blockDataCleanup(pInfo->pSrcBlock);
+}
+
+static void buildDeleteRange(TSKEY start, TSKEY end, uint64_t groupId, SSDataBlock* delRes) {
+ SSDataBlock* pBlock = delRes;
+ SColumnInfoData* pStartCol = taosArrayGet(pBlock->pDataBlock, START_TS_COLUMN_INDEX);
+ SColumnInfoData* pEndCol = taosArrayGet(pBlock->pDataBlock, END_TS_COLUMN_INDEX);
+ SColumnInfoData* pUidCol = taosArrayGet(pBlock->pDataBlock, UID_COLUMN_INDEX);
+ SColumnInfoData* pGroupCol = taosArrayGet(pBlock->pDataBlock, GROUPID_COLUMN_INDEX);
+ SColumnInfoData* pCalStartCol = taosArrayGet(pBlock->pDataBlock, CALCULATE_START_TS_COLUMN_INDEX);
+ SColumnInfoData* pCalEndCol = taosArrayGet(pBlock->pDataBlock, CALCULATE_END_TS_COLUMN_INDEX);
+ colDataAppend(pStartCol, pBlock->info.rows, (const char*)&start, false);
+ colDataAppend(pEndCol, pBlock->info.rows, (const char*)&end, false);
+ colDataAppendNULL(pUidCol, pBlock->info.rows);
+ colDataAppend(pGroupCol, pBlock->info.rows, (const char*)&groupId, false);
+ colDataAppendNULL(pCalStartCol, pBlock->info.rows);
+ colDataAppendNULL(pCalEndCol, pBlock->info.rows);
+ pBlock->info.rows++;
+}
+
+static void buildDeleteResult(SStreamFillSupporter* pFillSup, TSKEY startTs, TSKEY endTs, uint64_t groupId,
+ SSDataBlock* delRes) {
+ if (hasPrevWindow(pFillSup)) {
+ TSKEY start = getNextWindowTs(pFillSup->prev.key, &pFillSup->interval);
+ buildDeleteRange(start, endTs, groupId, delRes);
+ } else if (hasNextWindow(pFillSup)) {
+ TSKEY end = getPrevWindowTs(pFillSup->next.key, &pFillSup->interval);
+ buildDeleteRange(startTs, end, groupId, delRes);
+ } else {
+ buildDeleteRange(startTs, endTs, groupId, delRes);
+ }
+}
+
+static void doDeleteFillResultImpl(SOperatorInfo* pOperator, TSKEY startTs, TSKEY endTs, uint64_t groupId) {
+ SStreamFillOperatorInfo* pInfo = pOperator->info;
+ getWindowFromDiscBuf(pOperator, startTs, groupId, pInfo->pFillSup);
+ setDeleteFillValueInfo(startTs, endTs, pInfo->pFillSup, pInfo->pFillInfo);
+ SWinKey key = {.ts = startTs, .groupId = groupId};
+ if (!pInfo->pFillInfo->needFill) {
+ streamStateFillDel(pOperator->pTaskInfo->streamInfo.pState, &key);
+ buildDeleteResult(pInfo->pFillSup, startTs, endTs, groupId, pInfo->pDelRes);
+ } else {
+ STimeRange tw = {
+ .skey = startTs,
+ .ekey = endTs,
+ .groupId = groupId,
+ };
+ taosArrayPush(pInfo->pFillInfo->delRanges, &tw);
+ while (key.ts <= endTs) {
+ key.ts = taosTimeAdd(key.ts, pInfo->pFillSup->interval.sliding, pInfo->pFillSup->interval.slidingUnit,
+ pInfo->pFillSup->interval.precision);
+ tSimpleHashPut(pInfo->pFillSup->pResMap, &key, sizeof(SWinKey), NULL, 0);
+ }
+ }
+}
+
+static void doDeleteFillFinalize(SOperatorInfo* pOperator) {
+ SStreamFillOperatorInfo* pInfo = pOperator->info;
+ SStreamFillInfo* pFillInfo = pInfo->pFillInfo;
+ int32_t size = taosArrayGetSize(pFillInfo->delRanges);
+ tSimpleHashClear(pInfo->pFillSup->pResMap);
+ for (; pFillInfo->delIndex < size; pFillInfo->delIndex++) {
+ STimeRange* range = taosArrayGet(pFillInfo->delRanges, pFillInfo->delIndex);
+ if (pInfo->pRes->info.groupId != 0 && pInfo->pRes->info.groupId != range->groupId) {
+ return;
+ }
+ getWindowFromDiscBuf(pOperator, range->skey, range->groupId, pInfo->pFillSup);
+ setDeleteFillValueInfo(range->skey, range->ekey, pInfo->pFillSup, pInfo->pFillInfo);
+ if (pInfo->pFillInfo->needFill) {
+ doStreamFillRange(pInfo->pFillInfo, pInfo->pFillSup, pInfo->pRes);
+ pInfo->pRes->info.groupId = range->groupId;
+ }
+ SWinKey key = {.ts = range->skey, .groupId = range->groupId};
+ streamStateFillDel(pOperator->pTaskInfo->streamInfo.pState, &key);
+ }
+}
+
+static void doDeleteFillResult(SOperatorInfo* pOperator) {
+ SStreamFillOperatorInfo* pInfo = pOperator->info;
+ SStreamFillSupporter* pFillSup = pInfo->pFillSup;
+ SStreamFillInfo* pFillInfo = pInfo->pFillInfo;
+ SSDataBlock* pBlock = pInfo->pSrcDelBlock;
+ SSDataBlock* pRes = pInfo->pRes;
+ SSDataBlock* pDelRes = pInfo->pDelRes;
+
+ SColumnInfoData* pStartCol = taosArrayGet(pBlock->pDataBlock, START_TS_COLUMN_INDEX);
+ TSKEY* tsStarts = (TSKEY*)pStartCol->pData;
+ SColumnInfoData* pGroupCol = taosArrayGet(pBlock->pDataBlock, GROUPID_COLUMN_INDEX);
+ uint64_t* groupIds = (uint64_t*)pGroupCol->pData;
+ while (pInfo->srcDelRowIndex < pBlock->info.rows) {
+ TSKEY ts = tsStarts[pInfo->srcDelRowIndex];
+ TSKEY endTs = ts;
+ uint64_t groupId = groupIds[pInfo->srcDelRowIndex];
+ SWinKey key = {.ts = ts, .groupId = groupId};
+ SStreamStateCur* pCur = streamStateGetAndCheckCur(pOperator->pTaskInfo->streamInfo.pState, &key);
+ if (!pCur) {
+ pInfo->srcDelRowIndex++;
+ continue;
+ }
+
+ SWinKey nextKey = {.groupId = groupId, .ts = ts};
+ while (pInfo->srcDelRowIndex < pBlock->info.rows) {
+ void* nextVal = NULL;
+ int32_t nextLen = 0;
+ TSKEY delTs = tsStarts[pInfo->srcDelRowIndex];
+ uint64_t delGroupId = groupIds[pInfo->srcDelRowIndex];
+ int32_t code = TSDB_CODE_SUCCESS;
+ if (groupId != delGroupId) {
+ break;
+ }
+ if (delTs > nextKey.ts) {
+ break;
+ }
+ endTs = delTs;
+ SWinKey delKey = {.groupId = delGroupId, .ts = delTs};
+ if (delTs == nextKey.ts) {
+ code = streamStateCurNext(pOperator->pTaskInfo->streamInfo.pState, pCur);
+ if (code == TSDB_CODE_SUCCESS) {
+ code = streamStateGetGroupKVByCur(pCur, &nextKey, (const void**)&nextVal, &nextLen);
+ }
+ if (delTs != ts) {
+ streamStateFillDel(pOperator->pTaskInfo->streamInfo.pState, &delKey);
+ }
+ if (code != TSDB_CODE_SUCCESS) {
+ break;
+ }
+ }
+ pInfo->srcDelRowIndex++;
+ }
+ doDeleteFillResultImpl(pOperator, ts, endTs, groupId);
+ }
+ pFillInfo->current = pFillInfo->end + 1;
+}
+
+static void resetStreamFillInfo(SStreamFillOperatorInfo* pInfo) {
+ blockDataCleanup(pInfo->pPrevSrcBlock);
+ tSimpleHashClear(pInfo->pFillSup->pResMap);
+ pInfo->pFillSup->hasDelete = false;
+ taosArrayClear(pInfo->pFillInfo->delRanges);
+ pInfo->pFillInfo->delIndex = 0;
+}
+
+static void doApplyStreamScalarCalculation(SOperatorInfo* pOperator, SSDataBlock* pSrcBlock, SSDataBlock* pDstBlock) {
+ SStreamFillOperatorInfo* pInfo = pOperator->info;
+ SExprSupp* pSup = &pOperator->exprSupp;
+
+ blockDataCleanup(pDstBlock);
+ blockDataEnsureCapacity(pDstBlock, pSrcBlock->info.rows);
+ setInputDataBlock(pOperator, pSup->pCtx, pSrcBlock, TSDB_ORDER_ASC, MAIN_SCAN, false);
+ projectApplyFunctions(pSup->pExprInfo, pDstBlock, pSrcBlock, pSup->pCtx, pSup->numOfExprs, NULL);
+ pDstBlock->info.groupId = pSrcBlock->info.groupId;
+
+ SColumnInfoData* pDst = taosArrayGet(pDstBlock->pDataBlock, pInfo->primaryTsCol);
+ SColumnInfoData* pSrc = taosArrayGet(pSrcBlock->pDataBlock, pInfo->primarySrcSlotId);
+ colDataAssign(pDst, pSrc, pDstBlock->info.rows, &pDstBlock->info);
+
+ int32_t numOfNotFill = pInfo->pFillSup->numOfAllCols - pInfo->pFillSup->numOfFillCols;
+ for (int32_t i = 0; i < numOfNotFill; ++i) {
+ SFillColInfo* pCol = &pInfo->pFillSup->pAllColInfo[i + pInfo->pFillSup->numOfFillCols];
+ ASSERT(pCol->notFillCol);
+
+ SExprInfo* pExpr = pCol->pExpr;
+ int32_t srcSlotId = pExpr->base.pParam[0].pCol->slotId;
+ int32_t dstSlotId = pExpr->base.resSchema.slotId;
+
+ SColumnInfoData* pDst1 = taosArrayGet(pDstBlock->pDataBlock, dstSlotId);
+ SColumnInfoData* pSrc1 = taosArrayGet(pSrcBlock->pDataBlock, srcSlotId);
+ colDataAssign(pDst1, pSrc1, pDstBlock->info.rows, &pDstBlock->info);
+ }
+ blockDataUpdateTsWindow(pDstBlock, pInfo->primaryTsCol);
+}
+
+static SSDataBlock* doStreamFill(SOperatorInfo* pOperator) {
+ SStreamFillOperatorInfo* pInfo = pOperator->info;
+ SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo;
+
+ if (pOperator->status == OP_EXEC_DONE) {
+ return NULL;
+ }
+ blockDataCleanup(pInfo->pRes);
+ if (pOperator->status == OP_RES_TO_RETURN) {
+ if (hasRemainCalc(pInfo->pFillInfo)) {
+ doStreamFillRange(pInfo->pFillInfo, pInfo->pFillSup, pInfo->pRes);
+ if (pInfo->pRes->info.rows > 0) {
+ return pInfo->pRes;
+ }
+ }
+ doDeleteFillFinalize(pOperator);
+ if (pInfo->pRes->info.rows > 0) {
+ printDataBlock(pInfo->pRes, "stream fill");
+ return pInfo->pRes;
+ }
+ doSetOperatorCompleted(pOperator);
+ resetStreamFillInfo(pInfo);
+ return NULL;
+ }
+
+ SSDataBlock* fillResult = NULL;
+ SOperatorInfo* downstream = pOperator->pDownstream[0];
+ while (1) {
+ if (pInfo->srcRowIndex >= pInfo->pSrcBlock->info.rows) {
+ // If there are delete datablocks, we receive them first.
+ SSDataBlock* pBlock = downstream->fpSet.getNextFn(downstream);
+ if (pBlock == NULL) {
+ pOperator->status = OP_RES_TO_RETURN;
+ SSDataBlock* preBlock = pInfo->pPrevSrcBlock;
+ if (preBlock->info.rows > 0) {
+ int preRowId = preBlock->info.rows - 1;
+ SColumnInfoData* pPreTsCol = taosArrayGet(preBlock->pDataBlock, pInfo->primaryTsCol);
+ doFillResults(pOperator, pInfo->pFillSup, pInfo->pFillInfo, preBlock, (TSKEY*)pPreTsCol->pData, preRowId,
+ pInfo->pRes);
+ }
+ pInfo->pFillInfo->preRowKey = INT64_MIN;
+ if (pInfo->pRes->info.rows > 0) {
+ printDataBlock(pInfo->pRes, "stream fill");
+ return pInfo->pRes;
+ }
+ break;
+ }
+ printDataBlock(pBlock, "stream fill recv");
+
+ switch (pBlock->info.type) {
+ case STREAM_RETRIEVE:
+ return pBlock;
+ case STREAM_DELETE_RESULT: {
+ pInfo->pSrcDelBlock = pBlock;
+ pInfo->srcDelRowIndex = 0;
+ blockDataCleanup(pInfo->pDelRes);
+ pInfo->pFillSup->hasDelete = true;
+ doDeleteFillResult(pOperator);
+ if (pInfo->pDelRes->info.rows > 0) {
+ printDataBlock(pInfo->pDelRes, "stream fill delete");
+ return pInfo->pDelRes;
+ }
+ continue;
+ } break;
+ case STREAM_NORMAL:
+ case STREAM_INVALID: {
+ doApplyStreamScalarCalculation(pOperator, pBlock, pInfo->pSrcBlock);
+ pInfo->srcRowIndex = 0;
+ } break;
+ default:
+ ASSERT(0);
+ break;
+ }
+ }
+
+ doStreamFillImpl(pOperator);
+ doFilter(pInfo->pCondition, pInfo->pRes, pInfo->pColMatchColInfo);
+ pOperator->resultInfo.totalRows += pInfo->pRes->info.rows;
+ if (pInfo->pRes->info.rows > 0) {
+ break;
+ }
+ }
+ if (pOperator->status == OP_RES_TO_RETURN) {
+ doDeleteFillFinalize(pOperator);
+ }
+
+ if (pInfo->pRes->info.rows == 0) {
+ doSetOperatorCompleted(pOperator);
+ resetStreamFillInfo(pInfo);
+ return NULL;
+ }
+
+ pOperator->resultInfo.totalRows += pInfo->pRes->info.rows;
+ printDataBlock(pInfo->pRes, "stream fill");
+ return pInfo->pRes;
+}
+
+static int32_t initResultBuf(SStreamFillSupporter* pFillSup) {
+ pFillSup->rowSize = sizeof(SResultCellData) * pFillSup->numOfAllCols;
+ for (int i = 0; i < pFillSup->numOfAllCols; i++) {
+ SFillColInfo* pCol = &pFillSup->pAllColInfo[i];
+ SResSchema* pSchema = &pCol->pExpr->base.resSchema;
+ pFillSup->rowSize += pSchema->bytes;
+ }
+ pFillSup->next.key = INT64_MIN;
+ pFillSup->nextNext.key = INT64_MIN;
+ pFillSup->prev.key = INT64_MIN;
+ pFillSup->next.pRowVal = NULL;
+ pFillSup->nextNext.pRowVal = NULL;
+ pFillSup->prev.pRowVal = NULL;
+ return TSDB_CODE_SUCCESS;
+}
+
+static SStreamFillSupporter* initStreamFillSup(SStreamFillPhysiNode* pPhyFillNode, SInterval* pInterval,
+ SExprInfo* pFillExprInfo, int32_t numOfFillCols) {
+ SStreamFillSupporter* pFillSup = taosMemoryCalloc(1, sizeof(SStreamFillSupporter));
+ if (!pFillSup) {
+ return NULL;
+ }
+ pFillSup->numOfFillCols = numOfFillCols;
+ int32_t numOfNotFillCols = 0;
+ SExprInfo* pNotFillExprInfo = createExprInfo(pPhyFillNode->pNotFillExprs, NULL, &numOfNotFillCols);
+ pFillSup->pAllColInfo = createFillColInfo(pFillExprInfo, pFillSup->numOfFillCols, pNotFillExprInfo, numOfNotFillCols,
+ (const SNodeListNode*)(pPhyFillNode->pValues));
+ pFillSup->type = convertFillType(pPhyFillNode->mode);
+ pFillSup->numOfAllCols = pFillSup->numOfFillCols + numOfNotFillCols;
+ pFillSup->interval = *pInterval;
+
+ int32_t code = initResultBuf(pFillSup);
+ if (code != TSDB_CODE_SUCCESS) {
+ destroyStreamFillSupporter(pFillSup);
+ return NULL;
+ }
+ _hash_fn_t hashFn = taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY);
+ pFillSup->pResMap = tSimpleHashInit(16, hashFn);
+ pFillSup->hasDelete = false;
+ return pFillSup;
+}
+
+SStreamFillInfo* initStreamFillInfo(SStreamFillSupporter* pFillSup, SSDataBlock* pRes) {
+ SStreamFillInfo* pFillInfo = taosMemoryCalloc(1, sizeof(SStreamFillInfo));
+ pFillInfo->start = INT64_MIN;
+ pFillInfo->current = INT64_MIN;
+ pFillInfo->end = INT64_MIN;
+ pFillInfo->preRowKey = INT64_MIN;
+ pFillInfo->needFill = false;
+ pFillInfo->pLinearInfo = taosMemoryCalloc(1, sizeof(SStreamFillLinearInfo));
+ pFillInfo->pLinearInfo->hasNext = false;
+ pFillInfo->pLinearInfo->nextEnd = INT64_MIN;
+ pFillInfo->pLinearInfo->pDeltaVal = NULL;
+ pFillInfo->pLinearInfo->pNextDeltaVal = NULL;
+ if (pFillSup->type == TSDB_FILL_LINEAR) {
+ pFillInfo->pLinearInfo->pDeltaVal = taosArrayInit(pFillSup->numOfAllCols, sizeof(double));
+ pFillInfo->pLinearInfo->pNextDeltaVal = taosArrayInit(pFillSup->numOfAllCols, sizeof(double));
+ for (int32_t i = 0; i < pFillSup->numOfAllCols; i++) {
+ double value = 0.0;
+ taosArrayPush(pFillInfo->pLinearInfo->pDeltaVal, &value);
+ taosArrayPush(pFillInfo->pLinearInfo->pNextDeltaVal, &value);
+ }
+ }
+ pFillInfo->pLinearInfo->winIndex = 0;
+
+ pFillInfo->pResRow = NULL;
+ if (pFillSup->type == TSDB_FILL_SET_VALUE || pFillSup->type == TSDB_FILL_NULL) {
+ pFillInfo->pResRow = taosMemoryCalloc(1, sizeof(SResultRowData));
+ pFillInfo->pResRow->key = INT64_MIN;
+ pFillInfo->pResRow->pRowVal = taosMemoryCalloc(1, pFillSup->rowSize);
+ for (int32_t i = 0; i < pFillSup->numOfAllCols; ++i) {
+ SColumnInfoData* pColData = taosArrayGet(pRes->pDataBlock, i);
+ SResultCellData* pCell = getResultCell(pFillInfo->pResRow, i);
+ pCell->bytes = pColData->info.bytes;
+ pCell->type = pColData->info.type;
+ }
+ }
+
+ pFillInfo->type = pFillSup->type;
+ pFillInfo->delRanges = taosArrayInit(16, sizeof(STimeRange));
+ pFillInfo->delIndex = 0;
+ return pFillInfo;
+}
+
+SOperatorInfo* createStreamFillOperatorInfo(SOperatorInfo* downstream, SStreamFillPhysiNode* pPhyFillNode,
+ SExecTaskInfo* pTaskInfo) {
+ SStreamFillOperatorInfo* pInfo = taosMemoryCalloc(1, sizeof(SStreamFillOperatorInfo));
+ SOperatorInfo* pOperator = taosMemoryCalloc(1, sizeof(SOperatorInfo));
+ if (pInfo == NULL || pOperator == NULL) {
+ goto _error;
+ }
+
+ SInterval* pInterval = QUERY_NODE_PHYSICAL_PLAN_STREAM_FINAL_INTERVAL == downstream->operatorType
+ ? &((SStreamFinalIntervalOperatorInfo*)downstream->info)->interval
+ : &((SStreamIntervalOperatorInfo*)downstream->info)->interval;
+ int32_t numOfFillCols = 0;
+ SExprInfo* pFillExprInfo = createExprInfo(pPhyFillNode->pFillExprs, NULL, &numOfFillCols);
+ pInfo->pFillSup = initStreamFillSup(pPhyFillNode, pInterval, pFillExprInfo, numOfFillCols);
+ if (!pInfo->pFillSup) {
+ goto _error;
+ }
+
+ SResultInfo* pResultInfo = &pOperator->resultInfo;
+ initResultSizeInfo(&pOperator->resultInfo, 4096);
+ pInfo->pRes = createResDataBlock(pPhyFillNode->node.pOutputDataBlockDesc);
+ pInfo->pSrcBlock = createResDataBlock(pPhyFillNode->node.pOutputDataBlockDesc);
+ pInfo->pPrevSrcBlock = createResDataBlock(pPhyFillNode->node.pOutputDataBlockDesc);
+ blockDataEnsureCapacity(pInfo->pRes, pOperator->resultInfo.capacity);
+ blockDataEnsureCapacity(pInfo->pSrcBlock, pOperator->resultInfo.capacity);
+ blockDataEnsureCapacity(pInfo->pPrevSrcBlock, pOperator->resultInfo.capacity);
+
+ pInfo->pFillInfo = initStreamFillInfo(pInfo->pFillSup, pInfo->pRes);
+ if (!pInfo->pFillInfo) {
+ goto _error;
+ }
+
+ if (pInfo->pFillInfo->type == TSDB_FILL_SET_VALUE) {
+ for (int32_t i = 0; i < pInfo->pFillSup->numOfAllCols; ++i) {
+ SFillColInfo* pFillCol = pInfo->pFillSup->pAllColInfo + i;
+ int32_t slotId = GET_DEST_SLOT_ID(pFillCol);
+ SResultCellData* pCell = getResultCell(pInfo->pFillInfo->pResRow, slotId);
+ SVariant* pVar = &(pFillCol->fillVal);
+ if (pCell->type == TSDB_DATA_TYPE_FLOAT) {
+ float v = 0;
+ GET_TYPED_DATA(v, float, pVar->nType, &pVar->i);
+ SET_TYPED_DATA(pCell->pData, pCell->type, v);
+ } else if (pCell->type == TSDB_DATA_TYPE_DOUBLE) {
+ double v = 0;
+ GET_TYPED_DATA(v, double, pVar->nType, &pVar->i);
+ SET_TYPED_DATA(pCell->pData, pCell->type, v);
+ } else if (IS_SIGNED_NUMERIC_TYPE(pCell->type)) {
+ int64_t v = 0;
+ GET_TYPED_DATA(v, int64_t, pVar->nType, &pVar->i);
+ SET_TYPED_DATA(pCell->pData, pCell->type, v);
+ } else {
+ pCell->isNull = true;
+ }
+ }
+ } else if (pInfo->pFillInfo->type == TSDB_FILL_NULL) {
+ for (int32_t i = 0; i < pInfo->pFillSup->numOfAllCols; ++i) {
+ SFillColInfo* pFillCol = pInfo->pFillSup->pAllColInfo + i;
+ int32_t slotId = GET_DEST_SLOT_ID(pFillCol);
+ SResultCellData* pCell = getResultCell(pInfo->pFillInfo->pResRow, slotId);
+ pCell->isNull = true;
+ }
+ }
+
+ pInfo->pDelRes = createSpecialDataBlock(STREAM_DELETE_RESULT);
+ blockDataEnsureCapacity(pInfo->pDelRes, pOperator->resultInfo.capacity);
+
+ pInfo->primaryTsCol = ((STargetNode*)pPhyFillNode->pWStartTs)->slotId;
+ pInfo->primarySrcSlotId = ((SColumnNode*)((STargetNode*)pPhyFillNode->pWStartTs)->pExpr)->slotId;
+
+ int32_t numOfOutputCols = 0;
+ SArray* pColMatchColInfo = extractColMatchInfo(pPhyFillNode->pFillExprs, pPhyFillNode->node.pOutputDataBlockDesc,
+ &numOfOutputCols, COL_MATCH_FROM_SLOT_ID);
+ pInfo->pCondition = pPhyFillNode->node.pConditions;
+ pInfo->pColMatchColInfo = pColMatchColInfo;
+ initExprSupp(&pOperator->exprSupp, pFillExprInfo, numOfFillCols);
+ pInfo->srcRowIndex = 0;
+
+ pOperator->name = "FillOperator";
+ pOperator->blocking = false;
+ pOperator->status = OP_NOT_OPENED;
+ pOperator->operatorType = QUERY_NODE_PHYSICAL_PLAN_STREAM_FILL;
+ pOperator->info = pInfo;
+ pOperator->pTaskInfo = pTaskInfo;
+ pOperator->fpSet = createOperatorFpSet(operatorDummyOpenFn, doStreamFill, NULL, NULL, destroyStreamFillOperatorInfo,
+ NULL, NULL, NULL);
+
+ int32_t code = appendDownstream(pOperator, &downstream, 1);
+ if (code != TSDB_CODE_SUCCESS) {
+ goto _error;
+ }
+ return pOperator;
+
+_error:
+ destroyStreamFillOperatorInfo(pInfo);
+ taosMemoryFreeClear(pOperator);
+ return NULL;
+}
diff --git a/source/libs/executor/src/timewindowoperator.c b/source/libs/executor/src/timewindowoperator.c
index cadaf4a9d522971a81e16b0215d4c5967f30f676..b4e2b73889918a69775121104536dad4a49e3371 100644
--- a/source/libs/executor/src/timewindowoperator.c
+++ b/source/libs/executor/src/timewindowoperator.c
@@ -33,26 +33,19 @@ typedef struct SPullWindowInfo {
uint64_t groupId;
} SPullWindowInfo;
+typedef struct SOpenWindowInfo {
+ SResultRowPosition pos;
+ uint64_t groupId;
+} SOpenWindowInfo;
+
static SSDataBlock* doStreamSessionAgg(SOperatorInfo* pOperator);
static int64_t* extractTsCol(SSDataBlock* pBlock, const SIntervalAggOperatorInfo* pInfo);
-static SResultRowPosition addToOpenWindowList(SResultRowInfo* pResultRowInfo, const SResultRow* pResult);
+static SResultRowPosition addToOpenWindowList(SResultRowInfo* pResultRowInfo, const SResultRow* pResult,
+ uint64_t groupId);
static void doCloseWindow(SResultRowInfo* pResultRowInfo, const SIntervalAggOperatorInfo* pInfo, SResultRow* pResult);
-///*
-// * There are two cases to handle:
-// *
-// * 1. Query range is not set yet (queryRangeSet = 0). we need to set the query range info, including
-// * pQueryAttr->lastKey, pQueryAttr->window.skey, and pQueryAttr->eKey.
-// * 2. Query range is set and query is in progress. There may be another result with the same query ranges to be
-// * merged during merge stage. In this case, we need the pTableQueryInfo->lastResRows to decide if there
-// * is a previous result generated or not.
-// */
-// static void setIntervalQueryRange(STableQueryInfo* pTableQueryInfo, TSKEY key, STimeWindow* pQRange) {
-// // do nothing
-//}
-
static TSKEY getStartTsKey(STimeWindow* win, const TSKEY* tsCols) { return tsCols == NULL ? win->skey : tsCols[0]; }
static int32_t setTimeWindowOutputBuf(SResultRowInfo* pResultRowInfo, STimeWindow* win, bool masterscan,
@@ -278,6 +271,10 @@ static void getNextTimeWindow(SInterval* pInterval, int32_t precision, int32_t o
tw->ekey -= 1;
}
+void getNextIntervalWindow(SInterval* pInterval, STimeWindow* tw, int32_t order) {
+ getNextTimeWindow(pInterval, pInterval->precision, order, tw);
+}
+
void doTimeWindowInterpolation(SArray* pPrevValues, SArray* pDataBlock, TSKEY prevTs, int32_t prevRowIndex, TSKEY curTs,
int32_t curRowIndex, TSKEY windowKey, int32_t type, SExprSupp* pSup) {
SqlFunctionCtx* pCtx = pSup->pCtx;
@@ -596,16 +593,16 @@ static void doInterpUnclosedTimeWindow(SOperatorInfo* pOperatorInfo, int32_t num
SIntervalAggOperatorInfo* pInfo = (SIntervalAggOperatorInfo*)pOperatorInfo->info;
SExprSupp* pSup = &pOperatorInfo->exprSupp;
- int32_t startPos = 0;
- int32_t numOfOutput = pSup->numOfExprs;
- uint64_t groupId = pBlock->info.groupId;
+ int32_t startPos = 0;
+ int32_t numOfOutput = pSup->numOfExprs;
SResultRow* pResult = NULL;
while (1) {
- SListNode* pn = tdListGetHead(pResultRowInfo->openWindow);
-
- SResultRowPosition* p1 = (SResultRowPosition*)pn->data;
+ SListNode* pn = tdListGetHead(pResultRowInfo->openWindow);
+ SOpenWindowInfo* pOpenWin = (SOpenWindowInfo*)pn->data;
+ uint64_t groupId = pOpenWin->groupId;
+ SResultRowPosition* p1 = &pOpenWin->pos;
if (p->pageId == p1->pageId && p->offset == p1->offset) {
break;
}
@@ -616,7 +613,8 @@ static void doInterpUnclosedTimeWindow(SOperatorInfo* pOperatorInfo, int32_t num
if (pr->closed) {
ASSERT(isResultRowInterpolated(pr, RESULT_ROW_START_INTERP) &&
isResultRowInterpolated(pr, RESULT_ROW_END_INTERP));
- tdListPopHead(pResultRowInfo->openWindow);
+ SListNode* pNode = tdListPopHead(pResultRowInfo->openWindow);
+ taosMemoryFree(pNode);
continue;
}
@@ -631,18 +629,22 @@ static void doInterpUnclosedTimeWindow(SOperatorInfo* pOperatorInfo, int32_t num
SGroupKeys* pTsKey = taosArrayGet(pInfo->pPrevValues, 0);
int64_t prevTs = *(int64_t*)pTsKey->pData;
- doTimeWindowInterpolation(pInfo->pPrevValues, pBlock->pDataBlock, prevTs, -1, tsCols[startPos], startPos, w.ekey,
- RESULT_ROW_END_INTERP, pSup);
+ if (groupId == pBlock->info.groupId) {
+ doTimeWindowInterpolation(pInfo->pPrevValues, pBlock->pDataBlock, prevTs, -1, tsCols[startPos], startPos, w.ekey,
+ RESULT_ROW_END_INTERP, pSup);
+ }
setResultRowInterpo(pResult, RESULT_ROW_END_INTERP);
setNotInterpoWindowKey(pSup->pCtx, numOfExprs, RESULT_ROW_START_INTERP);
+ updateTimeWindowInfo(&pInfo->twAggSup.timeWindowData, &w, true);
doApplyFunctions(pTaskInfo, pSup->pCtx, &pInfo->twAggSup.timeWindowData, startPos, 0, pBlock->info.rows,
numOfExprs);
if (isResultRowInterpolated(pResult, RESULT_ROW_END_INTERP)) {
closeResultRow(pr);
- tdListPopHead(pResultRowInfo->openWindow);
+ SListNode* pNode = tdListPopHead(pResultRowInfo->openWindow);
+ taosMemoryFree(pNode);
} else { // the remains are can not be closed yet.
break;
}
@@ -869,6 +871,10 @@ static int32_t saveWinResultRow(SResultRow* result, uint64_t groupId, SHashObj*
return saveWinResult(result->win.skey, result->pageId, result->offset, groupId, pUpdatedMap);
}
+static int32_t saveWinResultInfo(TSKEY ts, uint64_t groupId, SHashObj* pUpdatedMap) {
+ return saveWinResult(ts, -1, -1, groupId, pUpdatedMap);
+}
+
static int32_t saveResultRow(SResultRow* result, uint64_t groupId, SArray* pUpdated) {
return saveResult(result->win.skey, result->pageId, result->offset, groupId, pUpdated);
}
@@ -889,7 +895,7 @@ int64_t getWinReskey(void* data, int32_t index) {
int32_t compareWinRes(void* pKey, void* data, int32_t index) {
SArray* res = (SArray*)data;
- SWinKey* pos = taosArrayGetP(res, index);
+ SWinKey* pos = taosArrayGet(res, index);
SResKeyPos* pData = (SResKeyPos*)pKey;
if (*(int64_t*)pData->key == pos->ts) {
if (pData->groupId > pos->groupId) {
@@ -911,23 +917,28 @@ static void removeDeleteResults(SHashObj* pUpdatedMap, SArray* pDelWins) {
}
void* pIte = NULL;
while ((pIte = taosHashIterate(pUpdatedMap, pIte)) != NULL) {
- SResKeyPos* pResKey = (SResKeyPos*)pIte;
+ SResKeyPos* pResKey = *(SResKeyPos**)pIte;
int32_t index = binarySearchCom(pDelWins, delSize, pResKey, TSDB_ORDER_DESC, compareWinRes);
if (index >= 0 && 0 == compareWinRes(pResKey, pDelWins, index)) {
taosArrayRemove(pDelWins, index);
+ delSize = taosArrayGetSize(pDelWins);
}
}
}
-bool isOverdue(TSKEY ts, STimeWindowAggSupp* pSup) {
- ASSERT(pSup->maxTs == INT64_MIN || pSup->maxTs > 0);
- return pSup->maxTs != INT64_MIN && ts < pSup->maxTs - pSup->waterMark;
+bool isOverdue(TSKEY ekey, STimeWindowAggSupp* pTwSup) {
+ ASSERT(pTwSup->maxTs == INT64_MIN || pTwSup->maxTs > 0);
+ return pTwSup->maxTs != INT64_MIN && ekey < pTwSup->maxTs - pTwSup->waterMark;
}
-bool isCloseWindow(STimeWindow* pWin, STimeWindowAggSupp* pSup) { return isOverdue(pWin->ekey, pSup); }
+bool isCloseWindow(STimeWindow* pWin, STimeWindowAggSupp* pTwSup) { return isOverdue(pWin->ekey, pTwSup); }
+
+bool needDeleteWindowBuf(STimeWindow* pWin, STimeWindowAggSupp* pTwSup) {
+ return pTwSup->maxTs != INT64_MIN && pWin->ekey < pTwSup->maxTs - pTwSup->deleteMark;
+}
static void hashIntervalAgg(SOperatorInfo* pOperatorInfo, SResultRowInfo* pResultRowInfo, SSDataBlock* pBlock,
- int32_t scanFlag, SHashObj* pUpdatedMap) {
+ int32_t scanFlag) {
SIntervalAggOperatorInfo* pInfo = (SIntervalAggOperatorInfo*)pOperatorInfo->info;
SExecTaskInfo* pTaskInfo = pOperatorInfo->pTaskInfo;
@@ -943,21 +954,11 @@ static void hashIntervalAgg(SOperatorInfo* pOperatorInfo, SResultRowInfo* pResul
STimeWindow win =
getActiveTimeWindow(pInfo->aggSup.pResultBuf, pResultRowInfo, ts, &pInfo->interval, pInfo->inputOrder);
- int32_t ret = TSDB_CODE_SUCCESS;
- if ((!pInfo->ignoreExpiredData || !isCloseWindow(&win, &pInfo->twAggSup)) &&
- inSlidingWindow(&pInfo->interval, &win, &pBlock->info)) {
- ret = setTimeWindowOutputBuf(pResultRowInfo, &win, (scanFlag == MAIN_SCAN), &pResult, tableGroupId, pSup->pCtx,
- numOfOutput, pSup->rowEntryInfoOffset, &pInfo->aggSup, pTaskInfo);
- if (ret != TSDB_CODE_SUCCESS || pResult == NULL) {
- T_LONG_JMP(pTaskInfo->env, TSDB_CODE_QRY_OUT_OF_MEMORY);
- }
-
- if (pInfo->execModel == OPTR_EXEC_MODEL_STREAM && pInfo->twAggSup.calTrigger == STREAM_TRIGGER_AT_ONCE) {
- saveWinResultRow(pResult, tableGroupId, pUpdatedMap);
- setResultBufPageDirty(pInfo->aggSup.pResultBuf, &pResultRowInfo->cur);
- }
+ int32_t ret = setTimeWindowOutputBuf(pResultRowInfo, &win, (scanFlag == MAIN_SCAN), &pResult, tableGroupId,
+ pSup->pCtx, numOfOutput, pSup->rowEntryInfoOffset, &pInfo->aggSup, pTaskInfo);
+ if (ret != TSDB_CODE_SUCCESS || pResult == NULL) {
+ T_LONG_JMP(pTaskInfo->env, TSDB_CODE_QRY_OUT_OF_MEMORY);
}
-
TSKEY ekey = ascScan ? win.ekey : win.skey;
int32_t forwardRows =
getNumOfRowsInTimeWindow(&pBlock->info, tsCols, startPos, ekey, binarySearchForKey, NULL, pInfo->inputOrder);
@@ -965,7 +966,7 @@ static void hashIntervalAgg(SOperatorInfo* pOperatorInfo, SResultRowInfo* pResul
// prev time window not interpolation yet.
if (pInfo->timeWindowInterpo) {
- SResultRowPosition pos = addToOpenWindowList(pResultRowInfo, pResult);
+ SResultRowPosition pos = addToOpenWindowList(pResultRowInfo, pResult, tableGroupId);
doInterpUnclosedTimeWindow(pOperatorInfo, numOfOutput, pResultRowInfo, pBlock, scanFlag, tsCols, &pos);
// restore current time window
@@ -979,12 +980,9 @@ static void hashIntervalAgg(SOperatorInfo* pOperatorInfo, SResultRowInfo* pResul
doWindowBorderInterpolation(pInfo, pBlock, pResult, &win, startPos, forwardRows, pSup);
}
- if ((!pInfo->ignoreExpiredData || !isCloseWindow(&win, &pInfo->twAggSup)) &&
- inSlidingWindow(&pInfo->interval, &win, &pBlock->info)) {
- updateTimeWindowInfo(&pInfo->twAggSup.timeWindowData, &win, true);
- doApplyFunctions(pTaskInfo, pSup->pCtx, &pInfo->twAggSup.timeWindowData, startPos, forwardRows, pBlock->info.rows,
- numOfOutput);
- }
+ updateTimeWindowInfo(&pInfo->twAggSup.timeWindowData, &win, true);
+ doApplyFunctions(pTaskInfo, pSup->pCtx, &pInfo->twAggSup.timeWindowData, startPos, forwardRows, pBlock->info.rows,
+ numOfOutput);
doCloseWindow(pResultRowInfo, pInfo, pResult);
@@ -995,13 +993,6 @@ static void hashIntervalAgg(SOperatorInfo* pOperatorInfo, SResultRowInfo* pResul
if (startPos < 0) {
break;
}
- if (pInfo->ignoreExpiredData && isCloseWindow(&nextWin, &pInfo->twAggSup)) {
- ekey = ascScan ? nextWin.ekey : nextWin.skey;
- forwardRows =
- getNumOfRowsInTimeWindow(&pBlock->info, tsCols, startPos, ekey, binarySearchForKey, NULL, pInfo->inputOrder);
- continue;
- }
-
// null data, failed to allocate more memory buffer
int32_t code = setTimeWindowOutputBuf(pResultRowInfo, &nextWin, (scanFlag == MAIN_SCAN), &pResult, tableGroupId,
pSup->pCtx, numOfOutput, pSup->rowEntryInfoOffset, &pInfo->aggSup, pTaskInfo);
@@ -1009,18 +1000,21 @@ static void hashIntervalAgg(SOperatorInfo* pOperatorInfo, SResultRowInfo* pResul
T_LONG_JMP(pTaskInfo->env, TSDB_CODE_QRY_OUT_OF_MEMORY);
}
- if (pInfo->execModel == OPTR_EXEC_MODEL_STREAM && pInfo->twAggSup.calTrigger == STREAM_TRIGGER_AT_ONCE) {
- saveWinResultRow(pResult, tableGroupId, pUpdatedMap);
- setResultBufPageDirty(pInfo->aggSup.pResultBuf, &pResultRowInfo->cur);
- }
-
ekey = ascScan ? nextWin.ekey : nextWin.skey;
forwardRows =
getNumOfRowsInTimeWindow(&pBlock->info, tsCols, startPos, ekey, binarySearchForKey, NULL, pInfo->inputOrder);
-
// window start(end) key interpolation
doWindowBorderInterpolation(pInfo, pBlock, pResult, &nextWin, startPos, forwardRows, pSup);
-
+ // TODO: add to open window? how to close the open windows after input blocks exhausted?
+#if 0
+ if ((ascScan && ekey <= pBlock->info.window.ekey) ||
+ (!ascScan && ekey >= pBlock->info.window.skey)) {
+ // window start(end) key interpolation
+ doWindowBorderInterpolation(pInfo, pBlock, pResult, &nextWin, startPos, forwardRows, pSup);
+ } else if (pInfo->timeWindowInterpo) {
+ addToOpenWindowList(pResultRowInfo, pResult, tableGroupId);
+ }
+#endif
updateTimeWindowInfo(&pInfo->twAggSup.timeWindowData, &nextWin, true);
doApplyFunctions(pTaskInfo, pSup->pCtx, &pInfo->twAggSup.timeWindowData, startPos, forwardRows, pBlock->info.rows,
numOfOutput);
@@ -1040,20 +1034,23 @@ void doCloseWindow(SResultRowInfo* pResultRowInfo, const SIntervalAggOperatorInf
}
}
-SResultRowPosition addToOpenWindowList(SResultRowInfo* pResultRowInfo, const SResultRow* pResult) {
- SResultRowPosition pos = (SResultRowPosition){.pageId = pResult->pageId, .offset = pResult->offset};
- SListNode* pn = tdListGetTail(pResultRowInfo->openWindow);
+SResultRowPosition addToOpenWindowList(SResultRowInfo* pResultRowInfo, const SResultRow* pResult, uint64_t groupId) {
+ SOpenWindowInfo openWin = {0};
+ openWin.pos.pageId = pResult->pageId;
+ openWin.pos.offset = pResult->offset;
+ openWin.groupId = groupId;
+ SListNode* pn = tdListGetTail(pResultRowInfo->openWindow);
if (pn == NULL) {
- tdListAppend(pResultRowInfo->openWindow, &pos);
- return pos;
+ tdListAppend(pResultRowInfo->openWindow, &openWin);
+ return openWin.pos;
}
- SResultRowPosition* px = (SResultRowPosition*)pn->data;
- if (px->pageId != pos.pageId || px->offset != pos.offset) {
- tdListAppend(pResultRowInfo->openWindow, &pos);
+ SOpenWindowInfo* px = (SOpenWindowInfo*)pn->data;
+ if (px->pos.pageId != openWin.pos.pageId || px->pos.offset != openWin.pos.offset || px->groupId != openWin.groupId) {
+ tdListAppend(pResultRowInfo->openWindow, &openWin);
}
- return pos;
+ return openWin.pos;
}
int64_t* extractTsCol(SSDataBlock* pBlock, const SIntervalAggOperatorInfo* pInfo) {
@@ -1107,7 +1104,7 @@ static int32_t doOpenIntervalAgg(SOperatorInfo* pOperator) {
setInputDataBlock(pOperator, pSup->pCtx, pBlock, pInfo->inputOrder, scanFlag, true);
blockDataUpdateTsWindow(pBlock, pInfo->primaryTsIndex);
- hashIntervalAgg(pOperator, &pInfo->binfo.resultRowInfo, pBlock, scanFlag, NULL);
+ hashIntervalAgg(pOperator, &pInfo->binfo.resultRowInfo, pBlock, scanFlag);
}
initGroupedResultInfo(&pInfo->groupResInfo, pInfo->aggSup.pResultRowHashTable, pInfo->resultTsOrder);
@@ -1122,7 +1119,7 @@ static bool compareVal(const char* v, const SStateKeys* pKey) {
if (varDataLen(v) != varDataLen(pKey->pData)) {
return false;
} else {
- return strncmp(varDataVal(v), varDataVal(pKey->pData), varDataLen(v)) == 0;
+ return memcmp(varDataVal(v), varDataVal(pKey->pData), varDataLen(v)) == 0;
}
} else {
return memcmp(pKey->pData, v, pKey->bytes) == 0;
@@ -1357,7 +1354,7 @@ static void setInverFunction(SqlFunctionCtx* pCtx, int32_t num, EStreamType type
}
}
-void doClearWindowImpl(SResultRowPosition* p1, SDiskbasedBuf* pResultBuf, SExprSupp* pSup, int32_t numOfOutput) {
+static void doClearWindowImpl(SResultRowPosition* p1, SDiskbasedBuf* pResultBuf, SExprSupp* pSup, int32_t numOfOutput) {
SResultRow* pResult = getResultRowByPos(pResultBuf, p1, false);
SqlFunctionCtx* pCtx = pSup->pCtx;
for (int32_t i = 0; i < numOfOutput; ++i) {
@@ -1376,11 +1373,11 @@ void doClearWindowImpl(SResultRowPosition* p1, SDiskbasedBuf* pResultBuf, SExprS
releaseBufPage(pResultBuf, bufPage);
}
-bool doClearWindow(SAggSupporter* pAggSup, SExprSupp* pSup, char* pData, int16_t bytes, uint64_t groupId,
- int32_t numOfOutput) {
+static bool doClearWindow(SAggSupporter* pAggSup, SExprSupp* pSup, char* pData, int16_t bytes, uint64_t groupId,
+ int32_t numOfOutput) {
SET_RES_WINDOW_KEY(pAggSup->keyBuf, pData, bytes, groupId);
SResultRowPosition* p1 =
- (SResultRowPosition*)taosHashGet(pAggSup->pResultRowHashTable, pAggSup->keyBuf, GET_RES_WINDOW_KEY_LEN(bytes));
+ (SResultRowPosition*)tSimpleHashGet(pAggSup->pResultRowHashTable, pAggSup->keyBuf, GET_RES_WINDOW_KEY_LEN(bytes));
if (!p1) {
// window has been closed
return false;
@@ -1389,35 +1386,80 @@ bool doClearWindow(SAggSupporter* pAggSup, SExprSupp* pSup, char* pData, int16_t
return true;
}
+static bool doDeleteWindow(SOperatorInfo* pOperator, TSKEY ts, uint64_t groupId, int32_t numOfOutput) {
+ SStreamIntervalOperatorInfo* pInfo = pOperator->info;
+ SWinKey key = {.ts = ts, .groupId = groupId};
+ tSimpleHashRemove(pInfo->aggSup.pResultRowHashTable, &key, sizeof(SWinKey));
+ streamStateDel(pOperator->pTaskInfo->streamInfo.pState, &key);
+ return true;
+}
+
+static void doDeleteWindows(SOperatorInfo* pOperator, SInterval* pInterval, int32_t numOfOutput, SSDataBlock* pBlock,
+ SArray* pUpWins, SHashObj* pUpdatedMap) {
+ SColumnInfoData* pStartTsCol = taosArrayGet(pBlock->pDataBlock, START_TS_COLUMN_INDEX);
+ TSKEY* startTsCols = (TSKEY*)pStartTsCol->pData;
+ SColumnInfoData* pEndTsCol = taosArrayGet(pBlock->pDataBlock, END_TS_COLUMN_INDEX);
+ TSKEY* endTsCols = (TSKEY*)pEndTsCol->pData;
+ SColumnInfoData* pGpCol = taosArrayGet(pBlock->pDataBlock, GROUPID_COLUMN_INDEX);
+ uint64_t* pGpDatas = (uint64_t*)pGpCol->pData;
+ for (int32_t i = 0; i < pBlock->info.rows; i++) {
+ SResultRowInfo dumyInfo;
+ dumyInfo.cur.pageId = -1;
+ STimeWindow win = getActiveTimeWindow(NULL, &dumyInfo, startTsCols[i], pInterval, TSDB_ORDER_ASC);
+ while (win.ekey <= endTsCols[i]) {
+ uint64_t winGpId = pGpDatas[i];
+ bool res = doDeleteWindow(pOperator, win.skey, winGpId, numOfOutput);
+ SWinKey winRes = {.ts = win.skey, .groupId = winGpId};
+ if (pUpWins && res) {
+ taosArrayPush(pUpWins, &winRes);
+ }
+ if (pUpdatedMap) {
+ taosHashRemove(pUpdatedMap, &winRes, sizeof(SWinKey));
+ }
+ getNextTimeWindow(pInterval, pInterval->precision, TSDB_ORDER_ASC, &win);
+ }
+ }
+}
+
bool doDeleteIntervalWindow(SAggSupporter* pAggSup, TSKEY ts, uint64_t groupId) {
size_t bytes = sizeof(TSKEY);
SET_RES_WINDOW_KEY(pAggSup->keyBuf, &ts, bytes, groupId);
SResultRowPosition* p1 =
- (SResultRowPosition*)taosHashGet(pAggSup->pResultRowHashTable, pAggSup->keyBuf, GET_RES_WINDOW_KEY_LEN(bytes));
+ (SResultRowPosition*)tSimpleHashGet(pAggSup->pResultRowHashTable, pAggSup->keyBuf, GET_RES_WINDOW_KEY_LEN(bytes));
if (!p1) {
// window has been closed
return false;
}
- // SFilePage* bufPage = getBufPage(pAggSup->pResultBuf, p1->pageId);
- // dBufSetBufPageRecycled(pAggSup->pResultBuf, bufPage);
- taosHashRemove(pAggSup->pResultRowHashTable, pAggSup->keyBuf, GET_RES_WINDOW_KEY_LEN(bytes));
+ tSimpleHashRemove(pAggSup->pResultRowHashTable, pAggSup->keyBuf, GET_RES_WINDOW_KEY_LEN(bytes));
return true;
}
-void doDeleteSpecifyIntervalWindow(SAggSupporter* pAggSup, SSDataBlock* pBlock, SArray* pUpWins, SInterval* pInterval) {
+static void doDeleteSpecifyIntervalWindow(SAggSupporter* pAggSup, STimeWindowAggSupp* pTwSup, SSDataBlock* pBlock,
+ SArray* pDelWins, SInterval* pInterval, SHashObj* pUpdatedMap) {
SColumnInfoData* pStartCol = taosArrayGet(pBlock->pDataBlock, START_TS_COLUMN_INDEX);
TSKEY* tsStarts = (TSKEY*)pStartCol->pData;
+ SColumnInfoData* pEndCol = taosArrayGet(pBlock->pDataBlock, END_TS_COLUMN_INDEX);
+ TSKEY* tsEnds = (TSKEY*)pEndCol->pData;
SColumnInfoData* pGroupCol = taosArrayGet(pBlock->pDataBlock, GROUPID_COLUMN_INDEX);
uint64_t* groupIds = (uint64_t*)pGroupCol->pData;
+ int64_t numOfWin = tSimpleHashGetSize(pAggSup->pResultRowHashTable);
for (int32_t i = 0; i < pBlock->info.rows; i++) {
+ TSKEY startTs = TMAX(tsStarts[i], pTwSup->minTs);
+ TSKEY endTs = TMIN(tsEnds[i], pTwSup->maxTs);
SResultRowInfo dumyInfo;
dumyInfo.cur.pageId = -1;
- STimeWindow win = getActiveTimeWindow(NULL, &dumyInfo, tsStarts[i], pInterval, TSDB_ORDER_ASC);
- doDeleteIntervalWindow(pAggSup, win.skey, groupIds[i]);
- if (pUpWins) {
+ STimeWindow win = getActiveTimeWindow(NULL, &dumyInfo, startTs, pInterval, TSDB_ORDER_ASC);
+ do {
+ doDeleteIntervalWindow(pAggSup, win.skey, groupIds[i]);
SWinKey winRes = {.ts = win.skey, .groupId = groupIds[i]};
- taosArrayPush(pUpWins, &winRes);
- }
+ if (pDelWins) {
+ taosArrayPush(pDelWins, &winRes);
+ }
+ if (pUpdatedMap) {
+ taosHashRemove(pUpdatedMap, &winRes, sizeof(SWinKey));
+ }
+ getNextTimeWindow(pInterval, pInterval->precision, TSDB_ORDER_ASC, &win);
+ } while (win.skey <= endTs);
}
}
@@ -1427,19 +1469,14 @@ static void doClearWindows(SAggSupporter* pAggSup, SExprSupp* pSup1, SInterval*
TSKEY* startTsCols = (TSKEY*)pStartTsCol->pData;
SColumnInfoData* pEndTsCol = taosArrayGet(pBlock->pDataBlock, END_TS_COLUMN_INDEX);
TSKEY* endTsCols = (TSKEY*)pEndTsCol->pData;
- uint64_t* pGpDatas = NULL;
- if (pBlock->info.type == STREAM_RETRIEVE) {
- SColumnInfoData* pGpCol = taosArrayGet(pBlock->pDataBlock, GROUPID_COLUMN_INDEX);
- pGpDatas = (uint64_t*)pGpCol->pData;
- }
- int32_t step = 0;
- int32_t startPos = 0;
+ SColumnInfoData* pGpCol = taosArrayGet(pBlock->pDataBlock, GROUPID_COLUMN_INDEX);
+ uint64_t* pGpDatas = (uint64_t*)pGpCol->pData;
for (int32_t i = 0; i < pBlock->info.rows; i++) {
SResultRowInfo dumyInfo;
dumyInfo.cur.pageId = -1;
STimeWindow win = getActiveTimeWindow(NULL, &dumyInfo, startTsCols[i], pInterval, TSDB_ORDER_ASC);
while (win.ekey <= endTsCols[i]) {
- uint64_t winGpId = pGpDatas ? pGpDatas[startPos] : pBlock->info.groupId;
+ uint64_t winGpId = pGpDatas[i];
bool res = doClearWindow(pAggSup, pSup1, (char*)&win.skey, sizeof(TSKEY), winGpId, numOfOutput);
if (pUpWins && res) {
SWinKey winRes = {.ts = win.skey, .groupId = winGpId};
@@ -1450,11 +1487,12 @@ static void doClearWindows(SAggSupporter* pAggSup, SExprSupp* pSup1, SInterval*
}
}
-static int32_t getAllIntervalWindow(SHashObj* pHashMap, SHashObj* resWins) {
- void* pIte = NULL;
- size_t keyLen = 0;
- while ((pIte = taosHashIterate(pHashMap, pIte)) != NULL) {
- void* key = taosHashGetKey(pIte, &keyLen);
+static int32_t getAllIntervalWindow(SSHashObj* pHashMap, SHashObj* resWins) {
+ void* pIte = NULL;
+ size_t keyLen = 0;
+ int32_t iter = 0;
+ while ((pIte = tSimpleHashIterate(pHashMap, pIte, &iter)) != NULL) {
+ void* key = tSimpleHashGetKey(pIte, &keyLen);
uint64_t groupId = *(uint64_t*)key;
ASSERT(keyLen == GET_RES_WINDOW_KEY_LEN(sizeof(TSKEY)));
TSKEY ts = *(int64_t*)((char*)key + sizeof(uint64_t));
@@ -1467,14 +1505,15 @@ static int32_t getAllIntervalWindow(SHashObj* pHashMap, SHashObj* resWins) {
return TSDB_CODE_SUCCESS;
}
-static int32_t closeIntervalWindow(SHashObj* pHashMap, STimeWindowAggSupp* pSup, SInterval* pInterval,
+static int32_t closeIntervalWindow(SSHashObj* pHashMap, STimeWindowAggSupp* pSup, SInterval* pInterval,
SHashObj* pPullDataMap, SHashObj* closeWins, SArray* pRecyPages,
SDiskbasedBuf* pDiscBuf) {
qDebug("===stream===close interval window");
- void* pIte = NULL;
- size_t keyLen = 0;
- while ((pIte = taosHashIterate(pHashMap, pIte)) != NULL) {
- void* key = taosHashGetKey(pIte, &keyLen);
+ void* pIte = NULL;
+ size_t keyLen = 0;
+ int32_t iter = 0;
+ while ((pIte = tSimpleHashIterate(pHashMap, pIte, &iter)) != NULL) {
+ void* key = tSimpleHashGetKey(pIte, &keyLen);
uint64_t groupId = *(uint64_t*)key;
ASSERT(keyLen == GET_RES_WINDOW_KEY_LEN(sizeof(TSKEY)));
TSKEY ts = *(int64_t*)((char*)key + sizeof(uint64_t));
@@ -1512,7 +1551,50 @@ static int32_t closeIntervalWindow(SHashObj* pHashMap, STimeWindowAggSupp* pSup,
}
char keyBuf[GET_RES_WINDOW_KEY_LEN(sizeof(TSKEY))];
SET_RES_WINDOW_KEY(keyBuf, &ts, sizeof(TSKEY), groupId);
- taosHashRemove(pHashMap, keyBuf, keyLen);
+ tSimpleHashIterateRemove(pHashMap, keyBuf, keyLen, &pIte, &iter);
+ }
+ }
+ return TSDB_CODE_SUCCESS;
+}
+
+static int32_t closeStreamIntervalWindow(SSHashObj* pHashMap, STimeWindowAggSupp* pTwSup, SInterval* pInterval,
+ SHashObj* pPullDataMap, SHashObj* closeWins, SOperatorInfo* pOperator) {
+ qDebug("===stream===close interval window");
+ void* pIte = NULL;
+ size_t keyLen = 0;
+ int32_t iter = 0;
+ while ((pIte = tSimpleHashIterate(pHashMap, pIte, &iter)) != NULL) {
+ void* key = tSimpleHashGetKey(pIte, &keyLen);
+ SWinKey* pWinKey = (SWinKey*)key;
+ void* chIds = taosHashGet(pPullDataMap, pWinKey, sizeof(SWinKey));
+ STimeWindow win = {
+ .skey = pWinKey->ts,
+ .ekey = taosTimeAdd(win.skey, pInterval->interval, pInterval->intervalUnit, pInterval->precision) - 1,
+ };
+ if (isCloseWindow(&win, pTwSup)) {
+ if (chIds && pPullDataMap) {
+ SArray* chAy = *(SArray**)chIds;
+ int32_t size = taosArrayGetSize(chAy);
+ qDebug("===stream===window %" PRId64 " wait child size:%d", pWinKey->ts, size);
+ for (int32_t i = 0; i < size; i++) {
+ qDebug("===stream===window %" PRId64 " wait child id:%d", pWinKey->ts, *(int32_t*)taosArrayGet(chAy, i));
+ }
+ continue;
+ } else if (pPullDataMap) {
+ qDebug("===stream===close window %" PRId64, pWinKey->ts);
+ }
+
+ if (pTwSup->calTrigger == STREAM_TRIGGER_WINDOW_CLOSE) {
+ int32_t code = saveWinResultInfo(pWinKey->ts, pWinKey->groupId, closeWins);
+ if (code != TSDB_CODE_SUCCESS) {
+ return code;
+ }
+ }
+ tSimpleHashIterateRemove(pHashMap, pWinKey, sizeof(SWinKey), &pIte, &iter);
+
+ if (needDeleteWindowBuf(&win, pTwSup)) {
+ streamStateDel(pOperator->pTaskInfo->streamInfo.pState, pWinKey);
+ }
}
}
return TSDB_CODE_SUCCESS;
@@ -1549,150 +1631,14 @@ static void doBuildDeleteResult(SArray* pWins, int32_t* index, SSDataBlock* pBlo
return;
}
blockDataEnsureCapacity(pBlock, size - *index);
- SColumnInfoData* pTsCol = taosArrayGet(pBlock->pDataBlock, START_TS_COLUMN_INDEX);
- SColumnInfoData* pGroupCol = taosArrayGet(pBlock->pDataBlock, GROUPID_COLUMN_INDEX);
+ uint64_t uid = 0;
for (int32_t i = *index; i < size; i++) {
SWinKey* pWin = taosArrayGet(pWins, i);
- colDataAppend(pTsCol, pBlock->info.rows, (const char*)&pWin->ts, false);
- colDataAppend(pGroupCol, pBlock->info.rows, (const char*)&pWin->groupId, false);
- pBlock->info.rows++;
+ appendOneRow(pBlock, &pWin->ts, &pWin->ts, &uid, &pWin->groupId);
(*index)++;
}
}
-static SSDataBlock* doStreamIntervalAgg(SOperatorInfo* pOperator) {
- SIntervalAggOperatorInfo* pInfo = pOperator->info;
- SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo;
-
- pInfo->inputOrder = TSDB_ORDER_ASC;
- SExprSupp* pSup = &pOperator->exprSupp;
-
- if (pOperator->status == OP_EXEC_DONE) {
- return NULL;
- }
-
- if (pOperator->status == OP_RES_TO_RETURN) {
- doBuildDeleteResult(pInfo->pDelWins, &pInfo->delIndex, pInfo->pDelRes);
- if (pInfo->pDelRes->info.rows > 0) {
- return pInfo->pDelRes;
- }
-
- doBuildResultDatablock(pOperator, &pInfo->binfo, &pInfo->groupResInfo, pInfo->aggSup.pResultBuf);
- if (pInfo->binfo.pRes->info.rows == 0 || !hasRemainResults(&pInfo->groupResInfo)) {
- pOperator->status = OP_EXEC_DONE;
- qDebug("===stream===single interval is done");
- freeAllPages(pInfo->pRecycledPages, pInfo->aggSup.pResultBuf);
- }
- printDataBlock(pInfo->binfo.pRes, "single interval");
- return pInfo->binfo.pRes->info.rows == 0 ? NULL : pInfo->binfo.pRes;
- }
-
- SOperatorInfo* downstream = pOperator->pDownstream[0];
-
- SArray* pUpdated = taosArrayInit(4, POINTER_BYTES); // SResKeyPos
- _hash_fn_t hashFn = taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY);
- SHashObj* pUpdatedMap = taosHashInit(1024, hashFn, false, HASH_NO_LOCK);
-
- SStreamState* pState = pTaskInfo->streamInfo.pState;
-
- while (1) {
- SSDataBlock* pBlock = downstream->fpSet.getNextFn(downstream);
- if (pBlock == NULL) {
- break;
- }
- // qInfo("===stream===%ld", pBlock->info.version);
- printDataBlock(pBlock, "single interval recv");
-
- if (pBlock->info.type == STREAM_CLEAR) {
- doClearWindows(&pInfo->aggSup, &pOperator->exprSupp, &pInfo->interval, pOperator->exprSupp.numOfExprs, pBlock,
- NULL);
- qDebug("%s clear existed time window results for updates checked", GET_TASKID(pTaskInfo));
- continue;
- }
- if (pBlock->info.type == STREAM_DELETE_DATA) {
- doDeleteSpecifyIntervalWindow(&pInfo->aggSup, pBlock, pInfo->pDelWins, &pInfo->interval);
- continue;
- } else if (pBlock->info.type == STREAM_GET_ALL) {
- getAllIntervalWindow(pInfo->aggSup.pResultRowHashTable, pUpdatedMap);
- continue;
- }
-
- if (pBlock->info.type == STREAM_NORMAL && pBlock->info.version != 0) {
- // set input version
- pTaskInfo->version = pBlock->info.version;
- }
-
- if (pInfo->scalarSupp.pExprInfo != NULL) {
- SExprSupp* pExprSup = &pInfo->scalarSupp;
- projectApplyFunctions(pExprSup->pExprInfo, pBlock, pBlock, pExprSup->pCtx, pExprSup->numOfExprs, NULL);
- }
-
- // The timewindow that overlaps the timestamps of the input pBlock need to be recalculated and return to the
- // caller. Note that all the time window are not close till now.
- // the pDataBlock are always the same one, no need to call this again
- setInputDataBlock(pOperator, pSup->pCtx, pBlock, pInfo->inputOrder, MAIN_SCAN, true);
- if (pInfo->invertible) {
- setInverFunction(pSup->pCtx, pOperator->exprSupp.numOfExprs, pBlock->info.type);
- }
-
- pInfo->twAggSup.maxTs = TMAX(pInfo->twAggSup.maxTs, pBlock->info.window.ekey);
- hashIntervalAgg(pOperator, &pInfo->binfo.resultRowInfo, pBlock, MAIN_SCAN, pUpdatedMap);
- }
-
-#if 0
- if (pState) {
- printf(">>>>>>>> stream read backend\n");
- SWinKey key = {
- .ts = 1,
- .groupId = 2,
- };
- char* val = NULL;
- int32_t sz;
- if (streamStateGet(pState, &key, (void**)&val, &sz) < 0) {
- ASSERT(0);
- }
- printf("stream read %s %d\n", val, sz);
- streamFreeVal(val);
-
- SStreamStateCur* pCur = streamStateGetCur(pState, &key);
- ASSERT(pCur);
- while (streamStateCurNext(pState, pCur) == 0) {
- SWinKey key1;
- const void* val1;
- if (streamStateGetKVByCur(pCur, &key1, &val1, &sz) < 0) {
- break;
- }
- printf("stream iter key groupId:%d ts:%d, value %s %d\n", key1.groupId, key1.ts, val1, sz);
- }
- streamStateFreeCur(pCur);
- }
-#endif
-
- pOperator->status = OP_RES_TO_RETURN;
- closeIntervalWindow(pInfo->aggSup.pResultRowHashTable, &pInfo->twAggSup, &pInfo->interval, NULL, pUpdatedMap,
- pInfo->pRecycledPages, pInfo->aggSup.pResultBuf);
-
- void* pIte = NULL;
- while ((pIte = taosHashIterate(pUpdatedMap, pIte)) != NULL) {
- taosArrayPush(pUpdated, pIte);
- }
- taosArraySort(pUpdated, resultrowComparAsc);
-
- finalizeUpdatedResult(pOperator->exprSupp.numOfExprs, pInfo->aggSup.pResultBuf, pUpdated, pSup->rowEntryInfoOffset);
- initMultiResInfoFromArrayList(&pInfo->groupResInfo, pUpdated);
- blockDataEnsureCapacity(pInfo->binfo.pRes, pOperator->resultInfo.capacity);
- removeDeleteResults(pUpdatedMap, pInfo->pDelWins);
- taosHashCleanup(pUpdatedMap);
- doBuildDeleteResult(pInfo->pDelWins, &pInfo->delIndex, pInfo->pDelRes);
- if (pInfo->pDelRes->info.rows > 0) {
- return pInfo->pDelRes;
- }
-
- doBuildResultDatablock(pOperator, &pInfo->binfo, &pInfo->groupResInfo, pInfo->aggSup.pResultBuf);
- printDataBlock(pInfo->binfo.pRes, "single interval");
- return pInfo->binfo.pRes->info.rows == 0 ? NULL : pInfo->binfo.pRes;
-}
-
static void destroyStateWindowOperatorInfo(void* param) {
SStateWindowOperatorInfo* pInfo = (SStateWindowOperatorInfo*)param;
cleanupBasicInfo(&pInfo->binfo);
@@ -1710,6 +1656,10 @@ void destroyIntervalOperatorInfo(void* param) {
SIntervalAggOperatorInfo* pInfo = (SIntervalAggOperatorInfo*)param;
cleanupBasicInfo(&pInfo->binfo);
cleanupAggSup(&pInfo->aggSup);
+ cleanupExprSupp(&pInfo->scalarSupp);
+
+ tdListFree(pInfo->binfo.resultRowInfo.openWindow);
+
pInfo->pRecycledPages = taosArrayDestroy(pInfo->pRecycledPages);
pInfo->pInterpCols = taosArrayDestroy(pInfo->pInterpCols);
taosArrayDestroyEx(pInfo->pPrevValues, freeItem);
@@ -1749,6 +1699,7 @@ void destroyStreamFinalIntervalOperatorInfo(void* param) {
}
nodesDestroyNode((SNode*)pInfo->pPhyNode);
colDataDestroy(&pInfo->twAggSup.timeWindowData);
+ cleanupGroupResInfo(&pInfo->groupResInfo);
taosMemoryFreeClear(param);
}
@@ -1806,20 +1757,18 @@ static bool timeWindowinterpNeeded(SqlFunctionCtx* pCtx, int32_t numOfCols, SInt
return needed;
}
-void increaseTs(SqlFunctionCtx* pCtx) {
- if (pCtx[0].pExpr->pExpr->_function.pFunctNode->funcType == FUNCTION_TYPE_WSTART) {
- pCtx[0].increase = true;
- }
-}
-
-void initIntervalDownStream(SOperatorInfo* downstream, uint16_t type, SAggSupporter* pSup) {
+void initIntervalDownStream(SOperatorInfo* downstream, uint16_t type, SAggSupporter* pSup, SInterval* pInterval,
+ STimeWindowAggSupp* pTwSup) {
if (downstream->operatorType != QUERY_NODE_PHYSICAL_PLAN_STREAM_SCAN) {
- // Todo(liuyao) support partition by column
+ initIntervalDownStream(downstream->pDownstream[0], type, pSup, pInterval, pTwSup);
return;
}
SStreamScanInfo* pScanInfo = downstream->info;
- pScanInfo->sessionSup.parentType = type;
- pScanInfo->sessionSup.pIntervalAggSup = pSup;
+ pScanInfo->windowSup.parentType = type;
+ pScanInfo->windowSup.pIntervalAggSup = pSup;
+ pScanInfo->pUpdateInfo = updateInfoInitP(pInterval, pTwSup->waterMark);
+ pScanInfo->interval = *pInterval;
+ pScanInfo->twAggSup = *pTwSup;
}
void initStreamFunciton(SqlFunctionCtx* pCtx, int32_t numOfExpr) {
@@ -1873,7 +1822,6 @@ SOperatorInfo* createIntervalOperatorInfo(SOperatorInfo* downstream, SExprInfo*
if (isStream) {
ASSERT(numOfCols > 0);
- increaseTs(pSup->pCtx);
initStreamFunciton(pSup->pCtx, pSup->numOfExprs);
}
@@ -1884,7 +1832,7 @@ SOperatorInfo* createIntervalOperatorInfo(SOperatorInfo* downstream, SExprInfo*
pInfo->timeWindowInterpo = timeWindowinterpNeeded(pSup->pCtx, numOfCols, pInfo);
if (pInfo->timeWindowInterpo) {
- pInfo->binfo.resultRowInfo.openWindow = tdListNew(sizeof(SResultRowPosition));
+ pInfo->binfo.resultRowInfo.openWindow = tdListNew(sizeof(SOpenWindowInfo));
if (pInfo->binfo.resultRowInfo.openWindow == NULL) {
goto _error;
}
@@ -1900,17 +1848,11 @@ SOperatorInfo* createIntervalOperatorInfo(SOperatorInfo* downstream, SExprInfo*
pOperator->operatorType = QUERY_NODE_PHYSICAL_PLAN_HASH_INTERVAL;
pOperator->blocking = true;
pOperator->status = OP_NOT_OPENED;
- pOperator->exprSupp.pExprInfo = pExprInfo;
- pOperator->exprSupp.numOfExprs = numOfCols;
pOperator->info = pInfo;
- pOperator->fpSet = createOperatorFpSet(doOpenIntervalAgg, doBuildIntervalResult, doStreamIntervalAgg, NULL,
+ pOperator->fpSet = createOperatorFpSet(doOpenIntervalAgg, doBuildIntervalResult, NULL, NULL,
destroyIntervalOperatorInfo, aggEncodeResultRow, aggDecodeResultRow, NULL);
- if (nodeType(pPhyNode) == QUERY_NODE_PHYSICAL_PLAN_STREAM_INTERVAL) {
- initIntervalDownStream(downstream, QUERY_NODE_PHYSICAL_PLAN_STREAM_INTERVAL, &pInfo->aggSup);
- }
-
code = appendDownstream(pOperator, &downstream, 1);
if (code != TSDB_CODE_SUCCESS) {
goto _error;
@@ -2078,7 +2020,11 @@ static void doKeepPrevRows(STimeSliceOperatorInfo* pSliceInfo, const SSDataBlock
pkey->isNull = false;
char* val = colDataGetData(pColInfoData, rowIndex);
- memcpy(pkey->pData, val, pkey->bytes);
+ if (!IS_VAR_DATA_TYPE(pkey->type)) {
+ memcpy(pkey->pData, val, pkey->bytes);
+ } else {
+ memcpy(pkey->pData, val, varDataLen(val));
+ }
}
}
@@ -2096,7 +2042,11 @@ static void doKeepNextRows(STimeSliceOperatorInfo* pSliceInfo, const SSDataBlock
pkey->isNull = false;
char* val = colDataGetData(pColInfoData, rowIndex);
- memcpy(pkey->pData, val, pkey->bytes);
+ if (!IS_VAR_DATA_TYPE(pkey->type)) {
+ memcpy(pkey->pData, val, pkey->bytes);
+ } else {
+ memcpy(pkey->pData, val, varDataLen(val));
+ }
}
}
@@ -2137,7 +2087,7 @@ static void doKeepLinearInfo(STimeSliceOperatorInfo* pSliceInfo, const SSDataBlo
}
}
- pSliceInfo->fillLastPoint = isLastRow ? true : false;
+ pSliceInfo->fillLastPoint = isLastRow;
}
static void genInterpolationResult(STimeSliceOperatorInfo* pSliceInfo, SExprSupp* pExprSup, SSDataBlock* pResBlock) {
@@ -2149,12 +2099,17 @@ static void genInterpolationResult(STimeSliceOperatorInfo* pSliceInfo, SExprSupp
bool hasInterp = true;
for (int32_t j = 0; j < pExprSup->numOfExprs; ++j) {
SExprInfo* pExprInfo = &pExprSup->pExprInfo[j];
- int32_t srcSlot = pExprInfo->base.pParam[0].pCol->slotId;
- int32_t dstSlot = pExprInfo->base.resSchema.slotId;
- // SColumnInfoData* pSrc = taosArrayGet(pBlock->pDataBlock, srcSlot);
+ int32_t dstSlot = pExprInfo->base.resSchema.slotId;
SColumnInfoData* pDst = taosArrayGet(pResBlock->pDataBlock, dstSlot);
+ if (IS_TIMESTAMP_TYPE(pExprInfo->base.resSchema.type)) {
+ colDataAppend(pDst, rows, (char*)&pSliceInfo->current, false);
+ continue;
+ }
+
+ int32_t srcSlot = pExprInfo->base.pParam[0].pCol->slotId;
+ // SColumnInfoData* pSrc = taosArrayGet(pBlock->pDataBlock, srcSlot);
switch (pSliceInfo->fillType) {
case TSDB_FILL_NULL: {
colDataAppendNULL(pDst, rows);
@@ -2235,7 +2190,6 @@ static void genInterpolationResult(STimeSliceOperatorInfo* pSliceInfo, SExprSupp
if (hasInterp) {
pResBlock->info.rows += 1;
}
-
}
static int32_t initPrevRowsKeeper(STimeSliceOperatorInfo* pInfo, SSDataBlock* pBlock) {
@@ -2357,15 +2311,6 @@ static SSDataBlock* doTimeslice(SOperatorInfo* pOperator) {
SSDataBlock* pResBlock = pSliceInfo->pRes;
SExprSupp* pSup = &pOperator->exprSupp;
- // if (pOperator->status == OP_RES_TO_RETURN) {
- // // doBuildResultDatablock(&pRuntimeEnv->groupResInfo, pRuntimeEnv, pIntervalInfo->pRes);
- // if (pResBlock->info.rows == 0 || !hasRemainResults(&pSliceInfo->groupResInfo)) {
- // doSetOperatorCompleted(pOperator);
- // }
- //
- // return pResBlock;
- // }
-
int32_t order = TSDB_ORDER_ASC;
SInterval* pInterval = &pSliceInfo->interval;
SOperatorInfo* downstream = pOperator->pDownstream[0];
@@ -2410,19 +2355,24 @@ static SSDataBlock* doTimeslice(SOperatorInfo* pOperator) {
if (ts == pSliceInfo->current) {
for (int32_t j = 0; j < pOperator->exprSupp.numOfExprs; ++j) {
SExprInfo* pExprInfo = &pOperator->exprSupp.pExprInfo[j];
- int32_t dstSlot = pExprInfo->base.resSchema.slotId;
- int32_t srcSlot = pExprInfo->base.pParam[0].pCol->slotId;
- SColumnInfoData* pSrc = taosArrayGet(pBlock->pDataBlock, srcSlot);
+ int32_t dstSlot = pExprInfo->base.resSchema.slotId;
SColumnInfoData* pDst = taosArrayGet(pResBlock->pDataBlock, dstSlot);
- if (colDataIsNull_s(pSrc, i)) {
- colDataAppendNULL(pDst, pResBlock->info.rows);
- continue;
- }
+ if (IS_TIMESTAMP_TYPE(pExprInfo->base.resSchema.type)) {
+ colDataAppend(pDst, pResBlock->info.rows, (char *)&pSliceInfo->current, false);
+ } else {
+ int32_t srcSlot = pExprInfo->base.pParam[0].pCol->slotId;
+ SColumnInfoData* pSrc = taosArrayGet(pBlock->pDataBlock, srcSlot);
+
+ if (colDataIsNull_s(pSrc, i)) {
+ colDataAppendNULL(pDst, pResBlock->info.rows);
+ continue;
+ }
- char* v = colDataGetData(pSrc, i);
- colDataAppend(pDst, pResBlock->info.rows, v, false);
+ char* v = colDataGetData(pSrc, i);
+ colDataAppend(pDst, pResBlock->info.rows, v, false);
+ }
}
pResBlock->info.rows += 1;
@@ -2495,6 +2445,9 @@ static SSDataBlock* doTimeslice(SOperatorInfo* pOperator) {
break;
}
}
+ } else {
+ // store ts value as start, and calculate interp value when processing next block
+ doKeepLinearInfo(pSliceInfo, pBlock, i, true);
}
} else { // non-linear interpolation
if (i < pBlock->info.rows - 1) {
@@ -2539,14 +2492,24 @@ static SSDataBlock* doTimeslice(SOperatorInfo* pOperator) {
if (ts == pSliceInfo->current && pSliceInfo->current <= pSliceInfo->win.ekey) {
for (int32_t j = 0; j < pOperator->exprSupp.numOfExprs; ++j) {
SExprInfo* pExprInfo = &pOperator->exprSupp.pExprInfo[j];
- int32_t dstSlot = pExprInfo->base.resSchema.slotId;
- int32_t srcSlot = pExprInfo->base.pParam[0].pCol->slotId;
- SColumnInfoData* pSrc = taosArrayGet(pBlock->pDataBlock, srcSlot);
+ int32_t dstSlot = pExprInfo->base.resSchema.slotId;
SColumnInfoData* pDst = taosArrayGet(pResBlock->pDataBlock, dstSlot);
- char* v = colDataGetData(pSrc, i);
- colDataAppend(pDst, pResBlock->info.rows, v, false);
+ if (IS_TIMESTAMP_TYPE(pExprInfo->base.resSchema.type)) {
+ colDataAppend(pDst, pResBlock->info.rows, (char *)&pSliceInfo->current, false);
+ } else {
+ int32_t srcSlot = pExprInfo->base.pParam[0].pCol->slotId;
+ SColumnInfoData* pSrc = taosArrayGet(pBlock->pDataBlock, srcSlot);
+
+ if (colDataIsNull_s(pSrc, i)) {
+ colDataAppendNULL(pDst, pResBlock->info.rows);
+ continue;
+ }
+
+ char* v = colDataGetData(pSrc, i);
+ colDataAppend(pDst, pResBlock->info.rows, v, false);
+ }
}
pResBlock->info.rows += 1;
@@ -2573,6 +2536,9 @@ static SSDataBlock* doTimeslice(SOperatorInfo* pOperator) {
break;
}
}
+ } else { // it is the last row of current block
+ // store ts value as start, and calculate interp value when processing next block
+ doKeepLinearInfo(pSliceInfo, pBlock, i, true);
}
} else { // non-linear interpolation
pSliceInfo->current =
@@ -2678,6 +2644,10 @@ SOperatorInfo* createTimeSliceOperatorInfo(SOperatorInfo* downstream, SPhysiNode
pInfo->interval.interval = pInterpPhyNode->interval;
pInfo->current = pInfo->win.skey;
+ STableScanInfo* pScanInfo = (STableScanInfo*)downstream->info;
+ pScanInfo->cond.twindows = pInfo->win;
+ pScanInfo->cond.type = TIMEWINDOW_RANGE_EXTERNAL;
+
pOperator->name = "TimeSliceOperator";
pOperator->operatorType = QUERY_NODE_PHYSICAL_PLAN_INTERP_FUNC;
pOperator->blocking = false;
@@ -2700,20 +2670,26 @@ _error:
return NULL;
}
-SOperatorInfo* createStatewindowOperatorInfo(SOperatorInfo* downstream, SExprInfo* pExpr, int32_t numOfCols,
- SSDataBlock* pResBlock, STimeWindowAggSupp* pTwAggSup, int32_t tsSlotId,
- SColumn* pStateKeyCol, SNode* pCondition, SExecTaskInfo* pTaskInfo) {
+SOperatorInfo* createStatewindowOperatorInfo(SOperatorInfo* downstream, SStateWinodwPhysiNode* pStateNode,
+ SExecTaskInfo* pTaskInfo) {
SStateWindowOperatorInfo* pInfo = taosMemoryCalloc(1, sizeof(SStateWindowOperatorInfo));
SOperatorInfo* pOperator = taosMemoryCalloc(1, sizeof(SOperatorInfo));
if (pInfo == NULL || pOperator == NULL) {
goto _error;
}
- pInfo->stateCol = *pStateKeyCol;
+ int32_t num = 0;
+ SExprInfo* pExprInfo = createExprInfo(pStateNode->window.pFuncs, NULL, &num);
+ SSDataBlock* pResBlock = createResDataBlock(pStateNode->window.node.pOutputDataBlockDesc);
+ int32_t tsSlotId = ((SColumnNode*)pStateNode->window.pTspk)->slotId;
+
+ SColumnNode* pColNode = (SColumnNode*)((STargetNode*)pStateNode->pStateKey)->pExpr;
+
+ pInfo->stateCol = extractColumnFromColumnNode(pColNode);
pInfo->stateKey.type = pInfo->stateCol.type;
pInfo->stateKey.bytes = pInfo->stateCol.bytes;
pInfo->stateKey.pData = taosMemoryCalloc(1, pInfo->stateCol.bytes);
- pInfo->pCondition = pCondition;
+ pInfo->pCondition = pStateNode->window.node.pConditions;
if (pInfo->stateKey.pData == NULL) {
goto _error;
}
@@ -2721,16 +2697,17 @@ SOperatorInfo* createStatewindowOperatorInfo(SOperatorInfo* downstream, SExprInf
size_t keyBufSize = sizeof(int64_t) + sizeof(int64_t) + POINTER_BYTES;
initResultSizeInfo(&pOperator->resultInfo, 4096);
- int32_t code = initAggInfo(&pOperator->exprSupp, &pInfo->aggSup, pExpr, numOfCols, keyBufSize, pTaskInfo->id.str);
+ int32_t code = initAggInfo(&pOperator->exprSupp, &pInfo->aggSup, pExprInfo, num, keyBufSize, pTaskInfo->id.str);
if (code != TSDB_CODE_SUCCESS) {
goto _error;
}
initBasicInfo(&pInfo->binfo, pResBlock);
-
initResultRowInfo(&pInfo->binfo.resultRowInfo);
- pInfo->twAggSup = *pTwAggSup;
+ pInfo->twAggSup =
+ (STimeWindowAggSupp){.waterMark = pStateNode->window.watermark, .calTrigger = pStateNode->window.triggerType};
+ ;
initExecTimeWindowInfo(&pInfo->twAggSup.timeWindowData, &pTaskInfo->window);
pInfo->tsSlotId = tsSlotId;
@@ -2738,8 +2715,6 @@ SOperatorInfo* createStatewindowOperatorInfo(SOperatorInfo* downstream, SExprInf
pOperator->operatorType = QUERY_NODE_PHYSICAL_PLAN_MERGE_STATE;
pOperator->blocking = true;
pOperator->status = OP_NOT_OPENED;
- pOperator->exprSupp.pExprInfo = pExpr;
- pOperator->exprSupp.numOfExprs = numOfCols;
pOperator->pTaskInfo = pTaskInfo;
pOperator->info = pInfo;
@@ -2813,8 +2788,6 @@ SOperatorInfo* createSessionAggOperatorInfo(SOperatorInfo* downstream, SSessionW
pOperator->operatorType = QUERY_NODE_PHYSICAL_PLAN_MERGE_SESSION;
pOperator->blocking = true;
pOperator->status = OP_NOT_OPENED;
- pOperator->exprSupp.pExprInfo = pExprInfo;
- pOperator->exprSupp.numOfExprs = numOfCols;
pOperator->info = pInfo;
pOperator->fpSet = createOperatorFpSet(operatorDummyOpenFn, doSessionWindowAgg, NULL, NULL,
@@ -2835,14 +2808,26 @@ _error:
}
void compactFunctions(SqlFunctionCtx* pDestCtx, SqlFunctionCtx* pSourceCtx, int32_t numOfOutput,
- SExecTaskInfo* pTaskInfo) {
+ SExecTaskInfo* pTaskInfo, SColumnInfoData* pTimeWindowData) {
for (int32_t k = 0; k < numOfOutput; ++k) {
if (fmIsWindowPseudoColumnFunc(pDestCtx[k].functionId)) {
- continue;
- }
- int32_t code = TSDB_CODE_SUCCESS;
- if (functionNeedToExecute(&pDestCtx[k]) && pDestCtx[k].fpSet.combine != NULL) {
- code = pDestCtx[k].fpSet.combine(&pDestCtx[k], &pSourceCtx[k]);
+ if (!pTimeWindowData) {
+ continue;
+ }
+
+ SResultRowEntryInfo* pEntryInfo = GET_RES_INFO(&pDestCtx[k]);
+ char* p = GET_ROWCELL_INTERBUF(pEntryInfo);
+ SColumnInfoData idata = {0};
+ idata.info.type = TSDB_DATA_TYPE_BIGINT;
+ idata.info.bytes = tDataTypes[TSDB_DATA_TYPE_BIGINT].bytes;
+ idata.pData = p;
+
+ SScalarParam out = {.columnData = &idata};
+ SScalarParam tw = {.numOfRows = 5, .columnData = pTimeWindowData};
+ pDestCtx[k].sfp.process(&tw, 1, &out);
+ pEntryInfo->numOfRes = 1;
+ } else if (functionNeedToExecute(&pDestCtx[k]) && pDestCtx[k].fpSet.combine != NULL) {
+ int32_t code = pDestCtx[k].fpSet.combine(&pDestCtx[k], &pSourceCtx[k]);
if (code != TSDB_CODE_SUCCESS) {
qError("%s apply functions error, code: %s", GET_TASKID(pTaskInfo), tstrerror(code));
pTaskInfo->code = code;
@@ -2856,24 +2841,34 @@ bool hasIntervalWindow(SAggSupporter* pSup, TSKEY ts, uint64_t groupId) {
int32_t bytes = sizeof(TSKEY);
SET_RES_WINDOW_KEY(pSup->keyBuf, &ts, bytes, groupId);
SResultRowPosition* p1 =
- (SResultRowPosition*)taosHashGet(pSup->pResultRowHashTable, pSup->keyBuf, GET_RES_WINDOW_KEY_LEN(bytes));
+ (SResultRowPosition*)tSimpleHashGet(pSup->pResultRowHashTable, pSup->keyBuf, GET_RES_WINDOW_KEY_LEN(bytes));
return p1 != NULL;
}
-static void rebuildIntervalWindow(SStreamFinalIntervalOperatorInfo* pInfo, SExprSupp* pSup, SArray* pWinArray,
- int32_t groupId, int32_t numOfOutput, SExecTaskInfo* pTaskInfo, SArray* pUpdated) {
- int32_t size = taosArrayGetSize(pWinArray);
- if (!pInfo->pChildren) {
- return;
+STimeWindow getFinalTimeWindow(int64_t ts, SInterval* pInterval) {
+ STimeWindow w = {.skey = ts, .ekey = INT64_MAX};
+ w.ekey = taosTimeAdd(w.skey, pInterval->interval, pInterval->intervalUnit, pInterval->precision) - 1;
+ return w;
+}
+
+static void rebuildIntervalWindow(SStreamFinalIntervalOperatorInfo* pInfo, SExprSupp* pSup, SArray* pWinArray,
+ int32_t groupId, int32_t numOfOutput, SExecTaskInfo* pTaskInfo,
+ SHashObj* pUpdatedMap) {
+ int32_t size = taosArrayGetSize(pWinArray);
+ if (!pInfo->pChildren) {
+ return;
}
for (int32_t i = 0; i < size; i++) {
SWinKey* pWinRes = taosArrayGet(pWinArray, i);
SResultRow* pCurResult = NULL;
- STimeWindow ParentWin = {.skey = pWinRes->ts, .ekey = pWinRes->ts + 1};
- setTimeWindowOutputBuf(&pInfo->binfo.resultRowInfo, &ParentWin, true, &pCurResult, pWinRes->groupId, pSup->pCtx,
+ STimeWindow parentWin = getFinalTimeWindow(pWinRes->ts, &pInfo->interval);
+ if (isDeletedWindow(&parentWin, pWinRes->groupId, &pInfo->aggSup) && isCloseWindow(&parentWin, &pInfo->twAggSup)) {
+ continue;
+ }
+ setTimeWindowOutputBuf(&pInfo->binfo.resultRowInfo, &parentWin, true, &pCurResult, pWinRes->groupId, pSup->pCtx,
numOfOutput, pSup->rowEntryInfoOffset, &pInfo->aggSup, pTaskInfo);
int32_t numOfChildren = taosArrayGetSize(pInfo->pChildren);
- bool find = true;
+ int32_t num = 0;
for (int32_t j = 0; j < numOfChildren; j++) {
SOperatorInfo* pChildOp = taosArrayGetP(pInfo->pChildren, j);
SIntervalAggOperatorInfo* pChInfo = pChildOp->info;
@@ -2881,15 +2876,16 @@ static void rebuildIntervalWindow(SStreamFinalIntervalOperatorInfo* pInfo, SExpr
if (!hasIntervalWindow(&pChInfo->aggSup, pWinRes->ts, pWinRes->groupId)) {
continue;
}
- find = true;
+ num++;
SResultRow* pChResult = NULL;
- setTimeWindowOutputBuf(&pChInfo->binfo.resultRowInfo, &ParentWin, true, &pChResult, pWinRes->groupId,
+ setTimeWindowOutputBuf(&pChInfo->binfo.resultRowInfo, &parentWin, true, &pChResult, pWinRes->groupId,
pChildSup->pCtx, pChildSup->numOfExprs, pChildSup->rowEntryInfoOffset, &pChInfo->aggSup,
pTaskInfo);
- compactFunctions(pSup->pCtx, pChildSup->pCtx, numOfOutput, pTaskInfo);
+ updateTimeWindowInfo(&pInfo->twAggSup.timeWindowData, &parentWin, true);
+ compactFunctions(pSup->pCtx, pChildSup->pCtx, numOfOutput, pTaskInfo, &pInfo->twAggSup.timeWindowData);
}
- if (find && pUpdated) {
- saveResultRow(pCurResult, pWinRes->groupId, pUpdated);
+ if (num > 0 && pUpdatedMap) {
+ saveWinResultRow(pCurResult, pWinRes->groupId, pUpdatedMap);
setResultBufPageDirty(pInfo->aggSup.pResultBuf, &pInfo->binfo.resultRowInfo.cur);
}
}
@@ -2897,11 +2893,24 @@ static void rebuildIntervalWindow(SStreamFinalIntervalOperatorInfo* pInfo, SExpr
bool isDeletedWindow(STimeWindow* pWin, uint64_t groupId, SAggSupporter* pSup) {
SET_RES_WINDOW_KEY(pSup->keyBuf, &pWin->skey, sizeof(int64_t), groupId);
- SResultRowPosition* p1 = (SResultRowPosition*)taosHashGet(pSup->pResultRowHashTable, pSup->keyBuf,
- GET_RES_WINDOW_KEY_LEN(sizeof(int64_t)));
+ SResultRowPosition* p1 = (SResultRowPosition*)tSimpleHashGet(pSup->pResultRowHashTable, pSup->keyBuf,
+ GET_RES_WINDOW_KEY_LEN(sizeof(int64_t)));
return p1 == NULL;
}
+bool isDeletedStreamWindow(STimeWindow* pWin, uint64_t groupId, SOperatorInfo* pOperator, STimeWindowAggSupp* pTwSup) {
+ if (pWin->ekey < pTwSup->maxTs - pTwSup->deleteMark) {
+ SWinKey key = {.ts = pWin->skey, .groupId = groupId};
+ void* pVal = NULL;
+ int32_t size = 0;
+ if (streamStateGet(pOperator->pTaskInfo->streamInfo.pState, &key, &pVal, &size) < 0) {
+ return false;
+ }
+ streamStateReleaseBuf(pOperator->pTaskInfo->streamInfo.pState, &key, pVal);
+ }
+ return false;
+}
+
int32_t getNexWindowPos(SInterval* pInterval, SDataBlockInfo* pBlockInfo, TSKEY* tsCols, int32_t startPos, TSKEY eKey,
STimeWindow* pNextWin) {
int32_t forwardRows =
@@ -2920,21 +2929,14 @@ void addPullWindow(SHashObj* pMap, SWinKey* pWinRes, int32_t size) {
static int32_t getChildIndex(SSDataBlock* pBlock) { return pBlock->info.childId; }
-STimeWindow getFinalTimeWindow(int64_t ts, SInterval* pInterval) {
- STimeWindow w = {.skey = ts, .ekey = INT64_MAX};
- w.ekey = taosTimeAdd(w.skey, pInterval->interval, pInterval->intervalUnit, pInterval->precision) - 1;
- return w;
-}
-
-static void doHashInterval(SOperatorInfo* pOperatorInfo, SSDataBlock* pSDataBlock, uint64_t tableGroupId,
- SHashObj* pUpdatedMap) {
+static void doHashIntervalAgg(SOperatorInfo* pOperatorInfo, SSDataBlock* pSDataBlock, uint64_t tableGroupId,
+ SHashObj* pUpdatedMap) {
SStreamFinalIntervalOperatorInfo* pInfo = (SStreamFinalIntervalOperatorInfo*)pOperatorInfo->info;
SResultRowInfo* pResultRowInfo = &(pInfo->binfo.resultRowInfo);
SExecTaskInfo* pTaskInfo = pOperatorInfo->pTaskInfo;
SExprSupp* pSup = &pOperatorInfo->exprSupp;
int32_t numOfOutput = pSup->numOfExprs;
int32_t step = 1;
- bool ascScan = true;
TSKEY* tsCols = NULL;
SResultRow* pResult = NULL;
int32_t forwardRows = 0;
@@ -2943,7 +2945,7 @@ static void doHashInterval(SOperatorInfo* pOperatorInfo, SSDataBlock* pSDataBloc
SColumnInfoData* pColDataInfo = taosArrayGet(pSDataBlock->pDataBlock, pInfo->primaryTsIndex);
tsCols = (int64_t*)pColDataInfo->pData;
- int32_t startPos = ascScan ? 0 : (pSDataBlock->info.rows - 1);
+ int32_t startPos = 0;
TSKEY ts = getStartTsKey(&pSDataBlock->info.window, tsCols);
STimeWindow nextWin = {0};
if (IS_FINAL_OP(pInfo)) {
@@ -3026,9 +3028,10 @@ static void doHashInterval(SOperatorInfo* pOperatorInfo, SSDataBlock* pSDataBloc
}
static void clearStreamIntervalOperator(SStreamFinalIntervalOperatorInfo* pInfo) {
- taosHashClear(pInfo->aggSup.pResultRowHashTable);
+ tSimpleHashClear(pInfo->aggSup.pResultRowHashTable);
clearDiskbasedBuf(pInfo->aggSup.pResultBuf);
initResultRowInfo(&pInfo->binfo.resultRowInfo);
+ pInfo->aggSup.currentPageId = -1;
}
static void clearSpecialDataBlock(SSDataBlock* pBlock) {
@@ -3104,6 +3107,7 @@ void processPullOver(SSDataBlock* pBlock, SHashObj* pMap) {
taosArrayRemove(chArray, index);
if (taosArrayGetSize(chArray) == 0) {
// pull data is over
+ taosArrayDestroy(chArray);
taosHashRemove(pMap, &winRes, sizeof(SWinKey));
}
}
@@ -3111,14 +3115,37 @@ void processPullOver(SSDataBlock* pBlock, SHashObj* pMap) {
}
}
+static void addRetriveWindow(SArray* wins, SStreamFinalIntervalOperatorInfo* pInfo) {
+ int32_t size = taosArrayGetSize(wins);
+ for (int32_t i = 0; i < size; i++) {
+ SWinKey* winKey = taosArrayGet(wins, i);
+ STimeWindow nextWin = getFinalTimeWindow(winKey->ts, &pInfo->interval);
+ if (isCloseWindow(&nextWin, &pInfo->twAggSup) && !pInfo->ignoreExpiredData) {
+ void* chIds = taosHashGet(pInfo->pPullDataMap, winKey, sizeof(SWinKey));
+ if (!chIds) {
+ SPullWindowInfo pull = {.window = nextWin, .groupId = winKey->groupId};
+ // add pull data request
+ savePullWindow(&pull, pInfo->pPullWins);
+ int32_t size1 = taosArrayGetSize(pInfo->pChildren);
+ addPullWindow(pInfo->pPullDataMap, winKey, size1);
+ qDebug("===stream===prepare retrive for delete %" PRId64 ", size:%d", winKey->ts, size1);
+ }
+ }
+ }
+}
+
+static void clearFunctionContext(SExprSupp* pSup) {
+ for (int32_t i = 0; i < pSup->numOfExprs; i++) {
+ pSup->pCtx[i].saveHandle.currentPage = -1;
+ }
+}
+
static SSDataBlock* doStreamFinalIntervalAgg(SOperatorInfo* pOperator) {
SStreamFinalIntervalOperatorInfo* pInfo = pOperator->info;
SOperatorInfo* downstream = pOperator->pDownstream[0];
- SArray* pUpdated = taosArrayInit(4, POINTER_BYTES);
- _hash_fn_t hashFn = taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY);
- SHashObj* pUpdatedMap = taosHashInit(1024, hashFn, false, HASH_NO_LOCK);
TSKEY maxTs = INT64_MIN;
+ TSKEY minTs = INT64_MAX;
SExprSupp* pSup = &pOperator->exprSupp;
@@ -3135,19 +3162,29 @@ static SSDataBlock* doStreamFinalIntervalAgg(SOperatorInfo* pOperator) {
return pInfo->pPullDataRes;
}
+ doBuildDeleteResult(pInfo->pDelWins, &pInfo->delIndex, pInfo->pDelRes);
+ if (pInfo->pDelRes->info.rows != 0) {
+ // process the rest of the data
+ printDataBlock(pInfo->pDelRes, IS_FINAL_OP(pInfo) ? "interval final" : "interval semi");
+ return pInfo->pDelRes;
+ }
+
doBuildResultDatablock(pOperator, &pInfo->binfo, &pInfo->groupResInfo, pInfo->aggSup.pResultBuf);
- if (pInfo->binfo.pRes->info.rows == 0) {
- pOperator->status = OP_EXEC_DONE;
- if (!IS_FINAL_OP(pInfo)) {
- // semi interval operator clear disk buffer
- clearStreamIntervalOperator(pInfo);
- } else {
- freeAllPages(pInfo->pRecycledPages, pInfo->aggSup.pResultBuf);
- }
- return NULL;
+ if (pInfo->binfo.pRes->info.rows != 0) {
+ printDataBlock(pInfo->binfo.pRes, IS_FINAL_OP(pInfo) ? "interval final" : "interval semi");
+ return pInfo->binfo.pRes;
}
- printDataBlock(pInfo->binfo.pRes, IS_FINAL_OP(pInfo) ? "interval final" : "interval semi");
- return pInfo->binfo.pRes;
+
+ doSetOperatorCompleted(pOperator);
+ if (!IS_FINAL_OP(pInfo)) {
+ clearFunctionContext(&pOperator->exprSupp);
+ // semi interval operator clear disk buffer
+ clearStreamIntervalOperator(pInfo);
+ qDebug("===stream===clear semi operator");
+ } else {
+ freeAllPages(pInfo->pRecycledPages, pInfo->aggSup.pResultBuf);
+ }
+ return NULL;
} else {
if (!IS_FINAL_OP(pInfo)) {
doBuildResultDatablock(pOperator, &pInfo->binfo, &pInfo->groupResInfo, pInfo->aggSup.pResultBuf);
@@ -3171,6 +3208,9 @@ static SSDataBlock* doStreamFinalIntervalAgg(SOperatorInfo* pOperator) {
}
}
+ SArray* pUpdated = taosArrayInit(4, POINTER_BYTES);
+ _hash_fn_t hashFn = taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY);
+ SHashObj* pUpdatedMap = taosHashInit(1024, hashFn, false, HASH_NO_LOCK);
while (1) {
SSDataBlock* pBlock = downstream->fpSet.getNextFn(downstream);
if (pBlock == NULL) {
@@ -3181,8 +3221,6 @@ static SSDataBlock* doStreamFinalIntervalAgg(SOperatorInfo* pOperator) {
break;
}
printDataBlock(pBlock, IS_FINAL_OP(pInfo) ? "interval final recv" : "interval semi recv");
- maxTs = TMAX(maxTs, pBlock->info.window.ekey);
- maxTs = TMAX(maxTs, pBlock->info.watermark);
ASSERT(pBlock->info.type != STREAM_INVERT);
if (pBlock->info.type == STREAM_NORMAL || pBlock->info.type == STREAM_PULL_DATA) {
@@ -3204,23 +3242,28 @@ static SSDataBlock* doStreamFinalIntervalAgg(SOperatorInfo* pOperator) {
}
removeResults(pUpWins, pUpdatedMap);
copyDataBlock(pInfo->pUpdateRes, pBlock);
- // copyUpdateDataBlock(pInfo->pUpdateRes, pBlock, pInfo->primaryTsIndex);
pInfo->returnUpdate = true;
taosArrayDestroy(pUpWins);
break;
} else if (pBlock->info.type == STREAM_DELETE_DATA || pBlock->info.type == STREAM_DELETE_RESULT) {
- doDeleteSpecifyIntervalWindow(&pInfo->aggSup, pBlock, pInfo->pDelWins, &pInfo->interval);
+ SArray* delWins = taosArrayInit(8, sizeof(SWinKey));
+ doDeleteSpecifyIntervalWindow(&pInfo->aggSup, &pInfo->twAggSup, pBlock, delWins, &pInfo->interval, pUpdatedMap);
if (IS_FINAL_OP(pInfo)) {
int32_t childIndex = getChildIndex(pBlock);
SOperatorInfo* pChildOp = taosArrayGetP(pInfo->pChildren, childIndex);
SStreamFinalIntervalOperatorInfo* pChildInfo = pChildOp->info;
SExprSupp* pChildSup = &pChildOp->exprSupp;
- doDeleteSpecifyIntervalWindow(&pChildInfo->aggSup, pBlock, NULL, &pChildInfo->interval);
- rebuildIntervalWindow(pInfo, pSup, pInfo->pDelWins, pInfo->binfo.pRes->info.groupId,
- pOperator->exprSupp.numOfExprs, pOperator->pTaskInfo, pUpdated);
+ doDeleteSpecifyIntervalWindow(&pChildInfo->aggSup, &pInfo->twAggSup, pBlock, NULL, &pChildInfo->interval, NULL);
+ rebuildIntervalWindow(pInfo, pSup, delWins, pInfo->binfo.pRes->info.groupId, pOperator->exprSupp.numOfExprs,
+ pOperator->pTaskInfo, pUpdatedMap);
+ addRetriveWindow(delWins, pInfo);
+ taosArrayAddAll(pInfo->pDelWins, delWins);
+ taosArrayDestroy(delWins);
continue;
}
- removeResults(pInfo->pDelWins, pUpdatedMap);
+ removeResults(delWins, pUpdatedMap);
+ taosArrayAddAll(pInfo->pDelWins, delWins);
+ taosArrayDestroy(delWins);
break;
} else if (pBlock->info.type == STREAM_GET_ALL && IS_FINAL_OP(pInfo)) {
getAllIntervalWindow(pInfo->aggSup.pResultRowHashTable, pUpdatedMap);
@@ -3244,7 +3287,7 @@ static SSDataBlock* doStreamFinalIntervalAgg(SOperatorInfo* pOperator) {
projectApplyFunctions(pExprSup->pExprInfo, pBlock, pBlock, pExprSup->pCtx, pExprSup->numOfExprs, NULL);
}
setInputDataBlock(pOperator, pSup->pCtx, pBlock, pInfo->order, MAIN_SCAN, true);
- doHashInterval(pOperator, pBlock, pBlock->info.groupId, pUpdatedMap);
+ doHashIntervalAgg(pOperator, pBlock, pBlock->info.groupId, pUpdatedMap);
if (IS_FINAL_OP(pInfo)) {
int32_t chIndex = getChildIndex(pBlock);
int32_t size = taosArrayGetSize(pInfo->pChildren);
@@ -3262,11 +3305,15 @@ static SSDataBlock* doStreamFinalIntervalAgg(SOperatorInfo* pOperator) {
SOperatorInfo* pChildOp = taosArrayGetP(pInfo->pChildren, chIndex);
SStreamFinalIntervalOperatorInfo* pChInfo = pChildOp->info;
setInputDataBlock(pChildOp, pChildOp->exprSupp.pCtx, pBlock, pChInfo->order, MAIN_SCAN, true);
- doHashInterval(pChildOp, pBlock, pBlock->info.groupId, NULL);
+ doHashIntervalAgg(pChildOp, pBlock, pBlock->info.groupId, NULL);
}
+ maxTs = TMAX(maxTs, pBlock->info.window.ekey);
+ maxTs = TMAX(maxTs, pBlock->info.watermark);
+ minTs = TMIN(minTs, pBlock->info.window.skey);
}
pInfo->twAggSup.maxTs = TMAX(pInfo->twAggSup.maxTs, maxTs);
+ pInfo->twAggSup.minTs = TMIN(pInfo->twAggSup.minTs, minTs);
if (IS_FINAL_OP(pInfo)) {
closeIntervalWindow(pInfo->aggSup.pResultRowHashTable, &pInfo->twAggSup, &pInfo->interval, pInfo->pPullDataMap,
pUpdatedMap, pInfo->pRecycledPages, pInfo->aggSup.pResultBuf);
@@ -3294,6 +3341,13 @@ static SSDataBlock* doStreamFinalIntervalAgg(SOperatorInfo* pOperator) {
return pInfo->pPullDataRes;
}
+ doBuildDeleteResult(pInfo->pDelWins, &pInfo->delIndex, pInfo->pDelRes);
+ if (pInfo->pDelRes->info.rows != 0) {
+ // process the rest of the data
+ printDataBlock(pInfo->pDelRes, IS_FINAL_OP(pInfo) ? "interval final" : "interval semi");
+ return pInfo->pDelRes;
+ }
+
doBuildResultDatablock(pOperator, &pInfo->binfo, &pInfo->groupResInfo, pInfo->aggSup.pResultBuf);
if (pInfo->binfo.pRes->info.rows != 0) {
printDataBlock(pInfo->binfo.pRes, IS_FINAL_OP(pInfo) ? "interval final" : "interval semi");
@@ -3307,50 +3361,9 @@ static SSDataBlock* doStreamFinalIntervalAgg(SOperatorInfo* pOperator) {
// process the rest of the data
return pInfo->pUpdateRes;
}
-
- doBuildDeleteResult(pInfo->pDelWins, &pInfo->delIndex, pInfo->pDelRes);
- if (pInfo->pDelRes->info.rows != 0) {
- // process the rest of the data
- printDataBlock(pInfo->pDelRes, IS_FINAL_OP(pInfo) ? "interval final" : "interval semi");
- return pInfo->pDelRes;
- }
return NULL;
}
-SSDataBlock* createSpecialDataBlock(EStreamType type) {
- SSDataBlock* pBlock = taosMemoryCalloc(1, sizeof(SSDataBlock));
- pBlock->info.hasVarCol = false;
- pBlock->info.groupId = 0;
- pBlock->info.rows = 0;
- pBlock->info.type = type;
- pBlock->info.rowSize =
- sizeof(TSKEY) + sizeof(TSKEY) + sizeof(uint64_t) + sizeof(uint64_t) + sizeof(TSKEY) + sizeof(TSKEY);
- pBlock->info.watermark = INT64_MIN;
-
- pBlock->pDataBlock = taosArrayInit(6, sizeof(SColumnInfoData));
- SColumnInfoData infoData = {0};
- infoData.info.type = TSDB_DATA_TYPE_TIMESTAMP;
- infoData.info.bytes = sizeof(TSKEY);
- // window start ts
- taosArrayPush(pBlock->pDataBlock, &infoData);
- // window end ts
- taosArrayPush(pBlock->pDataBlock, &infoData);
-
- infoData.info.type = TSDB_DATA_TYPE_UBIGINT;
- infoData.info.bytes = sizeof(uint64_t);
- // uid
- taosArrayPush(pBlock->pDataBlock, &infoData);
- // group id
- taosArrayPush(pBlock->pDataBlock, &infoData);
-
- // calculate start ts
- taosArrayPush(pBlock->pDataBlock, &infoData);
- // calculate end ts
- taosArrayPush(pBlock->pDataBlock, &infoData);
-
- return pBlock;
-}
-
SOperatorInfo* createStreamFinalIntervalOperatorInfo(SOperatorInfo* downstream, SPhysiNode* pPhyNode,
SExecTaskInfo* pTaskInfo, int32_t numOfChild) {
SIntervalPhysiNode* pIntervalPhyNode = (SIntervalPhysiNode*)pPhyNode;
@@ -3372,6 +3385,7 @@ SOperatorInfo* createStreamFinalIntervalOperatorInfo(SOperatorInfo* downstream,
.waterMark = pIntervalPhyNode->window.watermark,
.calTrigger = pIntervalPhyNode->window.triggerType,
.maxTs = INT64_MIN,
+ .minTs = INT64_MAX,
};
ASSERT(pInfo->twAggSup.calTrigger != STREAM_TRIGGER_MAX_DELAY);
pInfo->primaryTsIndex = ((SColumnNode*)pIntervalPhyNode->window.pTspk)->slotId;
@@ -3399,7 +3413,6 @@ SOperatorInfo* createStreamFinalIntervalOperatorInfo(SOperatorInfo* downstream,
initBasicInfo(&pInfo->binfo, pResBlock);
ASSERT(numOfCols > 0);
- increaseTs(pOperator->exprSupp.pCtx);
initExecTimeWindowInfo(&pInfo->twAggSup.timeWindowData, &pTaskInfo->window);
initResultRowInfo(&pInfo->binfo.resultRowInfo);
@@ -3430,6 +3443,7 @@ SOperatorInfo* createStreamFinalIntervalOperatorInfo(SOperatorInfo* downstream,
// semi interval operator does not catch result
pInfo->isFinal = false;
pOperator->name = "StreamSemiIntervalOperator";
+ ASSERT(pInfo->aggSup.currentPageId == -1);
}
if (!IS_FINAL_OP(pInfo) || numOfChild == 0) {
@@ -3449,15 +3463,13 @@ SOperatorInfo* createStreamFinalIntervalOperatorInfo(SOperatorInfo* downstream,
pOperator->operatorType = pPhyNode->type;
pOperator->blocking = true;
pOperator->status = OP_NOT_OPENED;
- pOperator->exprSupp.pExprInfo = pExprInfo;
- pOperator->exprSupp.numOfExprs = numOfCols;
pOperator->info = pInfo;
pOperator->fpSet =
createOperatorFpSet(NULL, doStreamFinalIntervalAgg, NULL, NULL, destroyStreamFinalIntervalOperatorInfo,
aggEncodeResultRow, aggDecodeResultRow, NULL);
if (pPhyNode->type == QUERY_NODE_PHYSICAL_PLAN_STREAM_SEMI_INTERVAL) {
- initIntervalDownStream(downstream, pPhyNode->type, &pInfo->aggSup);
+ initIntervalDownStream(downstream, pPhyNode->type, &pInfo->aggSup, &pInfo->interval, &pInfo->twAggSup);
}
code = appendDownstream(pOperator, &downstream, 1);
if (code != TSDB_CODE_SUCCESS) {
@@ -3540,11 +3552,10 @@ int32_t initBasicInfoEx(SOptrBasicInfo* pBasicInfo, SExprSupp* pSup, SExprInfo*
initBasicInfo(pBasicInfo, pResultBlock);
for (int32_t i = 0; i < numOfCols; ++i) {
- pSup->pCtx[i].pBuf = NULL;
+ pSup->pCtx[i].saveHandle.pBuf = NULL;
}
ASSERT(numOfCols > 0);
- increaseTs(pSup->pCtx);
return TSDB_CODE_SUCCESS;
}
@@ -3555,10 +3566,18 @@ void initDummyFunction(SqlFunctionCtx* pDummy, SqlFunctionCtx* pCtx, int32_t num
}
void initDownStream(SOperatorInfo* downstream, SStreamAggSupporter* pAggSup, int64_t gap, int64_t waterMark,
- uint16_t type) {
- ASSERT(downstream->operatorType == QUERY_NODE_PHYSICAL_PLAN_STREAM_SCAN);
+ uint16_t type, int32_t tsColIndex) {
+ if (downstream->operatorType == QUERY_NODE_PHYSICAL_PLAN_STREAM_PARTITION) {
+ SStreamPartitionOperatorInfo* pScanInfo = downstream->info;
+ pScanInfo->tsColIndex = tsColIndex;
+ }
+
+ if (downstream->operatorType != QUERY_NODE_PHYSICAL_PLAN_STREAM_SCAN) {
+ initDownStream(downstream->pDownstream[0], pAggSup, gap, waterMark, type, tsColIndex);
+ return;
+ }
SStreamScanInfo* pScanInfo = downstream->info;
- pScanInfo->sessionSup = (SessionWindowSupporter){.pStreamAggSup = pAggSup, .gap = gap, .parentType = type};
+ pScanInfo->windowSup = (SWindowSupporter){.pStreamAggSup = pAggSup, .gap = gap, .parentType = type};
pScanInfo->pUpdateInfo = updateInfoInit(60000, TSDB_TIME_PRECISION_MILLI, waterMark);
}
@@ -3610,7 +3629,11 @@ SOperatorInfo* createStreamSessionAggOperatorInfo(SOperatorInfo* downstream, SPh
initDummyFunction(pInfo->pDummyCtx, pSup->pCtx, numOfCols);
pInfo->twAggSup = (STimeWindowAggSupp){
- .waterMark = pSessionNode->window.watermark, .calTrigger = pSessionNode->window.triggerType, .maxTs = INT64_MIN};
+ .waterMark = pSessionNode->window.watermark,
+ .calTrigger = pSessionNode->window.triggerType,
+ .maxTs = INT64_MIN,
+ .minTs = INT64_MAX,
+ };
initResultRowInfo(&pInfo->binfo.resultRowInfo);
initExecTimeWindowInfo(&pInfo->twAggSup.timeWindowData, &pTaskInfo->window);
@@ -3630,20 +3653,18 @@ SOperatorInfo* createStreamSessionAggOperatorInfo(SOperatorInfo* downstream, SPh
pInfo->isFinal = false;
pInfo->pPhyNode = pPhyNode;
pInfo->ignoreExpiredData = pSessionNode->window.igExpired;
- pInfo->returnDelete = false;
pOperator->name = "StreamSessionWindowAggOperator";
pOperator->operatorType = QUERY_NODE_PHYSICAL_PLAN_STREAM_SESSION;
pOperator->blocking = true;
pOperator->status = OP_NOT_OPENED;
- pOperator->exprSupp.pExprInfo = pExprInfo;
- pOperator->exprSupp.numOfExprs = numOfCols;
pOperator->info = pInfo;
pOperator->fpSet =
createOperatorFpSet(operatorDummyOpenFn, doStreamSessionAgg, NULL, NULL, destroyStreamSessionAggOperatorInfo,
aggEncodeResultRow, aggDecodeResultRow, NULL);
if (downstream) {
- initDownStream(downstream, &pInfo->streamAggSup, pInfo->gap, pInfo->twAggSup.waterMark, pOperator->operatorType);
+ initDownStream(downstream, &pInfo->streamAggSup, pInfo->gap, pInfo->twAggSup.waterMark, pOperator->operatorType,
+ pInfo->primaryTsIndex);
code = appendDownstream(pOperator, &downstream, 1);
}
return pOperator;
@@ -3673,13 +3694,15 @@ bool isInTimeWindow(STimeWindow* pWin, TSKEY ts, int64_t gap) {
bool isInWindow(SResultWindowInfo* pWinInfo, TSKEY ts, int64_t gap) { return isInTimeWindow(&pWinInfo->win, ts, gap); }
-static SResultWindowInfo* insertNewSessionWindow(SArray* pWinInfos, TSKEY ts, int32_t index) {
- SResultWindowInfo win = {.pos.offset = -1, .pos.pageId = -1, .win.skey = ts, .win.ekey = ts, .isOutput = false};
+static SResultWindowInfo* insertNewSessionWindow(SArray* pWinInfos, TSKEY startTs, TSKEY endTs, int32_t index) {
+ SResultWindowInfo win = {
+ .pos.offset = -1, .pos.pageId = -1, .win.skey = startTs, .win.ekey = endTs, .isOutput = false};
return taosArrayInsert(pWinInfos, index, &win);
}
-static SResultWindowInfo* addNewSessionWindow(SArray* pWinInfos, TSKEY ts) {
- SResultWindowInfo win = {.pos.offset = -1, .pos.pageId = -1, .win.skey = ts, .win.ekey = ts, .isOutput = false};
+static SResultWindowInfo* addNewSessionWindow(SArray* pWinInfos, TSKEY startTs, TSKEY endTs) {
+ SResultWindowInfo win = {
+ .pos.offset = -1, .pos.pageId = -1, .win.skey = startTs, .win.ekey = endTs, .isOutput = false};
return taosArrayPush(pWinInfos, &win);
}
@@ -3698,7 +3721,8 @@ SArray* getWinInfos(SStreamAggSupporter* pAggSup, uint64_t groupId) {
// don't add new window
SResultWindowInfo* getCurSessionWindow(SStreamAggSupporter* pAggSup, TSKEY startTs, TSKEY endTs, uint64_t groupId,
int64_t gap, int32_t* pIndex) {
- SArray* pWinInfos = getWinInfos(pAggSup, groupId);
+ STimeWindow searchWin = {.skey = startTs, .ekey = endTs};
+ SArray* pWinInfos = getWinInfos(pAggSup, groupId);
pAggSup->pCurWins = pWinInfos;
int32_t size = taosArrayGetSize(pWinInfos);
@@ -3710,7 +3734,7 @@ SResultWindowInfo* getCurSessionWindow(SStreamAggSupporter* pAggSup, TSKEY start
SResultWindowInfo* pWin = NULL;
if (index >= 0) {
pWin = taosArrayGet(pWinInfos, index);
- if (isInWindow(pWin, startTs, gap)) {
+ if (isInWindow(pWin, startTs, gap) || isInTimeWindow(&searchWin, pWin->win.skey, gap)) {
*pIndex = index;
return pWin;
}
@@ -3718,7 +3742,7 @@ SResultWindowInfo* getCurSessionWindow(SStreamAggSupporter* pAggSup, TSKEY start
if (index + 1 < size) {
pWin = taosArrayGet(pWinInfos, index + 1);
- if (isInWindow(pWin, startTs, gap)) {
+ if (isInWindow(pWin, startTs, gap) || isInTimeWindow(&searchWin, pWin->win.skey, gap)) {
*pIndex = index + 1;
return pWin;
} else if (endTs != INT64_MIN && isInWindow(pWin, endTs, gap)) {
@@ -3738,7 +3762,7 @@ SResultWindowInfo* getSessionTimeWindow(SStreamAggSupporter* pAggSup, TSKEY star
int32_t size = taosArrayGetSize(pWinInfos);
if (size == 0) {
*pIndex = 0;
- return addNewSessionWindow(pWinInfos, startTs);
+ return addNewSessionWindow(pWinInfos, startTs, endTs);
}
// find the first position which is smaller than the key
int32_t index = binarySearch(pWinInfos, size, startTs, TSDB_ORDER_DESC, getSessionWindowEndkey);
@@ -3764,10 +3788,10 @@ SResultWindowInfo* getSessionTimeWindow(SStreamAggSupporter* pAggSup, TSKEY star
if (index == size - 1) {
*pIndex = taosArrayGetSize(pWinInfos);
- return addNewSessionWindow(pWinInfos, startTs);
+ return addNewSessionWindow(pWinInfos, startTs, endTs);
}
*pIndex = index + 1;
- return insertNewSessionWindow(pWinInfos, startTs, index + 1);
+ return insertNewSessionWindow(pWinInfos, startTs, endTs, index + 1);
}
int32_t updateSessionWindowInfo(SResultWindowInfo* pWinInfo, TSKEY* pStartTs, TSKEY* pEndTs, uint64_t groupId,
@@ -3779,7 +3803,7 @@ int32_t updateSessionWindowInfo(SResultWindowInfo* pWinInfo, TSKEY* pStartTs, TS
if (pWinInfo->win.skey > pStartTs[i]) {
if (pStDeleted && pWinInfo->isOutput) {
SWinKey res = {.ts = pWinInfo->win.skey, .groupId = groupId};
- taosHashPut(pStDeleted, &pWinInfo->pos, sizeof(SResultRowPosition), &res, sizeof(SWinKey));
+ taosHashPut(pStDeleted, &res, sizeof(SWinKey), &res, sizeof(SWinKey));
pWinInfo->isOutput = false;
}
pWinInfo->win.skey = pStartTs[i];
@@ -3803,11 +3827,10 @@ static int32_t setWindowOutputBuf(SResultWindowInfo* pWinInfo, SResultRow** pRes
}
if (pWinInfo->pos.pageId == -1) {
- *pResult = getNewResultRow(pAggSup->pResultBuf, groupId, pAggSup->resultRowSize);
+ *pResult = getNewResultRow(pAggSup->pResultBuf, &pAggSup->currentPageId, pAggSup->resultRowSize);
if (*pResult == NULL) {
return TSDB_CODE_OUT_OF_MEMORY;
}
- initResultRow(*pResult);
// add a new result set for a new group
pWinInfo->pos.pageId = (*pResult)->pageId;
@@ -3894,11 +3917,12 @@ void compactTimeWindow(SStreamSessionAggOperatorInfo* pInfo, int32_t startIndex,
setWindowOutputBuf(pWinInfo, &pWinResult, pInfo->pDummyCtx, groupId, numOfOutput, pSup->rowEntryInfoOffset,
&pInfo->streamAggSup, pTaskInfo);
pCurWin->win.ekey = TMAX(pCurWin->win.ekey, pWinInfo->win.ekey);
- compactFunctions(pSup->pCtx, pInfo->pDummyCtx, numOfOutput, pTaskInfo);
+ updateTimeWindowInfo(&pInfo->twAggSup.timeWindowData, &pCurWin->win, true);
+ compactFunctions(pSup->pCtx, pInfo->pDummyCtx, numOfOutput, pTaskInfo, &pInfo->twAggSup.timeWindowData);
taosHashRemove(pStUpdated, &pWinInfo->pos, sizeof(SResultRowPosition));
- if (pWinInfo->isOutput) {
+ if (pWinInfo->isOutput && pStDeleted) {
SWinKey res = {.ts = pWinInfo->win.skey, .groupId = groupId};
- taosHashPut(pStDeleted, &pWinInfo->pos, sizeof(SResultRowPosition), &res, sizeof(SWinKey));
+ taosHashPut(pStDeleted, &res, sizeof(SWinKey), &res, sizeof(SWinKey));
pWinInfo->isOutput = false;
}
taosArrayRemove(pInfo->streamAggSup.pCurWins, i);
@@ -3989,18 +4013,24 @@ static void doDeleteTimeWindows(SStreamAggSupporter* pAggSup, SSDataBlock* pBloc
SColumnInfoData* pGroupCol = taosArrayGet(pBlock->pDataBlock, GROUPID_COLUMN_INDEX);
uint64_t* gpDatas = (uint64_t*)pGroupCol->pData;
for (int32_t i = 0; i < pBlock->info.rows; i++) {
- int32_t winIndex = 0;
- while (1) {
- SResultWindowInfo* pCurWin = getCurSessionWindow(pAggSup, startDatas[i], endDatas[i], gpDatas[i], gap, &winIndex);
- if (!pCurWin) {
- break;
- }
+ int32_t winIndex = 0;
+ SResultWindowInfo* pCurWin = getCurSessionWindow(pAggSup, startDatas[i], endDatas[i], gpDatas[i], gap, &winIndex);
+ if (!pCurWin) {
+ continue;
+ }
+
+ do {
+ SResultWindowInfo delWin = *pCurWin;
deleteWindow(pAggSup->pCurWins, winIndex, fp);
if (result) {
- pCurWin->groupId = gpDatas[i];
- taosArrayPush(result, pCurWin);
+ delWin.groupId = gpDatas[i];
+ taosArrayPush(result, &delWin);
}
- }
+ if (winIndex >= taosArrayGetSize(pAggSup->pCurWins)) {
+ break;
+ }
+ pCurWin = taosArrayGet(pAggSup->pCurWins, winIndex);
+ } while (pCurWin->win.skey <= endDatas[i]);
}
}
@@ -4023,6 +4053,7 @@ static void doClearSessionWindows(SStreamAggSupporter* pAggSup, SExprSupp* pSup,
ASSERT(isInWindow(pCurWin, tsCols[i], gap));
doClearWindowImpl(&pCurWin->pos, pAggSup->pResultBuf, pSup, numOfOutput);
if (result) {
+ pCurWin->groupId = gpCols[i];
taosArrayPush(result, pCurWin);
}
}
@@ -4057,10 +4088,18 @@ void doBuildDeleteDataBlock(SHashObj* pStDeleted, SSDataBlock* pBlock, void** It
size_t keyLen = 0;
while (((*Ite) = taosHashIterate(pStDeleted, *Ite)) != NULL) {
SWinKey* res = *Ite;
- SColumnInfoData* pTsCol = taosArrayGet(pBlock->pDataBlock, START_TS_COLUMN_INDEX);
- colDataAppend(pTsCol, pBlock->info.rows, (const char*)&res->ts, false);
+ SColumnInfoData* pStartTsCol = taosArrayGet(pBlock->pDataBlock, START_TS_COLUMN_INDEX);
+ colDataAppend(pStartTsCol, pBlock->info.rows, (const char*)&res->ts, false);
+ SColumnInfoData* pEndTsCol = taosArrayGet(pBlock->pDataBlock, END_TS_COLUMN_INDEX);
+ colDataAppend(pEndTsCol, pBlock->info.rows, (const char*)&res->ts, false);
+ SColumnInfoData* pUidCol = taosArrayGet(pBlock->pDataBlock, UID_COLUMN_INDEX);
+ colDataAppendNULL(pUidCol, pBlock->info.rows);
SColumnInfoData* pGpCol = taosArrayGet(pBlock->pDataBlock, GROUPID_COLUMN_INDEX);
colDataAppend(pGpCol, pBlock->info.rows, (const char*)&res->groupId, false);
+ SColumnInfoData* pCalStCol = taosArrayGet(pBlock->pDataBlock, CALCULATE_START_TS_COLUMN_INDEX);
+ colDataAppendNULL(pCalStCol, pBlock->info.rows);
+ SColumnInfoData* pCalEdCol = taosArrayGet(pBlock->pDataBlock, CALCULATE_END_TS_COLUMN_INDEX);
+ colDataAppendNULL(pCalEdCol, pBlock->info.rows);
pBlock->info.rows += 1;
if (pBlock->info.rows + 1 >= pBlock->info.capacity) {
break;
@@ -4071,20 +4110,17 @@ void doBuildDeleteDataBlock(SHashObj* pStDeleted, SSDataBlock* pBlock, void** It
}
}
-static void rebuildTimeWindow(SStreamSessionAggOperatorInfo* pInfo, SArray* pWinArray, int32_t groupId,
- int32_t numOfOutput, SOperatorInfo* pOperator) {
+static void rebuildTimeWindow(SStreamSessionAggOperatorInfo* pInfo, SArray* pWinArray, int32_t numOfOutput,
+ SOperatorInfo* pOperator, SHashObj* pStUpdated) {
SExprSupp* pSup = &pOperator->exprSupp;
SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo;
-
- int32_t size = taosArrayGetSize(pWinArray);
+ int32_t size = taosArrayGetSize(pWinArray);
ASSERT(pInfo->pChildren);
for (int32_t i = 0; i < size; i++) {
SResultWindowInfo* pParentWin = taosArrayGet(pWinArray, i);
- SResultRow* pCurResult = NULL;
- setWindowOutputBuf(pParentWin, &pCurResult, pSup->pCtx, groupId, numOfOutput, pSup->rowEntryInfoOffset,
- &pInfo->streamAggSup, pTaskInfo);
- int32_t numOfChildren = taosArrayGetSize(pInfo->pChildren);
+ uint64_t groupId = pParentWin->groupId;
+ int32_t numOfChildren = taosArrayGetSize(pInfo->pChildren);
for (int32_t j = 0; j < numOfChildren; j++) {
SOperatorInfo* pChild = taosArrayGetP(pInfo->pChildren, j);
SStreamSessionAggOperatorInfo* pChInfo = pChild->info;
@@ -4097,22 +4133,36 @@ static void rebuildTimeWindow(SStreamSessionAggOperatorInfo* pInfo, SArray* pWin
for (int32_t k = index; k < chWinSize; k++) {
SResultWindowInfo* pChWin = taosArrayGet(pChWins, k);
if (pParentWin->win.skey <= pChWin->win.skey && pChWin->win.ekey <= pParentWin->win.ekey) {
+ int32_t winIndex = 0;
+ SResultWindowInfo* pNewParWin =
+ getSessionTimeWindow(&pInfo->streamAggSup, pChWin->win.skey, pChWin->win.ekey, groupId, 0, &winIndex);
+ SResultRow* pPareResult = NULL;
+ setWindowOutputBuf(pNewParWin, &pPareResult, pSup->pCtx, groupId, numOfOutput, pSup->rowEntryInfoOffset,
+ &pInfo->streamAggSup, pTaskInfo);
SResultRow* pChResult = NULL;
setWindowOutputBuf(pChWin, &pChResult, pChild->exprSupp.pCtx, groupId, numOfOutput,
pChild->exprSupp.rowEntryInfoOffset, &pChInfo->streamAggSup, pTaskInfo);
- compactFunctions(pSup->pCtx, pChild->exprSupp.pCtx, numOfOutput, pTaskInfo);
+ updateTimeWindowInfo(&pInfo->twAggSup.timeWindowData, &pNewParWin->win, true);
+ compactFunctions(pSup->pCtx, pChild->exprSupp.pCtx, numOfOutput, pTaskInfo, &pInfo->twAggSup.timeWindowData);
+
+ int32_t winNum = getNumCompactWindow(pInfo->streamAggSup.pCurWins, winIndex, pInfo->gap);
+ if (winNum > 0) {
+ compactTimeWindow(pInfo, winIndex, winNum, groupId, numOfOutput, pStUpdated, NULL, pOperator);
+ }
+
SFilePage* bufPage = getBufPage(pChInfo->streamAggSup.pResultBuf, pChWin->pos.pageId);
releaseBufPage(pChInfo->streamAggSup.pResultBuf, bufPage);
- continue;
+
+ bufPage = getBufPage(pInfo->streamAggSup.pResultBuf, pNewParWin->pos.pageId);
+ setBufPageDirty(bufPage, true);
+ releaseBufPage(pInfo->streamAggSup.pResultBuf, bufPage);
+ SWinKey value = {.ts = pNewParWin->win.skey, .groupId = groupId};
+ taosHashPut(pStUpdated, &pNewParWin->pos, sizeof(SResultRowPosition), &value, sizeof(SWinKey));
} else if (!pChWin->isClosed) {
break;
}
}
}
- SFilePage* bufPage = getBufPage(pInfo->streamAggSup.pResultBuf, pParentWin->pos.pageId);
- ASSERT(size > 0);
- setBufPageDirty(bufPage, true);
- releaseBufPage(pInfo->streamAggSup.pResultBuf, bufPage);
}
}
@@ -4188,7 +4238,46 @@ static void copyDeleteWindowInfo(SArray* pResWins, SHashObj* pStDeleted) {
for (int32_t i = 0; i < size; i++) {
SResultWindowInfo* pWinInfo = taosArrayGet(pResWins, i);
SWinKey res = {.ts = pWinInfo->win.skey, .groupId = pWinInfo->groupId};
- taosHashPut(pStDeleted, &pWinInfo->pos, sizeof(SResultRowPosition), &res, sizeof(SWinKey));
+ taosHashPut(pStDeleted, &res, sizeof(SWinKey), &res, sizeof(SWinKey));
+ }
+}
+
+static void removeSessionResults(SHashObj* pHashMap, SArray* pWins) {
+ int32_t size = taosArrayGetSize(pWins);
+ for (int32_t i = 0; i < size; i++) {
+ SResultWindowInfo* pWin = taosArrayGet(pWins, i);
+ taosHashRemove(pHashMap, &pWin->pos, sizeof(SResultRowPosition));
+ }
+}
+
+int32_t compareWinKey(void* pKey, void* data, int32_t index) {
+ SArray* res = (SArray*)data;
+ SResKeyPos* pos = taosArrayGetP(res, index);
+ SWinKey* pData = (SWinKey*)pKey;
+ if (pData->ts == *(int64_t*)pos->key) {
+ if (pData->groupId > pos->groupId) {
+ return 1;
+ } else if (pData->groupId < pos->groupId) {
+ return -1;
+ }
+ return 0;
+ } else if (pData->ts > *(int64_t*)pos->key) {
+ return 1;
+ }
+ return -1;
+}
+
+static void removeSessionDeleteResults(SArray* update, SHashObj* pStDeleted) {
+ int32_t size = taosHashGetSize(pStDeleted);
+ if (size == 0) {
+ return;
+ }
+
+ int32_t num = taosArrayGetSize(update);
+ for (int32_t i = 0; i < num; i++) {
+ SResKeyPos* pos = taosArrayGetP(update, i);
+ SWinKey winKey = {.ts = *(int64_t*)pos->key, .groupId = pos->groupId};
+ taosHashRemove(pStDeleted, &winKey, sizeof(SWinKey));
}
}
@@ -4216,7 +4305,7 @@ static SSDataBlock* doStreamSessionAgg(SOperatorInfo* pOperator) {
_hash_fn_t hashFn = taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY);
SHashObj* pStUpdated = taosHashInit(64, hashFn, true, HASH_NO_LOCK);
SOperatorInfo* downstream = pOperator->pDownstream[0];
- SArray* pUpdated = taosArrayInit(16, POINTER_BYTES);
+ SArray* pUpdated = taosArrayInit(16, POINTER_BYTES); // SResKeyPos
while (1) {
SSDataBlock* pBlock = downstream->fpSet.getNextFn(downstream);
if (pBlock == NULL) {
@@ -4234,7 +4323,7 @@ static SSDataBlock* doStreamSessionAgg(SOperatorInfo* pOperator) {
SStreamSessionAggOperatorInfo* pChildInfo = pChildOp->info;
doClearSessionWindows(&pChildInfo->streamAggSup, &pChildOp->exprSupp, pBlock, START_TS_COLUMN_INDEX,
pChildOp->exprSupp.numOfExprs, 0, NULL);
- rebuildTimeWindow(pInfo, pWins, pBlock->info.groupId, pOperator->exprSupp.numOfExprs, pOperator);
+ rebuildTimeWindow(pInfo, pWins, pOperator->exprSupp.numOfExprs, pOperator, pStUpdated);
}
taosArrayDestroy(pWins);
continue;
@@ -4248,9 +4337,10 @@ static SSDataBlock* doStreamSessionAgg(SOperatorInfo* pOperator) {
SStreamSessionAggOperatorInfo* pChildInfo = pChildOp->info;
// gap must be 0
doDeleteTimeWindows(&pChildInfo->streamAggSup, pBlock, 0, NULL, NULL);
- rebuildTimeWindow(pInfo, pWins, pBlock->info.groupId, pOperator->exprSupp.numOfExprs, pOperator);
+ rebuildTimeWindow(pInfo, pWins, pOperator->exprSupp.numOfExprs, pOperator, pStUpdated);
}
copyDeleteWindowInfo(pWins, pInfo->pStDeleted);
+ removeSessionResults(pStUpdated, pWins);
taosArrayDestroy(pWins);
continue;
} else if (pBlock->info.type == STREAM_GET_ALL) {
@@ -4293,6 +4383,7 @@ static SSDataBlock* doStreamSessionAgg(SOperatorInfo* pOperator) {
pInfo->ignoreExpiredData, NULL);
closeChildSessionWindow(pInfo->pChildren, pInfo->twAggSup.maxTs, pInfo->ignoreExpiredData, NULL);
copyUpdateResult(pStUpdated, pUpdated);
+ removeSessionDeleteResults(pUpdated, pInfo->pStDeleted);
taosHashCleanup(pStUpdated);
finalizeUpdatedResult(pSup->numOfExprs, pInfo->streamAggSup.pResultBuf, pUpdated, pSup->rowEntryInfoOffset);
@@ -4320,14 +4411,7 @@ static void clearStreamSessionOperator(SStreamSessionAggOperatorInfo* pInfo) {
}
}
clearDiskbasedBuf(pInfo->streamAggSup.pResultBuf);
-}
-
-static void removeSessionResults(SHashObj* pHashMap, SArray* pWins) {
- int32_t size = taosArrayGetSize(pWins);
- for (int32_t i = 0; i < size; i++) {
- SResultWindowInfo* pWin = taosArrayGet(pWins, i);
- taosHashRemove(pHashMap, &pWin->pos, sizeof(SResultRowPosition));
- }
+ pInfo->streamAggSup.currentPageId = -1;
}
static SSDataBlock* doStreamSessionSemiAgg(SOperatorInfo* pOperator) {
@@ -4338,30 +4422,35 @@ static SSDataBlock* doStreamSessionSemiAgg(SOperatorInfo* pOperator) {
if (pOperator->status == OP_EXEC_DONE) {
return NULL;
- } else if (pOperator->status == OP_RES_TO_RETURN) {
+ }
+
+ {
doBuildResultDatablock(pOperator, pBInfo, &pInfo->groupResInfo, pInfo->streamAggSup.pResultBuf);
if (pBInfo->pRes->info.rows > 0) {
printDataBlock(pBInfo->pRes, "semi session");
return pBInfo->pRes;
}
- // doBuildDeleteDataBlock(pInfo->pStDeleted, pInfo->pDelRes, &pInfo->pDelIterator);
- if (pInfo->pDelRes->info.rows > 0 && !pInfo->returnDelete) {
- pInfo->returnDelete = true;
+ doBuildDeleteDataBlock(pInfo->pStDeleted, pInfo->pDelRes, &pInfo->pDelIterator);
+ if (pInfo->pDelRes->info.rows > 0) {
printDataBlock(pInfo->pDelRes, "semi session");
return pInfo->pDelRes;
}
- if (pInfo->pUpdateRes->info.rows > 0) {
+ if (pInfo->pUpdateRes->info.rows > 0 && pInfo->returnUpdate) {
+ pInfo->returnUpdate = false;
// process the rest of the data
- pOperator->status = OP_OPENED;
printDataBlock(pInfo->pUpdateRes, "semi session");
return pInfo->pUpdateRes;
}
- // semi interval operator clear disk buffer
- clearStreamSessionOperator(pInfo);
- pOperator->status = OP_EXEC_DONE;
- return NULL;
+
+ if (pOperator->status == OP_RES_TO_RETURN) {
+ clearFunctionContext(&pOperator->exprSupp);
+ // semi interval operator clear disk buffer
+ clearStreamSessionOperator(pInfo);
+ pOperator->status = OP_EXEC_DONE;
+ return NULL;
+ }
}
_hash_fn_t hashFn = taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY);
@@ -4372,6 +4461,7 @@ static SSDataBlock* doStreamSessionSemiAgg(SOperatorInfo* pOperator) {
SSDataBlock* pBlock = downstream->fpSet.getNextFn(downstream);
if (pBlock == NULL) {
clearSpecialDataBlock(pInfo->pUpdateRes);
+ pOperator->status = OP_RES_TO_RETURN;
break;
}
printDataBlock(pBlock, "semi session recv");
@@ -4382,12 +4472,15 @@ static SSDataBlock* doStreamSessionSemiAgg(SOperatorInfo* pOperator) {
removeSessionResults(pStUpdated, pWins);
taosArrayDestroy(pWins);
copyDataBlock(pInfo->pUpdateRes, pBlock);
+ pInfo->returnUpdate = true;
break;
} else if (pBlock->info.type == STREAM_DELETE_DATA || pBlock->info.type == STREAM_DELETE_RESULT) {
// gap must be 0
- doDeleteTimeWindows(&pInfo->streamAggSup, pBlock, 0, NULL, NULL);
- copyDataBlock(pInfo->pDelRes, pBlock);
- pInfo->pDelRes->info.type = STREAM_DELETE_RESULT;
+ SArray* pWins = taosArrayInit(16, sizeof(SResultWindowInfo));
+ doDeleteTimeWindows(&pInfo->streamAggSup, pBlock, 0, pWins, NULL);
+ copyDeleteWindowInfo(pWins, pInfo->pStDeleted);
+ removeSessionResults(pStUpdated, pWins);
+ taosArrayDestroy(pWins);
break;
} else if (pBlock->info.type == STREAM_GET_ALL) {
getAllSessionWindow(pInfo->streamAggSup.pResultRows, pUpdated, getResWinForSession);
@@ -4400,18 +4493,15 @@ static SSDataBlock* doStreamSessionSemiAgg(SOperatorInfo* pOperator) {
}
// the pDataBlock are always the same one, no need to call this again
setInputDataBlock(pOperator, pSup->pCtx, pBlock, TSDB_ORDER_ASC, MAIN_SCAN, true);
- doStreamSessionAggImpl(pOperator, pBlock, pStUpdated, pInfo->pStDeleted, false);
+ doStreamSessionAggImpl(pOperator, pBlock, pStUpdated, NULL, false);
maxTs = TMAX(pInfo->twAggSup.maxTs, pBlock->info.window.ekey);
}
pInfo->twAggSup.maxTs = TMAX(pInfo->twAggSup.maxTs, maxTs);
pBInfo->pRes->info.watermark = pInfo->twAggSup.maxTs;
- // restore the value
- pOperator->status = OP_RES_TO_RETURN;
- // semi operator
- // closeSessionWindow(pInfo->streamAggSup.pResultRows, &pInfo->twAggSup, pUpdated,
- // getResWinForSession);
+
copyUpdateResult(pStUpdated, pUpdated);
+ removeSessionDeleteResults(pUpdated, pInfo->pStDeleted);
taosHashCleanup(pStUpdated);
finalizeUpdatedResult(pOperator->exprSupp.numOfExprs, pInfo->streamAggSup.pResultBuf, pUpdated,
@@ -4425,16 +4515,15 @@ static SSDataBlock* doStreamSessionSemiAgg(SOperatorInfo* pOperator) {
return pBInfo->pRes;
}
- // doBuildDeleteDataBlock(pInfo->pStDeleted, pInfo->pDelRes, &pInfo->pDelIterator);
- if (pInfo->pDelRes->info.rows > 0 && !pInfo->returnDelete) {
- pInfo->returnDelete = true;
+ doBuildDeleteDataBlock(pInfo->pStDeleted, pInfo->pDelRes, &pInfo->pDelIterator);
+ if (pInfo->pDelRes->info.rows > 0) {
printDataBlock(pInfo->pDelRes, "semi session");
return pInfo->pDelRes;
}
- if (pInfo->pUpdateRes->info.rows > 0) {
+ if (pInfo->pUpdateRes->info.rows > 0 && pInfo->returnUpdate) {
+ pInfo->returnUpdate = false;
// process the rest of the data
- pOperator->status = OP_OPENED;
printDataBlock(pInfo->pUpdateRes, "semi session");
return pInfo->pUpdateRes;
}
@@ -4658,7 +4747,7 @@ int32_t updateStateWindowInfo(SArray* pWinInfos, int32_t winIndex, TSKEY* pTs, u
if (pWinInfo->winInfo.win.skey > pTs[i]) {
if (pSeDeleted && pWinInfo->winInfo.isOutput) {
SWinKey res = {.ts = pWinInfo->winInfo.win.skey, .groupId = groupId};
- taosHashPut(pSeDeleted, &pWinInfo->winInfo.pos, sizeof(SResultRowPosition), &res, sizeof(SWinKey));
+ taosHashPut(pSeDeleted, &res, sizeof(SWinKey), &res, sizeof(SWinKey));
pWinInfo->winInfo.isOutput = false;
}
pWinInfo->winInfo.win.skey = pTs[i];
@@ -4726,8 +4815,8 @@ static void doStreamStateAggImpl(SOperatorInfo* pOperator, SSDataBlock* pSDataBl
winRows = updateStateWindowInfo(pAggSup->pCurWins, winIndex, tsCols, groupId, pKeyColInfo, pSDataBlock->info.rows,
i, &allEqual, pStDeleted);
if (!allEqual) {
- appendOneRow(pAggSup->pScanBlock, &pCurWin->winInfo.win.skey, &pCurWin->winInfo.win.ekey, GROUPID_COLUMN_INDEX,
- &groupId);
+ uint64_t uid = 0;
+ appendOneRow(pAggSup->pScanBlock, &pCurWin->winInfo.win.skey, &pCurWin->winInfo.win.ekey, &uid, &groupId);
taosHashRemove(pSeUpdated, &pCurWin->winInfo.pos, sizeof(SResultRowPosition));
deleteWindow(pAggSup->pCurWins, winIndex, destroyStateWinInfo);
continue;
@@ -4756,6 +4845,7 @@ static SSDataBlock* doStreamStateAgg(SOperatorInfo* pOperator) {
SExprSupp* pSup = &pOperator->exprSupp;
SStreamStateAggOperatorInfo* pInfo = pOperator->info;
SOptrBasicInfo* pBInfo = &pInfo->binfo;
+ int64_t maxTs = INT64_MIN;
if (pOperator->status == OP_RES_TO_RETURN) {
doBuildDeleteDataBlock(pInfo->pSeDeleted, pInfo->pDelRes, &pInfo->pDelIterator);
if (pInfo->pDelRes->info.rows > 0) {
@@ -4784,10 +4874,11 @@ static SSDataBlock* doStreamStateAgg(SOperatorInfo* pOperator) {
if (pBlock->info.type == STREAM_CLEAR) {
doClearStateWindows(&pInfo->streamAggSup, pBlock, pSeUpdated, pInfo->pSeDeleted);
continue;
- } else if (pBlock->info.type == STREAM_DELETE_DATA) {
+ } else if (pBlock->info.type == STREAM_DELETE_DATA || pBlock->info.type == STREAM_DELETE_RESULT) {
SArray* pWins = taosArrayInit(16, sizeof(SResultWindowInfo));
doDeleteTimeWindows(&pInfo->streamAggSup, pBlock, 0, pWins, destroyStateWinInfo);
copyDeleteWindowInfo(pWins, pInfo->pSeDeleted);
+ removeSessionResults(pSeUpdated, pWins);
taosArrayDestroy(pWins);
continue;
} else if (pBlock->info.type == STREAM_GET_ALL) {
@@ -4802,8 +4893,9 @@ static SSDataBlock* doStreamStateAgg(SOperatorInfo* pOperator) {
// the pDataBlock are always the same one, no need to call this again
setInputDataBlock(pOperator, pSup->pCtx, pBlock, TSDB_ORDER_ASC, MAIN_SCAN, true);
doStreamStateAggImpl(pOperator, pBlock, pSeUpdated, pInfo->pSeDeleted);
- pInfo->twAggSup.maxTs = TMAX(pInfo->twAggSup.maxTs, pBlock->info.window.ekey);
+ maxTs = TMAX(maxTs, pBlock->info.window.ekey);
}
+ pInfo->twAggSup.maxTs = TMAX(pInfo->twAggSup.maxTs, maxTs);
// restore the value
pOperator->status = OP_RES_TO_RETURN;
@@ -4866,6 +4958,7 @@ SOperatorInfo* createStreamStateAggOperatorInfo(SOperatorInfo* downstream, SPhys
.waterMark = pStateNode->window.watermark,
.calTrigger = pStateNode->window.triggerType,
.maxTs = INT64_MIN,
+ .minTs = INT64_MAX,
};
initExecTimeWindowInfo(&pInfo->twAggSup.timeWindowData, &pTaskInfo->window);
@@ -4898,13 +4991,12 @@ SOperatorInfo* createStreamStateAggOperatorInfo(SOperatorInfo* downstream, SPhys
pOperator->operatorType = QUERY_NODE_PHYSICAL_PLAN_STREAM_STATE;
pOperator->blocking = true;
pOperator->status = OP_NOT_OPENED;
- pOperator->exprSupp.numOfExprs = numOfCols;
- pOperator->exprSupp.pExprInfo = pExprInfo;
pOperator->pTaskInfo = pTaskInfo;
pOperator->info = pInfo;
pOperator->fpSet = createOperatorFpSet(operatorDummyOpenFn, doStreamStateAgg, NULL, NULL,
destroyStreamStateOperatorInfo, aggEncodeResultRow, aggDecodeResultRow, NULL);
- initDownStream(downstream, &pInfo->streamAggSup, 0, pInfo->twAggSup.waterMark, pOperator->operatorType);
+ initDownStream(downstream, &pInfo->streamAggSup, 0, pInfo->twAggSup.waterMark, pOperator->operatorType,
+ pInfo->primaryTsIndex);
code = appendDownstream(pOperator, &downstream, 1);
if (code != TSDB_CODE_SUCCESS) {
goto _error;
@@ -4918,72 +5010,65 @@ _error:
return NULL;
}
-void destroyMergeAlignedIntervalOperatorInfo(void* param) {
+void destroyMAIOperatorInfo(void* param) {
SMergeAlignedIntervalAggOperatorInfo* miaInfo = (SMergeAlignedIntervalAggOperatorInfo*)param;
destroyIntervalOperatorInfo(miaInfo->intervalAggOperatorInfo);
taosMemoryFreeClear(param);
}
-static int32_t outputMergeAlignedIntervalResult(SOperatorInfo* pOperatorInfo, uint64_t tableGroupId,
- SSDataBlock* pResultBlock, TSKEY wstartTs) {
- SMergeAlignedIntervalAggOperatorInfo* miaInfo = pOperatorInfo->info;
-
- SIntervalAggOperatorInfo* iaInfo = miaInfo->intervalAggOperatorInfo;
- SExecTaskInfo* pTaskInfo = pOperatorInfo->pTaskInfo;
- SExprSupp* pSup = &pOperatorInfo->exprSupp;
-
- SET_RES_WINDOW_KEY(iaInfo->aggSup.keyBuf, &wstartTs, TSDB_KEYSIZE, tableGroupId);
- SResultRowPosition* p1 = (SResultRowPosition*)taosHashGet(iaInfo->aggSup.pResultRowHashTable, iaInfo->aggSup.keyBuf,
- GET_RES_WINDOW_KEY_LEN(TSDB_KEYSIZE));
- ASSERT(p1 != NULL);
+static SResultRow* doSetSingleOutputTupleBuf(SResultRowInfo* pResultRowInfo, SAggSupporter* pSup) {
+ SResultRow* pResult = getNewResultRow(pSup->pResultBuf, &pSup->currentPageId, pSup->resultRowSize);
+ pResultRowInfo->cur = (SResultRowPosition){.pageId = pResult->pageId, .offset = pResult->offset};
+ return pResult;
+}
- finalizeResultRowIntoResultDataBlock(iaInfo->aggSup.pResultBuf, p1, pSup->pCtx, pSup->pExprInfo, pSup->numOfExprs,
- pSup->rowEntryInfoOffset, pResultBlock, pTaskInfo);
- taosHashRemove(iaInfo->aggSup.pResultRowHashTable, iaInfo->aggSup.keyBuf, GET_RES_WINDOW_KEY_LEN(TSDB_KEYSIZE));
- ASSERT(taosHashGetSize(iaInfo->aggSup.pResultRowHashTable) == 0);
+static int32_t setSingleOutputTupleBuf(SResultRowInfo* pResultRowInfo, STimeWindow* win, SResultRow** pResult,
+ SExprSupp* pExprSup, SAggSupporter* pAggSup) {
+ if (*pResult == NULL) {
+ *pResult = doSetSingleOutputTupleBuf(pResultRowInfo, pAggSup);
+ if (*pResult == NULL) {
+ return terrno;
+ }
+ }
+ // set time window for current result
+ (*pResult)->win = (*win);
+ setResultRowInitCtx((*pResult), pExprSup->pCtx, pExprSup->numOfExprs, pExprSup->rowEntryInfoOffset);
return TSDB_CODE_SUCCESS;
}
static void doMergeAlignedIntervalAggImpl(SOperatorInfo* pOperatorInfo, SResultRowInfo* pResultRowInfo,
- SSDataBlock* pBlock, int32_t scanFlag, SSDataBlock* pResultBlock) {
+ SSDataBlock* pBlock, SSDataBlock* pResultBlock) {
SMergeAlignedIntervalAggOperatorInfo* miaInfo = pOperatorInfo->info;
SIntervalAggOperatorInfo* iaInfo = miaInfo->intervalAggOperatorInfo;
SExecTaskInfo* pTaskInfo = pOperatorInfo->pTaskInfo;
SExprSupp* pSup = &pOperatorInfo->exprSupp;
+ SInterval* pInterval = &iaInfo->interval;
- int32_t startPos = 0;
- int32_t numOfOutput = pSup->numOfExprs;
- int64_t* tsCols = extractTsCol(pBlock, iaInfo);
- uint64_t tableGroupId = pBlock->info.groupId;
- SResultRow* pResult = NULL;
+ int32_t startPos = 0;
+ int64_t* tsCols = extractTsCol(pBlock, iaInfo);
TSKEY ts = getStartTsKey(&pBlock->info.window, tsCols);
// there is an result exists
if (miaInfo->curTs != INT64_MIN) {
- ASSERT(taosHashGetSize(iaInfo->aggSup.pResultRowHashTable) == 1);
-
if (ts != miaInfo->curTs) {
- outputMergeAlignedIntervalResult(pOperatorInfo, tableGroupId, pResultBlock, miaInfo->curTs);
+ finalizeResultRows(iaInfo->aggSup.pResultBuf, &pResultRowInfo->cur, pSup, pResultBlock, pTaskInfo);
+ resetResultRow(miaInfo->pResultRow, iaInfo->aggSup.resultRowSize - sizeof(SResultRow));
miaInfo->curTs = ts;
}
} else {
miaInfo->curTs = ts;
- ASSERT(taosHashGetSize(iaInfo->aggSup.pResultRowHashTable) == 0);
}
STimeWindow win = {0};
win.skey = miaInfo->curTs;
- win.ekey =
- taosTimeAdd(win.skey, iaInfo->interval.interval, iaInfo->interval.intervalUnit, iaInfo->interval.precision) - 1;
+ win.ekey = taosTimeAdd(win.skey, pInterval->interval, pInterval->intervalUnit, pInterval->precision) - 1;
- // TODO: remove the hash table (groupid + winkey => result row position)
- int32_t ret = setTimeWindowOutputBuf(pResultRowInfo, &win, (scanFlag == MAIN_SCAN), &pResult, tableGroupId,
- pSup->pCtx, numOfOutput, pSup->rowEntryInfoOffset, &iaInfo->aggSup, pTaskInfo);
- if (ret != TSDB_CODE_SUCCESS || pResult == NULL) {
- T_LONG_JMP(pTaskInfo->env, TSDB_CODE_QRY_OUT_OF_MEMORY);
+ int32_t ret = setSingleOutputTupleBuf(pResultRowInfo, &win, &miaInfo->pResultRow, pSup, &iaInfo->aggSup);
+ if (ret != TSDB_CODE_SUCCESS || miaInfo->pResultRow == NULL) {
+ T_LONG_JMP(pTaskInfo->env, ret);
}
int32_t currPos = startPos;
@@ -4996,21 +5081,19 @@ static void doMergeAlignedIntervalAggImpl(SOperatorInfo* pOperatorInfo, SResultR
updateTimeWindowInfo(&iaInfo->twAggSup.timeWindowData, &currWin, true);
doApplyFunctions(pTaskInfo, pSup->pCtx, &iaInfo->twAggSup.timeWindowData, startPos, currPos - startPos,
- pBlock->info.rows, numOfOutput);
+ pBlock->info.rows, pSup->numOfExprs);
- outputMergeAlignedIntervalResult(pOperatorInfo, tableGroupId, pResultBlock, miaInfo->curTs);
+ finalizeResultRows(iaInfo->aggSup.pResultBuf, &pResultRowInfo->cur, pSup, pResultBlock, pTaskInfo);
+ resetResultRow(miaInfo->pResultRow, iaInfo->aggSup.resultRowSize - sizeof(SResultRow));
miaInfo->curTs = tsCols[currPos];
currWin.skey = miaInfo->curTs;
- currWin.ekey = taosTimeAdd(currWin.skey, iaInfo->interval.interval, iaInfo->interval.intervalUnit,
- iaInfo->interval.precision) -
- 1;
+ currWin.ekey = taosTimeAdd(currWin.skey, pInterval->interval, pInterval->intervalUnit, pInterval->precision) - 1;
startPos = currPos;
- ret = setTimeWindowOutputBuf(pResultRowInfo, &currWin, (scanFlag == MAIN_SCAN), &pResult, tableGroupId, pSup->pCtx,
- numOfOutput, pSup->rowEntryInfoOffset, &iaInfo->aggSup, pTaskInfo);
- if (ret != TSDB_CODE_SUCCESS || pResult == NULL) {
- T_LONG_JMP(pTaskInfo->env, TSDB_CODE_QRY_OUT_OF_MEMORY);
+ ret = setSingleOutputTupleBuf(pResultRowInfo, &win, &miaInfo->pResultRow, pSup, &iaInfo->aggSup);
+ if (ret != TSDB_CODE_SUCCESS || miaInfo->pResultRow == NULL) {
+ T_LONG_JMP(pTaskInfo->env, ret);
}
miaInfo->curTs = currWin.skey;
@@ -5018,68 +5101,79 @@ static void doMergeAlignedIntervalAggImpl(SOperatorInfo* pOperatorInfo, SResultR
updateTimeWindowInfo(&iaInfo->twAggSup.timeWindowData, &currWin, true);
doApplyFunctions(pTaskInfo, pSup->pCtx, &iaInfo->twAggSup.timeWindowData, startPos, currPos - startPos,
- pBlock->info.rows, numOfOutput);
+ pBlock->info.rows, pSup->numOfExprs);
+}
+
+static void cleanupAfterGroupResultGen(SMergeAlignedIntervalAggOperatorInfo* pMiaInfo, SSDataBlock* pRes) {
+ pRes->info.groupId = pMiaInfo->groupId;
+ pMiaInfo->curTs = INT64_MIN;
+ pMiaInfo->groupId = 0;
}
static void doMergeAlignedIntervalAgg(SOperatorInfo* pOperator) {
SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo;
- SMergeAlignedIntervalAggOperatorInfo* miaInfo = pOperator->info;
- SIntervalAggOperatorInfo* iaInfo = miaInfo->intervalAggOperatorInfo;
-
- SExprSupp* pSup = &pOperator->exprSupp;
- SSDataBlock* pRes = iaInfo->binfo.pRes;
+ SMergeAlignedIntervalAggOperatorInfo* pMiaInfo = pOperator->info;
+ SIntervalAggOperatorInfo* pIaInfo = pMiaInfo->intervalAggOperatorInfo;
- SOperatorInfo* downstream = pOperator->pDownstream[0];
- int32_t scanFlag = MAIN_SCAN;
+ SExprSupp* pSup = &pOperator->exprSupp;
+ SSDataBlock* pRes = pIaInfo->binfo.pRes;
+ SResultRowInfo* pResultRowInfo = &pIaInfo->binfo.resultRowInfo;
+ SOperatorInfo* downstream = pOperator->pDownstream[0];
+ int32_t scanFlag = MAIN_SCAN;
while (1) {
SSDataBlock* pBlock = NULL;
- if (miaInfo->prefetchedBlock == NULL) {
+ if (pMiaInfo->prefetchedBlock == NULL) {
pBlock = downstream->fpSet.getNextFn(downstream);
} else {
- pBlock = miaInfo->prefetchedBlock;
- miaInfo->prefetchedBlock = NULL;
+ pBlock = pMiaInfo->prefetchedBlock;
+ pMiaInfo->prefetchedBlock = NULL;
- miaInfo->groupId = pBlock->info.groupId;
+ pMiaInfo->groupId = pBlock->info.groupId;
}
+ // no data exists, all query processing is done
if (pBlock == NULL) {
- // close last unfinalized time window
- if (miaInfo->curTs != INT64_MIN) {
- ASSERT(taosHashGetSize(iaInfo->aggSup.pResultRowHashTable) == 1);
- outputMergeAlignedIntervalResult(pOperator, miaInfo->groupId, pRes, miaInfo->curTs);
- miaInfo->curTs = INT64_MIN;
+ // close last unclosed time window
+ if (pMiaInfo->curTs != INT64_MIN) {
+ finalizeResultRows(pIaInfo->aggSup.pResultBuf, &pResultRowInfo->cur, pSup, pRes, pTaskInfo);
+ resetResultRow(pMiaInfo->pResultRow, pIaInfo->aggSup.resultRowSize - sizeof(SResultRow));
+ cleanupAfterGroupResultGen(pMiaInfo, pRes);
}
doSetOperatorCompleted(pOperator);
break;
}
- if (!miaInfo->hasGroupId) {
- miaInfo->hasGroupId = true;
- miaInfo->groupId = pBlock->info.groupId;
- } else if (miaInfo->groupId != pBlock->info.groupId) {
- // if there are unclosed time window, close it firstly.
- ASSERT(miaInfo->curTs != INT64_MIN);
- outputMergeAlignedIntervalResult(pOperator, miaInfo->groupId, pRes, miaInfo->curTs);
- miaInfo->prefetchedBlock = pBlock;
- miaInfo->curTs = INT64_MIN;
- break;
+ if (pMiaInfo->groupId == 0) {
+ if (pMiaInfo->groupId != pBlock->info.groupId) {
+ pMiaInfo->groupId = pBlock->info.groupId;
+ }
+ } else {
+ if (pMiaInfo->groupId != pBlock->info.groupId) {
+ // if there are unclosed time window, close it firstly.
+ ASSERT(pMiaInfo->curTs != INT64_MIN);
+ finalizeResultRows(pIaInfo->aggSup.pResultBuf, &pResultRowInfo->cur, pSup, pRes, pTaskInfo);
+ resetResultRow(pMiaInfo->pResultRow, pIaInfo->aggSup.resultRowSize - sizeof(SResultRow));
+
+ pMiaInfo->prefetchedBlock = pBlock;
+ cleanupAfterGroupResultGen(pMiaInfo, pRes);
+ break;
+ } else {
+ // continue
+ }
}
- getTableScanInfo(pOperator, &iaInfo->inputOrder, &scanFlag);
- setInputDataBlock(pOperator, pSup->pCtx, pBlock, iaInfo->inputOrder, scanFlag, true);
- doMergeAlignedIntervalAggImpl(pOperator, &iaInfo->binfo.resultRowInfo, pBlock, scanFlag, pRes);
+ getTableScanInfo(pOperator, &pIaInfo->inputOrder, &scanFlag);
+ setInputDataBlock(pOperator, pSup->pCtx, pBlock, pIaInfo->inputOrder, scanFlag, true);
+ doMergeAlignedIntervalAggImpl(pOperator, &pIaInfo->binfo.resultRowInfo, pBlock, pRes);
- doFilter(miaInfo->pCondition, pRes, NULL);
+ doFilter(pMiaInfo->pCondition, pRes, NULL);
if (pRes->info.rows >= pOperator->resultInfo.capacity) {
break;
}
}
-
- pRes->info.groupId = miaInfo->groupId;
- miaInfo->hasGroupId = false;
}
static SSDataBlock* mergeAlignedIntervalAgg(SOperatorInfo* pOperator) {
@@ -5115,9 +5209,7 @@ static SSDataBlock* mergeAlignedIntervalAgg(SOperatorInfo* pOperator) {
return (rows == 0) ? NULL : pRes;
}
-SOperatorInfo* createMergeAlignedIntervalOperatorInfo(SOperatorInfo* downstream, SExprInfo* pExprInfo,
- int32_t numOfCols, SSDataBlock* pResBlock, SInterval* pInterval,
- int32_t primaryTsSlotId, SNode* pCondition, bool mergeResultBlock,
+SOperatorInfo* createMergeAlignedIntervalOperatorInfo(SOperatorInfo* downstream, SMergeAlignedIntervalPhysiNode* pNode,
SExecTaskInfo* pTaskInfo) {
SMergeAlignedIntervalAggOperatorInfo* miaInfo = taosMemoryCalloc(1, sizeof(SMergeAlignedIntervalAggOperatorInfo));
SOperatorInfo* pOperator = taosMemoryCalloc(1, sizeof(SOperatorInfo));
@@ -5130,24 +5222,33 @@ SOperatorInfo* createMergeAlignedIntervalOperatorInfo(SOperatorInfo* downstream,
goto _error;
}
+ int32_t num = 0;
+ SExprInfo* pExprInfo = createExprInfo(pNode->window.pFuncs, NULL, &num);
+ SSDataBlock* pResBlock = createResDataBlock(pNode->window.node.pOutputDataBlockDesc);
+
+ SInterval interval = {.interval = pNode->interval,
+ .sliding = pNode->sliding,
+ .intervalUnit = pNode->intervalUnit,
+ .slidingUnit = pNode->slidingUnit,
+ .offset = pNode->offset,
+ .precision = ((SColumnNode*)pNode->window.pTspk)->node.resType.precision};
+
SIntervalAggOperatorInfo* iaInfo = miaInfo->intervalAggOperatorInfo;
SExprSupp* pSup = &pOperator->exprSupp;
- miaInfo->pCondition = pCondition;
+ miaInfo->pCondition = pNode->window.node.pConditions;
miaInfo->curTs = INT64_MIN;
-
iaInfo->win = pTaskInfo->window;
iaInfo->inputOrder = TSDB_ORDER_ASC;
- iaInfo->interval = *pInterval;
+ iaInfo->interval = interval;
iaInfo->execModel = pTaskInfo->execModel;
- iaInfo->primaryTsIndex = primaryTsSlotId;
- iaInfo->binfo.mergeResultBlock = mergeResultBlock;
+ iaInfo->primaryTsIndex = ((SColumnNode*)pNode->window.pTspk)->slotId;
+ iaInfo->binfo.mergeResultBlock = pNode->window.mergeDataBlock;
size_t keyBufSize = sizeof(int64_t) + sizeof(int64_t) + POINTER_BYTES;
initResultSizeInfo(&pOperator->resultInfo, 4096);
- int32_t code =
- initAggInfo(&pOperator->exprSupp, &iaInfo->aggSup, pExprInfo, numOfCols, keyBufSize, pTaskInfo->id.str);
+ int32_t code = initAggInfo(&pOperator->exprSupp, &iaInfo->aggSup, pExprInfo, num, keyBufSize, pTaskInfo->id.str);
if (code != TSDB_CODE_SUCCESS) {
goto _error;
}
@@ -5155,9 +5256,9 @@ SOperatorInfo* createMergeAlignedIntervalOperatorInfo(SOperatorInfo* downstream,
initBasicInfo(&iaInfo->binfo, pResBlock);
initExecTimeWindowInfo(&iaInfo->twAggSup.timeWindowData, &iaInfo->win);
- iaInfo->timeWindowInterpo = timeWindowinterpNeeded(pSup->pCtx, numOfCols, iaInfo);
+ iaInfo->timeWindowInterpo = timeWindowinterpNeeded(pSup->pCtx, num, iaInfo);
if (iaInfo->timeWindowInterpo) {
- iaInfo->binfo.resultRowInfo.openWindow = tdListNew(sizeof(SResultRowPosition));
+ iaInfo->binfo.resultRowInfo.openWindow = tdListNew(sizeof(SOpenWindowInfo));
}
initResultRowInfo(&iaInfo->binfo.resultRowInfo);
@@ -5167,13 +5268,11 @@ SOperatorInfo* createMergeAlignedIntervalOperatorInfo(SOperatorInfo* downstream,
pOperator->operatorType = QUERY_NODE_PHYSICAL_PLAN_MERGE_ALIGNED_INTERVAL;
pOperator->blocking = false;
pOperator->status = OP_NOT_OPENED;
- pOperator->exprSupp.pExprInfo = pExprInfo;
pOperator->pTaskInfo = pTaskInfo;
- pOperator->exprSupp.numOfExprs = numOfCols;
pOperator->info = miaInfo;
pOperator->fpSet = createOperatorFpSet(operatorDummyOpenFn, mergeAlignedIntervalAgg, NULL, NULL,
- destroyMergeAlignedIntervalOperatorInfo, NULL, NULL, NULL);
+ destroyMAIOperatorInfo, NULL, NULL, NULL);
code = appendDownstream(pOperator, &downstream, 1);
if (code != TSDB_CODE_SUCCESS) {
@@ -5183,7 +5282,7 @@ SOperatorInfo* createMergeAlignedIntervalOperatorInfo(SOperatorInfo* downstream,
return pOperator;
_error:
- destroyMergeAlignedIntervalOperatorInfo(miaInfo);
+ destroyMAIOperatorInfo(miaInfo);
taosMemoryFreeClear(pOperator);
pTaskInfo->code = code;
return NULL;
@@ -5223,12 +5322,11 @@ static int32_t finalizeWindowResult(SOperatorInfo* pOperatorInfo, uint64_t table
SExprSupp* pExprSup = &pOperatorInfo->exprSupp;
SET_RES_WINDOW_KEY(iaInfo->aggSup.keyBuf, &win->skey, TSDB_KEYSIZE, tableGroupId);
- SResultRowPosition* p1 = (SResultRowPosition*)taosHashGet(iaInfo->aggSup.pResultRowHashTable, iaInfo->aggSup.keyBuf,
- GET_RES_WINDOW_KEY_LEN(TSDB_KEYSIZE));
+ SResultRowPosition* p1 = (SResultRowPosition*)tSimpleHashGet(
+ iaInfo->aggSup.pResultRowHashTable, iaInfo->aggSup.keyBuf, GET_RES_WINDOW_KEY_LEN(TSDB_KEYSIZE));
ASSERT(p1 != NULL);
- finalizeResultRowIntoResultDataBlock(iaInfo->aggSup.pResultBuf, p1, pExprSup->pCtx, pExprSup->pExprInfo,
- pExprSup->numOfExprs, pExprSup->rowEntryInfoOffset, pResultBlock, pTaskInfo);
- taosHashRemove(iaInfo->aggSup.pResultRowHashTable, iaInfo->aggSup.keyBuf, GET_RES_WINDOW_KEY_LEN(TSDB_KEYSIZE));
+ // finalizeResultRows(iaInfo->aggSup.pResultBuf, p1, pResultBlock, pTaskInfo);
+ tSimpleHashRemove(iaInfo->aggSup.pResultRowHashTable, iaInfo->aggSup.keyBuf, GET_RES_WINDOW_KEY_LEN(TSDB_KEYSIZE));
return TSDB_CODE_SUCCESS;
}
@@ -5236,9 +5334,7 @@ static int32_t outputPrevIntervalResult(SOperatorInfo* pOperatorInfo, uint64_t t
STimeWindow* newWin) {
SMergeIntervalAggOperatorInfo* miaInfo = pOperatorInfo->info;
SIntervalAggOperatorInfo* iaInfo = &miaInfo->intervalAggOperatorInfo;
- SExecTaskInfo* pTaskInfo = pOperatorInfo->pTaskInfo;
bool ascScan = (iaInfo->inputOrder == TSDB_ORDER_ASC);
- SExprSupp* pExprSup = &pOperatorInfo->exprSupp;
SGroupTimeWindow groupTimeWindow = {.groupId = tableGroupId, .window = *newWin};
tdListAppend(miaInfo->groupIntervals, &groupTimeWindow);
@@ -5251,9 +5347,10 @@ static int32_t outputPrevIntervalResult(SOperatorInfo* pOperatorInfo, uint64_t t
if (prevGrpWin->groupId != tableGroupId) {
continue;
}
+
STimeWindow* prevWin = &prevGrpWin->window;
if ((ascScan && newWin->skey > prevWin->ekey) || ((!ascScan) && newWin->skey < prevWin->ekey)) {
- finalizeWindowResult(pOperatorInfo, tableGroupId, prevWin, pResultBlock);
+ // finalizeWindowResult(pOperatorInfo, tableGroupId, prevWin, pResultBlock);
tdListPopNode(miaInfo->groupIntervals, listNode);
}
}
@@ -5294,7 +5391,7 @@ static void doMergeIntervalAggImpl(SOperatorInfo* pOperatorInfo, SResultRowInfo*
// prev time window not interpolation yet.
if (iaInfo->timeWindowInterpo) {
- SResultRowPosition pos = addToOpenWindowList(pResultRowInfo, pResult);
+ SResultRowPosition pos = addToOpenWindowList(pResultRowInfo, pResult, tableGroupId);
doInterpUnclosedTimeWindow(pOperatorInfo, numOfOutput, pResultRowInfo, pBlock, scanFlag, tsCols, &pos);
// restore current time window
@@ -5413,7 +5510,7 @@ static SSDataBlock* doMergeIntervalAgg(SOperatorInfo* pOperator) {
if (listNode != NULL) {
SGroupTimeWindow* grpWin = (SGroupTimeWindow*)(listNode->data);
- finalizeWindowResult(pOperator, grpWin->groupId, &grpWin->window, pRes);
+ // finalizeWindowResult(pOperator, grpWin->groupId, &grpWin->window, pRes);
pRes->info.groupId = grpWin->groupId;
}
}
@@ -5427,57 +5524,64 @@ static SSDataBlock* doMergeIntervalAgg(SOperatorInfo* pOperator) {
return (rows == 0) ? NULL : pRes;
}
-SOperatorInfo* createMergeIntervalOperatorInfo(SOperatorInfo* downstream, SExprInfo* pExprInfo, int32_t numOfCols,
- SSDataBlock* pResBlock, SInterval* pInterval, int32_t primaryTsSlotId,
- bool mergeBlock, SExecTaskInfo* pTaskInfo) {
- SMergeIntervalAggOperatorInfo* miaInfo = taosMemoryCalloc(1, sizeof(SMergeIntervalAggOperatorInfo));
+SOperatorInfo* createMergeIntervalOperatorInfo(SOperatorInfo* downstream, SMergeIntervalPhysiNode* pIntervalPhyNode,
+ SExecTaskInfo* pTaskInfo) {
+ SMergeIntervalAggOperatorInfo* pMergeIntervalInfo = taosMemoryCalloc(1, sizeof(SMergeIntervalAggOperatorInfo));
SOperatorInfo* pOperator = taosMemoryCalloc(1, sizeof(SOperatorInfo));
- if (miaInfo == NULL || pOperator == NULL) {
+ if (pMergeIntervalInfo == NULL || pOperator == NULL) {
goto _error;
}
- miaInfo->groupIntervals = tdListNew(sizeof(SGroupTimeWindow));
+ int32_t num = 0;
+ SExprInfo* pExprInfo = createExprInfo(pIntervalPhyNode->window.pFuncs, NULL, &num);
+ SSDataBlock* pResBlock = createResDataBlock(pIntervalPhyNode->window.node.pOutputDataBlockDesc);
- SIntervalAggOperatorInfo* iaInfo = &miaInfo->intervalAggOperatorInfo;
- iaInfo->win = pTaskInfo->window;
- iaInfo->inputOrder = TSDB_ORDER_ASC;
- iaInfo->interval = *pInterval;
- iaInfo->execModel = pTaskInfo->execModel;
- iaInfo->binfo.mergeResultBlock = mergeBlock;
+ SInterval interval = {.interval = pIntervalPhyNode->interval,
+ .sliding = pIntervalPhyNode->sliding,
+ .intervalUnit = pIntervalPhyNode->intervalUnit,
+ .slidingUnit = pIntervalPhyNode->slidingUnit,
+ .offset = pIntervalPhyNode->offset,
+ .precision = ((SColumnNode*)pIntervalPhyNode->window.pTspk)->node.resType.precision};
- iaInfo->primaryTsIndex = primaryTsSlotId;
+ pMergeIntervalInfo->groupIntervals = tdListNew(sizeof(SGroupTimeWindow));
+
+ SIntervalAggOperatorInfo* pIntervalInfo = &pMergeIntervalInfo->intervalAggOperatorInfo;
+ pIntervalInfo->win = pTaskInfo->window;
+ pIntervalInfo->inputOrder = TSDB_ORDER_ASC;
+ pIntervalInfo->interval = interval;
+ pIntervalInfo->execModel = pTaskInfo->execModel;
+ pIntervalInfo->binfo.mergeResultBlock = pIntervalPhyNode->window.mergeDataBlock;
+ pIntervalInfo->primaryTsIndex = ((SColumnNode*)pIntervalPhyNode->window.pTspk)->slotId;
SExprSupp* pExprSupp = &pOperator->exprSupp;
size_t keyBufSize = sizeof(int64_t) + sizeof(int64_t) + POINTER_BYTES;
initResultSizeInfo(&pOperator->resultInfo, 4096);
- int32_t code = initAggInfo(pExprSupp, &iaInfo->aggSup, pExprInfo, numOfCols, keyBufSize, pTaskInfo->id.str);
+ int32_t code = initAggInfo(pExprSupp, &pIntervalInfo->aggSup, pExprInfo, num, keyBufSize, pTaskInfo->id.str);
if (code != TSDB_CODE_SUCCESS) {
goto _error;
}
- initBasicInfo(&iaInfo->binfo, pResBlock);
- initExecTimeWindowInfo(&iaInfo->twAggSup.timeWindowData, &iaInfo->win);
+ initBasicInfo(&pIntervalInfo->binfo, pResBlock);
+ initExecTimeWindowInfo(&pIntervalInfo->twAggSup.timeWindowData, &pIntervalInfo->win);
- iaInfo->timeWindowInterpo = timeWindowinterpNeeded(pExprSupp->pCtx, numOfCols, iaInfo);
- if (iaInfo->timeWindowInterpo) {
- iaInfo->binfo.resultRowInfo.openWindow = tdListNew(sizeof(SResultRowPosition));
- if (iaInfo->binfo.resultRowInfo.openWindow == NULL) {
+ pIntervalInfo->timeWindowInterpo = timeWindowinterpNeeded(pExprSupp->pCtx, num, pIntervalInfo);
+ if (pIntervalInfo->timeWindowInterpo) {
+ pIntervalInfo->binfo.resultRowInfo.openWindow = tdListNew(sizeof(SOpenWindowInfo));
+ if (pIntervalInfo->binfo.resultRowInfo.openWindow == NULL) {
goto _error;
}
}
- initResultRowInfo(&iaInfo->binfo.resultRowInfo);
+ initResultRowInfo(&pIntervalInfo->binfo.resultRowInfo);
pOperator->name = "TimeMergeIntervalAggOperator";
pOperator->operatorType = QUERY_NODE_PHYSICAL_PLAN_MERGE_INTERVAL;
pOperator->blocking = false;
pOperator->status = OP_NOT_OPENED;
- pOperator->exprSupp.pExprInfo = pExprInfo;
pOperator->pTaskInfo = pTaskInfo;
- pOperator->exprSupp.numOfExprs = numOfCols;
- pOperator->info = miaInfo;
+ pOperator->info = pMergeIntervalInfo;
pOperator->fpSet = createOperatorFpSet(operatorDummyOpenFn, doMergeIntervalAgg, NULL, NULL,
destroyMergeIntervalOperatorInfo, NULL, NULL, NULL);
@@ -5490,7 +5594,376 @@ SOperatorInfo* createMergeIntervalOperatorInfo(SOperatorInfo* downstream, SExprI
return pOperator;
_error:
- destroyMergeIntervalOperatorInfo(miaInfo);
+ destroyMergeIntervalOperatorInfo(pMergeIntervalInfo);
+ taosMemoryFreeClear(pOperator);
+ pTaskInfo->code = code;
+ return NULL;
+}
+
+static void doStreamIntervalAggImpl(SOperatorInfo* pOperatorInfo, SResultRowInfo* pResultRowInfo, SSDataBlock* pBlock,
+ int32_t scanFlag, SHashObj* pUpdatedMap) {
+ SStreamIntervalOperatorInfo* pInfo = (SStreamIntervalOperatorInfo*)pOperatorInfo->info;
+
+ SExecTaskInfo* pTaskInfo = pOperatorInfo->pTaskInfo;
+ SExprSupp* pSup = &pOperatorInfo->exprSupp;
+
+ int32_t startPos = 0;
+ int32_t numOfOutput = pSup->numOfExprs;
+ SColumnInfoData* pColDataInfo = taosArrayGet(pBlock->pDataBlock, pInfo->primaryTsIndex);
+ TSKEY* tsCols = (TSKEY*)pColDataInfo->pData;
+ uint64_t tableGroupId = pBlock->info.groupId;
+ bool ascScan = true;
+ TSKEY ts = getStartTsKey(&pBlock->info.window, tsCols);
+ SResultRow* pResult = NULL;
+
+ STimeWindow win = getActiveTimeWindow(pInfo->aggSup.pResultBuf, pResultRowInfo, ts, &pInfo->interval, TSDB_ORDER_ASC);
+ int32_t ret = TSDB_CODE_SUCCESS;
+ if ((!pInfo->ignoreExpiredData || !isCloseWindow(&win, &pInfo->twAggSup)) &&
+ inSlidingWindow(&pInfo->interval, &win, &pBlock->info)) {
+ ret = setTimeWindowOutputBuf(pResultRowInfo, &win, (scanFlag == MAIN_SCAN), &pResult, tableGroupId, pSup->pCtx,
+ numOfOutput, pSup->rowEntryInfoOffset, &pInfo->aggSup, pTaskInfo);
+ if (ret != TSDB_CODE_SUCCESS || pResult == NULL) {
+ T_LONG_JMP(pTaskInfo->env, TSDB_CODE_QRY_OUT_OF_MEMORY);
+ }
+ if (pInfo->twAggSup.calTrigger == STREAM_TRIGGER_AT_ONCE) {
+ saveWinResultRow(pResult, tableGroupId, pUpdatedMap);
+ setResultBufPageDirty(pInfo->aggSup.pResultBuf, &pResultRowInfo->cur);
+ }
+ }
+
+ TSKEY ekey = ascScan ? win.ekey : win.skey;
+ int32_t forwardRows =
+ getNumOfRowsInTimeWindow(&pBlock->info, tsCols, startPos, ekey, binarySearchForKey, NULL, TSDB_ORDER_ASC);
+ ASSERT(forwardRows > 0);
+
+ if ((!pInfo->ignoreExpiredData || !isCloseWindow(&win, &pInfo->twAggSup)) &&
+ inSlidingWindow(&pInfo->interval, &win, &pBlock->info)) {
+ updateTimeWindowInfo(&pInfo->twAggSup.timeWindowData, &win, true);
+ doApplyFunctions(pTaskInfo, pSup->pCtx, &pInfo->twAggSup.timeWindowData, startPos, forwardRows, pBlock->info.rows,
+ numOfOutput);
+ }
+
+ STimeWindow nextWin = win;
+ while (1) {
+ int32_t prevEndPos = forwardRows - 1 + startPos;
+ startPos = getNextQualifiedWindow(&pInfo->interval, &nextWin, &pBlock->info, tsCols, prevEndPos, TSDB_ORDER_ASC);
+ if (startPos < 0) {
+ break;
+ }
+ if (pInfo->ignoreExpiredData && isCloseWindow(&nextWin, &pInfo->twAggSup)) {
+ ekey = ascScan ? nextWin.ekey : nextWin.skey;
+ forwardRows =
+ getNumOfRowsInTimeWindow(&pBlock->info, tsCols, startPos, ekey, binarySearchForKey, NULL, TSDB_ORDER_ASC);
+ continue;
+ }
+
+ // null data, failed to allocate more memory buffer
+ int32_t code = setTimeWindowOutputBuf(pResultRowInfo, &nextWin, (scanFlag == MAIN_SCAN), &pResult, tableGroupId,
+ pSup->pCtx, numOfOutput, pSup->rowEntryInfoOffset, &pInfo->aggSup, pTaskInfo);
+ if (code != TSDB_CODE_SUCCESS || pResult == NULL) {
+ T_LONG_JMP(pTaskInfo->env, TSDB_CODE_QRY_OUT_OF_MEMORY);
+ }
+
+ if (pInfo->twAggSup.calTrigger == STREAM_TRIGGER_AT_ONCE) {
+ saveWinResultRow(pResult, tableGroupId, pUpdatedMap);
+ setResultBufPageDirty(pInfo->aggSup.pResultBuf, &pResultRowInfo->cur);
+ }
+
+ ekey = ascScan ? nextWin.ekey : nextWin.skey;
+ forwardRows =
+ getNumOfRowsInTimeWindow(&pBlock->info, tsCols, startPos, ekey, binarySearchForKey, NULL, TSDB_ORDER_ASC);
+ updateTimeWindowInfo(&pInfo->twAggSup.timeWindowData, &nextWin, true);
+ doApplyFunctions(pTaskInfo, pSup->pCtx, &pInfo->twAggSup.timeWindowData, startPos, forwardRows, pBlock->info.rows,
+ numOfOutput);
+ }
+}
+
+static void doStreamIntervalAggImpl2(SOperatorInfo* pOperatorInfo, SSDataBlock* pSDataBlock, uint64_t tableGroupId,
+ SHashObj* pUpdatedMap) {
+ SStreamIntervalOperatorInfo* pInfo = (SStreamIntervalOperatorInfo*)pOperatorInfo->info;
+
+ SResultRowInfo* pResultRowInfo = &(pInfo->binfo.resultRowInfo);
+ SExecTaskInfo* pTaskInfo = pOperatorInfo->pTaskInfo;
+ SExprSupp* pSup = &pOperatorInfo->exprSupp;
+ int32_t numOfOutput = pSup->numOfExprs;
+ int32_t step = 1;
+ TSKEY* tsCols = NULL;
+ SResultRow* pResult = NULL;
+ int32_t forwardRows = 0;
+
+ ASSERT(pSDataBlock->pDataBlock != NULL);
+ SColumnInfoData* pColDataInfo = taosArrayGet(pSDataBlock->pDataBlock, pInfo->primaryTsIndex);
+ tsCols = (int64_t*)pColDataInfo->pData;
+
+ int32_t startPos = 0;
+ TSKEY ts = getStartTsKey(&pSDataBlock->info.window, tsCols);
+ STimeWindow nextWin =
+ getActiveTimeWindow(pInfo->aggSup.pResultBuf, pResultRowInfo, ts, &pInfo->interval, TSDB_ORDER_ASC);
+ while (1) {
+ bool isClosed = isCloseWindow(&nextWin, &pInfo->twAggSup);
+ if ((pInfo->ignoreExpiredData && isClosed) || !inSlidingWindow(&pInfo->interval, &nextWin, &pSDataBlock->info)) {
+ startPos = getNexWindowPos(&pInfo->interval, &pSDataBlock->info, tsCols, startPos, nextWin.ekey, &nextWin);
+ if (startPos < 0) {
+ break;
+ }
+ continue;
+ }
+
+ int32_t code = setOutputBuf(&nextWin, &pResult, tableGroupId, pSup->pCtx, numOfOutput, pSup->rowEntryInfoOffset,
+ &pInfo->aggSup, pTaskInfo);
+ if (code != TSDB_CODE_SUCCESS || pResult == NULL) {
+ T_LONG_JMP(pTaskInfo->env, TSDB_CODE_QRY_OUT_OF_MEMORY);
+ }
+
+ forwardRows = getNumOfRowsInTimeWindow(&pSDataBlock->info, tsCols, startPos, nextWin.ekey, binarySearchForKey, NULL,
+ TSDB_ORDER_ASC);
+ if (pInfo->twAggSup.calTrigger == STREAM_TRIGGER_AT_ONCE && pUpdatedMap) {
+ saveWinResultInfo(pResult->win.skey, tableGroupId, pUpdatedMap);
+ }
+ updateTimeWindowInfo(&pInfo->twAggSup.timeWindowData, &nextWin, true);
+ doApplyFunctions(pTaskInfo, pSup->pCtx, &pInfo->twAggSup.timeWindowData, startPos, forwardRows,
+ pSDataBlock->info.rows, numOfOutput);
+ SWinKey key = {
+ .ts = nextWin.skey,
+ .groupId = tableGroupId,
+ };
+ saveOutputBuf(pTaskInfo, &key, pResult, pInfo->aggSup.resultRowSize);
+ releaseOutputBuf(pTaskInfo, &key, pResult);
+ int32_t prevEndPos = (forwardRows - 1) * step + startPos;
+ ASSERT(pSDataBlock->info.window.skey > 0 && pSDataBlock->info.window.ekey > 0);
+ startPos =
+ getNextQualifiedWindow(&pInfo->interval, &nextWin, &pSDataBlock->info, tsCols, prevEndPos, TSDB_ORDER_ASC);
+ if (startPos < 0) {
+ break;
+ }
+ }
+}
+
+void doBuildResult(SOperatorInfo* pOperator, SSDataBlock* pBlock, SGroupResInfo* pGroupResInfo) {
+ SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo;
+ // set output datablock version
+ pBlock->info.version = pTaskInfo->version;
+
+ blockDataCleanup(pBlock);
+ if (!hasRemainResults(pGroupResInfo)) {
+ return;
+ }
+
+ // clear the existed group id
+ pBlock->info.groupId = 0;
+ buildDataBlockFromGroupRes(pTaskInfo, pBlock, &pOperator->exprSupp, pGroupResInfo);
+}
+
+static SSDataBlock* doStreamIntervalAgg(SOperatorInfo* pOperator) {
+ SStreamIntervalOperatorInfo* pInfo = pOperator->info;
+ SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo;
+ int64_t maxTs = INT64_MIN;
+ int64_t minTs = INT64_MAX;
+ SExprSupp* pSup = &pOperator->exprSupp;
+
+ if (pOperator->status == OP_EXEC_DONE) {
+ return NULL;
+ }
+
+ if (pOperator->status == OP_RES_TO_RETURN) {
+ doBuildDeleteResult(pInfo->pDelWins, &pInfo->delIndex, pInfo->pDelRes);
+ if (pInfo->pDelRes->info.rows > 0) {
+ printDataBlock(pInfo->pDelRes, "single interval delete");
+ return pInfo->pDelRes;
+ }
+
+ doBuildResult(pOperator, pInfo->binfo.pRes, &pInfo->groupResInfo);
+ if (pInfo->binfo.pRes->info.rows > 0) {
+ printDataBlock(pInfo->binfo.pRes, "single interval");
+ return pInfo->binfo.pRes;
+ }
+
+ doSetOperatorCompleted(pOperator);
+ return NULL;
+ }
+
+ SOperatorInfo* downstream = pOperator->pDownstream[0];
+
+ SArray* pUpdated = taosArrayInit(4, POINTER_BYTES); // SResKeyPos
+ _hash_fn_t hashFn = taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY);
+ SHashObj* pUpdatedMap = taosHashInit(1024, hashFn, false, HASH_NO_LOCK);
+
+ while (1) {
+ SSDataBlock* pBlock = downstream->fpSet.getNextFn(downstream);
+ if (pBlock == NULL) {
+ break;
+ }
+ printDataBlock(pBlock, "single interval recv");
+
+ if (pBlock->info.type == STREAM_CLEAR) {
+ doDeleteWindows(pOperator, &pInfo->interval, pOperator->exprSupp.numOfExprs, pBlock, NULL, NULL);
+ qDebug("%s clear existed time window results for updates checked", GET_TASKID(pTaskInfo));
+ continue;
+ } else if (pBlock->info.type == STREAM_DELETE_DATA || pBlock->info.type == STREAM_DELETE_RESULT) {
+ // doDeleteSpecifyIntervalWindow(&pInfo->aggSup, &pInfo->twAggSup, pBlock, pInfo->pDelWins, &pInfo->interval,
+ // pUpdatedMap);
+ doDeleteWindows(pOperator, &pInfo->interval, pOperator->exprSupp.numOfExprs, pBlock, pInfo->pDelWins,
+ pUpdatedMap);
+ continue;
+ } else if (pBlock->info.type == STREAM_GET_ALL) {
+ getAllIntervalWindow(pInfo->aggSup.pResultRowHashTable, pUpdatedMap);
+ continue;
+ }
+
+ if (pBlock->info.type == STREAM_NORMAL && pBlock->info.version != 0) {
+ // set input version
+ pTaskInfo->version = pBlock->info.version;
+ }
+
+ if (pInfo->scalarSupp.pExprInfo != NULL) {
+ SExprSupp* pExprSup = &pInfo->scalarSupp;
+ projectApplyFunctions(pExprSup->pExprInfo, pBlock, pBlock, pExprSup->pCtx, pExprSup->numOfExprs, NULL);
+ }
+
+ // The timewindow that overlaps the timestamps of the input pBlock need to be recalculated and return to the
+ // caller. Note that all the time window are not close till now.
+ // the pDataBlock are always the same one, no need to call this again
+ setInputDataBlock(pOperator, pSup->pCtx, pBlock, TSDB_ORDER_ASC, MAIN_SCAN, true);
+ if (pInfo->invertible) {
+ setInverFunction(pSup->pCtx, pOperator->exprSupp.numOfExprs, pBlock->info.type);
+ }
+
+ maxTs = TMAX(maxTs, pBlock->info.window.ekey);
+ minTs = TMIN(minTs, pBlock->info.window.skey);
+ // doStreamIntervalAggImpl(pOperator, &pInfo->binfo.resultRowInfo, pBlock, MAIN_SCAN, pUpdatedMap);
+ // new disc buf
+ doStreamIntervalAggImpl2(pOperator, pBlock, pBlock->info.groupId, pUpdatedMap);
+ }
+ pInfo->twAggSup.maxTs = TMAX(pInfo->twAggSup.maxTs, maxTs);
+ pInfo->twAggSup.minTs = TMIN(pInfo->twAggSup.minTs, minTs);
+ pOperator->status = OP_RES_TO_RETURN;
+ closeStreamIntervalWindow(pInfo->aggSup.pResultRowHashTable, &pInfo->twAggSup, &pInfo->interval, NULL, pUpdatedMap,
+ pOperator);
+
+ void* pIte = NULL;
+ while ((pIte = taosHashIterate(pUpdatedMap, pIte)) != NULL) {
+ taosArrayPush(pUpdated, pIte);
+ }
+ taosArraySort(pUpdated, resultrowComparAsc);
+
+ initMultiResInfoFromArrayList(&pInfo->groupResInfo, pUpdated);
+ blockDataEnsureCapacity(pInfo->binfo.pRes, pOperator->resultInfo.capacity);
+ removeDeleteResults(pUpdatedMap, pInfo->pDelWins);
+ taosHashCleanup(pUpdatedMap);
+
+ doBuildDeleteResult(pInfo->pDelWins, &pInfo->delIndex, pInfo->pDelRes);
+ if (pInfo->pDelRes->info.rows > 0) {
+ printDataBlock(pInfo->pDelRes, "single interval delete");
+ return pInfo->pDelRes;
+ }
+
+ doBuildResult(pOperator, pInfo->binfo.pRes, &pInfo->groupResInfo);
+ if (pInfo->binfo.pRes->info.rows > 0) {
+ printDataBlock(pInfo->binfo.pRes, "single interval");
+ return pInfo->binfo.pRes;
+ }
+
+ return NULL;
+}
+
+void destroyStreamIntervalOperatorInfo(void* param) {
+ SStreamIntervalOperatorInfo* pInfo = (SStreamIntervalOperatorInfo*)param;
+ cleanupBasicInfo(&pInfo->binfo);
+ cleanupAggSup(&pInfo->aggSup);
+ pInfo->pRecycledPages = taosArrayDestroy(pInfo->pRecycledPages);
+
+ pInfo->pDelWins = taosArrayDestroy(pInfo->pDelWins);
+ pInfo->pDelRes = blockDataDestroy(pInfo->pDelRes);
+
+ cleanupGroupResInfo(&pInfo->groupResInfo);
+ colDataDestroy(&pInfo->twAggSup.timeWindowData);
+ taosMemoryFreeClear(param);
+}
+
+SOperatorInfo* createStreamIntervalOperatorInfo(SOperatorInfo* downstream, SPhysiNode* pPhyNode,
+ SExecTaskInfo* pTaskInfo) {
+ SStreamIntervalOperatorInfo* pInfo = taosMemoryCalloc(1, sizeof(SStreamIntervalOperatorInfo));
+ SOperatorInfo* pOperator = taosMemoryCalloc(1, sizeof(SOperatorInfo));
+ if (pInfo == NULL || pOperator == NULL) {
+ goto _error;
+ }
+ SStreamIntervalPhysiNode* pIntervalPhyNode = (SStreamIntervalPhysiNode*)pPhyNode;
+
+ int32_t numOfCols = 0;
+ SExprInfo* pExprInfo = createExprInfo(pIntervalPhyNode->window.pFuncs, NULL, &numOfCols);
+ ASSERT(numOfCols > 0);
+ SSDataBlock* pResBlock = createResDataBlock(pPhyNode->pOutputDataBlockDesc);
+ SInterval interval = {
+ .interval = pIntervalPhyNode->interval,
+ .sliding = pIntervalPhyNode->sliding,
+ .intervalUnit = pIntervalPhyNode->intervalUnit,
+ .slidingUnit = pIntervalPhyNode->slidingUnit,
+ .offset = pIntervalPhyNode->offset,
+ .precision = ((SColumnNode*)pIntervalPhyNode->window.pTspk)->node.resType.precision,
+ };
+ STimeWindowAggSupp twAggSupp = {
+ .waterMark = pIntervalPhyNode->window.watermark,
+ .calTrigger = pIntervalPhyNode->window.triggerType,
+ .maxTs = INT64_MIN,
+ .minTs = INT64_MAX,
+ .deleteMark = INT64_MAX,
+ };
+ ASSERT(twAggSupp.calTrigger != STREAM_TRIGGER_MAX_DELAY);
+ pOperator->pTaskInfo = pTaskInfo;
+ pInfo->interval = interval;
+ pInfo->twAggSup = twAggSupp;
+ pInfo->ignoreExpiredData = pIntervalPhyNode->window.igExpired;
+ pInfo->isFinal = false;
+
+ if (pIntervalPhyNode->window.pExprs != NULL) {
+ int32_t numOfScalar = 0;
+ SExprInfo* pScalarExprInfo = createExprInfo(pIntervalPhyNode->window.pExprs, NULL, &numOfScalar);
+ int32_t code = initExprSupp(&pInfo->scalarSupp, pScalarExprInfo, numOfScalar);
+ if (code != TSDB_CODE_SUCCESS) {
+ goto _error;
+ }
+ }
+
+ pInfo->primaryTsIndex = ((SColumnNode*)pIntervalPhyNode->window.pTspk)->slotId;
+ initResultSizeInfo(&pOperator->resultInfo, 4096);
+ SExprSupp* pSup = &pOperator->exprSupp;
+ size_t keyBufSize = sizeof(int64_t) + sizeof(int64_t) + POINTER_BYTES;
+ int32_t code = initAggInfo(pSup, &pInfo->aggSup, pExprInfo, numOfCols, keyBufSize, pTaskInfo->id.str);
+ if (code != TSDB_CODE_SUCCESS) {
+ goto _error;
+ }
+
+ initBasicInfo(&pInfo->binfo, pResBlock);
+ initStreamFunciton(pSup->pCtx, pSup->numOfExprs);
+ initExecTimeWindowInfo(&pInfo->twAggSup.timeWindowData, &pTaskInfo->window);
+
+ pInfo->invertible = allInvertible(pSup->pCtx, numOfCols);
+ pInfo->invertible = false; // Todo(liuyao): Dependent TSDB API
+ pInfo->pRecycledPages = taosArrayInit(4, sizeof(int32_t));
+ pInfo->pDelWins = taosArrayInit(4, sizeof(SWinKey));
+ pInfo->delIndex = 0;
+ pInfo->pDelRes = createSpecialDataBlock(STREAM_DELETE_RESULT);
+ initResultRowInfo(&pInfo->binfo.resultRowInfo);
+
+ pOperator->name = "StreamIntervalOperator";
+ pOperator->operatorType = QUERY_NODE_PHYSICAL_PLAN_STREAM_INTERVAL;
+ pOperator->blocking = true;
+ pOperator->status = OP_NOT_OPENED;
+ pOperator->info = pInfo;
+ pOperator->fpSet =
+ createOperatorFpSet(operatorDummyOpenFn, doStreamIntervalAgg, NULL, NULL, destroyStreamIntervalOperatorInfo,
+ aggEncodeResultRow, aggDecodeResultRow, NULL);
+
+ initIntervalDownStream(downstream, pPhyNode->type, &pInfo->aggSup, &pInfo->interval, &pInfo->twAggSup);
+ code = appendDownstream(pOperator, &downstream, 1);
+ if (code != TSDB_CODE_SUCCESS) {
+ goto _error;
+ }
+
+ return pOperator;
+
+_error:
+ destroyStreamIntervalOperatorInfo(pInfo);
taosMemoryFreeClear(pOperator);
pTaskInfo->code = code;
return NULL;
diff --git a/source/libs/executor/src/tlinearhash.c b/source/libs/executor/src/tlinearhash.c
index e0752840db07052e056063b5789003cf9b6507e0..cffabcb6aca1f1f5ba457fb765828889bc3c03e6 100644
--- a/source/libs/executor/src/tlinearhash.c
+++ b/source/libs/executor/src/tlinearhash.c
@@ -97,7 +97,7 @@ static int32_t doAddToBucket(SLHashObj* pHashObj, SLHashBucket* pBucket, int32_t
// allocate the overflow buffer page to hold this k/v.
int32_t newPageId = -1;
- SFilePage* pNewPage = getNewBufPage(pHashObj->pBuf, 0, &newPageId);
+ SFilePage* pNewPage = getNewBufPage(pHashObj->pBuf, &newPageId);
if (pNewPage == NULL) {
return terrno;
}
@@ -227,7 +227,7 @@ static int32_t doAddNewBucket(SLHashObj* pHashObj) {
}
int32_t pageId = -1;
- SFilePage* p = getNewBufPage(pHashObj->pBuf, 0, &pageId);
+ SFilePage* p = getNewBufPage(pHashObj->pBuf, &pageId);
if (p == NULL) {
return terrno;
}
diff --git a/source/libs/executor/src/tsimplehash.c b/source/libs/executor/src/tsimplehash.c
index 6b2edf0d5e6e1f41b5d354d110fb23892a864b33..16fd11f97d330fa3bc2622e2f7671fa532dce61f 100644
--- a/source/libs/executor/src/tsimplehash.c
+++ b/source/libs/executor/src/tsimplehash.c
@@ -31,21 +31,12 @@
taosMemoryFreeClear(_n); \
} while (0);
-#pragma pack(push, 4)
-typedef struct SHNode {
- struct SHNode *next;
- uint32_t keyLen : 20;
- uint32_t dataLen : 12;
- char data[];
-} SHNode;
-#pragma pack(pop)
-
struct SSHashObj {
SHNode **hashList;
size_t capacity; // number of slots
- int64_t size; // number of elements in hash table
- _hash_fn_t hashFp; // hash function
- _equal_fn_t equalFp; // equal function
+ int64_t size; // number of elements in hash table
+ _hash_fn_t hashFp; // hash function
+ _equal_fn_t equalFp; // equal function
};
static FORCE_INLINE int32_t taosHashCapacity(int32_t length) {
@@ -76,7 +67,6 @@ SSHashObj *tSimpleHashInit(size_t capacity, _hash_fn_t fn) {
pHashObj->hashFp = fn;
ASSERT((pHashObj->capacity & (pHashObj->capacity - 1)) == 0);
-
pHashObj->hashList = (SHNode **)taosMemoryCalloc(pHashObj->capacity, sizeof(void *));
if (!pHashObj->hashList) {
taosMemoryFree(pHashObj);
@@ -257,6 +247,37 @@ void *tSimpleHashGet(SSHashObj *pHashObj, const void *key, size_t keyLen) {
}
int32_t tSimpleHashRemove(SSHashObj *pHashObj, const void *key, size_t keyLen) {
+ int32_t code = TSDB_CODE_FAILED;
+ if (!pHashObj || !key) {
+ return code;
+ }
+
+ uint32_t hashVal = (*pHashObj->hashFp)(key, (uint32_t)keyLen);
+
+ int32_t slot = HASH_INDEX(hashVal, pHashObj->capacity);
+
+ SHNode *pNode = pHashObj->hashList[slot];
+ SHNode *pPrev = NULL;
+ while (pNode) {
+ if ((*(pHashObj->equalFp))(GET_SHASH_NODE_KEY(pNode, pNode->dataLen), key, keyLen) == 0) {
+ if (!pPrev) {
+ pHashObj->hashList[slot] = pNode->next;
+ } else {
+ pPrev->next = pNode->next;
+ }
+ FREE_HASH_NODE(pNode);
+ atomic_sub_fetch_64(&pHashObj->size, 1);
+ code = TSDB_CODE_SUCCESS;
+ break;
+ }
+ pPrev = pNode;
+ pNode = pNode->next;
+ }
+
+ return code;
+}
+
+int32_t tSimpleHashIterateRemove(SSHashObj *pHashObj, const void *key, size_t keyLen, void **pIter, int32_t *iter) {
if (!pHashObj || !key) {
return TSDB_CODE_FAILED;
}
@@ -274,6 +295,11 @@ int32_t tSimpleHashRemove(SSHashObj *pHashObj, const void *key, size_t keyLen) {
} else {
pPrev->next = pNode->next;
}
+
+ if (*pIter == (void *)GET_SHASH_NODE_DATA(pNode)) {
+ *pIter = pPrev ? GET_SHASH_NODE_DATA(pPrev) : NULL;
+ }
+
FREE_HASH_NODE(pNode);
atomic_sub_fetch_64(&pHashObj->size, 1);
break;
@@ -302,6 +328,7 @@ void tSimpleHashClear(SSHashObj *pHashObj) {
FREE_HASH_NODE(pNode);
pNode = pNext;
}
+ pHashObj->hashList[i] = NULL;
}
atomic_store_64(&pHashObj->size, 0);
}
@@ -324,15 +351,6 @@ size_t tSimpleHashGetMemSize(const SSHashObj *pHashObj) {
return (pHashObj->capacity * sizeof(void *)) + sizeof(SHNode) * tSimpleHashGetSize(pHashObj) + sizeof(SSHashObj);
}
-void *tSimpleHashGetKey(void *data, size_t *keyLen) {
- SHNode *node = (SHNode *)((char *)data - offsetof(SHNode, data));
- if (keyLen) {
- *keyLen = node->keyLen;
- }
-
- return POINTER_SHIFT(data, node->dataLen);
-}
-
void *tSimpleHashIterate(const SSHashObj *pHashObj, void *data, int32_t *iter) {
if (!pHashObj) {
return NULL;
@@ -341,53 +359,12 @@ void *tSimpleHashIterate(const SSHashObj *pHashObj, void *data, int32_t *iter) {
SHNode *pNode = NULL;
if (!data) {
- for (int32_t i = 0; i < pHashObj->capacity; ++i) {
- pNode = pHashObj->hashList[i];
- if (!pNode) {
- continue;
- }
- *iter = i;
- return GET_SHASH_NODE_DATA(pNode);
- }
- return NULL;
- }
-
- pNode = (SHNode *)((char *)data - offsetof(SHNode, data));
-
- if (pNode->next) {
- return GET_SHASH_NODE_DATA(pNode->next);
- }
-
- ++(*iter);
- for (int32_t i = *iter; i < pHashObj->capacity; ++i) {
- pNode = pHashObj->hashList[i];
- if (!pNode) {
- continue;
- }
- *iter = i;
- return GET_SHASH_NODE_DATA(pNode);
- }
-
- return NULL;
-}
-
-void *tSimpleHashIterateKV(const SSHashObj *pHashObj, void *data, void **key, int32_t *iter) {
- if (!pHashObj) {
- return NULL;
- }
-
- SHNode *pNode = NULL;
-
- if (!data) {
- for (int32_t i = 0; i < pHashObj->capacity; ++i) {
+ for (int32_t i = *iter; i < pHashObj->capacity; ++i) {
pNode = pHashObj->hashList[i];
if (!pNode) {
continue;
}
*iter = i;
- if (key) {
- *key = GET_SHASH_NODE_KEY(pNode, pNode->dataLen);
- }
return GET_SHASH_NODE_DATA(pNode);
}
return NULL;
@@ -396,9 +373,6 @@ void *tSimpleHashIterateKV(const SSHashObj *pHashObj, void *data, void **key, in
pNode = (SHNode *)((char *)data - offsetof(SHNode, data));
if (pNode->next) {
- if (key) {
- *key = GET_SHASH_NODE_KEY(pNode->next, pNode->next->dataLen);
- }
return GET_SHASH_NODE_DATA(pNode->next);
}
@@ -409,9 +383,6 @@ void *tSimpleHashIterateKV(const SSHashObj *pHashObj, void *data, void **key, in
continue;
}
*iter = i;
- if (key) {
- *key = GET_SHASH_NODE_KEY(pNode, pNode->dataLen);
- }
return GET_SHASH_NODE_DATA(pNode);
}
diff --git a/source/libs/executor/src/tsort.c b/source/libs/executor/src/tsort.c
index 48af951773814d9979eb6d349670753ad4b036eb..63fc9d9e1c553bc210c7f525b014cd3d0b4f852b 100644
--- a/source/libs/executor/src/tsort.c
+++ b/source/libs/executor/src/tsort.c
@@ -97,7 +97,7 @@ SSortHandle* tsortCreateSortHandle(SArray* pSortInfo, int32_t type, int32_t page
return pSortHandle;
}
-static int32_t sortComparClearup(SMsortComparParam* cmpParam) {
+static int32_t sortComparCleanup(SMsortComparParam* cmpParam) {
for(int32_t i = 0; i < cmpParam->numOfSources; ++i) {
SSortSource* pSource = cmpParam->pSources[i]; // NOTICE: pSource may be SGenericSource *, if it is SORT_MULTISOURCE_MERGE
blockDataDestroy(pSource->src.pBlock);
@@ -134,15 +134,14 @@ int32_t tsortAddSource(SSortHandle* pSortHandle, void* pSource) {
return TSDB_CODE_SUCCESS;
}
-static int32_t doAddNewExternalMemSource(SDiskbasedBuf *pBuf, SArray* pAllSources, SSDataBlock* pBlock, int32_t* sourceId) {
+static int32_t doAddNewExternalMemSource(SDiskbasedBuf *pBuf, SArray* pAllSources, SSDataBlock* pBlock, int32_t* sourceId, SArray* pPageIdList) {
SSortSource* pSource = taosMemoryCalloc(1, sizeof(SSortSource));
if (pSource == NULL) {
return TSDB_CODE_QRY_OUT_OF_MEMORY;
}
- pSource->pageIdList = getDataBufPagesIdList(pBuf, (*sourceId));
pSource->src.pBlock = pBlock;
-
+ pSource->pageIdList = pPageIdList;
taosArrayPush(pAllSources, &pSource);
(*sourceId) += 1;
@@ -171,6 +170,7 @@ static int32_t doAddToBuf(SSDataBlock* pDataBlock, SSortHandle* pHandle) {
}
}
+ SArray* pPageIdList = taosArrayInit(4, sizeof(int32_t));
while(start < pDataBlock->info.rows) {
int32_t stop = 0;
blockDataSplitRows(pDataBlock, pDataBlock->info.hasVarCol, start, &stop, pHandle->pageSize);
@@ -180,12 +180,14 @@ static int32_t doAddToBuf(SSDataBlock* pDataBlock, SSortHandle* pHandle) {
}
int32_t pageId = -1;
- void* pPage = getNewBufPage(pHandle->pBuf, pHandle->sourceId, &pageId);
+ void* pPage = getNewBufPage(pHandle->pBuf, &pageId);
if (pPage == NULL) {
blockDataDestroy(p);
return terrno;
}
+ taosArrayPush(pPageIdList, &pageId);
+
int32_t size = blockDataGetSize(p) + sizeof(int32_t) + taosArrayGetSize(p->pDataBlock) * sizeof(int32_t);
assert(size <= getBufPageSize(pHandle->pBuf));
@@ -201,7 +203,7 @@ static int32_t doAddToBuf(SSDataBlock* pDataBlock, SSortHandle* pHandle) {
blockDataCleanup(pDataBlock);
SSDataBlock* pBlock = createOneDataBlock(pDataBlock, false);
- return doAddNewExternalMemSource(pHandle->pBuf, pHandle->pOrderedSource, pBlock, &pHandle->sourceId);
+ return doAddNewExternalMemSource(pHandle->pBuf, pHandle->pOrderedSource, pBlock, &pHandle->sourceId, pPageIdList);
}
static void setCurrentSourceIsDone(SSortSource* pSource, SSortHandle* pHandle) {
@@ -225,9 +227,9 @@ static int32_t sortComparInit(SMsortComparParam* cmpParam, SArray* pSources, int
continue;
}
- SPageInfo* pPgInfo = *(SPageInfo**)taosArrayGet(pSource->pageIdList, pSource->pageIndex);
+ int32_t* pPgId = taosArrayGet(pSource->pageIdList, pSource->pageIndex);
- void* pPage = getBufPage(pHandle->pBuf, getPageId(pPgInfo));
+ void* pPage = getBufPage(pHandle->pBuf, *pPgId);
code = blockDataFromBuf(pSource->src.pBlock, pPage);
if (code != TSDB_CODE_SUCCESS) {
return code;
@@ -300,9 +302,9 @@ static int32_t adjustMergeTreeForNextTuple(SSortSource *pSource, SMultiwayMergeT
pSource->pageIndex = -1;
pSource->src.pBlock = blockDataDestroy(pSource->src.pBlock);
} else {
- SPageInfo* pPgInfo = *(SPageInfo**)taosArrayGet(pSource->pageIdList, pSource->pageIndex);
+ int32_t* pPgId = taosArrayGet(pSource->pageIdList, pSource->pageIndex);
- void* pPage = getBufPage(pHandle->pBuf, getPageId(pPgInfo));
+ void* pPage = getBufPage(pHandle->pBuf, *pPgId);
int32_t code = blockDataFromBuf(pSource->src.pBlock, pPage);
if (code != TSDB_CODE_SUCCESS) {
return code;
@@ -502,6 +504,7 @@ static int32_t doInternalMergeSort(SSortHandle* pHandle) {
return code;
}
+ SArray* pPageIdList = taosArrayInit(4, sizeof(int32_t));
while (1) {
SSDataBlock* pDataBlock = getSortedBlockDataInner(pHandle, &pHandle->cmpParam, numOfRows);
if (pDataBlock == NULL) {
@@ -509,11 +512,13 @@ static int32_t doInternalMergeSort(SSortHandle* pHandle) {
}
int32_t pageId = -1;
- void* pPage = getNewBufPage(pHandle->pBuf, pHandle->sourceId, &pageId);
+ void* pPage = getNewBufPage(pHandle->pBuf, &pageId);
if (pPage == NULL) {
return terrno;
}
+ taosArrayPush(pPageIdList, &pageId);
+
int32_t size = blockDataGetSize(pDataBlock) + sizeof(int32_t) + taosArrayGetSize(pDataBlock->pDataBlock) * sizeof(int32_t);
assert(size <= getBufPageSize(pHandle->pBuf));
@@ -525,12 +530,12 @@ static int32_t doInternalMergeSort(SSortHandle* pHandle) {
blockDataCleanup(pDataBlock);
}
- sortComparClearup(&pHandle->cmpParam);
+ sortComparCleanup(&pHandle->cmpParam);
tMergeTreeDestroy(pHandle->pMergeTree);
pHandle->numOfCompletedSources = 0;
SSDataBlock* pBlock = createOneDataBlock(pHandle->pDataBlock, false);
- code = doAddNewExternalMemSource(pHandle->pBuf, pResList, pBlock, &pHandle->sourceId);
+ code = doAddNewExternalMemSource(pHandle->pBuf, pResList, pBlock, &pHandle->sourceId, pPageIdList);
if (code != 0) {
return code;
}
diff --git a/source/libs/executor/test/tSimpleHashTests.cpp b/source/libs/executor/test/tSimpleHashTests.cpp
index acb6d434b484057196067954df13eeb4bcd602b3..3bf339ef9040879c0978f9bedffb2b23bd8ec806 100644
--- a/source/libs/executor/test/tSimpleHashTests.cpp
+++ b/source/libs/executor/test/tSimpleHashTests.cpp
@@ -30,7 +30,7 @@
// return RUN_ALL_TESTS();
// }
-TEST(testCase, tSimpleHashTest) {
+TEST(testCase, tSimpleHashTest_intKey) {
SSHashObj *pHashObj =
tSimpleHashInit(8, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT));
@@ -57,12 +57,14 @@ TEST(testCase, tSimpleHashTest) {
int32_t iter = 0;
int64_t keySum = 0;
int64_t dataSum = 0;
+ size_t kLen = 0;
while ((data = tSimpleHashIterate(pHashObj, data, &iter))) {
- void *key = tSimpleHashGetKey(data, NULL);
+ void *key = tSimpleHashGetKey(data, &kLen);
+ ASSERT_EQ(keyLen, kLen);
keySum += *(int64_t *)key;
dataSum += *(int64_t *)data;
}
-
+
ASSERT_EQ(keySum, dataSum);
ASSERT_EQ(keySum, originKeySum);
@@ -74,4 +76,69 @@ TEST(testCase, tSimpleHashTest) {
tSimpleHashCleanup(pHashObj);
}
+
+TEST(testCase, tSimpleHashTest_binaryKey) {
+ SSHashObj *pHashObj =
+ tSimpleHashInit(8, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT));
+
+ assert(pHashObj != nullptr);
+
+ ASSERT_EQ(0, tSimpleHashGetSize(pHashObj));
+
+ typedef struct {
+ int64_t suid;
+ int64_t uid;
+ } SCombineKey;
+
+ size_t keyLen = sizeof(SCombineKey);
+ size_t dataLen = sizeof(int64_t);
+
+ int64_t originDataSum = 0;
+ SCombineKey combineKey = {0};
+ for (int64_t i = 1; i <= 100; ++i) {
+ combineKey.suid = i;
+ combineKey.uid = i + 1;
+ tSimpleHashPut(pHashObj, (const void *)&combineKey, keyLen, (const void *)&i, dataLen);
+ originDataSum += i;
+ ASSERT_EQ(i, tSimpleHashGetSize(pHashObj));
+ }
+
+ for (int64_t i = 1; i <= 100; ++i) {
+ combineKey.suid = i;
+ combineKey.uid = i + 1;
+ void *data = tSimpleHashGet(pHashObj, (const void *)&combineKey, keyLen);
+ ASSERT_EQ(i, *(int64_t *)data);
+ }
+
+ void *data = NULL;
+ int32_t iter = 0;
+ int64_t keySum = 0;
+ int64_t dataSum = 0;
+ size_t kLen = 0;
+ while ((data = tSimpleHashIterate(pHashObj, data, &iter))) {
+ void *key = tSimpleHashGetKey(data, &kLen);
+ ASSERT_EQ(keyLen, kLen);
+ dataSum += *(int64_t *)data;
+ }
+
+ ASSERT_EQ(originDataSum, dataSum);
+
+ tSimpleHashRemove(pHashObj, (const void *)&combineKey, keyLen);
+
+ while ((data = tSimpleHashIterate(pHashObj, data, &iter))) {
+ void *key = tSimpleHashGetKey(data, &kLen);
+ ASSERT_EQ(keyLen, kLen);
+ }
+
+ for (int64_t i = 1; i <= 99; ++i) {
+ combineKey.suid = i;
+ combineKey.uid = i + 1;
+ tSimpleHashRemove(pHashObj, (const void *)&combineKey, keyLen);
+ ASSERT_EQ(99 - i, tSimpleHashGetSize(pHashObj));
+ }
+
+ tSimpleHashCleanup(pHashObj);
+}
+
+
#pragma GCC diagnostic pop
\ No newline at end of file
diff --git a/source/libs/function/CMakeLists.txt b/source/libs/function/CMakeLists.txt
index ea401e56e5c6585b93344af99280bb450137f98f..dd048a047aeafdddce06dd0841939afe20bfd3d9 100644
--- a/source/libs/function/CMakeLists.txt
+++ b/source/libs/function/CMakeLists.txt
@@ -14,7 +14,7 @@ target_include_directories(
target_link_libraries(
function
- PRIVATE os util common nodes scalar qcom transport
+ PRIVATE os util common nodes scalar qcom transport stream
PUBLIC uv_a
)
diff --git a/source/libs/function/inc/functionMgtInt.h b/source/libs/function/inc/functionMgtInt.h
index 37208c4723fe2bac49215f7dcc7caf76a96855e3..9bff812c3a9866eb0ccb1eade0de1c63580d50c4 100644
--- a/source/libs/function/inc/functionMgtInt.h
+++ b/source/libs/function/inc/functionMgtInt.h
@@ -50,6 +50,7 @@ extern "C" {
#define FUNC_MGT_KEEP_ORDER_FUNC FUNC_MGT_FUNC_CLASSIFICATION_MASK(21)
#define FUNC_MGT_CUMULATIVE_FUNC FUNC_MGT_FUNC_CLASSIFICATION_MASK(22)
#define FUNC_MGT_FORBID_STABLE_FUNC FUNC_MGT_FUNC_CLASSIFICATION_MASK(23)
+#define FUNC_MGT_INTERP_PC_FUNC FUNC_MGT_FUNC_CLASSIFICATION_MASK(24)
#define FUNC_MGT_TEST_MASK(val, mask) (((val) & (mask)) != 0)
diff --git a/source/libs/function/inc/tpercentile.h b/source/libs/function/inc/tpercentile.h
index dfb52f76946c502b38231130858b5694b7171f35..554f9e567f35cc0272a2a9755153de1b54d34392 100644
--- a/source/libs/function/inc/tpercentile.h
+++ b/source/libs/function/inc/tpercentile.h
@@ -51,20 +51,20 @@ struct tMemBucket;
typedef int32_t (*__perc_hash_func_t)(struct tMemBucket *pBucket, const void *value);
typedef struct tMemBucket {
- int16_t numOfSlots;
- int16_t type;
- int16_t bytes;
- int32_t total;
- int32_t elemPerPage; // number of elements for each object
- int32_t maxCapacity; // maximum allowed number of elements that can be sort directly to get the result
- int32_t bufPageSize; // disk page size
- MinMaxEntry range; // value range
- int32_t times; // count that has been checked for deciding the correct data value buckets.
- __compar_fn_t comparFn;
-
- tMemBucketSlot * pSlots;
- SDiskbasedBuf *pBuffer;
- __perc_hash_func_t hashFunc;
+ int16_t numOfSlots;
+ int16_t type;
+ int16_t bytes;
+ int32_t total;
+ int32_t elemPerPage; // number of elements for each object
+ int32_t maxCapacity; // maximum allowed number of elements that can be sort directly to get the result
+ int32_t bufPageSize; // disk page size
+ MinMaxEntry range; // value range
+ int32_t times; // count that has been checked for deciding the correct data value buckets.
+ __compar_fn_t comparFn;
+ tMemBucketSlot* pSlots;
+ SDiskbasedBuf* pBuffer;
+ __perc_hash_func_t hashFunc;
+ SHashObj* groupPagesMap; // disk page map for different groups;
} tMemBucket;
tMemBucket *tMemBucketCreate(int16_t nElemSize, int16_t dataType, double minval, double maxval);
diff --git a/source/libs/function/src/builtins.c b/source/libs/function/src/builtins.c
index ed82e4cb50cd2ce72ab3e9965b7ef1481fe2ccfa..54455415b8345b6ce80bd8fbf1c7e1eb250b3252 100644
--- a/source/libs/function/src/builtins.c
+++ b/source/libs/function/src/builtins.c
@@ -207,7 +207,6 @@ static int32_t countTrailingSpaces(const SValueNode* pVal, bool isLtrim) {
}
return numOfSpaces;
-
}
void static addTimezoneParam(SNodeList* pList) {
@@ -303,7 +302,7 @@ static int32_t translateInOutStr(SFunctionNode* pFunc, char* pErrBuf, int32_t le
}
SExprNode* pPara1 = (SExprNode*)nodesListGetNode(pFunc->pParameterList, 0);
- if (!IS_VAR_DATA_TYPE(pPara1->resType.type)) {
+ if (!IS_STR_DATA_TYPE(pPara1->resType.type)) {
return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName);
}
@@ -317,12 +316,12 @@ static int32_t translateTrimStr(SFunctionNode* pFunc, char* pErrBuf, int32_t len
}
SExprNode* pPara1 = (SExprNode*)nodesListGetNode(pFunc->pParameterList, 0);
- if (!IS_VAR_DATA_TYPE(pPara1->resType.type)) {
+ if (!IS_STR_DATA_TYPE(pPara1->resType.type)) {
return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName);
}
int32_t numOfSpaces = 0;
- SNode* pParamNode1 = nodesListGetNode(pFunc->pParameterList, 0);
+ SNode* pParamNode1 = nodesListGetNode(pFunc->pParameterList, 0);
// for select trim functions with constant value from table,
// need to set the proper result result schema bytes to avoid
// trailing garbage characters
@@ -331,7 +330,6 @@ static int32_t translateTrimStr(SFunctionNode* pFunc, char* pErrBuf, int32_t len
numOfSpaces = countTrailingSpaces(pValue, isLtrim);
}
-
int32_t resBytes = pPara1->resType.bytes - numOfSpaces;
pFunc->node.resType = (SDataType){.bytes = resBytes, .type = pPara1->resType.type};
return TSDB_CODE_SUCCESS;
@@ -546,7 +544,7 @@ static int32_t translateApercentile(SFunctionNode* pFunc, char* pErrBuf, int32_t
// param2
if (3 == numOfParams) {
uint8_t para3Type = ((SExprNode*)nodesListGetNode(pFunc->pParameterList, 2))->resType.type;
- if (!IS_VAR_DATA_TYPE(para3Type)) {
+ if (!IS_STR_DATA_TYPE(para3Type)) {
return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName);
}
@@ -593,7 +591,7 @@ static int32_t translateApercentileImpl(SFunctionNode* pFunc, char* pErrBuf, int
// param2
if (3 == numOfParams) {
uint8_t para3Type = ((SExprNode*)nodesListGetNode(pFunc->pParameterList, 2))->resType.type;
- if (!IS_VAR_DATA_TYPE(para3Type)) {
+ if (!IS_STR_DATA_TYPE(para3Type)) {
return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName);
}
@@ -698,7 +696,7 @@ static int32_t translateSpread(SFunctionNode* pFunc, char* pErrBuf, int32_t len)
}
uint8_t paraType = ((SExprNode*)nodesListGetNode(pFunc->pParameterList, 0))->resType.type;
- if (!IS_NUMERIC_TYPE(paraType) && TSDB_DATA_TYPE_TIMESTAMP != paraType) {
+ if (!IS_NUMERIC_TYPE(paraType) && !IS_TIMESTAMP_TYPE(paraType)) {
return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName);
}
@@ -713,7 +711,7 @@ static int32_t translateSpreadImpl(SFunctionNode* pFunc, char* pErrBuf, int32_t
uint8_t paraType = ((SExprNode*)nodesListGetNode(pFunc->pParameterList, 0))->resType.type;
if (isPartial) {
- if (!IS_NUMERIC_TYPE(paraType) && TSDB_DATA_TYPE_TIMESTAMP != paraType) {
+ if (!IS_NUMERIC_TYPE(paraType) && !IS_TIMESTAMP_TYPE(paraType)) {
return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName);
}
pFunc->node.resType = (SDataType){.bytes = getSpreadInfoSize() + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_BINARY};
@@ -788,7 +786,7 @@ static int32_t translateElapsedImpl(SFunctionNode* pFunc, char* pErrBuf, int32_t
}
uint8_t paraType = ((SExprNode*)nodesListGetNode(pFunc->pParameterList, 0))->resType.type;
- if (TSDB_DATA_TYPE_TIMESTAMP != paraType) {
+ if (!IS_TIMESTAMP_TYPE(paraType)) {
return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName);
}
@@ -1388,7 +1386,7 @@ static int32_t translateSample(SFunctionNode* pFunc, char* pErrBuf, int32_t len)
}
// set result type
- if (IS_VAR_DATA_TYPE(colType)) {
+ if (IS_STR_DATA_TYPE(colType)) {
pFunc->node.resType = (SDataType){.bytes = pCol->resType.bytes, .type = colType};
} else {
pFunc->node.resType = (SDataType){.bytes = tDataTypes[colType].bytes, .type = colType};
@@ -1431,7 +1429,7 @@ static int32_t translateTail(SFunctionNode* pFunc, char* pErrBuf, int32_t len) {
}
// set result type
- if (IS_VAR_DATA_TYPE(colType)) {
+ if (IS_STR_DATA_TYPE(colType)) {
pFunc->node.resType = (SDataType){.bytes = pCol->resType.bytes, .type = colType};
} else {
pFunc->node.resType = (SDataType){.bytes = tDataTypes[colType].bytes, .type = colType};
@@ -1514,7 +1512,7 @@ static int32_t translateInterp(SFunctionNode* pFunc, char* pErrBuf, int32_t len)
for (int32_t i = 1; i < 3; ++i) {
nodeType = nodeType(nodesListGetNode(pFunc->pParameterList, i));
paraType = ((SExprNode*)nodesListGetNode(pFunc->pParameterList, i))->resType.type;
- if (!IS_VAR_DATA_TYPE(paraType) || QUERY_NODE_VALUE != nodeType) {
+ if (!IS_STR_DATA_TYPE(paraType) || QUERY_NODE_VALUE != nodeType) {
return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName);
}
@@ -1634,7 +1632,7 @@ static int32_t translateDiff(SFunctionNode* pFunc, char* pErrBuf, int32_t len) {
uint8_t colType = ((SExprNode*)nodesListGetNode(pFunc->pParameterList, 0))->resType.type;
if (!IS_SIGNED_NUMERIC_TYPE(colType) && !IS_FLOAT_TYPE(colType) && TSDB_DATA_TYPE_BOOL != colType &&
- TSDB_DATA_TYPE_TIMESTAMP != colType) {
+ !IS_TIMESTAMP_TYPE(colType)) {
return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName);
}
@@ -1660,7 +1658,7 @@ static int32_t translateDiff(SFunctionNode* pFunc, char* pErrBuf, int32_t len) {
}
uint8_t resType;
- if (IS_SIGNED_NUMERIC_TYPE(colType) || TSDB_DATA_TYPE_BOOL == colType || TSDB_DATA_TYPE_TIMESTAMP == colType) {
+ if (IS_SIGNED_NUMERIC_TYPE(colType) || IS_TIMESTAMP_TYPE(colType) || TSDB_DATA_TYPE_BOOL == colType) {
resType = TSDB_DATA_TYPE_BIGINT;
} else {
resType = TSDB_DATA_TYPE_DOUBLE;
@@ -1682,7 +1680,7 @@ static int32_t translateLength(SFunctionNode* pFunc, char* pErrBuf, int32_t len)
return invaildFuncParaNumErrMsg(pErrBuf, len, pFunc->functionName);
}
- if (!IS_VAR_DATA_TYPE(((SExprNode*)nodesListGetNode(pFunc->pParameterList, 0))->resType.type)) {
+ if (!IS_STR_DATA_TYPE(((SExprNode*)nodesListGetNode(pFunc->pParameterList, 0))->resType.type)) {
return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName);
}
@@ -1714,7 +1712,7 @@ static int32_t translateConcatImpl(SFunctionNode* pFunc, char* pErrBuf, int32_t
for (int32_t i = 0; i < numOfParams; ++i) {
SNode* pPara = nodesListGetNode(pFunc->pParameterList, i);
uint8_t paraType = ((SExprNode*)pPara)->resType.type;
- if (!IS_VAR_DATA_TYPE(paraType) && !IS_NULL_TYPE(paraType)) {
+ if (!IS_STR_DATA_TYPE(paraType) && !IS_NULL_TYPE(paraType)) {
return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName);
}
if (TSDB_DATA_TYPE_NCHAR == paraType) {
@@ -1770,7 +1768,7 @@ static int32_t translateSubstr(SFunctionNode* pFunc, char* pErrBuf, int32_t len)
uint8_t para0Type = pPara0->resType.type;
uint8_t para1Type = pPara1->resType.type;
- if (!IS_VAR_DATA_TYPE(para0Type) || !IS_INTEGER_TYPE(para1Type)) {
+ if (!IS_STR_DATA_TYPE(para0Type) || !IS_INTEGER_TYPE(para1Type)) {
return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName);
}
@@ -1802,7 +1800,7 @@ static int32_t translateCast(SFunctionNode* pFunc, char* pErrBuf, int32_t len) {
uint8_t para2Type = pFunc->node.resType.type;
int32_t para2Bytes = pFunc->node.resType.bytes;
- if (IS_VAR_DATA_TYPE(para2Type)) {
+ if (IS_STR_DATA_TYPE(para2Type)) {
para2Bytes -= VARSTR_HEADER_SIZE;
}
if (para2Bytes <= 0 || para2Bytes > 4096) { // cast dst var type length limits to 4096 bytes
@@ -1825,7 +1823,7 @@ static int32_t translateToIso8601(SFunctionNode* pFunc, char* pErrBuf, int32_t l
// param0
uint8_t paraType = ((SExprNode*)nodesListGetNode(pFunc->pParameterList, 0))->resType.type;
- if (!IS_INTEGER_TYPE(paraType) && TSDB_DATA_TYPE_TIMESTAMP != paraType) {
+ if (!IS_INTEGER_TYPE(paraType) && !IS_TIMESTAMP_TYPE(paraType)) {
return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName);
}
@@ -1859,7 +1857,7 @@ static int32_t translateToUnixtimestamp(SFunctionNode* pFunc, char* pErrBuf, int
return invaildFuncParaNumErrMsg(pErrBuf, len, pFunc->functionName);
}
- if (!IS_VAR_DATA_TYPE(((SExprNode*)nodesListGetNode(pFunc->pParameterList, 0))->resType.type)) {
+ if (!IS_STR_DATA_TYPE(((SExprNode*)nodesListGetNode(pFunc->pParameterList, 0))->resType.type)) {
return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName);
}
@@ -1878,7 +1876,7 @@ static int32_t translateTimeTruncate(SFunctionNode* pFunc, char* pErrBuf, int32_
uint8_t para1Type = ((SExprNode*)nodesListGetNode(pFunc->pParameterList, 0))->resType.type;
uint8_t para2Type = ((SExprNode*)nodesListGetNode(pFunc->pParameterList, 1))->resType.type;
- if ((!IS_VAR_DATA_TYPE(para1Type) && !IS_INTEGER_TYPE(para1Type) && TSDB_DATA_TYPE_TIMESTAMP != para1Type) ||
+ if ((!IS_STR_DATA_TYPE(para1Type) && !IS_INTEGER_TYPE(para1Type) && !IS_TIMESTAMP_TYPE(para1Type)) ||
!IS_INTEGER_TYPE(para2Type)) {
return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName);
}
@@ -1911,7 +1909,7 @@ static int32_t translateTimeDiff(SFunctionNode* pFunc, char* pErrBuf, int32_t le
for (int32_t i = 0; i < 2; ++i) {
uint8_t paraType = ((SExprNode*)nodesListGetNode(pFunc->pParameterList, i))->resType.type;
- if (!IS_VAR_DATA_TYPE(paraType) && !IS_INTEGER_TYPE(paraType) && TSDB_DATA_TYPE_TIMESTAMP != paraType) {
+ if (!IS_STR_DATA_TYPE(paraType) && !IS_INTEGER_TYPE(paraType) && !IS_TIMESTAMP_TYPE(paraType)) {
return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName);
}
}
@@ -2141,7 +2139,7 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = {
{
.name = "avg",
.type = FUNCTION_TYPE_AVG,
- .classification = FUNC_MGT_AGG_FUNC,
+ .classification = FUNC_MGT_AGG_FUNC | FUNC_MGT_SPECIAL_DATA_REQUIRED,
.translateFunc = translateInNumOutDou,
.dataRequiredFunc = statisDataRequired,
.getEnvFunc = getAvgFuncEnv,
@@ -2159,6 +2157,7 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = {
.type = FUNCTION_TYPE_AVG_PARTIAL,
.classification = FUNC_MGT_AGG_FUNC,
.translateFunc = translateAvgPartial,
+ .dataRequiredFunc = statisDataRequired,
.getEnvFunc = getAvgFuncEnv,
.initFunc = avgFunctionSetup,
.processFunc = avgFunction,
@@ -3147,6 +3146,16 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = {
.classification = FUNC_MGT_SYSTEM_INFO_FUNC | FUNC_MGT_SCALAR_FUNC,
.translateFunc = translateUserFunc,
},
+ {
+ .name = "_irowts",
+ .type = FUNCTION_TYPE_IROWTS,
+ .classification = FUNC_MGT_PSEUDO_COLUMN_FUNC | FUNC_MGT_INTERP_PC_FUNC,
+ .translateFunc = translateTimePseudoColumn,
+ .getEnvFunc = getTimePseudoFuncEnv,
+ .initFunc = NULL,
+ .sprocessFunc = NULL,
+ .finalizeFunc = NULL
+ },
};
// clang-format on
diff --git a/source/libs/function/src/builtinsimpl.c b/source/libs/function/src/builtinsimpl.c
index 013c58cc4501c091cc745330b584174064aff404..a8d51905ab68ea40c7f35b8aa09116030dca7038 100644
--- a/source/libs/function/src/builtinsimpl.c
+++ b/source/libs/function/src/builtinsimpl.c
@@ -18,6 +18,7 @@
#include "function.h"
#include "query.h"
#include "querynodes.h"
+#include "streamState.h"
#include "tcompare.h"
#include "tdatablock.h"
#include "tdigest.h"
@@ -46,6 +47,7 @@ typedef struct SSumRes {
uint64_t usum;
double dsum;
};
+ int16_t type;
} SSumRes;
typedef struct SAvgRes {
@@ -56,8 +58,13 @@ typedef struct SAvgRes {
} SAvgRes;
typedef struct STuplePos {
- int32_t pageId;
- int32_t offset;
+ union {
+ struct {
+ int32_t pageId;
+ int32_t offset;
+ };
+ STupleKey streamTupleKey;
+ };
} STuplePos;
typedef struct SMinmaxResInfo {
@@ -67,6 +74,7 @@ typedef struct SMinmaxResInfo {
STuplePos nullTuplePos;
bool nullTupleSaved;
+ int16_t type;
} SMinmaxResInfo;
typedef struct STopBotResItem {
@@ -76,11 +84,11 @@ typedef struct STopBotResItem {
} STopBotResItem;
typedef struct STopBotRes {
- int32_t maxSize;
- int16_t type;
+ int32_t maxSize;
+ int16_t type;
- STuplePos nullTuplePos;
- bool nullTupleSaved;
+ STuplePos nullTuplePos;
+ bool nullTupleSaved;
STopBotResItem* pItems;
} STopBotRes;
@@ -223,14 +231,14 @@ typedef struct SMavgInfo {
} SMavgInfo;
typedef struct SSampleInfo {
- int32_t samples;
- int32_t totalPoints;
- int32_t numSampled;
- uint8_t colType;
- int16_t colBytes;
+ int32_t samples;
+ int32_t totalPoints;
+ int32_t numSampled;
+ uint8_t colType;
+ int16_t colBytes;
- STuplePos nullTuplePos;
- bool nullTupleSaved;
+ STuplePos nullTuplePos;
+ bool nullTupleSaved;
char* data;
STuplePos* tuplePos;
@@ -479,8 +487,7 @@ int32_t functionFinalize(SqlFunctionCtx* pCtx, SSDataBlock* pBlock) {
int32_t firstCombine(SqlFunctionCtx* pDestCtx, SqlFunctionCtx* pSourceCtx) {
SResultRowEntryInfo* pDResInfo = GET_RES_INFO(pDestCtx);
SFirstLastRes* pDBuf = GET_ROWCELL_INTERBUF(pDResInfo);
- int32_t type = pDestCtx->input.pData[0]->info.type;
- int32_t bytes = pDestCtx->input.pData[0]->info.bytes;
+ int32_t bytes = pDBuf->bytes;
SResultRowEntryInfo* pSResInfo = GET_RES_INFO(pSourceCtx);
SFirstLastRes* pSBuf = GET_ROWCELL_INTERBUF(pSResInfo);
@@ -611,6 +618,7 @@ int32_t sumFunction(SqlFunctionCtx* pCtx) {
int32_t type = pInput->pData[0]->info.type;
SSumRes* pSumRes = GET_ROWCELL_INTERBUF(GET_RES_INFO(pCtx));
+ pSumRes->type = type;
if (IS_NULL_TYPE(type)) {
numOfElem = 0;
@@ -734,10 +742,10 @@ int32_t sumInvertFunction(SqlFunctionCtx* pCtx) {
int32_t sumCombine(SqlFunctionCtx* pDestCtx, SqlFunctionCtx* pSourceCtx) {
SResultRowEntryInfo* pDResInfo = GET_RES_INFO(pDestCtx);
SSumRes* pDBuf = GET_ROWCELL_INTERBUF(pDResInfo);
- int32_t type = pDestCtx->input.pData[0]->info.type;
SResultRowEntryInfo* pSResInfo = GET_RES_INFO(pSourceCtx);
SSumRes* pSBuf = GET_ROWCELL_INTERBUF(pSResInfo);
+ int16_t type = pDBuf->type == TSDB_DATA_TYPE_NULL ? pSBuf->type : pDBuf->type;
if (IS_SIGNED_NUMERIC_TYPE(type) || type == TSDB_DATA_TYPE_BOOL) {
pDBuf->isum += pSBuf->isum;
@@ -1066,10 +1074,10 @@ int32_t avgInvertFunction(SqlFunctionCtx* pCtx) {
int32_t avgCombine(SqlFunctionCtx* pDestCtx, SqlFunctionCtx* pSourceCtx) {
SResultRowEntryInfo* pDResInfo = GET_RES_INFO(pDestCtx);
SAvgRes* pDBuf = GET_ROWCELL_INTERBUF(pDResInfo);
- int32_t type = pDestCtx->input.pData[0]->info.type;
SResultRowEntryInfo* pSResInfo = GET_RES_INFO(pSourceCtx);
SAvgRes* pSBuf = GET_ROWCELL_INTERBUF(pSResInfo);
+ int16_t type = pDBuf->type == TSDB_DATA_TYPE_NULL ? pSBuf->type : pDBuf->type;
if (IS_SIGNED_NUMERIC_TYPE(type)) {
pDBuf->sum.isum += pSBuf->sum.isum;
@@ -1146,8 +1154,10 @@ bool getMinmaxFuncEnv(SFunctionNode* UNUSED_PARAM(pFunc), SFuncExecEnv* pEnv) {
return true;
}
-static void doSaveTupleData(SqlFunctionCtx* pCtx, int32_t rowIndex, const SSDataBlock* pSrcBlock, STuplePos* pPos);
-static void doCopyTupleData(SqlFunctionCtx* pCtx, int32_t rowIndex, const SSDataBlock* pSrcBlock, STuplePos* pPos);
+static STuplePos saveTupleData(SqlFunctionCtx* pCtx, int32_t rowIndex, const SSDataBlock* pSrcBlock,
+ const STupleKey* pKey);
+static int32_t updateTupleData(SqlFunctionCtx* pCtx, int32_t rowIndex, const SSDataBlock* pSrcBlock, STuplePos* pPos);
+static const char* loadTupleData(SqlFunctionCtx* pCtx, const STuplePos* pPos);
static int32_t findRowIndex(int32_t start, int32_t num, SColumnInfoData* pCol, const char* tval) {
// the data is loaded, not only the block SMA value
@@ -1159,6 +1169,7 @@ static int32_t findRowIndex(int32_t start, int32_t num, SColumnInfoData* pCol, c
}
ASSERT(0);
+ return 0;
}
int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc) {
@@ -1172,6 +1183,7 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc) {
SResultRowEntryInfo* pResInfo = GET_RES_INFO(pCtx);
SMinmaxResInfo* pBuf = GET_ROWCELL_INTERBUF(pResInfo);
+ pBuf->type = type;
if (IS_NULL_TYPE(type)) {
numOfElems = 0;
@@ -1199,7 +1211,7 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc) {
pBuf->v = *(int64_t*)tval;
if (pCtx->subsidiaries.num > 0) {
index = findRowIndex(pInput->startRowIndex, pInput->numOfRows, pCol, tval);
- doSaveTupleData(pCtx, index, pCtx->pSrcBlock, &pBuf->tuplePos);
+ pBuf->tuplePos = saveTupleData(pCtx, index, pCtx->pSrcBlock, NULL);
}
} else {
if (IS_SIGNED_NUMERIC_TYPE(type)) {
@@ -1211,10 +1223,9 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc) {
*(int64_t*)&pBuf->v = val;
if (pCtx->subsidiaries.num > 0) {
index = findRowIndex(pInput->startRowIndex, pInput->numOfRows, pCol, tval);
- doSaveTupleData(pCtx, index, pCtx->pSrcBlock, &pBuf->tuplePos);
+ pBuf->tuplePos = saveTupleData(pCtx, index, pCtx->pSrcBlock, NULL);
}
}
-
} else if (IS_UNSIGNED_NUMERIC_TYPE(type)) {
uint64_t prev = 0;
GET_TYPED_DATA(prev, uint64_t, type, &pBuf->v);
@@ -1224,7 +1235,7 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc) {
*(uint64_t*)&pBuf->v = val;
if (pCtx->subsidiaries.num > 0) {
index = findRowIndex(pInput->startRowIndex, pInput->numOfRows, pCol, tval);
- doSaveTupleData(pCtx, index, pCtx->pSrcBlock, &pBuf->tuplePos);
+ pBuf->tuplePos = saveTupleData(pCtx, index, pCtx->pSrcBlock, NULL);
}
}
} else if (type == TSDB_DATA_TYPE_DOUBLE) {
@@ -1236,7 +1247,7 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc) {
*(double*)&pBuf->v = val;
if (pCtx->subsidiaries.num > 0) {
index = findRowIndex(pInput->startRowIndex, pInput->numOfRows, pCol, tval);
- doSaveTupleData(pCtx, index, pCtx->pSrcBlock, &pBuf->tuplePos);
+ pBuf->tuplePos = saveTupleData(pCtx, index, pCtx->pSrcBlock, NULL);
}
}
} else if (type == TSDB_DATA_TYPE_FLOAT) {
@@ -1250,7 +1261,7 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc) {
if (pCtx->subsidiaries.num > 0) {
index = findRowIndex(pInput->startRowIndex, pInput->numOfRows, pCol, tval);
- doSaveTupleData(pCtx, index, pCtx->pSrcBlock, &pBuf->tuplePos);
+ pBuf->tuplePos = saveTupleData(pCtx, index, pCtx->pSrcBlock, NULL);
}
}
}
@@ -1275,7 +1286,7 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc) {
if (!pBuf->assign) {
*val = pData[i];
if (pCtx->subsidiaries.num > 0) {
- doSaveTupleData(pCtx, i, pCtx->pSrcBlock, &pBuf->tuplePos);
+ pBuf->tuplePos = saveTupleData(pCtx, i, pCtx->pSrcBlock, NULL);
}
pBuf->assign = true;
} else {
@@ -1287,7 +1298,7 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc) {
if ((*val < pData[i]) ^ isMinFunc) {
*val = pData[i];
if (pCtx->subsidiaries.num > 0) {
- doCopyTupleData(pCtx, i, pCtx->pSrcBlock, &pBuf->tuplePos);
+ updateTupleData(pCtx, i, pCtx->pSrcBlock, &pBuf->tuplePos);
}
}
}
@@ -1306,7 +1317,7 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc) {
if (!pBuf->assign) {
*val = pData[i];
if (pCtx->subsidiaries.num > 0) {
- doSaveTupleData(pCtx, i, pCtx->pSrcBlock, &pBuf->tuplePos);
+ pBuf->tuplePos = saveTupleData(pCtx, i, pCtx->pSrcBlock, NULL);
}
pBuf->assign = true;
} else {
@@ -1318,7 +1329,7 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc) {
if ((*val < pData[i]) ^ isMinFunc) {
*val = pData[i];
if (pCtx->subsidiaries.num > 0) {
- doCopyTupleData(pCtx, i, pCtx->pSrcBlock, &pBuf->tuplePos);
+ updateTupleData(pCtx, i, pCtx->pSrcBlock, &pBuf->tuplePos);
}
}
}
@@ -1337,7 +1348,7 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc) {
if (!pBuf->assign) {
*val = pData[i];
if (pCtx->subsidiaries.num > 0) {
- doSaveTupleData(pCtx, i, pCtx->pSrcBlock, &pBuf->tuplePos);
+ pBuf->tuplePos = saveTupleData(pCtx, i, pCtx->pSrcBlock, NULL);
}
pBuf->assign = true;
} else {
@@ -1349,7 +1360,7 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc) {
if ((*val < pData[i]) ^ isMinFunc) {
*val = pData[i];
if (pCtx->subsidiaries.num > 0) {
- doCopyTupleData(pCtx, i, pCtx->pSrcBlock, &pBuf->tuplePos);
+ updateTupleData(pCtx, i, pCtx->pSrcBlock, &pBuf->tuplePos);
}
}
}
@@ -1368,7 +1379,7 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc) {
if (!pBuf->assign) {
*val = pData[i];
if (pCtx->subsidiaries.num > 0) {
- doSaveTupleData(pCtx, i, pCtx->pSrcBlock, &pBuf->tuplePos);
+ pBuf->tuplePos = saveTupleData(pCtx, i, pCtx->pSrcBlock, NULL);
}
pBuf->assign = true;
} else {
@@ -1380,7 +1391,7 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc) {
if ((*val < pData[i]) ^ isMinFunc) {
*val = pData[i];
if (pCtx->subsidiaries.num > 0) {
- doCopyTupleData(pCtx, i, pCtx->pSrcBlock, &pBuf->tuplePos);
+ updateTupleData(pCtx, i, pCtx->pSrcBlock, &pBuf->tuplePos);
}
}
}
@@ -1401,7 +1412,7 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc) {
if (!pBuf->assign) {
*val = pData[i];
if (pCtx->subsidiaries.num > 0) {
- doSaveTupleData(pCtx, i, pCtx->pSrcBlock, &pBuf->tuplePos);
+ pBuf->tuplePos = saveTupleData(pCtx, i, pCtx->pSrcBlock, NULL);
}
pBuf->assign = true;
} else {
@@ -1413,7 +1424,7 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc) {
if ((*val < pData[i]) ^ isMinFunc) {
*val = pData[i];
if (pCtx->subsidiaries.num > 0) {
- doCopyTupleData(pCtx, i, pCtx->pSrcBlock, &pBuf->tuplePos);
+ updateTupleData(pCtx, i, pCtx->pSrcBlock, &pBuf->tuplePos);
}
}
}
@@ -1432,7 +1443,7 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc) {
if (!pBuf->assign) {
*val = pData[i];
if (pCtx->subsidiaries.num > 0) {
- doSaveTupleData(pCtx, i, pCtx->pSrcBlock, &pBuf->tuplePos);
+ pBuf->tuplePos = saveTupleData(pCtx, i, pCtx->pSrcBlock, NULL);
}
pBuf->assign = true;
} else {
@@ -1444,7 +1455,7 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc) {
if ((*val < pData[i]) ^ isMinFunc) {
*val = pData[i];
if (pCtx->subsidiaries.num > 0) {
- doCopyTupleData(pCtx, i, pCtx->pSrcBlock, &pBuf->tuplePos);
+ updateTupleData(pCtx, i, pCtx->pSrcBlock, &pBuf->tuplePos);
}
}
}
@@ -1463,7 +1474,7 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc) {
if (!pBuf->assign) {
*val = pData[i];
if (pCtx->subsidiaries.num > 0) {
- doSaveTupleData(pCtx, i, pCtx->pSrcBlock, &pBuf->tuplePos);
+ pBuf->tuplePos = saveTupleData(pCtx, i, pCtx->pSrcBlock, NULL);
}
pBuf->assign = true;
} else {
@@ -1475,7 +1486,7 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc) {
if ((*val < pData[i]) ^ isMinFunc) {
*val = pData[i];
if (pCtx->subsidiaries.num > 0) {
- doCopyTupleData(pCtx, i, pCtx->pSrcBlock, &pBuf->tuplePos);
+ updateTupleData(pCtx, i, pCtx->pSrcBlock, &pBuf->tuplePos);
}
}
}
@@ -1494,7 +1505,7 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc) {
if (!pBuf->assign) {
*val = pData[i];
if (pCtx->subsidiaries.num > 0) {
- doSaveTupleData(pCtx, i, pCtx->pSrcBlock, &pBuf->tuplePos);
+ pBuf->tuplePos = saveTupleData(pCtx, i, pCtx->pSrcBlock, NULL);
}
pBuf->assign = true;
} else {
@@ -1506,7 +1517,7 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc) {
if ((*val < pData[i]) ^ isMinFunc) {
*val = pData[i];
if (pCtx->subsidiaries.num > 0) {
- doCopyTupleData(pCtx, i, pCtx->pSrcBlock, &pBuf->tuplePos);
+ updateTupleData(pCtx, i, pCtx->pSrcBlock, &pBuf->tuplePos);
}
}
}
@@ -1526,7 +1537,7 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc) {
if (!pBuf->assign) {
*val = pData[i];
if (pCtx->subsidiaries.num > 0) {
- doSaveTupleData(pCtx, i, pCtx->pSrcBlock, &pBuf->tuplePos);
+ pBuf->tuplePos = saveTupleData(pCtx, i, pCtx->pSrcBlock, NULL);
}
pBuf->assign = true;
} else {
@@ -1538,7 +1549,7 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc) {
if ((*val < pData[i]) ^ isMinFunc) {
*val = pData[i];
if (pCtx->subsidiaries.num > 0) {
- doCopyTupleData(pCtx, i, pCtx->pSrcBlock, &pBuf->tuplePos);
+ updateTupleData(pCtx, i, pCtx->pSrcBlock, &pBuf->tuplePos);
}
}
}
@@ -1547,7 +1558,7 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc) {
}
} else if (type == TSDB_DATA_TYPE_FLOAT) {
float* pData = (float*)pCol->pData;
- double* val = (double*)&pBuf->v;
+ float* val = (float*)&pBuf->v;
for (int32_t i = start; i < start + numOfRows; ++i) {
if ((pCol->hasNull) && colDataIsNull_f(pCol->nullbitmap, i)) {
@@ -1557,7 +1568,7 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc) {
if (!pBuf->assign) {
*val = pData[i];
if (pCtx->subsidiaries.num > 0) {
- doSaveTupleData(pCtx, i, pCtx->pSrcBlock, &pBuf->tuplePos);
+ pBuf->tuplePos = saveTupleData(pCtx, i, pCtx->pSrcBlock, NULL);
}
pBuf->assign = true;
} else {
@@ -1569,7 +1580,7 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc) {
if ((*val < pData[i]) ^ isMinFunc) {
*val = pData[i];
if (pCtx->subsidiaries.num > 0) {
- doCopyTupleData(pCtx, i, pCtx->pSrcBlock, &pBuf->tuplePos);
+ updateTupleData(pCtx, i, pCtx->pSrcBlock, &pBuf->tuplePos);
}
}
}
@@ -1579,8 +1590,8 @@ int32_t doMinMaxHelper(SqlFunctionCtx* pCtx, int32_t isMinFunc) {
}
_min_max_over:
- if (numOfElems == 0 && pCtx->subsidiaries.num > 0 && !pBuf->nullTupleSaved ) {
- doSaveTupleData(pCtx, pInput->startRowIndex, pCtx->pSrcBlock, &pBuf->nullTuplePos);
+ if (numOfElems == 0 && pCtx->subsidiaries.num > 0 && !pBuf->nullTupleSaved) {
+ pBuf->nullTuplePos = saveTupleData(pCtx, pInput->startRowIndex, pCtx->pSrcBlock, NULL);
pBuf->nullTupleSaved = true;
}
return numOfElems;
@@ -1599,8 +1610,8 @@ int32_t maxFunction(SqlFunctionCtx* pCtx) {
}
static void setNullSelectivityValue(SqlFunctionCtx* pCtx, SSDataBlock* pBlock, int32_t rowIndex);
-
-static void setSelectivityValue(SqlFunctionCtx* pCtx, SSDataBlock* pBlock, const STuplePos* pTuplePos, int32_t rIndex);
+static void setSelectivityValue(SqlFunctionCtx* pCtx, SSDataBlock* pBlock, const STuplePos* pTuplePos,
+ int32_t rowIndex);
int32_t minmaxFunctionFinalize(SqlFunctionCtx* pCtx, SSDataBlock* pBlock) {
SResultRowEntryInfo* pEntryInfo = GET_RES_INFO(pCtx);
@@ -1614,7 +1625,7 @@ int32_t minmaxFunctionFinalize(SqlFunctionCtx* pCtx, SSDataBlock* pBlock) {
pEntryInfo->isNullRes = (pEntryInfo->numOfRes == 0) ? 1 : 0;
if (pCol->info.type == TSDB_DATA_TYPE_FLOAT) {
- float v = *(double*)&pRes->v;
+ float v = *(float*)&pRes->v;
colDataAppend(pCol, currentRow, (const char*)&v, pEntryInfo->isNullRes);
} else {
colDataAppend(pCol, currentRow, (const char*)&pRes->v, pEntryInfo->isNullRes);
@@ -1648,34 +1659,29 @@ void setSelectivityValue(SqlFunctionCtx* pCtx, SSDataBlock* pBlock, const STuple
return;
}
- int32_t pageId = pTuplePos->pageId;
- int32_t offset = pTuplePos->offset;
-
- if (pTuplePos->pageId != -1) {
- int32_t numOfCols = pCtx->subsidiaries.num;
- SFilePage* pPage = getBufPage(pCtx->pBuf, pageId);
+ if (pCtx->saveHandle.pBuf != NULL) {
+ if (pTuplePos->pageId != -1) {
+ int32_t numOfCols = pCtx->subsidiaries.num;
+ const char* p = loadTupleData(pCtx, pTuplePos);
- bool* nullList = (bool*)((char*)pPage + offset);
- char* pStart = (char*)(nullList + numOfCols * sizeof(bool));
+ bool* nullList = (bool*)p;
+ char* pStart = (char*)(nullList + numOfCols * sizeof(bool));
- // todo set the offset value to optimize the performance.
- for (int32_t j = 0; j < pCtx->subsidiaries.num; ++j) {
- SqlFunctionCtx* pc = pCtx->subsidiaries.pCtx[j];
+ // todo set the offset value to optimize the performance.
+ for (int32_t j = 0; j < numOfCols; ++j) {
+ SqlFunctionCtx* pc = pCtx->subsidiaries.pCtx[j];
+ int32_t dstSlotId = pc->pExpr->base.resSchema.slotId;
- SFunctParam* pFuncParam = &pc->pExpr->base.pParam[0];
- int32_t dstSlotId = pc->pExpr->base.resSchema.slotId;
-
- SColumnInfoData* pDstCol = taosArrayGet(pBlock->pDataBlock, dstSlotId);
- ASSERT(pc->pExpr->base.resSchema.bytes == pDstCol->info.bytes);
- if (nullList[j]) {
- colDataAppendNULL(pDstCol, rowIndex);
- } else {
- colDataAppend(pDstCol, rowIndex, pStart, false);
+ SColumnInfoData* pDstCol = taosArrayGet(pBlock->pDataBlock, dstSlotId);
+ ASSERT(pc->pExpr->base.resSchema.bytes == pDstCol->info.bytes);
+ if (nullList[j]) {
+ colDataAppendNULL(pDstCol, rowIndex);
+ } else {
+ colDataAppend(pDstCol, rowIndex, pStart, false);
+ }
+ pStart += pDstCol->info.bytes;
}
- pStart += pDstCol->info.bytes;
}
-
- releaseBufPage(pCtx->pBuf, pPage);
}
}
@@ -1705,7 +1711,7 @@ void appendSelectivityValue(SqlFunctionCtx* pCtx, int32_t rowIndex, int32_t pos)
char* pData = colDataGetData(pSrcCol, rowIndex);
// append to dest col
- int32_t dstSlotId = pc->pExpr->base.resSchema.slotId;
+ int32_t dstSlotId = pc->pExpr->base.resSchema.slotId;
SColumnInfoData* pDstCol = taosArrayGet(pCtx->pDstBlock->pDataBlock, dstSlotId);
ASSERT(pc->pExpr->base.resSchema.bytes == pDstCol->info.bytes);
@@ -1716,7 +1722,6 @@ void appendSelectivityValue(SqlFunctionCtx* pCtx, int32_t rowIndex, int32_t pos)
colDataAppend(pDstCol, pos, pData, false);
}
}
-
}
void replaceTupleData(STuplePos* pDestPos, STuplePos* pSourcePos) {
@@ -1727,10 +1732,10 @@ void replaceTupleData(STuplePos* pDestPos, STuplePos* pSourcePos) {
int32_t minMaxCombine(SqlFunctionCtx* pDestCtx, SqlFunctionCtx* pSourceCtx, int32_t isMinFunc) {
SResultRowEntryInfo* pDResInfo = GET_RES_INFO(pDestCtx);
SMinmaxResInfo* pDBuf = GET_ROWCELL_INTERBUF(pDResInfo);
- int32_t type = pDestCtx->input.pData[0]->info.type;
SResultRowEntryInfo* pSResInfo = GET_RES_INFO(pSourceCtx);
SMinmaxResInfo* pSBuf = GET_ROWCELL_INTERBUF(pSResInfo);
+ int16_t type = pDBuf->type == TSDB_DATA_TYPE_NULL ? pSBuf->type : pDBuf->type;
if (IS_FLOAT_TYPE(type)) {
if (pSBuf->assign && ((((*(double*)&pDBuf->v) < (*(double*)&pSBuf->v)) ^ isMinFunc) || !pDBuf->assign)) {
*(double*)&pDBuf->v = *(double*)&pSBuf->v;
@@ -2103,10 +2108,10 @@ int32_t stddevPartialFinalize(SqlFunctionCtx* pCtx, SSDataBlock* pBlock) {
int32_t stddevCombine(SqlFunctionCtx* pDestCtx, SqlFunctionCtx* pSourceCtx) {
SResultRowEntryInfo* pDResInfo = GET_RES_INFO(pDestCtx);
SStddevRes* pDBuf = GET_ROWCELL_INTERBUF(pDResInfo);
- int32_t type = pDestCtx->input.pData[0]->info.type;
SResultRowEntryInfo* pSResInfo = GET_RES_INFO(pSourceCtx);
SStddevRes* pSBuf = GET_ROWCELL_INTERBUF(pSResInfo);
+ int16_t type = pDBuf->type == TSDB_DATA_TYPE_NULL ? pSBuf->type : pDBuf->type;
if (IS_SIGNED_NUMERIC_TYPE(type)) {
pDBuf->isum += pSBuf->isum;
@@ -2594,8 +2599,8 @@ static void apercentileTransferInfo(SAPercentileInfo* pInput, SAPercentileInfo*
memcpy(pHisto, pInput->pHisto, sizeof(SHistogramInfo) + sizeof(SHistBin) * (MAX_HISTOGRAM_BIN + 1));
pHisto->elems = (SHistBin*)((char*)pHisto + sizeof(SHistogramInfo));
- qDebug("%s merge histo, total:%" PRId64 ", entry:%d, %p", __FUNCTION__, pHisto->numOfElems,
- pHisto->numOfEntries, pHisto);
+ qDebug("%s merge histo, total:%" PRId64 ", entry:%d, %p", __FUNCTION__, pHisto->numOfElems, pHisto->numOfEntries,
+ pHisto);
} else {
pHisto->elems = (SHistBin*)((char*)pHisto + sizeof(SHistogramInfo));
qDebug("%s input histogram, elem:%" PRId64 ", entry:%d, %p", __FUNCTION__, pHisto->numOfElems,
@@ -2605,8 +2610,8 @@ static void apercentileTransferInfo(SAPercentileInfo* pInput, SAPercentileInfo*
memcpy(pHisto, pRes, sizeof(SHistogramInfo) + sizeof(SHistBin) * MAX_HISTOGRAM_BIN);
pHisto->elems = (SHistBin*)((char*)pHisto + sizeof(SHistogramInfo));
- qDebug("%s merge histo, total:%" PRId64 ", entry:%d, %p", __FUNCTION__, pHisto->numOfElems,
- pHisto->numOfEntries, pHisto);
+ qDebug("%s merge histo, total:%" PRId64 ", entry:%d, %p", __FUNCTION__, pHisto->numOfElems, pHisto->numOfEntries,
+ pHisto);
tHistogramDestroy(&pRes);
}
}
@@ -2633,8 +2638,8 @@ int32_t apercentileFunctionMerge(SqlFunctionCtx* pCtx) {
}
if (pInfo->algo != APERCT_ALGO_TDIGEST) {
- qDebug("%s after merge, total:%d, numOfEntry:%d, %p", __FUNCTION__, pInfo->pHisto->numOfElems, pInfo->pHisto->numOfEntries,
- pInfo->pHisto);
+ qDebug("%s after merge, total:%d, numOfEntry:%d, %p", __FUNCTION__, pInfo->pHisto->numOfElems,
+ pInfo->pHisto->numOfEntries, pInfo->pHisto);
}
SET_VAL(pResInfo, 1, 1);
@@ -2713,7 +2718,7 @@ int32_t apercentileCombine(SqlFunctionCtx* pDestCtx, SqlFunctionCtx* pSourceCtx)
}
EFuncDataRequired lastDynDataReq(void* pRes, STimeWindow* pTimeWindow) {
- SResultRowEntryInfo* pEntry = (SResultRowEntryInfo*) pRes;
+ SResultRowEntryInfo* pEntry = (SResultRowEntryInfo*)pRes;
// not initialized yet, data is required
if (pEntry == NULL) {
@@ -2756,15 +2761,16 @@ static FORCE_INLINE TSKEY getRowPTs(SColumnInfoData* pTsColInfo, int32_t rowInde
return *(TSKEY*)colDataGetData(pTsColInfo, rowIndex);
}
-static void saveTupleData(const SSDataBlock* pSrcBlock, int32_t rowIndex, SqlFunctionCtx* pCtx, SFirstLastRes* pInfo) {
+static void firstlastSaveTupleData(const SSDataBlock* pSrcBlock, int32_t rowIndex, SqlFunctionCtx* pCtx,
+ SFirstLastRes* pInfo) {
if (pCtx->subsidiaries.num <= 0) {
return;
}
if (!pInfo->hasResult) {
- doSaveTupleData(pCtx, rowIndex, pSrcBlock, &pInfo->pos);
+ pInfo->pos = saveTupleData(pCtx, rowIndex, pSrcBlock, NULL);
} else {
- doCopyTupleData(pCtx, rowIndex, pSrcBlock, &pInfo->pos);
+ updateTupleData(pCtx, rowIndex, pSrcBlock, &pInfo->pos);
}
}
@@ -2778,7 +2784,7 @@ static void doSaveCurrentVal(SqlFunctionCtx* pCtx, int32_t rowIndex, int64_t cur
memcpy(pInfo->buf, pData, pInfo->bytes);
pInfo->ts = currentTs;
- saveTupleData(pCtx->pSrcBlock, rowIndex, pCtx, pInfo);
+ firstlastSaveTupleData(pCtx->pSrcBlock, rowIndex, pCtx, pInfo);
pInfo->hasResult = true;
}
@@ -2799,6 +2805,8 @@ int32_t firstFunction(SqlFunctionCtx* pCtx) {
// All null data column, return directly.
if (pInput->colDataAggIsSet && (pInput->pColumnDataAgg[0]->numOfNull == pInput->totalRows)) {
ASSERT(pInputCol->hasNull == true);
+ // save selectivity value for column consisted of all null values
+ firstlastSaveTupleData(pCtx->pSrcBlock, pInput->startRowIndex, pCtx, pInfo);
return 0;
}
@@ -2875,7 +2883,10 @@ int32_t firstFunction(SqlFunctionCtx* pCtx) {
}
}
#endif
-
+ if (numOfElems == 0) {
+ // save selectivity value for column consisted of all null values
+ firstlastSaveTupleData(pCtx->pSrcBlock, pInput->startRowIndex, pCtx, pInfo);
+ }
SET_VAL(pResInfo, numOfElems, 1);
return TSDB_CODE_SUCCESS;
}
@@ -2896,6 +2907,8 @@ int32_t lastFunction(SqlFunctionCtx* pCtx) {
// All null data column, return directly.
if (pInput->colDataAggIsSet && (pInput->pColumnDataAgg[0]->numOfNull == pInput->totalRows)) {
ASSERT(pInputCol->hasNull == true);
+ // save selectivity value for column consisted of all null values
+ firstlastSaveTupleData(pCtx->pSrcBlock, pInput->startRowIndex, pCtx, pInfo);
return 0;
}
@@ -2956,15 +2969,17 @@ int32_t lastFunction(SqlFunctionCtx* pCtx) {
}
}
#endif
-
+ if (numOfElems == 0) {
+ // save selectivity value for column consisted of all null values
+ firstlastSaveTupleData(pCtx->pSrcBlock, pInput->startRowIndex, pCtx, pInfo);
+ }
SET_VAL(pResInfo, numOfElems, 1);
return TSDB_CODE_SUCCESS;
}
-static void firstLastTransferInfo(SqlFunctionCtx* pCtx, SFirstLastRes* pInput, SFirstLastRes* pOutput, bool isFirst) {
+static void firstLastTransferInfo(SqlFunctionCtx* pCtx, SFirstLastRes* pInput, SFirstLastRes* pOutput, bool isFirst, int32_t rowIndex) {
SInputColumnInfoData* pColInfo = &pCtx->input;
- int32_t start = pColInfo->startRowIndex;
if (pOutput->hasResult) {
if (isFirst) {
if (pInput->ts > pOutput->ts) {
@@ -2982,7 +2997,7 @@ static void firstLastTransferInfo(SqlFunctionCtx* pCtx, SFirstLastRes* pInput, S
pOutput->bytes = pInput->bytes;
memcpy(pOutput->buf, pInput->buf, pOutput->bytes);
- saveTupleData(pCtx->pSrcBlock, start, pCtx, pOutput);
+ firstlastSaveTupleData(pCtx->pSrcBlock, rowIndex, pCtx, pOutput);
pOutput->hasResult = true;
}
@@ -3000,7 +3015,7 @@ static int32_t firstLastFunctionMergeImpl(SqlFunctionCtx* pCtx, bool isFirstQuer
for (int32_t i = start; i < start + pInput->numOfRows; ++i) {
char* data = colDataGetData(pCol, i);
SFirstLastRes* pInputInfo = (SFirstLastRes*)varDataVal(data);
- firstLastTransferInfo(pCtx, pInputInfo, pInfo, isFirstQuery);
+ firstLastTransferInfo(pCtx, pInputInfo, pInfo, isFirstQuery, i);
if (!numOfElems) {
numOfElems = pInputInfo->hasResult ? 1 : 0;
}
@@ -3056,8 +3071,7 @@ int32_t firstLastPartialFinalize(SqlFunctionCtx* pCtx, SSDataBlock* pBlock) {
int32_t lastCombine(SqlFunctionCtx* pDestCtx, SqlFunctionCtx* pSourceCtx) {
SResultRowEntryInfo* pDResInfo = GET_RES_INFO(pDestCtx);
SFirstLastRes* pDBuf = GET_ROWCELL_INTERBUF(pDResInfo);
- int32_t type = pDestCtx->input.pData[0]->info.type;
- int32_t bytes = pDestCtx->input.pData[0]->info.bytes;
+ int32_t bytes = pDBuf->bytes;
SResultRowEntryInfo* pSResInfo = GET_RES_INFO(pSourceCtx);
SFirstLastRes* pSBuf = GET_ROWCELL_INTERBUF(pSResInfo);
@@ -3087,7 +3101,7 @@ static void doSaveLastrow(SqlFunctionCtx* pCtx, char* pData, int32_t rowIndex, i
}
pInfo->ts = cts;
- saveTupleData(pCtx->pSrcBlock, rowIndex, pCtx, pInfo);
+ firstlastSaveTupleData(pCtx->pSrcBlock, rowIndex, pCtx, pInfo);
pInfo->hasResult = true;
}
@@ -3180,7 +3194,7 @@ bool diffFunctionSetup(SqlFunctionCtx* pCtx, SResultRowEntryInfo* pResInfo) {
static void doSetPrevVal(SDiffInfo* pDiffInfo, int32_t type, const char* pv) {
switch (type) {
case TSDB_DATA_TYPE_BOOL:
- pDiffInfo->prev.i64 = *(bool*)pv? 1:0;
+ pDiffInfo->prev.i64 = *(bool*)pv ? 1 : 0;
break;
case TSDB_DATA_TYPE_TINYINT:
pDiffInfo->prev.i64 = *(int8_t*)pv;
@@ -3420,7 +3434,7 @@ int32_t topFunction(SqlFunctionCtx* pCtx) {
}
if (numOfElems == 0 && pCtx->subsidiaries.num > 0 && !pRes->nullTupleSaved) {
- doSaveTupleData(pCtx, pInput->startRowIndex, pCtx->pSrcBlock, &pRes->nullTuplePos);
+ pRes->nullTuplePos = saveTupleData(pCtx, pInput->startRowIndex, pCtx->pSrcBlock, NULL);
pRes->nullTupleSaved = true;
}
return TSDB_CODE_SUCCESS;
@@ -3448,7 +3462,7 @@ int32_t bottomFunction(SqlFunctionCtx* pCtx) {
}
if (numOfElems == 0 && pCtx->subsidiaries.num > 0 && !pRes->nullTupleSaved) {
- doSaveTupleData(pCtx, pInput->startRowIndex, pCtx->pSrcBlock, &pRes->nullTuplePos);
+ pRes->nullTuplePos = saveTupleData(pCtx, pInput->startRowIndex, pCtx->pSrcBlock, NULL);
pRes->nullTupleSaved = true;
}
@@ -3500,7 +3514,7 @@ void doAddIntoResult(SqlFunctionCtx* pCtx, void* pData, int32_t rowIndex, SSData
// save the data of this tuple
if (pCtx->subsidiaries.num > 0) {
- doSaveTupleData(pCtx, rowIndex, pSrcBlock, &pItem->tuplePos);
+ pItem->tuplePos = saveTupleData(pCtx, rowIndex, pSrcBlock, NULL);
}
#ifdef BUF_PAGE_DEBUG
qDebug("page_saveTuple i:%d, item:%p,pageId:%d, offset:%d\n", pEntryInfo->numOfRes, pItem, pItem->tuplePos.pageId,
@@ -3524,7 +3538,7 @@ void doAddIntoResult(SqlFunctionCtx* pCtx, void* pData, int32_t rowIndex, SSData
// save the data of this tuple by over writing the old data
if (pCtx->subsidiaries.num > 0) {
- doCopyTupleData(pCtx, rowIndex, pSrcBlock, &pItem->tuplePos);
+ updateTupleData(pCtx, rowIndex, pSrcBlock, &pItem->tuplePos);
}
#ifdef BUF_PAGE_DEBUG
qDebug("page_copyTuple pageId:%d, offset:%d", pItem->tuplePos.pageId, pItem->tuplePos.offset);
@@ -3541,38 +3555,14 @@ void doAddIntoResult(SqlFunctionCtx* pCtx, void* pData, int32_t rowIndex, SSData
* |(n columns, one bit for each column)| src column #1| src column #2|
* +------------------------------------+--------------+--------------+
*/
-void doSaveTupleData(SqlFunctionCtx* pCtx, int32_t rowIndex, const SSDataBlock* pSrcBlock, STuplePos* pPos) {
- SFilePage* pPage = NULL;
-
- // todo refactor: move away
- int32_t completeRowSize = pCtx->subsidiaries.num * sizeof(bool);
- for (int32_t j = 0; j < pCtx->subsidiaries.num; ++j) {
- SqlFunctionCtx* pc = pCtx->subsidiaries.pCtx[j];
- completeRowSize += pc->pExpr->base.resSchema.bytes;
- }
-
- if (pCtx->curBufPage == -1) {
- pPage = getNewBufPage(pCtx->pBuf, 0, &pCtx->curBufPage);
- pPage->num = sizeof(SFilePage);
- } else {
- pPage = getBufPage(pCtx->pBuf, pCtx->curBufPage);
- if (pPage->num + completeRowSize > getBufPageSize(pCtx->pBuf)) {
- // current page is all used, let's prepare a new buffer page
- releaseBufPage(pCtx->pBuf, pPage);
- pPage = getNewBufPage(pCtx->pBuf, 0, &pCtx->curBufPage);
- pPage->num = sizeof(SFilePage);
- }
- }
+void* serializeTupleData(const SSDataBlock* pSrcBlock, int32_t rowIndex, SSubsidiaryResInfo* pSubsidiaryies,
+ char* buf) {
+ char* nullList = buf;
+ char* pStart = (char*)(nullList + sizeof(bool) * pSubsidiaryies->num);
- pPos->pageId = pCtx->curBufPage;
- pPos->offset = pPage->num;
-
- // keep the current row data, extract method
int32_t offset = 0;
- bool* nullList = (bool*)((char*)pPage + pPage->num);
- char* pStart = (char*)(nullList + sizeof(bool) * pCtx->subsidiaries.num);
- for (int32_t i = 0; i < pCtx->subsidiaries.num; ++i) {
- SqlFunctionCtx* pc = pCtx->subsidiaries.pCtx[i];
+ for (int32_t i = 0; i < pSubsidiaryies->num; ++i) {
+ SqlFunctionCtx* pc = pSubsidiaryies->pCtx[i];
SFunctParam* pFuncParam = &pc->pExpr->base.pParam[0];
int32_t srcSlotId = pFuncParam->pCol->slotId;
@@ -3593,57 +3583,103 @@ void doSaveTupleData(SqlFunctionCtx* pCtx, int32_t rowIndex, const SSDataBlock*
offset += pCol->info.bytes;
}
- pPage->num += completeRowSize;
-
- setBufPageDirty(pPage, true);
- releaseBufPage(pCtx->pBuf, pPage);
-#ifdef BUF_PAGE_DEBUG
- qDebug("page_saveTuple pos:%p,pageId:%d, offset:%d\n", pPos, pPos->pageId, pPos->offset);
-#endif
+ return buf;
}
-void doCopyTupleData(SqlFunctionCtx* pCtx, int32_t rowIndex, const SSDataBlock* pSrcBlock, STuplePos* pPos) {
- SFilePage* pPage = getBufPage(pCtx->pBuf, pPos->pageId);
+static STuplePos doSaveTupleData(SSerializeDataHandle* pHandle, const void* pBuf, size_t length,
+ const STupleKey* pKey) {
+ STuplePos p = {0};
+ if (pHandle->pBuf != NULL) {
+ SFilePage* pPage = NULL;
- int32_t numOfCols = pCtx->subsidiaries.num;
-
- bool* nullList = (bool*)((char*)pPage + pPos->offset);
- char* pStart = (char*)(nullList + numOfCols * sizeof(bool));
+ if (pHandle->currentPage == -1) {
+ pPage = getNewBufPage(pHandle->pBuf, &pHandle->currentPage);
+ pPage->num = sizeof(SFilePage);
+ } else {
+ pPage = getBufPage(pHandle->pBuf, pHandle->currentPage);
+ if (pPage->num + length > getBufPageSize(pHandle->pBuf)) {
+ // current page is all used, let's prepare a new buffer page
+ releaseBufPage(pHandle->pBuf, pPage);
+ pPage = getNewBufPage(pHandle->pBuf, &pHandle->currentPage);
+ pPage->num = sizeof(SFilePage);
+ }
+ }
- int32_t offset = 0;
- for (int32_t i = 0; i < numOfCols; ++i) {
- SqlFunctionCtx* pc = pCtx->subsidiaries.pCtx[i];
- SFunctParam* pFuncParam = &pc->pExpr->base.pParam[0];
- int32_t srcSlotId = pFuncParam->pCol->slotId;
+ p = (STuplePos){.pageId = pHandle->currentPage, .offset = pPage->num};
+ memcpy(pPage->data + pPage->num, pBuf, length);
- SColumnInfoData* pCol = taosArrayGet(pSrcBlock->pDataBlock, srcSlotId);
- if ((nullList[i] = colDataIsNull_s(pCol, rowIndex)) == true) {
- offset += pCol->info.bytes;
- continue;
+ pPage->num += length;
+ setBufPageDirty(pPage, true);
+ releaseBufPage(pHandle->pBuf, pPage);
+ } else {
+ // other tuple save policy
+ if (streamStateFuncPut(pHandle->pState, pKey, pBuf, length) < 0) {
+ ASSERT(0);
}
+ p.streamTupleKey = *pKey;
+ }
- char* p = colDataGetData(pCol, rowIndex);
- if (IS_VAR_DATA_TYPE(pCol->info.type)) {
- memcpy(pStart + offset, p, (pCol->info.type == TSDB_DATA_TYPE_JSON) ? getJsonValueLen(p) : varDataTLen(p));
- } else {
- memcpy(pStart + offset, p, pCol->info.bytes);
+ return p;
+}
+
+STuplePos saveTupleData(SqlFunctionCtx* pCtx, int32_t rowIndex, const SSDataBlock* pSrcBlock, const STupleKey* pKey) {
+ if (pCtx->subsidiaries.rowLen == 0) {
+ int32_t rowLen = 0;
+ for (int32_t j = 0; j < pCtx->subsidiaries.num; ++j) {
+ SqlFunctionCtx* pc = pCtx->subsidiaries.pCtx[j];
+ rowLen += pc->pExpr->base.resSchema.bytes;
}
- offset += pCol->info.bytes;
+ pCtx->subsidiaries.rowLen = rowLen + pCtx->subsidiaries.num * sizeof(bool);
+ pCtx->subsidiaries.buf = taosMemoryMalloc(pCtx->subsidiaries.rowLen);
}
- setBufPageDirty(pPage, true);
- releaseBufPage(pCtx->pBuf, pPage);
-#ifdef BUF_PAGE_DEBUG
- qDebug("page_copyTuple pos:%p, pageId:%d, offset:%d", pPos, pPos->pageId, pPos->offset);
-#endif
+ char* buf = serializeTupleData(pSrcBlock, rowIndex, &pCtx->subsidiaries, pCtx->subsidiaries.buf);
+ return doSaveTupleData(&pCtx->saveHandle, buf, pCtx->subsidiaries.rowLen, pKey);
+}
+
+static int32_t doUpdateTupleData(SSerializeDataHandle* pHandle, const void* pBuf, size_t length, STuplePos* pPos) {
+ if (pHandle->pBuf != NULL) {
+ SFilePage* pPage = getBufPage(pHandle->pBuf, pPos->pageId);
+ memcpy(pPage->data + pPos->offset, pBuf, length);
+ setBufPageDirty(pPage, true);
+ releaseBufPage(pHandle->pBuf, pPage);
+ } else {
+ streamStateFuncPut(pHandle->pState, &pPos->streamTupleKey, pBuf, length);
+ }
+
+ return TSDB_CODE_SUCCESS;
+}
+
+static int32_t updateTupleData(SqlFunctionCtx* pCtx, int32_t rowIndex, const SSDataBlock* pSrcBlock, STuplePos* pPos) {
+ char* buf = serializeTupleData(pSrcBlock, rowIndex, &pCtx->subsidiaries, pCtx->subsidiaries.buf);
+ doUpdateTupleData(&pCtx->saveHandle, buf, pCtx->subsidiaries.rowLen, pPos);
+ return TSDB_CODE_SUCCESS;
+}
+
+static char* doLoadTupleData(SSerializeDataHandle* pHandle, const STuplePos* pPos) {
+ if (pHandle->pBuf != NULL) {
+ SFilePage* pPage = getBufPage(pHandle->pBuf, pPos->pageId);
+ char* p = pPage->data + pPos->offset;
+ releaseBufPage(pHandle->pBuf, pPage);
+ return p;
+ } else {
+ void* value = NULL;
+ int32_t vLen;
+ streamStateFuncGet(pHandle->pState, &pPos->streamTupleKey, &value, &vLen);
+ return (char*)value;
+ }
+}
+
+static const char* loadTupleData(SqlFunctionCtx* pCtx, const STuplePos* pPos) {
+ return doLoadTupleData(&pCtx->saveHandle, pPos);
}
int32_t topBotFinalize(SqlFunctionCtx* pCtx, SSDataBlock* pBlock) {
SResultRowEntryInfo* pEntryInfo = GET_RES_INFO(pCtx);
STopBotRes* pRes = getTopBotOutputInfo(pCtx);
- int16_t type = pCtx->input.pData[0]->info.type;
+ int16_t type = pCtx->pExpr->base.resSchema.type;
int32_t slotId = pCtx->pExpr->base.resSchema.slotId;
SColumnInfoData* pCol = taosArrayGet(pBlock->pDataBlock, slotId);
@@ -3711,9 +3747,9 @@ void addResult(SqlFunctionCtx* pCtx, STopBotResItem* pSourceItem, int16_t type,
}
int32_t topCombine(SqlFunctionCtx* pDestCtx, SqlFunctionCtx* pSourceCtx) {
- int32_t type = pDestCtx->input.pData[0]->info.type;
SResultRowEntryInfo* pSResInfo = GET_RES_INFO(pSourceCtx);
STopBotRes* pSBuf = getTopBotOutputInfo(pSourceCtx);
+ int16_t type = pSBuf->type;
for (int32_t i = 0; i < pSResInfo->numOfRes; i++) {
addResult(pDestCtx, pSBuf->pItems + i, type, true);
}
@@ -3721,9 +3757,9 @@ int32_t topCombine(SqlFunctionCtx* pDestCtx, SqlFunctionCtx* pSourceCtx) {
}
int32_t bottomCombine(SqlFunctionCtx* pDestCtx, SqlFunctionCtx* pSourceCtx) {
- int32_t type = pDestCtx->input.pData[0]->info.type;
SResultRowEntryInfo* pSResInfo = GET_RES_INFO(pSourceCtx);
STopBotRes* pSBuf = getTopBotOutputInfo(pSourceCtx);
+ int16_t type = pSBuf->type;
for (int32_t i = 0; i < pSResInfo->numOfRes; i++) {
addResult(pDestCtx, pSBuf->pItems + i, type, false);
}
@@ -3788,8 +3824,6 @@ int32_t spreadFunction(SqlFunctionCtx* pCtx) {
SColumnInfoData* pCol = pInput->pData[0];
int32_t start = pInput->startRowIndex;
- int32_t numOfRows = pInput->numOfRows;
-
// check the valid data one by one
for (int32_t i = start; i < pInput->numOfRows + start; ++i) {
if (colDataIsNull_f(pCol->nullbitmap, i)) {
@@ -3973,8 +4007,8 @@ int32_t elapsedFunction(SqlFunctionCtx* pCtx) {
}
if (pCtx->end.key == INT64_MIN) {
- pInfo->min = (pInfo->min > ptsList[start + pInput->numOfRows - 1]) ?
- ptsList[start + pInput->numOfRows - 1] : pInfo->min;
+ pInfo->min =
+ (pInfo->min > ptsList[start + pInput->numOfRows - 1]) ? ptsList[start + pInput->numOfRows - 1] : pInfo->min;
} else {
pInfo->min = pCtx->end.key;
}
@@ -3986,8 +4020,8 @@ int32_t elapsedFunction(SqlFunctionCtx* pCtx) {
}
if (pCtx->end.key == INT64_MIN) {
- pInfo->max = (pInfo->max < ptsList[start + pInput->numOfRows - 1]) ?
- ptsList[start + pInput->numOfRows - 1] : pInfo->max;
+ pInfo->max =
+ (pInfo->max < ptsList[start + pInput->numOfRows - 1]) ? ptsList[start + pInput->numOfRows - 1] : pInfo->max;
} else {
pInfo->max = pCtx->end.key + 1;
}
@@ -4694,7 +4728,7 @@ int32_t stateCountFunction(SqlFunctionCtx* pCtx) {
colDataAppendNULL(pOutput, i);
// handle selectivity
if (pCtx->subsidiaries.num > 0) {
- appendSelectivityValue(pCtx, i, i);
+ appendSelectivityValue(pCtx, i, pCtx->offset + numOfElems - 1);
}
continue;
}
@@ -4707,11 +4741,11 @@ int32_t stateCountFunction(SqlFunctionCtx* pCtx) {
} else {
pInfo->count = 0;
}
- colDataAppend(pOutput, i, (char*)&output, false);
+ colDataAppend(pOutput, pCtx->offset + numOfElems - 1, (char*)&output, false);
// handle selectivity
if (pCtx->subsidiaries.num > 0) {
- appendSelectivityValue(pCtx, i, i);
+ appendSelectivityValue(pCtx, i, pCtx->offset + numOfElems - 1);
}
}
@@ -4747,7 +4781,7 @@ int32_t stateDurationFunction(SqlFunctionCtx* pCtx) {
colDataAppendNULL(pOutput, i);
// handle selectivity
if (pCtx->subsidiaries.num > 0) {
- appendSelectivityValue(pCtx, i, i);
+ appendSelectivityValue(pCtx, i, pCtx->offset + numOfElems - 1);
}
continue;
}
@@ -4764,11 +4798,11 @@ int32_t stateDurationFunction(SqlFunctionCtx* pCtx) {
} else {
pInfo->durationStart = 0;
}
- colDataAppend(pOutput, i, (char*)&output, false);
+ colDataAppend(pOutput, pCtx->offset + numOfElems - 1, (char*)&output, false);
// handle selectivity
if (pCtx->subsidiaries.num > 0) {
- appendSelectivityValue(pCtx, i, i);
+ appendSelectivityValue(pCtx, i, pCtx->offset + numOfElems - 1);
}
}
@@ -4964,7 +4998,7 @@ static void doReservoirSample(SqlFunctionCtx* pCtx, SSampleInfo* pInfo, char* da
if (pInfo->numSampled < pInfo->samples) {
sampleAssignResult(pInfo, data, pInfo->numSampled);
if (pCtx->subsidiaries.num > 0) {
- doSaveTupleData(pCtx, index, pCtx->pSrcBlock, &pInfo->tuplePos[pInfo->numSampled]);
+ pInfo->tuplePos[pInfo->numSampled] = saveTupleData(pCtx, index, pCtx->pSrcBlock, NULL);
}
pInfo->numSampled++;
} else {
@@ -4972,7 +5006,7 @@ static void doReservoirSample(SqlFunctionCtx* pCtx, SSampleInfo* pInfo, char* da
if (j < pInfo->samples) {
sampleAssignResult(pInfo, data, j);
if (pCtx->subsidiaries.num > 0) {
- doCopyTupleData(pCtx, index, pCtx->pSrcBlock, &pInfo->tuplePos[j]);
+ updateTupleData(pCtx, index, pCtx->pSrcBlock, &pInfo->tuplePos[j]);
}
}
}
@@ -4995,7 +5029,7 @@ int32_t sampleFunction(SqlFunctionCtx* pCtx) {
}
if (pInfo->numSampled == 0 && pCtx->subsidiaries.num > 0 && !pInfo->nullTupleSaved) {
- doSaveTupleData(pCtx, pInput->startRowIndex, pCtx->pSrcBlock, &pInfo->nullTuplePos);
+ pInfo->nullTuplePos = saveTupleData(pCtx, pInput->startRowIndex, pCtx->pSrcBlock, NULL);
pInfo->nullTupleSaved = true;
}
@@ -5264,12 +5298,12 @@ bool modeFunctionSetup(SqlFunctionCtx* pCtx, SResultRowEntryInfo* pResInfo) {
}
static void doModeAdd(SModeInfo* pInfo, char* data) {
- int32_t hashKeyBytes = IS_VAR_DATA_TYPE(pInfo->colType) ? varDataTLen(data) : pInfo->colBytes;
+ int32_t hashKeyBytes = IS_STR_DATA_TYPE(pInfo->colType) ? varDataTLen(data) : pInfo->colBytes;
SModeItem** pHashItem = taosHashGet(pInfo->pHash, data, hashKeyBytes);
if (pHashItem == NULL) {
int32_t size = sizeof(SModeItem) + pInfo->colBytes;
SModeItem* pItem = (SModeItem*)(pInfo->pItems + pInfo->numOfPoints * size);
- memcpy(pItem->data, data, pInfo->colBytes);
+ memcpy(pItem->data, data, hashKeyBytes);
pItem->count += 1;
taosHashPut(pInfo->pHash, data, hashKeyBytes, &pItem, sizeof(SModeItem*));
@@ -5381,8 +5415,8 @@ int32_t twaFunction(SqlFunctionCtx* pCtx) {
int32_t i = pInput->startRowIndex;
if (pCtx->start.key != INT64_MIN) {
- ASSERT((pCtx->start.key < tsList[i] && pCtx->order == TSDB_ORDER_ASC) ||
- (pCtx->start.key > tsList[i] && pCtx->order == TSDB_ORDER_DESC));
+ // ASSERT((pCtx->start.key < tsList[i] && pCtx->order == TSDB_ORDER_ASC) ||
+ // (pCtx->start.key > tsList[i] && pCtx->order == TSDB_ORDER_DESC));
ASSERT(last->key == INT64_MIN);
for (; i < pInput->numOfRows + pInput->startRowIndex; ++i) {
@@ -5430,6 +5464,10 @@ int32_t twaFunction(SqlFunctionCtx* pCtx) {
numOfElems++;
INIT_INTP_POINT(st, tsList[i], val[i]);
+ if (pInfo->p.key == st.key) {
+ return TSDB_CODE_FUNC_DUP_TIMESTAMP;
+ }
+
pInfo->dOutput += twa_get_area(pInfo->p, st);
pInfo->p = st;
}
@@ -5445,6 +5483,10 @@ int32_t twaFunction(SqlFunctionCtx* pCtx) {
numOfElems++;
INIT_INTP_POINT(st, tsList[i], val[i]);
+ if (pInfo->p.key == st.key) {
+ return TSDB_CODE_FUNC_DUP_TIMESTAMP;
+ }
+
pInfo->dOutput += twa_get_area(pInfo->p, st);
pInfo->p = st;
}
@@ -5459,6 +5501,10 @@ int32_t twaFunction(SqlFunctionCtx* pCtx) {
numOfElems++;
INIT_INTP_POINT(st, tsList[i], val[i]);
+ if (pInfo->p.key == st.key) {
+ return TSDB_CODE_FUNC_DUP_TIMESTAMP;
+ }
+
pInfo->dOutput += twa_get_area(pInfo->p, st);
pInfo->p = st;
}
@@ -5473,6 +5519,10 @@ int32_t twaFunction(SqlFunctionCtx* pCtx) {
numOfElems++;
INIT_INTP_POINT(st, tsList[i], val[i]);
+ if (pInfo->p.key == st.key) {
+ return TSDB_CODE_FUNC_DUP_TIMESTAMP;
+ }
+
pInfo->dOutput += twa_get_area(pInfo->p, st);
pInfo->p = st;
}
@@ -5487,6 +5537,10 @@ int32_t twaFunction(SqlFunctionCtx* pCtx) {
numOfElems++;
INIT_INTP_POINT(st, tsList[i], val[i]);
+ if (pInfo->p.key == st.key) {
+ return TSDB_CODE_FUNC_DUP_TIMESTAMP;
+ }
+
pInfo->dOutput += twa_get_area(pInfo->p, st);
pInfo->p = st;
}
@@ -5501,6 +5555,10 @@ int32_t twaFunction(SqlFunctionCtx* pCtx) {
numOfElems++;
INIT_INTP_POINT(st, tsList[i], val[i]);
+ if (pInfo->p.key == st.key) {
+ return TSDB_CODE_FUNC_DUP_TIMESTAMP;
+ }
+
pInfo->dOutput += twa_get_area(pInfo->p, st);
pInfo->p = st;
}
@@ -5515,6 +5573,10 @@ int32_t twaFunction(SqlFunctionCtx* pCtx) {
numOfElems++;
INIT_INTP_POINT(st, tsList[i], val[i]);
+ if (pInfo->p.key == st.key) {
+ return TSDB_CODE_FUNC_DUP_TIMESTAMP;
+ }
+
pInfo->dOutput += twa_get_area(pInfo->p, st);
pInfo->p = st;
}
@@ -5529,6 +5591,10 @@ int32_t twaFunction(SqlFunctionCtx* pCtx) {
numOfElems++;
INIT_INTP_POINT(st, tsList[i], val[i]);
+ if (pInfo->p.key == st.key) {
+ return TSDB_CODE_FUNC_DUP_TIMESTAMP;
+ }
+
pInfo->dOutput += twa_get_area(pInfo->p, st);
pInfo->p = st;
}
@@ -5543,6 +5609,10 @@ int32_t twaFunction(SqlFunctionCtx* pCtx) {
numOfElems++;
INIT_INTP_POINT(st, tsList[i], val[i]);
+ if (pInfo->p.key == st.key) {
+ return TSDB_CODE_FUNC_DUP_TIMESTAMP;
+ }
+
pInfo->dOutput += twa_get_area(pInfo->p, st);
pInfo->p = st;
}
@@ -5557,6 +5627,10 @@ int32_t twaFunction(SqlFunctionCtx* pCtx) {
numOfElems++;
INIT_INTP_POINT(st, tsList[i], val[i]);
+ if (pInfo->p.key == st.key) {
+ return TSDB_CODE_FUNC_DUP_TIMESTAMP;
+ }
+
pInfo->dOutput += twa_get_area(pInfo->p, st);
pInfo->p = st;
}
diff --git a/source/libs/function/src/functionMgt.c b/source/libs/function/src/functionMgt.c
index 152a970c48eb5fb374f8806062d264e53b88f664..ca8ddbc60acc987735fc7b4f5a43c852e165fd20 100644
--- a/source/libs/function/src/functionMgt.c
+++ b/source/libs/function/src/functionMgt.c
@@ -26,11 +26,6 @@ typedef struct SFuncMgtService {
SHashObj* pFuncNameHashTable;
} SFuncMgtService;
-typedef struct SUdfInfo {
- SDataType outputDt;
- int8_t funcType;
-} SUdfInfo;
-
static SFuncMgtService gFunMgtService;
static TdThreadOnce functionHashTableInit = PTHREAD_ONCE_INIT;
static int32_t initFunctionCode = 0;
@@ -101,6 +96,14 @@ bool fmIsBuiltinFunc(const char* pFunc) {
return NULL != taosHashGet(gFunMgtService.pFuncNameHashTable, pFunc, strlen(pFunc));
}
+EFunctionType fmGetFuncType(const char* pFunc) {
+ void* pVal = taosHashGet(gFunMgtService.pFuncNameHashTable, pFunc, strlen(pFunc));
+ if (NULL != pVal) {
+ return funcMgtBuiltins[*(int32_t*)pVal].type;
+ }
+ return FUNCTION_TYPE_UDF;
+}
+
EFuncDataRequired fmFuncDataRequired(SFunctionNode* pFunc, STimeWindow* pTimeWindow) {
if (fmIsUserDefinedFunc(pFunc->funcId) || pFunc->funcId < 0 || pFunc->funcId >= funcMgtBuiltinsNum) {
return FUNC_DATA_REQUIRED_DATA_LOAD;
@@ -221,6 +224,8 @@ bool fmIsInterpFunc(int32_t funcId) {
return FUNCTION_TYPE_INTERP == funcMgtBuiltins[funcId].type;
}
+bool fmIsInterpPseudoColumnFunc(int32_t funcId) { return isSpecificClassifyFunc(funcId, FUNC_MGT_INTERP_PC_FUNC); }
+
bool fmIsLastRowFunc(int32_t funcId) {
if (funcId < 0 || funcId >= funcMgtBuiltinsNum) {
return false;
diff --git a/source/libs/function/src/tpercentile.c b/source/libs/function/src/tpercentile.c
index 517253dc01691754425bd93c40bfef2a2750eed5..4c58c0abe50e5784314445934618265231d4805a 100644
--- a/source/libs/function/src/tpercentile.c
+++ b/source/libs/function/src/tpercentile.c
@@ -33,13 +33,13 @@ static SFilePage *loadDataFromFilePage(tMemBucket *pMemBucket, int32_t slotIdx)
SFilePage *buffer = (SFilePage *)taosMemoryCalloc(1, pMemBucket->bytes * pMemBucket->pSlots[slotIdx].info.size + sizeof(SFilePage));
int32_t groupId = getGroupId(pMemBucket->numOfSlots, slotIdx, pMemBucket->times);
- SIDList list = getDataBufPagesIdList(pMemBucket->pBuffer, groupId);
+ SArray* pIdList = *(SArray**)taosHashGet(pMemBucket->groupPagesMap, &groupId, sizeof(groupId));
int32_t offset = 0;
- for(int32_t i = 0; i < list->size; ++i) {
- struct SPageInfo* pgInfo = *(struct SPageInfo**) taosArrayGet(list, i);
+ for(int32_t i = 0; i < taosArrayGetSize(pIdList); ++i) {
+ int32_t* pageId = taosArrayGet(pIdList, i);
- SFilePage* pg = getBufPage(pMemBucket->pBuffer, getPageId(pgInfo));
+ SFilePage* pg = getBufPage(pMemBucket->pBuffer, *pageId);
memcpy(buffer->data + offset, pg->data, (size_t)(pg->num * pMemBucket->bytes));
offset += (int32_t)(pg->num * pMemBucket->bytes);
@@ -97,11 +97,11 @@ double findOnlyResult(tMemBucket *pMemBucket) {
}
int32_t groupId = getGroupId(pMemBucket->numOfSlots, i, pMemBucket->times);
- SIDList list = getDataBufPagesIdList(pMemBucket->pBuffer, groupId);
+ SArray* list = *(SArray**)taosHashGet(pMemBucket->groupPagesMap, &groupId, sizeof(groupId));
assert(list->size == 1);
- struct SPageInfo* pgInfo = (struct SPageInfo*) taosArrayGetP(list, 0);
- SFilePage* pPage = getBufPage(pMemBucket->pBuffer, getPageId(pgInfo));
+ int32_t* pageId = taosArrayGet(list, 0);
+ SFilePage* pPage = getBufPage(pMemBucket->pBuffer, *pageId);
assert(pPage->num == 1);
double v = 0;
@@ -233,7 +233,7 @@ tMemBucket *tMemBucketCreate(int16_t nElemSize, int16_t dataType, double minval,
pBucket->times = 1;
pBucket->maxCapacity = 200000;
-
+ pBucket->groupPagesMap = taosHashInit(128, taosGetDefaultHashFunction(TSDB_DATA_TYPE_INT), false, HASH_NO_LOCK);
if (setBoundingBox(&pBucket->range, pBucket->type, minval, maxval) != 0) {
// qError("MemBucket:%p, invalid value range: %f-%f", pBucket, minval, maxval);
taosMemoryFree(pBucket);
@@ -280,8 +280,16 @@ void tMemBucketDestroy(tMemBucket *pBucket) {
return;
}
+ void* p = taosHashIterate(pBucket->groupPagesMap, NULL);
+ while(p) {
+ SArray** p1 = p;
+ p = taosHashIterate(pBucket->groupPagesMap, p);
+ taosArrayDestroy(*p1);
+ }
+
destroyDiskbasedBuf(pBucket->pBuffer);
taosMemoryFreeClear(pBucket->pSlots);
+ taosHashCleanup(pBucket->groupPagesMap);
taosMemoryFreeClear(pBucket);
}
@@ -357,8 +365,16 @@ int32_t tMemBucketPut(tMemBucket *pBucket, const void *data, size_t size) {
pSlot->info.data = NULL;
}
- pSlot->info.data = getNewBufPage(pBucket->pBuffer, groupId, &pageId);
+ SArray* pPageIdList = (SArray*)taosHashGet(pBucket->groupPagesMap, &groupId, sizeof(groupId));
+ if (pPageIdList == NULL) {
+ SArray* pList = taosArrayInit(4, sizeof(int32_t));
+ taosHashPut(pBucket->groupPagesMap, &groupId, sizeof(groupId), &pList, POINTER_BYTES);
+ pPageIdList = pList;
+ }
+
+ pSlot->info.data = getNewBufPage(pBucket->pBuffer, &pageId);
pSlot->info.pageId = pageId;
+ taosArrayPush(pPageIdList, &pageId);
}
memcpy(pSlot->info.data->data + pSlot->info.data->num * pBucket->bytes, d, pBucket->bytes);
@@ -476,7 +492,7 @@ double getPercentileImpl(tMemBucket *pMemBucket, int32_t count, double fraction)
resetSlotInfo(pMemBucket);
int32_t groupId = getGroupId(pMemBucket->numOfSlots, i, pMemBucket->times - 1);
- SIDList list = getDataBufPagesIdList(pMemBucket->pBuffer, groupId);
+ SIDList list = taosHashGet(pMemBucket->groupPagesMap, &groupId, sizeof(groupId));
assert(list->size > 0);
for (int32_t f = 0; f < list->size; ++f) {
diff --git a/source/libs/function/src/tudf.c b/source/libs/function/src/tudf.c
index d5a3e91eeaa63723029617dfc7be1f72679508bd..060a92f864ec6380a799c77a6790eeeb3175577b 100644
--- a/source/libs/function/src/tudf.c
+++ b/source/libs/function/src/tudf.c
@@ -81,7 +81,7 @@ static int32_t udfSpawnUdfd(SUdfdData* pData) {
taosDirName(path);
#endif
} else {
- strncpy(path, tsProcPath, strlen(tsProcPath));
+ strncpy(path, tsProcPath, PATH_MAX);
taosDirName(path);
}
#ifdef WINDOWS
@@ -961,6 +961,7 @@ void releaseUdfFuncHandle(char* udfName) {
strcpy(key.udfName, udfName);
SUdfcFuncStub *foundStub = taosArraySearch(gUdfdProxy.udfStubs, &key, compareUdfcFuncSub, TD_EQ);
if (!foundStub) {
+ uv_mutex_unlock(&gUdfdProxy.udfStubsMutex);
return;
}
if (foundStub->refCount > 0) {
@@ -1183,7 +1184,9 @@ void onUdfcPipeClose(uv_handle_t *handle) {
QUEUE_REMOVE(&task->procTaskQueue);
uv_sem_post(&task->taskSem);
}
- conn->session->udfUvPipe = NULL;
+ if (conn->session != NULL) {
+ conn->session->udfUvPipe = NULL;
+ }
taosMemoryFree(conn->readBuf.buf);
taosMemoryFree(conn);
taosMemoryFree((uv_pipe_t *) handle);
@@ -1653,6 +1656,8 @@ int32_t doSetupUdf(char udfName[], UdfcFuncHandle *funcHandle) {
int32_t errCode = udfcRunUdfUvTask(task, UV_TASK_CONNECT);
if (errCode != 0) {
fnError("failed to connect to pipe. udfName: %s, pipe: %s", udfName, (&gUdfdProxy)->udfdPipeName);
+ taosMemoryFree(task->session);
+ taosMemoryFree(task);
return TSDB_CODE_UDF_PIPE_CONNECT_ERR;
}
@@ -1803,6 +1808,7 @@ int32_t doTeardownUdf(UdfcFuncHandle handle) {
if (session->udfUvPipe == NULL) {
fnError("tear down udf. pipe to udfd does not exist. udf name: %s", session->udfName);
+ taosMemoryFree(session);
return TSDB_CODE_UDF_PIPE_NO_PIPE;
}
@@ -1821,7 +1827,11 @@ int32_t doTeardownUdf(UdfcFuncHandle handle) {
udfcRunUdfUvTask(task, UV_TASK_DISCONNECT);
fnInfo("tear down udf. udf name: %s, udf func handle: %p", session->udfName, handle);
-
+ //TODO: synchronization refactor between libuv event loop and request thread
+ if (session->udfUvPipe != NULL && session->udfUvPipe->data != NULL) {
+ SClientUvConn *conn = session->udfUvPipe->data;
+ conn->session = NULL;
+ }
taosMemoryFree(session);
taosMemoryFree(task);
diff --git a/source/libs/function/src/udfd.c b/source/libs/function/src/udfd.c
index 1cbc78df48b1cbeb5d1645dcd945168f21d25ba6..8ff0dc15a56068d466664df01f4f2291100741f2 100644
--- a/source/libs/function/src/udfd.c
+++ b/source/libs/function/src/udfd.c
@@ -41,6 +41,8 @@ typedef struct SUdfdContext {
uv_mutex_t udfsMutex;
SHashObj * udfsHash;
+ SArray* residentFuncs;
+
bool printVersion;
} SUdfdContext;
@@ -67,6 +69,7 @@ typedef struct SUdf {
EUdfState state;
uv_mutex_t lock;
uv_cond_t condReady;
+ bool resident;
char name[TSDB_FUNC_NAME_LEN];
int8_t funcType;
@@ -84,6 +87,7 @@ typedef struct SUdf {
TUdfAggStartFunc aggStartFunc;
TUdfAggProcessFunc aggProcFunc;
TUdfAggFinishFunc aggFinishFunc;
+ TUdfAggMergeFunc aggMergeFunc;
TUdfInitFunc initFunc;
TUdfDestroyFunc destroyFunc;
@@ -199,6 +203,14 @@ void udfdProcessSetupRequest(SUvUdfWork *uvUdf, SUdfRequest *request) {
if (udf->initFunc) {
udf->initFunc();
}
+ udf->resident = false;
+ for (int32_t i = 0; i < taosArrayGetSize(global.residentFuncs); ++i) {
+ char* funcName = taosArrayGet(global.residentFuncs, i);
+ if (strcmp(setup->udfName, funcName) == 0) {
+ udf->resident = true;
+ break;
+ }
+ }
udf->state = UDF_STATE_READY;
uv_cond_broadcast(&udf->condReady);
uv_mutex_unlock(&udf->lock);
@@ -271,6 +283,15 @@ void udfdProcessCallRequest(SUvUdfWork *uvUdf, SUdfRequest *request) {
break;
}
+ case TSDB_UDF_CALL_AGG_MERGE: {
+ SUdfInterBuf outBuf = {.buf = taosMemoryMalloc(udf->bufSize), .bufLen = udf->bufSize, .numOfResult = 0};
+ code = udf->aggMergeFunc(&call->interBuf, &call->interBuf2, &outBuf);
+ freeUdfInterBuf(&call->interBuf);
+ freeUdfInterBuf(&call->interBuf2);
+ subRsp->resultBuf = outBuf;
+
+ break;
+ }
case TSDB_UDF_CALL_AGG_FIN: {
SUdfInterBuf outBuf = {.buf = taosMemoryMalloc(udf->bufSize), .bufLen = udf->bufSize, .numOfResult = 0};
code = udf->aggFinishFunc(&call->interBuf, &outBuf);
@@ -309,6 +330,10 @@ void udfdProcessCallRequest(SUvUdfWork *uvUdf, SUdfRequest *request) {
freeUdfInterBuf(&subRsp->resultBuf);
break;
}
+ case TSDB_UDF_CALL_AGG_MERGE: {
+ freeUdfInterBuf(&subRsp->resultBuf);
+ break;
+ }
case TSDB_UDF_CALL_AGG_FIN: {
freeUdfInterBuf(&subRsp->resultBuf);
break;
@@ -331,7 +356,7 @@ void udfdProcessTeardownRequest(SUvUdfWork *uvUdf, SUdfRequest *request) {
uv_mutex_lock(&global.udfsMutex);
udf->refCount--;
- if (udf->refCount == 0) {
+ if (udf->refCount == 0 && !udf->resident) {
unloadUdf = true;
taosHashRemove(global.udfsHash, udf->name, strlen(udf->name));
}
@@ -439,7 +464,7 @@ void udfdProcessRpcRsp(void *parent, SRpcMsg *pMsg, SEpSet *pEpSet) {
goto _return;
}
taosCloseFile(&file);
- strncpy(udf->path, path, strlen(path));
+ strncpy(udf->path, path, PATH_MAX);
tFreeSFuncInfo(pFuncInfo);
taosArrayDestroy(retrieveRsp.pFuncInfos);
msgInfo->code = 0;
@@ -552,15 +577,19 @@ int32_t udfdLoadUdf(char *udfName, SUdf *udf) {
uv_dlsym(&udf->lib, processFuncName, (void **)(&udf->aggProcFunc));
char startFuncName[TSDB_FUNC_NAME_LEN + 6] = {0};
char *startSuffix = "_start";
- strncpy(startFuncName, processFuncName, strlen(processFuncName));
+ strncpy(startFuncName, processFuncName, sizeof(startFuncName));
strncat(startFuncName, startSuffix, strlen(startSuffix));
uv_dlsym(&udf->lib, startFuncName, (void **)(&udf->aggStartFunc));
char finishFuncName[TSDB_FUNC_NAME_LEN + 7] = {0};
char *finishSuffix = "_finish";
- strncpy(finishFuncName, processFuncName, strlen(processFuncName));
+ strncpy(finishFuncName, processFuncName, sizeof(finishFuncName));
strncat(finishFuncName, finishSuffix, strlen(finishSuffix));
uv_dlsym(&udf->lib, finishFuncName, (void **)(&udf->aggFinishFunc));
- // TODO: merge
+ char mergeFuncName[TSDB_FUNC_NAME_LEN + 6] = {0};
+ char *mergeSuffix = "_merge";
+ strncpy(mergeFuncName, processFuncName, sizeof(mergeFuncName));
+ strncat(mergeFuncName, mergeSuffix, strlen(mergeSuffix));
+ uv_dlsym(&udf->lib, mergeFuncName, (void **)(&udf->aggMergeFunc));
}
return 0;
}
@@ -901,8 +930,6 @@ static int32_t udfdRun() {
uv_run(global.loop, UV_RUN_DEFAULT);
uv_loop_close(global.loop);
- uv_mutex_destroy(&global.udfsMutex);
- taosHashCleanup(global.udfsHash);
return 0;
}
@@ -923,6 +950,47 @@ void udfdConnectMnodeThreadFunc(void *args) {
}
}
+int32_t udfdInitResidentFuncs() {
+ if (strlen(tsUdfdResFuncs) == 0) {
+ return TSDB_CODE_SUCCESS;
+ }
+
+ global.residentFuncs = taosArrayInit(2, TSDB_FUNC_NAME_LEN);
+ char* pSave = tsUdfdResFuncs;
+ char* token;
+ while ((token = strtok_r(pSave, ",", &pSave)) != NULL) {
+ char func[TSDB_FUNC_NAME_LEN] = {0};
+ strncpy(func, token, sizeof(func));
+ taosArrayPush(global.residentFuncs, func);
+ }
+
+ return TSDB_CODE_SUCCESS;
+}
+
+int32_t udfdDeinitResidentFuncs() {
+ for (int32_t i = 0; i < taosArrayGetSize(global.residentFuncs); ++i) {
+ char* funcName = taosArrayGet(global.residentFuncs, i);
+ SUdf** udfInHash = taosHashGet(global.udfsHash, funcName, strlen(funcName));
+ if (udfInHash) {
+ taosHashRemove(global.udfsHash, funcName, strlen(funcName));
+ SUdf* udf = *udfInHash;
+ if (udf->destroyFunc) {
+ (udf->destroyFunc)();
+ }
+ uv_dlclose(&udf->lib);
+ taosMemoryFree(udf);
+ }
+ }
+ taosArrayDestroy(global.residentFuncs);
+ return TSDB_CODE_SUCCESS;
+}
+
+int32_t udfdCleanup() {
+ uv_mutex_destroy(&global.udfsMutex);
+ taosHashCleanup(global.udfsHash);
+ return 0;
+}
+
int main(int argc, char *argv[]) {
if (!taosCheckSystemIsLittleEnd()) {
printf("failed to start since on non-little-end machines\n");
@@ -960,6 +1028,8 @@ int main(int argc, char *argv[]) {
return -5;
}
+ udfdInitResidentFuncs();
+
uv_thread_t mnodeConnectThread;
uv_thread_create(&mnodeConnectThread, udfdConnectMnodeThreadFunc, NULL);
@@ -968,5 +1038,7 @@ int main(int argc, char *argv[]) {
removeListeningPipe();
udfdCloseClientRpc();
+ udfdDeinitResidentFuncs();
+ udfdCleanup();
return 0;
}
diff --git a/source/libs/index/inc/indexInt.h b/source/libs/index/inc/indexInt.h
index 065f4acb576263d1f7d5cbe8238273dc325ccb09..9605528ad6ae150fd88f512cdf5344b81d486a99 100644
--- a/source/libs/index/inc/indexInt.h
+++ b/source/libs/index/inc/indexInt.h
@@ -40,26 +40,31 @@ extern "C" {
#define indexTrace(...) do { if (idxDebugFlag & DEBUG_TRACE) { taosPrintLog("IDX", DEBUG_TRACE, idxDebugFlag, __VA_ARGS__);} } while (0)
// clang-format on
+extern void* indexQhandle;
+
typedef enum { LT, LE, GT, GE, CONTAINS, EQ } RangeType;
typedef enum { kTypeValue, kTypeDeletion } STermValueType;
typedef enum { kRebuild, kFinished } SIdxStatus;
typedef struct SIndexStat {
- int32_t totalAdded; //
- int32_t totalDeled; //
- int32_t totalUpdated; //
- int32_t totalTerms; //
- int32_t distinctCol; // distinct column
+ int32_t total;
+ int32_t add; //
+ int32_t del; //
+ int32_t update; //
+ int32_t terms; //
+ int32_t distCol; // distinct column
} SIndexStat;
struct SIndex {
+ SIndexOpts opts;
+
int64_t refId;
void* cache;
void* tindex;
SHashObj* colObj; // < field name, field id>
- int64_t suid; // current super table id, -1 is normal table
- int32_t cVersion; // current version allocated to cache
+ int64_t suid; // current super table id, -1 is normal table
+ int32_t version; // current version allocated to cache
SLRUCache* lru;
char* path;
@@ -68,7 +73,6 @@ struct SIndex {
TdThreadMutex mtx;
tsem_t sem;
bool quit;
- SIndexOpts opts;
};
struct SIndexMultiTermQuery {
@@ -111,14 +115,15 @@ typedef struct Iterate {
void iterateValueDestroy(IterateValue* iv, bool destroy);
-extern void* indexQhandle;
-
typedef struct TFileCacheKey {
uint64_t suid;
uint8_t colType;
char* colName;
int32_t nColName;
} ICacheKey;
+
+int32_t idxSerialCacheKey(ICacheKey* key, char* buf);
+
int idxFlushCacheToTFile(SIndex* sIdx, void*, bool quit);
int64_t idxAddRef(void* p);
@@ -126,10 +131,6 @@ int32_t idxRemoveRef(int64_t ref);
void idxAcquireRef(int64_t ref);
void idxReleaseRef(int64_t ref);
-int32_t idxSerialCacheKey(ICacheKey* key, char* buf);
-// int32_t indexSerialKey(ICacheKey* key, char* buf);
-// int32_t indexSerialTermKey(SIndexTerm* itm, char* buf);
-
#define IDX_TYPE_CONTAIN_EXTERN_TYPE(ty, exTy) (((ty >> 4) & (exTy)) != 0)
#define IDX_TYPE_GET_TYPE(ty) (ty & 0x0F)
diff --git a/source/libs/index/src/index.c b/source/libs/index/src/index.c
index be64a8b44d28a76a0b04a78b3940bcb0c86101da..f507e1b3bed918419ca292b6d88ea85311122222 100644
--- a/source/libs/index/src/index.c
+++ b/source/libs/index/src/index.c
@@ -25,10 +25,6 @@
#include "tref.h"
#include "tsched.h"
-#ifdef USE_LUCENE
-#include "lucene++/Lucene_c.h"
-#endif
-
#define INDEX_NUM_OF_THREADS 5
#define INDEX_QUEUE_SIZE 200
@@ -74,7 +70,7 @@ void indexCleanup() {
typedef struct SIdxColInfo {
int colId; // generated by index internal
- int cVersion;
+ int version;
} SIdxColInfo;
static TdThreadOnce isInit = PTHREAD_ONCE_INIT;
@@ -123,7 +119,7 @@ int indexOpen(SIndexOpts* opts, const char* path, SIndex** index) {
}
idx->colObj = taosHashInit(8, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, HASH_ENTRY_LOCK);
- idx->cVersion = 1;
+ idx->version = 1;
idx->path = tstrdup(path);
taosThreadMutexInit(&idx->mtx, NULL);
tsem_init(&idx->sem, 0, 0);
@@ -307,7 +303,7 @@ SIndexTerm* indexTermCreate(int64_t suid, SIndexOperOnColumn oper, uint8_t colTy
buf = strndup(INDEX_DATA_NULL_STR, (int32_t)strlen(INDEX_DATA_NULL_STR));
len = (int32_t)strlen(INDEX_DATA_NULL_STR);
} else {
- const char* emptyStr = " ";
+ static const char* emptyStr = " ";
buf = strndup(emptyStr, (int32_t)strlen(emptyStr));
len = (int32_t)strlen(emptyStr);
}
@@ -589,6 +585,12 @@ int idxFlushCacheToTFile(SIndex* sIdx, void* cache, bool quit) {
idxTRsltDestroy(tr);
int ret = idxGenTFile(sIdx, pCache, result);
+ if (ret != 0) {
+ indexError("failed to merge");
+ } else {
+ int64_t cost = taosGetTimestampUs() - st;
+ indexInfo("success to merge , time cost: %" PRId64 "ms", cost / 1000);
+ }
idxDestroyFinalRslt(result);
idxCacheDestroyImm(pCache);
@@ -599,12 +601,6 @@ int idxFlushCacheToTFile(SIndex* sIdx, void* cache, bool quit) {
tfileReaderUnRef(pReader);
idxCacheUnRef(pCache);
- int64_t cost = taosGetTimestampUs() - st;
- if (ret != 0) {
- indexError("failed to merge, time cost: %" PRId64 "ms", cost / 1000);
- } else {
- indexInfo("success to merge , time cost: %" PRId64 "ms", cost / 1000);
- }
atomic_store_32(&pCache->merging, 0);
if (quit) {
idxPost(sIdx);
diff --git a/source/libs/index/src/indexCache.c b/source/libs/index/src/indexCache.c
index 794b85d244f875b6874855ac6a36a7500114afd5..39bba4e2695030cefc332469ae74ad25b5eda194 100644
--- a/source/libs/index/src/indexCache.c
+++ b/source/libs/index/src/indexCache.c
@@ -302,6 +302,7 @@ static int32_t cacheSearchCompareFunc_JSON(void* cache, SIndexTerm* term, SIdxTR
char* p = taosMemoryCalloc(1, strlen(c->colVal) + 1);
memcpy(p, c->colVal, strlen(c->colVal));
cond = cmpFn(p + skip, term->colVal, dType);
+ taosMemoryFree(p);
}
}
if (cond == MATCH) {
@@ -566,7 +567,6 @@ int idxCachePut(void* cache, SIndexTerm* term, uint64_t uid) {
taosThreadMutexUnlock(&pCache->mtx);
idxCacheUnRef(pCache);
return 0;
- // encode end
}
void idxCacheForceToMerge(void* cache) {
IndexCache* pCache = cache;
@@ -602,10 +602,10 @@ static int32_t idxQueryMem(MemTable* mem, SIndexTermQuery* query, SIdxTRslt* tr,
}
}
int idxCacheSearch(void* cache, SIndexTermQuery* query, SIdxTRslt* result, STermValueType* s) {
- int64_t st = taosGetTimestampUs();
if (cache == NULL) {
return 0;
}
+
IndexCache* pCache = cache;
MemTable *mem = NULL, *imm = NULL;
@@ -616,6 +616,8 @@ int idxCacheSearch(void* cache, SIndexTermQuery* query, SIdxTRslt* result, STerm
idxMemRef(imm);
taosThreadMutexUnlock(&pCache->mtx);
+ int64_t st = taosGetTimestampUs();
+
int ret = (mem && mem->mem) ? idxQueryMem(mem, query, result, s) : 0;
if (ret == 0 && *s != kTypeDeletion) {
// continue search in imm
diff --git a/source/libs/index/src/indexComm.c b/source/libs/index/src/indexComm.c
index 4f33d98f9e4f7e5b210922b0dd6da0b5448d4472..ecf91360734e37f0060aeb7758e5c4c5d57d4972 100644
--- a/source/libs/index/src/indexComm.c
+++ b/source/libs/index/src/indexComm.c
@@ -81,28 +81,28 @@ __compar_fn_t idxGetCompar(int8_t type) {
}
return getComparFunc(type, 0);
}
-static TExeCond tCompareLessThan(void* a, void* b, int8_t type) {
+static FORCE_INLINE TExeCond tCompareLessThan(void* a, void* b, int8_t type) {
__compar_fn_t func = idxGetCompar(type);
return tCompare(func, QUERY_LESS_THAN, a, b, type);
}
-static TExeCond tCompareLessEqual(void* a, void* b, int8_t type) {
+static FORCE_INLINE TExeCond tCompareLessEqual(void* a, void* b, int8_t type) {
__compar_fn_t func = idxGetCompar(type);
return tCompare(func, QUERY_LESS_EQUAL, a, b, type);
}
-static TExeCond tCompareGreaterThan(void* a, void* b, int8_t type) {
+static FORCE_INLINE TExeCond tCompareGreaterThan(void* a, void* b, int8_t type) {
__compar_fn_t func = idxGetCompar(type);
return tCompare(func, QUERY_GREATER_THAN, a, b, type);
}
-static TExeCond tCompareGreaterEqual(void* a, void* b, int8_t type) {
+static FORCE_INLINE TExeCond tCompareGreaterEqual(void* a, void* b, int8_t type) {
__compar_fn_t func = idxGetCompar(type);
return tCompare(func, QUERY_GREATER_EQUAL, a, b, type);
}
-static TExeCond tCompareContains(void* a, void* b, int8_t type) {
+static FORCE_INLINE TExeCond tCompareContains(void* a, void* b, int8_t type) {
__compar_fn_t func = idxGetCompar(type);
return tCompare(func, QUERY_TERM, a, b, type);
}
-static TExeCond tCompareEqual(void* a, void* b, int8_t type) {
+static FORCE_INLINE TExeCond tCompareEqual(void* a, void* b, int8_t type) {
__compar_fn_t func = idxGetCompar(type);
return tCompare(func, QUERY_TERM, a, b, type);
}
@@ -171,15 +171,16 @@ TExeCond tCompare(__compar_fn_t func, int8_t cmptype, void* a, void* b, int8_t d
return tDoCompare(func, cmptype, &va, &vb);
}
assert(0);
+ return BREAK;
#endif
}
TExeCond tDoCompare(__compar_fn_t func, int8_t comparType, void* a, void* b) {
// optime later
int32_t ret = func(a, b);
switch (comparType) {
- case QUERY_LESS_THAN: {
+ case QUERY_LESS_THAN:
if (ret < 0) return MATCH;
- } break;
+ break;
case QUERY_LESS_EQUAL: {
if (ret <= 0) return MATCH;
break;
diff --git a/source/libs/index/src/indexFilter.c b/source/libs/index/src/indexFilter.c
index 75844ce76f1cb50d6847709309dae1ed3f77bf70..b65acc467215da77019235e5ec44a335b363e344 100644
--- a/source/libs/index/src/indexFilter.c
+++ b/source/libs/index/src/indexFilter.c
@@ -27,6 +27,44 @@
#define SIF_RET(c) do { int32_t _code = c; if (_code != TSDB_CODE_SUCCESS) { terrno = _code; } return _code; } while (0)
#define SIF_ERR_JRET(c) do { code = c; if (code != TSDB_CODE_SUCCESS) { terrno = code; goto _return; } } while (0)
// clang-format on
+
+typedef union {
+ uint8_t u8;
+ uint16_t u16;
+ uint32_t u32;
+ uint64_t u64;
+
+ int8_t i8;
+ int16_t i16;
+ int32_t i32;
+ int64_t i64;
+
+ double d;
+ float f;
+} SDataTypeBuf;
+
+#define SIF_DATA_CONVERT(type, val, dst) \
+ do { \
+ if (type == TSDB_DATA_TYPE_DOUBLE) \
+ dst = GET_DOUBLE_VAL(val); \
+ else if (type == TSDB_DATA_TYPE_BIGINT) \
+ dst = *(int64_t *)val; \
+ else if (type == TSDB_DATA_TYPE_INT) \
+ dst = *(int32_t *)val; \
+ else if (type == TSDB_DATA_TYPE_SMALLINT) \
+ dst = *(int16_t *)val; \
+ else if (type == TSDB_DATA_TYPE_TINYINT) \
+ dst = *(int8_t *)val; \
+ else if (type == TSDB_DATA_TYPE_UTINYINT) \
+ dst = *(uint8_t *)val; \
+ else if (type == TSDB_DATA_TYPE_USMALLINT) \
+ dst = *(uint16_t *)val; \
+ else if (type == TSDB_DATA_TYPE_UINT) \
+ dst = *(uint32_t *)val; \
+ else if (type == TSDB_DATA_TYPE_UBIGINT) \
+ dst = *(uint64_t *)val; \
+ } while (0);
+
typedef struct SIFParam {
SHashObj *pFilter;
@@ -48,10 +86,9 @@ typedef struct SIFCtx {
SHashObj *pRes; /* element is SIFParam */
bool noExec; // true: just iterate condition tree, and add hint to executor plan
SIndexMetaArg arg;
- // SIdxFltStatus st;
} SIFCtx;
-static int32_t sifGetFuncFromSql(EOperatorType src, EIndexQueryType *dst) {
+static FORCE_INLINE int32_t sifGetFuncFromSql(EOperatorType src, EIndexQueryType *dst) {
if (src == OP_TYPE_GREATER_THAN) {
*dst = QUERY_GREATER_THAN;
} else if (src == OP_TYPE_GREATER_EQUAL) {
@@ -73,15 +110,9 @@ static int32_t sifGetFuncFromSql(EOperatorType src, EIndexQueryType *dst) {
}
typedef int32_t (*sif_func_t)(SIFParam *left, SIFParam *rigth, SIFParam *output);
-
static sif_func_t sifNullFunc = NULL;
-// typedef struct SIFWalkParm
-// construct tag filter operator later
-// static void destroyTagFilterOperatorInfo(void *param) {
-// STagFilterOperatorInfo *pInfo = (STagFilterOperatorInfo *)param;
-//}
-static void sifFreeParam(SIFParam *param) {
+static FORCE_INLINE void sifFreeParam(SIFParam *param) {
if (param == NULL) return;
taosArrayDestroy(param->result);
@@ -91,7 +122,7 @@ static void sifFreeParam(SIFParam *param) {
param->pFilter = NULL;
}
-static int32_t sifGetOperParamNum(EOperatorType ty) {
+static FORCE_INLINE int32_t sifGetOperParamNum(EOperatorType ty) {
if (OP_TYPE_IS_NULL == ty || OP_TYPE_IS_NOT_NULL == ty || OP_TYPE_IS_TRUE == ty || OP_TYPE_IS_NOT_TRUE == ty ||
OP_TYPE_IS_FALSE == ty || OP_TYPE_IS_NOT_FALSE == ty || OP_TYPE_IS_UNKNOWN == ty ||
OP_TYPE_IS_NOT_UNKNOWN == ty || OP_TYPE_MINUS == ty) {
@@ -99,14 +130,14 @@ static int32_t sifGetOperParamNum(EOperatorType ty) {
}
return 2;
}
-static int32_t sifValidOp(EOperatorType ty) {
+static FORCE_INLINE int32_t sifValidOp(EOperatorType ty) {
if ((ty >= OP_TYPE_ADD && ty <= OP_TYPE_BIT_OR) || (ty == OP_TYPE_IN || ty == OP_TYPE_NOT_IN) ||
(ty == OP_TYPE_LIKE || ty == OP_TYPE_NOT_LIKE || ty == OP_TYPE_MATCH || ty == OP_TYPE_NMATCH)) {
return -1;
}
return 0;
}
-static int32_t sifValidColumn(SColumnNode *cn) {
+static FORCE_INLINE int32_t sifValidColumn(SColumnNode *cn) {
// add more check
if (cn == NULL) {
return TSDB_CODE_QRY_INVALID_INPUT;
@@ -117,7 +148,7 @@ static int32_t sifValidColumn(SColumnNode *cn) {
return TSDB_CODE_SUCCESS;
}
-static SIdxFltStatus sifMergeCond(ELogicConditionType type, SIdxFltStatus ls, SIdxFltStatus rs) {
+static FORCE_INLINE SIdxFltStatus sifMergeCond(ELogicConditionType type, SIdxFltStatus ls, SIdxFltStatus rs) {
// enh rule later
if (type == LOGIC_COND_TYPE_AND) {
if (ls == SFLT_NOT_INDEX || rs == SFLT_NOT_INDEX) {
@@ -135,7 +166,7 @@ static SIdxFltStatus sifMergeCond(ELogicConditionType type, SIdxFltStatus ls, SI
return SFLT_NOT_INDEX;
}
-static int32_t sifGetValueFromNode(SNode *node, char **value) {
+static FORCE_INLINE int32_t sifGetValueFromNode(SNode *node, char **value) {
// covert data From snode;
SValueNode *vn = (SValueNode *)node;
@@ -173,7 +204,7 @@ static int32_t sifGetValueFromNode(SNode *node, char **value) {
return TSDB_CODE_SUCCESS;
}
-static int32_t sifInitJsonParam(SNode *node, SIFParam *param, SIFCtx *ctx) {
+static FORCE_INLINE int32_t sifInitJsonParam(SNode *node, SIFParam *param, SIFCtx *ctx) {
SOperatorNode *nd = (SOperatorNode *)node;
assert(nodeType(node) == QUERY_NODE_OPERATOR);
SColumnNode *l = (SColumnNode *)nd->pLeft;
@@ -323,30 +354,30 @@ static int32_t sifExecFunction(SFunctionNode *node, SIFCtx *ctx, SIFParam *outpu
return TSDB_CODE_QRY_INVALID_INPUT;
}
-typedef int (*Filter)(void *a, void *b, int16_t dtype);
+typedef int (*FilterFunc)(void *a, void *b, int16_t dtype);
-int sifGreaterThan(void *a, void *b, int16_t dtype) {
+static FORCE_INLINE int sifGreaterThan(void *a, void *b, int16_t dtype) {
__compar_fn_t func = getComparFunc(dtype, 0);
return tDoCompare(func, QUERY_GREATER_THAN, a, b);
}
-int sifGreaterEqual(void *a, void *b, int16_t dtype) {
+static FORCE_INLINE int sifGreaterEqual(void *a, void *b, int16_t dtype) {
__compar_fn_t func = getComparFunc(dtype, 0);
return tDoCompare(func, QUERY_GREATER_EQUAL, a, b);
}
-int sifLessEqual(void *a, void *b, int16_t dtype) {
+static FORCE_INLINE int sifLessEqual(void *a, void *b, int16_t dtype) {
__compar_fn_t func = getComparFunc(dtype, 0);
return tDoCompare(func, QUERY_LESS_EQUAL, a, b);
}
-int sifLessThan(void *a, void *b, int16_t dtype) {
+static FORCE_INLINE int sifLessThan(void *a, void *b, int16_t dtype) {
__compar_fn_t func = getComparFunc(dtype, 0);
return (int)tDoCompare(func, QUERY_LESS_THAN, a, b);
}
-int sifEqual(void *a, void *b, int16_t dtype) {
+static FORCE_INLINE int sifEqual(void *a, void *b, int16_t dtype) {
__compar_fn_t func = getComparFunc(dtype, 0);
//__compar_fn_t func = idxGetCompar(dtype);
return (int)tDoCompare(func, QUERY_TERM, a, b);
}
-static Filter sifGetFilterFunc(EIndexQueryType type, bool *reverse) {
+static FORCE_INLINE FilterFunc sifGetFilterFunc(EIndexQueryType type, bool *reverse) {
if (type == QUERY_LESS_EQUAL || type == QUERY_LESS_THAN) {
*reverse = true;
} else {
@@ -365,42 +396,6 @@ static Filter sifGetFilterFunc(EIndexQueryType type, bool *reverse) {
}
return NULL;
}
-typedef union {
- uint8_t u8;
- uint16_t u16;
- uint32_t u32;
- uint64_t u64;
-
- int8_t i8;
- int16_t i16;
- int32_t i32;
- int64_t i64;
-
- double d;
- float f;
-} SDataTypeBuf;
-
-#define SIF_DATA_CONVERT(type, val, dst) \
- do { \
- if (type == TSDB_DATA_TYPE_DOUBLE) \
- dst = GET_DOUBLE_VAL(val); \
- else if (type == TSDB_DATA_TYPE_BIGINT) \
- dst = *(int64_t *)val; \
- else if (type == TSDB_DATA_TYPE_INT) \
- dst = *(int32_t *)val; \
- else if (type == TSDB_DATA_TYPE_SMALLINT) \
- dst = *(int16_t *)val; \
- else if (type == TSDB_DATA_TYPE_TINYINT) \
- dst = *(int8_t *)val; \
- else if (type == TSDB_DATA_TYPE_UTINYINT) \
- dst = *(uint8_t *)val; \
- else if (type == TSDB_DATA_TYPE_USMALLINT) \
- dst = *(uint16_t *)val; \
- else if (type == TSDB_DATA_TYPE_UINT) \
- dst = *(uint32_t *)val; \
- else if (type == TSDB_DATA_TYPE_UBIGINT) \
- dst = *(uint64_t *)val; \
- } while (0);
static void sifSetFltParam(SIFParam *left, SIFParam *right, SDataTypeBuf *typedata, SMetaFltParam *param) {
int8_t ltype = left->colValType, rtype = right->colValType;
@@ -474,8 +469,8 @@ static int32_t sifDoIndex(SIFParam *left, SIFParam *right, int8_t operType, SIFP
indexMultiTermQueryAdd(mtm, tm, qtype);
ret = indexJsonSearch(arg->ivtIdx, mtm, output->result);
} else {
- bool reverse;
- Filter filterFunc = sifGetFilterFunc(qtype, &reverse);
+ bool reverse;
+ FilterFunc filterFunc = sifGetFilterFunc(qtype, &reverse);
SMetaFltParam param = {.suid = arg->suid,
.cid = left->colId,
@@ -502,72 +497,72 @@ static int32_t sifDoIndex(SIFParam *left, SIFParam *right, int8_t operType, SIFP
return ret;
}
-static int32_t sifLessThanFunc(SIFParam *left, SIFParam *right, SIFParam *output) {
+static FORCE_INLINE int32_t sifLessThanFunc(SIFParam *left, SIFParam *right, SIFParam *output) {
int id = OP_TYPE_LOWER_THAN;
return sifDoIndex(left, right, id, output);
}
-static int32_t sifLessEqualFunc(SIFParam *left, SIFParam *right, SIFParam *output) {
+static FORCE_INLINE int32_t sifLessEqualFunc(SIFParam *left, SIFParam *right, SIFParam *output) {
int id = OP_TYPE_LOWER_EQUAL;
return sifDoIndex(left, right, id, output);
}
-static int32_t sifGreaterThanFunc(SIFParam *left, SIFParam *right, SIFParam *output) {
+static FORCE_INLINE int32_t sifGreaterThanFunc(SIFParam *left, SIFParam *right, SIFParam *output) {
int id = OP_TYPE_GREATER_THAN;
return sifDoIndex(left, right, id, output);
}
-static int32_t sifGreaterEqualFunc(SIFParam *left, SIFParam *right, SIFParam *output) {
+static FORCE_INLINE int32_t sifGreaterEqualFunc(SIFParam *left, SIFParam *right, SIFParam *output) {
int id = OP_TYPE_GREATER_EQUAL;
return sifDoIndex(left, right, id, output);
}
-static int32_t sifEqualFunc(SIFParam *left, SIFParam *right, SIFParam *output) {
+static FORCE_INLINE int32_t sifEqualFunc(SIFParam *left, SIFParam *right, SIFParam *output) {
int id = OP_TYPE_EQUAL;
return sifDoIndex(left, right, id, output);
}
-static int32_t sifNotEqualFunc(SIFParam *left, SIFParam *right, SIFParam *output) {
+static FORCE_INLINE int32_t sifNotEqualFunc(SIFParam *left, SIFParam *right, SIFParam *output) {
int id = OP_TYPE_NOT_EQUAL;
return sifDoIndex(left, right, id, output);
}
-static int32_t sifInFunc(SIFParam *left, SIFParam *right, SIFParam *output) {
+static FORCE_INLINE int32_t sifInFunc(SIFParam *left, SIFParam *right, SIFParam *output) {
int id = OP_TYPE_IN;
return sifDoIndex(left, right, id, output);
}
-static int32_t sifNotInFunc(SIFParam *left, SIFParam *right, SIFParam *output) {
+static FORCE_INLINE int32_t sifNotInFunc(SIFParam *left, SIFParam *right, SIFParam *output) {
int id = OP_TYPE_NOT_IN;
return sifDoIndex(left, right, id, output);
}
-static int32_t sifLikeFunc(SIFParam *left, SIFParam *right, SIFParam *output) {
+static FORCE_INLINE int32_t sifLikeFunc(SIFParam *left, SIFParam *right, SIFParam *output) {
int id = OP_TYPE_LIKE;
return sifDoIndex(left, right, id, output);
}
-static int32_t sifNotLikeFunc(SIFParam *left, SIFParam *right, SIFParam *output) {
+static FORCE_INLINE int32_t sifNotLikeFunc(SIFParam *left, SIFParam *right, SIFParam *output) {
int id = OP_TYPE_NOT_LIKE;
return sifDoIndex(left, right, id, output);
}
-static int32_t sifMatchFunc(SIFParam *left, SIFParam *right, SIFParam *output) {
+static FORCE_INLINE int32_t sifMatchFunc(SIFParam *left, SIFParam *right, SIFParam *output) {
int id = OP_TYPE_MATCH;
return sifDoIndex(left, right, id, output);
}
-static int32_t sifNotMatchFunc(SIFParam *left, SIFParam *right, SIFParam *output) {
+static FORCE_INLINE int32_t sifNotMatchFunc(SIFParam *left, SIFParam *right, SIFParam *output) {
int id = OP_TYPE_NMATCH;
return sifDoIndex(left, right, id, output);
}
-static int32_t sifJsonContains(SIFParam *left, SIFParam *right, SIFParam *output) {
+static FORCE_INLINE int32_t sifJsonContains(SIFParam *left, SIFParam *right, SIFParam *output) {
int id = OP_TYPE_JSON_CONTAINS;
return sifDoIndex(left, right, id, output);
}
-static int32_t sifJsonGetValue(SIFParam *left, SIFParam *rigth, SIFParam *output) {
+static FORCE_INLINE int32_t sifJsonGetValue(SIFParam *left, SIFParam *rigth, SIFParam *output) {
// return 0
return 0;
}
-static int32_t sifDefaultFunc(SIFParam *left, SIFParam *right, SIFParam *output) {
+static FORCE_INLINE int32_t sifDefaultFunc(SIFParam *left, SIFParam *right, SIFParam *output) {
// add more except
return TSDB_CODE_QRY_INVALID_INPUT;
}
-static int32_t sifGetOperFn(int32_t funcId, sif_func_t *func, SIdxFltStatus *status) {
+static FORCE_INLINE int32_t sifGetOperFn(int32_t funcId, sif_func_t *func, SIdxFltStatus *status) {
// impl later
*status = SFLT_ACCURATE_INDEX;
switch (funcId) {
@@ -693,11 +688,8 @@ static int32_t sifExecLogic(SLogicConditionNode *node, SIFCtx *ctx, SIFParam *ou
for (int32_t m = 0; m < node->pParameterList->length; m++) {
if (node->condType == LOGIC_COND_TYPE_AND) {
taosArrayAddAll(output->result, params[m].result);
- // taosArrayDestroy(params[m].result);
- // params[m].result = NULL;
} else if (node->condType == LOGIC_COND_TYPE_OR) {
taosArrayAddAll(output->result, params[m].result);
- // params[m].result = NULL;
} else if (node->condType == LOGIC_COND_TYPE_NOT) {
// taosArrayAddAll(output->result, params[m].result);
}
diff --git a/source/libs/index/src/indexFst.c b/source/libs/index/src/indexFst.c
index 15152cef55c221f8a93bfee533dc6a9750f1db4b..2aa8345e03bb3cec2a86c4240351dc86cb3ec9c7 100644
--- a/source/libs/index/src/indexFst.c
+++ b/source/libs/index/src/indexFst.c
@@ -19,11 +19,12 @@
#include "tchecksum.h"
#include "tcoding.h"
-static void fstPackDeltaIn(IdxFstFile* wrt, CompiledAddr nodeAddr, CompiledAddr transAddr, uint8_t nBytes) {
+static FORCE_INLINE void fstPackDeltaIn(IdxFstFile* wrt, CompiledAddr nodeAddr, CompiledAddr transAddr,
+ uint8_t nBytes) {
CompiledAddr deltaAddr = (transAddr == EMPTY_ADDRESS) ? EMPTY_ADDRESS : nodeAddr - transAddr;
idxFilePackUintIn(wrt, deltaAddr, nBytes);
}
-static uint8_t fstPackDetla(IdxFstFile* wrt, CompiledAddr nodeAddr, CompiledAddr transAddr) {
+static FORCE_INLINE uint8_t fstPackDetla(IdxFstFile* wrt, CompiledAddr nodeAddr, CompiledAddr transAddr) {
uint8_t nBytes = packDeltaSize(nodeAddr, transAddr);
fstPackDeltaIn(wrt, nodeAddr, transAddr, nBytes);
return nBytes;
@@ -39,7 +40,7 @@ FstUnFinishedNodes* fstUnFinishedNodesCreate() {
fstUnFinishedNodesPushEmpty(nodes, false);
return nodes;
}
-static void unFinishedNodeDestroyElem(void* elem) {
+static void FORCE_INLINE unFinishedNodeDestroyElem(void* elem) {
FstBuilderNodeUnfinished* b = (FstBuilderNodeUnfinished*)elem;
fstBuilderNodeDestroy(b->node);
taosMemoryFree(b->last);
diff --git a/source/libs/index/src/indexFstFile.c b/source/libs/index/src/indexFstFile.c
index 4f278c7af6adfa8ed4e890b06944d5d5c9560f43..7021fdfae33fc89289e4506fcf40fbfef9601505 100644
--- a/source/libs/index/src/indexFstFile.c
+++ b/source/libs/index/src/indexFstFile.c
@@ -30,23 +30,24 @@ typedef struct {
static void deleteDataBlockFromLRU(const void* key, size_t keyLen, void* value) { taosMemoryFree(value); }
-static void idxGenLRUKey(char* buf, const char* path, int32_t blockId) {
+static FORCE_INLINE void idxGenLRUKey(char* buf, const char* path, int32_t blockId) {
char* p = buf;
SERIALIZE_STR_VAR_TO_BUF(p, path, strlen(path));
SERIALIZE_VAR_TO_BUF(p, '_', char);
idxInt2str(blockId, p, 0);
return;
}
-static int idxFileCtxDoWrite(IFileCtx* ctx, uint8_t* buf, int len) {
+static FORCE_INLINE int idxFileCtxDoWrite(IFileCtx* ctx, uint8_t* buf, int len) {
if (ctx->type == TFILE) {
- assert(len == taosWriteFile(ctx->file.pFile, buf, len));
+ int nwr = taosWriteFile(ctx->file.pFile, buf, len);
+ assert(nwr == len);
} else {
memcpy(ctx->mem.buf + ctx->offset, buf, len);
}
ctx->offset += len;
return len;
}
-static int idxFileCtxDoRead(IFileCtx* ctx, uint8_t* buf, int len) {
+static FORCE_INLINE int idxFileCtxDoRead(IFileCtx* ctx, uint8_t* buf, int len) {
int nRead = 0;
if (ctx->type == TFILE) {
#ifdef USE_MMAP
@@ -68,6 +69,8 @@ static int idxFileCtxDoReadFrom(IFileCtx* ctx, uint8_t* buf, int len, int32_t of
int32_t blkOffset = offset % kBlockSize;
int32_t blkLeft = kBlockSize - blkOffset;
+ if (offset >= ctx->file.size) return 0;
+
do {
char key[128] = {0};
idxGenLRUKey(key, ctx->file.buf, blkId);
@@ -79,24 +82,34 @@ static int idxFileCtxDoReadFrom(IFileCtx* ctx, uint8_t* buf, int len, int32_t of
memcpy(buf + total, blk->buf + blkOffset, nread);
taosLRUCacheRelease(ctx->lru, h, false);
} else {
- int32_t cacheMemSize = sizeof(SDataBlock) + kBlockSize;
+ int32_t left = ctx->file.size - offset;
+ if (left < kBlockSize) {
+ nread = TMIN(left, len);
+ int32_t bytes = taosPReadFile(ctx->file.pFile, buf + total, nread, offset);
+ assert(bytes == nread);
- SDataBlock* blk = taosMemoryCalloc(1, cacheMemSize);
- blk->blockId = blkId;
- blk->nread = taosPReadFile(ctx->file.pFile, blk->buf, kBlockSize, blkId * kBlockSize);
- assert(blk->nread <= kBlockSize);
+ total += bytes;
+ return total;
+ } else {
+ int32_t cacheMemSize = sizeof(SDataBlock) + kBlockSize;
- if (blk->nread < kBlockSize && blk->nread < len) {
- break;
- }
+ SDataBlock* blk = taosMemoryCalloc(1, cacheMemSize);
+ blk->blockId = blkId;
+ blk->nread = taosPReadFile(ctx->file.pFile, blk->buf, kBlockSize, blkId * kBlockSize);
+ assert(blk->nread <= kBlockSize);
- nread = TMIN(blkLeft, len);
- memcpy(buf + total, blk->buf + blkOffset, nread);
+ if (blk->nread < kBlockSize && blk->nread < len) {
+ break;
+ }
+
+ nread = TMIN(blkLeft, len);
+ memcpy(buf + total, blk->buf + blkOffset, nread);
- LRUStatus s = taosLRUCacheInsert(ctx->lru, key, strlen(key), blk, cacheMemSize, deleteDataBlockFromLRU, NULL,
- TAOS_LRU_PRIORITY_LOW);
- if (s != TAOS_LRU_STATUS_OK) {
- return -1;
+ LRUStatus s = taosLRUCacheInsert(ctx->lru, key, strlen(key), blk, cacheMemSize, deleteDataBlockFromLRU, NULL,
+ TAOS_LRU_PRIORITY_LOW);
+ if (s != TAOS_LRU_STATUS_OK) {
+ return -1;
+ }
}
}
total += nread;
@@ -110,7 +123,7 @@ static int idxFileCtxDoReadFrom(IFileCtx* ctx, uint8_t* buf, int len, int32_t of
} while (len > 0);
return total;
}
-static int idxFileCtxGetSize(IFileCtx* ctx) {
+static FORCE_INLINE int idxFileCtxGetSize(IFileCtx* ctx) {
if (ctx->type == TFILE) {
int64_t file_size = 0;
taosStatFile(ctx->file.buf, &file_size, NULL);
@@ -118,7 +131,7 @@ static int idxFileCtxGetSize(IFileCtx* ctx) {
}
return 0;
}
-static int idxFileCtxDoFlush(IFileCtx* ctx) {
+static FORCE_INLINE int idxFileCtxDoFlush(IFileCtx* ctx) {
if (ctx->type == TFILE) {
taosFsyncFile(ctx->file.pFile);
} else {
@@ -145,9 +158,7 @@ IFileCtx* idxFileCtxCreate(WriterType type, const char* path, bool readOnly, int
} else {
ctx->file.pFile = taosOpenFile(path, TD_FILE_READ);
- int64_t size = 0;
taosFStatFile(ctx->file.pFile, &ctx->file.size, NULL);
- ctx->file.size = (int)size;
#ifdef USE_MMAP
ctx->file.ptr = (char*)tfMmapReadOnly(ctx->file.pFile, ctx->file.size);
#endif
@@ -210,9 +221,7 @@ IdxFstFile* idxFileCreate(void* wrt) {
return cw;
}
void idxFileDestroy(IdxFstFile* cw) {
- // free wrt object: close fd or free mem
idxFileFlush(cw);
- // idxFileCtxDestroy((IFileCtx *)(cw->wrt));
taosMemoryFree(cw);
}
@@ -221,10 +230,8 @@ int idxFileWrite(IdxFstFile* write, uint8_t* buf, uint32_t len) {
return 0;
}
// update checksum
- // write data to file/socket or mem
IFileCtx* ctx = write->wrt;
-
- int nWrite = ctx->write(ctx, buf, len);
+ int nWrite = ctx->write(ctx, buf, len);
assert(nWrite == len);
write->count += len;
diff --git a/source/libs/index/src/indexFstRegister.c b/source/libs/index/src/indexFstRegister.c
index 34efee0d0db510ea1ce50de26c418ae1fd08761e..e0abcadc78a07b0f69ef92003d4304141551865e 100644
--- a/source/libs/index/src/indexFstRegister.c
+++ b/source/libs/index/src/indexFstRegister.c
@@ -16,7 +16,7 @@
#include "indexFstRegistry.h"
#include "os.h"
-uint64_t fstRegistryHash(FstRegistry* registry, FstBuilderNode* bNode) {
+static FORCE_INLINE uint64_t fstRegistryHash(FstRegistry* registry, FstBuilderNode* bNode) {
// TODO(yihaoDeng): refactor later
const uint64_t FNV_PRIME = 1099511628211;
uint64_t h = 14695981039346656037u;
diff --git a/source/libs/index/src/indexFstSparse.c b/source/libs/index/src/indexFstSparse.c
index ebc0cb3637dc14a1968afe7d9669c7eabdf99427..8746b04eab9c2ea46117e1287ebd934a0a5e4eb9 100644
--- a/source/libs/index/src/indexFstSparse.c
+++ b/source/libs/index/src/indexFstSparse.c
@@ -15,7 +15,7 @@
#include "indexFstSparse.h"
-static void sparSetUtil(int32_t *buf, int32_t cap) {
+static FORCE_INLINE void sparSetInitBuf(int32_t *buf, int32_t cap) {
for (int32_t i = 0; i < cap; i++) {
buf[i] = -1;
}
@@ -28,8 +28,8 @@ FstSparseSet *sparSetCreate(int32_t sz) {
ss->dense = (int32_t *)taosMemoryMalloc(sz * sizeof(int32_t));
ss->sparse = (int32_t *)taosMemoryMalloc(sz * sizeof(int32_t));
- sparSetUtil(ss->dense, sz);
- sparSetUtil(ss->sparse, sz);
+ sparSetInitBuf(ss->dense, sz);
+ sparSetInitBuf(ss->sparse, sz);
ss->cap = sz;
@@ -90,7 +90,7 @@ void sparSetClear(FstSparseSet *ss) {
if (ss == NULL) {
return;
}
- sparSetUtil(ss->dense, ss->cap);
- sparSetUtil(ss->sparse, ss->cap);
+ sparSetInitBuf(ss->dense, ss->cap);
+ sparSetInitBuf(ss->sparse, ss->cap);
ss->size = 0;
}
diff --git a/source/libs/index/src/indexTfile.c b/source/libs/index/src/indexTfile.c
index 0a47fc0f167a359b35952f0c1e88af03d544c95d..1fc631e9f36c0fb081060831629c7b715a59f978 100644
--- a/source/libs/index/src/indexTfile.c
+++ b/source/libs/index/src/indexTfile.c
@@ -183,13 +183,14 @@ TFileReader* tfileReaderCreate(IFileCtx* ctx) {
return NULL;
}
reader->ctx = ctx;
+ reader->remove = false;
if (0 != tfileReaderVerify(reader)) {
indexError("invalid tfile, suid:%" PRIu64 ", colName:%s", reader->header.suid, reader->header.colName);
tfileReaderDestroy(reader);
return NULL;
}
- // T_REF_INC(reader);
+
if (0 != tfileReaderLoadHeader(reader)) {
indexError("failed to load index header, suid:%" PRIu64 ", colName:%s", reader->header.suid,
reader->header.colName);
@@ -203,7 +204,6 @@ TFileReader* tfileReaderCreate(IFileCtx* ctx) {
tfileReaderDestroy(reader);
return NULL;
}
- reader->remove = false;
return reader;
}
@@ -211,7 +211,6 @@ void tfileReaderDestroy(TFileReader* reader) {
if (reader == NULL) {
return;
}
- // T_REF_INC(reader);
fstDestroy(reader->fst);
if (reader->remove) {
indexInfo("%s is removed", reader->ctx->file.buf);
@@ -222,6 +221,7 @@ void tfileReaderDestroy(TFileReader* reader) {
taosMemoryFree(reader);
}
+
static int32_t tfSearchTerm(void* reader, SIndexTerm* tem, SIdxTRslt* tr) {
int ret = 0;
char* p = tem->colVal;
@@ -323,10 +323,6 @@ static int32_t tfSearchCompareFunc(void* reader, SIndexTerm* tem, SIdxTRslt* tr,
while ((rt = stmStNextWith(st, NULL)) != NULL) {
FstSlice* s = &rt->data;
char* ch = (char*)fstSliceData(s, NULL);
- // if (0 != strncmp(ch, tem->colName, tem->nColName)) {
- // swsResultDestroy(rt);
- // break;
- //}
TExeCond cond = cmpFn(ch, p, tem->colType);
if (MATCH == cond) {
@@ -494,7 +490,6 @@ int tfileReaderSearch(TFileReader* reader, SIndexTermQuery* query, SIdxTRslt* tr
TFileWriter* tfileWriterOpen(char* path, uint64_t suid, int64_t version, const char* colName, uint8_t colType) {
char fullname[256] = {0};
tfileGenFileFullName(fullname, path, suid, colName, version);
- // indexInfo("open write file name %s", fullname);
IFileCtx* wcx = idxFileCtxCreate(TFILE, fullname, false, 1024 * 1024 * 64);
if (wcx == NULL) {
return NULL;
@@ -503,8 +498,8 @@ TFileWriter* tfileWriterOpen(char* path, uint64_t suid, int64_t version, const c
TFileHeader tfh = {0};
tfh.suid = suid;
tfh.version = version;
- memcpy(tfh.colName, colName, strlen(colName));
tfh.colType = colType;
+ memcpy(tfh.colName, colName, strlen(colName));
return tfileWriterCreate(wcx, &tfh);
}
@@ -706,7 +701,6 @@ static bool tfileIteratorNext(Iterate* iiter) {
iv->type = ADD_VALUE; // value in tfile always ADD_VALUE
iv->colVal = colVal;
return true;
- // std::string key(ch, sz);
}
static IterateValue* tifileIterateGetValue(Iterate* iter) { return &iter->val; }
@@ -1036,7 +1030,8 @@ static void tfileGenFileName(char* filename, uint64_t suid, const char* col, int
sprintf(filename, "%" PRIu64 "-%s-%" PRId64 ".tindex", suid, col, version);
return;
}
-static void tfileGenFileFullName(char* fullname, const char* path, uint64_t suid, const char* col, int64_t version) {
+static void FORCE_INLINE tfileGenFileFullName(char* fullname, const char* path, uint64_t suid, const char* col,
+ int64_t version) {
char filename[128] = {0};
tfileGenFileName(filename, suid, col, version);
sprintf(fullname, "%s/%s", path, filename);
diff --git a/source/libs/index/src/indexUtil.c b/source/libs/index/src/indexUtil.c
index 3d083c1817f4b8b3930da7d0bed12e278d948d87..cdfb79016f46658e6259cb7d89dc501e386d408f 100644
--- a/source/libs/index/src/indexUtil.c
+++ b/source/libs/index/src/indexUtil.c
@@ -21,7 +21,7 @@ typedef struct MergeIndex {
int len;
} MergeIndex;
-static int iBinarySearch(SArray *arr, int s, int e, uint64_t k) {
+static FORCE_INLINE int iBinarySearch(SArray *arr, int s, int e, uint64_t k) {
uint64_t v;
int32_t m;
while (s <= e) {
diff --git a/source/libs/index/test/CMakeLists.txt b/source/libs/index/test/CMakeLists.txt
index b3eca280032e56004e649b2d2cef44ec1672d8ac..2bc7353aa51e85cf9c9e1c27607e10d76337ff58 100644
--- a/source/libs/index/test/CMakeLists.txt
+++ b/source/libs/index/test/CMakeLists.txt
@@ -80,6 +80,11 @@ IF(NOT TD_DARWIN)
"${TD_SOURCE_DIR}/include/libs/index"
"${CMAKE_CURRENT_SOURCE_DIR}/../inc"
)
+ target_include_directories (idxJsonUT
+ PUBLIC
+ "${TD_SOURCE_DIR}/include/libs/index"
+ "${CMAKE_CURRENT_SOURCE_DIR}/../inc"
+ )
target_link_libraries (idxTest
os
@@ -102,11 +107,7 @@ IF(NOT TD_DARWIN)
gtest_main
index
)
- target_include_directories (idxJsonUT
- PUBLIC
- "${TD_SOURCE_DIR}/include/libs/index"
- "${CMAKE_CURRENT_SOURCE_DIR}/../inc"
- )
+
target_link_libraries (idxTest
os
util
diff --git a/source/libs/index/test/indexBench.cc b/source/libs/index/test/indexBench.cc
new file mode 100644
index 0000000000000000000000000000000000000000..b828be0ffe97ee94c6b19e52c71d049ae023b66a
--- /dev/null
+++ b/source/libs/index/test/indexBench.cc
@@ -0,0 +1,140 @@
+/*
+ * Copyright (c) 2019 TAOS Data, Inc.
+ *
+ * This program is free software: you can use, redistribute, and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3 * or later ("AGPL"), as published by the Free
+ * Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see .
+ */
+#include
+#include
+#include
+#include
+#include
+#include "index.h"
+#include "indexCache.h"
+#include "indexFst.h"
+#include "indexFstUtil.h"
+#include "indexInt.h"
+#include "indexTfile.h"
+#include "indexUtil.h"
+#include "tskiplist.h"
+#include "tutil.h"
+using namespace std;
+
+static std::string logDir = TD_TMP_DIR_PATH "log";
+
+static void initLog() {
+ const char *defaultLogFileNamePrefix = "taoslog";
+ const int32_t maxLogFileNum = 10;
+
+ tsAsyncLog = 0;
+ idxDebugFlag = 143;
+ strcpy(tsLogDir, logDir.c_str());
+ taosRemoveDir(tsLogDir);
+ taosMkDir(tsLogDir);
+
+ if (taosInitLog(defaultLogFileNamePrefix, maxLogFileNum) < 0) {
+ printf("failed to open log file in directory:%s\n", tsLogDir);
+ }
+}
+
+struct WriteBatch {
+ SIndexMultiTerm *terms;
+};
+class Idx {
+ public:
+ Idx(int _cacheSize = 1024 * 1024 * 4, const char *_path = "tindex") {
+ opts.cacheSize = _cacheSize;
+ path += TD_TMP_DIR_PATH;
+ path += _path;
+ }
+ int SetUp(bool remove) {
+ initLog();
+
+ if (remove) taosRemoveDir(path.c_str());
+
+ int ret = indexJsonOpen(&opts, path.c_str(), &index);
+ return ret;
+ }
+ int Write(WriteBatch *batch, uint64_t uid) {
+ // write batch
+ indexJsonPut(index, batch->terms, uid);
+ return 0;
+ }
+ int Read(const char *json, void *key, int64_t *id) {
+ // read batch
+ return 0;
+ }
+
+ void TearDown() { indexJsonClose(index); }
+
+ std::string path;
+
+ SIndexOpts opts;
+ SIndex *index;
+};
+
+SIndexTerm *indexTermCreateT(int64_t suid, SIndexOperOnColumn oper, uint8_t colType, const char *colName,
+ int32_t nColName, const char *colVal, int32_t nColVal) {
+ char buf[256] = {0};
+ int16_t sz = nColVal;
+ memcpy(buf, (uint16_t *)&sz, 2);
+ memcpy(buf + 2, colVal, nColVal);
+ if (colType == TSDB_DATA_TYPE_BINARY) {
+ return indexTermCreate(suid, oper, colType, colName, nColName, buf, sizeof(buf));
+ } else {
+ return indexTermCreate(suid, oper, colType, colName, nColName, colVal, nColVal);
+ }
+ return NULL;
+}
+int initWriteBatch(WriteBatch *wb, int batchSize) {
+ SIndexMultiTerm *terms = indexMultiTermCreate();
+
+ std::string colName;
+ std::string colVal;
+
+ for (int i = 0; i < 64; i++) {
+ colName += '0' + i;
+ colVal += '0' + i;
+ }
+
+ for (int i = 0; i < batchSize; i++) {
+ colVal[i % colVal.size()] = '0' + i % 128;
+ colName[i % colName.size()] = '0' + i % 128;
+ SIndexTerm *term = indexTermCreateT(0, ADD_VALUE, TSDB_DATA_TYPE_BINARY, colName.c_str(), colName.size(),
+ colVal.c_str(), colVal.size());
+ indexMultiTermAdd(terms, term);
+ }
+
+ wb->terms = terms;
+ return 0;
+}
+
+int BenchWrite(Idx *idx, int batchSize, int limit) {
+ for (int i = 0; i < limit; i += batchSize) {
+ WriteBatch wb;
+ idx->Write(&wb, i);
+ }
+ return 0;
+}
+
+int BenchRead(Idx *idx) { return 0; }
+
+int main() {
+ // Idx *idx = new Idx;
+ // if (idx->SetUp(true) != 0) {
+ // std::cout << "failed to setup index" << std::endl;
+ // return 0;
+ // } else {
+ // std::cout << "succ to setup index" << std::endl;
+ // }
+ // BenchWrite(idx, 100, 10000);
+ return 1;
+}
diff --git a/source/libs/index/test/indexTests.cc b/source/libs/index/test/indexTests.cc
index 5b76de2ef89fdce4780fcf94a1360d68f7684a9e..08bf84ff60fdc07393abf546630c67dd52f6abc1 100644
--- a/source/libs/index/test/indexTests.cc
+++ b/source/libs/index/test/indexTests.cc
@@ -271,20 +271,20 @@ void validateFst() {
}
delete m;
}
-static std::string logDir = TD_TMP_DIR_PATH "log";
-
-static void initLog() {
- const char* defaultLogFileNamePrefix = "taoslog";
- const int32_t maxLogFileNum = 10;
- tsAsyncLog = 0;
- idxDebugFlag = 143;
- strcpy(tsLogDir, logDir.c_str());
- taosRemoveDir(tsLogDir);
- taosMkDir(tsLogDir);
-
- if (taosInitLog(defaultLogFileNamePrefix, maxLogFileNum) < 0) {
- printf("failed to open log file in directory:%s\n", tsLogDir);
+static std::string logDir = TD_TMP_DIR_PATH "log";
+static void initLog() {
+ const char* defaultLogFileNamePrefix = "taoslog";
+ const int32_t maxLogFileNum = 10;
+
+ tsAsyncLog = 0;
+ idxDebugFlag = 143;
+ strcpy(tsLogDir, logDir.c_str());
+ taosRemoveDir(tsLogDir);
+ taosMkDir(tsLogDir);
+
+ if (taosInitLog(defaultLogFileNamePrefix, maxLogFileNum) < 0) {
+ printf("failed to open log file in directory:%s\n", tsLogDir);
}
}
class IndexEnv : public ::testing::Test {
diff --git a/source/libs/index/test/jsonUT.cc b/source/libs/index/test/jsonUT.cc
index 1911514d9771e477ea0cc04ac30b647b095e4ef5..8ae3fd41357da90b426eed6f2cd8b1c8c358d154 100644
--- a/source/libs/index/test/jsonUT.cc
+++ b/source/libs/index/test/jsonUT.cc
@@ -172,9 +172,9 @@ TEST_F(JsonEnv, testWriteMillonData) {
{
std::string colName("voltagefdadfa");
std::string colVal("abxxxxxxxxxxxx");
- for (int i = 0; i < 10; i++) {
+ for (int i = 0; i < 10000; i++) {
colVal[i % colVal.size()] = '0' + i % 128;
- for (size_t i = 0; i < 100; i++) {
+ for (size_t i = 0; i < 10; i++) {
SIndexTerm* term = indexTermCreateT(1, ADD_VALUE, TSDB_DATA_TYPE_BINARY, colName.c_str(), colName.size(),
colVal.c_str(), colVal.size());
diff --git a/source/libs/monitor/src/monMsg.c b/source/libs/monitor/src/monMsg.c
index 8fa7e8860509ca473ad41b1c89efbf430f0c2649..bbee8b1166903bfcafb611baf7b2bf8ed8b48699 100644
--- a/source/libs/monitor/src/monMsg.c
+++ b/source/libs/monitor/src/monMsg.c
@@ -510,6 +510,7 @@ int32_t tSerializeSMonVloadInfo(void *buf, int32_t bufLen, SMonVloadInfo *pInfo)
SVnodeLoad *pLoad = taosArrayGet(pInfo->pVloads, i);
if (tEncodeI32(&encoder, pLoad->vgId) < 0) return -1;
if (tEncodeI32(&encoder, pLoad->syncState) < 0) return -1;
+ if (tEncodeI64(&encoder, pLoad->cacheUsage) < 0) return -1;
if (tEncodeI64(&encoder, pLoad->numOfTables) < 0) return -1;
if (tEncodeI64(&encoder, pLoad->numOfTimeSeries) < 0) return -1;
if (tEncodeI64(&encoder, pLoad->totalStorage) < 0) return -1;
@@ -544,6 +545,7 @@ int32_t tDeserializeSMonVloadInfo(void *buf, int32_t bufLen, SMonVloadInfo *pInf
SVnodeLoad load = {0};
if (tDecodeI32(&decoder, &load.vgId) < 0) return -1;
if (tDecodeI32(&decoder, &load.syncState) < 0) return -1;
+ if (tDecodeI64(&decoder, &load.cacheUsage) < 0) return -1;
if (tDecodeI64(&decoder, &load.numOfTables) < 0) return -1;
if (tDecodeI64(&decoder, &load.numOfTimeSeries) < 0) return -1;
if (tDecodeI64(&decoder, &load.totalStorage) < 0) return -1;
@@ -594,7 +596,6 @@ int32_t tDeserializeSMonMloadInfo(void *buf, int32_t bufLen, SMonMloadInfo *pInf
return 0;
}
-
int32_t tSerializeSQnodeLoad(void *buf, int32_t bufLen, SQnodeLoad *pInfo) {
SEncoder encoder = {0};
tEncoderInit(&encoder, buf, bufLen);
@@ -639,5 +640,3 @@ int32_t tDeserializeSQnodeLoad(void *buf, int32_t bufLen, SQnodeLoad *pInfo) {
tDecoderClear(&decoder);
return 0;
}
-
-
diff --git a/source/libs/nodes/src/nodesCloneFuncs.c b/source/libs/nodes/src/nodesCloneFuncs.c
index 83bccbffb4b973fe3a4b720219ab0bb91d6f05b6..7cad5df3a19339aab6fb96ec9fef5fdc6fb05f46 100644
--- a/source/libs/nodes/src/nodesCloneFuncs.c
+++ b/source/libs/nodes/src/nodesCloneFuncs.c
@@ -324,6 +324,21 @@ static int32_t fillNodeCopy(const SFillNode* pSrc, SFillNode* pDst) {
return TSDB_CODE_SUCCESS;
}
+static int32_t whenThenNodeCopy(const SWhenThenNode* pSrc, SWhenThenNode* pDst) {
+ COPY_BASE_OBJECT_FIELD(node, exprNodeCopy);
+ CLONE_NODE_FIELD(pWhen);
+ CLONE_NODE_FIELD(pThen);
+ return TSDB_CODE_SUCCESS;
+}
+
+static int32_t caseWhenNodeCopy(const SCaseWhenNode* pSrc, SCaseWhenNode* pDst) {
+ COPY_BASE_OBJECT_FIELD(node, exprNodeCopy);
+ CLONE_NODE_FIELD(pCase);
+ CLONE_NODE_FIELD(pElse);
+ CLONE_NODE_LIST_FIELD(pWhenThenList);
+ return TSDB_CODE_SUCCESS;
+}
+
static int32_t logicNodeCopy(const SLogicNode* pSrc, SLogicNode* pDst) {
CLONE_NODE_LIST_FIELD(pTargets);
CLONE_NODE_FIELD(pConditions);
@@ -366,6 +381,8 @@ static int32_t logicScanCopy(const SScanLogicNode* pSrc, SScanLogicNode* pDst) {
COPY_SCALAR_FIELD(igExpired);
CLONE_NODE_LIST_FIELD(pGroupTags);
COPY_SCALAR_FIELD(groupSort);
+ CLONE_NODE_LIST_FIELD(pTags);
+ CLONE_NODE_FIELD(pSubtable);
return TSDB_CODE_SUCCESS;
}
@@ -399,6 +416,8 @@ static int32_t logicVnodeModifCopy(const SVnodeModifyLogicNode* pSrc, SVnodeModi
COPY_SCALAR_FIELD(modifyType);
COPY_SCALAR_FIELD(msgType);
CLONE_NODE_FIELD(pAffectedRows);
+ CLONE_NODE_FIELD(pStartTs);
+ CLONE_NODE_FIELD(pEndTs);
COPY_SCALAR_FIELD(tableId);
COPY_SCALAR_FIELD(stableId);
COPY_SCALAR_FIELD(tableType);
@@ -412,7 +431,8 @@ static int32_t logicVnodeModifCopy(const SVnodeModifyLogicNode* pSrc, SVnodeModi
static int32_t logicExchangeCopy(const SExchangeLogicNode* pSrc, SExchangeLogicNode* pDst) {
COPY_BASE_OBJECT_FIELD(node, logicNodeCopy);
- COPY_SCALAR_FIELD(srcGroupId);
+ COPY_SCALAR_FIELD(srcStartGroupId);
+ COPY_SCALAR_FIELD(srcEndGroupId);
return TSDB_CODE_SUCCESS;
}
@@ -470,6 +490,8 @@ static int32_t logicSortCopy(const SSortLogicNode* pSrc, SSortLogicNode* pDst) {
static int32_t logicPartitionCopy(const SPartitionLogicNode* pSrc, SPartitionLogicNode* pDst) {
COPY_BASE_OBJECT_FIELD(node, logicNodeCopy);
CLONE_NODE_LIST_FIELD(pPartitionKeys);
+ CLONE_NODE_LIST_FIELD(pTags);
+ CLONE_NODE_FIELD(pSubtable);
return TSDB_CODE_SUCCESS;
}
@@ -609,6 +631,7 @@ static int32_t downstreamSourceCopy(const SDownstreamSourceNode* pSrc, SDownstre
COPY_SCALAR_FIELD(schedId);
COPY_SCALAR_FIELD(execId);
COPY_SCALAR_FIELD(fetchMsgType);
+ COPY_SCALAR_FIELD(localExec);
return TSDB_CODE_SUCCESS;
}
@@ -709,6 +732,12 @@ SNode* nodesCloneNode(const SNode* pNode) {
case QUERY_NODE_LEFT_VALUE:
code = TSDB_CODE_SUCCESS;
break;
+ case QUERY_NODE_WHEN_THEN:
+ code = whenThenNodeCopy((const SWhenThenNode*)pNode, (SWhenThenNode*)pDst);
+ break;
+ case QUERY_NODE_CASE_WHEN:
+ code = caseWhenNodeCopy((const SCaseWhenNode*)pNode, (SCaseWhenNode*)pDst);
+ break;
case QUERY_NODE_SELECT_STMT:
code = selectStmtCopy((const SSelectStmt*)pNode, (SSelectStmt*)pDst);
break;
@@ -777,6 +806,7 @@ SNode* nodesCloneNode(const SNode* pNode) {
code = physiSessionCopy((const SSessionWinodwPhysiNode*)pNode, (SSessionWinodwPhysiNode*)pDst);
break;
case QUERY_NODE_PHYSICAL_PLAN_PARTITION:
+ case QUERY_NODE_PHYSICAL_PLAN_STREAM_PARTITION:
code = physiPartitionCopy((const SPartitionPhysiNode*)pNode, (SPartitionPhysiNode*)pDst);
break;
default:
diff --git a/source/libs/nodes/src/nodesCodeFuncs.c b/source/libs/nodes/src/nodesCodeFuncs.c
index 822bdec365e6f5128d7c36cf6b7d765eeb2488de..e401a3da7fe4fdb9815eae3fedfec9e5b281689d 100644
--- a/source/libs/nodes/src/nodesCodeFuncs.c
+++ b/source/libs/nodes/src/nodesCodeFuncs.c
@@ -81,6 +81,10 @@ const char* nodesNodeName(ENodeType type) {
return "IndexOptions";
case QUERY_NODE_LEFT_VALUE:
return "LeftValue";
+ case QUERY_NODE_WHEN_THEN:
+ return "WhenThen";
+ case QUERY_NODE_CASE_WHEN:
+ return "CaseWhen";
case QUERY_NODE_SET_OPERATOR:
return "SetOperator";
case QUERY_NODE_SELECT_STMT:
@@ -250,6 +254,7 @@ const char* nodesNodeName(ENodeType type) {
case QUERY_NODE_PHYSICAL_PLAN_STREAM_SEMI_INTERVAL:
return "PhysiStreamSemiInterval";
case QUERY_NODE_PHYSICAL_PLAN_FILL:
+ case QUERY_NODE_PHYSICAL_PLAN_STREAM_FILL:
return "PhysiFill";
case QUERY_NODE_PHYSICAL_PLAN_MERGE_SESSION:
return "PhysiSessionWindow";
@@ -265,6 +270,8 @@ const char* nodesNodeName(ENodeType type) {
return "PhysiStreamStateWindow";
case QUERY_NODE_PHYSICAL_PLAN_PARTITION:
return "PhysiPartition";
+ case QUERY_NODE_PHYSICAL_PLAN_STREAM_PARTITION:
+ return "PhysiStreamPartition";
case QUERY_NODE_PHYSICAL_PLAN_INDEF_ROWS_FUNC:
return "PhysiIndefRowsFunc";
case QUERY_NODE_PHYSICAL_PLAN_INTERP_FUNC:
@@ -360,18 +367,14 @@ static int32_t jsonToTableComInfo(const SJson* pJson, void* pObj) {
int32_t code;
tjsonGetNumberValue(pJson, jkTableComInfoNumOfTags, pNode->numOfTags, code);
- ;
if (TSDB_CODE_SUCCESS == code) {
tjsonGetNumberValue(pJson, jkTableComInfoPrecision, pNode->precision, code);
- ;
}
if (TSDB_CODE_SUCCESS == code) {
tjsonGetNumberValue(pJson, jkTableComInfoNumOfColumns, pNode->numOfColumns, code);
- ;
}
if (TSDB_CODE_SUCCESS == code) {
tjsonGetNumberValue(pJson, jkTableComInfoRowSize, pNode->rowSize, code);
- ;
}
return code;
@@ -404,14 +407,11 @@ static int32_t jsonToSchema(const SJson* pJson, void* pObj) {
int32_t code;
tjsonGetNumberValue(pJson, jkSchemaType, pNode->type, code);
- ;
if (TSDB_CODE_SUCCESS == code) {
tjsonGetNumberValue(pJson, jkSchemaColId, pNode->colId, code);
- ;
}
if (TSDB_CODE_SUCCESS == code) {
tjsonGetNumberValue(pJson, jkSchemaBytes, pNode->bytes, code);
- ;
}
if (TSDB_CODE_SUCCESS == code) {
code = tjsonGetStringValue(pJson, jkSchemaName, pNode->name);
@@ -464,26 +464,20 @@ static int32_t jsonToTableMeta(const SJson* pJson, void* pObj) {
int32_t code;
tjsonGetNumberValue(pJson, jkTableMetaVgId, pNode->vgId, code);
- ;
if (TSDB_CODE_SUCCESS == code) {
tjsonGetNumberValue(pJson, jkTableMetaTableType, pNode->tableType, code);
- ;
}
if (TSDB_CODE_SUCCESS == code) {
tjsonGetNumberValue(pJson, jkTableMetaUid, pNode->uid, code);
- ;
}
if (TSDB_CODE_SUCCESS == code) {
tjsonGetNumberValue(pJson, jkTableMetaSuid, pNode->suid, code);
- ;
}
if (TSDB_CODE_SUCCESS == code) {
tjsonGetNumberValue(pJson, jkTableMetaSversion, pNode->sversion, code);
- ;
}
if (TSDB_CODE_SUCCESS == code) {
tjsonGetNumberValue(pJson, jkTableMetaTversion, pNode->tversion, code);
- ;
}
if (TSDB_CODE_SUCCESS == code) {
code = tjsonToObject(pJson, jkTableMetaComInfo, jsonToTableComInfo, &pNode->tableInfo);
@@ -570,6 +564,8 @@ static const char* jkScanLogicPlanStableId = "StableId";
static const char* jkScanLogicPlanScanType = "ScanType";
static const char* jkScanLogicPlanScanCount = "ScanCount";
static const char* jkScanLogicPlanReverseScanCount = "ReverseScanCount";
+static const char* jkScanLogicPlanDynamicScanFuncs = "DynamicScanFuncs";
+static const char* jkScanLogicPlanDataRequired = "DataRequired";
static const char* jkScanLogicPlanTagCond = "TagCond";
static const char* jkScanLogicPlanGroupTags = "GroupTags";
@@ -601,6 +597,12 @@ static int32_t logicScanNodeToJson(const void* pObj, SJson* pJson) {
if (TSDB_CODE_SUCCESS == code) {
code = tjsonAddIntegerToObject(pJson, jkScanLogicPlanReverseScanCount, pNode->scanSeq[1]);
}
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tjsonAddObject(pJson, jkScanLogicPlanDynamicScanFuncs, nodeToJson, pNode->pDynamicScanFuncs);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tjsonAddIntegerToObject(pJson, jkScanLogicPlanDataRequired, pNode->dataRequired);
+ }
if (TSDB_CODE_SUCCESS == code) {
code = tjsonAddObject(pJson, jkScanLogicPlanTagCond, nodeToJson, pNode->pTagCond);
}
@@ -640,6 +642,12 @@ static int32_t jsonToLogicScanNode(const SJson* pJson, void* pObj) {
if (TSDB_CODE_SUCCESS == code) {
code = tjsonGetUTinyIntValue(pJson, jkScanLogicPlanReverseScanCount, &pNode->scanSeq[1]);
}
+ if (TSDB_CODE_SUCCESS == code) {
+ code = jsonToNodeList(pJson, jkScanLogicPlanDynamicScanFuncs, &pNode->pDynamicScanFuncs);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tjsonGetIntValue(pJson, jkScanLogicPlanDataRequired, &pNode->dataRequired);
+ }
if (TSDB_CODE_SUCCESS == code) {
code = jsonToNodeObject(pJson, jkScanLogicPlanTagCond, &pNode->pTagCond);
}
@@ -719,14 +727,18 @@ static int32_t jsonToLogicVnodeModifyNode(const SJson* pJson, void* pObj) {
return code;
}
-static const char* jkExchangeLogicPlanSrcGroupId = "SrcGroupId";
+static const char* jkExchangeLogicPlanSrcStartGroupId = "SrcStartGroupId";
+static const char* jkExchangeLogicPlanSrcEndGroupId = "SrcEndGroupId";
static int32_t logicExchangeNodeToJson(const void* pObj, SJson* pJson) {
const SExchangeLogicNode* pNode = (const SExchangeLogicNode*)pObj;
int32_t code = logicPlanNodeToJson(pObj, pJson);
if (TSDB_CODE_SUCCESS == code) {
- code = tjsonAddIntegerToObject(pJson, jkExchangeLogicPlanSrcGroupId, pNode->srcGroupId);
+ code = tjsonAddIntegerToObject(pJson, jkExchangeLogicPlanSrcStartGroupId, pNode->srcStartGroupId);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tjsonAddIntegerToObject(pJson, jkExchangeLogicPlanSrcEndGroupId, pNode->srcEndGroupId);
}
return code;
@@ -737,7 +749,10 @@ static int32_t jsonToLogicExchangeNode(const SJson* pJson, void* pObj) {
int32_t code = jsonToLogicPlanNode(pJson, pObj);
if (TSDB_CODE_SUCCESS == code) {
- code = tjsonGetIntValue(pJson, jkExchangeLogicPlanSrcGroupId, &pNode->srcGroupId);
+ code = tjsonGetIntValue(pJson, jkExchangeLogicPlanSrcStartGroupId, &pNode->srcStartGroupId);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tjsonGetIntValue(pJson, jkExchangeLogicPlanSrcEndGroupId, &pNode->srcEndGroupId);
}
return code;
@@ -924,7 +939,6 @@ static int32_t jsonToLogicFillNode(const SJson* pJson, void* pObj) {
int32_t code = jsonToLogicPlanNode(pJson, pObj);
if (TSDB_CODE_SUCCESS == code) {
tjsonGetNumberValue(pJson, jkFillLogicPlanMode, pNode->mode, code);
- ;
}
if (TSDB_CODE_SUCCESS == code) {
code = jsonToNodeObject(pJson, jkFillLogicPlanWStartTs, &pNode->pWStartTs);
@@ -1525,6 +1539,8 @@ static const char* jkTableScanPhysiPlanWatermark = "Watermark";
static const char* jkTableScanPhysiPlanIgnoreExpired = "IgnoreExpired";
static const char* jkTableScanPhysiPlanGroupTags = "GroupTags";
static const char* jkTableScanPhysiPlanGroupSort = "GroupSort";
+static const char* jkTableScanPhysiPlanTags = "Tags";
+static const char* jkTableScanPhysiPlanSubtable = "Subtable";
static const char* jkTableScanPhysiPlanAssignBlockUid = "AssignBlockUid";
static int32_t physiTableScanNodeToJson(const void* pObj, SJson* pJson) {
@@ -1582,6 +1598,12 @@ static int32_t physiTableScanNodeToJson(const void* pObj, SJson* pJson) {
if (TSDB_CODE_SUCCESS == code) {
code = tjsonAddBoolToObject(pJson, jkTableScanPhysiPlanGroupSort, pNode->groupSort);
}
+ if (TSDB_CODE_SUCCESS == code) {
+ code = nodeListToJson(pJson, jkTableScanPhysiPlanTags, pNode->pTags);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tjsonAddObject(pJson, jkTableScanPhysiPlanSubtable, nodeToJson, pNode->pSubtable);
+ }
if (TSDB_CODE_SUCCESS == code) {
code = tjsonAddBoolToObject(pJson, jkTableScanPhysiPlanAssignBlockUid, pNode->assignBlockUid);
}
@@ -1644,6 +1666,12 @@ static int32_t jsonToPhysiTableScanNode(const SJson* pJson, void* pObj) {
if (TSDB_CODE_SUCCESS == code) {
code = tjsonGetBoolValue(pJson, jkTableScanPhysiPlanGroupSort, &pNode->groupSort);
}
+ if (TSDB_CODE_SUCCESS == code) {
+ code = jsonToNodeList(pJson, jkTableScanPhysiPlanTags, &pNode->pTags);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = jsonToNodeObject(pJson, jkTableScanPhysiPlanSubtable, &pNode->pSubtable);
+ }
if (TSDB_CODE_SUCCESS == code) {
code = tjsonGetBoolValue(pJson, jkTableScanPhysiPlanAssignBlockUid, &pNode->assignBlockUid);
}
@@ -1831,7 +1859,8 @@ static int32_t jsonToPhysiAggNode(const SJson* pJson, void* pObj) {
return code;
}
-static const char* jkExchangePhysiPlanSrcGroupId = "SrcGroupId";
+static const char* jkExchangePhysiPlanSrcStartGroupId = "SrcStartGroupId";
+static const char* jkExchangePhysiPlanSrcEndGroupId = "SrcEndGroupId";
static const char* jkExchangePhysiPlanSrcEndPoints = "SrcEndPoints";
static int32_t physiExchangeNodeToJson(const void* pObj, SJson* pJson) {
@@ -1839,7 +1868,10 @@ static int32_t physiExchangeNodeToJson(const void* pObj, SJson* pJson) {
int32_t code = physicPlanNodeToJson(pObj, pJson);
if (TSDB_CODE_SUCCESS == code) {
- code = tjsonAddIntegerToObject(pJson, jkExchangePhysiPlanSrcGroupId, pNode->srcGroupId);
+ code = tjsonAddIntegerToObject(pJson, jkExchangePhysiPlanSrcStartGroupId, pNode->srcStartGroupId);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tjsonAddIntegerToObject(pJson, jkExchangePhysiPlanSrcEndGroupId, pNode->srcEndGroupId);
}
if (TSDB_CODE_SUCCESS == code) {
code = nodeListToJson(pJson, jkExchangePhysiPlanSrcEndPoints, pNode->pSrcEndPoints);
@@ -1853,7 +1885,10 @@ static int32_t jsonToPhysiExchangeNode(const SJson* pJson, void* pObj) {
int32_t code = jsonToPhysicPlanNode(pJson, pObj);
if (TSDB_CODE_SUCCESS == code) {
- code = tjsonGetIntValue(pJson, jkExchangePhysiPlanSrcGroupId, &pNode->srcGroupId);
+ code = tjsonGetIntValue(pJson, jkExchangePhysiPlanSrcStartGroupId, &pNode->srcStartGroupId);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tjsonGetIntValue(pJson, jkExchangePhysiPlanSrcEndGroupId, &pNode->srcEndGroupId);
}
if (TSDB_CODE_SUCCESS == code) {
code = jsonToNodeList(pJson, jkExchangePhysiPlanSrcEndPoints, &pNode->pSrcEndPoints);
@@ -2250,6 +2285,37 @@ static int32_t jsonToPhysiPartitionNode(const SJson* pJson, void* pObj) {
return code;
}
+static const char* jkStreamPartitionPhysiPlanTags = "Tags";
+static const char* jkStreamPartitionPhysiPlanSubtable = "Subtable";
+
+static int32_t physiStreamPartitionNodeToJson(const void* pObj, SJson* pJson) {
+ const SStreamPartitionPhysiNode* pNode = (const SStreamPartitionPhysiNode*)pObj;
+
+ int32_t code = physiPartitionNodeToJson(pObj, pJson);
+ if (TSDB_CODE_SUCCESS == code) {
+ code = nodeListToJson(pJson, jkStreamPartitionPhysiPlanTags, pNode->pTags);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tjsonAddObject(pJson, jkStreamPartitionPhysiPlanSubtable, nodeToJson, pNode->pSubtable);
+ }
+
+ return code;
+}
+
+static int32_t jsonToPhysiStreamPartitionNode(const SJson* pJson, void* pObj) {
+ SStreamPartitionPhysiNode* pNode = (SStreamPartitionPhysiNode*)pObj;
+
+ int32_t code = jsonToPhysiPartitionNode(pJson, pObj);
+ if (TSDB_CODE_SUCCESS == code) {
+ code = jsonToNodeList(pJson, jkStreamPartitionPhysiPlanTags, &pNode->pTags);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = jsonToNodeObject(pJson, jkStreamPartitionPhysiPlanSubtable, &pNode->pSubtable);
+ }
+
+ return code;
+}
+
static const char* jkIndefRowsFuncPhysiPlanExprs = "Exprs";
static const char* jkIndefRowsFuncPhysiPlanFuncs = "Funcs";
@@ -2443,6 +2509,8 @@ static const char* jkDeletePhysiPlanTsColName = "TsColName";
static const char* jkDeletePhysiPlanDeleteTimeRangeStartKey = "DeleteTimeRangeStartKey";
static const char* jkDeletePhysiPlanDeleteTimeRangeEndKey = "DeleteTimeRangeEndKey";
static const char* jkDeletePhysiPlanAffectedRows = "AffectedRows";
+static const char* jkDeletePhysiPlanStartTs = "StartTs";
+static const char* jkDeletePhysiPlanEndTs = "EndTs";
static int32_t physiDeleteNodeToJson(const void* pObj, SJson* pJson) {
const SDataDeleterNode* pNode = (const SDataDeleterNode*)pObj;
@@ -2469,6 +2537,12 @@ static int32_t physiDeleteNodeToJson(const void* pObj, SJson* pJson) {
if (TSDB_CODE_SUCCESS == code) {
code = tjsonAddObject(pJson, jkDeletePhysiPlanAffectedRows, nodeToJson, pNode->pAffectedRows);
}
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tjsonAddObject(pJson, jkDeletePhysiPlanStartTs, nodeToJson, pNode->pStartTs);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tjsonAddObject(pJson, jkDeletePhysiPlanEndTs, nodeToJson, pNode->pEndTs);
+ }
return code;
}
@@ -2498,6 +2572,12 @@ static int32_t jsonToPhysiDeleteNode(const SJson* pJson, void* pObj) {
if (TSDB_CODE_SUCCESS == code) {
code = jsonToNodeObject(pJson, jkDeletePhysiPlanAffectedRows, &pNode->pAffectedRows);
}
+ if (TSDB_CODE_SUCCESS == code) {
+ code = jsonToNodeObject(pJson, jkDeletePhysiPlanStartTs, &pNode->pStartTs);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = jsonToNodeObject(pJson, jkDeletePhysiPlanEndTs, &pNode->pEndTs);
+ }
return code;
}
@@ -2813,7 +2893,6 @@ static int32_t jsonToColumnNode(const SJson* pJson, void* pObj) {
}
if (TSDB_CODE_SUCCESS == code) {
tjsonGetNumberValue(pJson, jkColumnColType, pNode->colType, code);
- ;
}
if (TSDB_CODE_SUCCESS == code) {
code = tjsonGetStringValue(pJson, jkColumnDbName, pNode->dbName);
@@ -3116,7 +3195,6 @@ static int32_t jsonToOperatorNode(const SJson* pJson, void* pObj) {
int32_t code = jsonToExprNode(pJson, pObj);
if (TSDB_CODE_SUCCESS == code) {
tjsonGetNumberValue(pJson, jkOperatorType, pNode->opType, code);
- ;
}
if (TSDB_CODE_SUCCESS == code) {
code = jsonToNodeObject(pJson, jkOperatorLeft, &pNode->pLeft);
@@ -3151,7 +3229,6 @@ static int32_t jsonToLogicConditionNode(const SJson* pJson, void* pObj) {
int32_t code = jsonToExprNode(pJson, pObj);
if (TSDB_CODE_SUCCESS == code) {
tjsonGetNumberValue(pJson, jkLogicCondType, pNode->condType, code);
- ;
}
if (TSDB_CODE_SUCCESS == code) {
code = jsonToNodeList(pJson, jkLogicCondParameters, &pNode->pParameterList);
@@ -3440,11 +3517,9 @@ static int32_t jsonToOrderByExprNode(const SJson* pJson, void* pObj) {
int32_t code = jsonToNodeObject(pJson, jkOrderByExprExpr, &pNode->pExpr);
if (TSDB_CODE_SUCCESS == code) {
tjsonGetNumberValue(pJson, jkOrderByExprOrder, pNode->order, code);
- ;
}
if (TSDB_CODE_SUCCESS == code) {
tjsonGetNumberValue(pJson, jkOrderByExprNullOrder, pNode->nullOrder, code);
- ;
}
return code;
@@ -3622,7 +3697,6 @@ static int32_t jsonToFillNode(const SJson* pJson, void* pObj) {
int32_t code;
tjsonGetNumberValue(pJson, jkFillMode, pNode->mode, code);
- ;
if (TSDB_CODE_SUCCESS == code) {
code = jsonToNodeObject(pJson, jkFillValues, &pNode->pValues);
}
@@ -3907,6 +3981,75 @@ static int32_t jsonToDatabaseOptions(const SJson* pJson, void* pObj) {
return code;
}
+static const char* jkWhenThenWhen = "When";
+static const char* jkWhenThenThen = "Then";
+
+static int32_t whenThenNodeToJson(const void* pObj, SJson* pJson) {
+ const SWhenThenNode* pNode = (const SWhenThenNode*)pObj;
+
+ int32_t code = exprNodeToJson(pObj, pJson);
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tjsonAddObject(pJson, jkWhenThenWhen, nodeToJson, pNode->pWhen);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tjsonAddObject(pJson, jkWhenThenThen, nodeToJson, pNode->pThen);
+ }
+
+ return code;
+}
+
+static int32_t jsonToWhenThenNode(const SJson* pJson, void* pObj) {
+ SWhenThenNode* pNode = (SWhenThenNode*)pObj;
+
+ int32_t code = jsonToExprNode(pJson, pObj);
+ if (TSDB_CODE_SUCCESS == code) {
+ code = jsonToNodeObject(pJson, jkWhenThenWhen, &pNode->pWhen);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = jsonToNodeObject(pJson, jkWhenThenThen, &pNode->pThen);
+ }
+
+ return code;
+}
+
+static const char* jkCaseWhenCase = "Case";
+static const char* jkCaseWhenWhenThenList = "WhenThenList";
+static const char* jkCaseWhenElse = "Else";
+
+static int32_t caseWhenNodeToJson(const void* pObj, SJson* pJson) {
+ const SCaseWhenNode* pNode = (const SCaseWhenNode*)pObj;
+
+ int32_t code = exprNodeToJson(pObj, pJson);
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tjsonAddObject(pJson, jkCaseWhenCase, nodeToJson, pNode->pCase);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = nodeListToJson(pJson, jkCaseWhenWhenThenList, pNode->pWhenThenList);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tjsonAddObject(pJson, jkCaseWhenElse, nodeToJson, pNode->pElse);
+ }
+
+ return code;
+}
+
+static int32_t jsonToCaseWhenNode(const SJson* pJson, void* pObj) {
+ SCaseWhenNode* pNode = (SCaseWhenNode*)pObj;
+
+ int32_t code = jsonToExprNode(pJson, pObj);
+ if (TSDB_CODE_SUCCESS == code) {
+ code = jsonToNodeObject(pJson, jkCaseWhenCase, &pNode->pCase);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = jsonToNodeList(pJson, jkCaseWhenWhenThenList, &pNode->pWhenThenList);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = jsonToNodeObject(pJson, jkCaseWhenElse, &pNode->pElse);
+ }
+
+ return code;
+}
+
static const char* jkDataBlockDescDataBlockId = "DataBlockId";
static const char* jkDataBlockDescSlots = "Slots";
static const char* jkDataBlockTotalRowSize = "TotalRowSize";
@@ -4012,6 +4155,8 @@ static const char* jkSelectStmtProjections = "Projections";
static const char* jkSelectStmtFrom = "From";
static const char* jkSelectStmtWhere = "Where";
static const char* jkSelectStmtPartitionBy = "PartitionBy";
+static const char* jkSelectStmtTags = "Tags";
+static const char* jkSelectStmtSubtable = "Subtable";
static const char* jkSelectStmtWindow = "Window";
static const char* jkSelectStmtGroupBy = "GroupBy";
static const char* jkSelectStmtHaving = "Having";
@@ -4037,6 +4182,12 @@ static int32_t selectStmtToJson(const void* pObj, SJson* pJson) {
if (TSDB_CODE_SUCCESS == code) {
code = nodeListToJson(pJson, jkSelectStmtPartitionBy, pNode->pPartitionByList);
}
+ if (TSDB_CODE_SUCCESS == code) {
+ code = nodeListToJson(pJson, jkSelectStmtTags, pNode->pTags);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tjsonAddObject(pJson, jkSelectStmtSubtable, nodeToJson, pNode->pSubtable);
+ }
if (TSDB_CODE_SUCCESS == code) {
code = tjsonAddObject(pJson, jkSelectStmtWindow, nodeToJson, pNode->pWindow);
}
@@ -4081,6 +4232,12 @@ static int32_t jsonToSelectStmt(const SJson* pJson, void* pObj) {
if (TSDB_CODE_SUCCESS == code) {
code = jsonToNodeList(pJson, jkSelectStmtPartitionBy, &pNode->pPartitionByList);
}
+ if (TSDB_CODE_SUCCESS == code) {
+ code = jsonToNodeList(pJson, jkSelectStmtTags, &pNode->pTags);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = jsonToNodeObject(pJson, jkSelectStmtSubtable, &pNode->pSubtable);
+ }
if (TSDB_CODE_SUCCESS == code) {
code = jsonToNodeObject(pJson, jkSelectStmtWindow, &pNode->pWindow);
}
@@ -4389,6 +4546,10 @@ static int32_t specificNodeToJson(const void* pObj, SJson* pJson) {
return databaseOptionsToJson(pObj, pJson);
case QUERY_NODE_LEFT_VALUE:
return TSDB_CODE_SUCCESS; // SLeftValueNode has no fields to serialize.
+ case QUERY_NODE_WHEN_THEN:
+ return whenThenNodeToJson(pObj, pJson);
+ case QUERY_NODE_CASE_WHEN:
+ return caseWhenNodeToJson(pObj, pJson);
case QUERY_NODE_SET_OPERATOR:
return setOperatorToJson(pObj, pJson);
case QUERY_NODE_SELECT_STMT:
@@ -4475,6 +4636,7 @@ static int32_t specificNodeToJson(const void* pObj, SJson* pJson) {
case QUERY_NODE_PHYSICAL_PLAN_STREAM_SEMI_INTERVAL:
return physiIntervalNodeToJson(pObj, pJson);
case QUERY_NODE_PHYSICAL_PLAN_FILL:
+ case QUERY_NODE_PHYSICAL_PLAN_STREAM_FILL:
return physiFillNodeToJson(pObj, pJson);
case QUERY_NODE_PHYSICAL_PLAN_MERGE_SESSION:
case QUERY_NODE_PHYSICAL_PLAN_STREAM_SESSION:
@@ -4486,6 +4648,8 @@ static int32_t specificNodeToJson(const void* pObj, SJson* pJson) {
return physiStateWindowNodeToJson(pObj, pJson);
case QUERY_NODE_PHYSICAL_PLAN_PARTITION:
return physiPartitionNodeToJson(pObj, pJson);
+ case QUERY_NODE_PHYSICAL_PLAN_STREAM_PARTITION:
+ return physiStreamPartitionNodeToJson(pObj, pJson);
case QUERY_NODE_PHYSICAL_PLAN_INDEF_ROWS_FUNC:
return physiIndefRowsFuncNodeToJson(pObj, pJson);
case QUERY_NODE_PHYSICAL_PLAN_INTERP_FUNC:
@@ -4551,6 +4715,10 @@ static int32_t jsonToSpecificNode(const SJson* pJson, void* pObj) {
return jsonToDatabaseOptions(pJson, pObj);
case QUERY_NODE_LEFT_VALUE:
return TSDB_CODE_SUCCESS; // SLeftValueNode has no fields to deserialize.
+ case QUERY_NODE_WHEN_THEN:
+ return jsonToWhenThenNode(pJson, pObj);
+ case QUERY_NODE_CASE_WHEN:
+ return jsonToCaseWhenNode(pJson, pObj);
case QUERY_NODE_SET_OPERATOR:
return jsonToSetOperator(pJson, pObj);
case QUERY_NODE_SELECT_STMT:
@@ -4622,6 +4790,7 @@ static int32_t jsonToSpecificNode(const SJson* pJson, void* pObj) {
case QUERY_NODE_PHYSICAL_PLAN_STREAM_SEMI_INTERVAL:
return jsonToPhysiIntervalNode(pJson, pObj);
case QUERY_NODE_PHYSICAL_PLAN_FILL:
+ case QUERY_NODE_PHYSICAL_PLAN_STREAM_FILL:
return jsonToPhysiFillNode(pJson, pObj);
case QUERY_NODE_PHYSICAL_PLAN_MERGE_SESSION:
case QUERY_NODE_PHYSICAL_PLAN_STREAM_SESSION:
@@ -4633,6 +4802,8 @@ static int32_t jsonToSpecificNode(const SJson* pJson, void* pObj) {
return jsonToPhysiStateWindowNode(pJson, pObj);
case QUERY_NODE_PHYSICAL_PLAN_PARTITION:
return jsonToPhysiPartitionNode(pJson, pObj);
+ case QUERY_NODE_PHYSICAL_PLAN_STREAM_PARTITION:
+ return jsonToPhysiStreamPartitionNode(pJson, pObj);
case QUERY_NODE_PHYSICAL_PLAN_INDEF_ROWS_FUNC:
return jsonToPhysiIndefRowsFuncNode(pJson, pObj);
case QUERY_NODE_PHYSICAL_PLAN_INTERP_FUNC:
diff --git a/source/libs/nodes/src/nodesEqualFuncs.c b/source/libs/nodes/src/nodesEqualFuncs.c
index 9cb7e8b66d2a7dbc91e13e76e47c3189112a7825..4e23999ec2c32e2c82a6b0c569b3ff5c60770ef7 100644
--- a/source/libs/nodes/src/nodesEqualFuncs.c
+++ b/source/libs/nodes/src/nodesEqualFuncs.c
@@ -140,6 +140,19 @@ static bool functionNodeEqual(const SFunctionNode* a, const SFunctionNode* b) {
return true;
}
+static bool whenThenNodeEqual(const SWhenThenNode* a, const SWhenThenNode* b) {
+ COMPARE_NODE_FIELD(pWhen);
+ COMPARE_NODE_FIELD(pThen);
+ return true;
+}
+
+static bool caseWhenNodeEqual(const SCaseWhenNode* a, const SCaseWhenNode* b) {
+ COMPARE_NODE_FIELD(pCase);
+ COMPARE_NODE_FIELD(pElse);
+ COMPARE_NODE_LIST_FIELD(pWhenThenList);
+ return true;
+}
+
bool nodesEqualNode(const SNode* a, const SNode* b) {
if (a == b) {
return true;
@@ -164,13 +177,17 @@ bool nodesEqualNode(const SNode* a, const SNode* b) {
return logicConditionNodeEqual((const SLogicConditionNode*)a, (const SLogicConditionNode*)b);
case QUERY_NODE_FUNCTION:
return functionNodeEqual((const SFunctionNode*)a, (const SFunctionNode*)b);
+ case QUERY_NODE_WHEN_THEN:
+ return whenThenNodeEqual((const SWhenThenNode*)a, (const SWhenThenNode*)b);
+ case QUERY_NODE_CASE_WHEN:
+ return caseWhenNodeEqual((const SCaseWhenNode*)a, (const SCaseWhenNode*)b);
case QUERY_NODE_REAL_TABLE:
case QUERY_NODE_TEMP_TABLE:
case QUERY_NODE_JOIN_TABLE:
case QUERY_NODE_GROUPING_SET:
case QUERY_NODE_ORDER_BY_EXPR:
case QUERY_NODE_LIMIT:
- return false; // todo
+ return false;
default:
break;
}
diff --git a/source/libs/nodes/src/nodesMsgFuncs.c b/source/libs/nodes/src/nodesMsgFuncs.c
new file mode 100644
index 0000000000000000000000000000000000000000..cdc4e66e4247615697a6090e2ad1c1e8a1659529
--- /dev/null
+++ b/source/libs/nodes/src/nodesMsgFuncs.c
@@ -0,0 +1,3908 @@
+/*
+ * Copyright (c) 2019 TAOS Data, Inc.
+ *
+ * This program is free software: you can use, redistribute, and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3
+ * or later ("AGPL"), as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see .
+ */
+
+#include "nodesUtil.h"
+#include "plannodes.h"
+#include "tdatablock.h"
+
+#ifndef htonll
+
+#define htonll(x) \
+ (((int64_t)x & 0x00000000000000ff) << 7 * 8) | (((int64_t)x & 0x000000000000ff00) << 5 * 8) | \
+ (((int64_t)x & 0x0000000000ff0000) << 3 * 8) | (((int64_t)x & 0x00000000ff000000) << 1 * 8) | \
+ (((int64_t)x & 0x000000ff00000000) >> 1 * 8) | (((int64_t)x & 0x0000ff0000000000) >> 3 * 8) | \
+ (((int64_t)x & 0x00ff000000000000) >> 5 * 8) | (((int64_t)x & 0xff00000000000000) >> 7 * 8)
+
+#define ntohll(x) htonll(x)
+
+#endif
+
+#define NODES_MSG_DEFAULT_LEN 1024
+#define TLV_TYPE_ARRAY_ELEM 0
+
+#define tlvForEach(pDecoder, pTlv, code) \
+ while (TSDB_CODE_SUCCESS == code && TSDB_CODE_SUCCESS == (code = tlvGetNextTlv(pDecoder, &pTlv)) && NULL != pTlv)
+
+#pragma pack(push, 1)
+
+typedef struct STlv {
+ int16_t type;
+ int32_t len;
+ char value[0];
+} STlv;
+
+#pragma pack(pop)
+
+typedef struct STlvEncoder {
+ int32_t allocSize;
+ int32_t offset;
+ char* pBuf;
+ int32_t tlvCount;
+} STlvEncoder;
+
+typedef struct STlvDecoder {
+ int32_t bufSize;
+ int32_t offset;
+ const char* pBuf;
+} STlvDecoder;
+
+typedef int32_t (*FToMsg)(const void* pObj, STlvEncoder* pEncoder);
+typedef int32_t (*FToObject)(STlvDecoder* pDecoder, void* pObj);
+typedef void* (*FMakeObject)(int16_t type);
+typedef int32_t (*FSetObject)(STlv* pTlv, void* pObj);
+
+static int32_t nodeToMsg(const void* pObj, STlvEncoder* pEncoder);
+static int32_t nodeListToMsg(const void* pObj, STlvEncoder* pEncoder);
+static int32_t msgToNode(STlvDecoder* pDecoder, void** pObj);
+static int32_t msgToNodeFromTlv(STlv* pTlv, void** pObj);
+static int32_t msgToNodeList(STlvDecoder* pDecoder, void** pObj);
+static int32_t msgToNodeListFromTlv(STlv* pTlv, void** pObj);
+
+static int32_t initTlvEncoder(STlvEncoder* pEncoder) {
+ pEncoder->allocSize = NODES_MSG_DEFAULT_LEN;
+ pEncoder->offset = 0;
+ pEncoder->tlvCount = 0;
+ pEncoder->pBuf = taosMemoryMalloc(pEncoder->allocSize);
+ return NULL == pEncoder->pBuf ? TSDB_CODE_OUT_OF_MEMORY : TSDB_CODE_SUCCESS;
+}
+
+static void clearTlvEncoder(STlvEncoder* pEncoder) { taosMemoryFree(pEncoder->pBuf); }
+
+static void endTlvEncode(STlvEncoder* pEncoder, char** pMsg, int32_t* pLen) {
+ *pMsg = pEncoder->pBuf;
+ pEncoder->pBuf = NULL;
+ *pLen = pEncoder->offset;
+ // nodesWarn("encode tlv count = %d, tl size = %d", pEncoder->tlvCount, sizeof(STlv) * pEncoder->tlvCount);
+}
+
+static int32_t tlvEncodeImpl(STlvEncoder* pEncoder, int16_t type, const void* pValue, int32_t len) {
+ int32_t tlvLen = sizeof(STlv) + len;
+ if (pEncoder->offset + tlvLen > pEncoder->allocSize) {
+ pEncoder->allocSize = TMAX(pEncoder->allocSize * 2, pEncoder->allocSize + pEncoder->offset + tlvLen);
+ void* pNewBuf = taosMemoryRealloc(pEncoder->pBuf, pEncoder->allocSize);
+ if (NULL == pNewBuf) {
+ return TSDB_CODE_OUT_OF_MEMORY;
+ }
+ pEncoder->pBuf = pNewBuf;
+ }
+ STlv* pTlv = (STlv*)(pEncoder->pBuf + pEncoder->offset);
+ pTlv->type = htons(type);
+ pTlv->len = htonl(len);
+ memcpy(pTlv->value, pValue, len);
+ pEncoder->offset += tlvLen;
+ ++(pEncoder->tlvCount);
+ return TSDB_CODE_SUCCESS;
+}
+
+static int32_t tlvEncodeValueImpl(STlvEncoder* pEncoder, const void* pValue, int32_t len) {
+ if (pEncoder->offset + len > pEncoder->allocSize) {
+ void* pNewBuf = taosMemoryRealloc(pEncoder->pBuf, pEncoder->allocSize * 2);
+ if (NULL == pNewBuf) {
+ return TSDB_CODE_OUT_OF_MEMORY;
+ }
+ pEncoder->pBuf = pNewBuf;
+ pEncoder->allocSize = pEncoder->allocSize * 2;
+ }
+ memcpy(pEncoder->pBuf + pEncoder->offset, pValue, len);
+ pEncoder->offset += len;
+ return TSDB_CODE_SUCCESS;
+}
+
+static int32_t tlvEncodeI8(STlvEncoder* pEncoder, int16_t type, int8_t value) {
+ return tlvEncodeImpl(pEncoder, type, &value, sizeof(value));
+}
+
+static int32_t tlvEncodeValueI8(STlvEncoder* pEncoder, int8_t value) {
+ return tlvEncodeValueImpl(pEncoder, &value, sizeof(value));
+}
+
+static int32_t tlvEncodeI16(STlvEncoder* pEncoder, int16_t type, int16_t value) {
+ value = htons(value);
+ return tlvEncodeImpl(pEncoder, type, &value, sizeof(value));
+}
+
+static int32_t tlvEncodeValueI16(STlvEncoder* pEncoder, int16_t value) {
+ value = htons(value);
+ return tlvEncodeValueImpl(pEncoder, &value, sizeof(value));
+}
+
+static int32_t tlvEncodeI32(STlvEncoder* pEncoder, int16_t type, int32_t value) {
+ value = htonl(value);
+ return tlvEncodeImpl(pEncoder, type, &value, sizeof(value));
+}
+
+static int32_t tlvEncodeValueI32(STlvEncoder* pEncoder, int32_t value) {
+ value = htonl(value);
+ return tlvEncodeValueImpl(pEncoder, &value, sizeof(value));
+}
+
+static int32_t tlvEncodeI64(STlvEncoder* pEncoder, int16_t type, int64_t value) {
+ value = htonll(value);
+ return tlvEncodeImpl(pEncoder, type, &value, sizeof(value));
+}
+
+static int32_t tlvEncodeValueI64(STlvEncoder* pEncoder, int64_t value) {
+ value = htonll(value);
+ return tlvEncodeValueImpl(pEncoder, &value, sizeof(value));
+}
+
+static int32_t tlvEncodeU8(STlvEncoder* pEncoder, int16_t type, uint8_t value) {
+ return tlvEncodeImpl(pEncoder, type, &value, sizeof(value));
+}
+
+static int32_t tlvEncodeValueU8(STlvEncoder* pEncoder, uint8_t value) {
+ return tlvEncodeValueImpl(pEncoder, &value, sizeof(value));
+}
+
+static int32_t tlvEncodeU16(STlvEncoder* pEncoder, int16_t type, uint16_t value) {
+ value = htons(value);
+ return tlvEncodeImpl(pEncoder, type, &value, sizeof(value));
+}
+
+static int32_t tlvEncodeValueU16(STlvEncoder* pEncoder, uint16_t value) {
+ value = htons(value);
+ return tlvEncodeValueImpl(pEncoder, &value, sizeof(value));
+}
+
+static int32_t tlvEncodeU64(STlvEncoder* pEncoder, int16_t type, uint64_t value) {
+ value = htonll(value);
+ return tlvEncodeImpl(pEncoder, type, &value, sizeof(value));
+}
+
+static int32_t tlvEncodeValueU64(STlvEncoder* pEncoder, uint64_t value) {
+ value = htonll(value);
+ return tlvEncodeValueImpl(pEncoder, &value, sizeof(value));
+}
+
+static int32_t tlvEncodeDouble(STlvEncoder* pEncoder, int16_t type, double value) {
+ int64_t temp = *(int64_t*)&value;
+ temp = htonll(temp);
+ return tlvEncodeImpl(pEncoder, type, &temp, sizeof(temp));
+}
+
+static int32_t tlvEncodeValueDouble(STlvEncoder* pEncoder, double value) {
+ int64_t temp = *(int64_t*)&value;
+ temp = htonll(temp);
+ return tlvEncodeValueImpl(pEncoder, &temp, sizeof(temp));
+}
+
+static int32_t tlvEncodeEnum(STlvEncoder* pEncoder, int16_t type, int32_t value) {
+ value = htonl(value);
+ return tlvEncodeImpl(pEncoder, type, &value, sizeof(value));
+}
+
+static int32_t tlvEncodeValueEnum(STlvEncoder* pEncoder, int32_t value) {
+ value = htonl(value);
+ return tlvEncodeValueImpl(pEncoder, &value, sizeof(value));
+}
+
+static int32_t tlvEncodeBool(STlvEncoder* pEncoder, int16_t type, int8_t value) {
+ return tlvEncodeImpl(pEncoder, type, &value, sizeof(value));
+}
+
+static int32_t tlvEncodeValueBool(STlvEncoder* pEncoder, int8_t value) {
+ return tlvEncodeValueImpl(pEncoder, &value, sizeof(value));
+}
+
+static int32_t tlvEncodeCStr(STlvEncoder* pEncoder, int16_t type, const char* pValue) {
+ if (NULL == pValue) {
+ return TSDB_CODE_SUCCESS;
+ }
+ return tlvEncodeImpl(pEncoder, type, pValue, strlen(pValue));
+}
+
+static int32_t tlvEncodeValueCStr(STlvEncoder* pEncoder, const char* pValue) {
+ int16_t len = strlen(pValue);
+ int32_t code = tlvEncodeValueI16(pEncoder, len);
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tlvEncodeValueImpl(pEncoder, pValue, len);
+ }
+ return code;
+}
+
+static int32_t tlvEncodeBinary(STlvEncoder* pEncoder, int16_t type, const void* pValue, int32_t len) {
+ return tlvEncodeImpl(pEncoder, type, pValue, len);
+}
+
+static int32_t tlvEncodeObj(STlvEncoder* pEncoder, int16_t type, FToMsg func, const void* pObj) {
+ if (NULL == pObj) {
+ return TSDB_CODE_SUCCESS;
+ }
+
+ int32_t start = pEncoder->offset;
+ pEncoder->offset += sizeof(STlv);
+ int32_t code = func(pObj, pEncoder);
+ if (TSDB_CODE_SUCCESS == code) {
+ STlv* pTlv = (STlv*)(pEncoder->pBuf + start);
+ pTlv->type = htons(type);
+ pTlv->len = htonl(pEncoder->offset - start - sizeof(STlv));
+ }
+ ++(pEncoder->tlvCount);
+ return code;
+}
+
+static int32_t tlvEncodeObjArray(STlvEncoder* pEncoder, int16_t type, FToMsg func, const void* pArray, int32_t itemSize,
+ int32_t num) {
+ int32_t code = TSDB_CODE_SUCCESS;
+ if (num > 0) {
+ int32_t start = pEncoder->offset;
+ pEncoder->offset += sizeof(STlv);
+ for (size_t i = 0; TSDB_CODE_SUCCESS == code && i < num; ++i) {
+ code = tlvEncodeObj(pEncoder, TLV_TYPE_ARRAY_ELEM, func, (const char*)pArray + i * itemSize);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ STlv* pTlv = (STlv*)(pEncoder->pBuf + start);
+ pTlv->type = htons(type);
+ pTlv->len = htonl(pEncoder->offset - start - sizeof(STlv));
+ }
+ }
+ return code;
+}
+
+static int32_t tlvEncodeValueArray(STlvEncoder* pEncoder, FToMsg func, const void* pArray, int32_t itemSize,
+ int32_t num) {
+ int32_t code = tlvEncodeValueI32(pEncoder, num);
+ for (size_t i = 0; TSDB_CODE_SUCCESS == code && i < num; ++i) {
+ code = func((const char*)pArray + i * itemSize, pEncoder);
+ }
+ return code;
+}
+
+static int32_t tlvGetNextTlv(STlvDecoder* pDecoder, STlv** pTlv) {
+ if (pDecoder->offset == pDecoder->bufSize) {
+ *pTlv = NULL;
+ return TSDB_CODE_SUCCESS;
+ }
+
+ *pTlv = (STlv*)(pDecoder->pBuf + pDecoder->offset);
+ (*pTlv)->type = ntohs((*pTlv)->type);
+ (*pTlv)->len = ntohl((*pTlv)->len);
+ if ((*pTlv)->len + pDecoder->offset > pDecoder->bufSize) {
+ return TSDB_CODE_FAILED;
+ }
+ pDecoder->offset += sizeof(STlv) + (*pTlv)->len;
+ return TSDB_CODE_SUCCESS;
+}
+
+static bool tlvDecodeEnd(STlvDecoder* pDecoder) { return pDecoder->offset == pDecoder->bufSize; }
+
+static int32_t tlvDecodeImpl(STlv* pTlv, void* pValue, int32_t len) {
+ if (pTlv->len != len) {
+ return TSDB_CODE_FAILED;
+ }
+ memcpy(pValue, pTlv->value, len);
+ return TSDB_CODE_SUCCESS;
+}
+
+static int32_t tlvDecodeValueImpl(STlvDecoder* pDecoder, void* pValue, int32_t len) {
+ if (pDecoder->offset + len > pDecoder->bufSize) {
+ return TSDB_CODE_FAILED;
+ }
+ memcpy(pValue, pDecoder->pBuf + pDecoder->offset, len);
+ pDecoder->offset += len;
+ return TSDB_CODE_SUCCESS;
+}
+
+static int32_t tlvDecodeI8(STlv* pTlv, int8_t* pValue) { return tlvDecodeImpl(pTlv, pValue, sizeof(*pValue)); }
+
+static int32_t tlvDecodeValueI8(STlvDecoder* pDecoder, int8_t* pValue) {
+ return tlvDecodeValueImpl(pDecoder, pValue, sizeof(*pValue));
+}
+
+static int32_t tlvDecodeI16(STlv* pTlv, int16_t* pValue) {
+ int32_t code = tlvDecodeImpl(pTlv, pValue, sizeof(*pValue));
+ if (TSDB_CODE_SUCCESS == code) {
+ *pValue = ntohs(*pValue);
+ }
+ return code;
+}
+
+static int32_t tlvDecodeValueI16(STlvDecoder* pDecoder, int16_t* pValue) {
+ int32_t code = tlvDecodeValueImpl(pDecoder, pValue, sizeof(*pValue));
+ if (TSDB_CODE_SUCCESS == code) {
+ *pValue = ntohs(*pValue);
+ }
+ return code;
+}
+
+static int32_t tlvDecodeI32(STlv* pTlv, int32_t* pValue) {
+ int32_t code = tlvDecodeImpl(pTlv, pValue, sizeof(*pValue));
+ if (TSDB_CODE_SUCCESS == code) {
+ *pValue = ntohl(*pValue);
+ }
+ return code;
+}
+
+static int32_t tlvDecodeValueI32(STlvDecoder* pDecoder, int32_t* pValue) {
+ int32_t code = tlvDecodeValueImpl(pDecoder, pValue, sizeof(*pValue));
+ if (TSDB_CODE_SUCCESS == code) {
+ *pValue = ntohl(*pValue);
+ }
+ return code;
+}
+
+static int32_t tlvDecodeI64(STlv* pTlv, int64_t* pValue) {
+ int32_t code = tlvDecodeImpl(pTlv, pValue, sizeof(*pValue));
+ if (TSDB_CODE_SUCCESS == code) {
+ *pValue = ntohll(*pValue);
+ }
+ return code;
+}
+
+static int32_t tlvDecodeValueI64(STlvDecoder* pDecoder, int64_t* pValue) {
+ int32_t code = tlvDecodeValueImpl(pDecoder, pValue, sizeof(*pValue));
+ if (TSDB_CODE_SUCCESS == code) {
+ *pValue = ntohll(*pValue);
+ }
+ return code;
+}
+
+static int32_t tlvDecodeU8(STlv* pTlv, uint8_t* pValue) { return tlvDecodeImpl(pTlv, pValue, sizeof(*pValue)); }
+
+static int32_t tlvDecodeValueU8(STlvDecoder* pDecoder, uint8_t* pValue) {
+ return tlvDecodeValueImpl(pDecoder, pValue, sizeof(*pValue));
+}
+
+static int32_t tlvDecodeU16(STlv* pTlv, uint16_t* pValue) {
+ int32_t code = tlvDecodeImpl(pTlv, pValue, sizeof(*pValue));
+ if (TSDB_CODE_SUCCESS == code) {
+ *pValue = ntohs(*pValue);
+ }
+ return code;
+}
+
+static int32_t tlvDecodeValueU16(STlvDecoder* pDecoder, uint16_t* pValue) {
+ int32_t code = tlvDecodeValueImpl(pDecoder, pValue, sizeof(*pValue));
+ if (TSDB_CODE_SUCCESS == code) {
+ *pValue = ntohs(*pValue);
+ }
+ return code;
+}
+
+static int32_t tlvDecodeU64(STlv* pTlv, uint64_t* pValue) {
+ int32_t code = tlvDecodeImpl(pTlv, pValue, sizeof(*pValue));
+ if (TSDB_CODE_SUCCESS == code) {
+ *pValue = ntohll(*pValue);
+ }
+ return code;
+}
+
+static int32_t tlvDecodeValueU64(STlvDecoder* pDecoder, uint64_t* pValue) {
+ int32_t code = tlvDecodeValueImpl(pDecoder, pValue, sizeof(*pValue));
+ if (TSDB_CODE_SUCCESS == code) {
+ *pValue = ntohll(*pValue);
+ }
+ return code;
+}
+
+static int32_t tlvDecodeDouble(STlv* pTlv, double* pValue) {
+ int64_t temp = 0;
+ int32_t code = tlvDecodeI64(pTlv, &temp);
+ if (TSDB_CODE_SUCCESS == code) {
+ *pValue = *(double*)&temp;
+ }
+ return code;
+}
+
+static int32_t tlvDecodeValueDouble(STlvDecoder* pDecoder, double* pValue) {
+ int64_t temp = 0;
+ int32_t code = tlvDecodeValueI64(pDecoder, &temp);
+ if (TSDB_CODE_SUCCESS == code) {
+ *pValue = *(double*)&temp;
+ }
+ return code;
+}
+
+static int32_t convertIntegerType(int32_t value, void* pValue, int16_t len) {
+ switch (len) {
+ case 1:
+ *(int8_t*)pValue = value;
+ break;
+ case 2:
+ *(int16_t*)pValue = value;
+ break;
+ case 4:
+ *(int32_t*)pValue = value;
+ break;
+ default:
+ return TSDB_CODE_FAILED;
+ }
+ return TSDB_CODE_SUCCESS;
+}
+
+static int32_t tlvDecodeBool(STlv* pTlv, bool* pValue) {
+ int8_t value = 0;
+ int32_t code = tlvDecodeI8(pTlv, &value);
+ if (TSDB_CODE_SUCCESS == code) {
+ code = convertIntegerType(value, pValue, sizeof(bool));
+ }
+ return code;
+}
+
+static int32_t tlvDecodeValueBool(STlvDecoder* pDecoder, bool* pValue) {
+ int8_t value = 0;
+ int32_t code = tlvDecodeValueI8(pDecoder, &value);
+ if (TSDB_CODE_SUCCESS == code) {
+ code = convertIntegerType(value, pValue, sizeof(bool));
+ }
+ return code;
+}
+
+static int32_t tlvDecodeEnum(STlv* pTlv, void* pValue, int16_t len) {
+ int32_t value = 0;
+ int32_t code = tlvDecodeI32(pTlv, &value);
+ if (TSDB_CODE_SUCCESS == code) {
+ code = convertIntegerType(value, pValue, len);
+ }
+ return code;
+}
+
+static int32_t tlvDecodeValueEnum(STlvDecoder* pDecoder, void* pValue, int16_t len) {
+ int32_t value = 0;
+ int32_t code = tlvDecodeValueI32(pDecoder, &value);
+ if (TSDB_CODE_SUCCESS == code) {
+ code = convertIntegerType(value, pValue, len);
+ }
+ return code;
+}
+
+static int32_t tlvDecodeCStr(STlv* pTlv, char* pValue) {
+ memcpy(pValue, pTlv->value, pTlv->len);
+ return TSDB_CODE_SUCCESS;
+}
+
+static int32_t tlvDecodeValueCStr(STlvDecoder* pDecoder, char* pValue) {
+ int16_t len = 0;
+ int32_t code = tlvDecodeValueI16(pDecoder, &len);
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tlvDecodeValueImpl(pDecoder, pValue, len);
+ }
+ return code;
+}
+
+static int32_t tlvDecodeCStrP(STlv* pTlv, char** pValue) {
+ *pValue = strndup(pTlv->value, pTlv->len);
+ return NULL == *pValue ? TSDB_CODE_OUT_OF_MEMORY : TSDB_CODE_SUCCESS;
+}
+
+static int32_t tlvDecodeDynBinary(STlv* pTlv, void** pValue) {
+ *pValue = taosMemoryMalloc(pTlv->len);
+ if (NULL == *pValue) {
+ return TSDB_CODE_OUT_OF_MEMORY;
+ }
+ memcpy(*pValue, pTlv->value, pTlv->len);
+ return TSDB_CODE_SUCCESS;
+}
+
+static int32_t tlvDecodeBinary(STlv* pTlv, void* pValue) {
+ memcpy(pValue, pTlv->value, pTlv->len);
+ return TSDB_CODE_SUCCESS;
+}
+
+static int32_t tlvDecodeObjFromTlv(STlv* pTlv, FToObject func, void* pObj) {
+ STlvDecoder decoder = {.bufSize = pTlv->len, .offset = 0, .pBuf = pTlv->value};
+ return func(&decoder, pObj);
+}
+
+static int32_t tlvDecodeObj(STlvDecoder* pDecoder, FToObject func, void* pObj) {
+ STlv* pTlv = NULL;
+ int32_t code = tlvGetNextTlv(pDecoder, &pTlv);
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tlvDecodeObjFromTlv(pTlv, func, pObj);
+ }
+ return code;
+}
+
+static int32_t tlvDecodeObjArray(STlvDecoder* pDecoder, FToObject func, void* pArray, int32_t itemSize) {
+ int32_t code = TSDB_CODE_SUCCESS;
+ int32_t i = 0;
+ STlv* pTlv = NULL;
+ tlvForEach(pDecoder, pTlv, code) { code = tlvDecodeObjFromTlv(pTlv, func, (char*)pArray + itemSize * i++); }
+ return code;
+}
+
+static int32_t tlvDecodeValueArray(STlvDecoder* pDecoder, FToObject func, void* pArray, int32_t itemSize,
+ int32_t* pNum) {
+ int32_t code = tlvDecodeValueI32(pDecoder, pNum);
+ for (size_t i = 0; TSDB_CODE_SUCCESS == code && i < *pNum; ++i) {
+ code = func(pDecoder, (char*)pArray + i * itemSize);
+ }
+ return code;
+}
+
+static int32_t tlvDecodeObjArrayFromTlv(STlv* pTlv, FToObject func, void* pArray, int32_t itemSize) {
+ STlvDecoder decoder = {.bufSize = pTlv->len, .offset = 0, .pBuf = pTlv->value};
+ return tlvDecodeObjArray(&decoder, func, pArray, itemSize);
+}
+
+static int32_t tlvDecodeDynObjFromTlv(STlv* pTlv, FMakeObject makeFunc, FToObject toFunc, void** pObj) {
+ *pObj = makeFunc(pTlv->type);
+ if (NULL == *pObj) {
+ return TSDB_CODE_OUT_OF_MEMORY;
+ }
+ return tlvDecodeObjFromTlv(pTlv, toFunc, *pObj);
+}
+
+static int32_t tlvDecodeDynObj(STlvDecoder* pDecoder, FMakeObject makeFunc, FToObject toFunc, void** pObj) {
+ STlv* pTlv = NULL;
+ int32_t code = tlvGetNextTlv(pDecoder, &pTlv);
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tlvDecodeDynObjFromTlv(pTlv, makeFunc, toFunc, pObj);
+ }
+ return code;
+}
+
+enum { DATA_TYPE_CODE_TYPE = 1, DATA_TYPE_CODE_PRECISION, DATA_TYPE_CODE_SCALE, DATA_TYPE_CODE_BYTES };
+
+static int32_t dataTypeInlineToMsg(const void* pObj, STlvEncoder* pEncoder) {
+ const SDataType* pNode = (const SDataType*)pObj;
+
+ int32_t code = tlvEncodeValueI8(pEncoder, pNode->type);
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tlvEncodeValueU8(pEncoder, pNode->precision);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tlvEncodeValueU8(pEncoder, pNode->scale);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tlvEncodeValueI32(pEncoder, pNode->bytes);
+ }
+
+ return code;
+}
+
+static int32_t dataTypeToMsg(const void* pObj, STlvEncoder* pEncoder) {
+ const SDataType* pNode = (const SDataType*)pObj;
+
+ int32_t code = tlvEncodeI8(pEncoder, DATA_TYPE_CODE_TYPE, pNode->type);
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tlvEncodeU8(pEncoder, DATA_TYPE_CODE_PRECISION, pNode->precision);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tlvEncodeU8(pEncoder, DATA_TYPE_CODE_SCALE, pNode->scale);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tlvEncodeI32(pEncoder, DATA_TYPE_CODE_BYTES, pNode->bytes);
+ }
+
+ return code;
+}
+
+static int32_t msgToDataTypeInline(STlvDecoder* pDecoder, void* pObj) {
+ SDataType* pNode = (SDataType*)pObj;
+
+ int32_t code = tlvDecodeValueI8(pDecoder, &pNode->type);
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tlvDecodeValueU8(pDecoder, &pNode->precision);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tlvDecodeValueU8(pDecoder, &pNode->scale);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tlvDecodeValueI32(pDecoder, &pNode->bytes);
+ }
+
+ return code;
+}
+
+static int32_t msgToDataType(STlvDecoder* pDecoder, void* pObj) {
+ SDataType* pNode = (SDataType*)pObj;
+
+ int32_t code = TSDB_CODE_SUCCESS;
+ STlv* pTlv = NULL;
+ tlvForEach(pDecoder, pTlv, code) {
+ switch (pTlv->type) {
+ case DATA_TYPE_CODE_TYPE:
+ code = tlvDecodeI8(pTlv, &pNode->type);
+ break;
+ case DATA_TYPE_CODE_PRECISION:
+ code = tlvDecodeU8(pTlv, &pNode->precision);
+ break;
+ case DATA_TYPE_CODE_SCALE:
+ code = tlvDecodeU8(pTlv, &pNode->scale);
+ break;
+ case DATA_TYPE_CODE_BYTES:
+ code = tlvDecodeI32(pTlv, &pNode->bytes);
+ break;
+ default:
+ break;
+ }
+ }
+
+ return code;
+}
+
+enum { EXPR_CODE_RES_TYPE = 1 };
+
+static int32_t exprNodeToMsg(const void* pObj, STlvEncoder* pEncoder) {
+ const SExprNode* pNode = (const SExprNode*)pObj;
+ return tlvEncodeObj(pEncoder, EXPR_CODE_RES_TYPE, dataTypeToMsg, &pNode->resType);
+}
+
+static int32_t msgToExprNode(STlvDecoder* pDecoder, void* pObj) {
+ SExprNode* pNode = (SExprNode*)pObj;
+
+ int32_t code = TSDB_CODE_SUCCESS;
+ STlv* pTlv = NULL;
+ tlvForEach(pDecoder, pTlv, code) {
+ switch (pTlv->type) {
+ case EXPR_CODE_RES_TYPE:
+ code = tlvDecodeObjFromTlv(pTlv, msgToDataType, &pNode->resType);
+ break;
+ default:
+ break;
+ }
+ }
+
+ return code;
+}
+
+enum { COLUMN_CODE_INLINE_ATTRS = 1 };
+
+static int32_t columnNodeInlineToMsg(const void* pObj, STlvEncoder* pEncoder) {
+ const SColumnNode* pNode = (const SColumnNode*)pObj;
+
+ int32_t code = dataTypeInlineToMsg(&pNode->node.resType, pEncoder);
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tlvEncodeValueU64(pEncoder, pNode->tableId);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tlvEncodeValueI8(pEncoder, pNode->tableType);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tlvEncodeValueI16(pEncoder, pNode->colId);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tlvEncodeValueEnum(pEncoder, pNode->colType);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tlvEncodeValueCStr(pEncoder, pNode->dbName);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tlvEncodeValueCStr(pEncoder, pNode->tableName);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tlvEncodeValueCStr(pEncoder, pNode->tableAlias);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tlvEncodeValueCStr(pEncoder, pNode->colName);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tlvEncodeValueI16(pEncoder, pNode->dataBlockId);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tlvEncodeValueI16(pEncoder, pNode->slotId);
+ }
+
+ return code;
+}
+
+static int32_t columnNodeToMsg(const void* pObj, STlvEncoder* pEncoder) {
+ return tlvEncodeObj(pEncoder, COLUMN_CODE_INLINE_ATTRS, columnNodeInlineToMsg, pObj);
+}
+
+static int32_t msgToColumnNodeInline(STlvDecoder* pDecoder, void* pObj) {
+ SColumnNode* pNode = (SColumnNode*)pObj;
+
+ int32_t code = msgToDataTypeInline(pDecoder, &pNode->node.resType);
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tlvDecodeValueU64(pDecoder, &pNode->tableId);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tlvDecodeValueI8(pDecoder, &pNode->tableType);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tlvDecodeValueI16(pDecoder, &pNode->colId);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tlvDecodeValueEnum(pDecoder, &pNode->colType, sizeof(pNode->colType));
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tlvDecodeValueCStr(pDecoder, pNode->dbName);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tlvDecodeValueCStr(pDecoder, pNode->tableName);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tlvDecodeValueCStr(pDecoder, pNode->tableAlias);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tlvDecodeValueCStr(pDecoder, pNode->colName);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tlvDecodeValueI16(pDecoder, &pNode->dataBlockId);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tlvDecodeValueI16(pDecoder, &pNode->slotId);
+ }
+
+ return code;
+}
+
+static int32_t msgToColumnNode(STlvDecoder* pDecoder, void* pObj) {
+ SColumnNode* pNode = (SColumnNode*)pObj;
+
+ int32_t code = TSDB_CODE_SUCCESS;
+ STlv* pTlv = NULL;
+ tlvForEach(pDecoder, pTlv, code) {
+ switch (pTlv->type) {
+ case COLUMN_CODE_INLINE_ATTRS:
+ code = tlvDecodeObjFromTlv(pTlv, msgToColumnNodeInline, pNode);
+ break;
+ default:
+ break;
+ }
+ }
+
+ return code;
+}
+
+enum {
+ VALUE_CODE_EXPR_BASE = 1,
+ VALUE_CODE_LITERAL,
+ VALUE_CODE_IS_DURATION,
+ VALUE_CODE_TRANSLATE,
+ VALUE_CODE_NOT_RESERVED,
+ VALUE_CODE_IS_NULL,
+ VALUE_CODE_DATUM
+};
+
+static int32_t datumToMsg(const void* pObj, STlvEncoder* pEncoder) {
+ const SValueNode* pNode = (const SValueNode*)pObj;
+
+ int32_t code = TSDB_CODE_SUCCESS;
+ switch (pNode->node.resType.type) {
+ case TSDB_DATA_TYPE_NULL:
+ break;
+ case TSDB_DATA_TYPE_BOOL:
+ code = tlvEncodeBool(pEncoder, VALUE_CODE_DATUM, pNode->datum.b);
+ break;
+ case TSDB_DATA_TYPE_TINYINT:
+ case TSDB_DATA_TYPE_SMALLINT:
+ case TSDB_DATA_TYPE_INT:
+ case TSDB_DATA_TYPE_BIGINT:
+ case TSDB_DATA_TYPE_TIMESTAMP:
+ code = tlvEncodeI64(pEncoder, VALUE_CODE_DATUM, pNode->datum.i);
+ break;
+ case TSDB_DATA_TYPE_UTINYINT:
+ case TSDB_DATA_TYPE_USMALLINT:
+ case TSDB_DATA_TYPE_UINT:
+ case TSDB_DATA_TYPE_UBIGINT:
+ code = tlvEncodeU64(pEncoder, VALUE_CODE_DATUM, pNode->datum.u);
+ break;
+ case TSDB_DATA_TYPE_FLOAT:
+ case TSDB_DATA_TYPE_DOUBLE:
+ code = tlvEncodeDouble(pEncoder, VALUE_CODE_DATUM, pNode->datum.d);
+ break;
+ case TSDB_DATA_TYPE_VARCHAR:
+ case TSDB_DATA_TYPE_VARBINARY:
+ case TSDB_DATA_TYPE_NCHAR:
+ code = tlvEncodeBinary(pEncoder, VALUE_CODE_DATUM, pNode->datum.p, varDataTLen(pNode->datum.p));
+ break;
+ case TSDB_DATA_TYPE_JSON:
+ code = tlvEncodeBinary(pEncoder, VALUE_CODE_DATUM, pNode->datum.p, getJsonValueLen(pNode->datum.p));
+ break;
+ case TSDB_DATA_TYPE_DECIMAL:
+ case TSDB_DATA_TYPE_BLOB:
+ // todo
+ default:
+ break;
+ }
+
+ return code;
+}
+
+static int32_t valueNodeToMsg(const void* pObj, STlvEncoder* pEncoder) {
+ const SValueNode* pNode = (const SValueNode*)pObj;
+
+ int32_t code = tlvEncodeObj(pEncoder, VALUE_CODE_EXPR_BASE, exprNodeToMsg, pNode);
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tlvEncodeCStr(pEncoder, VALUE_CODE_LITERAL, pNode->literal);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tlvEncodeBool(pEncoder, VALUE_CODE_IS_DURATION, pNode->isDuration);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tlvEncodeBool(pEncoder, VALUE_CODE_TRANSLATE, pNode->translate);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tlvEncodeBool(pEncoder, VALUE_CODE_NOT_RESERVED, pNode->notReserved);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tlvEncodeBool(pEncoder, VALUE_CODE_IS_NULL, pNode->isNull);
+ }
+ if (TSDB_CODE_SUCCESS == code && !pNode->isNull) {
+ code = datumToMsg(pNode, pEncoder);
+ }
+
+ return code;
+}
+
+static int32_t msgToDatum(STlv* pTlv, void* pObj) {
+ SValueNode* pNode = (SValueNode*)pObj;
+
+ int32_t code = TSDB_CODE_SUCCESS;
+ switch (pNode->node.resType.type) {
+ case TSDB_DATA_TYPE_NULL:
+ break;
+ case TSDB_DATA_TYPE_BOOL:
+ code = tlvDecodeBool(pTlv, &pNode->datum.b);
+ *(bool*)&pNode->typeData = pNode->datum.b;
+ break;
+ case TSDB_DATA_TYPE_TINYINT:
+ code = tlvDecodeI64(pTlv, &pNode->datum.i);
+ *(int8_t*)&pNode->typeData = pNode->datum.i;
+ break;
+ case TSDB_DATA_TYPE_SMALLINT:
+ code = tlvDecodeI64(pTlv, &pNode->datum.i);
+ *(int16_t*)&pNode->typeData = pNode->datum.i;
+ break;
+ case TSDB_DATA_TYPE_INT:
+ code = tlvDecodeI64(pTlv, &pNode->datum.i);
+ *(int32_t*)&pNode->typeData = pNode->datum.i;
+ break;
+ case TSDB_DATA_TYPE_BIGINT:
+ code = tlvDecodeI64(pTlv, &pNode->datum.i);
+ *(int64_t*)&pNode->typeData = pNode->datum.i;
+ break;
+ case TSDB_DATA_TYPE_TIMESTAMP:
+ code = tlvDecodeI64(pTlv, &pNode->datum.i);
+ *(int64_t*)&pNode->typeData = pNode->datum.i;
+ break;
+ case TSDB_DATA_TYPE_UTINYINT:
+ code = tlvDecodeU64(pTlv, &pNode->datum.u);
+ *(uint8_t*)&pNode->typeData = pNode->datum.u;
+ break;
+ case TSDB_DATA_TYPE_USMALLINT:
+ code = tlvDecodeU64(pTlv, &pNode->datum.u);
+ *(uint16_t*)&pNode->typeData = pNode->datum.u;
+ break;
+ case TSDB_DATA_TYPE_UINT:
+ code = tlvDecodeU64(pTlv, &pNode->datum.u);
+ *(uint32_t*)&pNode->typeData = pNode->datum.u;
+ break;
+ case TSDB_DATA_TYPE_UBIGINT:
+ code = tlvDecodeU64(pTlv, &pNode->datum.u);
+ *(uint64_t*)&pNode->typeData = pNode->datum.u;
+ break;
+ case TSDB_DATA_TYPE_FLOAT:
+ code = tlvDecodeDouble(pTlv, &pNode->datum.d);
+ *(float*)&pNode->typeData = pNode->datum.d;
+ break;
+ case TSDB_DATA_TYPE_DOUBLE:
+ code = tlvDecodeDouble(pTlv, &pNode->datum.d);
+ *(double*)&pNode->typeData = pNode->datum.d;
+ break;
+ case TSDB_DATA_TYPE_NCHAR:
+ case TSDB_DATA_TYPE_VARCHAR:
+ case TSDB_DATA_TYPE_VARBINARY: {
+ pNode->datum.p = taosMemoryCalloc(1, pNode->node.resType.bytes + VARSTR_HEADER_SIZE + 1);
+ if (NULL == pNode->datum.p) {
+ code = TSDB_CODE_OUT_OF_MEMORY;
+ break;
+ }
+ code = tlvDecodeBinary(pTlv, pNode->datum.p);
+ if (TSDB_CODE_SUCCESS == code) {
+ varDataSetLen(pNode->datum.p, pTlv->len - VARSTR_HEADER_SIZE);
+ }
+ break;
+ }
+ case TSDB_DATA_TYPE_JSON:
+ code = tlvDecodeDynBinary(pTlv, (void**)&pNode->datum.p);
+ break;
+ case TSDB_DATA_TYPE_DECIMAL:
+ case TSDB_DATA_TYPE_BLOB:
+ // todo
+ default:
+ break;
+ }
+
+ return code;
+}
+
+static int32_t msgToValueNode(STlvDecoder* pDecoder, void* pObj) {
+ SValueNode* pNode = (SValueNode*)pObj;
+
+ int32_t code = TSDB_CODE_SUCCESS;
+ STlv* pTlv = NULL;
+ tlvForEach(pDecoder, pTlv, code) {
+ switch (pTlv->type) {
+ case VALUE_CODE_EXPR_BASE:
+ code = tlvDecodeObjFromTlv(pTlv, msgToExprNode, &pNode->node);
+ break;
+ case VALUE_CODE_LITERAL:
+ code = tlvDecodeCStrP(pTlv, &pNode->literal);
+ break;
+ case VALUE_CODE_IS_DURATION:
+ code = tlvDecodeBool(pTlv, &pNode->isDuration);
+ break;
+ case VALUE_CODE_TRANSLATE:
+ code = tlvDecodeBool(pTlv, &pNode->translate);
+ break;
+ case VALUE_CODE_NOT_RESERVED:
+ code = tlvDecodeBool(pTlv, &pNode->notReserved);
+ break;
+ case VALUE_CODE_IS_NULL:
+ code = tlvDecodeBool(pTlv, &pNode->isNull);
+ break;
+ case VALUE_CODE_DATUM:
+ code = msgToDatum(pTlv, pNode);
+ break;
+ default:
+ break;
+ }
+ }
+
+ return code;
+}
+
+enum { OPERATOR_CODE_EXPR_BASE = 1, OPERATOR_CODE_OP_TYPE, OPERATOR_CODE_LEFT, OPERATOR_CODE_RIGHT };
+
+static int32_t operatorNodeToMsg(const void* pObj, STlvEncoder* pEncoder) {
+ const SOperatorNode* pNode = (const SOperatorNode*)pObj;
+
+ int32_t code = tlvEncodeObj(pEncoder, OPERATOR_CODE_EXPR_BASE, exprNodeToMsg, pNode);
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tlvEncodeEnum(pEncoder, OPERATOR_CODE_OP_TYPE, pNode->opType);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tlvEncodeObj(pEncoder, OPERATOR_CODE_LEFT, nodeToMsg, pNode->pLeft);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tlvEncodeObj(pEncoder, OPERATOR_CODE_RIGHT, nodeToMsg, pNode->pRight);
+ }
+
+ return code;
+}
+
+static int32_t msgToOperatorNode(STlvDecoder* pDecoder, void* pObj) {
+ SOperatorNode* pNode = (SOperatorNode*)pObj;
+
+ int32_t code = TSDB_CODE_SUCCESS;
+ STlv* pTlv = NULL;
+ tlvForEach(pDecoder, pTlv, code) {
+ switch (pTlv->type) {
+ case OPERATOR_CODE_EXPR_BASE:
+ code = tlvDecodeObjFromTlv(pTlv, msgToExprNode, &pNode->node);
+ break;
+ case OPERATOR_CODE_OP_TYPE:
+ code = tlvDecodeEnum(pTlv, &pNode->opType, sizeof(pNode->opType));
+ break;
+ case OPERATOR_CODE_LEFT:
+ code = msgToNodeFromTlv(pTlv, (void**)&pNode->pLeft);
+ break;
+ case OPERATOR_CODE_RIGHT:
+ code = msgToNodeFromTlv(pTlv, (void**)&pNode->pRight);
+ break;
+ default:
+ break;
+ }
+ }
+
+ return code;
+}
+
+enum { LOGIC_COND_CODE_EXPR_BASE = 1, LOGIC_COND_CODE_COND_TYPE, LOGIC_COND_CODE_PARAMETERS };
+
+static int32_t logicConditionNodeToMsg(const void* pObj, STlvEncoder* pEncoder) {
+ const SLogicConditionNode* pNode = (const SLogicConditionNode*)pObj;
+
+ int32_t code = tlvEncodeObj(pEncoder, LOGIC_COND_CODE_EXPR_BASE, exprNodeToMsg, pNode);
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tlvEncodeEnum(pEncoder, LOGIC_COND_CODE_COND_TYPE, pNode->condType);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tlvEncodeObj(pEncoder, LOGIC_COND_CODE_PARAMETERS, nodeListToMsg, pNode->pParameterList);
+ }
+
+ return code;
+}
+
+static int32_t msgToLogicConditionNode(STlvDecoder* pDecoder, void* pObj) {
+ SLogicConditionNode* pNode = (SLogicConditionNode*)pObj;
+
+ int32_t code = TSDB_CODE_SUCCESS;
+ STlv* pTlv = NULL;
+ tlvForEach(pDecoder, pTlv, code) {
+ switch (pTlv->type) {
+ case LOGIC_COND_CODE_EXPR_BASE:
+ code = tlvDecodeObjFromTlv(pTlv, msgToExprNode, &pNode->node);
+ break;
+ case LOGIC_COND_CODE_COND_TYPE:
+ code = tlvDecodeEnum(pTlv, &pNode->condType, sizeof(pNode->condType));
+ break;
+ case LOGIC_COND_CODE_PARAMETERS:
+ code = msgToNodeListFromTlv(pTlv, (void**)&pNode->pParameterList);
+ break;
+ default:
+ break;
+ }
+ }
+
+ return code;
+}
+
+enum {
+ FUNCTION_CODE_EXPR_BASE = 1,
+ FUNCTION_CODE_FUNCTION_NAME,
+ FUNCTION_CODE_FUNCTION_ID,
+ FUNCTION_CODE_FUNCTION_TYPE,
+ FUNCTION_CODE_PARAMETERS,
+ FUNCTION_CODE_UDF_BUF_SIZE
+};
+
+static int32_t functionNodeToMsg(const void* pObj, STlvEncoder* pEncoder) {
+ const SFunctionNode* pNode = (const SFunctionNode*)pObj;
+
+ int32_t code = tlvEncodeObj(pEncoder, FUNCTION_CODE_EXPR_BASE, exprNodeToMsg, pNode);
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tlvEncodeCStr(pEncoder, FUNCTION_CODE_FUNCTION_NAME, pNode->functionName);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tlvEncodeI32(pEncoder, FUNCTION_CODE_FUNCTION_ID, pNode->funcId);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tlvEncodeI32(pEncoder, FUNCTION_CODE_FUNCTION_TYPE, pNode->funcType);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tlvEncodeObj(pEncoder, FUNCTION_CODE_PARAMETERS, nodeListToMsg, pNode->pParameterList);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tlvEncodeI32(pEncoder, FUNCTION_CODE_UDF_BUF_SIZE, pNode->udfBufSize);
+ }
+
+ return code;
+}
+
+static int32_t msgToFunctionNode(STlvDecoder* pDecoder, void* pObj) {
+ SFunctionNode* pNode = (SFunctionNode*)pObj;
+
+ int32_t code = TSDB_CODE_SUCCESS;
+ STlv* pTlv = NULL;
+ tlvForEach(pDecoder, pTlv, code) {
+ switch (pTlv->type) {
+ case FUNCTION_CODE_EXPR_BASE:
+ code = tlvDecodeObjFromTlv(pTlv, msgToExprNode, &pNode->node);
+ break;
+ case FUNCTION_CODE_FUNCTION_NAME:
+ code = tlvDecodeCStr(pTlv, pNode->functionName);
+ break;
+ case FUNCTION_CODE_FUNCTION_ID:
+ code = tlvDecodeI32(pTlv, &pNode->funcId);
+ break;
+ case FUNCTION_CODE_FUNCTION_TYPE:
+ code = tlvDecodeI32(pTlv, &pNode->funcType);
+ break;
+ case FUNCTION_CODE_PARAMETERS:
+ code = msgToNodeListFromTlv(pTlv, (void**)&pNode->pParameterList);
+ break;
+ case FUNCTION_CODE_UDF_BUF_SIZE:
+ code = tlvDecodeI32(pTlv, &pNode->udfBufSize);
+ break;
+ default:
+ break;
+ }
+ }
+
+ return code;
+}
+
+enum { ORDER_BY_EXPR_CODE_EXPR = 1, ORDER_BY_EXPR_CODE_ORDER, ORDER_BY_EXPR_CODE_NULL_ORDER };
+
+static int32_t orderByExprNodeToMsg(const void* pObj, STlvEncoder* pEncoder) {
+ const SOrderByExprNode* pNode = (const SOrderByExprNode*)pObj;
+
+ int32_t code = tlvEncodeObj(pEncoder, ORDER_BY_EXPR_CODE_EXPR, nodeToMsg, pNode->pExpr);
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tlvEncodeEnum(pEncoder, ORDER_BY_EXPR_CODE_ORDER, pNode->order);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tlvEncodeEnum(pEncoder, ORDER_BY_EXPR_CODE_NULL_ORDER, pNode->nullOrder);
+ }
+
+ return code;
+}
+
+static int32_t msgToOrderByExprNode(STlvDecoder* pDecoder, void* pObj) {
+ SOrderByExprNode* pNode = (SOrderByExprNode*)pObj;
+
+ int32_t code = TSDB_CODE_SUCCESS;
+ STlv* pTlv = NULL;
+ tlvForEach(pDecoder, pTlv, code) {
+ switch (pTlv->type) {
+ case ORDER_BY_EXPR_CODE_EXPR:
+ code = msgToNodeFromTlv(pTlv, (void**)&pNode->pExpr);
+ break;
+ case ORDER_BY_EXPR_CODE_ORDER:
+ code = tlvDecodeEnum(pTlv, &pNode->order, sizeof(pNode->order));
+ break;
+ case ORDER_BY_EXPR_CODE_NULL_ORDER:
+ code = tlvDecodeEnum(pTlv, &pNode->nullOrder, sizeof(pNode->nullOrder));
+ break;
+ default:
+ break;
+ }
+ }
+
+ return code;
+}
+
+enum { LIMIT_CODE_LIMIT = 1, LIMIT_CODE_OFFSET };
+
+static int32_t limitNodeToMsg(const void* pObj, STlvEncoder* pEncoder) {
+ const SLimitNode* pNode = (const SLimitNode*)pObj;
+
+ int32_t code = tlvEncodeI64(pEncoder, LIMIT_CODE_LIMIT, pNode->limit);
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tlvEncodeI64(pEncoder, LIMIT_CODE_OFFSET, pNode->offset);
+ }
+
+ return code;
+}
+
+static int32_t msgToLimitNode(STlvDecoder* pDecoder, void* pObj) {
+ SLimitNode* pNode = (SLimitNode*)pObj;
+
+ int32_t code = TSDB_CODE_SUCCESS;
+ STlv* pTlv = NULL;
+ tlvForEach(pDecoder, pTlv, code) {
+ switch (pTlv->type) {
+ case LIMIT_CODE_LIMIT:
+ code = tlvDecodeI64(pTlv, &pNode->limit);
+ break;
+ case LIMIT_CODE_OFFSET:
+ code = tlvDecodeI64(pTlv, &pNode->offset);
+ break;
+ default:
+ break;
+ }
+ }
+
+ return code;
+}
+
+enum { NAME_CODE_TYPE = 1, NAME_CODE_ACCT_ID, NAME_CODE_DB_NAME, NAME_CODE_TABLE_NAME };
+
+static int32_t nameToMsg(const void* pObj, STlvEncoder* pEncoder) {
+ const SName* pNode = (const SName*)pObj;
+
+ int32_t code = tlvEncodeU8(pEncoder, NAME_CODE_TYPE, pNode->type);
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tlvEncodeI32(pEncoder, NAME_CODE_ACCT_ID, pNode->acctId);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tlvEncodeCStr(pEncoder, NAME_CODE_DB_NAME, pNode->dbname);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tlvEncodeCStr(pEncoder, NAME_CODE_TABLE_NAME, pNode->tname);
+ }
+
+ return code;
+}
+
+static int32_t msgToName(STlvDecoder* pDecoder, void* pObj) {
+ SName* pNode = (SName*)pObj;
+
+ int32_t code = TSDB_CODE_SUCCESS;
+ STlv* pTlv = NULL;
+ tlvForEach(pDecoder, pTlv, code) {
+ switch (pTlv->type) {
+ case NAME_CODE_TYPE:
+ code = tlvDecodeU8(pTlv, &pNode->type);
+ break;
+ case NAME_CODE_ACCT_ID:
+ code = tlvDecodeI32(pTlv, &pNode->acctId);
+ break;
+ case NAME_CODE_DB_NAME:
+ code = tlvDecodeCStr(pTlv, pNode->dbname);
+ break;
+ case NAME_CODE_TABLE_NAME:
+ code = tlvDecodeCStr(pTlv, pNode->tname);
+ break;
+ default:
+ break;
+ }
+ }
+
+ return code;
+}
+
+enum { TIME_WINDOW_CODE_START_KEY = 1, TIME_WINDOW_CODE_END_KEY };
+
+static int32_t timeWindowToMsg(const void* pObj, STlvEncoder* pEncoder) {
+ const STimeWindow* pNode = (const STimeWindow*)pObj;
+
+ int32_t code = tlvEncodeI64(pEncoder, TIME_WINDOW_CODE_START_KEY, pNode->skey);
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tlvEncodeI64(pEncoder, TIME_WINDOW_CODE_END_KEY, pNode->ekey);
+ }
+
+ return code;
+}
+
+static int32_t msgToTimeWindow(STlvDecoder* pDecoder, void* pObj) {
+ STimeWindow* pNode = (STimeWindow*)pObj;
+
+ int32_t code = TSDB_CODE_SUCCESS;
+ STlv* pTlv = NULL;
+ tlvForEach(pDecoder, pTlv, code) {
+ switch (pTlv->type) {
+ case TIME_WINDOW_CODE_START_KEY:
+ code = tlvDecodeI64(pTlv, &pNode->skey);
+ break;
+ case TIME_WINDOW_CODE_END_KEY:
+ code = tlvDecodeI64(pTlv, &pNode->ekey);
+ break;
+ default:
+ break;
+ }
+ }
+
+ return code;
+}
+
+enum { NODE_LIST_CODE_DATA_TYPE = 1, NODE_LIST_CODE_NODE_LIST };
+
+static int32_t nodeListNodeToMsg(const void* pObj, STlvEncoder* pEncoder) {
+ const SNodeListNode* pNode = (const SNodeListNode*)pObj;
+
+ int32_t code = tlvEncodeObj(pEncoder, NODE_LIST_CODE_DATA_TYPE, dataTypeInlineToMsg, &pNode->dataType);
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tlvEncodeObj(pEncoder, NODE_LIST_CODE_NODE_LIST, nodeListToMsg, pNode->pNodeList);
+ }
+
+ return code;
+}
+
+static int32_t msgToNodeListNode(STlvDecoder* pDecoder, void* pObj) {
+ SNodeListNode* pNode = (SNodeListNode*)pObj;
+
+ int32_t code = TSDB_CODE_SUCCESS;
+ STlv* pTlv = NULL;
+ tlvForEach(pDecoder, pTlv, code) {
+ switch (pTlv->type) {
+ case NODE_LIST_CODE_DATA_TYPE:
+ code = tlvDecodeObjFromTlv(pTlv, msgToDataTypeInline, &pNode->dataType);
+ break;
+ case NODE_LIST_CODE_NODE_LIST:
+ code = msgToNodeListFromTlv(pTlv, (void**)&pNode->pNodeList);
+ break;
+ default:
+ break;
+ }
+ }
+
+ return code;
+}
+
+enum { TARGET_CODE_INLINE_ATTRS = 1, TARGET_CODE_EXPR };
+
+static int32_t targetNodeInlineToMsg(const void* pObj, STlvEncoder* pEncoder) {
+ const STargetNode* pNode = (const STargetNode*)pObj;
+
+ int32_t code = tlvEncodeValueI16(pEncoder, pNode->dataBlockId);
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tlvEncodeValueI16(pEncoder, pNode->slotId);
+ }
+
+ return code;
+}
+
+static int32_t targetNodeToMsg(const void* pObj, STlvEncoder* pEncoder) {
+ const STargetNode* pNode = (const STargetNode*)pObj;
+
+ int32_t code = tlvEncodeObj(pEncoder, TARGET_CODE_INLINE_ATTRS, targetNodeInlineToMsg, pNode);
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tlvEncodeObj(pEncoder, TARGET_CODE_EXPR, nodeToMsg, pNode->pExpr);
+ }
+
+ return code;
+}
+
+static int32_t msgToTargetNodeInline(STlvDecoder* pDecoder, void* pObj) {
+ STargetNode* pNode = (STargetNode*)pObj;
+
+ int32_t code = tlvDecodeValueI16(pDecoder, &pNode->dataBlockId);
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tlvDecodeValueI16(pDecoder, &pNode->slotId);
+ }
+
+ return code;
+}
+
+static int32_t msgToTargetNode(STlvDecoder* pDecoder, void* pObj) {
+ STargetNode* pNode = (STargetNode*)pObj;
+
+ int32_t code = TSDB_CODE_SUCCESS;
+ STlv* pTlv = NULL;
+ tlvForEach(pDecoder, pTlv, code) {
+ switch (pTlv->type) {
+ case TARGET_CODE_INLINE_ATTRS:
+ code = tlvDecodeObjFromTlv(pTlv, msgToTargetNodeInline, pNode);
+ break;
+ case TARGET_CODE_EXPR:
+ code = msgToNodeFromTlv(pTlv, (void**)&pNode->pExpr);
+ break;
+ default:
+ break;
+ }
+ }
+
+ return code;
+}
+
+enum { DATA_BLOCK_DESC_CODE_INLINE_ATTRS = 1, DATA_BLOCK_DESC_CODE_SLOTS };
+
+static int32_t dataBlockDescNodeInlineToMsg(const void* pObj, STlvEncoder* pEncoder) {
+ const SDataBlockDescNode* pNode = (const SDataBlockDescNode*)pObj;
+
+ int32_t code = tlvEncodeValueI16(pEncoder, pNode->dataBlockId);
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tlvEncodeValueI32(pEncoder, pNode->totalRowSize);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tlvEncodeValueI32(pEncoder, pNode->outputRowSize);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tlvEncodeValueU8(pEncoder, pNode->precision);
+ }
+
+ return code;
+}
+
+static int32_t dataBlockDescNodeToMsg(const void* pObj, STlvEncoder* pEncoder) {
+ const SDataBlockDescNode* pNode = (const SDataBlockDescNode*)pObj;
+
+ int32_t code = tlvEncodeObj(pEncoder, DATA_BLOCK_DESC_CODE_INLINE_ATTRS, dataBlockDescNodeInlineToMsg, pNode);
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tlvEncodeObj(pEncoder, DATA_BLOCK_DESC_CODE_SLOTS, nodeListToMsg, pNode->pSlots);
+ }
+
+ return code;
+}
+
+static int32_t msgToDataBlockDescNodeInline(STlvDecoder* pDecoder, void* pObj) {
+ SDataBlockDescNode* pNode = (SDataBlockDescNode*)pObj;
+
+ int32_t code = tlvDecodeValueI16(pDecoder, &pNode->dataBlockId);
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tlvDecodeValueI32(pDecoder, &pNode->totalRowSize);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tlvDecodeValueI32(pDecoder, &pNode->outputRowSize);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tlvDecodeValueU8(pDecoder, &pNode->precision);
+ }
+
+ return code;
+}
+
+static int32_t msgToDataBlockDescNode(STlvDecoder* pDecoder, void* pObj) {
+ SDataBlockDescNode* pNode = (SDataBlockDescNode*)pObj;
+
+ int32_t code = TSDB_CODE_SUCCESS;
+ STlv* pTlv = NULL;
+ tlvForEach(pDecoder, pTlv, code) {
+ switch (pTlv->type) {
+ case DATA_BLOCK_DESC_CODE_INLINE_ATTRS:
+ code = tlvDecodeObjFromTlv(pTlv, msgToDataBlockDescNodeInline, pNode);
+ break;
+ case DATA_BLOCK_DESC_CODE_SLOTS:
+ code = msgToNodeListFromTlv(pTlv, (void**)&pNode->pSlots);
+ break;
+ default:
+ break;
+ }
+ }
+
+ return code;
+}
+
+enum { SLOT_DESC_CODE_INLINE_ATTRS = 1 };
+
+static int32_t slotDescNodeInlineToMsg(const void* pObj, STlvEncoder* pEncoder) {
+ const SSlotDescNode* pNode = (const SSlotDescNode*)pObj;
+
+ int32_t code = tlvEncodeValueI16(pEncoder, pNode->slotId);
+ if (TSDB_CODE_SUCCESS == code) {
+ code = dataTypeInlineToMsg(&pNode->dataType, pEncoder);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tlvEncodeValueBool(pEncoder, pNode->reserve);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tlvEncodeValueBool(pEncoder, pNode->output);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tlvEncodeValueBool(pEncoder, pNode->tag);
+ }
+
+ return code;
+}
+
+static int32_t slotDescNodeToMsg(const void* pObj, STlvEncoder* pEncoder) {
+ return tlvEncodeObj(pEncoder, SLOT_DESC_CODE_INLINE_ATTRS, slotDescNodeInlineToMsg, pObj);
+}
+
+static int32_t msgToSlotDescNodeInline(STlvDecoder* pDecoder, void* pObj) {
+ SSlotDescNode* pNode = (SSlotDescNode*)pObj;
+
+ int32_t code = tlvDecodeValueI16(pDecoder, &pNode->slotId);
+ if (TSDB_CODE_SUCCESS == code) {
+ code = msgToDataTypeInline(pDecoder, &pNode->dataType);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tlvDecodeValueBool(pDecoder, &pNode->reserve);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tlvDecodeValueBool(pDecoder, &pNode->output);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tlvDecodeValueBool(pDecoder, &pNode->tag);
+ }
+
+ return code;
+}
+
+static int32_t msgToSlotDescNode(STlvDecoder* pDecoder, void* pObj) {
+ SSlotDescNode* pNode = (SSlotDescNode*)pObj;
+
+ int32_t code = TSDB_CODE_SUCCESS;
+ STlv* pTlv = NULL;
+ tlvForEach(pDecoder, pTlv, code) {
+ switch (pTlv->type) {
+ case SLOT_DESC_CODE_INLINE_ATTRS:
+ code = tlvDecodeObjFromTlv(pTlv, msgToSlotDescNodeInline, pNode);
+ break;
+ default:
+ break;
+ }
+ }
+
+ return code;
+}
+
+enum { EP_CODE_FQDN = 1, EP_CODE_port };
+
+static int32_t epInlineToMsg(const void* pObj, STlvEncoder* pEncoder) {
+ const SEp* pNode = (const SEp*)pObj;
+
+ int32_t code = tlvEncodeValueCStr(pEncoder, pNode->fqdn);
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tlvEncodeValueU16(pEncoder, pNode->port);
+ }
+
+ return code;
+}
+
+static int32_t epToMsg(const void* pObj, STlvEncoder* pEncoder) {
+ const SEp* pNode = (const SEp*)pObj;
+
+ int32_t code = tlvEncodeCStr(pEncoder, EP_CODE_FQDN, pNode->fqdn);
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tlvEncodeU16(pEncoder, EP_CODE_port, pNode->port);
+ }
+
+ return code;
+}
+
+static int32_t msgToEpInline(STlvDecoder* pDecoder, void* pObj) {
+ SEp* pNode = (SEp*)pObj;
+
+ int32_t code = tlvDecodeValueCStr(pDecoder, pNode->fqdn);
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tlvDecodeValueU16(pDecoder, &pNode->port);
+ }
+
+ return code;
+}
+
+static int32_t msgToEp(STlvDecoder* pDecoder, void* pObj) {
+ SEp* pNode = (SEp*)pObj;
+
+ int32_t code = TSDB_CODE_SUCCESS;
+ STlv* pTlv = NULL;
+ tlvForEach(pDecoder, pTlv, code) {
+ switch (pTlv->type) {
+ case EP_CODE_FQDN:
+ code = tlvDecodeCStr(pTlv, pNode->fqdn);
+ break;
+ case EP_CODE_port:
+ code = tlvDecodeU16(pTlv, &pNode->port);
+ break;
+ default:
+ break;
+ }
+ }
+
+ return code;
+}
+
+enum { EP_SET_CODE_IN_USE = 1, EP_SET_CODE_NUM_OF_EPS, EP_SET_CODE_EPS };
+
+static int32_t epSetInlineToMsg(const void* pObj, STlvEncoder* pEncoder) {
+ const SEpSet* pNode = (const SEpSet*)pObj;
+
+ int32_t code = tlvEncodeValueI8(pEncoder, pNode->inUse);
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tlvEncodeValueArray(pEncoder, epInlineToMsg, pNode->eps, sizeof(SEp), pNode->numOfEps);
+ }
+
+ return code;
+}
+
+static int32_t epSetToMsg(const void* pObj, STlvEncoder* pEncoder) {
+ const SEpSet* pNode = (const SEpSet*)pObj;
+
+ int32_t code = tlvEncodeI8(pEncoder, EP_SET_CODE_IN_USE, pNode->inUse);
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tlvEncodeI8(pEncoder, EP_SET_CODE_NUM_OF_EPS, pNode->numOfEps);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tlvEncodeObjArray(pEncoder, EP_SET_CODE_EPS, epToMsg, pNode->eps, sizeof(SEp), pNode->numOfEps);
+ }
+
+ return code;
+}
+
+static int32_t msgToEpSetInline(STlvDecoder* pDecoder, void* pObj) {
+ SEpSet* pNode = (SEpSet*)pObj;
+
+ int32_t code = tlvDecodeValueI8(pDecoder, &pNode->inUse);
+ if (TSDB_CODE_SUCCESS == code) {
+ int32_t numOfEps = 0;
+ code = tlvDecodeValueArray(pDecoder, msgToEpInline, pNode->eps, sizeof(SEp), &numOfEps);
+ pNode->numOfEps = numOfEps;
+ }
+
+ return code;
+}
+
+static int32_t msgToEpSet(STlvDecoder* pDecoder, void* pObj) {
+ SEpSet* pNode = (SEpSet*)pObj;
+
+ int32_t code = TSDB_CODE_SUCCESS;
+ STlv* pTlv = NULL;
+ tlvForEach(pDecoder, pTlv, code) {
+ switch (pTlv->type) {
+ case EP_SET_CODE_IN_USE:
+ code = tlvDecodeI8(pTlv, &pNode->inUse);
+ break;
+ case EP_SET_CODE_NUM_OF_EPS:
+ code = tlvDecodeI8(pTlv, &pNode->numOfEps);
+ break;
+ case EP_SET_CODE_EPS:
+ code = tlvDecodeObjArrayFromTlv(pTlv, msgToEp, pNode->eps, sizeof(SEp));
+ break;
+ default:
+ break;
+ }
+ }
+
+ return code;
+}
+
+enum { QUERY_NODE_ADDR_CODE_NODE_ID = 1, QUERY_NODE_ADDR_CODE_EP_SET };
+
+static int32_t queryNodeAddrInlineToMsg(const void* pObj, STlvEncoder* pEncoder) {
+ const SQueryNodeAddr* pNode = (const SQueryNodeAddr*)pObj;
+
+ int32_t code = tlvEncodeValueI32(pEncoder, pNode->nodeId);
+ if (TSDB_CODE_SUCCESS == code) {
+ code = epSetInlineToMsg(&pNode->epSet, pEncoder);
+ }
+
+ return code;
+}
+
+static int32_t queryNodeAddrToMsg(const void* pObj, STlvEncoder* pEncoder) {
+ const SQueryNodeAddr* pNode = (const SQueryNodeAddr*)pObj;
+
+ int32_t code = tlvEncodeI32(pEncoder, QUERY_NODE_ADDR_CODE_NODE_ID, pNode->nodeId);
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tlvEncodeObj(pEncoder, QUERY_NODE_ADDR_CODE_EP_SET, epSetToMsg, &pNode->epSet);
+ }
+
+ return code;
+}
+
+static int32_t msgToQueryNodeAddrInline(STlvDecoder* pDecoder, void* pObj) {
+ SQueryNodeAddr* pNode = (SQueryNodeAddr*)pObj;
+
+ int32_t code = tlvDecodeValueI32(pDecoder, &pNode->nodeId);
+ if (TSDB_CODE_SUCCESS == code) {
+ code = msgToEpSetInline(pDecoder, &pNode->epSet);
+ }
+
+ return code;
+}
+
+static int32_t msgToQueryNodeAddr(STlvDecoder* pDecoder, void* pObj) {
+ SQueryNodeAddr* pNode = (SQueryNodeAddr*)pObj;
+
+ int32_t code = TSDB_CODE_SUCCESS;
+ STlv* pTlv = NULL;
+ tlvForEach(pDecoder, pTlv, code) {
+ switch (pTlv->type) {
+ case QUERY_NODE_ADDR_CODE_NODE_ID:
+ code = tlvDecodeI32(pTlv, &pNode->nodeId);
+ break;
+ case QUERY_NODE_ADDR_CODE_EP_SET:
+ code = tlvDecodeObjFromTlv(pTlv, msgToEpSet, &pNode->epSet);
+ break;
+ }
+ }
+
+ return code;
+}
+
+enum { DOWNSTREAM_SOURCE_CODE_INLINE_ATTRS = 1 };
+
+static int32_t downstreamSourceNodeInlineToMsg(const void* pObj, STlvEncoder* pEncoder) {
+ const SDownstreamSourceNode* pNode = (const SDownstreamSourceNode*)pObj;
+
+ int32_t code = queryNodeAddrInlineToMsg(&pNode->addr, pEncoder);
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tlvEncodeValueU64(pEncoder, pNode->taskId);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tlvEncodeValueU64(pEncoder, pNode->schedId);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tlvEncodeValueI32(pEncoder, pNode->execId);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tlvEncodeValueI32(pEncoder, pNode->fetchMsgType);
+ }
+
+ return code;
+}
+
+static int32_t downstreamSourceNodeToMsg(const void* pObj, STlvEncoder* pEncoder) {
+ return tlvEncodeObj(pEncoder, DOWNSTREAM_SOURCE_CODE_INLINE_ATTRS, downstreamSourceNodeInlineToMsg, pObj);
+}
+
+static int32_t msgToDownstreamSourceNodeInlineToMsg(STlvDecoder* pDecoder, void* pObj) {
+ SDownstreamSourceNode* pNode = (SDownstreamSourceNode*)pObj;
+
+ int32_t code = msgToQueryNodeAddrInline(pDecoder, &pNode->addr);
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tlvDecodeValueU64(pDecoder, &pNode->taskId);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tlvDecodeValueU64(pDecoder, &pNode->schedId);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tlvDecodeValueI32(pDecoder, &pNode->execId);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tlvDecodeValueI32(pDecoder, &pNode->fetchMsgType);
+ }
+
+ return code;
+}
+
+static int32_t msgToDownstreamSourceNode(STlvDecoder* pDecoder, void* pObj) {
+ SDownstreamSourceNode* pNode = (SDownstreamSourceNode*)pObj;
+
+ int32_t code = TSDB_CODE_SUCCESS;
+ STlv* pTlv = NULL;
+ tlvForEach(pDecoder, pTlv, code) {
+ switch (pTlv->type) {
+ case DOWNSTREAM_SOURCE_CODE_INLINE_ATTRS:
+ code = tlvDecodeObjFromTlv(pTlv, msgToDownstreamSourceNodeInlineToMsg, pNode);
+ break;
+ default:
+ break;
+ }
+ }
+
+ return code;
+}
+
+enum { WHEN_THEN_CODE_EXPR_BASE = 1, WHEN_THEN_CODE_WHEN, WHEN_THEN_CODE_THEN };
+
+static int32_t whenThenNodeToMsg(const void* pObj, STlvEncoder* pEncoder) {
+ const SWhenThenNode* pNode = (const SWhenThenNode*)pObj;
+
+ int32_t code = tlvEncodeObj(pEncoder, WHEN_THEN_CODE_EXPR_BASE, exprNodeToMsg, pNode);
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tlvEncodeObj(pEncoder, WHEN_THEN_CODE_WHEN, nodeToMsg, pNode->pWhen);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tlvEncodeObj(pEncoder, WHEN_THEN_CODE_THEN, nodeToMsg, pNode->pThen);
+ }
+
+ return code;
+}
+
+static int32_t msgToWhenThenNode(STlvDecoder* pDecoder, void* pObj) {
+ SWhenThenNode* pNode = (SWhenThenNode*)pObj;
+
+ int32_t code = TSDB_CODE_SUCCESS;
+ STlv* pTlv = NULL;
+ tlvForEach(pDecoder, pTlv, code) {
+ switch (pTlv->type) {
+ case WHEN_THEN_CODE_EXPR_BASE:
+ code = tlvDecodeObjFromTlv(pTlv, msgToExprNode, &pNode->node);
+ break;
+ case WHEN_THEN_CODE_WHEN:
+ code = msgToNodeFromTlv(pTlv, (void**)&pNode->pWhen);
+ break;
+ case WHEN_THEN_CODE_THEN:
+ code = msgToNodeFromTlv(pTlv, (void**)&pNode->pThen);
+ break;
+ default:
+ break;
+ }
+ }
+
+ return code;
+}
+
+enum { CASE_WHEN_CODE_EXPR_BASE = 1, CASE_WHEN_CODE_CASE, CASE_WHEN_CODE_ELSE, CASE_WHEN_CODE_WHEN_THEN_LIST };
+
+static int32_t caseWhenNodeToMsg(const void* pObj, STlvEncoder* pEncoder) {
+ const SCaseWhenNode* pNode = (const SCaseWhenNode*)pObj;
+
+ int32_t code = tlvEncodeObj(pEncoder, CASE_WHEN_CODE_EXPR_BASE, exprNodeToMsg, pNode);
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tlvEncodeObj(pEncoder, CASE_WHEN_CODE_CASE, nodeToMsg, pNode->pCase);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tlvEncodeObj(pEncoder, CASE_WHEN_CODE_ELSE, nodeToMsg, pNode->pElse);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tlvEncodeObj(pEncoder, CASE_WHEN_CODE_WHEN_THEN_LIST, nodeListToMsg, pNode->pWhenThenList);
+ }
+
+ return code;
+}
+
+static int32_t msgToCaseWhenNode(STlvDecoder* pDecoder, void* pObj) {
+ SCaseWhenNode* pNode = (SCaseWhenNode*)pObj;
+
+ int32_t code = TSDB_CODE_SUCCESS;
+ STlv* pTlv = NULL;
+ tlvForEach(pDecoder, pTlv, code) {
+ switch (pTlv->type) {
+ case CASE_WHEN_CODE_EXPR_BASE:
+ code = tlvDecodeObjFromTlv(pTlv, msgToExprNode, &pNode->node);
+ break;
+ case CASE_WHEN_CODE_CASE:
+ code = msgToNodeFromTlv(pTlv, (void**)&pNode->pCase);
+ break;
+ case CASE_WHEN_CODE_ELSE:
+ code = msgToNodeFromTlv(pTlv, (void**)&pNode->pElse);
+ break;
+ case CASE_WHEN_CODE_WHEN_THEN_LIST:
+ code = msgToNodeListFromTlv(pTlv, (void**)&pNode->pWhenThenList);
+ break;
+ default:
+ break;
+ }
+ }
+
+ return code;
+}
+
+enum {
+ PHY_NODE_CODE_OUTPUT_DESC = 1,
+ PHY_NODE_CODE_CONDITIONS,
+ PHY_NODE_CODE_CHILDREN,
+ PHY_NODE_CODE_LIMIT,
+ PHY_NODE_CODE_SLIMIT
+};
+
+static int32_t physiNodeToMsg(const void* pObj, STlvEncoder* pEncoder) {
+ const SPhysiNode* pNode = (const SPhysiNode*)pObj;
+
+ int32_t code = tlvEncodeObj(pEncoder, PHY_NODE_CODE_OUTPUT_DESC, nodeToMsg, pNode->pOutputDataBlockDesc);
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tlvEncodeObj(pEncoder, PHY_NODE_CODE_CONDITIONS, nodeToMsg, pNode->pConditions);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tlvEncodeObj(pEncoder, PHY_NODE_CODE_CHILDREN, nodeListToMsg, pNode->pChildren);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tlvEncodeObj(pEncoder, PHY_NODE_CODE_LIMIT, nodeToMsg, pNode->pLimit);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tlvEncodeObj(pEncoder, PHY_NODE_CODE_SLIMIT, nodeToMsg, pNode->pSlimit);
+ }
+
+ return code;
+}
+
+static int32_t msgToPhysiNode(STlvDecoder* pDecoder, void* pObj) {
+ SPhysiNode* pNode = (SPhysiNode*)pObj;
+
+ int32_t code = TSDB_CODE_SUCCESS;
+ STlv* pTlv = NULL;
+ tlvForEach(pDecoder, pTlv, code) {
+ switch (pTlv->type) {
+ case PHY_NODE_CODE_OUTPUT_DESC:
+ code = msgToNodeFromTlv(pTlv, (void**)&pNode->pOutputDataBlockDesc);
+ break;
+ case PHY_NODE_CODE_CONDITIONS:
+ code = msgToNodeFromTlv(pTlv, (void**)&pNode->pConditions);
+ break;
+ case PHY_NODE_CODE_CHILDREN:
+ code = msgToNodeListFromTlv(pTlv, (void**)&pNode->pChildren);
+ break;
+ case PHY_NODE_CODE_LIMIT:
+ code = msgToNodeFromTlv(pTlv, (void**)&pNode->pLimit);
+ break;
+ case PHY_NODE_CODE_SLIMIT:
+ code = msgToNodeFromTlv(pTlv, (void**)&pNode->pSlimit);
+ break;
+ default:
+ break;
+ }
+ }
+
+ return code;
+}
+
+enum {
+ PHY_SCAN_CODE_BASE_NODE = 1,
+ PHY_SCAN_CODE_SCAN_COLS,
+ PHY_SCAN_CODE_SCAN_PSEUDO_COLS,
+ PHY_SCAN_CODE_BASE_UID,
+ PHY_SCAN_CODE_BASE_SUID,
+ PHY_SCAN_CODE_BASE_TABLE_TYPE,
+ PHY_SCAN_CODE_BASE_TABLE_NAME
+};
+
+static int32_t physiScanNodeToMsg(const void* pObj, STlvEncoder* pEncoder) {
+ const SScanPhysiNode* pNode = (const SScanPhysiNode*)pObj;
+
+ int32_t code = tlvEncodeObj(pEncoder, PHY_SCAN_CODE_BASE_NODE, physiNodeToMsg, &pNode->node);
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tlvEncodeObj(pEncoder, PHY_SCAN_CODE_SCAN_COLS, nodeListToMsg, pNode->pScanCols);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tlvEncodeObj(pEncoder, PHY_SCAN_CODE_SCAN_PSEUDO_COLS, nodeListToMsg, pNode->pScanPseudoCols);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tlvEncodeU64(pEncoder, PHY_SCAN_CODE_BASE_UID, pNode->uid);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tlvEncodeU64(pEncoder, PHY_SCAN_CODE_BASE_SUID, pNode->suid);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tlvEncodeI8(pEncoder, PHY_SCAN_CODE_BASE_TABLE_TYPE, pNode->tableType);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tlvEncodeObj(pEncoder, PHY_SCAN_CODE_BASE_TABLE_NAME, nameToMsg, &pNode->tableName);
+ }
+
+ return code;
+}
+
+static int32_t msgToPhysiScanNode(STlvDecoder* pDecoder, void* pObj) {
+ SScanPhysiNode* pNode = (SScanPhysiNode*)pObj;
+
+ int32_t code = TSDB_CODE_SUCCESS;
+ STlv* pTlv = NULL;
+ tlvForEach(pDecoder, pTlv, code) {
+ switch (pTlv->type) {
+ case PHY_SCAN_CODE_BASE_NODE:
+ code = tlvDecodeObjFromTlv(pTlv, msgToPhysiNode, &pNode->node);
+ break;
+ case PHY_SCAN_CODE_SCAN_COLS:
+ code = msgToNodeListFromTlv(pTlv, (void**)&pNode->pScanCols);
+ break;
+ case PHY_SCAN_CODE_SCAN_PSEUDO_COLS:
+ code = msgToNodeListFromTlv(pTlv, (void**)&pNode->pScanPseudoCols);
+ break;
+ case PHY_SCAN_CODE_BASE_UID:
+ code = tlvDecodeU64(pTlv, &pNode->uid);
+ break;
+ case PHY_SCAN_CODE_BASE_SUID:
+ code = tlvDecodeU64(pTlv, &pNode->suid);
+ break;
+ case PHY_SCAN_CODE_BASE_TABLE_TYPE:
+ code = tlvDecodeI8(pTlv, &pNode->tableType);
+ break;
+ case PHY_SCAN_CODE_BASE_TABLE_NAME:
+ code = tlvDecodeObjFromTlv(pTlv, msgToName, &pNode->tableName);
+ break;
+ default:
+ break;
+ }
+ }
+
+ return code;
+}
+
+enum { PHY_LAST_ROW_SCAN_CODE_SCAN = 1, PHY_LAST_ROW_SCAN_CODE_GROUP_TAGS, PHY_LAST_ROW_SCAN_CODE_GROUP_SORT };
+
+static int32_t physiLastRowScanNodeToMsg(const void* pObj, STlvEncoder* pEncoder) {
+ const SLastRowScanPhysiNode* pNode = (const SLastRowScanPhysiNode*)pObj;
+
+ int32_t code = tlvEncodeObj(pEncoder, PHY_LAST_ROW_SCAN_CODE_SCAN, physiScanNodeToMsg, &pNode->scan);
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tlvEncodeObj(pEncoder, PHY_LAST_ROW_SCAN_CODE_GROUP_TAGS, nodeListToMsg, pNode->pGroupTags);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tlvEncodeBool(pEncoder, PHY_LAST_ROW_SCAN_CODE_GROUP_SORT, pNode->groupSort);
+ }
+
+ return code;
+}
+
+static int32_t msgToPhysiLastRowScanNode(STlvDecoder* pDecoder, void* pObj) {
+ SLastRowScanPhysiNode* pNode = (SLastRowScanPhysiNode*)pObj;
+
+ int32_t code = TSDB_CODE_SUCCESS;
+ STlv* pTlv = NULL;
+ tlvForEach(pDecoder, pTlv, code) {
+ switch (pTlv->type) {
+ case PHY_LAST_ROW_SCAN_CODE_SCAN:
+ code = tlvDecodeObjFromTlv(pTlv, msgToPhysiScanNode, &pNode->scan);
+ break;
+ case PHY_LAST_ROW_SCAN_CODE_GROUP_TAGS:
+ code = msgToNodeListFromTlv(pTlv, (void**)&pNode->pGroupTags);
+ break;
+ case PHY_LAST_ROW_SCAN_CODE_GROUP_SORT:
+ code = tlvDecodeBool(pTlv, &pNode->groupSort);
+ break;
+ default:
+ break;
+ }
+ }
+
+ return code;
+}
+
+enum {
+ PHY_TABLE_SCAN_CODE_SCAN = 1,
+ PHY_TABLE_SCAN_CODE_INLINE_ATTRS,
+ PHY_TABLE_SCAN_CODE_DYN_SCAN_FUNCS,
+ PHY_TABLE_SCAN_CODE_GROUP_TAGS,
+ PHY_TABLE_SCAN_CODE_TAGS,
+ PHY_TABLE_SCAN_CODE_SUBTABLE
+};
+
+static int32_t physiTableScanNodeInlineToMsg(const void* pObj, STlvEncoder* pEncoder) {
+ const STableScanPhysiNode* pNode = (const STableScanPhysiNode*)pObj;
+
+ int32_t code = tlvEncodeValueU8(pEncoder, pNode->scanSeq[0]);
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tlvEncodeValueU8(pEncoder, pNode->scanSeq[1]);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tlvEncodeValueI64(pEncoder, pNode->scanRange.skey);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tlvEncodeValueI64(pEncoder, pNode->scanRange.ekey);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tlvEncodeValueDouble(pEncoder, pNode->ratio);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tlvEncodeValueI32(pEncoder, pNode->dataRequired);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tlvEncodeValueBool(pEncoder, pNode->groupSort);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tlvEncodeValueI64(pEncoder, pNode->interval);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tlvEncodeValueI64(pEncoder, pNode->offset);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tlvEncodeValueI64(pEncoder, pNode->sliding);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tlvEncodeValueI8(pEncoder, pNode->intervalUnit);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tlvEncodeValueI8(pEncoder, pNode->slidingUnit);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tlvEncodeValueI8(pEncoder, pNode->triggerType);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tlvEncodeValueI64(pEncoder, pNode->watermark);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tlvEncodeValueI8(pEncoder, pNode->igExpired);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tlvEncodeValueBool(pEncoder, pNode->assignBlockUid);
+ }
+
+ return code;
+}
+
+static int32_t physiTableScanNodeToMsg(const void* pObj, STlvEncoder* pEncoder) {
+ const STableScanPhysiNode* pNode = (const STableScanPhysiNode*)pObj;
+
+ int32_t code = tlvEncodeObj(pEncoder, PHY_TABLE_SCAN_CODE_SCAN, physiScanNodeToMsg, &pNode->scan);
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tlvEncodeObj(pEncoder, PHY_TABLE_SCAN_CODE_INLINE_ATTRS, physiTableScanNodeInlineToMsg, pNode);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tlvEncodeObj(pEncoder, PHY_TABLE_SCAN_CODE_DYN_SCAN_FUNCS, nodeListToMsg, pNode->pDynamicScanFuncs);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tlvEncodeObj(pEncoder, PHY_TABLE_SCAN_CODE_GROUP_TAGS, nodeListToMsg, pNode->pGroupTags);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tlvEncodeObj(pEncoder, PHY_TABLE_SCAN_CODE_TAGS, nodeListToMsg, pNode->pTags);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tlvEncodeObj(pEncoder, PHY_TABLE_SCAN_CODE_SUBTABLE, nodeToMsg, pNode->pSubtable);
+ }
+
+ return code;
+}
+
+static int32_t msgToPhysiTableScanNodeInline(STlvDecoder* pDecoder, void* pObj) {
+ STableScanPhysiNode* pNode = (STableScanPhysiNode*)pObj;
+
+ int32_t code = tlvDecodeValueU8(pDecoder, pNode->scanSeq);
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tlvDecodeValueU8(pDecoder, pNode->scanSeq + 1);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tlvDecodeValueI64(pDecoder, &pNode->scanRange.skey);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tlvDecodeValueI64(pDecoder, &pNode->scanRange.ekey);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tlvDecodeValueDouble(pDecoder, &pNode->ratio);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tlvDecodeValueI32(pDecoder, &pNode->dataRequired);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tlvDecodeValueBool(pDecoder, &pNode->groupSort);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tlvDecodeValueI64(pDecoder, &pNode->interval);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tlvDecodeValueI64(pDecoder, &pNode->offset);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tlvDecodeValueI64(pDecoder, &pNode->sliding);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tlvDecodeValueI8(pDecoder, &pNode->intervalUnit);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tlvDecodeValueI8(pDecoder, &pNode->slidingUnit);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tlvDecodeValueI8(pDecoder, &pNode->triggerType);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tlvDecodeValueI64(pDecoder, &pNode->watermark);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tlvDecodeValueI8(pDecoder, &pNode->igExpired);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tlvDecodeValueBool(pDecoder, &pNode->assignBlockUid);
+ }
+
+ return code;
+}
+
+static int32_t msgToPhysiTableScanNode(STlvDecoder* pDecoder, void* pObj) {
+ STableScanPhysiNode* pNode = (STableScanPhysiNode*)pObj;
+
+ int32_t code = TSDB_CODE_SUCCESS;
+ STlv* pTlv = NULL;
+ tlvForEach(pDecoder, pTlv, code) {
+ switch (pTlv->type) {
+ case PHY_TABLE_SCAN_CODE_SCAN:
+ code = tlvDecodeObjFromTlv(pTlv, msgToPhysiScanNode, &pNode->scan);
+ break;
+ case PHY_TABLE_SCAN_CODE_INLINE_ATTRS:
+ code = tlvDecodeObjFromTlv(pTlv, msgToPhysiTableScanNodeInline, pNode);
+ break;
+ case PHY_TABLE_SCAN_CODE_DYN_SCAN_FUNCS:
+ code = msgToNodeListFromTlv(pTlv, (void**)&pNode->pDynamicScanFuncs);
+ break;
+ case PHY_TABLE_SCAN_CODE_GROUP_TAGS:
+ code = msgToNodeListFromTlv(pTlv, (void**)&pNode->pGroupTags);
+ break;
+ case PHY_TABLE_SCAN_CODE_TAGS:
+ code = msgToNodeListFromTlv(pTlv, (void**)&pNode->pTags);
+ break;
+ case PHY_TABLE_SCAN_CODE_SUBTABLE:
+ code = msgToNodeFromTlv(pTlv, (void**)&pNode->pSubtable);
+ break;
+ default:
+ break;
+ }
+ }
+
+ return code;
+}
+
+enum {
+ PHY_SYSTABLE_SCAN_CODE_SCAN = 1,
+ PHY_SYSTABLE_SCAN_CODE_MGMT_EP_SET,
+ PHY_SYSTABLE_SCAN_CODE_SHOW_REWRITE,
+ PHY_SYSTABLE_SCAN_CODE_ACCOUNT_ID,
+ PHY_SYSTABLE_SCAN_CODE_SYS_INFO
+};
+
+static int32_t physiSysTableScanNodeToMsg(const void* pObj, STlvEncoder* pEncoder) {
+ const SSystemTableScanPhysiNode* pNode = (const SSystemTableScanPhysiNode*)pObj;
+
+ int32_t code = tlvEncodeObj(pEncoder, PHY_SYSTABLE_SCAN_CODE_SCAN, physiScanNodeToMsg, &pNode->scan);
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tlvEncodeObj(pEncoder, PHY_SYSTABLE_SCAN_CODE_MGMT_EP_SET, epSetToMsg, &pNode->mgmtEpSet);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tlvEncodeBool(pEncoder, PHY_SYSTABLE_SCAN_CODE_SHOW_REWRITE, pNode->showRewrite);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tlvEncodeI32(pEncoder, PHY_SYSTABLE_SCAN_CODE_ACCOUNT_ID, pNode->accountId);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tlvEncodeBool(pEncoder, PHY_SYSTABLE_SCAN_CODE_SYS_INFO, pNode->sysInfo);
+ }
+
+ return code;
+}
+
+static int32_t msgToPhysiSysTableScanNode(STlvDecoder* pDecoder, void* pObj) {
+ SSystemTableScanPhysiNode* pNode = (SSystemTableScanPhysiNode*)pObj;
+
+ int32_t code = TSDB_CODE_SUCCESS;
+ STlv* pTlv = NULL;
+ tlvForEach(pDecoder, pTlv, code) {
+ switch (pTlv->type) {
+ case PHY_SYSTABLE_SCAN_CODE_SCAN:
+ code = tlvDecodeObjFromTlv(pTlv, msgToPhysiScanNode, &pNode->scan);
+ break;
+ case PHY_SYSTABLE_SCAN_CODE_MGMT_EP_SET:
+ code = tlvDecodeObjFromTlv(pTlv, msgToEpSet, &pNode->mgmtEpSet);
+ break;
+ case PHY_SYSTABLE_SCAN_CODE_SHOW_REWRITE:
+ code = tlvDecodeBool(pTlv, &pNode->showRewrite);
+ break;
+ case PHY_SYSTABLE_SCAN_CODE_ACCOUNT_ID:
+ code = tlvDecodeI32(pTlv, &pNode->accountId);
+ break;
+ case PHY_SYSTABLE_SCAN_CODE_SYS_INFO:
+ code = tlvDecodeBool(pTlv, &pNode->sysInfo);
+ break;
+ default:
+ break;
+ }
+ }
+
+ return code;
+}
+
+enum {
+ PHY_PROJECT_CODE_BASE_NODE = 1,
+ PHY_PROJECT_CODE_PROJECTIONS,
+ PHY_PROJECT_CODE_MERGE_DATA_BLOCK,
+ PHY_PROJECT_CODE_IGNORE_GROUP_ID
+};
+
+static int32_t physiProjectNodeToMsg(const void* pObj, STlvEncoder* pEncoder) {
+ const SProjectPhysiNode* pNode = (const SProjectPhysiNode*)pObj;
+
+ int32_t code = tlvEncodeObj(pEncoder, PHY_PROJECT_CODE_BASE_NODE, physiNodeToMsg, &pNode->node);
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tlvEncodeObj(pEncoder, PHY_PROJECT_CODE_PROJECTIONS, nodeListToMsg, pNode->pProjections);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tlvEncodeBool(pEncoder, PHY_PROJECT_CODE_MERGE_DATA_BLOCK, pNode->mergeDataBlock);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tlvEncodeBool(pEncoder, PHY_PROJECT_CODE_IGNORE_GROUP_ID, pNode->ignoreGroupId);
+ }
+
+ return code;
+}
+
+static int32_t msgToPhysiProjectNode(STlvDecoder* pDecoder, void* pObj) {
+ SProjectPhysiNode* pNode = (SProjectPhysiNode*)pObj;
+
+ int32_t code = TSDB_CODE_SUCCESS;
+ STlv* pTlv = NULL;
+ tlvForEach(pDecoder, pTlv, code) {
+ switch (pTlv->type) {
+ case PHY_PROJECT_CODE_BASE_NODE:
+ code = tlvDecodeObjFromTlv(pTlv, msgToPhysiNode, &pNode->node);
+ break;
+ case PHY_PROJECT_CODE_PROJECTIONS:
+ code = msgToNodeListFromTlv(pTlv, (void**)&pNode->pProjections);
+ break;
+ case PHY_PROJECT_CODE_MERGE_DATA_BLOCK:
+ code = tlvDecodeBool(pTlv, &pNode->mergeDataBlock);
+ break;
+ case PHY_PROJECT_CODE_IGNORE_GROUP_ID:
+ code = tlvDecodeBool(pTlv, &pNode->ignoreGroupId);
+ break;
+ default:
+ break;
+ }
+ }
+
+ return code;
+}
+
+enum {
+ PHY_SORT_MERGE_JOIN_CODE_BASE_NODE = 1,
+ PHY_SORT_MERGE_JOIN_CODE_JOIN_TYPE,
+ PHY_SORT_MERGE_JOIN_CODE_MERGE_CONDITION,
+ PHY_SORT_MERGE_JOIN_CODE_ON_CONDITIONS,
+ PHY_SORT_MERGE_JOIN_CODE_TARGETS,
+ PHY_SORT_MERGE_JOIN_CODE_INPUT_TS_ORDER
+};
+
+static int32_t physiJoinNodeToMsg(const void* pObj, STlvEncoder* pEncoder) {
+ const SSortMergeJoinPhysiNode* pNode = (const SSortMergeJoinPhysiNode*)pObj;
+
+ int32_t code = tlvEncodeObj(pEncoder, PHY_SORT_MERGE_JOIN_CODE_BASE_NODE, physiNodeToMsg, &pNode->node);
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tlvEncodeEnum(pEncoder, PHY_SORT_MERGE_JOIN_CODE_JOIN_TYPE, pNode->joinType);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tlvEncodeObj(pEncoder, PHY_SORT_MERGE_JOIN_CODE_MERGE_CONDITION, nodeToMsg, pNode->pMergeCondition);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tlvEncodeObj(pEncoder, PHY_SORT_MERGE_JOIN_CODE_ON_CONDITIONS, nodeToMsg, pNode->pOnConditions);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tlvEncodeObj(pEncoder, PHY_SORT_MERGE_JOIN_CODE_TARGETS, nodeListToMsg, pNode->pTargets);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tlvEncodeEnum(pEncoder, PHY_SORT_MERGE_JOIN_CODE_INPUT_TS_ORDER, pNode->inputTsOrder);
+ }
+
+ return code;
+}
+
+static int32_t msgToPhysiJoinNode(STlvDecoder* pDecoder, void* pObj) {
+ SSortMergeJoinPhysiNode* pNode = (SSortMergeJoinPhysiNode*)pObj;
+
+ int32_t code = TSDB_CODE_SUCCESS;
+ STlv* pTlv = NULL;
+ tlvForEach(pDecoder, pTlv, code) {
+ switch (pTlv->type) {
+ case PHY_SORT_MERGE_JOIN_CODE_BASE_NODE:
+ code = tlvDecodeObjFromTlv(pTlv, msgToPhysiNode, &pNode->node);
+ break;
+ case PHY_SORT_MERGE_JOIN_CODE_JOIN_TYPE:
+ code = tlvDecodeEnum(pTlv, &pNode->joinType, sizeof(pNode->joinType));
+ break;
+ case PHY_SORT_MERGE_JOIN_CODE_MERGE_CONDITION:
+ code = msgToNodeFromTlv(pTlv, (void**)&pNode->pMergeCondition);
+ break;
+ case PHY_SORT_MERGE_JOIN_CODE_ON_CONDITIONS:
+ code = msgToNodeFromTlv(pTlv, (void**)&pNode->pOnConditions);
+ break;
+ case PHY_SORT_MERGE_JOIN_CODE_TARGETS:
+ code = msgToNodeListFromTlv(pTlv, (void**)&pNode->pTargets);
+ break;
+ case PHY_SORT_MERGE_JOIN_CODE_INPUT_TS_ORDER:
+ code = tlvDecodeEnum(pTlv, &pNode->inputTsOrder, sizeof(pNode->inputTsOrder));
+ break;
+ default:
+ break;
+ }
+ }
+
+ return code;
+}
+
+enum {
+ PHY_AGG_CODE_BASE_NODE = 1,
+ PHY_AGG_CODE_EXPR,
+ PHY_AGG_CODE_GROUP_KEYS,
+ PHY_AGG_CODE_AGG_FUNCS,
+ PHY_AGG_CODE_MERGE_DATA_BLOCK
+};
+
+static int32_t physiAggNodeToMsg(const void* pObj, STlvEncoder* pEncoder) {
+ const SAggPhysiNode* pNode = (const SAggPhysiNode*)pObj;
+
+ int32_t code = tlvEncodeObj(pEncoder, PHY_AGG_CODE_BASE_NODE, physiNodeToMsg, &pNode->node);
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tlvEncodeObj(pEncoder, PHY_AGG_CODE_EXPR, nodeListToMsg, pNode->pExprs);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tlvEncodeObj(pEncoder, PHY_AGG_CODE_GROUP_KEYS, nodeListToMsg, pNode->pGroupKeys);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tlvEncodeObj(pEncoder, PHY_AGG_CODE_AGG_FUNCS, nodeListToMsg, pNode->pAggFuncs);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tlvEncodeBool(pEncoder, PHY_AGG_CODE_MERGE_DATA_BLOCK, pNode->mergeDataBlock);
+ }
+
+ return code;
+}
+
+static int32_t msgToPhysiAggNode(STlvDecoder* pDecoder, void* pObj) {
+ SAggPhysiNode* pNode = (SAggPhysiNode*)pObj;
+
+ int32_t code = TSDB_CODE_SUCCESS;
+ STlv* pTlv = NULL;
+ tlvForEach(pDecoder, pTlv, code) {
+ switch (pTlv->type) {
+ case PHY_AGG_CODE_BASE_NODE:
+ code = tlvDecodeObjFromTlv(pTlv, msgToPhysiNode, &pNode->node);
+ break;
+ case PHY_AGG_CODE_EXPR:
+ code = msgToNodeListFromTlv(pTlv, (void**)&pNode->pExprs);
+ break;
+ case PHY_AGG_CODE_GROUP_KEYS:
+ code = msgToNodeListFromTlv(pTlv, (void**)&pNode->pGroupKeys);
+ break;
+ case PHY_AGG_CODE_AGG_FUNCS:
+ code = msgToNodeListFromTlv(pTlv, (void**)&pNode->pAggFuncs);
+ break;
+ case PHY_AGG_CODE_MERGE_DATA_BLOCK:
+ code = tlvDecodeBool(pTlv, &pNode->mergeDataBlock);
+ break;
+ default:
+ break;
+ }
+ }
+
+ return code;
+}
+
+enum {
+ PHY_EXCHANGE_CODE_BASE_NODE = 1,
+ PHY_EXCHANGE_CODE_SRC_START_GROUP_ID,
+ PHY_EXCHANGE_CODE_SRC_END_GROUP_ID,
+ PHY_EXCHANGE_CODE_SINGLE_CHANNEL,
+ PHY_EXCHANGE_CODE_SRC_ENDPOINTS
+};
+
+static int32_t physiExchangeNodeToMsg(const void* pObj, STlvEncoder* pEncoder) {
+ const SExchangePhysiNode* pNode = (const SExchangePhysiNode*)pObj;
+
+ int32_t code = tlvEncodeObj(pEncoder, PHY_EXCHANGE_CODE_BASE_NODE, physiNodeToMsg, &pNode->node);
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tlvEncodeI32(pEncoder, PHY_EXCHANGE_CODE_SRC_START_GROUP_ID, pNode->srcStartGroupId);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tlvEncodeI32(pEncoder, PHY_EXCHANGE_CODE_SRC_END_GROUP_ID, pNode->srcEndGroupId);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tlvEncodeBool(pEncoder, PHY_EXCHANGE_CODE_SINGLE_CHANNEL, pNode->singleChannel);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tlvEncodeObj(pEncoder, PHY_EXCHANGE_CODE_SRC_ENDPOINTS, nodeListToMsg, pNode->pSrcEndPoints);
+ }
+
+ return code;
+}
+
+static int32_t msgToPhysiExchangeNode(STlvDecoder* pDecoder, void* pObj) {
+ SExchangePhysiNode* pNode = (SExchangePhysiNode*)pObj;
+
+ int32_t code = TSDB_CODE_SUCCESS;
+ STlv* pTlv = NULL;
+ tlvForEach(pDecoder, pTlv, code) {
+ switch (pTlv->type) {
+ case PHY_EXCHANGE_CODE_BASE_NODE:
+ code = tlvDecodeObjFromTlv(pTlv, msgToPhysiNode, &pNode->node);
+ break;
+ case PHY_EXCHANGE_CODE_SRC_START_GROUP_ID:
+ code = tlvDecodeI32(pTlv, &pNode->srcStartGroupId);
+ break;
+ case PHY_EXCHANGE_CODE_SRC_END_GROUP_ID:
+ code = tlvDecodeI32(pTlv, &pNode->srcEndGroupId);
+ break;
+ case PHY_EXCHANGE_CODE_SINGLE_CHANNEL:
+ code = tlvDecodeBool(pTlv, &pNode->singleChannel);
+ break;
+ case PHY_EXCHANGE_CODE_SRC_ENDPOINTS:
+ code = msgToNodeListFromTlv(pTlv, (void**)&pNode->pSrcEndPoints);
+ break;
+ default:
+ break;
+ }
+ }
+
+ return code;
+}
+
+enum {
+ PHY_MERGE_CODE_BASE_NODE = 1,
+ PHY_MERGE_CODE_MERGE_KEYS,
+ PHY_MERGE_CODE_TARGETS,
+ PHY_MERGE_CODE_NUM_OF_CHANNELS,
+ PHY_MERGE_CODE_SRC_GROUP_ID,
+ PHY_MERGE_CODE_GROUP_SORT
+};
+
+static int32_t physiMergeNodeToMsg(const void* pObj, STlvEncoder* pEncoder) {
+ const SMergePhysiNode* pNode = (const SMergePhysiNode*)pObj;
+
+ int32_t code = tlvEncodeObj(pEncoder, PHY_MERGE_CODE_BASE_NODE, physiNodeToMsg, &pNode->node);
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tlvEncodeObj(pEncoder, PHY_MERGE_CODE_MERGE_KEYS, nodeListToMsg, pNode->pMergeKeys);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tlvEncodeObj(pEncoder, PHY_MERGE_CODE_TARGETS, nodeListToMsg, pNode->pTargets);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tlvEncodeI32(pEncoder, PHY_MERGE_CODE_NUM_OF_CHANNELS, pNode->numOfChannels);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tlvEncodeI32(pEncoder, PHY_MERGE_CODE_SRC_GROUP_ID, pNode->srcGroupId);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tlvEncodeBool(pEncoder, PHY_MERGE_CODE_GROUP_SORT, pNode->groupSort);
+ }
+
+ return code;
+}
+
+static int32_t msgToPhysiMergeNode(STlvDecoder* pDecoder, void* pObj) {
+ SMergePhysiNode* pNode = (SMergePhysiNode*)pObj;
+
+ int32_t code = TSDB_CODE_SUCCESS;
+ STlv* pTlv = NULL;
+ tlvForEach(pDecoder, pTlv, code) {
+ switch (pTlv->type) {
+ case PHY_MERGE_CODE_BASE_NODE:
+ code = tlvDecodeObjFromTlv(pTlv, msgToPhysiNode, &pNode->node);
+ break;
+ case PHY_MERGE_CODE_MERGE_KEYS:
+ code = msgToNodeListFromTlv(pTlv, (void**)&pNode->pMergeKeys);
+ break;
+ case PHY_MERGE_CODE_TARGETS:
+ code = msgToNodeListFromTlv(pTlv, (void**)&pNode->pTargets);
+ break;
+ case PHY_MERGE_CODE_NUM_OF_CHANNELS:
+ code = tlvDecodeI32(pTlv, &pNode->numOfChannels);
+ break;
+ case PHY_MERGE_CODE_SRC_GROUP_ID:
+ code = tlvDecodeI32(pTlv, &pNode->srcGroupId);
+ break;
+ case PHY_MERGE_CODE_GROUP_SORT:
+ code = tlvDecodeBool(pTlv, &pNode->groupSort);
+ break;
+ default:
+ break;
+ }
+ }
+
+ return code;
+}
+
+enum { PHY_SORT_CODE_BASE_NODE = 1, PHY_SORT_CODE_EXPR, PHY_SORT_CODE_SORT_KEYS, PHY_SORT_CODE_TARGETS };
+
+static int32_t physiSortNodeToMsg(const void* pObj, STlvEncoder* pEncoder) {
+ const SSortPhysiNode* pNode = (const SSortPhysiNode*)pObj;
+
+ int32_t code = tlvEncodeObj(pEncoder, PHY_SORT_CODE_BASE_NODE, physiNodeToMsg, &pNode->node);
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tlvEncodeObj(pEncoder, PHY_SORT_CODE_EXPR, nodeListToMsg, pNode->pExprs);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tlvEncodeObj(pEncoder, PHY_SORT_CODE_SORT_KEYS, nodeListToMsg, pNode->pSortKeys);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tlvEncodeObj(pEncoder, PHY_SORT_CODE_TARGETS, nodeListToMsg, pNode->pTargets);
+ }
+
+ return code;
+}
+
+static int32_t msgToPhysiSortNode(STlvDecoder* pDecoder, void* pObj) {
+ SSortPhysiNode* pNode = (SSortPhysiNode*)pObj;
+
+ int32_t code = TSDB_CODE_SUCCESS;
+ STlv* pTlv = NULL;
+ tlvForEach(pDecoder, pTlv, code) {
+ switch (pTlv->type) {
+ case PHY_SORT_CODE_BASE_NODE:
+ code = tlvDecodeObjFromTlv(pTlv, msgToPhysiNode, &pNode->node);
+ break;
+ case PHY_SORT_CODE_EXPR:
+ code = msgToNodeListFromTlv(pTlv, (void**)&pNode->pExprs);
+ break;
+ case PHY_SORT_CODE_SORT_KEYS:
+ code = msgToNodeListFromTlv(pTlv, (void**)&pNode->pSortKeys);
+ break;
+ case PHY_SORT_CODE_TARGETS:
+ code = msgToNodeListFromTlv(pTlv, (void**)&pNode->pTargets);
+ break;
+ default:
+ break;
+ }
+ }
+
+ return code;
+}
+
+enum {
+ PHY_WINDOW_CODE_BASE_NODE = 1,
+ PHY_WINDOW_CODE_EXPR,
+ PHY_WINDOW_CODE_FUNCS,
+ PHY_WINDOW_CODE_TS_PK,
+ PHY_WINDOW_CODE_TS_END,
+ PHY_WINDOW_CODE_TRIGGER_TYPE,
+ PHY_WINDOW_CODE_WATERMARK,
+ PHY_WINDOW_CODE_IG_EXPIRED,
+ PHY_WINDOW_CODE_INPUT_TS_ORDER,
+ PHY_WINDOW_CODE_OUTPUT_TS_ORDER,
+ PHY_WINDOW_CODE_MERGE_DATA_BLOCK
+};
+
+static int32_t physiWindowNodeToMsg(const void* pObj, STlvEncoder* pEncoder) {
+ const SWinodwPhysiNode* pNode = (const SWinodwPhysiNode*)pObj;
+
+ int32_t code = tlvEncodeObj(pEncoder, PHY_WINDOW_CODE_BASE_NODE, physiNodeToMsg, &pNode->node);
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tlvEncodeObj(pEncoder, PHY_WINDOW_CODE_EXPR, nodeListToMsg, pNode->pExprs);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tlvEncodeObj(pEncoder, PHY_WINDOW_CODE_FUNCS, nodeListToMsg, pNode->pFuncs);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tlvEncodeObj(pEncoder, PHY_WINDOW_CODE_TS_PK, nodeToMsg, pNode->pTspk);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tlvEncodeObj(pEncoder, PHY_WINDOW_CODE_TS_END, nodeToMsg, pNode->pTsEnd);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tlvEncodeI8(pEncoder, PHY_WINDOW_CODE_TRIGGER_TYPE, pNode->triggerType);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tlvEncodeI64(pEncoder, PHY_WINDOW_CODE_WATERMARK, pNode->watermark);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tlvEncodeI8(pEncoder, PHY_WINDOW_CODE_IG_EXPIRED, pNode->igExpired);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tlvEncodeEnum(pEncoder, PHY_WINDOW_CODE_INPUT_TS_ORDER, pNode->inputTsOrder);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tlvEncodeEnum(pEncoder, PHY_WINDOW_CODE_OUTPUT_TS_ORDER, pNode->outputTsOrder);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tlvEncodeBool(pEncoder, PHY_WINDOW_CODE_MERGE_DATA_BLOCK, pNode->mergeDataBlock);
+ }
+
+ return code;
+}
+
+static int32_t msgToPhysiWindowNode(STlvDecoder* pDecoder, void* pObj) {
+ SWinodwPhysiNode* pNode = (SWinodwPhysiNode*)pObj;
+
+ int32_t code = TSDB_CODE_SUCCESS;
+ STlv* pTlv = NULL;
+ tlvForEach(pDecoder, pTlv, code) {
+ switch (pTlv->type) {
+ case PHY_WINDOW_CODE_BASE_NODE:
+ code = tlvDecodeObjFromTlv(pTlv, msgToPhysiNode, &pNode->node);
+ break;
+ case PHY_WINDOW_CODE_EXPR:
+ code = msgToNodeListFromTlv(pTlv, (void**)&pNode->pExprs);
+ break;
+ case PHY_WINDOW_CODE_FUNCS:
+ code = msgToNodeListFromTlv(pTlv, (void**)&pNode->pFuncs);
+ break;
+ case PHY_WINDOW_CODE_TS_PK:
+ code = msgToNodeFromTlv(pTlv, (void**)&pNode->pTspk);
+ break;
+ case PHY_WINDOW_CODE_TS_END:
+ code = msgToNodeFromTlv(pTlv, (void**)&pNode->pTsEnd);
+ break;
+ case PHY_WINDOW_CODE_TRIGGER_TYPE:
+ code = tlvDecodeI8(pTlv, &pNode->triggerType);
+ break;
+ case PHY_WINDOW_CODE_WATERMARK:
+ code = tlvDecodeI64(pTlv, &pNode->watermark);
+ break;
+ case PHY_WINDOW_CODE_IG_EXPIRED:
+ code = tlvDecodeI8(pTlv, &pNode->igExpired);
+ break;
+ case PHY_WINDOW_CODE_INPUT_TS_ORDER:
+ code = tlvDecodeEnum(pTlv, &pNode->inputTsOrder, sizeof(pNode->inputTsOrder));
+ break;
+ case PHY_WINDOW_CODE_OUTPUT_TS_ORDER:
+ code = tlvDecodeEnum(pTlv, &pNode->outputTsOrder, sizeof(pNode->outputTsOrder));
+ break;
+ case PHY_WINDOW_CODE_MERGE_DATA_BLOCK:
+ code = tlvDecodeBool(pTlv, &pNode->mergeDataBlock);
+ break;
+ default:
+ break;
+ }
+ }
+
+ return code;
+}
+
+enum { PHY_INTERVAL_CODE_WINDOW = 1, PHY_INTERVAL_CODE_INLINE_ATTRS };
+
+static int32_t physiIntervalNodeInlineToMsg(const void* pObj, STlvEncoder* pEncoder) {
+ const SIntervalPhysiNode* pNode = (const SIntervalPhysiNode*)pObj;
+
+ int32_t code = tlvEncodeValueI64(pEncoder, pNode->interval);
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tlvEncodeValueI64(pEncoder, pNode->offset);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tlvEncodeValueI64(pEncoder, pNode->sliding);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tlvEncodeValueI8(pEncoder, pNode->intervalUnit);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tlvEncodeValueI8(pEncoder, pNode->slidingUnit);
+ }
+
+ return code;
+}
+
+static int32_t physiIntervalNodeToMsg(const void* pObj, STlvEncoder* pEncoder) {
+ const SIntervalPhysiNode* pNode = (const SIntervalPhysiNode*)pObj;
+
+ int32_t code = tlvEncodeObj(pEncoder, PHY_INTERVAL_CODE_WINDOW, physiWindowNodeToMsg, &pNode->window);
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tlvEncodeObj(pEncoder, PHY_INTERVAL_CODE_INLINE_ATTRS, physiIntervalNodeInlineToMsg, pNode);
+ }
+
+ return code;
+}
+
+static int32_t msgToPhysiIntervalNodeInline(STlvDecoder* pDecoder, void* pObj) {
+ SIntervalPhysiNode* pNode = (SIntervalPhysiNode*)pObj;
+
+ int32_t code = tlvDecodeValueI64(pDecoder, &pNode->interval);
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tlvDecodeValueI64(pDecoder, &pNode->offset);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tlvDecodeValueI64(pDecoder, &pNode->sliding);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tlvDecodeValueI8(pDecoder, &pNode->intervalUnit);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tlvDecodeValueI8(pDecoder, &pNode->slidingUnit);
+ }
+
+ return code;
+}
+
+static int32_t msgToPhysiIntervalNode(STlvDecoder* pDecoder, void* pObj) {
+ SIntervalPhysiNode* pNode = (SIntervalPhysiNode*)pObj;
+
+ int32_t code = TSDB_CODE_SUCCESS;
+ STlv* pTlv = NULL;
+ tlvForEach(pDecoder, pTlv, code) {
+ switch (pTlv->type) {
+ case PHY_INTERVAL_CODE_WINDOW:
+ code = tlvDecodeObjFromTlv(pTlv, msgToPhysiWindowNode, &pNode->window);
+ break;
+ case PHY_INTERVAL_CODE_INLINE_ATTRS:
+ code = tlvDecodeObjFromTlv(pTlv, msgToPhysiIntervalNodeInline, pNode);
+ break;
+ default:
+ break;
+ }
+ }
+
+ return code;
+}
+
+enum {
+ PHY_FILL_CODE_BASE_NODE = 1,
+ PHY_FILL_CODE_MODE,
+ PHY_FILL_CODE_FILL_EXPRS,
+ PHY_FILL_CODE_NOT_FILL_EXPRS,
+ PHY_FILL_CODE_WSTART,
+ PHY_FILL_CODE_VALUES,
+ PHY_FILL_CODE_TIME_RANGE,
+ PHY_FILL_CODE_INPUT_TS_ORDER
+};
+
+static int32_t physiFillNodeToMsg(const void* pObj, STlvEncoder* pEncoder) {
+ const SFillPhysiNode* pNode = (const SFillPhysiNode*)pObj;
+
+ int32_t code = tlvEncodeObj(pEncoder, PHY_FILL_CODE_BASE_NODE, physiNodeToMsg, &pNode->node);
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tlvEncodeEnum(pEncoder, PHY_FILL_CODE_MODE, pNode->mode);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tlvEncodeObj(pEncoder, PHY_FILL_CODE_FILL_EXPRS, nodeListToMsg, pNode->pFillExprs);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tlvEncodeObj(pEncoder, PHY_FILL_CODE_NOT_FILL_EXPRS, nodeListToMsg, pNode->pNotFillExprs);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tlvEncodeObj(pEncoder, PHY_FILL_CODE_WSTART, nodeToMsg, pNode->pWStartTs);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tlvEncodeObj(pEncoder, PHY_FILL_CODE_VALUES, nodeToMsg, pNode->pValues);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tlvEncodeObj(pEncoder, PHY_FILL_CODE_TIME_RANGE, timeWindowToMsg, &pNode->timeRange);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tlvEncodeEnum(pEncoder, PHY_FILL_CODE_INPUT_TS_ORDER, pNode->inputTsOrder);
+ }
+
+ return code;
+}
+
+static int32_t msgToPhysiFillNode(STlvDecoder* pDecoder, void* pObj) {
+ SFillPhysiNode* pNode = (SFillPhysiNode*)pObj;
+
+ int32_t code = TSDB_CODE_SUCCESS;
+ STlv* pTlv = NULL;
+ tlvForEach(pDecoder, pTlv, code) {
+ switch (pTlv->type) {
+ case PHY_FILL_CODE_BASE_NODE:
+ code = tlvDecodeObjFromTlv(pTlv, msgToPhysiNode, &pNode->node);
+ break;
+ case PHY_FILL_CODE_MODE:
+ code = tlvDecodeEnum(pTlv, &pNode->mode, sizeof(pNode->mode));
+ break;
+ case PHY_FILL_CODE_FILL_EXPRS:
+ code = msgToNodeListFromTlv(pTlv, (void**)&pNode->pFillExprs);
+ break;
+ case PHY_FILL_CODE_NOT_FILL_EXPRS:
+ code = msgToNodeListFromTlv(pTlv, (void**)&pNode->pNotFillExprs);
+ break;
+ case PHY_FILL_CODE_WSTART:
+ code = msgToNodeFromTlv(pTlv, (void**)&pNode->pWStartTs);
+ break;
+ case PHY_FILL_CODE_VALUES:
+ code = msgToNodeFromTlv(pTlv, (void**)&pNode->pValues);
+ break;
+ case PHY_FILL_CODE_TIME_RANGE:
+ code = tlvDecodeObjFromTlv(pTlv, msgToTimeWindow, (void**)&pNode->timeRange);
+ break;
+ case PHY_FILL_CODE_INPUT_TS_ORDER:
+ code = tlvDecodeEnum(pTlv, &pNode->inputTsOrder, sizeof(pNode->inputTsOrder));
+ break;
+ default:
+ break;
+ }
+ }
+
+ return code;
+}
+
+enum { PHY_SESSION_CODE_WINDOW = 1, PHY_SESSION_CODE_GAP };
+
+static int32_t physiSessionWindowNodeToMsg(const void* pObj, STlvEncoder* pEncoder) {
+ const SSessionWinodwPhysiNode* pNode = (const SSessionWinodwPhysiNode*)pObj;
+
+ int32_t code = tlvEncodeObj(pEncoder, PHY_SESSION_CODE_WINDOW, physiWindowNodeToMsg, &pNode->window);
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tlvEncodeI64(pEncoder, PHY_SESSION_CODE_GAP, pNode->gap);
+ }
+
+ return code;
+}
+
+static int32_t msgToPhysiSessionWindowNode(STlvDecoder* pDecoder, void* pObj) {
+ SSessionWinodwPhysiNode* pNode = (SSessionWinodwPhysiNode*)pObj;
+
+ int32_t code = TSDB_CODE_SUCCESS;
+ STlv* pTlv = NULL;
+ tlvForEach(pDecoder, pTlv, code) {
+ switch (pTlv->type) {
+ case PHY_SESSION_CODE_WINDOW:
+ code = tlvDecodeObjFromTlv(pTlv, msgToPhysiWindowNode, &pNode->window);
+ break;
+ case PHY_SESSION_CODE_GAP:
+ code = tlvDecodeI64(pTlv, &pNode->gap);
+ break;
+ default:
+ break;
+ }
+ }
+
+ return code;
+}
+
+enum { PHY_STATE_CODE_WINDOW = 1, PHY_STATE_CODE_KEY };
+
+static int32_t physiStateWindowNodeToMsg(const void* pObj, STlvEncoder* pEncoder) {
+ const SStateWinodwPhysiNode* pNode = (const SStateWinodwPhysiNode*)pObj;
+
+ int32_t code = tlvEncodeObj(pEncoder, PHY_STATE_CODE_WINDOW, physiWindowNodeToMsg, &pNode->window);
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tlvEncodeObj(pEncoder, PHY_STATE_CODE_KEY, nodeToMsg, pNode->pStateKey);
+ }
+
+ return code;
+}
+
+static int32_t msgToPhysiStateWindowNode(STlvDecoder* pDecoder, void* pObj) {
+ SStateWinodwPhysiNode* pNode = (SStateWinodwPhysiNode*)pObj;
+
+ int32_t code = TSDB_CODE_SUCCESS;
+ STlv* pTlv = NULL;
+ tlvForEach(pDecoder, pTlv, code) {
+ switch (pTlv->type) {
+ case PHY_STATE_CODE_WINDOW:
+ code = tlvDecodeObjFromTlv(pTlv, msgToPhysiWindowNode, &pNode->window);
+ break;
+ case PHY_STATE_CODE_KEY:
+ code = msgToNodeFromTlv(pTlv, (void**)&pNode->pStateKey);
+ break;
+ default:
+ break;
+ }
+ }
+
+ return code;
+}
+
+enum { PHY_PARTITION_CODE_BASE_NODE = 1, PHY_PARTITION_CODE_EXPR, PHY_PARTITION_CODE_KEYS, PHY_PARTITION_CODE_TARGETS };
+
+static int32_t physiPartitionNodeToMsg(const void* pObj, STlvEncoder* pEncoder) {
+ const SPartitionPhysiNode* pNode = (const SPartitionPhysiNode*)pObj;
+
+ int32_t code = tlvEncodeObj(pEncoder, PHY_PARTITION_CODE_BASE_NODE, physiNodeToMsg, &pNode->node);
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tlvEncodeObj(pEncoder, PHY_PARTITION_CODE_EXPR, nodeListToMsg, pNode->pExprs);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tlvEncodeObj(pEncoder, PHY_PARTITION_CODE_KEYS, nodeListToMsg, pNode->pPartitionKeys);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tlvEncodeObj(pEncoder, PHY_PARTITION_CODE_TARGETS, nodeListToMsg, pNode->pTargets);
+ }
+
+ return code;
+}
+
+static int32_t msgToPhysiPartitionNode(STlvDecoder* pDecoder, void* pObj) {
+ SPartitionPhysiNode* pNode = (SPartitionPhysiNode*)pObj;
+
+ int32_t code = TSDB_CODE_SUCCESS;
+ STlv* pTlv = NULL;
+ tlvForEach(pDecoder, pTlv, code) {
+ switch (pTlv->type) {
+ case PHY_PARTITION_CODE_BASE_NODE:
+ code = tlvDecodeObjFromTlv(pTlv, msgToPhysiNode, &pNode->node);
+ break;
+ case PHY_PARTITION_CODE_EXPR:
+ code = msgToNodeListFromTlv(pTlv, (void**)&pNode->pExprs);
+ break;
+ case PHY_PARTITION_CODE_KEYS:
+ code = msgToNodeListFromTlv(pTlv, (void**)&pNode->pPartitionKeys);
+ break;
+ case PHY_PARTITION_CODE_TARGETS:
+ code = msgToNodeListFromTlv(pTlv, (void**)&pNode->pTargets);
+ break;
+ default:
+ break;
+ }
+ }
+
+ return code;
+}
+
+enum { PHY_STREAM_PARTITION_CODE_BASE_NODE = 1, PHY_STREAM_PARTITION_CODE_TAGS, PHY_STREAM_PARTITION_CODE_SUBTABLE };
+
+static int32_t physiStreamPartitionNodeToMsg(const void* pObj, STlvEncoder* pEncoder) {
+ const SStreamPartitionPhysiNode* pNode = (const SStreamPartitionPhysiNode*)pObj;
+
+ int32_t code = tlvEncodeObj(pEncoder, PHY_STREAM_PARTITION_CODE_BASE_NODE, physiPartitionNodeToMsg, &pNode->part);
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tlvEncodeObj(pEncoder, PHY_STREAM_PARTITION_CODE_TAGS, nodeListToMsg, pNode->pTags);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tlvEncodeObj(pEncoder, PHY_STREAM_PARTITION_CODE_SUBTABLE, nodeToMsg, pNode->pSubtable);
+ }
+
+ return code;
+}
+
+static int32_t msgToPhysiStreamPartitionNode(STlvDecoder* pDecoder, void* pObj) {
+ SStreamPartitionPhysiNode* pNode = (SStreamPartitionPhysiNode*)pObj;
+
+ int32_t code = TSDB_CODE_SUCCESS;
+ STlv* pTlv = NULL;
+ tlvForEach(pDecoder, pTlv, code) {
+ switch (pTlv->type) {
+ case PHY_STREAM_PARTITION_CODE_BASE_NODE:
+ code = tlvDecodeObjFromTlv(pTlv, msgToPhysiPartitionNode, &pNode->part);
+ break;
+ case PHY_STREAM_PARTITION_CODE_TAGS:
+ code = msgToNodeListFromTlv(pTlv, (void**)&pNode->pTags);
+ break;
+ case PHY_STREAM_PARTITION_CODE_SUBTABLE:
+ code = msgToNodeFromTlv(pTlv, (void**)&pNode->pSubtable);
+ break;
+ default:
+ break;
+ }
+ }
+
+ return code;
+}
+
+enum { PHY_INDEF_ROWS_FUNC_CODE_BASE_NODE = 1, PHY_INDEF_ROWS_FUNC_CODE_EXPRS, PHY_INDEF_ROWS_FUNC_CODE_FUNCS };
+
+static int32_t physiIndefRowsFuncNodeToMsg(const void* pObj, STlvEncoder* pEncoder) {
+ const SIndefRowsFuncPhysiNode* pNode = (const SIndefRowsFuncPhysiNode*)pObj;
+
+ int32_t code = tlvEncodeObj(pEncoder, PHY_INDEF_ROWS_FUNC_CODE_BASE_NODE, physiNodeToMsg, &pNode->node);
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tlvEncodeObj(pEncoder, PHY_INDEF_ROWS_FUNC_CODE_EXPRS, nodeListToMsg, pNode->pExprs);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tlvEncodeObj(pEncoder, PHY_INDEF_ROWS_FUNC_CODE_FUNCS, nodeListToMsg, pNode->pFuncs);
+ }
+
+ return code;
+}
+
+static int32_t msgToPhysiIndefRowsFuncNode(STlvDecoder* pDecoder, void* pObj) {
+ SIndefRowsFuncPhysiNode* pNode = (SIndefRowsFuncPhysiNode*)pObj;
+
+ int32_t code = TSDB_CODE_SUCCESS;
+ STlv* pTlv = NULL;
+ tlvForEach(pDecoder, pTlv, code) {
+ switch (pTlv->type) {
+ case PHY_INDEF_ROWS_FUNC_CODE_BASE_NODE:
+ code = tlvDecodeObjFromTlv(pTlv, msgToPhysiNode, &pNode->node);
+ break;
+ case PHY_INDEF_ROWS_FUNC_CODE_EXPRS:
+ code = msgToNodeListFromTlv(pTlv, (void**)&pNode->pExprs);
+ break;
+ case PHY_INDEF_ROWS_FUNC_CODE_FUNCS:
+ code = msgToNodeListFromTlv(pTlv, (void**)&pNode->pFuncs);
+ break;
+ default:
+ break;
+ }
+ }
+
+ return code;
+}
+
+enum {
+ PHY_INERP_FUNC_CODE_BASE_NODE = 1,
+ PHY_INERP_FUNC_CODE_EXPR,
+ PHY_INERP_FUNC_CODE_FUNCS,
+ PHY_INERP_FUNC_CODE_TIME_RANGE,
+ PHY_INERP_FUNC_CODE_INTERVAL,
+ PHY_INERP_FUNC_CODE_INTERVAL_UNIT,
+ PHY_INERP_FUNC_CODE_FILL_MODE,
+ PHY_INERP_FUNC_CODE_FILL_VALUES,
+ PHY_INERP_FUNC_CODE_TIME_SERIES
+};
+
+static int32_t physiInterpFuncNodeToMsg(const void* pObj, STlvEncoder* pEncoder) {
+ const SInterpFuncPhysiNode* pNode = (const SInterpFuncPhysiNode*)pObj;
+
+ int32_t code = tlvEncodeObj(pEncoder, PHY_INERP_FUNC_CODE_BASE_NODE, physiNodeToMsg, &pNode->node);
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tlvEncodeObj(pEncoder, PHY_INERP_FUNC_CODE_EXPR, nodeListToMsg, pNode->pExprs);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tlvEncodeObj(pEncoder, PHY_INERP_FUNC_CODE_FUNCS, nodeListToMsg, pNode->pFuncs);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tlvEncodeObj(pEncoder, PHY_INERP_FUNC_CODE_TIME_RANGE, timeWindowToMsg, &pNode->timeRange);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tlvEncodeI64(pEncoder, PHY_INERP_FUNC_CODE_INTERVAL, pNode->interval);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tlvEncodeI8(pEncoder, PHY_INERP_FUNC_CODE_INTERVAL_UNIT, pNode->intervalUnit);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tlvEncodeEnum(pEncoder, PHY_INERP_FUNC_CODE_FILL_MODE, pNode->fillMode);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tlvEncodeObj(pEncoder, PHY_INERP_FUNC_CODE_FILL_VALUES, nodeToMsg, pNode->pFillValues);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tlvEncodeObj(pEncoder, PHY_INERP_FUNC_CODE_TIME_SERIES, nodeToMsg, pNode->pTimeSeries);
+ }
+
+ return code;
+}
+
+static int32_t msgToPhysiInterpFuncNode(STlvDecoder* pDecoder, void* pObj) {
+ SInterpFuncPhysiNode* pNode = (SInterpFuncPhysiNode*)pObj;
+
+ int32_t code = TSDB_CODE_SUCCESS;
+ STlv* pTlv = NULL;
+ tlvForEach(pDecoder, pTlv, code) {
+ switch (pTlv->type) {
+ case PHY_INERP_FUNC_CODE_BASE_NODE:
+ code = tlvDecodeObjFromTlv(pTlv, msgToPhysiNode, &pNode->node);
+ break;
+ case PHY_INERP_FUNC_CODE_EXPR:
+ code = msgToNodeListFromTlv(pTlv, (void**)&pNode->pExprs);
+ break;
+ case PHY_INERP_FUNC_CODE_FUNCS:
+ code = msgToNodeListFromTlv(pTlv, (void**)&pNode->pFuncs);
+ break;
+ case PHY_INERP_FUNC_CODE_TIME_RANGE:
+ code = tlvDecodeObjFromTlv(pTlv, msgToTimeWindow, &pNode->timeRange);
+ break;
+ case PHY_INERP_FUNC_CODE_INTERVAL:
+ code = tlvDecodeI64(pTlv, &pNode->interval);
+ break;
+ case PHY_INERP_FUNC_CODE_INTERVAL_UNIT:
+ code = tlvDecodeI8(pTlv, &pNode->intervalUnit);
+ break;
+ case PHY_INERP_FUNC_CODE_FILL_MODE:
+ code = tlvDecodeEnum(pTlv, &pNode->fillMode, sizeof(pNode->fillMode));
+ break;
+ case PHY_INERP_FUNC_CODE_FILL_VALUES:
+ code = msgToNodeFromTlv(pTlv, (void**)&pNode->pFillValues);
+ break;
+ case PHY_INERP_FUNC_CODE_TIME_SERIES:
+ code = msgToNodeFromTlv(pTlv, (void**)&pNode->pTimeSeries);
+ break;
+ default:
+ break;
+ }
+ }
+
+ return code;
+}
+
+enum { PHY_DATA_SINK_CODE_INPUT_DESC = 1 };
+
+static int32_t physicDataSinkNodeToMsg(const void* pObj, STlvEncoder* pEncoder) {
+ const SDataSinkNode* pNode = (const SDataSinkNode*)pObj;
+ return tlvEncodeObj(pEncoder, PHY_DATA_SINK_CODE_INPUT_DESC, nodeToMsg, pNode->pInputDataBlockDesc);
+}
+
+static int32_t msgToPhysicDataSinkNode(STlvDecoder* pDecoder, void* pObj) {
+ SDataSinkNode* pNode = (SDataSinkNode*)pObj;
+
+ int32_t code = TSDB_CODE_SUCCESS;
+ STlv* pTlv = NULL;
+ tlvForEach(pDecoder, pTlv, code) {
+ switch (pTlv->type) {
+ case PHY_DATA_SINK_CODE_INPUT_DESC:
+ code = msgToNodeFromTlv(pTlv, (void**)&pNode->pInputDataBlockDesc);
+ break;
+ default:
+ break;
+ }
+ }
+
+ return code;
+}
+
+enum { PHY_DISPATCH_CODE_SINK = 1 };
+
+static int32_t physiDispatchNodeToMsg(const void* pObj, STlvEncoder* pEncoder) {
+ const SDataDispatcherNode* pNode = (const SDataDispatcherNode*)pObj;
+ return tlvEncodeObj(pEncoder, PHY_DISPATCH_CODE_SINK, physicDataSinkNodeToMsg, &pNode->sink);
+}
+
+static int32_t msgToPhysiDispatchNode(STlvDecoder* pDecoder, void* pObj) {
+ SDataDispatcherNode* pNode = (SDataDispatcherNode*)pObj;
+
+ int32_t code = TSDB_CODE_SUCCESS;
+ STlv* pTlv = NULL;
+ tlvForEach(pDecoder, pTlv, code) {
+ switch (pTlv->type) {
+ case PHY_DISPATCH_CODE_SINK:
+ code = tlvDecodeObjFromTlv(pTlv, msgToPhysicDataSinkNode, &pNode->sink);
+ break;
+ default:
+ break;
+ }
+ }
+
+ return code;
+}
+
+enum {
+ PHY_QUERY_INSERT_CODE_SINK = 1,
+ PHY_QUERY_INSERT_CODE_COLS,
+ PHY_QUERY_INSERT_CODE_TABLE_ID,
+ PHY_QUERY_INSERT_CODE_STABLE_ID,
+ PHY_QUERY_INSERT_CODE_TABLE_TYPE,
+ PHY_QUERY_INSERT_CODE_TABLE_NAME,
+ PHY_QUERY_INSERT_CODE_VG_ID,
+ PHY_QUERY_INSERT_CODE_EP_SET
+};
+
+static int32_t physiQueryInsertNodeToMsg(const void* pObj, STlvEncoder* pEncoder) {
+ const SQueryInserterNode* pNode = (const SQueryInserterNode*)pObj;
+
+ int32_t code = tlvEncodeObj(pEncoder, PHY_QUERY_INSERT_CODE_SINK, physicDataSinkNodeToMsg, &pNode->sink);
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tlvEncodeObj(pEncoder, PHY_QUERY_INSERT_CODE_COLS, nodeListToMsg, pNode->pCols);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tlvEncodeU64(pEncoder, PHY_QUERY_INSERT_CODE_TABLE_ID, pNode->tableId);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tlvEncodeU64(pEncoder, PHY_QUERY_INSERT_CODE_STABLE_ID, pNode->stableId);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tlvEncodeI8(pEncoder, PHY_QUERY_INSERT_CODE_TABLE_TYPE, pNode->tableType);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tlvEncodeCStr(pEncoder, PHY_QUERY_INSERT_CODE_TABLE_NAME, pNode->tableName);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tlvEncodeI32(pEncoder, PHY_QUERY_INSERT_CODE_VG_ID, pNode->vgId);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tlvEncodeObj(pEncoder, PHY_QUERY_INSERT_CODE_EP_SET, epSetToMsg, &pNode->epSet);
+ }
+
+ return code;
+}
+
+static int32_t msgToPhysiQueryInsertNode(STlvDecoder* pDecoder, void* pObj) {
+ SQueryInserterNode* pNode = (SQueryInserterNode*)pObj;
+
+ int32_t code = TSDB_CODE_SUCCESS;
+ STlv* pTlv = NULL;
+ tlvForEach(pDecoder, pTlv, code) {
+ switch (pTlv->type) {
+ case PHY_QUERY_INSERT_CODE_SINK:
+ code = tlvDecodeObjFromTlv(pTlv, msgToPhysicDataSinkNode, &pNode->sink);
+ break;
+ case PHY_QUERY_INSERT_CODE_COLS:
+ code = msgToNodeListFromTlv(pTlv, (void**)&pNode->pCols);
+ break;
+ case PHY_QUERY_INSERT_CODE_TABLE_ID:
+ code = tlvDecodeU64(pTlv, &pNode->tableId);
+ break;
+ case PHY_QUERY_INSERT_CODE_STABLE_ID:
+ code = tlvDecodeU64(pTlv, &pNode->stableId);
+ break;
+ case PHY_QUERY_INSERT_CODE_TABLE_TYPE:
+ code = tlvDecodeI8(pTlv, &pNode->tableType);
+ break;
+ case PHY_QUERY_INSERT_CODE_TABLE_NAME:
+ code = tlvDecodeCStr(pTlv, pNode->tableName);
+ break;
+ case PHY_QUERY_INSERT_CODE_VG_ID:
+ code = tlvDecodeI32(pTlv, &pNode->vgId);
+ break;
+ case PHY_QUERY_INSERT_CODE_EP_SET:
+ code = tlvDecodeObjFromTlv(pTlv, msgToEpSet, &pNode->epSet);
+ break;
+ default:
+ break;
+ }
+ }
+
+ return code;
+}
+
+enum {
+ PHY_DELETER_CODE_SINK = 1,
+ PHY_DELETER_CODE_TABLE_ID,
+ PHY_DELETER_CODE_TABLE_TYPE,
+ PHY_DELETER_CODE_TABLE_FNAME,
+ PHY_DELETER_CODE_TS_COL_NAME,
+ PHY_DELETER_CODE_DELETE_TIME_RANGE,
+ PHY_DELETER_CODE_AFFECTED_ROWS,
+ PHY_DELETER_CODE_START_TS,
+ PHY_DELETER_CODE_END_TS
+};
+
+static int32_t physiDeleteNodeToMsg(const void* pObj, STlvEncoder* pEncoder) {
+ const SDataDeleterNode* pNode = (const SDataDeleterNode*)pObj;
+
+ int32_t code = tlvEncodeObj(pEncoder, PHY_DELETER_CODE_SINK, physicDataSinkNodeToMsg, &pNode->sink);
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tlvEncodeU64(pEncoder, PHY_DELETER_CODE_TABLE_ID, pNode->tableId);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tlvEncodeI8(pEncoder, PHY_DELETER_CODE_TABLE_TYPE, pNode->tableType);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tlvEncodeCStr(pEncoder, PHY_DELETER_CODE_TABLE_FNAME, pNode->tableFName);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tlvEncodeCStr(pEncoder, PHY_DELETER_CODE_TS_COL_NAME, pNode->tsColName);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tlvEncodeObj(pEncoder, PHY_DELETER_CODE_DELETE_TIME_RANGE, timeWindowToMsg, &pNode->deleteTimeRange);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tlvEncodeObj(pEncoder, PHY_DELETER_CODE_AFFECTED_ROWS, nodeToMsg, pNode->pAffectedRows);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tlvEncodeObj(pEncoder, PHY_DELETER_CODE_START_TS, nodeToMsg, pNode->pStartTs);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tlvEncodeObj(pEncoder, PHY_DELETER_CODE_END_TS, nodeToMsg, pNode->pEndTs);
+ }
+
+ return code;
+}
+
+static int32_t msgToPhysiDeleteNode(STlvDecoder* pDecoder, void* pObj) {
+ SDataDeleterNode* pNode = (SDataDeleterNode*)pObj;
+
+ int32_t code = TSDB_CODE_SUCCESS;
+ STlv* pTlv = NULL;
+ tlvForEach(pDecoder, pTlv, code) {
+ switch (pTlv->type) {
+ case PHY_DELETER_CODE_SINK:
+ code = tlvDecodeObjFromTlv(pTlv, msgToPhysicDataSinkNode, &pNode->sink);
+ break;
+ case PHY_DELETER_CODE_TABLE_ID:
+ code = tlvDecodeU64(pTlv, &pNode->tableId);
+ break;
+ case PHY_DELETER_CODE_TABLE_TYPE:
+ code = tlvDecodeI8(pTlv, &pNode->tableType);
+ break;
+ case PHY_DELETER_CODE_TABLE_FNAME:
+ code = tlvDecodeCStr(pTlv, pNode->tableFName);
+ break;
+ case PHY_DELETER_CODE_TS_COL_NAME:
+ code = tlvDecodeCStr(pTlv, pNode->tsColName);
+ break;
+ case PHY_DELETER_CODE_DELETE_TIME_RANGE:
+ code = tlvDecodeObjFromTlv(pTlv, msgToTimeWindow, &pNode->deleteTimeRange);
+ break;
+ case PHY_DELETER_CODE_AFFECTED_ROWS:
+ code = msgToNodeFromTlv(pTlv, (void**)&pNode->pAffectedRows);
+ break;
+ case PHY_DELETER_CODE_START_TS:
+ code = msgToNodeFromTlv(pTlv, (void**)&pNode->pStartTs);
+ break;
+ case PHY_DELETER_CODE_END_TS:
+ code = msgToNodeFromTlv(pTlv, (void**)&pNode->pEndTs);
+ break;
+ default:
+ break;
+ }
+ }
+
+ return code;
+}
+
+enum { SUBPLAN_ID_CODE_QUERY_ID = 1, SUBPLAN_ID_CODE_GROUP_ID, SUBPLAN_ID_CODE_SUBPLAN_ID };
+
+static int32_t subplanIdInlineToMsg(const void* pObj, STlvEncoder* pEncoder) {
+ const SSubplanId* pNode = (const SSubplanId*)pObj;
+
+ int32_t code = tlvEncodeValueU64(pEncoder, pNode->queryId);
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tlvEncodeValueI32(pEncoder, pNode->groupId);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tlvEncodeValueI32(pEncoder, pNode->subplanId);
+ }
+
+ return code;
+}
+
+static int32_t subplanIdToMsg(const void* pObj, STlvEncoder* pEncoder) {
+ const SSubplanId* pNode = (const SSubplanId*)pObj;
+
+ int32_t code = tlvEncodeU64(pEncoder, SUBPLAN_ID_CODE_QUERY_ID, pNode->queryId);
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tlvEncodeI32(pEncoder, SUBPLAN_ID_CODE_GROUP_ID, pNode->groupId);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tlvEncodeI32(pEncoder, SUBPLAN_ID_CODE_SUBPLAN_ID, pNode->subplanId);
+ }
+
+ return code;
+}
+
+static int32_t msgToSubplanIdInline(STlvDecoder* pDecoder, void* pObj) {
+ SSubplanId* pNode = (SSubplanId*)pObj;
+
+ int32_t code = tlvDecodeValueU64(pDecoder, &pNode->queryId);
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tlvDecodeValueI32(pDecoder, &pNode->groupId);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tlvDecodeValueI32(pDecoder, &pNode->subplanId);
+ }
+
+ return code;
+}
+
+static int32_t msgToSubplanId(STlvDecoder* pDecoder, void* pObj) {
+ SSubplanId* pNode = (SSubplanId*)pObj;
+
+ int32_t code = TSDB_CODE_SUCCESS;
+ STlv* pTlv = NULL;
+ tlvForEach(pDecoder, pTlv, code) {
+ switch (pTlv->type) {
+ case SUBPLAN_ID_CODE_QUERY_ID:
+ code = tlvDecodeU64(pTlv, &pNode->queryId);
+ break;
+ case SUBPLAN_ID_CODE_GROUP_ID:
+ code = tlvDecodeI32(pTlv, &pNode->groupId);
+ break;
+ case SUBPLAN_ID_CODE_SUBPLAN_ID:
+ code = tlvDecodeI32(pTlv, &pNode->subplanId);
+ break;
+ default:
+ break;
+ }
+ }
+
+ return code;
+}
+
+enum {
+ SUBPLAN_CODE_INLINE_ATTRS = 1,
+ SUBPLAN_CODE_ROOT_NODE,
+ SUBPLAN_CODE_DATA_SINK,
+ SUBPLAN_CODE_TAG_COND,
+ SUBPLAN_CODE_TAG_INDEX_COND
+};
+
+static int32_t subplanInlineToMsg(const void* pObj, STlvEncoder* pEncoder) {
+ const SSubplan* pNode = (const SSubplan*)pObj;
+
+ int32_t code = subplanIdInlineToMsg(&pNode->id, pEncoder);
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tlvEncodeValueEnum(pEncoder, pNode->subplanType);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tlvEncodeValueI32(pEncoder, pNode->msgType);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tlvEncodeValueI32(pEncoder, pNode->level);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tlvEncodeValueCStr(pEncoder, pNode->dbFName);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tlvEncodeValueCStr(pEncoder, pNode->user);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = queryNodeAddrInlineToMsg(&pNode->execNode, pEncoder);
+ }
+
+ return code;
+}
+
+static int32_t subplanToMsg(const void* pObj, STlvEncoder* pEncoder) {
+ const SSubplan* pNode = (const SSubplan*)pObj;
+
+ int32_t code = tlvEncodeObj(pEncoder, SUBPLAN_CODE_INLINE_ATTRS, subplanInlineToMsg, pNode);
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tlvEncodeObj(pEncoder, SUBPLAN_CODE_ROOT_NODE, nodeToMsg, pNode->pNode);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tlvEncodeObj(pEncoder, SUBPLAN_CODE_DATA_SINK, nodeToMsg, pNode->pDataSink);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tlvEncodeObj(pEncoder, SUBPLAN_CODE_TAG_COND, nodeToMsg, pNode->pTagCond);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tlvEncodeObj(pEncoder, SUBPLAN_CODE_TAG_INDEX_COND, nodeToMsg, pNode->pTagIndexCond);
+ }
+
+ return code;
+}
+
+static int32_t msgToSubplanInline(STlvDecoder* pDecoder, void* pObj) {
+ SSubplan* pNode = (SSubplan*)pObj;
+
+ int32_t code = msgToSubplanIdInline(pDecoder, &pNode->id);
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tlvDecodeValueEnum(pDecoder, &pNode->subplanType, sizeof(pNode->subplanType));
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tlvDecodeValueI32(pDecoder, &pNode->msgType);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tlvDecodeValueI32(pDecoder, &pNode->level);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tlvDecodeValueCStr(pDecoder, pNode->dbFName);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tlvDecodeValueCStr(pDecoder, pNode->user);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = msgToQueryNodeAddrInline(pDecoder, &pNode->execNode);
+ }
+
+ return code;
+}
+
+static int32_t msgToSubplan(STlvDecoder* pDecoder, void* pObj) {
+ SSubplan* pNode = (SSubplan*)pObj;
+
+ int32_t code = TSDB_CODE_SUCCESS;
+ STlv* pTlv = NULL;
+ tlvForEach(pDecoder, pTlv, code) {
+ switch (pTlv->type) {
+ case SUBPLAN_CODE_INLINE_ATTRS:
+ code = tlvDecodeObjFromTlv(pTlv, msgToSubplanInline, pNode);
+ break;
+ case SUBPLAN_CODE_ROOT_NODE:
+ code = msgToNodeFromTlv(pTlv, (void**)&pNode->pNode);
+ break;
+ case SUBPLAN_CODE_DATA_SINK:
+ code = msgToNodeFromTlv(pTlv, (void**)&pNode->pDataSink);
+ break;
+ case SUBPLAN_CODE_TAG_COND:
+ code = msgToNodeFromTlv(pTlv, (void**)&pNode->pTagCond);
+ break;
+ case SUBPLAN_CODE_TAG_INDEX_COND:
+ code = msgToNodeFromTlv(pTlv, (void**)&pNode->pTagIndexCond);
+ break;
+ default:
+ break;
+ }
+ }
+
+ return code;
+}
+
+enum { QUERY_PLAN_CODE_INLINE_ATTRS = 1, QUERY_PLAN_CODE_SUBPLANS };
+
+static int32_t queryPlanInlineToMsg(const void* pObj, STlvEncoder* pEncoder) {
+ const SQueryPlan* pNode = (const SQueryPlan*)pObj;
+
+ int32_t code = tlvEncodeValueU64(pEncoder, pNode->queryId);
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tlvEncodeValueI32(pEncoder, pNode->numOfSubplans);
+ }
+
+ return code;
+}
+
+static int32_t queryPlanToMsg(const void* pObj, STlvEncoder* pEncoder) {
+ const SQueryPlan* pNode = (const SQueryPlan*)pObj;
+
+ int32_t code = tlvEncodeObj(pEncoder, QUERY_PLAN_CODE_INLINE_ATTRS, queryPlanInlineToMsg, pNode);
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tlvEncodeObj(pEncoder, QUERY_PLAN_CODE_SUBPLANS, nodeListToMsg, pNode->pSubplans);
+ }
+
+ return code;
+}
+
+static int32_t msgToQueryPlanInline(STlvDecoder* pDecoder, void* pObj) {
+ SQueryPlan* pNode = (SQueryPlan*)pObj;
+
+ int32_t code = tlvDecodeValueU64(pDecoder, &pNode->queryId);
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tlvDecodeValueI32(pDecoder, &pNode->numOfSubplans);
+ }
+
+ return code;
+}
+
+static int32_t msgToQueryPlan(STlvDecoder* pDecoder, void* pObj) {
+ SQueryPlan* pNode = (SQueryPlan*)pObj;
+
+ int32_t code = TSDB_CODE_SUCCESS;
+ STlv* pTlv = NULL;
+ tlvForEach(pDecoder, pTlv, code) {
+ switch (pTlv->type) {
+ case QUERY_PLAN_CODE_INLINE_ATTRS:
+ code = tlvDecodeObjFromTlv(pTlv, msgToQueryPlanInline, pNode);
+ break;
+ case QUERY_PLAN_CODE_SUBPLANS:
+ code = msgToNodeListFromTlv(pTlv, (void**)&pNode->pSubplans);
+ break;
+ default:
+ break;
+ }
+ }
+
+ return code;
+}
+
+static int32_t specificNodeToMsg(const void* pObj, STlvEncoder* pEncoder) {
+ // nodesWarn("specificNodeToMsg node = %s, before tlv count = %d", nodesNodeName(nodeType(pObj)), pEncoder->tlvCount);
+ int32_t code = TSDB_CODE_SUCCESS;
+ switch (nodeType(pObj)) {
+ case QUERY_NODE_COLUMN:
+ code = columnNodeToMsg(pObj, pEncoder);
+ break;
+ case QUERY_NODE_VALUE:
+ code = valueNodeToMsg(pObj, pEncoder);
+ break;
+ case QUERY_NODE_OPERATOR:
+ code = operatorNodeToMsg(pObj, pEncoder);
+ break;
+ case QUERY_NODE_LOGIC_CONDITION:
+ code = logicConditionNodeToMsg(pObj, pEncoder);
+ break;
+ case QUERY_NODE_FUNCTION:
+ code = functionNodeToMsg(pObj, pEncoder);
+ break;
+ case QUERY_NODE_ORDER_BY_EXPR:
+ code = orderByExprNodeToMsg(pObj, pEncoder);
+ break;
+ case QUERY_NODE_LIMIT:
+ code = limitNodeToMsg(pObj, pEncoder);
+ break;
+ case QUERY_NODE_NODE_LIST:
+ code = nodeListNodeToMsg(pObj, pEncoder);
+ break;
+ case QUERY_NODE_TARGET:
+ code = targetNodeToMsg(pObj, pEncoder);
+ break;
+ case QUERY_NODE_DATABLOCK_DESC:
+ code = dataBlockDescNodeToMsg(pObj, pEncoder);
+ break;
+ case QUERY_NODE_SLOT_DESC:
+ code = slotDescNodeToMsg(pObj, pEncoder);
+ break;
+ case QUERY_NODE_DOWNSTREAM_SOURCE:
+ code = downstreamSourceNodeToMsg(pObj, pEncoder);
+ break;
+ case QUERY_NODE_LEFT_VALUE:
+ break;
+ case QUERY_NODE_WHEN_THEN:
+ code = whenThenNodeToMsg(pObj, pEncoder);
+ break;
+ case QUERY_NODE_CASE_WHEN:
+ code = caseWhenNodeToMsg(pObj, pEncoder);
+ break;
+ case QUERY_NODE_PHYSICAL_PLAN_TAG_SCAN:
+ case QUERY_NODE_PHYSICAL_PLAN_BLOCK_DIST_SCAN:
+ code = physiScanNodeToMsg(pObj, pEncoder);
+ break;
+ case QUERY_NODE_PHYSICAL_PLAN_LAST_ROW_SCAN:
+ code = physiLastRowScanNodeToMsg(pObj, pEncoder);
+ break;
+ case QUERY_NODE_PHYSICAL_PLAN_TABLE_SCAN:
+ case QUERY_NODE_PHYSICAL_PLAN_TABLE_MERGE_SCAN:
+ case QUERY_NODE_PHYSICAL_PLAN_STREAM_SCAN:
+ code = physiTableScanNodeToMsg(pObj, pEncoder);
+ break;
+ case QUERY_NODE_PHYSICAL_PLAN_SYSTABLE_SCAN:
+ code = physiSysTableScanNodeToMsg(pObj, pEncoder);
+ break;
+ case QUERY_NODE_PHYSICAL_PLAN_PROJECT:
+ code = physiProjectNodeToMsg(pObj, pEncoder);
+ break;
+ case QUERY_NODE_PHYSICAL_PLAN_MERGE_JOIN:
+ code = physiJoinNodeToMsg(pObj, pEncoder);
+ break;
+ case QUERY_NODE_PHYSICAL_PLAN_HASH_AGG:
+ code = physiAggNodeToMsg(pObj, pEncoder);
+ break;
+ case QUERY_NODE_PHYSICAL_PLAN_EXCHANGE:
+ code = physiExchangeNodeToMsg(pObj, pEncoder);
+ break;
+ case QUERY_NODE_PHYSICAL_PLAN_MERGE:
+ code = physiMergeNodeToMsg(pObj, pEncoder);
+ break;
+ case QUERY_NODE_PHYSICAL_PLAN_SORT:
+ case QUERY_NODE_PHYSICAL_PLAN_GROUP_SORT:
+ code = physiSortNodeToMsg(pObj, pEncoder);
+ break;
+ case QUERY_NODE_PHYSICAL_PLAN_HASH_INTERVAL:
+ case QUERY_NODE_PHYSICAL_PLAN_MERGE_ALIGNED_INTERVAL:
+ case QUERY_NODE_PHYSICAL_PLAN_STREAM_INTERVAL:
+ case QUERY_NODE_PHYSICAL_PLAN_STREAM_FINAL_INTERVAL:
+ case QUERY_NODE_PHYSICAL_PLAN_STREAM_SEMI_INTERVAL:
+ code = physiIntervalNodeToMsg(pObj, pEncoder);
+ break;
+ case QUERY_NODE_PHYSICAL_PLAN_FILL:
+ case QUERY_NODE_PHYSICAL_PLAN_STREAM_FILL:
+ code = physiFillNodeToMsg(pObj, pEncoder);
+ break;
+ case QUERY_NODE_PHYSICAL_PLAN_MERGE_SESSION:
+ case QUERY_NODE_PHYSICAL_PLAN_STREAM_SESSION:
+ case QUERY_NODE_PHYSICAL_PLAN_STREAM_SEMI_SESSION:
+ case QUERY_NODE_PHYSICAL_PLAN_STREAM_FINAL_SESSION:
+ code = physiSessionWindowNodeToMsg(pObj, pEncoder);
+ break;
+ case QUERY_NODE_PHYSICAL_PLAN_MERGE_STATE:
+ case QUERY_NODE_PHYSICAL_PLAN_STREAM_STATE:
+ code = physiStateWindowNodeToMsg(pObj, pEncoder);
+ break;
+ case QUERY_NODE_PHYSICAL_PLAN_PARTITION:
+ code = physiPartitionNodeToMsg(pObj, pEncoder);
+ break;
+ case QUERY_NODE_PHYSICAL_PLAN_STREAM_PARTITION:
+ code = physiStreamPartitionNodeToMsg(pObj, pEncoder);
+ break;
+ case QUERY_NODE_PHYSICAL_PLAN_INDEF_ROWS_FUNC:
+ code = physiIndefRowsFuncNodeToMsg(pObj, pEncoder);
+ break;
+ case QUERY_NODE_PHYSICAL_PLAN_INTERP_FUNC:
+ code = physiInterpFuncNodeToMsg(pObj, pEncoder);
+ break;
+ case QUERY_NODE_PHYSICAL_PLAN_DISPATCH:
+ code = physiDispatchNodeToMsg(pObj, pEncoder);
+ break;
+ case QUERY_NODE_PHYSICAL_PLAN_QUERY_INSERT:
+ code = physiQueryInsertNodeToMsg(pObj, pEncoder);
+ break;
+ case QUERY_NODE_PHYSICAL_PLAN_DELETE:
+ code = physiDeleteNodeToMsg(pObj, pEncoder);
+ break;
+ case QUERY_NODE_PHYSICAL_SUBPLAN:
+ code = subplanToMsg(pObj, pEncoder);
+ break;
+ case QUERY_NODE_PHYSICAL_PLAN:
+ code = queryPlanToMsg(pObj, pEncoder);
+ break;
+ default:
+ break;
+ }
+ if (TSDB_CODE_SUCCESS != code) {
+ nodesError("specificNodeToMsg error node = %s", nodesNodeName(nodeType(pObj)));
+ }
+ // nodesWarn("specificNodeToMsg node = %s, after tlv count = %d", nodesNodeName(nodeType(pObj)), pEncoder->tlvCount);
+ return code;
+}
+
+static int32_t msgToSpecificNode(STlvDecoder* pDecoder, void* pObj) {
+ int32_t code = TSDB_CODE_SUCCESS;
+ switch (nodeType(pObj)) {
+ case QUERY_NODE_COLUMN:
+ code = msgToColumnNode(pDecoder, pObj);
+ break;
+ case QUERY_NODE_VALUE:
+ code = msgToValueNode(pDecoder, pObj);
+ break;
+ case QUERY_NODE_OPERATOR:
+ code = msgToOperatorNode(pDecoder, pObj);
+ break;
+ case QUERY_NODE_LOGIC_CONDITION:
+ code = msgToLogicConditionNode(pDecoder, pObj);
+ break;
+ case QUERY_NODE_FUNCTION:
+ code = msgToFunctionNode(pDecoder, pObj);
+ break;
+ case QUERY_NODE_ORDER_BY_EXPR:
+ code = msgToOrderByExprNode(pDecoder, pObj);
+ break;
+ case QUERY_NODE_LIMIT:
+ code = msgToLimitNode(pDecoder, pObj);
+ break;
+ case QUERY_NODE_NODE_LIST:
+ code = msgToNodeListNode(pDecoder, pObj);
+ break;
+ case QUERY_NODE_TARGET:
+ code = msgToTargetNode(pDecoder, pObj);
+ break;
+ case QUERY_NODE_DATABLOCK_DESC:
+ code = msgToDataBlockDescNode(pDecoder, pObj);
+ break;
+ case QUERY_NODE_SLOT_DESC:
+ code = msgToSlotDescNode(pDecoder, pObj);
+ break;
+ case QUERY_NODE_DOWNSTREAM_SOURCE:
+ code = msgToDownstreamSourceNode(pDecoder, pObj);
+ case QUERY_NODE_LEFT_VALUE:
+ break;
+ case QUERY_NODE_WHEN_THEN:
+ code = msgToWhenThenNode(pDecoder, pObj);
+ break;
+ case QUERY_NODE_CASE_WHEN:
+ code = msgToCaseWhenNode(pDecoder, pObj);
+ break;
+ case QUERY_NODE_PHYSICAL_PLAN_TAG_SCAN:
+ case QUERY_NODE_PHYSICAL_PLAN_BLOCK_DIST_SCAN:
+ code = msgToPhysiScanNode(pDecoder, pObj);
+ break;
+ case QUERY_NODE_PHYSICAL_PLAN_LAST_ROW_SCAN:
+ code = msgToPhysiLastRowScanNode(pDecoder, pObj);
+ break;
+ case QUERY_NODE_PHYSICAL_PLAN_TABLE_SCAN:
+ case QUERY_NODE_PHYSICAL_PLAN_TABLE_MERGE_SCAN:
+ case QUERY_NODE_PHYSICAL_PLAN_STREAM_SCAN:
+ code = msgToPhysiTableScanNode(pDecoder, pObj);
+ break;
+ case QUERY_NODE_PHYSICAL_PLAN_SYSTABLE_SCAN:
+ code = msgToPhysiSysTableScanNode(pDecoder, pObj);
+ break;
+ case QUERY_NODE_PHYSICAL_PLAN_PROJECT:
+ code = msgToPhysiProjectNode(pDecoder, pObj);
+ break;
+ case QUERY_NODE_PHYSICAL_PLAN_MERGE_JOIN:
+ code = msgToPhysiJoinNode(pDecoder, pObj);
+ break;
+ case QUERY_NODE_PHYSICAL_PLAN_HASH_AGG:
+ code = msgToPhysiAggNode(pDecoder, pObj);
+ break;
+ case QUERY_NODE_PHYSICAL_PLAN_EXCHANGE:
+ code = msgToPhysiExchangeNode(pDecoder, pObj);
+ break;
+ case QUERY_NODE_PHYSICAL_PLAN_MERGE:
+ code = msgToPhysiMergeNode(pDecoder, pObj);
+ break;
+ case QUERY_NODE_PHYSICAL_PLAN_SORT:
+ case QUERY_NODE_PHYSICAL_PLAN_GROUP_SORT:
+ code = msgToPhysiSortNode(pDecoder, pObj);
+ break;
+ case QUERY_NODE_PHYSICAL_PLAN_HASH_INTERVAL:
+ case QUERY_NODE_PHYSICAL_PLAN_MERGE_ALIGNED_INTERVAL:
+ case QUERY_NODE_PHYSICAL_PLAN_STREAM_INTERVAL:
+ case QUERY_NODE_PHYSICAL_PLAN_STREAM_FINAL_INTERVAL:
+ case QUERY_NODE_PHYSICAL_PLAN_STREAM_SEMI_INTERVAL:
+ code = msgToPhysiIntervalNode(pDecoder, pObj);
+ break;
+ case QUERY_NODE_PHYSICAL_PLAN_FILL:
+ case QUERY_NODE_PHYSICAL_PLAN_STREAM_FILL:
+ code = msgToPhysiFillNode(pDecoder, pObj);
+ break;
+ case QUERY_NODE_PHYSICAL_PLAN_MERGE_SESSION:
+ case QUERY_NODE_PHYSICAL_PLAN_STREAM_SESSION:
+ case QUERY_NODE_PHYSICAL_PLAN_STREAM_SEMI_SESSION:
+ case QUERY_NODE_PHYSICAL_PLAN_STREAM_FINAL_SESSION:
+ code = msgToPhysiSessionWindowNode(pDecoder, pObj);
+ break;
+ case QUERY_NODE_PHYSICAL_PLAN_MERGE_STATE:
+ case QUERY_NODE_PHYSICAL_PLAN_STREAM_STATE:
+ code = msgToPhysiStateWindowNode(pDecoder, pObj);
+ break;
+ case QUERY_NODE_PHYSICAL_PLAN_PARTITION:
+ code = msgToPhysiPartitionNode(pDecoder, pObj);
+ break;
+ case QUERY_NODE_PHYSICAL_PLAN_STREAM_PARTITION:
+ code = msgToPhysiStreamPartitionNode(pDecoder, pObj);
+ break;
+ case QUERY_NODE_PHYSICAL_PLAN_INDEF_ROWS_FUNC:
+ code = msgToPhysiIndefRowsFuncNode(pDecoder, pObj);
+ break;
+ case QUERY_NODE_PHYSICAL_PLAN_INTERP_FUNC:
+ code = msgToPhysiInterpFuncNode(pDecoder, pObj);
+ break;
+ case QUERY_NODE_PHYSICAL_PLAN_DISPATCH:
+ code = msgToPhysiDispatchNode(pDecoder, pObj);
+ break;
+ case QUERY_NODE_PHYSICAL_PLAN_QUERY_INSERT:
+ code = msgToPhysiQueryInsertNode(pDecoder, pObj);
+ break;
+ case QUERY_NODE_PHYSICAL_PLAN_DELETE:
+ code = msgToPhysiDeleteNode(pDecoder, pObj);
+ break;
+ case QUERY_NODE_PHYSICAL_SUBPLAN:
+ code = msgToSubplan(pDecoder, pObj);
+ break;
+ case QUERY_NODE_PHYSICAL_PLAN:
+ code = msgToQueryPlan(pDecoder, pObj);
+ break;
+ default:
+ break;
+ }
+ if (TSDB_CODE_SUCCESS != code) {
+ nodesError("msgToSpecificNode error node = %s", nodesNodeName(nodeType(pObj)));
+ }
+ return code;
+}
+
+static int32_t nodeToMsg(const void* pObj, STlvEncoder* pEncoder) {
+ return tlvEncodeObj(pEncoder, nodeType(pObj), specificNodeToMsg, pObj);
+}
+
+static int32_t msgToNode(STlvDecoder* pDecoder, void** pObj) {
+ return tlvDecodeDynObj(pDecoder, (FMakeObject)nodesMakeNode, msgToSpecificNode, pObj);
+}
+
+static int32_t msgToNodeFromTlv(STlv* pTlv, void** pObj) {
+ STlvDecoder decoder = {.bufSize = pTlv->len, .offset = 0, .pBuf = pTlv->value};
+ return msgToNode(&decoder, pObj);
+}
+
+static int32_t nodeListToMsg(const void* pObj, STlvEncoder* pEncoder) {
+ const SNodeList* pList = (const SNodeList*)pObj;
+
+ SNode* pNode = NULL;
+ FOREACH(pNode, pList) {
+ int32_t code = nodeToMsg(pNode, pEncoder);
+ if (TSDB_CODE_SUCCESS != code) {
+ return code;
+ }
+ }
+
+ return TSDB_CODE_SUCCESS;
+}
+
+static int32_t msgToNodeList(STlvDecoder* pDecoder, void** pObj) {
+ SNodeList* pList = nodesMakeList();
+
+ int32_t code = TSDB_CODE_SUCCESS;
+ while (TSDB_CODE_SUCCESS == code && !tlvDecodeEnd(pDecoder)) {
+ SNode* pNode = NULL;
+ code = msgToNode(pDecoder, (void**)&pNode);
+ if (TSDB_CODE_SUCCESS == code) {
+ code = nodesListAppend(pList, pNode);
+ }
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ *pObj = pList;
+ } else {
+ nodesDestroyList(pList);
+ }
+ return code;
+}
+
+static int32_t msgToNodeListFromTlv(STlv* pTlv, void** pObj) {
+ STlvDecoder decoder = {.bufSize = pTlv->len, .offset = 0, .pBuf = pTlv->value};
+ return msgToNodeList(&decoder, pObj);
+}
+
+int32_t nodesNodeToMsg(const SNode* pNode, char** pMsg, int32_t* pLen) {
+ if (NULL == pNode || NULL == pMsg || NULL == pLen) {
+ terrno = TSDB_CODE_FAILED;
+ return TSDB_CODE_FAILED;
+ }
+
+ STlvEncoder encoder;
+ int32_t code = initTlvEncoder(&encoder);
+ if (TSDB_CODE_SUCCESS == code) {
+ code = nodeToMsg(pNode, &encoder);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ endTlvEncode(&encoder, pMsg, pLen);
+ }
+ clearTlvEncoder(&encoder);
+
+ terrno = code;
+ return code;
+}
+
+int32_t nodesMsgToNode(const char* pMsg, int32_t len, SNode** pNode) {
+ if (NULL == pMsg || NULL == pNode) {
+ return TSDB_CODE_SUCCESS;
+ }
+
+ STlvDecoder decoder = {.bufSize = len, .offset = 0, .pBuf = pMsg};
+ int32_t code = msgToNode(&decoder, (void**)pNode);
+ if (TSDB_CODE_SUCCESS != code) {
+ nodesDestroyNode(*pNode);
+ *pNode = NULL;
+ }
+
+ terrno = code;
+ return code;
+}
diff --git a/source/libs/nodes/src/nodesTraverseFuncs.c b/source/libs/nodes/src/nodesTraverseFuncs.c
index 2e23998aad9463fb7a4a9b6834ceab2f7ea51e55..9f851f6a3f763d8d3d87797ffb6826e908ddba4a 100644
--- a/source/libs/nodes/src/nodesTraverseFuncs.c
+++ b/source/libs/nodes/src/nodesTraverseFuncs.c
@@ -146,6 +146,25 @@ static EDealRes dispatchExpr(SNode* pNode, ETraversalOrder order, FNodeWalker wa
case QUERY_NODE_TARGET:
res = walkExpr(((STargetNode*)pNode)->pExpr, order, walker, pContext);
break;
+ case QUERY_NODE_WHEN_THEN: {
+ SWhenThenNode* pWhenThen = (SWhenThenNode*)pNode;
+ res = walkExpr(pWhenThen->pWhen, order, walker, pContext);
+ if (DEAL_RES_ERROR != res && DEAL_RES_END != res) {
+ res = walkExpr(pWhenThen->pThen, order, walker, pContext);
+ }
+ break;
+ }
+ case QUERY_NODE_CASE_WHEN: {
+ SCaseWhenNode* pCaseWhen = (SCaseWhenNode*)pNode;
+ res = walkExpr(pCaseWhen->pCase, order, walker, pContext);
+ if (DEAL_RES_ERROR != res && DEAL_RES_END != res) {
+ res = walkExpr(pCaseWhen->pElse, order, walker, pContext);
+ }
+ if (DEAL_RES_ERROR != res && DEAL_RES_END != res) {
+ res = walkExprs(pCaseWhen->pWhenThenList, order, walker, pContext);
+ }
+ break;
+ }
default:
break;
}
@@ -291,6 +310,25 @@ static EDealRes rewriteExpr(SNode** pRawNode, ETraversalOrder order, FNodeRewrit
case QUERY_NODE_TARGET:
res = rewriteExpr(&(((STargetNode*)pNode)->pExpr), order, rewriter, pContext);
break;
+ case QUERY_NODE_WHEN_THEN: {
+ SWhenThenNode* pWhenThen = (SWhenThenNode*)pNode;
+ res = rewriteExpr(&pWhenThen->pWhen, order, rewriter, pContext);
+ if (DEAL_RES_ERROR != res && DEAL_RES_END != res) {
+ res = rewriteExpr(&pWhenThen->pThen, order, rewriter, pContext);
+ }
+ break;
+ }
+ case QUERY_NODE_CASE_WHEN: {
+ SCaseWhenNode* pCaseWhen = (SCaseWhenNode*)pNode;
+ res = rewriteExpr(&pCaseWhen->pCase, order, rewriter, pContext);
+ if (DEAL_RES_ERROR != res && DEAL_RES_END != res) {
+ res = rewriteExpr(&pCaseWhen->pElse, order, rewriter, pContext);
+ }
+ if (DEAL_RES_ERROR != res && DEAL_RES_END != res) {
+ res = rewriteExprs(pCaseWhen->pWhenThenList, order, rewriter, pContext);
+ }
+ break;
+ }
default:
break;
}
@@ -340,6 +378,8 @@ void nodesWalkSelectStmt(SSelectStmt* pSelect, ESqlClause clause, FNodeWalker wa
nodesWalkExpr(pSelect->pWhere, walker, pContext);
case SQL_CLAUSE_WHERE:
nodesWalkExprs(pSelect->pPartitionByList, walker, pContext);
+ nodesWalkExprs(pSelect->pTags, walker, pContext);
+ nodesWalkExpr(pSelect->pSubtable, walker, pContext);
case SQL_CLAUSE_PARTITION_BY:
nodesWalkExpr(pSelect->pWindow, walker, pContext);
case SQL_CLAUSE_WINDOW:
@@ -374,6 +414,8 @@ void nodesRewriteSelectStmt(SSelectStmt* pSelect, ESqlClause clause, FNodeRewrit
nodesRewriteExpr(&(pSelect->pWhere), rewriter, pContext);
case SQL_CLAUSE_WHERE:
nodesRewriteExprs(pSelect->pPartitionByList, rewriter, pContext);
+ nodesRewriteExprs(pSelect->pTags, rewriter, pContext);
+ nodesRewriteExpr(&(pSelect->pSubtable), rewriter, pContext);
case SQL_CLAUSE_PARTITION_BY:
nodesRewriteExpr(&(pSelect->pWindow), rewriter, pContext);
case SQL_CLAUSE_WINDOW:
@@ -537,7 +579,8 @@ static EDealRes dispatchPhysiPlan(SNode* pNode, ETraversalOrder order, FNodeWalk
}
break;
}
- case QUERY_NODE_PHYSICAL_PLAN_PARTITION: {
+ case QUERY_NODE_PHYSICAL_PLAN_PARTITION:
+ case QUERY_NODE_PHYSICAL_PLAN_STREAM_PARTITION: {
SPartitionPhysiNode* pPart = (SPartitionPhysiNode*)pNode;
res = walkPhysiNode((SPhysiNode*)pNode, order, walker, pContext);
if (DEAL_RES_ERROR != res && DEAL_RES_END != res) {
diff --git a/source/libs/nodes/src/nodesUtilFuncs.c b/source/libs/nodes/src/nodesUtilFuncs.c
index d13057a93e824c2b94d94a006664b4cbc4c2f870..f8dda501e9acc5ad77492766e841f0e20b2c3ebf 100644
--- a/source/libs/nodes/src/nodesUtilFuncs.c
+++ b/source/libs/nodes/src/nodesUtilFuncs.c
@@ -21,9 +21,209 @@
#include "taoserror.h"
#include "tdatablock.h"
#include "thash.h"
+#include "tref.h"
+
+typedef struct SNodeMemChunk {
+ int32_t availableSize;
+ int32_t usedSize;
+ char* pBuf;
+ struct SNodeMemChunk* pNext;
+} SNodeMemChunk;
+
+typedef struct SNodeAllocator {
+ int64_t self;
+ int64_t queryId;
+ int32_t chunkSize;
+ int32_t chunkNum;
+ SNodeMemChunk* pCurrChunk;
+ SNodeMemChunk* pChunks;
+ TdThreadMutex mutex;
+} SNodeAllocator;
+
+static threadlocal SNodeAllocator* g_pNodeAllocator;
+static int32_t g_allocatorReqRefPool = -1;
+
+static SNodeMemChunk* callocNodeChunk(SNodeAllocator* pAllocator) {
+ SNodeMemChunk* pNewChunk = taosMemoryCalloc(1, sizeof(SNodeMemChunk) + pAllocator->chunkSize);
+ if (NULL == pNewChunk) {
+ return NULL;
+ }
+ pNewChunk->pBuf = (char*)(pNewChunk + 1);
+ pNewChunk->availableSize = pAllocator->chunkSize;
+ pNewChunk->usedSize = 0;
+ pNewChunk->pNext = NULL;
+ if (NULL != pAllocator->pCurrChunk) {
+ pAllocator->pCurrChunk->pNext = pNewChunk;
+ }
+ pAllocator->pCurrChunk = pNewChunk;
+ if (NULL == pAllocator->pChunks) {
+ pAllocator->pChunks = pNewChunk;
+ }
+ ++(pAllocator->chunkNum);
+ return pNewChunk;
+}
+
+static void* nodesCallocImpl(int32_t size) {
+ if (NULL == g_pNodeAllocator) {
+ return taosMemoryCalloc(1, size);
+ }
+
+ if (g_pNodeAllocator->pCurrChunk->usedSize + size > g_pNodeAllocator->pCurrChunk->availableSize) {
+ if (NULL == callocNodeChunk(g_pNodeAllocator)) {
+ return NULL;
+ }
+ }
+ void* p = g_pNodeAllocator->pCurrChunk->pBuf + g_pNodeAllocator->pCurrChunk->usedSize;
+ g_pNodeAllocator->pCurrChunk->usedSize += size;
+ return p;
+}
-static SNode* makeNode(ENodeType type, size_t size) {
- SNode* p = taosMemoryCalloc(1, size);
+static void* nodesCalloc(int32_t num, int32_t size) {
+ void* p = nodesCallocImpl(num * size + 1);
+ if (NULL == p) {
+ return NULL;
+ }
+ *(char*)p = (NULL != g_pNodeAllocator) ? 1 : 0;
+ return (char*)p + 1;
+}
+
+static void nodesFree(void* p) {
+ char* ptr = (char*)p - 1;
+ if (0 == *ptr) {
+ taosMemoryFree(ptr);
+ }
+ return;
+}
+
+static int32_t createNodeAllocator(int32_t chunkSize, SNodeAllocator** pAllocator) {
+ *pAllocator = taosMemoryCalloc(1, sizeof(SNodeAllocator));
+ if (NULL == *pAllocator) {
+ return TSDB_CODE_OUT_OF_MEMORY;
+ }
+ (*pAllocator)->chunkSize = chunkSize;
+ if (NULL == callocNodeChunk(*pAllocator)) {
+ taosMemoryFreeClear(*pAllocator);
+ return TSDB_CODE_OUT_OF_MEMORY;
+ }
+ taosThreadMutexInit(&(*pAllocator)->mutex, NULL);
+ return TSDB_CODE_SUCCESS;
+}
+
+static void destroyNodeAllocator(void* p) {
+ if (NULL == p) {
+ return;
+ }
+
+ SNodeAllocator* pAllocator = p;
+
+ nodesDebug("query id %" PRIx64 " allocator id %" PRIx64 " alloc chunkNum: %d, chunkTotakSize: %d",
+ pAllocator->queryId, pAllocator->self, pAllocator->chunkNum, pAllocator->chunkNum * pAllocator->chunkSize);
+
+ SNodeMemChunk* pChunk = pAllocator->pChunks;
+ while (NULL != pChunk) {
+ SNodeMemChunk* pTemp = pChunk->pNext;
+ taosMemoryFree(pChunk);
+ pChunk = pTemp;
+ }
+ taosThreadMutexDestroy(&pAllocator->mutex);
+ taosMemoryFree(pAllocator);
+}
+
+int32_t nodesInitAllocatorSet() {
+ if (g_allocatorReqRefPool >= 0) {
+ nodesWarn("nodes already initialized");
+ return TSDB_CODE_SUCCESS;
+ }
+
+ g_allocatorReqRefPool = taosOpenRef(1024, destroyNodeAllocator);
+ if (g_allocatorReqRefPool < 0) {
+ nodesError("init nodes failed");
+ return TSDB_CODE_OUT_OF_MEMORY;
+ }
+
+ return TSDB_CODE_SUCCESS;
+}
+
+void nodesDestroyAllocatorSet() {
+ if (g_allocatorReqRefPool >= 0) {
+ SNodeAllocator* pAllocator = taosIterateRef(g_allocatorReqRefPool, 0);
+ int64_t refId = 0;
+ while (NULL != pAllocator) {
+ refId = pAllocator->self;
+ taosRemoveRef(g_allocatorReqRefPool, refId);
+ pAllocator = taosIterateRef(g_allocatorReqRefPool, refId);
+ }
+ taosCloseRef(g_allocatorReqRefPool);
+ }
+}
+
+int32_t nodesCreateAllocator(int64_t queryId, int32_t chunkSize, int64_t* pAllocatorId) {
+ SNodeAllocator* pAllocator = NULL;
+ int32_t code = createNodeAllocator(chunkSize, &pAllocator);
+ if (TSDB_CODE_SUCCESS == code) {
+ pAllocator->self = taosAddRef(g_allocatorReqRefPool, pAllocator);
+ if (pAllocator->self <= 0) {
+ return terrno;
+ }
+ pAllocator->queryId = queryId;
+ *pAllocatorId = pAllocator->self;
+ }
+ return code;
+}
+
+int32_t nodesAcquireAllocator(int64_t allocatorId) {
+ if (allocatorId <= 0) {
+ return TSDB_CODE_SUCCESS;
+ }
+
+ SNodeAllocator* pAllocator = taosAcquireRef(g_allocatorReqRefPool, allocatorId);
+ if (NULL == pAllocator) {
+ return terrno;
+ }
+ taosThreadMutexLock(&pAllocator->mutex);
+ g_pNodeAllocator = pAllocator;
+ return TSDB_CODE_SUCCESS;
+}
+
+int32_t nodesReleaseAllocator(int64_t allocatorId) {
+ if (allocatorId <= 0) {
+ return TSDB_CODE_SUCCESS;
+ }
+
+ if (NULL == g_pNodeAllocator) {
+ nodesError("allocator id %" PRIx64
+ " release failed: The nodesReleaseAllocator function needs to be called after the nodesAcquireAllocator "
+ "function is called!",
+ allocatorId);
+ return TSDB_CODE_FAILED;
+ }
+ SNodeAllocator* pAllocator = g_pNodeAllocator;
+ g_pNodeAllocator = NULL;
+ taosThreadMutexUnlock(&pAllocator->mutex);
+ return taosReleaseRef(g_allocatorReqRefPool, allocatorId);
+}
+
+int64_t nodesMakeAllocatorWeakRef(int64_t allocatorId) {
+ if (allocatorId <= 0) {
+ return 0;
+ }
+
+ SNodeAllocator* pAllocator = taosAcquireRef(g_allocatorReqRefPool, allocatorId);
+ return pAllocator->self;
+}
+
+int64_t nodesReleaseAllocatorWeakRef(int64_t allocatorId) { return taosReleaseRef(g_allocatorReqRefPool, allocatorId); }
+
+void nodesDestroyAllocator(int64_t allocatorId) {
+ if (allocatorId <= 0) {
+ return;
+ }
+
+ taosRemoveRef(g_allocatorReqRefPool, allocatorId);
+}
+
+static SNode* makeNode(ENodeType type, int32_t size) {
+ SNode* p = nodesCalloc(1, size);
if (NULL == p) {
return NULL;
}
@@ -91,6 +291,10 @@ SNode* nodesMakeNode(ENodeType type) {
return makeNode(type, sizeof(SLeftValueNode));
case QUERY_NODE_COLUMN_REF:
return makeNode(type, sizeof(SColumnDefNode));
+ case QUERY_NODE_WHEN_THEN:
+ return makeNode(type, sizeof(SWhenThenNode));
+ case QUERY_NODE_CASE_WHEN:
+ return makeNode(type, sizeof(SCaseWhenNode));
case QUERY_NODE_SET_OPERATOR:
return makeNode(type, sizeof(SSetOperator));
case QUERY_NODE_SELECT_STMT:
@@ -307,6 +511,7 @@ SNode* nodesMakeNode(ENodeType type) {
case QUERY_NODE_PHYSICAL_PLAN_STREAM_SEMI_INTERVAL:
return makeNode(type, sizeof(SStreamSemiIntervalPhysiNode));
case QUERY_NODE_PHYSICAL_PLAN_FILL:
+ case QUERY_NODE_PHYSICAL_PLAN_STREAM_FILL:
return makeNode(type, sizeof(SFillPhysiNode));
case QUERY_NODE_PHYSICAL_PLAN_MERGE_SESSION:
return makeNode(type, sizeof(SSessionWinodwPhysiNode));
@@ -322,6 +527,8 @@ SNode* nodesMakeNode(ENodeType type) {
return makeNode(type, sizeof(SStreamStateWinodwPhysiNode));
case QUERY_NODE_PHYSICAL_PLAN_PARTITION:
return makeNode(type, sizeof(SPartitionPhysiNode));
+ case QUERY_NODE_PHYSICAL_PLAN_STREAM_PARTITION:
+ return makeNode(type, sizeof(SStreamPartitionPhysiNode));
case QUERY_NODE_PHYSICAL_PLAN_INDEF_ROWS_FUNC:
return makeNode(type, sizeof(SIndefRowsFuncPhysiNode));
case QUERY_NODE_PHYSICAL_PLAN_INTERP_FUNC:
@@ -536,7 +743,21 @@ void nodesDestroyNode(SNode* pNode) {
break;
}
case QUERY_NODE_LEFT_VALUE: // no pointer field
+ case QUERY_NODE_COLUMN_REF: // no pointer field
+ break;
+ case QUERY_NODE_WHEN_THEN: {
+ SWhenThenNode* pStmt = (SWhenThenNode*)pNode;
+ nodesDestroyNode(pStmt->pWhen);
+ nodesDestroyNode(pStmt->pThen);
+ break;
+ }
+ case QUERY_NODE_CASE_WHEN: {
+ SCaseWhenNode* pStmt = (SCaseWhenNode*)pNode;
+ nodesDestroyNode(pStmt->pCase);
+ nodesDestroyNode(pStmt->pElse);
+ nodesDestroyList(pStmt->pWhenThenList);
break;
+ }
case QUERY_NODE_SET_OPERATOR: {
SSetOperator* pStmt = (SSetOperator*)pNode;
nodesDestroyList(pStmt->pProjectionList);
@@ -552,6 +773,8 @@ void nodesDestroyNode(SNode* pNode) {
nodesDestroyNode(pStmt->pFromTable);
nodesDestroyNode(pStmt->pWhere);
nodesDestroyList(pStmt->pPartitionByList);
+ nodesDestroyList(pStmt->pTags);
+ nodesDestroyNode(pStmt->pSubtable);
nodesDestroyNode(pStmt->pWindow);
nodesDestroyList(pStmt->pGroupByList);
nodesDestroyNode(pStmt->pHaving);
@@ -725,6 +948,8 @@ void nodesDestroyNode(SNode* pNode) {
nodesDestroyNode(pStmt->pFromTable);
nodesDestroyNode(pStmt->pWhere);
nodesDestroyNode(pStmt->pCountFunc);
+ nodesDestroyNode(pStmt->pFirstFunc);
+ nodesDestroyNode(pStmt->pLastFunc);
nodesDestroyNode(pStmt->pTagCond);
break;
}
@@ -789,6 +1014,8 @@ void nodesDestroyNode(SNode* pNode) {
destroyVgDataBlockArray(pLogicNode->pDataBlocks);
// pVgDataBlocks is weak reference
nodesDestroyNode(pLogicNode->pAffectedRows);
+ nodesDestroyNode(pLogicNode->pStartTs);
+ nodesDestroyNode(pLogicNode->pEndTs);
taosMemoryFreeClear(pLogicNode->pVgroupList);
nodesDestroyList(pLogicNode->pInsertCols);
break;
@@ -818,6 +1045,7 @@ void nodesDestroyNode(SNode* pNode) {
nodesDestroyNode(pLogicNode->pWStartTs);
nodesDestroyNode(pLogicNode->pValues);
nodesDestroyList(pLogicNode->pFillExprs);
+ nodesDestroyList(pLogicNode->pNotFillExprs);
break;
}
case QUERY_NODE_LOGIC_PLAN_SORT: {
@@ -929,7 +1157,8 @@ void nodesDestroyNode(SNode* pNode) {
case QUERY_NODE_PHYSICAL_PLAN_STREAM_SEMI_INTERVAL:
destroyWinodwPhysiNode((SWinodwPhysiNode*)pNode);
break;
- case QUERY_NODE_PHYSICAL_PLAN_FILL: {
+ case QUERY_NODE_PHYSICAL_PLAN_FILL:
+ case QUERY_NODE_PHYSICAL_PLAN_STREAM_FILL: {
SFillPhysiNode* pPhyNode = (SFillPhysiNode*)pNode;
destroyPhysiNode((SPhysiNode*)pPhyNode);
nodesDestroyList(pPhyNode->pFillExprs);
@@ -951,7 +1180,8 @@ void nodesDestroyNode(SNode* pNode) {
nodesDestroyNode(pPhyNode->pStateKey);
break;
}
- case QUERY_NODE_PHYSICAL_PLAN_PARTITION: {
+ case QUERY_NODE_PHYSICAL_PLAN_PARTITION:
+ case QUERY_NODE_PHYSICAL_PLAN_STREAM_PARTITION: {
SPartitionPhysiNode* pPhyNode = (SPartitionPhysiNode*)pNode;
destroyPhysiNode((SPhysiNode*)pPhyNode);
nodesDestroyList(pPhyNode->pExprs);
@@ -994,6 +1224,8 @@ void nodesDestroyNode(SNode* pNode) {
SDataDeleterNode* pSink = (SDataDeleterNode*)pNode;
destroyDataSinkNode((SDataSinkNode*)pSink);
nodesDestroyNode(pSink->pAffectedRows);
+ nodesDestroyNode(pSink->pStartTs);
+ nodesDestroyNode(pSink->pEndTs);
break;
}
case QUERY_NODE_PHYSICAL_SUBPLAN: {
@@ -1012,12 +1244,12 @@ void nodesDestroyNode(SNode* pNode) {
default:
break;
}
- taosMemoryFreeClear(pNode);
+ nodesFree(pNode);
return;
}
SNodeList* nodesMakeList() {
- SNodeList* p = taosMemoryCalloc(1, sizeof(SNodeList));
+ SNodeList* p = nodesCalloc(1, sizeof(SNodeList));
if (NULL == p) {
return NULL;
}
@@ -1028,7 +1260,7 @@ int32_t nodesListAppend(SNodeList* pList, SNode* pNode) {
if (NULL == pList || NULL == pNode) {
return TSDB_CODE_FAILED;
}
- SListCell* p = taosMemoryCalloc(1, sizeof(SListCell));
+ SListCell* p = nodesCalloc(1, sizeof(SListCell));
if (NULL == p) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
return TSDB_CODE_OUT_OF_MEMORY;
@@ -1095,7 +1327,7 @@ int32_t nodesListAppendList(SNodeList* pTarget, SNodeList* pSrc) {
}
pTarget->pTail = pSrc->pTail;
pTarget->length += pSrc->length;
- taosMemoryFreeClear(pSrc);
+ nodesFree(pSrc);
return TSDB_CODE_SUCCESS;
}
@@ -1115,7 +1347,7 @@ int32_t nodesListPushFront(SNodeList* pList, SNode* pNode) {
if (NULL == pList || NULL == pNode) {
return TSDB_CODE_FAILED;
}
- SListCell* p = taosMemoryCalloc(1, sizeof(SListCell));
+ SListCell* p = nodesCalloc(1, sizeof(SListCell));
if (NULL == p) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
return TSDB_CODE_OUT_OF_MEMORY;
@@ -1143,7 +1375,7 @@ SListCell* nodesListErase(SNodeList* pList, SListCell* pCell) {
}
SListCell* pNext = pCell->pNext;
nodesDestroyNode(pCell->pNode);
- taosMemoryFreeClear(pCell);
+ nodesFree(pCell);
--(pList->length);
return pNext;
}
@@ -1163,7 +1395,7 @@ void nodesListInsertList(SNodeList* pTarget, SListCell* pPos, SNodeList* pSrc) {
pPos->pPrev = pSrc->pTail;
pTarget->length += pSrc->length;
- taosMemoryFreeClear(pSrc);
+ nodesFree(pSrc);
}
SNode* nodesListGetNode(SNodeList* pList, int32_t index) {
@@ -1195,7 +1427,7 @@ void nodesDestroyList(SNodeList* pList) {
while (NULL != pNext) {
pNext = nodesListErase(pList, pNext);
}
- taosMemoryFreeClear(pList);
+ nodesFree(pList);
}
void nodesClearList(SNodeList* pList) {
@@ -1207,9 +1439,9 @@ void nodesClearList(SNodeList* pList) {
while (NULL != pNext) {
SListCell* tmp = pNext;
pNext = pNext->pNext;
- taosMemoryFreeClear(tmp);
+ nodesFree(tmp);
}
- taosMemoryFreeClear(pList);
+ nodesFree(pList);
}
void* nodesGetValueFromNode(SValueNode* pNode) {
diff --git a/source/libs/parser/inc/parAst.h b/source/libs/parser/inc/parAst.h
index 2a4f4c194d3e46c9ff4d052187431287dce87b3e..1a955b3f6b04aa4c14e85697706940d2de9bd001 100644
--- a/source/libs/parser/inc/parAst.h
+++ b/source/libs/parser/inc/parAst.h
@@ -48,6 +48,7 @@ typedef enum EDatabaseOptionType {
DB_OPTION_KEEP,
DB_OPTION_PAGES,
DB_OPTION_PAGESIZE,
+ DB_OPTION_TSDB_PAGESIZE,
DB_OPTION_PRECISION,
DB_OPTION_REPLICA,
DB_OPTION_STRICT,
@@ -59,7 +60,10 @@ typedef enum EDatabaseOptionType {
DB_OPTION_WAL_RETENTION_PERIOD,
DB_OPTION_WAL_RETENTION_SIZE,
DB_OPTION_WAL_ROLL_PERIOD,
- DB_OPTION_WAL_SEGMENT_SIZE
+ DB_OPTION_WAL_SEGMENT_SIZE,
+ DB_OPTION_STT_TRIGGER,
+ DB_OPTION_TABLE_PREFIX,
+ DB_OPTION_TABLE_SUFFIX
} EDatabaseOptionType;
typedef enum ETableOptionType {
@@ -115,6 +119,8 @@ SNode* createIntervalWindowNode(SAstCreateContext* pCxt, SNode* pInterval, SNode
SNode* createFillNode(SAstCreateContext* pCxt, EFillMode mode, SNode* pValues);
SNode* createGroupingSetNode(SAstCreateContext* pCxt, SNode* pNode);
SNode* createInterpTimeRange(SAstCreateContext* pCxt, SNode* pStart, SNode* pEnd);
+SNode* createWhenThenNode(SAstCreateContext* pCxt, SNode* pWhen, SNode* pThen);
+SNode* createCaseWhenNode(SAstCreateContext* pCxt, SNode* pCase, SNodeList* pWhenThenList, SNode* pElse);
SNode* addWhereClause(SAstCreateContext* pCxt, SNode* pStmt, SNode* pWhere);
SNode* addPartitionByClause(SAstCreateContext* pCxt, SNode* pStmt, SNodeList* pPartitionByList);
@@ -141,7 +147,7 @@ SNode* createCreateDatabaseStmt(SAstCreateContext* pCxt, bool ignoreExists, STok
SNode* createDropDatabaseStmt(SAstCreateContext* pCxt, bool ignoreNotExists, SToken* pDbName);
SNode* createAlterDatabaseStmt(SAstCreateContext* pCxt, SToken* pDbName, SNode* pOptions);
SNode* createFlushDatabaseStmt(SAstCreateContext* pCxt, SToken* pDbName);
-SNode* createTrimDatabaseStmt(SAstCreateContext* pCxt, SToken* pDbName);
+SNode* createTrimDatabaseStmt(SAstCreateContext* pCxt, SToken* pDbName, int32_t maxSpeed);
SNode* createDefaultTableOptions(SAstCreateContext* pCxt);
SNode* createAlterTableOptions(SAstCreateContext* pCxt);
SNode* setTableOption(SAstCreateContext* pCxt, SNode* pOptions, ETableOptionType type, void* pVal);
@@ -170,6 +176,7 @@ SNode* createShowCreateDatabaseStmt(SAstCreateContext* pCxt, SToken* pDbName);
SNode* createShowCreateTableStmt(SAstCreateContext* pCxt, ENodeType type, SNode* pRealTable);
SNode* createShowTableDistributedStmt(SAstCreateContext* pCxt, SNode* pRealTable);
SNode* createShowDnodeVariablesStmt(SAstCreateContext* pCxt, SNode* pDnodeId);
+SNode* createShowVnodesStmt(SAstCreateContext* pCxt, SNode* pDnodeId, SNode* pDnodeEndpoint);
SNode* createCreateUserStmt(SAstCreateContext* pCxt, SToken* pUserName, const SToken* pPassword, int8_t sysinfo);
SNode* createAlterUserStmt(SAstCreateContext* pCxt, SToken* pUserName, int8_t alterType, const SToken* pVal);
SNode* createDropUserStmt(SAstCreateContext* pCxt, SToken* pUserName);
@@ -205,7 +212,7 @@ SNode* createCreateFunctionStmt(SAstCreateContext* pCxt, bool ignoreExists, bool
SNode* createDropFunctionStmt(SAstCreateContext* pCxt, bool ignoreNotExists, const SToken* pFuncName);
SNode* createStreamOptions(SAstCreateContext* pCxt);
SNode* createCreateStreamStmt(SAstCreateContext* pCxt, bool ignoreExists, const SToken* pStreamName, SNode* pRealTable,
- SNode* pOptions, SNode* pQuery);
+ SNode* pOptions, SNodeList* pTags, SNode* pSubtable, SNode* pQuery);
SNode* createDropStreamStmt(SAstCreateContext* pCxt, bool ignoreNotExists, const SToken* pStreamName);
SNode* createKillStmt(SAstCreateContext* pCxt, ENodeType type, const SToken* pId);
SNode* createKillQueryStmt(SAstCreateContext* pCxt, const SToken* pQueryId);
diff --git a/source/libs/parser/inc/sql.y b/source/libs/parser/inc/sql.y
index 9bff061d02fbfa8d5795dff82c9ec93b7093f96d..225f16928980ac9ffe55b688923b93980334ae92 100644
--- a/source/libs/parser/inc/sql.y
+++ b/source/libs/parser/inc/sql.y
@@ -159,7 +159,7 @@ cmd ::= DROP DATABASE exists_opt(A) db_name(B).
cmd ::= USE db_name(A). { pCxt->pRootNode = createUseDatabaseStmt(pCxt, &A); }
cmd ::= ALTER DATABASE db_name(A) alter_db_options(B). { pCxt->pRootNode = createAlterDatabaseStmt(pCxt, &A, B); }
cmd ::= FLUSH DATABASE db_name(A). { pCxt->pRootNode = createFlushDatabaseStmt(pCxt, &A); }
-cmd ::= TRIM DATABASE db_name(A). { pCxt->pRootNode = createTrimDatabaseStmt(pCxt, &A); }
+cmd ::= TRIM DATABASE db_name(A) speed_opt(B). { pCxt->pRootNode = createTrimDatabaseStmt(pCxt, &A, B); }
%type not_exists_opt { bool }
%destructor not_exists_opt { }
@@ -184,6 +184,7 @@ db_options(A) ::= db_options(B) KEEP integer_list(C).
db_options(A) ::= db_options(B) KEEP variable_list(C). { A = setDatabaseOption(pCxt, B, DB_OPTION_KEEP, C); }
db_options(A) ::= db_options(B) PAGES NK_INTEGER(C). { A = setDatabaseOption(pCxt, B, DB_OPTION_PAGES, &C); }
db_options(A) ::= db_options(B) PAGESIZE NK_INTEGER(C). { A = setDatabaseOption(pCxt, B, DB_OPTION_PAGESIZE, &C); }
+db_options(A) ::= db_options(B) TSDB_PAGESIZE NK_INTEGER(C). { A = setDatabaseOption(pCxt, B, DB_OPTION_TSDB_PAGESIZE, &C); }
db_options(A) ::= db_options(B) PRECISION NK_STRING(C). { A = setDatabaseOption(pCxt, B, DB_OPTION_PRECISION, &C); }
db_options(A) ::= db_options(B) REPLICA NK_INTEGER(C). { A = setDatabaseOption(pCxt, B, DB_OPTION_REPLICA, &C); }
db_options(A) ::= db_options(B) STRICT NK_STRING(C). { A = setDatabaseOption(pCxt, B, DB_OPTION_STRICT, &C); }
@@ -207,6 +208,9 @@ db_options(A) ::= db_options(B) WAL_RETENTION_SIZE NK_MINUS(D) NK_INTEGER(C).
}
db_options(A) ::= db_options(B) WAL_ROLL_PERIOD NK_INTEGER(C). { A = setDatabaseOption(pCxt, B, DB_OPTION_WAL_ROLL_PERIOD, &C); }
db_options(A) ::= db_options(B) WAL_SEGMENT_SIZE NK_INTEGER(C). { A = setDatabaseOption(pCxt, B, DB_OPTION_WAL_SEGMENT_SIZE, &C); }
+db_options(A) ::= db_options(B) STT_TRIGGER NK_INTEGER(C). { A = setDatabaseOption(pCxt, B, DB_OPTION_STT_TRIGGER, &C); }
+db_options(A) ::= db_options(B) TABLE_PREFIX NK_INTEGER(C). { A = setDatabaseOption(pCxt, B, DB_OPTION_TABLE_PREFIX, &C); }
+db_options(A) ::= db_options(B) TABLE_SUFFIX NK_INTEGER(C). { A = setDatabaseOption(pCxt, B, DB_OPTION_TABLE_SUFFIX, &C); }
alter_db_options(A) ::= alter_db_option(B). { A = createAlterDatabaseOptions(pCxt); A = setAlterDatabaseOption(pCxt, A, &B); }
alter_db_options(A) ::= alter_db_options(B) alter_db_option(C). { A = setAlterDatabaseOption(pCxt, B, &C); }
@@ -223,6 +227,7 @@ alter_db_option(A) ::= KEEP variable_list(B).
//alter_db_option(A) ::= REPLICA NK_INTEGER(B). { A.type = DB_OPTION_REPLICA; A.val = B; }
//alter_db_option(A) ::= STRICT NK_STRING(B). { A.type = DB_OPTION_STRICT; A.val = B; }
alter_db_option(A) ::= WAL_LEVEL NK_INTEGER(B). { A.type = DB_OPTION_WAL; A.val = B; }
+alter_db_option(A) ::= STT_TRIGGER NK_INTEGER(B). { A.type = DB_OPTION_STT_TRIGGER; A.val = B; }
%type integer_list { SNodeList* }
%destructor integer_list { nodesDestroyList($$); }
@@ -241,6 +246,11 @@ retention_list(A) ::= retention_list(B) NK_COMMA retention(C).
retention(A) ::= NK_VARIABLE(B) NK_COLON NK_VARIABLE(C). { A = createNodeListNodeEx(pCxt, createDurationValueNode(pCxt, &B), createDurationValueNode(pCxt, &C)); }
+%type speed_opt { int32_t }
+%destructor speed_opt { }
+speed_opt(A) ::= . { A = 0; }
+speed_opt(A) ::= MAX_SPEED NK_INTEGER(B). { A = taosStr2Int32(B.z, NULL, 10); }
+
/************************************************ create/drop table/stable ********************************************/
cmd ::= CREATE TABLE not_exists_opt(A) full_table_name(B)
NK_LP column_def_list(C) NK_RP tags_def_opt(D) table_options(E). { pCxt->pRootNode = createCreateTableStmt(pCxt, A, B, C, D, E); }
@@ -410,6 +420,8 @@ cmd ::= SHOW TABLE DISTRIBUTED full_table_name(A).
cmd ::= SHOW CONSUMERS. { pCxt->pRootNode = createShowStmt(pCxt, QUERY_NODE_SHOW_CONSUMERS_STMT); }
cmd ::= SHOW SUBSCRIPTIONS. { pCxt->pRootNode = createShowStmt(pCxt, QUERY_NODE_SHOW_SUBSCRIPTIONS_STMT); }
cmd ::= SHOW TAGS FROM table_name_cond(A) from_db_opt(B). { pCxt->pRootNode = createShowStmtWithCond(pCxt, QUERY_NODE_SHOW_TAGS_STMT, B, A, OP_TYPE_EQUAL); }
+cmd ::= SHOW VNODES NK_INTEGER(A). { pCxt->pRootNode = createShowVnodesStmt(pCxt, createValueNode(pCxt, TSDB_DATA_TYPE_BIGINT, &A), NULL); }
+cmd ::= SHOW VNODES NK_STRING(A). { pCxt->pRootNode = createShowVnodesStmt(pCxt, NULL, createValueNode(pCxt, TSDB_DATA_TYPE_VARCHAR, &A)); }
db_name_cond_opt(A) ::= . { A = createDefaultDatabaseCondValue(pCxt); }
db_name_cond_opt(A) ::= db_name(B) NK_DOT. { A = createValueNode(pCxt, TSDB_DATA_TYPE_BINARY, &B); }
@@ -445,7 +457,7 @@ sma_stream_opt(A) ::= stream_options(B) WATERMARK duration_literal(C).
sma_stream_opt(A) ::= stream_options(B) MAX_DELAY duration_literal(C). { ((SStreamOptions*)B)->pDelay = releaseRawExprNode(pCxt, C); A = B; }
/************************************************ create/drop topic ***************************************************/
-cmd ::= CREATE TOPIC not_exists_opt(A) topic_name(B) AS query_expression(C). { pCxt->pRootNode = createCreateTopicStmtUseQuery(pCxt, A, &B, C); }
+cmd ::= CREATE TOPIC not_exists_opt(A) topic_name(B) AS query_or_subquery(C). { pCxt->pRootNode = createCreateTopicStmtUseQuery(pCxt, A, &B, C); }
cmd ::= CREATE TOPIC not_exists_opt(A) topic_name(B) AS DATABASE db_name(C). { pCxt->pRootNode = createCreateTopicStmtUseDb(pCxt, A, &B, &C, false); }
cmd ::= CREATE TOPIC not_exists_opt(A) topic_name(B)
WITH META AS DATABASE db_name(C). { pCxt->pRootNode = createCreateTopicStmtUseDb(pCxt, A, &B, &C, true); }
@@ -464,7 +476,7 @@ cmd ::= DESCRIBE full_table_name(A).
cmd ::= RESET QUERY CACHE. { pCxt->pRootNode = createResetQueryCacheStmt(pCxt); }
/************************************************ explain *************************************************************/
-cmd ::= EXPLAIN analyze_opt(A) explain_options(B) query_expression(C). { pCxt->pRootNode = createExplainStmt(pCxt, A, B, C); }
+cmd ::= EXPLAIN analyze_opt(A) explain_options(B) query_or_subquery(C). { pCxt->pRootNode = createExplainStmt(pCxt, A, B, C); }
%type analyze_opt { bool }
%destructor analyze_opt { }
@@ -494,8 +506,8 @@ bufsize_opt(A) ::= .
bufsize_opt(A) ::= BUFSIZE NK_INTEGER(B). { A = taosStr2Int32(B.z, NULL, 10); }
/************************************************ create/drop stream **************************************************/
-cmd ::= CREATE STREAM not_exists_opt(E) stream_name(A)
- stream_options(B) INTO full_table_name(C) AS query_expression(D). { pCxt->pRootNode = createCreateStreamStmt(pCxt, E, &A, C, B, D); }
+cmd ::= CREATE STREAM not_exists_opt(E) stream_name(A) stream_options(B) INTO
+ full_table_name(C) tags_def_opt(F) subtable_opt(G) AS query_or_subquery(D). { pCxt->pRootNode = createCreateStreamStmt(pCxt, E, &A, C, B, F, G, D); }
cmd ::= DROP STREAM exists_opt(A) stream_name(B). { pCxt->pRootNode = createDropStreamStmt(pCxt, A, &B); }
stream_options(A) ::= . { A = createStreamOptions(pCxt); }
@@ -505,6 +517,9 @@ stream_options(A) ::= stream_options(B) TRIGGER MAX_DELAY duration_literal(C).
stream_options(A) ::= stream_options(B) WATERMARK duration_literal(C). { ((SStreamOptions*)B)->pWatermark = releaseRawExprNode(pCxt, C); A = B; }
stream_options(A) ::= stream_options(B) IGNORE EXPIRED NK_INTEGER(C). { ((SStreamOptions*)B)->ignoreExpired = taosStr2Int8(C.z, NULL, 10); A = B; }
+subtable_opt(A) ::= . { A = NULL; }
+subtable_opt(A) ::= SUBTABLE NK_LP expression(B) NK_RP. { A = releaseRawExprNode(pCxt, B); }
+
/************************************************ kill connection/query ***********************************************/
cmd ::= KILL CONNECTION NK_INTEGER(A). { pCxt->pRootNode = createKillStmt(pCxt, QUERY_NODE_KILL_CONNECTION_STMT, &A); }
cmd ::= KILL QUERY NK_STRING(A). { pCxt->pRootNode = createKillQueryStmt(pCxt, &A); }
@@ -528,12 +543,12 @@ dnode_list(A) ::= dnode_list(B) DNODE NK_INTEGER(C).
cmd ::= DELETE FROM full_table_name(A) where_clause_opt(B). { pCxt->pRootNode = createDeleteStmt(pCxt, A, B); }
/************************************************ select **************************************************************/
-cmd ::= query_expression(A). { pCxt->pRootNode = A; }
+cmd ::= query_or_subquery(A). { pCxt->pRootNode = A; }
/************************************************ insert **************************************************************/
cmd ::= INSERT INTO full_table_name(A)
- NK_LP col_name_list(B) NK_RP query_expression(C). { pCxt->pRootNode = createInsertStmt(pCxt, A, B, C); }
-cmd ::= INSERT INTO full_table_name(A) query_expression(B). { pCxt->pRootNode = createInsertStmt(pCxt, A, NULL, B); }
+ NK_LP col_name_list(B) NK_RP query_or_subquery(C). { pCxt->pRootNode = createInsertStmt(pCxt, A, B, C); }
+cmd ::= INSERT INTO full_table_name(A) query_or_subquery(B). { pCxt->pRootNode = createInsertStmt(pCxt, A, NULL, B); }
/************************************************ literal *************************************************************/
literal(A) ::= NK_INTEGER(B). { A = createRawExprNode(pCxt, &B, createValueNode(pCxt, TSDB_DATA_TYPE_UBIGINT, &B)); }
@@ -618,42 +633,44 @@ stream_name(A) ::= NK_ID(B).
cgroup_name(A) ::= NK_ID(B). { A = B; }
/************************************************ expression **********************************************************/
+expr_or_subquery(A) ::= expression(B). { A = B; }
+expr_or_subquery(A) ::= subquery(B). { A = B; }
+
expression(A) ::= literal(B). { A = B; }
expression(A) ::= pseudo_column(B). { A = B; }
expression(A) ::= column_reference(B). { A = B; }
expression(A) ::= function_expression(B). { A = B; }
-//expression(A) ::= case_expression(B). { A = B; }
-expression(A) ::= subquery(B). { A = B; }
+expression(A) ::= case_when_expression(B). { A = B; }
expression(A) ::= NK_LP(B) expression(C) NK_RP(D). { A = createRawExprNodeExt(pCxt, &B, &D, releaseRawExprNode(pCxt, C)); }
-expression(A) ::= NK_PLUS(B) expression(C). {
+expression(A) ::= NK_PLUS(B) expr_or_subquery(C). {
SToken t = getTokenFromRawExprNode(pCxt, C);
A = createRawExprNodeExt(pCxt, &B, &t, releaseRawExprNode(pCxt, C));
}
-expression(A) ::= NK_MINUS(B) expression(C). {
+expression(A) ::= NK_MINUS(B) expr_or_subquery(C). {
SToken t = getTokenFromRawExprNode(pCxt, C);
A = createRawExprNodeExt(pCxt, &B, &t, createOperatorNode(pCxt, OP_TYPE_MINUS, releaseRawExprNode(pCxt, C), NULL));
}
-expression(A) ::= expression(B) NK_PLUS expression(C). {
+expression(A) ::= expr_or_subquery(B) NK_PLUS expr_or_subquery(C). {
SToken s = getTokenFromRawExprNode(pCxt, B);
SToken e = getTokenFromRawExprNode(pCxt, C);
A = createRawExprNodeExt(pCxt, &s, &e, createOperatorNode(pCxt, OP_TYPE_ADD, releaseRawExprNode(pCxt, B), releaseRawExprNode(pCxt, C)));
}
-expression(A) ::= expression(B) NK_MINUS expression(C). {
+expression(A) ::= expr_or_subquery(B) NK_MINUS expr_or_subquery(C). {
SToken s = getTokenFromRawExprNode(pCxt, B);
SToken e = getTokenFromRawExprNode(pCxt, C);
A = createRawExprNodeExt(pCxt, &s, &e, createOperatorNode(pCxt, OP_TYPE_SUB, releaseRawExprNode(pCxt, B), releaseRawExprNode(pCxt, C)));
}
-expression(A) ::= expression(B) NK_STAR expression(C). {
+expression(A) ::= expr_or_subquery(B) NK_STAR expr_or_subquery(C). {
SToken s = getTokenFromRawExprNode(pCxt, B);
SToken e = getTokenFromRawExprNode(pCxt, C);
A = createRawExprNodeExt(pCxt, &s, &e, createOperatorNode(pCxt, OP_TYPE_MULTI, releaseRawExprNode(pCxt, B), releaseRawExprNode(pCxt, C)));
}
-expression(A) ::= expression(B) NK_SLASH expression(C). {
+expression(A) ::= expr_or_subquery(B) NK_SLASH expr_or_subquery(C). {
SToken s = getTokenFromRawExprNode(pCxt, B);
SToken e = getTokenFromRawExprNode(pCxt, C);
A = createRawExprNodeExt(pCxt, &s, &e, createOperatorNode(pCxt, OP_TYPE_DIV, releaseRawExprNode(pCxt, B), releaseRawExprNode(pCxt, C)));
}
-expression(A) ::= expression(B) NK_REM expression(C). {
+expression(A) ::= expr_or_subquery(B) NK_REM expr_or_subquery(C). {
SToken s = getTokenFromRawExprNode(pCxt, B);
SToken e = getTokenFromRawExprNode(pCxt, C);
A = createRawExprNodeExt(pCxt, &s, &e, createOperatorNode(pCxt, OP_TYPE_REM, releaseRawExprNode(pCxt, B), releaseRawExprNode(pCxt, C)));
@@ -662,12 +679,12 @@ expression(A) ::= column_reference(B) NK_ARROW NK_STRING(C).
SToken s = getTokenFromRawExprNode(pCxt, B);
A = createRawExprNodeExt(pCxt, &s, &C, createOperatorNode(pCxt, OP_TYPE_JSON_GET_VALUE, releaseRawExprNode(pCxt, B), createValueNode(pCxt, TSDB_DATA_TYPE_BINARY, &C)));
}
-expression(A) ::= expression(B) NK_BITAND expression(C). {
+expression(A) ::= expr_or_subquery(B) NK_BITAND expr_or_subquery(C). {
SToken s = getTokenFromRawExprNode(pCxt, B);
SToken e = getTokenFromRawExprNode(pCxt, C);
A = createRawExprNodeExt(pCxt, &s, &e, createOperatorNode(pCxt, OP_TYPE_BIT_AND, releaseRawExprNode(pCxt, B), releaseRawExprNode(pCxt, C)));
}
-expression(A) ::= expression(B) NK_BITOR expression(C). {
+expression(A) ::= expr_or_subquery(B) NK_BITOR expr_or_subquery(C). {
SToken s = getTokenFromRawExprNode(pCxt, B);
SToken e = getTokenFromRawExprNode(pCxt, C);
A = createRawExprNodeExt(pCxt, &s, &e, createOperatorNode(pCxt, OP_TYPE_BIT_OR, releaseRawExprNode(pCxt, B), releaseRawExprNode(pCxt, C)));
@@ -675,8 +692,8 @@ expression(A) ::= expression(B) NK_BITOR expression(C).
%type expression_list { SNodeList* }
%destructor expression_list { nodesDestroyList($$); }
-expression_list(A) ::= expression(B). { A = createNodeList(pCxt, releaseRawExprNode(pCxt, B)); }
-expression_list(A) ::= expression_list(B) NK_COMMA expression(C). { A = addNodeToList(pCxt, B, releaseRawExprNode(pCxt, C)); }
+expression_list(A) ::= expr_or_subquery(B). { A = createNodeList(pCxt, releaseRawExprNode(pCxt, B)); }
+expression_list(A) ::= expression_list(B) NK_COMMA expr_or_subquery(C). { A = addNodeToList(pCxt, B, releaseRawExprNode(pCxt, C)); }
column_reference(A) ::= column_name(B). { A = createRawExprNode(pCxt, &B, createColumnNode(pCxt, NULL, &B)); }
column_reference(A) ::= table_name(B) NK_DOT column_name(C). { A = createRawExprNodeExt(pCxt, &B, &C, createColumnNode(pCxt, &B, &C)); }
@@ -690,10 +707,12 @@ pseudo_column(A) ::= QDURATION(B).
pseudo_column(A) ::= WSTART(B). { A = createRawExprNode(pCxt, &B, createFunctionNode(pCxt, &B, NULL)); }
pseudo_column(A) ::= WEND(B). { A = createRawExprNode(pCxt, &B, createFunctionNode(pCxt, &B, NULL)); }
pseudo_column(A) ::= WDURATION(B). { A = createRawExprNode(pCxt, &B, createFunctionNode(pCxt, &B, NULL)); }
+pseudo_column(A) ::= IROWTS(B). { A = createRawExprNode(pCxt, &B, createFunctionNode(pCxt, &B, NULL)); }
function_expression(A) ::= function_name(B) NK_LP expression_list(C) NK_RP(D). { A = createRawExprNodeExt(pCxt, &B, &D, createFunctionNode(pCxt, &B, C)); }
function_expression(A) ::= star_func(B) NK_LP star_func_para_list(C) NK_RP(D). { A = createRawExprNodeExt(pCxt, &B, &D, createFunctionNode(pCxt, &B, C)); }
-function_expression(A) ::= CAST(B) NK_LP expression(C) AS type_name(D) NK_RP(E). { A = createRawExprNodeExt(pCxt, &B, &E, createCastFunctionNode(pCxt, releaseRawExprNode(pCxt, C), D)); }
+function_expression(A) ::=
+ CAST(B) NK_LP expr_or_subquery(C) AS type_name(D) NK_RP(E). { A = createRawExprNodeExt(pCxt, &B, &E, createCastFunctionNode(pCxt, releaseRawExprNode(pCxt, C), D)); }
function_expression(A) ::= literal_func(B). { A = B; }
literal_func(A) ::= noarg_func(B) NK_LP NK_RP(C). { A = createRawExprNodeExt(pCxt, &B, &C, createFunctionNode(pCxt, &B, NULL)); }
@@ -728,35 +747,52 @@ star_func_para_list(A) ::= other_para_list(B).
other_para_list(A) ::= star_func_para(B). { A = createNodeList(pCxt, B); }
other_para_list(A) ::= other_para_list(B) NK_COMMA star_func_para(C). { A = addNodeToList(pCxt, B, C); }
-star_func_para(A) ::= expression(B). { A = releaseRawExprNode(pCxt, B); }
+star_func_para(A) ::= expr_or_subquery(B). { A = releaseRawExprNode(pCxt, B); }
star_func_para(A) ::= table_name(B) NK_DOT NK_STAR(C). { A = createColumnNode(pCxt, &B, &C); }
+case_when_expression(A) ::=
+ CASE(E) when_then_list(C) case_when_else_opt(D) END(F). { A = createRawExprNodeExt(pCxt, &E, &F, createCaseWhenNode(pCxt, NULL, C, D)); }
+case_when_expression(A) ::=
+ CASE(E) common_expression(B) when_then_list(C) case_when_else_opt(D) END(F). { A = createRawExprNodeExt(pCxt, &E, &F, createCaseWhenNode(pCxt, releaseRawExprNode(pCxt, B), C, D)); }
+
+%type when_then_list { SNodeList* }
+%destructor when_then_list { nodesDestroyList($$); }
+when_then_list(A) ::= when_then_expr(B). { A = createNodeList(pCxt, B); }
+when_then_list(A) ::= when_then_list(B) when_then_expr(C). { A = addNodeToList(pCxt, B, C); }
+
+when_then_expr(A) ::= WHEN common_expression(B) THEN common_expression(C). { A = createWhenThenNode(pCxt, releaseRawExprNode(pCxt, B), releaseRawExprNode(pCxt, C)); }
+
+case_when_else_opt(A) ::= . { A = NULL; }
+case_when_else_opt(A) ::= ELSE common_expression(B). { A = releaseRawExprNode(pCxt, B); }
+
/************************************************ predicate ***********************************************************/
-predicate(A) ::= expression(B) compare_op(C) expression(D). {
+predicate(A) ::= expr_or_subquery(B) compare_op(C) expr_or_subquery(D). {
SToken s = getTokenFromRawExprNode(pCxt, B);
SToken e = getTokenFromRawExprNode(pCxt, D);
A = createRawExprNodeExt(pCxt, &s, &e, createOperatorNode(pCxt, C, releaseRawExprNode(pCxt, B), releaseRawExprNode(pCxt, D)));
}
//predicate(A) ::= expression(B) compare_op sub_type expression(B).
-predicate(A) ::= expression(B) BETWEEN expression(C) AND expression(D). {
+predicate(A) ::=
+ expr_or_subquery(B) BETWEEN expr_or_subquery(C) AND expr_or_subquery(D). {
SToken s = getTokenFromRawExprNode(pCxt, B);
SToken e = getTokenFromRawExprNode(pCxt, D);
A = createRawExprNodeExt(pCxt, &s, &e, createBetweenAnd(pCxt, releaseRawExprNode(pCxt, B), releaseRawExprNode(pCxt, C), releaseRawExprNode(pCxt, D)));
}
-predicate(A) ::= expression(B) NOT BETWEEN expression(C) AND expression(D). {
+predicate(A) ::=
+ expr_or_subquery(B) NOT BETWEEN expr_or_subquery(C) AND expr_or_subquery(D). {
SToken s = getTokenFromRawExprNode(pCxt, B);
SToken e = getTokenFromRawExprNode(pCxt, D);
A = createRawExprNodeExt(pCxt, &s, &e, createNotBetweenAnd(pCxt, releaseRawExprNode(pCxt, B), releaseRawExprNode(pCxt, C), releaseRawExprNode(pCxt, D)));
}
-predicate(A) ::= expression(B) IS NULL(C). {
+predicate(A) ::= expr_or_subquery(B) IS NULL(C). {
SToken s = getTokenFromRawExprNode(pCxt, B);
A = createRawExprNodeExt(pCxt, &s, &C, createOperatorNode(pCxt, OP_TYPE_IS_NULL, releaseRawExprNode(pCxt, B), NULL));
}
-predicate(A) ::= expression(B) IS NOT NULL(C). {
+predicate(A) ::= expr_or_subquery(B) IS NOT NULL(C). {
SToken s = getTokenFromRawExprNode(pCxt, B);
A = createRawExprNodeExt(pCxt, &s, &C, createOperatorNode(pCxt, OP_TYPE_IS_NOT_NULL, releaseRawExprNode(pCxt, B), NULL));
}
-predicate(A) ::= expression(B) in_op(C) in_predicate_value(D). {
+predicate(A) ::= expr_or_subquery(B) in_op(C) in_predicate_value(D). {
SToken s = getTokenFromRawExprNode(pCxt, B);
SToken e = getTokenFromRawExprNode(pCxt, D);
A = createRawExprNodeExt(pCxt, &s, &e, createOperatorNode(pCxt, C, releaseRawExprNode(pCxt, B), releaseRawExprNode(pCxt, D)));
@@ -806,7 +842,7 @@ boolean_primary(A) ::= predicate(B).
boolean_primary(A) ::= NK_LP(C) boolean_value_expression(B) NK_RP(D). { A = createRawExprNodeExt(pCxt, &C, &D, releaseRawExprNode(pCxt, B)); }
/************************************************ common_expression ********************************************/
-common_expression(A) ::= expression(B). { A = B; }
+common_expression(A) ::= expr_or_subquery(B). { A = B; }
common_expression(A) ::= boolean_value_expression(B). { A = B; }
/************************************************ from_clause_opt *********************************************************/
@@ -882,12 +918,21 @@ where_clause_opt(A) ::= WHERE search_condition(B).
%type partition_by_clause_opt { SNodeList* }
%destructor partition_by_clause_opt { nodesDestroyList($$); }
partition_by_clause_opt(A) ::= . { A = NULL; }
-partition_by_clause_opt(A) ::= PARTITION BY expression_list(B). { A = B; }
+partition_by_clause_opt(A) ::= PARTITION BY partition_list(B). { A = B; }
+
+%type partition_list { SNodeList* }
+%destructor partition_list { nodesDestroyList($$); }
+partition_list(A) ::= partition_item(B). { A = createNodeList(pCxt, B); }
+partition_list(A) ::= partition_list(B) NK_COMMA partition_item(C). { A = addNodeToList(pCxt, B, C); }
+
+partition_item(A) ::= expr_or_subquery(B). { A = releaseRawExprNode(pCxt, B); }
+partition_item(A) ::= expr_or_subquery(B) column_alias(C). { A = setProjectionAlias(pCxt, releaseRawExprNode(pCxt, B), &C); }
+partition_item(A) ::= expr_or_subquery(B) AS column_alias(C). { A = setProjectionAlias(pCxt, releaseRawExprNode(pCxt, B), &C); }
twindow_clause_opt(A) ::= . { A = NULL; }
twindow_clause_opt(A) ::=
SESSION NK_LP column_reference(B) NK_COMMA duration_literal(C) NK_RP. { A = createSessionWindowNode(pCxt, releaseRawExprNode(pCxt, B), releaseRawExprNode(pCxt, C)); }
-twindow_clause_opt(A) ::= STATE_WINDOW NK_LP expression(B) NK_RP. { A = createStateWindowNode(pCxt, releaseRawExprNode(pCxt, B)); }
+twindow_clause_opt(A) ::= STATE_WINDOW NK_LP expr_or_subquery(B) NK_RP. { A = createStateWindowNode(pCxt, releaseRawExprNode(pCxt, B)); }
twindow_clause_opt(A) ::=
INTERVAL NK_LP duration_literal(B) NK_RP sliding_opt(C) fill_opt(D). { A = createIntervalWindowNode(pCxt, releaseRawExprNode(pCxt, B), NULL, C, D); }
twindow_clause_opt(A) ::=
@@ -916,41 +961,40 @@ group_by_clause_opt(A) ::= GROUP BY group_by_list(B).
%type group_by_list { SNodeList* }
%destructor group_by_list { nodesDestroyList($$); }
-group_by_list(A) ::= expression(B). { A = createNodeList(pCxt, createGroupingSetNode(pCxt, releaseRawExprNode(pCxt, B))); }
-group_by_list(A) ::= group_by_list(B) NK_COMMA expression(C). { A = addNodeToList(pCxt, B, createGroupingSetNode(pCxt, releaseRawExprNode(pCxt, C))); }
+group_by_list(A) ::= expr_or_subquery(B). { A = createNodeList(pCxt, createGroupingSetNode(pCxt, releaseRawExprNode(pCxt, B))); }
+group_by_list(A) ::= group_by_list(B) NK_COMMA expr_or_subquery(C). { A = addNodeToList(pCxt, B, createGroupingSetNode(pCxt, releaseRawExprNode(pCxt, C))); }
having_clause_opt(A) ::= . { A = NULL; }
having_clause_opt(A) ::= HAVING search_condition(B). { A = B; }
range_opt(A) ::= . { A = NULL; }
-range_opt(A) ::= RANGE NK_LP expression(B) NK_COMMA expression(C) NK_RP. { A = createInterpTimeRange(pCxt, releaseRawExprNode(pCxt, B), releaseRawExprNode(pCxt, C)); }
+range_opt(A) ::=
+ RANGE NK_LP expr_or_subquery(B) NK_COMMA expr_or_subquery(C) NK_RP. { A = createInterpTimeRange(pCxt, releaseRawExprNode(pCxt, B), releaseRawExprNode(pCxt, C)); }
every_opt(A) ::= . { A = NULL; }
every_opt(A) ::= EVERY NK_LP duration_literal(B) NK_RP. { A = releaseRawExprNode(pCxt, B); }
/************************************************ query_expression ****************************************************/
-query_expression(A) ::=
- query_expression_body(B)
- order_by_clause_opt(C) slimit_clause_opt(D) limit_clause_opt(E). {
+query_expression(A) ::= query_simple(B)
+ order_by_clause_opt(C) slimit_clause_opt(D) limit_clause_opt(E). {
A = addOrderByClause(pCxt, B, C);
A = addSlimitClause(pCxt, A, D);
A = addLimitClause(pCxt, A, E);
}
-query_expression_body(A) ::= query_primary(B). { A = B; }
-query_expression_body(A) ::=
- query_expression_body(B) UNION ALL query_expression_body(D). { A = createSetOperator(pCxt, SET_OP_TYPE_UNION_ALL, B, D); }
-query_expression_body(A) ::=
- query_expression_body(B) UNION query_expression_body(D). { A = createSetOperator(pCxt, SET_OP_TYPE_UNION, B, D); }
+query_simple(A) ::= query_specification(B). { A = B; }
+query_simple(A) ::= union_query_expression(B). { A = B; }
-query_primary(A) ::= query_specification(B). { A = B; }
-query_primary(A) ::=
- NK_LP query_expression_body(B)
- order_by_clause_opt(C) slimit_clause_opt(D) limit_clause_opt(E) NK_RP. {
- A = addOrderByClause(pCxt, B, C);
- A = addSlimitClause(pCxt, A, D);
- A = addLimitClause(pCxt, A, E);
- }
+union_query_expression(A) ::=
+ query_simple_or_subquery(B) UNION ALL query_simple_or_subquery(C). { A = createSetOperator(pCxt, SET_OP_TYPE_UNION_ALL, B, C); }
+union_query_expression(A) ::=
+ query_simple_or_subquery(B) UNION query_simple_or_subquery(C). { A = createSetOperator(pCxt, SET_OP_TYPE_UNION, B, C); }
+
+query_simple_or_subquery(A) ::= query_simple(B). { A = B; }
+query_simple_or_subquery(A) ::= subquery(B). { A = releaseRawExprNode(pCxt, B); }
+
+query_or_subquery(A) ::= query_expression(B). { A = B; }
+query_or_subquery(A) ::= subquery(B). { A = releaseRawExprNode(pCxt, B); }
%type order_by_clause_opt { SNodeList* }
%destructor order_by_clause_opt { nodesDestroyList($$); }
@@ -969,6 +1013,7 @@ limit_clause_opt(A) ::= LIMIT NK_INTEGER(C) NK_COMMA NK_INTEGER(B).
/************************************************ subquery ************************************************************/
subquery(A) ::= NK_LP(B) query_expression(C) NK_RP(D). { A = createRawExprNodeExt(pCxt, &B, &D, C); }
+subquery(A) ::= NK_LP(B) subquery(C) NK_RP(D). { A = createRawExprNodeExt(pCxt, &B, &D, releaseRawExprNode(pCxt, C)); }
/************************************************ search_condition ****************************************************/
search_condition(A) ::= common_expression(B). { A = releaseRawExprNode(pCxt, B); }
@@ -981,7 +1026,7 @@ sort_specification_list(A) ::=
sort_specification_list(B) NK_COMMA sort_specification(C). { A = addNodeToList(pCxt, B, C); }
sort_specification(A) ::=
- expression(B) ordering_specification_opt(C) null_ordering_opt(D). { A = createOrderByExprNode(pCxt, releaseRawExprNode(pCxt, B), C, D); }
+ expr_or_subquery(B) ordering_specification_opt(C) null_ordering_opt(D). { A = createOrderByExprNode(pCxt, releaseRawExprNode(pCxt, B), C, D); }
%type ordering_specification_opt EOrder
%destructor ordering_specification_opt { }
@@ -997,4 +1042,4 @@ null_ordering_opt(A) ::= NULLS LAST.
%fallback ABORT AFTER ATTACH BEFORE BEGIN BITAND BITNOT BITOR BLOCKS CHANGE COMMA COMPACT CONCAT CONFLICT COPY DEFERRED DELIMITERS DETACH DIVIDE DOT EACH END FAIL
FILE FOR GLOB ID IMMEDIATE IMPORT INITIALLY INSTEAD ISNULL KEY NK_BITNOT NK_SEMI NOTNULL OF PLUS PRIVILEGE RAISE REPLACE RESTRICT ROW SEMI STAR STATEMENT STRING
- TIMES UPDATE VALUES VARIABLE VIEW VNODES WAL.
+ TIMES UPDATE VALUES VARIABLE VIEW WAL.
diff --git a/source/libs/parser/src/parAstCreater.c b/source/libs/parser/src/parAstCreater.c
index 4d0b0bbb2533111fe31d4810c58270bea5b22314..10a066db40e497767e6e4eca623aabf3ded3dcea 100644
--- a/source/libs/parser/src/parAstCreater.c
+++ b/source/libs/parser/src/parAstCreater.c
@@ -247,7 +247,8 @@ SNode* releaseRawExprNode(SAstCreateContext* pCxt, SNode* pNode) {
pExpr->userAlias[len] = '\0';
}
}
- taosMemoryFreeClear(pNode);
+ pRawExpr->pNode = NULL;
+ nodesDestroyNode(pNode);
return pRealizedExpr;
}
@@ -646,6 +647,25 @@ SNode* createInterpTimeRange(SAstCreateContext* pCxt, SNode* pStart, SNode* pEnd
return createBetweenAnd(pCxt, createPrimaryKeyCol(pCxt, NULL), pStart, pEnd);
}
+SNode* createWhenThenNode(SAstCreateContext* pCxt, SNode* pWhen, SNode* pThen) {
+ CHECK_PARSER_STATUS(pCxt);
+ SWhenThenNode* pWhenThen = (SWhenThenNode*)nodesMakeNode(QUERY_NODE_WHEN_THEN);
+ CHECK_OUT_OF_MEM(pWhenThen);
+ pWhenThen->pWhen = pWhen;
+ pWhenThen->pThen = pThen;
+ return (SNode*)pWhenThen;
+}
+
+SNode* createCaseWhenNode(SAstCreateContext* pCxt, SNode* pCase, SNodeList* pWhenThenList, SNode* pElse) {
+ CHECK_PARSER_STATUS(pCxt);
+ SCaseWhenNode* pCaseWhen = (SCaseWhenNode*)nodesMakeNode(QUERY_NODE_CASE_WHEN);
+ CHECK_OUT_OF_MEM(pCaseWhen);
+ pCaseWhen->pCase = pCase;
+ pCaseWhen->pWhenThenList = pWhenThenList;
+ pCaseWhen->pElse = pElse;
+ return (SNode*)pCaseWhen;
+}
+
SNode* setProjectionAlias(SAstCreateContext* pCxt, SNode* pNode, SToken* pAlias) {
CHECK_PARSER_STATUS(pCxt);
trimEscape(pAlias);
@@ -795,6 +815,20 @@ SNode* createSetOperator(SAstCreateContext* pCxt, ESetOperatorType type, SNode*
return (SNode*)setOp;
}
+static void updateWalOptionsDefault(SDatabaseOptions* pOptions) {
+ if (!pOptions->walRetentionPeriodIsSet) {
+ pOptions->walRetentionPeriod =
+ pOptions->replica > 1 ? TSDB_REPS_DEF_DB_WAL_RET_PERIOD : TSDB_REP_DEF_DB_WAL_RET_PERIOD;
+ }
+ if (!pOptions->walRetentionSizeIsSet) {
+ pOptions->walRetentionSize = pOptions->replica > 1 ? TSDB_REPS_DEF_DB_WAL_RET_SIZE : TSDB_REP_DEF_DB_WAL_RET_SIZE;
+ }
+ if (!pOptions->walRollPeriodIsSet) {
+ pOptions->walRollPeriod =
+ pOptions->replica > 1 ? TSDB_REPS_DEF_DB_WAL_ROLL_PERIOD : TSDB_REP_DEF_DB_WAL_ROLL_PERIOD;
+ }
+}
+
SNode* createDefaultDatabaseOptions(SAstCreateContext* pCxt) {
CHECK_PARSER_STATUS(pCxt);
SDatabaseOptions* pOptions = (SDatabaseOptions*)nodesMakeNode(QUERY_NODE_DATABASE_OPTIONS);
@@ -812,6 +846,7 @@ SNode* createDefaultDatabaseOptions(SAstCreateContext* pCxt) {
pOptions->keep[2] = TSDB_DEFAULT_KEEP;
pOptions->pages = TSDB_DEFAULT_PAGES_PER_VNODE;
pOptions->pagesize = TSDB_DEFAULT_PAGESIZE_PER_VNODE;
+ pOptions->tsdbPageSize = TSDB_DEFAULT_TSDB_PAGESIZE;
pOptions->precision = TSDB_DEFAULT_PRECISION;
pOptions->replica = TSDB_DEFAULT_DB_REPLICA;
pOptions->strict = TSDB_DEFAULT_DB_STRICT;
@@ -819,10 +854,11 @@ SNode* createDefaultDatabaseOptions(SAstCreateContext* pCxt) {
pOptions->numOfVgroups = TSDB_DEFAULT_VN_PER_DB;
pOptions->singleStable = TSDB_DEFAULT_DB_SINGLE_STABLE;
pOptions->schemaless = TSDB_DEFAULT_DB_SCHEMALESS;
- pOptions->walRetentionPeriod = TSDB_DEFAULT_DB_WAL_RETENTION_PERIOD;
- pOptions->walRetentionSize = TSDB_DEFAULT_DB_WAL_RETENTION_SIZE;
- pOptions->walRollPeriod = TSDB_DEFAULT_DB_WAL_ROLL_PERIOD;
+ updateWalOptionsDefault(pOptions);
pOptions->walSegmentSize = TSDB_DEFAULT_DB_WAL_SEGMENT_SIZE;
+ pOptions->sstTrigger = TSDB_DEFAULT_SST_TRIGGER;
+ pOptions->tablePrefix = TSDB_DEFAULT_HASH_PREFIX;
+ pOptions->tableSuffix = TSDB_DEFAULT_HASH_SUFFIX;
return (SNode*)pOptions;
}
@@ -843,6 +879,7 @@ SNode* createAlterDatabaseOptions(SAstCreateContext* pCxt) {
pOptions->keep[2] = -1;
pOptions->pages = -1;
pOptions->pagesize = -1;
+ pOptions->tsdbPageSize = -1;
pOptions->precision = -1;
pOptions->replica = -1;
pOptions->strict = -1;
@@ -854,83 +891,103 @@ SNode* createAlterDatabaseOptions(SAstCreateContext* pCxt) {
pOptions->walRetentionSize = -1;
pOptions->walRollPeriod = -1;
pOptions->walSegmentSize = -1;
+ pOptions->sstTrigger = -1;
+ pOptions->tablePrefix = -1;
+ pOptions->tableSuffix = -1;
return (SNode*)pOptions;
}
SNode* setDatabaseOption(SAstCreateContext* pCxt, SNode* pOptions, EDatabaseOptionType type, void* pVal) {
CHECK_PARSER_STATUS(pCxt);
+ SDatabaseOptions* pDbOptions = (SDatabaseOptions*)pOptions;
switch (type) {
case DB_OPTION_BUFFER:
- ((SDatabaseOptions*)pOptions)->buffer = taosStr2Int32(((SToken*)pVal)->z, NULL, 10);
+ pDbOptions->buffer = taosStr2Int32(((SToken*)pVal)->z, NULL, 10);
break;
case DB_OPTION_CACHEMODEL:
- COPY_STRING_FORM_STR_TOKEN(((SDatabaseOptions*)pOptions)->cacheModelStr, (SToken*)pVal);
+ COPY_STRING_FORM_STR_TOKEN(pDbOptions->cacheModelStr, (SToken*)pVal);
break;
case DB_OPTION_CACHESIZE:
- ((SDatabaseOptions*)pOptions)->cacheLastSize = taosStr2Int32(((SToken*)pVal)->z, NULL, 10);
+ pDbOptions->cacheLastSize = taosStr2Int32(((SToken*)pVal)->z, NULL, 10);
break;
case DB_OPTION_COMP:
- ((SDatabaseOptions*)pOptions)->compressionLevel = taosStr2Int8(((SToken*)pVal)->z, NULL, 10);
+ pDbOptions->compressionLevel = taosStr2Int8(((SToken*)pVal)->z, NULL, 10);
break;
case DB_OPTION_DAYS: {
SToken* pToken = pVal;
if (TK_NK_INTEGER == pToken->type) {
- ((SDatabaseOptions*)pOptions)->daysPerFile = taosStr2Int32(pToken->z, NULL, 10) * 1440;
+ pDbOptions->daysPerFile = taosStr2Int32(pToken->z, NULL, 10) * 1440;
} else {
- ((SDatabaseOptions*)pOptions)->pDaysPerFile = (SValueNode*)createDurationValueNode(pCxt, pToken);
+ pDbOptions->pDaysPerFile = (SValueNode*)createDurationValueNode(pCxt, pToken);
}
break;
}
case DB_OPTION_FSYNC:
- ((SDatabaseOptions*)pOptions)->fsyncPeriod = taosStr2Int32(((SToken*)pVal)->z, NULL, 10);
+ pDbOptions->fsyncPeriod = taosStr2Int32(((SToken*)pVal)->z, NULL, 10);
break;
case DB_OPTION_MAXROWS:
- ((SDatabaseOptions*)pOptions)->maxRowsPerBlock = taosStr2Int32(((SToken*)pVal)->z, NULL, 10);
+ pDbOptions->maxRowsPerBlock = taosStr2Int32(((SToken*)pVal)->z, NULL, 10);
break;
case DB_OPTION_MINROWS:
- ((SDatabaseOptions*)pOptions)->minRowsPerBlock = taosStr2Int32(((SToken*)pVal)->z, NULL, 10);
+ pDbOptions->minRowsPerBlock = taosStr2Int32(((SToken*)pVal)->z, NULL, 10);
break;
case DB_OPTION_KEEP:
- ((SDatabaseOptions*)pOptions)->pKeep = pVal;
+ pDbOptions->pKeep = pVal;
break;
case DB_OPTION_PAGES:
- ((SDatabaseOptions*)pOptions)->pages = taosStr2Int32(((SToken*)pVal)->z, NULL, 10);
+ pDbOptions->pages = taosStr2Int32(((SToken*)pVal)->z, NULL, 10);
break;
case DB_OPTION_PAGESIZE:
- ((SDatabaseOptions*)pOptions)->pagesize = taosStr2Int32(((SToken*)pVal)->z, NULL, 10);
+ pDbOptions->pagesize = taosStr2Int32(((SToken*)pVal)->z, NULL, 10);
+ break;
+ case DB_OPTION_TSDB_PAGESIZE:
+ pDbOptions->tsdbPageSize = taosStr2Int32(((SToken*)pVal)->z, NULL, 10);
break;
case DB_OPTION_PRECISION:
- COPY_STRING_FORM_STR_TOKEN(((SDatabaseOptions*)pOptions)->precisionStr, (SToken*)pVal);
+ COPY_STRING_FORM_STR_TOKEN(pDbOptions->precisionStr, (SToken*)pVal);
break;
case DB_OPTION_REPLICA:
- ((SDatabaseOptions*)pOptions)->replica = taosStr2Int8(((SToken*)pVal)->z, NULL, 10);
+ pDbOptions->replica = taosStr2Int8(((SToken*)pVal)->z, NULL, 10);
+ updateWalOptionsDefault(pDbOptions);
break;
case DB_OPTION_STRICT:
- COPY_STRING_FORM_STR_TOKEN(((SDatabaseOptions*)pOptions)->strictStr, (SToken*)pVal);
+ COPY_STRING_FORM_STR_TOKEN(pDbOptions->strictStr, (SToken*)pVal);
break;
case DB_OPTION_WAL:
- ((SDatabaseOptions*)pOptions)->walLevel = taosStr2Int8(((SToken*)pVal)->z, NULL, 10);
+ pDbOptions->walLevel = taosStr2Int8(((SToken*)pVal)->z, NULL, 10);
break;
case DB_OPTION_VGROUPS:
- ((SDatabaseOptions*)pOptions)->numOfVgroups = taosStr2Int32(((SToken*)pVal)->z, NULL, 10);
+ pDbOptions->numOfVgroups = taosStr2Int32(((SToken*)pVal)->z, NULL, 10);
break;
case DB_OPTION_SINGLE_STABLE:
- ((SDatabaseOptions*)pOptions)->singleStable = taosStr2Int8(((SToken*)pVal)->z, NULL, 10);
+ pDbOptions->singleStable = taosStr2Int8(((SToken*)pVal)->z, NULL, 10);
break;
case DB_OPTION_RETENTIONS:
- ((SDatabaseOptions*)pOptions)->pRetentions = pVal;
+ pDbOptions->pRetentions = pVal;
break;
case DB_OPTION_WAL_RETENTION_PERIOD:
- ((SDatabaseOptions*)pOptions)->walRetentionPeriod = taosStr2Int32(((SToken*)pVal)->z, NULL, 10);
+ pDbOptions->walRetentionPeriod = taosStr2Int32(((SToken*)pVal)->z, NULL, 10);
+ pDbOptions->walRetentionPeriodIsSet = true;
break;
case DB_OPTION_WAL_RETENTION_SIZE:
- ((SDatabaseOptions*)pOptions)->walRetentionSize = taosStr2Int32(((SToken*)pVal)->z, NULL, 10);
+ pDbOptions->walRetentionSize = taosStr2Int32(((SToken*)pVal)->z, NULL, 10);
+ pDbOptions->walRetentionSizeIsSet = true;
break;
case DB_OPTION_WAL_ROLL_PERIOD:
- ((SDatabaseOptions*)pOptions)->walRollPeriod = taosStr2Int32(((SToken*)pVal)->z, NULL, 10);
+ pDbOptions->walRollPeriod = taosStr2Int32(((SToken*)pVal)->z, NULL, 10);
+ pDbOptions->walRollPeriodIsSet = true;
break;
case DB_OPTION_WAL_SEGMENT_SIZE:
- ((SDatabaseOptions*)pOptions)->walSegmentSize = taosStr2Int32(((SToken*)pVal)->z, NULL, 10);
+ pDbOptions->walSegmentSize = taosStr2Int32(((SToken*)pVal)->z, NULL, 10);
+ break;
+ case DB_OPTION_STT_TRIGGER:
+ pDbOptions->sstTrigger = taosStr2Int32(((SToken*)pVal)->z, NULL, 10);
+ break;
+ case DB_OPTION_TABLE_PREFIX:
+ pDbOptions->tablePrefix = taosStr2Int32(((SToken*)pVal)->z, NULL, 10);
+ break;
+ case DB_OPTION_TABLE_SUFFIX:
+ pDbOptions->tableSuffix = taosStr2Int32(((SToken*)pVal)->z, NULL, 10);
break;
default:
break;
@@ -998,7 +1055,7 @@ SNode* createFlushDatabaseStmt(SAstCreateContext* pCxt, SToken* pDbName) {
return (SNode*)pStmt;
}
-SNode* createTrimDatabaseStmt(SAstCreateContext* pCxt, SToken* pDbName) {
+SNode* createTrimDatabaseStmt(SAstCreateContext* pCxt, SToken* pDbName, int32_t maxSpeed) {
CHECK_PARSER_STATUS(pCxt);
if (!checkDbName(pCxt, pDbName, false)) {
return NULL;
@@ -1006,6 +1063,7 @@ SNode* createTrimDatabaseStmt(SAstCreateContext* pCxt, SToken* pDbName) {
STrimDatabaseStmt* pStmt = (STrimDatabaseStmt*)nodesMakeNode(QUERY_NODE_TRIM_DATABASE_STMT);
CHECK_OUT_OF_MEM(pStmt);
COPY_STRING_FORM_ID_TOKEN(pStmt->dbName, pDbName);
+ pStmt->maxSpeed = maxSpeed;
return (SNode*)pStmt;
}
@@ -1251,7 +1309,8 @@ SNode* createUseDatabaseStmt(SAstCreateContext* pCxt, SToken* pDbName) {
static bool needDbShowStmt(ENodeType type) {
return QUERY_NODE_SHOW_TABLES_STMT == type || QUERY_NODE_SHOW_STABLES_STMT == type ||
- QUERY_NODE_SHOW_VGROUPS_STMT == type;
+ QUERY_NODE_SHOW_VGROUPS_STMT == type || QUERY_NODE_SHOW_INDEXES_STMT == type ||
+ QUERY_NODE_SHOW_TAGS_STMT == type;
}
SNode* createShowStmt(SAstCreateContext* pCxt, ENodeType type) {
@@ -1264,8 +1323,8 @@ SNode* createShowStmt(SAstCreateContext* pCxt, ENodeType type) {
SNode* createShowStmtWithCond(SAstCreateContext* pCxt, ENodeType type, SNode* pDbName, SNode* pTbName,
EOperatorType tableCondType) {
CHECK_PARSER_STATUS(pCxt);
- if (needDbShowStmt(type) && NULL == pDbName && NULL == pCxt->pQueryCxt->db) {
- snprintf(pCxt->pQueryCxt->pMsg, pCxt->pQueryCxt->msgLen, "db not specified");
+ if (needDbShowStmt(type) && NULL == pDbName) {
+ snprintf(pCxt->pQueryCxt->pMsg, pCxt->pQueryCxt->msgLen, "database not specified");
pCxt->errCode = TSDB_CODE_PAR_SYNTAX_ERROR;
return NULL;
}
@@ -1316,6 +1375,15 @@ SNode* createShowDnodeVariablesStmt(SAstCreateContext* pCxt, SNode* pDnodeId) {
return (SNode*)pStmt;
}
+SNode* createShowVnodesStmt(SAstCreateContext* pCxt, SNode* pDnodeId, SNode* pDnodeEndpoint) {
+ CHECK_PARSER_STATUS(pCxt);
+ SShowVnodesStmt* pStmt = (SShowVnodesStmt*)nodesMakeNode(QUERY_NODE_SHOW_VNODES_STMT);
+ CHECK_OUT_OF_MEM(pStmt);
+ pStmt->pDnodeId = pDnodeId;
+ pStmt->pDnodeEndpoint = pDnodeEndpoint;
+ return (SNode*)pStmt;
+}
+
SNode* createCreateUserStmt(SAstCreateContext* pCxt, SToken* pUserName, const SToken* pPassword, int8_t sysinfo) {
CHECK_PARSER_STATUS(pCxt);
char password[TSDB_USET_PASSWORD_LEN] = {0};
@@ -1633,7 +1701,7 @@ SNode* createStreamOptions(SAstCreateContext* pCxt) {
}
SNode* createCreateStreamStmt(SAstCreateContext* pCxt, bool ignoreExists, const SToken* pStreamName, SNode* pRealTable,
- SNode* pOptions, SNode* pQuery) {
+ SNode* pOptions, SNodeList* pTags, SNode* pSubtable, SNode* pQuery) {
CHECK_PARSER_STATUS(pCxt);
SCreateStreamStmt* pStmt = (SCreateStreamStmt*)nodesMakeNode(QUERY_NODE_CREATE_STREAM_STMT);
CHECK_OUT_OF_MEM(pStmt);
@@ -1646,6 +1714,8 @@ SNode* createCreateStreamStmt(SAstCreateContext* pCxt, bool ignoreExists, const
pStmt->ignoreExists = ignoreExists;
pStmt->pOptions = (SStreamOptions*)pOptions;
pStmt->pQuery = pQuery;
+ pStmt->pTags = pTags;
+ pStmt->pSubtable = pSubtable;
return (SNode*)pStmt;
}
@@ -1740,10 +1810,10 @@ SNode* createRevokeStmt(SAstCreateContext* pCxt, int64_t privileges, SToken* pDb
return (SNode*)pStmt;
}
-SNode* createCountFuncForDelete(SAstCreateContext* pCxt) {
+SNode* createFuncForDelete(SAstCreateContext* pCxt, const char* pFuncName) {
SFunctionNode* pFunc = (SFunctionNode*)nodesMakeNode(QUERY_NODE_FUNCTION);
CHECK_OUT_OF_MEM(pFunc);
- strcpy(pFunc->functionName, "count");
+ strcpy(pFunc->functionName, pFuncName);
if (TSDB_CODE_SUCCESS != nodesListMakeStrictAppend(&pFunc->pParameterList, createPrimaryKeyCol(pCxt, NULL))) {
nodesDestroyNode((SNode*)pFunc);
CHECK_OUT_OF_MEM(NULL);
@@ -1757,8 +1827,10 @@ SNode* createDeleteStmt(SAstCreateContext* pCxt, SNode* pTable, SNode* pWhere) {
CHECK_OUT_OF_MEM(pStmt);
pStmt->pFromTable = pTable;
pStmt->pWhere = pWhere;
- pStmt->pCountFunc = createCountFuncForDelete(pCxt);
- if (NULL == pStmt->pCountFunc) {
+ pStmt->pCountFunc = createFuncForDelete(pCxt, "count");
+ pStmt->pFirstFunc = createFuncForDelete(pCxt, "first");
+ pStmt->pLastFunc = createFuncForDelete(pCxt, "last");
+ if (NULL == pStmt->pCountFunc || NULL == pStmt->pFirstFunc || NULL == pStmt->pLastFunc) {
nodesDestroyNode((SNode*)pStmt);
CHECK_OUT_OF_MEM(NULL);
}
diff --git a/source/libs/parser/src/parAstParser.c b/source/libs/parser/src/parAstParser.c
index 82b5842663a824b08553da9ac3b1044a19ad3ef6..968e03f97e2b9e5afd1153bf72eb66bbf388bd45 100644
--- a/source/libs/parser/src/parAstParser.c
+++ b/source/libs/parser/src/parAstParser.c
@@ -97,16 +97,23 @@ typedef struct SCollectMetaKeyCxt {
typedef struct SCollectMetaKeyFromExprCxt {
SCollectMetaKeyCxt* pComCxt;
+ bool hasLastRow;
int32_t errCode;
} SCollectMetaKeyFromExprCxt;
static int32_t collectMetaKeyFromQuery(SCollectMetaKeyCxt* pCxt, SNode* pStmt);
static EDealRes collectMetaKeyFromFunction(SCollectMetaKeyFromExprCxt* pCxt, SFunctionNode* pFunc) {
- if (fmIsBuiltinFunc(pFunc->functionName)) {
- return DEAL_RES_CONTINUE;
+ switch (fmGetFuncType(pFunc->functionName)) {
+ case FUNCTION_TYPE_LAST_ROW:
+ pCxt->hasLastRow = true;
+ break;
+ case FUNCTION_TYPE_UDF:
+ pCxt->errCode = reserveUdfInCache(pFunc->functionName, pCxt->pComCxt->pMetaCache);
+ break;
+ default:
+ break;
}
- pCxt->errCode = reserveUdfInCache(pFunc->functionName, pCxt->pComCxt->pMetaCache);
return TSDB_CODE_SUCCESS == pCxt->errCode ? DEAL_RES_CONTINUE : DEAL_RES_ERROR;
}
@@ -136,9 +143,6 @@ static int32_t collectMetaKeyFromRealTableImpl(SCollectMetaKeyCxt* pCxt, const c
if (TSDB_CODE_SUCCESS == code && (0 == strcmp(pTable, TSDB_INS_TABLE_DNODE_VARIABLES))) {
code = reserveDnodeRequiredInCache(pCxt->pMetaCache);
}
- if (TSDB_CODE_SUCCESS == code) {
- code = reserveDbCfgInCache(pCxt->pParseCxt->acctId, pDb, pCxt->pMetaCache);
- }
return code;
}
@@ -185,9 +189,19 @@ static int32_t collectMetaKeyFromSetOperator(SCollectMetaKeyCxt* pCxt, SSetOpera
return code;
}
+static int32_t reserveDbCfgForLastRow(SCollectMetaKeyCxt* pCxt, SNode* pTable) {
+ if (NULL == pTable || QUERY_NODE_REAL_TABLE != nodeType(pTable)) {
+ return TSDB_CODE_SUCCESS;
+ }
+ return reserveDbCfgInCache(pCxt->pParseCxt->acctId, ((SRealTableNode*)pTable)->table.dbName, pCxt->pMetaCache);
+}
+
static int32_t collectMetaKeyFromSelect(SCollectMetaKeyCxt* pCxt, SSelectStmt* pStmt) {
- SCollectMetaKeyFromExprCxt cxt = {.pComCxt = pCxt, .errCode = TSDB_CODE_SUCCESS};
+ SCollectMetaKeyFromExprCxt cxt = {.pComCxt = pCxt, .hasLastRow = false, .errCode = TSDB_CODE_SUCCESS};
nodesWalkSelectStmt(pStmt, SQL_CLAUSE_FROM, collectMetaKeyFromExprImpl, &cxt);
+ if (TSDB_CODE_SUCCESS == cxt.errCode && cxt.hasLastRow) {
+ cxt.errCode = reserveDbCfgForLastRow(pCxt, pStmt->pFromTable);
+ }
return cxt.errCode;
}
@@ -360,12 +374,17 @@ static int32_t collectMetaKeyFromShowIndexes(SCollectMetaKeyCxt* pCxt, SShowStmt
}
static int32_t collectMetaKeyFromShowStables(SCollectMetaKeyCxt* pCxt, SShowStmt* pStmt) {
- return reserveTableMetaInCache(pCxt->pParseCxt->acctId, TSDB_INFORMATION_SCHEMA_DB, TSDB_INS_TABLE_STABLES,
- pCxt->pMetaCache);
+ int32_t code = reserveTableMetaInCache(pCxt->pParseCxt->acctId, TSDB_INFORMATION_SCHEMA_DB, TSDB_INS_TABLE_STABLES,
+ pCxt->pMetaCache);
+ if (TSDB_CODE_SUCCESS == code) {
+ code = reserveUserAuthInCache(pCxt->pParseCxt->acctId, pCxt->pParseCxt->pUser,
+ ((SValueNode*)pStmt->pDbName)->literal, AUTH_TYPE_READ, pCxt->pMetaCache);
+ }
+ return code;
}
static int32_t collectMetaKeyFromShowStreams(SCollectMetaKeyCxt* pCxt, SShowStmt* pStmt) {
- return reserveTableMetaInCache(pCxt->pParseCxt->acctId, TSDB_PERFORMANCE_SCHEMA_DB, TSDB_PERFS_TABLE_STREAMS,
+ return reserveTableMetaInCache(pCxt->pParseCxt->acctId, TSDB_INFORMATION_SCHEMA_DB, TSDB_INS_TABLE_STREAMS,
pCxt->pMetaCache);
}
@@ -373,11 +392,11 @@ static int32_t collectMetaKeyFromShowTables(SCollectMetaKeyCxt* pCxt, SShowStmt*
int32_t code = reserveTableMetaInCache(pCxt->pParseCxt->acctId, TSDB_INFORMATION_SCHEMA_DB, TSDB_INS_TABLE_TABLES,
pCxt->pMetaCache);
if (TSDB_CODE_SUCCESS == code) {
- if (NULL != pStmt->pDbName) {
- code = reserveDbVgInfoInCache(pCxt->pParseCxt->acctId, ((SValueNode*)pStmt->pDbName)->literal, pCxt->pMetaCache);
- } else {
- code = reserveDbVgInfoInCache(pCxt->pParseCxt->acctId, TSDB_INFORMATION_SCHEMA_DB, pCxt->pMetaCache);
- }
+ code = reserveDbVgInfoInCache(pCxt->pParseCxt->acctId, ((SValueNode*)pStmt->pDbName)->literal, pCxt->pMetaCache);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = reserveUserAuthInCache(pCxt->pParseCxt->acctId, pCxt->pParseCxt->pUser,
+ ((SValueNode*)pStmt->pDbName)->literal, AUTH_TYPE_READ, pCxt->pMetaCache);
}
return code;
}
@@ -386,11 +405,11 @@ static int32_t collectMetaKeyFromShowTags(SCollectMetaKeyCxt* pCxt, SShowStmt* p
int32_t code = reserveTableMetaInCache(pCxt->pParseCxt->acctId, TSDB_INFORMATION_SCHEMA_DB, TSDB_INS_TABLE_TAGS,
pCxt->pMetaCache);
if (TSDB_CODE_SUCCESS == code) {
- if (NULL != pStmt->pDbName) {
- code = reserveDbVgInfoInCache(pCxt->pParseCxt->acctId, ((SValueNode*)pStmt->pDbName)->literal, pCxt->pMetaCache);
- } else {
- code = reserveDbVgInfoInCache(pCxt->pParseCxt->acctId, TSDB_INFORMATION_SCHEMA_DB, pCxt->pMetaCache);
- }
+ code = reserveDbVgInfoInCache(pCxt->pParseCxt->acctId, ((SValueNode*)pStmt->pDbName)->literal, pCxt->pMetaCache);
+ }
+ if (TSDB_CODE_SUCCESS == code && NULL != pStmt->pTbName) {
+ code = reserveTableVgroupInCache(pCxt->pParseCxt->acctId, ((SValueNode*)pStmt->pDbName)->literal,
+ ((SValueNode*)pStmt->pTbName)->literal, pCxt->pMetaCache);
}
return code;
}
@@ -411,7 +430,7 @@ static int32_t collectMetaKeyFromShowVgroups(SCollectMetaKeyCxt* pCxt, SShowStmt
}
static int32_t collectMetaKeyFromShowTopics(SCollectMetaKeyCxt* pCxt, SShowStmt* pStmt) {
- return reserveTableMetaInCache(pCxt->pParseCxt->acctId, TSDB_PERFORMANCE_SCHEMA_DB, TSDB_PERFS_TABLE_TOPICS,
+ return reserveTableMetaInCache(pCxt->pParseCxt->acctId, TSDB_INFORMATION_SCHEMA_DB, TSDB_INS_TABLE_TOPICS,
pCxt->pMetaCache);
}
@@ -449,6 +468,11 @@ static int32_t collectMetaKeyFromShowDnodeVariables(SCollectMetaKeyCxt* pCxt, SS
return code;
}
+static int32_t collectMetaKeyFromShowVnodes(SCollectMetaKeyCxt* pCxt, SShowVnodesStmt* pStmt) {
+ return reserveTableMetaInCache(pCxt->pParseCxt->acctId, TSDB_INFORMATION_SCHEMA_DB, TSDB_INS_TABLE_VNODES,
+ pCxt->pMetaCache);
+}
+
static int32_t collectMetaKeyFromShowCreateDatabase(SCollectMetaKeyCxt* pCxt, SShowCreateDatabaseStmt* pStmt) {
return reserveDbCfgInCache(pCxt->pParseCxt->acctId, pStmt->dbName, pCxt->pMetaCache);
}
@@ -506,7 +530,7 @@ static int32_t collectMetaKeyFromShowBlockDist(SCollectMetaKeyCxt* pCxt, SShowTa
}
static int32_t collectMetaKeyFromShowSubscriptions(SCollectMetaKeyCxt* pCxt, SShowStmt* pStmt) {
- return reserveTableMetaInCache(pCxt->pParseCxt->acctId, TSDB_PERFORMANCE_SCHEMA_DB, TSDB_PERFS_TABLE_SUBSCRIPTIONS,
+ return reserveTableMetaInCache(pCxt->pParseCxt->acctId, TSDB_INFORMATION_SCHEMA_DB, TSDB_INS_TABLE_SUBSCRIPTIONS,
pCxt->pMetaCache);
}
@@ -589,6 +613,8 @@ static int32_t collectMetaKeyFromQuery(SCollectMetaKeyCxt* pCxt, SNode* pStmt) {
return collectMetaKeyFromShowVariables(pCxt, (SShowStmt*)pStmt);
case QUERY_NODE_SHOW_DNODE_VARIABLES_STMT:
return collectMetaKeyFromShowDnodeVariables(pCxt, (SShowDnodeVariablesStmt*)pStmt);
+ case QUERY_NODE_SHOW_VNODES_STMT:
+ return collectMetaKeyFromShowVnodes(pCxt, (SShowVnodesStmt*)pStmt);
case QUERY_NODE_SHOW_CREATE_DATABASE_STMT:
return collectMetaKeyFromShowCreateDatabase(pCxt, (SShowCreateDatabaseStmt*)pStmt);
case QUERY_NODE_SHOW_CREATE_TABLE_STMT:
diff --git a/source/libs/parser/src/parAuthenticator.c b/source/libs/parser/src/parAuthenticator.c
index d9a5761d99b7f04d5f2d4d9604ceee3faf76a896..9d73be745468c33f6041f5f6bb2a9cd9bfb51b52 100644
--- a/source/libs/parser/src/parAuthenticator.c
+++ b/source/libs/parser/src/parAuthenticator.c
@@ -96,6 +96,10 @@ static int32_t authInsert(SAuthCxt* pCxt, SInsertStmt* pInsert) {
return code;
}
+static int32_t authShowTables(SAuthCxt* pCxt, SShowStmt* pStmt) {
+ return checkAuth(pCxt, ((SValueNode*)pStmt->pDbName)->literal, AUTH_TYPE_READ);
+}
+
static int32_t authShowCreateTable(SAuthCxt* pCxt, SShowCreateTableStmt* pStmt) {
return checkAuth(pCxt, pStmt->dbName, AUTH_TYPE_READ);
}
@@ -127,6 +131,9 @@ static int32_t authQuery(SAuthCxt* pCxt, SNode* pStmt) {
case QUERY_NODE_SHOW_VNODES_STMT:
case QUERY_NODE_SHOW_SCORES_STMT:
return !pCxt->pParseCxt->enableSysInfo ? TSDB_CODE_PAR_PERMISSION_DENIED : TSDB_CODE_SUCCESS;
+ case QUERY_NODE_SHOW_TABLES_STMT:
+ case QUERY_NODE_SHOW_STABLES_STMT:
+ return authShowTables(pCxt, (SShowStmt*)pStmt);
case QUERY_NODE_SHOW_CREATE_TABLE_STMT:
case QUERY_NODE_SHOW_CREATE_STABLE_STMT:
return authShowCreateTable(pCxt, (SShowCreateTableStmt*)pStmt);
diff --git a/source/libs/parser/src/parCalcConst.c b/source/libs/parser/src/parCalcConst.c
index a7c08d8f659c2477c8c28c9711d687176a9477ae..efc9d77e67b8865b16409894920609277b38c1db 100644
--- a/source/libs/parser/src/parCalcConst.c
+++ b/source/libs/parser/src/parCalcConst.c
@@ -275,6 +275,12 @@ static int32_t calcConstSelectFrom(SCalcConstContext* pCxt, SSelectStmt* pSelect
if (TSDB_CODE_SUCCESS == code) {
code = calcConstList(pSelect->pPartitionByList);
}
+ if (TSDB_CODE_SUCCESS == code) {
+ code = calcConstList(pSelect->pTags);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = calcConstNode(&pSelect->pSubtable);
+ }
if (TSDB_CODE_SUCCESS == code) {
code = calcConstNode(&pSelect->pWindow);
}
diff --git a/source/libs/parser/src/parInsert.c b/source/libs/parser/src/parInsert.c
index c9115d90e18a73ff6328e877eaddfeb0e2c72b36..4fb55ed373bfdc849853184bd19916c388123ae2 100644
--- a/source/libs/parser/src/parInsert.c
+++ b/source/libs/parser/src/parInsert.c
@@ -502,6 +502,10 @@ static int32_t parseValueToken(char** end, SToken* pToken, SSchema* pSchema, int
return func(pMsgBuf, NULL, 0, param);
}
+ if (IS_NUMERIC_TYPE(pSchema->type) && pToken->n == 0) {
+ return buildSyntaxErrMsg(pMsgBuf, "invalid numeric data", pToken->z);
+ }
+
switch (pSchema->type) {
case TSDB_DATA_TYPE_BOOL: {
if ((pToken->type == TK_NK_BOOL || pToken->type == TK_NK_STRING) && (pToken->n != 0)) {
@@ -782,6 +786,7 @@ static void buildCreateTbReq(SVCreateTbReq* pTbReq, const char* tname, STag* pTa
if (sname) pTbReq->ctb.name = strdup(sname);
pTbReq->ctb.pTag = (uint8_t*)pTag;
pTbReq->ctb.tagName = taosArrayDup(tagName);
+ pTbReq->ttl = TSDB_DEFAULT_TABLE_TTL;
pTbReq->commentLen = -1;
return;
@@ -1054,7 +1059,7 @@ end:
for (int i = 0; i < taosArrayGetSize(pTagVals); ++i) {
STagVal* p = (STagVal*)taosArrayGet(pTagVals, i);
if (IS_VAR_DATA_TYPE(p->type)) {
- taosMemoryFree(p->pData);
+ taosMemoryFreeClear(p->pData);
}
}
taosArrayDestroy(pTagVals);
@@ -1117,6 +1122,43 @@ static int32_t ignoreAutoCreateTableClause(SInsertParseContext* pCxt) {
return code;
}
+static int32_t parseTableOptions(SInsertParseContext* pCxt) {
+ do {
+ int32_t index = 0;
+ SToken sToken;
+ NEXT_TOKEN_KEEP_SQL(pCxt->pSql, sToken, index);
+ if (TK_TTL == sToken.type) {
+ pCxt->pSql += index;
+ NEXT_TOKEN_WITH_PREV(pCxt->pSql, sToken);
+ if (TK_NK_INTEGER != sToken.type) {
+ return buildSyntaxErrMsg(&pCxt->msg, "Invalid option ttl", sToken.z);
+ }
+ pCxt->createTblReq.ttl = taosStr2Int32(sToken.z, NULL, 10);
+ if (pCxt->createTblReq.ttl < 0) {
+ return buildSyntaxErrMsg(&pCxt->msg, "Invalid option ttl", sToken.z);
+ }
+ } else if (TK_COMMENT == sToken.type) {
+ pCxt->pSql += index;
+ NEXT_TOKEN(pCxt->pSql, sToken);
+ if (TK_NK_STRING != sToken.type) {
+ return buildSyntaxErrMsg(&pCxt->msg, "Invalid option comment", sToken.z);
+ }
+ if (sToken.n >= TSDB_TB_COMMENT_LEN) {
+ return buildSyntaxErrMsg(&pCxt->msg, "comment too long", sToken.z);
+ }
+ int32_t len = trimString(sToken.z, sToken.n, pCxt->tmpTokenBuf, TSDB_TB_COMMENT_LEN);
+ pCxt->createTblReq.comment = strndup(pCxt->tmpTokenBuf, len);
+ if (NULL == pCxt->createTblReq.comment) {
+ return TSDB_CODE_OUT_OF_MEMORY;
+ }
+ pCxt->createTblReq.commentLen = len;
+ } else {
+ break;
+ }
+ } while (1);
+ return TSDB_CODE_SUCCESS;
+}
+
// pSql -> stb_name [(tag1_name, ...)] TAGS (tag1_value, ...)
static int32_t parseUsingClause(SInsertParseContext* pCxt, int32_t tbNo, SName* name, char* tbFName) {
int32_t len = strlen(tbFName);
@@ -1168,7 +1210,7 @@ static int32_t parseUsingClause(SInsertParseContext* pCxt, int32_t tbNo, SName*
return buildSyntaxErrMsg(&pCxt->msg, ") is expected", sToken.z);
}
- return TSDB_CODE_SUCCESS;
+ return parseTableOptions(pCxt);
}
static int parseOneRow(SInsertParseContext* pCxt, STableDataBlocks* pDataBlocks, int16_t timePrec, bool* gotRow,
@@ -1381,9 +1423,7 @@ static int32_t parseDataFromFile(SInsertParseContext* pCxt, SToken filePath, STa
}
static void destroyInsertParseContextForTable(SInsertParseContext* pCxt) {
- if (!pCxt->pComCxt->async) {
- taosMemoryFreeClear(pCxt->pTableMeta);
- }
+ taosMemoryFreeClear(pCxt->pTableMeta);
destroyBoundColumnInfo(&pCxt->tags);
tdDestroySVCreateTbReq(&pCxt->createTblReq);
}
@@ -1497,6 +1537,9 @@ static int32_t parseInsertBody(SInsertParseContext* pCxt) {
autoCreateTbl = true;
} else if (!existedUsing) {
CHECK_CODE(getTableMeta(pCxt, tbNum, &name, dbFName));
+ if (TSDB_SUPER_TABLE == pCxt->pTableMeta->tableType) {
+ return buildInvalidOperationMsg(&pCxt->msg, "insert data into super table is not supported");
+ }
}
STableDataBlocks* dataBuf = NULL;
@@ -1665,6 +1708,10 @@ int32_t parseInsertSql(SParseContext* pContext, SQuery** pQuery, SParseMetaCache
pDb = taosHashIterate(context.pDbFNameHashObj, pDb);
}
}
+ if (pContext->pStmtCb) {
+ context.pVgroupsHashObj = NULL;
+ context.pTableBlockHashObj = NULL;
+ }
destroyInsertParseContext(&context);
return code;
}
@@ -1692,6 +1739,21 @@ static int32_t skipValuesClause(SInsertParseSyntaxCxt* pCxt) {
static int32_t skipTagsClause(SInsertParseSyntaxCxt* pCxt) { return skipParentheses(pCxt); }
+static int32_t skipTableOptions(SInsertParseSyntaxCxt* pCxt) {
+ do {
+ int32_t index = 0;
+ SToken sToken;
+ NEXT_TOKEN_KEEP_SQL(pCxt->pSql, sToken, index);
+ if (TK_TTL == sToken.type || TK_COMMENT == sToken.type) {
+ pCxt->pSql += index;
+ NEXT_TOKEN_WITH_PREV(pCxt->pSql, sToken);
+ } else {
+ break;
+ }
+ } while (1);
+ return TSDB_CODE_SUCCESS;
+}
+
// pSql -> [(tag1_name, ...)] TAGS (tag1_value, ...)
static int32_t skipUsingClause(SInsertParseSyntaxCxt* pCxt) {
SToken sToken;
@@ -1710,6 +1772,7 @@ static int32_t skipUsingClause(SInsertParseSyntaxCxt* pCxt) {
return buildSyntaxErrMsg(&pCxt->msg, "( is expected", sToken.z);
}
CHECK_CODE(skipTagsClause(pCxt));
+ CHECK_CODE(skipTableOptions(pCxt));
return TSDB_CODE_SUCCESS;
}
@@ -1980,7 +2043,7 @@ end:
for (int i = 0; i < taosArrayGetSize(pTagArray); ++i) {
STagVal* p = (STagVal*)taosArrayGet(pTagArray, i);
if (p->type == TSDB_DATA_TYPE_NCHAR) {
- taosMemoryFree(p->pData);
+ taosMemoryFreeClear(p->pData);
}
}
taosArrayDestroy(pTagArray);
@@ -2253,7 +2316,7 @@ static int32_t smlBoundColumnData(SArray* cols, SParsedDataColInfo* pColList, SS
SToken sToken = {.n = kv->keyLen, .z = (char*)kv->key};
col_id_t t = lastColIdx + 1;
col_id_t index = ((t == 0 && !isTag) ? 0 : findCol(&sToken, t, nCols, pSchema));
- uDebug("SML, index:%d, t:%d, ncols:%d, kv->name:%s", index, t, nCols, kv->key);
+ uDebug("SML, index:%d, t:%d, ncols:%d", index, t, nCols);
if (index < 0 && t > 0) {
index = findCol(&sToken, 0, t, pSchema);
isOrdered = false;
@@ -2474,9 +2537,7 @@ int32_t smlBindData(void* handle, SArray* tags, SArray* colsSchema, SArray* cols
if (p) kv = *p;
}
- if (!kv || kv->length == 0) {
- MemRowAppend(&pBuf, NULL, 0, ¶m);
- } else {
+ if (kv) {
int32_t colLen = kv->length;
if (pColSchema->type == TSDB_DATA_TYPE_TIMESTAMP) {
// uError("SML:data before:%" PRId64 ", precision:%d", kv->i, pTableMeta->tableInfo.precision);
@@ -2489,6 +2550,8 @@ int32_t smlBindData(void* handle, SArray* tags, SArray* colsSchema, SArray* cols
} else {
MemRowAppend(&pBuf, &(kv->value), colLen, ¶m);
}
+ } else {
+ pBuilder->hasNone = true;
}
if (PRIMARYKEY_TIMESTAMP_COL_ID == pColSchema->colId) {
diff --git a/source/libs/parser/src/parInsertData.c b/source/libs/parser/src/parInsertData.c
index 3ea5e81872f1562f09e35226c69c5a5295d684ab..e8c877bed2d69346cd718625688b0db3de85ae69 100644
--- a/source/libs/parser/src/parInsertData.c
+++ b/source/libs/parser/src/parInsertData.c
@@ -505,7 +505,7 @@ static int32_t tdBlockRowMerge(STableMeta* pTableMeta, SBlockKeyTuple* pEndKeyTp
SColVal colVal = {0};
for (int32_t j = 0; j < nDupRows; ++j) {
tTSRowGetVal((pEndKeyTp - j)->payloadAddr, pSchema, i, &colVal);
- if (!colVal.isNone) {
+ if (!COL_VAL_IS_NONE(&colVal)) {
break;
}
}
diff --git a/source/libs/parser/src/parTokenizer.c b/source/libs/parser/src/parTokenizer.c
index 80ec447f66157dd00e1081ee87e431a58b5998cd..64510773a2d394e1164de85f93b5a292ab115c28 100644
--- a/source/libs/parser/src/parTokenizer.c
+++ b/source/libs/parser/src/parTokenizer.c
@@ -54,6 +54,7 @@ static SKeyword keywordTable[] = {
{"CACHE", TK_CACHE},
{"CACHEMODEL", TK_CACHEMODEL},
{"CACHESIZE", TK_CACHESIZE},
+ {"CASE", TK_CASE},
{"CAST", TK_CAST},
{"CLIENT_VERSION", TK_CLIENT_VERSION},
{"CLUSTER", TK_CLUSTER},
@@ -82,7 +83,9 @@ static SKeyword keywordTable[] = {
{"DOUBLE", TK_DOUBLE},
{"DROP", TK_DROP},
{"DURATION", TK_DURATION},
+ {"ELSE", TK_ELSE},
{"ENABLE", TK_ENABLE},
+ {"END", TK_END},
{"EXISTS", TK_EXISTS},
{"EXPIRED", TK_EXPIRED},
{"EXPLAIN", TK_EXPLAIN},
@@ -126,6 +129,7 @@ static SKeyword keywordTable[] = {
{"MATCH", TK_MATCH},
{"MAXROWS", TK_MAXROWS},
{"MAX_DELAY", TK_MAX_DELAY},
+ {"MAX_SPEED", TK_MAX_SPEED},
{"MERGE", TK_MERGE},
{"META", TK_META},
{"MINROWS", TK_MINROWS},
@@ -187,6 +191,7 @@ static SKeyword keywordTable[] = {
{"SNODES", TK_SNODES},
{"SOFFSET", TK_SOFFSET},
{"SPLIT", TK_SPLIT},
+ {"STT_TRIGGER", TK_STT_TRIGGER},
{"STABLE", TK_STABLE},
{"STABLES", TK_STABLES},
{"STATE", TK_STATE},
@@ -196,12 +201,16 @@ static SKeyword keywordTable[] = {
{"STREAMS", TK_STREAMS},
{"STRICT", TK_STRICT},
{"SUBSCRIPTIONS", TK_SUBSCRIPTIONS},
+ {"SUBTABLE", TK_SUBTABLE},
{"SYSINFO", TK_SYSINFO},
{"TABLE", TK_TABLE},
{"TABLES", TK_TABLES},
+ {"TABLE_PREFIX", TK_TABLE_PREFIX},
+ {"TABLE_SUFFIX", TK_TABLE_SUFFIX},
{"TAG", TK_TAG},
{"TAGS", TK_TAGS},
{"TBNAME", TK_TBNAME},
+ {"THEN", TK_THEN},
{"TIMESTAMP", TK_TIMESTAMP},
{"TIMEZONE", TK_TIMEZONE},
{"TINYINT", TK_TINYINT},
@@ -213,6 +222,7 @@ static SKeyword keywordTable[] = {
{"TRANSACTIONS", TK_TRANSACTIONS},
{"TRIGGER", TK_TRIGGER},
{"TRIM", TK_TRIM},
+ {"TSDB_PAGESIZE", TK_TSDB_PAGESIZE},
{"TSERIES", TK_TSERIES},
{"TTL", TK_TTL},
{"UNION", TK_UNION},
@@ -228,6 +238,7 @@ static SKeyword keywordTable[] = {
{"VERBOSE", TK_VERBOSE},
{"VGROUP", TK_VGROUP},
{"VGROUPS", TK_VGROUPS},
+ {"VNODES", TK_VNODES},
{"WAL_FSYNC_PERIOD", TK_WAL_FSYNC_PERIOD},
{"WAL_LEVEL", TK_WAL_LEVEL},
{"WAL_RETENTION_PERIOD", TK_WAL_RETENTION_PERIOD},
@@ -235,11 +246,13 @@ static SKeyword keywordTable[] = {
{"WAL_ROLL_PERIOD", TK_WAL_ROLL_PERIOD},
{"WAL_SEGMENT_SIZE", TK_WAL_SEGMENT_SIZE},
{"WATERMARK", TK_WATERMARK},
+ {"WHEN", TK_WHEN},
{"WHERE", TK_WHERE},
{"WINDOW_CLOSE", TK_WINDOW_CLOSE},
{"WITH", TK_WITH},
{"WRITE", TK_WRITE},
{"_C0", TK_ROWTS},
+ {"_IROWTS", TK_IROWTS},
{"_QDURATION", TK_QDURATION},
{"_QEND", TK_QEND},
{"_QSTART", TK_QSTART},
diff --git a/source/libs/parser/src/parTranslater.c b/source/libs/parser/src/parTranslater.c
index 5a32c87fc3da2f2c1eaa7760ed2fa05fdb560e1f..eb63630dbf3089d1378675752a977b2092f7857a 100644
--- a/source/libs/parser/src/parTranslater.c
+++ b/source/libs/parser/src/parTranslater.c
@@ -142,8 +142,8 @@ static const SSysTableShowAdapter sysTableShowAdapter[] = {
},
{
.showType = QUERY_NODE_SHOW_STREAMS_STMT,
- .pDbName = TSDB_PERFORMANCE_SCHEMA_DB,
- .pTableName = TSDB_PERFS_TABLE_STREAMS,
+ .pDbName = TSDB_INFORMATION_SCHEMA_DB,
+ .pTableName = TSDB_INS_TABLE_STREAMS,
.numOfShowCols = 1,
.pShowCols = {"stream_name"}
},
@@ -184,8 +184,8 @@ static const SSysTableShowAdapter sysTableShowAdapter[] = {
},
{
.showType = QUERY_NODE_SHOW_TOPICS_STMT,
- .pDbName = TSDB_PERFORMANCE_SCHEMA_DB,
- .pTableName = TSDB_PERFS_TABLE_TOPICS,
+ .pDbName = TSDB_INFORMATION_SCHEMA_DB,
+ .pTableName = TSDB_INS_TABLE_TOPICS,
.numOfShowCols = 1,
.pShowCols = {"topic_name"}
},
@@ -240,8 +240,14 @@ static const SSysTableShowAdapter sysTableShowAdapter[] = {
},
{
.showType = QUERY_NODE_SHOW_SUBSCRIPTIONS_STMT,
- .pDbName = TSDB_PERFORMANCE_SCHEMA_DB,
- .pTableName = TSDB_PERFS_TABLE_SUBSCRIPTIONS,
+ .pDbName = TSDB_INFORMATION_SCHEMA_DB,
+ .pTableName = TSDB_INS_TABLE_SUBSCRIPTIONS,
+ .numOfShowCols = 1,
+ .pShowCols = {"*"}
+ },
+ { .showType = QUERY_NODE_SHOW_VNODES_STMT,
+ .pDbName = TSDB_INFORMATION_SCHEMA_DB,
+ .pTableName = TSDB_INS_TABLE_VNODES,
.numOfShowCols = 1,
.pShowCols = {"*"}
},
@@ -258,6 +264,8 @@ static bool beforeHaving(ESqlClause clause) { return clause < SQL_CLAUSE_HAVING;
static bool afterHaving(ESqlClause clause) { return clause > SQL_CLAUSE_HAVING; }
+static bool beforeWindow(ESqlClause clause) { return clause < SQL_CLAUSE_WINDOW; }
+
static bool hasSameTableAlias(SArray* pTables) {
if (taosArrayGetSize(pTables) < 2) {
return false;
@@ -1277,6 +1285,36 @@ static int32_t rewriteCountStar(STranslateContext* pCxt, SFunctionNode* pCount)
return code;
}
+static bool isCountTbname(SFunctionNode* pFunc) {
+ if (FUNCTION_TYPE_COUNT != pFunc->funcType || 1 != LIST_LENGTH(pFunc->pParameterList)) {
+ return false;
+ }
+ SNode* pPara = nodesListGetNode(pFunc->pParameterList, 0);
+ return (QUERY_NODE_FUNCTION == nodeType(pPara) && FUNCTION_TYPE_TBNAME == ((SFunctionNode*)pPara)->funcType);
+}
+
+// count(tbname) is rewritten as count(ts) for scannning optimization
+static int32_t rewriteCountTbname(STranslateContext* pCxt, SFunctionNode* pCount) {
+ SFunctionNode* pTbname = (SFunctionNode*)nodesListGetNode(pCount->pParameterList, 0);
+ const char* pTableAlias = NULL;
+ if (LIST_LENGTH(pTbname->pParameterList) > 0) {
+ pTableAlias = ((SValueNode*)nodesListGetNode(pTbname->pParameterList, 0))->literal;
+ }
+ STableNode* pTable = NULL;
+ int32_t code = findTable(pCxt, pTableAlias, &pTable);
+ if (TSDB_CODE_SUCCESS == code) {
+ SColumnNode* pCol = (SColumnNode*)nodesMakeNode(QUERY_NODE_COLUMN);
+ if (NULL == pCol) {
+ code = TSDB_CODE_OUT_OF_MEMORY;
+ } else {
+ setColumnInfoBySchema((SRealTableNode*)pTable, ((SRealTableNode*)pTable)->pMeta->schema, -1, pCol);
+ NODES_DESTORY_LIST(pCount->pParameterList);
+ code = nodesListMakeAppend(&pCount->pParameterList, (SNode*)pCol);
+ }
+ }
+ return code;
+}
+
static bool hasInvalidFuncNesting(SNodeList* pParameterList) {
bool hasInvalidFunc = false;
nodesWalkExprs(pParameterList, haveVectorFunction, &hasInvalidFunc);
@@ -1312,6 +1350,9 @@ static int32_t translateAggFunc(STranslateContext* pCxt, SFunctionNode* pFunc) {
if (isCountStar(pFunc)) {
return rewriteCountStar(pCxt, pFunc);
}
+ if (isCountTbname(pFunc)) {
+ return rewriteCountTbname(pCxt, pFunc);
+ }
return TSDB_CODE_SUCCESS;
}
@@ -1437,6 +1478,10 @@ static int32_t translateWindowPseudoColumnFunc(STranslateContext* pCxt, SFunctio
if (!isSelectStmt(pCxt->pCurrStmt) || NULL == ((SSelectStmt*)pCxt->pCurrStmt)->pWindow) {
return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_WINDOW_PC);
}
+ if (beforeWindow(pCxt->currClause)) {
+ return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_WINDOW_PC, "There mustn't be %s",
+ pFunc->functionName);
+ }
return TSDB_CODE_SUCCESS;
}
@@ -1511,6 +1556,9 @@ static int32_t translateMultiResFunc(STranslateContext* pCxt, SFunctionNode* pFu
"%s(*) is only supported in SELECTed list", pFunc->functionName);
}
}
+ if (tsKeepColumnName) {
+ strcpy(pFunc->node.userAlias, ((SExprNode*)nodesListGetNode(pFunc->pParameterList, 0))->userAlias);
+ }
return TSDB_CODE_SUCCESS;
}
@@ -1768,6 +1816,59 @@ static EDealRes translateLogicCond(STranslateContext* pCxt, SLogicConditionNode*
return DEAL_RES_CONTINUE;
}
+static int32_t createCastFunc(STranslateContext* pCxt, SNode* pExpr, SDataType dt, SNode** pCast) {
+ SFunctionNode* pFunc = (SFunctionNode*)nodesMakeNode(QUERY_NODE_FUNCTION);
+ if (NULL == pFunc) {
+ return TSDB_CODE_OUT_OF_MEMORY;
+ }
+ strcpy(pFunc->functionName, "cast");
+ pFunc->node.resType = dt;
+ if (TSDB_CODE_SUCCESS != nodesListMakeAppend(&pFunc->pParameterList, pExpr)) {
+ nodesDestroyNode((SNode*)pFunc);
+ return TSDB_CODE_OUT_OF_MEMORY;
+ }
+ if (TSDB_CODE_SUCCESS != getFuncInfo(pCxt, pFunc)) {
+ nodesClearList(pFunc->pParameterList);
+ pFunc->pParameterList = NULL;
+ nodesDestroyNode((SNode*)pFunc);
+ return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_WRONG_VALUE_TYPE, ((SExprNode*)pExpr)->aliasName);
+ }
+ *pCast = (SNode*)pFunc;
+ return TSDB_CODE_SUCCESS;
+}
+
+static EDealRes translateWhenThen(STranslateContext* pCxt, SWhenThenNode* pWhenThen) {
+ pWhenThen->node.resType = ((SExprNode*)pWhenThen->pThen)->resType;
+ return DEAL_RES_CONTINUE;
+}
+
+static EDealRes translateCaseWhen(STranslateContext* pCxt, SCaseWhenNode* pCaseWhen) {
+ bool first = true;
+ SNode* pNode = NULL;
+ FOREACH(pNode, pCaseWhen->pWhenThenList) {
+ if (first) {
+ pCaseWhen->node.resType = ((SExprNode*)pNode)->resType;
+ } else if (!dataTypeEqual(&pCaseWhen->node.resType, &((SExprNode*)pNode)->resType)) {
+ SWhenThenNode* pWhenThen = (SWhenThenNode*)pNode;
+ SNode* pCastFunc = NULL;
+ if (TSDB_CODE_SUCCESS != createCastFunc(pCxt, pWhenThen->pThen, pCaseWhen->node.resType, &pCastFunc)) {
+ return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_WRONG_VALUE_TYPE, "CASE WHEN data type mismatch");
+ }
+ pWhenThen->pThen = pCastFunc;
+ pWhenThen->node.resType = pCaseWhen->node.resType;
+ }
+ }
+ if (NULL != pCaseWhen->pElse && !dataTypeEqual(&pCaseWhen->node.resType, &((SExprNode*)pCaseWhen->pElse)->resType)) {
+ SNode* pCastFunc = NULL;
+ if (TSDB_CODE_SUCCESS != createCastFunc(pCxt, pCaseWhen->pElse, pCaseWhen->node.resType, &pCastFunc)) {
+ return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_WRONG_VALUE_TYPE, "CASE WHEN data type mismatch");
+ }
+ pCaseWhen->pElse = pCastFunc;
+ ((SExprNode*)pCaseWhen->pElse)->resType = pCaseWhen->node.resType;
+ }
+ return DEAL_RES_CONTINUE;
+}
+
static EDealRes doTranslateExpr(SNode** pNode, void* pContext) {
STranslateContext* pCxt = (STranslateContext*)pContext;
switch (nodeType(*pNode)) {
@@ -1783,6 +1884,10 @@ static EDealRes doTranslateExpr(SNode** pNode, void* pContext) {
return translateLogicCond(pCxt, (SLogicConditionNode*)*pNode);
case QUERY_NODE_TEMP_TABLE:
return translateExprSubquery(pCxt, ((STempTableNode*)*pNode)->pSubquery);
+ case QUERY_NODE_WHEN_THEN:
+ return translateWhenThen(pCxt, (SWhenThenNode*)*pNode);
+ case QUERY_NODE_CASE_WHEN:
+ return translateCaseWhen(pCxt, (SCaseWhenNode*)*pNode);
default:
break;
}
@@ -2036,10 +2141,114 @@ static bool sysTableFromVnode(const char* pTable) {
static bool sysTableFromDnode(const char* pTable) { return 0 == strcmp(pTable, TSDB_INS_TABLE_DNODE_VARIABLES); }
+static int32_t getTagsTableVgroupListImpl(STranslateContext* pCxt, SName* pTargetName, SName* pName,
+ SArray** pVgroupList) {
+ if (0 == pTargetName->type) {
+ return getDBVgInfoImpl(pCxt, pName, pVgroupList);
+ }
+
+ if (TSDB_DB_NAME_T == pTargetName->type) {
+ return getDBVgInfoImpl(pCxt, pTargetName, pVgroupList);
+ }
+
+ SVgroupInfo vgInfo = {0};
+ int32_t code = getTableHashVgroupImpl(pCxt, pTargetName, &vgInfo);
+ if (TSDB_CODE_SUCCESS == code) {
+ *pVgroupList = taosArrayInit(1, sizeof(SVgroupInfo));
+ if (NULL == *pVgroupList) {
+ code = TSDB_CODE_OUT_OF_MEMORY;
+ }
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ taosArrayPush(*pVgroupList, &vgInfo);
+ }
+ return code;
+}
+
+static int32_t getTagsTableTargetNameFromOp(STranslateContext* pCxt, SOperatorNode* pOper, SName* pName) {
+ if (OP_TYPE_EQUAL != pOper->opType) {
+ return TSDB_CODE_SUCCESS;
+ }
+
+ SColumnNode* pCol = NULL;
+ SValueNode* pVal = NULL;
+ if (QUERY_NODE_COLUMN == nodeType(pOper->pLeft)) {
+ pCol = (SColumnNode*)pOper->pLeft;
+ } else if (QUERY_NODE_VALUE == nodeType(pOper->pLeft)) {
+ pVal = (SValueNode*)pOper->pLeft;
+ }
+ if (QUERY_NODE_COLUMN == nodeType(pOper->pRight)) {
+ pCol = (SColumnNode*)pOper->pRight;
+ } else if (QUERY_NODE_VALUE == nodeType(pOper->pRight)) {
+ pVal = (SValueNode*)pOper->pRight;
+ }
+ if (NULL == pCol || NULL == pVal) {
+ return TSDB_CODE_SUCCESS;
+ }
+
+ if (0 == strcmp(pCol->colName, "db_name")) {
+ return tNameSetDbName(pName, pCxt->pParseCxt->acctId, pVal->literal, strlen(pVal->literal));
+ } else if (0 == strcmp(pCol->colName, "table_name")) {
+ return tNameAddTbName(pName, pVal->literal, strlen(pVal->literal));
+ }
+
+ return TSDB_CODE_SUCCESS;
+}
+
+static void getTagsTableTargetObjName(STranslateContext* pCxt, SNode* pNode, SName* pName) {
+ if (QUERY_NODE_OPERATOR == nodeType(pNode)) {
+ getTagsTableTargetNameFromOp(pCxt, (SOperatorNode*)pNode, pName);
+ }
+}
+
+static int32_t getTagsTableTargetNameFromCond(STranslateContext* pCxt, SLogicConditionNode* pCond, SName* pName) {
+ if (LOGIC_COND_TYPE_AND != pCond->condType) {
+ return TSDB_CODE_SUCCESS;
+ }
+
+ SNode* pNode = NULL;
+ FOREACH(pNode, pCond->pParameterList) { getTagsTableTargetObjName(pCxt, pNode, pName); }
+ if ('\0' == pName->dbname[0]) {
+ pName->type = 0;
+ }
+ return TSDB_CODE_SUCCESS;
+}
+
+static int32_t getTagsTableTargetName(STranslateContext* pCxt, SNode* pWhere, SName* pName) {
+ if (NULL == pWhere) {
+ return TSDB_CODE_SUCCESS;
+ }
+
+ if (QUERY_NODE_OPERATOR == nodeType(pWhere)) {
+ return getTagsTableTargetNameFromOp(pCxt, (SOperatorNode*)pWhere, pName);
+ }
+
+ if (QUERY_NODE_LOGIC_CONDITION == nodeType(pWhere)) {
+ return getTagsTableTargetNameFromCond(pCxt, (SLogicConditionNode*)pWhere, pName);
+ }
+
+ return TSDB_CODE_SUCCESS;
+}
+
+static int32_t getTagsTableVgroupList(STranslateContext* pCxt, SName* pName, SArray** pVgroupList) {
+ if (!isSelectStmt(pCxt->pCurrStmt)) {
+ return TSDB_CODE_SUCCESS;
+ }
+ SSelectStmt* pSelect = (SSelectStmt*)pCxt->pCurrStmt;
+ SName targetName = {0};
+ int32_t code = getTagsTableTargetName(pCxt, pSelect->pWhere, &targetName);
+ if (TSDB_CODE_SUCCESS == code) {
+ code = getTagsTableVgroupListImpl(pCxt, &targetName, pName, pVgroupList);
+ }
+ return code;
+}
+
static int32_t setVnodeSysTableVgroupList(STranslateContext* pCxt, SName* pName, SRealTableNode* pRealTable) {
int32_t code = TSDB_CODE_SUCCESS;
SArray* vgroupList = NULL;
- if ('\0' != pRealTable->qualDbName[0]) {
+ if (0 == strcmp(pRealTable->table.tableName, TSDB_INS_TABLE_TAGS)) {
+ code = getTagsTableVgroupList(pCxt, pName, &vgroupList);
+ } else if ('\0' != pRealTable->qualDbName[0]) {
if (0 != strcmp(pRealTable->qualDbName, TSDB_INFORMATION_SCHEMA_DB)) {
code = getDBVgInfo(pCxt, pRealTable->qualDbName, &vgroupList);
}
@@ -2047,14 +2256,12 @@ static int32_t setVnodeSysTableVgroupList(STranslateContext* pCxt, SName* pName,
code = getDBVgInfoImpl(pCxt, pName, &vgroupList);
}
- if (TSDB_CODE_SUCCESS == code && 0 == strcmp(pRealTable->table.dbName, TSDB_INFORMATION_SCHEMA_DB) &&
- 0 == strcmp(pRealTable->table.tableName, TSDB_INS_TABLE_TAGS) && isSelectStmt(pCxt->pCurrStmt) &&
- 0 == taosArrayGetSize(vgroupList)) {
+ if (TSDB_CODE_SUCCESS == code && 0 == strcmp(pRealTable->table.tableName, TSDB_INS_TABLE_TAGS) &&
+ isSelectStmt(pCxt->pCurrStmt) && 0 == taosArrayGetSize(vgroupList)) {
((SSelectStmt*)pCxt->pCurrStmt)->isEmptyResult = true;
}
- if (TSDB_CODE_SUCCESS == code && 0 == strcmp(pRealTable->table.dbName, TSDB_INFORMATION_SCHEMA_DB) &&
- 0 == strcmp(pRealTable->table.tableName, TSDB_INS_TABLE_TABLES)) {
+ if (TSDB_CODE_SUCCESS == code && 0 == strcmp(pRealTable->table.tableName, TSDB_INS_TABLE_TABLES)) {
code = addMnodeToVgroupList(&pCxt->pParseCxt->mgmtEpSet, &vgroupList);
}
@@ -2160,19 +2367,31 @@ static int32_t setTableIndex(STranslateContext* pCxt, SName* pName, SRealTableNo
return TSDB_CODE_SUCCESS;
}
-static int32_t setTableCacheLastMode(STranslateContext* pCxt, SName* pName, SRealTableNode* pRealTable) {
- if (TSDB_SYSTEM_TABLE == pRealTable->pMeta->tableType) {
+static int32_t setTableCacheLastMode(STranslateContext* pCxt, SSelectStmt* pSelect) {
+ if (!pSelect->hasLastRowFunc || QUERY_NODE_REAL_TABLE != nodeType(pSelect->pFromTable)) {
return TSDB_CODE_SUCCESS;
}
- SDbCfgInfo dbCfg = {0};
- int32_t code = getDBCfg(pCxt, pRealTable->table.dbName, &dbCfg);
+ SRealTableNode* pTable = (SRealTableNode*)pSelect->pFromTable;
+ SDbCfgInfo dbCfg = {0};
+ int32_t code = getDBCfg(pCxt, pTable->table.dbName, &dbCfg);
if (TSDB_CODE_SUCCESS == code) {
- pRealTable->cacheLastMode = dbCfg.cacheLast;
+ pTable->cacheLastMode = dbCfg.cacheLast;
}
return code;
}
+static int32_t checkJoinTable(STranslateContext* pCxt, SJoinTableNode* pJoinTable) {
+ if ((QUERY_NODE_TEMP_TABLE == nodeType(pJoinTable->pLeft) &&
+ !isTimeLineQuery(((STempTableNode*)pJoinTable->pLeft)->pSubquery)) ||
+ (QUERY_NODE_TEMP_TABLE == nodeType(pJoinTable->pRight) &&
+ !isTimeLineQuery(((STempTableNode*)pJoinTable->pRight)->pSubquery))) {
+ return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_NOT_SUPPORT_JOIN,
+ "Join requires valid time series input");
+ }
+ return TSDB_CODE_SUCCESS;
+}
+
static int32_t translateTable(STranslateContext* pCxt, SNode* pTable) {
int32_t code = TSDB_CODE_SUCCESS;
switch (nodeType(pTable)) {
@@ -2192,9 +2411,6 @@ static int32_t translateTable(STranslateContext* pCxt, SNode* pTable) {
if (TSDB_CODE_SUCCESS == code) {
code = setTableIndex(pCxt, &name, pRealTable);
}
- if (TSDB_CODE_SUCCESS == code) {
- code = setTableCacheLastMode(pCxt, &name, pRealTable);
- }
}
if (TSDB_CODE_SUCCESS == code) {
pRealTable->table.precision = pRealTable->pMeta->tableInfo.precision;
@@ -2222,6 +2438,9 @@ static int32_t translateTable(STranslateContext* pCxt, SNode* pTable) {
if (TSDB_CODE_SUCCESS == code) {
code = translateTable(pCxt, pJoinTable->pRight);
}
+ if (TSDB_CODE_SUCCESS == code) {
+ code = checkJoinTable(pCxt, pJoinTable);
+ }
if (TSDB_CODE_SUCCESS == code) {
pJoinTable->table.precision = calcJoinTablePrecision(pJoinTable);
pJoinTable->table.singleTable = joinTableIsSingleTable(pJoinTable);
@@ -2273,10 +2492,18 @@ static SNode* createMultiResFunc(SFunctionNode* pSrcFunc, SExprNode* pExpr) {
if (QUERY_NODE_COLUMN == nodeType(pExpr)) {
SColumnNode* pCol = (SColumnNode*)pExpr;
len = snprintf(buf, sizeof(buf), "%s(%s.%s)", pSrcFunc->functionName, pCol->tableAlias, pCol->colName);
+ strncpy(pFunc->node.aliasName, buf, TMIN(len, sizeof(pFunc->node.aliasName) - 1));
+ if (tsKeepColumnName) {
+ strcpy(pFunc->node.userAlias, pCol->colName);
+ } else {
+ len = snprintf(buf, sizeof(buf), "%s(%s)", pSrcFunc->functionName, pCol->colName);
+ strncpy(pFunc->node.userAlias, buf, TMIN(len, sizeof(pFunc->node.userAlias) - 1));
+ }
} else {
len = snprintf(buf, sizeof(buf), "%s(%s)", pSrcFunc->functionName, pExpr->aliasName);
+ strncpy(pFunc->node.aliasName, buf, TMIN(len, sizeof(pFunc->node.aliasName) - 1));
+ strncpy(pFunc->node.userAlias, buf, TMIN(len, sizeof(pFunc->node.userAlias) - 1));
}
- strncpy(pFunc->node.aliasName, buf, TMIN(len, sizeof(pFunc->node.aliasName) - 1));
return (SNode*)pFunc;
}
@@ -2475,13 +2702,65 @@ static int32_t translateOrderBy(STranslateContext* pCxt, SSelectStmt* pSelect) {
return code;
}
+static EDealRes needFillImpl(SNode* pNode, void* pContext) {
+ if (isAggFunc(pNode) && FUNCTION_TYPE_GROUP_KEY != ((SFunctionNode*)pNode)->funcType) {
+ *(bool*)pContext = true;
+ return DEAL_RES_END;
+ }
+ return DEAL_RES_CONTINUE;
+}
+
+static bool needFill(SNode* pNode) {
+ bool hasFillFunc = false;
+ nodesWalkExpr(pNode, needFillImpl, &hasFillFunc);
+ return hasFillFunc;
+}
+
+static bool mismatchFillDataType(SDataType origDt, SDataType fillDt) {
+ if (TSDB_DATA_TYPE_NULL == fillDt.type) {
+ return false;
+ }
+ if (IS_NUMERIC_TYPE(origDt.type) && !IS_NUMERIC_TYPE(fillDt.type)) {
+ return true;
+ }
+ if (IS_VAR_DATA_TYPE(origDt.type) && !IS_VAR_DATA_TYPE(fillDt.type)) {
+ return true;
+ }
+ return false;
+}
+
+static int32_t checkFillValues(STranslateContext* pCxt, SFillNode* pFill, SNodeList* pProjectionList) {
+ if (FILL_MODE_VALUE != pFill->mode) {
+ return TSDB_CODE_SUCCESS;
+ }
+
+ int32_t fillNo = 0;
+ SNodeListNode* pFillValues = (SNodeListNode*)pFill->pValues;
+ SNode* pProject = NULL;
+ FOREACH(pProject, pProjectionList) {
+ if (needFill(pProject)) {
+ if (fillNo >= LIST_LENGTH(pFillValues->pNodeList)) {
+ return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_WRONG_VALUE_TYPE, "Filled values number mismatch");
+ }
+ if (mismatchFillDataType(((SExprNode*)pProject)->resType,
+ ((SExprNode*)nodesListGetNode(pFillValues->pNodeList, fillNo))->resType)) {
+ return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_WRONG_VALUE_TYPE, "Filled data type mismatch");
+ }
+ ++fillNo;
+ }
+ }
+ if (fillNo != LIST_LENGTH(pFillValues->pNodeList)) {
+ return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_WRONG_VALUE_TYPE, "Filled values number mismatch");
+ }
+ return TSDB_CODE_SUCCESS;
+}
+
static int32_t translateFillValues(STranslateContext* pCxt, SSelectStmt* pSelect) {
if (NULL == pSelect->pWindow || QUERY_NODE_INTERVAL_WINDOW != nodeType(pSelect->pWindow) ||
NULL == ((SIntervalWindowNode*)pSelect->pWindow)->pFill) {
return TSDB_CODE_SUCCESS;
}
- SFillNode* pFill = (SFillNode*)((SIntervalWindowNode*)pSelect->pWindow)->pFill;
- return TSDB_CODE_SUCCESS;
+ return checkFillValues(pCxt, (SFillNode*)((SIntervalWindowNode*)pSelect->pWindow)->pFill, pSelect->pProjectionList);
}
static int32_t rewriteProjectAlias(SNodeList* pProjectionList) {
@@ -2906,7 +3185,14 @@ static int32_t translatePartitionBy(STranslateContext* pCxt, SSelectStmt* pSelec
return TSDB_CODE_SUCCESS;
}
pCxt->currClause = SQL_CLAUSE_PARTITION_BY;
- return translateExprList(pCxt, pSelect->pPartitionByList);
+ int32_t code = translateExprList(pCxt, pSelect->pPartitionByList);
+ if (TSDB_CODE_SUCCESS == code) {
+ code = translateExprList(pCxt, pSelect->pTags);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = translateExpr(pCxt, &pSelect->pSubtable);
+ }
+ return code;
}
static int32_t translateWhere(STranslateContext* pCxt, SSelectStmt* pSelect) {
@@ -3088,6 +3374,9 @@ static int32_t translateSelectFrom(STranslateContext* pCxt, SSelectStmt* pSelect
if (TSDB_CODE_SUCCESS == code) {
code = replaceOrderByAliasForSelect(pCxt, pSelect);
}
+ if (TSDB_CODE_SUCCESS == code) {
+ code = setTableCacheLastMode(pCxt, pSelect);
+ }
return code;
}
@@ -3112,27 +3401,6 @@ static SNode* createSetOperProject(const char* pTableAlias, SNode* pNode) {
return (SNode*)pCol;
}
-static int32_t createCastFunc(STranslateContext* pCxt, SNode* pExpr, SDataType dt, SNode** pCast) {
- SFunctionNode* pFunc = (SFunctionNode*)nodesMakeNode(QUERY_NODE_FUNCTION);
- if (NULL == pFunc) {
- return TSDB_CODE_OUT_OF_MEMORY;
- }
- strcpy(pFunc->functionName, "cast");
- pFunc->node.resType = dt;
- if (TSDB_CODE_SUCCESS != nodesListMakeAppend(&pFunc->pParameterList, pExpr)) {
- nodesDestroyNode((SNode*)pFunc);
- return TSDB_CODE_OUT_OF_MEMORY;
- }
- if (TSDB_CODE_SUCCESS != getFuncInfo(pCxt, pFunc)) {
- nodesClearList(pFunc->pParameterList);
- pFunc->pParameterList = NULL;
- nodesDestroyNode((SNode*)pFunc);
- return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_WRONG_VALUE_TYPE, ((SExprNode*)pExpr)->aliasName);
- }
- *pCast = (SNode*)pFunc;
- return TSDB_CODE_SUCCESS;
-}
-
static int32_t translateSetOperProject(STranslateContext* pCxt, SSetOperator* pSetOperator) {
SNodeList* pLeftProjections = getProjectList(pSetOperator->pLeft);
SNodeList* pRightProjections = getProjectList(pSetOperator->pRight);
@@ -3251,10 +3519,16 @@ static int32_t translateDelete(STranslateContext* pCxt, SDeleteStmt* pDelete) {
if (TSDB_CODE_SUCCESS == code) {
code = translateDeleteWhere(pCxt, pDelete);
}
+ pCxt->currClause = SQL_CLAUSE_SELECT;
if (TSDB_CODE_SUCCESS == code) {
- pCxt->currClause = SQL_CLAUSE_SELECT;
code = translateExpr(pCxt, &pDelete->pCountFunc);
}
+ if (TSDB_CODE_SUCCESS == code) {
+ code = translateExpr(pCxt, &pDelete->pFirstFunc);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = translateExpr(pCxt, &pDelete->pLastFunc);
+ }
return code;
}
@@ -3419,6 +3693,10 @@ static int32_t buildCreateDbReq(STranslateContext* pCxt, SCreateDatabaseStmt* pS
pReq->walRetentionSize = pStmt->pOptions->walRetentionSize;
pReq->walRollPeriod = pStmt->pOptions->walRollPeriod;
pReq->walSegmentSize = pStmt->pOptions->walSegmentSize;
+ pReq->sstTrigger = pStmt->pOptions->sstTrigger;
+ pReq->hashPrefix = pStmt->pOptions->tablePrefix;
+ pReq->hashSuffix = pStmt->pOptions->tableSuffix;
+ pReq->tsdbPageSize = pStmt->pOptions->tsdbPageSize;
pReq->ignoreExist = pStmt->ignoreExists;
return buildCreateDbRetentions(pStmt->pOptions->pRetentions, pReq);
}
@@ -3663,6 +3941,10 @@ static int32_t checkDatabaseOptions(STranslateContext* pCxt, const char* pDbName
code = checkDbRangeOption(pCxt, "pagesize", pOptions->pagesize, TSDB_MIN_PAGESIZE_PER_VNODE,
TSDB_MAX_PAGESIZE_PER_VNODE);
}
+ if (TSDB_CODE_SUCCESS == code) {
+ code = checkDbRangeOption(pCxt, "tsdbPagesize", pOptions->tsdbPageSize, TSDB_MIN_TSDB_PAGESIZE,
+ TSDB_MAX_TSDB_PAGESIZE);
+ }
if (TSDB_CODE_SUCCESS == code) {
code = checkDbPrecisionOption(pCxt, pOptions);
}
@@ -3703,6 +3985,15 @@ static int32_t checkDatabaseOptions(STranslateContext* pCxt, const char* pDbName
code =
checkDbRangeOption(pCxt, "walSegmentSize", pOptions->walSegmentSize, TSDB_DB_MIN_WAL_SEGMENT_SIZE, INT32_MAX);
}
+ if (TSDB_CODE_SUCCESS == code) {
+ code = checkDbRangeOption(pCxt, "sstTrigger", pOptions->sstTrigger, TSDB_MIN_STT_TRIGGER, TSDB_MAX_STT_TRIGGER);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = checkDbRangeOption(pCxt, "tablePrefix", pOptions->tablePrefix, TSDB_MIN_HASH_PREFIX, TSDB_MAX_HASH_PREFIX);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = checkDbRangeOption(pCxt, "tableSuffix", pOptions->tableSuffix, TSDB_MIN_HASH_SUFFIX, TSDB_MAX_HASH_SUFFIX);
+ }
if (TSDB_CODE_SUCCESS == code) {
code = checkOptionsDependency(pCxt, pDbName, pOptions);
}
@@ -3776,6 +4067,7 @@ static void buildAlterDbReq(STranslateContext* pCxt, SAlterDatabaseStmt* pStmt,
pReq->cacheLast = pStmt->pOptions->cacheModel;
pReq->cacheLastSize = pStmt->pOptions->cacheLastSize;
pReq->replications = pStmt->pOptions->replica;
+ pReq->sstTrigger = pStmt->pOptions->sstTrigger;
return;
}
@@ -3792,7 +4084,7 @@ static int32_t translateAlterDatabase(STranslateContext* pCxt, SAlterDatabaseStm
}
static int32_t translateTrimDatabase(STranslateContext* pCxt, STrimDatabaseStmt* pStmt) {
- STrimDbReq req = {0};
+ STrimDbReq req = {.maxSpeed = pStmt->maxSpeed};
SName name = {0};
tNameSetDbName(&name, pCxt->pParseCxt->acctId, pStmt->dbName, strlen(pStmt->dbName));
tNameGetFullDbName(&name, req.db);
@@ -5001,7 +5293,7 @@ static int32_t checkCreateStream(STranslateContext* pCxt, SCreateStreamStmt* pSt
return TSDB_CODE_SUCCESS;
}
- if (QUERY_NODE_SELECT_STMT != nodeType(pStmt->pQuery) ||
+ if (QUERY_NODE_SELECT_STMT != nodeType(pStmt->pQuery) || NULL == ((SSelectStmt*)pStmt->pQuery)->pFromTable ||
QUERY_NODE_REAL_TABLE != nodeType(((SSelectStmt*)pStmt->pQuery)->pFromTable)) {
return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_STREAM_QUERY, "Unsupported stream query");
}
@@ -5035,6 +5327,93 @@ static int32_t addWstartTsToCreateStreamQuery(SNode* pStmt) {
return code;
}
+static int32_t addTagsToCreateStreamQuery(STranslateContext* pCxt, SCreateStreamStmt* pStmt, SSelectStmt* pSelect) {
+ if (NULL == pStmt->pTags) {
+ return TSDB_CODE_SUCCESS;
+ }
+
+ SNode* pTag = NULL;
+ FOREACH(pTag, pStmt->pTags) {
+ bool found = false;
+ SNode* pPart = NULL;
+ FOREACH(pPart, pSelect->pPartitionByList) {
+ if (0 == strcmp(((SColumnDefNode*)pTag)->colName, ((SExprNode*)pPart)->userAlias)) {
+ if (TSDB_CODE_SUCCESS != nodesListMakeStrictAppend(&pSelect->pTags, nodesCloneNode(pPart))) {
+ return TSDB_CODE_OUT_OF_MEMORY;
+ }
+ found = true;
+ break;
+ }
+ }
+ if (!found) {
+ return generateDealNodeErrMsg(pCxt, TSDB_CODE_PAR_INVALID_COLUMN, ((SColumnDefNode*)pTag)->colName);
+ }
+ }
+ return TSDB_CODE_SUCCESS;
+}
+
+typedef struct SRewriteSubtableCxt {
+ STranslateContext* pCxt;
+ SNodeList* pPartitionList;
+} SRewriteSubtableCxt;
+
+static EDealRes rewriteSubtable(SNode** pNode, void* pContext) {
+ if (QUERY_NODE_COLUMN == nodeType(*pNode)) {
+ SRewriteSubtableCxt* pCxt = pContext;
+ bool found = false;
+ SNode* pPart = NULL;
+ FOREACH(pPart, pCxt->pPartitionList) {
+ if (0 == strcmp(((SColumnNode*)*pNode)->colName, ((SExprNode*)pPart)->userAlias)) {
+ SNode* pNew = nodesCloneNode(pPart);
+ if (NULL == pNew) {
+ pCxt->pCxt->errCode = TSDB_CODE_OUT_OF_MEMORY;
+ return DEAL_RES_ERROR;
+ }
+ nodesDestroyNode(*pNode);
+ *pNode = pNew;
+ found = true;
+ break;
+ }
+ if (!found) {
+ return generateDealNodeErrMsg(pCxt->pCxt, TSDB_CODE_PAR_INVALID_COLUMN, ((SColumnNode*)*pNode)->colName);
+ }
+ }
+ return DEAL_RES_IGNORE_CHILD;
+ }
+ return DEAL_RES_CONTINUE;
+}
+
+static int32_t addSubtableNameToCreateStreamQuery(STranslateContext* pCxt, SCreateStreamStmt* pStmt,
+ SSelectStmt* pSelect) {
+ if (NULL == pStmt->pSubtable) {
+ return TSDB_CODE_SUCCESS;
+ }
+ pSelect->pSubtable = nodesCloneNode(pStmt->pSubtable);
+ if (NULL == pSelect->pSubtable) {
+ return TSDB_CODE_OUT_OF_MEMORY;
+ }
+ SRewriteSubtableCxt cxt = {.pCxt = pCxt, .pPartitionList = pSelect->pPartitionByList};
+ nodesRewriteExpr(&pSelect->pSubtable, rewriteSubtable, &cxt);
+ return pCxt->errCode;
+}
+
+static int32_t addSubtableInfoToCreateStreamQuery(STranslateContext* pCxt, SCreateStreamStmt* pStmt) {
+ SSelectStmt* pSelect = (SSelectStmt*)pStmt->pQuery;
+ if (NULL == pSelect->pPartitionByList) {
+ if (NULL != pStmt->pTags || NULL != pStmt->pSubtable) {
+ return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_STREAM_QUERY, "Unsupported stream query");
+ }
+ return TSDB_CODE_SUCCESS;
+ }
+
+ int32_t code = addTagsToCreateStreamQuery(pCxt, pStmt, pSelect);
+ if (TSDB_CODE_SUCCESS == code) {
+ code = addSubtableNameToCreateStreamQuery(pCxt, pStmt, pSelect);
+ }
+
+ return code;
+}
+
static int32_t checkStreamQuery(STranslateContext* pCxt, SSelectStmt* pSelect) {
if (TSDB_DATA_TYPE_TIMESTAMP != ((SExprNode*)nodesListGetNode(pSelect->pProjectionList, 0))->resType.type ||
!pSelect->isTimeLineResult || crossTableWithoutAggOper(pSelect) || NULL != pSelect->pOrderByList ||
@@ -5044,18 +5423,21 @@ static int32_t checkStreamQuery(STranslateContext* pCxt, SSelectStmt* pSelect) {
return TSDB_CODE_SUCCESS;
}
-static int32_t buildCreateStreamQuery(STranslateContext* pCxt, SNode* pStmt, SCMCreateStreamReq* pReq) {
+static int32_t buildCreateStreamQuery(STranslateContext* pCxt, SCreateStreamStmt* pStmt, SCMCreateStreamReq* pReq) {
pCxt->createStream = true;
- int32_t code = addWstartTsToCreateStreamQuery(pStmt);
+ int32_t code = addWstartTsToCreateStreamQuery(pStmt->pQuery);
if (TSDB_CODE_SUCCESS == code) {
- code = translateQuery(pCxt, pStmt);
+ code = addSubtableInfoToCreateStreamQuery(pCxt, pStmt);
}
if (TSDB_CODE_SUCCESS == code) {
- code = checkStreamQuery(pCxt, (SSelectStmt*)pStmt);
+ code = translateQuery(pCxt, pStmt->pQuery);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = checkStreamQuery(pCxt, (SSelectStmt*)pStmt->pQuery);
}
if (TSDB_CODE_SUCCESS == code) {
- getSourceDatabase(pStmt, pCxt->pParseCxt->acctId, pReq->sourceDB);
- code = nodesNodeToString(pStmt, false, &pReq->ast, NULL);
+ getSourceDatabase(pStmt->pQuery, pCxt->pParseCxt->acctId, pReq->sourceDB);
+ code = nodesNodeToString(pStmt->pQuery, false, &pReq->ast, NULL);
}
return code;
}
@@ -5073,7 +5455,7 @@ static int32_t buildCreateStreamReq(STranslateContext* pCxt, SCreateStreamStmt*
tNameExtractFullName(&name, pReq->targetStbFullName);
}
- int32_t code = buildCreateStreamQuery(pCxt, pStmt->pQuery, pReq);
+ int32_t code = buildCreateStreamQuery(pCxt, pStmt, pReq);
if (TSDB_CODE_SUCCESS == code) {
pReq->sql = strdup(pCxt->pParseCxt->pSql);
if (NULL == pReq->sql) {
@@ -5086,6 +5468,8 @@ static int32_t buildCreateStreamReq(STranslateContext* pCxt, SCreateStreamStmt*
pReq->maxDelay = (NULL != pStmt->pOptions->pDelay ? ((SValueNode*)pStmt->pOptions->pDelay)->datum.i : 0);
pReq->watermark = (NULL != pStmt->pOptions->pWatermark ? ((SValueNode*)pStmt->pOptions->pWatermark)->datum.i : 0);
pReq->igExpired = pStmt->pOptions->ignoreExpired;
+ columnDefNodeToField(pStmt->pTags, &pReq->pTags);
+ pReq->numOfTags = LIST_LENGTH(pStmt->pTags);
}
return code;
@@ -5780,6 +6164,25 @@ static int32_t rewriteShowDnodeVariables(STranslateContext* pCxt, SQuery* pQuery
return code;
}
+static int32_t rewriteShowVnodes(STranslateContext* pCxt, SQuery* pQuery) {
+ SShowVnodesStmt* pShow = (SShowVnodesStmt*)(pQuery->pRoot);
+ SSelectStmt* pStmt = NULL;
+ int32_t code = createSelectStmtForShow(QUERY_NODE_SHOW_VNODES_STMT, &pStmt);
+ if (TSDB_CODE_SUCCESS == code) {
+ if (NULL != pShow->pDnodeId) {
+ code = createOperatorNode(OP_TYPE_EQUAL, "dnode_id", pShow->pDnodeId, &pStmt->pWhere);
+ } else {
+ code = createOperatorNode(OP_TYPE_EQUAL, "dnode_ep", pShow->pDnodeEndpoint, &pStmt->pWhere);
+ }
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ pQuery->showRewrite = true;
+ nodesDestroyNode(pQuery->pRoot);
+ pQuery->pRoot = (SNode*)pStmt;
+ }
+ return code;
+}
+
static SNode* createBlockDistInfoFunc() {
SFunctionNode* pFunc = (SFunctionNode*)nodesMakeNode(QUERY_NODE_FUNCTION);
if (NULL == pFunc) {
@@ -5827,12 +6230,6 @@ typedef struct SVgroupCreateTableBatch {
char dbName[TSDB_DB_NAME_LEN];
} SVgroupCreateTableBatch;
-static void destroyCreateTbReq(SVCreateTbReq* pReq) {
- taosMemoryFreeClear(pReq->name);
- taosMemoryFreeClear(pReq->comment);
- taosMemoryFreeClear(pReq->ntb.schemaRow.pSchema);
-}
-
static int32_t buildNormalTableBatchReq(int32_t acctId, const SCreateTableStmt* pStmt, const SVgroupInfo* pVgroupInfo,
SVgroupCreateTableBatch* pBatch) {
char dbFName[TSDB_DB_FNAME_LEN] = {0};
@@ -5847,7 +6244,7 @@ static int32_t buildNormalTableBatchReq(int32_t acctId, const SCreateTableStmt*
if (pStmt->pOptions->commentNull == false) {
req.comment = strdup(pStmt->pOptions->comment);
if (NULL == req.comment) {
- destroyCreateTbReq(&req);
+ tdDestroySVCreateTbReq(&req);
return TSDB_CODE_OUT_OF_MEMORY;
}
req.commentLen = strlen(pStmt->pOptions->comment);
@@ -5858,7 +6255,7 @@ static int32_t buildNormalTableBatchReq(int32_t acctId, const SCreateTableStmt*
req.ntb.schemaRow.version = 1;
req.ntb.schemaRow.pSchema = taosMemoryCalloc(req.ntb.schemaRow.nCols, sizeof(SSchema));
if (NULL == req.name || NULL == req.ntb.schemaRow.pSchema) {
- destroyCreateTbReq(&req);
+ tdDestroySVCreateTbReq(&req);
return TSDB_CODE_OUT_OF_MEMORY;
}
if (pStmt->ignoreExists) {
@@ -5874,7 +6271,7 @@ static int32_t buildNormalTableBatchReq(int32_t acctId, const SCreateTableStmt*
strcpy(pBatch->dbName, pStmt->dbName);
pBatch->req.pArray = taosArrayInit(1, sizeof(struct SVCreateTbReq));
if (NULL == pBatch->req.pArray) {
- destroyCreateTbReq(&req);
+ tdDestroySVCreateTbReq(&req);
return TSDB_CODE_OUT_OF_MEMORY;
}
taosArrayPush(pBatch->req.pArray, &req);
@@ -5919,16 +6316,7 @@ static void destroyCreateTbReqBatch(void* data) {
size_t size = taosArrayGetSize(pTbBatch->req.pArray);
for (int32_t i = 0; i < size; ++i) {
SVCreateTbReq* pTableReq = taosArrayGet(pTbBatch->req.pArray, i);
- taosMemoryFreeClear(pTableReq->name);
- taosMemoryFreeClear(pTableReq->comment);
-
- if (pTableReq->type == TSDB_NORMAL_TABLE) {
- taosMemoryFreeClear(pTableReq->ntb.schemaRow.pSchema);
- } else if (pTableReq->type == TSDB_CHILD_TABLE) {
- taosMemoryFreeClear(pTableReq->ctb.pTag);
- taosMemoryFreeClear(pTableReq->ctb.name);
- taosArrayDestroy(pTableReq->ctb.tagName);
- }
+ tdDestroySVCreateTbReq(pTableReq);
}
taosArrayDestroy(pTbBatch->req.pArray);
@@ -6289,6 +6677,8 @@ static int32_t rewriteCreateSubTable(STranslateContext* pCxt, SCreateSubTableCla
if (TSDB_CODE_SUCCESS == code) {
addCreateTbReqIntoVgroup(pCxt->pParseCxt->acctId, pVgroupHashmap, pStmt, pTag, pSuperTableMeta->uid,
pStmt->useTableName, &info, tagName, pSuperTableMeta->tableInfo.numOfTags);
+ } else {
+ taosMemoryFree(pTag);
}
taosArrayDestroy(tagName);
@@ -6351,8 +6741,9 @@ typedef struct SVgroupDropTableBatch {
char dbName[TSDB_DB_NAME_LEN];
} SVgroupDropTableBatch;
-static void addDropTbReqIntoVgroup(SHashObj* pVgroupHashmap, SDropTableClause* pClause, SVgroupInfo* pVgInfo) {
- SVDropTbReq req = {.name = pClause->tableName, .igNotExists = pClause->ignoreNotExists};
+static void addDropTbReqIntoVgroup(SHashObj* pVgroupHashmap, SDropTableClause* pClause, SVgroupInfo* pVgInfo,
+ uint64_t suid) {
+ SVDropTbReq req = {.name = pClause->tableName, .suid = suid, .igNotExists = pClause->ignoreNotExists};
SVgroupDropTableBatch* pTableBatch = taosHashGet(pVgroupHashmap, &pVgInfo->vgId, sizeof(pVgInfo->vgId));
if (NULL == pTableBatch) {
SVgroupDropTableBatch tBatch = {0};
@@ -6393,7 +6784,7 @@ static int32_t buildDropTableVgroupHashmap(STranslateContext* pCxt, SDropTableCl
code = getTableHashVgroup(pCxt, pClause->dbName, pClause->tableName, &info);
}
if (TSDB_CODE_SUCCESS == code) {
- addDropTbReqIntoVgroup(pVgroupHashmap, pClause, &info);
+ addDropTbReqIntoVgroup(pVgroupHashmap, pClause, &info, pTableMeta->suid);
}
over:
@@ -6508,7 +6899,17 @@ static int32_t buildUpdateTagValReq(STranslateContext* pCxt, SAlterTableStmt* pS
pReq->colId = pSchema->colId;
SDataType targetDt = schemaToDataType(pTableMeta->tableInfo.precision, pSchema);
- if (DEAL_RES_ERROR == translateValueImpl(pCxt, pStmt->pVal, targetDt, true)) {
+
+ if (QUERY_NODE_VALUE != pStmt->pVal->node.type) {
+ SValueNode* pVal = NULL;
+ pCxt->errCode = createTagValFromExpr(pCxt, targetDt, (SNode*)pStmt->pVal, &pVal);
+ if (pCxt->errCode) {
+ return pCxt->errCode;
+ }
+
+ nodesDestroyNode((SNode*)pStmt->pVal);
+ pStmt->pVal = pVal;
+ } else if (DEAL_RES_ERROR == translateValueImpl(pCxt, pStmt->pVal, targetDt, true)) {
return pCxt->errCode;
}
@@ -6527,12 +6928,7 @@ static int32_t buildUpdateTagValReq(STranslateContext* pCxt, SAlterTableStmt* pS
break;
}
} while (0);
- for (int i = 0; i < taosArrayGetSize(pTagVals); ++i) {
- STagVal* p = (STagVal*)taosArrayGet(pTagVals, i);
- if (IS_VAR_DATA_TYPE(p->type)) {
- taosMemoryFree(p->pData);
- }
- }
+
taosArrayDestroy(pTagVals);
if (code != TSDB_CODE_SUCCESS) {
return code;
@@ -6889,6 +7285,9 @@ static int32_t rewriteQuery(STranslateContext* pCxt, SQuery* pQuery) {
case QUERY_NODE_SHOW_DNODE_VARIABLES_STMT:
code = rewriteShowDnodeVariables(pCxt, pQuery);
break;
+ case QUERY_NODE_SHOW_VNODES_STMT:
+ code = rewriteShowVnodes(pCxt, pQuery);
+ break;
case QUERY_NODE_SHOW_TABLE_DISTRIBUTED_STMT:
code = rewriteShowTableDist(pCxt, pQuery);
break;
diff --git a/source/libs/parser/src/parUtil.c b/source/libs/parser/src/parUtil.c
index 32513fd0b6f56097b2b7f08ae03725ce39498a37..3075be1a0087b550bd9919c4be4f4427957b8c80 100644
--- a/source/libs/parser/src/parUtil.c
+++ b/source/libs/parser/src/parUtil.c
@@ -410,6 +410,12 @@ end:
if (retCode == TSDB_CODE_SUCCESS) {
tTagNew(pTagVals, 1, true, ppTag);
}
+ for (int i = 0; i < taosArrayGetSize(pTagVals); ++i) {
+ STagVal* p = (STagVal*)taosArrayGet(pTagVals, i);
+ if (IS_VAR_DATA_TYPE(p->type)) {
+ taosMemoryFreeClear(p->pData);
+ }
+ }
cJSON_Delete(root);
return retCode;
}
@@ -1124,7 +1130,7 @@ int32_t getTableMetaFromCacheForInsert(SArray* pTableMetaPos, SParseMetaCache* p
int32_t reqIndex = *(int32_t*)taosArrayGet(pTableMetaPos, tableNo);
SMetaRes* pRes = taosArrayGet(pMetaCache->pTableMetaData, reqIndex);
if (TSDB_CODE_SUCCESS == pRes->code) {
- *pMeta = pRes->pRes;
+ *pMeta = tableMetaDup(pRes->pRes);
if (NULL == *pMeta) {
return TSDB_CODE_OUT_OF_MEMORY;
}
diff --git a/source/libs/parser/src/parser.c b/source/libs/parser/src/parser.c
index 7e27132f3cbc453a5cf09bd487acc75fa546ff7e..2fe6ebfb79447653731ff907e5128dfededdf111 100644
--- a/source/libs/parser/src/parser.c
+++ b/source/libs/parser/src/parser.c
@@ -136,8 +136,7 @@ static int32_t setValueByBindParam(SValueNode* pVal, TAOS_MULTI_BIND* pParam) {
}
static EDealRes rewriteQueryExprAliasImpl(SNode* pNode, void* pContext) {
- if (nodesIsExprNode(pNode) && QUERY_NODE_COLUMN != nodeType(pNode) && '\0' == ((SExprNode*)pNode)->userAlias[0]) {
- strcpy(((SExprNode*)pNode)->userAlias, ((SExprNode*)pNode)->aliasName);
+ if (nodesIsExprNode(pNode) && QUERY_NODE_COLUMN != nodeType(pNode)) {
sprintf(((SExprNode*)pNode)->aliasName, "#%d", *(int32_t*)pContext);
++(*(int32_t*)pContext);
}
@@ -178,15 +177,18 @@ int32_t qParseSql(SParseContext* pCxt, SQuery** pQuery) {
int32_t qParseSqlSyntax(SParseContext* pCxt, SQuery** pQuery, struct SCatalogReq* pCatalogReq) {
SParseMetaCache metaCache = {0};
- int32_t code = TSDB_CODE_SUCCESS;
- if (qIsInsertValuesSql(pCxt->pSql, pCxt->sqlLen)) {
- code = parseInsertSyntax(pCxt, pQuery, &metaCache);
- } else {
- code = parseSqlSyntax(pCxt, pQuery, &metaCache);
+ int32_t code = nodesAcquireAllocator(pCxt->allocatorId);
+ if (TSDB_CODE_SUCCESS == code) {
+ if (qIsInsertValuesSql(pCxt->pSql, pCxt->sqlLen)) {
+ code = parseInsertSyntax(pCxt, pQuery, &metaCache);
+ } else {
+ code = parseSqlSyntax(pCxt, pQuery, &metaCache);
+ }
}
if (TSDB_CODE_SUCCESS == code) {
code = buildCatalogReq(pCxt, &metaCache, pCatalogReq);
}
+ nodesReleaseAllocator(pCxt->allocatorId);
destoryParseMetaCache(&metaCache, true);
terrno = code;
return code;
@@ -195,7 +197,10 @@ int32_t qParseSqlSyntax(SParseContext* pCxt, SQuery** pQuery, struct SCatalogReq
int32_t qAnalyseSqlSemantic(SParseContext* pCxt, const struct SCatalogReq* pCatalogReq,
const struct SMetaData* pMetaData, SQuery* pQuery) {
SParseMetaCache metaCache = {0};
- int32_t code = putMetaDataToCache(pCatalogReq, pMetaData, &metaCache, NULL == pQuery->pRoot);
+ int32_t code = nodesAcquireAllocator(pCxt->allocatorId);
+ if (TSDB_CODE_SUCCESS == code) {
+ code = putMetaDataToCache(pCatalogReq, pMetaData, &metaCache, NULL == pQuery->pRoot);
+ }
if (TSDB_CODE_SUCCESS == code) {
if (NULL == pQuery->pRoot) {
code = parseInsertSql(pCxt, &pQuery, &metaCache);
@@ -203,6 +208,7 @@ int32_t qAnalyseSqlSemantic(SParseContext* pCxt, const struct SCatalogReq* pCata
code = analyseSemantic(pCxt, pQuery, &metaCache);
}
}
+ nodesReleaseAllocator(pCxt->allocatorId);
destoryParseMetaCache(&metaCache, false);
terrno = code;
return code;
diff --git a/source/libs/parser/src/sql.c b/source/libs/parser/src/sql.c
index c4bd1aff044a491edede232eff74b8dea1feeadb..9acf6ffcdda0b5d767888268b7f1497b630b0e1f 100644
--- a/source/libs/parser/src/sql.c
+++ b/source/libs/parser/src/sql.c
@@ -104,26 +104,26 @@
#endif
/************* Begin control #defines *****************************************/
#define YYCODETYPE unsigned short int
-#define YYNOCODE 426
+#define YYNOCODE 448
#define YYACTIONTYPE unsigned short int
#define ParseTOKENTYPE SToken
typedef union {
int yyinit;
ParseTOKENTYPE yy0;
- SAlterOption yy5;
- int8_t yy59;
- int64_t yy69;
- EJoinType yy156;
- SNodeList* yy172;
- EFillMode yy186;
- SToken yy209;
- int32_t yy232;
- SNode* yy272;
- bool yy293;
- EOperatorType yy392;
- ENullOrder yy493;
- SDataType yy616;
- EOrder yy818;
+ bool yy89;
+ EFillMode yy102;
+ SNodeList* yy152;
+ int64_t yy221;
+ EOperatorType yy380;
+ EOrder yy386;
+ int8_t yy439;
+ int32_t yy452;
+ ENullOrder yy585;
+ EJoinType yy596;
+ SNode* yy616;
+ SAlterOption yy669;
+ SToken yy673;
+ SDataType yy784;
} YYMINORTYPE;
#ifndef YYSTACKDEPTH
#define YYSTACKDEPTH 100
@@ -139,17 +139,17 @@ typedef union {
#define ParseCTX_FETCH
#define ParseCTX_STORE
#define YYFALLBACK 1
-#define YYNSTATE 667
-#define YYNRULE 489
-#define YYNTOKEN 305
-#define YY_MAX_SHIFT 666
-#define YY_MIN_SHIFTREDUCE 972
-#define YY_MAX_SHIFTREDUCE 1460
-#define YY_ERROR_ACTION 1461
-#define YY_ACCEPT_ACTION 1462
-#define YY_NO_ACTION 1463
-#define YY_MIN_REDUCE 1464
-#define YY_MAX_REDUCE 1952
+#define YYNSTATE 689
+#define YYNRULE 519
+#define YYNTOKEN 316
+#define YY_MAX_SHIFT 688
+#define YY_MIN_SHIFTREDUCE 1017
+#define YY_MAX_SHIFTREDUCE 1535
+#define YY_ERROR_ACTION 1536
+#define YY_ACCEPT_ACTION 1537
+#define YY_NO_ACTION 1538
+#define YY_MIN_REDUCE 1539
+#define YY_MAX_REDUCE 2057
/************* End control #defines *******************************************/
#define YY_NLOOKAHEAD ((int)(sizeof(yy_lookahead)/sizeof(yy_lookahead[0])))
@@ -216,650 +216,798 @@ typedef union {
** yy_default[] Default action for each state.
**
*********** Begin parsing tables **********************************************/
-#define YY_ACTTAB_COUNT (2259)
+#define YY_ACTTAB_COUNT (3046)
static const YYACTIONTYPE yy_action[] = {
- /* 0 */ 433, 1930, 434, 1499, 1593, 441, 526, 434, 1499, 513,
- /* 10 */ 30, 260, 39, 37, 1929, 326, 325, 117, 1927, 1702,
- /* 20 */ 339, 1465, 1261, 146, 471, 40, 38, 36, 35, 34,
- /* 30 */ 1786, 552, 1606, 1337, 1604, 1259, 344, 552, 1287, 1649,
- /* 40 */ 1651, 378, 107, 1774, 526, 106, 105, 104, 103, 102,
- /* 50 */ 101, 100, 99, 98, 1770, 117, 1332, 432, 1804, 64,
- /* 60 */ 436, 14, 476, 36, 35, 34, 553, 148, 1267, 1476,
- /* 70 */ 450, 1756, 1604, 577, 39, 37, 1400, 1595, 1766, 1772,
- /* 80 */ 328, 1930, 339, 1526, 1261, 1804, 217, 1286, 1770, 1,
- /* 90 */ 571, 1005, 1656, 542, 164, 1337, 1818, 1259, 1927, 327,
- /* 100 */ 95, 1787, 580, 1789, 1790, 576, 43, 571, 1654, 158,
- /* 110 */ 1864, 663, 1766, 1772, 330, 1860, 159, 513, 1332, 63,
- /* 120 */ 1930, 78, 1643, 14, 571, 1339, 1340, 1703, 163, 541,
- /* 130 */ 1267, 1009, 1010, 165, 33, 32, 1890, 1927, 40, 38,
- /* 140 */ 36, 35, 34, 543, 63, 63, 640, 639, 638, 637,
- /* 150 */ 349, 2, 636, 635, 128, 630, 629, 628, 627, 626,
- /* 160 */ 625, 624, 139, 620, 619, 618, 348, 347, 615, 614,
- /* 170 */ 1262, 107, 1260, 663, 106, 105, 104, 103, 102, 101,
- /* 180 */ 100, 99, 98, 440, 1775, 1287, 436, 1339, 1340, 223,
- /* 190 */ 224, 11, 10, 1265, 1266, 1770, 1314, 1315, 1317, 1318,
- /* 200 */ 1319, 1320, 1321, 1322, 573, 569, 1330, 1331, 1333, 1334,
- /* 210 */ 1335, 1336, 1338, 1341, 1464, 496, 1431, 33, 32, 1766,
- /* 220 */ 1772, 40, 38, 36, 35, 34, 526, 167, 494, 150,
- /* 230 */ 492, 571, 1262, 1564, 1260, 1261, 210, 55, 116, 115,
- /* 240 */ 114, 113, 112, 111, 110, 109, 108, 305, 1259, 63,
- /* 250 */ 516, 1701, 22, 300, 1604, 1265, 1266, 167, 1314, 1315,
- /* 260 */ 1317, 1318, 1319, 1320, 1321, 1322, 573, 569, 1330, 1331,
- /* 270 */ 1333, 1334, 1335, 1336, 1338, 1341, 39, 37, 1650, 1651,
- /* 280 */ 1373, 1267, 167, 167, 339, 552, 1261, 613, 1286, 49,
- /* 290 */ 1160, 1161, 76, 305, 1786, 1487, 516, 1337, 1421, 1259,
- /* 300 */ 1118, 602, 601, 600, 1122, 599, 1124, 1125, 598, 1127,
- /* 310 */ 595, 342, 1133, 592, 1135, 1136, 589, 586, 1285, 146,
- /* 320 */ 1332, 1581, 1804, 173, 663, 14, 479, 478, 1606, 377,
- /* 330 */ 553, 376, 1267, 1656, 1930, 1756, 1756, 577, 39, 37,
- /* 340 */ 312, 535, 1419, 1420, 1422, 1423, 339, 1928, 1261, 1654,
- /* 350 */ 1705, 1927, 84, 2, 42, 71, 1656, 63, 70, 1337,
- /* 360 */ 1818, 1259, 1267, 343, 95, 1787, 580, 1789, 1790, 576,
- /* 370 */ 605, 571, 1654, 1597, 1864, 663, 345, 1589, 330, 1860,
- /* 380 */ 159, 1288, 1332, 1262, 146, 1260, 1022, 167, 1021, 1339,
- /* 390 */ 1340, 33, 32, 1606, 1267, 40, 38, 36, 35, 34,
- /* 400 */ 1891, 1930, 384, 39, 37, 1342, 1265, 1266, 1486, 1485,
- /* 410 */ 611, 339, 1786, 1261, 165, 8, 1023, 438, 1927, 634,
- /* 420 */ 632, 1080, 611, 1284, 1337, 622, 1259, 167, 549, 137,
- /* 430 */ 136, 608, 607, 606, 1262, 1695, 1260, 663, 1484, 303,
- /* 440 */ 1804, 137, 136, 608, 607, 606, 172, 1332, 575, 1756,
- /* 450 */ 1756, 1339, 1340, 1756, 1082, 577, 127, 1265, 1266, 1267,
- /* 460 */ 1314, 1315, 1317, 1318, 1319, 1320, 1321, 1322, 573, 569,
- /* 470 */ 1330, 1331, 1333, 1334, 1335, 1336, 1338, 1341, 1818, 1756,
- /* 480 */ 9, 1591, 293, 1787, 580, 1789, 1790, 576, 574, 571,
- /* 490 */ 568, 1836, 167, 74, 125, 167, 1262, 222, 1260, 33,
- /* 500 */ 32, 1529, 663, 40, 38, 36, 35, 34, 551, 160,
- /* 510 */ 1872, 1873, 1587, 1877, 1483, 1600, 1339, 1340, 252, 1265,
- /* 520 */ 1266, 1579, 1314, 1315, 1317, 1318, 1319, 1320, 1321, 1322,
- /* 530 */ 573, 569, 1330, 1331, 1333, 1334, 1335, 1336, 1338, 1341,
- /* 540 */ 1700, 526, 300, 33, 32, 1457, 91, 40, 38, 36,
- /* 550 */ 35, 34, 169, 167, 316, 1756, 1241, 1242, 623, 124,
- /* 560 */ 1576, 1262, 1879, 1260, 26, 482, 481, 1596, 1462, 1604,
- /* 570 */ 33, 32, 123, 1582, 40, 38, 36, 35, 34, 213,
- /* 580 */ 1786, 1411, 477, 480, 1265, 1266, 1876, 1314, 1315, 1317,
- /* 590 */ 1318, 1319, 1320, 1321, 1322, 573, 569, 1330, 1331, 1333,
- /* 600 */ 1334, 1335, 1336, 1338, 1341, 39, 37, 475, 1804, 317,
- /* 610 */ 146, 315, 314, 339, 473, 1261, 578, 1361, 475, 1607,
- /* 620 */ 549, 1756, 611, 577, 28, 1299, 1337, 354, 1259, 474,
- /* 630 */ 33, 32, 1456, 450, 40, 38, 36, 35, 34, 538,
- /* 640 */ 474, 137, 136, 608, 607, 606, 1818, 1695, 127, 1332,
- /* 650 */ 96, 1787, 580, 1789, 1790, 576, 572, 571, 175, 74,
- /* 660 */ 1864, 1267, 526, 609, 1863, 1860, 1647, 1930, 554, 512,
- /* 670 */ 33, 32, 122, 382, 40, 38, 36, 35, 34, 27,
- /* 680 */ 164, 1599, 9, 1482, 1927, 1022, 125, 1021, 7, 1366,
- /* 690 */ 1604, 33, 32, 1481, 1565, 40, 38, 36, 35, 34,
- /* 700 */ 469, 250, 1872, 548, 663, 547, 33, 32, 1930, 1930,
- /* 710 */ 40, 38, 36, 35, 34, 1023, 544, 539, 1339, 1340,
- /* 720 */ 526, 166, 164, 307, 1756, 1927, 1927, 135, 487, 1404,
- /* 730 */ 526, 383, 39, 37, 1756, 1286, 1480, 302, 1879, 1284,
- /* 740 */ 339, 389, 1261, 497, 307, 526, 412, 604, 1604, 424,
- /* 750 */ 526, 549, 1299, 1337, 1477, 1259, 404, 209, 1604, 61,
- /* 760 */ 1359, 405, 1875, 1262, 373, 1260, 397, 1479, 425, 1478,
- /* 770 */ 399, 490, 255, 1604, 54, 484, 1332, 1756, 1604, 127,
- /* 780 */ 208, 1359, 1475, 375, 371, 419, 1265, 1266, 1267, 1314,
- /* 790 */ 1315, 1317, 1318, 1319, 1320, 1321, 1322, 573, 569, 1330,
- /* 800 */ 1331, 1333, 1334, 1335, 1336, 1338, 1341, 390, 1756, 2,
- /* 810 */ 1756, 1397, 58, 526, 1360, 57, 1879, 125, 505, 386,
- /* 820 */ 1289, 33, 32, 1756, 448, 40, 38, 36, 35, 34,
- /* 830 */ 1474, 663, 161, 1872, 1873, 1360, 1877, 177, 176, 1505,
- /* 840 */ 1874, 1604, 1347, 1009, 1010, 1339, 1340, 423, 1286, 1580,
- /* 850 */ 418, 417, 416, 415, 414, 411, 410, 409, 408, 407,
- /* 860 */ 403, 402, 401, 400, 394, 393, 392, 391, 549, 388,
- /* 870 */ 387, 1756, 1656, 616, 1473, 1393, 29, 337, 1354, 1355,
- /* 880 */ 1356, 1357, 1358, 1362, 1363, 1364, 1365, 658, 1655, 468,
- /* 890 */ 1262, 610, 1260, 1286, 1647, 1068, 127, 29, 337, 1354,
- /* 900 */ 1355, 1356, 1357, 1358, 1362, 1363, 1364, 1365, 272, 613,
- /* 910 */ 536, 1634, 1316, 1265, 1266, 1756, 1314, 1315, 1317, 1318,
- /* 920 */ 1319, 1320, 1321, 1322, 573, 569, 1330, 1331, 1333, 1334,
- /* 930 */ 1335, 1336, 1338, 1341, 125, 147, 1472, 1786, 561, 352,
- /* 940 */ 278, 351, 1884, 1393, 1743, 482, 481, 1516, 1774, 162,
- /* 950 */ 1872, 1873, 123, 1877, 276, 60, 1805, 232, 59, 1770,
- /* 960 */ 526, 526, 477, 480, 1471, 1804, 44, 4, 244, 483,
- /* 970 */ 145, 449, 1601, 578, 180, 429, 427, 1756, 1756, 1930,
- /* 980 */ 577, 1930, 1500, 1766, 1772, 334, 1470, 1786, 1604, 1604,
- /* 990 */ 526, 361, 164, 554, 164, 571, 1927, 216, 1927, 498,
- /* 1000 */ 556, 499, 1469, 1818, 1468, 1756, 63, 94, 1787, 580,
- /* 1010 */ 1789, 1790, 576, 526, 571, 1804, 558, 1864, 1604, 336,
- /* 1020 */ 335, 306, 1860, 578, 506, 1316, 526, 1756, 1756, 1275,
- /* 1030 */ 577, 201, 77, 1930, 199, 1396, 1644, 510, 526, 1930,
- /* 1040 */ 1337, 1604, 1268, 1756, 93, 1756, 166, 1511, 1467, 227,
- /* 1050 */ 1927, 350, 164, 1818, 1604, 1786, 1927, 95, 1787, 580,
- /* 1060 */ 1789, 1790, 576, 1332, 571, 526, 1604, 1864, 41, 485,
- /* 1070 */ 1316, 330, 1860, 1943, 526, 1267, 522, 53, 509, 68,
- /* 1080 */ 67, 381, 1898, 1804, 171, 524, 221, 203, 526, 1756,
- /* 1090 */ 202, 578, 205, 1604, 207, 204, 1756, 206, 577, 525,
- /* 1100 */ 301, 1509, 1604, 369, 1894, 367, 363, 359, 356, 353,
- /* 1110 */ 1270, 554, 131, 526, 1786, 1212, 1604, 1269, 566, 567,
- /* 1120 */ 526, 1818, 134, 488, 261, 94, 1787, 580, 1789, 1790,
- /* 1130 */ 576, 346, 571, 225, 135, 1864, 51, 550, 666, 306,
- /* 1140 */ 1860, 1604, 1804, 562, 167, 236, 51, 323, 1604, 41,
- /* 1150 */ 578, 1930, 267, 90, 1786, 1756, 617, 577, 41, 519,
- /* 1160 */ 1777, 11, 10, 87, 164, 249, 156, 3, 1927, 229,
- /* 1170 */ 254, 656, 652, 648, 644, 265, 584, 1276, 1066, 1271,
- /* 1180 */ 1818, 1111, 1804, 1418, 294, 1787, 580, 1789, 1790, 576,
- /* 1190 */ 578, 571, 239, 1367, 1786, 1756, 1323, 577, 1459, 1460,
- /* 1200 */ 1279, 1281, 257, 1779, 259, 271, 134, 92, 135, 5,
- /* 1210 */ 230, 1049, 569, 1330, 1331, 1333, 1334, 1335, 1336, 559,
- /* 1220 */ 1818, 360, 1804, 1139, 95, 1787, 580, 1789, 1790, 576,
- /* 1230 */ 578, 571, 268, 355, 1864, 1756, 119, 577, 330, 1860,
- /* 1240 */ 1943, 134, 549, 523, 1050, 313, 1228, 1273, 174, 1921,
- /* 1250 */ 385, 1351, 1284, 1143, 1272, 1150, 406, 413, 1697, 421,
- /* 1260 */ 1818, 420, 422, 1786, 95, 1787, 580, 1789, 1790, 576,
- /* 1270 */ 127, 571, 426, 428, 1864, 219, 430, 1290, 330, 1860,
- /* 1280 */ 1943, 431, 439, 1148, 1292, 442, 183, 443, 138, 1883,
- /* 1290 */ 554, 1804, 1291, 1235, 185, 212, 444, 1293, 188, 578,
- /* 1300 */ 445, 190, 447, 72, 1756, 73, 577, 451, 125, 194,
- /* 1310 */ 470, 472, 1594, 198, 118, 1590, 304, 1786, 200, 554,
- /* 1320 */ 140, 269, 141, 250, 1872, 548, 1592, 547, 1588, 1818,
- /* 1330 */ 1930, 142, 143, 285, 1787, 580, 1789, 1790, 576, 211,
- /* 1340 */ 571, 500, 1736, 164, 214, 1804, 507, 1927, 504, 511,
- /* 1350 */ 218, 322, 534, 578, 514, 520, 501, 1735, 1756, 1930,
- /* 1360 */ 577, 132, 1707, 517, 324, 1289, 81, 1786, 521, 133,
- /* 1370 */ 270, 83, 166, 554, 537, 1605, 1927, 530, 1905, 234,
- /* 1380 */ 1895, 238, 6, 1818, 1786, 532, 533, 285, 1787, 580,
- /* 1390 */ 1789, 1790, 576, 329, 571, 1804, 546, 531, 540, 529,
- /* 1400 */ 528, 248, 1288, 578, 1393, 126, 563, 560, 1756, 48,
- /* 1410 */ 577, 1880, 1804, 1930, 1904, 85, 1648, 331, 1577, 659,
- /* 1420 */ 578, 582, 264, 660, 243, 1756, 164, 577, 153, 1886,
- /* 1430 */ 1927, 247, 245, 1818, 1786, 246, 253, 96, 1787, 580,
- /* 1440 */ 1789, 1790, 576, 1845, 571, 273, 662, 1864, 299, 275,
- /* 1450 */ 1818, 565, 1860, 256, 149, 1787, 580, 1789, 1790, 576,
- /* 1460 */ 1786, 571, 1804, 52, 1946, 1926, 557, 286, 296, 258,
- /* 1470 */ 578, 564, 295, 1750, 277, 1756, 1749, 577, 65, 1748,
- /* 1480 */ 1747, 66, 1744, 357, 358, 1253, 1254, 170, 1804, 362,
- /* 1490 */ 1742, 364, 365, 527, 366, 1741, 578, 368, 555, 1944,
- /* 1500 */ 1818, 1756, 1740, 577, 96, 1787, 580, 1789, 1790, 576,
- /* 1510 */ 1786, 571, 370, 1739, 1864, 372, 1738, 1230, 374, 1861,
- /* 1520 */ 1231, 1718, 1786, 379, 380, 1716, 1818, 1717, 1715, 1690,
- /* 1530 */ 294, 1787, 580, 1789, 1790, 576, 1786, 571, 1804, 1689,
- /* 1540 */ 1200, 129, 1688, 1687, 69, 1686, 578, 395, 1681, 396,
- /* 1550 */ 1804, 1756, 1685, 577, 1684, 1683, 1682, 398, 578, 1680,
- /* 1560 */ 1679, 1678, 1677, 1756, 1804, 577, 1676, 1675, 1674, 1673,
- /* 1570 */ 1672, 1671, 575, 1670, 1669, 1668, 1818, 1756, 1667, 577,
- /* 1580 */ 289, 1787, 580, 1789, 1790, 576, 130, 571, 1818, 1786,
- /* 1590 */ 1666, 1665, 149, 1787, 580, 1789, 1790, 576, 1664, 571,
- /* 1600 */ 1663, 1662, 1818, 1202, 1660, 1659, 293, 1787, 580, 1789,
- /* 1610 */ 1790, 576, 1661, 571, 1658, 1837, 1657, 1804, 545, 1531,
- /* 1620 */ 178, 1530, 338, 120, 181, 578, 196, 1528, 179, 1496,
- /* 1630 */ 1756, 157, 577, 435, 1012, 437, 1011, 1945, 1495, 182,
- /* 1640 */ 152, 121, 1786, 452, 453, 467, 463, 459, 455, 195,
- /* 1650 */ 1731, 1725, 1714, 189, 1786, 1818, 187, 1713, 1699, 294,
- /* 1660 */ 1787, 580, 1789, 1790, 576, 1583, 571, 1527, 1786, 1042,
- /* 1670 */ 1804, 1525, 454, 1523, 456, 340, 458, 457, 578, 1521,
- /* 1680 */ 460, 75, 1804, 1756, 193, 577, 462, 461, 1519, 464,
- /* 1690 */ 578, 465, 466, 1508, 1507, 1756, 1804, 577, 1492, 1585,
- /* 1700 */ 1153, 1154, 197, 1584, 578, 1079, 1074, 50, 1818, 1756,
- /* 1710 */ 1517, 577, 294, 1787, 580, 1789, 1790, 576, 631, 571,
- /* 1720 */ 1818, 1076, 1786, 633, 279, 1787, 580, 1789, 1790, 576,
- /* 1730 */ 1075, 571, 1512, 318, 1818, 319, 1786, 1510, 280, 1787,
- /* 1740 */ 580, 1789, 1790, 576, 320, 571, 192, 186, 1786, 191,
- /* 1750 */ 1804, 486, 489, 446, 1491, 491, 1490, 1489, 578, 493,
- /* 1760 */ 495, 97, 1730, 1756, 1804, 577, 1237, 56, 1724, 184,
- /* 1770 */ 502, 1712, 578, 1710, 508, 503, 1804, 1756, 215, 577,
- /* 1780 */ 1711, 1709, 321, 1708, 578, 15, 144, 220, 1818, 1756,
- /* 1790 */ 1245, 577, 281, 1787, 580, 1789, 1790, 576, 1706, 571,
- /* 1800 */ 1698, 226, 1818, 518, 79, 1786, 288, 1787, 580, 1789,
- /* 1810 */ 1790, 576, 228, 571, 1818, 1786, 515, 80, 290, 1787,
- /* 1820 */ 580, 1789, 1790, 576, 82, 571, 87, 41, 231, 23,
- /* 1830 */ 47, 1786, 1433, 1804, 233, 241, 235, 1415, 237, 242,
- /* 1840 */ 1417, 578, 16, 1804, 25, 1777, 1756, 151, 577, 240,
- /* 1850 */ 24, 578, 46, 1410, 86, 1786, 1756, 17, 577, 1804,
- /* 1860 */ 1390, 251, 1389, 1776, 154, 1450, 45, 578, 18, 1439,
- /* 1870 */ 1445, 1818, 1756, 13, 577, 282, 1787, 580, 1789, 1790,
- /* 1880 */ 576, 1818, 571, 1804, 1444, 291, 1787, 580, 1789, 1790,
- /* 1890 */ 576, 578, 571, 332, 1449, 1448, 1756, 1818, 577, 333,
- /* 1900 */ 10, 283, 1787, 580, 1789, 1790, 576, 1277, 571, 1352,
- /* 1910 */ 19, 1786, 1821, 1307, 1327, 570, 155, 1325, 31, 581,
- /* 1920 */ 1324, 1818, 12, 20, 168, 292, 1787, 580, 1789, 1790,
- /* 1930 */ 576, 1786, 571, 21, 583, 1140, 341, 585, 579, 1804,
- /* 1940 */ 1137, 587, 588, 590, 1134, 591, 593, 578, 596, 1132,
- /* 1950 */ 594, 1786, 1756, 1128, 577, 1126, 1131, 597, 1117, 1804,
- /* 1960 */ 1130, 88, 1149, 603, 1129, 89, 62, 578, 262, 1145,
- /* 1970 */ 612, 1786, 1756, 1071, 577, 1070, 1040, 1818, 1069, 1804,
- /* 1980 */ 1067, 284, 1787, 580, 1789, 1790, 576, 578, 571, 1065,
- /* 1990 */ 1064, 1786, 1756, 1063, 577, 263, 1086, 1818, 1061, 1804,
- /* 2000 */ 1060, 297, 1787, 580, 1789, 1790, 576, 578, 571, 621,
- /* 2010 */ 1059, 1058, 1756, 1057, 577, 1056, 1055, 1818, 1083, 1804,
- /* 2020 */ 1081, 298, 1787, 580, 1789, 1790, 576, 578, 571, 1052,
- /* 2030 */ 1051, 1786, 1756, 1048, 577, 1047, 1046, 1818, 1045, 1524,
- /* 2040 */ 641, 1798, 1787, 580, 1789, 1790, 576, 1786, 571, 642,
- /* 2050 */ 643, 1522, 645, 646, 647, 1520, 649, 1818, 651, 1804,
- /* 2060 */ 650, 1797, 1787, 580, 1789, 1790, 576, 578, 571, 1518,
- /* 2070 */ 653, 654, 1756, 655, 577, 1804, 1506, 657, 1002, 1488,
- /* 2080 */ 266, 661, 664, 578, 1263, 274, 665, 1463, 1756, 1463,
- /* 2090 */ 577, 1463, 1463, 1463, 1463, 1463, 1463, 1818, 1786, 1463,
- /* 2100 */ 1463, 1796, 1787, 580, 1789, 1790, 576, 1463, 571, 1463,
- /* 2110 */ 1463, 1463, 1463, 1818, 1786, 1463, 1463, 310, 1787, 580,
- /* 2120 */ 1789, 1790, 576, 1463, 571, 1463, 1804, 1463, 1463, 1463,
- /* 2130 */ 1463, 1463, 1463, 1463, 578, 1463, 1463, 1463, 1463, 1756,
- /* 2140 */ 1463, 577, 1804, 1463, 1463, 1463, 1463, 1463, 1463, 1463,
- /* 2150 */ 578, 1463, 1463, 1463, 1463, 1756, 1463, 577, 1463, 1463,
- /* 2160 */ 1463, 1463, 1463, 1463, 1818, 1786, 1463, 1463, 309, 1787,
- /* 2170 */ 580, 1789, 1790, 576, 1463, 571, 1463, 1463, 1463, 1463,
- /* 2180 */ 1818, 1786, 1463, 1463, 311, 1787, 580, 1789, 1790, 576,
- /* 2190 */ 1463, 571, 1463, 1804, 1463, 1463, 1463, 1463, 1463, 1463,
- /* 2200 */ 1463, 578, 1463, 1463, 1463, 1463, 1756, 1463, 577, 1804,
- /* 2210 */ 1463, 1463, 1463, 1463, 1463, 1463, 1463, 578, 1463, 1463,
- /* 2220 */ 1463, 1463, 1756, 1463, 577, 1463, 1463, 1463, 1463, 1463,
- /* 2230 */ 1463, 1818, 1463, 1463, 1463, 308, 1787, 580, 1789, 1790,
- /* 2240 */ 576, 1463, 571, 1463, 1463, 1463, 1463, 1818, 1463, 1463,
- /* 2250 */ 1463, 287, 1787, 580, 1789, 1790, 576, 1463, 571,
+ /* 0 */ 1860, 1789, 443, 1860, 444, 1574, 451, 1874, 444, 1574,
+ /* 10 */ 1677, 1856, 44, 42, 1856, 351, 540, 1675, 1731, 1733,
+ /* 20 */ 346, 1856, 1317, 43, 41, 40, 39, 38, 52, 2033,
+ /* 30 */ 40, 39, 38, 1396, 1050, 1315, 1892, 1852, 1858, 334,
+ /* 40 */ 1852, 1858, 340, 1343, 582, 1686, 1344, 1852, 1858, 1842,
+ /* 50 */ 588, 594, 30, 588, 442, 581, 1391, 446, 37, 36,
+ /* 60 */ 588, 17, 43, 41, 40, 39, 38, 1874, 1323, 44,
+ /* 70 */ 42, 1466, 1872, 1130, 1054, 1055, 1907, 346, 566, 1317,
+ /* 80 */ 97, 1873, 1875, 598, 1877, 1878, 593, 1067, 588, 1066,
+ /* 90 */ 1396, 1562, 1315, 167, 1, 1960, 1892, 1732, 1733, 339,
+ /* 100 */ 1956, 77, 479, 1738, 595, 578, 1132, 2028, 1470, 1842,
+ /* 110 */ 333, 594, 172, 1391, 1342, 125, 685, 1068, 17, 1736,
+ /* 120 */ 1986, 1525, 565, 170, 1681, 1323, 1663, 2029, 567, 46,
+ /* 130 */ 1398, 1399, 596, 1842, 132, 62, 1907, 1540, 1561, 225,
+ /* 140 */ 98, 345, 1875, 598, 1877, 1878, 593, 1975, 588, 1892,
+ /* 150 */ 87, 1, 581, 1212, 1213, 1960, 58, 560, 109, 312,
+ /* 160 */ 1956, 108, 107, 106, 105, 104, 103, 102, 101, 100,
+ /* 170 */ 2028, 130, 1679, 685, 527, 1972, 1318, 58, 1316, 81,
+ /* 180 */ 1842, 331, 34, 265, 1786, 565, 170, 1398, 1399, 264,
+ /* 190 */ 2029, 567, 580, 168, 1968, 1969, 559, 1973, 556, 450,
+ /* 200 */ 1321, 1322, 446, 1372, 1373, 1375, 1376, 1377, 1378, 1379,
+ /* 210 */ 1380, 1381, 590, 586, 1389, 1390, 1392, 1393, 1394, 1395,
+ /* 220 */ 1397, 1400, 3, 581, 203, 58, 46, 94, 1427, 155,
+ /* 230 */ 578, 1551, 629, 1318, 1342, 1316, 58, 384, 160, 383,
+ /* 240 */ 173, 127, 1492, 477, 473, 469, 465, 202, 428, 1678,
+ /* 250 */ 448, 144, 143, 626, 625, 624, 1340, 1321, 1322, 132,
+ /* 260 */ 1372, 1373, 1375, 1376, 1377, 1378, 1379, 1380, 1381, 590,
+ /* 270 */ 586, 1389, 1390, 1392, 1393, 1394, 1395, 1397, 1400, 3,
+ /* 280 */ 44, 42, 562, 557, 78, 490, 489, 200, 346, 1861,
+ /* 290 */ 1317, 553, 1490, 1491, 1493, 1494, 130, 31, 1374, 578,
+ /* 300 */ 1856, 1396, 173, 1315, 183, 182, 109, 1432, 58, 108,
+ /* 310 */ 107, 106, 105, 104, 103, 102, 101, 100, 169, 1968,
+ /* 320 */ 1969, 629, 1973, 173, 1391, 460, 1852, 1858, 132, 17,
+ /* 330 */ 2033, 11, 1874, 9, 540, 380, 1323, 44, 42, 588,
+ /* 340 */ 144, 143, 626, 625, 624, 346, 175, 1317, 578, 158,
+ /* 350 */ 199, 193, 47, 198, 1643, 382, 378, 456, 1396, 2028,
+ /* 360 */ 1315, 1892, 1, 1686, 1343, 130, 540, 2033, 542, 582,
+ /* 370 */ 1932, 173, 173, 191, 1842, 2032, 594, 132, 119, 2029,
+ /* 380 */ 2031, 1391, 173, 391, 685, 481, 17, 171, 1968, 1969,
+ /* 390 */ 217, 1973, 1067, 1323, 1066, 1686, 2028, 1872, 1398, 1399,
+ /* 400 */ 1342, 1907, 1539, 231, 232, 97, 1873, 1875, 598, 1877,
+ /* 410 */ 1878, 593, 2032, 588, 121, 561, 2029, 2030, 167, 1,
+ /* 420 */ 1960, 308, 1068, 540, 339, 1956, 118, 117, 116, 115,
+ /* 430 */ 114, 113, 112, 111, 110, 119, 262, 1968, 577, 1975,
+ /* 440 */ 576, 685, 486, 2028, 1318, 1987, 1316, 79, 310, 631,
+ /* 450 */ 1738, 530, 1686, 1341, 173, 1398, 1399, 350, 565, 170,
+ /* 460 */ 13, 12, 310, 2029, 567, 530, 1736, 1971, 1321, 1322,
+ /* 470 */ 1661, 1372, 1373, 1375, 1376, 1377, 1378, 1379, 1380, 1381,
+ /* 480 */ 590, 586, 1389, 1390, 1392, 1393, 1394, 1395, 1397, 1400,
+ /* 490 */ 3, 230, 11, 1406, 544, 1537, 1932, 322, 173, 1342,
+ /* 500 */ 77, 1318, 1785, 1316, 305, 1170, 620, 619, 618, 1174,
+ /* 510 */ 617, 1176, 1177, 616, 1179, 613, 142, 1185, 610, 1187,
+ /* 520 */ 1188, 607, 604, 1682, 623, 1321, 1322, 179, 1372, 1373,
+ /* 530 */ 1375, 1376, 1377, 1378, 1379, 1380, 1381, 590, 586, 1389,
+ /* 540 */ 1390, 1392, 1393, 1394, 1395, 1397, 1400, 3, 44, 42,
+ /* 550 */ 392, 1295, 1296, 332, 1439, 361, 346, 323, 1317, 321,
+ /* 560 */ 320, 153, 483, 393, 74, 540, 485, 73, 51, 1396,
+ /* 570 */ 1688, 1315, 523, 1664, 385, 1975, 629, 389, 37, 36,
+ /* 580 */ 166, 1874, 43, 41, 40, 39, 38, 1344, 484, 11,
+ /* 590 */ 1345, 523, 1391, 1725, 1686, 144, 143, 626, 625, 624,
+ /* 600 */ 1874, 2028, 540, 1970, 1323, 44, 42, 1401, 656, 654,
+ /* 610 */ 1892, 1560, 1323, 346, 390, 1317, 2034, 170, 595, 643,
+ /* 620 */ 2028, 2029, 567, 1842, 224, 594, 1396, 1738, 1315, 1892,
+ /* 630 */ 8, 1686, 507, 460, 317, 2034, 170, 595, 2032, 540,
+ /* 640 */ 2029, 567, 1842, 1736, 594, 505, 596, 503, 1662, 1391,
+ /* 650 */ 1907, 398, 685, 1842, 292, 345, 1875, 598, 1877, 1878,
+ /* 660 */ 593, 1323, 588, 349, 80, 1872, 1398, 1399, 1686, 1907,
+ /* 670 */ 1671, 153, 566, 97, 1873, 1875, 598, 1877, 1878, 593,
+ /* 680 */ 1688, 588, 218, 1374, 2028, 1716, 2048, 8, 1960, 1559,
+ /* 690 */ 37, 36, 339, 1956, 43, 41, 40, 39, 38, 565,
+ /* 700 */ 170, 2028, 1994, 540, 2029, 567, 540, 45, 631, 685,
+ /* 710 */ 527, 1784, 1318, 305, 1316, 413, 565, 170, 414, 1552,
+ /* 720 */ 1787, 2029, 567, 1398, 1399, 153, 627, 1779, 1558, 1729,
+ /* 730 */ 264, 1842, 1686, 241, 1689, 1686, 1321, 1322, 178, 1372,
+ /* 740 */ 1373, 1375, 1376, 1377, 1378, 1379, 1380, 1381, 590, 586,
+ /* 750 */ 1389, 1390, 1392, 1393, 1394, 1395, 1397, 1400, 3, 1266,
+ /* 760 */ 37, 36, 540, 540, 43, 41, 40, 39, 38, 1318,
+ /* 770 */ 1842, 1316, 352, 1673, 458, 459, 173, 1502, 37, 36,
+ /* 780 */ 153, 1557, 43, 41, 40, 39, 38, 128, 1556, 1688,
+ /* 790 */ 1931, 1686, 1686, 1321, 1322, 26, 1372, 1373, 1375, 1376,
+ /* 800 */ 1377, 1378, 1379, 1380, 1381, 590, 586, 1389, 1390, 1392,
+ /* 810 */ 1393, 1394, 1395, 1397, 1400, 3, 44, 42, 526, 1317,
+ /* 820 */ 1779, 1555, 1738, 1842, 346, 628, 1317, 1554, 1729, 2033,
+ /* 830 */ 1842, 181, 1315, 540, 1553, 523, 1669, 1396, 1737, 1315,
+ /* 840 */ 32, 359, 1054, 1055, 485, 1683, 37, 36, 1550, 1874,
+ /* 850 */ 43, 41, 40, 39, 38, 644, 7, 1656, 523, 540,
+ /* 860 */ 1391, 1463, 1686, 1842, 2028, 1323, 484, 1549, 1874, 1842,
+ /* 870 */ 540, 136, 1323, 44, 42, 570, 1842, 540, 1892, 2034,
+ /* 880 */ 170, 346, 519, 1317, 2029, 567, 595, 2028, 1686, 524,
+ /* 890 */ 1842, 1842, 569, 594, 1396, 1345, 1315, 1892, 8, 1686,
+ /* 900 */ 518, 277, 2034, 170, 1716, 595, 1686, 2029, 567, 1842,
+ /* 910 */ 1842, 1342, 594, 685, 1872, 1829, 1548, 1391, 1907, 540,
+ /* 920 */ 685, 1525, 156, 1873, 1875, 598, 1877, 1878, 593, 1323,
+ /* 930 */ 588, 235, 221, 1872, 1398, 1399, 1547, 1907, 1532, 48,
+ /* 940 */ 4, 97, 1873, 1875, 598, 1877, 1878, 593, 1686, 588,
+ /* 950 */ 493, 492, 1546, 585, 2048, 1, 1960, 126, 1842, 1591,
+ /* 960 */ 339, 1956, 368, 545, 1997, 1980, 1459, 540, 488, 491,
+ /* 970 */ 2022, 1604, 540, 1318, 487, 1316, 1326, 685, 1842, 536,
+ /* 980 */ 1318, 494, 1316, 358, 538, 1545, 208, 210, 1544, 206,
+ /* 990 */ 209, 1398, 1399, 589, 1842, 229, 1686, 1321, 1322, 1543,
+ /* 1000 */ 523, 1686, 50, 522, 1321, 1322, 1542, 1372, 1373, 1375,
+ /* 1010 */ 1376, 1377, 1378, 1379, 1380, 1381, 590, 586, 1389, 1390,
+ /* 1020 */ 1392, 1393, 1394, 1395, 1397, 1400, 3, 1842, 540, 2028,
+ /* 1030 */ 1842, 540, 1531, 1534, 1535, 493, 492, 1318, 240, 1316,
+ /* 1040 */ 539, 1842, 126, 266, 2034, 170, 573, 233, 1842, 2029,
+ /* 1050 */ 567, 622, 137, 488, 491, 212, 1459, 1686, 211, 487,
+ /* 1060 */ 1686, 1321, 1322, 1601, 1372, 1373, 1375, 1376, 1377, 1378,
+ /* 1070 */ 1379, 1380, 1381, 590, 586, 1389, 1390, 1392, 1393, 1394,
+ /* 1080 */ 1395, 1397, 1400, 3, 307, 540, 1340, 214, 13, 12,
+ /* 1090 */ 213, 571, 1417, 421, 1325, 1374, 433, 353, 1462, 1586,
+ /* 1100 */ 1584, 93, 37, 36, 533, 2000, 43, 41, 40, 39,
+ /* 1110 */ 38, 90, 1644, 406, 1686, 434, 141, 408, 142, 1329,
+ /* 1120 */ 60, 496, 499, 259, 245, 60, 662, 661, 660, 659,
+ /* 1130 */ 356, 45, 658, 657, 133, 652, 651, 650, 649, 648,
+ /* 1140 */ 647, 646, 645, 146, 641, 640, 639, 355, 354, 636,
+ /* 1150 */ 635, 634, 633, 632, 554, 154, 478, 37, 36, 399,
+ /* 1160 */ 283, 43, 41, 40, 39, 38, 253, 1094, 237, 637,
+ /* 1170 */ 1163, 395, 1489, 45, 281, 66, 248, 1433, 65, 1863,
+ /* 1180 */ 37, 36, 1893, 1382, 43, 41, 40, 39, 38, 1482,
+ /* 1190 */ 602, 1114, 357, 1575, 187, 439, 437, 141, 1990, 432,
+ /* 1200 */ 1095, 1874, 427, 426, 425, 424, 423, 420, 419, 418,
+ /* 1210 */ 417, 416, 412, 411, 410, 409, 403, 402, 401, 400,
+ /* 1220 */ 142, 397, 396, 319, 638, 276, 1726, 1865, 37, 36,
+ /* 1230 */ 1892, 58, 43, 41, 40, 39, 38, 1328, 595, 122,
+ /* 1240 */ 141, 579, 1191, 1842, 261, 594, 1112, 1580, 258, 1195,
+ /* 1250 */ 2, 362, 5, 367, 318, 273, 180, 1282, 394, 1340,
+ /* 1260 */ 415, 1781, 422, 430, 574, 429, 1872, 431, 435, 96,
+ /* 1270 */ 1907, 436, 1202, 184, 97, 1873, 1875, 598, 1877, 1878,
+ /* 1280 */ 593, 313, 588, 438, 1346, 129, 441, 140, 1931, 1960,
+ /* 1290 */ 440, 1200, 145, 339, 1956, 680, 449, 1348, 452, 190,
+ /* 1300 */ 511, 192, 453, 1347, 454, 71, 70, 388, 1349, 455,
+ /* 1310 */ 177, 1874, 152, 195, 197, 457, 498, 523, 75, 76,
+ /* 1320 */ 461, 201, 480, 1425, 482, 1676, 205, 306, 1672, 207,
+ /* 1330 */ 376, 508, 374, 370, 366, 363, 360, 309, 120, 147,
+ /* 1340 */ 1892, 510, 148, 1674, 1670, 216, 2028, 149, 595, 512,
+ /* 1350 */ 150, 274, 219, 1842, 513, 594, 222, 520, 1820, 501,
+ /* 1360 */ 514, 2034, 170, 495, 525, 226, 2029, 567, 215, 517,
+ /* 1370 */ 552, 138, 534, 328, 528, 1819, 1872, 173, 1426, 1791,
+ /* 1380 */ 1907, 531, 330, 139, 97, 1873, 1875, 598, 1877, 1878,
+ /* 1390 */ 593, 535, 588, 1874, 84, 275, 86, 2048, 1687, 1960,
+ /* 1400 */ 1345, 548, 2006, 339, 1956, 64, 1991, 555, 63, 243,
+ /* 1410 */ 2001, 550, 2005, 1979, 551, 247, 335, 558, 6, 564,
+ /* 1420 */ 336, 549, 1892, 547, 546, 257, 575, 2027, 572, 1459,
+ /* 1430 */ 595, 1344, 2051, 256, 252, 1842, 1982, 594, 1976, 57,
+ /* 1440 */ 131, 1941, 88, 600, 1657, 161, 1874, 33, 343, 1420,
+ /* 1450 */ 1421, 1422, 1423, 1424, 1428, 1429, 1430, 1431, 1872, 254,
+ /* 1460 */ 255, 278, 1907, 1730, 260, 269, 97, 1873, 1875, 598,
+ /* 1470 */ 1877, 1878, 593, 681, 588, 1892, 49, 682, 304, 1935,
+ /* 1480 */ 290, 1960, 684, 595, 301, 339, 1956, 300, 1842, 1836,
+ /* 1490 */ 594, 282, 280, 1835, 68, 1834, 1833, 69, 1830, 364,
+ /* 1500 */ 365, 1309, 1310, 176, 369, 1828, 371, 372, 373, 1827,
+ /* 1510 */ 375, 1872, 1826, 377, 1874, 1907, 1825, 1824, 381, 97,
+ /* 1520 */ 1873, 1875, 598, 1877, 1878, 593, 379, 588, 1285, 1284,
+ /* 1530 */ 1802, 1801, 1933, 386, 1960, 387, 1874, 1800, 339, 1956,
+ /* 1540 */ 342, 341, 1799, 1892, 134, 1254, 1774, 1773, 1772, 1771,
+ /* 1550 */ 1331, 595, 1770, 1769, 72, 1768, 1842, 1767, 594, 1766,
+ /* 1560 */ 1765, 1396, 1764, 1324, 404, 1892, 1763, 405, 407, 1762,
+ /* 1570 */ 1761, 1760, 1759, 595, 1758, 1757, 1756, 1755, 1842, 1872,
+ /* 1580 */ 594, 1754, 1753, 1907, 1391, 1752, 1751, 97, 1873, 1875,
+ /* 1590 */ 598, 1877, 1878, 593, 1874, 588, 1323, 1750, 1749, 1748,
+ /* 1600 */ 543, 1872, 1960, 135, 1747, 1907, 339, 1956, 1746, 98,
+ /* 1610 */ 1873, 1875, 598, 1877, 1878, 593, 1745, 588, 1744, 1743,
+ /* 1620 */ 688, 1742, 1256, 1892, 1960, 1741, 1740, 1739, 1959, 1956,
+ /* 1630 */ 1606, 595, 1605, 1603, 272, 185, 1842, 1138, 594, 186,
+ /* 1640 */ 1571, 1570, 1057, 1056, 584, 188, 123, 1815, 164, 165,
+ /* 1650 */ 189, 445, 124, 678, 674, 670, 666, 270, 1809, 1872,
+ /* 1660 */ 447, 1798, 194, 1907, 196, 1797, 1783, 98, 1873, 1875,
+ /* 1670 */ 598, 1877, 1878, 593, 1665, 588, 1602, 1087, 1600, 462,
+ /* 1680 */ 1598, 463, 1960, 466, 464, 467, 583, 1956, 468, 1596,
+ /* 1690 */ 470, 472, 1594, 471, 95, 474, 1874, 238, 475, 1583,
+ /* 1700 */ 1582, 476, 1567, 1667, 1332, 1206, 1327, 1205, 1666, 1123,
+ /* 1710 */ 1129, 653, 59, 1592, 1128, 655, 1125, 1124, 204, 1587,
+ /* 1720 */ 324, 1585, 497, 325, 1566, 1892, 326, 1565, 1335, 1337,
+ /* 1730 */ 537, 500, 1564, 592, 506, 1814, 99, 1301, 1842, 1808,
+ /* 1740 */ 594, 586, 1389, 1390, 1392, 1393, 1394, 1395, 25, 502,
+ /* 1750 */ 504, 53, 1291, 515, 516, 1874, 223, 151, 1796, 1794,
+ /* 1760 */ 2033, 1872, 1795, 227, 1793, 1907, 1792, 1790, 1299, 298,
+ /* 1770 */ 1873, 1875, 598, 1877, 1878, 593, 591, 588, 541, 1925,
+ /* 1780 */ 18, 1289, 529, 220, 1892, 327, 228, 234, 82, 521,
+ /* 1790 */ 1782, 83, 595, 239, 90, 236, 532, 1842, 19, 594,
+ /* 1800 */ 85, 27, 15, 1408, 20, 1504, 244, 21, 56, 10,
+ /* 1810 */ 1874, 250, 1407, 1486, 242, 251, 246, 1863, 1488, 159,
+ /* 1820 */ 1872, 1481, 249, 28, 1907, 29, 61, 22, 157, 1873,
+ /* 1830 */ 1875, 598, 1877, 1878, 593, 89, 588, 1524, 1525, 1892,
+ /* 1840 */ 1519, 1518, 337, 1523, 1522, 338, 1456, 595, 1455, 263,
+ /* 1850 */ 1862, 12, 1842, 1418, 594, 16, 55, 1333, 54, 1365,
+ /* 1860 */ 162, 163, 174, 1874, 1910, 597, 1169, 1386, 587, 601,
+ /* 1870 */ 1384, 35, 1184, 1383, 14, 1872, 1357, 23, 599, 1907,
+ /* 1880 */ 568, 2049, 24, 98, 1873, 1875, 598, 1877, 1878, 593,
+ /* 1890 */ 1192, 588, 1892, 348, 603, 605, 1189, 329, 1960, 608,
+ /* 1900 */ 595, 611, 621, 1957, 614, 1842, 606, 594, 1201, 1183,
+ /* 1910 */ 1182, 1181, 1186, 609, 1874, 1180, 612, 267, 1197, 1178,
+ /* 1920 */ 615, 91, 92, 67, 1085, 1120, 630, 1119, 1872, 1118,
+ /* 1930 */ 1117, 1874, 1907, 1116, 642, 1115, 299, 1873, 1875, 598,
+ /* 1940 */ 1877, 1878, 593, 1892, 588, 1113, 1111, 1110, 1109, 1136,
+ /* 1950 */ 268, 592, 1107, 1106, 1105, 1104, 1842, 1103, 594, 1102,
+ /* 1960 */ 1892, 1101, 1100, 1131, 1097, 1133, 1096, 1091, 595, 1093,
+ /* 1970 */ 1599, 1092, 1090, 1842, 663, 594, 1597, 664, 667, 1872,
+ /* 1980 */ 665, 1595, 1593, 1907, 671, 668, 669, 298, 1873, 1875,
+ /* 1990 */ 598, 1877, 1878, 593, 673, 588, 1872, 1926, 672, 675,
+ /* 2000 */ 1907, 676, 677, 1581, 156, 1873, 1875, 598, 1877, 1878,
+ /* 2010 */ 593, 1563, 588, 679, 1047, 683, 1874, 271, 1319, 279,
+ /* 2020 */ 686, 687, 1538, 1538, 1538, 1538, 1538, 1538, 1538, 1538,
+ /* 2030 */ 1538, 1538, 1538, 1538, 1538, 1538, 1538, 1538, 1538, 1538,
+ /* 2040 */ 1538, 1538, 1538, 1538, 1538, 1892, 1998, 1538, 1538, 1538,
+ /* 2050 */ 1538, 1538, 1538, 595, 1538, 1538, 1538, 1538, 1842, 1538,
+ /* 2060 */ 594, 1538, 1538, 1538, 1538, 1538, 1538, 1874, 1538, 1538,
+ /* 2070 */ 1538, 1538, 1538, 1538, 1538, 1538, 1538, 1538, 1538, 1538,
+ /* 2080 */ 1538, 1872, 1538, 1538, 1538, 1907, 1538, 1538, 1538, 294,
+ /* 2090 */ 1873, 1875, 598, 1877, 1878, 593, 1892, 588, 1538, 1538,
+ /* 2100 */ 1538, 1538, 1538, 1538, 595, 1538, 1538, 1538, 1538, 1842,
+ /* 2110 */ 1538, 594, 1538, 1538, 1538, 1538, 1538, 1538, 1538, 1538,
+ /* 2120 */ 1538, 1538, 1538, 1538, 1538, 1538, 1538, 1538, 1538, 1538,
+ /* 2130 */ 1874, 1538, 1872, 563, 1538, 1538, 1907, 1538, 1538, 1538,
+ /* 2140 */ 157, 1873, 1875, 598, 1877, 1878, 593, 1538, 588, 1538,
+ /* 2150 */ 1874, 1538, 1538, 1538, 1538, 1538, 1538, 1538, 1538, 1892,
+ /* 2160 */ 1538, 1538, 1538, 1538, 344, 1538, 1538, 595, 1538, 1538,
+ /* 2170 */ 1538, 1538, 1842, 1538, 594, 1538, 1538, 1538, 1538, 1892,
+ /* 2180 */ 1538, 1538, 1538, 1538, 347, 1538, 1538, 595, 1538, 1538,
+ /* 2190 */ 1538, 1538, 1842, 2050, 594, 1872, 1538, 1538, 1538, 1907,
+ /* 2200 */ 1538, 1874, 1538, 299, 1873, 1875, 598, 1877, 1878, 593,
+ /* 2210 */ 1538, 588, 1538, 1538, 1538, 1872, 1538, 1538, 1874, 1907,
+ /* 2220 */ 1538, 1538, 1538, 299, 1873, 1875, 598, 1877, 1878, 593,
+ /* 2230 */ 1892, 588, 1538, 1538, 1538, 1538, 1538, 1538, 595, 1538,
+ /* 2240 */ 1538, 1538, 1538, 1842, 1538, 594, 1538, 1892, 1538, 1538,
+ /* 2250 */ 1538, 1538, 1538, 1538, 1538, 595, 1538, 1538, 1538, 1538,
+ /* 2260 */ 1842, 1874, 594, 1538, 1538, 1538, 509, 1538, 1538, 1538,
+ /* 2270 */ 1907, 1538, 1538, 1538, 292, 1873, 1875, 598, 1877, 1878,
+ /* 2280 */ 593, 1538, 588, 1872, 1538, 1538, 1538, 1907, 1538, 1538,
+ /* 2290 */ 1892, 284, 1873, 1875, 598, 1877, 1878, 593, 595, 588,
+ /* 2300 */ 1538, 1538, 1538, 1842, 1538, 594, 1538, 1538, 1538, 1538,
+ /* 2310 */ 1538, 1538, 1874, 1538, 1538, 1538, 1538, 1538, 1538, 1538,
+ /* 2320 */ 1538, 1538, 1538, 1538, 1538, 1538, 1872, 1538, 1538, 1874,
+ /* 2330 */ 1907, 1538, 1538, 1538, 285, 1873, 1875, 598, 1877, 1878,
+ /* 2340 */ 593, 1892, 588, 1538, 1538, 1538, 1538, 1538, 1538, 595,
+ /* 2350 */ 1538, 1538, 1538, 1538, 1842, 1538, 594, 1538, 1892, 1538,
+ /* 2360 */ 1538, 1538, 1538, 1538, 1538, 1538, 595, 1538, 1538, 1538,
+ /* 2370 */ 1538, 1842, 1538, 594, 1538, 1538, 1538, 1872, 1538, 1538,
+ /* 2380 */ 1874, 1907, 1538, 1538, 1538, 286, 1873, 1875, 598, 1877,
+ /* 2390 */ 1878, 593, 1538, 588, 1872, 1538, 1538, 1538, 1907, 1538,
+ /* 2400 */ 1538, 1538, 293, 1873, 1875, 598, 1877, 1878, 593, 1892,
+ /* 2410 */ 588, 1538, 1538, 1538, 1538, 1538, 1538, 595, 1538, 1538,
+ /* 2420 */ 1538, 1538, 1842, 1538, 594, 1538, 1538, 1538, 1538, 1538,
+ /* 2430 */ 1538, 1874, 1538, 1538, 1538, 1538, 1538, 1538, 1538, 1538,
+ /* 2440 */ 1538, 1538, 1538, 1538, 1538, 1872, 1538, 1538, 1538, 1907,
+ /* 2450 */ 1874, 1538, 1538, 295, 1873, 1875, 598, 1877, 1878, 593,
+ /* 2460 */ 1892, 588, 1538, 1538, 1538, 1538, 1538, 1538, 595, 1538,
+ /* 2470 */ 1538, 1538, 1538, 1842, 1538, 594, 1538, 1538, 1538, 1892,
+ /* 2480 */ 1538, 1538, 1538, 1538, 1538, 1538, 1538, 595, 1538, 1538,
+ /* 2490 */ 1538, 1538, 1842, 1538, 594, 1538, 1872, 1538, 1538, 1538,
+ /* 2500 */ 1907, 1538, 1538, 1538, 287, 1873, 1875, 598, 1877, 1878,
+ /* 2510 */ 593, 1538, 588, 1874, 1538, 1872, 1538, 1538, 1538, 1907,
+ /* 2520 */ 1538, 1538, 1538, 296, 1873, 1875, 598, 1877, 1878, 593,
+ /* 2530 */ 1538, 588, 1538, 1538, 1538, 1874, 1538, 1538, 1538, 1538,
+ /* 2540 */ 1538, 1538, 1892, 1538, 1538, 1538, 1538, 1538, 1538, 1538,
+ /* 2550 */ 595, 1538, 1538, 1538, 1538, 1842, 1538, 594, 1538, 1538,
+ /* 2560 */ 1538, 1538, 1538, 1538, 1892, 1538, 1538, 1538, 1538, 1538,
+ /* 2570 */ 1538, 1538, 595, 1538, 1538, 1538, 1538, 1842, 1872, 594,
+ /* 2580 */ 1538, 1538, 1907, 1538, 1538, 1538, 288, 1873, 1875, 598,
+ /* 2590 */ 1877, 1878, 593, 1538, 588, 1874, 1538, 1538, 1538, 1538,
+ /* 2600 */ 1872, 1538, 1538, 1538, 1907, 1538, 1538, 1538, 297, 1873,
+ /* 2610 */ 1875, 598, 1877, 1878, 593, 1874, 588, 1538, 1538, 1538,
+ /* 2620 */ 1538, 1538, 1538, 1538, 1892, 1538, 1538, 1538, 1538, 1538,
+ /* 2630 */ 1538, 1538, 595, 1538, 1538, 1538, 1538, 1842, 1538, 594,
+ /* 2640 */ 1538, 1538, 1538, 1538, 1892, 1538, 1538, 1538, 1538, 1538,
+ /* 2650 */ 1538, 1538, 595, 1538, 1538, 1538, 1538, 1842, 1874, 594,
+ /* 2660 */ 1872, 1538, 1538, 1538, 1907, 1538, 1538, 1538, 289, 1873,
+ /* 2670 */ 1875, 598, 1877, 1878, 593, 1538, 588, 1538, 1874, 1538,
+ /* 2680 */ 1872, 1538, 1538, 1538, 1907, 1538, 1538, 1892, 302, 1873,
+ /* 2690 */ 1875, 598, 1877, 1878, 593, 595, 588, 1538, 1538, 1538,
+ /* 2700 */ 1842, 1538, 594, 1538, 1538, 1538, 1538, 1892, 1538, 1538,
+ /* 2710 */ 1538, 1538, 1538, 1538, 1538, 595, 1538, 1538, 1538, 1538,
+ /* 2720 */ 1842, 1538, 594, 1872, 1538, 1538, 1538, 1907, 1538, 1874,
+ /* 2730 */ 1538, 303, 1873, 1875, 598, 1877, 1878, 593, 1538, 588,
+ /* 2740 */ 1538, 1538, 1538, 1872, 1538, 1538, 1538, 1907, 1538, 1538,
+ /* 2750 */ 1538, 1886, 1873, 1875, 598, 1877, 1878, 593, 1892, 588,
+ /* 2760 */ 1538, 1538, 1538, 1538, 1538, 1538, 595, 1538, 1538, 1538,
+ /* 2770 */ 1538, 1842, 1538, 594, 1538, 1538, 1538, 1538, 1538, 1538,
+ /* 2780 */ 1874, 1538, 1538, 1538, 1538, 1538, 1538, 1538, 1538, 1538,
+ /* 2790 */ 1538, 1538, 1538, 1538, 1872, 1538, 1538, 1538, 1907, 1874,
+ /* 2800 */ 1538, 1538, 1885, 1873, 1875, 598, 1877, 1878, 593, 1892,
+ /* 2810 */ 588, 1538, 1538, 1538, 1538, 1538, 1538, 595, 1538, 1538,
+ /* 2820 */ 1538, 1538, 1842, 1538, 594, 1538, 1538, 1538, 1892, 1538,
+ /* 2830 */ 1538, 1538, 1538, 1538, 1538, 1538, 595, 1538, 1538, 1538,
+ /* 2840 */ 1538, 1842, 1538, 594, 1538, 1872, 1538, 1538, 1538, 1907,
+ /* 2850 */ 1538, 1538, 1538, 1884, 1873, 1875, 598, 1877, 1878, 593,
+ /* 2860 */ 1538, 588, 1874, 1538, 1872, 1538, 1538, 1538, 1907, 1538,
+ /* 2870 */ 1538, 1538, 314, 1873, 1875, 598, 1877, 1878, 593, 1538,
+ /* 2880 */ 588, 1538, 1538, 1538, 1874, 1538, 1538, 1538, 1538, 1538,
+ /* 2890 */ 1538, 1892, 1538, 1538, 1538, 1538, 1538, 1538, 1538, 595,
+ /* 2900 */ 1538, 1538, 1538, 1538, 1842, 1538, 594, 1538, 1538, 1538,
+ /* 2910 */ 1538, 1538, 1538, 1892, 1538, 1538, 1538, 1538, 1538, 1538,
+ /* 2920 */ 1538, 595, 1538, 1538, 1538, 1538, 1842, 1872, 594, 1538,
+ /* 2930 */ 1538, 1907, 1538, 1538, 1538, 315, 1873, 1875, 598, 1877,
+ /* 2940 */ 1878, 593, 1538, 588, 1874, 1538, 1538, 1538, 1538, 1872,
+ /* 2950 */ 1538, 1538, 1538, 1907, 1538, 1538, 1538, 311, 1873, 1875,
+ /* 2960 */ 598, 1877, 1878, 593, 1874, 588, 1538, 1538, 1538, 1538,
+ /* 2970 */ 1538, 1538, 1538, 1892, 1538, 1538, 1538, 1538, 1538, 1538,
+ /* 2980 */ 1538, 595, 1538, 1538, 1538, 1538, 1842, 1538, 594, 1538,
+ /* 2990 */ 1538, 1538, 1538, 1892, 1538, 1538, 1538, 1538, 1538, 1538,
+ /* 3000 */ 1538, 595, 1538, 1538, 1538, 1538, 1842, 1538, 594, 1872,
+ /* 3010 */ 1538, 1538, 1538, 1907, 1538, 1538, 1538, 316, 1873, 1875,
+ /* 3020 */ 598, 1877, 1878, 593, 1538, 588, 1538, 1538, 1538, 1872,
+ /* 3030 */ 1538, 1538, 1538, 1907, 1538, 1538, 1538, 291, 1873, 1875,
+ /* 3040 */ 598, 1877, 1878, 593, 1538, 588,
};
static const YYCODETYPE yy_lookahead[] = {
- /* 0 */ 312, 404, 314, 315, 337, 312, 316, 314, 315, 351,
- /* 10 */ 389, 390, 12, 13, 417, 328, 358, 327, 421, 361,
- /* 20 */ 20, 0, 22, 336, 334, 12, 13, 14, 15, 16,
- /* 30 */ 308, 20, 345, 33, 344, 35, 347, 20, 20, 350,
- /* 40 */ 351, 364, 21, 338, 316, 24, 25, 26, 27, 28,
- /* 50 */ 29, 30, 31, 32, 349, 327, 56, 313, 336, 4,
- /* 60 */ 316, 61, 334, 14, 15, 16, 344, 307, 68, 309,
- /* 70 */ 60, 349, 344, 351, 12, 13, 14, 338, 373, 374,
- /* 80 */ 375, 404, 20, 0, 22, 336, 56, 20, 349, 89,
- /* 90 */ 385, 4, 336, 344, 417, 33, 374, 35, 421, 343,
- /* 100 */ 378, 379, 380, 381, 382, 383, 89, 385, 352, 335,
- /* 110 */ 388, 111, 373, 374, 392, 393, 394, 351, 56, 89,
- /* 120 */ 404, 91, 348, 61, 385, 125, 126, 361, 406, 380,
- /* 130 */ 68, 44, 45, 417, 8, 9, 414, 421, 12, 13,
- /* 140 */ 14, 15, 16, 20, 89, 89, 63, 64, 65, 66,
- /* 150 */ 67, 89, 69, 70, 71, 72, 73, 74, 75, 76,
- /* 160 */ 77, 78, 79, 80, 81, 82, 83, 84, 85, 86,
- /* 170 */ 170, 21, 172, 111, 24, 25, 26, 27, 28, 29,
- /* 180 */ 30, 31, 32, 313, 338, 20, 316, 125, 126, 120,
- /* 190 */ 121, 1, 2, 193, 194, 349, 196, 197, 198, 199,
- /* 200 */ 200, 201, 202, 203, 204, 205, 206, 207, 208, 209,
- /* 210 */ 210, 211, 212, 213, 0, 21, 90, 8, 9, 373,
- /* 220 */ 374, 12, 13, 14, 15, 16, 316, 227, 34, 321,
- /* 230 */ 36, 385, 170, 325, 172, 22, 121, 327, 24, 25,
- /* 240 */ 26, 27, 28, 29, 30, 31, 32, 178, 35, 89,
- /* 250 */ 181, 360, 43, 362, 344, 193, 194, 227, 196, 197,
- /* 260 */ 198, 199, 200, 201, 202, 203, 204, 205, 206, 207,
- /* 270 */ 208, 209, 210, 211, 212, 213, 12, 13, 350, 351,
- /* 280 */ 90, 68, 227, 227, 20, 20, 22, 60, 20, 89,
- /* 290 */ 125, 126, 177, 178, 308, 308, 181, 33, 193, 35,
- /* 300 */ 102, 103, 104, 105, 106, 107, 108, 109, 110, 111,
- /* 310 */ 112, 328, 114, 115, 116, 117, 118, 119, 20, 336,
- /* 320 */ 56, 0, 336, 56, 111, 61, 322, 323, 345, 169,
- /* 330 */ 344, 171, 68, 336, 404, 349, 349, 351, 12, 13,
- /* 340 */ 343, 236, 237, 238, 239, 240, 20, 417, 22, 352,
- /* 350 */ 0, 421, 318, 89, 89, 88, 336, 89, 91, 33,
- /* 360 */ 374, 35, 68, 343, 378, 379, 380, 381, 382, 383,
- /* 370 */ 100, 385, 352, 339, 388, 111, 328, 337, 392, 393,
- /* 380 */ 394, 20, 56, 170, 336, 172, 20, 227, 22, 125,
- /* 390 */ 126, 8, 9, 345, 68, 12, 13, 14, 15, 16,
- /* 400 */ 414, 404, 316, 12, 13, 14, 193, 194, 308, 308,
- /* 410 */ 101, 20, 308, 22, 417, 89, 50, 14, 421, 322,
- /* 420 */ 323, 35, 101, 20, 33, 68, 35, 227, 316, 120,
- /* 430 */ 121, 122, 123, 124, 170, 344, 172, 111, 308, 353,
- /* 440 */ 336, 120, 121, 122, 123, 124, 355, 56, 344, 349,
- /* 450 */ 349, 125, 126, 349, 68, 351, 344, 193, 194, 68,
- /* 460 */ 196, 197, 198, 199, 200, 201, 202, 203, 204, 205,
- /* 470 */ 206, 207, 208, 209, 210, 211, 212, 213, 374, 349,
- /* 480 */ 89, 337, 378, 379, 380, 381, 382, 383, 384, 385,
- /* 490 */ 386, 387, 227, 320, 382, 227, 170, 120, 172, 8,
- /* 500 */ 9, 0, 111, 12, 13, 14, 15, 16, 396, 397,
- /* 510 */ 398, 399, 337, 401, 308, 342, 125, 126, 157, 193,
- /* 520 */ 194, 0, 196, 197, 198, 199, 200, 201, 202, 203,
- /* 530 */ 204, 205, 206, 207, 208, 209, 210, 211, 212, 213,
- /* 540 */ 360, 316, 362, 8, 9, 162, 318, 12, 13, 14,
- /* 550 */ 15, 16, 327, 227, 37, 349, 179, 180, 324, 331,
- /* 560 */ 326, 170, 376, 172, 2, 64, 65, 339, 305, 344,
- /* 570 */ 8, 9, 71, 0, 12, 13, 14, 15, 16, 337,
- /* 580 */ 308, 90, 81, 82, 193, 194, 400, 196, 197, 198,
- /* 590 */ 199, 200, 201, 202, 203, 204, 205, 206, 207, 208,
- /* 600 */ 209, 210, 211, 212, 213, 12, 13, 101, 336, 92,
- /* 610 */ 336, 94, 95, 20, 97, 22, 344, 152, 101, 345,
- /* 620 */ 316, 349, 101, 351, 2, 90, 33, 364, 35, 123,
- /* 630 */ 8, 9, 249, 60, 12, 13, 14, 15, 16, 155,
- /* 640 */ 123, 120, 121, 122, 123, 124, 374, 344, 344, 56,
- /* 650 */ 378, 379, 380, 381, 382, 383, 337, 385, 355, 320,
- /* 660 */ 388, 68, 316, 346, 392, 393, 349, 404, 364, 364,
- /* 670 */ 8, 9, 333, 327, 12, 13, 14, 15, 16, 214,
- /* 680 */ 417, 342, 89, 308, 421, 20, 382, 22, 39, 224,
- /* 690 */ 344, 8, 9, 308, 325, 12, 13, 14, 15, 16,
- /* 700 */ 35, 397, 398, 399, 111, 401, 8, 9, 404, 404,
- /* 710 */ 12, 13, 14, 15, 16, 50, 232, 233, 125, 126,
- /* 720 */ 316, 417, 417, 61, 349, 421, 421, 43, 4, 14,
- /* 730 */ 316, 327, 12, 13, 349, 20, 308, 18, 376, 20,
- /* 740 */ 20, 327, 22, 19, 61, 316, 27, 337, 344, 30,
- /* 750 */ 316, 316, 90, 33, 309, 35, 327, 33, 344, 3,
- /* 760 */ 98, 327, 400, 170, 165, 172, 47, 308, 49, 308,
- /* 770 */ 51, 47, 424, 344, 90, 51, 56, 349, 344, 344,
- /* 780 */ 56, 98, 308, 184, 185, 77, 193, 194, 68, 196,
- /* 790 */ 197, 198, 199, 200, 201, 202, 203, 204, 205, 206,
- /* 800 */ 207, 208, 209, 210, 211, 212, 213, 88, 349, 89,
- /* 810 */ 349, 4, 88, 316, 152, 91, 376, 382, 368, 100,
- /* 820 */ 20, 8, 9, 349, 327, 12, 13, 14, 15, 16,
- /* 830 */ 308, 111, 397, 398, 399, 152, 401, 129, 130, 0,
- /* 840 */ 400, 344, 14, 44, 45, 125, 126, 128, 20, 0,
- /* 850 */ 131, 132, 133, 134, 135, 136, 137, 138, 139, 140,
- /* 860 */ 141, 142, 143, 144, 145, 146, 147, 148, 316, 150,
- /* 870 */ 151, 349, 336, 13, 308, 226, 214, 215, 216, 217,
- /* 880 */ 218, 219, 220, 221, 222, 223, 224, 48, 352, 317,
- /* 890 */ 170, 346, 172, 20, 349, 35, 344, 214, 215, 216,
- /* 900 */ 217, 218, 219, 220, 221, 222, 223, 224, 329, 60,
- /* 910 */ 415, 332, 197, 193, 194, 349, 196, 197, 198, 199,
- /* 920 */ 200, 201, 202, 203, 204, 205, 206, 207, 208, 209,
- /* 930 */ 210, 211, 212, 213, 382, 18, 308, 308, 43, 364,
- /* 940 */ 23, 364, 225, 226, 0, 64, 65, 0, 338, 397,
- /* 950 */ 398, 399, 71, 401, 37, 38, 336, 157, 41, 349,
- /* 960 */ 316, 316, 81, 82, 308, 336, 42, 43, 411, 22,
- /* 970 */ 157, 327, 327, 344, 57, 58, 59, 349, 349, 404,
- /* 980 */ 351, 404, 315, 373, 374, 375, 308, 308, 344, 344,
- /* 990 */ 316, 47, 417, 364, 417, 385, 421, 56, 421, 364,
- /* 1000 */ 244, 327, 308, 374, 308, 349, 89, 378, 379, 380,
- /* 1010 */ 381, 382, 383, 316, 385, 336, 43, 388, 344, 12,
- /* 1020 */ 13, 392, 393, 344, 327, 197, 316, 349, 349, 22,
- /* 1030 */ 351, 93, 91, 404, 96, 228, 348, 327, 316, 404,
- /* 1040 */ 33, 344, 35, 349, 127, 349, 417, 0, 308, 327,
- /* 1050 */ 421, 317, 417, 374, 344, 308, 421, 378, 379, 380,
- /* 1060 */ 381, 382, 383, 56, 385, 316, 344, 388, 43, 22,
- /* 1070 */ 197, 392, 393, 394, 316, 68, 327, 157, 158, 162,
- /* 1080 */ 163, 164, 403, 336, 167, 327, 43, 93, 316, 349,
- /* 1090 */ 96, 344, 93, 344, 93, 96, 349, 96, 351, 327,
- /* 1100 */ 183, 0, 344, 186, 377, 188, 189, 190, 191, 192,
- /* 1110 */ 35, 364, 43, 316, 308, 90, 344, 35, 111, 61,
- /* 1120 */ 316, 374, 43, 22, 327, 378, 379, 380, 381, 382,
- /* 1130 */ 383, 327, 385, 90, 43, 388, 43, 402, 19, 392,
- /* 1140 */ 393, 344, 336, 248, 227, 43, 43, 341, 344, 43,
- /* 1150 */ 344, 404, 33, 89, 308, 349, 13, 351, 43, 90,
- /* 1160 */ 46, 1, 2, 99, 417, 395, 47, 405, 421, 90,
- /* 1170 */ 418, 52, 53, 54, 55, 56, 43, 170, 35, 172,
- /* 1180 */ 374, 90, 336, 90, 378, 379, 380, 381, 382, 383,
- /* 1190 */ 344, 385, 90, 90, 308, 349, 90, 351, 125, 126,
- /* 1200 */ 193, 194, 418, 89, 418, 90, 43, 88, 43, 229,
- /* 1210 */ 91, 35, 205, 206, 207, 208, 209, 210, 211, 246,
- /* 1220 */ 374, 47, 336, 90, 378, 379, 380, 381, 382, 383,
- /* 1230 */ 344, 385, 366, 372, 388, 349, 43, 351, 392, 393,
- /* 1240 */ 394, 43, 316, 124, 68, 371, 168, 172, 42, 403,
- /* 1250 */ 356, 193, 20, 90, 172, 90, 316, 356, 316, 152,
- /* 1260 */ 374, 354, 354, 308, 378, 379, 380, 381, 382, 383,
- /* 1270 */ 344, 385, 316, 316, 388, 156, 316, 20, 392, 393,
- /* 1280 */ 394, 310, 310, 90, 20, 370, 320, 351, 90, 403,
- /* 1290 */ 364, 336, 20, 174, 320, 176, 363, 20, 320, 344,
- /* 1300 */ 365, 320, 363, 320, 349, 320, 351, 316, 382, 320,
- /* 1310 */ 310, 336, 336, 336, 316, 336, 310, 308, 336, 364,
- /* 1320 */ 336, 370, 336, 397, 398, 399, 336, 401, 336, 374,
- /* 1330 */ 404, 336, 336, 378, 379, 380, 381, 382, 383, 318,
- /* 1340 */ 385, 175, 349, 417, 318, 336, 316, 421, 351, 316,
- /* 1350 */ 318, 363, 234, 344, 349, 154, 369, 349, 349, 404,
- /* 1360 */ 351, 359, 349, 349, 349, 20, 318, 308, 357, 359,
- /* 1370 */ 332, 318, 417, 364, 235, 344, 421, 349, 410, 359,
- /* 1380 */ 377, 359, 241, 374, 308, 349, 349, 378, 379, 380,
- /* 1390 */ 381, 382, 383, 349, 385, 336, 161, 243, 349, 242,
- /* 1400 */ 230, 372, 20, 344, 226, 344, 247, 245, 349, 89,
- /* 1410 */ 351, 376, 336, 404, 410, 89, 349, 250, 326, 36,
- /* 1420 */ 344, 340, 318, 311, 412, 349, 417, 351, 410, 413,
- /* 1430 */ 421, 407, 409, 374, 308, 408, 419, 378, 379, 380,
- /* 1440 */ 381, 382, 383, 391, 385, 316, 310, 388, 362, 319,
- /* 1450 */ 374, 392, 393, 419, 378, 379, 380, 381, 382, 383,
- /* 1460 */ 308, 385, 336, 367, 425, 420, 420, 330, 330, 419,
- /* 1470 */ 344, 420, 330, 0, 306, 349, 0, 351, 177, 0,
- /* 1480 */ 0, 42, 0, 35, 187, 35, 35, 35, 336, 187,
- /* 1490 */ 0, 35, 35, 341, 187, 0, 344, 187, 422, 423,
- /* 1500 */ 374, 349, 0, 351, 378, 379, 380, 381, 382, 383,
- /* 1510 */ 308, 385, 35, 0, 388, 22, 0, 170, 35, 393,
- /* 1520 */ 172, 0, 308, 166, 165, 0, 374, 0, 0, 0,
- /* 1530 */ 378, 379, 380, 381, 382, 383, 308, 385, 336, 0,
- /* 1540 */ 46, 42, 0, 0, 149, 0, 344, 144, 0, 35,
- /* 1550 */ 336, 349, 0, 351, 0, 0, 0, 144, 344, 0,
- /* 1560 */ 0, 0, 0, 349, 336, 351, 0, 0, 0, 0,
- /* 1570 */ 0, 0, 344, 0, 0, 0, 374, 349, 0, 351,
- /* 1580 */ 378, 379, 380, 381, 382, 383, 42, 385, 374, 308,
- /* 1590 */ 0, 0, 378, 379, 380, 381, 382, 383, 0, 385,
- /* 1600 */ 0, 0, 374, 22, 0, 0, 378, 379, 380, 381,
- /* 1610 */ 382, 383, 0, 385, 0, 387, 0, 336, 416, 0,
- /* 1620 */ 56, 0, 341, 39, 42, 344, 33, 0, 56, 0,
- /* 1630 */ 349, 43, 351, 46, 14, 46, 14, 423, 0, 40,
- /* 1640 */ 47, 39, 308, 35, 47, 52, 53, 54, 55, 56,
- /* 1650 */ 0, 0, 0, 161, 308, 374, 39, 0, 0, 378,
- /* 1660 */ 379, 380, 381, 382, 383, 0, 385, 0, 308, 62,
- /* 1670 */ 336, 0, 39, 0, 35, 341, 39, 47, 344, 0,
- /* 1680 */ 35, 88, 336, 349, 91, 351, 39, 47, 0, 35,
- /* 1690 */ 344, 47, 39, 0, 0, 349, 336, 351, 0, 0,
- /* 1700 */ 22, 35, 96, 0, 344, 35, 22, 98, 374, 349,
- /* 1710 */ 0, 351, 378, 379, 380, 381, 382, 383, 43, 385,
- /* 1720 */ 374, 35, 308, 43, 378, 379, 380, 381, 382, 383,
- /* 1730 */ 35, 385, 0, 22, 374, 22, 308, 0, 378, 379,
- /* 1740 */ 380, 381, 382, 383, 22, 385, 153, 154, 308, 156,
- /* 1750 */ 336, 49, 35, 160, 0, 35, 0, 0, 344, 35,
- /* 1760 */ 22, 20, 0, 349, 336, 351, 35, 157, 0, 176,
- /* 1770 */ 22, 0, 344, 0, 159, 157, 336, 349, 154, 351,
- /* 1780 */ 0, 0, 157, 0, 344, 89, 173, 90, 374, 349,
- /* 1790 */ 35, 351, 378, 379, 380, 381, 382, 383, 0, 385,
- /* 1800 */ 0, 89, 374, 155, 89, 308, 378, 379, 380, 381,
- /* 1810 */ 382, 383, 153, 385, 374, 308, 182, 39, 378, 379,
- /* 1820 */ 380, 381, 382, 383, 89, 385, 99, 43, 46, 89,
- /* 1830 */ 43, 308, 90, 336, 89, 43, 90, 90, 89, 46,
- /* 1840 */ 90, 344, 231, 336, 43, 46, 349, 89, 351, 89,
- /* 1850 */ 89, 344, 43, 90, 89, 308, 349, 231, 351, 336,
- /* 1860 */ 90, 46, 90, 46, 46, 90, 225, 344, 43, 90,
- /* 1870 */ 35, 374, 349, 231, 351, 378, 379, 380, 381, 382,
- /* 1880 */ 383, 374, 385, 336, 35, 378, 379, 380, 381, 382,
- /* 1890 */ 383, 344, 385, 35, 35, 35, 349, 374, 351, 35,
- /* 1900 */ 2, 378, 379, 380, 381, 382, 383, 22, 385, 193,
- /* 1910 */ 43, 308, 89, 22, 90, 89, 46, 90, 89, 100,
- /* 1920 */ 90, 374, 89, 89, 46, 378, 379, 380, 381, 382,
- /* 1930 */ 383, 308, 385, 89, 35, 90, 35, 89, 195, 336,
- /* 1940 */ 90, 35, 89, 35, 90, 89, 35, 344, 35, 113,
- /* 1950 */ 89, 308, 349, 90, 351, 90, 113, 89, 22, 336,
- /* 1960 */ 113, 89, 35, 101, 113, 89, 89, 344, 43, 22,
- /* 1970 */ 61, 308, 349, 35, 351, 35, 62, 374, 35, 336,
- /* 1980 */ 35, 378, 379, 380, 381, 382, 383, 344, 385, 35,
- /* 1990 */ 35, 308, 349, 35, 351, 43, 68, 374, 35, 336,
- /* 2000 */ 35, 378, 379, 380, 381, 382, 383, 344, 385, 87,
- /* 2010 */ 22, 35, 349, 22, 351, 35, 35, 374, 68, 336,
- /* 2020 */ 35, 378, 379, 380, 381, 382, 383, 344, 385, 35,
- /* 2030 */ 35, 308, 349, 35, 351, 35, 22, 374, 35, 0,
- /* 2040 */ 35, 378, 379, 380, 381, 382, 383, 308, 385, 47,
- /* 2050 */ 39, 0, 35, 47, 39, 0, 35, 374, 39, 336,
- /* 2060 */ 47, 378, 379, 380, 381, 382, 383, 344, 385, 0,
- /* 2070 */ 35, 47, 349, 39, 351, 336, 0, 35, 35, 0,
- /* 2080 */ 22, 21, 21, 344, 22, 22, 20, 426, 349, 426,
- /* 2090 */ 351, 426, 426, 426, 426, 426, 426, 374, 308, 426,
- /* 2100 */ 426, 378, 379, 380, 381, 382, 383, 426, 385, 426,
- /* 2110 */ 426, 426, 426, 374, 308, 426, 426, 378, 379, 380,
- /* 2120 */ 381, 382, 383, 426, 385, 426, 336, 426, 426, 426,
- /* 2130 */ 426, 426, 426, 426, 344, 426, 426, 426, 426, 349,
- /* 2140 */ 426, 351, 336, 426, 426, 426, 426, 426, 426, 426,
- /* 2150 */ 344, 426, 426, 426, 426, 349, 426, 351, 426, 426,
- /* 2160 */ 426, 426, 426, 426, 374, 308, 426, 426, 378, 379,
- /* 2170 */ 380, 381, 382, 383, 426, 385, 426, 426, 426, 426,
- /* 2180 */ 374, 308, 426, 426, 378, 379, 380, 381, 382, 383,
- /* 2190 */ 426, 385, 426, 336, 426, 426, 426, 426, 426, 426,
- /* 2200 */ 426, 344, 426, 426, 426, 426, 349, 426, 351, 336,
- /* 2210 */ 426, 426, 426, 426, 426, 426, 426, 344, 426, 426,
- /* 2220 */ 426, 426, 349, 426, 351, 426, 426, 426, 426, 426,
- /* 2230 */ 426, 374, 426, 426, 426, 378, 379, 380, 381, 382,
- /* 2240 */ 383, 426, 385, 426, 426, 426, 426, 374, 426, 426,
- /* 2250 */ 426, 378, 379, 380, 381, 382, 383, 426, 385, 426,
- /* 2260 */ 426, 426, 426, 426, 426, 426, 426, 426, 426, 426,
- /* 2270 */ 426, 426, 426, 426, 426, 426, 426, 426, 426, 426,
- /* 2280 */ 426, 426, 426, 426, 426, 426, 426, 426, 426, 426,
- /* 2290 */ 426, 426, 426, 426, 426, 426, 426, 426, 426, 426,
- /* 2300 */ 426, 426, 426, 426, 426, 426, 426, 426, 426, 426,
- /* 2310 */ 426, 426, 426, 426, 426, 426, 426, 426, 426, 426,
- /* 2320 */ 426, 426, 426, 426, 426, 426, 426, 426, 426, 426,
- /* 2330 */ 426, 426, 426, 426, 426, 426, 426, 426, 426, 426,
- /* 2340 */ 426, 426, 426, 426, 426, 426, 426, 426, 426, 426,
- /* 2350 */ 426, 426, 426, 426, 426, 426, 426, 426, 426, 426,
- /* 2360 */ 426, 426, 426, 426, 426, 426, 426, 426, 426, 426,
- /* 2370 */ 426, 426, 426, 426, 426, 426, 426, 426, 426, 426,
- /* 2380 */ 426, 426, 426, 426, 426, 426, 426, 426, 426, 426,
- /* 2390 */ 426, 426,
+ /* 0 */ 350, 0, 323, 350, 325, 326, 323, 319, 325, 326,
+ /* 10 */ 350, 361, 12, 13, 361, 359, 327, 349, 362, 363,
+ /* 20 */ 20, 361, 22, 12, 13, 14, 15, 16, 339, 3,
+ /* 30 */ 14, 15, 16, 33, 4, 35, 348, 387, 388, 389,
+ /* 40 */ 387, 388, 389, 20, 356, 356, 20, 387, 388, 361,
+ /* 50 */ 400, 363, 2, 400, 324, 20, 56, 327, 8, 9,
+ /* 60 */ 400, 61, 12, 13, 14, 15, 16, 319, 68, 12,
+ /* 70 */ 13, 14, 384, 35, 44, 45, 388, 20, 393, 22,
+ /* 80 */ 392, 393, 394, 395, 396, 397, 398, 20, 400, 22,
+ /* 90 */ 33, 319, 35, 405, 94, 407, 348, 362, 363, 411,
+ /* 100 */ 412, 331, 35, 348, 356, 327, 68, 422, 14, 361,
+ /* 110 */ 355, 363, 424, 56, 20, 345, 116, 50, 61, 364,
+ /* 120 */ 432, 95, 437, 438, 354, 68, 0, 442, 443, 94,
+ /* 130 */ 130, 131, 384, 361, 356, 4, 388, 0, 319, 56,
+ /* 140 */ 392, 393, 394, 395, 396, 397, 398, 390, 400, 348,
+ /* 150 */ 329, 94, 20, 130, 131, 407, 94, 356, 21, 411,
+ /* 160 */ 412, 24, 25, 26, 27, 28, 29, 30, 31, 32,
+ /* 170 */ 422, 393, 351, 116, 363, 418, 176, 94, 178, 96,
+ /* 180 */ 361, 370, 408, 409, 373, 437, 438, 130, 131, 163,
+ /* 190 */ 442, 443, 414, 415, 416, 417, 395, 419, 161, 324,
+ /* 200 */ 200, 201, 327, 203, 204, 205, 206, 207, 208, 209,
+ /* 210 */ 210, 211, 212, 213, 214, 215, 216, 217, 218, 219,
+ /* 220 */ 220, 221, 222, 20, 33, 94, 94, 329, 158, 318,
+ /* 230 */ 327, 320, 106, 176, 20, 178, 94, 175, 47, 177,
+ /* 240 */ 240, 343, 200, 52, 53, 54, 55, 56, 78, 351,
+ /* 250 */ 14, 125, 126, 127, 128, 129, 20, 200, 201, 356,
+ /* 260 */ 203, 204, 205, 206, 207, 208, 209, 210, 211, 212,
+ /* 270 */ 213, 214, 215, 216, 217, 218, 219, 220, 221, 222,
+ /* 280 */ 12, 13, 245, 246, 93, 334, 335, 96, 20, 350,
+ /* 290 */ 22, 249, 250, 251, 252, 253, 393, 227, 204, 327,
+ /* 300 */ 361, 33, 240, 35, 134, 135, 21, 237, 94, 24,
+ /* 310 */ 25, 26, 27, 28, 29, 30, 31, 32, 415, 416,
+ /* 320 */ 417, 106, 419, 240, 56, 60, 387, 388, 356, 61,
+ /* 330 */ 393, 224, 319, 226, 327, 171, 68, 12, 13, 400,
+ /* 340 */ 125, 126, 127, 128, 129, 20, 339, 22, 327, 332,
+ /* 350 */ 159, 160, 94, 162, 337, 191, 192, 166, 33, 422,
+ /* 360 */ 35, 348, 94, 356, 20, 393, 327, 393, 404, 356,
+ /* 370 */ 406, 240, 240, 182, 361, 438, 363, 356, 339, 442,
+ /* 380 */ 443, 56, 240, 327, 116, 346, 61, 415, 416, 417,
+ /* 390 */ 126, 419, 20, 68, 22, 356, 422, 384, 130, 131,
+ /* 400 */ 20, 388, 0, 125, 126, 392, 393, 394, 395, 396,
+ /* 410 */ 397, 398, 438, 400, 393, 20, 442, 443, 405, 94,
+ /* 420 */ 407, 365, 50, 327, 411, 412, 24, 25, 26, 27,
+ /* 430 */ 28, 29, 30, 31, 32, 339, 415, 416, 417, 390,
+ /* 440 */ 419, 116, 346, 422, 176, 432, 178, 183, 184, 60,
+ /* 450 */ 348, 187, 356, 20, 240, 130, 131, 355, 437, 438,
+ /* 460 */ 1, 2, 184, 442, 443, 187, 364, 418, 200, 201,
+ /* 470 */ 0, 203, 204, 205, 206, 207, 208, 209, 210, 211,
+ /* 480 */ 212, 213, 214, 215, 216, 217, 218, 219, 220, 221,
+ /* 490 */ 222, 125, 224, 14, 404, 316, 406, 37, 240, 20,
+ /* 500 */ 331, 176, 372, 178, 374, 107, 108, 109, 110, 111,
+ /* 510 */ 112, 113, 114, 115, 116, 117, 43, 119, 120, 121,
+ /* 520 */ 122, 123, 124, 354, 105, 200, 201, 56, 203, 204,
+ /* 530 */ 205, 206, 207, 208, 209, 210, 211, 212, 213, 214,
+ /* 540 */ 215, 216, 217, 218, 219, 220, 221, 222, 12, 13,
+ /* 550 */ 22, 185, 186, 340, 95, 376, 20, 97, 22, 99,
+ /* 560 */ 100, 348, 102, 35, 93, 327, 106, 96, 95, 33,
+ /* 570 */ 357, 35, 393, 0, 376, 390, 106, 339, 8, 9,
+ /* 580 */ 347, 319, 12, 13, 14, 15, 16, 20, 128, 224,
+ /* 590 */ 20, 393, 56, 360, 356, 125, 126, 127, 128, 129,
+ /* 600 */ 319, 422, 327, 418, 68, 12, 13, 14, 334, 335,
+ /* 610 */ 348, 319, 68, 20, 339, 22, 437, 438, 356, 68,
+ /* 620 */ 422, 442, 443, 361, 56, 363, 33, 348, 35, 348,
+ /* 630 */ 94, 356, 21, 60, 355, 437, 438, 356, 3, 327,
+ /* 640 */ 442, 443, 361, 364, 363, 34, 384, 36, 0, 56,
+ /* 650 */ 388, 339, 116, 361, 392, 393, 394, 395, 396, 397,
+ /* 660 */ 398, 68, 400, 340, 96, 384, 130, 131, 356, 388,
+ /* 670 */ 349, 348, 393, 392, 393, 394, 395, 396, 397, 398,
+ /* 680 */ 357, 400, 341, 204, 422, 344, 405, 94, 407, 319,
+ /* 690 */ 8, 9, 411, 412, 12, 13, 14, 15, 16, 437,
+ /* 700 */ 438, 422, 421, 327, 442, 443, 327, 43, 60, 116,
+ /* 710 */ 363, 372, 176, 374, 178, 339, 437, 438, 339, 320,
+ /* 720 */ 373, 442, 443, 130, 131, 348, 358, 356, 319, 361,
+ /* 730 */ 163, 361, 356, 163, 357, 356, 200, 201, 367, 203,
+ /* 740 */ 204, 205, 206, 207, 208, 209, 210, 211, 212, 213,
+ /* 750 */ 214, 215, 216, 217, 218, 219, 220, 221, 222, 95,
+ /* 760 */ 8, 9, 327, 327, 12, 13, 14, 15, 16, 176,
+ /* 770 */ 361, 178, 340, 349, 339, 339, 240, 95, 8, 9,
+ /* 780 */ 348, 319, 12, 13, 14, 15, 16, 403, 319, 357,
+ /* 790 */ 406, 356, 356, 200, 201, 43, 203, 204, 205, 206,
+ /* 800 */ 207, 208, 209, 210, 211, 212, 213, 214, 215, 216,
+ /* 810 */ 217, 218, 219, 220, 221, 222, 12, 13, 376, 22,
+ /* 820 */ 356, 319, 348, 361, 20, 358, 22, 319, 361, 3,
+ /* 830 */ 361, 367, 35, 327, 319, 393, 349, 33, 364, 35,
+ /* 840 */ 2, 376, 44, 45, 106, 339, 8, 9, 319, 319,
+ /* 850 */ 12, 13, 14, 15, 16, 336, 39, 338, 393, 327,
+ /* 860 */ 56, 4, 356, 361, 422, 68, 128, 319, 319, 361,
+ /* 870 */ 327, 339, 68, 12, 13, 43, 361, 327, 348, 437,
+ /* 880 */ 438, 20, 339, 22, 442, 443, 356, 422, 356, 339,
+ /* 890 */ 361, 361, 257, 363, 33, 20, 35, 348, 94, 356,
+ /* 900 */ 380, 341, 437, 438, 344, 356, 356, 442, 443, 361,
+ /* 910 */ 361, 20, 363, 116, 384, 0, 319, 56, 388, 327,
+ /* 920 */ 116, 95, 392, 393, 394, 395, 396, 397, 398, 68,
+ /* 930 */ 400, 339, 349, 384, 130, 131, 319, 388, 168, 42,
+ /* 940 */ 43, 392, 393, 394, 395, 396, 397, 398, 356, 400,
+ /* 950 */ 64, 65, 319, 61, 405, 94, 407, 71, 361, 0,
+ /* 960 */ 411, 412, 47, 433, 434, 238, 239, 327, 82, 83,
+ /* 970 */ 421, 0, 327, 176, 88, 178, 35, 116, 361, 339,
+ /* 980 */ 176, 22, 178, 376, 339, 319, 98, 98, 319, 101,
+ /* 990 */ 101, 130, 131, 349, 361, 43, 356, 200, 201, 319,
+ /* 1000 */ 393, 356, 163, 164, 200, 201, 319, 203, 204, 205,
+ /* 1010 */ 206, 207, 208, 209, 210, 211, 212, 213, 214, 215,
+ /* 1020 */ 216, 217, 218, 219, 220, 221, 222, 361, 327, 422,
+ /* 1030 */ 361, 327, 262, 130, 131, 64, 65, 176, 163, 178,
+ /* 1040 */ 339, 361, 71, 339, 437, 438, 43, 95, 361, 442,
+ /* 1050 */ 443, 349, 43, 82, 83, 98, 239, 356, 101, 88,
+ /* 1060 */ 356, 200, 201, 0, 203, 204, 205, 206, 207, 208,
+ /* 1070 */ 209, 210, 211, 212, 213, 214, 215, 216, 217, 218,
+ /* 1080 */ 219, 220, 221, 222, 18, 327, 20, 98, 1, 2,
+ /* 1090 */ 101, 259, 200, 27, 35, 204, 30, 339, 241, 0,
+ /* 1100 */ 0, 94, 8, 9, 95, 391, 12, 13, 14, 15,
+ /* 1110 */ 16, 104, 337, 47, 356, 49, 43, 51, 43, 178,
+ /* 1120 */ 43, 22, 22, 446, 43, 43, 63, 64, 65, 66,
+ /* 1130 */ 67, 43, 69, 70, 71, 72, 73, 74, 75, 76,
+ /* 1140 */ 77, 78, 79, 80, 81, 82, 83, 84, 85, 86,
+ /* 1150 */ 87, 88, 89, 90, 435, 18, 328, 8, 9, 93,
+ /* 1160 */ 23, 12, 13, 14, 15, 16, 429, 35, 95, 13,
+ /* 1170 */ 95, 105, 95, 43, 37, 38, 95, 95, 41, 46,
+ /* 1180 */ 8, 9, 348, 95, 12, 13, 14, 15, 16, 95,
+ /* 1190 */ 43, 35, 328, 326, 57, 58, 59, 43, 391, 133,
+ /* 1200 */ 68, 319, 136, 137, 138, 139, 140, 141, 142, 143,
+ /* 1210 */ 144, 145, 146, 147, 148, 149, 150, 151, 152, 153,
+ /* 1220 */ 43, 155, 156, 157, 13, 95, 360, 94, 8, 9,
+ /* 1230 */ 348, 94, 12, 13, 14, 15, 16, 178, 356, 43,
+ /* 1240 */ 43, 420, 95, 361, 439, 363, 35, 0, 413, 95,
+ /* 1250 */ 423, 386, 242, 47, 385, 378, 42, 174, 368, 20,
+ /* 1260 */ 327, 327, 368, 158, 261, 366, 384, 366, 92, 132,
+ /* 1270 */ 388, 333, 95, 327, 392, 393, 394, 395, 396, 397,
+ /* 1280 */ 398, 61, 400, 327, 20, 403, 321, 405, 406, 407,
+ /* 1290 */ 327, 95, 95, 411, 412, 48, 321, 20, 382, 331,
+ /* 1300 */ 376, 331, 363, 20, 375, 168, 169, 170, 20, 377,
+ /* 1310 */ 173, 319, 163, 331, 331, 375, 4, 393, 331, 331,
+ /* 1320 */ 327, 331, 321, 103, 348, 348, 348, 190, 348, 348,
+ /* 1330 */ 193, 19, 195, 196, 197, 198, 199, 321, 327, 348,
+ /* 1340 */ 348, 189, 348, 348, 348, 33, 422, 348, 356, 383,
+ /* 1350 */ 348, 382, 329, 361, 181, 363, 329, 327, 361, 47,
+ /* 1360 */ 381, 437, 438, 51, 327, 329, 442, 443, 56, 363,
+ /* 1370 */ 247, 371, 160, 375, 361, 361, 384, 240, 158, 361,
+ /* 1380 */ 388, 361, 361, 371, 392, 393, 394, 395, 396, 397,
+ /* 1390 */ 398, 369, 400, 319, 329, 344, 329, 405, 356, 407,
+ /* 1400 */ 20, 361, 428, 411, 412, 93, 391, 248, 96, 371,
+ /* 1410 */ 391, 361, 428, 421, 361, 371, 361, 361, 254, 167,
+ /* 1420 */ 263, 256, 348, 255, 243, 386, 260, 441, 258, 239,
+ /* 1430 */ 356, 20, 447, 425, 430, 361, 431, 363, 390, 94,
+ /* 1440 */ 356, 410, 94, 352, 338, 428, 319, 227, 228, 229,
+ /* 1450 */ 230, 231, 232, 233, 234, 235, 236, 237, 384, 427,
+ /* 1460 */ 426, 327, 388, 361, 440, 329, 392, 393, 394, 395,
+ /* 1470 */ 396, 397, 398, 36, 400, 348, 379, 322, 374, 405,
+ /* 1480 */ 342, 407, 321, 356, 342, 411, 412, 342, 361, 0,
+ /* 1490 */ 363, 317, 330, 0, 183, 0, 0, 42, 0, 35,
+ /* 1500 */ 194, 35, 35, 35, 194, 0, 35, 35, 194, 0,
+ /* 1510 */ 194, 384, 0, 35, 319, 388, 0, 0, 35, 392,
+ /* 1520 */ 393, 394, 395, 396, 397, 398, 22, 400, 178, 176,
+ /* 1530 */ 0, 0, 405, 172, 407, 171, 319, 0, 411, 412,
+ /* 1540 */ 12, 13, 0, 348, 42, 46, 0, 0, 0, 0,
+ /* 1550 */ 22, 356, 0, 0, 154, 0, 361, 0, 363, 0,
+ /* 1560 */ 0, 33, 0, 35, 149, 348, 0, 35, 149, 0,
+ /* 1570 */ 0, 0, 0, 356, 0, 0, 0, 0, 361, 384,
+ /* 1580 */ 363, 0, 0, 388, 56, 0, 0, 392, 393, 394,
+ /* 1590 */ 395, 396, 397, 398, 319, 400, 68, 0, 0, 0,
+ /* 1600 */ 405, 384, 407, 42, 0, 388, 411, 412, 0, 392,
+ /* 1610 */ 393, 394, 395, 396, 397, 398, 0, 400, 0, 0,
+ /* 1620 */ 19, 0, 22, 348, 407, 0, 0, 0, 411, 412,
+ /* 1630 */ 0, 356, 0, 0, 33, 56, 361, 35, 363, 56,
+ /* 1640 */ 0, 0, 14, 14, 116, 42, 39, 0, 47, 43,
+ /* 1650 */ 40, 46, 39, 52, 53, 54, 55, 56, 0, 384,
+ /* 1660 */ 46, 0, 39, 388, 167, 0, 0, 392, 393, 394,
+ /* 1670 */ 395, 396, 397, 398, 0, 400, 0, 62, 0, 35,
+ /* 1680 */ 0, 47, 407, 35, 39, 47, 411, 412, 39, 0,
+ /* 1690 */ 35, 39, 0, 47, 93, 35, 319, 96, 47, 0,
+ /* 1700 */ 0, 39, 0, 0, 176, 35, 178, 22, 0, 22,
+ /* 1710 */ 35, 43, 103, 0, 35, 43, 35, 35, 101, 0,
+ /* 1720 */ 22, 0, 49, 22, 0, 348, 22, 0, 200, 201,
+ /* 1730 */ 129, 35, 0, 356, 22, 0, 20, 95, 361, 0,
+ /* 1740 */ 363, 213, 214, 215, 216, 217, 218, 219, 94, 35,
+ /* 1750 */ 35, 163, 35, 22, 163, 319, 160, 179, 0, 0,
+ /* 1760 */ 3, 384, 0, 162, 0, 388, 0, 0, 35, 392,
+ /* 1770 */ 393, 394, 395, 396, 397, 398, 399, 400, 401, 402,
+ /* 1780 */ 94, 180, 188, 182, 348, 163, 95, 94, 94, 165,
+ /* 1790 */ 0, 39, 356, 46, 104, 159, 161, 361, 43, 363,
+ /* 1800 */ 94, 94, 244, 223, 43, 95, 95, 244, 43, 225,
+ /* 1810 */ 319, 43, 223, 95, 94, 46, 94, 46, 95, 94,
+ /* 1820 */ 384, 95, 94, 94, 388, 43, 3, 43, 392, 393,
+ /* 1830 */ 394, 395, 396, 397, 398, 94, 400, 95, 95, 348,
+ /* 1840 */ 35, 35, 35, 35, 35, 35, 95, 356, 95, 46,
+ /* 1850 */ 46, 2, 361, 200, 363, 244, 43, 22, 238, 22,
+ /* 1860 */ 46, 46, 46, 319, 94, 202, 22, 95, 94, 35,
+ /* 1870 */ 95, 94, 118, 95, 94, 384, 95, 94, 105, 388,
+ /* 1880 */ 444, 445, 94, 392, 393, 394, 395, 396, 397, 398,
+ /* 1890 */ 95, 400, 348, 35, 94, 35, 95, 353, 407, 35,
+ /* 1900 */ 356, 35, 106, 412, 35, 361, 94, 363, 35, 118,
+ /* 1910 */ 118, 118, 95, 94, 319, 95, 94, 43, 22, 95,
+ /* 1920 */ 94, 94, 94, 94, 62, 35, 61, 35, 384, 35,
+ /* 1930 */ 35, 319, 388, 35, 91, 35, 392, 393, 394, 395,
+ /* 1940 */ 396, 397, 398, 348, 400, 35, 35, 35, 35, 68,
+ /* 1950 */ 43, 356, 35, 35, 22, 35, 361, 22, 363, 35,
+ /* 1960 */ 348, 35, 35, 35, 35, 68, 35, 22, 356, 35,
+ /* 1970 */ 0, 35, 35, 361, 35, 363, 0, 47, 35, 384,
+ /* 1980 */ 39, 0, 0, 388, 35, 47, 39, 392, 393, 394,
+ /* 1990 */ 395, 396, 397, 398, 39, 400, 384, 402, 47, 35,
+ /* 2000 */ 388, 47, 39, 0, 392, 393, 394, 395, 396, 397,
+ /* 2010 */ 398, 0, 400, 35, 35, 21, 319, 22, 22, 22,
+ /* 2020 */ 21, 20, 448, 448, 448, 448, 448, 448, 448, 448,
+ /* 2030 */ 448, 448, 448, 448, 448, 448, 448, 448, 448, 448,
+ /* 2040 */ 448, 448, 448, 448, 448, 348, 434, 448, 448, 448,
+ /* 2050 */ 448, 448, 448, 356, 448, 448, 448, 448, 361, 448,
+ /* 2060 */ 363, 448, 448, 448, 448, 448, 448, 319, 448, 448,
+ /* 2070 */ 448, 448, 448, 448, 448, 448, 448, 448, 448, 448,
+ /* 2080 */ 448, 384, 448, 448, 448, 388, 448, 448, 448, 392,
+ /* 2090 */ 393, 394, 395, 396, 397, 398, 348, 400, 448, 448,
+ /* 2100 */ 448, 448, 448, 448, 356, 448, 448, 448, 448, 361,
+ /* 2110 */ 448, 363, 448, 448, 448, 448, 448, 448, 448, 448,
+ /* 2120 */ 448, 448, 448, 448, 448, 448, 448, 448, 448, 448,
+ /* 2130 */ 319, 448, 384, 436, 448, 448, 388, 448, 448, 448,
+ /* 2140 */ 392, 393, 394, 395, 396, 397, 398, 448, 400, 448,
+ /* 2150 */ 319, 448, 448, 448, 448, 448, 448, 448, 448, 348,
+ /* 2160 */ 448, 448, 448, 448, 353, 448, 448, 356, 448, 448,
+ /* 2170 */ 448, 448, 361, 448, 363, 448, 448, 448, 448, 348,
+ /* 2180 */ 448, 448, 448, 448, 353, 448, 448, 356, 448, 448,
+ /* 2190 */ 448, 448, 361, 445, 363, 384, 448, 448, 448, 388,
+ /* 2200 */ 448, 319, 448, 392, 393, 394, 395, 396, 397, 398,
+ /* 2210 */ 448, 400, 448, 448, 448, 384, 448, 448, 319, 388,
+ /* 2220 */ 448, 448, 448, 392, 393, 394, 395, 396, 397, 398,
+ /* 2230 */ 348, 400, 448, 448, 448, 448, 448, 448, 356, 448,
+ /* 2240 */ 448, 448, 448, 361, 448, 363, 448, 348, 448, 448,
+ /* 2250 */ 448, 448, 448, 448, 448, 356, 448, 448, 448, 448,
+ /* 2260 */ 361, 319, 363, 448, 448, 448, 384, 448, 448, 448,
+ /* 2270 */ 388, 448, 448, 448, 392, 393, 394, 395, 396, 397,
+ /* 2280 */ 398, 448, 400, 384, 448, 448, 448, 388, 448, 448,
+ /* 2290 */ 348, 392, 393, 394, 395, 396, 397, 398, 356, 400,
+ /* 2300 */ 448, 448, 448, 361, 448, 363, 448, 448, 448, 448,
+ /* 2310 */ 448, 448, 319, 448, 448, 448, 448, 448, 448, 448,
+ /* 2320 */ 448, 448, 448, 448, 448, 448, 384, 448, 448, 319,
+ /* 2330 */ 388, 448, 448, 448, 392, 393, 394, 395, 396, 397,
+ /* 2340 */ 398, 348, 400, 448, 448, 448, 448, 448, 448, 356,
+ /* 2350 */ 448, 448, 448, 448, 361, 448, 363, 448, 348, 448,
+ /* 2360 */ 448, 448, 448, 448, 448, 448, 356, 448, 448, 448,
+ /* 2370 */ 448, 361, 448, 363, 448, 448, 448, 384, 448, 448,
+ /* 2380 */ 319, 388, 448, 448, 448, 392, 393, 394, 395, 396,
+ /* 2390 */ 397, 398, 448, 400, 384, 448, 448, 448, 388, 448,
+ /* 2400 */ 448, 448, 392, 393, 394, 395, 396, 397, 398, 348,
+ /* 2410 */ 400, 448, 448, 448, 448, 448, 448, 356, 448, 448,
+ /* 2420 */ 448, 448, 361, 448, 363, 448, 448, 448, 448, 448,
+ /* 2430 */ 448, 319, 448, 448, 448, 448, 448, 448, 448, 448,
+ /* 2440 */ 448, 448, 448, 448, 448, 384, 448, 448, 448, 388,
+ /* 2450 */ 319, 448, 448, 392, 393, 394, 395, 396, 397, 398,
+ /* 2460 */ 348, 400, 448, 448, 448, 448, 448, 448, 356, 448,
+ /* 2470 */ 448, 448, 448, 361, 448, 363, 448, 448, 448, 348,
+ /* 2480 */ 448, 448, 448, 448, 448, 448, 448, 356, 448, 448,
+ /* 2490 */ 448, 448, 361, 448, 363, 448, 384, 448, 448, 448,
+ /* 2500 */ 388, 448, 448, 448, 392, 393, 394, 395, 396, 397,
+ /* 2510 */ 398, 448, 400, 319, 448, 384, 448, 448, 448, 388,
+ /* 2520 */ 448, 448, 448, 392, 393, 394, 395, 396, 397, 398,
+ /* 2530 */ 448, 400, 448, 448, 448, 319, 448, 448, 448, 448,
+ /* 2540 */ 448, 448, 348, 448, 448, 448, 448, 448, 448, 448,
+ /* 2550 */ 356, 448, 448, 448, 448, 361, 448, 363, 448, 448,
+ /* 2560 */ 448, 448, 448, 448, 348, 448, 448, 448, 448, 448,
+ /* 2570 */ 448, 448, 356, 448, 448, 448, 448, 361, 384, 363,
+ /* 2580 */ 448, 448, 388, 448, 448, 448, 392, 393, 394, 395,
+ /* 2590 */ 396, 397, 398, 448, 400, 319, 448, 448, 448, 448,
+ /* 2600 */ 384, 448, 448, 448, 388, 448, 448, 448, 392, 393,
+ /* 2610 */ 394, 395, 396, 397, 398, 319, 400, 448, 448, 448,
+ /* 2620 */ 448, 448, 448, 448, 348, 448, 448, 448, 448, 448,
+ /* 2630 */ 448, 448, 356, 448, 448, 448, 448, 361, 448, 363,
+ /* 2640 */ 448, 448, 448, 448, 348, 448, 448, 448, 448, 448,
+ /* 2650 */ 448, 448, 356, 448, 448, 448, 448, 361, 319, 363,
+ /* 2660 */ 384, 448, 448, 448, 388, 448, 448, 448, 392, 393,
+ /* 2670 */ 394, 395, 396, 397, 398, 448, 400, 448, 319, 448,
+ /* 2680 */ 384, 448, 448, 448, 388, 448, 448, 348, 392, 393,
+ /* 2690 */ 394, 395, 396, 397, 398, 356, 400, 448, 448, 448,
+ /* 2700 */ 361, 448, 363, 448, 448, 448, 448, 348, 448, 448,
+ /* 2710 */ 448, 448, 448, 448, 448, 356, 448, 448, 448, 448,
+ /* 2720 */ 361, 448, 363, 384, 448, 448, 448, 388, 448, 319,
+ /* 2730 */ 448, 392, 393, 394, 395, 396, 397, 398, 448, 400,
+ /* 2740 */ 448, 448, 448, 384, 448, 448, 448, 388, 448, 448,
+ /* 2750 */ 448, 392, 393, 394, 395, 396, 397, 398, 348, 400,
+ /* 2760 */ 448, 448, 448, 448, 448, 448, 356, 448, 448, 448,
+ /* 2770 */ 448, 361, 448, 363, 448, 448, 448, 448, 448, 448,
+ /* 2780 */ 319, 448, 448, 448, 448, 448, 448, 448, 448, 448,
+ /* 2790 */ 448, 448, 448, 448, 384, 448, 448, 448, 388, 319,
+ /* 2800 */ 448, 448, 392, 393, 394, 395, 396, 397, 398, 348,
+ /* 2810 */ 400, 448, 448, 448, 448, 448, 448, 356, 448, 448,
+ /* 2820 */ 448, 448, 361, 448, 363, 448, 448, 448, 348, 448,
+ /* 2830 */ 448, 448, 448, 448, 448, 448, 356, 448, 448, 448,
+ /* 2840 */ 448, 361, 448, 363, 448, 384, 448, 448, 448, 388,
+ /* 2850 */ 448, 448, 448, 392, 393, 394, 395, 396, 397, 398,
+ /* 2860 */ 448, 400, 319, 448, 384, 448, 448, 448, 388, 448,
+ /* 2870 */ 448, 448, 392, 393, 394, 395, 396, 397, 398, 448,
+ /* 2880 */ 400, 448, 448, 448, 319, 448, 448, 448, 448, 448,
+ /* 2890 */ 448, 348, 448, 448, 448, 448, 448, 448, 448, 356,
+ /* 2900 */ 448, 448, 448, 448, 361, 448, 363, 448, 448, 448,
+ /* 2910 */ 448, 448, 448, 348, 448, 448, 448, 448, 448, 448,
+ /* 2920 */ 448, 356, 448, 448, 448, 448, 361, 384, 363, 448,
+ /* 2930 */ 448, 388, 448, 448, 448, 392, 393, 394, 395, 396,
+ /* 2940 */ 397, 398, 448, 400, 319, 448, 448, 448, 448, 384,
+ /* 2950 */ 448, 448, 448, 388, 448, 448, 448, 392, 393, 394,
+ /* 2960 */ 395, 396, 397, 398, 319, 400, 448, 448, 448, 448,
+ /* 2970 */ 448, 448, 448, 348, 448, 448, 448, 448, 448, 448,
+ /* 2980 */ 448, 356, 448, 448, 448, 448, 361, 448, 363, 448,
+ /* 2990 */ 448, 448, 448, 348, 448, 448, 448, 448, 448, 448,
+ /* 3000 */ 448, 356, 448, 448, 448, 448, 361, 448, 363, 384,
+ /* 3010 */ 448, 448, 448, 388, 448, 448, 448, 392, 393, 394,
+ /* 3020 */ 395, 396, 397, 398, 448, 400, 448, 448, 448, 384,
+ /* 3030 */ 448, 448, 448, 388, 448, 448, 448, 392, 393, 394,
+ /* 3040 */ 395, 396, 397, 398, 448, 400,
};
-#define YY_SHIFT_COUNT (666)
+#define YY_SHIFT_COUNT (688)
#define YY_SHIFT_MIN (0)
-#define YY_SHIFT_MAX (2079)
+#define YY_SHIFT_MAX (2011)
static const unsigned short int yy_shift_ofst[] = {
- /* 0 */ 917, 0, 0, 62, 62, 264, 264, 264, 326, 326,
- /* 10 */ 264, 264, 391, 593, 720, 593, 593, 593, 593, 593,
- /* 20 */ 593, 593, 593, 593, 593, 593, 593, 593, 593, 593,
- /* 30 */ 593, 593, 593, 593, 593, 593, 593, 593, 593, 593,
- /* 40 */ 593, 593, 265, 265, 17, 17, 17, 1007, 1007, 268,
- /* 50 */ 1007, 1007, 160, 30, 56, 200, 56, 11, 11, 87,
- /* 60 */ 87, 55, 165, 56, 56, 11, 11, 11, 11, 11,
- /* 70 */ 11, 11, 11, 11, 11, 10, 11, 11, 11, 18,
- /* 80 */ 11, 11, 67, 11, 11, 67, 123, 11, 67, 67,
- /* 90 */ 67, 11, 227, 719, 662, 683, 683, 150, 213, 213,
- /* 100 */ 213, 213, 213, 213, 213, 213, 213, 213, 213, 213,
- /* 110 */ 213, 213, 213, 213, 213, 213, 213, 517, 881, 165,
- /* 120 */ 403, 403, 573, 386, 849, 361, 361, 361, 386, 298,
- /* 130 */ 298, 18, 350, 350, 67, 67, 294, 294, 270, 357,
- /* 140 */ 198, 198, 198, 198, 198, 198, 198, 1119, 21, 383,
- /* 150 */ 501, 105, 665, 484, 715, 828, 366, 799, 506, 800,
- /* 160 */ 717, 649, 717, 924, 756, 756, 756, 807, 873, 980,
- /* 170 */ 1174, 1078, 1206, 1232, 1232, 1206, 1107, 1107, 1232, 1232,
- /* 180 */ 1232, 1257, 1257, 1264, 10, 18, 10, 1272, 1277, 10,
- /* 190 */ 1272, 10, 10, 10, 1232, 10, 1257, 67, 67, 67,
- /* 200 */ 67, 67, 67, 67, 67, 67, 67, 67, 1232, 1257,
- /* 210 */ 294, 1264, 227, 1166, 18, 227, 1232, 1232, 1272, 227,
- /* 220 */ 1118, 294, 294, 294, 294, 1118, 294, 1201, 227, 270,
- /* 230 */ 227, 298, 1345, 294, 1139, 1118, 294, 294, 1139, 1118,
- /* 240 */ 294, 294, 67, 1141, 1235, 1139, 1154, 1157, 1170, 980,
- /* 250 */ 1178, 298, 1382, 1159, 1162, 1167, 1159, 1162, 1159, 1162,
- /* 260 */ 1320, 1326, 294, 357, 1232, 227, 1383, 1257, 2259, 2259,
- /* 270 */ 2259, 2259, 2259, 2259, 2259, 83, 1593, 214, 724, 126,
- /* 280 */ 209, 491, 562, 622, 813, 535, 321, 698, 698, 698,
- /* 290 */ 698, 698, 698, 698, 698, 521, 309, 13, 13, 115,
- /* 300 */ 69, 599, 267, 708, 194, 377, 190, 465, 49, 49,
- /* 310 */ 49, 49, 684, 944, 938, 994, 999, 1001, 947, 1047,
- /* 320 */ 1101, 941, 920, 1025, 1043, 1069, 1079, 1091, 1093, 1102,
- /* 330 */ 1160, 1073, 973, 895, 1103, 1075, 1082, 1058, 1106, 1114,
- /* 340 */ 1115, 1133, 1163, 1165, 1193, 1198, 1064, 860, 1143, 1176,
- /* 350 */ 839, 1473, 1476, 1301, 1479, 1480, 1439, 1482, 1448, 1297,
- /* 360 */ 1450, 1451, 1452, 1302, 1490, 1456, 1457, 1307, 1495, 1310,
- /* 370 */ 1502, 1477, 1513, 1493, 1516, 1483, 1348, 1347, 1521, 1527,
- /* 380 */ 1357, 1359, 1525, 1528, 1494, 1529, 1499, 1539, 1542, 1543,
- /* 390 */ 1395, 1545, 1552, 1554, 1555, 1556, 1403, 1514, 1548, 1413,
- /* 400 */ 1559, 1560, 1561, 1562, 1566, 1567, 1568, 1569, 1570, 1571,
- /* 410 */ 1573, 1574, 1575, 1578, 1544, 1590, 1591, 1598, 1600, 1601,
- /* 420 */ 1612, 1581, 1604, 1605, 1614, 1616, 1619, 1564, 1621, 1572,
- /* 430 */ 1627, 1629, 1582, 1584, 1588, 1620, 1587, 1622, 1589, 1638,
- /* 440 */ 1599, 1602, 1650, 1651, 1652, 1617, 1492, 1657, 1658, 1665,
- /* 450 */ 1607, 1667, 1671, 1608, 1597, 1633, 1673, 1639, 1630, 1637,
- /* 460 */ 1679, 1645, 1640, 1647, 1688, 1654, 1644, 1653, 1693, 1694,
- /* 470 */ 1698, 1699, 1609, 1606, 1666, 1678, 1703, 1670, 1675, 1680,
- /* 480 */ 1686, 1695, 1684, 1710, 1711, 1732, 1713, 1702, 1737, 1722,
- /* 490 */ 1717, 1754, 1720, 1756, 1724, 1757, 1738, 1741, 1762, 1610,
- /* 500 */ 1731, 1768, 1613, 1748, 1618, 1624, 1771, 1773, 1625, 1615,
- /* 510 */ 1780, 1781, 1783, 1696, 1697, 1755, 1634, 1798, 1712, 1648,
- /* 520 */ 1715, 1800, 1778, 1659, 1735, 1727, 1782, 1784, 1611, 1740,
- /* 530 */ 1742, 1745, 1746, 1747, 1749, 1787, 1750, 1758, 1760, 1761,
- /* 540 */ 1763, 1792, 1793, 1799, 1765, 1801, 1626, 1770, 1772, 1815,
- /* 550 */ 1641, 1809, 1817, 1818, 1775, 1825, 1642, 1779, 1835, 1849,
- /* 560 */ 1858, 1859, 1860, 1864, 1779, 1898, 1885, 1716, 1867, 1823,
- /* 570 */ 1824, 1826, 1827, 1829, 1830, 1870, 1833, 1834, 1878, 1891,
- /* 580 */ 1743, 1844, 1819, 1845, 1899, 1901, 1848, 1850, 1906, 1853,
- /* 590 */ 1854, 1908, 1856, 1863, 1911, 1861, 1865, 1913, 1868, 1836,
- /* 600 */ 1843, 1847, 1851, 1936, 1862, 1872, 1876, 1927, 1877, 1925,
- /* 610 */ 1925, 1947, 1914, 1909, 1938, 1940, 1943, 1945, 1954, 1955,
- /* 620 */ 1958, 1928, 1922, 1952, 1963, 1965, 1988, 1976, 1991, 1980,
- /* 630 */ 1981, 1950, 1675, 1985, 1680, 1994, 1995, 1998, 2000, 2014,
- /* 640 */ 2003, 2039, 2005, 2002, 2011, 2051, 2017, 2006, 2015, 2055,
- /* 650 */ 2021, 2013, 2019, 2069, 2035, 2024, 2034, 2076, 2042, 2043,
- /* 660 */ 2079, 2058, 2060, 2062, 2063, 2061, 2066,
+ /* 0 */ 1137, 0, 57, 268, 57, 325, 325, 325, 536, 325,
+ /* 10 */ 325, 325, 325, 325, 593, 804, 804, 861, 804, 804,
+ /* 20 */ 804, 804, 804, 804, 804, 804, 804, 804, 804, 804,
+ /* 30 */ 804, 804, 804, 804, 804, 804, 804, 804, 804, 804,
+ /* 40 */ 804, 804, 804, 804, 804, 804, 132, 214, 35, 62,
+ /* 50 */ 83, 142, 258, 142, 35, 35, 1528, 1528, 142, 1528,
+ /* 60 */ 1528, 131, 142, 203, 203, 30, 30, 23, 203, 203,
+ /* 70 */ 203, 203, 203, 203, 203, 203, 203, 203, 265, 203,
+ /* 80 */ 203, 203, 344, 203, 203, 380, 203, 203, 380, 395,
+ /* 90 */ 203, 380, 380, 380, 203, 389, 1066, 1220, 1220, 285,
+ /* 100 */ 797, 797, 797, 797, 797, 797, 797, 797, 797, 797,
+ /* 110 */ 797, 797, 797, 797, 797, 797, 797, 797, 797, 460,
+ /* 120 */ 886, 26, 23, 236, 236, 573, 38, 648, 107, 107,
+ /* 130 */ 567, 567, 567, 38, 433, 433, 419, 344, 1, 1,
+ /* 140 */ 365, 380, 380, 544, 544, 419, 551, 398, 398, 398,
+ /* 150 */ 398, 398, 398, 398, 1601, 137, 570, 770, 971, 42,
+ /* 160 */ 67, 37, 94, 479, 372, 798, 738, 875, 727, 817,
+ /* 170 */ 635, 727, 897, 857, 891, 1010, 1206, 1083, 1214, 1239,
+ /* 180 */ 1239, 1214, 1105, 1105, 1176, 1239, 1239, 1239, 1264, 1264,
+ /* 190 */ 1277, 265, 344, 265, 1283, 1288, 265, 1283, 265, 265,
+ /* 200 */ 265, 1239, 265, 1264, 380, 380, 380, 380, 380, 380,
+ /* 210 */ 380, 380, 380, 380, 380, 1239, 1264, 544, 1152, 1277,
+ /* 220 */ 389, 1173, 344, 389, 1239, 1239, 1283, 389, 1123, 544,
+ /* 230 */ 544, 544, 544, 1123, 544, 1212, 389, 419, 389, 433,
+ /* 240 */ 1380, 1380, 544, 1159, 1123, 544, 544, 1159, 1123, 544,
+ /* 250 */ 544, 380, 1164, 1252, 1159, 1165, 1168, 1181, 1010, 1157,
+ /* 260 */ 1166, 1170, 1190, 433, 1411, 1345, 1348, 544, 551, 1239,
+ /* 270 */ 389, 1437, 1264, 3046, 3046, 3046, 3046, 3046, 3046, 3046,
+ /* 280 */ 1063, 191, 402, 1312, 682, 752, 1094, 50, 838, 1149,
+ /* 290 */ 126, 1172, 1172, 1172, 1172, 1172, 1172, 1172, 1172, 1172,
+ /* 300 */ 470, 215, 11, 11, 264, 278, 164, 471, 170, 611,
+ /* 310 */ 366, 16, 459, 70, 16, 16, 16, 473, 915, 528,
+ /* 320 */ 888, 889, 957, 989, 959, 1099, 1100, 568, 839, 664,
+ /* 330 */ 952, 1009, 1073, 1075, 1077, 1081, 903, 832, 1003, 1087,
+ /* 340 */ 1082, 941, 1059, 892, 1088, 826, 1133, 1130, 1147, 1154,
+ /* 350 */ 1177, 1196, 1197, 1007, 1156, 1211, 1132, 1247, 1489, 1493,
+ /* 360 */ 1311, 1495, 1496, 1455, 1498, 1464, 1306, 1466, 1467, 1468,
+ /* 370 */ 1310, 1505, 1471, 1472, 1314, 1509, 1316, 1512, 1478, 1516,
+ /* 380 */ 1504, 1517, 1483, 1350, 1353, 1530, 1531, 1361, 1364, 1537,
+ /* 390 */ 1542, 1499, 1546, 1547, 1548, 1502, 1549, 1552, 1553, 1400,
+ /* 400 */ 1555, 1557, 1559, 1560, 1562, 1415, 1532, 1566, 1419, 1569,
+ /* 410 */ 1570, 1571, 1572, 1574, 1575, 1576, 1577, 1581, 1582, 1585,
+ /* 420 */ 1586, 1597, 1598, 1561, 1599, 1604, 1608, 1616, 1618, 1619,
+ /* 430 */ 1600, 1621, 1625, 1626, 1627, 1602, 1630, 1579, 1632, 1583,
+ /* 440 */ 1633, 1640, 1603, 1607, 1606, 1628, 1605, 1629, 1614, 1641,
+ /* 450 */ 1610, 1613, 1647, 1658, 1661, 1623, 1497, 1665, 1666, 1674,
+ /* 460 */ 1615, 1676, 1678, 1644, 1634, 1645, 1680, 1648, 1638, 1649,
+ /* 470 */ 1689, 1655, 1646, 1652, 1692, 1660, 1651, 1662, 1699, 1700,
+ /* 480 */ 1702, 1703, 1609, 1617, 1670, 1685, 1708, 1675, 1679, 1668,
+ /* 490 */ 1672, 1681, 1682, 1687, 1713, 1698, 1719, 1701, 1673, 1721,
+ /* 500 */ 1704, 1696, 1724, 1714, 1727, 1715, 1732, 1712, 1716, 1642,
+ /* 510 */ 1654, 1735, 1588, 1717, 1739, 1578, 1731, 1591, 1596, 1758,
+ /* 520 */ 1759, 1622, 1624, 1757, 1762, 1764, 1766, 1686, 1691, 1733,
+ /* 530 */ 1594, 1767, 1693, 1635, 1694, 1790, 1752, 1636, 1706, 1690,
+ /* 540 */ 1747, 1755, 1580, 1584, 1589, 1761, 1558, 1707, 1710, 1720,
+ /* 550 */ 1711, 1718, 1722, 1765, 1723, 1725, 1728, 1729, 1726, 1768,
+ /* 560 */ 1769, 1771, 1741, 1782, 1563, 1742, 1743, 1823, 1784, 1611,
+ /* 570 */ 1805, 1806, 1807, 1808, 1809, 1810, 1751, 1753, 1803, 1620,
+ /* 580 */ 1813, 1804, 1814, 1849, 1835, 1653, 1770, 1772, 1774, 1775,
+ /* 590 */ 1777, 1778, 1815, 1780, 1783, 1816, 1781, 1837, 1663, 1788,
+ /* 600 */ 1773, 1795, 1834, 1858, 1800, 1801, 1860, 1812, 1817, 1864,
+ /* 610 */ 1819, 1820, 1866, 1822, 1824, 1869, 1826, 1754, 1791, 1792,
+ /* 620 */ 1793, 1844, 1796, 1827, 1828, 1873, 1829, 1874, 1874, 1896,
+ /* 630 */ 1862, 1865, 1890, 1892, 1894, 1895, 1898, 1900, 1910, 1911,
+ /* 640 */ 1912, 1913, 1881, 1843, 1907, 1917, 1918, 1932, 1920, 1935,
+ /* 650 */ 1924, 1926, 1927, 1897, 1668, 1928, 1672, 1929, 1931, 1934,
+ /* 660 */ 1936, 1945, 1937, 1970, 1939, 1930, 1941, 1976, 1943, 1938,
+ /* 670 */ 1947, 1981, 1949, 1951, 1955, 1982, 1964, 1954, 1963, 2003,
+ /* 680 */ 1978, 1979, 2011, 1995, 1994, 1996, 1997, 1999, 2001,
};
-#define YY_REDUCE_COUNT (274)
-#define YY_REDUCE_MIN (-403)
-#define YY_REDUCE_MAX (1873)
+#define YY_REDUCE_COUNT (279)
+#define YY_REDUCE_MIN (-350)
+#define YY_REDUCE_MAX (2645)
static const short yy_reduce_ofst[] = {
- /* 0 */ 263, 629, 747, -278, -14, 679, 846, 886, 955, 1009,
- /* 10 */ 272, 1059, 104, 1076, 1126, 806, 1152, 1202, 1214, 1228,
- /* 20 */ 1281, 1334, 1346, 1360, 1414, 1428, 1440, 1497, 1507, 1523,
- /* 30 */ 1547, 1603, 1623, 1643, 1663, 1683, 1723, 1739, 1790, 1806,
- /* 40 */ 1857, 1873, 304, 926, 112, 435, 552, -295, 610, -3,
- /* 50 */ -261, -154, -323, 305, 575, 577, 635, -310, -272, -312,
- /* 60 */ -307, -403, -311, -284, -70, -90, 225, 346, 404, 414,
- /* 70 */ 429, 434, 497, 644, 645, 339, 674, 697, 710, -342,
- /* 80 */ 722, 749, -313, 758, 772, -244, -251, 797, -17, 20,
- /* 90 */ 48, 804, 228, 86, -379, -379, -379, -240, -13, 100,
- /* 100 */ 101, 130, 206, 375, 385, 428, 459, 461, 474, 522,
- /* 110 */ 566, 628, 656, 678, 694, 696, 740, -226, -92, -72,
- /* 120 */ -256, -130, 173, 4, 34, 186, 362, 440, 97, 91,
- /* 130 */ 303, -234, -109, 180, 274, 536, 317, 545, 579, 234,
- /* 140 */ -333, 40, 144, 175, 242, 319, 410, 450, 445, 348,
- /* 150 */ 369, 495, 572, 557, 620, 620, 734, 667, 688, 727,
- /* 160 */ 735, 735, 735, 770, 752, 784, 786, 762, 620, 861,
- /* 170 */ 874, 866, 894, 940, 942, 901, 907, 908, 956, 957,
- /* 180 */ 960, 971, 972, 915, 966, 936, 974, 933, 935, 978,
- /* 190 */ 939, 981, 983, 985, 991, 989, 1000, 975, 976, 977,
- /* 200 */ 979, 982, 984, 986, 990, 992, 995, 996, 998, 1006,
- /* 210 */ 993, 951, 1021, 987, 997, 1026, 1030, 1033, 988, 1032,
- /* 220 */ 1002, 1005, 1008, 1013, 1014, 1010, 1015, 1011, 1048, 1038,
- /* 230 */ 1053, 1031, 1003, 1028, 968, 1020, 1036, 1037, 1004, 1022,
- /* 240 */ 1044, 1049, 620, 1016, 1012, 1018, 1023, 1027, 1024, 1029,
- /* 250 */ 735, 1061, 1035, 1045, 1017, 1039, 1046, 1034, 1051, 1050,
- /* 260 */ 1052, 1081, 1067, 1092, 1129, 1104, 1112, 1136, 1096, 1086,
- /* 270 */ 1137, 1138, 1142, 1130, 1168,
+ /* 0 */ 179, -252, -312, 882, 13, 281, 549, 992, 262, 1074,
+ /* 10 */ 1127, 1195, 1217, 1275, 1377, 530, 1436, 1491, 1544, 1595,
+ /* 20 */ 1612, 1697, 1748, 1811, 1831, 1882, 1899, 1942, 1993, 2010,
+ /* 30 */ 2061, 2112, 2131, 2194, 2216, 2276, 2296, 2339, 2359, 2410,
+ /* 40 */ 2461, 2480, 2543, 2565, 2625, 2645, 21, 279, -222, 198,
+ /* 50 */ 442, 465, 607, 924, -97, -28, -350, -347, -315, -340,
+ /* 60 */ -61, -63, -26, 39, 96, -321, -317, -344, -311, 7,
+ /* 70 */ 238, 275, 312, 376, 379, 435, 436, 506, -230, 532,
+ /* 80 */ 543, 550, -189, 592, 640, 213, 645, 701, -245, -199,
+ /* 90 */ 704, 323, 102, 432, 758, -102, 56, -226, -226, -89,
+ /* 100 */ -228, -181, 292, 370, 409, 462, 469, 502, 508, 515,
+ /* 110 */ 529, 548, 597, 617, 633, 666, 669, 680, 687, 233,
+ /* 120 */ 17, -243, -265, -270, -125, 169, -49, -179, -36, 90,
+ /* 130 */ -243, 49, 185, 274, 371, 464, 341, 347, 130, 339,
+ /* 140 */ 384, 377, 474, 368, 467, 560, 519, -332, 321, 424,
+ /* 150 */ 487, 583, 644, 702, 520, 399, 714, 677, 775, 719,
+ /* 160 */ 828, 737, 834, 834, 864, 867, 866, 807, 821, 821,
+ /* 170 */ 805, 821, 835, 827, 834, 865, 869, 877, 890, 933,
+ /* 180 */ 934, 894, 899, 901, 938, 946, 956, 963, 965, 975,
+ /* 190 */ 916, 968, 939, 970, 929, 932, 982, 940, 983, 987,
+ /* 200 */ 988, 993, 990, 1001, 976, 977, 978, 980, 981, 991,
+ /* 210 */ 994, 995, 996, 999, 1002, 1011, 1016, 997, 966, 969,
+ /* 220 */ 1023, 979, 1006, 1027, 1030, 1037, 998, 1036, 1000, 1013,
+ /* 230 */ 1014, 1018, 1020, 1012, 1021, 1022, 1065, 1051, 1067, 1042,
+ /* 240 */ 1015, 1019, 1040, 974, 1038, 1050, 1053, 984, 1044, 1055,
+ /* 250 */ 1056, 834, 1005, 1004, 1017, 1032, 1034, 1008, 1039, 985,
+ /* 260 */ 986, 1024, 821, 1084, 1048, 1031, 1091, 1102, 1106, 1134,
+ /* 270 */ 1136, 1155, 1161, 1097, 1104, 1138, 1142, 1145, 1162, 1174,
};
static const YYACTIONTYPE yy_default[] = {
- /* 0 */ 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1461,
- /* 10 */ 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1461,
- /* 20 */ 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1461,
- /* 30 */ 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1461,
- /* 40 */ 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1461,
- /* 50 */ 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1461,
- /* 60 */ 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1461,
- /* 70 */ 1461, 1461, 1461, 1461, 1461, 1535, 1461, 1461, 1461, 1461,
- /* 80 */ 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1461,
- /* 90 */ 1461, 1461, 1533, 1691, 1461, 1866, 1461, 1461, 1461, 1461,
- /* 100 */ 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1461,
- /* 110 */ 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1461,
- /* 120 */ 1461, 1461, 1535, 1461, 1533, 1878, 1878, 1878, 1461, 1461,
- /* 130 */ 1461, 1461, 1732, 1732, 1461, 1461, 1461, 1461, 1633, 1461,
- /* 140 */ 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1726, 1461, 1947,
- /* 150 */ 1461, 1461, 1461, 1901, 1461, 1461, 1461, 1461, 1586, 1893,
- /* 160 */ 1870, 1884, 1871, 1868, 1932, 1932, 1932, 1887, 1461, 1897,
- /* 170 */ 1461, 1719, 1696, 1461, 1461, 1696, 1693, 1693, 1461, 1461,
- /* 180 */ 1461, 1461, 1461, 1461, 1535, 1461, 1535, 1461, 1461, 1535,
- /* 190 */ 1461, 1535, 1535, 1535, 1461, 1535, 1461, 1461, 1461, 1461,
- /* 200 */ 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1461,
- /* 210 */ 1461, 1461, 1533, 1728, 1461, 1533, 1461, 1461, 1461, 1533,
- /* 220 */ 1906, 1461, 1461, 1461, 1461, 1906, 1461, 1461, 1533, 1461,
- /* 230 */ 1533, 1461, 1461, 1461, 1908, 1906, 1461, 1461, 1908, 1906,
- /* 240 */ 1461, 1461, 1461, 1920, 1916, 1908, 1924, 1922, 1899, 1897,
- /* 250 */ 1884, 1461, 1461, 1938, 1934, 1950, 1938, 1934, 1938, 1934,
- /* 260 */ 1461, 1602, 1461, 1461, 1461, 1533, 1493, 1461, 1721, 1732,
- /* 270 */ 1636, 1636, 1636, 1536, 1466, 1461, 1461, 1461, 1461, 1461,
- /* 280 */ 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1803, 1919, 1918,
- /* 290 */ 1842, 1841, 1840, 1838, 1802, 1461, 1598, 1801, 1800, 1461,
- /* 300 */ 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1794, 1795,
- /* 310 */ 1793, 1792, 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1461,
- /* 320 */ 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1461,
- /* 330 */ 1867, 1461, 1935, 1939, 1461, 1461, 1461, 1461, 1461, 1778,
- /* 340 */ 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1461,
- /* 350 */ 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1461,
- /* 360 */ 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1461,
- /* 370 */ 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1461,
- /* 380 */ 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1461,
- /* 390 */ 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1461,
- /* 400 */ 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1461,
- /* 410 */ 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1461,
- /* 420 */ 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1461,
- /* 430 */ 1461, 1461, 1461, 1461, 1498, 1461, 1461, 1461, 1461, 1461,
- /* 440 */ 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1461,
- /* 450 */ 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1461,
- /* 460 */ 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1461,
- /* 470 */ 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1570, 1569,
- /* 480 */ 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1461,
- /* 490 */ 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1461,
- /* 500 */ 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1461,
- /* 510 */ 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1736, 1461, 1461,
- /* 520 */ 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1900, 1461, 1461,
- /* 530 */ 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1461,
- /* 540 */ 1461, 1461, 1461, 1778, 1461, 1917, 1461, 1877, 1873, 1461,
- /* 550 */ 1461, 1869, 1777, 1461, 1461, 1933, 1461, 1461, 1461, 1461,
- /* 560 */ 1461, 1461, 1461, 1461, 1461, 1862, 1461, 1461, 1835, 1820,
- /* 570 */ 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1461,
- /* 580 */ 1788, 1461, 1461, 1461, 1461, 1461, 1630, 1461, 1461, 1461,
- /* 590 */ 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1615,
- /* 600 */ 1613, 1612, 1611, 1461, 1608, 1461, 1461, 1461, 1461, 1639,
- /* 610 */ 1638, 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1461,
- /* 620 */ 1461, 1461, 1461, 1554, 1461, 1461, 1461, 1461, 1461, 1461,
- /* 630 */ 1461, 1461, 1546, 1461, 1545, 1461, 1461, 1461, 1461, 1461,
- /* 640 */ 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1461,
- /* 650 */ 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1461, 1461,
- /* 660 */ 1461, 1461, 1461, 1461, 1461, 1461, 1461,
+ /* 0 */ 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536,
+ /* 10 */ 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536,
+ /* 20 */ 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536,
+ /* 30 */ 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536,
+ /* 40 */ 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536,
+ /* 50 */ 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536,
+ /* 60 */ 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536,
+ /* 70 */ 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1610, 1536,
+ /* 80 */ 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536,
+ /* 90 */ 1536, 1536, 1536, 1536, 1536, 1608, 1775, 1962, 1536, 1536,
+ /* 100 */ 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536,
+ /* 110 */ 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536,
+ /* 120 */ 1536, 1974, 1536, 1536, 1536, 1610, 1536, 1608, 1934, 1934,
+ /* 130 */ 1974, 1974, 1974, 1536, 1536, 1536, 1715, 1536, 1816, 1816,
+ /* 140 */ 1536, 1536, 1536, 1536, 1536, 1715, 1536, 1536, 1536, 1536,
+ /* 150 */ 1536, 1536, 1536, 1536, 1810, 1536, 1999, 2052, 1536, 1536,
+ /* 160 */ 1536, 2002, 1536, 1536, 1536, 1536, 1668, 1989, 1966, 1980,
+ /* 170 */ 2036, 1967, 1964, 1983, 1536, 1993, 1536, 1803, 1780, 1536,
+ /* 180 */ 1536, 1780, 1777, 1777, 1659, 1536, 1536, 1536, 1536, 1536,
+ /* 190 */ 1536, 1610, 1536, 1610, 1536, 1536, 1610, 1536, 1610, 1610,
+ /* 200 */ 1610, 1536, 1610, 1536, 1536, 1536, 1536, 1536, 1536, 1536,
+ /* 210 */ 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1822, 1536,
+ /* 220 */ 1608, 1812, 1536, 1608, 1536, 1536, 1536, 1608, 2007, 1536,
+ /* 230 */ 1536, 1536, 1536, 2007, 1536, 1536, 1608, 1536, 1608, 1536,
+ /* 240 */ 1536, 1536, 1536, 2009, 2007, 1536, 1536, 2009, 2007, 1536,
+ /* 250 */ 1536, 1536, 2021, 2017, 2009, 2025, 2023, 1995, 1993, 2055,
+ /* 260 */ 2042, 2038, 1980, 1536, 1536, 1536, 1684, 1536, 1536, 1536,
+ /* 270 */ 1608, 1568, 1536, 1805, 1816, 1718, 1718, 1718, 1611, 1541,
+ /* 280 */ 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536,
+ /* 290 */ 1536, 1891, 1536, 2020, 2019, 1938, 1937, 1936, 1927, 1890,
+ /* 300 */ 1536, 1680, 1889, 1888, 1536, 1536, 1536, 1536, 1536, 1536,
+ /* 310 */ 1536, 1882, 1536, 1536, 1883, 1881, 1880, 1536, 1536, 1536,
+ /* 320 */ 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536,
+ /* 330 */ 1536, 1536, 1536, 1536, 1536, 1536, 1536, 2039, 2043, 1963,
+ /* 340 */ 1536, 1536, 1536, 1536, 1536, 1873, 1864, 1536, 1536, 1536,
+ /* 350 */ 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536,
+ /* 360 */ 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536,
+ /* 370 */ 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536,
+ /* 380 */ 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536,
+ /* 390 */ 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536,
+ /* 400 */ 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536,
+ /* 410 */ 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536,
+ /* 420 */ 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536,
+ /* 430 */ 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536,
+ /* 440 */ 1536, 1536, 1536, 1536, 1573, 1536, 1536, 1536, 1536, 1536,
+ /* 450 */ 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536,
+ /* 460 */ 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536,
+ /* 470 */ 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536,
+ /* 480 */ 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1649,
+ /* 490 */ 1648, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536,
+ /* 500 */ 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1872,
+ /* 510 */ 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536,
+ /* 520 */ 1536, 1536, 1536, 2035, 1536, 1536, 1536, 1536, 1536, 1536,
+ /* 530 */ 1536, 1820, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536,
+ /* 540 */ 1536, 1924, 1536, 1536, 1536, 1996, 1536, 1536, 1536, 1536,
+ /* 550 */ 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536,
+ /* 560 */ 1536, 1864, 1536, 2018, 1536, 1536, 2033, 1536, 2037, 1536,
+ /* 570 */ 1536, 1536, 1536, 1536, 1536, 1536, 1973, 1969, 1536, 1536,
+ /* 580 */ 1965, 1863, 1536, 1958, 1536, 1536, 1909, 1536, 1536, 1536,
+ /* 590 */ 1536, 1536, 1536, 1536, 1536, 1536, 1872, 1536, 1876, 1536,
+ /* 600 */ 1536, 1536, 1536, 1536, 1712, 1536, 1536, 1536, 1536, 1536,
+ /* 610 */ 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1697, 1695, 1694,
+ /* 620 */ 1693, 1536, 1690, 1536, 1536, 1536, 1536, 1721, 1720, 1536,
+ /* 630 */ 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536,
+ /* 640 */ 1536, 1536, 1536, 1536, 1630, 1536, 1536, 1536, 1536, 1536,
+ /* 650 */ 1536, 1536, 1536, 1536, 1621, 1536, 1620, 1536, 1536, 1536,
+ /* 660 */ 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536,
+ /* 670 */ 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536,
+ /* 680 */ 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536, 1536,
};
/********** End of lemon-generated parsing tables *****************************/
@@ -953,6 +1101,7 @@ static const YYCODETYPE yyFallback[] = {
0, /* KEEP => nothing */
0, /* PAGES => nothing */
0, /* PAGESIZE => nothing */
+ 0, /* TSDB_PAGESIZE => nothing */
0, /* PRECISION => nothing */
0, /* REPLICA => nothing */
0, /* STRICT => nothing */
@@ -966,7 +1115,11 @@ static const YYCODETYPE yyFallback[] = {
0, /* WAL_RETENTION_SIZE => nothing */
0, /* WAL_ROLL_PERIOD => nothing */
0, /* WAL_SEGMENT_SIZE => nothing */
+ 0, /* STT_TRIGGER => nothing */
+ 0, /* TABLE_PREFIX => nothing */
+ 0, /* TABLE_SUFFIX => nothing */
0, /* NK_COLON => nothing */
+ 0, /* MAX_SPEED => nothing */
0, /* TABLE => nothing */
0, /* NK_LP => nothing */
0, /* NK_RP => nothing */
@@ -1031,6 +1184,7 @@ static const YYCODETYPE yyFallback[] = {
0, /* DISTRIBUTED => nothing */
0, /* CONSUMERS => nothing */
0, /* SUBSCRIPTIONS => nothing */
+ 0, /* VNODES => nothing */
0, /* LIKE => nothing */
0, /* INDEX => nothing */
0, /* FUNCTION => nothing */
@@ -1062,6 +1216,7 @@ static const YYCODETYPE yyFallback[] = {
0, /* WINDOW_CLOSE => nothing */
0, /* IGNORE => nothing */
0, /* EXPIRED => nothing */
+ 0, /* SUBTABLE => nothing */
0, /* KILL => nothing */
0, /* CONNECTION => nothing */
0, /* TRANSACTION => nothing */
@@ -1083,6 +1238,7 @@ static const YYCODETYPE yyFallback[] = {
0, /* WSTART => nothing */
0, /* WEND => nothing */
0, /* WDURATION => nothing */
+ 0, /* IROWTS => nothing */
0, /* CAST => nothing */
0, /* NOW => nothing */
0, /* TODAY => nothing */
@@ -1093,6 +1249,11 @@ static const YYCODETYPE yyFallback[] = {
0, /* CURRENT_USER => nothing */
0, /* COUNT => nothing */
0, /* LAST_ROW => nothing */
+ 0, /* CASE => nothing */
+ 264, /* END => ABORT */
+ 0, /* WHEN => nothing */
+ 0, /* THEN => nothing */
+ 0, /* ELSE => nothing */
0, /* BETWEEN => nothing */
0, /* IS => nothing */
0, /* NK_LT => nothing */
@@ -1131,59 +1292,57 @@ static const YYCODETYPE yyFallback[] = {
0, /* ASC => nothing */
0, /* NULLS => nothing */
0, /* ABORT => nothing */
- 251, /* AFTER => ABORT */
- 251, /* ATTACH => ABORT */
- 251, /* BEFORE => ABORT */
- 251, /* BEGIN => ABORT */
- 251, /* BITAND => ABORT */
- 251, /* BITNOT => ABORT */
- 251, /* BITOR => ABORT */
- 251, /* BLOCKS => ABORT */
- 251, /* CHANGE => ABORT */
- 251, /* COMMA => ABORT */
- 251, /* COMPACT => ABORT */
- 251, /* CONCAT => ABORT */
- 251, /* CONFLICT => ABORT */
- 251, /* COPY => ABORT */
- 251, /* DEFERRED => ABORT */
- 251, /* DELIMITERS => ABORT */
- 251, /* DETACH => ABORT */
- 251, /* DIVIDE => ABORT */
- 251, /* DOT => ABORT */
- 251, /* EACH => ABORT */
- 251, /* END => ABORT */
- 251, /* FAIL => ABORT */
- 251, /* FILE => ABORT */
- 251, /* FOR => ABORT */
- 251, /* GLOB => ABORT */
- 251, /* ID => ABORT */
- 251, /* IMMEDIATE => ABORT */
- 251, /* IMPORT => ABORT */
- 251, /* INITIALLY => ABORT */
- 251, /* INSTEAD => ABORT */
- 251, /* ISNULL => ABORT */
- 251, /* KEY => ABORT */
- 251, /* NK_BITNOT => ABORT */
- 251, /* NK_SEMI => ABORT */
- 251, /* NOTNULL => ABORT */
- 251, /* OF => ABORT */
- 251, /* PLUS => ABORT */
- 251, /* PRIVILEGE => ABORT */
- 251, /* RAISE => ABORT */
- 251, /* REPLACE => ABORT */
- 251, /* RESTRICT => ABORT */
- 251, /* ROW => ABORT */
- 251, /* SEMI => ABORT */
- 251, /* STAR => ABORT */
- 251, /* STATEMENT => ABORT */
- 251, /* STRING => ABORT */
- 251, /* TIMES => ABORT */
- 251, /* UPDATE => ABORT */
- 251, /* VALUES => ABORT */
- 251, /* VARIABLE => ABORT */
- 251, /* VIEW => ABORT */
- 251, /* VNODES => ABORT */
- 251, /* WAL => ABORT */
+ 264, /* AFTER => ABORT */
+ 264, /* ATTACH => ABORT */
+ 264, /* BEFORE => ABORT */
+ 264, /* BEGIN => ABORT */
+ 264, /* BITAND => ABORT */
+ 264, /* BITNOT => ABORT */
+ 264, /* BITOR => ABORT */
+ 264, /* BLOCKS => ABORT */
+ 264, /* CHANGE => ABORT */
+ 264, /* COMMA => ABORT */
+ 264, /* COMPACT => ABORT */
+ 264, /* CONCAT => ABORT */
+ 264, /* CONFLICT => ABORT */
+ 264, /* COPY => ABORT */
+ 264, /* DEFERRED => ABORT */
+ 264, /* DELIMITERS => ABORT */
+ 264, /* DETACH => ABORT */
+ 264, /* DIVIDE => ABORT */
+ 264, /* DOT => ABORT */
+ 264, /* EACH => ABORT */
+ 264, /* FAIL => ABORT */
+ 264, /* FILE => ABORT */
+ 264, /* FOR => ABORT */
+ 264, /* GLOB => ABORT */
+ 264, /* ID => ABORT */
+ 264, /* IMMEDIATE => ABORT */
+ 264, /* IMPORT => ABORT */
+ 264, /* INITIALLY => ABORT */
+ 264, /* INSTEAD => ABORT */
+ 264, /* ISNULL => ABORT */
+ 264, /* KEY => ABORT */
+ 264, /* NK_BITNOT => ABORT */
+ 264, /* NK_SEMI => ABORT */
+ 264, /* NOTNULL => ABORT */
+ 264, /* OF => ABORT */
+ 264, /* PLUS => ABORT */
+ 264, /* PRIVILEGE => ABORT */
+ 264, /* RAISE => ABORT */
+ 264, /* REPLACE => ABORT */
+ 264, /* RESTRICT => ABORT */
+ 264, /* ROW => ABORT */
+ 264, /* SEMI => ABORT */
+ 264, /* STAR => ABORT */
+ 264, /* STATEMENT => ABORT */
+ 264, /* STRING => ABORT */
+ 264, /* TIMES => ABORT */
+ 264, /* UPDATE => ABORT */
+ 264, /* VALUES => ABORT */
+ 264, /* VARIABLE => ABORT */
+ 264, /* VIEW => ABORT */
+ 264, /* WAL => ABORT */
};
#endif /* YYFALLBACK */
@@ -1345,358 +1504,380 @@ static const char *const yyTokenName[] = {
/* 71 */ "KEEP",
/* 72 */ "PAGES",
/* 73 */ "PAGESIZE",
- /* 74 */ "PRECISION",
- /* 75 */ "REPLICA",
- /* 76 */ "STRICT",
- /* 77 */ "VGROUPS",
- /* 78 */ "SINGLE_STABLE",
- /* 79 */ "RETENTIONS",
- /* 80 */ "SCHEMALESS",
- /* 81 */ "WAL_LEVEL",
- /* 82 */ "WAL_FSYNC_PERIOD",
- /* 83 */ "WAL_RETENTION_PERIOD",
- /* 84 */ "WAL_RETENTION_SIZE",
- /* 85 */ "WAL_ROLL_PERIOD",
- /* 86 */ "WAL_SEGMENT_SIZE",
- /* 87 */ "NK_COLON",
- /* 88 */ "TABLE",
- /* 89 */ "NK_LP",
- /* 90 */ "NK_RP",
- /* 91 */ "STABLE",
- /* 92 */ "ADD",
- /* 93 */ "COLUMN",
- /* 94 */ "MODIFY",
- /* 95 */ "RENAME",
- /* 96 */ "TAG",
- /* 97 */ "SET",
- /* 98 */ "NK_EQ",
- /* 99 */ "USING",
- /* 100 */ "TAGS",
- /* 101 */ "COMMENT",
- /* 102 */ "BOOL",
- /* 103 */ "TINYINT",
- /* 104 */ "SMALLINT",
- /* 105 */ "INT",
- /* 106 */ "INTEGER",
- /* 107 */ "BIGINT",
- /* 108 */ "FLOAT",
- /* 109 */ "DOUBLE",
- /* 110 */ "BINARY",
- /* 111 */ "TIMESTAMP",
- /* 112 */ "NCHAR",
- /* 113 */ "UNSIGNED",
- /* 114 */ "JSON",
- /* 115 */ "VARCHAR",
- /* 116 */ "MEDIUMBLOB",
- /* 117 */ "BLOB",
- /* 118 */ "VARBINARY",
- /* 119 */ "DECIMAL",
- /* 120 */ "MAX_DELAY",
- /* 121 */ "WATERMARK",
- /* 122 */ "ROLLUP",
- /* 123 */ "TTL",
- /* 124 */ "SMA",
- /* 125 */ "FIRST",
- /* 126 */ "LAST",
- /* 127 */ "SHOW",
- /* 128 */ "DATABASES",
- /* 129 */ "TABLES",
- /* 130 */ "STABLES",
- /* 131 */ "MNODES",
- /* 132 */ "MODULES",
- /* 133 */ "QNODES",
- /* 134 */ "FUNCTIONS",
- /* 135 */ "INDEXES",
- /* 136 */ "ACCOUNTS",
- /* 137 */ "APPS",
- /* 138 */ "CONNECTIONS",
- /* 139 */ "LICENCES",
- /* 140 */ "GRANTS",
- /* 141 */ "QUERIES",
- /* 142 */ "SCORES",
- /* 143 */ "TOPICS",
- /* 144 */ "VARIABLES",
- /* 145 */ "BNODES",
- /* 146 */ "SNODES",
- /* 147 */ "CLUSTER",
- /* 148 */ "TRANSACTIONS",
- /* 149 */ "DISTRIBUTED",
- /* 150 */ "CONSUMERS",
- /* 151 */ "SUBSCRIPTIONS",
- /* 152 */ "LIKE",
- /* 153 */ "INDEX",
- /* 154 */ "FUNCTION",
- /* 155 */ "INTERVAL",
- /* 156 */ "TOPIC",
- /* 157 */ "AS",
- /* 158 */ "WITH",
- /* 159 */ "META",
- /* 160 */ "CONSUMER",
- /* 161 */ "GROUP",
- /* 162 */ "DESC",
- /* 163 */ "DESCRIBE",
- /* 164 */ "RESET",
- /* 165 */ "QUERY",
- /* 166 */ "CACHE",
- /* 167 */ "EXPLAIN",
- /* 168 */ "ANALYZE",
- /* 169 */ "VERBOSE",
- /* 170 */ "NK_BOOL",
- /* 171 */ "RATIO",
- /* 172 */ "NK_FLOAT",
- /* 173 */ "OUTPUTTYPE",
- /* 174 */ "AGGREGATE",
- /* 175 */ "BUFSIZE",
- /* 176 */ "STREAM",
- /* 177 */ "INTO",
- /* 178 */ "TRIGGER",
- /* 179 */ "AT_ONCE",
- /* 180 */ "WINDOW_CLOSE",
- /* 181 */ "IGNORE",
- /* 182 */ "EXPIRED",
- /* 183 */ "KILL",
- /* 184 */ "CONNECTION",
- /* 185 */ "TRANSACTION",
- /* 186 */ "BALANCE",
- /* 187 */ "VGROUP",
- /* 188 */ "MERGE",
- /* 189 */ "REDISTRIBUTE",
- /* 190 */ "SPLIT",
- /* 191 */ "DELETE",
- /* 192 */ "INSERT",
- /* 193 */ "NULL",
- /* 194 */ "NK_QUESTION",
- /* 195 */ "NK_ARROW",
- /* 196 */ "ROWTS",
- /* 197 */ "TBNAME",
- /* 198 */ "QSTART",
- /* 199 */ "QEND",
- /* 200 */ "QDURATION",
- /* 201 */ "WSTART",
- /* 202 */ "WEND",
- /* 203 */ "WDURATION",
- /* 204 */ "CAST",
- /* 205 */ "NOW",
- /* 206 */ "TODAY",
- /* 207 */ "TIMEZONE",
- /* 208 */ "CLIENT_VERSION",
- /* 209 */ "SERVER_VERSION",
- /* 210 */ "SERVER_STATUS",
- /* 211 */ "CURRENT_USER",
- /* 212 */ "COUNT",
- /* 213 */ "LAST_ROW",
- /* 214 */ "BETWEEN",
- /* 215 */ "IS",
- /* 216 */ "NK_LT",
- /* 217 */ "NK_GT",
- /* 218 */ "NK_LE",
- /* 219 */ "NK_GE",
- /* 220 */ "NK_NE",
- /* 221 */ "MATCH",
- /* 222 */ "NMATCH",
- /* 223 */ "CONTAINS",
- /* 224 */ "IN",
- /* 225 */ "JOIN",
- /* 226 */ "INNER",
- /* 227 */ "SELECT",
- /* 228 */ "DISTINCT",
- /* 229 */ "WHERE",
- /* 230 */ "PARTITION",
- /* 231 */ "BY",
- /* 232 */ "SESSION",
- /* 233 */ "STATE_WINDOW",
- /* 234 */ "SLIDING",
- /* 235 */ "FILL",
- /* 236 */ "VALUE",
- /* 237 */ "NONE",
- /* 238 */ "PREV",
- /* 239 */ "LINEAR",
- /* 240 */ "NEXT",
- /* 241 */ "HAVING",
- /* 242 */ "RANGE",
- /* 243 */ "EVERY",
- /* 244 */ "ORDER",
- /* 245 */ "SLIMIT",
- /* 246 */ "SOFFSET",
- /* 247 */ "LIMIT",
- /* 248 */ "OFFSET",
- /* 249 */ "ASC",
- /* 250 */ "NULLS",
- /* 251 */ "ABORT",
- /* 252 */ "AFTER",
- /* 253 */ "ATTACH",
- /* 254 */ "BEFORE",
- /* 255 */ "BEGIN",
- /* 256 */ "BITAND",
- /* 257 */ "BITNOT",
- /* 258 */ "BITOR",
- /* 259 */ "BLOCKS",
- /* 260 */ "CHANGE",
- /* 261 */ "COMMA",
- /* 262 */ "COMPACT",
- /* 263 */ "CONCAT",
- /* 264 */ "CONFLICT",
- /* 265 */ "COPY",
- /* 266 */ "DEFERRED",
- /* 267 */ "DELIMITERS",
- /* 268 */ "DETACH",
- /* 269 */ "DIVIDE",
- /* 270 */ "DOT",
- /* 271 */ "EACH",
- /* 272 */ "END",
- /* 273 */ "FAIL",
- /* 274 */ "FILE",
- /* 275 */ "FOR",
- /* 276 */ "GLOB",
- /* 277 */ "ID",
- /* 278 */ "IMMEDIATE",
- /* 279 */ "IMPORT",
- /* 280 */ "INITIALLY",
- /* 281 */ "INSTEAD",
- /* 282 */ "ISNULL",
- /* 283 */ "KEY",
- /* 284 */ "NK_BITNOT",
- /* 285 */ "NK_SEMI",
- /* 286 */ "NOTNULL",
- /* 287 */ "OF",
- /* 288 */ "PLUS",
- /* 289 */ "PRIVILEGE",
- /* 290 */ "RAISE",
- /* 291 */ "REPLACE",
- /* 292 */ "RESTRICT",
- /* 293 */ "ROW",
- /* 294 */ "SEMI",
- /* 295 */ "STAR",
- /* 296 */ "STATEMENT",
- /* 297 */ "STRING",
- /* 298 */ "TIMES",
- /* 299 */ "UPDATE",
- /* 300 */ "VALUES",
- /* 301 */ "VARIABLE",
- /* 302 */ "VIEW",
- /* 303 */ "VNODES",
- /* 304 */ "WAL",
- /* 305 */ "cmd",
- /* 306 */ "account_options",
- /* 307 */ "alter_account_options",
- /* 308 */ "literal",
- /* 309 */ "alter_account_option",
- /* 310 */ "user_name",
- /* 311 */ "sysinfo_opt",
- /* 312 */ "privileges",
- /* 313 */ "priv_level",
- /* 314 */ "priv_type_list",
- /* 315 */ "priv_type",
- /* 316 */ "db_name",
- /* 317 */ "dnode_endpoint",
- /* 318 */ "not_exists_opt",
- /* 319 */ "db_options",
- /* 320 */ "exists_opt",
- /* 321 */ "alter_db_options",
- /* 322 */ "integer_list",
- /* 323 */ "variable_list",
- /* 324 */ "retention_list",
- /* 325 */ "alter_db_option",
- /* 326 */ "retention",
- /* 327 */ "full_table_name",
- /* 328 */ "column_def_list",
- /* 329 */ "tags_def_opt",
- /* 330 */ "table_options",
- /* 331 */ "multi_create_clause",
- /* 332 */ "tags_def",
- /* 333 */ "multi_drop_clause",
- /* 334 */ "alter_table_clause",
- /* 335 */ "alter_table_options",
- /* 336 */ "column_name",
- /* 337 */ "type_name",
- /* 338 */ "signed_literal",
- /* 339 */ "create_subtable_clause",
- /* 340 */ "specific_cols_opt",
- /* 341 */ "expression_list",
- /* 342 */ "drop_table_clause",
- /* 343 */ "col_name_list",
- /* 344 */ "table_name",
- /* 345 */ "column_def",
- /* 346 */ "duration_list",
- /* 347 */ "rollup_func_list",
- /* 348 */ "alter_table_option",
- /* 349 */ "duration_literal",
- /* 350 */ "rollup_func_name",
- /* 351 */ "function_name",
- /* 352 */ "col_name",
- /* 353 */ "db_name_cond_opt",
- /* 354 */ "like_pattern_opt",
- /* 355 */ "table_name_cond",
- /* 356 */ "from_db_opt",
- /* 357 */ "index_options",
- /* 358 */ "func_list",
- /* 359 */ "sliding_opt",
- /* 360 */ "sma_stream_opt",
- /* 361 */ "func",
- /* 362 */ "stream_options",
- /* 363 */ "topic_name",
- /* 364 */ "query_expression",
- /* 365 */ "cgroup_name",
- /* 366 */ "analyze_opt",
- /* 367 */ "explain_options",
- /* 368 */ "agg_func_opt",
- /* 369 */ "bufsize_opt",
- /* 370 */ "stream_name",
- /* 371 */ "dnode_list",
- /* 372 */ "where_clause_opt",
- /* 373 */ "signed",
- /* 374 */ "literal_func",
- /* 375 */ "literal_list",
- /* 376 */ "table_alias",
- /* 377 */ "column_alias",
- /* 378 */ "expression",
- /* 379 */ "pseudo_column",
- /* 380 */ "column_reference",
- /* 381 */ "function_expression",
- /* 382 */ "subquery",
- /* 383 */ "star_func",
- /* 384 */ "star_func_para_list",
- /* 385 */ "noarg_func",
- /* 386 */ "other_para_list",
- /* 387 */ "star_func_para",
- /* 388 */ "predicate",
- /* 389 */ "compare_op",
- /* 390 */ "in_op",
- /* 391 */ "in_predicate_value",
- /* 392 */ "boolean_value_expression",
- /* 393 */ "boolean_primary",
- /* 394 */ "common_expression",
- /* 395 */ "from_clause_opt",
- /* 396 */ "table_reference_list",
- /* 397 */ "table_reference",
- /* 398 */ "table_primary",
- /* 399 */ "joined_table",
- /* 400 */ "alias_opt",
- /* 401 */ "parenthesized_joined_table",
- /* 402 */ "join_type",
- /* 403 */ "search_condition",
- /* 404 */ "query_specification",
- /* 405 */ "set_quantifier_opt",
- /* 406 */ "select_list",
- /* 407 */ "partition_by_clause_opt",
- /* 408 */ "range_opt",
- /* 409 */ "every_opt",
- /* 410 */ "fill_opt",
- /* 411 */ "twindow_clause_opt",
- /* 412 */ "group_by_clause_opt",
- /* 413 */ "having_clause_opt",
- /* 414 */ "select_item",
- /* 415 */ "fill_mode",
- /* 416 */ "group_by_list",
- /* 417 */ "query_expression_body",
- /* 418 */ "order_by_clause_opt",
- /* 419 */ "slimit_clause_opt",
- /* 420 */ "limit_clause_opt",
- /* 421 */ "query_primary",
- /* 422 */ "sort_specification_list",
- /* 423 */ "sort_specification",
- /* 424 */ "ordering_specification_opt",
- /* 425 */ "null_ordering_opt",
+ /* 74 */ "TSDB_PAGESIZE",
+ /* 75 */ "PRECISION",
+ /* 76 */ "REPLICA",
+ /* 77 */ "STRICT",
+ /* 78 */ "VGROUPS",
+ /* 79 */ "SINGLE_STABLE",
+ /* 80 */ "RETENTIONS",
+ /* 81 */ "SCHEMALESS",
+ /* 82 */ "WAL_LEVEL",
+ /* 83 */ "WAL_FSYNC_PERIOD",
+ /* 84 */ "WAL_RETENTION_PERIOD",
+ /* 85 */ "WAL_RETENTION_SIZE",
+ /* 86 */ "WAL_ROLL_PERIOD",
+ /* 87 */ "WAL_SEGMENT_SIZE",
+ /* 88 */ "STT_TRIGGER",
+ /* 89 */ "TABLE_PREFIX",
+ /* 90 */ "TABLE_SUFFIX",
+ /* 91 */ "NK_COLON",
+ /* 92 */ "MAX_SPEED",
+ /* 93 */ "TABLE",
+ /* 94 */ "NK_LP",
+ /* 95 */ "NK_RP",
+ /* 96 */ "STABLE",
+ /* 97 */ "ADD",
+ /* 98 */ "COLUMN",
+ /* 99 */ "MODIFY",
+ /* 100 */ "RENAME",
+ /* 101 */ "TAG",
+ /* 102 */ "SET",
+ /* 103 */ "NK_EQ",
+ /* 104 */ "USING",
+ /* 105 */ "TAGS",
+ /* 106 */ "COMMENT",
+ /* 107 */ "BOOL",
+ /* 108 */ "TINYINT",
+ /* 109 */ "SMALLINT",
+ /* 110 */ "INT",
+ /* 111 */ "INTEGER",
+ /* 112 */ "BIGINT",
+ /* 113 */ "FLOAT",
+ /* 114 */ "DOUBLE",
+ /* 115 */ "BINARY",
+ /* 116 */ "TIMESTAMP",
+ /* 117 */ "NCHAR",
+ /* 118 */ "UNSIGNED",
+ /* 119 */ "JSON",
+ /* 120 */ "VARCHAR",
+ /* 121 */ "MEDIUMBLOB",
+ /* 122 */ "BLOB",
+ /* 123 */ "VARBINARY",
+ /* 124 */ "DECIMAL",
+ /* 125 */ "MAX_DELAY",
+ /* 126 */ "WATERMARK",
+ /* 127 */ "ROLLUP",
+ /* 128 */ "TTL",
+ /* 129 */ "SMA",
+ /* 130 */ "FIRST",
+ /* 131 */ "LAST",
+ /* 132 */ "SHOW",
+ /* 133 */ "DATABASES",
+ /* 134 */ "TABLES",
+ /* 135 */ "STABLES",
+ /* 136 */ "MNODES",
+ /* 137 */ "MODULES",
+ /* 138 */ "QNODES",
+ /* 139 */ "FUNCTIONS",
+ /* 140 */ "INDEXES",
+ /* 141 */ "ACCOUNTS",
+ /* 142 */ "APPS",
+ /* 143 */ "CONNECTIONS",
+ /* 144 */ "LICENCES",
+ /* 145 */ "GRANTS",
+ /* 146 */ "QUERIES",
+ /* 147 */ "SCORES",
+ /* 148 */ "TOPICS",
+ /* 149 */ "VARIABLES",
+ /* 150 */ "BNODES",
+ /* 151 */ "SNODES",
+ /* 152 */ "CLUSTER",
+ /* 153 */ "TRANSACTIONS",
+ /* 154 */ "DISTRIBUTED",
+ /* 155 */ "CONSUMERS",
+ /* 156 */ "SUBSCRIPTIONS",
+ /* 157 */ "VNODES",
+ /* 158 */ "LIKE",
+ /* 159 */ "INDEX",
+ /* 160 */ "FUNCTION",
+ /* 161 */ "INTERVAL",
+ /* 162 */ "TOPIC",
+ /* 163 */ "AS",
+ /* 164 */ "WITH",
+ /* 165 */ "META",
+ /* 166 */ "CONSUMER",
+ /* 167 */ "GROUP",
+ /* 168 */ "DESC",
+ /* 169 */ "DESCRIBE",
+ /* 170 */ "RESET",
+ /* 171 */ "QUERY",
+ /* 172 */ "CACHE",
+ /* 173 */ "EXPLAIN",
+ /* 174 */ "ANALYZE",
+ /* 175 */ "VERBOSE",
+ /* 176 */ "NK_BOOL",
+ /* 177 */ "RATIO",
+ /* 178 */ "NK_FLOAT",
+ /* 179 */ "OUTPUTTYPE",
+ /* 180 */ "AGGREGATE",
+ /* 181 */ "BUFSIZE",
+ /* 182 */ "STREAM",
+ /* 183 */ "INTO",
+ /* 184 */ "TRIGGER",
+ /* 185 */ "AT_ONCE",
+ /* 186 */ "WINDOW_CLOSE",
+ /* 187 */ "IGNORE",
+ /* 188 */ "EXPIRED",
+ /* 189 */ "SUBTABLE",
+ /* 190 */ "KILL",
+ /* 191 */ "CONNECTION",
+ /* 192 */ "TRANSACTION",
+ /* 193 */ "BALANCE",
+ /* 194 */ "VGROUP",
+ /* 195 */ "MERGE",
+ /* 196 */ "REDISTRIBUTE",
+ /* 197 */ "SPLIT",
+ /* 198 */ "DELETE",
+ /* 199 */ "INSERT",
+ /* 200 */ "NULL",
+ /* 201 */ "NK_QUESTION",
+ /* 202 */ "NK_ARROW",
+ /* 203 */ "ROWTS",
+ /* 204 */ "TBNAME",
+ /* 205 */ "QSTART",
+ /* 206 */ "QEND",
+ /* 207 */ "QDURATION",
+ /* 208 */ "WSTART",
+ /* 209 */ "WEND",
+ /* 210 */ "WDURATION",
+ /* 211 */ "IROWTS",
+ /* 212 */ "CAST",
+ /* 213 */ "NOW",
+ /* 214 */ "TODAY",
+ /* 215 */ "TIMEZONE",
+ /* 216 */ "CLIENT_VERSION",
+ /* 217 */ "SERVER_VERSION",
+ /* 218 */ "SERVER_STATUS",
+ /* 219 */ "CURRENT_USER",
+ /* 220 */ "COUNT",
+ /* 221 */ "LAST_ROW",
+ /* 222 */ "CASE",
+ /* 223 */ "END",
+ /* 224 */ "WHEN",
+ /* 225 */ "THEN",
+ /* 226 */ "ELSE",
+ /* 227 */ "BETWEEN",
+ /* 228 */ "IS",
+ /* 229 */ "NK_LT",
+ /* 230 */ "NK_GT",
+ /* 231 */ "NK_LE",
+ /* 232 */ "NK_GE",
+ /* 233 */ "NK_NE",
+ /* 234 */ "MATCH",
+ /* 235 */ "NMATCH",
+ /* 236 */ "CONTAINS",
+ /* 237 */ "IN",
+ /* 238 */ "JOIN",
+ /* 239 */ "INNER",
+ /* 240 */ "SELECT",
+ /* 241 */ "DISTINCT",
+ /* 242 */ "WHERE",
+ /* 243 */ "PARTITION",
+ /* 244 */ "BY",
+ /* 245 */ "SESSION",
+ /* 246 */ "STATE_WINDOW",
+ /* 247 */ "SLIDING",
+ /* 248 */ "FILL",
+ /* 249 */ "VALUE",
+ /* 250 */ "NONE",
+ /* 251 */ "PREV",
+ /* 252 */ "LINEAR",
+ /* 253 */ "NEXT",
+ /* 254 */ "HAVING",
+ /* 255 */ "RANGE",
+ /* 256 */ "EVERY",
+ /* 257 */ "ORDER",
+ /* 258 */ "SLIMIT",
+ /* 259 */ "SOFFSET",
+ /* 260 */ "LIMIT",
+ /* 261 */ "OFFSET",
+ /* 262 */ "ASC",
+ /* 263 */ "NULLS",
+ /* 264 */ "ABORT",
+ /* 265 */ "AFTER",
+ /* 266 */ "ATTACH",
+ /* 267 */ "BEFORE",
+ /* 268 */ "BEGIN",
+ /* 269 */ "BITAND",
+ /* 270 */ "BITNOT",
+ /* 271 */ "BITOR",
+ /* 272 */ "BLOCKS",
+ /* 273 */ "CHANGE",
+ /* 274 */ "COMMA",
+ /* 275 */ "COMPACT",
+ /* 276 */ "CONCAT",
+ /* 277 */ "CONFLICT",
+ /* 278 */ "COPY",
+ /* 279 */ "DEFERRED",
+ /* 280 */ "DELIMITERS",
+ /* 281 */ "DETACH",
+ /* 282 */ "DIVIDE",
+ /* 283 */ "DOT",
+ /* 284 */ "EACH",
+ /* 285 */ "FAIL",
+ /* 286 */ "FILE",
+ /* 287 */ "FOR",
+ /* 288 */ "GLOB",
+ /* 289 */ "ID",
+ /* 290 */ "IMMEDIATE",
+ /* 291 */ "IMPORT",
+ /* 292 */ "INITIALLY",
+ /* 293 */ "INSTEAD",
+ /* 294 */ "ISNULL",
+ /* 295 */ "KEY",
+ /* 296 */ "NK_BITNOT",
+ /* 297 */ "NK_SEMI",
+ /* 298 */ "NOTNULL",
+ /* 299 */ "OF",
+ /* 300 */ "PLUS",
+ /* 301 */ "PRIVILEGE",
+ /* 302 */ "RAISE",
+ /* 303 */ "REPLACE",
+ /* 304 */ "RESTRICT",
+ /* 305 */ "ROW",
+ /* 306 */ "SEMI",
+ /* 307 */ "STAR",
+ /* 308 */ "STATEMENT",
+ /* 309 */ "STRING",
+ /* 310 */ "TIMES",
+ /* 311 */ "UPDATE",
+ /* 312 */ "VALUES",
+ /* 313 */ "VARIABLE",
+ /* 314 */ "VIEW",
+ /* 315 */ "WAL",
+ /* 316 */ "cmd",
+ /* 317 */ "account_options",
+ /* 318 */ "alter_account_options",
+ /* 319 */ "literal",
+ /* 320 */ "alter_account_option",
+ /* 321 */ "user_name",
+ /* 322 */ "sysinfo_opt",
+ /* 323 */ "privileges",
+ /* 324 */ "priv_level",
+ /* 325 */ "priv_type_list",
+ /* 326 */ "priv_type",
+ /* 327 */ "db_name",
+ /* 328 */ "dnode_endpoint",
+ /* 329 */ "not_exists_opt",
+ /* 330 */ "db_options",
+ /* 331 */ "exists_opt",
+ /* 332 */ "alter_db_options",
+ /* 333 */ "speed_opt",
+ /* 334 */ "integer_list",
+ /* 335 */ "variable_list",
+ /* 336 */ "retention_list",
+ /* 337 */ "alter_db_option",
+ /* 338 */ "retention",
+ /* 339 */ "full_table_name",
+ /* 340 */ "column_def_list",
+ /* 341 */ "tags_def_opt",
+ /* 342 */ "table_options",
+ /* 343 */ "multi_create_clause",
+ /* 344 */ "tags_def",
+ /* 345 */ "multi_drop_clause",
+ /* 346 */ "alter_table_clause",
+ /* 347 */ "alter_table_options",
+ /* 348 */ "column_name",
+ /* 349 */ "type_name",
+ /* 350 */ "signed_literal",
+ /* 351 */ "create_subtable_clause",
+ /* 352 */ "specific_cols_opt",
+ /* 353 */ "expression_list",
+ /* 354 */ "drop_table_clause",
+ /* 355 */ "col_name_list",
+ /* 356 */ "table_name",
+ /* 357 */ "column_def",
+ /* 358 */ "duration_list",
+ /* 359 */ "rollup_func_list",
+ /* 360 */ "alter_table_option",
+ /* 361 */ "duration_literal",
+ /* 362 */ "rollup_func_name",
+ /* 363 */ "function_name",
+ /* 364 */ "col_name",
+ /* 365 */ "db_name_cond_opt",
+ /* 366 */ "like_pattern_opt",
+ /* 367 */ "table_name_cond",
+ /* 368 */ "from_db_opt",
+ /* 369 */ "index_options",
+ /* 370 */ "func_list",
+ /* 371 */ "sliding_opt",
+ /* 372 */ "sma_stream_opt",
+ /* 373 */ "func",
+ /* 374 */ "stream_options",
+ /* 375 */ "topic_name",
+ /* 376 */ "query_or_subquery",
+ /* 377 */ "cgroup_name",
+ /* 378 */ "analyze_opt",
+ /* 379 */ "explain_options",
+ /* 380 */ "agg_func_opt",
+ /* 381 */ "bufsize_opt",
+ /* 382 */ "stream_name",
+ /* 383 */ "subtable_opt",
+ /* 384 */ "expression",
+ /* 385 */ "dnode_list",
+ /* 386 */ "where_clause_opt",
+ /* 387 */ "signed",
+ /* 388 */ "literal_func",
+ /* 389 */ "literal_list",
+ /* 390 */ "table_alias",
+ /* 391 */ "column_alias",
+ /* 392 */ "expr_or_subquery",
+ /* 393 */ "subquery",
+ /* 394 */ "pseudo_column",
+ /* 395 */ "column_reference",
+ /* 396 */ "function_expression",
+ /* 397 */ "case_when_expression",
+ /* 398 */ "star_func",
+ /* 399 */ "star_func_para_list",
+ /* 400 */ "noarg_func",
+ /* 401 */ "other_para_list",
+ /* 402 */ "star_func_para",
+ /* 403 */ "when_then_list",
+ /* 404 */ "case_when_else_opt",
+ /* 405 */ "common_expression",
+ /* 406 */ "when_then_expr",
+ /* 407 */ "predicate",
+ /* 408 */ "compare_op",
+ /* 409 */ "in_op",
+ /* 410 */ "in_predicate_value",
+ /* 411 */ "boolean_value_expression",
+ /* 412 */ "boolean_primary",
+ /* 413 */ "from_clause_opt",
+ /* 414 */ "table_reference_list",
+ /* 415 */ "table_reference",
+ /* 416 */ "table_primary",
+ /* 417 */ "joined_table",
+ /* 418 */ "alias_opt",
+ /* 419 */ "parenthesized_joined_table",
+ /* 420 */ "join_type",
+ /* 421 */ "search_condition",
+ /* 422 */ "query_specification",
+ /* 423 */ "set_quantifier_opt",
+ /* 424 */ "select_list",
+ /* 425 */ "partition_by_clause_opt",
+ /* 426 */ "range_opt",
+ /* 427 */ "every_opt",
+ /* 428 */ "fill_opt",
+ /* 429 */ "twindow_clause_opt",
+ /* 430 */ "group_by_clause_opt",
+ /* 431 */ "having_clause_opt",
+ /* 432 */ "select_item",
+ /* 433 */ "partition_list",
+ /* 434 */ "partition_item",
+ /* 435 */ "fill_mode",
+ /* 436 */ "group_by_list",
+ /* 437 */ "query_expression",
+ /* 438 */ "query_simple",
+ /* 439 */ "order_by_clause_opt",
+ /* 440 */ "slimit_clause_opt",
+ /* 441 */ "limit_clause_opt",
+ /* 442 */ "union_query_expression",
+ /* 443 */ "query_simple_or_subquery",
+ /* 444 */ "sort_specification_list",
+ /* 445 */ "sort_specification",
+ /* 446 */ "ordering_specification_opt",
+ /* 447 */ "null_ordering_opt",
};
#endif /* defined(YYCOVERAGE) || !defined(NDEBUG) */
@@ -1771,7 +1952,7 @@ static const char *const yyRuleName[] = {
/* 64 */ "cmd ::= USE db_name",
/* 65 */ "cmd ::= ALTER DATABASE db_name alter_db_options",
/* 66 */ "cmd ::= FLUSH DATABASE db_name",
- /* 67 */ "cmd ::= TRIM DATABASE db_name",
+ /* 67 */ "cmd ::= TRIM DATABASE db_name speed_opt",
/* 68 */ "not_exists_opt ::= IF NOT EXISTS",
/* 69 */ "not_exists_opt ::=",
/* 70 */ "exists_opt ::= IF EXISTS",
@@ -1789,410 +1970,440 @@ static const char *const yyRuleName[] = {
/* 82 */ "db_options ::= db_options KEEP variable_list",
/* 83 */ "db_options ::= db_options PAGES NK_INTEGER",
/* 84 */ "db_options ::= db_options PAGESIZE NK_INTEGER",
- /* 85 */ "db_options ::= db_options PRECISION NK_STRING",
- /* 86 */ "db_options ::= db_options REPLICA NK_INTEGER",
- /* 87 */ "db_options ::= db_options STRICT NK_STRING",
- /* 88 */ "db_options ::= db_options VGROUPS NK_INTEGER",
- /* 89 */ "db_options ::= db_options SINGLE_STABLE NK_INTEGER",
- /* 90 */ "db_options ::= db_options RETENTIONS retention_list",
- /* 91 */ "db_options ::= db_options SCHEMALESS NK_INTEGER",
- /* 92 */ "db_options ::= db_options WAL_LEVEL NK_INTEGER",
- /* 93 */ "db_options ::= db_options WAL_FSYNC_PERIOD NK_INTEGER",
- /* 94 */ "db_options ::= db_options WAL_RETENTION_PERIOD NK_INTEGER",
- /* 95 */ "db_options ::= db_options WAL_RETENTION_PERIOD NK_MINUS NK_INTEGER",
- /* 96 */ "db_options ::= db_options WAL_RETENTION_SIZE NK_INTEGER",
- /* 97 */ "db_options ::= db_options WAL_RETENTION_SIZE NK_MINUS NK_INTEGER",
- /* 98 */ "db_options ::= db_options WAL_ROLL_PERIOD NK_INTEGER",
- /* 99 */ "db_options ::= db_options WAL_SEGMENT_SIZE NK_INTEGER",
- /* 100 */ "alter_db_options ::= alter_db_option",
- /* 101 */ "alter_db_options ::= alter_db_options alter_db_option",
- /* 102 */ "alter_db_option ::= CACHEMODEL NK_STRING",
- /* 103 */ "alter_db_option ::= CACHESIZE NK_INTEGER",
- /* 104 */ "alter_db_option ::= WAL_FSYNC_PERIOD NK_INTEGER",
- /* 105 */ "alter_db_option ::= KEEP integer_list",
- /* 106 */ "alter_db_option ::= KEEP variable_list",
- /* 107 */ "alter_db_option ::= WAL_LEVEL NK_INTEGER",
- /* 108 */ "integer_list ::= NK_INTEGER",
- /* 109 */ "integer_list ::= integer_list NK_COMMA NK_INTEGER",
- /* 110 */ "variable_list ::= NK_VARIABLE",
- /* 111 */ "variable_list ::= variable_list NK_COMMA NK_VARIABLE",
- /* 112 */ "retention_list ::= retention",
- /* 113 */ "retention_list ::= retention_list NK_COMMA retention",
- /* 114 */ "retention ::= NK_VARIABLE NK_COLON NK_VARIABLE",
- /* 115 */ "cmd ::= CREATE TABLE not_exists_opt full_table_name NK_LP column_def_list NK_RP tags_def_opt table_options",
- /* 116 */ "cmd ::= CREATE TABLE multi_create_clause",
- /* 117 */ "cmd ::= CREATE STABLE not_exists_opt full_table_name NK_LP column_def_list NK_RP tags_def table_options",
- /* 118 */ "cmd ::= DROP TABLE multi_drop_clause",
- /* 119 */ "cmd ::= DROP STABLE exists_opt full_table_name",
- /* 120 */ "cmd ::= ALTER TABLE alter_table_clause",
- /* 121 */ "cmd ::= ALTER STABLE alter_table_clause",
- /* 122 */ "alter_table_clause ::= full_table_name alter_table_options",
- /* 123 */ "alter_table_clause ::= full_table_name ADD COLUMN column_name type_name",
- /* 124 */ "alter_table_clause ::= full_table_name DROP COLUMN column_name",
- /* 125 */ "alter_table_clause ::= full_table_name MODIFY COLUMN column_name type_name",
- /* 126 */ "alter_table_clause ::= full_table_name RENAME COLUMN column_name column_name",
- /* 127 */ "alter_table_clause ::= full_table_name ADD TAG column_name type_name",
- /* 128 */ "alter_table_clause ::= full_table_name DROP TAG column_name",
- /* 129 */ "alter_table_clause ::= full_table_name MODIFY TAG column_name type_name",
- /* 130 */ "alter_table_clause ::= full_table_name RENAME TAG column_name column_name",
- /* 131 */ "alter_table_clause ::= full_table_name SET TAG column_name NK_EQ signed_literal",
- /* 132 */ "multi_create_clause ::= create_subtable_clause",
- /* 133 */ "multi_create_clause ::= multi_create_clause create_subtable_clause",
- /* 134 */ "create_subtable_clause ::= not_exists_opt full_table_name USING full_table_name specific_cols_opt TAGS NK_LP expression_list NK_RP table_options",
- /* 135 */ "multi_drop_clause ::= drop_table_clause",
- /* 136 */ "multi_drop_clause ::= multi_drop_clause drop_table_clause",
- /* 137 */ "drop_table_clause ::= exists_opt full_table_name",
- /* 138 */ "specific_cols_opt ::=",
- /* 139 */ "specific_cols_opt ::= NK_LP col_name_list NK_RP",
- /* 140 */ "full_table_name ::= table_name",
- /* 141 */ "full_table_name ::= db_name NK_DOT table_name",
- /* 142 */ "column_def_list ::= column_def",
- /* 143 */ "column_def_list ::= column_def_list NK_COMMA column_def",
- /* 144 */ "column_def ::= column_name type_name",
- /* 145 */ "column_def ::= column_name type_name COMMENT NK_STRING",
- /* 146 */ "type_name ::= BOOL",
- /* 147 */ "type_name ::= TINYINT",
- /* 148 */ "type_name ::= SMALLINT",
- /* 149 */ "type_name ::= INT",
- /* 150 */ "type_name ::= INTEGER",
- /* 151 */ "type_name ::= BIGINT",
- /* 152 */ "type_name ::= FLOAT",
- /* 153 */ "type_name ::= DOUBLE",
- /* 154 */ "type_name ::= BINARY NK_LP NK_INTEGER NK_RP",
- /* 155 */ "type_name ::= TIMESTAMP",
- /* 156 */ "type_name ::= NCHAR NK_LP NK_INTEGER NK_RP",
- /* 157 */ "type_name ::= TINYINT UNSIGNED",
- /* 158 */ "type_name ::= SMALLINT UNSIGNED",
- /* 159 */ "type_name ::= INT UNSIGNED",
- /* 160 */ "type_name ::= BIGINT UNSIGNED",
- /* 161 */ "type_name ::= JSON",
- /* 162 */ "type_name ::= VARCHAR NK_LP NK_INTEGER NK_RP",
- /* 163 */ "type_name ::= MEDIUMBLOB",
- /* 164 */ "type_name ::= BLOB",
- /* 165 */ "type_name ::= VARBINARY NK_LP NK_INTEGER NK_RP",
- /* 166 */ "type_name ::= DECIMAL",
- /* 167 */ "type_name ::= DECIMAL NK_LP NK_INTEGER NK_RP",
- /* 168 */ "type_name ::= DECIMAL NK_LP NK_INTEGER NK_COMMA NK_INTEGER NK_RP",
- /* 169 */ "tags_def_opt ::=",
- /* 170 */ "tags_def_opt ::= tags_def",
- /* 171 */ "tags_def ::= TAGS NK_LP column_def_list NK_RP",
- /* 172 */ "table_options ::=",
- /* 173 */ "table_options ::= table_options COMMENT NK_STRING",
- /* 174 */ "table_options ::= table_options MAX_DELAY duration_list",
- /* 175 */ "table_options ::= table_options WATERMARK duration_list",
- /* 176 */ "table_options ::= table_options ROLLUP NK_LP rollup_func_list NK_RP",
- /* 177 */ "table_options ::= table_options TTL NK_INTEGER",
- /* 178 */ "table_options ::= table_options SMA NK_LP col_name_list NK_RP",
- /* 179 */ "alter_table_options ::= alter_table_option",
- /* 180 */ "alter_table_options ::= alter_table_options alter_table_option",
- /* 181 */ "alter_table_option ::= COMMENT NK_STRING",
- /* 182 */ "alter_table_option ::= TTL NK_INTEGER",
- /* 183 */ "duration_list ::= duration_literal",
- /* 184 */ "duration_list ::= duration_list NK_COMMA duration_literal",
- /* 185 */ "rollup_func_list ::= rollup_func_name",
- /* 186 */ "rollup_func_list ::= rollup_func_list NK_COMMA rollup_func_name",
- /* 187 */ "rollup_func_name ::= function_name",
- /* 188 */ "rollup_func_name ::= FIRST",
- /* 189 */ "rollup_func_name ::= LAST",
- /* 190 */ "col_name_list ::= col_name",
- /* 191 */ "col_name_list ::= col_name_list NK_COMMA col_name",
- /* 192 */ "col_name ::= column_name",
- /* 193 */ "cmd ::= SHOW DNODES",
- /* 194 */ "cmd ::= SHOW USERS",
- /* 195 */ "cmd ::= SHOW DATABASES",
- /* 196 */ "cmd ::= SHOW db_name_cond_opt TABLES like_pattern_opt",
- /* 197 */ "cmd ::= SHOW db_name_cond_opt STABLES like_pattern_opt",
- /* 198 */ "cmd ::= SHOW db_name_cond_opt VGROUPS",
- /* 199 */ "cmd ::= SHOW MNODES",
- /* 200 */ "cmd ::= SHOW MODULES",
- /* 201 */ "cmd ::= SHOW QNODES",
- /* 202 */ "cmd ::= SHOW FUNCTIONS",
- /* 203 */ "cmd ::= SHOW INDEXES FROM table_name_cond from_db_opt",
- /* 204 */ "cmd ::= SHOW STREAMS",
- /* 205 */ "cmd ::= SHOW ACCOUNTS",
- /* 206 */ "cmd ::= SHOW APPS",
- /* 207 */ "cmd ::= SHOW CONNECTIONS",
- /* 208 */ "cmd ::= SHOW LICENCES",
- /* 209 */ "cmd ::= SHOW GRANTS",
- /* 210 */ "cmd ::= SHOW CREATE DATABASE db_name",
- /* 211 */ "cmd ::= SHOW CREATE TABLE full_table_name",
- /* 212 */ "cmd ::= SHOW CREATE STABLE full_table_name",
- /* 213 */ "cmd ::= SHOW QUERIES",
- /* 214 */ "cmd ::= SHOW SCORES",
- /* 215 */ "cmd ::= SHOW TOPICS",
- /* 216 */ "cmd ::= SHOW VARIABLES",
- /* 217 */ "cmd ::= SHOW LOCAL VARIABLES",
- /* 218 */ "cmd ::= SHOW DNODE NK_INTEGER VARIABLES",
- /* 219 */ "cmd ::= SHOW BNODES",
- /* 220 */ "cmd ::= SHOW SNODES",
- /* 221 */ "cmd ::= SHOW CLUSTER",
- /* 222 */ "cmd ::= SHOW TRANSACTIONS",
- /* 223 */ "cmd ::= SHOW TABLE DISTRIBUTED full_table_name",
- /* 224 */ "cmd ::= SHOW CONSUMERS",
- /* 225 */ "cmd ::= SHOW SUBSCRIPTIONS",
- /* 226 */ "cmd ::= SHOW TAGS FROM table_name_cond from_db_opt",
- /* 227 */ "db_name_cond_opt ::=",
- /* 228 */ "db_name_cond_opt ::= db_name NK_DOT",
- /* 229 */ "like_pattern_opt ::=",
- /* 230 */ "like_pattern_opt ::= LIKE NK_STRING",
- /* 231 */ "table_name_cond ::= table_name",
- /* 232 */ "from_db_opt ::=",
- /* 233 */ "from_db_opt ::= FROM db_name",
- /* 234 */ "cmd ::= CREATE SMA INDEX not_exists_opt full_table_name ON full_table_name index_options",
- /* 235 */ "cmd ::= DROP INDEX exists_opt full_table_name",
- /* 236 */ "index_options ::= FUNCTION NK_LP func_list NK_RP INTERVAL NK_LP duration_literal NK_RP sliding_opt sma_stream_opt",
- /* 237 */ "index_options ::= FUNCTION NK_LP func_list NK_RP INTERVAL NK_LP duration_literal NK_COMMA duration_literal NK_RP sliding_opt sma_stream_opt",
- /* 238 */ "func_list ::= func",
- /* 239 */ "func_list ::= func_list NK_COMMA func",
- /* 240 */ "func ::= function_name NK_LP expression_list NK_RP",
- /* 241 */ "sma_stream_opt ::=",
- /* 242 */ "sma_stream_opt ::= stream_options WATERMARK duration_literal",
- /* 243 */ "sma_stream_opt ::= stream_options MAX_DELAY duration_literal",
- /* 244 */ "cmd ::= CREATE TOPIC not_exists_opt topic_name AS query_expression",
- /* 245 */ "cmd ::= CREATE TOPIC not_exists_opt topic_name AS DATABASE db_name",
- /* 246 */ "cmd ::= CREATE TOPIC not_exists_opt topic_name WITH META AS DATABASE db_name",
- /* 247 */ "cmd ::= CREATE TOPIC not_exists_opt topic_name AS STABLE full_table_name",
- /* 248 */ "cmd ::= CREATE TOPIC not_exists_opt topic_name WITH META AS STABLE full_table_name",
- /* 249 */ "cmd ::= DROP TOPIC exists_opt topic_name",
- /* 250 */ "cmd ::= DROP CONSUMER GROUP exists_opt cgroup_name ON topic_name",
- /* 251 */ "cmd ::= DESC full_table_name",
- /* 252 */ "cmd ::= DESCRIBE full_table_name",
- /* 253 */ "cmd ::= RESET QUERY CACHE",
- /* 254 */ "cmd ::= EXPLAIN analyze_opt explain_options query_expression",
- /* 255 */ "analyze_opt ::=",
- /* 256 */ "analyze_opt ::= ANALYZE",
- /* 257 */ "explain_options ::=",
- /* 258 */ "explain_options ::= explain_options VERBOSE NK_BOOL",
- /* 259 */ "explain_options ::= explain_options RATIO NK_FLOAT",
- /* 260 */ "cmd ::= CREATE agg_func_opt FUNCTION not_exists_opt function_name AS NK_STRING OUTPUTTYPE type_name bufsize_opt",
- /* 261 */ "cmd ::= DROP FUNCTION exists_opt function_name",
- /* 262 */ "agg_func_opt ::=",
- /* 263 */ "agg_func_opt ::= AGGREGATE",
- /* 264 */ "bufsize_opt ::=",
- /* 265 */ "bufsize_opt ::= BUFSIZE NK_INTEGER",
- /* 266 */ "cmd ::= CREATE STREAM not_exists_opt stream_name stream_options INTO full_table_name AS query_expression",
- /* 267 */ "cmd ::= DROP STREAM exists_opt stream_name",
- /* 268 */ "stream_options ::=",
- /* 269 */ "stream_options ::= stream_options TRIGGER AT_ONCE",
- /* 270 */ "stream_options ::= stream_options TRIGGER WINDOW_CLOSE",
- /* 271 */ "stream_options ::= stream_options TRIGGER MAX_DELAY duration_literal",
- /* 272 */ "stream_options ::= stream_options WATERMARK duration_literal",
- /* 273 */ "stream_options ::= stream_options IGNORE EXPIRED NK_INTEGER",
- /* 274 */ "cmd ::= KILL CONNECTION NK_INTEGER",
- /* 275 */ "cmd ::= KILL QUERY NK_STRING",
- /* 276 */ "cmd ::= KILL TRANSACTION NK_INTEGER",
- /* 277 */ "cmd ::= BALANCE VGROUP",
- /* 278 */ "cmd ::= MERGE VGROUP NK_INTEGER NK_INTEGER",
- /* 279 */ "cmd ::= REDISTRIBUTE VGROUP NK_INTEGER dnode_list",
- /* 280 */ "cmd ::= SPLIT VGROUP NK_INTEGER",
- /* 281 */ "dnode_list ::= DNODE NK_INTEGER",
- /* 282 */ "dnode_list ::= dnode_list DNODE NK_INTEGER",
- /* 283 */ "cmd ::= DELETE FROM full_table_name where_clause_opt",
- /* 284 */ "cmd ::= query_expression",
- /* 285 */ "cmd ::= INSERT INTO full_table_name NK_LP col_name_list NK_RP query_expression",
- /* 286 */ "cmd ::= INSERT INTO full_table_name query_expression",
- /* 287 */ "literal ::= NK_INTEGER",
- /* 288 */ "literal ::= NK_FLOAT",
- /* 289 */ "literal ::= NK_STRING",
- /* 290 */ "literal ::= NK_BOOL",
- /* 291 */ "literal ::= TIMESTAMP NK_STRING",
- /* 292 */ "literal ::= duration_literal",
- /* 293 */ "literal ::= NULL",
- /* 294 */ "literal ::= NK_QUESTION",
- /* 295 */ "duration_literal ::= NK_VARIABLE",
- /* 296 */ "signed ::= NK_INTEGER",
- /* 297 */ "signed ::= NK_PLUS NK_INTEGER",
- /* 298 */ "signed ::= NK_MINUS NK_INTEGER",
- /* 299 */ "signed ::= NK_FLOAT",
- /* 300 */ "signed ::= NK_PLUS NK_FLOAT",
- /* 301 */ "signed ::= NK_MINUS NK_FLOAT",
- /* 302 */ "signed_literal ::= signed",
- /* 303 */ "signed_literal ::= NK_STRING",
- /* 304 */ "signed_literal ::= NK_BOOL",
- /* 305 */ "signed_literal ::= TIMESTAMP NK_STRING",
- /* 306 */ "signed_literal ::= duration_literal",
- /* 307 */ "signed_literal ::= NULL",
- /* 308 */ "signed_literal ::= literal_func",
- /* 309 */ "signed_literal ::= NK_QUESTION",
- /* 310 */ "literal_list ::= signed_literal",
- /* 311 */ "literal_list ::= literal_list NK_COMMA signed_literal",
- /* 312 */ "db_name ::= NK_ID",
- /* 313 */ "table_name ::= NK_ID",
- /* 314 */ "column_name ::= NK_ID",
- /* 315 */ "function_name ::= NK_ID",
- /* 316 */ "table_alias ::= NK_ID",
- /* 317 */ "column_alias ::= NK_ID",
- /* 318 */ "user_name ::= NK_ID",
- /* 319 */ "topic_name ::= NK_ID",
- /* 320 */ "stream_name ::= NK_ID",
- /* 321 */ "cgroup_name ::= NK_ID",
- /* 322 */ "expression ::= literal",
- /* 323 */ "expression ::= pseudo_column",
- /* 324 */ "expression ::= column_reference",
- /* 325 */ "expression ::= function_expression",
- /* 326 */ "expression ::= subquery",
- /* 327 */ "expression ::= NK_LP expression NK_RP",
- /* 328 */ "expression ::= NK_PLUS expression",
- /* 329 */ "expression ::= NK_MINUS expression",
- /* 330 */ "expression ::= expression NK_PLUS expression",
- /* 331 */ "expression ::= expression NK_MINUS expression",
- /* 332 */ "expression ::= expression NK_STAR expression",
- /* 333 */ "expression ::= expression NK_SLASH expression",
- /* 334 */ "expression ::= expression NK_REM expression",
- /* 335 */ "expression ::= column_reference NK_ARROW NK_STRING",
- /* 336 */ "expression ::= expression NK_BITAND expression",
- /* 337 */ "expression ::= expression NK_BITOR expression",
- /* 338 */ "expression_list ::= expression",
- /* 339 */ "expression_list ::= expression_list NK_COMMA expression",
- /* 340 */ "column_reference ::= column_name",
- /* 341 */ "column_reference ::= table_name NK_DOT column_name",
- /* 342 */ "pseudo_column ::= ROWTS",
- /* 343 */ "pseudo_column ::= TBNAME",
- /* 344 */ "pseudo_column ::= table_name NK_DOT TBNAME",
- /* 345 */ "pseudo_column ::= QSTART",
- /* 346 */ "pseudo_column ::= QEND",
- /* 347 */ "pseudo_column ::= QDURATION",
- /* 348 */ "pseudo_column ::= WSTART",
- /* 349 */ "pseudo_column ::= WEND",
- /* 350 */ "pseudo_column ::= WDURATION",
- /* 351 */ "function_expression ::= function_name NK_LP expression_list NK_RP",
- /* 352 */ "function_expression ::= star_func NK_LP star_func_para_list NK_RP",
- /* 353 */ "function_expression ::= CAST NK_LP expression AS type_name NK_RP",
- /* 354 */ "function_expression ::= literal_func",
- /* 355 */ "literal_func ::= noarg_func NK_LP NK_RP",
- /* 356 */ "literal_func ::= NOW",
- /* 357 */ "noarg_func ::= NOW",
- /* 358 */ "noarg_func ::= TODAY",
- /* 359 */ "noarg_func ::= TIMEZONE",
- /* 360 */ "noarg_func ::= DATABASE",
- /* 361 */ "noarg_func ::= CLIENT_VERSION",
- /* 362 */ "noarg_func ::= SERVER_VERSION",
- /* 363 */ "noarg_func ::= SERVER_STATUS",
- /* 364 */ "noarg_func ::= CURRENT_USER",
- /* 365 */ "noarg_func ::= USER",
- /* 366 */ "star_func ::= COUNT",
- /* 367 */ "star_func ::= FIRST",
- /* 368 */ "star_func ::= LAST",
- /* 369 */ "star_func ::= LAST_ROW",
- /* 370 */ "star_func_para_list ::= NK_STAR",
- /* 371 */ "star_func_para_list ::= other_para_list",
- /* 372 */ "other_para_list ::= star_func_para",
- /* 373 */ "other_para_list ::= other_para_list NK_COMMA star_func_para",
- /* 374 */ "star_func_para ::= expression",
- /* 375 */ "star_func_para ::= table_name NK_DOT NK_STAR",
- /* 376 */ "predicate ::= expression compare_op expression",
- /* 377 */ "predicate ::= expression BETWEEN expression AND expression",
- /* 378 */ "predicate ::= expression NOT BETWEEN expression AND expression",
- /* 379 */ "predicate ::= expression IS NULL",
- /* 380 */ "predicate ::= expression IS NOT NULL",
- /* 381 */ "predicate ::= expression in_op in_predicate_value",
- /* 382 */ "compare_op ::= NK_LT",
- /* 383 */ "compare_op ::= NK_GT",
- /* 384 */ "compare_op ::= NK_LE",
- /* 385 */ "compare_op ::= NK_GE",
- /* 386 */ "compare_op ::= NK_NE",
- /* 387 */ "compare_op ::= NK_EQ",
- /* 388 */ "compare_op ::= LIKE",
- /* 389 */ "compare_op ::= NOT LIKE",
- /* 390 */ "compare_op ::= MATCH",
- /* 391 */ "compare_op ::= NMATCH",
- /* 392 */ "compare_op ::= CONTAINS",
- /* 393 */ "in_op ::= IN",
- /* 394 */ "in_op ::= NOT IN",
- /* 395 */ "in_predicate_value ::= NK_LP literal_list NK_RP",
- /* 396 */ "boolean_value_expression ::= boolean_primary",
- /* 397 */ "boolean_value_expression ::= NOT boolean_primary",
- /* 398 */ "boolean_value_expression ::= boolean_value_expression OR boolean_value_expression",
- /* 399 */ "boolean_value_expression ::= boolean_value_expression AND boolean_value_expression",
- /* 400 */ "boolean_primary ::= predicate",
- /* 401 */ "boolean_primary ::= NK_LP boolean_value_expression NK_RP",
- /* 402 */ "common_expression ::= expression",
- /* 403 */ "common_expression ::= boolean_value_expression",
- /* 404 */ "from_clause_opt ::=",
- /* 405 */ "from_clause_opt ::= FROM table_reference_list",
- /* 406 */ "table_reference_list ::= table_reference",
- /* 407 */ "table_reference_list ::= table_reference_list NK_COMMA table_reference",
- /* 408 */ "table_reference ::= table_primary",
- /* 409 */ "table_reference ::= joined_table",
- /* 410 */ "table_primary ::= table_name alias_opt",
- /* 411 */ "table_primary ::= db_name NK_DOT table_name alias_opt",
- /* 412 */ "table_primary ::= subquery alias_opt",
- /* 413 */ "table_primary ::= parenthesized_joined_table",
- /* 414 */ "alias_opt ::=",
- /* 415 */ "alias_opt ::= table_alias",
- /* 416 */ "alias_opt ::= AS table_alias",
- /* 417 */ "parenthesized_joined_table ::= NK_LP joined_table NK_RP",
- /* 418 */ "parenthesized_joined_table ::= NK_LP parenthesized_joined_table NK_RP",
- /* 419 */ "joined_table ::= table_reference join_type JOIN table_reference ON search_condition",
- /* 420 */ "join_type ::=",
- /* 421 */ "join_type ::= INNER",
- /* 422 */ "query_specification ::= SELECT set_quantifier_opt select_list from_clause_opt where_clause_opt partition_by_clause_opt range_opt every_opt fill_opt twindow_clause_opt group_by_clause_opt having_clause_opt",
- /* 423 */ "set_quantifier_opt ::=",
- /* 424 */ "set_quantifier_opt ::= DISTINCT",
- /* 425 */ "set_quantifier_opt ::= ALL",
- /* 426 */ "select_list ::= select_item",
- /* 427 */ "select_list ::= select_list NK_COMMA select_item",
- /* 428 */ "select_item ::= NK_STAR",
- /* 429 */ "select_item ::= common_expression",
- /* 430 */ "select_item ::= common_expression column_alias",
- /* 431 */ "select_item ::= common_expression AS column_alias",
- /* 432 */ "select_item ::= table_name NK_DOT NK_STAR",
- /* 433 */ "where_clause_opt ::=",
- /* 434 */ "where_clause_opt ::= WHERE search_condition",
- /* 435 */ "partition_by_clause_opt ::=",
- /* 436 */ "partition_by_clause_opt ::= PARTITION BY expression_list",
- /* 437 */ "twindow_clause_opt ::=",
- /* 438 */ "twindow_clause_opt ::= SESSION NK_LP column_reference NK_COMMA duration_literal NK_RP",
- /* 439 */ "twindow_clause_opt ::= STATE_WINDOW NK_LP expression NK_RP",
- /* 440 */ "twindow_clause_opt ::= INTERVAL NK_LP duration_literal NK_RP sliding_opt fill_opt",
- /* 441 */ "twindow_clause_opt ::= INTERVAL NK_LP duration_literal NK_COMMA duration_literal NK_RP sliding_opt fill_opt",
- /* 442 */ "sliding_opt ::=",
- /* 443 */ "sliding_opt ::= SLIDING NK_LP duration_literal NK_RP",
- /* 444 */ "fill_opt ::=",
- /* 445 */ "fill_opt ::= FILL NK_LP fill_mode NK_RP",
- /* 446 */ "fill_opt ::= FILL NK_LP VALUE NK_COMMA literal_list NK_RP",
- /* 447 */ "fill_mode ::= NONE",
- /* 448 */ "fill_mode ::= PREV",
- /* 449 */ "fill_mode ::= NULL",
- /* 450 */ "fill_mode ::= LINEAR",
- /* 451 */ "fill_mode ::= NEXT",
- /* 452 */ "group_by_clause_opt ::=",
- /* 453 */ "group_by_clause_opt ::= GROUP BY group_by_list",
- /* 454 */ "group_by_list ::= expression",
- /* 455 */ "group_by_list ::= group_by_list NK_COMMA expression",
- /* 456 */ "having_clause_opt ::=",
- /* 457 */ "having_clause_opt ::= HAVING search_condition",
- /* 458 */ "range_opt ::=",
- /* 459 */ "range_opt ::= RANGE NK_LP expression NK_COMMA expression NK_RP",
- /* 460 */ "every_opt ::=",
- /* 461 */ "every_opt ::= EVERY NK_LP duration_literal NK_RP",
- /* 462 */ "query_expression ::= query_expression_body order_by_clause_opt slimit_clause_opt limit_clause_opt",
- /* 463 */ "query_expression_body ::= query_primary",
- /* 464 */ "query_expression_body ::= query_expression_body UNION ALL query_expression_body",
- /* 465 */ "query_expression_body ::= query_expression_body UNION query_expression_body",
- /* 466 */ "query_primary ::= query_specification",
- /* 467 */ "query_primary ::= NK_LP query_expression_body order_by_clause_opt slimit_clause_opt limit_clause_opt NK_RP",
- /* 468 */ "order_by_clause_opt ::=",
- /* 469 */ "order_by_clause_opt ::= ORDER BY sort_specification_list",
- /* 470 */ "slimit_clause_opt ::=",
- /* 471 */ "slimit_clause_opt ::= SLIMIT NK_INTEGER",
- /* 472 */ "slimit_clause_opt ::= SLIMIT NK_INTEGER SOFFSET NK_INTEGER",
- /* 473 */ "slimit_clause_opt ::= SLIMIT NK_INTEGER NK_COMMA NK_INTEGER",
- /* 474 */ "limit_clause_opt ::=",
- /* 475 */ "limit_clause_opt ::= LIMIT NK_INTEGER",
- /* 476 */ "limit_clause_opt ::= LIMIT NK_INTEGER OFFSET NK_INTEGER",
- /* 477 */ "limit_clause_opt ::= LIMIT NK_INTEGER NK_COMMA NK_INTEGER",
- /* 478 */ "subquery ::= NK_LP query_expression NK_RP",
- /* 479 */ "search_condition ::= common_expression",
- /* 480 */ "sort_specification_list ::= sort_specification",
- /* 481 */ "sort_specification_list ::= sort_specification_list NK_COMMA sort_specification",
- /* 482 */ "sort_specification ::= expression ordering_specification_opt null_ordering_opt",
- /* 483 */ "ordering_specification_opt ::=",
- /* 484 */ "ordering_specification_opt ::= ASC",
- /* 485 */ "ordering_specification_opt ::= DESC",
- /* 486 */ "null_ordering_opt ::=",
- /* 487 */ "null_ordering_opt ::= NULLS FIRST",
- /* 488 */ "null_ordering_opt ::= NULLS LAST",
+ /* 85 */ "db_options ::= db_options TSDB_PAGESIZE NK_INTEGER",
+ /* 86 */ "db_options ::= db_options PRECISION NK_STRING",
+ /* 87 */ "db_options ::= db_options REPLICA NK_INTEGER",
+ /* 88 */ "db_options ::= db_options STRICT NK_STRING",
+ /* 89 */ "db_options ::= db_options VGROUPS NK_INTEGER",
+ /* 90 */ "db_options ::= db_options SINGLE_STABLE NK_INTEGER",
+ /* 91 */ "db_options ::= db_options RETENTIONS retention_list",
+ /* 92 */ "db_options ::= db_options SCHEMALESS NK_INTEGER",
+ /* 93 */ "db_options ::= db_options WAL_LEVEL NK_INTEGER",
+ /* 94 */ "db_options ::= db_options WAL_FSYNC_PERIOD NK_INTEGER",
+ /* 95 */ "db_options ::= db_options WAL_RETENTION_PERIOD NK_INTEGER",
+ /* 96 */ "db_options ::= db_options WAL_RETENTION_PERIOD NK_MINUS NK_INTEGER",
+ /* 97 */ "db_options ::= db_options WAL_RETENTION_SIZE NK_INTEGER",
+ /* 98 */ "db_options ::= db_options WAL_RETENTION_SIZE NK_MINUS NK_INTEGER",
+ /* 99 */ "db_options ::= db_options WAL_ROLL_PERIOD NK_INTEGER",
+ /* 100 */ "db_options ::= db_options WAL_SEGMENT_SIZE NK_INTEGER",
+ /* 101 */ "db_options ::= db_options STT_TRIGGER NK_INTEGER",
+ /* 102 */ "db_options ::= db_options TABLE_PREFIX NK_INTEGER",
+ /* 103 */ "db_options ::= db_options TABLE_SUFFIX NK_INTEGER",
+ /* 104 */ "alter_db_options ::= alter_db_option",
+ /* 105 */ "alter_db_options ::= alter_db_options alter_db_option",
+ /* 106 */ "alter_db_option ::= CACHEMODEL NK_STRING",
+ /* 107 */ "alter_db_option ::= CACHESIZE NK_INTEGER",
+ /* 108 */ "alter_db_option ::= WAL_FSYNC_PERIOD NK_INTEGER",
+ /* 109 */ "alter_db_option ::= KEEP integer_list",
+ /* 110 */ "alter_db_option ::= KEEP variable_list",
+ /* 111 */ "alter_db_option ::= WAL_LEVEL NK_INTEGER",
+ /* 112 */ "alter_db_option ::= STT_TRIGGER NK_INTEGER",
+ /* 113 */ "integer_list ::= NK_INTEGER",
+ /* 114 */ "integer_list ::= integer_list NK_COMMA NK_INTEGER",
+ /* 115 */ "variable_list ::= NK_VARIABLE",
+ /* 116 */ "variable_list ::= variable_list NK_COMMA NK_VARIABLE",
+ /* 117 */ "retention_list ::= retention",
+ /* 118 */ "retention_list ::= retention_list NK_COMMA retention",
+ /* 119 */ "retention ::= NK_VARIABLE NK_COLON NK_VARIABLE",
+ /* 120 */ "speed_opt ::=",
+ /* 121 */ "speed_opt ::= MAX_SPEED NK_INTEGER",
+ /* 122 */ "cmd ::= CREATE TABLE not_exists_opt full_table_name NK_LP column_def_list NK_RP tags_def_opt table_options",
+ /* 123 */ "cmd ::= CREATE TABLE multi_create_clause",
+ /* 124 */ "cmd ::= CREATE STABLE not_exists_opt full_table_name NK_LP column_def_list NK_RP tags_def table_options",
+ /* 125 */ "cmd ::= DROP TABLE multi_drop_clause",
+ /* 126 */ "cmd ::= DROP STABLE exists_opt full_table_name",
+ /* 127 */ "cmd ::= ALTER TABLE alter_table_clause",
+ /* 128 */ "cmd ::= ALTER STABLE alter_table_clause",
+ /* 129 */ "alter_table_clause ::= full_table_name alter_table_options",
+ /* 130 */ "alter_table_clause ::= full_table_name ADD COLUMN column_name type_name",
+ /* 131 */ "alter_table_clause ::= full_table_name DROP COLUMN column_name",
+ /* 132 */ "alter_table_clause ::= full_table_name MODIFY COLUMN column_name type_name",
+ /* 133 */ "alter_table_clause ::= full_table_name RENAME COLUMN column_name column_name",
+ /* 134 */ "alter_table_clause ::= full_table_name ADD TAG column_name type_name",
+ /* 135 */ "alter_table_clause ::= full_table_name DROP TAG column_name",
+ /* 136 */ "alter_table_clause ::= full_table_name MODIFY TAG column_name type_name",
+ /* 137 */ "alter_table_clause ::= full_table_name RENAME TAG column_name column_name",
+ /* 138 */ "alter_table_clause ::= full_table_name SET TAG column_name NK_EQ signed_literal",
+ /* 139 */ "multi_create_clause ::= create_subtable_clause",
+ /* 140 */ "multi_create_clause ::= multi_create_clause create_subtable_clause",
+ /* 141 */ "create_subtable_clause ::= not_exists_opt full_table_name USING full_table_name specific_cols_opt TAGS NK_LP expression_list NK_RP table_options",
+ /* 142 */ "multi_drop_clause ::= drop_table_clause",
+ /* 143 */ "multi_drop_clause ::= multi_drop_clause drop_table_clause",
+ /* 144 */ "drop_table_clause ::= exists_opt full_table_name",
+ /* 145 */ "specific_cols_opt ::=",
+ /* 146 */ "specific_cols_opt ::= NK_LP col_name_list NK_RP",
+ /* 147 */ "full_table_name ::= table_name",
+ /* 148 */ "full_table_name ::= db_name NK_DOT table_name",
+ /* 149 */ "column_def_list ::= column_def",
+ /* 150 */ "column_def_list ::= column_def_list NK_COMMA column_def",
+ /* 151 */ "column_def ::= column_name type_name",
+ /* 152 */ "column_def ::= column_name type_name COMMENT NK_STRING",
+ /* 153 */ "type_name ::= BOOL",
+ /* 154 */ "type_name ::= TINYINT",
+ /* 155 */ "type_name ::= SMALLINT",
+ /* 156 */ "type_name ::= INT",
+ /* 157 */ "type_name ::= INTEGER",
+ /* 158 */ "type_name ::= BIGINT",
+ /* 159 */ "type_name ::= FLOAT",
+ /* 160 */ "type_name ::= DOUBLE",
+ /* 161 */ "type_name ::= BINARY NK_LP NK_INTEGER NK_RP",
+ /* 162 */ "type_name ::= TIMESTAMP",
+ /* 163 */ "type_name ::= NCHAR NK_LP NK_INTEGER NK_RP",
+ /* 164 */ "type_name ::= TINYINT UNSIGNED",
+ /* 165 */ "type_name ::= SMALLINT UNSIGNED",
+ /* 166 */ "type_name ::= INT UNSIGNED",
+ /* 167 */ "type_name ::= BIGINT UNSIGNED",
+ /* 168 */ "type_name ::= JSON",
+ /* 169 */ "type_name ::= VARCHAR NK_LP NK_INTEGER NK_RP",
+ /* 170 */ "type_name ::= MEDIUMBLOB",
+ /* 171 */ "type_name ::= BLOB",
+ /* 172 */ "type_name ::= VARBINARY NK_LP NK_INTEGER NK_RP",
+ /* 173 */ "type_name ::= DECIMAL",
+ /* 174 */ "type_name ::= DECIMAL NK_LP NK_INTEGER NK_RP",
+ /* 175 */ "type_name ::= DECIMAL NK_LP NK_INTEGER NK_COMMA NK_INTEGER NK_RP",
+ /* 176 */ "tags_def_opt ::=",
+ /* 177 */ "tags_def_opt ::= tags_def",
+ /* 178 */ "tags_def ::= TAGS NK_LP column_def_list NK_RP",
+ /* 179 */ "table_options ::=",
+ /* 180 */ "table_options ::= table_options COMMENT NK_STRING",
+ /* 181 */ "table_options ::= table_options MAX_DELAY duration_list",
+ /* 182 */ "table_options ::= table_options WATERMARK duration_list",
+ /* 183 */ "table_options ::= table_options ROLLUP NK_LP rollup_func_list NK_RP",
+ /* 184 */ "table_options ::= table_options TTL NK_INTEGER",
+ /* 185 */ "table_options ::= table_options SMA NK_LP col_name_list NK_RP",
+ /* 186 */ "alter_table_options ::= alter_table_option",
+ /* 187 */ "alter_table_options ::= alter_table_options alter_table_option",
+ /* 188 */ "alter_table_option ::= COMMENT NK_STRING",
+ /* 189 */ "alter_table_option ::= TTL NK_INTEGER",
+ /* 190 */ "duration_list ::= duration_literal",
+ /* 191 */ "duration_list ::= duration_list NK_COMMA duration_literal",
+ /* 192 */ "rollup_func_list ::= rollup_func_name",
+ /* 193 */ "rollup_func_list ::= rollup_func_list NK_COMMA rollup_func_name",
+ /* 194 */ "rollup_func_name ::= function_name",
+ /* 195 */ "rollup_func_name ::= FIRST",
+ /* 196 */ "rollup_func_name ::= LAST",
+ /* 197 */ "col_name_list ::= col_name",
+ /* 198 */ "col_name_list ::= col_name_list NK_COMMA col_name",
+ /* 199 */ "col_name ::= column_name",
+ /* 200 */ "cmd ::= SHOW DNODES",
+ /* 201 */ "cmd ::= SHOW USERS",
+ /* 202 */ "cmd ::= SHOW DATABASES",
+ /* 203 */ "cmd ::= SHOW db_name_cond_opt TABLES like_pattern_opt",
+ /* 204 */ "cmd ::= SHOW db_name_cond_opt STABLES like_pattern_opt",
+ /* 205 */ "cmd ::= SHOW db_name_cond_opt VGROUPS",
+ /* 206 */ "cmd ::= SHOW MNODES",
+ /* 207 */ "cmd ::= SHOW MODULES",
+ /* 208 */ "cmd ::= SHOW QNODES",
+ /* 209 */ "cmd ::= SHOW FUNCTIONS",
+ /* 210 */ "cmd ::= SHOW INDEXES FROM table_name_cond from_db_opt",
+ /* 211 */ "cmd ::= SHOW STREAMS",
+ /* 212 */ "cmd ::= SHOW ACCOUNTS",
+ /* 213 */ "cmd ::= SHOW APPS",
+ /* 214 */ "cmd ::= SHOW CONNECTIONS",
+ /* 215 */ "cmd ::= SHOW LICENCES",
+ /* 216 */ "cmd ::= SHOW GRANTS",
+ /* 217 */ "cmd ::= SHOW CREATE DATABASE db_name",
+ /* 218 */ "cmd ::= SHOW CREATE TABLE full_table_name",
+ /* 219 */ "cmd ::= SHOW CREATE STABLE full_table_name",
+ /* 220 */ "cmd ::= SHOW QUERIES",
+ /* 221 */ "cmd ::= SHOW SCORES",
+ /* 222 */ "cmd ::= SHOW TOPICS",
+ /* 223 */ "cmd ::= SHOW VARIABLES",
+ /* 224 */ "cmd ::= SHOW LOCAL VARIABLES",
+ /* 225 */ "cmd ::= SHOW DNODE NK_INTEGER VARIABLES",
+ /* 226 */ "cmd ::= SHOW BNODES",
+ /* 227 */ "cmd ::= SHOW SNODES",
+ /* 228 */ "cmd ::= SHOW CLUSTER",
+ /* 229 */ "cmd ::= SHOW TRANSACTIONS",
+ /* 230 */ "cmd ::= SHOW TABLE DISTRIBUTED full_table_name",
+ /* 231 */ "cmd ::= SHOW CONSUMERS",
+ /* 232 */ "cmd ::= SHOW SUBSCRIPTIONS",
+ /* 233 */ "cmd ::= SHOW TAGS FROM table_name_cond from_db_opt",
+ /* 234 */ "cmd ::= SHOW VNODES NK_INTEGER",
+ /* 235 */ "cmd ::= SHOW VNODES NK_STRING",
+ /* 236 */ "db_name_cond_opt ::=",
+ /* 237 */ "db_name_cond_opt ::= db_name NK_DOT",
+ /* 238 */ "like_pattern_opt ::=",
+ /* 239 */ "like_pattern_opt ::= LIKE NK_STRING",
+ /* 240 */ "table_name_cond ::= table_name",
+ /* 241 */ "from_db_opt ::=",
+ /* 242 */ "from_db_opt ::= FROM db_name",
+ /* 243 */ "cmd ::= CREATE SMA INDEX not_exists_opt full_table_name ON full_table_name index_options",
+ /* 244 */ "cmd ::= DROP INDEX exists_opt full_table_name",
+ /* 245 */ "index_options ::= FUNCTION NK_LP func_list NK_RP INTERVAL NK_LP duration_literal NK_RP sliding_opt sma_stream_opt",
+ /* 246 */ "index_options ::= FUNCTION NK_LP func_list NK_RP INTERVAL NK_LP duration_literal NK_COMMA duration_literal NK_RP sliding_opt sma_stream_opt",
+ /* 247 */ "func_list ::= func",
+ /* 248 */ "func_list ::= func_list NK_COMMA func",
+ /* 249 */ "func ::= function_name NK_LP expression_list NK_RP",
+ /* 250 */ "sma_stream_opt ::=",
+ /* 251 */ "sma_stream_opt ::= stream_options WATERMARK duration_literal",
+ /* 252 */ "sma_stream_opt ::= stream_options MAX_DELAY duration_literal",
+ /* 253 */ "cmd ::= CREATE TOPIC not_exists_opt topic_name AS query_or_subquery",
+ /* 254 */ "cmd ::= CREATE TOPIC not_exists_opt topic_name AS DATABASE db_name",
+ /* 255 */ "cmd ::= CREATE TOPIC not_exists_opt topic_name WITH META AS DATABASE db_name",
+ /* 256 */ "cmd ::= CREATE TOPIC not_exists_opt topic_name AS STABLE full_table_name",
+ /* 257 */ "cmd ::= CREATE TOPIC not_exists_opt topic_name WITH META AS STABLE full_table_name",
+ /* 258 */ "cmd ::= DROP TOPIC exists_opt topic_name",
+ /* 259 */ "cmd ::= DROP CONSUMER GROUP exists_opt cgroup_name ON topic_name",
+ /* 260 */ "cmd ::= DESC full_table_name",
+ /* 261 */ "cmd ::= DESCRIBE full_table_name",
+ /* 262 */ "cmd ::= RESET QUERY CACHE",
+ /* 263 */ "cmd ::= EXPLAIN analyze_opt explain_options query_or_subquery",
+ /* 264 */ "analyze_opt ::=",
+ /* 265 */ "analyze_opt ::= ANALYZE",
+ /* 266 */ "explain_options ::=",
+ /* 267 */ "explain_options ::= explain_options VERBOSE NK_BOOL",
+ /* 268 */ "explain_options ::= explain_options RATIO NK_FLOAT",
+ /* 269 */ "cmd ::= CREATE agg_func_opt FUNCTION not_exists_opt function_name AS NK_STRING OUTPUTTYPE type_name bufsize_opt",
+ /* 270 */ "cmd ::= DROP FUNCTION exists_opt function_name",
+ /* 271 */ "agg_func_opt ::=",
+ /* 272 */ "agg_func_opt ::= AGGREGATE",
+ /* 273 */ "bufsize_opt ::=",
+ /* 274 */ "bufsize_opt ::= BUFSIZE NK_INTEGER",
+ /* 275 */ "cmd ::= CREATE STREAM not_exists_opt stream_name stream_options INTO full_table_name tags_def_opt subtable_opt AS query_or_subquery",
+ /* 276 */ "cmd ::= DROP STREAM exists_opt stream_name",
+ /* 277 */ "stream_options ::=",
+ /* 278 */ "stream_options ::= stream_options TRIGGER AT_ONCE",
+ /* 279 */ "stream_options ::= stream_options TRIGGER WINDOW_CLOSE",
+ /* 280 */ "stream_options ::= stream_options TRIGGER MAX_DELAY duration_literal",
+ /* 281 */ "stream_options ::= stream_options WATERMARK duration_literal",
+ /* 282 */ "stream_options ::= stream_options IGNORE EXPIRED NK_INTEGER",
+ /* 283 */ "subtable_opt ::=",
+ /* 284 */ "subtable_opt ::= SUBTABLE NK_LP expression NK_RP",
+ /* 285 */ "cmd ::= KILL CONNECTION NK_INTEGER",
+ /* 286 */ "cmd ::= KILL QUERY NK_STRING",
+ /* 287 */ "cmd ::= KILL TRANSACTION NK_INTEGER",
+ /* 288 */ "cmd ::= BALANCE VGROUP",
+ /* 289 */ "cmd ::= MERGE VGROUP NK_INTEGER NK_INTEGER",
+ /* 290 */ "cmd ::= REDISTRIBUTE VGROUP NK_INTEGER dnode_list",
+ /* 291 */ "cmd ::= SPLIT VGROUP NK_INTEGER",
+ /* 292 */ "dnode_list ::= DNODE NK_INTEGER",
+ /* 293 */ "dnode_list ::= dnode_list DNODE NK_INTEGER",
+ /* 294 */ "cmd ::= DELETE FROM full_table_name where_clause_opt",
+ /* 295 */ "cmd ::= query_or_subquery",
+ /* 296 */ "cmd ::= INSERT INTO full_table_name NK_LP col_name_list NK_RP query_or_subquery",
+ /* 297 */ "cmd ::= INSERT INTO full_table_name query_or_subquery",
+ /* 298 */ "literal ::= NK_INTEGER",
+ /* 299 */ "literal ::= NK_FLOAT",
+ /* 300 */ "literal ::= NK_STRING",
+ /* 301 */ "literal ::= NK_BOOL",
+ /* 302 */ "literal ::= TIMESTAMP NK_STRING",
+ /* 303 */ "literal ::= duration_literal",
+ /* 304 */ "literal ::= NULL",
+ /* 305 */ "literal ::= NK_QUESTION",
+ /* 306 */ "duration_literal ::= NK_VARIABLE",
+ /* 307 */ "signed ::= NK_INTEGER",
+ /* 308 */ "signed ::= NK_PLUS NK_INTEGER",
+ /* 309 */ "signed ::= NK_MINUS NK_INTEGER",
+ /* 310 */ "signed ::= NK_FLOAT",
+ /* 311 */ "signed ::= NK_PLUS NK_FLOAT",
+ /* 312 */ "signed ::= NK_MINUS NK_FLOAT",
+ /* 313 */ "signed_literal ::= signed",
+ /* 314 */ "signed_literal ::= NK_STRING",
+ /* 315 */ "signed_literal ::= NK_BOOL",
+ /* 316 */ "signed_literal ::= TIMESTAMP NK_STRING",
+ /* 317 */ "signed_literal ::= duration_literal",
+ /* 318 */ "signed_literal ::= NULL",
+ /* 319 */ "signed_literal ::= literal_func",
+ /* 320 */ "signed_literal ::= NK_QUESTION",
+ /* 321 */ "literal_list ::= signed_literal",
+ /* 322 */ "literal_list ::= literal_list NK_COMMA signed_literal",
+ /* 323 */ "db_name ::= NK_ID",
+ /* 324 */ "table_name ::= NK_ID",
+ /* 325 */ "column_name ::= NK_ID",
+ /* 326 */ "function_name ::= NK_ID",
+ /* 327 */ "table_alias ::= NK_ID",
+ /* 328 */ "column_alias ::= NK_ID",
+ /* 329 */ "user_name ::= NK_ID",
+ /* 330 */ "topic_name ::= NK_ID",
+ /* 331 */ "stream_name ::= NK_ID",
+ /* 332 */ "cgroup_name ::= NK_ID",
+ /* 333 */ "expr_or_subquery ::= expression",
+ /* 334 */ "expr_or_subquery ::= subquery",
+ /* 335 */ "expression ::= literal",
+ /* 336 */ "expression ::= pseudo_column",
+ /* 337 */ "expression ::= column_reference",
+ /* 338 */ "expression ::= function_expression",
+ /* 339 */ "expression ::= case_when_expression",
+ /* 340 */ "expression ::= NK_LP expression NK_RP",
+ /* 341 */ "expression ::= NK_PLUS expr_or_subquery",
+ /* 342 */ "expression ::= NK_MINUS expr_or_subquery",
+ /* 343 */ "expression ::= expr_or_subquery NK_PLUS expr_or_subquery",
+ /* 344 */ "expression ::= expr_or_subquery NK_MINUS expr_or_subquery",
+ /* 345 */ "expression ::= expr_or_subquery NK_STAR expr_or_subquery",
+ /* 346 */ "expression ::= expr_or_subquery NK_SLASH expr_or_subquery",
+ /* 347 */ "expression ::= expr_or_subquery NK_REM expr_or_subquery",
+ /* 348 */ "expression ::= column_reference NK_ARROW NK_STRING",
+ /* 349 */ "expression ::= expr_or_subquery NK_BITAND expr_or_subquery",
+ /* 350 */ "expression ::= expr_or_subquery NK_BITOR expr_or_subquery",
+ /* 351 */ "expression_list ::= expr_or_subquery",
+ /* 352 */ "expression_list ::= expression_list NK_COMMA expr_or_subquery",
+ /* 353 */ "column_reference ::= column_name",
+ /* 354 */ "column_reference ::= table_name NK_DOT column_name",
+ /* 355 */ "pseudo_column ::= ROWTS",
+ /* 356 */ "pseudo_column ::= TBNAME",
+ /* 357 */ "pseudo_column ::= table_name NK_DOT TBNAME",
+ /* 358 */ "pseudo_column ::= QSTART",
+ /* 359 */ "pseudo_column ::= QEND",
+ /* 360 */ "pseudo_column ::= QDURATION",
+ /* 361 */ "pseudo_column ::= WSTART",
+ /* 362 */ "pseudo_column ::= WEND",
+ /* 363 */ "pseudo_column ::= WDURATION",
+ /* 364 */ "pseudo_column ::= IROWTS",
+ /* 365 */ "function_expression ::= function_name NK_LP expression_list NK_RP",
+ /* 366 */ "function_expression ::= star_func NK_LP star_func_para_list NK_RP",
+ /* 367 */ "function_expression ::= CAST NK_LP expr_or_subquery AS type_name NK_RP",
+ /* 368 */ "function_expression ::= literal_func",
+ /* 369 */ "literal_func ::= noarg_func NK_LP NK_RP",
+ /* 370 */ "literal_func ::= NOW",
+ /* 371 */ "noarg_func ::= NOW",
+ /* 372 */ "noarg_func ::= TODAY",
+ /* 373 */ "noarg_func ::= TIMEZONE",
+ /* 374 */ "noarg_func ::= DATABASE",
+ /* 375 */ "noarg_func ::= CLIENT_VERSION",
+ /* 376 */ "noarg_func ::= SERVER_VERSION",
+ /* 377 */ "noarg_func ::= SERVER_STATUS",
+ /* 378 */ "noarg_func ::= CURRENT_USER",
+ /* 379 */ "noarg_func ::= USER",
+ /* 380 */ "star_func ::= COUNT",
+ /* 381 */ "star_func ::= FIRST",
+ /* 382 */ "star_func ::= LAST",
+ /* 383 */ "star_func ::= LAST_ROW",
+ /* 384 */ "star_func_para_list ::= NK_STAR",
+ /* 385 */ "star_func_para_list ::= other_para_list",
+ /* 386 */ "other_para_list ::= star_func_para",
+ /* 387 */ "other_para_list ::= other_para_list NK_COMMA star_func_para",
+ /* 388 */ "star_func_para ::= expr_or_subquery",
+ /* 389 */ "star_func_para ::= table_name NK_DOT NK_STAR",
+ /* 390 */ "case_when_expression ::= CASE when_then_list case_when_else_opt END",
+ /* 391 */ "case_when_expression ::= CASE common_expression when_then_list case_when_else_opt END",
+ /* 392 */ "when_then_list ::= when_then_expr",
+ /* 393 */ "when_then_list ::= when_then_list when_then_expr",
+ /* 394 */ "when_then_expr ::= WHEN common_expression THEN common_expression",
+ /* 395 */ "case_when_else_opt ::=",
+ /* 396 */ "case_when_else_opt ::= ELSE common_expression",
+ /* 397 */ "predicate ::= expr_or_subquery compare_op expr_or_subquery",
+ /* 398 */ "predicate ::= expr_or_subquery BETWEEN expr_or_subquery AND expr_or_subquery",
+ /* 399 */ "predicate ::= expr_or_subquery NOT BETWEEN expr_or_subquery AND expr_or_subquery",
+ /* 400 */ "predicate ::= expr_or_subquery IS NULL",
+ /* 401 */ "predicate ::= expr_or_subquery IS NOT NULL",
+ /* 402 */ "predicate ::= expr_or_subquery in_op in_predicate_value",
+ /* 403 */ "compare_op ::= NK_LT",
+ /* 404 */ "compare_op ::= NK_GT",
+ /* 405 */ "compare_op ::= NK_LE",
+ /* 406 */ "compare_op ::= NK_GE",
+ /* 407 */ "compare_op ::= NK_NE",
+ /* 408 */ "compare_op ::= NK_EQ",
+ /* 409 */ "compare_op ::= LIKE",
+ /* 410 */ "compare_op ::= NOT LIKE",
+ /* 411 */ "compare_op ::= MATCH",
+ /* 412 */ "compare_op ::= NMATCH",
+ /* 413 */ "compare_op ::= CONTAINS",
+ /* 414 */ "in_op ::= IN",
+ /* 415 */ "in_op ::= NOT IN",
+ /* 416 */ "in_predicate_value ::= NK_LP literal_list NK_RP",
+ /* 417 */ "boolean_value_expression ::= boolean_primary",
+ /* 418 */ "boolean_value_expression ::= NOT boolean_primary",
+ /* 419 */ "boolean_value_expression ::= boolean_value_expression OR boolean_value_expression",
+ /* 420 */ "boolean_value_expression ::= boolean_value_expression AND boolean_value_expression",
+ /* 421 */ "boolean_primary ::= predicate",
+ /* 422 */ "boolean_primary ::= NK_LP boolean_value_expression NK_RP",
+ /* 423 */ "common_expression ::= expr_or_subquery",
+ /* 424 */ "common_expression ::= boolean_value_expression",
+ /* 425 */ "from_clause_opt ::=",
+ /* 426 */ "from_clause_opt ::= FROM table_reference_list",
+ /* 427 */ "table_reference_list ::= table_reference",
+ /* 428 */ "table_reference_list ::= table_reference_list NK_COMMA table_reference",
+ /* 429 */ "table_reference ::= table_primary",
+ /* 430 */ "table_reference ::= joined_table",
+ /* 431 */ "table_primary ::= table_name alias_opt",
+ /* 432 */ "table_primary ::= db_name NK_DOT table_name alias_opt",
+ /* 433 */ "table_primary ::= subquery alias_opt",
+ /* 434 */ "table_primary ::= parenthesized_joined_table",
+ /* 435 */ "alias_opt ::=",
+ /* 436 */ "alias_opt ::= table_alias",
+ /* 437 */ "alias_opt ::= AS table_alias",
+ /* 438 */ "parenthesized_joined_table ::= NK_LP joined_table NK_RP",
+ /* 439 */ "parenthesized_joined_table ::= NK_LP parenthesized_joined_table NK_RP",
+ /* 440 */ "joined_table ::= table_reference join_type JOIN table_reference ON search_condition",
+ /* 441 */ "join_type ::=",
+ /* 442 */ "join_type ::= INNER",
+ /* 443 */ "query_specification ::= SELECT set_quantifier_opt select_list from_clause_opt where_clause_opt partition_by_clause_opt range_opt every_opt fill_opt twindow_clause_opt group_by_clause_opt having_clause_opt",
+ /* 444 */ "set_quantifier_opt ::=",
+ /* 445 */ "set_quantifier_opt ::= DISTINCT",
+ /* 446 */ "set_quantifier_opt ::= ALL",
+ /* 447 */ "select_list ::= select_item",
+ /* 448 */ "select_list ::= select_list NK_COMMA select_item",
+ /* 449 */ "select_item ::= NK_STAR",
+ /* 450 */ "select_item ::= common_expression",
+ /* 451 */ "select_item ::= common_expression column_alias",
+ /* 452 */ "select_item ::= common_expression AS column_alias",
+ /* 453 */ "select_item ::= table_name NK_DOT NK_STAR",
+ /* 454 */ "where_clause_opt ::=",
+ /* 455 */ "where_clause_opt ::= WHERE search_condition",
+ /* 456 */ "partition_by_clause_opt ::=",
+ /* 457 */ "partition_by_clause_opt ::= PARTITION BY partition_list",
+ /* 458 */ "partition_list ::= partition_item",
+ /* 459 */ "partition_list ::= partition_list NK_COMMA partition_item",
+ /* 460 */ "partition_item ::= expr_or_subquery",
+ /* 461 */ "partition_item ::= expr_or_subquery column_alias",
+ /* 462 */ "partition_item ::= expr_or_subquery AS column_alias",
+ /* 463 */ "twindow_clause_opt ::=",
+ /* 464 */ "twindow_clause_opt ::= SESSION NK_LP column_reference NK_COMMA duration_literal NK_RP",
+ /* 465 */ "twindow_clause_opt ::= STATE_WINDOW NK_LP expr_or_subquery NK_RP",
+ /* 466 */ "twindow_clause_opt ::= INTERVAL NK_LP duration_literal NK_RP sliding_opt fill_opt",
+ /* 467 */ "twindow_clause_opt ::= INTERVAL NK_LP duration_literal NK_COMMA duration_literal NK_RP sliding_opt fill_opt",
+ /* 468 */ "sliding_opt ::=",
+ /* 469 */ "sliding_opt ::= SLIDING NK_LP duration_literal NK_RP",
+ /* 470 */ "fill_opt ::=",
+ /* 471 */ "fill_opt ::= FILL NK_LP fill_mode NK_RP",
+ /* 472 */ "fill_opt ::= FILL NK_LP VALUE NK_COMMA literal_list NK_RP",
+ /* 473 */ "fill_mode ::= NONE",
+ /* 474 */ "fill_mode ::= PREV",
+ /* 475 */ "fill_mode ::= NULL",
+ /* 476 */ "fill_mode ::= LINEAR",
+ /* 477 */ "fill_mode ::= NEXT",
+ /* 478 */ "group_by_clause_opt ::=",
+ /* 479 */ "group_by_clause_opt ::= GROUP BY group_by_list",
+ /* 480 */ "group_by_list ::= expr_or_subquery",
+ /* 481 */ "group_by_list ::= group_by_list NK_COMMA expr_or_subquery",
+ /* 482 */ "having_clause_opt ::=",
+ /* 483 */ "having_clause_opt ::= HAVING search_condition",
+ /* 484 */ "range_opt ::=",
+ /* 485 */ "range_opt ::= RANGE NK_LP expr_or_subquery NK_COMMA expr_or_subquery NK_RP",
+ /* 486 */ "every_opt ::=",
+ /* 487 */ "every_opt ::= EVERY NK_LP duration_literal NK_RP",
+ /* 488 */ "query_expression ::= query_simple order_by_clause_opt slimit_clause_opt limit_clause_opt",
+ /* 489 */ "query_simple ::= query_specification",
+ /* 490 */ "query_simple ::= union_query_expression",
+ /* 491 */ "union_query_expression ::= query_simple_or_subquery UNION ALL query_simple_or_subquery",
+ /* 492 */ "union_query_expression ::= query_simple_or_subquery UNION query_simple_or_subquery",
+ /* 493 */ "query_simple_or_subquery ::= query_simple",
+ /* 494 */ "query_simple_or_subquery ::= subquery",
+ /* 495 */ "query_or_subquery ::= query_expression",
+ /* 496 */ "query_or_subquery ::= subquery",
+ /* 497 */ "order_by_clause_opt ::=",
+ /* 498 */ "order_by_clause_opt ::= ORDER BY sort_specification_list",
+ /* 499 */ "slimit_clause_opt ::=",
+ /* 500 */ "slimit_clause_opt ::= SLIMIT NK_INTEGER",
+ /* 501 */ "slimit_clause_opt ::= SLIMIT NK_INTEGER SOFFSET NK_INTEGER",
+ /* 502 */ "slimit_clause_opt ::= SLIMIT NK_INTEGER NK_COMMA NK_INTEGER",
+ /* 503 */ "limit_clause_opt ::=",
+ /* 504 */ "limit_clause_opt ::= LIMIT NK_INTEGER",
+ /* 505 */ "limit_clause_opt ::= LIMIT NK_INTEGER OFFSET NK_INTEGER",
+ /* 506 */ "limit_clause_opt ::= LIMIT NK_INTEGER NK_COMMA NK_INTEGER",
+ /* 507 */ "subquery ::= NK_LP query_expression NK_RP",
+ /* 508 */ "subquery ::= NK_LP subquery NK_RP",
+ /* 509 */ "search_condition ::= common_expression",
+ /* 510 */ "sort_specification_list ::= sort_specification",
+ /* 511 */ "sort_specification_list ::= sort_specification_list NK_COMMA sort_specification",
+ /* 512 */ "sort_specification ::= expr_or_subquery ordering_specification_opt null_ordering_opt",
+ /* 513 */ "ordering_specification_opt ::=",
+ /* 514 */ "ordering_specification_opt ::= ASC",
+ /* 515 */ "ordering_specification_opt ::= DESC",
+ /* 516 */ "null_ordering_opt ::=",
+ /* 517 */ "null_ordering_opt ::= NULLS FIRST",
+ /* 518 */ "null_ordering_opt ::= NULLS LAST",
};
#endif /* NDEBUG */
@@ -2319,179 +2530,190 @@ static void yy_destructor(
*/
/********* Begin destructor definitions ***************************************/
/* Default NON-TERMINAL Destructor */
- case 305: /* cmd */
- case 308: /* literal */
- case 319: /* db_options */
- case 321: /* alter_db_options */
- case 326: /* retention */
- case 327: /* full_table_name */
- case 330: /* table_options */
- case 334: /* alter_table_clause */
- case 335: /* alter_table_options */
- case 338: /* signed_literal */
- case 339: /* create_subtable_clause */
- case 342: /* drop_table_clause */
- case 345: /* column_def */
- case 349: /* duration_literal */
- case 350: /* rollup_func_name */
- case 352: /* col_name */
- case 353: /* db_name_cond_opt */
- case 354: /* like_pattern_opt */
- case 355: /* table_name_cond */
- case 356: /* from_db_opt */
- case 357: /* index_options */
- case 359: /* sliding_opt */
- case 360: /* sma_stream_opt */
- case 361: /* func */
- case 362: /* stream_options */
- case 364: /* query_expression */
- case 367: /* explain_options */
- case 372: /* where_clause_opt */
- case 373: /* signed */
- case 374: /* literal_func */
- case 378: /* expression */
- case 379: /* pseudo_column */
- case 380: /* column_reference */
- case 381: /* function_expression */
- case 382: /* subquery */
- case 387: /* star_func_para */
- case 388: /* predicate */
- case 391: /* in_predicate_value */
- case 392: /* boolean_value_expression */
- case 393: /* boolean_primary */
- case 394: /* common_expression */
- case 395: /* from_clause_opt */
- case 396: /* table_reference_list */
- case 397: /* table_reference */
- case 398: /* table_primary */
- case 399: /* joined_table */
- case 401: /* parenthesized_joined_table */
- case 403: /* search_condition */
- case 404: /* query_specification */
- case 408: /* range_opt */
- case 409: /* every_opt */
- case 410: /* fill_opt */
- case 411: /* twindow_clause_opt */
- case 413: /* having_clause_opt */
- case 414: /* select_item */
- case 417: /* query_expression_body */
- case 419: /* slimit_clause_opt */
- case 420: /* limit_clause_opt */
- case 421: /* query_primary */
- case 423: /* sort_specification */
+ case 316: /* cmd */
+ case 319: /* literal */
+ case 330: /* db_options */
+ case 332: /* alter_db_options */
+ case 338: /* retention */
+ case 339: /* full_table_name */
+ case 342: /* table_options */
+ case 346: /* alter_table_clause */
+ case 347: /* alter_table_options */
+ case 350: /* signed_literal */
+ case 351: /* create_subtable_clause */
+ case 354: /* drop_table_clause */
+ case 357: /* column_def */
+ case 361: /* duration_literal */
+ case 362: /* rollup_func_name */
+ case 364: /* col_name */
+ case 365: /* db_name_cond_opt */
+ case 366: /* like_pattern_opt */
+ case 367: /* table_name_cond */
+ case 368: /* from_db_opt */
+ case 369: /* index_options */
+ case 371: /* sliding_opt */
+ case 372: /* sma_stream_opt */
+ case 373: /* func */
+ case 374: /* stream_options */
+ case 376: /* query_or_subquery */
+ case 379: /* explain_options */
+ case 383: /* subtable_opt */
+ case 384: /* expression */
+ case 386: /* where_clause_opt */
+ case 387: /* signed */
+ case 388: /* literal_func */
+ case 392: /* expr_or_subquery */
+ case 393: /* subquery */
+ case 394: /* pseudo_column */
+ case 395: /* column_reference */
+ case 396: /* function_expression */
+ case 397: /* case_when_expression */
+ case 402: /* star_func_para */
+ case 404: /* case_when_else_opt */
+ case 405: /* common_expression */
+ case 406: /* when_then_expr */
+ case 407: /* predicate */
+ case 410: /* in_predicate_value */
+ case 411: /* boolean_value_expression */
+ case 412: /* boolean_primary */
+ case 413: /* from_clause_opt */
+ case 414: /* table_reference_list */
+ case 415: /* table_reference */
+ case 416: /* table_primary */
+ case 417: /* joined_table */
+ case 419: /* parenthesized_joined_table */
+ case 421: /* search_condition */
+ case 422: /* query_specification */
+ case 426: /* range_opt */
+ case 427: /* every_opt */
+ case 428: /* fill_opt */
+ case 429: /* twindow_clause_opt */
+ case 431: /* having_clause_opt */
+ case 432: /* select_item */
+ case 434: /* partition_item */
+ case 437: /* query_expression */
+ case 438: /* query_simple */
+ case 440: /* slimit_clause_opt */
+ case 441: /* limit_clause_opt */
+ case 442: /* union_query_expression */
+ case 443: /* query_simple_or_subquery */
+ case 445: /* sort_specification */
{
- nodesDestroyNode((yypminor->yy272));
+ nodesDestroyNode((yypminor->yy616));
}
break;
- case 306: /* account_options */
- case 307: /* alter_account_options */
- case 309: /* alter_account_option */
- case 369: /* bufsize_opt */
+ case 317: /* account_options */
+ case 318: /* alter_account_options */
+ case 320: /* alter_account_option */
+ case 333: /* speed_opt */
+ case 381: /* bufsize_opt */
{
}
break;
- case 310: /* user_name */
- case 313: /* priv_level */
- case 316: /* db_name */
- case 317: /* dnode_endpoint */
- case 336: /* column_name */
- case 344: /* table_name */
- case 351: /* function_name */
- case 363: /* topic_name */
- case 365: /* cgroup_name */
- case 370: /* stream_name */
- case 376: /* table_alias */
- case 377: /* column_alias */
- case 383: /* star_func */
- case 385: /* noarg_func */
- case 400: /* alias_opt */
+ case 321: /* user_name */
+ case 324: /* priv_level */
+ case 327: /* db_name */
+ case 328: /* dnode_endpoint */
+ case 348: /* column_name */
+ case 356: /* table_name */
+ case 363: /* function_name */
+ case 375: /* topic_name */
+ case 377: /* cgroup_name */
+ case 382: /* stream_name */
+ case 390: /* table_alias */
+ case 391: /* column_alias */
+ case 398: /* star_func */
+ case 400: /* noarg_func */
+ case 418: /* alias_opt */
{
}
break;
- case 311: /* sysinfo_opt */
+ case 322: /* sysinfo_opt */
{
}
break;
- case 312: /* privileges */
- case 314: /* priv_type_list */
- case 315: /* priv_type */
+ case 323: /* privileges */
+ case 325: /* priv_type_list */
+ case 326: /* priv_type */
{
}
break;
- case 318: /* not_exists_opt */
- case 320: /* exists_opt */
- case 366: /* analyze_opt */
- case 368: /* agg_func_opt */
- case 405: /* set_quantifier_opt */
+ case 329: /* not_exists_opt */
+ case 331: /* exists_opt */
+ case 378: /* analyze_opt */
+ case 380: /* agg_func_opt */
+ case 423: /* set_quantifier_opt */
{
}
break;
- case 322: /* integer_list */
- case 323: /* variable_list */
- case 324: /* retention_list */
- case 328: /* column_def_list */
- case 329: /* tags_def_opt */
- case 331: /* multi_create_clause */
- case 332: /* tags_def */
- case 333: /* multi_drop_clause */
- case 340: /* specific_cols_opt */
- case 341: /* expression_list */
- case 343: /* col_name_list */
- case 346: /* duration_list */
- case 347: /* rollup_func_list */
- case 358: /* func_list */
- case 371: /* dnode_list */
- case 375: /* literal_list */
- case 384: /* star_func_para_list */
- case 386: /* other_para_list */
- case 406: /* select_list */
- case 407: /* partition_by_clause_opt */
- case 412: /* group_by_clause_opt */
- case 416: /* group_by_list */
- case 418: /* order_by_clause_opt */
- case 422: /* sort_specification_list */
+ case 334: /* integer_list */
+ case 335: /* variable_list */
+ case 336: /* retention_list */
+ case 340: /* column_def_list */
+ case 341: /* tags_def_opt */
+ case 343: /* multi_create_clause */
+ case 344: /* tags_def */
+ case 345: /* multi_drop_clause */
+ case 352: /* specific_cols_opt */
+ case 353: /* expression_list */
+ case 355: /* col_name_list */
+ case 358: /* duration_list */
+ case 359: /* rollup_func_list */
+ case 370: /* func_list */
+ case 385: /* dnode_list */
+ case 389: /* literal_list */
+ case 399: /* star_func_para_list */
+ case 401: /* other_para_list */
+ case 403: /* when_then_list */
+ case 424: /* select_list */
+ case 425: /* partition_by_clause_opt */
+ case 430: /* group_by_clause_opt */
+ case 433: /* partition_list */
+ case 436: /* group_by_list */
+ case 439: /* order_by_clause_opt */
+ case 444: /* sort_specification_list */
{
- nodesDestroyList((yypminor->yy172));
+ nodesDestroyList((yypminor->yy152));
}
break;
- case 325: /* alter_db_option */
- case 348: /* alter_table_option */
+ case 337: /* alter_db_option */
+ case 360: /* alter_table_option */
{
}
break;
- case 337: /* type_name */
+ case 349: /* type_name */
{
}
break;
- case 389: /* compare_op */
- case 390: /* in_op */
+ case 408: /* compare_op */
+ case 409: /* in_op */
{
}
break;
- case 402: /* join_type */
+ case 420: /* join_type */
{
}
break;
- case 415: /* fill_mode */
+ case 435: /* fill_mode */
{
}
break;
- case 424: /* ordering_specification_opt */
+ case 446: /* ordering_specification_opt */
{
}
break;
- case 425: /* null_ordering_opt */
+ case 447: /* null_ordering_opt */
{
}
@@ -2790,495 +3012,525 @@ static const struct {
YYCODETYPE lhs; /* Symbol on the left-hand side of the rule */
signed char nrhs; /* Negative of the number of RHS symbols in the rule */
} yyRuleInfo[] = {
- { 305, -6 }, /* (0) cmd ::= CREATE ACCOUNT NK_ID PASS NK_STRING account_options */
- { 305, -4 }, /* (1) cmd ::= ALTER ACCOUNT NK_ID alter_account_options */
- { 306, 0 }, /* (2) account_options ::= */
- { 306, -3 }, /* (3) account_options ::= account_options PPS literal */
- { 306, -3 }, /* (4) account_options ::= account_options TSERIES literal */
- { 306, -3 }, /* (5) account_options ::= account_options STORAGE literal */
- { 306, -3 }, /* (6) account_options ::= account_options STREAMS literal */
- { 306, -3 }, /* (7) account_options ::= account_options QTIME literal */
- { 306, -3 }, /* (8) account_options ::= account_options DBS literal */
- { 306, -3 }, /* (9) account_options ::= account_options USERS literal */
- { 306, -3 }, /* (10) account_options ::= account_options CONNS literal */
- { 306, -3 }, /* (11) account_options ::= account_options STATE literal */
- { 307, -1 }, /* (12) alter_account_options ::= alter_account_option */
- { 307, -2 }, /* (13) alter_account_options ::= alter_account_options alter_account_option */
- { 309, -2 }, /* (14) alter_account_option ::= PASS literal */
- { 309, -2 }, /* (15) alter_account_option ::= PPS literal */
- { 309, -2 }, /* (16) alter_account_option ::= TSERIES literal */
- { 309, -2 }, /* (17) alter_account_option ::= STORAGE literal */
- { 309, -2 }, /* (18) alter_account_option ::= STREAMS literal */
- { 309, -2 }, /* (19) alter_account_option ::= QTIME literal */
- { 309, -2 }, /* (20) alter_account_option ::= DBS literal */
- { 309, -2 }, /* (21) alter_account_option ::= USERS literal */
- { 309, -2 }, /* (22) alter_account_option ::= CONNS literal */
- { 309, -2 }, /* (23) alter_account_option ::= STATE literal */
- { 305, -6 }, /* (24) cmd ::= CREATE USER user_name PASS NK_STRING sysinfo_opt */
- { 305, -5 }, /* (25) cmd ::= ALTER USER user_name PASS NK_STRING */
- { 305, -5 }, /* (26) cmd ::= ALTER USER user_name ENABLE NK_INTEGER */
- { 305, -5 }, /* (27) cmd ::= ALTER USER user_name SYSINFO NK_INTEGER */
- { 305, -3 }, /* (28) cmd ::= DROP USER user_name */
- { 311, 0 }, /* (29) sysinfo_opt ::= */
- { 311, -2 }, /* (30) sysinfo_opt ::= SYSINFO NK_INTEGER */
- { 305, -6 }, /* (31) cmd ::= GRANT privileges ON priv_level TO user_name */
- { 305, -6 }, /* (32) cmd ::= REVOKE privileges ON priv_level FROM user_name */
- { 312, -1 }, /* (33) privileges ::= ALL */
- { 312, -1 }, /* (34) privileges ::= priv_type_list */
- { 314, -1 }, /* (35) priv_type_list ::= priv_type */
- { 314, -3 }, /* (36) priv_type_list ::= priv_type_list NK_COMMA priv_type */
- { 315, -1 }, /* (37) priv_type ::= READ */
- { 315, -1 }, /* (38) priv_type ::= WRITE */
- { 313, -3 }, /* (39) priv_level ::= NK_STAR NK_DOT NK_STAR */
- { 313, -3 }, /* (40) priv_level ::= db_name NK_DOT NK_STAR */
- { 305, -3 }, /* (41) cmd ::= CREATE DNODE dnode_endpoint */
- { 305, -5 }, /* (42) cmd ::= CREATE DNODE dnode_endpoint PORT NK_INTEGER */
- { 305, -3 }, /* (43) cmd ::= DROP DNODE NK_INTEGER */
- { 305, -3 }, /* (44) cmd ::= DROP DNODE dnode_endpoint */
- { 305, -4 }, /* (45) cmd ::= ALTER DNODE NK_INTEGER NK_STRING */
- { 305, -5 }, /* (46) cmd ::= ALTER DNODE NK_INTEGER NK_STRING NK_STRING */
- { 305, -4 }, /* (47) cmd ::= ALTER ALL DNODES NK_STRING */
- { 305, -5 }, /* (48) cmd ::= ALTER ALL DNODES NK_STRING NK_STRING */
- { 317, -1 }, /* (49) dnode_endpoint ::= NK_STRING */
- { 317, -1 }, /* (50) dnode_endpoint ::= NK_ID */
- { 317, -1 }, /* (51) dnode_endpoint ::= NK_IPTOKEN */
- { 305, -3 }, /* (52) cmd ::= ALTER LOCAL NK_STRING */
- { 305, -4 }, /* (53) cmd ::= ALTER LOCAL NK_STRING NK_STRING */
- { 305, -5 }, /* (54) cmd ::= CREATE QNODE ON DNODE NK_INTEGER */
- { 305, -5 }, /* (55) cmd ::= DROP QNODE ON DNODE NK_INTEGER */
- { 305, -5 }, /* (56) cmd ::= CREATE BNODE ON DNODE NK_INTEGER */
- { 305, -5 }, /* (57) cmd ::= DROP BNODE ON DNODE NK_INTEGER */
- { 305, -5 }, /* (58) cmd ::= CREATE SNODE ON DNODE NK_INTEGER */
- { 305, -5 }, /* (59) cmd ::= DROP SNODE ON DNODE NK_INTEGER */
- { 305, -5 }, /* (60) cmd ::= CREATE MNODE ON DNODE NK_INTEGER */
- { 305, -5 }, /* (61) cmd ::= DROP MNODE ON DNODE NK_INTEGER */
- { 305, -5 }, /* (62) cmd ::= CREATE DATABASE not_exists_opt db_name db_options */
- { 305, -4 }, /* (63) cmd ::= DROP DATABASE exists_opt db_name */
- { 305, -2 }, /* (64) cmd ::= USE db_name */
- { 305, -4 }, /* (65) cmd ::= ALTER DATABASE db_name alter_db_options */
- { 305, -3 }, /* (66) cmd ::= FLUSH DATABASE db_name */
- { 305, -3 }, /* (67) cmd ::= TRIM DATABASE db_name */
- { 318, -3 }, /* (68) not_exists_opt ::= IF NOT EXISTS */
- { 318, 0 }, /* (69) not_exists_opt ::= */
- { 320, -2 }, /* (70) exists_opt ::= IF EXISTS */
- { 320, 0 }, /* (71) exists_opt ::= */
- { 319, 0 }, /* (72) db_options ::= */
- { 319, -3 }, /* (73) db_options ::= db_options BUFFER NK_INTEGER */
- { 319, -3 }, /* (74) db_options ::= db_options CACHEMODEL NK_STRING */
- { 319, -3 }, /* (75) db_options ::= db_options CACHESIZE NK_INTEGER */
- { 319, -3 }, /* (76) db_options ::= db_options COMP NK_INTEGER */
- { 319, -3 }, /* (77) db_options ::= db_options DURATION NK_INTEGER */
- { 319, -3 }, /* (78) db_options ::= db_options DURATION NK_VARIABLE */
- { 319, -3 }, /* (79) db_options ::= db_options MAXROWS NK_INTEGER */
- { 319, -3 }, /* (80) db_options ::= db_options MINROWS NK_INTEGER */
- { 319, -3 }, /* (81) db_options ::= db_options KEEP integer_list */
- { 319, -3 }, /* (82) db_options ::= db_options KEEP variable_list */
- { 319, -3 }, /* (83) db_options ::= db_options PAGES NK_INTEGER */
- { 319, -3 }, /* (84) db_options ::= db_options PAGESIZE NK_INTEGER */
- { 319, -3 }, /* (85) db_options ::= db_options PRECISION NK_STRING */
- { 319, -3 }, /* (86) db_options ::= db_options REPLICA NK_INTEGER */
- { 319, -3 }, /* (87) db_options ::= db_options STRICT NK_STRING */
- { 319, -3 }, /* (88) db_options ::= db_options VGROUPS NK_INTEGER */
- { 319, -3 }, /* (89) db_options ::= db_options SINGLE_STABLE NK_INTEGER */
- { 319, -3 }, /* (90) db_options ::= db_options RETENTIONS retention_list */
- { 319, -3 }, /* (91) db_options ::= db_options SCHEMALESS NK_INTEGER */
- { 319, -3 }, /* (92) db_options ::= db_options WAL_LEVEL NK_INTEGER */
- { 319, -3 }, /* (93) db_options ::= db_options WAL_FSYNC_PERIOD NK_INTEGER */
- { 319, -3 }, /* (94) db_options ::= db_options WAL_RETENTION_PERIOD NK_INTEGER */
- { 319, -4 }, /* (95) db_options ::= db_options WAL_RETENTION_PERIOD NK_MINUS NK_INTEGER */
- { 319, -3 }, /* (96) db_options ::= db_options WAL_RETENTION_SIZE NK_INTEGER */
- { 319, -4 }, /* (97) db_options ::= db_options WAL_RETENTION_SIZE NK_MINUS NK_INTEGER */
- { 319, -3 }, /* (98) db_options ::= db_options WAL_ROLL_PERIOD NK_INTEGER */
- { 319, -3 }, /* (99) db_options ::= db_options WAL_SEGMENT_SIZE NK_INTEGER */
- { 321, -1 }, /* (100) alter_db_options ::= alter_db_option */
- { 321, -2 }, /* (101) alter_db_options ::= alter_db_options alter_db_option */
- { 325, -2 }, /* (102) alter_db_option ::= CACHEMODEL NK_STRING */
- { 325, -2 }, /* (103) alter_db_option ::= CACHESIZE NK_INTEGER */
- { 325, -2 }, /* (104) alter_db_option ::= WAL_FSYNC_PERIOD NK_INTEGER */
- { 325, -2 }, /* (105) alter_db_option ::= KEEP integer_list */
- { 325, -2 }, /* (106) alter_db_option ::= KEEP variable_list */
- { 325, -2 }, /* (107) alter_db_option ::= WAL_LEVEL NK_INTEGER */
- { 322, -1 }, /* (108) integer_list ::= NK_INTEGER */
- { 322, -3 }, /* (109) integer_list ::= integer_list NK_COMMA NK_INTEGER */
- { 323, -1 }, /* (110) variable_list ::= NK_VARIABLE */
- { 323, -3 }, /* (111) variable_list ::= variable_list NK_COMMA NK_VARIABLE */
- { 324, -1 }, /* (112) retention_list ::= retention */
- { 324, -3 }, /* (113) retention_list ::= retention_list NK_COMMA retention */
- { 326, -3 }, /* (114) retention ::= NK_VARIABLE NK_COLON NK_VARIABLE */
- { 305, -9 }, /* (115) cmd ::= CREATE TABLE not_exists_opt full_table_name NK_LP column_def_list NK_RP tags_def_opt table_options */
- { 305, -3 }, /* (116) cmd ::= CREATE TABLE multi_create_clause */
- { 305, -9 }, /* (117) cmd ::= CREATE STABLE not_exists_opt full_table_name NK_LP column_def_list NK_RP tags_def table_options */
- { 305, -3 }, /* (118) cmd ::= DROP TABLE multi_drop_clause */
- { 305, -4 }, /* (119) cmd ::= DROP STABLE exists_opt full_table_name */
- { 305, -3 }, /* (120) cmd ::= ALTER TABLE alter_table_clause */
- { 305, -3 }, /* (121) cmd ::= ALTER STABLE alter_table_clause */
- { 334, -2 }, /* (122) alter_table_clause ::= full_table_name alter_table_options */
- { 334, -5 }, /* (123) alter_table_clause ::= full_table_name ADD COLUMN column_name type_name */
- { 334, -4 }, /* (124) alter_table_clause ::= full_table_name DROP COLUMN column_name */
- { 334, -5 }, /* (125) alter_table_clause ::= full_table_name MODIFY COLUMN column_name type_name */
- { 334, -5 }, /* (126) alter_table_clause ::= full_table_name RENAME COLUMN column_name column_name */
- { 334, -5 }, /* (127) alter_table_clause ::= full_table_name ADD TAG column_name type_name */
- { 334, -4 }, /* (128) alter_table_clause ::= full_table_name DROP TAG column_name */
- { 334, -5 }, /* (129) alter_table_clause ::= full_table_name MODIFY TAG column_name type_name */
- { 334, -5 }, /* (130) alter_table_clause ::= full_table_name RENAME TAG column_name column_name */
- { 334, -6 }, /* (131) alter_table_clause ::= full_table_name SET TAG column_name NK_EQ signed_literal */
- { 331, -1 }, /* (132) multi_create_clause ::= create_subtable_clause */
- { 331, -2 }, /* (133) multi_create_clause ::= multi_create_clause create_subtable_clause */
- { 339, -10 }, /* (134) create_subtable_clause ::= not_exists_opt full_table_name USING full_table_name specific_cols_opt TAGS NK_LP expression_list NK_RP table_options */
- { 333, -1 }, /* (135) multi_drop_clause ::= drop_table_clause */
- { 333, -2 }, /* (136) multi_drop_clause ::= multi_drop_clause drop_table_clause */
- { 342, -2 }, /* (137) drop_table_clause ::= exists_opt full_table_name */
- { 340, 0 }, /* (138) specific_cols_opt ::= */
- { 340, -3 }, /* (139) specific_cols_opt ::= NK_LP col_name_list NK_RP */
- { 327, -1 }, /* (140) full_table_name ::= table_name */
- { 327, -3 }, /* (141) full_table_name ::= db_name NK_DOT table_name */
- { 328, -1 }, /* (142) column_def_list ::= column_def */
- { 328, -3 }, /* (143) column_def_list ::= column_def_list NK_COMMA column_def */
- { 345, -2 }, /* (144) column_def ::= column_name type_name */
- { 345, -4 }, /* (145) column_def ::= column_name type_name COMMENT NK_STRING */
- { 337, -1 }, /* (146) type_name ::= BOOL */
- { 337, -1 }, /* (147) type_name ::= TINYINT */
- { 337, -1 }, /* (148) type_name ::= SMALLINT */
- { 337, -1 }, /* (149) type_name ::= INT */
- { 337, -1 }, /* (150) type_name ::= INTEGER */
- { 337, -1 }, /* (151) type_name ::= BIGINT */
- { 337, -1 }, /* (152) type_name ::= FLOAT */
- { 337, -1 }, /* (153) type_name ::= DOUBLE */
- { 337, -4 }, /* (154) type_name ::= BINARY NK_LP NK_INTEGER NK_RP */
- { 337, -1 }, /* (155) type_name ::= TIMESTAMP */
- { 337, -4 }, /* (156) type_name ::= NCHAR NK_LP NK_INTEGER NK_RP */
- { 337, -2 }, /* (157) type_name ::= TINYINT UNSIGNED */
- { 337, -2 }, /* (158) type_name ::= SMALLINT UNSIGNED */
- { 337, -2 }, /* (159) type_name ::= INT UNSIGNED */
- { 337, -2 }, /* (160) type_name ::= BIGINT UNSIGNED */
- { 337, -1 }, /* (161) type_name ::= JSON */
- { 337, -4 }, /* (162) type_name ::= VARCHAR NK_LP NK_INTEGER NK_RP */
- { 337, -1 }, /* (163) type_name ::= MEDIUMBLOB */
- { 337, -1 }, /* (164) type_name ::= BLOB */
- { 337, -4 }, /* (165) type_name ::= VARBINARY NK_LP NK_INTEGER NK_RP */
- { 337, -1 }, /* (166) type_name ::= DECIMAL */
- { 337, -4 }, /* (167) type_name ::= DECIMAL NK_LP NK_INTEGER NK_RP */
- { 337, -6 }, /* (168) type_name ::= DECIMAL NK_LP NK_INTEGER NK_COMMA NK_INTEGER NK_RP */
- { 329, 0 }, /* (169) tags_def_opt ::= */
- { 329, -1 }, /* (170) tags_def_opt ::= tags_def */
- { 332, -4 }, /* (171) tags_def ::= TAGS NK_LP column_def_list NK_RP */
- { 330, 0 }, /* (172) table_options ::= */
- { 330, -3 }, /* (173) table_options ::= table_options COMMENT NK_STRING */
- { 330, -3 }, /* (174) table_options ::= table_options MAX_DELAY duration_list */
- { 330, -3 }, /* (175) table_options ::= table_options WATERMARK duration_list */
- { 330, -5 }, /* (176) table_options ::= table_options ROLLUP NK_LP rollup_func_list NK_RP */
- { 330, -3 }, /* (177) table_options ::= table_options TTL NK_INTEGER */
- { 330, -5 }, /* (178) table_options ::= table_options SMA NK_LP col_name_list NK_RP */
- { 335, -1 }, /* (179) alter_table_options ::= alter_table_option */
- { 335, -2 }, /* (180) alter_table_options ::= alter_table_options alter_table_option */
- { 348, -2 }, /* (181) alter_table_option ::= COMMENT NK_STRING */
- { 348, -2 }, /* (182) alter_table_option ::= TTL NK_INTEGER */
- { 346, -1 }, /* (183) duration_list ::= duration_literal */
- { 346, -3 }, /* (184) duration_list ::= duration_list NK_COMMA duration_literal */
- { 347, -1 }, /* (185) rollup_func_list ::= rollup_func_name */
- { 347, -3 }, /* (186) rollup_func_list ::= rollup_func_list NK_COMMA rollup_func_name */
- { 350, -1 }, /* (187) rollup_func_name ::= function_name */
- { 350, -1 }, /* (188) rollup_func_name ::= FIRST */
- { 350, -1 }, /* (189) rollup_func_name ::= LAST */
- { 343, -1 }, /* (190) col_name_list ::= col_name */
- { 343, -3 }, /* (191) col_name_list ::= col_name_list NK_COMMA col_name */
- { 352, -1 }, /* (192) col_name ::= column_name */
- { 305, -2 }, /* (193) cmd ::= SHOW DNODES */
- { 305, -2 }, /* (194) cmd ::= SHOW USERS */
- { 305, -2 }, /* (195) cmd ::= SHOW DATABASES */
- { 305, -4 }, /* (196) cmd ::= SHOW db_name_cond_opt TABLES like_pattern_opt */
- { 305, -4 }, /* (197) cmd ::= SHOW db_name_cond_opt STABLES like_pattern_opt */
- { 305, -3 }, /* (198) cmd ::= SHOW db_name_cond_opt VGROUPS */
- { 305, -2 }, /* (199) cmd ::= SHOW MNODES */
- { 305, -2 }, /* (200) cmd ::= SHOW MODULES */
- { 305, -2 }, /* (201) cmd ::= SHOW QNODES */
- { 305, -2 }, /* (202) cmd ::= SHOW FUNCTIONS */
- { 305, -5 }, /* (203) cmd ::= SHOW INDEXES FROM table_name_cond from_db_opt */
- { 305, -2 }, /* (204) cmd ::= SHOW STREAMS */
- { 305, -2 }, /* (205) cmd ::= SHOW ACCOUNTS */
- { 305, -2 }, /* (206) cmd ::= SHOW APPS */
- { 305, -2 }, /* (207) cmd ::= SHOW CONNECTIONS */
- { 305, -2 }, /* (208) cmd ::= SHOW LICENCES */
- { 305, -2 }, /* (209) cmd ::= SHOW GRANTS */
- { 305, -4 }, /* (210) cmd ::= SHOW CREATE DATABASE db_name */
- { 305, -4 }, /* (211) cmd ::= SHOW CREATE TABLE full_table_name */
- { 305, -4 }, /* (212) cmd ::= SHOW CREATE STABLE full_table_name */
- { 305, -2 }, /* (213) cmd ::= SHOW QUERIES */
- { 305, -2 }, /* (214) cmd ::= SHOW SCORES */
- { 305, -2 }, /* (215) cmd ::= SHOW TOPICS */
- { 305, -2 }, /* (216) cmd ::= SHOW VARIABLES */
- { 305, -3 }, /* (217) cmd ::= SHOW LOCAL VARIABLES */
- { 305, -4 }, /* (218) cmd ::= SHOW DNODE NK_INTEGER VARIABLES */
- { 305, -2 }, /* (219) cmd ::= SHOW BNODES */
- { 305, -2 }, /* (220) cmd ::= SHOW SNODES */
- { 305, -2 }, /* (221) cmd ::= SHOW CLUSTER */
- { 305, -2 }, /* (222) cmd ::= SHOW TRANSACTIONS */
- { 305, -4 }, /* (223) cmd ::= SHOW TABLE DISTRIBUTED full_table_name */
- { 305, -2 }, /* (224) cmd ::= SHOW CONSUMERS */
- { 305, -2 }, /* (225) cmd ::= SHOW SUBSCRIPTIONS */
- { 305, -5 }, /* (226) cmd ::= SHOW TAGS FROM table_name_cond from_db_opt */
- { 353, 0 }, /* (227) db_name_cond_opt ::= */
- { 353, -2 }, /* (228) db_name_cond_opt ::= db_name NK_DOT */
- { 354, 0 }, /* (229) like_pattern_opt ::= */
- { 354, -2 }, /* (230) like_pattern_opt ::= LIKE NK_STRING */
- { 355, -1 }, /* (231) table_name_cond ::= table_name */
- { 356, 0 }, /* (232) from_db_opt ::= */
- { 356, -2 }, /* (233) from_db_opt ::= FROM db_name */
- { 305, -8 }, /* (234) cmd ::= CREATE SMA INDEX not_exists_opt full_table_name ON full_table_name index_options */
- { 305, -4 }, /* (235) cmd ::= DROP INDEX exists_opt full_table_name */
- { 357, -10 }, /* (236) index_options ::= FUNCTION NK_LP func_list NK_RP INTERVAL NK_LP duration_literal NK_RP sliding_opt sma_stream_opt */
- { 357, -12 }, /* (237) index_options ::= FUNCTION NK_LP func_list NK_RP INTERVAL NK_LP duration_literal NK_COMMA duration_literal NK_RP sliding_opt sma_stream_opt */
- { 358, -1 }, /* (238) func_list ::= func */
- { 358, -3 }, /* (239) func_list ::= func_list NK_COMMA func */
- { 361, -4 }, /* (240) func ::= function_name NK_LP expression_list NK_RP */
- { 360, 0 }, /* (241) sma_stream_opt ::= */
- { 360, -3 }, /* (242) sma_stream_opt ::= stream_options WATERMARK duration_literal */
- { 360, -3 }, /* (243) sma_stream_opt ::= stream_options MAX_DELAY duration_literal */
- { 305, -6 }, /* (244) cmd ::= CREATE TOPIC not_exists_opt topic_name AS query_expression */
- { 305, -7 }, /* (245) cmd ::= CREATE TOPIC not_exists_opt topic_name AS DATABASE db_name */
- { 305, -9 }, /* (246) cmd ::= CREATE TOPIC not_exists_opt topic_name WITH META AS DATABASE db_name */
- { 305, -7 }, /* (247) cmd ::= CREATE TOPIC not_exists_opt topic_name AS STABLE full_table_name */
- { 305, -9 }, /* (248) cmd ::= CREATE TOPIC not_exists_opt topic_name WITH META AS STABLE full_table_name */
- { 305, -4 }, /* (249) cmd ::= DROP TOPIC exists_opt topic_name */
- { 305, -7 }, /* (250) cmd ::= DROP CONSUMER GROUP exists_opt cgroup_name ON topic_name */
- { 305, -2 }, /* (251) cmd ::= DESC full_table_name */
- { 305, -2 }, /* (252) cmd ::= DESCRIBE full_table_name */
- { 305, -3 }, /* (253) cmd ::= RESET QUERY CACHE */
- { 305, -4 }, /* (254) cmd ::= EXPLAIN analyze_opt explain_options query_expression */
- { 366, 0 }, /* (255) analyze_opt ::= */
- { 366, -1 }, /* (256) analyze_opt ::= ANALYZE */
- { 367, 0 }, /* (257) explain_options ::= */
- { 367, -3 }, /* (258) explain_options ::= explain_options VERBOSE NK_BOOL */
- { 367, -3 }, /* (259) explain_options ::= explain_options RATIO NK_FLOAT */
- { 305, -10 }, /* (260) cmd ::= CREATE agg_func_opt FUNCTION not_exists_opt function_name AS NK_STRING OUTPUTTYPE type_name bufsize_opt */
- { 305, -4 }, /* (261) cmd ::= DROP FUNCTION exists_opt function_name */
- { 368, 0 }, /* (262) agg_func_opt ::= */
- { 368, -1 }, /* (263) agg_func_opt ::= AGGREGATE */
- { 369, 0 }, /* (264) bufsize_opt ::= */
- { 369, -2 }, /* (265) bufsize_opt ::= BUFSIZE NK_INTEGER */
- { 305, -9 }, /* (266) cmd ::= CREATE STREAM not_exists_opt stream_name stream_options INTO full_table_name AS query_expression */
- { 305, -4 }, /* (267) cmd ::= DROP STREAM exists_opt stream_name */
- { 362, 0 }, /* (268) stream_options ::= */
- { 362, -3 }, /* (269) stream_options ::= stream_options TRIGGER AT_ONCE */
- { 362, -3 }, /* (270) stream_options ::= stream_options TRIGGER WINDOW_CLOSE */
- { 362, -4 }, /* (271) stream_options ::= stream_options TRIGGER MAX_DELAY duration_literal */
- { 362, -3 }, /* (272) stream_options ::= stream_options WATERMARK duration_literal */
- { 362, -4 }, /* (273) stream_options ::= stream_options IGNORE EXPIRED NK_INTEGER */
- { 305, -3 }, /* (274) cmd ::= KILL CONNECTION NK_INTEGER */
- { 305, -3 }, /* (275) cmd ::= KILL QUERY NK_STRING */
- { 305, -3 }, /* (276) cmd ::= KILL TRANSACTION NK_INTEGER */
- { 305, -2 }, /* (277) cmd ::= BALANCE VGROUP */
- { 305, -4 }, /* (278) cmd ::= MERGE VGROUP NK_INTEGER NK_INTEGER */
- { 305, -4 }, /* (279) cmd ::= REDISTRIBUTE VGROUP NK_INTEGER dnode_list */
- { 305, -3 }, /* (280) cmd ::= SPLIT VGROUP NK_INTEGER */
- { 371, -2 }, /* (281) dnode_list ::= DNODE NK_INTEGER */
- { 371, -3 }, /* (282) dnode_list ::= dnode_list DNODE NK_INTEGER */
- { 305, -4 }, /* (283) cmd ::= DELETE FROM full_table_name where_clause_opt */
- { 305, -1 }, /* (284) cmd ::= query_expression */
- { 305, -7 }, /* (285) cmd ::= INSERT INTO full_table_name NK_LP col_name_list NK_RP query_expression */
- { 305, -4 }, /* (286) cmd ::= INSERT INTO full_table_name query_expression */
- { 308, -1 }, /* (287) literal ::= NK_INTEGER */
- { 308, -1 }, /* (288) literal ::= NK_FLOAT */
- { 308, -1 }, /* (289) literal ::= NK_STRING */
- { 308, -1 }, /* (290) literal ::= NK_BOOL */
- { 308, -2 }, /* (291) literal ::= TIMESTAMP NK_STRING */
- { 308, -1 }, /* (292) literal ::= duration_literal */
- { 308, -1 }, /* (293) literal ::= NULL */
- { 308, -1 }, /* (294) literal ::= NK_QUESTION */
- { 349, -1 }, /* (295) duration_literal ::= NK_VARIABLE */
- { 373, -1 }, /* (296) signed ::= NK_INTEGER */
- { 373, -2 }, /* (297) signed ::= NK_PLUS NK_INTEGER */
- { 373, -2 }, /* (298) signed ::= NK_MINUS NK_INTEGER */
- { 373, -1 }, /* (299) signed ::= NK_FLOAT */
- { 373, -2 }, /* (300) signed ::= NK_PLUS NK_FLOAT */
- { 373, -2 }, /* (301) signed ::= NK_MINUS NK_FLOAT */
- { 338, -1 }, /* (302) signed_literal ::= signed */
- { 338, -1 }, /* (303) signed_literal ::= NK_STRING */
- { 338, -1 }, /* (304) signed_literal ::= NK_BOOL */
- { 338, -2 }, /* (305) signed_literal ::= TIMESTAMP NK_STRING */
- { 338, -1 }, /* (306) signed_literal ::= duration_literal */
- { 338, -1 }, /* (307) signed_literal ::= NULL */
- { 338, -1 }, /* (308) signed_literal ::= literal_func */
- { 338, -1 }, /* (309) signed_literal ::= NK_QUESTION */
- { 375, -1 }, /* (310) literal_list ::= signed_literal */
- { 375, -3 }, /* (311) literal_list ::= literal_list NK_COMMA signed_literal */
- { 316, -1 }, /* (312) db_name ::= NK_ID */
- { 344, -1 }, /* (313) table_name ::= NK_ID */
- { 336, -1 }, /* (314) column_name ::= NK_ID */
- { 351, -1 }, /* (315) function_name ::= NK_ID */
- { 376, -1 }, /* (316) table_alias ::= NK_ID */
- { 377, -1 }, /* (317) column_alias ::= NK_ID */
- { 310, -1 }, /* (318) user_name ::= NK_ID */
- { 363, -1 }, /* (319) topic_name ::= NK_ID */
- { 370, -1 }, /* (320) stream_name ::= NK_ID */
- { 365, -1 }, /* (321) cgroup_name ::= NK_ID */
- { 378, -1 }, /* (322) expression ::= literal */
- { 378, -1 }, /* (323) expression ::= pseudo_column */
- { 378, -1 }, /* (324) expression ::= column_reference */
- { 378, -1 }, /* (325) expression ::= function_expression */
- { 378, -1 }, /* (326) expression ::= subquery */
- { 378, -3 }, /* (327) expression ::= NK_LP expression NK_RP */
- { 378, -2 }, /* (328) expression ::= NK_PLUS expression */
- { 378, -2 }, /* (329) expression ::= NK_MINUS expression */
- { 378, -3 }, /* (330) expression ::= expression NK_PLUS expression */
- { 378, -3 }, /* (331) expression ::= expression NK_MINUS expression */
- { 378, -3 }, /* (332) expression ::= expression NK_STAR expression */
- { 378, -3 }, /* (333) expression ::= expression NK_SLASH expression */
- { 378, -3 }, /* (334) expression ::= expression NK_REM expression */
- { 378, -3 }, /* (335) expression ::= column_reference NK_ARROW NK_STRING */
- { 378, -3 }, /* (336) expression ::= expression NK_BITAND expression */
- { 378, -3 }, /* (337) expression ::= expression NK_BITOR expression */
- { 341, -1 }, /* (338) expression_list ::= expression */
- { 341, -3 }, /* (339) expression_list ::= expression_list NK_COMMA expression */
- { 380, -1 }, /* (340) column_reference ::= column_name */
- { 380, -3 }, /* (341) column_reference ::= table_name NK_DOT column_name */
- { 379, -1 }, /* (342) pseudo_column ::= ROWTS */
- { 379, -1 }, /* (343) pseudo_column ::= TBNAME */
- { 379, -3 }, /* (344) pseudo_column ::= table_name NK_DOT TBNAME */
- { 379, -1 }, /* (345) pseudo_column ::= QSTART */
- { 379, -1 }, /* (346) pseudo_column ::= QEND */
- { 379, -1 }, /* (347) pseudo_column ::= QDURATION */
- { 379, -1 }, /* (348) pseudo_column ::= WSTART */
- { 379, -1 }, /* (349) pseudo_column ::= WEND */
- { 379, -1 }, /* (350) pseudo_column ::= WDURATION */
- { 381, -4 }, /* (351) function_expression ::= function_name NK_LP expression_list NK_RP */
- { 381, -4 }, /* (352) function_expression ::= star_func NK_LP star_func_para_list NK_RP */
- { 381, -6 }, /* (353) function_expression ::= CAST NK_LP expression AS type_name NK_RP */
- { 381, -1 }, /* (354) function_expression ::= literal_func */
- { 374, -3 }, /* (355) literal_func ::= noarg_func NK_LP NK_RP */
- { 374, -1 }, /* (356) literal_func ::= NOW */
- { 385, -1 }, /* (357) noarg_func ::= NOW */
- { 385, -1 }, /* (358) noarg_func ::= TODAY */
- { 385, -1 }, /* (359) noarg_func ::= TIMEZONE */
- { 385, -1 }, /* (360) noarg_func ::= DATABASE */
- { 385, -1 }, /* (361) noarg_func ::= CLIENT_VERSION */
- { 385, -1 }, /* (362) noarg_func ::= SERVER_VERSION */
- { 385, -1 }, /* (363) noarg_func ::= SERVER_STATUS */
- { 385, -1 }, /* (364) noarg_func ::= CURRENT_USER */
- { 385, -1 }, /* (365) noarg_func ::= USER */
- { 383, -1 }, /* (366) star_func ::= COUNT */
- { 383, -1 }, /* (367) star_func ::= FIRST */
- { 383, -1 }, /* (368) star_func ::= LAST */
- { 383, -1 }, /* (369) star_func ::= LAST_ROW */
- { 384, -1 }, /* (370) star_func_para_list ::= NK_STAR */
- { 384, -1 }, /* (371) star_func_para_list ::= other_para_list */
- { 386, -1 }, /* (372) other_para_list ::= star_func_para */
- { 386, -3 }, /* (373) other_para_list ::= other_para_list NK_COMMA star_func_para */
- { 387, -1 }, /* (374) star_func_para ::= expression */
- { 387, -3 }, /* (375) star_func_para ::= table_name NK_DOT NK_STAR */
- { 388, -3 }, /* (376) predicate ::= expression compare_op expression */
- { 388, -5 }, /* (377) predicate ::= expression BETWEEN expression AND expression */
- { 388, -6 }, /* (378) predicate ::= expression NOT BETWEEN expression AND expression */
- { 388, -3 }, /* (379) predicate ::= expression IS NULL */
- { 388, -4 }, /* (380) predicate ::= expression IS NOT NULL */
- { 388, -3 }, /* (381) predicate ::= expression in_op in_predicate_value */
- { 389, -1 }, /* (382) compare_op ::= NK_LT */
- { 389, -1 }, /* (383) compare_op ::= NK_GT */
- { 389, -1 }, /* (384) compare_op ::= NK_LE */
- { 389, -1 }, /* (385) compare_op ::= NK_GE */
- { 389, -1 }, /* (386) compare_op ::= NK_NE */
- { 389, -1 }, /* (387) compare_op ::= NK_EQ */
- { 389, -1 }, /* (388) compare_op ::= LIKE */
- { 389, -2 }, /* (389) compare_op ::= NOT LIKE */
- { 389, -1 }, /* (390) compare_op ::= MATCH */
- { 389, -1 }, /* (391) compare_op ::= NMATCH */
- { 389, -1 }, /* (392) compare_op ::= CONTAINS */
- { 390, -1 }, /* (393) in_op ::= IN */
- { 390, -2 }, /* (394) in_op ::= NOT IN */
- { 391, -3 }, /* (395) in_predicate_value ::= NK_LP literal_list NK_RP */
- { 392, -1 }, /* (396) boolean_value_expression ::= boolean_primary */
- { 392, -2 }, /* (397) boolean_value_expression ::= NOT boolean_primary */
- { 392, -3 }, /* (398) boolean_value_expression ::= boolean_value_expression OR boolean_value_expression */
- { 392, -3 }, /* (399) boolean_value_expression ::= boolean_value_expression AND boolean_value_expression */
- { 393, -1 }, /* (400) boolean_primary ::= predicate */
- { 393, -3 }, /* (401) boolean_primary ::= NK_LP boolean_value_expression NK_RP */
- { 394, -1 }, /* (402) common_expression ::= expression */
- { 394, -1 }, /* (403) common_expression ::= boolean_value_expression */
- { 395, 0 }, /* (404) from_clause_opt ::= */
- { 395, -2 }, /* (405) from_clause_opt ::= FROM table_reference_list */
- { 396, -1 }, /* (406) table_reference_list ::= table_reference */
- { 396, -3 }, /* (407) table_reference_list ::= table_reference_list NK_COMMA table_reference */
- { 397, -1 }, /* (408) table_reference ::= table_primary */
- { 397, -1 }, /* (409) table_reference ::= joined_table */
- { 398, -2 }, /* (410) table_primary ::= table_name alias_opt */
- { 398, -4 }, /* (411) table_primary ::= db_name NK_DOT table_name alias_opt */
- { 398, -2 }, /* (412) table_primary ::= subquery alias_opt */
- { 398, -1 }, /* (413) table_primary ::= parenthesized_joined_table */
- { 400, 0 }, /* (414) alias_opt ::= */
- { 400, -1 }, /* (415) alias_opt ::= table_alias */
- { 400, -2 }, /* (416) alias_opt ::= AS table_alias */
- { 401, -3 }, /* (417) parenthesized_joined_table ::= NK_LP joined_table NK_RP */
- { 401, -3 }, /* (418) parenthesized_joined_table ::= NK_LP parenthesized_joined_table NK_RP */
- { 399, -6 }, /* (419) joined_table ::= table_reference join_type JOIN table_reference ON search_condition */
- { 402, 0 }, /* (420) join_type ::= */
- { 402, -1 }, /* (421) join_type ::= INNER */
- { 404, -12 }, /* (422) query_specification ::= SELECT set_quantifier_opt select_list from_clause_opt where_clause_opt partition_by_clause_opt range_opt every_opt fill_opt twindow_clause_opt group_by_clause_opt having_clause_opt */
- { 405, 0 }, /* (423) set_quantifier_opt ::= */
- { 405, -1 }, /* (424) set_quantifier_opt ::= DISTINCT */
- { 405, -1 }, /* (425) set_quantifier_opt ::= ALL */
- { 406, -1 }, /* (426) select_list ::= select_item */
- { 406, -3 }, /* (427) select_list ::= select_list NK_COMMA select_item */
- { 414, -1 }, /* (428) select_item ::= NK_STAR */
- { 414, -1 }, /* (429) select_item ::= common_expression */
- { 414, -2 }, /* (430) select_item ::= common_expression column_alias */
- { 414, -3 }, /* (431) select_item ::= common_expression AS column_alias */
- { 414, -3 }, /* (432) select_item ::= table_name NK_DOT NK_STAR */
- { 372, 0 }, /* (433) where_clause_opt ::= */
- { 372, -2 }, /* (434) where_clause_opt ::= WHERE search_condition */
- { 407, 0 }, /* (435) partition_by_clause_opt ::= */
- { 407, -3 }, /* (436) partition_by_clause_opt ::= PARTITION BY expression_list */
- { 411, 0 }, /* (437) twindow_clause_opt ::= */
- { 411, -6 }, /* (438) twindow_clause_opt ::= SESSION NK_LP column_reference NK_COMMA duration_literal NK_RP */
- { 411, -4 }, /* (439) twindow_clause_opt ::= STATE_WINDOW NK_LP expression NK_RP */
- { 411, -6 }, /* (440) twindow_clause_opt ::= INTERVAL NK_LP duration_literal NK_RP sliding_opt fill_opt */
- { 411, -8 }, /* (441) twindow_clause_opt ::= INTERVAL NK_LP duration_literal NK_COMMA duration_literal NK_RP sliding_opt fill_opt */
- { 359, 0 }, /* (442) sliding_opt ::= */
- { 359, -4 }, /* (443) sliding_opt ::= SLIDING NK_LP duration_literal NK_RP */
- { 410, 0 }, /* (444) fill_opt ::= */
- { 410, -4 }, /* (445) fill_opt ::= FILL NK_LP fill_mode NK_RP */
- { 410, -6 }, /* (446) fill_opt ::= FILL NK_LP VALUE NK_COMMA literal_list NK_RP */
- { 415, -1 }, /* (447) fill_mode ::= NONE */
- { 415, -1 }, /* (448) fill_mode ::= PREV */
- { 415, -1 }, /* (449) fill_mode ::= NULL */
- { 415, -1 }, /* (450) fill_mode ::= LINEAR */
- { 415, -1 }, /* (451) fill_mode ::= NEXT */
- { 412, 0 }, /* (452) group_by_clause_opt ::= */
- { 412, -3 }, /* (453) group_by_clause_opt ::= GROUP BY group_by_list */
- { 416, -1 }, /* (454) group_by_list ::= expression */
- { 416, -3 }, /* (455) group_by_list ::= group_by_list NK_COMMA expression */
- { 413, 0 }, /* (456) having_clause_opt ::= */
- { 413, -2 }, /* (457) having_clause_opt ::= HAVING search_condition */
- { 408, 0 }, /* (458) range_opt ::= */
- { 408, -6 }, /* (459) range_opt ::= RANGE NK_LP expression NK_COMMA expression NK_RP */
- { 409, 0 }, /* (460) every_opt ::= */
- { 409, -4 }, /* (461) every_opt ::= EVERY NK_LP duration_literal NK_RP */
- { 364, -4 }, /* (462) query_expression ::= query_expression_body order_by_clause_opt slimit_clause_opt limit_clause_opt */
- { 417, -1 }, /* (463) query_expression_body ::= query_primary */
- { 417, -4 }, /* (464) query_expression_body ::= query_expression_body UNION ALL query_expression_body */
- { 417, -3 }, /* (465) query_expression_body ::= query_expression_body UNION query_expression_body */
- { 421, -1 }, /* (466) query_primary ::= query_specification */
- { 421, -6 }, /* (467) query_primary ::= NK_LP query_expression_body order_by_clause_opt slimit_clause_opt limit_clause_opt NK_RP */
- { 418, 0 }, /* (468) order_by_clause_opt ::= */
- { 418, -3 }, /* (469) order_by_clause_opt ::= ORDER BY sort_specification_list */
- { 419, 0 }, /* (470) slimit_clause_opt ::= */
- { 419, -2 }, /* (471) slimit_clause_opt ::= SLIMIT NK_INTEGER */
- { 419, -4 }, /* (472) slimit_clause_opt ::= SLIMIT NK_INTEGER SOFFSET NK_INTEGER */
- { 419, -4 }, /* (473) slimit_clause_opt ::= SLIMIT NK_INTEGER NK_COMMA NK_INTEGER */
- { 420, 0 }, /* (474) limit_clause_opt ::= */
- { 420, -2 }, /* (475) limit_clause_opt ::= LIMIT NK_INTEGER */
- { 420, -4 }, /* (476) limit_clause_opt ::= LIMIT NK_INTEGER OFFSET NK_INTEGER */
- { 420, -4 }, /* (477) limit_clause_opt ::= LIMIT NK_INTEGER NK_COMMA NK_INTEGER */
- { 382, -3 }, /* (478) subquery ::= NK_LP query_expression NK_RP */
- { 403, -1 }, /* (479) search_condition ::= common_expression */
- { 422, -1 }, /* (480) sort_specification_list ::= sort_specification */
- { 422, -3 }, /* (481) sort_specification_list ::= sort_specification_list NK_COMMA sort_specification */
- { 423, -3 }, /* (482) sort_specification ::= expression ordering_specification_opt null_ordering_opt */
- { 424, 0 }, /* (483) ordering_specification_opt ::= */
- { 424, -1 }, /* (484) ordering_specification_opt ::= ASC */
- { 424, -1 }, /* (485) ordering_specification_opt ::= DESC */
- { 425, 0 }, /* (486) null_ordering_opt ::= */
- { 425, -2 }, /* (487) null_ordering_opt ::= NULLS FIRST */
- { 425, -2 }, /* (488) null_ordering_opt ::= NULLS LAST */
+ { 316, -6 }, /* (0) cmd ::= CREATE ACCOUNT NK_ID PASS NK_STRING account_options */
+ { 316, -4 }, /* (1) cmd ::= ALTER ACCOUNT NK_ID alter_account_options */
+ { 317, 0 }, /* (2) account_options ::= */
+ { 317, -3 }, /* (3) account_options ::= account_options PPS literal */
+ { 317, -3 }, /* (4) account_options ::= account_options TSERIES literal */
+ { 317, -3 }, /* (5) account_options ::= account_options STORAGE literal */
+ { 317, -3 }, /* (6) account_options ::= account_options STREAMS literal */
+ { 317, -3 }, /* (7) account_options ::= account_options QTIME literal */
+ { 317, -3 }, /* (8) account_options ::= account_options DBS literal */
+ { 317, -3 }, /* (9) account_options ::= account_options USERS literal */
+ { 317, -3 }, /* (10) account_options ::= account_options CONNS literal */
+ { 317, -3 }, /* (11) account_options ::= account_options STATE literal */
+ { 318, -1 }, /* (12) alter_account_options ::= alter_account_option */
+ { 318, -2 }, /* (13) alter_account_options ::= alter_account_options alter_account_option */
+ { 320, -2 }, /* (14) alter_account_option ::= PASS literal */
+ { 320, -2 }, /* (15) alter_account_option ::= PPS literal */
+ { 320, -2 }, /* (16) alter_account_option ::= TSERIES literal */
+ { 320, -2 }, /* (17) alter_account_option ::= STORAGE literal */
+ { 320, -2 }, /* (18) alter_account_option ::= STREAMS literal */
+ { 320, -2 }, /* (19) alter_account_option ::= QTIME literal */
+ { 320, -2 }, /* (20) alter_account_option ::= DBS literal */
+ { 320, -2 }, /* (21) alter_account_option ::= USERS literal */
+ { 320, -2 }, /* (22) alter_account_option ::= CONNS literal */
+ { 320, -2 }, /* (23) alter_account_option ::= STATE literal */
+ { 316, -6 }, /* (24) cmd ::= CREATE USER user_name PASS NK_STRING sysinfo_opt */
+ { 316, -5 }, /* (25) cmd ::= ALTER USER user_name PASS NK_STRING */
+ { 316, -5 }, /* (26) cmd ::= ALTER USER user_name ENABLE NK_INTEGER */
+ { 316, -5 }, /* (27) cmd ::= ALTER USER user_name SYSINFO NK_INTEGER */
+ { 316, -3 }, /* (28) cmd ::= DROP USER user_name */
+ { 322, 0 }, /* (29) sysinfo_opt ::= */
+ { 322, -2 }, /* (30) sysinfo_opt ::= SYSINFO NK_INTEGER */
+ { 316, -6 }, /* (31) cmd ::= GRANT privileges ON priv_level TO user_name */
+ { 316, -6 }, /* (32) cmd ::= REVOKE privileges ON priv_level FROM user_name */
+ { 323, -1 }, /* (33) privileges ::= ALL */
+ { 323, -1 }, /* (34) privileges ::= priv_type_list */
+ { 325, -1 }, /* (35) priv_type_list ::= priv_type */
+ { 325, -3 }, /* (36) priv_type_list ::= priv_type_list NK_COMMA priv_type */
+ { 326, -1 }, /* (37) priv_type ::= READ */
+ { 326, -1 }, /* (38) priv_type ::= WRITE */
+ { 324, -3 }, /* (39) priv_level ::= NK_STAR NK_DOT NK_STAR */
+ { 324, -3 }, /* (40) priv_level ::= db_name NK_DOT NK_STAR */
+ { 316, -3 }, /* (41) cmd ::= CREATE DNODE dnode_endpoint */
+ { 316, -5 }, /* (42) cmd ::= CREATE DNODE dnode_endpoint PORT NK_INTEGER */
+ { 316, -3 }, /* (43) cmd ::= DROP DNODE NK_INTEGER */
+ { 316, -3 }, /* (44) cmd ::= DROP DNODE dnode_endpoint */
+ { 316, -4 }, /* (45) cmd ::= ALTER DNODE NK_INTEGER NK_STRING */
+ { 316, -5 }, /* (46) cmd ::= ALTER DNODE NK_INTEGER NK_STRING NK_STRING */
+ { 316, -4 }, /* (47) cmd ::= ALTER ALL DNODES NK_STRING */
+ { 316, -5 }, /* (48) cmd ::= ALTER ALL DNODES NK_STRING NK_STRING */
+ { 328, -1 }, /* (49) dnode_endpoint ::= NK_STRING */
+ { 328, -1 }, /* (50) dnode_endpoint ::= NK_ID */
+ { 328, -1 }, /* (51) dnode_endpoint ::= NK_IPTOKEN */
+ { 316, -3 }, /* (52) cmd ::= ALTER LOCAL NK_STRING */
+ { 316, -4 }, /* (53) cmd ::= ALTER LOCAL NK_STRING NK_STRING */
+ { 316, -5 }, /* (54) cmd ::= CREATE QNODE ON DNODE NK_INTEGER */
+ { 316, -5 }, /* (55) cmd ::= DROP QNODE ON DNODE NK_INTEGER */
+ { 316, -5 }, /* (56) cmd ::= CREATE BNODE ON DNODE NK_INTEGER */
+ { 316, -5 }, /* (57) cmd ::= DROP BNODE ON DNODE NK_INTEGER */
+ { 316, -5 }, /* (58) cmd ::= CREATE SNODE ON DNODE NK_INTEGER */
+ { 316, -5 }, /* (59) cmd ::= DROP SNODE ON DNODE NK_INTEGER */
+ { 316, -5 }, /* (60) cmd ::= CREATE MNODE ON DNODE NK_INTEGER */
+ { 316, -5 }, /* (61) cmd ::= DROP MNODE ON DNODE NK_INTEGER */
+ { 316, -5 }, /* (62) cmd ::= CREATE DATABASE not_exists_opt db_name db_options */
+ { 316, -4 }, /* (63) cmd ::= DROP DATABASE exists_opt db_name */
+ { 316, -2 }, /* (64) cmd ::= USE db_name */
+ { 316, -4 }, /* (65) cmd ::= ALTER DATABASE db_name alter_db_options */
+ { 316, -3 }, /* (66) cmd ::= FLUSH DATABASE db_name */
+ { 316, -4 }, /* (67) cmd ::= TRIM DATABASE db_name speed_opt */
+ { 329, -3 }, /* (68) not_exists_opt ::= IF NOT EXISTS */
+ { 329, 0 }, /* (69) not_exists_opt ::= */
+ { 331, -2 }, /* (70) exists_opt ::= IF EXISTS */
+ { 331, 0 }, /* (71) exists_opt ::= */
+ { 330, 0 }, /* (72) db_options ::= */
+ { 330, -3 }, /* (73) db_options ::= db_options BUFFER NK_INTEGER */
+ { 330, -3 }, /* (74) db_options ::= db_options CACHEMODEL NK_STRING */
+ { 330, -3 }, /* (75) db_options ::= db_options CACHESIZE NK_INTEGER */
+ { 330, -3 }, /* (76) db_options ::= db_options COMP NK_INTEGER */
+ { 330, -3 }, /* (77) db_options ::= db_options DURATION NK_INTEGER */
+ { 330, -3 }, /* (78) db_options ::= db_options DURATION NK_VARIABLE */
+ { 330, -3 }, /* (79) db_options ::= db_options MAXROWS NK_INTEGER */
+ { 330, -3 }, /* (80) db_options ::= db_options MINROWS NK_INTEGER */
+ { 330, -3 }, /* (81) db_options ::= db_options KEEP integer_list */
+ { 330, -3 }, /* (82) db_options ::= db_options KEEP variable_list */
+ { 330, -3 }, /* (83) db_options ::= db_options PAGES NK_INTEGER */
+ { 330, -3 }, /* (84) db_options ::= db_options PAGESIZE NK_INTEGER */
+ { 330, -3 }, /* (85) db_options ::= db_options TSDB_PAGESIZE NK_INTEGER */
+ { 330, -3 }, /* (86) db_options ::= db_options PRECISION NK_STRING */
+ { 330, -3 }, /* (87) db_options ::= db_options REPLICA NK_INTEGER */
+ { 330, -3 }, /* (88) db_options ::= db_options STRICT NK_STRING */
+ { 330, -3 }, /* (89) db_options ::= db_options VGROUPS NK_INTEGER */
+ { 330, -3 }, /* (90) db_options ::= db_options SINGLE_STABLE NK_INTEGER */
+ { 330, -3 }, /* (91) db_options ::= db_options RETENTIONS retention_list */
+ { 330, -3 }, /* (92) db_options ::= db_options SCHEMALESS NK_INTEGER */
+ { 330, -3 }, /* (93) db_options ::= db_options WAL_LEVEL NK_INTEGER */
+ { 330, -3 }, /* (94) db_options ::= db_options WAL_FSYNC_PERIOD NK_INTEGER */
+ { 330, -3 }, /* (95) db_options ::= db_options WAL_RETENTION_PERIOD NK_INTEGER */
+ { 330, -4 }, /* (96) db_options ::= db_options WAL_RETENTION_PERIOD NK_MINUS NK_INTEGER */
+ { 330, -3 }, /* (97) db_options ::= db_options WAL_RETENTION_SIZE NK_INTEGER */
+ { 330, -4 }, /* (98) db_options ::= db_options WAL_RETENTION_SIZE NK_MINUS NK_INTEGER */
+ { 330, -3 }, /* (99) db_options ::= db_options WAL_ROLL_PERIOD NK_INTEGER */
+ { 330, -3 }, /* (100) db_options ::= db_options WAL_SEGMENT_SIZE NK_INTEGER */
+ { 330, -3 }, /* (101) db_options ::= db_options STT_TRIGGER NK_INTEGER */
+ { 330, -3 }, /* (102) db_options ::= db_options TABLE_PREFIX NK_INTEGER */
+ { 330, -3 }, /* (103) db_options ::= db_options TABLE_SUFFIX NK_INTEGER */
+ { 332, -1 }, /* (104) alter_db_options ::= alter_db_option */
+ { 332, -2 }, /* (105) alter_db_options ::= alter_db_options alter_db_option */
+ { 337, -2 }, /* (106) alter_db_option ::= CACHEMODEL NK_STRING */
+ { 337, -2 }, /* (107) alter_db_option ::= CACHESIZE NK_INTEGER */
+ { 337, -2 }, /* (108) alter_db_option ::= WAL_FSYNC_PERIOD NK_INTEGER */
+ { 337, -2 }, /* (109) alter_db_option ::= KEEP integer_list */
+ { 337, -2 }, /* (110) alter_db_option ::= KEEP variable_list */
+ { 337, -2 }, /* (111) alter_db_option ::= WAL_LEVEL NK_INTEGER */
+ { 337, -2 }, /* (112) alter_db_option ::= STT_TRIGGER NK_INTEGER */
+ { 334, -1 }, /* (113) integer_list ::= NK_INTEGER */
+ { 334, -3 }, /* (114) integer_list ::= integer_list NK_COMMA NK_INTEGER */
+ { 335, -1 }, /* (115) variable_list ::= NK_VARIABLE */
+ { 335, -3 }, /* (116) variable_list ::= variable_list NK_COMMA NK_VARIABLE */
+ { 336, -1 }, /* (117) retention_list ::= retention */
+ { 336, -3 }, /* (118) retention_list ::= retention_list NK_COMMA retention */
+ { 338, -3 }, /* (119) retention ::= NK_VARIABLE NK_COLON NK_VARIABLE */
+ { 333, 0 }, /* (120) speed_opt ::= */
+ { 333, -2 }, /* (121) speed_opt ::= MAX_SPEED NK_INTEGER */
+ { 316, -9 }, /* (122) cmd ::= CREATE TABLE not_exists_opt full_table_name NK_LP column_def_list NK_RP tags_def_opt table_options */
+ { 316, -3 }, /* (123) cmd ::= CREATE TABLE multi_create_clause */
+ { 316, -9 }, /* (124) cmd ::= CREATE STABLE not_exists_opt full_table_name NK_LP column_def_list NK_RP tags_def table_options */
+ { 316, -3 }, /* (125) cmd ::= DROP TABLE multi_drop_clause */
+ { 316, -4 }, /* (126) cmd ::= DROP STABLE exists_opt full_table_name */
+ { 316, -3 }, /* (127) cmd ::= ALTER TABLE alter_table_clause */
+ { 316, -3 }, /* (128) cmd ::= ALTER STABLE alter_table_clause */
+ { 346, -2 }, /* (129) alter_table_clause ::= full_table_name alter_table_options */
+ { 346, -5 }, /* (130) alter_table_clause ::= full_table_name ADD COLUMN column_name type_name */
+ { 346, -4 }, /* (131) alter_table_clause ::= full_table_name DROP COLUMN column_name */
+ { 346, -5 }, /* (132) alter_table_clause ::= full_table_name MODIFY COLUMN column_name type_name */
+ { 346, -5 }, /* (133) alter_table_clause ::= full_table_name RENAME COLUMN column_name column_name */
+ { 346, -5 }, /* (134) alter_table_clause ::= full_table_name ADD TAG column_name type_name */
+ { 346, -4 }, /* (135) alter_table_clause ::= full_table_name DROP TAG column_name */
+ { 346, -5 }, /* (136) alter_table_clause ::= full_table_name MODIFY TAG column_name type_name */
+ { 346, -5 }, /* (137) alter_table_clause ::= full_table_name RENAME TAG column_name column_name */
+ { 346, -6 }, /* (138) alter_table_clause ::= full_table_name SET TAG column_name NK_EQ signed_literal */
+ { 343, -1 }, /* (139) multi_create_clause ::= create_subtable_clause */
+ { 343, -2 }, /* (140) multi_create_clause ::= multi_create_clause create_subtable_clause */
+ { 351, -10 }, /* (141) create_subtable_clause ::= not_exists_opt full_table_name USING full_table_name specific_cols_opt TAGS NK_LP expression_list NK_RP table_options */
+ { 345, -1 }, /* (142) multi_drop_clause ::= drop_table_clause */
+ { 345, -2 }, /* (143) multi_drop_clause ::= multi_drop_clause drop_table_clause */
+ { 354, -2 }, /* (144) drop_table_clause ::= exists_opt full_table_name */
+ { 352, 0 }, /* (145) specific_cols_opt ::= */
+ { 352, -3 }, /* (146) specific_cols_opt ::= NK_LP col_name_list NK_RP */
+ { 339, -1 }, /* (147) full_table_name ::= table_name */
+ { 339, -3 }, /* (148) full_table_name ::= db_name NK_DOT table_name */
+ { 340, -1 }, /* (149) column_def_list ::= column_def */
+ { 340, -3 }, /* (150) column_def_list ::= column_def_list NK_COMMA column_def */
+ { 357, -2 }, /* (151) column_def ::= column_name type_name */
+ { 357, -4 }, /* (152) column_def ::= column_name type_name COMMENT NK_STRING */
+ { 349, -1 }, /* (153) type_name ::= BOOL */
+ { 349, -1 }, /* (154) type_name ::= TINYINT */
+ { 349, -1 }, /* (155) type_name ::= SMALLINT */
+ { 349, -1 }, /* (156) type_name ::= INT */
+ { 349, -1 }, /* (157) type_name ::= INTEGER */
+ { 349, -1 }, /* (158) type_name ::= BIGINT */
+ { 349, -1 }, /* (159) type_name ::= FLOAT */
+ { 349, -1 }, /* (160) type_name ::= DOUBLE */
+ { 349, -4 }, /* (161) type_name ::= BINARY NK_LP NK_INTEGER NK_RP */
+ { 349, -1 }, /* (162) type_name ::= TIMESTAMP */
+ { 349, -4 }, /* (163) type_name ::= NCHAR NK_LP NK_INTEGER NK_RP */
+ { 349, -2 }, /* (164) type_name ::= TINYINT UNSIGNED */
+ { 349, -2 }, /* (165) type_name ::= SMALLINT UNSIGNED */
+ { 349, -2 }, /* (166) type_name ::= INT UNSIGNED */
+ { 349, -2 }, /* (167) type_name ::= BIGINT UNSIGNED */
+ { 349, -1 }, /* (168) type_name ::= JSON */
+ { 349, -4 }, /* (169) type_name ::= VARCHAR NK_LP NK_INTEGER NK_RP */
+ { 349, -1 }, /* (170) type_name ::= MEDIUMBLOB */
+ { 349, -1 }, /* (171) type_name ::= BLOB */
+ { 349, -4 }, /* (172) type_name ::= VARBINARY NK_LP NK_INTEGER NK_RP */
+ { 349, -1 }, /* (173) type_name ::= DECIMAL */
+ { 349, -4 }, /* (174) type_name ::= DECIMAL NK_LP NK_INTEGER NK_RP */
+ { 349, -6 }, /* (175) type_name ::= DECIMAL NK_LP NK_INTEGER NK_COMMA NK_INTEGER NK_RP */
+ { 341, 0 }, /* (176) tags_def_opt ::= */
+ { 341, -1 }, /* (177) tags_def_opt ::= tags_def */
+ { 344, -4 }, /* (178) tags_def ::= TAGS NK_LP column_def_list NK_RP */
+ { 342, 0 }, /* (179) table_options ::= */
+ { 342, -3 }, /* (180) table_options ::= table_options COMMENT NK_STRING */
+ { 342, -3 }, /* (181) table_options ::= table_options MAX_DELAY duration_list */
+ { 342, -3 }, /* (182) table_options ::= table_options WATERMARK duration_list */
+ { 342, -5 }, /* (183) table_options ::= table_options ROLLUP NK_LP rollup_func_list NK_RP */
+ { 342, -3 }, /* (184) table_options ::= table_options TTL NK_INTEGER */
+ { 342, -5 }, /* (185) table_options ::= table_options SMA NK_LP col_name_list NK_RP */
+ { 347, -1 }, /* (186) alter_table_options ::= alter_table_option */
+ { 347, -2 }, /* (187) alter_table_options ::= alter_table_options alter_table_option */
+ { 360, -2 }, /* (188) alter_table_option ::= COMMENT NK_STRING */
+ { 360, -2 }, /* (189) alter_table_option ::= TTL NK_INTEGER */
+ { 358, -1 }, /* (190) duration_list ::= duration_literal */
+ { 358, -3 }, /* (191) duration_list ::= duration_list NK_COMMA duration_literal */
+ { 359, -1 }, /* (192) rollup_func_list ::= rollup_func_name */
+ { 359, -3 }, /* (193) rollup_func_list ::= rollup_func_list NK_COMMA rollup_func_name */
+ { 362, -1 }, /* (194) rollup_func_name ::= function_name */
+ { 362, -1 }, /* (195) rollup_func_name ::= FIRST */
+ { 362, -1 }, /* (196) rollup_func_name ::= LAST */
+ { 355, -1 }, /* (197) col_name_list ::= col_name */
+ { 355, -3 }, /* (198) col_name_list ::= col_name_list NK_COMMA col_name */
+ { 364, -1 }, /* (199) col_name ::= column_name */
+ { 316, -2 }, /* (200) cmd ::= SHOW DNODES */
+ { 316, -2 }, /* (201) cmd ::= SHOW USERS */
+ { 316, -2 }, /* (202) cmd ::= SHOW DATABASES */
+ { 316, -4 }, /* (203) cmd ::= SHOW db_name_cond_opt TABLES like_pattern_opt */
+ { 316, -4 }, /* (204) cmd ::= SHOW db_name_cond_opt STABLES like_pattern_opt */
+ { 316, -3 }, /* (205) cmd ::= SHOW db_name_cond_opt VGROUPS */
+ { 316, -2 }, /* (206) cmd ::= SHOW MNODES */
+ { 316, -2 }, /* (207) cmd ::= SHOW MODULES */
+ { 316, -2 }, /* (208) cmd ::= SHOW QNODES */
+ { 316, -2 }, /* (209) cmd ::= SHOW FUNCTIONS */
+ { 316, -5 }, /* (210) cmd ::= SHOW INDEXES FROM table_name_cond from_db_opt */
+ { 316, -2 }, /* (211) cmd ::= SHOW STREAMS */
+ { 316, -2 }, /* (212) cmd ::= SHOW ACCOUNTS */
+ { 316, -2 }, /* (213) cmd ::= SHOW APPS */
+ { 316, -2 }, /* (214) cmd ::= SHOW CONNECTIONS */
+ { 316, -2 }, /* (215) cmd ::= SHOW LICENCES */
+ { 316, -2 }, /* (216) cmd ::= SHOW GRANTS */
+ { 316, -4 }, /* (217) cmd ::= SHOW CREATE DATABASE db_name */
+ { 316, -4 }, /* (218) cmd ::= SHOW CREATE TABLE full_table_name */
+ { 316, -4 }, /* (219) cmd ::= SHOW CREATE STABLE full_table_name */
+ { 316, -2 }, /* (220) cmd ::= SHOW QUERIES */
+ { 316, -2 }, /* (221) cmd ::= SHOW SCORES */
+ { 316, -2 }, /* (222) cmd ::= SHOW TOPICS */
+ { 316, -2 }, /* (223) cmd ::= SHOW VARIABLES */
+ { 316, -3 }, /* (224) cmd ::= SHOW LOCAL VARIABLES */
+ { 316, -4 }, /* (225) cmd ::= SHOW DNODE NK_INTEGER VARIABLES */
+ { 316, -2 }, /* (226) cmd ::= SHOW BNODES */
+ { 316, -2 }, /* (227) cmd ::= SHOW SNODES */
+ { 316, -2 }, /* (228) cmd ::= SHOW CLUSTER */
+ { 316, -2 }, /* (229) cmd ::= SHOW TRANSACTIONS */
+ { 316, -4 }, /* (230) cmd ::= SHOW TABLE DISTRIBUTED full_table_name */
+ { 316, -2 }, /* (231) cmd ::= SHOW CONSUMERS */
+ { 316, -2 }, /* (232) cmd ::= SHOW SUBSCRIPTIONS */
+ { 316, -5 }, /* (233) cmd ::= SHOW TAGS FROM table_name_cond from_db_opt */
+ { 316, -3 }, /* (234) cmd ::= SHOW VNODES NK_INTEGER */
+ { 316, -3 }, /* (235) cmd ::= SHOW VNODES NK_STRING */
+ { 365, 0 }, /* (236) db_name_cond_opt ::= */
+ { 365, -2 }, /* (237) db_name_cond_opt ::= db_name NK_DOT */
+ { 366, 0 }, /* (238) like_pattern_opt ::= */
+ { 366, -2 }, /* (239) like_pattern_opt ::= LIKE NK_STRING */
+ { 367, -1 }, /* (240) table_name_cond ::= table_name */
+ { 368, 0 }, /* (241) from_db_opt ::= */
+ { 368, -2 }, /* (242) from_db_opt ::= FROM db_name */
+ { 316, -8 }, /* (243) cmd ::= CREATE SMA INDEX not_exists_opt full_table_name ON full_table_name index_options */
+ { 316, -4 }, /* (244) cmd ::= DROP INDEX exists_opt full_table_name */
+ { 369, -10 }, /* (245) index_options ::= FUNCTION NK_LP func_list NK_RP INTERVAL NK_LP duration_literal NK_RP sliding_opt sma_stream_opt */
+ { 369, -12 }, /* (246) index_options ::= FUNCTION NK_LP func_list NK_RP INTERVAL NK_LP duration_literal NK_COMMA duration_literal NK_RP sliding_opt sma_stream_opt */
+ { 370, -1 }, /* (247) func_list ::= func */
+ { 370, -3 }, /* (248) func_list ::= func_list NK_COMMA func */
+ { 373, -4 }, /* (249) func ::= function_name NK_LP expression_list NK_RP */
+ { 372, 0 }, /* (250) sma_stream_opt ::= */
+ { 372, -3 }, /* (251) sma_stream_opt ::= stream_options WATERMARK duration_literal */
+ { 372, -3 }, /* (252) sma_stream_opt ::= stream_options MAX_DELAY duration_literal */
+ { 316, -6 }, /* (253) cmd ::= CREATE TOPIC not_exists_opt topic_name AS query_or_subquery */
+ { 316, -7 }, /* (254) cmd ::= CREATE TOPIC not_exists_opt topic_name AS DATABASE db_name */
+ { 316, -9 }, /* (255) cmd ::= CREATE TOPIC not_exists_opt topic_name WITH META AS DATABASE db_name */
+ { 316, -7 }, /* (256) cmd ::= CREATE TOPIC not_exists_opt topic_name AS STABLE full_table_name */
+ { 316, -9 }, /* (257) cmd ::= CREATE TOPIC not_exists_opt topic_name WITH META AS STABLE full_table_name */
+ { 316, -4 }, /* (258) cmd ::= DROP TOPIC exists_opt topic_name */
+ { 316, -7 }, /* (259) cmd ::= DROP CONSUMER GROUP exists_opt cgroup_name ON topic_name */
+ { 316, -2 }, /* (260) cmd ::= DESC full_table_name */
+ { 316, -2 }, /* (261) cmd ::= DESCRIBE full_table_name */
+ { 316, -3 }, /* (262) cmd ::= RESET QUERY CACHE */
+ { 316, -4 }, /* (263) cmd ::= EXPLAIN analyze_opt explain_options query_or_subquery */
+ { 378, 0 }, /* (264) analyze_opt ::= */
+ { 378, -1 }, /* (265) analyze_opt ::= ANALYZE */
+ { 379, 0 }, /* (266) explain_options ::= */
+ { 379, -3 }, /* (267) explain_options ::= explain_options VERBOSE NK_BOOL */
+ { 379, -3 }, /* (268) explain_options ::= explain_options RATIO NK_FLOAT */
+ { 316, -10 }, /* (269) cmd ::= CREATE agg_func_opt FUNCTION not_exists_opt function_name AS NK_STRING OUTPUTTYPE type_name bufsize_opt */
+ { 316, -4 }, /* (270) cmd ::= DROP FUNCTION exists_opt function_name */
+ { 380, 0 }, /* (271) agg_func_opt ::= */
+ { 380, -1 }, /* (272) agg_func_opt ::= AGGREGATE */
+ { 381, 0 }, /* (273) bufsize_opt ::= */
+ { 381, -2 }, /* (274) bufsize_opt ::= BUFSIZE NK_INTEGER */
+ { 316, -11 }, /* (275) cmd ::= CREATE STREAM not_exists_opt stream_name stream_options INTO full_table_name tags_def_opt subtable_opt AS query_or_subquery */
+ { 316, -4 }, /* (276) cmd ::= DROP STREAM exists_opt stream_name */
+ { 374, 0 }, /* (277) stream_options ::= */
+ { 374, -3 }, /* (278) stream_options ::= stream_options TRIGGER AT_ONCE */
+ { 374, -3 }, /* (279) stream_options ::= stream_options TRIGGER WINDOW_CLOSE */
+ { 374, -4 }, /* (280) stream_options ::= stream_options TRIGGER MAX_DELAY duration_literal */
+ { 374, -3 }, /* (281) stream_options ::= stream_options WATERMARK duration_literal */
+ { 374, -4 }, /* (282) stream_options ::= stream_options IGNORE EXPIRED NK_INTEGER */
+ { 383, 0 }, /* (283) subtable_opt ::= */
+ { 383, -4 }, /* (284) subtable_opt ::= SUBTABLE NK_LP expression NK_RP */
+ { 316, -3 }, /* (285) cmd ::= KILL CONNECTION NK_INTEGER */
+ { 316, -3 }, /* (286) cmd ::= KILL QUERY NK_STRING */
+ { 316, -3 }, /* (287) cmd ::= KILL TRANSACTION NK_INTEGER */
+ { 316, -2 }, /* (288) cmd ::= BALANCE VGROUP */
+ { 316, -4 }, /* (289) cmd ::= MERGE VGROUP NK_INTEGER NK_INTEGER */
+ { 316, -4 }, /* (290) cmd ::= REDISTRIBUTE VGROUP NK_INTEGER dnode_list */
+ { 316, -3 }, /* (291) cmd ::= SPLIT VGROUP NK_INTEGER */
+ { 385, -2 }, /* (292) dnode_list ::= DNODE NK_INTEGER */
+ { 385, -3 }, /* (293) dnode_list ::= dnode_list DNODE NK_INTEGER */
+ { 316, -4 }, /* (294) cmd ::= DELETE FROM full_table_name where_clause_opt */
+ { 316, -1 }, /* (295) cmd ::= query_or_subquery */
+ { 316, -7 }, /* (296) cmd ::= INSERT INTO full_table_name NK_LP col_name_list NK_RP query_or_subquery */
+ { 316, -4 }, /* (297) cmd ::= INSERT INTO full_table_name query_or_subquery */
+ { 319, -1 }, /* (298) literal ::= NK_INTEGER */
+ { 319, -1 }, /* (299) literal ::= NK_FLOAT */
+ { 319, -1 }, /* (300) literal ::= NK_STRING */
+ { 319, -1 }, /* (301) literal ::= NK_BOOL */
+ { 319, -2 }, /* (302) literal ::= TIMESTAMP NK_STRING */
+ { 319, -1 }, /* (303) literal ::= duration_literal */
+ { 319, -1 }, /* (304) literal ::= NULL */
+ { 319, -1 }, /* (305) literal ::= NK_QUESTION */
+ { 361, -1 }, /* (306) duration_literal ::= NK_VARIABLE */
+ { 387, -1 }, /* (307) signed ::= NK_INTEGER */
+ { 387, -2 }, /* (308) signed ::= NK_PLUS NK_INTEGER */
+ { 387, -2 }, /* (309) signed ::= NK_MINUS NK_INTEGER */
+ { 387, -1 }, /* (310) signed ::= NK_FLOAT */
+ { 387, -2 }, /* (311) signed ::= NK_PLUS NK_FLOAT */
+ { 387, -2 }, /* (312) signed ::= NK_MINUS NK_FLOAT */
+ { 350, -1 }, /* (313) signed_literal ::= signed */
+ { 350, -1 }, /* (314) signed_literal ::= NK_STRING */
+ { 350, -1 }, /* (315) signed_literal ::= NK_BOOL */
+ { 350, -2 }, /* (316) signed_literal ::= TIMESTAMP NK_STRING */
+ { 350, -1 }, /* (317) signed_literal ::= duration_literal */
+ { 350, -1 }, /* (318) signed_literal ::= NULL */
+ { 350, -1 }, /* (319) signed_literal ::= literal_func */
+ { 350, -1 }, /* (320) signed_literal ::= NK_QUESTION */
+ { 389, -1 }, /* (321) literal_list ::= signed_literal */
+ { 389, -3 }, /* (322) literal_list ::= literal_list NK_COMMA signed_literal */
+ { 327, -1 }, /* (323) db_name ::= NK_ID */
+ { 356, -1 }, /* (324) table_name ::= NK_ID */
+ { 348, -1 }, /* (325) column_name ::= NK_ID */
+ { 363, -1 }, /* (326) function_name ::= NK_ID */
+ { 390, -1 }, /* (327) table_alias ::= NK_ID */
+ { 391, -1 }, /* (328) column_alias ::= NK_ID */
+ { 321, -1 }, /* (329) user_name ::= NK_ID */
+ { 375, -1 }, /* (330) topic_name ::= NK_ID */
+ { 382, -1 }, /* (331) stream_name ::= NK_ID */
+ { 377, -1 }, /* (332) cgroup_name ::= NK_ID */
+ { 392, -1 }, /* (333) expr_or_subquery ::= expression */
+ { 392, -1 }, /* (334) expr_or_subquery ::= subquery */
+ { 384, -1 }, /* (335) expression ::= literal */
+ { 384, -1 }, /* (336) expression ::= pseudo_column */
+ { 384, -1 }, /* (337) expression ::= column_reference */
+ { 384, -1 }, /* (338) expression ::= function_expression */
+ { 384, -1 }, /* (339) expression ::= case_when_expression */
+ { 384, -3 }, /* (340) expression ::= NK_LP expression NK_RP */
+ { 384, -2 }, /* (341) expression ::= NK_PLUS expr_or_subquery */
+ { 384, -2 }, /* (342) expression ::= NK_MINUS expr_or_subquery */
+ { 384, -3 }, /* (343) expression ::= expr_or_subquery NK_PLUS expr_or_subquery */
+ { 384, -3 }, /* (344) expression ::= expr_or_subquery NK_MINUS expr_or_subquery */
+ { 384, -3 }, /* (345) expression ::= expr_or_subquery NK_STAR expr_or_subquery */
+ { 384, -3 }, /* (346) expression ::= expr_or_subquery NK_SLASH expr_or_subquery */
+ { 384, -3 }, /* (347) expression ::= expr_or_subquery NK_REM expr_or_subquery */
+ { 384, -3 }, /* (348) expression ::= column_reference NK_ARROW NK_STRING */
+ { 384, -3 }, /* (349) expression ::= expr_or_subquery NK_BITAND expr_or_subquery */
+ { 384, -3 }, /* (350) expression ::= expr_or_subquery NK_BITOR expr_or_subquery */
+ { 353, -1 }, /* (351) expression_list ::= expr_or_subquery */
+ { 353, -3 }, /* (352) expression_list ::= expression_list NK_COMMA expr_or_subquery */
+ { 395, -1 }, /* (353) column_reference ::= column_name */
+ { 395, -3 }, /* (354) column_reference ::= table_name NK_DOT column_name */
+ { 394, -1 }, /* (355) pseudo_column ::= ROWTS */
+ { 394, -1 }, /* (356) pseudo_column ::= TBNAME */
+ { 394, -3 }, /* (357) pseudo_column ::= table_name NK_DOT TBNAME */
+ { 394, -1 }, /* (358) pseudo_column ::= QSTART */
+ { 394, -1 }, /* (359) pseudo_column ::= QEND */
+ { 394, -1 }, /* (360) pseudo_column ::= QDURATION */
+ { 394, -1 }, /* (361) pseudo_column ::= WSTART */
+ { 394, -1 }, /* (362) pseudo_column ::= WEND */
+ { 394, -1 }, /* (363) pseudo_column ::= WDURATION */
+ { 394, -1 }, /* (364) pseudo_column ::= IROWTS */
+ { 396, -4 }, /* (365) function_expression ::= function_name NK_LP expression_list NK_RP */
+ { 396, -4 }, /* (366) function_expression ::= star_func NK_LP star_func_para_list NK_RP */
+ { 396, -6 }, /* (367) function_expression ::= CAST NK_LP expr_or_subquery AS type_name NK_RP */
+ { 396, -1 }, /* (368) function_expression ::= literal_func */
+ { 388, -3 }, /* (369) literal_func ::= noarg_func NK_LP NK_RP */
+ { 388, -1 }, /* (370) literal_func ::= NOW */
+ { 400, -1 }, /* (371) noarg_func ::= NOW */
+ { 400, -1 }, /* (372) noarg_func ::= TODAY */
+ { 400, -1 }, /* (373) noarg_func ::= TIMEZONE */
+ { 400, -1 }, /* (374) noarg_func ::= DATABASE */
+ { 400, -1 }, /* (375) noarg_func ::= CLIENT_VERSION */
+ { 400, -1 }, /* (376) noarg_func ::= SERVER_VERSION */
+ { 400, -1 }, /* (377) noarg_func ::= SERVER_STATUS */
+ { 400, -1 }, /* (378) noarg_func ::= CURRENT_USER */
+ { 400, -1 }, /* (379) noarg_func ::= USER */
+ { 398, -1 }, /* (380) star_func ::= COUNT */
+ { 398, -1 }, /* (381) star_func ::= FIRST */
+ { 398, -1 }, /* (382) star_func ::= LAST */
+ { 398, -1 }, /* (383) star_func ::= LAST_ROW */
+ { 399, -1 }, /* (384) star_func_para_list ::= NK_STAR */
+ { 399, -1 }, /* (385) star_func_para_list ::= other_para_list */
+ { 401, -1 }, /* (386) other_para_list ::= star_func_para */
+ { 401, -3 }, /* (387) other_para_list ::= other_para_list NK_COMMA star_func_para */
+ { 402, -1 }, /* (388) star_func_para ::= expr_or_subquery */
+ { 402, -3 }, /* (389) star_func_para ::= table_name NK_DOT NK_STAR */
+ { 397, -4 }, /* (390) case_when_expression ::= CASE when_then_list case_when_else_opt END */
+ { 397, -5 }, /* (391) case_when_expression ::= CASE common_expression when_then_list case_when_else_opt END */
+ { 403, -1 }, /* (392) when_then_list ::= when_then_expr */
+ { 403, -2 }, /* (393) when_then_list ::= when_then_list when_then_expr */
+ { 406, -4 }, /* (394) when_then_expr ::= WHEN common_expression THEN common_expression */
+ { 404, 0 }, /* (395) case_when_else_opt ::= */
+ { 404, -2 }, /* (396) case_when_else_opt ::= ELSE common_expression */
+ { 407, -3 }, /* (397) predicate ::= expr_or_subquery compare_op expr_or_subquery */
+ { 407, -5 }, /* (398) predicate ::= expr_or_subquery BETWEEN expr_or_subquery AND expr_or_subquery */
+ { 407, -6 }, /* (399) predicate ::= expr_or_subquery NOT BETWEEN expr_or_subquery AND expr_or_subquery */
+ { 407, -3 }, /* (400) predicate ::= expr_or_subquery IS NULL */
+ { 407, -4 }, /* (401) predicate ::= expr_or_subquery IS NOT NULL */
+ { 407, -3 }, /* (402) predicate ::= expr_or_subquery in_op in_predicate_value */
+ { 408, -1 }, /* (403) compare_op ::= NK_LT */
+ { 408, -1 }, /* (404) compare_op ::= NK_GT */
+ { 408, -1 }, /* (405) compare_op ::= NK_LE */
+ { 408, -1 }, /* (406) compare_op ::= NK_GE */
+ { 408, -1 }, /* (407) compare_op ::= NK_NE */
+ { 408, -1 }, /* (408) compare_op ::= NK_EQ */
+ { 408, -1 }, /* (409) compare_op ::= LIKE */
+ { 408, -2 }, /* (410) compare_op ::= NOT LIKE */
+ { 408, -1 }, /* (411) compare_op ::= MATCH */
+ { 408, -1 }, /* (412) compare_op ::= NMATCH */
+ { 408, -1 }, /* (413) compare_op ::= CONTAINS */
+ { 409, -1 }, /* (414) in_op ::= IN */
+ { 409, -2 }, /* (415) in_op ::= NOT IN */
+ { 410, -3 }, /* (416) in_predicate_value ::= NK_LP literal_list NK_RP */
+ { 411, -1 }, /* (417) boolean_value_expression ::= boolean_primary */
+ { 411, -2 }, /* (418) boolean_value_expression ::= NOT boolean_primary */
+ { 411, -3 }, /* (419) boolean_value_expression ::= boolean_value_expression OR boolean_value_expression */
+ { 411, -3 }, /* (420) boolean_value_expression ::= boolean_value_expression AND boolean_value_expression */
+ { 412, -1 }, /* (421) boolean_primary ::= predicate */
+ { 412, -3 }, /* (422) boolean_primary ::= NK_LP boolean_value_expression NK_RP */
+ { 405, -1 }, /* (423) common_expression ::= expr_or_subquery */
+ { 405, -1 }, /* (424) common_expression ::= boolean_value_expression */
+ { 413, 0 }, /* (425) from_clause_opt ::= */
+ { 413, -2 }, /* (426) from_clause_opt ::= FROM table_reference_list */
+ { 414, -1 }, /* (427) table_reference_list ::= table_reference */
+ { 414, -3 }, /* (428) table_reference_list ::= table_reference_list NK_COMMA table_reference */
+ { 415, -1 }, /* (429) table_reference ::= table_primary */
+ { 415, -1 }, /* (430) table_reference ::= joined_table */
+ { 416, -2 }, /* (431) table_primary ::= table_name alias_opt */
+ { 416, -4 }, /* (432) table_primary ::= db_name NK_DOT table_name alias_opt */
+ { 416, -2 }, /* (433) table_primary ::= subquery alias_opt */
+ { 416, -1 }, /* (434) table_primary ::= parenthesized_joined_table */
+ { 418, 0 }, /* (435) alias_opt ::= */
+ { 418, -1 }, /* (436) alias_opt ::= table_alias */
+ { 418, -2 }, /* (437) alias_opt ::= AS table_alias */
+ { 419, -3 }, /* (438) parenthesized_joined_table ::= NK_LP joined_table NK_RP */
+ { 419, -3 }, /* (439) parenthesized_joined_table ::= NK_LP parenthesized_joined_table NK_RP */
+ { 417, -6 }, /* (440) joined_table ::= table_reference join_type JOIN table_reference ON search_condition */
+ { 420, 0 }, /* (441) join_type ::= */
+ { 420, -1 }, /* (442) join_type ::= INNER */
+ { 422, -12 }, /* (443) query_specification ::= SELECT set_quantifier_opt select_list from_clause_opt where_clause_opt partition_by_clause_opt range_opt every_opt fill_opt twindow_clause_opt group_by_clause_opt having_clause_opt */
+ { 423, 0 }, /* (444) set_quantifier_opt ::= */
+ { 423, -1 }, /* (445) set_quantifier_opt ::= DISTINCT */
+ { 423, -1 }, /* (446) set_quantifier_opt ::= ALL */
+ { 424, -1 }, /* (447) select_list ::= select_item */
+ { 424, -3 }, /* (448) select_list ::= select_list NK_COMMA select_item */
+ { 432, -1 }, /* (449) select_item ::= NK_STAR */
+ { 432, -1 }, /* (450) select_item ::= common_expression */
+ { 432, -2 }, /* (451) select_item ::= common_expression column_alias */
+ { 432, -3 }, /* (452) select_item ::= common_expression AS column_alias */
+ { 432, -3 }, /* (453) select_item ::= table_name NK_DOT NK_STAR */
+ { 386, 0 }, /* (454) where_clause_opt ::= */
+ { 386, -2 }, /* (455) where_clause_opt ::= WHERE search_condition */
+ { 425, 0 }, /* (456) partition_by_clause_opt ::= */
+ { 425, -3 }, /* (457) partition_by_clause_opt ::= PARTITION BY partition_list */
+ { 433, -1 }, /* (458) partition_list ::= partition_item */
+ { 433, -3 }, /* (459) partition_list ::= partition_list NK_COMMA partition_item */
+ { 434, -1 }, /* (460) partition_item ::= expr_or_subquery */
+ { 434, -2 }, /* (461) partition_item ::= expr_or_subquery column_alias */
+ { 434, -3 }, /* (462) partition_item ::= expr_or_subquery AS column_alias */
+ { 429, 0 }, /* (463) twindow_clause_opt ::= */
+ { 429, -6 }, /* (464) twindow_clause_opt ::= SESSION NK_LP column_reference NK_COMMA duration_literal NK_RP */
+ { 429, -4 }, /* (465) twindow_clause_opt ::= STATE_WINDOW NK_LP expr_or_subquery NK_RP */
+ { 429, -6 }, /* (466) twindow_clause_opt ::= INTERVAL NK_LP duration_literal NK_RP sliding_opt fill_opt */
+ { 429, -8 }, /* (467) twindow_clause_opt ::= INTERVAL NK_LP duration_literal NK_COMMA duration_literal NK_RP sliding_opt fill_opt */
+ { 371, 0 }, /* (468) sliding_opt ::= */
+ { 371, -4 }, /* (469) sliding_opt ::= SLIDING NK_LP duration_literal NK_RP */
+ { 428, 0 }, /* (470) fill_opt ::= */
+ { 428, -4 }, /* (471) fill_opt ::= FILL NK_LP fill_mode NK_RP */
+ { 428, -6 }, /* (472) fill_opt ::= FILL NK_LP VALUE NK_COMMA literal_list NK_RP */
+ { 435, -1 }, /* (473) fill_mode ::= NONE */
+ { 435, -1 }, /* (474) fill_mode ::= PREV */
+ { 435, -1 }, /* (475) fill_mode ::= NULL */
+ { 435, -1 }, /* (476) fill_mode ::= LINEAR */
+ { 435, -1 }, /* (477) fill_mode ::= NEXT */
+ { 430, 0 }, /* (478) group_by_clause_opt ::= */
+ { 430, -3 }, /* (479) group_by_clause_opt ::= GROUP BY group_by_list */
+ { 436, -1 }, /* (480) group_by_list ::= expr_or_subquery */
+ { 436, -3 }, /* (481) group_by_list ::= group_by_list NK_COMMA expr_or_subquery */
+ { 431, 0 }, /* (482) having_clause_opt ::= */
+ { 431, -2 }, /* (483) having_clause_opt ::= HAVING search_condition */
+ { 426, 0 }, /* (484) range_opt ::= */
+ { 426, -6 }, /* (485) range_opt ::= RANGE NK_LP expr_or_subquery NK_COMMA expr_or_subquery NK_RP */
+ { 427, 0 }, /* (486) every_opt ::= */
+ { 427, -4 }, /* (487) every_opt ::= EVERY NK_LP duration_literal NK_RP */
+ { 437, -4 }, /* (488) query_expression ::= query_simple order_by_clause_opt slimit_clause_opt limit_clause_opt */
+ { 438, -1 }, /* (489) query_simple ::= query_specification */
+ { 438, -1 }, /* (490) query_simple ::= union_query_expression */
+ { 442, -4 }, /* (491) union_query_expression ::= query_simple_or_subquery UNION ALL query_simple_or_subquery */
+ { 442, -3 }, /* (492) union_query_expression ::= query_simple_or_subquery UNION query_simple_or_subquery */
+ { 443, -1 }, /* (493) query_simple_or_subquery ::= query_simple */
+ { 443, -1 }, /* (494) query_simple_or_subquery ::= subquery */
+ { 376, -1 }, /* (495) query_or_subquery ::= query_expression */
+ { 376, -1 }, /* (496) query_or_subquery ::= subquery */
+ { 439, 0 }, /* (497) order_by_clause_opt ::= */
+ { 439, -3 }, /* (498) order_by_clause_opt ::= ORDER BY sort_specification_list */
+ { 440, 0 }, /* (499) slimit_clause_opt ::= */
+ { 440, -2 }, /* (500) slimit_clause_opt ::= SLIMIT NK_INTEGER */
+ { 440, -4 }, /* (501) slimit_clause_opt ::= SLIMIT NK_INTEGER SOFFSET NK_INTEGER */
+ { 440, -4 }, /* (502) slimit_clause_opt ::= SLIMIT NK_INTEGER NK_COMMA NK_INTEGER */
+ { 441, 0 }, /* (503) limit_clause_opt ::= */
+ { 441, -2 }, /* (504) limit_clause_opt ::= LIMIT NK_INTEGER */
+ { 441, -4 }, /* (505) limit_clause_opt ::= LIMIT NK_INTEGER OFFSET NK_INTEGER */
+ { 441, -4 }, /* (506) limit_clause_opt ::= LIMIT NK_INTEGER NK_COMMA NK_INTEGER */
+ { 393, -3 }, /* (507) subquery ::= NK_LP query_expression NK_RP */
+ { 393, -3 }, /* (508) subquery ::= NK_LP subquery NK_RP */
+ { 421, -1 }, /* (509) search_condition ::= common_expression */
+ { 444, -1 }, /* (510) sort_specification_list ::= sort_specification */
+ { 444, -3 }, /* (511) sort_specification_list ::= sort_specification_list NK_COMMA sort_specification */
+ { 445, -3 }, /* (512) sort_specification ::= expr_or_subquery ordering_specification_opt null_ordering_opt */
+ { 446, 0 }, /* (513) ordering_specification_opt ::= */
+ { 446, -1 }, /* (514) ordering_specification_opt ::= ASC */
+ { 446, -1 }, /* (515) ordering_specification_opt ::= DESC */
+ { 447, 0 }, /* (516) null_ordering_opt ::= */
+ { 447, -2 }, /* (517) null_ordering_opt ::= NULLS FIRST */
+ { 447, -2 }, /* (518) null_ordering_opt ::= NULLS LAST */
};
static void yy_accept(yyParser*); /* Forward Declaration */
@@ -3367,11 +3619,11 @@ static YYACTIONTYPE yy_reduce(
YYMINORTYPE yylhsminor;
case 0: /* cmd ::= CREATE ACCOUNT NK_ID PASS NK_STRING account_options */
{ pCxt->errCode = generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_EXPRIE_STATEMENT); }
- yy_destructor(yypParser,306,&yymsp[0].minor);
+ yy_destructor(yypParser,317,&yymsp[0].minor);
break;
case 1: /* cmd ::= ALTER ACCOUNT NK_ID alter_account_options */
{ pCxt->errCode = generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_EXPRIE_STATEMENT); }
- yy_destructor(yypParser,307,&yymsp[0].minor);
+ yy_destructor(yypParser,318,&yymsp[0].minor);
break;
case 2: /* account_options ::= */
{ }
@@ -3385,20 +3637,20 @@ static YYACTIONTYPE yy_reduce(
case 9: /* account_options ::= account_options USERS literal */ yytestcase(yyruleno==9);
case 10: /* account_options ::= account_options CONNS literal */ yytestcase(yyruleno==10);
case 11: /* account_options ::= account_options STATE literal */ yytestcase(yyruleno==11);
-{ yy_destructor(yypParser,306,&yymsp[-2].minor);
+{ yy_destructor(yypParser,317,&yymsp[-2].minor);
{ }
- yy_destructor(yypParser,308,&yymsp[0].minor);
+ yy_destructor(yypParser,319,&yymsp[0].minor);
}
break;
case 12: /* alter_account_options ::= alter_account_option */
-{ yy_destructor(yypParser,309,&yymsp[0].minor);
+{ yy_destructor(yypParser,320,&yymsp[0].minor);
{ }
}
break;
case 13: /* alter_account_options ::= alter_account_options alter_account_option */
-{ yy_destructor(yypParser,307,&yymsp[-1].minor);
+{ yy_destructor(yypParser,318,&yymsp[-1].minor);
{ }
- yy_destructor(yypParser,309,&yymsp[0].minor);
+ yy_destructor(yypParser,320,&yymsp[0].minor);
}
break;
case 14: /* alter_account_option ::= PASS literal */
@@ -3412,72 +3664,72 @@ static YYACTIONTYPE yy_reduce(
case 22: /* alter_account_option ::= CONNS literal */ yytestcase(yyruleno==22);
case 23: /* alter_account_option ::= STATE literal */ yytestcase(yyruleno==23);
{ }
- yy_destructor(yypParser,308,&yymsp[0].minor);
+ yy_destructor(yypParser,319,&yymsp[0].minor);
break;
case 24: /* cmd ::= CREATE USER user_name PASS NK_STRING sysinfo_opt */
-{ pCxt->pRootNode = createCreateUserStmt(pCxt, &yymsp[-3].minor.yy209, &yymsp[-1].minor.yy0, yymsp[0].minor.yy59); }
+{ pCxt->pRootNode = createCreateUserStmt(pCxt, &yymsp[-3].minor.yy673, &yymsp[-1].minor.yy0, yymsp[0].minor.yy439); }
break;
case 25: /* cmd ::= ALTER USER user_name PASS NK_STRING */
-{ pCxt->pRootNode = createAlterUserStmt(pCxt, &yymsp[-2].minor.yy209, TSDB_ALTER_USER_PASSWD, &yymsp[0].minor.yy0); }
+{ pCxt->pRootNode = createAlterUserStmt(pCxt, &yymsp[-2].minor.yy673, TSDB_ALTER_USER_PASSWD, &yymsp[0].minor.yy0); }
break;
case 26: /* cmd ::= ALTER USER user_name ENABLE NK_INTEGER */
-{ pCxt->pRootNode = createAlterUserStmt(pCxt, &yymsp[-2].minor.yy209, TSDB_ALTER_USER_ENABLE, &yymsp[0].minor.yy0); }
+{ pCxt->pRootNode = createAlterUserStmt(pCxt, &yymsp[-2].minor.yy673, TSDB_ALTER_USER_ENABLE, &yymsp[0].minor.yy0); }
break;
case 27: /* cmd ::= ALTER USER user_name SYSINFO NK_INTEGER */
-{ pCxt->pRootNode = createAlterUserStmt(pCxt, &yymsp[-2].minor.yy209, TSDB_ALTER_USER_SYSINFO, &yymsp[0].minor.yy0); }
+{ pCxt->pRootNode = createAlterUserStmt(pCxt, &yymsp[-2].minor.yy673, TSDB_ALTER_USER_SYSINFO, &yymsp[0].minor.yy0); }
break;
case 28: /* cmd ::= DROP USER user_name */
-{ pCxt->pRootNode = createDropUserStmt(pCxt, &yymsp[0].minor.yy209); }
+{ pCxt->pRootNode = createDropUserStmt(pCxt, &yymsp[0].minor.yy673); }
break;
case 29: /* sysinfo_opt ::= */
-{ yymsp[1].minor.yy59 = 1; }
+{ yymsp[1].minor.yy439 = 1; }
break;
case 30: /* sysinfo_opt ::= SYSINFO NK_INTEGER */
-{ yymsp[-1].minor.yy59 = taosStr2Int8(yymsp[0].minor.yy0.z, NULL, 10); }
+{ yymsp[-1].minor.yy439 = taosStr2Int8(yymsp[0].minor.yy0.z, NULL, 10); }
break;
case 31: /* cmd ::= GRANT privileges ON priv_level TO user_name */
-{ pCxt->pRootNode = createGrantStmt(pCxt, yymsp[-4].minor.yy69, &yymsp[-2].minor.yy209, &yymsp[0].minor.yy209); }
+{ pCxt->pRootNode = createGrantStmt(pCxt, yymsp[-4].minor.yy221, &yymsp[-2].minor.yy673, &yymsp[0].minor.yy673); }
break;
case 32: /* cmd ::= REVOKE privileges ON priv_level FROM user_name */
-{ pCxt->pRootNode = createRevokeStmt(pCxt, yymsp[-4].minor.yy69, &yymsp[-2].minor.yy209, &yymsp[0].minor.yy209); }
+{ pCxt->pRootNode = createRevokeStmt(pCxt, yymsp[-4].minor.yy221, &yymsp[-2].minor.yy673, &yymsp[0].minor.yy673); }
break;
case 33: /* privileges ::= ALL */
-{ yymsp[0].minor.yy69 = PRIVILEGE_TYPE_ALL; }
+{ yymsp[0].minor.yy221 = PRIVILEGE_TYPE_ALL; }
break;
case 34: /* privileges ::= priv_type_list */
case 35: /* priv_type_list ::= priv_type */ yytestcase(yyruleno==35);
-{ yylhsminor.yy69 = yymsp[0].minor.yy69; }
- yymsp[0].minor.yy69 = yylhsminor.yy69;
+{ yylhsminor.yy221 = yymsp[0].minor.yy221; }
+ yymsp[0].minor.yy221 = yylhsminor.yy221;
break;
case 36: /* priv_type_list ::= priv_type_list NK_COMMA priv_type */
-{ yylhsminor.yy69 = yymsp[-2].minor.yy69 | yymsp[0].minor.yy69; }
- yymsp[-2].minor.yy69 = yylhsminor.yy69;
+{ yylhsminor.yy221 = yymsp[-2].minor.yy221 | yymsp[0].minor.yy221; }
+ yymsp[-2].minor.yy221 = yylhsminor.yy221;
break;
case 37: /* priv_type ::= READ */
-{ yymsp[0].minor.yy69 = PRIVILEGE_TYPE_READ; }
+{ yymsp[0].minor.yy221 = PRIVILEGE_TYPE_READ; }
break;
case 38: /* priv_type ::= WRITE */
-{ yymsp[0].minor.yy69 = PRIVILEGE_TYPE_WRITE; }
+{ yymsp[0].minor.yy221 = PRIVILEGE_TYPE_WRITE; }
break;
case 39: /* priv_level ::= NK_STAR NK_DOT NK_STAR */
-{ yylhsminor.yy209 = yymsp[-2].minor.yy0; }
- yymsp[-2].minor.yy209 = yylhsminor.yy209;
+{ yylhsminor.yy673 = yymsp[-2].minor.yy0; }
+ yymsp[-2].minor.yy673 = yylhsminor.yy673;
break;
case 40: /* priv_level ::= db_name NK_DOT NK_STAR */
-{ yylhsminor.yy209 = yymsp[-2].minor.yy209; }
- yymsp[-2].minor.yy209 = yylhsminor.yy209;
+{ yylhsminor.yy673 = yymsp[-2].minor.yy673; }
+ yymsp[-2].minor.yy673 = yylhsminor.yy673;
break;
case 41: /* cmd ::= CREATE DNODE dnode_endpoint */
-{ pCxt->pRootNode = createCreateDnodeStmt(pCxt, &yymsp[0].minor.yy209, NULL); }
+{ pCxt->pRootNode = createCreateDnodeStmt(pCxt, &yymsp[0].minor.yy673, NULL); }
break;
case 42: /* cmd ::= CREATE DNODE dnode_endpoint PORT NK_INTEGER */
-{ pCxt->pRootNode = createCreateDnodeStmt(pCxt, &yymsp[-2].minor.yy209, &yymsp[0].minor.yy0); }
+{ pCxt->pRootNode = createCreateDnodeStmt(pCxt, &yymsp[-2].minor.yy673, &yymsp[0].minor.yy0); }
break;
case 43: /* cmd ::= DROP DNODE NK_INTEGER */
{ pCxt->pRootNode = createDropDnodeStmt(pCxt, &yymsp[0].minor.yy0); }
break;
case 44: /* cmd ::= DROP DNODE dnode_endpoint */
-{ pCxt->pRootNode = createDropDnodeStmt(pCxt, &yymsp[0].minor.yy209); }
+{ pCxt->pRootNode = createDropDnodeStmt(pCxt, &yymsp[0].minor.yy673); }
break;
case 45: /* cmd ::= ALTER DNODE NK_INTEGER NK_STRING */
{ pCxt->pRootNode = createAlterDnodeStmt(pCxt, &yymsp[-1].minor.yy0, &yymsp[0].minor.yy0, NULL); }
@@ -3494,31 +3746,31 @@ static YYACTIONTYPE yy_reduce(
case 49: /* dnode_endpoint ::= NK_STRING */
case 50: /* dnode_endpoint ::= NK_ID */ yytestcase(yyruleno==50);
case 51: /* dnode_endpoint ::= NK_IPTOKEN */ yytestcase(yyruleno==51);
- case 312: /* db_name ::= NK_ID */ yytestcase(yyruleno==312);
- case 313: /* table_name ::= NK_ID */ yytestcase(yyruleno==313);
- case 314: /* column_name ::= NK_ID */ yytestcase(yyruleno==314);
- case 315: /* function_name ::= NK_ID */ yytestcase(yyruleno==315);
- case 316: /* table_alias ::= NK_ID */ yytestcase(yyruleno==316);
- case 317: /* column_alias ::= NK_ID */ yytestcase(yyruleno==317);
- case 318: /* user_name ::= NK_ID */ yytestcase(yyruleno==318);
- case 319: /* topic_name ::= NK_ID */ yytestcase(yyruleno==319);
- case 320: /* stream_name ::= NK_ID */ yytestcase(yyruleno==320);
- case 321: /* cgroup_name ::= NK_ID */ yytestcase(yyruleno==321);
- case 357: /* noarg_func ::= NOW */ yytestcase(yyruleno==357);
- case 358: /* noarg_func ::= TODAY */ yytestcase(yyruleno==358);
- case 359: /* noarg_func ::= TIMEZONE */ yytestcase(yyruleno==359);
- case 360: /* noarg_func ::= DATABASE */ yytestcase(yyruleno==360);
- case 361: /* noarg_func ::= CLIENT_VERSION */ yytestcase(yyruleno==361);
- case 362: /* noarg_func ::= SERVER_VERSION */ yytestcase(yyruleno==362);
- case 363: /* noarg_func ::= SERVER_STATUS */ yytestcase(yyruleno==363);
- case 364: /* noarg_func ::= CURRENT_USER */ yytestcase(yyruleno==364);
- case 365: /* noarg_func ::= USER */ yytestcase(yyruleno==365);
- case 366: /* star_func ::= COUNT */ yytestcase(yyruleno==366);
- case 367: /* star_func ::= FIRST */ yytestcase(yyruleno==367);
- case 368: /* star_func ::= LAST */ yytestcase(yyruleno==368);
- case 369: /* star_func ::= LAST_ROW */ yytestcase(yyruleno==369);
-{ yylhsminor.yy209 = yymsp[0].minor.yy0; }
- yymsp[0].minor.yy209 = yylhsminor.yy209;
+ case 323: /* db_name ::= NK_ID */ yytestcase(yyruleno==323);
+ case 324: /* table_name ::= NK_ID */ yytestcase(yyruleno==324);
+ case 325: /* column_name ::= NK_ID */ yytestcase(yyruleno==325);
+ case 326: /* function_name ::= NK_ID */ yytestcase(yyruleno==326);
+ case 327: /* table_alias ::= NK_ID */ yytestcase(yyruleno==327);
+ case 328: /* column_alias ::= NK_ID */ yytestcase(yyruleno==328);
+ case 329: /* user_name ::= NK_ID */ yytestcase(yyruleno==329);
+ case 330: /* topic_name ::= NK_ID */ yytestcase(yyruleno==330);
+ case 331: /* stream_name ::= NK_ID */ yytestcase(yyruleno==331);
+ case 332: /* cgroup_name ::= NK_ID */ yytestcase(yyruleno==332);
+ case 371: /* noarg_func ::= NOW */ yytestcase(yyruleno==371);
+ case 372: /* noarg_func ::= TODAY */ yytestcase(yyruleno==372);
+ case 373: /* noarg_func ::= TIMEZONE */ yytestcase(yyruleno==373);
+ case 374: /* noarg_func ::= DATABASE */ yytestcase(yyruleno==374);
+ case 375: /* noarg_func ::= CLIENT_VERSION */ yytestcase(yyruleno==375);
+ case 376: /* noarg_func ::= SERVER_VERSION */ yytestcase(yyruleno==376);
+ case 377: /* noarg_func ::= SERVER_STATUS */ yytestcase(yyruleno==377);
+ case 378: /* noarg_func ::= CURRENT_USER */ yytestcase(yyruleno==378);
+ case 379: /* noarg_func ::= USER */ yytestcase(yyruleno==379);
+ case 380: /* star_func ::= COUNT */ yytestcase(yyruleno==380);
+ case 381: /* star_func ::= FIRST */ yytestcase(yyruleno==381);
+ case 382: /* star_func ::= LAST */ yytestcase(yyruleno==382);
+ case 383: /* star_func ::= LAST_ROW */ yytestcase(yyruleno==383);
+{ yylhsminor.yy673 = yymsp[0].minor.yy0; }
+ yymsp[0].minor.yy673 = yylhsminor.yy673;
break;
case 52: /* cmd ::= ALTER LOCAL NK_STRING */
{ pCxt->pRootNode = createAlterLocalStmt(pCxt, &yymsp[0].minor.yy0, NULL); }
@@ -3551,1261 +3803,1313 @@ static YYACTIONTYPE yy_reduce(
{ pCxt->pRootNode = createDropComponentNodeStmt(pCxt, QUERY_NODE_DROP_MNODE_STMT, &yymsp[0].minor.yy0); }
break;
case 62: /* cmd ::= CREATE DATABASE not_exists_opt db_name db_options */
-{ pCxt->pRootNode = createCreateDatabaseStmt(pCxt, yymsp[-2].minor.yy293, &yymsp[-1].minor.yy209, yymsp[0].minor.yy272); }
+{ pCxt->pRootNode = createCreateDatabaseStmt(pCxt, yymsp[-2].minor.yy89, &yymsp[-1].minor.yy673, yymsp[0].minor.yy616); }
break;
case 63: /* cmd ::= DROP DATABASE exists_opt db_name */
-{ pCxt->pRootNode = createDropDatabaseStmt(pCxt, yymsp[-1].minor.yy293, &yymsp[0].minor.yy209); }
+{ pCxt->pRootNode = createDropDatabaseStmt(pCxt, yymsp[-1].minor.yy89, &yymsp[0].minor.yy673); }
break;
case 64: /* cmd ::= USE db_name */
-{ pCxt->pRootNode = createUseDatabaseStmt(pCxt, &yymsp[0].minor.yy209); }
+{ pCxt->pRootNode = createUseDatabaseStmt(pCxt, &yymsp[0].minor.yy673); }
break;
case 65: /* cmd ::= ALTER DATABASE db_name alter_db_options */
-{ pCxt->pRootNode = createAlterDatabaseStmt(pCxt, &yymsp[-1].minor.yy209, yymsp[0].minor.yy272); }
+{ pCxt->pRootNode = createAlterDatabaseStmt(pCxt, &yymsp[-1].minor.yy673, yymsp[0].minor.yy616); }
break;
case 66: /* cmd ::= FLUSH DATABASE db_name */
-{ pCxt->pRootNode = createFlushDatabaseStmt(pCxt, &yymsp[0].minor.yy209); }
+{ pCxt->pRootNode = createFlushDatabaseStmt(pCxt, &yymsp[0].minor.yy673); }
break;
- case 67: /* cmd ::= TRIM DATABASE db_name */
-{ pCxt->pRootNode = createTrimDatabaseStmt(pCxt, &yymsp[0].minor.yy209); }
+ case 67: /* cmd ::= TRIM DATABASE db_name speed_opt */
+{ pCxt->pRootNode = createTrimDatabaseStmt(pCxt, &yymsp[-1].minor.yy673, yymsp[0].minor.yy452); }
break;
case 68: /* not_exists_opt ::= IF NOT EXISTS */
-{ yymsp[-2].minor.yy293 = true; }
+{ yymsp[-2].minor.yy89 = true; }
break;
case 69: /* not_exists_opt ::= */
case 71: /* exists_opt ::= */ yytestcase(yyruleno==71);
- case 255: /* analyze_opt ::= */ yytestcase(yyruleno==255);
- case 262: /* agg_func_opt ::= */ yytestcase(yyruleno==262);
- case 423: /* set_quantifier_opt ::= */ yytestcase(yyruleno==423);
-{ yymsp[1].minor.yy293 = false; }
+ case 264: /* analyze_opt ::= */ yytestcase(yyruleno==264);
+ case 271: /* agg_func_opt ::= */ yytestcase(yyruleno==271);
+ case 444: /* set_quantifier_opt ::= */ yytestcase(yyruleno==444);
+{ yymsp[1].minor.yy89 = false; }
break;
case 70: /* exists_opt ::= IF EXISTS */
-{ yymsp[-1].minor.yy293 = true; }
+{ yymsp[-1].minor.yy89 = true; }
break;
case 72: /* db_options ::= */
-{ yymsp[1].minor.yy272 = createDefaultDatabaseOptions(pCxt); }
+{ yymsp[1].minor.yy616 = createDefaultDatabaseOptions(pCxt); }
break;
case 73: /* db_options ::= db_options BUFFER NK_INTEGER */
-{ yylhsminor.yy272 = setDatabaseOption(pCxt, yymsp[-2].minor.yy272, DB_OPTION_BUFFER, &yymsp[0].minor.yy0); }
- yymsp[-2].minor.yy272 = yylhsminor.yy272;
+{ yylhsminor.yy616 = setDatabaseOption(pCxt, yymsp[-2].minor.yy616, DB_OPTION_BUFFER, &yymsp[0].minor.yy0); }
+ yymsp[-2].minor.yy616 = yylhsminor.yy616;
break;
case 74: /* db_options ::= db_options CACHEMODEL NK_STRING */
-{ yylhsminor.yy272 = setDatabaseOption(pCxt, yymsp[-2].minor.yy272, DB_OPTION_CACHEMODEL, &yymsp[0].minor.yy0); }
- yymsp[-2].minor.yy272 = yylhsminor.yy272;
+{ yylhsminor.yy616 = setDatabaseOption(pCxt, yymsp[-2].minor.yy616, DB_OPTION_CACHEMODEL, &yymsp[0].minor.yy0); }
+ yymsp[-2].minor.yy616 = yylhsminor.yy616;
break;
case 75: /* db_options ::= db_options CACHESIZE NK_INTEGER */
-{ yylhsminor.yy272 = setDatabaseOption(pCxt, yymsp[-2].minor.yy272, DB_OPTION_CACHESIZE, &yymsp[0].minor.yy0); }
- yymsp[-2].minor.yy272 = yylhsminor.yy272;
+{ yylhsminor.yy616 = setDatabaseOption(pCxt, yymsp[-2].minor.yy616, DB_OPTION_CACHESIZE, &yymsp[0].minor.yy0); }
+ yymsp[-2].minor.yy616 = yylhsminor.yy616;
break;
case 76: /* db_options ::= db_options COMP NK_INTEGER */
-{ yylhsminor.yy272 = setDatabaseOption(pCxt, yymsp[-2].minor.yy272, DB_OPTION_COMP, &yymsp[0].minor.yy0); }
- yymsp[-2].minor.yy272 = yylhsminor.yy272;
+{ yylhsminor.yy616 = setDatabaseOption(pCxt, yymsp[-2].minor.yy616, DB_OPTION_COMP, &yymsp[0].minor.yy0); }
+ yymsp[-2].minor.yy616 = yylhsminor.yy616;
break;
case 77: /* db_options ::= db_options DURATION NK_INTEGER */
case 78: /* db_options ::= db_options DURATION NK_VARIABLE */ yytestcase(yyruleno==78);
-{ yylhsminor.yy272 = setDatabaseOption(pCxt, yymsp[-2].minor.yy272, DB_OPTION_DAYS, &yymsp[0].minor.yy0); }
- yymsp[-2].minor.yy272 = yylhsminor.yy272;
+{ yylhsminor.yy616 = setDatabaseOption(pCxt, yymsp[-2].minor.yy616, DB_OPTION_DAYS, &yymsp[0].minor.yy0); }
+ yymsp[-2].minor.yy616 = yylhsminor.yy616;
break;
case 79: /* db_options ::= db_options MAXROWS NK_INTEGER */
-{ yylhsminor.yy272 = setDatabaseOption(pCxt, yymsp[-2].minor.yy272, DB_OPTION_MAXROWS, &yymsp[0].minor.yy0); }
- yymsp[-2].minor.yy272 = yylhsminor.yy272;
+{ yylhsminor.yy616 = setDatabaseOption(pCxt, yymsp[-2].minor.yy616, DB_OPTION_MAXROWS, &yymsp[0].minor.yy0); }
+ yymsp[-2].minor.yy616 = yylhsminor.yy616;
break;
case 80: /* db_options ::= db_options MINROWS NK_INTEGER */
-{ yylhsminor.yy272 = setDatabaseOption(pCxt, yymsp[-2].minor.yy272, DB_OPTION_MINROWS, &yymsp[0].minor.yy0); }
- yymsp[-2].minor.yy272 = yylhsminor.yy272;
+{ yylhsminor.yy616 = setDatabaseOption(pCxt, yymsp[-2].minor.yy616, DB_OPTION_MINROWS, &yymsp[0].minor.yy0); }
+ yymsp[-2].minor.yy616 = yylhsminor.yy616;
break;
case 81: /* db_options ::= db_options KEEP integer_list */
case 82: /* db_options ::= db_options KEEP variable_list */ yytestcase(yyruleno==82);
-{ yylhsminor.yy272 = setDatabaseOption(pCxt, yymsp[-2].minor.yy272, DB_OPTION_KEEP, yymsp[0].minor.yy172); }
- yymsp[-2].minor.yy272 = yylhsminor.yy272;
+{ yylhsminor.yy616 = setDatabaseOption(pCxt, yymsp[-2].minor.yy616, DB_OPTION_KEEP, yymsp[0].minor.yy152); }
+ yymsp[-2].minor.yy616 = yylhsminor.yy616;
break;
case 83: /* db_options ::= db_options PAGES NK_INTEGER */
-{ yylhsminor.yy272 = setDatabaseOption(pCxt, yymsp[-2].minor.yy272, DB_OPTION_PAGES, &yymsp[0].minor.yy0); }
- yymsp[-2].minor.yy272 = yylhsminor.yy272;
+{ yylhsminor.yy616 = setDatabaseOption(pCxt, yymsp[-2].minor.yy616, DB_OPTION_PAGES, &yymsp[0].minor.yy0); }
+ yymsp[-2].minor.yy616 = yylhsminor.yy616;
break;
case 84: /* db_options ::= db_options PAGESIZE NK_INTEGER */
-{ yylhsminor.yy272 = setDatabaseOption(pCxt, yymsp[-2].minor.yy272, DB_OPTION_PAGESIZE, &yymsp[0].minor.yy0); }
- yymsp[-2].minor.yy272 = yylhsminor.yy272;
+{ yylhsminor.yy616 = setDatabaseOption(pCxt, yymsp[-2].minor.yy616, DB_OPTION_PAGESIZE, &yymsp[0].minor.yy0); }
+ yymsp[-2].minor.yy616 = yylhsminor.yy616;
break;
- case 85: /* db_options ::= db_options PRECISION NK_STRING */
-{ yylhsminor.yy272 = setDatabaseOption(pCxt, yymsp[-2].minor.yy272, DB_OPTION_PRECISION, &yymsp[0].minor.yy0); }
- yymsp[-2].minor.yy272 = yylhsminor.yy272;
+ case 85: /* db_options ::= db_options TSDB_PAGESIZE NK_INTEGER */
+{ yylhsminor.yy616 = setDatabaseOption(pCxt, yymsp[-2].minor.yy616, DB_OPTION_TSDB_PAGESIZE, &yymsp[0].minor.yy0); }
+ yymsp[-2].minor.yy616 = yylhsminor.yy616;
break;
- case 86: /* db_options ::= db_options REPLICA NK_INTEGER */
-{ yylhsminor.yy272 = setDatabaseOption(pCxt, yymsp[-2].minor.yy272, DB_OPTION_REPLICA, &yymsp[0].minor.yy0); }
- yymsp[-2].minor.yy272 = yylhsminor.yy272;
+ case 86: /* db_options ::= db_options PRECISION NK_STRING */
+{ yylhsminor.yy616 = setDatabaseOption(pCxt, yymsp[-2].minor.yy616, DB_OPTION_PRECISION, &yymsp[0].minor.yy0); }
+ yymsp[-2].minor.yy616 = yylhsminor.yy616;
break;
- case 87: /* db_options ::= db_options STRICT NK_STRING */
-{ yylhsminor.yy272 = setDatabaseOption(pCxt, yymsp[-2].minor.yy272, DB_OPTION_STRICT, &yymsp[0].minor.yy0); }
- yymsp[-2].minor.yy272 = yylhsminor.yy272;
+ case 87: /* db_options ::= db_options REPLICA NK_INTEGER */
+{ yylhsminor.yy616 = setDatabaseOption(pCxt, yymsp[-2].minor.yy616, DB_OPTION_REPLICA, &yymsp[0].minor.yy0); }
+ yymsp[-2].minor.yy616 = yylhsminor.yy616;
break;
- case 88: /* db_options ::= db_options VGROUPS NK_INTEGER */
-{ yylhsminor.yy272 = setDatabaseOption(pCxt, yymsp[-2].minor.yy272, DB_OPTION_VGROUPS, &yymsp[0].minor.yy0); }
- yymsp[-2].minor.yy272 = yylhsminor.yy272;
+ case 88: /* db_options ::= db_options STRICT NK_STRING */
+{ yylhsminor.yy616 = setDatabaseOption(pCxt, yymsp[-2].minor.yy616, DB_OPTION_STRICT, &yymsp[0].minor.yy0); }
+ yymsp[-2].minor.yy616 = yylhsminor.yy616;
break;
- case 89: /* db_options ::= db_options SINGLE_STABLE NK_INTEGER */
-{ yylhsminor.yy272 = setDatabaseOption(pCxt, yymsp[-2].minor.yy272, DB_OPTION_SINGLE_STABLE, &yymsp[0].minor.yy0); }
- yymsp[-2].minor.yy272 = yylhsminor.yy272;
+ case 89: /* db_options ::= db_options VGROUPS NK_INTEGER */
+{ yylhsminor.yy616 = setDatabaseOption(pCxt, yymsp[-2].minor.yy616, DB_OPTION_VGROUPS, &yymsp[0].minor.yy0); }
+ yymsp[-2].minor.yy616 = yylhsminor.yy616;
break;
- case 90: /* db_options ::= db_options RETENTIONS retention_list */
-{ yylhsminor.yy272 = setDatabaseOption(pCxt, yymsp[-2].minor.yy272, DB_OPTION_RETENTIONS, yymsp[0].minor.yy172); }
- yymsp[-2].minor.yy272 = yylhsminor.yy272;
+ case 90: /* db_options ::= db_options SINGLE_STABLE NK_INTEGER */
+{ yylhsminor.yy616 = setDatabaseOption(pCxt, yymsp[-2].minor.yy616, DB_OPTION_SINGLE_STABLE, &yymsp[0].minor.yy0); }
+ yymsp[-2].minor.yy616 = yylhsminor.yy616;
break;
- case 91: /* db_options ::= db_options SCHEMALESS NK_INTEGER */
-{ yylhsminor.yy272 = setDatabaseOption(pCxt, yymsp[-2].minor.yy272, DB_OPTION_SCHEMALESS, &yymsp[0].minor.yy0); }
- yymsp[-2].minor.yy272 = yylhsminor.yy272;
+ case 91: /* db_options ::= db_options RETENTIONS retention_list */
+{ yylhsminor.yy616 = setDatabaseOption(pCxt, yymsp[-2].minor.yy616, DB_OPTION_RETENTIONS, yymsp[0].minor.yy152); }
+ yymsp[-2].minor.yy616 = yylhsminor.yy616;
break;
- case 92: /* db_options ::= db_options WAL_LEVEL NK_INTEGER */
-{ yylhsminor.yy272 = setDatabaseOption(pCxt, yymsp[-2].minor.yy272, DB_OPTION_WAL, &yymsp[0].minor.yy0); }
- yymsp[-2].minor.yy272 = yylhsminor.yy272;
+ case 92: /* db_options ::= db_options SCHEMALESS NK_INTEGER */
+{ yylhsminor.yy616 = setDatabaseOption(pCxt, yymsp[-2].minor.yy616, DB_OPTION_SCHEMALESS, &yymsp[0].minor.yy0); }
+ yymsp[-2].minor.yy616 = yylhsminor.yy616;
break;
- case 93: /* db_options ::= db_options WAL_FSYNC_PERIOD NK_INTEGER */
-{ yylhsminor.yy272 = setDatabaseOption(pCxt, yymsp[-2].minor.yy272, DB_OPTION_FSYNC, &yymsp[0].minor.yy0); }
- yymsp[-2].minor.yy272 = yylhsminor.yy272;
+ case 93: /* db_options ::= db_options WAL_LEVEL NK_INTEGER */
+{ yylhsminor.yy616 = setDatabaseOption(pCxt, yymsp[-2].minor.yy616, DB_OPTION_WAL, &yymsp[0].minor.yy0); }
+ yymsp[-2].minor.yy616 = yylhsminor.yy616;
break;
- case 94: /* db_options ::= db_options WAL_RETENTION_PERIOD NK_INTEGER */
-{ yylhsminor.yy272 = setDatabaseOption(pCxt, yymsp[-2].minor.yy272, DB_OPTION_WAL_RETENTION_PERIOD, &yymsp[0].minor.yy0); }
- yymsp[-2].minor.yy272 = yylhsminor.yy272;
+ case 94: /* db_options ::= db_options WAL_FSYNC_PERIOD NK_INTEGER */
+{ yylhsminor.yy616 = setDatabaseOption(pCxt, yymsp[-2].minor.yy616, DB_OPTION_FSYNC, &yymsp[0].minor.yy0); }
+ yymsp[-2].minor.yy616 = yylhsminor.yy616;
break;
- case 95: /* db_options ::= db_options WAL_RETENTION_PERIOD NK_MINUS NK_INTEGER */
+ case 95: /* db_options ::= db_options WAL_RETENTION_PERIOD NK_INTEGER */
+{ yylhsminor.yy616 = setDatabaseOption(pCxt, yymsp[-2].minor.yy616, DB_OPTION_WAL_RETENTION_PERIOD, &yymsp[0].minor.yy0); }
+ yymsp[-2].minor.yy616 = yylhsminor.yy616;
+ break;
+ case 96: /* db_options ::= db_options WAL_RETENTION_PERIOD NK_MINUS NK_INTEGER */
{
SToken t = yymsp[-1].minor.yy0;
t.n = (yymsp[0].minor.yy0.z + yymsp[0].minor.yy0.n) - yymsp[-1].minor.yy0.z;
- yylhsminor.yy272 = setDatabaseOption(pCxt, yymsp[-3].minor.yy272, DB_OPTION_WAL_RETENTION_PERIOD, &t);
+ yylhsminor.yy616 = setDatabaseOption(pCxt, yymsp[-3].minor.yy616, DB_OPTION_WAL_RETENTION_PERIOD, &t);
}
- yymsp[-3].minor.yy272 = yylhsminor.yy272;
+ yymsp[-3].minor.yy616 = yylhsminor.yy616;
break;
- case 96: /* db_options ::= db_options WAL_RETENTION_SIZE NK_INTEGER */
-{ yylhsminor.yy272 = setDatabaseOption(pCxt, yymsp[-2].minor.yy272, DB_OPTION_WAL_RETENTION_SIZE, &yymsp[0].minor.yy0); }
- yymsp[-2].minor.yy272 = yylhsminor.yy272;
+ case 97: /* db_options ::= db_options WAL_RETENTION_SIZE NK_INTEGER */
+{ yylhsminor.yy616 = setDatabaseOption(pCxt, yymsp[-2].minor.yy616, DB_OPTION_WAL_RETENTION_SIZE, &yymsp[0].minor.yy0); }
+ yymsp[-2].minor.yy616 = yylhsminor.yy616;
break;
- case 97: /* db_options ::= db_options WAL_RETENTION_SIZE NK_MINUS NK_INTEGER */
+ case 98: /* db_options ::= db_options WAL_RETENTION_SIZE NK_MINUS NK_INTEGER */
{
SToken t = yymsp[-1].minor.yy0;
t.n = (yymsp[0].minor.yy0.z + yymsp[0].minor.yy0.n) - yymsp[-1].minor.yy0.z;
- yylhsminor.yy272 = setDatabaseOption(pCxt, yymsp[-3].minor.yy272, DB_OPTION_WAL_RETENTION_SIZE, &t);
+ yylhsminor.yy616 = setDatabaseOption(pCxt, yymsp[-3].minor.yy616, DB_OPTION_WAL_RETENTION_SIZE, &t);
}
- yymsp[-3].minor.yy272 = yylhsminor.yy272;
- break;
- case 98: /* db_options ::= db_options WAL_ROLL_PERIOD NK_INTEGER */
-{ yylhsminor.yy272 = setDatabaseOption(pCxt, yymsp[-2].minor.yy272, DB_OPTION_WAL_ROLL_PERIOD, &yymsp[0].minor.yy0); }
- yymsp[-2].minor.yy272 = yylhsminor.yy272;
- break;
- case 99: /* db_options ::= db_options WAL_SEGMENT_SIZE NK_INTEGER */
-{ yylhsminor.yy272 = setDatabaseOption(pCxt, yymsp[-2].minor.yy272, DB_OPTION_WAL_SEGMENT_SIZE, &yymsp[0].minor.yy0); }
- yymsp[-2].minor.yy272 = yylhsminor.yy272;
- break;
- case 100: /* alter_db_options ::= alter_db_option */
-{ yylhsminor.yy272 = createAlterDatabaseOptions(pCxt); yylhsminor.yy272 = setAlterDatabaseOption(pCxt, yylhsminor.yy272, &yymsp[0].minor.yy5); }
- yymsp[0].minor.yy272 = yylhsminor.yy272;
- break;
- case 101: /* alter_db_options ::= alter_db_options alter_db_option */
-{ yylhsminor.yy272 = setAlterDatabaseOption(pCxt, yymsp[-1].minor.yy272, &yymsp[0].minor.yy5); }
- yymsp[-1].minor.yy272 = yylhsminor.yy272;
- break;
- case 102: /* alter_db_option ::= CACHEMODEL NK_STRING */
-{ yymsp[-1].minor.yy5.type = DB_OPTION_CACHEMODEL; yymsp[-1].minor.yy5.val = yymsp[0].minor.yy0; }
- break;
- case 103: /* alter_db_option ::= CACHESIZE NK_INTEGER */
-{ yymsp[-1].minor.yy5.type = DB_OPTION_CACHESIZE; yymsp[-1].minor.yy5.val = yymsp[0].minor.yy0; }
- break;
- case 104: /* alter_db_option ::= WAL_FSYNC_PERIOD NK_INTEGER */
-{ yymsp[-1].minor.yy5.type = DB_OPTION_FSYNC; yymsp[-1].minor.yy5.val = yymsp[0].minor.yy0; }
- break;
- case 105: /* alter_db_option ::= KEEP integer_list */
- case 106: /* alter_db_option ::= KEEP variable_list */ yytestcase(yyruleno==106);
-{ yymsp[-1].minor.yy5.type = DB_OPTION_KEEP; yymsp[-1].minor.yy5.pList = yymsp[0].minor.yy172; }
- break;
- case 107: /* alter_db_option ::= WAL_LEVEL NK_INTEGER */
-{ yymsp[-1].minor.yy5.type = DB_OPTION_WAL; yymsp[-1].minor.yy5.val = yymsp[0].minor.yy0; }
- break;
- case 108: /* integer_list ::= NK_INTEGER */
-{ yylhsminor.yy172 = createNodeList(pCxt, createValueNode(pCxt, TSDB_DATA_TYPE_BIGINT, &yymsp[0].minor.yy0)); }
- yymsp[0].minor.yy172 = yylhsminor.yy172;
- break;
- case 109: /* integer_list ::= integer_list NK_COMMA NK_INTEGER */
- case 282: /* dnode_list ::= dnode_list DNODE NK_INTEGER */ yytestcase(yyruleno==282);
-{ yylhsminor.yy172 = addNodeToList(pCxt, yymsp[-2].minor.yy172, createValueNode(pCxt, TSDB_DATA_TYPE_BIGINT, &yymsp[0].minor.yy0)); }
- yymsp[-2].minor.yy172 = yylhsminor.yy172;
- break;
- case 110: /* variable_list ::= NK_VARIABLE */
-{ yylhsminor.yy172 = createNodeList(pCxt, createDurationValueNode(pCxt, &yymsp[0].minor.yy0)); }
- yymsp[0].minor.yy172 = yylhsminor.yy172;
- break;
- case 111: /* variable_list ::= variable_list NK_COMMA NK_VARIABLE */
-{ yylhsminor.yy172 = addNodeToList(pCxt, yymsp[-2].minor.yy172, createDurationValueNode(pCxt, &yymsp[0].minor.yy0)); }
- yymsp[-2].minor.yy172 = yylhsminor.yy172;
- break;
- case 112: /* retention_list ::= retention */
- case 132: /* multi_create_clause ::= create_subtable_clause */ yytestcase(yyruleno==132);
- case 135: /* multi_drop_clause ::= drop_table_clause */ yytestcase(yyruleno==135);
- case 142: /* column_def_list ::= column_def */ yytestcase(yyruleno==142);
- case 185: /* rollup_func_list ::= rollup_func_name */ yytestcase(yyruleno==185);
- case 190: /* col_name_list ::= col_name */ yytestcase(yyruleno==190);
- case 238: /* func_list ::= func */ yytestcase(yyruleno==238);
- case 310: /* literal_list ::= signed_literal */ yytestcase(yyruleno==310);
- case 372: /* other_para_list ::= star_func_para */ yytestcase(yyruleno==372);
- case 426: /* select_list ::= select_item */ yytestcase(yyruleno==426);
- case 480: /* sort_specification_list ::= sort_specification */ yytestcase(yyruleno==480);
-{ yylhsminor.yy172 = createNodeList(pCxt, yymsp[0].minor.yy272); }
- yymsp[0].minor.yy172 = yylhsminor.yy172;
- break;
- case 113: /* retention_list ::= retention_list NK_COMMA retention */
- case 143: /* column_def_list ::= column_def_list NK_COMMA column_def */ yytestcase(yyruleno==143);
- case 186: /* rollup_func_list ::= rollup_func_list NK_COMMA rollup_func_name */ yytestcase(yyruleno==186);
- case 191: /* col_name_list ::= col_name_list NK_COMMA col_name */ yytestcase(yyruleno==191);
- case 239: /* func_list ::= func_list NK_COMMA func */ yytestcase(yyruleno==239);
- case 311: /* literal_list ::= literal_list NK_COMMA signed_literal */ yytestcase(yyruleno==311);
- case 373: /* other_para_list ::= other_para_list NK_COMMA star_func_para */ yytestcase(yyruleno==373);
- case 427: /* select_list ::= select_list NK_COMMA select_item */ yytestcase(yyruleno==427);
- case 481: /* sort_specification_list ::= sort_specification_list NK_COMMA sort_specification */ yytestcase(yyruleno==481);
-{ yylhsminor.yy172 = addNodeToList(pCxt, yymsp[-2].minor.yy172, yymsp[0].minor.yy272); }
- yymsp[-2].minor.yy172 = yylhsminor.yy172;
- break;
- case 114: /* retention ::= NK_VARIABLE NK_COLON NK_VARIABLE */
-{ yylhsminor.yy272 = createNodeListNodeEx(pCxt, createDurationValueNode(pCxt, &yymsp[-2].minor.yy0), createDurationValueNode(pCxt, &yymsp[0].minor.yy0)); }
- yymsp[-2].minor.yy272 = yylhsminor.yy272;
- break;
- case 115: /* cmd ::= CREATE TABLE not_exists_opt full_table_name NK_LP column_def_list NK_RP tags_def_opt table_options */
- case 117: /* cmd ::= CREATE STABLE not_exists_opt full_table_name NK_LP column_def_list NK_RP tags_def table_options */ yytestcase(yyruleno==117);
-{ pCxt->pRootNode = createCreateTableStmt(pCxt, yymsp[-6].minor.yy293, yymsp[-5].minor.yy272, yymsp[-3].minor.yy172, yymsp[-1].minor.yy172, yymsp[0].minor.yy272); }
- break;
- case 116: /* cmd ::= CREATE TABLE multi_create_clause */
-{ pCxt->pRootNode = createCreateMultiTableStmt(pCxt, yymsp[0].minor.yy172); }
- break;
- case 118: /* cmd ::= DROP TABLE multi_drop_clause */
-{ pCxt->pRootNode = createDropTableStmt(pCxt, yymsp[0].minor.yy172); }
- break;
- case 119: /* cmd ::= DROP STABLE exists_opt full_table_name */
-{ pCxt->pRootNode = createDropSuperTableStmt(pCxt, yymsp[-1].minor.yy293, yymsp[0].minor.yy272); }
- break;
- case 120: /* cmd ::= ALTER TABLE alter_table_clause */
- case 284: /* cmd ::= query_expression */ yytestcase(yyruleno==284);
-{ pCxt->pRootNode = yymsp[0].minor.yy272; }
- break;
- case 121: /* cmd ::= ALTER STABLE alter_table_clause */
-{ pCxt->pRootNode = setAlterSuperTableType(yymsp[0].minor.yy272); }
- break;
- case 122: /* alter_table_clause ::= full_table_name alter_table_options */
-{ yylhsminor.yy272 = createAlterTableModifyOptions(pCxt, yymsp[-1].minor.yy272, yymsp[0].minor.yy272); }
- yymsp[-1].minor.yy272 = yylhsminor.yy272;
- break;
- case 123: /* alter_table_clause ::= full_table_name ADD COLUMN column_name type_name */
-{ yylhsminor.yy272 = createAlterTableAddModifyCol(pCxt, yymsp[-4].minor.yy272, TSDB_ALTER_TABLE_ADD_COLUMN, &yymsp[-1].minor.yy209, yymsp[0].minor.yy616); }
- yymsp[-4].minor.yy272 = yylhsminor.yy272;
- break;
- case 124: /* alter_table_clause ::= full_table_name DROP COLUMN column_name */
-{ yylhsminor.yy272 = createAlterTableDropCol(pCxt, yymsp[-3].minor.yy272, TSDB_ALTER_TABLE_DROP_COLUMN, &yymsp[0].minor.yy209); }
- yymsp[-3].minor.yy272 = yylhsminor.yy272;
- break;
- case 125: /* alter_table_clause ::= full_table_name MODIFY COLUMN column_name type_name */
-{ yylhsminor.yy272 = createAlterTableAddModifyCol(pCxt, yymsp[-4].minor.yy272, TSDB_ALTER_TABLE_UPDATE_COLUMN_BYTES, &yymsp[-1].minor.yy209, yymsp[0].minor.yy616); }
- yymsp[-4].minor.yy272 = yylhsminor.yy272;
- break;
- case 126: /* alter_table_clause ::= full_table_name RENAME COLUMN column_name column_name */
-{ yylhsminor.yy272 = createAlterTableRenameCol(pCxt, yymsp[-4].minor.yy272, TSDB_ALTER_TABLE_UPDATE_COLUMN_NAME, &yymsp[-1].minor.yy209, &yymsp[0].minor.yy209); }
- yymsp[-4].minor.yy272 = yylhsminor.yy272;
- break;
- case 127: /* alter_table_clause ::= full_table_name ADD TAG column_name type_name */
-{ yylhsminor.yy272 = createAlterTableAddModifyCol(pCxt, yymsp[-4].minor.yy272, TSDB_ALTER_TABLE_ADD_TAG, &yymsp[-1].minor.yy209, yymsp[0].minor.yy616); }
- yymsp[-4].minor.yy272 = yylhsminor.yy272;
- break;
- case 128: /* alter_table_clause ::= full_table_name DROP TAG column_name */
-{ yylhsminor.yy272 = createAlterTableDropCol(pCxt, yymsp[-3].minor.yy272, TSDB_ALTER_TABLE_DROP_TAG, &yymsp[0].minor.yy209); }
- yymsp[-3].minor.yy272 = yylhsminor.yy272;
- break;
- case 129: /* alter_table_clause ::= full_table_name MODIFY TAG column_name type_name */
-{ yylhsminor.yy272 = createAlterTableAddModifyCol(pCxt, yymsp[-4].minor.yy272, TSDB_ALTER_TABLE_UPDATE_TAG_BYTES, &yymsp[-1].minor.yy209, yymsp[0].minor.yy616); }
- yymsp[-4].minor.yy272 = yylhsminor.yy272;
- break;
- case 130: /* alter_table_clause ::= full_table_name RENAME TAG column_name column_name */
-{ yylhsminor.yy272 = createAlterTableRenameCol(pCxt, yymsp[-4].minor.yy272, TSDB_ALTER_TABLE_UPDATE_TAG_NAME, &yymsp[-1].minor.yy209, &yymsp[0].minor.yy209); }
- yymsp[-4].minor.yy272 = yylhsminor.yy272;
- break;
- case 131: /* alter_table_clause ::= full_table_name SET TAG column_name NK_EQ signed_literal */
-{ yylhsminor.yy272 = createAlterTableSetTag(pCxt, yymsp[-5].minor.yy272, &yymsp[-2].minor.yy209, yymsp[0].minor.yy272); }
- yymsp[-5].minor.yy272 = yylhsminor.yy272;
+ yymsp[-3].minor.yy616 = yylhsminor.yy616;
+ break;
+ case 99: /* db_options ::= db_options WAL_ROLL_PERIOD NK_INTEGER */
+{ yylhsminor.yy616 = setDatabaseOption(pCxt, yymsp[-2].minor.yy616, DB_OPTION_WAL_ROLL_PERIOD, &yymsp[0].minor.yy0); }
+ yymsp[-2].minor.yy616 = yylhsminor.yy616;
+ break;
+ case 100: /* db_options ::= db_options WAL_SEGMENT_SIZE NK_INTEGER */
+{ yylhsminor.yy616 = setDatabaseOption(pCxt, yymsp[-2].minor.yy616, DB_OPTION_WAL_SEGMENT_SIZE, &yymsp[0].minor.yy0); }
+ yymsp[-2].minor.yy616 = yylhsminor.yy616;
+ break;
+ case 101: /* db_options ::= db_options STT_TRIGGER NK_INTEGER */
+{ yylhsminor.yy616 = setDatabaseOption(pCxt, yymsp[-2].minor.yy616, DB_OPTION_STT_TRIGGER, &yymsp[0].minor.yy0); }
+ yymsp[-2].minor.yy616 = yylhsminor.yy616;
+ break;
+ case 102: /* db_options ::= db_options TABLE_PREFIX NK_INTEGER */
+{ yylhsminor.yy616 = setDatabaseOption(pCxt, yymsp[-2].minor.yy616, DB_OPTION_TABLE_PREFIX, &yymsp[0].minor.yy0); }
+ yymsp[-2].minor.yy616 = yylhsminor.yy616;
+ break;
+ case 103: /* db_options ::= db_options TABLE_SUFFIX NK_INTEGER */
+{ yylhsminor.yy616 = setDatabaseOption(pCxt, yymsp[-2].minor.yy616, DB_OPTION_TABLE_SUFFIX, &yymsp[0].minor.yy0); }
+ yymsp[-2].minor.yy616 = yylhsminor.yy616;
+ break;
+ case 104: /* alter_db_options ::= alter_db_option */
+{ yylhsminor.yy616 = createAlterDatabaseOptions(pCxt); yylhsminor.yy616 = setAlterDatabaseOption(pCxt, yylhsminor.yy616, &yymsp[0].minor.yy669); }
+ yymsp[0].minor.yy616 = yylhsminor.yy616;
+ break;
+ case 105: /* alter_db_options ::= alter_db_options alter_db_option */
+{ yylhsminor.yy616 = setAlterDatabaseOption(pCxt, yymsp[-1].minor.yy616, &yymsp[0].minor.yy669); }
+ yymsp[-1].minor.yy616 = yylhsminor.yy616;
+ break;
+ case 106: /* alter_db_option ::= CACHEMODEL NK_STRING */
+{ yymsp[-1].minor.yy669.type = DB_OPTION_CACHEMODEL; yymsp[-1].minor.yy669.val = yymsp[0].minor.yy0; }
+ break;
+ case 107: /* alter_db_option ::= CACHESIZE NK_INTEGER */
+{ yymsp[-1].minor.yy669.type = DB_OPTION_CACHESIZE; yymsp[-1].minor.yy669.val = yymsp[0].minor.yy0; }
+ break;
+ case 108: /* alter_db_option ::= WAL_FSYNC_PERIOD NK_INTEGER */
+{ yymsp[-1].minor.yy669.type = DB_OPTION_FSYNC; yymsp[-1].minor.yy669.val = yymsp[0].minor.yy0; }
+ break;
+ case 109: /* alter_db_option ::= KEEP integer_list */
+ case 110: /* alter_db_option ::= KEEP variable_list */ yytestcase(yyruleno==110);
+{ yymsp[-1].minor.yy669.type = DB_OPTION_KEEP; yymsp[-1].minor.yy669.pList = yymsp[0].minor.yy152; }
+ break;
+ case 111: /* alter_db_option ::= WAL_LEVEL NK_INTEGER */
+{ yymsp[-1].minor.yy669.type = DB_OPTION_WAL; yymsp[-1].minor.yy669.val = yymsp[0].minor.yy0; }
+ break;
+ case 112: /* alter_db_option ::= STT_TRIGGER NK_INTEGER */
+{ yymsp[-1].minor.yy669.type = DB_OPTION_STT_TRIGGER; yymsp[-1].minor.yy669.val = yymsp[0].minor.yy0; }
+ break;
+ case 113: /* integer_list ::= NK_INTEGER */
+{ yylhsminor.yy152 = createNodeList(pCxt, createValueNode(pCxt, TSDB_DATA_TYPE_BIGINT, &yymsp[0].minor.yy0)); }
+ yymsp[0].minor.yy152 = yylhsminor.yy152;
+ break;
+ case 114: /* integer_list ::= integer_list NK_COMMA NK_INTEGER */
+ case 293: /* dnode_list ::= dnode_list DNODE NK_INTEGER */ yytestcase(yyruleno==293);
+{ yylhsminor.yy152 = addNodeToList(pCxt, yymsp[-2].minor.yy152, createValueNode(pCxt, TSDB_DATA_TYPE_BIGINT, &yymsp[0].minor.yy0)); }
+ yymsp[-2].minor.yy152 = yylhsminor.yy152;
+ break;
+ case 115: /* variable_list ::= NK_VARIABLE */
+{ yylhsminor.yy152 = createNodeList(pCxt, createDurationValueNode(pCxt, &yymsp[0].minor.yy0)); }
+ yymsp[0].minor.yy152 = yylhsminor.yy152;
+ break;
+ case 116: /* variable_list ::= variable_list NK_COMMA NK_VARIABLE */
+{ yylhsminor.yy152 = addNodeToList(pCxt, yymsp[-2].minor.yy152, createDurationValueNode(pCxt, &yymsp[0].minor.yy0)); }
+ yymsp[-2].minor.yy152 = yylhsminor.yy152;
+ break;
+ case 117: /* retention_list ::= retention */
+ case 139: /* multi_create_clause ::= create_subtable_clause */ yytestcase(yyruleno==139);
+ case 142: /* multi_drop_clause ::= drop_table_clause */ yytestcase(yyruleno==142);
+ case 149: /* column_def_list ::= column_def */ yytestcase(yyruleno==149);
+ case 192: /* rollup_func_list ::= rollup_func_name */ yytestcase(yyruleno==192);
+ case 197: /* col_name_list ::= col_name */ yytestcase(yyruleno==197);
+ case 247: /* func_list ::= func */ yytestcase(yyruleno==247);
+ case 321: /* literal_list ::= signed_literal */ yytestcase(yyruleno==321);
+ case 386: /* other_para_list ::= star_func_para */ yytestcase(yyruleno==386);
+ case 392: /* when_then_list ::= when_then_expr */ yytestcase(yyruleno==392);
+ case 447: /* select_list ::= select_item */ yytestcase(yyruleno==447);
+ case 458: /* partition_list ::= partition_item */ yytestcase(yyruleno==458);
+ case 510: /* sort_specification_list ::= sort_specification */ yytestcase(yyruleno==510);
+{ yylhsminor.yy152 = createNodeList(pCxt, yymsp[0].minor.yy616); }
+ yymsp[0].minor.yy152 = yylhsminor.yy152;
+ break;
+ case 118: /* retention_list ::= retention_list NK_COMMA retention */
+ case 150: /* column_def_list ::= column_def_list NK_COMMA column_def */ yytestcase(yyruleno==150);
+ case 193: /* rollup_func_list ::= rollup_func_list NK_COMMA rollup_func_name */ yytestcase(yyruleno==193);
+ case 198: /* col_name_list ::= col_name_list NK_COMMA col_name */ yytestcase(yyruleno==198);
+ case 248: /* func_list ::= func_list NK_COMMA func */ yytestcase(yyruleno==248);
+ case 322: /* literal_list ::= literal_list NK_COMMA signed_literal */ yytestcase(yyruleno==322);
+ case 387: /* other_para_list ::= other_para_list NK_COMMA star_func_para */ yytestcase(yyruleno==387);
+ case 448: /* select_list ::= select_list NK_COMMA select_item */ yytestcase(yyruleno==448);
+ case 459: /* partition_list ::= partition_list NK_COMMA partition_item */ yytestcase(yyruleno==459);
+ case 511: /* sort_specification_list ::= sort_specification_list NK_COMMA sort_specification */ yytestcase(yyruleno==511);
+{ yylhsminor.yy152 = addNodeToList(pCxt, yymsp[-2].minor.yy152, yymsp[0].minor.yy616); }
+ yymsp[-2].minor.yy152 = yylhsminor.yy152;
+ break;
+ case 119: /* retention ::= NK_VARIABLE NK_COLON NK_VARIABLE */
+{ yylhsminor.yy616 = createNodeListNodeEx(pCxt, createDurationValueNode(pCxt, &yymsp[-2].minor.yy0), createDurationValueNode(pCxt, &yymsp[0].minor.yy0)); }
+ yymsp[-2].minor.yy616 = yylhsminor.yy616;
+ break;
+ case 120: /* speed_opt ::= */
+ case 273: /* bufsize_opt ::= */ yytestcase(yyruleno==273);
+{ yymsp[1].minor.yy452 = 0; }
+ break;
+ case 121: /* speed_opt ::= MAX_SPEED NK_INTEGER */
+ case 274: /* bufsize_opt ::= BUFSIZE NK_INTEGER */ yytestcase(yyruleno==274);
+{ yymsp[-1].minor.yy452 = taosStr2Int32(yymsp[0].minor.yy0.z, NULL, 10); }
+ break;
+ case 122: /* cmd ::= CREATE TABLE not_exists_opt full_table_name NK_LP column_def_list NK_RP tags_def_opt table_options */
+ case 124: /* cmd ::= CREATE STABLE not_exists_opt full_table_name NK_LP column_def_list NK_RP tags_def table_options */ yytestcase(yyruleno==124);
+{ pCxt->pRootNode = createCreateTableStmt(pCxt, yymsp[-6].minor.yy89, yymsp[-5].minor.yy616, yymsp[-3].minor.yy152, yymsp[-1].minor.yy152, yymsp[0].minor.yy616); }
+ break;
+ case 123: /* cmd ::= CREATE TABLE multi_create_clause */
+{ pCxt->pRootNode = createCreateMultiTableStmt(pCxt, yymsp[0].minor.yy152); }
+ break;
+ case 125: /* cmd ::= DROP TABLE multi_drop_clause */
+{ pCxt->pRootNode = createDropTableStmt(pCxt, yymsp[0].minor.yy152); }
+ break;
+ case 126: /* cmd ::= DROP STABLE exists_opt full_table_name */
+{ pCxt->pRootNode = createDropSuperTableStmt(pCxt, yymsp[-1].minor.yy89, yymsp[0].minor.yy616); }
+ break;
+ case 127: /* cmd ::= ALTER TABLE alter_table_clause */
+ case 295: /* cmd ::= query_or_subquery */ yytestcase(yyruleno==295);
+{ pCxt->pRootNode = yymsp[0].minor.yy616; }
+ break;
+ case 128: /* cmd ::= ALTER STABLE alter_table_clause */
+{ pCxt->pRootNode = setAlterSuperTableType(yymsp[0].minor.yy616); }
+ break;
+ case 129: /* alter_table_clause ::= full_table_name alter_table_options */
+{ yylhsminor.yy616 = createAlterTableModifyOptions(pCxt, yymsp[-1].minor.yy616, yymsp[0].minor.yy616); }
+ yymsp[-1].minor.yy616 = yylhsminor.yy616;
+ break;
+ case 130: /* alter_table_clause ::= full_table_name ADD COLUMN column_name type_name */
+{ yylhsminor.yy616 = createAlterTableAddModifyCol(pCxt, yymsp[-4].minor.yy616, TSDB_ALTER_TABLE_ADD_COLUMN, &yymsp[-1].minor.yy673, yymsp[0].minor.yy784); }
+ yymsp[-4].minor.yy616 = yylhsminor.yy616;
+ break;
+ case 131: /* alter_table_clause ::= full_table_name DROP COLUMN column_name */
+{ yylhsminor.yy616 = createAlterTableDropCol(pCxt, yymsp[-3].minor.yy616, TSDB_ALTER_TABLE_DROP_COLUMN, &yymsp[0].minor.yy673); }
+ yymsp[-3].minor.yy616 = yylhsminor.yy616;
+ break;
+ case 132: /* alter_table_clause ::= full_table_name MODIFY COLUMN column_name type_name */
+{ yylhsminor.yy616 = createAlterTableAddModifyCol(pCxt, yymsp[-4].minor.yy616, TSDB_ALTER_TABLE_UPDATE_COLUMN_BYTES, &yymsp[-1].minor.yy673, yymsp[0].minor.yy784); }
+ yymsp[-4].minor.yy616 = yylhsminor.yy616;
+ break;
+ case 133: /* alter_table_clause ::= full_table_name RENAME COLUMN column_name column_name */
+{ yylhsminor.yy616 = createAlterTableRenameCol(pCxt, yymsp[-4].minor.yy616, TSDB_ALTER_TABLE_UPDATE_COLUMN_NAME, &yymsp[-1].minor.yy673, &yymsp[0].minor.yy673); }
+ yymsp[-4].minor.yy616 = yylhsminor.yy616;
+ break;
+ case 134: /* alter_table_clause ::= full_table_name ADD TAG column_name type_name */
+{ yylhsminor.yy616 = createAlterTableAddModifyCol(pCxt, yymsp[-4].minor.yy616, TSDB_ALTER_TABLE_ADD_TAG, &yymsp[-1].minor.yy673, yymsp[0].minor.yy784); }
+ yymsp[-4].minor.yy616 = yylhsminor.yy616;
+ break;
+ case 135: /* alter_table_clause ::= full_table_name DROP TAG column_name */
+{ yylhsminor.yy616 = createAlterTableDropCol(pCxt, yymsp[-3].minor.yy616, TSDB_ALTER_TABLE_DROP_TAG, &yymsp[0].minor.yy673); }
+ yymsp[-3].minor.yy616 = yylhsminor.yy616;
+ break;
+ case 136: /* alter_table_clause ::= full_table_name MODIFY TAG column_name type_name */
+{ yylhsminor.yy616 = createAlterTableAddModifyCol(pCxt, yymsp[-4].minor.yy616, TSDB_ALTER_TABLE_UPDATE_TAG_BYTES, &yymsp[-1].minor.yy673, yymsp[0].minor.yy784); }
+ yymsp[-4].minor.yy616 = yylhsminor.yy616;
+ break;
+ case 137: /* alter_table_clause ::= full_table_name RENAME TAG column_name column_name */
+{ yylhsminor.yy616 = createAlterTableRenameCol(pCxt, yymsp[-4].minor.yy616, TSDB_ALTER_TABLE_UPDATE_TAG_NAME, &yymsp[-1].minor.yy673, &yymsp[0].minor.yy673); }
+ yymsp[-4].minor.yy616 = yylhsminor.yy616;
+ break;
+ case 138: /* alter_table_clause ::= full_table_name SET TAG column_name NK_EQ signed_literal */
+{ yylhsminor.yy616 = createAlterTableSetTag(pCxt, yymsp[-5].minor.yy616, &yymsp[-2].minor.yy673, yymsp[0].minor.yy616); }
+ yymsp[-5].minor.yy616 = yylhsminor.yy616;
+ break;
+ case 140: /* multi_create_clause ::= multi_create_clause create_subtable_clause */
+ case 143: /* multi_drop_clause ::= multi_drop_clause drop_table_clause */ yytestcase(yyruleno==143);
+ case 393: /* when_then_list ::= when_then_list when_then_expr */ yytestcase(yyruleno==393);
+{ yylhsminor.yy152 = addNodeToList(pCxt, yymsp[-1].minor.yy152, yymsp[0].minor.yy616); }
+ yymsp[-1].minor.yy152 = yylhsminor.yy152;
break;
- case 133: /* multi_create_clause ::= multi_create_clause create_subtable_clause */
- case 136: /* multi_drop_clause ::= multi_drop_clause drop_table_clause */ yytestcase(yyruleno==136);
-{ yylhsminor.yy172 = addNodeToList(pCxt, yymsp[-1].minor.yy172, yymsp[0].minor.yy272); }
- yymsp[-1].minor.yy172 = yylhsminor.yy172;
+ case 141: /* create_subtable_clause ::= not_exists_opt full_table_name USING full_table_name specific_cols_opt TAGS NK_LP expression_list NK_RP table_options */
+{ yylhsminor.yy616 = createCreateSubTableClause(pCxt, yymsp[-9].minor.yy89, yymsp[-8].minor.yy616, yymsp[-6].minor.yy616, yymsp[-5].minor.yy152, yymsp[-2].minor.yy152, yymsp[0].minor.yy616); }
+ yymsp[-9].minor.yy616 = yylhsminor.yy616;
break;
- case 134: /* create_subtable_clause ::= not_exists_opt full_table_name USING full_table_name specific_cols_opt TAGS NK_LP expression_list NK_RP table_options */
-{ yylhsminor.yy272 = createCreateSubTableClause(pCxt, yymsp[-9].minor.yy293, yymsp[-8].minor.yy272, yymsp[-6].minor.yy272, yymsp[-5].minor.yy172, yymsp[-2].minor.yy172, yymsp[0].minor.yy272); }
- yymsp[-9].minor.yy272 = yylhsminor.yy272;
+ case 144: /* drop_table_clause ::= exists_opt full_table_name */
+{ yylhsminor.yy616 = createDropTableClause(pCxt, yymsp[-1].minor.yy89, yymsp[0].minor.yy616); }
+ yymsp[-1].minor.yy616 = yylhsminor.yy616;
break;
- case 137: /* drop_table_clause ::= exists_opt full_table_name */
-{ yylhsminor.yy272 = createDropTableClause(pCxt, yymsp[-1].minor.yy293, yymsp[0].minor.yy272); }
- yymsp[-1].minor.yy272 = yylhsminor.yy272;
+ case 145: /* specific_cols_opt ::= */
+ case 176: /* tags_def_opt ::= */ yytestcase(yyruleno==176);
+ case 456: /* partition_by_clause_opt ::= */ yytestcase(yyruleno==456);
+ case 478: /* group_by_clause_opt ::= */ yytestcase(yyruleno==478);
+ case 497: /* order_by_clause_opt ::= */ yytestcase(yyruleno==497);
+{ yymsp[1].minor.yy152 = NULL; }
break;
- case 138: /* specific_cols_opt ::= */
- case 169: /* tags_def_opt ::= */ yytestcase(yyruleno==169);
- case 435: /* partition_by_clause_opt ::= */ yytestcase(yyruleno==435);
- case 452: /* group_by_clause_opt ::= */ yytestcase(yyruleno==452);
- case 468: /* order_by_clause_opt ::= */ yytestcase(yyruleno==468);
-{ yymsp[1].minor.yy172 = NULL; }
+ case 146: /* specific_cols_opt ::= NK_LP col_name_list NK_RP */
+{ yymsp[-2].minor.yy152 = yymsp[-1].minor.yy152; }
break;
- case 139: /* specific_cols_opt ::= NK_LP col_name_list NK_RP */
-{ yymsp[-2].minor.yy172 = yymsp[-1].minor.yy172; }
+ case 147: /* full_table_name ::= table_name */
+{ yylhsminor.yy616 = createRealTableNode(pCxt, NULL, &yymsp[0].minor.yy673, NULL); }
+ yymsp[0].minor.yy616 = yylhsminor.yy616;
break;
- case 140: /* full_table_name ::= table_name */
-{ yylhsminor.yy272 = createRealTableNode(pCxt, NULL, &yymsp[0].minor.yy209, NULL); }
- yymsp[0].minor.yy272 = yylhsminor.yy272;
+ case 148: /* full_table_name ::= db_name NK_DOT table_name */
+{ yylhsminor.yy616 = createRealTableNode(pCxt, &yymsp[-2].minor.yy673, &yymsp[0].minor.yy673, NULL); }
+ yymsp[-2].minor.yy616 = yylhsminor.yy616;
break;
- case 141: /* full_table_name ::= db_name NK_DOT table_name */
-{ yylhsminor.yy272 = createRealTableNode(pCxt, &yymsp[-2].minor.yy209, &yymsp[0].minor.yy209, NULL); }
- yymsp[-2].minor.yy272 = yylhsminor.yy272;
+ case 151: /* column_def ::= column_name type_name */
+{ yylhsminor.yy616 = createColumnDefNode(pCxt, &yymsp[-1].minor.yy673, yymsp[0].minor.yy784, NULL); }
+ yymsp[-1].minor.yy616 = yylhsminor.yy616;
break;
- case 144: /* column_def ::= column_name type_name */
-{ yylhsminor.yy272 = createColumnDefNode(pCxt, &yymsp[-1].minor.yy209, yymsp[0].minor.yy616, NULL); }
- yymsp[-1].minor.yy272 = yylhsminor.yy272;
+ case 152: /* column_def ::= column_name type_name COMMENT NK_STRING */
+{ yylhsminor.yy616 = createColumnDefNode(pCxt, &yymsp[-3].minor.yy673, yymsp[-2].minor.yy784, &yymsp[0].minor.yy0); }
+ yymsp[-3].minor.yy616 = yylhsminor.yy616;
break;
- case 145: /* column_def ::= column_name type_name COMMENT NK_STRING */
-{ yylhsminor.yy272 = createColumnDefNode(pCxt, &yymsp[-3].minor.yy209, yymsp[-2].minor.yy616, &yymsp[0].minor.yy0); }
- yymsp[-3].minor.yy272 = yylhsminor.yy272;
+ case 153: /* type_name ::= BOOL */
+{ yymsp[0].minor.yy784 = createDataType(TSDB_DATA_TYPE_BOOL); }
break;
- case 146: /* type_name ::= BOOL */
-{ yymsp[0].minor.yy616 = createDataType(TSDB_DATA_TYPE_BOOL); }
+ case 154: /* type_name ::= TINYINT */
+{ yymsp[0].minor.yy784 = createDataType(TSDB_DATA_TYPE_TINYINT); }
break;
- case 147: /* type_name ::= TINYINT */
-{ yymsp[0].minor.yy616 = createDataType(TSDB_DATA_TYPE_TINYINT); }
+ case 155: /* type_name ::= SMALLINT */
+{ yymsp[0].minor.yy784 = createDataType(TSDB_DATA_TYPE_SMALLINT); }
break;
- case 148: /* type_name ::= SMALLINT */
-{ yymsp[0].minor.yy616 = createDataType(TSDB_DATA_TYPE_SMALLINT); }
+ case 156: /* type_name ::= INT */
+ case 157: /* type_name ::= INTEGER */ yytestcase(yyruleno==157);
+{ yymsp[0].minor.yy784 = createDataType(TSDB_DATA_TYPE_INT); }
break;
- case 149: /* type_name ::= INT */
- case 150: /* type_name ::= INTEGER */ yytestcase(yyruleno==150);
-{ yymsp[0].minor.yy616 = createDataType(TSDB_DATA_TYPE_INT); }
+ case 158: /* type_name ::= BIGINT */
+{ yymsp[0].minor.yy784 = createDataType(TSDB_DATA_TYPE_BIGINT); }
break;
- case 151: /* type_name ::= BIGINT */
-{ yymsp[0].minor.yy616 = createDataType(TSDB_DATA_TYPE_BIGINT); }
+ case 159: /* type_name ::= FLOAT */
+{ yymsp[0].minor.yy784 = createDataType(TSDB_DATA_TYPE_FLOAT); }
break;
- case 152: /* type_name ::= FLOAT */
-{ yymsp[0].minor.yy616 = createDataType(TSDB_DATA_TYPE_FLOAT); }
+ case 160: /* type_name ::= DOUBLE */
+{ yymsp[0].minor.yy784 = createDataType(TSDB_DATA_TYPE_DOUBLE); }
break;
- case 153: /* type_name ::= DOUBLE */
-{ yymsp[0].minor.yy616 = createDataType(TSDB_DATA_TYPE_DOUBLE); }
+ case 161: /* type_name ::= BINARY NK_LP NK_INTEGER NK_RP */
+{ yymsp[-3].minor.yy784 = createVarLenDataType(TSDB_DATA_TYPE_BINARY, &yymsp[-1].minor.yy0); }
break;
- case 154: /* type_name ::= BINARY NK_LP NK_INTEGER NK_RP */
-{ yymsp[-3].minor.yy616 = createVarLenDataType(TSDB_DATA_TYPE_BINARY, &yymsp[-1].minor.yy0); }
+ case 162: /* type_name ::= TIMESTAMP */
+{ yymsp[0].minor.yy784 = createDataType(TSDB_DATA_TYPE_TIMESTAMP); }
break;
- case 155: /* type_name ::= TIMESTAMP */
-{ yymsp[0].minor.yy616 = createDataType(TSDB_DATA_TYPE_TIMESTAMP); }
+ case 163: /* type_name ::= NCHAR NK_LP NK_INTEGER NK_RP */
+{ yymsp[-3].minor.yy784 = createVarLenDataType(TSDB_DATA_TYPE_NCHAR, &yymsp[-1].minor.yy0); }
break;
- case 156: /* type_name ::= NCHAR NK_LP NK_INTEGER NK_RP */
-{ yymsp[-3].minor.yy616 = createVarLenDataType(TSDB_DATA_TYPE_NCHAR, &yymsp[-1].minor.yy0); }
+ case 164: /* type_name ::= TINYINT UNSIGNED */
+{ yymsp[-1].minor.yy784 = createDataType(TSDB_DATA_TYPE_UTINYINT); }
break;
- case 157: /* type_name ::= TINYINT UNSIGNED */
-{ yymsp[-1].minor.yy616 = createDataType(TSDB_DATA_TYPE_UTINYINT); }
+ case 165: /* type_name ::= SMALLINT UNSIGNED */
+{ yymsp[-1].minor.yy784 = createDataType(TSDB_DATA_TYPE_USMALLINT); }
break;
- case 158: /* type_name ::= SMALLINT UNSIGNED */
-{ yymsp[-1].minor.yy616 = createDataType(TSDB_DATA_TYPE_USMALLINT); }
+ case 166: /* type_name ::= INT UNSIGNED */
+{ yymsp[-1].minor.yy784 = createDataType(TSDB_DATA_TYPE_UINT); }
break;
- case 159: /* type_name ::= INT UNSIGNED */
-{ yymsp[-1].minor.yy616 = createDataType(TSDB_DATA_TYPE_UINT); }
+ case 167: /* type_name ::= BIGINT UNSIGNED */
+{ yymsp[-1].minor.yy784 = createDataType(TSDB_DATA_TYPE_UBIGINT); }
break;
- case 160: /* type_name ::= BIGINT UNSIGNED */
-{ yymsp[-1].minor.yy616 = createDataType(TSDB_DATA_TYPE_UBIGINT); }
+ case 168: /* type_name ::= JSON */
+{ yymsp[0].minor.yy784 = createDataType(TSDB_DATA_TYPE_JSON); }
break;
- case 161: /* type_name ::= JSON */
-{ yymsp[0].minor.yy616 = createDataType(TSDB_DATA_TYPE_JSON); }
+ case 169: /* type_name ::= VARCHAR NK_LP NK_INTEGER NK_RP */
+{ yymsp[-3].minor.yy784 = createVarLenDataType(TSDB_DATA_TYPE_VARCHAR, &yymsp[-1].minor.yy0); }
break;
- case 162: /* type_name ::= VARCHAR NK_LP NK_INTEGER NK_RP */
-{ yymsp[-3].minor.yy616 = createVarLenDataType(TSDB_DATA_TYPE_VARCHAR, &yymsp[-1].minor.yy0); }
+ case 170: /* type_name ::= MEDIUMBLOB */
+{ yymsp[0].minor.yy784 = createDataType(TSDB_DATA_TYPE_MEDIUMBLOB); }
break;
- case 163: /* type_name ::= MEDIUMBLOB */
-{ yymsp[0].minor.yy616 = createDataType(TSDB_DATA_TYPE_MEDIUMBLOB); }
+ case 171: /* type_name ::= BLOB */
+{ yymsp[0].minor.yy784 = createDataType(TSDB_DATA_TYPE_BLOB); }
break;
- case 164: /* type_name ::= BLOB */
-{ yymsp[0].minor.yy616 = createDataType(TSDB_DATA_TYPE_BLOB); }
+ case 172: /* type_name ::= VARBINARY NK_LP NK_INTEGER NK_RP */
+{ yymsp[-3].minor.yy784 = createVarLenDataType(TSDB_DATA_TYPE_VARBINARY, &yymsp[-1].minor.yy0); }
break;
- case 165: /* type_name ::= VARBINARY NK_LP NK_INTEGER NK_RP */
-{ yymsp[-3].minor.yy616 = createVarLenDataType(TSDB_DATA_TYPE_VARBINARY, &yymsp[-1].minor.yy0); }
+ case 173: /* type_name ::= DECIMAL */
+{ yymsp[0].minor.yy784 = createDataType(TSDB_DATA_TYPE_DECIMAL); }
break;
- case 166: /* type_name ::= DECIMAL */
-{ yymsp[0].minor.yy616 = createDataType(TSDB_DATA_TYPE_DECIMAL); }
+ case 174: /* type_name ::= DECIMAL NK_LP NK_INTEGER NK_RP */
+{ yymsp[-3].minor.yy784 = createDataType(TSDB_DATA_TYPE_DECIMAL); }
break;
- case 167: /* type_name ::= DECIMAL NK_LP NK_INTEGER NK_RP */
-{ yymsp[-3].minor.yy616 = createDataType(TSDB_DATA_TYPE_DECIMAL); }
+ case 175: /* type_name ::= DECIMAL NK_LP NK_INTEGER NK_COMMA NK_INTEGER NK_RP */
+{ yymsp[-5].minor.yy784 = createDataType(TSDB_DATA_TYPE_DECIMAL); }
break;
- case 168: /* type_name ::= DECIMAL NK_LP NK_INTEGER NK_COMMA NK_INTEGER NK_RP */
-{ yymsp[-5].minor.yy616 = createDataType(TSDB_DATA_TYPE_DECIMAL); }
+ case 177: /* tags_def_opt ::= tags_def */
+ case 385: /* star_func_para_list ::= other_para_list */ yytestcase(yyruleno==385);
+{ yylhsminor.yy152 = yymsp[0].minor.yy152; }
+ yymsp[0].minor.yy152 = yylhsminor.yy152;
break;
- case 170: /* tags_def_opt ::= tags_def */
- case 371: /* star_func_para_list ::= other_para_list */ yytestcase(yyruleno==371);
-{ yylhsminor.yy172 = yymsp[0].minor.yy172; }
- yymsp[0].minor.yy172 = yylhsminor.yy172;
+ case 178: /* tags_def ::= TAGS NK_LP column_def_list NK_RP */
+{ yymsp[-3].minor.yy152 = yymsp[-1].minor.yy152; }
break;
- case 171: /* tags_def ::= TAGS NK_LP column_def_list NK_RP */
-{ yymsp[-3].minor.yy172 = yymsp[-1].minor.yy172; }
+ case 179: /* table_options ::= */
+{ yymsp[1].minor.yy616 = createDefaultTableOptions(pCxt); }
break;
- case 172: /* table_options ::= */
-{ yymsp[1].minor.yy272 = createDefaultTableOptions(pCxt); }
+ case 180: /* table_options ::= table_options COMMENT NK_STRING */
+{ yylhsminor.yy616 = setTableOption(pCxt, yymsp[-2].minor.yy616, TABLE_OPTION_COMMENT, &yymsp[0].minor.yy0); }
+ yymsp[-2].minor.yy616 = yylhsminor.yy616;
break;
- case 173: /* table_options ::= table_options COMMENT NK_STRING */
-{ yylhsminor.yy272 = setTableOption(pCxt, yymsp[-2].minor.yy272, TABLE_OPTION_COMMENT, &yymsp[0].minor.yy0); }
- yymsp[-2].minor.yy272 = yylhsminor.yy272;
+ case 181: /* table_options ::= table_options MAX_DELAY duration_list */
+{ yylhsminor.yy616 = setTableOption(pCxt, yymsp[-2].minor.yy616, TABLE_OPTION_MAXDELAY, yymsp[0].minor.yy152); }
+ yymsp[-2].minor.yy616 = yylhsminor.yy616;
break;
- case 174: /* table_options ::= table_options MAX_DELAY duration_list */
-{ yylhsminor.yy272 = setTableOption(pCxt, yymsp[-2].minor.yy272, TABLE_OPTION_MAXDELAY, yymsp[0].minor.yy172); }
- yymsp[-2].minor.yy272 = yylhsminor.yy272;
+ case 182: /* table_options ::= table_options WATERMARK duration_list */
+{ yylhsminor.yy616 = setTableOption(pCxt, yymsp[-2].minor.yy616, TABLE_OPTION_WATERMARK, yymsp[0].minor.yy152); }
+ yymsp[-2].minor.yy616 = yylhsminor.yy616;
break;
- case 175: /* table_options ::= table_options WATERMARK duration_list */
-{ yylhsminor.yy272 = setTableOption(pCxt, yymsp[-2].minor.yy272, TABLE_OPTION_WATERMARK, yymsp[0].minor.yy172); }
- yymsp[-2].minor.yy272 = yylhsminor.yy272;
+ case 183: /* table_options ::= table_options ROLLUP NK_LP rollup_func_list NK_RP */
+{ yylhsminor.yy616 = setTableOption(pCxt, yymsp[-4].minor.yy616, TABLE_OPTION_ROLLUP, yymsp[-1].minor.yy152); }
+ yymsp[-4].minor.yy616 = yylhsminor.yy616;
break;
- case 176: /* table_options ::= table_options ROLLUP NK_LP rollup_func_list NK_RP */
-{ yylhsminor.yy272 = setTableOption(pCxt, yymsp[-4].minor.yy272, TABLE_OPTION_ROLLUP, yymsp[-1].minor.yy172); }
- yymsp[-4].minor.yy272 = yylhsminor.yy272;
+ case 184: /* table_options ::= table_options TTL NK_INTEGER */
+{ yylhsminor.yy616 = setTableOption(pCxt, yymsp[-2].minor.yy616, TABLE_OPTION_TTL, &yymsp[0].minor.yy0); }
+ yymsp[-2].minor.yy616 = yylhsminor.yy616;
break;
- case 177: /* table_options ::= table_options TTL NK_INTEGER */
-{ yylhsminor.yy272 = setTableOption(pCxt, yymsp[-2].minor.yy272, TABLE_OPTION_TTL, &yymsp[0].minor.yy0); }
- yymsp[-2].minor.yy272 = yylhsminor.yy272;
+ case 185: /* table_options ::= table_options SMA NK_LP col_name_list NK_RP */
+{ yylhsminor.yy616 = setTableOption(pCxt, yymsp[-4].minor.yy616, TABLE_OPTION_SMA, yymsp[-1].minor.yy152); }
+ yymsp[-4].minor.yy616 = yylhsminor.yy616;
break;
- case 178: /* table_options ::= table_options SMA NK_LP col_name_list NK_RP */
-{ yylhsminor.yy272 = setTableOption(pCxt, yymsp[-4].minor.yy272, TABLE_OPTION_SMA, yymsp[-1].minor.yy172); }
- yymsp[-4].minor.yy272 = yylhsminor.yy272;
+ case 186: /* alter_table_options ::= alter_table_option */
+{ yylhsminor.yy616 = createAlterTableOptions(pCxt); yylhsminor.yy616 = setTableOption(pCxt, yylhsminor.yy616, yymsp[0].minor.yy669.type, &yymsp[0].minor.yy669.val); }
+ yymsp[0].minor.yy616 = yylhsminor.yy616;
break;
- case 179: /* alter_table_options ::= alter_table_option */
-{ yylhsminor.yy272 = createAlterTableOptions(pCxt); yylhsminor.yy272 = setTableOption(pCxt, yylhsminor.yy272, yymsp[0].minor.yy5.type, &yymsp[0].minor.yy5.val); }
- yymsp[0].minor.yy272 = yylhsminor.yy272;
+ case 187: /* alter_table_options ::= alter_table_options alter_table_option */
+{ yylhsminor.yy616 = setTableOption(pCxt, yymsp[-1].minor.yy616, yymsp[0].minor.yy669.type, &yymsp[0].minor.yy669.val); }
+ yymsp[-1].minor.yy616 = yylhsminor.yy616;
break;
- case 180: /* alter_table_options ::= alter_table_options alter_table_option */
-{ yylhsminor.yy272 = setTableOption(pCxt, yymsp[-1].minor.yy272, yymsp[0].minor.yy5.type, &yymsp[0].minor.yy5.val); }
- yymsp[-1].minor.yy272 = yylhsminor.yy272;
+ case 188: /* alter_table_option ::= COMMENT NK_STRING */
+{ yymsp[-1].minor.yy669.type = TABLE_OPTION_COMMENT; yymsp[-1].minor.yy669.val = yymsp[0].minor.yy0; }
break;
- case 181: /* alter_table_option ::= COMMENT NK_STRING */
-{ yymsp[-1].minor.yy5.type = TABLE_OPTION_COMMENT; yymsp[-1].minor.yy5.val = yymsp[0].minor.yy0; }
+ case 189: /* alter_table_option ::= TTL NK_INTEGER */
+{ yymsp[-1].minor.yy669.type = TABLE_OPTION_TTL; yymsp[-1].minor.yy669.val = yymsp[0].minor.yy0; }
break;
- case 182: /* alter_table_option ::= TTL NK_INTEGER */
-{ yymsp[-1].minor.yy5.type = TABLE_OPTION_TTL; yymsp[-1].minor.yy5.val = yymsp[0].minor.yy0; }
+ case 190: /* duration_list ::= duration_literal */
+ case 351: /* expression_list ::= expr_or_subquery */ yytestcase(yyruleno==351);
+{ yylhsminor.yy152 = createNodeList(pCxt, releaseRawExprNode(pCxt, yymsp[0].minor.yy616)); }
+ yymsp[0].minor.yy152 = yylhsminor.yy152;
break;
- case 183: /* duration_list ::= duration_literal */
- case 338: /* expression_list ::= expression */ yytestcase(yyruleno==338);
-{ yylhsminor.yy172 = createNodeList(pCxt, releaseRawExprNode(pCxt, yymsp[0].minor.yy272)); }
- yymsp[0].minor.yy172 = yylhsminor.yy172;
+ case 191: /* duration_list ::= duration_list NK_COMMA duration_literal */
+ case 352: /* expression_list ::= expression_list NK_COMMA expr_or_subquery */ yytestcase(yyruleno==352);
+{ yylhsminor.yy152 = addNodeToList(pCxt, yymsp[-2].minor.yy152, releaseRawExprNode(pCxt, yymsp[0].minor.yy616)); }
+ yymsp[-2].minor.yy152 = yylhsminor.yy152;
break;
- case 184: /* duration_list ::= duration_list NK_COMMA duration_literal */
- case 339: /* expression_list ::= expression_list NK_COMMA expression */ yytestcase(yyruleno==339);
-{ yylhsminor.yy172 = addNodeToList(pCxt, yymsp[-2].minor.yy172, releaseRawExprNode(pCxt, yymsp[0].minor.yy272)); }
- yymsp[-2].minor.yy172 = yylhsminor.yy172;
+ case 194: /* rollup_func_name ::= function_name */
+{ yylhsminor.yy616 = createFunctionNode(pCxt, &yymsp[0].minor.yy673, NULL); }
+ yymsp[0].minor.yy616 = yylhsminor.yy616;
break;
- case 187: /* rollup_func_name ::= function_name */
-{ yylhsminor.yy272 = createFunctionNode(pCxt, &yymsp[0].minor.yy209, NULL); }
- yymsp[0].minor.yy272 = yylhsminor.yy272;
+ case 195: /* rollup_func_name ::= FIRST */
+ case 196: /* rollup_func_name ::= LAST */ yytestcase(yyruleno==196);
+{ yylhsminor.yy616 = createFunctionNode(pCxt, &yymsp[0].minor.yy0, NULL); }
+ yymsp[0].minor.yy616 = yylhsminor.yy616;
break;
- case 188: /* rollup_func_name ::= FIRST */
- case 189: /* rollup_func_name ::= LAST */ yytestcase(yyruleno==189);
-{ yylhsminor.yy272 = createFunctionNode(pCxt, &yymsp[0].minor.yy0, NULL); }
- yymsp[0].minor.yy272 = yylhsminor.yy272;
+ case 199: /* col_name ::= column_name */
+{ yylhsminor.yy616 = createColumnNode(pCxt, NULL, &yymsp[0].minor.yy673); }
+ yymsp[0].minor.yy616 = yylhsminor.yy616;
break;
- case 192: /* col_name ::= column_name */
-{ yylhsminor.yy272 = createColumnNode(pCxt, NULL, &yymsp[0].minor.yy209); }
- yymsp[0].minor.yy272 = yylhsminor.yy272;
- break;
- case 193: /* cmd ::= SHOW DNODES */
+ case 200: /* cmd ::= SHOW DNODES */
{ pCxt->pRootNode = createShowStmt(pCxt, QUERY_NODE_SHOW_DNODES_STMT); }
break;
- case 194: /* cmd ::= SHOW USERS */
+ case 201: /* cmd ::= SHOW USERS */
{ pCxt->pRootNode = createShowStmt(pCxt, QUERY_NODE_SHOW_USERS_STMT); }
break;
- case 195: /* cmd ::= SHOW DATABASES */
+ case 202: /* cmd ::= SHOW DATABASES */
{ pCxt->pRootNode = createShowStmt(pCxt, QUERY_NODE_SHOW_DATABASES_STMT); }
break;
- case 196: /* cmd ::= SHOW db_name_cond_opt TABLES like_pattern_opt */
-{ pCxt->pRootNode = createShowStmtWithCond(pCxt, QUERY_NODE_SHOW_TABLES_STMT, yymsp[-2].minor.yy272, yymsp[0].minor.yy272, OP_TYPE_LIKE); }
+ case 203: /* cmd ::= SHOW db_name_cond_opt TABLES like_pattern_opt */
+{ pCxt->pRootNode = createShowStmtWithCond(pCxt, QUERY_NODE_SHOW_TABLES_STMT, yymsp[-2].minor.yy616, yymsp[0].minor.yy616, OP_TYPE_LIKE); }
break;
- case 197: /* cmd ::= SHOW db_name_cond_opt STABLES like_pattern_opt */
-{ pCxt->pRootNode = createShowStmtWithCond(pCxt, QUERY_NODE_SHOW_STABLES_STMT, yymsp[-2].minor.yy272, yymsp[0].minor.yy272, OP_TYPE_LIKE); }
+ case 204: /* cmd ::= SHOW db_name_cond_opt STABLES like_pattern_opt */
+{ pCxt->pRootNode = createShowStmtWithCond(pCxt, QUERY_NODE_SHOW_STABLES_STMT, yymsp[-2].minor.yy616, yymsp[0].minor.yy616, OP_TYPE_LIKE); }
break;
- case 198: /* cmd ::= SHOW db_name_cond_opt VGROUPS */
-{ pCxt->pRootNode = createShowStmtWithCond(pCxt, QUERY_NODE_SHOW_VGROUPS_STMT, yymsp[-1].minor.yy272, NULL, OP_TYPE_LIKE); }
+ case 205: /* cmd ::= SHOW db_name_cond_opt VGROUPS */
+{ pCxt->pRootNode = createShowStmtWithCond(pCxt, QUERY_NODE_SHOW_VGROUPS_STMT, yymsp[-1].minor.yy616, NULL, OP_TYPE_LIKE); }
break;
- case 199: /* cmd ::= SHOW MNODES */
+ case 206: /* cmd ::= SHOW MNODES */
{ pCxt->pRootNode = createShowStmt(pCxt, QUERY_NODE_SHOW_MNODES_STMT); }
break;
- case 200: /* cmd ::= SHOW MODULES */
+ case 207: /* cmd ::= SHOW MODULES */
{ pCxt->pRootNode = createShowStmt(pCxt, QUERY_NODE_SHOW_MODULES_STMT); }
break;
- case 201: /* cmd ::= SHOW QNODES */
+ case 208: /* cmd ::= SHOW QNODES */
{ pCxt->pRootNode = createShowStmt(pCxt, QUERY_NODE_SHOW_QNODES_STMT); }
break;
- case 202: /* cmd ::= SHOW FUNCTIONS */
+ case 209: /* cmd ::= SHOW FUNCTIONS */
{ pCxt->pRootNode = createShowStmt(pCxt, QUERY_NODE_SHOW_FUNCTIONS_STMT); }
break;
- case 203: /* cmd ::= SHOW INDEXES FROM table_name_cond from_db_opt */
-{ pCxt->pRootNode = createShowStmtWithCond(pCxt, QUERY_NODE_SHOW_INDEXES_STMT, yymsp[0].minor.yy272, yymsp[-1].minor.yy272, OP_TYPE_EQUAL); }
+ case 210: /* cmd ::= SHOW INDEXES FROM table_name_cond from_db_opt */
+{ pCxt->pRootNode = createShowStmtWithCond(pCxt, QUERY_NODE_SHOW_INDEXES_STMT, yymsp[0].minor.yy616, yymsp[-1].minor.yy616, OP_TYPE_EQUAL); }
break;
- case 204: /* cmd ::= SHOW STREAMS */
+ case 211: /* cmd ::= SHOW STREAMS */
{ pCxt->pRootNode = createShowStmt(pCxt, QUERY_NODE_SHOW_STREAMS_STMT); }
break;
- case 205: /* cmd ::= SHOW ACCOUNTS */
+ case 212: /* cmd ::= SHOW ACCOUNTS */
{ pCxt->errCode = generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_EXPRIE_STATEMENT); }
break;
- case 206: /* cmd ::= SHOW APPS */
+ case 213: /* cmd ::= SHOW APPS */
{ pCxt->pRootNode = createShowStmt(pCxt, QUERY_NODE_SHOW_APPS_STMT); }
break;
- case 207: /* cmd ::= SHOW CONNECTIONS */
+ case 214: /* cmd ::= SHOW CONNECTIONS */
{ pCxt->pRootNode = createShowStmt(pCxt, QUERY_NODE_SHOW_CONNECTIONS_STMT); }
break;
- case 208: /* cmd ::= SHOW LICENCES */
- case 209: /* cmd ::= SHOW GRANTS */ yytestcase(yyruleno==209);
+ case 215: /* cmd ::= SHOW LICENCES */
+ case 216: /* cmd ::= SHOW GRANTS */ yytestcase(yyruleno==216);
{ pCxt->pRootNode = createShowStmt(pCxt, QUERY_NODE_SHOW_LICENCES_STMT); }
break;
- case 210: /* cmd ::= SHOW CREATE DATABASE db_name */
-{ pCxt->pRootNode = createShowCreateDatabaseStmt(pCxt, &yymsp[0].minor.yy209); }
+ case 217: /* cmd ::= SHOW CREATE DATABASE db_name */
+{ pCxt->pRootNode = createShowCreateDatabaseStmt(pCxt, &yymsp[0].minor.yy673); }
break;
- case 211: /* cmd ::= SHOW CREATE TABLE full_table_name */
-{ pCxt->pRootNode = createShowCreateTableStmt(pCxt, QUERY_NODE_SHOW_CREATE_TABLE_STMT, yymsp[0].minor.yy272); }
+ case 218: /* cmd ::= SHOW CREATE TABLE full_table_name */
+{ pCxt->pRootNode = createShowCreateTableStmt(pCxt, QUERY_NODE_SHOW_CREATE_TABLE_STMT, yymsp[0].minor.yy616); }
break;
- case 212: /* cmd ::= SHOW CREATE STABLE full_table_name */
-{ pCxt->pRootNode = createShowCreateTableStmt(pCxt, QUERY_NODE_SHOW_CREATE_STABLE_STMT, yymsp[0].minor.yy272); }
+ case 219: /* cmd ::= SHOW CREATE STABLE full_table_name */
+{ pCxt->pRootNode = createShowCreateTableStmt(pCxt, QUERY_NODE_SHOW_CREATE_STABLE_STMT, yymsp[0].minor.yy616); }
break;
- case 213: /* cmd ::= SHOW QUERIES */
+ case 220: /* cmd ::= SHOW QUERIES */
{ pCxt->pRootNode = createShowStmt(pCxt, QUERY_NODE_SHOW_QUERIES_STMT); }
break;
- case 214: /* cmd ::= SHOW SCORES */
+ case 221: /* cmd ::= SHOW SCORES */
{ pCxt->pRootNode = createShowStmt(pCxt, QUERY_NODE_SHOW_SCORES_STMT); }
break;
- case 215: /* cmd ::= SHOW TOPICS */
+ case 222: /* cmd ::= SHOW TOPICS */
{ pCxt->pRootNode = createShowStmt(pCxt, QUERY_NODE_SHOW_TOPICS_STMT); }
break;
- case 216: /* cmd ::= SHOW VARIABLES */
+ case 223: /* cmd ::= SHOW VARIABLES */
{ pCxt->pRootNode = createShowStmt(pCxt, QUERY_NODE_SHOW_VARIABLES_STMT); }
break;
- case 217: /* cmd ::= SHOW LOCAL VARIABLES */
+ case 224: /* cmd ::= SHOW LOCAL VARIABLES */
{ pCxt->pRootNode = createShowStmt(pCxt, QUERY_NODE_SHOW_LOCAL_VARIABLES_STMT); }
break;
- case 218: /* cmd ::= SHOW DNODE NK_INTEGER VARIABLES */
+ case 225: /* cmd ::= SHOW DNODE NK_INTEGER VARIABLES */
{ pCxt->pRootNode = createShowDnodeVariablesStmt(pCxt, createValueNode(pCxt, TSDB_DATA_TYPE_BIGINT, &yymsp[-1].minor.yy0)); }
break;
- case 219: /* cmd ::= SHOW BNODES */
+ case 226: /* cmd ::= SHOW BNODES */
{ pCxt->pRootNode = createShowStmt(pCxt, QUERY_NODE_SHOW_BNODES_STMT); }
break;
- case 220: /* cmd ::= SHOW SNODES */
+ case 227: /* cmd ::= SHOW SNODES */
{ pCxt->pRootNode = createShowStmt(pCxt, QUERY_NODE_SHOW_SNODES_STMT); }
break;
- case 221: /* cmd ::= SHOW CLUSTER */
+ case 228: /* cmd ::= SHOW CLUSTER */
{ pCxt->pRootNode = createShowStmt(pCxt, QUERY_NODE_SHOW_CLUSTER_STMT); }
break;
- case 222: /* cmd ::= SHOW TRANSACTIONS */
+ case 229: /* cmd ::= SHOW TRANSACTIONS */
{ pCxt->pRootNode = createShowStmt(pCxt, QUERY_NODE_SHOW_TRANSACTIONS_STMT); }
break;
- case 223: /* cmd ::= SHOW TABLE DISTRIBUTED full_table_name */
-{ pCxt->pRootNode = createShowTableDistributedStmt(pCxt, yymsp[0].minor.yy272); }
+ case 230: /* cmd ::= SHOW TABLE DISTRIBUTED full_table_name */
+{ pCxt->pRootNode = createShowTableDistributedStmt(pCxt, yymsp[0].minor.yy616); }
break;
- case 224: /* cmd ::= SHOW CONSUMERS */
+ case 231: /* cmd ::= SHOW CONSUMERS */
{ pCxt->pRootNode = createShowStmt(pCxt, QUERY_NODE_SHOW_CONSUMERS_STMT); }
break;
- case 225: /* cmd ::= SHOW SUBSCRIPTIONS */
+ case 232: /* cmd ::= SHOW SUBSCRIPTIONS */
{ pCxt->pRootNode = createShowStmt(pCxt, QUERY_NODE_SHOW_SUBSCRIPTIONS_STMT); }
break;
- case 226: /* cmd ::= SHOW TAGS FROM table_name_cond from_db_opt */
-{ pCxt->pRootNode = createShowStmtWithCond(pCxt, QUERY_NODE_SHOW_TAGS_STMT, yymsp[0].minor.yy272, yymsp[-1].minor.yy272, OP_TYPE_EQUAL); }
+ case 233: /* cmd ::= SHOW TAGS FROM table_name_cond from_db_opt */
+{ pCxt->pRootNode = createShowStmtWithCond(pCxt, QUERY_NODE_SHOW_TAGS_STMT, yymsp[0].minor.yy616, yymsp[-1].minor.yy616, OP_TYPE_EQUAL); }
break;
- case 227: /* db_name_cond_opt ::= */
- case 232: /* from_db_opt ::= */ yytestcase(yyruleno==232);
-{ yymsp[1].minor.yy272 = createDefaultDatabaseCondValue(pCxt); }
+ case 234: /* cmd ::= SHOW VNODES NK_INTEGER */
+{ pCxt->pRootNode = createShowVnodesStmt(pCxt, createValueNode(pCxt, TSDB_DATA_TYPE_BIGINT, &yymsp[0].minor.yy0), NULL); }
break;
- case 228: /* db_name_cond_opt ::= db_name NK_DOT */
-{ yylhsminor.yy272 = createValueNode(pCxt, TSDB_DATA_TYPE_BINARY, &yymsp[-1].minor.yy209); }
- yymsp[-1].minor.yy272 = yylhsminor.yy272;
+ case 235: /* cmd ::= SHOW VNODES NK_STRING */
+{ pCxt->pRootNode = createShowVnodesStmt(pCxt, NULL, createValueNode(pCxt, TSDB_DATA_TYPE_VARCHAR, &yymsp[0].minor.yy0)); }
break;
- case 229: /* like_pattern_opt ::= */
- case 404: /* from_clause_opt ::= */ yytestcase(yyruleno==404);
- case 433: /* where_clause_opt ::= */ yytestcase(yyruleno==433);
- case 437: /* twindow_clause_opt ::= */ yytestcase(yyruleno==437);
- case 442: /* sliding_opt ::= */ yytestcase(yyruleno==442);
- case 444: /* fill_opt ::= */ yytestcase(yyruleno==444);
- case 456: /* having_clause_opt ::= */ yytestcase(yyruleno==456);
- case 458: /* range_opt ::= */ yytestcase(yyruleno==458);
- case 460: /* every_opt ::= */ yytestcase(yyruleno==460);
- case 470: /* slimit_clause_opt ::= */ yytestcase(yyruleno==470);
- case 474: /* limit_clause_opt ::= */ yytestcase(yyruleno==474);
-{ yymsp[1].minor.yy272 = NULL; }
+ case 236: /* db_name_cond_opt ::= */
+ case 241: /* from_db_opt ::= */ yytestcase(yyruleno==241);
+{ yymsp[1].minor.yy616 = createDefaultDatabaseCondValue(pCxt); }
break;
- case 230: /* like_pattern_opt ::= LIKE NK_STRING */
-{ yymsp[-1].minor.yy272 = createValueNode(pCxt, TSDB_DATA_TYPE_BINARY, &yymsp[0].minor.yy0); }
+ case 237: /* db_name_cond_opt ::= db_name NK_DOT */
+{ yylhsminor.yy616 = createValueNode(pCxt, TSDB_DATA_TYPE_BINARY, &yymsp[-1].minor.yy673); }
+ yymsp[-1].minor.yy616 = yylhsminor.yy616;
break;
- case 231: /* table_name_cond ::= table_name */
-{ yylhsminor.yy272 = createValueNode(pCxt, TSDB_DATA_TYPE_BINARY, &yymsp[0].minor.yy209); }
- yymsp[0].minor.yy272 = yylhsminor.yy272;
+ case 238: /* like_pattern_opt ::= */
+ case 283: /* subtable_opt ::= */ yytestcase(yyruleno==283);
+ case 395: /* case_when_else_opt ::= */ yytestcase(yyruleno==395);
+ case 425: /* from_clause_opt ::= */ yytestcase(yyruleno==425);
+ case 454: /* where_clause_opt ::= */ yytestcase(yyruleno==454);
+ case 463: /* twindow_clause_opt ::= */ yytestcase(yyruleno==463);
+ case 468: /* sliding_opt ::= */ yytestcase(yyruleno==468);
+ case 470: /* fill_opt ::= */ yytestcase(yyruleno==470);
+ case 482: /* having_clause_opt ::= */ yytestcase(yyruleno==482);
+ case 484: /* range_opt ::= */ yytestcase(yyruleno==484);
+ case 486: /* every_opt ::= */ yytestcase(yyruleno==486);
+ case 499: /* slimit_clause_opt ::= */ yytestcase(yyruleno==499);
+ case 503: /* limit_clause_opt ::= */ yytestcase(yyruleno==503);
+{ yymsp[1].minor.yy616 = NULL; }
break;
- case 233: /* from_db_opt ::= FROM db_name */
-{ yymsp[-1].minor.yy272 = createValueNode(pCxt, TSDB_DATA_TYPE_BINARY, &yymsp[0].minor.yy209); }
+ case 239: /* like_pattern_opt ::= LIKE NK_STRING */
+{ yymsp[-1].minor.yy616 = createValueNode(pCxt, TSDB_DATA_TYPE_BINARY, &yymsp[0].minor.yy0); }
break;
- case 234: /* cmd ::= CREATE SMA INDEX not_exists_opt full_table_name ON full_table_name index_options */
-{ pCxt->pRootNode = createCreateIndexStmt(pCxt, INDEX_TYPE_SMA, yymsp[-4].minor.yy293, yymsp[-3].minor.yy272, yymsp[-1].minor.yy272, NULL, yymsp[0].minor.yy272); }
+ case 240: /* table_name_cond ::= table_name */
+{ yylhsminor.yy616 = createValueNode(pCxt, TSDB_DATA_TYPE_BINARY, &yymsp[0].minor.yy673); }
+ yymsp[0].minor.yy616 = yylhsminor.yy616;
break;
- case 235: /* cmd ::= DROP INDEX exists_opt full_table_name */
-{ pCxt->pRootNode = createDropIndexStmt(pCxt, yymsp[-1].minor.yy293, yymsp[0].minor.yy272); }
+ case 242: /* from_db_opt ::= FROM db_name */
+{ yymsp[-1].minor.yy616 = createValueNode(pCxt, TSDB_DATA_TYPE_BINARY, &yymsp[0].minor.yy673); }
break;
- case 236: /* index_options ::= FUNCTION NK_LP func_list NK_RP INTERVAL NK_LP duration_literal NK_RP sliding_opt sma_stream_opt */
-{ yymsp[-9].minor.yy272 = createIndexOption(pCxt, yymsp[-7].minor.yy172, releaseRawExprNode(pCxt, yymsp[-3].minor.yy272), NULL, yymsp[-1].minor.yy272, yymsp[0].minor.yy272); }
+ case 243: /* cmd ::= CREATE SMA INDEX not_exists_opt full_table_name ON full_table_name index_options */
+{ pCxt->pRootNode = createCreateIndexStmt(pCxt, INDEX_TYPE_SMA, yymsp[-4].minor.yy89, yymsp[-3].minor.yy616, yymsp[-1].minor.yy616, NULL, yymsp[0].minor.yy616); }
break;
- case 237: /* index_options ::= FUNCTION NK_LP func_list NK_RP INTERVAL NK_LP duration_literal NK_COMMA duration_literal NK_RP sliding_opt sma_stream_opt */
-{ yymsp[-11].minor.yy272 = createIndexOption(pCxt, yymsp[-9].minor.yy172, releaseRawExprNode(pCxt, yymsp[-5].minor.yy272), releaseRawExprNode(pCxt, yymsp[-3].minor.yy272), yymsp[-1].minor.yy272, yymsp[0].minor.yy272); }
+ case 244: /* cmd ::= DROP INDEX exists_opt full_table_name */
+{ pCxt->pRootNode = createDropIndexStmt(pCxt, yymsp[-1].minor.yy89, yymsp[0].minor.yy616); }
break;
- case 240: /* func ::= function_name NK_LP expression_list NK_RP */
-{ yylhsminor.yy272 = createFunctionNode(pCxt, &yymsp[-3].minor.yy209, yymsp[-1].minor.yy172); }
- yymsp[-3].minor.yy272 = yylhsminor.yy272;
+ case 245: /* index_options ::= FUNCTION NK_LP func_list NK_RP INTERVAL NK_LP duration_literal NK_RP sliding_opt sma_stream_opt */
+{ yymsp[-9].minor.yy616 = createIndexOption(pCxt, yymsp[-7].minor.yy152, releaseRawExprNode(pCxt, yymsp[-3].minor.yy616), NULL, yymsp[-1].minor.yy616, yymsp[0].minor.yy616); }
break;
- case 241: /* sma_stream_opt ::= */
- case 268: /* stream_options ::= */ yytestcase(yyruleno==268);
-{ yymsp[1].minor.yy272 = createStreamOptions(pCxt); }
+ case 246: /* index_options ::= FUNCTION NK_LP func_list NK_RP INTERVAL NK_LP duration_literal NK_COMMA duration_literal NK_RP sliding_opt sma_stream_opt */
+{ yymsp[-11].minor.yy616 = createIndexOption(pCxt, yymsp[-9].minor.yy152, releaseRawExprNode(pCxt, yymsp[-5].minor.yy616), releaseRawExprNode(pCxt, yymsp[-3].minor.yy616), yymsp[-1].minor.yy616, yymsp[0].minor.yy616); }
break;
- case 242: /* sma_stream_opt ::= stream_options WATERMARK duration_literal */
- case 272: /* stream_options ::= stream_options WATERMARK duration_literal */ yytestcase(yyruleno==272);
-{ ((SStreamOptions*)yymsp[-2].minor.yy272)->pWatermark = releaseRawExprNode(pCxt, yymsp[0].minor.yy272); yylhsminor.yy272 = yymsp[-2].minor.yy272; }
- yymsp[-2].minor.yy272 = yylhsminor.yy272;
+ case 249: /* func ::= function_name NK_LP expression_list NK_RP */
+{ yylhsminor.yy616 = createFunctionNode(pCxt, &yymsp[-3].minor.yy673, yymsp[-1].minor.yy152); }
+ yymsp[-3].minor.yy616 = yylhsminor.yy616;
break;
- case 243: /* sma_stream_opt ::= stream_options MAX_DELAY duration_literal */
-{ ((SStreamOptions*)yymsp[-2].minor.yy272)->pDelay = releaseRawExprNode(pCxt, yymsp[0].minor.yy272); yylhsminor.yy272 = yymsp[-2].minor.yy272; }
- yymsp[-2].minor.yy272 = yylhsminor.yy272;
+ case 250: /* sma_stream_opt ::= */
+ case 277: /* stream_options ::= */ yytestcase(yyruleno==277);
+{ yymsp[1].minor.yy616 = createStreamOptions(pCxt); }
break;
- case 244: /* cmd ::= CREATE TOPIC not_exists_opt topic_name AS query_expression */
-{ pCxt->pRootNode = createCreateTopicStmtUseQuery(pCxt, yymsp[-3].minor.yy293, &yymsp[-2].minor.yy209, yymsp[0].minor.yy272); }
+ case 251: /* sma_stream_opt ::= stream_options WATERMARK duration_literal */
+ case 281: /* stream_options ::= stream_options WATERMARK duration_literal */ yytestcase(yyruleno==281);
+{ ((SStreamOptions*)yymsp[-2].minor.yy616)->pWatermark = releaseRawExprNode(pCxt, yymsp[0].minor.yy616); yylhsminor.yy616 = yymsp[-2].minor.yy616; }
+ yymsp[-2].minor.yy616 = yylhsminor.yy616;
break;
- case 245: /* cmd ::= CREATE TOPIC not_exists_opt topic_name AS DATABASE db_name */
-{ pCxt->pRootNode = createCreateTopicStmtUseDb(pCxt, yymsp[-4].minor.yy293, &yymsp[-3].minor.yy209, &yymsp[0].minor.yy209, false); }
+ case 252: /* sma_stream_opt ::= stream_options MAX_DELAY duration_literal */
+{ ((SStreamOptions*)yymsp[-2].minor.yy616)->pDelay = releaseRawExprNode(pCxt, yymsp[0].minor.yy616); yylhsminor.yy616 = yymsp[-2].minor.yy616; }
+ yymsp[-2].minor.yy616 = yylhsminor.yy616;
break;
- case 246: /* cmd ::= CREATE TOPIC not_exists_opt topic_name WITH META AS DATABASE db_name */
-{ pCxt->pRootNode = createCreateTopicStmtUseDb(pCxt, yymsp[-6].minor.yy293, &yymsp[-5].minor.yy209, &yymsp[0].minor.yy209, true); }
+ case 253: /* cmd ::= CREATE TOPIC not_exists_opt topic_name AS query_or_subquery */
+{ pCxt->pRootNode = createCreateTopicStmtUseQuery(pCxt, yymsp[-3].minor.yy89, &yymsp[-2].minor.yy673, yymsp[0].minor.yy616); }
break;
- case 247: /* cmd ::= CREATE TOPIC not_exists_opt topic_name AS STABLE full_table_name */
-{ pCxt->pRootNode = createCreateTopicStmtUseTable(pCxt, yymsp[-4].minor.yy293, &yymsp[-3].minor.yy209, yymsp[0].minor.yy272, false); }
+ case 254: /* cmd ::= CREATE TOPIC not_exists_opt topic_name AS DATABASE db_name */
+{ pCxt->pRootNode = createCreateTopicStmtUseDb(pCxt, yymsp[-4].minor.yy89, &yymsp[-3].minor.yy673, &yymsp[0].minor.yy673, false); }
break;
- case 248: /* cmd ::= CREATE TOPIC not_exists_opt topic_name WITH META AS STABLE full_table_name */
-{ pCxt->pRootNode = createCreateTopicStmtUseTable(pCxt, yymsp[-6].minor.yy293, &yymsp[-5].minor.yy209, yymsp[0].minor.yy272, true); }
+ case 255: /* cmd ::= CREATE TOPIC not_exists_opt topic_name WITH META AS DATABASE db_name */
+{ pCxt->pRootNode = createCreateTopicStmtUseDb(pCxt, yymsp[-6].minor.yy89, &yymsp[-5].minor.yy673, &yymsp[0].minor.yy673, true); }
break;
- case 249: /* cmd ::= DROP TOPIC exists_opt topic_name */
-{ pCxt->pRootNode = createDropTopicStmt(pCxt, yymsp[-1].minor.yy293, &yymsp[0].minor.yy209); }
+ case 256: /* cmd ::= CREATE TOPIC not_exists_opt topic_name AS STABLE full_table_name */
+{ pCxt->pRootNode = createCreateTopicStmtUseTable(pCxt, yymsp[-4].minor.yy89, &yymsp[-3].minor.yy673, yymsp[0].minor.yy616, false); }
break;
- case 250: /* cmd ::= DROP CONSUMER GROUP exists_opt cgroup_name ON topic_name */
-{ pCxt->pRootNode = createDropCGroupStmt(pCxt, yymsp[-3].minor.yy293, &yymsp[-2].minor.yy209, &yymsp[0].minor.yy209); }
+ case 257: /* cmd ::= CREATE TOPIC not_exists_opt topic_name WITH META AS STABLE full_table_name */
+{ pCxt->pRootNode = createCreateTopicStmtUseTable(pCxt, yymsp[-6].minor.yy89, &yymsp[-5].minor.yy673, yymsp[0].minor.yy616, true); }
break;
- case 251: /* cmd ::= DESC full_table_name */
- case 252: /* cmd ::= DESCRIBE full_table_name */ yytestcase(yyruleno==252);
-{ pCxt->pRootNode = createDescribeStmt(pCxt, yymsp[0].minor.yy272); }
+ case 258: /* cmd ::= DROP TOPIC exists_opt topic_name */
+{ pCxt->pRootNode = createDropTopicStmt(pCxt, yymsp[-1].minor.yy89, &yymsp[0].minor.yy673); }
break;
- case 253: /* cmd ::= RESET QUERY CACHE */
-{ pCxt->pRootNode = createResetQueryCacheStmt(pCxt); }
+ case 259: /* cmd ::= DROP CONSUMER GROUP exists_opt cgroup_name ON topic_name */
+{ pCxt->pRootNode = createDropCGroupStmt(pCxt, yymsp[-3].minor.yy89, &yymsp[-2].minor.yy673, &yymsp[0].minor.yy673); }
+ break;
+ case 260: /* cmd ::= DESC full_table_name */
+ case 261: /* cmd ::= DESCRIBE full_table_name */ yytestcase(yyruleno==261);
+{ pCxt->pRootNode = createDescribeStmt(pCxt, yymsp[0].minor.yy616); }
break;
- case 254: /* cmd ::= EXPLAIN analyze_opt explain_options query_expression */
-{ pCxt->pRootNode = createExplainStmt(pCxt, yymsp[-2].minor.yy293, yymsp[-1].minor.yy272, yymsp[0].minor.yy272); }
+ case 262: /* cmd ::= RESET QUERY CACHE */
+{ pCxt->pRootNode = createResetQueryCacheStmt(pCxt); }
break;
- case 256: /* analyze_opt ::= ANALYZE */
- case 263: /* agg_func_opt ::= AGGREGATE */ yytestcase(yyruleno==263);
- case 424: /* set_quantifier_opt ::= DISTINCT */ yytestcase(yyruleno==424);
-{ yymsp[0].minor.yy293 = true; }
+ case 263: /* cmd ::= EXPLAIN analyze_opt explain_options query_or_subquery */
+{ pCxt->pRootNode = createExplainStmt(pCxt, yymsp[-2].minor.yy89, yymsp[-1].minor.yy616, yymsp[0].minor.yy616); }
break;
- case 257: /* explain_options ::= */
-{ yymsp[1].minor.yy272 = createDefaultExplainOptions(pCxt); }
+ case 265: /* analyze_opt ::= ANALYZE */
+ case 272: /* agg_func_opt ::= AGGREGATE */ yytestcase(yyruleno==272);
+ case 445: /* set_quantifier_opt ::= DISTINCT */ yytestcase(yyruleno==445);
+{ yymsp[0].minor.yy89 = true; }
break;
- case 258: /* explain_options ::= explain_options VERBOSE NK_BOOL */
-{ yylhsminor.yy272 = setExplainVerbose(pCxt, yymsp[-2].minor.yy272, &yymsp[0].minor.yy0); }
- yymsp[-2].minor.yy272 = yylhsminor.yy272;
+ case 266: /* explain_options ::= */
+{ yymsp[1].minor.yy616 = createDefaultExplainOptions(pCxt); }
break;
- case 259: /* explain_options ::= explain_options RATIO NK_FLOAT */
-{ yylhsminor.yy272 = setExplainRatio(pCxt, yymsp[-2].minor.yy272, &yymsp[0].minor.yy0); }
- yymsp[-2].minor.yy272 = yylhsminor.yy272;
+ case 267: /* explain_options ::= explain_options VERBOSE NK_BOOL */
+{ yylhsminor.yy616 = setExplainVerbose(pCxt, yymsp[-2].minor.yy616, &yymsp[0].minor.yy0); }
+ yymsp[-2].minor.yy616 = yylhsminor.yy616;
break;
- case 260: /* cmd ::= CREATE agg_func_opt FUNCTION not_exists_opt function_name AS NK_STRING OUTPUTTYPE type_name bufsize_opt */
-{ pCxt->pRootNode = createCreateFunctionStmt(pCxt, yymsp[-6].minor.yy293, yymsp[-8].minor.yy293, &yymsp[-5].minor.yy209, &yymsp[-3].minor.yy0, yymsp[-1].minor.yy616, yymsp[0].minor.yy232); }
+ case 268: /* explain_options ::= explain_options RATIO NK_FLOAT */
+{ yylhsminor.yy616 = setExplainRatio(pCxt, yymsp[-2].minor.yy616, &yymsp[0].minor.yy0); }
+ yymsp[-2].minor.yy616 = yylhsminor.yy616;
break;
- case 261: /* cmd ::= DROP FUNCTION exists_opt function_name */
-{ pCxt->pRootNode = createDropFunctionStmt(pCxt, yymsp[-1].minor.yy293, &yymsp[0].minor.yy209); }
+ case 269: /* cmd ::= CREATE agg_func_opt FUNCTION not_exists_opt function_name AS NK_STRING OUTPUTTYPE type_name bufsize_opt */
+{ pCxt->pRootNode = createCreateFunctionStmt(pCxt, yymsp[-6].minor.yy89, yymsp[-8].minor.yy89, &yymsp[-5].minor.yy673, &yymsp[-3].minor.yy0, yymsp[-1].minor.yy784, yymsp[0].minor.yy452); }
break;
- case 264: /* bufsize_opt ::= */
-{ yymsp[1].minor.yy232 = 0; }
+ case 270: /* cmd ::= DROP FUNCTION exists_opt function_name */
+{ pCxt->pRootNode = createDropFunctionStmt(pCxt, yymsp[-1].minor.yy89, &yymsp[0].minor.yy673); }
break;
- case 265: /* bufsize_opt ::= BUFSIZE NK_INTEGER */
-{ yymsp[-1].minor.yy232 = taosStr2Int32(yymsp[0].minor.yy0.z, NULL, 10); }
+ case 275: /* cmd ::= CREATE STREAM not_exists_opt stream_name stream_options INTO full_table_name tags_def_opt subtable_opt AS query_or_subquery */
+{ pCxt->pRootNode = createCreateStreamStmt(pCxt, yymsp[-8].minor.yy89, &yymsp[-7].minor.yy673, yymsp[-4].minor.yy616, yymsp[-6].minor.yy616, yymsp[-3].minor.yy152, yymsp[-2].minor.yy616, yymsp[0].minor.yy616); }
break;
- case 266: /* cmd ::= CREATE STREAM not_exists_opt stream_name stream_options INTO full_table_name AS query_expression */
-{ pCxt->pRootNode = createCreateStreamStmt(pCxt, yymsp[-6].minor.yy293, &yymsp[-5].minor.yy209, yymsp[-2].minor.yy272, yymsp[-4].minor.yy272, yymsp[0].minor.yy272); }
+ case 276: /* cmd ::= DROP STREAM exists_opt stream_name */
+{ pCxt->pRootNode = createDropStreamStmt(pCxt, yymsp[-1].minor.yy89, &yymsp[0].minor.yy673); }
break;
- case 267: /* cmd ::= DROP STREAM exists_opt stream_name */
-{ pCxt->pRootNode = createDropStreamStmt(pCxt, yymsp[-1].minor.yy293, &yymsp[0].minor.yy209); }
+ case 278: /* stream_options ::= stream_options TRIGGER AT_ONCE */
+{ ((SStreamOptions*)yymsp[-2].minor.yy616)->triggerType = STREAM_TRIGGER_AT_ONCE; yylhsminor.yy616 = yymsp[-2].minor.yy616; }
+ yymsp[-2].minor.yy616 = yylhsminor.yy616;
break;
- case 269: /* stream_options ::= stream_options TRIGGER AT_ONCE */
-{ ((SStreamOptions*)yymsp[-2].minor.yy272)->triggerType = STREAM_TRIGGER_AT_ONCE; yylhsminor.yy272 = yymsp[-2].minor.yy272; }
- yymsp[-2].minor.yy272 = yylhsminor.yy272;
+ case 279: /* stream_options ::= stream_options TRIGGER WINDOW_CLOSE */
+{ ((SStreamOptions*)yymsp[-2].minor.yy616)->triggerType = STREAM_TRIGGER_WINDOW_CLOSE; yylhsminor.yy616 = yymsp[-2].minor.yy616; }
+ yymsp[-2].minor.yy616 = yylhsminor.yy616;
break;
- case 270: /* stream_options ::= stream_options TRIGGER WINDOW_CLOSE */
-{ ((SStreamOptions*)yymsp[-2].minor.yy272)->triggerType = STREAM_TRIGGER_WINDOW_CLOSE; yylhsminor.yy272 = yymsp[-2].minor.yy272; }
- yymsp[-2].minor.yy272 = yylhsminor.yy272;
+ case 280: /* stream_options ::= stream_options TRIGGER MAX_DELAY duration_literal */
+{ ((SStreamOptions*)yymsp[-3].minor.yy616)->triggerType = STREAM_TRIGGER_MAX_DELAY; ((SStreamOptions*)yymsp[-3].minor.yy616)->pDelay = releaseRawExprNode(pCxt, yymsp[0].minor.yy616); yylhsminor.yy616 = yymsp[-3].minor.yy616; }
+ yymsp[-3].minor.yy616 = yylhsminor.yy616;
break;
- case 271: /* stream_options ::= stream_options TRIGGER MAX_DELAY duration_literal */
-{ ((SStreamOptions*)yymsp[-3].minor.yy272)->triggerType = STREAM_TRIGGER_MAX_DELAY; ((SStreamOptions*)yymsp[-3].minor.yy272)->pDelay = releaseRawExprNode(pCxt, yymsp[0].minor.yy272); yylhsminor.yy272 = yymsp[-3].minor.yy272; }
- yymsp[-3].minor.yy272 = yylhsminor.yy272;
+ case 282: /* stream_options ::= stream_options IGNORE EXPIRED NK_INTEGER */
+{ ((SStreamOptions*)yymsp[-3].minor.yy616)->ignoreExpired = taosStr2Int8(yymsp[0].minor.yy0.z, NULL, 10); yylhsminor.yy616 = yymsp[-3].minor.yy616; }
+ yymsp[-3].minor.yy616 = yylhsminor.yy616;
break;
- case 273: /* stream_options ::= stream_options IGNORE EXPIRED NK_INTEGER */
-{ ((SStreamOptions*)yymsp[-3].minor.yy272)->ignoreExpired = taosStr2Int8(yymsp[0].minor.yy0.z, NULL, 10); yylhsminor.yy272 = yymsp[-3].minor.yy272; }
- yymsp[-3].minor.yy272 = yylhsminor.yy272;
+ case 284: /* subtable_opt ::= SUBTABLE NK_LP expression NK_RP */
+ case 469: /* sliding_opt ::= SLIDING NK_LP duration_literal NK_RP */ yytestcase(yyruleno==469);
+ case 487: /* every_opt ::= EVERY NK_LP duration_literal NK_RP */ yytestcase(yyruleno==487);
+{ yymsp[-3].minor.yy616 = releaseRawExprNode(pCxt, yymsp[-1].minor.yy616); }
break;
- case 274: /* cmd ::= KILL CONNECTION NK_INTEGER */
+ case 285: /* cmd ::= KILL CONNECTION NK_INTEGER */
{ pCxt->pRootNode = createKillStmt(pCxt, QUERY_NODE_KILL_CONNECTION_STMT, &yymsp[0].minor.yy0); }
break;
- case 275: /* cmd ::= KILL QUERY NK_STRING */
+ case 286: /* cmd ::= KILL QUERY NK_STRING */
{ pCxt->pRootNode = createKillQueryStmt(pCxt, &yymsp[0].minor.yy0); }
break;
- case 276: /* cmd ::= KILL TRANSACTION NK_INTEGER */
+ case 287: /* cmd ::= KILL TRANSACTION NK_INTEGER */
{ pCxt->pRootNode = createKillStmt(pCxt, QUERY_NODE_KILL_TRANSACTION_STMT, &yymsp[0].minor.yy0); }
break;
- case 277: /* cmd ::= BALANCE VGROUP */
+ case 288: /* cmd ::= BALANCE VGROUP */
{ pCxt->pRootNode = createBalanceVgroupStmt(pCxt); }
break;
- case 278: /* cmd ::= MERGE VGROUP NK_INTEGER NK_INTEGER */
+ case 289: /* cmd ::= MERGE VGROUP NK_INTEGER NK_INTEGER */
{ pCxt->pRootNode = createMergeVgroupStmt(pCxt, &yymsp[-1].minor.yy0, &yymsp[0].minor.yy0); }
break;
- case 279: /* cmd ::= REDISTRIBUTE VGROUP NK_INTEGER dnode_list */
-{ pCxt->pRootNode = createRedistributeVgroupStmt(pCxt, &yymsp[-1].minor.yy0, yymsp[0].minor.yy172); }
+ case 290: /* cmd ::= REDISTRIBUTE VGROUP NK_INTEGER dnode_list */
+{ pCxt->pRootNode = createRedistributeVgroupStmt(pCxt, &yymsp[-1].minor.yy0, yymsp[0].minor.yy152); }
break;
- case 280: /* cmd ::= SPLIT VGROUP NK_INTEGER */
+ case 291: /* cmd ::= SPLIT VGROUP NK_INTEGER */
{ pCxt->pRootNode = createSplitVgroupStmt(pCxt, &yymsp[0].minor.yy0); }
break;
- case 281: /* dnode_list ::= DNODE NK_INTEGER */
-{ yymsp[-1].minor.yy172 = createNodeList(pCxt, createValueNode(pCxt, TSDB_DATA_TYPE_BIGINT, &yymsp[0].minor.yy0)); }
- break;
- case 283: /* cmd ::= DELETE FROM full_table_name where_clause_opt */
-{ pCxt->pRootNode = createDeleteStmt(pCxt, yymsp[-1].minor.yy272, yymsp[0].minor.yy272); }
- break;
- case 285: /* cmd ::= INSERT INTO full_table_name NK_LP col_name_list NK_RP query_expression */
-{ pCxt->pRootNode = createInsertStmt(pCxt, yymsp[-4].minor.yy272, yymsp[-2].minor.yy172, yymsp[0].minor.yy272); }
- break;
- case 286: /* cmd ::= INSERT INTO full_table_name query_expression */
-{ pCxt->pRootNode = createInsertStmt(pCxt, yymsp[-1].minor.yy272, NULL, yymsp[0].minor.yy272); }
- break;
- case 287: /* literal ::= NK_INTEGER */
-{ yylhsminor.yy272 = createRawExprNode(pCxt, &yymsp[0].minor.yy0, createValueNode(pCxt, TSDB_DATA_TYPE_UBIGINT, &yymsp[0].minor.yy0)); }
- yymsp[0].minor.yy272 = yylhsminor.yy272;
- break;
- case 288: /* literal ::= NK_FLOAT */
-{ yylhsminor.yy272 = createRawExprNode(pCxt, &yymsp[0].minor.yy0, createValueNode(pCxt, TSDB_DATA_TYPE_DOUBLE, &yymsp[0].minor.yy0)); }
- yymsp[0].minor.yy272 = yylhsminor.yy272;
- break;
- case 289: /* literal ::= NK_STRING */
-{ yylhsminor.yy272 = createRawExprNode(pCxt, &yymsp[0].minor.yy0, createValueNode(pCxt, TSDB_DATA_TYPE_BINARY, &yymsp[0].minor.yy0)); }
- yymsp[0].minor.yy272 = yylhsminor.yy272;
- break;
- case 290: /* literal ::= NK_BOOL */
-{ yylhsminor.yy272 = createRawExprNode(pCxt, &yymsp[0].minor.yy0, createValueNode(pCxt, TSDB_DATA_TYPE_BOOL, &yymsp[0].minor.yy0)); }
- yymsp[0].minor.yy272 = yylhsminor.yy272;
- break;
- case 291: /* literal ::= TIMESTAMP NK_STRING */
-{ yylhsminor.yy272 = createRawExprNodeExt(pCxt, &yymsp[-1].minor.yy0, &yymsp[0].minor.yy0, createValueNode(pCxt, TSDB_DATA_TYPE_TIMESTAMP, &yymsp[0].minor.yy0)); }
- yymsp[-1].minor.yy272 = yylhsminor.yy272;
- break;
- case 292: /* literal ::= duration_literal */
- case 302: /* signed_literal ::= signed */ yytestcase(yyruleno==302);
- case 322: /* expression ::= literal */ yytestcase(yyruleno==322);
- case 323: /* expression ::= pseudo_column */ yytestcase(yyruleno==323);
- case 324: /* expression ::= column_reference */ yytestcase(yyruleno==324);
- case 325: /* expression ::= function_expression */ yytestcase(yyruleno==325);
- case 326: /* expression ::= subquery */ yytestcase(yyruleno==326);
- case 354: /* function_expression ::= literal_func */ yytestcase(yyruleno==354);
- case 396: /* boolean_value_expression ::= boolean_primary */ yytestcase(yyruleno==396);
- case 400: /* boolean_primary ::= predicate */ yytestcase(yyruleno==400);
- case 402: /* common_expression ::= expression */ yytestcase(yyruleno==402);
- case 403: /* common_expression ::= boolean_value_expression */ yytestcase(yyruleno==403);
- case 406: /* table_reference_list ::= table_reference */ yytestcase(yyruleno==406);
- case 408: /* table_reference ::= table_primary */ yytestcase(yyruleno==408);
- case 409: /* table_reference ::= joined_table */ yytestcase(yyruleno==409);
- case 413: /* table_primary ::= parenthesized_joined_table */ yytestcase(yyruleno==413);
- case 463: /* query_expression_body ::= query_primary */ yytestcase(yyruleno==463);
- case 466: /* query_primary ::= query_specification */ yytestcase(yyruleno==466);
-{ yylhsminor.yy272 = yymsp[0].minor.yy272; }
- yymsp[0].minor.yy272 = yylhsminor.yy272;
- break;
- case 293: /* literal ::= NULL */
-{ yylhsminor.yy272 = createRawExprNode(pCxt, &yymsp[0].minor.yy0, createValueNode(pCxt, TSDB_DATA_TYPE_NULL, &yymsp[0].minor.yy0)); }
- yymsp[0].minor.yy272 = yylhsminor.yy272;
- break;
- case 294: /* literal ::= NK_QUESTION */
-{ yylhsminor.yy272 = createRawExprNode(pCxt, &yymsp[0].minor.yy0, createPlaceholderValueNode(pCxt, &yymsp[0].minor.yy0)); }
- yymsp[0].minor.yy272 = yylhsminor.yy272;
- break;
- case 295: /* duration_literal ::= NK_VARIABLE */
-{ yylhsminor.yy272 = createRawExprNode(pCxt, &yymsp[0].minor.yy0, createDurationValueNode(pCxt, &yymsp[0].minor.yy0)); }
- yymsp[0].minor.yy272 = yylhsminor.yy272;
- break;
- case 296: /* signed ::= NK_INTEGER */
-{ yylhsminor.yy272 = createValueNode(pCxt, TSDB_DATA_TYPE_UBIGINT, &yymsp[0].minor.yy0); }
- yymsp[0].minor.yy272 = yylhsminor.yy272;
- break;
- case 297: /* signed ::= NK_PLUS NK_INTEGER */
-{ yymsp[-1].minor.yy272 = createValueNode(pCxt, TSDB_DATA_TYPE_UBIGINT, &yymsp[0].minor.yy0); }
- break;
- case 298: /* signed ::= NK_MINUS NK_INTEGER */
+ case 292: /* dnode_list ::= DNODE NK_INTEGER */
+{ yymsp[-1].minor.yy152 = createNodeList(pCxt, createValueNode(pCxt, TSDB_DATA_TYPE_BIGINT, &yymsp[0].minor.yy0)); }
+ break;
+ case 294: /* cmd ::= DELETE FROM full_table_name where_clause_opt */
+{ pCxt->pRootNode = createDeleteStmt(pCxt, yymsp[-1].minor.yy616, yymsp[0].minor.yy616); }
+ break;
+ case 296: /* cmd ::= INSERT INTO full_table_name NK_LP col_name_list NK_RP query_or_subquery */
+{ pCxt->pRootNode = createInsertStmt(pCxt, yymsp[-4].minor.yy616, yymsp[-2].minor.yy152, yymsp[0].minor.yy616); }
+ break;
+ case 297: /* cmd ::= INSERT INTO full_table_name query_or_subquery */
+{ pCxt->pRootNode = createInsertStmt(pCxt, yymsp[-1].minor.yy616, NULL, yymsp[0].minor.yy616); }
+ break;
+ case 298: /* literal ::= NK_INTEGER */
+{ yylhsminor.yy616 = createRawExprNode(pCxt, &yymsp[0].minor.yy0, createValueNode(pCxt, TSDB_DATA_TYPE_UBIGINT, &yymsp[0].minor.yy0)); }
+ yymsp[0].minor.yy616 = yylhsminor.yy616;
+ break;
+ case 299: /* literal ::= NK_FLOAT */
+{ yylhsminor.yy616 = createRawExprNode(pCxt, &yymsp[0].minor.yy0, createValueNode(pCxt, TSDB_DATA_TYPE_DOUBLE, &yymsp[0].minor.yy0)); }
+ yymsp[0].minor.yy616 = yylhsminor.yy616;
+ break;
+ case 300: /* literal ::= NK_STRING */
+{ yylhsminor.yy616 = createRawExprNode(pCxt, &yymsp[0].minor.yy0, createValueNode(pCxt, TSDB_DATA_TYPE_BINARY, &yymsp[0].minor.yy0)); }
+ yymsp[0].minor.yy616 = yylhsminor.yy616;
+ break;
+ case 301: /* literal ::= NK_BOOL */
+{ yylhsminor.yy616 = createRawExprNode(pCxt, &yymsp[0].minor.yy0, createValueNode(pCxt, TSDB_DATA_TYPE_BOOL, &yymsp[0].minor.yy0)); }
+ yymsp[0].minor.yy616 = yylhsminor.yy616;
+ break;
+ case 302: /* literal ::= TIMESTAMP NK_STRING */
+{ yylhsminor.yy616 = createRawExprNodeExt(pCxt, &yymsp[-1].minor.yy0, &yymsp[0].minor.yy0, createValueNode(pCxt, TSDB_DATA_TYPE_TIMESTAMP, &yymsp[0].minor.yy0)); }
+ yymsp[-1].minor.yy616 = yylhsminor.yy616;
+ break;
+ case 303: /* literal ::= duration_literal */
+ case 313: /* signed_literal ::= signed */ yytestcase(yyruleno==313);
+ case 333: /* expr_or_subquery ::= expression */ yytestcase(yyruleno==333);
+ case 334: /* expr_or_subquery ::= subquery */ yytestcase(yyruleno==334);
+ case 335: /* expression ::= literal */ yytestcase(yyruleno==335);
+ case 336: /* expression ::= pseudo_column */ yytestcase(yyruleno==336);
+ case 337: /* expression ::= column_reference */ yytestcase(yyruleno==337);
+ case 338: /* expression ::= function_expression */ yytestcase(yyruleno==338);
+ case 339: /* expression ::= case_when_expression */ yytestcase(yyruleno==339);
+ case 368: /* function_expression ::= literal_func */ yytestcase(yyruleno==368);
+ case 417: /* boolean_value_expression ::= boolean_primary */ yytestcase(yyruleno==417);
+ case 421: /* boolean_primary ::= predicate */ yytestcase(yyruleno==421);
+ case 423: /* common_expression ::= expr_or_subquery */ yytestcase(yyruleno==423);
+ case 424: /* common_expression ::= boolean_value_expression */ yytestcase(yyruleno==424);
+ case 427: /* table_reference_list ::= table_reference */ yytestcase(yyruleno==427);
+ case 429: /* table_reference ::= table_primary */ yytestcase(yyruleno==429);
+ case 430: /* table_reference ::= joined_table */ yytestcase(yyruleno==430);
+ case 434: /* table_primary ::= parenthesized_joined_table */ yytestcase(yyruleno==434);
+ case 489: /* query_simple ::= query_specification */ yytestcase(yyruleno==489);
+ case 490: /* query_simple ::= union_query_expression */ yytestcase(yyruleno==490);
+ case 493: /* query_simple_or_subquery ::= query_simple */ yytestcase(yyruleno==493);
+ case 495: /* query_or_subquery ::= query_expression */ yytestcase(yyruleno==495);
+{ yylhsminor.yy616 = yymsp[0].minor.yy616; }
+ yymsp[0].minor.yy616 = yylhsminor.yy616;
+ break;
+ case 304: /* literal ::= NULL */
+{ yylhsminor.yy616 = createRawExprNode(pCxt, &yymsp[0].minor.yy0, createValueNode(pCxt, TSDB_DATA_TYPE_NULL, &yymsp[0].minor.yy0)); }
+ yymsp[0].minor.yy616 = yylhsminor.yy616;
+ break;
+ case 305: /* literal ::= NK_QUESTION */
+{ yylhsminor.yy616 = createRawExprNode(pCxt, &yymsp[0].minor.yy0, createPlaceholderValueNode(pCxt, &yymsp[0].minor.yy0)); }
+ yymsp[0].minor.yy616 = yylhsminor.yy616;
+ break;
+ case 306: /* duration_literal ::= NK_VARIABLE */
+{ yylhsminor.yy616 = createRawExprNode(pCxt, &yymsp[0].minor.yy0, createDurationValueNode(pCxt, &yymsp[0].minor.yy0)); }
+ yymsp[0].minor.yy616 = yylhsminor.yy616;
+ break;
+ case 307: /* signed ::= NK_INTEGER */
+{ yylhsminor.yy616 = createValueNode(pCxt, TSDB_DATA_TYPE_UBIGINT, &yymsp[0].minor.yy0); }
+ yymsp[0].minor.yy616 = yylhsminor.yy616;
+ break;
+ case 308: /* signed ::= NK_PLUS NK_INTEGER */
+{ yymsp[-1].minor.yy616 = createValueNode(pCxt, TSDB_DATA_TYPE_UBIGINT, &yymsp[0].minor.yy0); }
+ break;
+ case 309: /* signed ::= NK_MINUS NK_INTEGER */
{
SToken t = yymsp[-1].minor.yy0;
t.n = (yymsp[0].minor.yy0.z + yymsp[0].minor.yy0.n) - yymsp[-1].minor.yy0.z;
- yylhsminor.yy272 = createValueNode(pCxt, TSDB_DATA_TYPE_BIGINT, &t);
+ yylhsminor.yy616 = createValueNode(pCxt, TSDB_DATA_TYPE_BIGINT, &t);
}
- yymsp[-1].minor.yy272 = yylhsminor.yy272;
+ yymsp[-1].minor.yy616 = yylhsminor.yy616;
break;
- case 299: /* signed ::= NK_FLOAT */
-{ yylhsminor.yy272 = createValueNode(pCxt, TSDB_DATA_TYPE_DOUBLE, &yymsp[0].minor.yy0); }
- yymsp[0].minor.yy272 = yylhsminor.yy272;
+ case 310: /* signed ::= NK_FLOAT */
+{ yylhsminor.yy616 = createValueNode(pCxt, TSDB_DATA_TYPE_DOUBLE, &yymsp[0].minor.yy0); }
+ yymsp[0].minor.yy616 = yylhsminor.yy616;
break;
- case 300: /* signed ::= NK_PLUS NK_FLOAT */
-{ yymsp[-1].minor.yy272 = createValueNode(pCxt, TSDB_DATA_TYPE_DOUBLE, &yymsp[0].minor.yy0); }
+ case 311: /* signed ::= NK_PLUS NK_FLOAT */
+{ yymsp[-1].minor.yy616 = createValueNode(pCxt, TSDB_DATA_TYPE_DOUBLE, &yymsp[0].minor.yy0); }
break;
- case 301: /* signed ::= NK_MINUS NK_FLOAT */
+ case 312: /* signed ::= NK_MINUS NK_FLOAT */
{
SToken t = yymsp[-1].minor.yy0;
t.n = (yymsp[0].minor.yy0.z + yymsp[0].minor.yy0.n) - yymsp[-1].minor.yy0.z;
- yylhsminor.yy272 = createValueNode(pCxt, TSDB_DATA_TYPE_DOUBLE, &t);
+ yylhsminor.yy616 = createValueNode(pCxt, TSDB_DATA_TYPE_DOUBLE, &t);
}
- yymsp[-1].minor.yy272 = yylhsminor.yy272;
- break;
- case 303: /* signed_literal ::= NK_STRING */
-{ yylhsminor.yy272 = createValueNode(pCxt, TSDB_DATA_TYPE_BINARY, &yymsp[0].minor.yy0); }
- yymsp[0].minor.yy272 = yylhsminor.yy272;
- break;
- case 304: /* signed_literal ::= NK_BOOL */
-{ yylhsminor.yy272 = createValueNode(pCxt, TSDB_DATA_TYPE_BOOL, &yymsp[0].minor.yy0); }
- yymsp[0].minor.yy272 = yylhsminor.yy272;
- break;
- case 305: /* signed_literal ::= TIMESTAMP NK_STRING */
-{ yymsp[-1].minor.yy272 = createValueNode(pCxt, TSDB_DATA_TYPE_TIMESTAMP, &yymsp[0].minor.yy0); }
- break;
- case 306: /* signed_literal ::= duration_literal */
- case 308: /* signed_literal ::= literal_func */ yytestcase(yyruleno==308);
- case 374: /* star_func_para ::= expression */ yytestcase(yyruleno==374);
- case 429: /* select_item ::= common_expression */ yytestcase(yyruleno==429);
- case 479: /* search_condition ::= common_expression */ yytestcase(yyruleno==479);
-{ yylhsminor.yy272 = releaseRawExprNode(pCxt, yymsp[0].minor.yy272); }
- yymsp[0].minor.yy272 = yylhsminor.yy272;
- break;
- case 307: /* signed_literal ::= NULL */
-{ yylhsminor.yy272 = createValueNode(pCxt, TSDB_DATA_TYPE_NULL, &yymsp[0].minor.yy0); }
- yymsp[0].minor.yy272 = yylhsminor.yy272;
- break;
- case 309: /* signed_literal ::= NK_QUESTION */
-{ yylhsminor.yy272 = createPlaceholderValueNode(pCxt, &yymsp[0].minor.yy0); }
- yymsp[0].minor.yy272 = yylhsminor.yy272;
- break;
- case 327: /* expression ::= NK_LP expression NK_RP */
- case 401: /* boolean_primary ::= NK_LP boolean_value_expression NK_RP */ yytestcase(yyruleno==401);
-{ yylhsminor.yy272 = createRawExprNodeExt(pCxt, &yymsp[-2].minor.yy0, &yymsp[0].minor.yy0, releaseRawExprNode(pCxt, yymsp[-1].minor.yy272)); }
- yymsp[-2].minor.yy272 = yylhsminor.yy272;
- break;
- case 328: /* expression ::= NK_PLUS expression */
+ yymsp[-1].minor.yy616 = yylhsminor.yy616;
+ break;
+ case 314: /* signed_literal ::= NK_STRING */
+{ yylhsminor.yy616 = createValueNode(pCxt, TSDB_DATA_TYPE_BINARY, &yymsp[0].minor.yy0); }
+ yymsp[0].minor.yy616 = yylhsminor.yy616;
+ break;
+ case 315: /* signed_literal ::= NK_BOOL */
+{ yylhsminor.yy616 = createValueNode(pCxt, TSDB_DATA_TYPE_BOOL, &yymsp[0].minor.yy0); }
+ yymsp[0].minor.yy616 = yylhsminor.yy616;
+ break;
+ case 316: /* signed_literal ::= TIMESTAMP NK_STRING */
+{ yymsp[-1].minor.yy616 = createValueNode(pCxt, TSDB_DATA_TYPE_TIMESTAMP, &yymsp[0].minor.yy0); }
+ break;
+ case 317: /* signed_literal ::= duration_literal */
+ case 319: /* signed_literal ::= literal_func */ yytestcase(yyruleno==319);
+ case 388: /* star_func_para ::= expr_or_subquery */ yytestcase(yyruleno==388);
+ case 450: /* select_item ::= common_expression */ yytestcase(yyruleno==450);
+ case 460: /* partition_item ::= expr_or_subquery */ yytestcase(yyruleno==460);
+ case 494: /* query_simple_or_subquery ::= subquery */ yytestcase(yyruleno==494);
+ case 496: /* query_or_subquery ::= subquery */ yytestcase(yyruleno==496);
+ case 509: /* search_condition ::= common_expression */ yytestcase(yyruleno==509);
+{ yylhsminor.yy616 = releaseRawExprNode(pCxt, yymsp[0].minor.yy616); }
+ yymsp[0].minor.yy616 = yylhsminor.yy616;
+ break;
+ case 318: /* signed_literal ::= NULL */
+{ yylhsminor.yy616 = createValueNode(pCxt, TSDB_DATA_TYPE_NULL, &yymsp[0].minor.yy0); }
+ yymsp[0].minor.yy616 = yylhsminor.yy616;
+ break;
+ case 320: /* signed_literal ::= NK_QUESTION */
+{ yylhsminor.yy616 = createPlaceholderValueNode(pCxt, &yymsp[0].minor.yy0); }
+ yymsp[0].minor.yy616 = yylhsminor.yy616;
+ break;
+ case 340: /* expression ::= NK_LP expression NK_RP */
+ case 422: /* boolean_primary ::= NK_LP boolean_value_expression NK_RP */ yytestcase(yyruleno==422);
+ case 508: /* subquery ::= NK_LP subquery NK_RP */ yytestcase(yyruleno==508);
+{ yylhsminor.yy616 = createRawExprNodeExt(pCxt, &yymsp[-2].minor.yy0, &yymsp[0].minor.yy0, releaseRawExprNode(pCxt, yymsp[-1].minor.yy616)); }
+ yymsp[-2].minor.yy616 = yylhsminor.yy616;
+ break;
+ case 341: /* expression ::= NK_PLUS expr_or_subquery */
{
- SToken t = getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy272);
- yylhsminor.yy272 = createRawExprNodeExt(pCxt, &yymsp[-1].minor.yy0, &t, releaseRawExprNode(pCxt, yymsp[0].minor.yy272));
+ SToken t = getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy616);
+ yylhsminor.yy616 = createRawExprNodeExt(pCxt, &yymsp[-1].minor.yy0, &t, releaseRawExprNode(pCxt, yymsp[0].minor.yy616));
}
- yymsp[-1].minor.yy272 = yylhsminor.yy272;
+ yymsp[-1].minor.yy616 = yylhsminor.yy616;
break;
- case 329: /* expression ::= NK_MINUS expression */
+ case 342: /* expression ::= NK_MINUS expr_or_subquery */
{
- SToken t = getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy272);
- yylhsminor.yy272 = createRawExprNodeExt(pCxt, &yymsp[-1].minor.yy0, &t, createOperatorNode(pCxt, OP_TYPE_MINUS, releaseRawExprNode(pCxt, yymsp[0].minor.yy272), NULL));
+ SToken t = getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy616);
+ yylhsminor.yy616 = createRawExprNodeExt(pCxt, &yymsp[-1].minor.yy0, &t, createOperatorNode(pCxt, OP_TYPE_MINUS, releaseRawExprNode(pCxt, yymsp[0].minor.yy616), NULL));
}
- yymsp[-1].minor.yy272 = yylhsminor.yy272;
+ yymsp[-1].minor.yy616 = yylhsminor.yy616;
break;
- case 330: /* expression ::= expression NK_PLUS expression */
+ case 343: /* expression ::= expr_or_subquery NK_PLUS expr_or_subquery */
{
- SToken s = getTokenFromRawExprNode(pCxt, yymsp[-2].minor.yy272);
- SToken e = getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy272);
- yylhsminor.yy272 = createRawExprNodeExt(pCxt, &s, &e, createOperatorNode(pCxt, OP_TYPE_ADD, releaseRawExprNode(pCxt, yymsp[-2].minor.yy272), releaseRawExprNode(pCxt, yymsp[0].minor.yy272)));
+ SToken s = getTokenFromRawExprNode(pCxt, yymsp[-2].minor.yy616);
+ SToken e = getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy616);
+ yylhsminor.yy616 = createRawExprNodeExt(pCxt, &s, &e, createOperatorNode(pCxt, OP_TYPE_ADD, releaseRawExprNode(pCxt, yymsp[-2].minor.yy616), releaseRawExprNode(pCxt, yymsp[0].minor.yy616)));
}
- yymsp[-2].minor.yy272 = yylhsminor.yy272;
+ yymsp[-2].minor.yy616 = yylhsminor.yy616;
break;
- case 331: /* expression ::= expression NK_MINUS expression */
+ case 344: /* expression ::= expr_or_subquery NK_MINUS expr_or_subquery */
{
- SToken s = getTokenFromRawExprNode(pCxt, yymsp[-2].minor.yy272);
- SToken e = getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy272);
- yylhsminor.yy272 = createRawExprNodeExt(pCxt, &s, &e, createOperatorNode(pCxt, OP_TYPE_SUB, releaseRawExprNode(pCxt, yymsp[-2].minor.yy272), releaseRawExprNode(pCxt, yymsp[0].minor.yy272)));
+ SToken s = getTokenFromRawExprNode(pCxt, yymsp[-2].minor.yy616);
+ SToken e = getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy616);
+ yylhsminor.yy616 = createRawExprNodeExt(pCxt, &s, &e, createOperatorNode(pCxt, OP_TYPE_SUB, releaseRawExprNode(pCxt, yymsp[-2].minor.yy616), releaseRawExprNode(pCxt, yymsp[0].minor.yy616)));
}
- yymsp[-2].minor.yy272 = yylhsminor.yy272;
+ yymsp[-2].minor.yy616 = yylhsminor.yy616;
break;
- case 332: /* expression ::= expression NK_STAR expression */
+ case 345: /* expression ::= expr_or_subquery NK_STAR expr_or_subquery */
{
- SToken s = getTokenFromRawExprNode(pCxt, yymsp[-2].minor.yy272);
- SToken e = getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy272);
- yylhsminor.yy272 = createRawExprNodeExt(pCxt, &s, &e, createOperatorNode(pCxt, OP_TYPE_MULTI, releaseRawExprNode(pCxt, yymsp[-2].minor.yy272), releaseRawExprNode(pCxt, yymsp[0].minor.yy272)));
+ SToken s = getTokenFromRawExprNode(pCxt, yymsp[-2].minor.yy616);
+ SToken e = getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy616);
+ yylhsminor.yy616 = createRawExprNodeExt(pCxt, &s, &e, createOperatorNode(pCxt, OP_TYPE_MULTI, releaseRawExprNode(pCxt, yymsp[-2].minor.yy616), releaseRawExprNode(pCxt, yymsp[0].minor.yy616)));
}
- yymsp[-2].minor.yy272 = yylhsminor.yy272;
+ yymsp[-2].minor.yy616 = yylhsminor.yy616;
break;
- case 333: /* expression ::= expression NK_SLASH expression */
+ case 346: /* expression ::= expr_or_subquery NK_SLASH expr_or_subquery */
{
- SToken s = getTokenFromRawExprNode(pCxt, yymsp[-2].minor.yy272);
- SToken e = getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy272);
- yylhsminor.yy272 = createRawExprNodeExt(pCxt, &s, &e, createOperatorNode(pCxt, OP_TYPE_DIV, releaseRawExprNode(pCxt, yymsp[-2].minor.yy272), releaseRawExprNode(pCxt, yymsp[0].minor.yy272)));
+ SToken s = getTokenFromRawExprNode(pCxt, yymsp[-2].minor.yy616);
+ SToken e = getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy616);
+ yylhsminor.yy616 = createRawExprNodeExt(pCxt, &s, &e, createOperatorNode(pCxt, OP_TYPE_DIV, releaseRawExprNode(pCxt, yymsp[-2].minor.yy616), releaseRawExprNode(pCxt, yymsp[0].minor.yy616)));
}
- yymsp[-2].minor.yy272 = yylhsminor.yy272;
+ yymsp[-2].minor.yy616 = yylhsminor.yy616;
break;
- case 334: /* expression ::= expression NK_REM expression */
+ case 347: /* expression ::= expr_or_subquery NK_REM expr_or_subquery */
{
- SToken s = getTokenFromRawExprNode(pCxt, yymsp[-2].minor.yy272);
- SToken e = getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy272);
- yylhsminor.yy272 = createRawExprNodeExt(pCxt, &s, &e, createOperatorNode(pCxt, OP_TYPE_REM, releaseRawExprNode(pCxt, yymsp[-2].minor.yy272), releaseRawExprNode(pCxt, yymsp[0].minor.yy272)));
+ SToken s = getTokenFromRawExprNode(pCxt, yymsp[-2].minor.yy616);
+ SToken e = getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy616);
+ yylhsminor.yy616 = createRawExprNodeExt(pCxt, &s, &e, createOperatorNode(pCxt, OP_TYPE_REM, releaseRawExprNode(pCxt, yymsp[-2].minor.yy616), releaseRawExprNode(pCxt, yymsp[0].minor.yy616)));
}
- yymsp[-2].minor.yy272 = yylhsminor.yy272;
+ yymsp[-2].minor.yy616 = yylhsminor.yy616;
break;
- case 335: /* expression ::= column_reference NK_ARROW NK_STRING */
+ case 348: /* expression ::= column_reference NK_ARROW NK_STRING */
{
- SToken s = getTokenFromRawExprNode(pCxt, yymsp[-2].minor.yy272);
- yylhsminor.yy272 = createRawExprNodeExt(pCxt, &s, &yymsp[0].minor.yy0, createOperatorNode(pCxt, OP_TYPE_JSON_GET_VALUE, releaseRawExprNode(pCxt, yymsp[-2].minor.yy272), createValueNode(pCxt, TSDB_DATA_TYPE_BINARY, &yymsp[0].minor.yy0)));
+ SToken s = getTokenFromRawExprNode(pCxt, yymsp[-2].minor.yy616);
+ yylhsminor.yy616 = createRawExprNodeExt(pCxt, &s, &yymsp[0].minor.yy0, createOperatorNode(pCxt, OP_TYPE_JSON_GET_VALUE, releaseRawExprNode(pCxt, yymsp[-2].minor.yy616), createValueNode(pCxt, TSDB_DATA_TYPE_BINARY, &yymsp[0].minor.yy0)));
}
- yymsp[-2].minor.yy272 = yylhsminor.yy272;
+ yymsp[-2].minor.yy616 = yylhsminor.yy616;
break;
- case 336: /* expression ::= expression NK_BITAND expression */
+ case 349: /* expression ::= expr_or_subquery NK_BITAND expr_or_subquery */
{
- SToken s = getTokenFromRawExprNode(pCxt, yymsp[-2].minor.yy272);
- SToken e = getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy272);
- yylhsminor.yy272 = createRawExprNodeExt(pCxt, &s, &e, createOperatorNode(pCxt, OP_TYPE_BIT_AND, releaseRawExprNode(pCxt, yymsp[-2].minor.yy272), releaseRawExprNode(pCxt, yymsp[0].minor.yy272)));
+ SToken s = getTokenFromRawExprNode(pCxt, yymsp[-2].minor.yy616);
+ SToken e = getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy616);
+ yylhsminor.yy616 = createRawExprNodeExt(pCxt, &s, &e, createOperatorNode(pCxt, OP_TYPE_BIT_AND, releaseRawExprNode(pCxt, yymsp[-2].minor.yy616), releaseRawExprNode(pCxt, yymsp[0].minor.yy616)));
}
- yymsp[-2].minor.yy272 = yylhsminor.yy272;
+ yymsp[-2].minor.yy616 = yylhsminor.yy616;
break;
- case 337: /* expression ::= expression NK_BITOR expression */
+ case 350: /* expression ::= expr_or_subquery NK_BITOR expr_or_subquery */
{
- SToken s = getTokenFromRawExprNode(pCxt, yymsp[-2].minor.yy272);
- SToken e = getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy272);
- yylhsminor.yy272 = createRawExprNodeExt(pCxt, &s, &e, createOperatorNode(pCxt, OP_TYPE_BIT_OR, releaseRawExprNode(pCxt, yymsp[-2].minor.yy272), releaseRawExprNode(pCxt, yymsp[0].minor.yy272)));
+ SToken s = getTokenFromRawExprNode(pCxt, yymsp[-2].minor.yy616);
+ SToken e = getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy616);
+ yylhsminor.yy616 = createRawExprNodeExt(pCxt, &s, &e, createOperatorNode(pCxt, OP_TYPE_BIT_OR, releaseRawExprNode(pCxt, yymsp[-2].minor.yy616), releaseRawExprNode(pCxt, yymsp[0].minor.yy616)));
}
- yymsp[-2].minor.yy272 = yylhsminor.yy272;
- break;
- case 340: /* column_reference ::= column_name */
-{ yylhsminor.yy272 = createRawExprNode(pCxt, &yymsp[0].minor.yy209, createColumnNode(pCxt, NULL, &yymsp[0].minor.yy209)); }
- yymsp[0].minor.yy272 = yylhsminor.yy272;
- break;
- case 341: /* column_reference ::= table_name NK_DOT column_name */
-{ yylhsminor.yy272 = createRawExprNodeExt(pCxt, &yymsp[-2].minor.yy209, &yymsp[0].minor.yy209, createColumnNode(pCxt, &yymsp[-2].minor.yy209, &yymsp[0].minor.yy209)); }
- yymsp[-2].minor.yy272 = yylhsminor.yy272;
- break;
- case 342: /* pseudo_column ::= ROWTS */
- case 343: /* pseudo_column ::= TBNAME */ yytestcase(yyruleno==343);
- case 345: /* pseudo_column ::= QSTART */ yytestcase(yyruleno==345);
- case 346: /* pseudo_column ::= QEND */ yytestcase(yyruleno==346);
- case 347: /* pseudo_column ::= QDURATION */ yytestcase(yyruleno==347);
- case 348: /* pseudo_column ::= WSTART */ yytestcase(yyruleno==348);
- case 349: /* pseudo_column ::= WEND */ yytestcase(yyruleno==349);
- case 350: /* pseudo_column ::= WDURATION */ yytestcase(yyruleno==350);
- case 356: /* literal_func ::= NOW */ yytestcase(yyruleno==356);
-{ yylhsminor.yy272 = createRawExprNode(pCxt, &yymsp[0].minor.yy0, createFunctionNode(pCxt, &yymsp[0].minor.yy0, NULL)); }
- yymsp[0].minor.yy272 = yylhsminor.yy272;
- break;
- case 344: /* pseudo_column ::= table_name NK_DOT TBNAME */
-{ yylhsminor.yy272 = createRawExprNodeExt(pCxt, &yymsp[-2].minor.yy209, &yymsp[0].minor.yy0, createFunctionNode(pCxt, &yymsp[0].minor.yy0, createNodeList(pCxt, createValueNode(pCxt, TSDB_DATA_TYPE_BINARY, &yymsp[-2].minor.yy209)))); }
- yymsp[-2].minor.yy272 = yylhsminor.yy272;
- break;
- case 351: /* function_expression ::= function_name NK_LP expression_list NK_RP */
- case 352: /* function_expression ::= star_func NK_LP star_func_para_list NK_RP */ yytestcase(yyruleno==352);
-{ yylhsminor.yy272 = createRawExprNodeExt(pCxt, &yymsp[-3].minor.yy209, &yymsp[0].minor.yy0, createFunctionNode(pCxt, &yymsp[-3].minor.yy209, yymsp[-1].minor.yy172)); }
- yymsp[-3].minor.yy272 = yylhsminor.yy272;
- break;
- case 353: /* function_expression ::= CAST NK_LP expression AS type_name NK_RP */
-{ yylhsminor.yy272 = createRawExprNodeExt(pCxt, &yymsp[-5].minor.yy0, &yymsp[0].minor.yy0, createCastFunctionNode(pCxt, releaseRawExprNode(pCxt, yymsp[-3].minor.yy272), yymsp[-1].minor.yy616)); }
- yymsp[-5].minor.yy272 = yylhsminor.yy272;
- break;
- case 355: /* literal_func ::= noarg_func NK_LP NK_RP */
-{ yylhsminor.yy272 = createRawExprNodeExt(pCxt, &yymsp[-2].minor.yy209, &yymsp[0].minor.yy0, createFunctionNode(pCxt, &yymsp[-2].minor.yy209, NULL)); }
- yymsp[-2].minor.yy272 = yylhsminor.yy272;
- break;
- case 370: /* star_func_para_list ::= NK_STAR */
-{ yylhsminor.yy172 = createNodeList(pCxt, createColumnNode(pCxt, NULL, &yymsp[0].minor.yy0)); }
- yymsp[0].minor.yy172 = yylhsminor.yy172;
- break;
- case 375: /* star_func_para ::= table_name NK_DOT NK_STAR */
- case 432: /* select_item ::= table_name NK_DOT NK_STAR */ yytestcase(yyruleno==432);
-{ yylhsminor.yy272 = createColumnNode(pCxt, &yymsp[-2].minor.yy209, &yymsp[0].minor.yy0); }
- yymsp[-2].minor.yy272 = yylhsminor.yy272;
- break;
- case 376: /* predicate ::= expression compare_op expression */
- case 381: /* predicate ::= expression in_op in_predicate_value */ yytestcase(yyruleno==381);
+ yymsp[-2].minor.yy616 = yylhsminor.yy616;
+ break;
+ case 353: /* column_reference ::= column_name */
+{ yylhsminor.yy616 = createRawExprNode(pCxt, &yymsp[0].minor.yy673, createColumnNode(pCxt, NULL, &yymsp[0].minor.yy673)); }
+ yymsp[0].minor.yy616 = yylhsminor.yy616;
+ break;
+ case 354: /* column_reference ::= table_name NK_DOT column_name */
+{ yylhsminor.yy616 = createRawExprNodeExt(pCxt, &yymsp[-2].minor.yy673, &yymsp[0].minor.yy673, createColumnNode(pCxt, &yymsp[-2].minor.yy673, &yymsp[0].minor.yy673)); }
+ yymsp[-2].minor.yy616 = yylhsminor.yy616;
+ break;
+ case 355: /* pseudo_column ::= ROWTS */
+ case 356: /* pseudo_column ::= TBNAME */ yytestcase(yyruleno==356);
+ case 358: /* pseudo_column ::= QSTART */ yytestcase(yyruleno==358);
+ case 359: /* pseudo_column ::= QEND */ yytestcase(yyruleno==359);
+ case 360: /* pseudo_column ::= QDURATION */ yytestcase(yyruleno==360);
+ case 361: /* pseudo_column ::= WSTART */ yytestcase(yyruleno==361);
+ case 362: /* pseudo_column ::= WEND */ yytestcase(yyruleno==362);
+ case 363: /* pseudo_column ::= WDURATION */ yytestcase(yyruleno==363);
+ case 364: /* pseudo_column ::= IROWTS */ yytestcase(yyruleno==364);
+ case 370: /* literal_func ::= NOW */ yytestcase(yyruleno==370);
+{ yylhsminor.yy616 = createRawExprNode(pCxt, &yymsp[0].minor.yy0, createFunctionNode(pCxt, &yymsp[0].minor.yy0, NULL)); }
+ yymsp[0].minor.yy616 = yylhsminor.yy616;
+ break;
+ case 357: /* pseudo_column ::= table_name NK_DOT TBNAME */
+{ yylhsminor.yy616 = createRawExprNodeExt(pCxt, &yymsp[-2].minor.yy673, &yymsp[0].minor.yy0, createFunctionNode(pCxt, &yymsp[0].minor.yy0, createNodeList(pCxt, createValueNode(pCxt, TSDB_DATA_TYPE_BINARY, &yymsp[-2].minor.yy673)))); }
+ yymsp[-2].minor.yy616 = yylhsminor.yy616;
+ break;
+ case 365: /* function_expression ::= function_name NK_LP expression_list NK_RP */
+ case 366: /* function_expression ::= star_func NK_LP star_func_para_list NK_RP */ yytestcase(yyruleno==366);
+{ yylhsminor.yy616 = createRawExprNodeExt(pCxt, &yymsp[-3].minor.yy673, &yymsp[0].minor.yy0, createFunctionNode(pCxt, &yymsp[-3].minor.yy673, yymsp[-1].minor.yy152)); }
+ yymsp[-3].minor.yy616 = yylhsminor.yy616;
+ break;
+ case 367: /* function_expression ::= CAST NK_LP expr_or_subquery AS type_name NK_RP */
+{ yylhsminor.yy616 = createRawExprNodeExt(pCxt, &yymsp[-5].minor.yy0, &yymsp[0].minor.yy0, createCastFunctionNode(pCxt, releaseRawExprNode(pCxt, yymsp[-3].minor.yy616), yymsp[-1].minor.yy784)); }
+ yymsp[-5].minor.yy616 = yylhsminor.yy616;
+ break;
+ case 369: /* literal_func ::= noarg_func NK_LP NK_RP */
+{ yylhsminor.yy616 = createRawExprNodeExt(pCxt, &yymsp[-2].minor.yy673, &yymsp[0].minor.yy0, createFunctionNode(pCxt, &yymsp[-2].minor.yy673, NULL)); }
+ yymsp[-2].minor.yy616 = yylhsminor.yy616;
+ break;
+ case 384: /* star_func_para_list ::= NK_STAR */
+{ yylhsminor.yy152 = createNodeList(pCxt, createColumnNode(pCxt, NULL, &yymsp[0].minor.yy0)); }
+ yymsp[0].minor.yy152 = yylhsminor.yy152;
+ break;
+ case 389: /* star_func_para ::= table_name NK_DOT NK_STAR */
+ case 453: /* select_item ::= table_name NK_DOT NK_STAR */ yytestcase(yyruleno==453);
+{ yylhsminor.yy616 = createColumnNode(pCxt, &yymsp[-2].minor.yy673, &yymsp[0].minor.yy0); }
+ yymsp[-2].minor.yy616 = yylhsminor.yy616;
+ break;
+ case 390: /* case_when_expression ::= CASE when_then_list case_when_else_opt END */
+{ yylhsminor.yy616 = createRawExprNodeExt(pCxt, &yymsp[-3].minor.yy0, &yymsp[0].minor.yy0, createCaseWhenNode(pCxt, NULL, yymsp[-2].minor.yy152, yymsp[-1].minor.yy616)); }
+ yymsp[-3].minor.yy616 = yylhsminor.yy616;
+ break;
+ case 391: /* case_when_expression ::= CASE common_expression when_then_list case_when_else_opt END */
+{ yylhsminor.yy616 = createRawExprNodeExt(pCxt, &yymsp[-4].minor.yy0, &yymsp[0].minor.yy0, createCaseWhenNode(pCxt, releaseRawExprNode(pCxt, yymsp[-3].minor.yy616), yymsp[-2].minor.yy152, yymsp[-1].minor.yy616)); }
+ yymsp[-4].minor.yy616 = yylhsminor.yy616;
+ break;
+ case 394: /* when_then_expr ::= WHEN common_expression THEN common_expression */
+{ yymsp[-3].minor.yy616 = createWhenThenNode(pCxt, releaseRawExprNode(pCxt, yymsp[-2].minor.yy616), releaseRawExprNode(pCxt, yymsp[0].minor.yy616)); }
+ break;
+ case 396: /* case_when_else_opt ::= ELSE common_expression */
+{ yymsp[-1].minor.yy616 = releaseRawExprNode(pCxt, yymsp[0].minor.yy616); }
+ break;
+ case 397: /* predicate ::= expr_or_subquery compare_op expr_or_subquery */
+ case 402: /* predicate ::= expr_or_subquery in_op in_predicate_value */ yytestcase(yyruleno==402);
{
- SToken s = getTokenFromRawExprNode(pCxt, yymsp[-2].minor.yy272);
- SToken e = getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy272);
- yylhsminor.yy272 = createRawExprNodeExt(pCxt, &s, &e, createOperatorNode(pCxt, yymsp[-1].minor.yy392, releaseRawExprNode(pCxt, yymsp[-2].minor.yy272), releaseRawExprNode(pCxt, yymsp[0].minor.yy272)));
+ SToken s = getTokenFromRawExprNode(pCxt, yymsp[-2].minor.yy616);
+ SToken e = getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy616);
+ yylhsminor.yy616 = createRawExprNodeExt(pCxt, &s, &e, createOperatorNode(pCxt, yymsp[-1].minor.yy380, releaseRawExprNode(pCxt, yymsp[-2].minor.yy616), releaseRawExprNode(pCxt, yymsp[0].minor.yy616)));
}
- yymsp[-2].minor.yy272 = yylhsminor.yy272;
+ yymsp[-2].minor.yy616 = yylhsminor.yy616;
break;
- case 377: /* predicate ::= expression BETWEEN expression AND expression */
+ case 398: /* predicate ::= expr_or_subquery BETWEEN expr_or_subquery AND expr_or_subquery */
{
- SToken s = getTokenFromRawExprNode(pCxt, yymsp[-4].minor.yy272);
- SToken e = getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy272);
- yylhsminor.yy272 = createRawExprNodeExt(pCxt, &s, &e, createBetweenAnd(pCxt, releaseRawExprNode(pCxt, yymsp[-4].minor.yy272), releaseRawExprNode(pCxt, yymsp[-2].minor.yy272), releaseRawExprNode(pCxt, yymsp[0].minor.yy272)));
+ SToken s = getTokenFromRawExprNode(pCxt, yymsp[-4].minor.yy616);
+ SToken e = getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy616);
+ yylhsminor.yy616 = createRawExprNodeExt(pCxt, &s, &e, createBetweenAnd(pCxt, releaseRawExprNode(pCxt, yymsp[-4].minor.yy616), releaseRawExprNode(pCxt, yymsp[-2].minor.yy616), releaseRawExprNode(pCxt, yymsp[0].minor.yy616)));
}
- yymsp[-4].minor.yy272 = yylhsminor.yy272;
+ yymsp[-4].minor.yy616 = yylhsminor.yy616;
break;
- case 378: /* predicate ::= expression NOT BETWEEN expression AND expression */
+ case 399: /* predicate ::= expr_or_subquery NOT BETWEEN expr_or_subquery AND expr_or_subquery */
{
- SToken s = getTokenFromRawExprNode(pCxt, yymsp[-5].minor.yy272);
- SToken e = getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy272);
- yylhsminor.yy272 = createRawExprNodeExt(pCxt, &s, &e, createNotBetweenAnd(pCxt, releaseRawExprNode(pCxt, yymsp[-5].minor.yy272), releaseRawExprNode(pCxt, yymsp[-2].minor.yy272), releaseRawExprNode(pCxt, yymsp[0].minor.yy272)));
+ SToken s = getTokenFromRawExprNode(pCxt, yymsp[-5].minor.yy616);
+ SToken e = getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy616);
+ yylhsminor.yy616 = createRawExprNodeExt(pCxt, &s, &e, createNotBetweenAnd(pCxt, releaseRawExprNode(pCxt, yymsp[-5].minor.yy616), releaseRawExprNode(pCxt, yymsp[-2].minor.yy616), releaseRawExprNode(pCxt, yymsp[0].minor.yy616)));
}
- yymsp[-5].minor.yy272 = yylhsminor.yy272;
+ yymsp[-5].minor.yy616 = yylhsminor.yy616;
break;
- case 379: /* predicate ::= expression IS NULL */
+ case 400: /* predicate ::= expr_or_subquery IS NULL */
{
- SToken s = getTokenFromRawExprNode(pCxt, yymsp[-2].minor.yy272);
- yylhsminor.yy272 = createRawExprNodeExt(pCxt, &s, &yymsp[0].minor.yy0, createOperatorNode(pCxt, OP_TYPE_IS_NULL, releaseRawExprNode(pCxt, yymsp[-2].minor.yy272), NULL));
+ SToken s = getTokenFromRawExprNode(pCxt, yymsp[-2].minor.yy616);
+ yylhsminor.yy616 = createRawExprNodeExt(pCxt, &s, &yymsp[0].minor.yy0, createOperatorNode(pCxt, OP_TYPE_IS_NULL, releaseRawExprNode(pCxt, yymsp[-2].minor.yy616), NULL));
}
- yymsp[-2].minor.yy272 = yylhsminor.yy272;
+ yymsp[-2].minor.yy616 = yylhsminor.yy616;
break;
- case 380: /* predicate ::= expression IS NOT NULL */
+ case 401: /* predicate ::= expr_or_subquery IS NOT NULL */
{
- SToken s = getTokenFromRawExprNode(pCxt, yymsp[-3].minor.yy272);
- yylhsminor.yy272 = createRawExprNodeExt(pCxt, &s, &yymsp[0].minor.yy0, createOperatorNode(pCxt, OP_TYPE_IS_NOT_NULL, releaseRawExprNode(pCxt, yymsp[-3].minor.yy272), NULL));
+ SToken s = getTokenFromRawExprNode(pCxt, yymsp[-3].minor.yy616);
+ yylhsminor.yy616 = createRawExprNodeExt(pCxt, &s, &yymsp[0].minor.yy0, createOperatorNode(pCxt, OP_TYPE_IS_NOT_NULL, releaseRawExprNode(pCxt, yymsp[-3].minor.yy616), NULL));
}
- yymsp[-3].minor.yy272 = yylhsminor.yy272;
+ yymsp[-3].minor.yy616 = yylhsminor.yy616;
break;
- case 382: /* compare_op ::= NK_LT */
-{ yymsp[0].minor.yy392 = OP_TYPE_LOWER_THAN; }
+ case 403: /* compare_op ::= NK_LT */
+{ yymsp[0].minor.yy380 = OP_TYPE_LOWER_THAN; }
break;
- case 383: /* compare_op ::= NK_GT */
-{ yymsp[0].minor.yy392 = OP_TYPE_GREATER_THAN; }
+ case 404: /* compare_op ::= NK_GT */
+{ yymsp[0].minor.yy380 = OP_TYPE_GREATER_THAN; }
break;
- case 384: /* compare_op ::= NK_LE */
-{ yymsp[0].minor.yy392 = OP_TYPE_LOWER_EQUAL; }
+ case 405: /* compare_op ::= NK_LE */
+{ yymsp[0].minor.yy380 = OP_TYPE_LOWER_EQUAL; }
break;
- case 385: /* compare_op ::= NK_GE */
-{ yymsp[0].minor.yy392 = OP_TYPE_GREATER_EQUAL; }
+ case 406: /* compare_op ::= NK_GE */
+{ yymsp[0].minor.yy380 = OP_TYPE_GREATER_EQUAL; }
break;
- case 386: /* compare_op ::= NK_NE */
-{ yymsp[0].minor.yy392 = OP_TYPE_NOT_EQUAL; }
+ case 407: /* compare_op ::= NK_NE */
+{ yymsp[0].minor.yy380 = OP_TYPE_NOT_EQUAL; }
break;
- case 387: /* compare_op ::= NK_EQ */
-{ yymsp[0].minor.yy392 = OP_TYPE_EQUAL; }
+ case 408: /* compare_op ::= NK_EQ */
+{ yymsp[0].minor.yy380 = OP_TYPE_EQUAL; }
break;
- case 388: /* compare_op ::= LIKE */
-{ yymsp[0].minor.yy392 = OP_TYPE_LIKE; }
+ case 409: /* compare_op ::= LIKE */
+{ yymsp[0].minor.yy380 = OP_TYPE_LIKE; }
break;
- case 389: /* compare_op ::= NOT LIKE */
-{ yymsp[-1].minor.yy392 = OP_TYPE_NOT_LIKE; }
+ case 410: /* compare_op ::= NOT LIKE */
+{ yymsp[-1].minor.yy380 = OP_TYPE_NOT_LIKE; }
break;
- case 390: /* compare_op ::= MATCH */
-{ yymsp[0].minor.yy392 = OP_TYPE_MATCH; }
+ case 411: /* compare_op ::= MATCH */
+{ yymsp[0].minor.yy380 = OP_TYPE_MATCH; }
break;
- case 391: /* compare_op ::= NMATCH */
-{ yymsp[0].minor.yy392 = OP_TYPE_NMATCH; }
+ case 412: /* compare_op ::= NMATCH */
+{ yymsp[0].minor.yy380 = OP_TYPE_NMATCH; }
break;
- case 392: /* compare_op ::= CONTAINS */
-{ yymsp[0].minor.yy392 = OP_TYPE_JSON_CONTAINS; }
+ case 413: /* compare_op ::= CONTAINS */
+{ yymsp[0].minor.yy380 = OP_TYPE_JSON_CONTAINS; }
break;
- case 393: /* in_op ::= IN */
-{ yymsp[0].minor.yy392 = OP_TYPE_IN; }
+ case 414: /* in_op ::= IN */
+{ yymsp[0].minor.yy380 = OP_TYPE_IN; }
break;
- case 394: /* in_op ::= NOT IN */
-{ yymsp[-1].minor.yy392 = OP_TYPE_NOT_IN; }
+ case 415: /* in_op ::= NOT IN */
+{ yymsp[-1].minor.yy380 = OP_TYPE_NOT_IN; }
break;
- case 395: /* in_predicate_value ::= NK_LP literal_list NK_RP */
-{ yylhsminor.yy272 = createRawExprNodeExt(pCxt, &yymsp[-2].minor.yy0, &yymsp[0].minor.yy0, createNodeListNode(pCxt, yymsp[-1].minor.yy172)); }
- yymsp[-2].minor.yy272 = yylhsminor.yy272;
+ case 416: /* in_predicate_value ::= NK_LP literal_list NK_RP */
+{ yylhsminor.yy616 = createRawExprNodeExt(pCxt, &yymsp[-2].minor.yy0, &yymsp[0].minor.yy0, createNodeListNode(pCxt, yymsp[-1].minor.yy152)); }
+ yymsp[-2].minor.yy616 = yylhsminor.yy616;
break;
- case 397: /* boolean_value_expression ::= NOT boolean_primary */
+ case 418: /* boolean_value_expression ::= NOT boolean_primary */
{
- SToken e = getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy272);
- yylhsminor.yy272 = createRawExprNodeExt(pCxt, &yymsp[-1].minor.yy0, &e, createLogicConditionNode(pCxt, LOGIC_COND_TYPE_NOT, releaseRawExprNode(pCxt, yymsp[0].minor.yy272), NULL));
+ SToken e = getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy616);
+ yylhsminor.yy616 = createRawExprNodeExt(pCxt, &yymsp[-1].minor.yy0, &e, createLogicConditionNode(pCxt, LOGIC_COND_TYPE_NOT, releaseRawExprNode(pCxt, yymsp[0].minor.yy616), NULL));
}
- yymsp[-1].minor.yy272 = yylhsminor.yy272;
+ yymsp[-1].minor.yy616 = yylhsminor.yy616;
break;
- case 398: /* boolean_value_expression ::= boolean_value_expression OR boolean_value_expression */
+ case 419: /* boolean_value_expression ::= boolean_value_expression OR boolean_value_expression */
{
- SToken s = getTokenFromRawExprNode(pCxt, yymsp[-2].minor.yy272);
- SToken e = getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy272);
- yylhsminor.yy272 = createRawExprNodeExt(pCxt, &s, &e, createLogicConditionNode(pCxt, LOGIC_COND_TYPE_OR, releaseRawExprNode(pCxt, yymsp[-2].minor.yy272), releaseRawExprNode(pCxt, yymsp[0].minor.yy272)));
+ SToken s = getTokenFromRawExprNode(pCxt, yymsp[-2].minor.yy616);
+ SToken e = getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy616);
+ yylhsminor.yy616 = createRawExprNodeExt(pCxt, &s, &e, createLogicConditionNode(pCxt, LOGIC_COND_TYPE_OR, releaseRawExprNode(pCxt, yymsp[-2].minor.yy616), releaseRawExprNode(pCxt, yymsp[0].minor.yy616)));
}
- yymsp[-2].minor.yy272 = yylhsminor.yy272;
+ yymsp[-2].minor.yy616 = yylhsminor.yy616;
break;
- case 399: /* boolean_value_expression ::= boolean_value_expression AND boolean_value_expression */
+ case 420: /* boolean_value_expression ::= boolean_value_expression AND boolean_value_expression */
{
- SToken s = getTokenFromRawExprNode(pCxt, yymsp[-2].minor.yy272);
- SToken e = getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy272);
- yylhsminor.yy272 = createRawExprNodeExt(pCxt, &s, &e, createLogicConditionNode(pCxt, LOGIC_COND_TYPE_AND, releaseRawExprNode(pCxt, yymsp[-2].minor.yy272), releaseRawExprNode(pCxt, yymsp[0].minor.yy272)));
+ SToken s = getTokenFromRawExprNode(pCxt, yymsp[-2].minor.yy616);
+ SToken e = getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy616);
+ yylhsminor.yy616 = createRawExprNodeExt(pCxt, &s, &e, createLogicConditionNode(pCxt, LOGIC_COND_TYPE_AND, releaseRawExprNode(pCxt, yymsp[-2].minor.yy616), releaseRawExprNode(pCxt, yymsp[0].minor.yy616)));
}
- yymsp[-2].minor.yy272 = yylhsminor.yy272;
+ yymsp[-2].minor.yy616 = yylhsminor.yy616;
break;
- case 405: /* from_clause_opt ::= FROM table_reference_list */
- case 434: /* where_clause_opt ::= WHERE search_condition */ yytestcase(yyruleno==434);
- case 457: /* having_clause_opt ::= HAVING search_condition */ yytestcase(yyruleno==457);
-{ yymsp[-1].minor.yy272 = yymsp[0].minor.yy272; }
+ case 426: /* from_clause_opt ::= FROM table_reference_list */
+ case 455: /* where_clause_opt ::= WHERE search_condition */ yytestcase(yyruleno==455);
+ case 483: /* having_clause_opt ::= HAVING search_condition */ yytestcase(yyruleno==483);
+{ yymsp[-1].minor.yy616 = yymsp[0].minor.yy616; }
break;
- case 407: /* table_reference_list ::= table_reference_list NK_COMMA table_reference */
-{ yylhsminor.yy272 = createJoinTableNode(pCxt, JOIN_TYPE_INNER, yymsp[-2].minor.yy272, yymsp[0].minor.yy272, NULL); }
- yymsp[-2].minor.yy272 = yylhsminor.yy272;
+ case 428: /* table_reference_list ::= table_reference_list NK_COMMA table_reference */
+{ yylhsminor.yy616 = createJoinTableNode(pCxt, JOIN_TYPE_INNER, yymsp[-2].minor.yy616, yymsp[0].minor.yy616, NULL); }
+ yymsp[-2].minor.yy616 = yylhsminor.yy616;
break;
- case 410: /* table_primary ::= table_name alias_opt */
-{ yylhsminor.yy272 = createRealTableNode(pCxt, NULL, &yymsp[-1].minor.yy209, &yymsp[0].minor.yy209); }
- yymsp[-1].minor.yy272 = yylhsminor.yy272;
+ case 431: /* table_primary ::= table_name alias_opt */
+{ yylhsminor.yy616 = createRealTableNode(pCxt, NULL, &yymsp[-1].minor.yy673, &yymsp[0].minor.yy673); }
+ yymsp[-1].minor.yy616 = yylhsminor.yy616;
break;
- case 411: /* table_primary ::= db_name NK_DOT table_name alias_opt */
-{ yylhsminor.yy272 = createRealTableNode(pCxt, &yymsp[-3].minor.yy209, &yymsp[-1].minor.yy209, &yymsp[0].minor.yy209); }
- yymsp[-3].minor.yy272 = yylhsminor.yy272;
+ case 432: /* table_primary ::= db_name NK_DOT table_name alias_opt */
+{ yylhsminor.yy616 = createRealTableNode(pCxt, &yymsp[-3].minor.yy673, &yymsp[-1].minor.yy673, &yymsp[0].minor.yy673); }
+ yymsp[-3].minor.yy616 = yylhsminor.yy616;
break;
- case 412: /* table_primary ::= subquery alias_opt */
-{ yylhsminor.yy272 = createTempTableNode(pCxt, releaseRawExprNode(pCxt, yymsp[-1].minor.yy272), &yymsp[0].minor.yy209); }
- yymsp[-1].minor.yy272 = yylhsminor.yy272;
+ case 433: /* table_primary ::= subquery alias_opt */
+{ yylhsminor.yy616 = createTempTableNode(pCxt, releaseRawExprNode(pCxt, yymsp[-1].minor.yy616), &yymsp[0].minor.yy673); }
+ yymsp[-1].minor.yy616 = yylhsminor.yy616;
break;
- case 414: /* alias_opt ::= */
-{ yymsp[1].minor.yy209 = nil_token; }
+ case 435: /* alias_opt ::= */
+{ yymsp[1].minor.yy673 = nil_token; }
break;
- case 415: /* alias_opt ::= table_alias */
-{ yylhsminor.yy209 = yymsp[0].minor.yy209; }
- yymsp[0].minor.yy209 = yylhsminor.yy209;
+ case 436: /* alias_opt ::= table_alias */
+{ yylhsminor.yy673 = yymsp[0].minor.yy673; }
+ yymsp[0].minor.yy673 = yylhsminor.yy673;
break;
- case 416: /* alias_opt ::= AS table_alias */
-{ yymsp[-1].minor.yy209 = yymsp[0].minor.yy209; }
+ case 437: /* alias_opt ::= AS table_alias */
+{ yymsp[-1].minor.yy673 = yymsp[0].minor.yy673; }
break;
- case 417: /* parenthesized_joined_table ::= NK_LP joined_table NK_RP */
- case 418: /* parenthesized_joined_table ::= NK_LP parenthesized_joined_table NK_RP */ yytestcase(yyruleno==418);
-{ yymsp[-2].minor.yy272 = yymsp[-1].minor.yy272; }
+ case 438: /* parenthesized_joined_table ::= NK_LP joined_table NK_RP */
+ case 439: /* parenthesized_joined_table ::= NK_LP parenthesized_joined_table NK_RP */ yytestcase(yyruleno==439);
+{ yymsp[-2].minor.yy616 = yymsp[-1].minor.yy616; }
break;
- case 419: /* joined_table ::= table_reference join_type JOIN table_reference ON search_condition */
-{ yylhsminor.yy272 = createJoinTableNode(pCxt, yymsp[-4].minor.yy156, yymsp[-5].minor.yy272, yymsp[-2].minor.yy272, yymsp[0].minor.yy272); }
- yymsp[-5].minor.yy272 = yylhsminor.yy272;
+ case 440: /* joined_table ::= table_reference join_type JOIN table_reference ON search_condition */
+{ yylhsminor.yy616 = createJoinTableNode(pCxt, yymsp[-4].minor.yy596, yymsp[-5].minor.yy616, yymsp[-2].minor.yy616, yymsp[0].minor.yy616); }
+ yymsp[-5].minor.yy616 = yylhsminor.yy616;
break;
- case 420: /* join_type ::= */
-{ yymsp[1].minor.yy156 = JOIN_TYPE_INNER; }
+ case 441: /* join_type ::= */
+{ yymsp[1].minor.yy596 = JOIN_TYPE_INNER; }
break;
- case 421: /* join_type ::= INNER */
-{ yymsp[0].minor.yy156 = JOIN_TYPE_INNER; }
+ case 442: /* join_type ::= INNER */
+{ yymsp[0].minor.yy596 = JOIN_TYPE_INNER; }
break;
- case 422: /* query_specification ::= SELECT set_quantifier_opt select_list from_clause_opt where_clause_opt partition_by_clause_opt range_opt every_opt fill_opt twindow_clause_opt group_by_clause_opt having_clause_opt */
+ case 443: /* query_specification ::= SELECT set_quantifier_opt select_list from_clause_opt where_clause_opt partition_by_clause_opt range_opt every_opt fill_opt twindow_clause_opt group_by_clause_opt having_clause_opt */
{
- yymsp[-11].minor.yy272 = createSelectStmt(pCxt, yymsp[-10].minor.yy293, yymsp[-9].minor.yy172, yymsp[-8].minor.yy272);
- yymsp[-11].minor.yy272 = addWhereClause(pCxt, yymsp[-11].minor.yy272, yymsp[-7].minor.yy272);
- yymsp[-11].minor.yy272 = addPartitionByClause(pCxt, yymsp[-11].minor.yy272, yymsp[-6].minor.yy172);
- yymsp[-11].minor.yy272 = addWindowClauseClause(pCxt, yymsp[-11].minor.yy272, yymsp[-2].minor.yy272);
- yymsp[-11].minor.yy272 = addGroupByClause(pCxt, yymsp[-11].minor.yy272, yymsp[-1].minor.yy172);
- yymsp[-11].minor.yy272 = addHavingClause(pCxt, yymsp[-11].minor.yy272, yymsp[0].minor.yy272);
- yymsp[-11].minor.yy272 = addRangeClause(pCxt, yymsp[-11].minor.yy272, yymsp[-5].minor.yy272);
- yymsp[-11].minor.yy272 = addEveryClause(pCxt, yymsp[-11].minor.yy272, yymsp[-4].minor.yy272);
- yymsp[-11].minor.yy272 = addFillClause(pCxt, yymsp[-11].minor.yy272, yymsp[-3].minor.yy272);
+ yymsp[-11].minor.yy616 = createSelectStmt(pCxt, yymsp[-10].minor.yy89, yymsp[-9].minor.yy152, yymsp[-8].minor.yy616);
+ yymsp[-11].minor.yy616 = addWhereClause(pCxt, yymsp[-11].minor.yy616, yymsp[-7].minor.yy616);
+ yymsp[-11].minor.yy616 = addPartitionByClause(pCxt, yymsp[-11].minor.yy616, yymsp[-6].minor.yy152);
+ yymsp[-11].minor.yy616 = addWindowClauseClause(pCxt, yymsp[-11].minor.yy616, yymsp[-2].minor.yy616);
+ yymsp[-11].minor.yy616 = addGroupByClause(pCxt, yymsp[-11].minor.yy616, yymsp[-1].minor.yy152);
+ yymsp[-11].minor.yy616 = addHavingClause(pCxt, yymsp[-11].minor.yy616, yymsp[0].minor.yy616);
+ yymsp[-11].minor.yy616 = addRangeClause(pCxt, yymsp[-11].minor.yy616, yymsp[-5].minor.yy616);
+ yymsp[-11].minor.yy616 = addEveryClause(pCxt, yymsp[-11].minor.yy616, yymsp[-4].minor.yy616);
+ yymsp[-11].minor.yy616 = addFillClause(pCxt, yymsp[-11].minor.yy616, yymsp[-3].minor.yy616);
}
break;
- case 425: /* set_quantifier_opt ::= ALL */
-{ yymsp[0].minor.yy293 = false; }
+ case 446: /* set_quantifier_opt ::= ALL */
+{ yymsp[0].minor.yy89 = false; }
break;
- case 428: /* select_item ::= NK_STAR */
-{ yylhsminor.yy272 = createColumnNode(pCxt, NULL, &yymsp[0].minor.yy0); }
- yymsp[0].minor.yy272 = yylhsminor.yy272;
+ case 449: /* select_item ::= NK_STAR */
+{ yylhsminor.yy616 = createColumnNode(pCxt, NULL, &yymsp[0].minor.yy0); }
+ yymsp[0].minor.yy616 = yylhsminor.yy616;
break;
- case 430: /* select_item ::= common_expression column_alias */
-{ yylhsminor.yy272 = setProjectionAlias(pCxt, releaseRawExprNode(pCxt, yymsp[-1].minor.yy272), &yymsp[0].minor.yy209); }
- yymsp[-1].minor.yy272 = yylhsminor.yy272;
+ case 451: /* select_item ::= common_expression column_alias */
+ case 461: /* partition_item ::= expr_or_subquery column_alias */ yytestcase(yyruleno==461);
+{ yylhsminor.yy616 = setProjectionAlias(pCxt, releaseRawExprNode(pCxt, yymsp[-1].minor.yy616), &yymsp[0].minor.yy673); }
+ yymsp[-1].minor.yy616 = yylhsminor.yy616;
break;
- case 431: /* select_item ::= common_expression AS column_alias */
-{ yylhsminor.yy272 = setProjectionAlias(pCxt, releaseRawExprNode(pCxt, yymsp[-2].minor.yy272), &yymsp[0].minor.yy209); }
- yymsp[-2].minor.yy272 = yylhsminor.yy272;
+ case 452: /* select_item ::= common_expression AS column_alias */
+ case 462: /* partition_item ::= expr_or_subquery AS column_alias */ yytestcase(yyruleno==462);
+{ yylhsminor.yy616 = setProjectionAlias(pCxt, releaseRawExprNode(pCxt, yymsp[-2].minor.yy616), &yymsp[0].minor.yy673); }
+ yymsp[-2].minor.yy616 = yylhsminor.yy616;
break;
- case 436: /* partition_by_clause_opt ::= PARTITION BY expression_list */
- case 453: /* group_by_clause_opt ::= GROUP BY group_by_list */ yytestcase(yyruleno==453);
- case 469: /* order_by_clause_opt ::= ORDER BY sort_specification_list */ yytestcase(yyruleno==469);
-{ yymsp[-2].minor.yy172 = yymsp[0].minor.yy172; }
+ case 457: /* partition_by_clause_opt ::= PARTITION BY partition_list */
+ case 479: /* group_by_clause_opt ::= GROUP BY group_by_list */ yytestcase(yyruleno==479);
+ case 498: /* order_by_clause_opt ::= ORDER BY sort_specification_list */ yytestcase(yyruleno==498);
+{ yymsp[-2].minor.yy152 = yymsp[0].minor.yy152; }
break;
- case 438: /* twindow_clause_opt ::= SESSION NK_LP column_reference NK_COMMA duration_literal NK_RP */
-{ yymsp[-5].minor.yy272 = createSessionWindowNode(pCxt, releaseRawExprNode(pCxt, yymsp[-3].minor.yy272), releaseRawExprNode(pCxt, yymsp[-1].minor.yy272)); }
+ case 464: /* twindow_clause_opt ::= SESSION NK_LP column_reference NK_COMMA duration_literal NK_RP */
+{ yymsp[-5].minor.yy616 = createSessionWindowNode(pCxt, releaseRawExprNode(pCxt, yymsp[-3].minor.yy616), releaseRawExprNode(pCxt, yymsp[-1].minor.yy616)); }
break;
- case 439: /* twindow_clause_opt ::= STATE_WINDOW NK_LP expression NK_RP */
-{ yymsp[-3].minor.yy272 = createStateWindowNode(pCxt, releaseRawExprNode(pCxt, yymsp[-1].minor.yy272)); }
+ case 465: /* twindow_clause_opt ::= STATE_WINDOW NK_LP expr_or_subquery NK_RP */
+{ yymsp[-3].minor.yy616 = createStateWindowNode(pCxt, releaseRawExprNode(pCxt, yymsp[-1].minor.yy616)); }
break;
- case 440: /* twindow_clause_opt ::= INTERVAL NK_LP duration_literal NK_RP sliding_opt fill_opt */
-{ yymsp[-5].minor.yy272 = createIntervalWindowNode(pCxt, releaseRawExprNode(pCxt, yymsp[-3].minor.yy272), NULL, yymsp[-1].minor.yy272, yymsp[0].minor.yy272); }
+ case 466: /* twindow_clause_opt ::= INTERVAL NK_LP duration_literal NK_RP sliding_opt fill_opt */
+{ yymsp[-5].minor.yy616 = createIntervalWindowNode(pCxt, releaseRawExprNode(pCxt, yymsp[-3].minor.yy616), NULL, yymsp[-1].minor.yy616, yymsp[0].minor.yy616); }
break;
- case 441: /* twindow_clause_opt ::= INTERVAL NK_LP duration_literal NK_COMMA duration_literal NK_RP sliding_opt fill_opt */
-{ yymsp[-7].minor.yy272 = createIntervalWindowNode(pCxt, releaseRawExprNode(pCxt, yymsp[-5].minor.yy272), releaseRawExprNode(pCxt, yymsp[-3].minor.yy272), yymsp[-1].minor.yy272, yymsp[0].minor.yy272); }
+ case 467: /* twindow_clause_opt ::= INTERVAL NK_LP duration_literal NK_COMMA duration_literal NK_RP sliding_opt fill_opt */
+{ yymsp[-7].minor.yy616 = createIntervalWindowNode(pCxt, releaseRawExprNode(pCxt, yymsp[-5].minor.yy616), releaseRawExprNode(pCxt, yymsp[-3].minor.yy616), yymsp[-1].minor.yy616, yymsp[0].minor.yy616); }
break;
- case 443: /* sliding_opt ::= SLIDING NK_LP duration_literal NK_RP */
- case 461: /* every_opt ::= EVERY NK_LP duration_literal NK_RP */ yytestcase(yyruleno==461);
-{ yymsp[-3].minor.yy272 = releaseRawExprNode(pCxt, yymsp[-1].minor.yy272); }
+ case 471: /* fill_opt ::= FILL NK_LP fill_mode NK_RP */
+{ yymsp[-3].minor.yy616 = createFillNode(pCxt, yymsp[-1].minor.yy102, NULL); }
break;
- case 445: /* fill_opt ::= FILL NK_LP fill_mode NK_RP */
-{ yymsp[-3].minor.yy272 = createFillNode(pCxt, yymsp[-1].minor.yy186, NULL); }
+ case 472: /* fill_opt ::= FILL NK_LP VALUE NK_COMMA literal_list NK_RP */
+{ yymsp[-5].minor.yy616 = createFillNode(pCxt, FILL_MODE_VALUE, createNodeListNode(pCxt, yymsp[-1].minor.yy152)); }
break;
- case 446: /* fill_opt ::= FILL NK_LP VALUE NK_COMMA literal_list NK_RP */
-{ yymsp[-5].minor.yy272 = createFillNode(pCxt, FILL_MODE_VALUE, createNodeListNode(pCxt, yymsp[-1].minor.yy172)); }
+ case 473: /* fill_mode ::= NONE */
+{ yymsp[0].minor.yy102 = FILL_MODE_NONE; }
break;
- case 447: /* fill_mode ::= NONE */
-{ yymsp[0].minor.yy186 = FILL_MODE_NONE; }
+ case 474: /* fill_mode ::= PREV */
+{ yymsp[0].minor.yy102 = FILL_MODE_PREV; }
break;
- case 448: /* fill_mode ::= PREV */
-{ yymsp[0].minor.yy186 = FILL_MODE_PREV; }
+ case 475: /* fill_mode ::= NULL */
+{ yymsp[0].minor.yy102 = FILL_MODE_NULL; }
break;
- case 449: /* fill_mode ::= NULL */
-{ yymsp[0].minor.yy186 = FILL_MODE_NULL; }
+ case 476: /* fill_mode ::= LINEAR */
+{ yymsp[0].minor.yy102 = FILL_MODE_LINEAR; }
break;
- case 450: /* fill_mode ::= LINEAR */
-{ yymsp[0].minor.yy186 = FILL_MODE_LINEAR; }
+ case 477: /* fill_mode ::= NEXT */
+{ yymsp[0].minor.yy102 = FILL_MODE_NEXT; }
break;
- case 451: /* fill_mode ::= NEXT */
-{ yymsp[0].minor.yy186 = FILL_MODE_NEXT; }
+ case 480: /* group_by_list ::= expr_or_subquery */
+{ yylhsminor.yy152 = createNodeList(pCxt, createGroupingSetNode(pCxt, releaseRawExprNode(pCxt, yymsp[0].minor.yy616))); }
+ yymsp[0].minor.yy152 = yylhsminor.yy152;
break;
- case 454: /* group_by_list ::= expression */
-{ yylhsminor.yy172 = createNodeList(pCxt, createGroupingSetNode(pCxt, releaseRawExprNode(pCxt, yymsp[0].minor.yy272))); }
- yymsp[0].minor.yy172 = yylhsminor.yy172;
+ case 481: /* group_by_list ::= group_by_list NK_COMMA expr_or_subquery */
+{ yylhsminor.yy152 = addNodeToList(pCxt, yymsp[-2].minor.yy152, createGroupingSetNode(pCxt, releaseRawExprNode(pCxt, yymsp[0].minor.yy616))); }
+ yymsp[-2].minor.yy152 = yylhsminor.yy152;
break;
- case 455: /* group_by_list ::= group_by_list NK_COMMA expression */
-{ yylhsminor.yy172 = addNodeToList(pCxt, yymsp[-2].minor.yy172, createGroupingSetNode(pCxt, releaseRawExprNode(pCxt, yymsp[0].minor.yy272))); }
- yymsp[-2].minor.yy172 = yylhsminor.yy172;
+ case 485: /* range_opt ::= RANGE NK_LP expr_or_subquery NK_COMMA expr_or_subquery NK_RP */
+{ yymsp[-5].minor.yy616 = createInterpTimeRange(pCxt, releaseRawExprNode(pCxt, yymsp[-3].minor.yy616), releaseRawExprNode(pCxt, yymsp[-1].minor.yy616)); }
break;
- case 459: /* range_opt ::= RANGE NK_LP expression NK_COMMA expression NK_RP */
-{ yymsp[-5].minor.yy272 = createInterpTimeRange(pCxt, releaseRawExprNode(pCxt, yymsp[-3].minor.yy272), releaseRawExprNode(pCxt, yymsp[-1].minor.yy272)); }
- break;
- case 462: /* query_expression ::= query_expression_body order_by_clause_opt slimit_clause_opt limit_clause_opt */
-{
- yylhsminor.yy272 = addOrderByClause(pCxt, yymsp[-3].minor.yy272, yymsp[-2].minor.yy172);
- yylhsminor.yy272 = addSlimitClause(pCxt, yylhsminor.yy272, yymsp[-1].minor.yy272);
- yylhsminor.yy272 = addLimitClause(pCxt, yylhsminor.yy272, yymsp[0].minor.yy272);
+ case 488: /* query_expression ::= query_simple order_by_clause_opt slimit_clause_opt limit_clause_opt */
+{
+ yylhsminor.yy616 = addOrderByClause(pCxt, yymsp[-3].minor.yy616, yymsp[-2].minor.yy152);
+ yylhsminor.yy616 = addSlimitClause(pCxt, yylhsminor.yy616, yymsp[-1].minor.yy616);
+ yylhsminor.yy616 = addLimitClause(pCxt, yylhsminor.yy616, yymsp[0].minor.yy616);
}
- yymsp[-3].minor.yy272 = yylhsminor.yy272;
+ yymsp[-3].minor.yy616 = yylhsminor.yy616;
break;
- case 464: /* query_expression_body ::= query_expression_body UNION ALL query_expression_body */
-{ yylhsminor.yy272 = createSetOperator(pCxt, SET_OP_TYPE_UNION_ALL, yymsp[-3].minor.yy272, yymsp[0].minor.yy272); }
- yymsp[-3].minor.yy272 = yylhsminor.yy272;
+ case 491: /* union_query_expression ::= query_simple_or_subquery UNION ALL query_simple_or_subquery */
+{ yylhsminor.yy616 = createSetOperator(pCxt, SET_OP_TYPE_UNION_ALL, yymsp[-3].minor.yy616, yymsp[0].minor.yy616); }
+ yymsp[-3].minor.yy616 = yylhsminor.yy616;
break;
- case 465: /* query_expression_body ::= query_expression_body UNION query_expression_body */
-{ yylhsminor.yy272 = createSetOperator(pCxt, SET_OP_TYPE_UNION, yymsp[-2].minor.yy272, yymsp[0].minor.yy272); }
- yymsp[-2].minor.yy272 = yylhsminor.yy272;
- break;
- case 467: /* query_primary ::= NK_LP query_expression_body order_by_clause_opt slimit_clause_opt limit_clause_opt NK_RP */
-{
- yymsp[-5].minor.yy272 = addOrderByClause(pCxt, yymsp[-4].minor.yy272, yymsp[-3].minor.yy172);
- yymsp[-5].minor.yy272 = addSlimitClause(pCxt, yymsp[-5].minor.yy272, yymsp[-2].minor.yy272);
- yymsp[-5].minor.yy272 = addLimitClause(pCxt, yymsp[-5].minor.yy272, yymsp[-1].minor.yy272);
- }
+ case 492: /* union_query_expression ::= query_simple_or_subquery UNION query_simple_or_subquery */
+{ yylhsminor.yy616 = createSetOperator(pCxt, SET_OP_TYPE_UNION, yymsp[-2].minor.yy616, yymsp[0].minor.yy616); }
+ yymsp[-2].minor.yy616 = yylhsminor.yy616;
break;
- case 471: /* slimit_clause_opt ::= SLIMIT NK_INTEGER */
- case 475: /* limit_clause_opt ::= LIMIT NK_INTEGER */ yytestcase(yyruleno==475);
-{ yymsp[-1].minor.yy272 = createLimitNode(pCxt, &yymsp[0].minor.yy0, NULL); }
+ case 500: /* slimit_clause_opt ::= SLIMIT NK_INTEGER */
+ case 504: /* limit_clause_opt ::= LIMIT NK_INTEGER */ yytestcase(yyruleno==504);
+{ yymsp[-1].minor.yy616 = createLimitNode(pCxt, &yymsp[0].minor.yy0, NULL); }
break;
- case 472: /* slimit_clause_opt ::= SLIMIT NK_INTEGER SOFFSET NK_INTEGER */
- case 476: /* limit_clause_opt ::= LIMIT NK_INTEGER OFFSET NK_INTEGER */ yytestcase(yyruleno==476);
-{ yymsp[-3].minor.yy272 = createLimitNode(pCxt, &yymsp[-2].minor.yy0, &yymsp[0].minor.yy0); }
+ case 501: /* slimit_clause_opt ::= SLIMIT NK_INTEGER SOFFSET NK_INTEGER */
+ case 505: /* limit_clause_opt ::= LIMIT NK_INTEGER OFFSET NK_INTEGER */ yytestcase(yyruleno==505);
+{ yymsp[-3].minor.yy616 = createLimitNode(pCxt, &yymsp[-2].minor.yy0, &yymsp[0].minor.yy0); }
break;
- case 473: /* slimit_clause_opt ::= SLIMIT NK_INTEGER NK_COMMA NK_INTEGER */
- case 477: /* limit_clause_opt ::= LIMIT NK_INTEGER NK_COMMA NK_INTEGER */ yytestcase(yyruleno==477);
-{ yymsp[-3].minor.yy272 = createLimitNode(pCxt, &yymsp[0].minor.yy0, &yymsp[-2].minor.yy0); }
+ case 502: /* slimit_clause_opt ::= SLIMIT NK_INTEGER NK_COMMA NK_INTEGER */
+ case 506: /* limit_clause_opt ::= LIMIT NK_INTEGER NK_COMMA NK_INTEGER */ yytestcase(yyruleno==506);
+{ yymsp[-3].minor.yy616 = createLimitNode(pCxt, &yymsp[0].minor.yy0, &yymsp[-2].minor.yy0); }
break;
- case 478: /* subquery ::= NK_LP query_expression NK_RP */
-{ yylhsminor.yy272 = createRawExprNodeExt(pCxt, &yymsp[-2].minor.yy0, &yymsp[0].minor.yy0, yymsp[-1].minor.yy272); }
- yymsp[-2].minor.yy272 = yylhsminor.yy272;
+ case 507: /* subquery ::= NK_LP query_expression NK_RP */
+{ yylhsminor.yy616 = createRawExprNodeExt(pCxt, &yymsp[-2].minor.yy0, &yymsp[0].minor.yy0, yymsp[-1].minor.yy616); }
+ yymsp[-2].minor.yy616 = yylhsminor.yy616;
break;
- case 482: /* sort_specification ::= expression ordering_specification_opt null_ordering_opt */
-{ yylhsminor.yy272 = createOrderByExprNode(pCxt, releaseRawExprNode(pCxt, yymsp[-2].minor.yy272), yymsp[-1].minor.yy818, yymsp[0].minor.yy493); }
- yymsp[-2].minor.yy272 = yylhsminor.yy272;
+ case 512: /* sort_specification ::= expr_or_subquery ordering_specification_opt null_ordering_opt */
+{ yylhsminor.yy616 = createOrderByExprNode(pCxt, releaseRawExprNode(pCxt, yymsp[-2].minor.yy616), yymsp[-1].minor.yy386, yymsp[0].minor.yy585); }
+ yymsp[-2].minor.yy616 = yylhsminor.yy616;
break;
- case 483: /* ordering_specification_opt ::= */
-{ yymsp[1].minor.yy818 = ORDER_ASC; }
+ case 513: /* ordering_specification_opt ::= */
+{ yymsp[1].minor.yy386 = ORDER_ASC; }
break;
- case 484: /* ordering_specification_opt ::= ASC */
-{ yymsp[0].minor.yy818 = ORDER_ASC; }
+ case 514: /* ordering_specification_opt ::= ASC */
+{ yymsp[0].minor.yy386 = ORDER_ASC; }
break;
- case 485: /* ordering_specification_opt ::= DESC */
-{ yymsp[0].minor.yy818 = ORDER_DESC; }
+ case 515: /* ordering_specification_opt ::= DESC */
+{ yymsp[0].minor.yy386 = ORDER_DESC; }
break;
- case 486: /* null_ordering_opt ::= */
-{ yymsp[1].minor.yy493 = NULL_ORDER_DEFAULT; }
+ case 516: /* null_ordering_opt ::= */
+{ yymsp[1].minor.yy585 = NULL_ORDER_DEFAULT; }
break;
- case 487: /* null_ordering_opt ::= NULLS FIRST */
-{ yymsp[-1].minor.yy493 = NULL_ORDER_FIRST; }
+ case 517: /* null_ordering_opt ::= NULLS FIRST */
+{ yymsp[-1].minor.yy585 = NULL_ORDER_FIRST; }
break;
- case 488: /* null_ordering_opt ::= NULLS LAST */
-{ yymsp[-1].minor.yy493 = NULL_ORDER_LAST; }
+ case 518: /* null_ordering_opt ::= NULLS LAST */
+{ yymsp[-1].minor.yy585 = NULL_ORDER_LAST; }
break;
default:
break;
diff --git a/source/libs/parser/test/mockCatalog.cpp b/source/libs/parser/test/mockCatalog.cpp
index b376c33d1aca8951ed31297cd12a1843ebf47462..fcaa5af05ccdb3a9fe1cb8a51177cab679e0dc84 100644
--- a/source/libs/parser/test/mockCatalog.cpp
+++ b/source/libs/parser/test/mockCatalog.cpp
@@ -32,100 +32,63 @@
namespace {
void generateInformationSchema(MockCatalogService* mcs) {
- {
- ITableBuilder& builder =
- mcs->createTableBuilder(TSDB_INFORMATION_SCHEMA_DB, TSDB_INS_TABLE_DNODES, TSDB_SYSTEM_TABLE, 1)
- .addColumn("endpoint", TSDB_DATA_TYPE_BINARY, TSDB_EP_LEN);
- builder.done();
- }
- {
- ITableBuilder& builder =
- mcs->createTableBuilder(TSDB_INFORMATION_SCHEMA_DB, TSDB_INS_TABLE_MNODES, TSDB_SYSTEM_TABLE, 1)
- .addColumn("endpoint", TSDB_DATA_TYPE_BINARY, TSDB_EP_LEN);
- builder.done();
- }
- {
- ITableBuilder& builder =
- mcs->createTableBuilder(TSDB_INFORMATION_SCHEMA_DB, TSDB_INS_TABLE_MODULES, TSDB_SYSTEM_TABLE, 1)
- .addColumn("endpoint", TSDB_DATA_TYPE_BINARY, TSDB_EP_LEN);
- builder.done();
- }
- {
- ITableBuilder& builder =
- mcs->createTableBuilder(TSDB_INFORMATION_SCHEMA_DB, TSDB_INS_TABLE_QNODES, TSDB_SYSTEM_TABLE, 1)
- .addColumn("endpoint", TSDB_DATA_TYPE_BINARY, TSDB_EP_LEN);
- builder.done();
- }
- {
- ITableBuilder& builder =
- mcs->createTableBuilder(TSDB_INFORMATION_SCHEMA_DB, TSDB_INS_TABLE_DATABASES, TSDB_SYSTEM_TABLE, 1)
- .addColumn("name", TSDB_DATA_TYPE_BINARY, TSDB_DB_NAME_LEN);
- builder.done();
- }
- {
- ITableBuilder& builder =
- mcs->createTableBuilder(TSDB_INFORMATION_SCHEMA_DB, TSDB_INS_TABLE_FUNCTIONS, TSDB_SYSTEM_TABLE, 1)
- .addColumn("name", TSDB_DATA_TYPE_BINARY, TSDB_FUNC_NAME_LEN);
- builder.done();
- }
- {
- ITableBuilder& builder =
- mcs->createTableBuilder(TSDB_INFORMATION_SCHEMA_DB, TSDB_INS_TABLE_INDEXES, TSDB_SYSTEM_TABLE, 3)
- .addColumn("index_name", TSDB_DATA_TYPE_BINARY, TSDB_TABLE_NAME_LEN)
- .addColumn("db_name", TSDB_DATA_TYPE_BINARY, TSDB_DB_NAME_LEN)
- .addColumn("table_name", TSDB_DATA_TYPE_BINARY, TSDB_TABLE_NAME_LEN);
- builder.done();
- }
- {
- ITableBuilder& builder =
- mcs->createTableBuilder(TSDB_INFORMATION_SCHEMA_DB, TSDB_INS_TABLE_STABLES, TSDB_SYSTEM_TABLE, 2)
- .addColumn("db_name", TSDB_DATA_TYPE_BINARY, TSDB_DB_NAME_LEN)
- .addColumn("stable_name", TSDB_DATA_TYPE_BINARY, TSDB_TABLE_NAME_LEN);
- builder.done();
- }
- {
- ITableBuilder& builder =
- mcs->createTableBuilder(TSDB_INFORMATION_SCHEMA_DB, TSDB_INS_TABLE_TABLES, TSDB_SYSTEM_TABLE, 2)
- .addColumn("db_name", TSDB_DATA_TYPE_BINARY, TSDB_DB_NAME_LEN)
- .addColumn("table_name", TSDB_DATA_TYPE_BINARY, TSDB_TABLE_NAME_LEN);
- builder.done();
- }
- {
- ITableBuilder& builder =
- mcs->createTableBuilder(TSDB_INFORMATION_SCHEMA_DB, TSDB_INS_TABLE_TABLE_DISTRIBUTED, TSDB_SYSTEM_TABLE, 1)
- .addColumn("db_name", TSDB_DATA_TYPE_BINARY, TSDB_DB_NAME_LEN);
- builder.done();
- }
- {
- ITableBuilder& builder =
- mcs->createTableBuilder(TSDB_INFORMATION_SCHEMA_DB, TSDB_INS_TABLE_USERS, TSDB_SYSTEM_TABLE, 1)
- .addColumn("name", TSDB_DATA_TYPE_BINARY, TSDB_USER_LEN);
- builder.done();
- }
- {
- ITableBuilder& builder =
- mcs->createTableBuilder(TSDB_INFORMATION_SCHEMA_DB, TSDB_INS_TABLE_VGROUPS, TSDB_SYSTEM_TABLE, 1)
- .addColumn("db_name", TSDB_DATA_TYPE_BINARY, TSDB_DB_NAME_LEN);
- builder.done();
- }
- {
- ITableBuilder& builder =
- mcs->createTableBuilder(TSDB_INFORMATION_SCHEMA_DB, TSDB_INS_TABLE_CONFIGS, TSDB_SYSTEM_TABLE, 1)
- .addColumn("name", TSDB_DATA_TYPE_BINARY, TSDB_CONFIG_OPTION_LEN);
- builder.done();
- }
- {
- ITableBuilder& builder =
- mcs->createTableBuilder(TSDB_INFORMATION_SCHEMA_DB, TSDB_INS_TABLE_DNODE_VARIABLES, TSDB_SYSTEM_TABLE, 1)
- .addColumn("dnode_id", TSDB_DATA_TYPE_INT);
- builder.done();
- }
- {
- ITableBuilder& builder =
- mcs->createTableBuilder(TSDB_INFORMATION_SCHEMA_DB, TSDB_INS_TABLE_CLUSTER, TSDB_SYSTEM_TABLE, 1)
- .addColumn("name", TSDB_DATA_TYPE_BINARY, TSDB_CLUSTER_ID_LEN);
- builder.done();
- }
+ mcs->createTableBuilder(TSDB_INFORMATION_SCHEMA_DB, TSDB_INS_TABLE_DNODES, TSDB_SYSTEM_TABLE, 1)
+ .addColumn("endpoint", TSDB_DATA_TYPE_BINARY, TSDB_EP_LEN)
+ .done();
+ mcs->createTableBuilder(TSDB_INFORMATION_SCHEMA_DB, TSDB_INS_TABLE_MNODES, TSDB_SYSTEM_TABLE, 1)
+ .addColumn("endpoint", TSDB_DATA_TYPE_BINARY, TSDB_EP_LEN)
+ .done();
+ mcs->createTableBuilder(TSDB_INFORMATION_SCHEMA_DB, TSDB_INS_TABLE_MODULES, TSDB_SYSTEM_TABLE, 1)
+ .addColumn("endpoint", TSDB_DATA_TYPE_BINARY, TSDB_EP_LEN)
+ .done();
+ mcs->createTableBuilder(TSDB_INFORMATION_SCHEMA_DB, TSDB_INS_TABLE_QNODES, TSDB_SYSTEM_TABLE, 1)
+ .addColumn("endpoint", TSDB_DATA_TYPE_BINARY, TSDB_EP_LEN)
+ .done();
+ mcs->createTableBuilder(TSDB_INFORMATION_SCHEMA_DB, TSDB_INS_TABLE_DATABASES, TSDB_SYSTEM_TABLE, 1)
+ .addColumn("name", TSDB_DATA_TYPE_BINARY, TSDB_DB_NAME_LEN)
+ .done();
+ mcs->createTableBuilder(TSDB_INFORMATION_SCHEMA_DB, TSDB_INS_TABLE_FUNCTIONS, TSDB_SYSTEM_TABLE, 1)
+ .addColumn("name", TSDB_DATA_TYPE_BINARY, TSDB_FUNC_NAME_LEN)
+ .done();
+ mcs->createTableBuilder(TSDB_INFORMATION_SCHEMA_DB, TSDB_INS_TABLE_INDEXES, TSDB_SYSTEM_TABLE, 3)
+ .addColumn("index_name", TSDB_DATA_TYPE_BINARY, TSDB_TABLE_NAME_LEN)
+ .addColumn("db_name", TSDB_DATA_TYPE_BINARY, TSDB_DB_NAME_LEN)
+ .addColumn("table_name", TSDB_DATA_TYPE_BINARY, TSDB_TABLE_NAME_LEN)
+ .done();
+ mcs->createTableBuilder(TSDB_INFORMATION_SCHEMA_DB, TSDB_INS_TABLE_STABLES, TSDB_SYSTEM_TABLE, 2)
+ .addColumn("db_name", TSDB_DATA_TYPE_BINARY, TSDB_DB_NAME_LEN)
+ .addColumn("stable_name", TSDB_DATA_TYPE_BINARY, TSDB_TABLE_NAME_LEN)
+ .done();
+ mcs->createTableBuilder(TSDB_INFORMATION_SCHEMA_DB, TSDB_INS_TABLE_TABLES, TSDB_SYSTEM_TABLE, 2)
+ .addColumn("db_name", TSDB_DATA_TYPE_BINARY, TSDB_DB_NAME_LEN)
+ .addColumn("table_name", TSDB_DATA_TYPE_BINARY, TSDB_TABLE_NAME_LEN)
+ .done();
+ mcs->createTableBuilder(TSDB_INFORMATION_SCHEMA_DB, TSDB_INS_TABLE_TABLE_DISTRIBUTED, TSDB_SYSTEM_TABLE, 1)
+ .addColumn("db_name", TSDB_DATA_TYPE_BINARY, TSDB_DB_NAME_LEN)
+ .done();
+ mcs->createTableBuilder(TSDB_INFORMATION_SCHEMA_DB, TSDB_INS_TABLE_USERS, TSDB_SYSTEM_TABLE, 1)
+ .addColumn("name", TSDB_DATA_TYPE_BINARY, TSDB_USER_LEN)
+ .done();
+ mcs->createTableBuilder(TSDB_INFORMATION_SCHEMA_DB, TSDB_INS_TABLE_VGROUPS, TSDB_SYSTEM_TABLE, 1)
+ .addColumn("db_name", TSDB_DATA_TYPE_BINARY, TSDB_DB_NAME_LEN)
+ .done();
+ mcs->createTableBuilder(TSDB_INFORMATION_SCHEMA_DB, TSDB_INS_TABLE_CONFIGS, TSDB_SYSTEM_TABLE, 1)
+ .addColumn("name", TSDB_DATA_TYPE_BINARY, TSDB_CONFIG_OPTION_LEN)
+ .done();
+ mcs->createTableBuilder(TSDB_INFORMATION_SCHEMA_DB, TSDB_INS_TABLE_DNODE_VARIABLES, TSDB_SYSTEM_TABLE, 1)
+ .addColumn("dnode_id", TSDB_DATA_TYPE_INT)
+ .done();
+ mcs->createTableBuilder(TSDB_INFORMATION_SCHEMA_DB, TSDB_INS_TABLE_CLUSTER, TSDB_SYSTEM_TABLE, 1)
+ .addColumn("name", TSDB_DATA_TYPE_BINARY, TSDB_CLUSTER_ID_LEN)
+ .done();
+ mcs->createTableBuilder(TSDB_INFORMATION_SCHEMA_DB, TSDB_INS_TABLE_VNODES, TSDB_SYSTEM_TABLE, 2)
+ .addColumn("dnode_id", TSDB_DATA_TYPE_INT)
+ .addColumn("dnode_ep", TSDB_DATA_TYPE_BINARY, TSDB_EP_LEN)
+ .done();
+ mcs->createTableBuilder(TSDB_INFORMATION_SCHEMA_DB, TSDB_INS_TABLE_TAGS, TSDB_SYSTEM_TABLE, 2)
+ .addColumn("table_name", TSDB_DATA_TYPE_BINARY, TSDB_TABLE_NAME_LEN)
+ .addColumn("db_name", TSDB_DATA_TYPE_BINARY, TSDB_DB_NAME_LEN)
+ .done();
}
void generatePerformanceSchema(MockCatalogService* mcs) {
@@ -137,7 +100,7 @@ void generatePerformanceSchema(MockCatalogService* mcs) {
}
{
ITableBuilder& builder =
- mcs->createTableBuilder(TSDB_PERFORMANCE_SCHEMA_DB, TSDB_PERFS_TABLE_STREAMS, TSDB_SYSTEM_TABLE, 1)
+ mcs->createTableBuilder(TSDB_INFORMATION_SCHEMA_DB, TSDB_INS_TABLE_STREAMS, TSDB_SYSTEM_TABLE, 1)
.addColumn("stream_name", TSDB_DATA_TYPE_BINARY, TSDB_TABLE_NAME_LEN);
builder.done();
}
@@ -149,7 +112,7 @@ void generatePerformanceSchema(MockCatalogService* mcs) {
}
{
ITableBuilder& builder =
- mcs->createTableBuilder(TSDB_PERFORMANCE_SCHEMA_DB, TSDB_PERFS_TABLE_SUBSCRIPTIONS, TSDB_SYSTEM_TABLE, 1)
+ mcs->createTableBuilder(TSDB_INFORMATION_SCHEMA_DB, TSDB_INS_TABLE_SUBSCRIPTIONS, TSDB_SYSTEM_TABLE, 1)
.addColumn("stream_name", TSDB_DATA_TYPE_BINARY, TSDB_TABLE_NAME_LEN);
builder.done();
}
diff --git a/source/libs/parser/test/parAlterToBalanceTest.cpp b/source/libs/parser/test/parAlterToBalanceTest.cpp
index 1caba6eab0384019b01f0c4957a6b9b9ffa1a5d1..3a08ef97564c426c834e26ed0569f77a29184d76 100644
--- a/source/libs/parser/test/parAlterToBalanceTest.cpp
+++ b/source/libs/parser/test/parAlterToBalanceTest.cpp
@@ -88,6 +88,7 @@ TEST_F(ParserInitialATest, alterDnode) {
* | REPLICA int_value -- todo: enum 1, 3, default 1, unit replica
* | STRICT {'off' | 'on'} -- todo: default 'off'
* | WAL_LEVEL int_value -- enum 1, 2, default 1
+ * | SST_TRIGGER int_value -- rang [1, 16], default 8
* }
*/
TEST_F(ParserInitialATest, alterDatabase) {
@@ -112,6 +113,7 @@ TEST_F(ParserInitialATest, alterDatabase) {
expect.cacheLast = -1;
expect.cacheLastSize = -1;
expect.replications = -1;
+ expect.sstTrigger = -1;
};
auto setAlterDbBuffer = [&](int32_t buffer) { expect.buffer = buffer; };
auto setAlterDbPageSize = [&](int32_t pageSize) { expect.pageSize = pageSize; };
@@ -128,6 +130,7 @@ TEST_F(ParserInitialATest, alterDatabase) {
auto setAlterDbStrict = [&](int8_t strict) { expect.strict = strict; };
auto setAlterDbCacheModel = [&](int8_t cacheModel) { expect.cacheLast = cacheModel; };
auto setAlterDbReplica = [&](int8_t replications) { expect.replications = replications; };
+ auto setAlterDbSstTrigger = [&](int8_t sstTrigger) { expect.sstTrigger = sstTrigger; };
setCheckDdlFunc([&](const SQuery* pQuery, ParserStage stage) {
ASSERT_EQ(nodeType(pQuery->pRoot), QUERY_NODE_ALTER_DATABASE_STMT);
@@ -146,6 +149,7 @@ TEST_F(ParserInitialATest, alterDatabase) {
ASSERT_EQ(req.strict, expect.strict);
ASSERT_EQ(req.cacheLast, expect.cacheLast);
ASSERT_EQ(req.replications, expect.replications);
+ ASSERT_EQ(req.sstTrigger, expect.sstTrigger);
});
const int32_t MINUTE_PER_DAY = MILLISECOND_PER_DAY / MILLISECOND_PER_MINUTE;
@@ -157,7 +161,8 @@ TEST_F(ParserInitialATest, alterDatabase) {
setAlterDbFsync(200);
setAlterDbWal(1);
setAlterDbCacheModel(TSDB_CACHE_MODEL_LAST_ROW);
- run("ALTER DATABASE test CACHEMODEL 'last_row' CACHESIZE 32 WAL_FSYNC_PERIOD 200 KEEP 10 WAL_LEVEL 1");
+ setAlterDbSstTrigger(16);
+ run("ALTER DATABASE test CACHEMODEL 'last_row' CACHESIZE 32 WAL_FSYNC_PERIOD 200 KEEP 10 WAL_LEVEL 1 STT_TRIGGER 16");
clearAlterDbReq();
initAlterDb("test");
@@ -231,6 +236,8 @@ TEST_F(ParserInitialATest, alterDatabaseSemanticCheck) {
run("ALTER DATABASE test KEEP 1w", TSDB_CODE_PAR_INVALID_DB_OPTION);
run("ALTER DATABASE test WAL_LEVEL 0", TSDB_CODE_PAR_INVALID_DB_OPTION);
run("ALTER DATABASE test WAL_LEVEL 3", TSDB_CODE_PAR_INVALID_DB_OPTION);
+ run("ALTER DATABASE test STT_TRIGGER 0", TSDB_CODE_PAR_INVALID_DB_OPTION);
+ run("ALTER DATABASE test STT_TRIGGER 17", TSDB_CODE_PAR_INVALID_DB_OPTION);
// Regardless of the specific sentence
run("ALTER DATABASE db WAL_LEVEL 0 # td-14436", TSDB_CODE_PAR_SYNTAX_ERROR, PARSER_STAGE_PARSE);
}
diff --git a/source/libs/parser/test/parInitialCTest.cpp b/source/libs/parser/test/parInitialCTest.cpp
index 68c4ac3706e2a42fd370a96a85d1adf6df162774..121bbaa733c3d4b019fbaf5638e32a2fe7ac37d2 100644
--- a/source/libs/parser/test/parInitialCTest.cpp
+++ b/source/libs/parser/test/parInitialCTest.cpp
@@ -111,10 +111,14 @@ TEST_F(ParserInitialCTest, createDatabase) {
expect.numOfVgroups = TSDB_DEFAULT_VN_PER_DB;
expect.numOfStables = TSDB_DEFAULT_DB_SINGLE_STABLE;
expect.schemaless = TSDB_DEFAULT_DB_SCHEMALESS;
- expect.walRetentionPeriod = TSDB_DEFAULT_DB_WAL_RETENTION_PERIOD;
- expect.walRetentionSize = TSDB_DEFAULT_DB_WAL_RETENTION_SIZE;
- expect.walRollPeriod = TSDB_DEFAULT_DB_WAL_ROLL_PERIOD;
+ expect.walRetentionPeriod = TSDB_REP_DEF_DB_WAL_RET_PERIOD;
+ expect.walRetentionSize = TSDB_REP_DEF_DB_WAL_RET_SIZE;
+ expect.walRollPeriod = TSDB_REP_DEF_DB_WAL_ROLL_PERIOD;
expect.walSegmentSize = TSDB_DEFAULT_DB_WAL_SEGMENT_SIZE;
+ expect.sstTrigger = TSDB_DEFAULT_SST_TRIGGER;
+ expect.hashPrefix = TSDB_DEFAULT_HASH_PREFIX;
+ expect.hashSuffix = TSDB_DEFAULT_HASH_SUFFIX;
+ expect.tsdbPageSize = TSDB_DEFAULT_TSDB_PAGESIZE;
};
auto setDbBufferFunc = [&](int32_t buffer) { expect.buffer = buffer; };
@@ -155,6 +159,10 @@ TEST_F(ParserInitialCTest, createDatabase) {
auto setDbWalRetentionSize = [&](int32_t walRetentionSize) { expect.walRetentionSize = walRetentionSize; };
auto setDbWalRollPeriod = [&](int32_t walRollPeriod) { expect.walRollPeriod = walRollPeriod; };
auto setDbWalSegmentSize = [&](int32_t walSegmentSize) { expect.walSegmentSize = walSegmentSize; };
+ auto setDbSstTrigger = [&](int32_t sstTrigger) { expect.sstTrigger = sstTrigger; };
+ auto setDbHashPrefix = [&](int32_t hashPrefix) { expect.hashPrefix = hashPrefix; };
+ auto setDbHashSuffix = [&](int32_t hashSuffix) { expect.hashSuffix = hashSuffix; };
+ auto setDbTsdbPageSize = [&](int32_t tsdbPageSize) { expect.tsdbPageSize = tsdbPageSize; };
setCheckDdlFunc([&](const SQuery* pQuery, ParserStage stage) {
ASSERT_EQ(nodeType(pQuery->pRoot), QUERY_NODE_CREATE_DATABASE_STMT);
@@ -185,7 +193,10 @@ TEST_F(ParserInitialCTest, createDatabase) {
ASSERT_EQ(req.walRetentionSize, expect.walRetentionSize);
ASSERT_EQ(req.walRollPeriod, expect.walRollPeriod);
ASSERT_EQ(req.walSegmentSize, expect.walSegmentSize);
- // ASSERT_EQ(req.schemaless, expect.schemaless);
+ ASSERT_EQ(req.sstTrigger, expect.sstTrigger);
+ ASSERT_EQ(req.hashPrefix, expect.hashPrefix);
+ ASSERT_EQ(req.hashSuffix, expect.hashSuffix);
+ ASSERT_EQ(req.tsdbPageSize, expect.tsdbPageSize);
ASSERT_EQ(req.ignoreExist, expect.ignoreExist);
ASSERT_EQ(req.numOfRetensions, expect.numOfRetensions);
if (expect.numOfRetensions > 0) {
@@ -233,6 +244,10 @@ TEST_F(ParserInitialCTest, createDatabase) {
setDbWalRetentionSize(-1);
setDbWalRollPeriod(10);
setDbWalSegmentSize(20);
+ setDbSstTrigger(16);
+ setDbHashPrefix(3);
+ setDbHashSuffix(4);
+ setDbTsdbPageSize(32);
run("CREATE DATABASE IF NOT EXISTS wxy_db "
"BUFFER 64 "
"CACHEMODEL 'last_value' "
@@ -256,7 +271,11 @@ TEST_F(ParserInitialCTest, createDatabase) {
"WAL_RETENTION_PERIOD -1 "
"WAL_RETENTION_SIZE -1 "
"WAL_ROLL_PERIOD 10 "
- "WAL_SEGMENT_SIZE 20");
+ "WAL_SEGMENT_SIZE 20 "
+ "STT_TRIGGER 16 "
+ "TABLE_PREFIX 3 "
+ "TABLE_SUFFIX 4 "
+ "TSDB_PAGESIZE 32");
clearCreateDbReq();
setCreateDbReqFunc("wxy_db", 1);
@@ -266,6 +285,14 @@ TEST_F(ParserInitialCTest, createDatabase) {
"DURATION 100m "
"KEEP 1440m,300h,400d ");
clearCreateDbReq();
+
+ setCreateDbReqFunc("wxy_db", 1);
+ setDbReplicaFunc(3);
+ setDbWalRetentionPeriod(TSDB_REPS_DEF_DB_WAL_RET_PERIOD);
+ setDbWalRetentionSize(TSDB_REPS_DEF_DB_WAL_RET_SIZE);
+ setDbWalRollPeriod(TSDB_REPS_DEF_DB_WAL_ROLL_PERIOD);
+ run("CREATE DATABASE IF NOT EXISTS wxy_db REPLICA 3");
+ clearCreateDbReq();
}
TEST_F(ParserInitialCTest, createDatabaseSemanticCheck) {
@@ -583,6 +610,20 @@ TEST_F(ParserInitialCTest, createStream) {
expect.igExpired = igExpired;
};
+ auto addTag = [&](const char* pFieldName, uint8_t type, int32_t bytes = 0) {
+ SField field = {0};
+ strcpy(field.name, pFieldName);
+ field.type = type;
+ field.bytes = bytes > 0 ? bytes : tDataTypes[type].bytes;
+ field.flags |= COL_SMA_ON;
+
+ if (NULL == expect.pTags) {
+ expect.pTags = taosArrayInit(TARRAY_MIN_SIZE, sizeof(SField));
+ }
+ taosArrayPush(expect.pTags, &field);
+ expect.numOfTags += 1;
+ };
+
setCheckDdlFunc([&](const SQuery* pQuery, ParserStage stage) {
ASSERT_EQ(nodeType(pQuery->pRoot), QUERY_NODE_CREATE_STREAM_STMT);
SCMCreateStreamReq req = {0};
@@ -598,6 +639,19 @@ TEST_F(ParserInitialCTest, createStream) {
ASSERT_EQ(req.maxDelay, expect.maxDelay);
ASSERT_EQ(req.watermark, expect.watermark);
ASSERT_EQ(req.igExpired, expect.igExpired);
+ ASSERT_EQ(req.numOfTags, expect.numOfTags);
+ if (expect.numOfTags > 0) {
+ ASSERT_EQ(taosArrayGetSize(req.pTags), expect.numOfTags);
+ ASSERT_EQ(taosArrayGetSize(req.pTags), taosArrayGetSize(expect.pTags));
+ for (int32_t i = 0; i < expect.numOfTags; ++i) {
+ SField* pField = (SField*)taosArrayGet(req.pTags, i);
+ SField* pExpectField = (SField*)taosArrayGet(expect.pTags, i);
+ ASSERT_EQ(std::string(pField->name), std::string(pExpectField->name));
+ ASSERT_EQ(pField->type, pExpectField->type);
+ ASSERT_EQ(pField->bytes, pExpectField->bytes);
+ ASSERT_EQ(pField->flags, pExpectField->flags);
+ }
+ }
tFreeSCMCreateStreamReq(&req);
});
@@ -613,6 +667,17 @@ TEST_F(ParserInitialCTest, createStream) {
run("CREATE STREAM IF NOT EXISTS s1 TRIGGER MAX_DELAY 20s WATERMARK 10s IGNORE EXPIRED 0 INTO st1 AS SELECT COUNT(*) "
"FROM t1 INTERVAL(10S)");
clearCreateStreamReq();
+
+ setCreateStreamReqFunc(
+ "s1", "test",
+ "create stream s1 into st3 tags(tname varchar(10), id int) subtable(concat('new-', tname)) as "
+ "select _wstart wstart, count(*) cnt from st1 partition by tbname tname, tag1 id interval(10s)",
+ "st3");
+ addTag("tname", TSDB_DATA_TYPE_VARCHAR, 10 + VARSTR_HEADER_SIZE);
+ addTag("id", TSDB_DATA_TYPE_INT);
+ run("CREATE STREAM s1 INTO st3 TAGS(tname VARCHAR(10), id INT) SUBTABLE(CONCAT('new-', tname)) "
+ "AS SELECT _WSTART wstart, COUNT(*) cnt FROM st1 PARTITION BY TBNAME tname, tag1 id INTERVAL(10S)");
+ clearCreateStreamReq();
}
TEST_F(ParserInitialCTest, createStreamSemanticCheck) {
diff --git a/source/libs/parser/test/parSelectTest.cpp b/source/libs/parser/test/parSelectTest.cpp
index 716dd7ffc000c5995a1121314825c6f1081d7079..79a70384012224a077bd0633e319112a19b6e6e7 100644
--- a/source/libs/parser/test/parSelectTest.cpp
+++ b/source/libs/parser/test/parSelectTest.cpp
@@ -60,6 +60,9 @@ TEST_F(ParserSelectTest, expression) {
run("SELECT ts > 0, c1 between 10 and 20 and c2 = 'qaz' FROM t1");
run("SELECT c1 | 10, c2 & 20, c4 | c5 FROM t1");
+
+ run("SELECT CASE WHEN ts > '2020-1-1 10:10:10' THEN c1 + 10 ELSE c1 - 10 END FROM t1 "
+ "WHERE CASE c1 WHEN c3 + 20 THEN c3 - 1 WHEN c3 + 10 THEN c3 - 2 ELSE 10 END > 0");
}
TEST_F(ParserSelectTest, condition) {
@@ -312,6 +315,8 @@ TEST_F(ParserSelectTest, subquery) {
run("SELECT _C0 FROM (SELECT _ROWTS, ts FROM st1s1)");
run("SELECT ts FROM (SELECT t1.ts FROM st1s1 t1)");
+
+ run("(((SELECT t1.ts FROM st1s1 t1)))");
}
TEST_F(ParserSelectTest, subquerySemanticCheck) {
@@ -445,4 +450,11 @@ TEST_F(ParserSelectTest, withoutFromSemanticCheck) {
run("SELECT TBNAME", TSDB_CODE_PAR_INVALID_TBNAME);
}
+TEST_F(ParserSelectTest, joinSemanticCheck) {
+ useDb("root", "test");
+
+ run("SELECT * FROM (SELECT tag1, SUM(c1) s FROM st1 GROUP BY tag1) t1, st1 t2 where t1.tag1 = t2.tag1",
+ TSDB_CODE_PAR_NOT_SUPPORT_JOIN);
+}
+
} // namespace ParserTest
diff --git a/source/libs/parser/test/parShowToUse.cpp b/source/libs/parser/test/parShowToUse.cpp
index 6590378565849e8b39bab100a324823e2d665848..36e70dd01598e3806c88ae367548fb0b1b815ece 100644
--- a/source/libs/parser/test/parShowToUse.cpp
+++ b/source/libs/parser/test/parShowToUse.cpp
@@ -196,6 +196,12 @@ TEST_F(ParserShowToUseTest, showTableDistributed) {
run("SHOW TABLE DISTRIBUTED st1");
}
+TEST_F(ParserShowToUseTest, showTags) {
+ useDb("root", "test");
+
+ run("SHOW TAGS FROM st1s1");
+}
+
// todo SHOW topics
TEST_F(ParserShowToUseTest, showUsers) {
@@ -213,12 +219,18 @@ TEST_F(ParserShowToUseTest, showVariables) {
TEST_F(ParserShowToUseTest, showVgroups) {
useDb("root", "test");
- run("SHOW vgroups");
+ run("SHOW VGROUPS");
- run("SHOW test.vgroups");
+ run("SHOW test.VGROUPS");
}
-// todo SHOW vnodes
+TEST_F(ParserShowToUseTest, showVnodes) {
+ useDb("root", "test");
+
+ run("SHOW VNODES 1");
+
+ run("SHOW VNODES 'node1:7030'");
+}
TEST_F(ParserShowToUseTest, splitVgroup) {
useDb("root", "test");
@@ -244,7 +256,10 @@ TEST_F(ParserShowToUseTest, trimDatabase) {
STrimDbReq expect = {0};
- auto setTrimDbReq = [&](const char* pDb) { snprintf(expect.db, sizeof(expect.db), "0.%s", pDb); };
+ auto setTrimDbReq = [&](const char* pDb, int32_t maxSpeed = 0) {
+ snprintf(expect.db, sizeof(expect.db), "0.%s", pDb);
+ expect.maxSpeed = maxSpeed;
+ };
setCheckDdlFunc([&](const SQuery* pQuery, ParserStage stage) {
ASSERT_EQ(nodeType(pQuery->pRoot), QUERY_NODE_TRIM_DATABASE_STMT);
@@ -252,10 +267,14 @@ TEST_F(ParserShowToUseTest, trimDatabase) {
STrimDbReq req = {0};
ASSERT_EQ(tDeserializeSTrimDbReq(pQuery->pCmdMsg->pMsg, pQuery->pCmdMsg->msgLen, &req), TSDB_CODE_SUCCESS);
ASSERT_EQ(std::string(req.db), std::string(expect.db));
+ ASSERT_EQ(req.maxSpeed, expect.maxSpeed);
});
setTrimDbReq("wxy_db");
run("TRIM DATABASE wxy_db");
+
+ setTrimDbReq("wxy_db", 100);
+ run("TRIM DATABASE wxy_db MAX_SPEED 100");
}
TEST_F(ParserShowToUseTest, useDatabase) {
diff --git a/source/libs/parser/test/parTestMain.cpp b/source/libs/parser/test/parTestMain.cpp
index bcaccbab071e06ee6d76b6498fb52d0330a066d9..9e67249238063d5e45b09ce9eca737e78ae3d33c 100644
--- a/source/libs/parser/test/parTestMain.cpp
+++ b/source/libs/parser/test/parTestMain.cpp
@@ -86,6 +86,7 @@ static void parseArg(int argc, char* argv[]) {
{"dump", no_argument, NULL, 'd'},
{"async", required_argument, NULL, 'a'},
{"skipSql", required_argument, NULL, 's'},
+ {"log", required_argument, NULL, 'l'},
{0, 0, 0, 0}
};
// clang-format on
@@ -100,6 +101,9 @@ static void parseArg(int argc, char* argv[]) {
case 's':
setSkipSqlNum(optarg);
break;
+ case 'l':
+ setLogLevel(optarg);
+ break;
default:
break;
}
diff --git a/source/libs/parser/test/parTestUtil.cpp b/source/libs/parser/test/parTestUtil.cpp
index 360b904c170e50682b17d9c99a8ec1cd679a6db0..14c991917bf534fb2719b2c9d90aff9325042615 100644
--- a/source/libs/parser/test/parTestUtil.cpp
+++ b/source/libs/parser/test/parTestUtil.cpp
@@ -119,12 +119,18 @@ class ParserTestBaseImpl {
TEST_INTERFACE_ASYNC_API
};
- static void _destoryParseMetaCache(SParseMetaCache* pMetaCache, bool request) {
+ static void destoryParseContext(SParseContext* pCxt) {
+ taosArrayDestroy(pCxt->pTableMetaPos);
+ taosArrayDestroy(pCxt->pTableVgroupPos);
+ delete pCxt;
+ }
+
+ static void destoryParseMetaCacheWarpper(SParseMetaCache* pMetaCache, bool request) {
destoryParseMetaCache(pMetaCache, request);
delete pMetaCache;
}
- static void _destroyQuery(SQuery** pQuery) {
+ static void destroyQuery(SQuery** pQuery) {
if (nullptr == pQuery) {
return;
}
@@ -303,10 +309,10 @@ class ParserTestBaseImpl {
setParseContext(sql, &cxt);
if (qIsInsertValuesSql(cxt.pSql, cxt.sqlLen)) {
- unique_ptr query((SQuery**)taosMemoryCalloc(1, sizeof(SQuery*)), _destroyQuery);
+ unique_ptr query((SQuery**)taosMemoryCalloc(1, sizeof(SQuery*)), destroyQuery);
doParseInsertSql(&cxt, query.get(), nullptr);
} else {
- unique_ptr query((SQuery**)taosMemoryCalloc(1, sizeof(SQuery*)), _destroyQuery);
+ unique_ptr query((SQuery**)taosMemoryCalloc(1, sizeof(SQuery*)), destroyQuery);
doParse(&cxt, query.get());
SQuery* pQuery = *(query.get());
@@ -335,7 +341,7 @@ class ParserTestBaseImpl {
SParseContext cxt = {0};
setParseContext(sql, &cxt);
- unique_ptr query((SQuery**)taosMemoryCalloc(1, sizeof(SQuery*)), _destroyQuery);
+ unique_ptr query((SQuery**)taosMemoryCalloc(1, sizeof(SQuery*)), destroyQuery);
doParseSql(&cxt, query.get());
SQuery* pQuery = *(query.get());
@@ -354,26 +360,26 @@ class ParserTestBaseImpl {
void runAsyncInternalFuncs(const string& sql, int32_t expect, ParserStage checkStage) {
reset(expect, checkStage, TEST_INTERFACE_ASYNC_INTERNAL);
try {
- SParseContext cxt = {0};
- setParseContext(sql, &cxt, true);
+ unique_ptr > cxt(new SParseContext(), destoryParseContext);
+ setParseContext(sql, cxt.get(), true);
- unique_ptr query((SQuery**)taosMemoryCalloc(1, sizeof(SQuery*)), _destroyQuery);
+ unique_ptr query((SQuery**)taosMemoryCalloc(1, sizeof(SQuery*)), destroyQuery);
bool request = true;
unique_ptr > metaCache(
- new SParseMetaCache(), bind(_destoryParseMetaCache, _1, cref(request)));
- bool isInsertValues = qIsInsertValuesSql(cxt.pSql, cxt.sqlLen);
+ new SParseMetaCache(), bind(destoryParseMetaCacheWarpper, _1, cref(request)));
+ bool isInsertValues = qIsInsertValuesSql(cxt->pSql, cxt->sqlLen);
if (isInsertValues) {
- doParseInsertSyntax(&cxt, query.get(), metaCache.get());
+ doParseInsertSyntax(cxt.get(), query.get(), metaCache.get());
} else {
- doParse(&cxt, query.get());
- doCollectMetaKey(&cxt, *(query.get()), metaCache.get());
+ doParse(cxt.get(), query.get());
+ doCollectMetaKey(cxt.get(), *(query.get()), metaCache.get());
}
SQuery* pQuery = *(query.get());
unique_ptr catalogReq(new SCatalogReq(),
MockCatalogService::destoryCatalogReq);
- doBuildCatalogReq(&cxt, metaCache.get(), catalogReq.get());
+ doBuildCatalogReq(cxt.get(), metaCache.get(), catalogReq.get());
string err;
thread t1([&]() {
@@ -386,13 +392,13 @@ class ParserTestBaseImpl {
doPutMetaDataToCache(catalogReq.get(), metaData.get(), metaCache.get(), isInsertValues);
if (isInsertValues) {
- doParseInsertSql(&cxt, query.get(), metaCache.get());
+ doParseInsertSql(cxt.get(), query.get(), metaCache.get());
} else {
- doAuthenticate(&cxt, pQuery, metaCache.get());
+ doAuthenticate(cxt.get(), pQuery, metaCache.get());
- doTranslate(&cxt, pQuery, metaCache.get());
+ doTranslate(cxt.get(), pQuery, metaCache.get());
- doCalculateConstant(&cxt, pQuery);
+ doCalculateConstant(cxt.get(), pQuery);
}
} catch (const TerminateFlag& e) {
// success and terminate
@@ -423,13 +429,13 @@ class ParserTestBaseImpl {
void runAsyncApis(const string& sql, int32_t expect, ParserStage checkStage) {
reset(expect, checkStage, TEST_INTERFACE_ASYNC_API);
try {
- SParseContext cxt = {0};
- setParseContext(sql, &cxt);
+ unique_ptr > cxt(new SParseContext(), destoryParseContext);
+ setParseContext(sql, cxt.get());
unique_ptr catalogReq(new SCatalogReq(),
MockCatalogService::destoryCatalogReq);
- unique_ptr query((SQuery**)taosMemoryCalloc(1, sizeof(SQuery*)), _destroyQuery);
- doParseSqlSyntax(&cxt, query.get(), catalogReq.get());
+ unique_ptr query((SQuery**)taosMemoryCalloc(1, sizeof(SQuery*)), destroyQuery);
+ doParseSqlSyntax(cxt.get(), query.get(), catalogReq.get());
SQuery* pQuery = *(query.get());
string err;
@@ -438,7 +444,7 @@ class ParserTestBaseImpl {
unique_ptr metaData(new SMetaData(), MockCatalogService::destoryMetaData);
doGetAllMeta(catalogReq.get(), metaData.get());
- doAnalyseSqlSemantic(&cxt, catalogReq.get(), metaData.get(), pQuery);
+ doAnalyseSqlSemantic(cxt.get(), catalogReq.get(), metaData.get(), pQuery);
} catch (const TerminateFlag& e) {
// success and terminate
} catch (const runtime_error& e) {
diff --git a/source/libs/planner/src/planLogicCreater.c b/source/libs/planner/src/planLogicCreater.c
index 0667c5f5b9e2984d119dab5297abb16e374977fb..8c87f60b9f1e874d09e1e975a81d58c4fdbf04c9 100644
--- a/source/libs/planner/src/planLogicCreater.c
+++ b/source/libs/planner/src/planLogicCreater.c
@@ -197,28 +197,21 @@ static EScanType getScanType(SLogicPlanContext* pCxt, SNodeList* pScanPseudoCols
return SCAN_TYPE_TABLE;
}
-static SNode* createPrimaryKeyCol(uint64_t tableId) {
+static SNode* createFirstCol(uint64_t tableId, const SSchema* pSchema) {
SColumnNode* pCol = (SColumnNode*)nodesMakeNode(QUERY_NODE_COLUMN);
if (NULL == pCol) {
return NULL;
}
- pCol->node.resType.type = TSDB_DATA_TYPE_TIMESTAMP;
- pCol->node.resType.bytes = tDataTypes[TSDB_DATA_TYPE_TIMESTAMP].bytes;
+ pCol->node.resType.type = pSchema->type;
+ pCol->node.resType.bytes = pSchema->bytes;
pCol->tableId = tableId;
- pCol->colId = PRIMARYKEY_TIMESTAMP_COL_ID;
+ pCol->colId = pSchema->colId;
pCol->colType = COLUMN_TYPE_COLUMN;
- strcpy(pCol->colName, "#primarykey");
+ strcpy(pCol->colName, pSchema->name);
return (SNode*)pCol;
}
-static int32_t addPrimaryKeyCol(uint64_t tableId, SNodeList** pCols) {
- if (NULL == *pCols) {
- *pCols = nodesMakeList();
- if (NULL == *pCols) {
- return TSDB_CODE_OUT_OF_MEMORY;
- }
- }
-
+static int32_t addPrimaryKeyCol(uint64_t tableId, const SSchema* pSchema, SNodeList** pCols) {
bool found = false;
SNode* pCol = NULL;
FOREACH(pCol, *pCols) {
@@ -229,13 +222,25 @@ static int32_t addPrimaryKeyCol(uint64_t tableId, SNodeList** pCols) {
}
if (!found) {
- if (TSDB_CODE_SUCCESS != nodesListStrictAppend(*pCols, createPrimaryKeyCol(tableId))) {
- return TSDB_CODE_OUT_OF_MEMORY;
- }
+ return nodesListMakeStrictAppend(pCols, createFirstCol(tableId, pSchema));
}
return TSDB_CODE_SUCCESS;
}
+static int32_t addSystableFirstCol(uint64_t tableId, const SSchema* pSchema, SNodeList** pCols) {
+ if (LIST_LENGTH(*pCols) > 0) {
+ return TSDB_CODE_SUCCESS;
+ }
+ return nodesListMakeStrictAppend(pCols, createFirstCol(tableId, pSchema));
+}
+
+static int32_t addDefaultScanCol(const STableMeta* pMeta, SNodeList** pCols) {
+ if (TSDB_SYSTEM_TABLE == pMeta->tableType) {
+ return addSystableFirstCol(pMeta->uid, pMeta->schema, pCols);
+ }
+ return addPrimaryKeyCol(pMeta->uid, pMeta->schema, pCols);
+}
+
static int32_t makeScanLogicNode(SLogicPlanContext* pCxt, SRealTableNode* pRealTable, bool hasRepeatScanFuncs,
SLogicNode** pLogicNode) {
SScanLogicNode* pScan = (SScanLogicNode*)nodesMakeNode(QUERY_NODE_LOGIC_PLAN_SCAN);
@@ -299,8 +304,8 @@ static int32_t createScanLogicNode(SLogicPlanContext* pCxt, SSelectStmt* pSelect
pScan->hasNormalCols = true;
}
- if (TSDB_CODE_SUCCESS == code && SCAN_TYPE_SYSTEM_TABLE != pScan->scanType) {
- code = addPrimaryKeyCol(pScan->tableId, &pScan->pScanCols);
+ if (TSDB_CODE_SUCCESS == code) {
+ code = addDefaultScanCol(pRealTable->pMeta, &pScan->pScanCols);
}
// set output
@@ -606,6 +611,8 @@ static int32_t createIndefRowsFuncLogicNode(SLogicPlanContext* pCxt, SSelectStmt
return code;
}
+static bool isInterpFunc(int32_t funcId) { return fmIsInterpFunc(funcId) || fmIsInterpPseudoColumnFunc(funcId); }
+
static int32_t createInterpFuncLogicNode(SLogicPlanContext* pCxt, SSelectStmt* pSelect, SLogicNode** pLogicNode) {
if (!pSelect->hasInterpFunc) {
return TSDB_CODE_SUCCESS;
@@ -620,7 +627,7 @@ static int32_t createInterpFuncLogicNode(SLogicPlanContext* pCxt, SSelectStmt* p
pInterpFunc->node.requireDataOrder = getRequireDataOrder(true, pSelect);
pInterpFunc->node.resultDataOrder = pInterpFunc->node.requireDataOrder;
- int32_t code = nodesCollectFuncs(pSelect, SQL_CLAUSE_SELECT, fmIsInterpFunc, &pInterpFunc->pFuncs);
+ int32_t code = nodesCollectFuncs(pSelect, SQL_CLAUSE_SELECT, isInterpFunc, &pInterpFunc->pFuncs);
if (TSDB_CODE_SUCCESS == code) {
code = rewriteExprsForSelect(pInterpFunc->pFuncs, pSelect, SQL_CLAUSE_SELECT);
}
@@ -787,10 +794,8 @@ static int32_t createWindowLogicNode(SLogicPlanContext* pCxt, SSelectStmt* pSele
static EDealRes needFillValueImpl(SNode* pNode, void* pContext) {
if (QUERY_NODE_COLUMN == nodeType(pNode)) {
SColumnNode* pCol = (SColumnNode*)pNode;
- if (COLUMN_TYPE_WINDOW_START != pCol->colType &&
- COLUMN_TYPE_WINDOW_END != pCol->colType &&
- COLUMN_TYPE_WINDOW_DURATION != pCol->colType &&
- COLUMN_TYPE_GROUP_KEY != pCol->colType) {
+ if (COLUMN_TYPE_WINDOW_START != pCol->colType && COLUMN_TYPE_WINDOW_END != pCol->colType &&
+ COLUMN_TYPE_WINDOW_DURATION != pCol->colType && COLUMN_TYPE_GROUP_KEY != pCol->colType) {
*(bool*)pContext = true;
return DEAL_RES_END;
}
@@ -1008,7 +1013,8 @@ static int32_t createPartitionLogicNode(SLogicPlanContext* pCxt, SSelectStmt* pS
int32_t code =
nodesCollectColumns(pSelect, SQL_CLAUSE_PARTITION_BY, NULL, COLLECT_COL_TYPE_ALL, &pPartition->node.pTargets);
if (TSDB_CODE_SUCCESS == code && NULL == pPartition->node.pTargets) {
- code = nodesListMakeStrictAppend(&pPartition->node.pTargets, nodesCloneNode(nodesListGetNode(pCxt->pCurrRoot->pTargets, 0)));
+ code = nodesListMakeStrictAppend(&pPartition->node.pTargets,
+ nodesCloneNode(nodesListGetNode(pCxt->pCurrRoot->pTargets, 0)));
}
if (TSDB_CODE_SUCCESS == code) {
@@ -1018,6 +1024,20 @@ static int32_t createPartitionLogicNode(SLogicPlanContext* pCxt, SSelectStmt* pS
}
}
+ if (TSDB_CODE_SUCCESS == code && NULL != pSelect->pTags) {
+ pPartition->pTags = nodesCloneList(pSelect->pTags);
+ if (NULL == pPartition->pTags) {
+ code = TSDB_CODE_OUT_OF_MEMORY;
+ }
+ }
+
+ if (TSDB_CODE_SUCCESS == code && NULL != pSelect->pSubtable) {
+ pPartition->pSubtable = nodesCloneNode(pSelect->pSubtable);
+ if (NULL == pPartition->pSubtable) {
+ code = TSDB_CODE_OUT_OF_MEMORY;
+ }
+ }
+
if (TSDB_CODE_SUCCESS == code) {
*pLogicNode = (SLogicNode*)pPartition;
} else {
@@ -1368,9 +1388,21 @@ static int32_t createDeleteAggLogicNode(SLogicPlanContext* pCxt, SDeleteStmt* pD
}
int32_t code = nodesListMakeStrictAppend(&pAgg->pAggFuncs, nodesCloneNode(pDelete->pCountFunc));
+ if (TSDB_CODE_SUCCESS == code) {
+ code = nodesListStrictAppend(pAgg->pAggFuncs, nodesCloneNode(pDelete->pFirstFunc));
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = nodesListStrictAppend(pAgg->pAggFuncs, nodesCloneNode(pDelete->pLastFunc));
+ }
if (TSDB_CODE_SUCCESS == code) {
code = rewriteExpr(pAgg->pAggFuncs, &pDelete->pCountFunc);
}
+ if (TSDB_CODE_SUCCESS == code) {
+ code = rewriteExpr(pAgg->pAggFuncs, &pDelete->pFirstFunc);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = rewriteExpr(pAgg->pAggFuncs, &pDelete->pLastFunc);
+ }
// set the output
if (TSDB_CODE_SUCCESS == code) {
code = createColumnByRewriteExprs(pAgg->pAggFuncs, &pAgg->node.pTargets);
@@ -1401,7 +1433,9 @@ static int32_t createVnodeModifLogicNodeByDelete(SLogicPlanContext* pCxt, SDelet
strcpy(pModify->tsColName, pRealTable->pMeta->schema->name);
pModify->deleteTimeRange = pDelete->timeRange;
pModify->pAffectedRows = nodesCloneNode(pDelete->pCountFunc);
- if (NULL == pModify->pAffectedRows) {
+ pModify->pStartTs = nodesCloneNode(pDelete->pFirstFunc);
+ pModify->pEndTs = nodesCloneNode(pDelete->pLastFunc);
+ if (NULL == pModify->pAffectedRows || NULL == pModify->pStartTs || NULL == pModify->pEndTs) {
nodesDestroyNode((SNode*)pModify);
return TSDB_CODE_OUT_OF_MEMORY;
}
diff --git a/source/libs/planner/src/planOptimizer.c b/source/libs/planner/src/planOptimizer.c
index 45ab3903a9e9eb6df844244b6fc7cd8d009ebd47..927969bf9ceedbef2b56339a77af51041bf61f40 100644
--- a/source/libs/planner/src/planOptimizer.c
+++ b/source/libs/planner/src/planOptimizer.c
@@ -16,6 +16,7 @@
#include "filter.h"
#include "functionMgt.h"
#include "planInt.h"
+#include "tglobal.h"
#include "ttime.h"
#define OPTIMIZE_FLAG_MASK(n) (1 << n)
@@ -616,7 +617,7 @@ static bool pushDownCondOptIsPriKey(SNode* pNode, SNodeList* pTableCols) {
return false;
}
SColumnNode* pCol = (SColumnNode*)pNode;
- if (PRIMARYKEY_TIMESTAMP_COL_ID != pCol->colId) {
+ if (PRIMARYKEY_TIMESTAMP_COL_ID != pCol->colId || TSDB_SYSTEM_TABLE == pCol->tableType) {
return false;
}
return pushDownCondOptBelongThisTable(pNode, pTableCols);
@@ -1084,7 +1085,7 @@ static int32_t sortPriKeyOptGetSequencingNodesImpl(SLogicNode* pNode, bool* pNot
switch (nodeType(pNode)) {
case QUERY_NODE_LOGIC_PLAN_SCAN: {
SScanLogicNode* pScan = (SScanLogicNode*)pNode;
- if (NULL != pScan->pGroupTags) {
+ if (NULL != pScan->pGroupTags || TSDB_SYSTEM_TABLE == pScan->tableType) {
*pNotOptimize = true;
return TSDB_CODE_SUCCESS;
}
@@ -1614,6 +1615,8 @@ static int32_t partTagsOptimize(SOptimizeContext* pCxt, SLogicSubplan* pLogicSub
SScanLogicNode* pScan = (SScanLogicNode*)nodesListGetNode(pNode->pChildren, 0);
if (QUERY_NODE_LOGIC_PLAN_PARTITION == nodeType(pNode)) {
TSWAP(((SPartitionLogicNode*)pNode)->pPartitionKeys, pScan->pGroupTags);
+ TSWAP(((SPartitionLogicNode*)pNode)->pTags, pScan->pTags);
+ TSWAP(((SPartitionLogicNode*)pNode)->pSubtable, pScan->pSubtable);
int32_t code = replaceLogicNode(pLogicSubplan, pNode, (SLogicNode*)pScan);
if (TSDB_CODE_SUCCESS == code) {
code = adjustLogicNodeDataRequirement((SLogicNode*)pScan, pNode->resultDataOrder);
@@ -1665,7 +1668,10 @@ static bool eliminateProjOptMayBeOptimized(SLogicNode* pNode) {
return false;
}
- if (QUERY_NODE_LOGIC_PLAN_PROJECT != nodeType(pNode) || 1 != LIST_LENGTH(pNode->pChildren)) {
+ // Super table scan requires project operator to merge packets to improve performance.
+ if (QUERY_NODE_LOGIC_PLAN_PROJECT != nodeType(pNode) || 1 != LIST_LENGTH(pNode->pChildren) ||
+ (QUERY_NODE_LOGIC_PLAN_SCAN == nodeType(nodesListGetNode(pNode->pChildren, 0)) &&
+ TSDB_SUPER_TABLE == ((SScanLogicNode*)nodesListGetNode(pNode->pChildren, 0))->tableType)) {
return false;
}
@@ -2407,7 +2413,7 @@ static const SOptimizeRule optimizeRuleSet[] = {
static const int32_t optimizeRuleNum = (sizeof(optimizeRuleSet) / sizeof(SOptimizeRule));
static void dumpLogicSubplan(const char* pRuleName, SLogicSubplan* pSubplan) {
- if (0 == (qDebugFlag & DEBUG_DEBUG)) {
+ if (!tsQueryPlannerTrace) {
return;
}
char* pStr = NULL;
diff --git a/source/libs/planner/src/planPhysiCreater.c b/source/libs/planner/src/planPhysiCreater.c
index cafae18dbe812546f6ef931d804ca4a9e5c1a6fa..810b82b9fc21d2c311266f36575297f9543797b7 100644
--- a/source/libs/planner/src/planPhysiCreater.c
+++ b/source/libs/planner/src/planPhysiCreater.c
@@ -563,7 +563,16 @@ static int32_t createTableScanPhysiNode(SPhysiPlanContext* pCxt, SSubplan* pSubp
pTableScan->igExpired = pScanLogicNode->igExpired;
pTableScan->assignBlockUid = pCxt->pPlanCxt->rSmaQuery ? true : false;
- return createScanPhysiNodeFinalize(pCxt, pSubplan, pScanLogicNode, (SScanPhysiNode*)pTableScan, pPhyNode);
+ int32_t code = createScanPhysiNodeFinalize(pCxt, pSubplan, pScanLogicNode, (SScanPhysiNode*)pTableScan, pPhyNode);
+ if (TSDB_CODE_SUCCESS == code) {
+ code = setListSlotId(pCxt, pTableScan->scan.node.pOutputDataBlockDesc->dataBlockId, -1, pScanLogicNode->pTags,
+ &pTableScan->pTags);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = setNodeSlotId(pCxt, pTableScan->scan.node.pOutputDataBlockDesc->dataBlockId, -1, pScanLogicNode->pSubtable,
+ &pTableScan->pSubtable);
+ }
+ return code;
}
static int32_t createSystemTableScanPhysiNode(SPhysiPlanContext* pCxt, SSubplan* pSubplan,
@@ -1046,7 +1055,8 @@ static int32_t doCreateExchangePhysiNode(SPhysiPlanContext* pCxt, SExchangeLogic
return TSDB_CODE_OUT_OF_MEMORY;
}
- pExchange->srcGroupId = pExchangeLogicNode->srcGroupId;
+ pExchange->srcStartGroupId = pExchangeLogicNode->srcStartGroupId;
+ pExchange->srcEndGroupId = pExchangeLogicNode->srcEndGroupId;
*pPhyNode = (SPhysiNode*)pExchange;
return TSDB_CODE_SUCCESS;
@@ -1321,10 +1331,10 @@ static int32_t createSortPhysiNode(SPhysiPlanContext* pCxt, SNodeList* pChildren
return code;
}
-static int32_t createPartitionPhysiNode(SPhysiPlanContext* pCxt, SNodeList* pChildren,
- SPartitionLogicNode* pPartLogicNode, SPhysiNode** pPhyNode) {
- SPartitionPhysiNode* pPart =
- (SPartitionPhysiNode*)makePhysiNode(pCxt, (SLogicNode*)pPartLogicNode, QUERY_NODE_PHYSICAL_PLAN_PARTITION);
+static int32_t createPartitionPhysiNodeImpl(SPhysiPlanContext* pCxt, SNodeList* pChildren,
+ SPartitionLogicNode* pPartLogicNode, ENodeType type,
+ SPhysiNode** pPhyNode) {
+ SPartitionPhysiNode* pPart = (SPartitionPhysiNode*)makePhysiNode(pCxt, (SLogicNode*)pPartLogicNode, type);
if (NULL == pPart) {
return TSDB_CODE_OUT_OF_MEMORY;
}
@@ -1369,9 +1379,39 @@ static int32_t createPartitionPhysiNode(SPhysiPlanContext* pCxt, SNodeList* pChi
return code;
}
+static int32_t createStreamPartitionPhysiNode(SPhysiPlanContext* pCxt, SNodeList* pChildren,
+ SPartitionLogicNode* pPartLogicNode, SPhysiNode** pPhyNode) {
+ SStreamPartitionPhysiNode* pPart = NULL;
+ int32_t code = createPartitionPhysiNodeImpl(pCxt, pChildren, pPartLogicNode,
+ QUERY_NODE_PHYSICAL_PLAN_STREAM_PARTITION, (SPhysiNode**)&pPart);
+ SDataBlockDescNode* pChildTupe = (((SPhysiNode*)nodesListGetNode(pChildren, 0))->pOutputDataBlockDesc);
+ if (TSDB_CODE_SUCCESS == code) {
+ code = setListSlotId(pCxt, pChildTupe->dataBlockId, -1, pPartLogicNode->pTags, &pPart->pTags);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = setNodeSlotId(pCxt, pChildTupe->dataBlockId, -1, pPartLogicNode->pSubtable, &pPart->pSubtable);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ *pPhyNode = (SPhysiNode*)pPart;
+ } else {
+ nodesDestroyNode((SNode*)pPart);
+ }
+ return code;
+}
+
+static int32_t createPartitionPhysiNode(SPhysiPlanContext* pCxt, SNodeList* pChildren,
+ SPartitionLogicNode* pPartLogicNode, SPhysiNode** pPhyNode) {
+ if (pCxt->pPlanCxt->streamQuery) {
+ return createStreamPartitionPhysiNode(pCxt, pChildren, pPartLogicNode, pPhyNode);
+ }
+ return createPartitionPhysiNodeImpl(pCxt, pChildren, pPartLogicNode, QUERY_NODE_PHYSICAL_PLAN_PARTITION, pPhyNode);
+}
+
static int32_t createFillPhysiNode(SPhysiPlanContext* pCxt, SNodeList* pChildren, SFillLogicNode* pFillNode,
SPhysiNode** pPhyNode) {
- SFillPhysiNode* pFill = (SFillPhysiNode*)makePhysiNode(pCxt, (SLogicNode*)pFillNode, QUERY_NODE_PHYSICAL_PLAN_FILL);
+ SFillPhysiNode* pFill = (SFillPhysiNode*)makePhysiNode(
+ pCxt, (SLogicNode*)pFillNode,
+ pCxt->pPlanCxt->streamQuery ? QUERY_NODE_PHYSICAL_PLAN_STREAM_FILL : QUERY_NODE_PHYSICAL_PLAN_FILL);
if (NULL == pFill) {
return TSDB_CODE_OUT_OF_MEMORY;
}
@@ -1424,7 +1464,8 @@ static int32_t createExchangePhysiNodeByMerge(SMergePhysiNode* pMerge) {
if (NULL == pExchange) {
return TSDB_CODE_OUT_OF_MEMORY;
}
- pExchange->srcGroupId = pMerge->srcGroupId;
+ pExchange->srcStartGroupId = pMerge->srcGroupId;
+ pExchange->srcEndGroupId = pMerge->srcGroupId;
pExchange->singleChannel = true;
pExchange->node.pParent = (SPhysiNode*)pMerge;
pExchange->node.pOutputDataBlockDesc = (SDataBlockDescNode*)nodesCloneNode((SNode*)pMerge->node.pOutputDataBlockDesc);
@@ -1669,6 +1710,12 @@ static int32_t createDataDeleter(SPhysiPlanContext* pCxt, SVnodeModifyLogicNode*
int32_t code = setNodeSlotId(pCxt, pRoot->pOutputDataBlockDesc->dataBlockId, -1, pModify->pAffectedRows,
&pDeleter->pAffectedRows);
+ if (TSDB_CODE_SUCCESS == code) {
+ code = setNodeSlotId(pCxt, pRoot->pOutputDataBlockDesc->dataBlockId, -1, pModify->pStartTs, &pDeleter->pStartTs);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = setNodeSlotId(pCxt, pRoot->pOutputDataBlockDesc->dataBlockId, -1, pModify->pEndTs, &pDeleter->pEndTs);
+ }
if (TSDB_CODE_SUCCESS == code) {
pDeleter->sink.pInputDataBlockDesc = (SDataBlockDescNode*)nodesCloneNode((SNode*)pRoot->pOutputDataBlockDesc);
if (NULL == pDeleter->sink.pInputDataBlockDesc) {
diff --git a/source/libs/planner/src/planSpliter.c b/source/libs/planner/src/planSpliter.c
index c582994b7c319778477238ab26b99ce844cb8c1c..74ed3b57a4417aa43662bb722d3b78f3b13ee002 100644
--- a/source/libs/planner/src/planSpliter.c
+++ b/source/libs/planner/src/planSpliter.c
@@ -84,7 +84,8 @@ static int32_t splCreateExchangeNode(SSplitContext* pCxt, SLogicNode* pChild, SE
if (NULL == pExchange) {
return TSDB_CODE_OUT_OF_MEMORY;
}
- pExchange->srcGroupId = pCxt->groupId;
+ pExchange->srcStartGroupId = pCxt->groupId;
+ pExchange->srcEndGroupId = pCxt->groupId;
pExchange->node.precision = pChild->precision;
pExchange->node.pTargets = nodesCloneList(pChild->pTargets);
if (NULL == pExchange->node.pTargets) {
@@ -112,7 +113,8 @@ static int32_t splCreateExchangeNodeForSubplan(SSplitContext* pCxt, SLogicSubpla
static bool splIsChildSubplan(SLogicNode* pLogicNode, int32_t groupId) {
if (QUERY_NODE_LOGIC_PLAN_EXCHANGE == nodeType(pLogicNode)) {
- return ((SExchangeLogicNode*)pLogicNode)->srcGroupId == groupId;
+ return groupId >= ((SExchangeLogicNode*)pLogicNode)->srcStartGroupId &&
+ groupId <= ((SExchangeLogicNode*)pLogicNode)->srcEndGroupId;
}
if (QUERY_NODE_LOGIC_PLAN_MERGE == nodeType(pLogicNode)) {
@@ -915,20 +917,30 @@ static int32_t stbSplSplitSortNode(SSplitContext* pCxt, SStableSplitInfo* pInfo)
}
static int32_t stbSplSplitScanNodeWithoutPartTags(SSplitContext* pCxt, SStableSplitInfo* pInfo) {
- int32_t code = splCreateExchangeNodeForSubplan(pCxt, pInfo->pSubplan, pInfo->pSplitNode, SUBPLAN_TYPE_MERGE);
+ SLogicNode* pSplitNode = pInfo->pSplitNode;
+ if (NULL != pInfo->pSplitNode->pParent && QUERY_NODE_LOGIC_PLAN_PROJECT == nodeType(pInfo->pSplitNode->pParent) &&
+ NULL == pInfo->pSplitNode->pParent->pLimit && NULL == pInfo->pSplitNode->pParent->pSlimit) {
+ pSplitNode = pInfo->pSplitNode->pParent;
+ }
+ int32_t code = splCreateExchangeNodeForSubplan(pCxt, pInfo->pSubplan, pSplitNode, SUBPLAN_TYPE_MERGE);
if (TSDB_CODE_SUCCESS == code) {
code = nodesListMakeStrictAppend(&pInfo->pSubplan->pChildren,
- (SNode*)splCreateScanSubplan(pCxt, pInfo->pSplitNode, SPLIT_FLAG_STABLE_SPLIT));
+ (SNode*)splCreateScanSubplan(pCxt, pSplitNode, SPLIT_FLAG_STABLE_SPLIT));
}
++(pCxt->groupId);
return code;
}
static int32_t stbSplSplitScanNodeWithPartTags(SSplitContext* pCxt, SStableSplitInfo* pInfo) {
- int32_t code = stbSplCreateMergeNode(pCxt, pInfo->pSubplan, pInfo->pSplitNode, NULL, pInfo->pSplitNode, true);
+ SLogicNode* pSplitNode = pInfo->pSplitNode;
+ if (NULL != pInfo->pSplitNode->pParent && QUERY_NODE_LOGIC_PLAN_PROJECT == nodeType(pInfo->pSplitNode->pParent) &&
+ NULL == pInfo->pSplitNode->pParent->pLimit && NULL == pInfo->pSplitNode->pParent->pSlimit) {
+ pSplitNode = pInfo->pSplitNode->pParent;
+ }
+ int32_t code = stbSplCreateMergeNode(pCxt, pInfo->pSubplan, pSplitNode, NULL, pSplitNode, true);
if (TSDB_CODE_SUCCESS == code) {
code = nodesListMakeStrictAppend(&pInfo->pSubplan->pChildren,
- (SNode*)splCreateScanSubplan(pCxt, pInfo->pSplitNode, SPLIT_FLAG_STABLE_SPLIT));
+ (SNode*)splCreateScanSubplan(pCxt, pSplitNode, SPLIT_FLAG_STABLE_SPLIT));
}
pInfo->pSubplan->subplanType = SUBPLAN_TYPE_MERGE;
++(pCxt->groupId);
@@ -997,6 +1009,7 @@ static int32_t stbSplSplitMergeScanNode(SSplitContext* pCxt, SLogicSubplan* pSub
code = stbSplCreateMergeNode(pCxt, pSubplan, (SLogicNode*)pScan, pMergeKeys, pMergeScan, groupSort);
}
if (TSDB_CODE_SUCCESS == code) {
+ nodesDestroyNode((SNode*)pScan);
code = nodesListMakeStrictAppend(&pSubplan->pChildren,
(SNode*)splCreateScanSubplan(pCxt, pMergeScan, SPLIT_FLAG_STABLE_SPLIT));
}
@@ -1173,6 +1186,7 @@ static int32_t unionSplitSubplan(SSplitContext* pCxt, SLogicSubplan* pUnionSubpl
if (TSDB_CODE_SUCCESS != code) {
break;
}
+ ++(pCxt->groupId);
}
if (TSDB_CODE_SUCCESS == code) {
nodesDestroyList(pSubplanChildren);
@@ -1196,12 +1210,14 @@ static bool unAllSplFindSplitNode(SSplitContext* pCxt, SLogicSubplan* pSubplan,
return false;
}
-static int32_t unAllSplCreateExchangeNode(SSplitContext* pCxt, SLogicSubplan* pSubplan, SProjectLogicNode* pProject) {
+static int32_t unAllSplCreateExchangeNode(SSplitContext* pCxt, int32_t startGroupId, SLogicSubplan* pSubplan,
+ SProjectLogicNode* pProject) {
SExchangeLogicNode* pExchange = (SExchangeLogicNode*)nodesMakeNode(QUERY_NODE_LOGIC_PLAN_EXCHANGE);
if (NULL == pExchange) {
return TSDB_CODE_OUT_OF_MEMORY;
}
- pExchange->srcGroupId = pCxt->groupId;
+ pExchange->srcStartGroupId = startGroupId;
+ pExchange->srcEndGroupId = pCxt->groupId - 1;
pExchange->node.precision = pProject->node.precision;
pExchange->node.pTargets = nodesCloneList(pProject->node.pTargets);
if (NULL == pExchange->node.pTargets) {
@@ -1235,11 +1251,11 @@ static int32_t unionAllSplit(SSplitContext* pCxt, SLogicSubplan* pSubplan) {
return TSDB_CODE_SUCCESS;
}
+ int32_t startGroupId = pCxt->groupId;
int32_t code = unionSplitSubplan(pCxt, info.pSubplan, (SLogicNode*)info.pProject);
if (TSDB_CODE_SUCCESS == code) {
- code = unAllSplCreateExchangeNode(pCxt, info.pSubplan, info.pProject);
+ code = unAllSplCreateExchangeNode(pCxt, startGroupId, info.pSubplan, info.pProject);
}
- ++(pCxt->groupId);
pCxt->split = true;
return code;
}
@@ -1249,12 +1265,14 @@ typedef struct SUnionDistinctSplitInfo {
SLogicSubplan* pSubplan;
} SUnionDistinctSplitInfo;
-static int32_t unDistSplCreateExchangeNode(SSplitContext* pCxt, SLogicSubplan* pSubplan, SAggLogicNode* pAgg) {
+static int32_t unDistSplCreateExchangeNode(SSplitContext* pCxt, int32_t startGroupId, SLogicSubplan* pSubplan,
+ SAggLogicNode* pAgg) {
SExchangeLogicNode* pExchange = (SExchangeLogicNode*)nodesMakeNode(QUERY_NODE_LOGIC_PLAN_EXCHANGE);
if (NULL == pExchange) {
return TSDB_CODE_OUT_OF_MEMORY;
}
- pExchange->srcGroupId = pCxt->groupId;
+ pExchange->srcStartGroupId = startGroupId;
+ pExchange->srcEndGroupId = pCxt->groupId - 1;
pExchange->node.precision = pAgg->node.precision;
pExchange->node.pTargets = nodesCloneList(pAgg->pGroupKeys);
if (NULL == pExchange->node.pTargets) {
@@ -1282,11 +1300,11 @@ static int32_t unionDistinctSplit(SSplitContext* pCxt, SLogicSubplan* pSubplan)
return TSDB_CODE_SUCCESS;
}
+ int32_t startGroupId = pCxt->groupId;
int32_t code = unionSplitSubplan(pCxt, info.pSubplan, (SLogicNode*)info.pAgg);
if (TSDB_CODE_SUCCESS == code) {
- code = unDistSplCreateExchangeNode(pCxt, info.pSubplan, info.pAgg);
+ code = unDistSplCreateExchangeNode(pCxt, startGroupId, info.pSubplan, info.pAgg);
}
- ++(pCxt->groupId);
pCxt->split = true;
return code;
}
@@ -1419,7 +1437,7 @@ static const SSplitRule splitRuleSet[] = {
{.pName = "SingleTableJoinSplit", .splitFunc = singleTableJoinSplit},
{.pName = "UnionAllSplit", .splitFunc = unionAllSplit},
{.pName = "UnionDistinctSplit", .splitFunc = unionDistinctSplit},
- {.pName = "SmaIndexSplit", .splitFunc = smaIndexSplit},
+ {.pName = "SmaIndexSplit", .splitFunc = smaIndexSplit}, // not used yet
{.pName = "InsertSelectSplit", .splitFunc = insertSelectSplit}
};
// clang-format on
@@ -1427,7 +1445,7 @@ static const SSplitRule splitRuleSet[] = {
static const int32_t splitRuleNum = (sizeof(splitRuleSet) / sizeof(SSplitRule));
static void dumpLogicSubplan(const char* pRuleName, SLogicSubplan* pSubplan) {
- if (0 == (qDebugFlag & DEBUG_DEBUG)) {
+ if (!tsQueryPlannerTrace) {
return;
}
char* pStr = NULL;
diff --git a/source/libs/planner/src/planner.c b/source/libs/planner/src/planner.c
index c1296982e0217ae9b3c2e67b210f1922492cf547..53549c122d958bbb043f2e9c885d91ea2b742ad1 100644
--- a/source/libs/planner/src/planner.c
+++ b/source/libs/planner/src/planner.c
@@ -17,9 +17,10 @@
#include "planInt.h"
#include "scalar.h"
+#include "tglobal.h"
static void dumpQueryPlan(SQueryPlan* pPlan) {
- if (0 == (qDebugFlag & DEBUG_DEBUG)) {
+ if (!tsQueryPlannerTrace) {
return;
}
char* pStr = NULL;
@@ -32,7 +33,10 @@ int32_t qCreateQueryPlan(SPlanContext* pCxt, SQueryPlan** pPlan, SArray* pExecNo
SLogicSubplan* pLogicSubplan = NULL;
SQueryLogicPlan* pLogicPlan = NULL;
- int32_t code = createLogicPlan(pCxt, &pLogicSubplan);
+ int32_t code = nodesAcquireAllocator(pCxt->allocatorId);
+ if (TSDB_CODE_SUCCESS == code) {
+ code = createLogicPlan(pCxt, &pLogicSubplan);
+ }
if (TSDB_CODE_SUCCESS == code) {
code = optimizeLogicPlan(pCxt, pLogicSubplan);
}
@@ -48,6 +52,7 @@ int32_t qCreateQueryPlan(SPlanContext* pCxt, SQueryPlan** pPlan, SArray* pExecNo
if (TSDB_CODE_SUCCESS == code) {
dumpQueryPlan(*pPlan);
}
+ nodesReleaseAllocator(pCxt->allocatorId);
nodesDestroyNode((SNode*)pLogicSubplan);
nodesDestroyNode((SNode*)pLogicPlan);
@@ -58,7 +63,7 @@ int32_t qCreateQueryPlan(SPlanContext* pCxt, SQueryPlan** pPlan, SArray* pExecNo
static int32_t setSubplanExecutionNode(SPhysiNode* pNode, int32_t groupId, SDownstreamSourceNode* pSource) {
if (QUERY_NODE_PHYSICAL_PLAN_EXCHANGE == nodeType(pNode)) {
SExchangePhysiNode* pExchange = (SExchangePhysiNode*)pNode;
- if (pExchange->srcGroupId == groupId) {
+ if (groupId >= pExchange->srcStartGroupId && groupId <= pExchange->srcEndGroupId) {
return nodesListMakeStrictAppend(&pExchange->pSrcEndPoints, nodesCloneNode((SNode*)pSource));
}
} else if (QUERY_NODE_PHYSICAL_PLAN_MERGE == nodeType(pNode)) {
@@ -122,6 +127,21 @@ int32_t qSubPlanToString(const SSubplan* pSubplan, char** pStr, int32_t* pLen) {
int32_t qStringToSubplan(const char* pStr, SSubplan** pSubplan) { return nodesStringToNode(pStr, (SNode**)pSubplan); }
+int32_t qSubPlanToMsg(const SSubplan* pSubplan, char** pStr, int32_t* pLen) {
+ if (SUBPLAN_TYPE_MODIFY == pSubplan->subplanType && NULL == pSubplan->pNode) {
+ SDataInserterNode* insert = (SDataInserterNode*)pSubplan->pDataSink;
+ *pLen = insert->size;
+ *pStr = insert->pData;
+ insert->pData = NULL;
+ return TSDB_CODE_SUCCESS;
+ }
+ return nodesNodeToMsg((const SNode*)pSubplan, pStr, pLen);
+}
+
+int32_t qMsgToSubplan(const char* pStr, int32_t len, SSubplan** pSubplan) {
+ return nodesMsgToNode(pStr, len, (SNode**)pSubplan);
+}
+
char* qQueryPlanToString(const SQueryPlan* pPlan) {
char* pStr = NULL;
int32_t len = 0;
diff --git a/source/libs/planner/test/planBasicTest.cpp b/source/libs/planner/test/planBasicTest.cpp
index 27ec409d52a912834ae6e3ec6e2e6a41f2812fe1..aeb78f4030e5dbd36210a86ffa0220f48ea48501 100644
--- a/source/libs/planner/test/planBasicTest.cpp
+++ b/source/libs/planner/test/planBasicTest.cpp
@@ -40,6 +40,13 @@ TEST_F(PlanBasicTest, whereClause) {
run("SELECT ts, c1 FROM t1 WHERE ts > NOW AND ts IS NULL AND (c1 > 0 OR c3 < 20)");
}
+TEST_F(PlanBasicTest, caseWhen) {
+ useDb("root", "test");
+
+ run("SELECT CASE WHEN ts > '2020-1-1 10:10:10' THEN c1 + 10 ELSE c1 - 10 END FROM t1 "
+ "WHERE CASE c1 WHEN c2 + 20 THEN c4 - 1 WHEN c2 + 10 THEN c4 - 2 ELSE 10 END > 0");
+}
+
TEST_F(PlanBasicTest, func) {
useDb("root", "test");
@@ -94,6 +101,8 @@ TEST_F(PlanBasicTest, interpFunc) {
useDb("root", "test");
run("SELECT INTERP(c1) FROM t1 RANGE('2017-7-14 18:00:00', '2017-7-14 19:00:00') EVERY(5s) FILL(LINEAR)");
+
+ run("SELECT _IROWTS, INTERP(c1) FROM t1 RANGE('2017-7-14 18:00:00', '2017-7-14 19:00:00') EVERY(5s) FILL(LINEAR)");
}
TEST_F(PlanBasicTest, lastRowFunc) {
diff --git a/source/libs/planner/test/planGroupByTest.cpp b/source/libs/planner/test/planGroupByTest.cpp
index a553d3addc7e445412a56a579e2fda7e02f742ea..8b8f92bd4f770b1ae652fe9358acf6a822181d0f 100644
--- a/source/libs/planner/test/planGroupByTest.cpp
+++ b/source/libs/planner/test/planGroupByTest.cpp
@@ -40,6 +40,8 @@ TEST_F(PlanGroupByTest, basic) {
run("SELECT COUNT(*) FROM st1 GROUP BY c1");
run("SELECT SUM(c1) FROM st1 GROUP BY c2 HAVING SUM(c1) IS NOT NULL");
+
+ run("SELECT AVG(c1) FROM st1");
}
TEST_F(PlanGroupByTest, withPartitionBy) {
diff --git a/source/libs/planner/test/planJoinTest.cpp b/source/libs/planner/test/planJoinTest.cpp
index 66ef4d3f1914957232a34e67944ed07c69a2e30c..535bb0b416943d59fcd32009b23594d07d3a38f1 100644
--- a/source/libs/planner/test/planJoinTest.cpp
+++ b/source/libs/planner/test/planJoinTest.cpp
@@ -28,6 +28,8 @@ TEST_F(PlanJoinTest, basic) {
run("SELECT t1.*, t2.* FROM st1s1 t1, st1s2 t2 WHERE t1.ts = t2.ts");
run("SELECT t1.c1, t2.c1 FROM st1s1 t1 JOIN st1s2 t2 ON t1.ts = t2.ts");
+
+ run("SELECT t1.c1, t2.c1 FROM st1 t1 JOIN st2 t2 ON t1.ts = t2.ts");
}
TEST_F(PlanJoinTest, complex) {
@@ -56,9 +58,3 @@ TEST_F(PlanJoinTest, multiJoin) {
run("SELECT t1.c1, t2.c1 FROM st1s1 t1 JOIN st1s2 t2 ON t1.ts = t2.ts JOIN st1s3 t3 ON t1.ts = t3.ts");
}
-
-TEST_F(PlanJoinTest, stable) {
- useDb("root", "test");
-
- run("SELECT t1.c1, t2.c1 FROM st1 t1 JOIN st2 t2 ON t1.ts = t2.ts ");
-}
diff --git a/source/libs/planner/test/planOptimizeTest.cpp b/source/libs/planner/test/planOptimizeTest.cpp
index 6c5b760564b1e73f09c92fc03d30a958c898c2fc..c2a0aee847f8ad1f6a03cf675c01fabdab3c4eff 100644
--- a/source/libs/planner/test/planOptimizeTest.cpp
+++ b/source/libs/planner/test/planOptimizeTest.cpp
@@ -35,6 +35,8 @@ TEST_F(PlanOptimizeTest, scanPath) {
run("SELECT LAST(c1) FROM t1 WHERE ts BETWEEN '2022-7-29 11:10:10' AND '2022-7-30 11:10:10' INTERVAL(10S) "
"FILL(LINEAR)");
+
+ run("SELECT COUNT(TBNAME) FROM t1");
}
TEST_F(PlanOptimizeTest, pushDownCondition) {
diff --git a/source/libs/planner/test/planOtherTest.cpp b/source/libs/planner/test/planOtherTest.cpp
index 350ccd0d927c9773059cfb2c027a0ca2292e4d13..2ad997bb46e5bf7f6c6253a61a9badc7f6ad4f4e 100644
--- a/source/libs/planner/test/planOtherTest.cpp
+++ b/source/libs/planner/test/planOtherTest.cpp
@@ -32,6 +32,12 @@ TEST_F(PlanOtherTest, createStream) {
run("create stream if not exists s1 trigger window_close watermark 10s into st1 as select count(*) from t1 "
"interval(10s)");
+
+ run("CREATE STREAM s1 INTO st3 TAGS(tname VARCHAR(10), id INT) SUBTABLE(CONCAT('new-', tname)) "
+ "AS SELECT _WSTART wstart, COUNT(*) cnt FROM st1 PARTITION BY TBNAME tname, c1 id INTERVAL(10S)");
+
+ run("CREATE STREAM s1 INTO st3 TAGS(tname VARCHAR(10), id INT) SUBTABLE(CONCAT('new-', tname)) "
+ "AS SELECT _WSTART wstart, COUNT(*) cnt FROM st1 PARTITION BY TBNAME tname, tag1 id INTERVAL(10S)");
}
TEST_F(PlanOtherTest, createStreamUseSTable) {
@@ -78,6 +84,8 @@ TEST_F(PlanOtherTest, show) {
run("SHOW TABLE DISTRIBUTED st1");
run("SHOW DNODE 1 VARIABLES");
+
+ run("SHOW TAGS FROM st1s1");
}
TEST_F(PlanOtherTest, delete) {
diff --git a/source/libs/planner/test/planSetOpTest.cpp b/source/libs/planner/test/planSetOpTest.cpp
index de6d7466b8f715f0cb28e7832f07648bec8ab307..5348952db87428aabb8d14504edab656f96e070c 100644
--- a/source/libs/planner/test/planSetOpTest.cpp
+++ b/source/libs/planner/test/planSetOpTest.cpp
@@ -44,6 +44,8 @@ TEST_F(PlanSetOpTest, unionAllWithSubquery) {
run("SELECT ts FROM (SELECT ts FROM st1s1) UNION ALL SELECT ts FROM (SELECT ts FROM st1s2)");
// super table
run("SELECT ts FROM (SELECT ts FROM st1) UNION ALL SELECT ts FROM (SELECT ts FROM st1)");
+
+ run("(SELECT SERVER_STATUS()) UNION ALL (SELECT SERVER_STATUS())");
}
TEST_F(PlanSetOpTest, unionAllWithOrderBy) {
diff --git a/source/libs/planner/test/planSysTbTest.cpp b/source/libs/planner/test/planSysTbTest.cpp
index 921f86f09a41d36448ab0d435ab6a439645b9bfc..6b40e381cc18cb75cc9271352cd654d31a74242b 100644
--- a/source/libs/planner/test/planSysTbTest.cpp
+++ b/source/libs/planner/test/planSysTbTest.cpp
@@ -32,3 +32,9 @@ TEST_F(PlanSysTableTest, informationSchema) {
run("SELECT * FROM information_schema.ins_databases WHERE name = 'information_schema'");
}
+
+TEST_F(PlanSysTableTest, withAgg) {
+ useDb("root", "information_schema");
+
+ run("SELECT COUNT(1) FROM ins_users");
+}
diff --git a/source/libs/planner/test/planTestMain.cpp b/source/libs/planner/test/planTestMain.cpp
index 8f6fc832a2d8c4722c02781d2a357606a1eb481b..df6e72ce46e0aad05f62e45ba66e38c8f0c9fc96 100644
--- a/source/libs/planner/test/planTestMain.cpp
+++ b/source/libs/planner/test/planTestMain.cpp
@@ -22,6 +22,7 @@
#include "mockCatalog.h"
#include "parser.h"
#include "planTestUtil.h"
+#include "tglobal.h"
class PlannerEnv : public testing::Environment {
public:
@@ -30,6 +31,8 @@ class PlannerEnv : public testing::Environment {
initMetaDataEnv();
generateMetaData();
initLog(TD_TMP_DIR_PATH "td");
+ initCfg();
+ nodesInitAllocatorSet();
}
virtual void TearDown() {
@@ -37,6 +40,7 @@ class PlannerEnv : public testing::Environment {
qCleanupKeywordsTable();
fmFuncMgtDestroy();
taosCloseLog();
+ nodesDestroyAllocatorSet();
}
PlannerEnv() {}
@@ -67,6 +71,8 @@ class PlannerEnv : public testing::Environment {
std::cout << "failed to init log file" << std::endl;
}
}
+
+ void initCfg() { tsQueryPlannerTrace = true; }
};
static void parseArg(int argc, char* argv[]) {
@@ -79,6 +85,7 @@ static void parseArg(int argc, char* argv[]) {
{"limitSql", required_argument, NULL, 'i'},
{"log", required_argument, NULL, 'l'},
{"queryPolicy", required_argument, NULL, 'q'},
+ {"useNodeAllocator", required_argument, NULL, 'a'},
{0, 0, 0, 0}
};
// clang-format on
@@ -99,6 +106,9 @@ static void parseArg(int argc, char* argv[]) {
case 'q':
setQueryPolicy(optarg);
break;
+ case 'a':
+ setUseNodeAllocator(optarg);
+ break;
default:
break;
}
diff --git a/source/libs/planner/test/planTestUtil.cpp b/source/libs/planner/test/planTestUtil.cpp
index f904643be91298fd8bcd1164334663af32851e98..73d695195cab5b1d5257c1b783e0c3a5dfe05840 100644
--- a/source/libs/planner/test/planTestUtil.cpp
+++ b/source/libs/planner/test/planTestUtil.cpp
@@ -19,6 +19,7 @@
#include
#include
+#include
#include "cmdnodes.h"
#include "mockCatalogService.h"
@@ -40,6 +41,7 @@ using namespace testing;
enum DumpModule {
DUMP_MODULE_NOTHING = 1,
+ DUMP_MODULE_SQL,
DUMP_MODULE_PARSER,
DUMP_MODULE_LOGIC,
DUMP_MODULE_OPTIMIZED,
@@ -55,10 +57,13 @@ int32_t g_skipSql = 0;
int32_t g_limitSql = 0;
int32_t g_logLevel = 131;
int32_t g_queryPolicy = QUERY_POLICY_VNODE;
+bool g_useNodeAllocator = false;
void setDumpModule(const char* pModule) {
if (NULL == pModule) {
g_dumpModule = DUMP_MODULE_ALL;
+ } else if (0 == strncasecmp(pModule, "sql", strlen(pModule))) {
+ g_dumpModule = DUMP_MODULE_SQL;
} else if (0 == strncasecmp(pModule, "parser", strlen(pModule))) {
g_dumpModule = DUMP_MODULE_PARSER;
} else if (0 == strncasecmp(pModule, "logic", strlen(pModule))) {
@@ -78,10 +83,11 @@ void setDumpModule(const char* pModule) {
}
}
-void setSkipSqlNum(const char* pNum) { g_skipSql = stoi(pNum); }
-void setLimitSqlNum(const char* pNum) { g_limitSql = stoi(pNum); }
-void setLogLevel(const char* pLogLevel) { g_logLevel = stoi(pLogLevel); }
-void setQueryPolicy(const char* pQueryPolicy) { g_queryPolicy = stoi(pQueryPolicy); }
+void setSkipSqlNum(const char* pArg) { g_skipSql = stoi(pArg); }
+void setLimitSqlNum(const char* pArg) { g_limitSql = stoi(pArg); }
+void setLogLevel(const char* pArg) { g_logLevel = stoi(pArg); }
+void setQueryPolicy(const char* pArg) { g_queryPolicy = stoi(pArg); }
+void setUseNodeAllocator(const char* pArg) { g_useNodeAllocator = stoi(pArg); }
int32_t getLogLevel() { return g_logLevel; }
@@ -123,6 +129,12 @@ class PlannerTestBaseImpl {
}
void runImpl(const string& sql, int32_t queryPolicy) {
+ int64_t allocatorId = 0;
+ if (g_useNodeAllocator) {
+ nodesCreateAllocator(sqlNo_, 32 * 1024, &allocatorId);
+ nodesAcquireAllocator(allocatorId);
+ }
+
reset();
tsQueryPolicy = queryPolicy;
try {
@@ -154,8 +166,13 @@ class PlannerTestBaseImpl {
dump(g_dumpModule);
} catch (...) {
dump(DUMP_MODULE_ALL);
+ nodesReleaseAllocator(allocatorId);
+ nodesDestroyAllocator(allocatorId);
throw;
}
+
+ nodesReleaseAllocator(allocatorId);
+ nodesDestroyAllocator(allocatorId);
}
void prepare(const string& sql) {
@@ -215,6 +232,8 @@ class PlannerTestBaseImpl {
doCreatePhysiPlan(&cxt, pLogicPlan, &pPlan);
unique_ptr plan(pPlan, (void (*)(SQueryPlan*))nodesDestroyNode);
+ checkPlanMsg((SNode*)pPlan);
+
dump(g_dumpModule);
} catch (...) {
dump(DUMP_MODULE_ALL);
@@ -452,6 +471,32 @@ class PlannerTestBaseImpl {
return str;
}
+ void checkPlanMsg(const SNode* pRoot) {
+ char* pStr = NULL;
+ int32_t len = 0;
+ DO_WITH_THROW(nodesNodeToMsg, pRoot, &pStr, &len)
+
+ string copyStr(pStr, len);
+ SNode* pNode = NULL;
+ char* pNewStr = NULL;
+ int32_t newlen = 0;
+ DO_WITH_THROW(nodesMsgToNode, copyStr.c_str(), len, &pNode)
+ DO_WITH_THROW(nodesNodeToMsg, pNode, &pNewStr, &newlen)
+ if (newlen != len || 0 != memcmp(pStr, pNewStr, len)) {
+ cout << "nodesNodeToMsg error!!!!!!!!!!!!!! len = " << len << ", newlen = " << newlen << endl;
+ taosMemoryFreeClear(pNewStr);
+ DO_WITH_THROW(nodesNodeToString, pRoot, false, &pNewStr, &newlen)
+ cout << "orac node: " << pNewStr << endl;
+ taosMemoryFreeClear(pNewStr);
+ DO_WITH_THROW(nodesNodeToString, pNode, false, &pNewStr, &newlen)
+ cout << "new node: " << pNewStr << endl;
+ }
+ nodesDestroyNode(pNode);
+ taosMemoryFreeClear(pNewStr);
+
+ taosMemoryFreeClear(pStr);
+ }
+
caseEnv caseEnv_;
stmtEnv stmtEnv_;
stmtRes res_;
diff --git a/source/libs/planner/test/planTestUtil.h b/source/libs/planner/test/planTestUtil.h
index b0ddd726a6d4c0cf3b9294990e593cf67839823b..be8b51f769d4764048bdf9ded777dfb1f3dd6e56 100644
--- a/source/libs/planner/test/planTestUtil.h
+++ b/source/libs/planner/test/planTestUtil.h
@@ -41,11 +41,12 @@ class PlannerTestBase : public testing::Test {
std::unique_ptr impl_;
};
-extern void setDumpModule(const char* pModule);
-extern void setSkipSqlNum(const char* pNum);
-extern void setLimitSqlNum(const char* pNum);
-extern void setLogLevel(const char* pLogLevel);
-extern void setQueryPolicy(const char* pQueryPolicy);
+extern void setDumpModule(const char* pArg);
+extern void setSkipSqlNum(const char* pArg);
+extern void setLimitSqlNum(const char* pArg);
+extern void setLogLevel(const char* pArg);
+extern void setQueryPolicy(const char* pArg);
+extern void setUseNodeAllocator(const char* pArg);
extern int32_t getLogLevel();
#endif // PLAN_TEST_UTIL_H
diff --git a/source/libs/qcom/src/queryUtil.c b/source/libs/qcom/src/queryUtil.c
index d848016e46482614972d5e85469e4297136d6cc0..8162b922cec8fd779907df851bc1d8b545cafc28 100644
--- a/source/libs/qcom/src/queryUtil.c
+++ b/source/libs/qcom/src/queryUtil.c
@@ -134,8 +134,7 @@ int32_t taosAsyncExec(__async_exec_fn_t execFn, void* execParam, int32_t* code)
schedMsg.thandle = execParam;
schedMsg.msg = code;
- taosScheduleTask(&pTaskQueue, &schedMsg);
- return 0;
+ return taosScheduleTask(&pTaskQueue, &schedMsg);
}
void destroySendMsgInfo(SMsgSendInfo* pMsgBody) {
@@ -472,5 +471,3 @@ int32_t cloneDbVgInfo(SDBVgInfo* pSrc, SDBVgInfo** pDst) {
return TSDB_CODE_SUCCESS;
}
-
-
diff --git a/source/libs/qcom/src/querymsg.c b/source/libs/qcom/src/querymsg.c
index e2d3ac1583926da6fe9d9aff82392c4fcc3c2d65..e54937114cd1bc0e011e8dc9d8ed44a710bc1807 100644
--- a/source/libs/qcom/src/querymsg.c
+++ b/source/libs/qcom/src/querymsg.c
@@ -38,6 +38,8 @@ int32_t queryBuildUseDbOutput(SUseDbOutput *pOut, SUseDbRsp *usedbRsp) {
pOut->dbVgroup->vgVersion = usedbRsp->vgVersion;
pOut->dbVgroup->hashMethod = usedbRsp->hashMethod;
+ pOut->dbVgroup->hashPrefix = usedbRsp->hashPrefix;
+ pOut->dbVgroup->hashSuffix = usedbRsp->hashSuffix;
qDebug("Got %d vgroup for db %s", usedbRsp->vgNum, usedbRsp->db);
diff --git a/source/libs/qworker/inc/qwInt.h b/source/libs/qworker/inc/qwInt.h
index 729ac474e4720a13a2da8d463820d05db72970f0..3a3226359998d23da690aa361c5dffd1ce7d12fc 100644
--- a/source/libs/qworker/inc/qwInt.h
+++ b/source/libs/qworker/inc/qwInt.h
@@ -29,7 +29,7 @@ extern "C" {
#include "executor.h"
#include "trpc.h"
-#define QW_DEFAULT_SCHEDULER_NUMBER 10000
+#define QW_DEFAULT_SCHEDULER_NUMBER 100
#define QW_DEFAULT_TASK_NUMBER 10000
#define QW_DEFAULT_SCH_TASK_NUMBER 10000
#define QW_DEFAULT_SHORT_RUN_TIMES 2
@@ -83,22 +83,6 @@ typedef struct SQWDebug {
extern SQWDebug gQWDebug;
-typedef struct SQWMsgInfo {
- int8_t taskType;
- int8_t explain;
- int8_t needFetch;
-} SQWMsgInfo;
-
-typedef struct SQWMsg {
- void *node;
- int32_t code;
- int32_t msgType;
- char *msg;
- int32_t msgLen;
- SQWMsgInfo msgInfo;
- SRpcHandleInfo connInfo;
-} SQWMsg;
-
typedef struct SQWHbParam {
bool inUse;
int32_t qwrId;
@@ -133,6 +117,7 @@ typedef struct SQWTaskCtx {
int8_t taskType;
int8_t explain;
int8_t needFetch;
+ int8_t localExec;
int32_t msgType;
int32_t fetchType;
int32_t execId;
@@ -150,6 +135,7 @@ typedef struct SQWTaskCtx {
int8_t events[QW_EVENT_MAX];
+ SArray *explainRes;
void *taskHandle;
void *sinkHandle;
STbVerInfo tbInfo;
@@ -205,6 +191,7 @@ typedef struct SQWorker {
SHashObj *ctxHash; // key: queryId+taskId, value: SQWTaskCtx
SMsgCb msgCb;
SQWStat stat;
+ int32_t *destroyed;
} SQWorker;
typedef struct SQWorkerMgmt {
diff --git a/source/libs/qworker/inc/qwMsg.h b/source/libs/qworker/inc/qwMsg.h
index 3ff5b5950f6e241af51d77da843d7f00e9fdd0b6..b46c5d6baf90cdbba9883b7a0cfe24f175c7c4b5 100644
--- a/source/libs/qworker/inc/qwMsg.h
+++ b/source/libs/qworker/inc/qwMsg.h
@@ -42,7 +42,7 @@ int32_t qwBuildAndSendQueryRsp(int32_t rspType, SRpcHandleInfo *pConn, int32_t c
int32_t qwBuildAndSendExplainRsp(SRpcHandleInfo *pConn, SArray* pExecList);
int32_t qwBuildAndSendErrorRsp(int32_t rspType, SRpcHandleInfo *pConn, int32_t code);
void qwFreeFetchRsp(void *msg);
-int32_t qwMallocFetchRsp(int32_t length, SRetrieveTableRsp **rsp);
+int32_t qwMallocFetchRsp(int8_t rpcMalloc, int32_t length, SRetrieveTableRsp **rsp);
int32_t qwBuildAndSendHbRsp(SRpcHandleInfo *pConn, SSchedulerHbRsp *rsp, int32_t code);
int32_t qwRegisterQueryBrokenLinkArg(QW_FPARAMS_DEF, SRpcHandleInfo *pConn);
int32_t qwRegisterHbBrokenLinkArg(SQWorker *mgmt, uint64_t sId, SRpcHandleInfo *pConn);
diff --git a/source/libs/qworker/src/qwMsg.c b/source/libs/qworker/src/qwMsg.c
index ecc25e9c2df6fd1d0efb35cc21d871fde64784c5..e4271dfcdaf2579ce884646a52bee523e419c3c5 100644
--- a/source/libs/qworker/src/qwMsg.c
+++ b/source/libs/qworker/src/qwMsg.c
@@ -9,10 +9,10 @@
#include "tmsg.h"
#include "tname.h"
-int32_t qwMallocFetchRsp(int32_t length, SRetrieveTableRsp **rsp) {
+int32_t qwMallocFetchRsp(int8_t rpcMalloc, int32_t length, SRetrieveTableRsp **rsp) {
int32_t msgSize = sizeof(SRetrieveTableRsp) + length;
- SRetrieveTableRsp *pRsp = (SRetrieveTableRsp *)rpcReallocCont(*rsp, msgSize);
+ SRetrieveTableRsp *pRsp = (SRetrieveTableRsp *)(rpcMalloc ? rpcReallocCont(*rsp, msgSize) : taosMemoryRealloc(*rsp, msgSize));
if (NULL == pRsp) {
qError("rpcMallocCont %d failed", msgSize);
QW_RET(TSDB_CODE_QRY_OUT_OF_MEMORY);
@@ -632,6 +632,7 @@ int32_t qWorkerProcessDeleteMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg, SD
QW_ERR_JRET(qwProcessDelete(QW_FPARAMS(), &qwMsg, pRes));
+ taosMemoryFreeClear(req.msg);
QW_SCH_TASK_DLOG("processDelete end, node:%p", node);
_return:
diff --git a/source/libs/qworker/src/qwUtil.c b/source/libs/qworker/src/qwUtil.c
index c7bf7ab7e7e6fbe30d2a8ae9abd2c51d51dac590..9f1a9a3146b0aeba875d051d731f750779b94e42 100644
--- a/source/libs/qworker/src/qwUtil.c
+++ b/source/libs/qworker/src/qwUtil.c
@@ -275,6 +275,7 @@ void qwFreeTaskHandle(qTaskInfo_t *taskHandle) {
qTaskInfo_t otaskHandle = atomic_load_ptr(taskHandle);
if (otaskHandle && atomic_val_compare_exchange_ptr(taskHandle, otaskHandle, NULL)) {
qDestroyTask(otaskHandle);
+ qDebug("task handle destryed");
}
}
@@ -305,6 +306,7 @@ void qwFreeTaskCtx(SQWTaskCtx *ctx) {
if (ctx->sinkHandle) {
dsDestroyDataSinker(ctx->sinkHandle);
ctx->sinkHandle = NULL;
+ qDebug("sink handle destryed");
}
}
@@ -452,6 +454,10 @@ void qwDestroySchStatus(SQWSchStatus *pStatus) { taosHashCleanup(pStatus->tasksH
void qwDestroyImpl(void *pMgmt) {
SQWorker *mgmt = (SQWorker *)pMgmt;
+ int8_t nodeType = mgmt->nodeType;
+ int32_t nodeId = mgmt->nodeId;
+
+ qDebug("start to destroy qworker, type:%d, id:%d, handle:%p", nodeType, nodeId, mgmt);
taosTmrStop(mgmt->hbTimer);
mgmt->hbTimer = NULL;
@@ -479,11 +485,15 @@ void qwDestroyImpl(void *pMgmt) {
}
taosHashCleanup(mgmt->schHash);
+ *mgmt->destroyed = 1;
+
taosMemoryFree(mgmt);
atomic_sub_fetch_32(&gQwMgmt.qwNum, 1);
qwCloseRef();
+
+ qDebug("qworker destroyed, type:%d, id:%d, handle:%p", nodeType, nodeId, mgmt);
}
int32_t qwOpenRef(void) {
diff --git a/source/libs/qworker/src/qworker.c b/source/libs/qworker/src/qworker.c
index f006096ce20a45e18a5b9d990c9c63b621638ac5..ae9dd82a58eca25aab73a17bc43ef2441e3c08c0 100644
--- a/source/libs/qworker/src/qworker.c
+++ b/source/libs/qworker/src/qworker.c
@@ -9,6 +9,7 @@
#include "tcommon.h"
#include "tmsg.h"
#include "tname.h"
+#include "tdatablock.h"
SQWorkerMgmt gQwMgmt = {
.lock = 0,
@@ -16,6 +17,11 @@ SQWorkerMgmt gQwMgmt = {
.qwNum = 0,
};
+static void freeBlock(void* param) {
+ SSDataBlock* pBlock = *(SSDataBlock**)param;
+ blockDataDestroy(pBlock);
+}
+
int32_t qwProcessHbLinkBroken(SQWorker *mgmt, SQWMsg *qwMsg, SSchedulerHbReq *req) {
int32_t code = 0;
SSchedulerHbRsp rsp = {0};
@@ -57,11 +63,25 @@ int32_t qwHandleTaskComplete(QW_FPARAMS_DEF, SQWTaskCtx *ctx) {
SArray *execInfoList = taosArrayInit(4, sizeof(SExplainExecInfo));
QW_ERR_RET(qGetExplainExecInfo(taskHandle, execInfoList));
- SRpcHandleInfo connInfo = ctx->ctrlConnInfo;
- connInfo.ahandle = NULL;
- int32_t code = qwBuildAndSendExplainRsp(&connInfo, execInfoList);
- taosArrayDestroyEx(execInfoList, freeItem);
- QW_ERR_RET(code);
+ if (ctx->localExec) {
+ SExplainLocalRsp localRsp = {0};
+ localRsp.rsp.numOfPlans = taosArrayGetSize(execInfoList);
+ SExplainExecInfo *pExec = taosMemoryCalloc(localRsp.rsp.numOfPlans, sizeof(SExplainExecInfo));
+ memcpy(pExec, taosArrayGet(execInfoList, 0), localRsp.rsp.numOfPlans * sizeof(SExplainExecInfo));
+ localRsp.rsp.subplanInfo = pExec;
+ localRsp.qId = qId;
+ localRsp.tId = tId;
+ localRsp.rId = rId;
+ localRsp.eId = eId;
+ taosArrayPush(ctx->explainRes, &localRsp);
+ taosArrayDestroy(execInfoList);
+ } else {
+ SRpcHandleInfo connInfo = ctx->ctrlConnInfo;
+ connInfo.ahandle = NULL;
+ int32_t code = qwBuildAndSendExplainRsp(&connInfo, execInfoList);
+ taosArrayDestroyEx(execInfoList, freeItem);
+ QW_ERR_RET(code);
+ }
}
if (!ctx->needFetch) {
@@ -80,15 +100,19 @@ int32_t qwExecTask(QW_FPARAMS_DEF, SQWTaskCtx *ctx, bool *queryStop) {
int32_t execNum = 0;
qTaskInfo_t taskHandle = ctx->taskHandle;
DataSinkHandle sinkHandle = ctx->sinkHandle;
+ SLocalFetch localFetch = {(void*)mgmt, ctx->localExec, qWorkerProcessLocalFetch, ctx->explainRes};
SArray *pResList = taosArrayInit(4, POINTER_BYTES);
while (true) {
QW_TASK_DLOG("start to execTask, loopIdx:%d", i++);
// if *taskHandle is NULL, it's killed right now
+ bool hasMore = false;
+
if (taskHandle) {
qwDbgSimulateSleep();
- code = qExecTaskOpt(taskHandle, pResList, &useconds);
+
+ code = qExecTaskOpt(taskHandle, pResList, &useconds, &hasMore, &localFetch);
if (code) {
if (code != TSDB_CODE_OPS_NOT_SUPPORT) {
QW_TASK_ELOG("qExecTask failed, code:%x - %s", code, tstrerror(code));
@@ -101,20 +125,8 @@ int32_t qwExecTask(QW_FPARAMS_DEF, SQWTaskCtx *ctx, bool *queryStop) {
++execNum;
- if (taosArrayGetSize(pResList) == 0) {
- QW_TASK_DLOG("qExecTask end with empty res, useconds:%" PRIu64, useconds);
- dsEndPut(sinkHandle, useconds);
-
- QW_ERR_JRET(qwHandleTaskComplete(QW_FPARAMS(), ctx));
-
- if (queryStop) {
- *queryStop = true;
- }
-
- break;
- }
-
- for (int32_t j = 0; j < taosArrayGetSize(pResList); ++j) {
+ size_t numOfResBlock = taosArrayGetSize(pResList);
+ for (int32_t j = 0; j < numOfResBlock; ++j) {
SSDataBlock *pRes = taosArrayGetP(pResList, j);
ASSERT(pRes->info.rows > 0);
@@ -128,6 +140,23 @@ int32_t qwExecTask(QW_FPARAMS_DEF, SQWTaskCtx *ctx, bool *queryStop) {
QW_TASK_DLOG("data put into sink, rows:%d, continueExecTask:%d", pRes->info.rows, qcontinue);
}
+ if (numOfResBlock == 0 || (hasMore == false)) {
+ if (numOfResBlock == 0) {
+ QW_TASK_DLOG("qExecTask end with empty res, useconds:%" PRIu64, useconds);
+ } else {
+ QW_TASK_DLOG("qExecTask done", "");
+ }
+
+ dsEndPut(sinkHandle, useconds);
+ QW_ERR_JRET(qwHandleTaskComplete(QW_FPARAMS(), ctx));
+
+ if (queryStop) {
+ *queryStop = true;
+ }
+
+ break;
+ }
+
if (!qcontinue) {
if (queryStop) {
*queryStop = true;
@@ -150,8 +179,7 @@ int32_t qwExecTask(QW_FPARAMS_DEF, SQWTaskCtx *ctx, bool *queryStop) {
}
_return:
-
- taosArrayDestroy(pResList);
+ taosArrayDestroyEx(pResList, freeBlock);
QW_RET(code);
}
@@ -229,7 +257,7 @@ int32_t qwGetQueryResFromSink(QW_FPARAMS_DEF, SQWTaskCtx *ctx, int32_t *dataLen,
qwUpdateTaskStatus(QW_FPARAMS(), JOB_TASK_STATUS_SUCC);
if (NULL == rsp) {
- QW_ERR_RET(qwMallocFetchRsp(len, &rsp));
+ QW_ERR_RET(qwMallocFetchRsp(!ctx->localExec, len, &rsp));
*pOutput = output;
} else {
pOutput->queryEnd = output.queryEnd;
@@ -250,7 +278,7 @@ int32_t qwGetQueryResFromSink(QW_FPARAMS_DEF, SQWTaskCtx *ctx, int32_t *dataLen,
*dataLen += len;
- QW_ERR_RET(qwMallocFetchRsp(*dataLen, &rsp));
+ QW_ERR_RET(qwMallocFetchRsp(!ctx->localExec, *dataLen, &rsp));
output.pData = rsp->data + *dataLen - len;
code = dsGetDataBlock(ctx->sinkHandle, &output);
@@ -474,16 +502,18 @@ _return:
}
if (QW_PHASE_POST_QUERY == phase && ctx) {
- ctx->queryRsped = true;
-
- bool rsped = false;
- SQWMsg qwMsg = {.msgType = ctx->msgType, .connInfo = ctx->ctrlConnInfo};
- qwDbgSimulateRedirect(&qwMsg, ctx, &rsped);
- qwDbgSimulateDead(QW_FPARAMS(), ctx, &rsped);
- if (!rsped) {
- qwBuildAndSendQueryRsp(input->msgType + 1, &ctx->ctrlConnInfo, code, ctx);
- QW_TASK_DLOG("query msg rsped, handle:%p, code:%x - %s", ctx->ctrlConnInfo.handle, code, tstrerror(code));
+ if (!ctx->localExec) {
+ bool rsped = false;
+ SQWMsg qwMsg = {.msgType = ctx->msgType, .connInfo = ctx->ctrlConnInfo};
+ qwDbgSimulateRedirect(&qwMsg, ctx, &rsped);
+ qwDbgSimulateDead(QW_FPARAMS(), ctx, &rsped);
+ if (!rsped) {
+ qwBuildAndSendQueryRsp(input->msgType + 1, &ctx->ctrlConnInfo, code, ctx);
+ QW_TASK_DLOG("query msg rsped, handle:%p, code:%x - %s", ctx->ctrlConnInfo.handle, code, tstrerror(code));
+ }
}
+
+ ctx->queryRsped = true;
}
if (ctx) {
@@ -512,11 +542,6 @@ int32_t qwAbortPrerocessQuery(QW_FPARAMS_DEF) {
int32_t qwPreprocessQuery(QW_FPARAMS_DEF, SQWMsg *qwMsg) {
int32_t code = 0;
- bool queryRsped = false;
- SSubplan *plan = NULL;
- SQWPhaseInput input = {0};
- qTaskInfo_t pTaskInfo = NULL;
- DataSinkHandle sinkHandle = NULL;
SQWTaskCtx *ctx = NULL;
QW_ERR_JRET(qwRegisterQueryBrokenLinkArg(QW_FPARAMS(), &qwMsg->connInfo));
@@ -556,10 +581,11 @@ int32_t qwProcessQuery(QW_FPARAMS_DEF, SQWMsg *qwMsg, char *sql) {
ctx->explain = qwMsg->msgInfo.explain;
ctx->needFetch = qwMsg->msgInfo.needFetch;
ctx->msgType = qwMsg->msgType;
+ ctx->localExec = false;
// QW_TASK_DLOGL("subplan json string, len:%d, %s", qwMsg->msgLen, qwMsg->msg);
- code = qStringToSubplan(qwMsg->msg, &plan);
+ code = qMsgToSubplan(qwMsg->msg, qwMsg->msgLen, &plan);
if (TSDB_CODE_SUCCESS != code) {
code = TSDB_CODE_INVALID_MSG;
QW_TASK_ELOG("task physical plan to subplan failed, code:%x - %s", code, tstrerror(code));
@@ -578,19 +604,12 @@ int32_t qwProcessQuery(QW_FPARAMS_DEF, SQWMsg *qwMsg, char *sql) {
QW_ERR_JRET(TSDB_CODE_QRY_APP_ERROR);
}
- // QW_ERR_JRET(qwBuildAndSendQueryRsp(&qwMsg->connInfo, code));
- // QW_TASK_DLOG("query msg rsped, handle:%p, code:%x - %s", qwMsg->connInfo.handle, code, tstrerror(code));
-
- // queryRsped = true;
-
ctx->level = plan->level;
atomic_store_ptr(&ctx->taskHandle, pTaskInfo);
atomic_store_ptr(&ctx->sinkHandle, sinkHandle);
- if (pTaskInfo && sinkHandle) {
- qwSaveTbVersionInfo(pTaskInfo, ctx);
- QW_ERR_JRET(qwExecTask(QW_FPARAMS(), ctx, NULL));
- }
+ qwSaveTbVersionInfo(pTaskInfo, ctx);
+ QW_ERR_JRET(qwExecTask(QW_FPARAMS(), ctx, NULL));
_return:
@@ -600,11 +619,6 @@ _return:
input.msgType = qwMsg->msgType;
code = qwHandlePostPhaseEvents(QW_FPARAMS(), QW_PHASE_POST_QUERY, &input, NULL);
- // if (!queryRsped) {
- // qwBuildAndSendQueryRsp(&qwMsg->connInfo, code);
- // QW_TASK_DLOG("query msg rsped, handle:%p, code:%x - %s", qwMsg->connInfo.handle, code, tstrerror(code));
- //}
-
QW_RET(TSDB_CODE_SUCCESS);
}
@@ -915,13 +929,13 @@ void qwProcessHbTimerEvent(void *param, void *tmrId) {
void *pIter = taosHashIterate(mgmt->schHash, NULL);
while (pIter) {
- SQWSchStatus *sch = (SQWSchStatus *)pIter;
- if (NULL == sch->hbConnInfo.handle) {
+ SQWSchStatus *sch1 = (SQWSchStatus *)pIter;
+ if (NULL == sch1->hbConnInfo.handle) {
uint64_t *sId = taosHashGetKey(pIter, NULL);
QW_TLOG("cancel send hb to sch %" PRIx64 " cause of no connection handle", *sId);
- if (sch->hbBrokenTs > 0 && ((currentMs - sch->hbBrokenTs) > QW_SCH_TIMEOUT_MSEC) &&
- taosHashGetSize(sch->tasksHash) <= 0) {
+ if (sch1->hbBrokenTs > 0 && ((currentMs - sch1->hbBrokenTs) > QW_SCH_TIMEOUT_MSEC) &&
+ taosHashGetSize(sch1->tasksHash) <= 0) {
taosArrayPush(pExpiredSch, sId);
}
@@ -968,7 +982,7 @@ int32_t qwProcessDelete(QW_FPARAMS_DEF, SQWMsg *qwMsg, SDeleteRes *pRes) {
DataSinkHandle sinkHandle = NULL;
SQWTaskCtx ctx = {0};
- code = qStringToSubplan(qwMsg->msg, &plan);
+ code = qMsgToSubplan(qwMsg->msg, qwMsg->msgLen, &plan);
if (TSDB_CODE_SUCCESS != code) {
code = TSDB_CODE_INVALID_MSG;
QW_TASK_ELOG("task physical plan to subplan failed, code:%x - %s", code, tstrerror(code));
@@ -1000,8 +1014,8 @@ _return:
QW_RET(TSDB_CODE_SUCCESS);
}
-int32_t qWorkerInit(int8_t nodeType, int32_t nodeId, SQWorkerCfg *cfg, void **qWorkerMgmt, const SMsgCb *pMsgCb) {
- if (NULL == qWorkerMgmt || pMsgCb->mgmt == NULL) {
+int32_t qWorkerInit(int8_t nodeType, int32_t nodeId, void **qWorkerMgmt, const SMsgCb *pMsgCb) {
+ if (NULL == qWorkerMgmt || (pMsgCb && pMsgCb->mgmt == NULL)) {
qError("invalid param to init qworker");
QW_RET(TSDB_CODE_QRY_INVALID_INPUT);
}
@@ -1024,22 +1038,9 @@ int32_t qWorkerInit(int8_t nodeType, int32_t nodeId, SQWorkerCfg *cfg, void **qW
QW_RET(TSDB_CODE_QRY_OUT_OF_MEMORY);
}
- if (cfg) {
- mgmt->cfg = *cfg;
- if (0 == mgmt->cfg.maxSchedulerNum) {
- mgmt->cfg.maxSchedulerNum = QW_DEFAULT_SCHEDULER_NUMBER;
- }
- if (0 == mgmt->cfg.maxTaskNum) {
- mgmt->cfg.maxTaskNum = QW_DEFAULT_TASK_NUMBER;
- }
- if (0 == mgmt->cfg.maxSchTaskNum) {
- mgmt->cfg.maxSchTaskNum = QW_DEFAULT_SCH_TASK_NUMBER;
- }
- } else {
- mgmt->cfg.maxSchedulerNum = QW_DEFAULT_SCHEDULER_NUMBER;
- mgmt->cfg.maxTaskNum = QW_DEFAULT_TASK_NUMBER;
- mgmt->cfg.maxSchTaskNum = QW_DEFAULT_SCH_TASK_NUMBER;
- }
+ mgmt->cfg.maxSchedulerNum = QW_DEFAULT_SCHEDULER_NUMBER;
+ mgmt->cfg.maxTaskNum = QW_DEFAULT_TASK_NUMBER;
+ mgmt->cfg.maxSchTaskNum = QW_DEFAULT_SCH_TASK_NUMBER;
mgmt->schHash = taosHashInit(mgmt->cfg.maxSchedulerNum, taosGetDefaultHashFunction(TSDB_DATA_TYPE_UBIGINT), false,
HASH_ENTRY_LOCK);
@@ -1064,7 +1065,11 @@ int32_t qWorkerInit(int8_t nodeType, int32_t nodeId, SQWorkerCfg *cfg, void **qW
mgmt->nodeType = nodeType;
mgmt->nodeId = nodeId;
- mgmt->msgCb = *pMsgCb;
+ if (pMsgCb) {
+ mgmt->msgCb = *pMsgCb;
+ } else {
+ memset(&mgmt->msgCb, 0, sizeof(mgmt->msgCb));
+ }
mgmt->refId = taosAddRef(gQwMgmt.qwRef, mgmt);
if (mgmt->refId < 0) {
@@ -1108,10 +1113,17 @@ void qWorkerDestroy(void **qWorkerMgmt) {
return;
}
+ int32_t destroyed = 0;
SQWorker *mgmt = *qWorkerMgmt;
-
+ mgmt->destroyed = &destroyed;
+
if (taosRemoveRef(gQwMgmt.qwRef, mgmt->refId)) {
qError("remove qw from ref list failed, refId:%" PRIx64, mgmt->refId);
+ return;
+ }
+
+ while (0 == destroyed) {
+ taosMsleep(2);
}
}
@@ -1140,3 +1152,112 @@ int32_t qWorkerGetStat(SReadHandle *handle, void *qWorkerMgmt, SQWorkerStat *pSt
return TSDB_CODE_SUCCESS;
}
+
+int32_t qWorkerProcessLocalQuery(void *pMgmt, uint64_t sId, uint64_t qId, uint64_t tId, int64_t rId, int32_t eId, SQWMsg *qwMsg, SArray *explainRes) {
+ SQWorker *mgmt = (SQWorker*)pMgmt;
+ int32_t code = 0;
+ SQWTaskCtx *ctx = NULL;
+ SSubplan *plan = (SSubplan *)qwMsg->msg;
+ SQWPhaseInput input = {0};
+ qTaskInfo_t pTaskInfo = NULL;
+ DataSinkHandle sinkHandle = NULL;
+ SReadHandle rHandle = {0};
+
+ QW_ERR_JRET(qwAddTaskCtx(QW_FPARAMS()));
+ QW_ERR_JRET(qwAddTaskStatus(QW_FPARAMS(), JOB_TASK_STATUS_INIT));
+
+ QW_ERR_JRET(qwHandlePrePhaseEvents(QW_FPARAMS(), QW_PHASE_PRE_QUERY, &input, NULL));
+ QW_ERR_JRET(qwAcquireTaskCtx(QW_FPARAMS(), &ctx));
+
+ ctx->taskType = qwMsg->msgInfo.taskType;
+ ctx->explain = qwMsg->msgInfo.explain;
+ ctx->needFetch = qwMsg->msgInfo.needFetch;
+ ctx->msgType = qwMsg->msgType;
+ ctx->localExec = true;
+ ctx->explainRes = explainRes;
+
+ rHandle.pMsgCb = taosMemoryCalloc(1, sizeof(SMsgCb));
+ rHandle.pMsgCb->clientRpc = qwMsg->connInfo.handle;
+
+ code = qCreateExecTask(&rHandle, mgmt->nodeId, tId, plan, &pTaskInfo, &sinkHandle, NULL, OPTR_EXEC_MODEL_BATCH);
+ if (code) {
+ QW_TASK_ELOG("qCreateExecTask failed, code:%x - %s", code, tstrerror(code));
+ QW_ERR_JRET(code);
+ }
+
+ if (NULL == sinkHandle || NULL == pTaskInfo) {
+ QW_TASK_ELOG("create task result error, taskHandle:%p, sinkHandle:%p", pTaskInfo, sinkHandle);
+ QW_ERR_JRET(TSDB_CODE_QRY_APP_ERROR);
+ }
+
+ ctx->level = plan->level;
+ atomic_store_ptr(&ctx->taskHandle, pTaskInfo);
+ atomic_store_ptr(&ctx->sinkHandle, sinkHandle);
+
+ QW_ERR_JRET(qwExecTask(QW_FPARAMS(), ctx, NULL));
+
+_return:
+
+ taosMemoryFree(rHandle.pMsgCb);
+
+ input.code = code;
+ input.msgType = qwMsg->msgType;
+ code = qwHandlePostPhaseEvents(QW_FPARAMS(), QW_PHASE_POST_QUERY, &input, NULL);
+
+ if (ctx) {
+ QW_UPDATE_RSP_CODE(ctx, code);
+ qwReleaseTaskCtx(mgmt, ctx);
+ }
+
+ QW_RET(code);
+}
+
+int32_t qWorkerProcessLocalFetch(void *pMgmt, uint64_t sId, uint64_t qId, uint64_t tId, int64_t rId, int32_t eId, void** pRsp, SArray* explainRes) {
+ SQWorker *mgmt = (SQWorker*)pMgmt;
+ int32_t code = 0;
+ int32_t dataLen = 0;
+ SQWTaskCtx *ctx = NULL;
+ void *rsp = NULL;
+ bool queryStop = false;
+
+ SQWPhaseInput input = {0};
+
+ QW_ERR_JRET(qwHandlePrePhaseEvents(QW_FPARAMS(), QW_PHASE_PRE_FETCH, &input, NULL));
+
+ QW_ERR_JRET(qwGetTaskCtx(QW_FPARAMS(), &ctx));
+
+ ctx->msgType = TDMT_SCH_MERGE_FETCH;
+ ctx->explainRes = explainRes;
+
+ SOutputData sOutput = {0};
+
+ while (true) {
+ QW_ERR_JRET(qwGetQueryResFromSink(QW_FPARAMS(), ctx, &dataLen, &rsp, &sOutput));
+
+ if (NULL == rsp) {
+ QW_ERR_JRET(qwExecTask(QW_FPARAMS(), ctx, &queryStop));
+
+ continue;
+ } else {
+ bool qComplete = (DS_BUF_EMPTY == sOutput.bufStatus && sOutput.queryEnd);
+
+ qwBuildFetchRsp(rsp, &sOutput, dataLen, qComplete);
+ if (qComplete) {
+ atomic_store_8((int8_t *)&ctx->queryEnd, true);
+ }
+
+ break;
+ }
+ }
+
+_return:
+
+ *pRsp = rsp;
+
+ input.code = code;
+ code = qwHandlePostPhaseEvents(QW_FPARAMS(), QW_PHASE_POST_FETCH, &input, NULL);
+
+ QW_RET(code);
+}
+
+
diff --git a/source/libs/qworker/test/qworkerTests.cpp b/source/libs/qworker/test/qworkerTests.cpp
index 1f76ea1e7e294090019d8f67e108b1b97cb84d6b..60d6594c1b62389147fd7f8dd7c5de0ee4b4211b 100644
--- a/source/libs/qworker/test/qworkerTests.cpp
+++ b/source/libs/qworker/test/qworkerTests.cpp
@@ -877,7 +877,7 @@ TEST(seqTest, normalCase) {
SMsgCb msgCb = {0};
msgCb.mgmt = (void *)mockPointer;
msgCb.putToQueueFp = (PutToQueueFp)qwtPutReqToQueue;
- code = qWorkerInit(NODE_TYPE_VNODE, 1, NULL, &mgmt, &msgCb);
+ code = qWorkerInit(NODE_TYPE_VNODE, 1, &mgmt, &msgCb);
ASSERT_EQ(code, 0);
code = qWorkerProcessQueryMsg(mockPointer, mgmt, &queryRpc, 0);
@@ -913,7 +913,7 @@ TEST(seqTest, cancelFirst) {
SMsgCb msgCb = {0};
msgCb.mgmt = (void *)mockPointer;
msgCb.putToQueueFp = (PutToQueueFp)qwtPutReqToQueue;
- code = qWorkerInit(NODE_TYPE_VNODE, 1, NULL, &mgmt, &msgCb);
+ code = qWorkerInit(NODE_TYPE_VNODE, 1, &mgmt, &msgCb);
ASSERT_EQ(code, 0);
code = qWorkerProcessDropMsg(mockPointer, mgmt, &dropRpc, 0);
@@ -950,7 +950,7 @@ TEST(seqTest, randCase) {
SMsgCb msgCb = {0};
msgCb.mgmt = (void *)mockPointer;
msgCb.putToQueueFp = (PutToQueueFp)qwtPutReqToQueue;
- code = qWorkerInit(NODE_TYPE_VNODE, 1, NULL, &mgmt, &msgCb);
+ code = qWorkerInit(NODE_TYPE_VNODE, 1, &mgmt, &msgCb);
ASSERT_EQ(code, 0);
int32_t t = 0;
@@ -1021,7 +1021,7 @@ TEST(seqTest, multithreadRand) {
SMsgCb msgCb = {0};
msgCb.mgmt = (void *)mockPointer;
msgCb.putToQueueFp = (PutToQueueFp)qwtPutReqToQueue;
- code = qWorkerInit(NODE_TYPE_VNODE, 1, NULL, &mgmt, &msgCb);
+ code = qWorkerInit(NODE_TYPE_VNODE, 1, &mgmt, &msgCb);
ASSERT_EQ(code, 0);
TdThreadAttr thattr;
@@ -1084,7 +1084,7 @@ TEST(rcTest, shortExecshortDelay) {
SMsgCb msgCb = {0};
msgCb.mgmt = (void *)mockPointer;
msgCb.putToQueueFp = (PutToQueueFp)qwtPutReqToQueue;
- code = qWorkerInit(NODE_TYPE_VNODE, 1, NULL, &mgmt, &msgCb);
+ code = qWorkerInit(NODE_TYPE_VNODE, 1, &mgmt, &msgCb);
ASSERT_EQ(code, 0);
qwtTestMaxExecTaskUsec = 0;
@@ -1168,7 +1168,7 @@ TEST(rcTest, longExecshortDelay) {
SMsgCb msgCb = {0};
msgCb.mgmt = (void *)mockPointer;
msgCb.putToQueueFp = (PutToQueueFp)qwtPutReqToQueue;
- code = qWorkerInit(NODE_TYPE_VNODE, 1, NULL, &mgmt, &msgCb);
+ code = qWorkerInit(NODE_TYPE_VNODE, 1, &mgmt, &msgCb);
ASSERT_EQ(code, 0);
qwtTestMaxExecTaskUsec = 1000000;
@@ -1254,7 +1254,7 @@ TEST(rcTest, shortExeclongDelay) {
SMsgCb msgCb = {0};
msgCb.mgmt = (void *)mockPointer;
msgCb.putToQueueFp = (PutToQueueFp)qwtPutReqToQueue;
- code = qWorkerInit(NODE_TYPE_VNODE, 1, NULL, &mgmt, &msgCb);
+ code = qWorkerInit(NODE_TYPE_VNODE, 1, &mgmt, &msgCb);
ASSERT_EQ(code, 0);
qwtTestMaxExecTaskUsec = 0;
@@ -1338,7 +1338,7 @@ TEST(rcTest, dropTest) {
SMsgCb msgCb = {0};
msgCb.mgmt = (void *)mockPointer;
msgCb.putToQueueFp = (PutToQueueFp)qwtPutReqToQueue;
- code = qWorkerInit(NODE_TYPE_VNODE, 1, NULL, &mgmt, &msgCb);
+ code = qWorkerInit(NODE_TYPE_VNODE, 1, &mgmt, &msgCb);
ASSERT_EQ(code, 0);
tsem_init(&qwtTestQuerySem, 0, 0);
diff --git a/source/libs/scalar/inc/filterInt.h b/source/libs/scalar/inc/filterInt.h
index 23693c785aa17921e5ba4420fe6477fa72b27392..ef3d3f79f959805681d0bc0e2e0b101cd8518388 100644
--- a/source/libs/scalar/inc/filterInt.h
+++ b/source/libs/scalar/inc/filterInt.h
@@ -99,7 +99,7 @@ typedef struct SFilterRange {
typedef bool (*rangeCompFunc) (const void *, const void *, const void *, const void *, __compar_fn_t);
typedef int32_t(*filter_desc_compare_func)(const void *, const void *);
-typedef bool(*filter_exec_func)(void *, int32_t, int8_t**, SColumnDataAgg *, int16_t);
+typedef bool(*filter_exec_func)(void*, int32_t, SColumnInfoData*, SColumnDataAgg*, int16_t, int32_t*);
typedef int32_t (*filer_get_col_from_name)(void *, int32_t, char*, void **);
typedef struct SFilterRangeCompare {
@@ -276,7 +276,7 @@ struct SFilterInfo {
#define FILTER_CLR_FLAG(st, f) st &= (~f)
#define SIMPLE_COPY_VALUES(dst, src) *((int64_t *)dst) = *((int64_t *)src)
-#define FILTER_PACKAGE_UNIT_HASH_KEY(v, optr, idx1, idx2) do { char *_t = (char *)v; _t[0] = optr; *(uint32_t *)(_t + 1) = idx1; *(uint32_t *)(_t + 3) = idx2; } while (0)
+#define FLT_PACKAGE_UNIT_HASH_KEY(v, op1, op2, lidx, ridx, ridx2) do { char *_t = (char *)(v); _t[0] = (op1); _t[1] = (op2); *(uint32_t *)(_t + 2) = (lidx); *(uint32_t *)(_t + 2 + sizeof(uint32_t)) = (ridx); } while (0)
#define FILTER_GREATER(cr,sflag,eflag) ((cr > 0) || ((cr == 0) && (FILTER_GET_FLAG(sflag,RANGE_FLG_EXCLUDE) || FILTER_GET_FLAG(eflag,RANGE_FLG_EXCLUDE))))
#define FILTER_COPY_RA(dst, src) do { (dst)->sflag = (src)->sflag; (dst)->eflag = (src)->eflag; (dst)->s = (src)->s; (dst)->e = (src)->e; } while (0)
@@ -350,6 +350,7 @@ struct SFilterInfo {
extern bool filterDoCompare(__compar_fn_t func, uint8_t optr, void *left, void *right);
extern __compar_fn_t filterGetCompFunc(int32_t type, int32_t optr);
+extern __compar_fn_t filterGetCompFuncEx(int32_t lType, int32_t rType, int32_t optr);
#ifdef __cplusplus
}
diff --git a/source/libs/scalar/inc/sclInt.h b/source/libs/scalar/inc/sclInt.h
index d423b92da7e83589aacc6d384c0e2cafa0949038..15e9026ddbc2eea8ad4e066519dd4bbea9767b7e 100644
--- a/source/libs/scalar/inc/sclInt.h
+++ b/source/libs/scalar/inc/sclInt.h
@@ -45,6 +45,9 @@ typedef struct SScalarCtx {
#define SCL_IS_CONST_CALC(_ctx) (NULL == (_ctx)->pBlockList)
//#define SCL_IS_NULL_VALUE_NODE(_node) ((QUERY_NODE_VALUE == nodeType(_node)) && (TSDB_DATA_TYPE_NULL == ((SValueNode *)_node)->node.resType.type) && (((SValueNode *)_node)->placeholderNo <= 0))
#define SCL_IS_NULL_VALUE_NODE(_node) ((QUERY_NODE_VALUE == nodeType(_node)) && (TSDB_DATA_TYPE_NULL == ((SValueNode *)_node)->node.resType.type))
+#define SCL_IS_COMPARISON_OPERATOR(_opType) ((_opType) >= OP_TYPE_GREATER_THAN && (_opType) < OP_TYPE_IS_NOT_UNKNOWN)
+#define SCL_DOWNGRADE_DATETYPE(_type) ((_type) == TSDB_DATA_TYPE_BIGINT || TSDB_DATA_TYPE_DOUBLE == (_type) || (_type) == TSDB_DATA_TYPE_UBIGINT)
+#define SCL_NO_NEED_CONVERT_COMPARISION(_ltype, _rtype, _optr) (IS_NUMERIC_TYPE(_ltype) && IS_NUMERIC_TYPE(_rtype) && ((_optr) >= OP_TYPE_GREATER_THAN && (_optr) <= OP_TYPE_NOT_EQUAL))
#define sclFatal(...) qFatal(__VA_ARGS__)
#define sclError(...) qError(__VA_ARGS__)
diff --git a/source/libs/scalar/src/filter.c b/source/libs/scalar/src/filter.c
index 4377dbf14ec55dae53d41859af8480886f4cce51..ac063cb50dcbb81a2fec19728444b9b76c69fa70 100644
--- a/source/libs/scalar/src/filter.c
+++ b/source/libs/scalar/src/filter.c
@@ -132,6 +132,77 @@ __compar_fn_t gDataCompare[] = {compareInt32Val, compareInt8Val, compareInt16Val
compareChkNotInString, compareStrPatternNotMatch, compareWStrPatternNotMatch
};
+__compar_fn_t gInt8SignCompare[] = {
+ compareInt8Val, compareInt8Int16, compareInt8Int32, compareInt8Int64, compareInt8Float, compareInt8Double
+};
+__compar_fn_t gInt8UsignCompare[] = {
+ compareInt8Uint8, compareInt8Uint16, compareInt8Uint32, compareInt8Uint64
+};
+
+__compar_fn_t gInt16SignCompare[] = {
+ compareInt16Int8, compareInt16Val, compareInt16Int32, compareInt16Int64, compareInt16Float, compareInt16Double
+};
+__compar_fn_t gInt16UsignCompare[] = {
+ compareInt16Uint8, compareInt16Uint16, compareInt16Uint32, compareInt16Uint64
+};
+
+__compar_fn_t gInt32SignCompare[] = {
+ compareInt32Int8, compareInt32Int16, compareInt32Val, compareInt32Int64, compareInt32Float, compareInt32Double
+};
+__compar_fn_t gInt32UsignCompare[] = {
+ compareInt32Uint8, compareInt32Uint16, compareInt32Uint32, compareInt32Uint64
+};
+
+__compar_fn_t gInt64SignCompare[] = {
+ compareInt64Int8, compareInt64Int16, compareInt64Int32, compareInt64Val, compareInt64Float, compareInt64Double
+};
+__compar_fn_t gInt64UsignCompare[] = {
+ compareInt64Uint8, compareInt64Uint16, compareInt64Uint32, compareInt64Uint64
+};
+
+__compar_fn_t gFloatSignCompare[] = {
+ compareFloatInt8, compareFloatInt16, compareFloatInt32, compareFloatInt64, compareFloatVal, compareFloatDouble
+};
+__compar_fn_t gFloatUsignCompare[] = {
+ compareFloatUint8, compareFloatUint16, compareFloatUint32, compareFloatUint64
+};
+
+__compar_fn_t gDoubleSignCompare[] = {
+ compareDoubleInt8, compareDoubleInt16, compareDoubleInt32, compareDoubleInt64, compareDoubleFloat, compareDoubleVal
+};
+__compar_fn_t gDoubleUsignCompare[] = {
+ compareDoubleUint8, compareDoubleUint16, compareDoubleUint32, compareDoubleUint64
+};
+
+__compar_fn_t gUint8SignCompare[] = {
+ compareUint8Int8, compareUint8Int16, compareUint8Int32, compareUint8Int64, compareUint8Float, compareUint8Double
+};
+__compar_fn_t gUint8UsignCompare[] = {
+ compareUint8Val, compareUint8Uint16, compareUint8Uint32, compareUint8Uint64
+};
+
+__compar_fn_t gUint16SignCompare[] = {
+ compareUint16Int8, compareUint16Int16, compareUint16Int32, compareUint16Int64, compareUint16Float, compareUint16Double
+};
+__compar_fn_t gUint16UsignCompare[] = {
+ compareUint16Uint8, compareUint16Val, compareUint16Uint32, compareUint16Uint64
+};
+
+__compar_fn_t gUint32SignCompare[] = {
+ compareUint32Int8, compareUint32Int16, compareUint32Int32, compareUint32Int64, compareUint32Float, compareUint32Double
+};
+__compar_fn_t gUint32UsignCompare[] = {
+ compareUint32Uint8, compareUint32Uint16, compareUint32Val, compareUint32Uint64
+};
+
+__compar_fn_t gUint64SignCompare[] = {
+ compareUint64Int8, compareUint64Int16, compareUint64Int32, compareUint64Int64, compareUint64Float, compareUint64Double
+};
+__compar_fn_t gUint64UsignCompare[] = {
+ compareUint64Uint8, compareUint64Uint16, compareUint64Uint32, compareUint64Val
+};
+
+
int8_t filterGetCompFuncIdx(int32_t type, int32_t optr) {
int8_t comparFn = 0;
@@ -257,6 +328,93 @@ __compar_fn_t filterGetCompFunc(int32_t type, int32_t optr) {
return gDataCompare[filterGetCompFuncIdx(type, optr)];
}
+__compar_fn_t filterGetCompFuncEx(int32_t lType, int32_t rType, int32_t optr) {
+ switch (lType) {
+ case TSDB_DATA_TYPE_TINYINT: {
+ if (IS_SIGNED_NUMERIC_TYPE(rType) || IS_FLOAT_TYPE(rType)) {
+ return gInt8SignCompare[rType - TSDB_DATA_TYPE_TINYINT];
+ } else {
+ return gInt8UsignCompare[rType - TSDB_DATA_TYPE_UTINYINT];
+ }
+ break;
+ }
+ case TSDB_DATA_TYPE_SMALLINT: {
+ if (IS_SIGNED_NUMERIC_TYPE(rType) || IS_FLOAT_TYPE(rType)) {
+ return gInt16SignCompare[rType - TSDB_DATA_TYPE_TINYINT];
+ } else {
+ return gInt16UsignCompare[rType - TSDB_DATA_TYPE_UTINYINT];
+ }
+ break;
+ }
+ case TSDB_DATA_TYPE_INT: {
+ if (IS_SIGNED_NUMERIC_TYPE(rType) || IS_FLOAT_TYPE(rType)) {
+ return gInt32SignCompare[rType - TSDB_DATA_TYPE_TINYINT];
+ } else {
+ return gInt32UsignCompare[rType - TSDB_DATA_TYPE_UTINYINT];
+ }
+ break;
+ }
+ case TSDB_DATA_TYPE_BIGINT: {
+ if (IS_SIGNED_NUMERIC_TYPE(rType) || IS_FLOAT_TYPE(rType)) {
+ return gInt64SignCompare[rType - TSDB_DATA_TYPE_TINYINT];
+ } else {
+ return gInt64UsignCompare[rType - TSDB_DATA_TYPE_UTINYINT];
+ }
+ break;
+ }
+ case TSDB_DATA_TYPE_FLOAT: {
+ if (IS_SIGNED_NUMERIC_TYPE(rType) || IS_FLOAT_TYPE(rType)) {
+ return gFloatSignCompare[rType - TSDB_DATA_TYPE_TINYINT];
+ } else {
+ return gFloatUsignCompare[rType - TSDB_DATA_TYPE_UTINYINT];
+ }
+ break;
+ }
+ case TSDB_DATA_TYPE_DOUBLE: {
+ if (IS_SIGNED_NUMERIC_TYPE(rType) || IS_FLOAT_TYPE(rType)) {
+ return gDoubleSignCompare[rType - TSDB_DATA_TYPE_TINYINT];
+ } else {
+ return gDoubleUsignCompare[rType - TSDB_DATA_TYPE_UTINYINT];
+ }
+ break;
+ }
+ case TSDB_DATA_TYPE_UTINYINT: {
+ if (IS_SIGNED_NUMERIC_TYPE(rType) || IS_FLOAT_TYPE(rType)) {
+ return gUint8SignCompare[rType - TSDB_DATA_TYPE_TINYINT];
+ } else {
+ return gUint8UsignCompare[rType - TSDB_DATA_TYPE_UTINYINT];
+ }
+ break;
+ }
+ case TSDB_DATA_TYPE_USMALLINT: {
+ if (IS_SIGNED_NUMERIC_TYPE(rType) || IS_FLOAT_TYPE(rType)) {
+ return gUint16SignCompare[rType - TSDB_DATA_TYPE_TINYINT];
+ } else {
+ return gUint16UsignCompare[rType - TSDB_DATA_TYPE_UTINYINT];
+ }
+ break;
+ }
+ case TSDB_DATA_TYPE_UINT: {
+ if (IS_SIGNED_NUMERIC_TYPE(rType) || IS_FLOAT_TYPE(rType)) {
+ return gUint32SignCompare[rType - TSDB_DATA_TYPE_TINYINT];
+ } else {
+ return gUint32UsignCompare[rType - TSDB_DATA_TYPE_UTINYINT];
+ }
+ break;
+ }
+ case TSDB_DATA_TYPE_UBIGINT: {
+ if (IS_SIGNED_NUMERIC_TYPE(rType) || IS_FLOAT_TYPE(rType)) {
+ return gUint64SignCompare[rType - TSDB_DATA_TYPE_TINYINT];
+ } else {
+ return gUint64UsignCompare[rType - TSDB_DATA_TYPE_UTINYINT];
+ }
+ break;
+ }
+ default:
+ break;
+ }
+ return NULL;
+}
static FORCE_INLINE int32_t filterCompareGroupCtx(const void *pLeft, const void *pRight) {
SFilterGroupCtx *left = *((SFilterGroupCtx**)pLeft), *right = *((SFilterGroupCtx**)pRight);
@@ -910,14 +1068,14 @@ int32_t filterAddFieldFromNode(SFilterInfo *info, SNode *node, SFilterFieldId *f
return TSDB_CODE_SUCCESS;
}
-int32_t filterAddUnit(SFilterInfo *info, uint8_t optr, SFilterFieldId *left, SFilterFieldId *right, uint32_t *uidx) {
+int32_t filterAddUnitImpl(SFilterInfo *info, uint8_t optr, SFilterFieldId *left, SFilterFieldId *right, uint8_t optr2, SFilterFieldId *right2, uint32_t *uidx) {
if (FILTER_GET_FLAG(info->options, FLT_OPTION_NEED_UNIQE)) {
if (info->pctx.unitHash == NULL) {
info->pctx.unitHash = taosHashInit(FILTER_DEFAULT_GROUP_SIZE * FILTER_DEFAULT_UNIT_SIZE, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), false, false);
} else {
- int64_t v = 0;
- FILTER_PACKAGE_UNIT_HASH_KEY(&v, optr, left->idx, right ? right->idx : -1);
- void *hu = taosHashGet(info->pctx.unitHash, &v, sizeof(v));
+ char v[14] = {0};
+ FLT_PACKAGE_UNIT_HASH_KEY(&v, optr, optr2, left->idx, (right ? right->idx : -1), (right2 ? right2->idx : -1));
+ void *hu = taosHashGet(info->pctx.unitHash, v, sizeof(v));
if (hu) {
*uidx = *(uint32_t *)hu;
return TSDB_CODE_SUCCESS;
@@ -939,7 +1097,11 @@ int32_t filterAddUnit(SFilterInfo *info, uint8_t optr, SFilterFieldId *left, SFi
if (right) {
u->right = *right;
}
-
+ u->compare.optr2 = optr2;
+ if (right2) {
+ u->right2 = *right2;
+ }
+
if (u->right.type == FLD_TYPE_VALUE) {
SFilterField *val = FILTER_UNIT_RIGHT_FIELD(info, u);
assert(FILTER_GET_FLAG(val->flag, FLD_TYPE_VALUE));
@@ -960,9 +1122,9 @@ int32_t filterAddUnit(SFilterInfo *info, uint8_t optr, SFilterFieldId *left, SFi
*uidx = info->unitNum;
if (FILTER_GET_FLAG(info->options, FLT_OPTION_NEED_UNIQE)) {
- int64_t v = 0;
- FILTER_PACKAGE_UNIT_HASH_KEY(&v, optr, left->idx, right ? right->idx : -1);
- taosHashPut(info->pctx.unitHash, &v, sizeof(v), uidx, sizeof(*uidx));
+ char v[14] = {0};
+ FLT_PACKAGE_UNIT_HASH_KEY(&v, optr, optr2, left->idx, (right ? right->idx : -1), (right2 ? right2->idx : -1));
+ taosHashPut(info->pctx.unitHash, v, sizeof(v), uidx, sizeof(*uidx));
}
++info->unitNum;
@@ -971,6 +1133,9 @@ int32_t filterAddUnit(SFilterInfo *info, uint8_t optr, SFilterFieldId *left, SFi
}
+int32_t filterAddUnit(SFilterInfo *info, uint8_t optr, SFilterFieldId *left, SFilterFieldId *right, uint32_t *uidx) {
+ return filterAddUnitImpl(info, optr, left, right, 0, NULL, uidx);
+}
int32_t filterAddUnitToGroup(SFilterGroup *group, uint32_t unitIdx) {
if (group->unitNum >= group->unitSize) {
@@ -1147,8 +1312,8 @@ int32_t filterAddGroupUnitFromCtx(SFilterInfo *dst, SFilterInfo *src, SFilterRan
SIMPLE_COPY_VALUES(data2, &ra->e);
filterAddField(dst, NULL, &data2, FLD_TYPE_VALUE, &right2, tDataTypes[type].bytes, true);
- filterAddUnit(dst, FILTER_GET_FLAG(ra->sflag, RANGE_FLG_EXCLUDE) ? OP_TYPE_GREATER_THAN : OP_TYPE_GREATER_EQUAL, &left, &right, &uidx);
- filterAddUnitRight(dst, FILTER_GET_FLAG(ra->eflag, RANGE_FLG_EXCLUDE) ? OP_TYPE_LOWER_THAN : OP_TYPE_LOWER_EQUAL, &right2, uidx);
+ filterAddUnitImpl(dst, FILTER_GET_FLAG(ra->sflag, RANGE_FLG_EXCLUDE) ? OP_TYPE_GREATER_THAN : OP_TYPE_GREATER_EQUAL, &left, &right,
+ FILTER_GET_FLAG(ra->eflag, RANGE_FLG_EXCLUDE) ? OP_TYPE_LOWER_THAN : OP_TYPE_LOWER_EQUAL, &right2, &uidx);
filterAddUnitToGroup(g, uidx);
return TSDB_CODE_SUCCESS;
}
@@ -1222,8 +1387,8 @@ int32_t filterAddGroupUnitFromCtx(SFilterInfo *dst, SFilterInfo *src, SFilterRan
SIMPLE_COPY_VALUES(data2, &r->ra.e);
filterAddField(dst, NULL, &data2, FLD_TYPE_VALUE, &right2, tDataTypes[type].bytes, true);
- filterAddUnit(dst, FILTER_GET_FLAG(r->ra.sflag, RANGE_FLG_EXCLUDE) ? OP_TYPE_GREATER_THAN : OP_TYPE_GREATER_EQUAL, &left, &right, &uidx);
- filterAddUnitRight(dst, FILTER_GET_FLAG(r->ra.eflag, RANGE_FLG_EXCLUDE) ? OP_TYPE_LOWER_THAN : OP_TYPE_LOWER_EQUAL, &right2, uidx);
+ filterAddUnitImpl(dst, FILTER_GET_FLAG(r->ra.sflag, RANGE_FLG_EXCLUDE) ? OP_TYPE_GREATER_THAN : OP_TYPE_GREATER_EQUAL, &left, &right,
+ FILTER_GET_FLAG(r->ra.eflag, RANGE_FLG_EXCLUDE) ? OP_TYPE_LOWER_THAN : OP_TYPE_LOWER_EQUAL, &right2, &uidx);
filterAddUnitToGroup(g, uidx);
}
@@ -2073,6 +2238,44 @@ int32_t filterMergeGroupUnits(SFilterInfo *info, SFilterGroupCtx** gRes, int32_t
return TSDB_CODE_SUCCESS;
}
+bool filterIsSameUnits(SFilterColInfo* pCol1, SFilterColInfo* pCol2) {
+ if (pCol1->type != pCol2->type) {
+ return false;
+ }
+
+ if (RANGE_TYPE_MR_CTX == pCol1->type) {
+ SFilterRangeCtx* pCtx1 = (SFilterRangeCtx*)pCol1->info;
+ SFilterRangeCtx* pCtx2 = (SFilterRangeCtx*)pCol2->info;
+
+ if ((pCtx1->isnull != pCtx2->isnull) || (pCtx1->notnull != pCtx2->notnull) || (pCtx1->isrange != pCtx2->isrange)) {
+ return false;
+ }
+
+
+ SFilterRangeNode* pNode1 = pCtx1->rs;
+ SFilterRangeNode* pNode2 = pCtx2->rs;
+
+ while (true) {
+ if (NULL == pNode1 && NULL == pNode2) {
+ break;
+ }
+
+ if (NULL == pNode1 || NULL == pNode2) {
+ return false;
+ }
+
+ if (pNode1->ra.s != pNode2->ra.s || pNode1->ra.e != pNode2->ra.e || pNode1->ra.sflag != pNode2->ra.sflag || pNode1->ra.eflag != pNode2->ra.eflag) {
+ return false;
+ }
+
+ pNode1 = pNode1->next;
+ pNode2 = pNode2->next;
+ }
+ }
+
+ return true;
+}
+
void filterCheckColConflict(SFilterGroupCtx* gRes1, SFilterGroupCtx* gRes2, bool *conflict) {
uint32_t idx1 = 0, idx2 = 0, m = 0, n = 0;
bool equal = false;
@@ -2098,6 +2301,11 @@ void filterCheckColConflict(SFilterGroupCtx* gRes1, SFilterGroupCtx* gRes2, bool
return;
}
+ if (!filterIsSameUnits(&gRes1->colInfo[idx1], &gRes2->colInfo[idx2])) {
+ *conflict = true;
+ return;
+ }
+
// for long in operation
if (gRes1->colInfo[idx1].optr == OP_TYPE_EQUAL && gRes2->colInfo[idx2].optr == OP_TYPE_EQUAL) {
SFilterRangeCtx* ctx = gRes1->colInfo[idx1].info;
@@ -2711,17 +2919,22 @@ int32_t filterRmUnitByRange(SFilterInfo *info, SColumnDataAgg *pDataStatis, int3
for (uint32_t g = 0; g < info->groupNum; ++g) {
SFilterGroup *group = &info->groups[g];
+ // first is block unint num for a group, following append unitNum blkUnitIdx for this group
*unitNum = group->unitNum;
all = 0;
empty = 0;
+ // save group idx start pointer
+ uint32_t * pGroupIdx = unitIdx;
for (uint32_t u = 0; u < group->unitNum; ++u) {
uint32_t uidx = group->unitIdxs[u];
if (info->blkUnitRes[uidx] == 1) {
+ // blkUnitRes == 1 is always true, so need not compare every time, delete this unit from group
--(*unitNum);
all = 1;
continue;
} else if (info->blkUnitRes[uidx] == -1) {
+ // blkUnitRes == -1 is alwary false, so in group is alwary false, need delete this group from blkGroupNum
*unitNum = 0;
empty = 1;
break;
@@ -2731,6 +2944,9 @@ int32_t filterRmUnitByRange(SFilterInfo *info, SColumnDataAgg *pDataStatis, int3
}
if (*unitNum == 0) {
+ // if unit num is zero, reset unitIdx to start on this group
+ unitIdx = pGroupIdx;
+
--info->blkGroupNum;
assert(empty || all);
@@ -2760,14 +2976,12 @@ _return:
return TSDB_CODE_SUCCESS;
}
-bool filterExecuteBasedOnStatisImpl(void *pinfo, int32_t numOfRows, int8_t** p, SColumnDataAgg *statis, int16_t numOfCols) {
+bool filterExecuteBasedOnStatisImpl(void *pinfo, int32_t numOfRows, SColumnInfoData* pRes, SColumnDataAgg *statis, int16_t numOfCols) {
SFilterInfo *info = (SFilterInfo *)pinfo;
bool all = true;
uint32_t *unitIdx = NULL;
- if (*p == NULL) {
- *p = taosMemoryCalloc(numOfRows, sizeof(int8_t));
- }
+ int8_t* p = (int8_t*)pRes->pData;
for (int32_t i = 0; i < numOfRows; ++i) {
//FILTER_UNIT_CLR_F(info);
@@ -2786,35 +3000,35 @@ bool filterExecuteBasedOnStatisImpl(void *pinfo, int32_t numOfRows, int8_t** p,
uint8_t optr = cunit->optr;
if (colDataIsNull((SColumnInfoData *)(cunit->colData), 0, i, NULL)) {
- (*p)[i] = optr == OP_TYPE_IS_NULL ? true : false;
+ p[i] = (optr == OP_TYPE_IS_NULL) ? true : false;
} else {
if (optr == OP_TYPE_IS_NOT_NULL) {
- (*p)[i] = 1;
+ p[i] = 1;
} else if (optr == OP_TYPE_IS_NULL) {
- (*p)[i] = 0;
+ p[i] = 0;
} else if (cunit->rfunc >= 0) {
- (*p)[i] = (*gRangeCompare[cunit->rfunc])(colData, colData, cunit->valData, cunit->valData2, gDataCompare[cunit->func]);
+ p[i] = (*gRangeCompare[cunit->rfunc])(colData, colData, cunit->valData, cunit->valData2, gDataCompare[cunit->func]);
} else {
- (*p)[i] = filterDoCompare(gDataCompare[cunit->func], cunit->optr, colData, cunit->valData);
+ p[i] = filterDoCompare(gDataCompare[cunit->func], cunit->optr, colData, cunit->valData);
}
//FILTER_UNIT_SET_R(info, uidx, p[i]);
//FILTER_UNIT_SET_F(info, uidx);
}
- if ((*p)[i] == 0) {
+ if (p[i] == 0) {
break;
}
}
- if ((*p)[i]) {
+ if (p[i]) {
break;
}
unitIdx += unitNum;
}
- if ((*p)[i] == 0) {
+ if (p[i] == 0) {
all = false;
}
}
@@ -2824,7 +3038,7 @@ bool filterExecuteBasedOnStatisImpl(void *pinfo, int32_t numOfRows, int8_t** p,
-int32_t filterExecuteBasedOnStatis(SFilterInfo *info, int32_t numOfRows, int8_t** p, SColumnDataAgg *statis, int16_t numOfCols, bool* all) {
+int32_t filterExecuteBasedOnStatis(SFilterInfo *info, int32_t numOfRows, SColumnInfoData* p, SColumnDataAgg *statis, int16_t numOfCols, bool* all) {
if (statis && numOfRows >= FILTER_RM_UNIT_MIN_ROWS) {
info->blkFlag = 0;
@@ -2842,7 +3056,6 @@ int32_t filterExecuteBasedOnStatis(SFilterInfo *info, int32_t numOfRows, int8_t*
assert(info->unitNum > 1);
*all = filterExecuteBasedOnStatisImpl(info, numOfRows, p, statis, numOfCols);
-
goto _return;
}
}
@@ -2851,67 +3064,70 @@ int32_t filterExecuteBasedOnStatis(SFilterInfo *info, int32_t numOfRows, int8_t*
_return:
info->blkFlag = 0;
-
return TSDB_CODE_SUCCESS;
}
-
-static FORCE_INLINE bool filterExecuteImplAll(void *info, int32_t numOfRows, int8_t** p, SColumnDataAgg *statis, int16_t numOfCols) {
+static FORCE_INLINE bool filterExecuteImplAll(void *info, int32_t numOfRows, SColumnInfoData* p, SColumnDataAgg *statis, int16_t numOfCols, int32_t* numOfQualified) {
return true;
}
-static FORCE_INLINE bool filterExecuteImplEmpty(void *info, int32_t numOfRows, int8_t** p, SColumnDataAgg *statis, int16_t numOfCols) {
+
+static FORCE_INLINE bool filterExecuteImplEmpty(void *info, int32_t numOfRows, SColumnInfoData* p, SColumnDataAgg *statis, int16_t numOfCols, int32_t* numOfQualified) {
return false;
}
-static FORCE_INLINE bool filterExecuteImplIsNull(void *pinfo, int32_t numOfRows, int8_t** p, SColumnDataAgg *statis, int16_t numOfCols) {
+
+static FORCE_INLINE bool filterExecuteImplIsNull(void *pinfo, int32_t numOfRows, SColumnInfoData *pRes,
+ SColumnDataAgg *statis, int16_t numOfCols, int32_t *numOfQualified) {
SFilterInfo *info = (SFilterInfo *)pinfo;
bool all = true;
- if (filterExecuteBasedOnStatis(info, numOfRows, p, statis, numOfCols, &all) == 0) {
- return all;
- }
+ int8_t* p = (int8_t*)pRes->pData;
- if (*p == NULL) {
- *p = taosMemoryCalloc(numOfRows, sizeof(int8_t));
+ if (filterExecuteBasedOnStatis(info, numOfRows, pRes, statis, numOfCols, &all) == 0) {
+ return all;
}
for (int32_t i = 0; i < numOfRows; ++i) {
uint32_t uidx = info->groups[0].unitIdxs[0];
void *colData = colDataGetData((SColumnInfoData *)info->cunits[uidx].colData, i);
- (*p)[i] = ((colData == NULL) || colDataIsNull((SColumnInfoData *)info->cunits[uidx].colData, 0, i, NULL));
+ p[i] = ((colData == NULL) || colDataIsNull((SColumnInfoData *)info->cunits[uidx].colData, 0, i, NULL));
- if ((*p)[i] == 0) {
+ if (p[i] == 0) {
all = false;
+ } else {
+ (*numOfQualified) += 1;
}
}
return all;
}
-static FORCE_INLINE bool filterExecuteImplNotNull(void *pinfo, int32_t numOfRows, int8_t** p, SColumnDataAgg *statis, int16_t numOfCols) {
+
+static FORCE_INLINE bool filterExecuteImplNotNull(void *pinfo, int32_t numOfRows, SColumnInfoData *pRes,
+ SColumnDataAgg *statis, int16_t numOfCols, int32_t *numOfQualified) {
SFilterInfo *info = (SFilterInfo *)pinfo;
bool all = true;
- if (filterExecuteBasedOnStatis(info, numOfRows, p, statis, numOfCols, &all) == 0) {
+ if (filterExecuteBasedOnStatis(info, numOfRows, pRes, statis, numOfCols, &all) == 0) {
return all;
}
- if (*p == NULL) {
- *p = taosMemoryCalloc(numOfRows, sizeof(int8_t));
- }
+ int8_t* p = (int8_t*)pRes->pData;
for (int32_t i = 0; i < numOfRows; ++i) {
uint32_t uidx = info->groups[0].unitIdxs[0];
void *colData = colDataGetData((SColumnInfoData *)info->cunits[uidx].colData, i);
- (*p)[i] = ((colData != NULL) && !colDataIsNull((SColumnInfoData *)info->cunits[uidx].colData, 0, i, NULL));
- if ((*p)[i] == 0) {
+ p[i] = ((colData != NULL) && !colDataIsNull((SColumnInfoData *)info->cunits[uidx].colData, 0, i, NULL));
+ if (p[i] == 0) {
all = false;
+ } else {
+ (*numOfQualified) += 1;
}
}
return all;
}
-bool filterExecuteImplRange(void *pinfo, int32_t numOfRows, int8_t** p, SColumnDataAgg *statis, int16_t numOfCols) {
+bool filterExecuteImplRange(void *pinfo, int32_t numOfRows, SColumnInfoData* pRes, SColumnDataAgg *statis, int16_t numOfCols, int32_t* numOfQualified) {
SFilterInfo *info = (SFilterInfo *)pinfo;
bool all = true;
uint16_t dataSize = info->cunits[0].dataSize;
@@ -2920,49 +3136,49 @@ bool filterExecuteImplRange(void *pinfo, int32_t numOfRows, int8_t** p, SColumnD
void *valData2 = info->cunits[0].valData2;
__compar_fn_t func = gDataCompare[info->cunits[0].func];
- if (filterExecuteBasedOnStatis(info, numOfRows, p, statis, numOfCols, &all) == 0) {
+ if (filterExecuteBasedOnStatis(info, numOfRows, pRes, statis, numOfCols, &all) == 0) {
return all;
}
- if (*p == NULL) {
- *p = taosMemoryCalloc(numOfRows, sizeof(int8_t));
- }
+ int8_t* p = (int8_t*) pRes->pData;
for (int32_t i = 0; i < numOfRows; ++i) {
- void *colData = colDataGetData((SColumnInfoData *)info->cunits[0].colData, i);
SColumnInfoData* pData = info->cunits[0].colData;
+
+ void *colData = colDataGetData(pData, i);
if (colData == NULL || colDataIsNull_s(pData, i)) {
all = false;
continue;
}
- (*p)[i] = (*rfunc)(colData, colData, valData, valData2, func);
+ p[i] = (*rfunc)(colData, colData, valData, valData2, func);
- if ((*p)[i] == 0) {
+ if (p[i] == 0) {
all = false;
+ } else {
+ (*numOfQualified)++;
}
}
return all;
}
-bool filterExecuteImplMisc(void *pinfo, int32_t numOfRows, int8_t** p, SColumnDataAgg *statis, int16_t numOfCols) {
+bool filterExecuteImplMisc(void *pinfo, int32_t numOfRows, SColumnInfoData *pRes, SColumnDataAgg *statis,
+ int16_t numOfCols, int32_t *numOfQualified) {
SFilterInfo *info = (SFilterInfo *)pinfo;
bool all = true;
- if (filterExecuteBasedOnStatis(info, numOfRows, p, statis, numOfCols, &all) == 0) {
+ if (filterExecuteBasedOnStatis(info, numOfRows, pRes, statis, numOfCols, &all) == 0) {
return all;
}
- if (*p == NULL) {
- *p = taosMemoryCalloc(numOfRows, sizeof(int8_t));
- }
+ int8_t* p = (int8_t*) pRes->pData;
for (int32_t i = 0; i < numOfRows; ++i) {
uint32_t uidx = info->groups[0].unitIdxs[0];
void *colData = colDataGetData((SColumnInfoData *)info->cunits[uidx].colData, i);
if (colData == NULL || colDataIsNull_s((SColumnInfoData *)info->cunits[uidx].colData, i)) {
- (*p)[i] = 0;
+ p[i] = 0;
all = false;
continue;
}
@@ -2975,33 +3191,33 @@ bool filterExecuteImplMisc(void *pinfo, int32_t numOfRows, int8_t** p, SColumnDa
qError("castConvert1 taosUcs4ToMbs error");
}else{
varDataSetLen(newColData, len);
- (*p)[i] = filterDoCompare(gDataCompare[info->cunits[uidx].func], info->cunits[uidx].optr, newColData, info->cunits[uidx].valData);
+ p[i] = filterDoCompare(gDataCompare[info->cunits[uidx].func], info->cunits[uidx].optr, newColData, info->cunits[uidx].valData);
}
taosMemoryFreeClear(newColData);
}else{
- (*p)[i] = filterDoCompare(gDataCompare[info->cunits[uidx].func], info->cunits[uidx].optr, colData, info->cunits[uidx].valData);
+ p[i] = filterDoCompare(gDataCompare[info->cunits[uidx].func], info->cunits[uidx].optr, colData, info->cunits[uidx].valData);
}
- if ((*p)[i] == 0) {
+ if (p[i] == 0) {
all = false;
+ } else {
+ (*numOfQualified) += 1;
}
}
return all;
}
-
-bool filterExecuteImpl(void *pinfo, int32_t numOfRows, int8_t** p, SColumnDataAgg *statis, int16_t numOfCols) {
+bool filterExecuteImpl(void *pinfo, int32_t numOfRows, SColumnInfoData *pRes, SColumnDataAgg *statis, int16_t numOfCols,
+ int32_t *numOfQualified) {
SFilterInfo *info = (SFilterInfo *)pinfo;
bool all = true;
- if (filterExecuteBasedOnStatis(info, numOfRows, p, statis, numOfCols, &all) == 0) {
+ if (filterExecuteBasedOnStatis(info, numOfRows, pRes, statis, numOfCols, &all) == 0) {
return all;
}
- if (*p == NULL) {
- *p = taosMemoryCalloc(numOfRows, sizeof(int8_t));
- }
+ int8_t* p = (int8_t*) pRes->pData;
for (int32_t i = 0; i < numOfRows; ++i) {
//FILTER_UNIT_CLR_F(info);
@@ -3019,14 +3235,14 @@ bool filterExecuteImpl(void *pinfo, int32_t numOfRows, int8_t** p, SColumnDataAg
uint8_t optr = cunit->optr;
if (colData == NULL || colDataIsNull((SColumnInfoData *)(cunit->colData), 0, i, NULL)) {
- (*p)[i] = optr == OP_TYPE_IS_NULL ? true : false;
+ p[i] = optr == OP_TYPE_IS_NULL ? true : false;
} else {
if (optr == OP_TYPE_IS_NOT_NULL) {
- (*p)[i] = 1;
+ p[i] = 1;
} else if (optr == OP_TYPE_IS_NULL) {
- (*p)[i] = 0;
+ p[i] = 0;
} else if (cunit->rfunc >= 0) {
- (*p)[i] = (*gRangeCompare[cunit->rfunc])(colData, colData, cunit->valData, cunit->valData2, gDataCompare[cunit->func]);
+ p[i] = (*gRangeCompare[cunit->rfunc])(colData, colData, cunit->valData, cunit->valData2, gDataCompare[cunit->func]);
} else {
if(cunit->dataType == TSDB_DATA_TYPE_NCHAR && (cunit->optr == OP_TYPE_MATCH || cunit->optr == OP_TYPE_NMATCH)){
char *newColData = taosMemoryCalloc(cunit->dataSize * TSDB_NCHAR_SIZE + VARSTR_HEADER_SIZE, 1);
@@ -3035,11 +3251,11 @@ bool filterExecuteImpl(void *pinfo, int32_t numOfRows, int8_t** p, SColumnDataAg
qError("castConvert1 taosUcs4ToMbs error");
}else{
varDataSetLen(newColData, len);
- (*p)[i] = filterDoCompare(gDataCompare[cunit->func], cunit->optr, newColData, cunit->valData);
+ p[i] = filterDoCompare(gDataCompare[cunit->func], cunit->optr, newColData, cunit->valData);
}
taosMemoryFreeClear(newColData);
}else{
- (*p)[i] = filterDoCompare(gDataCompare[cunit->func], cunit->optr, colData, cunit->valData);
+ p[i] = filterDoCompare(gDataCompare[cunit->func], cunit->optr, colData, cunit->valData);
}
}
@@ -3047,18 +3263,20 @@ bool filterExecuteImpl(void *pinfo, int32_t numOfRows, int8_t** p, SColumnDataAg
//FILTER_UNIT_SET_F(info, uidx);
}
- if ((*p)[i] == 0) {
+ if (p[i] == 0) {
break;
}
}
- if ((*p)[i]) {
+ if (p[i]) {
break;
}
}
- if ((*p)[i] == 0) {
+ if (p[i] == 0) {
all = false;
+ } else {
+ (*numOfQualified) += 1;
}
}
@@ -3810,37 +4028,62 @@ _return:
FLT_RET(code);
}
-bool filterExecute(SFilterInfo *info, SSDataBlock *pSrc, int8_t** p, SColumnDataAgg *statis, int16_t numOfCols) {
+bool filterExecute(SFilterInfo *info, SSDataBlock *pSrc, SColumnInfoData** p, SColumnDataAgg *statis, int16_t numOfCols, int32_t *pResultStatus) {
if (NULL == info) {
+ *pResultStatus = FILTER_RESULT_ALL_QUALIFIED;
return false;
}
- if (info->scalarMode) {
- SScalarParam output = {0};
+ SScalarParam output = {0};
+ SDataType type = {.type = TSDB_DATA_TYPE_BOOL, .bytes = sizeof(bool)};
- SDataType type = {.type = TSDB_DATA_TYPE_BOOL, .bytes = sizeof(bool)};
- int32_t code = sclCreateColumnInfoData(&type, pSrc->info.rows, &output);
- if (code != TSDB_CODE_SUCCESS) {
- return code;
- }
+ int32_t code = sclCreateColumnInfoData(&type, pSrc->info.rows, &output);
+ if (code != TSDB_CODE_SUCCESS) {
+ return false;
+ }
+ if (info->scalarMode) {
SArray *pList = taosArrayInit(1, POINTER_BYTES);
taosArrayPush(pList, &pSrc);
FLT_ERR_RET(scalarCalculate(info->sclCtx.node, pList, &output));
- *p = taosMemoryMalloc(output.numOfRows * sizeof(bool));
-
- memcpy(*p, output.columnData->pData, output.numOfRows);
- colDataDestroy(output.columnData);
- taosMemoryFree(output.columnData);
+ *p = output.columnData;
taosArrayDestroy(pList);
+
+ if (output.numOfQualified == output.numOfRows) {
+ *pResultStatus = FILTER_RESULT_ALL_QUALIFIED;
+ } else if (output.numOfQualified == 0) {
+ *pResultStatus = FILTER_RESULT_NONE_QUALIFIED;
+ } else {
+ *pResultStatus = FILTER_RESULT_PARTIAL_QUALIFIED;
+ }
return false;
- }
+ } else {
+ *p = output.columnData;
+ output.numOfRows = pSrc->info.rows;
- return (*info->func)(info, pSrc->info.rows, p, statis, numOfCols);
-}
+ bool keep = (*info->func)(info, pSrc->info.rows, *p, statis, numOfCols, &output.numOfQualified);
+ // todo this should be return during filter procedure
+ int32_t num = 0;
+ for(int32_t i = 0; i < output.numOfRows; ++i) {
+ if (((int8_t*)((*p)->pData))[i] == 1) {
+ ++num;
+ }
+ }
+
+ if (num == output.numOfRows) {
+ *pResultStatus = FILTER_RESULT_ALL_QUALIFIED;
+ } else if (num == 0) {
+ *pResultStatus = FILTER_RESULT_NONE_QUALIFIED;
+ } else {
+ *pResultStatus = FILTER_RESULT_PARTIAL_QUALIFIED;
+ }
+
+ return keep;
+ }
+}
typedef struct SClassifyConditionCxt {
bool hasPrimaryKey;
diff --git a/source/libs/scalar/src/scalar.c b/source/libs/scalar/src/scalar.c
index 6634a29f4091773c89988940c9ab6ed5de2487da..9cba94d85a1ffbfe2f2f1634589039e3a96b7585 100644
--- a/source/libs/scalar/src/scalar.c
+++ b/source/libs/scalar/src/scalar.c
@@ -9,6 +9,7 @@
#include "scalar.h"
#include "tudf.h"
#include "ttime.h"
+#include "tcompare.h"
int32_t scalarGetOperatorParamNum(EOperatorType type) {
if (OP_TYPE_IS_NULL == type || OP_TYPE_IS_NOT_NULL == type || OP_TYPE_IS_TRUE == type || OP_TYPE_IS_NOT_TRUE == type
@@ -219,6 +220,82 @@ void sclFreeParamList(SScalarParam *param, int32_t paramNum) {
taosMemoryFree(param);
}
+void sclDowngradeValueType(SValueNode *valueNode) {
+ switch (valueNode->node.resType.type) {
+ case TSDB_DATA_TYPE_BIGINT: {
+ int8_t i8 = valueNode->datum.i;
+ if (i8 == valueNode->datum.i) {
+ valueNode->node.resType.type = TSDB_DATA_TYPE_TINYINT;
+ *(int8_t*)&valueNode->typeData = i8;
+ break;
+ }
+ int16_t i16 = valueNode->datum.i;
+ if (i16 == valueNode->datum.i) {
+ valueNode->node.resType.type = TSDB_DATA_TYPE_SMALLINT;
+ *(int16_t*)&valueNode->typeData = i16;
+ break;
+ }
+ int32_t i32 = valueNode->datum.i;
+ if (i32 == valueNode->datum.i) {
+ valueNode->node.resType.type = TSDB_DATA_TYPE_INT;
+ *(int32_t*)&valueNode->typeData = i32;
+ break;
+ }
+ break;
+ }
+ case TSDB_DATA_TYPE_UBIGINT:{
+ uint8_t u8 = valueNode->datum.i;
+ if (u8 == valueNode->datum.i) {
+ int8_t i8 = valueNode->datum.i;
+ if (i8 == valueNode->datum.i) {
+ valueNode->node.resType.type = TSDB_DATA_TYPE_TINYINT;
+ *(int8_t*)&valueNode->typeData = i8;
+ } else {
+ valueNode->node.resType.type = TSDB_DATA_TYPE_UTINYINT;
+ *(uint8_t*)&valueNode->typeData = u8;
+ }
+ break;
+ }
+ uint16_t u16 = valueNode->datum.i;
+ if (u16 == valueNode->datum.i) {
+ int16_t i16 = valueNode->datum.i;
+ if (i16 == valueNode->datum.i) {
+ valueNode->node.resType.type = TSDB_DATA_TYPE_SMALLINT;
+ *(int16_t*)&valueNode->typeData = i16;
+ } else {
+ valueNode->node.resType.type = TSDB_DATA_TYPE_USMALLINT;
+ *(uint16_t*)&valueNode->typeData = u16;
+ }
+ break;
+ }
+ uint32_t u32 = valueNode->datum.i;
+ if (u32 == valueNode->datum.i) {
+ int32_t i32 = valueNode->datum.i;
+ if (i32 == valueNode->datum.i) {
+ valueNode->node.resType.type = TSDB_DATA_TYPE_INT;
+ *(int32_t*)&valueNode->typeData = i32;
+ } else {
+ valueNode->node.resType.type = TSDB_DATA_TYPE_UINT;
+ *(uint32_t*)&valueNode->typeData = u32;
+ }
+ break;
+ }
+ break;
+ }
+ case TSDB_DATA_TYPE_DOUBLE: {
+ float f = valueNode->datum.d;
+ if (FLT_EQUAL(f, valueNode->datum.d)) {
+ valueNode->node.resType.type = TSDB_DATA_TYPE_FLOAT;
+ *(float*)&valueNode->typeData = f;
+ break;
+ }
+ break;
+ }
+ default:
+ break;
+ }
+}
+
int32_t sclInitParam(SNode* node, SScalarParam *param, SScalarCtx *ctx, int32_t *rowNum) {
switch (nodeType(node)) {
case QUERY_NODE_LEFT_VALUE: {
@@ -529,6 +606,8 @@ int32_t sclExecLogic(SLogicConditionNode *node, SScalarCtx *ctx, SScalarParam *o
SCL_ERR_JRET(code);
}
+ int32_t numOfQualified = 0;
+
bool value = false;
bool complete = true;
for (int32_t i = 0; i < rowNum; ++i) {
@@ -554,6 +633,9 @@ int32_t sclExecLogic(SLogicConditionNode *node, SScalarCtx *ctx, SScalarParam *o
if (complete) {
colDataAppend(output->columnData, i, (char*) &value, false);
+ if (value) {
+ numOfQualified++;
+ }
}
}
@@ -562,8 +644,9 @@ int32_t sclExecLogic(SLogicConditionNode *node, SScalarCtx *ctx, SScalarParam *o
output->numOfRows = 0;
}
-_return:
+ output->numOfQualified = numOfQualified;
+_return:
sclFreeParamList(params, paramNum);
SCL_RET(code);
}
@@ -675,6 +758,10 @@ EDealRes sclRewriteNonConstOperator(SNode** pNode, SScalarCtx *ctx) {
return DEAL_RES_ERROR;
}
}
+
+ if (SCL_IS_COMPARISON_OPERATOR(node->opType) && SCL_DOWNGRADE_DATETYPE(valueNode->node.resType.type)) {
+ sclDowngradeValueType(valueNode);
+ }
}
if (node->pRight && (QUERY_NODE_VALUE == nodeType(node->pRight))) {
@@ -692,6 +779,10 @@ EDealRes sclRewriteNonConstOperator(SNode** pNode, SScalarCtx *ctx) {
return DEAL_RES_ERROR;
}
}
+
+ if (SCL_IS_COMPARISON_OPERATOR(node->opType) && SCL_DOWNGRADE_DATETYPE(valueNode->node.resType.type)) {
+ sclDowngradeValueType(valueNode);
+ }
}
if (node->pRight && (QUERY_NODE_NODE_LIST == nodeType(node->pRight))) {
@@ -762,7 +853,7 @@ EDealRes sclRewriteFunction(SNode** pNode, SScalarCtx *ctx) {
memcpy(res->datum.p, output.columnData->pData, len);
} else if (IS_VAR_DATA_TYPE(type)) {
//res->datum.p = taosMemoryCalloc(res->node.resType.bytes + VARSTR_HEADER_SIZE + 1, 1);
- res->datum.p = taosMemoryCalloc(varDataTLen(output.columnData->pData), 1);
+ res->datum.p = taosMemoryCalloc(varDataTLen(output.columnData->pData) + 1, 1);
res->node.resType.bytes = varDataTLen(output.columnData->pData);
memcpy(res->datum.p, output.columnData->pData, varDataTLen(output.columnData->pData));
} else {
@@ -1157,6 +1248,7 @@ int32_t scalarCalculate(SNode *pNode, SArray *pBlockList, SScalarParam *pDst) {
colInfoDataEnsureCapacity(pDst->columnData, res->numOfRows);
colDataAssign(pDst->columnData, res->columnData, res->numOfRows, NULL);
pDst->numOfRows = res->numOfRows;
+ pDst->numOfQualified = res->numOfQualified;
}
sclFreeParam(res);
@@ -1164,7 +1256,6 @@ int32_t scalarCalculate(SNode *pNode, SArray *pBlockList, SScalarParam *pDst) {
}
_return:
- //nodesDestroyNode(pNode);
sclFreeRes(ctx.pRes);
return code;
}
diff --git a/source/libs/scalar/src/sclvector.c b/source/libs/scalar/src/sclvector.c
index aaa70ef5ae5f8ab00ce88b56433885cd00004893..fe2a970aaa811cc02f589c845e3c62b7e70af8e8 100644
--- a/source/libs/scalar/src/sclvector.c
+++ b/source/libs/scalar/src/sclvector.c
@@ -909,11 +909,11 @@ int32_t vectorConvertImpl(const SScalarParam* pIn, SScalarParam* pOut, int32_t*
int8_t gConvertTypes[TSDB_DATA_TYPE_BLOB+1][TSDB_DATA_TYPE_BLOB+1] = {
/* NULL BOOL TINY SMAL INT BIG FLOA DOUB VARC TIME NCHA UTIN USMA UINT UBIG JSON VARB DECI BLOB */
/*NULL*/ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-/*BOOL*/ 0, 0, 0, 3, 4, 5, 6, 7, 7, 9, 7, 0, 12, 13, 14, 0, 7, 0, 0,
+/*BOOL*/ 0, 0, 2, 3, 4, 5, 6, 7, 7, 9, 7, 11, 12, 13, 14, 0, 7, 0, 0,
/*TINY*/ 0, 0, 0, 3, 4, 5, 6, 7, 7, 9, 7, 3, 4, 5, 7, 0, 7, 0, 0,
/*SMAL*/ 0, 0, 0, 0, 4, 5, 6, 7, 7, 9, 7, 3, 4, 5, 7, 0, 7, 0, 0,
/*INT */ 0, 0, 0, 0, 0, 5, 6, 7, 7, 9, 7, 4, 4, 5, 7, 0, 7, 0, 0,
-/*BIGI*/ 0, 0, 0, 0, 0, 0, 6, 7, 7, 0, 7, 5, 5, 5, 7, 0, 7, 0, 0,
+/*BIGI*/ 0, 0, 0, 0, 0, 0, 6, 7, 7, 9, 7, 5, 5, 5, 7, 0, 7, 0, 0,
/*FLOA*/ 0, 0, 0, 0, 0, 0, 0, 7, 7, 6, 7, 6, 6, 6, 6, 0, 7, 0, 0,
/*DOUB*/ 0, 0, 0, 0, 0, 0, 0, 0, 7, 7, 7, 7, 7, 7, 7, 0, 7, 0, 0,
/*VARC*/ 0, 0, 0, 0, 0, 0, 0, 0, 0, 9, 8, 7, 7, 7, 7, 0, 0, 0, 0,
@@ -1475,19 +1475,19 @@ void vectorMathMinus(SScalarParam* pLeft, SScalarParam* pRight, SScalarParam *pO
void vectorAssign(SScalarParam* pLeft, SScalarParam* pRight, SScalarParam *pOut, int32_t _ord) {
SColumnInfoData *pOutputCol = pOut->columnData;
-
pOut->numOfRows = pLeft->numOfRows;
-// if (IS_HELPER_NULL(pRight->columnData, 0)) {
if(colDataIsNull_s(pRight->columnData, 0)){
- for (int32_t i = 0; i < pOut->numOfRows; ++i) {
- colDataAppend(pOutputCol, i, NULL, true);
- }
+ colDataAppendNNULL(pOutputCol, 0, pOut->numOfRows);
} else {
+ char* d = colDataGetData(pRight->columnData, 0);
for (int32_t i = 0; i < pOut->numOfRows; ++i) {
- colDataAppend(pOutputCol, i, colDataGetData(pRight->columnData, 0), false);
+ colDataAppend(pOutputCol, i, d, false);
}
}
+
+ ASSERT(pRight->numOfQualified == 1 || pRight->numOfQualified == 0);
+ pOut->numOfQualified = pRight->numOfQualified * pOut->numOfRows;
}
void vectorConcat(SScalarParam* pLeft, SScalarParam* pRight, void *out, int32_t _ord) {
@@ -1646,45 +1646,71 @@ void vectorBitOr(SScalarParam* pLeft, SScalarParam* pRight, SScalarParam *pOut,
doReleaseVec(pRightCol, rightConvert);
}
-#define VEC_COM_INNER(pCol, index1, index2) \
- for (; i < pCol->numOfRows && i >= 0; i += step) {\
- if (IS_HELPER_NULL(pLeft->columnData, index1) || IS_HELPER_NULL(pRight->columnData, index2)) {\
- bool res = false;\
- colDataAppendInt8(pOut->columnData, i, (int8_t*)&res);\
- continue;\
- }\
- char *pLeftData = colDataGetData(pLeft->columnData, index1);\
- char *pRightData = colDataGetData(pRight->columnData, index2);\
- int64_t leftOut = 0;\
- int64_t rightOut = 0;\
- bool freeLeft = false;\
- bool freeRight = false;\
- bool isJsonnull = false;\
- bool result = convertJsonValue(&fp, optr, GET_PARAM_TYPE(pLeft), GET_PARAM_TYPE(pRight),\
- &pLeftData, &pRightData, &leftOut, &rightOut, &isJsonnull, &freeLeft, &freeRight);\
- if(isJsonnull){\
- ASSERT(0);\
- }\
- if(!pLeftData || !pRightData){\
- result = false;\
- }\
- if(!result){\
- colDataAppendInt8(pOut->columnData, i, (int8_t*)&result);\
- }else{\
- bool res = filterDoCompare(fp, optr, pLeftData, pRightData);\
- colDataAppendInt8(pOut->columnData, i, (int8_t*)&res);\
- }\
- if(freeLeft) taosMemoryFreeClear(pLeftData);\
- if(freeRight) taosMemoryFreeClear(pRightData);\
+int32_t doVectorCompareImpl(int32_t numOfRows, SScalarParam *pOut, int32_t startIndex, int32_t step, __compar_fn_t fp,
+ SScalarParam *pLeft, SScalarParam *pRight, int32_t optr) {
+ int32_t num = 0;
+
+ for (int32_t i = startIndex; i < numOfRows && i >= 0; i += step) {
+ int32_t leftIndex = (i >= pLeft->numOfRows)? 0:i;
+ int32_t rightIndex = (i >= pRight->numOfRows)? 0:i;
+
+ if (IS_HELPER_NULL(pLeft->columnData, leftIndex) || IS_HELPER_NULL(pRight->columnData, rightIndex)) {
+ bool res = false;
+ colDataAppendInt8(pOut->columnData, i, (int8_t *)&res);
+ continue;
+ }
+
+ char * pLeftData = colDataGetData(pLeft->columnData, leftIndex);
+ char * pRightData = colDataGetData(pRight->columnData, rightIndex);
+ int64_t leftOut = 0;
+ int64_t rightOut = 0;
+ bool freeLeft = false;
+ bool freeRight = false;
+ bool isJsonnull = false;
+
+ bool result = convertJsonValue(&fp, optr, GET_PARAM_TYPE(pLeft), GET_PARAM_TYPE(pRight), &pLeftData, &pRightData,
+ &leftOut, &rightOut, &isJsonnull, &freeLeft, &freeRight);
+ if (isJsonnull) {
+ ASSERT(0);
+ }
+
+ if (!pLeftData || !pRightData) {
+ result = false;
+ }
+
+ if (!result) {
+ colDataAppendInt8(pOut->columnData, i, (int8_t *)&result);
+ } else {
+ bool res = filterDoCompare(fp, optr, pLeftData, pRightData);
+ colDataAppendInt8(pOut->columnData, i, (int8_t *)&res);
+ if (res) {
+ ++num;
+ }
+ }
+
+ if (freeLeft) {
+ taosMemoryFreeClear(pLeftData);
+ }
+
+ if (freeRight) {
+ taosMemoryFreeClear(pRightData);
+ }
}
+ return num;
+}
+
void vectorCompareImpl(SScalarParam* pLeft, SScalarParam* pRight, SScalarParam *pOut, int32_t _ord, int32_t optr) {
int32_t i = ((_ord) == TSDB_ORDER_ASC) ? 0 : TMAX(pLeft->numOfRows, pRight->numOfRows) - 1;
int32_t step = ((_ord) == TSDB_ORDER_ASC) ? 1 : -1;
-
- __compar_fn_t fp = filterGetCompFunc(GET_PARAM_TYPE(pLeft), optr);
- if(terrno != TSDB_CODE_SUCCESS){
- return;
+ int32_t lType = GET_PARAM_TYPE(pLeft);
+ int32_t rType = GET_PARAM_TYPE(pRight);
+ __compar_fn_t fp = NULL;
+
+ if (lType == rType) {
+ fp = filterGetCompFunc(lType, optr);
+ } else {
+ fp = filterGetCompFuncEx(lType, rType, optr);
}
pOut->numOfRows = TMAX(pLeft->numOfRows, pRight->numOfRows);
@@ -1700,38 +1726,38 @@ void vectorCompareImpl(SScalarParam* pLeft, SScalarParam* pRight, SScalarParam *
char *pLeftData = colDataGetData(pLeft->columnData, i);
bool res = filterDoCompare(fp, optr, pLeftData, pRight->pHashFilter);
colDataAppendInt8(pOut->columnData, i, (int8_t*)&res);
+ if (res) {
+ pOut->numOfQualified++;
+ }
}
- return;
- }
-
- if (pLeft->numOfRows == pRight->numOfRows) {
- VEC_COM_INNER(pLeft, i, i)
- } else if (pRight->numOfRows == 1) {
- VEC_COM_INNER(pLeft, i, 0)
- } else if (pLeft->numOfRows == 1) {
- VEC_COM_INNER(pRight, 0, i)
+ } else { // normal compare
+ pOut->numOfQualified = doVectorCompareImpl(pOut->numOfRows, pOut, i, step, fp, pLeft, pRight, optr);
}
}
void vectorCompare(SScalarParam* pLeft, SScalarParam* pRight, SScalarParam *pOut, int32_t _ord, int32_t optr) {
SScalarParam pLeftOut = {0};
SScalarParam pRightOut = {0};
-
- vectorConvert(pLeft, pRight, &pLeftOut, &pRightOut);
-
SScalarParam *param1 = NULL;
SScalarParam *param2 = NULL;
- if (pLeftOut.columnData != NULL) {
- param1 = &pLeftOut;
- } else {
+ if (SCL_NO_NEED_CONVERT_COMPARISION(GET_PARAM_TYPE(pLeft), GET_PARAM_TYPE(pRight), optr)) {
param1 = pLeft;
- }
-
- if (pRightOut.columnData != NULL) {
- param2 = &pRightOut;
- } else {
param2 = pRight;
+ } else {
+ vectorConvert(pLeft, pRight, &pLeftOut, &pRightOut);
+
+ if (pLeftOut.columnData != NULL) {
+ param1 = &pLeftOut;
+ } else {
+ param1 = pLeft;
+ }
+
+ if (pRightOut.columnData != NULL) {
+ param2 = &pRightOut;
+ } else {
+ param2 = pRight;
+ }
}
vectorCompareImpl(param1, param2, pOut, _ord, optr);
diff --git a/source/libs/scheduler/CMakeLists.txt b/source/libs/scheduler/CMakeLists.txt
index 88180391ddaae2b5dfd2b2f33a3bc4a34cac8e09..3288120b67518aa532db7579a7677086899514c7 100644
--- a/source/libs/scheduler/CMakeLists.txt
+++ b/source/libs/scheduler/CMakeLists.txt
@@ -9,7 +9,7 @@ target_include_directories(
target_link_libraries(
scheduler
- PUBLIC os util nodes planner qcom common catalog transport command
+ PUBLIC os util nodes planner qcom common catalog transport command qworker executor
)
if(${BUILD_TEST})
diff --git a/source/libs/scheduler/inc/schInt.h b/source/libs/scheduler/inc/schInt.h
index 957fd46ba5a767858a3bb5bbe50142b4f1c1ce47..7ced4f626c413d7f208d524e833c3b2626678751 100644
--- a/source/libs/scheduler/inc/schInt.h
+++ b/source/libs/scheduler/inc/schInt.h
@@ -151,6 +151,7 @@ typedef struct SSchedulerMgmt {
SSchStat stat;
SRWLatch hbLock;
SHashObj *hbConnections;
+ void *queryMgmt;
} SSchedulerMgmt;
typedef struct SSchCallbackParamHeader {
@@ -235,8 +236,10 @@ typedef struct SSchTask {
typedef struct SSchJobAttr {
EExplainMode explainMode;
bool queryJob;
+ bool insertJob;
bool needFetch;
bool needFlowCtrl;
+ bool localExec;
} SSchJobAttr;
typedef struct {
@@ -254,7 +257,8 @@ typedef struct SSchJob {
SRequestConnInfo conn;
SArray *nodeList; // qnode/vnode list, SArray
SArray *levels; // starting from 0. SArray
- SQueryPlan *pDag;
+ SQueryPlan *pDag;
+ int64_t allocatorRefId;
SArray *dataSrcTasks; // SArray
int32_t levelIdx;
@@ -262,7 +266,7 @@ typedef struct SSchJob {
SHashObj *taskList;
SHashObj *execTasks; // executing and executed tasks, key:taskid, value:SQueryTask*
SHashObj *flowCtrl; // key is ep, element is SSchFlowControl
-
+
SExplainCtx *explainCtx;
int8_t status;
SQueryNodeAddr resNode;
@@ -283,8 +287,9 @@ typedef struct SSchJob {
} SSchJob;
typedef struct SSchTaskCtx {
- int64_t jobRid;
+ int64_t jobRid;
SSchTask *pTask;
+ bool asyncLaunch;
} SSchTaskCtx;
extern SSchedulerMgmt schMgmt;
@@ -302,6 +307,8 @@ extern SSchedulerMgmt schMgmt;
#define SCH_IS_DATA_BIND_QRY_TASK(task) ((task)->plan->subplanType == SUBPLAN_TYPE_SCAN)
#define SCH_IS_DATA_BIND_TASK(task) (((task)->plan->subplanType == SUBPLAN_TYPE_SCAN) || ((task)->plan->subplanType == SUBPLAN_TYPE_MODIFY))
#define SCH_IS_LEAF_TASK(_job, _task) (((_task)->level->level + 1) == (_job)->levelNum)
+#define SCH_IS_DATA_MERGE_TASK(task) (!SCH_IS_DATA_BIND_TASK(task))
+#define SCH_IS_LOCAL_EXEC_TASK(_job, _task) ((_job)->attr.localExec && SCH_IS_QUERY_JOB(_job) && (!SCH_IS_INSERT_JOB(_job)) && (!SCH_IS_DATA_BIND_QRY_TASK(_task)))
#define SCH_SET_TASK_STATUS(task, st) atomic_store_8(&(task)->status, st)
#define SCH_GET_TASK_STATUS(task) atomic_load_8(&(task)->status)
@@ -324,8 +331,9 @@ extern SSchedulerMgmt schMgmt;
#define SCH_FETCH_TYPE(_pSrcTask) (SCH_IS_DATA_BIND_QRY_TASK(_pSrcTask) ? TDMT_SCH_FETCH : TDMT_SCH_MERGE_FETCH)
#define SCH_TASK_NEED_FETCH(_task) ((_task)->plan->subplanType != SUBPLAN_TYPE_MODIFY)
-#define SCH_SET_JOB_TYPE(_job, type) do { if ((type) != SUBPLAN_TYPE_MODIFY) { (_job)->attr.queryJob = true; } } while (0)
+#define SCH_SET_JOB_TYPE(_job, type) do { if ((type) != SUBPLAN_TYPE_MODIFY) { (_job)->attr.queryJob = true; } else { (_job)->attr.insertJob = true; } } while (0)
#define SCH_IS_QUERY_JOB(_job) ((_job)->attr.queryJob)
+#define SCH_IS_INSERT_JOB(_job) ((_job)->attr.insertJob)
#define SCH_JOB_NEED_FETCH(_job) ((_job)->attr.needFetch)
#define SCH_JOB_NEED_WAIT(_job) (!SCH_IS_QUERY_JOB(_job))
#define SCH_JOB_NEED_DROP(_job) (SCH_IS_QUERY_JOB(_job))
@@ -500,6 +508,8 @@ void schDirectPostJobRes(SSchedulerReq* pReq, int32_t errCode);
int32_t schHandleJobFailure(SSchJob *pJob, int32_t errCode);
int32_t schHandleJobDrop(SSchJob *pJob, int32_t errCode);
bool schChkCurrentOp(SSchJob *pJob, int32_t op, int8_t sync);
+int32_t schProcessFetchRsp(SSchJob *pJob, SSchTask *pTask, char *msg, int32_t rspCode);
+int32_t schProcessExplainRsp(SSchJob *pJob, SSchTask *pTask, SExplainRsp *rsp);
extern SSchDebug gSCHDebug;
diff --git a/source/libs/scheduler/src/schJob.c b/source/libs/scheduler/src/schJob.c
index 98501427ab7b006daa78bc5d1c6c7c8d377572a0..69495c8b7aa0901807ed81a146418f0c6b6264db 100644
--- a/source/libs/scheduler/src/schJob.c
+++ b/source/libs/scheduler/src/schJob.c
@@ -47,8 +47,7 @@ void schUpdateJobErrCode(SSchJob *pJob, int32_t errCode) {
return;
_return:
-
- SCH_JOB_DLOG("job errCode updated to %x - %s", errCode, tstrerror(errCode));
+ SCH_JOB_DLOG("job errCode updated to %s", tstrerror(errCode));
}
bool schJobDone(SSchJob *pJob) {
@@ -491,7 +490,7 @@ int32_t schProcessOnJobFailure(SSchJob *pJob, int32_t errCode) {
int32_t code = atomic_load_32(&pJob->errCode);
if (code) {
- SCH_JOB_DLOG("job failed with error: %s", tstrerror(code));
+ SCH_JOB_DLOG("job failed with error %s", tstrerror(code));
}
schPostJobRes(pJob, 0);
@@ -673,6 +672,7 @@ void schFreeJobImpl(void *job) {
destroyQueryExecRes(&pJob->execRes);
qDestroyQueryPlan(pJob->pDag);
+ nodesReleaseAllocatorWeakRef(pJob->allocatorRefId);
taosMemoryFreeClear(pJob->userRes.execRes);
taosMemoryFreeClear(pJob->fetchRes);
@@ -719,11 +719,13 @@ int32_t schInitJob(int64_t *pJobId, SSchedulerReq *pReq) {
}
pJob->attr.explainMode = pReq->pDag->explainInfo.mode;
+ pJob->attr.localExec = pReq->localReq;
pJob->conn = *pReq->pConn;
if (pReq->sql) {
pJob->sql = strdup(pReq->sql);
}
pJob->pDag = pReq->pDag;
+ pJob->allocatorRefId = nodesMakeAllocatorWeakRef(pReq->allocatorRefId);
pJob->chkKillFp = pReq->chkKillFp;
pJob->chkKillParam = pReq->chkKillParam;
pJob->userRes.execFp = pReq->execFp;
diff --git a/source/libs/scheduler/src/schRemote.c b/source/libs/scheduler/src/schRemote.c
index 5a64aaaebb3860d2c6729ac8eb1e00be0cc9cda1..1b59c06140daad7370c7721c172b3da2a8163732 100644
--- a/source/libs/scheduler/src/schRemote.c
+++ b/source/libs/scheduler/src/schRemote.c
@@ -72,6 +72,71 @@ int32_t schValidateRspMsgType(SSchJob *pJob, SSchTask *pTask, int32_t msgType) {
return TSDB_CODE_SUCCESS;
}
+int32_t schProcessFetchRsp(SSchJob *pJob, SSchTask *pTask, char *msg, int32_t rspCode) {
+ SRetrieveTableRsp *rsp = (SRetrieveTableRsp *)msg;
+ int32_t code = 0;
+
+ SCH_ERR_JRET(rspCode);
+
+ if (NULL == msg) {
+ SCH_ERR_RET(TSDB_CODE_QRY_INVALID_INPUT);
+ }
+
+ if (SCH_IS_EXPLAIN_JOB(pJob)) {
+ if (rsp->completed) {
+ SRetrieveTableRsp *pRsp = NULL;
+ SCH_ERR_JRET(qExecExplainEnd(pJob->explainCtx, &pRsp));
+ if (pRsp) {
+ SCH_ERR_JRET(schProcessOnExplainDone(pJob, pTask, pRsp));
+ }
+
+ taosMemoryFreeClear(msg);
+
+ return TSDB_CODE_SUCCESS;
+ }
+
+ SCH_ERR_JRET(schLaunchFetchTask(pJob));
+
+ taosMemoryFreeClear(msg);
+
+ return TSDB_CODE_SUCCESS;
+ }
+
+ if (pJob->fetchRes) {
+ SCH_TASK_ELOG("got fetch rsp while res already exists, res:%p", pJob->fetchRes);
+ SCH_ERR_JRET(TSDB_CODE_SCH_STATUS_ERROR);
+ }
+
+ atomic_store_ptr(&pJob->fetchRes, rsp);
+ atomic_add_fetch_32(&pJob->resNumOfRows, htonl(rsp->numOfRows));
+
+ if (rsp->completed) {
+ SCH_SET_TASK_STATUS(pTask, JOB_TASK_STATUS_SUCC);
+ }
+
+ SCH_TASK_DLOG("got fetch rsp, rows:%d, complete:%d", htonl(rsp->numOfRows), rsp->completed);
+
+ msg = NULL;
+ schProcessOnDataFetched(pJob);
+
+_return:
+
+ taosMemoryFreeClear(msg);
+
+ SCH_RET(code);
+}
+
+int32_t schProcessExplainRsp(SSchJob *pJob, SSchTask *pTask, SExplainRsp *rsp) {
+ SRetrieveTableRsp *pRsp = NULL;
+ SCH_ERR_RET(qExplainUpdateExecInfo(pJob->explainCtx, rsp, pTask->plan->id.groupId, &pRsp));
+
+ if (pRsp) {
+ SCH_ERR_RET(schProcessOnExplainDone(pJob, pTask, pRsp));
+ }
+
+ return TSDB_CODE_SUCCESS;
+}
+
// Note: no more task error processing, handled in function internal
int32_t schHandleResponseMsg(SSchJob *pJob, SSchTask *pTask, int32_t execId, SDataBuf *pMsg, int32_t rspCode) {
int32_t code = 0;
@@ -301,65 +366,20 @@ int32_t schHandleResponseMsg(SSchJob *pJob, SSchTask *pTask, int32_t execId, SDa
SExplainRsp rsp = {0};
if (tDeserializeSExplainRsp(msg, msgSize, &rsp)) {
- taosMemoryFree(rsp.subplanInfo);
+ tFreeSExplainRsp(&rsp);
SCH_ERR_JRET(TSDB_CODE_QRY_OUT_OF_MEMORY);
}
- SRetrieveTableRsp *pRsp = NULL;
- SCH_ERR_JRET(qExplainUpdateExecInfo(pJob->explainCtx, &rsp, pTask->plan->id.groupId, &pRsp));
+ SCH_ERR_JRET(schProcessExplainRsp(pJob, pTask, &rsp));
- if (pRsp) {
- SCH_ERR_JRET(schProcessOnExplainDone(pJob, pTask, pRsp));
- }
+ taosMemoryFreeClear(msg);
break;
}
case TDMT_SCH_FETCH_RSP:
case TDMT_SCH_MERGE_FETCH_RSP: {
- SRetrieveTableRsp *rsp = (SRetrieveTableRsp *)msg;
-
- SCH_ERR_JRET(rspCode);
- if (NULL == msg) {
- SCH_ERR_JRET(TSDB_CODE_QRY_INVALID_INPUT);
- }
-
- if (SCH_IS_EXPLAIN_JOB(pJob)) {
- if (rsp->completed) {
- SRetrieveTableRsp *pRsp = NULL;
- SCH_ERR_JRET(qExecExplainEnd(pJob->explainCtx, &pRsp));
- if (pRsp) {
- SCH_ERR_JRET(schProcessOnExplainDone(pJob, pTask, pRsp));
- }
-
- taosMemoryFreeClear(msg);
-
- return TSDB_CODE_SUCCESS;
- }
-
- SCH_ERR_JRET(schLaunchFetchTask(pJob));
-
- taosMemoryFreeClear(msg);
-
- return TSDB_CODE_SUCCESS;
- }
-
- if (pJob->fetchRes) {
- SCH_TASK_ELOG("got fetch rsp while res already exists, res:%p", pJob->fetchRes);
- taosMemoryFreeClear(rsp);
- SCH_ERR_JRET(TSDB_CODE_SCH_STATUS_ERROR);
- }
-
- atomic_store_ptr(&pJob->fetchRes, rsp);
- atomic_add_fetch_32(&pJob->resNumOfRows, htonl(rsp->numOfRows));
-
- if (rsp->completed) {
- SCH_SET_TASK_STATUS(pTask, JOB_TASK_STATUS_SUCC);
- }
-
- SCH_TASK_DLOG("got fetch rsp, rows:%d, complete:%d", htonl(rsp->numOfRows), rsp->completed);
-
+ code = schProcessFetchRsp(pJob, pTask, msg, rspCode);
msg = NULL;
-
- schProcessOnDataFetched(pJob);
+ SCH_ERR_JRET(code);
break;
}
case TDMT_SCH_DROP_TASK_RSP: {
@@ -384,8 +404,7 @@ _return:
taosMemoryFreeClear(msg);
SCH_RET(schProcessOnTaskFailure(pJob, pTask, code));
-}
-
+}
int32_t schHandleCallback(void *param, SDataBuf *pMsg, int32_t rspCode) {
int32_t code = 0;
SSchTaskCallbackParam *pParam = (SSchTaskCallbackParam *)param;
@@ -396,7 +415,7 @@ int32_t schHandleCallback(void *param, SDataBuf *pMsg, int32_t rspCode) {
tstrerror(rspCode));
SCH_ERR_JRET(schProcessOnCbBegin(&pJob, &pTask, pParam->queryId, pParam->refId, pParam->taskId));
-
+
code = schHandleResponseMsg(pJob, pTask, pParam->execId, pMsg, rspCode);
pMsg->pData = NULL;
diff --git a/source/libs/scheduler/src/schTask.c b/source/libs/scheduler/src/schTask.c
index 9cab39c30122072207daa9e9639ab92645fc1633..b6f96a188e4fc06f365b6ccf527c3587abb959af 100644
--- a/source/libs/scheduler/src/schTask.c
+++ b/source/libs/scheduler/src/schTask.c
@@ -20,6 +20,8 @@
#include "tmsg.h"
#include "tref.h"
#include "trpc.h"
+#include "qworker.h"
+#include "tglobal.h"
void schFreeTask(SSchJob *pJob, SSchTask *pTask) {
schDeregisterTaskHb(pJob, pTask);
@@ -52,7 +54,7 @@ void schInitTaskRetryTimes(SSchJob *pJob, SSchTask *pTask, SSchLevel *pLevel) {
int32_t nodeNum = taosArrayGetSize(pJob->nodeList);
pTask->maxRetryTimes = TMAX(nodeNum, SCH_DEFAULT_MAX_RETRY_NUM);
}
-
+
pTask->maxExecTimes = pTask->maxRetryTimes * (pLevel->level + 1);
}
@@ -89,6 +91,10 @@ _return:
}
int32_t schRecordTaskSucceedNode(SSchJob *pJob, SSchTask *pTask) {
+ if (SCH_IS_LOCAL_EXEC_TASK(pJob, pTask)) {
+ return TSDB_CODE_SUCCESS;
+ }
+
SQueryNodeAddr *addr = taosArrayGet(pTask->candidateAddrs, pTask->candidateIdx);
if (NULL == addr) {
SCH_TASK_ELOG("taosArrayGet candidate addr failed, idx:%d, size:%d", pTask->candidateIdx,
@@ -138,14 +144,10 @@ int32_t schUpdateTaskExecNode(SSchJob *pJob, SSchTask *pTask, void *handle, int3
return TSDB_CODE_SUCCESS;
}
- if ((execId != pTask->execId) || pTask->waitRetry) { // ignore it
- SCH_TASK_DLOG("handle not updated since execId %d is already not current execId %d, waitRetry %d", execId, pTask->execId, pTask->waitRetry);
- return TSDB_CODE_SUCCESS;
- }
-
SSchNodeInfo *nodeInfo = taosHashGet(pTask->execNodes, &execId, sizeof(execId));
if (NULL == nodeInfo) { // ignore it
- SCH_TASK_DLOG("handle not updated since execId %d already not exist, current execId %d, waitRetry %d", execId, pTask->execId, pTask->waitRetry);
+ SCH_TASK_DLOG("handle not updated since execId %d already not exist, current execId %d, waitRetry %d", execId,
+ pTask->execId, pTask->waitRetry);
return TSDB_CODE_SUCCESS;
}
@@ -160,11 +162,16 @@ int32_t schUpdateTaskHandle(SSchJob *pJob, SSchTask *pTask, bool dropExecNode, v
if (dropExecNode) {
SCH_RET(schDropTaskExecNode(pJob, pTask, handle, execId));
}
+
+ schUpdateTaskExecNode(pJob, pTask, handle, execId);
+ if ((execId != pTask->execId) || pTask->waitRetry) { // ignore it
+ SCH_TASK_DLOG("handle not updated since execId %d is already not current execId %d, waitRetry %d", execId, pTask->execId, pTask->waitRetry);
+ SCH_ERR_RET(TSDB_CODE_SCH_IGNORE_ERROR);
+ }
+
SCH_SET_TASK_HANDLE(pTask, handle);
- schUpdateTaskExecNode(pJob, pTask, handle, execId);
-
return TSDB_CODE_SUCCESS;
}
@@ -228,7 +235,6 @@ int32_t schProcessOnTaskFailure(SSchJob *pJob, SSchTask *pTask, int32_t errCode)
SCH_RET(errCode);
}
-// Note: no more task error processing, handled in function internal
int32_t schProcessOnTaskSuccess(SSchJob *pJob, SSchTask *pTask) {
bool moved = false;
int32_t code = 0;
@@ -293,6 +299,7 @@ int32_t schProcessOnTaskSuccess(SSchJob *pJob, SSchTask *pTask) {
.execId = pTask->execId,
.addr = pTask->succeedAddr,
.fetchMsgType = SCH_FETCH_TYPE(pTask),
+ .localExec = SCH_IS_LOCAL_EXEC_TASK(pJob, pTask),
};
qSetSubplanExecutionNode(parent->plan, pTask->plan->id.groupId, &source);
SCH_UNLOCK(SCH_WRITE, &parent->planLock);
@@ -314,7 +321,7 @@ int32_t schRescheduleTask(SSchJob *pJob, SSchTask *pTask) {
if (!schMgmt.cfg.enableReSchedule) {
return TSDB_CODE_SUCCESS;
}
-
+
if (SCH_IS_DATA_BIND_TASK(pTask)) {
return TSDB_CODE_SUCCESS;
}
@@ -341,7 +348,8 @@ int32_t schDoTaskRedirect(SSchJob *pJob, SSchTask *pTask, SDataBuf *pData, int32
}
if (((pTask->execId + 1) >= pTask->maxExecTimes) || ((pTask->retryTimes + 1) > pTask->maxRetryTimes)) {
- SCH_TASK_DLOG("task no more retry since reach max times %d:%d, execId %d", pTask->maxRetryTimes, pTask->maxExecTimes, pTask->execId);
+ SCH_TASK_DLOG("task no more retry since reach max times %d:%d, execId %d", pTask->maxRetryTimes,
+ pTask->maxExecTimes, pTask->execId);
schHandleJobFailure(pJob, rspCode);
return TSDB_CODE_SUCCESS;
}
@@ -349,7 +357,7 @@ int32_t schDoTaskRedirect(SSchJob *pJob, SSchTask *pTask, SDataBuf *pData, int32
pTask->waitRetry = true;
schDropTaskOnExecNode(pJob, pTask);
taosHashClear(pTask->execNodes);
- SCH_ERR_JRET(schRemoveTaskFromExecList(pJob, pTask));
+ schRemoveTaskFromExecList(pJob, pTask);
schDeregisterTaskHb(pJob, pTask);
atomic_sub_fetch_32(&pTask->level->taskLaunchedNum, 1);
taosMemoryFreeClear(pTask->msg);
@@ -427,12 +435,14 @@ int32_t schHandleRedirect(SSchJob *pJob, SSchTask *pTask, SDataBuf *pData, int32
code = schDoTaskRedirect(pJob, pTask, pData, rspCode);
taosMemoryFree(pData->pData);
+ taosMemoryFree(pData->pEpSet);
SCH_RET(code);
_return:
taosMemoryFree(pData->pData);
+ taosMemoryFree(pData->pEpSet);
SCH_RET(schProcessOnTaskFailure(pJob, pTask, code));
}
@@ -548,7 +558,8 @@ int32_t schTaskCheckSetRetry(SSchJob *pJob, SSchTask *pTask, int32_t errCode, bo
if ((pTask->retryTimes + 1) > pTask->maxRetryTimes) {
*needRetry = false;
- SCH_TASK_DLOG("task no more retry since reach max retry times, retryTimes:%d/%d", pTask->retryTimes, pTask->maxRetryTimes);
+ SCH_TASK_DLOG("task no more retry since reach max retry times, retryTimes:%d/%d", pTask->retryTimes,
+ pTask->maxRetryTimes);
return TSDB_CODE_SUCCESS;
}
@@ -564,25 +575,25 @@ int32_t schTaskCheckSetRetry(SSchJob *pJob, SSchTask *pTask, int32_t errCode, bo
return TSDB_CODE_SUCCESS;
}
-/*
- if (SCH_IS_DATA_BIND_TASK(pTask)) {
- if ((pTask->execId + 1) >= SCH_TASK_NUM_OF_EPS(&pTask->plan->execNode)) {
- *needRetry = false;
- SCH_TASK_DLOG("task no more retry since all ep tried, execId:%d, epNum:%d", pTask->execId,
- SCH_TASK_NUM_OF_EPS(&pTask->plan->execNode));
- return TSDB_CODE_SUCCESS;
- }
- } else {
- int32_t candidateNum = taosArrayGetSize(pTask->candidateAddrs);
+ /*
+ if (SCH_IS_DATA_BIND_TASK(pTask)) {
+ if ((pTask->execId + 1) >= SCH_TASK_NUM_OF_EPS(&pTask->plan->execNode)) {
+ *needRetry = false;
+ SCH_TASK_DLOG("task no more retry since all ep tried, execId:%d, epNum:%d", pTask->execId,
+ SCH_TASK_NUM_OF_EPS(&pTask->plan->execNode));
+ return TSDB_CODE_SUCCESS;
+ }
+ } else {
+ int32_t candidateNum = taosArrayGetSize(pTask->candidateAddrs);
- if ((pTask->candidateIdx + 1) >= candidateNum && (TSDB_CODE_SCH_TIMEOUT_ERROR != errCode)) {
- *needRetry = false;
- SCH_TASK_DLOG("task no more retry since all candiates tried, candidateIdx:%d, candidateNum:%d",
- pTask->candidateIdx, candidateNum);
- return TSDB_CODE_SUCCESS;
+ if ((pTask->candidateIdx + 1) >= candidateNum && (TSDB_CODE_SCH_TIMEOUT_ERROR != errCode)) {
+ *needRetry = false;
+ SCH_TASK_DLOG("task no more retry since all candiates tried, candidateIdx:%d, candidateNum:%d",
+ pTask->candidateIdx, candidateNum);
+ return TSDB_CODE_SUCCESS;
+ }
}
- }
-*/
+ */
*needRetry = true;
SCH_TASK_DLOG("task need the %dth retry, errCode:%x - %s", pTask->execId + 1, errCode, tstrerror(errCode));
@@ -593,7 +604,7 @@ int32_t schTaskCheckSetRetry(SSchJob *pJob, SSchTask *pTask, int32_t errCode, bo
int32_t schHandleTaskRetry(SSchJob *pJob, SSchTask *pTask) {
atomic_sub_fetch_32(&pTask->level->taskLaunchedNum, 1);
- SCH_ERR_RET(schRemoveTaskFromExecList(pJob, pTask));
+ schRemoveTaskFromExecList(pJob, pTask);
SCH_SET_TASK_STATUS(pTask, JOB_TASK_STATUS_INIT);
if (SCH_TASK_NEED_FLOW_CTRL(pJob, pTask)) {
@@ -630,8 +641,9 @@ int32_t schSetAddrsFromNodeList(SSchJob *pJob, SSchTask *pTask) {
SCH_ERR_RET(TSDB_CODE_QRY_OUT_OF_MEMORY);
}
- SCH_TASK_TLOG("set %dth candidate addr, id %d, inUse:%d/%d, fqdn:%s, port:%d", i, naddr->nodeId, naddr->epSet.inUse, naddr->epSet.numOfEps,
- SCH_GET_CUR_EP(naddr)->fqdn, SCH_GET_CUR_EP(naddr)->port);
+ SCH_TASK_TLOG("set %dth candidate addr, id %d, inUse:%d/%d, fqdn:%s, port:%d", i, naddr->nodeId,
+ naddr->epSet.inUse, naddr->epSet.numOfEps, SCH_GET_CUR_EP(naddr)->fqdn,
+ SCH_GET_CUR_EP(naddr)->port);
++addNum;
}
@@ -711,10 +723,10 @@ int32_t schSwitchTaskCandidateAddr(SSchJob *pJob, SSchTask *pTask) {
if (candidateNum <= 1) {
goto _return;
}
-
+
switch (schMgmt.cfg.schPolicy) {
case SCH_LOAD_SEQ:
- case SCH_ALL:
+ case SCH_ALL:
default:
if (++pTask->candidateIdx >= candidateNum) {
pTask->candidateIdx = 0;
@@ -732,15 +744,14 @@ int32_t schSwitchTaskCandidateAddr(SSchJob *pJob, SSchTask *pTask) {
_return:
SCH_TASK_DLOG("switch task candiateIdx to %d/%d", pTask->candidateIdx, candidateNum);
-
+
return TSDB_CODE_SUCCESS;
}
int32_t schRemoveTaskFromExecList(SSchJob *pJob, SSchTask *pTask) {
int32_t code = taosHashRemove(pJob->execTasks, &pTask->taskId, sizeof(pTask->taskId));
if (code) {
- SCH_TASK_ELOG("task failed to rm from execTask list, code:%x", code);
- SCH_ERR_RET(TSDB_CODE_SCH_INTERNAL_ERROR);
+ SCH_TASK_WLOG("task already not in execTask list, code:%x", code);
}
return TSDB_CODE_SUCCESS;
@@ -759,7 +770,7 @@ void schDropTaskOnExecNode(SSchJob *pJob, SSchTask *pTask) {
return;
}
- int32_t i = 0;
+ int32_t i = 0;
SSchNodeInfo *nodeInfo = taosHashIterate(pTask->execNodes, NULL);
while (nodeInfo) {
if (nodeInfo->handle) {
@@ -819,25 +830,145 @@ int32_t schProcessOnTaskStatusRsp(SQueryNodeEpId *pEpId, SArray *pStatusList) {
return TSDB_CODE_SUCCESS;
}
+int32_t schHandleExplainRes(SArray *pExplainRes) {
+ int32_t code = 0;
+ int32_t resNum = taosArrayGetSize(pExplainRes);
+ if (resNum <= 0) {
+ goto _return;
+ }
+
+ SSchTask *pTask = NULL;
+ SSchJob *pJob = NULL;
+
+ for (int32_t i = 0; i < resNum; ++i) {
+ SExplainLocalRsp* localRsp = taosArrayGet(pExplainRes, i);
+
+ qDebug("QID:0x%" PRIx64 ",TID:0x%" PRIx64 ", begin to handle LOCAL explain rsp msg", localRsp->qId, localRsp->tId);
+
+ pJob = schAcquireJob(localRsp->rId);
+ if (NULL == pJob) {
+ qWarn("QID:0x%" PRIx64 ",TID:0x%" PRIx64 "job no exist, may be dropped, refId:0x%" PRIx64, localRsp->qId, localRsp->tId, localRsp->rId);
+ SCH_ERR_JRET(TSDB_CODE_QRY_JOB_NOT_EXIST);
+ }
+
+ int8_t status = 0;
+ if (schJobNeedToStop(pJob, &status)) {
+ SCH_TASK_DLOG("will not do further processing cause of job status %s", jobTaskStatusStr(status));
+ schReleaseJob(pJob->refId);
+ SCH_ERR_JRET(TSDB_CODE_SCH_IGNORE_ERROR);
+ }
+
+ code = schGetTaskInJob(pJob, localRsp->tId, &pTask);
+
+ if (TSDB_CODE_SUCCESS == code) {
+ code = schProcessExplainRsp(pJob, pTask, &localRsp->rsp);
+ }
+
+ schReleaseJob(pJob->refId);
+
+ qDebug("QID:0x%" PRIx64 ",TID:0x%" PRIx64 ", end to handle LOCAL explain rsp msg, code:%x", localRsp->qId, localRsp->tId, code);
+
+ SCH_ERR_JRET(code);
+
+ localRsp->rsp.numOfPlans = 0;
+ localRsp->rsp.subplanInfo = NULL;
+ pTask = NULL;
+ pJob = NULL;
+ }
+
+_return:
+
+ for (int32_t i = 0; i < resNum; ++i) {
+ SExplainLocalRsp* localRsp = taosArrayGet(pExplainRes, i);
+ tFreeSExplainRsp(&localRsp->rsp);
+ }
+
+ taosArrayDestroy(pExplainRes);
+
+ SCH_RET(code);
+}
+
+int32_t schLaunchRemoteTask(SSchJob *pJob, SSchTask *pTask) {
+ SSubplan *plan = pTask->plan;
+ int32_t code = 0;
+
+ if (NULL == pTask->msg) { // TODO add more detailed reason for failure
+ code = qSubPlanToMsg(plan, &pTask->msg, &pTask->msgLen);
+ if (TSDB_CODE_SUCCESS != code) {
+ SCH_TASK_ELOG("failed to create physical plan, code:%s, msg:%p, len:%d", tstrerror(code), pTask->msg,
+ pTask->msgLen);
+ SCH_ERR_RET(code);
+ } else if (tsQueryPlannerTrace) {
+ char *msg = NULL;
+ int32_t msgLen = 0;
+ qSubPlanToString(plan, &msg, &msgLen);
+ SCH_TASK_DLOGL("physical plan len:%d, %s", msgLen, msg);
+ taosMemoryFree(msg);
+ }
+ }
+
+ SCH_ERR_RET(schSetTaskCandidateAddrs(pJob, pTask));
+
+ if (SCH_IS_QUERY_JOB(pJob)) {
+ SCH_ERR_RET(schEnsureHbConnection(pJob, pTask));
+ }
+
+ SCH_RET(schBuildAndSendMsg(pJob, pTask, NULL, plan->msgType));
+}
+
+int32_t schLaunchLocalTask(SSchJob *pJob, SSchTask *pTask) {
+ //SCH_ERR_JRET(schSetTaskCandidateAddrs(pJob, pTask));
+ if (NULL == schMgmt.queryMgmt) {
+ SCH_ERR_RET(qWorkerInit(NODE_TYPE_CLIENT, CLIENT_HANDLE, (void **)&schMgmt.queryMgmt, NULL));
+ }
+
+ SArray *explainRes = NULL;
+ SQWMsg qwMsg = {0};
+ qwMsg.msgInfo.taskType = TASK_TYPE_TEMP;
+ qwMsg.msgInfo.explain = SCH_IS_EXPLAIN_JOB(pJob);
+ qwMsg.msgInfo.needFetch = SCH_TASK_NEED_FETCH(pTask);
+ qwMsg.msg = pTask->plan;
+ qwMsg.msgType = pTask->plan->msgType;
+ qwMsg.connInfo.handle = pJob->conn.pTrans;
+
+ if (SCH_IS_EXPLAIN_JOB(pJob)) {
+ explainRes = taosArrayInit(pJob->taskNum, sizeof(SExplainLocalRsp));
+ }
+
+ SCH_ERR_RET(qWorkerProcessLocalQuery(schMgmt.queryMgmt, schMgmt.sId, pJob->queryId, pTask->taskId, pJob->refId, pTask->execId, &qwMsg, explainRes));
+
+ if (SCH_IS_EXPLAIN_JOB(pJob)) {
+ SCH_ERR_RET(schHandleExplainRes(explainRes));
+ }
+
+ SCH_RET(schProcessOnTaskSuccess(pJob, pTask));
+}
+
int32_t schLaunchTaskImpl(void *param) {
SSchTaskCtx *pCtx = (SSchTaskCtx *)param;
- SSchJob *pJob = schAcquireJob(pCtx->jobRid);
+ SSchJob *pJob = schAcquireJob(pCtx->jobRid);
if (NULL == pJob) {
- taosMemoryFree(param);
qDebug("job refId 0x%" PRIx64 " already not exist", pCtx->jobRid);
+ taosMemoryFree(param);
SCH_RET(TSDB_CODE_SCH_JOB_IS_DROPPING);
}
-
+
SSchTask *pTask = pCtx->pTask;
- int8_t status = 0;
- int32_t code = 0;
+
+ if (pCtx->asyncLaunch) {
+ SCH_LOCK_TASK(pTask);
+ }
+
+ int8_t status = 0;
+ int32_t code = 0;
atomic_add_fetch_32(&pTask->level->taskLaunchedNum, 1);
pTask->execId++;
pTask->retryTimes++;
pTask->waitRetry = false;
- SCH_TASK_DLOG("start to launch task, execId %d, retry %d", pTask->execId, pTask->retryTimes);
+ SCH_TASK_DLOG("start to launch %s task, execId %d, retry %d", SCH_IS_LOCAL_EXEC_TASK(pJob, pTask) ? "LOCAL" : "REMOTE",
+ pTask->execId, pTask->retryTimes);
SCH_LOG_TASK_START_TS(pTask);
@@ -852,31 +983,14 @@ int32_t schLaunchTaskImpl(void *param) {
SCH_SET_TASK_STATUS(pTask, JOB_TASK_STATUS_EXEC);
}
- SSubplan *plan = pTask->plan;
-
- if (NULL == pTask->msg) { // TODO add more detailed reason for failure
- code = qSubPlanToString(plan, &pTask->msg, &pTask->msgLen);
- if (TSDB_CODE_SUCCESS != code) {
- SCH_TASK_ELOG("failed to create physical plan, code:%s, msg:%p, len:%d", tstrerror(code), pTask->msg,
- pTask->msgLen);
- SCH_ERR_JRET(code);
- } else {
- SCH_TASK_DLOGL("physical plan len:%d, %s", pTask->msgLen, pTask->msg);
- }
- }
-
- SCH_ERR_JRET(schSetTaskCandidateAddrs(pJob, pTask));
-
- if (SCH_IS_QUERY_JOB(pJob)) {
- SCH_ERR_JRET(schEnsureHbConnection(pJob, pTask));
+ if (SCH_IS_LOCAL_EXEC_TASK(pJob, pTask)) {
+ SCH_ERR_JRET(schLaunchLocalTask(pJob, pTask));
+ } else {
+ SCH_ERR_JRET(schLaunchRemoteTask(pJob, pTask));
}
- SCH_ERR_JRET(schBuildAndSendMsg(pJob, pTask, NULL, plan->msgType));
-
_return:
- taosMemoryFree(param);
-
if (pJob->taskNum >= SCH_MIN_AYSNC_EXEC_NUM) {
if (code) {
code = schProcessOnTaskFailure(pJob, pTask, code);
@@ -886,27 +1000,33 @@ _return:
}
}
+ if (pCtx->asyncLaunch) {
+ SCH_UNLOCK_TASK(pTask);
+ }
+
schReleaseJob(pJob->refId);
+ taosMemoryFree(param);
+
SCH_RET(code);
}
-int32_t schAsyncLaunchTaskImpl(SSchJob *pJob, SSchTask *pTask) {
-
+int32_t schAsyncLaunchTaskImpl(SSchJob *pJob, SSchTask *pTask) {
SSchTaskCtx *param = taosMemoryCalloc(1, sizeof(SSchTaskCtx));
if (NULL == param) {
SCH_ERR_RET(TSDB_CODE_OUT_OF_MEMORY);
}
-
+
param->jobRid = pJob->refId;
param->pTask = pTask;
if (pJob->taskNum >= SCH_MIN_AYSNC_EXEC_NUM) {
+ param->asyncLaunch = true;
taosAsyncExec(schLaunchTaskImpl, param, NULL);
} else {
SCH_ERR_RET(schLaunchTaskImpl(param));
}
-
+
return TSDB_CODE_SUCCESS;
}
@@ -961,6 +1081,29 @@ void schDropTaskInHashList(SSchJob *pJob, SHashObj *list) {
}
}
+int32_t schExecRemoteFetch(SSchJob *pJob, SSchTask *pTask) {
+ SCH_RET(schBuildAndSendMsg(pJob, pJob->fetchTask, &pJob->resNode, SCH_FETCH_TYPE(pJob->fetchTask)));
+}
+
+int32_t schExecLocalFetch(SSchJob *pJob, SSchTask *pTask) {
+ void *pRsp = NULL;
+ SArray *explainRes = NULL;
+
+ if (SCH_IS_EXPLAIN_JOB(pJob)) {
+ explainRes = taosArrayInit(pJob->taskNum, sizeof(SExplainLocalRsp));
+ }
+
+ SCH_ERR_RET(qWorkerProcessLocalFetch(schMgmt.queryMgmt, schMgmt.sId, pJob->queryId, pTask->taskId, pJob->refId, pTask->execId, &pRsp, explainRes));
+
+ if (SCH_IS_EXPLAIN_JOB(pJob)) {
+ SCH_ERR_RET(schHandleExplainRes(explainRes));
+ }
+
+ SCH_ERR_RET(schProcessFetchRsp(pJob, pTask, pRsp, TSDB_CODE_SUCCESS));
+
+ return TSDB_CODE_SUCCESS;
+}
+
// Note: no more error processing, handled in function internal
int32_t schLaunchFetchTask(SSchJob *pJob) {
int32_t code = 0;
@@ -971,7 +1114,11 @@ int32_t schLaunchFetchTask(SSchJob *pJob) {
return TSDB_CODE_SUCCESS;
}
- SCH_ERR_JRET(schBuildAndSendMsg(pJob, pJob->fetchTask, &pJob->resNode, SCH_FETCH_TYPE(pJob->fetchTask)));
+ if (SCH_IS_LOCAL_EXEC_TASK(pJob, pJob->fetchTask)) {
+ SCH_ERR_JRET(schExecLocalFetch(pJob, pJob->fetchTask));
+ } else {
+ SCH_ERR_JRET(schExecRemoteFetch(pJob, pJob->fetchTask));
+ }
return TSDB_CODE_SUCCESS;
diff --git a/source/libs/scheduler/src/scheduler.c b/source/libs/scheduler/src/scheduler.c
index f1ec7f5e043fc7810e54377f811e90d3dcc29b0f..0ccaef385744738969973b19a38182cbac4399d9 100644
--- a/source/libs/scheduler/src/scheduler.c
+++ b/source/libs/scheduler/src/scheduler.c
@@ -17,6 +17,7 @@
#include "schInt.h"
#include "tmsg.h"
#include "tref.h"
+#include "qworker.h"
SSchedulerMgmt schMgmt = {
.jobRef = -1,
@@ -192,4 +193,7 @@ void schedulerDestroy(void) {
schMgmt.hbConnections = NULL;
}
SCH_UNLOCK(SCH_WRITE, &schMgmt.hbLock);
+
+ qWorkerDestroy(&schMgmt.queryMgmt);
+ schMgmt.queryMgmt = NULL;
}
diff --git a/source/libs/stream/src/stream.c b/source/libs/stream/src/stream.c
index d6e87c27366da27dda96a41c7a9d2fda92c652a9..4a63cd3bb28cdbb31ad4f2ca7531787fccb7e7d4 100644
--- a/source/libs/stream/src/stream.c
+++ b/source/libs/stream/src/stream.c
@@ -182,7 +182,7 @@ int32_t streamProcessDispatchReq(SStreamTask* pTask, SStreamDispatchReq* pReq, S
pReq->upstreamTaskId);
streamTaskEnqueue(pTask, pReq, pRsp);
- tFreeStreamDispatchReq(pReq);
+ tDeleteStreamDispatchReq(pReq);
if (exec) {
if (streamTryExec(pTask) < 0) {
diff --git a/source/libs/stream/src/streamData.c b/source/libs/stream/src/streamData.c
index cd5f499c34a4db63e6e9c4820f5abeb1076c394a..e6705a77b28a07e04e5439ed1c9bbf561ff7c3df 100644
--- a/source/libs/stream/src/streamData.c
+++ b/source/libs/stream/src/streamData.c
@@ -179,5 +179,15 @@ void streamFreeQitem(SStreamQueueItem* data) {
taosArrayDestroy(pMerge->reqs);
taosArrayDestroy(pMerge->dataRefs);
taosFreeQitem(pMerge);
+ } else if (type == STREAM_INPUT__REF_DATA_BLOCK) {
+ SStreamRefDataBlock* pRefBlock = (SStreamRefDataBlock*)data;
+
+ int32_t ref = atomic_sub_fetch_32(pRefBlock->dataRef, 1);
+ ASSERT(ref >= 0);
+ if (ref == 0) {
+ blockDataDestroy(pRefBlock->pBlock);
+ taosMemoryFree(pRefBlock->dataRef);
+ }
+ taosFreeQitem(pRefBlock);
}
}
diff --git a/source/libs/stream/src/streamDispatch.c b/source/libs/stream/src/streamDispatch.c
index 9d4010f60e5fcb222e235181a2ce12b8d4dc4102..e6960ae35086c471f7891e551a8fd17ec4776ef1 100644
--- a/source/libs/stream/src/streamDispatch.c
+++ b/source/libs/stream/src/streamDispatch.c
@@ -62,7 +62,7 @@ int32_t tDecodeStreamDispatchReq(SDecoder* pDecoder, SStreamDispatchReq* pReq) {
return 0;
}
-void tFreeStreamDispatchReq(SStreamDispatchReq* pReq) {
+void tDeleteStreamDispatchReq(SStreamDispatchReq* pReq) {
taosArrayDestroyP(pReq->data, taosMemoryFree);
taosArrayDestroy(pReq->dataLen);
}
@@ -95,7 +95,10 @@ int32_t tDecodeStreamRetrieveReq(SDecoder* pDecoder, SStreamRetrieveReq* pReq) {
return 0;
}
+void tDeleteStreamRetrieveReq(SStreamRetrieveReq* pReq) { taosMemoryFree(pReq->pRetrieve); }
+
int32_t streamBroadcastToChildren(SStreamTask* pTask, const SSDataBlock* pBlock) {
+ int32_t code = -1;
SRetrieveTableRsp* pRetrieve = NULL;
void* buf = NULL;
int32_t dataStrLen = sizeof(SRetrieveTableRsp) + blockGetEncodeSize(pBlock);
@@ -143,7 +146,7 @@ int32_t streamBroadcastToChildren(SStreamTask* pTask, const SSDataBlock* pBlock)
buf = rpcMallocCont(sizeof(SMsgHead) + len);
if (buf == NULL) {
- goto FAIL;
+ goto CLEAR;
}
((SMsgHead*)buf)->vgId = htonl(pEpInfo->nodeId);
@@ -151,6 +154,7 @@ int32_t streamBroadcastToChildren(SStreamTask* pTask, const SSDataBlock* pBlock)
SEncoder encoder;
tEncoderInit(&encoder, abuf, len);
tEncodeStreamRetrieveReq(&encoder, &req);
+ tEncoderClear(&encoder);
SRpcMsg rpcMsg = {
.code = 0,
@@ -161,17 +165,18 @@ int32_t streamBroadcastToChildren(SStreamTask* pTask, const SSDataBlock* pBlock)
if (tmsgSendReq(&pEpInfo->epSet, &rpcMsg) < 0) {
ASSERT(0);
- return -1;
+ goto CLEAR;
}
+ buf = NULL;
qDebug("task %d(child %d) send retrieve req to task %d at node %d, reqId %" PRId64, pTask->taskId,
pTask->selfChildId, pEpInfo->taskId, pEpInfo->nodeId, req.reqId);
}
- return 0;
-FAIL:
- if (pRetrieve) taosMemoryFree(pRetrieve);
- if (buf) taosMemoryFree(buf);
- return -1;
+ code = 0;
+CLEAR:
+ taosMemoryFree(pRetrieve);
+ rpcFreeCont(buf);
+ return code;
}
static int32_t streamAddBlockToDispatchMsg(const SSDataBlock* pBlock, SStreamDispatchReq* pReq) {
@@ -243,6 +248,39 @@ FAIL:
return 0;
}
+int32_t streamSearchAndAddBlock(SStreamTask* pTask, SStreamDispatchReq* pReqs, SSDataBlock* pDataBlock, int32_t vgSz,
+ int64_t groupId) {
+ char* ctbName = buildCtbNameByGroupId(pTask->shuffleDispatcher.stbFullName, groupId);
+ SArray* vgInfo = pTask->shuffleDispatcher.dbInfo.pVgroupInfos;
+
+ /*uint32_t hashValue = MurmurHash3_32(ctbName, strlen(ctbName));*/
+ SUseDbRsp* pDbInfo = &pTask->shuffleDispatcher.dbInfo;
+ uint32_t hashValue =
+ taosGetTbHashVal(ctbName, strlen(ctbName), pDbInfo->hashMethod, pDbInfo->hashPrefix, pDbInfo->hashSuffix);
+ taosMemoryFree(ctbName);
+
+ bool found = false;
+ // TODO: optimize search
+ int32_t j;
+ for (j = 0; j < vgSz; j++) {
+ SVgroupInfo* pVgInfo = taosArrayGet(vgInfo, j);
+ ASSERT(pVgInfo->vgId > 0);
+ if (hashValue >= pVgInfo->hashBegin && hashValue <= pVgInfo->hashEnd) {
+ if (streamAddBlockToDispatchMsg(pDataBlock, &pReqs[j]) < 0) {
+ return -1;
+ }
+ if (pReqs[j].blockNum == 0) {
+ atomic_add_fetch_32(&pTask->shuffleDispatcher.waitingRspCnt, 1);
+ }
+ pReqs[j].blockNum++;
+ found = true;
+ break;
+ }
+ }
+ ASSERT(found);
+ return 0;
+}
+
int32_t streamDispatchAllBlocks(SStreamTask* pTask, const SStreamDataBlock* pData) {
int32_t code = -1;
int32_t blockNum = taosArrayGetSize(pData->blocks);
@@ -317,20 +355,10 @@ int32_t streamDispatchAllBlocks(SStreamTask* pTask, const SStreamDataBlock* pDat
for (int32_t i = 0; i < blockNum; i++) {
SSDataBlock* pDataBlock = taosArrayGet(pData->blocks, i);
- char* ctbName = buildCtbNameByGroupId(pTask->shuffleDispatcher.stbFullName, pDataBlock->info.groupId);
-
- // TODO: get hash function by hashMethod
- uint32_t hashValue = MurmurHash3_32(ctbName, strlen(ctbName));
- taosMemoryFree(ctbName);
-
- bool found = false;
- // TODO: optimize search
- int32_t j;
- for (j = 0; j < vgSz; j++) {
- SVgroupInfo* pVgInfo = taosArrayGet(vgInfo, j);
- ASSERT(pVgInfo->vgId > 0);
- if (hashValue >= pVgInfo->hashBegin && hashValue <= pVgInfo->hashEnd) {
+ // TODO: do not use broadcast
+ if (pDataBlock->info.type == STREAM_DELETE_RESULT) {
+ for (int32_t j = 0; j < vgSz; j++) {
if (streamAddBlockToDispatchMsg(pDataBlock, &pReqs[j]) < 0) {
goto FAIL_SHUFFLE_DISPATCH;
}
@@ -338,11 +366,13 @@ int32_t streamDispatchAllBlocks(SStreamTask* pTask, const SStreamDataBlock* pDat
atomic_add_fetch_32(&pTask->shuffleDispatcher.waitingRspCnt, 1);
}
pReqs[j].blockNum++;
- found = true;
- break;
}
+ continue;
+ }
+
+ if (streamSearchAndAddBlock(pTask, pReqs, pDataBlock, vgSz, pDataBlock->info.groupId) < 0) {
+ goto FAIL_SHUFFLE_DISPATCH;
}
- ASSERT(found);
}
for (int32_t i = 0; i < vgSz; i++) {
diff --git a/source/libs/stream/src/streamExec.c b/source/libs/stream/src/streamExec.c
index 102bad742652005df440b5d4d7a87bcef34ba636..5ad5aa549d28a6b8c4835177dcb11df5418fe57c 100644
--- a/source/libs/stream/src/streamExec.c
+++ b/source/libs/stream/src/streamExec.c
@@ -38,6 +38,9 @@ static int32_t streamTaskExecImpl(SStreamTask* pTask, const void* data, SArray*
SArray* blocks = pMerged->reqs;
qDebug("task %d %p set submit input (merged), batch num: %d", pTask->taskId, pTask, (int32_t)blocks->size);
qSetMultiStreamInput(exec, blocks->pData, blocks->size, STREAM_INPUT__MERGED_SUBMIT);
+ } else if (pItem->type == STREAM_INPUT__REF_DATA_BLOCK) {
+ const SStreamRefDataBlock* pRefBlock = (const SStreamRefDataBlock*)data;
+ qSetMultiStreamInput(exec, pRefBlock->pBlock, 1, STREAM_INPUT__DATA_BLOCK);
} else {
ASSERT(0);
}
diff --git a/source/libs/stream/src/streamMeta.c b/source/libs/stream/src/streamMeta.c
index 20a2f7d332ce97511ec7bcd752539ff626ce0f54..1442ed2e0509e37d8b21806dc05343adcaa0f32c 100644
--- a/source/libs/stream/src/streamMeta.c
+++ b/source/libs/stream/src/streamMeta.c
@@ -265,6 +265,8 @@ int32_t streamLoadTasks(SStreamMeta* pMeta) {
}
}
+ tdbFree(pKey);
+ tdbFree(pVal);
if (tdbTbcClose(pCur) < 0) {
return -1;
}
diff --git a/source/libs/stream/src/streamState.c b/source/libs/stream/src/streamState.c
index dfd6f012cc4f64d252f75a20f761c6f87fc05b78..3428a8582385dbc2a48ed856e1af7f64ea7d74d5 100644
--- a/source/libs/stream/src/streamState.c
+++ b/source/libs/stream/src/streamState.c
@@ -18,14 +18,19 @@
#include "tcommon.h"
#include "ttimer.h"
-SStreamState* streamStateOpen(char* path, SStreamTask* pTask) {
+SStreamState* streamStateOpen(char* path, SStreamTask* pTask, bool specPath) {
SStreamState* pState = taosMemoryCalloc(1, sizeof(SStreamState));
if (pState == NULL) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
return NULL;
}
+
char statePath[300];
- sprintf(statePath, "%s/%d", path, pTask->taskId);
+ if (!specPath) {
+ sprintf(statePath, "%s/%d", path, pTask->taskId);
+ } else {
+ strncpy(statePath, path, 300);
+ }
if (tdbOpen(statePath, 4096, 256, &pState->db) < 0) {
goto _err;
}
@@ -35,6 +40,15 @@ SStreamState* streamStateOpen(char* path, SStreamTask* pTask) {
goto _err;
}
+ // todo refactor
+ if (tdbTbOpen("func.state.db", sizeof(SWinKey), -1, SWinKeyCmpr, pState->db, &pState->pFillStateDb) < 0) {
+ goto _err;
+ }
+
+ if (tdbTbOpen("func.state.db", sizeof(STupleKey), -1, STupleKeyCmpr, pState->db, &pState->pFuncStateDb) < 0) {
+ goto _err;
+ }
+
if (streamStateBegin(pState) < 0) {
goto _err;
}
@@ -44,8 +58,10 @@ SStreamState* streamStateOpen(char* path, SStreamTask* pTask) {
return pState;
_err:
- if (pState->pStateDb) tdbTbClose(pState->pStateDb);
- if (pState->db) tdbClose(pState->db);
+ tdbTbClose(pState->pStateDb);
+ tdbTbClose(pState->pFuncStateDb);
+ tdbTbClose(pState->pFillStateDb);
+ tdbClose(pState->db);
taosMemoryFree(pState);
return NULL;
}
@@ -53,6 +69,8 @@ _err:
void streamStateClose(SStreamState* pState) {
tdbCommit(pState->db, &pState->txn);
tdbTbClose(pState->pStateDb);
+ tdbTbClose(pState->pFuncStateDb);
+ tdbTbClose(pState->pFillStateDb);
tdbClose(pState->db);
taosMemoryFree(pState);
@@ -101,17 +119,61 @@ int32_t streamStateAbort(SStreamState* pState) {
return 0;
}
+int32_t streamStateFuncPut(SStreamState* pState, const STupleKey* key, const void* value, int32_t vLen) {
+ return tdbTbUpsert(pState->pFuncStateDb, key, sizeof(STupleKey), value, vLen, &pState->txn);
+}
+int32_t streamStateFuncGet(SStreamState* pState, const STupleKey* key, void** pVal, int32_t* pVLen) {
+ return tdbTbGet(pState->pFuncStateDb, key, sizeof(STupleKey), pVal, pVLen);
+}
+
+int32_t streamStateFuncDel(SStreamState* pState, const STupleKey* key) {
+ return tdbTbDelete(pState->pFuncStateDb, key, sizeof(STupleKey), &pState->txn);
+}
+
int32_t streamStatePut(SStreamState* pState, const SWinKey* key, const void* value, int32_t vLen) {
return tdbTbUpsert(pState->pStateDb, key, sizeof(SWinKey), value, vLen, &pState->txn);
}
+
+// todo refactor
+int32_t streamStateFillPut(SStreamState* pState, const SWinKey* key, const void* value, int32_t vLen) {
+ return tdbTbUpsert(pState->pFillStateDb, key, sizeof(SWinKey), value, vLen, &pState->txn);
+}
+
int32_t streamStateGet(SStreamState* pState, const SWinKey* key, void** pVal, int32_t* pVLen) {
return tdbTbGet(pState->pStateDb, key, sizeof(SWinKey), pVal, pVLen);
}
+// todo refactor
+int32_t streamStateFillGet(SStreamState* pState, const SWinKey* key, void** pVal, int32_t* pVLen) {
+ return tdbTbGet(pState->pFillStateDb, key, sizeof(SWinKey), pVal, pVLen);
+}
+
int32_t streamStateDel(SStreamState* pState, const SWinKey* key) {
return tdbTbDelete(pState->pStateDb, key, sizeof(SWinKey), &pState->txn);
}
+// todo refactor
+int32_t streamStateFillDel(SStreamState* pState, const SWinKey* key) {
+ return tdbTbDelete(pState->pFillStateDb, key, sizeof(SWinKey), &pState->txn);
+}
+
+int32_t streamStateAddIfNotExist(SStreamState* pState, const SWinKey* key, void** pVal, int32_t* pVLen) {
+ // todo refactor
+ int32_t size = *pVLen;
+ if (streamStateGet(pState, key, pVal, pVLen) == 0) {
+ return 0;
+ }
+ *pVal = tdbRealloc(NULL, size);
+ memset(*pVal, 0, size);
+ return 0;
+}
+
+int32_t streamStateReleaseBuf(SStreamState* pState, const SWinKey* key, void* pVal) {
+ // todo refactor
+ streamFreeVal(pVal);
+ return 0;
+}
+
SStreamStateCur* streamStateGetCur(SStreamState* pState, const SWinKey* key) {
SStreamStateCur* pCur = taosMemoryCalloc(1, sizeof(SStreamStateCur));
if (pCur == NULL) return NULL;
@@ -126,6 +188,31 @@ SStreamStateCur* streamStateGetCur(SStreamState* pState, const SWinKey* key) {
return pCur;
}
+SStreamStateCur* streamStateFillGetCur(SStreamState* pState, const SWinKey* key) {
+ SStreamStateCur* pCur = taosMemoryCalloc(1, sizeof(SStreamStateCur));
+ if (pCur == NULL) return NULL;
+ tdbTbcOpen(pState->pFillStateDb, &pCur->pCur, NULL);
+
+ int32_t c;
+ tdbTbcMoveTo(pCur->pCur, key, sizeof(SWinKey), &c);
+ if (c != 0) {
+ taosMemoryFree(pCur);
+ return NULL;
+ }
+ return pCur;
+}
+
+SStreamStateCur* streamStateGetAndCheckCur(SStreamState* pState, SWinKey* key) {
+ SStreamStateCur* pCur = streamStateFillGetCur(pState, key);
+ if (pCur) {
+ int32_t code = streamStateGetGroupKVByCur(pCur, key, NULL, 0);
+ if (code == 0) {
+ return pCur;
+ }
+ }
+ return NULL;
+}
+
int32_t streamStateGetKVByCur(SStreamStateCur* pCur, SWinKey* pKey, const void** pVal, int32_t* pVLen) {
const SWinKey* pKTmp = NULL;
int32_t kLen;
@@ -136,6 +223,17 @@ int32_t streamStateGetKVByCur(SStreamStateCur* pCur, SWinKey* pKey, const void**
return 0;
}
+int32_t streamStateGetGroupKVByCur(SStreamStateCur* pCur, SWinKey* pKey, const void** pVal, int32_t* pVLen) {
+ uint64_t groupId = pKey->groupId;
+ int32_t code = streamStateGetKVByCur(pCur, pKey, pVal, pVLen);
+ if (code == 0) {
+ if (pKey->groupId == groupId) {
+ return 0;
+ }
+ }
+ return -1;
+}
+
int32_t streamStateSeekFirst(SStreamState* pState, SStreamStateCur* pCur) {
//
return tdbTbcMoveToFirst(pCur->pCur);
@@ -146,14 +244,19 @@ int32_t streamStateSeekLast(SStreamState* pState, SStreamStateCur* pCur) {
return tdbTbcMoveToLast(pCur->pCur);
}
-SStreamStateCur* streamStateSeekKeyNext(SStreamState* pState, const SWinKey* key) {
+SStreamStateCur* streamStateFillSeekKeyNext(SStreamState* pState, const SWinKey* key) {
SStreamStateCur* pCur = taosMemoryCalloc(1, sizeof(SStreamStateCur));
if (pCur == NULL) {
return NULL;
}
+ if (tdbTbcOpen(pState->pFillStateDb, &pCur->pCur, NULL) < 0) {
+ taosMemoryFree(pCur);
+ return NULL;
+ }
int32_t c;
if (tdbTbcMoveTo(pCur->pCur, key, sizeof(SWinKey), &c) < 0) {
+ tdbTbcClose(pCur->pCur);
taosMemoryFree(pCur);
return NULL;
}
@@ -167,14 +270,19 @@ SStreamStateCur* streamStateSeekKeyNext(SStreamState* pState, const SWinKey* key
return pCur;
}
-SStreamStateCur* streamStateSeekKeyPrev(SStreamState* pState, const SWinKey* key) {
+SStreamStateCur* streamStateFillSeekKeyPrev(SStreamState* pState, const SWinKey* key) {
SStreamStateCur* pCur = taosMemoryCalloc(1, sizeof(SStreamStateCur));
if (pCur == NULL) {
return NULL;
}
+ if (tdbTbcOpen(pState->pFillStateDb, &pCur->pCur, NULL) < 0) {
+ taosMemoryFree(pCur);
+ return NULL;
+ }
int32_t c;
if (tdbTbcMoveTo(pCur->pCur, key, sizeof(SWinKey), &c) < 0) {
+ tdbTbcClose(pCur->pCur);
taosMemoryFree(pCur);
return NULL;
}
diff --git a/source/libs/stream/src/streamUpdate.c b/source/libs/stream/src/streamUpdate.c
index d053662bd30287d5d9589a3881c8588fd3eb82ec..332f7ad2fd7be60f532b1394eb2d72adf985b82a 100644
--- a/source/libs/stream/src/streamUpdate.c
+++ b/source/libs/stream/src/streamUpdate.c
@@ -170,8 +170,17 @@ bool updateInfoIsUpdated(SUpdateInfo *pInfo, uint64_t tableId, TSKEY ts) {
if (ts < maxTs - pInfo->watermark) {
// this window has been closed.
if (pInfo->pCloseWinSBF) {
- return tScalableBfPut(pInfo->pCloseWinSBF, &ts, sizeof(TSKEY));
+ res = tScalableBfPut(pInfo->pCloseWinSBF, &ts, sizeof(TSKEY));
+ if (res == TSDB_CODE_SUCCESS) {
+ return false;
+ } else {
+ qDebug("===stream===Update close window sbf. tableId:%" PRIu64 ", maxTs:%" PRIu64 ", mapMaxTs:%" PRIu64 ", ts:%" PRIu64, tableId,
+ maxTs, *pMapMaxTs, ts);
+ return true;
+ }
}
+ qDebug("===stream===Update close window. tableId:%" PRIu64 ", maxTs:%" PRIu64 ", mapMaxTs:%" PRIu64 ", ts:%" PRIu64, tableId,
+ maxTs, *pMapMaxTs, ts);
return true;
}
@@ -193,7 +202,7 @@ bool updateInfoIsUpdated(SUpdateInfo *pInfo, uint64_t tableId, TSKEY ts) {
}
if (ts < pInfo->minTS) {
- qDebug("===stream===Update. tableId:%" PRIu64 ", maxTs:%" PRIu64 ", mapMaxTs:%" PRIu64 ", ts:%" PRIu64, tableId,
+ qDebug("===stream===Update min ts. tableId:%" PRIu64 ", maxTs:%" PRIu64 ", mapMaxTs:%" PRIu64 ", ts:%" PRIu64, tableId,
maxTs, *pMapMaxTs, ts);
return true;
} else if (res == TSDB_CODE_SUCCESS) {
diff --git a/source/libs/sync/CMakeLists.txt b/source/libs/sync/CMakeLists.txt
index 551849c6f29f3def8b275877aba28f7048ea1793..6025070cb72adcf9e3753ca674695edb48266b3f 100644
--- a/source/libs/sync/CMakeLists.txt
+++ b/source/libs/sync/CMakeLists.txt
@@ -15,6 +15,6 @@ target_include_directories(
PRIVATE "${CMAKE_CURRENT_SOURCE_DIR}/inc"
)
-if(${BUILD_TEST})
+if(BUILD_TEST AND BUILD_SYNC_TEST)
add_subdirectory(test)
-endif(${BUILD_TEST})
+endif()
diff --git a/source/libs/sync/inc/syncUtil.h b/source/libs/sync/inc/syncUtil.h
index 7ecff7ae97bf04da20c1817c413a44a0dd32e8c8..96e22720e8b2b0fb4defc5d5b46e8d765fc54efb 100644
--- a/source/libs/sync/inc/syncUtil.h
+++ b/source/libs/sync/inc/syncUtil.h
@@ -32,7 +32,7 @@ uint64_t syncUtilAddr2U64(const char* host, uint16_t port);
void syncUtilU642Addr(uint64_t u64, char* host, size_t len, uint16_t* port);
void syncUtilnodeInfo2EpSet(const SNodeInfo* pNodeInfo, SEpSet* pEpSet);
void syncUtilraftId2EpSet(const SRaftId* raftId, SEpSet* pEpSet);
-void syncUtilnodeInfo2raftId(const SNodeInfo* pNodeInfo, SyncGroupId vgId, SRaftId* raftId);
+bool syncUtilnodeInfo2raftId(const SNodeInfo* pNodeInfo, SyncGroupId vgId, SRaftId* raftId);
bool syncUtilSameId(const SRaftId* pId1, const SRaftId* pId2);
bool syncUtilEmptyId(const SRaftId* pId);
diff --git a/source/libs/sync/src/syncCommit.c b/source/libs/sync/src/syncCommit.c
index 1e68fe346c71d65e0594643da5b94e2dd1ab204d..b604d25816e300560571458fde7153196e77eee5 100644
--- a/source/libs/sync/src/syncCommit.c
+++ b/source/libs/sync/src/syncCommit.c
@@ -69,15 +69,26 @@ void syncMaybeAdvanceCommitIndex(SSyncNode* pSyncNode) {
if (agree) {
// term
- SSyncRaftEntry* pEntry = pSyncNode->pLogStore->getEntry(pSyncNode->pLogStore, index);
- ASSERT(pEntry != NULL);
-
+ SSyncRaftEntry* pEntry = NULL;
+ SLRUCache* pCache = pSyncNode->pLogStore->pCache;
+ LRUHandle* h = taosLRUCacheLookup(pCache, &index, sizeof(index));
+ if (h) {
+ pEntry = (SSyncRaftEntry*)taosLRUCacheValue(pCache, h);
+ } else {
+ pEntry = pSyncNode->pLogStore->getEntry(pSyncNode->pLogStore, index);
+ ASSERT(pEntry != NULL);
+ }
// cannot commit, even if quorum agree. need check term!
if (pEntry->term <= pSyncNode->pRaftStore->currentTerm) {
// update commit index
newCommitIndex = index;
- syncEntryDestory(pEntry);
+ if (h) {
+ taosLRUCacheRelease(pCache, h, false);
+ } else {
+ syncEntryDestory(pEntry);
+ }
+
break;
} else {
do {
@@ -88,7 +99,11 @@ void syncMaybeAdvanceCommitIndex(SSyncNode* pSyncNode) {
} while (0);
}
- syncEntryDestory(pEntry);
+ if (h) {
+ taosLRUCacheRelease(pCache, h, false);
+ } else {
+ syncEntryDestory(pEntry);
+ }
}
}
diff --git a/source/libs/sync/src/syncIndexMgr.c b/source/libs/sync/src/syncIndexMgr.c
index 07c4fa8429dc539609d3ae788caab3352b0a3e60..28b5313ac514ef98f4295cd547b947447d9c09bc 100644
--- a/source/libs/sync/src/syncIndexMgr.c
+++ b/source/libs/sync/src/syncIndexMgr.c
@@ -20,7 +20,10 @@
SSyncIndexMgr *syncIndexMgrCreate(SSyncNode *pSyncNode) {
SSyncIndexMgr *pSyncIndexMgr = taosMemoryMalloc(sizeof(SSyncIndexMgr));
- ASSERT(pSyncIndexMgr != NULL);
+ if (pSyncIndexMgr == NULL) {
+ terrno = TSDB_CODE_OUT_OF_MEMORY;
+ return NULL;
+ }
memset(pSyncIndexMgr, 0, sizeof(SSyncIndexMgr));
pSyncIndexMgr->replicas = &(pSyncNode->replicasId);
@@ -163,6 +166,7 @@ int64_t syncIndexMgrGetStartTime(SSyncIndexMgr *pSyncIndexMgr, const SRaftId *pR
}
}
ASSERT(0);
+ return -1;
}
void syncIndexMgrSetRecvTime(SSyncIndexMgr *pSyncIndexMgr, const SRaftId *pRaftId, int64_t recvTime) {
@@ -190,6 +194,7 @@ int64_t syncIndexMgrGetRecvTime(SSyncIndexMgr *pSyncIndexMgr, const SRaftId *pRa
}
}
ASSERT(0);
+ return -1;
}
// for debug -------------------
@@ -245,4 +250,5 @@ SyncTerm syncIndexMgrGetTerm(SSyncIndexMgr *pSyncIndexMgr, const SRaftId *pRaftI
}
}
ASSERT(0);
-}
\ No newline at end of file
+ return -1;
+}
diff --git a/source/libs/sync/src/syncMain.c b/source/libs/sync/src/syncMain.c
index 51098374b03531142c9c12443fa5b02efddc3aca..52feb625a8d1fb7bf2eddba814859e064fac89fe 100644
--- a/source/libs/sync/src/syncMain.c
+++ b/source/libs/sync/src/syncMain.c
@@ -51,15 +51,17 @@ static int32_t syncNodeAppendNoop(SSyncNode* ths);
int32_t syncNodeOnPingCb(SSyncNode* ths, SyncPing* pMsg);
int32_t syncNodeOnPingReplyCb(SSyncNode* ths, SyncPingReply* pMsg);
-// life cycle
-static void syncFreeNode(void* param);
// ---------------------------------
+static void syncNodeFreeCb(void *param) {
+ syncNodeClose(param);
+ param = NULL;
+}
int32_t syncInit() {
int32_t ret = 0;
if (!syncEnvIsStart()) {
- tsNodeRefId = taosOpenRef(200, syncFreeNode);
+ tsNodeRefId = taosOpenRef(200, syncNodeFreeCb);
if (tsNodeRefId < 0) {
sError("failed to init node ref");
syncCleanUp();
@@ -86,11 +88,15 @@ void syncCleanUp() {
int64_t syncOpen(const SSyncInfo* pSyncInfo) {
SSyncNode* pSyncNode = syncNodeOpen(pSyncInfo);
- ASSERT(pSyncNode != NULL);
+ if (pSyncNode == NULL) {
+ sError("failed to open sync node. vgId:%d", pSyncInfo->vgId);
+ return -1;
+ }
pSyncNode->rid = taosAddRef(tsNodeRefId, pSyncNode);
if (pSyncNode->rid < 0) {
- syncFreeNode(pSyncNode);
+ syncNodeClose(pSyncNode);
+ pSyncNode = NULL;
return -1;
}
@@ -136,11 +142,9 @@ void syncStartStandBy(int64_t rid) {
void syncStop(int64_t rid) {
SSyncNode* pSyncNode = (SSyncNode*)taosAcquireRef(tsNodeRefId, rid);
if (pSyncNode == NULL) return;
-
int32_t vgId = pSyncNode->vgId;
- syncNodeClose(pSyncNode);
-
taosReleaseRef(tsNodeRefId, pSyncNode->rid);
+
taosRemoveRef(tsNodeRefId, rid);
sDebug("vgId:%d, sync rid:%" PRId64 " is removed from rsetId:%" PRId64, vgId, rid, tsNodeRefId);
}
@@ -210,7 +214,7 @@ int32_t syncReconfigBuild(int64_t rid, const SSyncCfg* pNewCfg, SRpcMsg* pRpcMsg
if (!syncNodeCheckNewConfig(pSyncNode, pNewCfg)) {
taosReleaseRef(tsNodeRefId, pSyncNode->rid);
terrno = TSDB_CODE_SYN_NEW_CONFIG_ERROR;
- sError("syncNodeCheckNewConfig error");
+ sError("invalid new config. vgId:%d", pSyncNode->vgId);
return -1;
}
@@ -237,7 +241,7 @@ int32_t syncReconfig(int64_t rid, const SSyncCfg* pNewCfg) {
if (!syncNodeCheckNewConfig(pSyncNode, pNewCfg)) {
taosReleaseRef(tsNodeRefId, pSyncNode->rid);
terrno = TSDB_CODE_SYN_NEW_CONFIG_ERROR;
- sError("syncNodeCheckNewConfig error");
+ sError("invalid new config. vgId:%d", pSyncNode->vgId);
return -1;
}
@@ -392,29 +396,6 @@ bool syncIsReady(int64_t rid) {
return b;
}
-bool syncIsReadyForRead(int64_t rid) {
- SSyncNode* pSyncNode = (SSyncNode*)taosAcquireRef(tsNodeRefId, rid);
- if (pSyncNode == NULL) {
- return false;
- }
- ASSERT(rid == pSyncNode->rid);
-
- // TODO: last not noop?
- SyncIndex lastIndex = syncNodeGetLastIndex(pSyncNode);
- bool b = (pSyncNode->state == TAOS_SYNC_STATE_LEADER) && (pSyncNode->commitIndex >= lastIndex - SYNC_MAX_READ_RANGE);
- taosReleaseRef(tsNodeRefId, pSyncNode->rid);
-
- // if false, set error code
- if (false == b) {
- if (pSyncNode->state != TAOS_SYNC_STATE_LEADER) {
- terrno = TSDB_CODE_SYN_NOT_LEADER;
- } else {
- terrno = TSDB_CODE_APP_NOT_READY;
- }
- }
- return b;
-}
-
bool syncIsRestoreFinish(int64_t rid) {
SSyncNode* pSyncNode = (SSyncNode*)taosAcquireRef(tsNodeRefId, rid);
if (pSyncNode == NULL) {
@@ -941,16 +922,18 @@ _END:
SSyncNode* syncNodeOpen(const SSyncInfo* pOldSyncInfo) {
SSyncInfo* pSyncInfo = (SSyncInfo*)pOldSyncInfo;
- SSyncNode* pSyncNode = (SSyncNode*)taosMemoryMalloc(sizeof(SSyncNode));
- ASSERT(pSyncNode != NULL);
- memset(pSyncNode, 0, sizeof(SSyncNode));
+ SSyncNode* pSyncNode = (SSyncNode*)taosMemoryCalloc(1, sizeof(SSyncNode));
+ if (pSyncNode == NULL) {
+ terrno = TSDB_CODE_OUT_OF_MEMORY;
+ goto _error;
+ }
int32_t ret = 0;
if (!taosDirExist((char*)(pSyncInfo->path))) {
if (taosMkDir(pSyncInfo->path) != 0) {
terrno = TAOS_SYSTEM_ERROR(errno);
sError("failed to create dir:%s since %s", pSyncInfo->path, terrstr());
- return NULL;
+ goto _error;
}
}
@@ -963,15 +946,21 @@ SSyncNode* syncNodeOpen(const SSyncInfo* pOldSyncInfo) {
meta.lastConfigIndex = SYNC_INDEX_INVALID;
meta.batchSize = pSyncInfo->batchSize;
ret = raftCfgCreateFile((SSyncCfg*)&(pSyncInfo->syncCfg), meta, pSyncNode->configPath);
- ASSERT(ret == 0);
-
+ if (ret != 0) {
+ sError("failed to create raft cfg file. configPath: %s", pSyncNode->configPath);
+ goto _error;
+ }
} else {
// update syncCfg by raft_config.json
pSyncNode->pRaftCfg = raftCfgOpen(pSyncNode->configPath);
- ASSERT(pSyncNode->pRaftCfg != NULL);
+ if (pSyncNode->pRaftCfg == NULL) {
+ sError("failed to open raft cfg file. path:%s", pSyncNode->configPath);
+ goto _error;
+ }
pSyncInfo->syncCfg = pSyncNode->pRaftCfg->cfg;
raftCfgClose(pSyncNode->pRaftCfg);
+ pSyncNode->pRaftCfg = NULL;
}
// init by SSyncInfo
@@ -988,11 +977,17 @@ SSyncNode* syncNodeOpen(const SSyncInfo* pOldSyncInfo) {
// init raft config
pSyncNode->pRaftCfg = raftCfgOpen(pSyncNode->configPath);
- ASSERT(pSyncNode->pRaftCfg != NULL);
+ if (pSyncNode->pRaftCfg == NULL) {
+ sError("failed to open raft cfg file. path:%s", pSyncNode->configPath);
+ goto _error;
+ }
// init internal
pSyncNode->myNodeInfo = pSyncNode->pRaftCfg->cfg.nodeInfo[pSyncNode->pRaftCfg->cfg.myIndex];
- syncUtilnodeInfo2raftId(&pSyncNode->myNodeInfo, pSyncNode->vgId, &pSyncNode->myRaftId);
+ if (!syncUtilnodeInfo2raftId(&pSyncNode->myNodeInfo, pSyncNode->vgId, &pSyncNode->myRaftId)) {
+ sError("failed to determine my raft member id. vgId:%d", pSyncNode->vgId);
+ goto _error;
+ }
// init peersNum, peers, peersId
pSyncNode->peersNum = pSyncNode->pRaftCfg->cfg.replicaNum - 1;
@@ -1004,17 +999,24 @@ SSyncNode* syncNodeOpen(const SSyncInfo* pOldSyncInfo) {
}
}
for (int i = 0; i < pSyncNode->peersNum; ++i) {
- syncUtilnodeInfo2raftId(&pSyncNode->peersNodeInfo[i], pSyncNode->vgId, &pSyncNode->peersId[i]);
+ if (!syncUtilnodeInfo2raftId(&pSyncNode->peersNodeInfo[i], pSyncNode->vgId, &pSyncNode->peersId[i])) {
+ sError("failed to determine raft member id. vgId:%d, peer:%d", pSyncNode->vgId, i);
+ goto _error;
+ }
}
// init replicaNum, replicasId
pSyncNode->replicaNum = pSyncNode->pRaftCfg->cfg.replicaNum;
for (int i = 0; i < pSyncNode->pRaftCfg->cfg.replicaNum; ++i) {
- syncUtilnodeInfo2raftId(&pSyncNode->pRaftCfg->cfg.nodeInfo[i], pSyncNode->vgId, &pSyncNode->replicasId[i]);
+ if(!syncUtilnodeInfo2raftId(&pSyncNode->pRaftCfg->cfg.nodeInfo[i], pSyncNode->vgId, &pSyncNode->replicasId[i])) {
+ sError("failed to determine raft member id. vgId:%d, replica:%d", pSyncNode->vgId, i);
+ goto _error;
+ }
}
// init raft algorithm
pSyncNode->pFsm = pSyncInfo->pFsm;
+ pSyncInfo->pFsm = NULL;
pSyncNode->quorum = syncUtilQuorum(pSyncNode->pRaftCfg->cfg.replicaNum);
pSyncNode->leaderCache = EMPTY_RAFT_ID;
@@ -1047,29 +1049,50 @@ SSyncNode* syncNodeOpen(const SSyncInfo* pOldSyncInfo) {
// init TLA+ server vars
pSyncNode->state = TAOS_SYNC_STATE_FOLLOWER;
pSyncNode->pRaftStore = raftStoreOpen(pSyncNode->raftStorePath);
- ASSERT(pSyncNode->pRaftStore != NULL);
+ if (pSyncNode->pRaftStore == NULL) {
+ sError("failed to open raft store. path: %s", pSyncNode->raftStorePath);
+ goto _error;
+ }
// init TLA+ candidate vars
pSyncNode->pVotesGranted = voteGrantedCreate(pSyncNode);
- ASSERT(pSyncNode->pVotesGranted != NULL);
+ if (pSyncNode->pVotesGranted == NULL) {
+ sError("failed to create VotesGranted. vgId:%d", pSyncNode->vgId);
+ goto _error;
+ }
pSyncNode->pVotesRespond = votesRespondCreate(pSyncNode);
- ASSERT(pSyncNode->pVotesRespond != NULL);
+ if (pSyncNode->pVotesRespond == NULL) {
+ sError("failed to create VotesRespond. vgId:%d", pSyncNode->vgId);
+ goto _error;
+ }
// init TLA+ leader vars
pSyncNode->pNextIndex = syncIndexMgrCreate(pSyncNode);
- ASSERT(pSyncNode->pNextIndex != NULL);
+ if (pSyncNode->pNextIndex == NULL) {
+ sError("failed to create SyncIndexMgr. vgId:%d", pSyncNode->vgId);
+ goto _error;
+ }
pSyncNode->pMatchIndex = syncIndexMgrCreate(pSyncNode);
- ASSERT(pSyncNode->pMatchIndex != NULL);
+ if (pSyncNode->pMatchIndex == NULL) {
+ sError("failed to create SyncIndexMgr. vgId:%d", pSyncNode->vgId);
+ goto _error;
+ }
// init TLA+ log vars
pSyncNode->pLogStore = logStoreCreate(pSyncNode);
- ASSERT(pSyncNode->pLogStore != NULL);
+ if (pSyncNode->pLogStore == NULL) {
+ sError("failed to create SyncLogStore. vgId:%d", pSyncNode->vgId);
+ goto _error;
+ }
SyncIndex commitIndex = SYNC_INDEX_INVALID;
if (pSyncNode->pFsm != NULL && pSyncNode->pFsm->FpGetSnapshotInfo != NULL) {
SSnapshot snapshot = {0};
int32_t code = pSyncNode->pFsm->FpGetSnapshotInfo(pSyncNode->pFsm, &snapshot);
- ASSERT(code == 0);
+ if (code != 0) {
+ sError("failed to get snapshot info. vgId:%d, code:%d", pSyncNode->vgId, code);
+ goto _error;
+ }
if (snapshot.lastApplyIndex > commitIndex) {
commitIndex = snapshot.lastApplyIndex;
syncNodeEventLog(pSyncNode, "reset commit index by snapshot");
@@ -1132,7 +1155,10 @@ SSyncNode* syncNodeOpen(const SSyncInfo* pOldSyncInfo) {
// tools
pSyncNode->pSyncRespMgr = syncRespMgrCreate(pSyncNode, SYNC_RESP_TTL_MS);
- ASSERT(pSyncNode->pSyncRespMgr != NULL);
+ if (pSyncNode->pSyncRespMgr == NULL) {
+ sError("failed to create SyncRespMgr. vgId:%d", pSyncNode->vgId);
+ goto _error;
+ }
// restore state
pSyncNode->restoreFinish = false;
@@ -1162,6 +1188,15 @@ SSyncNode* syncNodeOpen(const SSyncInfo* pOldSyncInfo) {
syncNodeEventLog(pSyncNode, "sync open");
return pSyncNode;
+
+_error:
+ if (pSyncInfo->pFsm) {
+ taosMemoryFree(pSyncInfo->pFsm);
+ pSyncInfo->pFsm = NULL;
+ }
+ syncNodeClose(pSyncNode);
+ pSyncNode = NULL;
+ return NULL;
}
void syncNodeMaybeUpdateCommitBySnapshot(SSyncNode* pSyncNode) {
@@ -1214,20 +1249,28 @@ void syncNodeStartStandBy(SSyncNode* pSyncNode) {
void syncNodeClose(SSyncNode* pSyncNode) {
syncNodeEventLog(pSyncNode, "sync close");
-
+ if (pSyncNode == NULL) {
+ return;
+ }
int32_t ret;
- ASSERT(pSyncNode != NULL);
ret = raftStoreClose(pSyncNode->pRaftStore);
ASSERT(ret == 0);
syncRespMgrDestroy(pSyncNode->pSyncRespMgr);
+ pSyncNode->pSyncRespMgr = NULL;
voteGrantedDestroy(pSyncNode->pVotesGranted);
+ pSyncNode->pVotesGranted = NULL;
votesRespondDestory(pSyncNode->pVotesRespond);
+ pSyncNode->pVotesRespond = NULL;
syncIndexMgrDestroy(pSyncNode->pNextIndex);
+ pSyncNode->pNextIndex = NULL;
syncIndexMgrDestroy(pSyncNode->pMatchIndex);
+ pSyncNode->pMatchIndex = NULL;
logStoreDestory(pSyncNode->pLogStore);
+ pSyncNode->pLogStore = NULL;
raftCfgClose(pSyncNode->pRaftCfg);
+ pSyncNode->pRaftCfg = NULL;
syncNodeStopPingTimer(pSyncNode);
syncNodeStopElectTimer(pSyncNode);
@@ -1249,8 +1292,7 @@ void syncNodeClose(SSyncNode* pSyncNode) {
pSyncNode->pNewNodeReceiver = NULL;
}
- // free memory in syncFreeNode
- // taosMemoryFree(pSyncNode);
+ taosMemoryFree(pSyncNode);
}
// option
@@ -2534,7 +2576,7 @@ static void syncNodeEqHeartbeatTimer(void* param, void* tmrId) {
return;
}
} else {
- sError("syncNodeEqHeartbeatTimer FpEqMsg is NULL");
+ sError("vgId:%d, enqueue msg cb ptr (i.e. FpEqMsg) not set.", pSyncNode->vgId);
}
syncTimeoutDestroy(pSyncMsg);
@@ -2581,6 +2623,20 @@ static int32_t syncNodeEqNoop(SSyncNode* ths) {
return ret;
}
+static void deleteCacheEntry(const void* key, size_t keyLen, void* value) { taosMemoryFree(value); }
+
+static int32_t syncCacheEntry(SSyncLogStore* pLogStore, SSyncRaftEntry* pEntry, LRUHandle** h) {
+ int code = 0;
+ int entryLen = sizeof(*pEntry) + pEntry->dataLen;
+ LRUStatus status = taosLRUCacheInsert(pLogStore->pCache, &pEntry->index, sizeof(pEntry->index), pEntry, entryLen,
+ deleteCacheEntry, h, TAOS_LRU_PRIORITY_LOW);
+ if (status != TAOS_LRU_STATUS_OK) {
+ code = -1;
+ }
+
+ return code;
+}
+
static int32_t syncNodeAppendNoop(SSyncNode* ths) {
int32_t ret = 0;
@@ -2589,13 +2645,21 @@ static int32_t syncNodeAppendNoop(SSyncNode* ths) {
SSyncRaftEntry* pEntry = syncEntryBuildNoop(term, index, ths->vgId);
ASSERT(pEntry != NULL);
+ LRUHandle* h = NULL;
+ syncCacheEntry(ths->pLogStore, pEntry, &h);
+
if (ths->state == TAOS_SYNC_STATE_LEADER) {
int32_t code = ths->pLogStore->syncLogAppendEntry(ths->pLogStore, pEntry);
ASSERT(code == 0);
syncNodeReplicate(ths, false);
}
- syncEntryDestory(pEntry);
+ if (h) {
+ taosLRUCacheRelease(ths->pLogStore->pCache, h, false);
+ } else {
+ syncEntryDestory(pEntry);
+ }
+
return ret;
}
@@ -2654,6 +2718,9 @@ int32_t syncNodeOnClientRequestCb(SSyncNode* ths, SyncClientRequest* pMsg, SyncI
SSyncRaftEntry* pEntry = syncEntryBuild2((SyncClientRequest*)pMsg, term, index);
ASSERT(pEntry != NULL);
+ LRUHandle* h = NULL;
+ syncCacheEntry(ths->pLogStore, pEntry, &h);
+
if (ths->state == TAOS_SYNC_STATE_LEADER) {
// append entry
code = ths->pLogStore->syncLogAppendEntry(ths->pLogStore, pEntry);
@@ -2685,7 +2752,12 @@ int32_t syncNodeOnClientRequestCb(SSyncNode* ths, SyncClientRequest* pMsg, SyncI
}
}
- syncEntryDestory(pEntry);
+ if (h) {
+ taosLRUCacheRelease(ths->pLogStore->pCache, h, false);
+ } else {
+ syncEntryDestory(pEntry);
+ }
+
return ret;
}
@@ -2744,14 +2816,6 @@ int32_t syncNodeOnClientRequestBatchCb(SSyncNode* ths, SyncClientRequestBatch* p
return 0;
}
-static void syncFreeNode(void* param) {
- SSyncNode* pNode = param;
- // inner object already free
- // syncNodePrint2((char*)"==syncFreeNode==", pNode);
-
- taosMemoryFree(pNode);
-}
-
const char* syncStr(ESyncState state) {
switch (state) {
case TAOS_SYNC_STATE_FOLLOWER:
@@ -2766,8 +2830,6 @@ const char* syncStr(ESyncState state) {
}
int32_t syncDoLeaderTransfer(SSyncNode* ths, SRpcMsg* pRpcMsg, SSyncRaftEntry* pEntry) {
- SyncLeaderTransfer* pSyncLeaderTransfer = syncLeaderTransferFromRpcMsg2(pRpcMsg);
-
if (ths->state != TAOS_SYNC_STATE_FOLLOWER) {
syncNodeEventLog(ths, "I am not follower, can not do leader transfer");
return 0;
@@ -2799,6 +2861,8 @@ int32_t syncDoLeaderTransfer(SSyncNode* ths, SRpcMsg* pRpcMsg, SSyncRaftEntry* p
}
*/
+ SyncLeaderTransfer* pSyncLeaderTransfer = syncLeaderTransferFromRpcMsg2(pRpcMsg);
+
do {
char logBuf[128];
snprintf(logBuf, sizeof(logBuf), "do leader transfer, index:%ld", pEntry->index);
@@ -2973,9 +3037,15 @@ int32_t syncNodeCommit(SSyncNode* ths, SyncIndex beginIndex, SyncIndex endIndex,
for (SyncIndex i = beginIndex; i <= endIndex; ++i) {
if (i != SYNC_INDEX_INVALID) {
SSyncRaftEntry* pEntry;
- code = ths->pLogStore->syncLogGetEntry(ths->pLogStore, i, &pEntry);
- ASSERT(code == 0);
- ASSERT(pEntry != NULL);
+ SLRUCache* pCache = ths->pLogStore->pCache;
+ LRUHandle* h = taosLRUCacheLookup(pCache, &i, sizeof(i));
+ if (h) {
+ pEntry = (SSyncRaftEntry*)taosLRUCacheValue(pCache, h);
+ } else {
+ code = ths->pLogStore->syncLogGetEntry(ths->pLogStore, i, &pEntry);
+ ASSERT(code == 0);
+ ASSERT(pEntry != NULL);
+ }
SRpcMsg rpcMsg;
syncEntry2OriginalRpc(pEntry, &rpcMsg);
@@ -3058,7 +3128,11 @@ int32_t syncNodeCommit(SSyncNode* ths, SyncIndex beginIndex, SyncIndex endIndex,
}
rpcFreeCont(rpcMsg.pCont);
- syncEntryDestory(pEntry);
+ if (h) {
+ taosLRUCacheRelease(pCache, h, false);
+ } else {
+ syncEntryDestory(pEntry);
+ }
}
}
}
diff --git a/source/libs/sync/src/syncMessage.c b/source/libs/sync/src/syncMessage.c
index b42aba560fa1c26ef9426b55729c1d39cafa8a24..faebe5bbecb16012831ed103de520c14accc81d5 100644
--- a/source/libs/sync/src/syncMessage.c
+++ b/source/libs/sync/src/syncMessage.c
@@ -1992,6 +1992,313 @@ void syncAppendEntriesReplyLog2(char* s, const SyncAppendEntriesReply* pMsg) {
}
}
+// ---- message process SyncHeartbeat----
+SyncHeartbeat* syncHeartbeatBuild(int32_t vgId) {
+ uint32_t bytes = sizeof(SyncHeartbeat);
+ SyncHeartbeat* pMsg = taosMemoryMalloc(bytes);
+ memset(pMsg, 0, bytes);
+ pMsg->bytes = bytes;
+ pMsg->vgId = vgId;
+ pMsg->msgType = TDMT_SYNC_HEARTBEAT;
+ return pMsg;
+}
+
+void syncHeartbeatDestroy(SyncHeartbeat* pMsg) {
+ if (pMsg != NULL) {
+ taosMemoryFree(pMsg);
+ }
+}
+
+void syncHeartbeatSerialize(const SyncHeartbeat* pMsg, char* buf, uint32_t bufLen) {
+ ASSERT(pMsg->bytes <= bufLen);
+ memcpy(buf, pMsg, pMsg->bytes);
+}
+
+void syncHeartbeatDeserialize(const char* buf, uint32_t len, SyncHeartbeat* pMsg) {
+ memcpy(pMsg, buf, len);
+ ASSERT(len == pMsg->bytes);
+}
+
+char* syncHeartbeatSerialize2(const SyncHeartbeat* pMsg, uint32_t* len) {
+ char* buf = taosMemoryMalloc(pMsg->bytes);
+ ASSERT(buf != NULL);
+ syncHeartbeatSerialize(pMsg, buf, pMsg->bytes);
+ if (len != NULL) {
+ *len = pMsg->bytes;
+ }
+ return buf;
+}
+
+SyncHeartbeat* syncHeartbeatDeserialize2(const char* buf, uint32_t len) {
+ uint32_t bytes = *((uint32_t*)buf);
+ SyncHeartbeat* pMsg = taosMemoryMalloc(bytes);
+ ASSERT(pMsg != NULL);
+ syncHeartbeatDeserialize(buf, len, pMsg);
+ ASSERT(len == pMsg->bytes);
+ return pMsg;
+}
+
+void syncHeartbeat2RpcMsg(const SyncHeartbeat* pMsg, SRpcMsg* pRpcMsg) {
+ memset(pRpcMsg, 0, sizeof(*pRpcMsg));
+ pRpcMsg->msgType = pMsg->msgType;
+ pRpcMsg->contLen = pMsg->bytes;
+ pRpcMsg->pCont = rpcMallocCont(pRpcMsg->contLen);
+ syncHeartbeatSerialize(pMsg, pRpcMsg->pCont, pRpcMsg->contLen);
+}
+
+void syncHeartbeatFromRpcMsg(const SRpcMsg* pRpcMsg, SyncHeartbeat* pMsg) {
+ syncHeartbeatDeserialize(pRpcMsg->pCont, pRpcMsg->contLen, pMsg);
+}
+
+SyncHeartbeat* syncHeartbeatFromRpcMsg2(const SRpcMsg* pRpcMsg) {
+ SyncHeartbeat* pMsg = syncHeartbeatDeserialize2(pRpcMsg->pCont, pRpcMsg->contLen);
+ ASSERT(pMsg != NULL);
+ return pMsg;
+}
+
+cJSON* syncHeartbeat2Json(const SyncHeartbeat* pMsg) {
+ char u64buf[128] = {0};
+ cJSON* pRoot = cJSON_CreateObject();
+
+ if (pMsg != NULL) {
+ cJSON_AddNumberToObject(pRoot, "bytes", pMsg->bytes);
+ cJSON_AddNumberToObject(pRoot, "vgId", pMsg->vgId);
+ cJSON_AddNumberToObject(pRoot, "msgType", pMsg->msgType);
+
+ cJSON* pSrcId = cJSON_CreateObject();
+ snprintf(u64buf, sizeof(u64buf), "%" PRIu64, pMsg->srcId.addr);
+ cJSON_AddStringToObject(pSrcId, "addr", u64buf);
+ {
+ uint64_t u64 = pMsg->srcId.addr;
+ cJSON* pTmp = pSrcId;
+ char host[128] = {0};
+ uint16_t port;
+ syncUtilU642Addr(u64, host, sizeof(host), &port);
+ cJSON_AddStringToObject(pTmp, "addr_host", host);
+ cJSON_AddNumberToObject(pTmp, "addr_port", port);
+ }
+ cJSON_AddNumberToObject(pSrcId, "vgId", pMsg->srcId.vgId);
+ cJSON_AddItemToObject(pRoot, "srcId", pSrcId);
+
+ cJSON* pDestId = cJSON_CreateObject();
+ snprintf(u64buf, sizeof(u64buf), "%" PRIu64, pMsg->destId.addr);
+ cJSON_AddStringToObject(pDestId, "addr", u64buf);
+ {
+ uint64_t u64 = pMsg->destId.addr;
+ cJSON* pTmp = pDestId;
+ char host[128] = {0};
+ uint16_t port;
+ syncUtilU642Addr(u64, host, sizeof(host), &port);
+ cJSON_AddStringToObject(pTmp, "addr_host", host);
+ cJSON_AddNumberToObject(pTmp, "addr_port", port);
+ }
+ cJSON_AddNumberToObject(pDestId, "vgId", pMsg->destId.vgId);
+ cJSON_AddItemToObject(pRoot, "destId", pDestId);
+
+ snprintf(u64buf, sizeof(u64buf), "%" PRIu64, pMsg->term);
+ cJSON_AddStringToObject(pRoot, "term", u64buf);
+
+ snprintf(u64buf, sizeof(u64buf), "%" PRIu64, pMsg->privateTerm);
+ cJSON_AddStringToObject(pRoot, "privateTerm", u64buf);
+
+ snprintf(u64buf, sizeof(u64buf), "%" PRId64, pMsg->commitIndex);
+ cJSON_AddStringToObject(pRoot, "commitIndex", u64buf);
+ }
+
+ cJSON* pJson = cJSON_CreateObject();
+ cJSON_AddItemToObject(pJson, "SyncHeartbeat", pRoot);
+ return pJson;
+}
+
+char* syncHeartbeat2Str(const SyncHeartbeat* pMsg) {
+ cJSON* pJson = syncHeartbeat2Json(pMsg);
+ char* serialized = cJSON_Print(pJson);
+ cJSON_Delete(pJson);
+ return serialized;
+}
+
+void syncHeartbeatPrint(const SyncHeartbeat* pMsg) {
+ char* serialized = syncHeartbeat2Str(pMsg);
+ printf("syncHeartbeatPrint | len:%" PRIu64 " | %s \n", strlen(serialized), serialized);
+ fflush(NULL);
+ taosMemoryFree(serialized);
+}
+
+void syncHeartbeatPrint2(char* s, const SyncHeartbeat* pMsg) {
+ char* serialized = syncHeartbeat2Str(pMsg);
+ printf("syncHeartbeatPrint2 | len:%" PRIu64 " | %s | %s \n", strlen(serialized), s, serialized);
+ fflush(NULL);
+ taosMemoryFree(serialized);
+}
+
+void syncHeartbeatLog(const SyncHeartbeat* pMsg) {
+ char* serialized = syncHeartbeat2Str(pMsg);
+ sTrace("syncHeartbeatLog | len:%" PRIu64 " | %s", strlen(serialized), serialized);
+ taosMemoryFree(serialized);
+}
+
+void syncHeartbeatLog2(char* s, const SyncHeartbeat* pMsg) {
+ if (gRaftDetailLog) {
+ char* serialized = syncHeartbeat2Str(pMsg);
+ sTrace("syncHeartbeatLog2 | len:%" PRIu64 " | %s | %s", strlen(serialized), s, serialized);
+ taosMemoryFree(serialized);
+ }
+}
+
+// ---- message process SyncHeartbeatReply----
+SyncHeartbeatReply* syncHeartbeatReplyBuild(int32_t vgId) {
+ uint32_t bytes = sizeof(SyncHeartbeatReply);
+ SyncHeartbeatReply* pMsg = taosMemoryMalloc(bytes);
+ memset(pMsg, 0, bytes);
+ pMsg->bytes = bytes;
+ pMsg->vgId = vgId;
+ pMsg->msgType = TDMT_SYNC_HEARTBEAT_REPLY;
+ return pMsg;
+}
+
+void syncHeartbeatReplyDestroy(SyncHeartbeatReply* pMsg) {
+ if (pMsg != NULL) {
+ taosMemoryFree(pMsg);
+ }
+}
+
+void syncHeartbeatReplySerialize(const SyncHeartbeatReply* pMsg, char* buf, uint32_t bufLen) {
+ ASSERT(pMsg->bytes <= bufLen);
+ memcpy(buf, pMsg, pMsg->bytes);
+}
+
+void syncHeartbeatReplyDeserialize(const char* buf, uint32_t len, SyncHeartbeatReply* pMsg) {
+ memcpy(pMsg, buf, len);
+ ASSERT(len == pMsg->bytes);
+}
+
+char* syncHeartbeatReplySerialize2(const SyncHeartbeatReply* pMsg, uint32_t* len) {
+ char* buf = taosMemoryMalloc(pMsg->bytes);
+ ASSERT(buf != NULL);
+ syncHeartbeatReplySerialize(pMsg, buf, pMsg->bytes);
+ if (len != NULL) {
+ *len = pMsg->bytes;
+ }
+ return buf;
+}
+
+SyncHeartbeatReply* syncHeartbeatReplyDeserialize2(const char* buf, uint32_t len) {
+ uint32_t bytes = *((uint32_t*)buf);
+ SyncHeartbeatReply* pMsg = taosMemoryMalloc(bytes);
+ ASSERT(pMsg != NULL);
+ syncHeartbeatReplyDeserialize(buf, len, pMsg);
+ ASSERT(len == pMsg->bytes);
+ return pMsg;
+}
+
+void syncHeartbeatReply2RpcMsg(const SyncHeartbeatReply* pMsg, SRpcMsg* pRpcMsg) {
+ memset(pRpcMsg, 0, sizeof(*pRpcMsg));
+ pRpcMsg->msgType = pMsg->msgType;
+ pRpcMsg->contLen = pMsg->bytes;
+ pRpcMsg->pCont = rpcMallocCont(pRpcMsg->contLen);
+ syncHeartbeatReplySerialize(pMsg, pRpcMsg->pCont, pRpcMsg->contLen);
+}
+
+void syncHeartbeatReplyFromRpcMsg(const SRpcMsg* pRpcMsg, SyncHeartbeatReply* pMsg) {
+ syncHeartbeatReplyDeserialize(pRpcMsg->pCont, pRpcMsg->contLen, pMsg);
+}
+
+SyncHeartbeatReply* syncHeartbeatReplyFromRpcMsg2(const SRpcMsg* pRpcMsg) {
+ SyncHeartbeatReply* pMsg = syncHeartbeatReplyDeserialize2(pRpcMsg->pCont, pRpcMsg->contLen);
+ ASSERT(pMsg != NULL);
+ return pMsg;
+}
+
+cJSON* syncHeartbeatReply2Json(const SyncHeartbeatReply* pMsg) {
+ char u64buf[128] = {0};
+ cJSON* pRoot = cJSON_CreateObject();
+
+ if (pMsg != NULL) {
+ cJSON_AddNumberToObject(pRoot, "bytes", pMsg->bytes);
+ cJSON_AddNumberToObject(pRoot, "vgId", pMsg->vgId);
+ cJSON_AddNumberToObject(pRoot, "msgType", pMsg->msgType);
+
+ cJSON* pSrcId = cJSON_CreateObject();
+ snprintf(u64buf, sizeof(u64buf), "%" PRIu64, pMsg->srcId.addr);
+ cJSON_AddStringToObject(pSrcId, "addr", u64buf);
+ {
+ uint64_t u64 = pMsg->srcId.addr;
+ cJSON* pTmp = pSrcId;
+ char host[128] = {0};
+ uint16_t port;
+ syncUtilU642Addr(u64, host, sizeof(host), &port);
+ cJSON_AddStringToObject(pTmp, "addr_host", host);
+ cJSON_AddNumberToObject(pTmp, "addr_port", port);
+ }
+ cJSON_AddNumberToObject(pSrcId, "vgId", pMsg->srcId.vgId);
+ cJSON_AddItemToObject(pRoot, "srcId", pSrcId);
+
+ cJSON* pDestId = cJSON_CreateObject();
+ snprintf(u64buf, sizeof(u64buf), "%" PRIu64, pMsg->destId.addr);
+ cJSON_AddStringToObject(pDestId, "addr", u64buf);
+ {
+ uint64_t u64 = pMsg->destId.addr;
+ cJSON* pTmp = pDestId;
+ char host[128] = {0};
+ uint16_t port;
+ syncUtilU642Addr(u64, host, sizeof(host), &port);
+ cJSON_AddStringToObject(pTmp, "addr_host", host);
+ cJSON_AddNumberToObject(pTmp, "addr_port", port);
+ }
+ cJSON_AddNumberToObject(pDestId, "vgId", pMsg->destId.vgId);
+ cJSON_AddItemToObject(pRoot, "destId", pDestId);
+
+ snprintf(u64buf, sizeof(u64buf), "%" PRIu64, pMsg->privateTerm);
+ cJSON_AddStringToObject(pRoot, "privateTerm", u64buf);
+
+ snprintf(u64buf, sizeof(u64buf), "%" PRIu64, pMsg->term);
+ cJSON_AddStringToObject(pRoot, "term", u64buf);
+
+ cJSON_AddStringToObject(pRoot, "matchIndex", u64buf);
+ snprintf(u64buf, sizeof(u64buf), "%" PRId64, pMsg->startTime);
+ cJSON_AddStringToObject(pRoot, "startTime", u64buf);
+ }
+
+ cJSON* pJson = cJSON_CreateObject();
+ cJSON_AddItemToObject(pJson, "SyncHeartbeatReply", pRoot);
+ return pJson;
+}
+
+char* syncHeartbeatReply2Str(const SyncHeartbeatReply* pMsg) {
+ cJSON* pJson = syncHeartbeatReply2Json(pMsg);
+ char* serialized = cJSON_Print(pJson);
+ cJSON_Delete(pJson);
+ return serialized;
+}
+
+void syncHeartbeatReplyPrint(const SyncHeartbeatReply* pMsg) {
+ char* serialized = syncHeartbeatReply2Str(pMsg);
+ printf("syncHeartbeatReplyPrint | len:%" PRIu64 " | %s \n", strlen(serialized), serialized);
+ fflush(NULL);
+ taosMemoryFree(serialized);
+}
+
+void syncHeartbeatReplyPrint2(char* s, const SyncHeartbeatReply* pMsg) {
+ char* serialized = syncHeartbeatReply2Str(pMsg);
+ printf("syncHeartbeatReplyPrint2 | len:%" PRIu64 " | %s | %s \n", strlen(serialized), s, serialized);
+ fflush(NULL);
+ taosMemoryFree(serialized);
+}
+
+void syncHeartbeatReplyLog(const SyncHeartbeatReply* pMsg) {
+ char* serialized = syncHeartbeatReply2Str(pMsg);
+ sTrace("syncHeartbeatReplyLog | len:%" PRIu64 " | %s", strlen(serialized), serialized);
+ taosMemoryFree(serialized);
+}
+
+void syncHeartbeatReplyLog2(char* s, const SyncHeartbeatReply* pMsg) {
+ if (gRaftDetailLog) {
+ char* serialized = syncHeartbeatReply2Str(pMsg);
+ sTrace("syncHeartbeatReplyLog2 | len:%" PRIu64 " | %s | %s", strlen(serialized), s, serialized);
+ taosMemoryFree(serialized);
+ }
+}
+
// ---- message process SyncApplyMsg----
SyncApplyMsg* syncApplyMsgBuild(uint32_t dataLen) {
uint32_t bytes = sizeof(SyncApplyMsg) + dataLen;
diff --git a/source/libs/sync/src/syncRaftCfg.c b/source/libs/sync/src/syncRaftCfg.c
index ab404d1b9af744b51b508cd1f870482c79756ea1..57126d0871da0ab80cf718260377fa8754581d65 100644
--- a/source/libs/sync/src/syncRaftCfg.c
+++ b/source/libs/sync/src/syncRaftCfg.c
@@ -364,8 +364,6 @@ int32_t raftCfgCreateFile(SSyncCfg *pCfg, SRaftCfgMeta meta, const char *path) {
int32_t sysErr = errno;
const char *sysErrStr = strerror(errno);
sError("create raft cfg file error, err:%d %X, msg:%s, syserr:%d, sysmsg:%s", err, err, errStr, sysErr, sysErrStr);
- ASSERT(0);
-
return -1;
}
diff --git a/source/libs/sync/src/syncRaftLog.c b/source/libs/sync/src/syncRaftLog.c
index 0649e064e45391cfe9082c24264a33b762d1a279..496c8419de7f56a96d544997f989ef6d16de4de3 100644
--- a/source/libs/sync/src/syncRaftLog.c
+++ b/source/libs/sync/src/syncRaftLog.c
@@ -53,6 +53,15 @@ SSyncLogStore* logStoreCreate(SSyncNode* pSyncNode) {
SSyncLogStore* pLogStore = taosMemoryMalloc(sizeof(SSyncLogStore));
ASSERT(pLogStore != NULL);
+ pLogStore->pCache = taosLRUCacheInit(10 * 1024 * 1024, 1, .5);
+ if (pLogStore->pCache == NULL) {
+ terrno = TSDB_CODE_WAL_OUT_OF_MEMORY;
+ taosMemoryFree(pLogStore);
+ return NULL;
+ }
+
+ taosLRUCacheSetStrictCapacity(pLogStore->pCache, false);
+
pLogStore->data = taosMemoryMalloc(sizeof(SSyncLogStoreData));
ASSERT(pLogStore->data != NULL);
@@ -102,6 +111,10 @@ void logStoreDestory(SSyncLogStore* pLogStore) {
taosThreadMutexDestroy(&(pData->mutex));
taosMemoryFree(pLogStore->data);
+
+ taosLRUCacheEraseUnrefEntries(pLogStore->pCache);
+ taosLRUCacheCleanup(pLogStore->pCache);
+
taosMemoryFree(pLogStore);
}
}
@@ -243,7 +256,7 @@ static int32_t raftLogAppendEntry(struct SSyncLogStore* pLogStore, SSyncRaftEntr
static int32_t raftLogGetEntry(struct SSyncLogStore* pLogStore, SyncIndex index, SSyncRaftEntry** ppEntry) {
SSyncLogStoreData* pData = pLogStore->data;
SWal* pWal = pData->pWal;
- int32_t code;
+ int32_t code = 0;
*ppEntry = NULL;
@@ -257,6 +270,7 @@ static int32_t raftLogGetEntry(struct SSyncLogStore* pLogStore, SyncIndex index,
taosThreadMutexLock(&(pData->mutex));
code = walReadVer(pWalHandle, index);
+ // code = walReadVerCached(pWalHandle, index);
if (code != 0) {
int32_t err = terrno;
const char* errStr = tstrerror(err);
@@ -412,6 +426,7 @@ SSyncRaftEntry* logStoreGetEntry(SSyncLogStore* pLogStore, SyncIndex index) {
ASSERT(pWalHandle != NULL);
int32_t code = walReadVer(pWalHandle, index);
+ // int32_t code = walReadVerCached(pWalHandle, index);
if (code != 0) {
int32_t err = terrno;
const char* errStr = tstrerror(err);
diff --git a/source/libs/sync/src/syncRaftStore.c b/source/libs/sync/src/syncRaftStore.c
index 908587303624a305e03fcd7034417394eb9e71c0..22b47a2c45e14216b1554c9e606f2d66b1d07b5e 100644
--- a/source/libs/sync/src/syncRaftStore.c
+++ b/source/libs/sync/src/syncRaftStore.c
@@ -28,7 +28,7 @@ SRaftStore *raftStoreOpen(const char *path) {
SRaftStore *pRaftStore = taosMemoryMalloc(sizeof(SRaftStore));
if (pRaftStore == NULL) {
- sError("raftStoreOpen malloc error");
+ terrno = TSDB_CODE_OUT_OF_MEMORY;
return NULL;
}
memset(pRaftStore, 0, sizeof(*pRaftStore));
@@ -46,7 +46,7 @@ SRaftStore *raftStoreOpen(const char *path) {
ASSERT(pRaftStore->pFile != NULL);
int len = taosReadFile(pRaftStore->pFile, storeBuf, RAFT_STORE_BLOCK_SIZE);
- ASSERT(len == RAFT_STORE_BLOCK_SIZE);
+ ASSERT(len > 0);
ret = raftStoreDeserialize(pRaftStore, storeBuf, len);
ASSERT(ret == 0);
@@ -72,7 +72,9 @@ static int32_t raftStoreInit(SRaftStore *pRaftStore) {
}
int32_t raftStoreClose(SRaftStore *pRaftStore) {
- ASSERT(pRaftStore != NULL);
+ if (pRaftStore == NULL) {
+ return 0;
+ }
taosCloseFile(&pRaftStore->pFile);
taosMemoryFree(pRaftStore);
diff --git a/source/libs/sync/src/syncRespMgr.c b/source/libs/sync/src/syncRespMgr.c
index d7ed864180335eb5a07ea58298c574fb71265fe2..103c2254768a3b5bfc6d5c8090828f999b2e8c9e 100644
--- a/source/libs/sync/src/syncRespMgr.c
+++ b/source/libs/sync/src/syncRespMgr.c
@@ -19,6 +19,10 @@
SSyncRespMgr *syncRespMgrCreate(void *data, int64_t ttl) {
SSyncRespMgr *pObj = (SSyncRespMgr *)taosMemoryMalloc(sizeof(SSyncRespMgr));
+ if (pObj == NULL) {
+ terrno = TSDB_CODE_OUT_OF_MEMORY;
+ return NULL;
+ }
memset(pObj, 0, sizeof(SSyncRespMgr));
pObj->pRespHash =
diff --git a/source/libs/sync/src/syncSnapshot.c b/source/libs/sync/src/syncSnapshot.c
index 5489a107e76082106961a0ed107413e5ec9b4a64..68d81813ac760a37b78bfdc12aee59624b296c48 100644
--- a/source/libs/sync/src/syncSnapshot.c
+++ b/source/libs/sync/src/syncSnapshot.c
@@ -35,7 +35,10 @@ SSyncSnapshotSender *snapshotSenderCreate(SSyncNode *pSyncNode, int32_t replicaI
SSyncSnapshotSender *pSender = NULL;
if (condition) {
pSender = taosMemoryMalloc(sizeof(SSyncSnapshotSender));
- ASSERT(pSender != NULL);
+ if (pSender == NULL) {
+ terrno = TSDB_CODE_OUT_OF_MEMORY;
+ return NULL;
+ }
memset(pSender, 0, sizeof(*pSender));
pSender->start = false;
@@ -583,7 +586,7 @@ static int32_t snapshotReceiverFinish(SSyncSnapshotReceiver *pReceiver, SyncSnap
&(pReceiver->snapshot));
if (code != 0) {
syncNodeErrorLog(pReceiver->pSyncNode, "snapshot stop writer true error");
- ASSERT(0);
+ // ASSERT(0);
return -1;
}
pReceiver->pWriter = NULL;
diff --git a/source/libs/sync/src/syncTimeout.c b/source/libs/sync/src/syncTimeout.c
index af15c377fbc36ae523776824962f282462ff2bc9..c3c8131cbb31c3d4ac0b9fb59afc7bc751096329 100644
--- a/source/libs/sync/src/syncTimeout.c
+++ b/source/libs/sync/src/syncTimeout.c
@@ -91,16 +91,16 @@ int32_t syncNodeOnTimeoutCb(SSyncNode* ths, SyncTimeout* pMsg) {
} else if (pMsg->timeoutType == SYNC_TIMEOUT_ELECTION) {
if (atomic_load_64(&ths->electTimerLogicClockUser) <= pMsg->logicClock) {
++(ths->electTimerCounter);
- sInfo("vgId:%d, sync timeout, type:election count:%d, electTimerLogicClockUser:%ld", ths->vgId,
- ths->electTimerCounter, ths->electTimerLogicClockUser);
+ sTrace("vgId:%d, sync timer, type:election count:%d, electTimerLogicClockUser:%ld", ths->vgId,
+ ths->electTimerCounter, ths->electTimerLogicClockUser);
syncNodeElect(ths);
}
} else if (pMsg->timeoutType == SYNC_TIMEOUT_HEARTBEAT) {
if (atomic_load_64(&ths->heartbeatTimerLogicClockUser) <= pMsg->logicClock) {
++(ths->heartbeatTimerCounter);
- sInfo("vgId:%d, sync timeout, type:replicate count:%d, heartbeatTimerLogicClockUser:%ld", ths->vgId,
- ths->heartbeatTimerCounter, ths->heartbeatTimerLogicClockUser);
+ sTrace("vgId:%d, sync timer, type:replicate count:%d, heartbeatTimerLogicClockUser:%ld", ths->vgId,
+ ths->heartbeatTimerCounter, ths->heartbeatTimerLogicClockUser);
syncNodeReplicate(ths, true);
}
} else {
diff --git a/source/libs/sync/src/syncUtil.c b/source/libs/sync/src/syncUtil.c
index 15e94baee4f4d7ff32d2d956cc04f9d2ca3e240f..6f234631dae8620df80fe5aba0366a5cb8ad62c7 100644
--- a/source/libs/sync/src/syncUtil.c
+++ b/source/libs/sync/src/syncUtil.c
@@ -26,7 +26,8 @@ uint64_t syncUtilAddr2U64(const char* host, uint16_t port) {
uint32_t hostU32 = taosGetIpv4FromFqdn(host);
if (hostU32 == (uint32_t)-1) {
- sError("Get IP address error");
+ sError("failed to resolve ipv4 addr. host:%s", host);
+ terrno = TSDB_CODE_TSC_INVALID_FQDN;
return -1;
}
@@ -57,7 +58,7 @@ void syncUtilU642Addr(uint64_t u64, char* host, size_t len, uint16_t* port) {
struct in_addr addr;
addr.s_addr = hostU32;
- snprintf(host, len, "%s", taosInetNtoa(addr));
+ taosInetNtoa(addr, host, len);
*port = (uint16_t)((u64 & 0x00000000FFFF0000) >> 16);
}
@@ -84,13 +85,18 @@ void syncUtilraftId2EpSet(const SRaftId* raftId, SEpSet* pEpSet) {
addEpIntoEpSet(pEpSet, host, port);
}
-void syncUtilnodeInfo2raftId(const SNodeInfo* pNodeInfo, SyncGroupId vgId, SRaftId* raftId) {
+bool syncUtilnodeInfo2raftId(const SNodeInfo* pNodeInfo, SyncGroupId vgId, SRaftId* raftId) {
uint32_t ipv4 = taosGetIpv4FromFqdn(pNodeInfo->nodeFqdn);
- ASSERT(ipv4 != 0xFFFFFFFF);
+ if (ipv4 == 0xFFFFFFFF || ipv4 == 1) {
+ sError("failed to resolve ipv4 addr. fqdn: %s", pNodeInfo->nodeFqdn);
+ terrno = TSDB_CODE_TSC_INVALID_FQDN;
+ return false;
+ }
char ipbuf[128] = {0};
tinet_ntoa(ipbuf, ipv4);
raftId->addr = syncUtilAddr2U64(ipbuf, pNodeInfo->nodePort);
raftId->vgId = vgId;
+ return true;
}
bool syncUtilSameId(const SRaftId* pId1, const SRaftId* pId2) {
@@ -310,4 +316,4 @@ void syncUtilJson2Line(char* jsonStr) {
q++;
}
}
-}
\ No newline at end of file
+}
diff --git a/source/libs/sync/src/syncVoteMgr.c b/source/libs/sync/src/syncVoteMgr.c
index 1d46d71a05b66d30fba02ec342014a56f388e73e..641bb32d2d56a0bf745c6cb4c5fb43ccaaaa6f75 100644
--- a/source/libs/sync/src/syncVoteMgr.c
+++ b/source/libs/sync/src/syncVoteMgr.c
@@ -24,7 +24,10 @@ static void voteGrantedClearVotes(SVotesGranted *pVotesGranted) {
SVotesGranted *voteGrantedCreate(SSyncNode *pSyncNode) {
SVotesGranted *pVotesGranted = taosMemoryMalloc(sizeof(SVotesGranted));
- ASSERT(pVotesGranted != NULL);
+ if (pVotesGranted == NULL) {
+ terrno = TSDB_CODE_OUT_OF_MEMORY;
+ return NULL;
+ }
memset(pVotesGranted, 0, sizeof(SVotesGranted));
pVotesGranted->replicas = &(pSyncNode->replicasId);
diff --git a/source/libs/sync/test/CMakeLists.txt b/source/libs/sync/test/CMakeLists.txt
index 72845d0c1d1a9378a3a189f4037c6fc646c8a536..b9cc7a391dde35e2569f30000752b3ef175fc824 100644
--- a/source/libs/sync/test/CMakeLists.txt
+++ b/source/libs/sync/test/CMakeLists.txt
@@ -57,6 +57,8 @@ add_executable(syncLeaderTransferTest "")
add_executable(syncReconfigFinishTest "")
add_executable(syncRestoreFromSnapshot "")
add_executable(syncRaftCfgIndexTest "")
+add_executable(syncHeartbeatTest "")
+add_executable(syncHeartbeatReplyTest "")
target_sources(syncTest
@@ -295,6 +297,14 @@ target_sources(syncRaftCfgIndexTest
PRIVATE
"syncRaftCfgIndexTest.cpp"
)
+target_sources(syncHeartbeatTest
+ PRIVATE
+ "syncHeartbeatTest.cpp"
+)
+target_sources(syncHeartbeatReplyTest
+ PRIVATE
+ "syncHeartbeatReplyTest.cpp"
+)
target_include_directories(syncTest
@@ -592,6 +602,16 @@ target_include_directories(syncRaftCfgIndexTest
"${TD_SOURCE_DIR}/include/libs/sync"
"${CMAKE_CURRENT_SOURCE_DIR}/../inc"
)
+target_include_directories(syncHeartbeatTest
+ PUBLIC
+ "${TD_SOURCE_DIR}/include/libs/sync"
+ "${CMAKE_CURRENT_SOURCE_DIR}/../inc"
+)
+target_include_directories(syncHeartbeatReplyTest
+ PUBLIC
+ "${TD_SOURCE_DIR}/include/libs/sync"
+ "${CMAKE_CURRENT_SOURCE_DIR}/../inc"
+)
target_link_libraries(syncTest
@@ -830,6 +850,14 @@ target_link_libraries(syncRaftCfgIndexTest
sync
gtest_main
)
+target_link_libraries(syncHeartbeatTest
+ sync
+ gtest_main
+)
+target_link_libraries(syncHeartbeatReplyTest
+ sync
+ gtest_main
+)
enable_testing()
diff --git a/source/libs/sync/test/syncHeartbeatReplyTest.cpp b/source/libs/sync/test/syncHeartbeatReplyTest.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..0ccd7b70bb8f552fa14c0a615993abbab678a8a0
--- /dev/null
+++ b/source/libs/sync/test/syncHeartbeatReplyTest.cpp
@@ -0,0 +1,105 @@
+#include
+#include
+#include "syncIO.h"
+#include "syncInt.h"
+#include "syncMessage.h"
+#include "syncUtil.h"
+
+void logTest() {
+ sTrace("--- sync log test: trace");
+ sDebug("--- sync log test: debug");
+ sInfo("--- sync log test: info");
+ sWarn("--- sync log test: warn");
+ sError("--- sync log test: error");
+ sFatal("--- sync log test: fatal");
+}
+
+SyncHeartbeatReply *createMsg() {
+ SyncHeartbeatReply *pMsg = syncHeartbeatReplyBuild(1000);
+ pMsg->srcId.addr = syncUtilAddr2U64("127.0.0.1", 1234);
+ pMsg->srcId.vgId = 100;
+ pMsg->destId.addr = syncUtilAddr2U64("127.0.0.1", 5678);
+ pMsg->destId.vgId = 100;
+
+ pMsg->term = 33;
+ pMsg->privateTerm = 44;
+ pMsg->startTime = taosGetTimestampMs();
+ return pMsg;
+}
+
+void test1() {
+ SyncHeartbeatReply *pMsg = createMsg();
+ syncHeartbeatReplyLog2((char *)"test1:", pMsg);
+ syncHeartbeatReplyDestroy(pMsg);
+}
+
+void test2() {
+ SyncHeartbeatReply *pMsg = createMsg();
+ uint32_t len = pMsg->bytes;
+ char * serialized = (char *)taosMemoryMalloc(len);
+ syncHeartbeatReplySerialize(pMsg, serialized, len);
+ SyncHeartbeatReply *pMsg2 = syncHeartbeatReplyBuild(1000);
+ syncHeartbeatReplyDeserialize(serialized, len, pMsg2);
+ syncHeartbeatReplyLog2((char *)"test2: syncHeartbeatReplySerialize -> syncHeartbeatReplyDeserialize ",
+ pMsg2);
+
+ taosMemoryFree(serialized);
+ syncHeartbeatReplyDestroy(pMsg);
+ syncHeartbeatReplyDestroy(pMsg2);
+}
+
+void test3() {
+ SyncHeartbeatReply *pMsg = createMsg();
+ uint32_t len;
+ char * serialized = syncHeartbeatReplySerialize2(pMsg, &len);
+ SyncHeartbeatReply *pMsg2 = syncHeartbeatReplyDeserialize2(serialized, len);
+ syncHeartbeatReplyLog2((char *)"test3: syncHeartbeatReplySerialize3 -> syncHeartbeatReplyDeserialize2 ",
+ pMsg2);
+
+ taosMemoryFree(serialized);
+ syncHeartbeatReplyDestroy(pMsg);
+ syncHeartbeatReplyDestroy(pMsg2);
+}
+
+void test4() {
+ SyncHeartbeatReply *pMsg = createMsg();
+ SRpcMsg rpcMsg;
+ syncHeartbeatReply2RpcMsg(pMsg, &rpcMsg);
+ SyncHeartbeatReply *pMsg2 = syncHeartbeatReplyBuild(1000);
+ syncHeartbeatReplyFromRpcMsg(&rpcMsg, pMsg2);
+ syncHeartbeatReplyLog2((char *)"test4: syncHeartbeatReply2RpcMsg -> syncHeartbeatReplyFromRpcMsg ",
+ pMsg2);
+
+ rpcFreeCont(rpcMsg.pCont);
+ syncHeartbeatReplyDestroy(pMsg);
+ syncHeartbeatReplyDestroy(pMsg2);
+}
+
+void test5() {
+ SyncHeartbeatReply *pMsg = createMsg();
+ SRpcMsg rpcMsg;
+ syncHeartbeatReply2RpcMsg(pMsg, &rpcMsg);
+ SyncHeartbeatReply *pMsg2 = syncHeartbeatReplyFromRpcMsg2(&rpcMsg);
+ syncHeartbeatReplyLog2((char *)"test5: syncHeartbeatReply2RpcMsg -> syncHeartbeatReplyFromRpcMsg2 ",
+ pMsg2);
+
+ rpcFreeCont(rpcMsg.pCont);
+ syncHeartbeatReplyDestroy(pMsg);
+ syncHeartbeatReplyDestroy(pMsg2);
+}
+
+int main() {
+ gRaftDetailLog = true;
+
+ tsAsyncLog = 0;
+ sDebugFlag = DEBUG_TRACE + DEBUG_SCREEN + DEBUG_FILE;
+ logTest();
+
+ test1();
+ test2();
+ test3();
+ test4();
+ test5();
+
+ return 0;
+}
diff --git a/source/libs/sync/test/syncHeartbeatTest.cpp b/source/libs/sync/test/syncHeartbeatTest.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..d910c828f1e13f4e6484c7b8ad7ea6758f626750
--- /dev/null
+++ b/source/libs/sync/test/syncHeartbeatTest.cpp
@@ -0,0 +1,99 @@
+#include
+#include
+#include "syncIO.h"
+#include "syncInt.h"
+#include "syncMessage.h"
+#include "syncUtil.h"
+
+void logTest() {
+ sTrace("--- sync log test: trace");
+ sDebug("--- sync log test: debug");
+ sInfo("--- sync log test: info");
+ sWarn("--- sync log test: warn");
+ sError("--- sync log test: error");
+ sFatal("--- sync log test: fatal");
+}
+
+SyncHeartbeat *createMsg() {
+ SyncHeartbeat *pMsg = syncHeartbeatBuild(789);
+ pMsg->srcId.addr = syncUtilAddr2U64("127.0.0.1", 1234);
+ pMsg->srcId.vgId = 100;
+ pMsg->destId.addr = syncUtilAddr2U64("127.0.0.1", 5678);
+ pMsg->destId.vgId = 100;
+ pMsg->term = 8;
+ pMsg->commitIndex = 33;
+ pMsg->privateTerm = 44;
+ return pMsg;
+}
+
+void test1() {
+ SyncHeartbeat *pMsg = createMsg();
+ syncHeartbeatLog2((char *)"test1:", pMsg);
+ syncHeartbeatDestroy(pMsg);
+}
+
+void test2() {
+ SyncHeartbeat *pMsg = createMsg();
+ uint32_t len = pMsg->bytes;
+ char * serialized = (char *)taosMemoryMalloc(len);
+ syncHeartbeatSerialize(pMsg, serialized, len);
+ SyncHeartbeat *pMsg2 = syncHeartbeatBuild(789);
+ syncHeartbeatDeserialize(serialized, len, pMsg2);
+ syncHeartbeatLog2((char *)"test2: syncHeartbeatSerialize -> syncHeartbeatDeserialize ", pMsg2);
+
+ taosMemoryFree(serialized);
+ syncHeartbeatDestroy(pMsg);
+ syncHeartbeatDestroy(pMsg2);
+}
+
+void test3() {
+ SyncHeartbeat *pMsg = createMsg();
+ uint32_t len;
+ char * serialized = syncHeartbeatSerialize2(pMsg, &len);
+ SyncHeartbeat *pMsg2 = syncHeartbeatDeserialize2(serialized, len);
+ syncHeartbeatLog2((char *)"test3: syncHeartbeatSerialize2 -> syncHeartbeatDeserialize2 ", pMsg2);
+
+ taosMemoryFree(serialized);
+ syncHeartbeatDestroy(pMsg);
+ syncHeartbeatDestroy(pMsg2);
+}
+
+void test4() {
+ SyncHeartbeat *pMsg = createMsg();
+ SRpcMsg rpcMsg;
+ syncHeartbeat2RpcMsg(pMsg, &rpcMsg);
+ SyncHeartbeat *pMsg2 = (SyncHeartbeat *)taosMemoryMalloc(rpcMsg.contLen);
+ syncHeartbeatFromRpcMsg(&rpcMsg, pMsg2);
+ syncHeartbeatLog2((char *)"test4: syncHeartbeat2RpcMsg -> syncHeartbeatFromRpcMsg ", pMsg2);
+
+ rpcFreeCont(rpcMsg.pCont);
+ syncHeartbeatDestroy(pMsg);
+ syncHeartbeatDestroy(pMsg2);
+}
+
+void test5() {
+ SyncHeartbeat *pMsg = createMsg();
+ SRpcMsg rpcMsg;
+ syncHeartbeat2RpcMsg(pMsg, &rpcMsg);
+ SyncHeartbeat *pMsg2 =syncHeartbeatFromRpcMsg2(&rpcMsg);
+ syncHeartbeatLog2((char *)"test5: syncHeartbeat2RpcMsg -> syncHeartbeatFromRpcMsg2 ", pMsg2);
+
+ rpcFreeCont(rpcMsg.pCont);
+ syncHeartbeatDestroy(pMsg);
+ syncHeartbeatDestroy(pMsg2);
+}
+
+int main() {
+ tsAsyncLog = 0;
+ sDebugFlag = DEBUG_TRACE + DEBUG_SCREEN + DEBUG_FILE;
+ gRaftDetailLog = true;
+ logTest();
+
+ test1();
+ test2();
+ test3();
+ test4();
+ test5();
+
+ return 0;
+}
diff --git a/source/libs/tdb/src/db/tdbBtree.c b/source/libs/tdb/src/db/tdbBtree.c
index 2ab5fd2e9c2f1156ab9bba3c0ed384807f25e8cd..c5204ef59e83f9eb3585e38d9adb031e16f6945a 100644
--- a/source/libs/tdb/src/db/tdbBtree.c
+++ b/source/libs/tdb/src/db/tdbBtree.c
@@ -489,7 +489,7 @@ static int tdbBtreeBalanceDeeper(SBTree *pBt, SPage *pRoot, SPage **ppChild, TXN
}
// Copy the root page content to the child page
- tdbPageCopy(pRoot, pChild);
+ tdbPageCopy(pRoot, pChild, 0);
// Reinitialize the root page
zArg.flags = TDB_BTREE_ROOT;
@@ -509,7 +509,7 @@ static int tdbBtreeBalanceDeeper(SBTree *pBt, SPage *pRoot, SPage **ppChild, TXN
static int tdbBtreeBalanceNonRoot(SBTree *pBt, SPage *pParent, int idx, TXN *pTxn) {
int ret;
- int nOlds;
+ int nOlds, pageIdx;
SPage *pOlds[3] = {0};
SCell *pDivCell[3] = {0};
int szDivCell[3];
@@ -742,7 +742,7 @@ static int tdbBtreeBalanceNonRoot(SBTree *pBt, SPage *pParent, int idx, TXN *pTx
for (int i = 0; i < nOlds; i++) {
tdbPageCreate(pOlds[0]->pageSize, &pOldsCopy[i], tdbDefaultMalloc, NULL);
tdbBtreeInitPage(pOldsCopy[i], &iarg, 0);
- tdbPageCopy(pOlds[i], pOldsCopy[i]);
+ tdbPageCopy(pOlds[i], pOldsCopy[i], 0);
}
iNew = 0;
nNewCells = 0;
@@ -782,6 +782,11 @@ static int tdbBtreeBalanceNonRoot(SBTree *pBt, SPage *pParent, int idx, TXN *pTx
pBt);
tdbPageInsertCell(pParent, sIdx++, pNewCell, szNewCell, 0);
tdbOsFree(pNewCell);
+
+ if (TDB_CELLDECODER_FREE_VAL(&cd)) {
+ tdbFree(cd.pVal);
+ cd.pVal = NULL;
+ }
}
// move to next new page
@@ -835,7 +840,11 @@ static int tdbBtreeBalanceNonRoot(SBTree *pBt, SPage *pParent, int idx, TXN *pTx
i8 flags = TDB_BTREE_ROOT | TDB_BTREE_PAGE_IS_LEAF(pNews[0]);
// copy content to the parent page
tdbBtreeInitPage(pParent, &(SBtreeInitPageArg){.flags = flags, .pBt = pBt}, 0);
- tdbPageCopy(pNews[0], pParent);
+ tdbPageCopy(pNews[0], pParent, 1);
+
+ if (!TDB_BTREE_PAGE_IS_LEAF(pNews[0])) {
+ ((SIntHdr *)(pParent->pData))->pgno = ((SIntHdr *)(pNews[0]->pData))->pgno;
+ }
}
for (int i = 0; i < 3; i++) {
@@ -844,13 +853,11 @@ static int tdbBtreeBalanceNonRoot(SBTree *pBt, SPage *pParent, int idx, TXN *pTx
}
}
- // TODO: here is not corrent for drop case
- for (int i = 0; i < nNews; i++) {
- if (i < nOlds) {
- tdbPagerReturnPage(pBt->pPager, pOlds[i], pTxn);
- } else {
- tdbPagerReturnPage(pBt->pPager, pNews[i], pTxn);
- }
+ for (pageIdx = 0; pageIdx < nOlds; ++pageIdx) {
+ tdbPagerReturnPage(pBt->pPager, pOlds[pageIdx], pTxn);
+ }
+ for (; pageIdx < nNews; ++pageIdx) {
+ tdbPagerReturnPage(pBt->pPager, pNews[pageIdx], pTxn);
}
return 0;
diff --git a/source/libs/tdb/src/db/tdbPCache.c b/source/libs/tdb/src/db/tdbPCache.c
index 76d95cbb91221b6fac5a72a0ddeb0e5493158102..62541585911a5dfdc84c0d2fb84724c83efc5475 100644
--- a/source/libs/tdb/src/db/tdbPCache.c
+++ b/source/libs/tdb/src/db/tdbPCache.c
@@ -98,6 +98,7 @@ SPage *tdbPCacheFetch(SPCache *pCache, const SPgid *pPgid, TXN *pTxn) {
// printf("thread %" PRId64 " fetch page %d pgno %d pPage %p nRef %d\n", taosGetSelfPthreadId(), pPage->id,
// TDB_PAGE_PGNO(pPage), pPage, nRef);
+ tdbDebug("pcache/fetch page %p/%d/%d/%d", pPage, TDB_PAGE_PGNO(pPage), pPage->id, nRef);
return pPage;
}
diff --git a/source/libs/tdb/src/db/tdbPage.c b/source/libs/tdb/src/db/tdbPage.c
index a3f376b929291780bdd57cbf99f5db6035e70aff..f4878ea861b342724896e844d4796d4bdd598c01 100644
--- a/source/libs/tdb/src/db/tdbPage.c
+++ b/source/libs/tdb/src/db/tdbPage.c
@@ -80,6 +80,7 @@ int tdbPageDestroy(SPage *pPage, void (*xFree)(void *arg, void *ptr), void *arg)
ASSERT(xFree);
for (int iOvfl = 0; iOvfl < pPage->nOverflow; iOvfl++) {
+ tdbDebug("tdbPage/destroy/free ovfl cell: %p/%p", pPage->apOvfl[iOvfl], pPage);
tdbOsFree(pPage->apOvfl[iOvfl]);
}
@@ -152,7 +153,7 @@ int tdbPageInsertCell(SPage *pPage, int idx, SCell *pCell, int szCell, u8 asOvfl
pNewCell = (SCell *)tdbOsMalloc(szCell);
memcpy(pNewCell, pCell, szCell);
- tdbDebug("tdbPage/new ovfl cell: %p", pNewCell);
+ tdbDebug("tdbPage/insert/new ovfl cell: %p/%p", pNewCell, pPage);
pPage->apOvfl[iOvfl] = pNewCell;
pPage->aiOvfl[iOvfl] = idx;
@@ -202,7 +203,7 @@ int tdbPageDropCell(SPage *pPage, int idx, TXN *pTxn, SBTree *pBt) {
if (pPage->aiOvfl[iOvfl] == idx) {
// remove the over flow cell
tdbOsFree(pPage->apOvfl[iOvfl]);
- tdbDebug("tdbPage/free ovfl cell: %p", pPage->apOvfl[iOvfl]);
+ tdbDebug("tdbPage/drop/free ovfl cell: %p", pPage->apOvfl[iOvfl]);
for (; (++iOvfl) < pPage->nOverflow;) {
pPage->aiOvfl[iOvfl - 1] = pPage->aiOvfl[iOvfl] - 1;
pPage->apOvfl[iOvfl - 1] = pPage->apOvfl[iOvfl];
@@ -229,7 +230,7 @@ int tdbPageDropCell(SPage *pPage, int idx, TXN *pTxn, SBTree *pBt) {
return 0;
}
-void tdbPageCopy(SPage *pFromPage, SPage *pToPage) {
+void tdbPageCopy(SPage *pFromPage, SPage *pToPage, int deepCopyOvfl) {
int delta, nFree;
pToPage->pFreeStart = pToPage->pPageHdr + (pFromPage->pFreeStart - pFromPage->pPageHdr);
@@ -250,8 +251,16 @@ void tdbPageCopy(SPage *pFromPage, SPage *pToPage) {
// Copy the overflow cells
for (int iOvfl = 0; iOvfl < pFromPage->nOverflow; iOvfl++) {
+ SCell *pNewCell = pFromPage->apOvfl[iOvfl];
+ if (deepCopyOvfl) {
+ int szCell = (*pFromPage->xCellSize)(pFromPage, pFromPage->apOvfl[iOvfl], 0, NULL, NULL);
+ pNewCell = (SCell *)tdbOsMalloc(szCell);
+ memcpy(pNewCell, pFromPage->apOvfl[iOvfl], szCell);
+ tdbDebug("tdbPage/copy/new ovfl cell: %p/%p/%p", pNewCell, pToPage, pFromPage);
+ }
+
+ pToPage->apOvfl[iOvfl] = pNewCell;
pToPage->aiOvfl[iOvfl] = pFromPage->aiOvfl[iOvfl];
- pToPage->apOvfl[iOvfl] = pFromPage->apOvfl[iOvfl];
}
pToPage->nOverflow = pFromPage->nOverflow;
}
diff --git a/source/libs/tdb/src/db/tdbPager.c b/source/libs/tdb/src/db/tdbPager.c
index 4de99e8b1bde34c7f6583d0aedc205074d7c1cca..04711eb6a0af802f61cc39da3fba1282dcaf0c83 100644
--- a/source/libs/tdb/src/db/tdbPager.c
+++ b/source/libs/tdb/src/db/tdbPager.c
@@ -34,6 +34,22 @@ static int tdbPagerInitPage(SPager *pPager, SPage *pPage, int (*initPage)(SPage
static int tdbPagerWritePageToJournal(SPager *pPager, SPage *pPage);
static int tdbPagerWritePageToDB(SPager *pPager, SPage *pPage);
+static FORCE_INLINE int32_t pageCmpFn(const void *lhs, const void *rhs) {
+ SPage *pPageL = (SPage *)(((uint8_t *)lhs) - sizeof(SRBTreeNode));
+ SPage *pPageR = (SPage *)(((uint8_t *)rhs) - sizeof(SRBTreeNode));
+
+ SPgno pgnoL = TDB_PAGE_PGNO(pPageL);
+ SPgno pgnoR = TDB_PAGE_PGNO(pPageR);
+
+ if (pgnoL < pgnoR) {
+ return -1;
+ } else if (pgnoL > pgnoR) {
+ return 1;
+ } else {
+ return 0;
+ }
+}
+
int tdbPagerOpen(SPCache *pCache, const char *fileName, SPager **ppPager) {
uint8_t *pPtr;
SPager *pPager;
@@ -83,6 +99,8 @@ int tdbPagerOpen(SPCache *pCache, const char *fileName, SPager **ppPager) {
ret = tdbGetFileSize(pPager->fd, pPager->pageSize, &(pPager->dbOrigSize));
pPager->dbFileSize = pPager->dbOrigSize;
+ tRBTreeCreate(&pPager->rbt, pageCmpFn);
+
*ppPager = pPager;
return 0;
}
@@ -166,10 +184,11 @@ int tdbPagerWrite(SPager *pPager, SPage *pPage) {
// ref page one more time so the page will not be release
tdbRefPage(pPage);
+ tdbDebug("pcache/mdirty page %p/%d/%d", pPage, TDB_PAGE_PGNO(pPage), pPage->id);
// Set page as dirty
pPage->isDirty = 1;
-
+ /*
// Add page to dirty list(TODO: NOT use O(n^2) algorithm)
for (ppPage = &pPager->pDirty; (*ppPage) && TDB_PAGE_PGNO(*ppPage) < TDB_PAGE_PGNO(pPage);
ppPage = &((*ppPage)->pDirtyNext)) {
@@ -184,6 +203,8 @@ int tdbPagerWrite(SPager *pPager, SPage *pPage) {
ASSERT(*ppPage == NULL || TDB_PAGE_PGNO(*ppPage) > TDB_PAGE_PGNO(pPage));
pPage->pDirtyNext = *ppPage;
*ppPage = pPage;
+ */
+ tRBTreePut(&pPager->rbt, (SRBTreeNode *)pPage);
// Write page to journal if neccessary
if (TDB_PAGE_PGNO(pPage) <= pPager->dbOrigSize) {
@@ -228,8 +249,10 @@ int tdbPagerCommit(SPager *pPager, TXN *pTxn) {
}
// loop to write the dirty pages to file
- for (pPage = pPager->pDirty; pPage; pPage = pPage->pDirtyNext) {
- // TODO: update the page footer
+ SRBTreeIter iter = tRBTreeIterCreate(&pPager->rbt, 1);
+ SRBTreeNode *pNode = NULL;
+ while ((pNode = tRBTreeIterNext(&iter)) != NULL) {
+ pPage = (SPage *)pNode;
ret = tdbPagerWritePageToDB(pPager, pPage);
if (ret < 0) {
ASSERT(0);
@@ -237,19 +260,22 @@ int tdbPagerCommit(SPager *pPager, TXN *pTxn) {
}
}
- tdbTrace("tdbttl commit:%p, %d", pPager, pPager->dbOrigSize);
+ tdbTrace("tdbttl commit:%p, %d/%d", pPager, pPager->dbOrigSize, pPager->dbFileSize);
pPager->dbOrigSize = pPager->dbFileSize;
// release the page
- for (pPage = pPager->pDirty; pPage; pPage = pPager->pDirty) {
- pPager->pDirty = pPage->pDirtyNext;
- pPage->pDirtyNext = NULL;
+ iter = tRBTreeIterCreate(&pPager->rbt, 1);
+ while ((pNode = tRBTreeIterNext(&iter)) != NULL) {
+ pPage = (SPage *)pNode;
pPage->isDirty = 0;
+ tRBTreeDrop(&pPager->rbt, (SRBTreeNode *)pPage);
tdbPCacheRelease(pPager->pCache, pPage, pTxn);
}
+ tRBTreeCreate(&pPager->rbt, pageCmpFn);
+
// sync the db file
tdbOsFSync(pPager->fd);
@@ -307,15 +333,19 @@ int tdbPagerAbort(SPager *pPager, TXN *pTxn) {
}
*/
// 3, release the dirty pages
- for (pPage = pPager->pDirty; pPage; pPage = pPager->pDirty) {
- pPager->pDirty = pPage->pDirtyNext;
- pPage->pDirtyNext = NULL;
+ SRBTreeIter iter = tRBTreeIterCreate(&pPager->rbt, 1);
+ SRBTreeNode *pNode = NULL;
+ while ((pNode = tRBTreeIterNext(&iter)) != NULL) {
+ pPage = (SPage *)pNode;
pPage->isDirty = 0;
+ tRBTreeDrop(&pPager->rbt, (SRBTreeNode *)pPage);
tdbPCacheRelease(pPager->pCache, pPage, pTxn);
}
+ tRBTreeCreate(&pPager->rbt, pageCmpFn);
+
// 4, remove the journal file
tdbOsClose(pPager->jfd);
tdbOsRemove(pPager->jFileName);
@@ -496,12 +526,19 @@ static int tdbPagerWritePageToJournal(SPager *pPager, SPage *pPage) {
return 0;
}
-
+/*
+struct TdFile {
+ TdThreadRwlock rwlock;
+ int refId;
+ int fd;
+ FILE *fp;
+} TdFile;
+*/
static int tdbPagerWritePageToDB(SPager *pPager, SPage *pPage) {
i64 offset;
int ret;
- offset = pPage->pageSize * (TDB_PAGE_PGNO(pPage) - 1);
+ offset = (i64)pPage->pageSize * (TDB_PAGE_PGNO(pPage) - 1);
if (tdbOsLSeek(pPager->fd, offset, SEEK_SET) < 0) {
ASSERT(0);
return -1;
@@ -513,6 +550,7 @@ static int tdbPagerWritePageToDB(SPager *pPager, SPage *pPage) {
return -1;
}
+ // pwrite(pPager->fd->fd, pPage->pData, pPage->pageSize, offset);
return 0;
}
diff --git a/source/libs/tdb/src/inc/tdbInt.h b/source/libs/tdb/src/inc/tdbInt.h
index 49126b80b6e5dd11f30a7cddf581f42994db7bec..29a9665c156a4514846c5549e7a90d97a8df4984 100644
--- a/source/libs/tdb/src/inc/tdbInt.h
+++ b/source/libs/tdb/src/inc/tdbInt.h
@@ -19,6 +19,7 @@
#include "tdb.h"
#include "tlog.h"
+#include "trbtree.h"
#ifdef __cplusplus
extern "C" {
@@ -256,6 +257,7 @@ typedef struct {
#pragma pack(pop)
struct SPage {
+ SRBTreeNode node; // must be the first field for pageCmpFn to work
tdb_spinlock_t lock;
int pageSize;
u8 *pData;
@@ -280,13 +282,13 @@ struct SPage {
static inline i32 tdbRefPage(SPage *pPage) {
i32 nRef = atomic_add_fetch_32(&((pPage)->nRef), 1);
- tdbTrace("ref page %d, nRef %d", pPage->id, nRef);
+ // tdbTrace("ref page %p/%d, nRef %d", pPage, pPage->id, nRef);
return nRef;
}
static inline i32 tdbUnrefPage(SPage *pPage) {
i32 nRef = atomic_sub_fetch_32(&((pPage)->nRef), 1);
- tdbTrace("unref page %d, nRef %d", pPage->id, nRef);
+ // tdbTrace("unref page %p/%d, nRef %d", pPage, pPage->id, nRef);
return nRef;
}
@@ -331,7 +333,7 @@ void tdbPageInit(SPage *pPage, u8 szAmHdr, int (*xCellSize)(const SPage *, SCell
int tdbPageInsertCell(SPage *pPage, int idx, SCell *pCell, int szCell, u8 asOvfl);
int tdbPageDropCell(SPage *pPage, int idx, TXN *pTxn, SBTree *pBt);
int tdbPageUpdateCell(SPage *pPage, int idx, SCell *pCell, int szCell, TXN *pTxn, SBTree *pBt);
-void tdbPageCopy(SPage *pFromPage, SPage *pToPage);
+void tdbPageCopy(SPage *pFromPage, SPage *pToPage, int copyOvflCells);
int tdbPageCapacity(int pageSize, int amHdrSize);
static inline SCell *tdbPageGetCell(SPage *pPage, int idx) {
@@ -389,6 +391,7 @@ struct SPager {
SPgno dbFileSize;
SPgno dbOrigSize;
SPage *pDirty;
+ SRBTree rbt;
u8 inTran;
SPager *pNext; // used by TDB
SPager *pHashNext; // used by TDB
diff --git a/source/libs/tfs/src/tfs.c b/source/libs/tfs/src/tfs.c
index 62aec219df57f1a8edd11456ca631e315d553803..4600e5e568fc5644024c633e08328a54247b28b5 100644
--- a/source/libs/tfs/src/tfs.c
+++ b/source/libs/tfs/src/tfs.c
@@ -113,6 +113,8 @@ SDiskSize tfsGetSize(STfs *pTfs) {
return size;
}
+int32_t tfsGetLevel(STfs *pTfs) { return pTfs->nlevel; }
+
int32_t tfsAllocDisk(STfs *pTfs, int32_t expLevel, SDiskID *pDiskId) {
pDiskId->level = expLevel;
pDiskId->id = -1;
diff --git a/source/libs/transport/src/thttp.c b/source/libs/transport/src/thttp.c
index 386ea95dd795b93bcaa2826d471a6e4c97f81b7b..c1d8dbef62f2111fa1194bd66e8e5949a7b3303a 100644
--- a/source/libs/transport/src/thttp.c
+++ b/source/libs/transport/src/thttp.c
@@ -21,6 +21,7 @@
#include "taoserror.h"
#include "tlog.h"
+// clang-format on
#define HTTP_RECV_BUF_SIZE 1024
@@ -29,7 +30,7 @@ typedef struct SHttpClient {
uv_tcp_t tcp;
uv_write_t req;
uv_buf_t* wbuf;
- char *rbuf;
+ char* rbuf;
char* addr;
uint16_t port;
} SHttpClient;
@@ -125,40 +126,50 @@ _OVER:
return code;
}
-static void destroyHttpClient(SHttpClient* cli) {
+static FORCE_INLINE void destroyHttpClient(SHttpClient* cli) {
taosMemoryFree(cli->wbuf);
taosMemoryFree(cli->rbuf);
taosMemoryFree(cli->addr);
taosMemoryFree(cli);
-
}
-static void clientCloseCb(uv_handle_t* handle) {
+static FORCE_INLINE void clientCloseCb(uv_handle_t* handle) {
SHttpClient* cli = handle->data;
destroyHttpClient(cli);
}
-static void clientAllocBuffCb(uv_handle_t *handle, size_t suggested_size, uv_buf_t *buf) {
- SHttpClient* cli = handle->data;
- buf->base = cli->rbuf;
- buf->len = HTTP_RECV_BUF_SIZE;
+static FORCE_INLINE void clientAllocBuffCb(uv_handle_t* handle, size_t suggested_size, uv_buf_t* buf) {
+ SHttpClient* cli = handle->data;
+ buf->base = cli->rbuf;
+ buf->len = HTTP_RECV_BUF_SIZE;
}
-static void clientRecvCb(uv_stream_t* handle, ssize_t nread, const uv_buf_t *buf) {
- SHttpClient* cli = handle->data;
+static FORCE_INLINE void clientRecvCb(uv_stream_t* handle, ssize_t nread, const uv_buf_t* buf) {
+ SHttpClient* cli = handle->data;
if (nread < 0) {
uError("http-report recv error:%s", uv_err_name(nread));
} else {
- uTrace("http-report succ to recv %d bytes, just ignore it", nread);
+ uTrace("http-report succ to recv %d bytes", nread);
}
uv_close((uv_handle_t*)&cli->tcp, clientCloseCb);
-}
+}
static void clientSentCb(uv_write_t* req, int32_t status) {
SHttpClient* cli = req->data;
if (status != 0) {
terrno = TAOS_SYSTEM_ERROR(status);
uError("http-report failed to send data %s", uv_strerror(status));
+ uv_close((uv_handle_t*)&cli->tcp, clientCloseCb);
+ return;
} else {
uTrace("http-report succ to send data");
}
- uv_read_start((uv_stream_t *)&cli->tcp, clientAllocBuffCb, clientRecvCb);
+ status = uv_read_start((uv_stream_t*)&cli->tcp, clientAllocBuffCb, clientRecvCb);
+ if (status != 0) {
+ terrno = TAOS_SYSTEM_ERROR(status);
+ uError("http-report failed to recv data,reason:%s, dst:%s:%d", uv_strerror(status), cli->addr, cli->port);
+ if (!uv_is_closing((uv_handle_t*)&cli->tcp)) {
+ uv_close((uv_handle_t*)&cli->tcp, clientCloseCb);
+ } else {
+ destroyHttpClient(cli);
+ }
+ }
}
static void clientConnCb(uv_connect_t* req, int32_t status) {
SHttpClient* cli = req->data;
@@ -168,10 +179,19 @@ static void clientConnCb(uv_connect_t* req, int32_t status) {
uv_close((uv_handle_t*)&cli->tcp, clientCloseCb);
return;
}
- uv_write(&cli->req, (uv_stream_t*)&cli->tcp, cli->wbuf, 2, clientSentCb);
+ status = uv_write(&cli->req, (uv_stream_t*)&cli->tcp, cli->wbuf, 2, clientSentCb);
+ if (0 != status) {
+ terrno = TAOS_SYSTEM_ERROR(status);
+ uError("http-report failed to send data,reason:%s, dst:%s:%d", uv_strerror(status), cli->addr, cli->port);
+ if (!uv_is_closing((uv_handle_t*)&cli->tcp)) {
+ uv_close((uv_handle_t*)&cli->tcp, clientCloseCb);
+ } else {
+ destroyHttpClient(cli);
+ }
+ }
}
-static int32_t taosBuildDstAddr(const char* server, uint16_t port, struct sockaddr_in* dest) {
+static FORCE_INLINE int32_t taosBuildDstAddr(const char* server, uint16_t port, struct sockaddr_in* dest) {
uint32_t ip = taosGetIpv4FromFqdn(server);
if (ip == 0xffffffff) {
terrno = TAOS_SYSTEM_ERROR(errno);
@@ -210,7 +230,7 @@ int32_t taosSendHttpReport(const char* server, uint16_t port, char* pCont, int32
cli->tcp.data = cli;
cli->req.data = cli;
cli->wbuf = wb;
- cli->rbuf = taosMemoryCalloc(1, HTTP_RECV_BUF_SIZE);
+ cli->rbuf = taosMemoryCalloc(1, HTTP_RECV_BUF_SIZE);
cli->addr = tstrdup(server);
cli->port = port;
@@ -218,17 +238,25 @@ int32_t taosSendHttpReport(const char* server, uint16_t port, char* pCont, int32
uv_tcp_init(loop, &cli->tcp);
// set up timeout to avoid stuck;
int32_t fd = taosCreateSocketWithTimeout(5);
- uv_tcp_open((uv_tcp_t*)&cli->tcp, fd);
- int32_t ret = uv_tcp_connect(&cli->conn, &cli->tcp, (const struct sockaddr*)&dest, clientConnCb);
+ int ret = uv_tcp_open((uv_tcp_t*)&cli->tcp, fd);
if (ret != 0) {
- uError("http-report failed to connect to server, reason:%s, dst:%s:%d", uv_strerror(ret), cli->addr, cli->port);
+ uError("http-report failed to open socket, reason:%s, dst:%s:%d", uv_strerror(ret), cli->addr, cli->port);
destroyHttpClient(cli);
uv_stop(loop);
+ terrno = TAOS_SYSTEM_ERROR(ret);
+ } else {
+ ret = uv_tcp_connect(&cli->conn, &cli->tcp, (const struct sockaddr*)&dest, clientConnCb);
+ if (ret != 0) {
+ uError("http-report failed to connect to http-server, reason:%s, dst:%s:%d", uv_strerror(ret), cli->addr,
+ cli->port);
+ destroyHttpClient(cli);
+ uv_stop(loop);
+ terrno = TAOS_SYSTEM_ERROR(ret);
+ }
}
uv_run(loop, UV_RUN_DEFAULT);
uv_loop_close(loop);
return terrno;
}
-// clang-format on
diff --git a/source/libs/transport/src/trans.c b/source/libs/transport/src/trans.c
index 0a0dcef378bde92a18b9455b203774a3c28aa428..9e0a8f2a10c282cc8ef20e59f89aed477d5c1eef 100644
--- a/source/libs/transport/src/trans.c
+++ b/source/libs/transport/src/trans.c
@@ -43,7 +43,7 @@ void* rpcOpen(const SRpcInit* pInit) {
return NULL;
}
if (pInit->label) {
- tstrncpy(pRpc->label, pInit->label, strlen(pInit->label) + 1);
+ tstrncpy(pRpc->label, pInit->label, TSDB_LABEL_LEN);
}
// register callback handle
pRpc->cfp = pInit->cfp;
diff --git a/source/libs/transport/src/transCli.c b/source/libs/transport/src/transCli.c
index 7052b0b915137678d6aff528a26540a973cd74f5..191011111d289f2d261f3ac21299c1e2a4493836 100644
--- a/source/libs/transport/src/transCli.c
+++ b/source/libs/transport/src/transCli.c
@@ -16,7 +16,7 @@
#include "transComm.h"
typedef struct SConnList {
- queue conn;
+ queue conns;
int32_t size;
} SConnList;
@@ -69,11 +69,9 @@ typedef struct SCliThrd {
SAsyncPool* asyncPool;
uv_prepare_t* prepare;
void* pool; // conn pool
-
+ // timer handles
SArray* timerList;
-
// msg queue
-
queue msg;
TdThreadMutex msgMtx;
SDelayQueue* delayQueue;
@@ -107,11 +105,11 @@ static void doCloseIdleConn(void* param);
static void cliReadTimeoutCb(uv_timer_t* handle);
// register timer in each thread to clear expire conn
// static void cliTimeoutCb(uv_timer_t* handle);
-// alloc buf for recv
-static void cliAllocRecvBufferCb(uv_handle_t* handle, size_t suggested_size, uv_buf_t* buf);
-// callback after read nbytes from socket
+// alloc buffer for recv
+static FORCE_INLINE void cliAllocRecvBufferCb(uv_handle_t* handle, size_t suggested_size, uv_buf_t* buf);
+// callback after recv nbytes from socket
static void cliRecvCb(uv_stream_t* cli, ssize_t nread, const uv_buf_t* buf);
-// callback after write data to socket
+// callback after send data to socket
static void cliSendCb(uv_write_t* req, int status);
// callback after conn to server
static void cliConnCb(uv_connect_t* req, int status);
@@ -129,19 +127,14 @@ static SCliConn* cliCreateConn(SCliThrd* thrd);
static void cliDestroyConn(SCliConn* pConn, bool clear /*clear tcp handle or not*/);
static void cliDestroy(uv_handle_t* handle);
static void cliSend(SCliConn* pConn);
+static void cliDestroyConnMsgs(SCliConn* conn, bool destroy);
-static bool cliIsEpsetUpdated(int32_t code, STransConnCtx* pCtx) {
- if (code != 0) return false;
- if (pCtx->retryCnt == 0) return false;
- if (transEpSetIsEqual(&pCtx->epSet, &pCtx->origEpSet)) return false;
- return true;
-}
+// cli util func
+static FORCE_INLINE bool cliIsEpsetUpdated(int32_t code, STransConnCtx* pCtx);
+static FORCE_INLINE void cliMayCvtFqdnToIp(SEpSet* pEpSet, SCvtAddr* pCvtAddr);
+
+static FORCE_INLINE int32_t cliBuildExceptResp(SCliMsg* pMsg, STransMsg* resp);
-void cliMayCvtFqdnToIp(SEpSet* pEpSet, SCvtAddr* pCvtAddr);
-/*
- * set TCP connection timeout per-socket level
- */
-static int cliCreateSocket();
// process data read from server, add decompress etc later
static void cliHandleResp(SCliConn* conn);
// handle except about conn
@@ -155,13 +148,11 @@ static void cliHandleUpdate(SCliMsg* pMsg, SCliThrd* pThrd);
static void (*cliAsyncHandle[])(SCliMsg* pMsg, SCliThrd* pThrd) = {cliHandleReq, cliHandleQuit, cliHandleRelease, NULL,
cliHandleUpdate};
-static void cliSendQuit(SCliThrd* thrd);
-static void destroyUserdata(STransMsg* userdata);
-
-static int cliRBChoseIdx(STrans* pTransInst);
+static FORCE_INLINE void destroyUserdata(STransMsg* userdata);
+static FORCE_INLINE void destroyCmsg(void* cmsg);
+static FORCE_INLINE int cliRBChoseIdx(STrans* pTransInst);
+static FORCE_INLINE void transDestroyConnCtx(STransConnCtx* ctx);
-static void destroyCmsg(void* cmsg);
-static void transDestroyConnCtx(STransConnCtx* ctx);
// thread obj
static SCliThrd* createThrdObj();
static void destroyThrdObj(SCliThrd* pThrd);
@@ -169,15 +160,14 @@ static void destroyThrdObj(SCliThrd* pThrd);
static void cliWalkCb(uv_handle_t* handle, void* arg);
static void cliReleaseUnfinishedMsg(SCliConn* conn) {
- SCliMsg* pMsg = NULL;
for (int i = 0; i < transQueueSize(&conn->cliMsgs); i++) {
- pMsg = transQueueGet(&conn->cliMsgs, i);
- if (pMsg != NULL && pMsg->ctx != NULL) {
- if (conn->ctx.freeFunc != NULL) {
- conn->ctx.freeFunc(pMsg->ctx->ahandle);
+ SCliMsg* msg = transQueueGet(&conn->cliMsgs, i);
+ if (msg != NULL && msg->ctx != NULL && msg->ctx->ahandle != (void*)0x9527) {
+ if (conn->ctx.freeFunc != NULL && msg->ctx->ahandle != NULL) {
+ conn->ctx.freeFunc(msg->ctx->ahandle);
}
}
- destroyCmsg(pMsg);
+ destroyCmsg(msg);
}
}
#define CLI_RELEASE_UV(loop) \
@@ -206,20 +196,22 @@ static void cliReleaseUnfinishedMsg(SCliConn* conn) {
#define CONN_GET_HOST_THREAD(conn) (conn ? ((SCliConn*)conn)->hostThrd : NULL)
#define CONN_GET_INST_LABEL(conn) (((STrans*)(((SCliThrd*)(conn)->hostThrd)->pTransInst))->label)
-#define CONN_GET_MSGCTX_BY_AHANDLE(conn, ahandle) \
- do { \
- int i = 0, sz = transQueueSize(&conn->cliMsgs); \
- for (; i < sz; i++) { \
- pMsg = transQueueGet(&conn->cliMsgs, i); \
- if (pMsg != NULL && pMsg->ctx != NULL && (uint64_t)pMsg->ctx->ahandle == ahandle) { \
- break; \
- } \
- } \
- if (i == sz) { \
- pMsg = NULL; \
- } else { \
- pMsg = transQueueRm(&conn->cliMsgs, i); \
- } \
+#define CONN_GET_MSGCTX_BY_AHANDLE(conn, ahandle) \
+ do { \
+ int i = 0, sz = transQueueSize(&conn->cliMsgs); \
+ for (; i < sz; i++) { \
+ pMsg = transQueueGet(&conn->cliMsgs, i); \
+ if (pMsg->ctx != NULL && (uint64_t)pMsg->ctx->ahandle == ahandle) { \
+ break; \
+ } \
+ } \
+ if (i == sz) { \
+ pMsg = NULL; \
+ tDebug("msg not found, %" PRIu64 "", ahandle); \
+ } else { \
+ pMsg = transQueueRm(&conn->cliMsgs, i); \
+ tDebug("msg found, %" PRIu64 "", ahandle); \
+ } \
} while (0)
#define CONN_GET_NEXT_SENDMSG(conn) \
do { \
@@ -297,7 +289,12 @@ bool cliMaySendCachedMsg(SCliConn* conn) {
if (!transQueueEmpty(&conn->cliMsgs)) {
SCliMsg* pCliMsg = NULL;
CONN_GET_NEXT_SENDMSG(conn);
- cliSend(conn);
+ if (pCliMsg == NULL)
+ return false;
+ else {
+ cliSend(conn);
+ return true;
+ }
}
return false;
_RETURN:
@@ -377,15 +374,19 @@ void cliHandleResp(SCliConn* conn) {
if (pCtx == NULL && CONN_NO_PERSIST_BY_APP(conn)) {
tDebug("%s except, conn %p read while cli ignore it", CONN_GET_INST_LABEL(conn), conn);
+ transFreeMsg(transMsg.pCont);
return;
}
if (CONN_RELEASE_BY_SERVER(conn) && transMsg.info.ahandle == NULL) {
tDebug("%s except, conn %p read while cli ignore it", CONN_GET_INST_LABEL(conn), conn);
+ transFreeMsg(transMsg.pCont);
return;
}
- if (cliAppCb(conn, &transMsg, pMsg) != 0) {
- return;
+ if (pMsg == NULL || (pMsg && pMsg->type != Release)) {
+ if (cliAppCb(conn, &transMsg, pMsg) != 0) {
+ return;
+ }
}
destroyCmsg(pMsg);
@@ -394,7 +395,7 @@ void cliHandleResp(SCliConn* conn) {
}
if (CONN_NO_PERSIST_BY_APP(conn)) {
- addConnToPool(pThrd->pool, conn);
+ return addConnToPool(pThrd->pool, conn);
}
uv_read_start((uv_stream_t*)conn->stream, cliAllocRecvBufferCb, cliRecvCb);
@@ -433,17 +434,20 @@ void cliHandleExceptImpl(SCliConn* pConn, int32_t code) {
transMsg.info.ahandle);
}
} else {
- transMsg.info.ahandle = pCtx ? pCtx->ahandle : NULL;
+ transMsg.info.ahandle = (pMsg->type != Release && pCtx) ? pCtx->ahandle : NULL;
}
if (pCtx == NULL || pCtx->pSem == NULL) {
if (transMsg.info.ahandle == NULL) {
+ if (REQUEST_NO_RESP(&pMsg->msg) || pMsg->type == Release) destroyCmsg(pMsg);
once = true;
continue;
}
}
- if (cliAppCb(pConn, &transMsg, pMsg) != 0) {
- return;
+ if (pMsg == NULL || (pMsg && pMsg->type != Release)) {
+ if (cliAppCb(pConn, &transMsg, pMsg) != 0) {
+ return;
+ }
}
destroyCmsg(pMsg);
tTrace("%s conn %p start to destroy, ref:%d", CONN_GET_INST_LABEL(pConn), pConn, T_REF_VAL_GET(pConn));
@@ -470,8 +474,8 @@ void* createConnPool(int size) {
void* destroyConnPool(void* pool) {
SConnList* connList = taosHashIterate((SHashObj*)pool, NULL);
while (connList != NULL) {
- while (!QUEUE_IS_EMPTY(&connList->conn)) {
- queue* h = QUEUE_HEAD(&connList->conn);
+ while (!QUEUE_IS_EMPTY(&connList->conns)) {
+ queue* h = QUEUE_HEAD(&connList->conns);
SCliConn* c = QUEUE_DATA(h, SCliConn, q);
cliDestroyConn(c, true);
}
@@ -484,21 +488,21 @@ void* destroyConnPool(void* pool) {
static SCliConn* getConnFromPool(void* pool, char* ip, uint32_t port) {
char key[32] = {0};
CONN_CONSTRUCT_HASH_KEY(key, ip, port);
- SHashObj* pPool = pool;
- SConnList* plist = taosHashGet(pPool, key, strlen(key));
+
+ SConnList* plist = taosHashGet((SHashObj*)pool, key, strlen(key));
if (plist == NULL) {
SConnList list = {0};
- taosHashPut(pPool, key, strlen(key), (void*)&list, sizeof(list));
- plist = taosHashGet(pPool, key, strlen(key));
- QUEUE_INIT(&plist->conn);
+ taosHashPut((SHashObj*)pool, key, strlen(key), (void*)&list, sizeof(list));
+ plist = taosHashGet((SHashObj*)pool, key, strlen(key));
+ QUEUE_INIT(&plist->conns);
}
- if (QUEUE_IS_EMPTY(&plist->conn)) {
+ if (QUEUE_IS_EMPTY(&plist->conns)) {
return NULL;
}
plist->size -= 1;
- queue* h = QUEUE_HEAD(&plist->conn);
+ queue* h = QUEUE_HEAD(&plist->conns);
SCliConn* conn = QUEUE_DATA(h, SCliConn, q);
conn->status = ConnNormal;
QUEUE_REMOVE(&conn->q);
@@ -514,22 +518,21 @@ static void addConnToPool(void* pool, SCliConn* conn) {
if (conn->status == ConnInPool) {
return;
}
- SCliThrd* thrd = conn->hostThrd;
- CONN_HANDLE_THREAD_QUIT(thrd);
-
allocConnRef(conn, true);
+ SCliThrd* thrd = conn->hostThrd;
if (conn->timer != NULL) {
uv_timer_stop(conn->timer);
taosArrayPush(thrd->timerList, &conn->timer);
conn->timer->data = NULL;
conn->timer = NULL;
}
+ if (T_REF_VAL_GET(conn) > 1) {
+ transUnrefCliHandle(conn);
+ }
+
+ cliDestroyConnMsgs(conn, false);
- STrans* pTransInst = thrd->pTransInst;
- cliReleaseUnfinishedMsg(conn);
- transQueueClear(&conn->cliMsgs);
- transCtxCleanup(&conn->ctx);
conn->status = ConnInPool;
if (conn->list == NULL) {
@@ -540,18 +543,15 @@ static void addConnToPool(void* pool, SCliConn* conn) {
} else {
tTrace("%s conn %p added to conn pool, read buf cap:%d", CONN_GET_INST_LABEL(conn), conn, conn->readBuf.cap);
}
- assert(conn->list != NULL);
- QUEUE_INIT(&conn->q);
- QUEUE_PUSH(&conn->list->conn, &conn->q);
+ QUEUE_PUSH(&conn->list->conns, &conn->q);
conn->list->size += 1;
- conn->task = NULL;
- assert(!QUEUE_IS_EMPTY(&conn->list->conn));
-
if (conn->list->size >= 50) {
STaskArg* arg = taosMemoryCalloc(1, sizeof(STaskArg));
arg->param1 = conn;
arg->param2 = thrd;
+
+ STrans* pTransInst = thrd->pTransInst;
conn->task = transDQSched(thrd->timeoutQueue, doCloseIdleConn, arg, CONN_PERSIST_TIME(pTransInst->idleTime));
}
}
@@ -691,11 +691,10 @@ static void cliDestroy(uv_handle_t* handle) {
transRemoveExHandle(transGetRefMgt(), conn->refId);
taosMemoryFree(conn->ip);
- conn->stream->data = NULL;
taosMemoryFree(conn->stream);
- transCtxCleanup(&conn->ctx);
- cliReleaseUnfinishedMsg(conn);
- transQueueDestroy(&conn->cliMsgs);
+
+ cliDestroyConnMsgs(conn, true);
+
tTrace("%s conn %p destroy successfully", CONN_GET_INST_LABEL(conn), conn);
transReqQueueClear(&conn->wreqQueue);
transDestroyBuffer(&conn->readBuf);
@@ -714,6 +713,9 @@ static bool cliHandleNoResp(SCliConn* conn) {
if (cliMaySendCachedMsg(conn) == false) {
SCliThrd* thrd = conn->hostThrd;
addConnToPool(thrd->pool, conn);
+ res = false;
+ } else {
+ res = true;
}
}
}
@@ -738,8 +740,6 @@ static void cliSendCb(uv_write_t* req, int status) {
}
void cliSend(SCliConn* pConn) {
- CONN_HANDLE_BROKEN(pConn);
-
assert(!transQueueEmpty(&pConn->cliMsgs));
SCliMsg* pCliMsg = NULL;
@@ -756,8 +756,8 @@ void cliSend(SCliConn* pConn) {
pMsg->pCont = (void*)rpcMallocCont(0);
pMsg->contLen = 0;
}
- int msgLen = transMsgLenFromCont(pMsg->contLen);
+ int msgLen = transMsgLenFromCont(pMsg->contLen);
STransMsgHead* pHead = transHeadFromCont(pMsg->pCont);
pHead->ahandle = pCtx != NULL ? (uint64_t)pCtx->ahandle : 0;
pHead->noResp = REQUEST_NO_RESP(pMsg) ? 1 : 0;
@@ -769,8 +769,6 @@ void cliSend(SCliConn* pConn) {
pHead->traceId = pMsg->info.traceId;
pHead->magicNum = htonl(TRANS_MAGIC_NUM);
- uv_buf_t wb = uv_buf_init((char*)pHead, msgLen);
-
STraceId* trace = &pMsg->info.traceId;
tGDebug("%s conn %p %s is sent to %s, local info %s, len:%d", CONN_GET_INST_LABEL(pConn), pConn,
TMSG_INFO(pHead->msgType), pConn->dst, pConn->src, pMsg->contLen);
@@ -792,8 +790,16 @@ void cliSend(SCliConn* pConn) {
tGTrace("%s conn %p start timer for msg:%s", CONN_GET_INST_LABEL(pConn), pConn, TMSG_INFO(pMsg->msgType));
uv_timer_start((uv_timer_t*)pConn->timer, cliReadTimeoutCb, TRANS_READ_TIMEOUT, 0);
}
+
+ uv_buf_t wb = uv_buf_init((char*)pHead, msgLen);
uv_write_t* req = transReqQueuePush(&pConn->wreqQueue);
- uv_write(req, (uv_stream_t*)pConn->stream, &wb, 1, cliSendCb);
+
+ int status = uv_write(req, (uv_stream_t*)pConn->stream, &wb, 1, cliSendCb);
+ if (status != 0) {
+ tGError("%s conn %p failed to sent msg:%s, errmsg:%s", CONN_GET_INST_LABEL(pConn), pConn, TMSG_INFO(pMsg->msgType),
+ uv_err_name(status));
+ cliHandleExcept(pConn);
+ }
return;
_RETURN:
return;
@@ -807,7 +813,6 @@ void cliConnCb(uv_connect_t* req, int status) {
cliHandleExcept(pConn);
return;
}
- // int addrlen = sizeof(pConn->addr);
struct sockaddr peername, sockname;
int addrlen = sizeof(peername);
@@ -840,7 +845,7 @@ static void cliHandleRelease(SCliMsg* pMsg, SCliThrd* pThrd) {
int64_t refId = (int64_t)(pMsg->msg.info.handle);
SExHandle* exh = transAcquireExHandle(transGetRefMgt(), refId);
if (exh == NULL) {
- tDebug("%" PRId64 " already release", refId);
+ tDebug("%" PRId64 " already released", refId);
destroyCmsg(pMsg);
return;
}
@@ -856,6 +861,9 @@ static void cliHandleRelease(SCliMsg* pMsg, SCliThrd* pThrd) {
return;
}
cliSend(conn);
+ } else {
+ tError("%s conn %p already released", CONN_GET_INST_LABEL(conn), conn);
+ destroyCmsg(pMsg);
}
}
static void cliHandleUpdate(SCliMsg* pMsg, SCliThrd* pThrd) {
@@ -894,17 +902,35 @@ SCliConn* cliGetConn(SCliMsg* pMsg, SCliThrd* pThrd, bool* ignore) {
}
return conn;
}
-void cliMayCvtFqdnToIp(SEpSet* pEpSet, SCvtAddr* pCvtAddr) {
+FORCE_INLINE void cliMayCvtFqdnToIp(SEpSet* pEpSet, SCvtAddr* pCvtAddr) {
if (pCvtAddr->cvt == false) {
return;
}
- for (int i = 0; i < pEpSet->numOfEps && pEpSet->numOfEps == 1; i++) {
- if (strncmp(pEpSet->eps[i].fqdn, pCvtAddr->fqdn, TSDB_FQDN_LEN) == 0) {
- memset(pEpSet->eps[i].fqdn, 0, TSDB_FQDN_LEN);
- memcpy(pEpSet->eps[i].fqdn, pCvtAddr->ip, TSDB_FQDN_LEN);
- }
+ if (pEpSet->numOfEps == 1 && strncmp(pEpSet->eps[0].fqdn, pCvtAddr->fqdn, TSDB_FQDN_LEN) == 0) {
+ memset(pEpSet->eps[0].fqdn, 0, TSDB_FQDN_LEN);
+ memcpy(pEpSet->eps[0].fqdn, pCvtAddr->ip, TSDB_FQDN_LEN);
}
}
+
+FORCE_INLINE bool cliIsEpsetUpdated(int32_t code, STransConnCtx* pCtx) {
+ if (code != 0) return false;
+ if (pCtx->retryCnt == 0) return false;
+ if (transEpSetIsEqual(&pCtx->epSet, &pCtx->origEpSet)) return false;
+ return true;
+}
+FORCE_INLINE int32_t cliBuildExceptResp(SCliMsg* pMsg, STransMsg* pResp) {
+ if (pMsg == NULL) return -1;
+
+ memset(pResp, 0, sizeof(STransMsg));
+
+ pResp->code = TSDB_CODE_RPC_BROKEN_LINK;
+ pResp->msgType = pMsg->msg.msgType + 1;
+ pResp->info.ahandle = pMsg->ctx ? pMsg->ctx->ahandle : NULL;
+ pResp->info.traceId = pMsg->msg.info.traceId;
+
+ return 0;
+}
+
void cliHandleReq(SCliMsg* pMsg, SCliThrd* pThrd) {
STrans* pTransInst = pThrd->pTransInst;
STransConnCtx* pCtx = pMsg->ctx;
@@ -920,14 +946,11 @@ void cliHandleReq(SCliMsg* pMsg, SCliThrd* pThrd) {
SCliConn* conn = cliGetConn(pMsg, pThrd, &ignore);
if (ignore == true) {
// persist conn already release by server
- STransMsg resp = {0};
- resp.code = TSDB_CODE_RPC_BROKEN_LINK;
- resp.msgType = pMsg->msg.msgType + 1;
-
- resp.info.ahandle = pMsg && pMsg->ctx ? pMsg->ctx->ahandle : NULL;
- resp.info.traceId = pMsg->msg.info.traceId;
-
- pTransInst->cfp(pTransInst->parent, &resp, NULL);
+ STransMsg resp;
+ cliBuildExceptResp(pMsg, &resp);
+ if (pMsg->type != Release) {
+ pTransInst->cfp(pTransInst->parent, &resp, NULL);
+ }
destroyCmsg(pMsg);
return;
}
@@ -973,6 +996,8 @@ void cliHandleReq(SCliMsg* pMsg, SCliThrd* pThrd) {
return;
}
}
+ STraceId* trace = &pMsg->msg.info.traceId;
+ tGTrace("%s conn %p ready", pTransInst->label, conn);
}
static void cliAsyncCb(uv_async_t* handle) {
SAsyncItem* item = handle->data;
@@ -991,9 +1016,6 @@ static void cliAsyncCb(uv_async_t* handle) {
QUEUE_REMOVE(h);
SCliMsg* pMsg = QUEUE_DATA(h, SCliMsg, q);
- if (pMsg == NULL) {
- continue;
- }
(*cliAsyncHandle[pMsg->type])(pMsg, pThrd);
count++;
}
@@ -1035,24 +1057,58 @@ static void cliPrepareCb(uv_prepare_t* handle) {
if (thrd->stopMsg != NULL) cliHandleQuit(thrd->stopMsg, thrd);
}
+void cliDestroyConnMsgs(SCliConn* conn, bool destroy) {
+ transCtxCleanup(&conn->ctx);
+ cliReleaseUnfinishedMsg(conn);
+ if (destroy == 1) {
+ transQueueDestroy(&conn->cliMsgs);
+ } else {
+ transQueueClear(&conn->cliMsgs);
+ }
+}
+
+void cliIteraConnMsgs(SCliConn* conn) {
+ SCliThrd* pThrd = conn->hostThrd;
+ STrans* pTransInst = pThrd->pTransInst;
+
+ for (int i = 0; i < transQueueSize(&conn->cliMsgs); i++) {
+ SCliMsg* cmsg = transQueueGet(&conn->cliMsgs, i);
+ if (cmsg->type == Release || REQUEST_NO_RESP(&cmsg->msg) || cmsg->msg.msgType == TDMT_SCH_DROP_TASK) {
+ continue;
+ }
+
+ STransMsg resp = {0};
+ if (-1 == cliBuildExceptResp(cmsg, &resp)) {
+ continue;
+ }
+ pTransInst->cfp(pTransInst->parent, &resp, NULL);
+
+ cmsg->ctx->ahandle = NULL;
+ }
+}
bool cliRecvReleaseReq(SCliConn* conn, STransMsgHead* pHead) {
if (pHead->release == 1 && (pHead->msgLen) == sizeof(*pHead)) {
uint64_t ahandle = pHead->ahandle;
+ tDebug("ahandle = %" PRIu64 "", ahandle);
SCliMsg* pMsg = NULL;
CONN_GET_MSGCTX_BY_AHANDLE(conn, ahandle);
+
transClearBuffer(&conn->readBuf);
transFreeMsg(transContFromHead((char*)pHead));
- if (transQueueSize(&conn->cliMsgs) > 0 && ahandle == 0) {
- SCliMsg* cliMsg = transQueueGet(&conn->cliMsgs, 0);
- if (cliMsg->type == Release) return true;
+
+ for (int i = 0; ahandle == 0 && i < transQueueSize(&conn->cliMsgs); i++) {
+ SCliMsg* cliMsg = transQueueGet(&conn->cliMsgs, i);
+ if (cliMsg->type == Release) {
+ assert(pMsg == NULL);
+ return true;
+ }
}
+
+ cliIteraConnMsgs(conn);
+
tDebug("%s conn %p receive release request, refId:%" PRId64 "", CONN_GET_INST_LABEL(conn), conn, conn->refId);
- if (T_REF_VAL_GET(conn) > 1) {
- transUnrefCliHandle(conn);
- }
destroyCmsg(pMsg);
- cliReleaseUnfinishedMsg(conn);
- transQueueClear(&conn->cliMsgs);
+
addConnToPool(((SCliThrd*)conn->hostThrd)->pool, conn);
return true;
}
@@ -1090,14 +1146,15 @@ void* transInitClient(uint32_t ip, uint32_t port, char* label, int numOfThreads,
return cli;
}
-static void destroyUserdata(STransMsg* userdata) {
+static FORCE_INLINE void destroyUserdata(STransMsg* userdata) {
if (userdata->pCont == NULL) {
return;
}
transFreeMsg(userdata->pCont);
userdata->pCont = NULL;
}
-static void destroyCmsg(void* arg) {
+
+static FORCE_INLINE void destroyCmsg(void* arg) {
SCliMsg* pMsg = arg;
if (pMsg == NULL) {
return;
@@ -1163,7 +1220,7 @@ static void destroyThrdObj(SCliThrd* pThrd) {
taosMemoryFree(pThrd);
}
-static void transDestroyConnCtx(STransConnCtx* ctx) {
+static FORCE_INLINE void transDestroyConnCtx(STransConnCtx* ctx) {
//
taosMemoryFree(ctx);
}
@@ -1182,7 +1239,7 @@ void cliWalkCb(uv_handle_t* handle, void* arg) {
}
}
-int cliRBChoseIdx(STrans* pTransInst) {
+FORCE_INLINE int cliRBChoseIdx(STrans* pTransInst) {
int8_t index = pTransInst->index;
if (pTransInst->numOfThreads == 0) {
return -1;
@@ -1192,7 +1249,7 @@ int cliRBChoseIdx(STrans* pTransInst) {
}
return index % pTransInst->numOfThreads;
}
-static void doDelayTask(void* param) {
+static FORCE_INLINE void doDelayTask(void* param) {
STaskArg* arg = param;
SCliMsg* pMsg = arg->param1;
SCliThrd* pThrd = arg->param2;
@@ -1226,13 +1283,13 @@ static void cliSchedMsgToNextNode(SCliMsg* pMsg, SCliThrd* pThrd) {
transDQSched(pThrd->delayQueue, doDelayTask, arg, TRANS_RETRY_INTERVAL);
}
-void cliCompareAndSwap(int8_t* val, int8_t exp, int8_t newVal) {
+FORCE_INLINE void cliCompareAndSwap(int8_t* val, int8_t exp, int8_t newVal) {
if (*val != exp) {
*val = newVal;
}
}
-bool cliTryExtractEpSet(STransMsg* pResp, SEpSet* dst) {
+FORCE_INLINE bool cliTryExtractEpSet(STransMsg* pResp, SEpSet* dst) {
if ((pResp == NULL || pResp->info.hasEpSet == 0)) {
return false;
}
@@ -1262,15 +1319,11 @@ int cliAppCb(SCliConn* pConn, STransMsg* pResp, SCliMsg* pMsg) {
STrans* pTransInst = pThrd->pTransInst;
if (pMsg == NULL || pMsg->ctx == NULL) {
- tTrace("%s conn %p handle resp", pTransInst->label, pConn);
+ tDebug("%s conn %p handle resp", pTransInst->label, pConn);
pTransInst->cfp(pTransInst->parent, pResp, NULL);
return 0;
}
- /*
- * no retry
- * 1. query conn
- * 2. rpc thread already receive quit msg
- */
+
STransConnCtx* pCtx = pMsg->ctx;
int32_t code = pResp->code;
@@ -1368,53 +1421,57 @@ void transUnrefCliHandle(void* handle) {
cliDestroyConn((SCliConn*)handle, true);
}
}
-SCliThrd* transGetWorkThrdFromHandle(int64_t handle, bool* validHandle) {
+static FORCE_INLINE SCliThrd* transGetWorkThrdFromHandle(STrans* trans, int64_t handle) {
SCliThrd* pThrd = NULL;
SExHandle* exh = transAcquireExHandle(transGetRefMgt(), handle);
if (exh == NULL) {
return NULL;
}
- *validHandle = true;
+ if (exh->pThrd == NULL && trans != NULL) {
+ int idx = cliRBChoseIdx(trans);
+ if (idx < 0) return NULL;
+ exh->pThrd = ((SCliObj*)trans->tcphandle)->pThreadObj[idx];
+ }
+
pThrd = exh->pThrd;
transReleaseExHandle(transGetRefMgt(), handle);
return pThrd;
}
-SCliThrd* transGetWorkThrd(STrans* trans, int64_t handle, bool* validHandle) {
+SCliThrd* transGetWorkThrd(STrans* trans, int64_t handle) {
if (handle == 0) {
int idx = cliRBChoseIdx(trans);
if (idx < 0) return NULL;
return ((SCliObj*)trans->tcphandle)->pThreadObj[idx];
}
- SCliThrd* pThrd = transGetWorkThrdFromHandle(handle, validHandle);
- if (*validHandle == true && pThrd == NULL) {
- int idx = cliRBChoseIdx(trans);
- if (idx < 0) return NULL;
- pThrd = ((SCliObj*)trans->tcphandle)->pThreadObj[idx];
- }
+ SCliThrd* pThrd = transGetWorkThrdFromHandle(trans, handle);
return pThrd;
}
int transReleaseCliHandle(void* handle) {
int idx = -1;
bool valid = false;
- SCliThrd* pThrd = transGetWorkThrdFromHandle((int64_t)handle, &valid);
+ SCliThrd* pThrd = transGetWorkThrdFromHandle(NULL, (int64_t)handle);
if (pThrd == NULL) {
return -1;
}
- STransMsg tmsg = {.info.handle = handle};
+ STransMsg tmsg = {.info.handle = handle, .info.ahandle = (void*)0x9527};
TRACE_SET_MSGID(&tmsg.info.traceId, tGenIdPI64());
+ STransConnCtx* pCtx = taosMemoryCalloc(1, sizeof(STransConnCtx));
+ pCtx->ahandle = tmsg.info.ahandle;
+
SCliMsg* cmsg = taosMemoryCalloc(1, sizeof(SCliMsg));
cmsg->msg = tmsg;
cmsg->type = Release;
+ cmsg->ctx = pCtx;
STraceId* trace = &tmsg.info.traceId;
tGDebug("send release request at thread:%08" PRId64 "", pThrd->pid);
if (0 != transAsyncSend(pThrd->asyncPool, &cmsg->q)) {
- taosMemoryFree(cmsg);
+ destroyCmsg(cmsg);
return -1;
}
return 0;
@@ -1427,9 +1484,8 @@ int transSendRequest(void* shandle, const SEpSet* pEpSet, STransMsg* pReq, STran
return -1;
}
- bool valid = false;
- SCliThrd* pThrd = transGetWorkThrd(pTransInst, (int64_t)pReq->info.handle, &valid);
- if (pThrd == NULL && valid == false) {
+ SCliThrd* pThrd = transGetWorkThrd(pTransInst, (int64_t)pReq->info.handle);
+ if (pThrd == NULL) {
transFreeMsg(pReq->pCont);
transReleaseExHandle(transGetInstMgt(), (int64_t)shandle);
return TSDB_CODE_RPC_BROKEN_LINK;
@@ -1442,9 +1498,7 @@ int transSendRequest(void* shandle, const SEpSet* pEpSet, STransMsg* pReq, STran
pCtx->ahandle = pReq->info.ahandle;
pCtx->msgType = pReq->msgType;
- if (ctx != NULL) {
- pCtx->appCtx = *ctx;
- }
+ if (ctx != NULL) pCtx->appCtx = *ctx;
SCliMsg* cliMsg = taosMemoryCalloc(1, sizeof(SCliMsg));
cliMsg->ctx = pCtx;
@@ -1472,9 +1526,8 @@ int transSendRecv(void* shandle, const SEpSet* pEpSet, STransMsg* pReq, STransMs
return -1;
}
- bool valid = false;
- SCliThrd* pThrd = transGetWorkThrd(pTransInst, (int64_t)pReq->info.handle, &valid);
- if (pThrd == NULL && valid == false) {
+ SCliThrd* pThrd = transGetWorkThrd(pTransInst, (int64_t)pReq->info.handle);
+ if (pThrd == NULL) {
transFreeMsg(pReq->pCont);
transReleaseExHandle(transGetInstMgt(), (int64_t)shandle);
return TSDB_CODE_RPC_BROKEN_LINK;
@@ -1558,6 +1611,7 @@ int64_t transAllocHandle() {
SExHandle* exh = taosMemoryCalloc(1, sizeof(SExHandle));
exh->refId = transAddExHandle(transGetRefMgt(), exh);
tDebug("pre alloc refId %" PRId64 "", exh->refId);
+
return exh->refId;
}
#endif
diff --git a/source/libs/transport/src/transComm.c b/source/libs/transport/src/transComm.c
index a4d679b281512ff13757eab7c9c42a11e0edb36b..5f3171ee0e840ee7f558a13b4ad315bd9bcfb856 100644
--- a/source/libs/transport/src/transComm.c
+++ b/source/libs/transport/src/transComm.c
@@ -287,10 +287,10 @@ void transCtxMerge(STransCtx* dst, STransCtx* src) {
STransCtxVal* sVal = (STransCtxVal*)iter;
key = taosHashGetKey(sVal, &klen);
- STransCtxVal* dVal = taosHashGet(dst->args, key, klen);
- if (dVal) {
- dst->freeFunc(dVal->val);
- }
+ // STransCtxVal* dVal = taosHashGet(dst->args, key, klen);
+ // if (dVal) {
+ // dst->freeFunc(dVal->val);
+ // }
taosHashPut(dst->args, key, klen, sVal, sizeof(*sVal));
iter = taosHashIterate(src->args, iter);
}
@@ -424,7 +424,7 @@ void transQueueDestroy(STransQueue* queue) {
taosArrayDestroy(queue->q);
}
-static int32_t timeCompare(const HeapNode* a, const HeapNode* b) {
+static FORCE_INLINE int32_t timeCompare(const HeapNode* a, const HeapNode* b) {
SDelayTask* arg1 = container_of(a, SDelayTask, node);
SDelayTask* arg2 = container_of(b, SDelayTask, node);
if (arg1->execTime > arg2->execTime) {
diff --git a/source/libs/transport/src/transSvr.c b/source/libs/transport/src/transSvr.c
index 207b967923fad439ed043a71752d127bac13934c..70a47fe079ed2d2fbd4638a3273f69c75bb8d60a 100644
--- a/source/libs/transport/src/transSvr.c
+++ b/source/libs/transport/src/transSvr.c
@@ -125,17 +125,17 @@ static void uvWorkAfterTask(uv_work_t* req, int status);
static void uvWalkCb(uv_handle_t* handle, void* arg);
static void uvFreeCb(uv_handle_t* handle);
-static void uvStartSendRespImpl(SSvrMsg* smsg);
+static FORCE_INLINE void uvStartSendRespImpl(SSvrMsg* smsg);
+
static void uvPrepareSendData(SSvrMsg* msg, uv_buf_t* wb);
static void uvStartSendResp(SSvrMsg* msg);
static void uvNotifyLinkBrokenToApp(SSvrConn* conn);
-static void destroySmsg(SSvrMsg* smsg);
-// check whether already read complete packet
-static SSvrConn* createConn(void* hThrd);
-static void destroyConn(SSvrConn* conn, bool clear /*clear handle or not*/);
-static void destroyConnRegArg(SSvrConn* conn);
+static FORCE_INLINE void destroySmsg(SSvrMsg* smsg);
+static FORCE_INLINE SSvrConn* createConn(void* hThrd);
+static FORCE_INLINE void destroyConn(SSvrConn* conn, bool clear /*clear handle or not*/);
+static FORCE_INLINE void destroyConnRegArg(SSvrConn* conn);
static int reallocConnRef(SSvrConn* conn);
@@ -413,7 +413,7 @@ static void uvPrepareSendData(SSvrMsg* smsg, uv_buf_t* wb) {
wb->len = len;
}
-static void uvStartSendRespImpl(SSvrMsg* smsg) {
+static FORCE_INLINE void uvStartSendRespImpl(SSvrMsg* smsg) {
SSvrConn* pConn = smsg->pConn;
if (pConn->broken) {
return;
@@ -447,7 +447,7 @@ static void uvStartSendResp(SSvrMsg* smsg) {
return;
}
-static void destroySmsg(SSvrMsg* smsg) {
+static FORCE_INLINE void destroySmsg(SSvrMsg* smsg) {
if (smsg == NULL) {
return;
}
@@ -492,7 +492,6 @@ void uvWorkerAsyncCb(uv_async_t* handle) {
// release handle to rpc init
if (msg->type == Quit) {
(*transAsyncHandle[msg->type])(msg, pThrd);
- continue;
} else {
STransMsg transMsg = msg->msg;
@@ -771,7 +770,7 @@ static bool addHandleToWorkloop(SWorkThrd* pThrd, char* pipeName) {
// conn set
QUEUE_INIT(&pThrd->conn);
- pThrd->asyncPool = transAsyncPoolCreate(pThrd->loop, 1, pThrd, uvWorkerAsyncCb);
+ pThrd->asyncPool = transAsyncPoolCreate(pThrd->loop, 5, pThrd, uvWorkerAsyncCb);
uv_pipe_connect(&pThrd->connect_req, pThrd->pipe, pipeName, uvOnPipeConnectionCb);
// uv_read_start((uv_stream_t*)pThrd->pipe, uvAllocConnBufferCb, uvOnConnectionCb);
return true;
@@ -813,7 +812,7 @@ void* transWorkerThread(void* arg) {
return NULL;
}
-static SSvrConn* createConn(void* hThrd) {
+static FORCE_INLINE SSvrConn* createConn(void* hThrd) {
SWorkThrd* pThrd = hThrd;
SSvrConn* pConn = (SSvrConn*)taosMemoryCalloc(1, sizeof(SSvrConn));
@@ -843,7 +842,7 @@ static SSvrConn* createConn(void* hThrd) {
return pConn;
}
-static void destroyConn(SSvrConn* conn, bool clear) {
+static FORCE_INLINE void destroyConn(SSvrConn* conn, bool clear) {
if (conn == NULL) {
return;
}
@@ -855,7 +854,7 @@ static void destroyConn(SSvrConn* conn, bool clear) {
}
}
}
-static void destroyConnRegArg(SSvrConn* conn) {
+static FORCE_INLINE void destroyConnRegArg(SSvrConn* conn) {
if (conn->regArg.init == 1) {
transFreeMsg(conn->regArg.msg.pCont);
conn->regArg.init = 0;
@@ -907,23 +906,30 @@ static void uvDestroyConn(uv_handle_t* handle) {
}
}
static void uvPipeListenCb(uv_stream_t* handle, int status) {
- ASSERT(status == 0);
+ if (status != 0) {
+ tError("server failed to init pipe, errmsg: %s", uv_err_name(status));
+ return;
+ }
SServerObj* srv = container_of(handle, SServerObj, pipeListen);
uv_pipe_t* pipe = &(srv->pipe[srv->numOfWorkerReady][0]);
- ASSERT(0 == uv_pipe_init(srv->loop, pipe, 1));
- ASSERT(0 == uv_accept((uv_stream_t*)&srv->pipeListen, (uv_stream_t*)pipe));
- ASSERT(1 == uv_is_readable((uv_stream_t*)pipe));
- ASSERT(1 == uv_is_writable((uv_stream_t*)pipe));
- ASSERT(0 == uv_is_closing((uv_handle_t*)pipe));
+ int ret = uv_pipe_init(srv->loop, pipe, 1);
+ assert(ret == 0);
- srv->numOfWorkerReady++;
+ ret = uv_accept((uv_stream_t*)&srv->pipeListen, (uv_stream_t*)pipe);
+ assert(ret == 0);
- // ASSERT(0 == uv_listen((uv_stream_t*)&ctx.send.tcp, 512, uvOnAcceptCb));
+ ret = uv_is_readable((uv_stream_t*)pipe);
+ assert(ret == 1);
- // r = uv_read_start((uv_stream_t*)&ctx.channel, alloc_cb, read_cb);
- // ASSERT(r == 0);
+ ret = uv_is_writable((uv_stream_t*)pipe);
+ assert(ret == 1);
+
+ ret = uv_is_closing((uv_handle_t*)pipe);
+ assert(ret == 0);
+
+ srv->numOfWorkerReady++;
}
void* transInitServer(uint32_t ip, uint32_t port, char* label, int numOfThreads, void* fp, void* shandle) {
@@ -938,7 +944,12 @@ void* transInitServer(uint32_t ip, uint32_t port, char* label, int numOfThreads,
srv->port = port;
uv_loop_init(srv->loop);
- assert(0 == uv_pipe_init(srv->loop, &srv->pipeListen, 0));
+ int ret = uv_pipe_init(srv->loop, &srv->pipeListen, 0);
+ if (ret != 0) {
+ tError("failed to init pipe, errmsg: %s", uv_err_name(ret));
+ goto End;
+ }
+
#ifdef WINDOWS
char pipeName[64];
snprintf(pipeName, sizeof(pipeName), "\\\\?\\pipe\\trans.rpc.%p-" PRIu64, taosSafeRand(), GetCurrentProcessId());
@@ -947,8 +958,17 @@ void* transInitServer(uint32_t ip, uint32_t port, char* label, int numOfThreads,
snprintf(pipeName, sizeof(pipeName), "%s%spipe.trans.rpc.%08X-" PRIu64, tsTempDir, TD_DIRSEP, taosSafeRand(),
taosGetSelfPthreadId());
#endif
- assert(0 == uv_pipe_bind(&srv->pipeListen, pipeName));
- assert(0 == uv_listen((uv_stream_t*)&srv->pipeListen, SOMAXCONN, uvPipeListenCb));
+ ret = uv_pipe_bind(&srv->pipeListen, pipeName);
+ if (ret != 0) {
+ tError("failed to bind pipe, errmsg: %s", uv_err_name(ret));
+ goto End;
+ }
+
+ ret = uv_listen((uv_stream_t*)&srv->pipeListen, SOMAXCONN, uvPipeListenCb);
+ if (ret != 0) {
+ tError("failed to listen pipe, errmsg: %s", uv_err_name(ret));
+ goto End;
+ }
for (int i = 0; i < srv->numOfThreads; i++) {
SWorkThrd* thrd = (SWorkThrd*)taosMemoryCalloc(1, sizeof(SWorkThrd));
@@ -1071,12 +1091,12 @@ void transCloseServer(void* arg) {
if (srv->inited) {
uv_async_send(srv->pAcceptAsync);
taosThreadJoin(srv->thread, NULL);
- }
- SRV_RELEASE_UV(srv->loop);
+ SRV_RELEASE_UV(srv->loop);
- for (int i = 0; i < srv->numOfThreads; i++) {
- sendQuitToWorkThrd(srv->pThreadObj[i]);
- destroyWorkThrd(srv->pThreadObj[i]);
+ for (int i = 0; i < srv->numOfThreads; i++) {
+ sendQuitToWorkThrd(srv->pThreadObj[i]);
+ destroyWorkThrd(srv->pThreadObj[i]);
+ }
}
taosMemoryFree(srv->pThreadObj);
@@ -1128,6 +1148,7 @@ int transReleaseSrvHandle(void* handle) {
tTrace("%s conn %p start to release", transLabel(pThrd->pTransInst), exh->handle);
transAsyncSend(pThrd->asyncPool, &m->q);
+
transReleaseExHandle(transGetRefMgt(), refId);
return 0;
_return1:
@@ -1157,8 +1178,10 @@ int transSendResponse(const STransMsg* msg) {
STraceId* trace = (STraceId*)&msg->info.traceId;
tGTrace("conn %p start to send resp (1/2)", exh->handle);
transAsyncSend(pThrd->asyncPool, &m->q);
+
transReleaseExHandle(transGetRefMgt(), refId);
return 0;
+
_return1:
tTrace("handle %p failed to send resp", exh);
rpcFreeCont(msg->pCont);
@@ -1187,6 +1210,7 @@ int transRegisterMsg(const STransMsg* msg) {
STrans* pTransInst = pThrd->pTransInst;
tTrace("%s conn %p start to register brokenlink callback", transLabel(pTransInst), exh->handle);
transAsyncSend(pThrd->asyncPool, &m->q);
+
transReleaseExHandle(transGetRefMgt(), refId);
return 0;
diff --git a/source/libs/wal/src/walMeta.c b/source/libs/wal/src/walMeta.c
index 93ced912f8e2358c2aab6f04957ce060cf61c924..c69046f707a1fddb7a593771ad15535a70615ff8 100644
--- a/source/libs/wal/src/walMeta.c
+++ b/source/libs/wal/src/walMeta.c
@@ -116,7 +116,6 @@ static FORCE_INLINE int64_t walScanLogGetLastVer(SWal* pWal) {
}
#endif
}
- // TODO truncate file
if (found == NULL) {
// file corrupted, no complete log
@@ -125,8 +124,20 @@ static FORCE_INLINE int64_t walScanLogGetLastVer(SWal* pWal) {
terrno = TSDB_CODE_WAL_FILE_CORRUPTED;
return -1;
}
+
+ // truncate file
SWalCkHead* lastEntry = (SWalCkHead*)found;
int64_t retVer = lastEntry->head.version;
+ int64_t lastEntryBeginOffset = offset + (int64_t)((char*)found - (char*)buf);
+ int64_t lastEntryEndOffset = lastEntryBeginOffset + sizeof(SWalCkHead) + lastEntry->head.bodyLen;
+ if (lastEntryEndOffset != fileSize) {
+ wWarn("vgId:%d repair meta truncate file %s to %ld, orig size %ld", pWal->cfg.vgId, fnameStr, lastEntryEndOffset,
+ fileSize);
+ taosFtruncateFile(pFile, lastEntryEndOffset);
+ ((SWalFileInfo*)taosArrayGetLast(pWal->fileInfoSet))->fileSize = lastEntryEndOffset;
+ pWal->totSize -= (fileSize - lastEntryEndOffset);
+ }
+
taosCloseFile(&pFile);
taosMemoryFree(buf);
@@ -226,16 +237,92 @@ int walCheckAndRepairMeta(SWal* pWal) {
}
}
- // TODO: set fileSize and lastVer if necessary
-
return 0;
}
int walCheckAndRepairIdx(SWal* pWal) {
- // TODO: iterate all log files
- // if idx not found, scan log and write idx
- // if found, check complete by first and last entry of each idx file
- // if idx incomplete, binary search last valid entry, and then build other part
+ int32_t sz = taosArrayGetSize(pWal->fileInfoSet);
+ for (int32_t i = 0; i < sz; i++) {
+ SWalFileInfo* pFileInfo = taosArrayGet(pWal->fileInfoSet, i);
+
+ char fnameStr[WAL_FILE_LEN];
+ walBuildIdxName(pWal, pFileInfo->firstVer, fnameStr);
+ int64_t fsize;
+ TdFilePtr pIdxFile = taosOpenFile(fnameStr, TD_FILE_READ | TD_FILE_WRITE | TD_FILE_CREATE);
+ if (pIdxFile == NULL) {
+ ASSERT(0);
+ terrno = TAOS_SYSTEM_ERROR(errno);
+ wError("vgId:%d, cannot open file %s, since %s", pWal->cfg.vgId, fnameStr, terrstr());
+ return -1;
+ }
+
+ taosFStatFile(pIdxFile, &fsize, NULL);
+ if (fsize == (pFileInfo->lastVer - pFileInfo->firstVer + 1) * sizeof(SWalIdxEntry)) {
+ taosCloseFile(&pIdxFile);
+ continue;
+ }
+
+ int32_t left = fsize % sizeof(SWalIdxEntry);
+ int64_t offset = taosLSeekFile(pIdxFile, -left, SEEK_END);
+ if (left != 0) {
+ taosFtruncateFile(pIdxFile, offset);
+ wWarn("vgId:%d wal truncate file %s to offset %ld since size invalid, file size %ld", pWal->cfg.vgId, fnameStr,
+ offset, fsize);
+ }
+ offset -= sizeof(SWalIdxEntry);
+
+ SWalIdxEntry idxEntry = {.ver = pFileInfo->firstVer};
+ while (1) {
+ if (offset < 0) {
+ taosLSeekFile(pIdxFile, 0, SEEK_SET);
+ taosWriteFile(pIdxFile, &idxEntry, sizeof(SWalIdxEntry));
+ break;
+ }
+ taosLSeekFile(pIdxFile, offset, SEEK_SET);
+ int64_t contLen = taosReadFile(pIdxFile, &idxEntry, sizeof(SWalIdxEntry));
+ if (contLen < 0 || contLen != sizeof(SWalIdxEntry)) {
+ terrno = TAOS_SYSTEM_ERROR(errno);
+ return -1;
+ }
+ if ((idxEntry.ver - pFileInfo->firstVer) * sizeof(SWalIdxEntry) != offset) {
+ taosFtruncateFile(pIdxFile, offset);
+ wWarn("vgId:%d wal truncate file %s to offset %ld since entry invalid, entry ver %ld, entry offset %ld",
+ pWal->cfg.vgId, fnameStr, offset, idxEntry.ver, idxEntry.offset);
+ offset -= sizeof(SWalIdxEntry);
+ } else {
+ break;
+ }
+ }
+
+ if (idxEntry.ver < pFileInfo->lastVer) {
+ char fLogNameStr[WAL_FILE_LEN];
+ walBuildLogName(pWal, pFileInfo->firstVer, fLogNameStr);
+ TdFilePtr pLogFile = taosOpenFile(fLogNameStr, TD_FILE_READ);
+ if (pLogFile == NULL) {
+ terrno = TAOS_SYSTEM_ERROR(errno);
+ wError("vgId:%d, cannot open file %s, since %s", pWal->cfg.vgId, fLogNameStr, terrstr());
+ return -1;
+ }
+ while (idxEntry.ver < pFileInfo->lastVer) {
+ taosLSeekFile(pLogFile, idxEntry.offset, SEEK_SET);
+ SWalCkHead ckHead;
+ taosReadFile(pLogFile, &ckHead, sizeof(SWalCkHead));
+ if (idxEntry.ver != ckHead.head.version) {
+ // todo truncate this idx also
+ taosCloseFile(&pLogFile);
+ wError("vgId:%d, invalid repair case, log seek to %ld to find ver %ld, actual ver %ld", pWal->cfg.vgId,
+ idxEntry.offset, idxEntry.ver, ckHead.head.version);
+ return -1;
+ }
+ idxEntry.ver = ckHead.head.version + 1;
+ idxEntry.offset = idxEntry.offset + sizeof(SWalCkHead) + ckHead.head.bodyLen;
+ wWarn("vgId:%d wal idx append new entry %ld %ld", pWal->cfg.vgId, idxEntry.ver, idxEntry.offset);
+ taosWriteFile(pIdxFile, &idxEntry, sizeof(SWalIdxEntry));
+ }
+ taosCloseFile(&pLogFile);
+ }
+ taosCloseFile(&pIdxFile);
+ }
return 0;
}
@@ -268,7 +355,7 @@ int walRollFileInfo(SWal* pWal) {
char* walMetaSerialize(SWal* pWal) {
char buf[30];
ASSERT(pWal->fileInfoSet);
- int sz = pWal->fileInfoSet->size;
+ int sz = taosArrayGetSize(pWal->fileInfoSet);
cJSON* pRoot = cJSON_CreateObject();
cJSON* pMeta = cJSON_CreateObject();
cJSON* pFiles = cJSON_CreateArray();
@@ -384,8 +471,10 @@ static int walFindCurMetaVer(SWal* pWal) {
int code = regexec(&walMetaRegexPattern, name, 0, NULL, 0);
if (code == 0) {
sscanf(name, "meta-ver%d", &metaVer);
+ wDebug("vgId:%d, wal find current meta: %s is the meta file, ver %d", pWal->cfg.vgId, name, metaVer);
break;
}
+ wDebug("vgId:%d, wal find current meta: %s is not meta file", pWal->cfg.vgId, name);
}
taosCloseDir(&pDir);
regfree(&walMetaRegexPattern);
@@ -422,6 +511,7 @@ int walLoadMeta(SWal* pWal) {
// find existing meta file
int metaVer = walFindCurMetaVer(pWal);
if (metaVer == -1) {
+ wDebug("vgId:%d wal find meta ver %d", pWal->cfg.vgId, metaVer);
return -1;
}
char fnameStr[WAL_FILE_LEN];
diff --git a/source/libs/wal/src/walMgmt.c b/source/libs/wal/src/walMgmt.c
index c939c8c43685ef3dc6ea0b4fde1cd5fbfb33a8d1..a55f00d27702294f6bf996690c80ca5e3765428a 100644
--- a/source/libs/wal/src/walMgmt.c
+++ b/source/libs/wal/src/walMgmt.c
@@ -149,15 +149,21 @@ SWal *walOpen(const char *path, SWalCfg *pCfg) {
walLoadMeta(pWal);
if (walCheckAndRepairMeta(pWal) < 0) {
+ wError("vgId:%d cannot open wal since repair meta file failed", pWal->cfg.vgId);
taosHashCleanup(pWal->pRefHash);
taosRemoveRef(tsWal.refSetId, pWal->refId);
taosThreadMutexDestroy(&pWal->mutex);
taosArrayDestroy(pWal->fileInfoSet);
- taosMemoryFree(pWal);
return NULL;
}
if (walCheckAndRepairIdx(pWal) < 0) {
+ wError("vgId:%d cannot open wal since repair idx file failed", pWal->cfg.vgId);
+ taosHashCleanup(pWal->pRefHash);
+ taosRemoveRef(tsWal.refSetId, pWal->refId);
+ taosThreadMutexDestroy(&pWal->mutex);
+ taosArrayDestroy(pWal->fileInfoSet);
+ return NULL;
}
wDebug("vgId:%d, wal:%p is opened, level:%d fsyncPeriod:%d", pWal->cfg.vgId, pWal, pWal->cfg.level,
diff --git a/source/libs/wal/src/walRead.c b/source/libs/wal/src/walRead.c
index a5b5a2b7b4cac113978d8278ecf0a57686a67257..5c437e6f7aeb942e02e79314e4140af0cdfe4323 100644
--- a/source/libs/wal/src/walRead.c
+++ b/source/libs/wal/src/walRead.c
@@ -168,6 +168,9 @@ static int32_t walReadChangeFile(SWalReader *pReader, int64_t fileFirstVer) {
}
pReader->pIdxFile = pIdxFile;
+
+ pReader->curFileFirstVer = fileFirstVer;
+
return 0;
}
@@ -372,7 +375,7 @@ int32_t walFetchHead(SWalReader *pRead, int64_t ver, SWalCkHead *pHead) {
int32_t walSkipFetchBody(SWalReader *pRead, const SWalCkHead *pHead) {
int64_t code;
- ASSERT(pRead->curVersion == pHead->head.version);
+ // ASSERT(pRead->curVersion == pHead->head.version);
code = taosLSeekFile(pRead->pLogFile, pHead->head.bodyLen, SEEK_CUR);
if (code < 0) {
@@ -415,7 +418,8 @@ int32_t walFetchBody(SWalReader *pRead, SWalCkHead **ppHead) {
}
if (walValidBodyCksum(*ppHead) != 0) {
- wError("vgId:%d, wal fetch body error, index:%" PRId64 ", since body checksum not passed", pRead->pWal->cfg.vgId, ver);
+ wError("vgId:%d, wal fetch body error, index:%" PRId64 ", since body checksum not passed", pRead->pWal->cfg.vgId,
+ ver);
pRead->curInvalid = 1;
terrno = TSDB_CODE_WAL_FILE_CORRUPTED;
return -1;
diff --git a/source/os/src/osDir.c b/source/os/src/osDir.c
index b755a35815fb64d6fa11ff3e0c35efc647318b83..cf74525543c0743e8d44225de7b888fdbd1ae790 100644
--- a/source/os/src/osDir.c
+++ b/source/os/src/osDir.c
@@ -31,6 +31,15 @@ typedef struct TdDir {
HANDLE hFind;
} TdDir;
+enum
+ {
+ WRDE_NOSPACE = 1, /* Ran out of memory. */
+ WRDE_BADCHAR, /* A metachar appears in the wrong place. */
+ WRDE_BADVAL, /* Undefined var reference with WRDE_UNDEF. */
+ WRDE_CMDSUB, /* Command substitution with WRDE_NOCMD. */
+ WRDE_SYNTAX /* Shell syntax error. */
+ };
+
int wordexp(char *words, wordexp_t *pwordexp, int flags) {
pwordexp->we_offs = 0;
pwordexp->we_wordc = 1;
@@ -49,6 +58,24 @@ int wordexp(char *words, wordexp_t *pwordexp, int flags) {
void wordfree(wordexp_t *pwordexp) {}
+#elif defined(DARWIN)
+
+#include
+#include
+#include
+#include
+#include
+
+typedef struct dirent dirent;
+typedef struct dirent TdDirEntry;
+
+typedef struct TdDir {
+ TdDirEntry dirEntry;
+ TdDirEntry dirEntry1;
+ TdDirEntryPtr dirEntryPtr;
+ DIR *pDir;
+} TdDir;
+
#else
#include
@@ -133,6 +160,7 @@ int32_t taosMulMkDir(const char *dirname) {
code = mkdir(temp, 0755);
#endif
if (code < 0 && errno != EEXIST) {
+ terrno = TAOS_SYSTEM_ERROR(errno);
return code;
}
*pos = TD_DIRSEP[0];
@@ -146,6 +174,7 @@ int32_t taosMulMkDir(const char *dirname) {
code = mkdir(temp, 0755);
#endif
if (code < 0 && errno != EEXIST) {
+ terrno = TAOS_SYSTEM_ERROR(errno);
return code;
}
}
@@ -158,6 +187,66 @@ int32_t taosMulMkDir(const char *dirname) {
return code;
}
+int32_t taosMulModeMkDir(const char *dirname, int mode) {
+ if (dirname == NULL) return -1;
+ char temp[1024];
+ char *pos = temp;
+ int32_t code = 0;
+#ifdef WINDOWS
+ taosRealPath(dirname, temp, sizeof(temp));
+ if (temp[1] == ':') pos += 3;
+#else
+ strcpy(temp, dirname);
+#endif
+
+ if (taosDirExist(temp)) {
+ chmod(temp, mode);
+ return code;
+ }
+
+ if (strncmp(temp, TD_DIRSEP, 1) == 0) {
+ pos += 1;
+ } else if (strncmp(temp, "." TD_DIRSEP, 2) == 0) {
+ pos += 2;
+ }
+
+ for (; *pos != '\0'; pos++) {
+ if (*pos == TD_DIRSEP[0]) {
+ *pos = '\0';
+#ifdef WINDOWS
+ code = _mkdir(temp, mode);
+#else
+ code = mkdir(temp, mode);
+#endif
+ if (code < 0 && errno != EEXIST) {
+ terrno = TAOS_SYSTEM_ERROR(errno);
+ return code;
+ }
+ *pos = TD_DIRSEP[0];
+ }
+ }
+
+ if (*(pos - 1) != TD_DIRSEP[0]) {
+#ifdef WINDOWS
+ code = _mkdir(temp, mode);
+#else
+ code = mkdir(temp, mode);
+#endif
+ if (code < 0 && errno != EEXIST) {
+ terrno = TAOS_SYSTEM_ERROR(errno);
+ return code;
+ }
+ }
+
+ if (code < 0 && errno == EEXIST) {
+ chmod(temp, mode);
+ return 0;
+ }
+
+ chmod(temp, mode);
+ return code;
+}
+
void taosRemoveOldFiles(const char *dirname, int32_t keepDays) {
TdDirPtr pDir = taosOpenDir(dirname);
if (pDir == NULL) return;
@@ -203,9 +292,21 @@ void taosRemoveOldFiles(const char *dirname, int32_t keepDays) {
int32_t taosExpandDir(const char *dirname, char *outname, int32_t maxlen) {
wordexp_t full_path;
- if (0 != wordexp(dirname, &full_path, 0)) {
- printf("failed to expand path:%s since %s", dirname, strerror(errno));
- wordfree(&full_path);
+ switch (wordexp (dirname, &full_path, 0)) {
+ case 0:
+ break;
+ case WRDE_NOSPACE:
+ wordfree (&full_path);
+ // printf("failed to expand path:%s since Out of memory\n", dirname);
+ return -1;
+ case WRDE_BADCHAR:
+ // printf("failed to expand path:%s since illegal occurrence of newline or one of |, &, ;, <, >, (, ), {, }\n", dirname);
+ return -1;
+ case WRDE_SYNTAX:
+ // printf("failed to expand path:%s since Shell syntax error, such as unbalanced parentheses or unmatched quotes\n", dirname);
+ return -1;
+ default:
+ // printf("failed to expand path:%s since %s\n", dirname, strerror(errno));
return -1;
}
@@ -260,7 +361,7 @@ char *taosDirName(char *name) {
name[0] = 0;
}
return name;
-#elif defined(_TD_DARWIN_64)
+#else
char *end = strrchr(name, '/');
if (end != NULL) {
*end = '\0';
@@ -268,8 +369,6 @@ char *taosDirName(char *name) {
name[0] = 0;
}
return name;
-#else
- return dirname(name);
#endif
}
@@ -279,7 +378,9 @@ char *taosDirEntryBaseName(char *name) {
_splitpath(name, NULL, NULL, Filename1, Ext1);
return name + (strlen(name) - strlen(Filename1) - strlen(Ext1));
#else
- return (char *)basename(name);
+ char *pPoint = strchr(name, '.');
+ if (pPoint != NULL) pPoint = 0;
+ return name;
#endif
}
@@ -303,6 +404,13 @@ TdDirPtr taosOpenDir(const char *dirname) {
return NULL;
}
return pDir;
+#elif defined(DARWIN)
+ DIR *pDir = opendir(dirname);
+ if (pDir == NULL) return NULL;
+ TdDirPtr dirPtr = (TdDirPtr)taosMemoryMalloc(sizeof(TdDir));
+ dirPtr->dirEntryPtr = (TdDirEntryPtr)&(dirPtr->dirEntry1);
+ dirPtr->pDir = pDir;
+ return dirPtr;
#else
return (TdDirPtr)opendir(dirname);
#endif
@@ -317,6 +425,12 @@ TdDirEntryPtr taosReadDir(TdDirPtr pDir) {
return NULL;
}
return (TdDirEntryPtr) & (pDir->dirEntry.findFileData);
+#elif defined(DARWIN)
+ if (readdir_r(pDir->pDir, (dirent*)&(pDir->dirEntry), (dirent**)&(pDir->dirEntryPtr)) == 0) {
+ return pDir->dirEntryPtr;
+ } else {
+ return NULL;
+ }
#else
return (TdDirEntryPtr)readdir((DIR *)pDir);
#endif
@@ -353,6 +467,11 @@ int32_t taosCloseDir(TdDirPtr *ppDir) {
taosMemoryFree(*ppDir);
*ppDir = NULL;
return 0;
+#elif defined(DARWIN)
+ closedir((*ppDir)->pDir);
+ taosMemoryFree(*ppDir);
+ *ppDir = NULL;
+ return 0;
#else
closedir((DIR *)*ppDir);
*ppDir = NULL;
diff --git a/source/os/src/osFile.c b/source/os/src/osFile.c
index f9797f631969d8a692d84078211e7e27174517e6..fab933755a73ba23be962cb76b34da002b8a3702 100644
--- a/source/os/src/osFile.c
+++ b/source/os/src/osFile.c
@@ -313,6 +313,7 @@ TdFilePtr taosOpenFile(const char *path, int32_t tdFileOptions) {
assert(!(tdFileOptions & TD_FILE_EXCL));
fp = fopen(path, mode);
if (fp == NULL) {
+ terrno = TAOS_SYSTEM_ERROR(errno);
return NULL;
}
} else {
@@ -335,6 +336,7 @@ TdFilePtr taosOpenFile(const char *path, int32_t tdFileOptions) {
fd = open(path, access, S_IRWXU | S_IRWXG | S_IRWXO);
#endif
if (fd == -1) {
+ terrno = TAOS_SYSTEM_ERROR(errno);
return NULL;
}
}
diff --git a/source/os/src/osSemaphore.c b/source/os/src/osSemaphore.c
index a7d2ba85311b8f2a9ababbde0f1f5857cb354484..310804da8dc5845d71b68897918f49e6afd1752e 100644
--- a/source/os/src/osSemaphore.c
+++ b/source/os/src/osSemaphore.c
@@ -106,475 +106,40 @@ int32_t tsem_timewait(tsem_t* sem, int64_t nanosecs) {
#elif defined(_TD_DARWIN_64)
-/*
- * darwin implementation
- */
-
#include
-// #define SEM_USE_PTHREAD
-// #define SEM_USE_POSIX
-// #define SEM_USE_SEM
-
-// #ifdef SEM_USE_SEM
-// #include
-// #include
-// #include
-// #include
-
-// static TdThread sem_thread;
-// static TdThreadOnce sem_once;
-// static task_t sem_port;
-// static volatile int sem_inited = 0;
-// static semaphore_t sem_exit;
-
-// static void *sem_thread_routine(void *arg) {
-// (void)arg;
-// setThreadName("sem_thrd");
-
-// sem_port = mach_task_self();
-// kern_return_t ret = semaphore_create(sem_port, &sem_exit, SYNC_POLICY_FIFO, 0);
-// if (ret != KERN_SUCCESS) {
-// fprintf(stderr, "==%s[%d]%s()==failed to create sem_exit\n", taosDirEntryBaseName(__FILE__), __LINE__, __func__);
-// sem_inited = -1;
-// return NULL;
-// }
-// sem_inited = 1;
-// semaphore_wait(sem_exit);
-// return NULL;
-// }
-
-// static void once_init(void) {
-// int r = 0;
-// r = taosThreadCreate(&sem_thread, NULL, sem_thread_routine, NULL);
-// if (r) {
-// fprintf(stderr, "==%s[%d]%s()==failed to create thread\n", taosDirEntryBaseName(__FILE__), __LINE__, __func__);
-// return;
-// }
-// while (sem_inited == 0) {
-// ;
-// }
-// }
-// #endif
-
-// struct tsem_s {
-// #ifdef SEM_USE_PTHREAD
-// TdThreadMutex lock;
-// TdThreadCond cond;
-// volatile int64_t val;
-// #elif defined(SEM_USE_POSIX)
-// size_t id;
-// sem_t *sem;
-// #elif defined(SEM_USE_SEM)
-// semaphore_t sem;
-// #else // SEM_USE_PTHREAD
-// dispatch_semaphore_t sem;
-// #endif // SEM_USE_PTHREAD
-
-// volatile unsigned int valid : 1;
-// };
-
-// int tsem_init(tsem_t *sem, int pshared, unsigned int value) {
-// // fprintf(stderr, "==%s[%d]%s():[%p]==creating\n", taosDirEntryBaseName(__FILE__), __LINE__, __func__, sem);
-// if (*sem) {
-// fprintf(stderr, "==%s[%d]%s():[%p]==already initialized\n", taosDirEntryBaseName(__FILE__), __LINE__, __func__,
-// sem);
-// abort();
-// }
-// struct tsem_s *p = (struct tsem_s *)taosMemoryCalloc(1, sizeof(*p));
-// if (!p) {
-// fprintf(stderr, "==%s[%d]%s():[%p]==out of memory\n", taosDirEntryBaseName(__FILE__), __LINE__, __func__, sem);
-// abort();
-// }
-
-// #ifdef SEM_USE_PTHREAD
-// int r = taosThreadMutexInit(&p->lock, NULL);
-// do {
-// if (r) break;
-// r = taosThreadCondInit(&p->cond, NULL);
-// if (r) {
-// taosThreadMutexDestroy(&p->lock);
-// break;
-// }
-// p->val = value;
-// } while (0);
-// if (r) {
-// fprintf(stderr, "==%s[%d]%s():[%p]==not created\n", taosDirEntryBaseName(__FILE__), __LINE__, __func__, sem);
-// abort();
-// }
-// #elif defined(SEM_USE_POSIX)
-// static size_t tick = 0;
-// do {
-// size_t id = atomic_add_fetch_64(&tick, 1);
-// if (id == SEM_VALUE_MAX) {
-// atomic_store_64(&tick, 0);
-// id = 0;
-// }
-// char name[NAME_MAX - 4];
-// snprintf(name, sizeof(name), "/t" PRId64, id);
-// p->sem = sem_open(name, O_CREAT | O_EXCL, pshared, value);
-// p->id = id;
-// if (p->sem != SEM_FAILED) break;
-// int e = errno;
-// if (e == EEXIST) continue;
-// if (e == EINTR) continue;
-// fprintf(stderr, "==%s[%d]%s():[%p]==not created[%d]%s\n", taosDirEntryBaseName(__FILE__), __LINE__, __func__,
-// sem,
-// e, strerror(e));
-// abort();
-// } while (p->sem == SEM_FAILED);
-// #elif defined(SEM_USE_SEM)
-// taosThreadOnce(&sem_once, once_init);
-// if (sem_inited != 1) {
-// fprintf(stderr, "==%s[%d]%s():[%p]==internal resource init failed\n", taosDirEntryBaseName(__FILE__), __LINE__,
-// __func__, sem);
-// errno = ENOMEM;
-// return -1;
-// }
-// kern_return_t ret = semaphore_create(sem_port, &p->sem, SYNC_POLICY_FIFO, value);
-// if (ret != KERN_SUCCESS) {
-// fprintf(stderr, "==%s[%d]%s():[%p]==semophore_create failed\n", taosDirEntryBaseName(__FILE__), __LINE__,
-// __func__,
-// sem);
-// // we fail-fast here, because we have less-doc about semaphore_create for the moment
-// abort();
-// }
-// #else // SEM_USE_PTHREAD
-// p->sem = dispatch_semaphore_create(value);
-// if (p->sem == NULL) {
-// fprintf(stderr, "==%s[%d]%s():[%p]==not created\n", taosDirEntryBaseName(__FILE__), __LINE__, __func__, sem);
-// abort();
-// }
-// #endif // SEM_USE_PTHREAD
-
-// p->valid = 1;
-
-// *sem = p;
-
-// return 0;
-// }
-
-// int tsem_wait(tsem_t *sem) {
-// if (!*sem) {
-// fprintf(stderr, "==%s[%d]%s():[%p]==not initialized\n", taosDirEntryBaseName(__FILE__), __LINE__, __func__, sem);
-// abort();
-// }
-// struct tsem_s *p = *sem;
-// if (!p->valid) {
-// fprintf(stderr, "==%s[%d]%s():[%p]==already destroyed\n", taosDirEntryBaseName(__FILE__), __LINE__, __func__,
-// sem); abort();
-// }
-// #ifdef SEM_USE_PTHREAD
-// if (taosThreadMutexLock(&p->lock)) {
-// fprintf(stderr, "==%s[%d]%s():[%p]==internal logic error\n", taosDirEntryBaseName(__FILE__), __LINE__, __func__,
-// sem);
-// abort();
-// }
-// p->val -= 1;
-// if (p->val < 0) {
-// if (taosThreadCondWait(&p->cond, &p->lock)) {
-// fprintf(stderr, "==%s[%d]%s():[%p]==internal logic error\n", taosDirEntryBaseName(__FILE__), __LINE__,
-// __func__,
-// sem);
-// abort();
-// }
-// }
-// if (taosThreadMutexUnlock(&p->lock)) {
-// fprintf(stderr, "==%s[%d]%s():[%p]==internal logic error\n", taosDirEntryBaseName(__FILE__), __LINE__, __func__,
-// sem);
-// abort();
-// }
-// return 0;
-// #elif defined(SEM_USE_POSIX)
-// return sem_wait(p->sem);
-// #elif defined(SEM_USE_SEM)
-// return semaphore_wait(p->sem);
-// #else // SEM_USE_PTHREAD
-// return dispatch_semaphore_wait(p->sem, DISPATCH_TIME_FOREVER);
-// #endif // SEM_USE_PTHREAD
-// }
-
-// int tsem_post(tsem_t *sem) {
-// if (!*sem) {
-// fprintf(stderr, "==%s[%d]%s():[%p]==not initialized\n", taosDirEntryBaseName(__FILE__), __LINE__, __func__, sem);
-// abort();
-// }
-// struct tsem_s *p = *sem;
-// if (!p->valid) {
-// fprintf(stderr, "==%s[%d]%s():[%p]==already destroyed\n", taosDirEntryBaseName(__FILE__), __LINE__, __func__,
-// sem); abort();
-// }
-// #ifdef SEM_USE_PTHREAD
-// if (taosThreadMutexLock(&p->lock)) {
-// fprintf(stderr, "==%s[%d]%s():[%p]==internal logic error\n", taosDirEntryBaseName(__FILE__), __LINE__, __func__,
-// sem);
-// abort();
-// }
-// p->val += 1;
-// if (p->val <= 0) {
-// if (taosThreadCondSignal(&p->cond)) {
-// fprintf(stderr, "==%s[%d]%s():[%p]==internal logic error\n", taosDirEntryBaseName(__FILE__), __LINE__,
-// __func__,
-// sem);
-// abort();
-// }
-// }
-// if (taosThreadMutexUnlock(&p->lock)) {
-// fprintf(stderr, "==%s[%d]%s():[%p]==internal logic error\n", taosDirEntryBaseName(__FILE__), __LINE__, __func__,
-// sem);
-// abort();
-// }
-// return 0;
-// #elif defined(SEM_USE_POSIX)
-// return sem_post(p->sem);
-// #elif defined(SEM_USE_SEM)
-// return semaphore_signal(p->sem);
-// #else // SEM_USE_PTHREAD
-// return dispatch_semaphore_signal(p->sem);
-// #endif // SEM_USE_PTHREAD
-// }
-
-// int tsem_destroy(tsem_t *sem) {
-// // fprintf(stderr, "==%s[%d]%s():[%p]==destroying\n", taosDirEntryBaseName(__FILE__), __LINE__, __func__, sem);
-// if (!*sem) {
-// // fprintf(stderr, "==%s[%d]%s():[%p]==not initialized\n", taosDirEntryBaseName(__FILE__), __LINE__, __func__,
-// sem);
-// // abort();
-// return 0;
-// }
-// struct tsem_s *p = *sem;
-// if (!p->valid) {
-// // fprintf(stderr, "==%s[%d]%s():[%p]==already destroyed\n", taosDirEntryBaseName(__FILE__), __LINE__, __func__,
-// // sem); abort();
-// return 0;
-// }
-// #ifdef SEM_USE_PTHREAD
-// if (taosThreadMutexLock(&p->lock)) {
-// fprintf(stderr, "==%s[%d]%s():[%p]==internal logic error\n", taosDirEntryBaseName(__FILE__), __LINE__, __func__,
-// sem);
-// abort();
-// }
-// p->valid = 0;
-// if (taosThreadCondDestroy(&p->cond)) {
-// fprintf(stderr, "==%s[%d]%s():[%p]==internal logic error\n", taosDirEntryBaseName(__FILE__), __LINE__, __func__,
-// sem);
-// abort();
-// }
-// if (taosThreadMutexUnlock(&p->lock)) {
-// fprintf(stderr, "==%s[%d]%s():[%p]==internal logic error\n", taosDirEntryBaseName(__FILE__), __LINE__, __func__,
-// sem);
-// abort();
-// }
-// if (taosThreadMutexDestroy(&p->lock)) {
-// fprintf(stderr, "==%s[%d]%s():[%p]==internal logic error\n", taosDirEntryBaseName(__FILE__), __LINE__, __func__,
-// sem);
-// abort();
-// }
-// #elif defined(SEM_USE_POSIX)
-// char name[NAME_MAX - 4];
-// snprintf(name, sizeof(name), "/t" PRId64, p->id);
-// int r = sem_unlink(name);
-// if (r) {
-// int e = errno;
-// fprintf(stderr, "==%s[%d]%s():[%p]==unlink failed[%d]%s\n", taosDirEntryBaseName(__FILE__), __LINE__, __func__,
-// sem,
-// e, strerror(e));
-// abort();
-// }
-// #elif defined(SEM_USE_SEM)
-// semaphore_destroy(sem_port, p->sem);
-// #else // SEM_USE_PTHREAD
-// #endif // SEM_USE_PTHREAD
-
-// p->valid = 0;
-// taosMemoryFree(p);
-
-// *sem = NULL;
-// return 0;
-// }
-typedef struct {
- pthread_mutex_t count_lock;
- pthread_cond_t count_bump;
- unsigned int count;
-} bosal_sem_t;
-
int tsem_init(tsem_t *psem, int flags, unsigned int count) {
- bosal_sem_t *pnewsem;
- int result;
-
- pnewsem = (bosal_sem_t *)malloc(sizeof(bosal_sem_t));
- if (!pnewsem) {
- return -1;
- }
- result = pthread_mutex_init(&pnewsem->count_lock, NULL);
- if (result) {
- free(pnewsem);
- return result;
- }
- result = pthread_cond_init(&pnewsem->count_bump, NULL);
- if (result) {
- pthread_mutex_destroy(&pnewsem->count_lock);
- free(pnewsem);
- return result;
- }
- pnewsem->count = count;
- *psem = (tsem_t)pnewsem;
+ *psem = dispatch_semaphore_create(count);
+ if (*psem == NULL) return -1;
return 0;
}
int tsem_destroy(tsem_t *psem) {
- bosal_sem_t *poldsem;
-
- if (!psem) {
- return EINVAL;
- }
- poldsem = (bosal_sem_t *)*psem;
-
- pthread_mutex_destroy(&poldsem->count_lock);
- pthread_cond_destroy(&poldsem->count_bump);
- free(poldsem);
+ if (psem == NULL || *psem == NULL) return -1;
+ // dispatch_release(*psem);
+ // *psem = NULL;
return 0;
}
int tsem_post(tsem_t *psem) {
- bosal_sem_t *pxsem;
- int result, xresult;
-
- if (!psem) {
- return EINVAL;
- }
- pxsem = (bosal_sem_t *)*psem;
-
- result = pthread_mutex_lock(&pxsem->count_lock);
- if (result) {
- return result;
- }
- pxsem->count = pxsem->count + 1;
-
- xresult = pthread_cond_signal(&pxsem->count_bump);
-
- result = pthread_mutex_unlock(&pxsem->count_lock);
- if (result) {
- return result;
- }
- if (xresult) {
- errno = xresult;
- return -1;
- }
- return 0;
-}
-
-int tsem_trywait(tsem_t *psem) {
- bosal_sem_t *pxsem;
- int result, xresult;
-
- if (!psem) {
- return EINVAL;
- }
- pxsem = (bosal_sem_t *)*psem;
-
- result = pthread_mutex_lock(&pxsem->count_lock);
- if (result) {
- return result;
- }
- xresult = 0;
-
- if (pxsem->count > 0) {
- pxsem->count--;
- } else {
- xresult = EAGAIN;
- }
- result = pthread_mutex_unlock(&pxsem->count_lock);
- if (result) {
- return result;
- }
- if (xresult) {
- errno = xresult;
- return -1;
- }
+ if (psem == NULL || *psem == NULL) return -1;
+ dispatch_semaphore_signal(*psem);
return 0;
}
int tsem_wait(tsem_t *psem) {
- bosal_sem_t *pxsem;
- int result, xresult;
-
- if (!psem) {
- return EINVAL;
- }
- pxsem = (bosal_sem_t *)*psem;
-
- result = pthread_mutex_lock(&pxsem->count_lock);
- if (result) {
- return result;
- }
- xresult = 0;
-
- if (pxsem->count == 0) {
- xresult = pthread_cond_wait(&pxsem->count_bump, &pxsem->count_lock);
- }
- if (!xresult) {
- if (pxsem->count > 0) {
- pxsem->count--;
- }
- }
- result = pthread_mutex_unlock(&pxsem->count_lock);
- if (result) {
- return result;
- }
- if (xresult) {
- errno = xresult;
- return -1;
- }
+ if (psem == NULL || *psem == NULL) return -1;
+ dispatch_semaphore_wait(*psem, DISPATCH_TIME_FOREVER);
return 0;
}
int tsem_timewait(tsem_t *psem, int64_t nanosecs) {
- struct timespec abstim = {
- .tv_sec = 0,
- .tv_nsec = nanosecs,
- };
-
- bosal_sem_t *pxsem;
- int result, xresult;
-
- if (!psem) {
- return EINVAL;
- }
- pxsem = (bosal_sem_t *)*psem;
-
- result = pthread_mutex_lock(&pxsem->count_lock);
- if (result) {
- return result;
- }
- xresult = 0;
-
- if (pxsem->count == 0) {
- xresult = pthread_cond_timedwait(&pxsem->count_bump, &pxsem->count_lock, &abstim);
- }
- if (!xresult) {
- if (pxsem->count > 0) {
- pxsem->count--;
- }
- }
- result = pthread_mutex_unlock(&pxsem->count_lock);
- if (result) {
- return result;
- }
- if (xresult) {
- errno = xresult;
- return -1;
- }
+ if (psem == NULL || *psem == NULL) return -1;
+ dispatch_semaphore_wait(*psem, nanosecs);
return 0;
}
-bool taosCheckPthreadValid(TdThread thread) {
- int32_t ret = taosThreadKill(thread, 0);
- if (ret == ESRCH) return false;
- if (ret == EINVAL) return false;
- // alive
- return true;
-}
+bool taosCheckPthreadValid(TdThread thread) { return thread != 0; }
int64_t taosGetSelfPthreadId() {
TdThread thread = taosThreadSelf();
diff --git a/source/os/src/osSocket.c b/source/os/src/osSocket.c
index f34032056cba6a2e2160e7b7a54abe79a959d301..b34d071773591600e06adc9b08873a89b7e2e1b4 100644
--- a/source/os/src/osSocket.c
+++ b/source/os/src/osSocket.c
@@ -312,14 +312,8 @@ uint32_t taosInetAddr(const char *ipAddr) {
return inet_addr(ipAddr);
#endif
}
-const char *taosInetNtoa(struct in_addr ipInt) {
-#ifdef WINDOWS
- // not thread safe, only for debug usage while print log
- static char tmpDstStr[16];
- return inet_ntop(AF_INET, &ipInt, tmpDstStr, INET6_ADDRSTRLEN);
-#else
- return inet_ntoa(ipInt);
-#endif
+const char *taosInetNtoa(struct in_addr ipInt, char *dstStr, int32_t len) {
+ return inet_ntop(AF_INET, &ipInt, dstStr, len);
}
#ifndef SIGPIPE
@@ -670,7 +664,7 @@ int taosGetLocalIp(const char *eth, char *ip) {
return -1;
}
memcpy(&sin, &ifr.ifr_addr, sizeof(sin));
- snprintf(ip, 64, "%s", inet_ntoa(sin.sin_addr));
+ taosInetNtoa(sin.sin_addr, ip, 64);
taosCloseSocketNoCheck1(fd);
#endif
return 0;
diff --git a/source/os/src/osString.c b/source/os/src/osString.c
index db3aaa49a6351e4b6a26a4a7b601f4e267c4f4c7..3619e9584791900321647399b01e76be3af928a3 100644
--- a/source/os/src/osString.c
+++ b/source/os/src/osString.c
@@ -306,8 +306,6 @@ int32_t taosMbsToWchars(TdWchar *pWchars, const char *pStrs, int32_t size) { ret
int32_t taosWcharToMb(char *pStr, TdWchar wchar) { return wctomb(pStr, wchar); }
-int32_t taosWcharsToMbs(char *pStrs, TdWchar *pWchars, int32_t size) { return wcstombs(pStrs, pWchars, size); }
-
char *taosStrCaseStr(const char *str, const char *pattern) {
size_t i;
@@ -326,6 +324,9 @@ char *taosStrCaseStr(const char *str, const char *pattern) {
int64_t taosStr2Int64(const char *str, char **pEnd, int32_t radix) {
int64_t tmp = strtoll(str, pEnd, radix);
+#ifdef DARWIN
+ if (errno == EINVAL) errno = 0;
+#endif
#ifdef TD_CHECK_STR_TO_INT_ERROR
assert(errno != ERANGE);
assert(errno != EINVAL);
@@ -335,6 +336,9 @@ int64_t taosStr2Int64(const char *str, char **pEnd, int32_t radix) {
uint64_t taosStr2UInt64(const char *str, char **pEnd, int32_t radix) {
uint64_t tmp = strtoull(str, pEnd, radix);
+#ifdef DARWIN
+ if (errno == EINVAL) errno = 0;
+#endif
#ifdef TD_CHECK_STR_TO_INT_ERROR
assert(errno != ERANGE);
assert(errno != EINVAL);
@@ -344,6 +348,9 @@ uint64_t taosStr2UInt64(const char *str, char **pEnd, int32_t radix) {
int32_t taosStr2Int32(const char *str, char **pEnd, int32_t radix) {
int32_t tmp = strtol(str, pEnd, radix);
+#ifdef DARWIN
+ if (errno == EINVAL) errno = 0;
+#endif
#ifdef TD_CHECK_STR_TO_INT_ERROR
assert(errno != ERANGE);
assert(errno != EINVAL);
@@ -353,6 +360,9 @@ int32_t taosStr2Int32(const char *str, char **pEnd, int32_t radix) {
uint32_t taosStr2UInt32(const char *str, char **pEnd, int32_t radix) {
uint32_t tmp = strtol(str, pEnd, radix);
+#ifdef DARWIN
+ if (errno == EINVAL) errno = 0;
+#endif
#ifdef TD_CHECK_STR_TO_INT_ERROR
assert(errno != ERANGE);
assert(errno != EINVAL);
@@ -362,6 +372,9 @@ uint32_t taosStr2UInt32(const char *str, char **pEnd, int32_t radix) {
int16_t taosStr2Int16(const char *str, char **pEnd, int32_t radix) {
int32_t tmp = strtol(str, pEnd, radix);
+#ifdef DARWIN
+ if (errno == EINVAL) errno = 0;
+#endif
#ifdef TD_CHECK_STR_TO_INT_ERROR
assert(errno != ERANGE);
assert(errno != EINVAL);
@@ -373,6 +386,9 @@ int16_t taosStr2Int16(const char *str, char **pEnd, int32_t radix) {
uint16_t taosStr2UInt16(const char *str, char **pEnd, int32_t radix) {
uint32_t tmp = strtoul(str, pEnd, radix);
+#ifdef DARWIN
+ if (errno == EINVAL) errno = 0;
+#endif
#ifdef TD_CHECK_STR_TO_INT_ERROR
assert(errno != ERANGE);
assert(errno != EINVAL);
@@ -394,6 +410,9 @@ int8_t taosStr2Int8(const char *str, char **pEnd, int32_t radix) {
uint8_t taosStr2UInt8(const char *str, char **pEnd, int32_t radix) {
uint32_t tmp = strtoul(str, pEnd, radix);
+#ifdef DARWIN
+ if (errno == EINVAL) errno = 0;
+#endif
#ifdef TD_CHECK_STR_TO_INT_ERROR
assert(errno != ERANGE);
assert(errno != EINVAL);
diff --git a/source/os/src/osSysinfo.c b/source/os/src/osSysinfo.c
index 3aa3f4f29ea402b08fb3733451f3c9a1b8df153a..a57bd4ee632a7abf829d95f8f47cbbfba94684bd 100644
--- a/source/os/src/osSysinfo.c
+++ b/source/os/src/osSysinfo.c
@@ -143,8 +143,11 @@ static int32_t taosGetSysCpuInfo(SysCpuInfo *cpuInfo) {
cpuInfo->user = CompareFileTime(&pre_userTime, &userTime);
cpuInfo->nice = 0;
}
-#elif defined(_TD_DARWIN_64)
- assert(0);
+#elif defined(DARWIN)
+ cpuInfo->idle = 0;
+ cpuInfo->system = 0;
+ cpuInfo->user = 0;
+ cpuInfo->nice = 0;
#else
TdFilePtr pFile = taosOpenFile(tsSysCpuFile, TD_FILE_READ | TD_FILE_STREAM);
if (pFile == NULL) {
@@ -180,8 +183,11 @@ static int32_t taosGetProcCpuInfo(ProcCpuInfo *cpuInfo) {
cpuInfo->cutime = 0;
cpuInfo->cstime = 0;
}
-#elif defined(_TD_DARWIN_64)
- assert(0);
+#elif defined(DARWIN)
+ cpuInfo->stime = 0;
+ cpuInfo->utime = 0;
+ cpuInfo->cutime = 0;
+ cpuInfo->cstime = 0;
#else
TdFilePtr pFile = taosOpenFile(tsProcCpuFile, TD_FILE_READ | TD_FILE_STREAM);
if (pFile == NULL) {
@@ -344,30 +350,27 @@ int32_t taosGetCpuInfo(char *cpuModel, int32_t maxLen, float *numOfCores) {
*numOfCores = si.dwNumberOfProcessors;
return 0;
#elif defined(_TD_DARWIN_64)
- char *line = NULL;
- size_t size = 0;
+ char buf[16];
int32_t done = 0;
int32_t code = -1;
- TdFilePtr pFile = taosOpenFile("/proc/cpuinfo", TD_FILE_READ | TD_FILE_STREAM);
- if (pFile == NULL) return false;
-
- while (done != 3 && (size = taosGetLineFile(pFile, &line)) != -1) {
- line[size - 1] = '\0';
- if (((done & 1) == 0) && strncmp(line, "model name", 10) == 0) {
- const char *v = strchr(line, ':') + 2;
- tstrncpy(cpuModel, v, maxLen);
- code = 0;
- done |= 1;
- } else if (((done & 2) == 0) && strncmp(line, "cpu cores", 9) == 0) {
- const char *v = strchr(line, ':') + 2;
- *numOfCores = atof(v);
- done |= 2;
- }
+ TdCmdPtr pCmd = taosOpenCmd("sysctl -n machdep.cpu.brand_string");
+ if (pCmd == NULL) return code;
+ if (taosGetsCmd(pCmd, maxLen, cpuModel) > 0) {
+ code = 0;
+ done |= 1;
}
+ taosCloseCmd(&pCmd);
- if (line != NULL) taosMemoryFree(line);
- taosCloseFile(&pFile);
+ pCmd = taosOpenCmd("sysctl -n machdep.cpu.core_count");
+ if (pCmd == NULL) return code;
+ memset(buf, 0, sizeof(buf));
+ if (taosGetsCmd(pCmd, sizeof(buf) - 1, buf) > 0) {
+ code = 0;
+ done |= 2;
+ *numOfCores = atof(buf);
+ }
+ taosCloseCmd(&pCmd);
return code;
#else
@@ -595,6 +598,7 @@ int32_t taosGetDiskSize(char *dataDir, SDiskSize *diskSize) {
#else
struct statvfs info;
if (statvfs(dataDir, &info)) {
+ terrno = TAOS_SYSTEM_ERROR(errno);
return -1;
} else {
diskSize->total = info.f_blocks * info.f_frsize;
diff --git a/source/os/src/osTimezone.c b/source/os/src/osTimezone.c
index 2ce2033a00ef92523cf724c9a16e4bb7ecbed2ef..34a09c3e6c6610ec1114238867cf9f068ebd5557 100644
--- a/source/os/src/osTimezone.c
+++ b/source/os/src/osTimezone.c
@@ -891,6 +891,7 @@ void taosGetSystemTimezone(char *outTimezoneStr, enum TdTimezone *tsTimezone) {
time_t tx1 = taosGetTimestampSec();
struct tm tm1;
taosLocalTime(&tx1, &tm1);
+ daylight = tm1.tm_isdst;
/*
* format example:
diff --git a/source/util/src/talgo.c b/source/util/src/talgo.c
index 5353cd9bfec94b460fc1f5c3d9ad657ead6ad76b..699f0db7a193b1e0390efd12de6f639de5b69f86 100644
--- a/source/util/src/talgo.c
+++ b/source/util/src/talgo.c
@@ -201,6 +201,7 @@ void *taosbsearch(const void *key, const void *base, int32_t nmemb, int32_t size
return (c > 0) ? p : (midx > 0 ? p - size : NULL);
} else {
ASSERT(0);
+ return NULL;
}
}
diff --git a/source/util/src/tarray.c b/source/util/src/tarray.c
index 454739348eec9cd4fa59f5ec359890be2c09c816..4f170c203c1f25af55db51c86d1fadacb41deb35 100644
--- a/source/util/src/tarray.c
+++ b/source/util/src/tarray.c
@@ -386,11 +386,12 @@ void* taosArrayDestroy(SArray* pArray) {
}
void taosArrayDestroyP(SArray* pArray, FDelete fp) {
- if(!pArray) return;
- for (int32_t i = 0; i < pArray->size; i++) {
- fp(*(void**)TARRAY_GET_ELEM(pArray, i));
+ if (pArray) {
+ for (int32_t i = 0; i < pArray->size; i++) {
+ fp(*(void**)TARRAY_GET_ELEM(pArray, i));
+ }
+ taosArrayDestroy(pArray);
}
- taosArrayDestroy(pArray);
}
void taosArrayDestroyEx(SArray* pArray, FDelete fp) {
diff --git a/source/util/src/tcompare.c b/source/util/src/tcompare.c
index fe3065b2b78a46a85d6dc04b90fcff4e0fe80f03..cbda4e46557e7931d1ce5dea31c2baa4f2d6ddef 100644
--- a/source/util/src/tcompare.c
+++ b/source/util/src/tcompare.c
@@ -186,15 +186,16 @@ int32_t compareLenPrefixedStr(const void *pLeft, const void *pRight) {
int32_t len1 = varDataLen(pLeft);
int32_t len2 = varDataLen(pRight);
- if (len1 != len2) {
- return len1 > len2 ? 1 : -1;
- } else {
- int32_t ret = strncmp(varDataVal(pLeft), varDataVal(pRight), len1);
- if (ret == 0) {
+ int32_t minLen = TMIN(len1, len2);
+ int32_t ret = strncmp(varDataVal(pLeft), varDataVal(pRight), minLen);
+ if (ret == 0) {
+ if (len1 == len2) {
return 0;
} else {
- return ret > 0 ? 1 : -1;
+ return len1 > len2 ? 1 : -1;
}
+ } else {
+ return ret > 0 ? 1 : -1;
}
}
@@ -243,9 +244,760 @@ int32_t compareJsonVal(const void *pLeft, const void *pRight) {
return 0;
}else{
assert(0);
+ return 0;
}
}
+int32_t compareInt8Int16(const void *pLeft, const void *pRight) {
+ int8_t left = GET_INT32_VAL(pLeft);
+ int16_t right = GET_INT16_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+int32_t compareInt8Int32(const void *pLeft, const void *pRight) {
+ int8_t left = GET_INT32_VAL(pLeft);
+ int32_t right = GET_INT32_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+int32_t compareInt8Int64(const void *pLeft, const void *pRight) {
+ int8_t left = GET_INT32_VAL(pLeft);
+ int64_t right = GET_INT64_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+int32_t compareInt8Float(const void *pLeft, const void *pRight) {
+ int8_t left = GET_INT32_VAL(pLeft);
+ float right = GET_FLOAT_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+int32_t compareInt8Double(const void *pLeft, const void *pRight) {
+ int8_t left = GET_INT32_VAL(pLeft);
+ double right = GET_DOUBLE_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+int32_t compareInt8Uint8(const void *pLeft, const void *pRight) {
+ int8_t left = GET_INT32_VAL(pLeft);
+ uint8_t right = GET_UINT8_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+int32_t compareInt8Uint16(const void *pLeft, const void *pRight) {
+ int8_t left = GET_INT32_VAL(pLeft);
+ uint16_t right = GET_UINT16_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+int32_t compareInt8Uint32(const void *pLeft, const void *pRight) {
+ int8_t left = GET_INT32_VAL(pLeft);
+ uint32_t right = GET_UINT32_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+int32_t compareInt8Uint64(const void *pLeft, const void *pRight) {
+ int8_t left = GET_INT32_VAL(pLeft);
+ uint64_t right = GET_UINT64_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+int32_t compareInt16Int8(const void *pLeft, const void *pRight) {
+ int16_t left = GET_INT32_VAL(pLeft);
+ int8_t right = GET_INT8_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+int32_t compareInt16Int32(const void *pLeft, const void *pRight) {
+ int16_t left = GET_INT32_VAL(pLeft);
+ int32_t right = GET_INT32_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+int32_t compareInt16Int64(const void *pLeft, const void *pRight) {
+ int16_t left = GET_INT32_VAL(pLeft);
+ int64_t right = GET_INT64_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+int32_t compareInt16Float(const void *pLeft, const void *pRight) {
+ int16_t left = GET_INT32_VAL(pLeft);
+ float right = GET_FLOAT_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+int32_t compareInt16Double(const void *pLeft, const void *pRight) {
+ int16_t left = GET_INT32_VAL(pLeft);
+ double right = GET_DOUBLE_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+int32_t compareInt16Uint8(const void *pLeft, const void *pRight) {
+ int16_t left = GET_INT32_VAL(pLeft);
+ uint8_t right = GET_UINT8_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+int32_t compareInt16Uint16(const void *pLeft, const void *pRight) {
+ int16_t left = GET_INT32_VAL(pLeft);
+ uint16_t right = GET_UINT16_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+int32_t compareInt16Uint32(const void *pLeft, const void *pRight) {
+ int16_t left = GET_INT32_VAL(pLeft);
+ uint32_t right = GET_UINT32_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+int32_t compareInt16Uint64(const void *pLeft, const void *pRight) {
+ int16_t left = GET_INT32_VAL(pLeft);
+ uint64_t right = GET_UINT64_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+
+int32_t compareInt32Int8(const void *pLeft, const void *pRight) {
+ int32_t left = GET_INT32_VAL(pLeft);
+ int8_t right = GET_INT8_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+int32_t compareInt32Int16(const void *pLeft, const void *pRight) {
+ int32_t left = GET_INT32_VAL(pLeft);
+ int16_t right = GET_INT16_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+int32_t compareInt32Int64(const void *pLeft, const void *pRight) {
+ int32_t left = GET_INT32_VAL(pLeft);
+ int64_t right = GET_INT64_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+int32_t compareInt32Float(const void *pLeft, const void *pRight) {
+ int32_t left = GET_INT32_VAL(pLeft);
+ float right = GET_FLOAT_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+int32_t compareInt32Double(const void *pLeft, const void *pRight) {
+ int32_t left = GET_INT32_VAL(pLeft);
+ double right = GET_DOUBLE_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+int32_t compareInt32Uint8(const void *pLeft, const void *pRight) {
+ int32_t left = GET_INT32_VAL(pLeft);
+ uint8_t right = GET_UINT8_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+int32_t compareInt32Uint16(const void *pLeft, const void *pRight) {
+ int32_t left = GET_INT32_VAL(pLeft);
+ uint16_t right = GET_UINT16_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+int32_t compareInt32Uint32(const void *pLeft, const void *pRight) {
+ int32_t left = GET_INT32_VAL(pLeft);
+ uint32_t right = GET_UINT32_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+int32_t compareInt32Uint64(const void *pLeft, const void *pRight) {
+ int32_t left = GET_INT32_VAL(pLeft);
+ uint64_t right = GET_UINT64_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+int32_t compareInt64Int8(const void *pLeft, const void *pRight) {
+ int64_t left = GET_INT64_VAL(pLeft);
+ int8_t right = GET_INT8_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+int32_t compareInt64Int16(const void *pLeft, const void *pRight) {
+ int64_t left = GET_INT64_VAL(pLeft);
+ int16_t right = GET_INT16_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+int32_t compareInt64Int32(const void *pLeft, const void *pRight) {
+ int64_t left = GET_INT64_VAL(pLeft);
+ int32_t right = GET_INT32_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+int32_t compareInt64Float(const void *pLeft, const void *pRight) {
+ int64_t left = GET_INT64_VAL(pLeft);
+ float right = GET_FLOAT_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+int32_t compareInt64Double(const void *pLeft, const void *pRight) {
+ int64_t left = GET_INT64_VAL(pLeft);
+ double right = GET_DOUBLE_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+int32_t compareInt64Uint8(const void *pLeft, const void *pRight) {
+ int64_t left = GET_INT64_VAL(pLeft);
+ uint8_t right = GET_UINT8_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+int32_t compareInt64Uint16(const void *pLeft, const void *pRight) {
+ int64_t left = GET_INT64_VAL(pLeft);
+ uint16_t right = GET_UINT16_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+int32_t compareInt64Uint32(const void *pLeft, const void *pRight) {
+ int64_t left = GET_INT64_VAL(pLeft);
+ uint32_t right = GET_UINT32_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+int32_t compareInt64Uint64(const void *pLeft, const void *pRight) {
+ int64_t left = GET_INT64_VAL(pLeft);
+ uint64_t right = GET_UINT64_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+int32_t compareFloatInt8(const void *pLeft, const void *pRight) {
+ float left = GET_FLOAT_VAL(pLeft);
+ int8_t right = GET_INT8_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+int32_t compareFloatInt16(const void *pLeft, const void *pRight) {
+ float left = GET_FLOAT_VAL(pLeft);
+ int16_t right = GET_INT16_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+int32_t compareFloatInt32(const void *pLeft, const void *pRight) {
+ float left = GET_FLOAT_VAL(pLeft);
+ int32_t right = GET_INT32_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+int32_t compareFloatInt64(const void *pLeft, const void *pRight) {
+ float left = GET_FLOAT_VAL(pLeft);
+ int64_t right = GET_INT64_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+int32_t compareFloatDouble(const void *pLeft, const void *pRight) {
+ float left = GET_FLOAT_VAL(pLeft);
+ double right = GET_DOUBLE_VAL(pRight);
+
+ if (isnan(left) && isnan(right)) {
+ return 0;
+ }
+
+ if (isnan(left)) {
+ return -1;
+ }
+
+ if (isnan(right)) {
+ return 1;
+ }
+
+ if (FLT_EQUAL(left, right)) {
+ return 0;
+ }
+ return FLT_GREATER(left, right) ? 1 : -1;
+}
+
+int32_t compareFloatUint8(const void *pLeft, const void *pRight) {
+ float left = GET_FLOAT_VAL(pLeft);
+ uint8_t right = GET_UINT8_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+int32_t compareFloatUint16(const void *pLeft, const void *pRight) {
+ float left = GET_FLOAT_VAL(pLeft);
+ uint16_t right = GET_UINT16_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+int32_t compareFloatUint32(const void *pLeft, const void *pRight) {
+ float left = GET_FLOAT_VAL(pLeft);
+ uint32_t right = GET_UINT32_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+int32_t compareFloatUint64(const void *pLeft, const void *pRight) {
+ float left = GET_FLOAT_VAL(pLeft);
+ uint64_t right = GET_UINT64_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+int32_t compareDoubleInt8(const void *pLeft, const void *pRight) {
+ double left = GET_DOUBLE_VAL(pLeft);
+ int8_t right = GET_INT8_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+int32_t compareDoubleInt16(const void *pLeft, const void *pRight) {
+ double left = GET_DOUBLE_VAL(pLeft);
+ int16_t right = GET_INT16_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+int32_t compareDoubleInt32(const void *pLeft, const void *pRight) {
+ double left = GET_DOUBLE_VAL(pLeft);
+ int32_t right = GET_INT32_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+int32_t compareDoubleInt64(const void *pLeft, const void *pRight) {
+ double left = GET_DOUBLE_VAL(pLeft);
+ int64_t right = GET_INT64_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+int32_t compareDoubleFloat(const void *pLeft, const void *pRight) {
+ double left = GET_DOUBLE_VAL(pLeft);
+ float right = GET_FLOAT_VAL(pRight);
+
+ if (isnan(left) && isnan(right)) {
+ return 0;
+ }
+
+ if (isnan(left)) {
+ return -1;
+ }
+
+ if (isnan(right)) {
+ return 1;
+ }
+
+ if (FLT_EQUAL(left, right)) {
+ return 0;
+ }
+ return FLT_GREATER(left, right) ? 1 : -1;
+}
+
+int32_t compareDoubleUint8(const void *pLeft, const void *pRight) {
+ double left = GET_DOUBLE_VAL(pLeft);
+ uint8_t right = GET_UINT8_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+int32_t compareDoubleUint16(const void *pLeft, const void *pRight) {
+ double left = GET_DOUBLE_VAL(pLeft);
+ uint16_t right = GET_UINT16_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+int32_t compareDoubleUint32(const void *pLeft, const void *pRight) {
+ double left = GET_DOUBLE_VAL(pLeft);
+ uint32_t right = GET_UINT32_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+int32_t compareDoubleUint64(const void *pLeft, const void *pRight) {
+ double left = GET_DOUBLE_VAL(pLeft);
+ uint64_t right = GET_UINT64_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+int32_t compareUint8Int8(const void *pLeft, const void *pRight) {
+ uint8_t left = GET_UINT8_VAL(pLeft);
+ int8_t right = GET_INT8_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+int32_t compareUint8Int16(const void *pLeft, const void *pRight) {
+ uint8_t left = GET_UINT8_VAL(pLeft);
+ int16_t right = GET_INT16_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+int32_t compareUint8Int32(const void *pLeft, const void *pRight) {
+ uint8_t left = GET_UINT8_VAL(pLeft);
+ int32_t right = GET_INT32_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+int32_t compareUint8Int64(const void *pLeft, const void *pRight) {
+ uint8_t left = GET_UINT8_VAL(pLeft);
+ int64_t right = GET_INT64_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+int32_t compareUint8Float(const void *pLeft, const void *pRight) {
+ uint8_t left = GET_UINT8_VAL(pLeft);
+ float right = GET_FLOAT_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+int32_t compareUint8Double(const void *pLeft, const void *pRight) {
+ uint8_t left = GET_UINT8_VAL(pLeft);
+ double right = GET_DOUBLE_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+int32_t compareUint8Uint16(const void *pLeft, const void *pRight) {
+ uint8_t left = GET_UINT8_VAL(pLeft);
+ uint16_t right = GET_UINT16_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+int32_t compareUint8Uint32(const void *pLeft, const void *pRight) {
+ uint8_t left = GET_UINT8_VAL(pLeft);
+ uint32_t right = GET_UINT32_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+int32_t compareUint8Uint64(const void *pLeft, const void *pRight) {
+ uint8_t left = GET_UINT8_VAL(pLeft);
+ uint64_t right = GET_UINT64_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+int32_t compareUint16Int8(const void *pLeft, const void *pRight) {
+ uint16_t left = GET_UINT16_VAL(pLeft);
+ int8_t right = GET_INT8_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+int32_t compareUint16Int16(const void *pLeft, const void *pRight) {
+ uint16_t left = GET_UINT16_VAL(pLeft);
+ int16_t right = GET_INT16_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+int32_t compareUint16Int32(const void *pLeft, const void *pRight) {
+ uint16_t left = GET_UINT16_VAL(pLeft);
+ int32_t right = GET_INT32_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+int32_t compareUint16Int64(const void *pLeft, const void *pRight) {
+ uint16_t left = GET_UINT16_VAL(pLeft);
+ int64_t right = GET_INT64_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+int32_t compareUint16Float(const void *pLeft, const void *pRight) {
+ uint16_t left = GET_UINT16_VAL(pLeft);
+ float right = GET_FLOAT_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+int32_t compareUint16Double(const void *pLeft, const void *pRight) {
+ uint16_t left = GET_UINT16_VAL(pLeft);
+ double right = GET_DOUBLE_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+int32_t compareUint16Uint8(const void *pLeft, const void *pRight) {
+ uint16_t left = GET_UINT16_VAL(pLeft);
+ uint8_t right = GET_UINT8_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+int32_t compareUint16Uint32(const void *pLeft, const void *pRight) {
+ uint16_t left = GET_UINT16_VAL(pLeft);
+ uint32_t right = GET_UINT32_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+int32_t compareUint16Uint64(const void *pLeft, const void *pRight) {
+ uint16_t left = GET_UINT16_VAL(pLeft);
+ uint64_t right = GET_UINT64_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+int32_t compareUint32Int8(const void *pLeft, const void *pRight) {
+ uint32_t left = GET_UINT32_VAL(pLeft);
+ int8_t right = GET_INT8_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+int32_t compareUint32Int16(const void *pLeft, const void *pRight) {
+ uint32_t left = GET_UINT32_VAL(pLeft);
+ int16_t right = GET_INT16_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+int32_t compareUint32Int32(const void *pLeft, const void *pRight) {
+ uint32_t left = GET_UINT32_VAL(pLeft);
+ int32_t right = GET_INT32_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+int32_t compareUint32Int64(const void *pLeft, const void *pRight) {
+ uint32_t left = GET_UINT32_VAL(pLeft);
+ int64_t right = GET_INT64_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+int32_t compareUint32Float(const void *pLeft, const void *pRight) {
+ uint32_t left = GET_UINT32_VAL(pLeft);
+ float right = GET_FLOAT_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+int32_t compareUint32Double(const void *pLeft, const void *pRight) {
+ uint32_t left = GET_UINT32_VAL(pLeft);
+ double right = GET_DOUBLE_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+int32_t compareUint32Uint8(const void *pLeft, const void *pRight) {
+ uint32_t left = GET_UINT32_VAL(pLeft);
+ uint8_t right = GET_UINT8_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+int32_t compareUint32Uint16(const void *pLeft, const void *pRight) {
+ uint32_t left = GET_UINT32_VAL(pLeft);
+ uint16_t right = GET_UINT16_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+int32_t compareUint32Uint64(const void *pLeft, const void *pRight) {
+ uint32_t left = GET_UINT32_VAL(pLeft);
+ uint64_t right = GET_UINT64_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+int32_t compareUint64Int8(const void *pLeft, const void *pRight) {
+ uint64_t left = GET_UINT64_VAL(pLeft);
+ int8_t right = GET_INT8_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+int32_t compareUint64Int16(const void *pLeft, const void *pRight) {
+ uint64_t left = GET_UINT64_VAL(pLeft);
+ int16_t right = GET_INT16_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+int32_t compareUint64Int32(const void *pLeft, const void *pRight) {
+ uint64_t left = GET_UINT64_VAL(pLeft);
+ int32_t right = GET_INT32_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+int32_t compareUint64Int64(const void *pLeft, const void *pRight) {
+ uint64_t left = GET_UINT64_VAL(pLeft);
+ int64_t right = GET_INT64_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+int32_t compareUint64Float(const void *pLeft, const void *pRight) {
+ uint64_t left = GET_UINT64_VAL(pLeft);
+ float right = GET_FLOAT_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+int32_t compareUint64Double(const void *pLeft, const void *pRight) {
+ uint64_t left = GET_UINT64_VAL(pLeft);
+ double right = GET_DOUBLE_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+int32_t compareUint64Uint8(const void *pLeft, const void *pRight) {
+ uint64_t left = GET_UINT64_VAL(pLeft);
+ uint8_t right = GET_UINT8_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+int32_t compareUint64Uint16(const void *pLeft, const void *pRight) {
+ uint64_t left = GET_UINT64_VAL(pLeft);
+ uint16_t right = GET_UINT16_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+int32_t compareUint64Uint32(const void *pLeft, const void *pRight) {
+ uint64_t left = GET_UINT64_VAL(pLeft);
+ uint32_t right = GET_UINT32_VAL(pRight);
+ if (left > right) return 1;
+ if (left < right) return -1;
+ return 0;
+}
+
+
int32_t compareJsonValDesc(const void *pLeft, const void *pRight) {
return compareJsonVal(pRight, pLeft);
}
diff --git a/source/util/src/tcompression.c b/source/util/src/tcompression.c
index ba877915b13b6e522367637bd7713edc8feee0f3..62b9d876281bcdf53247f1f37330df9026d28718 100644
--- a/source/util/src/tcompression.c
+++ b/source/util/src/tcompression.c
@@ -50,6 +50,7 @@
#define _DEFAULT_SOURCE
#include "tcompression.h"
#include "lz4.h"
+#include "tRealloc.h"
#include "tlog.h"
#ifdef TD_TSZ
@@ -814,24 +815,24 @@ int32_t tsCompressFloatImp(const char *const input, const int32_t nelements, cha
uint32_t predicted = prev_value;
uint32_t diff = curr.bits ^ predicted;
- int32_t leading_zeros = FLOAT_BYTES * BITS_PER_BYTE;
- int32_t trailing_zeros = leading_zeros;
+ int32_t clz = FLOAT_BYTES * BITS_PER_BYTE;
+ int32_t ctz = clz;
if (diff) {
- trailing_zeros = BUILDIN_CTZ(diff);
- leading_zeros = BUILDIN_CLZ(diff);
+ ctz = BUILDIN_CTZ(diff);
+ clz = BUILDIN_CLZ(diff);
}
uint8_t nbytes = 0;
uint8_t flag;
- if (trailing_zeros > leading_zeros) {
- nbytes = (uint8_t)(FLOAT_BYTES - trailing_zeros / BITS_PER_BYTE);
+ if (ctz > clz) {
+ nbytes = (uint8_t)(FLOAT_BYTES - ctz / BITS_PER_BYTE);
if (nbytes > 0) nbytes--;
flag = ((uint8_t)1 << 3) | nbytes;
} else {
- nbytes = (uint8_t)(FLOAT_BYTES - leading_zeros / BITS_PER_BYTE);
+ nbytes = (uint8_t)(FLOAT_BYTES - clz / BITS_PER_BYTE);
if (nbytes > 0) nbytes--;
flag = nbytes;
}
@@ -994,3 +995,621 @@ int32_t tsDecompressDoubleLossyImp(const char *input, int32_t compressedSize, co
return tdszDecompress(SZ_DOUBLE, input + 1, compressedSize - 1, nelements, output);
}
#endif
+
+/*************************************************************************
+ * STREAM COMPRESSION
+ *************************************************************************/
+#define I64_SAFE_ADD(a, b) (((a) >= 0 && (b) <= INT64_MAX - (b)) || ((a) < 0 && (b) >= INT64_MIN - (a)))
+typedef struct SCompressor SCompressor;
+
+static int32_t tCompBool(SCompressor *pCmprsor, const void *pData, int32_t nData);
+static int32_t tCompInt(SCompressor *pCmprsor, const void *pData, int32_t nData);
+static int32_t tCompFloat(SCompressor *pCmprsor, const void *pData, int32_t nData);
+static int32_t tCompDouble(SCompressor *pCmprsor, const void *pData, int32_t nData);
+static int32_t tCompTimestamp(SCompressor *pCmprsor, const void *pData, int32_t nData);
+static int32_t tCompBinary(SCompressor *pCmprsor, const void *pData, int32_t nData);
+static struct {
+ int8_t type;
+ int32_t bytes;
+ int8_t isVarLen;
+ int32_t (*cmprFn)(SCompressor *, const void *, int32_t nData);
+} DATA_TYPE_INFO[] = {
+ {TSDB_DATA_TYPE_NULL, 0, 0, NULL}, // TSDB_DATA_TYPE_NULL
+ {TSDB_DATA_TYPE_BOOL, 1, 0, tCompBool}, // TSDB_DATA_TYPE_BOOL
+ {TSDB_DATA_TYPE_TINYINT, 1, 0, tCompInt}, // TSDB_DATA_TYPE_TINYINT
+ {TSDB_DATA_TYPE_SMALLINT, 2, 0, tCompInt}, // TSDB_DATA_TYPE_SMALLINT
+ {TSDB_DATA_TYPE_INT, 4, 0, tCompInt}, // TSDB_DATA_TYPE_INT
+ {TSDB_DATA_TYPE_BIGINT, 8, 0, tCompInt}, // TSDB_DATA_TYPE_BIGINT
+ {TSDB_DATA_TYPE_FLOAT, 4, 0, tCompFloat}, // TSDB_DATA_TYPE_FLOAT
+ {TSDB_DATA_TYPE_DOUBLE, 8, 0, tCompDouble}, // TSDB_DATA_TYPE_DOUBLE
+ {TSDB_DATA_TYPE_VARCHAR, 1, 1, tCompBinary}, // TSDB_DATA_TYPE_VARCHAR
+ {TSDB_DATA_TYPE_TIMESTAMP, 8, 0, tCompTimestamp}, // pTSDB_DATA_TYPE_TIMESTAMP
+ {TSDB_DATA_TYPE_NCHAR, 1, 1, tCompBinary}, // TSDB_DATA_TYPE_NCHAR
+ {TSDB_DATA_TYPE_UTINYINT, 1, 0, tCompInt}, // TSDB_DATA_TYPE_UTINYINT
+ {TSDB_DATA_TYPE_USMALLINT, 2, 0, tCompInt}, // TSDB_DATA_TYPE_USMALLINT
+ {TSDB_DATA_TYPE_UINT, 4, 0, tCompInt}, // TSDB_DATA_TYPE_UINT
+ {TSDB_DATA_TYPE_UBIGINT, 8, 0, tCompInt}, // TSDB_DATA_TYPE_UBIGINT
+ {TSDB_DATA_TYPE_JSON, 1, 1, tCompBinary}, // TSDB_DATA_TYPE_JSON
+ {TSDB_DATA_TYPE_VARBINARY, 1, 1, tCompBinary}, // TSDB_DATA_TYPE_VARBINARY
+ {TSDB_DATA_TYPE_DECIMAL, 1, 1, tCompBinary}, // TSDB_DATA_TYPE_DECIMAL
+ {TSDB_DATA_TYPE_BLOB, 1, 1, tCompBinary}, // TSDB_DATA_TYPE_BLOB
+ {TSDB_DATA_TYPE_MEDIUMBLOB, 1, 1, tCompBinary}, // TSDB_DATA_TYPE_MEDIUMBLOB
+};
+
+struct SCompressor {
+ int8_t type;
+ int8_t cmprAlg;
+ int8_t autoAlloc;
+ int32_t nVal;
+ uint8_t *aBuf[2];
+ int64_t nBuf[2];
+ union {
+ // Timestamp ----
+ struct {
+ int64_t ts_prev_val;
+ int64_t ts_prev_delta;
+ uint8_t *ts_flag_p;
+ };
+ // Integer ----
+ struct {
+ int64_t i_prev;
+ int32_t i_selector;
+ int32_t i_start;
+ int32_t i_end;
+ uint64_t i_aZigzag[241];
+ int8_t i_aBitN[241];
+ };
+ // Float ----
+ struct {
+ uint32_t f_prev;
+ uint8_t *f_flag_p;
+ };
+ // Double ----
+ struct {
+ uint64_t d_prev;
+ uint8_t *d_flag_p;
+ };
+ };
+};
+
+// Timestamp =====================================================
+static int32_t tCompSetCopyMode(SCompressor *pCmprsor) {
+ int32_t code = 0;
+
+ if (pCmprsor->nVal) {
+ if (pCmprsor->autoAlloc) {
+ code = tRealloc(&pCmprsor->aBuf[1], sizeof(int64_t) * pCmprsor->nVal);
+ if (code) return code;
+ }
+ pCmprsor->nBuf[1] = 0;
+
+ int64_t n = 1;
+ int64_t value;
+ int64_t delta;
+ uint64_t vZigzag;
+ while (n < pCmprsor->nBuf[0]) {
+ uint8_t aN[2];
+ aN[0] = pCmprsor->aBuf[0][n] & 0xf;
+ aN[1] = pCmprsor->aBuf[0][n] >> 4;
+
+ n++;
+
+ for (int32_t i = 0; i < 2; i++) {
+ vZigzag = 0;
+ for (uint8_t j = 0; j < aN[i]; j++) {
+ vZigzag |= (((uint64_t)pCmprsor->aBuf[0][n]) << (8 * j));
+ n++;
+ }
+
+ int64_t delta_of_delta = ZIGZAG_DECODE(int64_t, vZigzag);
+ if (pCmprsor->nBuf[1] == 0) {
+ delta = 0;
+ value = delta_of_delta;
+ } else {
+ delta = delta_of_delta + delta;
+ value = delta + value;
+ }
+
+ memcpy(pCmprsor->aBuf[1] + pCmprsor->nBuf[1], &value, sizeof(int64_t));
+ pCmprsor->nBuf[1] += sizeof(int64_t);
+
+ if (n >= pCmprsor->nBuf[0]) break;
+ }
+ }
+
+ ASSERT(n == pCmprsor->nBuf[0]);
+
+ if (pCmprsor->autoAlloc) {
+ code = tRealloc(&pCmprsor->aBuf[0], pCmprsor->nBuf[1] + 1);
+ if (code) return code;
+ }
+ memcpy(pCmprsor->aBuf[0] + 1, pCmprsor->aBuf[1], pCmprsor->nBuf[1]);
+ pCmprsor->nBuf[0] = 1 + pCmprsor->nBuf[1];
+ }
+ pCmprsor->aBuf[0][0] = 0;
+
+ return code;
+}
+static int32_t tCompTimestamp(SCompressor *pCmprsor, const void *pData, int32_t nData) {
+ int32_t code = 0;
+
+ int64_t ts = *(int64_t *)pData;
+ ASSERT(pCmprsor->type == TSDB_DATA_TYPE_TIMESTAMP);
+ ASSERT(nData == 8);
+
+ if (pCmprsor->aBuf[0][0] == 1) {
+ if (pCmprsor->nVal == 0) {
+ pCmprsor->ts_prev_val = ts;
+ pCmprsor->ts_prev_delta = -ts;
+ }
+
+ if (!I64_SAFE_ADD(ts, -pCmprsor->ts_prev_val)) {
+ code = tCompSetCopyMode(pCmprsor);
+ if (code) return code;
+ goto _copy_cmpr;
+ }
+ int64_t delta = ts - pCmprsor->ts_prev_val;
+
+ if (!I64_SAFE_ADD(delta, -pCmprsor->ts_prev_delta)) {
+ code = tCompSetCopyMode(pCmprsor);
+ if (code) return code;
+ goto _copy_cmpr;
+ }
+ int64_t delta_of_delta = delta - pCmprsor->ts_prev_delta;
+ uint64_t vZigzag = ZIGZAG_ENCODE(int64_t, delta_of_delta);
+
+ pCmprsor->ts_prev_val = ts;
+ pCmprsor->ts_prev_delta = delta;
+
+ if ((pCmprsor->nVal & 0x1) == 0) {
+ if (pCmprsor->autoAlloc) {
+ code = tRealloc(&pCmprsor->aBuf[0], pCmprsor->nBuf[0] + 17);
+ if (code) return code;
+ }
+
+ pCmprsor->ts_flag_p = pCmprsor->aBuf[0] + pCmprsor->nBuf[0];
+ pCmprsor->nBuf[0]++;
+ pCmprsor->ts_flag_p[0] = 0;
+ while (vZigzag) {
+ pCmprsor->aBuf[0][pCmprsor->nBuf[0]] = (vZigzag & 0xff);
+ pCmprsor->nBuf[0]++;
+ pCmprsor->ts_flag_p[0]++;
+ vZigzag >>= 8;
+ }
+ } else {
+ while (vZigzag) {
+ pCmprsor->aBuf[0][pCmprsor->nBuf[0]] = (vZigzag & 0xff);
+ pCmprsor->nBuf[0]++;
+ pCmprsor->ts_flag_p[0] += 0x10;
+ vZigzag >>= 8;
+ }
+ }
+ } else {
+ _copy_cmpr:
+ if (pCmprsor->autoAlloc) {
+ code = tRealloc(&pCmprsor->aBuf[0], pCmprsor->nBuf[0] + sizeof(ts));
+ if (code) return code;
+ }
+
+ memcpy(pCmprsor->aBuf[0] + pCmprsor->nBuf[0], &ts, sizeof(ts));
+ pCmprsor->nBuf[0] += sizeof(ts);
+ }
+ pCmprsor->nVal++;
+
+ return code;
+}
+
+// Integer =====================================================
+#define SIMPLE8B_MAX ((uint64_t)1152921504606846974LL)
+static const uint8_t BIT_PER_INTEGER[] = {0, 0, 1, 2, 3, 4, 5, 6, 7, 8, 10, 12, 15, 20, 30, 60};
+static const int32_t SELECTOR_TO_ELEMS[] = {240, 120, 60, 30, 20, 15, 12, 10, 8, 7, 6, 5, 4, 3, 2, 1};
+static const uint8_t BIT_TO_SELECTOR[] = {0, 2, 3, 4, 5, 6, 7, 8, 9, 10, 10, 11, 11, 12, 12, 12,
+ 13, 13, 13, 13, 13, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 15,
+ 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15,
+ 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15};
+
+static int32_t tCompInt(SCompressor *pCmprsor, const void *pData, int32_t nData) {
+ int32_t code = 0;
+
+ ASSERT(nData == DATA_TYPE_INFO[pCmprsor->type].bytes);
+
+ if (pCmprsor->aBuf[0][0] == 0) {
+ int64_t val;
+
+ switch (pCmprsor->type) {
+ case TSDB_DATA_TYPE_TINYINT:
+ val = *(int8_t *)pData;
+ break;
+ case TSDB_DATA_TYPE_SMALLINT:
+ val = *(int16_t *)pData;
+ break;
+ case TSDB_DATA_TYPE_INT:
+ val = *(int32_t *)pData;
+ break;
+ case TSDB_DATA_TYPE_BIGINT:
+ val = *(int64_t *)pData;
+ break;
+ case TSDB_DATA_TYPE_UTINYINT:
+ val = *(uint8_t *)pData;
+ break;
+ case TSDB_DATA_TYPE_USMALLINT:
+ val = *(uint16_t *)pData;
+ break;
+ case TSDB_DATA_TYPE_UINT:
+ val = *(uint32_t *)pData;
+ break;
+ case TSDB_DATA_TYPE_UBIGINT:
+ val = *(int64_t *)pData;
+ break;
+ default:
+ ASSERT(0);
+ break;
+ }
+
+ if (!I64_SAFE_ADD(val, -pCmprsor->i_prev)) {
+ // TODO
+ goto _copy_cmpr;
+ }
+
+ int64_t diff = val - pCmprsor->i_prev;
+ uint64_t vZigzag = ZIGZAG_ENCODE(int64_t, diff);
+ if (vZigzag >= SIMPLE8B_MAX) {
+ // TODO
+ goto _copy_cmpr;
+ }
+
+ int8_t nBit = (vZigzag) ? (64 - BUILDIN_CLZL(vZigzag)) : 0;
+ pCmprsor->i_prev = val;
+
+ while (1) {
+ int32_t nEle = (pCmprsor->i_end + 241 - pCmprsor->i_start) % 241;
+
+ if (nEle + 1 <= SELECTOR_TO_ELEMS[pCmprsor->i_selector] && nEle + 1 <= SELECTOR_TO_ELEMS[BIT_TO_SELECTOR[nBit]]) {
+ if (pCmprsor->i_selector < BIT_TO_SELECTOR[nBit]) {
+ pCmprsor->i_selector = BIT_TO_SELECTOR[nBit];
+ }
+ pCmprsor->i_end = (pCmprsor->i_end + 1) % 241;
+ pCmprsor->i_aZigzag[pCmprsor->i_end] = vZigzag;
+ pCmprsor->i_aBitN[pCmprsor->i_end] = nBit;
+ break;
+ } else {
+ while (nEle < SELECTOR_TO_ELEMS[pCmprsor->i_selector]) {
+ pCmprsor->i_selector++;
+ }
+ nEle = SELECTOR_TO_ELEMS[pCmprsor->i_selector];
+
+ if (pCmprsor->autoAlloc) {
+ code = tRealloc(&pCmprsor->aBuf[0], pCmprsor->nBuf[0] + sizeof(uint64_t));
+ if (code) return code;
+ }
+
+ uint64_t *bp = (uint64_t *)(pCmprsor->aBuf[0] + pCmprsor->nBuf[0]);
+ pCmprsor->nBuf[0] += sizeof(uint64_t);
+ bp[0] = pCmprsor->i_selector;
+ uint8_t bits = BIT_PER_INTEGER[pCmprsor->i_selector];
+ for (int32_t iVal = 0; iVal < nEle; iVal++) {
+ bp[0] |= ((pCmprsor->i_aZigzag[pCmprsor->i_start] & ((((uint64_t)1) << bits) - 1)) << (bits * iVal + 4));
+ pCmprsor->i_start = (pCmprsor->i_start + 1) % 241;
+ }
+
+ // reset and continue
+ pCmprsor->i_selector = 0;
+ for (int32_t iVal = pCmprsor->i_start; iVal < pCmprsor->i_end; iVal = (iVal + 1) % 241) {
+ if (pCmprsor->i_selector < BIT_TO_SELECTOR[pCmprsor->i_aBitN[iVal]]) {
+ pCmprsor->i_selector = BIT_TO_SELECTOR[pCmprsor->i_aBitN[iVal]];
+ }
+ }
+ }
+ }
+ } else {
+ _copy_cmpr:
+ code = tRealloc(&pCmprsor->aBuf[0], pCmprsor->nBuf[0] + nData);
+ if (code) return code;
+
+ memcpy(pCmprsor->aBuf[0] + pCmprsor->nBuf[0], pData, nData);
+ pCmprsor->nBuf[0] += nData;
+ }
+ pCmprsor->nVal++;
+
+ return code;
+}
+
+// Float =====================================================
+static int32_t tCompFloat(SCompressor *pCmprsor, const void *pData, int32_t nData) {
+ int32_t code = 0;
+
+ ASSERT(nData == sizeof(float));
+
+ union {
+ float f;
+ uint32_t u;
+ } val = {.f = *(float *)pData};
+
+ uint32_t diff = val.u ^ pCmprsor->f_prev;
+ pCmprsor->f_prev = val.u;
+
+ int32_t clz, ctz;
+ if (diff) {
+ clz = BUILDIN_CLZ(diff);
+ ctz = BUILDIN_CTZ(diff);
+ } else {
+ clz = 32;
+ ctz = 32;
+ }
+
+ uint8_t nBytes;
+ if (clz < ctz) {
+ nBytes = sizeof(uint32_t) - ctz / BITS_PER_BYTE;
+ if (nBytes) diff >>= (32 - nBytes * BITS_PER_BYTE);
+ } else {
+ nBytes = sizeof(uint32_t) - clz / BITS_PER_BYTE;
+ }
+ if (nBytes == 0) nBytes++;
+
+ if ((pCmprsor->nVal & 0x1) == 0) {
+ if (pCmprsor->autoAlloc) {
+ code = tRealloc(&pCmprsor->aBuf[0], pCmprsor->nBuf[0] + 9);
+ if (code) return code;
+ }
+
+ pCmprsor->f_flag_p = &pCmprsor->aBuf[0][pCmprsor->nBuf[0]];
+ pCmprsor->nBuf[0]++;
+
+ if (clz < ctz) {
+ pCmprsor->f_flag_p[0] = (0x08 | (nBytes - 1));
+ } else {
+ pCmprsor->f_flag_p[0] = nBytes - 1;
+ }
+ } else {
+ if (clz < ctz) {
+ pCmprsor->f_flag_p[0] |= ((0x08 | (nBytes - 1)) << 4);
+ } else {
+ pCmprsor->f_flag_p[0] |= ((nBytes - 1) << 4);
+ }
+ }
+ for (; nBytes; nBytes--) {
+ pCmprsor->aBuf[0][pCmprsor->nBuf[0]] = (diff & 0xff);
+ pCmprsor->nBuf[0]++;
+ diff >>= BITS_PER_BYTE;
+ }
+ pCmprsor->nVal++;
+
+ return code;
+}
+
+// Double =====================================================
+static int32_t tCompDouble(SCompressor *pCmprsor, const void *pData, int32_t nData) {
+ int32_t code = 0;
+
+ ASSERT(nData == sizeof(double));
+
+ union {
+ double d;
+ uint64_t u;
+ } val = {.d = *(double *)pData};
+
+ uint64_t diff = val.u ^ pCmprsor->d_prev;
+ pCmprsor->d_prev = val.u;
+
+ int32_t clz, ctz;
+ if (diff) {
+ clz = BUILDIN_CLZL(diff);
+ ctz = BUILDIN_CTZL(diff);
+ } else {
+ clz = 64;
+ ctz = 64;
+ }
+
+ uint8_t nBytes;
+ if (clz < ctz) {
+ nBytes = sizeof(uint64_t) - ctz / BITS_PER_BYTE;
+ if (nBytes) diff >>= (64 - nBytes * BITS_PER_BYTE);
+ } else {
+ nBytes = sizeof(uint64_t) - clz / BITS_PER_BYTE;
+ }
+ if (nBytes == 0) nBytes++;
+
+ if ((pCmprsor->nVal & 0x1) == 0) {
+ if (pCmprsor->autoAlloc) {
+ code = tRealloc(&pCmprsor->aBuf[0], pCmprsor->nBuf[0] + 17);
+ if (code) return code;
+ }
+
+ pCmprsor->d_flag_p = &pCmprsor->aBuf[0][pCmprsor->nBuf[0]];
+ pCmprsor->nBuf[0]++;
+
+ if (clz < ctz) {
+ pCmprsor->d_flag_p[0] = (0x08 | (nBytes - 1));
+ } else {
+ pCmprsor->d_flag_p[0] = nBytes - 1;
+ }
+ } else {
+ if (clz < ctz) {
+ pCmprsor->d_flag_p[0] |= ((0x08 | (nBytes - 1)) << 4);
+ } else {
+ pCmprsor->d_flag_p[0] |= ((nBytes - 1) << 4);
+ }
+ }
+ for (; nBytes; nBytes--) {
+ pCmprsor->aBuf[0][pCmprsor->nBuf[0]] = (diff & 0xff);
+ pCmprsor->nBuf[0]++;
+ diff >>= BITS_PER_BYTE;
+ }
+ pCmprsor->nVal++;
+
+ return code;
+}
+
+// Binary =====================================================
+static int32_t tCompBinary(SCompressor *pCmprsor, const void *pData, int32_t nData) {
+ int32_t code = 0;
+
+ if (nData) {
+ if (pCmprsor->autoAlloc) {
+ code = tRealloc(&pCmprsor->aBuf[0], pCmprsor->nBuf[0] + nData);
+ if (code) return code;
+ }
+
+ memcpy(pCmprsor->aBuf[0] + pCmprsor->nBuf[0], pData, nData);
+ pCmprsor->nBuf[0] += nData;
+ }
+ pCmprsor->nVal++;
+
+ return code;
+}
+
+// Bool =====================================================
+static const uint8_t BOOL_CMPR_TABLE[] = {0b01, 0b0100, 0b010000, 0b01000000};
+
+static int32_t tCompBool(SCompressor *pCmprsor, const void *pData, int32_t nData) {
+ int32_t code = 0;
+
+ bool vBool = *(int8_t *)pData;
+
+ int32_t mod4 = pCmprsor->nVal & 3;
+ if (mod4 == 0) {
+ pCmprsor->nBuf[0]++;
+
+ if (pCmprsor->autoAlloc) {
+ code = tRealloc(&pCmprsor->aBuf[0], pCmprsor->nBuf[0]);
+ if (code) return code;
+ }
+
+ pCmprsor->aBuf[0][pCmprsor->nBuf[0] - 1] = 0;
+ }
+ if (vBool) {
+ pCmprsor->aBuf[0][pCmprsor->nBuf[0] - 1] |= BOOL_CMPR_TABLE[mod4];
+ }
+ pCmprsor->nVal++;
+
+ return code;
+}
+
+// SCompressor =====================================================
+int32_t tCompressorCreate(SCompressor **ppCmprsor) {
+ int32_t code = 0;
+
+ *ppCmprsor = (SCompressor *)taosMemoryCalloc(1, sizeof(SCompressor));
+ if ((*ppCmprsor) == NULL) {
+ code = TSDB_CODE_OUT_OF_MEMORY;
+ goto _exit;
+ }
+
+ code = tRealloc(&(*ppCmprsor)->aBuf[0], 1024);
+ if (code) {
+ taosMemoryFree(*ppCmprsor);
+ *ppCmprsor = NULL;
+ goto _exit;
+ }
+
+_exit:
+ return code;
+}
+
+int32_t tCompressorDestroy(SCompressor *pCmprsor) {
+ int32_t code = 0;
+
+ if (pCmprsor) {
+ int32_t nBuf = sizeof(pCmprsor->aBuf) / sizeof(pCmprsor->aBuf[0]);
+ for (int32_t iBuf = 0; iBuf < nBuf; iBuf++) {
+ tFree(pCmprsor->aBuf[iBuf]);
+ }
+
+ taosMemoryFree(pCmprsor);
+ }
+
+ return code;
+}
+
+int32_t tCompressorReset(SCompressor *pCmprsor, int8_t type, int8_t cmprAlg, int8_t autoAlloc) {
+ int32_t code = 0;
+
+ pCmprsor->type = type;
+ pCmprsor->cmprAlg = cmprAlg;
+ pCmprsor->autoAlloc = autoAlloc;
+ pCmprsor->nVal = 0;
+
+ switch (type) {
+ case TSDB_DATA_TYPE_TIMESTAMP:
+ pCmprsor->ts_prev_val = 0;
+ pCmprsor->ts_prev_delta = 0;
+ pCmprsor->ts_flag_p = NULL;
+ pCmprsor->aBuf[0][0] = 1; // For timestamp, 1 means compressed, 0 otherwise
+ pCmprsor->nBuf[0] = 1;
+ break;
+ case TSDB_DATA_TYPE_BOOL:
+ pCmprsor->nBuf[0] = 0;
+ break;
+ case TSDB_DATA_TYPE_BINARY:
+ pCmprsor->nBuf[0] = 0;
+ break;
+ case TSDB_DATA_TYPE_FLOAT:
+ pCmprsor->f_prev = 0;
+ pCmprsor->f_flag_p = NULL;
+ pCmprsor->aBuf[0][0] = 0; // 0 means compressed, 1 otherwise (for backward compatibility)
+ pCmprsor->nBuf[0] = 1;
+ break;
+ case TSDB_DATA_TYPE_DOUBLE:
+ pCmprsor->d_prev = 0;
+ pCmprsor->d_flag_p = NULL;
+ pCmprsor->aBuf[0][0] = 0; // 0 means compressed, 1 otherwise (for backward compatibility)
+ pCmprsor->nBuf[0] = 1;
+ break;
+ case TSDB_DATA_TYPE_TINYINT:
+ case TSDB_DATA_TYPE_SMALLINT:
+ case TSDB_DATA_TYPE_INT:
+ case TSDB_DATA_TYPE_BIGINT:
+ case TSDB_DATA_TYPE_UTINYINT:
+ case TSDB_DATA_TYPE_USMALLINT:
+ case TSDB_DATA_TYPE_UINT:
+ case TSDB_DATA_TYPE_UBIGINT:
+ pCmprsor->i_prev = 0;
+ pCmprsor->i_selector = 0;
+ pCmprsor->i_start = 0;
+ pCmprsor->i_end = 0;
+ pCmprsor->aBuf[0][0] = 0; // 0 means compressed, 1 otherwise (for backward compatibility)
+ pCmprsor->nBuf[0] = 1;
+ break;
+ default:
+ break;
+ }
+
+ return code;
+}
+
+int32_t tCompGen(SCompressor *pCmprsor, const uint8_t **ppData, int64_t *nData) {
+ int32_t code = 0;
+
+ if (pCmprsor->nVal == 0) {
+ *ppData = NULL;
+ *nData = 0;
+ return code;
+ }
+
+ if (pCmprsor->cmprAlg == TWO_STAGE_COMP /*|| IS_VAR_DATA_TYPE(pCmprsor->type)*/) {
+ code = tRealloc(&pCmprsor->aBuf[1], pCmprsor->nBuf[0] + 1);
+ if (code) return code;
+
+ int64_t ret = LZ4_compress_default(pCmprsor->aBuf[0], pCmprsor->aBuf[1] + 1, pCmprsor->nBuf[0], pCmprsor->nBuf[0]);
+ if (ret) {
+ pCmprsor->aBuf[1][0] = 0;
+ pCmprsor->nBuf[1] = ret + 1;
+ } else {
+ pCmprsor->aBuf[1][0] = 1;
+ memcpy(pCmprsor->aBuf[1] + 1, pCmprsor->aBuf[0], pCmprsor->nBuf[0]);
+ pCmprsor->nBuf[1] = pCmprsor->nBuf[0] + 1;
+ }
+
+ *ppData = pCmprsor->aBuf[1];
+ *nData = pCmprsor->nBuf[1];
+ } else {
+ *ppData = pCmprsor->aBuf[0];
+ *nData = pCmprsor->nBuf[0];
+ }
+
+ return code;
+}
+
+int32_t tCompress(SCompressor *pCmprsor, const void *pData, int64_t nData) {
+ return DATA_TYPE_INFO[pCmprsor->type].cmprFn(pCmprsor, pData, nData);
+}
\ No newline at end of file
diff --git a/source/util/src/terror.c b/source/util/src/terror.c
index 662a3f0c88012191f3a7d76c78eb6d06a8b20292..1906a771275d16f1aa90841db28b69eb0a401b2e 100644
--- a/source/util/src/terror.c
+++ b/source/util/src/terror.c
@@ -121,7 +121,7 @@ TAOS_DEFINE_ERROR(TSDB_CODE_TSC_CONN_KILLED, "Connection killed")
TAOS_DEFINE_ERROR(TSDB_CODE_TSC_SQL_SYNTAX_ERROR, "Syntax error in SQL")
TAOS_DEFINE_ERROR(TSDB_CODE_TSC_DB_NOT_SELECTED, "Database not specified or available")
TAOS_DEFINE_ERROR(TSDB_CODE_TSC_INVALID_TABLE_NAME, "Table does not exist")
-TAOS_DEFINE_ERROR(TSDB_CODE_TSC_EXCEED_SQL_LIMIT, "SQL statement too long, check maxSQLLength config")
+TAOS_DEFINE_ERROR(TSDB_CODE_TSC_EXCEED_SQL_LIMIT, "SQL statement too long")
TAOS_DEFINE_ERROR(TSDB_CODE_TSC_FILE_EMPTY, "File is empty")
TAOS_DEFINE_ERROR(TSDB_CODE_TSC_LINE_SYNTAX_ERROR, "Syntax error in Line")
TAOS_DEFINE_ERROR(TSDB_CODE_TSC_NO_META_CACHED, "No table meta cached")
@@ -288,6 +288,7 @@ TAOS_DEFINE_ERROR(TSDB_CODE_MND_CONSUMER_NOT_READY, "Consumer not ready")
TAOS_DEFINE_ERROR(TSDB_CODE_MND_TOPIC_SUBSCRIBED, "Topic subscribed cannot be dropped")
TAOS_DEFINE_ERROR(TSDB_CODE_MND_TOPIC_MUST_BE_DELETED, "Topic must be dropped first")
TAOS_DEFINE_ERROR(TSDB_CODE_MND_CGROUP_USED, "Consumer group being used by some consumer")
+TAOS_DEFINE_ERROR(TSDB_CODE_MND_IN_REBALANCE, "Topic being rebalanced")
// mnode-stream
TAOS_DEFINE_ERROR(TSDB_CODE_MND_STREAM_ALREADY_EXIST, "Stream already exists")
@@ -579,6 +580,7 @@ TAOS_DEFINE_ERROR(TSDB_CODE_FUNC_FUNTION_PARA_NUM, "Invalid function par
TAOS_DEFINE_ERROR(TSDB_CODE_FUNC_FUNTION_PARA_TYPE, "Invalid function para type")
TAOS_DEFINE_ERROR(TSDB_CODE_FUNC_FUNTION_PARA_VALUE, "Invalid function para value")
TAOS_DEFINE_ERROR(TSDB_CODE_FUNC_NOT_BUILTIN_FUNTION, "Not buildin function")
+TAOS_DEFINE_ERROR(TSDB_CODE_FUNC_DUP_TIMESTAMP, "Duplicate timestamps not allowed in function")
//udf
TAOS_DEFINE_ERROR(TSDB_CODE_UDF_STOPPING, "udf is stopping")
@@ -618,10 +620,13 @@ TAOS_DEFINE_ERROR(TSDB_CODE_RSMA_REMOVE_EXISTS, "Rsma remove exists"
TAOS_DEFINE_ERROR(TSDB_CODE_RSMA_FETCH_MSG_MSSED_UP, "Rsma fetch msg is messed up")
TAOS_DEFINE_ERROR(TSDB_CODE_RSMA_EMPTY_INFO, "Rsma info is empty")
TAOS_DEFINE_ERROR(TSDB_CODE_RSMA_INVALID_SCHEMA, "Rsma invalid schema")
+TAOS_DEFINE_ERROR(TSDB_CODE_RSMA_REGEX_MATCH, "Rsma regex match")
+TAOS_DEFINE_ERROR(TSDB_CODE_RSMA_STREAM_STATE_OPEN, "Rsma stream state open")
+TAOS_DEFINE_ERROR(TSDB_CODE_RSMA_STREAM_STATE_COMMIT, "Rsma stream state commit")
//index
TAOS_DEFINE_ERROR(TSDB_CODE_INDEX_REBUILDING, "Index is rebuilding")
-TAOS_DEFINE_ERROR(TSDB_CODE_INDEX_REBUILDING, "Invalid index file")
+TAOS_DEFINE_ERROR(TSDB_CODE_INDEX_INVALID_FILE, "Index file is invalid")
//tmq
TAOS_DEFINE_ERROR(TSDB_CODE_TMQ_INVALID_MSG, "Invalid message")
diff --git a/source/util/src/tlog.c b/source/util/src/tlog.c
index 2e8239c68f0861486d2d6175d698dc76ed92b128..46203658f15848fffca902606ba0df50647dac86 100644
--- a/source/util/src/tlog.c
+++ b/source/util/src/tlog.c
@@ -97,7 +97,7 @@ int32_t tqDebugFlag = 135;
int32_t fsDebugFlag = 135;
int32_t metaDebugFlag = 135;
int32_t udfDebugFlag = 135;
-int32_t smaDebugFlag = 135;
+int32_t smaDebugFlag = 131;
int32_t idxDebugFlag = 135;
int64_t dbgEmptyW = 0;
@@ -429,7 +429,7 @@ static inline int32_t taosBuildLogHead(char *buffer, const char *flags) {
}
static inline void taosPrintLogImp(ELogLevel level, int32_t dflag, const char *buffer, int32_t len) {
- if ((dflag & DEBUG_FILE) && tsLogObj.logHandle && tsLogObj.logHandle->pFile != NULL) {
+ if ((dflag & DEBUG_FILE) && tsLogObj.logHandle && tsLogObj.logHandle->pFile != NULL && osLogSpaceAvailable()) {
taosUpdateLogNums(level);
if (tsAsyncLog) {
taosPushLogBuffer(tsLogObj.logHandle, buffer, len);
@@ -446,12 +446,14 @@ static inline void taosPrintLogImp(ELogLevel level, int32_t dflag, const char *b
}
if (dflag & DEBUG_SCREEN) {
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-result"
write(1, buffer, (uint32_t)len);
+#pragma GCC diagnostic pop
}
}
void taosPrintLog(const char *flags, ELogLevel level, int32_t dflag, const char *format, ...) {
- if (!osLogSpaceAvailable()) return;
if (!(dflag & DEBUG_FILE) && !(dflag & DEBUG_SCREEN)) return;
char buffer[LOG_MAX_LINE_BUFFER_SIZE];
diff --git a/source/util/src/tpagedbuf.c b/source/util/src/tpagedbuf.c
index ac2128dd70b70b88dd32617067ff0ce8e0d3172a..e1a8697ad53519b3f0db59f28ee6f82355ac82dd 100644
--- a/source/util/src/tpagedbuf.c
+++ b/source/util/src/tpagedbuf.c
@@ -33,7 +33,7 @@ struct SDiskbasedBuf {
int32_t pageSize; // current used page size
int32_t inMemPages; // numOfPages that are allocated in memory
SList* freePgList; // free page list
- SHashObj* groupSet; // id hash table, todo remove it
+ SArray* pIdList; // page id list
SHashObj* all;
SList* lruList;
void* emptyDummyIdList; // dummy id list
@@ -241,26 +241,7 @@ static int32_t loadPageFromDisk(SDiskbasedBuf* pBuf, SPageInfo* pg) {
return 0;
}
-static SIDList addNewGroup(SDiskbasedBuf* pBuf, int32_t groupId) {
- assert(taosHashGet(pBuf->groupSet, (const char*)&groupId, sizeof(int32_t)) == NULL);
-
- SArray* pa = taosArrayInit(1, POINTER_BYTES);
- int32_t ret = taosHashPut(pBuf->groupSet, (const char*)&groupId, sizeof(int32_t), &pa, POINTER_BYTES);
- assert(ret == 0);
-
- return pa;
-}
-
-static SPageInfo* registerPage(SDiskbasedBuf* pBuf, int32_t groupId, int32_t pageId) {
- SIDList list = NULL;
-
- char** p = taosHashGet(pBuf->groupSet, (const char*)&groupId, sizeof(int32_t));
- if (p == NULL) { // it is a new group id
- list = addNewGroup(pBuf, groupId);
- } else {
- list = (SIDList)(*p);
- }
-
+static SPageInfo* registerPage(SDiskbasedBuf* pBuf, int32_t pageId) {
pBuf->numOfPages += 1;
SPageInfo* ppi = taosMemoryMalloc(sizeof(SPageInfo));
@@ -273,7 +254,7 @@ static SPageInfo* registerPage(SDiskbasedBuf* pBuf, int32_t groupId, int32_t pag
ppi->pn = NULL;
ppi->dirty = false;
- return *(SPageInfo**)taosArrayPush(list, &ppi);
+ return *(SPageInfo**)taosArrayPush(pBuf->pIdList, &ppi);
}
static SListNode* getEldestUnrefedPage(SDiskbasedBuf* pBuf) {
@@ -293,16 +274,6 @@ static SListNode* getEldestUnrefedPage(SDiskbasedBuf* pBuf) {
}
}
- // int32_t pos = listNEles(pBuf->lruList);
- // SListIter iter1 = {0};
- // tdListInitIter(pBuf->lruList, &iter1, TD_LIST_BACKWARD);
- // SListNode* pn1 = NULL;
- // while((pn1 = tdListNext(&iter1)) != NULL) {
- // SPageInfo* pageInfo = *(SPageInfo**) pn1->data;
- // printf("page %d is used, dirty:%d, pos:%d\n", pageInfo->pageId, pageInfo->dirty, pos - 1);
- // pos -= 1;
- // }
-
return pn;
}
@@ -313,7 +284,6 @@ static char* evacOneDataPage(SDiskbasedBuf* pBuf) {
// all pages are referenced by user, try to allocate new space
if (pn == NULL) {
- assert(0);
int32_t prev = pBuf->inMemPages;
// increase by 50% of previous mem pages
@@ -333,7 +303,6 @@ static char* evacOneDataPage(SDiskbasedBuf* pBuf) {
bufPage = flushPageToDisk(pBuf, d);
}
- ASSERT((bufPage != NULL) || terrno != TSDB_CODE_SUCCESS);
return bufPage;
}
@@ -382,7 +351,8 @@ int32_t createDiskbasedBuf(SDiskbasedBuf** pBuf, int32_t pagesize, int32_t inMem
// init id hash table
_hash_fn_t fn = taosGetDefaultHashFunction(TSDB_DATA_TYPE_INT);
- pPBuf->groupSet = taosHashInit(10, fn, true, false);
+ pPBuf->pIdList = taosArrayInit(4, POINTER_BYTES);
+
pPBuf->assistBuf = taosMemoryMalloc(pPBuf->pageSize + 2); // EXTRA BYTES
pPBuf->all = taosHashInit(10, fn, true, false);
@@ -399,18 +369,12 @@ int32_t createDiskbasedBuf(SDiskbasedBuf** pBuf, int32_t pagesize, int32_t inMem
return TSDB_CODE_SUCCESS;
}
-void* getNewBufPage(SDiskbasedBuf* pBuf, int32_t groupId, int32_t* pageId) {
+void* getNewBufPage(SDiskbasedBuf* pBuf, int32_t* pageId) {
pBuf->statis.getPages += 1;
char* availablePage = NULL;
if (NO_IN_MEM_AVAILABLE_PAGES(pBuf)) {
availablePage = evacOneDataPage(pBuf);
-
- // Failed to allocate a new buffer page, and there is an error occurs.
- if (availablePage == NULL) {
- assert(0);
- return NULL;
- }
}
SPageInfo* pi = NULL;
@@ -425,7 +389,7 @@ void* getNewBufPage(SDiskbasedBuf* pBuf, int32_t groupId, int32_t* pageId) {
*pageId = (++pBuf->allocateId);
// register page id info
- pi = registerPage(pBuf, groupId, *pageId);
+ pi = registerPage(pBuf, *pageId);
// add to hash map
taosHashPut(pBuf->all, pageId, sizeof(int32_t), &pi, POINTER_BYTES);
@@ -526,19 +490,11 @@ void releaseBufPageInfo(SDiskbasedBuf* pBuf, SPageInfo* pi) {
pBuf->statis.releasePages += 1;
}
-size_t getNumOfBufGroupId(const SDiskbasedBuf* pBuf) { return taosHashGetSize(pBuf->groupSet); }
-
size_t getTotalBufSize(const SDiskbasedBuf* pBuf) { return (size_t)pBuf->totalBufSize; }
-SIDList getDataBufPagesIdList(SDiskbasedBuf* pBuf, int32_t groupId) {
- assert(pBuf != NULL);
-
- char** p = taosHashGet(pBuf->groupSet, (const char*)&groupId, sizeof(int32_t));
- if (p == NULL) { // it is a new group id
- return pBuf->emptyDummyIdList;
- } else {
- return (SArray*)(*p);
- }
+SIDList getDataBufPagesIdList(SDiskbasedBuf* pBuf) {
+ ASSERT(pBuf != NULL);
+ return pBuf->pIdList;
}
void destroyDiskbasedBuf(SDiskbasedBuf* pBuf) {
@@ -578,26 +534,21 @@ void destroyDiskbasedBuf(SDiskbasedBuf* pBuf) {
taosRemoveFile(pBuf->path);
taosMemoryFreeClear(pBuf->path);
- SArray** p = taosHashIterate(pBuf->groupSet, NULL);
- while (p) {
- size_t n = taosArrayGetSize(*p);
- for (int32_t i = 0; i < n; ++i) {
- SPageInfo* pi = taosArrayGetP(*p, i);
- taosMemoryFreeClear(pi->pData);
- taosMemoryFreeClear(pi);
- }
-
- taosArrayDestroy(*p);
- p = taosHashIterate(pBuf->groupSet, p);
+ size_t n = taosArrayGetSize(pBuf->pIdList);
+ for (int32_t i = 0; i < n; ++i) {
+ SPageInfo* pi = taosArrayGetP(pBuf->pIdList, i);
+ taosMemoryFreeClear(pi->pData);
+ taosMemoryFreeClear(pi);
}
+ taosArrayDestroy(pBuf->pIdList);
+
tdListFree(pBuf->lruList);
tdListFree(pBuf->freePgList);
taosArrayDestroy(pBuf->emptyDummyIdList);
taosArrayDestroy(pBuf->pFree);
- taosHashCleanup(pBuf->groupSet);
taosHashCleanup(pBuf->all);
taosMemoryFreeClear(pBuf->id);
@@ -661,36 +612,36 @@ void dBufPrintStatis(const SDiskbasedBuf* pBuf) {
pBuf->totalBufSize / 1024.0, pBuf->numOfPages, listNEles(pBuf->lruList) * pBuf->pageSize / 1024.0,
listNEles(pBuf->lruList), pBuf->fileSize / 1024.0, pBuf->pageSize / 1024.0f, pBuf->id);
- printf(
- "Get/Release pages:%d/%d, flushToDisk:%.2f Kb (%d Pages), loadFromDisk:%.2f Kb (%d Pages), avgPageSize:%.2f Kb\n",
- ps->getPages, ps->releasePages, ps->flushBytes / 1024.0f, ps->flushPages, ps->loadBytes / 1024.0f, ps->loadPages,
- ps->loadBytes / (1024.0 * ps->loadPages));
+ if (ps->loadPages > 0) {
+ printf(
+ "Get/Release pages:%d/%d, flushToDisk:%.2f Kb (%d Pages), loadFromDisk:%.2f Kb (%d Pages), avgPageSize:%.2f Kb\n",
+ ps->getPages, ps->releasePages, ps->flushBytes / 1024.0f, ps->flushPages, ps->loadBytes / 1024.0f,
+ ps->loadPages, ps->loadBytes / (1024.0 * ps->loadPages));
+ } else {
+ printf("no page loaded\n");
+ }
}
void clearDiskbasedBuf(SDiskbasedBuf* pBuf) {
- SArray** p = taosHashIterate(pBuf->groupSet, NULL);
- while (p) {
- size_t n = taosArrayGetSize(*p);
- for (int32_t i = 0; i < n; ++i) {
- SPageInfo* pi = taosArrayGetP(*p, i);
- taosMemoryFreeClear(pi->pData);
- taosMemoryFreeClear(pi);
- }
- taosArrayDestroy(*p);
- p = taosHashIterate(pBuf->groupSet, p);
+ size_t n = taosArrayGetSize(pBuf->pIdList);
+ for (int32_t i = 0; i < n; ++i) {
+ SPageInfo* pi = taosArrayGetP(pBuf->pIdList, i);
+ taosMemoryFreeClear(pi->pData);
+ taosMemoryFreeClear(pi);
}
+ taosArrayClear(pBuf->pIdList);
+
tdListEmpty(pBuf->lruList);
tdListEmpty(pBuf->freePgList);
taosArrayClear(pBuf->emptyDummyIdList);
taosArrayClear(pBuf->pFree);
- taosHashClear(pBuf->groupSet);
taosHashClear(pBuf->all);
pBuf->numOfPages = 0; // all pages are in buffer in the first place
pBuf->totalBufSize = 0;
pBuf->allocateId = -1;
pBuf->fileSize = 0;
-}
\ No newline at end of file
+}
diff --git a/source/util/src/tqueue.c b/source/util/src/tqueue.c
index eb70002680cd8df2849ffa4fcb6c7c27ddf330d4..eb62e12a19ed44da50ade5bcb9ba5423530b08dd 100644
--- a/source/util/src/tqueue.c
+++ b/source/util/src/tqueue.c
@@ -151,15 +151,15 @@ int64_t taosQueueMemorySize(STaosQueue *queue) {
void *taosAllocateQitem(int32_t size, EQItype itype) {
STaosQnode *pNode = taosMemoryCalloc(1, sizeof(STaosQnode) + size);
- pNode->size = size;
- pNode->itype = itype;
- pNode->timestamp = taosGetTimestampUs();
-
if (pNode == NULL) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
return NULL;
}
+ pNode->size = size;
+ pNode->itype = itype;
+ pNode->timestamp = taosGetTimestampUs();
+
if (itype == RPC_QITEM) {
int64_t alloced = atomic_add_fetch_64(&tsRpcQueueMemoryUsed, size);
if (alloced > tsRpcQueueMemoryAllowed) {
diff --git a/source/util/src/trbtree.c b/source/util/src/trbtree.c
index 0970485dade90bb8719a2fa39facb047e07bcfff..65f1bac60aa51c48fe65895dbe0ded241d22590d 100644
--- a/source/util/src/trbtree.c
+++ b/source/util/src/trbtree.c
@@ -13,179 +13,297 @@
* along with this program. If not, see .
*/
-#include "os.h"
+#include "trbtree.h"
-typedef int32_t (*tRBTreeCmprFn)(void *, void *);
-
-typedef struct SRBTree SRBTree;
-typedef struct SRBTreeNode SRBTreeNode;
-typedef struct SRBTreeIter SRBTreeIter;
-
-struct SRBTreeNode {
- enum { RED, BLACK } color;
- SRBTreeNode *parent;
- SRBTreeNode *left;
- SRBTreeNode *right;
- uint8_t payload[];
-};
-
-struct SRBTree {
- tRBTreeCmprFn cmprFn;
- SRBTreeNode *root;
-};
-
-struct SRBTreeIter {
- SRBTree *pTree;
-};
-
-#define RBTREE_NODE_COLOR(N) ((N) ? (N)->color : BLACK)
-
-// APIs ================================================
-static void tRBTreeRotateLeft(SRBTree *pTree, SRBTreeNode *pNode) {
- SRBTreeNode *right = pNode->right;
-
- pNode->right = right->left;
- if (pNode->right) {
- pNode->right->parent = pNode;
+static void tRBTreeRotateLeft(SRBTree *pTree, SRBTreeNode *x) {
+ SRBTreeNode *y = x->right;
+ x->right = y->left;
+ if (y->left != pTree->NIL) {
+ y->left->parent = x;
}
-
- right->parent = pNode->parent;
- if (pNode->parent == NULL) {
- pTree->root = right;
- } else if (pNode == pNode->parent->left) {
- pNode->parent->left = right;
+ y->parent = x->parent;
+ if (x->parent == pTree->NIL) {
+ pTree->root = y;
+ } else if (x == x->parent->left) {
+ x->parent->left = y;
} else {
- pNode->parent->right = right;
+ x->parent->right = y;
}
-
- right->left = pNode;
- pNode->parent = right;
+ y->left = x;
+ x->parent = y;
}
-static void tRBTreeRotateRight(SRBTree *pTree, SRBTreeNode *pNode) {
- SRBTreeNode *left = pNode->left;
-
- pNode->left = left->right;
- if (pNode->left) {
- pNode->left->parent = pNode;
+static void tRBTreeRotateRight(SRBTree *pTree, SRBTreeNode *x) {
+ SRBTreeNode *y = x->left;
+ x->left = y->right;
+ if (y->right != pTree->NIL) {
+ y->right->parent = x;
}
-
- left->parent = pNode->parent;
- if (pNode->parent == NULL) {
- pTree->root = left;
- } else if (pNode == pNode->parent->left) {
- pNode->parent->left = left;
+ y->parent = x->parent;
+ if (x->parent == pTree->NIL) {
+ pTree->root = y;
+ } else if (x == x->parent->right) {
+ x->parent->right = y;
} else {
- pNode->parent->right = left;
+ x->parent->left = y;
}
-
- left->right = pNode;
- pNode->parent = left;
+ y->right = x;
+ x->parent = y;
}
-#define tRBTreeCreate(compare) \
- (SRBTree) { .cmprFn = (compare), .root = NULL }
-
-SRBTreeNode *tRBTreePut(SRBTree *pTree, SRBTreeNode *pNew) {
- pNew->left = NULL;
- pNew->right = NULL;
- pNew->color = RED;
-
- // insert
- if (pTree->root == NULL) {
- pNew->parent = NULL;
- pTree->root = pNew;
- } else {
- SRBTreeNode *pNode = pTree->root;
- while (true) {
- ASSERT(pNode);
-
- int32_t c = pTree->cmprFn(pNew->payload, pNode->payload);
- if (c < 0) {
- if (pNode->left) {
- pNode = pNode->left;
- } else {
- pNew->parent = pNode;
- pNode->left = pNew;
- break;
- }
- } else if (c > 0) {
- if (pNode->right) {
- pNode = pNode->right;
- } else {
- pNew->parent = pNode;
- pNode->right = pNew;
- break;
+static void tRBTreePutFix(SRBTree *pTree, SRBTreeNode *z) {
+ while (z->parent->color == RED) {
+ if (z->parent == z->parent->parent->left) { // z.parent is the left child
+
+ SRBTreeNode *y = z->parent->parent->right; // uncle of z
+
+ if (y->color == RED) { // case 1
+ z->parent->color = BLACK;
+ y->color = BLACK;
+ z->parent->parent->color = RED;
+ z = z->parent->parent;
+ } else { // case2 or case3
+ if (z == z->parent->right) { // case2
+ z = z->parent; // marked z.parent as new z
+ tRBTreeRotateLeft(pTree, z);
}
+ // case3
+ z->parent->color = BLACK; // made parent black
+ z->parent->parent->color = RED; // made parent red
+ tRBTreeRotateRight(pTree, z->parent->parent);
+ }
+ } else { // z.parent is the right child
+ SRBTreeNode *y = z->parent->parent->left; // uncle of z
+
+ if (y->color == RED) {
+ z->parent->color = BLACK;
+ y->color = BLACK;
+ z->parent->parent->color = RED;
+ z = z->parent->parent;
} else {
- return NULL;
+ if (z == z->parent->left) {
+ z = z->parent; // marked z.parent as new z
+ tRBTreeRotateRight(pTree, z);
+ }
+ z->parent->color = BLACK; // made parent black
+ z->parent->parent->color = RED; // made parent red
+ tRBTreeRotateLeft(pTree, z->parent->parent);
}
}
}
+ pTree->root->color = BLACK;
+}
- // fix
- SRBTreeNode *pNode = pNew;
- while (pNode->parent && pNode->parent->color == RED) {
- SRBTreeNode *p = pNode->parent;
- SRBTreeNode *g = p->parent;
-
- if (p == g->left) {
- SRBTreeNode *u = g->right;
-
- if (RBTREE_NODE_COLOR(u) == RED) {
- p->color = BLACK;
- u->color = BLACK;
- g->color = RED;
- pNode = g;
+static void tRBTreeTransplant(SRBTree *pTree, SRBTreeNode *u, SRBTreeNode *v) {
+ if (u->parent == pTree->NIL)
+ pTree->root = v;
+ else if (u == u->parent->left)
+ u->parent->left = v;
+ else
+ u->parent->right = v;
+ v->parent = u->parent;
+}
+
+static void tRBTreeDropFix(SRBTree *pTree, SRBTreeNode *x) {
+ while (x != pTree->root && x->color == BLACK) {
+ if (x == x->parent->left) {
+ SRBTreeNode *w = x->parent->right;
+ if (w->color == RED) {
+ w->color = BLACK;
+ x->parent->color = RED;
+ tRBTreeRotateLeft(pTree, x->parent);
+ w = x->parent->right;
+ }
+ if (w->left->color == BLACK && w->right->color == BLACK) {
+ w->color = RED;
+ x = x->parent;
} else {
- if (pNode == p->right) {
- pNode = p;
- tRBTreeRotateLeft(pTree, pNode);
+ if (w->right->color == BLACK) {
+ w->left->color = BLACK;
+ w->color = RED;
+ tRBTreeRotateRight(pTree, w);
+ w = x->parent->right;
}
- pNode->parent->color = BLACK;
- pNode->parent->parent->color = RED;
- tRBTreeRotateRight(pTree, pNode->parent->parent);
+ w->color = x->parent->color;
+ x->parent->color = BLACK;
+ w->right->color = BLACK;
+ tRBTreeRotateLeft(pTree, x->parent);
+ x = pTree->root;
}
} else {
- SRBTreeNode *u = g->left;
-
- if (RBTREE_NODE_COLOR(u) == RED) {
- p->color = BLACK;
- u->color = BLACK;
- g->color = RED;
+ SRBTreeNode *w = x->parent->left;
+ if (w->color == RED) {
+ w->color = BLACK;
+ x->parent->color = RED;
+ tRBTreeRotateRight(pTree, x->parent);
+ w = x->parent->left;
+ }
+ if (w->right->color == BLACK && w->left->color == BLACK) {
+ w->color = RED;
+ x = x->parent;
} else {
- if (pNode == p->left) {
- pNode = p;
- tRBTreeRotateRight(pTree, pNode);
+ if (w->left->color == BLACK) {
+ w->right->color = BLACK;
+ w->color = RED;
+ tRBTreeRotateLeft(pTree, w);
+ w = x->parent->left;
}
- pNode->parent->color = BLACK;
- pNode->parent->parent->color = RED;
- tRBTreeRotateLeft(pTree, pNode->parent->parent);
+ w->color = x->parent->color;
+ x->parent->color = BLACK;
+ w->left->color = BLACK;
+ tRBTreeRotateRight(pTree, x->parent);
+ x = pTree->root;
}
}
}
+ x->color = BLACK;
+}
- pTree->root->color = BLACK;
- return pNew;
+static SRBTreeNode *tRBTreeSuccessor(SRBTree *pTree, SRBTreeNode *pNode) {
+ if (pNode->right != pTree->NIL) {
+ pNode = pNode->right;
+ while (pNode->left != pTree->NIL) {
+ pNode = pNode->left;
+ }
+ } else {
+ while (true) {
+ if (pNode->parent == pTree->NIL || pNode == pNode->parent->left) {
+ pNode = pNode->parent;
+ break;
+ } else {
+ pNode = pNode->parent;
+ }
+ }
+ }
+
+ return pNode;
}
-SRBTreeNode *tRBTreeDrop(SRBTree *pTree, void *pKey) {
- SRBTreeNode *pNode = pTree->root;
+static SRBTreeNode *tRBTreePredecessor(SRBTree *pTree, SRBTreeNode *pNode) {
+ if (pNode->left != pTree->NIL) {
+ pNode = pNode->left;
+ while (pNode->right != pTree->NIL) {
+ pNode = pNode->right;
+ }
+ } else {
+ while (true) {
+ if (pNode->parent == pTree->NIL || pNode == pNode->parent->right) {
+ pNode = pNode->parent;
+ break;
+ } else {
+ pNode = pNode->parent;
+ }
+ }
+ }
- while (pNode) {
- int32_t c = pTree->cmprFn(pKey, pNode->payload);
+ return pNode;
+}
+void tRBTreeCreate(SRBTree *pTree, tRBTreeCmprFn cmprFn) {
+ pTree->cmprFn = cmprFn;
+ pTree->n = 0;
+ pTree->NIL = &pTree->NILNODE;
+ pTree->NIL->color = BLACK;
+ pTree->NIL->parent = NULL;
+ pTree->NIL->left = NULL;
+ pTree->NIL->right = NULL;
+ pTree->root = pTree->NIL;
+ pTree->min = pTree->NIL;
+ pTree->max = pTree->NIL;
+}
+
+SRBTreeNode *tRBTreePut(SRBTree *pTree, SRBTreeNode *z) {
+ SRBTreeNode *y = pTree->NIL; // variable for the parent of the added node
+ SRBTreeNode *temp = pTree->root;
+
+ while (temp != pTree->NIL) {
+ y = temp;
+
+ int32_t c = pTree->cmprFn(RBTREE_NODE_PAYLOAD(z), RBTREE_NODE_PAYLOAD(temp));
if (c < 0) {
- pNode = pNode->left;
+ temp = temp->left;
} else if (c > 0) {
- pNode = pNode->right;
+ temp = temp->right;
} else {
- break;
+ return NULL;
}
}
+ z->parent = y;
+
+ if (y == pTree->NIL) {
+ pTree->root = z;
+ } else if (pTree->cmprFn(RBTREE_NODE_PAYLOAD(z), RBTREE_NODE_PAYLOAD(y)) < 0) {
+ y->left = z;
+ } else {
+ y->right = z;
+ }
+
+ z->color = RED;
+ z->left = pTree->NIL;
+ z->right = pTree->NIL;
+
+ tRBTreePutFix(pTree, z);
+
+ // update min/max node
+ if (pTree->min == pTree->NIL || pTree->cmprFn(RBTREE_NODE_PAYLOAD(pTree->min), RBTREE_NODE_PAYLOAD(z)) > 0) {
+ pTree->min = z;
+ }
+ if (pTree->max == pTree->NIL || pTree->cmprFn(RBTREE_NODE_PAYLOAD(pTree->max), RBTREE_NODE_PAYLOAD(z)) < 0) {
+ pTree->max = z;
+ }
+ pTree->n++;
+ return z;
+}
+
+void tRBTreeDrop(SRBTree *pTree, SRBTreeNode *z) {
+ SRBTreeNode *y = z;
+ SRBTreeNode *x;
+ ECOLOR y_orignal_color = y->color;
+
+ // update min/max node
+ if (pTree->min == z) {
+ pTree->min = tRBTreeSuccessor(pTree, pTree->min);
+ }
+ if (pTree->max == z) {
+ pTree->max = tRBTreePredecessor(pTree, pTree->max);
+ }
+
+ // drop impl
+ if (z->left == pTree->NIL) {
+ x = z->right;
+ tRBTreeTransplant(pTree, z, z->right);
+ } else if (z->right == pTree->NIL) {
+ x = z->left;
+ tRBTreeTransplant(pTree, z, z->left);
+ } else {
+ y = tRBTreeSuccessor(pTree, z);
+ y_orignal_color = y->color;
+ x = y->right;
+ if (y->parent == z) {
+ x->parent = z;
+ } else {
+ tRBTreeTransplant(pTree, y, y->right);
+ y->right = z->right;
+ y->right->parent = y;
+ }
+ tRBTreeTransplant(pTree, z, y);
+ y->left = z->left;
+ y->left->parent = y;
+ y->color = z->color;
+ }
+
+ // fix
+ if (y_orignal_color == BLACK) {
+ tRBTreeDropFix(pTree, x);
+ }
+ pTree->n--;
+}
+
+SRBTreeNode *tRBTreeDropByKey(SRBTree *pTree, void *pKey) {
+ SRBTreeNode *pNode = tRBTreeGet(pTree, pKey);
if (pNode) {
- // TODO
+ tRBTreeDrop(pTree, pNode);
}
return pNode;
@@ -194,8 +312,8 @@ SRBTreeNode *tRBTreeDrop(SRBTree *pTree, void *pKey) {
SRBTreeNode *tRBTreeGet(SRBTree *pTree, void *pKey) {
SRBTreeNode *pNode = pTree->root;
- while (pNode) {
- int32_t c = pTree->cmprFn(pKey, pNode->payload);
+ while (pNode != pTree->NIL) {
+ int32_t c = pTree->cmprFn(pKey, RBTREE_NODE_PAYLOAD(pNode));
if (c < 0) {
pNode = pNode->left;
@@ -206,5 +324,23 @@ SRBTreeNode *tRBTreeGet(SRBTree *pTree, void *pKey) {
}
}
- return pNode;
+ return (pNode == pTree->NIL) ? NULL : pNode;
}
+
+// SRBTreeIter ================================================
+SRBTreeNode *tRBTreeIterNext(SRBTreeIter *pIter) {
+ SRBTreeNode *pNode = pIter->pNode;
+
+ if (pIter->pNode != pIter->pTree->NIL) {
+ if (pIter->asc) {
+ // ascend
+ pIter->pNode = tRBTreeSuccessor(pIter->pTree, pIter->pNode);
+ } else {
+ // descend
+ pIter->pNode = tRBTreePredecessor(pIter->pTree, pIter->pNode);
+ }
+ }
+
+_exit:
+ return (pNode == pIter->pTree->NIL) ? NULL : pNode;
+}
\ No newline at end of file
diff --git a/source/util/src/tsched.c b/source/util/src/tsched.c
index 89471c4347e49152b2b1fff0313ef29da92c70ff..9cf9e2c4316a850ac31b15aaab8a864deb4d32eb 100644
--- a/source/util/src/tsched.c
+++ b/source/util/src/tsched.c
@@ -149,18 +149,18 @@ void *taosProcessSchedQueue(void *scheduler) {
return NULL;
}
-void taosScheduleTask(void *queueScheduler, SSchedMsg *pMsg) {
+int taosScheduleTask(void *queueScheduler, SSchedMsg *pMsg) {
SSchedQueue *pSched = (SSchedQueue *)queueScheduler;
int32_t ret = 0;
if (pSched == NULL) {
uError("sched is not ready, msg:%p is dropped", pMsg);
- return;
+ return -1;
}
if (atomic_load_8(&pSched->stop)) {
uError("sched is already stopped, msg:%p is dropped", pMsg);
- return;
+ return -1;
}
if ((ret = tsem_wait(&pSched->emptySem)) != 0) {
@@ -185,6 +185,7 @@ void taosScheduleTask(void *queueScheduler, SSchedMsg *pMsg) {
uFatal("post %s fullSem failed(%s)", pSched->label, strerror(errno));
ASSERT(0);
}
+ return ret;
}
void taosCleanUpScheduler(void *param) {
@@ -192,11 +193,11 @@ void taosCleanUpScheduler(void *param) {
if (pSched == NULL) return;
uDebug("start to cleanup %s schedQsueue", pSched->label);
-
+
atomic_store_8(&pSched->stop, 1);
taosMsleep(200);
-
+
for (int32_t i = 0; i < pSched->numOfThreads; ++i) {
if (taosCheckPthreadValid(pSched->qthread[i])) {
tsem_post(&pSched->fullSem);
@@ -220,7 +221,7 @@ void taosCleanUpScheduler(void *param) {
if (pSched->queue) taosMemoryFree(pSched->queue);
if (pSched->qthread) taosMemoryFree(pSched->qthread);
- //taosMemoryFree(pSched);
+ // taosMemoryFree(pSched);
}
// for debug purpose, dump the scheduler status every 1min.
diff --git a/source/util/src/tuuid.c b/source/util/src/tuuid.c
index 9101aec949873eb976517581c6790e1bc2dac3b9..d192b1229d79e9a77ac3ea5291024060c6f6102c 100644
--- a/source/util/src/tuuid.c
+++ b/source/util/src/tuuid.c
@@ -20,8 +20,8 @@ static int32_t tUUIDSerialNo = 0;
int32_t tGenIdPI32(void) {
if (tUUIDHashId == 0) {
- char uid[64];
- int32_t code = taosGetSystemUUID(uid, tListLen(uid));
+ char uid[65] = {0};
+ int32_t code = taosGetSystemUUID(uid, sizeof(uid));
if (code != TSDB_CODE_SUCCESS) {
terrno = TAOS_SYSTEM_ERROR(errno);
} else {
@@ -51,11 +51,11 @@ int64_t tGenIdPI64(void) {
int64_t id;
while (true) {
- int64_t ts = taosGetTimestampMs();
+ int64_t ts = taosGetTimestampMs() >> 8;
uint64_t pid = taosGetPId();
int32_t val = atomic_add_fetch_32(&tUUIDSerialNo, 1);
- id = ((tUUIDHashId & 0x07FF) << 52) | ((pid & 0x0FFF) << 40) | ((ts & 0xFFFFFF) << 16) | (val & 0xFFFF);
+ id = ((tUUIDHashId & 0x07FF) << 52) | ((pid & 0x0F) << 48) | ((ts & 0x3FFFFFF) << 20) | (val & 0xFFFFF);
if (id) {
break;
}
diff --git a/source/util/src/tworker.c b/source/util/src/tworker.c
index 1f0731812c9ec739adfdb2a0c17451938cb5e007..d9ded20070f1f290c777a4e768540bba77fc15f7 100644
--- a/source/util/src/tworker.c
+++ b/source/util/src/tworker.c
@@ -46,7 +46,7 @@ int32_t tQWorkerInit(SQWorkerPool *pool) {
void tQWorkerCleanup(SQWorkerPool *pool) {
for (int32_t i = 0; i < pool->max; ++i) {
SQWorker *worker = pool->workers + i;
- if (worker == NULL) continue;
+ // if (worker == NULL) continue;
if (taosCheckPthreadValid(worker->thread)) {
taosQsetThreadResume(pool->qset);
}
@@ -54,7 +54,7 @@ void tQWorkerCleanup(SQWorkerPool *pool) {
for (int32_t i = 0; i < pool->max; ++i) {
SQWorker *worker = pool->workers + i;
- if (worker == NULL) continue;
+ // if (worker == NULL) continue;
if (taosCheckPthreadValid(worker->thread)) {
taosThreadJoin(worker->thread, NULL);
taosThreadClear(&worker->thread);
@@ -138,8 +138,8 @@ STaosQueue *tQWorkerAllocQueue(SQWorkerPool *pool, void *ahandle, FItem fp) {
}
void tQWorkerFreeQueue(SQWorkerPool *pool, STaosQueue *queue) {
- taosCloseQueue(queue);
uDebug("worker:%s, queue:%p is freed", pool->name, queue);
+ taosCloseQueue(queue);
}
int32_t tWWorkerInit(SWWorkerPool *pool) {
@@ -283,8 +283,8 @@ STaosQueue *tWWorkerAllocQueue(SWWorkerPool *pool, void *ahandle, FItems fp) {
}
void tWWorkerFreeQueue(SWWorkerPool *pool, STaosQueue *queue) {
- taosCloseQueue(queue);
uDebug("worker:%s, queue:%p is freed", pool->name, queue);
+ taosCloseQueue(queue);
}
int32_t tSingleWorkerInit(SSingleWorker *pWorker, const SSingleWorkerCfg *pCfg) {
diff --git a/source/util/src/version.c.in b/source/util/src/version.c.in
index be1a4a404875739cdef349a901e52e195c2a9cde..cb307b57fce37ba4243aea83995e66612f3c4371 100644
--- a/source/util/src/version.c.in
+++ b/source/util/src/version.c.in
@@ -1,4 +1,4 @@
-char version[12] = "${TD_VER_NUMBER}";
+char version[64] = "${TD_VER_NUMBER}";
char compatible_version[12] = "${TD_VER_COMPATIBLE}";
char gitinfo[48] = "${TD_VER_GIT}";
char buildinfo[64] = "Built at ${TD_VER_DATE}";
diff --git a/source/util/test/CMakeLists.txt b/source/util/test/CMakeLists.txt
index d2a503e6613909c39af8cfb961dd5b51f36120c3..6e42ef7e75eac38a5a072bfef8521152fd74ae06 100644
--- a/source/util/test/CMakeLists.txt
+++ b/source/util/test/CMakeLists.txt
@@ -75,4 +75,12 @@ target_link_libraries(taosbsearchTest os util gtest_main)
add_test(
NAME taosbsearchTest
COMMAND taosbsearchTest
+)
+
+# trbtreeTest
+add_executable(rbtreeTest "trbtreeTest.cpp")
+target_link_libraries(rbtreeTest os util gtest_main)
+add_test(
+ NAME rbtreeTest
+ COMMAND rbtreeTest
)
\ No newline at end of file
diff --git a/source/util/test/pageBufferTest.cpp b/source/util/test/pageBufferTest.cpp
index eaf198a483aa5e3e90595d2417516aa53f754331..534c17758714820e9a9f2bf6b81d23ac121fcaf4 100644
--- a/source/util/test/pageBufferTest.cpp
+++ b/source/util/test/pageBufferTest.cpp
@@ -18,37 +18,37 @@ void simpleTest() {
int32_t pageId = 0;
int32_t groupId = 0;
- SFilePage* pBufPage = static_cast(getNewBufPage(pBuf, groupId, &pageId));
+ SFilePage* pBufPage = static_cast(getNewBufPage(pBuf, &pageId));
ASSERT_TRUE(pBufPage != NULL);
ASSERT_EQ(getTotalBufSize(pBuf), 1024);
- SIDList list = getDataBufPagesIdList(pBuf, groupId);
+ SIDList list = getDataBufPagesIdList(pBuf);
ASSERT_EQ(taosArrayGetSize(list), 1);
- ASSERT_EQ(getNumOfBufGroupId(pBuf), 1);
+ //ASSERT_EQ(getNumOfBufGroupId(pBuf), 1);
releaseBufPage(pBuf, pBufPage);
- SFilePage* pBufPage1 = static_cast(getNewBufPage(pBuf, groupId, &pageId));
+ SFilePage* pBufPage1 = static_cast(getNewBufPage(pBuf, &pageId));
SFilePage* t = static_cast(getBufPage(pBuf, pageId));
ASSERT_TRUE(t == pBufPage1);
- SFilePage* pBufPage2 = static_cast(getNewBufPage(pBuf, groupId, &pageId));
+ SFilePage* pBufPage2 = static_cast(getNewBufPage(pBuf, &pageId));
SFilePage* t1 = static_cast(getBufPage(pBuf, pageId));
ASSERT_TRUE(t1 == pBufPage2);
- SFilePage* pBufPage3 = static_cast(getNewBufPage(pBuf, groupId, &pageId));
+ SFilePage* pBufPage3 = static_cast(getNewBufPage(pBuf, &pageId));
SFilePage* t2 = static_cast(getBufPage(pBuf, pageId));
ASSERT_TRUE(t2 == pBufPage3);
- SFilePage* pBufPage4 = static_cast(getNewBufPage(pBuf, groupId, &pageId));
+ SFilePage* pBufPage4 = static_cast(getNewBufPage(pBuf, &pageId));
SFilePage* t3 = static_cast(getBufPage(pBuf, pageId));
ASSERT_TRUE(t3 == pBufPage4);
releaseBufPage(pBuf, pBufPage2);
- SFilePage* pBufPage5 = static_cast(getNewBufPage(pBuf, groupId, &pageId));
+ SFilePage* pBufPage5 = static_cast(getNewBufPage(pBuf, &pageId));
SFilePage* t4 = static_cast(getBufPage(pBuf, pageId));
ASSERT_TRUE(t4 == pBufPage5);
@@ -64,7 +64,7 @@ void writeDownTest() {
int32_t groupId = 0;
int32_t nx = 12345;
- SFilePage* pBufPage = static_cast(getNewBufPage(pBuf, groupId, &pageId));
+ SFilePage* pBufPage = static_cast(getNewBufPage(pBuf, &pageId));
ASSERT_TRUE(pBufPage != NULL);
*(int32_t*)(pBufPage->data) = nx;
@@ -73,22 +73,22 @@ void writeDownTest() {
setBufPageDirty(pBufPage, true);
releaseBufPage(pBuf, pBufPage);
- SFilePage* pBufPage1 = static_cast(getNewBufPage(pBuf, groupId, &pageId));
+ SFilePage* pBufPage1 = static_cast(getNewBufPage(pBuf, &pageId));
SFilePage* t1 = static_cast(getBufPage(pBuf, pageId));
ASSERT_TRUE(t1 == pBufPage1);
ASSERT_TRUE(pageId == 1);
- SFilePage* pBufPage2 = static_cast(getNewBufPage(pBuf, groupId, &pageId));
+ SFilePage* pBufPage2 = static_cast(getNewBufPage(pBuf, &pageId));
SFilePage* t2 = static_cast(getBufPage(pBuf, pageId));
ASSERT_TRUE(t2 == pBufPage2);
ASSERT_TRUE(pageId == 2);
- SFilePage* pBufPage3 = static_cast(getNewBufPage(pBuf, groupId, &pageId));
+ SFilePage* pBufPage3 = static_cast(getNewBufPage(pBuf, &pageId));
SFilePage* t3 = static_cast(getBufPage(pBuf, pageId));
ASSERT_TRUE(t3 == pBufPage3);
ASSERT_TRUE(pageId == 3);
- SFilePage* pBufPage4 = static_cast(getNewBufPage(pBuf, groupId, &pageId));
+ SFilePage* pBufPage4 = static_cast(getNewBufPage(pBuf, &pageId));
SFilePage* t4 = static_cast(getBufPage(pBuf, pageId));
ASSERT_TRUE(t4 == pBufPage4);
ASSERT_TRUE(pageId == 4);
@@ -98,7 +98,7 @@ void writeDownTest() {
SFilePage* pBufPagex = static_cast(getBufPage(pBuf, writePageId));
ASSERT_EQ(*(int32_t*)pBufPagex->data, nx);
- SArray* pa = getDataBufPagesIdList(pBuf, groupId);
+ SArray* pa = getDataBufPagesIdList(pBuf);
ASSERT_EQ(taosArrayGetSize(pa), 5);
destroyDiskbasedBuf(pBuf);
@@ -113,32 +113,32 @@ void recyclePageTest() {
int32_t groupId = 0;
int32_t nx = 12345;
- SFilePage* pBufPage = static_cast(getNewBufPage(pBuf, groupId, &pageId));
+ SFilePage* pBufPage = static_cast(getNewBufPage(pBuf, &pageId));
ASSERT_TRUE(pBufPage != NULL);
releaseBufPage(pBuf, pBufPage);
- SFilePage* pBufPage1 = static_cast(getNewBufPage(pBuf, groupId, &pageId));
+ SFilePage* pBufPage1 = static_cast(getNewBufPage(pBuf, &pageId));
SFilePage* t1 = static_cast(getBufPage(pBuf, pageId));
ASSERT_TRUE(t1 == pBufPage1);
ASSERT_TRUE(pageId == 1);
- SFilePage* pBufPage2 = static_cast(getNewBufPage(pBuf, groupId, &pageId));
+ SFilePage* pBufPage2 = static_cast(getNewBufPage(pBuf, &pageId));
SFilePage* t2 = static_cast(getBufPage(pBuf, pageId));
ASSERT_TRUE(t2 == pBufPage2);
ASSERT_TRUE(pageId == 2);
- SFilePage* pBufPage3 = static_cast(getNewBufPage(pBuf, groupId, &pageId));
+ SFilePage* pBufPage3 = static_cast(getNewBufPage(pBuf, &pageId));
SFilePage* t3 = static_cast(getBufPage(pBuf, pageId));
ASSERT_TRUE(t3 == pBufPage3);
ASSERT_TRUE(pageId == 3);
- SFilePage* pBufPage4 = static_cast(getNewBufPage(pBuf, groupId, &pageId));
+ SFilePage* pBufPage4 = static_cast(getNewBufPage(pBuf, &pageId));
SFilePage* t4 = static_cast(getBufPage(pBuf, pageId));
ASSERT_TRUE(t4 == pBufPage4);
ASSERT_TRUE(pageId == 4);
releaseBufPage(pBuf, t4);
- SFilePage* pBufPage5 = static_cast(getNewBufPage(pBuf, groupId, &pageId));
+ SFilePage* pBufPage5 = static_cast(getNewBufPage(pBuf, &pageId));
SFilePage* t5 = static_cast(getBufPage(pBuf, pageId));
ASSERT_TRUE(t5 == pBufPage5);
ASSERT_TRUE(pageId == 5);
@@ -152,7 +152,7 @@ void recyclePageTest() {
SFilePage* pBufPagex1 = static_cast(getBufPage(pBuf, 1));
- SArray* pa = getDataBufPagesIdList(pBuf, groupId);
+ SArray* pa = getDataBufPagesIdList(pBuf);
ASSERT_EQ(taosArrayGetSize(pa), 6);
destroyDiskbasedBuf(pBuf);
diff --git a/source/util/test/trbtreeTest.cpp b/source/util/test/trbtreeTest.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..cabf315df0a2d32c6f2adcf1ac81a3e1565e6e0c
--- /dev/null
+++ b/source/util/test/trbtreeTest.cpp
@@ -0,0 +1,40 @@
+#include
+
+#include
+#include
+
+#include "trbtree.h"
+
+static int32_t tCmprInteger(const void *p1, const void *p2) {
+ if (*(int *)p1 < *(int *)p2) {
+ return -1;
+ } else if (*(int *)p1 > *(int *)p2) {
+ return 1;
+ }
+ return 0;
+}
+
+TEST(trbtreeTest, rbtree_test1) {
+#if 0
+ SRBTree rt;
+ tRBTreeCreate(&rt, tCmprInteger);
+ int a[] = {1, 3, 4, 2, 7, 5, 8};
+
+ for (int i = 0; i < sizeof(a) / sizeof(a[0]); i++) {
+ SRBTreeNode *pNode = (SRBTreeNode *)taosMemoryMalloc(sizeof(*pNode) + sizeof(int));
+ *(int *)pNode->payload = a[i];
+
+ tRBTreePut(&rt, pNode);
+ }
+
+ SRBTreeIter rti = tRBTreeIterCreate(&rt, 1);
+ SRBTreeNode *pNode = tRBTreeIterNext(&rti);
+ int la = 0;
+ while (pNode) {
+ GTEST_ASSERT_GT(*(int *)pNode->payload, la);
+ la = *(int *)pNode->payload;
+ // printf("%d\n", la);
+ pNode = tRBTreeIterNext(&rti);
+ }
+#endif
+}
\ No newline at end of file
diff --git a/source/util/test/trefTest.c b/source/util/test/trefTest.c
index 89561e61fe02e4665828bc8336ec79f6df46fc68..5a6790ef1da3b5a6b015c836d09150b06e0a003f 100644
--- a/source/util/test/trefTest.c
+++ b/source/util/test/trefTest.c
@@ -94,7 +94,7 @@ void *openRefSpace(void *param) {
pSpace->rsetId = taosOpenRef(50, myfree);
if (pSpace->rsetId < 0) {
- printf("failed to open ref, reson:%s\n", tstrerror(pSpace->rsetId));
+ printf("failed to open ref, reason:%s\n", tstrerror(pSpace->rsetId));
return NULL;
}
diff --git a/tests/develop-test/5-taos-tools/taosbenchmark/demo.py b/tests/develop-test/5-taos-tools/taosbenchmark/demo.py
index 99e8cd36a4cf1dae08baf93ef4d6338bb08dc7bd..a44ad4c1d0b5cc8e4ba181c1bf941454929f2df6 100644
--- a/tests/develop-test/5-taos-tools/taosbenchmark/demo.py
+++ b/tests/develop-test/5-taos-tools/taosbenchmark/demo.py
@@ -22,9 +22,9 @@ from util.dnodes import *
class TDTestCase:
def caseDescription(self):
- '''
+ """
[TD-13823] taosBenchmark test cases
- '''
+ """
return
def init(self, conn, logSql):
@@ -34,19 +34,19 @@ class TDTestCase:
def getPath(self, tool="taosBenchmark"):
selfPath = os.path.dirname(os.path.realpath(__file__))
- if ("community" in selfPath):
- projPath = selfPath[:selfPath.find("community")]
+ if "community" in selfPath:
+ projPath = selfPath[: selfPath.find("community")]
else:
- projPath = selfPath[:selfPath.find("tests")]
+ projPath = selfPath[: selfPath.find("tests")]
paths = []
for root, dirs, files in os.walk(projPath):
- if ((tool) in files):
+ if (tool) in files:
rootRealPath = os.path.dirname(os.path.realpath(root))
- if ("packaging" not in rootRealPath):
+ if "packaging" not in rootRealPath:
paths.append(os.path.join(root, tool))
break
- if (len(paths) == 0):
+ if len(paths) == 0:
tdLog.exit("taosBenchmark not found!")
return
else:
@@ -55,7 +55,7 @@ class TDTestCase:
def run(self):
binPath = self.getPath()
- cmd = "%s -n 100 -t 100 -y" %binPath
+ cmd = "%s -n 100 -t 100 -y" % binPath
tdLog.info("%s" % cmd)
os.system("%s" % cmd)
tdSql.execute("use test")
@@ -77,14 +77,16 @@ class TDTestCase:
tdSql.checkData(4, 3, "TAG")
tdSql.checkData(5, 0, "location")
tdSql.checkData(5, 1, "VARCHAR")
- tdSql.checkData(5, 2, 16)
+ tdSql.checkData(5, 2, 24)
tdSql.checkData(5, 3, "TAG")
tdSql.query("select count(*) from test.meters where groupid >= 0")
tdSql.checkData(0, 0, 10000)
- tdSql.query("select count(*) from test.meters where location = 'San Francisco' or location = 'Los Angles' or location = 'San Diego' or location = 'San Jose' or \
- location = 'Palo Alto' or location = 'Campbell' or location = 'Mountain View' or location = 'Sunnyvale' or location = 'Santa Clara' or location = 'Cupertino' ")
+ tdSql.query(
+ "select count(*) from test.meters where location = 'California.SanFrancisco' or location = 'California.LosAngles' or location = 'California.SanDiego' or location = 'California.SanJose' or \
+ location = 'California.PaloAlto' or location = 'California.Campbell' or location = 'California.MountainView' or location = 'California.Sunnyvale' or location = 'California.SantaClara' or location = 'California.Cupertino' "
+ )
tdSql.checkData(0, 0, 10000)
def stop(self):
diff --git a/tests/docs-examples-test/csharp.sh b/tests/docs-examples-test/csharp.sh
index a8f1ce4119f0f41d8372c9ceb8fef6053caa1563..8d1031ab8f8367e260804a58ecc2ae4f9822550c 100644
--- a/tests/docs-examples-test/csharp.sh
+++ b/tests/docs-examples-test/csharp.sh
@@ -6,23 +6,32 @@ pgrep taosd || taosd >> /dev/null 2>&1 &
pgrep taosadapter || taosadapter >> /dev/null 2>&1 &
cd ../../docs/examples/csharp
-dotnet run --project connect.csproj
+dotnet run --project connect/connect.csproj
taos -s "drop database if exists power"
-dotnet run --project sqlinsert.csproj
-dotnet run --project query.csproj
-dotnet run --project asyncquery.csproj
-dotnet run --project subscribe.csproj
+dotnet run --project sqlInsert/sqlinsert.csproj
+dotnet run --project query/query.csproj
+dotnet run --project asyncQuery/asyncquery.csproj
+dotnet run --project subscribe/subscribe.csproj
taos -s "drop topic if exists topic_example"
taos -s "drop database if exists power"
-dotnet run --project stmtinsert.csproj
+dotnet run --project stmtInsert/stmtinsert.csproj
taos -s "drop database if exists test"
-dotnet run --project influxdbline.csproj
+dotnet run --project influxdbLine/influxdbline.csproj
taos -s "drop database if exists test"
-dotnet run --project optstelnet.csproj
+dotnet run --project optsTelnet/optstelnet.csproj
taos -s "drop database if exists test"
-dotnet run --project optsjson.csproj
\ No newline at end of file
+dotnet run --project optsJSON/optsJSON.csproj
+
+taos -s "create database if exists test"
+dotnet run --project wsConnect/wsConnect.csproj
+dotnet run --project wsInsert/wsInsert.csproj
+dotnet run --project wsStmt/wsStmt.csproj
+dotnet run --project wsQuery/wsQuery.csproj
+
+taos -s "drop database if exists test"
+taos -s "drop database if exists power"
\ No newline at end of file
diff --git a/tests/docs-examples-test/jdbc.sh b/tests/docs-examples-test/jdbc.sh
new file mode 100644
index 0000000000000000000000000000000000000000..d71085a40306956ea8d25e9b575c97ae9945df76
--- /dev/null
+++ b/tests/docs-examples-test/jdbc.sh
@@ -0,0 +1,27 @@
+#!/bin/bash
+
+pgrep taosd || taosd >> /dev/null 2>&1 &
+pgrep taosadapter || taosadapter >> /dev/null 2>&1 &
+cd ../../docs/examples/java
+
+mvn clean test > jdbc-out.log 2>&1
+tail -n 20 jdbc-out.log
+
+cases=`grep 'Tests run' jdbc-out.log | awk 'END{print $3}'`
+totalJDBCCases=`echo ${cases/%,}`
+failed=`grep 'Tests run' jdbc-out.log | awk 'END{print $5}'`
+JDBCFailed=`echo ${failed/%,}`
+error=`grep 'Tests run' jdbc-out.log | awk 'END{print $7}'`
+JDBCError=`echo ${error/%,}`
+
+totalJDBCFailed=`expr $JDBCFailed + $JDBCError`
+totalJDBCSuccess=`expr $totalJDBCCases - $totalJDBCFailed`
+
+if [ "$totalJDBCSuccess" -gt "0" ]; then
+ echo -e "\n${GREEN} ### Total $totalJDBCSuccess JDBC case(s) succeed! ### ${NC}"
+fi
+
+if [ "$totalJDBCFailed" -ne "0" ]; then
+ echo -e "\n${RED} ### Total $totalJDBCFailed JDBC case(s) failed! ### ${NC}"
+ exit 8
+fi
\ No newline at end of file
diff --git a/tests/docs-examples-test/node.sh b/tests/docs-examples-test/node.sh
index 02839048155dfe75bdfa872ca88d0d717b3c9304..41acf7c7b4dfbcfa0f5f758770198b35fc707adc 100644
--- a/tests/docs-examples-test/node.sh
+++ b/tests/docs-examples-test/node.sh
@@ -23,7 +23,7 @@ node query_example.js
node async_query_example.js
-node subscribe_demo.js
+# node subscribe_demo.js
taos -s "drop topic if exists topic_name_example"
taos -s "drop database if exists power"
@@ -39,4 +39,4 @@ taos -s "drop database if exists test"
node opentsdb_telnet_example.js
taos -s "drop database if exists test"
-node opentsdb_json_example.js
\ No newline at end of file
+node opentsdb_json_example.js
diff --git a/tests/parallel_test/run_container.sh b/tests/parallel_test/run_container.sh
index f0ee9be46fa5c3f399cde738cad29aa3f03ea7b8..bb57f238f029e2098f70fc553a8e6280ff46e5c4 100755
--- a/tests/parallel_test/run_container.sh
+++ b/tests/parallel_test/run_container.sh
@@ -79,9 +79,11 @@ fi
ulimit -c unlimited
TMP_DIR=$WORKDIR/tmp
-
+SOURCEDIR=$WORKDIR/src
MOUNT_DIR=""
+packageName="TDengine-server-3.0.1.0-Linux-x64.tar.gz"
rm -rf ${TMP_DIR}/thread_volume/$thread_no/sim
+mkdir -p $SOURCEDIR
mkdir -p ${TMP_DIR}/thread_volume/$thread_no/sim/tsim
mkdir -p ${TMP_DIR}/thread_volume/$thread_no/coredump
rm -rf ${TMP_DIR}/thread_volume/$thread_no/coredump/*
@@ -90,6 +92,11 @@ if [ ! -d "${TMP_DIR}/thread_volume/$thread_no/$exec_dir" ]; then
echo "cp -rf ${REPDIR}/tests/$subdir ${TMP_DIR}/thread_volume/$thread_no/"
cp -rf ${REPDIR}/tests/$subdir ${TMP_DIR}/thread_volume/$thread_no/
fi
+
+if [ ! -f "${SOURCEDIR}/${packageName}" ]; then
+ wget -P ${SOURCEDIR} https://taosdata.com/assets-download/3.0/${packageName}
+fi
+
MOUNT_DIR="$TMP_DIR/thread_volume/$thread_no/$exec_dir:$CONTAINER_TESTDIR/tests/$exec_dir"
echo "$thread_no -> ${exec_dir}:$cmd"
coredump_dir=`cat /proc/sys/kernel/core_pattern | xargs dirname`
@@ -97,6 +104,7 @@ coredump_dir=`cat /proc/sys/kernel/core_pattern | xargs dirname`
docker run \
-v $REP_MOUNT_PARAM \
-v $MOUNT_DIR \
+ -v ${SOURCEDIR}:/usr/local/src/ \
-v "$TMP_DIR/thread_volume/$thread_no/sim:${SIM_DIR}" \
-v ${TMP_DIR}/thread_volume/$thread_no/coredump:$coredump_dir \
-v $WORKDIR/taos-connector-python/taos:/usr/local/lib/python3.8/site-packages/taos:ro \
diff --git a/tests/pytest/tools/taosdumpTest2.py b/tests/pytest/tools/taosdumpTest2.py
index a8117ec04c79aff5c00dcfa604c1124854473d30..8a85ce10ed53946abe4f8ecd4a022752e07f94c1 100644
--- a/tests/pytest/tools/taosdumpTest2.py
+++ b/tests/pytest/tools/taosdumpTest2.py
@@ -11,15 +11,19 @@
# -*- coding: utf-8 -*-
+from logging.config import dictConfig
import sys
import os
from util.log import *
from util.cases import *
from util.sql import *
from util.dnodes import *
+import string
+import random
class TDTestCase:
+
def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor(), logSql)
@@ -47,12 +51,19 @@ class TDTestCase:
return ""
return paths[0]
+ def generateString(self, length):
+ chars = string.ascii_uppercase + string.ascii_lowercase
+ v = ""
+ for i in range(length):
+ v += random.choice(chars)
+ return v
+
def run(self):
if not os.path.exists("./taosdumptest/tmp"):
os.makedirs("./taosdumptest/tmp")
else:
- os.system("rm -rf ./taosdumptest/tmp")
- os.makedirs("./taosdumptest/tmp")
+ print("directory exists")
+ os.system("rm -rf ./taosdumptest/tmp/*")
tdSql.prepare()
@@ -76,17 +87,19 @@ class TDTestCase:
tdLog.info("taosdump found in %s" % binPath)
os.system("rm ./taosdumptest/tmp/*.sql")
+ os.system("rm ./taosdumptest/tmp/*.avro*")
+ os.system("rm -rf ./taosdumptest/taosdump.*")
os.system(
- "%s --databases db -o ./taosdumptest/tmp -B 32766 -L 1048576" %
+ "%s --databases db -o ./taosdumptest/tmp " %
binPath)
tdSql.execute("drop database db")
- tdSql.query("select * from information_schema.ins_databases")
+ tdSql.query("show databases")
tdSql.checkRows(2)
- os.system("%s -i ./taosdumptest/tmp" % binPath)
+ os.system("%s -i ./taosdumptest/tmp -y" % binPath)
- tdSql.query("select * from information_schema.ins_databases")
+ tdSql.query("show databases")
tdSql.checkRows(3)
tdSql.checkData(2, 0, 'db')
@@ -105,23 +118,22 @@ class TDTestCase:
"create table stb(ts timestamp, c1 binary(16374), c2 binary(16374), c3 binary(16374)) tags(t1 nchar(256))")
tdSql.execute(
"insert into t1 using stb tags('t1') values(now, '%s', '%s', '%s')" %
- ("16374",
- "16374",
- "16374"))
+ (self.generateString(16374),
+ self.generateString(16374),
+ self.generateString(16374)))
-# sys.exit(0)
os.system("rm ./taosdumptest/tmp/*.sql")
os.system("rm ./taosdumptest/tmp/*.avro*")
+ os.system("rm -rf ./taosdumptest/tmp/taosdump.*")
os.system("%s -D test -o ./taosdumptest/tmp -y" % binPath)
tdSql.execute("drop database test")
- tdSql.query("select * from information_schema.ins_databases")
+ tdSql.query("show databases")
tdSql.checkRows(3)
os.system("%s -i ./taosdumptest/tmp -y" % binPath)
tdSql.execute("use test")
- tdSql.error("show vnodes '' ")
tdSql.query("show stables")
tdSql.checkRows(1)
tdSql.checkData(0, 0, 'stb')
diff --git a/tests/pytest/util/common.py b/tests/pytest/util/common.py
index 33ef92bf735a5211044ebd37c3c8300abd8843a8..9ffebcbdad5f0fa07e26f1bb4d249643ab7bbe42 100644
--- a/tests/pytest/util/common.py
+++ b/tests/pytest/util/common.py
@@ -199,22 +199,22 @@ class TDCom:
res = requests.post(url, sql.encode("utf-8"), headers = self.preDefine()[0])
return res
- def cleanTb(self, type="taosc"):
+ def cleanTb(self, type="taosc", dbname="db"):
'''
type is taosc or restful
'''
- query_sql = "show stables"
+ query_sql = f"show {dbname}.stables"
res_row_list = tdSql.query(query_sql, True)
stb_list = map(lambda x: x[0], res_row_list)
for stb in stb_list:
if type == "taosc":
- tdSql.execute(f'drop table if exists `{stb}`')
+ tdSql.execute(f'drop table if exists {dbname}.`{stb}`')
if not stb[0].isdigit():
- tdSql.execute(f'drop table if exists {stb}')
+ tdSql.execute(f'drop table if exists {dbname}.{stb}')
elif type == "restful":
- self.restApiPost(f"drop table if exists `{stb}`")
+ self.restApiPost(f"drop table if exists {dbname}.`{stb}`")
if not stb[0].isdigit():
- self.restApiPost(f"drop table if exists {stb}")
+ self.restApiPost(f"drop table if exists {dbname}.{stb}")
def dateToTs(self, datetime_input):
return int(time.mktime(time.strptime(datetime_input, "%Y-%m-%d %H:%M:%S.%f")))
diff --git a/tests/pytest/util/dnodes.py b/tests/pytest/util/dnodes.py
index e530695d1e53c4628fb28175b308b67d149c16a3..4bcbe190d564a3d734d8c720af88eb1493867019 100644
--- a/tests/pytest/util/dnodes.py
+++ b/tests/pytest/util/dnodes.py
@@ -36,9 +36,9 @@ class TDSimClient:
"rpcDebugFlag": "143",
"tmrDebugFlag": "131",
"cDebugFlag": "143",
- "udebugFlag": "143",
- "jnidebugFlag": "143",
- "qdebugFlag": "143",
+ "uDebugFlag": "143",
+ "jniDebugFlag": "143",
+ "qDebugFlag": "143",
"supportVnodes": "1024",
"telemetryReporting": "0",
}
@@ -134,7 +134,6 @@ class TDDnode:
"uDebugFlag": "131",
"sDebugFlag": "143",
"wDebugFlag": "143",
- "qdebugFlag": "143",
"numOfLogLines": "100000000",
"statusInterval": "1",
"supportVnodes": "1024",
@@ -416,23 +415,15 @@ class TDDnode:
i += 1
if i > 50:
break
- tailCmdStr = 'tail -f '
- popen = subprocess.Popen(
- tailCmdStr + logFile,
- stdout=subprocess.PIPE,
- stderr=subprocess.PIPE,
- shell=True)
- pid = popen.pid
- # print('Popen.pid:' + str(pid))
- timeout = time.time() + 60 * 2
- while True:
- line = popen.stdout.readline().strip()
- if bkey in line:
- popen.kill()
- break
- if time.time() > timeout:
- tdLog.exit('wait too long for taosd start')
- tdLog.debug("the dnode:%d has been started." % (self.index))
+ with open(logFile) as f:
+ timeout = time.time() + 60 * 2
+ while True:
+ line = f.readline().encode('utf-8')
+ if bkey in line:
+ break
+ if time.time() > timeout:
+ tdLog.exit('wait too long for taosd start')
+ tdLog.debug("the dnode:%d has been started." % (self.index))
else:
tdLog.debug(
"wait 10 seconds for the dnode:%d to start." %
@@ -481,19 +472,21 @@ class TDDnode:
toBeKilled = "valgrind.bin"
if self.running != 0:
- psCmd = "ps -ef|grep -w %s| grep -v grep | awk '{print $2}'" % toBeKilled
+ psCmd = "ps -ef|grep -w %s| grep -v grep | awk '{print $2}' | xargs" % toBeKilled
processID = subprocess.check_output(
- psCmd, shell=True).decode("utf-8")
-
+ psCmd, shell=True).decode("utf-8").strip()
+
onlyKillOnceWindows = 0
while(processID):
if not platform.system().lower() == 'windows' or (onlyKillOnceWindows == 0 and platform.system().lower() == 'windows'):
killCmd = "kill -INT %s > /dev/null 2>&1" % processID
+ if platform.system().lower() == 'windows':
+ killCmd = "kill -INT %s > nul 2>&1" % processID
os.system(killCmd)
onlyKillOnceWindows = 1
time.sleep(1)
processID = subprocess.check_output(
- psCmd, shell=True).decode("utf-8")
+ psCmd, shell=True).decode("utf-8").strip()
if not platform.system().lower() == 'windows':
for port in range(6030, 6041):
fuserCmd = "fuser -k -n tcp %d > /dev/null" % port
@@ -517,11 +510,11 @@ class TDDnode:
if self.running != 0:
if platform.system().lower() == 'windows':
- psCmd = "for /f %%a in ('wmic process where \"name='taosd.exe' and CommandLine like '%%dnode%d%%'\" get processId ^| xargs echo ^| awk ^'{print $2}^' ^&^& echo aa') do @(ps | grep %%a | awk '{print $1}' )" % (self.index)
+ psCmd = "for /f %%a in ('wmic process where \"name='taosd.exe' and CommandLine like '%%dnode%d%%'\" get processId ^| xargs echo ^| awk ^'{print $2}^' ^&^& echo aa') do @(ps | grep %%a | awk '{print $1}' | xargs)" % (self.index)
else:
- psCmd = "ps -ef|grep -w %s| grep dnode%d|grep -v grep | awk '{print $2}'" % (toBeKilled,self.index)
+ psCmd = "ps -ef|grep -w %s| grep dnode%d|grep -v grep | awk '{print $2}' | xargs" % (toBeKilled,self.index)
processID = subprocess.check_output(
- psCmd, shell=True).decode("utf-8")
+ psCmd, shell=True).decode("utf-8").strip()
onlyKillOnceWindows = 0
while(processID):
@@ -531,7 +524,7 @@ class TDDnode:
onlyKillOnceWindows = 1
time.sleep(1)
processID = subprocess.check_output(
- psCmd, shell=True).decode("utf-8")
+ psCmd, shell=True).decode("utf-8").strip()
if self.valgrind:
time.sleep(2)
@@ -548,9 +541,9 @@ class TDDnode:
toBeKilled = "valgrind.bin"
if self.running != 0:
- psCmd = "ps -ef|grep -w %s| grep -v grep | awk '{print $2}'" % toBeKilled
+ psCmd = "ps -ef|grep -w %s| grep -v grep | awk '{print $2}' | xargs" % toBeKilled
processID = subprocess.check_output(
- psCmd, shell=True).decode("utf-8")
+ psCmd, shell=True).decode("utf-8").strip()
onlyKillOnceWindows = 0
while(processID):
@@ -560,7 +553,7 @@ class TDDnode:
onlyKillOnceWindows = 1
time.sleep(1)
processID = subprocess.check_output(
- psCmd, shell=True).decode("utf-8")
+ psCmd, shell=True).decode("utf-8").strip()
for port in range(6030, 6041):
fuserCmd = "fuser -k -n tcp %d" % port
os.system(fuserCmd)
@@ -705,15 +698,15 @@ class TDDnodes:
for i in range(len(self.dnodes)):
self.dnodes[i].stop()
- psCmd = "ps -ef | grep -w taosd | grep 'root' | grep -v grep| grep -v defunct | awk '{print $2}'"
- processID = subprocess.check_output(psCmd, shell=True).decode("utf-8")
+ psCmd = "ps -ef | grep -w taosd | grep 'root' | grep -v grep| grep -v defunct | awk '{print $2}' | xargs"
+ processID = subprocess.check_output(psCmd, shell=True).decode("utf-8").strip()
if processID:
cmd = "sudo systemctl stop taosd"
os.system(cmd)
# if os.system(cmd) != 0 :
# tdLog.exit(cmd)
- psCmd = "ps -ef|grep -w taosd| grep -v grep| grep -v defunct | awk '{print $2}'"
- processID = subprocess.check_output(psCmd, shell=True).decode("utf-8")
+ psCmd = "ps -ef|grep -w taosd| grep -v grep| grep -v defunct | awk '{print $2}' | xargs"
+ processID = subprocess.check_output(psCmd, shell=True).decode("utf-8").strip()
while(processID):
if platform.system().lower() == 'windows':
killCmd = "kill -9 %s > nul 2>&1" % processID
@@ -722,11 +715,11 @@ class TDDnodes:
os.system(killCmd)
time.sleep(1)
processID = subprocess.check_output(
- psCmd, shell=True).decode("utf-8")
+ psCmd, shell=True).decode("utf-8").strip()
if self.killValgrind == 1:
- psCmd = "ps -ef|grep -w valgrind.bin| grep -v grep | awk '{print $2}'"
- processID = subprocess.check_output(psCmd, shell=True).decode("utf-8")
+ psCmd = "ps -ef|grep -w valgrind.bin| grep -v grep | awk '{print $2}' | xargs"
+ processID = subprocess.check_output(psCmd, shell=True).decode("utf-8").strip()
while(processID):
if platform.system().lower() == 'windows':
killCmd = "kill -TERM %s > nul 2>&1" % processID
@@ -735,7 +728,7 @@ class TDDnodes:
os.system(killCmd)
time.sleep(1)
processID = subprocess.check_output(
- psCmd, shell=True).decode("utf-8")
+ psCmd, shell=True).decode("utf-8").strip()
# if os.system(cmd) != 0 :
# tdLog.exit(cmd)
diff --git a/tests/pytest/util/sql.py b/tests/pytest/util/sql.py
index 753c41e094701271ca3b49a53eabde1461bd1e08..b320cf5995fd0063352f0da7a2dc04933022a7d2 100644
--- a/tests/pytest/util/sql.py
+++ b/tests/pytest/util/sql.py
@@ -102,7 +102,7 @@ class TDSql:
caller = inspect.getframeinfo(inspect.stack()[1][0])
args = (caller.filename, caller.lineno, sql, repr(e))
tdLog.notice("%s(%d) failed: sql:%s, %s" % args)
- raise Exception(repr(e))
+ raise Exception(repr(e))
i+=1
time.sleep(1)
pass
@@ -225,25 +225,21 @@ class TDSql:
# suppose user want to check nanosecond timestamp if a longer data passed
if (len(data) >= 28):
if pd.to_datetime(self.queryResult[row][col]) == pd.to_datetime(data):
- tdLog.info("sql:%s, row:%d col:%d data:%d == expect:%s" %
- (self.sql, row, col, self.queryResult[row][col], data))
+ tdLog.info(f"sql:{self.sql}, row:{row} col:{col} data:{self.queryResult[row][col]} == expect:{data}")
else:
if self.queryResult[row][col] == _parse_datetime(data):
- tdLog.info("sql:%s, row:%d col:%d data:%s == expect:%s" %
- (self.sql, row, col, self.queryResult[row][col], data))
+ tdLog.info(f"sql:{self.sql}, row:{row} col:{col} data:{self.queryResult[row][col]} == expect:{data}")
return
if str(self.queryResult[row][col]) == str(data):
- tdLog.info("sql:%s, row:%d col:%d data:%s == expect:%s" %
- (self.sql, row, col, self.queryResult[row][col], data))
+ tdLog.info(f"sql:{self.sql}, row:{row} col:{col} data:{self.queryResult[row][col]} == expect:{data}")
return
+
elif isinstance(data, float):
if abs(data) >= 1 and abs((self.queryResult[row][col] - data) / data) <= 0.000001:
- tdLog.info("sql:%s, row:%d col:%d data:%f == expect:%f" %
- (self.sql, row, col, self.queryResult[row][col], data))
+ tdLog.info(f"sql:{self.sql}, row:{row} col:{col} data:{self.queryResult[row][col]} == expect:{data}")
elif abs(data) < 1 and abs(self.queryResult[row][col] - data) <= 0.000001:
- tdLog.info("sql:%s, row:%d col:%d data:%f == expect:%f" %
- (self.sql, row, col, self.queryResult[row][col], data))
+ tdLog.info(f"sql:{self.sql}, row:{row} col:{col} data:{self.queryResult[row][col]} == expect:{data}")
else:
caller = inspect.getframeinfo(inspect.stack()[1][0])
args = (caller.filename, caller.lineno, self.sql, row, col, self.queryResult[row][col], data)
@@ -254,21 +250,7 @@ class TDSql:
args = (caller.filename, caller.lineno, self.sql, row, col, self.queryResult[row][col], data)
tdLog.exit("%s(%d) failed: sql:%s row:%d col:%d data:%s != expect:%s" % args)
- if data is None:
- tdLog.info("sql:%s, row:%d col:%d data:%s == expect:%s" %
- (self.sql, row, col, self.queryResult[row][col], data))
- elif isinstance(data, str):
- tdLog.info("sql:%s, row:%d col:%d data:%s == expect:%s" %
- (self.sql, row, col, self.queryResult[row][col], data))
- elif isinstance(data, datetime.date):
- tdLog.info("sql:%s, row:%d col:%d data:%s == expect:%s" %
- (self.sql, row, col, self.queryResult[row][col], data))
- elif isinstance(data, float):
- tdLog.info("sql:%s, row:%d col:%d data:%s == expect:%s" %
- (self.sql, row, col, self.queryResult[row][col], data))
- else:
- tdLog.info("sql:%s, row:%d col:%d data:%s == expect:%d" %
- (self.sql, row, col, self.queryResult[row][col], data))
+ tdLog.info(f"sql:{self.sql}, row:{row} col:{col} data:{self.queryResult[row][col]} == expect:{data}")
def getData(self, row, col):
self.checkRowCol(row, col)
@@ -307,7 +289,7 @@ class TDSql:
caller = inspect.getframeinfo(inspect.stack()[1][0])
args = (caller.filename, caller.lineno, sql, repr(e))
tdLog.notice("%s(%d) failed: sql:%s, %s" % args)
- raise Exception(repr(e))
+ raise Exception(repr(e))
i+=1
time.sleep(1)
pass
@@ -329,7 +311,7 @@ class TDSql:
tdLog.exit("%s(%d) failed: sql:%s, col_name_list:%s != expect_col_name_list:%s" % args)
def __check_equal(self, elm, expect_elm):
- if not type(elm) in(list, tuple) and elm == expect_elm:
+ if elm == expect_elm:
return True
if type(elm) in(list, tuple) and type(expect_elm) in(list, tuple):
if len(elm) != len(expect_elm):
diff --git a/tests/script/api/batchprepare.c b/tests/script/api/batchprepare.c
index ada2039460b431363555025ec7984f6b2f1b354a..f39d5e6528275900350ffaefbee18d43ce9a9e81 100644
--- a/tests/script/api/batchprepare.c
+++ b/tests/script/api/batchprepare.c
@@ -2598,7 +2598,6 @@ void runAll(TAOS *taos) {
printf("%s Begin\n", gCaseCtrl.caseCatalog);
runCaseList(taos);
-#if 0
strcpy(gCaseCtrl.caseCatalog, "Micro DB precision Test");
printf("%s Begin\n", gCaseCtrl.caseCatalog);
gCaseCtrl.precision = TIME_PRECISION_MICRO;
@@ -2654,7 +2653,6 @@ void runAll(TAOS *taos) {
gCaseCtrl.bindColNum = 6;
runCaseList(taos);
gCaseCtrl.bindColNum = 0;
-#endif
/*
strcpy(gCaseCtrl.caseCatalog, "Bind Col Type Test");
diff --git a/tests/script/jenkins/basic.txt b/tests/script/jenkins/basic.txt
index 97295d75e072c3c561f579bfc8cb2c15489da858..82f73a4fdd209e84d85d8616ece777cf37e02dfb 100644
--- a/tests/script/jenkins/basic.txt
+++ b/tests/script/jenkins/basic.txt
@@ -221,6 +221,7 @@
./test.sh -f tsim/table/describe.sim
./test.sh -f tsim/table/double.sim
./test.sh -f tsim/table/float.sim
+./test.sh -f tsim/table/hash.sim
./test.sh -f tsim/table/int.sim
./test.sh -f tsim/table/limit.sim
./test.sh -f tsim/table/smallint.sim
@@ -248,6 +249,12 @@
./test.sh -f tsim/stream/windowClose.sim
./test.sh -f tsim/stream/ignoreExpiredData.sim
./test.sh -f tsim/stream/sliding.sim
+#./test.sh -f tsim/stream/partitionbyColumnInterval.sim
+#./test.sh -f tsim/stream/partitionbyColumnSession.sim
+#./test.sh -f tsim/stream/partitionbyColumnState.sim
+#./test.sh -f tsim/stream/deleteInterval.sim
+#./test.sh -f tsim/stream/deleteSession.sim
+#./test.sh -f tsim/stream/deleteState.sim
# ---- transaction ----
./test.sh -f tsim/trans/lossdata1.sim
@@ -296,7 +303,7 @@
./test.sh -f tsim/insert/backquote.sim -m
# unsupport ./test.sh -f tsim/parser/fourArithmetic-basic.sim -m
./test.sh -f tsim/query/interval-offset.sim -m
-./test.sh -f tsim/tmq/basic3.sim -m
+# unsupport ./test.sh -f tsim/tmq/basic3.sim -m
./test.sh -f tsim/stable/vnode3.sim -m
./test.sh -f tsim/qnode/basic1.sim -m
# unsupport ./test.sh -f tsim/mnode/basic1.sim -m
@@ -344,6 +351,7 @@
# --- scalar ----
./test.sh -f tsim/scalar/in.sim
./test.sh -f tsim/scalar/scalar.sim
+./test.sh -f tsim/scalar/filter.sim
# ---- alter ----
./test.sh -f tsim/alter/cached_schema_after_alter.sim
diff --git a/tests/script/sh/abs_max.c b/tests/script/sh/abs_max.c
deleted file mode 100644
index d623adacf941e26d0de74c0c582beb7ca83c9c13..0000000000000000000000000000000000000000
--- a/tests/script/sh/abs_max.c
+++ /dev/null
@@ -1,88 +0,0 @@
-#include
-#include
-#include