diff --git a/README-CN.md b/README-CN.md
index 81d64941af4cf8ccf1c55199abbbb025b5f2e17a..44542747eb15fd279ae35eb437381f717811ec63 100644
--- a/README-CN.md
+++ b/README-CN.md
@@ -15,7 +15,7 @@
[](https://coveralls.io/github/taosdata/TDengine?branch=develop)
[](https://bestpractices.coreinfrastructure.org/projects/4201)
-简体中文 | [English](README.md) | 很多职位正在热招中,请看[这里](https://www.taosdata.com/cn/careers/)
+简体中文 | [English](README.md) | [TDengine 云服务](https://cloud.taosdata.com/?utm_medium=cn&utm_source=github) | 很多职位正在热招中,请看[这里](https://www.taosdata.com/cn/careers/)
# TDengine 简介
diff --git a/cmake/cmake.define b/cmake/cmake.define
index 0d5c21604a71fe0b6f119cf0ba29c66ebee93709..f3caf49da339d0055476df8175a3041ba7ba69e2 100644
--- a/cmake/cmake.define
+++ b/cmake/cmake.define
@@ -123,8 +123,8 @@ ELSE ()
SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Werror -Werror=return-type -fPIC -O3 -Wformat=2 -Wno-format-nonliteral -Wno-format-truncation -Wno-format-y2k")
SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Werror -Wno-reserved-user-defined-literal -Wno-literal-suffix -Werror=return-type -fPIC -O3 -Wformat=2 -Wno-format-nonliteral -Wno-format-truncation -Wno-format-y2k")
ELSE ()
- SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Werror -Werror=return-type -fPIC -gdwarf-2 -g3 -Wformat=2 -Wno-format-nonliteral -Wno-format-truncation -Wno-format-y2k")
- SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-reserved-user-defined-literal -Wno-literal-suffix -Werror=return-type -fPIC -gdwarf-2 -g3 -Wformat=2 -Wno-format-nonliteral -Wno-format-truncation -Wno-format-y2k")
+ SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Werror -Werror=return-type -fPIC -g3 -gdwarf-2 -Wformat=2 -Wno-format-nonliteral -Wno-format-truncation -Wno-format-y2k")
+ SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-reserved-user-defined-literal -g3 -Wno-literal-suffix -Werror=return-type -fPIC -gdwarf-2 -Wformat=2 -Wno-format-nonliteral -Wno-format-truncation -Wno-format-y2k")
ENDIF ()
# disable all assert
diff --git a/cmake/cmake.options b/cmake/cmake.options
index 3bc5e202a57bd4085c1c9b42688f79581291beb2..555b72cbdf4955ec5945103716125b67c2ea6ed3 100644
--- a/cmake/cmake.options
+++ b/cmake/cmake.options
@@ -77,6 +77,12 @@ ELSEIF (TD_DARWIN_64)
ENDIF ()
ENDIF ()
+option(
+ BUILD_GEOS
+ "If build geos on Windows"
+ ON
+ )
+
option(
BUILD_SHARED_LIBS
""
diff --git a/cmake/cmake.platform b/cmake/cmake.platform
index cb09bf20859d61e89c4d60e40a0ac99367ed4f84..ba747c6134c7032239b6d1d7b87186be66257069 100644
--- a/cmake/cmake.platform
+++ b/cmake/cmake.platform
@@ -57,8 +57,6 @@ IF (${CMAKE_SYSTEM_NAME} MATCHES "Linux" OR ${CMAKE_SYSTEM_NAME} MATCHES "Darwin
SET(TD_DARWIN TRUE)
SET(OSTYPE "macOS")
ADD_DEFINITIONS("-DDARWIN -Wno-tautological-pointer-compare")
- INCLUDE_DIRECTORIES(/usr/local/include)
- LINK_DIRECTORIES(/usr/local/lib)
IF (${CMAKE_SYSTEM_PROCESSOR} MATCHES "arm64")
MESSAGE("Current system arch is arm64")
diff --git a/cmake/cmake.version b/cmake/cmake.version
index 6d893c0627d3dbae5ecbc2e5a1bf55584b59d96f..3d0dc80902c5ebd7daf64a91ac37f72fcf7023bf 100644
--- a/cmake/cmake.version
+++ b/cmake/cmake.version
@@ -2,7 +2,7 @@
IF (DEFINED VERNUMBER)
SET(TD_VER_NUMBER ${VERNUMBER})
ELSE ()
- SET(TD_VER_NUMBER "3.0.4.3")
+ SET(TD_VER_NUMBER "3.0.5.0")
ENDIF ()
IF (DEFINED VERCOMPATIBLE)
diff --git a/cmake/taosadapter_CMakeLists.txt.in b/cmake/taosadapter_CMakeLists.txt.in
index 391972960a4ef0a5942f530519c20c637483b164..ef6ed4af1d1171bc91867d0aa9cd8dc184852ced 100644
--- a/cmake/taosadapter_CMakeLists.txt.in
+++ b/cmake/taosadapter_CMakeLists.txt.in
@@ -2,7 +2,7 @@
# taosadapter
ExternalProject_Add(taosadapter
GIT_REPOSITORY https://github.com/taosdata/taosadapter.git
- GIT_TAG 283b50d
+ GIT_TAG 3.0
SOURCE_DIR "${TD_SOURCE_DIR}/tools/taosadapter"
BINARY_DIR ""
#BUILD_IN_SOURCE TRUE
diff --git a/cmake/taostools_CMakeLists.txt.in b/cmake/taostools_CMakeLists.txt.in
index d9d2f120690e714122756a36821a6fe6398151c2..9a6a5329ae3af930055d253d8a454b9e55c89794 100644
--- a/cmake/taostools_CMakeLists.txt.in
+++ b/cmake/taostools_CMakeLists.txt.in
@@ -2,7 +2,7 @@
# taos-tools
ExternalProject_Add(taos-tools
GIT_REPOSITORY https://github.com/taosdata/taos-tools.git
- GIT_TAG 4378702
+ GIT_TAG 3.0
SOURCE_DIR "${TD_SOURCE_DIR}/tools/taos-tools"
BINARY_DIR ""
#BUILD_IN_SOURCE TRUE
diff --git a/contrib/CMakeLists.txt b/contrib/CMakeLists.txt
index b3333dee9918cc7167799ca6381c6ff0cba2f443..59986a3b3c4eaf9aac9711c13fb39bdd003cf698 100644
--- a/contrib/CMakeLists.txt
+++ b/contrib/CMakeLists.txt
@@ -231,11 +231,16 @@ if(${BUILD_WITH_ROCKSDB})
if(${TD_LINUX})
SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-error=maybe-uninitialized -Wno-error=unused-but-set-variable -Wno-error=unused-variable -Wno-error=unused-function -Wno-errno=unused-private-field -Wno-error=unused-result")
endif(${TD_LINUX})
+ MESSAGE(STATUS "CXXXX STATUS CONFIG: " ${CMAKE_CXX_FLAGS})
if(${TD_DARWIN})
SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-error=maybe-uninitialized")
endif(${TD_DARWIN})
+ if (${TD_DARWIN_ARM64})
+ set(HAS_ARMV8_CRC true)
+ endif(${TD_DARWIN_ARM64})
+
if (${TD_WINDOWS})
SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /wd4244 /wd4819")
endif(${TD_WINDOWS})
@@ -248,7 +253,7 @@ if(${BUILD_WITH_ROCKSDB})
endif(${TD_DARWIN})
if(${TD_WINDOWS})
- option(WITH_JNI "" ON)
+ option(WITH_JNI "" OFF)
endif(${TD_WINDOWS})
if(${TD_WINDOWS})
@@ -260,7 +265,7 @@ if(${BUILD_WITH_ROCKSDB})
option(WITH_FALLOCATE "" OFF)
option(WITH_JEMALLOC "" OFF)
option(WITH_GFLAGS "" OFF)
- option(PORTABLE "" ON)
+ option(PORTABLE "" OFF)
option(WITH_LIBURING "" OFF)
option(FAIL_ON_WARNINGS OFF)
@@ -268,8 +273,11 @@ if(${BUILD_WITH_ROCKSDB})
option(WITH_BENCHMARK_TOOLS "" OFF)
option(WITH_TOOLS "" OFF)
option(WITH_LIBURING "" OFF)
-
+ IF (TD_LINUX)
+ option(ROCKSDB_BUILD_SHARED "Build shared versions of the RocksDB libraries" ON)
+ ELSE()
option(ROCKSDB_BUILD_SHARED "Build shared versions of the RocksDB libraries" OFF)
+ ENDIF()
add_subdirectory(rocksdb EXCLUDE_FROM_ALL)
target_include_directories(
rocksdb
diff --git a/docs/en/07-develop/09-udf.md b/docs/en/07-develop/09-udf.md
index f107512e9c14459453d6424e6f05770bf510bdde..825d3c6f8b9faf1d9147efed03697648d5c99ae0 100644
--- a/docs/en/07-develop/09-udf.md
+++ b/docs/en/07-develop/09-udf.md
@@ -10,7 +10,7 @@ User-defined functions can be scalar functions or aggregate functions. Scalar fu
TDengine supports user-defined functions written in C or Python. This document describes the usage of user-defined functions.
-## Implement a UDF in C
+## Implement a UDF in C
When you create a user-defined function, you must implement standard interface functions:
- For scalar functions, implement the `scalarfn` interface function.
@@ -111,13 +111,13 @@ Interface functions return a value that indicates whether the operation was succ
For information about the parameters for interface functions, see Data Model
#### Scalar Interface
- `int32_t scalarfn(SUdfDataBlock* inputDataBlock, SUdfColumn *resultColumn)`
-
+ `int32_t scalarfn(SUdfDataBlock* inputDataBlock, SUdfColumn *resultColumn)`
+
Replace `scalarfn` with the name of your function. This function performs scalar calculations on data blocks. You can configure a value through the parameters in the `resultColumn` structure.
The parameters in the function are defined as follows:
- inputDataBlock: The data block to input.
- - resultColumn: The column to output. The column to output.
+ - resultColumn: The column to output. The column to output.
#### Aggregate Interface
@@ -197,7 +197,7 @@ The data structure is described as follows:
- The SUdfDataBlock block includes the number of rows (numOfRows) and the number of columns (numCols). udfCols[i] (0 <= i <= numCols-1) indicates that each column is of type SUdfColumn.
- SUdfColumn includes the definition of the data type of the column (colMeta) and the data in the column (colData).
- The member definitions of SUdfColumnMeta are the same as the data type definitions in `taos.h`.
-- The data in SUdfColumnData can become longer. varLenCol indicates variable-length data, and fixLenCol indicates fixed-length data.
+- The data in SUdfColumnData can become longer. varLenCol indicates variable-length data, and fixLenCol indicates fixed-length data.
- SUdfInterBuf defines the intermediate structure `buffer` and the number of results in the buffer `numOfResult`.
Additional functions are defined in `taosudf.h` to make it easier to work with these structures.
@@ -270,29 +270,95 @@ select max_vol(vol1,vol2,vol3,deviceid) from battery;
## Implement a UDF in Python
+### Prepare Environment
+
+1. Prepare Python Environment
+
+Please follow standard procedure of python environment preparation.
+
+2. Install Python package `taospyudf`
+
+```shell
+pip3 install taospyudf
+```
+
+During this process, some C++ code needs to be compiled. So it's required to have `cmake` and `gcc` on your system. The compiled `libtaospyudf.so` will be automatically copied to `/usr/local/lib` path. If you are not root user, please use `sudo`. After installation is done, please check using the command below.
+
+```shell
+root@slave11 ~/udf $ ls -l /usr/local/lib/libtaos*
+-rw-r--r-- 1 root root 671344 May 24 22:54 /usr/local/lib/libtaospyudf.so
+```
+
+Then execute the command below.
+
+```shell
+ldconfig
+```
+
+3. If you want to utilize some 3rd party python packages in your Python UDF, please set configuration parameter `UdfdLdLibPath` to the value of `PYTHONPATH` before starting `taosd`.
+
+4. Launch `taosd` service
+
+Please refer to [Get Started](../../get-started)
+
+### Interface definition
+
+#### Introduction to Interface
+
Implement the specified interface functions when implementing a UDF in Python.
- implement `process` function for the scalar UDF.
- implement `start`, `reduce`, `finish` for the aggregate UDF.
- implement `init` for initialization and `destroy` for termination.
-### Implement a Scalar UDF in Python
+#### Scalar UDF Interface
The implementation of a scalar UDF is described as follows:
+```Python
+def process(input: datablock) -> tuple[output_type]:
+```
+
+Description: this function processes the datablock, which is the input; you can use datablock.data(row, col) to access the Python object at location (row, col); the output is a tuple object consisting of objects of type output_type
+
+#### Aggregate UDF Interface
+
+The implementation of an aggregate function is described as follows:
+
+```Python
+def start() -> bytes:
+def reduce(inputs: datablock, buf: bytes) -> bytes
+def finish(buf: bytes) -> output_type:
+```
+
+Description: first start() is invoked to generate the initial result `buffer`; then the input data is divided into multiple row blocks, and reduce() is invoked for each block `inputs` and the current intermediate result `buf`; finally finish() is invoked to generate the final result from the intermediate `buf`; the final result can contain only 0 or 1 data.
+
+#### Initialization and Cleanup Interface
+
+```python
+def init()
+def destroy()
+```
+
+Description: init() does the work of initialization before processing any data; destroy() does the work of cleanup after the data is processed.
+
+### Python UDF Template
+
+#### Scalar Template
+
```Python
def init():
# initialization
def destroy():
# destroy
def process(input: datablock) -> tuple[output_type]:
- # process input datablock,
+ # process input datablock,
# datablock.data(row, col) is to access the python object in location(row,col)
- # return tuple object consisted of object of type outputtype
+ # return tuple object consisted of object of type outputtype
```
-### Implement an Aggregate UDF in Python
+Note: process() must be implemented, init() and destroy() must be defined too but they can do nothing.
-The implementation of an aggregate function is described as follows:
+#### Aggregate Template
```Python
def init():
@@ -303,41 +369,15 @@ def start() -> bytes:
#return serialize(init_state)
def reduce(inputs: datablock, buf: bytes) -> bytes
# deserialize buf to state
- # reduce the inputs and state into new_state.
- # use inputs.data(i,j) to access python ojbect of location(i,j)
+ # reduce the inputs and state into new_state.
+ # use inputs.data(i,j) to access python object of location(i,j)
# serialize new_state into new_state_bytes
- return new_state_bytes
+ return new_state_bytes
def finish(buf: bytes) -> output_type:
- #return obj of type outputtype
+ #return obj of type outputtype
```
-### Python UDF Interface Definition
-
-#### Scalar interface
-```Python
-def process(input: datablock) -> tuple[output_type]:
-```
-- `input` is a data block two-dimension matrix-like object, of which method `data(row, col)` returns the Python object located at location (`row`, `col`)
-- return a Python tuple object, of which each item is a Python object of type `output_type`
-
-#### Aggregate Interface
-```Python
-def start() -> bytes:
-def reduce(input: datablock, buf: bytes) -> bytes
-def finish(buf: bytes) -> output_type:
-```
-
-- first `start()` is called to return the initial result in type `bytes`
-- then the input data are divided into multiple data blocks and for each block `input`, `reduce` is called with the data block `input` and the current result `buf` bytes and generates a new intermediate result buffer.
-- finally, the `finish` function is called on the intermediate result `buf` and outputs 0 or 1 data of type `output_type`
-
-
-#### Initialization and Cleanup Interface
-```Python
-def init()
-def destroy()
-```
-Implement `init` for initialization and `destroy` for termination.
+Note: aggregate UDF requires init(), destroy(), start(), reduce() and finish() to be impemented. start() generates the initial result in buffer, then the input data is divided into multiple row data blocks, reduce() is invoked for each data block `inputs` and intermediate `buf`, finally finish() is invoked to generate final result from the intermediate result `buf`.
### Data Mapping between TDengine SQL and Python UDF
@@ -353,15 +393,463 @@ The following table describes the mapping between TDengine SQL data type and Pyt
|TIMESTAMP | int |
|JSON and other types | Not Supported |
-### Installing Python UDF
-1. Install Python package `taospyudf` that executes Python UDF
-```bash
-sudo pip install taospyudf
-ldconfig
+### Development Guide
+
+In this section we will demonstrate 5 examples of developing UDF in Python language. In this guide, you will learn the development skills from easy case to hard case, the examples include:
+1. A scalar function which accepts only one integer as input and outputs ln(n^2 + 1).
+2. A scalar function which accepts n integers, like (x1, x2, ..., xn), and outputs the sum of the product of each input and its sequence number, i.e. x1 + 2 * x2 + ... + n * xn.
+3. A scalar function which accepts a timestamp and output the next closest Sunday of the timestamp. In this case, we will demonstrate how to use 3rd party library `moment`.
+4. An aggregate function which calculates the difference between the maximum and the minimum of a specific column, i.e. same functionality of built-in spread().
+
+In the guide, some debugging skills of using Python UDF will be explained too.
+
+We assume you are using Linux system and already have TDengine 3.0.4.0+ and Python 3.x.
+
+Note: **You can't use the print() function to output logs inside a UDF, you have to write the log to a specific file or use the logging module of Python.**
+
+#### Sample 1: Simplest UDF
+
+This scalar UDF accepts an integer as input and output ln(n^2 + 1).
+
+Firstly, please compose a Python source code file in your system and save it, e.g. `/root/udf/myfun.py`, the code is like below.
+
+```python
+from math import log
+
+def init():
+ pass
+
+def destroy():
+ pass
+
+def process(block):
+ rows, _ = block.shape()
+ return [log(block.data(i, 0) ** 2 + 1) for i in range(rows)]
+```
+
+This program consists of 3 functions, init() and destroy() do nothing, but they have to be defined even though there is nothing to do in them because they are critical parts of a python UDF. The most important function is process(), which accepts a data block and the data block object has two methods:
+1. shape() returns the number of rows and the number of columns of the data block
+2. data(i, j) returns the value at (i,j) in the block
+
+The output of the process() function of a scalar UDF returns exactly same number of data as the number of input rows. We will ignore the number of columns because we just want to compute on the first column.
+
+Then, we create the UDF using the SQL command below.
+
+```sql
+create function myfun as '/root/udf/myfun.py' outputtype double language 'Python'
+```
+
+Here is the output example, it may change a little depending on your version being used.
+
+```shell
+ taos> create function myfun as '/root/udf/myfun.py' outputtype double language 'Python';
+Create OK, 0 row(s) affected (0.005202s)
+```
+
+Then, we used the `show` command to prove the creation of the UDF is successful.
+
+```text
+taos> show functions;
+ name |
+=================================
+ myfun |
+Query OK, 1 row(s) in set (0.005767s)
+```
+
+Next, we can try to test the function. Before executing the UDF, we need to prepare some data using the command below in TDengine CLI.
+
+```sql
+create database test;
+create table t(ts timestamp, v1 int, v2 int, v3 int);
+insert into t values('2023-05-01 12:13:14', 1, 2, 3);
+insert into t values('2023-05-03 08:09:10', 2, 3, 4);
+insert into t values('2023-05-10 07:06:05', 3, 4, 5);
+```
+
+Execute the UDF to test it:
+
+```sql
+taos> select myfun(v1, v2) from t;
+
+DB error: udf function execution failure (0.011088s)
+```
+
+Unfortunately, the UDF execution failed. We need to check the log of the `udfd` daemon to find out why.
+
+```shell
+tail -10 /var/log/taos/udfd.log
+```
+
+Below is the output.
+
+```text
+05/24 22:46:28.733545 01665799 UDF ERROR can not load library libtaospyudf.so. error: operation not permitted
+05/24 22:46:28.733561 01665799 UDF ERROR can not load python plugin. lib path libtaospyudf.so
+```
+
+From the error message we can find out that `libtaospyudf.so` was not loaded successfully. Please refer to the [Prepare Environment] section.
+
+After correcting environment issues, execute the UDF:
+
+```sql
+taos> select myfun(v1) from t;
+ myfun(v1) |
+============================
+ 0.693147181 |
+ 1.609437912 |
+ 2.302585093 |
+```
+
+Now, we have finished the first UDF in Python, and learned some basic debugging skills.
+
+#### Sample 2: Abnormal Processing
+
+The `myfun` UDF example in sample 1 has passed, but it has two drawbacks.
+
+1. The program accepts only one column of data as input, but it doesn't throw an exception if you pass multiple columns.
+
+```sql
+taos> select myfun(v1, v2) from t;
+ myfun(v1, v2) |
+============================
+ 0.693147181 |
+ 1.609437912 |
+ 2.302585093 |
+```
+
+2. `null` value is not processed. We expect the program to throw exception and terminate if `null` is passed as input.
+
+So, we try to optimize the process() function as below.
+
+```python
+def process(block):
+ rows, cols = block.shape()
+ if cols > 1:
+ raise Exception(f"require 1 parameter but given {cols}")
+ return [ None if block.data(i, 0) is None else log(block.data(i, 0) ** 2 + 1) for i in range(rows)]
+```
+
+Then update the UDF with the command below.
+
+```sql
+create or replace function myfun as '/root/udf/myfun.py' outputtype double language 'Python';
+```
+
+At this time, if we pass two arguments to `myfun`, the execution would fail.
+
+```sql
+taos> select myfun(v1, v2) from t;
+
+DB error: udf function execution failure (0.014643s)
+```
+
+However, the exception is not shown to end user, but displayed in the log file `/var/log/taos/taospyudf.log`
+
+```text
+2023-05-24 23:21:06.790 ERROR [1666188] [doPyUdfScalarProc@507] call pyUdfScalar proc function. context 0x7faade26d180. error: Exception: require 1 parameter but given 2
+
+At:
+ /var/lib/taos//.udf/myfun_3_1884e1281d9.py(12): process
+
+```
+
+Now, we have learned how to update a UDF and check the log of a UDF.
+
+Note: Prior to TDengine 3.0.5.0 (excluding), updating a UDF requires to restart `taosd` service. After 3.0.5.0, restarting is not required.
+
+#### Sample 3: UDF with n arguments
+
+A UDF which accepts n integers, like (x1, x2, ..., xn), and outputs the sum of the product of each value and its sequence number: 1 * x1 + 2 * x2 + ... + n * xn. If there is `null` in the input, then the result is `null`. The difference from sample 1 is that it can accept any number of columns as input and process each column. Assume the program is written in /root/udf/nsum.py:
+
+```python
+def init():
+ pass
+
+
+def destroy():
+ pass
+
+
+def process(block):
+ rows, cols = block.shape()
+ result = []
+ for i in range(rows):
+ total = 0
+ for j in range(cols):
+ v = block.data(i, j)
+ if v is None:
+ total = None
+ break
+ total += (j + 1) * block.data(i, j)
+ result.append(total)
+ return result
+```
+
+Create and test the UDF:
+
+```sql
+create function nsum as '/root/udf/nsum.py' outputtype double language 'Python';
+```
+
+```sql
+taos> insert into t values('2023-05-25 09:09:15', 6, null, 8);
+Insert OK, 1 row(s) affected (0.003675s)
+
+taos> select ts, v1, v2, v3, nsum(v1, v2, v3) from t;
+ ts | v1 | v2 | v3 | nsum(v1, v2, v3) |
+================================================================================================
+ 2023-05-01 12:13:14.000 | 1 | 2 | 3 | 14.000000000 |
+ 2023-05-03 08:09:10.000 | 2 | 3 | 4 | 20.000000000 |
+ 2023-05-10 07:06:05.000 | 3 | 4 | 5 | 26.000000000 |
+ 2023-05-25 09:09:15.000 | 6 | NULL | 8 | NULL |
+Query OK, 4 row(s) in set (0.010653s)
+```
+
+#### Sample 4: Utilize 3rd party package
+
+A UDF which accepts a timestamp and outputs the next closest Sunday. This sample requires the third party package `moment`, so you need to install it first.
+
+```shell
+pip3 install moment
+```
+
+Then compose the Python code in /root/udf/nextsunday.py
+
+```python
+import moment
+
+
+def init():
+ pass
+
+
+def destroy():
+ pass
+
+
+def process(block):
+ rows, cols = block.shape()
+ if cols > 1:
+ raise Exception("require only 1 parameter")
+ if not type(block.data(0, 0)) is int:
+ raise Exception("type error")
+ return [moment.unix(block.data(i, 0)).replace(weekday=7).format('YYYY-MM-DD')
+ for i in range(rows)]
+```
+
+UDF framework will map the TDengine timestamp to Python int type, so this function only accepts an integer representing millisecond. process() firstly validates the parameters, then use `moment` to replace the time, format the result and output.
+
+Create and test the UDF.
+
+```sql
+create function nextsunday as '/root/udf/nextsunday.py' outputtype binary(10) language 'Python';
+```
+
+If your `taosd` is started using `systemd`, you may encounter the error below. Next we will show how to debug.
+
+```sql
+taos> select ts, nextsunday(ts) from t;
+
+DB error: udf function execution failure (1.123615s)
+```
+
+```shell
+ tail -20 taospyudf.log
+2023-05-25 11:42:34.541 ERROR [1679419] [PyUdf::PyUdf@217] py udf load module failure. error ModuleNotFoundError: No module named 'moment'
+```
+
+This is because `moment` doesn't exist in the default library search path of the python UDF, please check the log file `taospyudf.log`.
+
+```shell
+grep 'sys path' taospyudf.log | tail -1
+```
+
+```text
+2023-05-25 10:58:48.554 INFO [1679419] [doPyOpen@592] python sys path: ['', '/lib/python38.zip', '/lib/python3.8', '/lib/python3.8/lib-dynload', '/lib/python3/dist-packages', '/var/lib/taos//.udf']
+```
+
+You may find that the default library search path is `/lib/python3/dist-packages` (just for example, it may be different in your system), but `moment` is installed to `/usr/local/lib/python3.8/dist-packages` (for example, it may be different in your system). Then we change the library search path of python UDF.
+
+Check `sys.path`, which must include the packages you install with pip3 command previously, as shown below:
+
+```python
+>>> import sys
+>>> ":".join(sys.path)
+'/usr/lib/python3.8:/usr/lib/python3.8/lib-dynload:/usr/local/lib/python3.8/dist-packages:/usr/lib/python3/dist-packages'
+```
+
+Copy the output and edit /etc/taos/taos.cfg to add the configuration parameter below.
+
+```shell
+UdfdLdLibPath /usr/lib/python3.8:/usr/lib/python3.8/lib-dynload:/usr/local/lib/python3.8/dist-packages:/usr/lib/python3/dist-packages
+```
+
+Save it, then restart `taosd`, using `systemctl restart taosd`, and test again, it will succeed this time.
+
+Note: If your cluster consists of multiple `taosd` instances, you have to repeat same process for each of them.
+
+```sql
+taos> select ts, nextsunday(ts) from t;
+ ts | nextsunday(ts) |
+===========================================
+ 2023-05-01 12:13:14.000 | 2023-05-07 |
+ 2023-05-03 08:09:10.000 | 2023-05-07 |
+ 2023-05-10 07:06:05.000 | 2023-05-14 |
+ 2023-05-25 09:09:15.000 | 2023-05-28 |
+Query OK, 4 row(s) in set (1.011474s)
```
-2. If PYTHONPATH is needed to find Python packages when the Python UDF executes, include the PYTHONPATH contents into the udfdLdLibPath variable of the taos.cfg configuration file
-
-### Python UDF Sample Code
+
+#### Sample 5: Aggregate Function
+
+An aggregate function which calculates the difference of the maximum and the minimum in a column. An aggregate function takes multiple rows as input and outputs only one data. The execution process of an aggregate UDF is like map-reduce: the framework divides the input into multiple parts, each mapper processes one block and the reducer aggregates the results of the mappers. The reduce() of a Python UDF has the functionality of both map() and reduce(). The reduce() takes two arguments: the data to be processed, and the result of other tasks executing reduce(). For example, assume the code is in `/root/udf/myspread.py`.
+
+```python
+import io
+import math
+import pickle
+
+LOG_FILE: io.TextIOBase = None
+
+
+def init():
+ global LOG_FILE
+ LOG_FILE = open("/var/log/taos/spread.log", "wt")
+ log("init function myspead success")
+
+
+def log(o):
+ LOG_FILE.write(str(o) + '\n')
+
+
+def destroy():
+ log("close log file: spread.log")
+ LOG_FILE.close()
+
+
+def start():
+ return pickle.dumps((-math.inf, math.inf))
+
+
+def reduce(block, buf):
+ max_number, min_number = pickle.loads(buf)
+ log(f"initial max_number={max_number}, min_number={min_number}")
+ rows, _ = block.shape()
+ for i in range(rows):
+ v = block.data(i, 0)
+ if v > max_number:
+ log(f"max_number={v}")
+ max_number = v
+ if v < min_number:
+ log(f"min_number={v}")
+ min_number = v
+ return pickle.dumps((max_number, min_number))
+
+
+def finish(buf):
+ max_number, min_number = pickle.loads(buf)
+ return max_number - min_number
+```
+
+In this example, we implemented an aggregate function, and added some logging.
+1. init() opens a file for logging
+2. log() is the function for logging, it converts the input object to string and output with an end of line
+3. destroy() closes the log file
+4. start() returns the initial buffer for storing the intermediate result
+5. reduce() processes each data block and aggregates the result
+6. finish() converts the final buffer to the final result
+
+Create the UDF.
+
+```sql
+create or replace aggregate function myspread as '/root/udf/myspread.py' outputtype double bufsize 128 language 'Python';
+```
+
+This SQL command has two important different points from the command creating scalar UDF.
+1. keyword `aggregate` is used
+2. keyword `bufsize` is used to specify the memory size for storing the intermediate result. In this example, the result is 32 bytes, but we specified 128 bytes for `bufsize`. You can use the `python` CLI to print actual size.
+
+```python
+>>> len(pickle.dumps((12345.6789, 23456789.9877)))
+32
+```
+
+Test this function, you can see the result is the same as the built-in spread() function.
+
+```sql
+taos> select myspread(v1) from t;
+ myspread(v1) |
+============================
+ 5.000000000 |
+Query OK, 1 row(s) in set (0.013486s)
+
+taos> select spread(v1) from t;
+ spread(v1) |
+============================
+ 5.000000000 |
+Query OK, 1 row(s) in set (0.005501s)
+```
+
+At last, check the log file, we can see that the reduce() function is executed 3 times, max value is updated 3 times and min value is updated only one time.
+
+```shell
+root@slave11 /var/log/taos $ cat spread.log
+init function myspead success
+initial max_number=-inf, min_number=inf
+max_number=1
+min_number=1
+initial max_number=1, min_number=1
+max_number=2
+max_number=3
+initial max_number=3, min_number=1
+max_number=6
+close log file: spread.log
+```
+
+### SQL Commands
+
+1. Create Scalar UDF
+
+```sql
+CREATE FUNCTION function_name AS library_path OUTPUTTYPE output_type LANGUAGE 'Python';
+```
+
+2. Create Aggregate UDF
+
+```sql
+CREATE AGGREGATE FUNCTION function_name AS library_path OUTPUTTYPE output_type LANGUAGE 'Python';
+```
+
+3. Update Scalar UDF
+
+```sql
+CREATE OR REPLACE FUNCTION function_name AS library_path OUTPUTTYPE output_type LANGUAGE 'Python';
+```
+
+4. Update Aggregate UDF
+
+```sql
+CREATE OR REPLACE AGGREGATE FUNCTION function_name AS library_path OUTPUTTYPE output_type BUFSIZE buf_size LANGUAGE 'Python';
+```
+
+Note: If the keyword `AGGREGATE` is used, the UDF will be treated as an aggregate UDF regardless of what it was before; similarly, if there is no keyword `AGGREGATE`, the UDF will be treated as a scalar function regardless of what it was before.
+
+5. Show the UDF
+
+The version of a UDF is increased by one every time it's updated.
+
+```sql
+select * from ins_functions \G;
+```
+
+6. Show and Drop existing UDF
+
+```sql
+SHOW functions;
+DROP FUNCTION function_name;
+```
+
+### More Python UDF Samples
+
#### Scalar Function [pybitand](https://github.com/taosdata/TDengine/blob/3.0/tests/script/sh/pybitand.py)
The `pybitand` function implements bitwise addition for multiple columns. If there is only one column, the column is returned. The `pybitand` function ignores null values.
@@ -377,7 +865,7 @@ The `pybitand` function implements bitwise addition for multiple columns. If the
#### Aggregate Function [pyl2norm](https://github.com/taosdata/TDengine/blob/3.0/tests/script/sh/pyl2norm.py)
-The `pyl2norm` function finds the second-order norm for all data in the input column. This squares the values, takes a cumulative sum, and finds the square root.
+The `pyl2norm` function finds the second-order norm for all data in the input columns. This squares the values, takes a cumulative sum, and finds the square root.
pyl2norm.py
@@ -387,5 +875,16 @@ The `pyl2norm` function finds the second-order norm for all data in the input co
+#### Aggregate Function [pycumsum](https://github.com/taosdata/TDengine/blob/3.0/tests/script/sh/pycumsum.py)
+
+The `pycumsum` function finds the cumulative sum for all data in the input columns.
+
+pycumsum.py
+
+```python
+{{#include tests/script/sh/pycumsum.py}}
+```
+
+
## Manage and Use UDF
You need to add UDF to TDengine before using it in SQL queries. For more information about how to manage UDF and how to invoke UDF, please see [Manage and Use UDF](../12-taos-sql/26-udf.md).
diff --git a/docs/en/12-taos-sql/01-data-type.md b/docs/en/12-taos-sql/01-data-type.md
index 70bea97dba24a7e3278afa2780a778630e375fb2..13007d5bb1beef28a7307b648754ee2bced41a21 100644
--- a/docs/en/12-taos-sql/01-data-type.md
+++ b/docs/en/12-taos-sql/01-data-type.md
@@ -45,7 +45,7 @@ In TDengine, the data types below can be used when specifying a column or tag.
:::note
- Only ASCII visible characters are suggested to be used in a column or tag of BINARY type. Multi-byte characters must be stored in NCHAR type.
-- The length of BINARY can be up to 16,374 bytes. The string value must be quoted with single quotes. You must specify a length in bytes for a BINARY value, for example binary(20) for up to twenty single-byte characters. If the data exceeds the specified length, an error will occur. The literal single quote inside the string must be preceded with back slash like `\'`
+- The length of BINARY can be up to 16,374 bytes (data column is 65,517 bytes and tag column is 16,382 bytes since version 3.0.5.0). The string value must be quoted with single quotes. You must specify a length in bytes for a BINARY value, for example binary(20) for up to twenty single-byte characters. If the data exceeds the specified length, an error will occur. The literal single quote inside the string must be preceded with back slash like `\'`
- Numeric values in SQL statements will be determined as integer or float type according to whether there is decimal point or whether scientific notation is used, so attention must be paid to avoid overflow. For example, 9999999999999999999 will be considered as overflow because it exceeds the upper limit of long integer, but 9999999999999999999.0 will be considered as a legal float number.
:::
diff --git a/docs/en/12-taos-sql/03-table.md b/docs/en/12-taos-sql/03-table.md
index f61d1f51471bf042aea79023bff4f74c43831eb8..7f39fb58673d1b79d184884087f09327568d67ca 100644
--- a/docs/en/12-taos-sql/03-table.md
+++ b/docs/en/12-taos-sql/03-table.md
@@ -45,7 +45,7 @@ table_option: {
1. The first column of a table MUST be of type TIMESTAMP. It is automatically set as the primary key.
2. The maximum length of the table name is 192 bytes.
-3. The maximum length of each row is 48k bytes, please note that the extra 2 bytes used by each BINARY/NCHAR column are also counted.
+3. The maximum length of each row is 48k (64k since version 3.0.5.0) bytes, please note that the extra 2 bytes used by each BINARY/NCHAR column are also counted.
4. The name of the subtable can only consist of characters from the English alphabet, digits and underscore. Table names can't start with a digit. Table names are case insensitive.
5. The maximum length in bytes must be specified when using BINARY or NCHAR types.
6. Escape character "\`" can be used to avoid the conflict between table names and reserved keywords, above rules will be bypassed when using escape character on table names, but the upper limit for the name length is still valid. The table names specified using escape character are case sensitive.
diff --git a/docs/en/12-taos-sql/06-select.md b/docs/en/12-taos-sql/06-select.md
index ea0d7abc16b18a92147642d86b167d96903ebfda..b28d5acb1828ddb67cf1fc2615973409362063af 100644
--- a/docs/en/12-taos-sql/06-select.md
+++ b/docs/en/12-taos-sql/06-select.md
@@ -55,7 +55,7 @@ window_clause: {
| INTERVAL(interval_val [, interval_offset]) [SLIDING (sliding_val)] [WATERMARK(watermark_val)] [FILL(fill_mod_and_val)]
interp_clause:
- RANGE(ts_val, ts_val) EVERY(every_val) FILL(fill_mod_and_val)
+ RANGE(ts_val [, ts_val]) EVERY(every_val) FILL(fill_mod_and_val)
partition_by_clause:
PARTITION BY expr [, expr] ...
diff --git a/docs/en/12-taos-sql/10-function.md b/docs/en/12-taos-sql/10-function.md
index a204415d651f0b8f784fc1602133eff3ff378133..b517bcb3ccdd90b52d778914ba77db3dba71d393 100644
--- a/docs/en/12-taos-sql/10-function.md
+++ b/docs/en/12-taos-sql/10-function.md
@@ -889,9 +889,10 @@ ignore_null_values: {
- `INTERP` is used to get the value that matches the specified time slice from a column. If no such value exists an interpolation value will be returned based on `FILL` parameter.
- The input data of `INTERP` is the value of the specified column and a `where` clause can be used to filter the original data. If no `where` condition is specified then all original data is the input.
- `INTERP` must be used along with `RANGE`, `EVERY`, `FILL` keywords.
-- The output time range of `INTERP` is specified by `RANGE(timestamp1,timestamp2)` parameter, with timestamp1 <= timestamp2. timestamp1 is the starting point of the output time range and must be specified. timestamp2 is the ending point of the output time range and must be specified.
+- The output time range of `INTERP` is specified by `RANGE(timestamp1,timestamp2)` parameter, with timestamp1 <= timestamp2. timestamp1 is the starting point of the output time range. timestamp2 is the ending point of the output time range.
- The number of rows in the result set of `INTERP` is determined by the parameter `EVERY(time_unit)`. Starting from timestamp1, one interpolation is performed for every time interval specified `time_unit` parameter. The parameter `time_unit` must be an integer, with no quotes, with a time unit of: a(millisecond)), s(second), m(minute), h(hour), d(day), or w(week). For example, `EVERY(500a)` will interpolate every 500 milliseconds.
- Interpolation is performed based on `FILL` parameter. For more information about FILL clause, see [FILL Clause](../distinguished/#fill-clause).
+- When only one timestamp value is specified in `RANGE` clause, `INTERP` is used to generate interpolation at this point in time. In this case, `EVERY` clause can be omitted. For example, SELECT INTERP(col) FROM tb RANGE('2023-01-01 00:00:00') FILL(linear).
- `INTERP` can be applied to supertable by interpolating primary key sorted data of all its childtables. It can also be used with `partition by tbname` when applied to supertable to generate interpolation on each single timeline.
- Pseudocolumn `_irowts` can be used along with `INTERP` to return the timestamps associated with interpolation points(support after version 3.0.2.0).
- Pseudocolumn `_isfilled` can be used along with `INTERP` to indicate whether the results are original records or data points generated by interpolation algorithm(support after version 3.0.3.0).
@@ -902,7 +903,7 @@ ignore_null_values: {
- We want to downsample every 1 hour and use a linear fill for missing values. Note the order in which the "partition by" clause and the "range", "every" and "fill" parameters are used.
```sql
-SELECT _irowts,INTERP(current) FROM test.meters PARTITION BY TBNAME RANGE('2017-07-22 00:00:00','2017-07-24 12:25:00') EVERY(1h) FILL(LINEAR)
+SELECT _irowts,INTERP(current) FROM test.meters PARTITION BY TBNAME RANGE('2017-07-22 00:00:00','2017-07-24 12:25:00') EVERY(1h) FILL(LINEAR)
```
### LAST
@@ -1008,8 +1009,7 @@ SAMPLE(expr, k)
**More explanations**:
-This function cannot be used in expression calculation.
-- Must be used with `PARTITION BY tbname` when it's used on a STable to force the result on each single timeline
+- This function cannot be used in expression calculation.
### TAIL
@@ -1088,7 +1088,6 @@ CSUM(expr)
- Arithmetic operation can't be performed on the result of `csum` function
- Can only be used with aggregate functions This function can be used with supertables and standard tables.
-- Must be used with `PARTITION BY tbname` when it's used on a STable to force the result on each single timeline
### DERIVATIVE
@@ -1112,7 +1111,6 @@ ignore_negative: {
**More explanation**:
-- It can be used together with `PARTITION BY tbname` against a STable.
- It can be used together with a selected column. For example: select \_rowts, DERIVATIVE() from.
### DIFF
@@ -1175,7 +1173,6 @@ MAVG(expr, k)
- Arithmetic operation can't be performed on the result of `MAVG`.
- Can only be used with data columns, can't be used with tags. - Can't be used with aggregate functions.
-- Must be used with `PARTITION BY tbname` when it's used on a STable to force the result on each single timeline
### STATECOUNT
@@ -1201,7 +1198,6 @@ STATECOUNT(expr, oper, val)
**More explanations**:
-- Must be used together with `PARTITION BY tbname` when it's used on a STable to force the result into each single timeline]
- Can't be used with window operation, like interval/state_window/session_window
@@ -1229,7 +1225,6 @@ STATEDURATION(expr, oper, val, unit)
**More explanations**:
-- Must be used together with `PARTITION BY tbname` when it's used on a STable to force the result into each single timeline]
- Can't be used with window operation, like interval/state_window/session_window
@@ -1247,7 +1242,6 @@ TWA(expr)
**Applicable table types**: standard tables and supertables
-- Must be used together with `PARTITION BY tbname` to force the result into each single timeline.
## System Information Functions
diff --git a/docs/en/12-taos-sql/19-limit.md b/docs/en/12-taos-sql/19-limit.md
index 654fae7560d3c24df1353b87952a3868ca307418..22ad2055e4633fcf63baeb4470d0f2aafddac5b3 100644
--- a/docs/en/12-taos-sql/19-limit.md
+++ b/docs/en/12-taos-sql/19-limit.md
@@ -26,7 +26,7 @@ The following characters cannot occur in a password: single quotation marks ('),
- Maximum length of database name is 64 bytes
- Maximum length of table name is 192 bytes, excluding the database name prefix and the separator.
-- Maximum length of each data row is 48K bytes. Note that the upper limit includes the extra 2 bytes consumed by each column of BINARY/NCHAR type.
+- Maximum length of each data row is 48K (64K since version 3.0.5.0) bytes. Note that the upper limit includes the extra 2 bytes consumed by each column of BINARY/NCHAR type.
- The maximum length of a column name is 64 bytes.
- Maximum number of columns is 4096. There must be at least 2 columns, and the first column must be timestamp.
- The maximum length of a tag name is 64 bytes
diff --git a/docs/en/14-reference/03-connector/04-java.mdx b/docs/en/14-reference/03-connector/04-java.mdx
index 260b38b24ffd00682b393fc2e3d14142c0e6f3af..db49e5f3952761c78f0796e706cb4a9b567467f5 100644
--- a/docs/en/14-reference/03-connector/04-java.mdx
+++ b/docs/en/14-reference/03-connector/04-java.mdx
@@ -959,6 +959,7 @@ The preceding example uses the SQL statement `select ts, speed from speed_table`
```java
Properties config = new Properties();
+config.setProperty("bootstrap.servers", "localhost:6030");
config.setProperty("enable.auto.commit", "true");
config.setProperty("group.id", "group1");
config.setProperty("value.deserializer", "com.taosdata.jdbc.tmq.ConsumerTest.ResultDeserializer");
@@ -966,12 +967,14 @@ config.setProperty("value.deserializer", "com.taosdata.jdbc.tmq.ConsumerTest.Res
TaosConsumer consumer = new TaosConsumer<>(config);
```
+- bootstrap.servers: `ip:port` where the TDengine server is located, or `ip:port` where the taosAdapter is located if WebSocket connection is used.
- enable.auto.commit: Specifies whether to commit automatically.
- group.id: consumer: Specifies the group that the consumer is in.
- value.deserializer: To deserialize the results, you can inherit `com.taosdata.jdbc.tmq.ReferenceDeserializer` and specify the result set bean. You can also inherit `com.taosdata.jdbc.tmq.Deserializer` and perform custom deserialization based on the SQL result set.
- td.connect.type: Specifies the type connect with TDengine, `jni` or `WebSocket`. default is `jni`
- httpConnectTimeout: WebSocket connection timeout in milliseconds, the default value is 5000 ms. It only takes effect when using WebSocket type.
- messageWaitTimeout: socket timeout in milliseconds, the default value is 10000 ms. It only takes effect when using WebSocket type.
+- httpPoolSize: Maximum number of concurrent requests on a single connection. It only takes effect when using WebSocket type.
- For more information, see [Consumer Parameters](../../../develop/tmq).
#### Subscribe to consume data
@@ -1015,10 +1018,20 @@ public abstract class ConsumerLoop {
public ConsumerLoop() throws SQLException {
Properties config = new Properties();
+ config.setProperty("td.connect.type", "jni");
+ config.setProperty("bootstrap.servers", "localhost:6030");
+ config.setProperty("td.connect.user", "root");
+ config.setProperty("td.connect.pass", "taosdata");
+ config.setProperty("auto.offset.reset", "earliest");
config.setProperty("msg.with.table.name", "true");
config.setProperty("enable.auto.commit", "true");
+ config.setProperty("auto.commit.interval.ms", "1000");
config.setProperty("group.id", "group1");
+ config.setProperty("client.id", "1");
config.setProperty("value.deserializer", "com.taosdata.jdbc.tmq.ConsumerTest.ConsumerLoop$ResultDeserializer");
+ config.setProperty("value.deserializer.encoding", "UTF-8");
+ config.setProperty("experimental.snapshot.enable", "true");
+
this.consumer = new TaosConsumer<>(config);
this.topics = Collections.singletonList("topic_speed");
@@ -1090,12 +1103,19 @@ public abstract class ConsumerLoop {
public ConsumerLoop() throws SQLException {
Properties config = new Properties();
- config.setProperty("bootstrap.servers", "localhost:6041");
config.setProperty("td.connect.type", "ws");
+ config.setProperty("bootstrap.servers", "localhost:6041");
+ config.setProperty("td.connect.user", "root");
+ config.setProperty("td.connect.pass", "taosdata");
+ config.setProperty("auto.offset.reset", "earliest");
config.setProperty("msg.with.table.name", "true");
config.setProperty("enable.auto.commit", "true");
+ config.setProperty("auto.commit.interval.ms", "1000");
config.setProperty("group.id", "group2");
+ config.setProperty("client.id", "1");
config.setProperty("value.deserializer", "com.taosdata.jdbc.tmq.ConsumerTest.ConsumerLoop$ResultDeserializer");
+ config.setProperty("value.deserializer.encoding", "UTF-8");
+ config.setProperty("experimental.snapshot.enable", "true");
this.consumer = new TaosConsumer<>(config);
this.topics = Collections.singletonList("topic_speed");
@@ -1236,6 +1256,7 @@ The source code of the sample application is under `TDengine/examples/JDBC`:
- connectionPools: using taos-jdbcdriver in connection pools such as HikariCP, Druid, dbcp, c3p0, etc.
- SpringJdbcTemplate: using taos-jdbcdriver in Spring JdbcTemplate.
- mybatisplus-demo: using taos-jdbcdriver in Springboot + Mybatis.
+- consumer-demo: an example of consuming TDengine data; the consumption rate can be controlled by parameters.
[JDBC example](https://github.com/taosdata/TDengine/tree/3.0/examples/JDBC)
diff --git a/docs/en/14-reference/03-connector/05-go.mdx b/docs/en/14-reference/03-connector/05-go.mdx
index 0088f230069b15f12e455b5b6d2d6fa261fc5a14..06d643c6c83e677a0cdcade91296ae2339f80fda 100644
--- a/docs/en/14-reference/03-connector/05-go.mdx
+++ b/docs/en/14-reference/03-connector/05-go.mdx
@@ -29,7 +29,7 @@ REST connections are supported on all platforms that can run Go.
## Version support
-Please refer to [version support list](/reference/connector#version-support)
+Please refer to [version support list](https://github.com/taosdata/driver-go#remind)
## Supported features
@@ -379,6 +379,15 @@ Note: `tmq.TopicPartition` is reserved for compatibility purpose
Commit information.
+* `func (c *Consumer) Assignment() (partitions []tmq.TopicPartition, err error)`
+
+Get Assignment(TDengine >= 3.0.5.0 and driver-go >= v3.5.0 are required).
+
+* `func (c *Consumer) Seek(partition tmq.TopicPartition, ignoredTimeoutMs int) error`
+Note: `ignoredTimeoutMs` is reserved for compatibility purpose
+
+Seek offset(TDengine >= 3.0.5.0 and driver-go >= v3.5.0 are required).
+
* `func (c *Consumer) Unsubscribe() error`
Unsubscribe.
@@ -468,6 +477,15 @@ Note: `tmq.TopicPartition` is reserved for compatibility purpose
Commit information.
+* `func (c *Consumer) Assignment() (partitions []tmq.TopicPartition, err error)`
+
+Get Assignment(TDengine >= 3.0.5.0 and driver-go >= v3.5.0 are required).
+
+* `func (c *Consumer) Seek(partition tmq.TopicPartition, ignoredTimeoutMs int) error`
+Note: `ignoredTimeoutMs` is reserved for compatibility purpose
+
+Seek offset(TDengine >= 3.0.5.0 and driver-go >= v3.5.0 are required).
+
* `func (c *Consumer) Unsubscribe() error`
Unsubscribe.
@@ -476,7 +494,7 @@ Unsubscribe.
Close consumer.
-For a complete example see [GitHub sample file](https://github.com/taosdata/driver-go/blob/3.0/examples/tmqoverws/main.go)
+For a complete example see [GitHub sample file](https://github.com/taosdata/driver-go/blob/main/examples/tmqoverws/main.go)
### parameter binding via WebSocket
@@ -524,7 +542,7 @@ For a complete example see [GitHub sample file](https://github.com/taosdata/driv
Closes the parameter binding.
-For a complete example see [GitHub sample file](https://github.com/taosdata/driver-go/blob/3.0/examples/stmtoverws/main.go)
+For a complete example see [GitHub sample file](https://github.com/taosdata/driver-go/blob/main/examples/stmtoverws/main.go)
## API Reference
diff --git a/docs/en/14-reference/03-connector/06-rust.mdx b/docs/en/14-reference/03-connector/06-rust.mdx
index 99c3d2c066d9e0e269ecc5cc03f6efa106b1906f..f32e32f2adffb288b5ca8499994411597a30dcfd 100644
--- a/docs/en/14-reference/03-connector/06-rust.mdx
+++ b/docs/en/14-reference/03-connector/06-rust.mdx
@@ -27,9 +27,14 @@ The source code for the Rust connectors is located on [GitHub](https://github.co
Native connections are supported on the same platforms as the TDengine client driver.
Websocket connections are supported on all platforms that can run Go.
-## Version support
+## Version history
-Please refer to [version support list](/reference/connector#version-support)
+| connector-rust version | TDengine version | major features |
+| :----------------: | :--------------: | :--------------------------------------------------: |
+| v0.8.8 | 3.0.5.0 or later | TMQ: Get consuming progress and seek offset to consume. |
+| v0.8.0 | 3.0.4.0 | Support schemaless insert. |
+| v0.7.6 | 3.0.3.0 | Support req_id in query. |
+| v0.6.0 | 3.0.0.0 | Base features. |
The Rust Connector is still under rapid development and is not guaranteed to be backward compatible before 1.0. We recommend using TDengine version 3.0 or higher to avoid known issues.
@@ -499,6 +504,22 @@ The TMQ is of [futures::Stream](https://docs.rs/futures/latest/futures/stream/in
}
```
+Get assignments:
+
+Version requirements: connector-rust >= v0.8.8, TDengine >= 3.0.5.0
+
+```rust
+let assignments = consumer.assignments().await.unwrap();
+```
+
+Seek offset:
+
+Version requirements: connector-rust >= v0.8.8, TDengine >= 3.0.5.0
+
+```rust
+consumer.offset_seek(topic, vgroup_id, offset).await;
+```
+
Unsubscribe:
```rust
@@ -513,7 +534,7 @@ The following parameters can be configured for the TMQ DSN. Only `group.id` is m
- `enable.auto.commit`: Automatically commits. This can be enabled when data consistency is not essential.
- `auto.commit.interval.ms`: Interval for automatic commits.
-For more information, see [GitHub sample file](https://github.com/taosdata/taos-connector-rust/blob/main/examples/subscribe.rs).
+For more information, see [GitHub sample file](https://github.com/taosdata/TDengine/blob/3.0/docs/examples/rust/nativeexample/examples/subscribe_demo.rs).
For information about other structure APIs, see the [Rust documentation](https://docs.rs/taos).
diff --git a/docs/en/14-reference/03-connector/07-python.mdx b/docs/en/14-reference/03-connector/07-python.mdx
index a83f2047d02f7161d85a2f7ecb1c2a983ecfc22e..b263af8ea6afcebe23726d5baa8dd4246e239963 100644
--- a/docs/en/14-reference/03-connector/07-python.mdx
+++ b/docs/en/14-reference/03-connector/07-python.mdx
@@ -362,7 +362,7 @@ By using the optional req_id parameter, you can specify a request ID that can be
##### TaosConnection class
-The `TaosConnection` class contains both an implementation of the PEP249 Connection interface (e.g., the `cursor()` method and the `close()` method) and many extensions (e.g., the `execute()`, `query()`, `schemaless_insert()`, and `subscribe()` methods).
+The connection is established in the same way as described above, with the addition of the `req_id` argument.
```python title="execute method"
{{#include docs/examples/python/connection_usage_native_reference_with_req_id.py:insert}}
@@ -372,13 +372,9 @@ The `TaosConnection` class contains both an implementation of the PEP249 Connect
{{#include docs/examples/python/connection_usage_native_reference_with_req_id.py:query}}
```
-:::tip
-The queried results can only be fetched once. For example, only one of `fetch_all()` and `fetch_all_into_dict()` can be used in the example above. Repeated fetches will result in an empty list.
-:::
-
##### Use of TaosResult class
-In the above example of using the `TaosConnection` class, we have shown two ways to get the result of a query: `fetch_all()` and `fetch_all_into_dict()`. In addition, `TaosResult` also provides methods to iterate through the result set by rows (`rows_iter`) or by data blocks (`blocks_iter`). Using these two methods will be more efficient in scenarios where the query has a large amount of data.
+Data is fetched in the same way as described above, with the addition of the `req_id` argument.
```python title="blocks_iter method"
{{#include docs/examples/python/result_set_with_req_id_examples.py}}
@@ -391,17 +387,12 @@ The `TaosConnection` class and the `TaosResult` class already implement all the
{{#include docs/examples/python/cursor_usage_native_reference_with_req_id.py}}
```
-:::note
-The TaosCursor class uses native connections for write and query operations. In a client-side multi-threaded scenario, this cursor instance must remain thread exclusive and cannot be shared across threads for use, otherwise, it will result in errors in the returned results.
-
-:::
-
##### Use of TaosRestCursor class
-The `TaosRestCursor` class is an implementation of the PEP249 Cursor interface.
+The connection is established in the same way as described above, with the addition of the `req_id` argument.
```python title="Use of TaosRestCursor"
{{#include docs/examples/python/connect_rest_with_req_id_examples.py:basic}}
@@ -421,8 +412,11 @@ The `RestClient` class is a direct wrapper for the [REST API](/reference/rest-ap
For a more detailed description of the `sql()` method, please refer to [RestClient](https://docs.taosdata.com/api/taospy/taosrest/restclient.html).
+
+The connection is established in the same way as described above, with the addition of the `req_id` argument.
+
```python
{{#include docs/examples/python/connect_websocket_with_req_id_examples.py:basic}}
```
diff --git a/docs/en/14-reference/12-config/index.md b/docs/en/14-reference/12-config/index.md
index 3ce63fb6cceded0f309691ed47ad8bda38db5bfa..52ded6208a5f946dd0901410605907c826813226 100644
--- a/docs/en/14-reference/12-config/index.md
+++ b/docs/en/14-reference/12-config/index.md
@@ -111,7 +111,7 @@ The parameters described in this document by the effect that they have on the sy
| Attribute | Description |
| ------------- | ---------------------------------------------- |
| Applicable | Client/Server |
-| Meaning | The maximum waiting time to get avaliable conn |
+| Meaning | The maximum waiting time to get available conn |
| Value Range | 10-50000000(ms) |
| Default Value | 500000 |
diff --git a/docs/en/14-reference/13-schemaless/13-schemaless.md b/docs/en/14-reference/13-schemaless/13-schemaless.md
index aad0e63a4228ca303302d4a3970182355f750d53..3ae9098a73b3cc6f5f1e970886e33c40558a683b 100644
--- a/docs/en/14-reference/13-schemaless/13-schemaless.md
+++ b/docs/en/14-reference/13-schemaless/13-schemaless.md
@@ -90,7 +90,7 @@ You can configure smlChildTableName in taos.cfg to specify table names, for exam
Note: TDengine 3.0.3.0 and later automatically detect whether order is consistent. This parameter is no longer used.
:::tip
-All processing logic of schemaless will still follow TDengine's underlying restrictions on data structures, such as the total length of each row of data cannot exceed 48 KB and the total length of a tag value cannot exceed 16 KB. See [TDengine SQL Boundary Limits](/taos-sql/limit) for specific constraints in this area.
+All processing logic of schemaless will still follow TDengine's underlying restrictions on data structures, such as the total length of each row of data cannot exceed 48 KB(64 KB since version 3.0.5.0) and the total length of a tag value cannot exceed 16 KB. See [TDengine SQL Boundary Limits](/taos-sql/limit) for specific constraints in this area.
:::
## Time resolution recognition
diff --git a/docs/en/20-third-party/11-kafka.md b/docs/en/20-third-party/11-kafka.md
index f09ebb274c9554aface21c58c40b9d2e8589d4f2..1fc2b57a13de5cf91ac66069fb034afe56ed611d 100644
--- a/docs/en/20-third-party/11-kafka.md
+++ b/docs/en/20-third-party/11-kafka.md
@@ -16,165 +16,79 @@ TDengine Source Connector is used to read data from TDengine in real-time and se

-## What is Confluent?
-
-[Confluent](https://www.confluent.io/) adds many extensions to Kafka. include:
-
-1. Schema Registry
-2. REST Proxy
-3. Non-Java Clients
-4. Many packaged Kafka Connect plugins
-5. GUI for managing and monitoring Kafka - Confluent Control Center
-
-Some of these extensions are available in the community version of Confluent. Some are only available in the enterprise version.
-
-
-Confluent Enterprise Edition provides the `confluent` command-line tool to manage various components.
-
## Prerequisites
1. Linux operating system
2. Java 8 and Maven installed
-3. Git is installed
+3. Git/curl/vi are installed
4. TDengine is installed and started. If not, please refer to [Installation and Uninstallation](/operation/pkg-install)
-## Install Confluent
-
-Confluent provides two installation methods: Docker and binary packages. This article only introduces binary package installation.
+## Install Kafka
Execute in any directory:
````
-curl -O http://packages.confluent.io/archive/7.1/confluent-7.1.1.tar.gz
-tar xzf confluent-7.1.1.tar.gz -C /opt/
+curl -O https://downloads.apache.org/kafka/3.4.0/kafka_2.13-3.4.0.tgz
+tar xzf kafka_2.13-3.4.0.tgz -C /opt/
+ln -s /opt/kafka_2.13-3.4.0 /opt/kafka
````
-Then you need to add the `$CONFLUENT_HOME/bin` directory to the PATH.
+Then you need to add the `$KAFKA_HOME/bin` directory to the PATH.
```title=".profile"
-export CONFLUENT_HOME=/opt/confluent-7.1.1
-export PATH=$CONFLUENT_HOME/bin:$PATH
+export KAFKA_HOME=/opt/kafka
+export PATH=$PATH:$KAFKA_HOME/bin
```
Users can append the above script to the current user's profile file (~/.profile or ~/.bash_profile)
-After the installation is complete, you can enter `confluent version` for simple verification:
-
-```
-# confluent version
-confluent - Confluent CLI
-
-Version: v2.6.1
-Git Ref: 6d920590
-Build Date: 2022-02-18T06:14:21Z
-Go Version: go1.17.6 (linux/amd64)
-Development: false
-```
-
## Install TDengine Connector plugin
### Install from source code
-```
+```shell
git clone --branch 3.0 https://github.com/taosdata/kafka-connect-tdengine.git
cd kafka-connect-tdengine
-mvn clean package
-unzip -d $CONFLUENT_HOME/share/java/ target/components/packages/taosdata-kafka-connect-tdengine-*.zip
+mvn clean package -Dmaven.test.skip=true
+unzip -d $KAFKA_HOME/components/ target/components/packages/taosdata-kafka-connect-tdengine-*.zip
```
-The above script first clones the project source code and then compiles and packages it with Maven. After the package is complete, the zip package of the plugin is generated in the `target/components/packages/` directory. Unzip this zip package to plugin path. We used `$CONFLUENT_HOME/share/java/` above because it's a build in plugin path.
-
-### Install with confluent-hub
+The above script first clones the project source code and then compiles and packages it with Maven. After the package is complete, the zip package of the plugin is generated in the `target/components/packages/` directory. Unzip this zip package to plugin path. We used `$KAFKA_HOME/components/` above because it's a built-in plugin path.
-[Confluent Hub](https://www.confluent.io/hub) provides a service to download Kafka Connect plugins. After TDengine Kafka Connector is published to Confluent Hub, it can be installed using the command tool `confluent-hub`.
-**TDengine Kafka Connector is currently not officially released and cannot be installed in this way**.
+### Add configuration file
-## Start Confluent
+Add the kafka-connect-tdengine plugin path to `plugin.path` in `$KAFKA_HOME/config/connect-distributed.properties`.
-```
-confluent local services start
+```properties
+plugin.path=/usr/share/java,/opt/kafka/components
```
-:::note
-Be sure to install the plugin before starting Confluent. Otherwise, Kafka Connect will fail to discover the plugins.
-:::
+## Start Kafka Services
-:::tip
-If a component fails to start, try clearing the data and restarting. The data directory will be printed to the console at startup, e.g.:
-
-```title="Console output log" {1}
-Using CONFLUENT_CURRENT: /tmp/confluent.106668
-Starting ZooKeeper
-ZooKeeper is [UP]
-Starting Kafka
-Kafka is [UP]
-Starting Schema Registry
-Schema Registry is [UP]
-Starting Kafka REST
-Kafka REST is [UP]
-Starting Connect
-Connect is [UP]
-Starting ksqlDB Server
-ksqlDB Server is [UP]
-Starting Control Center
-Control Center is [UP]
-```
+Use the commands below to start all services:
-To clear data, execute `rm -rf /tmp/confluent.106668`.
-:::
+```shell
+zookeeper-server-start.sh -daemon $KAFKA_HOME/config/zookeeper.properties
-### Check Confluent Services Status
+kafka-server-start.sh -daemon $KAFKA_HOME/config/server.properties
-Use command bellow to check the status of all service:
+connect-distributed.sh -daemon $KAFKA_HOME/config/connect-distributed.properties
-```
-confluent local services status
-```
-
-The expected output is:
-```
-Connect is [UP]
-Control Center is [UP]
-Kafka is [UP]
-Kafka REST is [UP]
-ksqlDB Server is [UP]
-Schema Registry is [UP]
-ZooKeeper is [UP]
```
### Check Successfully Loaded Plugin
After Kafka Connect was completely started, you can use bellow command to check if our plugins are installed successfully:
-```
-confluent local services connect plugin list
-```
-
-The output should contains `TDengineSinkConnector` and `TDengineSourceConnector` as bellow:
-```
-Available Connect Plugins:
-[
- {
- "class": "com.taosdata.kafka.connect.sink.TDengineSinkConnector",
- "type": "sink",
- "version": "1.0.0"
- },
- {
- "class": "com.taosdata.kafka.connect.source.TDengineSourceConnector",
- "type": "source",
- "version": "1.0.0"
- },
-......
+```shell
+curl http://localhost:8083/connectors
```
-If not, please check the log file of Kafka Connect. To view the log file path, please execute:
+The output is as below:
+```txt
+[]
```
-echo `cat /tmp/confluent.current`/connect/connect.stdout
-```
-It should produce a path like:`/tmp/confluent.104086/connect/connect.stdout`
-
-Besides log file `connect.stdout` there is a file named `connect.properties`. At the end of this file you can see the effective `plugin.path` which is a series of paths joined by comma. If Kafka Connect not found our plugins, it's probably because the installed path is not included in `plugin.path`.
## The use of TDengine Sink Connector
@@ -184,40 +98,47 @@ TDengine Sink Connector internally uses TDengine [modeless write interface](/ref
The following example synchronizes the data of the topic meters to the target database power. The data format is the InfluxDB Line protocol format.
-### Add configuration file
+### Add Sink Connector configuration file
-```
+```shell
mkdir ~/test
cd ~/test
-vi sink-demo.properties
+vi sink-demo.json
```
-sink-demo.properties' content is following:
-
-```ini title="sink-demo.properties"
-name=TDengineSinkConnector
-connector.class=com.taosdata.kafka.connect.sink.TDengineSinkConnector
-tasks.max=1
-topics=meters
-connection.url=jdbc:TAOS://127.0.0.1:6030
-connection.user=root
-connection.password=taosdata
-connection.database=power
-db.schemaless=line
-data.precision=ns
-key.converter=org.apache.kafka.connect.storage.StringConverter
-value.converter=org.apache.kafka.connect.storage.StringConverter
+The content of sink-demo.json is as follows:
+
+```json title="sink-demo.json"
+{
+ "name": "TDengineSinkConnector",
+ "config": {
+ "connector.class":"com.taosdata.kafka.connect.sink.TDengineSinkConnector",
+ "tasks.max": "1",
+ "topics": "meters",
+ "connection.url": "jdbc:TAOS://127.0.0.1:6030",
+ "connection.user": "root",
+ "connection.password": "taosdata",
+ "connection.database": "power",
+ "db.schemaless": "line",
+ "data.precision": "ns",
+ "key.converter": "org.apache.kafka.connect.storage.StringConverter",
+ "value.converter": "org.apache.kafka.connect.storage.StringConverter",
+ "errors.tolerance": "all",
+ "errors.deadletterqueue.topic.name": "dead_letter_topic",
+ "errors.deadletterqueue.topic.replication.factor": 1
+ }
+}
```
Key configuration instructions:
-1. `topics=meters` and `connection.database=power` means to subscribe to the data of the topic meters and write to the database power.
-2. `db.schemaless=line` means the data in the InfluxDB Line protocol format.
+1. `"topics": "meters"` and `"connection.database": "power"` means to subscribe to the data of the topic meters and write to the database power.
+2. `"db.schemaless": "line"` means the data in the InfluxDB Line protocol format.
-### Create Connector instance
+### Create Sink Connector instance
-````
-confluent local services connect connector load TDengineSinkConnector --config ./sink-demo.properties
+````shell
+curl -X POST -d @sink-demo.json http://localhost:8083/connectors -H "Content-Type: application/json"
````
If the above command is executed successfully, the output is as follows:
@@ -237,7 +158,10 @@ If the above command is executed successfully, the output is as follows:
"tasks.max": "1",
"topics": "meters",
"value.converter": "org.apache.kafka.connect.storage.StringConverter",
- "name": "TDengineSinkConnector"
+ "name": "TDengineSinkConnector",
+ "errors.tolerance": "all",
+ "errors.deadletterqueue.topic.name": "dead_letter_topic",
+      "errors.deadletterqueue.topic.replication.factor": "1"
},
"tasks": [],
"type": "sink"
@@ -258,7 +182,7 @@ meters,location=California.LoSangeles,groupid=3 current=11.3,voltage=221,phase=0
Use kafka-console-producer to write test data to the topic `meters`.
```
-cat test-data.txt | kafka-console-producer --broker-list localhost:9092 --topic meters
+cat test-data.txt | kafka-console-producer.sh --broker-list localhost:9092 --topic meters
```
:::note
@@ -269,12 +193,12 @@ TDengine Sink Connector will automatically create the database if the target dat
Use the TDengine CLI to verify that the sync was successful.
-```
+```sql
taos> use power;
Database changed.
taos> select * from meters;
- ts | current | voltage | phase | groupid | location |
+ _ts | current | voltage | phase | groupid | location |
===============================================================================================================================================================
2022-03-28 09:56:51.249000000 | 11.800000000 | 221.000000000 | 0.280000000 | 2 | California.LosAngeles |
2022-03-28 09:56:51.250000000 | 13.400000000 | 223.000000000 | 0.290000000 | 2 | California.LosAngeles |
@@ -293,29 +217,34 @@ TDengine Source Connector will convert the data in TDengine data table into [Inf
The following sample program synchronizes the data in the database test to the topic tdengine-source-test.
-### Add configuration file
+### Add Source Connector configuration file
-```
-vi source-demo.properties
+```shell
+vi source-demo.json
```
Input following content:
-```ini title="source-demo.properties"
-name=TDengineSourceConnector
-connector.class=com.taosdata.kafka.connect.source.TDengineSourceConnector
-tasks.max=1
-connection.url=jdbc:TAOS://127.0.0.1:6030
-connection.username=root
-connection.password=taosdata
-connection.database=test
-connection.attempts=3
-connection.backoff.ms=5000
-topic.prefix=tdengine-source-
-poll.interval.ms=1000
-fetch.max.rows=100
-key.converter=org.apache.kafka.connect.storage.StringConverter
-value.converter=org.apache.kafka.connect.storage.StringConverter
+```json title="source-demo.json"
+{
+ "name":"TDengineSourceConnector",
+ "config":{
+ "connector.class": "com.taosdata.kafka.connect.source.TDengineSourceConnector",
+ "tasks.max": 1,
+ "connection.url": "jdbc:TAOS://127.0.0.1:6030",
+ "connection.username": "root",
+ "connection.password": "taosdata",
+ "connection.database": "test",
+ "connection.attempts": 3,
+ "connection.backoff.ms": 5000,
+ "topic.prefix": "tdengine-source",
+ "poll.interval.ms": 1000,
+ "fetch.max.rows": 100,
+ "topic.per.stable": true,
+ "key.converter": "org.apache.kafka.connect.storage.StringConverter",
+ "value.converter": "org.apache.kafka.connect.storage.StringConverter"
+ }
+}
```
### Prepare test data
@@ -340,40 +269,40 @@ INSERT INTO d1001 USING meters TAGS('California.SanFrancisco', 2) VALUES('2018-1
Use TDengine CLI to execute SQL script
-```
+```shell
taos -f prepare-source-data.sql
```
### Create Connector instance
-````
-confluent local services connect connector load TDengineSourceConnector --config source-demo.properties
-````
+```shell
+curl -X POST -d @source-demo.json http://localhost:8083/connectors -H "Content-Type: application/json"
+```
### View topic data
Use the kafka-console-consumer command-line tool to monitor data in the topic tdengine-source-test. In the beginning, all historical data will be output. After inserting two new data into TDengine, kafka-console-consumer immediately outputs the two new data. The output is in InfluxDB line protocol format.
-````
-kafka-console-consumer --bootstrap-server localhost:9092 --from-beginning --topic tdengine-source-test
+````shell
+kafka-console-consumer.sh --bootstrap-server localhost:9092 --from-beginning --topic tdengine-source-test-meters
````
output:
-````
+```txt
......
meters,location="California.SanFrancisco",groupid=2i32 current=10.3f32,voltage=219i32,phase=0.31f32 1538548685000000000
meters,location="California.SanFrancisco",groupid=2i32 current=12.6f32,voltage=218i32,phase=0.33f32 1538548695000000000
......
-````
+```
All historical data is displayed. Switch to the TDengine CLI and insert two new pieces of data:
-````
+```sql
USE test;
INSERT INTO d1001 VALUES (now, 13.3, 229, 0.38);
INSERT INTO d1002 VALUES (now, 16.3, 233, 0.22);
-````
+```
Switch back to kafka-console-consumer, and the command line window has printed out the two pieces of data just inserted.
@@ -383,16 +312,16 @@ After testing, use the unload command to stop the loaded connector.
View currently active connectors:
-````
-confluent local services connect connector status
-````
+```shell
+curl http://localhost:8083/connectors
+```
You should now have two active connectors if you followed the previous steps. Use the following command to unload:
-````
-confluent local services connect connector unload TDengineSinkConnector
-confluent local services connect connector unload TDengineSourceConnector
-````
+```shell
+curl -X DELETE http://localhost:8083/connectors/TDengineSinkConnector
+curl -X DELETE http://localhost:8083/connectors/TDengineSourceConnector
+```
## Configuration reference
@@ -430,19 +359,14 @@ The following configuration items apply to TDengine Sink Connector and TDengine
6. `query.interval.ms`: The time range of reading data from TDengine each time, its unit is millisecond. It should be adjusted according to the data flow in rate, the default value is 1000.
7. `topic.per.stable`: If it's set to true, it means one super table in TDengine corresponds to a topic in Kafka, the topic naming rule is `--`; if it's set to false, it means the whole DB corresponds to a topic in Kafka, the topic naming rule is `-`.
-
-
## Other notes
-1. To install plugin to a customized location, refer to https://docs.confluent.io/home/connect/self-managed/install.html#install-connector-manually.
-2. To use Kafka Connect without confluent, refer to https://kafka.apache.org/documentation/#connect.
+1. To use Kafka Connect, refer to <https://kafka.apache.org/documentation/#connect>.
## Feedback
-https://github.com/taosdata/kafka-connect-tdengine/issues
+<https://github.com/taosdata/kafka-connect-tdengine/issues>
## Reference
-1. https://www.confluent.io/what-is-apache-kafka
-2. https://developer.confluent.io/learn-kafka/kafka-connect/intro
-3. https://docs.confluent.io/platform/current/platform.html
+1. For more information, see <https://kafka.apache.org/documentation/>
diff --git a/docs/en/28-releases/01-tdengine.md b/docs/en/28-releases/01-tdengine.md
index c7836d12987a1d2c87e577493b9ffc7973443611..a9336697f2d2fb339a0ebe779ec330750b08aae1 100644
--- a/docs/en/28-releases/01-tdengine.md
+++ b/docs/en/28-releases/01-tdengine.md
@@ -10,6 +10,10 @@ For TDengine 2.x installation packages by version, please visit [here](https://w
import Release from "/components/ReleaseV3";
+## 3.0.5.0
+
+
+
## 3.0.4.2
diff --git a/docs/en/28-releases/02-tools.md b/docs/en/28-releases/02-tools.md
index 9f8dbfee7e8dbce75c62e0978a9be6b13a5b5f35..28c2ff7a7fe30cb9364c5b7387f595420fa2973d 100644
--- a/docs/en/28-releases/02-tools.md
+++ b/docs/en/28-releases/02-tools.md
@@ -10,6 +10,10 @@ For other historical version installers, please visit [here](https://www.taosdat
import Release from "/components/ReleaseV3";
+## 2.5.1
+
+
+
## 2.5.0
diff --git a/docs/examples/java/src/main/java/com/taos/example/SubscribeDemo.java b/docs/examples/java/src/main/java/com/taos/example/SubscribeDemo.java
index b5cdedc34f00dc2914f62176e4bd8b1d80a01bf6..3c5d2867e230658536cabfa36252662656cb7f02 100644
--- a/docs/examples/java/src/main/java/com/taos/example/SubscribeDemo.java
+++ b/docs/examples/java/src/main/java/com/taos/example/SubscribeDemo.java
@@ -53,20 +53,28 @@ public class SubscribeDemo {
// create consumer
Properties properties = new Properties();
+ properties.getProperty(TMQConstants.CONNECT_TYPE, "jni");
properties.setProperty(TMQConstants.BOOTSTRAP_SERVERS, "127.0.0.1:6030");
+ properties.setProperty(TMQConstants.CONNECT_USER, "root");
+ properties.setProperty(TMQConstants.CONNECT_PASS, "taosdata");
properties.setProperty(TMQConstants.MSG_WITH_TABLE_NAME, "true");
properties.setProperty(TMQConstants.ENABLE_AUTO_COMMIT, "true");
- properties.setProperty(TMQConstants.GROUP_ID, "test");
+ properties.setProperty(TMQConstants.AUTO_COMMIT_INTERVAL, "1000");
+ properties.setProperty(TMQConstants.GROUP_ID, "test1");
+ properties.setProperty(TMQConstants.CLIENT_ID, "1");
+ properties.setProperty(TMQConstants.AUTO_OFFSET_RESET, "earliest");
properties.setProperty(TMQConstants.VALUE_DESERIALIZER,
"com.taos.example.MetersDeserializer");
+ properties.setProperty(TMQConstants.VALUE_DESERIALIZER_ENCODING, "UTF-8");
+ properties.setProperty(TMQConstants.EXPERIMENTAL_SNAPSHOT_ENABLE, "true");
// poll data
try (TaosConsumer consumer = new TaosConsumer<>(properties)) {
consumer.subscribe(Collections.singletonList(TOPIC));
while (!shutdown.get()) {
ConsumerRecords meters = consumer.poll(Duration.ofMillis(100));
- for (ConsumerRecord recode : meters) {
- Meters meter = recode.value();
+ for (ConsumerRecord r : meters) {
+ Meters meter = r.value();
System.out.println(meter);
}
}
diff --git a/docs/examples/java/src/main/java/com/taos/example/WebsocketSubscribeDemo.java b/docs/examples/java/src/main/java/com/taos/example/WebsocketSubscribeDemo.java
index 83cb04f55244aacf860573a42ddc952e1e522803..03f7e3a11e8ae64ed641097746f9d44da73bf3d6 100644
--- a/docs/examples/java/src/main/java/com/taos/example/WebsocketSubscribeDemo.java
+++ b/docs/examples/java/src/main/java/com/taos/example/WebsocketSubscribeDemo.java
@@ -1,5 +1,6 @@
package com.taos.example;
+import com.taosdata.jdbc.tmq.ConsumerRecord;
import com.taosdata.jdbc.tmq.ConsumerRecords;
import com.taosdata.jdbc.tmq.TMQConstants;
import com.taosdata.jdbc.tmq.TaosConsumer;
@@ -54,18 +55,26 @@ public class WebsocketSubscribeDemo {
Properties properties = new Properties();
properties.setProperty(TMQConstants.BOOTSTRAP_SERVERS, "127.0.0.1:6041");
properties.setProperty(TMQConstants.CONNECT_TYPE, "ws");
+ properties.setProperty(TMQConstants.CONNECT_USER, "root");
+ properties.setProperty(TMQConstants.CONNECT_PASS, "taosdata");
+ properties.setProperty(TMQConstants.AUTO_OFFSET_RESET, "earliest");
properties.setProperty(TMQConstants.MSG_WITH_TABLE_NAME, "true");
properties.setProperty(TMQConstants.ENABLE_AUTO_COMMIT, "true");
- properties.setProperty(TMQConstants.GROUP_ID, "test");
+ properties.setProperty(TMQConstants.AUTO_COMMIT_INTERVAL, "1000");
+ properties.setProperty(TMQConstants.GROUP_ID, "test2");
+ properties.setProperty(TMQConstants.CLIENT_ID, "1");
properties.setProperty(TMQConstants.VALUE_DESERIALIZER,
"com.taos.example.MetersDeserializer");
+ properties.setProperty(TMQConstants.VALUE_DESERIALIZER_ENCODING, "UTF-8");
+ properties.setProperty(TMQConstants.EXPERIMENTAL_SNAPSHOT_ENABLE, "true");
// poll data
try (TaosConsumer consumer = new TaosConsumer<>(properties)) {
consumer.subscribe(Collections.singletonList(TOPIC));
while (!shutdown.get()) {
ConsumerRecords meters = consumer.poll(Duration.ofMillis(100));
- for (Meters meter : meters) {
+ for (ConsumerRecord r : meters) {
+ Meters meter = (Meters) r.value();
System.out.println(meter);
}
}
diff --git a/docs/zh/07-develop/09-udf.md b/docs/zh/07-develop/09-udf.md
index 99ecd903b40a1a67a447415d1392ec65a5ecc18b..ae11273a39779bd5cc83968f48767cace7ff346a 100644
--- a/docs/zh/07-develop/09-udf.md
+++ b/docs/zh/07-develop/09-udf.md
@@ -271,26 +271,90 @@ select max_vol(vol1,vol2,vol3,deviceid) from battery;
## 用 Python 语言实现 UDF
+### 准备环境
+
+1. 准备好 Python 运行环境
+
+2. 安装 Python 包 `taospyudf`
+
+```shell
+pip3 install taospyudf
+```
+
+安装过程中会编译 C++ 源码,因此系统上要有 cmake 和 gcc。编译生成的 libtaospyudf.so 文件自动会被复制到 /usr/local/lib/ 目录,因此如果是非 root 用户,安装时需加 sudo。安装完可以检查这个目录是否有了这个文件:
+
+```shell
+root@slave11 ~/udf $ ls -l /usr/local/lib/libtaos*
+-rw-r--r-- 1 root root 671344 May 24 22:54 /usr/local/lib/libtaospyudf.so
+```
+
+然后执行命令
+```shell
+ldconfig
+```
+
+3. 如果 Python UDF 程序执行时,通过 PYTHONPATH 引用其它的包,可以设置 taos.cfg 的 UdfdLdLibPath 变量为PYTHONPATH的内容
+
+4. 启动 `taosd` 服务
+细节请参考 [快速开始](../../get-started)
+
+### 接口定义
+
+#### 接口概述
+
使用 Python 语言实现 UDF 时,需要实现规定的接口函数
- 标量函数需要实现标量接口函数 process 。
- 聚合函数需要实现聚合接口函数 start ,reduce ,finish。
- 如果需要初始化,实现 init;如果需要清理工作,实现 destroy。
-### 用 Python 实现标量函数
+#### 标量函数接口
+```Python
+def process(input: datablock) -> tuple[output_type]:
+```
+
+说明:
+ - input:datablock 类似二维矩阵,通过成员方法 data(row,col)返回位于 row 行,col 列的 python 对象
+ - 返回值是一个 Python 对象元组,每个元素类型为输出类型。
+
+#### 聚合函数接口
+```Python
+def start() -> bytes:
+def reduce(inputs: datablock, buf: bytes) -> bytes
+def finish(buf: bytes) -> output_type:
+```
+
+说明:
+ - 首先调用 start 生成最初结果 buffer
+ - 然后输入数据会被分为多个行数据块,对每个数据块 inputs 和当前中间结果 buf 调用 reduce,得到新的中间结果
+ - 最后再调用 finish 从中间结果 buf 产生最终输出,最终输出只能含 0 或 1 条数据。
+
+#### 初始化和销毁接口
+```Python
+def init()
+def destroy()
+```
+
+说明:
+ - init 完成初始化工作
+ - destroy 完成清理工作
+
+### Python UDF 函数模板
+
+#### 标量函数实现模板
标量函数实现模版如下
+
```Python
def init():
# initialization
def destroy():
# destroy
-def process(input: datablock) -> tuple[output_type]:
- # process input datablock,
- # datablock.data(row, col) is to access the python object in location(row,col)
- # return tuple object consisted of object of type outputtype
+def process(input: datablock) -> tuple[output_type]:
```
-### 用 Python 实现聚合函数
+注意:定义标量函数最重要是要实现 process 函数,同时必须定义 init 和 destroy 函数即使什么都不做
+
+#### 聚合函数实现模板
聚合函数实现模版如下
```Python
@@ -303,41 +367,16 @@ def start() -> bytes:
def reduce(inputs: datablock, buf: bytes) -> bytes
# deserialize buf to state
# reduce the inputs and state into new_state.
- # use inputs.data(i,j) to access python ojbect of location(i,j)
+ # use inputs.data(i,j) to access python object of location(i,j)
# serialize new_state into new_state_bytes
return new_state_bytes
def finish(buf: bytes) -> output_type:
#return obj of type outputtype
```
-### Python UDF 接口函数定义
-
-#### 标量函数接口
-```Python
-def process(input: datablock) -> tuple[output_type]:
-```
-- input:datablock 类似二维矩阵,通过成员方法 data(row,col)返回位于 row 行,col 列的 python 对象
-- 返回值是一个 Python 对象元组,每个元素类型为输出类型。
-
-#### 聚合函数接口
-```Python
-def start() -> bytes:
-def reduce(inputs: datablock, buf: bytes) -> bytes
-def finish(buf: bytes) -> output_type:
-```
-
-首先调用 start 生成最初结果 buffer,然后输入数据会被分为多个行数据块,对每个数据块 inputs 和当前中间结果 buf 调用 reduce,得到新的中间结果,最后再调用 finish 从中间结果 buf 产生最终输出,最终输出只能含 0 或 1 条数据。
-
-
-#### 初始化和销毁接口
-```Python
-def init()
-def destroy()
-```
-
-其中 init 完成初始化工作。 destroy 完成清理工作。如果没有初始化工作,无需定义 init 函数。如果没有清理工作,无需定义 destroy 函数。
+注意:定义聚合函数最重要是要实现 start, reduce 和 finish,且必须定义 init 和 destroy 函数。start 生成最初结果 buffer,然后输入数据会被分为多个行数据块,对每个数据块 inputs 和当前中间结果 buf 调用 reduce,得到新的中间结果,最后再调用 finish 从中间结果 buf 产生最终输出。
-### Python 和 TDengine之间的数据类型映射
+### 数据类型映射
下表描述了TDengine SQL数据类型和Python数据类型的映射。任何类型的NULL值都映射成Python的None值。
@@ -351,15 +390,461 @@ def destroy()
|TIMESTAMP | int |
|JSON and other types | 不支持 |
-### Python UDF 环境的安装
-1. 安装 taospyudf 包。此包执行Python UDF程序。
-```bash
-sudo pip install taospyudf
-ldconfig
+### 开发指南
+
+本文内容由浅入深包括 4 个示例程序:
+1. 定义一个只接收一个整数的标量函数: 输入 n, 输出 ln(n^2 + 1)。
+2. 定义一个接收 n 个整数的标量函数, 输入 (x1, x2, ..., xn), 输出每个值和它们的序号的乘积的和: x1 + 2 * x2 + ... + n * xn。
+3. 定义一个标量函数,输入一个时间戳,输出距离这个时间最近的下一个周日。完成这个函数要用到第三方库 moment。我们在这个示例中讲解使用第三方库的注意事项。
+4. 定义一个聚合函数,计算某一列最大值和最小值的差, 也就是实现 TDengine 内置的 spread 函数。
+同时也包含大量实用的 debug 技巧。
+本文假设你用的是 Linux 系统,且已安装好了 TDengine 3.0.4.0+ 和 Python 3.x。
+
+注意:**UDF 内无法通过 print 函数输出日志,需要自己写文件或用 python 内置的 logging 库写文件**。
+
+#### 最简单的 UDF
+
+编写一个只接收一个整数的 UDF 函数: 输入 n, 输出 ln(n^2 + 1)。
+首先编写一个 Python 文件,存在系统某个目录,比如 /root/udf/myfun.py 内容如下
+
+```python
+from math import log
+
+def init():
+ pass
+
+def destroy():
+ pass
+
+def process(block):
+ rows, _ = block.shape()
+ return [log(block.data(i, 0) ** 2 + 1) for i in range(rows)]
+```
+
+这个文件包含 3 个函数, init 和 destroy 都是空函数,它们是 UDF 的生命周期函数,即使什么都不做也要定义。最关键的是 process 函数, 它接受一个数据块,这个数据块对象有两个方法:
+1. shape() 返回数据块的行数和列数
+2. data(i, j) 返回 i 行 j 列的数据
+标量函数的 process 方法传入的数据块有多少行,就需要返回多少个数据。上述代码中我们忽略了列数,因为我们只想对每行的第一个数做计算。
+接下来我们创建对应的 UDF 函数,在 TDengine CLI 中执行下面语句:
+
+```sql
+create function myfun as '/root/udf/myfun.py' outputtype double language 'Python'
+```
+其输出如下
+
+```shell
+ taos> create function myfun as '/root/udf/myfun.py' outputtype double language 'Python';
+Create OK, 0 row(s) affected (0.005202s)
+```
+
+看起来很顺利,接下来 show 一下系统中所有的自定义函数,确认创建成功:
+
+```text
+taos> show functions;
+ name |
+=================================
+ myfun |
+Query OK, 1 row(s) in set (0.005767s)
+```
+
+接下来就来测试一下这个函数,测试之前先执行下面的 SQL 命令,制造些测试数据,在 TDengine CLI 中执行下述命令
+
+```sql
+create database test;
+create table t(ts timestamp, v1 int, v2 int, v3 int);
+insert into t values('2023-05-01 12:13:14', 1, 2, 3);
+insert into t values('2023-05-03 08:09:10', 2, 3, 4);
+insert into t values('2023-05-10 07:06:05', 3, 4, 5);
+```
+
+测试 myfun 函数:
+
+```sql
+taos> select myfun(v1, v2) from t;
+
+DB error: udf function execution failure (0.011088s)
+```
+
+不幸的是执行失败了,什么原因呢?
+查看 udfd 进程的日志
+
+```shell
+tail -10 /var/log/taos/udfd.log
+```
+
+发现以下错误信息:
+
+```text
+05/24 22:46:28.733545 01665799 UDF ERROR can not load library libtaospyudf.so. error: operation not permitted
+05/24 22:46:28.733561 01665799 UDF ERROR can not load python plugin. lib path libtaospyudf.so
+```
+
+错误很明确:没有加载到 Python 插件 libtaospyudf.so,如果遇到此错误,请参考前面的准备环境一节。
+
+修复环境错误后再次执行,如下:
+
+```sql
+taos> select myfun(v1) from t;
+ myfun(v1) |
+============================
+ 0.693147181 |
+ 1.609437912 |
+ 2.302585093 |
+```
+
+至此,我们完成了第一个 UDF 😊,并学会了简单的 debug 方法。
+
+#### 示例二:异常处理
+
+上面的 myfun 虽然测试通过了,但是有两个缺点:
+
+1. 这个标量函数只接受 1 列数据作为输入,如果用户传入了多列也不会抛异常。
+
+```sql
+taos> select myfun(v1, v2) from t;
+ myfun(v1, v2) |
+============================
+ 0.693147181 |
+ 1.609437912 |
+ 2.302585093 |
+```
+
+2. 没有处理 null 值。我们期望如果输入有 null,则会抛异常终止执行。
+因此 process 函数改进如下:
+
+```python
+def process(block):
+ rows, cols = block.shape()
+ if cols > 1:
+ raise Exception(f"require 1 parameter but given {cols}")
+ return [ None if block.data(i, 0) is None else log(block.data(i, 0) ** 2 + 1) for i in range(rows)]
+```
+
+然后执行下面的语句更新已有的 UDF:
+
+```sql
+create or replace function myfun as '/root/udf/myfun.py' outputtype double language 'Python';
+```
+
+再传入 myfun 两个参数,就会执行失败了
+
+```sql
+taos> select myfun(v1, v2) from t;
+
+DB error: udf function execution failure (0.014643s)
+```
+
+但遗憾的是我们自定义的异常信息没有展示给用户,而是在插件的日志文件 /var/log/taos/taospyudf.log 中:
+
+```text
+2023-05-24 23:21:06.790 ERROR [1666188] [doPyUdfScalarProc@507] call pyUdfScalar proc function. context 0x7faade26d180. error: Exception: require 1 parameter but given 2
+
+At:
+ /var/lib/taos//.udf/myfun_3_1884e1281d9.py(12): process
+
+```
+
+至此,我们学会了如何更新 UDF,并查看 UDF 输出的错误日志。
+(注:如果 UDF 更新后未生效,在 TDengine 3.0.5.0 以前(不含)的版本中需要重启 taosd,在 3.0.5.0 及之后的版本中不需要重启 taosd 即可生效。)
+
+#### 示例三: 接收 n 个参数的 UDF
+
+编写一个 UDF:输入(x1, x2, ..., xn), 输出每个值和它们的序号的乘积的和: 1 * x1 + 2 * x2 + ... + n * xn。如果 x1 至 xn 中包含 null,则结果为 null。
+这个示例与示例一的区别是,可以接受任意多列作为输入,且要处理每一列的值。编写 UDF 文件 /root/udf/nsum.py:
+
+```python
+def init():
+ pass
+
+
+def destroy():
+ pass
+
+
+def process(block):
+ rows, cols = block.shape()
+ result = []
+ for i in range(rows):
+ total = 0
+ for j in range(cols):
+ v = block.data(i, j)
+ if v is None:
+ total = None
+ break
+ total += (j + 1) * block.data(i, j)
+ result.append(total)
+ return result
+```
+
+创建 UDF:
+
+```sql
+create function nsum as '/root/udf/nsum.py' outputtype double language 'Python';
+```
+
+测试 UDF:
+
+```sql
+taos> insert into t values('2023-05-25 09:09:15', 6, null, 8);
+Insert OK, 1 row(s) affected (0.003675s)
+
+taos> select ts, v1, v2, v3, nsum(v1, v2, v3) from t;
+ ts | v1 | v2 | v3 | nsum(v1, v2, v3) |
+================================================================================================
+ 2023-05-01 12:13:14.000 | 1 | 2 | 3 | 14.000000000 |
+ 2023-05-03 08:09:10.000 | 2 | 3 | 4 | 20.000000000 |
+ 2023-05-10 07:06:05.000 | 3 | 4 | 5 | 26.000000000 |
+ 2023-05-25 09:09:15.000 | 6 | NULL | 8 | NULL |
+Query OK, 4 row(s) in set (0.010653s)
+```
+
+#### 示例四:使用第三方库
+
+编写一个 UDF,输入一个时间戳,输出距离这个时间最近的下一个周日。比如今天是 2023-05-25, 则下一个周日是 2023-05-28。
+完成这个函数要用到第三方库 moment。先安装这个库:
+
+```shell
+pip3 install moment
+```
+
+然后编写 UDF 文件 /root/udf/nextsunday.py
+
+```python
+import moment
+
+
+def init():
+ pass
+
+
+def destroy():
+ pass
+
+
+def process(block):
+ rows, cols = block.shape()
+ if cols > 1:
+ raise Exception("require only 1 parameter")
+ if not type(block.data(0, 0)) is int:
+ raise Exception("type error")
+ return [moment.unix(block.data(i, 0)).replace(weekday=7).format('YYYY-MM-DD')
+ for i in range(rows)]
+```
+
+UDF 框架会将 TDengine 的 timestamp 类型映射为 Python 的 int 类型,所以这个函数只接受一个表示毫秒数的整数。process 方法先做参数检查,然后用 moment 包替换时间的星期为星期日,最后格式化输出。输出的字符串长度是固定的10个字符长,因此可以这样创建 UDF 函数:
+
+```sql
+create function nextsunday as '/root/udf/nextsunday.py' outputtype binary(10) language 'Python';
+```
+
+此时测试函数,如果你是用 systemctl 启动的 taosd,肯定会遇到错误:
+
+```sql
+taos> select ts, nextsunday(ts) from t;
+
+DB error: udf function execution failure (1.123615s)
+```
+
+```shell
+ tail -20 taospyudf.log
+2023-05-25 11:42:34.541 ERROR [1679419] [PyUdf::PyUdf@217] py udf load module failure. error ModuleNotFoundError: No module named 'moment'
+```
+
+这是因为 “moment” 所在位置不在 python udf 插件默认的库搜索路径中。怎么确认这一点呢?通过以下命令搜索 taospyudf.log:
+
+```shell
+grep 'sys path' taospyudf.log | tail -1
+```
+
+输出如下
+
+```text
+2023-05-25 10:58:48.554 INFO [1679419] [doPyOpen@592] python sys path: ['', '/lib/python38.zip', '/lib/python3.8', '/lib/python3.8/lib-dynload', '/lib/python3/dist-packages', '/var/lib/taos//.udf']
+```
+
+发现 python udf 插件默认搜索的第三方库安装路径是: /lib/python3/dist-packages,而 moment 默认安装到了 /usr/local/lib/python3.8/dist-packages。下面我们修改 python udf 插件默认的库搜索路径。
+先打开 python3 命令行,查看当前的 sys.path
+
+```python
+>>> import sys
+>>> ":".join(sys.path)
+'/usr/lib/python3.8:/usr/lib/python3.8/lib-dynload:/usr/local/lib/python3.8/dist-packages:/usr/lib/python3/dist-packages'
+```
+
+复制上面脚本的输出的字符串,然后编辑 /etc/taos/taos.cfg 加入以下配置:
+
+```shell
+UdfdLdLibPath /usr/lib/python3.8:/usr/lib/python3.8/lib-dynload:/usr/local/lib/python3.8/dist-packages:/usr/lib/python3/dist-packages
+```
+
+保存后执行 systemctl restart taosd, 再测试就不报错了:
+
+```sql
+taos> select ts, nextsunday(ts) from t;
+ ts | nextsunday(ts) |
+===========================================
+ 2023-05-01 12:13:14.000 | 2023-05-07 |
+ 2023-05-03 08:09:10.000 | 2023-05-07 |
+ 2023-05-10 07:06:05.000 | 2023-05-14 |
+ 2023-05-25 09:09:15.000 | 2023-05-28 |
+Query OK, 4 row(s) in set (1.011474s)
+```
+
+#### 示例五:聚合函数
+
+编写一个聚合函数,计算某一列最大值和最小值的差。
+聚合函数与标量函数的区别是:标量函数是多行输入对应多个输出,聚合函数是多行输入对应一个输出。聚合函数的执行过程有点像经典的 map-reduce 框架的执行过程,框架把数据分成若干块,每个 mapper 处理一个块,reducer 再把 mapper 的结果做聚合。不一样的地方在于,对于 TDengine Python UDF 中的 reduce 函数既有 map 的功能又有 reduce 的功能。reduce 函数接受两个参数:一个是自己要处理的数据,一个是别的任务执行 reduce 函数的处理结果。如下面的示例 /root/udf/myspread.py:
+
+```python
+import io
+import math
+import pickle
+
+LOG_FILE: io.TextIOBase = None
+
+
+def init():
+ global LOG_FILE
+ LOG_FILE = open("/var/log/taos/spread.log", "wt")
+ log("init function myspead success")
+
+
+def log(o):
+ LOG_FILE.write(str(o) + '\n')
+
+
+def destroy():
+ log("close log file: spread.log")
+ LOG_FILE.close()
+
+
+def start():
+ return pickle.dumps((-math.inf, math.inf))
+
+
+def reduce(block, buf):
+ max_number, min_number = pickle.loads(buf)
+ log(f"initial max_number={max_number}, min_number={min_number}")
+ rows, _ = block.shape()
+ for i in range(rows):
+ v = block.data(i, 0)
+ if v > max_number:
+ log(f"max_number={v}")
+ max_number = v
+ if v < min_number:
+ log(f"min_number={v}")
+ min_number = v
+ return pickle.dumps((max_number, min_number))
+
+
+def finish(buf):
+ max_number, min_number = pickle.loads(buf)
+ return max_number - min_number
+```
+
+在这个示例中我们不光定义了一个聚合函数,还添加记录执行日志的功能,讲解如下:
+1. init 函数不再是空函数,而是打开了一个文件用于写执行日志
+2. log 函数是记录日志的工具,自动将传入的对象转成字符串,加换行符输出
+3. destroy 函数用来在执行结束关闭文件
+4. start 返回了初始的 buffer,用来存聚合函数的中间结果,我们把最大值初始化为负无穷大,最小值初始化为正无穷大
+5. reduce 处理每个数据块并聚合结果
+6. finish 函数将最终的 buffer 转换成最终的输出
+执行下面的 SQL语句创建对应的 UDF:
+
+```sql
+create or replace aggregate function myspread as '/root/udf/myspread.py' outputtype double bufsize 128 language 'Python';
+```
+
+这个 SQL 语句与创建标量函数的 SQL 语句有两个重要区别:
+1. 增加了 aggregate 关键字
+2. 增加了 bufsize 关键字,用来指定存储中间结果的内存大小,这个数值可以大于实际使用的数值。本例中间结果是两个浮点数组成的 tuple,序列化后实际占用大小只有 32 个字节,但指定的 bufsize 是128,可以用 python 命令行打印实际占用的字节数
+
+```python
+>>> len(pickle.dumps((12345.6789, 23456789.9877)))
+32
+```
+
+测试这个函数,可以看到 myspread 的输出结果和内置的 spread 函数的输出结果是一致的。
+
+```sql
+taos> select myspread(v1) from t;
+ myspread(v1) |
+============================
+ 5.000000000 |
+Query OK, 1 row(s) in set (0.013486s)
+
+taos> select spread(v1) from t;
+ spread(v1) |
+============================
+ 5.000000000 |
+Query OK, 1 row(s) in set (0.005501s)
+```
+
+最后,查看我们自己打印的执行日志,从日志可以看出,reduce 函数被执行了 3 次。执行过程中 max 值被更新了 4 次, min 值只被更新 1 次。
+
+```shell
+root@slave11 /var/log/taos $ cat spread.log
+init function myspead success
+initial max_number=-inf, min_number=inf
+max_number=1
+min_number=1
+initial max_number=1, min_number=1
+max_number=2
+max_number=3
+initial max_number=3, min_number=1
+max_number=6
+close log file: spread.log
```
-2. 如果 Python UDF 程序执行时,通过 PYTHONPATH 引用其它的包,可以设置 taos.cfg 的 UdfdLdLibPath 变量为PYTHONPATH的内容
+
+通过这个示例,我们学会了如何定义聚合函数,并打印自定义的日志信息。
+
+### SQL 命令
+
+1. 创建标量函数的语法
+
+```sql
+CREATE FUNCTION function_name AS library_path OUTPUTTYPE output_type LANGUAGE 'Python';
+```
+
+2. 创建聚合函数的语法
+
+```sql
+CREATE AGGREGATE FUNCTION function_name AS library_path OUTPUTTYPE output_type LANGUAGE 'Python';
+```
+
+3. 更新标量函数
+
+```sql
+CREATE OR REPLACE FUNCTION function_name AS library_path OUTPUTTYPE output_type LANGUAGE 'Python';
+```
+
+4. 更新聚合函数
+
+```sql
+CREATE OR REPLACE AGGREGATE FUNCTION function_name AS library_path OUTPUTTYPE output_type BUFSIZE buf_size LANGUAGE 'Python';
+```
+
+注意:如果加了 “AGGREGATE” 关键字,更新之后函数将被当作聚合函数,无论之前是什么类型的函数。相反,如果没有加 “AGGREGATE” 关键字,更新之后的函数将被当作标量函数,无论之前是什么类型的函数。
+
+5. 查看函数信息
+
+ 同名的 UDF 每更新一次,版本号会增加 1。
+
+```sql
+select * from ins_functions \G;
+```
+
+6. 查看和删除已有的 UDF
+
+```sql
+SHOW functions;
+DROP FUNCTION function_name;
+```
+
+
+上面的命令可以查看 UDF 的完整信息
-### Python UDF 示例代码
+### 更多 Python UDF 示例代码
#### 标量函数示例 [pybitand](https://github.com/taosdata/TDengine/blob/3.0/tests/script/sh/pybitand.py)
pybitand 实现多列的按位与功能。如果只有一列,返回这一列。pybitand 忽略空值。
@@ -386,6 +871,17 @@ pyl2norm 实现了输入列的所有数据的二阶范数,即对每个数据
+#### 聚合函数示例 [pycumsum](https://github.com/taosdata/TDengine/blob/3.0/tests/script/sh/pycumsum.py)
+
+pycumsum 使用 numpy 计算输入列所有数据的累积和。
+
+pycumsum.py
+
+```python
+{{#include tests/script/sh/pycumsum.py}}
+```
+
+
## 管理和使用 UDF
在使用 UDF 之前需要先将其加入到 TDengine 系统中。关于如何管理和使用 UDF,请参考[管理和使用 UDF](../12-taos-sql/26-udf.md)
diff --git a/docs/zh/08-connector/14-java.mdx b/docs/zh/08-connector/14-java.mdx
index 35332a96022ceca87901708985d15a73e6e6f96d..46800226d77cc880359f73b71c9e0a396954399a 100644
--- a/docs/zh/08-connector/14-java.mdx
+++ b/docs/zh/08-connector/14-java.mdx
@@ -962,6 +962,7 @@ statement.executeUpdate("create topic if not exists topic_speed as select ts, sp
```java
Properties config = new Properties();
+config.setProperty("bootstrap.servers", "localhost:6030");
config.setProperty("enable.auto.commit", "true");
config.setProperty("group.id", "group1");
config.setProperty("value.deserializer", "com.taosdata.jdbc.tmq.ConsumerTest.ResultDeserializer");
@@ -969,12 +970,14 @@ config.setProperty("value.deserializer", "com.taosdata.jdbc.tmq.ConsumerTest.Res
TaosConsumer consumer = new TaosConsumer<>(config);
```
+- bootstrap.servers: TDengine 服务端所在的`ip:port`,如果使用 WebSocket 连接,则为 taosAdapter 所在的`ip:port`。
- enable.auto.commit: 是否允许自动提交。
- group.id: consumer: 所在的 group。
- value.deserializer: 结果集反序列化方法,可以继承 `com.taosdata.jdbc.tmq.ReferenceDeserializer`,并指定结果集 bean,实现反序列化。也可以继承 `com.taosdata.jdbc.tmq.Deserializer`,根据 SQL 的 resultSet 自定义反序列化方式。
- td.connect.type: 连接方式。jni:表示使用动态库连接的方式,ws/WebSocket:表示使用 WebSocket 进行数据通信。默认为 jni 方式。
-- httpConnectTimeout:创建连接超时参数,单位 ms,默认为 5000 ms。仅在 WebSocket 连接下有效。
-- messageWaitTimeout:数据传输超时参数,单位 ms,默认为 10000 ms。仅在 WebSocket 连接下有效。
+- httpConnectTimeout: 创建连接超时参数,单位 ms,默认为 5000 ms。仅在 WebSocket 连接下有效。
+- messageWaitTimeout: 数据传输超时参数,单位 ms,默认为 10000 ms。仅在 WebSocket 连接下有效。
+- httpPoolSize: 同一个连接下最大并行请求数。仅在 WebSocket 连接下有效。
其他参数请参考:[Consumer 参数列表](../../../develop/tmq#创建-consumer-以及consumer-group)
#### 订阅消费数据
@@ -1016,10 +1019,19 @@ public abstract class ConsumerLoop {
public ConsumerLoop() throws SQLException {
Properties config = new Properties();
+ config.setProperty("td.connect.type", "jni");
+ config.setProperty("bootstrap.servers", "localhost:6030");
+ config.setProperty("td.connect.user", "root");
+ config.setProperty("td.connect.pass", "taosdata");
+ config.setProperty("auto.offset.reset", "earliest");
config.setProperty("msg.with.table.name", "true");
config.setProperty("enable.auto.commit", "true");
+ config.setProperty("auto.commit.interval.ms", "1000");
config.setProperty("group.id", "group1");
+ config.setProperty("client.id", "1");
config.setProperty("value.deserializer", "com.taosdata.jdbc.tmq.ConsumerTest.ConsumerLoop$ResultDeserializer");
+ config.setProperty("value.deserializer.encoding", "UTF-8");
+ config.setProperty("experimental.snapshot.enable", "true");
this.consumer = new TaosConsumer<>(config);
this.topics = Collections.singletonList("topic_speed");
@@ -1093,12 +1105,19 @@ public abstract class ConsumerLoop {
public ConsumerLoop() throws SQLException {
Properties config = new Properties();
- config.setProperty("bootstrap.servers", "localhost:6041");
config.setProperty("td.connect.type", "ws");
+ config.setProperty("bootstrap.servers", "localhost:6041");
+ config.setProperty("td.connect.user", "root");
+ config.setProperty("td.connect.pass", "taosdata");
+ config.setProperty("auto.offset.reset", "earliest");
config.setProperty("msg.with.table.name", "true");
config.setProperty("enable.auto.commit", "true");
+ config.setProperty("auto.commit.interval.ms", "1000");
config.setProperty("group.id", "group2");
+ config.setProperty("client.id", "1");
config.setProperty("value.deserializer", "com.taosdata.jdbc.tmq.ConsumerTest.ConsumerLoop$ResultDeserializer");
+ config.setProperty("value.deserializer.encoding", "UTF-8");
+ config.setProperty("experimental.snapshot.enable", "true");
this.consumer = new TaosConsumer<>(config);
this.topics = Collections.singletonList("topic_speed");
@@ -1239,6 +1258,7 @@ public static void main(String[] args) throws Exception {
- connectionPools:HikariCP, Druid, dbcp, c3p0 等连接池中使用 taos-jdbcdriver。
- SpringJdbcTemplate:Spring JdbcTemplate 中使用 taos-jdbcdriver。
- mybatisplus-demo:Springboot + Mybatis 中使用 taos-jdbcdriver。
+- consumer-demo:Consumer 消费 TDengine 数据示例,可通过参数控制消费速度。
请参考:[JDBC example](https://github.com/taosdata/TDengine/tree/3.0/examples/JDBC)
diff --git a/docs/zh/08-connector/20-go.mdx b/docs/zh/08-connector/20-go.mdx
index fd6df992b530fdce4427e34ef8bc227f449eaac7..d431be35cb0b709cdc6c5cadd2c8043702acbe11 100644
--- a/docs/zh/08-connector/20-go.mdx
+++ b/docs/zh/08-connector/20-go.mdx
@@ -30,7 +30,7 @@ REST 连接支持所有能运行 Go 的平台。
## 版本支持
-请参考[版本支持列表](../#版本支持)
+请参考[版本支持列表](https://github.com/taosdata/driver-go#remind)
## 支持的功能特性
@@ -383,6 +383,15 @@ func main() {
提交消息。
+* `func (c *Consumer) Assignment() (partitions []tmq.TopicPartition, err error)`
+
+ 获取消费进度。(需要 TDengine >= 3.0.5.0, driver-go >= v3.5.0)
+
+* `func (c *Consumer) Seek(partition tmq.TopicPartition, ignoredTimeoutMs int) error`
+注意:出于兼容目的保留 `ignoredTimeoutMs` 参数,当前未使用
+
+ 按照指定的进度消费。(需要 TDengine >= 3.0.5.0, driver-go >= v3.5.0)
+
* `func (c *Consumer) Close() error`
关闭连接。
@@ -468,11 +477,20 @@ func main() {
提交消息。
+* `func (c *Consumer) Assignment() (partitions []tmq.TopicPartition, err error)`
+
+ 获取消费进度。(需要 TDengine >= 3.0.5.0, driver-go >= v3.5.0)
+
+* `func (c *Consumer) Seek(partition tmq.TopicPartition, ignoredTimeoutMs int) error`
+注意:出于兼容目的保留 `ignoredTimeoutMs` 参数,当前未使用
+
+ 按照指定的进度消费。(需要 TDengine >= 3.0.5.0, driver-go >= v3.5.0)
+
* `func (c *Consumer) Close() error`
关闭连接。
-完整订阅示例参见 [GitHub 示例文件](https://github.com/taosdata/driver-go/blob/3.0/examples/tmqoverws/main.go)
+完整订阅示例参见 [GitHub 示例文件](https://github.com/taosdata/driver-go/blob/main/examples/tmqoverws/main.go)
### 通过 WebSocket 进行参数绑定
@@ -520,7 +538,7 @@ func main() {
结束参数绑定。
-完整参数绑定示例参见 [GitHub 示例文件](https://github.com/taosdata/driver-go/blob/3.0/examples/stmtoverws/main.go)
+完整参数绑定示例参见 [GitHub 示例文件](https://github.com/taosdata/driver-go/blob/main/examples/stmtoverws/main.go)
## API 参考
diff --git a/docs/zh/08-connector/26-rust.mdx b/docs/zh/08-connector/26-rust.mdx
index d4ca25be8109a2aa28d0805edcd0add5d052138b..a02757b14e02bb985861309e16864d3ee5972192 100644
--- a/docs/zh/08-connector/26-rust.mdx
+++ b/docs/zh/08-connector/26-rust.mdx
@@ -26,9 +26,14 @@ import RustQuery from "../07-develop/04-query-data/_rust.mdx"
原生连接支持的平台和 TDengine 客户端驱动支持的平台一致。
Websocket 连接支持所有能运行 Rust 的平台。
-## 版本支持
+## 版本历史
-请参考[版本支持列表](../#版本支持)
+| Rust 连接器版本 | TDengine 版本 | 主要功能 |
+| :----------------: | :--------------: | :--------------------------------------------------: |
+| v0.8.8 | 3.0.5.0 or later | 消息订阅:获取消费进度及按照指定进度开始消费。 |
+| v0.8.0 | 3.0.4.0 | 支持无模式写入。 |
+| v0.7.6 | 3.0.3.0 | 支持在请求中使用 req_id。 |
+| v0.6.0 | 3.0.0.0 | 基础功能。 |
Rust 连接器仍然在快速开发中,1.0 之前无法保证其向后兼容。建议使用 3.0 版本以上的 TDengine,以避免已知问题。
@@ -65,6 +70,13 @@ taos = "*"
taos = { version = "*", default-features = false, features = ["ws"] }
```
+当仅启用 `ws` 特性时,可同时指定 `r2d2` 使得在同步(blocking/sync)模式下使用 [r2d2] 作为连接池:
+
+```toml
+[dependencies]
+taos = { version = "*", default-features = false, features = ["r2d2", "ws"] }
+```
+
@@ -257,26 +269,24 @@ let conn: Taos = cfg.build();
### 连接池
-在复杂应用中,建议启用连接池。[taos] 的连接池使用 [r2d2] 实现。
+在复杂应用中,建议启用连接池。[taos] 的连接池默认(异步模式)使用 [deadpool] 实现。
如下,可以生成一个默认参数的连接池。
```rust
-let pool = TaosBuilder::from_dsn(dsn)?.pool()?;
+let pool: Pool = TaosBuilder::from_dsn("taos:///")
+ .unwrap()
+ .pool()
+ .unwrap();
```
同样可以使用连接池的构造器,对连接池参数进行设置:
```rust
-let dsn = "taos://localhost:6030";
-
-let opts = PoolBuilder::new()
- .max_size(5000) // max connections
- .max_lifetime(Some(Duration::from_secs(60 * 60))) // lifetime of each connection
- .min_idle(Some(1000)) // minimal idle connections
- .connection_timeout(Duration::from_secs(2));
-
-let pool = TaosBuilder::from_dsn(dsn)?.with_pool_builder(opts)?;
+let pool: Pool = Pool::builder(Manager::from_dsn(self.dsn.clone()).unwrap().0)
+ .max_size(88) // 最大连接数
+ .build()
+ .unwrap();
```
在应用代码中,使用 `pool.get()?` 来获取一个连接对象 [Taos]。
@@ -497,6 +507,22 @@ TMQ 消息队列是一个 [futures::Stream](https://docs.rs/futures/latest/futur
}
```
+获取消费进度:
+
+版本要求 connector-rust >= v0.8.8, TDengine >= 3.0.5.0
+
+```rust
+let assignments = consumer.assignments().await.unwrap();
+```
+
+按照指定的进度消费:
+
+版本要求 connector-rust >= v0.8.8, TDengine >= 3.0.5.0
+
+```rust
+consumer.offset_seek(topic, vgroup_id, offset).await;
+```
+
停止订阅:
```rust
@@ -511,11 +537,12 @@ consumer.unsubscribe().await;
- `enable.auto.commit`: 当设置为 `true` 时,将启用自动标记模式,当对数据一致性不敏感时,可以启用此方式。
- `auto.commit.interval.ms`: 自动标记的时间间隔。
-完整订阅示例参见 [GitHub 示例文件](https://github.com/taosdata/taos-connector-rust/blob/main/examples/subscribe.rs).
+完整订阅示例参见 [GitHub 示例文件](https://github.com/taosdata/TDengine/blob/3.0/docs/examples/rust/nativeexample/examples/subscribe_demo.rs).
其他相关结构体 API 使用说明请移步 Rust 文档托管网页:。
[taos]: https://github.com/taosdata/rust-connector-taos
+[deadpool]: https://crates.io/crates/deadpool
[r2d2]: https://crates.io/crates/r2d2
[TaosBuilder]: https://docs.rs/taos/latest/taos/struct.TaosBuilder.html
[TaosCfg]: https://docs.rs/taos/latest/taos/struct.TaosCfg.html
diff --git a/docs/zh/08-connector/30-python.mdx b/docs/zh/08-connector/30-python.mdx
index 1cff142e11d39e6afe86fab187697d222f37a9dd..1037d66f17e619e9b01688447320f981f3679604 100644
--- a/docs/zh/08-connector/30-python.mdx
+++ b/docs/zh/08-connector/30-python.mdx
@@ -362,7 +362,7 @@ TaosCursor 类使用原生连接进行写入、查询操作。在客户端多线
##### TaosConnection 类的使用
-`TaosConnection` 类既包含对 PEP249 Connection 接口的实现(如:`cursor`方法和 `close` 方法),也包含很多扩展功能(如: `execute`、 `query`、`schemaless_insert` 和 `subscribe` 方法。
+类似上文介绍的使用方法,增加 `req_id` 参数。
```python title="execute 方法"
{{#include docs/examples/python/connection_usage_native_reference_with_req_id.py:insert}}
@@ -372,13 +372,9 @@ TaosCursor 类使用原生连接进行写入、查询操作。在客户端多线
{{#include docs/examples/python/connection_usage_native_reference_with_req_id.py:query}}
```
-:::tip
-查询结果只能获取一次。比如上面的示例中 `fetch_all()` 和 `fetch_all_into_dict()` 只能用一个。重复获取得到的结果为空列表。
-:::
-
##### TaosResult 类的使用
-上面 `TaosConnection` 类的使用示例中,我们已经展示了两种获取查询结果的方法: `fetch_all()` 和 `fetch_all_into_dict()`。除此之外 `TaosResult` 还提供了按行迭代(`rows_iter`)或按数据块迭代(`blocks_iter`)结果集的方法。在查询数据量较大的场景,使用这两个方法会更高效。
+类似上文介绍的使用方法,增加 `req_id` 参数。
```python title="blocks_iter 方法"
{{#include docs/examples/python/result_set_with_req_id_examples.py}}
@@ -391,14 +387,11 @@ TaosCursor 类使用原生连接进行写入、查询操作。在客户端多线
{{#include docs/examples/python/cursor_usage_native_reference_with_req_id.py}}
```
-:::note
-TaosCursor 类使用原生连接进行写入、查询操作。在客户端多线程的场景下,这个游标实例必须保持线程独享,不能跨线程共享使用,否则会导致返回结果出现错误。
-
-:::
-
+类似上文介绍的使用方法,增加 `req_id` 参数。
+
##### TaosRestCursor 类的使用
`TaosRestCursor` 类是对 PEP249 Cursor 接口的实现。
@@ -420,8 +413,11 @@ TaosCursor 类使用原生连接进行写入、查询操作。在客户端多线
对于 `sql()` 方法更详细的介绍, 请参考 [RestClient](https://docs.taosdata.com/api/taospy/taosrest/restclient.html)。
+
+类似上文介绍的使用方法,增加 `req_id` 参数。
+
```python
{{#include docs/examples/python/connect_websocket_with_req_id_examples.py:basic}}
```
diff --git a/docs/zh/12-taos-sql/01-data-type.md b/docs/zh/12-taos-sql/01-data-type.md
index f014573ca691d3df9e5f27ff5af533035381e55a..4a4c1d6ec69b95e8cf3c38b5b5a8d2a4cb335d62 100644
--- a/docs/zh/12-taos-sql/01-data-type.md
+++ b/docs/zh/12-taos-sql/01-data-type.md
@@ -45,9 +45,9 @@ CREATE DATABASE db_name PRECISION 'ns';
:::note
-- 表的每行长度不能超过 48KB(注意:每个 BINARY/NCHAR 类型的列还会额外占用 2 个字节的存储位置)。
+- 表的每行长度不能超过 48KB(从 3.0.5.0 版本开始为 64KB)(注意:每个 BINARY/NCHAR 类型的列还会额外占用 2 个字节的存储位置)。
- 虽然 BINARY 类型在底层存储上支持字节型的二进制字符,但不同编程语言对二进制数据的处理方式并不保证一致,因此建议在 BINARY 类型中只存储 ASCII 可见字符,而避免存储不可见字符。多字节的数据,例如中文字符,则需要使用 NCHAR 类型进行保存。如果强行使用 BINARY 类型保存中文字符,虽然有时也能正常读写,但并不带有字符集信息,很容易出现数据乱码甚至数据损坏等情况。
-- BINARY 类型理论上最长可以有 16,374 字节。BINARY 仅支持字符串输入,字符串两端需使用单引号引用。使用时须指定大小,如 BINARY(20) 定义了最长为 20 个单字节字符的字符串,每个字符占 1 字节的存储空间,总共固定占用 20 字节的空间,此时如果用户字符串超出 20 字节将会报错。对于字符串内的单引号,可以用转义字符反斜线加单引号来表示,即 `\'`。
+- BINARY 类型理论上最长可以有 16,374(从 3.0.5.0 版本开始,数据列为 65,517,标签列为 16,382) 字节。BINARY 仅支持字符串输入,字符串两端需使用单引号引用。使用时须指定大小,如 BINARY(20) 定义了最长为 20 个单字节字符的字符串,每个字符占 1 字节的存储空间,总共固定占用 20 字节的空间,此时如果用户字符串超出 20 字节将会报错。对于字符串内的单引号,可以用转义字符反斜线加单引号来表示,即 `\'`。
- SQL 语句中的数值类型将依据是否存在小数点,或使用科学计数法表示,来判断数值类型是否为整型或者浮点型,因此在使用时要注意相应类型越界的情况。例如,9999999999999999999 会认为超过长整型的上边界而溢出,而 9999999999999999999.0 会被认为是有效的浮点数。
:::
diff --git a/docs/zh/12-taos-sql/02-database.md b/docs/zh/12-taos-sql/02-database.md
index a2a09141206fa3f07a9e8289ca009fde2ebcec98..b329413aa821c2aa10b3f93927e6deb8a8b3ad46 100644
--- a/docs/zh/12-taos-sql/02-database.md
+++ b/docs/zh/12-taos-sql/02-database.md
@@ -121,6 +121,8 @@ alter_database_option: {
| WAL_LEVEL value
| WAL_FSYNC_PERIOD value
| KEEP value
+ | WAL_RETENTION_PERIOD value
+ | WAL_RETENTION_SIZE value
}
```
diff --git a/docs/zh/12-taos-sql/03-table.md b/docs/zh/12-taos-sql/03-table.md
index 5687c7e740766ad7a3fbea03ff42ce3137bb140e..2e66ac4002f1d535615893d8ddb04f163aa7a498 100644
--- a/docs/zh/12-taos-sql/03-table.md
+++ b/docs/zh/12-taos-sql/03-table.md
@@ -43,7 +43,7 @@ table_option: {
1. 表的第一个字段必须是 TIMESTAMP,并且系统自动将其设为主键;
2. 表名最大长度为 192;
-3. 表的每行长度不能超过 48KB;(注意:每个 BINARY/NCHAR 类型的列还会额外占用 2 个字节的存储位置)
+3. 表的每行长度不能超过 48KB(从 3.0.5.0 版本开始为 64KB);(注意:每个 BINARY/NCHAR 类型的列还会额外占用 2 个字节的存储位置)
4. 子表名只能由字母、数字和下划线组成,且不能以数字开头,不区分大小写
5. 使用数据类型 binary 或 nchar,需指定其最长的字节数,如 binary(20),表示 20 字节;
6. 为了兼容支持更多形式的表名,TDengine 引入新的转义符 "\`",可以让表名与关键词不冲突,同时不受限于上述表名称合法性约束检查。但是同样具有长度限制要求。使用转义字符以后,不再对转义字符中的内容进行大小写统一。
diff --git a/docs/zh/12-taos-sql/06-select.md b/docs/zh/12-taos-sql/06-select.md
index 870df734718fde878cc3f4f234b6e0bcfe8a5303..5bc67755f04adea322a77b4eb93af9de4e41e679 100644
--- a/docs/zh/12-taos-sql/06-select.md
+++ b/docs/zh/12-taos-sql/06-select.md
@@ -55,7 +55,7 @@ window_clause: {
| INTERVAL(interval_val [, interval_offset]) [SLIDING (sliding_val)] [WATERMARK(watermark_val)] [FILL(fill_mod_and_val)]
interp_clause:
- RANGE(ts_val, ts_val) EVERY(every_val) FILL(fill_mod_and_val)
+ RANGE(ts_val [, ts_val]) EVERY(every_val) FILL(fill_mod_and_val)
partition_by_clause:
PARTITION BY expr [, expr] ...
diff --git a/docs/zh/12-taos-sql/10-function.md b/docs/zh/12-taos-sql/10-function.md
index 248554f931c4b5481c55976fd36a40793c9c3985..416d41614d01b6c887d8ea462937b2b4fc509a55 100644
--- a/docs/zh/12-taos-sql/10-function.md
+++ b/docs/zh/12-taos-sql/10-function.md
@@ -890,9 +890,10 @@ ignore_null_values: {
- INTERP 用于在指定时间断面获取指定列的记录值,如果该时间断面不存在符合条件的行数据,那么会根据 FILL 参数的设定进行插值。
- INTERP 的输入数据为指定列的数据,可以通过条件语句(where 子句)来对原始列数据进行过滤,如果没有指定过滤条件则输入为全部数据。
- INTERP 需要同时与 RANGE,EVERY 和 FILL 关键字一起使用。
-- INTERP 的输出时间范围根据 RANGE(timestamp1,timestamp2)字段来指定,需满足 timestamp1 <= timestamp2。其中 timestamp1(必选值)为输出时间范围的起始值,即如果 timestamp1 时刻符合插值条件则 timestamp1 为输出的第一条记录,timestamp2(必选值)为输出时间范围的结束值,即输出的最后一条记录的 timestamp 不能大于 timestamp2。
+- INTERP 的输出时间范围根据 RANGE(timestamp1, timestamp2)字段来指定,需满足 timestamp1 <= timestamp2。其中 timestamp1 为输出时间范围的起始值,即如果 timestamp1 时刻符合插值条件则 timestamp1 为输出的第一条记录,timestamp2 为输出时间范围的结束值,即输出的最后一条记录的 timestamp 不能大于 timestamp2。
- INTERP 根据 EVERY(time_unit) 字段来确定输出时间范围内的结果条数,即从 timestamp1 开始每隔固定长度的时间(time_unit 值)进行插值,time_unit 可取值时间单位:1a(毫秒),1s(秒),1m(分),1h(小时),1d(天),1w(周)。例如 EVERY(500a) 将对于指定数据每500毫秒间隔进行一次插值.
- INTERP 根据 FILL 字段来决定在每个符合输出条件的时刻如何进行插值。关于 FILL 子句如何使用请参考 [FILL 子句](../distinguished/#fill-子句)
+- INTERP 可以在 RANGE 字段中只指定唯一的时间戳对单个时间点进行插值,在这种情况下,EVERY 字段可以省略。例如:SELECT INTERP(col) FROM tb RANGE('2023-01-01 00:00:00') FILL(linear).
- INTERP 作用于超级表时, 会将该超级表下的所有子表数据按照主键列排序后进行插值计算,也可以搭配 PARTITION BY tbname 使用,将结果强制规约到单个时间线。
- INTERP 可以与伪列 _irowts 一起使用,返回插值点所对应的时间戳(3.0.2.0版本以后支持)。
- INTERP 可以与伪列 _isfilled 一起使用,显示返回结果是否为原始记录或插值算法产生的数据(3.0.3.0版本以后支持)。
@@ -1001,7 +1002,6 @@ SAMPLE(expr, k)
**使用说明**:
- 不能参与表达式计算;该函数可以应用在普通表和超级表上;
-- 使用在超级表上的时候,需要搭配 PARTITION by tbname 使用,将结果强制规约到单个时间线。
### TAIL
@@ -1080,7 +1080,6 @@ CSUM(expr)
- 不支持 +、-、*、/ 运算,如 csum(col1) + csum(col2)。
- 只能与聚合(Aggregation)函数一起使用。 该函数可以应用在普通表和超级表上。
-- 使用在超级表上的时候,需要搭配 PARTITION BY tbname使用,将结果强制规约到单个时间线。
### DERIVATIVE
@@ -1104,7 +1103,6 @@ ignore_negative: {
**使用说明**:
-- DERIVATIVE 函数可以在由 PARTITION BY 划分出单独时间线的情况下用于超级表(也即 PARTITION BY tbname)。
- 可以与选择相关联的列一起使用。 例如: select \_rowts, DERIVATIVE() from。
### DIFF
@@ -1167,7 +1165,6 @@ MAVG(expr, k)
- 不支持 +、-、*、/ 运算,如 mavg(col1, k1) + mavg(col2, k1);
- 只能与普通列,选择(Selection)、投影(Projection)函数一起使用,不能与聚合(Aggregation)函数一起使用;
-- 使用在超级表上的时候,需要搭配 PARTITION BY tbname使用,将结果强制规约到单个时间线。
### STATECOUNT
@@ -1193,7 +1190,6 @@ STATECOUNT(expr, oper, val)
**使用说明**:
-- 该函数可以应用在普通表上,在由 PARTITION BY 划分出单独时间线的情况下用于超级表(也即 PARTITION BY tbname)
- 不能和窗口操作一起使用,例如 interval/state_window/session_window。
@@ -1221,7 +1217,6 @@ STATEDURATION(expr, oper, val, unit)
**使用说明**:
-- 该函数可以应用在普通表上,在由 PARTITION BY 划分出单独时间线的情况下用于超级表(也即 PARTITION BY tbname)
- 不能和窗口操作一起使用,例如 interval/state_window/session_window。
@@ -1239,8 +1234,6 @@ TWA(expr)
**适用于**:表和超级表。
-**使用说明**: TWA 函数可以在由 PARTITION BY 划分出单独时间线的情况下用于超级表(也即 PARTITION BY tbname)。
-
## 系统信息函数
diff --git a/docs/zh/12-taos-sql/19-limit.md b/docs/zh/12-taos-sql/19-limit.md
index 7b6692f1b733042fde3103de1e9fa06bf4dda542..e5a492580ec8ee63d41ca0f09370b96356fd9489 100644
--- a/docs/zh/12-taos-sql/19-limit.md
+++ b/docs/zh/12-taos-sql/19-limit.md
@@ -26,7 +26,7 @@ description: 合法字符集和命名中的限制规则
- 数据库名最大长度为 64 字节
- 表名最大长度为 192 字节,不包括数据库名前缀和分隔符
-- 每行数据最大长度 48KB (注意:数据行内每个 BINARY/NCHAR 类型的列还会额外占用 2 个字节的存储位置)
+- 每行数据最大长度 48KB(从 3.0.5.0 版本开始为 64KB) (注意:数据行内每个 BINARY/NCHAR 类型的列还会额外占用 2 个字节的存储位置)
- 列名最大长度为 64 字节
- 最多允许 4096 列,最少需要 2 列,第一列必须是时间戳。
- 标签名最大长度为 64 字节
diff --git a/docs/zh/14-reference/12-config/index.md b/docs/zh/14-reference/12-config/index.md
index 3ede85f86d69062e99456dbd30e97996dc111816..68f44d1e65b9f913b0a65903782e102b208c59fd 100644
--- a/docs/zh/14-reference/12-config/index.md
+++ b/docs/zh/14-reference/12-config/index.md
@@ -91,11 +91,30 @@ taos --dump-config
### maxShellConns
| 属性 | 说明 |
-| -------- | ----------------------- |
+| --------| ----------------------- |
| 适用范围 | 仅服务端适用 |
-| 含义 | 一个 dnode 容许的连接数 |
+| 含义 | 一个 dnode 容许的连接数 |
| 取值范围 | 10-50000000 |
-| 缺省值 | 5000 |
+| 缺省值 | 5000 |
+
+### numOfRpcSessions
+
+| 属性 | 说明 |
+| --------| ---------------------- |
+| 适用范围 | 客户端和服务端都适用 |
+| 含义 | 一个客户端能创建的最大连接数|
+| 取值范围 | 100-100000 |
+| 缺省值 | 10000 |
+
+### timeToGetAvailableConn
+
+| 属性 | 说明 |
+| -------- | --------------------|
+| 适用范围 | 客户端和服务端都适用 |
+| 含义 |获得可用连接的最长等待时间|
+| 取值范围 | 10-50000000(单位为毫秒)|
+| 缺省值 | 500000 |
+
### numOfRpcSessions
diff --git a/docs/zh/14-reference/13-schemaless/13-schemaless.md b/docs/zh/14-reference/13-schemaless/13-schemaless.md
index e5f232c1fc506a6e37cb128cab129a7fe539d60a..6c2007938bfc23a609c752b9c02bab4869b19aee 100644
--- a/docs/zh/14-reference/13-schemaless/13-schemaless.md
+++ b/docs/zh/14-reference/13-schemaless/13-schemaless.md
@@ -87,7 +87,7 @@ st,t1=3,t2=4,t3=t3 c1=3i64,c3="passit",c2=false,c4=4f64 1626006833639000000
:::tip
无模式所有的处理逻辑,仍会遵循 TDengine 对数据结构的底层限制,例如每行数据的总长度不能超过
-48KB,标签值的总长度不超过16KB。这方面的具体限制约束请参见 [TDengine SQL 边界限制](/taos-sql/limit)
+48KB(从 3.0.5.0 版本开始为 64KB),标签值的总长度不超过16KB。这方面的具体限制约束请参见 [TDengine SQL 边界限制](/taos-sql/limit)
:::
diff --git a/docs/zh/20-third-party/11-kafka.md b/docs/zh/20-third-party/11-kafka.md
index 97e78c2fde7bd16d2c28284746c879e7a263c473..641e2d51745680b331060efbb8c3d4fc388f6a24 100644
--- a/docs/zh/20-third-party/11-kafka.md
+++ b/docs/zh/20-third-party/11-kafka.md
@@ -16,169 +16,78 @@ TDengine Source Connector 用于把数据实时地从 TDengine 读出来发送

-## 什么是 Confluent?
-
-[Confluent](https://www.confluent.io/) 在 Kafka 的基础上增加很多扩展功能。包括:
-
-1. Schema Registry
-2. REST 代理
-3. 非 Java 客户端
-4. 很多打包好的 Kafka Connect 插件
-5. 管理和监控 Kafka 的 GUI —— Confluent 控制中心
-
-这些扩展功能有的包含在社区版本的 Confluent 中,有的只有企业版能用。
-
-
-Confluent 企业版提供了 `confluent` 命令行工具管理各个组件。
-
## 前置条件
运行本教程中示例的前提条件。
1. Linux 操作系统
2. 已安装 Java 8 和 Maven
-3. 已安装 Git
+3. 已安装 Git、curl、vi
4. 已安装并启动 TDengine。如果还没有可参考[安装和卸载](/operation/pkg-install)
-## 安装 Confluent
-
-Confluent 提供了 Docker 和二进制包两种安装方式。本文仅介绍二进制包方式安装。
+## 安装 Kafka
在任意目录下执行:
-```
-curl -O http://packages.confluent.io/archive/7.1/confluent-7.1.1.tar.gz
-tar xzf confluent-7.1.1.tar.gz -C /opt/
+```shell
+curl -O https://downloads.apache.org/kafka/3.4.0/kafka_2.13-3.4.0.tgz
+tar xzf kafka_2.13-3.4.0.tgz -C /opt/
+ln -s /opt/kafka_2.13-3.4.0 /opt/kafka
```
-然后需要把 `$CONFLUENT_HOME/bin` 目录加入 PATH。
+然后需要把 `$KAFKA_HOME/bin` 目录加入 PATH。
```title=".profile"
-export CONFLUENT_HOME=/opt/confluent-7.1.1
-export PATH=$CONFLUENT_HOME/bin:$PATH
+export KAFKA_HOME=/opt/kafka
+export PATH=$PATH:$KAFKA_HOME/bin
```
以上脚本可以追加到当前用户的 profile 文件(~/.profile 或 ~/.bash_profile)
-安装完成之后,可以输入`confluent version`做简单验证:
-
-```
-# confluent version
-confluent - Confluent CLI
-
-Version: v2.6.1
-Git Ref: 6d920590
-Build Date: 2022-02-18T06:14:21Z
-Go Version: go1.17.6 (linux/amd64)
-Development: false
-```
-
## 安装 TDengine Connector 插件
-### 从源码安装
+### 编译插件
-```
+```shell
git clone --branch 3.0 https://github.com/taosdata/kafka-connect-tdengine.git
cd kafka-connect-tdengine
-mvn clean package
-unzip -d $CONFLUENT_HOME/share/java/ target/components/packages/taosdata-kafka-connect-tdengine-*.zip
+mvn clean package -Dmaven.test.skip=true
+unzip -d $KAFKA_HOME/components/ target/components/packages/taosdata-kafka-connect-tdengine-*.zip
```
-以上脚本先 clone 项目源码,然后用 Maven 编译打包。打包完成后在 `target/components/packages/` 目录生成了插件的 zip 包。把这个 zip 包解压到安装插件的路径即可。上面的示例中使用了内置的插件安装路径: `$CONFLUENT_HOME/share/java/`。
+以上脚本先 clone 项目源码,然后用 Maven 编译打包。打包完成后在 `target/components/packages/` 目录生成了插件的 zip 包。把这个 zip 包解压到安装插件的路径即可。上面的示例中使用了内置的插件安装路径: `$KAFKA_HOME/components/`。
-### 用 confluent-hub 安装
+### 配置插件
-[Confluent Hub](https://www.confluent.io/hub) 提供下载 Kafka Connect 插件的服务。在 TDengine Kafka Connector 发布到 Confluent Hub 后可以使用命令工具 `confluent-hub` 安装。
-**TDengine Kafka Connector 目前没有正式发布,不能用这种方式安装**。
+将 kafka-connect-tdengine 插件加入 `$KAFKA_HOME/config/connect-distributed.properties` 配置文件 plugin.path 中
-## 启动 Confluent
-
-```
-confluent local services start
+```properties
+plugin.path=/usr/share/java,/opt/kafka/components
```
-:::note
-一定要先安装插件再启动 Confluent, 否则加载插件会失败。
-:::
+## 启动 Kafka
-:::tip
-若某组件启动失败,可尝试清空数据,重新启动。数据目录在启动时将被打印到控制台,比如 :
-
-```title="控制台输出日志" {1}
-Using CONFLUENT_CURRENT: /tmp/confluent.106668
-Starting ZooKeeper
-ZooKeeper is [UP]
-Starting Kafka
-Kafka is [UP]
-Starting Schema Registry
-Schema Registry is [UP]
-Starting Kafka REST
-Kafka REST is [UP]
-Starting Connect
-Connect is [UP]
-Starting ksqlDB Server
-ksqlDB Server is [UP]
-Starting Control Center
-Control Center is [UP]
-```
+```shell
+zookeeper-server-start.sh -daemon $KAFKA_HOME/config/zookeeper.properties
-清空数据可执行 `rm -rf /tmp/confluent.106668`。
-:::
+kafka-server-start.sh -daemon $KAFKA_HOME/config/server.properties
-### 验证各个组件是否启动成功
-
-输入命令:
-
-```
-confluent local services status
-```
-
-如果各组件都启动成功,会得到如下输出:
-
-```
-Connect is [UP]
-Control Center is [UP]
-Kafka is [UP]
-Kafka REST is [UP]
-ksqlDB Server is [UP]
-Schema Registry is [UP]
-ZooKeeper is [UP]
+connect-distributed.sh -daemon $KAFKA_HOME/config/connect-distributed.properties
```
-### 验证插件是否安装成功
+### 验证 kafka Connect 是否启动成功
-在 Kafka Connect 组件完全启动后,可用以下命令列出成功加载的插件:
+输入命令:
-```
-confluent local services connect plugin list
+```shell
+curl http://localhost:8083/connectors
```
-如果成功安装,会输出如下:
-
-```txt {4,9}
-Available Connect Plugins:
-[
- {
- "class": "com.taosdata.kafka.connect.sink.TDengineSinkConnector",
- "type": "sink",
- "version": "1.0.0"
- },
- {
- "class": "com.taosdata.kafka.connect.source.TDengineSourceConnector",
- "type": "source",
- "version": "1.0.0"
- },
-......
-```
+如果各组件都启动成功,会得到如下输出:
-如果插件安装失败,请检查 Kafka Connect 的启动日志是否有异常信息,用以下命令输出日志路径:
+```txt
+[]
```
-echo `cat /tmp/confluent.current`/connect/connect.stdout
-```
-该命令的输出类似: `/tmp/confluent.104086/connect/connect.stdout`。
-
-与日志文件 `connect.stdout` 同一目录,还有一个文件名为: `connect.properties`。在这个文件的末尾,可以看到最终生效的 `plugin.path`, 它是一系列用逗号分割的路径。如果插件安装失败,很可能是因为实际的安装路径不包含在 `plugin.path` 中。
-
## TDengine Sink Connector 的使用
@@ -188,40 +97,47 @@ TDengine Sink Connector 内部使用 TDengine [无模式写入接口](../../conn
下面的示例将主题 meters 的数据,同步到目标数据库 power。数据格式为 InfluxDB Line 协议格式。
-### 添加配置文件
+### 添加 Sink Connector 配置文件
-```
+```shell
mkdir ~/test
cd ~/test
-vi sink-demo.properties
+vi sink-demo.json
```
-sink-demo.properties 内容如下:
-
-```ini title="sink-demo.properties"
-name=TDengineSinkConnector
-connector.class=com.taosdata.kafka.connect.sink.TDengineSinkConnector
-tasks.max=1
-topics=meters
-connection.url=jdbc:TAOS://127.0.0.1:6030
-connection.user=root
-connection.password=taosdata
-connection.database=power
-db.schemaless=line
-data.precision=ns
-key.converter=org.apache.kafka.connect.storage.StringConverter
-value.converter=org.apache.kafka.connect.storage.StringConverter
+sink-demo.json 内容如下:
+
+```json title="sink-demo.json"
+{
+ "name": "TDengineSinkConnector",
+ "config": {
+ "connector.class":"com.taosdata.kafka.connect.sink.TDengineSinkConnector",
+ "tasks.max": "1",
+ "topics": "meters",
+ "connection.url": "jdbc:TAOS://127.0.0.1:6030",
+ "connection.user": "root",
+ "connection.password": "taosdata",
+ "connection.database": "power",
+ "db.schemaless": "line",
+ "data.precision": "ns",
+ "key.converter": "org.apache.kafka.connect.storage.StringConverter",
+ "value.converter": "org.apache.kafka.connect.storage.StringConverter",
+ "errors.tolerance": "all",
+ "errors.deadletterqueue.topic.name": "dead_letter_topic",
+ "errors.deadletterqueue.topic.replication.factor": 1
+ }
+}
```
关键配置说明:
-1. `topics=meters` 和 `connection.database=power`, 表示订阅主题 meters 的数据,并写入数据库 power。
-2. `db.schemaless=line`, 表示使用 InfluxDB Line 协议格式的数据。
+1. `"topics": "meters"` 和 `"connection.database": "power"`, 表示订阅主题 meters 的数据,并写入数据库 power。
+2. `"db.schemaless": "line"`, 表示使用 InfluxDB Line 协议格式的数据。
-### 创建 Connector 实例
+### 创建 Sink Connector 实例
-```
-confluent local services connect connector load TDengineSinkConnector --config ./sink-demo.properties
+```shell
+curl -X POST -d @sink-demo.json http://localhost:8083/connectors -H "Content-Type: application/json"
```
若以上命令执行成功,则有如下输出:
@@ -241,7 +157,10 @@ confluent local services connect connector load TDengineSinkConnector --config .
"tasks.max": "1",
"topics": "meters",
"value.converter": "org.apache.kafka.connect.storage.StringConverter",
- "name": "TDengineSinkConnector"
+ "name": "TDengineSinkConnector",
+ "errors.tolerance": "all",
+ "errors.deadletterqueue.topic.name": "dead_letter_topic",
+ "errors.deadletterqueue.topic.replication.factor": "1",
},
"tasks": [],
"type": "sink"
@@ -261,8 +180,8 @@ meters,location=California.LosAngeles,groupid=3 current=11.3,voltage=221,phase=0
使用 kafka-console-producer 向主题 meters 添加测试数据。
-```
-cat test-data.txt | kafka-console-producer --broker-list localhost:9092 --topic meters
+```shell
+cat test-data.txt | kafka-console-producer.sh --broker-list localhost:9092 --topic meters
```
:::note
@@ -273,12 +192,12 @@ cat test-data.txt | kafka-console-producer --broker-list localhost:9092 --topic
使用 TDengine CLI 验证同步是否成功。
-```
+```sql
taos> use power;
Database changed.
taos> select * from meters;
- ts | current | voltage | phase | groupid | location |
+ _ts | current | voltage | phase | groupid | location |
===============================================================================================================================================================
2022-03-28 09:56:51.249000000 | 11.800000000 | 221.000000000 | 0.280000000 | 2 | California.LosAngeles |
2022-03-28 09:56:51.250000000 | 13.400000000 | 223.000000000 | 0.290000000 | 2 | California.LosAngeles |
@@ -297,29 +216,34 @@ TDengine Source Connector 会将 TDengine 数据表中的数据转换成 [Influx
下面的示例程序同步数据库 test 中的数据到主题 tdengine-source-test。
-### 添加配置文件
+### 添加 Source Connector 配置文件
-```
-vi source-demo.properties
+```shell
+vi source-demo.json
```
输入以下内容:
-```ini title="source-demo.properties"
-name=TDengineSourceConnector
-connector.class=com.taosdata.kafka.connect.source.TDengineSourceConnector
-tasks.max=1
-connection.url=jdbc:TAOS://127.0.0.1:6030
-connection.username=root
-connection.password=taosdata
-connection.database=test
-connection.attempts=3
-connection.backoff.ms=5000
-topic.prefix=tdengine-source-
-poll.interval.ms=1000
-fetch.max.rows=100
-key.converter=org.apache.kafka.connect.storage.StringConverter
-value.converter=org.apache.kafka.connect.storage.StringConverter
+```json title="source-demo.json"
+{
+ "name":"TDengineSourceConnector",
+ "config":{
+ "connector.class": "com.taosdata.kafka.connect.source.TDengineSourceConnector",
+ "tasks.max": 1,
+ "connection.url": "jdbc:TAOS://127.0.0.1:6030",
+ "connection.username": "root",
+ "connection.password": "taosdata",
+ "connection.database": "test",
+ "connection.attempts": 3,
+ "connection.backoff.ms": 5000,
+ "topic.prefix": "tdengine-source",
+ "poll.interval.ms": 1000,
+ "fetch.max.rows": 100,
+ "topic.per.stable": true,
+ "key.converter": "org.apache.kafka.connect.storage.StringConverter",
+ "value.converter": "org.apache.kafka.connect.storage.StringConverter"
+ }
+}
```
### 准备测试数据
@@ -344,27 +268,27 @@ INSERT INTO d1001 USING meters TAGS('California.SanFrancisco', 2) VALUES('2018-1
使用 TDengine CLI, 执行 SQL 文件。
-```
+```shell
taos -f prepare-source-data.sql
```
-### 创建 Connector 实例
+### 创建 Source Connector 实例
-```
-confluent local services connect connector load TDengineSourceConnector --config source-demo.properties
+```shell
+curl -X POST -d @source-demo.json http://localhost:8083/connectors -H "Content-Type: application/json"
```
### 查看 topic 数据
使用 kafka-console-consumer 命令行工具监控主题 tdengine-source-test 中的数据。一开始会输出所有历史数据, 往 TDengine 插入两条新的数据之后,kafka-console-consumer 也立即输出了新增的两条数据。 输出数据 InfluxDB line protocol 的格式。
-```
-kafka-console-consumer --bootstrap-server localhost:9092 --from-beginning --topic tdengine-source-test
+```shell
+kafka-console-consumer.sh --bootstrap-server localhost:9092 --from-beginning --topic tdengine-source-test-meters
```
输出:
-```
+```txt
......
meters,location="California.SanFrancisco",groupid=2i32 current=10.3f32,voltage=219i32,phase=0.31f32 1538548685000000000
meters,location="California.SanFrancisco",groupid=2i32 current=12.6f32,voltage=218i32,phase=0.33f32 1538548695000000000
@@ -373,7 +297,7 @@ meters,location="California.SanFrancisco",groupid=2i32 current=12.6f32,voltage=2
此时会显示所有历史数据。切换到 TDengine CLI, 插入两条新的数据:
-```
+```sql
USE test;
INSERT INTO d1001 VALUES (now, 13.3, 229, 0.38);
INSERT INTO d1002 VALUES (now, 16.3, 233, 0.22);
@@ -387,15 +311,15 @@ INSERT INTO d1002 VALUES (now, 16.3, 233, 0.22);
查看当前活跃的 connector:
-```
-confluent local services connect connector status
+```shell
+curl http://localhost:8083/connectors
```
如果按照前述操作,此时应有两个活跃的 connector。使用下面的命令 unload:
-```
-confluent local services connect connector unload TDengineSinkConnector
-confluent local services connect connector unload TDengineSourceConnector
+```shell
+curl -X DELETE http://localhost:8083/connectors/TDengineSinkConnector
+curl -X DELETE http://localhost:8083/connectors/TDengineSourceConnector
```
## 配置参考
@@ -442,15 +366,12 @@ confluent local services connect connector unload TDengineSourceConnector
## 其他说明
-1. 插件的安装位置可以自定义,请参考官方文档:https://docs.confluent.io/home/connect/self-managed/install.html#install-connector-manually。
-2. 本教程的示例程序使用了 Confluent 平台,但是 TDengine Kafka Connector 本身同样适用于独立安装的 Kafka, 且配置方法相同。关于如何在独立安装的 Kafka 环境使用 Kafka Connect 插件, 请参考官方文档: https://kafka.apache.org/documentation/#connect。
+1. 关于如何在独立安装的 Kafka 环境使用 Kafka Connect 插件, 请参考官方文档:。
## 问题反馈
-无论遇到任何问题,都欢迎在本项目的 Github 仓库反馈: https://github.com/taosdata/kafka-connect-tdengine/issues。
+无论遇到任何问题,都欢迎在本项目的 Github 仓库反馈:。
## 参考
-1. https://www.confluent.io/what-is-apache-kafka
-2. https://developer.confluent.io/learn-kafka/kafka-connect/intro
-3. https://docs.confluent.io/platform/current/platform.html
+1.
diff --git a/docs/zh/27-train-faq/01-faq.md b/docs/zh/27-train-faq/01-faq.md
index bf46f3ca1f7ebc6dda17e3cc30fe4d8b1a17867e..15397049dd79d5888242dd4ed17d8395f1d1096e 100644
--- a/docs/zh/27-train-faq/01-faq.md
+++ b/docs/zh/27-train-faq/01-faq.md
@@ -247,10 +247,17 @@ launchctl limit maxfiles
该提示是创建 db 的 vnode 数量不够了,需要的 vnode 不能超过了 dnode 中 vnode 的上限。因为系统默认是一个 dnode 中有 CPU 核数两倍的 vnode,也可以通过配置文件中的参数 supportVnodes 控制。
正常调大 taos.cfg 中 supportVnodes 参数即可。
-### 21 【查询】在服务器上的使用 tao-CLI 能查到指定时间段的数据,但在客户端机器上查不到?
+### 21 在服务器上使用 taos-CLI 能查到指定时间段的数据,但在客户端机器上查不到?
这种情况是因为客户端与服务器上设置的时区不一致导致的,调整客户端与服务器的时区一致即可解决。
-### 22 【表名】表名确认是存在的,但写入或查询时报表不存在错误,非常奇怪,什么原因?
+### 22 表名确认是存在的,但在写入或查询时返回表名不存在,什么原因?
TDengine 中的所有名称,包括数据库名、表名等都是区分大小写的,如果这些名称在程序或 taos-CLI 中没有使用反引号(`)括起来使用,即使你输入的是大写的,引擎也会转化成小写来使用,如果名称前后加上了反引号,引擎就不会再转化成小写,会保持原样来使用。
+### 23 在 taos-CLI 中查询,字段内容不能完全显示出来怎么办?
+可以使用 \G 参数来竖式显示,如 show databases\G; (为了输入方便,在"\"后加 TAB 键,会自动补全后面的内容)
+### 24 使用 taosBenchmark 测试工具写入数据查询很快,为什么我写入的数据查询非常慢?
+TDengine 在写入数据时如果有很严重的乱序写入问题,会严重影响查询性能,所以需要在写入前解决乱序的问题。如果业务是从 Kafka 消费写入,请合理设计消费者,尽可能让一个子表的数据由一个消费者去消费并写入,避免由设计产生的乱序。
+
+### 25 我想统计下前后两条写入记录之间的时间差值是多少?
+使用 DIFF 函数,可以查看时间列或数值列前后两条记录的差值,非常方便,详细说明见 SQL 手册 -> 函数 -> DIFF。
diff --git a/docs/zh/28-releases/01-tdengine.md b/docs/zh/28-releases/01-tdengine.md
index 2b28bae7454bd377a9198a37a479c1da0e6f2640..3ee19de84f0177d92555c05520137fce5deb911d 100644
--- a/docs/zh/28-releases/01-tdengine.md
+++ b/docs/zh/28-releases/01-tdengine.md
@@ -10,6 +10,10 @@ TDengine 2.x 各版本安装包请访问[这里](https://www.taosdata.com/all-do
import Release from "/components/ReleaseV3";
+## 3.0.5.0
+
+
+
## 3.0.4.2
diff --git a/docs/zh/28-releases/02-tools.md b/docs/zh/28-releases/02-tools.md
index 7f93483ed464a95b1a215f8121b78da29ccb7a04..fbd12b1440abd862f72361e5eb9b4443638f18cd 100644
--- a/docs/zh/28-releases/02-tools.md
+++ b/docs/zh/28-releases/02-tools.md
@@ -10,6 +10,10 @@ taosTools 各版本安装包下载链接如下:
import Release from "/components/ReleaseV3";
+## 2.5.1
+
+
+
## 2.5.0
diff --git a/examples/JDBC/consumer-demo/pom.xml b/examples/JDBC/consumer-demo/pom.xml
new file mode 100644
index 0000000000000000000000000000000000000000..aa3cb154e5df957c3ca8b7447c2a5a02168b6be7
--- /dev/null
+++ b/examples/JDBC/consumer-demo/pom.xml
@@ -0,0 +1,70 @@
+
+
+ 4.0.0
+
+ com.taosdata
+ consumer
+ 1.0-SNAPSHOT
+
+
+ 8
+ 8
+
+
+
+
+ com.taosdata.jdbc
+ taos-jdbcdriver
+ 3.2.1
+
+
+ com.google.guava
+ guava
+ 30.1.1-jre
+
+
+
+
+
+
+ org.apache.maven.plugins
+ maven-assembly-plugin
+ 3.3.0
+
+
+ ConsumerDemo
+
+ ConsumerDemo
+
+
+ com.taosdata.ConsumerDemo
+
+
+
+ jar-with-dependencies
+
+
+ package
+
+ single
+
+
+
+
+
+
+ org.apache.maven.plugins
+ maven-compiler-plugin
+
+ 8
+ 8
+ UTF-8
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/examples/JDBC/consumer-demo/readme.md b/examples/JDBC/consumer-demo/readme.md
new file mode 100644
index 0000000000000000000000000000000000000000..c211b017a74cd2a84bbab6953bb31500d73b3ba2
--- /dev/null
+++ b/examples/JDBC/consumer-demo/readme.md
@@ -0,0 +1,52 @@
+# How to Run the Consumer Demo Code On Linux OS
+TDengine's Consumer demo is organized as a Maven project so that users can easily compile, package and run it. If you don't have Maven on your server, you may install it using
+```
+sudo apt-get install maven
+```
+
+## Install TDengine Client and TaosAdapter
+Make sure you have already installed a TDengine client in your current development environment.
+Download the TDengine package from our website: ``https://www.taosdata.com/cn/all-downloads/`` and install the client.
+
+## Run Consumer Demo using mvn plugin
+run command:
+```
+mvn clean compile exec:java -Dexec.mainClass="com.taosdata.ConsumerDemo"
+```
+
+## Custom configuration
+```shell
+# the host of TDengine server
+export TAOS_HOST="127.0.0.1"
+
+# the port of TDengine server
+export TAOS_PORT="6041"
+
+# the consumer type, can be "ws" or "jni"
+export TAOS_TYPE="ws"
+
+# the number of consumers
+export TAOS_JDBC_CONSUMER_NUM="1"
+
+# the number of processors to consume
+export TAOS_JDBC_PROCESSOR_NUM="2"
+
+# the number of records to be consumed per processor per second
+export TAOS_JDBC_RATE_PER_PROCESSOR="1000"
+
+# poll wait time in ms
+export TAOS_JDBC_POLL_SLEEP="100"
+```
+
+## Run Consumer Demo using jar
+
+To compile the demo project, go to the source directory ``TDengine/examples/JDBC/consumer-demo`` and execute
+```
+mvn clean package assembly:single
+```
+
+To run ConsumerDemo.jar, go to ``TDengine/examples/JDBC/consumer-demo`` and execute
+```
+java -jar target/ConsumerDemo-jar-with-dependencies.jar
+```
+
diff --git a/examples/JDBC/consumer-demo/src/main/java/com/taosdata/Bean.java b/examples/JDBC/consumer-demo/src/main/java/com/taosdata/Bean.java
new file mode 100644
index 0000000000000000000000000000000000000000..2f2467b3713b2d6bcedd708f3ea4b20d3db53c23
--- /dev/null
+++ b/examples/JDBC/consumer-demo/src/main/java/com/taosdata/Bean.java
@@ -0,0 +1,43 @@
+package com.taosdata;
+
+import java.sql.Timestamp;
+
+public class Bean {
+ private Timestamp ts;
+ private Integer c1;
+ private String c2;
+
+ public Timestamp getTs() {
+ return ts;
+ }
+
+ public void setTs(Timestamp ts) {
+ this.ts = ts;
+ }
+
+ public Integer getC1() {
+ return c1;
+ }
+
+ public void setC1(Integer c1) {
+ this.c1 = c1;
+ }
+
+ public String getC2() {
+ return c2;
+ }
+
+ public void setC2(String c2) {
+ this.c2 = c2;
+ }
+
+ @Override
+ public String toString() {
+ final StringBuilder sb = new StringBuilder("Bean {");
+ sb.append("ts=").append(ts);
+ sb.append(", c1=").append(c1);
+ sb.append(", c2='").append(c2).append('\'');
+ sb.append('}');
+ return sb.toString();
+ }
+}
diff --git a/examples/JDBC/consumer-demo/src/main/java/com/taosdata/BeanDeserializer.java b/examples/JDBC/consumer-demo/src/main/java/com/taosdata/BeanDeserializer.java
new file mode 100644
index 0000000000000000000000000000000000000000..478af9e70da51657f9d2f7a49e7b919bf5399d55
--- /dev/null
+++ b/examples/JDBC/consumer-demo/src/main/java/com/taosdata/BeanDeserializer.java
@@ -0,0 +1,6 @@
+package com.taosdata;
+
+import com.taosdata.jdbc.tmq.ReferenceDeserializer;
+
+public class BeanDeserializer extends ReferenceDeserializer {
+}
diff --git a/examples/JDBC/consumer-demo/src/main/java/com/taosdata/Config.java b/examples/JDBC/consumer-demo/src/main/java/com/taosdata/Config.java
new file mode 100644
index 0000000000000000000000000000000000000000..08579926e3cbc1dcaa4c8c01027340d8c2635cb2
--- /dev/null
+++ b/examples/JDBC/consumer-demo/src/main/java/com/taosdata/Config.java
@@ -0,0 +1,78 @@
+package com.taosdata;
+
+public class Config {
+ public static final String TOPIC = "test_consumer";
+ public static final String TAOS_HOST = "127.0.0.1";
+ public static final String TAOS_PORT = "6041";
+ public static final String TAOS_TYPE = "ws";
+ public static final int TAOS_JDBC_CONSUMER_NUM = 1;
+ public static final int TAOS_JDBC_PROCESSOR_NUM = 2;
+ public static final int TAOS_JDBC_RATE_PER_PROCESSOR = 1000;
+ public static final int TAOS_JDBC_POLL_SLEEP = 100;
+
+ private final int consumerNum;
+ private final int processCapacity;
+ private final int rate;
+ private final int pollSleep;
+ private final String type;
+ private final String host;
+ private final String port;
+
+ public Config(String type, String host, String port, int consumerNum, int processCapacity, int rate, int pollSleep) {
+ this.type = type;
+ this.consumerNum = consumerNum;
+ this.processCapacity = processCapacity;
+ this.rate = rate;
+ this.pollSleep = pollSleep;
+ this.host = host;
+ this.port = port;
+ }
+
+ public int getConsumerNum() {
+ return consumerNum;
+ }
+
+ public int getProcessCapacity() {
+ return processCapacity;
+ }
+
+ public int getRate() {
+ return rate;
+ }
+
+ public int getPollSleep() {
+ return pollSleep;
+ }
+
+ public String getHost() {
+ return host;
+ }
+
+ public String getPort() {
+ return port;
+ }
+
+ public String getType() {
+ return type;
+ }
+
+ public static Config getFromENV() {
+ String host = System.getenv("TAOS_HOST") != null ? System.getenv("TAOS_HOST") : TAOS_HOST;
+ String port = System.getenv("TAOS_PORT") != null ? System.getenv("TAOS_PORT") : TAOS_PORT;
+ String type = System.getenv("TAOS_TYPE") != null ? System.getenv("TAOS_TYPE") : TAOS_TYPE;
+
+ String c = System.getenv("TAOS_JDBC_CONSUMER_NUM");
+ int num = c != null ? Integer.parseInt(c) : TAOS_JDBC_CONSUMER_NUM;
+
+ String p = System.getenv("TAOS_JDBC_PROCESSOR_NUM");
+ int capacity = p != null ? Integer.parseInt(p) : TAOS_JDBC_PROCESSOR_NUM;
+
+ String r = System.getenv("TAOS_JDBC_RATE_PER_PROCESSOR");
+ int rate = r != null ? Integer.parseInt(r) : TAOS_JDBC_RATE_PER_PROCESSOR;
+
+ String s = System.getenv("TAOS_JDBC_POLL_SLEEP");
+ int sleep = s != null ? Integer.parseInt(s) : TAOS_JDBC_POLL_SLEEP;
+
+ return new Config(type, host, port, num, capacity, rate, sleep);
+ }
+}
diff --git a/examples/JDBC/consumer-demo/src/main/java/com/taosdata/ConsumerDemo.java b/examples/JDBC/consumer-demo/src/main/java/com/taosdata/ConsumerDemo.java
new file mode 100644
index 0000000000000000000000000000000000000000..7c7719c63986cc309363c13fecb5fafe0243cdba
--- /dev/null
+++ b/examples/JDBC/consumer-demo/src/main/java/com/taosdata/ConsumerDemo.java
@@ -0,0 +1,65 @@
+package com.taosdata;
+
+import com.taosdata.jdbc.tmq.TMQConstants;
+
+import java.sql.Connection;
+import java.sql.DriverManager;
+import java.sql.SQLException;
+import java.sql.Statement;
+import java.util.Properties;
+import java.util.concurrent.Executors;
+import java.util.concurrent.ScheduledExecutorService;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicInteger;
+
+import static com.taosdata.Config.*;
+
+public class ConsumerDemo {
+ public static void main(String[] args) throws SQLException {
+ // Config
+ Config config = Config.getFromENV();
+ // Generated data
+ mockData();
+
+ Properties prop = new Properties();
+ prop.setProperty(TMQConstants.CONNECT_TYPE, config.getType());
+ prop.setProperty(TMQConstants.BOOTSTRAP_SERVERS, config.getHost() + ":" + config.getPort());
+ prop.setProperty(TMQConstants.CONNECT_USER, "root");
+ prop.setProperty(TMQConstants.CONNECT_PASS, "taosdata");
+ prop.setProperty(TMQConstants.MSG_WITH_TABLE_NAME, "true");
+ prop.setProperty(TMQConstants.ENABLE_AUTO_COMMIT, "true");
+ prop.setProperty(TMQConstants.GROUP_ID, "gId");
+ prop.setProperty(TMQConstants.VALUE_DESERIALIZER, "com.taosdata.BeanDeserializer");
+ for (int i = 0; i < config.getConsumerNum() - 1; i++) {
+ new Thread(new Worker(prop, config)).start();
+ }
+ new Worker(prop, config).run();
+ }
+
+ public static void mockData() throws SQLException {
+ String dbName = "test_consumer";
+ String tableName = "st";
+ String url = "jdbc:TAOS-RS://" + TAOS_HOST + ":" + TAOS_PORT + "/?user=root&password=taosdata&batchfetch=true";
+ Connection connection = DriverManager.getConnection(url);
+ Statement statement = connection.createStatement();
+ statement.executeUpdate("create database if not exists " + dbName + " WAL_RETENTION_PERIOD 3650");
+ statement.executeUpdate("use " + dbName);
+ statement.executeUpdate("create table if not exists " + tableName + " (ts timestamp, c1 int, c2 nchar(100)) ");
+ statement.executeUpdate("create topic if not exists " + TOPIC + " as select ts, c1, c2 from " + tableName);
+
+ ScheduledExecutorService scheduledExecutorService = Executors.newSingleThreadScheduledExecutor(r -> {
+ Thread t = new Thread(r);
+ t.setName("mock-data-thread-" + t.getId());
+ return t;
+ });
+ AtomicInteger atomic = new AtomicInteger();
+ scheduledExecutorService.scheduleWithFixedDelay(() -> {
+ int i = atomic.getAndIncrement();
+ try {
+ statement.executeUpdate("insert into " + tableName + " values(now, " + i + ",'" + i + "')");
+ } catch (SQLException e) {
+ // ignore
+ }
+ }, 0, 10, TimeUnit.MILLISECONDS);
+ }
+}
diff --git a/examples/JDBC/consumer-demo/src/main/java/com/taosdata/Worker.java b/examples/JDBC/consumer-demo/src/main/java/com/taosdata/Worker.java
new file mode 100644
index 0000000000000000000000000000000000000000..f6e21cd7294333aeb96a6e99f0cafe326073f5fa
--- /dev/null
+++ b/examples/JDBC/consumer-demo/src/main/java/com/taosdata/Worker.java
@@ -0,0 +1,60 @@
+package com.taosdata;
+
+import com.google.common.util.concurrent.RateLimiter;
+import com.taosdata.jdbc.tmq.ConsumerRecord;
+import com.taosdata.jdbc.tmq.ConsumerRecords;
+import com.taosdata.jdbc.tmq.TaosConsumer;
+
+import java.sql.SQLException;
+import java.time.Duration;
+import java.time.LocalDateTime;
+import java.util.Collections;
+import java.util.Properties;
+import java.util.concurrent.ForkJoinPool;
+import java.util.concurrent.Semaphore;
+
+public class Worker implements Runnable {
+
+ int sleepTime;
+ int rate;
+
+ ForkJoinPool pool = new ForkJoinPool();
+ Semaphore semaphore;
+
+ TaosConsumer consumer;
+
+ public Worker(Properties prop, Config config) throws SQLException {
+ consumer = new TaosConsumer<>(prop);
+ consumer.subscribe(Collections.singletonList(Config.TOPIC));
+ semaphore = new Semaphore(config.getProcessCapacity());
+ sleepTime = config.getPollSleep();
+ rate = config.getRate();
+ }
+
+ @Override
+ public void run() {
+ while (!Thread.interrupted()) {
+ try {
+ // 控制请求频率
+ if (semaphore.tryAcquire()) {
+ ConsumerRecords records = consumer.poll(Duration.ofMillis(sleepTime));
+ pool.submit(() -> {
+ RateLimiter limiter = RateLimiter.create(rate);
+ try {
+ for (ConsumerRecord record : records) {
+ // 流量控制
+ limiter.acquire();
+ // 业务处理数据
+ System.out.println("[" + LocalDateTime.now() + "] Thread id:" + Thread.currentThread().getId() + " -> " + record.value());
+ }
+ } finally {
+ semaphore.release();
+ }
+ });
+ }
+ } catch (SQLException e) {
+ e.printStackTrace();
+ }
+ }
+ }
+}
diff --git a/include/common/tcommon.h b/include/common/tcommon.h
index 520a8e9c2c03a6e455e1cc29846d42c6ba3e4561..d2352e100c3a3ef016a57f48f9db0f26809fc7e3 100644
--- a/include/common/tcommon.h
+++ b/include/common/tcommon.h
@@ -37,6 +37,13 @@ extern "C" {
)
// clang-format on
+typedef bool (*state_key_cmpr_fn)(void* pKey1, void* pKey2);
+
+typedef struct STableKeyInfo {
+ uint64_t uid;
+ uint64_t groupId;
+} STableKeyInfo;
+
typedef struct SWinKey {
uint64_t groupId;
TSKEY ts;
@@ -224,6 +231,7 @@ typedef struct SColumnInfoData {
};
SColumnInfo info; // column info
bool hasNull; // if current column data has null value.
+ bool reassigned; // if current column data is reassigned.
} SColumnInfoData;
typedef struct SQueryTableDataCond {
diff --git a/include/common/tdatablock.h b/include/common/tdatablock.h
index 33c571fc1be38ab246e0b2890dca0d9fce2365fe..6cb7d8852310082e0f1431c265c01752f5d527b7 100644
--- a/include/common/tdatablock.h
+++ b/include/common/tdatablock.h
@@ -178,6 +178,7 @@ int32_t getJsonValueLen(const char* data);
int32_t colDataSetVal(SColumnInfoData* pColumnInfoData, uint32_t rowIndex, const char* pData, bool isNull);
int32_t colDataAppend(SColumnInfoData* pColumnInfoData, uint32_t rowIndex, const char* pData, bool isNull);
+int32_t colDataReassignVal(SColumnInfoData* pColumnInfoData, uint32_t dstRowIdx, uint32_t srcRowIdx, const char* pData);
int32_t colDataSetNItems(SColumnInfoData* pColumnInfoData, uint32_t rowIndex, const char* pData, uint32_t numOfRows, bool trimValue);
int32_t colDataMergeCol(SColumnInfoData* pColumnInfoData, int32_t numOfRow1, int32_t* capacity,
const SColumnInfoData* pSource, int32_t numOfRow2);
@@ -247,6 +248,7 @@ int32_t buildSubmitReqFromDataBlock(SSubmitReq2** pReq, const SSDataBlock* pData
tb_uid_t suid);
char* buildCtbNameByGroupId(const char* stbName, uint64_t groupId);
+int32_t buildCtbNameByGroupIdImpl(const char* stbName, uint64_t groupId, char* pBuf);
static FORCE_INLINE int32_t blockGetEncodeSize(const SSDataBlock* pBlock) {
return blockDataGetSerialMetaSize(taosArrayGetSize(pBlock->pDataBlock)) + blockDataGetSize(pBlock);
diff --git a/include/common/tdataformat.h b/include/common/tdataformat.h
index 8be5cb4d413b9392702c628d72f27cb0662687a3..e04bdd1b07c00be375a86056976a1ee8aad7d558 100644
--- a/include/common/tdataformat.h
+++ b/include/common/tdataformat.h
@@ -145,7 +145,7 @@ int32_t tColDataCopy(SColData *pColDataFrom, SColData *pColData, xMallocFn xMall
extern void (*tColDataCalcSMA[])(SColData *pColData, int64_t *sum, int64_t *max, int64_t *min, int16_t *numOfNull);
// for stmt bind
-int32_t tColDataAddValueByBind(SColData *pColData, TAOS_MULTI_BIND *pBind);
+int32_t tColDataAddValueByBind(SColData *pColData, TAOS_MULTI_BIND *pBind, int32_t buffMaxLen);
void tColDataSortMerge(SArray *colDataArr);
// for raw block
diff --git a/include/common/tglobal.h b/include/common/tglobal.h
index abee37d12288e4550758a558912807649e8a181b..01281a6dc75884fa0672c42b02de876da5cedaf1 100644
--- a/include/common/tglobal.h
+++ b/include/common/tglobal.h
@@ -29,7 +29,6 @@ extern "C" {
#define SLOW_LOG_TYPE_OTHERS 0x4
#define SLOW_LOG_TYPE_ALL 0xFFFFFFFF
-
// cluster
extern char tsFirst[];
extern char tsSecond[];
@@ -83,6 +82,7 @@ extern int64_t tsVndCommitMaxIntervalMs;
// mnode
extern int64_t tsMndSdbWriteDelta;
extern int64_t tsMndLogRetention;
+extern bool tsMndSkipGrant;
// monitor
extern bool tsEnableMonitor;
@@ -131,7 +131,7 @@ extern int32_t tsSlowLogScope;
// client
extern int32_t tsMinSlidingTime;
extern int32_t tsMinIntervalTime;
-extern int32_t tsMaxMemUsedByInsert;
+extern int32_t tsMaxInsertBatchRows;
// build info
extern char version[];
@@ -180,6 +180,8 @@ extern int32_t tsRpcRetryInterval;
extern bool tsDisableStream;
extern int64_t tsStreamBufferSize;
extern int64_t tsCheckpointInterval;
+extern bool tsFilterScalarMode;
+extern int32_t tsMaxStreamBackendCache;
// #define NEEDTO_COMPRESSS_MSG(size) (tsCompressMsgSize != -1 && (size) > tsCompressMsgSize)
diff --git a/include/common/tmsg.h b/include/common/tmsg.h
index f4502030fdfc4d4ad8b68808ab72892f496a0229..d78e771fcf08af0f864ce42a3b944ab93b16b833 100644
--- a/include/common/tmsg.h
+++ b/include/common/tmsg.h
@@ -2009,10 +2009,8 @@ typedef struct {
int8_t withMeta;
char* sql;
char subDbName[TSDB_DB_FNAME_LEN];
- union {
- char* ast;
- char subStbName[TSDB_TABLE_FNAME_LEN];
- };
+ char* ast;
+ char subStbName[TSDB_TABLE_FNAME_LEN];
} SCMCreateTopicReq;
int32_t tSerializeSCMCreateTopicReq(void* buf, int32_t bufLen, const SCMCreateTopicReq* pReq);
@@ -2809,37 +2807,49 @@ typedef struct {
int64_t suid;
} SMqRebVgReq;
-static FORCE_INLINE int32_t tEncodeSMqRebVgReq(void** buf, const SMqRebVgReq* pReq) {
- int32_t tlen = 0;
- tlen += taosEncodeFixedI64(buf, pReq->leftForVer);
- tlen += taosEncodeFixedI32(buf, pReq->vgId);
- tlen += taosEncodeFixedI64(buf, pReq->oldConsumerId);
- tlen += taosEncodeFixedI64(buf, pReq->newConsumerId);
- tlen += taosEncodeString(buf, pReq->subKey);
- tlen += taosEncodeFixedI8(buf, pReq->subType);
- tlen += taosEncodeFixedI8(buf, pReq->withMeta);
+static FORCE_INLINE int tEncodeSMqRebVgReq(SEncoder *pCoder, const SMqRebVgReq* pReq) {
+ if (tStartEncode(pCoder) < 0) return -1;
+ if (tEncodeI64(pCoder, pReq->leftForVer) < 0) return -1;
+ if (tEncodeI32(pCoder, pReq->vgId) < 0) return -1;
+ if (tEncodeI64(pCoder, pReq->oldConsumerId) < 0) return -1;
+ if (tEncodeI64(pCoder, pReq->newConsumerId) < 0) return -1;
+ if (tEncodeCStr(pCoder, pReq->subKey) < 0) return -1;
+ if (tEncodeI8(pCoder, pReq->subType) < 0) return -1;
+ if (tEncodeI8(pCoder, pReq->withMeta) < 0) return -1;
+
if (pReq->subType == TOPIC_SUB_TYPE__COLUMN) {
- tlen += taosEncodeString(buf, pReq->qmsg);
+ if (tEncodeCStr(pCoder, pReq->qmsg) < 0) return -1;
} else if (pReq->subType == TOPIC_SUB_TYPE__TABLE) {
- tlen += taosEncodeFixedI64(buf, pReq->suid);
+ if (tEncodeI64(pCoder, pReq->suid) < 0) return -1;
+ if (tEncodeCStr(pCoder, pReq->qmsg) < 0) return -1;
}
- return tlen;
+ tEndEncode(pCoder);
+ return 0;
}
-static FORCE_INLINE void* tDecodeSMqRebVgReq(const void* buf, SMqRebVgReq* pReq) {
- buf = taosDecodeFixedI64(buf, &pReq->leftForVer);
- buf = taosDecodeFixedI32(buf, &pReq->vgId);
- buf = taosDecodeFixedI64(buf, &pReq->oldConsumerId);
- buf = taosDecodeFixedI64(buf, &pReq->newConsumerId);
- buf = taosDecodeStringTo(buf, pReq->subKey);
- buf = taosDecodeFixedI8(buf, &pReq->subType);
- buf = taosDecodeFixedI8(buf, &pReq->withMeta);
+static FORCE_INLINE int tDecodeSMqRebVgReq(SDecoder *pCoder, SMqRebVgReq* pReq) {
+ if (tStartDecode(pCoder) < 0) return -1;
+
+ if (tDecodeI64(pCoder, &pReq->leftForVer) < 0) return -1;
+
+ if (tDecodeI32(pCoder, &pReq->vgId) < 0) return -1;
+ if (tDecodeI64(pCoder, &pReq->oldConsumerId) < 0) return -1;
+ if (tDecodeI64(pCoder, &pReq->newConsumerId) < 0) return -1;
+ if (tDecodeCStrTo(pCoder, pReq->subKey) < 0) return -1;
+ if (tDecodeI8(pCoder, &pReq->subType) < 0) return -1;
+ if (tDecodeI8(pCoder, &pReq->withMeta) < 0) return -1;
+
if (pReq->subType == TOPIC_SUB_TYPE__COLUMN) {
- buf = taosDecodeString(buf, &pReq->qmsg);
+ if (tDecodeCStr(pCoder, &pReq->qmsg) < 0) return -1;
} else if (pReq->subType == TOPIC_SUB_TYPE__TABLE) {
- buf = taosDecodeFixedI64(buf, &pReq->suid);
+ if (tDecodeI64(pCoder, &pReq->suid) < 0) return -1;
+ if (!tDecodeIsEnd(pCoder)){
+ if (tDecodeCStr(pCoder, &pReq->qmsg) < 0) return -1;
+ }
}
- return (void*)buf;
+
+ tEndDecode(pCoder);
+ return 0;
}
typedef struct {
diff --git a/include/common/ttime.h b/include/common/ttime.h
index f189959f22ca4bee4b07518aff00e7bbc527fb87..de74e48100e337a516d6fa3848317074be56ec1f 100644
--- a/include/common/ttime.h
+++ b/include/common/ttime.h
@@ -23,7 +23,7 @@
extern "C" {
#endif
-#define TIME_IS_VAR_DURATION(_t) ((_t) == 'n' || (_t) == 'y' || (_t) == 'N' || (_t) == 'Y')
+#define IS_CALENDAR_TIME_DURATION(_t) ((_t) == 'n' || (_t) == 'y' || (_t) == 'N' || (_t) == 'Y')
#define TIME_UNIT_NANOSECOND 'b'
#define TIME_UNIT_MICROSECOND 'u'
@@ -74,7 +74,7 @@ static FORCE_INLINE int64_t taosGetTimestampToday(int32_t precision) {
int64_t taosTimeAdd(int64_t t, int64_t duration, char unit, int32_t precision);
-int64_t taosTimeTruncate(int64_t t, const SInterval* pInterval, int32_t precision);
+int64_t taosTimeTruncate(int64_t ts, const SInterval* pInterval);
int32_t taosTimeCountInterval(int64_t skey, int64_t ekey, int64_t interval, char unit, int32_t precision);
int32_t parseAbsoluteDuration(const char* token, int32_t tokenlen, int64_t* ts, char* unit, int32_t timePrecision);
diff --git a/include/common/ttokendef.h b/include/common/ttokendef.h
index 8e6c014be997285bf0e129ab41dace03c2131909..5410e9af887043930fc039a4c5de3466e4198437 100644
--- a/include/common/ttokendef.h
+++ b/include/common/ttokendef.h
@@ -354,6 +354,7 @@
#define TK_WAL 336
+
#define TK_NK_SPACE 600
#define TK_NK_COMMENT 601
#define TK_NK_ILLEGAL 602
diff --git a/include/libs/executor/dataSinkMgt.h b/include/libs/executor/dataSinkMgt.h
index ce7d038d42c8f37bec730111d6d26d946c47a168..0a9037d21cff5e87e5f0da3b0948651e291ff781 100644
--- a/include/libs/executor/dataSinkMgt.h
+++ b/include/libs/executor/dataSinkMgt.h
@@ -59,7 +59,7 @@ typedef struct SDataSinkMgtCfg {
uint32_t maxDataBlockNumPerQuery;
} SDataSinkMgtCfg;
-int32_t dsDataSinkMgtInit(SDataSinkMgtCfg* cfg);
+int32_t dsDataSinkMgtInit(SDataSinkMgtCfg* cfg, SStorageAPI* pAPI);
typedef struct SInputData {
const struct SSDataBlock* pData;
diff --git a/include/libs/executor/executor.h b/include/libs/executor/executor.h
index 1fb00e743fb33f35b2a2dd4344b2f81214832c2d..3f53976c67834285593ec45e4cfb98001f14ea52 100644
--- a/include/libs/executor/executor.h
+++ b/include/libs/executor/executor.h
@@ -23,6 +23,7 @@ extern "C" {
#include "query.h"
#include "tcommon.h"
#include "tmsgcb.h"
+#include "storageapi.h"
typedef void* qTaskInfo_t;
typedef void* DataSinkHandle;
@@ -41,7 +42,6 @@ typedef struct {
typedef struct {
void* tqReader;
- void* meta;
void* config;
void* vnode;
void* mnd;
@@ -51,10 +51,10 @@ typedef struct {
bool initTableReader;
bool initTqReader;
int32_t numOfVgroups;
+ void* sContext; // SSnapContext*
- void* sContext; // SSnapContext*
-
- void* pStateBackend;
+ void* pStateBackend;
+ struct SStorageAPI api;
} SReadHandle;
// in queue mode, data streams are seperated by msg
@@ -82,6 +82,8 @@ qTaskInfo_t qCreateStreamExecTaskInfo(void* msg, SReadHandle* readers, int32_t v
qTaskInfo_t qCreateQueueExecTaskInfo(void* msg, SReadHandle* pReaderHandle, int32_t vgId, int32_t* numOfCols,
uint64_t id);
+int32_t qGetTableList(int64_t suid, void* pVnode, void* node, SArray **tableList, void* pTaskInfo);
+
/**
* set the task Id, usually used by message queue process
* @param tinfo
@@ -90,6 +92,8 @@ qTaskInfo_t qCreateQueueExecTaskInfo(void* msg, SReadHandle* pReaderHandle, int3
*/
void qSetTaskId(qTaskInfo_t tinfo, uint64_t taskId, uint64_t queryId);
+//void qSetTaskCode(qTaskInfo_t tinfo, int32_t code);
+
int32_t qSetStreamOpOpen(qTaskInfo_t tinfo);
// todo refactor
@@ -186,7 +190,17 @@ int32_t qSerializeTaskStatus(qTaskInfo_t tinfo, char** pOutput, int32_t* len);
int32_t qDeserializeTaskStatus(qTaskInfo_t tinfo, const char* pInput, int32_t len);
-STimeWindow getAlignQueryTimeWindow(SInterval* pInterval, int32_t precision, int64_t key);
+void getNextTimeWindow(const SInterval* pInterval, STimeWindow* tw, int32_t order);
+void getInitialStartTimeWindow(SInterval* pInterval, TSKEY ts, STimeWindow* w, bool ascQuery);
+STimeWindow getAlignQueryTimeWindow(const SInterval* pInterval, int64_t key);
+/**
+ * return the scan info, in the form of tuple of two items, including table uid and current timestamp
+ * @param tinfo
+ * @param uid
+ * @param ts
+ * @return
+ */
+int32_t qGetStreamScanStatus(qTaskInfo_t tinfo, uint64_t* uid, int64_t* ts);
SArray* qGetQueriedTableListInfo(qTaskInfo_t tinfo);
diff --git a/include/libs/executor/storageapi.h b/include/libs/executor/storageapi.h
new file mode 100644
index 0000000000000000000000000000000000000000..e2944d13dadf12391fd256846ab23cf7fb11e3fc
--- /dev/null
+++ b/include/libs/executor/storageapi.h
@@ -0,0 +1,436 @@
+/*
+ * Copyright (c) 2019 TAOS Data, Inc.
+ *
+ * This program is free software: you can use, redistribute, and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3
+ * or later ("AGPL"), as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see .
+ */
+
+#ifndef TDENGINE_STORAGEAPI_H
+#define TDENGINE_STORAGEAPI_H
+
+#include "function.h"
+#include "index.h"
+#include "taosdef.h"
+#include "tcommon.h"
+#include "tmsg.h"
+#include "tscalablebf.h"
+#include "tsimplehash.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define TIMEWINDOW_RANGE_CONTAINED 1
+#define TIMEWINDOW_RANGE_EXTERNAL 2
+
+#define CACHESCAN_RETRIEVE_TYPE_ALL 0x1
+#define CACHESCAN_RETRIEVE_TYPE_SINGLE 0x2
+#define CACHESCAN_RETRIEVE_LAST_ROW 0x4
+#define CACHESCAN_RETRIEVE_LAST 0x8
+
+#define META_READER_NOLOCK 0x1
+
+typedef struct SMeta SMeta;
+typedef TSKEY (*GetTsFun)(void*);
+
+typedef struct SMetaEntry {
+ int64_t version;
+ int8_t type;
+ int8_t flags; // TODO: need refactor?
+ tb_uid_t uid;
+ char* name;
+ union {
+ struct {
+ SSchemaWrapper schemaRow;
+ SSchemaWrapper schemaTag;
+ SRSmaParam rsmaParam;
+ } stbEntry;
+ struct {
+ int64_t ctime;
+ int32_t ttlDays;
+ int32_t commentLen;
+ char* comment;
+ tb_uid_t suid;
+ uint8_t* pTags;
+ } ctbEntry;
+ struct {
+ int64_t ctime;
+ int32_t ttlDays;
+ int32_t commentLen;
+ char* comment;
+ int32_t ncid; // next column id
+ SSchemaWrapper schemaRow;
+ } ntbEntry;
+ struct {
+ STSma* tsma;
+ } smaEntry;
+ };
+
+ uint8_t* pBuf;
+} SMetaEntry;
+
+typedef struct SMetaReader {
+ int32_t flags;
+ void* pMeta;
+ SDecoder coder;
+ SMetaEntry me;
+ void* pBuf;
+ int32_t szBuf;
+ struct SStoreMeta* pAPI;
+} SMetaReader;
+
+typedef struct SMTbCursor {
+ void* pMeta;
+ void* pDbc;
+ void* pKey;
+ void* pVal;
+ int32_t kLen;
+ int32_t vLen;
+ SMetaReader mr;
+ int8_t paused;
+} SMTbCursor;
+
+typedef struct SRowBuffPos {
+ void* pRowBuff;
+ void* pKey;
+ bool beFlushed;
+ bool beUsed;
+} SRowBuffPos;
+
+// tq
+typedef struct SMetaTableInfo {
+ int64_t suid;
+ int64_t uid;
+ SSchemaWrapper* schema;
+ char tbName[TSDB_TABLE_NAME_LEN];
+} SMetaTableInfo;
+
+typedef struct SSnapContext {
+ SMeta* pMeta; // todo remove it
+ int64_t snapVersion;
+ void* pCur;
+ int64_t suid;
+ int8_t subType;
+ SHashObj* idVersion;
+ SHashObj* suidInfo;
+ SArray* idList;
+ int32_t index;
+ bool withMeta;
+ bool queryMeta; // true-get meta, false-get data
+} SSnapContext;
+
+typedef struct {
+ int64_t uid;
+ int64_t ctbNum;
+} SMetaStbStats;
+
+// void tqReaderSetColIdList(STqReader *pReader, SArray *pColIdList);
+// int32_t tqReaderSetTbUidList(STqReader *pReader, const SArray *tbUidList);
+// int32_t tqReaderAddTbUidList(STqReader *pReader, const SArray *pTableUidList);
+// int32_t tqReaderRemoveTbUidList(STqReader *pReader, const SArray *tbUidList);
+// bool tqReaderIsQueriedTable(STqReader* pReader, uint64_t uid);
+// bool tqCurrentBlockConsumed(const STqReader* pReader);
+// int32_t tqReaderSeek(STqReader *pReader, int64_t ver, const char *id);
+// bool tqNextBlockInWal(STqReader* pReader, const char* idstr);
+// bool tqNextBlockImpl(STqReader *pReader, const char* idstr);
+// int32_t getTableInfoFromSnapshot(SSnapContext *ctx, void **pBuf, int32_t *contLen, int16_t *type, int64_t
+// *uid); SMetaTableInfo getMetaTableInfoFromSnapshot(SSnapContext *ctx); int32_t setForSnapShot(SSnapContext
+// *ctx, int64_t uid); int32_t destroySnapContext(SSnapContext *ctx);
+
+// clang-format off
+/*-------------------------------------------------new api format---------------------------------------------------*/
+typedef struct TsdReader {
+ int32_t (*tsdReaderOpen)(void* pVnode, SQueryTableDataCond* pCond, void* pTableList, int32_t numOfTables,
+ SSDataBlock* pResBlock, void** ppReader, const char* idstr, bool countOnly,
+ SHashObj** pIgnoreTables);
+ void (*tsdReaderClose)();
+ void (*tsdSetReaderTaskId)(void *pReader, const char *pId);
+ int32_t (*tsdSetQueryTableList)();
+ int32_t (*tsdNextDataBlock)();
+
+ int32_t (*tsdReaderRetrieveBlockSMAInfo)();
+ SSDataBlock *(*tsdReaderRetrieveDataBlock)();
+
+ void (*tsdReaderReleaseDataBlock)();
+
+ int32_t (*tsdReaderResetStatus)();
+ int32_t (*tsdReaderGetDataBlockDistInfo)();
+ int64_t (*tsdReaderGetNumOfInMemRows)();
+ void (*tsdReaderNotifyClosing)();
+} TsdReader;
+
+typedef struct SStoreCacheReader {
+ int32_t (*openReader)(void *pVnode, int32_t type, void *pTableIdList, int32_t numOfTables, int32_t numOfCols,
+ SArray *pCidList, int32_t *pSlotIds, uint64_t suid, void **pReader, const char *idstr);
+ void *(*closeReader)(void *pReader);
+ int32_t (*retrieveRows)(void *pReader, SSDataBlock *pResBlock, const int32_t *slotIds, const int32_t *dstSlotIds,
+ SArray *pTableUidList);
+ int32_t (*reuseReader)(void *pReader, void *pTableIdList, int32_t numOfTables);
+} SStoreCacheReader;
+
+// clang-format on
+
+/*------------------------------------------------------------------------------------------------------------------*/
+/*
+void tqReaderSetColIdList(STqReader *pReader, SArray *pColIdList);
+int32_t tqReaderSetTbUidList(STqReader *pReader, const SArray *tbUidList);
+int32_t tqReaderAddTbUidList(STqReader *pReader, const SArray *pTableUidList);
+int32_t tqReaderRemoveTbUidList(STqReader *pReader, const SArray *tbUidList);
+bool tqReaderIsQueriedTable(STqReader* pReader, uint64_t uid);
+bool tqCurrentBlockConsumed(const STqReader* pReader);
+
+int32_t tqReaderSeek(STqReader *pReader, int64_t ver, const char *id);
+bool tqNextBlockInWal(STqReader* pReader, const char* idstr);
+bool tqNextBlockImpl(STqReader *pReader, const char* idstr);
+
+ int32_t tqRetrieveDataBlock(STqReader *pReader, SSDataBlock **pRes, const char* idstr);
+STqReader *tqReaderOpen(void *pVnode);
+void tqReaderClose(STqReader *);
+
+int32_t tqReaderSetSubmitMsg(STqReader *pReader, void *msgStr, int32_t msgLen, int64_t ver);
+bool tqNextDataBlockFilterOut(STqReader *pReader, SHashObj *filterOutUids);
+SWalReader* tqGetWalReader(STqReader* pReader);
+int32_t tqRetrieveTaosxBlock(STqReader *pReader, SArray *blocks, SArray *schemas, SSubmitTbData **pSubmitTbDataRet);
+*/
+// todo rename
+typedef struct SStoreTqReader {
+ struct STqReader* (*tqReaderOpen)();
+ void (*tqReaderClose)();
+
+ int32_t (*tqReaderSeek)();
+ int32_t (*tqRetrieveBlock)();
+ bool (*tqReaderNextBlockInWal)();
+ bool (*tqNextBlockImpl)(); // todo remove it
+ SSDataBlock* (*tqGetResultBlock)();
+
+ void (*tqReaderSetColIdList)();
+ int32_t (*tqReaderSetQueryTableList)();
+
+ int32_t (*tqReaderAddTables)();
+ int32_t (*tqReaderRemoveTables)();
+
+ bool (*tqReaderIsQueriedTable)();
+ bool (*tqReaderCurrentBlockConsumed)();
+
+ struct SWalReader* (*tqReaderGetWalReader)(); // todo remove it
+ int32_t (*tqReaderRetrieveTaosXBlock)(); // todo remove it
+
+ int32_t (*tqReaderSetSubmitMsg)(); // todo remove it
+ bool (*tqReaderNextBlockFilterOut)();
+} SStoreTqReader;
+
+typedef struct SStoreSnapshotFn {
+ int32_t (*createSnapshot)(SSnapContext* ctx, int64_t uid);
+ int32_t (*destroySnapshot)(SSnapContext* ctx);
+ SMetaTableInfo (*getMetaTableInfoFromSnapshot)(SSnapContext* ctx);
+ int32_t (*getTableInfoFromSnapshot)(SSnapContext* ctx, void** pBuf, int32_t* contLen, int16_t* type, int64_t* uid);
+} SStoreSnapshotFn;
+
+/**
+void metaReaderInit(SMetaReader *pReader, SMeta *pMeta, int32_t flags);
+void metaReaderReleaseLock(SMetaReader *pReader);
+void metaReaderClear(SMetaReader *pReader);
+int32_t metaReaderGetTableEntryByUid(SMetaReader *pReader, tb_uid_t uid);
+int32_t metaReaderGetTableEntryByUidCache(SMetaReader *pReader, tb_uid_t uid);
+int32_t metaGetTableTags(SMeta *pMeta, uint64_t suid, SArray *uidList);
+const void *metaGetTableTagVal(void *tag, int16_t type, STagVal *tagVal);
+int metaGetTableNameByUid(void *meta, uint64_t uid, char *tbName);
+
+int metaGetTableUidByName(void *meta, char *tbName, uint64_t *uid);
+int metaGetTableTypeByName(void *meta, char *tbName, ETableType *tbType);
+bool metaIsTableExist(SMeta *pMeta, tb_uid_t uid);
+int32_t metaGetCachedTableUidList(SMeta *pMeta, tb_uid_t suid, const uint8_t *key, int32_t keyLen, SArray *pList,
+ bool *acquired);
+int32_t metaUidFilterCachePut(SMeta *pMeta, uint64_t suid, const void *pKey, int32_t keyLen, void *pPayload,
+ int32_t payloadLen, double selectivityRatio);
+tb_uid_t metaGetTableEntryUidByName(SMeta *pMeta, const char *name);
+int32_t metaGetCachedTbGroup(SMeta* pMeta, tb_uid_t suid, const uint8_t* pKey, int32_t keyLen, SArray** pList);
+int32_t metaPutTbGroupToCache(SMeta* pMeta, uint64_t suid, const void* pKey, int32_t keyLen, void* pPayload, int32_t
+payloadLen);
+ */
+
+typedef struct SStoreMeta {
+ SMTbCursor* (*openTableMetaCursor)(void* pVnode); // metaOpenTbCursor
+ void (*closeTableMetaCursor)(SMTbCursor* pTbCur); // metaCloseTbCursor
+ void (*pauseTableMetaCursor)(SMTbCursor* pTbCur); // metaPauseTbCursor
+ void (*resumeTableMetaCursor)(SMTbCursor* pTbCur, int8_t first); // metaResumeTbCursor
+ int32_t (*cursorNext)(SMTbCursor* pTbCur, ETableType jumpTableType); // metaTbCursorNext
+ int32_t (*cursorPrev)(SMTbCursor* pTbCur, ETableType jumpTableType); // metaTbCursorPrev
+
+ int32_t (*getTableTags)(void* pVnode, uint64_t suid, SArray* uidList);
+ int32_t (*getTableTagsByUid)(void* pVnode, int64_t suid, SArray* uidList);
+ const void* (*extractTagVal)(const void* tag, int16_t type, STagVal* tagVal); // todo remove it
+
+ int32_t (*getTableUidByName)(void* pVnode, char* tbName, uint64_t* uid);
+ int32_t (*getTableTypeByName)(void* pVnode, char* tbName, ETableType* tbType);
+ int32_t (*getTableNameByUid)(void* pVnode, uint64_t uid, char* tbName);
+ bool (*isTableExisted)(void* pVnode, tb_uid_t uid);
+
+ int32_t (*metaGetCachedTbGroup)(void* pVnode, tb_uid_t suid, const uint8_t* pKey, int32_t keyLen, SArray** pList);
+ int32_t (*metaPutTbGroupToCache)(void* pVnode, uint64_t suid, const void* pKey, int32_t keyLen, void* pPayload,
+ int32_t payloadLen);
+
+ int32_t (*getCachedTableList)(void* pVnode, tb_uid_t suid, const uint8_t* pKey, int32_t keyLen, SArray* pList1,
+ bool* acquireRes);
+ int32_t (*putCachedTableList)(void* pVnode, uint64_t suid, const void* pKey, int32_t keyLen, void* pPayload,
+ int32_t payloadLen, double selectivityRatio);
+
+ void* (*storeGetIndexInfo)();
+ void* (*getInvertIndex)(void* pVnode);
+ int32_t (*getChildTableList)(
+ void* pVnode, int64_t suid,
+ SArray* list); // support filter and non-filter cases. [vnodeGetCtbIdList & vnodeGetCtbIdListByFilter]
+ int32_t (*storeGetTableList)(void* pVnode, int8_t type, SArray* pList); // vnodeGetStbIdList & vnodeGetAllTableList
+ void* storeGetVersionRange;
+ void* storeGetLastTimestamp;
+
+ int32_t (*getTableSchema)(void* pVnode, int64_t uid, STSchema** pSchema, int64_t* suid); // tsdbGetTableSchema
+
+ // db name, vgId, numOfTables, numOfSTables
+ int32_t (*getNumOfChildTables)(
+ void* pVnode, int64_t uid,
+ int64_t* numOfTables); // int32_t metaGetStbStats(SMeta *pMeta, int64_t uid, SMetaStbStats *pInfo);
+ void (*getBasicInfo)(void* pVnode, const char** dbname, int32_t* vgId, int64_t* numOfTables,
+ int64_t* numOfNormalTables); // vnodeGetInfo(void *pVnode, const char **dbname, int32_t *vgId) &
+ // metaGetTbNum(SMeta *pMeta) & metaGetNtbNum(SMeta *pMeta);
+
+ int64_t (*getNumOfRowsInMem)(void* pVnode);
+ /**
+int32_t vnodeGetCtbIdList(void *pVnode, int64_t suid, SArray *list);
+int32_t vnodeGetCtbIdListByFilter(void *pVnode, int64_t suid, SArray *list, bool (*filter)(void *arg), void *arg);
+int32_t vnodeGetStbIdList(void *pVnode, int64_t suid, SArray *list);
+ */
+} SStoreMeta;
+
+typedef struct SStoreMetaReader {
+ void (*initReader)(SMetaReader* pReader, void* pVnode, int32_t flags, SStoreMeta* pAPI);
+ void (*clearReader)(SMetaReader* pReader);
+ void (*readerReleaseLock)(SMetaReader* pReader);
+ int32_t (*getTableEntryByUid)(SMetaReader* pReader, tb_uid_t uid);
+ int32_t (*getTableEntryByName)(SMetaReader* pReader, const char* name);
+ int32_t (*getEntryGetUidCache)(SMetaReader* pReader, tb_uid_t uid);
+} SStoreMetaReader;
+
+typedef struct SUpdateInfo {
+ SArray* pTsBuckets;
+ uint64_t numBuckets;
+ SArray* pTsSBFs;
+ uint64_t numSBFs;
+ int64_t interval;
+ int64_t watermark;
+ TSKEY minTS;
+ SScalableBf* pCloseWinSBF;
+ SHashObj* pMap;
+ uint64_t maxDataVersion;
+} SUpdateInfo;
+
+typedef struct {
+ void* iter; // rocksdb_iterator_t* iter;
+ void* snapshot; // rocksdb_snapshot_t* snapshot;
+ void* readOpt; // rocksdb_readoptions_t* readOpt;
+ void* db; // rocksdb_t* db;
+ void* pCur;
+ int64_t number;
+} SStreamStateCur;
+
+typedef struct SStateStore {
+ int32_t (*streamStatePutParName)(SStreamState* pState, int64_t groupId, const char* tbname);
+ int32_t (*streamStateGetParName)(SStreamState* pState, int64_t groupId, void** pVal);
+
+ int32_t (*streamStateAddIfNotExist)(SStreamState* pState, const SWinKey* key, void** pVal, int32_t* pVLen);
+ int32_t (*streamStateReleaseBuf)(SStreamState* pState, const SWinKey* key, void* pVal);
+ void (*streamStateFreeVal)(void* val);
+
+ int32_t (*streamStatePut)(SStreamState* pState, const SWinKey* key, const void* value, int32_t vLen);
+ int32_t (*streamStateGet)(SStreamState* pState, const SWinKey* key, void** pVal, int32_t* pVLen);
+ bool (*streamStateCheck)(SStreamState* pState, const SWinKey* key);
+ int32_t (*streamStateGetByPos)(SStreamState* pState, void* pos, void** pVal);
+ int32_t (*streamStateDel)(SStreamState* pState, const SWinKey* key);
+ int32_t (*streamStateClear)(SStreamState* pState);
+ void (*streamStateSetNumber)(SStreamState* pState, int32_t number);
+ int32_t (*streamStateSaveInfo)(SStreamState* pState, void* pKey, int32_t keyLen, void* pVal, int32_t vLen);
+ int32_t (*streamStateGetInfo)(SStreamState* pState, void* pKey, int32_t keyLen, void** pVal, int32_t* pLen);
+
+ int32_t (*streamStateFillPut)(SStreamState* pState, const SWinKey* key, const void* value, int32_t vLen);
+ int32_t (*streamStateFillGet)(SStreamState* pState, const SWinKey* key, void** pVal, int32_t* pVLen);
+ int32_t (*streamStateFillDel)(SStreamState* pState, const SWinKey* key);
+
+ int32_t (*streamStateCurNext)(SStreamState* pState, SStreamStateCur* pCur);
+ int32_t (*streamStateCurPrev)(SStreamState* pState, SStreamStateCur* pCur);
+
+ SStreamStateCur* (*streamStateGetAndCheckCur)(SStreamState* pState, SWinKey* key);
+ SStreamStateCur* (*streamStateSeekKeyNext)(SStreamState* pState, const SWinKey* key);
+ SStreamStateCur* (*streamStateFillSeekKeyNext)(SStreamState* pState, const SWinKey* key);
+ SStreamStateCur* (*streamStateFillSeekKeyPrev)(SStreamState* pState, const SWinKey* key);
+ void (*streamStateFreeCur)(SStreamStateCur* pCur);
+
+ int32_t (*streamStateGetGroupKVByCur)(SStreamStateCur* pCur, SWinKey* pKey, const void** pVal, int32_t* pVLen);
+ int32_t (*streamStateGetKVByCur)(SStreamStateCur* pCur, SWinKey* pKey, const void** pVal, int32_t* pVLen);
+
+ int32_t (*streamStateSessionAddIfNotExist)(SStreamState* pState, SSessionKey* key, TSKEY gap, void** pVal,
+ int32_t* pVLen);
+ int32_t (*streamStateSessionPut)(SStreamState* pState, const SSessionKey* key, const void* value, int32_t vLen);
+ int32_t (*streamStateSessionGet)(SStreamState* pState, SSessionKey* key, void** pVal, int32_t* pVLen);
+ int32_t (*streamStateSessionDel)(SStreamState* pState, const SSessionKey* key);
+ int32_t (*streamStateSessionClear)(SStreamState* pState);
+ int32_t (*streamStateSessionGetKVByCur)(SStreamStateCur* pCur, SSessionKey* pKey, void** pVal, int32_t* pVLen);
+ int32_t (*streamStateStateAddIfNotExist)(SStreamState* pState, SSessionKey* key, char* pKeyData, int32_t keyDataLen,
+ state_key_cmpr_fn fn, void** pVal, int32_t* pVLen);
+ int32_t (*streamStateSessionGetKeyByRange)(SStreamState* pState, const SSessionKey* range, SSessionKey* curKey);
+
+ SUpdateInfo* (*updateInfoInit)(int64_t interval, int32_t precision, int64_t watermark);
+ TSKEY (*updateInfoFillBlockData)(SUpdateInfo* pInfo, SSDataBlock* pBlock, int32_t primaryTsCol);
+ bool (*updateInfoIsUpdated)(SUpdateInfo* pInfo, uint64_t tableId, TSKEY ts);
+ bool (*updateInfoIsTableInserted)(SUpdateInfo* pInfo, int64_t tbUid);
+ void (*updateInfoDestroy)(SUpdateInfo* pInfo);
+
+ SUpdateInfo* (*updateInfoInitP)(SInterval* pInterval, int64_t watermark);
+ void (*updateInfoAddCloseWindowSBF)(SUpdateInfo* pInfo);
+ void (*updateInfoDestoryColseWinSBF)(SUpdateInfo* pInfo);
+ int32_t (*updateInfoSerialize)(void* buf, int32_t bufLen, const SUpdateInfo* pInfo);
+ int32_t (*updateInfoDeserialize)(void* buf, int32_t bufLen, SUpdateInfo* pInfo);
+
+ SStreamStateCur* (*streamStateSessionSeekKeyNext)(SStreamState* pState, const SSessionKey* key);
+ SStreamStateCur* (*streamStateSessionSeekKeyCurrentPrev)(SStreamState* pState, const SSessionKey* key);
+ SStreamStateCur* (*streamStateSessionSeekKeyCurrentNext)(SStreamState* pState, const SSessionKey* key);
+
+ struct SStreamFileState* (*streamFileStateInit)(int64_t memSize, uint32_t keySize, uint32_t rowSize,
+ uint32_t selectRowSize, GetTsFun fp, void* pFile, TSKEY delMark);
+
+ void (*streamFileStateDestroy)(struct SStreamFileState* pFileState);
+ void (*streamFileStateClear)(struct SStreamFileState* pFileState);
+ bool (*needClearDiskBuff)(struct SStreamFileState* pFileState);
+
+ SStreamState* (*streamStateOpen)(char* path, void* pTask, bool specPath, int32_t szPage, int32_t pages);
+ void (*streamStateClose)(SStreamState* pState, bool remove);
+ int32_t (*streamStateBegin)(SStreamState* pState);
+ int32_t (*streamStateCommit)(SStreamState* pState);
+ void (*streamStateDestroy)(SStreamState* pState, bool remove);
+ int32_t (*streamStateDeleteCheckPoint)(SStreamState* pState, TSKEY mark);
+} SStateStore;
+
+typedef struct SStorageAPI {
+ SStoreMeta metaFn; // todo: refactor
+ TsdReader tsdReader;
+ SStoreMetaReader metaReaderFn;
+ SStoreCacheReader cacheFn;
+ SStoreSnapshotFn snapshotFn;
+ SStoreTqReader tqReaderFn;
+ SStateStore stateStore;
+ SMetaDataFilterAPI metaFilter;
+ SFunctionStateStore functionStore;
+} SStorageAPI;
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif // TDENGINE_STORAGEAPI_H
diff --git a/include/libs/function/function.h b/include/libs/function/function.h
index 85f7cf7e2c1e2d8de3970029ed9bffe9fb4c0563..e015f4182eb159508ba1be141df0c6807db2fcef 100644
--- a/include/libs/function/function.h
+++ b/include/libs/function/function.h
@@ -21,6 +21,7 @@ extern "C" {
#endif
#include "tcommon.h"
+#include "tsimplehash.h"
#include "tvariant.h"
struct SqlFunctionCtx;
@@ -76,7 +77,7 @@ enum {
enum {
MAIN_SCAN = 0x0u,
REVERSE_SCAN = 0x1u, // todo remove it
- PRE_SCAN = 0x2u, // pre-scan belongs to the main scan and occurs before main scan
+ PRE_SCAN = 0x2u, // pre-scan belongs to the main scan and occurs before main scan
};
typedef struct SPoint1 {
@@ -127,16 +128,58 @@ typedef struct SSerializeDataHandle {
void *pState;
} SSerializeDataHandle;
+// incremental state storage
+typedef struct STdbState {
+ void *rocksdb;
+ void **pHandle;
+ void *writeOpts;
+ void *readOpts;
+ void **cfOpts;
+ void *dbOpt;
+ struct SStreamTask *pOwner;
+ void *param;
+ void *env;
+ SListNode *pComparNode;
+ void *pBackend;
+ char idstr[64];
+ void *compactFactory;
+ TdThreadRwlock rwLock;
+
+ void *db;
+ void *pStateDb;
+ void *pFuncStateDb;
+ void *pFillStateDb; // todo refactor
+ void *pSessionStateDb;
+ void *pParNameDb;
+ void *pParTagDb;
+ void *txn;
+} STdbState;
+
+typedef struct {
+ STdbState *pTdbState;
+ struct SStreamFileState *pFileState;
+ int32_t number;
+ SSHashObj *parNameMap;
+ int64_t checkPointId;
+ int32_t taskId;
+ int64_t streamId;
+} SStreamState;
+
+typedef struct SFunctionStateStore {
+ int32_t (*streamStateFuncPut)(SStreamState *pState, const SWinKey *key, const void *value, int32_t vLen);
+ int32_t (*streamStateFuncGet)(SStreamState *pState, const SWinKey *key, void **ppVal, int32_t *pVLen);
+} SFunctionStateStore;
+
// sql function runtime context
typedef struct SqlFunctionCtx {
SInputColumnInfoData input;
SResultDataInfo resDataInfo;
- uint32_t order; // data block scanner order: asc|desc
- uint8_t isPseudoFunc;// denote current function is pseudo function or not [added for perf reason]
- uint8_t isNotNullFunc;// not return null value.
- uint8_t scanFlag; // record current running step, default: 0
- int16_t functionId; // function id
- char *pOutput; // final result output buffer, point to sdata->data
+ uint32_t order; // data block scanner order: asc|desc
+ uint8_t isPseudoFunc; // denote current function is pseudo function or not [added for perf reason]
+ uint8_t isNotNullFunc; // not return null value.
+ uint8_t scanFlag; // record current running step, default: 0
+ int16_t functionId; // function id
+ char *pOutput; // final result output buffer, point to sdata->data
// input parameter, e.g., top(k, 20), the number of results of top query is kept in param
SFunctParam *param;
// corresponding output buffer for timestamp of each result, e.g., diff/csum
@@ -155,6 +198,7 @@ typedef struct SqlFunctionCtx {
SSerializeDataHandle saveHandle;
int32_t exprIdx;
char *udfName;
+ SFunctionStateStore *pStore;
} SqlFunctionCtx;
typedef struct tExprNode {
diff --git a/include/libs/index/index.h b/include/libs/index/index.h
index 0d31ca2f683ff52b440a131ddf7602b11e53233b..cfcc9993cffb54942fe43b492b1a84f63f9a9d37 100644
--- a/include/libs/index/index.h
+++ b/include/libs/index/index.h
@@ -212,13 +212,38 @@ typedef struct SIndexMetaArg {
void* idx;
void* ivtIdx;
uint64_t suid;
+ int (*metaFilterFunc)(void* metaEx, void* param, SArray* result);
} SIndexMetaArg;
+/**
+ * the underlying storage module must implement this API to employ the index functions.
+ * @param pMeta
+ * @param param
+ * @param results
+ * @return
+ */
+typedef struct SMetaFltParam {
+ uint64_t suid;
+ int16_t cid;
+ int16_t type;
+ void *val;
+ bool reverse;
+ bool equal;
+ int (*filterFunc)(void *a, void *b, int16_t type);
+} SMetaFltParam;
+
+typedef struct SMetaDataFilterAPI {
+ int32_t (*metaFilterTableIds)(void *pVnode, SMetaFltParam *arg, SArray *pUids);
+ int32_t (*metaFilterCreateTime)(void *pVnode, SMetaFltParam *arg, SArray *pUids);
+ int32_t (*metaFilterTableName)(void *pVnode, SMetaFltParam *arg, SArray *pUids);
+ int32_t (*metaFilterTtl)(void *pVnode, SMetaFltParam *arg, SArray *pUids);
+} SMetaDataFilterAPI;
+
typedef enum { SFLT_NOT_INDEX, SFLT_COARSE_INDEX, SFLT_ACCURATE_INDEX } SIdxFltStatus;
-SIdxFltStatus idxGetFltStatus(SNode* pFilterNode);
+SIdxFltStatus idxGetFltStatus(SNode* pFilterNode, SMetaDataFilterAPI* pAPI);
-int32_t doFilterTag(SNode* pFilterNode, SIndexMetaArg* metaArg, SArray* result, SIdxFltStatus* status);
+int32_t doFilterTag(SNode* pFilterNode, SIndexMetaArg* metaArg, SArray* result, SIdxFltStatus* status, SMetaDataFilterAPI* pAPI);
/*
* init index env
diff --git a/include/libs/nodes/cmdnodes.h b/include/libs/nodes/cmdnodes.h
index 9e0cac066db12d16976759f126e3ec7801810cae..c8ce9634f5474f9209f2417ff96c8094cbdfeb43 100644
--- a/include/libs/nodes/cmdnodes.h
+++ b/include/libs/nodes/cmdnodes.h
@@ -364,6 +364,7 @@ typedef struct SCreateTopicStmt {
bool ignoreExists;
bool withMeta;
SNode* pQuery;
+ SNode* pWhere;
} SCreateTopicStmt;
typedef struct SDropTopicStmt {
diff --git a/include/libs/nodes/nodes.h b/include/libs/nodes/nodes.h
index 4002999104e7e3a98fb0b44f997b0ba76b19b5b8..2319643b09f6a93084a527f5f5d6ceb02e6b891e 100644
--- a/include/libs/nodes/nodes.h
+++ b/include/libs/nodes/nodes.h
@@ -328,6 +328,8 @@ void nodesListInsertList(SNodeList* pTarget, SListCell* pPos, SNodeList* p
SNode* nodesListGetNode(SNodeList* pList, int32_t index);
SListCell* nodesListGetCell(SNodeList* pList, int32_t index);
void nodesDestroyList(SNodeList* pList);
+bool nodesListMatch(const SNodeList* pList, const SNodeList* pSubList);
+
// Only clear the linked list structure, without releasing the elements inside
void nodesClearList(SNodeList* pList);
@@ -346,6 +348,7 @@ void nodesRewriteExprPostOrder(SNode** pNode, FNodeRewriter rewriter, void* pCon
void nodesRewriteExprsPostOrder(SNodeList* pList, FNodeRewriter rewriter, void* pContext);
bool nodesEqualNode(const SNode* a, const SNode* b);
+bool nodesMatchNode(const SNode* pSub, const SNode* pNode);
SNode* nodesCloneNode(const SNode* pNode);
SNodeList* nodesCloneList(const SNodeList* pList);
diff --git a/include/libs/nodes/plannodes.h b/include/libs/nodes/plannodes.h
index 197a5ecaf9b1e13af8d19dfde550f04c83183711..02459ed9511c19f3d76152450b65b433f315333e 100644
--- a/include/libs/nodes/plannodes.h
+++ b/include/libs/nodes/plannodes.h
@@ -112,6 +112,7 @@ typedef struct SJoinLogicNode {
SNode* pOnConditions;
bool isSingleTableJoin;
EOrder inputTsOrder;
+ SNode* pColEqualOnConditions;
} SJoinLogicNode;
typedef struct SAggLogicNode {
@@ -406,6 +407,7 @@ typedef struct SSortMergeJoinPhysiNode {
SNode* pOnConditions;
SNodeList* pTargets;
EOrder inputTsOrder;
+ SNode* pColEqualOnConditions;
} SSortMergeJoinPhysiNode;
typedef struct SAggPhysiNode {
@@ -448,7 +450,7 @@ typedef struct SMergePhysiNode {
bool ignoreGroupId;
} SMergePhysiNode;
-typedef struct SWinodwPhysiNode {
+typedef struct SWindowPhysiNode {
SPhysiNode node;
SNodeList* pExprs; // these are expression list of parameter expression of function
SNodeList* pFuncs;
@@ -461,10 +463,10 @@ typedef struct SWinodwPhysiNode {
EOrder inputTsOrder;
EOrder outputTsOrder;
bool mergeDataBlock;
-} SWinodwPhysiNode;
+} SWindowPhysiNode;
typedef struct SIntervalPhysiNode {
- SWinodwPhysiNode window;
+ SWindowPhysiNode window;
int64_t interval;
int64_t offset;
int64_t sliding;
@@ -497,7 +499,7 @@ typedef struct SMultiTableIntervalPhysiNode {
} SMultiTableIntervalPhysiNode;
typedef struct SSessionWinodwPhysiNode {
- SWinodwPhysiNode window;
+ SWindowPhysiNode window;
int64_t gap;
} SSessionWinodwPhysiNode;
@@ -506,14 +508,14 @@ typedef SSessionWinodwPhysiNode SStreamSemiSessionWinodwPhysiNode;
typedef SSessionWinodwPhysiNode SStreamFinalSessionWinodwPhysiNode;
typedef struct SStateWinodwPhysiNode {
- SWinodwPhysiNode window;
+ SWindowPhysiNode window;
SNode* pStateKey;
} SStateWinodwPhysiNode;
typedef SStateWinodwPhysiNode SStreamStateWinodwPhysiNode;
typedef struct SEventWinodwPhysiNode {
- SWinodwPhysiNode window;
+ SWindowPhysiNode window;
SNode* pStartCond;
SNode* pEndCond;
} SEventWinodwPhysiNode;
diff --git a/include/libs/nodes/querynodes.h b/include/libs/nodes/querynodes.h
index 9569cfe055fee6c715b3dc921518cc15feb439ad..12890571f994e52147fd8050b4f074ba4d66f905 100644
--- a/include/libs/nodes/querynodes.h
+++ b/include/libs/nodes/querynodes.h
@@ -241,6 +241,12 @@ typedef enum EFillMode {
FILL_MODE_NEXT
} EFillMode;
+typedef enum ETimeLineMode {
+ TIME_LINE_NONE = 1,
+ TIME_LINE_MULTI,
+ TIME_LINE_GLOBAL,
+} ETimeLineMode;
+
typedef struct SFillNode {
ENodeType type; // QUERY_NODE_FILL
EFillMode mode;
@@ -263,50 +269,50 @@ typedef struct SCaseWhenNode {
} SCaseWhenNode;
typedef struct SSelectStmt {
- ENodeType type; // QUERY_NODE_SELECT_STMT
- bool isDistinct;
- SNodeList* pProjectionList;
- SNode* pFromTable;
- SNode* pWhere;
- SNodeList* pPartitionByList;
- SNodeList* pTags; // for create stream
- SNode* pSubtable; // for create stream
- SNode* pWindow;
- SNodeList* pGroupByList; // SGroupingSetNode
- SNode* pHaving;
- SNode* pRange;
- SNode* pEvery;
- SNode* pFill;
- SNodeList* pOrderByList; // SOrderByExprNode
- SLimitNode* pLimit;
- SLimitNode* pSlimit;
- STimeWindow timeRange;
- char stmtName[TSDB_TABLE_NAME_LEN];
- uint8_t precision;
- int32_t selectFuncNum;
- int32_t returnRows; // EFuncReturnRows
- bool isEmptyResult;
- bool isTimeLineResult;
- bool isSubquery;
- bool hasAggFuncs;
- bool hasRepeatScanFuncs;
- bool hasIndefiniteRowsFunc;
- bool hasMultiRowsFunc;
- bool hasSelectFunc;
- bool hasSelectValFunc;
- bool hasOtherVectorFunc;
- bool hasUniqueFunc;
- bool hasTailFunc;
- bool hasInterpFunc;
- bool hasInterpPseudoColFunc;
- bool hasLastRowFunc;
- bool hasLastFunc;
- bool hasTimeLineFunc;
- bool hasUdaf;
- bool hasStateKey;
- bool onlyHasKeepOrderFunc;
- bool groupSort;
- bool tagScan;
+ ENodeType type; // QUERY_NODE_SELECT_STMT
+ bool isDistinct;
+ SNodeList* pProjectionList;
+ SNode* pFromTable;
+ SNode* pWhere;
+ SNodeList* pPartitionByList;
+ SNodeList* pTags; // for create stream
+ SNode* pSubtable; // for create stream
+ SNode* pWindow;
+ SNodeList* pGroupByList; // SGroupingSetNode
+ SNode* pHaving;
+ SNode* pRange;
+ SNode* pEvery;
+ SNode* pFill;
+ SNodeList* pOrderByList; // SOrderByExprNode
+ SLimitNode* pLimit;
+ SLimitNode* pSlimit;
+ STimeWindow timeRange;
+ char stmtName[TSDB_TABLE_NAME_LEN];
+ uint8_t precision;
+ int32_t selectFuncNum;
+ int32_t returnRows; // EFuncReturnRows
+ ETimeLineMode timeLineResMode;
+ bool isEmptyResult;
+ bool isSubquery;
+ bool hasAggFuncs;
+ bool hasRepeatScanFuncs;
+ bool hasIndefiniteRowsFunc;
+ bool hasMultiRowsFunc;
+ bool hasSelectFunc;
+ bool hasSelectValFunc;
+ bool hasOtherVectorFunc;
+ bool hasUniqueFunc;
+ bool hasTailFunc;
+ bool hasInterpFunc;
+ bool hasInterpPseudoColFunc;
+ bool hasLastRowFunc;
+ bool hasLastFunc;
+ bool hasTimeLineFunc;
+ bool hasUdaf;
+ bool hasStateKey;
+ bool onlyHasKeepOrderFunc;
+ bool groupSort;
+ bool tagScan;
} SSelectStmt;
typedef enum ESetOperatorType { SET_OP_TYPE_UNION_ALL = 1, SET_OP_TYPE_UNION } ESetOperatorType;
@@ -321,6 +327,7 @@ typedef struct SSetOperator {
SNode* pLimit;
char stmtName[TSDB_TABLE_NAME_LEN];
uint8_t precision;
+ ETimeLineMode timeLineResMode;
} SSetOperator;
typedef enum ESqlClause {
diff --git a/include/libs/qcom/query.h b/include/libs/qcom/query.h
index e4282c3f348f3bfd7ebc458a93f0c56b5b611779..de3eba599da0ab436e0f3f2a166357b3c0b629ba 100644
--- a/include/libs/qcom/query.h
+++ b/include/libs/qcom/query.h
@@ -51,6 +51,12 @@ typedef enum {
TARGET_TYPE_OTHER,
} ETargetType;
+typedef enum {
+ TCOL_TYPE_COLUMN = 1,
+ TCOL_TYPE_TAG,
+ TCOL_TYPE_NONE,
+} ETableColumnType;
+
#define QUERY_POLICY_VNODE 1
#define QUERY_POLICY_HYBRID 2
#define QUERY_POLICY_QNODE 3
@@ -253,6 +259,7 @@ void destroyQueryExecRes(SExecResult* pRes);
int32_t dataConverToStr(char* str, int type, void* buf, int32_t bufSize, int32_t* len);
char* parseTagDatatoJson(void* p);
int32_t cloneTableMeta(STableMeta* pSrc, STableMeta** pDst);
+void getColumnTypeFromMeta(STableMeta* pMeta, char* pName, ETableColumnType* pType);
int32_t cloneDbVgInfo(SDBVgInfo* pSrc, SDBVgInfo** pDst);
int32_t cloneSVreateTbReq(SVCreateTbReq* pSrc, SVCreateTbReq** pDst);
void freeVgInfo(SDBVgInfo* vgInfo);
diff --git a/include/libs/stream/streamState.h b/include/libs/stream/streamState.h
index 1cc61ec07298c6856822b4e8ea277cc54c7a3113..7f9d20a9dd878892e512b170921bdb1794defc52 100644
--- a/include/libs/stream/streamState.h
+++ b/include/libs/stream/streamState.h
@@ -27,65 +27,63 @@
extern "C" {
#endif
+#include "storageapi.h"
+
// void* streamBackendInit(const char* path);
// void streamBackendCleanup(void* arg);
// SListNode* streamBackendAddCompare(void* backend, void* arg);
// void streamBackendDelCompare(void* backend, void* arg);
-typedef bool (*state_key_cmpr_fn)(void* pKey1, void* pKey2);
-
-typedef struct STdbState {
- rocksdb_t* rocksdb;
- rocksdb_column_family_handle_t** pHandle;
- rocksdb_writeoptions_t* writeOpts;
- rocksdb_readoptions_t* readOpts;
- rocksdb_options_t** cfOpts;
- rocksdb_options_t* dbOpt;
- struct SStreamTask* pOwner;
- void* param;
- void* env;
- SListNode* pComparNode;
- void* pBackendHandle;
- char idstr[64];
- void* compactFactory;
-
- TDB* db;
- TTB* pStateDb;
- TTB* pFuncStateDb;
- TTB* pFillStateDb; // todo refactor
- TTB* pSessionStateDb;
- TTB* pParNameDb;
- TTB* pParTagDb;
- TXN* txn;
-} STdbState;
-
-// incremental state storage
-typedef struct {
- STdbState* pTdbState;
- SStreamFileState* pFileState;
- int32_t number;
- SSHashObj* parNameMap;
- int64_t checkPointId;
- int32_t taskId;
- int64_t streamId;
-} SStreamState;
-
-SStreamState* streamStateOpen(char* path, struct SStreamTask* pTask, bool specPath, int32_t szPage, int32_t pages);
+
+// <<<<<<< HEAD
+// typedef struct STdbState {
+// rocksdb_t* rocksdb;
+// rocksdb_column_family_handle_t** pHandle;
+// rocksdb_writeoptions_t* writeOpts;
+// rocksdb_readoptions_t* readOpts;
+// rocksdb_options_t** cfOpts;
+// rocksdb_options_t* dbOpt;
+// struct SStreamTask* pOwner;
+// void* param;
+// void* env;
+// SListNode* pComparNode;
+// void* pBackend;
+// char idstr[64];
+// void* compactFactory;
+// TdThreadRwlock rwLock;
+// =======
+// typedef struct STdbState {
+// rocksdb_t* rocksdb;
+// rocksdb_column_family_handle_t** pHandle;
+// rocksdb_writeoptions_t* writeOpts;
+// rocksdb_readoptions_t* readOpts;
+// rocksdb_options_t** cfOpts;
+// rocksdb_options_t* dbOpt;
+// struct SStreamTask* pOwner;
+// void* param;
+// void* env;
+// SListNode* pComparNode;
+// void* pBackendHandle;
+// char idstr[64];
+// void* compactFactory;
+//
+// TDB* db;
+// TTB* pStateDb;
+// TTB* pFuncStateDb;
+// TTB* pFillStateDb; // todo refactor
+// TTB* pSessionStateDb;
+// TTB* pParNameDb;
+// TTB* pParTagDb;
+// TXN* txn;
+//} STdbState;
+//>>>>>>> enh/dev3.0
+
+SStreamState* streamStateOpen(char* path, void* pTask, bool specPath, int32_t szPage, int32_t pages);
void streamStateClose(SStreamState* pState, bool remove);
int32_t streamStateBegin(SStreamState* pState);
int32_t streamStateCommit(SStreamState* pState);
void streamStateDestroy(SStreamState* pState, bool remove);
int32_t streamStateDeleteCheckPoint(SStreamState* pState, TSKEY mark);
-typedef struct {
- rocksdb_iterator_t* iter;
- rocksdb_snapshot_t* snapshot;
- rocksdb_readoptions_t* readOpt;
- rocksdb_t* db;
-
- TBC* pCur;
- int64_t number;
-} SStreamStateCur;
-
int32_t streamStateFuncPut(SStreamState* pState, const SWinKey* key, const void* value, int32_t vLen);
int32_t streamStateFuncGet(SStreamState* pState, const SWinKey* key, void** ppVal, int32_t* pVLen);
@@ -119,7 +117,7 @@ int32_t streamStateFillDel(SStreamState* pState, const SWinKey* key);
int32_t streamStateAddIfNotExist(SStreamState* pState, const SWinKey* key, void** pVal, int32_t* pVLen);
int32_t streamStateReleaseBuf(SStreamState* pState, const SWinKey* key, void* pVal);
-void streamFreeVal(void* val);
+void streamStateFreeVal(void* val);
SStreamStateCur* streamStateGetAndCheckCur(SStreamState* pState, SWinKey* key);
SStreamStateCur* streamStateSeekKeyNext(SStreamState* pState, const SWinKey* key);
diff --git a/include/libs/stream/tstream.h b/include/libs/stream/tstream.h
index 8e7dd0bb0d0dca1f8465cc07eb5f7d9695fed267..51f2de481d120cfb6a044565fb0a482297e8660a 100644
--- a/include/libs/stream/tstream.h
+++ b/include/libs/stream/tstream.h
@@ -78,11 +78,11 @@ enum {
TASK_TRIGGER_STATUS__ACTIVE,
};
-enum {
+typedef enum {
TASK_LEVEL__SOURCE = 1,
TASK_LEVEL__AGG,
TASK_LEVEL__SINK,
-};
+} ETASK_LEVEL;
enum {
TASK_OUTPUT__FIXED_DISPATCH = 1,
@@ -206,7 +206,7 @@ static FORCE_INLINE void streamQueueProcessFail(SStreamQueue* queue) {
void* streamQueueNextItem(SStreamQueue* queue);
SStreamDataSubmit* streamDataSubmitNew(SPackedData* pData, int32_t type);
-void streamDataSubmitDestroy(SStreamDataSubmit* pDataSubmit);
+void streamDataSubmitDestroy(SStreamDataSubmit* pDataSubmit);
SStreamDataSubmit* streamSubmitBlockClone(SStreamDataSubmit* pSubmit);
@@ -284,13 +284,13 @@ struct SStreamTask {
int16_t dispatchMsgType;
SStreamStatus status;
int32_t selfChildId;
- int32_t nodeId;
+ int32_t nodeId; // vgroup id
SEpSet epSet;
SCheckpointInfo chkInfo;
STaskExec exec;
-
- // fill history
- int8_t fillHistory;
+ int8_t fillHistory; // fill history
+ int64_t ekey; // end ts key
+ int64_t endVer; // end version
// children info
SArray* childEpInfo; // SArray
@@ -327,6 +327,7 @@ struct SStreamTask {
int64_t checkpointingId;
int32_t checkpointAlignCnt;
struct SStreamMeta* pMeta;
+ SSHashObj* pNameMap;
};
// meta
@@ -346,12 +347,14 @@ typedef struct SStreamMeta {
void* streamBackend;
int32_t streamBackendId;
int64_t streamBackendRid;
+ SHashObj* pTaskBackendUnique;
} SStreamMeta;
int32_t tEncodeStreamEpInfo(SEncoder* pEncoder, const SStreamChildEpInfo* pInfo);
int32_t tDecodeStreamEpInfo(SDecoder* pDecoder, SStreamChildEpInfo* pInfo);
-SStreamTask* tNewStreamTask(int64_t streamId);
+SStreamTask* tNewStreamTask(int64_t streamId, int8_t taskLevel, int8_t fillHistory, int64_t triggerParam,
+ SArray* pTaskList);
int32_t tEncodeStreamTask(SEncoder* pEncoder, const SStreamTask* pTask);
int32_t tDecodeStreamTask(SDecoder* pDecoder, SStreamTask* pTask);
void tFreeStreamTask(SStreamTask* pTask);
diff --git a/include/libs/stream/tstreamFileState.h b/include/libs/stream/tstreamFileState.h
index 7124e2d2517c10de4b837f1dcfbc686988e9760c..0dbacf6c9f1a52eac62966206ad0b95d52c0620f 100644
--- a/include/libs/stream/tstreamFileState.h
+++ b/include/libs/stream/tstreamFileState.h
@@ -21,23 +21,16 @@
#include "tarray.h"
#include "tdef.h"
#include "tlist.h"
+#include "storageapi.h"
#ifdef __cplusplus
extern "C" {
#endif
typedef struct SStreamFileState SStreamFileState;
-typedef struct SRowBuffPos {
- void* pRowBuff;
- void* pKey;
- bool beFlushed;
- bool beUsed;
-} SRowBuffPos;
typedef SList SStreamSnapshot;
-typedef TSKEY (*GetTsFun)(void*);
-
SStreamFileState* streamFileStateInit(int64_t memSize, uint32_t keySize, uint32_t rowSize, uint32_t selectRowSize,
GetTsFun fp, void* pFile, TSKEY delMark);
void streamFileStateDestroy(SStreamFileState* pFileState);
diff --git a/include/libs/stream/tstreamUpdate.h b/include/libs/stream/tstreamUpdate.h
index 4678aa0bd9630beff4eb53111dbe5221325d1ac9..bd5a3be8de638005a5e85e999d3888702903eb75 100644
--- a/include/libs/stream/tstreamUpdate.h
+++ b/include/libs/stream/tstreamUpdate.h
@@ -19,7 +19,7 @@
#include "tarray.h"
#include "tcommon.h"
#include "tmsg.h"
-#include "tscalablebf.h"
+#include "storageapi.h"
#ifdef __cplusplus
extern "C" {
@@ -30,18 +30,18 @@ typedef struct SUpdateKey {
TSKEY ts;
} SUpdateKey;
-typedef struct SUpdateInfo {
- SArray *pTsBuckets;
- uint64_t numBuckets;
- SArray *pTsSBFs;
- uint64_t numSBFs;
- int64_t interval;
- int64_t watermark;
- TSKEY minTS;
- SScalableBf *pCloseWinSBF;
- SHashObj *pMap;
- uint64_t maxDataVersion;
-} SUpdateInfo;
+//typedef struct SUpdateInfo {
+// SArray *pTsBuckets;
+// uint64_t numBuckets;
+// SArray *pTsSBFs;
+// uint64_t numSBFs;
+// int64_t interval;
+// int64_t watermark;
+// TSKEY minTS;
+// SScalableBf *pCloseWinSBF;
+// SHashObj *pMap;
+// uint64_t maxDataVersion;
+//} SUpdateInfo;
SUpdateInfo *updateInfoInitP(SInterval *pInterval, int64_t watermark);
SUpdateInfo *updateInfoInit(int64_t interval, int32_t precision, int64_t watermark);
diff --git a/include/libs/wal/wal.h b/include/libs/wal/wal.h
index 35a6838b2e0f36ba36ccd41c4e8dd175e2efe7c9..1aa08ff8026f0efef2e0096b1d2112ce56e3f7fb 100644
--- a/include/libs/wal/wal.h
+++ b/include/libs/wal/wal.h
@@ -149,6 +149,7 @@ struct SWalReader {
TdFilePtr pIdxFile;
int64_t curFileFirstVer;
int64_t curVersion;
+ int64_t skipToVersion; // skip data and jump to destination version, usually used by stream resume ignoring untreated data
int64_t capacity;
TdThreadMutex mutex;
SWalFilterCond cond;
@@ -200,6 +201,8 @@ int32_t walReaderSeekVer(SWalReader *pRead, int64_t ver);
int32_t walNextValidMsg(SWalReader *pRead);
int64_t walReaderGetCurrentVer(const SWalReader *pReader);
int64_t walReaderGetValidFirstVer(const SWalReader *pReader);
+int64_t walReaderGetSkipToVersion(SWalReader *pReader);
+void walReaderSetSkipToVersion(SWalReader *pReader, int64_t ver);
void walReaderValidVersionRange(SWalReader *pReader, int64_t *sver, int64_t *ever);
void walReaderVerifyOffset(SWalReader *pWalReader, STqOffsetVal* pOffset);
@@ -209,7 +212,8 @@ int32_t walFetchHead(SWalReader *pRead, int64_t ver, SWalCkHead *pHead);
int32_t walFetchBody(SWalReader *pRead, SWalCkHead **ppHead);
int32_t walSkipFetchBody(SWalReader *pRead, const SWalCkHead *pHead);
-SWalRef *walRefFirstVer(SWal *, SWalRef *);
+void walRefFirstVer(SWal *, SWalRef *);
+void walRefLastVer(SWal *, SWalRef *);
SWalRef *walRefCommittedVer(SWal *);
SWalRef *walOpenRef(SWal *);
diff --git a/include/util/taoserror.h b/include/util/taoserror.h
index 636a5e63a7bba84855550dabb2c97c01676d37da..9e5229870e05cae1c1f7cea1c6d8a4f57c92033d 100644
--- a/include/util/taoserror.h
+++ b/include/util/taoserror.h
@@ -409,6 +409,7 @@ int32_t* taosGetErrno();
#define TSDB_CODE_MNODE_ALREADY_IS_VOTER TAOS_DEF_ERROR_CODE(0, 0x0413) // internal
#define TSDB_CODE_MNODE_ONLY_TWO_MNODE TAOS_DEF_ERROR_CODE(0, 0x0414) // internal
#define TSDB_CODE_MNODE_NO_NEED_RESTORE TAOS_DEF_ERROR_CODE(0, 0x0415) // internal
+#define TSDB_CODE_DNODE_ONLY_USE_WHEN_OFFLINE TAOS_DEF_ERROR_CODE(0, 0x0416)
// vnode
// #define TSDB_CODE_VND_ACTION_IN_PROGRESS TAOS_DEF_ERROR_CODE(0, 0x0500) // 2.x
diff --git a/include/util/tdef.h b/include/util/tdef.h
index 0b0569e2d1e2b0dcb3b802dbf646dc94ba6f2b21..37eeb87fdd6dbbdfc77db8a1d7ab048f40d68007 100644
--- a/include/util/tdef.h
+++ b/include/util/tdef.h
@@ -32,7 +32,7 @@ extern "C" {
#define TD_VER_MAX UINT64_MAX // TODO: use the real max version from query handle
// Bytes for each type.
-extern const int32_t TYPE_BYTES[17];
+extern const int32_t TYPE_BYTES[21];
// TODO: replace and remove code below
#define CHAR_BYTES sizeof(char)
diff --git a/packaging/deb/DEBIAN/preinst b/packaging/deb/DEBIAN/preinst
index 904a946e2092fdef4325cab27fd759291896348d..d6558d5b3b458cde394c5dea4622778b64017a4c 100644
--- a/packaging/deb/DEBIAN/preinst
+++ b/packaging/deb/DEBIAN/preinst
@@ -80,4 +80,5 @@ fi
# there can not libtaos.so*, otherwise ln -s error
${csudo}rm -f ${install_main_dir}/driver/libtaos.* || :
+[ -f ${install_main_dir}/driver/librocksdb.* ] && ${csudo}rm -f ${install_main_dir}/driver/librocksdb.* || :
[ -f ${install_main_dir}/driver/libtaosws.so ] && ${csudo}rm -f ${install_main_dir}/driver/libtaosws.so || :
diff --git a/packaging/deb/DEBIAN/prerm b/packaging/deb/DEBIAN/prerm
index 0d63115a0411e1edbd3a1474f56b865697040ccf..8f8d4728676a0559caed5491f7c3a2a6e329fdf9 100644
--- a/packaging/deb/DEBIAN/prerm
+++ b/packaging/deb/DEBIAN/prerm
@@ -40,6 +40,7 @@ else
${csudo}rm -f ${inc_link_dir}/taosudf.h || :
[ -f ${inc_link_dir}/taosws.h ] && ${csudo}rm -f ${inc_link_dir}/taosws.h || :
${csudo}rm -f ${lib_link_dir}/libtaos.* || :
+ [ -f ${lib_link_dir}/librocksdb.* ] && ${csudo}rm -f ${lib_link_dir}/librocksdb.* || :
[ -f ${lib_link_dir}/libtaosws.so ] && ${csudo}rm -f ${lib_link_dir}/libtaosws.so || :
${csudo}rm -f ${log_link_dir} || :
diff --git a/packaging/deb/makedeb.sh b/packaging/deb/makedeb.sh
index 9f49cf345a4dd9e36f048f03bd49a28539baec66..024c69deb14d316b681fa7737a17504878f79af6 100755
--- a/packaging/deb/makedeb.sh
+++ b/packaging/deb/makedeb.sh
@@ -31,6 +31,7 @@ cd ${pkg_dir}
libfile="libtaos.so.${tdengine_ver}"
wslibfile="libtaosws.so"
+rocksdblib="librocksdb.so.8"
# create install dir
install_home_path="/usr/local/taos"
@@ -94,6 +95,7 @@ fi
cp ${compile_dir}/build/bin/taos ${pkg_dir}${install_home_path}/bin
cp ${compile_dir}/build/lib/${libfile} ${pkg_dir}${install_home_path}/driver
+[ -f ${compile_dir}/build/lib/${rocksdblib} ] && cp ${compile_dir}/build/lib/${rocksdblib} ${pkg_dir}${install_home_path}/driver ||:
[ -f ${compile_dir}/build/lib/${wslibfile} ] && cp ${compile_dir}/build/lib/${wslibfile} ${pkg_dir}${install_home_path}/driver ||:
cp ${compile_dir}/../include/client/taos.h ${pkg_dir}${install_home_path}/include
cp ${compile_dir}/../include/common/taosdef.h ${pkg_dir}${install_home_path}/include
diff --git a/packaging/rpm/tdengine.spec b/packaging/rpm/tdengine.spec
index 52d5335003097787a9d607ed4f020eb9153ceb31..2b056c376a01f967600380befa8cb035c39b5f6f 100644
--- a/packaging/rpm/tdengine.spec
+++ b/packaging/rpm/tdengine.spec
@@ -45,6 +45,7 @@ echo buildroot: %{buildroot}
libfile="libtaos.so.%{_version}"
wslibfile="libtaosws.so"
+rocksdblib="librocksdb.so.8"
# create install path, and cp file
mkdir -p %{buildroot}%{homepath}/bin
@@ -92,6 +93,7 @@ if [ -f %{_compiledir}/build/bin/taosadapter ]; then
fi
cp %{_compiledir}/build/lib/${libfile} %{buildroot}%{homepath}/driver
[ -f %{_compiledir}/build/lib/${wslibfile} ] && cp %{_compiledir}/build/lib/${wslibfile} %{buildroot}%{homepath}/driver ||:
+[ -f %{_compiledir}/build/lib/${rocksdblib} ] && cp %{_compiledir}/build/lib/${rocksdblib} %{buildroot}%{homepath}/driver ||:
cp %{_compiledir}/../include/client/taos.h %{buildroot}%{homepath}/include
cp %{_compiledir}/../include/common/taosdef.h %{buildroot}%{homepath}/include
cp %{_compiledir}/../include/util/taoserror.h %{buildroot}%{homepath}/include
@@ -174,6 +176,7 @@ fi
# there can not libtaos.so*, otherwise ln -s error
${csudo}rm -f %{homepath}/driver/libtaos* || :
+${csudo}rm -f %{homepath}/driver/librocksdb* || :
#Scripts executed after installation
%post
@@ -219,6 +222,7 @@ if [ $1 -eq 0 ];then
${csudo}rm -f ${inc_link_dir}/taoserror.h || :
${csudo}rm -f ${inc_link_dir}/taosudf.h || :
${csudo}rm -f ${lib_link_dir}/libtaos.* || :
+ ${csudo}rm -f ${lib_link_dir}/librocksdb.* || :
${csudo}rm -f ${log_link_dir} || :
${csudo}rm -f ${data_link_dir} || :
diff --git a/packaging/tools/install.sh b/packaging/tools/install.sh
index 1b47b10520147664a3c2f3a558fed1208f84a2ca..9aa019f218a199ab5635fc070c06d1be1a0cf200 100755
--- a/packaging/tools/install.sh
+++ b/packaging/tools/install.sh
@@ -250,18 +250,30 @@ function install_lib() {
# Remove links
${csudo}rm -f ${lib_link_dir}/libtaos.* || :
${csudo}rm -f ${lib64_link_dir}/libtaos.* || :
+ ${csudo}rm -f ${lib_link_dir}/librocksdb.* || :
+ ${csudo}rm -f ${lib64_link_dir}/librocksdb.* || :
#${csudo}rm -rf ${v15_java_app_dir} || :
${csudo}cp -rf ${script_dir}/driver/* ${install_main_dir}/driver && ${csudo}chmod 777 ${install_main_dir}/driver/*
${csudo}ln -sf ${install_main_dir}/driver/libtaos.* ${lib_link_dir}/libtaos.so.1
${csudo}ln -sf ${lib_link_dir}/libtaos.so.1 ${lib_link_dir}/libtaos.so
+ ${csudo}ln -sf ${install_main_dir}/driver/librocksdb.* ${lib_link_dir}/librocksdb.so.8
+ ${csudo}ln -sf ${lib_link_dir}/librocksdb.so.8 ${lib_link_dir}/librocksdb.so
+
+
+
+
+
[ -f ${install_main_dir}/driver/libtaosws.so ] && ${csudo}ln -sf ${install_main_dir}/driver/libtaosws.so ${lib_link_dir}/libtaosws.so || :
if [[ -d ${lib64_link_dir} && ! -e ${lib64_link_dir}/libtaos.so ]]; then
${csudo}ln -sf ${install_main_dir}/driver/libtaos.* ${lib64_link_dir}/libtaos.so.1 || :
${csudo}ln -sf ${lib64_link_dir}/libtaos.so.1 ${lib64_link_dir}/libtaos.so || :
+ ${csudo}ln -sf ${install_main_dir}/driver/librocksdb.* ${lib64_link_dir}/librocksdb.so.8 || :
+ ${csudo}ln -sf ${lib64_link_dir}/librocksdb.so.8 ${lib64_link_dir}/librocksdb.so || :
+
[ -f ${install_main_dir}/libtaosws.so ] && ${csudo}ln -sf ${install_main_dir}/libtaosws.so ${lib64_link_dir}/libtaosws.so || :
fi
diff --git a/packaging/tools/makepkg.sh b/packaging/tools/makepkg.sh
index 2f1e803689cb877560ca7d41bcd3a16ff1264b05..ab45c684c4b5bcb75179ca366799c74949f0454c 100755
--- a/packaging/tools/makepkg.sh
+++ b/packaging/tools/makepkg.sh
@@ -70,8 +70,7 @@ if [ "$pagMode" == "lite" ]; then
taostools_bin_files=""
else
if [ "$verMode" == "cloud" ]; then
- taostools_bin_files=" ${build_dir}/bin/taosdump \
- ${build_dir}/bin/taosBenchmark"
+ taostools_bin_files=" ${build_dir}/bin/taosBenchmark"
else
wget https://github.com/taosdata/grafanaplugin/releases/latest/download/TDinsight.sh -O ${build_dir}/bin/TDinsight.sh \
&& echo "TDinsight.sh downloaded!" \
@@ -112,9 +111,11 @@ fi
if [ "$osType" == "Darwin" ]; then
lib_files="${build_dir}/lib/libtaos.${version}.dylib"
wslib_files="${build_dir}/lib/libtaosws.dylib"
+  rocksdb_lib_files="${build_dir}/lib/librocksdb.8.1.1.dylib"
else
lib_files="${build_dir}/lib/libtaos.so.${version}"
wslib_files="${build_dir}/lib/libtaosws.so"
+ rocksdb_lib_files="${build_dir}/lib/librocksdb.so.8.1.1"
fi
header_files="${code_dir}/include/client/taos.h ${code_dir}/include/common/taosdef.h ${code_dir}/include/util/taoserror.h ${code_dir}/include/libs/function/taosudf.h"
@@ -337,6 +338,7 @@ fi
# Copy driver
mkdir -p ${install_dir}/driver && cp ${lib_files} ${install_dir}/driver && echo "${versionComp}" >${install_dir}/driver/vercomp.txt
[ -f ${wslib_files} ] && cp ${wslib_files} ${install_dir}/driver || :
+[ -f ${rocksdb_lib_files} ] && cp ${rocksdb_lib_files} ${install_dir}/driver || :
# Copy connector
if [ "$verMode" == "cluster" ]; then
diff --git a/packaging/tools/post.sh b/packaging/tools/post.sh
index fc392c9684c04ed6ce587d977ec4628df59cbe6f..10de87966fdcd3d86060eb774b05ad9e7eab9f1d 100755
--- a/packaging/tools/post.sh
+++ b/packaging/tools/post.sh
@@ -202,10 +202,19 @@ function install_lib() {
log_print "start install lib from ${lib_dir} to ${lib_link_dir}"
${csudo}rm -f ${lib_link_dir}/libtaos* || :
${csudo}rm -f ${lib64_link_dir}/libtaos* || :
+
+ #rocksdb
+  ${csudo}rm -f ${lib_link_dir}/librocksdb* || :
+  ${csudo}rm -f ${lib64_link_dir}/librocksdb* || :
+
+
+
+
[ -f ${lib_link_dir}/libtaosws.${lib_file_ext} ] && ${csudo}rm -f ${lib_link_dir}/libtaosws.${lib_file_ext} || :
[ -f ${lib64_link_dir}/libtaosws.${lib_file_ext} ] && ${csudo}rm -f ${lib64_link_dir}/libtaosws.${lib_file_ext} || :
+ ${csudo}ln -s ${lib_dir}/librocksdb.* ${lib_link_dir}/librocksdb.${lib_file_ext_1} 2>>${install_log_path} || return 1
${csudo}ln -s ${lib_dir}/libtaos.* ${lib_link_dir}/libtaos.${lib_file_ext_1} 2>>${install_log_path} || return 1
${csudo}ln -s ${lib_link_dir}/libtaos.${lib_file_ext_1} ${lib_link_dir}/libtaos.${lib_file_ext} 2>>${install_log_path} || return 1
@@ -214,6 +223,7 @@ function install_lib() {
if [[ -d ${lib64_link_dir} && ! -e ${lib64_link_dir}/libtaos.${lib_file_ext} ]]; then
${csudo}ln -s ${lib_dir}/libtaos.* ${lib64_link_dir}/libtaos.${lib_file_ext_1} 2>>${install_log_path} || return 1
${csudo}ln -s ${lib64_link_dir}/libtaos.${lib_file_ext_1} ${lib64_link_dir}/libtaos.${lib_file_ext} 2>>${install_log_path} || return 1
+ ${csudo}ln -s ${lib_dir}/librocksdb.* ${lib64_link_dir}/librocksdb.${lib_file_ext_1} 2>>${install_log_path} || return 1
[ -f ${lib_dir}/libtaosws.${lib_file_ext} ] && ${csudo}ln -sf ${lib_dir}/libtaosws.${lib_file_ext} ${lib64_link_dir}/libtaosws.${lib_file_ext} 2>>${install_log_path}
fi
diff --git a/packaging/tools/remove.sh b/packaging/tools/remove.sh
index 6c671473bfb5046ac18e7b813088ec6d6d3843d2..a17b29983c820d68eb4e4c5ed60536321018b227 100755
--- a/packaging/tools/remove.sh
+++ b/packaging/tools/remove.sh
@@ -142,11 +142,14 @@ function clean_local_bin() {
function clean_lib() {
# Remove link
${csudo}rm -f ${lib_link_dir}/libtaos.* || :
+ ${csudo}rm -f ${lib_link_dir}/librocksdb.* || :
[ -f ${lib_link_dir}/libtaosws.* ] && ${csudo}rm -f ${lib_link_dir}/libtaosws.* || :
${csudo}rm -f ${lib64_link_dir}/libtaos.* || :
+ ${csudo}rm -f ${lib64_link_dir}/librocksdb.* || :
[ -f ${lib64_link_dir}/libtaosws.* ] && ${csudo}rm -f ${lib64_link_dir}/libtaosws.* || :
#${csudo}rm -rf ${v15_java_app_dir} || :
+
}
function clean_header() {
diff --git a/source/client/inc/clientSml.h b/source/client/inc/clientSml.h
index b20fc6f57a4256f07f55150711d82a6bf05175f7..c9eb95101495abda29f88e6876464560b94b849a 100644
--- a/source/client/inc/clientSml.h
+++ b/source/client/inc/clientSml.h
@@ -232,9 +232,9 @@ int smlJsonParseObjFirst(char **start, SSmlLineInfo *element, int8_t *
int smlJsonParseObj(char **start, SSmlLineInfo *element, int8_t *offset);
//SArray *smlJsonParseTags(char *start, char *end);
bool smlParseNumberOld(SSmlKv *kvVal, SSmlMsgBuf *msg);
-void* nodeListGet(NodeList* list, const void *key, int32_t len, _equal_fn_sml fn);
-int nodeListSet(NodeList** list, const void *key, int32_t len, void* value, _equal_fn_sml fn);
-int nodeListSize(NodeList* list);
+//void* nodeListGet(NodeList* list, const void *key, int32_t len, _equal_fn_sml fn);
+//int nodeListSet(NodeList** list, const void *key, int32_t len, void* value, _equal_fn_sml fn);
+//int nodeListSize(NodeList* list);
bool smlDoubleToInt64OverFlow(double num);
int32_t smlBuildInvalidDataMsg(SSmlMsgBuf *pBuf, const char *msg1, const char *msg2);
bool smlParseNumber(SSmlKv *kvVal, SSmlMsgBuf *msg);
@@ -251,7 +251,7 @@ int32_t smlClearForRerun(SSmlHandle *info);
int32_t smlParseValue(SSmlKv *pVal, SSmlMsgBuf *msg);
uint8_t smlGetTimestampLen(int64_t num);
void clearColValArray(SArray* pCols);
-void smlDestroyTableInfo(SSmlHandle *info, SSmlTableInfo *tag);
+void smlDestroyTableInfo(void *para);
void freeSSmlKv(void* data);
int32_t smlParseInfluxString(SSmlHandle *info, char *sql, char *sqlEnd, SSmlLineInfo *elements);
diff --git a/source/client/jni/com_taosdata_jdbc_tmq_TMQConnector.h b/source/client/jni/com_taosdata_jdbc_tmq_TMQConnector.h
index c035b6598c8b6290997e65a30f40cd28a11279b1..422bcd57ac1f3a5fa2cfe47eeb1b2c88427b2c20 100644
--- a/source/client/jni/com_taosdata_jdbc_tmq_TMQConnector.h
+++ b/source/client/jni/com_taosdata_jdbc_tmq_TMQConnector.h
@@ -158,6 +158,13 @@ JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_tmq_TMQConnector_tmqGetVgroupId(JN
*/
JNIEXPORT jstring JNICALL Java_com_taosdata_jdbc_tmq_TMQConnector_tmqGetTableName(JNIEnv *, jobject, jlong);
+/*
+ * Class: com_taosdata_jdbc_tmq_TMQConnector
+ * Method: tmqGetOffset
+ * Signature: (J)J
+ */
+JNIEXPORT jlong JNICALL Java_com_taosdata_jdbc_tmq_TMQConnector_tmqGetOffset(JNIEnv *, jobject, jlong);
+
/*
* Class: com_taosdata_jdbc_tmq_TMQConnector
* Method: fetchBlockImp
@@ -166,6 +173,12 @@ JNIEXPORT jstring JNICALL Java_com_taosdata_jdbc_tmq_TMQConnector_tmqGetTableNam
JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_tmq_TMQConnector_fetchRawBlockImp(JNIEnv *, jobject, jlong, jlong,
jobject, jobject);
+JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_tmq_TMQConnector_tmqSeekImp(JNIEnv *, jobject, jlong, jstring, jint,
+ jlong);
+
+JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_tmq_TMQConnector_tmqGetTopicAssignmentImp(JNIEnv *, jobject, jlong,
+ jstring, jobject);
+
#ifdef __cplusplus
}
#endif
diff --git a/source/client/src/clientImpl.c b/source/client/src/clientImpl.c
index 82177302c7f09a7166ee088a5cf69f44bfd752b9..5963e419e1cd504c2d9d39fa1634e6d1a75a346a 100644
--- a/source/client/src/clientImpl.c
+++ b/source/client/src/clientImpl.c
@@ -1757,6 +1757,7 @@ static int32_t doConvertJson(SReqResultInfo* pResultInfo, int32_t numOfCols, int
return TSDB_CODE_TSC_INTERNAL_ERROR;
}
+ taosMemoryFreeClear(pResultInfo->convertJson);
pResultInfo->convertJson = taosMemoryCalloc(1, dataLen);
if (pResultInfo->convertJson == NULL) return TSDB_CODE_OUT_OF_MEMORY;
char* p1 = pResultInfo->convertJson;
diff --git a/source/client/src/clientSml.c b/source/client/src/clientSml.c
index 3ea471fbf8522eff8eedbe4220762189de8cb3b2..bea237d09eaa28c1c32b8e7006734992c561221d 100644
--- a/source/client/src/clientSml.c
+++ b/source/client/src/clientSml.c
@@ -24,72 +24,91 @@ int64_t smlToMilli[3] = {3600000LL, 60000LL, 1000LL};
int64_t smlFactorNS[3] = {NANOSECOND_PER_MSEC, NANOSECOND_PER_USEC, 1};
int64_t smlFactorS[3] = {1000LL, 1000000LL, 1000000000LL};
-void *nodeListGet(NodeList *list, const void *key, int32_t len, _equal_fn_sml fn) {
- NodeList *tmp = list;
- while (tmp) {
- if (fn == NULL) {
- if (tmp->data.used && tmp->data.keyLen == len && memcmp(tmp->data.key, key, len) == 0) {
- return tmp->data.value;
- }
- } else {
- if (tmp->data.used && fn(tmp->data.key, key) == 0) {
- return tmp->data.value;
- }
- }
-
- tmp = tmp->next;
+//void *nodeListGet(NodeList *list, const void *key, int32_t len, _equal_fn_sml fn) {
+// NodeList *tmp = list;
+// while (tmp) {
+// if (fn == NULL) {
+// if (tmp->data.used && tmp->data.keyLen == len && memcmp(tmp->data.key, key, len) == 0) {
+// return tmp->data.value;
+// }
+// } else {
+// if (tmp->data.used && fn(tmp->data.key, key) == 0) {
+// return tmp->data.value;
+// }
+// }
+//
+// tmp = tmp->next;
+// }
+// return NULL;
+//}
+//
+//int nodeListSet(NodeList **list, const void *key, int32_t len, void *value, _equal_fn_sml fn) {
+// NodeList *tmp = *list;
+// while (tmp) {
+// if (!tmp->data.used) break;
+// if (fn == NULL) {
+// if (tmp->data.keyLen == len && memcmp(tmp->data.key, key, len) == 0) {
+// return -1;
+// }
+// } else {
+// if (tmp->data.keyLen == len && fn(tmp->data.key, key) == 0) {
+// return -1;
+// }
+// }
+//
+// tmp = tmp->next;
+// }
+// if (tmp) {
+// tmp->data.key = key;
+// tmp->data.keyLen = len;
+// tmp->data.value = value;
+// tmp->data.used = true;
+// } else {
+// NodeList *newNode = (NodeList *)taosMemoryCalloc(1, sizeof(NodeList));
+// if (newNode == NULL) {
+// return -1;
+// }
+// newNode->data.key = key;
+// newNode->data.keyLen = len;
+// newNode->data.value = value;
+// newNode->data.used = true;
+// newNode->next = *list;
+// *list = newNode;
+// }
+// return 0;
+//}
+//
+//int nodeListSize(NodeList *list) {
+// int cnt = 0;
+// while (list) {
+// if (list->data.used)
+// cnt++;
+// else
+// break;
+// list = list->next;
+// }
+// return cnt;
+//}
+
+static int32_t smlCheckAuth(SSmlHandle *info, SRequestConnInfo* conn, const char* pTabName, AUTH_TYPE type){
+ SUserAuthInfo pAuth = {0};
+ snprintf(pAuth.user, sizeof(pAuth.user), "%s", info->taos->user);
+ if (NULL == pTabName) {
+ tNameSetDbName(&pAuth.tbName, info->taos->acctId, info->pRequest->pDb, strlen(info->pRequest->pDb));
+ } else {
+ toName(info->taos->acctId, info->pRequest->pDb, pTabName, &pAuth.tbName);
}
- return NULL;
-}
+ pAuth.type = type;
-int nodeListSet(NodeList **list, const void *key, int32_t len, void *value, _equal_fn_sml fn) {
- NodeList *tmp = *list;
- while (tmp) {
- if (!tmp->data.used) break;
- if (fn == NULL) {
- if (tmp->data.keyLen == len && memcmp(tmp->data.key, key, len) == 0) {
- return -1;
- }
- } else {
- if (tmp->data.keyLen == len && fn(tmp->data.key, key) == 0) {
- return -1;
- }
- }
+ int32_t code = TSDB_CODE_SUCCESS;
+ SUserAuthRes authRes = {0};
- tmp = tmp->next;
- }
- if (tmp) {
- tmp->data.key = key;
- tmp->data.keyLen = len;
- tmp->data.value = value;
- tmp->data.used = true;
- } else {
- NodeList *newNode = (NodeList *)taosMemoryCalloc(1, sizeof(NodeList));
- if (newNode == NULL) {
- return -1;
- }
- newNode->data.key = key;
- newNode->data.keyLen = len;
- newNode->data.value = value;
- newNode->data.used = true;
- newNode->next = *list;
- *list = newNode;
- }
- return 0;
-}
+ code = catalogChkAuth(info->pCatalog, conn, &pAuth, &authRes);
-int nodeListSize(NodeList *list) {
- int cnt = 0;
- while (list) {
- if (list->data.used)
- cnt++;
- else
- break;
- list = list->next;
- }
- return cnt;
-}
+ return (code == TSDB_CODE_SUCCESS) ? (authRes.pass ? TSDB_CODE_SUCCESS : TSDB_CODE_PAR_PERMISSION_DENIED) : code;
+
+}
inline bool smlDoubleToInt64OverFlow(double num) {
if (num >= (double)INT64_MAX || num <= (double)INT64_MIN) return true;
return false;
@@ -211,6 +230,16 @@ void getTableUid(SSmlHandle *info, SSmlLineInfo *currElement, SSmlTableInfo *tin
}
}
+static void smlDestroySTableMeta(void *para) {
+ SSmlSTableMeta *meta = *(SSmlSTableMeta**)para;
+ taosHashCleanup(meta->tagHash);
+ taosHashCleanup(meta->colHash);
+ taosArrayDestroy(meta->tags);
+ taosArrayDestroy(meta->cols);
+ taosMemoryFreeClear(meta->tableMeta);
+ taosMemoryFree(meta);
+}
+
SSmlSTableMeta *smlBuildSTableMeta(bool isDataFormat) {
SSmlSTableMeta *meta = (SSmlSTableMeta *)taosMemoryCalloc(sizeof(SSmlSTableMeta), 1);
if (!meta) {
@@ -245,7 +274,7 @@ SSmlSTableMeta *smlBuildSTableMeta(bool isDataFormat) {
return meta;
cleanup:
- taosMemoryFree(meta);
+  smlDestroySTableMeta(&meta);
return NULL;
}
@@ -813,6 +842,10 @@ static int32_t smlModifyDBSchemas(SSmlHandle *info) {
code = catalogGetSTableMeta(info->pCatalog, &conn, &pName, &pTableMeta);
if (code == TSDB_CODE_PAR_TABLE_NOT_EXIST || code == TSDB_CODE_MND_STB_NOT_EXIST) {
+ code = smlCheckAuth(info, &conn, NULL, AUTH_TYPE_WRITE);
+ if(code != TSDB_CODE_SUCCESS){
+ goto end;
+ }
uDebug("SML:0x%" PRIx64 " smlModifyDBSchemas create table:%s", info->id, pName.tname);
SArray *pColumns = taosArrayInit(taosArrayGetSize(sTableData->cols), sizeof(SField));
SArray *pTags = taosArrayInit(taosArrayGetSize(sTableData->tags), sizeof(SField));
@@ -857,6 +890,10 @@ static int32_t smlModifyDBSchemas(SSmlHandle *info) {
goto end;
}
if (action != SCHEMA_ACTION_NULL) {
+ code = smlCheckAuth(info, &conn, pName.tname, AUTH_TYPE_WRITE);
+ if(code != TSDB_CODE_SUCCESS){
+ goto end;
+ }
uDebug("SML:0x%" PRIx64 " smlModifyDBSchemas change table tag, table:%s, action:%d", info->id, pName.tname,
action);
SArray *pColumns =
@@ -927,6 +964,10 @@ static int32_t smlModifyDBSchemas(SSmlHandle *info) {
goto end;
}
if (action != SCHEMA_ACTION_NULL) {
+ code = smlCheckAuth(info, &conn, pName.tname, AUTH_TYPE_WRITE);
+ if(code != TSDB_CODE_SUCCESS){
+ goto end;
+ }
uDebug("SML:0x%" PRIx64 " smlModifyDBSchemas change table col, table:%s, action:%d", info->id, pName.tname,
action);
SArray *pColumns =
@@ -1004,6 +1045,7 @@ static int32_t smlModifyDBSchemas(SSmlHandle *info) {
}
}
+ taosMemoryFreeClear(sTableData->tableMeta);
sTableData->tableMeta = pTableMeta;
uDebug("SML:0x%" PRIx64 "modify schema uid:%" PRIu64 ", sversion:%d, tversion:%d", info->id, pTableMeta->uid,
pTableMeta->sversion, pTableMeta->tversion) tmp = (SSmlSTableMeta **)taosHashIterate(info->superTables, tmp);
@@ -1062,15 +1104,6 @@ static void smlInsertMeta(SHashObj *metaHash, SArray *metaArray, SArray *cols) {
}
}
-static void smlDestroySTableMeta(SSmlSTableMeta *meta) {
- taosHashCleanup(meta->tagHash);
- taosHashCleanup(meta->colHash);
- taosArrayDestroy(meta->tags);
- taosArrayDestroy(meta->cols);
- taosMemoryFree(meta->tableMeta);
- taosMemoryFree(meta);
-}
-
static int32_t smlUpdateMeta(SHashObj *metaHash, SArray *metaArray, SArray *cols, bool isTag, SSmlMsgBuf *msg) {
for (int i = 0; i < taosArrayGetSize(cols); ++i) {
SSmlKv *kv = (SSmlKv *)taosArrayGet(cols, i);
@@ -1110,7 +1143,8 @@ static int32_t smlUpdateMeta(SHashObj *metaHash, SArray *metaArray, SArray *cols
return TSDB_CODE_SUCCESS;
}
-void smlDestroyTableInfo(SSmlHandle *info, SSmlTableInfo *tag) {
+void smlDestroyTableInfo(void *para) {
+ SSmlTableInfo *tag = *(SSmlTableInfo**)para;
for (size_t i = 0; i < taosArrayGetSize(tag->cols); i++) {
SHashObj *kvHash = (SHashObj *)taosArrayGetP(tag->cols, i);
taosHashCleanup(kvHash);
@@ -1147,18 +1181,18 @@ void smlDestroyInfo(SSmlHandle *info) {
qDestroyQuery(info->pQuery);
// destroy info->childTables
- SSmlTableInfo **oneTable = (SSmlTableInfo **)taosHashIterate(info->childTables, NULL);
- while (oneTable) {
- smlDestroyTableInfo(info, *oneTable);
- oneTable = (SSmlTableInfo **)taosHashIterate(info->childTables, oneTable);
- }
+// SSmlTableInfo **oneTable = (SSmlTableInfo **)taosHashIterate(info->childTables, NULL);
+// while (oneTable) {
+// smlDestroyTableInfo(oneTable);
+// oneTable = (SSmlTableInfo **)taosHashIterate(info->childTables, oneTable);
+// }
// destroy info->superTables
- SSmlSTableMeta **oneSTable = (SSmlSTableMeta **)taosHashIterate(info->superTables, NULL);
- while (oneSTable) {
- smlDestroySTableMeta(*oneSTable);
- oneSTable = (SSmlSTableMeta **)taosHashIterate(info->superTables, oneSTable);
- }
+// SSmlSTableMeta **oneSTable = (SSmlSTableMeta **)taosHashIterate(info->superTables, NULL);
+// while (oneSTable) {
+// smlDestroySTableMeta(*oneSTable);
+// oneSTable = (SSmlSTableMeta **)taosHashIterate(info->superTables, oneSTable);
+// }
// destroy info->pVgHash
taosHashCleanup(info->pVgHash);
@@ -1217,6 +1251,8 @@ SSmlHandle *smlBuildSmlInfo(TAOS *taos) {
info->childTables = taosHashInit(16, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, HASH_NO_LOCK);
info->tableUids = taosHashInit(16, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, HASH_NO_LOCK);
info->superTables = taosHashInit(16, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, HASH_NO_LOCK);
+ taosHashSetFreeFp(info->superTables, smlDestroySTableMeta);
+ taosHashSetFreeFp(info->childTables, smlDestroyTableInfo);
info->id = smlGenId();
info->pQuery = smlInitHandle();
@@ -1323,6 +1359,9 @@ static int32_t smlParseLineBottom(SSmlHandle *info) {
uDebug("SML:0x%" PRIx64 " smlParseLineBottom add meta, format:%d, linenum:%d", info->id, info->dataFormat,
info->lineNum);
SSmlSTableMeta *meta = smlBuildSTableMeta(info->dataFormat);
+ if(meta == NULL){
+ return TSDB_CODE_OUT_OF_MEMORY;
+ }
taosHashPut(info->superTables, elements->measure, elements->measureLen, &meta, POINTER_BYTES);
terrno = 0;
smlInsertMeta(meta->tagHash, meta->tags, tinfo->tags);
@@ -1367,6 +1406,11 @@ static int32_t smlInsertData(SSmlHandle *info) {
conn.requestObjRefId = info->pRequest->self;
conn.mgmtEps = getEpSet_s(&info->taos->pAppInfo->mgmtEp);
+ code = smlCheckAuth(info, &conn, pName.tname, AUTH_TYPE_WRITE);
+ if(code != TSDB_CODE_SUCCESS){
+ return code;
+ }
+
SVgroupInfo vg;
code = catalogGetTableHashVgroup(info->pCatalog, &conn, &pName, &vg);
if (code != TSDB_CODE_SUCCESS) {
@@ -1437,18 +1481,18 @@ static void smlPrintStatisticInfo(SSmlHandle *info) {
int32_t smlClearForRerun(SSmlHandle *info) {
info->reRun = false;
// clear info->childTables
- SSmlTableInfo **oneTable = (SSmlTableInfo **)taosHashIterate(info->childTables, NULL);
- while (oneTable) {
- smlDestroyTableInfo(info, *oneTable);
- oneTable = (SSmlTableInfo **)taosHashIterate(info->childTables, oneTable);
- }
+// SSmlTableInfo **oneTable = (SSmlTableInfo **)taosHashIterate(info->childTables, NULL);
+// while (oneTable) {
+// smlDestroyTableInfo(info, *oneTable);
+// oneTable = (SSmlTableInfo **)taosHashIterate(info->childTables, oneTable);
+// }
// clear info->superTables
- SSmlSTableMeta **oneSTable = (SSmlSTableMeta **)taosHashIterate(info->superTables, NULL);
- while (oneSTable) {
- smlDestroySTableMeta(*oneSTable);
- oneSTable = (SSmlSTableMeta **)taosHashIterate(info->superTables, oneSTable);
- }
+// SSmlSTableMeta **oneSTable = (SSmlSTableMeta **)taosHashIterate(info->superTables, NULL);
+// while (oneSTable) {
+// smlDestroySTableMeta(*oneSTable);
+// oneSTable = (SSmlSTableMeta **)taosHashIterate(info->superTables, oneSTable);
+// }
taosHashClear(info->childTables);
taosHashClear(info->superTables);
@@ -1586,9 +1630,7 @@ static int smlProcess(SSmlHandle *info, char *lines[], char *rawLine, char *rawL
do {
code = smlModifyDBSchemas(info);
- if (code == 0 || code == TSDB_CODE_SML_INVALID_DATA || code == TSDB_CODE_PAR_TOO_MANY_COLUMNS
- || code == TSDB_CODE_PAR_INVALID_TAGS_NUM || code == TSDB_CODE_PAR_INVALID_TAGS_LENGTH
- || code == TSDB_CODE_PAR_INVALID_ROW_LENGTH || code == TSDB_CODE_MND_FIELD_VALUE_OVERFLOW) {
+ if (code != TSDB_CODE_TDB_INVALID_TABLE_SCHEMA_VER && code != TSDB_CODE_SDB_OBJ_CREATING && code != TSDB_CODE_MND_TRANS_CONFLICT) {
break;
}
taosMsleep(100);
diff --git a/source/client/src/clientSmlJson.c b/source/client/src/clientSmlJson.c
index 7ccf9309648583cd9610a6375fc717297fc7352d..0f59505f8c02c64543ebb674c8ad86ae996a3f0f 100644
--- a/source/client/src/clientSmlJson.c
+++ b/source/client/src/clientSmlJson.c
@@ -695,6 +695,10 @@ static int32_t smlParseTagsFromJSON(SSmlHandle *info, cJSON *tags, SSmlLineInfo
return TSDB_CODE_SUCCESS;
}
sMeta = smlBuildSTableMeta(info->dataFormat);
+ if(sMeta == NULL){
+ taosMemoryFreeClear(pTableMeta);
+ return TSDB_CODE_OUT_OF_MEMORY;
+ }
sMeta->tableMeta = pTableMeta;
taosHashPut(info->superTables, elements->measure, elements->measureLen, &sMeta, POINTER_BYTES);
for(int i = pTableMeta->tableInfo.numOfColumns; i < pTableMeta->tableInfo.numOfTags + pTableMeta->tableInfo.numOfColumns; i++){
@@ -784,7 +788,7 @@ static int32_t smlParseTagsFromJSON(SSmlHandle *info, cJSON *tags, SSmlLineInfo
tinfo->tableDataCtx = smlInitTableDataCtx(info->pQuery, info->currSTableMeta);
if (tinfo->tableDataCtx == NULL) {
smlBuildInvalidDataMsg(&info->msgBuf, "smlInitTableDataCtx error", NULL);
- smlDestroyTableInfo(info, tinfo);
+ smlDestroyTableInfo(&tinfo);
return TSDB_CODE_SML_INVALID_DATA;
}
}
@@ -1048,12 +1052,18 @@ static int32_t smlParseJSONExt(SSmlHandle *info, char *payload) {
return TSDB_CODE_TSC_INVALID_JSON;
}
- info->lineNum = payloadNum;
- info->dataFormat = true;
+
if (unlikely(info->lines != NULL)) {
+ for (int i = 0; i < info->lineNum; i++) {
+ taosArrayDestroyEx(info->lines[i].colArray, freeSSmlKv);
+ if (info->lines[i].measureTagsLen != 0) taosMemoryFree(info->lines[i].measureTag);
+ }
taosMemoryFree(info->lines);
info->lines = NULL;
}
+ info->lineNum = payloadNum;
+ info->dataFormat = true;
+
ret = smlClearForRerun(info);
if (ret != TSDB_CODE_SUCCESS) {
return ret;
diff --git a/source/client/src/clientSmlLine.c b/source/client/src/clientSmlLine.c
index e79093398e87275a2eabc9bb382c8987a41f1703..40e014458e815a68de4dccac310ee1ac9442bcaf 100644
--- a/source/client/src/clientSmlLine.c
+++ b/source/client/src/clientSmlLine.c
@@ -168,6 +168,10 @@ static int32_t smlParseTagKv(SSmlHandle *info, char **sql, char *sqlEnd, SSmlLin
return TSDB_CODE_SUCCESS;
}
sMeta = smlBuildSTableMeta(info->dataFormat);
+ if(sMeta == NULL){
+ taosMemoryFreeClear(pTableMeta);
+ return TSDB_CODE_OUT_OF_MEMORY;
+ }
sMeta->tableMeta = pTableMeta;
taosHashPut(info->superTables, currElement->measure, currElement->measureLen, &sMeta, POINTER_BYTES);
for (int i = pTableMeta->tableInfo.numOfColumns;
@@ -326,7 +330,7 @@ static int32_t smlParseTagKv(SSmlHandle *info, char **sql, char *sqlEnd, SSmlLin
info->currSTableMeta->uid = tinfo->uid;
tinfo->tableDataCtx = smlInitTableDataCtx(info->pQuery, info->currSTableMeta);
if (tinfo->tableDataCtx == NULL) {
- smlDestroyTableInfo(info, tinfo);
+ smlDestroyTableInfo(&tinfo);
smlBuildInvalidDataMsg(&info->msgBuf, "smlInitTableDataCtx error", NULL);
return TSDB_CODE_SML_INVALID_DATA;
}
@@ -372,6 +376,10 @@ static int32_t smlParseColKv(SSmlHandle *info, char **sql, char *sqlEnd, SSmlLin
return TSDB_CODE_SUCCESS;
}
*tmp = smlBuildSTableMeta(info->dataFormat);
+ if(*tmp == NULL){
+ taosMemoryFreeClear(pTableMeta);
+ return TSDB_CODE_OUT_OF_MEMORY;
+ }
(*tmp)->tableMeta = pTableMeta;
taosHashPut(info->superTables, currElement->measure, currElement->measureLen, tmp, POINTER_BYTES);
diff --git a/source/client/src/clientSmlTelnet.c b/source/client/src/clientSmlTelnet.c
index 42b8001e5981a781fdf341754dbf07618d342855..b3f45a3107a239ba9f4b38161fd77a0635ca2879 100644
--- a/source/client/src/clientSmlTelnet.c
+++ b/source/client/src/clientSmlTelnet.c
@@ -91,6 +91,10 @@ static int32_t smlParseTelnetTags(SSmlHandle *info, char *data, char *sqlEnd, SS
return TSDB_CODE_SUCCESS;
}
sMeta = smlBuildSTableMeta(info->dataFormat);
+ if(sMeta == NULL){
+ taosMemoryFreeClear(pTableMeta);
+ return TSDB_CODE_OUT_OF_MEMORY;
+ }
sMeta->tableMeta = pTableMeta;
taosHashPut(info->superTables, elements->measure, elements->measureLen, &sMeta, POINTER_BYTES);
for(int i = pTableMeta->tableInfo.numOfColumns; i < pTableMeta->tableInfo.numOfTags + pTableMeta->tableInfo.numOfColumns; i++){
@@ -212,7 +216,7 @@ static int32_t smlParseTelnetTags(SSmlHandle *info, char *data, char *sqlEnd, SS
tinfo->tableDataCtx = smlInitTableDataCtx(info->pQuery, info->currSTableMeta);
if (tinfo->tableDataCtx == NULL) {
smlBuildInvalidDataMsg(&info->msgBuf, "smlInitTableDataCtx error", NULL);
- smlDestroyTableInfo(info, tinfo);
+ smlDestroyTableInfo(&tinfo);
return TSDB_CODE_SML_INVALID_DATA;
}
}
diff --git a/source/client/src/clientTmq.c b/source/client/src/clientTmq.c
index aff5846092348f5ec7e77938e7a0f11c764affe4..e1b2b9c48b28ebb658befc73792f8d289aef5c51 100644
--- a/source/client/src/clientTmq.c
+++ b/source/client/src/clientTmq.c
@@ -27,6 +27,8 @@
#define EMPTY_BLOCK_POLL_IDLE_DURATION 10
#define DEFAULT_AUTO_COMMIT_INTERVAL 5000
+#define OFFSET_IS_RESET_OFFSET(_of) ((_of) < 0)
+
typedef void (*__tmq_askep_fn_t)(tmq_t* pTmq, int32_t code, SDataBuf* pBuf, void* pParam);
struct SMqMgmt {
@@ -87,6 +89,7 @@ struct tmq_t {
void* commitCbUserParam;
// status
+ SRWLatch lock;
int8_t status;
int32_t epoch;
#if 0
@@ -146,10 +149,10 @@ typedef struct {
SVgOffsetInfo offsetInfo;
int32_t vgId;
int32_t vgStatus;
- int32_t vgSkipCnt; // here used to mark the slow vgroups
- bool receivedInfoFromVnode;// has already received info from vnode
- int64_t emptyBlockReceiveTs; // once empty block is received, idle for ignoreCnt then start to poll data
- bool seekUpdated; // offset is updated by seek operator, therefore, not update by vnode rsp.
+ int32_t vgSkipCnt; // here used to mark the slow vgroups
+ bool receivedInfoFromVnode; // has already received info from vnode
+ int64_t emptyBlockReceiveTs; // once empty block is received, idle for ignoreCnt then start to poll data
+ bool seekUpdated; // offset is updated by seek operator, therefore, not update by vnode rsp.
SEpSet epSet;
} SMqClientVg;
@@ -164,6 +167,7 @@ typedef struct {
int8_t tmqRspType;
int32_t epoch; // epoch can be used to guard the vgHandle
int32_t vgId;
+ char topicName[TSDB_TOPIC_FNAME_LEN];
SMqClientVg* vgHandle;
SMqClientTopic* topicHandle;
uint64_t reqId;
@@ -176,8 +180,8 @@ typedef struct {
} SMqPollRspWrapper;
typedef struct {
- int64_t refId;
- int32_t epoch;
+// int64_t refId;
+// int32_t epoch;
tsem_t rspSem;
int32_t rspErr;
} SMqSubscribeCbParam;
@@ -192,8 +196,9 @@ typedef struct {
typedef struct {
int64_t refId;
int32_t epoch;
- SMqClientVg* pVg;
- SMqClientTopic* pTopic;
+ char topicName[TSDB_TOPIC_FNAME_LEN];
+// SMqClientVg* pVg;
+// SMqClientTopic* pTopic;
int32_t vgId;
uint64_t requestId; // request id for debug purpose
} SMqPollCbParam;
@@ -741,8 +746,8 @@ static void generateTimedTask(int64_t refId, int32_t type) {
*pTaskType = type;
taosWriteQitem(tmq->delayedTask, pTaskType);
tsem_post(&tmq->rspSem);
+ taosReleaseRef(tmqMgmt.rsetId, refId);
}
- taosReleaseRef(tmqMgmt.rsetId, refId);
}
void tmqAssignAskEpTask(void* param, void* tmrId) {
@@ -1069,6 +1074,7 @@ tmq_t* tmq_consumer_new(tmq_conf_t* conf, char* errstr, int32_t errstrLen) {
pTmq->commitCb = conf->commitCb;
pTmq->commitCbUserParam = conf->commitCbUserParam;
pTmq->resetOffsetCfg = conf->resetOffset;
+ taosInitRWLatch(&pTmq->lock);
pTmq->hbBgEnable = conf->hbBgEnable;
@@ -1117,7 +1123,7 @@ _failed:
}
int32_t tmq_subscribe(tmq_t* tmq, const tmq_list_t* topic_list) {
- const int32_t MAX_RETRY_COUNT = 120 * 4; // let's wait for 4 mins at most
+  const int32_t MAX_RETRY_COUNT = 120 * 60;  // 120*60 retries at 500ms each: wait for up to 1 hour
const SArray* container = &topic_list->container;
int32_t sz = taosArrayGetSize(container);
void* buf = NULL;
@@ -1170,7 +1176,7 @@ int32_t tmq_subscribe(tmq_t* tmq, const tmq_list_t* topic_list) {
goto FAIL;
}
- SMqSubscribeCbParam param = { .rspErr = 0, .refId = tmq->refId, .epoch = tmq->epoch };
+ SMqSubscribeCbParam param = { .rspErr = 0};
if (tsem_init(¶m.rspSem, 0, 0) != 0) {
code = TSDB_CODE_TSC_INTERNAL_ERROR;
goto FAIL;
@@ -1204,8 +1210,8 @@ int32_t tmq_subscribe(tmq_t* tmq, const tmq_list_t* topic_list) {
int32_t retryCnt = 0;
while (TSDB_CODE_MND_CONSUMER_NOT_READY == doAskEp(tmq)) {
if (retryCnt++ > MAX_RETRY_COUNT) {
- tscError("consumer:0x%" PRIx64 ", mnd not ready for subscribe, max retry reached:%d", tmq->consumerId, retryCnt);
- code = TSDB_CODE_TSC_INTERNAL_ERROR;
+ tscError("consumer:0x%" PRIx64 ", mnd not ready for subscribe, retry:%d in 500ms", tmq->consumerId, retryCnt);
+ code = TSDB_CODE_MND_CONSUMER_NOT_READY;
goto FAIL;
}
@@ -1240,12 +1246,40 @@ void tmq_conf_set_auto_commit_cb(tmq_conf_t* conf, tmq_commit_cb* cb, void* para
conf->commitCbUserParam = param;
}
-static int32_t tmqPollCb(void* param, SDataBuf* pMsg, int32_t code) {
+static SMqClientVg* getVgInfo(tmq_t* tmq, char* topicName, int32_t vgId){
+ int32_t topicNumCur = taosArrayGetSize(tmq->clientTopics);
+ for(int i = 0; i < topicNumCur; i++){
+ SMqClientTopic* pTopicCur = taosArrayGet(tmq->clientTopics, i);
+ if(strcmp(pTopicCur->topicName, topicName) == 0){
+ int32_t vgNumCur = taosArrayGetSize(pTopicCur->vgs);
+ for (int32_t j = 0; j < vgNumCur; j++) {
+ SMqClientVg* pVgCur = taosArrayGet(pTopicCur->vgs, j);
+ if(pVgCur->vgId == vgId){
+ return pVgCur;
+ }
+ }
+ }
+ }
+ return NULL;
+}
+
+static SMqClientTopic* getTopicInfo(tmq_t* tmq, char* topicName){
+ int32_t topicNumCur = taosArrayGetSize(tmq->clientTopics);
+ for(int i = 0; i < topicNumCur; i++){
+ SMqClientTopic* pTopicCur = taosArrayGet(tmq->clientTopics, i);
+ if(strcmp(pTopicCur->topicName, topicName) == 0){
+ return pTopicCur;
+ }
+ }
+ return NULL;
+}
+
+int32_t tmqPollCb(void* param, SDataBuf* pMsg, int32_t code) {
SMqPollCbParam* pParam = (SMqPollCbParam*)param;
int64_t refId = pParam->refId;
- SMqClientVg* pVg = pParam->pVg;
- SMqClientTopic* pTopic = pParam->pTopic;
+// SMqClientVg* pVg = pParam->pVg;
+// SMqClientTopic* pTopic = pParam->pTopic;
tmq_t* tmq = taosAcquireRef(tmqMgmt.rsetId, refId);
if (tmq == NULL) {
@@ -1260,15 +1294,13 @@ static int32_t tmqPollCb(void* param, SDataBuf* pMsg, int32_t code) {
int32_t vgId = pParam->vgId;
uint64_t requestId = pParam->requestId;
- taosMemoryFree(pParam);
-
if (code != 0) {
if (pMsg->pData) taosMemoryFree(pMsg->pData);
if (pMsg->pEpSet) taosMemoryFree(pMsg->pEpSet);
// in case of consumer mismatch, wait for 500ms and retry
if (code == TSDB_CODE_TMQ_CONSUMER_MISMATCH) {
- taosMsleep(500);
+// taosMsleep(500);
atomic_store_8(&tmq->status, TMQ_CONSUMER_STATUS__RECOVER);
tscDebug("consumer:0x%" PRIx64 " wait for the re-balance, wait for 500ms and set status to be RECOVER",
tmq->consumerId);
@@ -1282,8 +1314,8 @@ static int32_t tmqPollCb(void* param, SDataBuf* pMsg, int32_t code) {
pRspWrapper->tmqRspType = TMQ_MSG_TYPE__END_RSP;
taosWriteQitem(tmq->mqueue, pRspWrapper);
- } else if (code == TSDB_CODE_WAL_LOG_NOT_EXIST) { // poll data while insert
- taosMsleep(500);
+// } else if (code == TSDB_CODE_WAL_LOG_NOT_EXIST) { // poll data while insert
+// taosMsleep(5);
} else{
tscError("consumer:0x%" PRIx64 " msg from vgId:%d discarded, epoch %d, since %s, reqId:0x%" PRIx64, tmq->consumerId,
vgId, epoch, tstrerror(code), requestId);
@@ -1305,6 +1337,8 @@ static int32_t tmqPollCb(void* param, SDataBuf* pMsg, int32_t code) {
taosMemoryFree(pMsg->pData);
taosMemoryFree(pMsg->pEpSet);
+ taosMemoryFree(pParam);
+
return 0;
}
@@ -1326,11 +1360,12 @@ static int32_t tmqPollCb(void* param, SDataBuf* pMsg, int32_t code) {
}
pRspWrapper->tmqRspType = rspType;
- pRspWrapper->vgHandle = pVg;
- pRspWrapper->topicHandle = pTopic;
+// pRspWrapper->vgHandle = pVg;
+// pRspWrapper->topicHandle = pTopic;
pRspWrapper->reqId = requestId;
pRspWrapper->pEpset = pMsg->pEpSet;
- pRspWrapper->vgId = pVg->vgId;
+ pRspWrapper->vgId = vgId;
+ strcpy(pRspWrapper->topicName, pParam->topicName);
pMsg->pEpSet = NULL;
if (rspType == TMQ_MSG_TYPE__POLL_RSP) {
@@ -1369,16 +1404,23 @@ static int32_t tmqPollCb(void* param, SDataBuf* pMsg, int32_t code) {
tsem_post(&tmq->rspSem);
taosReleaseRef(tmqMgmt.rsetId, refId);
+ taosMemoryFree(pParam);
return 0;
CREATE_MSG_FAIL:
if (epoch == tmq->epoch) {
- atomic_store_32(&pVg->vgStatus, TMQ_VG_STATUS__IDLE);
+ taosWLockLatch(&tmq->lock);
+ SMqClientVg* pVg = getVgInfo(tmq, pParam->topicName, vgId);
+ if(pVg){
+ atomic_store_32(&pVg->vgStatus, TMQ_VG_STATUS__IDLE);
+ }
+ taosWUnLockLatch(&tmq->lock);
}
tsem_post(&tmq->rspSem);
taosReleaseRef(tmqMgmt.rsetId, refId);
+ taosMemoryFree(pParam);
return -1;
}
@@ -1501,11 +1543,13 @@ static bool doUpdateLocalEp(tmq_t* tmq, int32_t epoch, const SMqAskEpRsp* pRsp)
taosHashCleanup(pVgOffsetHashMap);
+ taosWLockLatch(&tmq->lock);
// destroy current buffered existed topics info
if (tmq->clientTopics) {
taosArrayDestroyEx(tmq->clientTopics, freeClientVgInfo);
}
tmq->clientTopics = newTopics;
+ taosWUnLockLatch(&tmq->lock);
int8_t flag = (topicNumGet == 0) ? TMQ_CONSUMER_STATUS__NO_TOPIC : TMQ_CONSUMER_STATUS__READY;
atomic_store_8(&tmq->status, flag);
@@ -1521,7 +1565,7 @@ int32_t askEpCallbackFn(void* param, SDataBuf* pMsg, int32_t code) {
if (tmq == NULL) {
terrno = TSDB_CODE_TMQ_CONSUMER_CLOSED;
- pParam->pUserFn(tmq, terrno, NULL, pParam->pParam);
+// pParam->pUserFn(tmq, terrno, NULL, pParam->pParam);
taosMemoryFree(pMsg->pData);
taosMemoryFree(pMsg->pEpSet);
@@ -1680,8 +1724,9 @@ static int32_t doTmqPollImpl(tmq_t* pTmq, SMqClientTopic* pTopic, SMqClientVg* p
pParam->refId = pTmq->refId;
pParam->epoch = pTmq->epoch;
- pParam->pVg = pVg;
- pParam->pTopic = pTopic;
+// pParam->pVg = pVg; // pVg may be released,fix it
+// pParam->pTopic = pTopic;
+ strcpy(pParam->topicName, pTopic->topicName);
pParam->vgId = pVg->vgId;
pParam->requestId = req.reqId;
@@ -1716,6 +1761,9 @@ static int32_t doTmqPollImpl(tmq_t* pTmq, SMqClientTopic* pTopic, SMqClientVg* p
// broadcast the poll request to all related vnodes
static int32_t tmqPollImpl(tmq_t* tmq, int64_t timeout) {
+ if(atomic_load_8(&tmq->status) == TMQ_CONSUMER_STATUS__RECOVER){
+ return 0;
+ }
int32_t numOfTopics = taosArrayGetSize(tmq->clientTopics);
tscDebug("consumer:0x%" PRIx64 " start to poll data, numOfTopics:%d", tmq->consumerId, numOfTopics);
@@ -1800,8 +1848,14 @@ static void* tmqHandleAllRsp(tmq_t* tmq, int64_t timeout, bool pollIfReset) {
SMqDataRsp* pDataRsp = &pollRspWrapper->dataRsp;
if (pDataRsp->head.epoch == consumerEpoch) {
- SMqClientVg* pVg = pollRspWrapper->vgHandle;
-
+ SMqClientVg* pVg = getVgInfo(tmq, pollRspWrapper->topicName, pollRspWrapper->vgId);
+ pollRspWrapper->vgHandle = pVg;
+ pollRspWrapper->topicHandle = getTopicInfo(tmq, pollRspWrapper->topicName);
+ if(pollRspWrapper->vgHandle == NULL || pollRspWrapper->topicHandle == NULL){
+ tscError("consumer:0x%" PRIx64 " get vg or topic error, topic:%s vgId:%d", tmq->consumerId,
+ pollRspWrapper->topicName, pollRspWrapper->vgId);
+ return NULL;
+ }
// update the epset
if (pollRspWrapper->pEpset != NULL) {
SEp* pEp = GET_ACTIVE_EP(pollRspWrapper->pEpset);
@@ -1814,7 +1868,10 @@ static void* tmqHandleAllRsp(tmq_t* tmq, int64_t timeout, bool pollIfReset) {
// update the local offset value only for the returned values, only when the local offset is NOT updated
// by tmq_offset_seek function
if (!pVg->seekUpdated) {
+ tscDebug("consumer:0x%" PRIx64" local offset is update, since seekupdate not set", tmq->consumerId);
pVg->offsetInfo.currentOffset = pDataRsp->rspOffset;
+ } else {
+ tscDebug("consumer:0x%" PRIx64" local offset is NOT update, since seekupdate is set", tmq->consumerId);
}
// update the status
@@ -1860,8 +1917,16 @@ static void* tmqHandleAllRsp(tmq_t* tmq, int64_t timeout, bool pollIfReset) {
tscDebug("consumer:0x%" PRIx64 " process meta rsp", tmq->consumerId);
if (pollRspWrapper->metaRsp.head.epoch == consumerEpoch) {
- SMqClientVg* pVg = pollRspWrapper->vgHandle;
- if (!pVg->seekUpdated) {
+ SMqClientVg* pVg = getVgInfo(tmq, pollRspWrapper->topicName, pollRspWrapper->vgId);
+ pollRspWrapper->vgHandle = pVg;
+ pollRspWrapper->topicHandle = getTopicInfo(tmq, pollRspWrapper->topicName);
+ if(pollRspWrapper->vgHandle == NULL || pollRspWrapper->topicHandle == NULL){
+ tscError("consumer:0x%" PRIx64 " get vg or topic error, topic:%s vgId:%d", tmq->consumerId,
+ pollRspWrapper->topicName, pollRspWrapper->vgId);
+ return NULL;
+ }
+
+ if(pollRspWrapper->metaRsp.rspOffset.type != 0){ // if offset is validate
pVg->offsetInfo.currentOffset = pollRspWrapper->metaRsp.rspOffset;
}
@@ -1881,9 +1946,24 @@ static void* tmqHandleAllRsp(tmq_t* tmq, int64_t timeout, bool pollIfReset) {
int32_t consumerEpoch = atomic_load_32(&tmq->epoch);
if (pollRspWrapper->taosxRsp.head.epoch == consumerEpoch) {
- SMqClientVg* pVg = pollRspWrapper->vgHandle;
- if (!pVg->seekUpdated) { // if offset is validate
- pVg->offsetInfo.currentOffset = pollRspWrapper->taosxRsp.rspOffset;
+ SMqClientVg* pVg = getVgInfo(tmq, pollRspWrapper->topicName, pollRspWrapper->vgId);
+ pollRspWrapper->vgHandle = pVg;
+ pollRspWrapper->topicHandle = getTopicInfo(tmq, pollRspWrapper->topicName);
+ if(pollRspWrapper->vgHandle == NULL || pollRspWrapper->topicHandle == NULL){
+ tscError("consumer:0x%" PRIx64 " get vg or topic error, topic:%s vgId:%d", tmq->consumerId,
+ pollRspWrapper->topicName, pollRspWrapper->vgId);
+ return NULL;
+ }
+
+ // update the local offset value only for the returned values, only when the local offset is NOT updated
+ // by tmq_offset_seek function
+ if (!pVg->seekUpdated) {
+ if(pollRspWrapper->taosxRsp.rspOffset.type != 0) { // if offset is validate
+ tscDebug("consumer:0x%" PRIx64" local offset is update, since seekupdate not set", tmq->consumerId);
+ pVg->offsetInfo.currentOffset = pollRspWrapper->taosxRsp.rspOffset;
+ }
+ } else {
+ tscDebug("consumer:0x%" PRIx64" local offset is NOT update, since seekupdate is set", tmq->consumerId);
}
atomic_store_32(&pVg->vgStatus, TMQ_VG_STATUS__IDLE);
@@ -2626,12 +2706,12 @@ int32_t tmq_offset_seek(tmq_t* tmq, const char* pTopicName, int32_t vgId, int64_
SVgOffsetInfo* pOffsetInfo = &pVg->offsetInfo;
int32_t type = pOffsetInfo->currentOffset.type;
- if (type != TMQ_OFFSET__LOG) {
+ if (type != TMQ_OFFSET__LOG && !OFFSET_IS_RESET_OFFSET(type)) {
tscError("consumer:0x%" PRIx64 " offset type:%d not wal version, seek not allowed", tmq->consumerId, type);
return TSDB_CODE_INVALID_PARA;
}
- if (offset < pOffsetInfo->walVerBegin || offset > pOffsetInfo->walVerEnd) {
+ if (type == TMQ_OFFSET__LOG && (offset < pOffsetInfo->walVerBegin || offset > pOffsetInfo->walVerEnd)) {
tscError("consumer:0x%" PRIx64 " invalid seek params, offset:%" PRId64 ", valid range:[%" PRId64 ", %" PRId64 "]",
tmq->consumerId, offset, pOffsetInfo->walVerBegin, pOffsetInfo->walVerEnd);
return TSDB_CODE_INVALID_PARA;
diff --git a/source/client/src/clientTmqConnector.c b/source/client/src/clientTmqConnector.c
index 894c51d13c9dd8e4205763893206de7ef86cdf7c..6ec82aa6ef0b5697e098d573935cec6e23e2f08c 100644
--- a/source/client/src/clientTmqConnector.c
+++ b/source/client/src/clientTmqConnector.c
@@ -17,9 +17,16 @@
#include "jniCommon.h"
#include "taos.h"
-int __init_tmq = 0;
+int __init_tmq = 0;
jmethodID g_offsetCallback;
+jclass g_assignmentClass;
+jmethodID g_assignmentConstructor;
+jmethodID g_assignmentSetVgId;
+jmethodID g_assignmentSetCurrentOffset;
+jmethodID g_assignmentSetBegin;
+jmethodID g_assignmentSetEnd;
+
void tmqGlobalMethod(JNIEnv *env) {
// make sure init function executed once
switch (atomic_val_compare_exchange_32(&__init_tmq, 0, 1)) {
@@ -46,6 +53,38 @@ void tmqGlobalMethod(JNIEnv *env) {
jniDebug("tmq method register finished");
}
+int __init_assignment = 0;
+void tmqAssignmentMethod(JNIEnv *env) {
+ // make sure init function executed once
+ switch (atomic_val_compare_exchange_32(&__init_assignment, 0, 1)) {
+ case 0:
+ break;
+ case 1:
+ do {
+ taosMsleep(0);
+ } while (atomic_load_32(&__init_assignment) == 1);
+ case 2:
+ return;
+ }
+
+ if (g_vm == NULL) {
+ (*env)->GetJavaVM(env, &g_vm);
+ }
+
+ jclass assignment = (*env)->FindClass(env, "com/taosdata/jdbc/tmq/Assignment");
+ g_assignmentClass = (*env)->NewGlobalRef(env, assignment);
+ g_assignmentConstructor = (*env)->GetMethodID(env, g_assignmentClass, "", "()V");
+ g_assignmentSetVgId = (*env)->GetMethodID(env, g_assignmentClass, "setVgId", "(I)V"); // int
+ g_assignmentSetCurrentOffset = (*env)->GetMethodID(env, g_assignmentClass, "setCurrentOffset", "(J)V"); // long
+ g_assignmentSetBegin = (*env)->GetMethodID(env, g_assignmentClass, "setBegin", "(J)V"); // long
+ g_assignmentSetEnd = (*env)->GetMethodID(env, g_assignmentClass, "setEnd", "(J)V"); // long
+
+ (*env)->DeleteLocalRef(env, assignment);
+
+ atomic_store_32(&__init_assignment, 2);
+ jniDebug("tmq method assignment finished");
+}
+
// deprecated
void commit_cb(tmq_t *tmq, int32_t code, void *param) {
JNIEnv *env = NULL;
@@ -266,8 +305,9 @@ JNIEXPORT void JNICALL Java_com_taosdata_jdbc_tmq_TMQConnector_tmqCommitAsync(JN
tmq_commit_async(tmq, res, commit_cb, consumer);
}
-JNIEXPORT void JNICALL Java_com_taosdata_jdbc_tmq_TMQConnector_consumerCommitAsync(JNIEnv *env, jobject jobj, jlong jtmq,
- jlong jres, jobject offset) {
+JNIEXPORT void JNICALL Java_com_taosdata_jdbc_tmq_TMQConnector_consumerCommitAsync(JNIEnv *env, jobject jobj,
+ jlong jtmq, jlong jres,
+ jobject offset) {
tmqGlobalMethod(env);
tmq_t *tmq = (tmq_t *)jtmq;
if (tmq == NULL) {
@@ -335,7 +375,7 @@ JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_tmq_TMQConnector_tmqGetVgroupId(JN
TAOS_RES *res = (TAOS_RES *)jres;
if (res == NULL) {
jniDebug("jobj:%p, invalid res handle", jobj);
- return -1;
+ return JNI_RESULT_SET_NULL;
}
return tmq_get_vgroup_id(res);
}
@@ -350,6 +390,15 @@ JNIEXPORT jstring JNICALL Java_com_taosdata_jdbc_tmq_TMQConnector_tmqGetTableNam
return (*env)->NewStringUTF(env, tmq_get_table_name(res));
}
+JNIEXPORT jlong JNICALL Java_com_taosdata_jdbc_tmq_TMQConnector_tmqGetOffset(JNIEnv *env, jobject jobj, jlong jres) {
+ TAOS_RES *res = (TAOS_RES *)jres;
+ if (res == NULL) {
+ jniDebug("jobj:%p, invalid res handle", jobj);
+ return JNI_RESULT_SET_NULL;
+ }
+ return tmq_get_vgroup_offset(res);
+}
+
JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_tmq_TMQConnector_fetchRawBlockImp(JNIEnv *env, jobject jobj, jlong con,
jlong res, jobject rowobj,
jobject arrayListObj) {
@@ -369,7 +418,8 @@ JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_tmq_TMQConnector_fetchRawBlockImp(
jniDebug("jobj:%p, conn:%p, resultset:%p, no data to retrieve", jobj, tscon, (void *)res);
return JNI_FETCH_END;
} else {
- jniError("jobj:%p, conn:%p, query interrupted, tmq fetch block error code:%d, msg:%s", jobj, tscon, error_code, taos_errstr(tres));
+ jniError("jobj:%p, conn:%p, query interrupted, tmq fetch block error code:%d, msg:%s", jobj, tscon, error_code,
+ taos_errstr(tres));
return JNI_RESULT_SET_NULL;
}
}
@@ -399,3 +449,72 @@ JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_tmq_TMQConnector_fetchRawBlockImp(
(*env)->CallVoidMethod(env, rowobj, g_blockdataSetByteArrayFp, jniFromNCharToByteArray(env, (char *)data, len));
return JNI_SUCCESS;
}
+
+JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_tmq_TMQConnector_tmqSeekImp(JNIEnv *env, jobject jobj, jlong jtmq,
+ jstring jtopic, jint partition,
+ jlong offset) {
+ tmq_t *tmq = (tmq_t *)jtmq;
+ if (tmq == NULL) {
+ jniDebug("jobj:%p, tmq is closed", jobj);
+ return TMQ_CONSUMER_NULL;
+ }
+
+ if (jtopic == NULL) {
+ jniDebug("jobj:%p, topic is null", jobj);
+ return TMQ_TOPIC_NULL;
+ }
+ const char *topicName = (*env)->GetStringUTFChars(env, jtopic, NULL);
+
+ int32_t res = tmq_offset_seek(tmq, topicName, partition, offset);
+
+ if (res != TSDB_CODE_SUCCESS) {
+ jniError("jobj:%p, tmq seek error, code:%d, msg:%s", jobj, res, tmq_err2str(res));
+ }
+
+ (*env)->ReleaseStringUTFChars(env, jtopic, topicName);
+ return (jint)res;
+}
+
+JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_tmq_TMQConnector_tmqGetTopicAssignmentImp(JNIEnv *env, jobject jobj,
+ jlong jtmq, jstring jtopic,
+ jobject jarrayList) {
+ tmqAssignmentMethod(env);
+ tmq_t *tmq = (tmq_t *)jtmq;
+ if (tmq == NULL) {
+ jniDebug("jobj:%p, tmq is closed", jobj);
+ return TMQ_CONSUMER_NULL;
+ }
+
+ if (jtopic == NULL) {
+ jniDebug("jobj:%p, topic is null", jobj);
+ return TMQ_TOPIC_NULL;
+ }
+
+ const char *topicName = (*env)->GetStringUTFChars(env, jtopic, NULL);
+
+ tmq_topic_assignment *pAssign = NULL;
+ int32_t numOfAssignment = 0;
+ int32_t res = tmq_get_topic_assignment(tmq, topicName, &pAssign, &numOfAssignment);
+
+ if (res != TSDB_CODE_SUCCESS) {
+ (*env)->ReleaseStringUTFChars(env, jtopic, topicName);
+ jniError("jobj:%p, tmq get topic assignment error, topic:%s, code:%d, msg:%s", jobj, topicName, res,
+ tmq_err2str(res));
+ tmq_free_assignment(pAssign);
+ return (jint)res;
+ }
+
+ (*env)->ReleaseStringUTFChars(env, jtopic, topicName);
+
+ for (int i = 0; i < numOfAssignment; ++i) {
+ tmq_topic_assignment assignment = pAssign[i];
+ jobject jassignment = (*env)->NewObject(env, g_assignmentClass, g_assignmentConstructor);
+ (*env)->CallVoidMethod(env, jassignment, g_assignmentSetVgId, assignment.vgId);
+ (*env)->CallVoidMethod(env, jassignment, g_assignmentSetCurrentOffset, assignment.currentOffset);
+ (*env)->CallVoidMethod(env, jassignment, g_assignmentSetBegin, assignment.begin);
+ (*env)->CallVoidMethod(env, jassignment, g_assignmentSetEnd, assignment.end);
+ (*env)->CallBooleanMethod(env, jarrayList, g_arrayListAddFp, jassignment);
+ }
+ tmq_free_assignment(pAssign);
+ return JNI_SUCCESS;
+}
diff --git a/source/client/test/clientTests.cpp b/source/client/test/clientTests.cpp
index b04727bfc030ed9d2181462725778a75c6202d99..ccc17289b01414f41bc9959fd2ba8fd7cd7f8061 100644
--- a/source/client/test/clientTests.cpp
+++ b/source/client/test/clientTests.cpp
@@ -1100,7 +1100,7 @@ TEST(clientCase, sub_tb_test) {
// 创建订阅 topics 列表
tmq_list_t* topicList = tmq_list_new();
- tmq_list_append(topicList, "topic_t1");
+ tmq_list_append(topicList, "t1");
// 启动订阅
tmq_subscribe(tmq, topicList);
@@ -1118,7 +1118,7 @@ TEST(clientCase, sub_tb_test) {
tmq_topic_assignment* pAssign = NULL;
int32_t numOfAssign = 0;
- int32_t code = tmq_get_topic_assignment(tmq, "topic_t1", &pAssign, &numOfAssign);
+ int32_t code = tmq_get_topic_assignment(tmq, "t1", &pAssign, &numOfAssign);
if (code != 0) {
printf("error occurs:%s\n", tmq_err2str(code));
tmq_consumer_close(tmq);
@@ -1127,7 +1127,16 @@ TEST(clientCase, sub_tb_test) {
return;
}
- tmq_offset_seek(tmq, "topic_t1", pAssign[0].vgId, 0);
+ tmq_offset_seek(tmq, "t1", pAssign[0].vgId, 4);
+
+ code = tmq_get_topic_assignment(tmq, "t1", &pAssign, &numOfAssign);
+ if (code != 0) {
+ printf("error occurs:%s\n", tmq_err2str(code));
+ tmq_consumer_close(tmq);
+ taos_close(pConn);
+ fprintf(stderr, "%d msg consumed, include %d rows\n", msgCnt, totalRows);
+ return;
+ }
while (1) {
TAOS_RES* pRes = tmq_consumer_poll(tmq, timeout);
diff --git a/source/common/src/tdatablock.c b/source/common/src/tdatablock.c
index 311c79381cc8c7919beaea26e120c3ea08e8a11f..033fbb0ef164551f24e95be6e40b0844d763f2e9 100644
--- a/source/common/src/tdatablock.c
+++ b/source/common/src/tdatablock.c
@@ -23,6 +23,20 @@
int32_t colDataGetLength(const SColumnInfoData* pColumnInfoData, int32_t numOfRows) {
if (IS_VAR_DATA_TYPE(pColumnInfoData->info.type)) {
+ if (pColumnInfoData->reassigned) {
+ int32_t totalSize = 0;
+ for (int32_t row = 0; row < numOfRows; ++row) {
+ char* pColData = pColumnInfoData->pData + pColumnInfoData->varmeta.offset[row];
+ int32_t colSize = 0;
+ if (pColumnInfoData->info.type == TSDB_DATA_TYPE_JSON) {
+ colSize = getJsonValueLen(pColData);
+ } else {
+ colSize = varDataTLen(pColData);
+ }
+ totalSize += colSize;
+ }
+ return totalSize;
+ }
return pColumnInfoData->varmeta.length;
} else {
if (pColumnInfoData->info.type == TSDB_DATA_TYPE_NULL) {
@@ -126,6 +140,29 @@ int32_t colDataSetVal(SColumnInfoData* pColumnInfoData, uint32_t rowIndex, const
return 0;
}
+int32_t colDataReassignVal(SColumnInfoData* pColumnInfoData, uint32_t dstRowIdx, uint32_t srcRowIdx, const char* pData) {
+ int32_t type = pColumnInfoData->info.type;
+ if (IS_VAR_DATA_TYPE(type)) {
+ int32_t dataLen = 0;
+ if (type == TSDB_DATA_TYPE_JSON) {
+ dataLen = getJsonValueLen(pData);
+ } else {
+ dataLen = varDataTLen(pData);
+ }
+
+ SVarColAttr* pAttr = &pColumnInfoData->varmeta;
+
+ pColumnInfoData->varmeta.offset[dstRowIdx] = pColumnInfoData->varmeta.offset[srcRowIdx];
+ pColumnInfoData->reassigned = true;
+ } else {
+ memcpy(pColumnInfoData->pData + pColumnInfoData->info.bytes * dstRowIdx, pData, pColumnInfoData->info.bytes);
+ colDataClearNull_f(pColumnInfoData->nullbitmap, dstRowIdx);
+ }
+
+ return 0;
+}
+
+
int32_t colDataReserve(SColumnInfoData* pColumnInfoData, size_t newSize) {
if (!IS_VAR_DATA_TYPE(pColumnInfoData->info.type)) {
return TSDB_CODE_SUCCESS;
@@ -580,8 +617,22 @@ int32_t blockDataToBuf(char* buf, const SSDataBlock* pBlock) {
*(int32_t*)pStart = dataSize;
pStart += sizeof(int32_t);
- memcpy(pStart, pCol->pData, dataSize);
- pStart += dataSize;
+ if (pCol->reassigned && IS_VAR_DATA_TYPE(pCol->info.type)) {
+ for (int32_t row = 0; row < numOfRows; ++row) {
+ char* pColData = pCol->pData + pCol->varmeta.offset[row];
+ int32_t colSize = 0;
+ if (pCol->info.type == TSDB_DATA_TYPE_JSON) {
+ colSize = getJsonValueLen(pColData);
+ } else {
+ colSize = varDataTLen(pColData);
+ }
+ memcpy(pStart, pColData, colSize);
+ pStart += colSize;
+ }
+ } else {
+ memcpy(pStart, pCol->pData, dataSize);
+ pStart += dataSize;
+ }
}
return 0;
@@ -1741,7 +1792,20 @@ int32_t tEncodeDataBlock(void** buf, const SSDataBlock* pBlock) {
int32_t len = colDataGetLength(pColData, rows);
tlen += taosEncodeFixedI32(buf, len);
- tlen += taosEncodeBinary(buf, pColData->pData, len);
+ if (pColData->reassigned && IS_VAR_DATA_TYPE(pColData->info.type)) {
+ for (int32_t row = 0; row < rows; ++row) {
+ char* pData = pColData->pData + pColData->varmeta.offset[row];
+ int32_t colSize = 0;
+ if (pColData->info.type == TSDB_DATA_TYPE_JSON) {
+ colSize = getJsonValueLen(pData);
+ } else {
+ colSize = varDataTLen(pData);
+ }
+ tlen += taosEncodeBinary(buf, pData, colSize);
+ }
+ } else {
+ tlen += taosEncodeBinary(buf, pColData->pData, len);
+ }
}
return tlen;
}
@@ -2401,19 +2465,31 @@ _end:
}
char* buildCtbNameByGroupId(const char* stbFullName, uint64_t groupId) {
- if (stbFullName[0] == 0) {
+ char* pBuf = taosMemoryCalloc(1, TSDB_TABLE_NAME_LEN + 1);
+ if (!pBuf) {
+ return NULL;
+ }
+ int32_t code = buildCtbNameByGroupIdImpl(stbFullName, groupId, pBuf);
+ if (code != TSDB_CODE_SUCCESS) {
+ taosMemoryFree(pBuf);
return NULL;
}
+ return pBuf;
+}
+
+int32_t buildCtbNameByGroupIdImpl(const char* stbFullName, uint64_t groupId, char* cname) {
+ if (stbFullName[0] == 0) {
+ return TSDB_CODE_FAILED;
+ }
SArray* tags = taosArrayInit(0, sizeof(SSmlKv));
if (tags == NULL) {
- return NULL;
+ return TSDB_CODE_FAILED;
}
- void* cname = taosMemoryCalloc(1, TSDB_TABLE_NAME_LEN + 1);
if (cname == NULL) {
taosArrayDestroy(tags);
- return NULL;
+ return TSDB_CODE_FAILED;
}
SSmlKv pTag = {.key = "group_id",
@@ -2435,9 +2511,9 @@ char* buildCtbNameByGroupId(const char* stbFullName, uint64_t groupId) {
taosArrayDestroy(tags);
if ((rname.ctbShortName && rname.ctbShortName[0]) == 0) {
- return NULL;
+ return TSDB_CODE_FAILED;
}
- return rname.ctbShortName;
+ return TSDB_CODE_SUCCESS;
}
int32_t blockEncode(const SSDataBlock* pBlock, char* data, int32_t numOfCols) {
@@ -2502,12 +2578,29 @@ int32_t blockEncode(const SSDataBlock* pBlock, char* data, int32_t numOfCols) {
data += metaSize;
dataLen += metaSize;
- colSizes[col] = colDataGetLength(pColRes, numOfRows);
- dataLen += colSizes[col];
- if (pColRes->pData != NULL) {
- memmove(data, pColRes->pData, colSizes[col]);
+ if (pColRes->reassigned && IS_VAR_DATA_TYPE(pColRes->info.type)) {
+ colSizes[col] = 0;
+ for (int32_t row = 0; row < numOfRows; ++row) {
+ char* pColData = pColRes->pData + pColRes->varmeta.offset[row];
+ int32_t colSize = 0;
+ if (pColRes->info.type == TSDB_DATA_TYPE_JSON) {
+ colSize = getJsonValueLen(pColData);
+ } else {
+ colSize = varDataTLen(pColData);
+ }
+ colSizes[col] += colSize;
+ dataLen += colSize;
+ memmove(data, pColData, colSize);
+ data += colSize;
+ }
+ } else {
+ colSizes[col] = colDataGetLength(pColRes, numOfRows);
+ dataLen += colSizes[col];
+ if (pColRes->pData != NULL) {
+ memmove(data, pColRes->pData, colSizes[col]);
+ }
+ data += colSizes[col];
}
- data += colSizes[col];
colSizes[col] = htonl(colSizes[col]);
// uError("blockEncode col bytes:%d, type:%d, size:%d, htonl size:%d", pColRes->info.bytes, pColRes->info.type, htonl(colSizes[col]), colSizes[col]);
diff --git a/source/common/src/tdataformat.c b/source/common/src/tdataformat.c
index f30557e4eff7e8e0d59e214129b9c9469d3c39e5..0b121777544b33c918ca5c421e78e1b644f3109b 100644
--- a/source/common/src/tdataformat.c
+++ b/source/common/src/tdataformat.c
@@ -2504,18 +2504,21 @@ _exit:
return code;
}
-int32_t tColDataAddValueByBind(SColData *pColData, TAOS_MULTI_BIND *pBind) {
+int32_t tColDataAddValueByBind(SColData *pColData, TAOS_MULTI_BIND *pBind, int32_t buffMaxLen) {
int32_t code = 0;
if (!(pBind->num == 1 && pBind->is_null && *pBind->is_null)) {
ASSERT(pColData->type == pBind->buffer_type);
}
-
+
if (IS_VAR_DATA_TYPE(pColData->type)) { // var-length data type
for (int32_t i = 0; i < pBind->num; ++i) {
if (pBind->is_null && pBind->is_null[i]) {
code = tColDataAppendValueImpl[pColData->flag][CV_FLAG_NULL](pColData, NULL, 0);
if (code) goto _exit;
+ } else if (pBind->length[i] > buffMaxLen) {
+ uError("var data length too big, len:%d, max:%d", pBind->length[i], buffMaxLen);
+ return TSDB_CODE_INVALID_PARA;
} else {
code = tColDataAppendValueImpl[pColData->flag][CV_FLAG_VALUE](
pColData, (uint8_t *)pBind->buffer + pBind->buffer_length * i, pBind->length[i]);
@@ -3523,6 +3526,43 @@ static FORCE_INLINE void tColDataCalcSMAUBigInt(SColData *pColData, int64_t *sum
}
}
+static FORCE_INLINE void tColDataCalcSMAVarType(SColData *pColData, int64_t *sum, int64_t *max, int64_t *min,
+ int16_t *numOfNull) {
+ *(uint64_t *)sum = 0;
+ *(uint64_t *)max = 0;
+ *(uint64_t *)min = 0;
+ *numOfNull = 0;
+
+ switch (pColData->flag) {
+ case HAS_NONE:
+ case HAS_NULL:
+ case (HAS_NONE | HAS_NULL):
+ *numOfNull = pColData->nVal;
+ break;
+ case HAS_VALUE:
+ *numOfNull = 0;
+ break;
+ case (HAS_VALUE | HAS_NULL):
+ case (HAS_VALUE | HAS_NONE):
+ for (int32_t iVal = 0; iVal < pColData->nVal; iVal++) {
+ if (GET_BIT1(pColData->pBitMap, iVal) == 0) {
+ (*numOfNull)++;
+ }
+ }
+ break;
+ case (HAS_VALUE | HAS_NONE | HAS_NULL):
+ for (int32_t iVal = 0; iVal < pColData->nVal; iVal++) {
+ if (GET_BIT2(pColData->pBitMap, iVal) != 2) {
+ (*numOfNull)++;
+ }
+ }
+ break;
+ default:
+ ASSERT(0);
+ break;
+ }
+}
+
void (*tColDataCalcSMA[])(SColData *pColData, int64_t *sum, int64_t *max, int64_t *min, int16_t *numOfNull) = {
NULL,
tColDataCalcSMABool, // TSDB_DATA_TYPE_BOOL
@@ -3532,14 +3572,14 @@ void (*tColDataCalcSMA[])(SColData *pColData, int64_t *sum, int64_t *max, int64_
tColDataCalcSMABigInt, // TSDB_DATA_TYPE_BIGINT
tColDataCalcSMAFloat, // TSDB_DATA_TYPE_FLOAT
tColDataCalcSMADouble, // TSDB_DATA_TYPE_DOUBLE
- NULL, // TSDB_DATA_TYPE_VARCHAR
+ tColDataCalcSMAVarType, // TSDB_DATA_TYPE_VARCHAR
tColDataCalcSMABigInt, // TSDB_DATA_TYPE_TIMESTAMP
- NULL, // TSDB_DATA_TYPE_NCHAR
+ tColDataCalcSMAVarType, // TSDB_DATA_TYPE_NCHAR
tColDataCalcSMAUTinyInt, // TSDB_DATA_TYPE_UTINYINT
tColDataCalcSMATinyUSmallInt, // TSDB_DATA_TYPE_USMALLINT
tColDataCalcSMAUInt, // TSDB_DATA_TYPE_UINT
tColDataCalcSMAUBigInt, // TSDB_DATA_TYPE_UBIGINT
- NULL, // TSDB_DATA_TYPE_JSON
+ tColDataCalcSMAVarType, // TSDB_DATA_TYPE_JSON
NULL, // TSDB_DATA_TYPE_VARBINARY
NULL, // TSDB_DATA_TYPE_DECIMAL
NULL, // TSDB_DATA_TYPE_BLOB
diff --git a/source/common/src/tglobal.c b/source/common/src/tglobal.c
index 9ff297c5b42827daa75e6bf3b47d75711b457394..a79351d5cc73ff423e7b1ab0295008b8a99daaed 100644
--- a/source/common/src/tglobal.c
+++ b/source/common/src/tglobal.c
@@ -60,6 +60,7 @@ int32_t tsNumOfQnodeQueryThreads = 4;
int32_t tsNumOfQnodeFetchThreads = 1;
int32_t tsNumOfSnodeStreamThreads = 4;
int32_t tsNumOfSnodeWriteThreads = 1;
+int32_t tsMaxStreamBackendCache = 128; // M
// sync raft
int32_t tsElectInterval = 25 * 1000;
@@ -72,6 +73,7 @@ int64_t tsVndCommitMaxIntervalMs = 600 * 1000;
// mnode
int64_t tsMndSdbWriteDelta = 200;
int64_t tsMndLogRetention = 2000;
+bool tsMndSkipGrant = false;
// monitor
bool tsEnableMonitor = true;
@@ -105,7 +107,7 @@ int32_t tsQueryPolicy = 1;
int32_t tsQueryRspPolicy = 0;
int64_t tsQueryMaxConcurrentTables = 200; // unit is TSDB_TABLE_NUM_UNIT
bool tsEnableQueryHb = false;
-bool tsEnableScience = false; // on taos-cli show float and doulbe with scientific notation if true
+bool tsEnableScience = false; // on taos-cli show float and doulbe with scientific notation if true
int32_t tsQuerySmaOptimize = 0;
int32_t tsQueryRsmaTolerance = 1000; // the tolerance time (ms) to judge from which level to query rsma data.
bool tsQueryPlannerTrace = false;
@@ -117,8 +119,8 @@ int32_t tsRedirectFactor = 2;
int32_t tsRedirectMaxPeriod = 1000;
int32_t tsMaxRetryWaitTime = 10000;
bool tsUseAdapter = false;
-int32_t tsMetaCacheMaxSize = -1; // MB
-int32_t tsSlowLogThreshold = 3; // seconds
+int32_t tsMetaCacheMaxSize = -1; // MB
+int32_t tsSlowLogThreshold = 3; // seconds
int32_t tsSlowLogScope = SLOW_LOG_TYPE_ALL;
/*
@@ -141,8 +143,8 @@ int32_t tsCompressColData = -1;
// count/hyperloglog function always return values in case of all NULL data or Empty data set.
int32_t tsCountAlwaysReturnValue = 1;
-// 10 ms for sliding time, the value will changed in case of time precision changed
-int32_t tsMinSlidingTime = 10;
+// 1 ms for sliding time, the value will changed in case of time precision changed
+int32_t tsMinSlidingTime = 1;
// the maxinum number of distict query result
int32_t tsMaxNumOfDistinctResults = 1000 * 10000;
@@ -150,8 +152,8 @@ int32_t tsMaxNumOfDistinctResults = 1000 * 10000;
// 1 database precision unit for interval time range, changed accordingly
int32_t tsMinIntervalTime = 1;
-// maximum memory allowed to be allocated for a single csv load (in MB)
-int32_t tsMaxMemUsedByInsert = 1024;
+// maximum batch rows numbers imported from a single csv load
+int32_t tsMaxInsertBatchRows = 1000000;
float tsSelectivityRatio = 1.0;
int32_t tsTagFilterResCacheSize = 1024 * 10;
@@ -208,6 +210,7 @@ char tsUdfdLdLibPath[512] = "";
bool tsDisableStream = false;
int64_t tsStreamBufferSize = 128 * 1024 * 1024;
int64_t tsCheckpointInterval = 3 * 60 * 60 * 1000;
+bool tsFilterScalarMode = false;
#ifndef _STORAGE
int32_t taosSetTfsCfg(SConfig *pCfg) {
@@ -344,11 +347,12 @@ static int32_t taosAddClientCfg(SConfig *pCfg) {
if (cfgAddString(pCfg, "smlTagName", tsSmlTagName, 1) != 0) return -1;
// if (cfgAddBool(pCfg, "smlDataFormat", tsSmlDataFormat, 1) != 0) return -1;
// if (cfgAddInt32(pCfg, "smlBatchSize", tsSmlBatchSize, 1, INT32_MAX, true) != 0) return -1;
- if (cfgAddInt32(pCfg, "maxMemUsedByInsert", tsMaxMemUsedByInsert, 1, INT32_MAX, true) != 0) return -1;
+ if (cfgAddInt32(pCfg, "maxInsertBatchRows", tsMaxInsertBatchRows, 1, INT32_MAX, true) != 0) return -1;
if (cfgAddInt32(pCfg, "maxRetryWaitTime", tsMaxRetryWaitTime, 0, 86400000, 0) != 0) return -1;
if (cfgAddBool(pCfg, "useAdapter", tsUseAdapter, true) != 0) return -1;
if (cfgAddBool(pCfg, "crashReporting", tsEnableCrashReport, true) != 0) return -1;
- if (cfgAddInt64(pCfg, "queryMaxConcurrentTables", tsQueryMaxConcurrentTables, INT64_MIN, INT64_MAX, 1) != 0) return -1;
+ if (cfgAddInt64(pCfg, "queryMaxConcurrentTables", tsQueryMaxConcurrentTables, INT64_MIN, INT64_MAX, 1) != 0)
+ return -1;
if (cfgAddInt32(pCfg, "metaCacheMaxSize", tsMetaCacheMaxSize, -1, INT32_MAX, 1) != 0) return -1;
if (cfgAddInt32(pCfg, "slowLogThreshold", tsSlowLogThreshold, 0, INT32_MAX, true) != 0) return -1;
if (cfgAddString(pCfg, "slowLogScope", "", true) != 0) return -1;
@@ -419,7 +423,7 @@ static int32_t taosAddServerCfg(SConfig *pCfg) {
if (cfgAddInt32(pCfg, "maxShellConns", tsMaxShellConns, 10, 50000000, 0) != 0) return -1;
if (cfgAddInt32(pCfg, "statusInterval", tsStatusInterval, 1, 30, 0) != 0) return -1;
- if (cfgAddInt32(pCfg, "minSlidingTime", tsMinSlidingTime, 10, 1000000, 0) != 0) return -1;
+ if (cfgAddInt32(pCfg, "minSlidingTime", tsMinSlidingTime, 1, 1000000, 0) != 0) return -1;
if (cfgAddInt32(pCfg, "minIntervalTime", tsMinIntervalTime, 1, 1000000, 0) != 0) return -1;
if (cfgAddInt32(pCfg, "maxNumOfDistinctRes", tsMaxNumOfDistinctResults, 10 * 10000, 10000 * 10000, 0) != 0) return -1;
if (cfgAddInt32(pCfg, "countAlwaysReturnValue", tsCountAlwaysReturnValue, 0, 1, 0) != 0) return -1;
@@ -488,6 +492,7 @@ static int32_t taosAddServerCfg(SConfig *pCfg) {
if (cfgAddInt64(pCfg, "mndSdbWriteDelta", tsMndSdbWriteDelta, 20, 10000, 0) != 0) return -1;
if (cfgAddInt64(pCfg, "mndLogRetention", tsMndLogRetention, 500, 10000, 0) != 0) return -1;
+ if (cfgAddBool(pCfg, "skipGrant", tsMndSkipGrant, 0) != 0) return -1;
if (cfgAddBool(pCfg, "monitor", tsEnableMonitor, 0) != 0) return -1;
if (cfgAddInt32(pCfg, "monitorInterval", tsMonitorInterval, 1, 200000, 0) != 0) return -1;
@@ -522,6 +527,9 @@ static int32_t taosAddServerCfg(SConfig *pCfg) {
if (cfgAddInt32(pCfg, "cacheLazyLoadThreshold", tsCacheLazyLoadThreshold, 0, 100000, 0) != 0) return -1;
+ if (cfgAddBool(pCfg, "filterScalarMode", tsFilterScalarMode, 0) != 0) return -1;
+ if (cfgAddInt32(pCfg, "maxStreamBackendCache", tsMaxStreamBackendCache, 16, 1024, 0) != 0) return -1;
+
GRANT_CFG_ADD;
return 0;
}
@@ -770,7 +778,7 @@ static int32_t taosSetClientCfg(SConfig *pCfg) {
// tsSmlDataFormat = cfgGetItem(pCfg, "smlDataFormat")->bval;
// tsSmlBatchSize = cfgGetItem(pCfg, "smlBatchSize")->i32;
- tsMaxMemUsedByInsert = cfgGetItem(pCfg, "maxMemUsedByInsert")->i32;
+ tsMaxInsertBatchRows = cfgGetItem(pCfg, "maxInsertBatchRows")->i32;
tsShellActivityTimer = cfgGetItem(pCfg, "shellActivityTimer")->i32;
tsCompressMsgSize = cfgGetItem(pCfg, "compressMsgSize")->i32;
@@ -778,7 +786,7 @@ static int32_t taosSetClientCfg(SConfig *pCfg) {
tsNumOfTaskQueueThreads = cfgGetItem(pCfg, "numOfTaskQueueThreads")->i32;
tsQueryPolicy = cfgGetItem(pCfg, "queryPolicy")->i32;
tsEnableQueryHb = cfgGetItem(pCfg, "enableQueryHb")->bval;
- tsEnableScience = cfgGetItem(pCfg, "enableScience")->bval;
+ tsEnableScience = cfgGetItem(pCfg, "enableScience")->bval;
tsQuerySmaOptimize = cfgGetItem(pCfg, "querySmaOptimize")->i32;
tsQueryPlannerTrace = cfgGetItem(pCfg, "queryPlannerTrace")->bval;
tsQueryNodeChunkSize = cfgGetItem(pCfg, "queryNodeChunkSize")->i32;
@@ -884,6 +892,7 @@ static int32_t taosSetServerCfg(SConfig *pCfg) {
tsMndSdbWriteDelta = cfgGetItem(pCfg, "mndSdbWriteDelta")->i64;
tsMndLogRetention = cfgGetItem(pCfg, "mndLogRetention")->i64;
+ tsMndSkipGrant = cfgGetItem(pCfg, "skipGrant")->bval;
tsStartUdfd = cfgGetItem(pCfg, "udf")->bval;
tstrncpy(tsUdfdResFuncs, cfgGetItem(pCfg, "udfdResFuncs")->str, sizeof(tsUdfdResFuncs));
@@ -898,6 +907,9 @@ static int32_t taosSetServerCfg(SConfig *pCfg) {
tsStreamBufferSize = cfgGetItem(pCfg, "streamBufferSize")->i64;
tsCheckpointInterval = cfgGetItem(pCfg, "checkpointInterval")->i64;
+ tsFilterScalarMode = cfgGetItem(pCfg, "filterScalarMode")->bval;
+ tsMaxStreamBackendCache = cfgGetItem(pCfg, "maxStreamBackendCache")->i32;
+
GRANT_CFG_GET;
return 0;
}
@@ -1039,7 +1051,7 @@ int32_t taosApplyLocalCfg(SConfig *pCfg, char *name) {
} else if (strcasecmp("maxNumOfDistinctRes", name) == 0) {
tsMaxNumOfDistinctResults = cfgGetItem(pCfg, "maxNumOfDistinctRes")->i32;
} else if (strcasecmp("maxMemUsedByInsert", name) == 0) {
- tsMaxMemUsedByInsert = cfgGetItem(pCfg, "maxMemUsedByInsert")->i32;
+ tsMaxInsertBatchRows = cfgGetItem(pCfg, "maxInsertBatchRows")->i32;
} else if (strcasecmp("maxRetryWaitTime", name) == 0) {
tsMaxRetryWaitTime = cfgGetItem(pCfg, "maxRetryWaitTime")->i32;
}
diff --git a/source/common/src/tmsg.c b/source/common/src/tmsg.c
index d7d393becc676344402b4a5f9da2725be492cd95..ac035e0a2b9ac631bff120d34c204732df28acde 100644
--- a/source/common/src/tmsg.c
+++ b/source/common/src/tmsg.c
@@ -4002,11 +4002,16 @@ int32_t tSerializeSCMCreateTopicReq(void *buf, int32_t bufLen, const SCMCreateTo
if (tEncodeI8(&encoder, pReq->withMeta) < 0) return -1;
if (tEncodeCStr(&encoder, pReq->subDbName) < 0) return -1;
if (TOPIC_SUB_TYPE__DB == pReq->subType) {
- } else if (TOPIC_SUB_TYPE__TABLE == pReq->subType) {
- if (tEncodeCStr(&encoder, pReq->subStbName) < 0) return -1;
} else {
- if (tEncodeI32(&encoder, strlen(pReq->ast)) < 0) return -1;
- if (tEncodeCStr(&encoder, pReq->ast) < 0) return -1;
+ if (TOPIC_SUB_TYPE__TABLE == pReq->subType) {
+ if (tEncodeCStr(&encoder, pReq->subStbName) < 0) return -1;
+ }
+ if (pReq->ast && strlen(pReq->ast) > 0) {
+ if (tEncodeI32(&encoder, strlen(pReq->ast)) < 0) return -1;
+ if (tEncodeCStr(&encoder, pReq->ast) < 0) return -1;
+ } else {
+ if (tEncodeI32(&encoder, 0) < 0) return -1;
+ }
}
if (tEncodeI32(&encoder, strlen(pReq->sql)) < 0) return -1;
if (tEncodeCStr(&encoder, pReq->sql) < 0) return -1;
@@ -4032,9 +4037,10 @@ int32_t tDeserializeSCMCreateTopicReq(void *buf, int32_t bufLen, SCMCreateTopicR
if (tDecodeI8(&decoder, &pReq->withMeta) < 0) return -1;
if (tDecodeCStrTo(&decoder, pReq->subDbName) < 0) return -1;
if (TOPIC_SUB_TYPE__DB == pReq->subType) {
- } else if (TOPIC_SUB_TYPE__TABLE == pReq->subType) {
- if (tDecodeCStrTo(&decoder, pReq->subStbName) < 0) return -1;
} else {
+ if (TOPIC_SUB_TYPE__TABLE == pReq->subType) {
+ if (tDecodeCStrTo(&decoder, pReq->subStbName) < 0) return -1;
+ }
if (tDecodeI32(&decoder, &astLen) < 0) return -1;
if (astLen > 0) {
pReq->ast = taosMemoryCalloc(1, astLen + 1);
@@ -4057,7 +4063,7 @@ int32_t tDeserializeSCMCreateTopicReq(void *buf, int32_t bufLen, SCMCreateTopicR
void tFreeSCMCreateTopicReq(SCMCreateTopicReq *pReq) {
taosMemoryFreeClear(pReq->sql);
- if (TOPIC_SUB_TYPE__COLUMN == pReq->subType) {
+ if (TOPIC_SUB_TYPE__DB != pReq->subType) {
taosMemoryFreeClear(pReq->ast);
}
}
diff --git a/source/common/src/ttime.c b/source/common/src/ttime.c
index dcd539bd91e11c570a9f779f9c1c9a3811765c41..d8c43747f7b92822fad5455a143e35c5e918f15c 100644
--- a/source/common/src/ttime.c
+++ b/source/common/src/ttime.c
@@ -82,6 +82,7 @@ static int32_t parseLocaltime(char* timestr, int32_t len, int64_t* utime, int32_
static int32_t parseLocaltimeDst(char* timestr, int32_t len, int64_t* utime, int32_t timePrec, char delim);
static char* forwardToTimeStringEnd(char* str);
static bool checkTzPresent(const char* str, int32_t len);
+static int32_t parseTimezone(char* str, int64_t* tzOffset);
static int32_t (*parseLocaltimeFp[])(char* timestr, int32_t len, int64_t* utime, int32_t timePrec, char delim) = {
parseLocaltime, parseLocaltimeDst};
@@ -92,13 +93,13 @@ int32_t taosParseTime(const char* timestr, int64_t* utime, int32_t len, int32_t
if (checkTzPresent(timestr, len)) {
return parseTimeWithTz(timestr, utime, timePrec, 'T');
} else {
- return (*parseLocaltimeFp[day_light])((char*)timestr, len, utime, timePrec, 'T');
+ return parseLocaltimeDst((char*)timestr, len, utime, timePrec, 'T');
}
} else {
if (checkTzPresent(timestr, len)) {
return parseTimeWithTz(timestr, utime, timePrec, 0);
} else {
- return (*parseLocaltimeFp[day_light])((char*)timestr, len, utime, timePrec, 0);
+ return parseLocaltimeDst((char*)timestr, len, utime, timePrec, 0);
}
}
}
@@ -713,16 +714,12 @@ int64_t taosTimeAdd(int64_t t, int64_t duration, char unit, int32_t precision) {
return t;
}
- if (unit != 'n' && unit != 'y') {
+ if (!IS_CALENDAR_TIME_DURATION(unit)) {
return t + duration;
}
// The following code handles the y/n time duration
- int64_t numOfMonth = duration;
- if (unit == 'y') {
- numOfMonth *= 12;
- }
-
+ int64_t numOfMonth = (unit == 'y')? duration*12:duration;
int64_t fraction = t % TSDB_TICK_PER_SECOND(precision);
struct tm tm;
@@ -741,6 +738,7 @@ int32_t taosTimeCountInterval(int64_t skey, int64_t ekey, int64_t interval, char
ekey = skey;
skey = tmp;
}
+
if (unit != 'n' && unit != 'y') {
return (int32_t)((ekey - skey) / interval);
}
@@ -764,13 +762,16 @@ int32_t taosTimeCountInterval(int64_t skey, int64_t ekey, int64_t interval, char
return (emon - smon) / (int32_t)interval;
}
-int64_t taosTimeTruncate(int64_t t, const SInterval* pInterval, int32_t precision) {
+int64_t taosTimeTruncate(int64_t ts, const SInterval* pInterval) {
if (pInterval->sliding == 0 && pInterval->interval == 0) {
- return t;
+ return ts;
}
- int64_t start = t;
- if (pInterval->slidingUnit == 'n' || pInterval->slidingUnit == 'y') {
+ int64_t start = ts;
+ int32_t precision = pInterval->precision;
+
+ if (IS_CALENDAR_TIME_DURATION(pInterval->slidingUnit)) {
+
start /= (int64_t)(TSDB_TICK_PER_SECOND(precision));
struct tm tm;
time_t tt = (time_t)start;
@@ -792,44 +793,72 @@ int64_t taosTimeTruncate(int64_t t, const SInterval* pInterval, int32_t precisio
start = (int64_t)(taosMktime(&tm) * TSDB_TICK_PER_SECOND(precision));
} else {
- int64_t delta = t - pInterval->interval;
- int32_t factor = (delta >= 0) ? 1 : -1;
+ if (IS_CALENDAR_TIME_DURATION(pInterval->intervalUnit)) {
+ int64_t news = (ts / pInterval->sliding) * pInterval->sliding;
+ ASSERT(news <= ts);
+
+ if (news <= ts) {
+ int64_t prev = news;
+ int64_t newe = taosTimeAdd(news, pInterval->interval, pInterval->intervalUnit, precision) - 1;
+
+ if (newe < ts) { // move towards the greater endpoint
+ while(newe < ts && news < ts) {
+ news += pInterval->sliding;
+ newe = taosTimeAdd(news, pInterval->interval, pInterval->intervalUnit, precision) - 1;
+ }
+
+ prev = news;
+ } else {
+ while (newe >= ts) {
+ prev = news;
+ news -= pInterval->sliding;
+ newe = taosTimeAdd(news, pInterval->interval, pInterval->intervalUnit, precision) - 1;
+ }
+ }
+
+ return prev;
+ }
+ } else {
+ int64_t delta = ts - pInterval->interval;
+ int32_t factor = (delta >= 0) ? 1 : -1;
- start = (delta / pInterval->sliding + factor) * pInterval->sliding;
+ start = (delta / pInterval->sliding + factor) * pInterval->sliding;
- if (pInterval->intervalUnit == 'd' || pInterval->intervalUnit == 'w') {
- /*
- * here we revised the start time of day according to the local time zone,
- * but in case of DST, the start time of one day need to be dynamically decided.
- */
- // todo refactor to extract function that is available for Linux/Windows/Mac platform
+ if (pInterval->intervalUnit == 'd' || pInterval->intervalUnit == 'w') {
+ /*
+ * here we revised the start time of day according to the local time zone,
+ * but in case of DST, the start time of one day need to be dynamically decided.
+ */
+ // todo refactor to extract function that is available for Linux/Windows/Mac platform
#if defined(WINDOWS) && _MSC_VER >= 1900
- // see https://docs.microsoft.com/en-us/cpp/c-runtime-library/daylight-dstbias-timezone-and-tzname?view=vs-2019
- int64_t timezone = _timezone;
- int32_t daylight = _daylight;
- char** tzname = _tzname;
+ // see
+ // https://docs.microsoft.com/en-us/cpp/c-runtime-library/daylight-dstbias-timezone-and-tzname?view=vs-2019
+ int64_t timezone = _timezone;
+ int32_t daylight = _daylight;
+ char** tzname = _tzname;
#endif
- start += (int64_t)(timezone * TSDB_TICK_PER_SECOND(precision));
- }
+ start += (int64_t)(timezone * TSDB_TICK_PER_SECOND(precision));
+ }
- int64_t end = 0;
+ int64_t end = 0;
- // not enough time range
- if (start < 0 || INT64_MAX - start > pInterval->interval - 1) {
- end = taosTimeAdd(start, pInterval->interval, pInterval->intervalUnit, precision) - 1;
- while (end < t) { // move forward to the correct time window
- start += pInterval->sliding;
+ // not enough time range
+ if (start < 0 || INT64_MAX - start > pInterval->interval - 1) {
+ end = taosTimeAdd(start, pInterval->interval, pInterval->intervalUnit, precision) - 1;
+ while (end < ts) { // move forward to the correct time window
+ start += pInterval->sliding;
- if (start < 0 || INT64_MAX - start > pInterval->interval - 1) {
- end = start + pInterval->interval - 1;
- } else {
- end = INT64_MAX;
- break;
+ if (start < 0 || INT64_MAX - start > pInterval->interval - 1) {
+ end = start + pInterval->interval - 1;
+ } else {
+ end = INT64_MAX;
+ break;
+ }
}
+ } else {
+ end = INT64_MAX;
}
- } else {
- end = INT64_MAX;
}
}
@@ -841,10 +870,10 @@ int64_t taosTimeTruncate(int64_t t, const SInterval* pInterval, int32_t precisio
// try to move current window to the left-hande-side, due to the offset effect.
int64_t end = taosTimeAdd(start, pInterval->interval, pInterval->intervalUnit, precision) - 1;
- int64_t newEnd = end;
- while (newEnd >= t) {
- end = newEnd;
- newEnd = taosTimeAdd(newEnd, -pInterval->sliding, pInterval->slidingUnit, precision);
+ int64_t newe = end;
+ while (newe >= ts) {
+ end = newe;
+ newe = taosTimeAdd(newe, -pInterval->sliding, pInterval->slidingUnit, precision);
}
start = taosTimeAdd(end, -pInterval->interval, pInterval->intervalUnit, precision) + 1;
diff --git a/source/common/src/ttypes.c b/source/common/src/ttypes.c
index 33b972594e0b1f09eb50afd79a1d328e66b13bdd..39255cff3a8d22136fd785914e131d6c2c7b2d92 100644
--- a/source/common/src/ttypes.c
+++ b/source/common/src/ttypes.c
@@ -17,7 +17,7 @@
#include "ttypes.h"
#include "tcompression.h"
-const int32_t TYPE_BYTES[17] = {
+const int32_t TYPE_BYTES[21] = {
-1, // TSDB_DATA_TYPE_NULL
CHAR_BYTES, // TSDB_DATA_TYPE_BOOL
CHAR_BYTES, // TSDB_DATA_TYPE_TINYINT
@@ -34,6 +34,10 @@ const int32_t TYPE_BYTES[17] = {
INT_BYTES, // TSDB_DATA_TYPE_UINT
sizeof(uint64_t), // TSDB_DATA_TYPE_UBIGINT
TSDB_MAX_JSON_TAG_LEN, // TSDB_DATA_TYPE_JSON
+ TSDB_MAX_TAGS_LEN, // TSDB_DATA_TYPE_VARBINARY: placeholder, not implemented
+ TSDB_MAX_TAGS_LEN, // TSDB_DATA_TYPE_DECIMAL: placeholder, not implemented
+ TSDB_MAX_TAGS_LEN, // TSDB_DATA_TYPE_BLOB: placeholder, not implemented
+ TSDB_MAX_TAGS_LEN, // TSDB_DATA_TYPE_MEDIUMBLOB: placeholder, not implemented
sizeof(VarDataOffsetT), // TSDB_DATA_TYPE_GEOMETRY
};
@@ -57,6 +61,10 @@ tDataTypeDescriptor tDataTypes[TSDB_DATA_TYPE_MAX] = {
{TSDB_DATA_TYPE_UINT, 12, INT_BYTES, "INT UNSIGNED", 0, UINT32_MAX, tsCompressInt, tsDecompressInt},
{TSDB_DATA_TYPE_UBIGINT, 15, LONG_BYTES, "BIGINT UNSIGNED", 0, UINT64_MAX, tsCompressBigint, tsDecompressBigint},
{TSDB_DATA_TYPE_JSON, 4, TSDB_MAX_JSON_TAG_LEN, "JSON", 0, 0, tsCompressString, tsDecompressString},
+ {TSDB_DATA_TYPE_VARBINARY, 9, 1, "VARBINARY", 0, 0, NULL, NULL}, // placeholder, not implemented
+ {TSDB_DATA_TYPE_DECIMAL, 7, 1, "DECIMAL", 0, 0, NULL, NULL}, // placeholder, not implemented
+ {TSDB_DATA_TYPE_BLOB, 4, 1, "BLOB", 0, 0, NULL, NULL}, // placeholder, not implemented
+ {TSDB_DATA_TYPE_MEDIUMBLOB, 10, 1, "MEDIUMBLOB", 0, 0, NULL, NULL}, // placeholder, not implemented
{TSDB_DATA_TYPE_GEOMETRY, 8, 1, "GEOMETRY", 0, 0, tsCompressString, tsDecompressString},
};
diff --git a/source/dnode/mgmt/mgmt_vnode/src/vmWorker.c b/source/dnode/mgmt/mgmt_vnode/src/vmWorker.c
index a5f53c87030b49bc321dd53f0fc91bf7ded0a977..e41c9e10d723ba895c13426f0c4a4cc2423bbe51 100644
--- a/source/dnode/mgmt/mgmt_vnode/src/vmWorker.c
+++ b/source/dnode/mgmt/mgmt_vnode/src/vmWorker.c
@@ -78,7 +78,7 @@ static void vmProcessQueryQueue(SQueueInfo *pInfo, SRpcMsg *pMsg) {
int32_t code = vnodeProcessQueryMsg(pVnode->pImpl, pMsg);
if (code != 0) {
if (terrno != 0) code = terrno;
- dGError("vgId:%d, msg:%p failed to query since %s", pVnode->vgId, pMsg, terrstr(code));
+ dGError("vgId:%d, msg:%p failed to query since %s", pVnode->vgId, pMsg, tstrerror(code));
vmSendRsp(pMsg, code);
}
diff --git a/source/dnode/mnode/impl/inc/mndDef.h b/source/dnode/mnode/impl/inc/mndDef.h
index 7d707f4cbaa4021223e8e8ed59342453b8b87584..82b714e6eb6e3a29cc3ea16d1fb4d0c1bd9d6a6d 100644
--- a/source/dnode/mnode/impl/inc/mndDef.h
+++ b/source/dnode/mnode/impl/inc/mndDef.h
@@ -663,9 +663,10 @@ typedef struct {
char targetDb[TSDB_DB_FNAME_LEN];
char targetSTbName[TSDB_TABLE_FNAME_LEN];
int64_t targetStbUid;
- int32_t fixedSinkVgId; // 0 for shuffle
+
// fixedSinkVg is not applicable for encode and decode
SVgObj fixedSinkVg;
+ int32_t fixedSinkVgId; // 0 for shuffle
// transformation
char* sql;
diff --git a/source/dnode/mnode/impl/inc/mndUser.h b/source/dnode/mnode/impl/inc/mndUser.h
index aa7f97f0870dfd83ba4c3296ba1293e091b6fdba..93ae38e5541de0034dcd9a7eace8079db2ffefa1 100644
--- a/source/dnode/mnode/impl/inc/mndUser.h
+++ b/source/dnode/mnode/impl/inc/mndUser.h
@@ -40,6 +40,8 @@ int32_t mndValidateUserPassInfo(SMnode *pMnode, SUserPassVersion *pUsers, int3
int32_t mndUserRemoveDb(SMnode *pMnode, STrans *pTrans, char *db);
int32_t mndUserRemoveTopic(SMnode *pMnode, STrans *pTrans, char *topic);
+int32_t mndUserDupObj(SUserObj *pUser, SUserObj *pNew);
+void mndUserFreeObj(SUserObj *pUser);
#ifdef __cplusplus
}
#endif
diff --git a/source/dnode/mnode/impl/src/mndDb.c b/source/dnode/mnode/impl/src/mndDb.c
index a451ae9df5bf6167fc994acf9422c59901c39822..1a6b40191860818b7955fd41c4d3bb4318fe8bcd 100644
--- a/source/dnode/mnode/impl/src/mndDb.c
+++ b/source/dnode/mnode/impl/src/mndDb.c
@@ -446,7 +446,8 @@ static int32_t mndSetCreateDbUndoLogs(SMnode *pMnode, STrans *pTrans, SDbObj *pD
return 0;
}
-static int32_t mndSetCreateDbCommitLogs(SMnode *pMnode, STrans *pTrans, SDbObj *pDb, SVgObj *pVgroups) {
+static int32_t mndSetCreateDbCommitLogs(SMnode *pMnode, STrans *pTrans, SDbObj *pDb, SVgObj *pVgroups,
+ SUserObj *pUserDuped) {
SSdbRaw *pDbRaw = mndDbActionEncode(pDb);
if (pDbRaw == NULL) return -1;
if (mndTransAppendCommitlog(pTrans, pDbRaw) != 0) return -1;
@@ -459,6 +460,13 @@ static int32_t mndSetCreateDbCommitLogs(SMnode *pMnode, STrans *pTrans, SDbObj *
if (sdbSetRawStatus(pVgRaw, SDB_STATUS_READY) != 0) return -1;
}
+ if (pUserDuped) {
+ SSdbRaw *pUserRaw = mndUserActionEncode(pUserDuped);
+ if (pUserRaw == NULL) return -1;
+ if (mndTransAppendCommitlog(pTrans, pUserRaw) != 0) return -1;
+ if (sdbSetRawStatus(pUserRaw, SDB_STATUS_READY) != 0) return -1;
+ }
+
return 0;
}
@@ -565,6 +573,15 @@ static int32_t mndCreateDb(SMnode *pMnode, SRpcMsg *pReq, SCreateDbReq *pCreate,
return -1;
}
+ // add database privileges for user
+ SUserObj newUserObj = {0}, *pNewUserDuped = NULL;
+ if (!pUser->superUser) {
+ if (mndUserDupObj(pUser, &newUserObj) != 0) goto _OVER;
+ taosHashPut(newUserObj.readDbs, dbObj.name, strlen(dbObj.name) + 1, dbObj.name, TSDB_FILENAME_LEN);
+ taosHashPut(newUserObj.writeDbs, dbObj.name, strlen(dbObj.name) + 1, dbObj.name, TSDB_FILENAME_LEN);
+ pNewUserDuped = &newUserObj;
+ }
+
int32_t code = -1;
STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_RETRY, TRN_CONFLICT_DB, pReq, "create-db");
if (pTrans == NULL) goto _OVER;
@@ -577,7 +594,7 @@ static int32_t mndCreateDb(SMnode *pMnode, SRpcMsg *pReq, SCreateDbReq *pCreate,
mndTransSetOper(pTrans, MND_OPER_CREATE_DB);
if (mndSetCreateDbRedoLogs(pMnode, pTrans, &dbObj, pVgroups) != 0) goto _OVER;
if (mndSetCreateDbUndoLogs(pMnode, pTrans, &dbObj, pVgroups) != 0) goto _OVER;
- if (mndSetCreateDbCommitLogs(pMnode, pTrans, &dbObj, pVgroups) != 0) goto _OVER;
+ if (mndSetCreateDbCommitLogs(pMnode, pTrans, &dbObj, pVgroups, pNewUserDuped) != 0) goto _OVER;
if (mndSetCreateDbRedoActions(pMnode, pTrans, &dbObj, pVgroups) != 0) goto _OVER;
if (mndSetCreateDbUndoActions(pMnode, pTrans, &dbObj, pVgroups) != 0) goto _OVER;
if (mndTransPrepare(pMnode, pTrans) != 0) goto _OVER;
@@ -586,6 +603,7 @@ static int32_t mndCreateDb(SMnode *pMnode, SRpcMsg *pReq, SCreateDbReq *pCreate,
_OVER:
taosMemoryFree(pVgroups);
+ mndUserFreeObj(&newUserObj);
mndTransDrop(pTrans);
return code;
}
@@ -929,7 +947,7 @@ static void mndDumpDbCfgInfo(SDbCfgRsp *cfgRsp, SDbObj *pDb) {
cfgRsp->walRetentionSize = pDb->cfg.walRetentionSize;
cfgRsp->walSegmentSize = pDb->cfg.walSegmentSize;
cfgRsp->numOfRetensions = pDb->cfg.numOfRetensions;
- cfgRsp->pRetensions = pDb->cfg.pRetensions;
+ cfgRsp->pRetensions = taosArrayDup(pDb->cfg.pRetensions, NULL);
cfgRsp->schemaless = pDb->cfg.schemaless;
cfgRsp->sstTrigger = pDb->cfg.sstTrigger;
}
@@ -972,6 +990,8 @@ static int32_t mndProcessGetDbCfgReq(SRpcMsg *pReq) {
_OVER:
+ tFreeSDbCfgRsp(&cfgRsp);
+
if (code != 0) {
mError("db:%s, failed to get cfg since %s", cfgReq.db, terrstr());
}
diff --git a/source/dnode/mnode/impl/src/mndDnode.c b/source/dnode/mnode/impl/src/mndDnode.c
index cee3b3c61d67c918e3c200cd2a116d3999d62f15..73dbb243a13e89d1f365e63566bea4da7d0f7f85 100644
--- a/source/dnode/mnode/impl/src/mndDnode.c
+++ b/source/dnode/mnode/impl/src/mndDnode.c
@@ -986,13 +986,20 @@ static int32_t mndProcessDropDnodeReq(SRpcMsg *pReq) {
}
int32_t numOfVnodes = mndGetVnodesNum(pMnode, pDnode->id);
- if ((numOfVnodes > 0 || pMObj != NULL || pSObj != NULL || pQObj != NULL) && !force) {
- if (!mndIsDnodeOnline(pDnode, taosGetTimestampMs())) {
- terrno = TSDB_CODE_DNODE_OFFLINE;
- mError("dnode:%d, failed to drop since %s, vnodes:%d mnode:%d qnode:%d snode:%d", pDnode->id, terrstr(),
- numOfVnodes, pMObj != NULL, pQObj != NULL, pSObj != NULL);
- goto _OVER;
- }
+ bool isonline = mndIsDnodeOnline(pDnode, taosGetTimestampMs());
+
+ if (isonline && force) {
+ terrno = TSDB_CODE_DNODE_ONLY_USE_WHEN_OFFLINE;
+ mError("dnode:%d, failed to drop since %s, vnodes:%d mnode:%d qnode:%d snode:%d", pDnode->id, terrstr(),
+ numOfVnodes, pMObj != NULL, pQObj != NULL, pSObj != NULL);
+ goto _OVER;
+ }
+
+ if (!isonline && !force) {
+ terrno = TSDB_CODE_DNODE_OFFLINE;
+ mError("dnode:%d, failed to drop since %s, vnodes:%d mnode:%d qnode:%d snode:%d", pDnode->id, terrstr(),
+ numOfVnodes, pMObj != NULL, pQObj != NULL, pSObj != NULL);
+ goto _OVER;
}
code = mndDropDnode(pMnode, pReq, pDnode, pMObj, pQObj, pSObj, numOfVnodes, force, dropReq.unsafe);
diff --git a/source/dnode/mnode/impl/src/mndMnode.c b/source/dnode/mnode/impl/src/mndMnode.c
index 19c3d59167fd86cc98f1a66159ee97ede21313f8..d0b10a57686ff626b7765b6191cd40ee3a4da92b 100644
--- a/source/dnode/mnode/impl/src/mndMnode.c
+++ b/source/dnode/mnode/impl/src/mndMnode.c
@@ -695,7 +695,7 @@ static int32_t mndSetDropMnodeRedoActions(SMnode *pMnode, STrans *pTrans, SDnode
if (totalMnodes == 2) {
if (force) {
mError("cant't force drop dnode, since a mnode on it and replica is 2");
- terrno = TSDB_CODE_DNODE_OFFLINE;
+ terrno = TSDB_CODE_MNODE_ONLY_TWO_MNODE;
return -1;
}
mInfo("vgId:1, has %d mnodes, exec redo log first", totalMnodes);
diff --git a/source/dnode/mnode/impl/src/mndProfile.c b/source/dnode/mnode/impl/src/mndProfile.c
index 01dd223b5f3027808c05f41000d1dfcd6a7b6f3b..a1d815189c335215a3a812a50dfb5c6d96d64a84 100644
--- a/source/dnode/mnode/impl/src/mndProfile.c
+++ b/source/dnode/mnode/impl/src/mndProfile.c
@@ -245,7 +245,7 @@ static int32_t mndProcessConnectReq(SRpcMsg *pReq) {
goto _OVER;
}
- if (strncmp(connReq.passwd, pUser->pass, TSDB_PASSWORD_LEN - 1) != 0) {
+ if (strncmp(connReq.passwd, pUser->pass, TSDB_PASSWORD_LEN - 1) != 0 && !tsMndSkipGrant) {
mGError("user:%s, failed to login from %s since invalid pass, input:%s", pReq->info.conn.user, ip, connReq.passwd);
code = TSDB_CODE_MND_AUTH_FAILURE;
goto _OVER;
diff --git a/source/dnode/mnode/impl/src/mndQuery.c b/source/dnode/mnode/impl/src/mndQuery.c
index 5278fc776107c31e43375b16b708c0c8ad34901d..8e95fa3d6dd26ec9f6b99afe6dc2f49ce8fea389 100644
--- a/source/dnode/mnode/impl/src/mndQuery.c
+++ b/source/dnode/mnode/impl/src/mndQuery.c
@@ -33,6 +33,7 @@ void mndPostProcessQueryMsg(SRpcMsg *pMsg) {
int32_t mndProcessQueryMsg(SRpcMsg *pMsg) {
int32_t code = -1;
SMnode *pMnode = pMsg->info.node;
+
SReadHandle handle = {.mnd = pMnode, .pMsgCb = &pMnode->msgCb};
mTrace("msg:%p, in query queue is processing", pMsg);
diff --git a/source/dnode/mnode/impl/src/mndScheduler.c b/source/dnode/mnode/impl/src/mndScheduler.c
index 734f624be0e19c942c10244f28263570d6ea4504..64082536daa45d8be6f06b05535b48c9f404a0a4 100644
--- a/source/dnode/mnode/impl/src/mndScheduler.c
+++ b/source/dnode/mnode/impl/src/mndScheduler.c
@@ -14,18 +14,8 @@
*/
#include "mndScheduler.h"
-#include "mndConsumer.h"
#include "mndDb.h"
-#include "mndDnode.h"
-#include "mndMnode.h"
-#include "mndShow.h"
#include "mndSnode.h"
-#include "mndStb.h"
-#include "mndStream.h"
-#include "mndSubscribe.h"
-#include "mndTopic.h"
-#include "mndTrans.h"
-#include "mndUser.h"
#include "mndVgroup.h"
#include "parser.h"
#include "tcompare.h"
@@ -34,12 +24,8 @@
extern bool tsDeployOnSnode;
-static int32_t mndAddTaskToTaskSet(SArray* pArray, SStreamTask* pTask) {
- int32_t childId = taosArrayGetSize(pArray);
- pTask->selfChildId = childId;
- taosArrayPush(pArray, &pTask);
- return 0;
-}
+static int32_t mndAddSinkTaskToStream(SStreamObj* pStream, SMnode* pMnode, int32_t vgId, SVgObj* pVgroup);
+static void setFixedDownstreamEpInfo(SStreamTask* pDstTask, const SStreamTask* pTask);
int32_t mndConvertRsmaTask(char** pDst, int32_t* pDstLen, const char* ast, int64_t uid, int8_t triggerType,
int64_t watermark, int64_t deleteMark) {
@@ -97,7 +83,7 @@ END:
return terrno;
}
-int32_t mndAddSinkToTask(SMnode* pMnode, SStreamObj* pStream, SStreamTask* pTask) {
+int32_t mndSetSinkTaskInfo(SStreamObj* pStream, SStreamTask* pTask) {
if (pStream->smaId != 0) {
pTask->outputType = TASK_OUTPUT__SMA;
pTask->smaSink.smaId = pStream->smaId;
@@ -106,16 +92,23 @@ int32_t mndAddSinkToTask(SMnode* pMnode, SStreamObj* pStream, SStreamTask* pTask
pTask->tbSink.stbUid = pStream->targetStbUid;
memcpy(pTask->tbSink.stbFullName, pStream->targetSTbName, TSDB_TABLE_FNAME_LEN);
pTask->tbSink.pSchemaWrapper = tCloneSSchemaWrapper(&pStream->outputSchema);
+ if (pTask->tbSink.pSchemaWrapper == NULL) {
+ return TSDB_CODE_OUT_OF_MEMORY;
+ }
}
+
return 0;
}
-int32_t mndAddDispatcherToInnerTask(SMnode* pMnode, SStreamObj* pStream, SStreamTask* pTask) {
+#define SINK_NODE_LEVEL (0)
+
+int32_t mndAddDispatcherForInnerTask(SMnode* pMnode, SStreamObj* pStream, SStreamTask* pTask) {
bool isShuffle = false;
if (pStream->fixedSinkVgId == 0) {
SDbObj* pDb = mndAcquireDb(pMnode, pStream->targetDb);
if (pDb != NULL && pDb->cfg.numOfVgroups > 1) {
+
isShuffle = true;
pTask->outputType = TASK_OUTPUT__SHUFFLE_DISPATCH;
pTask->dispatchMsgType = TDMT_STREAM_TASK_DISPATCH;
@@ -127,47 +120,46 @@ int32_t mndAddDispatcherToInnerTask(SMnode* pMnode, SStreamObj* pStream, SStream
sdbRelease(pMnode->pSdb, pDb);
}
+ SArray* pSinkNodeList = taosArrayGetP(pStream->tasks, SINK_NODE_LEVEL);
+ int32_t numOfSinkNodes = taosArrayGetSize(pSinkNodeList);
+
if (isShuffle) {
memcpy(pTask->shuffleDispatcher.stbFullName, pStream->targetSTbName, TSDB_TABLE_FNAME_LEN);
SArray* pVgs = pTask->shuffleDispatcher.dbInfo.pVgroupInfos;
- int32_t sz = taosArrayGetSize(pVgs);
- SArray* sinkLv = taosArrayGetP(pStream->tasks, 0);
- int32_t sinkLvSize = taosArrayGetSize(sinkLv);
- for (int32_t i = 0; i < sz; i++) {
+
+ int32_t numOfVgroups = taosArrayGetSize(pVgs);
+ for (int32_t i = 0; i < numOfVgroups; i++) {
SVgroupInfo* pVgInfo = taosArrayGet(pVgs, i);
- for (int32_t j = 0; j < sinkLvSize; j++) {
- SStreamTask* pLastLevelTask = taosArrayGetP(sinkLv, j);
- if (pLastLevelTask->nodeId == pVgInfo->vgId) {
- pVgInfo->taskId = pLastLevelTask->id.taskId;
+
+ for (int32_t j = 0; j < numOfSinkNodes; j++) {
+ SStreamTask* pSinkTask = taosArrayGetP(pSinkNodeList, j);
+ if (pSinkTask->nodeId == pVgInfo->vgId) {
+ pVgInfo->taskId = pSinkTask->id.taskId;
break;
}
}
}
} else {
- pTask->outputType = TASK_OUTPUT__FIXED_DISPATCH;
- pTask->dispatchMsgType = TDMT_STREAM_TASK_DISPATCH;
- SArray* pArray = taosArrayGetP(pStream->tasks, 0);
- // one sink only
- SStreamTask* lastLevelTask = taosArrayGetP(pArray, 0);
- pTask->fixedEpDispatcher.taskId = lastLevelTask->id.taskId;
- pTask->fixedEpDispatcher.nodeId = lastLevelTask->nodeId;
- pTask->fixedEpDispatcher.epSet = lastLevelTask->epSet;
+ SStreamTask* pOneSinkTask = taosArrayGetP(pSinkNodeList, 0);
+ setFixedDownstreamEpInfo(pTask, pOneSinkTask);
}
+
return 0;
}
-int32_t mndAssignTaskToVg(SMnode* pMnode, SStreamTask* pTask, SSubplan* plan, const SVgObj* pVgroup) {
+int32_t mndAssignStreamTaskToVgroup(SMnode* pMnode, SStreamTask* pTask, SSubplan* plan, const SVgObj* pVgroup) {
int32_t msgLen;
+
pTask->nodeId = pVgroup->vgId;
pTask->epSet = mndGetVgroupEpset(pMnode, pVgroup);
- plan->execNode.nodeId = pVgroup->vgId;
+ plan->execNode.nodeId = pTask->nodeId;
plan->execNode.epSet = pTask->epSet;
-
if (qSubPlanToString(plan, &pTask->exec.qmsg, &msgLen) < 0) {
terrno = TSDB_CODE_QRY_INVALID_INPUT;
return -1;
}
+
return 0;
}
@@ -210,100 +202,121 @@ SVgObj* mndSchedFetchOneVg(SMnode* pMnode, int64_t dbUid) {
return pVgroup;
}
+// create sink node for each vgroup.
int32_t mndAddShuffleSinkTasksToStream(SMnode* pMnode, SStreamObj* pStream) {
SSdb* pSdb = pMnode->pSdb;
void* pIter = NULL;
- SArray* tasks = taosArrayGetP(pStream->tasks, 0);
while (1) {
SVgObj* pVgroup = NULL;
pIter = sdbFetch(pSdb, SDB_VGROUP, pIter, (void**)&pVgroup);
- if (pIter == NULL) break;
- if (!mndVgroupInDb(pVgroup, pStream->targetDbUid)) {
- sdbRelease(pSdb, pVgroup);
- continue;
+ if (pIter == NULL) {
+ break;
}
- SStreamTask* pTask = tNewStreamTask(pStream->uid);
- if (pTask == NULL) {
+ if (!mndVgroupInDb(pVgroup, pStream->targetDbUid)) {
sdbRelease(pSdb, pVgroup);
- terrno = TSDB_CODE_OUT_OF_MEMORY;
- return -1;
+ continue;
}
- pTask->fillHistory = pStream->fillHistory;
- mndAddTaskToTaskSet(tasks, pTask);
-
- pTask->nodeId = pVgroup->vgId;
- pTask->epSet = mndGetVgroupEpset(pMnode, pVgroup);
- // type
- pTask->taskLevel = TASK_LEVEL__SINK;
-
- // sink
- if (pStream->smaId != 0) {
- pTask->outputType = TASK_OUTPUT__SMA;
- pTask->smaSink.smaId = pStream->smaId;
- } else {
- pTask->outputType = TASK_OUTPUT__TABLE;
- pTask->tbSink.stbUid = pStream->targetStbUid;
- memcpy(pTask->tbSink.stbFullName, pStream->targetSTbName, TSDB_TABLE_FNAME_LEN);
- pTask->tbSink.pSchemaWrapper = tCloneSSchemaWrapper(&pStream->outputSchema);
- if (pTask->tbSink.pSchemaWrapper == NULL) {
- terrno = TSDB_CODE_OUT_OF_MEMORY;
- return -1;
- }
- }
+ mndAddSinkTaskToStream(pStream, pMnode, pVgroup->vgId, pVgroup);
sdbRelease(pSdb, pVgroup);
}
+
return 0;
}
-int32_t mndAddFixedSinkTaskToStream(SMnode* pMnode, SStreamObj* pStream) {
- SArray* tasks = taosArrayGetP(pStream->tasks, 0);
- SStreamTask* pTask = tNewStreamTask(pStream->uid);
+int32_t mndAddSinkTaskToStream(SStreamObj* pStream, SMnode* pMnode, int32_t vgId, SVgObj* pVgroup) {
+ SArray* pTaskList = taosArrayGetP(pStream->tasks, SINK_NODE_LEVEL);
+
+ SStreamTask* pTask = tNewStreamTask(pStream->uid, TASK_LEVEL__SINK, pStream->fillHistory, 0, pTaskList);
if (pTask == NULL) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
return -1;
}
- pTask->fillHistory = pStream->fillHistory;
- mndAddTaskToTaskSet(tasks, pTask);
- pTask->nodeId = pStream->fixedSinkVgId;
-#if 0
- SVgObj* pVgroup = mndAcquireVgroup(pMnode, pStream->fixedSinkVgId);
- if (pVgroup == NULL) {
- return -1;
- }
+ pTask->nodeId = vgId;
pTask->epSet = mndGetVgroupEpset(pMnode, pVgroup);
-#endif
- pTask->epSet = mndGetVgroupEpset(pMnode, &pStream->fixedSinkVg);
+ mndSetSinkTaskInfo(pStream, pTask);
+ return 0;
+}
- pTask->taskLevel = TASK_LEVEL__SINK;
+static int32_t mndScheduleFillHistoryStreamTask(SMnode* pMnode, SStreamObj* pStream) {
+ return 0;
+}
- // sink
- if (pStream->smaId != 0) {
- pTask->outputType = TASK_OUTPUT__SMA;
- pTask->smaSink.smaId = pStream->smaId;
+static int32_t addSourceStreamTask(SMnode* pMnode, SVgObj* pVgroup, SArray* pTaskList, SStreamObj* pStream,
+ SSubplan* plan, uint64_t uid, int8_t taskLevel, int8_t fillHistory,
+ bool hasExtraSink) {
+ SStreamTask* pTask = tNewStreamTask(uid, taskLevel, fillHistory, pStream->triggerParam, pTaskList);
+ if (pTask == NULL) {
+ return terrno;
+ }
+
+ // sink or dispatch
+ if (hasExtraSink) {
+ mndAddDispatcherForInnerTask(pMnode, pStream, pTask);
} else {
- pTask->outputType = TASK_OUTPUT__TABLE;
- pTask->tbSink.stbUid = pStream->targetStbUid;
- memcpy(pTask->tbSink.stbFullName, pStream->targetSTbName, TSDB_TABLE_FNAME_LEN);
- pTask->tbSink.pSchemaWrapper = tCloneSSchemaWrapper(&pStream->outputSchema);
+ mndSetSinkTaskInfo(pStream, pTask);
}
- return 0;
+ if (mndAssignStreamTaskToVgroup(pMnode, pTask, plan, pVgroup) < 0) {
+ return terrno;
+ }
+
+ return TSDB_CODE_SUCCESS;
+}
+
+static SStreamChildEpInfo* createStreamTaskEpInfo(SStreamTask* pTask) {
+ SStreamChildEpInfo* pEpInfo = taosMemoryMalloc(sizeof(SStreamChildEpInfo));
+ if (pEpInfo == NULL) {
+ terrno = TSDB_CODE_OUT_OF_MEMORY;
+ return NULL;
+ }
+
+ pEpInfo->childId = pTask->selfChildId;
+ pEpInfo->epSet = pTask->epSet;
+ pEpInfo->nodeId = pTask->nodeId;
+ pEpInfo->taskId = pTask->id.taskId;
+
+ return pEpInfo;
+}
+
+void setFixedDownstreamEpInfo(SStreamTask* pDstTask, const SStreamTask* pTask) {
+ STaskDispatcherFixedEp* pDispatcher = &pDstTask->fixedEpDispatcher;
+ pDispatcher->taskId = pTask->id.taskId;
+ pDispatcher->nodeId = pTask->nodeId;
+ pDispatcher->epSet = pTask->epSet;
+
+ pDstTask->outputType = TASK_OUTPUT__FIXED_DISPATCH;
+ pDstTask->dispatchMsgType = TDMT_STREAM_TASK_DISPATCH;
+}
+
+int32_t appendToUpstream(SStreamTask* pTask, SStreamTask* pUpstream) {
+ SStreamChildEpInfo* pEpInfo = createStreamTaskEpInfo(pTask);
+ if (pEpInfo == NULL) {
+ return TSDB_CODE_OUT_OF_MEMORY;
+ }
+
+ if(pUpstream->childEpInfo == NULL) {
+ pUpstream->childEpInfo = taosArrayInit(4, POINTER_BYTES);
+ }
+
+ taosArrayPush(pUpstream->childEpInfo, &pEpInfo);
+ return TSDB_CODE_SUCCESS;
}
int32_t mndScheduleStream(SMnode* pMnode, SStreamObj* pStream) {
- SSdb* pSdb = pMnode->pSdb;
+ SSdb* pSdb = pMnode->pSdb;
+
SQueryPlan* pPlan = qStringToQueryPlan(pStream->physicalPlan);
if (pPlan == NULL) {
terrno = TSDB_CODE_QRY_INVALID_INPUT;
return -1;
}
- int32_t planTotLevel = LIST_LENGTH(pPlan->pSubplans);
- pStream->tasks = taosArrayInit(planTotLevel, sizeof(void*));
+ int32_t planTotLevel = LIST_LENGTH(pPlan->pSubplans);
+ pStream->tasks = taosArrayInit(planTotLevel, POINTER_BYTES);
bool hasExtraSink = false;
bool externalTargetDB = strcmp(pStream->sourceDb, pStream->targetDb) != 0;
@@ -313,13 +326,13 @@ int32_t mndScheduleStream(SMnode* pMnode, SStreamObj* pStream) {
return -1;
}
- bool multiTarget = pDbObj->cfg.numOfVgroups > 1;
+ bool multiTarget = (pDbObj->cfg.numOfVgroups > 1);
sdbRelease(pSdb, pDbObj);
if (planTotLevel == 2 || externalTargetDB || multiTarget || pStream->fixedSinkVgId) {
- /*if (true) {*/
- SArray* taskOneLevel = taosArrayInit(0, sizeof(void*));
+ SArray* taskOneLevel = taosArrayInit(0, POINTER_BYTES);
taosArrayPush(pStream->tasks, &taskOneLevel);
+
// add extra sink
hasExtraSink = true;
if (pStream->fixedSinkVgId == 0) {
@@ -328,19 +341,20 @@ int32_t mndScheduleStream(SMnode* pMnode, SStreamObj* pStream) {
return -1;
}
} else {
- if (mndAddFixedSinkTaskToStream(pMnode, pStream) < 0) {
+ if (mndAddSinkTaskToStream(pStream, pMnode, pStream->fixedSinkVgId, &pStream->fixedSinkVg) < 0) {
// TODO free
return -1;
}
}
}
+
pStream->totalLevel = planTotLevel + hasExtraSink;
if (planTotLevel > 1) {
SStreamTask* pInnerTask;
// inner level
{
- SArray* taskInnerLevel = taosArrayInit(0, sizeof(void*));
+ SArray* taskInnerLevel = taosArrayInit(0, POINTER_BYTES);
taosArrayPush(pStream->tasks, &taskInnerLevel);
SNodeListNode* inner = (SNodeListNode*)nodesListGetNode(pPlan->pSubplans, 0);
@@ -350,25 +364,15 @@ int32_t mndScheduleStream(SMnode* pMnode, SStreamObj* pStream) {
return -1;
}
- pInnerTask = tNewStreamTask(pStream->uid);
+ pInnerTask = tNewStreamTask(pStream->uid, TASK_LEVEL__AGG, pStream->fillHistory, pStream->triggerParam, taskInnerLevel);
if (pInnerTask == NULL) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
qDestroyQueryPlan(pPlan);
return -1;
}
- pInnerTask->fillHistory = pStream->fillHistory;
- mndAddTaskToTaskSet(taskInnerLevel, pInnerTask);
-
- pInnerTask->childEpInfo = taosArrayInit(0, sizeof(void*));
-
- pInnerTask->taskLevel = TASK_LEVEL__AGG;
-
- // trigger
- pInnerTask->triggerParam = pStream->triggerParam;
-
// dispatch
- if (mndAddDispatcherToInnerTask(pMnode, pStream, pInnerTask) < 0) {
+ if (mndAddDispatcherForInnerTask(pMnode, pStream, pInnerTask) < 0) {
qDestroyQueryPlan(pPlan);
return -1;
}
@@ -377,7 +381,7 @@ int32_t mndScheduleStream(SMnode* pMnode, SStreamObj* pStream) {
SSnodeObj* pSnode = mndSchedFetchOneSnode(pMnode);
if (pSnode == NULL) {
SVgObj* pVgroup = mndSchedFetchOneVg(pMnode, pStream->sourceDbUid);
- if (mndAssignTaskToVg(pMnode, pInnerTask, plan, pVgroup) < 0) {
+ if (mndAssignStreamTaskToVgroup(pMnode, pInnerTask, plan, pVgroup) < 0) {
sdbRelease(pSdb, pVgroup);
qDestroyQueryPlan(pPlan);
return -1;
@@ -392,17 +396,18 @@ int32_t mndScheduleStream(SMnode* pMnode, SStreamObj* pStream) {
}
} else {
SVgObj* pVgroup = mndSchedFetchOneVg(pMnode, pStream->sourceDbUid);
- if (mndAssignTaskToVg(pMnode, pInnerTask, plan, pVgroup) < 0) {
+ if (mndAssignStreamTaskToVgroup(pMnode, pInnerTask, plan, pVgroup) < 0) {
sdbRelease(pSdb, pVgroup);
qDestroyQueryPlan(pPlan);
return -1;
}
+
sdbRelease(pSdb, pVgroup);
}
}
// source level
- SArray* taskSourceLevel = taosArrayInit(0, sizeof(void*));
+ SArray* taskSourceLevel = taosArrayInit(0, POINTER_BYTES);
taosArrayPush(pStream->tasks, &taskSourceLevel);
SNodeListNode* inner = (SNodeListNode*)nodesListGetNode(pPlan->pSubplans, 1);
@@ -416,66 +421,52 @@ int32_t mndScheduleStream(SMnode* pMnode, SStreamObj* pStream) {
while (1) {
SVgObj* pVgroup;
pIter = sdbFetch(pSdb, SDB_VGROUP, pIter, (void**)&pVgroup);
- if (pIter == NULL) break;
+ if (pIter == NULL) {
+ break;
+ }
+
if (!mndVgroupInDb(pVgroup, pStream->sourceDbUid)) {
sdbRelease(pSdb, pVgroup);
continue;
}
- SStreamTask* pTask = tNewStreamTask(pStream->uid);
+ SStreamTask* pTask = tNewStreamTask(pStream->uid, TASK_LEVEL__SOURCE, pStream->fillHistory, 0, taskSourceLevel);
if (pTask == NULL) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
sdbRelease(pSdb, pVgroup);
qDestroyQueryPlan(pPlan);
return -1;
}
- pTask->fillHistory = pStream->fillHistory;
- mndAddTaskToTaskSet(taskSourceLevel, pTask);
- pTask->triggerParam = 0;
+ // all the source tasks dispatch result to a single agg node.
+ setFixedDownstreamEpInfo(pTask, pInnerTask);
- // source
- pTask->taskLevel = TASK_LEVEL__SOURCE;
-
- // add fixed vg dispatch
- pTask->dispatchMsgType = TDMT_STREAM_TASK_DISPATCH;
- pTask->outputType = TASK_OUTPUT__FIXED_DISPATCH;
-
- pTask->fixedEpDispatcher.taskId = pInnerTask->id.taskId;
- pTask->fixedEpDispatcher.nodeId = pInnerTask->nodeId;
- pTask->fixedEpDispatcher.epSet = pInnerTask->epSet;
-
- if (mndAssignTaskToVg(pMnode, pTask, plan, pVgroup) < 0) {
+ if (mndAssignStreamTaskToVgroup(pMnode, pTask, plan, pVgroup) < 0) {
sdbRelease(pSdb, pVgroup);
qDestroyQueryPlan(pPlan);
return -1;
}
- SStreamChildEpInfo* pEpInfo = taosMemoryMalloc(sizeof(SStreamChildEpInfo));
- if (pEpInfo == NULL) {
- terrno = TSDB_CODE_OUT_OF_MEMORY;
- sdbRelease(pSdb, pVgroup);
+ int32_t code = appendToUpstream(pTask, pInnerTask);
+ sdbRelease(pSdb, pVgroup);
+
+ if (code != TSDB_CODE_SUCCESS) {
+ terrno = code;
qDestroyQueryPlan(pPlan);
return -1;
}
- pEpInfo->childId = pTask->selfChildId;
- pEpInfo->epSet = pTask->epSet;
- pEpInfo->nodeId = pTask->nodeId;
- pEpInfo->taskId = pTask->id.taskId;
- taosArrayPush(pInnerTask->childEpInfo, &pEpInfo);
- sdbRelease(pSdb, pVgroup);
}
- }
-
- if (planTotLevel == 1) {
- SArray* taskOneLevel = taosArrayInit(0, sizeof(void*));
- taosArrayPush(pStream->tasks, &taskOneLevel);
+ } else if (planTotLevel == 1) {
+ // create exec stream task, since only one level, the exec task is also the source task
+ SArray* pTaskList = taosArrayInit(0, POINTER_BYTES);
+ taosArrayPush(pStream->tasks, &pTaskList);
SNodeListNode* inner = (SNodeListNode*)nodesListGetNode(pPlan->pSubplans, 0);
if (LIST_LENGTH(inner->pNodeList) != 1) {
terrno = TSDB_CODE_QRY_INVALID_INPUT;
return -1;
}
+
SSubplan* plan = (SSubplan*)nodesListGetNode(inner->pNodeList, 0);
if (plan->subplanType != SUBPLAN_TYPE_SCAN) {
terrno = TSDB_CODE_QRY_INVALID_INPUT;
@@ -486,42 +477,26 @@ int32_t mndScheduleStream(SMnode* pMnode, SStreamObj* pStream) {
while (1) {
SVgObj* pVgroup;
pIter = sdbFetch(pSdb, SDB_VGROUP, pIter, (void**)&pVgroup);
- if (pIter == NULL) break;
- if (!mndVgroupInDb(pVgroup, pStream->sourceDbUid)) {
- sdbRelease(pSdb, pVgroup);
- continue;
+ if (pIter == NULL) {
+ break;
}
- SStreamTask* pTask = tNewStreamTask(pStream->uid);
- if (pTask == NULL) {
+ if (!mndVgroupInDb(pVgroup, pStream->sourceDbUid)) {
sdbRelease(pSdb, pVgroup);
- qDestroyQueryPlan(pPlan);
- return -1;
+ continue;
}
- pTask->fillHistory = pStream->fillHistory;
- mndAddTaskToTaskSet(taskOneLevel, pTask);
- // source
- pTask->taskLevel = TASK_LEVEL__SOURCE;
-
- // trigger
- pTask->triggerParam = pStream->triggerParam;
-
- // sink or dispatch
- if (hasExtraSink) {
- mndAddDispatcherToInnerTask(pMnode, pStream, pTask);
- } else {
- mndAddSinkToTask(pMnode, pStream, pTask);
- }
+ // new stream task
+ int32_t code = addSourceStreamTask(pMnode, pVgroup, pTaskList, pStream, plan, pStream->uid, TASK_LEVEL__SOURCE, pStream->fillHistory, hasExtraSink);
+ sdbRelease(pSdb, pVgroup);
- if (mndAssignTaskToVg(pMnode, pTask, plan, pVgroup) < 0) {
- sdbRelease(pSdb, pVgroup);
+ if (code != TSDB_CODE_SUCCESS) {
qDestroyQueryPlan(pPlan);
return -1;
}
- sdbRelease(pSdb, pVgroup);
}
}
+
qDestroyQueryPlan(pPlan);
return 0;
}
@@ -538,7 +513,23 @@ int32_t mndSchedInitSubEp(SMnode* pMnode, const SMqTopicObj* pTopic, SMqSubscrib
terrno = TSDB_CODE_QRY_INVALID_INPUT;
return -1;
}
+ }else if(pTopic->subType == TOPIC_SUB_TYPE__TABLE && pTopic->ast != NULL){
+ SNode *pAst = NULL;
+ if (nodesStringToNode(pTopic->ast, &pAst) != 0) {
+ mError("topic:%s, failed to create since %s", pTopic->name, terrstr());
+ return -1;
+ }
+
+ SPlanContext cxt = {.pAstRoot = pAst, .topicQuery = true};
+ if (qCreateQueryPlan(&cxt, &pPlan, NULL) != 0) {
+ mError("failed to create topic:%s since %s", pTopic->name, terrstr());
+ nodesDestroyNode(pAst);
+ return -1;
+ }
+ nodesDestroyNode(pAst);
+ }
+ if(pPlan){
int32_t levelNum = LIST_LENGTH(pPlan->pSubplans);
if (levelNum != 1) {
qDestroyQueryPlan(pPlan);
@@ -579,7 +570,7 @@ int32_t mndSchedInitSubEp(SMnode* pMnode, const SMqTopicObj* pTopic, SMqSubscrib
mDebug("init subscription %s for topic:%s assign vgId:%d", pSub->key, pTopic->name, pVgEp->vgId);
- if (pTopic->subType == TOPIC_SUB_TYPE__COLUMN) {
+ if (pSubplan) {
int32_t msgLen;
pSubplan->execNode.epSet = pVgEp->epSet;
diff --git a/source/dnode/mnode/impl/src/mndStb.c b/source/dnode/mnode/impl/src/mndStb.c
index 03616aaace20eaeb0bdda8162a72ab23b28841ec..d58b9fd4bf16ba9f4a632f4266a453b34c9e8866 100644
--- a/source/dnode/mnode/impl/src/mndStb.c
+++ b/source/dnode/mnode/impl/src/mndStb.c
@@ -1230,7 +1230,7 @@ static int32_t mndCheckAlterColForTopic(SMnode *pMnode, const char *stbFullName,
mInfo("topic:%s, check tag and column modifiable, stb:%s suid:%" PRId64 " colId:%d, subType:%d sql:%s",
pTopic->name, stbFullName, suid, colId, pTopic->subType, pTopic->sql);
- if (pTopic->subType != TOPIC_SUB_TYPE__COLUMN) {
+ if (pTopic->ast == NULL) {
sdbRelease(pSdb, pTopic);
continue;
}
@@ -2272,7 +2272,7 @@ static int32_t mndCheckDropStbForTopic(SMnode *pMnode, const char *stbFullName,
}
}
- if (pTopic->subType != TOPIC_SUB_TYPE__COLUMN) {
+ if (pTopic->ast == NULL) {
sdbRelease(pSdb, pTopic);
continue;
}
diff --git a/source/dnode/mnode/impl/src/mndStream.c b/source/dnode/mnode/impl/src/mndStream.c
index 0713150b486d953ccb42eb6acd5e907251d268d6..39a1fa223f13c1ca7137172b7f55ee9e09f82817 100644
--- a/source/dnode/mnode/impl/src/mndStream.c
+++ b/source/dnode/mnode/impl/src/mndStream.c
@@ -700,6 +700,7 @@ static int32_t mndProcessCreateStreamReq(SRpcMsg *pReq) {
if (pStream->sourceDbUid == streamObj.sourceDbUid) {
++numOfStream;
}
+
sdbRelease(pMnode->pSdb, pStream);
if (numOfStream > MND_STREAM_MAX_NUM) {
mError("too many streams, no more than %d for each database", MND_STREAM_MAX_NUM);
@@ -723,6 +724,7 @@ static int32_t mndProcessCreateStreamReq(SRpcMsg *pReq) {
pDb = NULL;
goto _OVER;
}
+
mndReleaseDb(pMnode, pDb);
STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_ROLLBACK, TRN_CONFLICT_DB_INSIDE, pReq, "create-stream");
diff --git a/source/dnode/mnode/impl/src/mndSubscribe.c b/source/dnode/mnode/impl/src/mndSubscribe.c
index e62102fa77254c37e0d8244ae437a9570f870dfb..74421afa338bdb3f403d78fd9634ebf912d2e501 100644
--- a/source/dnode/mnode/impl/src/mndSubscribe.c
+++ b/source/dnode/mnode/impl/src/mndSubscribe.c
@@ -111,7 +111,14 @@ static int32_t mndBuildSubChangeReq(void **pBuf, int32_t *pLen, const SMqSubscri
req.suid = pSub->stbUid;
tstrncpy(req.subKey, pSub->key, TSDB_SUBSCRIBE_KEY_LEN);
- int32_t tlen = sizeof(SMsgHead) + tEncodeSMqRebVgReq(NULL, &req);
+ int32_t tlen = 0;
+ int32_t ret = 0;
+ tEncodeSize(tEncodeSMqRebVgReq, &req, tlen, ret);
+ if (ret < 0) {
+ return -1;
+ }
+
+ tlen += sizeof(SMsgHead);
void *buf = taosMemoryMalloc(tlen);
if (buf == NULL) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
@@ -123,8 +130,14 @@ static int32_t mndBuildSubChangeReq(void **pBuf, int32_t *pLen, const SMqSubscri
pMsgHead->contLen = htonl(tlen);
pMsgHead->vgId = htonl(pRebVg->pVgEp->vgId);
- void *abuf = POINTER_SHIFT(buf, sizeof(SMsgHead));
- tEncodeSMqRebVgReq(&abuf, &req);
+ SEncoder encoder = {0};
+ tEncoderInit(&encoder, POINTER_SHIFT(buf, sizeof(SMsgHead)), tlen);
+ if (tEncodeSMqRebVgReq(&encoder, &req) < 0) {
+ taosMemoryFreeClear(buf);
+ tEncoderClear(&encoder);
+ return -1;
+ }
+ tEncoderClear(&encoder);
*pBuf = buf;
*pLen = tlen;
diff --git a/source/dnode/mnode/impl/src/mndTopic.c b/source/dnode/mnode/impl/src/mndTopic.c
index 95524c03230f38c90054c3ab1981addecb9698dc..f1ee7bca3ba5c960adc66154e4b25aca6febbfc2 100644
--- a/source/dnode/mnode/impl/src/mndTopic.c
+++ b/source/dnode/mnode/impl/src/mndTopic.c
@@ -28,7 +28,7 @@
#include "parser.h"
#include "tname.h"
-#define MND_TOPIC_VER_NUMBER 2
+#define MND_TOPIC_VER_NUMBER 3
#define MND_TOPIC_RESERVE_SIZE 64
SSdbRaw *mndTopicActionEncode(SMqTopicObj *pTopic);
@@ -170,7 +170,7 @@ SSdbRow *mndTopicActionDecode(SSdbRaw *pRaw) {
int8_t sver = 0;
if (sdbGetRawSoftVer(pRaw, &sver) != 0) goto TOPIC_DECODE_OVER;
- if (sver != 1 && sver != 2) {
+ if (sver < 1 || sver > MND_TOPIC_VER_NUMBER) {
terrno = TSDB_CODE_SDB_INVALID_DATA_VER;
goto TOPIC_DECODE_OVER;
}
@@ -197,7 +197,9 @@ SSdbRow *mndTopicActionDecode(SSdbRaw *pRaw) {
SDB_GET_INT8(pRaw, dataPos, &pTopic->withMeta, TOPIC_DECODE_OVER);
SDB_GET_INT64(pRaw, dataPos, &pTopic->stbUid, TOPIC_DECODE_OVER);
- SDB_GET_BINARY(pRaw, dataPos, pTopic->stbName, TSDB_TABLE_FNAME_LEN, TOPIC_DECODE_OVER);
+ if (sver >= 3) {
+ SDB_GET_BINARY(pRaw, dataPos, pTopic->stbName, TSDB_TABLE_FNAME_LEN, TOPIC_DECODE_OVER);
+ }
SDB_GET_INT32(pRaw, dataPos, &pTopic->sqlLen, TOPIC_DECODE_OVER);
pTopic->sql = taosMemoryCalloc(pTopic->sqlLen, sizeof(char));
if (pTopic->sql == NULL) {
@@ -422,6 +424,7 @@ static int32_t mndCreateTopic(SMnode *pMnode, SRpcMsg *pReq, SCMCreateTopicReq *
mError("failed to create topic:%s since %s", pCreate->name, terrstr());
taosMemoryFree(topicObj.ast);
taosMemoryFree(topicObj.sql);
+ nodesDestroyNode(pAst);
return -1;
}
@@ -429,6 +432,7 @@ static int32_t mndCreateTopic(SMnode *pMnode, SRpcMsg *pReq, SCMCreateTopicReq *
if (topicObj.ntbColIds == NULL) {
taosMemoryFree(topicObj.ast);
taosMemoryFree(topicObj.sql);
+ nodesDestroyNode(pAst);
terrno = TSDB_CODE_OUT_OF_MEMORY;
return -1;
}
@@ -444,6 +448,7 @@ static int32_t mndCreateTopic(SMnode *pMnode, SRpcMsg *pReq, SCMCreateTopicReq *
mError("topic:%s, failed to create since %s", pCreate->name, terrstr());
taosMemoryFree(topicObj.ast);
taosMemoryFree(topicObj.sql);
+ nodesDestroyNode(pAst);
return -1;
}
@@ -465,6 +470,11 @@ static int32_t mndCreateTopic(SMnode *pMnode, SRpcMsg *pReq, SCMCreateTopicReq *
strcpy(topicObj.stbName, pCreate->subStbName);
topicObj.stbUid = pStb->uid;
mndReleaseStb(pMnode, pStb);
+ if(pCreate->ast != NULL){
+ qDebugL("topic:%s ast %s", topicObj.name, pCreate->ast);
+ topicObj.ast = taosStrdup(pCreate->ast);
+ topicObj.astLen = strlen(pCreate->ast) + 1;
+ }
}
/*} else if (pCreate->subType == TOPIC_SUB_TYPE__DB) {*/
/*topicObj.ast = NULL;*/
@@ -914,13 +924,12 @@ static int32_t mndRetrieveTopic(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock *pBl
}else if(pTopic->subType == TOPIC_SUB_TYPE__TABLE){
SStbObj *pStb = mndAcquireStb(pMnode, pTopic->stbName);
if (pStb == NULL) {
- terrno = TSDB_CODE_MND_STB_NOT_EXIST;
- taosMemoryFree(schemaJson);
- return -1;
+ STR_TO_VARSTR(schemaJson, "NULL");
+ mError("mndRetrieveTopic mndAcquireStb null stbName:%s", pTopic->stbName);
+ }else{
+ schemaToJson(pStb->pColumns, pStb->numOfColumns, schemaJson);
+ mndReleaseStb(pMnode, pStb);
}
- schemaToJson(pStb->pColumns, pStb->numOfColumns, schemaJson);
-
- mndReleaseStb(pMnode, pStb);
}else{
STR_TO_VARSTR(schemaJson, "NULL");
}
diff --git a/source/dnode/mnode/impl/src/mndUser.c b/source/dnode/mnode/impl/src/mndUser.c
index 3da594109a1910fdd81ab654c3023d1cbfc48c9e..90d16a0a811c654f39f16c2d6b556200b718995d 100644
--- a/source/dnode/mnode/impl/src/mndUser.c
+++ b/source/dnode/mnode/impl/src/mndUser.c
@@ -488,7 +488,7 @@ SHashObj *mndDupUseDbHash(SHashObj *pOld) {
return pNew;
}
-static int32_t mndUserDupObj(SUserObj *pUser, SUserObj *pNew) {
+int32_t mndUserDupObj(SUserObj *pUser, SUserObj *pNew) {
memcpy(pNew, pUser, sizeof(SUserObj));
pNew->authVersion++;
pNew->updateTime = taosGetTimestampMs();
@@ -508,7 +508,7 @@ static int32_t mndUserDupObj(SUserObj *pUser, SUserObj *pNew) {
return 0;
}
-static void mndUserFreeObj(SUserObj *pUser) {
+void mndUserFreeObj(SUserObj *pUser) {
taosHashCleanup(pUser->readDbs);
taosHashCleanup(pUser->writeDbs);
taosHashCleanup(pUser->topics);
diff --git a/source/dnode/qnode/CMakeLists.txt b/source/dnode/qnode/CMakeLists.txt
index 5426cd55d3c1183f86e057baf205e199d6307fea..10bbbc1b26e94183da9ceeef6fadb700737fb1c4 100644
--- a/source/dnode/qnode/CMakeLists.txt
+++ b/source/dnode/qnode/CMakeLists.txt
@@ -14,4 +14,7 @@ target_link_libraries(
PRIVATE qworker
PRIVATE qcom
PRIVATE executor
+ PRIVATE tdb
+ PRIVATE wal
+ PRIVATE index
)
\ No newline at end of file
diff --git a/source/dnode/qnode/src/qnode.c b/source/dnode/qnode/src/qnode.c
index 5efc714e95c85b528c24d64fc9642788d06c99ec..348235551271958318ec989056ff6d5b5037e7e7 100644
--- a/source/dnode/qnode/src/qnode.c
+++ b/source/dnode/qnode/src/qnode.c
@@ -14,7 +14,6 @@
*/
#include "executor.h"
-#include "libs/function/function.h"
#include "qndInt.h"
#include "query.h"
#include "qworker.h"
diff --git a/source/dnode/snode/CMakeLists.txt b/source/dnode/snode/CMakeLists.txt
index 6f1e7f9593a369afc0f3626514853873012da75f..ebfe80ecabe006f82bec5ec3c997064789ae69e8 100644
--- a/source/dnode/snode/CMakeLists.txt
+++ b/source/dnode/snode/CMakeLists.txt
@@ -14,4 +14,6 @@ target_link_libraries(
PRIVATE util
PRIVATE qcom
PRIVATE stream
+ PRIVATE wal
+ PRIVATE index
)
diff --git a/source/dnode/snode/inc/sndInt.h b/source/dnode/snode/inc/sndInt.h
index 3fcee862a102acd202be4848af800f16a8ef3b66..68f7f756d50ed190ad2117b68f24408cbc773670 100644
--- a/source/dnode/snode/inc/sndInt.h
+++ b/source/dnode/snode/inc/sndInt.h
@@ -53,6 +53,8 @@ int32_t sndStopTaskOfStream(SStreamMeta* pMeta, int64_t streamId);
int32_t sndResumeTaskOfStream(SStreamMeta* pMeta, int64_t streamId);
#endif
+void initStreamStateAPI(SStorageAPI* pAPI);
+
#ifdef __cplusplus
}
#endif
diff --git a/source/dnode/snode/src/snode.c b/source/dnode/snode/src/snode.c
index e9225f3d6edf1bea49ae7aee163eb4bca536ae9a..678dd34e4a6e12ba3573f4d8103e10bcba754119 100644
--- a/source/dnode/snode/src/snode.c
+++ b/source/dnode/snode/src/snode.c
@@ -62,8 +62,7 @@ FAIL:
}
int32_t sndExpandTask(SSnode *pSnode, SStreamTask *pTask, int64_t ver) {
- ASSERT(pTask->taskLevel == TASK_LEVEL__AGG);
- ASSERT(taosArrayGetSize(pTask->childEpInfo) != 0);
+ ASSERT(pTask->taskLevel == TASK_LEVEL__AGG && taosArrayGetSize(pTask->childEpInfo) != 0);
pTask->refCnt = 1;
pTask->status.schedStatus = TASK_SCHED_STATUS__INACTIVE;
@@ -87,9 +86,10 @@ int32_t sndExpandTask(SSnode *pSnode, SStreamTask *pTask, int64_t ver) {
}
int32_t numOfChildEp = taosArrayGetSize(pTask->childEpInfo);
- SReadHandle mgHandle = { .vnode = NULL, .numOfVgroups = numOfChildEp, .pStateBackend = pTask->pState };
+ SReadHandle handle = { .vnode = NULL, .numOfVgroups = numOfChildEp, .pStateBackend = pTask->pState };
+ initStreamStateAPI(&handle.api);
- pTask->exec.pExecutor = qCreateStreamExecTaskInfo(pTask->exec.qmsg, &mgHandle, 0);
+ pTask->exec.pExecutor = qCreateStreamExecTaskInfo(pTask->exec.qmsg, &handle, 0);
ASSERT(pTask->exec.pExecutor);
streamSetupTrigger(pTask);
diff --git a/source/dnode/snode/src/snodeInitApi.c b/source/dnode/snode/src/snodeInitApi.c
new file mode 100644
index 0000000000000000000000000000000000000000..f5e924525212c34ffd375454703a91ccc160836f
--- /dev/null
+++ b/source/dnode/snode/src/snodeInitApi.c
@@ -0,0 +1,109 @@
+/*
+ * Copyright (c) 2019 TAOS Data, Inc.
+ *
+ * This program is free software: you can use, redistribute, and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3
+ * or later ("AGPL"), as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see .
+ */
+
+#include "storageapi.h"
+#include "tstreamUpdate.h"
+#include "streamState.h"
+
+static void initStateStoreAPI(SStateStore* pStore);
+static void initFunctionStateStore(SFunctionStateStore* pStore);
+
+void initStreamStateAPI(SStorageAPI* pAPI) {
+ initStateStoreAPI(&pAPI->stateStore);
+ initFunctionStateStore(&pAPI->functionStore);
+}
+
+void initStateStoreAPI(SStateStore* pStore) {
+ pStore->streamFileStateInit = streamFileStateInit;
+ pStore->updateInfoDestoryColseWinSBF = updateInfoDestoryColseWinSBF;
+
+ pStore->streamStateGetByPos = streamStateGetByPos;
+
+ pStore->streamStatePutParName = streamStatePutParName;
+ pStore->streamStateGetParName = streamStateGetParName;
+
+ pStore->streamStateAddIfNotExist = streamStateAddIfNotExist;
+ pStore->streamStateReleaseBuf = streamStateReleaseBuf;
+ pStore->streamStateFreeVal = streamStateFreeVal;
+
+ pStore->streamStatePut = streamStatePut;
+ pStore->streamStateGet = streamStateGet;
+ pStore->streamStateCheck = streamStateCheck;
+ pStore->streamStateGetByPos = streamStateGetByPos;
+ pStore->streamStateDel = streamStateDel;
+ pStore->streamStateClear = streamStateClear;
+ pStore->streamStateSaveInfo = streamStateSaveInfo;
+ pStore->streamStateGetInfo = streamStateGetInfo;
+ pStore->streamStateSetNumber = streamStateSetNumber;
+
+ pStore->streamStateFillPut = streamStateFillPut;
+ pStore->streamStateFillGet = streamStateFillGet;
+ pStore->streamStateFillDel = streamStateFillDel;
+
+ pStore->streamStateCurNext = streamStateCurNext;
+ pStore->streamStateCurPrev = streamStateCurPrev;
+
+ pStore->streamStateGetAndCheckCur = streamStateGetAndCheckCur;
+ pStore->streamStateSeekKeyNext = streamStateSeekKeyNext;
+ pStore->streamStateFillSeekKeyNext = streamStateFillSeekKeyNext;
+ pStore->streamStateFillSeekKeyPrev = streamStateFillSeekKeyPrev;
+ pStore->streamStateFreeCur = streamStateFreeCur;
+
+ pStore->streamStateGetGroupKVByCur = streamStateGetGroupKVByCur;
+ pStore->streamStateGetKVByCur = streamStateGetKVByCur;
+
+ pStore->streamStateSessionAddIfNotExist = streamStateSessionAddIfNotExist;
+ pStore->streamStateSessionPut = streamStateSessionPut;
+ pStore->streamStateSessionGet = streamStateSessionGet;
+ pStore->streamStateSessionDel = streamStateSessionDel;
+ pStore->streamStateSessionClear = streamStateSessionClear;
+ pStore->streamStateSessionGetKVByCur = streamStateSessionGetKVByCur;
+ pStore->streamStateStateAddIfNotExist = streamStateStateAddIfNotExist;
+ pStore->streamStateSessionGetKeyByRange = streamStateSessionGetKeyByRange;
+
+ pStore->updateInfoInit = updateInfoInit;
+ pStore->updateInfoFillBlockData = updateInfoFillBlockData;
+ pStore->updateInfoIsUpdated = updateInfoIsUpdated;
+ pStore->updateInfoIsTableInserted = updateInfoIsTableInserted;
+ pStore->updateInfoDestroy = updateInfoDestroy;
+
+ pStore->updateInfoInitP = updateInfoInitP;
+ pStore->updateInfoAddCloseWindowSBF = updateInfoAddCloseWindowSBF;
+ pStore->updateInfoDestoryColseWinSBF = updateInfoDestoryColseWinSBF;
+ pStore->updateInfoSerialize = updateInfoSerialize;
+ pStore->updateInfoDeserialize = updateInfoDeserialize;
+
+ pStore->streamStateSessionSeekKeyNext = streamStateSessionSeekKeyNext;
+ pStore->streamStateSessionSeekKeyCurrentPrev = streamStateSessionSeekKeyCurrentPrev;
+ pStore->streamStateSessionSeekKeyCurrentNext = streamStateSessionSeekKeyCurrentNext;
+
+ pStore->streamFileStateInit = streamFileStateInit;
+
+ pStore->streamFileStateDestroy = streamFileStateDestroy;
+ pStore->streamFileStateClear = streamFileStateClear;
+ pStore->needClearDiskBuff = needClearDiskBuff;
+
+ pStore->streamStateOpen = streamStateOpen;
+ pStore->streamStateClose = streamStateClose;
+ pStore->streamStateBegin = streamStateBegin;
+ pStore->streamStateCommit = streamStateCommit;
+ pStore->streamStateDestroy= streamStateDestroy;
+ pStore->streamStateDeleteCheckPoint = streamStateDeleteCheckPoint;
+}
+
+void initFunctionStateStore(SFunctionStateStore* pStore) {
+ pStore->streamStateFuncPut = streamStateFuncPut;
+ pStore->streamStateFuncGet = streamStateFuncGet;
+}
\ No newline at end of file
diff --git a/source/dnode/vnode/CMakeLists.txt b/source/dnode/vnode/CMakeLists.txt
index e8660cd6adff1283f3852abf52aa3e7769250dc5..b18cb8e282e7b724965c9268b9140c694d3c30d4 100644
--- a/source/dnode/vnode/CMakeLists.txt
+++ b/source/dnode/vnode/CMakeLists.txt
@@ -15,6 +15,7 @@ target_sources(
"src/vnd/vnodeSync.c"
"src/vnd/vnodeSnapshot.c"
"src/vnd/vnodeRetention.c"
+ "src/vnd/vnodeInitApi.c"
# meta
"src/meta/metaOpen.c"
@@ -86,6 +87,28 @@ target_include_directories(
PUBLIC "${TD_SOURCE_DIR}/include/libs/scalar"
PUBLIC "${TD_SOURCE_DIR}/contrib/rocksdb/include"
)
+IF (TD_LINUX)
+target_link_libraries(
+ vnode
+ PUBLIC os
+ PUBLIC util
+ PUBLIC common
+ PUBLIC tfs
+ PUBLIC wal
+ PUBLIC qworker
+ PUBLIC sync
+ PUBLIC executor
+ PUBLIC scheduler
+ PUBLIC tdb
+
+ # PUBLIC bdb
+ # PUBLIC scalar
+ PUBLIC rocksdb-shared
+ PUBLIC transport
+ PUBLIC stream
+ PUBLIC index
+)
+ELSE()
target_link_libraries(
vnode
PUBLIC os
@@ -106,6 +129,7 @@ target_link_libraries(
PUBLIC stream
PUBLIC index
)
+ENDIF()
IF (TD_GRANT)
TARGET_LINK_LIBRARIES(vnode PUBLIC grant)
diff --git a/source/dnode/vnode/inc/vnode.h b/source/dnode/vnode/inc/vnode.h
index 18fa893fa438348343ed40967334c25f29f4ecad..7e19425d56492018577aa733920906874931ebfe 100644
--- a/source/dnode/vnode/inc/vnode.h
+++ b/source/dnode/vnode/inc/vnode.h
@@ -26,6 +26,7 @@
#include "tfs.h"
#include "wal.h"
+#include "filter.h"
#include "tcommon.h"
#include "tfs.h"
#include "tgrant.h"
@@ -33,6 +34,7 @@
#include "trow.h"
#include "tdb.h"
+#include "storageapi.h"
#ifdef __cplusplus
extern "C" {
@@ -65,17 +67,18 @@ int32_t vnodeStart(SVnode *pVnode);
void vnodeStop(SVnode *pVnode);
int64_t vnodeGetSyncHandle(SVnode *pVnode);
void vnodeGetSnapshot(SVnode *pVnode, SSnapshot *pSnapshot);
-void vnodeGetInfo(SVnode *pVnode, const char **dbname, int32_t *vgId);
+void vnodeGetInfo(void *pVnode, const char **dbname, int32_t *vgId, int64_t* numOfTables, int64_t* numOfNormalTables);
int32_t vnodeProcessCreateTSma(SVnode *pVnode, void *pCont, uint32_t contLen);
+int32_t vnodeGetTableList(void* pVnode, int8_t type, SArray* pList);
int32_t vnodeGetAllTableList(SVnode *pVnode, uint64_t uid, SArray *list);
int32_t vnodeIsCatchUp(SVnode *pVnode);
ESyncRole vnodeGetRole(SVnode *pVnode);
-int32_t vnodeGetCtbIdList(SVnode *pVnode, int64_t suid, SArray *list);
+int32_t vnodeGetCtbIdList(void *pVnode, int64_t suid, SArray *list);
int32_t vnodeGetCtbIdListByFilter(SVnode *pVnode, int64_t suid, SArray *list, bool (*filter)(void *arg), void *arg);
int32_t vnodeGetStbIdList(SVnode *pVnode, int64_t suid, SArray *list);
-void *vnodeGetIdx(SVnode *pVnode);
-void *vnodeGetIvtIdx(SVnode *pVnode);
+void *vnodeGetIdx(void *pVnode);
+void *vnodeGetIvtIdx(void *pVnode);
int32_t vnodeGetCtbNum(SVnode *pVnode, int64_t suid, int64_t *num);
int32_t vnodeGetTimeSeriesNum(SVnode *pVnode, int64_t *num);
@@ -97,76 +100,33 @@ void vnodeApplyWriteMsg(SQueueInfo *pInfo, STaosQall *qall, int32_t numOfMsgs
void vnodeProposeCommitOnNeed(SVnode *pVnode, bool atExit);
// meta
-typedef struct SMeta SMeta; // todo: remove
-typedef struct SMetaReader SMetaReader;
-typedef struct SMetaEntry SMetaEntry;
-
-#define META_READER_NOLOCK 0x1
-
-void metaReaderInit(SMetaReader *pReader, SMeta *pMeta, int32_t flags);
+void _metaReaderInit(SMetaReader *pReader, void *pVnode, int32_t flags, SStoreMeta* pAPI);
void metaReaderReleaseLock(SMetaReader *pReader);
void metaReaderClear(SMetaReader *pReader);
-int32_t metaGetTableEntryByUid(SMetaReader *pReader, tb_uid_t uid);
-int32_t metaGetTableEntryByUidCache(SMetaReader *pReader, tb_uid_t uid);
-int metaGetTableEntryByName(SMetaReader *pReader, const char *name);
-int32_t metaGetTableTags(SMeta *pMeta, uint64_t suid, SArray *uidList);
-int32_t metaGetTableTagsByUids(SMeta *pMeta, int64_t suid, SArray *uidList);
+int32_t metaReaderGetTableEntryByUid(SMetaReader *pReader, tb_uid_t uid);
+int32_t metaReaderGetTableEntryByUidCache(SMetaReader *pReader, tb_uid_t uid);
+int32_t metaGetTableTags(void *pVnode, uint64_t suid, SArray *uidList);
+int32_t metaGetTableTagsByUids(void* pVnode, int64_t suid, SArray *uidList);
int32_t metaReadNext(SMetaReader *pReader);
-const void *metaGetTableTagVal(void *tag, int16_t type, STagVal *tagVal);
+const void *metaGetTableTagVal(const void *tag, int16_t type, STagVal *tagVal);
int metaGetTableNameByUid(void *meta, uint64_t uid, char *tbName);
int metaGetTableSzNameByUid(void *meta, uint64_t uid, char *tbName);
-int metaGetTableUidByName(void *meta, char *tbName, uint64_t *uid);
+int metaGetTableUidByName(void *pVnode, char *tbName, uint64_t *uid);
int metaGetTableTypeByName(void *meta, char *tbName, ETableType *tbType);
-bool metaIsTableExist(SMeta *pMeta, tb_uid_t uid);
-int32_t metaGetCachedTableUidList(SMeta *pMeta, tb_uid_t suid, const uint8_t *key, int32_t keyLen, SArray *pList,
+bool metaIsTableExist(void* pVnode, tb_uid_t uid);
+int32_t metaGetCachedTableUidList(void *pVnode, tb_uid_t suid, const uint8_t *key, int32_t keyLen, SArray *pList,
bool *acquired);
-int32_t metaUidFilterCachePut(SMeta *pMeta, uint64_t suid, const void *pKey, int32_t keyLen, void *pPayload,
+int32_t metaUidFilterCachePut(void *pVnode, uint64_t suid, const void *pKey, int32_t keyLen, void *pPayload,
int32_t payloadLen, double selectivityRatio);
-int32_t metaUidCacheClear(SMeta *pMeta, uint64_t suid);
tb_uid_t metaGetTableEntryUidByName(SMeta *pMeta, const char *name);
-int32_t metaTbGroupCacheClear(SMeta* pMeta, uint64_t suid);
-int32_t metaGetCachedTbGroup(SMeta* pMeta, tb_uid_t suid, const uint8_t* pKey, int32_t keyLen, SArray** pList);
-int32_t metaPutTbGroupToCache(SMeta* pMeta, uint64_t suid, const void* pKey, int32_t keyLen, void* pPayload,
- int32_t payloadLen);
+int32_t metaGetCachedTbGroup(void *pVnode, tb_uid_t suid, const uint8_t *pKey, int32_t keyLen, SArray **pList);
+int32_t metaPutTbGroupToCache(void* pVnode, uint64_t suid, const void *pKey, int32_t keyLen, void *pPayload,
+ int32_t payloadLen);
int64_t metaGetTbNum(SMeta *pMeta);
-int64_t metaGetNtbNum(SMeta *pMeta);
-typedef struct {
- int64_t uid;
- int64_t ctbNum;
-} SMetaStbStats;
-int32_t metaGetStbStats(SMeta *pMeta, int64_t uid, SMetaStbStats *pInfo);
-
-typedef struct SMetaFltParam {
- tb_uid_t suid;
- int16_t cid;
- int16_t type;
- void *val;
- bool reverse;
- bool equal;
- int (*filterFunc)(void *a, void *b, int16_t type);
-
-} SMetaFltParam;
-
-// TODO, refactor later
-int32_t metaFilterTableIds(SMeta *pMeta, SMetaFltParam *param, SArray *results);
-int32_t metaFilterCreateTime(SMeta *pMeta, SMetaFltParam *parm, SArray *pUids);
-int32_t metaFilterTableName(SMeta *pMeta, SMetaFltParam *param, SArray *pUids);
-int32_t metaFilterTtl(SMeta *pMeta, SMetaFltParam *param, SArray *pUids);
-
-#if 1 // refact APIs below (TODO)
-typedef SVCreateTbReq STbCfg;
-typedef SVCreateTSmaReq SSmaCfg;
-
-typedef struct SMTbCursor SMTbCursor;
-
-SMTbCursor *metaOpenTbCursor(SMeta *pMeta);
-void metaCloseTbCursor(SMTbCursor *pTbCur);
-int32_t metaTbCursorNext(SMTbCursor *pTbCur, ETableType jumpTableType);
-int32_t metaTbCursorPrev(SMTbCursor *pTbCur, ETableType jumpTableType);
-#endif
+int32_t metaGetStbStats(void *pVnode, int64_t uid, int64_t *numOfTables);
// tsdb
typedef struct STsdbReader STsdbReader;
@@ -182,13 +142,14 @@ typedef struct STsdbReader STsdbReader;
#define CACHESCAN_RETRIEVE_LAST_ROW 0x4
#define CACHESCAN_RETRIEVE_LAST 0x8
-int32_t tsdbReaderOpen(SVnode *pVnode, SQueryTableDataCond *pCond, void *pTableList, int32_t numOfTables,
- SSDataBlock *pResBlock, STsdbReader **ppReader, const char *idstr, bool countOnly, SHashObj** pIgnoreTables);
+int32_t tsdbReaderOpen(void *pVnode, SQueryTableDataCond *pCond, void *pTableList, int32_t numOfTables,
+ SSDataBlock *pResBlock, void **ppReader, const char *idstr, bool countOnly,
+ SHashObj **pIgnoreTables);
int32_t tsdbSetTableList(STsdbReader *pReader, const void *pTableList, int32_t num);
void tsdbReaderSetId(STsdbReader *pReader, const char *idstr);
void tsdbReaderClose(STsdbReader *pReader);
int32_t tsdbNextDataBlock(STsdbReader *pReader, bool *hasNext);
-int32_t tsdbRetrieveDatablockSMA(STsdbReader *pReader, SSDataBlock *pDataBlock, bool *allHave);
+int32_t tsdbRetrieveDatablockSMA(STsdbReader *pReader, SSDataBlock *pDataBlock, bool *allHave, bool *hasNullSMA);
void tsdbReleaseDataBlock(STsdbReader *pReader);
SSDataBlock *tsdbRetrieveDataBlock(STsdbReader *pTsdbReadHandle, SArray *pColumnIdList);
int32_t tsdbReaderReset(STsdbReader *pReader, SQueryTableDataCond *pCond);
@@ -198,48 +159,27 @@ void *tsdbGetIdx(SMeta *pMeta);
void *tsdbGetIvtIdx(SMeta *pMeta);
uint64_t tsdbGetReaderMaxVersion(STsdbReader *pReader);
void tsdbReaderSetCloseFlag(STsdbReader *pReader);
-int64_t tsdbGetLastTimestamp(SVnode* pVnode, void* pTableList, int32_t numOfTables, const char* pIdStr);
+int64_t tsdbGetLastTimestamp(SVnode *pVnode, void *pTableList, int32_t numOfTables, const char *pIdStr);
-int32_t tsdbReuseCacherowsReader(void* pReader, void* pTableIdList, int32_t numOfTables);
+int32_t tsdbReuseCacherowsReader(void *pReader, void *pTableIdList, int32_t numOfTables);
int32_t tsdbCacherowsReaderOpen(void *pVnode, int32_t type, void *pTableIdList, int32_t numOfTables, int32_t numOfCols,
SArray *pCidList, int32_t *pSlotIds, uint64_t suid, void **pReader, const char *idstr);
int32_t tsdbRetrieveCacheRows(void *pReader, SSDataBlock *pResBlock, const int32_t *slotIds, const int32_t *dstSlotIds,
SArray *pTableUids);
void *tsdbCacherowsReaderClose(void *pReader);
-int32_t tsdbGetTableSchema(SVnode *pVnode, int64_t uid, STSchema **pSchema, int64_t *suid);
+int32_t tsdbGetTableSchema(void *pVnode, int64_t uid, STSchema **pSchema, int64_t *suid);
void tsdbCacheSetCapacity(SVnode *pVnode, size_t capacity);
size_t tsdbCacheGetCapacity(SVnode *pVnode);
size_t tsdbCacheGetUsage(SVnode *pVnode);
int32_t tsdbCacheGetElems(SVnode *pVnode);
-// tq
-typedef struct SMetaTableInfo {
- int64_t suid;
- int64_t uid;
- SSchemaWrapper *schema;
- char tbName[TSDB_TABLE_NAME_LEN];
-} SMetaTableInfo;
-
+//// tq
typedef struct SIdInfo {
int64_t version;
int32_t index;
} SIdInfo;
-typedef struct SSnapContext {
- SMeta *pMeta;
- int64_t snapVersion;
- TBC *pCur;
- int64_t suid;
- int8_t subType;
- SHashObj *idVersion;
- SHashObj *suidInfo;
- SArray *idList;
- int32_t index;
- bool withMeta;
- bool queryMeta; // true-get meta, false-get data
-} SSnapContext;
-
typedef struct STqReader {
SPackedData msg;
SSubmitReq2 submit;
@@ -257,21 +197,26 @@ typedef struct STqReader {
} STqReader;
STqReader *tqReaderOpen(SVnode *pVnode);
-void tqCloseReader(STqReader *);
+void tqReaderClose(STqReader *);
void tqReaderSetColIdList(STqReader *pReader, SArray *pColIdList);
-int32_t tqReaderSetTbUidList(STqReader *pReader, const SArray *tbUidList);
+int32_t tqReaderSetTbUidList(STqReader *pReader, const SArray *tbUidList, const char* id);
int32_t tqReaderAddTbUidList(STqReader *pReader, const SArray *pTableUidList);
int32_t tqReaderRemoveTbUidList(STqReader *pReader, const SArray *tbUidList);
-int32_t tqSeekVer(STqReader *pReader, int64_t ver, const char *id);
-bool tqNextBlockInWal(STqReader* pReader, const char* idstr);
-bool tqNextBlockImpl(STqReader *pReader, const char* idstr);
+bool tqReaderIsQueriedTable(STqReader* pReader, uint64_t uid);
+bool tqCurrentBlockConsumed(const STqReader* pReader);
+
+int32_t tqReaderSeek(STqReader *pReader, int64_t ver, const char *id);
+bool tqNextBlockInWal(STqReader *pReader, const char *idstr);
+bool tqNextBlockImpl(STqReader *pReader, const char *idstr);
+SWalReader* tqGetWalReader(STqReader* pReader);
+SSDataBlock* tqGetResultBlock (STqReader* pReader);
-int32_t extractMsgFromWal(SWalReader* pReader, void** pItem, const char* id);
+int32_t extractMsgFromWal(SWalReader *pReader, void **pItem, const char *id);
int32_t tqReaderSetSubmitMsg(STqReader *pReader, void *msgStr, int32_t msgLen, int64_t ver);
bool tqNextDataBlockFilterOut(STqReader *pReader, SHashObj *filterOutUids);
-int32_t tqRetrieveDataBlock(STqReader *pReader, const char* idstr);
+int32_t tqRetrieveDataBlock(STqReader *pReader, SSDataBlock** pRes, const char* idstr);
int32_t tqRetrieveTaosxBlock(STqReader *pReader, SArray *blocks, SArray *schemas, SSubmitTbData **pSubmitTbDataRet);
int32_t vnodeEnqueueStreamMsg(SVnode *pVnode, SRpcMsg *pMsg);
@@ -288,10 +233,10 @@ int32_t vnodeSnapWriterOpen(SVnode *pVnode, int64_t sver, int64_t ever, SVSnapWr
int32_t vnodeSnapWriterClose(SVSnapWriter *pWriter, int8_t rollback, SSnapshot *pSnapshot);
int32_t vnodeSnapWrite(SVSnapWriter *pWriter, uint8_t *pData, uint32_t nData);
-int32_t buildSnapContext(SMeta *pMeta, int64_t snapVersion, int64_t suid, int8_t subType, bool withMeta,
+int32_t buildSnapContext(SVnode *pVnode, int64_t snapVersion, int64_t suid, int8_t subType, bool withMeta,
SSnapContext **ctxRet);
-int32_t getMetafromSnapShot(SSnapContext *ctx, void **pBuf, int32_t *contLen, int16_t *type, int64_t *uid);
-SMetaTableInfo getUidfromSnapShot(SSnapContext *ctx);
+int32_t getTableInfoFromSnapshot(SSnapContext *ctx, void **pBuf, int32_t *contLen, int16_t *type, int64_t *uid);
+SMetaTableInfo getMetaTableInfoFromSnapshot(SSnapContext *ctx);
int32_t setForSnapShot(SSnapContext *ctx, int64_t uid);
int32_t destroySnapContext(SSnapContext *ctx);
@@ -350,67 +295,9 @@ struct SVnodeCfg {
int32_t tsdbPageSize;
};
-typedef struct {
- uint64_t uid;
- uint64_t groupId;
-} STableKeyInfo;
-
#define TABLE_ROLLUP_ON ((int8_t)0x1)
#define TABLE_IS_ROLLUP(FLG) (((FLG) & (TABLE_ROLLUP_ON)) != 0)
#define TABLE_SET_ROLLUP(FLG) ((FLG) |= TABLE_ROLLUP_ON)
-struct SMetaEntry {
- int64_t version;
- int8_t type;
- int8_t flags; // TODO: need refactor?
- tb_uid_t uid;
- char *name;
- union {
- struct {
- SSchemaWrapper schemaRow;
- SSchemaWrapper schemaTag;
- SRSmaParam rsmaParam;
- } stbEntry;
- struct {
- int64_t ctime;
- int32_t ttlDays;
- int32_t commentLen;
- char *comment;
- tb_uid_t suid;
- uint8_t *pTags;
- } ctbEntry;
- struct {
- int64_t ctime;
- int32_t ttlDays;
- int32_t commentLen;
- char *comment;
- int32_t ncid; // next column id
- SSchemaWrapper schemaRow;
- } ntbEntry;
- struct {
- STSma *tsma;
- } smaEntry;
- };
-
- uint8_t *pBuf;
-};
-
-struct SMetaReader {
- int32_t flags;
- SMeta *pMeta;
- SDecoder coder;
- SMetaEntry me;
- void *pBuf;
- int32_t szBuf;
-};
-
-struct SMTbCursor {
- TBC *pDbc;
- void *pKey;
- void *pVal;
- int32_t kLen;
- int32_t vLen;
- SMetaReader mr;
-};
#ifdef __cplusplus
}
diff --git a/source/dnode/vnode/src/inc/meta.h b/source/dnode/vnode/src/inc/meta.h
index 3999aa0b7f22a53197413048eca7842e6fe5e57d..b39008147b2d1284a9e6d7562c2463874854ce94 100644
--- a/source/dnode/vnode/src/inc/meta.h
+++ b/source/dnode/vnode/src/inc/meta.h
@@ -162,6 +162,12 @@ typedef struct {
int metaCreateTagIdxKey(tb_uid_t suid, int32_t cid, const void* pTagData, int32_t nTagData, int8_t type, tb_uid_t uid,
STagIdxKey** ppTagIdxKey, int32_t* nTagIdxKey);
+// TODO, refactor later
+int32_t metaFilterTableIds(void *pVnode, SMetaFltParam *param, SArray *results);
+int32_t metaFilterCreateTime(void *pVnode, SMetaFltParam *parm, SArray *pUids);
+int32_t metaFilterTableName(void *pVnode, SMetaFltParam *param, SArray *pUids);
+int32_t metaFilterTtl(void *pVnode, SMetaFltParam *param, SArray *pUids);
+
#ifndef META_REFACT
// SMetaDB
int metaOpenDB(SMeta* pMeta);
diff --git a/source/dnode/vnode/src/inc/tq.h b/source/dnode/vnode/src/inc/tq.h
index 9652473f9d2e64b8047219d812983d8e070391d5..4ba8d6d69fe3d490e4fb4e87a5ac417cee2ee263 100644
--- a/source/dnode/vnode/src/inc/tq.h
+++ b/source/dnode/vnode/src/inc/tq.h
@@ -72,6 +72,8 @@ typedef struct {
typedef struct {
int64_t suid;
+ char* qmsg; // SubPlanToString
+ SNode* node;
} STqExecTb;
typedef struct {
@@ -96,18 +98,17 @@ typedef enum tq_handle_status {
} tq_handle_status;
typedef struct {
- char subKey[TSDB_SUBSCRIBE_KEY_LEN];
- int64_t consumerId;
- int32_t epoch;
- int8_t fetchMeta;
- int64_t snapshotVer;
- SWalReader* pWalReader;
- SWalRef* pRef;
- // STqPushHandle pushHandle; // push
- STqExecHandle execHandle; // exec
- SRpcMsg* msg;
- int32_t noDataPollCnt;
- tq_handle_status status;
+ char subKey[TSDB_SUBSCRIBE_KEY_LEN];
+ int64_t consumerId;
+ int32_t epoch;
+ int8_t fetchMeta;
+ int64_t snapshotVer;
+ SWalReader* pWalReader;
+ SWalRef* pRef;
+// STqPushHandle pushHandle; // push
+ STqExecHandle execHandle; // exec
+ SRpcMsg* msg;
+ tq_handle_status status;
} STqHandle;
struct STQ {
diff --git a/source/dnode/vnode/src/inc/tsdb.h b/source/dnode/vnode/src/inc/tsdb.h
index 8fddb9790946e2d0aee2c7d0417d6f271abffd89..9df95a379a47ae15f9014d12ac439b31d35779a4 100644
--- a/source/dnode/vnode/src/inc/tsdb.h
+++ b/source/dnode/vnode/src/inc/tsdb.h
@@ -297,6 +297,7 @@ int32_t tsdbUpdateDelFileHdr(SDelFWriter *pWriter);
// SDelFReader
int32_t tsdbDelFReaderOpen(SDelFReader **ppReader, SDelFile *pFile, STsdb *pTsdb);
int32_t tsdbDelFReaderClose(SDelFReader **ppReader);
+int32_t tsdbReadDelDatav1(SDelFReader *pReader, SDelIdx *pDelIdx, SArray *aDelData, int64_t maxVer);
int32_t tsdbReadDelData(SDelFReader *pReader, SDelIdx *pDelIdx, SArray *aDelData);
int32_t tsdbReadDelIdx(SDelFReader *pReader, SArray *aDelIdx);
// tsdbRead.c ==============================================================================================
@@ -847,9 +848,6 @@ int32_t tsdbCacheDeleteLastrow(SLRUCache *pCache, tb_uid_t uid, TSKEY eKey);
int32_t tsdbCacheDeleteLast(SLRUCache *pCache, tb_uid_t uid, TSKEY eKey);
int32_t tsdbCacheDelete(SLRUCache *pCache, tb_uid_t uid, TSKEY eKey);
-void tsdbCacheSetCapacity(SVnode *pVnode, size_t capacity);
-size_t tsdbCacheGetCapacity(SVnode *pVnode);
-
// int32_t tsdbCacheLastArray2Row(SArray *pLastArray, STSRow **ppRow, STSchema *pSchema);
// ========== inline functions ==========
diff --git a/source/dnode/vnode/src/inc/vnodeInt.h b/source/dnode/vnode/src/inc/vnodeInt.h
index f05631138834f19d3550d83ee62e52d372289f0b..2811fc35b0af49a423679953a2df42b2d1087e0e 100644
--- a/source/dnode/vnode/src/inc/vnodeInt.h
+++ b/source/dnode/vnode/src/inc/vnodeInt.h
@@ -103,6 +103,19 @@ struct SQueryNode {
_query_reseek_func_t reseek;
};
+#if 1 // refact APIs below (TODO)
+typedef SVCreateTbReq STbCfg;
+typedef SVCreateTSmaReq SSmaCfg;
+
+SMTbCursor* metaOpenTbCursor(void* pVnode);
+void metaCloseTbCursor(SMTbCursor* pTbCur);
+void metaPauseTbCursor(SMTbCursor* pTbCur);
+void metaResumeTbCursor(SMTbCursor* pTbCur, int8_t first);
+int32_t metaTbCursorNext(SMTbCursor* pTbCur, ETableType jumpTableType);
+int32_t metaTbCursorPrev(SMTbCursor* pTbCur, ETableType jumpTableType);
+
+#endif
+
void* vnodeBufPoolMalloc(SVBufPool* pPool, int size);
void* vnodeBufPoolMallocAligned(SVBufPool* pPool, int size);
void vnodeBufPoolFree(SVBufPool* pPool, void* p);
@@ -135,6 +148,7 @@ int metaAlterSTable(SMeta* pMeta, int64_t version, SVCreateStbReq* p
int metaDropSTable(SMeta* pMeta, int64_t verison, SVDropStbReq* pReq, SArray* tbUidList);
int metaCreateTable(SMeta* pMeta, int64_t version, SVCreateTbReq* pReq, STableMetaRsp** pMetaRsp);
int metaDropTable(SMeta* pMeta, int64_t version, SVDropTbReq* pReq, SArray* tbUids, int64_t* tbUid);
+int32_t metaTrimTables(SMeta* pMeta);
int metaTtlDropTable(SMeta* pMeta, int64_t ttl, SArray* tbUids);
int metaAlterTable(SMeta* pMeta, int64_t version, SVAlterTbReq* pReq, STableMetaRsp* pMetaRsp);
SSchemaWrapper* metaGetTableSchema(SMeta* pMeta, tb_uid_t uid, int32_t sver, int lock);
@@ -143,6 +157,9 @@ int32_t metaGetTbTSchemaEx(SMeta* pMeta, tb_uid_t suid, tb_uid_t uid, in
int metaGetTableEntryByName(SMetaReader* pReader, const char* name);
int metaAlterCache(SMeta* pMeta, int32_t nPage);
+int32_t metaUidCacheClear(SMeta* pMeta, uint64_t suid);
+int32_t metaTbGroupCacheClear(SMeta* pMeta, uint64_t suid);
+
int metaAddIndexToSTable(SMeta* pMeta, int64_t version, SVCreateStbReq* pReq);
int metaDropIndexFromSTable(SMeta* pMeta, int64_t version, SDropIndexReq* pReq);
@@ -161,6 +178,8 @@ void* metaGetIdx(SMeta* pMeta);
void* metaGetIvtIdx(SMeta* pMeta);
int metaTtlSmaller(SMeta* pMeta, uint64_t time, SArray* uidList);
+void metaReaderInit(SMetaReader* pReader, SMeta* pMeta, int32_t flags);
+
int32_t metaCreateTSma(SMeta* pMeta, int64_t version, SSmaCfg* pCfg);
int32_t metaDropTSma(SMeta* pMeta, int64_t indexUid);
@@ -473,6 +492,8 @@ struct SCompactInfo {
STimeWindow tw;
};
+void initStorageAPI(SStorageAPI* pAPI);
+
#ifdef __cplusplus
}
#endif
diff --git a/source/dnode/vnode/src/meta/metaCache.c b/source/dnode/vnode/src/meta/metaCache.c
index 436ca1abd3c7939488796b536c28aed03029da80..8749b3ac9464ed0e446e59e7db409bc09d3d600d 100644
--- a/source/dnode/vnode/src/meta/metaCache.c
+++ b/source/dnode/vnode/src/meta/metaCache.c
@@ -499,8 +499,9 @@ static void initCacheKey(uint64_t* buf, const SHashObj* pHashMap, uint64_t suid,
ASSERT(keyLen == sizeof(uint64_t) * 2);
}
-int32_t metaGetCachedTableUidList(SMeta* pMeta, tb_uid_t suid, const uint8_t* pKey, int32_t keyLen, SArray* pList1,
+int32_t metaGetCachedTableUidList(void* pVnode, tb_uid_t suid, const uint8_t* pKey, int32_t keyLen, SArray* pList1,
bool* acquireRes) {
+ SMeta* pMeta = ((SVnode*)pVnode)->pMeta;
int32_t vgId = TD_VID(pMeta->pVnode);
// generate the composed key for LRU cache
@@ -603,9 +604,10 @@ static int32_t addNewEntry(SHashObj* pTableEntry, const void* pKey, int32_t keyL
}
// check both the payload size and selectivity ratio
-int32_t metaUidFilterCachePut(SMeta* pMeta, uint64_t suid, const void* pKey, int32_t keyLen, void* pPayload,
+int32_t metaUidFilterCachePut(void* pVnode, uint64_t suid, const void* pKey, int32_t keyLen, void* pPayload,
int32_t payloadLen, double selectivityRatio) {
int32_t code = 0;
+ SMeta* pMeta = ((SVnode*)pVnode)->pMeta;
int32_t vgId = TD_VID(pMeta->pVnode);
if (selectivityRatio > tsSelectivityRatio) {
@@ -702,7 +704,8 @@ int32_t metaUidCacheClear(SMeta* pMeta, uint64_t suid) {
return TSDB_CODE_SUCCESS;
}
-int32_t metaGetCachedTbGroup(SMeta* pMeta, tb_uid_t suid, const uint8_t* pKey, int32_t keyLen, SArray** pList) {
+int32_t metaGetCachedTbGroup(void* pVnode, tb_uid_t suid, const uint8_t* pKey, int32_t keyLen, SArray** pList) {
+ SMeta* pMeta = ((SVnode*)pVnode)->pMeta;
int32_t vgId = TD_VID(pMeta->pVnode);
// generate the composed key for LRU cache
@@ -786,9 +789,10 @@ static void freeTbGroupCachePayload(const void* key, size_t keyLen, void* value)
}
-int32_t metaPutTbGroupToCache(SMeta* pMeta, uint64_t suid, const void* pKey, int32_t keyLen, void* pPayload,
+int32_t metaPutTbGroupToCache(void* pVnode, uint64_t suid, const void* pKey, int32_t keyLen, void* pPayload,
int32_t payloadLen) {
int32_t code = 0;
+ SMeta* pMeta = ((SVnode*)pVnode)->pMeta;
int32_t vgId = TD_VID(pMeta->pVnode);
if (payloadLen > tsTagFilterResCacheSize) {
diff --git a/source/dnode/vnode/src/meta/metaQuery.c b/source/dnode/vnode/src/meta/metaQuery.c
index d464f64de327969f4c302498fe2f6370676845d6..29fe89c3f29716bf49940b6bffe061e798a608b1 100644
--- a/source/dnode/vnode/src/meta/metaQuery.c
+++ b/source/dnode/vnode/src/meta/metaQuery.c
@@ -17,10 +17,16 @@
#include "osMemory.h"
#include "tencode.h"
+void _metaReaderInit(SMetaReader *pReader, void *pVnode, int32_t flags, SStoreMeta *pAPI) {
+ SMeta *pMeta = ((SVnode *)pVnode)->pMeta;
+ metaReaderInit(pReader, pMeta, flags);
+ pReader->pAPI = pAPI;
+}
+
void metaReaderInit(SMetaReader *pReader, SMeta *pMeta, int32_t flags) {
memset(pReader, 0, sizeof(*pReader));
- pReader->flags = flags;
pReader->pMeta = pMeta;
+ pReader->flags = flags;
if (pReader->pMeta && !(flags & META_READER_NOLOCK)) {
metaRLock(pMeta);
}
@@ -64,96 +70,20 @@ _err:
return -1;
}
-// int metaGetTableEntryByUidTest(void* meta, SArray *uidList) {
-//
-// SArray* readerList = taosArrayInit(taosArrayGetSize(uidList), sizeof(SMetaReader));
-// SArray* uidVersion = taosArrayInit(taosArrayGetSize(uidList), sizeof(STbDbKey));
-// SMeta *pMeta = meta;
-// int64_t version;
-// SHashObj *uHash = taosHashInit(32, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), false, HASH_NO_LOCK);
-//
-// int64_t stt1 = taosGetTimestampUs();
-// for(int i = 0; i < taosArrayGetSize(uidList); i++) {
-// void* ppVal = NULL;
-// int vlen = 0;
-// uint64_t * uid = taosArrayGet(uidList, i);
-// // query uid.idx
-// if (tdbTbGet(pMeta->pUidIdx, uid, sizeof(*uid), &ppVal, &vlen) < 0) {
-// continue;
-// }
-// version = *(int64_t *)ppVal;
-//
-// STbDbKey tbDbKey = {.version = version, .uid = *uid};
-// taosArrayPush(uidVersion, &tbDbKey);
-// taosHashPut(uHash, uid, sizeof(int64_t), ppVal, sizeof(int64_t));
-// }
-// int64_t stt2 = taosGetTimestampUs();
-// qDebug("metaGetTableEntryByUidTest1 rows:%d, cost:%ld us", taosArrayGetSize(uidList), stt2-stt1);
-//
-// TBC *pCur = NULL;
-// tdbTbcOpen(pMeta->pTbDb, &pCur, NULL);
-// tdbTbcMoveToFirst(pCur);
-// void *pKey = NULL;
-// int kLen = 0;
-//
-// while(1){
-// SMetaReader pReader = {0};
-// int32_t ret = tdbTbcNext(pCur, &pKey, &kLen, &pReader.pBuf, &pReader.szBuf);
-// if (ret < 0) break;
-// STbDbKey *tmp = (STbDbKey*)pKey;
-// int64_t *ver = (int64_t*)taosHashGet(uHash, &tmp->uid, sizeof(int64_t));
-// if(ver == NULL || *ver != tmp->version) continue;
-// taosArrayPush(readerList, &pReader);
-// }
-// tdbTbcClose(pCur);
-//
-// taosArrayClear(readerList);
-// int64_t stt3 = taosGetTimestampUs();
-// qDebug("metaGetTableEntryByUidTest2 rows:%d, cost:%ld us", taosArrayGetSize(uidList), stt3-stt2);
-// for(int i = 0; i < taosArrayGetSize(uidVersion); i++) {
-// SMetaReader pReader = {0};
-//
-// STbDbKey *tbDbKey = taosArrayGet(uidVersion, i);
-// // query table.db
-// if (tdbTbGet(pMeta->pTbDb, tbDbKey, sizeof(STbDbKey), &pReader.pBuf, &pReader.szBuf) < 0) {
-// continue;
-// }
-// taosArrayPush(readerList, &pReader);
-// }
-// int64_t stt4 = taosGetTimestampUs();
-// qDebug("metaGetTableEntryByUidTest3 rows:%d, cost:%ld us", taosArrayGetSize(uidList), stt4-stt3);
-//
-// for(int i = 0; i < taosArrayGetSize(readerList); i++){
-// SMetaReader* pReader = taosArrayGet(readerList, i);
-// metaReaderInit(pReader, meta, 0);
-// // decode the entry
-// tDecoderInit(&pReader->coder, pReader->pBuf, pReader->szBuf);
-//
-// if (metaDecodeEntry(&pReader->coder, &pReader->me) < 0) {
-// }
-// metaReaderClear(pReader);
-// }
-// int64_t stt5 = taosGetTimestampUs();
-// qDebug("metaGetTableEntryByUidTest4 rows:%d, cost:%ld us", taosArrayGetSize(readerList), stt5-stt4);
-// return 0;
-// }
-
-bool metaIsTableExist(SMeta *pMeta, tb_uid_t uid) {
- // query uid.idx
- metaRLock(pMeta);
-
- if (tdbTbGet(pMeta->pUidIdx, &uid, sizeof(uid), NULL, NULL) < 0) {
- metaULock(pMeta);
+bool metaIsTableExist(void *pVnode, tb_uid_t uid) {
+ SVnode *pVnodeObj = pVnode;
+ metaRLock(pVnodeObj->pMeta); // query uid.idx
+ if (tdbTbGet(pVnodeObj->pMeta->pUidIdx, &uid, sizeof(uid), NULL, NULL) < 0) {
+ metaULock(pVnodeObj->pMeta);
return false;
}
- metaULock(pMeta);
-
+ metaULock(pVnodeObj->pMeta);
return true;
}
-int metaGetTableEntryByUid(SMetaReader *pReader, tb_uid_t uid) {
+int metaReaderGetTableEntryByUid(SMetaReader *pReader, tb_uid_t uid) {
SMeta *pMeta = pReader->pMeta;
int64_t version1;
@@ -167,7 +97,7 @@ int metaGetTableEntryByUid(SMetaReader *pReader, tb_uid_t uid) {
return metaGetTableEntryByVersion(pReader, version1, uid);
}
-int metaGetTableEntryByUidCache(SMetaReader *pReader, tb_uid_t uid) {
+int metaReaderGetTableEntryByUidCache(SMetaReader *pReader, tb_uid_t uid) {
SMeta *pMeta = pReader->pMeta;
SMetaInfo info;
@@ -190,7 +120,7 @@ int metaGetTableEntryByName(SMetaReader *pReader, const char *name) {
}
uid = *(tb_uid_t *)pReader->pBuf;
- return metaGetTableEntryByUid(pReader, uid);
+ return metaReaderGetTableEntryByUid(pReader, uid);
}
tb_uid_t metaGetTableEntryUidByName(SMeta *pMeta, const char *name) {
@@ -210,11 +140,11 @@ tb_uid_t metaGetTableEntryUidByName(SMeta *pMeta, const char *name) {
return uid;
}
-int metaGetTableNameByUid(void *meta, uint64_t uid, char *tbName) {
+int metaGetTableNameByUid(void *pVnode, uint64_t uid, char *tbName) {
int code = 0;
SMetaReader mr = {0};
- metaReaderInit(&mr, (SMeta *)meta, 0);
- code = metaGetTableEntryByUid(&mr, uid);
+ metaReaderInit(&mr, ((SVnode *)pVnode)->pMeta, 0);
+ code = metaReaderGetTableEntryByUid(&mr, uid);
if (code < 0) {
metaReaderClear(&mr);
return -1;
@@ -230,7 +160,7 @@ int metaGetTableSzNameByUid(void *meta, uint64_t uid, char *tbName) {
int code = 0;
SMetaReader mr = {0};
metaReaderInit(&mr, (SMeta *)meta, 0);
- code = metaGetTableEntryByUid(&mr, uid);
+ code = metaReaderGetTableEntryByUid(&mr, uid);
if (code < 0) {
metaReaderClear(&mr);
return -1;
@@ -241,15 +171,15 @@ int metaGetTableSzNameByUid(void *meta, uint64_t uid, char *tbName) {
return 0;
}
-int metaGetTableUidByName(void *meta, char *tbName, uint64_t *uid) {
+int metaGetTableUidByName(void *pVnode, char *tbName, uint64_t *uid) {
int code = 0;
SMetaReader mr = {0};
- metaReaderInit(&mr, (SMeta *)meta, 0);
+ metaReaderInit(&mr, ((SVnode *)pVnode)->pMeta, 0);
SMetaReader *pReader = &mr;
// query name.idx
- if (tdbTbGet(pReader->pMeta->pNameIdx, tbName, strlen(tbName) + 1, &pReader->pBuf, &pReader->szBuf) < 0) {
+ if (tdbTbGet(((SMeta *)pReader->pMeta)->pNameIdx, tbName, strlen(tbName) + 1, &pReader->pBuf, &pReader->szBuf) < 0) {
terrno = TSDB_CODE_PAR_TABLE_NOT_EXIST;
metaReaderClear(&mr);
return -1;
@@ -262,10 +192,10 @@ int metaGetTableUidByName(void *meta, char *tbName, uint64_t *uid) {
return 0;
}
-int metaGetTableTypeByName(void *meta, char *tbName, ETableType *tbType) {
+int metaGetTableTypeByName(void *pVnode, char *tbName, ETableType *tbType) {
int code = 0;
SMetaReader mr = {0};
- metaReaderInit(&mr, (SMeta *)meta, 0);
+ metaReaderInit(&mr, ((SVnode *)pVnode)->pMeta, 0);
code = metaGetTableEntryByName(&mr, tbName);
if (code == 0) *tbType = mr.me.type;
@@ -283,7 +213,7 @@ int metaReadNext(SMetaReader *pReader) {
}
#if 1 // ===================================================
-SMTbCursor *metaOpenTbCursor(SMeta *pMeta) {
+SMTbCursor *metaOpenTbCursor(void *pVnode) {
SMTbCursor *pTbCur = NULL;
pTbCur = (SMTbCursor *)taosMemoryCalloc(1, sizeof(*pTbCur));
@@ -291,12 +221,13 @@ SMTbCursor *metaOpenTbCursor(SMeta *pMeta) {
return NULL;
}
- metaReaderInit(&pTbCur->mr, pMeta, 0);
-
- tdbTbcOpen(pMeta->pUidIdx, &pTbCur->pDbc, NULL);
-
- tdbTbcMoveToFirst(pTbCur->pDbc);
+ SVnode *pVnodeObj = pVnode;
+ // metaReaderInit(&pTbCur->mr, pVnodeObj->pMeta, 0);
+ // tdbTbcMoveToFirst((TBC *)pTbCur->pDbc);
+ pTbCur->pMeta = pVnodeObj->pMeta;
+ pTbCur->paused = 1;
+ metaResumeTbCursor(pTbCur, 1);
return pTbCur;
}
@@ -304,21 +235,52 @@ void metaCloseTbCursor(SMTbCursor *pTbCur) {
if (pTbCur) {
tdbFree(pTbCur->pKey);
tdbFree(pTbCur->pVal);
- metaReaderClear(&pTbCur->mr);
- if (pTbCur->pDbc) {
- tdbTbcClose(pTbCur->pDbc);
+ if (!pTbCur->paused) {
+ metaReaderClear(&pTbCur->mr);
+ if (pTbCur->pDbc) {
+ tdbTbcClose((TBC *)pTbCur->pDbc);
+ }
}
taosMemoryFree(pTbCur);
}
}
+void metaPauseTbCursor(SMTbCursor *pTbCur) {
+ if (!pTbCur->paused) {
+ metaReaderClear(&pTbCur->mr);
+ tdbTbcClose((TBC *)pTbCur->pDbc);
+ pTbCur->paused = 1;
+ }
+}
+void metaResumeTbCursor(SMTbCursor *pTbCur, int8_t first) {
+ if (pTbCur->paused) {
+ metaReaderInit(&pTbCur->mr, pTbCur->pMeta, 0);
+
+ tdbTbcOpen(((SMeta *)pTbCur->pMeta)->pUidIdx, (TBC **)&pTbCur->pDbc, NULL);
+
+ if (first) {
+ tdbTbcMoveToFirst((TBC *)pTbCur->pDbc);
+ } else {
+ int c = 0;
+ tdbTbcMoveTo(pTbCur->pDbc, pTbCur->pKey, pTbCur->kLen, &c);
+ if (c < 0) {
+ tdbTbcMoveToPrev(pTbCur->pDbc);
+ } else {
+ tdbTbcMoveToNext(pTbCur->pDbc);
+ }
+ }
+
+ pTbCur->paused = 0;
+ }
+}
+
int32_t metaTbCursorNext(SMTbCursor *pTbCur, ETableType jumpTableType) {
int ret;
void *pBuf;
STbCfg tbCfg;
for (;;) {
- ret = tdbTbcNext(pTbCur->pDbc, &pTbCur->pKey, &pTbCur->kLen, &pTbCur->pVal, &pTbCur->vLen);
+ ret = tdbTbcNext((TBC *)pTbCur->pDbc, &pTbCur->pKey, &pTbCur->kLen, &pTbCur->pVal, &pTbCur->vLen);
if (ret < 0) {
return -1;
}
@@ -342,7 +304,7 @@ int32_t metaTbCursorPrev(SMTbCursor *pTbCur, ETableType jumpTableType) {
STbCfg tbCfg;
for (;;) {
- ret = tdbTbcPrev(pTbCur->pDbc, &pTbCur->pKey, &pTbCur->kLen, &pTbCur->pVal, &pTbCur->vLen);
+ ret = tdbTbcPrev((TBC *)pTbCur->pDbc, &pTbCur->pKey, &pTbCur->kLen, &pTbCur->pVal, &pTbCur->vLen);
if (ret < 0) {
return -1;
}
@@ -387,42 +349,6 @@ _query:
tDecoderClear(&dc);
goto _exit;
}
- { // Traverse to find the previous qualified data
- TBC *pCur;
- tdbTbcOpen(pMeta->pTbDb, &pCur, NULL);
- STbDbKey key = {.version = sver, .uid = INT64_MAX};
- int c = 0;
- tdbTbcMoveTo(pCur, &key, sizeof(key), &c);
- if (c < 0) {
- tdbTbcMoveToPrev(pCur);
- }
-
- void *pKey = NULL;
- void *pVal = NULL;
- int vLen = 0, kLen = 0;
- while (1) {
- int32_t ret = tdbTbcPrev(pCur, &pKey, &kLen, &pVal, &vLen);
- if (ret < 0) break;
-
- STbDbKey *tmp = (STbDbKey *)pKey;
- if (tmp->uid != uid) {
- continue;
- }
- SDecoder dcNew = {0};
- SMetaEntry meNew = {0};
- tDecoderInit(&dcNew, pVal, vLen);
- metaDecodeEntry(&dcNew, &meNew);
- pSchema = tCloneSSchemaWrapper(&meNew.stbEntry.schemaRow);
- tDecoderClear(&dcNew);
- tdbTbcClose(pCur);
- tdbFree(pKey);
- tdbFree(pVal);
- goto _exit;
- }
- tdbFree(pKey);
- tdbFree(pVal);
- tdbTbcClose(pCur);
- }
} else if (me.type == TSDB_CHILD_TABLE) {
uid = me.ctbEntry.suid;
tDecoderClear(&dc);
@@ -447,7 +373,6 @@ _query:
tDecoderClear(&dc);
_exit:
- tDecoderClear(&dc);
if (lock) {
metaULock(pMeta);
}
@@ -455,7 +380,6 @@ _exit:
return pSchema;
_err:
- tDecoderClear(&dc);
if (lock) {
metaULock(pMeta);
}
@@ -770,8 +694,6 @@ int64_t metaGetTimeSeriesNum(SMeta *pMeta) {
return pMeta->pVnode->config.vndStats.numOfTimeSeries + pMeta->pVnode->config.vndStats.numOfNTimeSeries;
}
-int64_t metaGetNtbNum(SMeta *pMeta) { return pMeta->pVnode->config.vndStats.numOfNTables; }
-
typedef struct {
SMeta *pMeta;
TBC *pCur;
@@ -876,7 +798,7 @@ STSmaWrapper *metaGetSmaInfoByTable(SMeta *pMeta, tb_uid_t uid, bool deepCopy) {
STSma *pTSma = NULL;
for (int i = 0; i < pSW->number; ++i) {
smaId = *(tb_uid_t *)taosArrayGet(pSmaIds, i);
- if (metaGetTableEntryByUid(&mr, smaId) < 0) {
+ if (metaReaderGetTableEntryByUid(&mr, smaId) < 0) {
tDecoderClear(&mr.coder);
metaWarn("vgId:%d, no entry for tbId:%" PRIi64 ", smaId:%" PRIi64, TD_VID(pMeta->pVnode), uid, smaId);
continue;
@@ -926,7 +848,7 @@ STSma *metaGetSmaInfoByIndex(SMeta *pMeta, int64_t indexUid) {
STSma *pTSma = NULL;
SMetaReader mr = {0};
metaReaderInit(&mr, pMeta, 0);
- if (metaGetTableEntryByUid(&mr, indexUid) < 0) {
+ if (metaReaderGetTableEntryByUid(&mr, indexUid) < 0) {
metaWarn("vgId:%d, failed to get table entry for smaId:%" PRIi64, TD_VID(pMeta->pVnode), indexUid);
metaReaderClear(&mr);
return NULL;
@@ -1027,7 +949,7 @@ SArray *metaGetSmaTbUids(SMeta *pMeta) {
#endif
-const void *metaGetTableTagVal(void *pTag, int16_t type, STagVal *val) {
+const void *metaGetTableTagVal(const void *pTag, int16_t type, STagVal *val) {
STag *tag = (STag *)pTag;
if (type == TSDB_DATA_TYPE_JSON) {
return tag;
@@ -1083,8 +1005,10 @@ typedef struct {
int32_t vLen;
} SIdxCursor;
-int32_t metaFilterCreateTime(SMeta *pMeta, SMetaFltParam *param, SArray *pUids) {
- int32_t ret = 0;
+int32_t metaFilterCreateTime(void *pVnode, SMetaFltParam *arg, SArray *pUids) {
+ SMeta *pMeta = ((SVnode *)pVnode)->pMeta;
+ SMetaFltParam *param = arg;
+ int32_t ret = 0;
SIdxCursor *pCursor = NULL;
pCursor = (SIdxCursor *)taosMemoryCalloc(1, sizeof(SIdxCursor));
@@ -1141,9 +1065,11 @@ END:
return ret;
}
-int32_t metaFilterTableName(SMeta *pMeta, SMetaFltParam *param, SArray *pUids) {
- int32_t ret = 0;
- char *buf = NULL;
+int32_t metaFilterTableName(void *pVnode, SMetaFltParam *arg, SArray *pUids) {
+ SMeta *pMeta = ((SVnode *)pVnode)->pMeta;
+ SMetaFltParam *param = arg;
+ int32_t ret = 0;
+ char *buf = NULL;
STagIdxKey *pKey = NULL;
int32_t nKey = 0;
@@ -1206,9 +1132,11 @@ END:
return ret;
}
-int32_t metaFilterTtl(SMeta *pMeta, SMetaFltParam *param, SArray *pUids) {
- int32_t ret = 0;
- char *buf = NULL;
+int32_t metaFilterTtl(void *pVnode, SMetaFltParam *arg, SArray *pUids) {
+ SMeta *pMeta = ((SVnode *)pVnode)->pMeta;
+ SMetaFltParam *param = arg;
+ int32_t ret = 0;
+ char *buf = NULL;
STtlIdxKey *pKey = NULL;
int32_t nKey = 0;
@@ -1235,7 +1163,10 @@ END:
// impl later
return 0;
}
-int32_t metaFilterTableIds(SMeta *pMeta, SMetaFltParam *param, SArray *pUids) {
+int32_t metaFilterTableIds(void *pVnode, SMetaFltParam *arg, SArray *pUids) {
+ SMeta *pMeta = ((SVnode *)pVnode)->pMeta;
+ SMetaFltParam *param = arg;
+
SMetaEntry oStbEntry = {0};
int32_t ret = -1;
char *buf = NULL;
@@ -1418,7 +1349,8 @@ static int32_t metaGetTableTagByUid(SMeta *pMeta, int64_t suid, int64_t uid, voi
return ret;
}
-int32_t metaGetTableTagsByUids(SMeta *pMeta, int64_t suid, SArray *uidList) {
+int32_t metaGetTableTagsByUids(void *pVnode, int64_t suid, SArray *uidList) {
+ SMeta *pMeta = ((SVnode *)pVnode)->pMeta;
const int32_t LIMIT = 128;
int32_t isLock = false;
@@ -1450,8 +1382,8 @@ int32_t metaGetTableTagsByUids(SMeta *pMeta, int64_t suid, SArray *uidList) {
return 0;
}
-int32_t metaGetTableTags(SMeta *pMeta, uint64_t suid, SArray *pUidTagInfo) {
- SMCtbCursor *pCur = metaOpenCtbCursor(pMeta, suid, 1);
+int32_t metaGetTableTags(void *pVnode, uint64_t suid, SArray *pUidTagInfo) {
+ SMCtbCursor *pCur = metaOpenCtbCursor(((SVnode *)pVnode)->pMeta, suid, 1);
// If len > 0 means there already have uids, and we only want the
// tags of the specified tables, of which uid in the uid list. Otherwise, all table tags are retrieved and kept
@@ -1556,30 +1488,35 @@ _exit:
return code;
}
-int32_t metaGetStbStats(SMeta *pMeta, int64_t uid, SMetaStbStats *pInfo) {
+int32_t metaGetStbStats(void *pVnode, int64_t uid, int64_t *numOfTables) {
int32_t code = 0;
+ *numOfTables = 0;
- metaRLock(pMeta);
+ SVnode *pVnodeObj = pVnode;
+ metaRLock(pVnodeObj->pMeta);
// fast path: search cache
- if (metaStatsCacheGet(pMeta, uid, pInfo) == TSDB_CODE_SUCCESS) {
- metaULock(pMeta);
+ SMetaStbStats state = {0};
+ if (metaStatsCacheGet(pVnodeObj->pMeta, uid, &state) == TSDB_CODE_SUCCESS) {
+ metaULock(pVnodeObj->pMeta);
+ *numOfTables = state.ctbNum;
goto _exit;
}
// slow path: search TDB
int64_t ctbNum = 0;
- vnodeGetCtbNum(pMeta->pVnode, uid, &ctbNum);
+ vnodeGetCtbNum(pVnode, uid, &ctbNum);
- metaULock(pMeta);
+ metaULock(pVnodeObj->pMeta);
+ *numOfTables = ctbNum;
- pInfo->uid = uid;
- pInfo->ctbNum = ctbNum;
+ state.uid = uid;
+ state.ctbNum = ctbNum;
// upsert the cache
- metaWLock(pMeta);
- metaStatsCacheUpsert(pMeta, pInfo);
- metaULock(pMeta);
+ metaWLock(pVnodeObj->pMeta);
+ metaStatsCacheUpsert(pVnodeObj->pMeta, &state);
+ metaULock(pVnodeObj->pMeta);
_exit:
return code;
diff --git a/source/dnode/vnode/src/meta/metaSma.c b/source/dnode/vnode/src/meta/metaSma.c
index 8d5821e28bfa2c33a5582e34bc32127970af6320..a49848f4421e1c0e103d2d083713582574aee609 100644
--- a/source/dnode/vnode/src/meta/metaSma.c
+++ b/source/dnode/vnode/src/meta/metaSma.c
@@ -13,8 +13,10 @@
* along with this program. If not, see .
*/
+#include "vnodeInt.h"
#include "meta.h"
+
static int metaHandleSmaEntry(SMeta *pMeta, const SMetaEntry *pME);
static int metaSaveSmaToDB(SMeta *pMeta, const SMetaEntry *pME);
@@ -36,7 +38,7 @@ int32_t metaCreateTSma(SMeta *pMeta, int64_t version, SSmaCfg *pCfg) {
// validate req
// save smaIndex
metaReaderInit(&mr, pMeta, 0);
- if (metaGetTableEntryByUidCache(&mr, pCfg->indexUid) == 0) {
+ if (metaReaderGetTableEntryByUidCache(&mr, pCfg->indexUid) == 0) {
#if 1
terrno = TSDB_CODE_TSMA_ALREADY_EXIST;
metaReaderClear(&mr);
diff --git a/source/dnode/vnode/src/meta/metaSnapshot.c b/source/dnode/vnode/src/meta/metaSnapshot.c
index 707dd66e3011dd98cd8449f20bf748a2b6f2ff7c..f4e930e509cd6406b415ce4296b128f1c72219eb 100644
--- a/source/dnode/vnode/src/meta/metaSnapshot.c
+++ b/source/dnode/vnode/src/meta/metaSnapshot.c
@@ -222,29 +222,29 @@ static void destroySTableInfoForChildTable(void* data) {
}
static void MoveToSnapShotVersion(SSnapContext* ctx) {
- tdbTbcClose(ctx->pCur);
- tdbTbcOpen(ctx->pMeta->pTbDb, &ctx->pCur, NULL);
+ tdbTbcClose((TBC*)ctx->pCur);
+ tdbTbcOpen(ctx->pMeta->pTbDb, (TBC**)&ctx->pCur, NULL);
STbDbKey key = {.version = ctx->snapVersion, .uid = INT64_MAX};
int c = 0;
- tdbTbcMoveTo(ctx->pCur, &key, sizeof(key), &c);
+ tdbTbcMoveTo((TBC*)ctx->pCur, &key, sizeof(key), &c);
if (c < 0) {
- tdbTbcMoveToPrev(ctx->pCur);
+ tdbTbcMoveToPrev((TBC*)ctx->pCur);
}
}
static int32_t MoveToPosition(SSnapContext* ctx, int64_t ver, int64_t uid) {
- tdbTbcClose(ctx->pCur);
- tdbTbcOpen(ctx->pMeta->pTbDb, &ctx->pCur, NULL);
+ tdbTbcClose((TBC*)ctx->pCur);
+ tdbTbcOpen(ctx->pMeta->pTbDb, (TBC**)&ctx->pCur, NULL);
STbDbKey key = {.version = ver, .uid = uid};
int c = 0;
- tdbTbcMoveTo(ctx->pCur, &key, sizeof(key), &c);
+ tdbTbcMoveTo((TBC*)ctx->pCur, &key, sizeof(key), &c);
return c;
}
static void MoveToFirst(SSnapContext* ctx) {
- tdbTbcClose(ctx->pCur);
- tdbTbcOpen(ctx->pMeta->pTbDb, &ctx->pCur, NULL);
- tdbTbcMoveToFirst(ctx->pCur);
+ tdbTbcClose((TBC*)ctx->pCur);
+ tdbTbcOpen(ctx->pMeta->pTbDb, (TBC**)&ctx->pCur, NULL);
+ tdbTbcMoveToFirst((TBC*)ctx->pCur);
}
static void saveSuperTableInfoForChildTable(SMetaEntry* me, SHashObj* suidInfo) {
@@ -260,12 +260,12 @@ static void saveSuperTableInfoForChildTable(SMetaEntry* me, SHashObj* suidInfo)
taosHashPut(suidInfo, &me->uid, sizeof(tb_uid_t), &dataTmp, sizeof(STableInfoForChildTable));
}
-int32_t buildSnapContext(SMeta* pMeta, int64_t snapVersion, int64_t suid, int8_t subType, bool withMeta,
+int32_t buildSnapContext(SVnode* pVnode, int64_t snapVersion, int64_t suid, int8_t subType, bool withMeta,
SSnapContext** ctxRet) {
SSnapContext* ctx = taosMemoryCalloc(1, sizeof(SSnapContext));
if (ctx == NULL) return -1;
*ctxRet = ctx;
- ctx->pMeta = pMeta;
+ ctx->pMeta = pVnode->pMeta;
ctx->snapVersion = snapVersion;
ctx->suid = suid;
ctx->subType = subType;
@@ -291,7 +291,7 @@ int32_t buildSnapContext(SMeta* pMeta, int64_t snapVersion, int64_t suid, int8_t
metaDebug("tmqsnap init snapVersion:%" PRIi64, ctx->snapVersion);
MoveToFirst(ctx);
while (1) {
- int32_t ret = tdbTbcNext(ctx->pCur, &pKey, &kLen, &pVal, &vLen);
+ int32_t ret = tdbTbcNext((TBC*)ctx->pCur, &pKey, &kLen, &pVal, &vLen);
if (ret < 0) break;
STbDbKey* tmp = (STbDbKey*)pKey;
if (tmp->version > ctx->snapVersion) break;
@@ -301,7 +301,7 @@ int32_t buildSnapContext(SMeta* pMeta, int64_t snapVersion, int64_t suid, int8_t
continue;
}
- if (tdbTbGet(pMeta->pUidIdx, &tmp->uid, sizeof(tb_uid_t), NULL, NULL) <
+ if (tdbTbGet(ctx->pMeta->pUidIdx, &tmp->uid, sizeof(tb_uid_t), NULL, NULL) <
0) { // check if table exist for now, need optimize later
continue;
}
@@ -329,7 +329,7 @@ int32_t buildSnapContext(SMeta* pMeta, int64_t snapVersion, int64_t suid, int8_t
MoveToSnapShotVersion(ctx);
while (1) {
- int32_t ret = tdbTbcPrev(ctx->pCur, &pKey, &kLen, &pVal, &vLen);
+ int32_t ret = tdbTbcPrev((TBC*)ctx->pCur, &pKey, &kLen, &pVal, &vLen);
if (ret < 0) break;
STbDbKey* tmp = (STbDbKey*)pKey;
@@ -378,7 +378,7 @@ int32_t buildSnapContext(SMeta* pMeta, int64_t snapVersion, int64_t suid, int8_t
}
int32_t destroySnapContext(SSnapContext* ctx) {
- tdbTbcClose(ctx->pCur);
+ tdbTbcClose((TBC*)ctx->pCur);
taosArrayDestroy(ctx->idList);
taosHashCleanup(ctx->idVersion);
taosHashCleanup(ctx->suidInfo);
@@ -466,7 +466,7 @@ int32_t setForSnapShot(SSnapContext* ctx, int64_t uid) {
return c;
}
-int32_t getMetafromSnapShot(SSnapContext* ctx, void** pBuf, int32_t* contLen, int16_t* type, int64_t* uid) {
+int32_t getTableInfoFromSnapshot(SSnapContext* ctx, void** pBuf, int32_t* contLen, int16_t* type, int64_t* uid) {
int32_t ret = 0;
void* pKey = NULL;
void* pVal = NULL;
@@ -496,7 +496,7 @@ int32_t getMetafromSnapShot(SSnapContext* ctx, void** pBuf, int32_t* contLen, in
metaDebug("tmqsnap get meta not exist uid:%" PRIi64 " version:%" PRIi64, *uid, idInfo->version);
}
- tdbTbcGet(ctx->pCur, (const void**)&pKey, &kLen, (const void**)&pVal, &vLen);
+ tdbTbcGet((TBC*)ctx->pCur, (const void**)&pKey, &kLen, (const void**)&pVal, &vLen);
SDecoder dc = {0};
SMetaEntry me = {0};
tDecoderInit(&dc, pVal, vLen);
@@ -598,7 +598,7 @@ int32_t getMetafromSnapShot(SSnapContext* ctx, void** pBuf, int32_t* contLen, in
return ret;
}
-SMetaTableInfo getUidfromSnapShot(SSnapContext* ctx) {
+SMetaTableInfo getMetaTableInfoFromSnapshot(SSnapContext* ctx) {
SMetaTableInfo result = {0};
void* pKey = NULL;
void* pVal = NULL;
@@ -619,10 +619,10 @@ SMetaTableInfo getUidfromSnapShot(SSnapContext* ctx) {
int32_t ret = MoveToPosition(ctx, idInfo->version, *uidTmp);
if (ret != 0) {
- metaDebug("tmqsnap getUidfromSnapShot not exist uid:%" PRIi64 " version:%" PRIi64, *uidTmp, idInfo->version);
+ metaDebug("tmqsnap getMetaTableInfoFromSnapshot not exist uid:%" PRIi64 " version:%" PRIi64, *uidTmp, idInfo->version);
continue;
}
- tdbTbcGet(ctx->pCur, (const void**)&pKey, &kLen, (const void**)&pVal, &vLen);
+ tdbTbcGet((TBC*)ctx->pCur, (const void**)&pKey, &kLen, (const void**)&pVal, &vLen);
SDecoder dc = {0};
SMetaEntry me = {0};
tDecoderInit(&dc, pVal, vLen);
diff --git a/source/dnode/vnode/src/meta/metaTable.c b/source/dnode/vnode/src/meta/metaTable.c
index 0164a82c69cfd7be9dd7a20705ba6780d5123c38..eb169fbdc20dcbf0ba42040cbec257ac47eb9d12 100644
--- a/source/dnode/vnode/src/meta/metaTable.c
+++ b/source/dnode/vnode/src/meta/metaTable.c
@@ -690,7 +690,7 @@ _err:
return -1;
}
-int metaCreateTable(SMeta *pMeta, int64_t version, SVCreateTbReq *pReq, STableMetaRsp **pMetaRsp) {
+int metaCreateTable(SMeta *pMeta, int64_t ver, SVCreateTbReq *pReq, STableMetaRsp **pMetaRsp) {
SMetaEntry me = {0};
SMetaReader mr = {0};
@@ -729,7 +729,7 @@ int metaCreateTable(SMeta *pMeta, int64_t version, SVCreateTbReq *pReq, STableMe
metaReaderClear(&mr);
// build SMetaEntry
- me.version = version;
+ me.version = ver;
me.type = pReq->type;
me.uid = pReq->uid;
me.name = pReq->name;
@@ -838,22 +838,96 @@ int metaDropTable(SMeta *pMeta, int64_t version, SVDropTbReq *pReq, SArray *tbUi
return 0;
}
+static void metaDropTables(SMeta *pMeta, SArray *tbUids) {
+ metaWLock(pMeta);
+ for (int i = 0; i < TARRAY_SIZE(tbUids); ++i) {
+ tb_uid_t uid = *(tb_uid_t *)taosArrayGet(tbUids, i);
+ metaDropTableByUid(pMeta, uid, NULL);
+ metaDebug("batch drop table:%" PRId64, uid);
+ }
+ metaULock(pMeta);
+}
+
+static int32_t metaFilterTableByHash(SMeta *pMeta, SArray *uidList) {
+ int32_t code = 0;
+ // 1, tranverse table's
+ // 2, validate table name using vnodeValidateTableHash
+ // 3, push invalidated table's uid into uidList
+
+ TBC *pCur;
+ code = tdbTbcOpen(pMeta->pTbDb, &pCur, NULL);
+ if (code < 0) {
+ return code;
+ }
+
+ code = tdbTbcMoveToFirst(pCur);
+ if (code) {
+ tdbTbcClose(pCur);
+ return code;
+ }
+
+ void *pData = NULL, *pKey = NULL;
+ int nData = 0, nKey = 0;
+
+ while (1) {
+ int32_t ret = tdbTbcNext(pCur, &pKey, &nKey, &pData, &nData);
+ if (ret < 0) {
+ break;
+ }
+
+ SMetaEntry me = {0};
+ SDecoder dc = {0};
+ tDecoderInit(&dc, pData, nData);
+ metaDecodeEntry(&dc, &me);
+ if (me.type != TSDB_SUPER_TABLE) {
+ int32_t ret = vnodeValidateTableHash(pMeta->pVnode, me.name);
+ if (TSDB_CODE_VND_HASH_MISMATCH == ret) {
+ taosArrayPush(uidList, &me.uid);
+ }
+ }
+ tDecoderClear(&dc);
+ }
+ tdbFree(pData);
+ tdbFree(pKey);
+ tdbTbcClose(pCur);
+
+ return 0;
+}
+
+int32_t metaTrimTables(SMeta *pMeta) {
+ int32_t code = 0;
+
+ SArray *tbUids = taosArrayInit(8, sizeof(int64_t));
+ if (tbUids == NULL) {
+ return TSDB_CODE_OUT_OF_MEMORY;
+ }
+
+ code = metaFilterTableByHash(pMeta, tbUids);
+ if (code != 0) {
+ goto end;
+ }
+ if (TARRAY_SIZE(tbUids) == 0) {
+ goto end;
+ }
+
+ metaDropTables(pMeta, tbUids);
+
+end:
+ taosArrayDestroy(tbUids);
+
+ return code;
+}
+
int metaTtlDropTable(SMeta *pMeta, int64_t ttl, SArray *tbUids) {
int ret = metaTtlSmaller(pMeta, ttl, tbUids);
if (ret != 0) {
return ret;
}
- if (taosArrayGetSize(tbUids) == 0) {
+ if (TARRAY_SIZE(tbUids) == 0) {
return 0;
}
- metaWLock(pMeta);
- for (int i = 0; i < taosArrayGetSize(tbUids); ++i) {
- tb_uid_t *uid = (tb_uid_t *)taosArrayGet(tbUids, i);
- metaDropTableByUid(pMeta, *uid, NULL);
- metaDebug("ttl drop table:%" PRId64, *uid);
- }
- metaULock(pMeta);
+ metaDropTables(pMeta, tbUids);
return 0;
}
@@ -999,7 +1073,7 @@ static int metaDropTableByUid(SMeta *pMeta, tb_uid_t uid, int *type) {
metaUpdateStbStats(pMeta, e.ctbEntry.suid, -1);
metaUidCacheClear(pMeta, e.ctbEntry.suid);
- metaTbGroupCacheClear(pMeta, e.ctbEntry.suid);
+ metaTbGroupCacheClear(pMeta, e.ctbEntry.suid);
} else if (e.type == TSDB_NORMAL_TABLE) {
// drop schema.db (todo)
@@ -1011,7 +1085,7 @@ static int metaDropTableByUid(SMeta *pMeta, tb_uid_t uid, int *type) {
metaStatsCacheDrop(pMeta, uid);
metaUidCacheClear(pMeta, uid);
- metaTbGroupCacheClear(pMeta, uid);
+ metaTbGroupCacheClear(pMeta, uid);
--pMeta->pVnode->config.vndStats.numOfSTables;
}
@@ -1432,7 +1506,7 @@ static int metaUpdateTableTagVal(SMeta *pMeta, int64_t version, SVAlterTbReq *pA
((STag *)(ctbEntry.ctbEntry.pTags))->len, pMeta->txn);
metaUidCacheClear(pMeta, ctbEntry.ctbEntry.suid);
- metaTbGroupCacheClear(pMeta, ctbEntry.ctbEntry.suid);
+ metaTbGroupCacheClear(pMeta, ctbEntry.ctbEntry.suid);
metaULock(pMeta);
diff --git a/source/dnode/vnode/src/sma/smaRollup.c b/source/dnode/vnode/src/sma/smaRollup.c
index ccc00ce25e1ad03543a6332324b57c480ceb95c4..39aa5c30437a72f6ad26f22f4c2a01aa03a8c7dc 100644
--- a/source/dnode/vnode/src/sma/smaRollup.c
+++ b/source/dnode/vnode/src/sma/smaRollup.c
@@ -276,12 +276,9 @@ static int32_t tdSetRSmaInfoItemParams(SSma *pSma, SRSmaParam *param, SRSmaStat
return TSDB_CODE_FAILED;
}
- SReadHandle handle = {
- .meta = pVnode->pMeta,
- .vnode = pVnode,
- .initTqReader = 1,
- .pStateBackend = pStreamState,
- };
+ SReadHandle handle = { .vnode = pVnode, .initTqReader = 1, .pStateBackend = pStreamState };
+ initStorageAPI(&handle.api);
+
pRSmaInfo->taskInfo[idx] = qCreateStreamExecTaskInfo(param->qmsg[idx], &handle, TD_VID(pVnode));
if (!pRSmaInfo->taskInfo[idx]) {
terrno = TSDB_CODE_RSMA_QTASKINFO_CREATE;
@@ -853,11 +850,8 @@ static int32_t tdCloneQTaskInfo(SSma *pSma, qTaskInfo_t dstTaskInfo, qTaskInfo_t
code = qSerializeTaskStatus(srcTaskInfo, &pOutput, &len);
TSDB_CHECK_CODE(code, lino, _exit);
- SReadHandle handle = {
- .meta = pVnode->pMeta,
- .vnode = pVnode,
- .initTqReader = 1,
- };
+ SReadHandle handle = { .vnode = pVnode, .initTqReader = 1 };
+ initStorageAPI(&handle.api);
if (ASSERTS(!dstTaskInfo, "dstTaskInfo:%p is not NULL", dstTaskInfo)) {
code = TSDB_CODE_APP_ERROR;
@@ -904,7 +898,7 @@ static int32_t tdRSmaInfoClone(SSma *pSma, SRSmaInfo *pInfo) {
metaReaderInit(&mr, SMA_META(pSma), 0);
smaDebug("vgId:%d, rsma clone qTaskInfo for suid:%" PRIi64, SMA_VID(pSma), pInfo->suid);
- if (metaGetTableEntryByUidCache(&mr, pInfo->suid) < 0) {
+ if (metaReaderGetTableEntryByUidCache(&mr, pInfo->suid) < 0) {
code = terrno;
TSDB_CHECK_CODE(code, lino, _exit);
}
@@ -1131,7 +1125,7 @@ static int32_t tdRSmaRestoreQTaskInfoInit(SSma *pSma, int64_t *nTables) {
for (int64_t i = 0; i < arrSize; ++i) {
suid = *(tb_uid_t *)taosArrayGet(suidList, i);
smaDebug("vgId:%d, rsma restore, suid is %" PRIi64, TD_VID(pVnode), suid);
- if (metaGetTableEntryByUidCache(&mr, suid) < 0) {
+ if (metaReaderGetTableEntryByUidCache(&mr, suid) < 0) {
code = terrno;
TSDB_CHECK_CODE(code, lino, _exit);
}
diff --git a/source/dnode/vnode/src/tq/tq.c b/source/dnode/vnode/src/tq/tq.c
index 0a3173b3cbf25745587fde15663a178e72b7605e..aa6bbbe9dfabb2efa715d206d7de3f5a77aabdb9 100644
--- a/source/dnode/vnode/src/tq/tq.c
+++ b/source/dnode/vnode/src/tq/tq.c
@@ -23,8 +23,8 @@
static int32_t tqInitialize(STQ* pTq);
static FORCE_INLINE bool tqIsHandleExec(STqHandle* pHandle) { return TMQ_HANDLE_STATUS_EXEC == pHandle->status; }
-static FORCE_INLINE void tqSetHandleExec(STqHandle* pHandle) {pHandle->status = TMQ_HANDLE_STATUS_EXEC;}
-static FORCE_INLINE void tqSetHandleIdle(STqHandle* pHandle) {pHandle->status = TMQ_HANDLE_STATUS_IDLE;}
+static FORCE_INLINE void tqSetHandleExec(STqHandle* pHandle) { pHandle->status = TMQ_HANDLE_STATUS_EXEC; }
+static FORCE_INLINE void tqSetHandleIdle(STqHandle* pHandle) { pHandle->status = TMQ_HANDLE_STATUS_IDLE; }
int32_t tqInit() {
int8_t old;
@@ -69,14 +69,16 @@ static void destroyTqHandle(void* data) {
if (pData->execHandle.subType == TOPIC_SUB_TYPE__COLUMN) {
taosMemoryFreeClear(pData->execHandle.execCol.qmsg);
} else if (pData->execHandle.subType == TOPIC_SUB_TYPE__DB) {
- tqCloseReader(pData->execHandle.pTqReader);
+ tqReaderClose(pData->execHandle.pTqReader);
walCloseReader(pData->pWalReader);
taosHashCleanup(pData->execHandle.execDb.pFilterOutTbUid);
} else if (pData->execHandle.subType == TOPIC_SUB_TYPE__TABLE) {
walCloseReader(pData->pWalReader);
- tqCloseReader(pData->execHandle.pTqReader);
+ tqReaderClose(pData->execHandle.pTqReader);
+ taosMemoryFreeClear(pData->execHandle.execTb.qmsg);
+ nodesDestroyNode(pData->execHandle.execTb.node);
}
- if(pData->msg != NULL) {
+ if (pData->msg != NULL) {
rpcFreeCont(pData->msg->pCont);
taosMemoryFree(pData->msg);
pData->msg = NULL;
@@ -238,14 +240,15 @@ int32_t tqPushDataRsp(STqHandle* pHandle, int32_t vgId) {
int64_t sver = 0, ever = 0;
walReaderValidVersionRange(pHandle->execHandle.pTqReader->pWalReader, &sver, &ever);
- tqDoSendDataRsp(&pHandle->msg->info, &dataRsp, pHandle->epoch, pHandle->consumerId, TMQ_MSG_TYPE__POLL_RSP, sver, ever);
+ tqDoSendDataRsp(&pHandle->msg->info, &dataRsp, pHandle->epoch, pHandle->consumerId, TMQ_MSG_TYPE__POLL_RSP, sver,
+ ever);
char buf1[80] = {0};
char buf2[80] = {0};
tFormatOffset(buf1, tListLen(buf1), &dataRsp.reqOffset);
tFormatOffset(buf2, tListLen(buf2), &dataRsp.rspOffset);
- tqDebug("vgId:%d, from consumer:0x%" PRIx64 " (epoch %d) push rsp, block num: %d, req:%s, rsp:%s",
- vgId, dataRsp.head.consumerId, dataRsp.head.epoch, dataRsp.blockNum, buf1, buf2);
+ tqDebug("vgId:%d, from consumer:0x%" PRIx64 " (epoch %d) push rsp, block num: %d, req:%s, rsp:%s", vgId,
+ dataRsp.head.consumerId, dataRsp.head.epoch, dataRsp.blockNum, buf1, buf2);
return 0;
}
@@ -261,8 +264,8 @@ int32_t tqSendDataRsp(STqHandle* pHandle, const SRpcMsg* pMsg, const SMqPollReq*
tFormatOffset(buf1, 80, &pRsp->reqOffset);
tFormatOffset(buf2, 80, &pRsp->rspOffset);
- tqDebug("vgId:%d consumer:0x%" PRIx64 " (epoch %d) send rsp, block num:%d, req:%s, rsp:%s, reqId:0x%" PRIx64,
- vgId, pReq->consumerId, pReq->epoch, pRsp->blockNum, buf1, buf2, pReq->reqId);
+ tqDebug("vgId:%d consumer:0x%" PRIx64 " (epoch %d) send rsp, block num:%d, req:%s, rsp:%s, reqId:0x%" PRIx64, vgId,
+ pReq->consumerId, pReq->epoch, pRsp->blockNum, buf1, buf2, pReq->reqId);
return 0;
}
@@ -334,8 +337,7 @@ int32_t tqProcessSeekReq(STQ* pTq, int64_t sversion, char* msg, int32_t msgLen)
STqHandle* pHandle = taosHashGet(pTq->pHandle, pOffset->subKey, strlen(pOffset->subKey));
if (pHandle == NULL) {
- tqError("tmq seek: consumer:0x%" PRIx64 " vgId:%d subkey %s not found", vgOffset.consumerId, vgId,
- pOffset->subKey);
+ tqError("tmq seek: consumer:0x%" PRIx64 " vgId:%d subkey %s not found", vgOffset.consumerId, vgId, pOffset->subKey);
terrno = TSDB_CODE_INVALID_MSG;
return -1;
}
@@ -351,7 +353,7 @@ int32_t tqProcessSeekReq(STQ* pTq, int64_t sversion, char* msg, int32_t msgLen)
}
taosRUnLockLatch(&pTq->lock);
- //3. check the offset info
+ // 3. check the offset info
STqOffset* pSavedOffset = tqOffsetRead(pTq->pOffsetStore, pOffset->subKey);
if (pSavedOffset != NULL) {
if (pSavedOffset->val.type != TMQ_OFFSET__LOG) {
@@ -379,7 +381,7 @@ int32_t tqProcessSeekReq(STQ* pTq, int64_t sversion, char* msg, int32_t msgLen)
tqDebug("vgId:%d sub:%s seek to:%" PRId64 " prev offset:%" PRId64, vgId, pOffset->subKey, pOffset->val.version,
pSavedOffset->val.version);
} else {
- tqDebug("vgId:%d sub:%s seek to:%"PRId64" not saved yet", vgId, pOffset->subKey, pOffset->val.version);
+ tqDebug("vgId:%d sub:%s seek to:%" PRId64 " not saved yet", vgId, pOffset->subKey, pOffset->val.version);
}
if (tqOffsetWrite(pTq->pOffsetStore, pOffset) < 0) {
@@ -421,6 +423,7 @@ int32_t tqCheckColModifiable(STQ* pTq, int64_t tbUid, int32_t colId) {
int32_t tqProcessPollReq(STQ* pTq, SRpcMsg* pMsg) {
SMqPollReq req = {0};
+ int code = 0;
if (tDeserializeSMqPollReq(pMsg->pCont, pMsg->contLen, &req) < 0) {
tqError("tDeserializeSMqPollReq %d failed", pMsg->contLen);
terrno = TSDB_CODE_INVALID_MSG;
@@ -431,37 +434,49 @@ int32_t tqProcessPollReq(STQ* pTq, SRpcMsg* pMsg) {
int32_t reqEpoch = req.epoch;
STqOffsetVal reqOffset = req.reqOffset;
int32_t vgId = TD_VID(pTq->pVnode);
+ STqHandle* pHandle = NULL;
- taosWLockLatch(&pTq->lock);
- // 1. find handle
- STqHandle* pHandle = taosHashGet(pTq->pHandle, req.subKey, strlen(req.subKey));
- if (pHandle == NULL) {
- tqError("tmq poll: consumer:0x%" PRIx64 " vgId:%d subkey %s not found", consumerId, vgId, req.subKey);
- terrno = TSDB_CODE_INVALID_MSG;
- taosWUnLockLatch(&pTq->lock);
- return -1;
- }
+ while (1) {
+ taosWLockLatch(&pTq->lock);
+ // 1. find handle
+ pHandle = taosHashGet(pTq->pHandle, req.subKey, strlen(req.subKey));
+ if (pHandle == NULL) {
+ tqError("tmq poll: consumer:0x%" PRIx64 " vgId:%d subkey %s not found", consumerId, vgId, req.subKey);
+ terrno = TSDB_CODE_INVALID_MSG;
+ taosWUnLockLatch(&pTq->lock);
+ return -1;
+ }
- while (tqIsHandleExec(pHandle)) {
- tqDebug("tmq poll: consumer:0x%" PRIx64 "vgId:%d, topic:%s, subscription is executing, wait for 5ms and retry", consumerId, vgId, req.subKey);
- taosMsleep(5);
- }
+ // 2. check re-balance status
+ if (pHandle->consumerId != consumerId) {
+ tqError("ERROR tmq poll: consumer:0x%" PRIx64
+ " vgId:%d, subkey %s, mismatch for saved handle consumer:0x%" PRIx64,
+ consumerId, TD_VID(pTq->pVnode), req.subKey, pHandle->consumerId);
+ terrno = TSDB_CODE_TMQ_CONSUMER_MISMATCH;
+ taosWUnLockLatch(&pTq->lock);
+ return -1;
+ }
- // 2. check re-balance status
- if (pHandle->consumerId != consumerId) {
- tqDebug("ERROR tmq poll: consumer:0x%" PRIx64 " vgId:%d, subkey %s, mismatch for saved handle consumer:0x%" PRIx64,
- consumerId, vgId, req.subKey, pHandle->consumerId);
- terrno = TSDB_CODE_TMQ_CONSUMER_MISMATCH;
+ bool exec = tqIsHandleExec(pHandle);
+ if (!exec) {
+ tqSetHandleExec(pHandle);
+ // qSetTaskCode(pHandle->execHandle.task, TDB_CODE_SUCCESS);
+ tqDebug("tmq poll: consumer:0x%" PRIx64 "vgId:%d, topic:%s, set handle exec, pHandle:%p", consumerId, vgId,
+ req.subKey, pHandle);
+ taosWUnLockLatch(&pTq->lock);
+ break;
+ }
taosWUnLockLatch(&pTq->lock);
- return -1;
+
+ tqDebug("tmq poll: consumer:0x%" PRIx64
+ "vgId:%d, topic:%s, subscription is executing, wait for 10ms and retry, pHandle:%p",
+ consumerId, vgId, req.subKey, pHandle);
+ taosMsleep(10);
}
- tqSetHandleExec(pHandle);
- taosWUnLockLatch(&pTq->lock);
// 3. update the epoch value
- int32_t savedEpoch = pHandle->epoch;
- if (savedEpoch < reqEpoch) {
- tqDebug("tmq poll: consumer:0x%" PRIx64 " epoch update from %d to %d by poll req", consumerId, savedEpoch,
+ if (pHandle->epoch < reqEpoch) {
+ tqDebug("tmq poll: consumer:0x%" PRIx64 " epoch update from %d to %d by poll req", consumerId, pHandle->epoch,
reqEpoch);
pHandle->epoch = reqEpoch;
}
@@ -471,8 +486,11 @@ int32_t tqProcessPollReq(STQ* pTq, SRpcMsg* pMsg) {
tqDebug("tmq poll: consumer:0x%" PRIx64 " (epoch %d), subkey %s, recv poll req vgId:%d, req:%s, reqId:0x%" PRIx64,
consumerId, req.epoch, pHandle->subKey, vgId, buf, req.reqId);
- int code = tqExtractDataForMq(pTq, pHandle, &req, pMsg);
+ code = tqExtractDataForMq(pTq, pHandle, &req, pMsg);
tqSetHandleIdle(pHandle);
+
+ tqDebug("tmq poll: consumer:0x%" PRIx64 "vgId:%d, topic:%s, , set handle idle, pHandle:%p", consumerId, vgId,
+ req.subKey, pHandle);
return code;
}
@@ -510,8 +528,6 @@ int32_t tqProcessVgWalInfoReq(STQ* pTq, SRpcMsg* pMsg) {
int64_t sver = 0, ever = 0;
walReaderValidVersionRange(pHandle->execHandle.pTqReader->pWalReader, &sver, &ever);
- int64_t currentVer = walReaderGetCurrentVer(pHandle->execHandle.pTqReader->pWalReader);
-
SMqDataRsp dataRsp = {0};
tqInitDataRsp(&dataRsp, &req);
@@ -537,7 +553,12 @@ int32_t tqProcessVgWalInfoReq(STQ* pTq, SRpcMsg* pMsg) {
dataRsp.rspOffset.type = TMQ_OFFSET__LOG;
if (reqOffset.type == TMQ_OFFSET__LOG) {
- dataRsp.rspOffset.version = currentVer; // return current consume offset value
+ int64_t currentVer = walReaderGetCurrentVer(pHandle->execHandle.pTqReader->pWalReader);
+ if (currentVer == -1) { // not start to read data from wal yet, return req offset directly
+ dataRsp.rspOffset.version = reqOffset.version;
+ } else {
+ dataRsp.rspOffset.version = currentVer; // return current consume offset value
+ }
} else if (reqOffset.type == TMQ_OFFSET__RESET_EARLIEAST) {
dataRsp.rspOffset.version = sver; // not consume yet, set the earliest position
} else if (reqOffset.type == TMQ_OFFSET__RESET_LATEST) {
@@ -557,7 +578,7 @@ int32_t tqProcessVgWalInfoReq(STQ* pTq, SRpcMsg* pMsg) {
int32_t tqProcessDeleteSubReq(STQ* pTq, int64_t sversion, char* msg, int32_t msgLen) {
SMqVDeleteReq* pReq = (SMqVDeleteReq*)msg;
- int32_t vgId = TD_VID(pTq->pVnode);
+ int32_t vgId = TD_VID(pTq->pVnode);
tqDebug("vgId:%d, tq process delete sub req %s", vgId, pReq->subKey);
int32_t code = 0;
@@ -566,8 +587,9 @@ int32_t tqProcessDeleteSubReq(STQ* pTq, int64_t sversion, char* msg, int32_t msg
STqHandle* pHandle = taosHashGet(pTq->pHandle, pReq->subKey, strlen(pReq->subKey));
if (pHandle) {
while (tqIsHandleExec(pHandle)) {
- tqDebug("vgId:%d, topic:%s, subscription is executing, wait for 5ms and retry", vgId, pHandle->subKey);
- taosMsleep(5);
+ tqDebug("vgId:%d, topic:%s, subscription is executing, wait for 10ms and retry, pHandle:%p", vgId,
+ pHandle->subKey, pHandle);
+ taosMsleep(10);
}
if (pHandle->pRef) {
@@ -626,9 +648,18 @@ int32_t tqProcessDelCheckInfoReq(STQ* pTq, int64_t sversion, char* msg, int32_t
}
int32_t tqProcessSubscribeReq(STQ* pTq, int64_t sversion, char* msg, int32_t msgLen) {
- int ret = 0;
+ int ret = 0;
SMqRebVgReq req = {0};
- tDecodeSMqRebVgReq(msg, &req);
+ SDecoder dc = {0};
+
+ tDecoderInit(&dc, msg, msgLen);
+
+ // decode req
+ if (tDecodeSMqRebVgReq(&dc, &req) < 0) {
+ terrno = TSDB_CODE_INVALID_MSG;
+ tDecoderClear(&dc);
+ return -1;
+ }
SVnode* pVnode = pTq->pVnode;
int32_t vgId = TD_VID(pVnode);
@@ -636,7 +667,6 @@ int32_t tqProcessSubscribeReq(STQ* pTq, int64_t sversion, char* msg, int32_t msg
tqDebug("vgId:%d, tq process sub req:%s, Id:0x%" PRIx64 " -> Id:0x%" PRIx64, pVnode->config.vgId, req.subKey,
req.oldConsumerId, req.newConsumerId);
- taosWLockLatch(&pTq->lock);
STqHandle* pHandle = taosHashGet(pTq->pHandle, req.subKey, strlen(req.subKey));
if (pHandle == NULL) {
if (req.oldConsumerId != -1) {
@@ -652,7 +682,6 @@ int32_t tqProcessSubscribeReq(STQ* pTq, int64_t sversion, char* msg, int32_t msg
STqHandle tqHandle = {0};
pHandle = &tqHandle;
- uint64_t oldConsumerId = pHandle->consumerId;
memcpy(pHandle->subKey, req.subKey, TSDB_SUBSCRIBE_KEY_LEN);
pHandle->consumerId = req.newConsumerId;
pHandle->epoch = -1;
@@ -670,13 +699,13 @@ int32_t tqProcessSubscribeReq(STQ* pTq, int64_t sversion, char* msg, int32_t msg
int64_t ver = pRef->refVer;
pHandle->pRef = pRef;
- SReadHandle handle = {
- .meta = pVnode->pMeta, .vnode = pVnode, .initTableReader = true, .initTqReader = true, .version = ver};
+ SReadHandle handle = {.vnode = pVnode, .initTableReader = true, .initTqReader = true, .version = ver};
+ initStorageAPI(&handle.api);
+
pHandle->snapshotVer = ver;
if (pHandle->execHandle.subType == TOPIC_SUB_TYPE__COLUMN) {
- pHandle->execHandle.execCol.qmsg = req.qmsg;
- req.qmsg = NULL;
+ pHandle->execHandle.execCol.qmsg = taosStrdup(req.qmsg);
pHandle->execHandle.task = qCreateQueueExecTaskInfo(pHandle->execHandle.execCol.qmsg, &handle, vgId,
&pHandle->execHandle.numOfCols, req.newConsumerId);
@@ -689,74 +718,82 @@ int32_t tqProcessSubscribeReq(STQ* pTq, int64_t sversion, char* msg, int32_t msg
pHandle->execHandle.execDb.pFilterOutTbUid =
taosHashInit(64, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), false, HASH_ENTRY_LOCK);
- buildSnapContext(handle.meta, handle.version, 0, pHandle->execHandle.subType, pHandle->fetchMeta,
+ buildSnapContext(handle.vnode, handle.version, 0, pHandle->execHandle.subType, pHandle->fetchMeta,
(SSnapContext**)(&handle.sContext));
pHandle->execHandle.task = qCreateQueueExecTaskInfo(NULL, &handle, vgId, NULL, req.newConsumerId);
} else if (pHandle->execHandle.subType == TOPIC_SUB_TYPE__TABLE) {
pHandle->pWalReader = walOpenReader(pVnode->pWal, NULL);
pHandle->execHandle.execTb.suid = req.suid;
+ pHandle->execHandle.execTb.qmsg = taosStrdup(req.qmsg);
- SArray* tbUidList = taosArrayInit(0, sizeof(int64_t));
- vnodeGetCtbIdList(pVnode, req.suid, tbUidList);
- tqDebug("vgId:%d, tq try to get all ctb, suid:%" PRId64, pVnode->config.vgId, req.suid);
- for (int32_t i = 0; i < taosArrayGetSize(tbUidList); i++) {
- int64_t tbUid = *(int64_t*)taosArrayGet(tbUidList, i);
- tqDebug("vgId:%d, idx %d, uid:%" PRId64, vgId, i, tbUid);
+ if (strcmp(pHandle->execHandle.execTb.qmsg, "") != 0) {
+ if (nodesStringToNode(pHandle->execHandle.execTb.qmsg, &pHandle->execHandle.execTb.node) != 0) {
+ tqError("nodesStringToNode error in sub stable, since %s, vgId:%d, subkey:%s consumer:0x%" PRIx64, terrstr(),
+ pVnode->config.vgId, req.subKey, pHandle->consumerId);
+ return -1;
+ }
}
- pHandle->execHandle.pTqReader = tqReaderOpen(pVnode);
- tqReaderSetTbUidList(pHandle->execHandle.pTqReader, tbUidList);
- taosArrayDestroy(tbUidList);
- buildSnapContext(handle.meta, handle.version, req.suid, pHandle->execHandle.subType, pHandle->fetchMeta,
+ buildSnapContext(handle.vnode, handle.version, req.suid, pHandle->execHandle.subType, pHandle->fetchMeta,
(SSnapContext**)(&handle.sContext));
pHandle->execHandle.task = qCreateQueueExecTaskInfo(NULL, &handle, vgId, NULL, req.newConsumerId);
+
+ SArray* tbUidList = NULL;
+ ret = qGetTableList(req.suid, pVnode, pHandle->execHandle.execTb.node, &tbUidList, pHandle->execHandle.task);
+ if (ret != TDB_CODE_SUCCESS) {
+ tqError("qGetTableList error:%d vgId:%d, subkey:%s consumer:0x%" PRIx64, ret, pVnode->config.vgId, req.subKey,
+ pHandle->consumerId);
+ taosArrayDestroy(tbUidList);
+ goto end;
+ }
+ tqDebug("tq try to get ctb for stb subscribe, vgId:%d, subkey:%s consumer:0x%" PRIx64 " suid:%" PRId64,
+ pVnode->config.vgId, req.subKey, pHandle->consumerId, req.suid);
+ pHandle->execHandle.pTqReader = tqReaderOpen(pVnode);
+ tqReaderSetTbUidList(pHandle->execHandle.pTqReader, tbUidList, NULL);
+ taosArrayDestroy(tbUidList);
}
taosHashPut(pTq->pHandle, req.subKey, strlen(req.subKey), pHandle, sizeof(STqHandle));
- tqDebug("try to persist handle %s consumer:0x%" PRIx64 " , old consumer:0x%" PRIx64, req.subKey,
- pHandle->consumerId, oldConsumerId);
+ tqDebug("try to persist handle %s consumer:0x%" PRIx64, req.subKey, pHandle->consumerId);
ret = tqMetaSaveHandle(pTq, req.subKey, pHandle);
goto end;
} else {
- while (tqIsHandleExec(pHandle)) {
- tqDebug("sub req vgId:%d, topic:%s, subscription is executing, wait for 5ms and retry", vgId, pHandle->subKey);
- taosMsleep(5);
- }
+ taosWLockLatch(&pTq->lock);
if (pHandle->consumerId == req.newConsumerId) { // do nothing
- tqInfo("vgId:%d consumer:0x%" PRIx64 " remains, no switch occurs", req.vgId, req.newConsumerId);
- atomic_add_fetch_32(&pHandle->epoch, 1);
-
+ tqInfo("vgId:%d consumer:0x%" PRIx64 " remains, no switch occurs, should not reach here", req.vgId,
+ req.newConsumerId);
} else {
tqInfo("vgId:%d switch consumer from Id:0x%" PRIx64 " to Id:0x%" PRIx64, req.vgId, pHandle->consumerId,
req.newConsumerId);
atomic_store_64(&pHandle->consumerId, req.newConsumerId);
- atomic_store_32(&pHandle->epoch, 0);
}
+ // atomic_add_fetch_32(&pHandle->epoch, 1);
+
// kill executing task
- qTaskInfo_t pTaskInfo = pHandle->execHandle.task;
- if (pTaskInfo != NULL) {
- qKillTask(pTaskInfo, TSDB_CODE_SUCCESS);
- }
- if (pHandle->execHandle.subType == TOPIC_SUB_TYPE__COLUMN) {
- qStreamCloseTsdbReader(pTaskInfo);
- }
+ // if(tqIsHandleExec(pHandle)) {
+ // qTaskInfo_t pTaskInfo = pHandle->execHandle.task;
+ // if (pTaskInfo != NULL) {
+ // qKillTask(pTaskInfo, TSDB_CODE_SUCCESS);
+ // }
+
+ // if (pHandle->execHandle.subType == TOPIC_SUB_TYPE__COLUMN) {
+ // qStreamCloseTsdbReader(pTaskInfo);
+ // }
+ // }
// remove if it has been register in the push manager, and return one empty block to consumer
tqUnregisterPushHandle(pTq, pHandle);
+ taosWUnLockLatch(&pTq->lock);
ret = tqMetaSaveHandle(pTq, req.subKey, pHandle);
- goto end;
}
end:
- taosWUnLockLatch(&pTq->lock);
- taosMemoryFree(req.qmsg);
+ tDecoderClear(&dc);
return ret;
}
-void freePtr(void *ptr) {
- taosMemoryFree(*(void**)ptr);
-}
+void freePtr(void* ptr) { taosMemoryFree(*(void**)ptr); }
int32_t tqExpandTask(STQ* pTq, SStreamTask* pTask, int64_t ver) {
int32_t vgId = TD_VID(pTq->pVnode);
@@ -779,7 +816,7 @@ int32_t tqExpandTask(STQ* pTq, SStreamTask* pTask, int64_t ver) {
pTask->chkInfo.currentVer = ver;
// expand executor
- pTask->status.taskStatus = (pTask->fillHistory)? TASK_STATUS__WAIT_DOWNSTREAM:TASK_STATUS__NORMAL;
+ pTask->status.taskStatus = (pTask->fillHistory) ? TASK_STATUS__WAIT_DOWNSTREAM : TASK_STATUS__NORMAL;
if (pTask->taskLevel == TASK_LEVEL__SOURCE) {
pTask->pState = streamStateOpen(pTq->pStreamMeta->path, pTask, false, -1, -1);
@@ -787,8 +824,8 @@ int32_t tqExpandTask(STQ* pTq, SStreamTask* pTask, int64_t ver) {
return -1;
}
- SReadHandle handle = {
- .meta = pTq->pVnode->pMeta, .vnode = pTq->pVnode, .initTqReader = 1, .pStateBackend = pTask->pState};
+ SReadHandle handle = {.vnode = pTq->pVnode, .initTqReader = 1, .pStateBackend = pTask->pState};
+ initStorageAPI(&handle.api);
pTask->exec.pExecutor = qCreateStreamExecTaskInfo(pTask->exec.qmsg, &handle, vgId);
if (pTask->exec.pExecutor == NULL) {
@@ -803,9 +840,10 @@ int32_t tqExpandTask(STQ* pTq, SStreamTask* pTask, int64_t ver) {
}
int32_t numOfVgroups = (int32_t)taosArrayGetSize(pTask->childEpInfo);
- SReadHandle mgHandle = {.vnode = NULL, .numOfVgroups = numOfVgroups, .pStateBackend = pTask->pState};
+ SReadHandle handle = {.vnode = NULL, .numOfVgroups = numOfVgroups, .pStateBackend = pTask->pState};
+ initStorageAPI(&handle.api);
- pTask->exec.pExecutor = qCreateStreamExecTaskInfo(pTask->exec.qmsg, &mgHandle, vgId);
+ pTask->exec.pExecutor = qCreateStreamExecTaskInfo(pTask->exec.qmsg, &handle, vgId);
if (pTask->exec.pExecutor == NULL) {
return -1;
}
@@ -844,8 +882,8 @@ int32_t tqExpandTask(STQ* pTq, SStreamTask* pTask, int64_t ver) {
streamSetupTrigger(pTask);
- tqInfo("vgId:%d expand stream task, s-task:%s, checkpoint ver:%" PRId64 " child id:%d, level:%d", vgId, pTask->id.idStr,
- pTask->chkInfo.version, pTask->selfChildId, pTask->taskLevel);
+ tqInfo("vgId:%d expand stream task, s-task:%s, checkpoint ver:%" PRId64 " child id:%d, level:%d", vgId,
+ pTask->id.idStr, pTask->chkInfo.version, pTask->selfChildId, pTask->taskLevel);
// next valid version will add one
pTask->chkInfo.version += 1;
@@ -958,7 +996,8 @@ int32_t tqProcessTaskDeployReq(STQ* pTq, int64_t sversion, char* msg, int32_t ms
SStreamTask* pTask = taosMemoryCalloc(1, sizeof(SStreamTask));
if (pTask == NULL) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
- tqError("vgId:%d failed to create stream task due to out of memory, alloc size:%d", vgId, (int32_t) sizeof(SStreamTask));
+ tqError("vgId:%d failed to create stream task due to out of memory, alloc size:%d", vgId,
+ (int32_t)sizeof(SStreamTask));
return -1;
}
@@ -1073,7 +1112,7 @@ int32_t tqProcessTaskRecover2Req(STQ* pTq, int64_t sversion, char* msg, int32_t
// do recovery step 2
int64_t st = taosGetTimestampMs();
- tqDebug("s-task:%s start step2 recover, ts:%"PRId64, pTask->id.idStr, st);
+ tqDebug("s-task:%s start step2 recover, ts:%" PRId64, pTask->id.idStr, st);
code = streamSourceRecoverScanStep2(pTask, sversion);
if (code < 0) {
@@ -1081,8 +1120,10 @@ int32_t tqProcessTaskRecover2Req(STQ* pTq, int64_t sversion, char* msg, int32_t
return -1;
}
- qDebug("s-task:%s set the start wal offset to be:%"PRId64, pTask->id.idStr, sversion);
+ qDebug("s-task:%s set start wal scan start ver:%"PRId64, pTask->id.idStr, sversion);
+
walReaderSeekVer(pTask->exec.pWalReader, sversion);
+ pTask->chkInfo.currentVer = sversion;
if (atomic_load_8(&pTask->status.taskStatus) == TASK_STATUS__DROPPING) {
streamMetaReleaseTask(pTq->pStreamMeta, pTask);
@@ -1104,7 +1145,7 @@ int32_t tqProcessTaskRecover2Req(STQ* pTq, int64_t sversion, char* msg, int32_t
return -1;
}
- double el = (taosGetTimestampMs() - st)/ 1000.0;
+ double el = (taosGetTimestampMs() - st) / 1000.0;
tqDebug("s-task:%s step2 recover finished, el:%.2fs", pTask->id.idStr, el);
// dispatch recover finish req to all related downstream task
@@ -1220,12 +1261,12 @@ int32_t tqProcessTaskRunReq(STQ* pTq, SRpcMsg* pMsg) {
SStreamTask* pTask = streamMetaAcquireTask(pTq->pStreamMeta, taskId);
if (pTask != NULL) {
if (pTask->status.taskStatus == TASK_STATUS__NORMAL) {
- tqDebug("vgId:%d s-task:%s start to process block from wal, last chk point:%" PRId64, vgId,
- pTask->id.idStr, pTask->chkInfo.version);
+ tqDebug("vgId:%d s-task:%s start to process block from wal, last chk point:%" PRId64, vgId, pTask->id.idStr,
+ pTask->chkInfo.version);
streamProcessRunReq(pTask);
} else {
if (streamTaskShouldPause(&pTask->status)) {
- atomic_val_compare_exchange_8(&pTask->status.schedStatus, TASK_SCHED_STATUS__WAITING, TASK_SCHED_STATUS__INACTIVE);
+ atomic_store_8(&pTask->status.schedStatus, TASK_SCHED_STATUS__INACTIVE);
}
tqDebug("vgId:%d s-task:%s ignore run req since not in ready state", vgId, pTask->id.idStr);
}
@@ -1240,9 +1281,9 @@ int32_t tqProcessTaskRunReq(STQ* pTq, SRpcMsg* pMsg) {
}
int32_t tqProcessTaskDispatchReq(STQ* pTq, SRpcMsg* pMsg, bool exec) {
- char* msgStr = pMsg->pCont;
- char* msgBody = POINTER_SHIFT(msgStr, sizeof(SMsgHead));
- int32_t msgLen = pMsg->contLen - sizeof(SMsgHead);
+ char* msgStr = pMsg->pCont;
+ char* msgBody = POINTER_SHIFT(msgStr, sizeof(SMsgHead));
+ int32_t msgLen = pMsg->contLen - sizeof(SMsgHead);
SStreamDispatchReq req = {0};
@@ -1266,25 +1307,29 @@ int32_t tqProcessTaskDispatchRsp(STQ* pTq, SRpcMsg* pMsg) {
SStreamDispatchRsp* pRsp = POINTER_SHIFT(pMsg->pCont, sizeof(SMsgHead));
int32_t taskId = ntohl(pRsp->upstreamTaskId);
SStreamTask* pTask = streamMetaAcquireTask(pTq->pStreamMeta, taskId);
- tqDebug("recv dispatch rsp, code:%x", pMsg->code);
+
+ int32_t vgId = pTq->pStreamMeta->vgId;
if (pTask) {
streamProcessDispatchRsp(pTask, pRsp, pMsg->code);
streamMetaReleaseTask(pTq->pStreamMeta, pTask);
return 0;
} else {
- return -1;
+ tqDebug("vgId:%d failed to handle the dispatch rsp, since find task:0x%x failed", vgId, taskId);
+ return TSDB_CODE_INVALID_MSG;
}
}
int32_t tqProcessTaskDropReq(STQ* pTq, int64_t sversion, char* msg, int32_t msgLen) {
SVDropStreamTaskReq* pReq = (SVDropStreamTaskReq*)msg;
+ tqDebug("vgId:%d receive msg to drop stream task:0x%x", TD_VID(pTq->pVnode), pReq->taskId);
+
streamMetaRemoveTask(pTq->pStreamMeta, pReq->taskId);
return 0;
}
int32_t tqProcessTaskPauseReq(STQ* pTq, int64_t sversion, char* msg, int32_t msgLen) {
SVPauseStreamTaskReq* pReq = (SVPauseStreamTaskReq*)msg;
- SStreamTask* pTask = streamMetaAcquireTask(pTq->pStreamMeta, pReq->taskId);
+ SStreamTask* pTask = streamMetaAcquireTask(pTq->pStreamMeta, pReq->taskId);
if (pTask) {
tqDebug("vgId:%d s-task:%s set pause flag", pTq->pStreamMeta->vgId, pTask->id.idStr);
atomic_store_8(&pTask->status.keepTaskStatus, pTask->status.taskStatus);
@@ -1296,19 +1341,22 @@ int32_t tqProcessTaskPauseReq(STQ* pTq, int64_t sversion, char* msg, int32_t msg
int32_t tqProcessTaskResumeReq(STQ* pTq, int64_t sversion, char* msg, int32_t msgLen) {
SVResumeStreamTaskReq* pReq = (SVResumeStreamTaskReq*)msg;
+
+ int32_t vgId = pTq->pStreamMeta->vgId;
SStreamTask* pTask = streamMetaAcquireTask(pTq->pStreamMeta, pReq->taskId);
if (pTask) {
atomic_store_8(&pTask->status.taskStatus, pTask->status.keepTaskStatus);
// no lock needs to secure the access of the version
- if (pReq->igUntreated && pTask->taskLevel == TASK_LEVEL__SOURCE) { // discard all the data when the stream task is suspended.
- pTask->chkInfo.currentVer = sversion;
- walReaderSeekVer(pTask->exec.pWalReader, sversion);
- tqDebug("vgId:%d s-task:%s resume to normal from the latest version:%" PRId64 ", vnode ver:%" PRId64 ", schedStatus:%d", pTq->pStreamMeta->vgId,
- pTask->id.idStr, pTask->chkInfo.currentVer, sversion, pTask->status.schedStatus);
+ if (pReq->igUntreated && pTask->taskLevel == TASK_LEVEL__SOURCE) {
+ // discard all the data when the stream task is suspended.
+ walReaderSetSkipToVersion(pTask->exec.pWalReader, sversion);
+ tqDebug("vgId:%d s-task:%s resume to exec, prev paused version:%" PRId64 ", start from vnode ver:%" PRId64
+ ", schedStatus:%d",
+ vgId, pTask->id.idStr, pTask->chkInfo.currentVer, sversion, pTask->status.schedStatus);
} else { // from the previous paused version and go on
- tqDebug("vgId:%d s-task:%s resume to normal from paused ver:%" PRId64 ", vnode ver:%" PRId64 ", schedStatus:%d", pTq->pStreamMeta->vgId,
- pTask->id.idStr, pTask->chkInfo.currentVer, sversion, pTask->status.schedStatus);
+ tqDebug("vgId:%d s-task:%s resume to exec, from paused ver:%" PRId64 ", vnode ver:%" PRId64 ", schedStatus:%d",
+ vgId, pTask->id.idStr, pTask->chkInfo.currentVer, sversion, pTask->status.schedStatus);
}
if (pTask->taskLevel == TASK_LEVEL__SOURCE && taosQueueItemSize(pTask->inputQueue->queue) == 0) {
@@ -1317,6 +1365,8 @@ int32_t tqProcessTaskResumeReq(STQ* pTq, int64_t sversion, char* msg, int32_t ms
streamSchedExec(pTask);
}
streamMetaReleaseTask(pTq->pStreamMeta, pTask);
+ } else {
+ tqError("vgId:%d failed to find the s-task:0x%x for resume stream task", vgId, pReq->taskId);
}
return 0;
@@ -1425,7 +1475,7 @@ int32_t tqStartStreamTasks(STQ* pTq) {
int32_t numOfTasks = taosArrayGetSize(pMeta->pTaskList);
if (numOfTasks == 0) {
tqInfo("vgId:%d no stream tasks exist", vgId);
- taosWUnLockLatch(&pTq->pStreamMeta->lock);
+ taosWUnLockLatch(&pMeta->lock);
return 0;
}
@@ -1433,7 +1483,7 @@ int32_t tqStartStreamTasks(STQ* pTq) {
if (pMeta->walScanCounter > 1) {
tqDebug("vgId:%d wal read task has been launched, remain scan times:%d", vgId, pMeta->walScanCounter);
- taosWUnLockLatch(&pTq->pStreamMeta->lock);
+ taosWUnLockLatch(&pMeta->lock);
return 0;
}
@@ -1441,7 +1491,7 @@ int32_t tqStartStreamTasks(STQ* pTq) {
if (pRunReq == NULL) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
tqError("vgId:%d failed to create msg to start wal scanning to launch stream tasks, code:%s", vgId, terrstr());
- taosWUnLockLatch(&pTq->pStreamMeta->lock);
+ taosWUnLockLatch(&pMeta->lock);
return -1;
}
@@ -1452,7 +1502,7 @@ int32_t tqStartStreamTasks(STQ* pTq) {
SRpcMsg msg = {.msgType = TDMT_STREAM_TASK_RUN, .pCont = pRunReq, .contLen = sizeof(SStreamTaskRunReq)};
tmsgPutToQueue(&pTq->pVnode->msgCb, STREAM_QUEUE, &msg);
- taosWUnLockLatch(&pTq->pStreamMeta->lock);
+ taosWUnLockLatch(&pMeta->lock);
return 0;
}
diff --git a/source/dnode/vnode/src/tq/tqMeta.c b/source/dnode/vnode/src/tq/tqMeta.c
index 5654147b6df6d1518285e0c028e23db20997ff57..ba6d7cb50191304ecf823959071736fcc5b19fd8 100644
--- a/source/dnode/vnode/src/tq/tqMeta.c
+++ b/source/dnode/vnode/src/tq/tqMeta.c
@@ -37,6 +37,9 @@ int32_t tEncodeSTqHandle(SEncoder* pEncoder, const STqHandle* pHandle) {
}
} else if (pHandle->execHandle.subType == TOPIC_SUB_TYPE__TABLE) {
if (tEncodeI64(pEncoder, pHandle->execHandle.execTb.suid) < 0) return -1;
+ if (pHandle->execHandle.execTb.qmsg != NULL){
+ if (tEncodeCStr(pEncoder, pHandle->execHandle.execTb.qmsg) < 0) return -1;
+ }
}
tEndEncode(pEncoder);
return pEncoder->pos;
@@ -64,6 +67,9 @@ int32_t tDecodeSTqHandle(SDecoder* pDecoder, STqHandle* pHandle) {
}
} else if (pHandle->execHandle.subType == TOPIC_SUB_TYPE__TABLE) {
if (tDecodeI64(pDecoder, &pHandle->execHandle.execTb.suid) < 0) return -1;
+ if (!tDecodeIsEnd(pDecoder)){
+ if (tDecodeCStrAlloc(pDecoder, &pHandle->execHandle.execTb.qmsg) < 0) return -1;
+ }
}
tEndDecode(pDecoder);
return 0;
@@ -298,13 +304,14 @@ int32_t tqMetaRestoreHandle(STQ* pTq) {
walSetRefVer(handle.pRef, handle.snapshotVer);
SReadHandle reader = {
- .meta = pTq->pVnode->pMeta,
.vnode = pTq->pVnode,
.initTableReader = true,
.initTqReader = true,
- .version = handle.snapshotVer,
+ .version = handle.snapshotVer
};
+ initStorageAPI(&reader.api);
+
if (handle.execHandle.subType == TOPIC_SUB_TYPE__COLUMN) {
handle.execHandle.task =
qCreateQueueExecTaskInfo(handle.execHandle.execCol.qmsg, &reader, vgId, &handle.execHandle.numOfCols, 0);
@@ -330,31 +337,36 @@ int32_t tqMetaRestoreHandle(STQ* pTq) {
handle.pWalReader = walOpenReader(pTq->pVnode->pWal, NULL);
handle.execHandle.pTqReader = tqReaderOpen(pTq->pVnode);
- buildSnapContext(reader.meta, reader.version, 0, handle.execHandle.subType, handle.fetchMeta,
+ buildSnapContext(reader.vnode, reader.version, 0, handle.execHandle.subType, handle.fetchMeta,
(SSnapContext**)(&reader.sContext));
handle.execHandle.task = qCreateQueueExecTaskInfo(NULL, &reader, vgId, NULL, 0);
} else if (handle.execHandle.subType == TOPIC_SUB_TYPE__TABLE) {
handle.pWalReader = walOpenReader(pTq->pVnode->pWal, NULL);
- SArray* tbUidList = taosArrayInit(0, sizeof(int64_t));
- vnodeGetCtbIdList(pTq->pVnode, handle.execHandle.execTb.suid, tbUidList);
- tqDebug("vgId:%d, tq try to get all ctb, suid:%" PRId64, pTq->pVnode->config.vgId, handle.execHandle.execTb.suid);
- for (int32_t i = 0; i < taosArrayGetSize(tbUidList); i++) {
- int64_t tbUid = *(int64_t*)taosArrayGet(tbUidList, i);
- tqDebug("vgId:%d, idx %d, uid:%" PRId64, vgId, i, tbUid);
+ if(handle.execHandle.execTb.qmsg != NULL && strcmp(handle.execHandle.execTb.qmsg, "") != 0) {
+ if (nodesStringToNode(handle.execHandle.execTb.qmsg, &handle.execHandle.execTb.node) != 0) {
+ tqError("nodesStringToNode error in sub stable, since %s", terrstr());
+ return -1;
+ }
}
- handle.execHandle.pTqReader = tqReaderOpen(pTq->pVnode);
- tqReaderSetTbUidList(handle.execHandle.pTqReader, tbUidList);
- taosArrayDestroy(tbUidList);
-
- buildSnapContext(reader.meta, reader.version, handle.execHandle.execTb.suid, handle.execHandle.subType,
+ buildSnapContext(reader.vnode, reader.version, handle.execHandle.execTb.suid, handle.execHandle.subType,
handle.fetchMeta, (SSnapContext**)(&reader.sContext));
handle.execHandle.task = qCreateQueueExecTaskInfo(NULL, &reader, vgId, NULL, 0);
+
+ SArray* tbUidList = NULL;
+ int ret = qGetTableList(handle.execHandle.execTb.suid, pTq->pVnode, handle.execHandle.execTb.node, &tbUidList, handle.execHandle.task);
+ if(ret != TDB_CODE_SUCCESS) {
+ tqError("qGetTableList error:%d handle %s consumer:0x%" PRIx64, ret, handle.subKey, handle.consumerId);
+ taosArrayDestroy(tbUidList);
+ goto end;
+ }
+ tqDebug("vgId:%d, tq try to get ctb for stb subscribe, suid:%" PRId64, pTq->pVnode->config.vgId, handle.execHandle.execTb.suid);
+ handle.execHandle.pTqReader = tqReaderOpen(pTq->pVnode);
+ tqReaderSetTbUidList(handle.execHandle.pTqReader, tbUidList, NULL);
+ taosArrayDestroy(tbUidList);
}
tqDebug("tq restore %s consumer %" PRId64 " vgId:%d", handle.subKey, handle.consumerId, vgId);
- taosWLockLatch(&pTq->lock);
taosHashPut(pTq->pHandle, pKey, kLen, &handle, sizeof(STqHandle));
- taosWUnLockLatch(&pTq->lock);
}
end:
diff --git a/source/dnode/vnode/src/tq/tqOffset.c b/source/dnode/vnode/src/tq/tqOffset.c
index 377a5d18875c77b38d4ca6036a96cabfafa0ea3f..0a9905b5449a987c926622f26b28ca3e8e7200a2 100644
--- a/source/dnode/vnode/src/tq/tqOffset.c
+++ b/source/dnode/vnode/src/tq/tqOffset.c
@@ -78,7 +78,6 @@ int32_t tqOffsetRestoreFromFile(STqOffsetStore* pStore, const char* fname) {
// todo remove this
if (offset.val.type == TMQ_OFFSET__LOG) {
- taosWLockLatch(&pStore->pTq->lock);
STqHandle* pHandle = taosHashGet(pStore->pTq->pHandle, offset.subKey, strlen(offset.subKey));
if (pHandle) {
if (walSetRefVer(pHandle->pRef, offset.val.version) < 0) {
@@ -86,7 +85,6 @@ int32_t tqOffsetRestoreFromFile(STqOffsetStore* pStore, const char* fname) {
// offset.val.version);
}
}
- taosWUnLockLatch(&pStore->pTq->lock);
}
taosMemoryFree(pMemBuf);
diff --git a/source/dnode/vnode/src/tq/tqPush.c b/source/dnode/vnode/src/tq/tqPush.c
index 7f4fe48b8e20c8fb4dd7b67f14ff553bf78e8199..4c2e19dcfbdc7828f8b61ae5d46fcfc0a83ac66f 100644
--- a/source/dnode/vnode/src/tq/tqPush.c
+++ b/source/dnode/vnode/src/tq/tqPush.c
@@ -101,8 +101,11 @@ int32_t tqUnregisterPushHandle(STQ* pTq, void *handle) {
STqHandle *pHandle = (STqHandle*)handle;
int32_t vgId = TD_VID(pTq->pVnode);
+ if(taosHashGetSize(pTq->pPushMgr) <= 0) {
+ return 0;
+ }
int32_t ret = taosHashRemove(pTq->pPushMgr, pHandle->subKey, strlen(pHandle->subKey));
- tqError("vgId:%d remove pHandle:%p,ret:%d consumer Id:0x%" PRIx64, vgId, pHandle, ret, pHandle->consumerId);
+ tqDebug("vgId:%d remove pHandle:%p,ret:%d consumer Id:0x%" PRIx64, vgId, pHandle, ret, pHandle->consumerId);
if(pHandle->msg != NULL) {
tqPushDataRsp(pHandle, vgId);
diff --git a/source/dnode/vnode/src/tq/tqRead.c b/source/dnode/vnode/src/tq/tqRead.c
index b756f99f3298a9c1e614839aad1c533036680085..77a966715eb84410cecc245c38d0f1ba60bcd9aa 100644
--- a/source/dnode/vnode/src/tq/tqRead.c
+++ b/source/dnode/vnode/src/tq/tqRead.c
@@ -273,7 +273,7 @@ STqReader* tqReaderOpen(SVnode* pVnode) {
return pReader;
}
-void tqCloseReader(STqReader* pReader) {
+void tqReaderClose(STqReader* pReader) {
// close wal reader
if (pReader->pWalReader) {
walCloseReader(pReader->pWalReader);
@@ -294,7 +294,7 @@ void tqCloseReader(STqReader* pReader) {
taosMemoryFree(pReader);
}
-int32_t tqSeekVer(STqReader* pReader, int64_t ver, const char* id) {
+int32_t tqReaderSeek(STqReader* pReader, int64_t ver, const char* id) {
if (walReaderSeekVer(pReader->pWalReader, ver) < 0) {
return -1;
}
@@ -394,8 +394,9 @@ bool tqNextBlockInWal(STqReader* pReader, const char* id) {
SSubmitTbData* pSubmitTbData = taosArrayGet(pReader->submit.aSubmitTbData, pReader->nextBlk);
if (pReader->tbIdHash == NULL) {
- int32_t code = tqRetrieveDataBlock(pReader, NULL);
- if (code == TSDB_CODE_SUCCESS && pReader->pResBlock->info.rows > 0) {
+ SSDataBlock* pRes = NULL;
+ int32_t code = tqRetrieveDataBlock(pReader, &pRes, NULL);
+ if (code == TSDB_CODE_SUCCESS && pRes->info.rows > 0) {
return true;
}
}
@@ -404,8 +405,9 @@ bool tqNextBlockInWal(STqReader* pReader, const char* id) {
if (ret != NULL) {
tqDebug("tq reader return submit block, uid:%" PRId64 ", ver:%" PRId64, pSubmitTbData->uid, pReader->msg.ver);
- int32_t code = tqRetrieveDataBlock(pReader, NULL);
- if (code == TSDB_CODE_SUCCESS && pReader->pResBlock->info.rows > 0) {
+ SSDataBlock* pRes = NULL;
+ int32_t code = tqRetrieveDataBlock(pReader, &pRes, NULL);
+ if (code == TSDB_CODE_SUCCESS && pRes->info.rows > 0) {
return true;
}
} else {
@@ -440,6 +442,14 @@ int32_t tqReaderSetSubmitMsg(STqReader* pReader, void* msgStr, int32_t msgLen, i
return 0;
}
+SWalReader* tqGetWalReader(STqReader* pReader) {
+ return pReader->pWalReader;
+}
+
+SSDataBlock* tqGetResultBlock (STqReader* pReader) {
+ return pReader->pResBlock;
+}
+
bool tqNextBlockImpl(STqReader* pReader, const char* idstr) {
if (pReader->msg.msgStr == NULL) {
return false;
@@ -447,7 +457,7 @@ bool tqNextBlockImpl(STqReader* pReader, const char* idstr) {
int32_t numOfBlocks = taosArrayGetSize(pReader->submit.aSubmitTbData);
while (pReader->nextBlk < numOfBlocks) {
- tqDebug("tq reader next data block, len:%d ver:%" PRId64 " index:%d/%d, %s", pReader->msg.msgLen, pReader->msg.ver,
+ tqDebug("try next data block, len:%d ver:%" PRId64 " index:%d/%d, %s", pReader->msg.msgLen, pReader->msg.ver,
pReader->nextBlk, numOfBlocks, idstr);
SSubmitTbData* pSubmitTbData = taosArrayGet(pReader->submit.aSubmitTbData, pReader->nextBlk);
@@ -457,10 +467,11 @@ bool tqNextBlockImpl(STqReader* pReader, const char* idstr) {
void* ret = taosHashGet(pReader->tbIdHash, &pSubmitTbData->uid, sizeof(int64_t));
if (ret != NULL) {
- tqDebug("tq reader block found, ver:%" PRId64 ", uid:%" PRId64, pReader->msg.ver, pSubmitTbData->uid);
+ tqDebug("block found, ver:%" PRId64 ", uid:%" PRId64", %s", pReader->msg.ver, pSubmitTbData->uid, idstr);
return true;
} else {
- tqDebug("tq reader discard submit block, uid:%" PRId64 ", continue", pSubmitTbData->uid);
+ tqDebug("discard submit block, uid:%" PRId64 ", total queried tables:%d continue %s", pSubmitTbData->uid,
+ taosHashGetSize(pReader->tbIdHash), idstr);
}
pReader->nextBlk++;
@@ -592,14 +603,16 @@ static int32_t doSetVal(SColumnInfoData* pColumnInfoData, int32_t rowIndex, SCol
return code;
}
-int32_t tqRetrieveDataBlock(STqReader* pReader, const char* id) {
+int32_t tqRetrieveDataBlock(STqReader* pReader, SSDataBlock** pRes, const char* id) {
tqDebug("tq reader retrieve data block %p, index:%d", pReader->msg.msgStr, pReader->nextBlk);
-
SSubmitTbData* pSubmitTbData = taosArrayGet(pReader->submit.aSubmitTbData, pReader->nextBlk++);
SSDataBlock* pBlock = pReader->pResBlock;
+ *pRes = pBlock;
+
blockDataCleanup(pBlock);
+ int32_t vgId = pReader->pWalReader->pWal->cfg.vgId;
int32_t sversion = pSubmitTbData->sver;
int64_t suid = pSubmitTbData->suid;
int64_t uid = pSubmitTbData->uid;
@@ -616,7 +629,7 @@ int32_t tqRetrieveDataBlock(STqReader* pReader, const char* id) {
if (pReader->pSchemaWrapper == NULL) {
tqWarn("vgId:%d, cannot found schema wrapper for table: suid:%" PRId64 ", uid:%" PRId64
"version %d, possibly dropped table",
- pReader->pWalReader->pWal->cfg.vgId, suid, uid, pReader->cachedSchemaVer);
+ vgId, suid, uid, pReader->cachedSchemaVer);
pReader->cachedSchemaSuid = 0;
terrno = TSDB_CODE_TQ_TABLE_SCHEMA_NOT_FOUND;
return -1;
@@ -630,6 +643,7 @@ int32_t tqRetrieveDataBlock(STqReader* pReader, const char* id) {
if (blockDataGetNumOfCols(pBlock) == 0) {
int32_t code = buildResSDataBlock(pReader->pResBlock, pReader->pSchemaWrapper, pReader->pColIdList);
if (code != TSDB_CODE_SUCCESS) {
+ tqError("vgId:%d failed to build data block, code:%s", vgId, tstrerror(code));
return code;
}
}
@@ -986,7 +1000,7 @@ FAIL:
void tqReaderSetColIdList(STqReader* pReader, SArray* pColIdList) { pReader->pColIdList = pColIdList; }
-int tqReaderSetTbUidList(STqReader* pReader, const SArray* tbUidList) {
+int tqReaderSetTbUidList(STqReader* pReader, const SArray* tbUidList, const char* id) {
if (pReader->tbIdHash) {
taosHashClear(pReader->tbIdHash);
} else {
@@ -1003,6 +1017,7 @@ int tqReaderSetTbUidList(STqReader* pReader, const SArray* tbUidList) {
taosHashPut(pReader->tbIdHash, pKey, sizeof(int64_t), NULL, 0);
}
+ tqDebug("s-task:%s %d tables are set to be queried target table", id, (int32_t) taosArrayGetSize(tbUidList));
return 0;
}
@@ -1024,6 +1039,14 @@ int tqReaderAddTbUidList(STqReader* pReader, const SArray* pTableUidList) {
return 0;
}
+bool tqReaderIsQueriedTable(STqReader* pReader, uint64_t uid) {
+ return taosHashGet(pReader->tbIdHash, &uid, sizeof(uint64_t));
+}
+
+bool tqCurrentBlockConsumed(const STqReader* pReader) {
+ return pReader->msg.msgStr == NULL;
+}
+
int tqReaderRemoveTbUidList(STqReader* pReader, const SArray* tbUidList) {
for (int32_t i = 0; i < taosArrayGetSize(tbUidList); i++) {
int64_t* pKey = (int64_t*)taosArrayGet(tbUidList, i);
@@ -1039,7 +1062,6 @@ int32_t tqUpdateTbUidList(STQ* pTq, const SArray* tbUidList, bool isAdd) {
int32_t vgId = TD_VID(pTq->pVnode);
// update the table list for each consumer handle
- taosWLockLatch(&pTq->lock);
while (1) {
pIter = taosHashIterate(pTq->pHandle, pIter);
if (pIter == NULL) {
@@ -1063,40 +1085,20 @@ int32_t tqUpdateTbUidList(STQ* pTq, const SArray* tbUidList, bool isAdd) {
}
} else if (pTqHandle->execHandle.subType == TOPIC_SUB_TYPE__TABLE) {
if (isAdd) {
- SArray* qa = taosArrayInit(4, sizeof(tb_uid_t));
- SMetaReader mr = {0};
- metaReaderInit(&mr, pTq->pVnode->pMeta, 0);
- for (int32_t i = 0; i < taosArrayGetSize(tbUidList); ++i) {
- uint64_t* id = (uint64_t*)taosArrayGet(tbUidList, i);
-
- int32_t code = metaGetTableEntryByUidCache(&mr, *id);
- if (code != TSDB_CODE_SUCCESS) {
- tqError("failed to get table meta, uid:%" PRIu64 " code:%s", *id, tstrerror(terrno));
- continue;
- }
-
- tDecoderClear(&mr.coder);
- if (mr.me.type != TSDB_CHILD_TABLE || mr.me.ctbEntry.suid != pTqHandle->execHandle.execTb.suid) {
- tqDebug("table uid %" PRId64 " does not add to tq handle", *id);
- continue;
- }
-
- tqDebug("table uid %" PRId64 " add to tq handle", *id);
- taosArrayPush(qa, id);
+ SArray* list = NULL;
+ int ret = qGetTableList(pTqHandle->execHandle.execTb.suid, pTq->pVnode, pTqHandle->execHandle.execTb.node, &list, pTqHandle->execHandle.task);
+ if(ret != TDB_CODE_SUCCESS) {
+ tqError("qGetTableList in tqUpdateTbUidList error:%d handle %s consumer:0x%" PRIx64, ret, pTqHandle->subKey, pTqHandle->consumerId);
+ taosArrayDestroy(list);
+ return ret;
}
-
- metaReaderClear(&mr);
- if (taosArrayGetSize(qa) > 0) {
- tqReaderAddTbUidList(pTqHandle->execHandle.pTqReader, qa);
- }
-
- taosArrayDestroy(qa);
+ tqReaderSetTbUidList(pTqHandle->execHandle.pTqReader, list, NULL);
+ taosArrayDestroy(list);
} else {
tqReaderRemoveTbUidList(pTqHandle->execHandle.pTqReader, tbUidList);
}
}
}
- taosWUnLockLatch(&pTq->lock);
// update the table list handle for each stream scanner/wal reader
taosWLockLatch(&pTq->pStreamMeta->lock);
diff --git a/source/dnode/vnode/src/tq/tqRestore.c b/source/dnode/vnode/src/tq/tqRestore.c
index ff9f95d5fadf74a846d4e9b7c184c16793736ec1..fe80f486918413390ee7916fb97fe07c58a1b80d 100644
--- a/source/dnode/vnode/src/tq/tqRestore.c
+++ b/source/dnode/vnode/src/tq/tqRestore.c
@@ -61,9 +61,10 @@ static int32_t doSetOffsetForWalReader(SStreamTask *pTask, int32_t vgId) {
// seek the stored version and extract data from WAL
int64_t firstVer = walReaderGetValidFirstVer(pTask->exec.pWalReader);
if (pTask->chkInfo.currentVer < firstVer) {
+ tqWarn("vgId:%d s-task:%s ver:%"PRId64" earlier than the first ver of wal range %" PRId64 ", forward to %" PRId64, vgId,
+ pTask->id.idStr, pTask->chkInfo.currentVer, firstVer, firstVer);
+
pTask->chkInfo.currentVer = firstVer;
- tqWarn("vgId:%d s-task:%s ver earlier than the first ver of wal range %" PRId64 ", forward to %" PRId64, vgId,
- pTask->id.idStr, firstVer, pTask->chkInfo.currentVer);
// todo need retry if failed
int32_t code = walReaderSeekVer(pTask->exec.pWalReader, pTask->chkInfo.currentVer);
@@ -86,6 +87,16 @@ static int32_t doSetOffsetForWalReader(SStreamTask *pTask, int32_t vgId) {
}
}
+ int64_t skipToVer = walReaderGetSkipToVersion(pTask->exec.pWalReader);
+ if (skipToVer != 0 && skipToVer > pTask->chkInfo.currentVer) {
+ int32_t code = walReaderSeekVer(pTask->exec.pWalReader, skipToVer);
+ if (code != TSDB_CODE_SUCCESS) { // no data in wal, quit
+ return code;
+ }
+
+ tqDebug("vgId:%d s-task:%s wal reader jump to ver:%" PRId64, vgId, pTask->id.idStr, skipToVer);
+ }
+
return TSDB_CODE_SUCCESS;
}
diff --git a/source/dnode/vnode/src/tq/tqScan.c b/source/dnode/vnode/src/tq/tqScan.c
index e268199e16d9aa1eccc0f88f891deef1d0402936..1ff78c586f747ad74bce7d752818bddd90a1de67 100644
--- a/source/dnode/vnode/src/tq/tqScan.c
+++ b/source/dnode/vnode/src/tq/tqScan.c
@@ -51,7 +51,7 @@ static int32_t tqAddTbNameToRsp(const STQ* pTq, int64_t uid, STaosxRsp* pRsp, in
metaReaderInit(&mr, pTq->pVnode->pMeta, 0);
// TODO add reference to gurantee success
- if (metaGetTableEntryByUidCache(&mr, uid) < 0) {
+ if (metaReaderGetTableEntryByUidCache(&mr, uid) < 0) {
metaReaderClear(&mr);
return -1;
}
@@ -84,8 +84,10 @@ int32_t tqScanData(STQ* pTq, const STqHandle* pHandle, SMqDataRsp* pRsp, STqOffs
qStreamSetOpen(task);
tqDebug("consumer:0x%" PRIx64 " vgId:%d, tmq one task start execute", pHandle->consumerId, vgId);
- if (qExecTask(task, &pDataBlock, &ts) != TSDB_CODE_SUCCESS) {
- tqError("consumer:0x%" PRIx64 " vgId:%d, task exec error since %s", pHandle->consumerId, vgId, terrstr());
+ code = qExecTask(task, &pDataBlock, &ts);
+ if (code != TSDB_CODE_SUCCESS) {
+ tqError("consumer:0x%" PRIx64 " vgId:%d, task exec error since %s", pHandle->consumerId, vgId, tstrerror(code));
+ terrno = code;
return -1;
}
@@ -128,8 +130,10 @@ int32_t tqScanTaosx(STQ* pTq, const STqHandle* pHandle, STaosxRsp* pRsp, SMqMeta
SSDataBlock* pDataBlock = NULL;
uint64_t ts = 0;
tqDebug("tmqsnap task start to execute");
- if (qExecTask(task, &pDataBlock, &ts) < 0) {
- tqError("vgId:%d, task exec error since %s", pTq->pVnode->config.vgId, terrstr());
+ int code = qExecTask(task, &pDataBlock, &ts);
+ if (code != 0) {
+ tqError("vgId:%d, task exec error since %s", pTq->pVnode->config.vgId, tstrerror(code));
+ terrno = code;
return -1;
}
diff --git a/source/dnode/vnode/src/tq/tqSink.c b/source/dnode/vnode/src/tq/tqSink.c
index 0bd7d9a57b9ca19514e63f5a4c219c92f006d069..9349c6eb0dd9f36073aceb7d76d769df9871e92a 100644
--- a/source/dnode/vnode/src/tq/tqSink.c
+++ b/source/dnode/vnode/src/tq/tqSink.c
@@ -137,7 +137,7 @@ void tqSinkToTablePipeline(SStreamTask* pTask, void* vnode, int64_t ver, void* d
int32_t blockSz = taosArrayGetSize(pBlocks);
- tqDebug("vgId:%d, s-task:%s write results blocks:%d into table", TD_VID(pVnode), pTask->id.idStr, blockSz);
+ tqDebug("vgId:%d, s-task:%s write results %d blocks into table", TD_VID(pVnode), pTask->id.idStr, blockSz);
void* pBuf = NULL;
SArray* tagArray = NULL;
@@ -294,13 +294,12 @@ void tqSinkToTablePipeline(SStreamTask* pTask, void* vnode, int64_t ver, void* d
char* ctbName = pDataBlock->info.parTbName;
if (!ctbName[0]) {
+ memset(ctbName, 0, TSDB_TABLE_NAME_LEN);
if (res == TSDB_CODE_SUCCESS) {
memcpy(ctbName, pTableSinkInfo->tbName, strlen(pTableSinkInfo->tbName));
} else {
- char* tmp = buildCtbNameByGroupId(stbFullName, pDataBlock->info.id.groupId);
- memcpy(ctbName, tmp, strlen(tmp));
- memcpy(pTableSinkInfo->tbName, tmp, strlen(tmp));
- taosMemoryFree(tmp);
+ buildCtbNameByGroupIdImpl(stbFullName, pDataBlock->info.id.groupId, ctbName);
+ memcpy(pTableSinkInfo->tbName, ctbName, strlen(ctbName));
tqDebug("vgId:%d, gropuId:%" PRIu64 " datablock table name is null", TD_VID(pVnode),
pDataBlock->info.id.groupId);
}
@@ -482,17 +481,15 @@ void tqSinkToTablePipeline(SStreamTask* pTask, void* vnode, int64_t ver, void* d
tEncoderClear(&encoder);
tDestroySubmitReq(&submitReq, TSDB_MSG_FLG_ENCODE);
- SRpcMsg msg = {
- .msgType = TDMT_VND_SUBMIT,
- .pCont = pBuf,
- .contLen = len,
- };
-
+ SRpcMsg msg = { .msgType = TDMT_VND_SUBMIT, .pCont = pBuf, .contLen = len };
if (tmsgPutToQueue(&pVnode->msgCb, WRITE_QUEUE, &msg) != 0) {
tqDebug("failed to put into write-queue since %s", terrstr());
}
}
}
+
+ tqDebug("vgId:%d, s-task:%s write results completed", TD_VID(pVnode), pTask->id.idStr);
+
_end:
taosArrayDestroy(tagArray);
taosArrayDestroy(pVals);
diff --git a/source/dnode/vnode/src/tq/tqUtil.c b/source/dnode/vnode/src/tq/tqUtil.c
index b5308d8b98c225c3aa6209b4648aa183ea690e95..a34e765e509d3453d486b38c4f1c4caf6406b059 100644
--- a/source/dnode/vnode/src/tq/tqUtil.c
+++ b/source/dnode/vnode/src/tq/tqUtil.c
@@ -15,8 +15,7 @@
#include "tq.h"
-#define IS_OFFSET_RESET_TYPE(_t) ((_t) < 0)
-#define NO_POLL_CNT 5
+#define IS_OFFSET_RESET_TYPE(_t) ((_t) < 0)
static int32_t tqSendMetaPollRsp(STqHandle* pHandle, const SRpcMsg* pMsg, const SMqPollReq* pReq,
const SMqMetaRsp* pRsp, int32_t vgId);
@@ -89,15 +88,13 @@ static int32_t tqInitTaosxRsp(STaosxRsp* pRsp, const SMqPollReq* pReq) {
return 0;
}
-static int32_t extractResetOffsetVal(STqOffsetVal* pOffsetVal, STQ* pTq, STqHandle* pHandle, const SMqPollReq* pRequest,
- SRpcMsg* pMsg, bool* pBlockReturned) {
+static int32_t extractResetOffsetVal(STqOffsetVal* pOffsetVal, STQ* pTq, STqHandle* pHandle, const SMqPollReq* pRequest, SRpcMsg* pMsg, bool* pBlockReturned) {
uint64_t consumerId = pRequest->consumerId;
STqOffsetVal reqOffset = pRequest->reqOffset;
STqOffset* pOffset = tqOffsetRead(pTq->pOffsetStore, pRequest->subKey);
int32_t vgId = TD_VID(pTq->pVnode);
*pBlockReturned = false;
-
// In this vnode, data has been polled by consumer for this topic, so let's continue from the last offset value.
if (pOffset != NULL) {
*pOffsetVal = pOffset->val;
@@ -121,21 +118,16 @@ static int32_t extractResetOffsetVal(STqOffsetVal* pOffsetVal, STQ* pTq, STqHand
tqOffsetResetToData(pOffsetVal, 0, 0);
}
} else {
- pHandle->pRef = walRefFirstVer(pTq->pVnode->pWal, pHandle->pRef);
- if (pHandle->pRef == NULL) {
- terrno = TSDB_CODE_OUT_OF_MEMORY;
- return -1;
- }
-
- // offset set to previous version when init
+ walRefFirstVer(pTq->pVnode->pWal, pHandle->pRef);
tqOffsetResetToLog(pOffsetVal, pHandle->pRef->refVer - 1);
}
} else if (reqOffset.type == TMQ_OFFSET__RESET_LATEST) {
+ walRefLastVer(pTq->pVnode->pWal, pHandle->pRef);
if (pHandle->execHandle.subType == TOPIC_SUB_TYPE__COLUMN) {
SMqDataRsp dataRsp = {0};
tqInitDataRsp(&dataRsp, pRequest);
- tqOffsetResetToLog(&dataRsp.rspOffset, walGetLastVer(pTq->pVnode->pWal));
+ tqOffsetResetToLog(&dataRsp.rspOffset, pHandle->pRef->refVer);
tqDebug("tmq poll: consumer:0x%" PRIx64 ", subkey %s, vgId:%d, (latest) offset reset to %" PRId64, consumerId,
pHandle->subKey, vgId, dataRsp.rspOffset.version);
int32_t code = tqSendDataRsp(pHandle, pMsg, pRequest, &dataRsp, TMQ_MSG_TYPE__POLL_RSP, vgId);
@@ -146,7 +138,7 @@ static int32_t extractResetOffsetVal(STqOffsetVal* pOffsetVal, STQ* pTq, STqHand
} else {
STaosxRsp taosxRsp = {0};
tqInitTaosxRsp(&taosxRsp, pRequest);
- tqOffsetResetToLog(&taosxRsp.rspOffset, walGetLastVer(pTq->pVnode->pWal));
+ tqOffsetResetToLog(&taosxRsp.rspOffset, pHandle->pRef->refVer);
int32_t code = tqSendDataRsp(pHandle, pMsg, pRequest, (SMqDataRsp*)&taosxRsp, TMQ_MSG_TYPE__TAOSX_RSP, vgId);
tDeleteSTaosxRsp(&taosxRsp);
@@ -176,24 +168,18 @@ static int32_t extractDataAndRspForNormalSubscribe(STQ* pTq, STqHandle* pHandle,
qSetTaskId(pHandle->execHandle.task, consumerId, pRequest->reqId);
code = tqScanData(pTq, pHandle, &dataRsp, pOffset);
- if (code != 0) {
+ if(code != 0 && terrno != TSDB_CODE_WAL_LOG_NOT_EXIST) {
goto end;
}
// till now, all data has been transferred to consumer, new data needs to push client once arrived.
- if (dataRsp.blockNum == 0 && dataRsp.reqOffset.type == TMQ_OFFSET__LOG &&
- dataRsp.reqOffset.version == dataRsp.rspOffset.version && pHandle->consumerId == pRequest->consumerId) {
- if (pHandle->noDataPollCnt >= NO_POLL_CNT) { // send poll result to client if no data 5 times to avoid lost data
- pHandle->noDataPollCnt = 0;
- // lock
- taosWLockLatch(&pTq->lock);
- code = tqRegisterPushHandle(pTq, pHandle, pMsg);
- taosWUnLockLatch(&pTq->lock);
- tDeleteMqDataRsp(&dataRsp);
- return code;
- } else {
- pHandle->noDataPollCnt++;
- }
+ if (terrno == TSDB_CODE_WAL_LOG_NOT_EXIST && dataRsp.blockNum == 0) {
+ // lock
+ taosWLockLatch(&pTq->lock);
+ code = tqRegisterPushHandle(pTq, pHandle, pMsg);
+ taosWUnLockLatch(&pTq->lock);
+ tDeleteMqDataRsp(&dataRsp);
+ return code;
}
// NOTE: this pHandle->consumerId may have been changed already.
diff --git a/source/dnode/vnode/src/tsdb/tsdbCache.c b/source/dnode/vnode/src/tsdb/tsdbCache.c
index d33f04c1454b414caadacc7daa7feede0b9410cd..c659c8f4a2a917da32764f12494e6f318f7681b9 100644
--- a/source/dnode/vnode/src/tsdb/tsdbCache.c
+++ b/source/dnode/vnode/src/tsdb/tsdbCache.c
@@ -128,6 +128,7 @@ static int32_t tsdbOpenRocksCache(STsdb *pTsdb) {
rocksdb_options_set_comparator(options, cmp);
rocksdb_block_based_options_set_block_cache(tableoptions, cache);
rocksdb_options_set_block_based_table_factory(options, tableoptions);
+ rocksdb_options_set_info_log_level(options, 2); // WARN_LEVEL
// rocksdb_options_set_inplace_update_support(options, 1);
// rocksdb_options_set_allow_concurrent_memtable_write(options, 0);
@@ -703,6 +704,7 @@ static int32_t tsdbCacheLoadFromRaw(STsdb *pTsdb, tb_uid_t uid, SArray *pLastArr
*pTmpLastCol = *pLastCol;
pLastCol = pTmpLastCol;
+ reallocVarData(&pLastCol->colVal);
size_t charge = sizeof(*pLastCol);
if (IS_VAR_DATA_TYPE(pLastCol->colVal.type)) {
charge += pLastCol->colVal.value.nData;
@@ -789,7 +791,9 @@ static int32_t tsdbCacheLoadFromRocks(STsdb *pTsdb, tb_uid_t uid, SArray *pLastA
code = -1;
}
- taosArraySet(pLastArray, idxKey->idx, pLastCol);
+ SLastCol lastCol = *pLastCol;
+ reallocVarData(&lastCol.colVal);
+ taosArraySet(pLastArray, idxKey->idx, &lastCol);
taosArrayRemove(remainCols, j);
taosMemoryFree(values_list[i]);
@@ -825,7 +829,7 @@ int32_t tsdbCacheGetBatch(STsdb *pTsdb, tb_uid_t uid, SArray *pLastArray, SCache
SLastCol *pLastCol = (SLastCol *)taosLRUCacheValue(pCache, h);
SLastCol lastCol = *pLastCol;
- // reallocVarData(&lastCol.colVal);
+ reallocVarData(&lastCol.colVal);
taosArrayPush(pLastArray, &lastCol);
if (h) {
@@ -853,8 +857,8 @@ int32_t tsdbCacheGetBatch(STsdb *pTsdb, tb_uid_t uid, SArray *pLastArray, SCache
SLastCol lastCol = *pLastCol;
reallocVarData(&lastCol.colVal);
-
taosArraySet(pLastArray, idxKey->idx, &lastCol);
+
if (h) {
taosLRUCacheRelease(pCache, h, false);
}
@@ -937,14 +941,14 @@ int32_t tsdbCacheDel(STsdb *pTsdb, tb_uid_t suid, tb_uid_t uid, TSKEY sKey, TSKE
}
// build keys & multi get from rocks
- int num_keys = pTSchema->numOfCols;
- char **keys_list = taosMemoryCalloc(num_keys * 2, sizeof(char *));
- size_t *keys_list_sizes = taosMemoryCalloc(num_keys * 2, sizeof(size_t));
+ int num_keys = pTSchema->numOfCols;
+ char **keys_list = taosMemoryCalloc(num_keys * 2, sizeof(char *));
+ size_t *keys_list_sizes = taosMemoryCalloc(num_keys * 2, sizeof(size_t));
+ const size_t klen = ROCKS_KEY_LEN;
for (int i = 0; i < num_keys; ++i) {
int16_t cid = pTSchema->columns[i].colId;
- size_t klen = ROCKS_KEY_LEN;
- char *keys = taosMemoryCalloc(2, sizeof(SLastKey));
+ char *keys = taosMemoryCalloc(2, sizeof(SLastKey));
((SLastKey *)keys)[0] = (SLastKey){.ltype = 1, .uid = uid, .cid = cid};
((SLastKey *)keys)[1] = (SLastKey){.ltype = 0, .uid = uid, .cid = cid};
@@ -960,39 +964,35 @@ int32_t tsdbCacheDel(STsdb *pTsdb, tb_uid_t suid, tb_uid_t uid, TSKEY sKey, TSKE
rocksMayWrite(pTsdb, true, false, false);
rocksdb_multi_get(pTsdb->rCache.db, pTsdb->rCache.readoptions, num_keys * 2, (const char *const *)keys_list,
keys_list_sizes, values_list, values_list_sizes, errs);
- for (int i = 0; i < num_keys; ++i) {
- taosMemoryFree(keys_list[i]);
- }
for (int i = 0; i < num_keys * 2; ++i) {
- rocksdb_free(errs[i]);
+ if (errs[i]) {
+ rocksdb_free(errs[i]);
+ }
}
- taosMemoryFree(keys_list);
- taosMemoryFree(keys_list_sizes);
taosMemoryFree(errs);
rocksdb_writebatch_t *wb = pTsdb->rCache.writebatch;
for (int i = 0; i < num_keys; ++i) {
SLastCol *pLastCol = tsdbCacheDeserialize(values_list[i]);
if (NULL != pLastCol && (pLastCol->ts <= eKey && pLastCol->ts >= sKey)) {
- SLastKey *key = &(SLastKey){.ltype = 1, .uid = uid, .cid = pLastCol->colVal.cid};
- size_t klen = ROCKS_KEY_LEN;
-
- rocksdb_writebatch_delete(wb, (char *)key, klen);
- taosLRUCacheErase(pTsdb->lruCache, key, klen);
+ rocksdb_writebatch_delete(wb, keys_list[i], klen);
}
+ taosLRUCacheErase(pTsdb->lruCache, keys_list[i], klen);
pLastCol = tsdbCacheDeserialize(values_list[i + num_keys]);
if (NULL != pLastCol && (pLastCol->ts <= eKey && pLastCol->ts >= sKey)) {
- SLastKey *key = &(SLastKey){.ltype = 0, .uid = uid, .cid = pLastCol->colVal.cid};
- size_t klen = ROCKS_KEY_LEN;
-
- rocksdb_writebatch_delete(wb, (char *)key, klen);
- taosLRUCacheErase(pTsdb->lruCache, key, klen);
+ rocksdb_writebatch_delete(wb, keys_list[num_keys + i], klen);
}
+ taosLRUCacheErase(pTsdb->lruCache, keys_list[num_keys + i], klen);
rocksdb_free(values_list[i]);
rocksdb_free(values_list[i + num_keys]);
}
+ for (int i = 0; i < num_keys; ++i) {
+ taosMemoryFree(keys_list[i]);
+ }
+ taosMemoryFree(keys_list);
+ taosMemoryFree(keys_list_sizes);
taosMemoryFree(values_list);
taosMemoryFree(values_list_sizes);
@@ -1432,7 +1432,7 @@ static tb_uid_t getTableSuidByUid(tb_uid_t uid, STsdb *pTsdb) {
SMetaReader mr = {0};
metaReaderInit(&mr, pTsdb->pVnode->pMeta, 0);
- if (metaGetTableEntryByUidCache(&mr, uid) < 0) {
+ if (metaReaderGetTableEntryByUidCache(&mr, uid) < 0) {
metaReaderClear(&mr); // table not esist
return 0;
}
@@ -1454,7 +1454,7 @@ static int32_t getTableDelDataFromDelIdx(SDelFReader *pDelReader, SDelIdx *pDelI
int32_t code = 0;
if (pDelIdx) {
- code = tsdbReadDelData(pDelReader, pDelIdx, aDelData);
+ code = tsdbReadDelDatav1(pDelReader, pDelIdx, aDelData, INT64_MAX);
}
return code;
@@ -1871,10 +1871,14 @@ static int32_t getNextRowFromFS(void *iter, TSDBROW **ppRow, bool *pIgnoreEarlie
if (isLast && (pColData->flag & HAS_VALUE)) {
skipBlock = false;
break;
- } else if (pColData->flag & (HAS_VALUE | HAS_NULL)) {
+ } /*else if (pColData->flag & (HAS_VALUE | HAS_NULL)) {
skipBlock = false;
break;
- }
+ }*/
+ }
+
+ if (!isLast) {
+ skipBlock = false;
}
if (skipBlock) {
@@ -1908,6 +1912,9 @@ static int32_t getNextRowFromFS(void *iter, TSDBROW **ppRow, bool *pIgnoreEarlie
if (checkRemainingRow) {
bool skipBlock = true;
int inputColIndex = 0;
+ if (aCols[0] == PRIMARYKEY_TIMESTAMP_COL_ID) {
+ ++inputColIndex;
+ }
for (int colIndex = 0; colIndex < state->pBlockData->nColData; ++colIndex) {
SColData *pColData = &state->pBlockData->aColData[colIndex];
int16_t cid = pColData->cid;
@@ -1916,15 +1923,19 @@ static int32_t getNextRowFromFS(void *iter, TSDBROW **ppRow, bool *pIgnoreEarlie
if (isLast && (pColData->flag & HAS_VALUE)) {
skipBlock = false;
break;
- } else if (pColData->flag & (HAS_VALUE | HAS_NULL)) {
+ } /*else if (pColData->flag & (HAS_VALUE | HAS_NULL)) {
skipBlock = false;
break;
- }
+ }*/
++inputColIndex;
}
}
+ if (!isLast) {
+ skipBlock = false;
+ }
+
if (skipBlock) {
if (--state->iBlock < 0) {
tsdbDataFReaderClose(state->pDataFReader);
@@ -2145,9 +2156,14 @@ static bool tsdbKeyDeleted(TSDBKEY *key, SArray *pSkyline, int64_t *iSkyline) {
return false;
} else if (key->ts >= pItemFront->ts && key->ts <= pItemBack->ts) {
if (key->version <= pItemFront->version || (key->ts == pItemBack->ts && key->version <= pItemBack->version)) {
+ // if (key->version <= pItemFront->version || key->version <= pItemBack->version) {
return true;
} else {
- return false;
+ if (*iSkyline > 1) {
+ --*iSkyline;
+ } else {
+ return false;
+ }
}
} else {
if (*iSkyline > 1) {
@@ -2959,7 +2975,7 @@ static int32_t mergeLastRowCid(tb_uid_t uid, STsdb *pTsdb, SArray **ppLastArray,
do {
TSDBROW *pRow = NULL;
- nextRowIterGet(&iter, &pRow, &ignoreEarlierTs, true, TARRAY_DATA(aColArray), TARRAY_SIZE(aColArray));
+ nextRowIterGet(&iter, &pRow, &ignoreEarlierTs, false, TARRAY_DATA(aColArray), TARRAY_SIZE(aColArray));
if (!pRow) {
break;
diff --git a/source/dnode/vnode/src/tsdb/tsdbCacheRead.c b/source/dnode/vnode/src/tsdb/tsdbCacheRead.c
index 0e0b7a2ffa8105774c2f3ffb61eb49fdaf3dd220..6138b1f7b40184be649a78543d8958fbfe80516f 100644
--- a/source/dnode/vnode/src/tsdb/tsdbCacheRead.c
+++ b/source/dnode/vnode/src/tsdb/tsdbCacheRead.c
@@ -201,6 +201,9 @@ int32_t tsdbCacherowsReaderOpen(void* pVnode, int32_t type, void* pTableIdList,
void* tsdbCacherowsReaderClose(void* pReader) {
SCacheRowsReader* p = pReader;
+ if (p == NULL) {
+ return NULL;
+ }
if (p->pSchema != NULL) {
for (int32_t i = 0; i < p->pSchema->numOfCols; ++i) {
@@ -315,14 +318,14 @@ int32_t tsdbRetrieveCacheRows(void* pReader, SSDataBlock* pResBlock, const int32
tsdbCacheGetBatch(pr->pTsdb, pKeyInfo->uid, pRow, pr, ltype);
// tsdbCacheGet(pr->pTsdb, pKeyInfo->uid, pRow, pr, ltype);
if (TARRAY_SIZE(pRow) <= 0) {
- // taosArrayClearEx(pRow, freeItem);
- taosArrayClear(pRow);
+ taosArrayClearEx(pRow, freeItem);
+ // taosArrayClear(pRow);
continue;
}
SLastCol* pColVal = taosArrayGet(pRow, 0);
if (COL_VAL_IS_NONE(&pColVal->colVal)) {
- // taosArrayClearEx(pRow, freeItem);
- taosArrayClear(pRow);
+ taosArrayClearEx(pRow, freeItem);
+ // taosArrayClear(pRow);
continue;
}
@@ -381,8 +384,8 @@ int32_t tsdbRetrieveCacheRows(void* pReader, SSDataBlock* pResBlock, const int32
}
}
- // taosArrayClearEx(pRow, freeItem);
- taosArrayClear(pRow);
+ taosArrayClearEx(pRow, freeItem);
+ // taosArrayClear(pRow);
}
if (hasRes) {
@@ -394,20 +397,20 @@ int32_t tsdbRetrieveCacheRows(void* pReader, SSDataBlock* pResBlock, const int32
tsdbCacheGetBatch(pr->pTsdb, uid, pRow, pr, ltype);
if (TARRAY_SIZE(pRow) <= 0) {
- // taosArrayClearEx(pRow, freeItem);
- taosArrayClear(pRow);
+ taosArrayClearEx(pRow, freeItem);
+ // taosArrayClear(pRow);
continue;
}
SLastCol* pColVal = (SLastCol*)taosArrayGet(pRow, 0);
if (COL_VAL_IS_NONE(&pColVal->colVal)) {
- // taosArrayClearEx(pRow, freeItem);
- taosArrayClear(pRow);
+ taosArrayClearEx(pRow, freeItem);
+ // taosArrayClear(pRow);
continue;
}
saveOneRow(pRow, pResBlock, pr, slotIds, dstSlotIds, pRes, pr->idstr);
- // taosArrayClearEx(pRow, freeItem);
- taosArrayClear(pRow);
+ taosArrayClearEx(pRow, freeItem);
+ // taosArrayClear(pRow);
taosArrayPush(pTableUidList, &uid);
diff --git a/source/dnode/vnode/src/tsdb/tsdbCommit.c b/source/dnode/vnode/src/tsdb/tsdbCommit.c
index d15f848cfdcde11c946a943d96b0397933324b14..b440d5188356652b8b32396ab239553bea4eba62 100644
--- a/source/dnode/vnode/src/tsdb/tsdbCommit.c
+++ b/source/dnode/vnode/src/tsdb/tsdbCommit.c
@@ -266,7 +266,7 @@ static int32_t tsdbCommitTableDel(SCommitter *pCommitter, STbData *pTbData, SDel
suid = pDelIdx->suid;
uid = pDelIdx->uid;
- code = tsdbReadDelData(pCommitter->pDelFReader, pDelIdx, pCommitter->aDelData);
+ code = tsdbReadDelDatav1(pCommitter->pDelFReader, pDelIdx, pCommitter->aDelData, INT64_MAX);
TSDB_CHECK_CODE(code, lino, _exit);
} else {
taosArrayClear(pCommitter->aDelData);
diff --git a/source/dnode/vnode/src/tsdb/tsdbDataIter.c b/source/dnode/vnode/src/tsdb/tsdbDataIter.c
index e27aec5b1bad10afeb06cb857661ee117aea7e12..8215c1ac290085fbb8efa158785151d7c68bfcc4 100644
--- a/source/dnode/vnode/src/tsdb/tsdbDataIter.c
+++ b/source/dnode/vnode/src/tsdb/tsdbDataIter.c
@@ -412,7 +412,7 @@ static int32_t tsdbTombFileDataIterNext(STsdbDataIter2* pIter, STsdbFilterInfo*
}
}
- code = tsdbReadDelData(pIter->tIter.pReader, pDelIdx, pIter->tIter.aDelData);
+ code = tsdbReadDelDatav1(pIter->tIter.pReader, pDelIdx, pIter->tIter.aDelData, INT64_MAX);
TSDB_CHECK_CODE(code, lino, _exit);
pIter->delInfo.suid = pDelIdx->suid;
diff --git a/source/dnode/vnode/src/tsdb/tsdbMemTable.c b/source/dnode/vnode/src/tsdb/tsdbMemTable.c
index 80967a906f25d7c16b40a4d557c62d8eddcc9005..6d223e00c54272a47377f3521a181d350ab2c4a2 100644
--- a/source/dnode/vnode/src/tsdb/tsdbMemTable.c
+++ b/source/dnode/vnode/src/tsdb/tsdbMemTable.c
@@ -190,9 +190,9 @@ int32_t tsdbDeleteTableData(STsdb *pTsdb, int64_t version, tb_uid_t suid, tb_uid
tsdbCacheDeleteLast(pTsdb->lruCache, pTbData->uid, eKey);
}
*/
- if (eKey >= pTbData->maxKey && sKey <= pTbData->maxKey) {
- tsdbCacheDel(pTsdb, suid, uid, sKey, eKey);
- }
+ // if (eKey >= pTbData->maxKey && sKey <= pTbData->maxKey) {
+ tsdbCacheDel(pTsdb, suid, uid, sKey, eKey);
+ //}
tsdbTrace("vgId:%d, delete data from table suid:%" PRId64 " uid:%" PRId64 " skey:%" PRId64 " eKey:%" PRId64
" at version %" PRId64,
diff --git a/source/dnode/vnode/src/tsdb/tsdbRead.c b/source/dnode/vnode/src/tsdb/tsdbRead.c
index b4b090249e07e1e686f3cf0fbd265a4f0200d07b..2500015ec1d87d05667b63ca305eac6a16bfe605 100644
--- a/source/dnode/vnode/src/tsdb/tsdbRead.c
+++ b/source/dnode/vnode/src/tsdb/tsdbRead.c
@@ -754,7 +754,7 @@ static int32_t initResBlockInfo(SResultBlockInfo* pResBlockInfo, int64_t capacit
return terrno;
}
-static int32_t tsdbReaderCreate(SVnode* pVnode, SQueryTableDataCond* pCond, STsdbReader** ppReader, int32_t capacity,
+static int32_t tsdbReaderCreate(SVnode* pVnode, SQueryTableDataCond* pCond, void** ppReader, int32_t capacity,
SSDataBlock* pResBlock, const char* idstr) {
int32_t code = 0;
int8_t level = 0;
@@ -1121,6 +1121,27 @@ static int32_t getEndPosInDataBlock(STsdbReader* pReader, SBlockData* pBlockData
endPos = doBinarySearchKey(pBlockData->aTSKEY, pBlock->nRow, pos, key, pReader->order);
}
+ if ((pReader->verRange.maxVer >= pBlock->minVer && pReader->verRange.maxVer < pBlock->maxVer)||
+ (pReader->verRange.minVer <= pBlock->maxVer && pReader->verRange.minVer > pBlock->minVer)) {
+ int32_t i = endPos;
+
+ if (asc) {
+ for(; i >= 0; --i) {
+ if (pBlockData->aVersion[i] <= pReader->verRange.maxVer) {
+ break;
+ }
+ }
+ } else {
+ for(; i < pBlock->nRow; ++i) {
+ if (pBlockData->aVersion[i] >= pReader->verRange.minVer) {
+ break;
+ }
+ }
+ }
+
+ endPos = i;
+ }
+
return endPos;
}
@@ -1260,10 +1281,11 @@ static int32_t copyBlockDataToSDataBlock(STsdbReader* pReader) {
return 0;
}
+ // row index of dump info remain the initial position, let's find the appropriate start position.
if ((pDumpInfo->rowIndex == 0 && asc) || (pDumpInfo->rowIndex == pBlock->nRow - 1 && (!asc))) {
- if (asc && pReader->window.skey <= pBlock->minKey.ts) {
+ if (asc && pReader->window.skey <= pBlock->minKey.ts && pReader->verRange.minVer <= pBlock->minVer) {
// pDumpInfo->rowIndex = 0;
- } else if (!asc && pReader->window.ekey >= pBlock->maxKey.ts) {
+ } else if (!asc && pReader->window.ekey >= pBlock->maxKey.ts && pReader->verRange.maxVer >= pBlock->maxVer) {
// pDumpInfo->rowIndex = pBlock->nRow - 1;
} else { // find the appropriate the start position in current block, and set it to be the current rowIndex
int32_t pos = asc ? pBlock->nRow - 1 : 0;
@@ -1279,6 +1301,29 @@ static int32_t copyBlockDataToSDataBlock(STsdbReader* pReader) {
pBlock->maxVer, pReader->idStr);
return TSDB_CODE_INVALID_PARA;
}
+
+ ASSERT(pReader->verRange.minVer <= pBlock->maxVer && pReader->verRange.maxVer >= pBlock->minVer);
+
+ // find the appropriate start position that satisfies the version requirement.
+ if ((pReader->verRange.maxVer >= pBlock->minVer && pReader->verRange.maxVer < pBlock->maxVer)||
+ (pReader->verRange.minVer <= pBlock->maxVer && pReader->verRange.minVer > pBlock->minVer)) {
+ int32_t i = pDumpInfo->rowIndex;
+ if (asc) {
+ for(; i < pBlock->nRow; ++i) {
+ if (pBlockData->aVersion[i] >= pReader->verRange.minVer) {
+ break;
+ }
+ }
+ } else {
+ for(; i >= 0; --i) {
+ if (pBlockData->aVersion[i] <= pReader->verRange.maxVer) {
+ break;
+ }
+ }
+ }
+
+ pDumpInfo->rowIndex = i;
+ }
}
}
@@ -1293,6 +1338,9 @@ static int32_t copyBlockDataToSDataBlock(STsdbReader* pReader) {
int32_t dumpedRows = asc ? (endIndex - pDumpInfo->rowIndex) : (pDumpInfo->rowIndex - endIndex);
if (dumpedRows > pReader->resBlockInfo.capacity) { // output buffer check
dumpedRows = pReader->resBlockInfo.capacity;
+ } else if (dumpedRows <= 0) { // no qualified rows in current data block, abort directly.
+ setBlockAllDumped(pDumpInfo, pReader->window.ekey, pReader->order);
+ return TSDB_CODE_SUCCESS;
}
int32_t i = 0;
@@ -2809,7 +2857,7 @@ static int32_t buildComposedDataBlock(STsdbReader* pReader) {
// it is a clean block, load it directly
if (isCleanFileDataBlock(pReader, pBlockInfo, pBlock, pBlockScanInfo, keyInBuf, pLastBlockReader) &&
pBlock->nRow <= pReader->resBlockInfo.capacity) {
- if (asc || ((!asc) && (!hasDataInLastBlock(pLastBlockReader)))) {
+ if (asc || (!hasDataInLastBlock(pLastBlockReader))) {
code = copyBlockDataToSDataBlock(pReader);
if (code) {
goto _end;
@@ -2828,7 +2876,7 @@ static int32_t buildComposedDataBlock(STsdbReader* pReader) {
}
}
- SBlockData* pBlockData = &pReader->status.fileBlockData;
+ SBlockData* pBlockData = &pReader->status.fileBlockData;
while (1) {
bool hasBlockData = false;
@@ -2842,7 +2890,7 @@ static int32_t buildComposedDataBlock(STsdbReader* pReader) {
pDumpInfo->rowIndex += step;
- SDataBlk* pBlock = getCurrentBlock(&pReader->status.blockIter);
+ pBlock = getCurrentBlock(&pReader->status.blockIter);
if (pDumpInfo->rowIndex >= pBlock->nRow || pDumpInfo->rowIndex < 0) {
pBlockInfo = getCurrentBlockInfo(&pReader->status.blockIter); // NOTE: get the new block info
@@ -2870,7 +2918,7 @@ static int32_t buildComposedDataBlock(STsdbReader* pReader) {
// currently loaded file data block is consumed
if ((pBlockData->nRow > 0) && (pDumpInfo->rowIndex >= pBlockData->nRow || pDumpInfo->rowIndex < 0)) {
- SDataBlk* pBlock = getCurrentBlock(&pReader->status.blockIter);
+ pBlock = getCurrentBlock(&pReader->status.blockIter);
setBlockAllDumped(pDumpInfo, pBlock->maxKey.ts, pReader->order);
break;
}
@@ -2919,7 +2967,7 @@ int32_t initDelSkylineIterator(STableBlockScanInfo* pBlockScanInfo, STsdbReader*
SDelIdx* pIdx = taosArraySearch(pReader->pDelIdx, &idx, tCmprDelIdx, TD_EQ);
if (pIdx != NULL) {
- code = tsdbReadDelData(pReader->pDelFReader, pIdx, pDelData);
+ code = tsdbReadDelDatav1(pReader->pDelFReader, pIdx, pDelData, pReader->verRange.maxVer);
}
if (code != TSDB_CODE_SUCCESS) {
goto _err;
@@ -2930,7 +2978,10 @@ int32_t initDelSkylineIterator(STableBlockScanInfo* pBlockScanInfo, STsdbReader*
if (pMemTbData != NULL) {
p = pMemTbData->pHead;
while (p) {
- taosArrayPush(pDelData, p);
+ if (p->version <= pReader->verRange.maxVer) {
+ taosArrayPush(pDelData, p);
+ }
+
p = p->pNext;
}
}
@@ -2938,7 +2989,9 @@ int32_t initDelSkylineIterator(STableBlockScanInfo* pBlockScanInfo, STsdbReader*
if (piMemTbData != NULL) {
p = piMemTbData->pHead;
while (p) {
- taosArrayPush(pDelData, p);
+ if (p->version <= pReader->verRange.maxVer) {
+ taosArrayPush(pDelData, p);
+ }
p = p->pNext;
}
}
@@ -3440,6 +3493,7 @@ static int32_t buildBlockFromBufferSequentially(STsdbReader* pReader) {
if (!hasNexTable) {
return TSDB_CODE_SUCCESS;
}
+ pBlockScanInfo = pStatus->pTableIter;
}
initMemDataIterator(*pBlockScanInfo, pReader);
@@ -4395,11 +4449,12 @@ static void freeSchemaFunc(void* param) {
}
// ====================================== EXPOSED APIs ======================================
-int32_t tsdbReaderOpen(SVnode* pVnode, SQueryTableDataCond* pCond, void* pTableList, int32_t numOfTables,
- SSDataBlock* pResBlock, STsdbReader** ppReader, const char* idstr, bool countOnly, SHashObj** pIgnoreTables) {
+int32_t tsdbReaderOpen(void* pVnode, SQueryTableDataCond* pCond, void* pTableList, int32_t numOfTables,
+ SSDataBlock* pResBlock, void** ppReader, const char* idstr, bool countOnly, SHashObj** pIgnoreTables) {
STimeWindow window = pCond->twindows;
+ SVnodeCfg* pConf = &(((SVnode*)pVnode)->config);
- int32_t capacity = pVnode->config.tsdbCfg.maxRows;
+ int32_t capacity = pConf->tsdbCfg.maxRows;
if (pResBlock != NULL) {
blockDataEnsureCapacity(pResBlock, capacity);
}
@@ -4430,7 +4485,7 @@ int32_t tsdbReaderOpen(SVnode* pVnode, SQueryTableDataCond* pCond, void* pTableL
}
// here we only need one more row, so the capacity is set to be ONE.
- code = tsdbReaderCreate(pVnode, pCond, &pReader->innerReader[0], 1, pResBlock, idstr);
+ code = tsdbReaderCreate(pVnode, pCond, (void**)&((STsdbReader*)pReader)->innerReader[0], 1, pResBlock, idstr);
if (code != TSDB_CODE_SUCCESS) {
goto _err;
}
@@ -4444,7 +4499,7 @@ int32_t tsdbReaderOpen(SVnode* pVnode, SQueryTableDataCond* pCond, void* pTableL
}
pCond->order = order;
- code = tsdbReaderCreate(pVnode, pCond, &pReader->innerReader[1], 1, pResBlock, idstr);
+ code = tsdbReaderCreate(pVnode, pCond, (void**)&((STsdbReader*)pReader)->innerReader[1], 1, pResBlock, idstr);
if (code != TSDB_CODE_SUCCESS) {
goto _err;
}
@@ -4494,7 +4549,7 @@ int32_t tsdbReaderOpen(SVnode* pVnode, SQueryTableDataCond* pCond, void* pTableL
goto _err;
}
- pReader->status.pLDataIter = taosMemoryCalloc(pVnode->config.sttTrigger, sizeof(SLDataIter));
+ pReader->status.pLDataIter = taosMemoryCalloc(pConf->sttTrigger, sizeof(SLDataIter));
if (pReader->status.pLDataIter == NULL) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
goto _err;
@@ -4508,7 +4563,11 @@ int32_t tsdbReaderOpen(SVnode* pVnode, SQueryTableDataCond* pCond, void* pTableL
pReader->pIgnoreTables = pIgnoreTables;
- tsdbDebug("%p total numOfTable:%d in this query %s", pReader, numOfTables, pReader->idStr);
+ tsdbDebug("%p total numOfTable:%d, window:%" PRId64 " - %" PRId64 ", verRange:%" PRId64 " - %" PRId64
+ " in this query %s",
+ pReader, numOfTables, pReader->window.skey, pReader->window.ekey, pReader->verRange.minVer,
+ pReader->verRange.maxVer, pReader->idStr);
+
return code;
_err:
@@ -4980,7 +5039,8 @@ int32_t tsdbNextDataBlock(STsdbReader* pReader, bool* hasNext) {
return code;
}
-static void doFillNullColSMA(SBlockLoadSuppInfo* pSup, int32_t numOfRows, int32_t numOfCols, SColumnDataAgg* pTsAgg) {
+static bool doFillNullColSMA(SBlockLoadSuppInfo* pSup, int32_t numOfRows, int32_t numOfCols, SColumnDataAgg* pTsAgg) {
+ bool hasNullSMA = false;
// do fill all null column value SMA info
int32_t i = 0, j = 0;
int32_t size = (int32_t)taosArrayGetSize(pSup->pColAgg);
@@ -5000,6 +5060,7 @@ static void doFillNullColSMA(SBlockLoadSuppInfo* pSup, int32_t numOfRows, int32_
taosArrayInsert(pSup->pColAgg, i, &nullColAgg);
i += 1;
size++;
+ hasNullSMA = true;
}
j += 1;
}
@@ -5010,12 +5071,15 @@ static void doFillNullColSMA(SBlockLoadSuppInfo* pSup, int32_t numOfRows, int32_
SColumnDataAgg nullColAgg = {.colId = pSup->colId[j], .numOfNull = numOfRows};
taosArrayInsert(pSup->pColAgg, i, &nullColAgg);
i += 1;
+ hasNullSMA = true;
}
j++;
}
+
+ return hasNullSMA;
}
-int32_t tsdbRetrieveDatablockSMA(STsdbReader* pReader, SSDataBlock* pDataBlock, bool* allHave) {
+int32_t tsdbRetrieveDatablockSMA(STsdbReader* pReader, SSDataBlock* pDataBlock, bool* allHave, bool *hasNullSMA) {
SColumnDataAgg*** pBlockSMA = &pDataBlock->pBlockAgg;
int32_t code = 0;
@@ -5079,7 +5143,10 @@ int32_t tsdbRetrieveDatablockSMA(STsdbReader* pReader, SSDataBlock* pDataBlock,
}
// do fill all null column value SMA info
- doFillNullColSMA(pSup, pBlock->nRow, numOfCols, pTsAgg);
+ if (doFillNullColSMA(pSup, pBlock->nRow, numOfCols, pTsAgg)) {
+ *hasNullSMA = true;
+ return TSDB_CODE_SUCCESS;
+ }
size_t size = taosArrayGetSize(pSup->pColAgg);
int32_t i = 0, j = 0;
@@ -5246,6 +5313,9 @@ int32_t tsdbReaderReset(STsdbReader* pReader, SQueryTableDataCond* pCond) {
}
static int32_t getBucketIndex(int32_t startRow, int32_t bucketRange, int32_t numOfRows, int32_t numOfBucket) {
+ if (numOfRows < startRow) {
+ return 0;
+ }
int32_t bucketIndex = ((numOfRows - startRow) / bucketRange);
if (bucketIndex == numOfBucket) {
bucketIndex -= 1;
@@ -5380,10 +5450,10 @@ int64_t tsdbGetNumOfRowsInMemTable(STsdbReader* pReader) {
return rows;
}
-int32_t tsdbGetTableSchema(SVnode* pVnode, int64_t uid, STSchema** pSchema, int64_t* suid) {
+int32_t tsdbGetTableSchema(void* pVnode, int64_t uid, STSchema** pSchema, int64_t* suid) {
SMetaReader mr = {0};
- metaReaderInit(&mr, pVnode->pMeta, 0);
- int32_t code = metaGetTableEntryByUidCache(&mr, uid);
+ metaReaderInit(&mr, ((SVnode*)pVnode)->pMeta, 0);
+ int32_t code = metaReaderGetTableEntryByUidCache(&mr, uid);
if (code != TSDB_CODE_SUCCESS) {
terrno = TSDB_CODE_TDB_INVALID_TABLE_ID;
metaReaderClear(&mr);
@@ -5396,7 +5466,7 @@ int32_t tsdbGetTableSchema(SVnode* pVnode, int64_t uid, STSchema** pSchema, int6
if (mr.me.type == TSDB_CHILD_TABLE) {
tDecoderClear(&mr.coder);
*suid = mr.me.ctbEntry.suid;
- code = metaGetTableEntryByUidCache(&mr, *suid);
+ code = metaReaderGetTableEntryByUidCache(&mr, *suid);
if (code != TSDB_CODE_SUCCESS) {
terrno = TSDB_CODE_TDB_INVALID_TABLE_ID;
metaReaderClear(&mr);
@@ -5412,7 +5482,7 @@ int32_t tsdbGetTableSchema(SVnode* pVnode, int64_t uid, STSchema** pSchema, int6
metaReaderClear(&mr);
// get the newest table schema version
- code = metaGetTbTSchemaEx(pVnode->pMeta, *suid, uid, -1, pSchema);
+ code = metaGetTbTSchemaEx(((SVnode*)pVnode)->pMeta, *suid, uid, -1, pSchema);
return code;
}
@@ -5545,7 +5615,7 @@ int64_t tsdbGetLastTimestamp(SVnode* pVnode, void* pTableList, int32_t numOfTabl
int64_t key = INT64_MIN;
for(int32_t i = 0; i < numOfTables; ++i) {
- int32_t code = tsdbReaderOpen(pVnode, &cond, &pTableKeyInfo[i], 1, pBlock, &pReader, pIdStr, false, NULL);
+ int32_t code = tsdbReaderOpen(pVnode, &cond, &pTableKeyInfo[i], 1, pBlock, (void**)&pReader, pIdStr, false, NULL);
if (code != TSDB_CODE_SUCCESS) {
return code;
}
diff --git a/source/dnode/vnode/src/tsdb/tsdbReaderWriter.c b/source/dnode/vnode/src/tsdb/tsdbReaderWriter.c
index 50fd9d7aa7035adbfe8d17fde078b301483fe015..4b677533e73983d4da4ef44c230d59f2a8e4d847 100644
--- a/source/dnode/vnode/src/tsdb/tsdbReaderWriter.c
+++ b/source/dnode/vnode/src/tsdb/tsdbReaderWriter.c
@@ -523,7 +523,7 @@ static int32_t tsdbWriteBlockSma(SDataFWriter *pWriter, SBlockData *pBlockData,
for (int32_t iColData = 0; iColData < pBlockData->nColData; iColData++) {
SColData *pColData = tBlockDataGetColDataByIdx(pBlockData, iColData);
- if ((!pColData->smaOn) || IS_VAR_DATA_TYPE(pColData->type) || ((pColData->flag & HAS_VALUE) == 0)) continue;
+ if ((!pColData->smaOn) || ((pColData->flag & HAS_VALUE) == 0)) continue;
SColumnDataAgg sma = {.colId = pColData->cid};
tColDataCalcSMA[pColData->type](pColData, &sma.sum, &sma.max, &sma.min, &sma.numOfNull);
@@ -1489,6 +1489,10 @@ int32_t tsdbDelFReaderClose(SDelFReader **ppReader) {
}
int32_t tsdbReadDelData(SDelFReader *pReader, SDelIdx *pDelIdx, SArray *aDelData) {
+ return tsdbReadDelDatav1(pReader, pDelIdx, aDelData, INT64_MAX);
+}
+
+int32_t tsdbReadDelDatav1(SDelFReader *pReader, SDelIdx *pDelIdx, SArray *aDelData, int64_t maxVer) {
int32_t code = 0;
int64_t offset = pDelIdx->offset;
int64_t size = pDelIdx->size;
@@ -1510,11 +1514,15 @@ int32_t tsdbReadDelData(SDelFReader *pReader, SDelIdx *pDelIdx, SArray *aDelData
SDelData delData;
n += tGetDelData(pReader->aBuf[0] + n, &delData);
- if (taosArrayPush(aDelData, &delData) == NULL) {
- code = TSDB_CODE_OUT_OF_MEMORY;
- goto _err;
+ if (delData.version > maxVer) {
+ continue;
}
+ if (taosArrayPush(aDelData, &delData) == NULL) {
+ code = TSDB_CODE_OUT_OF_MEMORY;
+ goto _err;
+ }
}
+
ASSERT(n == size);
return code;
diff --git a/source/dnode/vnode/src/tsdb/tsdbSnapshot.c b/source/dnode/vnode/src/tsdb/tsdbSnapshot.c
index dfea125cc1026eefca2abdb7ed5bcbc9bd02fd1c..b5ca716701b2c479fbc3e04ace61829d91184c69 100644
--- a/source/dnode/vnode/src/tsdb/tsdbSnapshot.c
+++ b/source/dnode/vnode/src/tsdb/tsdbSnapshot.c
@@ -1166,7 +1166,7 @@ static int32_t tsdbSnapWriteDelTableDataStart(STsdbSnapWriter* pWriter, TABLEID*
int32_t c = tTABLEIDCmprFn(pDelIdx, &pWriter->tbid);
if (c < 0) {
- code = tsdbReadDelData(pWriter->pDelFReader, pDelIdx, pWriter->pTIter->tIter.aDelData);
+ code = tsdbReadDelDatav1(pWriter->pDelFReader, pDelIdx, pWriter->pTIter->tIter.aDelData, INT64_MAX);
TSDB_CHECK_CODE(code, lino, _exit);
SDelIdx* pDelIdxNew = taosArrayReserve(pWriter->aDelIdx, 1);
@@ -1183,7 +1183,7 @@ static int32_t tsdbSnapWriteDelTableDataStart(STsdbSnapWriter* pWriter, TABLEID*
pWriter->pTIter->tIter.iDelIdx++;
} else if (c == 0) {
- code = tsdbReadDelData(pWriter->pDelFReader, pDelIdx, pWriter->aDelData);
+ code = tsdbReadDelDatav1(pWriter->pDelFReader, pDelIdx, pWriter->aDelData, INT64_MAX);
TSDB_CHECK_CODE(code, lino, _exit);
pWriter->pTIter->tIter.iDelIdx++;
diff --git a/source/dnode/vnode/src/vnd/vnodeInitApi.c b/source/dnode/vnode/src/vnd/vnodeInitApi.c
new file mode 100644
index 0000000000000000000000000000000000000000..d2db6368a2d041fa25916ae8202c61e7339b6c8e
--- /dev/null
+++ b/source/dnode/vnode/src/vnd/vnodeInitApi.c
@@ -0,0 +1,244 @@
+/*
+ * Copyright (c) 2019 TAOS Data, Inc.
+ *
+ * This program is free software: you can use, redistribute, and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3
+ * or later ("AGPL"), as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see .
+ */
+
+#include "meta.h"
+#include "storageapi.h"
+#include "tstreamUpdate.h"
+#include "vnodeInt.h"
+
+static void initTsdbReaderAPI(TsdReader* pReader);
+static void initMetadataAPI(SStoreMeta* pMeta);
+static void initTqAPI(SStoreTqReader* pTq);
+static void initStateStoreAPI(SStateStore* pStore);
+static void initMetaReaderAPI(SStoreMetaReader* pMetaReader);
+static void initMetaFilterAPI(SMetaDataFilterAPI* pFilter);
+static void initFunctionStateStore(SFunctionStateStore* pStore);
+static void initCacheFn(SStoreCacheReader* pCache);
+static void initSnapshotFn(SStoreSnapshotFn* pSnapshot);
+
+void initStorageAPI(SStorageAPI* pAPI) {
+ initTsdbReaderAPI(&pAPI->tsdReader);
+ initMetadataAPI(&pAPI->metaFn);
+ initStateStoreAPI(&pAPI->stateStore);
+ initMetaReaderAPI(&pAPI->metaReaderFn);
+ initMetaFilterAPI(&pAPI->metaFilter);
+ initTqAPI(&pAPI->tqReaderFn);
+ initFunctionStateStore(&pAPI->functionStore);
+ initCacheFn(&pAPI->cacheFn);
+ initSnapshotFn(&pAPI->snapshotFn);
+}
+
+void initTsdbReaderAPI(TsdReader* pReader) {
+ pReader->tsdReaderOpen = (int32_t(*)(void*, SQueryTableDataCond*, void*, int32_t, SSDataBlock*, void**, const char*,
+ bool, SHashObj**))tsdbReaderOpen;
+ pReader->tsdReaderClose = tsdbReaderClose;
+
+ pReader->tsdNextDataBlock = tsdbNextDataBlock;
+
+ pReader->tsdReaderRetrieveDataBlock = tsdbRetrieveDataBlock;
+ pReader->tsdReaderReleaseDataBlock = tsdbReleaseDataBlock;
+
+ pReader->tsdReaderRetrieveBlockSMAInfo = tsdbRetrieveDatablockSMA;
+
+ pReader->tsdReaderNotifyClosing = tsdbReaderSetCloseFlag;
+ pReader->tsdReaderResetStatus = tsdbReaderReset;
+
+ pReader->tsdReaderGetDataBlockDistInfo = tsdbGetFileBlocksDistInfo;
+ pReader->tsdReaderGetNumOfInMemRows = tsdbGetNumOfRowsInMemTable; // todo this function should be moved away
+
+ pReader->tsdSetQueryTableList = tsdbSetTableList;
+ pReader->tsdSetReaderTaskId = (void (*)(void*, const char*))tsdbReaderSetId;
+}
+
+void initMetadataAPI(SStoreMeta* pMeta) {
+ pMeta->isTableExisted = metaIsTableExist;
+
+ pMeta->openTableMetaCursor = metaOpenTbCursor;
+ pMeta->closeTableMetaCursor = metaCloseTbCursor;
+ pMeta->pauseTableMetaCursor = metaPauseTbCursor;
+ pMeta->resumeTableMetaCursor = metaResumeTbCursor;
+ pMeta->cursorNext = metaTbCursorNext;
+ pMeta->cursorPrev = metaTbCursorPrev;
+
+ pMeta->getBasicInfo = vnodeGetInfo;
+ pMeta->getNumOfChildTables = metaGetStbStats;
+
+ pMeta->getChildTableList = vnodeGetCtbIdList;
+
+ pMeta->storeGetIndexInfo = vnodeGetIdx;
+ pMeta->getInvertIndex = vnodeGetIvtIdx;
+
+ pMeta->extractTagVal = (const void* (*)(const void*, int16_t, STagVal*))metaGetTableTagVal;
+ pMeta->getTableTags = metaGetTableTags;
+ pMeta->getTableTagsByUid = metaGetTableTagsByUids;
+
+ pMeta->getTableUidByName = metaGetTableUidByName;
+ pMeta->getTableTypeByName = metaGetTableTypeByName;
+ pMeta->getTableNameByUid = metaGetTableNameByUid;
+
+ pMeta->getTableSchema = tsdbGetTableSchema; // todo refactor
+ pMeta->storeGetTableList = vnodeGetTableList;
+
+ pMeta->getCachedTableList = metaGetCachedTableUidList;
+ pMeta->putCachedTableList = metaUidFilterCachePut;
+
+ pMeta->metaGetCachedTbGroup = metaGetCachedTbGroup;
+ pMeta->metaPutTbGroupToCache = metaPutTbGroupToCache;
+}
+
+void initTqAPI(SStoreTqReader* pTq) {
+ pTq->tqReaderOpen = tqReaderOpen;
+ pTq->tqReaderSetColIdList = tqReaderSetColIdList;
+
+ pTq->tqReaderClose = tqReaderClose;
+ pTq->tqReaderSeek = tqReaderSeek;
+ pTq->tqRetrieveBlock = tqRetrieveDataBlock;
+
+ pTq->tqReaderNextBlockInWal = tqNextBlockInWal;
+
+ pTq->tqNextBlockImpl = tqNextBlockImpl; // todo remove it
+
+ pTq->tqReaderAddTables = tqReaderAddTbUidList;
+ pTq->tqReaderSetQueryTableList = tqReaderSetTbUidList;
+
+ pTq->tqReaderRemoveTables = tqReaderRemoveTbUidList;
+
+ pTq->tqReaderIsQueriedTable = tqReaderIsQueriedTable;
+ pTq->tqReaderCurrentBlockConsumed = tqCurrentBlockConsumed;
+
+ pTq->tqReaderGetWalReader = tqGetWalReader; // todo remove it
+ pTq->tqReaderRetrieveTaosXBlock = tqRetrieveTaosxBlock; // todo remove it
+
+ pTq->tqReaderSetSubmitMsg = tqReaderSetSubmitMsg; // todo remove it
+ pTq->tqGetResultBlock = tqGetResultBlock;
+
+ pTq->tqReaderNextBlockFilterOut = tqNextDataBlockFilterOut;
+}
+
+void initStateStoreAPI(SStateStore* pStore) {
+ pStore->streamFileStateInit = streamFileStateInit;
+ pStore->updateInfoDestoryColseWinSBF = updateInfoDestoryColseWinSBF;
+
+ pStore->streamStateGetByPos = streamStateGetByPos;
+
+ pStore->streamStatePutParName = streamStatePutParName;
+ pStore->streamStateGetParName = streamStateGetParName;
+
+ pStore->streamStateAddIfNotExist = streamStateAddIfNotExist;
+ pStore->streamStateReleaseBuf = streamStateReleaseBuf;
+ pStore->streamStateFreeVal = streamStateFreeVal;
+
+ pStore->streamStatePut = streamStatePut;
+ pStore->streamStateGet = streamStateGet;
+ pStore->streamStateCheck = streamStateCheck;
+ pStore->streamStateGetByPos = streamStateGetByPos;
+ pStore->streamStateDel = streamStateDel;
+ pStore->streamStateClear = streamStateClear;
+ pStore->streamStateSaveInfo = streamStateSaveInfo;
+ pStore->streamStateGetInfo = streamStateGetInfo;
+ pStore->streamStateSetNumber = streamStateSetNumber;
+
+ pStore->streamStateFillPut = streamStateFillPut;
+ pStore->streamStateFillGet = streamStateFillGet;
+ pStore->streamStateFillDel = streamStateFillDel;
+
+ pStore->streamStateCurNext = streamStateCurNext;
+ pStore->streamStateCurPrev = streamStateCurPrev;
+
+ pStore->streamStateGetAndCheckCur = streamStateGetAndCheckCur;
+ pStore->streamStateSeekKeyNext = streamStateSeekKeyNext;
+ pStore->streamStateFillSeekKeyNext = streamStateFillSeekKeyNext;
+ pStore->streamStateFillSeekKeyPrev = streamStateFillSeekKeyPrev;
+ pStore->streamStateFreeCur = streamStateFreeCur;
+
+ pStore->streamStateGetGroupKVByCur = streamStateGetGroupKVByCur;
+ pStore->streamStateGetKVByCur = streamStateGetKVByCur;
+
+ pStore->streamStateSessionAddIfNotExist = streamStateSessionAddIfNotExist;
+ pStore->streamStateSessionPut = streamStateSessionPut;
+ pStore->streamStateSessionGet = streamStateSessionGet;
+ pStore->streamStateSessionDel = streamStateSessionDel;
+ pStore->streamStateSessionClear = streamStateSessionClear;
+ pStore->streamStateSessionGetKVByCur = streamStateSessionGetKVByCur;
+ pStore->streamStateStateAddIfNotExist = streamStateStateAddIfNotExist;
+ pStore->streamStateSessionGetKeyByRange = streamStateSessionGetKeyByRange;
+
+ pStore->updateInfoInit = updateInfoInit;
+ pStore->updateInfoFillBlockData = updateInfoFillBlockData;
+ pStore->updateInfoIsUpdated = updateInfoIsUpdated;
+ pStore->updateInfoIsTableInserted = updateInfoIsTableInserted;
+ pStore->updateInfoDestroy = updateInfoDestroy;
+
+ pStore->updateInfoInitP = updateInfoInitP;
+ pStore->updateInfoAddCloseWindowSBF = updateInfoAddCloseWindowSBF;
+ pStore->updateInfoDestoryColseWinSBF = updateInfoDestoryColseWinSBF;
+ pStore->updateInfoSerialize = updateInfoSerialize;
+ pStore->updateInfoDeserialize = updateInfoDeserialize;
+
+ pStore->streamStateSessionSeekKeyNext = streamStateSessionSeekKeyNext;
+ pStore->streamStateSessionSeekKeyCurrentPrev = streamStateSessionSeekKeyCurrentPrev;
+ pStore->streamStateSessionSeekKeyCurrentNext = streamStateSessionSeekKeyCurrentNext;
+
+ pStore->streamFileStateInit = streamFileStateInit;
+
+ pStore->streamFileStateDestroy = streamFileStateDestroy;
+ pStore->streamFileStateClear = streamFileStateClear;
+ pStore->needClearDiskBuff = needClearDiskBuff;
+
+ pStore->streamStateOpen = streamStateOpen;
+ pStore->streamStateClose = streamStateClose;
+ pStore->streamStateBegin = streamStateBegin;
+ pStore->streamStateCommit = streamStateCommit;
+ pStore->streamStateDestroy = streamStateDestroy;
+ pStore->streamStateDeleteCheckPoint = streamStateDeleteCheckPoint;
+}
+
+void initMetaReaderAPI(SStoreMetaReader* pMetaReader) {
+ pMetaReader->initReader = _metaReaderInit;
+ pMetaReader->clearReader = metaReaderClear;
+
+ pMetaReader->getTableEntryByUid = metaReaderGetTableEntryByUid;
+
+ pMetaReader->getEntryGetUidCache = metaReaderGetTableEntryByUidCache;
+ pMetaReader->getTableEntryByName = metaGetTableEntryByName;
+
+ pMetaReader->readerReleaseLock = metaReaderReleaseLock;
+}
+
+void initMetaFilterAPI(SMetaDataFilterAPI* pFilter) {
+ pFilter->metaFilterCreateTime = metaFilterCreateTime;
+ pFilter->metaFilterTableIds = metaFilterTableIds;
+ pFilter->metaFilterTableName = metaFilterTableName;
+ pFilter->metaFilterTtl = metaFilterTtl;
+}
+
+void initFunctionStateStore(SFunctionStateStore* pStore) {
+ pStore->streamStateFuncPut = streamStateFuncPut;
+ pStore->streamStateFuncGet = streamStateFuncGet;
+}
+
+void initCacheFn(SStoreCacheReader* pCache) {
+ pCache->openReader = tsdbCacherowsReaderOpen;
+ pCache->closeReader = tsdbCacherowsReaderClose;
+ pCache->retrieveRows = tsdbRetrieveCacheRows;
+ pCache->reuseReader = tsdbReuseCacherowsReader;
+}
+
+void initSnapshotFn(SStoreSnapshotFn* pSnapshot) {
+ pSnapshot->createSnapshot = setForSnapShot;
+ pSnapshot->destroySnapshot = destroySnapContext;
+ pSnapshot->getMetaTableInfoFromSnapshot = getMetaTableInfoFromSnapshot;
+ pSnapshot->getTableInfoFromSnapshot = getTableInfoFromSnapshot;
+}
diff --git a/source/dnode/vnode/src/vnd/vnodeQuery.c b/source/dnode/vnode/src/vnd/vnodeQuery.c
index 303d2a9ca449de504a582c8472951a1bf6c6c683..022fc4c951cf2b1d27326dc4788affc8bb9caf48 100644
--- a/source/dnode/vnode/src/vnd/vnodeQuery.c
+++ b/source/dnode/vnode/src/vnd/vnodeQuery.c
@@ -80,7 +80,7 @@ int vnodeGetTableMeta(SVnode *pVnode, SRpcMsg *pMsg, bool direct) {
metaRsp.suid = mer1.me.uid;
} else if (mer1.me.type == TSDB_CHILD_TABLE) {
metaReaderInit(&mer2, pVnode->pMeta, META_READER_NOLOCK);
- if (metaGetTableEntryByUid(&mer2, mer1.me.ctbEntry.suid) < 0) goto _exit;
+ if (metaReaderGetTableEntryByUid(&mer2, mer1.me.ctbEntry.suid) < 0) goto _exit;
strcpy(metaRsp.stbName, mer2.me.name);
metaRsp.suid = mer2.me.uid;
@@ -189,7 +189,7 @@ int vnodeGetTableCfg(SVnode *pVnode, SRpcMsg *pMsg, bool direct) {
goto _exit;
} else if (mer1.me.type == TSDB_CHILD_TABLE) {
metaReaderInit(&mer2, pVnode->pMeta, 0);
- if (metaGetTableEntryByUid(&mer2, mer1.me.ctbEntry.suid) < 0) goto _exit;
+ if (metaReaderGetTableEntryByUid(&mer2, mer1.me.ctbEntry.suid) < 0) goto _exit;
strcpy(cfgRsp.stbName, mer2.me.name);
schema = mer2.me.stbEntry.schemaRow;
@@ -410,13 +410,32 @@ void vnodeResetLoad(SVnode *pVnode, SVnodeLoad *pLoad) {
"nBatchInsertSuccess");
}
-void vnodeGetInfo(SVnode *pVnode, const char **dbname, int32_t *vgId) {
+void vnodeGetInfo(void *pVnode, const char **dbname, int32_t *vgId, int64_t* numOfTables, int64_t* numOfNormalTables) {
+ SVnode* pVnodeObj = pVnode;
+ SVnodeCfg* pConf = &pVnodeObj->config;
+
if (dbname) {
- *dbname = pVnode->config.dbname;
+ *dbname = pConf->dbname;
}
if (vgId) {
- *vgId = TD_VID(pVnode);
+ *vgId = TD_VID(pVnodeObj);
+ }
+
+ if (numOfTables) {
+ *numOfTables = pConf->vndStats.numOfNTables + pConf->vndStats.numOfCTables;
+ }
+
+ if (numOfNormalTables) {
+ *numOfNormalTables = pConf->vndStats.numOfNTables;
+ }
+}
+
+int32_t vnodeGetTableList(void* pVnode, int8_t type, SArray* pList) {
+ if (type == TSDB_SUPER_TABLE) {
+ return vnodeGetStbIdList(pVnode, 0, pList);
+ } else {
+ return TSDB_CODE_INVALID_PARA;
}
}
@@ -440,8 +459,10 @@ int32_t vnodeGetAllTableList(SVnode *pVnode, uint64_t uid, SArray *list) {
int32_t vnodeGetCtbIdListByFilter(SVnode *pVnode, int64_t suid, SArray *list, bool (*filter)(void *arg), void *arg) {
return 0;
}
-int32_t vnodeGetCtbIdList(SVnode *pVnode, int64_t suid, SArray *list) {
- SMCtbCursor *pCur = metaOpenCtbCursor(pVnode->pMeta, suid, 1);
+
+int32_t vnodeGetCtbIdList(void *pVnode, int64_t suid, SArray *list) {
+ SVnode *pVnodeObj = pVnode;
+ SMCtbCursor *pCur = metaOpenCtbCursor(pVnodeObj->pMeta, suid, 1);
while (1) {
tb_uid_t id = metaCtbCursorNext(pCur);
@@ -529,10 +550,8 @@ int32_t vnodeGetTimeSeriesNum(SVnode *pVnode, int64_t *num) {
for (int64_t i = 0; i < arrSize; ++i) {
tb_uid_t suid = *(tb_uid_t *)taosArrayGet(suidList, i);
- SMetaStbStats stats = {0};
- metaGetStbStats(pVnode->pMeta, suid, &stats);
- int64_t ctbNum = stats.ctbNum;
- // vnodeGetCtbNum(pVnode, id, &ctbNum);
+ int64_t ctbNum = 0;
+ metaGetStbStats(pVnode, suid, &ctbNum);
int numOfCols = 0;
vnodeGetStbColumnNum(pVnode, suid, &numOfCols);
@@ -567,16 +586,17 @@ int32_t vnodeGetAllCtbNum(SVnode *pVnode, int64_t *num) {
return TSDB_CODE_SUCCESS;
}
-void *vnodeGetIdx(SVnode *pVnode) {
+void *vnodeGetIdx(void *pVnode) {
if (pVnode == NULL) {
return NULL;
}
- return metaGetIdx(pVnode->pMeta);
+
+ return metaGetIdx(((SVnode*)pVnode)->pMeta);
}
-void *vnodeGetIvtIdx(SVnode *pVnode) {
+void *vnodeGetIvtIdx(void *pVnode) {
if (pVnode == NULL) {
return NULL;
}
- return metaGetIvtIdx(pVnode->pMeta);
+ return metaGetIvtIdx(((SVnode*)pVnode)->pMeta);
}
diff --git a/source/dnode/vnode/src/vnd/vnodeSvr.c b/source/dnode/vnode/src/vnd/vnodeSvr.c
index fe1ccf90c872c090e6d0957e742b7879b4f18640..c3fb5e5ad4c3841ab48f8b86182882b19d543938 100644
--- a/source/dnode/vnode/src/vnd/vnodeSvr.c
+++ b/source/dnode/vnode/src/vnd/vnodeSvr.c
@@ -19,23 +19,23 @@
#include "vnode.h"
#include "vnodeInt.h"
-static int32_t vnodeProcessCreateStbReq(SVnode *pVnode, int64_t version, void *pReq, int32_t len, SRpcMsg *pRsp);
-static int32_t vnodeProcessAlterStbReq(SVnode *pVnode, int64_t version, void *pReq, int32_t len, SRpcMsg *pRsp);
-static int32_t vnodeProcessDropStbReq(SVnode *pVnode, int64_t version, void *pReq, int32_t len, SRpcMsg *pRsp);
-static int32_t vnodeProcessCreateTbReq(SVnode *pVnode, int64_t version, void *pReq, int32_t len, SRpcMsg *pRsp);
-static int32_t vnodeProcessAlterTbReq(SVnode *pVnode, int64_t version, void *pReq, int32_t len, SRpcMsg *pRsp);
-static int32_t vnodeProcessDropTbReq(SVnode *pVnode, int64_t version, void *pReq, int32_t len, SRpcMsg *pRsp);
-static int32_t vnodeProcessSubmitReq(SVnode *pVnode, int64_t version, void *pReq, int32_t len, SRpcMsg *pRsp);
-static int32_t vnodeProcessCreateTSmaReq(SVnode *pVnode, int64_t version, void *pReq, int32_t len, SRpcMsg *pRsp);
-static int32_t vnodeProcessAlterConfirmReq(SVnode *pVnode, int64_t version, void *pReq, int32_t len, SRpcMsg *pRsp);
-static int32_t vnodeProcessAlterConfigReq(SVnode *pVnode, int64_t version, void *pReq, int32_t len, SRpcMsg *pRsp);
-static int32_t vnodeProcessDropTtlTbReq(SVnode *pVnode, int64_t version, void *pReq, int32_t len, SRpcMsg *pRsp);
-static int32_t vnodeProcessTrimReq(SVnode *pVnode, int64_t version, void *pReq, int32_t len, SRpcMsg *pRsp);
-static int32_t vnodeProcessDeleteReq(SVnode *pVnode, int64_t version, void *pReq, int32_t len, SRpcMsg *pRsp);
-static int32_t vnodeProcessBatchDeleteReq(SVnode *pVnode, int64_t version, void *pReq, int32_t len, SRpcMsg *pRsp);
-static int32_t vnodeProcessCreateIndexReq(SVnode *pVnode, int64_t version, void *pReq, int32_t len, SRpcMsg *pRsp);
-static int32_t vnodeProcessDropIndexReq(SVnode *pVnode, int64_t version, void *pReq, int32_t len, SRpcMsg *pRsp);
-static int32_t vnodeProcessCompactVnodeReq(SVnode *pVnode, int64_t version, void *pReq, int32_t len, SRpcMsg *pRsp);
+static int32_t vnodeProcessCreateStbReq(SVnode *pVnode, int64_t ver, void *pReq, int32_t len, SRpcMsg *pRsp);
+static int32_t vnodeProcessAlterStbReq(SVnode *pVnode, int64_t ver, void *pReq, int32_t len, SRpcMsg *pRsp);
+static int32_t vnodeProcessDropStbReq(SVnode *pVnode, int64_t ver, void *pReq, int32_t len, SRpcMsg *pRsp);
+static int32_t vnodeProcessCreateTbReq(SVnode *pVnode, int64_t ver, void *pReq, int32_t len, SRpcMsg *pRsp);
+static int32_t vnodeProcessAlterTbReq(SVnode *pVnode, int64_t ver, void *pReq, int32_t len, SRpcMsg *pRsp);
+static int32_t vnodeProcessDropTbReq(SVnode *pVnode, int64_t ver, void *pReq, int32_t len, SRpcMsg *pRsp);
+static int32_t vnodeProcessSubmitReq(SVnode *pVnode, int64_t ver, void *pReq, int32_t len, SRpcMsg *pRsp);
+static int32_t vnodeProcessCreateTSmaReq(SVnode *pVnode, int64_t ver, void *pReq, int32_t len, SRpcMsg *pRsp);
+static int32_t vnodeProcessAlterConfirmReq(SVnode *pVnode, int64_t ver, void *pReq, int32_t len, SRpcMsg *pRsp);
+static int32_t vnodeProcessAlterConfigReq(SVnode *pVnode, int64_t ver, void *pReq, int32_t len, SRpcMsg *pRsp);
+static int32_t vnodeProcessDropTtlTbReq(SVnode *pVnode, int64_t ver, void *pReq, int32_t len, SRpcMsg *pRsp);
+static int32_t vnodeProcessTrimReq(SVnode *pVnode, int64_t ver, void *pReq, int32_t len, SRpcMsg *pRsp);
+static int32_t vnodeProcessDeleteReq(SVnode *pVnode, int64_t ver, void *pReq, int32_t len, SRpcMsg *pRsp);
+static int32_t vnodeProcessBatchDeleteReq(SVnode *pVnode, int64_t ver, void *pReq, int32_t len, SRpcMsg *pRsp);
+static int32_t vnodeProcessCreateIndexReq(SVnode *pVnode, int64_t ver, void *pReq, int32_t len, SRpcMsg *pRsp);
+static int32_t vnodeProcessDropIndexReq(SVnode *pVnode, int64_t ver, void *pReq, int32_t len, SRpcMsg *pRsp);
+static int32_t vnodeProcessCompactVnodeReq(SVnode *pVnode, int64_t ver, void *pReq, int32_t len, SRpcMsg *pRsp);
static int32_t vnodePreprocessCreateTableReq(SVnode *pVnode, SDecoder *pCoder, int64_t ctime, int64_t *pUid) {
int32_t code = 0;
@@ -245,12 +245,14 @@ _exit:
static int32_t vnodePreProcessDeleteMsg(SVnode *pVnode, SRpcMsg *pMsg) {
int32_t code = 0;
- int32_t size;
- int32_t ret;
- uint8_t *pCont;
- SEncoder *pCoder = &(SEncoder){0};
- SDeleteRes res = {0};
- SReadHandle handle = {.meta = pVnode->pMeta, .config = &pVnode->config, .vnode = pVnode, .pMsgCb = &pVnode->msgCb};
+ int32_t size;
+ int32_t ret;
+ uint8_t *pCont;
+ SEncoder *pCoder = &(SEncoder){0};
+ SDeleteRes res = {0};
+
+ SReadHandle handle = {.config = &pVnode->config, .vnode = pVnode, .pMsgCb = &pVnode->msgCb};
+ initStorageAPI(&handle.api);
code = qWorkerProcessDeleteMsg(&handle, pVnode->pQuery, pMsg, &res);
if (code) goto _exit;
@@ -301,32 +303,31 @@ _exit:
return code;
}
-int32_t vnodeProcessWriteMsg(SVnode *pVnode, SRpcMsg *pMsg, int64_t version, SRpcMsg *pRsp) {
+int32_t vnodeProcessWriteMsg(SVnode *pVnode, SRpcMsg *pMsg, int64_t ver, SRpcMsg *pRsp) {
void *ptr = NULL;
void *pReq;
int32_t len;
int32_t ret;
- if (version <= pVnode->state.applied) {
- vError("vgId:%d, duplicate write request. version: %" PRId64 ", applied: %" PRId64 "", TD_VID(pVnode), version,
+ if (ver <= pVnode->state.applied) {
+ vError("vgId:%d, duplicate write request. ver: %" PRId64 ", applied: %" PRId64 "", TD_VID(pVnode), ver,
pVnode->state.applied);
terrno = TSDB_CODE_VND_DUP_REQUEST;
return -1;
}
- vDebug("vgId:%d, start to process write request %s, index:%" PRId64, TD_VID(pVnode), TMSG_INFO(pMsg->msgType),
- version);
+ vDebug("vgId:%d, start to process write request %s, index:%" PRId64, TD_VID(pVnode), TMSG_INFO(pMsg->msgType), ver);
ASSERT(pVnode->state.applyTerm <= pMsg->info.conn.applyTerm);
- ASSERT(pVnode->state.applied + 1 == version);
+ ASSERT(pVnode->state.applied + 1 == ver);
- atomic_store_64(&pVnode->state.applied, version);
+ atomic_store_64(&pVnode->state.applied, ver);
atomic_store_64(&pVnode->state.applyTerm, pMsg->info.conn.applyTerm);
if (!syncUtilUserCommit(pMsg->msgType)) goto _exit;
if (pMsg->msgType == TDMT_VND_STREAM_RECOVER_BLOCKING_STAGE || pMsg->msgType == TDMT_STREAM_TASK_CHECK_RSP) {
- if (tqCheckLogInWal(pVnode->pTq, version)) return 0;
+ if (tqCheckLogInWal(pVnode->pTq, ver)) return 0;
}
// skip header
@@ -337,123 +338,123 @@ int32_t vnodeProcessWriteMsg(SVnode *pVnode, SRpcMsg *pMsg, int64_t version, SRp
switch (pMsg->msgType) {
/* META */
case TDMT_VND_CREATE_STB:
- if (vnodeProcessCreateStbReq(pVnode, version, pReq, len, pRsp) < 0) goto _err;
+ if (vnodeProcessCreateStbReq(pVnode, ver, pReq, len, pRsp) < 0) goto _err;
break;
case TDMT_VND_ALTER_STB:
- if (vnodeProcessAlterStbReq(pVnode, version, pReq, len, pRsp) < 0) goto _err;
+ if (vnodeProcessAlterStbReq(pVnode, ver, pReq, len, pRsp) < 0) goto _err;
break;
case TDMT_VND_DROP_STB:
- if (vnodeProcessDropStbReq(pVnode, version, pReq, len, pRsp) < 0) goto _err;
+ if (vnodeProcessDropStbReq(pVnode, ver, pReq, len, pRsp) < 0) goto _err;
break;
case TDMT_VND_CREATE_TABLE:
- if (vnodeProcessCreateTbReq(pVnode, version, pReq, len, pRsp) < 0) goto _err;
+ if (vnodeProcessCreateTbReq(pVnode, ver, pReq, len, pRsp) < 0) goto _err;
break;
case TDMT_VND_ALTER_TABLE:
- if (vnodeProcessAlterTbReq(pVnode, version, pReq, len, pRsp) < 0) goto _err;
+ if (vnodeProcessAlterTbReq(pVnode, ver, pReq, len, pRsp) < 0) goto _err;
break;
case TDMT_VND_DROP_TABLE:
- if (vnodeProcessDropTbReq(pVnode, version, pReq, len, pRsp) < 0) goto _err;
+ if (vnodeProcessDropTbReq(pVnode, ver, pReq, len, pRsp) < 0) goto _err;
break;
case TDMT_VND_DROP_TTL_TABLE:
- if (vnodeProcessDropTtlTbReq(pVnode, version, pReq, len, pRsp) < 0) goto _err;
+ if (vnodeProcessDropTtlTbReq(pVnode, ver, pReq, len, pRsp) < 0) goto _err;
break;
case TDMT_VND_TRIM:
- if (vnodeProcessTrimReq(pVnode, version, pReq, len, pRsp) < 0) goto _err;
+ if (vnodeProcessTrimReq(pVnode, ver, pReq, len, pRsp) < 0) goto _err;
break;
case TDMT_VND_CREATE_SMA:
- if (vnodeProcessCreateTSmaReq(pVnode, version, pReq, len, pRsp) < 0) goto _err;
+ if (vnodeProcessCreateTSmaReq(pVnode, ver, pReq, len, pRsp) < 0) goto _err;
break;
/* TSDB */
case TDMT_VND_SUBMIT:
- if (vnodeProcessSubmitReq(pVnode, version, pMsg->pCont, pMsg->contLen, pRsp) < 0) goto _err;
+ if (vnodeProcessSubmitReq(pVnode, ver, pMsg->pCont, pMsg->contLen, pRsp) < 0) goto _err;
break;
case TDMT_VND_DELETE:
- if (vnodeProcessDeleteReq(pVnode, version, pReq, len, pRsp) < 0) goto _err;
+ if (vnodeProcessDeleteReq(pVnode, ver, pReq, len, pRsp) < 0) goto _err;
break;
case TDMT_VND_BATCH_DEL:
- if (vnodeProcessBatchDeleteReq(pVnode, version, pReq, len, pRsp) < 0) goto _err;
+ if (vnodeProcessBatchDeleteReq(pVnode, ver, pReq, len, pRsp) < 0) goto _err;
break;
/* TQ */
case TDMT_VND_TMQ_SUBSCRIBE:
- if (tqProcessSubscribeReq(pVnode->pTq, version, pReq, len) < 0) {
+ if (tqProcessSubscribeReq(pVnode->pTq, ver, pReq, len) < 0) {
goto _err;
}
break;
case TDMT_VND_TMQ_DELETE_SUB:
- if (tqProcessDeleteSubReq(pVnode->pTq, version, pMsg->pCont, pMsg->contLen) < 0) {
+ if (tqProcessDeleteSubReq(pVnode->pTq, ver, pMsg->pCont, pMsg->contLen) < 0) {
goto _err;
}
break;
case TDMT_VND_TMQ_COMMIT_OFFSET:
- if (tqProcessOffsetCommitReq(pVnode->pTq, version, pReq, pMsg->contLen - sizeof(SMsgHead)) < 0) {
+ if (tqProcessOffsetCommitReq(pVnode->pTq, ver, pReq, pMsg->contLen - sizeof(SMsgHead)) < 0) {
goto _err;
}
break;
case TDMT_VND_TMQ_SEEK_TO_OFFSET:
- if (tqProcessSeekReq(pVnode->pTq, version, pReq, pMsg->contLen - sizeof(SMsgHead)) < 0) {
+ if (tqProcessSeekReq(pVnode->pTq, ver, pReq, pMsg->contLen - sizeof(SMsgHead)) < 0) {
goto _err;
}
break;
case TDMT_VND_TMQ_ADD_CHECKINFO:
- if (tqProcessAddCheckInfoReq(pVnode->pTq, version, pReq, len) < 0) {
+ if (tqProcessAddCheckInfoReq(pVnode->pTq, ver, pReq, len) < 0) {
goto _err;
}
break;
case TDMT_VND_TMQ_DEL_CHECKINFO:
- if (tqProcessDelCheckInfoReq(pVnode->pTq, version, pReq, len) < 0) {
+ if (tqProcessDelCheckInfoReq(pVnode->pTq, ver, pReq, len) < 0) {
goto _err;
}
break;
case TDMT_STREAM_TASK_DEPLOY: {
- if (pVnode->restored && tqProcessTaskDeployReq(pVnode->pTq, version, pReq, len) < 0) {
+ if (pVnode->restored && tqProcessTaskDeployReq(pVnode->pTq, ver, pReq, len) < 0) {
goto _err;
}
} break;
case TDMT_STREAM_TASK_DROP: {
- if (tqProcessTaskDropReq(pVnode->pTq, version, pMsg->pCont, pMsg->contLen) < 0) {
+ if (tqProcessTaskDropReq(pVnode->pTq, ver, pMsg->pCont, pMsg->contLen) < 0) {
goto _err;
}
} break;
case TDMT_STREAM_TASK_PAUSE: {
- if (pVnode->restored && tqProcessTaskPauseReq(pVnode->pTq, version, pMsg->pCont, pMsg->contLen) < 0) {
+ if (pVnode->restored && tqProcessTaskPauseReq(pVnode->pTq, ver, pMsg->pCont, pMsg->contLen) < 0) {
goto _err;
}
} break;
case TDMT_STREAM_TASK_RESUME: {
- if (pVnode->restored && tqProcessTaskResumeReq(pVnode->pTq, version, pMsg->pCont, pMsg->contLen) < 0) {
+ if (pVnode->restored && tqProcessTaskResumeReq(pVnode->pTq, ver, pMsg->pCont, pMsg->contLen) < 0) {
goto _err;
}
} break;
case TDMT_VND_STREAM_RECOVER_BLOCKING_STAGE: {
- if (tqProcessTaskRecover2Req(pVnode->pTq, version, pMsg->pCont, pMsg->contLen) < 0) {
+ if (tqProcessTaskRecover2Req(pVnode->pTq, ver, pMsg->pCont, pMsg->contLen) < 0) {
goto _err;
}
} break;
case TDMT_STREAM_TASK_CHECK_RSP: {
- if (tqProcessStreamTaskCheckRsp(pVnode->pTq, version, pReq, len) < 0) {
+ if (tqProcessStreamTaskCheckRsp(pVnode->pTq, ver, pReq, len) < 0) {
goto _err;
}
} break;
case TDMT_VND_ALTER_CONFIRM:
needCommit = pVnode->config.hashChange;
- if (vnodeProcessAlterConfirmReq(pVnode, version, pReq, len, pRsp) < 0) {
+ if (vnodeProcessAlterConfirmReq(pVnode, ver, pReq, len, pRsp) < 0) {
goto _err;
}
break;
case TDMT_VND_ALTER_CONFIG:
- vnodeProcessAlterConfigReq(pVnode, version, pReq, len, pRsp);
+ vnodeProcessAlterConfigReq(pVnode, ver, pReq, len, pRsp);
break;
case TDMT_VND_COMMIT:
needCommit = true;
break;
case TDMT_VND_CREATE_INDEX:
- vnodeProcessCreateIndexReq(pVnode, version, pReq, len, pRsp);
+ vnodeProcessCreateIndexReq(pVnode, ver, pReq, len, pRsp);
break;
case TDMT_VND_DROP_INDEX:
- vnodeProcessDropIndexReq(pVnode, version, pReq, len, pRsp);
+ vnodeProcessDropIndexReq(pVnode, ver, pReq, len, pRsp);
break;
case TDMT_VND_COMPACT:
- vnodeProcessCompactVnodeReq(pVnode, version, pReq, len, pRsp);
+ vnodeProcessCompactVnodeReq(pVnode, ver, pReq, len, pRsp);
goto _exit;
default:
vError("vgId:%d, unprocessed msg, %d", TD_VID(pVnode), pMsg->msgType);
@@ -461,18 +462,18 @@ int32_t vnodeProcessWriteMsg(SVnode *pVnode, SRpcMsg *pMsg, int64_t version, SRp
}
vTrace("vgId:%d, process %s request, code:0x%x index:%" PRId64, TD_VID(pVnode), TMSG_INFO(pMsg->msgType), pRsp->code,
- version);
+ ver);
- walApplyVer(pVnode->pWal, version);
+ walApplyVer(pVnode->pWal, ver);
- if (tqPushMsg(pVnode->pTq, pMsg->pCont, pMsg->contLen, pMsg->msgType, version) < 0) {
+ if (tqPushMsg(pVnode->pTq, pMsg->pCont, pMsg->contLen, pMsg->msgType, ver) < 0) {
vError("vgId:%d, failed to push msg to TQ since %s", TD_VID(pVnode), tstrerror(terrno));
return -1;
}
// commit if need
if (needCommit) {
- vInfo("vgId:%d, commit at version %" PRId64, TD_VID(pVnode), version);
+ vInfo("vgId:%d, commit at version %" PRId64, TD_VID(pVnode), ver);
if (vnodeAsyncCommit(pVnode) < 0) {
vError("vgId:%d, failed to vnode async commit since %s.", TD_VID(pVnode), tstrerror(terrno));
goto _err;
@@ -489,8 +490,8 @@ _exit:
return 0;
_err:
- vError("vgId:%d, process %s request failed since %s, version:%" PRId64, TD_VID(pVnode), TMSG_INFO(pMsg->msgType),
- tstrerror(terrno), version);
+ vError("vgId:%d, process %s request failed since %s, ver:%" PRId64, TD_VID(pVnode), TMSG_INFO(pMsg->msgType),
+ tstrerror(terrno), ver);
return -1;
}
@@ -514,7 +515,9 @@ int32_t vnodeProcessQueryMsg(SVnode *pVnode, SRpcMsg *pMsg) {
return 0;
}
- SReadHandle handle = {.meta = pVnode->pMeta, .config = &pVnode->config, .vnode = pVnode, .pMsgCb = &pVnode->msgCb};
+ SReadHandle handle = {.config = &pVnode->config, .vnode = pVnode, .pMsgCb = &pVnode->msgCb};
+ initStorageAPI(&handle.api);
+
switch (pMsg->msgType) {
case TDMT_SCH_QUERY:
case TDMT_SCH_MERGE_QUERY:
@@ -603,7 +606,7 @@ void vnodeUpdateMetaRsp(SVnode *pVnode, STableMetaRsp *pMetaRsp) {
}
extern int32_t vnodeAsyncRentention(SVnode *pVnode, int64_t now);
-static int32_t vnodeProcessTrimReq(SVnode *pVnode, int64_t version, void *pReq, int32_t len, SRpcMsg *pRsp) {
+static int32_t vnodeProcessTrimReq(SVnode *pVnode, int64_t ver, void *pReq, int32_t len, SRpcMsg *pRsp) {
int32_t code = 0;
SVTrimDbReq trimReq = {0};
@@ -624,7 +627,7 @@ _exit:
return code;
}
-static int32_t vnodeProcessDropTtlTbReq(SVnode *pVnode, int64_t version, void *pReq, int32_t len, SRpcMsg *pRsp) {
+static int32_t vnodeProcessDropTtlTbReq(SVnode *pVnode, int64_t ver, void *pReq, int32_t len, SRpcMsg *pRsp) {
SArray *tbUids = taosArrayInit(8, sizeof(int64_t));
if (tbUids == NULL) return TSDB_CODE_OUT_OF_MEMORY;
@@ -650,7 +653,7 @@ end:
return ret;
}
-static int32_t vnodeProcessCreateStbReq(SVnode *pVnode, int64_t version, void *pReq, int32_t len, SRpcMsg *pRsp) {
+static int32_t vnodeProcessCreateStbReq(SVnode *pVnode, int64_t ver, void *pReq, int32_t len, SRpcMsg *pRsp) {
SVCreateStbReq req = {0};
SDecoder coder;
@@ -667,7 +670,7 @@ static int32_t vnodeProcessCreateStbReq(SVnode *pVnode, int64_t version, void *p
goto _err;
}
- if (metaCreateSTable(pVnode->pMeta, version, &req) < 0) {
+ if (metaCreateSTable(pVnode->pMeta, ver, &req) < 0) {
pRsp->code = terrno;
goto _err;
}
@@ -685,7 +688,7 @@ _err:
return -1;
}
-static int32_t vnodeProcessCreateTbReq(SVnode *pVnode, int64_t version, void *pReq, int32_t len, SRpcMsg *pRsp) {
+static int32_t vnodeProcessCreateTbReq(SVnode *pVnode, int64_t ver, void *pReq, int32_t len, SRpcMsg *pRsp) {
SDecoder decoder = {0};
SEncoder encoder = {0};
int32_t rcode = 0;
@@ -742,7 +745,7 @@ static int32_t vnodeProcessCreateTbReq(SVnode *pVnode, int64_t version, void *pR
}
// do create table
- if (metaCreateTable(pVnode->pMeta, version, pCreateReq, &cRsp.pMeta) < 0) {
+ if (metaCreateTable(pVnode->pMeta, ver, pCreateReq, &cRsp.pMeta) < 0) {
if (pCreateReq->flags & TD_CREATE_IF_NOT_EXISTS && terrno == TSDB_CODE_TDB_TABLE_ALREADY_EXIST) {
cRsp.code = TSDB_CODE_SUCCESS;
} else {
@@ -790,7 +793,7 @@ _exit:
return rcode;
}
-static int32_t vnodeProcessAlterStbReq(SVnode *pVnode, int64_t version, void *pReq, int32_t len, SRpcMsg *pRsp) {
+static int32_t vnodeProcessAlterStbReq(SVnode *pVnode, int64_t ver, void *pReq, int32_t len, SRpcMsg *pRsp) {
SVCreateStbReq req = {0};
SDecoder dc = {0};
@@ -808,7 +811,7 @@ static int32_t vnodeProcessAlterStbReq(SVnode *pVnode, int64_t version, void *pR
return -1;
}
- if (metaAlterSTable(pVnode->pMeta, version, &req) < 0) {
+ if (metaAlterSTable(pVnode->pMeta, ver, &req) < 0) {
pRsp->code = terrno;
tDecoderClear(&dc);
return -1;
@@ -819,7 +822,7 @@ static int32_t vnodeProcessAlterStbReq(SVnode *pVnode, int64_t version, void *pR
return 0;
}
-static int32_t vnodeProcessDropStbReq(SVnode *pVnode, int64_t version, void *pReq, int32_t len, SRpcMsg *pRsp) {
+static int32_t vnodeProcessDropStbReq(SVnode *pVnode, int64_t ver, void *pReq, int32_t len, SRpcMsg *pRsp) {
SVDropStbReq req = {0};
int32_t rcode = TSDB_CODE_SUCCESS;
SDecoder decoder = {0};
@@ -839,7 +842,7 @@ static int32_t vnodeProcessDropStbReq(SVnode *pVnode, int64_t version, void *pRe
// process request
tbUidList = taosArrayInit(8, sizeof(int64_t));
if (tbUidList == NULL) goto _exit;
- if (metaDropSTable(pVnode->pMeta, version, &req, tbUidList) < 0) {
+ if (metaDropSTable(pVnode->pMeta, ver, &req, tbUidList) < 0) {
rcode = terrno;
goto _exit;
}
@@ -862,7 +865,7 @@ _exit:
return 0;
}
-static int32_t vnodeProcessAlterTbReq(SVnode *pVnode, int64_t version, void *pReq, int32_t len, SRpcMsg *pRsp) {
+static int32_t vnodeProcessAlterTbReq(SVnode *pVnode, int64_t ver, void *pReq, int32_t len, SRpcMsg *pRsp) {
SVAlterTbReq vAlterTbReq = {0};
SVAlterTbRsp vAlterTbRsp = {0};
SDecoder dc = {0};
@@ -887,7 +890,7 @@ static int32_t vnodeProcessAlterTbReq(SVnode *pVnode, int64_t version, void *pRe
}
// process
- if (metaAlterTable(pVnode->pMeta, version, &vAlterTbReq, &vMetaRsp) < 0) {
+ if (metaAlterTable(pVnode->pMeta, ver, &vAlterTbReq, &vMetaRsp) < 0) {
vAlterTbRsp.code = terrno;
tDecoderClear(&dc);
rcode = -1;
@@ -912,7 +915,7 @@ _exit:
return 0;
}
-static int32_t vnodeProcessDropTbReq(SVnode *pVnode, int64_t version, void *pReq, int32_t len, SRpcMsg *pRsp) {
+static int32_t vnodeProcessDropTbReq(SVnode *pVnode, int64_t ver, void *pReq, int32_t len, SRpcMsg *pRsp) {
SVDropTbBatchReq req = {0};
SVDropTbBatchRsp rsp = {0};
SDecoder decoder = {0};
@@ -946,7 +949,7 @@ static int32_t vnodeProcessDropTbReq(SVnode *pVnode, int64_t version, void *pReq
tb_uid_t tbUid = 0;
/* code */
- ret = metaDropTable(pVnode->pMeta, version, pDropTbReq, tbUids, &tbUid);
+ ret = metaDropTable(pVnode->pMeta, ver, pDropTbReq, tbUids, &tbUid);
if (ret < 0) {
if (pDropTbReq->igNotExists && terrno == TSDB_CODE_TDB_TABLE_NOT_EXIST) {
dropTbRsp.code = TSDB_CODE_SUCCESS;
@@ -1189,7 +1192,7 @@ static int32_t vnodeRebuildSubmitReqMsg(SSubmitReq2 *pSubmitReq, void **ppMsg) {
return code;
}
-static int32_t vnodeProcessSubmitReq(SVnode *pVnode, int64_t version, void *pReq, int32_t len, SRpcMsg *pRsp) {
+static int32_t vnodeProcessSubmitReq(SVnode *pVnode, int64_t ver, void *pReq, int32_t len, SRpcMsg *pRsp) {
int32_t code = 0;
terrno = 0;
@@ -1251,7 +1254,7 @@ static int32_t vnodeProcessSubmitReq(SVnode *pVnode, int64_t version, void *pReq
for (int32_t iRow = 0; iRow < pColData->nVal; iRow++) {
if (aKey[iRow] < minKey || aKey[iRow] > maxKey || (iRow > 0 && aKey[iRow] <= aKey[iRow - 1])) {
code = TSDB_CODE_INVALID_MSG;
- vError("vgId:%d %s failed since %s, version:%" PRId64, TD_VID(pVnode), __func__, tstrerror(terrno), version);
+ vError("vgId:%d %s failed since %s, version:%" PRId64, TD_VID(pVnode), __func__, tstrerror(terrno), ver);
goto _exit;
}
}
@@ -1263,7 +1266,7 @@ static int32_t vnodeProcessSubmitReq(SVnode *pVnode, int64_t version, void *pReq
for (int32_t iRow = 0; iRow < nRow; ++iRow) {
if (aRow[iRow]->ts < minKey || aRow[iRow]->ts > maxKey || (iRow > 0 && aRow[iRow]->ts <= aRow[iRow - 1]->ts)) {
code = TSDB_CODE_INVALID_MSG;
- vError("vgId:%d %s failed since %s, version:%" PRId64, TD_VID(pVnode), __func__, tstrerror(terrno), version);
+ vError("vgId:%d %s failed since %s, version:%" PRId64, TD_VID(pVnode), __func__, tstrerror(terrno), ver);
goto _exit;
}
}
@@ -1350,7 +1353,7 @@ static int32_t vnodeProcessSubmitReq(SVnode *pVnode, int64_t version, void *pReq
SVCreateTbRsp *pCreateTbRsp = taosArrayReserve(pSubmitRsp->aCreateTbRsp, 1);
// create table
- if (metaCreateTable(pVnode->pMeta, version, pSubmitTbData->pCreateTbReq, &pCreateTbRsp->pMeta) == 0) {
+ if (metaCreateTable(pVnode->pMeta, ver, pSubmitTbData->pCreateTbReq, &pCreateTbRsp->pMeta) == 0) {
// create table success
if (newTbUids == NULL &&
@@ -1376,7 +1379,7 @@ static int32_t vnodeProcessSubmitReq(SVnode *pVnode, int64_t version, void *pReq
// insert data
int32_t affectedRows;
- code = tsdbInsertTableData(pVnode->pTsdb, version, pSubmitTbData, &affectedRows);
+ code = tsdbInsertTableData(pVnode->pTsdb, ver, pSubmitTbData, &affectedRows);
if (code) goto _exit;
pSubmitRsp->affectedRows += affectedRows;
@@ -1404,7 +1407,7 @@ _exit:
atomic_add_fetch_64(&pVnode->statis.nBatchInsert, 1);
if (code == 0) {
atomic_add_fetch_64(&pVnode->statis.nBatchInsertSuccess, 1);
- tdProcessRSmaSubmit(pVnode->pSma, version, pSubmitReq, pReq, len, STREAM_INPUT__DATA_SUBMIT);
+ tdProcessRSmaSubmit(pVnode->pSma, ver, pSubmitReq, pReq, len, STREAM_INPUT__DATA_SUBMIT);
}
// clear
@@ -1419,7 +1422,7 @@ _exit:
return code;
}
-static int32_t vnodeProcessCreateTSmaReq(SVnode *pVnode, int64_t version, void *pReq, int32_t len, SRpcMsg *pRsp) {
+static int32_t vnodeProcessCreateTSmaReq(SVnode *pVnode, int64_t ver, void *pReq, int32_t len, SRpcMsg *pRsp) {
SVCreateTSmaReq req = {0};
SDecoder coder = {0};
@@ -1439,20 +1442,20 @@ static int32_t vnodeProcessCreateTSmaReq(SVnode *pVnode, int64_t version, void *
goto _err;
}
- if (tdProcessTSmaCreate(pVnode->pSma, version, (const char *)&req) < 0) {
+ if (tdProcessTSmaCreate(pVnode->pSma, ver, (const char *)&req) < 0) {
if (pRsp) pRsp->code = terrno;
goto _err;
}
tDecoderClear(&coder);
vDebug("vgId:%d, success to create tsma %s:%" PRIi64 " version %" PRIi64 " for table %" PRIi64, TD_VID(pVnode),
- req.indexName, req.indexUid, version, req.tableUid);
+ req.indexName, req.indexUid, ver, req.tableUid);
return 0;
_err:
tDecoderClear(&coder);
vError("vgId:%d, failed to create tsma %s:%" PRIi64 " version %" PRIi64 "for table %" PRIi64 " since %s",
- TD_VID(pVnode), req.indexName, req.indexUid, version, req.tableUid, terrstr());
+ TD_VID(pVnode), req.indexName, req.indexUid, ver, req.tableUid, terrstr());
return -1;
}
@@ -1468,28 +1471,28 @@ int32_t vnodeProcessCreateTSma(SVnode *pVnode, void *pCont, uint32_t contLen) {
return vnodeProcessCreateTSmaReq(pVnode, 1, pCont, contLen, NULL);
}
-static int32_t vnodeConsolidateAlterHashRange(SVnode *pVnode, int64_t version) {
+static int32_t vnodeConsolidateAlterHashRange(SVnode *pVnode, int64_t ver) {
int32_t code = TSDB_CODE_SUCCESS;
vInfo("vgId:%d, trim meta of tables per hash range [%" PRIu32 ", %" PRIu32 "]. apply-index:%" PRId64, TD_VID(pVnode),
- pVnode->config.hashBegin, pVnode->config.hashEnd, version);
+ pVnode->config.hashBegin, pVnode->config.hashEnd, ver);
// TODO: trim meta of tables from TDB per hash range [pVnode->config.hashBegin, pVnode->config.hashEnd]
+ code = metaTrimTables(pVnode->pMeta);
return code;
}
-static int32_t vnodeProcessAlterConfirmReq(SVnode *pVnode, int64_t version, void *pReq, int32_t len, SRpcMsg *pRsp) {
+static int32_t vnodeProcessAlterConfirmReq(SVnode *pVnode, int64_t ver, void *pReq, int32_t len, SRpcMsg *pRsp) {
vInfo("vgId:%d, vnode handle msgType:alter-confirm, alter confim msg is processed", TD_VID(pVnode));
int32_t code = TSDB_CODE_SUCCESS;
if (!pVnode->config.hashChange) {
goto _exit;
}
- code = vnodeConsolidateAlterHashRange(pVnode, version);
+ code = vnodeConsolidateAlterHashRange(pVnode, ver);
if (code < 0) {
- vError("vgId:%d, failed to consolidate alter hashrange since %s. version:%" PRId64, TD_VID(pVnode), terrstr(),
- version);
+ vError("vgId:%d, failed to consolidate alter hashrange since %s. version:%" PRId64, TD_VID(pVnode), terrstr(), ver);
goto _exit;
}
pVnode->config.hashChange = false;
@@ -1503,7 +1506,7 @@ _exit:
return code;
}
-static int32_t vnodeProcessAlterConfigReq(SVnode *pVnode, int64_t version, void *pReq, int32_t len, SRpcMsg *pRsp) {
+static int32_t vnodeProcessAlterConfigReq(SVnode *pVnode, int64_t ver, void *pReq, int32_t len, SRpcMsg *pRsp) {
bool walChanged = false;
bool tsdbChanged = false;
@@ -1606,7 +1609,7 @@ static int32_t vnodeProcessAlterConfigReq(SVnode *pVnode, int64_t version, void
return 0;
}
-static int32_t vnodeProcessBatchDeleteReq(SVnode *pVnode, int64_t version, void *pReq, int32_t len, SRpcMsg *pRsp) {
+static int32_t vnodeProcessBatchDeleteReq(SVnode *pVnode, int64_t ver, void *pReq, int32_t len, SRpcMsg *pRsp) {
SBatchDeleteReq deleteReq;
SDecoder decoder;
tDecoderInit(&decoder, pReq, len);
@@ -1626,7 +1629,7 @@ static int32_t vnodeProcessBatchDeleteReq(SVnode *pVnode, int64_t version, void
int64_t uid = mr.me.uid;
- int32_t code = tsdbDeleteTableData(pVnode->pTsdb, version, deleteReq.suid, uid, pOneReq->startTs, pOneReq->endTs);
+ int32_t code = tsdbDeleteTableData(pVnode->pTsdb, ver, deleteReq.suid, uid, pOneReq->startTs, pOneReq->endTs);
if (code < 0) {
terrno = code;
vError("vgId:%d, delete error since %s, suid:%" PRId64 ", uid:%" PRId64 ", start ts:%" PRId64 ", end ts:%" PRId64,
@@ -1640,7 +1643,7 @@ static int32_t vnodeProcessBatchDeleteReq(SVnode *pVnode, int64_t version, void
return 0;
}
-static int32_t vnodeProcessDeleteReq(SVnode *pVnode, int64_t version, void *pReq, int32_t len, SRpcMsg *pRsp) {
+static int32_t vnodeProcessDeleteReq(SVnode *pVnode, int64_t ver, void *pReq, int32_t len, SRpcMsg *pRsp) {
int32_t code = 0;
SDecoder *pCoder = &(SDecoder){0};
SDeleteRes *pRes = &(SDeleteRes){0};
@@ -1661,7 +1664,7 @@ static int32_t vnodeProcessDeleteReq(SVnode *pVnode, int64_t version, void *pReq
ASSERT(taosArrayGetSize(pRes->uidList) == 0 || (pRes->skey != 0 && pRes->ekey != 0));
for (int32_t iUid = 0; iUid < taosArrayGetSize(pRes->uidList); iUid++) {
- code = tsdbDeleteTableData(pVnode->pTsdb, version, pRes->suid, *(uint64_t *)taosArrayGet(pRes->uidList, iUid),
+ code = tsdbDeleteTableData(pVnode->pTsdb, ver, pRes->suid, *(uint64_t *)taosArrayGet(pRes->uidList, iUid),
pRes->skey, pRes->ekey);
if (code) goto _err;
}
@@ -1682,7 +1685,7 @@ static int32_t vnodeProcessDeleteReq(SVnode *pVnode, int64_t version, void *pReq
_err:
return code;
}
-static int32_t vnodeProcessCreateIndexReq(SVnode *pVnode, int64_t version, void *pReq, int32_t len, SRpcMsg *pRsp) {
+static int32_t vnodeProcessCreateIndexReq(SVnode *pVnode, int64_t ver, void *pReq, int32_t len, SRpcMsg *pRsp) {
SVCreateStbReq req = {0};
SDecoder dc = {0};
@@ -1698,7 +1701,7 @@ static int32_t vnodeProcessCreateIndexReq(SVnode *pVnode, int64_t version, void
tDecoderClear(&dc);
return -1;
}
- if (metaAddIndexToSTable(pVnode->pMeta, version, &req) < 0) {
+ if (metaAddIndexToSTable(pVnode->pMeta, ver, &req) < 0) {
pRsp->code = terrno;
goto _err;
}
@@ -1708,7 +1711,7 @@ _err:
tDecoderClear(&dc);
return -1;
}
-static int32_t vnodeProcessDropIndexReq(SVnode *pVnode, int64_t version, void *pReq, int32_t len, SRpcMsg *pRsp) {
+static int32_t vnodeProcessDropIndexReq(SVnode *pVnode, int64_t ver, void *pReq, int32_t len, SRpcMsg *pRsp) {
SDropIndexReq req = {0};
pRsp->msgType = TDMT_VND_DROP_INDEX_RSP;
pRsp->code = TSDB_CODE_SUCCESS;
@@ -1720,21 +1723,21 @@ static int32_t vnodeProcessDropIndexReq(SVnode *pVnode, int64_t version, void *p
return -1;
}
- if (metaDropIndexFromSTable(pVnode->pMeta, version, &req) < 0) {
+ if (metaDropIndexFromSTable(pVnode->pMeta, ver, &req) < 0) {
pRsp->code = terrno;
return -1;
}
return TSDB_CODE_SUCCESS;
}
-extern int32_t vnodeProcessCompactVnodeReqImpl(SVnode *pVnode, int64_t version, void *pReq, int32_t len, SRpcMsg *pRsp);
+extern int32_t vnodeProcessCompactVnodeReqImpl(SVnode *pVnode, int64_t ver, void *pReq, int32_t len, SRpcMsg *pRsp);
-static int32_t vnodeProcessCompactVnodeReq(SVnode *pVnode, int64_t version, void *pReq, int32_t len, SRpcMsg *pRsp) {
- return vnodeProcessCompactVnodeReqImpl(pVnode, version, pReq, len, pRsp);
+static int32_t vnodeProcessCompactVnodeReq(SVnode *pVnode, int64_t ver, void *pReq, int32_t len, SRpcMsg *pRsp) {
+ return vnodeProcessCompactVnodeReqImpl(pVnode, ver, pReq, len, pRsp);
}
#ifndef TD_ENTERPRISE
-int32_t vnodeProcessCompactVnodeReqImpl(SVnode *pVnode, int64_t version, void *pReq, int32_t len, SRpcMsg *pRsp) {
+int32_t vnodeProcessCompactVnodeReqImpl(SVnode *pVnode, int64_t ver, void *pReq, int32_t len, SRpcMsg *pRsp) {
return 0;
}
#endif
diff --git a/source/libs/cache/test/cacheTests.cpp b/source/libs/cache/test/cacheTests.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/source/libs/catalog/src/ctgDbg.c b/source/libs/catalog/src/ctgDbg.c
index b90e51f1bc2813ff6f4b965d75671871e71e7e78..7cba6ddf0da240ce17e2794487c0d70181cecf27 100644
--- a/source/libs/catalog/src/ctgDbg.c
+++ b/source/libs/catalog/src/ctgDbg.c
@@ -19,7 +19,7 @@
#include "trpc.h"
extern SCatalogMgmt gCtgMgmt;
-SCtgDebug gCTGDebug = {.statEnable = true};
+SCtgDebug gCTGDebug = {0};
#if 0
diff --git a/source/libs/catalog/src/ctgUtil.c b/source/libs/catalog/src/ctgUtil.c
index cf864e86438fe603b4611954043d0b6f377ff110..e623b7969dd7307737c67f24647eec644a8c51ef 100644
--- a/source/libs/catalog/src/ctgUtil.c
+++ b/source/libs/catalog/src/ctgUtil.c
@@ -1555,14 +1555,17 @@ int32_t ctgChkSetAuthRes(SCatalog* pCtg, SCtgAuthReq* req, SCtgAuthRsp* res) {
char dbFName[TSDB_DB_FNAME_LEN];
tNameGetFullDbName(&pReq->tbName, dbFName);
+  // since we add read/write privileges when creating a db, there is no need to check createdDbs
+#if 0
if (pInfo->createdDbs && taosHashGet(pInfo->createdDbs, dbFName, strlen(dbFName))) {
pRes->pass = true;
return TSDB_CODE_SUCCESS;
}
+#endif
switch (pReq->type) {
case AUTH_TYPE_READ: {
- if (pInfo->readTbs && taosHashGetSize(pInfo->readTbs) > 0) {
+ if (pReq->tbName.type == TSDB_TABLE_NAME_T && pInfo->readTbs && taosHashGetSize(pInfo->readTbs) > 0) {
req->singleType = AUTH_TYPE_READ;
CTG_ERR_RET(ctgChkSetTbAuthRes(pCtg, req, res));
if (pRes->pass || res->metaNotExists) {
@@ -1578,7 +1581,7 @@ int32_t ctgChkSetAuthRes(SCatalog* pCtg, SCtgAuthReq* req, SCtgAuthRsp* res) {
break;
}
case AUTH_TYPE_WRITE: {
- if (pInfo->writeTbs && taosHashGetSize(pInfo->writeTbs) > 0) {
+ if (pReq->tbName.type == TSDB_TABLE_NAME_T && pInfo->writeTbs && taosHashGetSize(pInfo->writeTbs) > 0) {
req->singleType = AUTH_TYPE_WRITE;
CTG_ERR_RET(ctgChkSetTbAuthRes(pCtg, req, res));
if (pRes->pass || res->metaNotExists) {
diff --git a/source/libs/executor/CMakeLists.txt b/source/libs/executor/CMakeLists.txt
index 8b3d04e32c35e7143ccdc46fdcdf9f56ec09d0fc..d2c39aba7483adf5d3088789180c3e436ffd3ae7 100644
--- a/source/libs/executor/CMakeLists.txt
+++ b/source/libs/executor/CMakeLists.txt
@@ -1,10 +1,9 @@
aux_source_directory(src EXECUTOR_SRC)
-#add_library(executor ${EXECUTOR_SRC})
add_library(executor STATIC ${EXECUTOR_SRC})
target_link_libraries(executor
- PRIVATE os util common function parser planner qcom vnode scalar nodes index stream
+ PRIVATE os util common function parser planner qcom scalar nodes index wal tdb
)
target_include_directories(
diff --git a/source/libs/executor/inc/dataSinkInt.h b/source/libs/executor/inc/dataSinkInt.h
index 57a771b275b219b776dbee34193e2ba644e73a1f..9893b4eb764814f4fa07f20190de499dc650086a 100644
--- a/source/libs/executor/inc/dataSinkInt.h
+++ b/source/libs/executor/inc/dataSinkInt.h
@@ -22,6 +22,7 @@ extern "C" {
#include "dataSinkMgt.h"
#include "plannodes.h"
+#include "storageapi.h"
#include "tcommon.h"
struct SDataSink;
@@ -29,6 +30,7 @@ struct SDataSinkHandle;
typedef struct SDataSinkManager {
SDataSinkMgtCfg cfg;
+ SStorageAPI* pAPI;
} SDataSinkManager;
typedef int32_t (*FPutDataBlock)(struct SDataSinkHandle* pHandle, const SInputData* pInput, bool* pContinue);
diff --git a/source/libs/executor/inc/executil.h b/source/libs/executor/inc/executil.h
index 896858ac7f745d685cc1cb4dc31c82a7534845d5..30911c6061a8634e55c1bc38468bc066ced19f72 100644
--- a/source/libs/executor/inc/executil.h
+++ b/source/libs/executor/inc/executil.h
@@ -12,17 +12,17 @@
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see .
*/
-#ifndef TDENGINE_QUERYUTIL_H
-#define TDENGINE_QUERYUTIL_H
+#ifndef TDENGINE_EXECUTIL_H
+#define TDENGINE_EXECUTIL_H
#include "executor.h"
#include "function.h"
#include "nodes.h"
#include "plannodes.h"
+#include "storageapi.h"
#include "tcommon.h"
#include "tpagedbuf.h"
#include "tsimplehash.h"
-#include "vnode.h"
#define T_LONG_JMP(_obj, _c) \
do { \
@@ -135,6 +135,10 @@ struct SResultRowEntryInfo* getResultEntryInfo(const SResultRow* pRow, int32_t i
static FORCE_INLINE SResultRow* getResultRowByPos(SDiskbasedBuf* pBuf, SResultRowPosition* pos, bool forUpdate) {
SFilePage* bufPage = (SFilePage*)getBufPage(pBuf, pos->pageId);
+ if (!bufPage) {
+ uFatal("failed to get the buffer page:%d since %s", pos->pageId, terrstr());
+ return NULL;
+ }
if (forUpdate) {
setBufPageDirty(bufPage, true);
}
@@ -154,7 +158,7 @@ int32_t getNumOfTotalRes(SGroupResInfo* pGroupResInfo);
SSDataBlock* createDataBlockFromDescNode(SDataBlockDescNode* pNode);
EDealRes doTranslateTagExpr(SNode** pNode, void* pContext);
-int32_t getGroupIdFromTagsVal(void* pMeta, uint64_t uid, SNodeList* pGroupNode, char* keyBuf, uint64_t* pGroupId);
+int32_t getGroupIdFromTagsVal(void* pVnode, uint64_t uid, SNodeList* pGroupNode, char* keyBuf, uint64_t* pGroupId, SStorageAPI* pAPI);
size_t getTableTagsBufLen(const SNodeList* pGroups);
SArray* createSortInfo(SNodeList* pNodeList);
@@ -166,7 +170,7 @@ void createExprFromOneNode(SExprInfo* pExp, SNode* pNode, int16_t slotId);
void createExprFromTargetNode(SExprInfo* pExp, STargetNode* pTargetNode);
SExprInfo* createExprInfo(SNodeList* pNodeList, SNodeList* pGroupKeys, int32_t* numOfExprs);
-SqlFunctionCtx* createSqlFunctionCtx(SExprInfo* pExprInfo, int32_t numOfOutput, int32_t** rowEntryInfoOffset);
+SqlFunctionCtx* createSqlFunctionCtx(SExprInfo* pExprInfo, int32_t numOfOutput, int32_t** rowEntryInfoOffset, SFunctionStateStore* pStore);
void relocateColumnData(SSDataBlock* pBlock, const SArray* pColMatchInfo, SArray* pCols, bool outputEveryColumn);
void initExecTimeWindowInfo(SColumnInfoData* pColData, STimeWindow* pQueryWindow);
@@ -178,8 +182,11 @@ void cleanupQueryTableDataCond(SQueryTableDataCond* pCond);
int32_t convertFillType(int32_t mode);
int32_t resultrowComparAsc(const void* p1, const void* p2);
-int32_t isQualifiedTable(STableKeyInfo* info, SNode* pTagCond, void* metaHandle, bool* pQualified);
+int32_t isQualifiedTable(STableKeyInfo* info, SNode* pTagCond, void* metaHandle, bool* pQualified, SStorageAPI *pAPI);
void printDataBlock(SSDataBlock* pBlock, const char* flag);
-#endif // TDENGINE_QUERYUTIL_H
+void getNextTimeWindow(const SInterval* pInterval, STimeWindow* tw, int32_t order);
+void getInitialStartTimeWindow(SInterval* pInterval, TSKEY ts, STimeWindow* w, bool ascQuery);
+
+#endif // TDENGINE_EXECUTIL_H
diff --git a/source/libs/executor/inc/executorInt.h b/source/libs/executor/inc/executorInt.h
index 2daeb70260c14f0082216b12424dd0c40bbd64e3..38890f8c347d5bbaecb6f3b34fcab9667f1da80b 100644
--- a/source/libs/executor/inc/executorInt.h
+++ b/source/libs/executor/inc/executorInt.h
@@ -38,13 +38,15 @@ extern "C" {
#include "tlockfree.h"
#include "tmsg.h"
#include "tpagedbuf.h"
-#include "tstream.h"
-#include "tstreamUpdate.h"
-
-#include "vnode.h"
+//#include "tstream.h"
+//#include "tstreamUpdate.h"
+#include "tlrucache.h"
typedef int32_t (*__block_search_fn_t)(char* data, int32_t num, int64_t key, int32_t order);
+typedef struct STsdbReader STsdbReader;
+typedef struct STqReader STqReader;
+
#define IS_VALID_SESSION_WIN(winInfo) ((winInfo).sessionWin.win.skey > 0)
#define SET_SESSION_WIN_INVALID(winInfo) ((winInfo).sessionWin.win.skey = INT64_MIN)
#define IS_INVALID_SESSION_WIN_KEY(winKey) ((winKey).win.skey <= 0)
@@ -206,6 +208,7 @@ typedef struct STableScanBase {
SLimitInfo limitInfo;
// there are more than one table list exists in one task, if only one vnode exists.
STableListInfo* pTableListInfo;
+ TsdReader readerAPI;
} STableScanBase;
typedef struct STableScanInfo {
@@ -221,6 +224,7 @@ typedef struct STableScanInfo {
int8_t assignBlockUid;
bool hasGroupByTag;
bool countOnly;
+// TsdReader readerAPI;
} STableScanInfo;
typedef struct STableMergeScanInfo {
@@ -280,6 +284,7 @@ typedef struct SStreamAggSupporter {
int32_t stateKeySize;
int16_t stateKeyType;
SDiskbasedBuf* pResultBuf;
+ SStateStore stateStore;
} SStreamAggSupporter;
typedef struct SWindowSupporter {
@@ -335,7 +340,7 @@ typedef struct SStreamScanInfo {
STqReader* tqReader;
uint64_t groupId;
- SUpdateInfo* pUpdateInfo;
+ struct SUpdateInfo* pUpdateInfo;
EStreamScanMode scanMode;
struct SOperatorInfo* pStreamScanOp;
@@ -366,15 +371,18 @@ typedef struct SStreamScanInfo {
SSDataBlock* pCreateTbRes;
int8_t igCheckUpdate;
int8_t igExpired;
- SStreamState* pState;
+ void* pState; //void
+ SStoreTqReader readerFn;
+ SStateStore stateStore;
} SStreamScanInfo;
typedef struct {
- SVnode* vnode;
- SSDataBlock pRes; // result SSDataBlock
- STsdbReader* dataReader;
- SSnapContext* sContext;
- STableListInfo* pTableListInfo;
+ struct SVnode* vnode; // todo remove this
+ SSDataBlock pRes; // result SSDataBlock
+ STsdbReader* dataReader;
+ struct SSnapContext* sContext;
+ SStorageAPI* pAPI;
+ STableListInfo* pTableListInfo;
} SStreamRawScanInfo;
typedef struct STableCountScanSupp {
@@ -441,12 +449,15 @@ typedef struct SStreamIntervalOperatorInfo {
bool isFinal;
SArray* pChildren;
int32_t numOfChild;
- SStreamState* pState;
+ SStreamState* pState; // void
SWinKey delKey;
uint64_t numOfDatapack;
SArray* pUpdated;
SSHashObj* pUpdatedMap;
int64_t dataVersion;
+ SStateStore statestore;
+ bool recvGetAll;
+ SHashObj* pFinalPullDataMap;
} SStreamIntervalOperatorInfo;
typedef struct SDataGroupInfo {
@@ -543,6 +554,7 @@ typedef struct SStreamFillSupporter {
int32_t rowSize;
SSHashObj* pResMap;
bool hasDelete;
+ SStorageAPI* pAPI;
} SStreamFillSupporter;
typedef struct SStreamFillOperatorInfo {
@@ -569,12 +581,11 @@ void cleanupQueriedTableScanInfo(SSchemaInfo* pSchemaInfo);
void initBasicInfo(SOptrBasicInfo* pInfo, SSDataBlock* pBlock);
void cleanupBasicInfo(SOptrBasicInfo* pInfo);
-int32_t initExprSupp(SExprSupp* pSup, SExprInfo* pExprInfo, int32_t numOfExpr);
+int32_t initExprSupp(SExprSupp* pSup, SExprInfo* pExprInfo, int32_t numOfExpr, SFunctionStateStore* pStore);
void cleanupExprSupp(SExprSupp* pSup);
-
int32_t initAggSup(SExprSupp* pSup, SAggSupporter* pAggSup, SExprInfo* pExprInfo, int32_t numOfCols, size_t keyBufSize,
- const char* pkey, void* pState);
+ const char* pkey, void* pState, SFunctionStateStore* pStore);
void cleanupAggSup(SAggSupporter* pAggSup);
void initResultSizeInfo(SResultInfo* pResultInfo, int32_t numOfRows);
@@ -636,7 +647,7 @@ bool isInTimeWindow(STimeWindow* pWin, TSKEY ts, int64_t gap);
bool functionNeedToExecute(SqlFunctionCtx* pCtx);
bool isOverdue(TSKEY ts, STimeWindowAggSupp* pSup);
bool isCloseWindow(STimeWindow* pWin, STimeWindowAggSupp* pSup);
-bool isDeletedStreamWindow(STimeWindow* pWin, uint64_t groupId, SStreamState* pState, STimeWindowAggSupp* pTwSup);
+bool isDeletedStreamWindow(STimeWindow* pWin, uint64_t groupId, void* pState, STimeWindowAggSupp* pTwSup, SStateStore* pStore);
void appendOneRowToStreamSpecialBlock(SSDataBlock* pBlock, TSKEY* pStartTs, TSKEY* pEndTs, uint64_t* pUid,
uint64_t* pGp, void* pTbName);
uint64_t calGroupIdByData(SPartitionBySupporter* pParSup, SExprSupp* pExprSup, SSDataBlock* pBlock, int32_t rowId);
@@ -645,20 +656,17 @@ int32_t finalizeResultRows(SDiskbasedBuf* pBuf, SResultRowPosition* resultRowPos
SSDataBlock* pBlock, SExecTaskInfo* pTaskInfo);
bool groupbyTbname(SNodeList* pGroupList);
-int32_t buildDataBlockFromGroupRes(struct SOperatorInfo* pOperator, SStreamState* pState, SSDataBlock* pBlock, SExprSupp* pSup,
+int32_t buildDataBlockFromGroupRes(struct SOperatorInfo* pOperator, void* pState, SSDataBlock* pBlock, SExprSupp* pSup,
SGroupResInfo* pGroupResInfo);
-int32_t saveSessionDiscBuf(SStreamState* pState, SSessionKey* key, void* buf, int32_t size);
-int32_t buildSessionResultDataBlock(struct SOperatorInfo* pOperator, SStreamState* pState, SSDataBlock* pBlock,
+int32_t saveSessionDiscBuf(void* pState, SSessionKey* key, void* buf, int32_t size, SStateStore* pAPI);
+int32_t buildSessionResultDataBlock(struct SOperatorInfo* pOperator, void* pState, SSDataBlock* pBlock,
SExprSupp* pSup, SGroupResInfo* pGroupResInfo);
-int32_t setOutputBuf(SStreamState* pState, STimeWindow* win, SResultRow** pResult, int64_t tableGroupId,
- SqlFunctionCtx* pCtx, int32_t numOfOutput, int32_t* rowEntryInfoOffset, SAggSupporter* pAggSup);
-int32_t releaseOutputBuf(SStreamState* pState, SWinKey* pKey, SResultRow* pResult);
-int32_t saveOutputBuf(SStreamState* pState, SWinKey* pKey, SResultRow* pResult, int32_t resSize);
+int32_t releaseOutputBuf(void* pState, SWinKey* pKey, SResultRow* pResult, SStateStore* pAPI);
void getNextIntervalWindow(SInterval* pInterval, STimeWindow* tw, int32_t order);
int32_t getForwardStepsInBlock(int32_t numOfRows, __block_search_fn_t searchFn, TSKEY ekey, int32_t pos, int32_t order,
int64_t* pData);
-void appendCreateTableRow(SStreamState* pState, SExprSupp* pTableSup, SExprSupp* pTagSup, uint64_t groupId,
- SSDataBlock* pSrcBlock, int32_t rowId, SSDataBlock* pDestBlock);
+void appendCreateTableRow(void* pState, SExprSupp* pTableSup, SExprSupp* pTagSup, uint64_t groupId,
+ SSDataBlock* pSrcBlock, int32_t rowId, SSDataBlock* pDestBlock, SStateStore* pAPI);
SSDataBlock* buildCreateTableBlock(SExprSupp* tbName, SExprSupp* tag);
SExprInfo* createExpr(SNodeList* pNodeList, int32_t* numOfExprs);
diff --git a/source/libs/executor/inc/operator.h b/source/libs/executor/inc/operator.h
index 632b817a0754d2d8d4482200df5ac28e18bcf02c..1d2685b8c6f98aa8309c2e9900c4d378ff227e48 100644
--- a/source/libs/executor/inc/operator.h
+++ b/source/libs/executor/inc/operator.h
@@ -156,7 +156,7 @@ void destroyOperator(SOperatorInfo* pOperator);
SOperatorInfo* extractOperatorInTree(SOperatorInfo* pOperator, int32_t type, const char* id);
int32_t getTableScanInfo(SOperatorInfo* pOperator, int32_t* order, int32_t* scanFlag, bool inheritUsOrder);
-int32_t stopTableScanOperator(SOperatorInfo* pOperator, const char* pIdStr);
+int32_t stopTableScanOperator(SOperatorInfo* pOperator, const char* pIdStr, SStorageAPI* pAPI);
int32_t getOperatorExplainExecInfo(struct SOperatorInfo* operatorInfo, SArray* pExecInfoList);
#ifdef __cplusplus
diff --git a/source/libs/executor/inc/querytask.h b/source/libs/executor/inc/querytask.h
index 37c93fef5c3fdad15ac2d6855e3d0b7ca323274c..6497bd90b43ee73422c9dbae96f871a27f4e29b0 100644
--- a/source/libs/executor/inc/querytask.h
+++ b/source/libs/executor/inc/querytask.h
@@ -56,20 +56,19 @@ typedef struct STaskStopInfo {
} STaskStopInfo;
typedef struct {
- STqOffsetVal currentOffset; // for tmq
- SMqMetaRsp metaRsp; // for tmq fetching meta
- int64_t snapshotVer;
-// SPackedData submit; // todo remove it
- SSchemaWrapper* schema;
- char tbName[TSDB_TABLE_NAME_LEN]; // this is the current scan table: todo refactor
- int8_t recoverStep;
- int8_t recoverScanFinished;
- SQueryTableDataCond tableCond;
- int64_t fillHistoryVer1;
- int64_t fillHistoryVer2;
- SStreamState* pState;
- int64_t dataVersion;
- int64_t checkPointId;
+ STqOffsetVal currentOffset; // for tmq
+ SMqMetaRsp metaRsp; // for tmq fetching meta
+ int64_t snapshotVer;
+ SSchemaWrapper* schema;
+ char tbName[TSDB_TABLE_NAME_LEN]; // this is the current scan table: todo refactor
+ int8_t recoverStep;
+ int8_t recoverScanFinished;
+ SQueryTableDataCond tableCond;
+ int64_t fillHistoryVer1;
+ int64_t fillHistoryVer2;
+ SStreamState* pState;
+ int64_t dataVersion;
+ int64_t checkPointId;
} SStreamTaskInfo;
struct SExecTaskInfo {
@@ -92,10 +91,11 @@ struct SExecTaskInfo {
SArray* pResultBlockList; // result block list
STaskStopInfo stopInfo;
SRWLatch lock; // secure the access of STableListInfo
+ SStorageAPI storageAPI;
};
void buildTaskId(uint64_t taskId, uint64_t queryId, char* dst);
-SExecTaskInfo* doCreateTask(uint64_t queryId, uint64_t taskId, int32_t vgId, EOPTR_EXEC_MODEL model);
+SExecTaskInfo* doCreateTask(uint64_t queryId, uint64_t taskId, int32_t vgId, EOPTR_EXEC_MODEL model, SStorageAPI* pAPI);
void doDestroyTask(SExecTaskInfo* pTaskInfo);
bool isTaskKilled(SExecTaskInfo* pTaskInfo);
void setTaskKilled(SExecTaskInfo* pTaskInfo, int32_t rspCode);
diff --git a/source/libs/executor/inc/tfill.h b/source/libs/executor/inc/tfill.h
index 78b3cd2f40b839dddd39d12bf0b0d6fc1ba82555..79837480d79ad265e8e6ce307a09828a3dc5e1d7 100644
--- a/source/libs/executor/inc/tfill.h
+++ b/source/libs/executor/inc/tfill.h
@@ -120,6 +120,8 @@ int64_t getNumOfResultsAfterFillGap(SFillInfo* pFillInfo, int64_t ekey, int32_t
void taosFillSetStartInfo(struct SFillInfo* pFillInfo, int32_t numOfRows, TSKEY endKey);
void taosResetFillInfo(struct SFillInfo* pFillInfo, TSKEY startTimestamp);
void taosFillSetInputDataBlock(struct SFillInfo* pFillInfo, const struct SSDataBlock* pInput);
+void taosFillUpdateStartTimestampInfo(SFillInfo* pFillInfo, int64_t ts);
+bool taosFillNotStarted(const SFillInfo* pFillInfo);
SFillColInfo* createFillColInfo(SExprInfo* pExpr, int32_t numOfFillExpr, SExprInfo* pNotFillExpr,
int32_t numOfNotFillCols, const struct SNodeListNode* val);
bool taosFillHasMoreResults(struct SFillInfo* pFillInfo);
diff --git a/source/libs/executor/src/aggregateoperator.c b/source/libs/executor/src/aggregateoperator.c
index 5cd95d33114d8bcb9edf0638a990b4116478d5bd..b34c6f4b82842a9080d628b9ef4cc0fcf60f3b6e 100644
--- a/source/libs/executor/src/aggregateoperator.c
+++ b/source/libs/executor/src/aggregateoperator.c
@@ -21,7 +21,6 @@
#include "tname.h"
#include "executorInt.h"
-#include "index.h"
#include "operator.h"
#include "query.h"
#include "querytask.h"
@@ -30,6 +29,7 @@
#include "tglobal.h"
#include "thash.h"
#include "ttypes.h"
+#include "index.h"
typedef struct {
bool hasAgg;
@@ -84,7 +84,7 @@ SOperatorInfo* createAggregateOperatorInfo(SOperatorInfo* downstream, SAggPhysiN
int32_t num = 0;
SExprInfo* pExprInfo = createExprInfo(pAggNode->pAggFuncs, pAggNode->pGroupKeys, &num);
int32_t code = initAggSup(&pOperator->exprSupp, &pInfo->aggSup, pExprInfo, num, keyBufSize, pTaskInfo->id.str,
- pTaskInfo->streamInfo.pState);
+ pTaskInfo->streamInfo.pState, &pTaskInfo->storageAPI.functionStore);
if (code != TSDB_CODE_SUCCESS) {
goto _error;
}
@@ -95,7 +95,7 @@ SOperatorInfo* createAggregateOperatorInfo(SOperatorInfo* downstream, SAggPhysiN
pScalarExprInfo = createExprInfo(pAggNode->pExprs, NULL, &numOfScalarExpr);
}
- code = initExprSupp(&pInfo->scalarExprSup, pScalarExprInfo, numOfScalarExpr);
+ code = initExprSupp(&pInfo->scalarExprSup, pScalarExprInfo, numOfScalarExpr, &pTaskInfo->storageAPI.functionStore);
if (code != TSDB_CODE_SUCCESS) {
goto _error;
}
@@ -485,8 +485,8 @@ void cleanupAggSup(SAggSupporter* pAggSup) {
}
int32_t initAggSup(SExprSupp* pSup, SAggSupporter* pAggSup, SExprInfo* pExprInfo, int32_t numOfCols, size_t keyBufSize,
- const char* pkey, void* pState) {
- int32_t code = initExprSupp(pSup, pExprInfo, numOfCols);
+ const char* pkey, void* pState, SFunctionStateStore* pStore) {
+ int32_t code = initExprSupp(pSup, pExprInfo, numOfCols, pStore);
if (code != TSDB_CODE_SUCCESS) {
return code;
}
diff --git a/source/libs/executor/src/cachescanoperator.c b/source/libs/executor/src/cachescanoperator.c
index 96dac643a59cbb758d4856ae1b1c3ca10666c964..48003b277beb61df43a57eaa3ff6435ad4d456a6 100644
--- a/source/libs/executor/src/cachescanoperator.c
+++ b/source/libs/executor/src/cachescanoperator.c
@@ -13,8 +13,8 @@
* along with this program. If not, see .
*/
-#include "function.h"
#include "os.h"
+#include "function.h"
#include "tname.h"
#include "tdatablock.h"
@@ -27,6 +27,8 @@
#include "thash.h"
#include "ttypes.h"
+#include "storageapi.h"
+
typedef struct SCacheRowsScanInfo {
SSDataBlock* pRes;
SReadHandle readHandle;
@@ -102,9 +104,9 @@ SOperatorInfo* createCacherowsScanOperator(SLastRowScanPhysiNode* pScanNode, SRe
STableKeyInfo* pList = tableListGetInfo(pTableListInfo, 0);
uint64_t suid = tableListGetSuid(pTableListInfo);
- code = tsdbCacherowsReaderOpen(pInfo->readHandle.vnode, pInfo->retrieveType, pList, totalTables,
- taosArrayGetSize(pInfo->matchInfo.pList), pCidList, pInfo->pSlotIds, suid,
- &pInfo->pLastrowReader, pTaskInfo->id.str);
+ code = pInfo->readHandle.api.cacheFn.openReader(pInfo->readHandle.vnode, pInfo->retrieveType, pList, totalTables,
+ taosArrayGetSize(pInfo->matchInfo.pList), pCidList, pInfo->pSlotIds,
+ suid, &pInfo->pLastrowReader, pTaskInfo->id.str);
if (code != TSDB_CODE_SUCCESS) {
goto _error;
}
@@ -124,7 +126,7 @@ SOperatorInfo* createCacherowsScanOperator(SLastRowScanPhysiNode* pScanNode, SRe
if (pScanNode->scan.pScanPseudoCols != NULL) {
SExprSupp* p = &pInfo->pseudoExprSup;
p->pExprInfo = createExprInfo(pScanNode->scan.pScanPseudoCols, NULL, &p->numOfExprs);
- p->pCtx = createSqlFunctionCtx(p->pExprInfo, p->numOfExprs, &p->rowEntryInfoOffset);
+ p->pCtx = createSqlFunctionCtx(p->pExprInfo, p->numOfExprs, &p->rowEntryInfoOffset, &pTaskInfo->storageAPI.functionStore);
}
setOperatorInfo(pOperator, "CachedRowScanOperator", QUERY_NODE_PHYSICAL_PLAN_LAST_ROW_SCAN, false, OP_NOT_OPENED,
@@ -172,7 +174,7 @@ SSDataBlock* doScanCache(SOperatorInfo* pOperator) {
blockDataCleanup(pInfo->pBufferredRes);
taosArrayClear(pInfo->pUidList);
- int32_t code = tsdbRetrieveCacheRows(pInfo->pLastrowReader, pInfo->pBufferredRes, pInfo->pSlotIds,
+ int32_t code = pInfo->readHandle.api.cacheFn.retrieveRows(pInfo->pLastrowReader, pInfo->pBufferredRes, pInfo->pSlotIds,
pInfo->pDstSlotIds, pInfo->pUidList);
if (code != TSDB_CODE_SUCCESS) {
T_LONG_JMP(pTaskInfo->env, code);
@@ -239,7 +241,7 @@ SSDataBlock* doScanCache(SOperatorInfo* pOperator) {
}
if (NULL == pInfo->pLastrowReader) {
- code = tsdbCacherowsReaderOpen(pInfo->readHandle.vnode, pInfo->retrieveType, pList, num,
+ code = pInfo->readHandle.api.cacheFn.openReader(pInfo->readHandle.vnode, pInfo->retrieveType, pList, num,
taosArrayGetSize(pInfo->matchInfo.pList), pInfo->pCidList, pInfo->pSlotIds, suid, &pInfo->pLastrowReader,
pTaskInfo->id.str);
if (code != TSDB_CODE_SUCCESS) {
@@ -248,12 +250,12 @@ SSDataBlock* doScanCache(SOperatorInfo* pOperator) {
continue;
}
} else {
- tsdbReuseCacherowsReader(pInfo->pLastrowReader, pList, num);
+ pInfo->readHandle.api.cacheFn.reuseReader(pInfo->pLastrowReader, pList, num);
}
taosArrayClear(pInfo->pUidList);
- code = tsdbRetrieveCacheRows(pInfo->pLastrowReader, pInfo->pRes, pInfo->pSlotIds, pInfo->pDstSlotIds,
+ code = pInfo->readHandle.api.cacheFn.retrieveRows(pInfo->pLastrowReader, pInfo->pRes, pInfo->pSlotIds, pInfo->pDstSlotIds,
pInfo->pUidList);
if (code != TSDB_CODE_SUCCESS) {
T_LONG_JMP(pTaskInfo->env, code);
@@ -287,7 +289,7 @@ SSDataBlock* doScanCache(SOperatorInfo* pOperator) {
}
}
- pInfo->pLastrowReader = tsdbCacherowsReaderClose(pInfo->pLastrowReader);
+ pInfo->pLastrowReader = pInfo->readHandle.api.cacheFn.closeReader(pInfo->pLastrowReader);
setOperatorCompleted(pOperator);
return NULL;
}
@@ -305,7 +307,7 @@ void destroyCacheScanOperator(void* param) {
tableListDestroy(pInfo->pTableList);
if (pInfo->pLastrowReader != NULL) {
- pInfo->pLastrowReader = tsdbCacherowsReaderClose(pInfo->pLastrowReader);
+ pInfo->pLastrowReader = pInfo->readHandle.api.cacheFn.closeReader(pInfo->pLastrowReader);
}
cleanupExprSupp(&pInfo->pseudoExprSup);
diff --git a/source/libs/executor/src/dataInserter.c b/source/libs/executor/src/dataInserter.c
index d31ac0bc517f810732fc868a6371a97d356d78a7..646964ebf4eaf38b464bba680301b882ceb9d449 100644
--- a/source/libs/executor/src/dataInserter.c
+++ b/source/libs/executor/src/dataInserter.c
@@ -17,6 +17,7 @@
#include "dataSinkMgt.h"
#include "executorInt.h"
#include "planner.h"
+#include "storageapi.h"
#include "tcompression.h"
#include "tdatablock.h"
#include "tglobal.h"
@@ -428,8 +429,7 @@ int32_t createDataInserter(SDataSinkManager* pManager, const SDataSinkNode* pDat
inserter->explain = pInserterNode->explain;
int64_t suid = 0;
- int32_t code =
- tsdbGetTableSchema(inserter->pParam->readHandle->vnode, pInserterNode->tableId, &inserter->pSchema, &suid);
+ int32_t code = pManager->pAPI->metaFn.getTableSchema(inserter->pParam->readHandle->vnode, pInserterNode->tableId, &inserter->pSchema, &suid);
if (code) {
destroyDataSinker((SDataSinkHandle*)inserter);
taosMemoryFree(inserter);
diff --git a/source/libs/executor/src/dataSinkMgt.c b/source/libs/executor/src/dataSinkMgt.c
index b3cb57325ba5dee1d2d2f3b0d07304a1d1ea5ae2..3a972c1c20406d8d61fde988e4ed93140c46c0fa 100644
--- a/source/libs/executor/src/dataSinkMgt.c
+++ b/source/libs/executor/src/dataSinkMgt.c
@@ -21,8 +21,9 @@
static SDataSinkManager gDataSinkManager = {0};
SDataSinkStat gDataSinkStat = {0};
-int32_t dsDataSinkMgtInit(SDataSinkMgtCfg* cfg) {
+int32_t dsDataSinkMgtInit(SDataSinkMgtCfg* cfg, SStorageAPI* pAPI) {
gDataSinkManager.cfg = *cfg;
+ gDataSinkManager.pAPI = pAPI;
return 0; // to avoid compiler eror
}
diff --git a/source/libs/executor/src/eventwindowoperator.c b/source/libs/executor/src/eventwindowoperator.c
index 956d5b714d1fbf98f7aec024605befc31621eeef..b5dea6d94dd79d16cda1597b95eb345746c80483 100644
--- a/source/libs/executor/src/eventwindowoperator.c
+++ b/source/libs/executor/src/eventwindowoperator.c
@@ -92,7 +92,7 @@ SOperatorInfo* createEventwindowOperatorInfo(SOperatorInfo* downstream, SPhysiNo
if (pEventWindowNode->window.pExprs != NULL) {
int32_t numOfScalarExpr = 0;
SExprInfo* pScalarExprInfo = createExprInfo(pEventWindowNode->window.pExprs, NULL, &numOfScalarExpr);
- code = initExprSupp(&pInfo->scalarSup, pScalarExprInfo, numOfScalarExpr);
+ code = initExprSupp(&pInfo->scalarSup, pScalarExprInfo, numOfScalarExpr, &pTaskInfo->storageAPI.functionStore);
if (code != TSDB_CODE_SUCCESS) {
goto _error;
}
@@ -110,7 +110,7 @@ SOperatorInfo* createEventwindowOperatorInfo(SOperatorInfo* downstream, SPhysiNo
initResultSizeInfo(&pOperator->resultInfo, 4096);
code = initAggSup(&pOperator->exprSupp, &pInfo->aggSup, pExprInfo, num, keyBufSize, pTaskInfo->id.str,
- pTaskInfo->streamInfo.pState);
+ pTaskInfo->streamInfo.pState, &pTaskInfo->storageAPI.functionStore);
if (code != TSDB_CODE_SUCCESS) {
goto _error;
}
diff --git a/source/libs/executor/src/executil.c b/source/libs/executor/src/executil.c
index fa46715c2260ecc71b8ee275626dd7cf15835231..331a2fa7ab23023348c66922fd5edf8fc00f94d6 100644
--- a/source/libs/executor/src/executil.c
+++ b/source/libs/executor/src/executil.c
@@ -27,6 +27,7 @@
#include "executorInt.h"
#include "querytask.h"
#include "tcompression.h"
+#include "storageapi.h"
typedef struct tagFilterAssist {
SHashObj* colHash;
@@ -41,13 +42,13 @@ typedef enum {
} FilterCondType;
static FilterCondType checkTagCond(SNode* cond);
-static int32_t optimizeTbnameInCond(void* metaHandle, int64_t suid, SArray* list, SNode* pTagCond);
-static int32_t optimizeTbnameInCondImpl(void* metaHandle, SArray* list, SNode* pTagCond);
+static int32_t optimizeTbnameInCond(void* metaHandle, int64_t suid, SArray* list, SNode* pTagCond, SStorageAPI* pAPI);
+static int32_t optimizeTbnameInCondImpl(void* metaHandle, SArray* list, SNode* pTagCond, SStorageAPI* pStoreAPI);
-static int32_t getTableList(void* metaHandle, void* pVnode, SScanPhysiNode* pScanNode, SNode* pTagCond,
- SNode* pTagIndexCond, STableListInfo* pListInfo, uint8_t* digest, const char* idstr);
+static int32_t getTableList(void* pVnode, SScanPhysiNode* pScanNode, SNode* pTagCond,
+ SNode* pTagIndexCond, STableListInfo* pListInfo, uint8_t* digest, const char* idstr, SStorageAPI* pStorageAPI);
static SSDataBlock* createTagValBlockForFilter(SArray* pColList, int32_t numOfTables, SArray* pUidTagList,
- void* metaHandle);
+ void* pVnode, SStorageAPI* pStorageAPI);
static int64_t getLimit(const SNode* pLimit) { return NULL == pLimit ? -1 : ((SLimitNode*)pLimit)->limit; }
static int64_t getOffset(const SNode* pLimit) { return NULL == pLimit ? -1 : ((SLimitNode*)pLimit)->offset; }
@@ -135,7 +136,7 @@ void initGroupedResultInfo(SGroupResInfo* pGroupResInfo, SSHashObj* pHashmap, in
size_t keyLen = 0;
int32_t iter = 0;
- int32_t bufLen = 0, offset = 0;
+ int64_t bufLen = 0, offset = 0;
// todo move away and record this during create window
while ((pData = tSimpleHashIterate(pHashmap, pData, &iter)) != NULL) {
@@ -262,7 +263,7 @@ EDealRes doTranslateTagExpr(SNode** pNode, void* pContext) {
STagVal tagVal = {0};
tagVal.cid = pSColumnNode->colId;
- const char* p = metaGetTableTagVal(mr->me.ctbEntry.pTags, pSColumnNode->node.resType.type, &tagVal);
+ const char* p = mr->pAPI->extractTagVal(mr->me.ctbEntry.pTags, pSColumnNode->node.resType.type, &tagVal);
if (p == NULL) {
res->node.resType.type = TSDB_DATA_TYPE_NULL;
} else if (pSColumnNode->node.resType.type == TSDB_DATA_TYPE_JSON) {
@@ -301,14 +302,14 @@ EDealRes doTranslateTagExpr(SNode** pNode, void* pContext) {
return DEAL_RES_CONTINUE;
}
-int32_t isQualifiedTable(STableKeyInfo* info, SNode* pTagCond, void* metaHandle, bool* pQualified) {
+int32_t isQualifiedTable(STableKeyInfo* info, SNode* pTagCond, void* metaHandle, bool* pQualified, SStorageAPI *pAPI) {
int32_t code = TSDB_CODE_SUCCESS;
SMetaReader mr = {0};
- metaReaderInit(&mr, metaHandle, 0);
- code = metaGetTableEntryByUidCache(&mr, info->uid);
+ pAPI->metaReaderFn.initReader(&mr, metaHandle, 0, &pAPI->metaFn);
+ code = pAPI->metaReaderFn.getEntryGetUidCache(&mr, info->uid);
if (TSDB_CODE_SUCCESS != code) {
- metaReaderClear(&mr);
+ pAPI->metaReaderFn.clearReader(&mr);
*pQualified = false;
return TSDB_CODE_SUCCESS;
@@ -317,7 +318,7 @@ int32_t isQualifiedTable(STableKeyInfo* info, SNode* pTagCond, void* metaHandle,
SNode* pTagCondTmp = nodesCloneNode(pTagCond);
nodesRewriteExprPostOrder(&pTagCondTmp, doTranslateTagExpr, &mr);
- metaReaderClear(&mr);
+ pAPI->metaReaderFn.clearReader(&mr);
SNode* pNew = NULL;
code = scalarCalculateConstants(pTagCondTmp, &pNew);
@@ -435,7 +436,6 @@ static void genTagFilterDigest(const SNode* pTagCond, T_MD5_CTX* pContext) {
taosMemoryFree(payload);
}
-
static void genTbGroupDigest(const SNode* pGroup, uint8_t* filterDigest, T_MD5_CTX* pContext) {
char* payload = NULL;
int32_t len = 0;
@@ -453,8 +453,8 @@ static void genTbGroupDigest(const SNode* pGroup, uint8_t* filterDigest, T_MD5_C
taosMemoryFree(payload);
}
-
-int32_t getColInfoResultForGroupby(void* metaHandle, SNodeList* group, STableListInfo* pTableListInfo, uint8_t *digest) {
+int32_t getColInfoResultForGroupby(void* pVnode, SNodeList* group, STableListInfo* pTableListInfo, uint8_t* digest,
+ SStorageAPI* pAPI) {
int32_t code = TSDB_CODE_SUCCESS;
SArray* pBlockList = NULL;
SSDataBlock* pResBlock = NULL;
@@ -465,7 +465,7 @@ int32_t getColInfoResultForGroupby(void* metaHandle, SNodeList* group, STableLis
int32_t rows = taosArrayGetSize(pTableListInfo->pTableList);
if (rows == 0) {
- return TDB_CODE_SUCCESS;
+ return TSDB_CODE_SUCCESS;
}
tagFilterAssist ctx = {0};
@@ -492,18 +492,19 @@ int32_t getColInfoResultForGroupby(void* metaHandle, SNodeList* group, STableLis
if (tsTagFilterCache) {
SNodeListNode* listNode = (SNodeListNode*)nodesMakeNode(QUERY_NODE_NODE_LIST);
listNode->pNodeList = group;
- genTbGroupDigest((SNode *)listNode, digest, &context);
+ genTbGroupDigest((SNode*)listNode, digest, &context);
nodesFree(listNode);
-
- metaGetCachedTbGroup(metaHandle, pTableListInfo->idInfo.suid, context.digest, tListLen(context.digest), &tableList);
+
+ pAPI->metaFn.metaGetCachedTbGroup(pVnode, pTableListInfo->idInfo.suid, context.digest, tListLen(context.digest), &tableList);
if (tableList) {
taosArrayDestroy(pTableListInfo->pTableList);
pTableListInfo->pTableList = tableList;
- qDebug("retrieve tb group list from cache, numOfTables:%d", (int32_t)taosArrayGetSize(pTableListInfo->pTableList));
+ qDebug("retrieve tb group list from cache, numOfTables:%d",
+ (int32_t)taosArrayGetSize(pTableListInfo->pTableList));
goto end;
}
}
-
+
pUidTagList = taosArrayInit(8, sizeof(STUidTagInfo));
for (int32_t i = 0; i < rows; ++i) {
STableKeyInfo* pkeyInfo = taosArrayGet(pTableListInfo->pTableList, i);
@@ -511,14 +512,13 @@ int32_t getColInfoResultForGroupby(void* metaHandle, SNodeList* group, STableLis
taosArrayPush(pUidTagList, &info);
}
- // int64_t stt = taosGetTimestampUs();
- code = metaGetTableTags(metaHandle, pTableListInfo->idInfo.suid, pUidTagList);
+ code = pAPI->metaFn.getTableTags(pVnode, pTableListInfo->idInfo.suid, pUidTagList);
if (code != TSDB_CODE_SUCCESS) {
goto end;
}
int32_t numOfTables = taosArrayGetSize(pUidTagList);
- pResBlock = createTagValBlockForFilter(ctx.cInfoList, numOfTables, pUidTagList, metaHandle);
+ pResBlock = createTagValBlockForFilter(ctx.cInfoList, numOfTables, pUidTagList, pVnode, pAPI);
if (pResBlock == NULL) {
code = terrno;
goto end;
@@ -632,9 +632,9 @@ int32_t getColInfoResultForGroupby(void* metaHandle, SNodeList* group, STableLis
if (tsTagFilterCache) {
tableList = taosArrayDup(pTableListInfo->pTableList, NULL);
- metaPutTbGroupToCache(metaHandle, pTableListInfo->idInfo.suid, context.digest, tListLen(context.digest), tableList, taosArrayGetSize(tableList) * sizeof(STableKeyInfo));
+ pAPI->metaFn.metaPutTbGroupToCache(pVnode, pTableListInfo->idInfo.suid, context.digest, tListLen(context.digest), tableList, taosArrayGetSize(tableList) * sizeof(STableKeyInfo));
}
-
+
// int64_t st2 = taosGetTimestampUs();
// qDebug("calculate tag block rows:%d, cost:%ld us", rows, st2-st1);
@@ -734,12 +734,12 @@ static FilterCondType checkTagCond(SNode* cond) {
return FILTER_OTHER;
}
-static int32_t optimizeTbnameInCond(void* metaHandle, int64_t suid, SArray* list, SNode* cond) {
+static int32_t optimizeTbnameInCond(void* pVnode, int64_t suid, SArray* list, SNode* cond, SStorageAPI* pAPI) {
int32_t ret = -1;
int32_t ntype = nodeType(cond);
if (ntype == QUERY_NODE_OPERATOR) {
- ret = optimizeTbnameInCondImpl(metaHandle, list, cond);
+ ret = optimizeTbnameInCondImpl(pVnode, list, cond, pAPI);
}
if (ntype != QUERY_NODE_LOGIC_CONDITION || ((SLogicConditionNode*)cond)->condType != LOGIC_COND_TYPE_AND) {
@@ -758,7 +758,7 @@ static int32_t optimizeTbnameInCond(void* metaHandle, int64_t suid, SArray* list
SListCell* cell = pList->pHead;
for (int i = 0; i < len; i++) {
if (cell == NULL) break;
- if (optimizeTbnameInCondImpl(metaHandle, list, cell->pNode) == 0) {
+ if (optimizeTbnameInCondImpl(pVnode, list, cell->pNode, pAPI) == 0) {
hasTbnameCond = true;
break;
}
@@ -769,14 +769,14 @@ static int32_t optimizeTbnameInCond(void* metaHandle, int64_t suid, SArray* list
taosArrayRemoveDuplicate(list, filterTableInfoCompare, NULL);
if (hasTbnameCond) {
- ret = metaGetTableTagsByUids(metaHandle, suid, list);
+ ret = pAPI->metaFn.getTableTagsByUid(pVnode, suid, list);
}
return ret;
}
// only return uid that does not contained in pExistedUidList
-static int32_t optimizeTbnameInCondImpl(void* metaHandle, SArray* pExistedUidList, SNode* pTagCond) {
+static int32_t optimizeTbnameInCondImpl(void* pVnode, SArray* pExistedUidList, SNode* pTagCond, SStorageAPI* pStoreAPI) {
if (nodeType(pTagCond) != QUERY_NODE_OPERATOR) {
return -1;
}
@@ -813,9 +813,9 @@ static int32_t optimizeTbnameInCondImpl(void* metaHandle, SArray* pExistedUidLis
char* name = taosArrayGetP(pTbList, i);
uint64_t uid = 0;
- if (metaGetTableUidByName(metaHandle, name, &uid) == 0) {
+ if (pStoreAPI->metaFn.getTableUidByName(pVnode, name, &uid) == 0) {
ETableType tbType = TSDB_TABLE_MAX;
- if (metaGetTableTypeByName(metaHandle, name, &tbType) == 0 && tbType == TSDB_CHILD_TABLE) {
+ if (pStoreAPI->metaFn.getTableTypeByName(pVnode, name, &tbType) == 0 && tbType == TSDB_CHILD_TABLE) {
if (NULL == uHash || taosHashGet(uHash, &uid, sizeof(uid)) == NULL) {
STUidTagInfo s = {.uid = uid, .name = name, .pTagVal = NULL};
taosArrayPush(pExistedUidList, &s);
@@ -839,9 +839,8 @@ static int32_t optimizeTbnameInCondImpl(void* metaHandle, SArray* pExistedUidLis
return -1;
}
-
static SSDataBlock* createTagValBlockForFilter(SArray* pColList, int32_t numOfTables, SArray* pUidTagList,
- void* metaHandle) {
+ void* pVnode, SStorageAPI* pStorageAPI) {
SSDataBlock* pResBlock = createDataBlock();
if (pResBlock == NULL) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
@@ -876,7 +875,7 @@ static SSDataBlock* createTagValBlockForFilter(SArray* pColList, int32_t numOfTa
if (p1->name != NULL) {
STR_TO_VARSTR(str, p1->name);
} else { // name is not retrieved during filter
- metaGetTableNameByUid(metaHandle, p1->uid, str);
+ pStorageAPI->metaFn.getTableNameByUid(pVnode, p1->uid, str);
}
colDataSetVal(pColInfo, i, str, false);
@@ -889,7 +888,7 @@ static SSDataBlock* createTagValBlockForFilter(SArray* pColList, int32_t numOfTa
if (p1->pTagVal == NULL) {
colDataSetNULL(pColInfo, i);
} else {
- const char* p = metaGetTableTagVal(p1->pTagVal, pColInfo->info.type, &tagVal);
+ const char* p = pStorageAPI->metaFn.extractTagVal(p1->pTagVal, pColInfo->info.type, &tagVal);
if (p == NULL || (pColInfo->info.type == TSDB_DATA_TYPE_JSON && ((STag*)p)->nTag == 0)) {
colDataSetNULL(pColInfo, i);
@@ -949,18 +948,19 @@ static void copyExistedUids(SArray* pUidTagList, const SArray* pUidList) {
}
}
-static int32_t doFilterByTagCond(STableListInfo* pListInfo, SArray* pUidList, SNode* pTagCond, void* metaHandle,
- SIdxFltStatus status) {
+static int32_t doFilterByTagCond(STableListInfo* pListInfo, SArray* pUidList, SNode* pTagCond, void* pVnode,
+ SIdxFltStatus status, SStorageAPI* pAPI) {
if (pTagCond == NULL) {
return TSDB_CODE_SUCCESS;
}
- terrno = TDB_CODE_SUCCESS;
+ terrno = TSDB_CODE_SUCCESS;
int32_t code = TSDB_CODE_SUCCESS;
SArray* pBlockList = NULL;
SSDataBlock* pResBlock = NULL;
SScalarParam output = {0};
+ SArray* pUidTagList = NULL;
tagFilterAssist ctx = {0};
ctx.colHash = taosHashInit(4, taosGetDefaultHashFunction(TSDB_DATA_TYPE_SMALLINT), false, HASH_NO_LOCK);
@@ -980,12 +980,12 @@ static int32_t doFilterByTagCond(STableListInfo* pListInfo, SArray* pUidList, SN
SDataType type = {.type = TSDB_DATA_TYPE_BOOL, .bytes = sizeof(bool)};
// int64_t stt = taosGetTimestampUs();
- SArray* pUidTagList = taosArrayInit(10, sizeof(STUidTagInfo));
+ pUidTagList = taosArrayInit(10, sizeof(STUidTagInfo));
copyExistedUids(pUidTagList, pUidList);
FilterCondType condType = checkTagCond(pTagCond);
- int32_t filter = optimizeTbnameInCond(metaHandle, pListInfo->idInfo.suid, pUidTagList, pTagCond);
+ int32_t filter = optimizeTbnameInCond(pVnode, pListInfo->idInfo.suid, pUidTagList, pTagCond, pAPI);
if (filter == 0) { // tbname in filter is activated, do nothing and return
taosArrayClear(pUidList);
@@ -998,9 +998,9 @@ static int32_t doFilterByTagCond(STableListInfo* pListInfo, SArray* pUidList, SN
terrno = 0;
} else {
if ((condType == FILTER_NO_LOGIC || condType == FILTER_AND) && status != SFLT_NOT_INDEX) {
- code = metaGetTableTagsByUids(metaHandle, pListInfo->idInfo.suid, pUidTagList);
+ code = pAPI->metaFn.getTableTagsByUid(pVnode, pListInfo->idInfo.suid, pUidTagList);
} else {
- code = metaGetTableTags(metaHandle, pListInfo->idInfo.suid, pUidTagList);
+ code = pAPI->metaFn.getTableTags(pVnode, pListInfo->idInfo.suid, pUidTagList);
}
if (code != TSDB_CODE_SUCCESS) {
qError("failed to get table tags from meta, reason:%s, suid:%" PRIu64, tstrerror(code), pListInfo->idInfo.suid);
@@ -1014,7 +1014,7 @@ static int32_t doFilterByTagCond(STableListInfo* pListInfo, SArray* pUidList, SN
goto end;
}
- pResBlock = createTagValBlockForFilter(ctx.cInfoList, numOfTables, pUidTagList, metaHandle);
+ pResBlock = createTagValBlockForFilter(ctx.cInfoList, numOfTables, pUidTagList, pVnode, pAPI);
if (pResBlock == NULL) {
code = terrno;
goto end;
@@ -1052,8 +1052,8 @@ end:
return code;
}
-int32_t getTableList(void* metaHandle, void* pVnode, SScanPhysiNode* pScanNode, SNode* pTagCond, SNode* pTagIndexCond,
- STableListInfo* pListInfo, uint8_t* digest, const char* idstr) {
+int32_t getTableList(void* pVnode, SScanPhysiNode* pScanNode, SNode* pTagCond, SNode* pTagIndexCond,
+ STableListInfo* pListInfo, uint8_t* digest, const char* idstr, SStorageAPI* pStorageAPI) {
int32_t code = TSDB_CODE_SUCCESS;
size_t numOfTables = 0;
@@ -1065,10 +1065,10 @@ int32_t getTableList(void* metaHandle, void* pVnode, SScanPhysiNode* pScanNode,
SIdxFltStatus status = SFLT_NOT_INDEX;
if (pScanNode->tableType != TSDB_SUPER_TABLE) {
pListInfo->idInfo.uid = pScanNode->uid;
- if (metaIsTableExist(metaHandle, pScanNode->uid)) {
+ if (pStorageAPI->metaFn.isTableExisted(pVnode, pScanNode->uid)) {
taosArrayPush(pUidList, &pScanNode->uid);
}
- code = doFilterByTagCond(pListInfo, pUidList, pTagCond, metaHandle, status);
+ code = doFilterByTagCond(pListInfo, pUidList, pTagCond, pVnode, status, pStorageAPI);
if (code != TSDB_CODE_SUCCESS) {
goto _end;
}
@@ -1080,7 +1080,7 @@ int32_t getTableList(void* metaHandle, void* pVnode, SScanPhysiNode* pScanNode,
genTagFilterDigest(pTagCond, &context);
bool acquired = false;
- metaGetCachedTableUidList(metaHandle, pScanNode->suid, context.digest, tListLen(context.digest), pUidList,
+ pStorageAPI->metaFn.getCachedTableList(pVnode, pScanNode->suid, context.digest, tListLen(context.digest), pUidList,
&acquired);
if (acquired) {
digest[0] = 1;
@@ -1091,26 +1091,27 @@ int32_t getTableList(void* metaHandle, void* pVnode, SScanPhysiNode* pScanNode,
}
if (!pTagCond) { // no tag filter condition exists, let's fetch all tables of this super table
- vnodeGetCtbIdList(pVnode, pScanNode->suid, pUidList);
+ pStorageAPI->metaFn.getChildTableList(pVnode, pScanNode->suid, pUidList);
} else {
// failed to find the result in the cache, let try to calculate the results
if (pTagIndexCond) {
- void* pIndex = tsdbGetIvtIdx(metaHandle);
+ void* pIndex = pStorageAPI->metaFn.getInvertIndex(pVnode);
+
SIndexMetaArg metaArg = {
- .metaEx = metaHandle, .idx = tsdbGetIdx(metaHandle), .ivtIdx = pIndex, .suid = pScanNode->uid};
+ .metaEx = pVnode, .idx = pStorageAPI->metaFn.storeGetIndexInfo(pVnode), .ivtIdx = pIndex, .suid = pScanNode->uid};
status = SFLT_NOT_INDEX;
- code = doFilterTag(pTagIndexCond, &metaArg, pUidList, &status);
+ code = doFilterTag(pTagIndexCond, &metaArg, pUidList, &status, &pStorageAPI->metaFilter);
if (code != 0 || status == SFLT_NOT_INDEX) { // temporarily disable it for performance sake
qWarn("failed to get tableIds from index, suid:%" PRIu64, pScanNode->uid);
- code = TDB_CODE_SUCCESS;
+ code = TSDB_CODE_SUCCESS;
} else {
qInfo("succ to get filter result, table num: %d", (int)taosArrayGetSize(pUidList));
}
}
}
- code = doFilterByTagCond(pListInfo, pUidList, pTagCond, metaHandle, status);
+ code = doFilterByTagCond(pListInfo, pUidList, pTagCond, pVnode, status, pStorageAPI);
if (code != TSDB_CODE_SUCCESS) {
goto _end;
}
@@ -1127,7 +1128,7 @@ int32_t getTableList(void* metaHandle, void* pVnode, SScanPhysiNode* pScanNode,
memcpy(pPayload + sizeof(int32_t), taosArrayGet(pUidList, 0), numOfTables * sizeof(uint64_t));
}
- metaUidFilterCachePut(metaHandle, pScanNode->suid, context.digest, tListLen(context.digest), pPayload, size, 1);
+// metaUidFilterCachePut(metaHandle, pScanNode->suid, context.digest, tListLen(context.digest), pPayload, size, 1);
digest[0] = 1;
memcpy(digest + 1, context.digest, tListLen(context.digest));
}
@@ -1151,6 +1152,21 @@ _end:
return code;
}
+int32_t qGetTableList(int64_t suid, void* pVnode, void* node, SArray **tableList, void* pTaskInfo){
+ SSubplan *pSubplan = (SSubplan *)node;
+ SScanPhysiNode pNode = {0};
+ pNode.suid = suid;
+ pNode.uid = suid;
+ pNode.tableType = TSDB_SUPER_TABLE;
+ STableListInfo* pTableListInfo = tableListCreate();
+ uint8_t digest[17] = {0};
+ int code = getTableList(pVnode, &pNode, pSubplan ? pSubplan->pTagCond : NULL, pSubplan ? pSubplan->pTagIndexCond : NULL, pTableListInfo, digest, "qGetTableList", &((SExecTaskInfo*)pTaskInfo)->storageAPI);
+ *tableList = pTableListInfo->pTableList;
+ pTableListInfo->pTableList = NULL;
+ tableListDestroy(pTableListInfo);
+ return code;
+}
+
size_t getTableTagsBufLen(const SNodeList* pGroups) {
size_t keyLen = 0;
@@ -1164,11 +1180,13 @@ size_t getTableTagsBufLen(const SNodeList* pGroups) {
return keyLen;
}
-int32_t getGroupIdFromTagsVal(void* pMeta, uint64_t uid, SNodeList* pGroupNode, char* keyBuf, uint64_t* pGroupId) {
+int32_t getGroupIdFromTagsVal(void* pVnode, uint64_t uid, SNodeList* pGroupNode, char* keyBuf, uint64_t* pGroupId,
+ SStorageAPI* pAPI) {
SMetaReader mr = {0};
- metaReaderInit(&mr, pMeta, 0);
- if (metaGetTableEntryByUidCache(&mr, uid) != 0) { // table not exist
- metaReaderClear(&mr);
+
+ pAPI->metaReaderFn.initReader(&mr, pVnode, 0, &pAPI->metaFn);
+ if (pAPI->metaReaderFn.getEntryGetUidCache(&mr, uid) != 0) { // table not exist
+ pAPI->metaReaderFn.clearReader(&mr);
return TSDB_CODE_PAR_TABLE_NOT_EXIST;
}
@@ -1187,7 +1205,7 @@ int32_t getGroupIdFromTagsVal(void* pMeta, uint64_t uid, SNodeList* pGroupNode,
REPLACE_NODE(pNew);
} else {
nodesDestroyList(groupNew);
- metaReaderClear(&mr);
+ pAPI->metaReaderFn.clearReader(&mr);
return code;
}
@@ -1204,7 +1222,7 @@ int32_t getGroupIdFromTagsVal(void* pMeta, uint64_t uid, SNodeList* pGroupNode,
if (tTagIsJson(data)) {
terrno = TSDB_CODE_QRY_JSON_IN_GROUP_ERROR;
nodesDestroyList(groupNew);
- metaReaderClear(&mr);
+ pAPI->metaReaderFn.clearReader(&mr);
return terrno;
}
int32_t len = getJsonValueLen(data);
@@ -1224,7 +1242,7 @@ int32_t getGroupIdFromTagsVal(void* pMeta, uint64_t uid, SNodeList* pGroupNode,
*pGroupId = calcGroupId(keyBuf, len);
nodesDestroyList(groupNew);
- metaReaderClear(&mr);
+ pAPI->metaReaderFn.clearReader(&mr);
return TSDB_CODE_SUCCESS;
}
@@ -1514,7 +1532,7 @@ static int32_t setSelectValueColumnInfo(SqlFunctionCtx* pCtx, int32_t numOfOutpu
return TSDB_CODE_OUT_OF_MEMORY;
}
- SHashObj *pSelectFuncs = taosHashInit(8, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), false, HASH_ENTRY_LOCK);
+ SHashObj* pSelectFuncs = taosHashInit(8, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), false, HASH_ENTRY_LOCK);
for (int32_t i = 0; i < numOfOutput; ++i) {
const char* pName = pCtx[i].pExpr->pExpr->_function.functionName;
if ((strcmp(pName, "_select_value") == 0) || (strcmp(pName, "_group_key") == 0)) {
@@ -1542,7 +1560,7 @@ static int32_t setSelectValueColumnInfo(SqlFunctionCtx* pCtx, int32_t numOfOutpu
return TSDB_CODE_SUCCESS;
}
-SqlFunctionCtx* createSqlFunctionCtx(SExprInfo* pExprInfo, int32_t numOfOutput, int32_t** rowEntryInfoOffset) {
+SqlFunctionCtx* createSqlFunctionCtx(SExprInfo* pExprInfo, int32_t numOfOutput, int32_t** rowEntryInfoOffset, SFunctionStateStore* pStore) {
SqlFunctionCtx* pFuncCtx = (SqlFunctionCtx*)taosMemoryCalloc(numOfOutput, sizeof(SqlFunctionCtx));
if (pFuncCtx == NULL) {
return NULL;
@@ -1605,6 +1623,7 @@ SqlFunctionCtx* createSqlFunctionCtx(SExprInfo* pExprInfo, int32_t numOfOutput,
pCtx->numOfParams = pExpr->base.numOfParams;
pCtx->param = pFunct->pParam;
pCtx->saveHandle.currentPage = -1;
+ pCtx->pStore = pStore;
}
for (int32_t i = 1; i < numOfOutput; ++i) {
@@ -1742,12 +1761,12 @@ int32_t convertFillType(int32_t mode) {
return type;
}
-static void getInitialStartTimeWindow(SInterval* pInterval, TSKEY ts, STimeWindow* w, bool ascQuery) {
+void getInitialStartTimeWindow(SInterval* pInterval, TSKEY ts, STimeWindow* w, bool ascQuery) {
if (ascQuery) {
- *w = getAlignQueryTimeWindow(pInterval, pInterval->precision, ts);
+ *w = getAlignQueryTimeWindow(pInterval, ts);
} else {
// the start position of the first time window in the endpoint that spreads beyond the queried last timestamp
- *w = getAlignQueryTimeWindow(pInterval, pInterval->precision, ts);
+ *w = getAlignQueryTimeWindow(pInterval, ts);
int64_t key = w->skey;
while (key < ts) { // moving towards end
@@ -1764,7 +1783,7 @@ static void getInitialStartTimeWindow(SInterval* pInterval, TSKEY ts, STimeWindo
static STimeWindow doCalculateTimeWindow(int64_t ts, SInterval* pInterval) {
STimeWindow w = {0};
- w.skey = taosTimeTruncate(ts, pInterval, pInterval->precision);
+ w.skey = taosTimeTruncate(ts, pInterval);
w.ekey = taosTimeAdd(w.skey, pInterval->interval, pInterval->intervalUnit, pInterval->precision) - 1;
return w;
}
@@ -1798,6 +1817,7 @@ STimeWindow getActiveTimeWindow(SDiskbasedBuf* pBuf, SResultRowInfo* pResultRowI
if (pRow) {
w = pRow->win;
}
+
// in case of typical time window, we can calculate time window directly.
if (w.skey > ts || w.ekey < ts) {
w = doCalculateTimeWindow(ts, pInterval);
@@ -1812,6 +1832,34 @@ STimeWindow getActiveTimeWindow(SDiskbasedBuf* pBuf, SResultRowInfo* pResultRowI
return w;
}
+void getNextTimeWindow(const SInterval* pInterval, STimeWindow* tw, int32_t order) {
+ int32_t factor = GET_FORWARD_DIRECTION_FACTOR(order);
+ if (!IS_CALENDAR_TIME_DURATION(pInterval->slidingUnit)) {
+ tw->skey += pInterval->sliding * factor;
+ tw->ekey = taosTimeAdd(tw->skey, pInterval->interval, pInterval->intervalUnit, pInterval->precision) - 1;
+ return;
+ }
+
+ // convert key to second
+ int64_t key = convertTimePrecision(tw->skey, pInterval->precision, TSDB_TIME_PRECISION_MILLI) / 1000;
+
+ int64_t duration = pInterval->sliding;
+ if (pInterval->slidingUnit == 'y') {
+ duration *= 12;
+ }
+
+ struct tm tm;
+ time_t t = (time_t) key;
+ taosLocalTime(&t, &tm, NULL);
+
+ int mon = (int)(tm.tm_year * 12 + tm.tm_mon + duration * factor);
+ tm.tm_year = mon / 12;
+ tm.tm_mon = mon % 12;
+ tw->skey = convertTimePrecision((int64_t)taosMktime(&tm) * 1000LL, TSDB_TIME_PRECISION_MILLI, pInterval->precision);
+
+ tw->ekey = taosTimeAdd(tw->skey, pInterval->interval, pInterval->intervalUnit, pInterval->precision) - 1;
+}
+
bool hasLimitOffsetInfo(SLimitInfo* pLimitInfo) {
return (pLimitInfo->limit.limit != -1 || pLimitInfo->limit.offset != -1 || pLimitInfo->slimit.limit != -1 ||
pLimitInfo->slimit.offset != -1);
@@ -1841,9 +1889,7 @@ uint64_t tableListGetSize(const STableListInfo* pTableList) {
return taosArrayGetSize(pTableList->pTableList);
}
-uint64_t tableListGetSuid(const STableListInfo* pTableList) {
- return pTableList->idInfo.suid;
-}
+uint64_t tableListGetSuid(const STableListInfo* pTableList) { return pTableList->idInfo.suid; }
STableKeyInfo* tableListGetInfo(const STableListInfo* pTableList, int32_t index) {
if (taosArrayGetSize(pTableList->pTableList) == 0) {
@@ -2030,11 +2076,11 @@ static int32_t sortTableGroup(STableListInfo* pTableListInfo) {
memcpy(pTableListInfo->groupOffset, taosArrayGet(pList, 0), sizeof(int32_t) * pTableListInfo->numOfOuputGroups);
taosArrayDestroy(pList);
- return TDB_CODE_SUCCESS;
+ return TSDB_CODE_SUCCESS;
}
int32_t buildGroupIdMapForAllTables(STableListInfo* pTableListInfo, SReadHandle* pHandle, SScanPhysiNode* pScanNode, SNodeList* group,
- bool groupSort, uint8_t *digest) {
+ bool groupSort, uint8_t *digest, SStorageAPI* pAPI) {
int32_t code = TSDB_CODE_SUCCESS;
bool groupByTbname = groupbyTbname(group);
@@ -2054,7 +2100,7 @@ int32_t buildGroupIdMapForAllTables(STableListInfo* pTableListInfo, SReadHandle*
pTableListInfo->numOfOuputGroups = 1;
}
} else {
- code = getColInfoResultForGroupby(pHandle->meta, group, pTableListInfo, digest);
+ code = getColInfoResultForGroupby(pHandle->vnode, group, pTableListInfo, digest, pAPI);
if (code != TSDB_CODE_SUCCESS) {
return code;
}
@@ -2086,7 +2132,7 @@ int32_t createScanTableListInfo(SScanPhysiNode* pScanNode, SNodeList* pGroupTags
}
uint8_t digest[17] = {0};
- int32_t code = getTableList(pHandle->meta, pHandle->vnode, pScanNode, pTagCond, pTagIndexCond, pTableListInfo, digest, idStr);
+ int32_t code = getTableList(pHandle->vnode, pScanNode, pTagCond, pTagIndexCond, pTableListInfo, digest, idStr, &pTaskInfo->storageAPI);
if (code != TSDB_CODE_SUCCESS) {
qError("failed to getTableList, code: %s", tstrerror(code));
return code;
@@ -2104,7 +2150,7 @@ int32_t createScanTableListInfo(SScanPhysiNode* pScanNode, SNodeList* pGroupTags
return TSDB_CODE_SUCCESS;
}
- code = buildGroupIdMapForAllTables(pTableListInfo, pHandle, pScanNode, pGroupTags, groupSort, digest);
+ code = buildGroupIdMapForAllTables(pTableListInfo, pHandle, pScanNode, pGroupTags, groupSort, digest, &pTaskInfo->storageAPI);
if (code != TSDB_CODE_SUCCESS) {
return code;
}
diff --git a/source/libs/executor/src/executor.c b/source/libs/executor/src/executor.c
index b6d4615997ec5a62dc92e98372673f77c6f4b3df..c8b66836d53e9feeffa41d8a9cbe781c30cb409c 100644
--- a/source/libs/executor/src/executor.c
+++ b/source/libs/executor/src/executor.c
@@ -14,6 +14,8 @@
*/
#include "executor.h"
+#include <stdint.h>
+#include <stdlib.h>
#include "executorInt.h"
#include "operator.h"
#include "planner.h"
@@ -21,7 +23,8 @@
#include "tdatablock.h"
#include "tref.h"
#include "tudf.h"
-#include "vnode.h"
+
+#include "storageapi.h"
static TdThreadOnce initPoolOnce = PTHREAD_ONCE_INIT;
int32_t exchangeObjRefPool = -1;
@@ -129,7 +132,8 @@ static int32_t doSetStreamBlock(SOperatorInfo* pOperator, void* input, size_t nu
pOperator->status = OP_NOT_OPENED;
SStreamScanInfo* pInfo = pOperator->info;
- qDebug("s-task:%s set source blocks:%d", id, (int32_t)numOfBlocks);
+
+ qDebug("s-task:%s in this batch, all %d blocks need to be processed and dump results", id, (int32_t)numOfBlocks);
ASSERT(pInfo->validBlockIndex == 0 && taosArrayGetSize(pInfo->pBlockLists) == 0);
if (type == STREAM_INPUT__MERGED_SUBMIT) {
@@ -137,6 +141,7 @@ static int32_t doSetStreamBlock(SOperatorInfo* pOperator, void* input, size_t nu
SPackedData* pReq = POINTER_SHIFT(input, i * sizeof(SPackedData));
taosArrayPush(pInfo->pBlockLists, pReq);
}
+
pInfo->blockType = STREAM_INPUT__DATA_SUBMIT;
} else if (type == STREAM_INPUT__DATA_SUBMIT) {
taosArrayPush(pInfo->pBlockLists, input);
@@ -147,6 +152,7 @@ static int32_t doSetStreamBlock(SOperatorInfo* pOperator, void* input, size_t nu
SPackedData tmp = { .pDataBlock = pDataBlock };
taosArrayPush(pInfo->pBlockLists, &tmp);
}
+
pInfo->blockType = STREAM_INPUT__DATA_BLOCK;
} else {
ASSERT(0);
@@ -156,18 +162,18 @@ static int32_t doSetStreamBlock(SOperatorInfo* pOperator, void* input, size_t nu
}
}
-void doSetTaskId(SOperatorInfo* pOperator) {
+void doSetTaskId(SOperatorInfo* pOperator, SStorageAPI *pAPI) {
SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo;
if (pOperator->operatorType == QUERY_NODE_PHYSICAL_PLAN_STREAM_SCAN) {
SStreamScanInfo* pStreamScanInfo = pOperator->info;
if (pStreamScanInfo->pTableScanOp != NULL) {
STableScanInfo* pScanInfo = pStreamScanInfo->pTableScanOp->info;
if (pScanInfo->base.dataReader != NULL) {
- tsdbReaderSetId(pScanInfo->base.dataReader, pTaskInfo->id.str);
+ pAPI->tsdReader.tsdSetReaderTaskId(pScanInfo->base.dataReader, pTaskInfo->id.str);
}
}
} else {
- doSetTaskId(pOperator->pDownstream[0]);
+ doSetTaskId(pOperator->pDownstream[0], pAPI);
}
}
@@ -177,9 +183,14 @@ void qSetTaskId(qTaskInfo_t tinfo, uint64_t taskId, uint64_t queryId) {
buildTaskId(taskId, queryId, pTaskInfo->id.str);
// set the idstr for tsdbReader
- doSetTaskId(pTaskInfo->pRoot);
+ doSetTaskId(pTaskInfo->pRoot, &pTaskInfo->storageAPI);
}
+//void qSetTaskCode(qTaskInfo_t tinfo, int32_t code) {
+// SExecTaskInfo* pTaskInfo = tinfo;
+// pTaskInfo->code = code;
+//}
+
int32_t qSetStreamOpOpen(qTaskInfo_t tinfo) {
if (tinfo == NULL) {
return TSDB_CODE_APP_ERROR;
@@ -249,7 +260,7 @@ int32_t qSetSMAInput(qTaskInfo_t tinfo, const void* pBlocks, size_t numOfBlocks,
qTaskInfo_t qCreateQueueExecTaskInfo(void* msg, SReadHandle* pReaderHandle, int32_t vgId, int32_t* numOfCols,
uint64_t id) {
if (msg == NULL) { // create raw scan
- SExecTaskInfo* pTaskInfo = doCreateTask(0, id, vgId, OPTR_EXEC_MODEL_QUEUE);
+ SExecTaskInfo* pTaskInfo = doCreateTask(0, id, vgId, OPTR_EXEC_MODEL_QUEUE, &pReaderHandle->api);
if (NULL == pTaskInfo) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
return NULL;
@@ -261,6 +272,7 @@ qTaskInfo_t qCreateQueueExecTaskInfo(void* msg, SReadHandle* pReaderHandle, int3
return NULL;
}
+ pTaskInfo->storageAPI = pReaderHandle->api;
qDebug("create raw scan task info completed, vgId:%d, %s", vgId, GET_TASKID(pTaskInfo));
return pTaskInfo;
}
@@ -320,7 +332,8 @@ qTaskInfo_t qCreateStreamExecTaskInfo(void* msg, SReadHandle* readers, int32_t v
return pTaskInfo;
}
-static SArray* filterUnqualifiedTables(const SStreamScanInfo* pScanInfo, const SArray* tableIdList, const char* idstr) {
+static SArray* filterUnqualifiedTables(const SStreamScanInfo* pScanInfo, const SArray* tableIdList, const char* idstr,
+ SStorageAPI* pAPI) {
SArray* qa = taosArrayInit(4, sizeof(tb_uid_t));
int32_t numOfUids = taosArrayGetSize(tableIdList);
if (numOfUids == 0) {
@@ -336,11 +349,11 @@ static SArray* filterUnqualifiedTables(const SStreamScanInfo* pScanInfo, const S
// let's discard the tables those are not created according to the queried super table.
SMetaReader mr = {0};
- metaReaderInit(&mr, pScanInfo->readHandle.meta, 0);
+ pAPI->metaReaderFn.initReader(&mr, pScanInfo->readHandle.vnode, 0, &pAPI->metaFn);
for (int32_t i = 0; i < numOfUids; ++i) {
uint64_t* id = (uint64_t*)taosArrayGet(tableIdList, i);
- int32_t code = metaGetTableEntryByUid(&mr, *id);
+ int32_t code = pAPI->metaReaderFn.getTableEntryByUid(&mr, *id);
if (code != TSDB_CODE_SUCCESS) {
qError("failed to get table meta, uid:%" PRIu64 " code:%s, %s", *id, tstrerror(terrno), idstr);
continue;
@@ -368,7 +381,7 @@ static SArray* filterUnqualifiedTables(const SStreamScanInfo* pScanInfo, const S
if (pScanInfo->pTagCond != NULL) {
bool qualified = false;
STableKeyInfo info = {.groupId = 0, .uid = mr.me.uid};
- code = isQualifiedTable(&info, pScanInfo->pTagCond, pScanInfo->readHandle.meta, &qualified);
+ code = isQualifiedTable(&info, pScanInfo->pTagCond, pScanInfo->readHandle.vnode, &qualified, pAPI);
if (code != TSDB_CODE_SUCCESS) {
qError("failed to filter new table, uid:0x%" PRIx64 ", %s", info.uid, idstr);
continue;
@@ -383,7 +396,7 @@ static SArray* filterUnqualifiedTables(const SStreamScanInfo* pScanInfo, const S
taosArrayPush(qa, id);
}
- metaReaderClear(&mr);
+ pAPI->metaReaderFn.clearReader(&mr);
return qa;
}
@@ -393,7 +406,7 @@ int32_t qUpdateTableListForStreamScanner(qTaskInfo_t tinfo, const SArray* tableI
int32_t code = 0;
if (isAdd) {
- qDebug("add %d tables id into query list, %s", (int32_t)taosArrayGetSize(tableIdList), id);
+ qDebug("try to add %d tables id into query list, %s", (int32_t)taosArrayGetSize(tableIdList), id);
}
// traverse to the stream scanner node to add this table id
@@ -401,10 +414,10 @@ int32_t qUpdateTableListForStreamScanner(qTaskInfo_t tinfo, const SArray* tableI
SStreamScanInfo* pScanInfo = pInfo->info;
if (isAdd) { // add new table id
- SArray* qa = filterUnqualifiedTables(pScanInfo, tableIdList, id);
+ SArray* qa = filterUnqualifiedTables(pScanInfo, tableIdList, id, &pTaskInfo->storageAPI);
int32_t numOfQualifiedTables = taosArrayGetSize(qa);
qDebug("%d qualified child tables added into stream scanner, %s", numOfQualifiedTables, id);
- code = tqReaderAddTbUidList(pScanInfo->tqReader, qa);
+ code = pTaskInfo->storageAPI.tqReaderFn.tqReaderAddTables(pScanInfo->tqReader, qa);
if (code != TSDB_CODE_SUCCESS) {
taosArrayDestroy(qa);
return code;
@@ -433,8 +446,8 @@ int32_t qUpdateTableListForStreamScanner(qTaskInfo_t tinfo, const SArray* tableI
if (assignUid) {
keyInfo.groupId = keyInfo.uid;
} else {
- code = getGroupIdFromTagsVal(pScanInfo->readHandle.meta, keyInfo.uid, pScanInfo->pGroupTags, keyBuf,
- &keyInfo.groupId);
+ code = getGroupIdFromTagsVal(pScanInfo->readHandle.vnode, keyInfo.uid, pScanInfo->pGroupTags, keyBuf,
+ &keyInfo.groupId, &pTaskInfo->storageAPI);
if (code != TSDB_CODE_SUCCESS) {
taosMemoryFree(keyBuf);
taosArrayDestroy(qa);
@@ -456,7 +469,7 @@ int32_t qUpdateTableListForStreamScanner(qTaskInfo_t tinfo, const SArray* tableI
} else { // remove the table id in current list
qDebug("%d remove child tables from the stream scanner, %s", (int32_t)taosArrayGetSize(tableIdList), id);
taosWLockLatch(&pTaskInfo->lock);
- code = tqReaderRemoveTbUidList(pScanInfo->tqReader, tableIdList);
+ code = pTaskInfo->storageAPI.tqReaderFn.tqReaderRemoveTables(pScanInfo->tqReader, tableIdList);
taosWUnLockLatch(&pTaskInfo->lock);
}
@@ -502,7 +515,7 @@ int32_t qCreateExecTask(SReadHandle* readHandle, int32_t vgId, uint64_t taskId,
}
SDataSinkMgtCfg cfg = {.maxDataBlockNum = 500, .maxDataBlockNumPerQuery = 50};
- code = dsDataSinkMgtInit(&cfg);
+ code = dsDataSinkMgtInit(&cfg, &(*pTask)->storageAPI);
if (code != TSDB_CODE_SUCCESS) {
qError("failed to dsDataSinkMgtInit, code:%s, %s", tstrerror(code), (*pTask)->id.str);
goto _error;
@@ -1060,6 +1073,8 @@ void qStreamSetOpen(qTaskInfo_t tinfo) {
int32_t qStreamPrepareScan(qTaskInfo_t tinfo, STqOffsetVal* pOffset, int8_t subType) {
SExecTaskInfo* pTaskInfo = (SExecTaskInfo*)tinfo;
+ SStorageAPI* pAPI = &pTaskInfo->storageAPI;
+
SOperatorInfo* pOperator = pTaskInfo->pRoot;
const char* id = GET_TASKID(pTaskInfo);
@@ -1081,12 +1096,14 @@ int32_t qStreamPrepareScan(qTaskInfo_t tinfo, STqOffsetVal* pOffset, int8_t subT
if (pOffset->type == TMQ_OFFSET__LOG) {
// todo refactor: move away
- tsdbReaderClose(pScanBaseInfo->dataReader);
+ pTaskInfo->storageAPI.tsdReader.tsdReaderClose(pScanBaseInfo->dataReader);
pScanBaseInfo->dataReader = NULL;
- walReaderVerifyOffset(pInfo->tqReader->pWalReader, pOffset);
- if (tqSeekVer(pInfo->tqReader, pOffset->version + 1, id) < 0) {
- qError("tqSeekVer failed ver:%" PRId64 ", %s", pOffset->version + 1, id);
+ SStoreTqReader* pReaderAPI = &pTaskInfo->storageAPI.tqReaderFn;
+ SWalReader* pWalReader = pReaderAPI->tqReaderGetWalReader(pInfo->tqReader);
+ walReaderVerifyOffset(pWalReader, pOffset);
+ if (pReaderAPI->tqReaderSeek(pInfo->tqReader, pOffset->version + 1, id) < 0) {
+ qError("tqReaderSeek failed ver:%" PRId64 ", %s", pOffset->version + 1, id);
return -1;
}
} else if (pOffset->type == TMQ_OFFSET__SNAPSHOT_DATA) {
@@ -1141,8 +1158,8 @@ int32_t qStreamPrepareScan(qTaskInfo_t tinfo, STqOffsetVal* pOffset, int8_t subT
pScanInfo->scanTimes = 0;
if (pScanBaseInfo->dataReader == NULL) {
- int32_t code = tsdbReaderOpen(pScanBaseInfo->readHandle.vnode, &pScanBaseInfo->cond, &keyInfo, 1,
- pScanInfo->pResBlock, &pScanBaseInfo->dataReader, id, false, NULL);
+ int32_t code = pTaskInfo->storageAPI.tsdReader.tsdReaderOpen(pScanBaseInfo->readHandle.vnode, &pScanBaseInfo->cond, &keyInfo, 1,
+ pScanInfo->pResBlock, (void**) &pScanBaseInfo->dataReader, id, false, NULL);
if (code != TSDB_CODE_SUCCESS) {
qError("prepare read tsdb snapshot failed, uid:%" PRId64 ", code:%s %s", pOffset->uid, tstrerror(code), id);
terrno = code;
@@ -1152,8 +1169,8 @@ int32_t qStreamPrepareScan(qTaskInfo_t tinfo, STqOffsetVal* pOffset, int8_t subT
qDebug("tsdb reader created with offset(snapshot) uid:%" PRId64 " ts:%" PRId64 " table index:%d, total:%d, %s",
uid, pScanBaseInfo->cond.twindows.skey, pScanInfo->currentTable, numOfTables, id);
} else {
- tsdbSetTableList(pScanBaseInfo->dataReader, &keyInfo, 1);
- tsdbReaderReset(pScanBaseInfo->dataReader, &pScanBaseInfo->cond);
+ pTaskInfo->storageAPI.tsdReader.tsdSetQueryTableList(pScanBaseInfo->dataReader, &keyInfo, 1);
+ pTaskInfo->storageAPI.tsdReader.tsdReaderResetStatus(pScanBaseInfo->dataReader, &pScanBaseInfo->cond);
qDebug("tsdb reader offset seek snapshot to uid:%" PRId64 " ts %" PRId64 " table index:%d numOfTable:%d, %s",
uid, pScanBaseInfo->cond.twindows.skey, pScanInfo->currentTable, numOfTables, id);
}
@@ -1175,14 +1192,14 @@ int32_t qStreamPrepareScan(qTaskInfo_t tinfo, STqOffsetVal* pOffset, int8_t subT
SOperatorInfo* p = extractOperatorInTree(pOperator, QUERY_NODE_PHYSICAL_PLAN_TABLE_SCAN, id);
STableListInfo* pTableListInfo = ((SStreamRawScanInfo*)(p->info))->pTableListInfo;
- if (setForSnapShot(sContext, pOffset->uid) != 0) {
+ if (pAPI->snapshotFn.createSnapshot(sContext, pOffset->uid) != 0) {
qError("setDataForSnapShot error. uid:%" PRId64 " , %s", pOffset->uid, id);
terrno = TSDB_CODE_PAR_INTERNAL_ERROR;
return -1;
}
- SMetaTableInfo mtInfo = getUidfromSnapShot(sContext);
- tsdbReaderClose(pInfo->dataReader);
+ SMetaTableInfo mtInfo = pTaskInfo->storageAPI.snapshotFn.getMetaTableInfoFromSnapshot(sContext);
+ pTaskInfo->storageAPI.tsdReader.tsdReaderClose(pInfo->dataReader);
pInfo->dataReader = NULL;
cleanupQueryTableDataCond(&pTaskInfo->streamInfo.tableCond);
@@ -1200,7 +1217,7 @@ int32_t qStreamPrepareScan(qTaskInfo_t tinfo, STqOffsetVal* pOffset, int8_t subT
STableKeyInfo* pList = tableListGetInfo(pTableListInfo, 0);
int32_t size = tableListGetSize(pTableListInfo);
- tsdbReaderOpen(pInfo->vnode, &pTaskInfo->streamInfo.tableCond, pList, size, NULL, &pInfo->dataReader, NULL,
+ pTaskInfo->storageAPI.tsdReader.tsdReaderOpen(pInfo->vnode, &pTaskInfo->streamInfo.tableCond, pList, size, NULL, (void**) &pInfo->dataReader, NULL,
false, NULL);
cleanupQueryTableDataCond(&pTaskInfo->streamInfo.tableCond);
@@ -1212,7 +1229,7 @@ int32_t qStreamPrepareScan(qTaskInfo_t tinfo, STqOffsetVal* pOffset, int8_t subT
} else if (pOffset->type == TMQ_OFFSET__SNAPSHOT_META) {
SStreamRawScanInfo* pInfo = pOperator->info;
SSnapContext* sContext = pInfo->sContext;
- if (setForSnapShot(sContext, pOffset->uid) != 0) {
+ if (pTaskInfo->storageAPI.snapshotFn.createSnapshot(sContext, pOffset->uid) != 0) {
qError("setForSnapShot error. uid:%" PRIu64 " ,version:%" PRId64, pOffset->uid, pOffset->version);
terrno = TSDB_CODE_PAR_INTERNAL_ERROR;
return -1;
@@ -1221,7 +1238,7 @@ int32_t qStreamPrepareScan(qTaskInfo_t tinfo, STqOffsetVal* pOffset, int8_t subT
id);
} else if (pOffset->type == TMQ_OFFSET__LOG) {
SStreamRawScanInfo* pInfo = pOperator->info;
- tsdbReaderClose(pInfo->dataReader);
+ pTaskInfo->storageAPI.tsdReader.tsdReaderClose(pInfo->dataReader);
pInfo->dataReader = NULL;
qDebug("tmqsnap qStreamPrepareScan snapshot log, %s", id);
}
diff --git a/source/libs/executor/src/executorInt.c b/source/libs/executor/src/executorInt.c
index ad7258ae1f0b3b61d0cb47e00df3e18d4947473b..fbc0512a26205d3f04159d5672b894c83ab528b0 100644
--- a/source/libs/executor/src/executorInt.c
+++ b/source/libs/executor/src/executorInt.c
@@ -33,7 +33,7 @@
#include "tcompare.h"
#include "thash.h"
#include "ttypes.h"
-#include "vnode.h"
+#include "storageapi.h"
#define SET_REVERSE_SCAN_FLAG(runtime) ((runtime)->scanFlag = REVERSE_SCAN)
#define GET_FORWARD_DIRECTION_FACTOR(ord) (((ord) == TSDB_ORDER_ASC) ? QUERY_ASC_FORWARD_STEP : QUERY_DESC_FORWARD_STEP)
@@ -442,15 +442,15 @@ void setBlockSMAInfo(SqlFunctionCtx* pCtx, SExprInfo* pExprInfo, SSDataBlock* pB
}
/////////////////////////////////////////////////////////////////////////////////////////////
-STimeWindow getAlignQueryTimeWindow(SInterval* pInterval, int32_t precision, int64_t key) {
+STimeWindow getAlignQueryTimeWindow(const SInterval* pInterval, int64_t key) {
STimeWindow win = {0};
- win.skey = taosTimeTruncate(key, pInterval, precision);
+ win.skey = taosTimeTruncate(key, pInterval);
/*
* if the realSkey > INT64_MAX - pInterval->interval, the query duration between
* realSkey and realEkey must be less than one interval.Therefore, no need to adjust the query ranges.
*/
- win.ekey = taosTimeAdd(win.skey, pInterval->interval, pInterval->intervalUnit, precision) - 1;
+ win.ekey = taosTimeAdd(win.skey, pInterval->interval, pInterval->intervalUnit, pInterval->precision) - 1;
if (win.ekey < win.skey) {
win.ekey = INT64_MAX;
}
@@ -562,7 +562,6 @@ void extractQualifiedTupleByFilterResult(SSDataBlock* pBlock, const SColumnInfoD
int32_t numOfRows = 0;
if (IS_VAR_DATA_TYPE(pDst->info.type)) {
int32_t j = 0;
- pDst->varmeta.length = 0;
while (j < totalRows) {
if (pIndicator[j] == 0) {
@@ -574,7 +573,7 @@ void extractQualifiedTupleByFilterResult(SSDataBlock* pBlock, const SColumnInfoD
colDataSetNull_var(pDst, numOfRows);
} else {
char* p1 = colDataGetVarData(pDst, j);
- colDataSetVal(pDst, numOfRows, p1, false);
+ colDataReassignVal(pDst, numOfRows, j, p1);
}
numOfRows += 1;
j += 1;
@@ -716,7 +715,8 @@ void copyResultrowToDataBlock(SExprInfo* pExprInfo, int32_t numOfExprs, SResultR
pCtx[j].resultInfo->numOfRes = pRow->numOfRows;
}
}
-
+
+ blockDataEnsureCapacity(pBlock, pBlock->info.rows + pCtx[j].resultInfo->numOfRes);
int32_t code = pCtx[j].fpSet.finalize(&pCtx[j], pBlock);
if (TAOS_FAILED(code)) {
qError("%s build result data block error, code %s", GET_TASKID(pTaskInfo), tstrerror(code));
@@ -817,7 +817,7 @@ int32_t doCopyToSDataBlock(SExecTaskInfo* pTaskInfo, SSDataBlock* pBlock, SExprS
}
if (pBlock->info.rows + pRow->numOfRows > pBlock->info.capacity) {
- uint32_t newSize = pBlock->info.rows + pRow->numOfRows + (numOfRows - i) > 1 ? 1 : 0;
+ uint32_t newSize = pBlock->info.rows + pRow->numOfRows + ((numOfRows - i) > 1 ? 1 : 0);
blockDataEnsureCapacity(pBlock, newSize);
qDebug("datablock capacity not sufficient, expand to required:%d, current capacity:%d, %s",
newSize, pBlock->info.capacity, GET_TASKID(pTaskInfo));
@@ -844,6 +844,8 @@ int32_t doCopyToSDataBlock(SExecTaskInfo* pTaskInfo, SSDataBlock* pBlock, SExprS
void doBuildStreamResBlock(SOperatorInfo* pOperator, SOptrBasicInfo* pbInfo, SGroupResInfo* pGroupResInfo,
SDiskbasedBuf* pBuf) {
SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo;
+ SStorageAPI* pAPI = &pTaskInfo->storageAPI;
+
SSDataBlock* pBlock = pbInfo->pRes;
// set output datablock version
@@ -860,12 +862,13 @@ void doBuildStreamResBlock(SOperatorInfo* pOperator, SOptrBasicInfo* pbInfo, SGr
doCopyToSDataBlock(pTaskInfo, pBlock, &pOperator->exprSupp, pBuf, pGroupResInfo, pOperator->resultInfo.threshold, false);
void* tbname = NULL;
- if (streamStateGetParName(pTaskInfo->streamInfo.pState, pBlock->info.id.groupId, &tbname) < 0) {
+ if (pAPI->stateStore.streamStateGetParName((void*)pTaskInfo->streamInfo.pState, pBlock->info.id.groupId, &tbname) < 0) {
pBlock->info.parTbName[0] = 0;
} else {
memcpy(pBlock->info.parTbName, tbname, TSDB_TABLE_NAME_LEN);
}
- streamFreeVal(tbname);
+
+ pAPI->stateStore.streamStateFreeVal(tbname);
}
void doBuildResultDatablock(SOperatorInfo* pOperator, SOptrBasicInfo* pbInfo, SGroupResInfo* pGroupResInfo,
@@ -976,11 +979,11 @@ static void* destroySqlFunctionCtx(SqlFunctionCtx* pCtx, int32_t numOfOutput) {
return NULL;
}
-int32_t initExprSupp(SExprSupp* pSup, SExprInfo* pExprInfo, int32_t numOfExpr) {
+int32_t initExprSupp(SExprSupp* pSup, SExprInfo* pExprInfo, int32_t numOfExpr, SFunctionStateStore* pStore) {
pSup->pExprInfo = pExprInfo;
pSup->numOfExprs = numOfExpr;
if (pSup->pExprInfo != NULL) {
- pSup->pCtx = createSqlFunctionCtx(pExprInfo, numOfExpr, &pSup->rowEntryInfoOffset);
+ pSup->pCtx = createSqlFunctionCtx(pExprInfo, numOfExpr, &pSup->rowEntryInfoOffset, pStore);
if (pSup->pCtx == NULL) {
return TSDB_CODE_OUT_OF_MEMORY;
}
@@ -1066,122 +1069,22 @@ int32_t createDataSinkParam(SDataSinkNode* pNode, void** pParam, SExecTaskInfo*
return TSDB_CODE_SUCCESS;
}
-int32_t resultRowEncode(void* k, int32_t* size, char* buf) {
- // SResultRow* key = k;
- // int len = 0;
- // int struLen = *size;
- // len += taosEncodeFixedI32((void**)&buf, key->pageId);
-
- // uint32_t offset = key->offset;
- // len += taosEncodeFixedU32((void**)&buf, offset);
-
- // len += taosEncodeFixedI8((void**)&buf, key->startInterp);
- // len += taosEncodeFixedI8((void**)&buf, key->endInterp);
- // len += taosEncodeFixedI8((void**)&buf, key->closed);
- // len += taosEncodeFixedU32((void**)&buf, key->numOfRows);
-
- // len += taosEncodeFixedI64((void**)&buf, key->win.skey);
- // len += taosEncodeFixedI64((void**)&buf, key->win.ekey);
-
- // int32_t numOfEntryInfo = (struLen - sizeof(SResultRow)) / sizeof(struct SResultRowEntryInfo);
- // len += taosEncodeFixedI32((void**)&buf, numOfEntryInfo);
- // for (int i = 0; i < numOfEntryInfo; i++) {
- // SResultRowEntryInfo* p = &key->pEntryInfo[i];
-
- // uint8_t value = p->initialized ? 1 : 0;
- // len += taosEncodeFixedU8((void**)&buf, value);
-
- // value = p->complete ? 1 : 0;
- // len += taosEncodeFixedU8((void**)&buf, value);
-
- // value = p->isNullRes;
- // len += taosEncodeFixedU8((void**)&buf, value);
-
- // len += taosEncodeFixedU16((void**)&buf, p->numOfRes);
- // }
- // {
- // char* strBuf = taosMemoryCalloc(1, *size * 100);
- // resultRowToString(key, *size, strBuf);
- // qWarn("encode result row:%s", strBuf);
- // }
-
- // return len;
- return 0;
-}
-
-int32_t resultRowDecode(void** k, size_t size, char* buf) {
- // char* p1 = buf;
- // int32_t numOfEntryInfo = 0;
- // uint32_t entryOffset = sizeof(int32_t) + sizeof(uint32_t) + sizeof(int8_t) + sizeof(int8_t) + sizeof(int8_t) +
- // sizeof(uint32_t) + sizeof(int64_t) + sizeof(int64_t);
- // taosDecodeFixedI32(p1 + entryOffset, &numOfEntryInfo);
-
- // char* p = buf;
- // size = sizeof(SResultRow) + numOfEntryInfo * sizeof(SResultRowEntryInfo);
- // SResultRow* key = taosMemoryCalloc(1, size);
-
- // p = taosDecodeFixedI32(p, (int32_t*)&key->pageId);
- // uint32_t offset = 0;
- // p = taosDecodeFixedU32(p, &offset);
- // key->offset = offset;
-
- // p = taosDecodeFixedI8(p, (int8_t*)(&key->startInterp));
- // p = taosDecodeFixedI8(p, (int8_t*)(&key->endInterp));
- // p = taosDecodeFixedI8(p, (int8_t*)&key->closed);
- // p = taosDecodeFixedU32(p, &key->numOfRows);
-
- // p = taosDecodeFixedI64(p, &key->win.skey);
- // p = taosDecodeFixedI64(p, &key->win.ekey);
- // p = taosDecodeFixedI32(p, &numOfEntryInfo);
- // for (int i = 0; i < numOfEntryInfo; i++) {
- // SResultRowEntryInfo* pInfo = &key->pEntryInfo[i];
- // uint8_t value = 0;
- // p = taosDecodeFixedU8(p, &value);
- // pInfo->initialized = (value == 1) ? true : false;
-
- // p = taosDecodeFixedU8(p, &value);
- // pInfo->complete = (value == 1) ? true : false;
-
- // p = taosDecodeFixedU8(p, &value);
- // pInfo->isNullRes = value;
-
- // p = taosDecodeFixedU16(p, &pInfo->numOfRes);
- // }
- // *k = key;
-
- // {
- // char* strBuf = taosMemoryCalloc(1, size * 100);
- // resultRowToString(key, size, strBuf);
- // qWarn("decode result row:%s", strBuf);
- // }
- // return size;
- return 0;
-}
-
-int32_t saveOutputBuf(SStreamState* pState, SWinKey* pKey, SResultRow* pResult, int32_t resSize) {
- // char* buf = taosMemoryCalloc(1, resSize * 10);
- // int len = resultRowEncode((void*)pResult, &resSize, buf);
- // char* buf = taosMemoryCalloc(1, resSize);
- // memcpy(buf, pResult, resSize);
- streamStatePut(pState, pKey, (char*)pResult, resSize);
- // taosMemoryFree(buf);
- return TSDB_CODE_SUCCESS;
-}
-
-int32_t releaseOutputBuf(SStreamState* pState, SWinKey* pKey, SResultRow* pResult) {
- streamStateReleaseBuf(pState, pKey, pResult);
+int32_t releaseOutputBuf(void* pState, SWinKey* pKey, SResultRow* pResult, SStateStore* pAPI) {
+ pAPI->streamStateReleaseBuf(pState, pKey, pResult);
return TSDB_CODE_SUCCESS;
}
-int32_t saveSessionDiscBuf(SStreamState* pState, SSessionKey* key, void* buf, int32_t size) {
- streamStateSessionPut(pState, key, (const void*)buf, size);
- releaseOutputBuf(pState, NULL, (SResultRow*)buf);
+int32_t saveSessionDiscBuf(void* pState, SSessionKey* key, void* buf, int32_t size, SStateStore* pAPI) {
+ pAPI->streamStateSessionPut(pState, key, (const void*)buf, size);
+ releaseOutputBuf(pState, NULL, (SResultRow*)buf, pAPI);
return TSDB_CODE_SUCCESS;
}
-int32_t buildSessionResultDataBlock(SOperatorInfo* pOperator, SStreamState* pState, SSDataBlock* pBlock,
+int32_t buildSessionResultDataBlock(SOperatorInfo* pOperator, void* pState, SSDataBlock* pBlock,
SExprSupp* pSup, SGroupResInfo* pGroupResInfo) {
SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo;
+ SStorageAPI* pAPI = &pTaskInfo->storageAPI;
+
SExprInfo* pExprInfo = pSup->pExprInfo;
int32_t numOfExprs = pSup->numOfExprs;
int32_t* rowEntryOffset = pSup->rowEntryInfoOffset;
@@ -1193,7 +1096,7 @@ int32_t buildSessionResultDataBlock(SOperatorInfo* pOperator, SStreamState* pSta
SSessionKey* pKey = taosArrayGet(pGroupResInfo->pRows, i);
int32_t size = 0;
void* pVal = NULL;
- int32_t code = streamStateSessionGet(pState, pKey, &pVal, &size);
+ int32_t code = pAPI->stateStore.streamStateSessionGet(pState, pKey, &pVal, &size);
ASSERT(code == 0);
if (code == -1) {
// coverity scan
@@ -1205,7 +1108,7 @@ int32_t buildSessionResultDataBlock(SOperatorInfo* pOperator, SStreamState* pSta
// no results, continue to check the next one
if (pRow->numOfRows == 0) {
pGroupResInfo->index += 1;
- releaseOutputBuf(pState, NULL, pRow);
+ releaseOutputBuf(pState, NULL, pRow, &pAPI->stateStore);
continue;
}
@@ -1213,23 +1116,23 @@ int32_t buildSessionResultDataBlock(SOperatorInfo* pOperator, SStreamState* pSta
pBlock->info.id.groupId = pKey->groupId;
void* tbname = NULL;
- if (streamStateGetParName(pTaskInfo->streamInfo.pState, pBlock->info.id.groupId, &tbname) < 0) {
+ if (pAPI->stateStore.streamStateGetParName((void*)pTaskInfo->streamInfo.pState, pBlock->info.id.groupId, &tbname) < 0) {
pBlock->info.parTbName[0] = 0;
} else {
memcpy(pBlock->info.parTbName, tbname, TSDB_TABLE_NAME_LEN);
}
- streamFreeVal(tbname);
+ pAPI->stateStore.streamStateFreeVal(tbname);
} else {
// current value belongs to different group, it can't be packed into one datablock
if (pBlock->info.id.groupId != pKey->groupId) {
- releaseOutputBuf(pState, NULL, pRow);
+ releaseOutputBuf(pState, NULL, pRow, &pAPI->stateStore);
break;
}
}
if (pBlock->info.rows + pRow->numOfRows > pBlock->info.capacity) {
ASSERT(pBlock->info.rows > 0);
- releaseOutputBuf(pState, NULL, pRow);
+ releaseOutputBuf(pState, NULL, pRow, &pAPI->stateStore);
break;
}
@@ -1260,7 +1163,7 @@ int32_t buildSessionResultDataBlock(SOperatorInfo* pOperator, SStreamState* pSta
pBlock->info.dataLoad = 1;
pBlock->info.rows += pRow->numOfRows;
- releaseOutputBuf(pState, NULL, pRow);
+ releaseOutputBuf(pState, NULL, pRow, &pAPI->stateStore);
}
blockDataUpdateTsWindow(pBlock, 0);
return TSDB_CODE_SUCCESS;
@@ -1292,7 +1195,7 @@ void qStreamCloseTsdbReader(void* task) {
qDebug("wait for the reader stopping");
}
- tsdbReaderClose(pTSInfo->base.dataReader);
+ pTaskInfo->storageAPI.tsdReader.tsdReaderClose(pTSInfo->base.dataReader);
pTSInfo->base.dataReader = NULL;
// restore the status, todo refactor.
diff --git a/source/libs/executor/src/filloperator.c b/source/libs/executor/src/filloperator.c
index 5101e369921e8a6aaa0f37cf195a66ec3c84fdcd..f9e8a32520f6282770bc6926d9db855153ddcc20 100644
--- a/source/libs/executor/src/filloperator.c
+++ b/source/libs/executor/src/filloperator.c
@@ -61,25 +61,28 @@ typedef struct SFillOperatorInfo {
SExprSupp noFillExprSupp;
} SFillOperatorInfo;
+static void revisedFillStartKey(SFillOperatorInfo* pInfo, SSDataBlock* pBlock, int32_t order);
static void destroyFillOperatorInfo(void* param);
static void doApplyScalarCalculation(SOperatorInfo* pOperator, SSDataBlock* pBlock, int32_t order, int32_t scanFlag);
static void doHandleRemainBlockForNewGroupImpl(SOperatorInfo* pOperator, SFillOperatorInfo* pInfo,
- SResultInfo* pResultInfo, SExecTaskInfo* pTaskInfo) {
+ SResultInfo* pResultInfo, int32_t order) {
pInfo->totalInputRows = pInfo->existNewGroupBlock->info.rows;
SSDataBlock* pResBlock = pInfo->pFinalRes;
- int32_t order = TSDB_ORDER_ASC;
+// int32_t order = TSDB_ORDER_ASC;
int32_t scanFlag = MAIN_SCAN;
- getTableScanInfo(pOperator, &order, &scanFlag, false);
-
- int64_t ekey = pInfo->existNewGroupBlock->info.window.ekey;
+// getTableScanInfo(pOperator, &order, &scanFlag, false);
taosResetFillInfo(pInfo->pFillInfo, getFillInfoStart(pInfo->pFillInfo));
blockDataCleanup(pInfo->pRes);
doApplyScalarCalculation(pOperator, pInfo->existNewGroupBlock, order, scanFlag);
- taosFillSetStartInfo(pInfo->pFillInfo, pInfo->pRes->info.rows, ekey);
+ revisedFillStartKey(pInfo, pInfo->existNewGroupBlock, order);
+
+ int64_t ts = (order == TSDB_ORDER_ASC)? pInfo->existNewGroupBlock->info.window.ekey:pInfo->existNewGroupBlock->info.window.skey;
+ taosFillSetStartInfo(pInfo->pFillInfo, pInfo->pRes->info.rows, ts);
+
taosFillSetInputDataBlock(pInfo->pFillInfo, pInfo->pRes);
int32_t numOfResultRows = pResultInfo->capacity - pResBlock->info.rows;
@@ -90,7 +93,7 @@ static void doHandleRemainBlockForNewGroupImpl(SOperatorInfo* pOperator, SFillOp
}
static void doHandleRemainBlockFromNewGroup(SOperatorInfo* pOperator, SFillOperatorInfo* pInfo,
- SResultInfo* pResultInfo, SExecTaskInfo* pTaskInfo) {
+ SResultInfo* pResultInfo, int32_t order) {
if (taosFillHasMoreResults(pInfo->pFillInfo)) {
int32_t numOfResultRows = pResultInfo->capacity - pInfo->pFinalRes->info.rows;
taosFillResultDataBlock(pInfo->pFillInfo, pInfo->pFinalRes, numOfResultRows);
@@ -100,7 +103,7 @@ static void doHandleRemainBlockFromNewGroup(SOperatorInfo* pOperator, SFillOpera
// handle the cached new group data block
if (pInfo->existNewGroupBlock) {
- doHandleRemainBlockForNewGroupImpl(pOperator, pInfo, pResultInfo, pTaskInfo);
+ doHandleRemainBlockForNewGroupImpl(pOperator, pInfo, pResultInfo, order);
}
}
@@ -119,6 +122,53 @@ void doApplyScalarCalculation(SOperatorInfo* pOperator, SSDataBlock* pBlock, int
pInfo->pRes->info.id.groupId = pBlock->info.id.groupId;
}
+// todo refactor: decide the start key according to the query time range.
+static void revisedFillStartKey(SFillOperatorInfo* pInfo, SSDataBlock* pBlock, int32_t order) {
+ if (order == TSDB_ORDER_ASC) {
+ int64_t skey = pBlock->info.window.skey;
+ if (skey < pInfo->pFillInfo->start) { // the start key may be smaller than the
+ ASSERT( taosFillNotStarted(pInfo->pFillInfo));
+ taosFillUpdateStartTimestampInfo(pInfo->pFillInfo, skey);
+ } else if (pInfo->pFillInfo->start < skey) {
+ int64_t t = skey;
+ SInterval* pInterval = &pInfo->pFillInfo->interval;
+
+ while(1) {
+ int64_t prev = taosTimeAdd(t, -pInterval->sliding, pInterval->slidingUnit, pInterval->precision);
+ if (prev <= pInfo->pFillInfo->start) {
+ t = prev;
+ break;
+ }
+ t = prev;
+ }
+
+ // todo time window chosen problem: t or prev value?
+ taosFillUpdateStartTimestampInfo(pInfo->pFillInfo, t);
+ }
+ } else {
+ int64_t ekey = pBlock->info.window.ekey;
+ if (ekey > pInfo->pFillInfo->start) {
+ ASSERT( taosFillNotStarted(pInfo->pFillInfo));
+ taosFillUpdateStartTimestampInfo(pInfo->pFillInfo, ekey);
+ } else if (ekey < pInfo->pFillInfo->start) {
+ int64_t t = ekey;
+ SInterval* pInterval = &pInfo->pFillInfo->interval;
+
+ while(1) {
+ int64_t prev = taosTimeAdd(t, pInterval->sliding, pInterval->slidingUnit, pInterval->precision);
+ if (prev >= pInfo->pFillInfo->start) {
+ t = prev;
+ break;
+ }
+ t = prev;
+ }
+
+ // todo time window chosen problem: t or prev value?
+ taosFillUpdateStartTimestampInfo(pInfo->pFillInfo, t);
+ }
+ }
+}
+
static SSDataBlock* doFillImpl(SOperatorInfo* pOperator) {
SFillOperatorInfo* pInfo = pOperator->info;
SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo;
@@ -132,13 +182,19 @@ static SSDataBlock* doFillImpl(SOperatorInfo* pOperator) {
int32_t scanFlag = MAIN_SCAN;
getTableScanInfo(pOperator, &order, &scanFlag, false);
- doHandleRemainBlockFromNewGroup(pOperator, pInfo, pResultInfo, pTaskInfo);
+ SOperatorInfo* pDownstream = pOperator->pDownstream[0];
+
+ // the scan order may be different from the output result order for agg interval operator.
+ if (pDownstream->operatorType == QUERY_NODE_PHYSICAL_PLAN_HASH_INTERVAL) {
+ order = ((SIntervalAggOperatorInfo*) pDownstream->info)->resultTsOrder;
+ }
+
+ doHandleRemainBlockFromNewGroup(pOperator, pInfo, pResultInfo, order);
if (pResBlock->info.rows > 0) {
pResBlock->info.id.groupId = pInfo->curGroupId;
return pResBlock;
}
- SOperatorInfo* pDownstream = pOperator->pDownstream[0];
while (1) {
SSDataBlock* pBlock = pDownstream->fpSet.getNextFn(pDownstream);
if (pBlock == NULL) {
@@ -158,15 +214,16 @@ static SSDataBlock* doFillImpl(SOperatorInfo* pOperator) {
blockDataEnsureCapacity(pInfo->pFinalRes, pBlock->info.rows);
doApplyScalarCalculation(pOperator, pBlock, order, scanFlag);
- if (pInfo->curGroupId == 0 || pInfo->curGroupId == pInfo->pRes->info.id.groupId) {
+ if (pInfo->curGroupId == 0 || (pInfo->curGroupId == pInfo->pRes->info.id.groupId)) {
+ if (pInfo->curGroupId == 0 && taosFillNotStarted(pInfo->pFillInfo)) {
+ revisedFillStartKey(pInfo, pBlock, order);
+ }
+
pInfo->curGroupId = pInfo->pRes->info.id.groupId; // the first data block
pInfo->totalInputRows += pInfo->pRes->info.rows;
- if (order == pInfo->pFillInfo->order) {
- taosFillSetStartInfo(pInfo->pFillInfo, pInfo->pRes->info.rows, pBlock->info.window.ekey);
- } else {
- taosFillSetStartInfo(pInfo->pFillInfo, pInfo->pRes->info.rows, pBlock->info.window.skey);
- }
+ int64_t ts = (order == TSDB_ORDER_ASC)? pBlock->info.window.ekey:pBlock->info.window.skey;
+ taosFillSetStartInfo(pInfo->pFillInfo, pInfo->pRes->info.rows, ts);
taosFillSetInputDataBlock(pInfo->pFillInfo, pInfo->pRes);
} else if (pInfo->curGroupId != pBlock->info.id.groupId) { // the new group data block
pInfo->existNewGroupBlock = pBlock;
@@ -190,7 +247,7 @@ static SSDataBlock* doFillImpl(SOperatorInfo* pOperator) {
return pResBlock;
}
- doHandleRemainBlockFromNewGroup(pOperator, pInfo, pResultInfo, pTaskInfo);
+ doHandleRemainBlockFromNewGroup(pOperator, pInfo, pResultInfo, order);
if (pResBlock->info.rows >= pOperator->resultInfo.threshold || pBlock == NULL) {
pResBlock->info.id.groupId = pInfo->curGroupId;
return pResBlock;
@@ -198,7 +255,7 @@ static SSDataBlock* doFillImpl(SOperatorInfo* pOperator) {
} else if (pInfo->existNewGroupBlock) { // try next group
blockDataCleanup(pResBlock);
- doHandleRemainBlockForNewGroupImpl(pOperator, pInfo, pResultInfo, pTaskInfo);
+ doHandleRemainBlockForNewGroupImpl(pOperator, pInfo, pResultInfo, order);
if (pResBlock->info.rows > pResultInfo->threshold) {
pResBlock->info.id.groupId = pInfo->curGroupId;
return pResBlock;
@@ -256,11 +313,11 @@ static int32_t initFillInfo(SFillOperatorInfo* pInfo, SExprInfo* pExpr, int32_t
const char* id, SInterval* pInterval, int32_t fillType, int32_t order) {
SFillColInfo* pColInfo = createFillColInfo(pExpr, numOfCols, pNotFillExpr, numOfNotFillCols, pValNode);
- int64_t startKey = (order == TSDB_ORDER_ASC) ? win.skey : win.ekey;
- STimeWindow w = getAlignQueryTimeWindow(pInterval, pInterval->precision, startKey);
- w = getFirstQualifiedTimeWindow(startKey, &w, pInterval, order);
+ int64_t startKey = (order == TSDB_ORDER_ASC) ? win.skey : win.ekey;
- pInfo->pFillInfo = taosCreateFillInfo(w.skey, numOfCols, numOfNotFillCols, capacity, pInterval, fillType, pColInfo,
+// STimeWindow w = {0};
+// getInitialStartTimeWindow(pInterval, startKey, &w, order == TSDB_ORDER_ASC);
+ pInfo->pFillInfo = taosCreateFillInfo(startKey, numOfCols, numOfNotFillCols, capacity, pInterval, fillType, pColInfo,
pInfo->primaryTsCol, order, id);
if (order == TSDB_ORDER_ASC) {
@@ -338,7 +395,7 @@ SOperatorInfo* createFillOperatorInfo(SOperatorInfo* downstream, SFillPhysiNode*
goto _error;
}
- code = initExprSupp(pNoFillSupp, pNoFillSupp->pExprInfo, pNoFillSupp->numOfExprs);
+ code = initExprSupp(pNoFillSupp, pNoFillSupp->pExprInfo, pNoFillSupp->numOfExprs, &pTaskInfo->storageAPI.functionStore);
if (code != TSDB_CODE_SUCCESS) {
goto _error;
}
@@ -355,7 +412,7 @@ SOperatorInfo* createFillOperatorInfo(SOperatorInfo* downstream, SFillPhysiNode*
initResultSizeInfo(&pOperator->resultInfo, 4096);
blockDataEnsureCapacity(pInfo->pRes, pOperator->resultInfo.capacity);
- code = initExprSupp(&pOperator->exprSupp, pExprInfo, pInfo->numOfExpr);
+ code = initExprSupp(&pOperator->exprSupp, pExprInfo, pInfo->numOfExpr, &pTaskInfo->storageAPI.functionStore);
if (code != TSDB_CODE_SUCCESS) {
goto _error;
}
@@ -402,13 +459,13 @@ _error:
TSKEY getNextWindowTs(TSKEY ts, SInterval* pInterval) {
STimeWindow win = {.skey = ts, .ekey = ts};
- getNextIntervalWindow(pInterval, &win, TSDB_ORDER_ASC);
+ getNextTimeWindow(pInterval, &win, TSDB_ORDER_ASC);
return win.skey;
}
TSKEY getPrevWindowTs(TSKEY ts, SInterval* pInterval) {
STimeWindow win = {.skey = ts, .ekey = ts};
- getNextIntervalWindow(pInterval, &win, TSDB_ORDER_DESC);
+ getNextTimeWindow(pInterval, &win, TSDB_ORDER_DESC);
return win.skey;
}
@@ -443,7 +500,7 @@ void* destroyStreamFillSupporter(SStreamFillSupporter* pFillSup) {
pFillSup->pAllColInfo = destroyFillColumnInfo(pFillSup->pAllColInfo, pFillSup->numOfFillCols, pFillSup->numOfAllCols);
tSimpleHashCleanup(pFillSup->pResMap);
pFillSup->pResMap = NULL;
- releaseOutputBuf(NULL, NULL, (SResultRow*)pFillSup->cur.pRowVal);
+ releaseOutputBuf(NULL, NULL, (SResultRow*)pFillSup->cur.pRowVal, &pFillSup->pAPI->stateStore); //?????
pFillSup->cur.pRowVal = NULL;
cleanupExprSupp(&pFillSup->notFillExprSup);
@@ -490,74 +547,78 @@ static void resetFillWindow(SResultRowData* pRowData) {
pRowData->pRowVal = NULL;
}
-void resetPrevAndNextWindow(SStreamFillSupporter* pFillSup, SStreamState* pState) {
+void resetPrevAndNextWindow(SStreamFillSupporter* pFillSup, void* pState, SStorageAPI* pAPI) {
resetFillWindow(&pFillSup->prev);
- releaseOutputBuf(NULL, NULL, (SResultRow*)pFillSup->cur.pRowVal);
+ releaseOutputBuf(NULL, NULL, (SResultRow*)pFillSup->cur.pRowVal, &pAPI->stateStore); //???
resetFillWindow(&pFillSup->cur);
resetFillWindow(&pFillSup->next);
resetFillWindow(&pFillSup->nextNext);
}
void getCurWindowFromDiscBuf(SOperatorInfo* pOperator, TSKEY ts, uint64_t groupId, SStreamFillSupporter* pFillSup) {
- SStreamState* pState = pOperator->pTaskInfo->streamInfo.pState;
- resetPrevAndNextWindow(pFillSup, pState);
+ SStorageAPI* pAPI = &pOperator->pTaskInfo->storageAPI;
+
+ void* pState = pOperator->pTaskInfo->streamInfo.pState;
+ resetPrevAndNextWindow(pFillSup, pState, pAPI);
SWinKey key = {.ts = ts, .groupId = groupId};
int32_t curVLen = 0;
- int32_t code = streamStateFillGet(pState, &key, (void**)&pFillSup->cur.pRowVal, &curVLen);
+
+ int32_t code = pAPI->stateStore.streamStateFillGet(pState, &key, (void**)&pFillSup->cur.pRowVal, &curVLen);
ASSERT(code == TSDB_CODE_SUCCESS);
pFillSup->cur.key = key.ts;
}
void getWindowFromDiscBuf(SOperatorInfo* pOperator, TSKEY ts, uint64_t groupId, SStreamFillSupporter* pFillSup) {
- SStreamState* pState = pOperator->pTaskInfo->streamInfo.pState;
- resetPrevAndNextWindow(pFillSup, pState);
+ SStorageAPI* pAPI = &pOperator->pTaskInfo->storageAPI;
+ void* pState = pOperator->pTaskInfo->streamInfo.pState;
+ resetPrevAndNextWindow(pFillSup, pState, pAPI);
SWinKey key = {.ts = ts, .groupId = groupId};
void* curVal = NULL;
int32_t curVLen = 0;
- int32_t code = streamStateFillGet(pState, &key, (void**)&curVal, &curVLen);
+ int32_t code = pAPI->stateStore.streamStateFillGet(pState, &key, (void**)&curVal, &curVLen);
ASSERT(code == TSDB_CODE_SUCCESS);
pFillSup->cur.key = key.ts;
pFillSup->cur.pRowVal = curVal;
- SStreamStateCur* pCur = streamStateFillSeekKeyPrev(pState, &key);
+ SStreamStateCur* pCur = pAPI->stateStore.streamStateFillSeekKeyPrev(pState, &key);
SWinKey preKey = {.ts = INT64_MIN, .groupId = groupId};
void* preVal = NULL;
int32_t preVLen = 0;
- code = streamStateGetGroupKVByCur(pCur, &preKey, (const void**)&preVal, &preVLen);
+ code = pAPI->stateStore.streamStateGetGroupKVByCur(pCur, &preKey, (const void**)&preVal, &preVLen);
if (code == TSDB_CODE_SUCCESS) {
pFillSup->prev.key = preKey.ts;
pFillSup->prev.pRowVal = preVal;
- code = streamStateCurNext(pState, pCur);
+ code = pAPI->stateStore.streamStateCurNext(pState, pCur);
ASSERT(code == TSDB_CODE_SUCCESS);
- code = streamStateCurNext(pState, pCur);
+ code = pAPI->stateStore.streamStateCurNext(pState, pCur);
if (code != TSDB_CODE_SUCCESS) {
- streamStateFreeCur(pCur);
+ pAPI->stateStore.streamStateFreeCur(pCur);
pCur = NULL;
}
} else {
- streamStateFreeCur(pCur);
- pCur = streamStateFillSeekKeyNext(pState, &key);
+ pAPI->stateStore.streamStateFreeCur(pCur);
+ pCur = pAPI->stateStore.streamStateFillSeekKeyNext(pState, &key);
}
SWinKey nextKey = {.ts = INT64_MIN, .groupId = groupId};
void* nextVal = NULL;
int32_t nextVLen = 0;
- code = streamStateGetGroupKVByCur(pCur, &nextKey, (const void**)&nextVal, &nextVLen);
+ code = pAPI->stateStore.streamStateGetGroupKVByCur(pCur, &nextKey, (const void**)&nextVal, &nextVLen);
if (code == TSDB_CODE_SUCCESS) {
pFillSup->next.key = nextKey.ts;
pFillSup->next.pRowVal = nextVal;
if (pFillSup->type == TSDB_FILL_PREV || pFillSup->type == TSDB_FILL_NEXT) {
- code = streamStateCurNext(pState, pCur);
+ code = pAPI->stateStore.streamStateCurNext(pState, pCur);
if (code == TSDB_CODE_SUCCESS) {
SWinKey nextNextKey = {.groupId = groupId};
void* nextNextVal = NULL;
int32_t nextNextVLen = 0;
- code = streamStateGetGroupKVByCur(pCur, &nextNextKey, (const void**)&nextNextVal, &nextNextVLen);
+ code = pAPI->stateStore.streamStateGetGroupKVByCur(pCur, &nextNextKey, (const void**)&nextNextVal, &nextNextVLen);
if (code == TSDB_CODE_SUCCESS) {
pFillSup->nextNext.key = nextNextKey.ts;
pFillSup->nextNext.pRowVal = nextNextVal;
@@ -565,7 +626,7 @@ void getWindowFromDiscBuf(SOperatorInfo* pOperator, TSKEY ts, uint64_t groupId,
}
}
}
- streamStateFreeCur(pCur);
+ pAPI->stateStore.streamStateFreeCur(pCur);
}
static bool hasPrevWindow(SStreamFillSupporter* pFillSup) { return pFillSup->prev.key != INT64_MIN; }
@@ -922,8 +983,10 @@ static void doStreamFillLinear(SStreamFillSupporter* pFillSup, SStreamFillInfo*
}
static void keepResultInDiscBuf(SOperatorInfo* pOperator, uint64_t groupId, SResultRowData* pRow, int32_t len) {
+ SStorageAPI* pAPI = &pOperator->pTaskInfo->storageAPI;
+
SWinKey key = {.groupId = groupId, .ts = pRow->key};
- int32_t code = streamStateFillPut(pOperator->pTaskInfo->streamInfo.pState, &key, pRow->pRowVal, len);
+ int32_t code = pAPI->stateStore.streamStateFillPut(pOperator->pTaskInfo->streamInfo.pState, &key, pRow->pRowVal, len);
qDebug("===stream===fill operator save key ts:%" PRId64 " group id:%" PRIu64 " code:%d", key.ts, key.groupId, code);
ASSERT(code == TSDB_CODE_SUCCESS);
}
@@ -996,9 +1059,10 @@ static void doStreamFillImpl(SOperatorInfo* pOperator) {
SSDataBlock* pBlock = pInfo->pSrcBlock;
uint64_t groupId = pBlock->info.id.groupId;
SSDataBlock* pRes = pInfo->pRes;
+ SColumnInfoData* pTsCol = taosArrayGet(pInfo->pSrcBlock->pDataBlock, pInfo->primaryTsCol);
+ TSKEY* tsCol = (TSKEY*)pTsCol->pData;
pRes->info.id.groupId = groupId;
- SColumnInfoData* pTsCol = taosArrayGet(pInfo->pSrcBlock->pDataBlock, pInfo->primaryTsCol);
- TSKEY* tsCol = (TSKEY*)pTsCol->pData;
+ pInfo->srcRowIndex++;
if (pInfo->srcRowIndex == 0) {
keepBlockRowInDiscBuf(pOperator, pFillInfo, pBlock, tsCol, pInfo->srcRowIndex, groupId, pFillSup->rowSize);
@@ -1021,7 +1085,8 @@ static void doStreamFillImpl(SOperatorInfo* pOperator) {
}
static void buildDeleteRange(SOperatorInfo* pOp, TSKEY start, TSKEY end, uint64_t groupId, SSDataBlock* delRes) {
- SStreamState* pState = pOp->pTaskInfo->streamInfo.pState;
+ SStorageAPI* pAPI = &pOp->pTaskInfo->storageAPI;
+ void* pState = pOp->pTaskInfo->streamInfo.pState;
SSDataBlock* pBlock = delRes;
SColumnInfoData* pStartCol = taosArrayGet(pBlock->pDataBlock, START_TS_COLUMN_INDEX);
@@ -1041,14 +1106,14 @@ static void buildDeleteRange(SOperatorInfo* pOp, TSKEY start, TSKEY end, uint64_
SColumnInfoData* pTableCol = taosArrayGet(pBlock->pDataBlock, TABLE_NAME_COLUMN_INDEX);
void* tbname = NULL;
- streamStateGetParName(pOp->pTaskInfo->streamInfo.pState, groupId, &tbname);
+ pAPI->stateStore.streamStateGetParName(pOp->pTaskInfo->streamInfo.pState, groupId, &tbname);
if (tbname == NULL) {
colDataSetNULL(pTableCol, pBlock->info.rows);
} else {
char parTbName[VARSTR_HEADER_SIZE + TSDB_TABLE_NAME_LEN];
STR_WITH_MAXSIZE_TO_VARSTR(parTbName, tbname, sizeof(parTbName));
colDataSetVal(pTableCol, pBlock->info.rows, (const char*)parTbName, false);
- streamFreeVal(tbname);
+ pAPI->stateStore.streamStateFreeVal(tbname);
}
pBlock->info.rows++;
@@ -1070,12 +1135,13 @@ static void buildDeleteResult(SOperatorInfo* pOperator, TSKEY startTs, TSKEY end
}
static void doDeleteFillResultImpl(SOperatorInfo* pOperator, TSKEY startTs, TSKEY endTs, uint64_t groupId) {
+ SStorageAPI* pAPI = &pOperator->pTaskInfo->storageAPI;
SStreamFillOperatorInfo* pInfo = pOperator->info;
getWindowFromDiscBuf(pOperator, startTs, groupId, pInfo->pFillSup);
setDeleteFillValueInfo(startTs, endTs, pInfo->pFillSup, pInfo->pFillInfo);
SWinKey key = {.ts = startTs, .groupId = groupId};
if (!pInfo->pFillInfo->needFill) {
- streamStateFillDel(pOperator->pTaskInfo->streamInfo.pState, &key);
+ pAPI->stateStore.streamStateFillDel(pOperator->pTaskInfo->streamInfo.pState, &key);
buildDeleteResult(pOperator, startTs, endTs, groupId, pInfo->pDelRes);
} else {
STimeRange tw = {
@@ -1093,6 +1159,8 @@ static void doDeleteFillResultImpl(SOperatorInfo* pOperator, TSKEY startTs, TSKE
}
static void doDeleteFillFinalize(SOperatorInfo* pOperator) {
+ SStorageAPI* pAPI = &pOperator->pTaskInfo->storageAPI;
+
SStreamFillOperatorInfo* pInfo = pOperator->info;
SStreamFillInfo* pFillInfo = pInfo->pFillInfo;
int32_t size = taosArrayGetSize(pFillInfo->delRanges);
@@ -1109,17 +1177,16 @@ static void doDeleteFillFinalize(SOperatorInfo* pOperator) {
pInfo->pRes->info.id.groupId = range->groupId;
}
SWinKey key = {.ts = range->skey, .groupId = range->groupId};
- streamStateFillDel(pOperator->pTaskInfo->streamInfo.pState, &key);
+ pAPI->stateStore.streamStateFillDel(pOperator->pTaskInfo->streamInfo.pState, &key);
}
}
static void doDeleteFillResult(SOperatorInfo* pOperator) {
+ SStorageAPI* pAPI = &pOperator->pTaskInfo->storageAPI;
+
SStreamFillOperatorInfo* pInfo = pOperator->info;
- SStreamFillSupporter* pFillSup = pInfo->pFillSup;
SStreamFillInfo* pFillInfo = pInfo->pFillInfo;
SSDataBlock* pBlock = pInfo->pSrcDelBlock;
- SSDataBlock* pRes = pInfo->pRes;
- SSDataBlock* pDelRes = pInfo->pDelRes;
SColumnInfoData* pStartCol = taosArrayGet(pBlock->pDataBlock, START_TS_COLUMN_INDEX);
TSKEY* tsStarts = (TSKEY*)pStartCol->pData;
@@ -1130,7 +1197,8 @@ static void doDeleteFillResult(SOperatorInfo* pOperator) {
TSKEY endTs = ts;
uint64_t groupId = groupIds[pInfo->srcDelRowIndex];
SWinKey key = {.ts = ts, .groupId = groupId};
- SStreamStateCur* pCur = streamStateGetAndCheckCur(pOperator->pTaskInfo->streamInfo.pState, &key);
+ SStreamStateCur* pCur = pAPI->stateStore.streamStateGetAndCheckCur(pOperator->pTaskInfo->streamInfo.pState, &key);
+
if (!pCur) {
pInfo->srcDelRowIndex++;
continue;
@@ -1152,15 +1220,15 @@ static void doDeleteFillResult(SOperatorInfo* pOperator) {
SWinKey delKey = {.groupId = delGroupId, .ts = delTs};
if (delTs == nextKey.ts) {
- code = streamStateCurNext(pOperator->pTaskInfo->streamInfo.pState, pCur);
+ code = pAPI->stateStore.streamStateCurNext(pOperator->pTaskInfo->streamInfo.pState, pCur);
if (code == TSDB_CODE_SUCCESS) {
- code = streamStateGetGroupKVByCur(pCur, &nextKey, (const void**)&nextVal, &nextLen);
+ code = pAPI->stateStore.streamStateGetGroupKVByCur(pCur, &nextKey, (const void**)&nextVal, &nextLen);
}
// ts will be deleted later
if (delTs != ts) {
- streamStateFillDel(pOperator->pTaskInfo->streamInfo.pState, &delKey);
- streamStateFreeCur(pCur);
- pCur = streamStateGetAndCheckCur(pOperator->pTaskInfo->streamInfo.pState, &nextKey);
+ pAPI->stateStore.streamStateFillDel(pOperator->pTaskInfo->streamInfo.pState, &delKey);
+ pAPI->stateStore.streamStateFreeCur(pCur);
+ pCur = pAPI->stateStore.streamStateGetAndCheckCur(pOperator->pTaskInfo->streamInfo.pState, &nextKey);
}
endTs = TMAX(delTs, nextKey.ts - 1);
if (code != TSDB_CODE_SUCCESS) {
@@ -1169,9 +1237,11 @@ static void doDeleteFillResult(SOperatorInfo* pOperator) {
}
pInfo->srcDelRowIndex++;
}
- streamStateFreeCur(pCur);
+
+ pAPI->stateStore.streamStateFreeCur(pCur);
doDeleteFillResultImpl(pOperator, ts, endTs, groupId);
}
+
pFillInfo->current = pFillInfo->end + 1;
}
@@ -1230,7 +1300,7 @@ static SSDataBlock* doStreamFill(SOperatorInfo* pOperator) {
SSDataBlock* fillResult = NULL;
SOperatorInfo* downstream = pOperator->pDownstream[0];
while (1) {
- if (pInfo->srcRowIndex >= pInfo->pSrcBlock->info.rows) {
+ if (pInfo->srcRowIndex >= pInfo->pSrcBlock->info.rows || pInfo->pSrcBlock->info.rows == 0) {
// If there are delete datablocks, we receive them first.
SSDataBlock* pBlock = downstream->fpSet.getNextFn(downstream);
if (pBlock == NULL) {
@@ -1269,7 +1339,7 @@ static SSDataBlock* doStreamFill(SOperatorInfo* pOperator) {
case STREAM_PULL_DATA: {
doApplyStreamScalarCalculation(pOperator, pBlock, pInfo->pSrcBlock);
memcpy(pInfo->pSrcBlock->info.parTbName, pBlock->info.parTbName, TSDB_TABLE_NAME_LEN);
- pInfo->srcRowIndex = 0;
+ pInfo->srcRowIndex = -1;
} break;
case STREAM_CREATE_CHILD_TABLE: {
return pBlock;
@@ -1322,7 +1392,7 @@ static int32_t initResultBuf(SStreamFillSupporter* pFillSup) {
}
static SStreamFillSupporter* initStreamFillSup(SStreamFillPhysiNode* pPhyFillNode, SInterval* pInterval,
- SExprInfo* pFillExprInfo, int32_t numOfFillCols) {
+ SExprInfo* pFillExprInfo, int32_t numOfFillCols, SStorageAPI* pAPI) {
SStreamFillSupporter* pFillSup = taosMemoryCalloc(1, sizeof(SStreamFillSupporter));
if (!pFillSup) {
return NULL;
@@ -1335,6 +1405,7 @@ static SStreamFillSupporter* initStreamFillSup(SStreamFillPhysiNode* pPhyFillNod
pFillSup->type = convertFillType(pPhyFillNode->mode);
pFillSup->numOfAllCols = pFillSup->numOfFillCols + numOfNotFillCols;
pFillSup->interval = *pInterval;
+ pFillSup->pAPI = pAPI;
int32_t code = initResultBuf(pFillSup);
if (code != TSDB_CODE_SUCCESS) {
@@ -1343,7 +1414,7 @@ static SStreamFillSupporter* initStreamFillSup(SStreamFillPhysiNode* pPhyFillNod
}
SExprInfo* noFillExpr = createExprInfo(pPhyFillNode->pNotFillExprs, NULL, &numOfNotFillCols);
- code = initExprSupp(&pFillSup->notFillExprSup, noFillExpr, numOfNotFillCols);
+ code = initExprSupp(&pFillSup->notFillExprSup, noFillExpr, numOfNotFillCols, &pAPI->functionStore);
if (code != TSDB_CODE_SUCCESS) {
destroyStreamFillSupporter(pFillSup);
return NULL;
@@ -1414,7 +1485,7 @@ SOperatorInfo* createStreamFillOperatorInfo(SOperatorInfo* downstream, SStreamFi
SInterval* pInterval = &((SStreamIntervalOperatorInfo*)downstream->info)->interval;
int32_t numOfFillCols = 0;
SExprInfo* pFillExprInfo = createExprInfo(pPhyFillNode->pFillExprs, NULL, &numOfFillCols);
- pInfo->pFillSup = initStreamFillSup(pPhyFillNode, pInterval, pFillExprInfo, numOfFillCols);
+ pInfo->pFillSup = initStreamFillSup(pPhyFillNode, pInterval, pFillExprInfo, numOfFillCols, &pTaskInfo->storageAPI);
if (!pInfo->pFillSup) {
goto _error;
}
@@ -1479,12 +1550,12 @@ SOperatorInfo* createStreamFillOperatorInfo(SOperatorInfo* downstream, SStreamFi
goto _error;
}
- code = initExprSupp(&pOperator->exprSupp, pFillExprInfo, numOfFillCols);
+ code = initExprSupp(&pOperator->exprSupp, pFillExprInfo, numOfFillCols, &pTaskInfo->storageAPI.functionStore);
if (code != TSDB_CODE_SUCCESS) {
goto _error;
}
- pInfo->srcRowIndex = 0;
+ pInfo->srcRowIndex = -1;
setOperatorInfo(pOperator, "StreamFillOperator", QUERY_NODE_PHYSICAL_PLAN_STREAM_FILL, false, OP_NOT_OPENED, pInfo,
pTaskInfo);
pOperator->fpSet =
diff --git a/source/libs/executor/src/groupoperator.c b/source/libs/executor/src/groupoperator.c
index 936030fa57525d894a46aa53cdf86e571c4c4c35..7aac639027fa63e5aee3b1f8a3c834f8999ff7ac 100644
--- a/source/libs/executor/src/groupoperator.c
+++ b/source/libs/executor/src/groupoperator.c
@@ -451,7 +451,7 @@ SOperatorInfo* createGroupOperatorInfo(SOperatorInfo* downstream, SAggPhysiNode*
}
pInfo->pGroupCols = extractColumnInfo(pAggNode->pGroupKeys);
- code = initExprSupp(&pInfo->scalarSup, pScalarExprInfo, numOfScalarExpr);
+ code = initExprSupp(&pInfo->scalarSup, pScalarExprInfo, numOfScalarExpr, &pTaskInfo->storageAPI.functionStore);
if (code != TSDB_CODE_SUCCESS) {
goto _error;
}
@@ -467,7 +467,7 @@ SOperatorInfo* createGroupOperatorInfo(SOperatorInfo* downstream, SAggPhysiNode*
int32_t num = 0;
SExprInfo* pExprInfo = createExprInfo(pAggNode->pAggFuncs, pAggNode->pGroupKeys, &num);
code = initAggSup(&pOperator->exprSupp, &pInfo->aggSup, pExprInfo, num, pInfo->groupKeyLen, pTaskInfo->id.str,
- pTaskInfo->streamInfo.pState);
+ pTaskInfo->streamInfo.pState, &pTaskInfo->storageAPI.functionStore);
if (code != TSDB_CODE_SUCCESS) {
goto _error;
}
@@ -849,7 +849,7 @@ SOperatorInfo* createPartitionOperatorInfo(SOperatorInfo* downstream, SPartition
if (pPartNode->pExprs != NULL) {
int32_t num = 0;
SExprInfo* pExprInfo1 = createExprInfo(pPartNode->pExprs, NULL, &num);
- int32_t code = initExprSupp(&pInfo->scalarSup, pExprInfo1, num);
+ int32_t code = initExprSupp(&pInfo->scalarSup, pExprInfo1, num, &pTaskInfo->storageAPI.functionStore);
if (code != TSDB_CODE_SUCCESS) {
terrno = code;
pTaskInfo->code = terrno;
@@ -951,6 +951,8 @@ static bool hasRemainPartion(SStreamPartitionOperatorInfo* pInfo) { return pInfo
static bool hasRemainTbName(SStreamPartitionOperatorInfo* pInfo) { return pInfo->pTbNameIte != NULL; }
static SSDataBlock* buildStreamPartitionResult(SOperatorInfo* pOperator) {
+ SStorageAPI* pAPI = &pOperator->pTaskInfo->storageAPI;
+
SStreamPartitionOperatorInfo* pInfo = pOperator->info;
SSDataBlock* pDest = pInfo->binfo.pRes;
ASSERT(hasRemainPartion(pInfo));
@@ -973,9 +975,9 @@ static SSDataBlock* buildStreamPartitionResult(SOperatorInfo* pOperator) {
pDest->info.parTbName[0] = 0;
if (pInfo->tbnameCalSup.numOfExprs > 0) {
void* tbname = NULL;
- if (streamStateGetParName(pOperator->pTaskInfo->streamInfo.pState, pParInfo->groupId, &tbname) == 0) {
+ if (pAPI->stateStore.streamStateGetParName(pOperator->pTaskInfo->streamInfo.pState, pParInfo->groupId, &tbname) == 0) {
memcpy(pDest->info.parTbName, tbname, TSDB_TABLE_NAME_LEN);
- streamFreeVal(tbname);
+ pAPI->stateStore.streamStateFreeVal(tbname);
}
}
taosArrayDestroy(pParInfo->rowIds);
@@ -991,10 +993,10 @@ static SSDataBlock* buildStreamPartitionResult(SOperatorInfo* pOperator) {
return pDest;
}
-void appendCreateTableRow(SStreamState* pState, SExprSupp* pTableSup, SExprSupp* pTagSup, uint64_t groupId,
- SSDataBlock* pSrcBlock, int32_t rowId, SSDataBlock* pDestBlock) {
+void appendCreateTableRow(void* pState, SExprSupp* pTableSup, SExprSupp* pTagSup, uint64_t groupId,
+ SSDataBlock* pSrcBlock, int32_t rowId, SSDataBlock* pDestBlock, SStateStore* pAPI) {
void* pValue = NULL;
- if (streamStateGetParName(pState, groupId, &pValue) != 0) {
+ if (pAPI->streamStateGetParName(pState, groupId, &pValue) != 0) {
SSDataBlock* pTmpBlock = blockCopyOneRow(pSrcBlock, rowId);
memset(pTmpBlock->info.parTbName, 0, TSDB_TABLE_NAME_LEN);
pTmpBlock->info.id.groupId = groupId;
@@ -1011,7 +1013,7 @@ void appendCreateTableRow(SStreamState* pState, SExprSupp* pTableSup, SExprSupp*
void* pData = colDataGetData(pTbCol, pDestBlock->info.rows - 1);
len = TMIN(varDataLen(pData), TSDB_TABLE_NAME_LEN - 1);
memcpy(tbName, varDataVal(pData), len);
- streamStatePutParName(pState, groupId, tbName);
+ pAPI->streamStatePutParName(pState, groupId, tbName);
}
memcpy(pTmpBlock->info.parTbName, tbName, len);
pDestBlock->info.rows--;
@@ -1035,10 +1037,12 @@ void appendCreateTableRow(SStreamState* pState, SExprSupp* pTableSup, SExprSupp*
} else {
memcpy(pSrcBlock->info.parTbName, pValue, TSDB_TABLE_NAME_LEN);
}
- streamStateReleaseBuf(pState, NULL, pValue);
+ pAPI->streamStateReleaseBuf(pState, NULL, pValue);
}
static SSDataBlock* buildStreamCreateTableResult(SOperatorInfo* pOperator) {
+ SExecTaskInfo* pTask = pOperator->pTaskInfo;
+
SStreamPartitionOperatorInfo* pInfo = pOperator->info;
if ((pInfo->tbnameCalSup.numOfExprs == 0 && pInfo->tagCalSup.numOfExprs == 0) ||
taosHashGetSize(pInfo->pPartitions) == 0) {
@@ -1051,8 +1055,8 @@ static SSDataBlock* buildStreamCreateTableResult(SOperatorInfo* pOperator) {
if (pInfo->pTbNameIte != NULL) {
SPartitionDataInfo* pParInfo = (SPartitionDataInfo*)pInfo->pTbNameIte;
int32_t rowId = *(int32_t*)taosArrayGet(pParInfo->rowIds, 0);
- appendCreateTableRow(pOperator->pTaskInfo->streamInfo.pState, &pInfo->tbnameCalSup, &pInfo->tagCalSup,
- pParInfo->groupId, pSrc, rowId, pInfo->pCreateTbRes);
+ appendCreateTableRow(pTask->streamInfo.pState, &pInfo->tbnameCalSup, &pInfo->tagCalSup,
+ pParInfo->groupId, pSrc, rowId, pInfo->pCreateTbRes, &pTask->storageAPI.stateStore);
pInfo->pTbNameIte = taosHashIterate(pInfo->pPartitions, pInfo->pTbNameIte);
}
return pInfo->pCreateTbRes->info.rows > 0 ? pInfo->pCreateTbRes : NULL;
@@ -1165,14 +1169,17 @@ static void destroyStreamPartitionOperatorInfo(void* param) {
}
void initParDownStream(SOperatorInfo* downstream, SPartitionBySupporter* pParSup, SExprSupp* pExpr) {
+ SStorageAPI* pAPI = &downstream->pTaskInfo->storageAPI;
+
if (downstream->operatorType != QUERY_NODE_PHYSICAL_PLAN_STREAM_SCAN) {
return;
}
+
SStreamScanInfo* pScanInfo = downstream->info;
pScanInfo->partitionSup = *pParSup;
pScanInfo->pPartScalarSup = pExpr;
if (!pScanInfo->igCheckUpdate && !pScanInfo->pUpdateInfo) {
- pScanInfo->pUpdateInfo = updateInfoInit(60000, TSDB_TIME_PRECISION_MILLI, 0);
+ pScanInfo->pUpdateInfo = pAPI->stateStore.updateInfoInit(60000, TSDB_TIME_PRECISION_MILLI, 0);
}
}
@@ -1236,7 +1243,7 @@ SOperatorInfo* createStreamPartitionOperatorInfo(SOperatorInfo* downstream, SStr
if (pPartNode->part.pExprs != NULL) {
int32_t num = 0;
SExprInfo* pCalExprInfo = createExprInfo(pPartNode->part.pExprs, NULL, &num);
- code = initExprSupp(&pInfo->scalarSup, pCalExprInfo, num);
+ code = initExprSupp(&pInfo->scalarSup, pCalExprInfo, num, &pTaskInfo->storageAPI.functionStore);
if (code != TSDB_CODE_SUCCESS) {
goto _error;
}
@@ -1251,7 +1258,7 @@ SOperatorInfo* createStreamPartitionOperatorInfo(SOperatorInfo* downstream, SStr
}
pInfo->tbnameCalSup.pExprInfo = pSubTableExpr;
createExprFromOneNode(pSubTableExpr, pPartNode->pSubtable, 0);
- code = initExprSupp(&pInfo->tbnameCalSup, pSubTableExpr, 1);
+ code = initExprSupp(&pInfo->tbnameCalSup, pSubTableExpr, 1, &pTaskInfo->storageAPI.functionStore);
if (code != TSDB_CODE_SUCCESS) {
goto _error;
}
@@ -1265,7 +1272,7 @@ SOperatorInfo* createStreamPartitionOperatorInfo(SOperatorInfo* downstream, SStr
code = TSDB_CODE_OUT_OF_MEMORY;
goto _error;
}
- if (initExprSupp(&pInfo->tagCalSup, pTagExpr, numOfTags) != 0) {
+ if (initExprSupp(&pInfo->tagCalSup, pTagExpr, numOfTags, &pTaskInfo->storageAPI.functionStore) != 0) {
code = TSDB_CODE_OUT_OF_MEMORY;
goto _error;
}
diff --git a/source/libs/executor/src/joinoperator.c b/source/libs/executor/src/joinoperator.c
index 754b5f47373e8444c55c9eb64c6ab42694c5142d..442f8162edb98c619a3a5cd631acb845e902a978 100644
--- a/source/libs/executor/src/joinoperator.c
+++ b/source/libs/executor/src/joinoperator.c
@@ -30,11 +30,13 @@ typedef struct SJoinRowCtx {
bool rowRemains;
int64_t ts;
SArray* leftRowLocations;
- SArray* rightRowLocations;
SArray* leftCreatedBlocks;
SArray* rightCreatedBlocks;
int32_t leftRowIdx;
int32_t rightRowIdx;
+
+ bool rightUseBuildTable;
+ SArray* rightRowLocations;
} SJoinRowCtx;
typedef struct SJoinOperatorInfo {
@@ -50,7 +52,17 @@ typedef struct SJoinOperatorInfo {
int32_t rightPos;
SColumnInfo rightCol;
SNode* pCondAfterMerge;
+ SNode* pColEqualOnConditions;
+
+ SArray* leftEqOnCondCols;
+ char* leftEqOnCondKeyBuf;
+ int32_t leftEqOnCondKeyLen;
+ SArray* rightEqOnCondCols;
+ char* rightEqOnCondKeyBuf;
+ int32_t rightEqOnCondKeyLen;
+
+ SSHashObj* rightBuildTable;
SJoinRowCtx rowCtx;
} SJoinOperatorInfo;
@@ -92,6 +104,100 @@ static void extractTimeCondition(SJoinOperatorInfo* pInfo, SOperatorInfo** pDown
setJoinColumnInfo(&pInfo->rightCol, rightTsCol);
}
+static void extractEqualOnCondColsFromOper(SJoinOperatorInfo* pInfo, SOperatorInfo** pDownstreams, SOperatorNode* pOperNode,
+ SColumn* pLeft, SColumn* pRight) {
+ SColumnNode* pLeftNode = (SColumnNode*)pOperNode->pLeft;
+ SColumnNode* pRightNode = (SColumnNode*)pOperNode->pRight;
+ if (pLeftNode->dataBlockId == pRightNode->dataBlockId || pLeftNode->dataBlockId == pDownstreams[0]->resultDataBlockId) {
+ *pLeft = extractColumnFromColumnNode((SColumnNode*)pOperNode->pLeft);
+ *pRight = extractColumnFromColumnNode((SColumnNode*)pOperNode->pRight);
+ } else {
+ *pLeft = extractColumnFromColumnNode((SColumnNode*)pOperNode->pRight);
+ *pRight = extractColumnFromColumnNode((SColumnNode*)pOperNode->pLeft);
+ }
+}
+
+static void extractEqualOnCondCols(SJoinOperatorInfo* pInfo, SOperatorInfo** pDownStream, SNode* pEqualOnCondNode,
+ SArray* leftTagEqCols, SArray* rightTagEqCols) {
+ SColumn left = {0};
+ SColumn right = {0};
+ if (nodeType(pEqualOnCondNode) == QUERY_NODE_LOGIC_CONDITION && ((SLogicConditionNode*)pEqualOnCondNode)->condType == LOGIC_COND_TYPE_AND) {
+ SNode* pNode = NULL;
+ FOREACH(pNode, ((SLogicConditionNode*)pEqualOnCondNode)->pParameterList) {
+ SOperatorNode* pOperNode = (SOperatorNode*)pNode;
+ extractEqualOnCondColsFromOper(pInfo, pDownStream, pOperNode, &left, &right);
+ taosArrayPush(leftTagEqCols, &left);
+ taosArrayPush(rightTagEqCols, &right);
+ }
+ return;
+ }
+
+ if (nodeType(pEqualOnCondNode) == QUERY_NODE_OPERATOR) {
+ SOperatorNode* pOperNode = (SOperatorNode*)pEqualOnCondNode;
+ extractEqualOnCondColsFromOper(pInfo, pDownStream, pOperNode, &left, &right);
+ taosArrayPush(leftTagEqCols, &left);
+ taosArrayPush(rightTagEqCols, &right);
+ }
+}
+
+static int32_t initTagColskeyBuf(int32_t* keyLen, char** keyBuf, const SArray* pGroupColList) {
+ int32_t numOfGroupCols = taosArrayGetSize(pGroupColList);
+ for (int32_t i = 0; i < numOfGroupCols; ++i) {
+ SColumn* pCol = (SColumn*)taosArrayGet(pGroupColList, i);
+ (*keyLen) += pCol->bytes; // actual data + null_flag
+ }
+
+ int32_t nullFlagSize = sizeof(int8_t) * numOfGroupCols;
+ (*keyLen) += nullFlagSize;
+
+ (*keyBuf) = taosMemoryCalloc(1, (*keyLen));
+ if ((*keyBuf) == NULL) {
+ return TSDB_CODE_OUT_OF_MEMORY;
+ }
+
+ return TSDB_CODE_SUCCESS;
+}
+
+static int32_t fillKeyBufFromTagCols(SArray* pCols, SSDataBlock* pBlock, int32_t rowIndex, void* pKey) {
+ SColumnDataAgg* pColAgg = NULL;
+ size_t numOfGroupCols = taosArrayGetSize(pCols);
+ char* isNull = (char*)pKey;
+ char* pStart = (char*)pKey + sizeof(int8_t) * numOfGroupCols;
+
+ for (int32_t i = 0; i < numOfGroupCols; ++i) {
+ SColumn* pCol = (SColumn*) taosArrayGet(pCols, i);
+ SColumnInfoData* pColInfoData = taosArrayGet(pBlock->pDataBlock, pCol->slotId);
+
+ // valid range check. todo: return error code.
+ if (pCol->slotId > taosArrayGetSize(pBlock->pDataBlock)) {
+ continue;
+ }
+
+ if (pBlock->pBlockAgg != NULL) {
+ pColAgg = pBlock->pBlockAgg[pCol->slotId]; // TODO is agg data matched?
+ }
+
+ if (colDataIsNull(pColInfoData, pBlock->info.rows, rowIndex, pColAgg)) {
+ isNull[i] = 1;
+ } else {
+ isNull[i] = 0;
+ char* val = colDataGetData(pColInfoData, rowIndex);
+ if (pCol->type == TSDB_DATA_TYPE_JSON) {
+ int32_t dataLen = getJsonValueLen(val);
+ memcpy(pStart, val, dataLen);
+ pStart += dataLen;
+ } else if (IS_VAR_DATA_TYPE(pCol->type)) {
+ varDataCopy(pStart, val);
+ pStart += varDataTLen(val);
+ } else {
+ memcpy(pStart, val, pCol->bytes);
+ pStart += pCol->bytes;
+ }
+ }
+ }
+ return (int32_t)(pStart - (char*)pKey);
+}
+
SOperatorInfo* createMergeJoinOperatorInfo(SOperatorInfo** pDownstream, int32_t numOfDownstream,
SSortMergeJoinPhysiNode* pJoinNode, SExecTaskInfo* pTaskInfo) {
SJoinOperatorInfo* pInfo = taosMemoryCalloc(1, sizeof(SJoinOperatorInfo));
@@ -153,6 +259,16 @@ SOperatorInfo* createMergeJoinOperatorInfo(SOperatorInfo** pDownstream, int32_t
pInfo->inputOrder = TSDB_ORDER_DESC;
}
+ pInfo->pColEqualOnConditions = pJoinNode->pColEqualOnConditions;
+ if (pInfo->pColEqualOnConditions != NULL) {
+ pInfo->leftEqOnCondCols = taosArrayInit(4, sizeof(SColumn));
+ pInfo->rightEqOnCondCols = taosArrayInit(4, sizeof(SColumn));
+ extractEqualOnCondCols(pInfo, pDownstream, pInfo->pColEqualOnConditions, pInfo->leftEqOnCondCols, pInfo->rightEqOnCondCols);
+ initTagColskeyBuf(&pInfo->leftEqOnCondKeyLen, &pInfo->leftEqOnCondKeyBuf, pInfo->leftEqOnCondCols);
+ initTagColskeyBuf(&pInfo->rightEqOnCondKeyLen, &pInfo->rightEqOnCondKeyBuf, pInfo->rightEqOnCondCols);
+ _hash_fn_t hashFn = taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY);
+ pInfo->rightBuildTable = tSimpleHashInit(256, hashFn);
+ }
pOperator->fpSet = createOperatorFpSet(optrDummyOpenFn, doMergeJoin, NULL, destroyMergeJoinOperator, optrDefaultBufFn, NULL);
code = appendDownstream(pOperator, pDownstream, numOfDownstream);
if (code != TSDB_CODE_SUCCESS) {
@@ -179,8 +295,28 @@ void setJoinColumnInfo(SColumnInfo* pColumn, const SColumnNode* pColumnNode) {
pColumn->scale = pColumnNode->node.resType.scale;
}
+static void mergeJoinDestroyBuildTable(SSHashObj* pBuildTable) {
+ void* p = NULL;
+ int32_t iter = 0;
+
+ while ((p = tSimpleHashIterate(pBuildTable, p, &iter)) != NULL) {
+ SArray* rows = (*(SArray**)p);
+ taosArrayDestroy(rows);
+ }
+
+ tSimpleHashCleanup(pBuildTable);
+}
+
void destroyMergeJoinOperator(void* param) {
SJoinOperatorInfo* pJoinOperator = (SJoinOperatorInfo*)param;
+ if (pJoinOperator->pColEqualOnConditions != NULL) {
+ mergeJoinDestroyBuildTable(pJoinOperator->rightBuildTable);
+ taosMemoryFreeClear(pJoinOperator->rightEqOnCondKeyBuf);
+ taosArrayDestroy(pJoinOperator->rightEqOnCondCols);
+
+ taosMemoryFreeClear(pJoinOperator->leftEqOnCondKeyBuf);
+ taosArrayDestroy(pJoinOperator->leftEqOnCondCols);
+ }
nodesDestroyNode(pJoinOperator->pCondAfterMerge);
pJoinOperator->pRes = blockDataDestroy(pJoinOperator->pRes);
@@ -300,21 +436,122 @@ static int32_t mergeJoinGetDownStreamRowsEqualTimeStamp(SOperatorInfo* pOperator
return 0;
}
+static int32_t mergeJoinFillBuildTable(SJoinOperatorInfo* pInfo, SArray* rightRowLocations) {
+ for (int32_t i = 0; i < taosArrayGetSize(rightRowLocations); ++i) {
+ SRowLocation* rightRow = taosArrayGet(rightRowLocations, i);
+ int32_t keyLen = fillKeyBufFromTagCols(pInfo->rightEqOnCondCols, rightRow->pDataBlock, rightRow->pos, pInfo->rightEqOnCondKeyBuf);
+ SArray** ppRows = tSimpleHashGet(pInfo->rightBuildTable, pInfo->rightEqOnCondKeyBuf, keyLen);
+ if (!ppRows) {
+ SArray* rows = taosArrayInit(4, sizeof(SRowLocation));
+ taosArrayPush(rows, rightRow);
+ tSimpleHashPut(pInfo->rightBuildTable, pInfo->rightEqOnCondKeyBuf, keyLen, &rows, POINTER_BYTES);
+ } else {
+ taosArrayPush(*ppRows, rightRow);
+ }
+ }
+ return TSDB_CODE_SUCCESS;
+}
+
+static int32_t mergeJoinLeftRowsRightRows(SOperatorInfo* pOperator, SSDataBlock* pRes, int32_t* nRows,
+ const SArray* leftRowLocations, int32_t leftRowIdx,
+ int32_t rightRowIdx, bool useBuildTableTSRange, SArray* rightRowLocations, bool* pReachThreshold) {
+ *pReachThreshold = false;
+ uint32_t limitRowNum = pOperator->resultInfo.threshold;
+ SJoinOperatorInfo* pJoinInfo = pOperator->info;
+ size_t leftNumJoin = taosArrayGetSize(leftRowLocations);
+
+ int32_t i,j;
+
+ for (i = leftRowIdx; i < leftNumJoin; ++i, rightRowIdx = 0) {
+ SRowLocation* leftRow = taosArrayGet(leftRowLocations, i);
+ SArray* pRightRows = NULL;
+ if (useBuildTableTSRange) {
+ int32_t keyLen = fillKeyBufFromTagCols(pJoinInfo->leftEqOnCondCols, leftRow->pDataBlock, leftRow->pos, pJoinInfo->leftEqOnCondKeyBuf);
+ SArray** ppRightRows = tSimpleHashGet(pJoinInfo->rightBuildTable, pJoinInfo->leftEqOnCondKeyBuf, keyLen);
+ if (!ppRightRows) {
+ continue;
+ }
+ pRightRows = *ppRightRows;
+ } else {
+ pRightRows = rightRowLocations;
+ }
+ size_t rightRowsSize = taosArrayGetSize(pRightRows);
+ for (j = rightRowIdx; j < rightRowsSize; ++j) {
+ if (*nRows >= limitRowNum) {
+ *pReachThreshold = true;
+ break;
+ }
+
+ SRowLocation* rightRow = taosArrayGet(pRightRows, j);
+ mergeJoinJoinLeftRight(pOperator, pRes, *nRows, leftRow->pDataBlock, leftRow->pos, rightRow->pDataBlock,
+ rightRow->pos);
+ ++*nRows;
+ }
+ if (*pReachThreshold) {
+ break;
+ }
+ }
+
+ if (*pReachThreshold) {
+ pJoinInfo->rowCtx.rowRemains = true;
+ pJoinInfo->rowCtx.leftRowIdx = i;
+ pJoinInfo->rowCtx.rightRowIdx = j;
+ }
+ return TSDB_CODE_SUCCESS;
+}
+
+static void mergeJoinDestroyTSRangeCtx(SJoinOperatorInfo* pJoinInfo, SArray* leftRowLocations, SArray* leftCreatedBlocks,
+ SArray* rightCreatedBlocks, bool rightUseBuildTable, SArray* rightRowLocations) {
+ for (int i = 0; i < taosArrayGetSize(rightCreatedBlocks); ++i) {
+ SSDataBlock* pBlock = taosArrayGetP(rightCreatedBlocks, i);
+ blockDataDestroy(pBlock);
+ }
+ taosArrayDestroy(rightCreatedBlocks);
+ for (int i = 0; i < taosArrayGetSize(leftCreatedBlocks); ++i) {
+ SSDataBlock* pBlock = taosArrayGetP(leftCreatedBlocks, i);
+ blockDataDestroy(pBlock);
+ }
+ if (rightRowLocations != NULL) {
+ taosArrayDestroy(rightRowLocations);
+ }
+ if (rightUseBuildTable) {
+ void* p = NULL;
+ int32_t iter = 0;
+ while ((p = tSimpleHashIterate(pJoinInfo->rightBuildTable, p, &iter)) != NULL) {
+ SArray* rows = (*(SArray**)p);
+ taosArrayDestroy(rows);
+ }
+ tSimpleHashClear(pJoinInfo->rightBuildTable);
+ }
+
+ taosArrayDestroy(leftCreatedBlocks);
+ taosArrayDestroy(leftRowLocations);
+
+ pJoinInfo->rowCtx.rowRemains = false;
+ pJoinInfo->rowCtx.leftRowLocations = NULL;
+ pJoinInfo->rowCtx.leftCreatedBlocks = NULL;
+ pJoinInfo->rowCtx.rightCreatedBlocks = NULL;
+ pJoinInfo->rowCtx.rightUseBuildTable = false;
+ pJoinInfo->rowCtx.rightRowLocations = NULL;
+}
+
static int32_t mergeJoinJoinDownstreamTsRanges(SOperatorInfo* pOperator, int64_t timestamp, SSDataBlock* pRes,
int32_t* nRows) {
int32_t code = TSDB_CODE_SUCCESS;
SJoinOperatorInfo* pJoinInfo = pOperator->info;
SArray* leftRowLocations = NULL;
- SArray* leftCreatedBlocks = NULL;
SArray* rightRowLocations = NULL;
+ SArray* leftCreatedBlocks = NULL;
SArray* rightCreatedBlocks = NULL;
int32_t leftRowIdx = 0;
int32_t rightRowIdx = 0;
- int32_t i, j;
-
+ SSHashObj* rightTableHash = NULL;
+ bool rightUseBuildTable = false;
+
if (pJoinInfo->rowCtx.rowRemains) {
leftRowLocations = pJoinInfo->rowCtx.leftRowLocations;
leftCreatedBlocks = pJoinInfo->rowCtx.leftCreatedBlocks;
+ rightUseBuildTable = pJoinInfo->rowCtx.rightUseBuildTable;
rightRowLocations = pJoinInfo->rowCtx.rightRowLocations;
rightCreatedBlocks = pJoinInfo->rowCtx.rightCreatedBlocks;
leftRowIdx = pJoinInfo->rowCtx.leftRowIdx;
@@ -330,78 +567,40 @@ static int32_t mergeJoinJoinDownstreamTsRanges(SOperatorInfo* pOperator, int64_t
pJoinInfo->leftPos, timestamp, leftRowLocations, leftCreatedBlocks);
mergeJoinGetDownStreamRowsEqualTimeStamp(pOperator, 1, pJoinInfo->rightCol.slotId, pJoinInfo->pRight,
pJoinInfo->rightPos, timestamp, rightRowLocations, rightCreatedBlocks);
+ if (pJoinInfo->pColEqualOnConditions != NULL && taosArrayGetSize(rightRowLocations) > 16) {
+ mergeJoinFillBuildTable(pJoinInfo, rightRowLocations);
+ rightUseBuildTable = true;
+ taosArrayDestroy(rightRowLocations);
+ rightRowLocations = NULL;
+ }
}
size_t leftNumJoin = taosArrayGetSize(leftRowLocations);
- size_t rightNumJoin = taosArrayGetSize(rightRowLocations);
- uint32_t maxRowNum = *nRows + (leftNumJoin - leftRowIdx - 1) * rightNumJoin + rightNumJoin - rightRowIdx;
- uint32_t limitRowNum = maxRowNum;
- if (maxRowNum > pOperator->resultInfo.threshold) {
- limitRowNum = pOperator->resultInfo.threshold;
- if (!pJoinInfo->rowCtx.rowRemains) {
- pJoinInfo->rowCtx.rowRemains = true;
- pJoinInfo->rowCtx.ts = timestamp;
- pJoinInfo->rowCtx.leftRowLocations = leftRowLocations;
- pJoinInfo->rowCtx.rightRowLocations = rightRowLocations;
- pJoinInfo->rowCtx.leftCreatedBlocks = leftCreatedBlocks;
- pJoinInfo->rowCtx.rightCreatedBlocks = rightCreatedBlocks;
- }
- }
-
- code = blockDataEnsureCapacity(pRes, limitRowNum);
+ code = blockDataEnsureCapacity(pRes, pOperator->resultInfo.threshold);
if (code != TSDB_CODE_SUCCESS) {
- qError("%s can not ensure block capacity for join. left: %zu, right: %zu", GET_TASKID(pOperator->pTaskInfo),
- leftNumJoin, rightNumJoin);
+ qError("%s can not ensure block capacity for join. left: %zu", GET_TASKID(pOperator->pTaskInfo),
+ leftNumJoin);
}
-
- if (code == TSDB_CODE_SUCCESS) {
- bool done = false;
- for (i = leftRowIdx; i < leftNumJoin; ++i, rightRowIdx = 0) {
- for (j = rightRowIdx; j < rightNumJoin; ++j) {
- if (*nRows >= limitRowNum) {
- done = true;
- break;
- }
-
- SRowLocation* leftRow = taosArrayGet(leftRowLocations, i);
- SRowLocation* rightRow = taosArrayGet(rightRowLocations, j);
- mergeJoinJoinLeftRight(pOperator, pRes, *nRows, leftRow->pDataBlock, leftRow->pos, rightRow->pDataBlock,
- rightRow->pos);
- ++*nRows;
- }
- if (done) {
- break;
- }
- }
+ bool reachThreshold = false;
- if (maxRowNum > pOperator->resultInfo.threshold) {
- pJoinInfo->rowCtx.leftRowIdx = i;
- pJoinInfo->rowCtx.rightRowIdx = j;
- }
+ if (code == TSDB_CODE_SUCCESS) {
+ mergeJoinLeftRowsRightRows(pOperator, pRes, nRows, leftRowLocations, leftRowIdx,
+ rightRowIdx, rightUseBuildTable, rightRowLocations, &reachThreshold);
}
- if (maxRowNum <= pOperator->resultInfo.threshold) {
- for (int i = 0; i < taosArrayGetSize(rightCreatedBlocks); ++i) {
- SSDataBlock* pBlock = taosArrayGetP(rightCreatedBlocks, i);
- blockDataDestroy(pBlock);
- }
- taosArrayDestroy(rightCreatedBlocks);
- taosArrayDestroy(rightRowLocations);
- for (int i = 0; i < taosArrayGetSize(leftCreatedBlocks); ++i) {
- SSDataBlock* pBlock = taosArrayGetP(leftCreatedBlocks, i);
- blockDataDestroy(pBlock);
- }
- taosArrayDestroy(leftCreatedBlocks);
- taosArrayDestroy(leftRowLocations);
+ if (!reachThreshold) {
+ mergeJoinDestroyTSRangeCtx(pJoinInfo, leftRowLocations, leftCreatedBlocks, rightCreatedBlocks,
+ rightUseBuildTable, rightRowLocations);
- if (pJoinInfo->rowCtx.rowRemains) {
- pJoinInfo->rowCtx.rowRemains = false;
- pJoinInfo->rowCtx.leftRowLocations = NULL;
- pJoinInfo->rowCtx.rightRowLocations = NULL;
- pJoinInfo->rowCtx.leftCreatedBlocks = NULL;
- pJoinInfo->rowCtx.rightCreatedBlocks = NULL;
- }
+ } else {
+ pJoinInfo->rowCtx.rowRemains = true;
+ pJoinInfo->rowCtx.ts = timestamp;
+ pJoinInfo->rowCtx.leftRowLocations = leftRowLocations;
+ pJoinInfo->rowCtx.leftCreatedBlocks = leftCreatedBlocks;
+ pJoinInfo->rowCtx.rightCreatedBlocks = rightCreatedBlocks;
+ pJoinInfo->rowCtx.rightUseBuildTable = rightUseBuildTable;
+ pJoinInfo->rowCtx.rightRowLocations = rightRowLocations;
}
return TSDB_CODE_SUCCESS;
}
diff --git a/source/libs/executor/src/operator.c b/source/libs/executor/src/operator.c
index 729178dc60b482fc82b0fddba5b78ce5358c216d..730252c7ee2e4ae6fe5bb36d5ab159cfe7cb966f 100644
--- a/source/libs/executor/src/operator.c
+++ b/source/libs/executor/src/operator.c
@@ -25,7 +25,8 @@
#include "operator.h"
#include "query.h"
#include "querytask.h"
-#include "vnode.h"
+
+#include "storageapi.h"
SOperatorFpSet createOperatorFpSet(__optr_open_fn_t openFn, __optr_fn_t nextFn, __optr_fn_t cleanup,
__optr_close_fn_t closeFn, __optr_reqBuf_fn_t reqBufFn,
@@ -233,11 +234,12 @@ int32_t getTableScanInfo(SOperatorInfo* pOperator, int32_t* order, int32_t* scan
}
static ERetType doStopDataReader(SOperatorInfo* pOperator, STraverParam* pParam, const char* pIdStr) {
+ SStorageAPI* pAPI = pParam->pParam;
if (pOperator->operatorType == QUERY_NODE_PHYSICAL_PLAN_TABLE_SCAN) {
STableScanInfo* pInfo = pOperator->info;
if (pInfo->base.dataReader != NULL) {
- tsdbReaderSetCloseFlag(pInfo->base.dataReader);
+ pAPI->tsdReader.tsdReaderNotifyClosing(pInfo->base.dataReader);
}
return OPTR_FN_RET_ABORT;
} else if (pOperator->operatorType == QUERY_NODE_PHYSICAL_PLAN_STREAM_SCAN) {
@@ -246,7 +248,7 @@ static ERetType doStopDataReader(SOperatorInfo* pOperator, STraverParam* pParam,
if (pInfo->pTableScanOp != NULL) {
STableScanInfo* pTableScanInfo = pInfo->pTableScanOp->info;
if (pTableScanInfo != NULL && pTableScanInfo->base.dataReader != NULL) {
- tsdbReaderSetCloseFlag(pTableScanInfo->base.dataReader);
+ pAPI->tsdReader.tsdReaderNotifyClosing(pTableScanInfo->base.dataReader);
}
}
@@ -256,8 +258,8 @@ static ERetType doStopDataReader(SOperatorInfo* pOperator, STraverParam* pParam,
return OPTR_FN_RET_CONTINUE;
}
-int32_t stopTableScanOperator(SOperatorInfo* pOperator, const char* pIdStr) {
- STraverParam p = {0};
+int32_t stopTableScanOperator(SOperatorInfo* pOperator, const char* pIdStr, SStorageAPI* pAPI) {
+ STraverParam p = {.pParam = pAPI};
traverseOperatorTree(pOperator, doStopDataReader, &p, pIdStr);
return p.code;
}
@@ -378,17 +380,18 @@ SOperatorInfo* createOperator(SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo, SR
STableListInfo* pTableListInfo = tableListCreate();
if (pBlockNode->tableType == TSDB_SUPER_TABLE) {
- SArray* pList = taosArrayInit(4, sizeof(STableKeyInfo));
- int32_t code = vnodeGetAllTableList(pHandle->vnode, pBlockNode->uid, pList);
+ SArray* pList = taosArrayInit(4, sizeof(uint64_t));
+ int32_t code = pTaskInfo->storageAPI.metaFn.getChildTableList(pHandle->vnode, pBlockNode->uid, pList);
if (code != TSDB_CODE_SUCCESS) {
- pTaskInfo->code = terrno;
+ pTaskInfo->code = code;
+ taosArrayDestroy(pList);
return NULL;
}
size_t num = taosArrayGetSize(pList);
for (int32_t i = 0; i < num; ++i) {
- STableKeyInfo* p = taosArrayGet(pList, i);
- tableListAddTableInfo(pTableListInfo, p->uid, 0);
+ uint64_t* id = taosArrayGet(pList, i);
+ tableListAddTableInfo(pTableListInfo, *id, 0);
}
taosArrayDestroy(pList);
diff --git a/source/libs/executor/src/projectoperator.c b/source/libs/executor/src/projectoperator.c
index 02f504bef03aaf4cff5d1193a2c23fd66b0b6146..dde6f7c0e8a604864ee53fa61d303a783a38403e 100644
--- a/source/libs/executor/src/projectoperator.c
+++ b/source/libs/executor/src/projectoperator.c
@@ -18,6 +18,7 @@
#include "functionMgt.h"
#include "operator.h"
#include "querytask.h"
+#include "tdatablock.h"
typedef struct SProjectOperatorInfo {
SOptrBasicInfo binfo;
@@ -114,7 +115,7 @@ SOperatorInfo* createProjectOperatorInfo(SOperatorInfo* downstream, SProjectPhys
initResultSizeInfo(&pOperator->resultInfo, numOfRows);
code = initAggSup(&pOperator->exprSupp, &pInfo->aggSup, pExprInfo, numOfCols, keyBufSize, pTaskInfo->id.str,
- pTaskInfo->streamInfo.pState);
+ pTaskInfo->streamInfo.pState, &pTaskInfo->storageAPI.functionStore);
if (code != TSDB_CODE_SUCCESS) {
goto _error;
}
@@ -369,7 +370,7 @@ SOperatorInfo* createIndefinitOutputOperatorInfo(SOperatorInfo* downstream, SPhy
if (pPhyNode->pExprs != NULL) {
int32_t num = 0;
SExprInfo* pSExpr = createExprInfo(pPhyNode->pExprs, NULL, &num);
- int32_t code = initExprSupp(&pInfo->scalarSup, pSExpr, num);
+ int32_t code = initExprSupp(&pInfo->scalarSup, pSExpr, num, &pTaskInfo->storageAPI.functionStore);
if (code != TSDB_CODE_SUCCESS) {
goto _error;
}
@@ -391,7 +392,7 @@ SOperatorInfo* createIndefinitOutputOperatorInfo(SOperatorInfo* downstream, SPhy
blockDataEnsureCapacity(pResBlock, numOfRows);
int32_t code = initAggSup(pSup, &pInfo->aggSup, pExprInfo, numOfExpr, keyBufSize, pTaskInfo->id.str,
- pTaskInfo->streamInfo.pState);
+ pTaskInfo->streamInfo.pState, &pTaskInfo->storageAPI.functionStore);
if (code != TSDB_CODE_SUCCESS) {
goto _error;
}
diff --git a/source/libs/executor/src/querytask.c b/source/libs/executor/src/querytask.c
index 7716b5976b2fd37fd3486abc2fd9fc125e5a87c7..22d171e74a5117f13ee648701211d2fff2d9da26 100644
--- a/source/libs/executor/src/querytask.c
+++ b/source/libs/executor/src/querytask.c
@@ -29,13 +29,13 @@
#include "operator.h"
#include "query.h"
#include "querytask.h"
+#include "storageapi.h"
#include "thash.h"
#include "ttypes.h"
-#include "vnode.h"
#define CLEAR_QUERY_STATUS(q, st) ((q)->status &= (~(st)))
-SExecTaskInfo* doCreateTask(uint64_t queryId, uint64_t taskId, int32_t vgId, EOPTR_EXEC_MODEL model) {
+SExecTaskInfo* doCreateTask(uint64_t queryId, uint64_t taskId, int32_t vgId, EOPTR_EXEC_MODEL model, SStorageAPI* pAPI) {
SExecTaskInfo* pTaskInfo = taosMemoryCalloc(1, sizeof(SExecTaskInfo));
if (pTaskInfo == NULL) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
@@ -48,6 +48,7 @@ SExecTaskInfo* doCreateTask(uint64_t queryId, uint64_t taskId, int32_t vgId, EOP
pTaskInfo->execModel = model;
pTaskInfo->stopInfo.pStopInfo = taosArrayInit(4, sizeof(SExchangeOpStopInfo));
pTaskInfo->pResultBlockList = taosArrayInit(128, POINTER_BYTES);
+ pTaskInfo->storageAPI = *pAPI;
taosInitRWLatch(&pTaskInfo->lock);
@@ -55,7 +56,6 @@ SExecTaskInfo* doCreateTask(uint64_t queryId, uint64_t taskId, int32_t vgId, EOP
pTaskInfo->id.queryId = queryId;
pTaskInfo->id.str = taosMemoryMalloc(64);
buildTaskId(taskId, queryId, pTaskInfo->id.str);
-
return pTaskInfo;
}
@@ -63,7 +63,7 @@ bool isTaskKilled(SExecTaskInfo* pTaskInfo) { return (0 != pTaskInfo->code); }
void setTaskKilled(SExecTaskInfo* pTaskInfo, int32_t rspCode) {
pTaskInfo->code = rspCode;
- stopTableScanOperator(pTaskInfo->pRoot, pTaskInfo->id.str);
+ stopTableScanOperator(pTaskInfo->pRoot, pTaskInfo->id.str, &pTaskInfo->storageAPI);
}
void setTaskStatus(SExecTaskInfo* pTaskInfo, int8_t status) {
@@ -78,7 +78,7 @@ void setTaskStatus(SExecTaskInfo* pTaskInfo, int8_t status) {
int32_t createExecTaskInfo(SSubplan* pPlan, SExecTaskInfo** pTaskInfo, SReadHandle* pHandle, uint64_t taskId,
int32_t vgId, char* sql, EOPTR_EXEC_MODEL model) {
- *pTaskInfo = doCreateTask(pPlan->id.queryId, taskId, vgId, model);
+ *pTaskInfo = doCreateTask(pPlan->id.queryId, taskId, vgId, model, &pHandle->api);
if (*pTaskInfo == NULL) {
taosMemoryFree(sql);
return terrno;
@@ -120,13 +120,15 @@ int32_t initQueriedTableSchemaInfo(SReadHandle* pHandle, SScanPhysiNode* pScanNo
return terrno;
}
- metaReaderInit(&mr, pHandle->meta, 0);
- int32_t code = metaGetTableEntryByUidCache(&mr, pScanNode->uid);
+ SStorageAPI* pAPI = &pTaskInfo->storageAPI;
+
+ pAPI->metaReaderFn.initReader(&mr, pHandle->vnode, 0, &pAPI->metaFn);
+ int32_t code = pAPI->metaReaderFn.getEntryGetUidCache(&mr, pScanNode->uid);
if (code != TSDB_CODE_SUCCESS) {
qError("failed to get the table meta, uid:0x%" PRIx64 ", suid:0x%" PRIx64 ", %s", pScanNode->uid, pScanNode->suid,
GET_TASKID(pTaskInfo));
- metaReaderClear(&mr);
+ pAPI->metaReaderFn.clearReader(&mr);
return terrno;
}
@@ -142,9 +144,9 @@ int32_t initQueriedTableSchemaInfo(SReadHandle* pHandle, SScanPhysiNode* pScanNo
tDecoderClear(&mr.coder);
tb_uid_t suid = mr.me.ctbEntry.suid;
- code = metaGetTableEntryByUidCache(&mr, suid);
+ code = pAPI->metaReaderFn.getEntryGetUidCache(&mr, suid);
if (code != TSDB_CODE_SUCCESS) {
- metaReaderClear(&mr);
+ pAPI->metaReaderFn.clearReader(&mr);
return terrno;
}
@@ -154,7 +156,7 @@ int32_t initQueriedTableSchemaInfo(SReadHandle* pHandle, SScanPhysiNode* pScanNo
pSchemaInfo->sw = tCloneSSchemaWrapper(&mr.me.ntbEntry.schemaRow);
}
- metaReaderClear(&mr);
+ pAPI->metaReaderFn.clearReader(&mr);
pSchemaInfo->qsw = extractQueriedColumnSchema(pScanNode);
return TSDB_CODE_SUCCESS;
diff --git a/source/libs/executor/src/scanoperator.c b/source/libs/executor/src/scanoperator.c
index 0537702a754d8e0f0d40f7338f243c3068f20340..2702cf68619d0dd5499cc2b80a40df5522e7c5a0 100644
--- a/source/libs/executor/src/scanoperator.c
+++ b/source/libs/executor/src/scanoperator.c
@@ -13,6 +13,8 @@
* along with this program. If not, see .
*/
+// clang-format off
+
#include "executorInt.h"
#include "filter.h"
#include "function.h"
@@ -33,6 +35,9 @@
#include "operator.h"
#include "querytask.h"
+#include "storageapi.h"
+#include "wal.h"
+
int32_t scanDebug = 0;
#define MULTI_READER_MAX_TABLE_NUM 5000
@@ -85,39 +90,6 @@ static void switchCtxOrder(SqlFunctionCtx* pCtx, int32_t numOfOutput) {
}
}
-static void getNextTimeWindow(SInterval* pInterval, STimeWindow* tw, int32_t order) {
- int32_t factor = GET_FORWARD_DIRECTION_FACTOR(order);
- if (pInterval->intervalUnit != 'n' && pInterval->intervalUnit != 'y') {
- tw->skey += pInterval->sliding * factor;
- tw->ekey = tw->skey + pInterval->interval - 1;
- return;
- }
-
- int64_t key = tw->skey, interval = pInterval->interval;
- // convert key to second
- key = convertTimePrecision(key, pInterval->precision, TSDB_TIME_PRECISION_MILLI) / 1000;
-
- if (pInterval->intervalUnit == 'y') {
- interval *= 12;
- }
-
- struct tm tm;
- time_t t = (time_t)key;
- taosLocalTime(&t, &tm, NULL);
-
- int mon = (int)(tm.tm_year * 12 + tm.tm_mon + interval * factor);
- tm.tm_year = mon / 12;
- tm.tm_mon = mon % 12;
- tw->skey = convertTimePrecision((int64_t)taosMktime(&tm) * 1000LL, TSDB_TIME_PRECISION_MILLI, pInterval->precision);
-
- mon = (int)(mon + interval);
- tm.tm_year = mon / 12;
- tm.tm_mon = mon % 12;
- tw->ekey = convertTimePrecision((int64_t)taosMktime(&tm) * 1000LL, TSDB_TIME_PRECISION_MILLI, pInterval->precision);
-
- tw->ekey -= 1;
-}
-
static bool overlapWithTimeWindow(SInterval* pInterval, SDataBlockInfo* pBlockInfo, int32_t order) {
STimeWindow w = {0};
@@ -127,7 +99,7 @@ static bool overlapWithTimeWindow(SInterval* pInterval, SDataBlockInfo* pBlockIn
}
if (order == TSDB_ORDER_ASC) {
- w = getAlignQueryTimeWindow(pInterval, pInterval->precision, pBlockInfo->window.skey);
+ w = getAlignQueryTimeWindow(pInterval, pBlockInfo->window.skey);
ASSERT(w.ekey >= pBlockInfo->window.skey);
if (w.ekey < pBlockInfo->window.ekey) {
@@ -146,7 +118,7 @@ static bool overlapWithTimeWindow(SInterval* pInterval, SDataBlockInfo* pBlockIn
}
}
} else {
- w = getAlignQueryTimeWindow(pInterval, pInterval->precision, pBlockInfo->window.ekey);
+ w = getAlignQueryTimeWindow(pInterval, pBlockInfo->window.ekey);
ASSERT(w.skey <= pBlockInfo->window.ekey);
if (w.skey > pBlockInfo->window.skey) {
@@ -203,7 +175,7 @@ static int32_t insertTableToScanIgnoreList(STableScanInfo* pTableScanInfo, uint6
return TSDB_CODE_OUT_OF_MEMORY;
}
}
-
+
taosHashPut(pTableScanInfo->pIgnoreTables, &uid, sizeof(uid), &pTableScanInfo->scanTimes, sizeof(pTableScanInfo->scanTimes));
return TSDB_CODE_SUCCESS;
@@ -212,7 +184,7 @@ static int32_t insertTableToScanIgnoreList(STableScanInfo* pTableScanInfo, uint6
static int32_t doDynamicPruneDataBlock(SOperatorInfo* pOperator, SDataBlockInfo* pBlockInfo, uint32_t* status) {
STableScanInfo* pTableScanInfo = pOperator->info;
int32_t code = TSDB_CODE_SUCCESS;
-
+
if (pTableScanInfo->base.pdInfo.pExprSup == NULL) {
return TSDB_CODE_SUCCESS;
}
@@ -261,13 +233,16 @@ static bool doFilterByBlockSMA(SFilterInfo* pFilterInfo, SColumnDataAgg** pColsA
}
static bool doLoadBlockSMA(STableScanBase* pTableScanInfo, SSDataBlock* pBlock, SExecTaskInfo* pTaskInfo) {
+ SStorageAPI* pAPI = &pTaskInfo->storageAPI;
+
bool allColumnsHaveAgg = true;
- int32_t code = tsdbRetrieveDatablockSMA(pTableScanInfo->dataReader, pBlock, &allColumnsHaveAgg);
+ bool hasNullSMA = false;
+ int32_t code = pAPI->tsdReader.tsdReaderRetrieveBlockSMAInfo(pTableScanInfo->dataReader, pBlock, &allColumnsHaveAgg, &hasNullSMA);
if (code != TSDB_CODE_SUCCESS) {
T_LONG_JMP(pTaskInfo->env, code);
}
- if (!allColumnsHaveAgg) {
+ if (!allColumnsHaveAgg || hasNullSMA) {
return false;
}
return true;
@@ -323,6 +298,8 @@ bool applyLimitOffset(SLimitInfo* pLimitInfo, SSDataBlock* pBlock, SExecTaskInfo
static int32_t loadDataBlock(SOperatorInfo* pOperator, STableScanBase* pTableScanInfo, SSDataBlock* pBlock,
uint32_t* status) {
SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo;
+ SStorageAPI* pAPI = &pTaskInfo->storageAPI;
+
SFileBlockLoadRecorder* pCost = &pTableScanInfo->readRecorder;
pCost->totalBlocks += 1;
@@ -343,7 +320,7 @@ static int32_t loadDataBlock(SOperatorInfo* pOperator, STableScanBase* pTableSca
pBlockInfo->window.skey, pBlockInfo->window.ekey, pBlockInfo->rows);
pCost->filterOutBlocks += 1;
pCost->totalRows += pBlock->info.rows;
- tsdbReleaseDataBlock(pTableScanInfo->dataReader);
+ pAPI->tsdReader.tsdReaderReleaseDataBlock(pTableScanInfo->dataReader);
return TSDB_CODE_SUCCESS;
} else if (*status == FUNC_DATA_REQUIRED_NOT_LOAD) {
qDebug("%s data block skipped, brange:%" PRId64 "-%" PRId64 ", rows:%" PRId64 ", uid:%" PRIu64,
@@ -351,7 +328,7 @@ static int32_t loadDataBlock(SOperatorInfo* pOperator, STableScanBase* pTableSca
pBlockInfo->id.uid);
doSetTagColumnData(pTableScanInfo, pBlock, pTaskInfo, pBlock->info.rows);
pCost->skipBlocks += 1;
- tsdbReleaseDataBlock(pTableScanInfo->dataReader);
+ pAPI->tsdReader.tsdReaderReleaseDataBlock(pTableScanInfo->dataReader);
return TSDB_CODE_SUCCESS;
} else if (*status == FUNC_DATA_REQUIRED_SMA_LOAD) {
pCost->loadBlockStatis += 1;
@@ -361,7 +338,7 @@ static int32_t loadDataBlock(SOperatorInfo* pOperator, STableScanBase* pTableSca
qDebug("%s data block SMA loaded, brange:%" PRId64 "-%" PRId64 ", rows:%" PRId64, GET_TASKID(pTaskInfo),
pBlockInfo->window.skey, pBlockInfo->window.ekey, pBlockInfo->rows);
doSetTagColumnData(pTableScanInfo, pBlock, pTaskInfo, pBlock->info.rows);
- tsdbReleaseDataBlock(pTableScanInfo->dataReader);
+ pAPI->tsdReader.tsdReaderReleaseDataBlock(pTableScanInfo->dataReader);
return TSDB_CODE_SUCCESS;
} else {
qDebug("%s failed to load SMA, since not all columns have SMA", GET_TASKID(pTaskInfo));
@@ -383,7 +360,7 @@ static int32_t loadDataBlock(SOperatorInfo* pOperator, STableScanBase* pTableSca
pCost->filterOutBlocks += 1;
(*status) = FUNC_DATA_REQUIRED_FILTEROUT;
- tsdbReleaseDataBlock(pTableScanInfo->dataReader);
+ pAPI->tsdReader.tsdReaderReleaseDataBlock(pTableScanInfo->dataReader);
return TSDB_CODE_SUCCESS;
}
}
@@ -398,10 +375,10 @@ static int32_t loadDataBlock(SOperatorInfo* pOperator, STableScanBase* pTableSca
qDebug("%s data block skipped due to dynamic prune, brange:%" PRId64 "-%" PRId64 ", rows:%" PRId64,
GET_TASKID(pTaskInfo), pBlockInfo->window.skey, pBlockInfo->window.ekey, pBlockInfo->rows);
pCost->skipBlocks += 1;
- tsdbReleaseDataBlock(pTableScanInfo->dataReader);
+ pAPI->tsdReader.tsdReaderReleaseDataBlock(pTableScanInfo->dataReader);
- STableScanInfo* pTableScanInfo = pOperator->info;
- if (taosHashGetSize(pTableScanInfo->pIgnoreTables) == taosArrayGetSize(pTableScanInfo->base.pTableListInfo->pTableList)) {
+ STableScanInfo* p1 = pOperator->info;
+ if (taosHashGetSize(p1->pIgnoreTables) == taosArrayGetSize(p1->base.pTableListInfo->pTableList)) {
*status = FUNC_DATA_REQUIRED_ALL_FILTEROUT;
} else {
*status = FUNC_DATA_REQUIRED_FILTEROUT;
@@ -412,7 +389,7 @@ static int32_t loadDataBlock(SOperatorInfo* pOperator, STableScanBase* pTableSca
pCost->totalCheckedRows += pBlock->info.rows;
pCost->loadBlocks += 1;
- SSDataBlock* p = tsdbRetrieveDataBlock(pTableScanInfo->dataReader, NULL);
+ SSDataBlock* p = pAPI->tsdReader.tsdReaderRetrieveDataBlock(pTableScanInfo->dataReader, NULL);
if (p == NULL) {
return terrno;
}
@@ -524,8 +501,8 @@ int32_t addTagPseudoColumnData(SReadHandle* pHandle, const SExprInfo* pExpr, int
// 1. check if it is existed in meta cache
if (pCache == NULL) {
- metaReaderInit(&mr, pHandle->meta, 0);
- code = metaGetTableEntryByUidCache(&mr, pBlock->info.id.uid);
+ pHandle->api.metaReaderFn.initReader(&mr, pHandle->vnode, 0, &pHandle->api.metaFn);
+ code = pHandle->api.metaReaderFn.getEntryGetUidCache(&mr, pBlock->info.id.uid);
if (code != TSDB_CODE_SUCCESS) {
// when encounter the TSDB_CODE_PAR_TABLE_NOT_EXIST error, we proceed.
if (terrno == TSDB_CODE_PAR_TABLE_NOT_EXIST) {
@@ -538,11 +515,11 @@ int32_t addTagPseudoColumnData(SReadHandle* pHandle, const SExprInfo* pExpr, int
qError("failed to get table meta, uid:0x%" PRIx64 ", code:%s, %s", pBlock->info.id.uid, tstrerror(terrno),
idStr);
}
- metaReaderClear(&mr);
+ pHandle->api.metaReaderFn.clearReader(&mr);
return terrno;
}
- metaReaderReleaseLock(&mr);
+ pHandle->api.metaReaderFn.readerReleaseLock(&mr);
val.pName = mr.me.name;
val.pTags = (STag*)mr.me.ctbEntry.pTags;
@@ -553,8 +530,8 @@ int32_t addTagPseudoColumnData(SReadHandle* pHandle, const SExprInfo* pExpr, int
h = taosLRUCacheLookup(pCache->pTableMetaEntryCache, &pBlock->info.id.uid, sizeof(pBlock->info.id.uid));
if (h == NULL) {
- metaReaderInit(&mr, pHandle->meta, 0);
- code = metaGetTableEntryByUidCache(&mr, pBlock->info.id.uid);
+ pHandle->api.metaReaderFn.initReader(&mr, pHandle->vnode, 0, &pHandle->api.metaFn);
+ code = pHandle->api.metaReaderFn.getEntryGetUidCache(&mr, pBlock->info.id.uid);
if (code != TSDB_CODE_SUCCESS) {
if (terrno == TSDB_CODE_PAR_TABLE_NOT_EXIST) {
qWarn("failed to get table meta, table may have been dropped, uid:0x%" PRIx64 ", code:%s, %s",
@@ -565,11 +542,11 @@ int32_t addTagPseudoColumnData(SReadHandle* pHandle, const SExprInfo* pExpr, int
qError("failed to get table meta, uid:0x%" PRIx64 ", code:%s, %s", pBlock->info.id.uid, tstrerror(terrno),
idStr);
}
- metaReaderClear(&mr);
+ pHandle->api.metaReaderFn.clearReader(&mr);
return terrno;
}
- metaReaderReleaseLock(&mr);
+ pHandle->api.metaReaderFn.readerReleaseLock(&mr);
STableCachedVal* pVal = createTableCacheVal(&mr);
@@ -609,7 +586,7 @@ int32_t addTagPseudoColumnData(SReadHandle* pHandle, const SExprInfo* pExpr, int
} else { // these are tags
STagVal tagVal = {0};
tagVal.cid = pExpr1->base.pParam[0].pCol->colId;
- const char* p = metaGetTableTagVal(val.pTags, pColInfoData->info.type, &tagVal);
+ const char* p = pHandle->api.metaFn.extractTagVal(val.pTags, pColInfoData->info.type, &tagVal);
char* data = NULL;
if (pColInfoData->info.type != TSDB_DATA_TYPE_JSON && p != NULL) {
@@ -628,7 +605,7 @@ int32_t addTagPseudoColumnData(SReadHandle* pHandle, const SExprInfo* pExpr, int
}
if (code) {
if (freeReader) {
- metaReaderClear(&mr);
+ pHandle->api.metaReaderFn.clearReader(&mr);
}
return code;
}
@@ -643,7 +620,7 @@ int32_t addTagPseudoColumnData(SReadHandle* pHandle, const SExprInfo* pExpr, int
// restore the rows
pBlock->info.rows = backupRows;
if (freeReader) {
- metaReaderClear(&mr);
+ pHandle->api.metaReaderFn.clearReader(&mr);
}
return TSDB_CODE_SUCCESS;
@@ -677,6 +654,8 @@ void setTbNameColData(const SSDataBlock* pBlock, SColumnInfoData* pColInfoData,
static SSDataBlock* doTableScanImpl(SOperatorInfo* pOperator) {
STableScanInfo* pTableScanInfo = pOperator->info;
SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo;
+ SStorageAPI* pAPI = &pTaskInfo->storageAPI;
+
SSDataBlock* pBlock = pTableScanInfo->pResBlock;
bool hasNext = false;
int32_t code = TSDB_CODE_SUCCESS;
@@ -684,9 +663,9 @@ static SSDataBlock* doTableScanImpl(SOperatorInfo* pOperator) {
int64_t st = taosGetTimestampUs();
while (true) {
- code = tsdbNextDataBlock(pTableScanInfo->base.dataReader, &hasNext);
+ code = pAPI->tsdReader.tsdNextDataBlock(pTableScanInfo->base.dataReader, &hasNext);
if (code) {
- tsdbReleaseDataBlock(pTableScanInfo->base.dataReader);
+ pAPI->tsdReader.tsdReaderReleaseDataBlock(pTableScanInfo->base.dataReader);
T_LONG_JMP(pTaskInfo->env, code);
}
@@ -695,12 +674,12 @@ static SSDataBlock* doTableScanImpl(SOperatorInfo* pOperator) {
}
if (isTaskKilled(pTaskInfo)) {
- tsdbReleaseDataBlock(pTableScanInfo->base.dataReader);
+ pAPI->tsdReader.tsdReaderReleaseDataBlock(pTableScanInfo->base.dataReader);
T_LONG_JMP(pTaskInfo->env, pTaskInfo->code);
}
if (pOperator->status == OP_EXEC_DONE) {
- tsdbReleaseDataBlock(pTableScanInfo->base.dataReader);
+ pAPI->tsdReader.tsdReaderReleaseDataBlock(pTableScanInfo->base.dataReader);
break;
}
@@ -733,14 +712,6 @@ static SSDataBlock* doTableScanImpl(SOperatorInfo* pOperator) {
pTableScanInfo->base.readRecorder.elapsedTime += (taosGetTimestampUs() - st) / 1000.0;
pOperator->cost.totalCost = pTableScanInfo->base.readRecorder.elapsedTime;
-
- // todo refactor
- /*pTableScanInfo->lastStatus.uid = pBlock->info.id.uid;*/
- /*pTableScanInfo->lastStatus.ts = pBlock->info.window.ekey;*/
- // pTaskInfo->streamInfo.lastStatus.type = TMQ_OFFSET__SNAPSHOT_DATA;
- // pTaskInfo->streamInfo.lastStatus.uid = pBlock->info.id.uid;
- // pTaskInfo->streamInfo.lastStatus.ts = pBlock->info.window.ekey;
-
return pBlock;
}
return NULL;
@@ -749,6 +720,7 @@ static SSDataBlock* doTableScanImpl(SOperatorInfo* pOperator) {
static SSDataBlock* doGroupedTableScan(SOperatorInfo* pOperator) {
STableScanInfo* pTableScanInfo = pOperator->info;
SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo;
+ SStorageAPI* pAPI = &pTaskInfo->storageAPI;
// The read handle is not initialized yet, since no qualified tables exists
if (pTableScanInfo->base.dataReader == NULL || pOperator->status == OP_EXEC_DONE) {
@@ -772,7 +744,7 @@ static SSDataBlock* doGroupedTableScan(SOperatorInfo* pOperator) {
qDebug("start to repeat ascending order scan data blocks due to query func required, %s", GET_TASKID(pTaskInfo));
// do prepare for the next round table scan operation
- tsdbReaderReset(pTableScanInfo->base.dataReader, &pTableScanInfo->base.cond);
+ pAPI->tsdReader.tsdReaderResetStatus(pTableScanInfo->base.dataReader, &pTableScanInfo->base.cond);
}
}
@@ -780,7 +752,7 @@ static SSDataBlock* doGroupedTableScan(SOperatorInfo* pOperator) {
if (pTableScanInfo->scanTimes < total) {
if (pTableScanInfo->base.cond.order == TSDB_ORDER_ASC) {
prepareForDescendingScan(&pTableScanInfo->base, pOperator->exprSupp.pCtx, 0);
- tsdbReaderReset(pTableScanInfo->base.dataReader, &pTableScanInfo->base.cond);
+ pAPI->tsdReader.tsdReaderResetStatus(pTableScanInfo->base.dataReader, &pTableScanInfo->base.cond);
qDebug("%s start to descending order scan data blocks due to query func required", GET_TASKID(pTaskInfo));
}
@@ -798,7 +770,7 @@ static SSDataBlock* doGroupedTableScan(SOperatorInfo* pOperator) {
pTableScanInfo->base.scanFlag = MAIN_SCAN;
qDebug("%s start to repeat descending order scan data blocks", GET_TASKID(pTaskInfo));
- tsdbReaderReset(pTableScanInfo->base.dataReader, &pTableScanInfo->base.cond);
+ pAPI->tsdReader.tsdReaderResetStatus(pTableScanInfo->base.dataReader, &pTableScanInfo->base.cond);
}
}
}
@@ -809,6 +781,7 @@ static SSDataBlock* doGroupedTableScan(SOperatorInfo* pOperator) {
static SSDataBlock* doTableScan(SOperatorInfo* pOperator) {
STableScanInfo* pInfo = pOperator->info;
SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo;
+ SStorageAPI* pAPI = &pTaskInfo->storageAPI;
// scan table one by one sequentially
if (pInfo->scanMode == TABLE_SCAN__TABLE_ORDER) {
@@ -836,11 +809,11 @@ static SSDataBlock* doTableScan(SOperatorInfo* pOperator) {
tInfo = *(STableKeyInfo*)tableListGetInfo(pInfo->base.pTableListInfo, pInfo->currentTable);
taosRUnLockLatch(&pTaskInfo->lock);
- tsdbSetTableList(pInfo->base.dataReader, &tInfo, 1);
+ pAPI->tsdReader.tsdSetQueryTableList(pInfo->base.dataReader, &tInfo, 1);
qDebug("set uid:%" PRIu64 " into scanner, total tables:%d, index:%d/%d %s", tInfo.uid, numOfTables,
pInfo->currentTable, numOfTables, GET_TASKID(pTaskInfo));
- tsdbReaderReset(pInfo->base.dataReader, &pInfo->base.cond);
+ pAPI->tsdReader.tsdReaderResetStatus(pInfo->base.dataReader, &pInfo->base.cond);
pInfo->scanTimes = 0;
}
} else { // scan table group by group sequentially
@@ -855,8 +828,8 @@ static SSDataBlock* doTableScan(SOperatorInfo* pOperator) {
tableListGetGroupList(pInfo->base.pTableListInfo, pInfo->currentGroupId, &pList, &num);
ASSERT(pInfo->base.dataReader == NULL);
- int32_t code = tsdbReaderOpen(pInfo->base.readHandle.vnode, &pInfo->base.cond, pList, num, pInfo->pResBlock,
- (STsdbReader**)&pInfo->base.dataReader, GET_TASKID(pTaskInfo), pInfo->countOnly, &pInfo->pIgnoreTables);
+ int32_t code = pAPI->tsdReader.tsdReaderOpen(pInfo->base.readHandle.vnode, &pInfo->base.cond, pList, num, pInfo->pResBlock,
+ (void**)&pInfo->base.dataReader, GET_TASKID(pTaskInfo), pInfo->countOnly, &pInfo->pIgnoreTables);
if (code != TSDB_CODE_SUCCESS) {
T_LONG_JMP(pTaskInfo->env, code);
}
@@ -884,8 +857,8 @@ static SSDataBlock* doTableScan(SOperatorInfo* pOperator) {
STableKeyInfo* pList = NULL;
tableListGetGroupList(pInfo->base.pTableListInfo, pInfo->currentGroupId, &pList, &num);
- tsdbSetTableList(pInfo->base.dataReader, pList, num);
- tsdbReaderReset(pInfo->base.dataReader, &pInfo->base.cond);
+ pAPI->tsdReader.tsdSetQueryTableList(pInfo->base.dataReader, pList, num);
+ pAPI->tsdReader.tsdReaderResetStatus(pInfo->base.dataReader, &pInfo->base.cond);
pInfo->scanTimes = 0;
result = doGroupedTableScan(pOperator);
@@ -907,10 +880,10 @@ static int32_t getTableScannerExecInfo(struct SOperatorInfo* pOptr, void** pOptr
return 0;
}
-static void destroyTableScanBase(STableScanBase* pBase) {
+static void destroyTableScanBase(STableScanBase* pBase, TsdReader* pAPI) {
cleanupQueryTableDataCond(&pBase->cond);
- tsdbReaderClose(pBase->dataReader);
+ pAPI->tsdReaderClose(pBase->dataReader);
pBase->dataReader = NULL;
if (pBase->matchInfo.pList != NULL) {
@@ -926,7 +899,7 @@ static void destroyTableScanOperatorInfo(void* param) {
STableScanInfo* pTableScanInfo = (STableScanInfo*)param;
blockDataDestroy(pTableScanInfo->pResBlock);
taosHashCleanup(pTableScanInfo->pIgnoreTables);
- destroyTableScanBase(&pTableScanInfo->base);
+ destroyTableScanBase(&pTableScanInfo->base, &pTableScanInfo->base.readerAPI);
taosMemoryFreeClear(param);
}
@@ -959,7 +932,7 @@ SOperatorInfo* createTableScanOperatorInfo(STableScanPhysiNode* pTableScanNode,
if (pScanNode->pScanPseudoCols != NULL) {
SExprSupp* pSup = &pInfo->base.pseudoSup;
pSup->pExprInfo = createExprInfo(pScanNode->pScanPseudoCols, NULL, &pSup->numOfExprs);
- pSup->pCtx = createSqlFunctionCtx(pSup->pExprInfo, pSup->numOfExprs, &pSup->rowEntryInfoOffset);
+ pSup->pCtx = createSqlFunctionCtx(pSup->pExprInfo, pSup->numOfExprs, &pSup->rowEntryInfoOffset, &pTaskInfo->storageAPI.functionStore);
}
pInfo->scanInfo = (SScanInfo){.numOfAsc = pTableScanNode->scanSeq[0], .numOfDesc = pTableScanNode->scanSeq[1]};
@@ -972,6 +945,7 @@ SOperatorInfo* createTableScanOperatorInfo(STableScanPhysiNode* pTableScanNode,
pInfo->sample.sampleRatio = pTableScanNode->ratio;
pInfo->sample.seed = taosGetTimestampSec();
+ pInfo->base.readerAPI = pTaskInfo->storageAPI.tsdReader;
initResultSizeInfo(&pOperator->resultInfo, 4096);
pInfo->pResBlock = createDataBlockFromDescNode(pDescNode);
// blockDataEnsureCapacity(pInfo->pResBlock, pOperator->resultInfo.capacity);
@@ -1066,13 +1040,13 @@ static void setGroupId(SStreamScanInfo* pInfo, SSDataBlock* pBlock, int32_t grou
pInfo->groupId = groupCol[rowIndex];
}
-void resetTableScanInfo(STableScanInfo* pTableScanInfo, STimeWindow* pWin, uint64_t version) {
+void resetTableScanInfo(STableScanInfo* pTableScanInfo, STimeWindow* pWin, uint64_t ver) {
pTableScanInfo->base.cond.twindows = *pWin;
- pTableScanInfo->base.cond.endVersion = version;
+ pTableScanInfo->base.cond.startVersion = 0;
+ pTableScanInfo->base.cond.endVersion = ver;
pTableScanInfo->scanTimes = 0;
pTableScanInfo->currentGroupId = -1;
- tsdbReaderClose(pTableScanInfo->base.dataReader);
- qDebug("1");
+ pTableScanInfo->base.readerAPI.tsdReaderClose(pTableScanInfo->base.dataReader);
pTableScanInfo->base.dataReader = NULL;
}
@@ -1088,11 +1062,12 @@ static SSDataBlock* readPreVersionData(SOperatorInfo* pTableScanOp, uint64_t tbU
cond.twindows = (STimeWindow){.skey = startTs, .ekey = endTs};
SExecTaskInfo* pTaskInfo = pTableScanOp->pTaskInfo;
+ SStorageAPI* pAPI = &pTaskInfo->storageAPI;
SSDataBlock* pBlock = pTableScanInfo->pResBlock;
STsdbReader* pReader = NULL;
- int32_t code = tsdbReaderOpen(pTableScanInfo->base.readHandle.vnode, &cond, &tblInfo, 1, pBlock,
- (STsdbReader**)&pReader, GET_TASKID(pTaskInfo), false, NULL);
+ int32_t code = pAPI->tsdReader.tsdReaderOpen(pTableScanInfo->base.readHandle.vnode, &cond, &tblInfo, 1, pBlock,
+ (void**)&pReader, GET_TASKID(pTaskInfo), false, NULL);
if (code != TSDB_CODE_SUCCESS) {
terrno = code;
T_LONG_JMP(pTaskInfo->env, code);
@@ -1100,7 +1075,7 @@ static SSDataBlock* readPreVersionData(SOperatorInfo* pTableScanOp, uint64_t tbU
}
bool hasNext = false;
- code = tsdbNextDataBlock(pReader, &hasNext);
+ code = pAPI->tsdReader.tsdNextDataBlock(pReader, &hasNext);
if (code != TSDB_CODE_SUCCESS) {
terrno = code;
T_LONG_JMP(pTaskInfo->env, code);
@@ -1108,12 +1083,12 @@ static SSDataBlock* readPreVersionData(SOperatorInfo* pTableScanOp, uint64_t tbU
}
if (hasNext) {
- /*SSDataBlock* p = */ tsdbRetrieveDataBlock(pReader, NULL);
+ /*SSDataBlock* p = */ pAPI->tsdReader.tsdReaderRetrieveDataBlock(pReader, NULL);
doSetTagColumnData(&pTableScanInfo->base, pBlock, pTaskInfo, pBlock->info.rows);
pBlock->info.id.groupId = getTableGroupId(pTableScanInfo->base.pTableListInfo, pBlock->info.id.uid);
}
- tsdbReaderClose(pReader);
+ pAPI->tsdReader.tsdReaderClose(pReader);
qDebug("retrieve prev rows:%" PRId64 ", skey:%" PRId64 ", ekey:%" PRId64 " uid:%" PRIu64 ", max ver:%" PRId64
", suid:%" PRIu64,
pBlock->info.rows, startTs, endTs, tbUid, maxVersion, cond.suid);
@@ -1178,15 +1153,19 @@ static bool prepareRangeScan(SStreamScanInfo* pInfo, SSDataBlock* pBlock, int32_
win.ekey = TMAX(win.ekey, endData[*pRowIndex]);
continue;
}
+
if (win.skey == endData[*pRowIndex] && groupId == gpData[*pRowIndex]) {
win.skey = TMIN(win.skey, startData[*pRowIndex]);
continue;
}
+
ASSERT(!(win.skey > startData[*pRowIndex] && win.ekey < endData[*pRowIndex]) ||
!(isInTimeWindow(&win, startData[*pRowIndex], 0) || isInTimeWindow(&win, endData[*pRowIndex], 0)));
break;
}
+ STableScanInfo* pTScanInfo = pInfo->pTableScanOp->info;
+ qDebug("prepare range scan start:%" PRId64 ",end:%" PRId64 ",maxVer:%" PRIu64, win.skey, win.ekey, pInfo->pUpdateInfo->maxDataVersion);
resetTableScanInfo(pInfo->pTableScanOp->info, &win, pInfo->pUpdateInfo->maxDataVersion);
pInfo->pTableScanOp->status = OP_OPENED;
return true;
@@ -1245,8 +1224,7 @@ static SSDataBlock* doRangeScan(SStreamScanInfo* pInfo, SSDataBlock* pSDB, int32
*pRowIndex = 0;
pInfo->updateWin = (STimeWindow){.skey = INT64_MIN, .ekey = INT64_MAX};
STableScanInfo* pTableScanInfo = pInfo->pTableScanOp->info;
- tsdbReaderClose(pTableScanInfo->base.dataReader);
- qDebug("2");
+ pTableScanInfo->base.readerAPI.tsdReaderClose(pTableScanInfo->base.dataReader);
pTableScanInfo->base.dataReader = NULL;
return NULL;
}
@@ -1291,8 +1269,8 @@ static int32_t getPreSessionWindow(SStreamAggSupporter* pAggSup, TSKEY startTs,
pKey->win.ekey = endTs;
pKey->groupId = groupId;
- SStreamStateCur* pCur = streamStateSessionSeekKeyCurrentPrev(pAggSup->pState, pKey);
- int32_t code = streamStateSessionGetKVByCur(pCur, pKey, NULL, 0);
+ void* pCur = pAggSup->stateStore.streamStateSessionSeekKeyCurrentPrev(pAggSup->pState, pKey);
+ int32_t code = pAggSup->stateStore.streamStateSessionGetKVByCur(pCur, pKey, NULL, 0);
if (code != TSDB_CODE_SUCCESS) {
SET_SESSION_WIN_KEY_INVALID(pKey);
}
@@ -1324,9 +1302,9 @@ static int32_t generateSessionScanRange(SStreamScanInfo* pInfo, SSDataBlock* pSr
SColumnInfoData* pDestGpCol = taosArrayGet(pDestBlock->pDataBlock, GROUPID_COLUMN_INDEX);
SColumnInfoData* pDestCalStartTsCol = taosArrayGet(pDestBlock->pDataBlock, CALCULATE_START_TS_COLUMN_INDEX);
SColumnInfoData* pDestCalEndTsCol = taosArrayGet(pDestBlock->pDataBlock, CALCULATE_END_TS_COLUMN_INDEX);
- int64_t version = pSrcBlock->info.version - 1;
+ int64_t ver = pSrcBlock->info.version - 1;
for (int32_t i = 0; i < pSrcBlock->info.rows; i++) {
- uint64_t groupId = getGroupIdByData(pInfo, uidCol[i], startData[i], version);
+ uint64_t groupId = getGroupIdByData(pInfo, uidCol[i], startData[i], ver);
// gap must be 0.
SSessionKey startWin = {0};
getCurSessionWindow(pInfo->windowSup.pStreamAggSup, startData[i], startData[i], groupId, &startWin);
@@ -1372,13 +1350,13 @@ static int32_t generateIntervalScanRange(SStreamScanInfo* pInfo, SSDataBlock* pS
ASSERT(pSrcStartTsCol->info.type == TSDB_DATA_TYPE_TIMESTAMP);
TSKEY* srcStartTsCol = (TSKEY*)pSrcStartTsCol->pData;
TSKEY* srcEndTsCol = (TSKEY*)pSrcEndTsCol->pData;
- int64_t version = pSrcBlock->info.version - 1;
+ int64_t ver = pSrcBlock->info.version - 1;
if (pInfo->partitionSup.needCalc && srcStartTsCol[0] != srcEndTsCol[0]) {
uint64_t srcUid = srcUidData[0];
TSKEY startTs = srcStartTsCol[0];
TSKEY endTs = srcEndTsCol[0];
- SSDataBlock* pPreRes = readPreVersionData(pInfo->pTableScanOp, srcUid, startTs, endTs, version);
+ SSDataBlock* pPreRes = readPreVersionData(pInfo->pTableScanOp, srcUid, startTs, endTs, ver);
printDataBlock(pPreRes, "pre res");
blockDataCleanup(pSrcBlock);
int32_t code = blockDataEnsureCapacity(pSrcBlock, pPreRes->info.rows);
@@ -1416,7 +1394,7 @@ static int32_t generateIntervalScanRange(SStreamScanInfo* pInfo, SSDataBlock* pS
uint64_t srcUid = srcUidData[i];
uint64_t groupId = srcGp[i];
if (groupId == 0) {
- groupId = getGroupIdByData(pInfo, srcUid, srcStartTsCol[i], version);
+ groupId = getGroupIdByData(pInfo, srcUid, srcStartTsCol[i], ver);
}
TSKEY calStartTs = srcStartTsCol[i];
colDataSetVal(pCalStartTsCol, pDestBlock->info.rows, (const char*)(&calStartTs), false);
@@ -1453,21 +1431,21 @@ static int32_t generateDeleteResultBlock(SStreamScanInfo* pInfo, SSDataBlock* pS
ASSERT(pSrcStartTsCol->info.type == TSDB_DATA_TYPE_TIMESTAMP);
TSKEY* srcStartTsCol = (TSKEY*)pSrcStartTsCol->pData;
TSKEY* srcEndTsCol = (TSKEY*)pSrcEndTsCol->pData;
- int64_t version = pSrcBlock->info.version - 1;
+ int64_t ver = pSrcBlock->info.version - 1;
for (int32_t i = 0; i < pSrcBlock->info.rows; i++) {
uint64_t srcUid = srcUidData[i];
uint64_t groupId = srcGp[i];
char* tbname[VARSTR_HEADER_SIZE + TSDB_TABLE_NAME_LEN] = {0};
if (groupId == 0) {
- groupId = getGroupIdByData(pInfo, srcUid, srcStartTsCol[i], version);
+ groupId = getGroupIdByData(pInfo, srcUid, srcStartTsCol[i], ver);
}
if (pInfo->tbnameCalSup.pExprInfo) {
void* parTbname = NULL;
- streamStateGetParName(pInfo->pStreamScanOp->pTaskInfo->streamInfo.pState, groupId, &parTbname);
+ pInfo->stateStore.streamStateGetParName(pInfo->pStreamScanOp->pTaskInfo->streamInfo.pState, groupId, &parTbname);
memcpy(varDataVal(tbname), parTbname, TSDB_TABLE_NAME_LEN);
varDataSetLen(tbname, strlen(varDataVal(tbname)));
- streamFreeVal(parTbname);
+ pInfo->stateStore.streamStateFreeVal(parTbname);
}
appendOneRowToStreamSpecialBlock(pDestBlock, srcStartTsCol + i, srcEndTsCol + i, srcUidData + i, &groupId,
tbname[0] == 0 ? NULL : tbname);
@@ -1498,7 +1476,7 @@ static void calBlockTbName(SStreamScanInfo* pInfo, SSDataBlock* pBlock) {
pBlock->info.parTbName[0] = 0;
} else {
appendCreateTableRow(pInfo->pStreamScanOp->pTaskInfo->streamInfo.pState, &pInfo->tbnameCalSup, &pInfo->tagCalSup,
- pBlock->info.id.groupId, pBlock, 0, pInfo->pCreateTbRes);
+ pBlock->info.id.groupId, pBlock, 0, pInfo->pCreateTbRes, &pInfo->stateStore);
}
}
@@ -1529,7 +1507,7 @@ static void checkUpdateData(SStreamScanInfo* pInfo, bool invertible, SSDataBlock
SColumnInfoData* pColDataInfo = taosArrayGet(pBlock->pDataBlock, pInfo->primaryTsIndex);
ASSERT(pColDataInfo->info.type == TSDB_DATA_TYPE_TIMESTAMP);
TSKEY* tsCol = (TSKEY*)pColDataInfo->pData;
- bool tableInserted = updateInfoIsTableInserted(pInfo->pUpdateInfo, pBlock->info.id.uid);
+ bool tableInserted = pInfo->stateStore.updateInfoIsTableInserted(pInfo->pUpdateInfo, pBlock->info.id.uid);
for (int32_t rowId = 0; rowId < pBlock->info.rows; rowId++) {
SResultRowInfo dumyInfo;
dumyInfo.cur.pageId = -1;
@@ -1545,10 +1523,9 @@ static void checkUpdateData(SStreamScanInfo* pInfo, bool invertible, SSDataBlock
isClosed = isCloseWindow(&win, &pInfo->twAggSup);
}
// must check update info first.
- bool update = updateInfoIsUpdated(pInfo->pUpdateInfo, pBlock->info.id.uid, tsCol[rowId]);
+ bool update = pInfo->stateStore.updateInfoIsUpdated(pInfo->pUpdateInfo, pBlock->info.id.uid, tsCol[rowId]);
bool closedWin = isClosed && isSignleIntervalWindow(pInfo) &&
- isDeletedStreamWindow(&win, pBlock->info.id.groupId,
- pInfo->pState, &pInfo->twAggSup);
+ isDeletedStreamWindow(&win, pBlock->info.id.groupId, pInfo->pState, &pInfo->twAggSup, &pInfo->stateStore);
if ((update || closedWin) && out) {
qDebug("stream update check not pass, update %d, closedWin %d", update, closedWin);
uint64_t gpId = 0;
@@ -1636,58 +1613,34 @@ static int32_t setBlockIntoRes(SStreamScanInfo* pInfo, const SSDataBlock* pBlock
}
static SSDataBlock* doQueueScan(SOperatorInfo* pOperator) {
- SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo;
+ SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo;
+ SStorageAPI* pAPI = &pTaskInfo->storageAPI;
+
SStreamScanInfo* pInfo = pOperator->info;
const char* id = GET_TASKID(pTaskInfo);
qDebug("start to exec queue scan, %s", id);
-#if 0
- if (pTaskInfo->streamInfo.submit.msgStr != NULL) {
- if (pInfo->tqReader->msg.msgStr == NULL) {
- SPackedData submit = pTaskInfo->streamInfo.submit;
- if (tqReaderSetSubmitMsg(pInfo->tqReader, submit.msgStr, submit.msgLen, submit.ver) < 0) {
- qError("submit msg messed up when initing stream submit block %p", submit.msgStr);
- return NULL;
- }
- }
-
- blockDataCleanup(pInfo->pRes);
- SDataBlockInfo* pBlockInfo = &pInfo->pRes->info;
-
- while (tqNextBlockImpl(pInfo->tqReader, NULL)) {
- int32_t code = tqRetrieveDataBlock(pInfo->tqReader, NULL);
- if (code != TSDB_CODE_SUCCESS || pInfo->tqReader->pResBlock->info.rows == 0) {
- continue;
- }
-
- setBlockIntoRes(pInfo, pInfo->tqReader->pResBlock, true);
-
- if (pBlockInfo->rows > 0) {
- return pInfo->pRes;
- }
- }
-
- pInfo->tqReader->msg = (SPackedData){0};
- pTaskInfo->streamInfo.submit = (SPackedData){0};
+ if (isTaskKilled(pTaskInfo)) {
return NULL;
}
-#endif
if (pTaskInfo->streamInfo.currentOffset.type == TMQ_OFFSET__SNAPSHOT_DATA) {
SSDataBlock* pResult = doTableScan(pInfo->pTableScanOp);
if (pResult && pResult->info.rows > 0) {
- qDebug("queue scan tsdb return %" PRId64 " rows min:%" PRId64 " max:%" PRId64 " wal curVersion:%" PRId64,
- pResult->info.rows, pResult->info.window.skey, pResult->info.window.ekey,
- pInfo->tqReader->pWalReader->curVersion);
+// qDebug("queue scan tsdb return %" PRId64 " rows min:%" PRId64 " max:%" PRId64 " wal curVersion:%" PRId64,
+// pResult->info.rows, pResult->info.window.skey, pResult->info.window.ekey,
+// pInfo->tqReader->pWalReader->curVersion);
tqOffsetResetToData(&pTaskInfo->streamInfo.currentOffset, pResult->info.id.uid, pResult->info.window.ekey);
return pResult;
}
+
STableScanInfo* pTSInfo = pInfo->pTableScanOp->info;
- tsdbReaderClose(pTSInfo->base.dataReader);
+ pAPI->tsdReader.tsdReaderClose(pTSInfo->base.dataReader);
+
pTSInfo->base.dataReader = NULL;
qDebug("queue scan tsdb over, switch to wal ver %" PRId64 "", pTaskInfo->streamInfo.snapshotVer + 1);
- if (tqSeekVer(pInfo->tqReader, pTaskInfo->streamInfo.snapshotVer + 1, pTaskInfo->id.str) < 0) {
+ if (pAPI->tqReaderFn.tqReaderSeek(pInfo->tqReader, pTaskInfo->streamInfo.snapshotVer + 1, pTaskInfo->id.str) < 0) {
return NULL;
}
@@ -1697,11 +1650,13 @@ static SSDataBlock* doQueueScan(SOperatorInfo* pOperator) {
if (pTaskInfo->streamInfo.currentOffset.type == TMQ_OFFSET__LOG) {
while (1) {
- bool hasResult = tqNextBlockInWal(pInfo->tqReader, id);
- SSDataBlock* pRes = pInfo->tqReader->pResBlock;
+ bool hasResult = pAPI->tqReaderFn.tqReaderNextBlockInWal(pInfo->tqReader, id);
+
+ SSDataBlock* pRes = pAPI->tqReaderFn.tqGetResultBlock(pInfo->tqReader);
+ struct SWalReader* pWalReader = pAPI->tqReaderFn.tqReaderGetWalReader(pInfo->tqReader);
// curVersion move to next, so currentOffset = curVersion - 1
- tqOffsetResetToLog(&pTaskInfo->streamInfo.currentOffset, pInfo->tqReader->pWalReader->curVersion - 1);
+ tqOffsetResetToLog(&pTaskInfo->streamInfo.currentOffset, pWalReader->curVersion - 1);
if (hasResult) {
qDebug("doQueueScan get data from log %" PRId64 " rows, version:%" PRId64, pRes->info.rows,
@@ -1737,9 +1692,10 @@ static int32_t filterDelBlockByUid(SSDataBlock* pDst, const SSDataBlock* pSrc, S
SColumnInfoData* pDstStartCol = taosArrayGet(pDst->pDataBlock, START_TS_COLUMN_INDEX);
SColumnInfoData* pDstEndCol = taosArrayGet(pDst->pDataBlock, END_TS_COLUMN_INDEX);
SColumnInfoData* pDstUidCol = taosArrayGet(pDst->pDataBlock, UID_COLUMN_INDEX);
- int32_t j = 0;
+
+ int32_t j = 0;
for (int32_t i = 0; i < rows; i++) {
- if (taosHashGet(pReader->tbIdHash, &uidCol[i], sizeof(uint64_t))) {
+ if (pInfo->readerFn.tqReaderIsQueriedTable(pReader, uidCol[i])) {
colDataSetVal(pDstStartCol, j, (const char*)&startCol[i], false);
colDataSetVal(pDstEndCol, j, (const char*)&endCol[i], false);
colDataSetVal(pDstUidCol, j, (const char*)&uidCol[i], false);
@@ -1750,6 +1706,7 @@ static int32_t filterDelBlockByUid(SSDataBlock* pDst, const SSDataBlock* pSrc, S
j++;
}
}
+
uint32_t cap = pDst->info.capacity;
pDst->info = pSrc->info;
pDst->info.rows = j;
@@ -1794,21 +1751,21 @@ static void doCheckUpdate(SStreamScanInfo* pInfo, TSKEY endKey, SSDataBlock* pBl
}
}
-int32_t streamScanOperatorEncode(SStreamScanInfo* pInfo, void** pBuff) {
- int32_t len = updateInfoSerialize(NULL, 0, pInfo->pUpdateInfo);
- *pBuff = taosMemoryCalloc(1, len);
- updateInfoSerialize(*pBuff, len, pInfo->pUpdateInfo);
- return len;
-}
+//int32_t streamScanOperatorEncode(SStreamScanInfo* pInfo, void** pBuff) {
+// int32_t len = updateInfoSerialize(NULL, 0, pInfo->pUpdateInfo);
+// *pBuff = taosMemoryCalloc(1, len);
+// updateInfoSerialize(*pBuff, len, pInfo->pUpdateInfo);
+// return len;
+//}
// other properties are recovered from the execution plan
-void streamScanOperatorDeocde(void* pBuff, int32_t len, SStreamScanInfo* pInfo) {
+void streamScanOperatorDecode(void* pBuff, int32_t len, SStreamScanInfo* pInfo) {
if (!pBuff || len == 0) {
return;
}
- SUpdateInfo* pUpInfo = updateInfoInit(0, TSDB_TIME_PRECISION_MILLI, 0);
- int32_t code = updateInfoDeserialize(pBuff, len, pUpInfo);
+ void* pUpInfo = pInfo->stateStore.updateInfoInit(0, TSDB_TIME_PRECISION_MILLI, 0);
+ int32_t code = pInfo->stateStore.updateInfoDeserialize(pBuff, len, pUpInfo);
if (code == TSDB_CODE_SUCCESS) {
pInfo->pUpdateInfo = pUpInfo;
}
@@ -1816,7 +1773,10 @@ void streamScanOperatorDeocde(void* pBuff, int32_t len, SStreamScanInfo* pInfo)
static SSDataBlock* doStreamScan(SOperatorInfo* pOperator) {
// NOTE: this operator does never check if current status is done or not
- SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo;
+ SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo;
+ const char* id = GET_TASKID(pTaskInfo);
+
+ SStorageAPI* pAPI = &pTaskInfo->storageAPI;
SStreamScanInfo* pInfo = pOperator->info;
qDebug("stream scan started, %s", GET_TASKID(pTaskInfo));
@@ -1839,7 +1799,7 @@ static SSDataBlock* doStreamScan(SOperatorInfo* pOperator) {
pTaskInfo->streamInfo.recoverStep = STREAM_RECOVER_STEP__SCAN2;
}
- tsdbReaderClose(pTSInfo->base.dataReader);
+ pAPI->tsdReader.tsdReaderClose(pTSInfo->base.dataReader);
pTSInfo->base.dataReader = NULL;
pInfo->pTableScanOp->status = OP_OPENED;
@@ -1900,11 +1860,12 @@ static SSDataBlock* doStreamScan(SOperatorInfo* pOperator) {
if (pInfo->pRecoverRes != NULL) {
pInfo->blockRecoverContiCnt++;
calBlockTbName(pInfo, pInfo->pRecoverRes);
- if (pInfo->pUpdateInfo) {
+ if (!pInfo->igCheckUpdate && pInfo->pUpdateInfo) {
if (pTaskInfo->streamInfo.recoverStep == STREAM_RECOVER_STEP__SCAN1) {
- TSKEY maxTs = updateInfoFillBlockData(pInfo->pUpdateInfo, pInfo->pRecoverRes, pInfo->primaryTsIndex);
+ TSKEY maxTs = pAPI->stateStore.updateInfoFillBlockData(pInfo->pUpdateInfo, pInfo->pRecoverRes, pInfo->primaryTsIndex);
pInfo->twAggSup.maxTs = TMAX(pInfo->twAggSup.maxTs, maxTs);
} else {
+ pInfo->pUpdateInfo->maxDataVersion = TMAX(pInfo->pUpdateInfo->maxDataVersion, pTaskInfo->streamInfo.fillHistoryVer2);
doCheckUpdate(pInfo, pInfo->pRecoverRes->info.window.ekey, pInfo->pRecoverRes);
}
}
@@ -1913,13 +1874,14 @@ static SSDataBlock* doStreamScan(SOperatorInfo* pOperator) {
printDataBlock(pInfo->pCreateTbRes, "recover createTbl");
return pInfo->pCreateTbRes;
}
+
qDebug("stream recover scan get block, rows %" PRId64, pInfo->pRecoverRes->info.rows);
printDataBlock(pInfo->pRecoverRes, "scan recover");
return pInfo->pRecoverRes;
}
pTaskInfo->streamInfo.recoverStep = STREAM_RECOVER_STEP__NONE;
STableScanInfo* pTSInfo = pInfo->pTableScanOp->info;
- tsdbReaderClose(pTSInfo->base.dataReader);
+ pAPI->tsdReader.tsdReaderClose(pTSInfo->base.dataReader);
pTSInfo->base.dataReader = NULL;
@@ -1939,11 +1901,13 @@ FETCH_NEXT_BLOCK:
return NULL;
}
- int32_t current = pInfo->validBlockIndex++;
+ int32_t current = pInfo->validBlockIndex++;
+ qDebug("process %d/%d input data blocks, %s", current, (int32_t) total, id);
+
SPackedData* pPacked = taosArrayGet(pInfo->pBlockLists, current);
SSDataBlock* pBlock = pPacked->pDataBlock;
if (pBlock->info.parTbName[0]) {
- streamStatePutParName(pTaskInfo->streamInfo.pState, pBlock->info.id.groupId, pBlock->info.parTbName);
+ pAPI->stateStore.streamStatePutParName(pTaskInfo->streamInfo.pState, pBlock->info.id.groupId, pBlock->info.parTbName);
}
// TODO move into scan
@@ -1964,7 +1928,7 @@ FETCH_NEXT_BLOCK:
copyDataBlock(pInfo->pUpdateRes, pBlock);
pInfo->updateResIndex = 0;
prepareRangeScan(pInfo, pInfo->pUpdateRes, &pInfo->updateResIndex);
- updateInfoAddCloseWindowSBF(pInfo->pUpdateInfo);
+ pAPI->stateStore.updateInfoAddCloseWindowSBF(pInfo->pUpdateInfo);
} break;
case STREAM_DELETE_DATA: {
printDataBlock(pBlock, "stream scan delete recv");
@@ -2019,7 +1983,7 @@ FETCH_NEXT_BLOCK:
// printDataBlock(pBlock, "stream scan recv");
return pBlock;
} else if (pInfo->blockType == STREAM_INPUT__DATA_SUBMIT) {
- qDebug("scan mode %d", pInfo->scanMode);
+ qDebug("stream scan mode:%d, %s", pInfo->scanMode, id);
switch (pInfo->scanMode) {
case STREAM_SCAN_FROM_RES: {
pInfo->scanMode = STREAM_SCAN_FROM_READERHANDLE;
@@ -2074,16 +2038,15 @@ FETCH_NEXT_BLOCK:
return pInfo->pUpdateRes;
}
- const char* id = GET_TASKID(pTaskInfo);
SSDataBlock* pBlock = pInfo->pRes;
SDataBlockInfo* pBlockInfo = &pBlock->info;
int32_t totalBlocks = taosArrayGetSize(pInfo->pBlockLists);
NEXT_SUBMIT_BLK:
while (1) {
- if (pInfo->tqReader->msg.msgStr == NULL) {
+ if (pInfo->readerFn.tqReaderCurrentBlockConsumed(pInfo->tqReader)) {
if (pInfo->validBlockIndex >= totalBlocks) {
- updateInfoDestoryColseWinSBF(pInfo->pUpdateInfo);
+ pAPI->stateStore.updateInfoDestoryColseWinSBF(pInfo->pUpdateInfo);
doClearBufferedBlocks(pInfo);
qDebug("stream scan return empty, all %d submit blocks consumed, %s", totalBlocks, id);
@@ -2094,7 +2057,7 @@ FETCH_NEXT_BLOCK:
SPackedData* pSubmit = taosArrayGet(pInfo->pBlockLists, current);
qDebug("set %d/%d as the input submit block, %s", current, totalBlocks, id);
- if (tqReaderSetSubmitMsg(pInfo->tqReader, pSubmit->msgStr, pSubmit->msgLen, pSubmit->ver) < 0) {
+ if (pAPI->tqReaderFn.tqReaderSetSubmitMsg(pInfo->tqReader, pSubmit->msgStr, pSubmit->msgLen, pSubmit->ver) < 0) {
qError("submit msg messed up when initializing stream submit block %p, current %d/%d, %s", pSubmit, current, totalBlocks, id);
continue;
}
@@ -2102,16 +2065,23 @@ FETCH_NEXT_BLOCK:
blockDataCleanup(pBlock);
- while (tqNextBlockImpl(pInfo->tqReader, id)) {
- int32_t code = tqRetrieveDataBlock(pInfo->tqReader, id);
- if (code != TSDB_CODE_SUCCESS || pInfo->tqReader->pResBlock->info.rows == 0) {
+ while (pAPI->tqReaderFn.tqNextBlockImpl(pInfo->tqReader, id)) {
+ SSDataBlock* pRes = NULL;
+
+ int32_t code = pAPI->tqReaderFn.tqRetrieveBlock(pInfo->tqReader, &pRes, id);
+ qDebug("retrieve data from submit completed code:%s, rows:%" PRId64 " %s", tstrerror(code), pRes->info.rows,
+ id);
+
+ if (code != TSDB_CODE_SUCCESS || pRes->info.rows == 0) {
+ qDebug("retrieve data failed, try next block in submit block, %s", id);
continue;
}
- setBlockIntoRes(pInfo, pInfo->tqReader->pResBlock, false);
+ setBlockIntoRes(pInfo, pRes, false);
if (pInfo->pCreateTbRes->info.rows > 0) {
pInfo->scanMode = STREAM_SCAN_FROM_RES;
+ qDebug("create table res exists, rows:%"PRId64" return from stream scan, %s", pInfo->pCreateTbRes->info.rows, id);
return pInfo->pCreateTbRes;
}
@@ -2120,6 +2090,8 @@ FETCH_NEXT_BLOCK:
pBlock->info.dataLoad = 1;
blockDataUpdateTsWindow(pBlock, pInfo->primaryTsIndex);
+ qDebug("%" PRId64 " rows in datablock, update res:%" PRId64 " %s", pBlockInfo->rows,
+ pInfo->pUpdateDataRes->info.rows, id);
if (pBlockInfo->rows > 0 || pInfo->pUpdateDataRes->info.rows > 0) {
break;
}
@@ -2136,7 +2108,7 @@ FETCH_NEXT_BLOCK:
pInfo->numOfExec++;
pOperator->resultInfo.totalRows += pBlockInfo->rows;
- qDebug("stream scan get source rows:%" PRId64", %s", pBlockInfo->rows, id);
+ qDebug("stream scan completed, and return source rows:%" PRId64", %s", pBlockInfo->rows, id);
if (pBlockInfo->rows > 0) {
return pBlock;
}
@@ -2166,7 +2138,9 @@ static SArray* extractTableIdList(const STableListInfo* pTableListInfo) {
static SSDataBlock* doRawScan(SOperatorInfo* pOperator) {
// NOTE: this operator does never check if current status is done or not
- SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo;
+ SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo;
+ SStorageAPI* pAPI = &pTaskInfo->storageAPI;
+
SStreamRawScanInfo* pInfo = pOperator->info;
int32_t code = TSDB_CODE_SUCCESS;
pTaskInfo->streamInfo.metaRsp.metaRspLen = 0; // use metaRspLen !=0 to judge if data is meta
@@ -2176,20 +2150,20 @@ static SSDataBlock* doRawScan(SOperatorInfo* pOperator) {
if (pTaskInfo->streamInfo.currentOffset.type == TMQ_OFFSET__SNAPSHOT_DATA) {
bool hasNext = false;
if (pInfo->dataReader) {
- code = tsdbNextDataBlock(pInfo->dataReader, &hasNext);
+ code = pAPI->tsdReader.tsdNextDataBlock(pInfo->dataReader, &hasNext);
if (code) {
- tsdbReleaseDataBlock(pInfo->dataReader);
+ pAPI->tsdReader.tsdReaderReleaseDataBlock(pInfo->dataReader);
T_LONG_JMP(pTaskInfo->env, code);
}
}
if (pInfo->dataReader && hasNext) {
if (isTaskKilled(pTaskInfo)) {
- tsdbReleaseDataBlock(pInfo->dataReader);
+ pAPI->tsdReader.tsdReaderReleaseDataBlock(pInfo->dataReader);
T_LONG_JMP(pTaskInfo->env, pTaskInfo->code);
}
- SSDataBlock* pBlock = tsdbRetrieveDataBlock(pInfo->dataReader, NULL);
+ SSDataBlock* pBlock = pAPI->tsdReader.tsdReaderRetrieveDataBlock(pInfo->dataReader, NULL);
if (pBlock == NULL) {
T_LONG_JMP(pTaskInfo->env, terrno);
}
@@ -2199,7 +2173,7 @@ static SSDataBlock* doRawScan(SOperatorInfo* pOperator) {
return pBlock;
}
- SMetaTableInfo mtInfo = getUidfromSnapShot(pInfo->sContext);
+ SMetaTableInfo mtInfo = pAPI->snapshotFn.getMetaTableInfoFromSnapshot(pInfo->sContext);
STqOffsetVal offset = {0};
if (mtInfo.uid == 0) { // read snapshot done, change to get data from wal
qDebug("tmqsnap read snapshot done, change to get data from wal");
@@ -2217,8 +2191,8 @@ static SSDataBlock* doRawScan(SOperatorInfo* pOperator) {
int32_t dataLen = 0;
int16_t type = 0;
int64_t uid = 0;
- if (getMetafromSnapShot(sContext, &data, &dataLen, &type, &uid) < 0) {
- qError("tmqsnap getMetafromSnapShot error");
+ if (pAPI->snapshotFn.getTableInfoFromSnapshot(sContext, &data, &dataLen, &type, &uid) < 0) {
+ qError("tmqsnap getTableInfoFromSnapshot error");
taosMemoryFreeClear(data);
return NULL;
}
@@ -2241,8 +2215,8 @@ static SSDataBlock* doRawScan(SOperatorInfo* pOperator) {
static void destroyRawScanOperatorInfo(void* param) {
SStreamRawScanInfo* pRawScan = (SStreamRawScanInfo*)param;
- tsdbReaderClose(pRawScan->dataReader);
- destroySnapContext(pRawScan->sContext);
+ pRawScan->pAPI->tsdReader.tsdReaderClose(pRawScan->dataReader);
+ pRawScan->pAPI->snapshotFn.destroySnapshot(pRawScan->sContext);
tableListDestroy(pRawScan->pTableListInfo);
taosMemoryFree(pRawScan);
}
@@ -2267,6 +2241,7 @@ SOperatorInfo* createRawScanOperatorInfo(SReadHandle* pHandle, SExecTaskInfo* pT
pInfo->pTableListInfo = tableListCreate();
pInfo->vnode = pHandle->vnode;
+ pInfo->pAPI = &pTaskInfo->storageAPI;
pInfo->sContext = pHandle->sContext;
setOperatorInfo(pOperator, "RawScanOperator", QUERY_NODE_PHYSICAL_PLAN_TABLE_SCAN, false, OP_NOT_OPENED, pInfo,
@@ -2290,7 +2265,7 @@ static void destroyStreamScanOperatorInfo(void* param) {
}
if (pStreamScan->tqReader) {
- tqCloseReader(pStreamScan->tqReader);
+ pStreamScan->readerFn.tqReaderClose(pStreamScan->tqReader);
}
if (pStreamScan->matchInfo.pList) {
taosArrayDestroy(pStreamScan->matchInfo.pList);
@@ -2303,7 +2278,7 @@ static void destroyStreamScanOperatorInfo(void* param) {
cleanupExprSupp(&pStreamScan->tbnameCalSup);
cleanupExprSupp(&pStreamScan->tagCalSup);
- updateInfoDestroy(pStreamScan->pUpdateInfo);
+ pStreamScan->stateStore.updateInfoDestroy(pStreamScan->pUpdateInfo);
blockDataDestroy(pStreamScan->pRes);
blockDataDestroy(pStreamScan->pUpdateRes);
blockDataDestroy(pStreamScan->pPullDataRes);
@@ -2319,6 +2294,8 @@ SOperatorInfo* createStreamScanOperatorInfo(SReadHandle* pHandle, STableScanPhys
SArray* pColIds = NULL;
SStreamScanInfo* pInfo = taosMemoryCalloc(1, sizeof(SStreamScanInfo));
SOperatorInfo* pOperator = taosMemoryCalloc(1, sizeof(SOperatorInfo));
+ SStorageAPI* pAPI = &pTaskInfo->storageAPI;
+ const char* idstr = pTaskInfo->id.str;
if (pInfo == NULL || pOperator == NULL) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
@@ -2362,7 +2339,7 @@ SOperatorInfo* createStreamScanOperatorInfo(SReadHandle* pHandle, STableScanPhys
pInfo->tbnameCalSup.pExprInfo = pSubTableExpr;
createExprFromOneNode(pSubTableExpr, pTableScanNode->pSubtable, 0);
- if (initExprSupp(&pInfo->tbnameCalSup, pSubTableExpr, 1) != 0) {
+ if (initExprSupp(&pInfo->tbnameCalSup, pSubTableExpr, 1, &pTaskInfo->storageAPI.functionStore) != 0) {
tableListDestroy(pTableListInfo);
goto _error;
}
@@ -2376,7 +2353,7 @@ SOperatorInfo* createStreamScanOperatorInfo(SReadHandle* pHandle, STableScanPhys
tableListDestroy(pTableListInfo);
goto _error;
}
- if (initExprSupp(&pInfo->tagCalSup, pTagExpr, numOfTags) != 0) {
+ if (initExprSupp(&pInfo->tagCalSup, pTagExpr, numOfTags, &pTaskInfo->storageAPI.functionStore) != 0) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
tableListDestroy(pTableListInfo);
goto _error;
@@ -2408,7 +2385,7 @@ SOperatorInfo* createStreamScanOperatorInfo(SReadHandle* pHandle, STableScanPhys
if (pHandle->initTqReader) {
ASSERT(pHandle->tqReader == NULL);
- pInfo->tqReader = tqReaderOpen(pHandle->vnode);
+ pInfo->tqReader = pAPI->tqReaderFn.tqReaderOpen(pHandle->vnode);
ASSERT(pInfo->tqReader);
} else {
ASSERT(pHandle->tqReader);
@@ -2418,7 +2395,7 @@ SOperatorInfo* createStreamScanOperatorInfo(SReadHandle* pHandle, STableScanPhys
pInfo->pUpdateInfo = NULL;
pInfo->pTableScanOp = pTableScanOp;
if (pInfo->pTableScanOp->pTaskInfo->streamInfo.pState) {
- streamStateSetNumber(pInfo->pTableScanOp->pTaskInfo->streamInfo.pState, -1);
+ pAPI->stateStore.streamStateSetNumber(pInfo->pTableScanOp->pTaskInfo->streamInfo.pState, -1);
}
pInfo->readHandle = *pHandle;
@@ -2427,9 +2404,9 @@ SOperatorInfo* createStreamScanOperatorInfo(SReadHandle* pHandle, STableScanPhys
blockDataEnsureCapacity(pInfo->pCreateTbRes, 8);
// set the extract column id to streamHandle
- tqReaderSetColIdList(pInfo->tqReader, pColIds);
+ pAPI->tqReaderFn.tqReaderSetColIdList(pInfo->tqReader, pColIds);
SArray* tableIdList = extractTableIdList(((STableScanInfo*)(pInfo->pTableScanOp->info))->base.pTableListInfo);
- code = tqReaderSetTbUidList(pInfo->tqReader, tableIdList);
+ code = pAPI->tqReaderFn.tqReaderSetQueryTableList(pInfo->tqReader, tableIdList, idstr);
if (code != 0) {
taosArrayDestroy(tableIdList);
goto _error;
@@ -2470,13 +2447,16 @@ SOperatorInfo* createStreamScanOperatorInfo(SReadHandle* pHandle, STableScanPhys
pInfo->igExpired = pTableScanNode->igExpired;
pInfo->twAggSup.maxTs = INT64_MIN;
pInfo->pState = NULL;
+ pInfo->stateStore = pTaskInfo->storageAPI.stateStore;
+ pInfo->readerFn = pTaskInfo->storageAPI.tqReaderFn;
// for stream
if (pTaskInfo->streamInfo.pState) {
void* buff = NULL;
int32_t len = 0;
- streamStateGetInfo(pTaskInfo->streamInfo.pState, STREAM_SCAN_OP_NAME, strlen(STREAM_SCAN_OP_NAME), &buff, &len);
- streamScanOperatorDeocde(buff, len, pInfo);
+ pAPI->stateStore.streamStateGetInfo(pTaskInfo->streamInfo.pState, STREAM_SCAN_OP_NAME, strlen(STREAM_SCAN_OP_NAME), &buff, &len);
+ streamScanOperatorDecode(buff, len, pInfo);
+ taosMemoryFree(buff);
}
setOperatorInfo(pOperator, STREAM_SCAN_OP_NAME, QUERY_NODE_PHYSICAL_PLAN_STREAM_SCAN, false, OP_NOT_OPENED, pInfo,
@@ -2502,18 +2482,18 @@ _error:
return NULL;
}
-static void doTagScanOneTable(SOperatorInfo* pOperator, const SSDataBlock* pRes, int32_t count, SMetaReader* mr) {
+static void doTagScanOneTable(SOperatorInfo* pOperator, const SSDataBlock* pRes, int32_t count, SMetaReader* mr, SStorageAPI* pAPI) {
SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo;
STagScanInfo* pInfo = pOperator->info;
SExprInfo* pExprInfo = &pOperator->exprSupp.pExprInfo[0];
STableKeyInfo* item = tableListGetInfo(pInfo->pTableListInfo, pInfo->curPos);
- int32_t code = metaGetTableEntryByUid(mr, item->uid);
+ int32_t code = pAPI->metaReaderFn.getTableEntryByUid(mr, item->uid);
tDecoderClear(&(*mr).coder);
if (code != TSDB_CODE_SUCCESS) {
qError("failed to get table meta, uid:0x%" PRIx64 ", code:%s, %s", item->uid, tstrerror(terrno),
GET_TASKID(pTaskInfo));
- metaReaderClear(mr);
+ pAPI->metaReaderFn.clearReader(mr);
T_LONG_JMP(pTaskInfo->env, terrno);
}
@@ -2528,7 +2508,7 @@ static void doTagScanOneTable(SOperatorInfo* pOperator, const SSDataBlock* pRes,
} else { // it is a tag value
STagVal val = {0};
val.cid = pExprInfo[j].base.pParam[0].pCol->colId;
- const char* p = metaGetTableTagVal((*mr).me.ctbEntry.pTags, pDst->info.type, &val);
+ const char* p = pAPI->metaFn.extractTagVal((*mr).me.ctbEntry.pTags, pDst->info.type, &val);
char* data = NULL;
if (pDst->info.type != TSDB_DATA_TYPE_JSON && p != NULL) {
@@ -2553,6 +2533,7 @@ static SSDataBlock* doTagScan(SOperatorInfo* pOperator) {
}
SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo;
+ SStorageAPI* pAPI = &pTaskInfo->storageAPI;
STagScanInfo* pInfo = pOperator->info;
SExprInfo* pExprInfo = &pOperator->exprSupp.pExprInfo[0];
@@ -2568,10 +2549,10 @@ static SSDataBlock* doTagScan(SOperatorInfo* pOperator) {
char str[512] = {0};
int32_t count = 0;
SMetaReader mr = {0};
- metaReaderInit(&mr, pInfo->readHandle.meta, 0);
+ pAPI->metaReaderFn.initReader(&mr, pInfo->readHandle.vnode, 0, &pAPI->metaFn);
while (pInfo->curPos < size && count < pOperator->resultInfo.capacity) {
- doTagScanOneTable(pOperator, pRes, count, &mr);
+ doTagScanOneTable(pOperator, pRes, count, &mr, &pTaskInfo->storageAPI);
++count;
if (++pInfo->curPos >= size) {
setOperatorCompleted(pOperator);
@@ -2589,7 +2570,7 @@ static SSDataBlock* doTagScan(SOperatorInfo* pOperator) {
}
}
- metaReaderClear(&mr);
+ pAPI->metaReaderFn.clearReader(&mr);
// qDebug("QInfo:0x%"PRIx64" create tag values results completed, rows:%d", GET_TASKID(pRuntimeEnv), count);
if (pOperator->status == OP_EXEC_DONE) {
@@ -2622,7 +2603,7 @@ SOperatorInfo* createTagScanOperatorInfo(SReadHandle* pReadHandle, STagScanPhysi
int32_t numOfExprs = 0;
SExprInfo* pExprInfo = createExprInfo(pPhyNode->pScanPseudoCols, NULL, &numOfExprs);
- int32_t code = initExprSupp(&pOperator->exprSupp, pExprInfo, numOfExprs);
+ int32_t code = initExprSupp(&pOperator->exprSupp, pExprInfo, numOfExprs, &pTaskInfo->storageAPI.functionStore);
if (code != TSDB_CODE_SUCCESS) {
goto _error;
}
@@ -2661,6 +2642,8 @@ static SSDataBlock* getTableDataBlockImpl(void* param) {
SOperatorInfo* pOperator = source->pOperator;
STableMergeScanInfo* pInfo = pOperator->info;
SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo;
+ SStorageAPI* pAPI = &pTaskInfo->storageAPI;
+
int32_t readIdx = source->readerIdx;
SSDataBlock* pBlock = source->inputBlock;
int32_t code = 0;
@@ -2672,7 +2655,7 @@ static SSDataBlock* getTableDataBlockImpl(void* param) {
SReadHandle* pHandle = &pInfo->base.readHandle;
if (NULL == source->dataReader || !source->multiReader) {
- code = tsdbReaderOpen(pHandle->vnode, pQueryCond, p, 1, pBlock, &source->dataReader, GET_TASKID(pTaskInfo), false, NULL);
+ code = pAPI->tsdReader.tsdReaderOpen(pHandle->vnode, pQueryCond, p, 1, pBlock, (void**)&source->dataReader, GET_TASKID(pTaskInfo), false, NULL);
if (code != 0) {
T_LONG_JMP(pTaskInfo->env, code);
}
@@ -2684,9 +2667,9 @@ static SSDataBlock* getTableDataBlockImpl(void* param) {
qTrace("tsdb/read-table-data: %p, enter next reader", reader);
while (true) {
- code = tsdbNextDataBlock(reader, &hasNext);
+ code = pAPI->tsdReader.tsdNextDataBlock(reader, &hasNext);
if (code != 0) {
- tsdbReleaseDataBlock(reader);
+ pAPI->tsdReader.tsdReaderReleaseDataBlock(reader);
pInfo->base.dataReader = NULL;
T_LONG_JMP(pTaskInfo->env, code);
}
@@ -2696,7 +2679,7 @@ static SSDataBlock* getTableDataBlockImpl(void* param) {
}
if (isTaskKilled(pTaskInfo)) {
- tsdbReleaseDataBlock(reader);
+ pAPI->tsdReader.tsdReaderReleaseDataBlock(reader);
pInfo->base.dataReader = NULL;
T_LONG_JMP(pTaskInfo->env, pTaskInfo->code);
}
@@ -2736,7 +2719,7 @@ static SSDataBlock* getTableDataBlockImpl(void* param) {
qTrace("tsdb/read-table-data: %p, close reader", reader);
if (!source->multiReader) {
- tsdbReaderClose(pInfo->base.dataReader);
+ pAPI->tsdReader.tsdReaderClose(pInfo->base.dataReader);
source->dataReader = NULL;
}
pInfo->base.dataReader = NULL;
@@ -2744,7 +2727,7 @@ static SSDataBlock* getTableDataBlockImpl(void* param) {
}
if (!source->multiReader) {
- tsdbReaderClose(pInfo->base.dataReader);
+ pAPI->tsdReader.tsdReaderClose(pInfo->base.dataReader);
source->dataReader = NULL;
}
pInfo->base.dataReader = NULL;
@@ -2849,6 +2832,7 @@ int32_t startGroupTableMergeScan(SOperatorInfo* pOperator) {
int32_t stopGroupTableMergeScan(SOperatorInfo* pOperator) {
STableMergeScanInfo* pInfo = pOperator->info;
SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo;
+ SStorageAPI* pAPI = &pTaskInfo->storageAPI;
int32_t numOfTable = taosArrayGetSize(pInfo->queryConds);
@@ -2862,7 +2846,7 @@ int32_t stopGroupTableMergeScan(SOperatorInfo* pOperator) {
for (int32_t i = 0; i < numOfTable; ++i) {
STableMergeScanSortSourceParam* param = taosArrayGet(pInfo->sortSourceParams, i);
blockDataDestroy(param->inputBlock);
- tsdbReaderClose(param->dataReader);
+ pAPI->tsdReader.tsdReaderClose(param->dataReader);
param->dataReader = NULL;
}
taosArrayClear(pInfo->sortSourceParams);
@@ -2974,11 +2958,11 @@ void destroyTableMergeScanOperatorInfo(void* param) {
for (int32_t i = 0; i < numOfTable; i++) {
STableMergeScanSortSourceParam* p = taosArrayGet(pTableScanInfo->sortSourceParams, i);
blockDataDestroy(p->inputBlock);
- tsdbReaderClose(p->dataReader);
+ pTableScanInfo->base.readerAPI.tsdReaderClose(p->dataReader);
p->dataReader = NULL;
}
- tsdbReaderClose(pTableScanInfo->base.dataReader);
+ pTableScanInfo->base.readerAPI.tsdReaderClose(pTableScanInfo->base.dataReader);
pTableScanInfo->base.dataReader = NULL;
taosArrayDestroy(pTableScanInfo->sortSourceParams);
@@ -2991,7 +2975,7 @@ void destroyTableMergeScanOperatorInfo(void* param) {
}
taosArrayDestroy(pTableScanInfo->queryConds);
- destroyTableScanBase(&pTableScanInfo->base);
+ destroyTableScanBase(&pTableScanInfo->base, &pTableScanInfo->base.readerAPI);
pTableScanInfo->pResBlock = blockDataDestroy(pTableScanInfo->pResBlock);
pTableScanInfo->pSortInputBlock = blockDataDestroy(pTableScanInfo->pSortInputBlock);
@@ -3040,7 +3024,7 @@ SOperatorInfo* createTableMergeScanOperatorInfo(STableScanPhysiNode* pTableScanN
if (pTableScanNode->scan.pScanPseudoCols != NULL) {
SExprSupp* pSup = &pInfo->base.pseudoSup;
pSup->pExprInfo = createExprInfo(pTableScanNode->scan.pScanPseudoCols, NULL, &pSup->numOfExprs);
- pSup->pCtx = createSqlFunctionCtx(pSup->pExprInfo, pSup->numOfExprs, &pSup->rowEntryInfoOffset);
+ pSup->pCtx = createSqlFunctionCtx(pSup->pExprInfo, pSup->numOfExprs, &pSup->rowEntryInfoOffset, &pTaskInfo->storageAPI.functionStore);
}
pInfo->scanInfo = (SScanInfo){.numOfAsc = pTableScanNode->scanSeq[0], .numOfDesc = pTableScanNode->scanSeq[1]};
@@ -3051,6 +3035,7 @@ SOperatorInfo* createTableMergeScanOperatorInfo(STableScanPhysiNode* pTableScanN
goto _error;
}
+ pInfo->base.readerAPI = pTaskInfo->storageAPI.tsdReader;
pInfo->base.dataBlockLoadFlag = FUNC_DATA_REQUIRED_DATA_LOAD;
pInfo->base.scanFlag = MAIN_SCAN;
pInfo->base.readHandle = *readHandle;
@@ -3102,9 +3087,9 @@ _error:
static SSDataBlock* doTableCountScan(SOperatorInfo* pOperator);
static void destoryTableCountScanOperator(void* param);
static void buildVnodeGroupedStbTableCount(STableCountScanOperatorInfo* pInfo, STableCountScanSupp* pSupp,
- SSDataBlock* pRes, char* dbName, tb_uid_t stbUid);
+ SSDataBlock* pRes, char* dbName, tb_uid_t stbUid, SStorageAPI* pAPI);
static void buildVnodeGroupedNtbTableCount(STableCountScanOperatorInfo* pInfo, STableCountScanSupp* pSupp,
- SSDataBlock* pRes, char* dbName);
+ SSDataBlock* pRes, char* dbName, SStorageAPI* pAPI);
static void buildVnodeFilteredTbCount(SOperatorInfo* pOperator, STableCountScanOperatorInfo* pInfo,
STableCountScanSupp* pSupp, SSDataBlock* pRes, char* dbName);
static void buildVnodeGroupedTableCount(SOperatorInfo* pOperator, STableCountScanOperatorInfo* pInfo,
@@ -3191,6 +3176,7 @@ int32_t getTableCountScanSupp(SNodeList* groupTags, SName* tableName, SNodeList*
qError("%s get table count scan supp. get inputs error", GET_TASKID(taskInfo));
return code;
}
+
supp->dbNameSlotId = -1;
supp->stbNameSlotId = -1;
supp->tbCountSlotId = -1;
@@ -3200,6 +3186,7 @@ int32_t getTableCountScanSupp(SNodeList* groupTags, SName* tableName, SNodeList*
qError("%s get table count scan supp. get group tags slot id error", GET_TASKID(taskInfo));
return code;
}
+
code = tblCountScanGetCountSlotId(pseudoCols, supp);
if (code != TSDB_CODE_SUCCESS) {
qError("%s get table count scan supp. get count error", GET_TASKID(taskInfo));
@@ -3359,9 +3346,11 @@ static SSDataBlock* buildVnodeDbTableCount(SOperatorInfo* pOperator, STableCount
const char* db = NULL;
int32_t vgId = 0;
char dbName[TSDB_DB_NAME_LEN] = {0};
+ SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo;
+ SStorageAPI* pAPI = &pTaskInfo->storageAPI;
// get dbname
- vnodeGetInfo(pInfo->readHandle.vnode, &db, &vgId);
+ pAPI->metaFn.getBasicInfo(pInfo->readHandle.vnode, &db, &vgId, NULL, NULL);
SName sn = {0};
tNameFromString(&sn, db, T_NAME_ACCT | T_NAME_DB);
tNameGetDbName(&sn, dbName);
@@ -3376,20 +3365,23 @@ static SSDataBlock* buildVnodeDbTableCount(SOperatorInfo* pOperator, STableCount
static void buildVnodeGroupedTableCount(SOperatorInfo* pOperator, STableCountScanOperatorInfo* pInfo,
STableCountScanSupp* pSupp, SSDataBlock* pRes, int32_t vgId, char* dbName) {
+ SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo;
+ SStorageAPI* pAPI = &pTaskInfo->storageAPI;
+
if (pSupp->groupByStbName) {
if (pInfo->stbUidList == NULL) {
pInfo->stbUidList = taosArrayInit(16, sizeof(tb_uid_t));
- if (vnodeGetStbIdList(pInfo->readHandle.vnode, 0, pInfo->stbUidList) < 0) {
+ if (pAPI->metaFn.storeGetTableList(pInfo->readHandle.vnode, TSDB_SUPER_TABLE, pInfo->stbUidList) < 0) {
qError("vgId:%d, failed to get stb id list error: %s", vgId, terrstr());
}
}
if (pInfo->currGrpIdx < taosArrayGetSize(pInfo->stbUidList)) {
tb_uid_t stbUid = *(tb_uid_t*)taosArrayGet(pInfo->stbUidList, pInfo->currGrpIdx);
- buildVnodeGroupedStbTableCount(pInfo, pSupp, pRes, dbName, stbUid);
+ buildVnodeGroupedStbTableCount(pInfo, pSupp, pRes, dbName, stbUid, pAPI);
pInfo->currGrpIdx++;
} else if (pInfo->currGrpIdx == taosArrayGetSize(pInfo->stbUidList)) {
- buildVnodeGroupedNtbTableCount(pInfo, pSupp, pRes, dbName);
+ buildVnodeGroupedNtbTableCount(pInfo, pSupp, pRes, dbName, pAPI);
pInfo->currGrpIdx++;
} else {
@@ -3398,7 +3390,9 @@ static void buildVnodeGroupedTableCount(SOperatorInfo* pOperator, STableCountSca
} else {
uint64_t groupId = calcGroupId(dbName, strlen(dbName));
pRes->info.id.groupId = groupId;
- int64_t dbTableCount = metaGetTbNum(pInfo->readHandle.meta);
+
+ int64_t dbTableCount = 0;
+ pAPI->metaFn.getBasicInfo(pInfo->readHandle.vnode, NULL, NULL, &dbTableCount, NULL);
fillTableCountScanDataBlock(pSupp, dbName, "", dbTableCount, pRes);
setOperatorCompleted(pOperator);
}
@@ -3406,26 +3400,34 @@ static void buildVnodeGroupedTableCount(SOperatorInfo* pOperator, STableCountSca
static void buildVnodeFilteredTbCount(SOperatorInfo* pOperator, STableCountScanOperatorInfo* pInfo,
STableCountScanSupp* pSupp, SSDataBlock* pRes, char* dbName) {
+ SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo;
+ SStorageAPI* pAPI = &pTaskInfo->storageAPI;
+
if (strlen(pSupp->dbNameFilter) != 0) {
if (strlen(pSupp->stbNameFilter) != 0) {
- tb_uid_t uid = metaGetTableEntryUidByName(pInfo->readHandle.meta, pSupp->stbNameFilter);
- SMetaStbStats stats = {0};
- metaGetStbStats(pInfo->readHandle.meta, uid, &stats);
- int64_t ctbNum = stats.ctbNum;
- fillTableCountScanDataBlock(pSupp, dbName, pSupp->stbNameFilter, ctbNum, pRes);
+ uint64_t uid = 0;
+ pAPI->metaFn.getTableUidByName(pInfo->readHandle.vnode, pSupp->stbNameFilter, &uid);
+
+ int64_t numOfChildTables = 0;
+ pAPI->metaFn.getNumOfChildTables(pInfo->readHandle.vnode, uid, &numOfChildTables);
+
+ fillTableCountScanDataBlock(pSupp, dbName, pSupp->stbNameFilter, numOfChildTables, pRes);
} else {
- int64_t tbNumVnode = metaGetTbNum(pInfo->readHandle.meta);
+ int64_t tbNumVnode = 0;
+ pAPI->metaFn.getBasicInfo(pInfo->readHandle.vnode, NULL, NULL, &tbNumVnode, NULL);
fillTableCountScanDataBlock(pSupp, dbName, "", tbNumVnode, pRes);
}
} else {
- int64_t tbNumVnode = metaGetTbNum(pInfo->readHandle.meta);
+ int64_t tbNumVnode = 0;
+ pAPI->metaFn.getBasicInfo(pInfo->readHandle.vnode, NULL, NULL, &tbNumVnode, NULL);
fillTableCountScanDataBlock(pSupp, dbName, "", tbNumVnode, pRes);
}
+
setOperatorCompleted(pOperator);
}
static void buildVnodeGroupedNtbTableCount(STableCountScanOperatorInfo* pInfo, STableCountScanSupp* pSupp,
- SSDataBlock* pRes, char* dbName) {
+ SSDataBlock* pRes, char* dbName, SStorageAPI* pAPI) {
char fullStbName[TSDB_TABLE_FNAME_LEN] = {0};
if (pSupp->groupByDbName) {
snprintf(fullStbName, TSDB_TABLE_FNAME_LEN, "%s.%s", dbName, "");
@@ -3433,32 +3435,33 @@ static void buildVnodeGroupedNtbTableCount(STableCountScanOperatorInfo* pInfo, S
uint64_t groupId = calcGroupId(fullStbName, strlen(fullStbName));
pRes->info.id.groupId = groupId;
- int64_t ntbNum = metaGetNtbNum(pInfo->readHandle.meta);
- if (ntbNum != 0) {
- fillTableCountScanDataBlock(pSupp, dbName, "", ntbNum, pRes);
+
+ int64_t numOfTables = 0;
+ pAPI->metaFn.getBasicInfo(pInfo->readHandle.vnode, NULL, NULL, NULL, &numOfTables);
+
+ if (numOfTables != 0) {
+ fillTableCountScanDataBlock(pSupp, dbName, "", numOfTables, pRes);
}
}
static void buildVnodeGroupedStbTableCount(STableCountScanOperatorInfo* pInfo, STableCountScanSupp* pSupp,
- SSDataBlock* pRes, char* dbName, tb_uid_t stbUid) {
+ SSDataBlock* pRes, char* dbName, tb_uid_t stbUid, SStorageAPI* pAPI) {
char stbName[TSDB_TABLE_NAME_LEN] = {0};
- metaGetTableSzNameByUid(pInfo->readHandle.meta, stbUid, stbName);
+ pAPI->metaFn.getTableNameByUid(pInfo->readHandle.vnode, stbUid, stbName);
char fullStbName[TSDB_TABLE_FNAME_LEN] = {0};
if (pSupp->groupByDbName) {
- snprintf(fullStbName, TSDB_TABLE_FNAME_LEN, "%s.%s", dbName, stbName);
+ snprintf(fullStbName, TSDB_TABLE_FNAME_LEN, "%s.%s", dbName, varDataVal(stbName));
} else {
- snprintf(fullStbName, TSDB_TABLE_FNAME_LEN, "%s", stbName);
+ snprintf(fullStbName, TSDB_TABLE_FNAME_LEN, "%s", varDataVal(stbName));
}
uint64_t groupId = calcGroupId(fullStbName, strlen(fullStbName));
pRes->info.id.groupId = groupId;
- SMetaStbStats stats = {0};
- metaGetStbStats(pInfo->readHandle.meta, stbUid, &stats);
- int64_t ctbNum = stats.ctbNum;
-
- fillTableCountScanDataBlock(pSupp, dbName, stbName, ctbNum, pRes);
+ int64_t ctbNum = 0;
+ int32_t code = pAPI->metaFn.getNumOfChildTables(pInfo->readHandle.vnode, stbUid, &ctbNum);
+ fillTableCountScanDataBlock(pSupp, dbName, varDataVal(stbName), ctbNum, pRes);
}
static void destoryTableCountScanOperator(void* param) {
@@ -3468,3 +3471,5 @@ static void destoryTableCountScanOperator(void* param) {
taosArrayDestroy(pTableCountScanInfo->stbUidList);
taosMemoryFreeClear(param);
}
+
+// clang-format on
diff --git a/source/libs/executor/src/sortoperator.c b/source/libs/executor/src/sortoperator.c
index 718bb596c5ef9578b6ff740d4c5d3258a1984472..0395b9ec088f399f9f568a91a4fc2524c78280e6 100644
--- a/source/libs/executor/src/sortoperator.c
+++ b/source/libs/executor/src/sortoperator.c
@@ -60,7 +60,7 @@ SOperatorInfo* createSortOperatorInfo(SOperatorInfo* downstream, SSortPhysiNode*
}
pOperator->exprSupp.pCtx =
- createSqlFunctionCtx(pOperator->exprSupp.pExprInfo, numOfCols, &pOperator->exprSupp.rowEntryInfoOffset);
+ createSqlFunctionCtx(pOperator->exprSupp.pExprInfo, numOfCols, &pOperator->exprSupp.rowEntryInfoOffset, &pTaskInfo->storageAPI.functionStore);
initResultSizeInfo(&pOperator->resultInfo, 1024);
code = filterInitFromNode((SNode*)pSortNode->node.pConditions, &pOperator->exprSupp.pFilterInfo, 0);
if (code != TSDB_CODE_SUCCESS) {
@@ -153,6 +153,7 @@ SSDataBlock* getSortedBlockData(SSortHandle* pHandle, SSDataBlock* pDataBlock, i
colDataAssign(pDst, pSrc, p->info.rows, &pDataBlock->info);
}
+ pDataBlock->info.dataLoad = 1;
pDataBlock->info.rows = p->info.rows;
}
@@ -500,7 +501,7 @@ SOperatorInfo* createGroupSortOperatorInfo(SOperatorInfo* downstream, SGroupSort
pSup->numOfExprs = numOfCols;
initResultSizeInfo(&pOperator->resultInfo, 1024);
- pOperator->exprSupp.pCtx = createSqlFunctionCtx(pExprInfo, numOfCols, &pOperator->exprSupp.rowEntryInfoOffset);
+ pOperator->exprSupp.pCtx = createSqlFunctionCtx(pExprInfo, numOfCols, &pOperator->exprSupp.rowEntryInfoOffset, &pTaskInfo->storageAPI.functionStore);
pInfo->binfo.pRes = createDataBlockFromDescNode(pDescNode);
blockDataEnsureCapacity(pInfo->binfo.pRes, pOperator->resultInfo.capacity);
diff --git a/source/libs/executor/src/sysscanoperator.c b/source/libs/executor/src/sysscanoperator.c
index 04c6c619aadec594c459ad223be1f7117a4dbe67..23a7d2c9e9e0f8c6415d2a8cccb53e5b7ee0cfd4 100644
--- a/source/libs/executor/src/sysscanoperator.c
+++ b/source/libs/executor/src/sysscanoperator.c
@@ -21,27 +21,27 @@
#include "querynodes.h"
#include "systable.h"
#include "tname.h"
-#include "ttime.h"
#include "tdatablock.h"
#include "tmsg.h"
+#include "index.h"
+#include "operator.h"
#include "query.h"
+#include "querytask.h"
+#include "storageapi.h"
#include "tcompare.h"
#include "thash.h"
#include "ttypes.h"
-#include "vnode.h"
-#include "operator.h"
-#include "querytask.h"
-
typedef int (*__optSysFilter)(void* a, void* b, int16_t dtype);
typedef int32_t (*__sys_filte)(void* pMeta, SNode* cond, SArray* result);
typedef int32_t (*__sys_check)(SNode* cond);
typedef struct SSTabFltArg {
- void* pMeta;
- void* pVnode;
+ void* pMeta;
+ void* pVnode;
+ SStorageAPI* pAPI;
} SSTabFltArg;
typedef struct SSysTableIndex {
@@ -72,6 +72,7 @@ typedef struct SSysTableScanInfo {
SLoadRemoteDataInfo loadInfo;
SLimitInfo limitInfo;
int32_t tbnameSlotId;
+ SStorageAPI* pAPI;
} SSysTableScanInfo;
typedef struct {
@@ -153,10 +154,11 @@ static void relocateAndFilterSysTagsScanResult(SSysTableScanInfo* pInfo, int32_t
SFilterInfo* pFilterInfo);
int32_t sysFilte__DbName(void* arg, SNode* pNode, SArray* result) {
- void* pVnode = ((SSTabFltArg*)arg)->pVnode;
+ SSTabFltArg* pArg = arg;
+ void* pVnode = pArg->pVnode;
const char* db = NULL;
- vnodeGetInfo(pVnode, &db, NULL);
+ pArg->pAPI->metaFn.getBasicInfo(pVnode, &db, NULL, NULL, NULL);
SName sn = {0};
char dbname[TSDB_DB_FNAME_LEN + VARSTR_HEADER_SIZE] = {0};
@@ -180,10 +182,11 @@ int32_t sysFilte__DbName(void* arg, SNode* pNode, SArray* result) {
}
int32_t sysFilte__VgroupId(void* arg, SNode* pNode, SArray* result) {
- void* pVnode = ((SSTabFltArg*)arg)->pVnode;
+ SSTabFltArg* pArg = arg;
+ void* pVnode = ((SSTabFltArg*)arg)->pVnode;
int64_t vgId = 0;
- vnodeGetInfo(pVnode, NULL, (int32_t*)&vgId);
+ pArg->pAPI->metaFn.getBasicInfo(pVnode, NULL, (int32_t*)&vgId, NULL, NULL);
SOperatorNode* pOper = (SOperatorNode*)pNode;
SValueNode* pVal = (SValueNode*)pOper->pRight;
@@ -200,7 +203,7 @@ int32_t sysFilte__VgroupId(void* arg, SNode* pNode, SArray* result) {
}
int32_t sysFilte__TableName(void* arg, SNode* pNode, SArray* result) {
- void* pMeta = ((SSTabFltArg*)arg)->pMeta;
+ SSTabFltArg* pArg = arg;
SOperatorNode* pOper = (SOperatorNode*)pNode;
SValueNode* pVal = (SValueNode*)pOper->pRight;
@@ -220,7 +223,8 @@ int32_t sysFilte__TableName(void* arg, SNode* pNode, SArray* result) {
}
int32_t sysFilte__CreateTime(void* arg, SNode* pNode, SArray* result) {
- void* pMeta = ((SSTabFltArg*)arg)->pMeta;
+ SSTabFltArg* pArg = arg;
+ SStorageAPI* pAPI = pArg->pAPI;
SOperatorNode* pOper = (SOperatorNode*)pNode;
SValueNode* pVal = (SValueNode*)pOper->pRight;
@@ -237,7 +241,7 @@ int32_t sysFilte__CreateTime(void* arg, SNode* pNode, SArray* result) {
.equal = equal,
.filterFunc = func};
- int32_t ret = metaFilterCreateTime(pMeta, ¶m, result);
+ int32_t ret = pAPI->metaFilter.metaFilterCreateTime(pArg->pVnode, ¶m, result);
return ret;
}
@@ -431,8 +435,9 @@ static bool sysTableIsCondOnOneTable(SNode* pCond, char* condTable) {
}
static SSDataBlock* sysTableScanUserCols(SOperatorInfo* pOperator) {
- qDebug("sysTableScanUserCols get cols start");
- SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo;
+ SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo;
+ SStorageAPI* pAPI = &pTaskInfo->storageAPI;
+
SSysTableScanInfo* pInfo = pOperator->info;
if (pOperator->status == OP_EXEC_DONE) {
return NULL;
@@ -446,7 +451,7 @@ static SSDataBlock* sysTableScanUserCols(SOperatorInfo* pOperator) {
const char* db = NULL;
int32_t vgId = 0;
- vnodeGetInfo(pInfo->readHandle.vnode, &db, &vgId);
+ pAPI->metaFn.getBasicInfo(pInfo->readHandle.vnode, &db, &vgId, NULL, NULL);
SName sn = {0};
char dbname[TSDB_DB_FNAME_LEN + VARSTR_HEADER_SIZE] = {0};
@@ -461,18 +466,18 @@ static SSDataBlock* sysTableScanUserCols(SOperatorInfo* pOperator) {
STR_TO_VARSTR(tableName, pInfo->req.filterTb);
SMetaReader smrTable = {0};
- metaReaderInit(&smrTable, pInfo->readHandle.meta, 0);
- int32_t code = metaGetTableEntryByName(&smrTable, pInfo->req.filterTb);
+ pAPI->metaReaderFn.initReader(&smrTable, pInfo->readHandle.vnode, 0, &pAPI->metaFn);
+ int32_t code = pAPI->metaReaderFn.getTableEntryByName(&smrTable, pInfo->req.filterTb);
if (code != TSDB_CODE_SUCCESS) {
- // terrno has been set by metaGetTableEntryByName, therefore, return directly
- metaReaderClear(&smrTable);
+ // terrno has been set by pAPI->metaReaderFn.getTableEntryByName, therefore, return directly
+ pAPI->metaReaderFn.clearReader(&smrTable);
blockDataDestroy(dataBlock);
pInfo->loadInfo.totalRows = 0;
return NULL;
}
if (smrTable.me.type == TSDB_SUPER_TABLE) {
- metaReaderClear(&smrTable);
+ pAPI->metaReaderFn.clearReader(&smrTable);
blockDataDestroy(dataBlock);
pInfo->loadInfo.totalRows = 0;
return NULL;
@@ -480,12 +485,12 @@ static SSDataBlock* sysTableScanUserCols(SOperatorInfo* pOperator) {
if (smrTable.me.type == TSDB_CHILD_TABLE) {
int64_t suid = smrTable.me.ctbEntry.suid;
- metaReaderClear(&smrTable);
- metaReaderInit(&smrTable, pInfo->readHandle.meta, 0);
- code = metaGetTableEntryByUid(&smrTable, suid);
+ pAPI->metaReaderFn.clearReader(&smrTable);
+ pAPI->metaReaderFn.initReader(&smrTable, pInfo->readHandle.vnode, 0, &pAPI->metaFn);
+ code = pAPI->metaReaderFn.getTableEntryByUid(&smrTable, suid);
if (code != TSDB_CODE_SUCCESS) {
- // terrno has been set by metaGetTableEntryByName, therefore, return directly
- metaReaderClear(&smrTable);
+ // terrno has been set by pAPI->metaReaderFn.getTableEntryByName, therefore, return directly
+ pAPI->metaReaderFn.clearReader(&smrTable);
blockDataDestroy(dataBlock);
pInfo->loadInfo.totalRows = 0;
return NULL;
@@ -503,7 +508,7 @@ static SSDataBlock* sysTableScanUserCols(SOperatorInfo* pOperator) {
}
sysTableUserColsFillOneTableCols(pInfo, dbname, &numOfRows, dataBlock, tableName, schemaRow, typeName);
- metaReaderClear(&smrTable);
+ pAPI->metaReaderFn.clearReader(&smrTable);
if (numOfRows > 0) {
relocateAndFilterSysTagsScanResult(pInfo, numOfRows, dataBlock, pOperator->exprSupp.pFilterInfo);
@@ -517,7 +522,7 @@ static SSDataBlock* sysTableScanUserCols(SOperatorInfo* pOperator) {
int32_t ret = 0;
if (pInfo->pCur == NULL) {
- pInfo->pCur = metaOpenTbCursor(pInfo->readHandle.meta);
+ pInfo->pCur = pAPI->metaFn.openTableMetaCursor(pInfo->readHandle.vnode);
}
if (pInfo->pSchema == NULL) {
@@ -535,8 +540,12 @@ static SSDataBlock* sysTableScanUserCols(SOperatorInfo* pOperator) {
int32_t restore = pInfo->restore;
pInfo->restore = false;
- while (restore || ((ret = metaTbCursorNext(pInfo->pCur, TSDB_TABLE_MAX)) == 0)) {
- if (restore) restore = false;
+
+ while (restore || ((ret = pAPI->metaFn.cursorNext(pInfo->pCur, TSDB_TABLE_MAX)) == 0)) {
+ if (restore) {
+ restore = false;
+ }
+
char typeName[TSDB_TABLE_FNAME_LEN + VARSTR_HEADER_SIZE] = {0};
char tableName[TSDB_TABLE_NAME_LEN + VARSTR_HEADER_SIZE] = {0};
@@ -560,12 +569,12 @@ static SSDataBlock* sysTableScanUserCols(SOperatorInfo* pOperator) {
schemaRow = *(SSchemaWrapper**)schema;
} else {
SMetaReader smrSuperTable = {0};
- metaReaderInit(&smrSuperTable, pInfo->readHandle.meta, 0);
- int code = metaGetTableEntryByUid(&smrSuperTable, suid);
+ pAPI->metaReaderFn.initReader(&smrSuperTable, pInfo->readHandle.vnode, 0, &pAPI->metaFn);
+ int code = pAPI->metaReaderFn.getTableEntryByUid(&smrSuperTable, suid);
if (code != TSDB_CODE_SUCCESS) {
- // terrno has been set by metaGetTableEntryByName, therefore, return directly
+ // terrno has been set by pAPI->metaReaderFn.getTableEntryByName, therefore, return directly
qError("sysTableScanUserCols get meta by suid:%" PRId64 " error, code:%d", suid, code);
- metaReaderClear(&smrSuperTable);
+ pAPI->metaReaderFn.clearReader(&smrSuperTable);
blockDataDestroy(dataBlock);
pInfo->loadInfo.totalRows = 0;
return NULL;
@@ -573,7 +582,7 @@ static SSDataBlock* sysTableScanUserCols(SOperatorInfo* pOperator) {
SSchemaWrapper* schemaWrapper = tCloneSSchemaWrapper(&smrSuperTable.me.stbEntry.schemaRow);
taosHashPut(pInfo->pSchema, &suid, sizeof(int64_t), &schemaWrapper, POINTER_BYTES);
schemaRow = schemaWrapper;
- metaReaderClear(&smrSuperTable);
+ pAPI->metaReaderFn.clearReader(&smrSuperTable);
}
} else if (pInfo->pCur->mr.me.type == TSDB_NORMAL_TABLE) {
qDebug("sysTableScanUserCols cursor get normal table");
@@ -605,7 +614,7 @@ static SSDataBlock* sysTableScanUserCols(SOperatorInfo* pOperator) {
blockDataDestroy(dataBlock);
if (ret != 0) {
- metaCloseTbCursor(pInfo->pCur);
+ pAPI->metaFn.closeTableMetaCursor(pInfo->pCur);
pInfo->pCur = NULL;
setOperatorCompleted(pOperator);
}
@@ -617,7 +626,9 @@ static SSDataBlock* sysTableScanUserCols(SOperatorInfo* pOperator) {
}
static SSDataBlock* sysTableScanUserTags(SOperatorInfo* pOperator) {
- SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo;
+ SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo;
+ SStorageAPI* pAPI = &pTaskInfo->storageAPI;
+
SSysTableScanInfo* pInfo = pOperator->info;
if (pOperator->status == OP_EXEC_DONE) {
return NULL;
@@ -631,7 +642,7 @@ static SSDataBlock* sysTableScanUserTags(SOperatorInfo* pOperator) {
const char* db = NULL;
int32_t vgId = 0;
- vnodeGetInfo(pInfo->readHandle.vnode, &db, &vgId);
+ pAPI->metaFn.getBasicInfo(pInfo->readHandle.vnode, &db, &vgId, NULL, NULL);
SName sn = {0};
char dbname[TSDB_DB_FNAME_LEN + VARSTR_HEADER_SIZE] = {0};
@@ -647,37 +658,37 @@ static SSDataBlock* sysTableScanUserTags(SOperatorInfo* pOperator) {
STR_TO_VARSTR(tableName, condTableName);
SMetaReader smrChildTable = {0};
- metaReaderInit(&smrChildTable, pInfo->readHandle.meta, 0);
- int32_t code = metaGetTableEntryByName(&smrChildTable, condTableName);
+ pAPI->metaReaderFn.initReader(&smrChildTable, pInfo->readHandle.vnode, 0, &pAPI->metaFn);
+ int32_t code = pAPI->metaReaderFn.getTableEntryByName(&smrChildTable, condTableName);
if (code != TSDB_CODE_SUCCESS) {
- // terrno has been set by metaGetTableEntryByName, therefore, return directly
- metaReaderClear(&smrChildTable);
+ // terrno has been set by pAPI->metaReaderFn.getTableEntryByName, therefore, return directly
+ pAPI->metaReaderFn.clearReader(&smrChildTable);
blockDataDestroy(dataBlock);
pInfo->loadInfo.totalRows = 0;
return NULL;
}
if (smrChildTable.me.type != TSDB_CHILD_TABLE) {
- metaReaderClear(&smrChildTable);
+ pAPI->metaReaderFn.clearReader(&smrChildTable);
blockDataDestroy(dataBlock);
pInfo->loadInfo.totalRows = 0;
return NULL;
}
SMetaReader smrSuperTable = {0};
- metaReaderInit(&smrSuperTable, pInfo->readHandle.meta, META_READER_NOLOCK);
- code = metaGetTableEntryByUid(&smrSuperTable, smrChildTable.me.ctbEntry.suid);
+ pAPI->metaReaderFn.initReader(&smrSuperTable, pInfo->readHandle.vnode, META_READER_NOLOCK, &pAPI->metaFn);
+ code = pAPI->metaReaderFn.getTableEntryByUid(&smrSuperTable, smrChildTable.me.ctbEntry.suid);
if (code != TSDB_CODE_SUCCESS) {
- // terrno has been set by metaGetTableEntryByUid
- metaReaderClear(&smrSuperTable);
- metaReaderClear(&smrChildTable);
+ // terrno has been set by pAPI->metaReaderFn.getTableEntryByUid
+ pAPI->metaReaderFn.clearReader(&smrSuperTable);
+ pAPI->metaReaderFn.clearReader(&smrChildTable);
blockDataDestroy(dataBlock);
return NULL;
}
sysTableUserTagsFillOneTableTags(pInfo, &smrSuperTable, &smrChildTable, dbname, tableName, &numOfRows, dataBlock);
- metaReaderClear(&smrSuperTable);
- metaReaderClear(&smrChildTable);
+ pAPI->metaReaderFn.clearReader(&smrSuperTable);
+ pAPI->metaReaderFn.clearReader(&smrChildTable);
if (numOfRows > 0) {
relocateAndFilterSysTagsScanResult(pInfo, numOfRows, dataBlock, pOperator->exprSupp.pFilterInfo);
@@ -691,11 +702,11 @@ static SSDataBlock* sysTableScanUserTags(SOperatorInfo* pOperator) {
int32_t ret = 0;
if (pInfo->pCur == NULL) {
- pInfo->pCur = metaOpenTbCursor(pInfo->readHandle.meta);
+ pInfo->pCur = pAPI->metaFn.openTableMetaCursor(pInfo->readHandle.vnode);
}
bool blockFull = false;
- while ((ret = metaTbCursorNext(pInfo->pCur, TSDB_SUPER_TABLE)) == 0) {
+ while ((ret = pAPI->metaFn.cursorNext(pInfo->pCur, TSDB_SUPER_TABLE)) == 0) {
if (pInfo->pCur->mr.me.type != TSDB_CHILD_TABLE) {
continue;
}
@@ -704,27 +715,27 @@ static SSDataBlock* sysTableScanUserTags(SOperatorInfo* pOperator) {
STR_TO_VARSTR(tableName, pInfo->pCur->mr.me.name);
SMetaReader smrSuperTable = {0};
- metaReaderInit(&smrSuperTable, pInfo->readHandle.meta, 0);
+ pAPI->metaReaderFn.initReader(&smrSuperTable, pInfo->readHandle.vnode, 0, &pAPI->metaFn);
uint64_t suid = pInfo->pCur->mr.me.ctbEntry.suid;
- int32_t code = metaGetTableEntryByUid(&smrSuperTable, suid);
+ int32_t code = pAPI->metaReaderFn.getTableEntryByUid(&smrSuperTable, suid);
if (code != TSDB_CODE_SUCCESS) {
qError("failed to get super table meta, uid:0x%" PRIx64 ", code:%s, %s", suid, tstrerror(terrno),
GET_TASKID(pTaskInfo));
- metaReaderClear(&smrSuperTable);
- metaCloseTbCursor(pInfo->pCur);
+ pAPI->metaReaderFn.clearReader(&smrSuperTable);
+ pAPI->metaFn.closeTableMetaCursor(pInfo->pCur);
pInfo->pCur = NULL;
T_LONG_JMP(pTaskInfo->env, terrno);
}
if ((smrSuperTable.me.stbEntry.schemaTag.nCols + numOfRows) > pOperator->resultInfo.capacity) {
- metaTbCursorPrev(pInfo->pCur, TSDB_TABLE_MAX);
+ pAPI->metaFn.cursorPrev(pInfo->pCur, TSDB_TABLE_MAX);
blockFull = true;
} else {
sysTableUserTagsFillOneTableTags(pInfo, &smrSuperTable, &pInfo->pCur->mr, dbname, tableName, &numOfRows,
dataBlock);
}
- metaReaderClear(&smrSuperTable);
+ pAPI->metaReaderFn.clearReader(&smrSuperTable);
if (blockFull || numOfRows >= pOperator->resultInfo.capacity) {
relocateAndFilterSysTagsScanResult(pInfo, numOfRows, dataBlock, pOperator->exprSupp.pFilterInfo);
@@ -745,7 +756,7 @@ static SSDataBlock* sysTableScanUserTags(SOperatorInfo* pOperator) {
blockDataDestroy(dataBlock);
if (ret != 0) {
- metaCloseTbCursor(pInfo->pCur);
+ pAPI->metaFn.closeTableMetaCursor(pInfo->pCur);
pInfo->pCur = NULL;
setOperatorCompleted(pOperator);
}
@@ -1089,7 +1100,9 @@ int32_t buildSysDbTableInfo(const SSysTableScanInfo* pInfo, int32_t capacity) {
}
static SSDataBlock* sysTableBuildUserTablesByUids(SOperatorInfo* pOperator) {
- SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo;
+ SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo;
+ SStorageAPI* pAPI = &pTaskInfo->storageAPI;
+
SSysTableScanInfo* pInfo = pOperator->info;
SSysTableIndex* pIdx = pInfo->pIdx;
@@ -1100,7 +1113,7 @@ static SSDataBlock* sysTableBuildUserTablesByUids(SOperatorInfo* pOperator) {
const char* db = NULL;
int32_t vgId = 0;
- vnodeGetInfo(pInfo->readHandle.vnode, &db, &vgId);
+ pAPI->metaFn.getBasicInfo(pInfo->readHandle.vnode, &db, &vgId, NULL, NULL);
SName sn = {0};
char dbname[TSDB_DB_FNAME_LEN + VARSTR_HEADER_SIZE] = {0};
@@ -1118,10 +1131,10 @@ static SSDataBlock* sysTableBuildUserTablesByUids(SOperatorInfo* pOperator) {
tb_uid_t* uid = taosArrayGet(pIdx->uids, i);
SMetaReader mr = {0};
- metaReaderInit(&mr, pInfo->readHandle.meta, 0);
- ret = metaGetTableEntryByUid(&mr, *uid);
+ pAPI->metaReaderFn.initReader(&mr, pInfo->readHandle.vnode, 0, &pAPI->metaFn);
+ ret = pAPI->metaReaderFn.getTableEntryByUid(&mr, *uid);
if (ret < 0) {
- metaReaderClear(&mr);
+ pAPI->metaReaderFn.clearReader(&mr);
continue;
}
STR_TO_VARSTR(n, mr.me.name);
@@ -1146,15 +1159,15 @@ static SSDataBlock* sysTableBuildUserTablesByUids(SOperatorInfo* pOperator) {
colDataSetVal(pColInfoData, numOfRows, (char*)&ts, false);
SMetaReader mr1 = {0};
- metaReaderInit(&mr1, pInfo->readHandle.meta, META_READER_NOLOCK);
+ pAPI->metaReaderFn.initReader(&mr1, pInfo->readHandle.vnode, META_READER_NOLOCK, &pAPI->metaFn);
int64_t suid = mr.me.ctbEntry.suid;
- int32_t code = metaGetTableEntryByUid(&mr1, suid);
+ int32_t code = pAPI->metaReaderFn.getTableEntryByUid(&mr1, suid);
if (code != TSDB_CODE_SUCCESS) {
qError("failed to get super table meta, cname:%s, suid:0x%" PRIx64 ", code:%s, %s", pInfo->pCur->mr.me.name,
suid, tstrerror(terrno), GET_TASKID(pTaskInfo));
- metaReaderClear(&mr1);
- metaReaderClear(&mr);
+ pAPI->metaReaderFn.clearReader(&mr1);
+ pAPI->metaReaderFn.clearReader(&mr);
T_LONG_JMP(pTaskInfo->env, terrno);
}
pColInfoData = taosArrayGet(p->pDataBlock, 3);
@@ -1164,7 +1177,7 @@ static SSDataBlock* sysTableBuildUserTablesByUids(SOperatorInfo* pOperator) {
STR_TO_VARSTR(n, mr1.me.name);
pColInfoData = taosArrayGet(p->pDataBlock, 4);
colDataSetVal(pColInfoData, numOfRows, n, false);
- metaReaderClear(&mr1);
+ pAPI->metaReaderFn.clearReader(&mr1);
// table comment
pColInfoData = taosArrayGet(p->pDataBlock, 8);
@@ -1229,7 +1242,7 @@ static SSDataBlock* sysTableBuildUserTablesByUids(SOperatorInfo* pOperator) {
// impl later
}
- metaReaderClear(&mr);
+ pAPI->metaReaderFn.clearReader(&mr);
pColInfoData = taosArrayGet(p->pDataBlock, 9);
colDataSetVal(pColInfoData, numOfRows, n, false);
@@ -1275,10 +1288,16 @@ static SSDataBlock* sysTableBuildUserTablesByUids(SOperatorInfo* pOperator) {
static SSDataBlock* sysTableBuildUserTables(SOperatorInfo* pOperator) {
SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo;
+ SStorageAPI* pAPI = &pTaskInfo->storageAPI;
+ int8_t firstMetaCursor = 0;
SSysTableScanInfo* pInfo = pOperator->info;
if (pInfo->pCur == NULL) {
- pInfo->pCur = metaOpenTbCursor(pInfo->readHandle.meta);
+ pInfo->pCur = pAPI->metaFn.openTableMetaCursor(pInfo->readHandle.vnode);
+ firstMetaCursor = 1;
+ }
+ if (!firstMetaCursor) {
+ pAPI->metaFn.resumeTableMetaCursor(pInfo->pCur, 0);
}
blockDataCleanup(pInfo->pRes);
@@ -1286,7 +1305,7 @@ static SSDataBlock* sysTableBuildUserTables(SOperatorInfo* pOperator) {
const char* db = NULL;
int32_t vgId = 0;
- vnodeGetInfo(pInfo->readHandle.vnode, &db, &vgId);
+ pAPI->metaFn.getBasicInfo(pInfo->readHandle.vnode, &db, &vgId, NULL, NULL);
SName sn = {0};
char dbname[TSDB_DB_FNAME_LEN + VARSTR_HEADER_SIZE] = {0};
@@ -1301,7 +1320,7 @@ static SSDataBlock* sysTableBuildUserTables(SOperatorInfo* pOperator) {
char n[TSDB_TABLE_NAME_LEN + VARSTR_HEADER_SIZE] = {0};
int32_t ret = 0;
- while ((ret = metaTbCursorNext(pInfo->pCur, TSDB_SUPER_TABLE)) == 0) {
+ while ((ret = pAPI->metaFn.cursorNext(pInfo->pCur, TSDB_SUPER_TABLE)) == 0) {
STR_TO_VARSTR(n, pInfo->pCur->mr.me.name);
// table name
@@ -1324,15 +1343,15 @@ static SSDataBlock* sysTableBuildUserTables(SOperatorInfo* pOperator) {
colDataSetVal(pColInfoData, numOfRows, (char*)&ts, false);
SMetaReader mr = {0};
- metaReaderInit(&mr, pInfo->readHandle.meta, META_READER_NOLOCK);
+ pAPI->metaReaderFn.initReader(&mr, pInfo->readHandle.vnode, META_READER_NOLOCK, &pAPI->metaFn);
uint64_t suid = pInfo->pCur->mr.me.ctbEntry.suid;
- int32_t code = metaGetTableEntryByUid(&mr, suid);
+ int32_t code = pAPI->metaReaderFn.getTableEntryByUid(&mr, suid);
if (code != TSDB_CODE_SUCCESS) {
qError("failed to get super table meta, cname:%s, suid:0x%" PRIx64 ", code:%s, %s", pInfo->pCur->mr.me.name,
suid, tstrerror(terrno), GET_TASKID(pTaskInfo));
- metaReaderClear(&mr);
- metaCloseTbCursor(pInfo->pCur);
+ pAPI->metaReaderFn.clearReader(&mr);
+ pAPI->metaFn.closeTableMetaCursor(pInfo->pCur);
pInfo->pCur = NULL;
T_LONG_JMP(pTaskInfo->env, terrno);
}
@@ -1345,7 +1364,7 @@ static SSDataBlock* sysTableBuildUserTables(SOperatorInfo* pOperator) {
STR_TO_VARSTR(n, mr.me.name);
pColInfoData = taosArrayGet(p->pDataBlock, 4);
colDataSetVal(pColInfoData, numOfRows, n, false);
- metaReaderClear(&mr);
+ pAPI->metaReaderFn.clearReader(&mr);
// table comment
pColInfoData = taosArrayGet(p->pDataBlock, 8);
@@ -1422,12 +1441,14 @@ static SSDataBlock* sysTableBuildUserTables(SOperatorInfo* pOperator) {
numOfRows = 0;
if (pInfo->pRes->info.rows > 0) {
+ pAPI->metaFn.pauseTableMetaCursor(pInfo->pCur);
break;
}
}
}
if (numOfRows > 0) {
+ pAPI->metaFn.pauseTableMetaCursor(pInfo->pCur);
p->info.rows = numOfRows;
pInfo->pRes->info.rows = numOfRows;
@@ -1442,7 +1463,7 @@ static SSDataBlock* sysTableBuildUserTables(SOperatorInfo* pOperator) {
// todo temporarily free the cursor here, the true reason why the free is not valid needs to be found
if (ret != 0) {
- metaCloseTbCursor(pInfo->pCur);
+ pAPI->metaFn.closeTableMetaCursor(pInfo->pCur);
pInfo->pCur = NULL;
setOperatorCompleted(pOperator);
}
@@ -1471,7 +1492,8 @@ static SSDataBlock* sysTableScanUserTables(SOperatorInfo* pOperator) {
} else {
if (pInfo->showRewrite == false) {
if (pCondition != NULL && pInfo->pIdx == NULL) {
- SSTabFltArg arg = {.pMeta = pInfo->readHandle.meta, .pVnode = pInfo->readHandle.vnode};
+ SSTabFltArg arg = {
+ .pMeta = pInfo->readHandle.vnode, .pVnode = pInfo->readHandle.vnode, .pAPI = &pTaskInfo->storageAPI};
SSysTableIndex* idx = taosMemoryMalloc(sizeof(SSysTableIndex));
idx->init = 0;
@@ -1708,7 +1730,7 @@ static SSDataBlock* sysTableScanFromMNode(SOperatorInfo* pOperator, SSysTableSca
SOperatorInfo* createSysTableScanOperatorInfo(void* readHandle, SSystemTableScanPhysiNode* pScanPhyNode,
const char* pUser, SExecTaskInfo* pTaskInfo) {
- int32_t code = TDB_CODE_SUCCESS;
+ int32_t code = TSDB_CODE_SUCCESS;
SSysTableScanInfo* pInfo = taosMemoryCalloc(1, sizeof(SSysTableScanInfo));
SOperatorInfo* pOperator = taosMemoryCalloc(1, sizeof(SOperatorInfo));
if (pInfo == NULL || pOperator == NULL) {
@@ -1726,6 +1748,8 @@ SOperatorInfo* createSysTableScanOperatorInfo(void* readHandle, SSystemTableScan
extractTbnameSlotId(pInfo, pScanNode);
+ pInfo->pAPI = &pTaskInfo->storageAPI;
+
pInfo->accountId = pScanPhyNode->accountId;
pInfo->pUser = taosStrdup((void*)pUser);
pInfo->sysInfo = pScanPhyNode->sysInfo;
@@ -1798,16 +1822,20 @@ void destroySysScanOperator(void* param) {
if (strncasecmp(name, TSDB_INS_TABLE_TABLES, TSDB_TABLE_FNAME_LEN) == 0 ||
strncasecmp(name, TSDB_INS_TABLE_TAGS, TSDB_TABLE_FNAME_LEN) == 0 ||
strncasecmp(name, TSDB_INS_TABLE_COLS, TSDB_TABLE_FNAME_LEN) == 0 || pInfo->pCur != NULL) {
- metaCloseTbCursor(pInfo->pCur);
+ if (pInfo->pAPI->metaFn.closeTableMetaCursor != NULL) {
+ pInfo->pAPI->metaFn.closeTableMetaCursor(pInfo->pCur);
+ }
+
pInfo->pCur = NULL;
}
+
if (pInfo->pIdx) {
taosArrayDestroy(pInfo->pIdx->uids);
taosMemoryFree(pInfo->pIdx);
pInfo->pIdx = NULL;
}
- if(pInfo->pSchema) {
+ if (pInfo->pSchema) {
taosHashCleanup(pInfo->pSchema);
pInfo->pSchema = NULL;
}
@@ -2124,15 +2152,15 @@ static int32_t optSysTabFilte(void* arg, SNode* cond, SArray* result) {
return -1;
}
-static int32_t doGetTableRowSize(void* pMeta, uint64_t uid, int32_t* rowLen, const char* idstr) {
+static int32_t doGetTableRowSize(SReadHandle* pHandle, uint64_t uid, int32_t* rowLen, const char* idstr) {
*rowLen = 0;
SMetaReader mr = {0};
- metaReaderInit(&mr, pMeta, 0);
- int32_t code = metaGetTableEntryByUid(&mr, uid);
+ pHandle->api.metaReaderFn.initReader(&mr, pHandle->vnode, 0, &pHandle->api.metaFn);
+ int32_t code = pHandle->api.metaReaderFn.getTableEntryByUid(&mr, uid);
if (code != TSDB_CODE_SUCCESS) {
qError("failed to get table meta, uid:0x%" PRIx64 ", code:%s, %s", uid, tstrerror(terrno), idstr);
- metaReaderClear(&mr);
+ pHandle->api.metaReaderFn.clearReader(&mr);
return terrno;
}
@@ -2144,10 +2172,10 @@ static int32_t doGetTableRowSize(void* pMeta, uint64_t uid, int32_t* rowLen, con
} else if (mr.me.type == TSDB_CHILD_TABLE) {
uint64_t suid = mr.me.ctbEntry.suid;
tDecoderClear(&mr.coder);
- code = metaGetTableEntryByUid(&mr, suid);
+ code = pHandle->api.metaReaderFn.getTableEntryByUid(&mr, suid);
if (code != TSDB_CODE_SUCCESS) {
qError("failed to get table meta, uid:0x%" PRIx64 ", code:%s, %s", suid, tstrerror(terrno), idstr);
- metaReaderClear(&mr);
+ pHandle->api.metaReaderFn.clearReader(&mr);
return terrno;
}
@@ -2163,7 +2191,7 @@ static int32_t doGetTableRowSize(void* pMeta, uint64_t uid, int32_t* rowLen, con
}
}
- metaReaderClear(&mr);
+ pHandle->api.metaReaderFn.clearReader(&mr);
return TSDB_CODE_SUCCESS;
}
@@ -2174,16 +2202,17 @@ static SSDataBlock* doBlockInfoScan(SOperatorInfo* pOperator) {
SBlockDistInfo* pBlockScanInfo = pOperator->info;
SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo;
+ SStorageAPI* pAPI = &pTaskInfo->storageAPI;
STableBlockDistInfo blockDistInfo = {.minRows = INT_MAX, .maxRows = INT_MIN};
- int32_t code = doGetTableRowSize(pBlockScanInfo->readHandle.meta, pBlockScanInfo->uid,
- (int32_t*)&blockDistInfo.rowSize, GET_TASKID(pTaskInfo));
+ int32_t code = doGetTableRowSize(&pBlockScanInfo->readHandle, pBlockScanInfo->uid, (int32_t*)&blockDistInfo.rowSize,
+ GET_TASKID(pTaskInfo));
if (code != TSDB_CODE_SUCCESS) {
T_LONG_JMP(pTaskInfo->env, code);
}
- tsdbGetFileBlocksDistInfo(pBlockScanInfo->pHandle, &blockDistInfo);
- blockDistInfo.numOfInmemRows = (int32_t)tsdbGetNumOfRowsInMemTable(pBlockScanInfo->pHandle);
+ pAPI->tsdReader.tsdReaderGetDataBlockDistInfo(pBlockScanInfo->pHandle, &blockDistInfo);
+ blockDistInfo.numOfInmemRows = (int32_t)pAPI->tsdReader.tsdReaderGetNumOfInMemRows(pBlockScanInfo->pHandle);
SSDataBlock* pBlock = pBlockScanInfo->pResBlock;
@@ -2213,7 +2242,7 @@ static SSDataBlock* doBlockInfoScan(SOperatorInfo* pOperator) {
static void destroyBlockDistScanOperatorInfo(void* param) {
SBlockDistInfo* pDistInfo = (SBlockDistInfo*)param;
blockDataDestroy(pDistInfo->pResBlock);
- tsdbReaderClose(pDistInfo->pHandle);
+ pDistInfo->readHandle.api.tsdReader.tsdReaderClose(pDistInfo->pHandle);
tableListDestroy(pDistInfo->pTableListInfo);
taosMemoryFreeClear(param);
}
@@ -2268,7 +2297,8 @@ SOperatorInfo* createDataBlockInfoScanOperator(SReadHandle* readHandle, SBlockDi
size_t num = tableListGetSize(pTableListInfo);
void* pList = tableListGetInfo(pTableListInfo, 0);
- code = tsdbReaderOpen(readHandle->vnode, &cond, pList, num, pInfo->pResBlock, &pInfo->pHandle, pTaskInfo->id.str, false, NULL);
+ code = readHandle->api.tsdReader.tsdReaderOpen(readHandle->vnode, &cond, pList, num, pInfo->pResBlock,
+ (void**)&pInfo->pHandle, pTaskInfo->id.str, false, NULL);
cleanupQueryTableDataCond(&cond);
if (code != 0) {
goto _error;
@@ -2280,7 +2310,7 @@ SOperatorInfo* createDataBlockInfoScanOperator(SReadHandle* readHandle, SBlockDi
int32_t numOfCols = 0;
SExprInfo* pExprInfo = createExprInfo(pBlockScanNode->pScanPseudoCols, NULL, &numOfCols);
- int32_t code = initExprSupp(&pOperator->exprSupp, pExprInfo, numOfCols);
+ int32_t code = initExprSupp(&pOperator->exprSupp, pExprInfo, numOfCols, &pTaskInfo->storageAPI.functionStore);
if (code != TSDB_CODE_SUCCESS) {
goto _error;
}
@@ -2295,4 +2325,4 @@ _error:
taosMemoryFreeClear(pInfo);
taosMemoryFreeClear(pOperator);
return NULL;
-}
\ No newline at end of file
+}
diff --git a/source/libs/executor/src/tfill.c b/source/libs/executor/src/tfill.c
index 6b49d235f417b9548d641167b5e7845ee322e114..7cc50a70ab799184de5b350f72526ee44ac23295 100644
--- a/source/libs/executor/src/tfill.c
+++ b/source/libs/executor/src/tfill.c
@@ -517,11 +517,16 @@ void taosFillSetStartInfo(SFillInfo* pFillInfo, int32_t numOfRows, TSKEY endKey)
return;
}
+ // the endKey is now the aligned time window value. truncate time window isn't correct.
pFillInfo->end = endKey;
- if (!FILL_IS_ASC_FILL(pFillInfo)) {
- pFillInfo->end = taosTimeTruncate(endKey, &pFillInfo->interval, pFillInfo->interval.precision);
- pFillInfo->end = taosTimeAdd(pFillInfo->end, pFillInfo->interval.interval, pFillInfo->interval.intervalUnit,pFillInfo->interval.precision);
+
+#if 0
+ if (pFillInfo->order == TSDB_ORDER_ASC) {
+ ASSERT(pFillInfo->start <= pFillInfo->end);
+ } else {
+ ASSERT(pFillInfo->start >= pFillInfo->end);
}
+#endif
pFillInfo->index = 0;
pFillInfo->numOfRows = numOfRows;
@@ -531,6 +536,13 @@ void taosFillSetInputDataBlock(SFillInfo* pFillInfo, const SSDataBlock* pInput)
pFillInfo->pSrcBlock = (SSDataBlock*)pInput;
}
+void taosFillUpdateStartTimestampInfo(SFillInfo* pFillInfo, int64_t ts) {
+ pFillInfo->start = ts;
+ pFillInfo->currentKey = ts;
+}
+
+bool taosFillNotStarted(const SFillInfo* pFillInfo) {return pFillInfo->start == pFillInfo->currentKey;}
+
bool taosFillHasMoreResults(SFillInfo* pFillInfo) {
int32_t remain = taosNumOfRemainRows(pFillInfo);
if (remain > 0) {
@@ -562,9 +574,10 @@ int64_t getNumOfResultsAfterFillGap(SFillInfo* pFillInfo, TSKEY ekey, int32_t ma
ASSERT(numOfRes >= numOfRows);
} else { // reach the end of data
if ((ekey1 < pFillInfo->currentKey && FILL_IS_ASC_FILL(pFillInfo)) ||
- (ekey1 >= pFillInfo->currentKey && !FILL_IS_ASC_FILL(pFillInfo))) {
+ (ekey1 > pFillInfo->currentKey && !FILL_IS_ASC_FILL(pFillInfo))) {
return 0;
}
+
numOfRes = taosTimeCountInterval(ekey1, pFillInfo->currentKey, pFillInfo->interval.sliding,
pFillInfo->interval.slidingUnit, pFillInfo->interval.precision);
numOfRes += 1;
diff --git a/source/libs/executor/src/timesliceoperator.c b/source/libs/executor/src/timesliceoperator.c
index 432a627957c35d48301e6e2c3f5f2f239076befc..2421343bd7b57949233419997c6d35b2cfd513f0 100644
--- a/source/libs/executor/src/timesliceoperator.c
+++ b/source/libs/executor/src/timesliceoperator.c
@@ -18,6 +18,7 @@
#include "functionMgt.h"
#include "operator.h"
#include "querytask.h"
+#include "storageapi.h"
#include "tcommon.h"
#include "tcompare.h"
#include "tdatablock.h"
@@ -256,7 +257,8 @@ static bool genInterpolationResult(STimeSliceOperatorInfo* pSliceInfo, SExprSupp
// output the result
- bool hasInterp = true;
+ int32_t fillColIndex = 0;
+ bool hasInterp = true;
for (int32_t j = 0; j < pExprSup->numOfExprs; ++j) {
SExprInfo* pExprInfo = &pExprSup->pExprInfo[j];
@@ -306,7 +308,7 @@ static bool genInterpolationResult(STimeSliceOperatorInfo* pSliceInfo, SExprSupp
case TSDB_FILL_SET_VALUE:
case TSDB_FILL_SET_VALUE_F: {
- SVariant* pVar = &pSliceInfo->pFillColInfo[j].fillVal;
+ SVariant* pVar = &pSliceInfo->pFillColInfo[fillColIndex].fillVal;
if (pDst->info.type == TSDB_DATA_TYPE_FLOAT) {
float v = 0;
@@ -341,6 +343,8 @@ static bool genInterpolationResult(STimeSliceOperatorInfo* pSliceInfo, SExprSupp
}
colDataSetVal(pDst, rows, (char*)&v, false);
}
+
+ ++fillColIndex;
break;
}
@@ -873,7 +877,7 @@ SOperatorInfo* createTimeSliceOperatorInfo(SOperatorInfo* downstream, SPhysiNode
int32_t numOfExprs = 0;
SExprInfo* pExprInfo = createExprInfo(pInterpPhyNode->pFuncs, NULL, &numOfExprs);
- int32_t code = initExprSupp(pSup, pExprInfo, numOfExprs);
+ int32_t code = initExprSupp(pSup, pExprInfo, numOfExprs, &pTaskInfo->storageAPI.functionStore);
if (code != TSDB_CODE_SUCCESS) {
goto _error;
}
@@ -881,7 +885,7 @@ SOperatorInfo* createTimeSliceOperatorInfo(SOperatorInfo* downstream, SPhysiNode
if (pInterpPhyNode->pExprs != NULL) {
int32_t num = 0;
SExprInfo* pScalarExprInfo = createExprInfo(pInterpPhyNode->pExprs, NULL, &num);
- code = initExprSupp(&pInfo->scalarSup, pScalarExprInfo, num);
+ code = initExprSupp(&pInfo->scalarSup, pScalarExprInfo, num, &pTaskInfo->storageAPI.functionStore);
if (code != TSDB_CODE_SUCCESS) {
goto _error;
}
diff --git a/source/libs/executor/src/timewindowoperator.c b/source/libs/executor/src/timewindowoperator.c
index 9d882a90ce8300feca915b5900a247cfad4c2bec..2676e097f9340cbf200b74b641c056a4eb17ef40 100644
--- a/source/libs/executor/src/timewindowoperator.c
+++ b/source/libs/executor/src/timewindowoperator.c
@@ -271,43 +271,6 @@ int32_t getNumOfRowsInTimeWindow(SDataBlockInfo* pDataBlockInfo, TSKEY* pPrimary
return num;
}
-static void getNextTimeWindow(SInterval* pInterval, int32_t precision, int32_t order, STimeWindow* tw) {
- int32_t factor = GET_FORWARD_DIRECTION_FACTOR(order);
- if (pInterval->intervalUnit != 'n' && pInterval->intervalUnit != 'y') {
- tw->skey += pInterval->sliding * factor;
- tw->ekey = tw->skey + pInterval->interval - 1;
- return;
- }
-
- int64_t key = tw->skey, interval = pInterval->interval;
- // convert key to second
- key = convertTimePrecision(key, precision, TSDB_TIME_PRECISION_MILLI) / 1000;
-
- if (pInterval->intervalUnit == 'y') {
- interval *= 12;
- }
-
- struct tm tm;
- time_t t = (time_t)key;
- taosLocalTime(&t, &tm, NULL);
-
- int mon = (int)(tm.tm_year * 12 + tm.tm_mon + interval * factor);
- tm.tm_year = mon / 12;
- tm.tm_mon = mon % 12;
- tw->skey = convertTimePrecision((int64_t)taosMktime(&tm) * 1000LL, TSDB_TIME_PRECISION_MILLI, precision);
-
- mon = (int)(mon + interval);
- tm.tm_year = mon / 12;
- tm.tm_mon = mon % 12;
- tw->ekey = convertTimePrecision((int64_t)taosMktime(&tm) * 1000LL, TSDB_TIME_PRECISION_MILLI, precision);
-
- tw->ekey -= 1;
-}
-
-void getNextIntervalWindow(SInterval* pInterval, STimeWindow* tw, int32_t order) {
- getNextTimeWindow(pInterval, pInterval->precision, order, tw);
-}
-
void doTimeWindowInterpolation(SArray* pPrevValues, SArray* pDataBlock, TSKEY prevTs, int32_t prevRowIndex, TSKEY curTs,
int32_t curRowIndex, TSKEY windowKey, int32_t type, SExprSupp* pSup) {
SqlFunctionCtx* pCtx = pSup->pCtx;
@@ -449,7 +412,7 @@ static bool setTimeWindowInterpolationEndTs(SIntervalAggOperatorInfo* pInfo, SEx
}
bool inCalSlidingWindow(SInterval* pInterval, STimeWindow* pWin, TSKEY calStart, TSKEY calEnd, EStreamType blockType) {
- if (pInterval->interval != pInterval->sliding &&
+ if (pInterval->interval != pInterval->sliding &&
((pWin->ekey < calStart || pWin->skey > calEnd) || (blockType == STREAM_PULL_DATA && pWin->skey < calStart) )) {
return false;
}
@@ -466,7 +429,7 @@ static int32_t getNextQualifiedWindow(SInterval* pInterval, STimeWindow* pNext,
bool ascQuery = (order == TSDB_ORDER_ASC);
int32_t precision = pInterval->precision;
- getNextTimeWindow(pInterval, precision, order, pNext);
+ getNextTimeWindow(pInterval, pNext, order);
// next time window is not in current block
if ((pNext->skey > pDataBlockInfo->window.ekey && order == TSDB_ORDER_ASC) ||
@@ -511,7 +474,7 @@ static int32_t getNextQualifiedWindow(SInterval* pInterval, STimeWindow* pNext,
if (ascQuery && primaryKeys[startPos] > pNext->ekey) {
TSKEY next = primaryKeys[startPos];
if (pInterval->intervalUnit == 'n' || pInterval->intervalUnit == 'y') {
- pNext->skey = taosTimeTruncate(next, pInterval, precision);
+ pNext->skey = taosTimeTruncate(next, pInterval);
pNext->ekey = taosTimeAdd(pNext->skey, pInterval->interval, pInterval->intervalUnit, precision) - 1;
} else {
pNext->ekey += ((next - pNext->ekey + pInterval->sliding - 1) / pInterval->sliding) * pInterval->sliding;
@@ -520,7 +483,7 @@ static int32_t getNextQualifiedWindow(SInterval* pInterval, STimeWindow* pNext,
} else if ((!ascQuery) && primaryKeys[startPos] < pNext->skey) {
TSKEY next = primaryKeys[startPos];
if (pInterval->intervalUnit == 'n' || pInterval->intervalUnit == 'y') {
- pNext->skey = taosTimeTruncate(next, pInterval, precision);
+ pNext->skey = taosTimeTruncate(next, pInterval);
pNext->ekey = taosTimeAdd(pNext->skey, pInterval->interval, pInterval->intervalUnit, precision) - 1;
} else {
pNext->skey -= ((pNext->skey - next + pInterval->sliding - 1) / pInterval->sliding) * pInterval->sliding;
@@ -1334,13 +1297,17 @@ static void doClearWindowImpl(SResultRowPosition* p1, SDiskbasedBuf* pResultBuf,
}
static bool doDeleteWindow(SOperatorInfo* pOperator, TSKEY ts, uint64_t groupId) {
+ SStorageAPI* pAPI = &pOperator->pTaskInfo->storageAPI;
+
SStreamIntervalOperatorInfo* pInfo = pOperator->info;
SWinKey key = {.ts = ts, .groupId = groupId};
tSimpleHashRemove(pInfo->aggSup.pResultRowHashTable, &key, sizeof(SWinKey));
- streamStateDel(pInfo->pState, &key);
+ pAPI->stateStore.streamStateDel(pInfo->pState, &key);
return true;
}
+static int32_t getChildIndex(SSDataBlock* pBlock) { return pBlock->info.childId; }
+
static void doDeleteWindows(SOperatorInfo* pOperator, SInterval* pInterval, SSDataBlock* pBlock, SArray* pUpWins,
SSHashObj* pUpdatedMap) {
SStreamIntervalOperatorInfo* pInfo = pOperator->info;
@@ -1368,15 +1335,21 @@ static void doDeleteWindows(SOperatorInfo* pOperator, SInterval* pInterval, SSDa
do {
if (!inCalSlidingWindow(pInterval, &win, calStTsCols[i], calEnTsCols[i], pBlock->info.type)) {
- getNextTimeWindow(pInterval, pInterval->precision, TSDB_ORDER_ASC, &win);
+ getNextTimeWindow(pInterval, &win, TSDB_ORDER_ASC);
continue;
}
uint64_t winGpId = pGpDatas[i];
SWinKey winRes = {.ts = win.skey, .groupId = winGpId};
void* chIds = taosHashGet(pInfo->pPullDataMap, &winRes, sizeof(SWinKey));
if (chIds) {
- getNextTimeWindow(pInterval, pInterval->precision, TSDB_ORDER_ASC, &win);
- continue;
+ int32_t childId = getChildIndex(pBlock);
+ SArray* chArray = *(void**)chIds;
+ int32_t index = taosArraySearchIdx(chArray, &childId, compareInt32Val, TD_EQ);
+ if (index != -1) {
+ qDebug("===stream===try push delete window%" PRId64 "chId:%d ,continue", win.skey, childId);
+ getNextTimeWindow(pInterval, &win, TSDB_ORDER_ASC);
+ continue;
+ }
}
bool res = doDeleteWindow(pOperator, win.skey, winGpId);
if (pUpWins && res) {
@@ -1385,7 +1358,7 @@ static void doDeleteWindows(SOperatorInfo* pOperator, SInterval* pInterval, SSDa
if (pUpdatedMap) {
tSimpleHashRemove(pUpdatedMap, &winRes, sizeof(SWinKey));
}
- getNextTimeWindow(pInterval, pInterval->precision, TSDB_ORDER_ASC, &win);
+ getNextTimeWindow(pInterval, &win, TSDB_ORDER_ASC);
} while (win.ekey <= endTsCols[i]);
}
}
@@ -1474,7 +1447,7 @@ static void doBuildDeleteResult(SStreamIntervalOperatorInfo* pInfo, SArray* pWin
for (int32_t i = *index; i < size; i++) {
SWinKey* pWin = taosArrayGet(pWins, i);
void* tbname = NULL;
- streamStateGetParName(pInfo->pState, pWin->groupId, &tbname);
+ pInfo->statestore.streamStateGetParName(pInfo->pState, pWin->groupId, &tbname);
if (tbname == NULL) {
appendOneRowToStreamSpecialBlock(pBlock, &pWin->ts, &pWin->ts, &uid, &pWin->groupId, NULL);
} else {
@@ -1482,7 +1455,7 @@ static void doBuildDeleteResult(SStreamIntervalOperatorInfo* pInfo, SArray* pWin
STR_WITH_MAXSIZE_TO_VARSTR(parTbName, tbname, sizeof(parTbName));
appendOneRowToStreamSpecialBlock(pBlock, &pWin->ts, &pWin->ts, &uid, &pWin->groupId, parTbName);
}
- streamFreeVal(tbname);
+ pInfo->statestore.streamStateFreeVal(tbname);
(*index)++;
}
}
@@ -1532,11 +1505,12 @@ void destroyStreamFinalIntervalOperatorInfo(void* param) {
taosArrayDestroy(*(void**)pIte);
}
taosHashCleanup(pInfo->pPullDataMap);
+ taosHashCleanup(pInfo->pFinalPullDataMap);
taosArrayDestroy(pInfo->pPullWins);
blockDataDestroy(pInfo->pPullDataRes);
taosArrayDestroy(pInfo->pDelWins);
blockDataDestroy(pInfo->pDelRes);
- streamFileStateDestroy(pInfo->pState->pFileState);
+ pInfo->statestore.streamFileStateDestroy(pInfo->pState->pFileState);
taosMemoryFreeClear(pInfo->pState);
nodesDestroyNode((SNode*)pInfo->pPhyNode);
@@ -1611,16 +1585,20 @@ static bool timeWindowinterpNeeded(SqlFunctionCtx* pCtx, int32_t numOfCols, SInt
}
void initIntervalDownStream(SOperatorInfo* downstream, uint16_t type, SStreamIntervalOperatorInfo* pInfo) {
+ SStateStore* pAPI = &downstream->pTaskInfo->storageAPI.stateStore;
+
if (downstream->operatorType != QUERY_NODE_PHYSICAL_PLAN_STREAM_SCAN) {
initIntervalDownStream(downstream->pDownstream[0], type, pInfo);
return;
}
+
SStreamScanInfo* pScanInfo = downstream->info;
pScanInfo->windowSup.parentType = type;
pScanInfo->windowSup.pIntervalAggSup = &pInfo->aggSup;
- if (!pScanInfo->igCheckUpdate && !pScanInfo->pUpdateInfo) {
- pScanInfo->pUpdateInfo = updateInfoInitP(&pInfo->interval, pInfo->twAggSup.waterMark);
+ if (!pScanInfo->pUpdateInfo) {
+ pScanInfo->pUpdateInfo = pAPI->updateInfoInitP(&pInfo->interval, pInfo->twAggSup.waterMark);
}
+
pScanInfo->interval = pInfo->interval;
pScanInfo->twAggSup = pInfo->twAggSup;
pScanInfo->pState = pInfo->pState;
@@ -1653,7 +1631,7 @@ SOperatorInfo* createIntervalOperatorInfo(SOperatorInfo* downstream, SIntervalPh
int32_t num = 0;
SExprInfo* pExprInfo = createExprInfo(pPhyNode->window.pFuncs, NULL, &num);
int32_t code =
- initAggSup(pSup, &pInfo->aggSup, pExprInfo, num, keyBufSize, pTaskInfo->id.str, pTaskInfo->streamInfo.pState);
+ initAggSup(pSup, &pInfo->aggSup, pExprInfo, num, keyBufSize, pTaskInfo->id.str, pTaskInfo->streamInfo.pState, &pTaskInfo->storageAPI.functionStore);
if (code != TSDB_CODE_SUCCESS) {
goto _error;
}
@@ -1681,7 +1659,7 @@ SOperatorInfo* createIntervalOperatorInfo(SOperatorInfo* downstream, SIntervalPh
if (pPhyNode->window.pExprs != NULL) {
int32_t numOfScalar = 0;
SExprInfo* pScalarExprInfo = createExprInfo(pPhyNode->window.pExprs, NULL, &numOfScalar);
- code = initExprSupp(&pInfo->scalarSupp, pScalarExprInfo, numOfScalar);
+ code = initExprSupp(&pInfo->scalarSupp, pScalarExprInfo, numOfScalar, &pTaskInfo->storageAPI.functionStore);
if (code != TSDB_CODE_SUCCESS) {
goto _error;
}
@@ -1881,7 +1859,7 @@ SOperatorInfo* createStatewindowOperatorInfo(SOperatorInfo* downstream, SStateWi
if (pStateNode->window.pExprs != NULL) {
int32_t numOfScalarExpr = 0;
SExprInfo* pScalarExprInfo = createExprInfo(pStateNode->window.pExprs, NULL, &numOfScalarExpr);
- int32_t code = initExprSupp(&pInfo->scalarSup, pScalarExprInfo, numOfScalarExpr);
+ int32_t code = initExprSupp(&pInfo->scalarSup, pScalarExprInfo, numOfScalarExpr, &pTaskInfo->storageAPI.functionStore);
if (code != TSDB_CODE_SUCCESS) {
goto _error;
}
@@ -1907,7 +1885,7 @@ SOperatorInfo* createStatewindowOperatorInfo(SOperatorInfo* downstream, SStateWi
initResultSizeInfo(&pOperator->resultInfo, 4096);
code = initAggSup(&pOperator->exprSupp, &pInfo->aggSup, pExprInfo, num, keyBufSize, pTaskInfo->id.str,
- pTaskInfo->streamInfo.pState);
+ pTaskInfo->streamInfo.pState, &pTaskInfo->storageAPI.functionStore);
if (code != TSDB_CODE_SUCCESS) {
goto _error;
}
@@ -1976,7 +1954,7 @@ SOperatorInfo* createSessionAggOperatorInfo(SOperatorInfo* downstream, SSessionW
initBasicInfo(&pInfo->binfo, pResBlock);
int32_t code = initAggSup(&pOperator->exprSupp, &pInfo->aggSup, pExprInfo, numOfCols, keyBufSize, pTaskInfo->id.str,
- pTaskInfo->streamInfo.pState);
+ pTaskInfo->streamInfo.pState, &pTaskInfo->storageAPI.functionStore);
if (code != TSDB_CODE_SUCCESS) {
goto _error;
}
@@ -2048,33 +2026,33 @@ void compactFunctions(SqlFunctionCtx* pDestCtx, SqlFunctionCtx* pSourceCtx, int3
}
}
-bool hasIntervalWindow(SStreamState* pState, SWinKey* pKey) { return streamStateCheck(pState, pKey); }
+bool hasIntervalWindow(void* pState, SWinKey* pKey, SStateStore* pStore) { return pStore->streamStateCheck(pState, pKey); }
-int32_t setIntervalOutputBuf(SStreamState* pState, STimeWindow* win, SRowBuffPos** pResult, int64_t groupId,
+int32_t setIntervalOutputBuf(void* pState, STimeWindow* win, SRowBuffPos** pResult, int64_t groupId,
SqlFunctionCtx* pCtx, int32_t numOfOutput, int32_t* rowEntryInfoOffset,
- SAggSupporter* pAggSup) {
- SWinKey key = {
- .ts = win->skey,
- .groupId = groupId,
- };
+ SAggSupporter* pAggSup, SStateStore* pStore) {
+
+ SWinKey key = { .ts = win->skey, .groupId = groupId };
char* value = NULL;
int32_t size = pAggSup->resultRowSize;
- if (streamStateAddIfNotExist(pState, &key, (void**)&value, &size) < 0) {
+ if (pStore->streamStateAddIfNotExist(pState, &key, (void**)&value, &size) < 0) {
return TSDB_CODE_OUT_OF_MEMORY;
}
+
*pResult = (SRowBuffPos*)value;
SResultRow* res = (SResultRow*)((*pResult)->pRowBuff);
+
// set time window for current result
res->win = (*win);
setResultRowInitCtx(res, pCtx, numOfOutput, rowEntryInfoOffset);
return TSDB_CODE_SUCCESS;
}
-bool isDeletedStreamWindow(STimeWindow* pWin, uint64_t groupId, SStreamState* pState, STimeWindowAggSupp* pTwSup) {
+bool isDeletedStreamWindow(STimeWindow* pWin, uint64_t groupId, void* pState, STimeWindowAggSupp* pTwSup, SStateStore* pStore) {
if (pTwSup->maxTs != INT64_MIN && pWin->ekey < pTwSup->maxTs - pTwSup->deleteMark) {
SWinKey key = {.ts = pWin->skey, .groupId = groupId};
- if (!hasIntervalWindow(pState, &key)) {
+ if (!hasIntervalWindow(pState, &key, pStore)) {
return true;
}
return false;
@@ -2098,14 +2076,12 @@ void addPullWindow(SHashObj* pMap, SWinKey* pWinRes, int32_t size) {
taosHashPut(pMap, pWinRes, sizeof(SWinKey), &childIds, sizeof(void*));
}
-static int32_t getChildIndex(SSDataBlock* pBlock) { return pBlock->info.childId; }
-
static void clearStreamIntervalOperator(SStreamIntervalOperatorInfo* pInfo) {
tSimpleHashClear(pInfo->aggSup.pResultRowHashTable);
clearDiskbasedBuf(pInfo->aggSup.pResultBuf);
initResultRowInfo(&pInfo->binfo.resultRowInfo);
pInfo->aggSup.currentPageId = -1;
- streamStateClear(pInfo->pState);
+ pInfo->statestore.streamStateClear(pInfo->pState);
}
static void clearSpecialDataBlock(SSDataBlock* pBlock) {
@@ -2143,29 +2119,46 @@ static void doBuildPullDataBlock(SArray* array, int32_t* pIndex, SSDataBlock* pB
blockDataUpdateTsWindow(pBlock, 0);
}
-void processPullOver(SSDataBlock* pBlock, SHashObj* pMap, SInterval* pInterval) {
- SColumnInfoData* pStartCol = taosArrayGet(pBlock->pDataBlock, START_TS_COLUMN_INDEX);
+void processPullOver(SSDataBlock* pBlock, SHashObj* pMap, SHashObj* pFinalMap, SInterval* pInterval, SArray* pPullWins, int32_t numOfCh, SOperatorInfo* pOperator) {
+ SColumnInfoData* pStartCol = taosArrayGet(pBlock->pDataBlock, CALCULATE_START_TS_COLUMN_INDEX);
TSKEY* tsData = (TSKEY*)pStartCol->pData;
- SColumnInfoData* pEndCol = taosArrayGet(pBlock->pDataBlock, END_TS_COLUMN_INDEX);
+ SColumnInfoData* pEndCol = taosArrayGet(pBlock->pDataBlock, CALCULATE_END_TS_COLUMN_INDEX);
TSKEY* tsEndData = (TSKEY*)pEndCol->pData;
SColumnInfoData* pGroupCol = taosArrayGet(pBlock->pDataBlock, GROUPID_COLUMN_INDEX);
uint64_t* groupIdData = (uint64_t*)pGroupCol->pData;
int32_t chId = getChildIndex(pBlock);
for (int32_t i = 0; i < pBlock->info.rows; i++) {
TSKEY winTs = tsData[i];
- while (winTs < tsEndData[i]) {
+ while (winTs <= tsEndData[i]) {
SWinKey winRes = {.ts = winTs, .groupId = groupIdData[i]};
void* chIds = taosHashGet(pMap, &winRes, sizeof(SWinKey));
if (chIds) {
SArray* chArray = *(SArray**)chIds;
int32_t index = taosArraySearchIdx(chArray, &chId, compareInt32Val, TD_EQ);
if (index != -1) {
- qDebug("===stream===window %" PRId64 " delete child id %d", winRes.ts, chId);
+ qDebug("===stream===retrive window %" PRId64 " delete child id %d", winRes.ts, chId);
taosArrayRemove(chArray, index);
if (taosArrayGetSize(chArray) == 0) {
// pull data is over
taosArrayDestroy(chArray);
taosHashRemove(pMap, &winRes, sizeof(SWinKey));
+ qDebug("===stream===retrive pull data over.window %" PRId64 , winRes.ts);
+
+ void* pFinalCh = taosHashGet(pFinalMap, &winRes, sizeof(SWinKey));
+ if (pFinalCh) {
+ taosHashRemove(pFinalMap, &winRes, sizeof(SWinKey));
+ doDeleteWindow(pOperator, winRes.ts, winRes.groupId);
+ STimeWindow nextWin = getFinalTimeWindow(winRes.ts, pInterval);
+ SPullWindowInfo pull = {.window = nextWin,
+ .groupId = winRes.groupId,
+ .calWin.skey = nextWin.skey,
+ .calWin.ekey = nextWin.skey};
+ // add pull data request
+ if (savePullWindow(&pull, pPullWins) == TSDB_CODE_SUCCESS) {
+ addPullWindow(pMap, &winRes, numOfCh);
+ qDebug("===stream===prepare final retrive for delete %" PRId64 ", size:%d", winRes.ts, numOfCh);
+ }
+ }
}
}
}
@@ -2174,7 +2167,7 @@ void processPullOver(SSDataBlock* pBlock, SHashObj* pMap, SInterval* pInterval)
}
}
-static void addRetriveWindow(SArray* wins, SStreamIntervalOperatorInfo* pInfo) {
+static void addRetriveWindow(SArray* wins, SStreamIntervalOperatorInfo* pInfo, int32_t childId) {
int32_t size = taosArrayGetSize(wins);
for (int32_t i = 0; i < size; i++) {
SWinKey* winKey = taosArrayGet(wins, i);
@@ -2191,6 +2184,14 @@ static void addRetriveWindow(SArray* wins, SStreamIntervalOperatorInfo* pInfo) {
addPullWindow(pInfo->pPullDataMap, winKey, pInfo->numOfChild);
qDebug("===stream===prepare retrive for delete %" PRId64 ", size:%d", winKey->ts, pInfo->numOfChild);
}
+ } else {
+ SArray* chArray = *(void**)chIds;
+ int32_t index = taosArraySearchIdx(chArray, &childId, compareInt32Val, TD_EQ);
+ qDebug("===stream===check final retrive %" PRId64",chid:%d", winKey->ts, index);
+ if (index == -1) {
+ qDebug("===stream===add final retrive %" PRId64, winKey->ts);
+ taosHashPut(pInfo->pFinalPullDataMap, winKey, sizeof(SWinKey), NULL, 0);
+ }
}
}
}
@@ -2201,13 +2202,15 @@ static void clearFunctionContext(SExprSupp* pSup) {
}
}
-int32_t getOutputBuf(SStreamState* pState, SRowBuffPos* pPos, SResultRow** pResult) {
- return streamStateGetByPos(pState, pPos, (void**)pResult);
+int32_t getOutputBuf(void* pState, SRowBuffPos* pPos, SResultRow** pResult, SStateStore* pStore) {
+ return pStore->streamStateGetByPos(pState, pPos, (void**)pResult);
}
-int32_t buildDataBlockFromGroupRes(SOperatorInfo* pOperator, SStreamState* pState, SSDataBlock* pBlock, SExprSupp* pSup,
+int32_t buildDataBlockFromGroupRes(SOperatorInfo* pOperator, void* pState, SSDataBlock* pBlock, SExprSupp* pSup,
SGroupResInfo* pGroupResInfo) {
SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo;
+ SStorageAPI* pAPI = &pOperator->pTaskInfo->storageAPI;
+
SExprInfo* pExprInfo = pSup->pExprInfo;
int32_t numOfExprs = pSup->numOfExprs;
int32_t* rowEntryOffset = pSup->rowEntryInfoOffset;
@@ -2218,7 +2221,7 @@ int32_t buildDataBlockFromGroupRes(SOperatorInfo* pOperator, SStreamState* pStat
for (int32_t i = pGroupResInfo->index; i < numOfRows; i += 1) {
SRowBuffPos* pPos = *(SRowBuffPos**)taosArrayGet(pGroupResInfo->pRows, i);
SResultRow* pRow = NULL;
- int32_t code = getOutputBuf(pState, pPos, &pRow);
+ int32_t code = getOutputBuf(pState, pPos, &pRow, &pAPI->stateStore);
uint64_t groupId = ((SWinKey*)pPos->pKey)->groupId;
ASSERT(code == 0);
doUpdateNumOfRows(pCtx, pRow, numOfExprs, rowEntryOffset);
@@ -2230,12 +2233,12 @@ int32_t buildDataBlockFromGroupRes(SOperatorInfo* pOperator, SStreamState* pStat
if (pBlock->info.id.groupId == 0) {
pBlock->info.id.groupId = groupId;
void* tbname = NULL;
- if (streamStateGetParName(pTaskInfo->streamInfo.pState, pBlock->info.id.groupId, &tbname) < 0) {
+ if (pAPI->stateStore.streamStateGetParName(pTaskInfo->streamInfo.pState, pBlock->info.id.groupId, &tbname) < 0) {
pBlock->info.parTbName[0] = 0;
} else {
memcpy(pBlock->info.parTbName, tbname, TSDB_TABLE_NAME_LEN);
}
- streamFreeVal(tbname);
+ pAPI->stateStore.streamStateFreeVal(tbname);
} else {
// current value belongs to different group, it can't be packed into one datablock
if (pBlock->info.id.groupId != groupId) {
@@ -2282,7 +2285,7 @@ int32_t buildDataBlockFromGroupRes(SOperatorInfo* pOperator, SStreamState* pStat
return TSDB_CODE_SUCCESS;
}
-void doBuildStreamIntervalResult(SOperatorInfo* pOperator, SStreamState* pState, SSDataBlock* pBlock,
+void doBuildStreamIntervalResult(SOperatorInfo* pOperator, void* pState, SSDataBlock* pBlock,
SGroupResInfo* pGroupResInfo) {
SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo;
// set output datablock version
@@ -2342,7 +2345,7 @@ static void doStreamIntervalAggImpl(SOperatorInfo* pOperatorInfo, SSDataBlock* p
}
while (1) {
bool isClosed = isCloseWindow(&nextWin, &pInfo->twAggSup);
- if ((pInfo->ignoreExpiredData && isClosed) || !inSlidingWindow(&pInfo->interval, &nextWin, &pSDataBlock->info)) {
+ if ((pInfo->ignoreExpiredData && isClosed && !IS_FINAL_OP(pInfo)) || !inSlidingWindow(&pInfo->interval, &nextWin, &pSDataBlock->info)) {
startPos = getNexWindowPos(&pInfo->interval, &pSDataBlock->info, tsCols, startPos, nextWin.ekey, &nextWin);
if (startPos < 0) {
break;
@@ -2357,7 +2360,7 @@ static void doStreamIntervalAggImpl(SOperatorInfo* pOperatorInfo, SSDataBlock* p
.groupId = groupId,
};
void* chIds = taosHashGet(pInfo->pPullDataMap, &winRes, sizeof(SWinKey));
- if (isDeletedStreamWindow(&nextWin, groupId, pInfo->pState, &pInfo->twAggSup) && isClosed && !chIds) {
+ if (isDeletedStreamWindow(&nextWin, groupId, pInfo->pState, &pInfo->twAggSup, &pInfo->statestore) && isClosed && !chIds) {
SPullWindowInfo pull = {
.window = nextWin, .groupId = groupId, .calWin.skey = nextWin.skey, .calWin.ekey = nextWin.skey};
// add pull data request
@@ -2388,7 +2391,7 @@ static void doStreamIntervalAggImpl(SOperatorInfo* pOperatorInfo, SSDataBlock* p
}
int32_t code = setIntervalOutputBuf(pInfo->pState, &nextWin, &pResPos, groupId, pSup->pCtx, numOfOutput,
- pSup->rowEntryInfoOffset, &pInfo->aggSup);
+ pSup->rowEntryInfoOffset, &pInfo->aggSup, &pInfo->statestore);
pResult = (SResultRow*)pResPos->pRowBuff;
if (code != TSDB_CODE_SUCCESS || pResult == NULL) {
T_LONG_JMP(pTaskInfo->env, TSDB_CODE_OUT_OF_MEMORY);
@@ -2467,9 +2470,20 @@ static inline int winPosCmprImpl(const void* pKey1, const void* pKey2) {
return 0;
}
+static void resetUnCloseWinInfo(SSHashObj* winMap) {
+ void* pIte = NULL;
+ int32_t iter = 0;
+ while ((pIte = tSimpleHashIterate(winMap, pIte, &iter)) != NULL) {
+ SRowBuffPos* pPos = *(SRowBuffPos**)pIte;
+ pPos->beUsed = true;
+ }
+}
+
static SSDataBlock* doStreamFinalIntervalAgg(SOperatorInfo* pOperator) {
SStreamIntervalOperatorInfo* pInfo = pOperator->info;
SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo;
+ SStorageAPI* pAPI = &pOperator->pTaskInfo->storageAPI;
+
SOperatorInfo* downstream = pOperator->pDownstream[0];
SExprSupp* pSup = &pOperator->exprSupp;
@@ -2498,6 +2512,11 @@ static SSDataBlock* doStreamFinalIntervalAgg(SOperatorInfo* pOperator) {
return pInfo->binfo.pRes;
}
+ if (pInfo->recvGetAll) {
+ pInfo->recvGetAll = false;
+ resetUnCloseWinInfo(pInfo->aggSup.pResultRowHashTable);
+ }
+
setOperatorCompleted(pOperator);
if (!IS_FINAL_OP(pInfo)) {
clearFunctionContext(&pOperator->exprSupp);
@@ -2508,8 +2527,8 @@ static SSDataBlock* doStreamFinalIntervalAgg(SOperatorInfo* pOperator) {
} else {
if (pInfo->twAggSup.maxTs > 0 &&
pInfo->twAggSup.maxTs - pInfo->twAggSup.checkPointInterval > pInfo->twAggSup.checkPointTs) {
- streamStateCommit(pInfo->pState);
- streamStateDeleteCheckPoint(pInfo->pState, pInfo->twAggSup.maxTs - pInfo->twAggSup.deleteMark);
+ pAPI->stateStore.streamStateCommit(pInfo->pState);
+ pAPI->stateStore.streamStateDeleteCheckPoint(pInfo->pState, pInfo->twAggSup.maxTs - pInfo->twAggSup.deleteMark);
pInfo->twAggSup.checkPointTs = pInfo->twAggSup.maxTs;
}
qDebug("===stream===interval final close");
@@ -2566,7 +2585,8 @@ static SSDataBlock* doStreamFinalIntervalAgg(SOperatorInfo* pOperator) {
SArray* delWins = taosArrayInit(8, sizeof(SWinKey));
doDeleteWindows(pOperator, &pInfo->interval, pBlock, delWins, pInfo->pUpdatedMap);
if (IS_FINAL_OP(pInfo)) {
- addRetriveWindow(delWins, pInfo);
+ int32_t chId = getChildIndex(pBlock);
+ addRetriveWindow(delWins, pInfo, chId);
if (pBlock->info.type != STREAM_CLEAR) {
taosArrayAddAll(pInfo->pDelWins, delWins);
}
@@ -2591,6 +2611,7 @@ static SSDataBlock* doStreamFinalIntervalAgg(SOperatorInfo* pOperator) {
break;
} else if (pBlock->info.type == STREAM_GET_ALL && IS_FINAL_OP(pInfo)) {
+ pInfo->recvGetAll = true;
getAllIntervalWindow(pInfo->aggSup.pResultRowHashTable, pInfo->pUpdatedMap);
continue;
} else if (pBlock->info.type == STREAM_RETRIEVE && !IS_FINAL_OP(pInfo)) {
@@ -2600,7 +2621,7 @@ static SSDataBlock* doStreamFinalIntervalAgg(SOperatorInfo* pOperator) {
}
continue;
} else if (pBlock->info.type == STREAM_PULL_OVER && IS_FINAL_OP(pInfo)) {
- processPullOver(pBlock, pInfo->pPullDataMap, &pInfo->interval);
+ processPullOver(pBlock, pInfo->pPullDataMap, pInfo->pFinalPullDataMap, &pInfo->interval, pInfo->pPullWins, pInfo->numOfChild, pOperator);
continue;
} else if (pBlock->info.type == STREAM_CREATE_CHILD_TABLE) {
return pBlock;
@@ -2710,6 +2731,8 @@ SOperatorInfo* createStreamFinalIntervalOperatorInfo(SOperatorInfo* downstream,
}
pOperator->pTaskInfo = pTaskInfo;
+ SStorageAPI* pAPI = &pTaskInfo->storageAPI;
+
pInfo->interval = (SInterval){.interval = pIntervalPhyNode->interval,
.sliding = pIntervalPhyNode->sliding,
.intervalUnit = pIntervalPhyNode->intervalUnit,
@@ -2735,7 +2758,7 @@ SOperatorInfo* createStreamFinalIntervalOperatorInfo(SOperatorInfo* downstream,
if (pIntervalPhyNode->window.pExprs != NULL) {
int32_t numOfScalar = 0;
SExprInfo* pScalarExprInfo = createExprInfo(pIntervalPhyNode->window.pExprs, NULL, &numOfScalar);
- int32_t code = initExprSupp(&pInfo->scalarSupp, pScalarExprInfo, numOfScalar);
+ int32_t code = initExprSupp(&pInfo->scalarSupp, pScalarExprInfo, numOfScalar, &pTaskInfo->storageAPI.functionStore);
if (code != TSDB_CODE_SUCCESS) {
goto _error;
}
@@ -2748,18 +2771,18 @@ SOperatorInfo* createStreamFinalIntervalOperatorInfo(SOperatorInfo* downstream,
pInfo->pState = taosMemoryCalloc(1, sizeof(SStreamState));
*(pInfo->pState) = *(pTaskInfo->streamInfo.pState);
- streamStateSetNumber(pInfo->pState, -1);
+
+ pAPI->stateStore.streamStateSetNumber(pInfo->pState, -1);
int32_t code = initAggSup(&pOperator->exprSupp, &pInfo->aggSup, pExprInfo, numOfCols, keyBufSize, pTaskInfo->id.str,
- pInfo->pState);
+ pInfo->pState, &pTaskInfo->storageAPI.functionStore);
if (code != TSDB_CODE_SUCCESS) {
goto _error;
}
initStreamFunciton(pOperator->exprSupp.pCtx, pOperator->exprSupp.numOfExprs);
-
initExecTimeWindowInfo(&pInfo->twAggSup.timeWindowData, &pTaskInfo->window);
-
initResultRowInfo(&pInfo->binfo.resultRowInfo);
+
pInfo->numOfChild = numOfChild;
pInfo->pPhyNode = (SPhysiNode*)nodesCloneNode((SNode*)pPhyNode);
@@ -2776,10 +2799,12 @@ SOperatorInfo* createStreamFinalIntervalOperatorInfo(SOperatorInfo* downstream,
if (!IS_FINAL_OP(pInfo) || numOfChild == 0) {
pInfo->twAggSup.calTrigger = STREAM_TRIGGER_AT_ONCE;
}
+
pInfo->pPullWins = taosArrayInit(8, sizeof(SPullWindowInfo));
pInfo->pullIndex = 0;
_hash_fn_t hashFn = taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY);
pInfo->pPullDataMap = taosHashInit(64, hashFn, false, HASH_NO_LOCK);
+ pInfo->pFinalPullDataMap = taosHashInit(64, hashFn, false, HASH_NO_LOCK);
pInfo->pPullDataRes = createSpecialDataBlock(STREAM_RETRIEVE);
pInfo->ignoreExpiredData = pIntervalPhyNode->window.igExpired;
pInfo->ignoreExpiredDataSaved = false;
@@ -2792,9 +2817,11 @@ SOperatorInfo* createStreamFinalIntervalOperatorInfo(SOperatorInfo* downstream,
pInfo->pUpdated = NULL;
pInfo->pUpdatedMap = NULL;
int32_t funResSize= getMaxFunResSize(&pOperator->exprSupp, numOfCols);
- pInfo->pState->pFileState = streamFileStateInit(tsStreamBufferSize, sizeof(SWinKey), pInfo->aggSup.resultRowSize, funResSize,
+ pInfo->pState->pFileState = pAPI->stateStore.streamFileStateInit(tsStreamBufferSize, sizeof(SWinKey), pInfo->aggSup.resultRowSize, funResSize,
compareTs, pInfo->pState, pInfo->twAggSup.deleteMark);
pInfo->dataVersion = 0;
+ pInfo->statestore = pTaskInfo->storageAPI.stateStore;
+ pInfo->recvGetAll = false;
pOperator->operatorType = pPhyNode->type;
pOperator->blocking = true;
@@ -2851,9 +2878,9 @@ void destroyStreamSessionAggOperatorInfo(void* param) {
}
int32_t initBasicInfoEx(SOptrBasicInfo* pBasicInfo, SExprSupp* pSup, SExprInfo* pExprInfo, int32_t numOfCols,
- SSDataBlock* pResultBlock) {
+ SSDataBlock* pResultBlock, SFunctionStateStore* pStore) {
initBasicInfo(pBasicInfo, pResultBlock);
- int32_t code = initExprSupp(pSup, pExprInfo, numOfCols);
+ int32_t code = initExprSupp(pSup, pExprInfo, numOfCols, pStore);
if (code != TSDB_CODE_SUCCESS) {
return code;
}
@@ -2889,14 +2916,14 @@ void initDownStream(SOperatorInfo* downstream, SStreamAggSupporter* pAggSup, uin
SStreamScanInfo* pScanInfo = downstream->info;
pScanInfo->windowSup = (SWindowSupporter){.pStreamAggSup = pAggSup, .gap = pAggSup->gap, .parentType = type};
pScanInfo->pState = pAggSup->pState;
- if ((!pScanInfo->igCheckUpdate || type == QUERY_NODE_PHYSICAL_PLAN_STREAM_STATE) && !pScanInfo->pUpdateInfo) {
- pScanInfo->pUpdateInfo = updateInfoInit(60000, TSDB_TIME_PRECISION_MILLI, pTwSup->waterMark);
+ if (!pScanInfo->pUpdateInfo) {
+ pScanInfo->pUpdateInfo = pAggSup->stateStore.updateInfoInit(60000, TSDB_TIME_PRECISION_MILLI, pTwSup->waterMark);
}
pScanInfo->twAggSup = *pTwSup;
}
int32_t initStreamAggSupporter(SStreamAggSupporter* pSup, SqlFunctionCtx* pCtx, int32_t numOfOutput, int64_t gap,
- SStreamState* pState, int32_t keySize, int16_t keyType) {
+ SStreamState* pState, int32_t keySize, int16_t keyType, SStateStore* pStore) {
pSup->resultRowSize = keySize + getResultRowSize(pCtx, numOfOutput);
pSup->pScanBlock = createSpecialDataBlock(STREAM_CLEAR);
pSup->gap = gap;
@@ -2907,10 +2934,12 @@ int32_t initStreamAggSupporter(SStreamAggSupporter* pSup, SqlFunctionCtx* pCtx,
return TSDB_CODE_OUT_OF_MEMORY;
}
+ pSup->stateStore = *pStore;
+
initDummyFunction(pSup->pDummyCtx, pCtx, numOfOutput);
pSup->pState = taosMemoryCalloc(1, sizeof(SStreamState));
*(pSup->pState) = *pState;
- streamStateSetNumber(pSup->pState, -1);
+ pSup->stateStore.streamStateSetNumber(pSup->pState, -1);
_hash_fn_t hashFn = taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY);
pSup->pResultRows = tSimpleHashInit(32, hashFn);
@@ -2924,11 +2953,13 @@ int32_t initStreamAggSupporter(SStreamAggSupporter* pSup, SqlFunctionCtx* pCtx,
if (bufSize <= pageSize) {
bufSize = pageSize * 4;
}
+
if (!osTempSpaceAvailable()) {
terrno = TSDB_CODE_NO_DISKSPACE;
qError("Init stream agg supporter failed since %s, tempDir:%s", terrstr(), tsTempDir);
return terrno;
}
+
int32_t code = createDiskbasedBuf(&pSup->pResultBuf, pageSize, bufSize, "function", tsTempDir);
for (int32_t i = 0; i < numOfOutput; ++i) {
pCtx[i].saveHandle.pBuf = pSup->pResultBuf;
@@ -2953,7 +2984,7 @@ void getCurSessionWindow(SStreamAggSupporter* pAggSup, TSKEY startTs, TSKEY endT
pKey->win.skey = startTs;
pKey->win.ekey = endTs;
pKey->groupId = groupId;
- int32_t code = streamStateSessionGetKeyByRange(pAggSup->pState, pKey, pKey);
+ int32_t code = pAggSup->stateStore.streamStateSessionGetKeyByRange(pAggSup->pState, pKey, pKey);
if (code != TSDB_CODE_SUCCESS) {
SET_SESSION_WIN_KEY_INVALID(pKey);
}
@@ -2967,8 +2998,8 @@ void setSessionOutputBuf(SStreamAggSupporter* pAggSup, TSKEY startTs, TSKEY endT
pCurWin->sessionWin.win.skey = startTs;
pCurWin->sessionWin.win.ekey = endTs;
int32_t size = pAggSup->resultRowSize;
- int32_t code =
- streamStateSessionAddIfNotExist(pAggSup->pState, &pCurWin->sessionWin, pAggSup->gap, &pCurWin->pOutputBuf, &size);
+ int32_t code = pAggSup->stateStore.streamStateSessionAddIfNotExist(pAggSup->pState, &pCurWin->sessionWin,
+ pAggSup->gap, &pCurWin->pOutputBuf, &size);
if (code == TSDB_CODE_SUCCESS) {
pCurWin->isOutput = true;
} else {
@@ -2979,11 +3010,12 @@ void setSessionOutputBuf(SStreamAggSupporter* pAggSup, TSKEY startTs, TSKEY endT
int32_t getSessionWinBuf(SStreamAggSupporter* pAggSup, SStreamStateCur* pCur, SResultWindowInfo* pWinInfo) {
int32_t size = 0;
- int32_t code = streamStateSessionGetKVByCur(pCur, &pWinInfo->sessionWin, &pWinInfo->pOutputBuf, &size);
+ int32_t code = pAggSup->stateStore.streamStateSessionGetKVByCur(pCur, &pWinInfo->sessionWin, &pWinInfo->pOutputBuf, &size);
if (code != TSDB_CODE_SUCCESS) {
return code;
}
- streamStateCurNext(pAggSup->pState, pCur);
+
+ pAggSup->stateStore.streamStateCurNext(pAggSup->pState, pCur);
return TSDB_CODE_SUCCESS;
}
void saveDeleteInfo(SArray* pWins, SSessionKey key) {
@@ -3068,7 +3100,7 @@ static int32_t doOneWindowAggImpl(SColumnInfoData* pTimeWindowData, SResultWindo
}
static bool doDeleteSessionWindow(SStreamAggSupporter* pAggSup, SSessionKey* pKey) {
- streamStateSessionDel(pAggSup->pState, pKey);
+ pAggSup->stateStore.streamStateSessionDel(pAggSup->pState, pKey);
SSessionKey hashKey = {0};
getSessionHashKey(pKey, &hashKey);
tSimpleHashRemove(pAggSup->pResultRows, &hashKey, sizeof(SSessionKey));
@@ -3086,12 +3118,12 @@ static int32_t setSessionWinOutputInfo(SSHashObj* pStUpdated, SResultWindowInfo*
SStreamStateCur* getNextSessionWinInfo(SStreamAggSupporter* pAggSup, SSHashObj* pStUpdated, SResultWindowInfo* pCurWin,
SResultWindowInfo* pNextWin) {
- SStreamStateCur* pCur = streamStateSessionSeekKeyNext(pAggSup->pState, &pCurWin->sessionWin);
+ SStreamStateCur* pCur = pAggSup->stateStore.streamStateSessionSeekKeyNext(pAggSup->pState, &pCurWin->sessionWin);
pNextWin->isOutput = true;
setSessionWinOutputInfo(pStUpdated, pNextWin);
int32_t size = 0;
pNextWin->sessionWin = pCurWin->sessionWin;
- int32_t code = streamStateSessionGetKVByCur(pCur, &pNextWin->sessionWin, &pNextWin->pOutputBuf, &size);
+ int32_t code = pAggSup->stateStore.streamStateSessionGetKVByCur(pCur, &pNextWin->sessionWin, &pNextWin->pOutputBuf, &size);
if (code != TSDB_CODE_SUCCESS) {
taosMemoryFreeClear(pNextWin->pOutputBuf);
SET_SESSION_WIN_INVALID(*pNextWin);
@@ -3103,6 +3135,8 @@ static void compactSessionWindow(SOperatorInfo* pOperator, SResultWindowInfo* pC
SSHashObj* pStDeleted) {
SExprSupp* pSup = &pOperator->exprSupp;
SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo;
+ SStorageAPI* pAPI = &pOperator->pTaskInfo->storageAPI;
+
SStreamSessionAggOperatorInfo* pInfo = pOperator->info;
SResultRow* pCurResult = NULL;
int32_t numOfOutput = pOperator->exprSupp.numOfExprs;
@@ -3114,7 +3148,7 @@ static void compactSessionWindow(SOperatorInfo* pOperator, SResultWindowInfo* pC
SStreamStateCur* pCur = getNextSessionWinInfo(pAggSup, pStUpdated, pCurWin, &winInfo);
if (!IS_VALID_SESSION_WIN(winInfo) || !isInWindow(pCurWin, winInfo.sessionWin.win.skey, pAggSup->gap)) {
taosMemoryFree(winInfo.pOutputBuf);
- streamStateFreeCur(pCur);
+ pAPI->stateStore.streamStateFreeCur(pCur);
break;
}
SResultRow* pWinResult = NULL;
@@ -3128,13 +3162,13 @@ static void compactSessionWindow(SOperatorInfo* pOperator, SResultWindowInfo* pC
}
removeSessionResult(pStUpdated, pAggSup->pResultRows, winInfo.sessionWin);
doDeleteSessionWindow(pAggSup, &winInfo.sessionWin);
- streamStateFreeCur(pCur);
+ pAPI->stateStore.streamStateFreeCur(pCur);
taosMemoryFree(winInfo.pOutputBuf);
}
}
int32_t saveSessionOutputBuf(SStreamAggSupporter* pAggSup, SResultWindowInfo* pWinInfo) {
- saveSessionDiscBuf(pAggSup->pState, &pWinInfo->sessionWin, pWinInfo->pOutputBuf, pAggSup->resultRowSize);
+ saveSessionDiscBuf(pAggSup->pState, &pWinInfo->sessionWin, pWinInfo->pOutputBuf, pAggSup->resultRowSize, &pAggSup->stateStore);
return TSDB_CODE_SUCCESS;
}
@@ -3254,6 +3288,8 @@ static int32_t copyUpdateResult(SSHashObj* pStUpdated, SArray* pUpdated) {
}
void doBuildDeleteDataBlock(SOperatorInfo* pOp, SSHashObj* pStDeleted, SSDataBlock* pBlock, void** Ite) {
+ SStorageAPI* pAPI = &pOp->pTaskInfo->storageAPI;
+
blockDataCleanup(pBlock);
int32_t size = tSimpleHashGetSize(pStDeleted);
if (size == 0) {
@@ -3282,14 +3318,14 @@ void doBuildDeleteDataBlock(SOperatorInfo* pOp, SSHashObj* pStDeleted, SSDataBlo
SColumnInfoData* pTableCol = taosArrayGet(pBlock->pDataBlock, TABLE_NAME_COLUMN_INDEX);
void* tbname = NULL;
- streamStateGetParName(pOp->pTaskInfo->streamInfo.pState, res->groupId, &tbname);
+ pAPI->stateStore.streamStateGetParName(pOp->pTaskInfo->streamInfo.pState, res->groupId, &tbname);
if (tbname == NULL) {
colDataSetNULL(pTableCol, pBlock->info.rows);
} else {
char parTbName[VARSTR_HEADER_SIZE + TSDB_TABLE_NAME_LEN];
STR_WITH_MAXSIZE_TO_VARSTR(parTbName, tbname, sizeof(parTbName));
colDataSetVal(pTableCol, pBlock->info.rows, (const char*)parTbName, false);
- streamFreeVal(tbname);
+ pAPI->stateStore.streamStateFreeVal(tbname);
}
pBlock->info.rows += 1;
}
@@ -3301,6 +3337,8 @@ void doBuildDeleteDataBlock(SOperatorInfo* pOp, SSHashObj* pStDeleted, SSDataBlo
static void rebuildSessionWindow(SOperatorInfo* pOperator, SArray* pWinArray, SSHashObj* pStUpdated) {
SExprSupp* pSup = &pOperator->exprSupp;
SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo;
+ SStorageAPI* pAPI = &pOperator->pTaskInfo->storageAPI;
+
int32_t size = taosArrayGetSize(pWinArray);
SStreamSessionAggOperatorInfo* pInfo = pOperator->info;
SStreamAggSupporter* pAggSup = &pInfo->streamAggSup;
@@ -3317,7 +3355,7 @@ static void rebuildSessionWindow(SOperatorInfo* pOperator, SArray* pWinArray, SS
SStreamAggSupporter* pChAggSup = &pChInfo->streamAggSup;
SSessionKey chWinKey = {0};
getSessionHashKey(pWinKey, &chWinKey);
- SStreamStateCur* pCur = streamStateSessionSeekKeyCurrentNext(pChAggSup->pState, &chWinKey);
+ SStreamStateCur* pCur = pAggSup->stateStore.streamStateSessionSeekKeyCurrentNext(pChAggSup->pState, &chWinKey);
SResultRow* pResult = NULL;
SResultRow* pChResult = NULL;
while (1) {
@@ -3344,7 +3382,7 @@ static void rebuildSessionWindow(SOperatorInfo* pOperator, SArray* pWinArray, SS
break;
}
}
- streamStateFreeCur(pCur);
+ pAPI->stateStore.streamStateFreeCur(pCur);
}
if (num > 0) {
saveSessionOutputBuf(pAggSup, &parentWin);
@@ -3409,7 +3447,7 @@ void initGroupResInfoFromArrayList(SGroupResInfo* pGroupResInfo, SArray* pArrayL
pGroupResInfo->pBuf = NULL;
}
-void doBuildSessionResult(SOperatorInfo* pOperator, SStreamState* pState, SGroupResInfo* pGroupResInfo,
+void doBuildSessionResult(SOperatorInfo* pOperator, void* pState, SGroupResInfo* pGroupResInfo,
SSDataBlock* pBlock) {
SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo;
// set output datablock version
@@ -3568,7 +3606,7 @@ SOperatorInfo* createStreamSessionAggOperatorInfo(SOperatorInfo* downstream, SPh
if (pSessionNode->window.pExprs != NULL) {
int32_t numOfScalar = 0;
SExprInfo* pScalarExprInfo = createExprInfo(pSessionNode->window.pExprs, NULL, &numOfScalar);
- code = initExprSupp(&pInfo->scalarSupp, pScalarExprInfo, numOfScalar);
+ code = initExprSupp(&pInfo->scalarSupp, pScalarExprInfo, numOfScalar, &pTaskInfo->storageAPI.functionStore);
if (code != TSDB_CODE_SUCCESS) {
goto _error;
}
@@ -3577,13 +3615,13 @@ SOperatorInfo* createStreamSessionAggOperatorInfo(SOperatorInfo* downstream, SPh
SExprInfo* pExprInfo = createExprInfo(pSessionNode->window.pFuncs, NULL, &numOfCols);
SSDataBlock* pResBlock = createDataBlockFromDescNode(pPhyNode->pOutputDataBlockDesc);
- code = initBasicInfoEx(&pInfo->binfo, pSup, pExprInfo, numOfCols, pResBlock);
+ code = initBasicInfoEx(&pInfo->binfo, pSup, pExprInfo, numOfCols, pResBlock, &pTaskInfo->storageAPI.functionStore);
if (code != TSDB_CODE_SUCCESS) {
goto _error;
}
code = initStreamAggSupporter(&pInfo->streamAggSup, pSup->pCtx, numOfCols, pSessionNode->gap,
- pTaskInfo->streamInfo.pState, 0, 0);
+ pTaskInfo->streamInfo.pState, 0, 0, &pTaskInfo->storageAPI.stateStore);
if (code != TSDB_CODE_SUCCESS) {
goto _error;
}
@@ -3639,7 +3677,7 @@ _error:
static void clearStreamSessionOperator(SStreamSessionAggOperatorInfo* pInfo) {
tSimpleHashClear(pInfo->streamAggSup.pResultRows);
- streamStateSessionClear(pInfo->streamAggSup.pState);
+ pInfo->streamAggSup.stateStore.streamStateSessionClear(pInfo->streamAggSup.pState);
}
static SSDataBlock* doStreamSessionSemiAgg(SOperatorInfo* pOperator) {
@@ -3765,6 +3803,7 @@ SOperatorInfo* createStreamFinalSessionAggOperatorInfo(SOperatorInfo* downstream
goto _error;
}
+ SStorageAPI* pAPI = &pTaskInfo->storageAPI;
SStreamSessionAggOperatorInfo* pInfo = pOperator->info;
pInfo->isFinal = (pPhyNode->type == QUERY_NODE_PHYSICAL_PLAN_STREAM_FINAL_SESSION);
@@ -3789,7 +3828,7 @@ SOperatorInfo* createStreamFinalSessionAggOperatorInfo(SOperatorInfo* downstream
}
SStreamSessionAggOperatorInfo* pChInfo = pChildOp->info;
pChInfo->twAggSup.calTrigger = STREAM_TRIGGER_AT_ONCE;
- streamStateSetNumber(pChInfo->streamAggSup.pState, i);
+ pAPI->stateStore.streamStateSetNumber(pChInfo->streamAggSup.pState, i);
taosArrayPush(pInfo->pChildren, &pChildOp);
}
}
@@ -3852,7 +3891,7 @@ void setStateOutputBuf(SStreamAggSupporter* pAggSup, TSKEY ts, uint64_t groupId,
pCurWin->winInfo.sessionWin.win.skey = ts;
pCurWin->winInfo.sessionWin.win.ekey = ts;
int32_t code =
- streamStateStateAddIfNotExist(pAggSup->pState, &pCurWin->winInfo.sessionWin, pKeyData, pAggSup->stateKeySize,
+ pAggSup->stateStore.streamStateStateAddIfNotExist(pAggSup->pState, &pCurWin->winInfo.sessionWin, pKeyData, pAggSup->stateKeySize,
compareStateKey, &pCurWin->winInfo.pOutputBuf, &size);
pCurWin->pStateKey =
(SStateKeys*)((char*)pCurWin->winInfo.pOutputBuf + (pAggSup->resultRowSize - pAggSup->stateKeySize));
@@ -3873,12 +3912,12 @@ void setStateOutputBuf(SStreamAggSupporter* pAggSup, TSKEY ts, uint64_t groupId,
pNextWin->winInfo.sessionWin = pCurWin->winInfo.sessionWin;
pNextWin->winInfo.pOutputBuf = NULL;
- SStreamStateCur* pCur = streamStateSessionSeekKeyNext(pAggSup->pState, &pCurWin->winInfo.sessionWin);
- code = streamStateSessionGetKVByCur(pCur, &pNextWin->winInfo.sessionWin, NULL, 0);
+ SStreamStateCur* pCur = pAggSup->stateStore.streamStateSessionSeekKeyNext(pAggSup->pState, &pCurWin->winInfo.sessionWin);
+ code = pAggSup->stateStore.streamStateSessionGetKVByCur(pCur, &pNextWin->winInfo.sessionWin, NULL, 0);
if (code != TSDB_CODE_SUCCESS) {
SET_SESSION_WIN_INVALID(pNextWin->winInfo);
}
- streamStateFreeCur(pCur);
+ pAggSup->stateStore.streamStateFreeCur(pCur);
}
int32_t updateStateWindowInfo(SStateWindowInfo* pWinInfo, SStateWindowInfo* pNextWin, TSKEY* pTs, uint64_t groupId,
@@ -3917,7 +3956,9 @@ int32_t updateStateWindowInfo(SStateWindowInfo* pWinInfo, SStateWindowInfo* pNex
static void doStreamStateAggImpl(SOperatorInfo* pOperator, SSDataBlock* pSDataBlock, SSHashObj* pSeUpdated,
SSHashObj* pStDeleted) {
- SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo;
+ SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo;
+ SStorageAPI* pAPI = &pOperator->pTaskInfo->storageAPI;
+
SStreamStateAggOperatorInfo* pInfo = pOperator->info;
int32_t numOfOutput = pOperator->exprSupp.numOfExprs;
uint64_t groupId = pSDataBlock->info.id.groupId;
@@ -3959,7 +4000,7 @@ static void doStreamStateAggImpl(SOperatorInfo* pOperator, SSDataBlock* pSDataBl
&curWin.winInfo.sessionWin.win.ekey, &uid, &groupId, NULL);
tSimpleHashRemove(pSeUpdated, &curWin.winInfo.sessionWin, sizeof(SSessionKey));
doDeleteSessionWindow(pAggSup, &curWin.winInfo.sessionWin);
- releaseOutputBuf(pAggSup->pState, NULL, (SResultRow*)curWin.winInfo.pOutputBuf);
+ releaseOutputBuf(pAggSup->pState, NULL, (SResultRow*)curWin.winInfo.pOutputBuf, &pAPI->stateStore);
continue;
}
code = doOneWindowAggImpl(&pInfo->twAggSup.timeWindowData, &curWin.winInfo, &pResult, i, winRows, rows, numOfOutput,
@@ -4103,7 +4144,7 @@ SOperatorInfo* createStreamStateAggOperatorInfo(SOperatorInfo* downstream, SPhys
if (pStateNode->window.pExprs != NULL) {
int32_t numOfScalar = 0;
SExprInfo* pScalarExprInfo = createExprInfo(pStateNode->window.pExprs, NULL, &numOfScalar);
- code = initExprSupp(&pInfo->scalarSupp, pScalarExprInfo, numOfScalar);
+ code = initExprSupp(&pInfo->scalarSupp, pScalarExprInfo, numOfScalar, &pTaskInfo->storageAPI.functionStore);
if (code != TSDB_CODE_SUCCESS) {
goto _error;
}
@@ -4122,14 +4163,14 @@ SOperatorInfo* createStreamStateAggOperatorInfo(SOperatorInfo* downstream, SPhys
int32_t numOfCols = 0;
SExprInfo* pExprInfo = createExprInfo(pStateNode->window.pFuncs, NULL, &numOfCols);
SSDataBlock* pResBlock = createDataBlockFromDescNode(pPhyNode->pOutputDataBlockDesc);
- code = initBasicInfoEx(&pInfo->binfo, pSup, pExprInfo, numOfCols, pResBlock);
+ code = initBasicInfoEx(&pInfo->binfo, pSup, pExprInfo, numOfCols, pResBlock, &pTaskInfo->storageAPI.functionStore);
if (code != TSDB_CODE_SUCCESS) {
goto _error;
}
int32_t keySize = sizeof(SStateKeys) + pColNode->node.resType.bytes;
int16_t type = pColNode->node.resType.type;
code = initStreamAggSupporter(&pInfo->streamAggSup, pSup->pCtx, numOfCols, 0, pTaskInfo->streamInfo.pState, keySize,
- type);
+ type, &pTaskInfo->storageAPI.stateStore);
if (code != TSDB_CODE_SUCCESS) {
goto _error;
}
@@ -4410,7 +4451,7 @@ SOperatorInfo* createMergeAlignedIntervalOperatorInfo(SOperatorInfo* downstream,
SExprInfo* pExprInfo = createExprInfo(pNode->window.pFuncs, NULL, &num);
code = initAggSup(&pOperator->exprSupp, &iaInfo->aggSup, pExprInfo, num, keyBufSize, pTaskInfo->id.str,
- pTaskInfo->streamInfo.pState);
+ pTaskInfo->streamInfo.pState, &pTaskInfo->storageAPI.functionStore);
if (code != TSDB_CODE_SUCCESS) {
goto _error;
}
@@ -4696,7 +4737,7 @@ SOperatorInfo* createMergeIntervalOperatorInfo(SOperatorInfo* downstream, SMerge
initResultSizeInfo(&pOperator->resultInfo, 4096);
int32_t code = initAggSup(pExprSupp, &pIntervalInfo->aggSup, pExprInfo, num, keyBufSize, pTaskInfo->id.str,
- pTaskInfo->streamInfo.pState);
+ pTaskInfo->streamInfo.pState, &pTaskInfo->storageAPI.functionStore);
if (code != TSDB_CODE_SUCCESS) {
goto _error;
}
@@ -4739,7 +4780,9 @@ _error:
static SSDataBlock* doStreamIntervalAgg(SOperatorInfo* pOperator) {
SStreamIntervalOperatorInfo* pInfo = pOperator->info;
SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo;
- SExprSupp* pSup = &pOperator->exprSupp;
+ SStorageAPI* pAPI = &pOperator->pTaskInfo->storageAPI;
+
+ SExprSupp* pSup = &pOperator->exprSupp;
if (pOperator->status == OP_EXEC_DONE) {
return NULL;
@@ -4757,11 +4800,17 @@ static SSDataBlock* doStreamIntervalAgg(SOperatorInfo* pOperator) {
printDataBlock(pInfo->binfo.pRes, "single interval");
return pInfo->binfo.pRes;
}
+
+ if (pInfo->recvGetAll) {
+ pInfo->recvGetAll = false;
+ resetUnCloseWinInfo(pInfo->aggSup.pResultRowHashTable);
+ }
+
setOperatorCompleted(pOperator);
if (pInfo->twAggSup.maxTs > 0 &&
pInfo->twAggSup.maxTs - pInfo->twAggSup.checkPointInterval > pInfo->twAggSup.checkPointTs) {
- streamStateCommit(pInfo->pState);
- streamStateDeleteCheckPoint(pInfo->pState, pInfo->twAggSup.maxTs - pInfo->twAggSup.deleteMark);
+ pAPI->stateStore.streamStateCommit(pInfo->pState);
+ pAPI->stateStore.streamStateDeleteCheckPoint(pInfo->pState, pInfo->twAggSup.maxTs - pInfo->twAggSup.deleteMark);
setStreamDataVersion(pTaskInfo, pInfo->dataVersion, pInfo->pState->checkPointId);
pInfo->twAggSup.checkPointTs = pInfo->twAggSup.maxTs;
}
@@ -4796,6 +4845,7 @@ static SSDataBlock* doStreamIntervalAgg(SOperatorInfo* pOperator) {
continue;
} else if (pBlock->info.type == STREAM_GET_ALL) {
qDebug("===stream===single interval recv|block type STREAM_GET_ALL");
+ pInfo->recvGetAll = true;
getAllIntervalWindow(pInfo->aggSup.pResultRowHashTable, pInfo->pUpdatedMap);
continue;
} else if (pBlock->info.type == STREAM_CREATE_CHILD_TABLE) {
@@ -4903,6 +4953,8 @@ SOperatorInfo* createStreamIntervalOperatorInfo(SOperatorInfo* downstream, SPhys
ASSERTS(pInfo->twAggSup.calTrigger != STREAM_TRIGGER_MAX_DELAY, "trigger type should not be max delay");
pOperator->pTaskInfo = pTaskInfo;
+ SStorageAPI* pAPI = &pOperator->pTaskInfo->storageAPI;
+
pInfo->ignoreExpiredData = pIntervalPhyNode->window.igExpired;
pInfo->ignoreExpiredDataSaved = false;
pInfo->isFinal = false;
@@ -4917,11 +4969,11 @@ SOperatorInfo* createStreamIntervalOperatorInfo(SOperatorInfo* downstream, SPhys
pInfo->pState = taosMemoryCalloc(1, sizeof(SStreamState));
*(pInfo->pState) = *(pTaskInfo->streamInfo.pState);
- streamStateSetNumber(pInfo->pState, -1);
+ pAPI->stateStore.streamStateSetNumber(pInfo->pState, -1);
size_t keyBufSize = sizeof(int64_t) + sizeof(int64_t) + POINTER_BYTES;
code = initAggSup(pSup, &pInfo->aggSup, pExprInfo, numOfCols, keyBufSize, pTaskInfo->id.str,
- pInfo->pState);
+ pInfo->pState, &pTaskInfo->storageAPI.functionStore);
if (code != TSDB_CODE_SUCCESS) {
goto _error;
}
@@ -4929,7 +4981,7 @@ SOperatorInfo* createStreamIntervalOperatorInfo(SOperatorInfo* downstream, SPhys
if (pIntervalPhyNode->window.pExprs != NULL) {
int32_t numOfScalar = 0;
SExprInfo* pScalarExprInfo = createExprInfo(pIntervalPhyNode->window.pExprs, NULL, &numOfScalar);
- code = initExprSupp(&pInfo->scalarSupp, pScalarExprInfo, numOfScalar);
+ code = initExprSupp(&pInfo->scalarSupp, pScalarExprInfo, numOfScalar, &pTaskInfo->storageAPI.functionStore);
if (code != TSDB_CODE_SUCCESS) {
goto _error;
}
@@ -4944,6 +4996,7 @@ SOperatorInfo* createStreamIntervalOperatorInfo(SOperatorInfo* downstream, SPhys
pInfo->pPhyNode = NULL; // create new child
pInfo->pPullDataMap = NULL;
+ pInfo->pFinalPullDataMap = NULL;
pInfo->pPullWins = NULL; // SPullWindowInfo
pInfo->pullIndex = 0;
pInfo->pPullDataRes = NULL;
@@ -4955,7 +5008,7 @@ SOperatorInfo* createStreamIntervalOperatorInfo(SOperatorInfo* downstream, SPhys
pInfo->pUpdated = NULL;
pInfo->pUpdatedMap = NULL;
int32_t funResSize= getMaxFunResSize(pSup, numOfCols);
- pInfo->pState->pFileState = streamFileStateInit(tsStreamBufferSize, sizeof(SWinKey), pInfo->aggSup.resultRowSize, funResSize,
+ pInfo->pState->pFileState = pTaskInfo->storageAPI.stateStore.streamFileStateInit(tsStreamBufferSize, sizeof(SWinKey), pInfo->aggSup.resultRowSize, funResSize,
compareTs, pInfo->pState, pInfo->twAggSup.deleteMark);
setOperatorInfo(pOperator, "StreamIntervalOperator", QUERY_NODE_PHYSICAL_PLAN_STREAM_INTERVAL, true, OP_NOT_OPENED,
@@ -4963,6 +5016,9 @@ SOperatorInfo* createStreamIntervalOperatorInfo(SOperatorInfo* downstream, SPhys
pOperator->fpSet = createOperatorFpSet(optrDummyOpenFn, doStreamIntervalAgg, NULL,
destroyStreamFinalIntervalOperatorInfo, optrDefaultBufFn, NULL);
+ pInfo->statestore = pTaskInfo->storageAPI.stateStore;
+ pInfo->recvGetAll = false;
+
initIntervalDownStream(downstream, pPhyNode->type, pInfo);
code = appendDownstream(pOperator, &downstream, 1);
if (code != TSDB_CODE_SUCCESS) {
diff --git a/source/libs/executor/test/timewindowTest.cpp b/source/libs/executor/test/timewindowTest.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..2894c665870e79dc93d01565f7fffe87d8552c86
--- /dev/null
+++ b/source/libs/executor/test/timewindowTest.cpp
@@ -0,0 +1,161 @@
+/*
+ * Copyright (c) 2019 TAOS Data, Inc.
+ *
+ * This program is free software: you can use, redistribute, and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3
+ * or later ("AGPL"), as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see .
+ */
+
+#include
+#include
+#include "taos.h"
+#include "thash.h"
+#include "tsimplehash.h"
+#include "executor.h"
+#include "ttime.h"
+
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wwrite-strings"
+#pragma GCC diagnostic ignored "-Wunused-function"
+#pragma GCC diagnostic ignored "-Wunused-variable"
+#pragma GCC diagnostic ignored "-Wsign-compare"
+
+namespace {
+SInterval createInterval(int64_t interval, int64_t sliding, int64_t offset, char intervalUnit, char slidingUnit,
+ char offsetUnit, int8_t precision) {
+ SInterval v = {0};
+ v.interval = interval;
+ v.intervalUnit = intervalUnit;
+ v.sliding = sliding;
+ v.slidingUnit = slidingUnit;
+ v.offset = offset;
+ v.offsetUnit = offsetUnit;
+ v.precision = precision;
+ return v;
+}
+
+void printTimeWindow(STimeWindow* pWindow, int8_t precision, int64_t ts) {
+ char buf[64] = {0};
+ char bufs[64] = {0};
+ char bufe[64] = {0};
+
+ taosFormatUtcTime(buf, tListLen(buf), ts, precision);
+
+ taosFormatUtcTime(bufs, tListLen(bufs), pWindow->skey, precision);
+ taosFormatUtcTime(bufe, tListLen(bufe), pWindow->ekey, precision);
+
+ printf("%s [%s - %s]\n", buf, bufs, bufe);
+}
+} // namespace
+
+TEST(testCase, timewindow_gen) {
+ // set correct time zone
+ osSetTimezone("UTC");
+ int32_t precision = TSDB_TIME_PRECISION_MILLI;
+
+ SInterval interval =
+ createInterval(10 * 86400 * 1000, 10 * 86400 * 1000, 0, 'd', 'd', 'd', precision);
+
+ int64_t key = 1659312000L * 1000; // 2022-8-1 00:00:00 // UTC+8 (ms)
+
+ STimeWindow w = {0};
+ getInitialStartTimeWindow(&interval, key, &w, true);
+ printTimeWindow(&w, precision, key);
+
+ getNextTimeWindow(&interval, &w, TSDB_ORDER_ASC);
+ printf("next\n");
+ printTimeWindow(&w, precision, key);
+
+ printf("---------------------------------------------------\n");
+ SInterval monthInterval =
+ createInterval(1, 1, 0, 'n', 'n', 'd', TSDB_TIME_PRECISION_MILLI);
+ getInitialStartTimeWindow(&monthInterval, key, &w, true);
+ printTimeWindow(&w, precision, key);
+
+ getNextTimeWindow(&monthInterval, &w, TSDB_ORDER_ASC);
+ printf("next\n");
+ printTimeWindow(&w, precision, key);
+
+ printf("----------------------------------------------------------\n");
+ SInterval slidingInterval = createInterval(1, 10*86400*1000, 0, 'n', 'd', 'd', TSDB_TIME_PRECISION_MILLI);
+ getInitialStartTimeWindow(&slidingInterval, key, &w, true);
+ printTimeWindow(&w, precision, key);
+
+ getNextTimeWindow(&slidingInterval, &w, TSDB_ORDER_ASC);
+ printf("next\n");
+ printTimeWindow(&w, precision, key);
+
+ getNextTimeWindow(&slidingInterval, &w, TSDB_ORDER_ASC);
+ printTimeWindow(&w, precision, key);
+
+ getNextTimeWindow(&slidingInterval, &w, TSDB_ORDER_ASC);
+ printTimeWindow(&w, precision, key);
+
+ getNextTimeWindow(&slidingInterval, &w, TSDB_ORDER_ASC);
+ printTimeWindow(&w, precision, key);
+
+ getNextTimeWindow(&slidingInterval, &w, TSDB_ORDER_ASC);
+ printTimeWindow(&w, precision, key);
+
+ getNextTimeWindow(&slidingInterval, &w, TSDB_ORDER_ASC);
+ printTimeWindow(&w, precision, key);
+
+ getNextTimeWindow(&slidingInterval, &w, TSDB_ORDER_ASC);
+ printTimeWindow(&w, precision, key);
+
+ getNextTimeWindow(&slidingInterval, &w, TSDB_ORDER_ASC);
+ printTimeWindow(&w, precision, key);
+
+ printf("-----------------calendar_interval_1n_sliding_1d-------\n");
+ SInterval calendar_interval_1n = createInterval(1, 1*86400*1000, 0, 'n', 'd', 'd', TSDB_TIME_PRECISION_MILLI);
+ int64_t k1 = 1664409600 * 1000L;
+ getInitialStartTimeWindow(&calendar_interval_1n, k1, &w, true);
+ printTimeWindow(&w, precision, k1);
+
+ printf("next\n");
+
+ getNextTimeWindow(&calendar_interval_1n, &w, TSDB_ORDER_ASC);
+ printTimeWindow(&w, precision, key);
+
+ getNextTimeWindow(&calendar_interval_1n, &w, TSDB_ORDER_ASC);
+ printTimeWindow(&w, precision, key);
+
+ getNextTimeWindow(&calendar_interval_1n, &w, TSDB_ORDER_ASC);
+ printTimeWindow(&w, precision, key);
+
+ getNextTimeWindow(&calendar_interval_1n, &w, TSDB_ORDER_ASC);
+ printTimeWindow(&w, precision, key);
+
+ printf("----------------interval_1d_clendar_sliding_1n---------\n");
+ SInterval interval_1d_calendar_sliding_1n = createInterval(1*86400*1000L, 1, 0, 'd', 'n', 'd', TSDB_TIME_PRECISION_MILLI);
+
+ k1 = 1664409600 * 1000L;
+ getInitialStartTimeWindow(&interval_1d_calendar_sliding_1n, k1, &w, true);
+ printTimeWindow(&w, precision, k1);
+
+ printf("next time window:\n");
+ getNextTimeWindow(&interval_1d_calendar_sliding_1n, &w, TSDB_ORDER_ASC);
+ printTimeWindow(&w, precision, k1);
+
+ getNextTimeWindow(&interval_1d_calendar_sliding_1n, &w, TSDB_ORDER_ASC);
+ printTimeWindow(&w, precision, k1);
+
+ getNextTimeWindow(&interval_1d_calendar_sliding_1n, &w, TSDB_ORDER_ASC);
+ printTimeWindow(&w, precision, k1);
+
+ printf("----------------interval_1d_sliding_1d_calendar_offset_1n---------\n");
+ SInterval offset_1n = createInterval(10*86400*1000L, 10*86400*1000L, 1, 'd', 'd', 'n', TSDB_TIME_PRECISION_MILLI);
+ getInitialStartTimeWindow(&offset_1n, k1, &w, true);
+ printTimeWindow(&w, precision, k1);
+
+
+}
+
+#pragma GCC diagnostic pop
\ No newline at end of file
diff --git a/source/libs/function/CMakeLists.txt b/source/libs/function/CMakeLists.txt
index 3e1e84bdc5a5f511745fc573c4c3dc7ed37a1155..3a68648d493358c986b4fd52c736a85155c9d38b 100644
--- a/source/libs/function/CMakeLists.txt
+++ b/source/libs/function/CMakeLists.txt
@@ -34,7 +34,6 @@ target_link_libraries(
PRIVATE scalar
PRIVATE geometry
PRIVATE transport
- PRIVATE stream ${LINK_JEMALLOC}
PUBLIC uv_a
)
diff --git a/source/libs/function/src/builtinsimpl.c b/source/libs/function/src/builtinsimpl.c
index 1a43802e6b4677b8fc6879608b437b7d84a9668b..1265c64c8c1d832d1ce2916ec923eb6db44460d0 100644
--- a/source/libs/function/src/builtinsimpl.c
+++ b/source/libs/function/src/builtinsimpl.c
@@ -18,7 +18,6 @@
#include "function.h"
#include "query.h"
#include "querynodes.h"
-#include "streamState.h"
#include "tcompare.h"
#include "tdatablock.h"
#include "tdigest.h"
@@ -500,7 +499,7 @@ static int64_t getNumOfElems(SqlFunctionCtx* pCtx) {
*/
SInputColumnInfoData* pInput = &pCtx->input;
SColumnInfoData* pInputCol = pInput->pData[0];
- if (pInput->colDataSMAIsSet && pInput->totalRows == pInput->numOfRows && !IS_VAR_DATA_TYPE(pInputCol->info.type)) {
+ if (pInput->colDataSMAIsSet && pInput->totalRows == pInput->numOfRows) {
numOfElem = pInput->numOfRows - pInput->pColumnDataAgg[0]->numOfNull;
} else {
if (pInputCol->hasNull) {
@@ -1697,7 +1696,7 @@ int32_t percentileFinalize(SqlFunctionCtx* pCtx, SSDataBlock* pBlock) {
GET_TYPED_DATA(v, double, pVal->nType, &pVal->i);
- int32_t code = getPercentile(pMemBucket, v, &ppInfo->result);
+ code = getPercentile(pMemBucket, v, &ppInfo->result);
if (code != TSDB_CODE_SUCCESS) {
goto _fin_error;
}
@@ -3120,7 +3119,7 @@ void* serializeTupleData(const SSDataBlock* pSrcBlock, int32_t rowIndex, SSubsid
}
static int32_t doSaveTupleData(SSerializeDataHandle* pHandle, const void* pBuf, size_t length, SWinKey* key,
- STuplePos* pPos) {
+ STuplePos* pPos, SFunctionStateStore* pStore) {
STuplePos p = {0};
if (pHandle->pBuf != NULL) {
SFilePage* pPage = NULL;
@@ -3153,9 +3152,8 @@ static int32_t doSaveTupleData(SSerializeDataHandle* pHandle, const void* pBuf,
pPage->num += length;
setBufPageDirty(pPage, true);
releaseBufPage(pHandle->pBuf, pPage);
- } else {
- // other tuple save policy
- if (streamStateFuncPut(pHandle->pState, key, pBuf, length) >= 0) {
+ } else { // other tuple save policy
+ if (pStore->streamStateFuncPut(pHandle->pState, key, pBuf, length) >= 0) {
p.streamTupleKey = *key;
}
}
@@ -3179,10 +3177,10 @@ int32_t saveTupleData(SqlFunctionCtx* pCtx, int32_t rowIndex, const SSDataBlock*
}
char* buf = serializeTupleData(pSrcBlock, rowIndex, &pCtx->subsidiaries, pCtx->subsidiaries.buf);
- return doSaveTupleData(&pCtx->saveHandle, buf, pCtx->subsidiaries.rowLen, &key, pPos);
+ return doSaveTupleData(&pCtx->saveHandle, buf, pCtx->subsidiaries.rowLen, &key, pPos, pCtx->pStore);
}
-static int32_t doUpdateTupleData(SSerializeDataHandle* pHandle, const void* pBuf, size_t length, STuplePos* pPos) {
+static int32_t doUpdateTupleData(SSerializeDataHandle* pHandle, const void* pBuf, size_t length, STuplePos* pPos, SFunctionStateStore* pStore) {
if (pHandle->pBuf != NULL) {
SFilePage* pPage = getBufPage(pHandle->pBuf, pPos->pageId);
if (pPage == NULL) {
@@ -3192,7 +3190,7 @@ static int32_t doUpdateTupleData(SSerializeDataHandle* pHandle, const void* pBuf
setBufPageDirty(pPage, true);
releaseBufPage(pHandle->pBuf, pPage);
} else {
- streamStateFuncPut(pHandle->pState, &pPos->streamTupleKey, pBuf, length);
+ pStore->streamStateFuncPut(pHandle->pState, &pPos->streamTupleKey, pBuf, length);
}
return TSDB_CODE_SUCCESS;
@@ -3202,10 +3200,10 @@ int32_t updateTupleData(SqlFunctionCtx* pCtx, int32_t rowIndex, const SSDataBloc
prepareBuf(pCtx);
char* buf = serializeTupleData(pSrcBlock, rowIndex, &pCtx->subsidiaries, pCtx->subsidiaries.buf);
- return doUpdateTupleData(&pCtx->saveHandle, buf, pCtx->subsidiaries.rowLen, pPos);
+ return doUpdateTupleData(&pCtx->saveHandle, buf, pCtx->subsidiaries.rowLen, pPos, pCtx->pStore);
}
-static char* doLoadTupleData(SSerializeDataHandle* pHandle, const STuplePos* pPos) {
+static char* doLoadTupleData(SSerializeDataHandle* pHandle, const STuplePos* pPos, SFunctionStateStore* pStore) {
if (pHandle->pBuf != NULL) {
SFilePage* pPage = getBufPage(pHandle->pBuf, pPos->pageId);
if (pPage == NULL) {
@@ -3217,13 +3215,13 @@ static char* doLoadTupleData(SSerializeDataHandle* pHandle, const STuplePos* pPo
} else {
void* value = NULL;
int32_t vLen;
- streamStateFuncGet(pHandle->pState, &pPos->streamTupleKey, &value, &vLen);
+ pStore->streamStateFuncGet(pHandle->pState, &pPos->streamTupleKey, &value, &vLen);
return (char*)value;
}
}
const char* loadTupleData(SqlFunctionCtx* pCtx, const STuplePos* pPos) {
- return doLoadTupleData(&pCtx->saveHandle, pPos);
+ return doLoadTupleData(&pCtx->saveHandle, pPos, pCtx->pStore);
}
int32_t topBotFinalize(SqlFunctionCtx* pCtx, SSDataBlock* pBlock) {
@@ -4991,7 +4989,7 @@ static int32_t saveModeTupleData(SqlFunctionCtx* pCtx, char* data, SModeInfo *pI
memcpy(pInfo->buf, data, pInfo->colBytes);
}
- return doSaveTupleData(&pCtx->saveHandle, pInfo->buf, pInfo->colBytes, NULL, pPos);
+ return doSaveTupleData(&pCtx->saveHandle, pInfo->buf, pInfo->colBytes, NULL, pPos, pCtx->pStore);
}
static int32_t doModeAdd(SModeInfo* pInfo, int32_t rowIndex, SqlFunctionCtx* pCtx, char* data) {
@@ -5020,7 +5018,7 @@ static int32_t doModeAdd(SModeInfo* pInfo, int32_t rowIndex, SqlFunctionCtx* pCt
} else {
pHashItem->count += 1;
if (pCtx->subsidiaries.num > 0) {
- int32_t code = updateTupleData(pCtx, rowIndex, pCtx->pSrcBlock, &pHashItem->tuplePos);
+ code = updateTupleData(pCtx, rowIndex, pCtx->pSrcBlock, &pHashItem->tuplePos);
if (code != TSDB_CODE_SUCCESS) {
return code;
}
diff --git a/source/libs/function/src/tudf.c b/source/libs/function/src/tudf.c
index 6b70422ac8fe20bf8089eb8efba11ecc3c5e6ae5..31a7dfdbc530fb4e2e5742f725e23335ae18eb9f 100644
--- a/source/libs/function/src/tudf.c
+++ b/source/libs/function/src/tudf.c
@@ -791,7 +791,21 @@ int32_t convertDataBlockToUdfDataBlock(SSDataBlock *block, SUdfDataBlock *udfBlo
memcpy(udfCol->colData.varLenCol.varOffsets, col->varmeta.offset, udfCol->colData.varLenCol.varOffsetsLen);
udfCol->colData.varLenCol.payloadLen = colDataGetLength(col, udfBlock->numOfRows);
udfCol->colData.varLenCol.payload = taosMemoryMalloc(udfCol->colData.varLenCol.payloadLen);
- memcpy(udfCol->colData.varLenCol.payload, col->pData, udfCol->colData.varLenCol.payloadLen);
+ if (col->reassigned) {
+ for (int32_t row = 0; row < udfCol->colData.numOfRows; ++row) {
+ char* pColData = col->pData + col->varmeta.offset[row];
+ int32_t colSize = 0;
+ if (col->info.type == TSDB_DATA_TYPE_JSON) {
+ colSize = getJsonValueLen(pColData);
+ } else {
+ colSize = varDataTLen(pColData);
+ }
+ memcpy(udfCol->colData.varLenCol.payload, pColData, colSize);
+ udfCol->colData.varLenCol.payload += colSize;
+ }
+ } else {
+ memcpy(udfCol->colData.varLenCol.payload, col->pData, udfCol->colData.varLenCol.payloadLen);
+ }
} else {
udfCol->colData.fixLenCol.nullBitmapLen = BitmapLen(udfCol->colData.numOfRows);
int32_t bitmapLen = udfCol->colData.fixLenCol.nullBitmapLen;
diff --git a/source/libs/function/src/udfd.c b/source/libs/function/src/udfd.c
index b6ab7bd6f1c54c0b7cafebf5fa81eb602494633d..3b827a2f99a446b11236f53acda1fb4da0592e88 100644
--- a/source/libs/function/src/udfd.c
+++ b/source/libs/function/src/udfd.c
@@ -600,9 +600,9 @@ SUdf *udfdGetOrCreateUdf(const char *udfName) {
return udf;
} else {
(*pUdfHash)->expired = true;
- taosHashRemove(global.udfsHash, udfName, strlen(udfName));
fnInfo("udfd expired, check for new version. existing udf %s udf version %d, udf created time %" PRIx64,
(*pUdfHash)->name, (*pUdfHash)->version, (*pUdfHash)->createdTime);
+ taosHashRemove(global.udfsHash, udfName, strlen(udfName));
}
}
diff --git a/source/libs/index/CMakeLists.txt b/source/libs/index/CMakeLists.txt
index 0c2ce37c4030dbb018eb7871f396be60aa0fc076..6f3f48610c866f02b1d02f88bfc22d040a88a575 100644
--- a/source/libs/index/CMakeLists.txt
+++ b/source/libs/index/CMakeLists.txt
@@ -12,7 +12,6 @@ target_link_libraries(
PUBLIC os
PUBLIC util
PUBLIC common
- PUBLIC vnode
PUBLIC nodes
PUBLIC scalar
PUBLIC function
diff --git a/source/libs/index/src/indexFilter.c b/source/libs/index/src/indexFilter.c
index 02ed0d2d05270511244b329c3e05c1b8a311035a..2c12c840816266882ea7ccb5ab8a95b84be414ab 100644
--- a/source/libs/index/src/indexFilter.c
+++ b/source/libs/index/src/indexFilter.c
@@ -13,6 +13,7 @@
* along with this program. If not, see .
*/
+#include "filter.h"
#include "index.h"
#include "indexComm.h"
#include "indexInt.h"
@@ -20,7 +21,6 @@
#include "querynodes.h"
#include "scalar.h"
#include "tdatablock.h"
-#include "vnode.h"
// clang-format off
#define SIF_ERR_RET(c) do { int32_t _code = c; if (_code != TSDB_CODE_SUCCESS) { terrno = _code; return _code; } } while (0)
@@ -67,9 +67,8 @@ typedef union {
typedef struct SIFParam {
SHashObj *pFilter;
-
- SArray *result;
- char *condValue;
+ SArray *result;
+ char *condValue;
SIdxFltStatus status;
uint8_t colValType;
@@ -79,6 +78,7 @@ typedef struct SIFParam {
char colName[TSDB_COL_NAME_LEN * 2 + 4];
SIndexMetaArg arg;
+ SMetaDataFilterAPI api;
} SIFParam;
typedef struct SIFCtx {
@@ -86,6 +86,7 @@ typedef struct SIFCtx {
SHashObj *pRes; /* element is SIFParam */
bool noExec; // true: just iterate condition tree, and add hint to executor plan
SIndexMetaArg arg;
+ SMetaDataFilterAPI *pAPI;
} SIFCtx;
static FORCE_INLINE int32_t sifGetFuncFromSql(EOperatorType src, EIndexQueryType *dst) {
@@ -288,6 +289,8 @@ static int32_t sifInitParamValByCol(SNode *r, SNode *l, SIFParam *param, SIFCtx
}
static int32_t sifInitParam(SNode *node, SIFParam *param, SIFCtx *ctx) {
param->status = SFLT_COARSE_INDEX;
+ param->api = *ctx->pAPI;
+
switch (nodeType(node)) {
case QUERY_NODE_VALUE: {
SValueNode *vn = (SValueNode *)node;
@@ -364,6 +367,7 @@ static int32_t sifInitOperParams(SIFParam **params, SOperatorNode *node, SIFCtx
SIF_ERR_RET(TSDB_CODE_QRY_INVALID_INPUT);
}
}
+
SIFParam *paramList = taosMemoryCalloc(nParam, sizeof(SIFParam));
if (NULL == paramList) {
@@ -659,7 +663,7 @@ static int32_t sifDoIndex(SIFParam *left, SIFParam *right, int8_t operType, SIFP
} else {
if (sifSetFltParam(left, right, &typedata, ¶m) != 0) return -1;
}
- ret = metaFilterTableIds(arg->metaEx, ¶m, output->result);
+ ret = left->api.metaFilterTableIds(arg->metaEx, ¶m, output->result);
}
return ret;
}
@@ -972,8 +976,9 @@ static int32_t sifCalculate(SNode *pNode, SIFParam *pDst) {
if (pNode == NULL || pDst == NULL) {
return TSDB_CODE_QRY_INVALID_INPUT;
}
+
int32_t code = 0;
- SIFCtx ctx = {.code = 0, .noExec = false, .arg = pDst->arg};
+ SIFCtx ctx = {.code = 0, .noExec = false, .arg = pDst->arg, .pAPI = &pDst->api};
ctx.pRes = taosHashInit(4, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), false, HASH_NO_LOCK);
if (NULL == ctx.pRes) {
@@ -1006,13 +1011,13 @@ static int32_t sifCalculate(SNode *pNode, SIFParam *pDst) {
return code;
}
-static int32_t sifGetFltHint(SNode *pNode, SIdxFltStatus *status) {
+static int32_t sifGetFltHint(SNode *pNode, SIdxFltStatus *status, SMetaDataFilterAPI* pAPI) {
int32_t code = TSDB_CODE_SUCCESS;
if (pNode == NULL) {
return TSDB_CODE_QRY_INVALID_INPUT;
}
- SIFCtx ctx = {.code = 0, .noExec = true};
+ SIFCtx ctx = {.code = 0, .noExec = true, .pAPI = pAPI};
ctx.pRes = taosHashInit(4, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), false, HASH_NO_LOCK);
if (NULL == ctx.pRes) {
indexError("index-filter failed to taosHashInit");
@@ -1044,8 +1049,8 @@ static int32_t sifGetFltHint(SNode *pNode, SIdxFltStatus *status) {
return code;
}
-int32_t doFilterTag(SNode *pFilterNode, SIndexMetaArg *metaArg, SArray *result, SIdxFltStatus *status) {
- SIdxFltStatus st = idxGetFltStatus(pFilterNode);
+int32_t doFilterTag(SNode *pFilterNode, SIndexMetaArg *metaArg, SArray *result, SIdxFltStatus *status, SMetaDataFilterAPI* pAPI) {
+ SIdxFltStatus st = idxGetFltStatus(pFilterNode, pAPI);
if (st == SFLT_NOT_INDEX) {
*status = st;
return 0;
@@ -1054,7 +1059,7 @@ int32_t doFilterTag(SNode *pFilterNode, SIndexMetaArg *metaArg, SArray *result,
SFilterInfo *filter = NULL;
SArray *output = taosArrayInit(8, sizeof(uint64_t));
- SIFParam param = {.arg = *metaArg, .result = output, .status = SFLT_NOT_INDEX};
+ SIFParam param = {.arg = *metaArg, .result = output, .status = SFLT_NOT_INDEX, .api = *pAPI};
int32_t code = sifCalculate((SNode *)pFilterNode, ¶m);
if (code != 0) {
sifFreeParam(¶m);
@@ -1071,13 +1076,13 @@ int32_t doFilterTag(SNode *pFilterNode, SIndexMetaArg *metaArg, SArray *result,
return TSDB_CODE_SUCCESS;
}
-SIdxFltStatus idxGetFltStatus(SNode *pFilterNode) {
+SIdxFltStatus idxGetFltStatus(SNode *pFilterNode, SMetaDataFilterAPI* pAPI) {
SIdxFltStatus st = SFLT_NOT_INDEX;
if (pFilterNode == NULL) {
return SFLT_NOT_INDEX;
}
- if (sifGetFltHint((SNode *)pFilterNode, &st) != TSDB_CODE_SUCCESS) {
+ if (sifGetFltHint((SNode *)pFilterNode, &st, pAPI) != TSDB_CODE_SUCCESS) {
st = SFLT_NOT_INDEX;
}
return st;
diff --git a/source/libs/nodes/src/nodesCloneFuncs.c b/source/libs/nodes/src/nodesCloneFuncs.c
index 2115044228338df51ce82555895aa5dce2846b36..a8e4f692abcfbebd42390b2d8b41e6aaf7055f4c 100644
--- a/source/libs/nodes/src/nodesCloneFuncs.c
+++ b/source/libs/nodes/src/nodesCloneFuncs.c
@@ -402,6 +402,7 @@ static int32_t logicJoinCopy(const SJoinLogicNode* pSrc, SJoinLogicNode* pDst) {
COPY_SCALAR_FIELD(joinType);
CLONE_NODE_FIELD(pMergeCondition);
CLONE_NODE_FIELD(pOnConditions);
+ CLONE_NODE_FIELD(pColEqualOnConditions);
COPY_SCALAR_FIELD(isSingleTableJoin);
COPY_SCALAR_FIELD(inputTsOrder);
return TSDB_CODE_SUCCESS;
@@ -589,7 +590,7 @@ static int32_t physiSysTableScanCopy(const SSystemTableScanPhysiNode* pSrc, SSys
return TSDB_CODE_SUCCESS;
}
-static int32_t physiWindowCopy(const SWinodwPhysiNode* pSrc, SWinodwPhysiNode* pDst) {
+static int32_t physiWindowCopy(const SWindowPhysiNode* pSrc, SWindowPhysiNode* pDst) {
COPY_BASE_OBJECT_FIELD(node, physiNodeCopy);
CLONE_NODE_LIST_FIELD(pExprs);
CLONE_NODE_LIST_FIELD(pFuncs);
@@ -668,7 +669,7 @@ static int32_t selectStmtCopy(const SSelectStmt* pSrc, SSelectStmt* pDst) {
COPY_CHAR_ARRAY_FIELD(stmtName);
COPY_SCALAR_FIELD(precision);
COPY_SCALAR_FIELD(isEmptyResult);
- COPY_SCALAR_FIELD(isTimeLineResult);
+ COPY_SCALAR_FIELD(timeLineResMode);
COPY_SCALAR_FIELD(hasAggFuncs);
COPY_SCALAR_FIELD(hasRepeatScanFuncs);
return TSDB_CODE_SUCCESS;
diff --git a/source/libs/nodes/src/nodesCodeFuncs.c b/source/libs/nodes/src/nodesCodeFuncs.c
index a6d808608d13086382cb2c51653891271d8a7761..6bf1ad01a8d3b9aef17257658fcb4c9d749305ea 100644
--- a/source/libs/nodes/src/nodesCodeFuncs.c
+++ b/source/libs/nodes/src/nodesCodeFuncs.c
@@ -1428,6 +1428,7 @@ static int32_t jsonToLogicPlan(const SJson* pJson, void* pObj) {
static const char* jkJoinLogicPlanJoinType = "JoinType";
static const char* jkJoinLogicPlanOnConditions = "OnConditions";
static const char* jkJoinLogicPlanMergeCondition = "MergeConditions";
+static const char* jkJoinLogicPlanColEqualOnConditions = "ColumnEqualOnConditions";
static int32_t logicJoinNodeToJson(const void* pObj, SJson* pJson) {
const SJoinLogicNode* pNode = (const SJoinLogicNode*)pObj;
@@ -1442,7 +1443,9 @@ static int32_t logicJoinNodeToJson(const void* pObj, SJson* pJson) {
if (TSDB_CODE_SUCCESS == code) {
code = tjsonAddObject(pJson, jkJoinLogicPlanOnConditions, nodeToJson, pNode->pOnConditions);
}
-
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tjsonAddObject(pJson, jkJoinLogicPlanColEqualOnConditions, nodeToJson, pNode->pColEqualOnConditions);
+ }
return code;
}
@@ -1459,7 +1462,9 @@ static int32_t jsonToLogicJoinNode(const SJson* pJson, void* pObj) {
if (TSDB_CODE_SUCCESS == code) {
code = jsonToNodeObject(pJson, jkJoinLogicPlanOnConditions, &pNode->pOnConditions);
}
-
+ if (TSDB_CODE_SUCCESS == code) {
+ code = jsonToNodeObject(pJson, jkJoinLogicPlanColEqualOnConditions, &pNode->pColEqualOnConditions);
+ }
return code;
}
@@ -1890,6 +1895,7 @@ static const char* jkJoinPhysiPlanInputTsOrder = "InputTsOrder";
static const char* jkJoinPhysiPlanMergeCondition = "MergeCondition";
static const char* jkJoinPhysiPlanOnConditions = "OnConditions";
static const char* jkJoinPhysiPlanTargets = "Targets";
+static const char* jkJoinPhysiPlanColEqualOnConditions = "ColumnEqualOnConditions";
static int32_t physiJoinNodeToJson(const void* pObj, SJson* pJson) {
const SSortMergeJoinPhysiNode* pNode = (const SSortMergeJoinPhysiNode*)pObj;
@@ -1910,7 +1916,9 @@ static int32_t physiJoinNodeToJson(const void* pObj, SJson* pJson) {
if (TSDB_CODE_SUCCESS == code) {
code = nodeListToJson(pJson, jkJoinPhysiPlanTargets, pNode->pTargets);
}
-
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tjsonAddObject(pJson, jkJoinPhysiPlanColEqualOnConditions, nodeToJson, pNode->pColEqualOnConditions);
+ }
return code;
}
@@ -1933,7 +1941,9 @@ static int32_t jsonToPhysiJoinNode(const SJson* pJson, void* pObj) {
if (TSDB_CODE_SUCCESS == code) {
code = jsonToNodeList(pJson, jkJoinPhysiPlanTargets, &pNode->pTargets);
}
-
+ if (TSDB_CODE_SUCCESS == code) {
+ code = jsonToNodeObject(pJson, jkJoinPhysiPlanColEqualOnConditions, &pNode->pColEqualOnConditions);
+ }
return code;
}
@@ -2144,7 +2154,7 @@ static const char* jkWindowPhysiPlanOutputTsOrder = "outputTsOrder";
static const char* jkWindowPhysiPlanMergeDataBlock = "MergeDataBlock";
static int32_t physiWindowNodeToJson(const void* pObj, SJson* pJson) {
- const SWinodwPhysiNode* pNode = (const SWinodwPhysiNode*)pObj;
+ const SWindowPhysiNode* pNode = (const SWindowPhysiNode*)pObj;
int32_t code = physicPlanNodeToJson(pObj, pJson);
if (TSDB_CODE_SUCCESS == code) {
@@ -2185,7 +2195,7 @@ static int32_t physiWindowNodeToJson(const void* pObj, SJson* pJson) {
}
static int32_t jsonToPhysiWindowNode(const SJson* pJson, void* pObj) {
- SWinodwPhysiNode* pNode = (SWinodwPhysiNode*)pObj;
+ SWindowPhysiNode* pNode = (SWindowPhysiNode*)pObj;
int32_t code = jsonToPhysicPlanNode(pJson, pObj);
if (TSDB_CODE_SUCCESS == code) {
diff --git a/source/libs/nodes/src/nodesEqualFuncs.c b/source/libs/nodes/src/nodesEqualFuncs.c
index eed70cfccfd5c6a9478dc047a1df898de503f2b1..f755b8cb8c013132239aa2dc19ad25c83bb0322e 100644
--- a/source/libs/nodes/src/nodesEqualFuncs.c
+++ b/source/libs/nodes/src/nodesEqualFuncs.c
@@ -82,9 +82,7 @@ static bool columnNodeEqual(const SColumnNode* a, const SColumnNode* b) {
COMPARE_STRING_FIELD(dbName);
COMPARE_STRING_FIELD(tableName);
COMPARE_STRING_FIELD(colName);
- if (0 == a->tableId) {
- COMPARE_STRING_FIELD(tableAlias);
- }
+ COMPARE_STRING_FIELD(tableAlias);
return true;
}
diff --git a/source/libs/nodes/src/nodesMatchFuncs.c b/source/libs/nodes/src/nodesMatchFuncs.c
new file mode 100755
index 0000000000000000000000000000000000000000..401c7aad283c3b514349178f88332d329a18b005
--- /dev/null
+++ b/source/libs/nodes/src/nodesMatchFuncs.c
@@ -0,0 +1,180 @@
+/*
+ * Copyright (c) 2019 TAOS Data, Inc.
+ *
+ * This program is free software: you can use, redistribute, and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3
+ * or later ("AGPL"), as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see .
+ */
+
+#include "querynodes.h"
+
+#define MATCH_SCALAR_FIELD(fldname) \
+ do { \
+ if (p->fldname != pSub->fldname) return false; \
+ } while (0)
+
+#define MATCH_STRING(a, b) (((a) != NULL && (b) != NULL) ? (strcmp((a), (b)) == 0) : (a) == (b))
+
+#define MATCH_VARDATA(a, b) \
+ (((a) != NULL && (b) != NULL) \
+ ? (varDataLen((a)) == varDataLen((b)) && memcmp(varDataVal((a)), varDataVal((b)), varDataLen((a))) == 0) \
+ : (a) == (b))
+
+#define MATCH_STRING_FIELD(fldname) \
+ do { \
+ if (!MATCH_STRING(p->fldname, pSub->fldname)) return false; \
+ } while (0)
+
+#define MATCH_VARDATA_FIELD(fldname) \
+ do { \
+ if (!MATCH_VARDATA(p->fldname, pSub->fldname)) return false; \
+ } while (0)
+
+#define MATCH_OBJECT_FIELD(fldname, matchFunc) \
+ do { \
+ if (!matchFunc(p->fldname, pSub->fldname)) return false; \
+ } while (0)
+
+#define MATCH_NODE_FIELD(fldname) \
+ do { \
+ if (!nodesMatchNode(pSub->fldname, p->fldname)) return false; \
+ } while (0)
+
+#define MATCH_NODE_LIST_FIELD(fldname) \
+ do { \
+ if (!nodesListMatch(p->fldname, pSub->fldname)) return false; \
+ } while (0)
+
+
+bool nodesListMatchExists(const SNodeList* pList, const SNode* pTarget) {
+ if (NULL == pList || NULL == pTarget) {
+ return false;
+ }
+ SNode* node = NULL;
+ bool exists = false;
+ FOREACH(node, pList) {
+ if (nodesMatchNode(node, pTarget)) {
+ exists = true;
+ break;
+ }
+ }
+
+ return exists;
+}
+
+bool nodesListMatch(const SNodeList* pList, const SNodeList* pSubList) {
+ if (pList == pSubList) {
+ return true;
+ }
+
+ if (NULL == pList || NULL == pSubList) {
+ return false;
+ }
+
+ if (pList->length != pSubList->length) {
+ return false;
+ }
+
+ SNode* node = NULL;
+ bool match = true;
+ FOREACH(node, pList) {
+ if (!nodesListMatchExists(pSubList, node)) {
+ match = false;
+ break;
+ }
+ }
+ return match;
+}
+
+static bool columnNodeMatch(const SColumnNode* pSub, const SColumnNode* p) {
+ if (0 == strcmp(p->colName, pSub->node.aliasName)) {
+ return true;
+ }
+ return false;
+}
+
+static bool valueNodeMatch(const SValueNode* pSub, const SValueNode* p) {
+ return nodesEqualNode((SNode*)pSub, (SNode*)p);
+}
+
+static bool operatorNodeMatch(const SOperatorNode* pSub, const SOperatorNode* p) {
+ MATCH_SCALAR_FIELD(opType);
+ MATCH_NODE_FIELD(pLeft);
+ MATCH_NODE_FIELD(pRight);
+ return true;
+}
+
+static bool logicConditionNodeMatch(const SLogicConditionNode* pSub, const SLogicConditionNode* p) {
+ MATCH_SCALAR_FIELD(condType);
+ MATCH_NODE_LIST_FIELD(pParameterList);
+ return true;
+}
+
+static bool functionNodeMatch(const SFunctionNode* pSub, const SFunctionNode* p) {
+ MATCH_SCALAR_FIELD(funcId);
+ MATCH_STRING_FIELD(functionName);
+ MATCH_NODE_LIST_FIELD(pParameterList);
+ return true;
+}
+
+static bool whenThenNodeMatch(const SWhenThenNode* pSub, const SWhenThenNode* p) {
+ MATCH_NODE_FIELD(pWhen);
+ MATCH_NODE_FIELD(pThen);
+ return true;
+}
+
+static bool caseWhenNodeMatch(const SCaseWhenNode* pSub, const SCaseWhenNode* p) {
+ MATCH_NODE_FIELD(pCase);
+ MATCH_NODE_FIELD(pElse);
+ MATCH_NODE_LIST_FIELD(pWhenThenList);
+ return true;
+}
+
+bool nodesMatchNode(const SNode* pSub, const SNode* p) {
+ if (pSub == p) {
+ return true;
+ }
+
+ if (NULL == pSub || NULL == p) {
+ return false;
+ }
+
+ if (nodeType(pSub) != nodeType(p)) {
+ return false;
+ }
+
+ switch (nodeType(p)) {
+ case QUERY_NODE_COLUMN:
+ return columnNodeMatch((const SColumnNode*)pSub, (const SColumnNode*)p);
+ case QUERY_NODE_VALUE:
+ return valueNodeMatch((const SValueNode*)pSub, (const SValueNode*)p);
+ case QUERY_NODE_OPERATOR:
+ return operatorNodeMatch((const SOperatorNode*)pSub, (const SOperatorNode*)p);
+ case QUERY_NODE_LOGIC_CONDITION:
+ return logicConditionNodeMatch((const SLogicConditionNode*)pSub, (const SLogicConditionNode*)p);
+ case QUERY_NODE_FUNCTION:
+ return functionNodeMatch((const SFunctionNode*)pSub, (const SFunctionNode*)p);
+ case QUERY_NODE_WHEN_THEN:
+ return whenThenNodeMatch((const SWhenThenNode*)pSub, (const SWhenThenNode*)p);
+ case QUERY_NODE_CASE_WHEN:
+ return caseWhenNodeMatch((const SCaseWhenNode*)pSub, (const SCaseWhenNode*)p);
+ case QUERY_NODE_REAL_TABLE:
+ case QUERY_NODE_TEMP_TABLE:
+ case QUERY_NODE_JOIN_TABLE:
+ case QUERY_NODE_GROUPING_SET:
+ case QUERY_NODE_ORDER_BY_EXPR:
+ case QUERY_NODE_LIMIT:
+ return false;
+ default:
+ break;
+ }
+
+ return false;
+}
diff --git a/source/libs/nodes/src/nodesMsgFuncs.c b/source/libs/nodes/src/nodesMsgFuncs.c
index 4ffd38ee36fe6643f3a67b10f65579b0055febba..0631b91323146ed6dc37c2417971d5d49beafba4 100644
--- a/source/libs/nodes/src/nodesMsgFuncs.c
+++ b/source/libs/nodes/src/nodesMsgFuncs.c
@@ -2319,7 +2319,8 @@ enum {
PHY_SORT_MERGE_JOIN_CODE_MERGE_CONDITION,
PHY_SORT_MERGE_JOIN_CODE_ON_CONDITIONS,
PHY_SORT_MERGE_JOIN_CODE_TARGETS,
- PHY_SORT_MERGE_JOIN_CODE_INPUT_TS_ORDER
+ PHY_SORT_MERGE_JOIN_CODE_INPUT_TS_ORDER,
+ PHY_SORT_MERGE_JOIN_CODE_TAG_EQUAL_CONDITIONS
};
static int32_t physiJoinNodeToMsg(const void* pObj, STlvEncoder* pEncoder) {
@@ -2341,7 +2342,9 @@ static int32_t physiJoinNodeToMsg(const void* pObj, STlvEncoder* pEncoder) {
if (TSDB_CODE_SUCCESS == code) {
code = tlvEncodeEnum(pEncoder, PHY_SORT_MERGE_JOIN_CODE_INPUT_TS_ORDER, pNode->inputTsOrder);
}
-
+ if (TSDB_CODE_SUCCESS == code) {
+ code = tlvEncodeObj(pEncoder, PHY_SORT_MERGE_JOIN_CODE_TAG_EQUAL_CONDITIONS, nodeToMsg, pNode->pColEqualOnConditions);
+ }
return code;
}
@@ -2370,6 +2373,9 @@ static int32_t msgToPhysiJoinNode(STlvDecoder* pDecoder, void* pObj) {
case PHY_SORT_MERGE_JOIN_CODE_INPUT_TS_ORDER:
code = tlvDecodeEnum(pTlv, &pNode->inputTsOrder, sizeof(pNode->inputTsOrder));
break;
+ case PHY_SORT_MERGE_JOIN_CODE_TAG_EQUAL_CONDITIONS:
+ code = msgToNodeFromTlv(pTlv, (void**)&pNode->pColEqualOnConditions);
+ break;
default:
break;
}
@@ -2642,7 +2648,7 @@ enum {
};
static int32_t physiWindowNodeToMsg(const void* pObj, STlvEncoder* pEncoder) {
- const SWinodwPhysiNode* pNode = (const SWinodwPhysiNode*)pObj;
+ const SWindowPhysiNode* pNode = (const SWindowPhysiNode*)pObj;
int32_t code = tlvEncodeObj(pEncoder, PHY_WINDOW_CODE_BASE_NODE, physiNodeToMsg, &pNode->node);
if (TSDB_CODE_SUCCESS == code) {
@@ -2683,7 +2689,7 @@ static int32_t physiWindowNodeToMsg(const void* pObj, STlvEncoder* pEncoder) {
}
static int32_t msgToPhysiWindowNode(STlvDecoder* pDecoder, void* pObj) {
- SWinodwPhysiNode* pNode = (SWinodwPhysiNode*)pObj;
+ SWindowPhysiNode* pNode = (SWindowPhysiNode*)pObj;
int32_t code = TSDB_CODE_SUCCESS;
STlv* pTlv = NULL;
diff --git a/source/libs/nodes/src/nodesToSQLFuncs.c b/source/libs/nodes/src/nodesToSQLFuncs.c
index 0181da92a9e7eeb5387a68ed18b0b20470979101..b57bba0cc9bd510576e611a5f0416ef3d21982d4 100644
--- a/source/libs/nodes/src/nodesToSQLFuncs.c
+++ b/source/libs/nodes/src/nodesToSQLFuncs.c
@@ -120,9 +120,9 @@ int32_t nodesNodeToSQL(SNode *pNode, char *buf, int32_t bufSize, int32_t *len) {
}
if (colNode->tableAlias[0]) {
- *len += snprintf(buf + *len, bufSize - *len, "`%s`", colNode->colName);
+ *len += snprintf(buf + *len, bufSize - *len, "`%s`", colNode->node.userAlias);
} else {
- *len += snprintf(buf + *len, bufSize - *len, "%s", colNode->colName);
+ *len += snprintf(buf + *len, bufSize - *len, "%s", colNode->node.userAlias);
}
return TSDB_CODE_SUCCESS;
diff --git a/source/libs/nodes/src/nodesUtilFuncs.c b/source/libs/nodes/src/nodesUtilFuncs.c
index 17a5677987f1f4b82490115243cc28db371be945..39e288f6947b6d83390b623bebdc09d0d27b9077 100644
--- a/source/libs/nodes/src/nodesUtilFuncs.c
+++ b/source/libs/nodes/src/nodesUtilFuncs.c
@@ -608,7 +608,7 @@ static void destroyPhysiNode(SPhysiNode* pNode) {
nodesDestroyNode(pNode->pSlimit);
}
-static void destroyWinodwPhysiNode(SWinodwPhysiNode* pNode) {
+static void destroyWinodwPhysiNode(SWindowPhysiNode* pNode) {
destroyPhysiNode((SPhysiNode*)pNode);
nodesDestroyList(pNode->pExprs);
nodesDestroyList(pNode->pFuncs);
@@ -921,6 +921,7 @@ void nodesDestroyNode(SNode* pNode) {
break;
case QUERY_NODE_CREATE_TOPIC_STMT:
nodesDestroyNode(((SCreateTopicStmt*)pNode)->pQuery);
+ nodesDestroyNode(((SCreateTopicStmt*)pNode)->pWhere);
break;
case QUERY_NODE_DROP_TOPIC_STMT: // no pointer field
case QUERY_NODE_DROP_CGROUP_STMT: // no pointer field
@@ -1088,6 +1089,7 @@ void nodesDestroyNode(SNode* pNode) {
destroyLogicNode((SLogicNode*)pLogicNode);
nodesDestroyNode(pLogicNode->pMergeCondition);
nodesDestroyNode(pLogicNode->pOnConditions);
+ nodesDestroyNode(pLogicNode->pColEqualOnConditions);
break;
}
case QUERY_NODE_LOGIC_PLAN_AGG: {
@@ -1220,6 +1222,7 @@ void nodesDestroyNode(SNode* pNode) {
nodesDestroyNode(pPhyNode->pMergeCondition);
nodesDestroyNode(pPhyNode->pOnConditions);
nodesDestroyList(pPhyNode->pTargets);
+ nodesDestroyNode(pPhyNode->pColEqualOnConditions);
break;
}
case QUERY_NODE_PHYSICAL_PLAN_HASH_AGG: {
@@ -1257,7 +1260,7 @@ void nodesDestroyNode(SNode* pNode) {
case QUERY_NODE_PHYSICAL_PLAN_STREAM_INTERVAL:
case QUERY_NODE_PHYSICAL_PLAN_STREAM_FINAL_INTERVAL:
case QUERY_NODE_PHYSICAL_PLAN_STREAM_SEMI_INTERVAL:
- destroyWinodwPhysiNode((SWinodwPhysiNode*)pNode);
+ destroyWinodwPhysiNode((SWindowPhysiNode*)pNode);
break;
case QUERY_NODE_PHYSICAL_PLAN_FILL:
case QUERY_NODE_PHYSICAL_PLAN_STREAM_FILL: {
@@ -1273,19 +1276,19 @@ void nodesDestroyNode(SNode* pNode) {
case QUERY_NODE_PHYSICAL_PLAN_STREAM_SESSION:
case QUERY_NODE_PHYSICAL_PLAN_STREAM_SEMI_SESSION:
case QUERY_NODE_PHYSICAL_PLAN_STREAM_FINAL_SESSION:
- destroyWinodwPhysiNode((SWinodwPhysiNode*)pNode);
+ destroyWinodwPhysiNode((SWindowPhysiNode*)pNode);
break;
case QUERY_NODE_PHYSICAL_PLAN_MERGE_STATE:
case QUERY_NODE_PHYSICAL_PLAN_STREAM_STATE: {
SStateWinodwPhysiNode* pPhyNode = (SStateWinodwPhysiNode*)pNode;
- destroyWinodwPhysiNode((SWinodwPhysiNode*)pPhyNode);
+ destroyWinodwPhysiNode((SWindowPhysiNode*)pPhyNode);
nodesDestroyNode(pPhyNode->pStateKey);
break;
}
case QUERY_NODE_PHYSICAL_PLAN_MERGE_EVENT:
case QUERY_NODE_PHYSICAL_PLAN_STREAM_EVENT: {
SEventWinodwPhysiNode* pPhyNode = (SEventWinodwPhysiNode*)pNode;
- destroyWinodwPhysiNode((SWinodwPhysiNode*)pPhyNode);
+ destroyWinodwPhysiNode((SWindowPhysiNode*)pPhyNode);
nodesDestroyNode(pPhyNode->pStartCond);
nodesDestroyNode(pPhyNode->pEndCond);
break;
@@ -2073,7 +2076,7 @@ char* nodesGetNameFromColumnNode(SNode* pNode) {
return "NULL";
}
- return ((SColumnNode*)pNode)->colName;
+ return ((SColumnNode*)pNode)->node.userAlias;
}
int32_t nodesGetOutputNumFromSlotList(SNodeList* pSlots) {
diff --git a/source/libs/parser/inc/parAst.h b/source/libs/parser/inc/parAst.h
index 43765c8112f267979c37a66e6b9824fff6f03552..4fecb1cd33affcf6cf1e737282f217eae583630c 100644
--- a/source/libs/parser/inc/parAst.h
+++ b/source/libs/parser/inc/parAst.h
@@ -127,6 +127,7 @@ SNode* createIntervalWindowNode(SAstCreateContext* pCxt, SNode* pInterval, SNode
SNode* createFillNode(SAstCreateContext* pCxt, EFillMode mode, SNode* pValues);
SNode* createGroupingSetNode(SAstCreateContext* pCxt, SNode* pNode);
SNode* createInterpTimeRange(SAstCreateContext* pCxt, SNode* pStart, SNode* pEnd);
+SNode* createInterpTimePoint(SAstCreateContext* pCxt, SNode* pPoint);
SNode* createWhenThenNode(SAstCreateContext* pCxt, SNode* pWhen, SNode* pThen);
SNode* createCaseWhenNode(SAstCreateContext* pCxt, SNode* pCase, SNodeList* pWhenThenList, SNode* pElse);
@@ -207,7 +208,7 @@ SNode* createCreateTopicStmtUseQuery(SAstCreateContext* pCxt, bool ignoreExists,
SNode* createCreateTopicStmtUseDb(SAstCreateContext* pCxt, bool ignoreExists, SToken* pTopicName, SToken* pSubDbName,
bool withMeta);
SNode* createCreateTopicStmtUseTable(SAstCreateContext* pCxt, bool ignoreExists, SToken* pTopicName, SNode* pRealTable,
- bool withMeta);
+ bool withMeta, SNode* pWhere);
SNode* createDropTopicStmt(SAstCreateContext* pCxt, bool ignoreNotExists, SToken* pTopicName);
SNode* createDropCGroupStmt(SAstCreateContext* pCxt, bool ignoreNotExists, const SToken* pCGroupId, SToken* pTopicName);
SNode* createAlterLocalStmt(SAstCreateContext* pCxt, const SToken* pConfig, const SToken* pValue);
diff --git a/source/libs/parser/inc/parUtil.h b/source/libs/parser/inc/parUtil.h
index 850571eea1533541318c4dc57c60b14311fcc5a8..1a4ee3e91a1aa861d06d0e2829ac806963c5b20c 100644
--- a/source/libs/parser/inc/parUtil.h
+++ b/source/libs/parser/inc/parUtil.h
@@ -84,6 +84,7 @@ int32_t getNumOfColumns(const STableMeta* pTableMeta);
int32_t getNumOfTags(const STableMeta* pTableMeta);
STableComInfo getTableInfo(const STableMeta* pTableMeta);
STableMeta* tableMetaDup(const STableMeta* pTableMeta);
+int32_t getTableTypeFromTableNode(SNode *pTable);
int32_t trimString(const char* src, int32_t len, char* dst, int32_t dlen);
int32_t getVnodeSysTableTargetName(int32_t acctId, SNode* pWhere, SName* pName);
@@ -115,6 +116,7 @@ int32_t getTableIndexFromCache(SParseMetaCache* pMetaCache, const SName* pName,
int32_t getTableCfgFromCache(SParseMetaCache* pMetaCache, const SName* pName, STableCfg** pOutput);
int32_t getDnodeListFromCache(SParseMetaCache* pMetaCache, SArray** pDnodes);
void destoryParseMetaCache(SParseMetaCache* pMetaCache, bool request);
+SNode* createSelectStmtImpl(bool isDistinct, SNodeList* pProjectionList, SNode* pTable);
#ifdef __cplusplus
}
diff --git a/source/libs/parser/inc/sql.y b/source/libs/parser/inc/sql.y
index 9b8393f624617b589ded9ab119beebfcb7b12a54..518dd95f23314a5dc5ce115785b19db4575ff299 100755
--- a/source/libs/parser/inc/sql.y
+++ b/source/libs/parser/inc/sql.y
@@ -543,9 +543,9 @@ cmd ::= CREATE TOPIC not_exists_opt(A) topic_name(B) AS DATABASE db_name(C).
cmd ::= CREATE TOPIC not_exists_opt(A) topic_name(B)
WITH META AS DATABASE db_name(C). { pCxt->pRootNode = createCreateTopicStmtUseDb(pCxt, A, &B, &C, true); }
cmd ::= CREATE TOPIC not_exists_opt(A) topic_name(B)
- AS STABLE full_table_name(C). { pCxt->pRootNode = createCreateTopicStmtUseTable(pCxt, A, &B, C, false); }
+ AS STABLE full_table_name(C) where_clause_opt(D). { pCxt->pRootNode = createCreateTopicStmtUseTable(pCxt, A, &B, C, false, D); }
cmd ::= CREATE TOPIC not_exists_opt(A) topic_name(B)
- WITH META AS STABLE full_table_name(C). { pCxt->pRootNode = createCreateTopicStmtUseTable(pCxt, A, &B, C, true); }
+ WITH META AS STABLE full_table_name(C) where_clause_opt(D). { pCxt->pRootNode = createCreateTopicStmtUseTable(pCxt, A, &B, C, true, D); }
cmd ::= DROP TOPIC exists_opt(A) topic_name(B). { pCxt->pRootNode = createDropTopicStmt(pCxt, A, &B); }
cmd ::= DROP CONSUMER GROUP exists_opt(A) cgroup_name(B) ON topic_name(C). { pCxt->pRootNode = createDropCGroupStmt(pCxt, A, &B, &C); }
@@ -1095,6 +1095,8 @@ having_clause_opt(A) ::= HAVING search_condition(B).
range_opt(A) ::= . { A = NULL; }
range_opt(A) ::=
RANGE NK_LP expr_or_subquery(B) NK_COMMA expr_or_subquery(C) NK_RP. { A = createInterpTimeRange(pCxt, releaseRawExprNode(pCxt, B), releaseRawExprNode(pCxt, C)); }
+range_opt(A) ::=
+ RANGE NK_LP expr_or_subquery(B) NK_RP. { A = createInterpTimePoint(pCxt, releaseRawExprNode(pCxt, B)); }
every_opt(A) ::= . { A = NULL; }
every_opt(A) ::= EVERY NK_LP duration_literal(B) NK_RP. { A = releaseRawExprNode(pCxt, B); }
diff --git a/source/libs/parser/src/parAstCreater.c b/source/libs/parser/src/parAstCreater.c
index 4223455b658e37f8917bcbfa05dae1b843bac7d4..e5a0fc3d76c9163abd2880233e551048ff6a11cf 100644
--- a/source/libs/parser/src/parAstCreater.c
+++ b/source/libs/parser/src/parAstCreater.c
@@ -259,8 +259,19 @@ SNode* releaseRawExprNode(SAstCreateContext* pCxt, SNode* pNode) {
strcpy(pExpr->userAlias, ((SColumnNode*)pExpr)->colName);
} else {
int32_t len = TMIN(sizeof(pExpr->aliasName) - 1, pRawExpr->n);
- strncpy(pExpr->aliasName, pRawExpr->p, len);
- pExpr->aliasName[len] = '\0';
+
+ // See TS-3398.
+ // Len of pRawExpr->p could be larger than len of aliasName[TSDB_COL_NAME_LEN].
+ // If aliasName is truncated, hash value of aliasName could be the same.
+ T_MD5_CTX ctx;
+ tMD5Init(&ctx);
+ tMD5Update(&ctx, (uint8_t*)pRawExpr->p, pRawExpr->n);
+ tMD5Final(&ctx);
+ char* p = pExpr->aliasName;
+ for (uint8_t i = 0; i < tListLen(ctx.digest); ++i) {
+ sprintf(p, "%02x", ctx.digest[i]);
+ p += 2;
+ }
strncpy(pExpr->userAlias, pRawExpr->p, len);
pExpr->userAlias[len] = '\0';
}
@@ -684,6 +695,11 @@ SNode* createInterpTimeRange(SAstCreateContext* pCxt, SNode* pStart, SNode* pEnd
return createBetweenAnd(pCxt, createPrimaryKeyCol(pCxt, NULL), pStart, pEnd);
}
+SNode* createInterpTimePoint(SAstCreateContext* pCxt, SNode* pPoint) {
+ CHECK_PARSER_STATUS(pCxt);
+ return createOperatorNode(pCxt, OP_TYPE_EQUAL, createPrimaryKeyCol(pCxt, NULL), pPoint);
+}
+
SNode* createWhenThenNode(SAstCreateContext* pCxt, SNode* pWhen, SNode* pThen) {
CHECK_PARSER_STATUS(pCxt);
SWhenThenNode* pWhenThen = (SWhenThenNode*)nodesMakeNode(QUERY_NODE_WHEN_THEN);
@@ -822,16 +838,9 @@ SNode* addFillClause(SAstCreateContext* pCxt, SNode* pStmt, SNode* pFill) {
SNode* createSelectStmt(SAstCreateContext* pCxt, bool isDistinct, SNodeList* pProjectionList, SNode* pTable) {
CHECK_PARSER_STATUS(pCxt);
- SSelectStmt* select = (SSelectStmt*)nodesMakeNode(QUERY_NODE_SELECT_STMT);
+ SNode* select = createSelectStmtImpl(isDistinct, pProjectionList, pTable);
CHECK_OUT_OF_MEM(select);
- select->isDistinct = isDistinct;
- select->pProjectionList = pProjectionList;
- select->pFromTable = pTable;
- sprintf(select->stmtName, "%p", select);
- select->isTimeLineResult = true;
- select->onlyHasKeepOrderFunc = true;
- select->timeRange = TSWINDOW_INITIALIZER;
- return (SNode*)select;
+ return select;
}
static void setSubquery(SNode* pStmt) {
@@ -1712,7 +1721,7 @@ SNode* createCreateTopicStmtUseDb(SAstCreateContext* pCxt, bool ignoreExists, ST
}
SNode* createCreateTopicStmtUseTable(SAstCreateContext* pCxt, bool ignoreExists, SToken* pTopicName, SNode* pRealTable,
- bool withMeta) {
+ bool withMeta, SNode* pWhere) {
CHECK_PARSER_STATUS(pCxt);
if (!checkTopicName(pCxt, pTopicName)) {
return NULL;
@@ -1722,6 +1731,8 @@ SNode* createCreateTopicStmtUseTable(SAstCreateContext* pCxt, bool ignoreExists,
COPY_STRING_FORM_ID_TOKEN(pStmt->topicName, pTopicName);
pStmt->ignoreExists = ignoreExists;
pStmt->withMeta = withMeta;
+ pStmt->pWhere = pWhere;
+
strcpy(pStmt->subDbName, ((SRealTableNode*)pRealTable)->table.dbName);
strcpy(pStmt->subSTbName, ((SRealTableNode*)pRealTable)->table.tableName);
nodesDestroyNode(pRealTable);
diff --git a/source/libs/parser/src/parAstParser.c b/source/libs/parser/src/parAstParser.c
index 5db1f5dbdbca70bed529373c3bb2caa76d74efd2..801d43e2a4725342125acfe59117886a96c973ad 100644
--- a/source/libs/parser/src/parAstParser.c
+++ b/source/libs/parser/src/parAstParser.c
@@ -355,6 +355,11 @@ static int32_t collectMetaKeyFromCreateTopic(SCollectMetaKeyCxt* pCxt, SCreateTo
if (NULL != pStmt->pQuery) {
return collectMetaKeyFromQuery(pCxt, pStmt->pQuery);
}
+ if (NULL != pStmt->pWhere) {
+ int32_t code = collectMetaKeyFromRealTableImpl(pCxt, pStmt->subDbName, pStmt->subSTbName,
+ AUTH_TYPE_READ);
+ return code;
+ }
return TSDB_CODE_SUCCESS;
}
@@ -581,7 +586,7 @@ static int32_t collectMetaKeyFromShowCreateTable(SCollectMetaKeyCxt* pCxt, SShow
code = reserveDbCfgInCache(pCxt->pParseCxt->acctId, pStmt->dbName, pCxt->pMetaCache);
}
if (TSDB_CODE_SUCCESS == code) {
- code = reserveUserAuthInCache(pCxt->pParseCxt->acctId, pCxt->pParseCxt->pUser, pStmt->dbName, NULL, AUTH_TYPE_READ,
+ code = reserveUserAuthInCache(pCxt->pParseCxt->acctId, pCxt->pParseCxt->pUser, pStmt->dbName, pStmt->tableName, AUTH_TYPE_READ,
pCxt->pMetaCache);
}
return code;
diff --git a/source/libs/parser/src/parAuthenticator.c b/source/libs/parser/src/parAuthenticator.c
index 1586d8128ba8d425575526ecf5567e688408af42..251d3bd0cba6269ac594d1538894ee6a3ee3454b 100644
--- a/source/libs/parser/src/parAuthenticator.c
+++ b/source/libs/parser/src/parAuthenticator.c
@@ -175,7 +175,7 @@ static int32_t authShowTables(SAuthCxt* pCxt, SShowStmt* pStmt) {
static int32_t authShowCreateTable(SAuthCxt* pCxt, SShowCreateTableStmt* pStmt) {
SNode* pTagCond = NULL;
// todo check tag condition for subtable
- return checkAuth(pCxt, pStmt->dbName, NULL, AUTH_TYPE_READ, &pTagCond);
+ return checkAuth(pCxt, pStmt->dbName, pStmt->tableName, AUTH_TYPE_READ, &pTagCond);
}
static int32_t authCreateTable(SAuthCxt* pCxt, SCreateTableStmt* pStmt) {
diff --git a/source/libs/parser/src/parInsertSql.c b/source/libs/parser/src/parInsertSql.c
index 43a73af3c33753852bdb97d640d1857622cbb74e..f9b4e54318b4c3c87895ad2b6a1ec6b03444d342 100644
--- a/source/libs/parser/src/parInsertSql.c
+++ b/source/libs/parser/src/parInsertSql.c
@@ -1605,7 +1605,7 @@ static int32_t parseCsvFile(SInsertParseContext* pCxt, SVnodeModifyOpStmt* pStmt
(*pNumOfRows)++;
}
- if (TSDB_CODE_SUCCESS == code && (*pNumOfRows) > tsMaxMemUsedByInsert * 1024 * 1024) {
+ if (TSDB_CODE_SUCCESS == code && (*pNumOfRows) > tsMaxInsertBatchRows) {
pStmt->fileProcessing = true;
break;
}
@@ -1614,6 +1614,8 @@ static int32_t parseCsvFile(SInsertParseContext* pCxt, SVnodeModifyOpStmt* pStmt
}
taosMemoryFree(pLine);
+ parserDebug("0x%" PRIx64 " %d rows have been parsed", pCxt->pComCxt->requestId, *pNumOfRows);
+
if (TSDB_CODE_SUCCESS == code && 0 == (*pNumOfRows) &&
(!TSDB_QUERY_HAS_TYPE(pStmt->insertType, TSDB_QUERY_TYPE_STMT_INSERT)) && !pStmt->fileProcessing) {
code = buildSyntaxErrMsg(&pCxt->msg, "no any data points", NULL);
diff --git a/source/libs/parser/src/parInsertStmt.c b/source/libs/parser/src/parInsertStmt.c
index f654d2527f514cb732df0a2c901366ecbdc3d63a..bcbea6cd2e67b803f364dddeb22c7af6e4d902d8 100644
--- a/source/libs/parser/src/parInsertStmt.c
+++ b/source/libs/parser/src/parInsertStmt.c
@@ -267,7 +267,10 @@ int32_t qBindStmtColsValue(void* pBlock, TAOS_MULTI_BIND* bind, char* msgBuf, in
pBind = bind + c;
}
- tColDataAddValueByBind(pCol, pBind);
+ code = tColDataAddValueByBind(pCol, pBind, IS_VAR_DATA_TYPE(pColSchema->type) ? pColSchema->bytes - VARSTR_HEADER_SIZE: -1);
+ if (code) {
+ goto _return;
+ }
}
qDebug("stmt all %d columns bind %d rows data", boundInfo->numOfBound, rowNum);
@@ -310,7 +313,7 @@ int32_t qBindStmtSingleColValue(void* pBlock, TAOS_MULTI_BIND* bind, char* msgBu
pBind = bind;
}
- tColDataAddValueByBind(pCol, pBind);
+ tColDataAddValueByBind(pCol, pBind, IS_VAR_DATA_TYPE(pColSchema->type) ? pColSchema->bytes - VARSTR_HEADER_SIZE: -1);
qDebug("stmt col %d bind %d rows data", colIdx, rowNum);
diff --git a/source/libs/parser/src/parInsertUtil.c b/source/libs/parser/src/parInsertUtil.c
index f921094752f337796b66c24c27db42fd8e54d5eb..de7d154db6a8ecf604fa2197ad09e323ae185a43 100644
--- a/source/libs/parser/src/parInsertUtil.c
+++ b/source/libs/parser/src/parInsertUtil.c
@@ -272,6 +272,41 @@ static int32_t createTableDataCxt(STableMeta* pTableMeta, SVCreateTbReq** pCreat
return code;
}
+static int32_t rebuildTableData(SSubmitTbData* pSrc, SSubmitTbData** pDst) {
+  int32_t code = TSDB_CODE_SUCCESS;
+  SSubmitTbData* pTmp = taosMemoryCalloc(1, sizeof(SSubmitTbData));
+  if (NULL == pTmp) {
+    code = TSDB_CODE_OUT_OF_MEMORY;
+  } else {
+    pTmp->flags = pSrc->flags;
+    pTmp->suid = pSrc->suid;
+    pTmp->uid = pSrc->uid;
+    pTmp->sver = pSrc->sver;
+    pTmp->pCreateTbReq = NULL;
+    if (pTmp->flags & SUBMIT_REQ_COLUMN_DATA_FORMAT) {
+      pTmp->aCol = taosArrayInit(128, sizeof(SColData));
+      if (NULL == pTmp->aCol) {
+        code = TSDB_CODE_OUT_OF_MEMORY;
+        taosMemoryFree(pTmp);
+      }
+    } else {
+      pTmp->aRowP = taosArrayInit(128, POINTER_BYTES);
+      if (NULL == pTmp->aRowP) {
+        code = TSDB_CODE_OUT_OF_MEMORY;
+        taosMemoryFree(pTmp);
+      }
+    }
+  }
+
+  taosMemoryFree(pSrc);
+  if (TSDB_CODE_SUCCESS == code) {
+    *pDst = pTmp;
+  } else {
+    *pDst = NULL;  // pSrc is already freed; never leave the caller holding a dangling pointer
+  }
+  return code;
+}
+
+
static void resetColValues(SArray* pValues) {
int32_t num = taosArrayGetSize(pValues);
for (int32_t i = 0; i < num; ++i) {
@@ -381,7 +416,7 @@ static int32_t fillVgroupDataCxt(STableDataCxt* pTableCxt, SVgroupDataCxt* pVgCx
}
}
taosArrayPush(pVgCxt->pData->aSubmitTbData, pTableCxt->pData);
- taosMemoryFreeClear(pTableCxt->pData);
+ rebuildTableData(pTableCxt->pData, &pTableCxt->pData);
qDebug("add tableDataCxt uid:%" PRId64 " to vgId:%d", pTableCxt->pMeta->uid, pVgCxt->vgId);
diff --git a/source/libs/parser/src/parTranslater.c b/source/libs/parser/src/parTranslater.c
index 864513f15d5a5f1e01399c6ba28865a51d3ae1cd..5890ba15f4131273cfa92dfe958e87aeb3f6a04f 100644
--- a/source/libs/parser/src/parTranslater.c
+++ b/source/libs/parser/src/parTranslater.c
@@ -55,6 +55,13 @@ typedef struct STranslateContext {
bool showRewrite;
} STranslateContext;
+typedef struct SBuildTopicContext {
+ bool colExists;
+ bool colNotFound;
+ STableMeta* pMeta;
+ SNodeList* pTags;
+} SBuildTopicContext;
+
typedef struct SFullDatabaseName {
char fullDbName[TSDB_DB_FNAME_LEN];
} SFullDatabaseName;
@@ -700,6 +707,10 @@ static bool isWindowPseudoColumnFunc(const SNode* pNode) {
return (QUERY_NODE_FUNCTION == nodeType(pNode) && fmIsWindowPseudoColumnFunc(((SFunctionNode*)pNode)->funcId));
}
+static bool isInterpFunc(const SNode* pNode) {
+ return (QUERY_NODE_FUNCTION == nodeType(pNode) && fmIsInterpFunc(((SFunctionNode*)pNode)->funcId));
+}
+
static bool isInterpPseudoColumnFunc(const SNode* pNode) {
return (QUERY_NODE_FUNCTION == nodeType(pNode) && fmIsInterpPseudoColumnFunc(((SFunctionNode*)pNode)->funcId));
}
@@ -751,18 +762,40 @@ static SNodeList* getProjectList(const SNode* pNode) {
static bool isTimeLineQuery(SNode* pStmt) {
if (QUERY_NODE_SELECT_STMT == nodeType(pStmt)) {
- return ((SSelectStmt*)pStmt)->isTimeLineResult;
+ return (TIME_LINE_MULTI == ((SSelectStmt*)pStmt)->timeLineResMode) || (TIME_LINE_GLOBAL == ((SSelectStmt*)pStmt)->timeLineResMode);
+ } else if (QUERY_NODE_SET_OPERATOR == nodeType(pStmt)) {
+ return TIME_LINE_GLOBAL == ((SSetOperator*)pStmt)->timeLineResMode;
} else {
return false;
}
}
static bool isGlobalTimeLineQuery(SNode* pStmt) {
- if (!isTimeLineQuery(pStmt)) {
+ if (QUERY_NODE_SELECT_STMT == nodeType(pStmt)) {
+ return TIME_LINE_GLOBAL == ((SSelectStmt*)pStmt)->timeLineResMode;
+ } else if (QUERY_NODE_SET_OPERATOR == nodeType(pStmt)) {
+ return TIME_LINE_GLOBAL == ((SSetOperator*)pStmt)->timeLineResMode;
+ } else {
return false;
}
- SSelectStmt* pSelect = (SSelectStmt*)pStmt;
- return NULL == pSelect->pPartitionByList || NULL != pSelect->pOrderByList;
+}
+
+static bool isTimeLineAlignedQuery(SNode* pStmt) {
+ SSelectStmt *pSelect = (SSelectStmt *)pStmt;
+ if (isGlobalTimeLineQuery(((STempTableNode*)pSelect->pFromTable)->pSubquery)) {
+ return true;
+ }
+ if (!isTimeLineQuery(((STempTableNode*)pSelect->pFromTable)->pSubquery)) {
+ return false;
+ }
+ if (QUERY_NODE_SELECT_STMT != nodeType(((STempTableNode*)pSelect->pFromTable)->pSubquery)) {
+ return false;
+ }
+ SSelectStmt *pSub = (SSelectStmt *)((STempTableNode*)pSelect->pFromTable)->pSubquery;
+ if (nodesListMatch(pSelect->pPartitionByList, pSub->pPartitionByList)) {
+ return true;
+ }
+ return false;
}
static bool isPrimaryKeyImpl(SNode* pExpr) {
@@ -827,7 +860,7 @@ static void setColumnInfoByExpr(STempTableNode* pTable, SExprNode* pExpr, SColum
strcpy(pCol->node.aliasName, pCol->colName);
}
if ('\0' == pCol->node.userAlias[0]) {
- strcpy(pCol->node.userAlias, pCol->colName);
+ strcpy(pCol->node.userAlias, pExpr->userAlias);
}
pCol->node.resType = pExpr->resType;
}
@@ -1568,7 +1601,7 @@ static int32_t translateTimelineFunc(STranslateContext* pCxt, SFunctionNode* pFu
}
SSelectStmt* pSelect = (SSelectStmt*)pCxt->pCurrStmt;
if (NULL != pSelect->pFromTable && QUERY_NODE_TEMP_TABLE == nodeType(pSelect->pFromTable) &&
- !isTimeLineQuery(((STempTableNode*)pSelect->pFromTable)->pSubquery)) {
+ !isGlobalTimeLineQuery(((STempTableNode*)pSelect->pFromTable)->pSubquery) && !isTimeLineAlignedQuery(pCxt->pCurrStmt)) {
return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_NOT_ALLOWED_FUNC,
"%s function requires valid time series input", pFunc->functionName);
}
@@ -1763,6 +1796,7 @@ static int32_t rewriteFuncToValue(STranslateContext* pCxt, char* pLiteral, SNode
return TSDB_CODE_OUT_OF_MEMORY;
}
strcpy(pVal->node.aliasName, ((SExprNode*)*pNode)->aliasName);
+ strcpy(pVal->node.userAlias, ((SExprNode*)*pNode)->userAlias);
pVal->node.resType = ((SExprNode*)*pNode)->resType;
if (NULL == pLiteral) {
pVal->isNull = true;
@@ -2281,7 +2315,7 @@ static int32_t checkAggColCoexist(STranslateContext* pCxt, SSelectStmt* pSelect)
return TSDB_CODE_SUCCESS;
}
if (!pSelect->onlyHasKeepOrderFunc) {
- pSelect->isTimeLineResult = false;
+ pSelect->timeLineResMode = TIME_LINE_NONE;
}
CheckAggColCoexistCxt cxt = {.pTranslateCxt = pCxt, .existCol = false};
nodesRewriteExprs(pSelect->pProjectionList, doCheckAggColCoexist, &cxt);
@@ -2628,9 +2662,9 @@ static int32_t replaceTbName(STranslateContext* pCxt, SSelectStmt* pSelect) {
static int32_t checkJoinTable(STranslateContext* pCxt, SJoinTableNode* pJoinTable) {
if ((QUERY_NODE_TEMP_TABLE == nodeType(pJoinTable->pLeft) &&
- !isTimeLineQuery(((STempTableNode*)pJoinTable->pLeft)->pSubquery)) ||
+ !isGlobalTimeLineQuery(((STempTableNode*)pJoinTable->pLeft)->pSubquery)) ||
(QUERY_NODE_TEMP_TABLE == nodeType(pJoinTable->pRight) &&
- !isTimeLineQuery(((STempTableNode*)pJoinTable->pRight)->pSubquery))) {
+ !isGlobalTimeLineQuery(((STempTableNode*)pJoinTable->pRight)->pSubquery))) {
return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_NOT_SUPPORT_JOIN,
"Join requires valid time series input");
}
@@ -2665,7 +2699,7 @@ static int32_t translateTable(STranslateContext* pCxt, SNode* pTable) {
}
if (TSDB_SYSTEM_TABLE == pRealTable->pMeta->tableType) {
if (isSelectStmt(pCxt->pCurrStmt)) {
- ((SSelectStmt*)pCxt->pCurrStmt)->isTimeLineResult = false;
+ ((SSelectStmt*)pCxt->pCurrStmt)->timeLineResMode = TIME_LINE_NONE;
} else if (isDeleteStmt(pCxt->pCurrStmt)) {
code = TSDB_CODE_TSC_INVALID_OPERATION;
break;
@@ -2761,6 +2795,7 @@ static SNode* createMultiResFunc(SFunctionNode* pSrcFunc, SExprNode* pExpr) {
} else {
len = snprintf(buf, sizeof(buf), "%s(%s)", pSrcFunc->functionName, pExpr->aliasName);
strncpy(pFunc->node.aliasName, buf, TMIN(len, sizeof(pFunc->node.aliasName) - 1));
+ len = snprintf(buf, sizeof(buf), "%s(%s)", pSrcFunc->functionName, pExpr->userAlias);
strncpy(pFunc->node.userAlias, buf, TMIN(len, sizeof(pFunc->node.userAlias) - 1));
}
@@ -2999,7 +3034,7 @@ static int32_t translateOrderBy(STranslateContext* pCxt, SSelectStmt* pSelect) {
}
static EDealRes needFillImpl(SNode* pNode, void* pContext) {
- if (isAggFunc(pNode) && FUNCTION_TYPE_GROUP_KEY != ((SFunctionNode*)pNode)->funcType) {
+ if ((isAggFunc(pNode) || isInterpFunc(pNode)) && FUNCTION_TYPE_GROUP_KEY != ((SFunctionNode*)pNode)->funcType) {
*(bool*)pContext = true;
return DEAL_RES_END;
}
@@ -3023,7 +3058,7 @@ static int32_t convertFillValue(STranslateContext* pCxt, SDataType dt, SNodeList
code = scalarCalculateConstants(pCaseFunc, &pCell->pNode);
}
if (TSDB_CODE_SUCCESS == code && QUERY_NODE_VALUE != nodeType(pCell->pNode)) {
- code = generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_WRONG_VALUE_TYPE, "Fill value is just a constant");
+ code = generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_WRONG_VALUE_TYPE, "Fill value can only accept constant");
} else if (TSDB_CODE_SUCCESS != code) {
code = generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_WRONG_VALUE_TYPE, "Filled data type mismatch");
}
@@ -3047,6 +3082,7 @@ static int32_t checkFillValues(STranslateContext* pCxt, SFillNode* pFill, SNodeL
if (TSDB_CODE_SUCCESS != code) {
return code;
}
+
++fillNo;
}
}
@@ -3143,7 +3179,7 @@ static int32_t translateGroupBy(STranslateContext* pCxt, SSelectStmt* pSelect) {
return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_GROUPBY_WINDOW_COEXIST);
}
pCxt->currClause = SQL_CLAUSE_GROUP_BY;
- pSelect->isTimeLineResult = false;
+ pSelect->timeLineResMode = TIME_LINE_NONE;
return translateExprList(pCxt, pSelect->pGroupByList);
}
@@ -3207,7 +3243,7 @@ static int32_t checkFill(STranslateContext* pCxt, SFillNode* pFill, SValueNode*
int64_t timeRange = TABS(pFill->timeRange.skey - pFill->timeRange.ekey);
int64_t intervalRange = 0;
- if (TIME_IS_VAR_DURATION(pInterval->unit)) {
+ if (IS_CALENDAR_TIME_DURATION(pInterval->unit)) {
int64_t f = 1;
if (pInterval->unit == 'n') {
f = 30LL * MILLISECOND_PER_DAY;
@@ -3288,7 +3324,7 @@ static int32_t checkIntervalWindow(STranslateContext* pCxt, SIntervalWindowNode*
uint8_t precision = ((SColumnNode*)pInterval->pCol)->node.resType.precision;
SValueNode* pInter = (SValueNode*)pInterval->pInterval;
- bool valInter = TIME_IS_VAR_DURATION(pInter->unit);
+ bool valInter = IS_CALENDAR_TIME_DURATION(pInter->unit);
if (pInter->datum.i <= 0 || (!valInter && pInter->datum.i < tsMinIntervalTime)) {
return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_INTER_VALUE_TOO_SMALL, tsMinIntervalTime,
getPrecisionStr(precision));
@@ -3302,7 +3338,7 @@ static int32_t checkIntervalWindow(STranslateContext* pCxt, SIntervalWindowNode*
if (pInter->unit == 'n' && pOffset->unit == 'y') {
return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_INTER_OFFSET_UNIT);
}
- bool fixed = !TIME_IS_VAR_DURATION(pOffset->unit) && !valInter;
+ bool fixed = !IS_CALENDAR_TIME_DURATION(pOffset->unit) && !valInter;
if ((fixed && pOffset->datum.i >= pInter->datum.i) ||
(!fixed && getMonthsFromTimeVal(pOffset->datum.i, precision, pOffset->unit) >=
getMonthsFromTimeVal(pInter->datum.i, precision, pInter->unit))) {
@@ -3318,7 +3354,7 @@ static int32_t checkIntervalWindow(STranslateContext* pCxt, SIntervalWindowNode*
const static int32_t INTERVAL_SLIDING_FACTOR = 100;
SValueNode* pSliding = (SValueNode*)pInterval->pSliding;
- if (TIME_IS_VAR_DURATION(pSliding->unit)) {
+ if (IS_CALENDAR_TIME_DURATION(pSliding->unit)) {
return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_INTER_SLIDING_UNIT);
}
if ((pSliding->datum.i < convertTimePrecision(tsMinSlidingTime, TSDB_TIME_PRECISION_MILLI, precision)) ||
@@ -3466,6 +3502,22 @@ static int32_t createDefaultFillNode(STranslateContext* pCxt, SNode** pOutput) {
return TSDB_CODE_SUCCESS;
}
+static int32_t createDefaultEveryNode(STranslateContext* pCxt, SNode** pOutput) {
+  SValueNode* pEvery = (SValueNode*)nodesMakeNode(QUERY_NODE_VALUE);
+  if (NULL == pEvery) return TSDB_CODE_OUT_OF_MEMORY;
+
+  pEvery->node.resType.type = TSDB_DATA_TYPE_BIGINT;
+  pEvery->node.resType.bytes = tDataTypes[TSDB_DATA_TYPE_BIGINT].bytes;
+  pEvery->isDuration = true;
+  pEvery->literal = taosStrdup("1s");
+  if (NULL == pEvery->literal) {  // strdup can fail; don't return a node with a NULL literal
+    nodesDestroyNode((SNode*)pEvery);
+    return TSDB_CODE_OUT_OF_MEMORY;
+  }
+  *pOutput = (SNode*)pEvery;
+  return TSDB_CODE_SUCCESS;
+}
+
static int32_t checkEvery(STranslateContext* pCxt, SValueNode* pInterval) {
int32_t len = strlen(pInterval->literal);
@@ -3481,7 +3533,12 @@ static int32_t checkEvery(STranslateContext* pCxt, SValueNode* pInterval) {
static int32_t translateInterpEvery(STranslateContext* pCxt, SNode** pEvery) {
int32_t code = TSDB_CODE_SUCCESS;
- code = checkEvery(pCxt, (SValueNode*)(*pEvery));
+ if (NULL == *pEvery) {
+ code = createDefaultEveryNode(pCxt, pEvery);
+ }
+ if (TSDB_CODE_SUCCESS == code) {
+ code = checkEvery(pCxt, (SValueNode*)(*pEvery));
+ }
if (TSDB_CODE_SUCCESS == code) {
code = translateExpr(pCxt, pEvery);
}
@@ -3510,6 +3567,9 @@ static int32_t translateInterpFill(STranslateContext* pCxt, SSelectStmt* pSelect
if (TSDB_CODE_SUCCESS == code) {
code = checkFill(pCxt, (SFillNode*)pSelect->pFill, (SValueNode*)pSelect->pEvery, true);
}
+ if (TSDB_CODE_SUCCESS == code) {
+ code = checkFillValues(pCxt, (SFillNode*)pSelect->pFill, pSelect->pProjectionList);
+ }
return code;
}
@@ -3527,8 +3587,12 @@ static int32_t translateInterp(STranslateContext* pCxt, SSelectStmt* pSelect) {
}
if (NULL == pSelect->pRange || NULL == pSelect->pEvery || NULL == pSelect->pFill) {
- return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_INTERP_CLAUSE,
- "Missing RANGE clause, EVERY clause or FILL clause");
+ if (pSelect->pRange != NULL && QUERY_NODE_OPERATOR == nodeType(pSelect->pRange) && pSelect->pEvery == NULL) {
+ // single point interp every can be omitted
+ } else {
+ return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_INTERP_CLAUSE,
+ "Missing RANGE clause, EVERY clause or FILL clause");
+ }
}
int32_t code = translateExpr(pCxt, &pSelect->pRange);
@@ -3543,7 +3607,18 @@ static int32_t translateInterp(STranslateContext* pCxt, SSelectStmt* pSelect) {
static int32_t translatePartitionBy(STranslateContext* pCxt, SSelectStmt* pSelect) {
pCxt->currClause = SQL_CLAUSE_PARTITION_BY;
- int32_t code = translateExprList(pCxt, pSelect->pPartitionByList);
+ int32_t code = TSDB_CODE_SUCCESS;
+
+ if (pSelect->pPartitionByList) {
+ int8_t typeType = getTableTypeFromTableNode(pSelect->pFromTable);
+ SNode* pPar = nodesListGetNode(pSelect->pPartitionByList, 0);
+ if (!((TSDB_NORMAL_TABLE == typeType || TSDB_CHILD_TABLE == typeType) &&
+ 1 == pSelect->pPartitionByList->length && (QUERY_NODE_FUNCTION == nodeType(pPar) && FUNCTION_TYPE_TBNAME == ((SFunctionNode*)pPar)->funcType))) {
+ pSelect->timeLineResMode = TIME_LINE_MULTI;
+ }
+
+ code = translateExprList(pCxt, pSelect->pPartitionByList);
+ }
if (TSDB_CODE_SUCCESS == code) {
code = translateExprList(pCxt, pSelect->pTags);
}
@@ -3672,9 +3747,9 @@ static void resetResultTimeline(SSelectStmt* pSelect) {
if ((QUERY_NODE_TEMP_TABLE == nodeType(pSelect->pFromTable) &&
isPrimaryKey((STempTableNode*)pSelect->pFromTable, pOrder)) ||
(QUERY_NODE_TEMP_TABLE != nodeType(pSelect->pFromTable) && isPrimaryKeyImpl(pOrder))) {
- pSelect->isTimeLineResult = true;
+ pSelect->timeLineResMode = TIME_LINE_GLOBAL;
} else {
- pSelect->isTimeLineResult = false;
+ pSelect->timeLineResMode = TIME_LINE_NONE;
}
}
@@ -3744,7 +3819,7 @@ static int32_t translateSelectFrom(STranslateContext* pCxt, SSelectStmt* pSelect
if (TSDB_CODE_SUCCESS == code) {
code = replaceTbName(pCxt, pSelect);
}
-
+
return code;
}
@@ -3804,8 +3879,13 @@ static int32_t translateSetOperProject(STranslateContext* pCxt, SSetOperator* pS
pLeftExpr = pLeftFuncExpr;
}
snprintf(pRightExpr->aliasName, sizeof(pRightExpr->aliasName), "%s", pLeftExpr->aliasName);
- if (TSDB_CODE_SUCCESS != nodesListMakeStrictAppend(&pSetOperator->pProjectionList,
- createSetOperProject(pSetOperator->stmtName, pLeft))) {
+ SNode* pProj = createSetOperProject(pSetOperator->stmtName, pLeft);
+ if (QUERY_NODE_COLUMN == nodeType(pLeft) && QUERY_NODE_COLUMN == nodeType(pRight)
+ && ((SColumnNode*)pLeft)->colId == PRIMARYKEY_TIMESTAMP_COL_ID
+ && ((SColumnNode*)pRight)->colId == PRIMARYKEY_TIMESTAMP_COL_ID) {
+ ((SColumnNode*)pProj)->colId = PRIMARYKEY_TIMESTAMP_COL_ID;
+ }
+ if (TSDB_CODE_SUCCESS != nodesListMakeStrictAppend(&pSetOperator->pProjectionList, pProj)) {
return TSDB_CODE_OUT_OF_MEMORY;
}
}
@@ -3817,6 +3897,10 @@ static uint8_t calcSetOperatorPrecision(SSetOperator* pSetOperator) {
}
static int32_t translateSetOperOrderBy(STranslateContext* pCxt, SSetOperator* pSetOperator) {
+ if (NULL == pSetOperator->pOrderByList || pSetOperator->pOrderByList->length <= 0) {
+ return TSDB_CODE_SUCCESS;
+ }
+
bool other;
int32_t code = translateOrderByPosition(pCxt, pSetOperator->pProjectionList, pSetOperator->pOrderByList, &other);
if (TSDB_CODE_SUCCESS == code) {
@@ -3829,6 +3913,14 @@ static int32_t translateSetOperOrderBy(STranslateContext* pCxt, SSetOperator* pS
if (TSDB_CODE_SUCCESS == code) {
code = replaceOrderByAlias(pCxt, pSetOperator->pProjectionList, pSetOperator->pOrderByList);
}
+ if (TSDB_CODE_SUCCESS == code) {
+ SNode* pOrder = ((SOrderByExprNode*)nodesListGetNode(pSetOperator->pOrderByList, 0))->pExpr;
+ if (isPrimaryKeyImpl(pOrder)) {
+ pSetOperator->timeLineResMode = TIME_LINE_GLOBAL;
+ } else {
+ pSetOperator->timeLineResMode = TIME_LINE_NONE;
+ }
+ }
return code;
}
@@ -5823,6 +5915,9 @@ static int32_t buildCreateTopicReq(STranslateContext* pCxt, SCreateTopicStmt* pS
toName(pCxt->pParseCxt->acctId, pStmt->subDbName, pStmt->subSTbName, &name);
tNameGetFullDbName(&name, pReq->subDbName);
tNameExtractFullName(&name, pReq->subStbName);
+ if(pStmt->pQuery != NULL) {
+ code = nodesNodeToString(pStmt->pQuery, false, &pReq->ast, NULL);
+ }
} else if ('\0' != pStmt->subDbName[0]) {
pReq->subType = TOPIC_SUB_TYPE__DB;
tNameSetDbName(&name, pCxt->pParseCxt->acctId, pStmt->subDbName, strlen(pStmt->subDbName));
@@ -5845,12 +5940,108 @@ static int32_t buildCreateTopicReq(STranslateContext* pCxt, SCreateTopicStmt* pS
return code;
}
+static int32_t addTagList(SNodeList** ppList, SNode* pNode) {
+ if (NULL == *ppList) {
+ *ppList = nodesMakeList();
+ }
+
+ nodesListStrictAppend(*ppList, pNode);
+
+ return TSDB_CODE_SUCCESS;
+}
+
+static EDealRes checkColumnTagsInCond(SNode* pNode, void* pContext) {
+ SBuildTopicContext* pCxt = (SBuildTopicContext*)pContext;
+ if (QUERY_NODE_COLUMN == nodeType(pNode)) {
+ ETableColumnType type;
+ getColumnTypeFromMeta(pCxt->pMeta, ((SColumnNode*)pNode)->colName, &type);
+ if (type == TCOL_TYPE_COLUMN) {
+ pCxt->colExists = true;
+ return DEAL_RES_ERROR;
+ } else if (type == TCOL_TYPE_TAG) {
+ addTagList(&pCxt->pTags, nodesCloneNode(pNode));
+ } else {
+ pCxt->colNotFound = true;
+ return DEAL_RES_ERROR;
+ }
+ } else if (QUERY_NODE_FUNCTION == nodeType(pNode)) {
+ SFunctionNode* pFunc = (SFunctionNode*)pNode;
+ if (0 == strcasecmp(pFunc->functionName, "tbname")) {
+ addTagList(&pCxt->pTags, nodesCloneNode(pNode));
+ }
+ }
+
+ return DEAL_RES_CONTINUE;
+}
+
+static int32_t checkCollectTopicTags(STranslateContext* pCxt, SCreateTopicStmt* pStmt, STableMeta* pMeta, SNodeList** ppProjection) {
+ SBuildTopicContext colCxt = {.colExists = false, .colNotFound = false, .pMeta = pMeta, .pTags = NULL};
+ nodesWalkExprPostOrder(pStmt->pWhere, checkColumnTagsInCond, &colCxt);
+ if (colCxt.colNotFound) {
+ nodesDestroyList(colCxt.pTags);
+ return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_SYNTAX_ERROR, "Invalid column name");
+ } else if (colCxt.colExists) {
+ nodesDestroyList(colCxt.pTags);
+ return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_SYNTAX_ERROR, "Columns are forbidden in where clause");
+ }
+ if (NULL == colCxt.pTags) { // put one column to select
+// for (int32_t i = 0; i < pMeta->tableInfo.numOfColumns; ++i) {
+ SSchema* column = &pMeta->schema[0];
+ SColumnNode* col = (SColumnNode*)nodesMakeNode(QUERY_NODE_COLUMN);
+ strcpy(col->colName, column->name);
+ strcpy(col->node.aliasName, col->colName);
+ strcpy(col->node.userAlias, col->colName);
+ addTagList(&colCxt.pTags, (SNode*)col);
+// }
+ }
+
+ *ppProjection = colCxt.pTags;
+ return TSDB_CODE_SUCCESS;
+}
+
+static int32_t buildQueryForTableTopic(STranslateContext* pCxt, SCreateTopicStmt* pStmt, SNode** pSelect) {
+  SParseContext* pParCxt = pCxt->pParseCxt;
+  SName name;
+  STableMeta* pMeta = NULL;
+  int32_t code = getTableMetaImpl(pCxt, toName(pParCxt->acctId, pStmt->subDbName, pStmt->subSTbName, &name), &pMeta);
+  if (code) {
+    taosMemoryFree(pMeta);
+    return code;
+  }
+  if (TSDB_SUPER_TABLE != pMeta->tableType) {
+    taosMemoryFree(pMeta);
+    return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_SYNTAX_ERROR, "Only super table can be used");
+  }
+  SNodeList* pProjection = NULL;
+  code = checkCollectTopicTags(pCxt, pStmt, pMeta, &pProjection);
+  if (TSDB_CODE_SUCCESS == code) {
+    SRealTableNode* realTable = (SRealTableNode*)nodesMakeNode(QUERY_NODE_REAL_TABLE);
+    if (NULL == realTable) {  // avoid NULL deref below; release the collected projections
+      nodesDestroyList(pProjection);
+      taosMemoryFree(pMeta);
+      return TSDB_CODE_OUT_OF_MEMORY;
+    }
+    strcpy(realTable->table.dbName, pStmt->subDbName);
+    strcpy(realTable->table.tableName, pStmt->subSTbName);
+    strcpy(realTable->table.tableAlias, pStmt->subSTbName);
+    *pSelect = createSelectStmtImpl(true, pProjection, (SNode*)realTable);
+    ((SSelectStmt*)*pSelect)->pWhere = nodesCloneNode(pStmt->pWhere);
+    pCxt->pParseCxt->topicQuery = true;
+    code = translateQuery(pCxt, *pSelect);
+  }
+
+  taosMemoryFree(pMeta);
+  return code;
+}
+
static int32_t checkCreateTopic(STranslateContext* pCxt, SCreateTopicStmt* pStmt) {
- if (NULL == pStmt->pQuery) {
+ if (NULL == pStmt->pQuery && NULL == pStmt->pWhere) {
return TSDB_CODE_SUCCESS;
}
- if (QUERY_NODE_SELECT_STMT == nodeType(pStmt->pQuery)) {
+ if (pStmt->pWhere) {
+ return buildQueryForTableTopic(pCxt, pStmt, &pStmt->pQuery);
+ } else if (QUERY_NODE_SELECT_STMT == nodeType(pStmt->pQuery)) {
SSelectStmt* pSelect = (SSelectStmt*)pStmt->pQuery;
if (!pSelect->isDistinct &&
(NULL != pSelect->pFromTable && QUERY_NODE_REAL_TABLE == nodeType(pSelect->pFromTable)) &&
@@ -6216,7 +6407,7 @@ static int32_t subtableExprHasColumnOrPseudoColumn(SNode* pNode) {
static int32_t checkStreamQuery(STranslateContext* pCxt, SCreateStreamStmt* pStmt) {
SSelectStmt* pSelect = (SSelectStmt*)pStmt->pQuery;
if (TSDB_DATA_TYPE_TIMESTAMP != ((SExprNode*)nodesListGetNode(pSelect->pProjectionList, 0))->resType.type ||
- !pSelect->isTimeLineResult || crossTableWithoutAggOper(pSelect) || NULL != pSelect->pOrderByList ||
+ !isTimeLineQuery(pStmt->pQuery) || crossTableWithoutAggOper(pSelect) || NULL != pSelect->pOrderByList ||
crossTableWithUdaf(pSelect) || isEventWindowQuery(pSelect) || hasJsonTypeProjection(pSelect)) {
return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_STREAM_QUERY, "Unsupported stream query");
}
@@ -8008,6 +8199,9 @@ static int32_t buildKVRowForAllTags(STranslateContext* pCxt, SCreateSubTableClau
if (pTagSchema->type == TSDB_DATA_TYPE_JSON) {
isJson = true;
code = buildJsonTagVal(pCxt, pTagSchema, pVal, pTagArray, ppTag);
+ if (TSDB_CODE_SUCCESS != code) {
+ nodesDestroyNode((SNode*)pVal);
+ }
taosArrayPush(tagName, pTagSchema->name);
} else if (pVal->node.resType.type != TSDB_DATA_TYPE_NULL && !pVal->isNull) {
char* tmpVal = nodesGetValueFromNode(pVal);
@@ -8328,13 +8522,7 @@ static int32_t buildUpdateTagValReq(STranslateContext* pCxt, SAlterTableStmt* pS
SArray* pTagVals = taosArrayInit(1, sizeof(STagVal));
int32_t code = TSDB_CODE_SUCCESS;
STag* pTag = NULL;
- do {
- code = parseJsontoTagData(pStmt->pVal->literal, pTagVals, &pTag, &pCxt->msgBuf);
- if (TSDB_CODE_SUCCESS != code) {
- break;
- }
- } while (0);
-
+ code = parseJsontoTagData(pStmt->pVal->literal, pTagVals, &pTag, &pCxt->msgBuf);
taosArrayDestroy(pTagVals);
if (code != TSDB_CODE_SUCCESS) {
return code;
diff --git a/source/libs/parser/src/parUtil.c b/source/libs/parser/src/parUtil.c
index 5597bd3df8ec08efca0420c8dde034e074313cd2..1c13f66f960b2fb341b90004655b09a2e6aa6250 100644
--- a/source/libs/parser/src/parUtil.c
+++ b/source/libs/parser/src/parUtil.c
@@ -138,7 +138,7 @@ static char* getSyntaxErrFormat(int32_t errCode) {
case TSDB_CODE_PAR_CANNOT_DROP_PRIMARY_KEY:
return "Primary timestamp column cannot be dropped";
case TSDB_CODE_PAR_INVALID_MODIFY_COL:
- return "Only binary/nchar column length could be modified, and the length can only be increased, not decreased";
+ return "Only binary/nchar/geometry column length could be modified, and the length can only be increased, not decreased";
case TSDB_CODE_PAR_INVALID_TBNAME:
return "Invalid tbname pseudo column";
case TSDB_CODE_PAR_INVALID_FUNCTION_NAME:
@@ -249,6 +249,17 @@ int32_t getNumOfTags(const STableMeta* pTableMeta) { return getTableInfo(pTableM
STableComInfo getTableInfo(const STableMeta* pTableMeta) { return pTableMeta->tableInfo; }
+int32_t getTableTypeFromTableNode(SNode *pTable) {
+ if (NULL == pTable) {
+ return -1;
+ }
+ if (QUERY_NODE_REAL_TABLE != nodeType(pTable)) {
+ return -1;
+ }
+ return ((SRealTableNode *)pTable)->pMeta->tableType;
+}
+
+
STableMeta* tableMetaDup(const STableMeta* pTableMeta) {
int32_t numOfFields = TABLE_TOTAL_COL_NUM(pTableMeta);
if (numOfFields > TSDB_MAX_COLUMNS || numOfFields < TSDB_MIN_COLUMNS) {
@@ -416,7 +427,7 @@ int32_t parseJsontoTagData(const char* json, SArray* pTagVals, STag** ppTag, voi
end:
taosHashCleanup(keyHash);
if (retCode == TSDB_CODE_SUCCESS) {
- tTagNew(pTagVals, 1, true, ppTag);
+ retCode = tTagNew(pTagVals, 1, true, ppTag);
}
for (int i = 0; i < taosArrayGetSize(pTagVals); ++i) {
STagVal* p = (STagVal*)taosArrayGet(pTagVals, i);
@@ -499,7 +510,7 @@ int32_t getVnodeSysTableTargetName(int32_t acctId, SNode* pWhere, SName* pName)
static int32_t userAuthToString(int32_t acctId, const char* pUser, const char* pDb, const char* pTable, AUTH_TYPE type,
char* pStr) {
- return sprintf(pStr, "%s*%d*%s*%s*%d", pUser, acctId, pDb, (NULL != pTable && '\0' == pTable[0]) ? NULL : pTable,
+ return sprintf(pStr, "%s*%d*%s*%s*%d", pUser, acctId, pDb, (NULL == pTable || '\0' == pTable[0]) ? "``" : pTable,
type);
}
@@ -525,6 +536,9 @@ static void getStringFromAuthStr(const char* pStart, char* pStr, char** pNext) {
strncpy(pStr, pStart, p - pStart);
*pNext = ++p;
}
+ if (*pStart == '`' && *(pStart + 1) == '`') {
+ *pStr = 0;
+ }
}
static void stringToUserAuth(const char* pStr, int32_t len, SUserAuthInfo* pUserAuth) {
@@ -533,7 +547,11 @@ static void stringToUserAuth(const char* pStr, int32_t len, SUserAuthInfo* pUser
pUserAuth->tbName.acctId = getIntegerFromAuthStr(p, &p);
getStringFromAuthStr(p, pUserAuth->tbName.dbname, &p);
getStringFromAuthStr(p, pUserAuth->tbName.tname, &p);
- pUserAuth->tbName.type = TSDB_TABLE_NAME_T;
+ if (pUserAuth->tbName.tname[0]) {
+ pUserAuth->tbName.type = TSDB_TABLE_NAME_T;
+ } else {
+ pUserAuth->tbName.type = TSDB_DB_NAME_T;
+ }
pUserAuth->type = getIntegerFromAuthStr(p, &p);
}
@@ -667,6 +685,22 @@ int32_t buildCatalogReq(const SParseMetaCache* pMetaCache, SCatalogReq* pCatalog
return code;
}
+
+SNode* createSelectStmtImpl(bool isDistinct, SNodeList* pProjectionList, SNode* pTable) {
+ SSelectStmt* select = (SSelectStmt*)nodesMakeNode(QUERY_NODE_SELECT_STMT);
+ if (NULL == select) {
+ return NULL;
+ }
+ select->isDistinct = isDistinct;
+ select->pProjectionList = pProjectionList;
+ select->pFromTable = pTable;
+ sprintf(select->stmtName, "%p", select);
+ select->timeLineResMode = select->isDistinct ? TIME_LINE_NONE : TIME_LINE_GLOBAL;
+ select->onlyHasKeepOrderFunc = true;
+ select->timeRange = TSWINDOW_INITIALIZER;
+ return (SNode*)select;
+}
+
static int32_t putMetaDataToHash(const char* pKey, int32_t len, const SArray* pData, int32_t index, SHashObj** pHash) {
if (NULL == *pHash) {
*pHash = taosHashInit(4, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), false, HASH_NO_LOCK);
diff --git a/source/libs/parser/src/sql.c b/source/libs/parser/src/sql.c
index 43ef166c41dc1905f35ca4da936453f1cc12c23c..3f05e3269ac84a60bb6fa752e63165451dbb1e85 100644
--- a/source/libs/parser/src/sql.c
+++ b/source/libs/parser/src/sql.c
@@ -140,18 +140,18 @@ typedef union {
#define ParseCTX_FETCH
#define ParseCTX_STORE
#define YYFALLBACK 1
-#define YYNSTATE 792
-#define YYNRULE 595
-#define YYNRULE_WITH_ACTION 595
+#define YYNSTATE 794
+#define YYNRULE 596
+#define YYNRULE_WITH_ACTION 596
#define YYNTOKEN 337
-#define YY_MAX_SHIFT 791
-#define YY_MIN_SHIFTREDUCE 1169
-#define YY_MAX_SHIFTREDUCE 1763
-#define YY_ERROR_ACTION 1764
-#define YY_ACCEPT_ACTION 1765
-#define YY_NO_ACTION 1766
-#define YY_MIN_REDUCE 1767
-#define YY_MAX_REDUCE 2361
+#define YY_MAX_SHIFT 793
+#define YY_MIN_SHIFTREDUCE 1172
+#define YY_MAX_SHIFTREDUCE 1767
+#define YY_ERROR_ACTION 1768
+#define YY_ACCEPT_ACTION 1769
+#define YY_NO_ACTION 1770
+#define YY_MIN_REDUCE 1771
+#define YY_MAX_REDUCE 2366
/************* End control #defines *******************************************/
#define YY_NLOOKAHEAD ((int)(sizeof(yy_lookahead)/sizeof(yy_lookahead[0])))
@@ -218,559 +218,560 @@ typedef union {
** yy_default[] Default action for each state.
**
*********** Begin parsing tables **********************************************/
-#define YY_ACTTAB_COUNT (2716)
+#define YY_ACTTAB_COUNT (2730)
static const YYACTIONTYPE yy_action[] = {
- /* 0 */ 2173, 169, 2337, 38, 303, 2332, 170, 445, 1779, 1885,
- /* 10 */ 660, 444, 48, 46, 1691, 1933, 181, 2337, 2151, 1790,
- /* 20 */ 399, 2336, 1540, 41, 40, 2333, 2335, 47, 45, 44,
- /* 30 */ 43, 42, 2159, 1621, 1567, 1538, 366, 2046, 2191, 41,
- /* 40 */ 40, 451, 2155, 47, 45, 44, 43, 42, 402, 663,
- /* 50 */ 2141, 1568, 698, 1566, 620, 526, 164, 2332, 527, 1803,
- /* 60 */ 2173, 218, 1616, 380, 1946, 529, 167, 1810, 19, 2141,
- /* 70 */ 699, 1995, 2338, 188, 1947, 1546, 184, 2333, 646, 2157,
- /* 80 */ 396, 2106, 2337, 358, 2172, 2332, 345, 2208, 1984, 692,
- /* 90 */ 110, 2174, 702, 2176, 2177, 697, 620, 692, 2191, 2332,
- /* 100 */ 788, 2336, 185, 15, 2261, 2333, 2334, 220, 395, 2257,
- /* 110 */ 2141, 529, 698, 1810, 2338, 188, 48, 46, 1753, 2333,
- /* 120 */ 646, 190, 682, 1944, 399, 1789, 1540, 1650, 251, 2287,
- /* 130 */ 47, 45, 44, 43, 42, 668, 1567, 1621, 205, 1538,
- /* 140 */ 1623, 1624, 133, 2063, 2172, 682, 1944, 2208, 1304, 566,
- /* 150 */ 110, 2174, 702, 2176, 2177, 697, 2124, 692, 2061, 669,
- /* 160 */ 145, 1565, 152, 2232, 2261, 193, 1616, 668, 395, 2257,
- /* 170 */ 1596, 1606, 19, 1395, 1396, 2141, 1622, 1625, 84, 1546,
- /* 180 */ 1920, 83, 657, 142, 1651, 1768, 284, 541, 1306, 2056,
- /* 190 */ 1541, 123, 1539, 681, 122, 121, 120, 119, 118, 117,
- /* 200 */ 116, 115, 114, 262, 788, 668, 123, 15, 2173, 122,
- /* 210 */ 121, 120, 119, 118, 117, 116, 115, 114, 699, 666,
- /* 220 */ 1812, 2056, 1544, 1545, 1767, 1595, 1598, 1599, 1600, 1601,
- /* 230 */ 1602, 1603, 1604, 1605, 694, 690, 1614, 1615, 1617, 1618,
- /* 240 */ 1619, 1620, 2, 681, 1623, 1624, 2191, 737, 132, 131,
- /* 250 */ 130, 129, 128, 127, 126, 125, 124, 677, 2141, 2056,
- /* 260 */ 698, 37, 397, 1645, 1646, 1647, 1648, 1649, 1653, 1654,
- /* 270 */ 1655, 1656, 543, 2173, 1596, 1606, 51, 682, 1944, 66,
- /* 280 */ 1622, 1625, 9, 660, 659, 186, 2269, 2270, 284, 140,
- /* 290 */ 2274, 1227, 2172, 1226, 1541, 2208, 1539, 133, 110, 2174,
- /* 300 */ 702, 2176, 2177, 697, 571, 692, 138, 531, 682, 1944,
- /* 310 */ 2352, 2191, 2261, 528, 41, 40, 395, 2257, 47, 45,
- /* 320 */ 44, 43, 42, 2141, 1228, 698, 1544, 1545, 57, 1595,
- /* 330 */ 1598, 1599, 1600, 1601, 1602, 1603, 1604, 1605, 694, 690,
- /* 340 */ 1614, 1615, 1617, 1618, 1619, 1620, 2, 12, 48, 46,
- /* 350 */ 1227, 2173, 1226, 408, 407, 2336, 399, 2172, 1540, 2276,
- /* 360 */ 2208, 696, 1922, 110, 2174, 702, 2176, 2177, 697, 1621,
- /* 370 */ 692, 1538, 428, 1997, 181, 185, 1547, 2261, 62, 639,
- /* 380 */ 379, 395, 2257, 1228, 2063, 2273, 41, 40, 1995, 2191,
- /* 390 */ 47, 45, 44, 43, 42, 2047, 392, 1360, 1616, 2060,
- /* 400 */ 669, 2141, 2288, 698, 19, 534, 150, 2191, 527, 1803,
- /* 410 */ 1730, 1546, 1351, 727, 726, 725, 1355, 724, 1357, 1358,
- /* 420 */ 723, 720, 30, 1366, 717, 1368, 1369, 714, 711, 708,
- /* 430 */ 667, 1695, 191, 1565, 107, 2172, 788, 1565, 2208, 15,
- /* 440 */ 101, 339, 2174, 702, 2176, 2177, 697, 695, 692, 683,
- /* 450 */ 2226, 143, 48, 46, 1626, 87, 635, 34, 234, 1936,
- /* 460 */ 399, 638, 1540, 41, 40, 1937, 681, 47, 45, 44,
- /* 470 */ 43, 42, 368, 1621, 174, 1538, 1623, 1624, 640, 682,
- /* 480 */ 1944, 1939, 560, 556, 552, 548, 2042, 233, 285, 393,
- /* 490 */ 735, 157, 156, 732, 731, 730, 154, 167, 2173, 449,
- /* 500 */ 1202, 1684, 1616, 1565, 1719, 1946, 1596, 1606, 699, 12,
- /* 510 */ 2295, 10, 1622, 1625, 2042, 1546, 41, 40, 682, 1944,
- /* 520 */ 47, 45, 44, 43, 42, 1564, 1541, 88, 1539, 62,
- /* 530 */ 231, 93, 201, 1550, 191, 489, 2191, 1568, 450, 1204,
- /* 540 */ 788, 1207, 1208, 49, 1788, 641, 636, 629, 2141, 51,
- /* 550 */ 698, 632, 631, 1717, 1718, 1720, 1721, 1722, 1544, 1545,
- /* 560 */ 203, 1595, 1598, 1599, 1600, 1601, 1602, 1603, 1604, 1605,
- /* 570 */ 694, 690, 1614, 1615, 1617, 1618, 1619, 1620, 2, 62,
- /* 580 */ 1623, 1624, 2172, 1450, 1451, 2208, 62, 1597, 110, 2174,
- /* 590 */ 702, 2176, 2177, 697, 2141, 692, 209, 208, 230, 224,
- /* 600 */ 2352, 523, 2261, 229, 737, 539, 395, 2257, 2173, 521,
- /* 610 */ 1596, 1606, 517, 513, 682, 1944, 1622, 1625, 699, 488,
- /* 620 */ 2308, 682, 1944, 222, 607, 2151, 52, 62, 294, 295,
- /* 630 */ 1541, 648, 1539, 293, 459, 584, 583, 582, 684, 1935,
- /* 640 */ 2233, 474, 574, 139, 578, 2127, 2191, 1787, 577, 2155,
- /* 650 */ 657, 142, 1549, 576, 581, 374, 373, 2027, 2141, 575,
- /* 660 */ 698, 2134, 1544, 1545, 1566, 1595, 1598, 1599, 1600, 1601,
- /* 670 */ 1602, 1603, 1604, 1605, 694, 690, 1614, 1615, 1617, 1618,
- /* 680 */ 1619, 1620, 2, 48, 46, 191, 2157, 682, 1944, 202,
- /* 690 */ 1760, 399, 2172, 1540, 421, 2208, 692, 2141, 110, 2174,
- /* 700 */ 702, 2176, 2177, 697, 1621, 692, 1538, 475, 620, 2173,
- /* 710 */ 2352, 2332, 2261, 443, 402, 442, 395, 2257, 1921, 699,
- /* 720 */ 404, 589, 167, 1990, 1992, 2276, 2338, 188, 682, 1944,
- /* 730 */ 1946, 2333, 646, 1616, 1631, 191, 599, 1991, 1992, 2173,
- /* 740 */ 1565, 433, 191, 682, 1944, 441, 1546, 2191, 542, 699,
- /* 750 */ 247, 2272, 598, 282, 2269, 656, 12, 134, 655, 2141,
- /* 760 */ 2332, 698, 405, 1941, 2276, 596, 592, 594, 435, 431,
- /* 770 */ 167, 788, 1546, 586, 49, 644, 188, 2191, 1946, 246,
- /* 780 */ 2333, 646, 191, 191, 1467, 1468, 747, 48, 46, 2141,
- /* 790 */ 2271, 698, 1759, 2172, 1315, 399, 2208, 1540, 250, 111,
- /* 800 */ 2174, 702, 2176, 2177, 697, 1652, 692, 1314, 1621, 1552,
- /* 810 */ 1538, 1623, 1624, 2261, 657, 142, 469, 2260, 2257, 70,
- /* 820 */ 1466, 1469, 69, 2172, 1919, 468, 2208, 728, 1997, 171,
- /* 830 */ 2174, 702, 2176, 2177, 697, 389, 692, 1616, 1786, 2173,
- /* 840 */ 1210, 1596, 1606, 1995, 682, 1944, 1564, 1622, 1625, 699,
- /* 850 */ 1546, 735, 157, 156, 732, 731, 730, 154, 452, 1997,
- /* 860 */ 686, 1541, 2233, 1539, 252, 155, 394, 1511, 1512, 621,
- /* 870 */ 2298, 453, 498, 36, 1995, 788, 1929, 2191, 15, 41,
- /* 880 */ 40, 1931, 35, 47, 45, 44, 43, 42, 2141, 2141,
- /* 890 */ 1597, 698, 1657, 1544, 1545, 1927, 1595, 1598, 1599, 1600,
- /* 900 */ 1601, 1602, 1603, 1604, 1605, 694, 690, 1614, 1615, 1617,
- /* 910 */ 1618, 1619, 1620, 2, 1785, 1623, 1624, 187, 2269, 2270,
- /* 920 */ 1765, 140, 2274, 2172, 1784, 56, 2208, 1783, 1782, 171,
- /* 930 */ 2174, 702, 2176, 2177, 697, 349, 692, 1563, 44, 43,
- /* 940 */ 42, 1997, 259, 2173, 482, 1596, 1606, 496, 403, 1781,
- /* 950 */ 495, 1622, 1625, 699, 1997, 627, 1995, 735, 157, 156,
- /* 960 */ 732, 731, 730, 154, 2141, 1541, 465, 1539, 497, 1996,
- /* 970 */ 2299, 682, 1944, 467, 2141, 1540, 1319, 2141, 2141, 41,
- /* 980 */ 40, 2191, 1778, 47, 45, 44, 43, 42, 1538, 1318,
- /* 990 */ 1568, 616, 414, 2141, 1777, 698, 413, 1544, 1545, 2141,
- /* 1000 */ 1595, 1598, 1599, 1600, 1601, 1602, 1603, 1604, 1605, 694,
- /* 1010 */ 690, 1614, 1615, 1617, 1618, 1619, 1620, 2, 1843, 367,
- /* 1020 */ 1565, 682, 1944, 657, 142, 580, 579, 2172, 1546, 1948,
- /* 1030 */ 2208, 455, 2141, 110, 2174, 702, 2176, 2177, 697, 620,
- /* 1040 */ 692, 661, 2332, 1776, 2141, 2352, 570, 2261, 1230, 1231,
- /* 1050 */ 569, 395, 2257, 788, 500, 562, 561, 2338, 188, 564,
- /* 1060 */ 563, 493, 2333, 646, 487, 486, 485, 484, 481, 480,
- /* 1070 */ 479, 478, 477, 473, 472, 471, 470, 348, 462, 461,
- /* 1080 */ 460, 652, 457, 456, 365, 759, 757, 1775, 765, 764,
- /* 1090 */ 763, 762, 411, 2141, 761, 760, 146, 755, 754, 753,
- /* 1100 */ 752, 751, 750, 749, 159, 745, 744, 743, 410, 409,
- /* 1110 */ 740, 739, 738, 177, 176, 168, 682, 1944, 2135, 1774,
- /* 1120 */ 323, 1773, 1772, 408, 407, 256, 189, 2269, 2270, 2173,
- /* 1130 */ 140, 2274, 371, 1554, 321, 73, 665, 2141, 72, 699,
- /* 1140 */ 693, 2326, 261, 1541, 1621, 1539, 1547, 41, 40, 346,
- /* 1150 */ 1688, 47, 45, 44, 43, 42, 2042, 1771, 682, 1944,
- /* 1160 */ 216, 508, 506, 503, 54, 620, 3, 2191, 2332, 2141,
- /* 1170 */ 1597, 2141, 2141, 1616, 649, 1544, 1545, 2173, 298, 2141,
- /* 1180 */ 1770, 698, 260, 2338, 188, 2151, 1546, 699, 2333, 646,
- /* 1190 */ 41, 40, 14, 13, 47, 45, 44, 43, 42, 2160,
- /* 1200 */ 62, 372, 207, 370, 369, 74, 568, 2141, 87, 2155,
- /* 1210 */ 689, 688, 605, 2172, 144, 2191, 2208, 2232, 1886, 110,
- /* 1220 */ 2174, 702, 2176, 2177, 697, 91, 692, 2141, 570, 698,
- /* 1230 */ 2141, 2352, 569, 2261, 1940, 2173, 572, 395, 2257, 109,
- /* 1240 */ 148, 729, 135, 1707, 1988, 699, 2157, 2280, 733, 734,
- /* 1250 */ 317, 1988, 1988, 1974, 748, 82, 692, 1906, 1302, 620,
- /* 1260 */ 239, 2172, 2332, 237, 2208, 2281, 1684, 333, 2174, 702,
- /* 1270 */ 2176, 2177, 697, 2191, 692, 1207, 1208, 2338, 188, 81,
- /* 1280 */ 80, 448, 2333, 646, 200, 2141, 2173, 698, 241, 1780,
- /* 1290 */ 573, 240, 619, 682, 1944, 1664, 699, 440, 438, 243,
- /* 1300 */ 2301, 1555, 242, 1550, 245, 1762, 1763, 244, 347, 1830,
- /* 1310 */ 642, 429, 1300, 679, 427, 423, 419, 416, 441, 2172,
- /* 1320 */ 653, 602, 2208, 601, 2191, 110, 2174, 702, 2176, 2177,
- /* 1330 */ 697, 585, 692, 1558, 1560, 1821, 2141, 2352, 698, 2261,
- /* 1340 */ 1819, 155, 645, 395, 2257, 2332, 690, 1614, 1615, 1617,
- /* 1350 */ 1618, 1619, 1620, 2173, 166, 249, 191, 587, 155, 248,
- /* 1360 */ 644, 188, 590, 699, 1642, 2333, 646, 279, 50, 50,
- /* 1370 */ 2172, 266, 155, 2208, 50, 291, 110, 2174, 702, 2176,
- /* 1380 */ 2177, 697, 633, 692, 682, 1944, 2173, 137, 2236, 106,
- /* 1390 */ 2261, 2191, 682, 1944, 395, 2257, 699, 682, 1944, 103,
- /* 1400 */ 71, 1506, 2162, 2141, 680, 698, 1687, 153, 155, 14,
- /* 1410 */ 13, 650, 304, 2173, 64, 273, 55, 406, 1509, 90,
- /* 1420 */ 1548, 1813, 353, 699, 2191, 378, 50, 600, 1716, 1715,
- /* 1430 */ 741, 268, 664, 50, 1464, 296, 2141, 2172, 698, 2192,
- /* 1440 */ 2208, 1884, 706, 110, 2174, 702, 2176, 2177, 697, 2173,
- /* 1450 */ 692, 2191, 1280, 1883, 153, 2234, 155, 2261, 2164, 699,
- /* 1460 */ 674, 395, 2257, 2141, 136, 698, 412, 300, 1345, 153,
- /* 1470 */ 2172, 783, 1261, 2208, 1658, 2173, 110, 2174, 702, 2176,
- /* 1480 */ 2177, 697, 2051, 692, 1804, 699, 1607, 2191, 685, 742,
- /* 1490 */ 2261, 1809, 1985, 316, 395, 2257, 2291, 2172, 658, 2141,
- /* 1500 */ 2208, 698, 1373, 111, 2174, 702, 2176, 2177, 697, 281,
- /* 1510 */ 692, 1278, 1262, 2191, 1377, 278, 1384, 2261, 1, 5,
- /* 1520 */ 415, 687, 2257, 420, 1382, 2141, 362, 698, 1571, 158,
- /* 1530 */ 437, 436, 196, 700, 439, 198, 2208, 195, 1487, 111,
- /* 1540 */ 2174, 702, 2176, 2177, 697, 311, 692, 206, 454, 1568,
- /* 1550 */ 2052, 458, 491, 2261, 2173, 463, 1563, 357, 2257, 2172,
- /* 1560 */ 476, 2044, 2208, 483, 699, 172, 2174, 702, 2176, 2177,
- /* 1570 */ 697, 490, 692, 492, 501, 502, 499, 1551, 210, 504,
- /* 1580 */ 2173, 211, 505, 213, 507, 509, 1569, 524, 4, 525,
- /* 1590 */ 699, 532, 2191, 533, 1566, 221, 536, 535, 1570, 223,
- /* 1600 */ 1572, 537, 565, 538, 2141, 1997, 698, 540, 544, 226,
- /* 1610 */ 228, 85, 364, 86, 2115, 232, 647, 2353, 2191, 112,
- /* 1620 */ 1995, 2112, 352, 382, 567, 604, 1934, 606, 2111, 89,
- /* 1630 */ 2141, 610, 698, 609, 312, 151, 611, 253, 2172, 236,
- /* 1640 */ 1930, 2208, 2173, 238, 111, 2174, 702, 2176, 2177, 697,
- /* 1650 */ 160, 692, 699, 161, 1932, 255, 1928, 162, 2261, 2173,
- /* 1660 */ 163, 614, 257, 2258, 2172, 1494, 617, 2208, 2292, 699,
- /* 1670 */ 340, 2174, 702, 2176, 2177, 697, 672, 692, 2173, 615,
- /* 1680 */ 2191, 624, 634, 645, 2302, 383, 2332, 8, 699, 630,
- /* 1690 */ 2307, 2306, 2141, 264, 698, 267, 2283, 2191, 384, 643,
- /* 1700 */ 637, 644, 188, 385, 625, 623, 2333, 646, 622, 2141,
- /* 1710 */ 1684, 698, 654, 277, 651, 141, 2191, 2355, 1567, 175,
- /* 1720 */ 2331, 390, 2277, 662, 388, 286, 2172, 274, 2141, 2208,
- /* 1730 */ 698, 96, 340, 2174, 702, 2176, 2177, 697, 272, 692,
- /* 1740 */ 1573, 313, 2057, 2172, 280, 276, 2208, 670, 275, 172,
- /* 1750 */ 2174, 702, 2176, 2177, 697, 675, 692, 671, 2071, 314,
- /* 1760 */ 676, 2070, 2172, 98, 2173, 2208, 2069, 391, 340, 2174,
- /* 1770 */ 702, 2176, 2177, 697, 696, 692, 315, 1945, 100, 61,
- /* 1780 */ 2242, 2173, 102, 704, 1989, 318, 784, 1907, 307, 322,
- /* 1790 */ 785, 699, 787, 53, 354, 327, 355, 342, 2133, 320,
- /* 1800 */ 2173, 2354, 2191, 341, 2132, 331, 2131, 78, 2128, 417,
- /* 1810 */ 699, 418, 1531, 1532, 2141, 194, 698, 422, 2126, 2191,
- /* 1820 */ 424, 425, 426, 2125, 398, 363, 2123, 430, 2122, 432,
- /* 1830 */ 2121, 2141, 434, 698, 2102, 1522, 197, 2101, 2191, 199,
- /* 1840 */ 1490, 79, 1489, 400, 2083, 2082, 2081, 446, 2172, 447,
- /* 1850 */ 2141, 2208, 698, 2080, 339, 2174, 702, 2176, 2177, 697,
- /* 1860 */ 2079, 692, 1441, 2227, 2035, 2172, 147, 608, 2208, 2034,
- /* 1870 */ 2032, 340, 2174, 702, 2176, 2177, 697, 2031, 692, 2030,
- /* 1880 */ 2033, 2029, 2028, 2026, 2172, 791, 2025, 2208, 2024, 204,
- /* 1890 */ 340, 2174, 702, 2176, 2177, 697, 464, 692, 2173, 310,
- /* 1900 */ 2023, 466, 2037, 2022, 2021, 2020, 2019, 2018, 699, 2017,
- /* 1910 */ 2016, 2015, 2014, 2013, 2012, 180, 149, 2007, 2006, 2005,
- /* 1920 */ 2036, 2004, 2003, 781, 777, 773, 769, 2011, 308, 2010,
- /* 1930 */ 2009, 2173, 2008, 2002, 2001, 2000, 2191, 1443, 494, 1999,
- /* 1940 */ 1998, 699, 350, 351, 1849, 1316, 1320, 212, 2141, 1848,
- /* 1950 */ 698, 214, 1847, 215, 1845, 1842, 1841, 511, 1834, 1823,
- /* 1960 */ 1312, 510, 1799, 1798, 2173, 227, 514, 512, 108, 2191,
- /* 1970 */ 2100, 301, 2090, 2078, 699, 516, 518, 522, 2077, 1209,
- /* 1980 */ 2055, 2141, 603, 698, 1923, 2208, 520, 515, 335, 2174,
- /* 1990 */ 702, 2176, 2177, 697, 519, 692, 1844, 182, 2161, 217,
- /* 2000 */ 2173, 1840, 2191, 77, 678, 547, 76, 183, 530, 219,
- /* 2010 */ 699, 545, 1254, 225, 2141, 2172, 698, 546, 2208, 1838,
- /* 2020 */ 549, 324, 2174, 702, 2176, 2177, 697, 551, 692, 1836,
- /* 2030 */ 550, 554, 553, 2173, 555, 1833, 558, 557, 2191, 288,
- /* 2040 */ 1818, 1816, 559, 699, 287, 1817, 1815, 1795, 2172, 1925,
- /* 2050 */ 2141, 2208, 698, 1389, 325, 2174, 702, 2176, 2177, 697,
- /* 2060 */ 63, 692, 2173, 1924, 254, 1388, 1303, 1301, 1299, 1298,
- /* 2070 */ 756, 2191, 699, 1297, 1296, 1831, 1290, 1295, 375, 1292,
- /* 2080 */ 758, 235, 1822, 2141, 2172, 698, 1291, 2208, 376, 1820,
- /* 2090 */ 326, 2174, 702, 2176, 2177, 697, 1289, 692, 377, 588,
- /* 2100 */ 2191, 597, 591, 1794, 593, 1793, 595, 1792, 113, 1516,
- /* 2110 */ 1520, 29, 2141, 1518, 698, 1515, 2099, 2172, 58, 67,
- /* 2120 */ 2208, 1496, 2173, 332, 2174, 702, 2176, 2177, 697, 2089,
- /* 2130 */ 692, 1498, 699, 612, 2076, 2074, 2337, 20, 17, 6,
- /* 2140 */ 31, 2173, 7, 21, 22, 271, 2172, 1500, 270, 2208,
- /* 2150 */ 33, 699, 336, 2174, 702, 2176, 2177, 697, 613, 692,
- /* 2160 */ 2191, 1732, 381, 258, 265, 263, 626, 628, 165, 2162,
- /* 2170 */ 618, 65, 2141, 173, 698, 24, 1714, 1706, 1747, 2191,
- /* 2180 */ 269, 1746, 386, 1751, 32, 1750, 92, 283, 387, 2173,
- /* 2190 */ 2075, 2141, 178, 698, 1752, 60, 1753, 2073, 2072, 699,
- /* 2200 */ 1681, 1680, 2054, 95, 289, 94, 2172, 2053, 2173, 2208,
- /* 2210 */ 97, 25, 328, 2174, 702, 2176, 2177, 697, 699, 692,
- /* 2220 */ 26, 290, 1712, 292, 297, 2172, 68, 2191, 2208, 99,
- /* 2230 */ 673, 337, 2174, 702, 2176, 2177, 697, 299, 692, 2141,
- /* 2240 */ 23, 698, 1846, 103, 11, 1633, 2191, 13, 302, 1556,
- /* 2250 */ 59, 179, 1643, 1611, 1588, 18, 2173, 1632, 2141, 1609,
- /* 2260 */ 698, 2211, 691, 39, 192, 1608, 699, 1580, 16, 27,
- /* 2270 */ 28, 705, 401, 2172, 701, 707, 2208, 2173, 709, 329,
- /* 2280 */ 2174, 702, 2176, 2177, 697, 1374, 692, 699, 703, 1371,
- /* 2290 */ 710, 712, 2172, 1370, 2191, 2208, 713, 715, 338, 2174,
- /* 2300 */ 702, 2176, 2177, 697, 718, 692, 2141, 1367, 698, 716,
- /* 2310 */ 721, 719, 584, 583, 582, 2191, 1361, 1359, 722, 574,
- /* 2320 */ 139, 578, 104, 305, 1365, 577, 105, 2141, 1383, 698,
- /* 2330 */ 576, 581, 374, 373, 1364, 1363, 575, 75, 1379, 1362,
- /* 2340 */ 2172, 1252, 736, 2208, 1284, 2173, 330, 2174, 702, 2176,
- /* 2350 */ 2177, 697, 1283, 692, 1282, 699, 1281, 1279, 1277, 1276,
- /* 2360 */ 1275, 2172, 1310, 2173, 2208, 746, 1270, 343, 2174, 702,
- /* 2370 */ 2176, 2177, 697, 699, 692, 306, 1273, 1272, 1271, 1307,
- /* 2380 */ 1269, 1268, 1267, 2191, 1305, 1264, 1263, 1260, 1259, 1258,
- /* 2390 */ 1257, 1839, 766, 2173, 1837, 2141, 770, 698, 768, 772,
- /* 2400 */ 1835, 2191, 776, 699, 774, 767, 1832, 771, 778, 780,
- /* 2410 */ 1814, 775, 782, 2141, 779, 698, 1199, 1791, 309, 786,
- /* 2420 */ 1766, 1542, 790, 319, 2173, 789, 1766, 1766, 1766, 2172,
- /* 2430 */ 1766, 2191, 2208, 1766, 699, 344, 2174, 702, 2176, 2177,
- /* 2440 */ 697, 2173, 692, 2141, 1766, 698, 1766, 2172, 1766, 1766,
- /* 2450 */ 2208, 699, 1766, 2185, 2174, 702, 2176, 2177, 697, 1766,
- /* 2460 */ 692, 1766, 2191, 1766, 1766, 1766, 1766, 1766, 1766, 1766,
- /* 2470 */ 1766, 1766, 1766, 1766, 2141, 1766, 698, 2172, 1766, 2191,
- /* 2480 */ 2208, 1766, 1766, 2184, 2174, 702, 2176, 2177, 697, 1766,
- /* 2490 */ 692, 2141, 2173, 698, 1766, 1766, 1766, 1766, 1766, 1766,
- /* 2500 */ 1766, 1766, 699, 1766, 1766, 1766, 1766, 1766, 2172, 1766,
- /* 2510 */ 1766, 2208, 1766, 2173, 2183, 2174, 702, 2176, 2177, 697,
- /* 2520 */ 1766, 692, 1766, 699, 1766, 2172, 1766, 1766, 2208, 1766,
- /* 2530 */ 2191, 359, 2174, 702, 2176, 2177, 697, 1766, 692, 1766,
- /* 2540 */ 2173, 1766, 2141, 1766, 698, 1766, 1766, 1766, 1766, 1766,
- /* 2550 */ 699, 2191, 1766, 1766, 1766, 1766, 1766, 1766, 1766, 1766,
- /* 2560 */ 1766, 1766, 1766, 2141, 1766, 698, 1766, 1766, 1766, 1766,
- /* 2570 */ 1766, 1766, 1766, 1766, 1766, 1766, 2172, 1766, 2191, 2208,
- /* 2580 */ 1766, 1766, 360, 2174, 702, 2176, 2177, 697, 1766, 692,
- /* 2590 */ 2141, 1766, 698, 1766, 1766, 1766, 1766, 2172, 1766, 1766,
- /* 2600 */ 2208, 2173, 1766, 356, 2174, 702, 2176, 2177, 697, 1766,
- /* 2610 */ 692, 699, 1766, 1766, 1766, 1766, 1766, 1766, 2173, 1766,
- /* 2620 */ 1766, 1766, 1766, 1766, 2172, 1766, 1766, 2208, 699, 1766,
- /* 2630 */ 361, 2174, 702, 2176, 2177, 697, 1766, 692, 1766, 2191,
- /* 2640 */ 1766, 1766, 1766, 1766, 1766, 1766, 1766, 1766, 1766, 1766,
- /* 2650 */ 1766, 2141, 1766, 698, 1766, 1766, 2191, 1766, 1766, 1766,
- /* 2660 */ 1766, 1766, 1766, 1766, 1766, 1766, 1766, 1766, 2141, 1766,
- /* 2670 */ 698, 1766, 1766, 1766, 1766, 1766, 1766, 1766, 1766, 1766,
- /* 2680 */ 1766, 1766, 1766, 1766, 1766, 700, 1766, 1766, 2208, 1766,
- /* 2690 */ 1766, 335, 2174, 702, 2176, 2177, 697, 1766, 692, 1766,
- /* 2700 */ 1766, 1766, 2172, 1766, 1766, 2208, 1766, 1766, 334, 2174,
- /* 2710 */ 702, 2176, 2177, 697, 1766, 692,
+ /* 0 */ 2177, 2067, 170, 218, 1783, 670, 447, 531, 665, 1814,
+ /* 10 */ 662, 446, 48, 46, 1694, 1937, 2065, 671, 647, 1794,
+ /* 20 */ 401, 2337, 1543, 41, 40, 410, 409, 47, 45, 44,
+ /* 30 */ 43, 42, 686, 1624, 2237, 1541, 646, 188, 2195, 41,
+ /* 40 */ 40, 2338, 648, 47, 45, 44, 43, 42, 1550, 2138,
+ /* 50 */ 2145, 1571, 700, 622, 1568, 622, 2337, 543, 2337, 2060,
+ /* 60 */ 2177, 181, 1619, 47, 45, 44, 43, 42, 19, 2145,
+ /* 70 */ 701, 2343, 188, 2343, 188, 1549, 2338, 648, 2338, 648,
+ /* 80 */ 220, 368, 2050, 360, 531, 2176, 1814, 2212, 107, 184,
+ /* 90 */ 110, 2178, 704, 2180, 2181, 699, 622, 694, 2195, 2337,
+ /* 100 */ 790, 1988, 185, 15, 2265, 143, 87, 641, 397, 2261,
+ /* 110 */ 2145, 1793, 700, 1940, 2343, 188, 48, 46, 2067, 2338,
+ /* 120 */ 648, 190, 2155, 370, 401, 2342, 1543, 1653, 2337, 2291,
+ /* 130 */ 394, 683, 1943, 2064, 671, 2195, 1939, 1624, 205, 1541,
+ /* 140 */ 1626, 1627, 453, 1318, 2341, 2176, 2159, 2212, 2338, 2340,
+ /* 150 */ 110, 2178, 704, 2180, 2181, 699, 1317, 694, 1569, 404,
+ /* 160 */ 145, 2145, 152, 2236, 2265, 181, 1619, 164, 397, 2261,
+ /* 170 */ 1599, 1609, 19, 2001, 382, 1950, 1625, 1628, 84, 1549,
+ /* 180 */ 381, 83, 1999, 2161, 1654, 1772, 2051, 347, 1999, 640,
+ /* 190 */ 1544, 123, 1542, 694, 122, 121, 120, 119, 118, 117,
+ /* 200 */ 116, 115, 114, 263, 790, 1553, 123, 15, 2177, 122,
+ /* 210 */ 121, 120, 119, 118, 117, 116, 115, 114, 701, 669,
+ /* 220 */ 1816, 500, 1547, 1548, 1771, 1598, 1601, 1602, 1603, 1604,
+ /* 230 */ 1605, 1606, 1607, 1608, 696, 692, 1617, 1618, 1620, 1621,
+ /* 240 */ 1622, 1623, 2, 642, 1626, 1627, 2195, 1792, 132, 131,
+ /* 250 */ 130, 129, 128, 127, 126, 125, 124, 670, 2145, 62,
+ /* 260 */ 700, 37, 399, 1648, 1649, 1650, 1651, 1652, 1656, 1657,
+ /* 270 */ 1658, 1659, 38, 305, 1599, 1609, 1568, 287, 1398, 1399,
+ /* 280 */ 1625, 1628, 1205, 683, 41, 40, 684, 1948, 47, 45,
+ /* 290 */ 44, 43, 42, 2176, 1544, 2212, 1542, 2145, 110, 2178,
+ /* 300 */ 704, 2180, 2181, 699, 528, 694, 133, 529, 1807, 668,
+ /* 310 */ 2357, 2060, 2265, 568, 1568, 2177, 397, 2261, 62, 1567,
+ /* 320 */ 93, 1207, 533, 1210, 1211, 662, 1547, 1548, 530, 1598,
+ /* 330 */ 1601, 1602, 1603, 1604, 1605, 1606, 1607, 1608, 696, 692,
+ /* 340 */ 1617, 1618, 1620, 1621, 1622, 1623, 2, 12, 48, 46,
+ /* 350 */ 12, 637, 10, 2195, 395, 683, 401, 406, 1543, 1363,
+ /* 360 */ 1994, 1996, 167, 373, 545, 2145, 51, 700, 670, 1624,
+ /* 370 */ 1950, 1541, 684, 1948, 1354, 729, 728, 727, 1358, 726,
+ /* 380 */ 1360, 1361, 725, 722, 2131, 1369, 719, 1371, 1372, 716,
+ /* 390 */ 713, 710, 133, 2177, 8, 2280, 659, 142, 1619, 573,
+ /* 400 */ 2176, 1791, 2212, 698, 19, 110, 2178, 704, 2180, 2181,
+ /* 410 */ 699, 1549, 694, 2342, 1230, 191, 1229, 185, 1925, 2265,
+ /* 420 */ 679, 2277, 2060, 397, 2261, 250, 1600, 659, 142, 138,
+ /* 430 */ 1570, 2195, 374, 423, 372, 371, 790, 570, 51, 15,
+ /* 440 */ 643, 638, 631, 2145, 2292, 700, 1570, 1231, 586, 585,
+ /* 450 */ 584, 2145, 48, 46, 1629, 576, 139, 580, 1764, 572,
+ /* 460 */ 401, 579, 1543, 571, 1453, 1454, 578, 583, 376, 375,
+ /* 470 */ 609, 1722, 577, 1624, 191, 1541, 1626, 1627, 2176, 2280,
+ /* 480 */ 2212, 1995, 1996, 341, 2178, 704, 2180, 2181, 699, 697,
+ /* 490 */ 694, 685, 2230, 536, 1514, 1515, 529, 1807, 2177, 187,
+ /* 500 */ 2273, 2274, 1619, 140, 2278, 2276, 1599, 1609, 701, 1230,
+ /* 510 */ 2312, 1229, 1625, 1628, 1757, 1549, 659, 142, 634, 633,
+ /* 520 */ 1720, 1721, 1723, 1724, 1725, 491, 1544, 739, 1542, 661,
+ /* 530 */ 186, 2273, 2274, 525, 140, 2278, 2195, 14, 13, 1568,
+ /* 540 */ 790, 523, 1231, 49, 519, 515, 659, 142, 2145, 1790,
+ /* 550 */ 700, 737, 157, 156, 734, 733, 732, 154, 1547, 1548,
+ /* 560 */ 1763, 1598, 1601, 1602, 1603, 1604, 1605, 1606, 1607, 1608,
+ /* 570 */ 696, 692, 1617, 1618, 1620, 1621, 1622, 1623, 2, 574,
+ /* 580 */ 1626, 1627, 285, 2176, 2341, 2212, 209, 208, 110, 2178,
+ /* 590 */ 704, 2180, 2181, 699, 191, 694, 12, 2001, 285, 2145,
+ /* 600 */ 2357, 1305, 2265, 101, 391, 2177, 397, 2261, 2110, 490,
+ /* 610 */ 1599, 1609, 1999, 1687, 600, 701, 1625, 1628, 1924, 283,
+ /* 620 */ 2273, 658, 62, 134, 657, 66, 2337, 598, 1941, 596,
+ /* 630 */ 1544, 435, 1542, 737, 157, 156, 734, 733, 732, 154,
+ /* 640 */ 1667, 646, 188, 2195, 684, 1948, 2338, 648, 1769, 189,
+ /* 650 */ 2273, 2274, 1549, 140, 2278, 2145, 251, 700, 437, 433,
+ /* 660 */ 2031, 2139, 1547, 1548, 193, 1598, 1601, 1602, 1603, 1604,
+ /* 670 */ 1605, 1606, 1607, 1608, 696, 692, 1617, 1618, 1620, 1621,
+ /* 680 */ 1622, 1623, 2, 48, 46, 739, 169, 1233, 1234, 52,
+ /* 690 */ 2176, 401, 2212, 1543, 1889, 110, 2178, 704, 2180, 2181,
+ /* 700 */ 699, 575, 694, 2001, 1624, 2155, 1541, 2240, 622, 2265,
+ /* 710 */ 396, 2337, 234, 397, 2261, 44, 43, 42, 1999, 2164,
+ /* 720 */ 416, 1322, 1543, 1303, 62, 415, 2343, 188, 174, 2159,
+ /* 730 */ 2177, 2338, 648, 1619, 1321, 1541, 562, 558, 554, 550,
+ /* 740 */ 701, 233, 688, 1789, 2237, 2155, 1549, 41, 40, 1307,
+ /* 750 */ 34, 47, 45, 44, 43, 42, 41, 40, 1264, 2163,
+ /* 760 */ 47, 45, 44, 43, 42, 1926, 2161, 622, 2195, 2159,
+ /* 770 */ 2337, 790, 684, 1948, 49, 1549, 694, 730, 191, 1788,
+ /* 780 */ 2145, 88, 700, 30, 231, 2343, 188, 48, 46, 1309,
+ /* 790 */ 2338, 648, 681, 2145, 2177, 401, 1571, 1543, 1265, 502,
+ /* 800 */ 790, 1787, 564, 563, 701, 749, 2161, 398, 1624, 150,
+ /* 810 */ 1541, 1626, 1627, 684, 1948, 2176, 694, 2212, 1569, 471,
+ /* 820 */ 111, 2178, 704, 2180, 2181, 699, 1933, 694, 470, 2145,
+ /* 830 */ 566, 565, 2195, 57, 2265, 684, 1948, 1619, 2264, 2261,
+ /* 840 */ 1571, 1599, 1609, 1734, 2145, 191, 700, 1625, 1628, 2046,
+ /* 850 */ 1549, 2145, 230, 224, 572, 451, 1935, 229, 571, 541,
+ /* 860 */ 650, 1544, 144, 1542, 249, 2236, 41, 40, 248, 1786,
+ /* 870 */ 47, 45, 44, 43, 42, 790, 1698, 222, 15, 2176,
+ /* 880 */ 191, 2212, 1568, 2046, 171, 2178, 704, 2180, 2181, 699,
+ /* 890 */ 1544, 694, 1542, 1547, 1548, 201, 1598, 1601, 1602, 1603,
+ /* 900 */ 1604, 1605, 1606, 1607, 1608, 696, 692, 1617, 1618, 1620,
+ /* 910 */ 1621, 1622, 1623, 2, 1931, 1626, 1627, 582, 581, 2145,
+ /* 920 */ 684, 1948, 1547, 1548, 623, 2302, 1952, 2001, 90, 203,
+ /* 930 */ 2280, 355, 2046, 404, 380, 351, 602, 1566, 1470, 1471,
+ /* 940 */ 452, 167, 2000, 2177, 484, 1599, 1609, 498, 202, 1950,
+ /* 950 */ 497, 1625, 1628, 701, 256, 629, 2275, 684, 1948, 684,
+ /* 960 */ 1948, 2342, 1733, 1213, 2337, 1544, 467, 1542, 499, 1567,
+ /* 970 */ 1785, 684, 1948, 469, 1469, 1472, 36, 461, 207, 476,
+ /* 980 */ 2341, 2195, 41, 40, 2338, 2339, 47, 45, 44, 43,
+ /* 990 */ 42, 477, 262, 2145, 695, 700, 731, 1547, 1548, 1992,
+ /* 1000 */ 1598, 1601, 1602, 1603, 1604, 1605, 1606, 1607, 1608, 696,
+ /* 1010 */ 692, 1617, 1618, 1620, 1621, 1622, 1623, 2, 1847, 369,
+ /* 1020 */ 2145, 167, 684, 1948, 684, 1948, 684, 1948, 2176, 1951,
+ /* 1030 */ 2212, 457, 1600, 110, 2178, 704, 2180, 2181, 699, 54,
+ /* 1040 */ 694, 3, 544, 259, 1945, 2357, 252, 2265, 1890, 41,
+ /* 1050 */ 40, 397, 2261, 47, 45, 44, 43, 42, 296, 297,
+ /* 1060 */ 2128, 495, 2305, 295, 489, 488, 487, 486, 483, 482,
+ /* 1070 */ 481, 480, 479, 475, 474, 473, 472, 350, 464, 463,
+ /* 1080 */ 462, 607, 459, 458, 367, 1691, 1782, 1634, 767, 766,
+ /* 1090 */ 765, 764, 413, 1568, 763, 762, 146, 757, 756, 755,
+ /* 1100 */ 754, 753, 752, 751, 159, 747, 746, 745, 412, 411,
+ /* 1110 */ 742, 741, 740, 177, 176, 168, 1850, 454, 41, 40,
+ /* 1120 */ 326, 1655, 47, 45, 44, 43, 42, 87, 622, 2177,
+ /* 1130 */ 455, 2337, 1784, 2001, 323, 73, 2145, 155, 72, 701,
+ /* 1140 */ 405, 2330, 1781, 761, 759, 1710, 2343, 188, 1999, 348,
+ /* 1150 */ 280, 2338, 648, 1944, 684, 1948, 62, 635, 684, 1948,
+ /* 1160 */ 216, 510, 508, 505, 137, 2177, 1923, 2195, 1780, 691,
+ /* 1170 */ 2001, 684, 1948, 735, 260, 701, 1992, 366, 286, 2145,
+ /* 1180 */ 1888, 700, 684, 1948, 1779, 1999, 586, 585, 584, 684,
+ /* 1190 */ 1948, 667, 2145, 576, 139, 580, 1778, 56, 35, 579,
+ /* 1200 */ 62, 651, 300, 2195, 578, 583, 376, 375, 1660, 682,
+ /* 1210 */ 577, 274, 1210, 1211, 2176, 2145, 2212, 700, 2145, 110,
+ /* 1220 */ 2178, 704, 2180, 2181, 699, 1777, 694, 654, 1776, 684,
+ /* 1230 */ 1948, 2357, 1887, 2265, 2145, 2177, 414, 397, 2261, 109,
+ /* 1240 */ 1775, 1774, 445, 1600, 444, 701, 2145, 2284, 647, 306,
+ /* 1250 */ 2176, 2337, 2212, 2285, 1687, 172, 2178, 704, 2180, 2181,
+ /* 1260 */ 699, 407, 694, 684, 1948, 736, 646, 188, 1992, 167,
+ /* 1270 */ 2196, 2338, 648, 2195, 443, 2145, 430, 1950, 2145, 81,
+ /* 1280 */ 80, 450, 166, 408, 200, 2145, 2177, 700, 261, 319,
+ /* 1290 */ 2145, 2145, 1978, 604, 2055, 603, 701, 442, 440, 737,
+ /* 1300 */ 157, 156, 734, 733, 732, 154, 649, 2358, 349, 2177,
+ /* 1310 */ 750, 431, 191, 1910, 429, 425, 421, 418, 443, 701,
+ /* 1320 */ 2176, 2299, 2212, 1645, 2195, 110, 2178, 704, 2180, 2181,
+ /* 1330 */ 699, 91, 694, 148, 74, 135, 2145, 2357, 700, 2265,
+ /* 1340 */ 1834, 1690, 1825, 397, 2261, 410, 409, 2195, 239, 241,
+ /* 1350 */ 243, 237, 240, 242, 245, 1557, 191, 244, 621, 2145,
+ /* 1360 */ 2177, 700, 587, 155, 589, 1823, 1624, 155, 1550, 50,
+ /* 1370 */ 701, 2176, 50, 2212, 267, 155, 335, 2178, 704, 2180,
+ /* 1380 */ 2181, 699, 50, 694, 82, 41, 40, 592, 293, 47,
+ /* 1390 */ 45, 44, 43, 42, 2176, 1619, 2212, 1552, 2195, 110,
+ /* 1400 */ 2178, 704, 2180, 2181, 699, 71, 694, 153, 1549, 1551,
+ /* 1410 */ 2145, 2357, 700, 2265, 1766, 1767, 155, 397, 2261, 644,
+ /* 1420 */ 2177, 14, 13, 1509, 64, 50, 50, 1512, 708, 1719,
+ /* 1430 */ 701, 153, 1718, 690, 269, 666, 2166, 155, 652, 743,
+ /* 1440 */ 136, 744, 1467, 153, 1817, 2176, 106, 2212, 298, 2177,
+ /* 1450 */ 110, 2178, 704, 2180, 2181, 699, 103, 694, 2195, 701,
+ /* 1460 */ 1808, 1283, 2238, 1281, 2265, 676, 655, 302, 397, 2261,
+ /* 1470 */ 2145, 1813, 700, 1989, 2295, 660, 1348, 282, 279, 1,
+ /* 1480 */ 9, 417, 55, 422, 1661, 1610, 318, 2195, 1376, 364,
+ /* 1490 */ 1574, 1380, 2168, 438, 785, 439, 195, 1387, 196, 2145,
+ /* 1500 */ 1385, 700, 441, 158, 198, 2176, 1490, 2212, 313, 206,
+ /* 1510 */ 110, 2178, 704, 2180, 2181, 699, 456, 694, 1571, 460,
+ /* 1520 */ 2056, 493, 687, 1558, 2265, 1553, 465, 1566, 397, 2261,
+ /* 1530 */ 478, 2048, 485, 492, 2176, 504, 2212, 494, 503, 111,
+ /* 1540 */ 2178, 704, 2180, 2181, 699, 501, 694, 591, 210, 211,
+ /* 1550 */ 506, 507, 213, 2265, 1555, 1561, 1563, 689, 2261, 509,
+ /* 1560 */ 511, 1572, 601, 526, 4, 2177, 1554, 537, 692, 1617,
+ /* 1570 */ 1618, 1620, 1621, 1622, 1623, 701, 247, 527, 534, 535,
+ /* 1580 */ 221, 1569, 538, 1573, 1575, 223, 539, 540, 226, 542,
+ /* 1590 */ 228, 2177, 594, 546, 567, 85, 569, 86, 232, 588,
+ /* 1600 */ 354, 701, 1938, 2195, 112, 246, 236, 1934, 608, 606,
+ /* 1610 */ 151, 89, 238, 2119, 314, 2145, 253, 700, 612, 613,
+ /* 1620 */ 611, 255, 257, 160, 161, 2177, 1936, 1932, 162, 2195,
+ /* 1630 */ 1497, 163, 617, 619, 636, 701, 2311, 674, 2287, 2310,
+ /* 1640 */ 7, 2145, 645, 700, 2116, 70, 2115, 273, 69, 616,
+ /* 1650 */ 702, 175, 2212, 626, 627, 111, 2178, 704, 2180, 2181,
+ /* 1660 */ 699, 632, 694, 2195, 2296, 2306, 386, 265, 639, 2265,
+ /* 1670 */ 275, 618, 268, 359, 2261, 2145, 2176, 700, 2212, 625,
+ /* 1680 */ 276, 111, 2178, 704, 2180, 2181, 699, 2177, 694, 624,
+ /* 1690 */ 278, 387, 2360, 656, 653, 2265, 1687, 701, 141, 1570,
+ /* 1700 */ 2262, 2281, 663, 664, 390, 288, 96, 2061, 1576, 677,
+ /* 1710 */ 2176, 2177, 2212, 315, 672, 171, 2178, 704, 2180, 2181,
+ /* 1720 */ 699, 701, 694, 678, 673, 2195, 2075, 2074, 61, 2073,
+ /* 1730 */ 384, 277, 316, 317, 98, 2177, 281, 2145, 1949, 700,
+ /* 1740 */ 2246, 393, 100, 102, 786, 701, 2336, 1993, 309, 2195,
+ /* 1750 */ 706, 1911, 320, 787, 385, 789, 2303, 329, 53, 324,
+ /* 1760 */ 2137, 2145, 356, 700, 357, 322, 344, 2136, 2135, 343,
+ /* 1770 */ 333, 78, 2176, 2195, 2212, 419, 1534, 342, 2178, 704,
+ /* 1780 */ 2180, 2181, 699, 2132, 694, 2145, 420, 700, 1535, 194,
+ /* 1790 */ 424, 2130, 426, 427, 428, 2129, 2176, 365, 2212, 2177,
+ /* 1800 */ 2127, 342, 2178, 704, 2180, 2181, 699, 432, 694, 701,
+ /* 1810 */ 2126, 434, 2125, 436, 1525, 2106, 197, 2105, 199, 1493,
+ /* 1820 */ 2176, 79, 2212, 1492, 2087, 172, 2178, 704, 2180, 2181,
+ /* 1830 */ 699, 2086, 694, 2085, 448, 449, 2084, 2195, 2083, 1444,
+ /* 1840 */ 2039, 2038, 392, 2036, 147, 2035, 2034, 2037, 2033, 2145,
+ /* 1850 */ 2032, 700, 2030, 2029, 2028, 204, 466, 2027, 468, 2041,
+ /* 1860 */ 2026, 2025, 2024, 2177, 149, 2011, 2010, 2009, 2040, 2008,
+ /* 1870 */ 2007, 1446, 2006, 698, 2023, 2022, 2021, 2359, 2020, 2019,
+ /* 1880 */ 2018, 2017, 2016, 2015, 2176, 2177, 2212, 2014, 2013, 342,
+ /* 1890 */ 2178, 704, 2180, 2181, 699, 701, 694, 2012, 2005, 2004,
+ /* 1900 */ 2003, 2195, 496, 2002, 352, 1853, 1319, 1852, 1323, 2177,
+ /* 1910 */ 63, 353, 1315, 2145, 1851, 700, 1849, 212, 1846, 701,
+ /* 1920 */ 214, 1845, 513, 2195, 512, 1838, 516, 1827, 400, 514,
+ /* 1930 */ 520, 524, 518, 215, 1803, 2145, 1212, 700, 1802, 2104,
+ /* 1940 */ 522, 517, 2094, 521, 217, 76, 2082, 2195, 2176, 219,
+ /* 1950 */ 2212, 2165, 402, 341, 2178, 704, 2180, 2181, 699, 2145,
+ /* 1960 */ 694, 700, 2231, 610, 182, 77, 183, 225, 227, 2081,
+ /* 1970 */ 2176, 532, 2212, 2059, 1927, 342, 2178, 704, 2180, 2181,
+ /* 1980 */ 699, 793, 694, 1848, 1257, 1844, 547, 549, 1842, 548,
+ /* 1990 */ 551, 553, 552, 1840, 2176, 312, 2212, 2177, 555, 342,
+ /* 2000 */ 2178, 704, 2180, 2181, 699, 556, 694, 701, 1837, 557,
+ /* 2010 */ 559, 180, 1392, 560, 1822, 561, 2177, 1820, 1821, 783,
+ /* 2020 */ 779, 775, 771, 1819, 310, 1391, 701, 2177, 1799, 1929,
+ /* 2030 */ 1928, 758, 1306, 235, 1304, 2195, 1302, 701, 760, 1301,
+ /* 2040 */ 1300, 1299, 1298, 1293, 1295, 1294, 1292, 2145, 1835, 700,
+ /* 2050 */ 1826, 590, 1824, 377, 2195, 378, 379, 593, 1798, 595,
+ /* 2060 */ 1797, 597, 1796, 599, 108, 2195, 2145, 303, 700, 113,
+ /* 2070 */ 1519, 1521, 1518, 2103, 1499, 1523, 1501, 2145, 2093, 700,
+ /* 2080 */ 58, 29, 605, 67, 2212, 614, 258, 337, 2178, 704,
+ /* 2090 */ 2180, 2181, 699, 2080, 694, 2177, 2078, 2342, 17, 20,
+ /* 2100 */ 680, 2176, 1736, 2212, 165, 701, 327, 2178, 704, 2180,
+ /* 2110 */ 2181, 699, 2176, 694, 2212, 2177, 615, 325, 2178, 704,
+ /* 2120 */ 2180, 2181, 699, 31, 694, 701, 2177, 1503, 620, 383,
+ /* 2130 */ 264, 5, 6, 2195, 21, 290, 701, 65, 272, 628,
+ /* 2140 */ 289, 630, 22, 271, 266, 2145, 2177, 700, 1717, 173,
+ /* 2150 */ 270, 2166, 33, 2195, 32, 24, 701, 1709, 1751, 92,
+ /* 2160 */ 254, 1756, 1750, 1757, 2195, 2145, 388, 700, 1755, 1684,
+ /* 2170 */ 1754, 389, 1683, 284, 59, 178, 2145, 2079, 700, 2077,
+ /* 2180 */ 2176, 60, 2212, 2076, 2195, 328, 2178, 704, 2180, 2181,
+ /* 2190 */ 699, 2058, 694, 95, 23, 18, 2145, 291, 700, 292,
+ /* 2200 */ 2176, 1715, 2212, 94, 2177, 334, 2178, 704, 2180, 2181,
+ /* 2210 */ 699, 2176, 694, 2212, 701, 25, 338, 2178, 704, 2180,
+ /* 2220 */ 2181, 699, 294, 694, 299, 2177, 2057, 68, 97, 103,
+ /* 2230 */ 26, 2176, 99, 2212, 301, 701, 330, 2178, 704, 2180,
+ /* 2240 */ 2181, 699, 2195, 694, 675, 304, 1636, 1635, 13, 1559,
+ /* 2250 */ 2177, 2215, 179, 1614, 2145, 693, 700, 11, 192, 1612,
+ /* 2260 */ 701, 39, 16, 2195, 1611, 1646, 27, 1591, 1583, 703,
+ /* 2270 */ 28, 705, 1377, 707, 403, 2145, 2177, 700, 709, 711,
+ /* 2280 */ 1374, 712, 714, 717, 1373, 715, 701, 718, 2195, 2176,
+ /* 2290 */ 720, 2212, 1370, 721, 339, 2178, 704, 2180, 2181, 699,
+ /* 2300 */ 2145, 694, 700, 1364, 1362, 723, 1368, 724, 307, 1386,
+ /* 2310 */ 2176, 104, 2212, 105, 2195, 331, 2178, 704, 2180, 2181,
+ /* 2320 */ 699, 1367, 694, 75, 1382, 1255, 2145, 1366, 700, 738,
+ /* 2330 */ 1287, 1365, 1286, 1285, 1284, 2176, 2177, 2212, 1282, 1280,
+ /* 2340 */ 340, 2178, 704, 2180, 2181, 699, 701, 694, 1279, 1278,
+ /* 2350 */ 1313, 748, 2177, 308, 1276, 1275, 1274, 1273, 1272, 1271,
+ /* 2360 */ 1270, 2176, 701, 2212, 1310, 2177, 332, 2178, 704, 2180,
+ /* 2370 */ 2181, 699, 1308, 694, 2195, 701, 1267, 1266, 1263, 1262,
+ /* 2380 */ 1261, 1260, 1843, 768, 770, 769, 2145, 1841, 700, 772,
+ /* 2390 */ 2195, 774, 1839, 1836, 776, 778, 773, 780, 777, 781,
+ /* 2400 */ 782, 1818, 2145, 2195, 700, 784, 1202, 1795, 311, 788,
+ /* 2410 */ 1770, 1545, 321, 1770, 792, 2145, 791, 700, 1770, 1770,
+ /* 2420 */ 1770, 2176, 1770, 2212, 1770, 2177, 345, 2178, 704, 2180,
+ /* 2430 */ 2181, 699, 1770, 694, 1770, 701, 1770, 2176, 1770, 2212,
+ /* 2440 */ 1770, 2177, 346, 2178, 704, 2180, 2181, 699, 1770, 694,
+ /* 2450 */ 2176, 701, 2212, 1770, 2177, 2189, 2178, 704, 2180, 2181,
+ /* 2460 */ 699, 1770, 694, 2195, 701, 1770, 1770, 1770, 1770, 1770,
+ /* 2470 */ 1770, 1770, 1770, 1770, 1770, 2145, 1770, 700, 1770, 2195,
+ /* 2480 */ 1770, 1770, 1770, 1770, 1770, 1770, 1770, 1770, 1770, 1770,
+ /* 2490 */ 1770, 2145, 2195, 700, 1770, 1770, 1770, 1770, 1770, 1770,
+ /* 2500 */ 1770, 1770, 1770, 1770, 2145, 1770, 700, 1770, 1770, 1770,
+ /* 2510 */ 2176, 1770, 2212, 1770, 1770, 2188, 2178, 704, 2180, 2181,
+ /* 2520 */ 699, 1770, 694, 1770, 1770, 1770, 2176, 2177, 2212, 1770,
+ /* 2530 */ 1770, 2187, 2178, 704, 2180, 2181, 699, 701, 694, 2176,
+ /* 2540 */ 1770, 2212, 1770, 2177, 361, 2178, 704, 2180, 2181, 699,
+ /* 2550 */ 1770, 694, 1770, 701, 2177, 1770, 1770, 1770, 1770, 1770,
+ /* 2560 */ 1770, 1770, 1770, 1770, 701, 2195, 1770, 1770, 1770, 1770,
+ /* 2570 */ 1770, 1770, 1770, 1770, 1770, 1770, 1770, 2145, 1770, 700,
+ /* 2580 */ 1770, 2195, 1770, 1770, 1770, 1770, 1770, 1770, 1770, 1770,
+ /* 2590 */ 1770, 1770, 2195, 2145, 1770, 700, 1770, 1770, 1770, 1770,
+ /* 2600 */ 1770, 1770, 1770, 1770, 2145, 1770, 700, 1770, 1770, 1770,
+ /* 2610 */ 1770, 1770, 2176, 1770, 2212, 1770, 2177, 362, 2178, 704,
+ /* 2620 */ 2180, 2181, 699, 1770, 694, 1770, 701, 1770, 2176, 1770,
+ /* 2630 */ 2212, 1770, 2177, 358, 2178, 704, 2180, 2181, 699, 2176,
+ /* 2640 */ 694, 2212, 701, 1770, 363, 2178, 704, 2180, 2181, 699,
+ /* 2650 */ 1770, 694, 1770, 1770, 2195, 1770, 1770, 1770, 1770, 1770,
+ /* 2660 */ 1770, 1770, 1770, 1770, 1770, 1770, 2145, 1770, 700, 1770,
+ /* 2670 */ 2195, 1770, 1770, 1770, 1770, 1770, 1770, 1770, 1770, 1770,
+ /* 2680 */ 1770, 1770, 2145, 1770, 700, 1770, 1770, 1770, 1770, 1770,
+ /* 2690 */ 1770, 1770, 1770, 1770, 1770, 1770, 1770, 1770, 1770, 1770,
+ /* 2700 */ 1770, 702, 1770, 2212, 1770, 1770, 337, 2178, 704, 2180,
+ /* 2710 */ 2181, 699, 1770, 694, 1770, 1770, 1770, 2176, 1770, 2212,
+ /* 2720 */ 1770, 1770, 336, 2178, 704, 2180, 2181, 699, 1770, 694,
};
static const YYCODETYPE yy_lookahead[] = {
- /* 0 */ 340, 359, 456, 445, 446, 459, 339, 409, 341, 367,
- /* 10 */ 350, 413, 12, 13, 14, 379, 378, 3, 366, 340,
- /* 20 */ 20, 475, 22, 8, 9, 479, 480, 12, 13, 14,
- /* 30 */ 15, 16, 380, 33, 20, 35, 398, 399, 378, 8,
- /* 40 */ 9, 349, 390, 12, 13, 14, 15, 16, 370, 409,
- /* 50 */ 390, 20, 392, 20, 456, 344, 378, 459, 347, 348,
- /* 60 */ 340, 345, 62, 385, 386, 349, 378, 351, 68, 390,
- /* 70 */ 350, 393, 474, 475, 386, 75, 377, 479, 480, 427,
- /* 80 */ 428, 374, 456, 68, 424, 459, 394, 427, 389, 437,
+ /* 0 */ 340, 392, 339, 345, 341, 349, 409, 349, 409, 351,
+ /* 10 */ 350, 414, 12, 13, 14, 379, 407, 408, 456, 340,
+ /* 20 */ 20, 459, 22, 8, 9, 12, 13, 12, 13, 14,
+ /* 30 */ 15, 16, 441, 33, 443, 35, 474, 475, 378, 8,
+ /* 40 */ 9, 479, 480, 12, 13, 14, 15, 16, 35, 409,
+ /* 50 */ 390, 20, 392, 456, 20, 456, 459, 401, 459, 403,
+ /* 60 */ 340, 378, 62, 12, 13, 14, 15, 16, 68, 390,
+ /* 70 */ 350, 474, 475, 474, 475, 75, 479, 480, 479, 480,
+ /* 80 */ 345, 398, 399, 68, 349, 425, 351, 427, 356, 377,
/* 90 */ 430, 431, 432, 433, 434, 435, 456, 437, 378, 459,
- /* 100 */ 100, 475, 442, 103, 444, 479, 480, 345, 448, 449,
- /* 110 */ 390, 349, 392, 351, 474, 475, 12, 13, 104, 479,
- /* 120 */ 480, 461, 349, 350, 20, 340, 22, 112, 421, 469,
- /* 130 */ 12, 13, 14, 15, 16, 349, 20, 33, 62, 35,
- /* 140 */ 140, 141, 369, 392, 424, 349, 350, 427, 35, 376,
- /* 150 */ 430, 431, 432, 433, 434, 435, 0, 437, 407, 408,
- /* 160 */ 440, 20, 442, 443, 444, 369, 62, 349, 448, 449,
- /* 170 */ 170, 171, 68, 140, 141, 390, 176, 177, 102, 75,
- /* 180 */ 0, 105, 349, 350, 169, 0, 172, 401, 75, 403,
- /* 190 */ 190, 21, 192, 20, 24, 25, 26, 27, 28, 29,
- /* 200 */ 30, 31, 32, 172, 100, 349, 21, 103, 340, 24,
- /* 210 */ 25, 26, 27, 28, 29, 30, 31, 32, 350, 401,
- /* 220 */ 352, 403, 222, 223, 0, 225, 226, 227, 228, 229,
+ /* 100 */ 100, 389, 442, 103, 444, 373, 358, 350, 448, 449,
+ /* 110 */ 390, 340, 392, 381, 474, 475, 12, 13, 392, 479,
+ /* 120 */ 480, 461, 366, 375, 20, 456, 22, 112, 459, 469,
+ /* 130 */ 404, 20, 384, 407, 408, 378, 380, 33, 62, 35,
+ /* 140 */ 140, 141, 349, 22, 475, 425, 390, 427, 479, 480,
+ /* 150 */ 430, 431, 432, 433, 434, 435, 35, 437, 20, 370,
+ /* 160 */ 440, 390, 442, 443, 444, 378, 62, 378, 448, 449,
+ /* 170 */ 170, 171, 68, 378, 385, 386, 176, 177, 102, 75,
+ /* 180 */ 385, 105, 393, 427, 169, 0, 399, 394, 393, 432,
+ /* 190 */ 190, 21, 192, 437, 24, 25, 26, 27, 28, 29,
+ /* 200 */ 30, 31, 32, 172, 100, 192, 21, 103, 340, 24,
+ /* 210 */ 25, 26, 27, 28, 29, 30, 31, 32, 350, 20,
+ /* 220 */ 352, 100, 222, 223, 0, 225, 226, 227, 228, 229,
/* 230 */ 230, 231, 232, 233, 234, 235, 236, 237, 238, 239,
- /* 240 */ 240, 241, 242, 20, 140, 141, 378, 67, 24, 25,
- /* 250 */ 26, 27, 28, 29, 30, 31, 32, 401, 390, 403,
+ /* 240 */ 240, 241, 242, 20, 140, 141, 378, 340, 24, 25,
+ /* 250 */ 26, 27, 28, 29, 30, 31, 32, 349, 390, 103,
/* 260 */ 392, 246, 247, 248, 249, 250, 251, 252, 253, 254,
- /* 270 */ 255, 256, 67, 340, 170, 171, 103, 349, 350, 4,
- /* 280 */ 176, 177, 39, 350, 451, 452, 453, 454, 172, 456,
- /* 290 */ 457, 20, 424, 22, 190, 427, 192, 369, 430, 431,
- /* 300 */ 432, 433, 434, 435, 376, 437, 35, 14, 349, 350,
- /* 310 */ 442, 378, 444, 20, 8, 9, 448, 449, 12, 13,
- /* 320 */ 14, 15, 16, 390, 53, 392, 222, 223, 369, 225,
+ /* 270 */ 255, 256, 445, 446, 170, 171, 20, 62, 140, 141,
+ /* 280 */ 176, 177, 4, 20, 8, 9, 349, 350, 12, 13,
+ /* 290 */ 14, 15, 16, 425, 190, 427, 192, 390, 430, 431,
+ /* 300 */ 432, 433, 434, 435, 344, 437, 369, 347, 348, 401,
+ /* 310 */ 442, 403, 444, 376, 20, 340, 448, 449, 103, 20,
+ /* 320 */ 105, 43, 14, 45, 46, 350, 222, 223, 20, 225,
/* 330 */ 226, 227, 228, 229, 230, 231, 232, 233, 234, 235,
/* 340 */ 236, 237, 238, 239, 240, 241, 242, 243, 12, 13,
- /* 350 */ 20, 340, 22, 12, 13, 3, 20, 424, 22, 429,
- /* 360 */ 427, 350, 0, 430, 431, 432, 433, 434, 435, 33,
- /* 370 */ 437, 35, 216, 378, 378, 442, 35, 444, 103, 350,
- /* 380 */ 385, 448, 449, 53, 392, 455, 8, 9, 393, 378,
- /* 390 */ 12, 13, 14, 15, 16, 399, 404, 100, 62, 407,
- /* 400 */ 408, 390, 469, 392, 68, 344, 44, 378, 347, 348,
- /* 410 */ 104, 75, 115, 116, 117, 118, 119, 120, 121, 122,
- /* 420 */ 123, 124, 44, 126, 127, 128, 129, 130, 131, 132,
- /* 430 */ 20, 14, 259, 20, 356, 424, 100, 20, 427, 103,
- /* 440 */ 356, 430, 431, 432, 433, 434, 435, 436, 437, 438,
- /* 450 */ 439, 373, 12, 13, 14, 358, 175, 2, 33, 381,
- /* 460 */ 20, 432, 22, 8, 9, 381, 20, 12, 13, 14,
- /* 470 */ 15, 16, 375, 33, 49, 35, 140, 141, 20, 349,
- /* 480 */ 350, 384, 57, 58, 59, 60, 350, 62, 62, 370,
- /* 490 */ 133, 134, 135, 136, 137, 138, 139, 378, 340, 369,
- /* 500 */ 4, 258, 62, 20, 222, 386, 170, 171, 350, 243,
- /* 510 */ 352, 245, 176, 177, 350, 75, 8, 9, 349, 350,
- /* 520 */ 12, 13, 14, 15, 16, 20, 190, 102, 192, 103,
- /* 530 */ 105, 105, 396, 192, 259, 84, 378, 20, 369, 43,
- /* 540 */ 100, 45, 46, 103, 340, 264, 265, 266, 390, 103,
- /* 550 */ 392, 269, 270, 271, 272, 273, 274, 275, 222, 223,
- /* 560 */ 396, 225, 226, 227, 228, 229, 230, 231, 232, 233,
- /* 570 */ 234, 235, 236, 237, 238, 239, 240, 241, 242, 103,
- /* 580 */ 140, 141, 424, 170, 171, 427, 103, 170, 430, 431,
- /* 590 */ 432, 433, 434, 435, 390, 437, 145, 146, 173, 174,
- /* 600 */ 442, 49, 444, 178, 67, 180, 448, 449, 340, 57,
- /* 610 */ 170, 171, 60, 61, 349, 350, 176, 177, 350, 168,
- /* 620 */ 352, 349, 350, 198, 114, 366, 103, 103, 134, 135,
- /* 630 */ 190, 279, 192, 139, 369, 70, 71, 72, 441, 380,
- /* 640 */ 443, 369, 77, 78, 79, 0, 378, 340, 83, 390,
- /* 650 */ 349, 350, 35, 88, 89, 90, 91, 0, 390, 94,
- /* 660 */ 392, 409, 222, 223, 20, 225, 226, 227, 228, 229,
+ /* 350 */ 243, 175, 245, 378, 370, 20, 20, 388, 22, 100,
+ /* 360 */ 391, 392, 378, 37, 67, 390, 103, 392, 349, 33,
+ /* 370 */ 386, 35, 349, 350, 115, 116, 117, 118, 119, 120,
+ /* 380 */ 121, 122, 123, 124, 0, 126, 127, 128, 129, 130,
+ /* 390 */ 131, 132, 369, 340, 39, 429, 349, 350, 62, 376,
+ /* 400 */ 425, 340, 427, 350, 68, 430, 431, 432, 433, 434,
+ /* 410 */ 435, 75, 437, 3, 20, 259, 22, 442, 0, 444,
+ /* 420 */ 401, 455, 403, 448, 449, 134, 170, 349, 350, 35,
+ /* 430 */ 20, 378, 106, 49, 108, 109, 100, 111, 103, 103,
+ /* 440 */ 264, 265, 266, 390, 469, 392, 20, 53, 70, 71,
+ /* 450 */ 72, 390, 12, 13, 14, 77, 78, 79, 182, 133,
+ /* 460 */ 20, 83, 22, 137, 170, 171, 88, 89, 90, 91,
+ /* 470 */ 114, 222, 94, 33, 259, 35, 140, 141, 425, 429,
+ /* 480 */ 427, 391, 392, 430, 431, 432, 433, 434, 435, 436,
+ /* 490 */ 437, 438, 439, 344, 203, 204, 347, 348, 340, 452,
+ /* 500 */ 453, 454, 62, 456, 457, 455, 170, 171, 350, 20,
+ /* 510 */ 352, 22, 176, 177, 104, 75, 349, 350, 269, 270,
+ /* 520 */ 271, 272, 273, 274, 275, 84, 190, 67, 192, 451,
+ /* 530 */ 452, 453, 454, 49, 456, 457, 378, 1, 2, 20,
+ /* 540 */ 100, 57, 53, 103, 60, 61, 349, 350, 390, 340,
+ /* 550 */ 392, 133, 134, 135, 136, 137, 138, 139, 222, 223,
+ /* 560 */ 284, 225, 226, 227, 228, 229, 230, 231, 232, 233,
+ /* 570 */ 234, 235, 236, 237, 238, 239, 240, 241, 242, 13,
+ /* 580 */ 140, 141, 172, 425, 3, 427, 145, 146, 430, 431,
+ /* 590 */ 432, 433, 434, 435, 259, 437, 243, 378, 172, 390,
+ /* 600 */ 442, 35, 444, 356, 385, 340, 448, 449, 374, 168,
+ /* 610 */ 170, 171, 393, 258, 21, 350, 176, 177, 0, 452,
+ /* 620 */ 453, 454, 103, 456, 457, 4, 459, 34, 381, 36,
+ /* 630 */ 190, 185, 192, 133, 134, 135, 136, 137, 138, 139,
+ /* 640 */ 104, 474, 475, 378, 349, 350, 479, 480, 337, 452,
+ /* 650 */ 453, 454, 75, 456, 457, 390, 422, 392, 212, 213,
+ /* 660 */ 0, 409, 222, 223, 369, 225, 226, 227, 228, 229,
/* 670 */ 230, 231, 232, 233, 234, 235, 236, 237, 238, 239,
- /* 680 */ 240, 241, 242, 12, 13, 259, 427, 349, 350, 172,
- /* 690 */ 182, 20, 424, 22, 49, 427, 437, 390, 430, 431,
- /* 700 */ 432, 433, 434, 435, 33, 437, 35, 369, 456, 340,
- /* 710 */ 442, 459, 444, 189, 370, 191, 448, 449, 0, 350,
- /* 720 */ 388, 4, 378, 391, 392, 429, 474, 475, 349, 350,
- /* 730 */ 386, 479, 480, 62, 14, 259, 19, 391, 392, 340,
- /* 740 */ 20, 185, 259, 349, 350, 221, 75, 378, 369, 350,
- /* 750 */ 33, 455, 21, 452, 453, 454, 243, 456, 457, 390,
- /* 760 */ 459, 392, 370, 369, 429, 34, 49, 36, 212, 213,
- /* 770 */ 378, 100, 75, 56, 103, 474, 475, 378, 386, 62,
- /* 780 */ 479, 480, 259, 259, 140, 141, 75, 12, 13, 390,
- /* 790 */ 455, 392, 284, 424, 22, 20, 427, 22, 134, 430,
- /* 800 */ 431, 432, 433, 434, 435, 169, 437, 35, 33, 192,
- /* 810 */ 35, 140, 141, 444, 349, 350, 159, 448, 449, 102,
- /* 820 */ 176, 177, 105, 424, 0, 168, 427, 114, 378, 430,
- /* 830 */ 431, 432, 433, 434, 435, 385, 437, 62, 340, 340,
- /* 840 */ 14, 170, 171, 393, 349, 350, 20, 176, 177, 350,
- /* 850 */ 75, 133, 134, 135, 136, 137, 138, 139, 22, 378,
- /* 860 */ 441, 190, 443, 192, 369, 44, 385, 203, 204, 470,
- /* 870 */ 471, 35, 100, 2, 393, 100, 379, 378, 103, 8,
- /* 880 */ 9, 379, 246, 12, 13, 14, 15, 16, 390, 390,
- /* 890 */ 170, 392, 256, 222, 223, 379, 225, 226, 227, 228,
+ /* 680 */ 240, 241, 242, 12, 13, 67, 359, 54, 55, 103,
+ /* 690 */ 425, 20, 427, 22, 367, 430, 431, 432, 433, 434,
+ /* 700 */ 435, 13, 437, 378, 33, 366, 35, 442, 456, 444,
+ /* 710 */ 385, 459, 33, 448, 449, 14, 15, 16, 393, 380,
+ /* 720 */ 409, 22, 22, 35, 103, 414, 474, 475, 49, 390,
+ /* 730 */ 340, 479, 480, 62, 35, 35, 57, 58, 59, 60,
+ /* 740 */ 350, 62, 441, 340, 443, 366, 75, 8, 9, 35,
+ /* 750 */ 2, 12, 13, 14, 15, 16, 8, 9, 35, 380,
+ /* 760 */ 12, 13, 14, 15, 16, 0, 427, 456, 378, 390,
+ /* 770 */ 459, 100, 349, 350, 103, 75, 437, 114, 259, 340,
+ /* 780 */ 390, 102, 392, 44, 105, 474, 475, 12, 13, 75,
+ /* 790 */ 479, 480, 369, 390, 340, 20, 20, 22, 75, 100,
+ /* 800 */ 100, 340, 354, 355, 350, 75, 427, 428, 33, 44,
+ /* 810 */ 35, 140, 141, 349, 350, 425, 437, 427, 20, 159,
+ /* 820 */ 430, 431, 432, 433, 434, 435, 379, 437, 168, 390,
+ /* 830 */ 354, 355, 378, 369, 444, 349, 350, 62, 448, 449,
+ /* 840 */ 20, 170, 171, 104, 390, 259, 392, 176, 177, 350,
+ /* 850 */ 75, 390, 173, 174, 133, 369, 379, 178, 137, 180,
+ /* 860 */ 279, 190, 440, 192, 135, 443, 8, 9, 139, 340,
+ /* 870 */ 12, 13, 14, 15, 16, 100, 14, 198, 103, 425,
+ /* 880 */ 259, 427, 20, 350, 430, 431, 432, 433, 434, 435,
+ /* 890 */ 190, 437, 192, 222, 223, 396, 225, 226, 227, 228,
/* 900 */ 229, 230, 231, 232, 233, 234, 235, 236, 237, 238,
- /* 910 */ 239, 240, 241, 242, 340, 140, 141, 452, 453, 454,
- /* 920 */ 337, 456, 457, 424, 340, 104, 427, 340, 340, 430,
- /* 930 */ 431, 432, 433, 434, 435, 18, 437, 20, 14, 15,
- /* 940 */ 16, 378, 414, 340, 27, 170, 171, 30, 385, 340,
- /* 950 */ 33, 176, 177, 350, 378, 352, 393, 133, 134, 135,
- /* 960 */ 136, 137, 138, 139, 390, 190, 49, 192, 51, 393,
- /* 970 */ 471, 349, 350, 56, 390, 22, 22, 390, 390, 8,
- /* 980 */ 9, 378, 340, 12, 13, 14, 15, 16, 35, 35,
- /* 990 */ 20, 369, 409, 390, 340, 392, 413, 222, 223, 390,
+ /* 910 */ 239, 240, 241, 242, 379, 140, 141, 363, 364, 390,
+ /* 920 */ 349, 350, 222, 223, 470, 471, 379, 378, 199, 396,
+ /* 930 */ 429, 202, 350, 370, 205, 18, 207, 20, 140, 141,
+ /* 940 */ 369, 378, 393, 340, 27, 170, 171, 30, 172, 386,
+ /* 950 */ 33, 176, 177, 350, 379, 352, 455, 349, 350, 349,
+ /* 960 */ 350, 456, 104, 14, 459, 190, 49, 192, 51, 20,
+ /* 970 */ 340, 349, 350, 56, 176, 177, 2, 369, 396, 369,
+ /* 980 */ 475, 378, 8, 9, 479, 480, 12, 13, 14, 15,
+ /* 990 */ 16, 369, 172, 390, 379, 392, 387, 222, 223, 390,
/* 1000 */ 225, 226, 227, 228, 229, 230, 231, 232, 233, 234,
/* 1010 */ 235, 236, 237, 238, 239, 240, 241, 242, 0, 102,
- /* 1020 */ 20, 349, 350, 349, 350, 363, 364, 424, 75, 379,
- /* 1030 */ 427, 114, 390, 430, 431, 432, 433, 434, 435, 456,
- /* 1040 */ 437, 369, 459, 340, 390, 442, 133, 444, 54, 55,
- /* 1050 */ 137, 448, 449, 100, 100, 354, 355, 474, 475, 354,
- /* 1060 */ 355, 144, 479, 480, 147, 148, 149, 150, 151, 152,
+ /* 1020 */ 390, 378, 349, 350, 349, 350, 349, 350, 425, 386,
+ /* 1030 */ 427, 114, 170, 430, 431, 432, 433, 434, 435, 42,
+ /* 1040 */ 437, 44, 369, 415, 369, 442, 369, 444, 367, 8,
+ /* 1050 */ 9, 448, 449, 12, 13, 14, 15, 16, 134, 135,
+ /* 1060 */ 0, 144, 400, 139, 147, 148, 149, 150, 151, 152,
/* 1070 */ 153, 154, 155, 156, 157, 158, 159, 160, 161, 162,
- /* 1080 */ 163, 44, 165, 166, 167, 363, 364, 340, 70, 71,
- /* 1090 */ 72, 73, 74, 390, 76, 77, 78, 79, 80, 81,
+ /* 1080 */ 163, 409, 165, 166, 167, 4, 340, 14, 70, 71,
+ /* 1090 */ 72, 73, 74, 20, 76, 77, 78, 79, 80, 81,
/* 1100 */ 82, 83, 84, 85, 86, 87, 88, 89, 90, 91,
- /* 1110 */ 92, 93, 94, 95, 96, 18, 349, 350, 409, 340,
- /* 1120 */ 23, 340, 340, 12, 13, 379, 452, 453, 454, 340,
- /* 1130 */ 456, 457, 37, 22, 37, 38, 369, 390, 41, 350,
- /* 1140 */ 379, 352, 172, 190, 33, 192, 35, 8, 9, 52,
- /* 1150 */ 4, 12, 13, 14, 15, 16, 350, 340, 349, 350,
- /* 1160 */ 63, 64, 65, 66, 42, 456, 44, 378, 459, 390,
- /* 1170 */ 170, 390, 390, 62, 44, 222, 223, 340, 369, 390,
- /* 1180 */ 340, 392, 62, 474, 475, 366, 75, 350, 479, 480,
- /* 1190 */ 8, 9, 1, 2, 12, 13, 14, 15, 16, 380,
- /* 1200 */ 103, 106, 396, 108, 109, 114, 111, 390, 358, 390,
- /* 1210 */ 68, 100, 409, 424, 440, 378, 427, 443, 367, 430,
- /* 1220 */ 431, 432, 433, 434, 435, 105, 437, 390, 133, 392,
- /* 1230 */ 390, 442, 137, 444, 384, 340, 13, 448, 449, 142,
- /* 1240 */ 42, 387, 44, 104, 390, 350, 427, 352, 387, 387,
- /* 1250 */ 371, 390, 390, 374, 365, 164, 437, 368, 35, 456,
- /* 1260 */ 107, 424, 459, 110, 427, 257, 258, 430, 431, 432,
- /* 1270 */ 433, 434, 435, 378, 437, 45, 46, 474, 475, 182,
- /* 1280 */ 183, 184, 479, 480, 187, 390, 340, 392, 107, 341,
- /* 1290 */ 13, 110, 48, 349, 350, 104, 350, 200, 201, 107,
- /* 1300 */ 400, 190, 110, 192, 107, 140, 141, 110, 211, 0,
- /* 1310 */ 473, 214, 35, 369, 217, 218, 219, 220, 221, 424,
- /* 1320 */ 283, 206, 427, 208, 378, 430, 431, 432, 433, 434,
- /* 1330 */ 435, 22, 437, 222, 223, 0, 390, 442, 392, 444,
- /* 1340 */ 0, 44, 456, 448, 449, 459, 235, 236, 237, 238,
- /* 1350 */ 239, 240, 241, 340, 172, 135, 259, 22, 44, 139,
- /* 1360 */ 474, 475, 22, 350, 222, 479, 480, 483, 44, 44,
- /* 1370 */ 424, 44, 44, 427, 44, 44, 430, 431, 432, 433,
- /* 1380 */ 434, 435, 472, 437, 349, 350, 340, 353, 442, 103,
- /* 1390 */ 444, 378, 349, 350, 448, 449, 350, 349, 350, 113,
- /* 1400 */ 44, 104, 47, 390, 369, 392, 260, 44, 44, 1,
- /* 1410 */ 2, 281, 369, 340, 44, 466, 172, 369, 104, 199,
- /* 1420 */ 35, 0, 202, 350, 378, 205, 44, 207, 104, 104,
- /* 1430 */ 13, 104, 104, 44, 104, 104, 390, 424, 392, 378,
- /* 1440 */ 427, 366, 44, 430, 431, 432, 433, 434, 435, 340,
- /* 1450 */ 437, 378, 35, 366, 44, 442, 44, 444, 103, 350,
- /* 1460 */ 104, 448, 449, 390, 44, 392, 353, 104, 104, 44,
- /* 1470 */ 424, 50, 35, 427, 104, 340, 430, 431, 432, 433,
- /* 1480 */ 434, 435, 400, 437, 348, 350, 104, 378, 442, 13,
- /* 1490 */ 444, 350, 389, 104, 448, 449, 400, 424, 458, 390,
- /* 1500 */ 427, 392, 104, 430, 431, 432, 433, 434, 435, 476,
- /* 1510 */ 437, 35, 75, 378, 104, 450, 104, 444, 460, 261,
- /* 1520 */ 426, 448, 449, 49, 104, 390, 425, 392, 20, 104,
- /* 1530 */ 418, 205, 358, 424, 418, 358, 427, 423, 188, 430,
- /* 1540 */ 431, 432, 433, 434, 435, 411, 437, 42, 397, 20,
- /* 1550 */ 400, 397, 169, 444, 340, 395, 20, 448, 449, 424,
- /* 1560 */ 349, 349, 427, 397, 350, 430, 431, 432, 433, 434,
- /* 1570 */ 435, 395, 437, 395, 101, 362, 99, 192, 361, 98,
- /* 1580 */ 340, 349, 360, 349, 349, 349, 20, 342, 48, 346,
- /* 1590 */ 350, 342, 378, 346, 20, 358, 392, 418, 20, 358,
- /* 1600 */ 20, 351, 342, 410, 390, 378, 392, 351, 349, 358,
- /* 1610 */ 358, 358, 385, 358, 390, 358, 481, 482, 378, 349,
- /* 1620 */ 393, 390, 342, 383, 378, 209, 378, 422, 390, 103,
- /* 1630 */ 390, 196, 392, 195, 418, 420, 417, 356, 424, 378,
- /* 1640 */ 378, 427, 340, 378, 430, 431, 432, 433, 434, 435,
- /* 1650 */ 378, 437, 350, 378, 378, 416, 378, 378, 444, 340,
- /* 1660 */ 378, 392, 356, 449, 424, 194, 349, 427, 400, 350,
- /* 1670 */ 430, 431, 432, 433, 434, 435, 267, 437, 340, 415,
- /* 1680 */ 378, 390, 268, 456, 400, 383, 459, 276, 350, 390,
- /* 1690 */ 465, 465, 390, 405, 392, 405, 468, 378, 390, 181,
- /* 1700 */ 390, 474, 475, 285, 278, 277, 479, 480, 262, 390,
- /* 1710 */ 258, 392, 282, 426, 280, 350, 378, 484, 20, 465,
- /* 1720 */ 478, 383, 429, 349, 351, 356, 424, 464, 390, 427,
- /* 1730 */ 392, 356, 430, 431, 432, 433, 434, 435, 467, 437,
- /* 1740 */ 20, 405, 403, 424, 477, 462, 427, 390, 463, 430,
- /* 1750 */ 431, 432, 433, 434, 435, 174, 437, 390, 390, 405,
- /* 1760 */ 402, 390, 424, 356, 340, 427, 390, 390, 430, 431,
- /* 1770 */ 432, 433, 434, 435, 350, 437, 374, 350, 356, 103,
- /* 1780 */ 447, 340, 103, 382, 390, 349, 36, 368, 356, 338,
- /* 1790 */ 343, 350, 342, 412, 406, 372, 406, 419, 0, 357,
- /* 1800 */ 340, 482, 378, 372, 0, 372, 0, 42, 0, 35,
- /* 1810 */ 350, 215, 35, 35, 390, 35, 392, 215, 0, 378,
- /* 1820 */ 35, 35, 215, 0, 383, 215, 0, 35, 0, 22,
- /* 1830 */ 0, 390, 35, 392, 0, 210, 198, 0, 378, 198,
- /* 1840 */ 192, 199, 190, 383, 0, 0, 0, 186, 424, 185,
- /* 1850 */ 390, 427, 392, 0, 430, 431, 432, 433, 434, 435,
- /* 1860 */ 0, 437, 47, 439, 0, 424, 42, 1, 427, 0,
- /* 1870 */ 0, 430, 431, 432, 433, 434, 435, 0, 437, 0,
- /* 1880 */ 0, 0, 0, 0, 424, 19, 0, 427, 0, 159,
- /* 1890 */ 430, 431, 432, 433, 434, 435, 35, 437, 340, 33,
- /* 1900 */ 0, 159, 0, 0, 0, 0, 0, 0, 350, 0,
- /* 1910 */ 0, 0, 0, 0, 0, 49, 42, 0, 0, 0,
- /* 1920 */ 0, 0, 0, 57, 58, 59, 60, 0, 62, 0,
- /* 1930 */ 0, 340, 0, 0, 0, 0, 378, 22, 143, 0,
- /* 1940 */ 0, 350, 48, 48, 0, 22, 22, 62, 390, 0,
- /* 1950 */ 392, 62, 0, 62, 0, 0, 0, 49, 0, 0,
- /* 1960 */ 35, 35, 0, 0, 340, 181, 35, 39, 102, 378,
- /* 1970 */ 0, 105, 0, 0, 350, 39, 35, 35, 0, 14,
- /* 1980 */ 0, 390, 424, 392, 0, 427, 39, 49, 430, 431,
- /* 1990 */ 432, 433, 434, 435, 49, 437, 0, 44, 47, 42,
- /* 2000 */ 340, 0, 378, 39, 138, 39, 39, 47, 47, 40,
- /* 2010 */ 350, 35, 69, 39, 390, 424, 392, 49, 427, 0,
- /* 2020 */ 35, 430, 431, 432, 433, 434, 435, 39, 437, 0,
- /* 2030 */ 49, 49, 35, 340, 39, 0, 49, 35, 378, 173,
- /* 2040 */ 0, 0, 39, 350, 178, 0, 0, 0, 424, 0,
- /* 2050 */ 390, 427, 392, 35, 430, 431, 432, 433, 434, 435,
- /* 2060 */ 112, 437, 340, 0, 198, 22, 35, 35, 35, 35,
- /* 2070 */ 44, 378, 350, 35, 35, 0, 22, 35, 22, 35,
- /* 2080 */ 44, 110, 0, 390, 424, 392, 35, 427, 22, 0,
- /* 2090 */ 430, 431, 432, 433, 434, 435, 35, 437, 22, 51,
- /* 2100 */ 378, 22, 35, 0, 35, 0, 35, 0, 20, 35,
- /* 2110 */ 104, 103, 390, 35, 392, 35, 0, 424, 172, 103,
- /* 2120 */ 427, 35, 340, 430, 431, 432, 433, 434, 435, 0,
- /* 2130 */ 437, 22, 350, 22, 0, 0, 3, 44, 263, 48,
- /* 2140 */ 103, 340, 48, 44, 44, 47, 424, 197, 44, 427,
- /* 2150 */ 44, 350, 430, 431, 432, 433, 434, 435, 172, 437,
- /* 2160 */ 378, 104, 172, 174, 104, 103, 101, 99, 193, 47,
- /* 2170 */ 179, 3, 390, 103, 392, 44, 104, 104, 35, 378,
- /* 2180 */ 103, 35, 35, 35, 103, 35, 103, 47, 35, 340,
- /* 2190 */ 0, 390, 47, 392, 104, 44, 104, 0, 0, 350,
- /* 2200 */ 104, 104, 0, 39, 47, 103, 424, 0, 340, 427,
- /* 2210 */ 39, 103, 430, 431, 432, 433, 434, 435, 350, 437,
- /* 2220 */ 44, 104, 104, 103, 103, 424, 103, 378, 427, 103,
- /* 2230 */ 175, 430, 431, 432, 433, 434, 435, 173, 437, 390,
- /* 2240 */ 263, 392, 0, 113, 244, 101, 378, 2, 47, 22,
- /* 2250 */ 257, 47, 222, 104, 22, 263, 340, 101, 390, 104,
- /* 2260 */ 392, 103, 103, 103, 47, 104, 350, 104, 103, 103,
- /* 2270 */ 103, 35, 35, 424, 224, 103, 427, 340, 35, 430,
- /* 2280 */ 431, 432, 433, 434, 435, 104, 437, 350, 114, 104,
- /* 2290 */ 103, 35, 424, 104, 378, 427, 103, 35, 430, 431,
- /* 2300 */ 432, 433, 434, 435, 35, 437, 390, 104, 392, 103,
- /* 2310 */ 35, 103, 70, 71, 72, 378, 104, 104, 103, 77,
- /* 2320 */ 78, 79, 103, 44, 125, 83, 103, 390, 35, 392,
- /* 2330 */ 88, 89, 90, 91, 125, 125, 94, 103, 22, 125,
- /* 2340 */ 424, 69, 68, 427, 35, 340, 430, 431, 432, 433,
- /* 2350 */ 434, 435, 35, 437, 35, 350, 35, 35, 35, 35,
- /* 2360 */ 35, 424, 75, 340, 427, 97, 22, 430, 431, 432,
- /* 2370 */ 433, 434, 435, 350, 437, 44, 35, 35, 35, 75,
- /* 2380 */ 35, 35, 35, 378, 35, 35, 35, 35, 35, 22,
- /* 2390 */ 35, 0, 35, 340, 0, 390, 35, 392, 39, 39,
- /* 2400 */ 0, 378, 39, 350, 35, 49, 0, 49, 35, 39,
- /* 2410 */ 0, 49, 35, 390, 49, 392, 35, 0, 22, 21,
- /* 2420 */ 485, 22, 20, 22, 340, 21, 485, 485, 485, 424,
- /* 2430 */ 485, 378, 427, 485, 350, 430, 431, 432, 433, 434,
- /* 2440 */ 435, 340, 437, 390, 485, 392, 485, 424, 485, 485,
- /* 2450 */ 427, 350, 485, 430, 431, 432, 433, 434, 435, 485,
- /* 2460 */ 437, 485, 378, 485, 485, 485, 485, 485, 485, 485,
- /* 2470 */ 485, 485, 485, 485, 390, 485, 392, 424, 485, 378,
- /* 2480 */ 427, 485, 485, 430, 431, 432, 433, 434, 435, 485,
- /* 2490 */ 437, 390, 340, 392, 485, 485, 485, 485, 485, 485,
- /* 2500 */ 485, 485, 350, 485, 485, 485, 485, 485, 424, 485,
- /* 2510 */ 485, 427, 485, 340, 430, 431, 432, 433, 434, 435,
- /* 2520 */ 485, 437, 485, 350, 485, 424, 485, 485, 427, 485,
- /* 2530 */ 378, 430, 431, 432, 433, 434, 435, 485, 437, 485,
- /* 2540 */ 340, 485, 390, 485, 392, 485, 485, 485, 485, 485,
- /* 2550 */ 350, 378, 485, 485, 485, 485, 485, 485, 485, 485,
- /* 2560 */ 485, 485, 485, 390, 485, 392, 485, 485, 485, 485,
- /* 2570 */ 485, 485, 485, 485, 485, 485, 424, 485, 378, 427,
- /* 2580 */ 485, 485, 430, 431, 432, 433, 434, 435, 485, 437,
- /* 2590 */ 390, 485, 392, 485, 485, 485, 485, 424, 485, 485,
- /* 2600 */ 427, 340, 485, 430, 431, 432, 433, 434, 435, 485,
- /* 2610 */ 437, 350, 485, 485, 485, 485, 485, 485, 340, 485,
- /* 2620 */ 485, 485, 485, 485, 424, 485, 485, 427, 350, 485,
- /* 2630 */ 430, 431, 432, 433, 434, 435, 485, 437, 485, 378,
- /* 2640 */ 485, 485, 485, 485, 485, 485, 485, 485, 485, 485,
- /* 2650 */ 485, 390, 485, 392, 485, 485, 378, 485, 485, 485,
- /* 2660 */ 485, 485, 485, 485, 485, 485, 485, 485, 390, 485,
- /* 2670 */ 392, 485, 485, 485, 485, 485, 485, 485, 485, 485,
- /* 2680 */ 485, 485, 485, 485, 485, 424, 485, 485, 427, 485,
- /* 2690 */ 485, 430, 431, 432, 433, 434, 435, 485, 437, 485,
- /* 2700 */ 485, 485, 424, 485, 485, 427, 485, 485, 430, 431,
- /* 2710 */ 432, 433, 434, 435, 485, 437, 485, 485, 485, 485,
- /* 2720 */ 485, 485, 485, 485, 485, 485, 485, 485, 485, 485,
+ /* 1110 */ 92, 93, 94, 95, 96, 18, 0, 22, 8, 9,
+ /* 1120 */ 23, 169, 12, 13, 14, 15, 16, 358, 456, 340,
+ /* 1130 */ 35, 459, 341, 378, 37, 38, 390, 44, 41, 350,
+ /* 1140 */ 385, 352, 340, 363, 364, 104, 474, 475, 393, 52,
+ /* 1150 */ 483, 479, 480, 384, 349, 350, 103, 472, 349, 350,
+ /* 1160 */ 63, 64, 65, 66, 353, 340, 0, 378, 340, 68,
+ /* 1170 */ 378, 349, 350, 387, 369, 350, 390, 385, 369, 390,
+ /* 1180 */ 366, 392, 349, 350, 340, 393, 70, 71, 72, 349,
+ /* 1190 */ 350, 369, 390, 77, 78, 79, 340, 104, 246, 83,
+ /* 1200 */ 103, 44, 369, 378, 88, 89, 90, 91, 256, 369,
+ /* 1210 */ 94, 466, 45, 46, 425, 390, 427, 392, 390, 430,
+ /* 1220 */ 431, 432, 433, 434, 435, 340, 437, 44, 340, 349,
+ /* 1230 */ 350, 442, 366, 444, 390, 340, 353, 448, 449, 142,
+ /* 1240 */ 340, 340, 189, 170, 191, 350, 390, 352, 456, 369,
+ /* 1250 */ 425, 459, 427, 257, 258, 430, 431, 432, 433, 434,
+ /* 1260 */ 435, 370, 437, 349, 350, 387, 474, 475, 390, 378,
+ /* 1270 */ 378, 479, 480, 378, 221, 390, 216, 386, 390, 182,
+ /* 1280 */ 183, 184, 172, 369, 187, 390, 340, 392, 62, 371,
+ /* 1290 */ 390, 390, 374, 206, 400, 208, 350, 200, 201, 133,
+ /* 1300 */ 134, 135, 136, 137, 138, 139, 481, 482, 211, 340,
+ /* 1310 */ 365, 214, 259, 368, 217, 218, 219, 220, 221, 350,
+ /* 1320 */ 425, 352, 427, 222, 378, 430, 431, 432, 433, 434,
+ /* 1330 */ 435, 105, 437, 42, 114, 44, 390, 442, 392, 444,
+ /* 1340 */ 0, 260, 0, 448, 449, 12, 13, 378, 107, 107,
+ /* 1350 */ 107, 110, 110, 110, 107, 22, 259, 110, 48, 390,
+ /* 1360 */ 340, 392, 22, 44, 22, 0, 33, 44, 35, 44,
+ /* 1370 */ 350, 425, 44, 427, 44, 44, 430, 431, 432, 433,
+ /* 1380 */ 434, 435, 44, 437, 164, 8, 9, 22, 44, 12,
+ /* 1390 */ 13, 14, 15, 16, 425, 62, 427, 35, 378, 430,
+ /* 1400 */ 431, 432, 433, 434, 435, 44, 437, 44, 75, 35,
+ /* 1410 */ 390, 442, 392, 444, 140, 141, 44, 448, 449, 473,
+ /* 1420 */ 340, 1, 2, 104, 44, 44, 44, 104, 44, 104,
+ /* 1430 */ 350, 44, 104, 100, 104, 104, 47, 44, 281, 13,
+ /* 1440 */ 44, 13, 104, 44, 0, 425, 103, 427, 104, 340,
+ /* 1450 */ 430, 431, 432, 433, 434, 435, 113, 437, 378, 350,
+ /* 1460 */ 348, 35, 442, 35, 444, 104, 283, 104, 448, 449,
+ /* 1470 */ 390, 350, 392, 389, 400, 458, 104, 476, 450, 460,
+ /* 1480 */ 261, 410, 172, 49, 104, 104, 104, 378, 104, 426,
+ /* 1490 */ 20, 104, 103, 205, 50, 419, 424, 104, 358, 390,
+ /* 1500 */ 104, 392, 419, 104, 358, 425, 188, 427, 412, 42,
+ /* 1510 */ 430, 431, 432, 433, 434, 435, 397, 437, 20, 397,
+ /* 1520 */ 400, 169, 442, 190, 444, 192, 395, 20, 448, 449,
+ /* 1530 */ 349, 349, 397, 395, 425, 362, 427, 395, 101, 430,
+ /* 1540 */ 431, 432, 433, 434, 435, 99, 437, 4, 361, 349,
+ /* 1550 */ 98, 360, 349, 444, 192, 222, 223, 448, 449, 349,
+ /* 1560 */ 349, 20, 19, 342, 48, 340, 192, 419, 235, 236,
+ /* 1570 */ 237, 238, 239, 240, 241, 350, 33, 346, 342, 346,
+ /* 1580 */ 358, 20, 392, 20, 20, 358, 351, 411, 358, 351,
+ /* 1590 */ 358, 340, 49, 349, 342, 358, 378, 358, 358, 56,
+ /* 1600 */ 342, 350, 378, 378, 349, 62, 378, 378, 423, 209,
+ /* 1610 */ 421, 103, 378, 390, 419, 390, 356, 392, 196, 418,
+ /* 1620 */ 195, 417, 356, 378, 378, 340, 378, 378, 378, 378,
+ /* 1630 */ 194, 378, 416, 349, 268, 350, 465, 267, 468, 465,
+ /* 1640 */ 276, 390, 181, 392, 390, 102, 390, 467, 105, 392,
+ /* 1650 */ 425, 465, 427, 390, 278, 430, 431, 432, 433, 434,
+ /* 1660 */ 435, 390, 437, 378, 400, 400, 390, 405, 390, 444,
+ /* 1670 */ 464, 410, 405, 448, 449, 390, 425, 392, 427, 277,
+ /* 1680 */ 463, 430, 431, 432, 433, 434, 435, 340, 437, 262,
+ /* 1690 */ 410, 285, 484, 282, 280, 444, 258, 350, 350, 20,
+ /* 1700 */ 449, 429, 410, 349, 351, 356, 356, 403, 20, 174,
+ /* 1710 */ 425, 340, 427, 405, 390, 430, 431, 432, 433, 434,
+ /* 1720 */ 435, 350, 437, 402, 390, 378, 390, 390, 103, 390,
+ /* 1730 */ 383, 462, 405, 374, 356, 340, 477, 390, 350, 392,
+ /* 1740 */ 447, 390, 356, 103, 36, 350, 478, 390, 356, 378,
+ /* 1750 */ 382, 368, 349, 343, 383, 342, 471, 372, 413, 338,
+ /* 1760 */ 0, 390, 406, 392, 406, 357, 420, 0, 0, 372,
+ /* 1770 */ 372, 42, 425, 378, 427, 35, 35, 430, 431, 432,
+ /* 1780 */ 433, 434, 435, 0, 437, 390, 215, 392, 35, 35,
+ /* 1790 */ 215, 0, 35, 35, 215, 0, 425, 215, 427, 340,
+ /* 1800 */ 0, 430, 431, 432, 433, 434, 435, 35, 437, 350,
+ /* 1810 */ 0, 22, 0, 35, 210, 0, 198, 0, 198, 192,
+ /* 1820 */ 425, 199, 427, 190, 0, 430, 431, 432, 433, 434,
+ /* 1830 */ 435, 0, 437, 0, 186, 185, 0, 378, 0, 47,
+ /* 1840 */ 0, 0, 383, 0, 42, 0, 0, 0, 0, 390,
+ /* 1850 */ 0, 392, 0, 0, 0, 159, 35, 0, 159, 0,
+ /* 1860 */ 0, 0, 0, 340, 42, 0, 0, 0, 0, 0,
+ /* 1870 */ 0, 22, 0, 350, 0, 0, 0, 482, 0, 0,
+ /* 1880 */ 0, 0, 0, 0, 425, 340, 427, 0, 0, 430,
+ /* 1890 */ 431, 432, 433, 434, 435, 350, 437, 0, 0, 0,
+ /* 1900 */ 0, 378, 143, 0, 48, 0, 22, 0, 22, 340,
+ /* 1910 */ 112, 48, 35, 390, 0, 392, 0, 62, 0, 350,
+ /* 1920 */ 62, 0, 49, 378, 35, 0, 35, 0, 383, 39,
+ /* 1930 */ 35, 35, 39, 62, 0, 390, 14, 392, 0, 0,
+ /* 1940 */ 39, 49, 0, 49, 42, 39, 0, 378, 425, 40,
+ /* 1950 */ 427, 47, 383, 430, 431, 432, 433, 434, 435, 390,
+ /* 1960 */ 437, 392, 439, 1, 44, 39, 47, 39, 181, 0,
+ /* 1970 */ 425, 47, 427, 0, 0, 430, 431, 432, 433, 434,
+ /* 1980 */ 435, 19, 437, 0, 69, 0, 35, 39, 0, 49,
+ /* 1990 */ 35, 39, 49, 0, 425, 33, 427, 340, 35, 430,
+ /* 2000 */ 431, 432, 433, 434, 435, 49, 437, 350, 0, 39,
+ /* 2010 */ 35, 49, 35, 49, 0, 39, 340, 0, 0, 57,
+ /* 2020 */ 58, 59, 60, 0, 62, 22, 350, 340, 0, 0,
+ /* 2030 */ 0, 44, 35, 110, 35, 378, 35, 350, 44, 35,
+ /* 2040 */ 35, 35, 35, 22, 35, 35, 35, 390, 0, 392,
+ /* 2050 */ 0, 51, 0, 22, 378, 22, 22, 35, 0, 35,
+ /* 2060 */ 0, 35, 0, 22, 102, 378, 390, 105, 392, 20,
+ /* 2070 */ 35, 35, 35, 0, 35, 104, 22, 390, 0, 392,
+ /* 2080 */ 172, 103, 425, 103, 427, 22, 174, 430, 431, 432,
+ /* 2090 */ 433, 434, 435, 0, 437, 340, 0, 3, 263, 44,
+ /* 2100 */ 138, 425, 104, 427, 193, 350, 430, 431, 432, 433,
+ /* 2110 */ 434, 435, 425, 437, 427, 340, 172, 430, 431, 432,
+ /* 2120 */ 433, 434, 435, 103, 437, 350, 340, 197, 179, 172,
+ /* 2130 */ 103, 48, 48, 378, 44, 173, 350, 3, 47, 101,
+ /* 2140 */ 178, 99, 44, 44, 104, 390, 340, 392, 104, 103,
+ /* 2150 */ 103, 47, 44, 378, 103, 44, 350, 104, 35, 103,
+ /* 2160 */ 198, 104, 35, 104, 378, 390, 35, 392, 35, 104,
+ /* 2170 */ 35, 35, 104, 47, 257, 47, 390, 0, 392, 0,
+ /* 2180 */ 425, 44, 427, 0, 378, 430, 431, 432, 433, 434,
+ /* 2190 */ 435, 0, 437, 39, 263, 263, 390, 47, 392, 104,
+ /* 2200 */ 425, 104, 427, 103, 340, 430, 431, 432, 433, 434,
+ /* 2210 */ 435, 425, 437, 427, 350, 103, 430, 431, 432, 433,
+ /* 2220 */ 434, 435, 103, 437, 103, 340, 0, 103, 39, 113,
+ /* 2230 */ 44, 425, 103, 427, 173, 350, 430, 431, 432, 433,
+ /* 2240 */ 434, 435, 378, 437, 175, 47, 101, 101, 2, 22,
+ /* 2250 */ 340, 103, 47, 104, 390, 103, 392, 244, 47, 104,
+ /* 2260 */ 350, 103, 103, 378, 104, 222, 103, 22, 104, 224,
+ /* 2270 */ 103, 114, 104, 35, 35, 390, 340, 392, 103, 35,
+ /* 2280 */ 104, 103, 35, 35, 104, 103, 350, 103, 378, 425,
+ /* 2290 */ 35, 427, 104, 103, 430, 431, 432, 433, 434, 435,
+ /* 2300 */ 390, 437, 392, 104, 104, 35, 125, 103, 44, 35,
+ /* 2310 */ 425, 103, 427, 103, 378, 430, 431, 432, 433, 434,
+ /* 2320 */ 435, 125, 437, 103, 22, 69, 390, 125, 392, 68,
+ /* 2330 */ 35, 125, 35, 35, 35, 425, 340, 427, 35, 35,
+ /* 2340 */ 430, 431, 432, 433, 434, 435, 350, 437, 35, 35,
+ /* 2350 */ 75, 97, 340, 44, 35, 35, 35, 22, 35, 35,
+ /* 2360 */ 35, 425, 350, 427, 75, 340, 430, 431, 432, 433,
+ /* 2370 */ 434, 435, 35, 437, 378, 350, 35, 35, 35, 35,
+ /* 2380 */ 22, 35, 0, 35, 39, 49, 390, 0, 392, 35,
+ /* 2390 */ 378, 39, 0, 0, 35, 39, 49, 35, 49, 49,
+ /* 2400 */ 39, 0, 390, 378, 392, 35, 35, 0, 22, 21,
+ /* 2410 */ 485, 22, 22, 485, 20, 390, 21, 392, 485, 485,
+ /* 2420 */ 485, 425, 485, 427, 485, 340, 430, 431, 432, 433,
+ /* 2430 */ 434, 435, 485, 437, 485, 350, 485, 425, 485, 427,
+ /* 2440 */ 485, 340, 430, 431, 432, 433, 434, 435, 485, 437,
+ /* 2450 */ 425, 350, 427, 485, 340, 430, 431, 432, 433, 434,
+ /* 2460 */ 435, 485, 437, 378, 350, 485, 485, 485, 485, 485,
+ /* 2470 */ 485, 485, 485, 485, 485, 390, 485, 392, 485, 378,
+ /* 2480 */ 485, 485, 485, 485, 485, 485, 485, 485, 485, 485,
+ /* 2490 */ 485, 390, 378, 392, 485, 485, 485, 485, 485, 485,
+ /* 2500 */ 485, 485, 485, 485, 390, 485, 392, 485, 485, 485,
+ /* 2510 */ 425, 485, 427, 485, 485, 430, 431, 432, 433, 434,
+ /* 2520 */ 435, 485, 437, 485, 485, 485, 425, 340, 427, 485,
+ /* 2530 */ 485, 430, 431, 432, 433, 434, 435, 350, 437, 425,
+ /* 2540 */ 485, 427, 485, 340, 430, 431, 432, 433, 434, 435,
+ /* 2550 */ 485, 437, 485, 350, 340, 485, 485, 485, 485, 485,
+ /* 2560 */ 485, 485, 485, 485, 350, 378, 485, 485, 485, 485,
+ /* 2570 */ 485, 485, 485, 485, 485, 485, 485, 390, 485, 392,
+ /* 2580 */ 485, 378, 485, 485, 485, 485, 485, 485, 485, 485,
+ /* 2590 */ 485, 485, 378, 390, 485, 392, 485, 485, 485, 485,
+ /* 2600 */ 485, 485, 485, 485, 390, 485, 392, 485, 485, 485,
+ /* 2610 */ 485, 485, 425, 485, 427, 485, 340, 430, 431, 432,
+ /* 2620 */ 433, 434, 435, 485, 437, 485, 350, 485, 425, 485,
+ /* 2630 */ 427, 485, 340, 430, 431, 432, 433, 434, 435, 425,
+ /* 2640 */ 437, 427, 350, 485, 430, 431, 432, 433, 434, 435,
+ /* 2650 */ 485, 437, 485, 485, 378, 485, 485, 485, 485, 485,
+ /* 2660 */ 485, 485, 485, 485, 485, 485, 390, 485, 392, 485,
+ /* 2670 */ 378, 485, 485, 485, 485, 485, 485, 485, 485, 485,
+ /* 2680 */ 485, 485, 390, 485, 392, 485, 485, 485, 485, 485,
+ /* 2690 */ 485, 485, 485, 485, 485, 485, 485, 485, 485, 485,
+ /* 2700 */ 485, 425, 485, 427, 485, 485, 430, 431, 432, 433,
+ /* 2710 */ 434, 435, 485, 437, 485, 485, 485, 425, 485, 427,
+ /* 2720 */ 485, 485, 430, 431, 432, 433, 434, 435, 485, 437,
/* 2730 */ 485, 485, 485, 485, 485, 485, 485, 485, 485, 485,
/* 2740 */ 485, 485, 485, 485, 485, 485, 485, 485, 485, 485,
- /* 2750 */ 485, 485, 485, 485, 485, 485, 485, 485, 485, 485,
- /* 2760 */ 485, 485, 485, 337, 337, 337, 337, 337, 337, 337,
+ /* 2750 */ 485, 485, 485, 485, 337, 337, 337, 337, 337, 337,
+ /* 2760 */ 337, 337, 337, 337, 337, 337, 337, 337, 337, 337,
/* 2770 */ 337, 337, 337, 337, 337, 337, 337, 337, 337, 337,
/* 2780 */ 337, 337, 337, 337, 337, 337, 337, 337, 337, 337,
/* 2790 */ 337, 337, 337, 337, 337, 337, 337, 337, 337, 337,
@@ -799,211 +800,213 @@ static const YYCODETYPE yy_lookahead[] = {
/* 3020 */ 337, 337, 337, 337, 337, 337, 337, 337, 337, 337,
/* 3030 */ 337, 337, 337, 337, 337, 337, 337, 337, 337, 337,
/* 3040 */ 337, 337, 337, 337, 337, 337, 337, 337, 337, 337,
- /* 3050 */ 337, 337, 337,
+ /* 3050 */ 337, 337, 337, 337, 337, 337, 337, 337, 337, 337,
+ /* 3060 */ 337, 337, 337, 337, 337, 337, 337,
};
-#define YY_SHIFT_COUNT (791)
+#define YY_SHIFT_COUNT (793)
#define YY_SHIFT_MIN (0)
-#define YY_SHIFT_MAX (2417)
+#define YY_SHIFT_MAX (2407)
static const unsigned short int yy_shift_ofst[] = {
/* 0 */ 1097, 0, 104, 0, 336, 336, 336, 336, 336, 336,
/* 10 */ 336, 336, 336, 336, 336, 336, 440, 671, 671, 775,
/* 20 */ 671, 671, 671, 671, 671, 671, 671, 671, 671, 671,
/* 30 */ 671, 671, 671, 671, 671, 671, 671, 671, 671, 671,
/* 40 */ 671, 671, 671, 671, 671, 671, 671, 671, 671, 671,
- /* 50 */ 671, 173, 483, 524, 446, 426, 476, 523, 476, 446,
- /* 60 */ 446, 1111, 476, 1111, 1111, 275, 476, 141, 644, 223,
- /* 70 */ 223, 644, 496, 496, 413, 33, 293, 293, 223, 223,
- /* 80 */ 223, 223, 223, 223, 223, 410, 223, 223, 205, 141,
- /* 90 */ 223, 223, 458, 223, 141, 223, 410, 223, 410, 141,
- /* 100 */ 223, 223, 141, 223, 141, 141, 141, 223, 537, 917,
- /* 110 */ 15, 15, 565, 170, 953, 953, 953, 953, 953, 953,
- /* 120 */ 953, 953, 953, 953, 953, 953, 953, 953, 953, 953,
- /* 130 */ 953, 953, 953, 1095, 14, 413, 33, 994, 994, 113,
- /* 140 */ 116, 116, 116, 180, 266, 266, 113, 505, 505, 505,
- /* 150 */ 205, 510, 513, 141, 697, 141, 697, 697, 713, 711,
- /* 160 */ 297, 297, 297, 297, 297, 297, 297, 297, 1866, 2242,
- /* 170 */ 185, 31, 508, 282, 271, 281, 341, 341, 417, 720,
- /* 180 */ 330, 517, 1230, 826, 913, 970, 1008, 243, 352, 1008,
- /* 190 */ 1122, 1146, 1000, 1258, 1474, 1508, 1326, 205, 1508, 205,
- /* 200 */ 1350, 1505, 1529, 1505, 1383, 1536, 1536, 1505, 1383, 1383,
- /* 210 */ 1473, 1477, 1536, 1481, 1536, 1536, 1536, 1566, 1540, 1566,
- /* 220 */ 1540, 1508, 205, 1574, 205, 1578, 1580, 205, 1578, 205,
- /* 230 */ 205, 205, 1536, 205, 1566, 141, 141, 141, 141, 141,
- /* 240 */ 141, 141, 141, 141, 141, 141, 1536, 1566, 697, 697,
- /* 250 */ 697, 1416, 1526, 1508, 537, 1435, 1438, 1574, 537, 1471,
- /* 260 */ 1536, 1529, 1529, 697, 1414, 1409, 697, 1414, 1409, 697,
- /* 270 */ 697, 141, 1411, 1518, 1414, 1426, 1428, 1446, 1258, 1418,
- /* 280 */ 1430, 1434, 1452, 505, 1698, 1536, 1578, 537, 537, 1720,
- /* 290 */ 1409, 697, 697, 697, 697, 697, 1409, 697, 1581, 537,
- /* 300 */ 713, 537, 505, 1676, 1679, 697, 711, 1536, 537, 1750,
- /* 310 */ 1566, 2716, 2716, 2716, 2716, 2716, 2716, 2716, 2716, 2716,
- /* 320 */ 1018, 425, 224, 717, 306, 378, 1139, 718, 455, 871,
- /* 330 */ 1182, 824, 971, 971, 971, 971, 971, 971, 971, 971,
- /* 340 */ 971, 357, 1220, 118, 118, 451, 552, 556, 657, 76,
- /* 350 */ 772, 954, 731, 664, 494, 494, 924, 1191, 636, 924,
- /* 360 */ 924, 924, 645, 156, 821, 836, 1198, 1091, 362, 1153,
- /* 370 */ 1181, 1192, 1197, 1223, 1277, 1309, 1335, 1340, 1115, 1297,
- /* 380 */ 1314, 1120, 1324, 1325, 1327, 1165, 1130, 1037, 1244, 1328,
- /* 390 */ 1330, 1331, 1356, 1363, 1364, 1408, 1370, 1142, 1382, 1355,
- /* 400 */ 1389, 1398, 1410, 1412, 1420, 1425, 1286, 617, 1385, 1417,
- /* 410 */ 1476, 1437, 1421, 1798, 1804, 1806, 1765, 1808, 1774, 1596,
- /* 420 */ 1777, 1778, 1780, 1602, 1818, 1785, 1786, 1607, 1823, 1610,
- /* 430 */ 1826, 1792, 1828, 1807, 1830, 1797, 1625, 1834, 1638, 1837,
- /* 440 */ 1641, 1642, 1648, 1652, 1844, 1845, 1846, 1661, 1664, 1853,
- /* 450 */ 1860, 1815, 1864, 1869, 1870, 1824, 1877, 1879, 1880, 1881,
- /* 460 */ 1882, 1883, 1886, 1888, 1730, 1861, 1900, 1742, 1902, 1903,
- /* 470 */ 1904, 1905, 1906, 1907, 1909, 1910, 1911, 1912, 1913, 1914,
- /* 480 */ 1927, 1929, 1930, 1932, 1874, 1917, 1918, 1919, 1920, 1921,
- /* 490 */ 1922, 1915, 1933, 1934, 1935, 1795, 1939, 1940, 1923, 1894,
- /* 500 */ 1924, 1895, 1944, 1885, 1925, 1949, 1889, 1952, 1891, 1954,
- /* 510 */ 1955, 1926, 1908, 1928, 1956, 1931, 1938, 1936, 1958, 1941,
- /* 520 */ 1945, 1947, 1959, 1942, 1962, 1957, 1967, 1953, 1951, 1960,
- /* 530 */ 1965, 1961, 1963, 1969, 1964, 1970, 1972, 1973, 1974, 1784,
- /* 540 */ 1978, 1980, 1984, 1943, 1996, 2001, 1976, 1968, 1966, 2019,
- /* 550 */ 1985, 1981, 1988, 2029, 1997, 1982, 1995, 2035, 2002, 1987,
- /* 560 */ 2003, 2040, 2041, 2045, 2046, 2047, 2049, 1948, 1971, 2018,
- /* 570 */ 2043, 2063, 2031, 2032, 2033, 2034, 2038, 2039, 2042, 2026,
- /* 580 */ 2036, 2044, 2051, 2054, 2061, 2075, 2056, 2082, 2066, 2048,
- /* 590 */ 2089, 2076, 2067, 2103, 2069, 2105, 2071, 2107, 2079, 2088,
- /* 600 */ 2074, 2078, 2080, 2006, 2008, 2116, 1946, 2016, 1950, 2086,
- /* 610 */ 2109, 2129, 1975, 2111, 1986, 1989, 2134, 2135, 1990, 1991,
- /* 620 */ 2133, 2093, 1875, 2037, 2057, 2062, 2091, 2065, 2094, 2068,
- /* 630 */ 2060, 2099, 2100, 2072, 2070, 2077, 2081, 2073, 2104, 2098,
- /* 640 */ 2122, 2083, 2106, 1977, 2090, 2092, 2168, 2131, 1992, 2143,
- /* 650 */ 2146, 2147, 2148, 2150, 2153, 2096, 2097, 2140, 1993, 2151,
- /* 660 */ 2145, 2190, 2197, 2198, 2202, 2102, 2164, 1951, 2157, 2108,
- /* 670 */ 2117, 2118, 2120, 2121, 2055, 2123, 2207, 2171, 2064, 2126,
- /* 680 */ 2130, 1951, 2201, 2176, 2144, 2000, 2156, 2245, 2227, 2030,
- /* 690 */ 2158, 2149, 2159, 2155, 2160, 2161, 2204, 2165, 2166, 2217,
- /* 700 */ 2163, 2232, 2050, 2167, 2174, 2181, 2236, 2237, 2172, 2185,
- /* 710 */ 2243, 2187, 2189, 2256, 2193, 2203, 2262, 2206, 2212, 2269,
- /* 720 */ 2208, 2213, 2275, 2215, 2199, 2209, 2210, 2214, 2219, 2279,
- /* 730 */ 2223, 2293, 2234, 2279, 2279, 2316, 2272, 2274, 2309, 2317,
- /* 740 */ 2319, 2321, 2322, 2323, 2324, 2325, 2287, 2268, 2331, 2341,
- /* 750 */ 2342, 2343, 2344, 2345, 2346, 2347, 2304, 2026, 2349, 2036,
- /* 760 */ 2350, 2351, 2352, 2353, 2367, 2355, 2391, 2357, 2356, 2359,
- /* 770 */ 2394, 2361, 2358, 2360, 2400, 2369, 2362, 2363, 2406, 2373,
- /* 780 */ 2365, 2370, 2410, 2377, 2381, 2417, 2396, 2398, 2399, 2401,
- /* 790 */ 2404, 2402,
+ /* 50 */ 671, 335, 519, 1053, 263, 215, 156, 586, 156, 263,
+ /* 60 */ 263, 1333, 156, 1333, 1333, 621, 156, 34, 798, 111,
+ /* 70 */ 111, 798, 278, 278, 294, 138, 308, 308, 111, 111,
+ /* 80 */ 111, 111, 111, 111, 111, 199, 111, 111, 297, 34,
+ /* 90 */ 111, 111, 223, 111, 34, 111, 199, 111, 199, 34,
+ /* 100 */ 111, 111, 34, 111, 34, 34, 34, 111, 460, 917,
+ /* 110 */ 15, 15, 378, 170, 700, 700, 700, 700, 700, 700,
+ /* 120 */ 700, 700, 700, 700, 700, 700, 700, 700, 700, 700,
+ /* 130 */ 700, 700, 700, 326, 410, 294, 138, 633, 633, 714,
+ /* 140 */ 426, 426, 426, 618, 107, 107, 714, 299, 299, 299,
+ /* 150 */ 297, 356, 353, 34, 577, 34, 577, 577, 663, 730,
+ /* 160 */ 259, 259, 259, 259, 259, 259, 259, 259, 1962, 1116,
+ /* 170 */ 185, 31, 276, 249, 394, 176, 13, 13, 862, 1073,
+ /* 180 */ 489, 776, 1167, 949, 721, 820, 996, 355, 581, 996,
+ /* 190 */ 997, 1081, 256, 1219, 1434, 1470, 1288, 297, 1470, 297,
+ /* 200 */ 1318, 1467, 1498, 1467, 1352, 1507, 1507, 1467, 1352, 1352,
+ /* 210 */ 1437, 1446, 1507, 1452, 1507, 1507, 1507, 1541, 1516, 1541,
+ /* 220 */ 1516, 1470, 297, 1561, 297, 1563, 1564, 297, 1563, 297,
+ /* 230 */ 297, 297, 1507, 297, 1541, 34, 34, 34, 34, 34,
+ /* 240 */ 34, 34, 34, 34, 34, 34, 1507, 1541, 577, 577,
+ /* 250 */ 577, 1400, 1508, 1470, 460, 1422, 1425, 1561, 460, 1436,
+ /* 260 */ 1219, 1507, 1498, 1498, 577, 1366, 1370, 577, 1366, 1370,
+ /* 270 */ 577, 577, 34, 1364, 1461, 1366, 1376, 1402, 1427, 1219,
+ /* 280 */ 1406, 1411, 1414, 1438, 299, 1679, 1219, 1507, 1563, 460,
+ /* 290 */ 460, 1688, 1370, 577, 577, 577, 577, 577, 1370, 577,
+ /* 300 */ 1535, 460, 663, 460, 299, 1625, 1640, 577, 730, 1507,
+ /* 310 */ 460, 1708, 1541, 2730, 2730, 2730, 2730, 2730, 2730, 2730,
+ /* 320 */ 2730, 2730, 1018, 679, 224, 739, 1543, 858, 1041, 418,
+ /* 330 */ 748, 974, 1110, 1166, 1377, 1377, 1377, 1377, 1377, 1377,
+ /* 340 */ 1377, 1377, 1377, 500, 729, 51, 51, 441, 484, 446,
+ /* 350 */ 660, 76, 121, 699, 593, 291, 924, 924, 701, 536,
+ /* 360 */ 952, 701, 701, 701, 384, 1060, 1093, 1095, 1291, 1220,
+ /* 370 */ 765, 1241, 1242, 1243, 1247, 566, 688, 1340, 1342, 1365,
+ /* 380 */ 1087, 1319, 1323, 1226, 1325, 1328, 1330, 1274, 1157, 1183,
+ /* 390 */ 1310, 1331, 1338, 1344, 1361, 1363, 1372, 1420, 1380, 1101,
+ /* 400 */ 1381, 1389, 1382, 1384, 1387, 1393, 1396, 1399, 1343, 1362,
+ /* 410 */ 1374, 1426, 1428, 723, 1444, 1760, 1767, 1768, 1729, 1783,
+ /* 420 */ 1740, 1571, 1741, 1753, 1754, 1575, 1791, 1757, 1758, 1579,
+ /* 430 */ 1795, 1582, 1800, 1772, 1810, 1789, 1812, 1778, 1604, 1815,
+ /* 440 */ 1618, 1817, 1620, 1622, 1627, 1633, 1824, 1831, 1833, 1648,
+ /* 450 */ 1650, 1836, 1838, 1792, 1840, 1841, 1843, 1802, 1845, 1846,
+ /* 460 */ 1847, 1848, 1850, 1852, 1853, 1854, 1696, 1821, 1857, 1699,
+ /* 470 */ 1859, 1860, 1861, 1862, 1874, 1875, 1876, 1878, 1879, 1880,
+ /* 480 */ 1881, 1882, 1883, 1887, 1888, 1897, 1822, 1865, 1866, 1867,
+ /* 490 */ 1868, 1869, 1870, 1849, 1872, 1898, 1899, 1759, 1900, 1903,
+ /* 500 */ 1884, 1856, 1886, 1863, 1905, 1855, 1877, 1907, 1858, 1914,
+ /* 510 */ 1871, 1916, 1918, 1889, 1873, 1890, 1921, 1891, 1892, 1893,
+ /* 520 */ 1925, 1895, 1894, 1901, 1927, 1896, 1934, 1902, 1906, 1920,
+ /* 530 */ 1904, 1919, 1922, 1924, 1938, 1909, 1926, 1939, 1942, 1946,
+ /* 540 */ 1928, 1787, 1969, 1973, 1974, 1915, 1983, 1985, 1951, 1940,
+ /* 550 */ 1948, 1988, 1955, 1943, 1952, 1993, 1963, 1956, 1970, 2008,
+ /* 560 */ 1975, 1964, 1976, 2014, 2017, 2018, 2023, 2028, 2029, 1798,
+ /* 570 */ 1923, 1977, 2003, 2030, 1997, 1999, 2001, 2004, 2005, 2006,
+ /* 580 */ 2007, 1987, 1994, 2009, 2010, 2021, 2011, 2048, 2031, 2050,
+ /* 590 */ 2033, 2000, 2052, 2034, 2022, 2058, 2024, 2060, 2026, 2062,
+ /* 600 */ 2041, 2049, 2035, 2036, 2037, 1971, 1978, 2073, 1908, 1980,
+ /* 610 */ 1930, 2039, 2054, 2078, 1911, 2063, 1944, 1912, 2093, 2096,
+ /* 620 */ 1957, 1949, 2094, 2055, 1835, 2020, 1998, 2027, 2083, 2038,
+ /* 630 */ 2084, 2042, 2040, 2090, 2098, 2044, 2046, 2047, 2051, 2053,
+ /* 640 */ 2099, 2091, 2104, 2056, 2108, 1931, 2057, 2059, 2134, 2111,
+ /* 650 */ 1932, 2123, 2127, 2131, 2133, 2135, 2136, 2065, 2068, 2126,
+ /* 660 */ 1917, 2137, 2128, 2177, 2179, 2183, 2191, 2100, 2154, 1904,
+ /* 670 */ 2150, 2112, 2095, 2097, 2119, 2121, 2069, 2124, 2226, 2189,
+ /* 680 */ 2061, 2129, 2116, 1904, 2198, 2186, 2145, 2013, 2146, 2246,
+ /* 690 */ 2227, 2043, 2148, 2149, 2152, 2155, 2158, 2160, 2205, 2159,
+ /* 700 */ 2163, 2211, 2164, 2245, 2045, 2167, 2157, 2168, 2238, 2239,
+ /* 710 */ 2175, 2176, 2244, 2178, 2180, 2247, 2182, 2188, 2248, 2184,
+ /* 720 */ 2199, 2255, 2190, 2200, 2270, 2204, 2181, 2196, 2202, 2206,
+ /* 730 */ 2208, 2264, 2210, 2274, 2220, 2264, 2264, 2302, 2256, 2261,
+ /* 740 */ 2295, 2297, 2298, 2299, 2303, 2304, 2313, 2314, 2275, 2254,
+ /* 750 */ 2309, 2319, 2320, 2321, 2335, 2323, 2324, 2325, 2289, 1987,
+ /* 760 */ 2337, 1994, 2341, 2342, 2343, 2344, 2358, 2346, 2382, 2348,
+ /* 770 */ 2336, 2345, 2387, 2354, 2347, 2352, 2392, 2359, 2349, 2356,
+ /* 780 */ 2393, 2362, 2350, 2361, 2401, 2370, 2371, 2407, 2386, 2388,
+ /* 790 */ 2389, 2390, 2395, 2394,
};
-#define YY_REDUCE_COUNT (319)
-#define YY_REDUCE_MIN (-454)
-#define YY_REDUCE_MAX (2278)
+#define YY_REDUCE_COUNT (321)
+#define YY_REDUCE_MIN (-438)
+#define YY_REDUCE_MAX (2292)
static const short yy_reduce_ofst[] = {
- /* 0 */ 583, -340, -280, -67, -132, 158, 268, 603, 789, 895,
- /* 10 */ 946, 1013, 1046, 369, 1073, 1109, 11, 399, 1135, 1214,
- /* 20 */ 499, 1240, 1302, 837, 1319, 1338, 1424, 1441, 1460, 1558,
- /* 30 */ 1591, 1624, 1660, 1693, 1722, 1782, 1801, 1849, 1868, 1916,
- /* 40 */ 1937, 2005, 2023, 2053, 2084, 2101, 2152, 2173, 2200, 2261,
- /* 50 */ 2278, 301, 1227, -402, -167, -360, 252, 709, 803, 465,
- /* 60 */ 674, -348, 886, 259, 819, -454, -374, -322, -8, -227,
- /* 70 */ -72, -249, -289, 61, -362, 332, -284, -238, -204, -41,
- /* 80 */ 130, 169, 265, 272, 338, -214, 379, 394, 97, -5,
- /* 90 */ 495, 622, 29, 672, 450, 767, -182, 809, -144, 119,
- /* 100 */ 944, 1035, 481, 1043, 344, 563, 392, 1048, 78, -308,
- /* 110 */ -442, -442, -358, -333, -321, -215, 204, 307, 498, 574,
- /* 120 */ 584, 587, 588, 609, 642, 654, 703, 747, 779, 781,
- /* 130 */ 782, 817, 840, -301, -70, -4, 346, 701, 705, 662,
- /* 140 */ -70, 296, 335, 84, 197, 419, 722, 136, 164, 806,
- /* 150 */ 850, -293, 774, -312, 854, 576, 861, 862, 879, 889,
- /* 160 */ -364, 497, 502, 516, 650, 746, 761, 650, 528, 851,
- /* 170 */ 948, 900, 884, 910, 1034, 949, 1075, 1087, 1061, 1061,
- /* 180 */ 1113, 1082, 1136, 1141, 1103, 1096, 1040, 1040, 1033, 1040,
- /* 190 */ 1065, 1058, 1061, 1094, 1101, 1112, 1114, 1174, 1116, 1177,
- /* 200 */ 1134, 1151, 1150, 1154, 1160, 1211, 1212, 1166, 1176, 1178,
- /* 210 */ 1213, 1217, 1232, 1222, 1234, 1235, 1236, 1245, 1243, 1249,
- /* 220 */ 1247, 1179, 1237, 1204, 1241, 1250, 1193, 1251, 1256, 1252,
- /* 230 */ 1253, 1255, 1259, 1257, 1260, 1246, 1248, 1261, 1262, 1265,
- /* 240 */ 1272, 1275, 1276, 1278, 1279, 1282, 1270, 1280, 1224, 1231,
- /* 250 */ 1238, 1205, 1215, 1216, 1281, 1219, 1239, 1269, 1306, 1264,
- /* 260 */ 1317, 1268, 1284, 1291, 1225, 1288, 1299, 1226, 1290, 1308,
- /* 270 */ 1310, 1061, 1228, 1271, 1254, 1263, 1285, 1283, 1287, 1233,
- /* 280 */ 1242, 1267, 1040, 1365, 1293, 1374, 1373, 1369, 1375, 1339,
- /* 290 */ 1336, 1357, 1367, 1368, 1371, 1376, 1354, 1377, 1358, 1407,
- /* 300 */ 1402, 1422, 1427, 1333, 1401, 1394, 1419, 1436, 1432, 1447,
- /* 310 */ 1450, 1381, 1378, 1388, 1390, 1423, 1431, 1433, 1442, 1451,
+ /* 0 */ 311, -340, -280, -25, -132, 158, 603, 789, 895, 969,
+ /* 10 */ 265, 1020, 1080, 390, 1109, 1225, 53, 454, 825, 1251,
+ /* 20 */ 1285, 1347, 1371, 946, 1395, 1459, 1523, 1545, 1569, 1657,
+ /* 30 */ 1676, 1687, 1755, 1775, 1786, 1806, 1864, 1885, 1910, 1936,
+ /* 40 */ 1996, 2012, 2025, 2085, 2101, 2114, 2187, 2203, 2214, 2276,
+ /* 50 */ 2292, 167, 792, -403, 78, -401, -360, 252, 672, 47,
+ /* 60 */ 197, 379, -438, -244, 339, -331, 505, -211, -274, -63,
+ /* 70 */ 23, -391, -40, 149, -317, -31, -342, -265, 295, 464,
+ /* 80 */ 486, 571, 608, 610, 622, -344, 673, 675, -252, -205,
+ /* 90 */ 677, 805, -243, 809, 219, 822, -92, 833, 19, -16,
+ /* 100 */ 423, 840, 325, 880, 563, 755, 891, 914, -268, -207,
+ /* 110 */ -173, -173, 327, -337, -321, -229, -93, 61, 209, 403,
+ /* 120 */ 439, 461, 529, 630, 746, 802, 828, 844, 856, 885,
+ /* 130 */ 888, 900, 901, -288, -34, -213, 90, 448, 476, 554,
+ /* 140 */ -34, 50, 501, 247, -409, 301, 780, 499, 533, 582,
+ /* 150 */ 769, 234, 422, 643, 609, 549, 786, 878, 918, 945,
+ /* 160 */ -364, 447, 477, 535, 547, 575, 615, 547, 628, 681,
+ /* 170 */ 791, 662, 667, 685, 811, 745, 814, 866, 892, 892,
+ /* 180 */ 883, 894, 1112, 1121, 1084, 1074, 1017, 1017, 1001, 1017,
+ /* 190 */ 1028, 1019, 892, 1071, 1063, 1076, 1072, 1140, 1083, 1146,
+ /* 200 */ 1096, 1119, 1120, 1122, 1131, 1181, 1182, 1135, 1138, 1142,
+ /* 210 */ 1173, 1187, 1200, 1191, 1203, 1210, 1211, 1221, 1231, 1236,
+ /* 220 */ 1233, 1148, 1222, 1190, 1227, 1235, 1176, 1230, 1238, 1232,
+ /* 230 */ 1237, 1239, 1244, 1240, 1252, 1218, 1224, 1228, 1229, 1234,
+ /* 240 */ 1245, 1246, 1248, 1249, 1250, 1253, 1255, 1258, 1223, 1254,
+ /* 250 */ 1256, 1185, 1189, 1195, 1260, 1201, 1204, 1257, 1266, 1216,
+ /* 260 */ 1261, 1284, 1264, 1265, 1263, 1171, 1262, 1271, 1174, 1267,
+ /* 270 */ 1276, 1278, 892, 1170, 1180, 1186, 1206, 1217, 1269, 1280,
+ /* 280 */ 1208, 1268, 1259, 1017, 1348, 1272, 1292, 1354, 1353, 1349,
+ /* 290 */ 1350, 1304, 1308, 1324, 1334, 1336, 1337, 1339, 1327, 1351,
+ /* 300 */ 1321, 1378, 1359, 1386, 1388, 1293, 1368, 1357, 1383, 1403,
+ /* 310 */ 1392, 1410, 1413, 1345, 1346, 1356, 1358, 1385, 1397, 1398,
+ /* 320 */ 1408, 1421,
};
static const YYACTIONTYPE yy_default[] = {
- /* 0 */ 1764, 1764, 1764, 1764, 1764, 1764, 1764, 1764, 1764, 1764,
- /* 10 */ 1764, 1764, 1764, 1764, 1764, 1764, 1764, 1764, 1764, 1764,
- /* 20 */ 1764, 1764, 1764, 1764, 1764, 1764, 1764, 1764, 1764, 1764,
- /* 30 */ 1764, 1764, 1764, 1764, 1764, 1764, 1764, 1764, 1764, 1764,
- /* 40 */ 1764, 1764, 1764, 1764, 1764, 1764, 1764, 1764, 1764, 1764,
- /* 50 */ 1764, 1764, 1764, 1764, 1764, 1764, 1764, 1764, 1764, 1764,
- /* 60 */ 1764, 1764, 1764, 1764, 1764, 1764, 1764, 1764, 1764, 1764,
- /* 70 */ 1764, 1764, 1764, 1764, 2045, 1764, 1764, 1764, 1764, 1764,
- /* 80 */ 1764, 1764, 1764, 1764, 1764, 1764, 1764, 1764, 1853, 1764,
- /* 90 */ 1764, 1764, 1764, 1764, 1764, 1764, 1764, 1764, 1764, 1764,
- /* 100 */ 1764, 1764, 1764, 1764, 1764, 1764, 1764, 1764, 1851, 2038,
- /* 110 */ 2263, 1764, 1764, 1764, 1764, 1764, 1764, 1764, 1764, 1764,
- /* 120 */ 1764, 1764, 1764, 1764, 1764, 1764, 1764, 1764, 1764, 1764,
- /* 130 */ 1764, 1764, 1764, 1764, 2275, 1764, 1764, 1827, 1827, 1764,
- /* 140 */ 2275, 2275, 2275, 1851, 2235, 2235, 1764, 1764, 1764, 1764,
- /* 150 */ 1853, 2105, 1764, 1764, 1764, 1764, 1764, 1764, 1973, 1764,
- /* 160 */ 1764, 1764, 1764, 1764, 1997, 1764, 1764, 1764, 2097, 1764,
- /* 170 */ 1764, 2300, 2356, 1764, 1764, 2303, 1764, 1764, 1764, 1764,
- /* 180 */ 1764, 2050, 1764, 1764, 1926, 2290, 2267, 2281, 2340, 2268,
- /* 190 */ 2265, 2284, 1764, 2294, 1764, 1764, 2119, 1853, 1764, 1853,
- /* 200 */ 2084, 2043, 1764, 2043, 2040, 1764, 1764, 2043, 2040, 2040,
- /* 210 */ 1915, 1911, 1764, 1909, 1764, 1764, 1764, 1764, 1811, 1764,
- /* 220 */ 1811, 1764, 1853, 1764, 1853, 1764, 1764, 1853, 1764, 1853,
- /* 230 */ 1853, 1853, 1764, 1853, 1764, 1764, 1764, 1764, 1764, 1764,
- /* 240 */ 1764, 1764, 1764, 1764, 1764, 1764, 1764, 1764, 1764, 1764,
- /* 250 */ 1764, 2117, 2103, 1764, 1851, 2095, 2093, 1764, 1851, 2091,
- /* 260 */ 1764, 1764, 1764, 1764, 2311, 2309, 1764, 2311, 2309, 1764,
- /* 270 */ 1764, 1764, 2325, 2321, 2311, 2329, 2327, 2296, 2294, 2359,
- /* 280 */ 2346, 2342, 2281, 1764, 1764, 1764, 1764, 1851, 1851, 1764,
- /* 290 */ 2309, 1764, 1764, 1764, 1764, 1764, 2309, 1764, 1764, 1851,
- /* 300 */ 1764, 1851, 1764, 1764, 1942, 1764, 1764, 1764, 1851, 1796,
- /* 310 */ 1764, 2086, 2108, 2068, 2068, 1976, 1976, 1976, 1854, 1769,
- /* 320 */ 1764, 1764, 1764, 1764, 1764, 1764, 1764, 1764, 1764, 1764,
- /* 330 */ 1764, 1764, 2324, 2323, 2190, 1764, 2239, 2238, 2237, 2228,
- /* 340 */ 2189, 1938, 1764, 2188, 2187, 1764, 1764, 1764, 1764, 1764,
- /* 350 */ 1764, 1764, 1764, 1764, 2059, 2058, 2181, 1764, 1764, 2182,
- /* 360 */ 2180, 2179, 1764, 1764, 1764, 1764, 1764, 1764, 1764, 1764,
- /* 370 */ 1764, 1764, 1764, 1764, 1764, 1764, 1764, 1764, 1764, 1764,
- /* 380 */ 1764, 1764, 1764, 1764, 1764, 1764, 2343, 2347, 1764, 1764,
- /* 390 */ 1764, 1764, 1764, 1764, 1764, 2264, 1764, 1764, 1764, 2163,
- /* 400 */ 1764, 1764, 1764, 1764, 1764, 1764, 1764, 1764, 1764, 1764,
- /* 410 */ 1764, 1764, 1764, 1764, 1764, 1764, 1764, 1764, 1764, 1764,
- /* 420 */ 1764, 1764, 1764, 1764, 1764, 1764, 1764, 1764, 1764, 1764,
- /* 430 */ 1764, 1764, 1764, 1764, 1764, 1764, 1764, 1764, 1764, 1764,
- /* 440 */ 1764, 1764, 1764, 1764, 1764, 1764, 1764, 1764, 1764, 1764,
- /* 450 */ 1764, 1764, 1764, 1764, 1764, 1764, 1764, 1764, 1764, 1764,
- /* 460 */ 1764, 1764, 1764, 1764, 1764, 1764, 1764, 1764, 1764, 1764,
- /* 470 */ 1764, 1764, 1764, 1764, 1764, 1764, 1764, 1764, 1764, 1764,
- /* 480 */ 1764, 1764, 1764, 1764, 1764, 1764, 1764, 1764, 1764, 1764,
- /* 490 */ 1764, 1764, 1764, 1764, 1764, 1764, 1764, 1764, 1764, 1764,
- /* 500 */ 1764, 1764, 1764, 1764, 1764, 1764, 1764, 1764, 1764, 1764,
- /* 510 */ 1764, 1764, 1764, 1764, 1764, 1764, 1764, 1764, 1764, 1764,
- /* 520 */ 1764, 1764, 1764, 1764, 1764, 1764, 1764, 1801, 2168, 1764,
- /* 530 */ 1764, 1764, 1764, 1764, 1764, 1764, 1764, 1764, 1764, 1764,
- /* 540 */ 1764, 1764, 1764, 1764, 1764, 1764, 1764, 1764, 1764, 1764,
- /* 550 */ 1764, 1764, 1764, 1764, 1764, 1764, 1764, 1764, 1764, 1764,
- /* 560 */ 1764, 1764, 1764, 1764, 1764, 1764, 1764, 1764, 1764, 1764,
- /* 570 */ 1764, 1764, 1764, 1764, 1764, 1764, 1764, 1764, 1764, 1892,
- /* 580 */ 1891, 1764, 1764, 1764, 1764, 1764, 1764, 1764, 1764, 1764,
- /* 590 */ 1764, 1764, 1764, 1764, 1764, 1764, 1764, 1764, 1764, 1764,
- /* 600 */ 1764, 1764, 1764, 2172, 1764, 1764, 1764, 1764, 1764, 1764,
- /* 610 */ 1764, 1764, 1764, 1764, 1764, 1764, 1764, 1764, 1764, 1764,
- /* 620 */ 2339, 2297, 1764, 1764, 1764, 1764, 1764, 1764, 1764, 1764,
- /* 630 */ 1764, 1764, 1764, 1764, 1764, 1764, 1764, 1764, 1764, 1764,
- /* 640 */ 2163, 1764, 2322, 1764, 1764, 2337, 1764, 2341, 1764, 1764,
- /* 650 */ 1764, 1764, 1764, 1764, 1764, 2274, 2270, 1764, 1764, 2266,
- /* 660 */ 1764, 1764, 1764, 1764, 1764, 1764, 1764, 2171, 1764, 1764,
- /* 670 */ 1764, 1764, 1764, 1764, 1764, 1764, 1764, 1764, 1764, 1764,
- /* 680 */ 1764, 2162, 1764, 2225, 1764, 1764, 1764, 2259, 1764, 1764,
- /* 690 */ 2210, 1764, 1764, 1764, 1764, 1764, 1764, 1764, 1764, 1764,
- /* 700 */ 2172, 1764, 2175, 1764, 1764, 1764, 1764, 1764, 1970, 1764,
- /* 710 */ 1764, 1764, 1764, 1764, 1764, 1764, 1764, 1764, 1764, 1764,
- /* 720 */ 1764, 1764, 1764, 1764, 1954, 1952, 1951, 1950, 1764, 1983,
- /* 730 */ 1764, 1764, 1764, 1979, 1978, 1764, 1764, 1764, 1764, 1764,
- /* 740 */ 1764, 1764, 1764, 1764, 1764, 1764, 1764, 1764, 1872, 1764,
- /* 750 */ 1764, 1764, 1764, 1764, 1764, 1764, 1764, 1864, 1764, 1863,
- /* 760 */ 1764, 1764, 1764, 1764, 1764, 1764, 1764, 1764, 1764, 1764,
- /* 770 */ 1764, 1764, 1764, 1764, 1764, 1764, 1764, 1764, 1764, 1764,
- /* 780 */ 1764, 1764, 1764, 1764, 1764, 1764, 1764, 1764, 1764, 1764,
- /* 790 */ 1764, 1764,
+ /* 0 */ 1768, 1768, 1768, 1768, 1768, 1768, 1768, 1768, 1768, 1768,
+ /* 10 */ 1768, 1768, 1768, 1768, 1768, 1768, 1768, 1768, 1768, 1768,
+ /* 20 */ 1768, 1768, 1768, 1768, 1768, 1768, 1768, 1768, 1768, 1768,
+ /* 30 */ 1768, 1768, 1768, 1768, 1768, 1768, 1768, 1768, 1768, 1768,
+ /* 40 */ 1768, 1768, 1768, 1768, 1768, 1768, 1768, 1768, 1768, 1768,
+ /* 50 */ 1768, 1768, 1768, 1768, 1768, 1768, 1768, 1768, 1768, 1768,
+ /* 60 */ 1768, 1768, 1768, 1768, 1768, 1768, 1768, 1768, 1768, 1768,
+ /* 70 */ 1768, 1768, 1768, 1768, 2049, 1768, 1768, 1768, 1768, 1768,
+ /* 80 */ 1768, 1768, 1768, 1768, 1768, 1768, 1768, 1768, 1857, 1768,
+ /* 90 */ 1768, 1768, 1768, 1768, 1768, 1768, 1768, 1768, 1768, 1768,
+ /* 100 */ 1768, 1768, 1768, 1768, 1768, 1768, 1768, 1768, 1855, 2042,
+ /* 110 */ 2267, 1768, 1768, 1768, 1768, 1768, 1768, 1768, 1768, 1768,
+ /* 120 */ 1768, 1768, 1768, 1768, 1768, 1768, 1768, 1768, 1768, 1768,
+ /* 130 */ 1768, 1768, 1768, 1768, 2279, 1768, 1768, 1831, 1831, 1768,
+ /* 140 */ 2279, 2279, 2279, 1855, 2239, 2239, 1768, 1768, 1768, 1768,
+ /* 150 */ 1857, 2109, 1768, 1768, 1768, 1768, 1768, 1768, 1977, 1768,
+ /* 160 */ 1768, 1768, 1768, 1768, 2001, 1768, 1768, 1768, 2101, 1768,
+ /* 170 */ 1768, 2304, 2361, 1768, 1768, 2307, 1768, 1768, 1768, 1768,
+ /* 180 */ 1768, 2054, 1768, 1768, 1930, 2294, 2271, 2285, 2345, 2272,
+ /* 190 */ 2269, 2288, 1768, 2298, 1768, 1768, 2123, 1857, 1768, 1857,
+ /* 200 */ 2088, 2047, 1768, 2047, 2044, 1768, 1768, 2047, 2044, 2044,
+ /* 210 */ 1919, 1915, 1768, 1913, 1768, 1768, 1768, 1768, 1815, 1768,
+ /* 220 */ 1815, 1768, 1857, 1768, 1857, 1768, 1768, 1857, 1768, 1857,
+ /* 230 */ 1857, 1857, 1768, 1857, 1768, 1768, 1768, 1768, 1768, 1768,
+ /* 240 */ 1768, 1768, 1768, 1768, 1768, 1768, 1768, 1768, 1768, 1768,
+ /* 250 */ 1768, 2121, 2107, 1768, 1855, 2099, 2097, 1768, 1855, 2095,
+ /* 260 */ 2298, 1768, 1768, 1768, 1768, 2315, 2313, 1768, 2315, 2313,
+ /* 270 */ 1768, 1768, 1768, 2329, 2325, 2315, 2334, 2331, 2300, 2298,
+ /* 280 */ 2364, 2351, 2347, 2285, 1768, 1768, 2298, 1768, 1768, 1855,
+ /* 290 */ 1855, 1768, 2313, 1768, 1768, 1768, 1768, 1768, 2313, 1768,
+ /* 300 */ 1768, 1855, 1768, 1855, 1768, 1768, 1946, 1768, 1768, 1768,
+ /* 310 */ 1855, 1800, 1768, 2090, 2112, 2072, 2072, 1980, 1980, 1980,
+ /* 320 */ 1858, 1773, 1768, 1768, 1768, 1768, 1768, 1768, 1768, 1768,
+ /* 330 */ 1768, 1768, 1768, 1768, 2328, 2327, 2194, 1768, 2243, 2242,
+ /* 340 */ 2241, 2232, 2193, 1942, 1768, 2192, 2191, 1768, 1768, 1768,
+ /* 350 */ 1768, 1768, 1768, 1768, 1768, 1768, 2063, 2062, 2185, 1768,
+ /* 360 */ 1768, 2186, 2184, 2183, 1768, 1768, 1768, 1768, 1768, 1768,
+ /* 370 */ 1768, 1768, 1768, 1768, 1768, 1768, 1768, 1768, 1768, 1768,
+ /* 380 */ 1768, 1768, 1768, 1768, 1768, 1768, 1768, 1768, 2348, 2352,
+ /* 390 */ 1768, 1768, 1768, 1768, 1768, 1768, 1768, 2268, 1768, 1768,
+ /* 400 */ 1768, 2167, 1768, 1768, 1768, 1768, 1768, 1768, 1768, 1768,
+ /* 410 */ 1768, 1768, 1768, 1768, 1768, 1768, 1768, 1768, 1768, 1768,
+ /* 420 */ 1768, 1768, 1768, 1768, 1768, 1768, 1768, 1768, 1768, 1768,
+ /* 430 */ 1768, 1768, 1768, 1768, 1768, 1768, 1768, 1768, 1768, 1768,
+ /* 440 */ 1768, 1768, 1768, 1768, 1768, 1768, 1768, 1768, 1768, 1768,
+ /* 450 */ 1768, 1768, 1768, 1768, 1768, 1768, 1768, 1768, 1768, 1768,
+ /* 460 */ 1768, 1768, 1768, 1768, 1768, 1768, 1768, 1768, 1768, 1768,
+ /* 470 */ 1768, 1768, 1768, 1768, 1768, 1768, 1768, 1768, 1768, 1768,
+ /* 480 */ 1768, 1768, 1768, 1768, 1768, 1768, 1768, 1768, 1768, 1768,
+ /* 490 */ 1768, 1768, 1768, 1768, 1768, 1768, 1768, 1768, 1768, 1768,
+ /* 500 */ 1768, 1768, 1768, 1768, 1768, 1768, 1768, 1768, 1768, 1768,
+ /* 510 */ 1768, 1768, 1768, 1768, 1768, 1768, 1768, 1768, 1768, 1768,
+ /* 520 */ 1768, 1768, 1768, 1768, 1768, 1768, 1768, 1768, 1768, 1805,
+ /* 530 */ 2172, 1768, 1768, 1768, 1768, 1768, 1768, 1768, 1768, 1768,
+ /* 540 */ 1768, 1768, 1768, 1768, 1768, 1768, 1768, 1768, 1768, 1768,
+ /* 550 */ 1768, 1768, 1768, 1768, 1768, 1768, 1768, 1768, 1768, 1768,
+ /* 560 */ 1768, 1768, 1768, 1768, 1768, 1768, 1768, 1768, 1768, 1768,
+ /* 570 */ 1768, 1768, 1768, 1768, 1768, 1768, 1768, 1768, 1768, 1768,
+ /* 580 */ 1768, 1896, 1895, 1768, 1768, 1768, 1768, 1768, 1768, 1768,
+ /* 590 */ 1768, 1768, 1768, 1768, 1768, 1768, 1768, 1768, 1768, 1768,
+ /* 600 */ 1768, 1768, 1768, 1768, 1768, 2176, 1768, 1768, 1768, 1768,
+ /* 610 */ 1768, 1768, 1768, 1768, 1768, 1768, 1768, 1768, 1768, 1768,
+ /* 620 */ 1768, 1768, 2344, 2301, 1768, 1768, 1768, 1768, 1768, 1768,
+ /* 630 */ 1768, 1768, 1768, 1768, 1768, 1768, 1768, 1768, 1768, 1768,
+ /* 640 */ 1768, 1768, 2167, 1768, 2326, 1768, 1768, 2342, 1768, 2346,
+ /* 650 */ 1768, 1768, 1768, 1768, 1768, 1768, 1768, 2278, 2274, 1768,
+ /* 660 */ 1768, 2270, 1768, 1768, 1768, 1768, 1768, 1768, 1768, 2175,
+ /* 670 */ 1768, 1768, 1768, 1768, 1768, 1768, 1768, 1768, 1768, 1768,
+ /* 680 */ 1768, 1768, 1768, 2166, 1768, 2229, 1768, 1768, 1768, 2263,
+ /* 690 */ 1768, 1768, 2214, 1768, 1768, 1768, 1768, 1768, 1768, 1768,
+ /* 700 */ 1768, 1768, 2176, 1768, 2179, 1768, 1768, 1768, 1768, 1768,
+ /* 710 */ 1974, 1768, 1768, 1768, 1768, 1768, 1768, 1768, 1768, 1768,
+ /* 720 */ 1768, 1768, 1768, 1768, 1768, 1768, 1958, 1956, 1955, 1954,
+ /* 730 */ 1768, 1987, 1768, 1768, 1768, 1983, 1982, 1768, 1768, 1768,
+ /* 740 */ 1768, 1768, 1768, 1768, 1768, 1768, 1768, 1768, 1768, 1768,
+ /* 750 */ 1876, 1768, 1768, 1768, 1768, 1768, 1768, 1768, 1768, 1868,
+ /* 760 */ 1768, 1867, 1768, 1768, 1768, 1768, 1768, 1768, 1768, 1768,
+ /* 770 */ 1768, 1768, 1768, 1768, 1768, 1768, 1768, 1768, 1768, 1768,
+ /* 780 */ 1768, 1768, 1768, 1768, 1768, 1768, 1768, 1768, 1768, 1768,
+ /* 790 */ 1768, 1768, 1768, 1768,
};
/********** End of lemon-generated parsing tables *****************************/
@@ -1857,23 +1860,23 @@ static const char *const yyTokenName[] = {
/* 407 */ "func",
/* 408 */ "sma_func_name",
/* 409 */ "query_or_subquery",
- /* 410 */ "cgroup_name",
- /* 411 */ "analyze_opt",
- /* 412 */ "explain_options",
- /* 413 */ "insert_query",
- /* 414 */ "or_replace_opt",
- /* 415 */ "agg_func_opt",
- /* 416 */ "bufsize_opt",
- /* 417 */ "language_opt",
- /* 418 */ "stream_name",
- /* 419 */ "stream_options",
- /* 420 */ "col_list_opt",
- /* 421 */ "tag_def_or_ref_opt",
- /* 422 */ "subtable_opt",
- /* 423 */ "ignore_opt",
- /* 424 */ "expression",
- /* 425 */ "dnode_list",
- /* 426 */ "where_clause_opt",
+ /* 410 */ "where_clause_opt",
+ /* 411 */ "cgroup_name",
+ /* 412 */ "analyze_opt",
+ /* 413 */ "explain_options",
+ /* 414 */ "insert_query",
+ /* 415 */ "or_replace_opt",
+ /* 416 */ "agg_func_opt",
+ /* 417 */ "bufsize_opt",
+ /* 418 */ "language_opt",
+ /* 419 */ "stream_name",
+ /* 420 */ "stream_options",
+ /* 421 */ "col_list_opt",
+ /* 422 */ "tag_def_or_ref_opt",
+ /* 423 */ "subtable_opt",
+ /* 424 */ "ignore_opt",
+ /* 425 */ "expression",
+ /* 426 */ "dnode_list",
/* 427 */ "literal_func",
/* 428 */ "literal_list",
/* 429 */ "table_alias",
@@ -2247,8 +2250,8 @@ static const char *const yyRuleName[] = {
/* 305 */ "cmd ::= CREATE TOPIC not_exists_opt topic_name AS query_or_subquery",
/* 306 */ "cmd ::= CREATE TOPIC not_exists_opt topic_name AS DATABASE db_name",
/* 307 */ "cmd ::= CREATE TOPIC not_exists_opt topic_name WITH META AS DATABASE db_name",
- /* 308 */ "cmd ::= CREATE TOPIC not_exists_opt topic_name AS STABLE full_table_name",
- /* 309 */ "cmd ::= CREATE TOPIC not_exists_opt topic_name WITH META AS STABLE full_table_name",
+ /* 308 */ "cmd ::= CREATE TOPIC not_exists_opt topic_name AS STABLE full_table_name where_clause_opt",
+ /* 309 */ "cmd ::= CREATE TOPIC not_exists_opt topic_name WITH META AS STABLE full_table_name where_clause_opt",
/* 310 */ "cmd ::= DROP TOPIC exists_opt topic_name",
/* 311 */ "cmd ::= DROP CONSUMER GROUP exists_opt cgroup_name ON topic_name",
/* 312 */ "cmd ::= DESC full_table_name",
@@ -2501,39 +2504,40 @@ static const char *const yyRuleName[] = {
/* 559 */ "having_clause_opt ::= HAVING search_condition",
/* 560 */ "range_opt ::=",
/* 561 */ "range_opt ::= RANGE NK_LP expr_or_subquery NK_COMMA expr_or_subquery NK_RP",
- /* 562 */ "every_opt ::=",
- /* 563 */ "every_opt ::= EVERY NK_LP duration_literal NK_RP",
- /* 564 */ "query_expression ::= query_simple order_by_clause_opt slimit_clause_opt limit_clause_opt",
- /* 565 */ "query_simple ::= query_specification",
- /* 566 */ "query_simple ::= union_query_expression",
- /* 567 */ "union_query_expression ::= query_simple_or_subquery UNION ALL query_simple_or_subquery",
- /* 568 */ "union_query_expression ::= query_simple_or_subquery UNION query_simple_or_subquery",
- /* 569 */ "query_simple_or_subquery ::= query_simple",
- /* 570 */ "query_simple_or_subquery ::= subquery",
- /* 571 */ "query_or_subquery ::= query_expression",
- /* 572 */ "query_or_subquery ::= subquery",
- /* 573 */ "order_by_clause_opt ::=",
- /* 574 */ "order_by_clause_opt ::= ORDER BY sort_specification_list",
- /* 575 */ "slimit_clause_opt ::=",
- /* 576 */ "slimit_clause_opt ::= SLIMIT NK_INTEGER",
- /* 577 */ "slimit_clause_opt ::= SLIMIT NK_INTEGER SOFFSET NK_INTEGER",
- /* 578 */ "slimit_clause_opt ::= SLIMIT NK_INTEGER NK_COMMA NK_INTEGER",
- /* 579 */ "limit_clause_opt ::=",
- /* 580 */ "limit_clause_opt ::= LIMIT NK_INTEGER",
- /* 581 */ "limit_clause_opt ::= LIMIT NK_INTEGER OFFSET NK_INTEGER",
- /* 582 */ "limit_clause_opt ::= LIMIT NK_INTEGER NK_COMMA NK_INTEGER",
- /* 583 */ "subquery ::= NK_LP query_expression NK_RP",
- /* 584 */ "subquery ::= NK_LP subquery NK_RP",
- /* 585 */ "search_condition ::= common_expression",
- /* 586 */ "sort_specification_list ::= sort_specification",
- /* 587 */ "sort_specification_list ::= sort_specification_list NK_COMMA sort_specification",
- /* 588 */ "sort_specification ::= expr_or_subquery ordering_specification_opt null_ordering_opt",
- /* 589 */ "ordering_specification_opt ::=",
- /* 590 */ "ordering_specification_opt ::= ASC",
- /* 591 */ "ordering_specification_opt ::= DESC",
- /* 592 */ "null_ordering_opt ::=",
- /* 593 */ "null_ordering_opt ::= NULLS FIRST",
- /* 594 */ "null_ordering_opt ::= NULLS LAST",
+ /* 562 */ "range_opt ::= RANGE NK_LP expr_or_subquery NK_RP",
+ /* 563 */ "every_opt ::=",
+ /* 564 */ "every_opt ::= EVERY NK_LP duration_literal NK_RP",
+ /* 565 */ "query_expression ::= query_simple order_by_clause_opt slimit_clause_opt limit_clause_opt",
+ /* 566 */ "query_simple ::= query_specification",
+ /* 567 */ "query_simple ::= union_query_expression",
+ /* 568 */ "union_query_expression ::= query_simple_or_subquery UNION ALL query_simple_or_subquery",
+ /* 569 */ "union_query_expression ::= query_simple_or_subquery UNION query_simple_or_subquery",
+ /* 570 */ "query_simple_or_subquery ::= query_simple",
+ /* 571 */ "query_simple_or_subquery ::= subquery",
+ /* 572 */ "query_or_subquery ::= query_expression",
+ /* 573 */ "query_or_subquery ::= subquery",
+ /* 574 */ "order_by_clause_opt ::=",
+ /* 575 */ "order_by_clause_opt ::= ORDER BY sort_specification_list",
+ /* 576 */ "slimit_clause_opt ::=",
+ /* 577 */ "slimit_clause_opt ::= SLIMIT NK_INTEGER",
+ /* 578 */ "slimit_clause_opt ::= SLIMIT NK_INTEGER SOFFSET NK_INTEGER",
+ /* 579 */ "slimit_clause_opt ::= SLIMIT NK_INTEGER NK_COMMA NK_INTEGER",
+ /* 580 */ "limit_clause_opt ::=",
+ /* 581 */ "limit_clause_opt ::= LIMIT NK_INTEGER",
+ /* 582 */ "limit_clause_opt ::= LIMIT NK_INTEGER OFFSET NK_INTEGER",
+ /* 583 */ "limit_clause_opt ::= LIMIT NK_INTEGER NK_COMMA NK_INTEGER",
+ /* 584 */ "subquery ::= NK_LP query_expression NK_RP",
+ /* 585 */ "subquery ::= NK_LP subquery NK_RP",
+ /* 586 */ "search_condition ::= common_expression",
+ /* 587 */ "sort_specification_list ::= sort_specification",
+ /* 588 */ "sort_specification_list ::= sort_specification_list NK_COMMA sort_specification",
+ /* 589 */ "sort_specification ::= expr_or_subquery ordering_specification_opt null_ordering_opt",
+ /* 590 */ "ordering_specification_opt ::=",
+ /* 591 */ "ordering_specification_opt ::= ASC",
+ /* 592 */ "ordering_specification_opt ::= DESC",
+ /* 593 */ "null_ordering_opt ::=",
+ /* 594 */ "null_ordering_opt ::= NULLS FIRST",
+ /* 595 */ "null_ordering_opt ::= NULLS LAST",
};
#endif /* NDEBUG */
@@ -2692,12 +2696,12 @@ static void yy_destructor(
case 406: /* sma_stream_opt */
case 407: /* func */
case 409: /* query_or_subquery */
- case 412: /* explain_options */
- case 413: /* insert_query */
- case 419: /* stream_options */
- case 422: /* subtable_opt */
- case 424: /* expression */
- case 426: /* where_clause_opt */
+ case 410: /* where_clause_opt */
+ case 413: /* explain_options */
+ case 414: /* insert_query */
+ case 420: /* stream_options */
+ case 423: /* subtable_opt */
+ case 425: /* expression */
case 427: /* literal_func */
case 430: /* expr_or_subquery */
case 431: /* pseudo_column */
@@ -2742,7 +2746,7 @@ static void yy_destructor(
case 339: /* alter_account_options */
case 341: /* alter_account_option */
case 360: /* speed_opt */
- case 416: /* bufsize_opt */
+ case 417: /* bufsize_opt */
{
}
@@ -2757,9 +2761,9 @@ static void yy_destructor(
case 400: /* column_alias */
case 403: /* index_name */
case 408: /* sma_func_name */
- case 410: /* cgroup_name */
- case 417: /* language_opt */
- case 418: /* stream_name */
+ case 411: /* cgroup_name */
+ case 418: /* language_opt */
+ case 419: /* stream_name */
case 429: /* table_alias */
case 435: /* star_func */
case 437: /* noarg_func */
@@ -2789,10 +2793,10 @@ static void yy_destructor(
case 355: /* unsafe_opt */
case 356: /* not_exists_opt */
case 358: /* exists_opt */
- case 411: /* analyze_opt */
- case 414: /* or_replace_opt */
- case 415: /* agg_func_opt */
- case 423: /* ignore_opt */
+ case 412: /* analyze_opt */
+ case 415: /* or_replace_opt */
+ case 416: /* agg_func_opt */
+ case 424: /* ignore_opt */
case 460: /* set_quantifier_opt */
{
@@ -2813,9 +2817,9 @@ static void yy_destructor(
case 388: /* rollup_func_list */
case 398: /* tag_list_opt */
case 404: /* func_list */
- case 420: /* col_list_opt */
- case 421: /* tag_def_or_ref_opt */
- case 425: /* dnode_list */
+ case 421: /* col_list_opt */
+ case 422: /* tag_def_or_ref_opt */
+ case 426: /* dnode_list */
case 428: /* literal_list */
case 436: /* star_func_para_list */
case 438: /* other_para_list */
@@ -3462,8 +3466,8 @@ static const YYCODETYPE yyRuleInfoLhs[] = {
337, /* (305) cmd ::= CREATE TOPIC not_exists_opt topic_name AS query_or_subquery */
337, /* (306) cmd ::= CREATE TOPIC not_exists_opt topic_name AS DATABASE db_name */
337, /* (307) cmd ::= CREATE TOPIC not_exists_opt topic_name WITH META AS DATABASE db_name */
- 337, /* (308) cmd ::= CREATE TOPIC not_exists_opt topic_name AS STABLE full_table_name */
- 337, /* (309) cmd ::= CREATE TOPIC not_exists_opt topic_name WITH META AS STABLE full_table_name */
+ 337, /* (308) cmd ::= CREATE TOPIC not_exists_opt topic_name AS STABLE full_table_name where_clause_opt */
+ 337, /* (309) cmd ::= CREATE TOPIC not_exists_opt topic_name WITH META AS STABLE full_table_name where_clause_opt */
337, /* (310) cmd ::= DROP TOPIC exists_opt topic_name */
337, /* (311) cmd ::= DROP CONSUMER GROUP exists_opt cgroup_name ON topic_name */
337, /* (312) cmd ::= DESC full_table_name */
@@ -3471,43 +3475,43 @@ static const YYCODETYPE yyRuleInfoLhs[] = {
337, /* (314) cmd ::= RESET QUERY CACHE */
337, /* (315) cmd ::= EXPLAIN analyze_opt explain_options query_or_subquery */
337, /* (316) cmd ::= EXPLAIN analyze_opt explain_options insert_query */
- 411, /* (317) analyze_opt ::= */
- 411, /* (318) analyze_opt ::= ANALYZE */
- 412, /* (319) explain_options ::= */
- 412, /* (320) explain_options ::= explain_options VERBOSE NK_BOOL */
- 412, /* (321) explain_options ::= explain_options RATIO NK_FLOAT */
+ 412, /* (317) analyze_opt ::= */
+ 412, /* (318) analyze_opt ::= ANALYZE */
+ 413, /* (319) explain_options ::= */
+ 413, /* (320) explain_options ::= explain_options VERBOSE NK_BOOL */
+ 413, /* (321) explain_options ::= explain_options RATIO NK_FLOAT */
337, /* (322) cmd ::= CREATE or_replace_opt agg_func_opt FUNCTION not_exists_opt function_name AS NK_STRING OUTPUTTYPE type_name bufsize_opt language_opt */
337, /* (323) cmd ::= DROP FUNCTION exists_opt function_name */
- 415, /* (324) agg_func_opt ::= */
- 415, /* (325) agg_func_opt ::= AGGREGATE */
- 416, /* (326) bufsize_opt ::= */
- 416, /* (327) bufsize_opt ::= BUFSIZE NK_INTEGER */
- 417, /* (328) language_opt ::= */
- 417, /* (329) language_opt ::= LANGUAGE NK_STRING */
- 414, /* (330) or_replace_opt ::= */
- 414, /* (331) or_replace_opt ::= OR REPLACE */
+ 416, /* (324) agg_func_opt ::= */
+ 416, /* (325) agg_func_opt ::= AGGREGATE */
+ 417, /* (326) bufsize_opt ::= */
+ 417, /* (327) bufsize_opt ::= BUFSIZE NK_INTEGER */
+ 418, /* (328) language_opt ::= */
+ 418, /* (329) language_opt ::= LANGUAGE NK_STRING */
+ 415, /* (330) or_replace_opt ::= */
+ 415, /* (331) or_replace_opt ::= OR REPLACE */
337, /* (332) cmd ::= CREATE STREAM not_exists_opt stream_name stream_options INTO full_table_name col_list_opt tag_def_or_ref_opt subtable_opt AS query_or_subquery */
337, /* (333) cmd ::= DROP STREAM exists_opt stream_name */
337, /* (334) cmd ::= PAUSE STREAM exists_opt stream_name */
337, /* (335) cmd ::= RESUME STREAM exists_opt ignore_opt stream_name */
- 420, /* (336) col_list_opt ::= */
- 420, /* (337) col_list_opt ::= NK_LP col_name_list NK_RP */
- 421, /* (338) tag_def_or_ref_opt ::= */
- 421, /* (339) tag_def_or_ref_opt ::= tags_def */
- 421, /* (340) tag_def_or_ref_opt ::= TAGS NK_LP col_name_list NK_RP */
- 419, /* (341) stream_options ::= */
- 419, /* (342) stream_options ::= stream_options TRIGGER AT_ONCE */
- 419, /* (343) stream_options ::= stream_options TRIGGER WINDOW_CLOSE */
- 419, /* (344) stream_options ::= stream_options TRIGGER MAX_DELAY duration_literal */
- 419, /* (345) stream_options ::= stream_options WATERMARK duration_literal */
- 419, /* (346) stream_options ::= stream_options IGNORE EXPIRED NK_INTEGER */
- 419, /* (347) stream_options ::= stream_options FILL_HISTORY NK_INTEGER */
- 419, /* (348) stream_options ::= stream_options DELETE_MARK duration_literal */
- 419, /* (349) stream_options ::= stream_options IGNORE UPDATE NK_INTEGER */
- 422, /* (350) subtable_opt ::= */
- 422, /* (351) subtable_opt ::= SUBTABLE NK_LP expression NK_RP */
- 423, /* (352) ignore_opt ::= */
- 423, /* (353) ignore_opt ::= IGNORE UNTREATED */
+ 421, /* (336) col_list_opt ::= */
+ 421, /* (337) col_list_opt ::= NK_LP col_name_list NK_RP */
+ 422, /* (338) tag_def_or_ref_opt ::= */
+ 422, /* (339) tag_def_or_ref_opt ::= tags_def */
+ 422, /* (340) tag_def_or_ref_opt ::= TAGS NK_LP col_name_list NK_RP */
+ 420, /* (341) stream_options ::= */
+ 420, /* (342) stream_options ::= stream_options TRIGGER AT_ONCE */
+ 420, /* (343) stream_options ::= stream_options TRIGGER WINDOW_CLOSE */
+ 420, /* (344) stream_options ::= stream_options TRIGGER MAX_DELAY duration_literal */
+ 420, /* (345) stream_options ::= stream_options WATERMARK duration_literal */
+ 420, /* (346) stream_options ::= stream_options IGNORE EXPIRED NK_INTEGER */
+ 420, /* (347) stream_options ::= stream_options FILL_HISTORY NK_INTEGER */
+ 420, /* (348) stream_options ::= stream_options DELETE_MARK duration_literal */
+ 420, /* (349) stream_options ::= stream_options IGNORE UPDATE NK_INTEGER */
+ 423, /* (350) subtable_opt ::= */
+ 423, /* (351) subtable_opt ::= SUBTABLE NK_LP expression NK_RP */
+ 424, /* (352) ignore_opt ::= */
+ 424, /* (353) ignore_opt ::= IGNORE UNTREATED */
337, /* (354) cmd ::= KILL CONNECTION NK_INTEGER */
337, /* (355) cmd ::= KILL QUERY NK_STRING */
337, /* (356) cmd ::= KILL TRANSACTION NK_INTEGER */
@@ -3516,13 +3520,13 @@ static const YYCODETYPE yyRuleInfoLhs[] = {
337, /* (359) cmd ::= MERGE VGROUP NK_INTEGER NK_INTEGER */
337, /* (360) cmd ::= REDISTRIBUTE VGROUP NK_INTEGER dnode_list */
337, /* (361) cmd ::= SPLIT VGROUP NK_INTEGER */
- 425, /* (362) dnode_list ::= DNODE NK_INTEGER */
- 425, /* (363) dnode_list ::= dnode_list DNODE NK_INTEGER */
+ 426, /* (362) dnode_list ::= DNODE NK_INTEGER */
+ 426, /* (363) dnode_list ::= dnode_list DNODE NK_INTEGER */
337, /* (364) cmd ::= DELETE FROM full_table_name where_clause_opt */
337, /* (365) cmd ::= query_or_subquery */
337, /* (366) cmd ::= insert_query */
- 413, /* (367) insert_query ::= INSERT INTO full_table_name NK_LP col_name_list NK_RP query_or_subquery */
- 413, /* (368) insert_query ::= INSERT INTO full_table_name query_or_subquery */
+ 414, /* (367) insert_query ::= INSERT INTO full_table_name NK_LP col_name_list NK_RP query_or_subquery */
+ 414, /* (368) insert_query ::= INSERT INTO full_table_name query_or_subquery */
340, /* (369) literal ::= NK_INTEGER */
340, /* (370) literal ::= NK_FLOAT */
340, /* (371) literal ::= NK_STRING */
@@ -3556,26 +3560,26 @@ static const YYCODETYPE yyRuleInfoLhs[] = {
400, /* (399) column_alias ::= NK_ID */
342, /* (400) user_name ::= NK_ID */
351, /* (401) topic_name ::= NK_ID */
- 418, /* (402) stream_name ::= NK_ID */
- 410, /* (403) cgroup_name ::= NK_ID */
+ 419, /* (402) stream_name ::= NK_ID */
+ 411, /* (403) cgroup_name ::= NK_ID */
403, /* (404) index_name ::= NK_ID */
430, /* (405) expr_or_subquery ::= expression */
- 424, /* (406) expression ::= literal */
- 424, /* (407) expression ::= pseudo_column */
- 424, /* (408) expression ::= column_reference */
- 424, /* (409) expression ::= function_expression */
- 424, /* (410) expression ::= case_when_expression */
- 424, /* (411) expression ::= NK_LP expression NK_RP */
- 424, /* (412) expression ::= NK_PLUS expr_or_subquery */
- 424, /* (413) expression ::= NK_MINUS expr_or_subquery */
- 424, /* (414) expression ::= expr_or_subquery NK_PLUS expr_or_subquery */
- 424, /* (415) expression ::= expr_or_subquery NK_MINUS expr_or_subquery */
- 424, /* (416) expression ::= expr_or_subquery NK_STAR expr_or_subquery */
- 424, /* (417) expression ::= expr_or_subquery NK_SLASH expr_or_subquery */
- 424, /* (418) expression ::= expr_or_subquery NK_REM expr_or_subquery */
- 424, /* (419) expression ::= column_reference NK_ARROW NK_STRING */
- 424, /* (420) expression ::= expr_or_subquery NK_BITAND expr_or_subquery */
- 424, /* (421) expression ::= expr_or_subquery NK_BITOR expr_or_subquery */
+ 425, /* (406) expression ::= literal */
+ 425, /* (407) expression ::= pseudo_column */
+ 425, /* (408) expression ::= column_reference */
+ 425, /* (409) expression ::= function_expression */
+ 425, /* (410) expression ::= case_when_expression */
+ 425, /* (411) expression ::= NK_LP expression NK_RP */
+ 425, /* (412) expression ::= NK_PLUS expr_or_subquery */
+ 425, /* (413) expression ::= NK_MINUS expr_or_subquery */
+ 425, /* (414) expression ::= expr_or_subquery NK_PLUS expr_or_subquery */
+ 425, /* (415) expression ::= expr_or_subquery NK_MINUS expr_or_subquery */
+ 425, /* (416) expression ::= expr_or_subquery NK_STAR expr_or_subquery */
+ 425, /* (417) expression ::= expr_or_subquery NK_SLASH expr_or_subquery */
+ 425, /* (418) expression ::= expr_or_subquery NK_REM expr_or_subquery */
+ 425, /* (419) expression ::= column_reference NK_ARROW NK_STRING */
+ 425, /* (420) expression ::= expr_or_subquery NK_BITAND expr_or_subquery */
+ 425, /* (421) expression ::= expr_or_subquery NK_BITOR expr_or_subquery */
383, /* (422) expression_list ::= expr_or_subquery */
383, /* (423) expression_list ::= expression_list NK_COMMA expr_or_subquery */
432, /* (424) column_reference ::= column_name */
@@ -3681,8 +3685,8 @@ static const YYCODETYPE yyRuleInfoLhs[] = {
469, /* (524) select_item ::= common_expression column_alias */
469, /* (525) select_item ::= common_expression AS column_alias */
469, /* (526) select_item ::= table_name NK_DOT NK_STAR */
- 426, /* (527) where_clause_opt ::= */
- 426, /* (528) where_clause_opt ::= WHERE search_condition */
+ 410, /* (527) where_clause_opt ::= */
+ 410, /* (528) where_clause_opt ::= WHERE search_condition */
462, /* (529) partition_by_clause_opt ::= */
462, /* (530) partition_by_clause_opt ::= PARTITION BY partition_list */
470, /* (531) partition_list ::= partition_item */
@@ -3716,39 +3720,40 @@ static const YYCODETYPE yyRuleInfoLhs[] = {
468, /* (559) having_clause_opt ::= HAVING search_condition */
463, /* (560) range_opt ::= */
463, /* (561) range_opt ::= RANGE NK_LP expr_or_subquery NK_COMMA expr_or_subquery NK_RP */
- 464, /* (562) every_opt ::= */
- 464, /* (563) every_opt ::= EVERY NK_LP duration_literal NK_RP */
- 474, /* (564) query_expression ::= query_simple order_by_clause_opt slimit_clause_opt limit_clause_opt */
- 475, /* (565) query_simple ::= query_specification */
- 475, /* (566) query_simple ::= union_query_expression */
- 479, /* (567) union_query_expression ::= query_simple_or_subquery UNION ALL query_simple_or_subquery */
- 479, /* (568) union_query_expression ::= query_simple_or_subquery UNION query_simple_or_subquery */
- 480, /* (569) query_simple_or_subquery ::= query_simple */
- 480, /* (570) query_simple_or_subquery ::= subquery */
- 409, /* (571) query_or_subquery ::= query_expression */
- 409, /* (572) query_or_subquery ::= subquery */
- 476, /* (573) order_by_clause_opt ::= */
- 476, /* (574) order_by_clause_opt ::= ORDER BY sort_specification_list */
- 477, /* (575) slimit_clause_opt ::= */
- 477, /* (576) slimit_clause_opt ::= SLIMIT NK_INTEGER */
- 477, /* (577) slimit_clause_opt ::= SLIMIT NK_INTEGER SOFFSET NK_INTEGER */
- 477, /* (578) slimit_clause_opt ::= SLIMIT NK_INTEGER NK_COMMA NK_INTEGER */
- 478, /* (579) limit_clause_opt ::= */
- 478, /* (580) limit_clause_opt ::= LIMIT NK_INTEGER */
- 478, /* (581) limit_clause_opt ::= LIMIT NK_INTEGER OFFSET NK_INTEGER */
- 478, /* (582) limit_clause_opt ::= LIMIT NK_INTEGER NK_COMMA NK_INTEGER */
- 456, /* (583) subquery ::= NK_LP query_expression NK_RP */
- 456, /* (584) subquery ::= NK_LP subquery NK_RP */
- 352, /* (585) search_condition ::= common_expression */
- 481, /* (586) sort_specification_list ::= sort_specification */
- 481, /* (587) sort_specification_list ::= sort_specification_list NK_COMMA sort_specification */
- 482, /* (588) sort_specification ::= expr_or_subquery ordering_specification_opt null_ordering_opt */
- 483, /* (589) ordering_specification_opt ::= */
- 483, /* (590) ordering_specification_opt ::= ASC */
- 483, /* (591) ordering_specification_opt ::= DESC */
- 484, /* (592) null_ordering_opt ::= */
- 484, /* (593) null_ordering_opt ::= NULLS FIRST */
- 484, /* (594) null_ordering_opt ::= NULLS LAST */
+ 463, /* (562) range_opt ::= RANGE NK_LP expr_or_subquery NK_RP */
+ 464, /* (563) every_opt ::= */
+ 464, /* (564) every_opt ::= EVERY NK_LP duration_literal NK_RP */
+ 474, /* (565) query_expression ::= query_simple order_by_clause_opt slimit_clause_opt limit_clause_opt */
+ 475, /* (566) query_simple ::= query_specification */
+ 475, /* (567) query_simple ::= union_query_expression */
+ 479, /* (568) union_query_expression ::= query_simple_or_subquery UNION ALL query_simple_or_subquery */
+ 479, /* (569) union_query_expression ::= query_simple_or_subquery UNION query_simple_or_subquery */
+ 480, /* (570) query_simple_or_subquery ::= query_simple */
+ 480, /* (571) query_simple_or_subquery ::= subquery */
+ 409, /* (572) query_or_subquery ::= query_expression */
+ 409, /* (573) query_or_subquery ::= subquery */
+ 476, /* (574) order_by_clause_opt ::= */
+ 476, /* (575) order_by_clause_opt ::= ORDER BY sort_specification_list */
+ 477, /* (576) slimit_clause_opt ::= */
+ 477, /* (577) slimit_clause_opt ::= SLIMIT NK_INTEGER */
+ 477, /* (578) slimit_clause_opt ::= SLIMIT NK_INTEGER SOFFSET NK_INTEGER */
+ 477, /* (579) slimit_clause_opt ::= SLIMIT NK_INTEGER NK_COMMA NK_INTEGER */
+ 478, /* (580) limit_clause_opt ::= */
+ 478, /* (581) limit_clause_opt ::= LIMIT NK_INTEGER */
+ 478, /* (582) limit_clause_opt ::= LIMIT NK_INTEGER OFFSET NK_INTEGER */
+ 478, /* (583) limit_clause_opt ::= LIMIT NK_INTEGER NK_COMMA NK_INTEGER */
+ 456, /* (584) subquery ::= NK_LP query_expression NK_RP */
+ 456, /* (585) subquery ::= NK_LP subquery NK_RP */
+ 352, /* (586) search_condition ::= common_expression */
+ 481, /* (587) sort_specification_list ::= sort_specification */
+ 481, /* (588) sort_specification_list ::= sort_specification_list NK_COMMA sort_specification */
+ 482, /* (589) sort_specification ::= expr_or_subquery ordering_specification_opt null_ordering_opt */
+ 483, /* (590) ordering_specification_opt ::= */
+ 483, /* (591) ordering_specification_opt ::= ASC */
+ 483, /* (592) ordering_specification_opt ::= DESC */
+ 484, /* (593) null_ordering_opt ::= */
+ 484, /* (594) null_ordering_opt ::= NULLS FIRST */
+ 484, /* (595) null_ordering_opt ::= NULLS LAST */
};
/* For rule J, yyRuleInfoNRhs[J] contains the negative of the number
@@ -4062,8 +4067,8 @@ static const signed char yyRuleInfoNRhs[] = {
-6, /* (305) cmd ::= CREATE TOPIC not_exists_opt topic_name AS query_or_subquery */
-7, /* (306) cmd ::= CREATE TOPIC not_exists_opt topic_name AS DATABASE db_name */
-9, /* (307) cmd ::= CREATE TOPIC not_exists_opt topic_name WITH META AS DATABASE db_name */
- -7, /* (308) cmd ::= CREATE TOPIC not_exists_opt topic_name AS STABLE full_table_name */
- -9, /* (309) cmd ::= CREATE TOPIC not_exists_opt topic_name WITH META AS STABLE full_table_name */
+ -8, /* (308) cmd ::= CREATE TOPIC not_exists_opt topic_name AS STABLE full_table_name where_clause_opt */
+ -10, /* (309) cmd ::= CREATE TOPIC not_exists_opt topic_name WITH META AS STABLE full_table_name where_clause_opt */
-4, /* (310) cmd ::= DROP TOPIC exists_opt topic_name */
-7, /* (311) cmd ::= DROP CONSUMER GROUP exists_opt cgroup_name ON topic_name */
-2, /* (312) cmd ::= DESC full_table_name */
@@ -4316,39 +4321,40 @@ static const signed char yyRuleInfoNRhs[] = {
-2, /* (559) having_clause_opt ::= HAVING search_condition */
0, /* (560) range_opt ::= */
-6, /* (561) range_opt ::= RANGE NK_LP expr_or_subquery NK_COMMA expr_or_subquery NK_RP */
- 0, /* (562) every_opt ::= */
- -4, /* (563) every_opt ::= EVERY NK_LP duration_literal NK_RP */
- -4, /* (564) query_expression ::= query_simple order_by_clause_opt slimit_clause_opt limit_clause_opt */
- -1, /* (565) query_simple ::= query_specification */
- -1, /* (566) query_simple ::= union_query_expression */
- -4, /* (567) union_query_expression ::= query_simple_or_subquery UNION ALL query_simple_or_subquery */
- -3, /* (568) union_query_expression ::= query_simple_or_subquery UNION query_simple_or_subquery */
- -1, /* (569) query_simple_or_subquery ::= query_simple */
- -1, /* (570) query_simple_or_subquery ::= subquery */
- -1, /* (571) query_or_subquery ::= query_expression */
- -1, /* (572) query_or_subquery ::= subquery */
- 0, /* (573) order_by_clause_opt ::= */
- -3, /* (574) order_by_clause_opt ::= ORDER BY sort_specification_list */
- 0, /* (575) slimit_clause_opt ::= */
- -2, /* (576) slimit_clause_opt ::= SLIMIT NK_INTEGER */
- -4, /* (577) slimit_clause_opt ::= SLIMIT NK_INTEGER SOFFSET NK_INTEGER */
- -4, /* (578) slimit_clause_opt ::= SLIMIT NK_INTEGER NK_COMMA NK_INTEGER */
- 0, /* (579) limit_clause_opt ::= */
- -2, /* (580) limit_clause_opt ::= LIMIT NK_INTEGER */
- -4, /* (581) limit_clause_opt ::= LIMIT NK_INTEGER OFFSET NK_INTEGER */
- -4, /* (582) limit_clause_opt ::= LIMIT NK_INTEGER NK_COMMA NK_INTEGER */
- -3, /* (583) subquery ::= NK_LP query_expression NK_RP */
- -3, /* (584) subquery ::= NK_LP subquery NK_RP */
- -1, /* (585) search_condition ::= common_expression */
- -1, /* (586) sort_specification_list ::= sort_specification */
- -3, /* (587) sort_specification_list ::= sort_specification_list NK_COMMA sort_specification */
- -3, /* (588) sort_specification ::= expr_or_subquery ordering_specification_opt null_ordering_opt */
- 0, /* (589) ordering_specification_opt ::= */
- -1, /* (590) ordering_specification_opt ::= ASC */
- -1, /* (591) ordering_specification_opt ::= DESC */
- 0, /* (592) null_ordering_opt ::= */
- -2, /* (593) null_ordering_opt ::= NULLS FIRST */
- -2, /* (594) null_ordering_opt ::= NULLS LAST */
+ -4, /* (562) range_opt ::= RANGE NK_LP expr_or_subquery NK_RP */
+ 0, /* (563) every_opt ::= */
+ -4, /* (564) every_opt ::= EVERY NK_LP duration_literal NK_RP */
+ -4, /* (565) query_expression ::= query_simple order_by_clause_opt slimit_clause_opt limit_clause_opt */
+ -1, /* (566) query_simple ::= query_specification */
+ -1, /* (567) query_simple ::= union_query_expression */
+ -4, /* (568) union_query_expression ::= query_simple_or_subquery UNION ALL query_simple_or_subquery */
+ -3, /* (569) union_query_expression ::= query_simple_or_subquery UNION query_simple_or_subquery */
+ -1, /* (570) query_simple_or_subquery ::= query_simple */
+ -1, /* (571) query_simple_or_subquery ::= subquery */
+ -1, /* (572) query_or_subquery ::= query_expression */
+ -1, /* (573) query_or_subquery ::= subquery */
+ 0, /* (574) order_by_clause_opt ::= */
+ -3, /* (575) order_by_clause_opt ::= ORDER BY sort_specification_list */
+ 0, /* (576) slimit_clause_opt ::= */
+ -2, /* (577) slimit_clause_opt ::= SLIMIT NK_INTEGER */
+ -4, /* (578) slimit_clause_opt ::= SLIMIT NK_INTEGER SOFFSET NK_INTEGER */
+ -4, /* (579) slimit_clause_opt ::= SLIMIT NK_INTEGER NK_COMMA NK_INTEGER */
+ 0, /* (580) limit_clause_opt ::= */
+ -2, /* (581) limit_clause_opt ::= LIMIT NK_INTEGER */
+ -4, /* (582) limit_clause_opt ::= LIMIT NK_INTEGER OFFSET NK_INTEGER */
+ -4, /* (583) limit_clause_opt ::= LIMIT NK_INTEGER NK_COMMA NK_INTEGER */
+ -3, /* (584) subquery ::= NK_LP query_expression NK_RP */
+ -3, /* (585) subquery ::= NK_LP subquery NK_RP */
+ -1, /* (586) search_condition ::= common_expression */
+ -1, /* (587) sort_specification_list ::= sort_specification */
+ -3, /* (588) sort_specification_list ::= sort_specification_list NK_COMMA sort_specification */
+ -3, /* (589) sort_specification ::= expr_or_subquery ordering_specification_opt null_ordering_opt */
+ 0, /* (590) ordering_specification_opt ::= */
+ -1, /* (591) ordering_specification_opt ::= ASC */
+ -1, /* (592) ordering_specification_opt ::= DESC */
+ 0, /* (593) null_ordering_opt ::= */
+ -2, /* (594) null_ordering_opt ::= NULLS FIRST */
+ -2, /* (595) null_ordering_opt ::= NULLS LAST */
};
static void yy_accept(yyParser*); /* Forward Declaration */
@@ -4564,9 +4570,9 @@ static YYACTIONTYPE yy_reduce(
case 544: /* fill_opt ::= */ yytestcase(yyruleno==544);
case 558: /* having_clause_opt ::= */ yytestcase(yyruleno==558);
case 560: /* range_opt ::= */ yytestcase(yyruleno==560);
- case 562: /* every_opt ::= */ yytestcase(yyruleno==562);
- case 575: /* slimit_clause_opt ::= */ yytestcase(yyruleno==575);
- case 579: /* limit_clause_opt ::= */ yytestcase(yyruleno==579);
+ case 563: /* every_opt ::= */ yytestcase(yyruleno==563);
+ case 576: /* slimit_clause_opt ::= */ yytestcase(yyruleno==576);
+ case 580: /* limit_clause_opt ::= */ yytestcase(yyruleno==580);
{ yymsp[1].minor.yy242 = NULL; }
break;
case 45: /* with_opt ::= WITH search_condition */
@@ -4941,7 +4947,7 @@ static YYACTIONTYPE yy_reduce(
case 465: /* when_then_list ::= when_then_expr */ yytestcase(yyruleno==465);
case 520: /* select_list ::= select_item */ yytestcase(yyruleno==520);
case 531: /* partition_list ::= partition_item */ yytestcase(yyruleno==531);
- case 586: /* sort_specification_list ::= sort_specification */ yytestcase(yyruleno==586);
+ case 587: /* sort_specification_list ::= sort_specification */ yytestcase(yyruleno==587);
{ yylhsminor.yy174 = createNodeList(pCxt, yymsp[0].minor.yy242); }
yymsp[0].minor.yy174 = yylhsminor.yy174;
break;
@@ -4956,7 +4962,7 @@ static YYACTIONTYPE yy_reduce(
case 460: /* other_para_list ::= other_para_list NK_COMMA star_func_para */ yytestcase(yyruleno==460);
case 521: /* select_list ::= select_list NK_COMMA select_item */ yytestcase(yyruleno==521);
case 532: /* partition_list ::= partition_list NK_COMMA partition_item */ yytestcase(yyruleno==532);
- case 587: /* sort_specification_list ::= sort_specification_list NK_COMMA sort_specification */ yytestcase(yyruleno==587);
+ case 588: /* sort_specification_list ::= sort_specification_list NK_COMMA sort_specification */ yytestcase(yyruleno==588);
{ yylhsminor.yy174 = addNodeToList(pCxt, yymsp[-2].minor.yy174, yymsp[0].minor.yy242); }
yymsp[-2].minor.yy174 = yylhsminor.yy174;
break;
@@ -5065,7 +5071,7 @@ static YYACTIONTYPE yy_reduce(
case 338: /* tag_def_or_ref_opt ::= */ yytestcase(yyruleno==338);
case 529: /* partition_by_clause_opt ::= */ yytestcase(yyruleno==529);
case 554: /* group_by_clause_opt ::= */ yytestcase(yyruleno==554);
- case 573: /* order_by_clause_opt ::= */ yytestcase(yyruleno==573);
+ case 574: /* order_by_clause_opt ::= */ yytestcase(yyruleno==574);
{ yymsp[1].minor.yy174 = NULL; }
break;
case 176: /* specific_cols_opt ::= NK_LP col_name_list NK_RP */
@@ -5437,11 +5443,11 @@ static YYACTIONTYPE yy_reduce(
case 307: /* cmd ::= CREATE TOPIC not_exists_opt topic_name WITH META AS DATABASE db_name */
{ pCxt->pRootNode = createCreateTopicStmtUseDb(pCxt, yymsp[-6].minor.yy777, &yymsp[-5].minor.yy669, &yymsp[0].minor.yy669, true); }
break;
- case 308: /* cmd ::= CREATE TOPIC not_exists_opt topic_name AS STABLE full_table_name */
-{ pCxt->pRootNode = createCreateTopicStmtUseTable(pCxt, yymsp[-4].minor.yy777, &yymsp[-3].minor.yy669, yymsp[0].minor.yy242, false); }
+ case 308: /* cmd ::= CREATE TOPIC not_exists_opt topic_name AS STABLE full_table_name where_clause_opt */
+{ pCxt->pRootNode = createCreateTopicStmtUseTable(pCxt, yymsp[-5].minor.yy777, &yymsp[-4].minor.yy669, yymsp[-1].minor.yy242, false, yymsp[0].minor.yy242); }
break;
- case 309: /* cmd ::= CREATE TOPIC not_exists_opt topic_name WITH META AS STABLE full_table_name */
-{ pCxt->pRootNode = createCreateTopicStmtUseTable(pCxt, yymsp[-6].minor.yy777, &yymsp[-5].minor.yy669, yymsp[0].minor.yy242, true); }
+ case 309: /* cmd ::= CREATE TOPIC not_exists_opt topic_name WITH META AS STABLE full_table_name where_clause_opt */
+{ pCxt->pRootNode = createCreateTopicStmtUseTable(pCxt, yymsp[-7].minor.yy777, &yymsp[-6].minor.yy669, yymsp[-1].minor.yy242, true, yymsp[0].minor.yy242); }
break;
case 310: /* cmd ::= DROP TOPIC exists_opt topic_name */
{ pCxt->pRootNode = createDropTopicStmt(pCxt, yymsp[-1].minor.yy777, &yymsp[0].minor.yy669); }
@@ -5526,7 +5532,7 @@ static YYACTIONTYPE yy_reduce(
break;
case 351: /* subtable_opt ::= SUBTABLE NK_LP expression NK_RP */
case 543: /* sliding_opt ::= SLIDING NK_LP duration_literal NK_RP */ yytestcase(yyruleno==543);
- case 563: /* every_opt ::= EVERY NK_LP duration_literal NK_RP */ yytestcase(yyruleno==563);
+ case 564: /* every_opt ::= EVERY NK_LP duration_literal NK_RP */ yytestcase(yyruleno==564);
{ yymsp[-3].minor.yy242 = releaseRawExprNode(pCxt, yymsp[-1].minor.yy242); }
break;
case 354: /* cmd ::= KILL CONNECTION NK_INTEGER */
@@ -5602,10 +5608,10 @@ static YYACTIONTYPE yy_reduce(
case 502: /* table_reference ::= table_primary */ yytestcase(yyruleno==502);
case 503: /* table_reference ::= joined_table */ yytestcase(yyruleno==503);
case 507: /* table_primary ::= parenthesized_joined_table */ yytestcase(yyruleno==507);
- case 565: /* query_simple ::= query_specification */ yytestcase(yyruleno==565);
- case 566: /* query_simple ::= union_query_expression */ yytestcase(yyruleno==566);
- case 569: /* query_simple_or_subquery ::= query_simple */ yytestcase(yyruleno==569);
- case 571: /* query_or_subquery ::= query_expression */ yytestcase(yyruleno==571);
+ case 566: /* query_simple ::= query_specification */ yytestcase(yyruleno==566);
+ case 567: /* query_simple ::= union_query_expression */ yytestcase(yyruleno==567);
+ case 570: /* query_simple_or_subquery ::= query_simple */ yytestcase(yyruleno==570);
+ case 572: /* query_or_subquery ::= query_expression */ yytestcase(yyruleno==572);
{ yylhsminor.yy242 = yymsp[0].minor.yy242; }
yymsp[0].minor.yy242 = yylhsminor.yy242;
break;
@@ -5667,9 +5673,9 @@ static YYACTIONTYPE yy_reduce(
case 461: /* star_func_para ::= expr_or_subquery */ yytestcase(yyruleno==461);
case 523: /* select_item ::= common_expression */ yytestcase(yyruleno==523);
case 533: /* partition_item ::= expr_or_subquery */ yytestcase(yyruleno==533);
- case 570: /* query_simple_or_subquery ::= subquery */ yytestcase(yyruleno==570);
- case 572: /* query_or_subquery ::= subquery */ yytestcase(yyruleno==572);
- case 585: /* search_condition ::= common_expression */ yytestcase(yyruleno==585);
+ case 571: /* query_simple_or_subquery ::= subquery */ yytestcase(yyruleno==571);
+ case 573: /* query_or_subquery ::= subquery */ yytestcase(yyruleno==573);
+ case 586: /* search_condition ::= common_expression */ yytestcase(yyruleno==586);
{ yylhsminor.yy242 = releaseRawExprNode(pCxt, yymsp[0].minor.yy242); }
yymsp[0].minor.yy242 = yylhsminor.yy242;
break;
@@ -5683,7 +5689,7 @@ static YYACTIONTYPE yy_reduce(
break;
case 411: /* expression ::= NK_LP expression NK_RP */
case 495: /* boolean_primary ::= NK_LP boolean_value_expression NK_RP */ yytestcase(yyruleno==495);
- case 584: /* subquery ::= NK_LP subquery NK_RP */ yytestcase(yyruleno==584);
+ case 585: /* subquery ::= NK_LP subquery NK_RP */ yytestcase(yyruleno==585);
{ yylhsminor.yy242 = createRawExprNodeExt(pCxt, &yymsp[-2].minor.yy0, &yymsp[0].minor.yy0, releaseRawExprNode(pCxt, yymsp[-1].minor.yy242)); }
yymsp[-2].minor.yy242 = yylhsminor.yy242;
break;
@@ -6000,7 +6006,7 @@ static YYACTIONTYPE yy_reduce(
break;
case 530: /* partition_by_clause_opt ::= PARTITION BY partition_list */
case 555: /* group_by_clause_opt ::= GROUP BY group_by_list */ yytestcase(yyruleno==555);
- case 574: /* order_by_clause_opt ::= ORDER BY sort_specification_list */ yytestcase(yyruleno==574);
+ case 575: /* order_by_clause_opt ::= ORDER BY sort_specification_list */ yytestcase(yyruleno==575);
{ yymsp[-2].minor.yy174 = yymsp[0].minor.yy174; }
break;
case 537: /* twindow_clause_opt ::= SESSION NK_LP column_reference NK_COMMA duration_literal NK_RP */
@@ -6056,7 +6062,10 @@ static YYACTIONTYPE yy_reduce(
case 561: /* range_opt ::= RANGE NK_LP expr_or_subquery NK_COMMA expr_or_subquery NK_RP */
{ yymsp[-5].minor.yy242 = createInterpTimeRange(pCxt, releaseRawExprNode(pCxt, yymsp[-3].minor.yy242), releaseRawExprNode(pCxt, yymsp[-1].minor.yy242)); }
break;
- case 564: /* query_expression ::= query_simple order_by_clause_opt slimit_clause_opt limit_clause_opt */
+ case 562: /* range_opt ::= RANGE NK_LP expr_or_subquery NK_RP */
+{ yymsp[-3].minor.yy242 = createInterpTimePoint(pCxt, releaseRawExprNode(pCxt, yymsp[-1].minor.yy242)); }
+ break;
+ case 565: /* query_expression ::= query_simple order_by_clause_opt slimit_clause_opt limit_clause_opt */
{
yylhsminor.yy242 = addOrderByClause(pCxt, yymsp[-3].minor.yy242, yymsp[-2].minor.yy174);
yylhsminor.yy242 = addSlimitClause(pCxt, yylhsminor.yy242, yymsp[-1].minor.yy242);
@@ -6064,50 +6073,50 @@ static YYACTIONTYPE yy_reduce(
}
yymsp[-3].minor.yy242 = yylhsminor.yy242;
break;
- case 567: /* union_query_expression ::= query_simple_or_subquery UNION ALL query_simple_or_subquery */
+ case 568: /* union_query_expression ::= query_simple_or_subquery UNION ALL query_simple_or_subquery */
{ yylhsminor.yy242 = createSetOperator(pCxt, SET_OP_TYPE_UNION_ALL, yymsp[-3].minor.yy242, yymsp[0].minor.yy242); }
yymsp[-3].minor.yy242 = yylhsminor.yy242;
break;
- case 568: /* union_query_expression ::= query_simple_or_subquery UNION query_simple_or_subquery */
+ case 569: /* union_query_expression ::= query_simple_or_subquery UNION query_simple_or_subquery */
{ yylhsminor.yy242 = createSetOperator(pCxt, SET_OP_TYPE_UNION, yymsp[-2].minor.yy242, yymsp[0].minor.yy242); }
yymsp[-2].minor.yy242 = yylhsminor.yy242;
break;
- case 576: /* slimit_clause_opt ::= SLIMIT NK_INTEGER */
- case 580: /* limit_clause_opt ::= LIMIT NK_INTEGER */ yytestcase(yyruleno==580);
+ case 577: /* slimit_clause_opt ::= SLIMIT NK_INTEGER */
+ case 581: /* limit_clause_opt ::= LIMIT NK_INTEGER */ yytestcase(yyruleno==581);
{ yymsp[-1].minor.yy242 = createLimitNode(pCxt, &yymsp[0].minor.yy0, NULL); }
break;
- case 577: /* slimit_clause_opt ::= SLIMIT NK_INTEGER SOFFSET NK_INTEGER */
- case 581: /* limit_clause_opt ::= LIMIT NK_INTEGER OFFSET NK_INTEGER */ yytestcase(yyruleno==581);
+ case 578: /* slimit_clause_opt ::= SLIMIT NK_INTEGER SOFFSET NK_INTEGER */
+ case 582: /* limit_clause_opt ::= LIMIT NK_INTEGER OFFSET NK_INTEGER */ yytestcase(yyruleno==582);
{ yymsp[-3].minor.yy242 = createLimitNode(pCxt, &yymsp[-2].minor.yy0, &yymsp[0].minor.yy0); }
break;
- case 578: /* slimit_clause_opt ::= SLIMIT NK_INTEGER NK_COMMA NK_INTEGER */
- case 582: /* limit_clause_opt ::= LIMIT NK_INTEGER NK_COMMA NK_INTEGER */ yytestcase(yyruleno==582);
+ case 579: /* slimit_clause_opt ::= SLIMIT NK_INTEGER NK_COMMA NK_INTEGER */
+ case 583: /* limit_clause_opt ::= LIMIT NK_INTEGER NK_COMMA NK_INTEGER */ yytestcase(yyruleno==583);
{ yymsp[-3].minor.yy242 = createLimitNode(pCxt, &yymsp[0].minor.yy0, &yymsp[-2].minor.yy0); }
break;
- case 583: /* subquery ::= NK_LP query_expression NK_RP */
+ case 584: /* subquery ::= NK_LP query_expression NK_RP */
{ yylhsminor.yy242 = createRawExprNodeExt(pCxt, &yymsp[-2].minor.yy0, &yymsp[0].minor.yy0, yymsp[-1].minor.yy242); }
yymsp[-2].minor.yy242 = yylhsminor.yy242;
break;
- case 588: /* sort_specification ::= expr_or_subquery ordering_specification_opt null_ordering_opt */
+ case 589: /* sort_specification ::= expr_or_subquery ordering_specification_opt null_ordering_opt */
{ yylhsminor.yy242 = createOrderByExprNode(pCxt, releaseRawExprNode(pCxt, yymsp[-2].minor.yy242), yymsp[-1].minor.yy48, yymsp[0].minor.yy687); }
yymsp[-2].minor.yy242 = yylhsminor.yy242;
break;
- case 589: /* ordering_specification_opt ::= */
+ case 590: /* ordering_specification_opt ::= */
{ yymsp[1].minor.yy48 = ORDER_ASC; }
break;
- case 590: /* ordering_specification_opt ::= ASC */
+ case 591: /* ordering_specification_opt ::= ASC */
{ yymsp[0].minor.yy48 = ORDER_ASC; }
break;
- case 591: /* ordering_specification_opt ::= DESC */
+ case 592: /* ordering_specification_opt ::= DESC */
{ yymsp[0].minor.yy48 = ORDER_DESC; }
break;
- case 592: /* null_ordering_opt ::= */
+ case 593: /* null_ordering_opt ::= */
{ yymsp[1].minor.yy687 = NULL_ORDER_DEFAULT; }
break;
- case 593: /* null_ordering_opt ::= NULLS FIRST */
+ case 594: /* null_ordering_opt ::= NULLS FIRST */
{ yymsp[-1].minor.yy687 = NULL_ORDER_FIRST; }
break;
- case 594: /* null_ordering_opt ::= NULLS LAST */
+ case 595: /* null_ordering_opt ::= NULLS LAST */
{ yymsp[-1].minor.yy687 = NULL_ORDER_LAST; }
break;
default:
diff --git a/source/libs/parser/test/parInitialCTest.cpp b/source/libs/parser/test/parInitialCTest.cpp
index 6a08193a39a1e1da7f321ae111445e5d714ac1ac..a4e8bdd87a6072a20722e0c858775261f8c593b0 100644
--- a/source/libs/parser/test/parInitialCTest.cpp
+++ b/source/libs/parser/test/parInitialCTest.cpp
@@ -1145,6 +1145,15 @@ TEST_F(ParserInitialCTest, createTopic) {
setCreateTopicReq("tp1", 1, "create topic if not exists tp1 with meta as stable st1", nullptr, "test", "st1", 1);
run("CREATE TOPIC IF NOT EXISTS tp1 WITH META AS STABLE st1");
clearCreateTopicReq();
+
+ setCreateTopicReq("tp1", 1, "create topic if not exists tp1 as stable st1 where tag1 > 0", nullptr, "test", "st1");
+ run("CREATE TOPIC IF NOT EXISTS tp1 AS STABLE st1 WHERE tag1 > 0");
+ clearCreateTopicReq();
+
+ setCreateTopicReq("tp1", 1, "create topic if not exists tp1 with meta as stable st1 where tag1 > 0", nullptr, "test", "st1", 1);
+ run("CREATE TOPIC IF NOT EXISTS tp1 WITH META AS STABLE st1 WHERE tag1 > 0");
+ clearCreateTopicReq();
+
}
/*
diff --git a/source/libs/parser/test/parSelectTest.cpp b/source/libs/parser/test/parSelectTest.cpp
index 2d8ce55b72152a474f142634875f2c3e8201729b..68ded3afddd627c37091e30af551a1a826c27bc3 100644
--- a/source/libs/parser/test/parSelectTest.cpp
+++ b/source/libs/parser/test/parSelectTest.cpp
@@ -117,6 +117,15 @@ TEST_F(ParserSelectTest, timelineFunc) {
run("SELECT LAST(*), FIRST(*) FROM t1 INTERVAL(10s)");
run("SELECT diff(c1) FROM t1");
+
+ run("select diff(ts) from (select _wstart as ts, count(*) from st1 partition by tbname interval(1d))", TSDB_CODE_PAR_NOT_ALLOWED_FUNC);
+
+ run("select diff(ts) from (select _wstart as ts, count(*) from st1 partition by tbname interval(1d) order by ts)");
+
+ run("select t1.* from st1s1 t1, (select _wstart as ts, count(*) from st1s2 partition by tbname interval(1d)) WHERE t1.ts = t2.ts", TSDB_CODE_PAR_NOT_SUPPORT_JOIN);
+
+ run("select t1.* from st1s1 t1, (select _wstart as ts, count(*) from st1s2 partition by tbname interval(1d) order by ts) t2 WHERE t1.ts = t2.ts");
+
}
TEST_F(ParserSelectTest, selectFunc) {
@@ -325,6 +334,10 @@ TEST_F(ParserSelectTest, subquery) {
run("SELECT SUM(a) FROM (SELECT MAX(c1) a, _wstart FROM st1s1 PARTITION BY TBNAME INTERVAL(1m) ORDER BY _WSTART) "
"INTERVAL(1n)");
+ run("SELECT diff(a) FROM (SELECT _wstart, tag1, tag2, MAX(c1) a FROM st1 PARTITION BY tag1 INTERVAL(1m)) PARTITION BY tag1");
+
+ run("SELECT diff(a) FROM (SELECT _wstart, tag1, tag2, MAX(c1) a FROM st1 PARTITION BY tag1 INTERVAL(1m)) PARTITION BY tag2", TSDB_CODE_PAR_NOT_ALLOWED_FUNC);
+
run("SELECT _C0 FROM (SELECT _ROWTS, ts FROM st1s1)");
run("SELECT ts FROM (SELECT t1.ts FROM st1s1 t1)");
@@ -472,6 +485,8 @@ TEST_F(ParserSelectTest, joinSemanticCheck) {
run("SELECT * FROM (SELECT tag1, SUM(c1) s FROM st1 GROUP BY tag1) t1, st1 t2 where t1.tag1 = t2.tag1",
TSDB_CODE_PAR_NOT_SUPPORT_JOIN);
+
+ run("SELECT count(*) FROM t1 a join t1 b on a.ts=b.ts where a.ts=b.ts");
}
} // namespace ParserTest
diff --git a/source/libs/planner/src/planLogicCreater.c b/source/libs/planner/src/planLogicCreater.c
index 66b478004f9dfbc3040047b7ea5d36a37bb95b95..5bbc9acdadfe671d54cbe8ce534797ed580146ed 100644
--- a/source/libs/planner/src/planLogicCreater.c
+++ b/source/libs/planner/src/planLogicCreater.c
@@ -37,19 +37,24 @@ typedef struct SRewriteExprCxt {
int32_t errCode;
SNodeList* pExprs;
bool* pOutputs;
+ bool isPartitionBy;
} SRewriteExprCxt;
-static void setColumnInfo(SFunctionNode* pFunc, SColumnNode* pCol) {
+static void setColumnInfo(SFunctionNode* pFunc, SColumnNode* pCol, bool isPartitionBy) {
switch (pFunc->funcType) {
case FUNCTION_TYPE_TBNAME:
pCol->colType = COLUMN_TYPE_TBNAME;
break;
case FUNCTION_TYPE_WSTART:
- pCol->colId = PRIMARYKEY_TIMESTAMP_COL_ID;
+ if (!isPartitionBy) {
+ pCol->colId = PRIMARYKEY_TIMESTAMP_COL_ID;
+ }
pCol->colType = COLUMN_TYPE_WINDOW_START;
break;
case FUNCTION_TYPE_WEND:
- pCol->colId = PRIMARYKEY_TIMESTAMP_COL_ID;
+ if (!isPartitionBy) {
+ pCol->colId = PRIMARYKEY_TIMESTAMP_COL_ID;
+ }
pCol->colType = COLUMN_TYPE_WINDOW_END;
break;
case FUNCTION_TYPE_WDURATION:
@@ -100,9 +105,10 @@ static EDealRes doRewriteExpr(SNode** pNode, void* pContext) {
SExprNode* pToBeRewrittenExpr = (SExprNode*)(*pNode);
pCol->node.resType = pToBeRewrittenExpr->resType;
strcpy(pCol->node.aliasName, pToBeRewrittenExpr->aliasName);
+ strcpy(pCol->node.userAlias, ((SExprNode*)pExpr)->userAlias);
strcpy(pCol->colName, ((SExprNode*)pExpr)->aliasName);
if (QUERY_NODE_FUNCTION == nodeType(pExpr)) {
- setColumnInfo((SFunctionNode*)pExpr, pCol);
+ setColumnInfo((SFunctionNode*)pExpr, pCol, pCxt->isPartitionBy);
}
nodesDestroyNode(*pNode);
*pNode = (SNode*)pCol;
@@ -141,7 +147,8 @@ static EDealRes doNameExpr(SNode* pNode, void* pContext) {
static int32_t rewriteExprForSelect(SNode* pExpr, SSelectStmt* pSelect, ESqlClause clause) {
nodesWalkExpr(pExpr, doNameExpr, NULL);
- SRewriteExprCxt cxt = {.errCode = TSDB_CODE_SUCCESS, .pExprs = NULL, .pOutputs = NULL};
+ bool isPartitionBy = (pSelect->pPartitionByList && pSelect->pPartitionByList->length > 0) ? true : false;
+ SRewriteExprCxt cxt = {.errCode = TSDB_CODE_SUCCESS, .pExprs = NULL, .pOutputs = NULL, .isPartitionBy = isPartitionBy};
cxt.errCode = nodesListMakeAppend(&cxt.pExprs, pExpr);
if (TSDB_CODE_SUCCESS == cxt.errCode) {
nodesRewriteSelectStmt(pSelect, clause, doRewriteExpr, &cxt);
@@ -169,7 +176,8 @@ static int32_t cloneRewriteExprs(SNodeList* pExprs, bool* pOutputs, SNodeList**
static int32_t rewriteExprsForSelect(SNodeList* pExprs, SSelectStmt* pSelect, ESqlClause clause,
SNodeList** pRewriteExprs) {
nodesWalkExprs(pExprs, doNameExpr, NULL);
- SRewriteExprCxt cxt = {.errCode = TSDB_CODE_SUCCESS, .pExprs = pExprs, .pOutputs = NULL};
+ bool isPartitionBy = (pSelect->pPartitionByList && pSelect->pPartitionByList->length > 0) ? true : false;
+ SRewriteExprCxt cxt = {.errCode = TSDB_CODE_SUCCESS, .pExprs = pExprs, .pOutputs = NULL, .isPartitionBy = isPartitionBy};
if (NULL != pRewriteExprs) {
cxt.pOutputs = taosMemoryCalloc(LIST_LENGTH(pExprs), sizeof(bool));
if (NULL == cxt.pOutputs) {
@@ -186,14 +194,14 @@ static int32_t rewriteExprsForSelect(SNodeList* pExprs, SSelectStmt* pSelect, ES
static int32_t rewriteExpr(SNodeList* pExprs, SNode** pTarget) {
nodesWalkExprs(pExprs, doNameExpr, NULL);
- SRewriteExprCxt cxt = {.errCode = TSDB_CODE_SUCCESS, .pExprs = pExprs, .pOutputs = NULL};
+ SRewriteExprCxt cxt = {.errCode = TSDB_CODE_SUCCESS, .pExprs = pExprs, .pOutputs = NULL, .isPartitionBy = false};
nodesRewriteExpr(pTarget, doRewriteExpr, &cxt);
return cxt.errCode;
}
static int32_t rewriteExprs(SNodeList* pExprs, SNodeList* pTarget) {
nodesWalkExprs(pExprs, doNameExpr, NULL);
- SRewriteExprCxt cxt = {.errCode = TSDB_CODE_SUCCESS, .pExprs = pExprs, .pOutputs = NULL};
+ SRewriteExprCxt cxt = {.errCode = TSDB_CODE_SUCCESS, .pExprs = pExprs, .pOutputs = NULL, .isPartitionBy = false};
nodesRewriteExprs(pTarget, doRewriteExpr, &cxt);
return cxt.errCode;
}
@@ -543,11 +551,16 @@ static SNode* createGroupingSetNode(SNode* pExpr) {
return (SNode*)pGroupingSet;
}
-static EGroupAction getGroupAction(SLogicPlanContext* pCxt, SSelectStmt* pSelect) {
+static EGroupAction getDistinctGroupAction(SLogicPlanContext* pCxt, SSelectStmt* pSelect) {
return (pCxt->pPlanCxt->streamQuery || NULL != pSelect->pLimit || NULL != pSelect->pSlimit) ? GROUP_ACTION_KEEP
: GROUP_ACTION_NONE;
}
+static EGroupAction getGroupAction(SLogicPlanContext* pCxt, SSelectStmt* pSelect) {
+ return ((pCxt->pPlanCxt->streamQuery || NULL != pSelect->pLimit || NULL != pSelect->pSlimit) && !pSelect->isDistinct) ? GROUP_ACTION_KEEP
+ : GROUP_ACTION_NONE;
+}
+
static EDataOrderLevel getRequireDataOrder(bool needTimeline, SSelectStmt* pSelect) {
return needTimeline ? (NULL != pSelect->pPartitionByList ? DATA_ORDER_LEVEL_IN_GROUP : DATA_ORDER_LEVEL_GLOBAL)
: DATA_ORDER_LEVEL_NONE;
@@ -1166,7 +1179,7 @@ static int32_t createDistinctLogicNode(SLogicPlanContext* pCxt, SSelectStmt* pSe
return TSDB_CODE_OUT_OF_MEMORY;
}
- pAgg->node.groupAction = GROUP_ACTION_CLEAR;
+ pAgg->node.groupAction = GROUP_ACTION_CLEAR;//getDistinctGroupAction(pCxt, pSelect);
pAgg->node.requireDataOrder = DATA_ORDER_LEVEL_NONE;
pAgg->node.resultDataOrder = DATA_ORDER_LEVEL_NONE;
diff --git a/source/libs/planner/src/planOptimizer.c b/source/libs/planner/src/planOptimizer.c
index 59b7c417a060245900e683815d9e8c402affaf0a..8b75fe6b33b4821bd813c73a92507e7bda3e05f2 100644
--- a/source/libs/planner/src/planOptimizer.c
+++ b/source/libs/planner/src/planOptimizer.c
@@ -740,6 +740,85 @@ static int32_t pushDownCondOptJoinExtractMergeCond(SOptimizeContext* pCxt, SJoin
return code;
}
+static bool pushDownCondOptIsTableColumn(SNode* pNode, SNodeList* pTableCols) {
+ if (QUERY_NODE_COLUMN != nodeType(pNode)) {
+ return false;
+ }
+ SColumnNode* pCol = (SColumnNode*)pNode;
+ return pushDownCondOptBelongThisTable(pNode, pTableCols);
+}
+
+static bool pushDownCondOptIsColEqualOnCond(SJoinLogicNode* pJoin, SNode* pCond) {
+ if (QUERY_NODE_OPERATOR != nodeType(pCond)) {
+ return false;
+ }
+ SOperatorNode* pOper = (SOperatorNode*)pCond;
+ if (OP_TYPE_EQUAL != pOper->opType) {
+ return false;
+ }
+ if (QUERY_NODE_COLUMN != nodeType(pOper->pLeft) || QUERY_NODE_COLUMN != nodeType(pOper->pRight)) {
+ return false;
+ }
+ SColumnNode* pLeft = (SColumnNode*)(pOper->pLeft);
+ SColumnNode* pRight = (SColumnNode*)(pOper->pRight);
+ //TODO: add cast to operator and remove this restriction of optimization
+ if (pLeft->node.resType.type != pRight->node.resType.type || pLeft->node.resType.bytes != pRight->node.resType.bytes) {
+ return false;
+ }
+ SNodeList* pLeftCols = ((SLogicNode*)nodesListGetNode(pJoin->node.pChildren, 0))->pTargets;
+ SNodeList* pRightCols = ((SLogicNode*)nodesListGetNode(pJoin->node.pChildren, 1))->pTargets;
+ if (pushDownCondOptIsTableColumn(pOper->pLeft, pLeftCols)) {
+ return pushDownCondOptIsTableColumn(pOper->pRight, pRightCols);
+ } else if (pushDownCondOptIsTableColumn(pOper->pLeft, pRightCols)) {
+ return pushDownCondOptIsTableColumn(pOper->pRight, pLeftCols);
+ }
+ return false;
+}
+
+static int32_t pushDownCondOptJoinExtractColEqualOnLogicCond(SJoinLogicNode* pJoin) {
+ SLogicConditionNode* pLogicCond = (SLogicConditionNode*)(pJoin->pOnConditions);
+
+ int32_t code = TSDB_CODE_SUCCESS;
+ SNodeList* pEqualOnConds = NULL;
+ SNode* pCond = NULL;
+ FOREACH(pCond, pLogicCond->pParameterList) {
+ if (pushDownCondOptIsColEqualOnCond(pJoin, pCond)) {
+ code = nodesListMakeAppend(&pEqualOnConds, nodesCloneNode(pCond));
+ }
+ }
+
+ SNode* pTempTagEqCond = NULL;
+ if (TSDB_CODE_SUCCESS == code) {
+ code = nodesMergeConds(&pTempTagEqCond, &pEqualOnConds);
+ }
+
+ if (TSDB_CODE_SUCCESS == code) {
+ pJoin->pColEqualOnConditions = pTempTagEqCond;
+ return TSDB_CODE_SUCCESS;
+ } else {
+ nodesDestroyList(pEqualOnConds);
+ return TSDB_CODE_PLAN_INTERNAL_ERROR;
+ }
+ return TSDB_CODE_SUCCESS;
+}
+
+static int32_t pushDownCondOptJoinExtractColEqualOnCond(SOptimizeContext* pCxt, SJoinLogicNode* pJoin) {
+ if (NULL == pJoin->pOnConditions) {
+ pJoin->pColEqualOnConditions = NULL;
+ return TSDB_CODE_SUCCESS;
+ }
+ if (QUERY_NODE_LOGIC_CONDITION == nodeType(pJoin->pOnConditions) &&
+ LOGIC_COND_TYPE_AND == ((SLogicConditionNode*)(pJoin->pOnConditions))->condType) {
+ return pushDownCondOptJoinExtractColEqualOnLogicCond(pJoin);
+ }
+
+ if (pushDownCondOptIsColEqualOnCond(pJoin, pJoin->pOnConditions)) {
+ pJoin->pColEqualOnConditions = nodesCloneNode(pJoin->pOnConditions);
+ }
+
+ return TSDB_CODE_SUCCESS;
+}
+
static int32_t pushDownCondOptDealJoin(SOptimizeContext* pCxt, SJoinLogicNode* pJoin) {
if (OPTIMIZE_FLAG_TEST_MASK(pJoin->node.optimizedFlag, OPTIMIZE_FLAG_PUSH_DOWN_CONDE)) {
return TSDB_CODE_SUCCESS;
@@ -774,6 +853,10 @@ static int32_t pushDownCondOptDealJoin(SOptimizeContext* pCxt, SJoinLogicNode* p
code = pushDownCondOptJoinExtractMergeCond(pCxt, pJoin);
}
+ if (TSDB_CODE_SUCCESS == code) {
+ code = pushDownCondOptJoinExtractColEqualOnCond(pCxt, pJoin);
+ }
+
if (TSDB_CODE_SUCCESS == code) {
OPTIMIZE_FLAG_SET_MASK(pJoin->node.optimizedFlag, OPTIMIZE_FLAG_PUSH_DOWN_CONDE);
pCxt->optimized = true;
@@ -1259,8 +1342,8 @@ static bool smaIndexOptEqualInterval(SScanLogicNode* pScan, SWindowLogicNode* pW
.sliding = pIndex->sliding,
.slidingUnit = pIndex->slidingUnit,
.precision = pScan->node.precision};
- return (pScan->scanRange.skey == taosTimeTruncate(pScan->scanRange.skey, &interval, pScan->node.precision)) &&
- (pScan->scanRange.ekey + 1 == taosTimeTruncate(pScan->scanRange.ekey + 1, &interval, pScan->node.precision));
+ return (pScan->scanRange.skey == taosTimeTruncate(pScan->scanRange.skey, &interval)) &&
+ (pScan->scanRange.ekey + 1 == taosTimeTruncate(pScan->scanRange.ekey + 1, &interval));
}
return true;
}
@@ -2234,7 +2317,7 @@ static bool lastRowScanOptMayBeOptimized(SLogicNode* pNode) {
if (QUERY_NODE_COLUMN == nodeType(pPar)) {
SColumnNode* pCol = (SColumnNode*)pPar;
if (pCol->colType != COLUMN_TYPE_COLUMN) {
- return false;
+ return false;
}
}
if (hasSelectFunc || QUERY_NODE_VALUE == nodeType(nodesListGetNode(pAggFunc->pParameterList, 0))) {
diff --git a/source/libs/planner/src/planPhysiCreater.c b/source/libs/planner/src/planPhysiCreater.c
index be43bb008c7ece11cd2cf1eb9695b2a6a410415d..4f57193856ea5016aad37731fc342e9bd1b5b9f2 100644
--- a/source/libs/planner/src/planPhysiCreater.c
+++ b/source/libs/planner/src/planPhysiCreater.c
@@ -705,6 +705,9 @@ static int32_t createJoinPhysiNode(SPhysiPlanContext* pCxt, SNodeList* pChildren
pJoinLogicNode->pOnConditions, &pJoin->pOnConditions);
}
+ if (TSDB_CODE_SUCCESS == code && NULL != pJoinLogicNode->pColEqualOnConditions) {
+ code = setNodeSlotId(pCxt, pLeftDesc->dataBlockId, pRightDesc->dataBlockId, pJoinLogicNode->pColEqualOnConditions, &pJoin->pColEqualOnConditions);
+ }
if (TSDB_CODE_SUCCESS == code) {
code = setConditionsSlotId(pCxt, (const SLogicNode*)pJoinLogicNode, (SPhysiNode*)pJoin);
}
@@ -1147,7 +1150,7 @@ static int32_t createExchangePhysiNode(SPhysiPlanContext* pCxt, SExchangeLogicNo
}
}
-static int32_t createWindowPhysiNodeFinalize(SPhysiPlanContext* pCxt, SNodeList* pChildren, SWinodwPhysiNode* pWindow,
+static int32_t createWindowPhysiNodeFinalize(SPhysiPlanContext* pCxt, SNodeList* pChildren, SWindowPhysiNode* pWindow,
SWindowLogicNode* pWindowLogicNode) {
pWindow->triggerType = pWindowLogicNode->triggerType;
pWindow->watermark = pWindowLogicNode->watermark;
@@ -1644,6 +1647,9 @@ static int32_t createPhysiNode(SPhysiPlanContext* pCxt, SLogicNode* pLogicNode,
if (TSDB_CODE_SUCCESS == code) {
code = nodesListStrictAppend(pChildren, (SNode*)pChild);
}
+ if (TSDB_CODE_SUCCESS != code) {
+ break;
+ }
}
if (TSDB_CODE_SUCCESS == code) {
diff --git a/source/libs/qcom/src/queryUtil.c b/source/libs/qcom/src/queryUtil.c
index b3627e1a9609b0425b3bd555f7c8e3317fb12265..fa2ddd9163c8309c3853e5c189dd1f9124c64307 100644
--- a/source/libs/qcom/src/queryUtil.c
+++ b/source/libs/qcom/src/queryUtil.c
@@ -454,6 +454,18 @@ int32_t cloneTableMeta(STableMeta* pSrc, STableMeta** pDst) {
return TSDB_CODE_SUCCESS;
}
+void getColumnTypeFromMeta(STableMeta* pMeta, char* pName, ETableColumnType* pType) {
+ int32_t nums = pMeta->tableInfo.numOfTags + pMeta->tableInfo.numOfColumns;
+ for (int32_t i = 0; i < nums; ++i) {
+ if (0 == strcmp(pName, pMeta->schema[i].name)) {
+ *pType = (i < pMeta->tableInfo.numOfColumns) ? TCOL_TYPE_COLUMN : TCOL_TYPE_TAG;
+ return;
+ }
+ }
+
+ *pType = TCOL_TYPE_NONE;
+}
+
void freeVgInfo(SDBVgInfo* vgInfo) {
if (NULL == vgInfo) {
return;
diff --git a/source/libs/qworker/CMakeLists.txt b/source/libs/qworker/CMakeLists.txt
index 92ccde31634eb8051952ff313c86056fba08f075..8ba8b79ab80430395131ad10d7c7912dc17879c2 100644
--- a/source/libs/qworker/CMakeLists.txt
+++ b/source/libs/qworker/CMakeLists.txt
@@ -7,9 +7,15 @@ target_include_directories(
PRIVATE "${CMAKE_CURRENT_SOURCE_DIR}/inc"
)
-target_link_libraries(qworker
- PRIVATE os util transport nodes planner qcom executor
- )
+IF (TD_GRANT)
+ TARGET_LINK_LIBRARIES(qworker
+ PRIVATE os util transport nodes planner qcom executor index grant
+ )
+ELSE ()
+ TARGET_LINK_LIBRARIES(qworker
+ PRIVATE os util transport nodes planner qcom executor index
+ )
+ENDIF()
if(${BUILD_TEST})
ADD_SUBDIRECTORY(test)
diff --git a/source/libs/qworker/src/qwMsg.c b/source/libs/qworker/src/qwMsg.c
index 1a3a740b34918c695dfb79848f8df2c3500b0f3a..231e597724f7bb03c4e1a726bb9e59e18660d532 100644
--- a/source/libs/qworker/src/qwMsg.c
+++ b/source/libs/qworker/src/qwMsg.c
@@ -440,11 +440,7 @@ int32_t qWorkerProcessQueryMsg(void *node, void *qWorkerMgmt, SRpcMsg *pMsg, int
int64_t rId = msg.refId;
int32_t eId = msg.execId;
- SQWMsg qwMsg = {.node = node,
- .msg = msg.msg,
- .msgLen = msg.msgLen,
- .connInfo = pMsg->info,
- .msgType = pMsg->msgType};
+ SQWMsg qwMsg = {.node = node, .msg = msg.msg, .msgLen = msg.msgLen, .connInfo = pMsg->info, .msgType = pMsg->msgType};
qwMsg.msgInfo.explain = msg.explain;
qwMsg.msgInfo.taskType = msg.taskType;
qwMsg.msgInfo.needFetch = msg.needFetch;
diff --git a/source/libs/qworker/test/CMakeLists.txt b/source/libs/qworker/test/CMakeLists.txt
index 780f5ae84be12a8b1feea24c25bfa148a1f9de5b..22870ea94dc1df36b8a50ebafef204c1a614d23a 100644
--- a/source/libs/qworker/test/CMakeLists.txt
+++ b/source/libs/qworker/test/CMakeLists.txt
@@ -8,7 +8,7 @@ IF(NOT TD_DARWIN)
ADD_EXECUTABLE(qworkerTest ${SOURCE_LIST})
TARGET_LINK_LIBRARIES(
qworkerTest
- PUBLIC os util common transport gtest qcom nodes planner qworker executor
+ PUBLIC os util common transport gtest qcom nodes planner qworker executor index
)
TARGET_INCLUDE_DIRECTORIES(
diff --git a/source/libs/scalar/CMakeLists.txt b/source/libs/scalar/CMakeLists.txt
index 193a6971e54b52b1c59749c0b41eefe6b9bbb2fb..30c68cb512b47b725caadb454fe3bff008520938 100644
--- a/source/libs/scalar/CMakeLists.txt
+++ b/source/libs/scalar/CMakeLists.txt
@@ -14,6 +14,7 @@ target_link_libraries(scalar
PRIVATE nodes
PRIVATE function
PRIVATE qcom
+ PRIVATE parser
)
if(${BUILD_TEST})
diff --git a/source/libs/scalar/inc/filterInt.h b/source/libs/scalar/inc/filterInt.h
index 2023d387773aa294b6949f35365060dc8661c322..1ca8ac1d8c0e9c86e02cde3f317bf7de5af510a8 100644
--- a/source/libs/scalar/inc/filterInt.h
+++ b/source/libs/scalar/inc/filterInt.h
@@ -227,8 +227,10 @@ typedef struct SFltTreeStat {
SFilterInfo *info;
} SFltTreeStat;
+
typedef struct SFltScalarCtx {
SNode *node;
+ SArray* fltSclRange;
} SFltScalarCtx;
typedef struct SFltBuildGroupCtx {
@@ -237,6 +239,11 @@ typedef struct SFltBuildGroupCtx {
int32_t code;
} SFltBuildGroupCtx;
+typedef struct {
+ SColumnNode *colNode;
+ SArray *points;
+} SFltSclColumnRange;
+
struct SFilterInfo {
bool scalarMode;
SFltScalarCtx sclCtx;
diff --git a/source/libs/scalar/src/filter.c b/source/libs/scalar/src/filter.c
index 344c3dcca30966d2b40f37c95d78ecae69801f80..bbefcc6b3ae96157ea138a68aef0453a1caf7489 100644
--- a/source/libs/scalar/src/filter.c
+++ b/source/libs/scalar/src/filter.c
@@ -14,6 +14,7 @@
*/
#include
#include "os.h"
+#include "tglobal.h"
#include "thash.h"
// #include "queryLog.h"
#include "filter.h"
@@ -22,6 +23,7 @@
#include "sclInt.h"
#include "tcompare.h"
#include "tdatablock.h"
+#include "tsimplehash.h"
#include "ttime.h"
bool filterRangeCompGi(const void *minv, const void *maxv, const void *minr, const void *maxr, __compar_fn_t cfunc) {
@@ -261,7 +263,7 @@ int8_t filterGetCompFuncIdx(int32_t type, int32_t optr) {
comparFn = 19;
} else if (optr == OP_TYPE_NMATCH) {
comparFn = 20;
- } else if (optr == OP_TYPE_LIKE) { /* wildcard query using like operator */
+ } else if (optr == OP_TYPE_LIKE) { /* wildcard query using like operator */
comparFn = 7;
} else if (optr == OP_TYPE_NOT_LIKE) { /* wildcard query using like operator */
comparFn = 26;
@@ -1636,7 +1638,7 @@ void filterDumpInfoToString(SFilterInfo *info, const char *msg, int32_t options)
SDataType *dType = &var->node.resType;
qDebug("VAL%d => [type:%d][val:%" PRIx64 "]", i, dType->type, var->datum.i); // TODO
} else if (field->data) {
- qDebug("VAL%d => [type:NIL][val:NIL]", i); // TODO
+ qDebug("VAL%d => [type:NIL][val:NIL]", i); // TODO
}
}
@@ -1843,6 +1845,13 @@ void filterFreeInfo(SFilterInfo *info) {
return;
}
+ for (int32_t i = 0; i < taosArrayGetSize(info->sclCtx.fltSclRange); ++i) {
+ SFltSclColumnRange *colRange = taosArrayGet(info->sclCtx.fltSclRange, i);
+ nodesDestroyNode((SNode *)colRange->colNode);
+ taosArrayDestroy(colRange->points);
+ }
+ taosArrayDestroy(info->sclCtx.fltSclRange);
+
taosMemoryFreeClear(info->cunits);
taosMemoryFreeClear(info->blkUnitRes);
taosMemoryFreeClear(info->blkUnits);
@@ -3426,8 +3435,356 @@ _return:
return code;
}
+// compare ranges, null < min < val < max. null=null, min=min, max=max
+typedef enum {
+ FLT_SCL_DATUM_KIND_NULL,
+ FLT_SCL_DATUM_KIND_MIN,
+ FLT_SCL_DATUM_KIND_INT64,
+ FLT_SCL_DATUM_KIND_UINT64,
+ FLT_SCL_DATUM_KIND_FLOAT64,
+ FLT_SCL_DATUM_KIND_VARCHAR,
+ FLT_SCL_DATUM_KIND_NCHAR,
+ FLT_SCL_DATUM_KIND_MAX,
+} SFltSclDatumKind;
+
+typedef struct {
+ SFltSclDatumKind kind;
+ union {
+ int64_t i; // for int and bool (1 true, 0 false) and ts
+ uint64_t u; // for uint
+ double d; // for double
+ uint8_t *pData; // for varchar, nchar, len prefixed
+ };
+ SDataType type; // TODO: original data type, may not be used?
+} SFltSclDatum;
+
+typedef struct {
+ SFltSclDatum val;
+ bool excl;
+ bool start;
+} SFltSclPoint;
+
+int32_t fltSclCompareWithFloat64(SFltSclDatum *val1, SFltSclDatum *val2) {
+ // val2->kind == float64
+ switch (val1->kind) {
+ case FLT_SCL_DATUM_KIND_UINT64:
+ return compareUint64Double(&val1->u, &val2->d);
+ case FLT_SCL_DATUM_KIND_INT64:
+ return compareInt64Double(&val1->i, &val2->d);
+ case FLT_SCL_DATUM_KIND_FLOAT64: {
+ return compareDoubleVal(&val1->d, &val2->d);
+ }
+ // TODO: varchar, nchar
+ default:
+      qError("not supported comparison. kind1 %d, kind2 %d", val1->kind, val2->kind);
+ return (val1->kind - val2->kind);
+ }
+}
+
+int32_t fltSclCompareWithInt64(SFltSclDatum *val1, SFltSclDatum *val2) {
+ // val2->kind == int64
+ switch (val1->kind) {
+ case FLT_SCL_DATUM_KIND_UINT64:
+ return compareUint64Int64(&val1->u, &val2->i);
+ case FLT_SCL_DATUM_KIND_INT64:
+ return compareInt64Val(&val1->i, &val2->i);
+ case FLT_SCL_DATUM_KIND_FLOAT64: {
+ return compareDoubleInt64(&val1->d, &val2->i);
+ }
+ // TODO: varchar, nchar
+ default:
+      qError("not supported comparison. kind1 %d, kind2 %d", val1->kind, val2->kind);
+ return (val1->kind - val2->kind);
+ }
+}
+
+int32_t fltSclCompareWithUInt64(SFltSclDatum *val1, SFltSclDatum *val2) {
+ // val2 kind == uint64
+ switch (val1->kind) {
+ case FLT_SCL_DATUM_KIND_UINT64:
+ return compareUint64Val(&val1->u, &val2->u);
+ case FLT_SCL_DATUM_KIND_INT64:
+ return compareInt64Uint64(&val1->i, &val2->u);
+ case FLT_SCL_DATUM_KIND_FLOAT64: {
+ return compareDoubleUint64(&val1->d, &val2->u);
+ }
+ // TODO: varchar, nchar
+ default:
+      qError("not supported comparison. kind1 %d, kind2 %d", val1->kind, val2->kind);
+ return (val1->kind - val2->kind);
+ }
+}
+
+int32_t fltSclCompareDatum(SFltSclDatum *val1, SFltSclDatum *val2) {
+ if (val2->kind == FLT_SCL_DATUM_KIND_NULL || val2->kind == FLT_SCL_DATUM_KIND_MIN ||
+ val2->kind == FLT_SCL_DATUM_KIND_MAX) {
+ return (val1->kind < val2->kind) ? -1 : ((val1->kind > val2->kind) ? 1 : 0);
+ }
+
+ switch (val2->kind) {
+ case FLT_SCL_DATUM_KIND_UINT64: {
+ return fltSclCompareWithUInt64(val1, val2);
+ }
+ case FLT_SCL_DATUM_KIND_INT64: {
+ return fltSclCompareWithInt64(val1, val2);
+ }
+ case FLT_SCL_DATUM_KIND_FLOAT64: {
+ return fltSclCompareWithFloat64(val1, val2);
+ }
+ // TODO: varchar/nchar
+ default:
+ qError("not supported kind when compare datum. kind2 : %d", val2->kind);
+ return 0;
+ break;
+ }
+ return 0;
+}
+
+bool fltSclLessPoint(SFltSclPoint *pt1, SFltSclPoint *pt2) {
+ // first value compare
+ int32_t cmp = fltSclCompareDatum(&pt1->val, &pt2->val);
+ if (cmp != 0) {
+ return cmp < 0;
+ }
+
+ if (pt1->start && pt2->start) {
+ return !pt1->excl && pt2->excl;
+ } else if (pt1->start) {
+ return !pt1->excl && !pt2->excl;
+ } else if (pt2->start) {
+ return pt1->excl || pt2->excl;
+ }
+ return pt1->excl && !pt2->excl;
+}
+
+int32_t fltSclMergeSort(SArray *pts1, SArray *pts2, SArray *result) {
+ size_t len1 = taosArrayGetSize(pts1);
+ size_t len2 = taosArrayGetSize(pts2);
+ size_t i = 0;
+ size_t j = 0;
+ while (i < len1 && j < len2) {
+ SFltSclPoint *pt1 = taosArrayGet(pts1, i);
+ SFltSclPoint *pt2 = taosArrayGet(pts2, j);
+ bool less = fltSclLessPoint(pt1, pt2);
+ if (less) {
+ taosArrayPush(result, pt1);
+ ++i;
+ } else {
+ taosArrayPush(result, pt2);
+ ++j;
+ }
+ }
+ if (i < len1) {
+ for (; i < len1; ++i) {
+ SFltSclPoint *pt1 = taosArrayGet(pts1, i);
+ taosArrayPush(result, pt1);
+ }
+ }
+ if (j < len2) {
+ for (; j < len2; ++j) {
+ SFltSclPoint *pt2 = taosArrayGet(pts2, j);
+ taosArrayPush(result, pt2);
+ }
+ }
+ return 0;
+}
+
+int32_t fltSclMerge(SArray *pts1, SArray *pts2, bool isUnion, SArray *merged) {
+ size_t len1 = taosArrayGetSize(pts1);
+ size_t len2 = taosArrayGetSize(pts2);
+ // first merge sort pts1 and pts2
+ SArray *all = taosArrayInit(len1 + len2, sizeof(SFltSclPoint));
+ fltSclMergeSort(pts1, pts2, all);
+ int32_t countRequired = (isUnion) ? 1 : 2;
+ int32_t count = 0;
+ for (int32_t i = 0; i < taosArrayGetSize(all); ++i) {
+ SFltSclPoint *pt = taosArrayGet(all, i);
+ if (pt->start) {
+ ++count;
+ if (count == countRequired) {
+ taosArrayPush(merged, pt);
+ }
+ } else {
+ if (count == countRequired) {
+ taosArrayPush(merged, pt);
+ }
+ --count;
+ }
+ }
+ taosArrayDestroy(all);
+ return 0;
+}
+
+int32_t fltSclIntersect(SArray *pts1, SArray *pts2, SArray *merged) { return fltSclMerge(pts1, pts2, false, merged); }
+
+int32_t fltSclUnion(SArray *pts1, SArray *pts2, SArray *merged) { return fltSclMerge(pts1, pts2, true, merged); }
+
+typedef struct {
+ SColumnNode *colNode;
+ SValueNode *valNode;
+ EOperatorType type;
+} SFltSclOperator;
+
+SFltSclColumnRange *fltSclGetOrCreateColumnRange(SColumnNode *colNode, SArray *colRangeList) {
+ for (int32_t i = 0; i < taosArrayGetSize(colRangeList); ++i) {
+ SFltSclColumnRange *colRange = taosArrayGet(colRangeList, i);
+ if (nodesEqualNode((SNode *)colRange->colNode, (SNode *)colNode)) {
+ return colRange;
+ }
+ }
+ SColumnNode *pColumnNode = (SColumnNode *)nodesCloneNode((SNode *)colNode);
+ SFltSclColumnRange newColRange = {.colNode = pColumnNode, .points = taosArrayInit(4, sizeof(SFltSclPoint))};
+ taosArrayPush(colRangeList, &newColRange);
+ return taosArrayGetLast(colRangeList);
+}
+
+int32_t fltSclBuildDatumFromValueNode(SFltSclDatum *datum, SValueNode *valNode) {
+ datum->type = valNode->node.resType;
+
+ if (valNode->isNull) {
+ datum->kind = FLT_SCL_DATUM_KIND_NULL;
+ } else {
+ switch (valNode->node.resType.type) {
+ case TSDB_DATA_TYPE_NULL: {
+ datum->kind = FLT_SCL_DATUM_KIND_NULL;
+ break;
+ }
+ case TSDB_DATA_TYPE_BOOL: {
+ datum->kind = FLT_SCL_DATUM_KIND_INT64;
+        datum->i = (valNode->datum.b) ? 1 : 0;
+ break;
+ }
+ case TSDB_DATA_TYPE_TINYINT:
+ case TSDB_DATA_TYPE_SMALLINT:
+ case TSDB_DATA_TYPE_INT:
+ case TSDB_DATA_TYPE_BIGINT:
+ case TSDB_DATA_TYPE_TIMESTAMP: {
+ datum->kind = FLT_SCL_DATUM_KIND_INT64;
+ datum->i = valNode->datum.i;
+ break;
+ }
+ case TSDB_DATA_TYPE_UTINYINT:
+ case TSDB_DATA_TYPE_USMALLINT:
+ case TSDB_DATA_TYPE_UINT:
+ case TSDB_DATA_TYPE_UBIGINT: {
+ datum->kind = FLT_SCL_DATUM_KIND_UINT64;
+ datum->u = valNode->datum.u;
+ break;
+ }
+ case TSDB_DATA_TYPE_FLOAT:
+ case TSDB_DATA_TYPE_DOUBLE: {
+ datum->kind = FLT_SCL_DATUM_KIND_FLOAT64;
+ datum->d = valNode->datum.d;
+ break;
+ }
+ // TODO:varchar/nchar/json
+ default: {
+ qError("not supported type %d when build datum from value node", valNode->node.resType.type);
+ break;
+ }
+ }
+ }
+ return TSDB_CODE_SUCCESS;
+}
+
+int32_t fltSclBuildDatumFromBlockSmaValue(SFltSclDatum *datum, uint8_t type, int64_t val) {
+ switch (type) {
+ case TSDB_DATA_TYPE_BOOL:
+ case TSDB_DATA_TYPE_TINYINT:
+ case TSDB_DATA_TYPE_SMALLINT:
+ case TSDB_DATA_TYPE_INT:
+ case TSDB_DATA_TYPE_BIGINT:
+ case TSDB_DATA_TYPE_TIMESTAMP: {
+ datum->kind = FLT_SCL_DATUM_KIND_INT64;
+ datum->i = val;
+ break;
+ }
+ case TSDB_DATA_TYPE_UTINYINT:
+ case TSDB_DATA_TYPE_USMALLINT:
+ case TSDB_DATA_TYPE_UINT:
+ case TSDB_DATA_TYPE_UBIGINT: {
+ datum->kind = FLT_SCL_DATUM_KIND_UINT64;
+ datum->u = *(uint64_t *)&val;
+ break;
+ }
+ case TSDB_DATA_TYPE_FLOAT:
+ case TSDB_DATA_TYPE_DOUBLE: {
+ datum->kind = FLT_SCL_DATUM_KIND_FLOAT64;
+ datum->d = *(double *)&val;
+ break;
+ }
+ // TODO:varchar/nchar/json
+ default: {
+ datum->kind = FLT_SCL_DATUM_KIND_NULL;
+ qError("not supported type %d when build datum from block sma value", type);
+ break;
+ }
+ }
+
+ return TSDB_CODE_SUCCESS;
+}
+
+int32_t fltSclBuildRangeFromBlockSma(SFltSclColumnRange *colRange, SColumnDataAgg *pAgg, int32_t numOfRows,
+ SArray *points) {
+ if (pAgg->numOfNull == numOfRows) {
+ SFltSclDatum datum = {.kind = FLT_SCL_DATUM_KIND_NULL};
+ SFltSclPoint startPt = {.start = true, .excl = false, .val = datum};
+ SFltSclPoint endPt = {.start = false, .excl = false, .val = datum};
+ taosArrayPush(points, &startPt);
+ taosArrayPush(points, &endPt);
+ return TSDB_CODE_SUCCESS;
+ }
+ if (pAgg->numOfNull > 0) {
+ SFltSclDatum nullDatum = {.kind = FLT_SCL_DATUM_KIND_NULL};
+ SFltSclPoint startPt = {.start = true, .excl = false, .val = nullDatum};
+ SFltSclPoint endPt = {.start = false, .excl = false, .val = nullDatum};
+ taosArrayPush(points, &startPt);
+ taosArrayPush(points, &endPt);
+ }
+ SFltSclDatum min;
+ fltSclBuildDatumFromBlockSmaValue(&min, colRange->colNode->node.resType.type, pAgg->min);
+ SFltSclPoint minPt = {.excl = false, .start = true, .val = min};
+ SFltSclDatum max;
+ fltSclBuildDatumFromBlockSmaValue(&max, colRange->colNode->node.resType.type, pAgg->max);
+ SFltSclPoint maxPt = {.excl = false, .start = false, .val = max};
+ taosArrayPush(points, &minPt);
+ taosArrayPush(points, &maxPt);
+ return TSDB_CODE_SUCCESS;
+}
+
bool filterRangeExecute(SFilterInfo *info, SColumnDataAgg **pDataStatis, int32_t numOfCols, int32_t numOfRows) {
if (info->scalarMode) {
+ SArray *colRanges = info->sclCtx.fltSclRange;
+ for (int32_t i = 0; i < taosArrayGetSize(colRanges); ++i) {
+ SFltSclColumnRange *colRange = taosArrayGet(colRanges, i);
+ bool foundCol = false;
+ int32_t j = 0;
+ for (; j < numOfCols; ++j) {
+ if (pDataStatis[j] != NULL && pDataStatis[j]->colId == colRange->colNode->colId) {
+ foundCol = true;
+ break;
+ }
+ }
+ if (foundCol) {
+ SColumnDataAgg *pAgg = pDataStatis[j];
+ SArray *points = taosArrayInit(2, sizeof(SFltSclPoint));
+ fltSclBuildRangeFromBlockSma(colRange, pAgg, numOfRows, points);
+ qDebug("column data agg: nulls %d, rows %d, max %" PRId64 " min %" PRId64, pAgg->numOfNull, numOfRows,
+ pAgg->max, pAgg->min);
+
+ SArray *merged = taosArrayInit(8, sizeof(SFltSclPoint));
+ fltSclIntersect(points, colRange->points, merged);
+ bool isIntersect = taosArrayGetSize(merged) != 0;
+ qDebug("filter range execute, scalar mode, column range found. colId: %d colName: %s has overlap: %d",
+ colRange->colNode->colId, colRange->colNode->colName, isIntersect);
+
+ taosArrayDestroy(merged);
+ taosArrayDestroy(points);
+ if (!isIntersect) {
+ return false;
+ }
+ }
+ }
return true;
}
@@ -3607,6 +3964,31 @@ _return:
return code;
}
+static int32_t fltSclGetDatumValueFromPoint(SFltSclPoint *point, SFltSclDatum *d) {
+ *d = point->val;
+ if (point->val.kind == FLT_SCL_DATUM_KIND_NULL) {
+ return TSDB_CODE_SUCCESS;
+ }
+ if (point->val.kind == FLT_SCL_DATUM_KIND_MAX) {
+ getDataMax(d->type.type, &(d->i));
+ } else if (point->val.kind == FLT_SCL_DATUM_KIND_MIN) {
+ getDataMin(d->type.type, &(d->i));
+ }
+
+ if (IS_INTEGER_TYPE(d->type.type) || IS_TIMESTAMP_TYPE(d->type.type)) {
+ if (point->excl) {
+ if (point->start) {
+ ++d->i;
+ } else {
+ --d->i;
+ }
+ }
+ } else {
+ qError("not supported type %d when get datum from point", d->type.type);
+ }
+ return TSDB_CODE_SUCCESS;
+}
+
int32_t filterGetTimeRange(SNode *pNode, STimeWindow *win, bool *isStrict) {
SFilterInfo *info = NULL;
int32_t code = 0;
@@ -3616,6 +3998,26 @@ int32_t filterGetTimeRange(SNode *pNode, STimeWindow *win, bool *isStrict) {
FLT_ERR_RET(filterInitFromNode(pNode, &info, FLT_OPTION_NO_REWRITE | FLT_OPTION_TIMESTAMP));
if (info->scalarMode) {
+ SArray *colRanges = info->sclCtx.fltSclRange;
+ if (taosArrayGetSize(colRanges) == 1) {
+ SFltSclColumnRange *colRange = taosArrayGet(colRanges, 0);
+ SArray *points = colRange->points;
+ if (taosArrayGetSize(points) == 2) {
+ SFltSclPoint *startPt = taosArrayGet(points, 0);
+ SFltSclPoint *endPt = taosArrayGet(points, 1);
+ SFltSclDatum start;
+ SFltSclDatum end;
+ fltSclGetDatumValueFromPoint(startPt, &start);
+ fltSclGetDatumValueFromPoint(endPt, &end);
+ win->skey = start.i;
+ win->ekey = end.i;
+ *isStrict = true;
+ goto _return;
+ } else if (taosArrayGetSize(points) == 0) {
+ *win = TSWINDOW_DESC_INITIALIZER;
+ goto _return;
+ }
+ }
*win = TSWINDOW_INITIALIZER;
*isStrict = false;
goto _return;
@@ -3946,8 +4348,204 @@ _return:
FLT_RET(code);
}
+int32_t fltSclBuildRangePoints(SFltSclOperator *oper, SArray *points) {
+ switch (oper->type) {
+ case OP_TYPE_GREATER_THAN: {
+ SFltSclDatum start;
+ fltSclBuildDatumFromValueNode(&start, oper->valNode);
+ SFltSclPoint startPt = {.start = true, .excl = true, .val = start};
+ SFltSclDatum end = {.kind = FLT_SCL_DATUM_KIND_MAX, .type = oper->colNode->node.resType};
+ SFltSclPoint endPt = {.start = false, .excl = false, .val = end};
+ taosArrayPush(points, &startPt);
+ taosArrayPush(points, &endPt);
+ break;
+ }
+ case OP_TYPE_GREATER_EQUAL: {
+ SFltSclDatum start;
+ fltSclBuildDatumFromValueNode(&start, oper->valNode);
+ SFltSclPoint startPt = {.start = true, .excl = false, .val = start};
+ SFltSclDatum end = {.kind = FLT_SCL_DATUM_KIND_MAX, .type = oper->colNode->node.resType};
+ SFltSclPoint endPt = {.start = false, .excl = false, .val = end};
+ taosArrayPush(points, &startPt);
+ taosArrayPush(points, &endPt);
+ break;
+ }
+ case OP_TYPE_LOWER_THAN: {
+ SFltSclDatum end;
+ fltSclBuildDatumFromValueNode(&end, oper->valNode);
+ SFltSclPoint endPt = {.start = false, .excl = true, .val = end};
+ SFltSclDatum start = {.kind = FLT_SCL_DATUM_KIND_MIN, .type = oper->colNode->node.resType};
+ SFltSclPoint startPt = {.start = true, .excl = false, .val = start};
+ taosArrayPush(points, &startPt);
+ taosArrayPush(points, &endPt);
+ break;
+ }
+ case OP_TYPE_LOWER_EQUAL: {
+ SFltSclDatum end;
+ fltSclBuildDatumFromValueNode(&end, oper->valNode);
+ SFltSclPoint endPt = {.start = false, .excl = false, .val = end};
+ SFltSclDatum start = {.kind = FLT_SCL_DATUM_KIND_MIN, .type = oper->colNode->node.resType};
+ SFltSclPoint startPt = {.start = true, .excl = false, .val = start};
+ taosArrayPush(points, &startPt);
+ taosArrayPush(points, &endPt);
+ break;
+ }
+ case OP_TYPE_EQUAL: {
+ SFltSclDatum valDatum;
+ fltSclBuildDatumFromValueNode(&valDatum, oper->valNode);
+ SFltSclPoint startPt = {.start = true, .excl = false, .val = valDatum};
+ SFltSclPoint endPt = {.start = false, .excl = false, .val = valDatum};
+ taosArrayPush(points, &startPt);
+ taosArrayPush(points, &endPt);
+ break;
+ }
+ case OP_TYPE_NOT_EQUAL: {
+ SFltSclDatum valDatum;
+ fltSclBuildDatumFromValueNode(&valDatum, oper->valNode);
+ {
+ SFltSclDatum start = {.kind = FLT_SCL_DATUM_KIND_MIN, .type = oper->colNode->node.resType};
+ SFltSclPoint startPt = {.start = true, .excl = false, .val = start};
+ SFltSclPoint endPt = {.start = false, .excl = true, .val = valDatum};
+ taosArrayPush(points, &startPt);
+ taosArrayPush(points, &endPt);
+ }
+ {
+ SFltSclPoint startPt = {.start = true, .excl = true, .val = valDatum};
+ SFltSclDatum end = {.kind = FLT_SCL_DATUM_KIND_MAX, .type = oper->colNode->node.resType};
+ SFltSclPoint endPt = {.start = false, .excl = false, .val = end};
+ taosArrayPush(points, &startPt);
+ taosArrayPush(points, &endPt);
+ }
+ break;
+ }
+ case OP_TYPE_IS_NULL: {
+ SFltSclDatum nullDatum = {.kind = FLT_SCL_DATUM_KIND_NULL};
+ SFltSclPoint startPt = {.start = true, .excl = false, .val = nullDatum};
+ SFltSclPoint endPt = {.start = false, .excl = false, .val = nullDatum};
+ taosArrayPush(points, &startPt);
+ taosArrayPush(points, &endPt);
+ break;
+ }
+ case OP_TYPE_IS_NOT_NULL: {
+ SFltSclDatum minDatum = {.kind = FLT_SCL_DATUM_KIND_MIN, .type = oper->colNode->node.resType};
+ SFltSclPoint startPt = {.start = true, .excl = false, .val = minDatum};
+ SFltSclDatum maxDatum = {.kind = FLT_SCL_DATUM_KIND_MAX, .type = oper->colNode->node.resType};
+ SFltSclPoint endPt = {.start = false, .excl = false, .val = maxDatum};
+ taosArrayPush(points, &startPt);
+ taosArrayPush(points, &endPt);
+ break;
+ }
+ default: {
+ qError("not supported operator type : %d when build range points", oper->type);
+ break;
+ }
+ }
+ return TSDB_CODE_SUCCESS;
+}
+
+// TODO: process DNF composed of CNF
+int32_t fltSclProcessCNF(SArray *sclOpListCNF, SArray *colRangeList) {
+ size_t sz = taosArrayGetSize(sclOpListCNF);
+ for (int32_t i = 0; i < sz; ++i) {
+ SFltSclOperator *sclOper = taosArrayGet(sclOpListCNF, i);
+ SFltSclColumnRange *colRange = fltSclGetOrCreateColumnRange(sclOper->colNode, colRangeList);
+ SArray *points = taosArrayInit(4, sizeof(SFltSclPoint));
+ fltSclBuildRangePoints(sclOper, points);
+ if (taosArrayGetSize(colRange->points) != 0) {
+ SArray *merged = taosArrayInit(4, sizeof(SFltSclPoint));
+ int32_t code = fltSclIntersect(colRange->points, points, merged);
+ taosArrayDestroy(colRange->points);
+ taosArrayDestroy(points);
+ colRange->points = merged;
+ } else {
+ taosArrayDestroy(colRange->points);
+ colRange->points = points;
+ }
+ }
+ return TSDB_CODE_SUCCESS;
+}
+
+static bool fltSclIsCollectableNode(SNode *pNode) {
+ if (nodeType(pNode) != QUERY_NODE_OPERATOR) {
+ return false;
+ }
+
+ SOperatorNode *pOper = (SOperatorNode *)pNode;
+ if (pOper->pLeft == NULL || pOper->pRight == NULL) {
+ return false;
+ }
+
+ if (!(pOper->opType == OP_TYPE_GREATER_THAN || pOper->opType == OP_TYPE_GREATER_EQUAL ||
+ pOper->opType == OP_TYPE_LOWER_THAN || pOper->opType == OP_TYPE_LOWER_EQUAL ||
+ pOper->opType == OP_TYPE_NOT_EQUAL || pOper->opType == OP_TYPE_EQUAL)) {
+ return false;
+ }
+
+ if (!(nodeType(pOper->pLeft) == QUERY_NODE_COLUMN && nodeType(pOper->pRight) == QUERY_NODE_VALUE)) {
+ return false;
+ }
+ return true;
+}
+
+static int32_t fltSclCollectOperatorFromNode(SNode *pNode, SArray *sclOpList) {
+ if (!fltSclIsCollectableNode(pNode)) {
+ return TSDB_CODE_SUCCESS;
+ }
+
+ SOperatorNode *pOper = (SOperatorNode *)pNode;
+
+ SValueNode *valNode = (SValueNode *)pOper->pRight;
+ if (IS_NUMERIC_TYPE(valNode->node.resType.type) || valNode->node.resType.type == TSDB_DATA_TYPE_TIMESTAMP) {
+ SFltSclOperator sclOp = {.colNode = (SColumnNode *)nodesCloneNode(pOper->pLeft),
+ .valNode = (SValueNode *)nodesCloneNode(pOper->pRight),
+ .type = pOper->opType};
+ taosArrayPush(sclOpList, &sclOp);
+ }
+
+ return TSDB_CODE_SUCCESS;
+}
+
+static int32_t fltSclCollectOperatorsFromLogicCond(SNode *pNode, SArray *sclOpList) {
+ if (nodeType(pNode) != QUERY_NODE_LOGIC_CONDITION) {
+ return TSDB_CODE_SUCCESS;
+ }
+ SLogicConditionNode *pLogicCond = (SLogicConditionNode *)pNode;
+ // TODO: support LOGIC_COND_TYPE_OR
+ if (pLogicCond->condType != LOGIC_COND_TYPE_AND) {
+ return TSDB_CODE_SUCCESS;
+ }
+ SNode *pExpr = NULL;
+ FOREACH(pExpr, pLogicCond->pParameterList) {
+ if (!fltSclIsCollectableNode(pExpr)) {
+ return TSDB_CODE_SUCCESS;
+ }
+ }
+ FOREACH(pExpr, pLogicCond->pParameterList) { fltSclCollectOperatorFromNode(pExpr, sclOpList); }
+ return TSDB_CODE_SUCCESS;
+}
+
+static int32_t fltSclCollectOperators(SNode *pNode, SArray *sclOpList) {
+ if (nodeType(pNode) == QUERY_NODE_OPERATOR) {
+ fltSclCollectOperatorFromNode(pNode, sclOpList);
+ } else if (nodeType(pNode) == QUERY_NODE_LOGIC_CONDITION) {
+ fltSclCollectOperatorsFromLogicCond(pNode, sclOpList);
+ }
+ return TSDB_CODE_SUCCESS;
+}
+
int32_t fltOptimizeNodes(SFilterInfo *pInfo, SNode **pNode, SFltTreeStat *pStat) {
- // TODO
+ SArray *sclOpList = taosArrayInit(16, sizeof(SFltSclOperator));
+ fltSclCollectOperators(*pNode, sclOpList);
+ SArray *colRangeList = taosArrayInit(16, sizeof(SFltSclColumnRange));
+ fltSclProcessCNF(sclOpList, colRangeList);
+ pInfo->sclCtx.fltSclRange = colRangeList;
+
+ for (int32_t i = 0; i < taosArrayGetSize(sclOpList); ++i) {
+ SFltSclOperator *sclOp = taosArrayGet(sclOpList, i);
+ nodesDestroyNode((SNode *)sclOp->colNode);
+ nodesDestroyNode((SNode *)sclOp->valNode);
+ }
+ taosArrayDestroy(sclOpList);
return TSDB_CODE_SUCCESS;
}
@@ -4021,8 +4619,11 @@ int32_t filterInitFromNode(SNode *pNode, SFilterInfo **pInfo, uint32_t options)
stat.info = info;
FLT_ERR_JRET(fltReviseNodes(info, &pNode, &stat));
-
- info->scalarMode = stat.scalarMode;
+ if (tsFilterScalarMode) {
+ info->scalarMode = true;
+ } else {
+ info->scalarMode = stat.scalarMode;
+ }
fltDebug("scalar mode: %d", info->scalarMode);
if (!info->scalarMode) {
diff --git a/source/libs/scalar/src/sclvector.c b/source/libs/scalar/src/sclvector.c
index 4f76f93c6cd5288f012c451c108460ce62ac9eb6..b41eba293bdc64d646c8c39c1112eb19bb27ff49 100644
--- a/source/libs/scalar/src/sclvector.c
+++ b/source/libs/scalar/src/sclvector.c
@@ -1791,7 +1791,11 @@ void vectorNotMatch(SScalarParam *pLeft, SScalarParam *pRight, SScalarParam *pOu
void vectorIsNull(SScalarParam *pLeft, SScalarParam *pRight, SScalarParam *pOut, int32_t _ord) {
for (int32_t i = 0; i < pLeft->numOfRows; ++i) {
int8_t v = IS_HELPER_NULL(pLeft->columnData, i) ? 1 : 0;
+ if (v) {
+ ++pOut->numOfQualified;
+ }
colDataSetInt8(pOut->columnData, i, &v);
+ colDataClearNull_f(pOut->columnData->nullbitmap, i);
}
pOut->numOfRows = pLeft->numOfRows;
}
@@ -1799,7 +1803,11 @@ void vectorIsNull(SScalarParam *pLeft, SScalarParam *pRight, SScalarParam *pOut,
void vectorNotNull(SScalarParam *pLeft, SScalarParam *pRight, SScalarParam *pOut, int32_t _ord) {
for (int32_t i = 0; i < pLeft->numOfRows; ++i) {
int8_t v = IS_HELPER_NULL(pLeft->columnData, i) ? 0 : 1;
+ if (v) {
+ ++pOut->numOfQualified;
+ }
colDataSetInt8(pOut->columnData, i, &v);
+ colDataClearNull_f(pOut->columnData->nullbitmap, i);
}
pOut->numOfRows = pLeft->numOfRows;
}
@@ -1812,6 +1820,13 @@ void vectorIsTrue(SScalarParam *pLeft, SScalarParam *pRight, SScalarParam *pOut,
colDataSetInt8(pOut->columnData, i, &v);
colDataClearNull_f(pOut->columnData->nullbitmap, i);
}
+ {
+ bool v = false;
+ GET_TYPED_DATA(v, bool, pOut->columnData->info.type, colDataGetData(pOut->columnData, i));
+ if (v) {
+ ++pOut->numOfQualified;
+ }
+ }
}
pOut->columnData->hasNull = false;
}
@@ -1851,7 +1866,9 @@ void vectorJsonContains(SScalarParam *pLeft, SScalarParam *pRight, SScalarParam
char *pLeftData = colDataGetVarData(pLeft->columnData, i);
getJsonValue(pLeftData, jsonKey, &isExist);
}
-
+ if (isExist) {
+ ++pOut->numOfQualified;
+ }
colDataSetVal(pOutputCol, i, (const char *)(&isExist), false);
}
taosMemoryFree(jsonKey);
diff --git a/source/libs/scheduler/CMakeLists.txt b/source/libs/scheduler/CMakeLists.txt
index 3288120b67518aa532db7579a7677086899514c7..fafc2a27e0f48740926d250d47d5f34fc88e203c 100644
--- a/source/libs/scheduler/CMakeLists.txt
+++ b/source/libs/scheduler/CMakeLists.txt
@@ -9,7 +9,7 @@ target_include_directories(
target_link_libraries(
scheduler
- PUBLIC os util nodes planner qcom common catalog transport command qworker executor
+ PUBLIC os util nodes planner qcom common catalog transport command qworker executor index
)
if(${BUILD_TEST})
diff --git a/source/libs/stream/CMakeLists.txt b/source/libs/stream/CMakeLists.txt
index 2edbc44aaeb8e87c0dc51971a4c92c292549a87b..fa6c709c8ffce6122fc3508eb2844042973eb5e5 100644
--- a/source/libs/stream/CMakeLists.txt
+++ b/source/libs/stream/CMakeLists.txt
@@ -6,13 +6,23 @@ target_include_directories(
PRIVATE "${CMAKE_CURRENT_SOURCE_DIR}/inc"
)
+
if(${BUILD_WITH_ROCKSDB})
+ IF (TD_LINUX)
+ target_link_libraries(
+ stream
+ PUBLIC rocksdb-shared tdb
+ PRIVATE os util transport qcom executor wal index
+ )
+ ELSE()
target_link_libraries(
stream
PUBLIC rocksdb tdb
- PRIVATE os util transport qcom executor wal
+ PRIVATE os util transport qcom executor wal index
)
+ ENDIF()
+
target_include_directories(
stream
PUBLIC "${TD_SOURCE_DIR}/contrib/rocksdb/include"
diff --git a/source/libs/stream/inc/streamBackendRocksdb.h b/source/libs/stream/inc/streamBackendRocksdb.h
index 0f39cf817b1bbd191d9ab49d3456be9e1bfa1c66..1cbd7b042c3ee3bfc84e3448136f41b0bb9b63ba 100644
--- a/source/libs/stream/inc/streamBackendRocksdb.h
+++ b/source/libs/stream/inc/streamBackendRocksdb.h
@@ -17,7 +17,6 @@
#define _STREAM_BACKEDN_ROCKSDB_H_
#include "rocksdb/c.h"
-// #include "streamInc.h"
#include "streamState.h"
#include "tcoding.h"
#include "tcommon.h"
diff --git a/source/libs/stream/src/stream.c b/source/libs/stream/src/stream.c
index 8cc1ef1dd39db8db785aeab9417f759207d0a635..7457b2197e03b456012c63e624eacd170bfffcfa 100644
--- a/source/libs/stream/src/stream.c
+++ b/source/libs/stream/src/stream.c
@@ -126,14 +126,12 @@ int32_t streamTaskEnqueueBlocks(SStreamTask* pTask, const SStreamDispatchReq* pR
if (pBlock == NULL) {
streamTaskInputFail(pTask);
status = TASK_INPUT_STATUS__FAILED;
- qDebug("vgId:%d, s-task:%s failed to receive dispatch msg, reason: out of memory", pTask->pMeta->vgId,
+ qError("vgId:%d, s-task:%s failed to receive dispatch msg, reason: out of memory", pTask->pMeta->vgId,
pTask->id.idStr);
} else {
- if (tAppendDataToInputQueue(pTask, (SStreamQueueItem*)pBlock) == 0) {
- status = TASK_INPUT_STATUS__NORMAL;
- } else { // input queue is full, upstream is blocked now
- status = TASK_INPUT_STATUS__BLOCKED;
- }
+ int32_t code = tAppendDataToInputQueue(pTask, (SStreamQueueItem*)pBlock);
+ // input queue is full, upstream is blocked now
+ status = (code == TSDB_CODE_SUCCESS)? TASK_INPUT_STATUS__NORMAL:TASK_INPUT_STATUS__BLOCKED;
}
// rsp by input status
@@ -235,12 +233,11 @@ int32_t streamProcessDispatchMsg(SStreamTask* pTask, SStreamDispatchReq* pReq, S
}
int32_t streamProcessDispatchRsp(SStreamTask* pTask, SStreamDispatchRsp* pRsp, int32_t code) {
- ASSERT(pRsp->inputStatus == TASK_OUTPUT_STATUS__NORMAL || pRsp->inputStatus == TASK_OUTPUT_STATUS__BLOCKED);
- qDebug("s-task:%s receive dispatch rsp, code: %x", pTask->id.idStr, code);
+ qDebug("s-task:%s receive dispatch rsp, output status:%d code:%d", pTask->id.idStr, pRsp->inputStatus, code);
if (pTask->outputType == TASK_OUTPUT__SHUFFLE_DISPATCH) {
int32_t leftRsp = atomic_sub_fetch_32(&pTask->shuffleDispatcher.waitingRspCnt, 1);
- qDebug("task %d is shuffle, left waiting rsp %d", pTask->id.taskId, leftRsp);
+ qDebug("s-task:%s is shuffle, left waiting rsp %d", pTask->id.idStr, leftRsp);
if (leftRsp > 0) {
return 0;
}
@@ -248,13 +245,20 @@ int32_t streamProcessDispatchRsp(SStreamTask* pTask, SStreamDispatchRsp* pRsp, i
int8_t old = atomic_exchange_8(&pTask->outputStatus, pRsp->inputStatus);
ASSERT(old == TASK_OUTPUT_STATUS__WAIT);
+
+ // the input queue of the (down stream) task that receive the output data is full, so the TASK_INPUT_STATUS_BLOCKED is rsp
+ // todo we need to send EMPTY PACKAGE to detect if the input queue is available for output of upstream task, every 50 ms.
if (pRsp->inputStatus == TASK_INPUT_STATUS__BLOCKED) {
// TODO: init recover timer
- ASSERT(0);
+ qError("s-task:%s inputQ of downstream task:0x%x is full, need to block output", pTask->id.idStr, pRsp->downstreamTaskId);
+
+ atomic_store_8(&pTask->outputStatus, TASK_OUTPUT_STATUS__NORMAL);
+ qError("s-task:%s ignore error, and reset task output status:%d", pTask->id.idStr, pTask->outputStatus);
+
return 0;
}
- // continue dispatch one block to down stream in pipeline
+ // otherwise, continue dispatch the first block to down stream task in pipeline
streamDispatchStreamBlock(pTask);
return 0;
}
@@ -292,32 +296,44 @@ int32_t tAppendDataToInputQueue(SStreamTask* pTask, SStreamQueueItem* pItem) {
if (type == STREAM_INPUT__DATA_SUBMIT) {
SStreamDataSubmit* px = (SStreamDataSubmit*)pItem;
- qDebug("s-task:%s submit enqueue msgLen:%d ver:%" PRId64 ", total in queue:%d, size:%.2fMiB", pTask->id.idStr,
- px->submit.msgLen, px->submit.ver, total, size);
-
if ((pTask->taskLevel == TASK_LEVEL__SOURCE) && tInputQueueIsFull(pTask)) {
- qError("s-task:%s input queue is full, capacity(size:%d num:%dMiB), current(blocks:%d, size:%.2fMiB) abort",
+ qError("s-task:%s input queue is full, capacity(size:%d num:%dMiB), current(blocks:%d, size:%.2fMiB) stop to push data",
pTask->id.idStr, STREAM_TASK_INPUT_QUEUEU_CAPACITY, STREAM_TASK_INPUT_QUEUEU_CAPACITY_IN_SIZE, total,
size);
streamDataSubmitDestroy(px);
taosFreeQitem(pItem);
return -1;
}
- taosWriteQitem(pTask->inputQueue->queue, pItem);
+
+ int32_t code = taosWriteQitem(pTask->inputQueue->queue, pItem);
+ if (code != TSDB_CODE_SUCCESS) {
+ streamDataSubmitDestroy(px);
+ taosFreeQitem(pItem);
+ return code;
+ }
+
+ qDebug("s-task:%s submit enqueue msgLen:%d ver:%" PRId64 ", total in queue:%d, size:%.2fMiB", pTask->id.idStr,
+ px->submit.msgLen, px->submit.ver, total, size + px->submit.msgLen/1048576.0);
} else if (type == STREAM_INPUT__DATA_BLOCK || type == STREAM_INPUT__DATA_RETRIEVE ||
type == STREAM_INPUT__REF_DATA_BLOCK) {
if ((pTask->taskLevel == TASK_LEVEL__SOURCE) && (tInputQueueIsFull(pTask))) {
qError("s-task:%s input queue is full, capacity:%d size:%d MiB, current(blocks:%d, size:%.2fMiB) abort",
pTask->id.idStr, STREAM_TASK_INPUT_QUEUEU_CAPACITY, STREAM_TASK_INPUT_QUEUEU_CAPACITY_IN_SIZE, total,
size);
+ destroyStreamDataBlock((SStreamDataBlock*) pItem);
return -1;
}
qDebug("s-task:%s data block enqueue, current(blocks:%d, size:%.2fMiB)", pTask->id.idStr, total, size);
- taosWriteQitem(pTask->inputQueue->queue, pItem);
+ int32_t code = taosWriteQitem(pTask->inputQueue->queue, pItem);
+ if (code != TSDB_CODE_SUCCESS) {
+ destroyStreamDataBlock((SStreamDataBlock*) pItem);
+ return code;
+ }
} else if (type == STREAM_INPUT__CHECKPOINT) {
taosWriteQitem(pTask->inputQueue->queue, pItem);
} else if (type == STREAM_INPUT__GET_RES) {
+ // use the default memory limit, refactor later.
taosWriteQitem(pTask->inputQueue->queue, pItem);
qDebug("s-task:%s data res enqueue, current(blocks:%d, size:%.2fMiB)", pTask->id.idStr, total, size);
}
diff --git a/source/libs/stream/src/streamBackendRocksdb.c b/source/libs/stream/src/streamBackendRocksdb.c
index 16ba81c74a610c761d6ad697b14a2db2ff5cba24..b3995f020b1147f8ec3fccd3109f022af72a5729 100644
--- a/source/libs/stream/src/streamBackendRocksdb.c
+++ b/source/libs/stream/src/streamBackendRocksdb.c
@@ -22,6 +22,9 @@ typedef struct SCompactFilteFactory {
void* status;
} SCompactFilteFactory;
+typedef struct {
+ void* tableOpt;
+} RocksdbCfParam;
typedef struct {
rocksdb_t* db;
rocksdb_column_family_handle_t** pHandle;
@@ -29,12 +32,22 @@ typedef struct {
rocksdb_readoptions_t* rOpt;
rocksdb_options_t** cfOpt;
rocksdb_options_t* dbOpt;
- void* param;
- void* pBackendHandle;
+ RocksdbCfParam* param;
+ void* pBackend;
SListNode* pCompareNode;
+ rocksdb_comparator_t** pCompares;
} RocksdbCfInst;
-int32_t streamStateOpenBackendCf(void* backend, char* name, SHashObj* ids);
+uint32_t nextPow2(uint32_t x) {
+ x = x - 1;
+ x = x | (x >> 1);
+ x = x | (x >> 2);
+ x = x | (x >> 4);
+ x = x | (x >> 8);
+ x = x | (x >> 16);
+ return x + 1;
+}
+int32_t streamStateOpenBackendCf(void* backend, char* name, char** cfs, int32_t nCf);
void destroyRocksdbCfInst(RocksdbCfInst* inst);
@@ -46,9 +59,6 @@ unsigned char compactFilte(void* arg, int level, const char* key, size_t klen, c
char** newval, size_t* newvlen, unsigned char* value_changed);
rocksdb_compactionfilter_t* compactFilteFactoryCreateFilter(void* arg, rocksdb_compactionfiltercontext_t* ctx);
-typedef struct {
- void* tableOpt;
-} RocksdbCfParam;
const char* cfName[] = {"default", "state", "fill", "sess", "func", "parname", "partag"};
typedef int (*EncodeFunc)(void* key, char* buf);
@@ -77,20 +87,20 @@ void* streamBackendInit(const char* path) {
pHandle->cfInst = taosHashInit(64, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), false, HASH_NO_LOCK);
rocksdb_env_t* env = rocksdb_create_default_env(); // rocksdb_envoptions_create();
- rocksdb_env_set_low_priority_background_threads(env, 4);
- rocksdb_env_set_high_priority_background_threads(env, 2);
- rocksdb_cache_t* cache = rocksdb_cache_create_lru(128 << 20);
+ rocksdb_cache_t* cache = rocksdb_cache_create_lru(64 << 20);
rocksdb_options_t* opts = rocksdb_options_create();
rocksdb_options_set_env(opts, env);
rocksdb_options_set_create_if_missing(opts, 1);
rocksdb_options_set_create_missing_column_families(opts, 1);
- rocksdb_options_set_write_buffer_size(opts, 128 << 20);
+ rocksdb_options_set_write_buffer_size(opts, 48 << 20);
rocksdb_options_set_max_total_wal_size(opts, 128 << 20);
rocksdb_options_set_recycle_log_file_num(opts, 6);
- rocksdb_options_set_max_write_buffer_number(opts, 3);
+ rocksdb_options_set_max_write_buffer_number(opts, 2);
rocksdb_options_set_info_log_level(opts, 0);
+ uint32_t dbLimit = nextPow2(tsMaxStreamBackendCache);
+ rocksdb_options_set_db_write_buffer_size(opts, dbLimit << 20);
pHandle->env = env;
pHandle->dbOpt = opts;
@@ -114,27 +124,11 @@ void* streamBackendInit(const char* path) {
/*
list all cf and get prefix
*/
- int64_t streamId;
- int32_t taskId, dummpy = 0;
- SHashObj* tbl = taosHashInit(32, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), false, HASH_NO_LOCK);
- for (size_t i = 0; i < nCf; i++) {
- char* cf = cfs[i];
- char suffix[64] = {0};
- if (3 == sscanf(cf, "0x%" PRIx64 "-%d_%s", &streamId, &taskId, suffix)) {
- char idstr[128] = {0};
- sprintf(idstr, "0x%" PRIx64 "-%d", streamId, taskId);
- // qError("make cf name %s", idstr);
- if (taosHashGet(tbl, idstr, strlen(idstr) + 1) == NULL) {
- taosHashPut(tbl, idstr, strlen(idstr) + 1, &dummpy, sizeof(dummpy));
- }
- } else {
- continue;
- }
- }
- streamStateOpenBackendCf(pHandle, (char*)path, tbl);
- taosHashCleanup(tbl);
+ streamStateOpenBackendCf(pHandle, (char*)path, cfs, nCf);
+ }
+ if (cfs != NULL) {
+ rocksdb_list_column_families_destroy(cfs, nCf);
}
- rocksdb_list_column_families_destroy(cfs, nCf);
return (void*)pHandle;
_EXIT:
@@ -159,16 +153,17 @@ void streamBackendCleanup(void* arg) {
}
taosHashCleanup(pHandle->cfInst);
- rocksdb_flushoptions_t* flushOpt = rocksdb_flushoptions_create();
- char* err = NULL;
- rocksdb_flush(pHandle->db, flushOpt, &err);
- if (err != NULL) {
- qError("failed to flush db before streamBackend clean up, reason:%s", err);
- taosMemoryFree(err);
+ if (pHandle->db) {
+ char* err = NULL;
+ rocksdb_flushoptions_t* flushOpt = rocksdb_flushoptions_create();
+ rocksdb_flush(pHandle->db, flushOpt, &err);
+ if (err != NULL) {
+ qError("failed to flush db before streamBackend clean up, reason:%s", err);
+ taosMemoryFree(err);
+ }
+ rocksdb_flushoptions_destroy(flushOpt);
+ rocksdb_close(pHandle->db);
}
- rocksdb_flushoptions_destroy(flushOpt);
-
- rocksdb_close(pHandle->db);
rocksdb_options_destroy(pHandle->dbOpt);
rocksdb_env_destroy(pHandle->env);
rocksdb_cache_destroy(pHandle->cache);
@@ -209,7 +204,7 @@ void streamBackendDelCompare(void* backend, void* arg) {
}
void streamStateDestroy_rocksdb(SStreamState* pState, bool remove) { streamStateCloseBackend(pState, remove); }
static bool streamStateIterSeekAndValid(rocksdb_iterator_t* iter, char* buf, size_t len);
-int streamGetInit(const char* funcName);
+int streamGetInit(SStreamState* pState, const char* funcName);
// |key|-----value------|
// |key|ttl|len|userData|
@@ -577,9 +572,14 @@ int32_t decodeValueFunc(void* value, int32_t vlen, int64_t* ttl, char** dest) {
*dest = NULL;
return -1;
}
- int64_t now = taosGetTimestampMs();
p = taosDecodeFixedI64(p, &key.unixTimestamp);
p = taosDecodeFixedI32(p, &key.len);
+ if (vlen != (sizeof(int64_t) + sizeof(int32_t) + key.len)) {
+ if (dest != NULL) *dest = NULL;
+ qError("vlen: %d, read len: %d", vlen, key.len);
+ return -1;
+ }
+
if (key.len == 0) {
key.data = NULL;
} else {
@@ -587,6 +587,7 @@ int32_t decodeValueFunc(void* value, int32_t vlen, int64_t* ttl, char** dest) {
}
if (ttl != NULL) {
+ int64_t now = taosGetTimestampMs();
*ttl = key.unixTimestamp == 0 ? 0 : key.unixTimestamp - now;
}
if (dest != NULL) {
@@ -667,7 +668,7 @@ rocksdb_compactionfilter_t* compactFilteFactoryCreateFilter(void* arg, rocksdb_c
void destroyRocksdbCfInst(RocksdbCfInst* inst) {
int cfLen = sizeof(ginitDict) / sizeof(ginitDict[0]);
for (int i = 0; i < cfLen; i++) {
- rocksdb_column_family_handle_destroy(inst->pHandle[i]);
+ if (inst->pHandle[i]) rocksdb_column_family_handle_destroy((inst->pHandle)[i]);
}
rocksdb_writeoptions_destroy(inst->wOpt);
@@ -675,118 +676,130 @@ void destroyRocksdbCfInst(RocksdbCfInst* inst) {
rocksdb_readoptions_destroy(inst->rOpt);
taosMemoryFree(inst->cfOpt);
- taosMemoryFree(inst->param);
taosMemoryFreeClear(inst->param);
taosMemoryFree(inst);
}
-int32_t streamStateOpenBackendCf(void* backend, char* name, SHashObj* ids) {
+int32_t streamStateOpenBackendCf(void* backend, char* name, char** cfs, int32_t nCf) {
SBackendHandle* handle = backend;
char* err = NULL;
- size_t nSize = taosHashGetSize(ids);
- int cfLen = sizeof(ginitDict) / sizeof(ginitDict[0]);
-
- char** cfNames = taosMemoryCalloc(nSize * cfLen + 1, sizeof(char*));
- void* pIter = taosHashIterate(ids, NULL);
- size_t keyLen = 0;
- char* idstr = taosHashGetKey(pIter, &keyLen);
- for (int i = 0; i < nSize * cfLen + 1; i++) {
- cfNames[i] = (char*)taosMemoryCalloc(1, 128);
- if (i == 0) {
- memcpy(cfNames[0], "default", strlen("default"));
- continue;
- }
-
- GEN_COLUMN_FAMILY_NAME(cfNames[i], idstr, ginitDict[(i - 1) % (cfLen)].key);
- if (i % cfLen == 0) {
- pIter = taosHashIterate(ids, pIter);
- if (pIter != NULL) idstr = taosHashGetKey(pIter, &keyLen);
- }
- }
- rocksdb_options_t** cfOpts = taosMemoryCalloc(nSize * cfLen + 1, sizeof(rocksdb_options_t*));
- RocksdbCfParam* params = taosMemoryCalloc(nSize * cfLen + 1, sizeof(RocksdbCfParam*));
- for (int i = 0; i < nSize * cfLen + 1; i++) {
+ int64_t streamId;
+ int32_t taskId, dummy = 0;
+ char suffix[64] = {0};
+
+ rocksdb_options_t** cfOpts = taosMemoryCalloc(nCf, sizeof(rocksdb_options_t*));
+ RocksdbCfParam* params = taosMemoryCalloc(nCf, sizeof(RocksdbCfParam*));
+ rocksdb_comparator_t** pCompare = taosMemoryCalloc(nCf, sizeof(rocksdb_comparator_t**));
+ rocksdb_column_family_handle_t** cfHandle = taosMemoryCalloc(nCf, sizeof(rocksdb_column_family_handle_t*));
+
+ for (int i = 0; i < nCf; i++) {
+ char* cf = cfs[i];
+ char funcname[64] = {0};
cfOpts[i] = rocksdb_options_create_copy(handle->dbOpt);
- if (i == 0) {
- continue;
- }
- // refactor later
- rocksdb_block_based_table_options_t* tableOpt = rocksdb_block_based_options_create();
- rocksdb_block_based_options_set_block_cache(tableOpt, handle->cache);
+ if (i == 0) continue;
+ if (3 == sscanf(cf, "0x%" PRIx64 "-%d_%s", &streamId, &taskId, funcname)) {
+ rocksdb_block_based_table_options_t* tableOpt = rocksdb_block_based_options_create();
+ rocksdb_block_based_options_set_block_cache(tableOpt, handle->cache);
- rocksdb_filterpolicy_t* filter = rocksdb_filterpolicy_create_bloom(15);
- rocksdb_block_based_options_set_filter_policy(tableOpt, filter);
+ rocksdb_filterpolicy_t* filter = rocksdb_filterpolicy_create_bloom(15);
+ rocksdb_block_based_options_set_filter_policy(tableOpt, filter);
- rocksdb_options_set_block_based_table_factory((rocksdb_options_t*)cfOpts[i], tableOpt);
- params[i].tableOpt = tableOpt;
- };
+ rocksdb_options_set_block_based_table_factory((rocksdb_options_t*)cfOpts[i], tableOpt);
+ params[i].tableOpt = tableOpt;
- rocksdb_comparator_t** pCompare = taosMemoryCalloc(nSize * cfLen + 1, sizeof(rocksdb_comparator_t**));
- for (int i = 0; i < nSize * cfLen + 1; i++) {
- if (i == 0) {
- continue;
- }
- SCfInit* cf = &ginitDict[(i - 1) % cfLen];
+ int idx = streamGetInit(NULL, funcname);
+ SCfInit* cfPara = &ginitDict[idx];
- rocksdb_comparator_t* compare = rocksdb_comparator_create(NULL, cf->detroyFunc, cf->cmpFunc, cf->cmpName);
- rocksdb_options_set_comparator((rocksdb_options_t*)cfOpts[i], compare);
- pCompare[i] = compare;
+ rocksdb_comparator_t* compare =
+ rocksdb_comparator_create(NULL, cfPara->detroyFunc, cfPara->cmpFunc, cfPara->cmpName);
+ rocksdb_options_set_comparator((rocksdb_options_t*)cfOpts[i], compare);
+ pCompare[i] = compare;
+ }
}
- rocksdb_column_family_handle_t** cfHandle =
- taosMemoryCalloc(nSize * cfLen + 1, sizeof(rocksdb_column_family_handle_t*));
- rocksdb_t* db = rocksdb_open_column_families(handle->dbOpt, name, nSize * cfLen + 1, (const char* const*)cfNames,
+ rocksdb_t* db = rocksdb_open_column_families(handle->dbOpt, name, nCf, (const char* const*)cfs,
(const rocksdb_options_t* const*)cfOpts, cfHandle, &err);
if (err != NULL) {
qError("failed to open rocksdb cf, reason:%s", err);
taosMemoryFree(err);
} else {
- qDebug("succ to open rocksdb cf, reason:%s", err);
- }
-
- pIter = taosHashIterate(ids, NULL);
- idstr = taosHashGetKey(pIter, &keyLen);
- for (int i = 0; i < nSize; i++) {
- RocksdbCfInst* inst = taosMemoryCalloc(1, sizeof(RocksdbCfInst));
- rocksdb_column_family_handle_t** subCf = taosMemoryCalloc(cfLen, sizeof(rocksdb_column_family_handle_t*));
- rocksdb_comparator_t** subCompare = taosMemoryCalloc(cfLen, sizeof(rocksdb_comparator_t*));
- RocksdbCfParam* subParam = taosMemoryCalloc(cfLen, sizeof(RocksdbCfParam));
- rocksdb_options_t** subOpt = taosMemoryCalloc(cfLen, sizeof(rocksdb_options_t*));
- for (int j = 0; j < cfLen; j++) {
- subCf[j] = cfHandle[i * cfLen + j + 1];
- subCompare[j] = pCompare[i * cfLen + j + 1];
- subParam[j] = params[i * cfLen + j + 1];
- subOpt[j] = cfOpts[i * cfLen + j + 1];
+ qDebug("succ to open rocksdb cf");
+ }
+ // close default cf
+ if (((rocksdb_column_family_handle_t**)cfHandle)[0] != 0) rocksdb_column_family_handle_destroy(cfHandle[0]);
+ rocksdb_options_destroy(cfOpts[0]);
+ handle->db = db;
+
+ static int32_t cfLen = sizeof(ginitDict) / sizeof(ginitDict[0]);
+ for (int i = 0; i < nCf; i++) {
+ char* cf = cfs[i];
+ if (i == 0) continue;
+ char funcname[64] = {0};
+ if (3 == sscanf(cf, "0x%" PRIx64 "-%d_%s", &streamId, &taskId, funcname)) {
+ char idstr[128] = {0};
+ sprintf(idstr, "0x%" PRIx64 "-%d", streamId, taskId);
+
+ int idx = streamGetInit(NULL, funcname);
+
+ RocksdbCfInst* inst = NULL;
+ RocksdbCfInst** pInst = taosHashGet(handle->cfInst, idstr, strlen(idstr) + 1);
+ if (pInst == NULL || *pInst == NULL) {
+ inst = taosMemoryCalloc(1, sizeof(RocksdbCfInst));
+ inst->pHandle = taosMemoryCalloc(cfLen, sizeof(rocksdb_column_family_handle_t*));
+ inst->cfOpt = taosMemoryCalloc(cfLen, sizeof(rocksdb_options_t*));
+ inst->wOpt = rocksdb_writeoptions_create();
+ inst->rOpt = rocksdb_readoptions_create();
+ inst->param = taosMemoryCalloc(cfLen, sizeof(RocksdbCfParam));
+ inst->pBackend = handle;
+ inst->db = db;
+ inst->pCompares = taosMemoryCalloc(cfLen, sizeof(rocksdb_comparator_t*));
+
+ inst->dbOpt = handle->dbOpt;
+ rocksdb_writeoptions_disable_WAL(inst->wOpt, 1);
+ taosHashPut(handle->cfInst, idstr, strlen(idstr) + 1, &inst, sizeof(void*));
+ } else {
+ inst = *pInst;
+ }
+ inst->cfOpt[idx] = cfOpts[i];
+ inst->pCompares[idx] = pCompare[i];
+ memcpy(&(inst->param[idx]), &(params[i]), sizeof(RocksdbCfParam));
+ inst->pHandle[idx] = cfHandle[i];
}
- inst->db = db;
- inst->pHandle = subCf;
- inst->wOpt = rocksdb_writeoptions_create();
- inst->rOpt = rocksdb_readoptions_create();
- inst->cfOpt = (rocksdb_options_t**)subOpt;
- inst->dbOpt = handle->dbOpt;
- inst->param = subParam;
- inst->pBackendHandle = handle;
- handle->db = db;
- SCfComparator compare = {.comp = subCompare, .numOfComp = cfLen};
- inst->pCompareNode = streamBackendAddCompare(handle, &compare);
- rocksdb_writeoptions_disable_WAL(inst->wOpt, 1);
+ }
+ void** pIter = taosHashIterate(handle->cfInst, NULL);
+ while (pIter) {
+ RocksdbCfInst* inst = *pIter;
+
+ for (int i = 0; i < cfLen; i++) {
+ if (inst->cfOpt[i] == NULL) {
+ rocksdb_options_t* opt = rocksdb_options_create_copy(handle->dbOpt);
+ rocksdb_block_based_table_options_t* tableOpt = rocksdb_block_based_options_create();
+ rocksdb_block_based_options_set_block_cache(tableOpt, handle->cache);
- taosHashPut(handle->cfInst, idstr, keyLen, &inst, sizeof(void*));
+ rocksdb_filterpolicy_t* filter = rocksdb_filterpolicy_create_bloom(15);
+ rocksdb_block_based_options_set_filter_policy(tableOpt, filter);
- pIter = taosHashIterate(ids, pIter);
- if (pIter != NULL) idstr = taosHashGetKey(pIter, &keyLen);
- }
- rocksdb_column_family_handle_destroy(cfHandle[0]);
- rocksdb_options_destroy(cfOpts[0]);
+ rocksdb_options_set_block_based_table_factory((rocksdb_options_t*)opt, tableOpt);
- for (int i = 0; i < nSize * cfLen + 1; i++) {
- taosMemoryFree(cfNames[i]);
+ SCfInit* cfPara = &ginitDict[i];
+
+ rocksdb_comparator_t* compare =
+ rocksdb_comparator_create(NULL, cfPara->detroyFunc, cfPara->cmpFunc, cfPara->cmpName);
+ rocksdb_options_set_comparator((rocksdb_options_t*)opt, compare);
+
+ inst->pCompares[i] = compare;
+ inst->cfOpt[i] = opt;
+ inst->param[i].tableOpt = tableOpt;
+ }
+ }
+ SCfComparator compare = {.comp = inst->pCompares, .numOfComp = cfLen};
+ inst->pCompareNode = streamBackendAddCompare(handle, &compare);
+ pIter = taosHashIterate(handle->cfInst, pIter);
}
- taosMemoryFree(cfNames);
+
taosMemoryFree(cfHandle);
taosMemoryFree(pCompare);
taosMemoryFree(params);
taosMemoryFree(cfOpts);
-
return 0;
}
int streamStateOpenBackend(void* backend, SStreamState* pState) {
@@ -799,13 +812,13 @@ int streamStateOpenBackend(void* backend, SStreamState* pState) {
if (ppInst != NULL && *ppInst != NULL) {
RocksdbCfInst* inst = *ppInst;
pState->pTdbState->rocksdb = inst->db;
- pState->pTdbState->pHandle = inst->pHandle;
+ pState->pTdbState->pHandle = (void**)inst->pHandle;
pState->pTdbState->writeOpts = inst->wOpt;
pState->pTdbState->readOpts = inst->rOpt;
- pState->pTdbState->cfOpts = inst->cfOpt;
+ pState->pTdbState->cfOpts = (void**)(inst->cfOpt);
pState->pTdbState->dbOpt = handle->dbOpt;
pState->pTdbState->param = inst->param;
- pState->pTdbState->pBackendHandle = handle;
+ pState->pTdbState->pBackend = handle;
pState->pTdbState->pComparNode = inst->pCompareNode;
taosThreadMutexUnlock(&handle->cfMutex);
return 0;
@@ -839,25 +852,17 @@ int streamStateOpenBackend(void* backend, SStreamState* pState) {
rocksdb_options_set_comparator((rocksdb_options_t*)cfOpt[i], compare);
pCompare[i] = compare;
}
- rocksdb_column_family_handle_t** cfHandle = taosMemoryMalloc(cfLen * sizeof(rocksdb_column_family_handle_t*));
- for (int i = 0; i < cfLen; i++) {
- char buf[128] = {0};
- GEN_COLUMN_FAMILY_NAME(buf, pState->pTdbState->idstr, ginitDict[i].key);
- cfHandle[i] = rocksdb_create_column_family(handle->db, cfOpt[i], buf, &err);
- if (err != NULL) {
- qError("failed to create cf:%s_%s, reason:%s", pState->pTdbState->idstr, ginitDict[i].key, err);
- taosMemoryFreeClear(err);
- }
- }
+ rocksdb_column_family_handle_t** cfHandle = taosMemoryCalloc(cfLen, sizeof(rocksdb_column_family_handle_t*));
pState->pTdbState->rocksdb = handle->db;
- pState->pTdbState->pHandle = cfHandle;
+ pState->pTdbState->pHandle = (void**)cfHandle;
pState->pTdbState->writeOpts = rocksdb_writeoptions_create();
pState->pTdbState->readOpts = rocksdb_readoptions_create();
- pState->pTdbState->cfOpts = (rocksdb_options_t**)cfOpt;
+ pState->pTdbState->cfOpts = (void**)cfOpt;
pState->pTdbState->dbOpt = handle->dbOpt;
pState->pTdbState->param = param;
- pState->pTdbState->pBackendHandle = handle;
+ pState->pTdbState->pBackend = handle;
+ taosThreadRwlockInit(&pState->pTdbState->rwLock, NULL);
SCfComparator compare = {.comp = pCompare, .numOfComp = cfLen};
pState->pTdbState->pComparNode = streamBackendAddCompare(handle, &compare);
// rocksdb_writeoptions_disable_WAL(pState->pTdbState->writeOpts, 1);
@@ -866,7 +871,7 @@ int streamStateOpenBackend(void* backend, SStreamState* pState) {
}
void streamStateCloseBackend(SStreamState* pState, bool remove) {
- SBackendHandle* pHandle = pState->pTdbState->pBackendHandle;
+ SBackendHandle* pHandle = pState->pTdbState->pBackend;
taosThreadMutexLock(&pHandle->cfMutex);
RocksdbCfInst** ppInst = taosHashGet(pHandle->cfInst, pState->pTdbState->idstr, strlen(pState->pTdbState->idstr) + 1);
if (ppInst != NULL && *ppInst != NULL) {
@@ -888,7 +893,9 @@ void streamStateCloseBackend(SStreamState* pState, bool remove) {
char* err = NULL;
if (remove) {
for (int i = 0; i < cfLen; i++) {
- rocksdb_drop_column_family(pState->pTdbState->rocksdb, pState->pTdbState->pHandle[i], &err);
+ if (pState->pTdbState->pHandle[i] != NULL)
+ rocksdb_drop_column_family(pState->pTdbState->rocksdb,
+ ((rocksdb_column_family_handle_t**)pState->pTdbState->pHandle)[i], &err);
if (err != NULL) {
qError("failed to create cf:%s_%s, reason:%s", pState->pTdbState->idstr, ginitDict[i].key, err);
taosMemoryFreeClear(err);
@@ -897,7 +904,8 @@ void streamStateCloseBackend(SStreamState* pState, bool remove) {
} else {
rocksdb_flushoptions_t* flushOpt = rocksdb_flushoptions_create();
for (int i = 0; i < cfLen; i++) {
- rocksdb_flush_cf(pState->pTdbState->rocksdb, flushOpt, pState->pTdbState->pHandle[i], &err);
+ if (pState->pTdbState->pHandle[i] != NULL)
+ rocksdb_flush_cf(pState->pTdbState->rocksdb, flushOpt, pState->pTdbState->pHandle[i], &err);
if (err != NULL) {
qError("failed to create cf:%s_%s, reason:%s", pState->pTdbState->idstr, ginitDict[i].key, err);
taosMemoryFreeClear(err);
@@ -907,7 +915,9 @@ void streamStateCloseBackend(SStreamState* pState, bool remove) {
}
for (int i = 0; i < cfLen; i++) {
- rocksdb_column_family_handle_destroy(pState->pTdbState->pHandle[i]);
+ if (pState->pTdbState->pHandle[i] != NULL) {
+ rocksdb_column_family_handle_destroy(pState->pTdbState->pHandle[i]);
+ }
}
taosMemoryFreeClear(pState->pTdbState->pHandle);
for (int i = 0; i < cfLen; i++) {
@@ -916,7 +926,7 @@ void streamStateCloseBackend(SStreamState* pState, bool remove) {
}
if (remove) {
- streamBackendDelCompare(pState->pTdbState->pBackendHandle, pState->pTdbState->pComparNode);
+ streamBackendDelCompare(pState->pTdbState->pBackend, pState->pTdbState->pComparNode);
}
rocksdb_writeoptions_destroy(pState->pTdbState->writeOpts);
pState->pTdbState->writeOpts = NULL;
@@ -925,24 +935,52 @@ void streamStateCloseBackend(SStreamState* pState, bool remove) {
pState->pTdbState->readOpts = NULL;
taosMemoryFreeClear(pState->pTdbState->cfOpts);
taosMemoryFreeClear(pState->pTdbState->param);
+
+ taosThreadRwlockDestroy(&pState->pTdbState->rwLock);
pState->pTdbState->rocksdb = NULL;
}
void streamStateDestroyCompar(void* arg) {
SCfComparator* comp = (SCfComparator*)arg;
for (int i = 0; i < comp->numOfComp; i++) {
- rocksdb_comparator_destroy(comp->comp[i]);
+ if (comp->comp[i]) rocksdb_comparator_destroy(comp->comp[i]);
}
taosMemoryFree(comp->comp);
}
-int streamGetInit(const char* funcName) {
+int streamGetInit(SStreamState* pState, const char* funcName) {
+ int idx = -1;
size_t len = strlen(funcName);
for (int i = 0; i < sizeof(ginitDict) / sizeof(ginitDict[0]); i++) {
if (len == ginitDict[i].len && strncmp(funcName, ginitDict[i].key, strlen(funcName)) == 0) {
- return i;
+ idx = i;
+ break;
}
}
- return -1;
+ if (pState != NULL && idx != -1) {
+ rocksdb_column_family_handle_t* cf = NULL;
+ taosThreadRwlockRdlock(&pState->pTdbState->rwLock);
+ cf = pState->pTdbState->pHandle[idx];
+ taosThreadRwlockUnlock(&pState->pTdbState->rwLock);
+ if (cf == NULL) {
+ char buf[128] = {0};
+ GEN_COLUMN_FAMILY_NAME(buf, pState->pTdbState->idstr, ginitDict[idx].key);
+ char* err = NULL;
+
+ taosThreadRwlockWrlock(&pState->pTdbState->rwLock);
+ cf = rocksdb_create_column_family(pState->pTdbState->rocksdb, pState->pTdbState->cfOpts[idx], buf, &err);
+ if (err != NULL) {
+ idx = -1;
+ qError("failed to to open cf, %p 0x%" PRIx64 "-%d_%s, reason:%s", pState, pState->streamId, pState->taskId,
+ funcName, err);
+ taosMemoryFree(err);
+ } else {
+ pState->pTdbState->pHandle[idx] = cf;
+ }
+ taosThreadRwlockUnlock(&pState->pTdbState->rwLock);
+ }
+ }
+
+ return idx;
}
bool streamStateIterSeekAndValid(rocksdb_iterator_t* iter, char* buf, size_t len) {
rocksdb_iter_seek(iter, buf, len);
@@ -956,7 +994,7 @@ bool streamStateIterSeekAndValid(rocksdb_iterator_t* iter, char* buf, size_t len
}
rocksdb_iterator_t* streamStateIterCreate(SStreamState* pState, const char* cfName, rocksdb_snapshot_t** snapshot,
rocksdb_readoptions_t** readOpt) {
- int idx = streamGetInit(cfName);
+ int idx = streamGetInit(pState, cfName);
if (snapshot != NULL) {
*snapshot = (rocksdb_snapshot_t*)rocksdb_create_snapshot(pState->pTdbState->rocksdb);
@@ -967,37 +1005,39 @@ rocksdb_iterator_t* streamStateIterCreate(SStreamState* pState, const char* cfNa
rocksdb_readoptions_set_snapshot(rOpt, *snapshot);
rocksdb_readoptions_set_fill_cache(rOpt, 0);
- return rocksdb_create_iterator_cf(pState->pTdbState->rocksdb, rOpt, pState->pTdbState->pHandle[idx]);
-}
-
-#define STREAM_STATE_PUT_ROCKSDB(pState, funcname, key, value, vLen) \
- do { \
- code = 0; \
- char buf[128] = {0}; \
- char* err = NULL; \
- int i = streamGetInit(funcname); \
- if (i < 0) { \
- qWarn("streamState failed to get cf name: %s", funcname); \
- code = -1; \
- break; \
- } \
- char toString[128] = {0}; \
- if (qDebugFlag & DEBUG_TRACE) ginitDict[i].toStrFunc((void*)key, toString); \
- int32_t klen = ginitDict[i].enFunc((void*)key, buf); \
- rocksdb_column_family_handle_t* pHandle = pState->pTdbState->pHandle[ginitDict[i].idx]; \
- rocksdb_t* db = pState->pTdbState->rocksdb; \
- rocksdb_writeoptions_t* opts = pState->pTdbState->writeOpts; \
- char* ttlV = NULL; \
- int32_t ttlVLen = ginitDict[i].enValueFunc((char*)value, vLen, 0, &ttlV); \
- rocksdb_put_cf(db, opts, pHandle, (const char*)buf, klen, (const char*)ttlV, (size_t)ttlVLen, &err); \
- if (err != NULL) { \
- taosMemoryFree(err); \
- qDebug("streamState str: %s failed to write to %s, err: %s", toString, funcname, err); \
- code = -1; \
- } else { \
- qDebug("streamState str:%s succ to write to %s, valLen:%d", toString, funcname, vLen); \
- } \
- taosMemoryFree(ttlV); \
+ return rocksdb_create_iterator_cf(pState->pTdbState->rocksdb, rOpt,
+ ((rocksdb_column_family_handle_t**)pState->pTdbState->pHandle)[idx]);
+}
+
+#define STREAM_STATE_PUT_ROCKSDB(pState, funcname, key, value, vLen) \
+ do { \
+ code = 0; \
+ char buf[128] = {0}; \
+ char* err = NULL; \
+ int i = streamGetInit(pState, funcname); \
+ if (i < 0) { \
+ qWarn("streamState failed to get cf name: %s", funcname); \
+ code = -1; \
+ break; \
+ } \
+ char toString[128] = {0}; \
+ if (qDebugFlag & DEBUG_TRACE) ginitDict[i].toStrFunc((void*)key, toString); \
+ int32_t klen = ginitDict[i].enFunc((void*)key, buf); \
+ rocksdb_column_family_handle_t* pHandle = \
+ ((rocksdb_column_family_handle_t**)pState->pTdbState->pHandle)[ginitDict[i].idx]; \
+ rocksdb_t* db = pState->pTdbState->rocksdb; \
+ rocksdb_writeoptions_t* opts = pState->pTdbState->writeOpts; \
+ char* ttlV = NULL; \
+ int32_t ttlVLen = ginitDict[i].enValueFunc((char*)value, vLen, 0, &ttlV); \
+ rocksdb_put_cf(db, opts, pHandle, (const char*)buf, klen, (const char*)ttlV, (size_t)ttlVLen, &err); \
+ if (err != NULL) { \
+ taosMemoryFree(err); \
+ qError("streamState str: %s failed to write to %s, err: %s", toString, funcname, err); \
+ code = -1; \
+ } else { \
+ qTrace("streamState str:%s succ to write to %s, rowValLen:%d, ttlValLen:%d", toString, funcname, vLen, ttlVLen); \
+ } \
+ taosMemoryFree(ttlV); \
} while (0);
#define STREAM_STATE_GET_ROCKSDB(pState, funcname, key, pVal, vLen) \
@@ -1005,7 +1045,7 @@ rocksdb_iterator_t* streamStateIterCreate(SStreamState* pState, const char* cfNa
code = 0; \
char buf[128] = {0}; \
char* err = NULL; \
- int i = streamGetInit(funcname); \
+ int i = streamGetInit(pState, funcname); \
if (i < 0) { \
qWarn("streamState failed to get cf name: %s", funcname); \
code = -1; \
@@ -1014,34 +1054,35 @@ rocksdb_iterator_t* streamStateIterCreate(SStreamState* pState, const char* cfNa
char toString[128] = {0}; \
if (qDebugFlag & DEBUG_TRACE) ginitDict[i].toStrFunc((void*)key, toString); \
int32_t klen = ginitDict[i].enFunc((void*)key, buf); \
- rocksdb_column_family_handle_t* pHandle = pState->pTdbState->pHandle[ginitDict[i].idx]; \
- rocksdb_t* db = pState->pTdbState->rocksdb; \
- rocksdb_readoptions_t* opts = pState->pTdbState->readOpts; \
- size_t len = 0; \
- char* val = rocksdb_get_cf(db, opts, pHandle, (const char*)buf, klen, (size_t*)&len, &err); \
- if (val == NULL) { \
+ rocksdb_column_family_handle_t* pHandle = \
+ ((rocksdb_column_family_handle_t**)pState->pTdbState->pHandle)[ginitDict[i].idx]; \
+ rocksdb_t* db = pState->pTdbState->rocksdb; \
+ rocksdb_readoptions_t* opts = pState->pTdbState->readOpts; \
+ size_t len = 0; \
+ char* val = rocksdb_get_cf(db, opts, pHandle, (const char*)buf, klen, (size_t*)&len, &err); \
+ if (val == NULL || len == 0) { \
if (err == NULL) { \
- qDebug("streamState str: %s failed to read from %s_%s, err: not exist", toString, pState->pTdbState->idstr, \
+ qTrace("streamState str: %s failed to read from %s_%s, err: not exist", toString, pState->pTdbState->idstr, \
funcname); \
} else { \
- qDebug("streamState str: %s failed to read from %s_%s, err: %s", toString, pState->pTdbState->idstr, funcname, \
+ qError("streamState str: %s failed to read from %s_%s, err: %s", toString, pState->pTdbState->idstr, funcname, \
err); \
taosMemoryFreeClear(err); \
} \
code = -1; \
} else { \
char* p = NULL; \
- int32_t len = ginitDict[i].deValueFunc(val, len, NULL, (char**)pVal); \
- if (len < 0) { \
- qDebug("streamState str: %s failed to read from %s_%s, err: already ttl ", toString, pState->pTdbState->idstr, \
+ int32_t tlen = ginitDict[i].deValueFunc(val, len, NULL, (char**)pVal); \
+ if (tlen <= 0) { \
+ qError("streamState str: %s failed to read from %s_%s, err: already ttl ", toString, pState->pTdbState->idstr, \
funcname); \
code = -1; \
} else { \
- qDebug("streamState str: %s succ to read from %s_%s, valLen:%d", toString, pState->pTdbState->idstr, funcname, \
- len); \
+ qTrace("streamState str: %s succ to read from %s_%s, valLen:%d", toString, pState->pTdbState->idstr, funcname, \
+ tlen); \
} \
taosMemoryFree(val); \
- if (vLen != NULL) *vLen = len; \
+ if (vLen != NULL) *vLen = tlen; \
} \
if (code == 0) \
qDebug("streamState str: %s succ to read from %s_%s", toString, pState->pTdbState->idstr, funcname); \
@@ -1052,7 +1093,7 @@ rocksdb_iterator_t* streamStateIterCreate(SStreamState* pState, const char* cfNa
code = 0; \
char buf[128] = {0}; \
char* err = NULL; \
- int i = streamGetInit(funcname); \
+ int i = streamGetInit(pState, funcname); \
if (i < 0) { \
qWarn("streamState failed to get cf name: %s_%s", pState->pTdbState->idstr, funcname); \
code = -1; \
@@ -1061,9 +1102,10 @@ rocksdb_iterator_t* streamStateIterCreate(SStreamState* pState, const char* cfNa
char toString[128] = {0}; \
if (qDebugFlag & DEBUG_TRACE) ginitDict[i].toStrFunc((void*)key, toString); \
int32_t klen = ginitDict[i].enFunc((void*)key, buf); \
- rocksdb_column_family_handle_t* pHandle = pState->pTdbState->pHandle[ginitDict[i].idx]; \
- rocksdb_t* db = pState->pTdbState->rocksdb; \
- rocksdb_writeoptions_t* opts = pState->pTdbState->writeOpts; \
+ rocksdb_column_family_handle_t* pHandle = \
+ ((rocksdb_column_family_handle_t**)pState->pTdbState->pHandle)[ginitDict[i].idx]; \
+ rocksdb_t* db = pState->pTdbState->rocksdb; \
+ rocksdb_writeoptions_t* opts = pState->pTdbState->writeOpts; \
rocksdb_delete_cf(db, opts, pHandle, (const char*)buf, klen, &err); \
if (err != NULL) { \
qError("streamState str: %s failed to del from %s_%s, err: %s", toString, pState->pTdbState->idstr, funcname, \
@@ -1071,7 +1113,7 @@ rocksdb_iterator_t* streamStateIterCreate(SStreamState* pState, const char* cfNa
taosMemoryFree(err); \
code = -1; \
} else { \
- qDebug("streamState str: %s succ to del from %s_%s", toString, pState->pTdbState->idstr, funcname); \
+ qTrace("streamState str: %s succ to del from %s_%s", toString, pState->pTdbState->idstr, funcname); \
} \
} while (0);
@@ -1098,29 +1140,29 @@ int32_t streamStateDel_rocksdb(SStreamState* pState, const SWinKey* key) {
int32_t streamStateClear_rocksdb(SStreamState* pState) {
qDebug("streamStateClear_rocksdb");
- SStateKey sKey = {.key = {.ts = 0, .groupId = 0}, .opNum = pState->number};
- SStateKey eKey = {.key = {.ts = INT64_MAX, .groupId = UINT64_MAX}, .opNum = pState->number};
char sKeyStr[128] = {0};
char eKeyStr[128] = {0};
+ SStateKey sKey = {.key = {.ts = 0, .groupId = 0}, .opNum = pState->number};
+ SStateKey eKey = {.key = {.ts = INT64_MAX, .groupId = UINT64_MAX}, .opNum = pState->number};
int sLen = stateKeyEncode(&sKey, sKeyStr);
int eLen = stateKeyEncode(&eKey, eKeyStr);
- char toStringStart[128] = {0};
- char toStringEnd[128] = {0};
- if (qDebugFlag & DEBUG_TRACE) {
- stateKeyToString(&sKey, toStringStart);
- stateKeyToString(&eKey, toStringEnd);
- }
+ if (pState->pTdbState->pHandle[1] != NULL) {
+ char* err = NULL;
+ rocksdb_delete_range_cf(pState->pTdbState->rocksdb, pState->pTdbState->writeOpts, pState->pTdbState->pHandle[1],
+ sKeyStr, sLen, eKeyStr, eLen, &err);
+ if (err != NULL) {
+ char toStringStart[128] = {0};
+ char toStringEnd[128] = {0};
+ stateKeyToString(&sKey, toStringStart);
+ stateKeyToString(&eKey, toStringEnd);
- char* err = NULL;
- rocksdb_delete_range_cf(pState->pTdbState->rocksdb, pState->pTdbState->writeOpts, pState->pTdbState->pHandle[1],
- sKeyStr, sLen, eKeyStr, eLen, &err);
- // rocksdb_compact_range_cf(pState->pTdbState->rocksdb, pState->pTdbState->pHandle[0], sKeyStr, sLen, eKeyStr,
- // eLen);
- if (err != NULL) {
- qWarn("failed to delete range cf(state) start: %s, end:%s, reason:%s", toStringStart, toStringEnd, err);
- taosMemoryFree(err);
+ qWarn("failed to delete range cf(state) start: %s, end:%s, reason:%s", toStringStart, toStringEnd, err);
+ taosMemoryFree(err);
+ } else {
+ rocksdb_compact_range_cf(pState->pTdbState->rocksdb, pState->pTdbState->pHandle[1], sKeyStr, sLen, eKeyStr, eLen);
+ }
}
return 0;
@@ -1215,7 +1257,8 @@ SStreamStateCur* streamStateSeekKeyNext_rocksdb(SStreamState* pState, const SWin
}
pCur->number = pState->number;
pCur->db = pState->pTdbState->rocksdb;
- pCur->iter = streamStateIterCreate(pState, "state", &pCur->snapshot, &pCur->readOpt);
+ pCur->iter = streamStateIterCreate(pState, "state", (rocksdb_snapshot_t**)&pCur->snapshot,
+ (rocksdb_readoptions_t**)&pCur->readOpt);
SStateKey sKey = {.key = *key, .opNum = pState->number};
char buf[128] = {0};
@@ -1255,7 +1298,8 @@ SStreamStateCur* streamStateSeekToLast_rocksdb(SStreamState* pState, const SWinK
SStreamStateCur* pCur = taosMemoryCalloc(1, sizeof(SStreamStateCur));
if (pCur == NULL) return NULL;
pCur->db = pState->pTdbState->rocksdb;
- pCur->iter = streamStateIterCreate(pState, "state", &pCur->snapshot, &pCur->readOpt);
+ pCur->iter = streamStateIterCreate(pState, "state", (rocksdb_snapshot_t**)&pCur->snapshot,
+ (rocksdb_readoptions_t**)&pCur->readOpt);
rocksdb_iter_seek(pCur->iter, buf, (size_t)klen);
rocksdb_iter_prev(pCur->iter);
@@ -1277,7 +1321,8 @@ SStreamStateCur* streamStateGetCur_rocksdb(SStreamState* pState, const SWinKey*
if (pCur == NULL) return NULL;
pCur->db = pState->pTdbState->rocksdb;
- pCur->iter = streamStateIterCreate(pState, "state", &pCur->snapshot, &pCur->readOpt);
+ pCur->iter = streamStateIterCreate(pState, "state", (rocksdb_snapshot_t**)&pCur->snapshot,
+ (rocksdb_readoptions_t**)&pCur->readOpt);
SStateKey sKey = {.key = *key, .opNum = pState->number};
char buf[128] = {0};
@@ -1369,7 +1414,8 @@ SStreamStateCur* streamStateSessionSeekKeyCurrentPrev_rocksdb(SStreamState* pSta
}
pCur->number = pState->number;
pCur->db = pState->pTdbState->rocksdb;
- pCur->iter = streamStateIterCreate(pState, "sess", &pCur->snapshot, &pCur->readOpt);
+ pCur->iter = streamStateIterCreate(pState, "sess", (rocksdb_snapshot_t**)&pCur->snapshot,
+ (rocksdb_readoptions_t**)&pCur->readOpt);
char buf[128] = {0};
SStateSessionKey sKey = {.key = *key, .opNum = pState->number};
@@ -1408,7 +1454,8 @@ SStreamStateCur* streamStateSessionSeekKeyCurrentNext_rocksdb(SStreamState* pSta
return NULL;
}
pCur->db = pState->pTdbState->rocksdb;
- pCur->iter = streamStateIterCreate(pState, "sess", &pCur->snapshot, &pCur->readOpt);
+ pCur->iter = streamStateIterCreate(pState, "sess", (rocksdb_snapshot_t**)&pCur->snapshot,
+ (rocksdb_readoptions_t**)&pCur->readOpt);
pCur->number = pState->number;
char buf[128] = {0};
@@ -1444,7 +1491,8 @@ SStreamStateCur* streamStateSessionSeekKeyNext_rocksdb(SStreamState* pState, con
return NULL;
}
pCur->db = pState->pTdbState->rocksdb;
- pCur->iter = streamStateIterCreate(pState, "sess", &pCur->snapshot, &pCur->readOpt);
+ pCur->iter = streamStateIterCreate(pState, "sess", (rocksdb_snapshot_t**)&pCur->snapshot,
+ (rocksdb_readoptions_t**)&pCur->readOpt);
pCur->number = pState->number;
SStateSessionKey sKey = {.key = *key, .opNum = pState->number};
@@ -1536,7 +1584,8 @@ SStreamStateCur* streamStateFillGetCur_rocksdb(SStreamState* pState, const SWinK
if (pCur == NULL) return NULL;
pCur->db = pState->pTdbState->rocksdb;
- pCur->iter = streamStateIterCreate(pState, "fill", &pCur->snapshot, &pCur->readOpt);
+ pCur->iter = streamStateIterCreate(pState, "fill", (rocksdb_snapshot_t**)&pCur->snapshot,
+ (rocksdb_readoptions_t**)&pCur->readOpt);
char buf[128] = {0};
int len = winKeyEncode((void*)key, buf);
@@ -1595,7 +1644,8 @@ SStreamStateCur* streamStateFillSeekKeyNext_rocksdb(SStreamState* pState, const
}
pCur->db = pState->pTdbState->rocksdb;
- pCur->iter = streamStateIterCreate(pState, "fill", &pCur->snapshot, &pCur->readOpt);
+ pCur->iter = streamStateIterCreate(pState, "fill", (rocksdb_snapshot_t**)&pCur->snapshot,
+ (rocksdb_readoptions_t**)&pCur->readOpt);
char buf[128] = {0};
int len = winKeyEncode((void*)key, buf);
@@ -1630,7 +1680,8 @@ SStreamStateCur* streamStateFillSeekKeyPrev_rocksdb(SStreamState* pState, const
}
pCur->db = pState->pTdbState->rocksdb;
- pCur->iter = streamStateIterCreate(pState, "fill", &pCur->snapshot, &pCur->readOpt);
+ pCur->iter = streamStateIterCreate(pState, "fill", (rocksdb_snapshot_t**)&pCur->snapshot,
+ (rocksdb_readoptions_t**)&pCur->readOpt);
char buf[128] = {0};
int len = winKeyEncode((void*)key, buf);
@@ -1665,7 +1716,8 @@ int32_t streamStateSessionGetKeyByRange_rocksdb(SStreamState* pState, const SSes
}
pCur->number = pState->number;
pCur->db = pState->pTdbState->rocksdb;
- pCur->iter = streamStateIterCreate(pState, "sess", &pCur->snapshot, &pCur->readOpt);
+ pCur->iter = streamStateIterCreate(pState, "sess", (rocksdb_snapshot_t**)&pCur->snapshot,
+ (rocksdb_readoptions_t**)&pCur->readOpt);
SStateSessionKey sKey = {.key = *key, .opNum = pState->number};
int32_t c = 0;
@@ -1876,17 +1928,17 @@ int32_t streamStateGetParName_rocksdb(SStreamState* pState, int64_t groupId, voi
int32_t streamDefaultPut_rocksdb(SStreamState* pState, const void* key, void* pVal, int32_t pVLen) {
int code = 0;
- STREAM_STATE_PUT_ROCKSDB(pState, "default", &key, pVal, pVLen);
+ STREAM_STATE_PUT_ROCKSDB(pState, "default", key, pVal, pVLen);
return code;
}
int32_t streamDefaultGet_rocksdb(SStreamState* pState, const void* key, void** pVal, int32_t* pVLen) {
int code = 0;
- STREAM_STATE_GET_ROCKSDB(pState, "default", &key, pVal, pVLen);
+ STREAM_STATE_GET_ROCKSDB(pState, "default", key, pVal, pVLen);
return code;
}
int32_t streamDefaultDel_rocksdb(SStreamState* pState, const void* key) {
int code = 0;
- STREAM_STATE_DEL_ROCKSDB(pState, "default", &key);
+ STREAM_STATE_DEL_ROCKSDB(pState, "default", key);
return code;
}
@@ -1935,7 +1987,8 @@ void* streamDefaultIterCreate_rocksdb(SStreamState* pState) {
SStreamStateCur* pCur = taosMemoryCalloc(1, sizeof(SStreamStateCur));
pCur->db = pState->pTdbState->rocksdb;
- pCur->iter = streamStateIterCreate(pState, "default", &pCur->snapshot, &pCur->readOpt);
+ pCur->iter = streamStateIterCreate(pState, "default", (rocksdb_snapshot_t**)&pCur->snapshot,
+ (rocksdb_readoptions_t**)&pCur->readOpt);
return pCur;
}
int32_t streamDefaultIterValid_rocksdb(void* iter) {
@@ -1980,7 +2033,7 @@ void streamStateClearBatch(void* pBatch) { rocksdb_writebatch_clear((rocksdb_
void streamStateDestroyBatch(void* pBatch) { rocksdb_writebatch_destroy((rocksdb_writebatch_t*)pBatch); }
int32_t streamStatePutBatch(SStreamState* pState, const char* cfName, rocksdb_writebatch_t* pBatch, void* key,
void* val, int32_t vlen, int64_t ttl) {
- int i = streamGetInit(cfName);
+ int i = streamGetInit(pState, cfName);
if (i < 0) {
qError("streamState failed to put to cf name:%s", cfName);
diff --git a/source/libs/stream/src/streamDispatch.c b/source/libs/stream/src/streamDispatch.c
index 401a8b9e741904e89e751180f29f6b9aaa58ea3c..922a1f534592dd11a1af7f34592a8251f178ec7b 100644
--- a/source/libs/stream/src/streamDispatch.c
+++ b/source/libs/stream/src/streamDispatch.c
@@ -15,6 +15,13 @@
#include "streamInc.h"
+#define MAX_BLOCK_NAME_NUM 1024
+
+typedef struct SBlockName {
+ uint32_t hashValue;
+ char parTbName[TSDB_TABLE_NAME_LEN];
+} SBlockName;
+
int32_t tEncodeStreamDispatchReq(SEncoder* pEncoder, const SStreamDispatchReq* pReq) {
if (tStartEncode(pEncoder) < 0) return -1;
if (tEncodeI64(pEncoder, pReq->streamId) < 0) return -1;
@@ -157,7 +164,7 @@ int32_t streamBroadcastToChildren(SStreamTask* pTask, const SSDataBlock* pBlock)
tEncodeStreamRetrieveReq(&encoder, &req);
tEncoderClear(&encoder);
- SRpcMsg rpcMsg = { .code = 0, .msgType = TDMT_STREAM_RETRIEVE, .pCont = buf, .contLen = sizeof(SMsgHead) + len };
+ SRpcMsg rpcMsg = {.code = 0, .msgType = TDMT_STREAM_RETRIEVE, .pCont = buf, .contLen = sizeof(SMsgHead) + len};
if (tmsgSendReq(&pEpInfo->epSet, &rpcMsg) < 0) {
ASSERT(0);
goto CLEAR;
@@ -283,7 +290,7 @@ int32_t streamDispatchOneRecoverFinishReq(SStreamTask* pTask, const SStreamRecov
msg.info.noResp = 1;
tmsgSendReq(pEpSet, &msg);
- qDebug("s-task:%s dispatch recover finish msg to taskId:%d node %d: recover finish msg", pTask->id.idStr,
+ qDebug("s-task:%s dispatch recover finish msg to downstream taskId:0x%x node %d: recover finish msg", pTask->id.idStr,
pReq->taskId, vgId);
return 0;
@@ -318,7 +325,7 @@ int32_t doSendDispatchMsg(SStreamTask* pTask, const SStreamDispatchReq* pReq, in
msg.pCont = buf;
msg.msgType = pTask->dispatchMsgType;
- qDebug("dispatch from s-task:%s to taskId:0x%x vgId:%d data msg", pTask->id.idStr, pReq->taskId, vgId);
+ qDebug("s-task:%s dispatch msg to taskId:0x%x vgId:%d data msg", pTask->id.idStr, pReq->taskId, vgId);
tmsgSendReq(pEpSet, &msg);
code = 0;
@@ -331,26 +338,46 @@ FAIL:
int32_t streamSearchAndAddBlock(SStreamTask* pTask, SStreamDispatchReq* pReqs, SSDataBlock* pDataBlock, int32_t vgSz,
int64_t groupId) {
- char* ctbName = taosMemoryCalloc(1, TSDB_TABLE_FNAME_LEN);
- if (ctbName == NULL) {
- return -1;
+ uint32_t hashValue = 0;
+ SArray* vgInfo = pTask->shuffleDispatcher.dbInfo.pVgroupInfos;
+ if (pTask->pNameMap == NULL) {
+ pTask->pNameMap = tSimpleHashInit(1024, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT));
}
- if (pDataBlock->info.parTbName[0]) {
- snprintf(ctbName, TSDB_TABLE_NAME_LEN, "%s.%s", pTask->shuffleDispatcher.dbInfo.db, pDataBlock->info.parTbName);
+ void* pVal = tSimpleHashGet(pTask->pNameMap, &groupId, sizeof(int64_t));
+ if (pVal) {
+ SBlockName* pBln = (SBlockName*)pVal;
+ hashValue = pBln->hashValue;
+ if (!pDataBlock->info.parTbName[0]) {
+ memcpy(pDataBlock->info.parTbName, pBln->parTbName, strlen(pBln->parTbName));
+ }
} else {
- char* ctbShortName = buildCtbNameByGroupId(pTask->shuffleDispatcher.stbFullName, groupId);
- snprintf(ctbName, TSDB_TABLE_NAME_LEN, "%s.%s", pTask->shuffleDispatcher.dbInfo.db, ctbShortName);
- taosMemoryFree(ctbShortName);
- }
+ char* ctbName = taosMemoryCalloc(1, TSDB_TABLE_FNAME_LEN);
+ if (ctbName == NULL) {
+ return -1;
+ }
- SArray* vgInfo = pTask->shuffleDispatcher.dbInfo.pVgroupInfos;
+ if (pDataBlock->info.parTbName[0]) {
+ snprintf(ctbName, TSDB_TABLE_NAME_LEN, "%s.%s", pTask->shuffleDispatcher.dbInfo.db, pDataBlock->info.parTbName);
+ } else {
+ buildCtbNameByGroupIdImpl(pTask->shuffleDispatcher.stbFullName, groupId, pDataBlock->info.parTbName);
+ snprintf(ctbName, TSDB_TABLE_NAME_LEN, "%s.%s", pTask->shuffleDispatcher.dbInfo.db, pDataBlock->info.parTbName);
+ }
- /*uint32_t hashValue = MurmurHash3_32(ctbName, strlen(ctbName));*/
- SUseDbRsp* pDbInfo = &pTask->shuffleDispatcher.dbInfo;
- uint32_t hashValue =
- taosGetTbHashVal(ctbName, strlen(ctbName), pDbInfo->hashMethod, pDbInfo->hashPrefix, pDbInfo->hashSuffix);
- taosMemoryFree(ctbName);
+ SArray* vgInfo = pTask->shuffleDispatcher.dbInfo.pVgroupInfos;
+
+ /*uint32_t hashValue = MurmurHash3_32(ctbName, strlen(ctbName));*/
+ SUseDbRsp* pDbInfo = &pTask->shuffleDispatcher.dbInfo;
+ hashValue =
+ taosGetTbHashVal(ctbName, strlen(ctbName), pDbInfo->hashMethod, pDbInfo->hashPrefix, pDbInfo->hashSuffix);
+ taosMemoryFree(ctbName);
+ SBlockName bln = {0};
+ bln.hashValue = hashValue;
+ memcpy(bln.parTbName, pDataBlock->info.parTbName, strlen(pDataBlock->info.parTbName));
+ if (tSimpleHashGetSize(pTask->pNameMap) < MAX_BLOCK_NAME_NUM) {
+ tSimpleHashPut(pTask->pNameMap, &groupId, sizeof(int64_t), &bln, sizeof(SBlockName));
+ }
+ }
bool found = false;
// TODO: optimize search
@@ -414,7 +441,7 @@ int32_t streamDispatchAllBlocks(SStreamTask* pTask, const SStreamDataBlock* pDat
req.taskId = downstreamTaskId;
- qDebug("s-task:%s (child taskId:%d) fix-dispatch blocks:%d to down stream s-task:%d in vgId:%d", pTask->id.idStr,
+ qDebug("s-task:%s (child taskId:%d) fix-dispatch %d block(s) to down stream s-task:0x%x in vgId:%d", pTask->id.idStr,
pTask->selfChildId, numOfBlocks, downstreamTaskId, vgId);
code = doSendDispatchMsg(pTask, &req, vgId, pEpSet);
@@ -510,14 +537,17 @@ int32_t streamDispatchStreamBlock(SStreamTask* pTask) {
int8_t old =
atomic_val_compare_exchange_8(&pTask->outputStatus, TASK_OUTPUT_STATUS__NORMAL, TASK_OUTPUT_STATUS__WAIT);
if (old != TASK_OUTPUT_STATUS__NORMAL) {
- qDebug("s-task:%s task wait for dispatch rsp, not dispatch now", pTask->id.idStr);
+ qDebug("s-task:%s task wait for dispatch rsp, not dispatch now, output status:%d", pTask->id.idStr, old);
return 0;
}
+ qDebug("s-task:%s start to dispatch msg, set output status:%d", pTask->id.idStr, pTask->outputStatus);
+
SStreamDataBlock* pDispatchedBlock = streamQueueNextItem(pTask->outputQueue);
if (pDispatchedBlock == NULL) {
- qDebug("s-task:%s stop dispatching since no output in output queue", pTask->id.idStr);
atomic_store_8(&pTask->outputStatus, TASK_OUTPUT_STATUS__NORMAL);
+ qDebug("s-task:%s stop dispatching since no output in output queue, output status:%d", pTask->id.idStr,
+ pTask->outputStatus);
return 0;
}
@@ -527,6 +557,7 @@ int32_t streamDispatchStreamBlock(SStreamTask* pTask) {
if (code != TSDB_CODE_SUCCESS) {
streamQueueProcessFail(pTask->outputQueue);
atomic_store_8(&pTask->outputStatus, TASK_OUTPUT_STATUS__NORMAL);
+ qDebug("s-task:%s failed to dispatch msg to downstream, output status:%d", pTask->id.idStr, pTask->outputStatus);
}
// this block can be freed only when it has been pushed to down stream.
diff --git a/source/libs/stream/src/streamExec.c b/source/libs/stream/src/streamExec.c
index 55474541ede5d51236b3e53098ed480d9ce4ad0c..46290c306f5b641c21309b2dcc9c97554c93e723 100644
--- a/source/libs/stream/src/streamExec.c
+++ b/source/libs/stream/src/streamExec.c
@@ -16,11 +16,11 @@
#include "streamInc.h"
// maximum allowed processed block batches. One block may include several submit blocks
-#define MAX_STREAM_EXEC_BATCH_NUM 128
-#define MIN_STREAM_EXEC_BATCH_NUM 16
-#define MAX_STREAM_RESULT_DUMP_THRESHOLD 1000
+#define MAX_STREAM_EXEC_BATCH_NUM 32
+#define MIN_STREAM_EXEC_BATCH_NUM 4
+#define MAX_STREAM_RESULT_DUMP_THRESHOLD 100
-static int32_t updateCheckPointInfo (SStreamTask* pTask);
+static int32_t updateCheckPointInfo(SStreamTask* pTask);
bool streamTaskShouldStop(const SStreamStatus* pStatus) {
int32_t status = atomic_load_8((int8_t*)&pStatus->taskStatus);
@@ -44,14 +44,16 @@ static int32_t doDumpResult(SStreamTask* pTask, SStreamQueueItem* pItem, SArray*
if (numOfBlocks > 0) {
SStreamDataBlock* pStreamBlocks = createStreamBlockFromResults(pItem, pTask, size, pRes);
if (pStreamBlocks == NULL) {
+ qError("s-task:%s failed to create result stream data block, code:%s", pTask->id.idStr, tstrerror(terrno));
taosArrayDestroyEx(pRes, (FDelete)blockDataFreeRes);
return -1;
}
- qDebug("s-task:%s dump stream result data blocks, num:%d, size:%.2fMiB", pTask->id.idStr, numOfBlocks, size/1048576.0);
+ qDebug("s-task:%s dump stream result data blocks, num:%d, size:%.2fMiB", pTask->id.idStr, numOfBlocks,
+ size / 1048576.0);
code = streamTaskOutputResultBlock(pTask, pStreamBlocks);
- if (code == TSDB_CODE_UTIL_QUEUE_OUT_OF_MEMORY) { // back pressure and record position
+ if (code == TSDB_CODE_UTIL_QUEUE_OUT_OF_MEMORY) { // back pressure and record position
destroyStreamDataBlock(pStreamBlocks);
return -1;
}
@@ -65,7 +67,8 @@ static int32_t doDumpResult(SStreamTask* pTask, SStreamQueueItem* pItem, SArray*
return TSDB_CODE_SUCCESS;
}
-static int32_t streamTaskExecImpl(SStreamTask* pTask, SStreamQueueItem* pItem, int64_t* totalSize, int32_t* totalBlocks) {
+static int32_t streamTaskExecImpl(SStreamTask* pTask, SStreamQueueItem* pItem, int64_t* totalSize,
+ int32_t* totalBlocks) {
int32_t code = TSDB_CODE_SUCCESS;
void* pExecutor = pTask->exec.pExecutor;
@@ -82,7 +85,7 @@ static int32_t streamTaskExecImpl(SStreamTask* pTask, SStreamQueueItem* pItem, i
}
if (streamTaskShouldStop(&pTask->status)) {
- taosArrayDestroy(pRes); // memory leak
+ taosArrayDestroyEx(pRes, (FDelete)blockDataFreeRes);
return 0;
}
@@ -99,9 +102,8 @@ static int32_t streamTaskExecImpl(SStreamTask* pTask, SStreamQueueItem* pItem, i
if (output == NULL) {
if (pItem->type == STREAM_INPUT__DATA_RETRIEVE) {
- SSDataBlock block = {0};
-
- const SStreamDataBlock* pRetrieveBlock = (const SStreamDataBlock*) pItem;
+ SSDataBlock block = {0};
+ const SStreamDataBlock* pRetrieveBlock = (const SStreamDataBlock*)pItem;
ASSERT(taosArrayGetSize(pRetrieveBlock->blocks) == 1);
assignOneDataBlock(&block, taosArrayGet(pRetrieveBlock->blocks, 0));
@@ -132,7 +134,7 @@ static int32_t streamTaskExecImpl(SStreamTask* pTask, SStreamQueueItem* pItem, i
taosArrayPush(pRes, &block);
- qDebug("s-task:%s (child %d) executed and get block, total blocks:%d, size:%.2fMiB", pTask->id.idStr,
+ qDebug("s-task:%s (child %d) executed and get %d result blocks, size:%.2fMiB", pTask->id.idStr,
pTask->selfChildId, numOfBlocks, size / 1048576.0);
// current output should be dispatched to down stream nodes
@@ -153,7 +155,7 @@ static int32_t streamTaskExecImpl(SStreamTask* pTask, SStreamQueueItem* pItem, i
ASSERT(numOfBlocks == taosArrayGetSize(pRes));
code = doDumpResult(pTask, pItem, pRes, size, totalSize, totalBlocks);
} else {
- taosArrayDestroy(pRes);
+ taosArrayDestroyEx(pRes, (FDelete)blockDataFreeRes);
}
return code;
@@ -163,7 +165,6 @@ int32_t streamScanExec(SStreamTask* pTask, int32_t batchSz) {
int32_t code = 0;
ASSERT(pTask->taskLevel == TASK_LEVEL__SOURCE);
-
void* exec = pTask->exec.pExecutor;
qSetStreamOpOpen(exec);
@@ -236,11 +237,11 @@ int32_t streamScanExec(SStreamTask* pTask, int32_t batchSz) {
taosFreeQitem(qRes);
return code;
}
-
- if (pTask->outputType == TASK_OUTPUT__FIXED_DISPATCH || pTask->outputType == TASK_OUTPUT__SHUFFLE_DISPATCH) {
- qDebug("s-task:%s scan exec dispatch blocks:%d", pTask->id.idStr, batchCnt);
- streamDispatchStreamBlock(pTask);
- }
+//
+// if (pTask->outputType == TASK_OUTPUT__FIXED_DISPATCH || pTask->outputType == TASK_OUTPUT__SHUFFLE_DISPATCH) {
+// qDebug("s-task:%s scan exec dispatch blocks:%d", pTask->id.idStr, batchCnt);
+// streamDispatchStreamBlock(pTask);
+// }
if (finished) {
break;
@@ -287,7 +288,7 @@ int32_t streamBatchExec(SStreamTask* pTask, int32_t batchLimit) {
}
#endif
-int32_t updateCheckPointInfo (SStreamTask* pTask) {
+int32_t updateCheckPointInfo(SStreamTask* pTask) {
int64_t ckId = 0;
int64_t dataVer = 0;
qGetCheckpointVersion(pTask->exec.pExecutor, &dataVer, &ckId);
@@ -295,7 +296,8 @@ int32_t updateCheckPointInfo (SStreamTask* pTask) {
SCheckpointInfo* pCkInfo = &pTask->chkInfo;
if (ckId > pCkInfo->id) { // save it since the checkpoint is updated
qDebug("s-task:%s exec end, start to update check point, ver from %" PRId64 " to %" PRId64
- ", checkPoint id:%" PRId64 " -> %" PRId64, pTask->id.idStr, pCkInfo->version, dataVer, pCkInfo->id, ckId);
+ ", checkPoint id:%" PRId64 " -> %" PRId64,
+ pTask->id.idStr, pCkInfo->version, dataVer, pCkInfo->id, ckId);
pTask->chkInfo = (SCheckpointInfo){.version = dataVer, .id = ckId, .currentVer = pCkInfo->currentVer};
@@ -315,8 +317,13 @@ int32_t updateCheckPointInfo (SStreamTask* pTask) {
return TSDB_CODE_SUCCESS;
}
+/**
+ * todo: the batch of blocks should be tuned dynamic, according to the total elapsed time of each batch of blocks, the
+ * appropriate batch of blocks should be handled in 5 to 10 sec.
+ */
int32_t streamExecForAll(SStreamTask* pTask) {
- int32_t code = 0;
+ const char* id = pTask->id.idStr;
+
while (1) {
int32_t batchSize = 1;
int16_t times = 0;
@@ -324,18 +331,22 @@ int32_t streamExecForAll(SStreamTask* pTask) {
SStreamQueueItem* pInput = NULL;
// merge multiple input data if possible in the input queue.
- qDebug("s-task:%s start to extract data block from inputQ", pTask->id.idStr);
+ qDebug("s-task:%s start to extract data block from inputQ", id);
while (1) {
if (streamTaskShouldPause(&pTask->status)) {
- return 0;
+ if (batchSize > 1) {
+ break;
+ } else {
+ return 0;
+ }
}
SStreamQueueItem* qItem = streamQueueNextItem(pTask->inputQueue);
if (qItem == NULL) {
if (pTask->taskLevel == TASK_LEVEL__SOURCE && batchSize < MIN_STREAM_EXEC_BATCH_NUM && times < 5) {
times++;
- taosMsleep(1);
+ taosMsleep(10);
qDebug("===stream===try again batchSize:%d", batchSize);
continue;
}
@@ -360,8 +371,10 @@ int32_t streamExecForAll(SStreamTask* pTask) {
batchSize++;
pInput = newRet;
streamQueueProcessSuccess(pTask->inputQueue);
+
if (batchSize > MAX_STREAM_EXEC_BATCH_NUM) {
- qDebug("maximum batch limit:%d reached, processing, %s", MAX_STREAM_EXEC_BATCH_NUM, pTask->id.idStr);
+ qDebug("s-task:%s batch size limit:%d reached, start to process blocks", id,
+ MAX_STREAM_EXEC_BATCH_NUM);
break;
}
}
@@ -372,7 +385,6 @@ int32_t streamExecForAll(SStreamTask* pTask) {
if (pInput) {
streamFreeQitem(pInput);
}
-
return 0;
}
@@ -382,7 +394,7 @@ int32_t streamExecForAll(SStreamTask* pTask) {
if (pTask->taskLevel == TASK_LEVEL__SINK) {
ASSERT(pInput->type == STREAM_INPUT__DATA_BLOCK);
- qDebug("s-task:%s sink node start to sink result. numOfBlocks:%d", pTask->id.idStr, batchSize);
+ qDebug("s-task:%s sink task start to sink %d blocks", id, batchSize);
streamTaskOutputResultBlock(pTask, (SStreamDataBlock*)pInput);
continue;
}
@@ -391,20 +403,20 @@ int32_t streamExecForAll(SStreamTask* pTask) {
while (pTask->taskLevel == TASK_LEVEL__SOURCE) {
int8_t status = atomic_load_8(&pTask->status.taskStatus);
if (status != TASK_STATUS__NORMAL && status != TASK_STATUS__PAUSE) {
- qError("stream task wait for the end of fill history, s-task:%s, status:%d", pTask->id.idStr,
+ qError("stream task wait for the end of fill history, s-task:%s, status:%d", id,
atomic_load_8(&pTask->status.taskStatus));
- taosMsleep(2);
+ taosMsleep(100);
} else {
break;
}
}
int64_t st = taosGetTimestampMs();
- qDebug("s-task:%s start to execute, block batches:%d", pTask->id.idStr, batchSize);
+ qDebug("s-task:%s start to process batch of blocks, num:%d", id, batchSize);
{
// set input
- void* pExecutor = pTask->exec.pExecutor;
+ void* pExecutor = pTask->exec.pExecutor;
const SStreamQueueItem* pItem = pInput;
if (pItem->type == STREAM_INPUT__GET_RES) {
@@ -414,21 +426,21 @@ int32_t streamExecForAll(SStreamTask* pTask) {
ASSERT(pTask->taskLevel == TASK_LEVEL__SOURCE);
const SStreamDataSubmit* pSubmit = (const SStreamDataSubmit*)pInput;
qSetMultiStreamInput(pExecutor, &pSubmit->submit, 1, STREAM_INPUT__DATA_SUBMIT);
- qDebug("s-task:%s set submit blocks as source block completed, %p %p len:%d ver:%" PRId64, pTask->id.idStr, pSubmit,
+ qDebug("s-task:%s set submit blocks as source block completed, %p %p len:%d ver:%" PRId64, id, pSubmit,
pSubmit->submit.msgStr, pSubmit->submit.msgLen, pSubmit->submit.ver);
} else if (pItem->type == STREAM_INPUT__DATA_BLOCK || pItem->type == STREAM_INPUT__DATA_RETRIEVE) {
const SStreamDataBlock* pBlock = (const SStreamDataBlock*)pInput;
SArray* pBlockList = pBlock->blocks;
int32_t numOfBlocks = taosArrayGetSize(pBlockList);
- qDebug("s-task:%s set sdata blocks as input num:%d, ver:%" PRId64, pTask->id.idStr, numOfBlocks, pBlock->sourceVer);
+ qDebug("s-task:%s set sdata blocks as input num:%d, ver:%" PRId64, id, numOfBlocks, pBlock->sourceVer);
qSetMultiStreamInput(pExecutor, pBlockList->pData, numOfBlocks, STREAM_INPUT__DATA_BLOCK);
} else if (pItem->type == STREAM_INPUT__MERGED_SUBMIT) {
const SStreamMergedSubmit* pMerged = (const SStreamMergedSubmit*)pInput;
SArray* pBlockList = pMerged->submits;
int32_t numOfBlocks = taosArrayGetSize(pBlockList);
- qDebug("s-task:%s %p set submit input (merged), batch num:%d", pTask->id.idStr, pTask, numOfBlocks);
+ qDebug("s-task:%s %p set (merged) submit blocks as a batch, numOfBlocks:%d", id, pTask, numOfBlocks);
qSetMultiStreamInput(pExecutor, pBlockList->pData, numOfBlocks, STREAM_INPUT__MERGED_SUBMIT);
} else if (pItem->type == STREAM_INPUT__REF_DATA_BLOCK) {
const SStreamRefDataBlock* pRefBlock = (const SStreamRefDataBlock*)pInput;
@@ -443,7 +455,8 @@ int32_t streamExecForAll(SStreamTask* pTask) {
streamTaskExecImpl(pTask, pInput, &resSize, &totalBlocks);
double el = (taosGetTimestampMs() - st) / 1000.0;
- qDebug("s-task:%s exec end, elapsed time:%.2fs, result size:%.2fMiB, numOfBlocks:%d", pTask->id.idStr, el, resSize / 1048576.0, totalBlocks);
+ qDebug("s-task:%s batch of input blocks exec end, elapsed time:%.2fs, result size:%.2fMiB, numOfBlocks:%d",
+ id, el, resSize / 1048576.0, totalBlocks);
streamFreeQitem(pInput);
}
diff --git a/source/libs/stream/src/streamMeta.c b/source/libs/stream/src/streamMeta.c
index 33375fe921b2971a96c36761182db63e61e95ad0..8c26052fdbc14a5a7c83ebc1792e8ec9d90832e7 100644
--- a/source/libs/stream/src/streamMeta.c
+++ b/source/libs/stream/src/streamMeta.c
@@ -205,24 +205,25 @@ int32_t streamMetaSaveTask(SStreamMeta* pMeta, SStreamTask* pTask) {
// add to the ready tasks hash map, not the restored tasks hash map
int32_t streamMetaAddDeployedTask(SStreamMeta* pMeta, int64_t ver, SStreamTask* pTask) {
- if (pMeta->expandFunc(pMeta->ahandle, pTask, ver) < 0) {
- tFreeStreamTask(pTask);
- return -1;
- }
-
- if (streamMetaSaveTask(pMeta, pTask) < 0) {
- tFreeStreamTask(pTask);
- return -1;
- }
-
- if (streamMetaCommit(pMeta) < 0) {
- tFreeStreamTask(pTask);
- return -1;
- }
-
void* p = taosHashGet(pMeta->pTasks, &pTask->id.taskId, sizeof(pTask->id.taskId));
if (p == NULL) {
+ if (pMeta->expandFunc(pMeta->ahandle, pTask, ver) < 0) {
+ tFreeStreamTask(pTask);
+ return -1;
+ }
+
+ if (streamMetaSaveTask(pMeta, pTask) < 0) {
+ tFreeStreamTask(pTask);
+ return -1;
+ }
+
+ if (streamMetaCommit(pMeta) < 0) {
+ tFreeStreamTask(pTask);
+ return -1;
+ }
taosArrayPush(pMeta->pTaskList, &pTask->id.taskId);
+ } else {
+ return 0;
}
taosHashPut(pMeta->pTasks, &pTask->id.taskId, sizeof(pTask->id.taskId), &pTask, POINTER_BYTES);
@@ -268,12 +269,14 @@ void streamMetaRemoveTask(SStreamMeta* pMeta, int32_t taskId) {
SStreamTask** ppTask = (SStreamTask**)taosHashGet(pMeta->pTasks, &taskId, sizeof(int32_t));
if (ppTask) {
SStreamTask* pTask = *ppTask;
+
taosHashRemove(pMeta->pTasks, &taskId, sizeof(int32_t));
tdbTbDelete(pMeta->pTaskDb, &taskId, sizeof(int32_t), pMeta->txn);
atomic_store_8(&pTask->status.taskStatus, TASK_STATUS__DROPPING);
-
int32_t num = taosArrayGetSize(pMeta->pTaskList);
+
+ qDebug("s-task:%s set the drop task flag, remain running s-task:%d", pTask->id.idStr, num - 1);
for (int32_t i = 0; i < num; ++i) {
int32_t* pTaskId = taosArrayGet(pMeta->pTaskList, i);
if (*pTaskId == taskId) {
@@ -283,6 +286,8 @@ void streamMetaRemoveTask(SStreamMeta* pMeta, int32_t taskId) {
}
streamMetaReleaseTask(pMeta, pTask);
+ } else {
+ qDebug("vgId:%d failed to find the task:0x%x, it may be dropped already", pMeta->vgId, taskId);
}
taosWUnLockLatch(&pMeta->lock);
@@ -355,22 +360,29 @@ int32_t streamLoadTasks(SStreamMeta* pMeta, int64_t ver) {
tDecodeStreamTask(&decoder, pTask);
tDecoderClear(&decoder);
- if (pMeta->expandFunc(pMeta->ahandle, pTask, pTask->chkInfo.version) < 0) {
- tdbFree(pKey);
- tdbFree(pVal);
- tdbTbcClose(pCur);
- return -1;
- }
-
+ // remove duplicate
void* p = taosHashGet(pMeta->pTasks, &pTask->id.taskId, sizeof(pTask->id.taskId));
if (p == NULL) {
+ if (pMeta->expandFunc(pMeta->ahandle, pTask, pTask->chkInfo.version) < 0) {
+ tdbFree(pKey);
+ tdbFree(pVal);
+ tdbTbcClose(pCur);
+ taosMemoryFree(pTask);
+ return -1;
+ }
taosArrayPush(pMeta->pTaskList, &pTask->id.taskId);
+ } else {
+ tdbFree(pKey);
+ tdbFree(pVal);
+ tdbTbcClose(pCur);
+ taosMemoryFree(pTask);
+ continue;
}
-
if (taosHashPut(pMeta->pTasks, &pTask->id.taskId, sizeof(pTask->id.taskId), &pTask, sizeof(void*)) < 0) {
tdbFree(pKey);
tdbFree(pVal);
tdbTbcClose(pCur);
+ taosMemoryFree(pTask);
return -1;
}
diff --git a/source/libs/stream/src/streamState.c b/source/libs/stream/src/streamState.c
index 7e2c62f73a4624d764745de8d916781576db25d6..71a21ac15071ad5ab859b74133f23776c1d58f24 100644
--- a/source/libs/stream/src/streamState.c
+++ b/source/libs/stream/src/streamState.c
@@ -23,7 +23,6 @@
#include "tcommon.h"
#include "tcompare.h"
#include "tref.h"
-#include "ttimer.h"
#define MAX_TABLE_NAME_NUM 200000
@@ -91,13 +90,14 @@ int stateKeyCmpr(const void* pKey1, int kLen1, const void* pKey2, int kLen2) {
return winKeyCmprImpl(&pWin1->key, &pWin2->key);
}
-SStreamState* streamStateOpen(char* path, SStreamTask* pTask, bool specPath, int32_t szPage, int32_t pages) {
- qWarn("open stream state, %s", path);
+SStreamState* streamStateOpen(char* path, void* pTask, bool specPath, int32_t szPage, int32_t pages) {
+ qDebug("open stream state, %s", path);
SStreamState* pState = taosMemoryCalloc(1, sizeof(SStreamState));
if (pState == NULL) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
return NULL;
}
+
pState->pTdbState = taosMemoryCalloc(1, sizeof(STdbState));
if (pState->pTdbState == NULL) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
@@ -105,29 +105,33 @@ SStreamState* streamStateOpen(char* path, SStreamTask* pTask, bool specPath, int
return NULL;
}
+ SStreamTask* pStreamTask = pTask;
char statePath[1024];
if (!specPath) {
- sprintf(statePath, "%s/%d", path, pTask->id.taskId);
+ sprintf(statePath, "%s/%d", path, pStreamTask->id.taskId);
} else {
memset(statePath, 0, 1024);
tstrncpy(statePath, path, 1024);
}
- pState->taskId = pTask->id.taskId;
- pState->streamId = pTask->id.streamId;
+
+ pState->taskId = pStreamTask->id.taskId;
+ pState->streamId = pStreamTask->id.streamId;
+
#ifdef USE_ROCKSDB
- // qWarn("open stream state1");
- taosAcquireRef(pTask->pMeta->streamBackendId, pTask->pMeta->streamBackendRid);
- int code = streamStateOpenBackend(pTask->pMeta->streamBackend, pState);
+ SStreamMeta* pMeta = pStreamTask->pMeta;
+ taosAcquireRef(pMeta->streamBackendId, pMeta->streamBackendRid);
+ int code = streamStateOpenBackend(pMeta->streamBackend, pState);
if (code == -1) {
- taosReleaseRef(pTask->pMeta->streamBackendId, pTask->pMeta->streamBackendRid);
+ taosReleaseRef(pMeta->streamBackendId, pMeta->streamBackendRid);
taosMemoryFree(pState);
pState = NULL;
}
+
pState->pTdbState->pOwner = pTask;
pState->pFileState = NULL;
_hash_fn_t hashFn = taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT);
- pState->parNameMap = tSimpleHashInit(1024, hashFn);
+ pState->parNameMap = tSimpleHashInit(1024, hashFn);
return pState;
#else
@@ -449,7 +453,7 @@ int32_t streamStateReleaseBuf(SStreamState* pState, const SWinKey* key, void* pV
#ifdef USE_ROCKSDB
taosMemoryFree(pVal);
#else
- streamFreeVal(pVal);
+ streamStateFreeVal(pVal);
#endif
return 0;
}
@@ -700,7 +704,7 @@ void streamStateFreeCur(SStreamStateCur* pCur) {
taosMemoryFree(pCur);
}
-void streamFreeVal(void* val) {
+void streamStateFreeVal(void* val) {
#ifdef USE_ROCKSDB
taosMemoryFree(val);
#else
@@ -1058,7 +1062,7 @@ _end:
}
int32_t streamStatePutParName(SStreamState* pState, int64_t groupId, const char tbname[TSDB_TABLE_NAME_LEN]) {
- qWarn("try to write to cf parname");
+ qDebug("try to write to cf parname");
#ifdef USE_ROCKSDB
if (tSimpleHashGetSize(pState->parNameMap) > MAX_TABLE_NAME_NUM) {
if (tSimpleHashGet(pState->parNameMap, &groupId, sizeof(int64_t)) == NULL) {
diff --git a/source/libs/stream/src/streamTask.c b/source/libs/stream/src/streamTask.c
index 8a038969785b9a4f2a64436cca1953d95805d128..284d1ecab63423a7b52e80c23d197639d36b6844 100644
--- a/source/libs/stream/src/streamTask.c
+++ b/source/libs/stream/src/streamTask.c
@@ -17,14 +17,25 @@
#include "tstream.h"
#include "wal.h"
-SStreamTask* tNewStreamTask(int64_t streamId) {
+static int32_t mndAddToTaskset(SArray* pArray, SStreamTask* pTask) {
+ int32_t childId = taosArrayGetSize(pArray);
+ pTask->selfChildId = childId;
+ taosArrayPush(pArray, &pTask);
+ return 0;
+}
+
+SStreamTask* tNewStreamTask(int64_t streamId, int8_t taskLevel, int8_t fillHistory, int64_t triggerParam, SArray* pTaskList) {
SStreamTask* pTask = (SStreamTask*)taosMemoryCalloc(1, sizeof(SStreamTask));
if (pTask == NULL) {
+ terrno = TSDB_CODE_OUT_OF_MEMORY;
return NULL;
}
pTask->id.taskId = tGenIdPI32();
pTask->id.streamId = streamId;
+ pTask->taskLevel = taskLevel;
+ pTask->fillHistory = fillHistory;
+ pTask->triggerParam = triggerParam;
char buf[128] = {0};
sprintf(buf, "0x%" PRIx64 "-%d", pTask->id.streamId, pTask->id.taskId);
@@ -34,6 +45,7 @@ SStreamTask* tNewStreamTask(int64_t streamId) {
pTask->inputStatus = TASK_INPUT_STATUS__NORMAL;
pTask->outputStatus = TASK_OUTPUT_STATUS__NORMAL;
+ mndAddToTaskset(pTaskList, pTask);
return pTask;
}
@@ -212,5 +224,9 @@ void tFreeStreamTask(SStreamTask* pTask) {
taosMemoryFree((void*)pTask->id.idStr);
}
+ if (pTask->pNameMap) {
+ tSimpleHashCleanup(pTask->pNameMap);
+ }
+
taosMemoryFree(pTask);
}
diff --git a/source/libs/stream/src/streamUpdate.c b/source/libs/stream/src/streamUpdate.c
index fff666ec9f3f90bd60d921a011fbd8925c52696f..85be120dbd562f2ce6526b391c937a362396b569 100644
--- a/source/libs/stream/src/streamUpdate.c
+++ b/source/libs/stream/src/streamUpdate.c
@@ -20,9 +20,9 @@
#include "ttime.h"
#define DEFAULT_FALSE_POSITIVE 0.01
-#define DEFAULT_BUCKET_SIZE 1310720
-#define DEFAULT_MAP_CAPACITY 1310720
-#define DEFAULT_MAP_SIZE (DEFAULT_MAP_CAPACITY * 10)
+#define DEFAULT_BUCKET_SIZE 131072
+#define DEFAULT_MAP_CAPACITY 131072
+#define DEFAULT_MAP_SIZE (DEFAULT_MAP_CAPACITY * 100)
#define ROWS_PER_MILLISECOND 1
#define MAX_NUM_SCALABLE_BF 100000
#define MIN_NUM_SCALABLE_BF 10
@@ -44,8 +44,8 @@ static void windowSBfAdd(SUpdateInfo *pInfo, uint64_t count) {
}
}
-static void clearItemHelper(void* p) {
- SScalableBf** pBf = p;
+static void clearItemHelper(void *p) {
+ SScalableBf **pBf = p;
tScalableBfDestroy(*pBf);
}
@@ -274,7 +274,7 @@ void updateInfoDestoryColseWinSBF(SUpdateInfo *pInfo) {
}
int32_t updateInfoSerialize(void *buf, int32_t bufLen, const SUpdateInfo *pInfo) {
- if(!pInfo) {
+ if (!pInfo) {
return 0;
}
diff --git a/source/libs/stream/src/tstreamFileState.c b/source/libs/stream/src/tstreamFileState.c
index ad66bb5a275b0ac83626219d26c389dbc207b8f7..bc8450972880e56054f2adacb780b09086213ccd 100644
--- a/source/libs/stream/src/tstreamFileState.c
+++ b/source/libs/stream/src/tstreamFileState.c
@@ -16,6 +16,7 @@
#include "tstreamFileState.h"
#include "query.h"
+#include "storageapi.h"
#include "streamBackendRocksdb.h"
#include "taos.h"
#include "tcommon.h"
@@ -136,7 +137,7 @@ void clearExpiredRowBuff(SStreamFileState* pFileState, TSKEY ts, bool all) {
SListNode* pNode = NULL;
while ((pNode = tdListNext(&iter)) != NULL) {
SRowBuffPos* pPos = *(SRowBuffPos**)(pNode->data);
- if (all || (pFileState->getTs(pPos->pKey) < ts)) {
+ if (all || (pFileState->getTs(pPos->pKey) < ts && !pPos->beUsed)) {
ASSERT(pPos->pRowBuff != NULL);
tdListAppend(pFileState->freeBuffs, &(pPos->pRowBuff));
pPos->pRowBuff = NULL;
@@ -415,10 +416,13 @@ int32_t deleteExpiredCheckPoint(SStreamFileState* pFileState, TSKEY mark) {
int32_t len = 0;
memcpy(buf, taskKey, strlen(taskKey));
code = streamDefaultGet_rocksdb(pFileState->pFileStore, buf, &val, &len);
- if (code != 0) {
+ if (code != 0 || len == 0 || val == NULL) {
return TSDB_CODE_FAILED;
}
- sscanf(val, "%" PRId64 "", &maxCheckPointId);
+ memcpy(buf, val, len);
+ buf[len] = 0;
+ maxCheckPointId = atoll((char*)buf);
+ taosMemoryFree(val);
}
for (int64_t i = maxCheckPointId; i > 0; i--) {
char buf[128] = {0};
@@ -429,13 +433,16 @@ int32_t deleteExpiredCheckPoint(SStreamFileState* pFileState, TSKEY mark) {
if (code != 0) {
return TSDB_CODE_FAILED;
}
+ memcpy(buf, val, len);
+ buf[len] = 0;
+ taosMemoryFree(val);
+
TSKEY ts;
- sscanf(val, "%" PRId64 "", &ts);
+ ts = atoll((char*)buf);
if (ts < mark) {
// statekey winkey.ts < mark
forceRemoveCheckpoint(pFileState, i);
break;
- } else {
}
}
return code;
diff --git a/source/libs/stream/test/CMakeLists.txt b/source/libs/stream/test/CMakeLists.txt
index a0c171769025f62b0a70a3677d9faeab2a4f6693..049bfbbb3aadedbfc59e4b587e3fc9630028b0f1 100644
--- a/source/libs/stream/test/CMakeLists.txt
+++ b/source/libs/stream/test/CMakeLists.txt
@@ -8,10 +8,20 @@ AUX_SOURCE_DIRECTORY(${CMAKE_CURRENT_SOURCE_DIR} SOURCE_LIST)
# bloomFilterTest
ADD_EXECUTABLE(streamUpdateTest "tstreamUpdateTest.cpp")
-TARGET_LINK_LIBRARIES(
- streamUpdateTest
- PUBLIC os util common gtest gtest_main stream
-)
+#TARGET_LINK_LIBRARIES(
+# streamUpdateTest
+# PUBLIC os util common gtest gtest_main stream executor
+#)
+
+IF (TD_GRANT)
+ TARGET_LINK_LIBRARIES(streamUpdateTest
+ PUBLIC os util common gtest gtest_main stream executor index grant
+ )
+ELSE ()
+ TARGET_LINK_LIBRARIES(streamUpdateTest
+ PUBLIC os util common gtest gtest_main stream executor index
+ )
+ENDIF()
TARGET_INCLUDE_DIRECTORIES(
streamUpdateTest
diff --git a/source/libs/sync/inc/syncPipeline.h b/source/libs/sync/inc/syncPipeline.h
index 02790732a23e02e21a5708169780e222711754c2..65e2cc22a07134893f171552b55acf22d8050725 100644
--- a/source/libs/sync/inc/syncPipeline.h
+++ b/source/libs/sync/inc/syncPipeline.h
@@ -79,17 +79,16 @@ static FORCE_INLINE int32_t syncLogReplGetNextRetryBackoff(SSyncLogReplMgr* pMgr
SyncTerm syncLogReplGetPrevLogTerm(SSyncLogReplMgr* pMgr, SSyncNode* pNode, SyncIndex index);
-int32_t syncLogReplDoOnce(SSyncLogReplMgr* pMgr, SSyncNode* pNode);
+int32_t syncLogReplStart(SSyncLogReplMgr* pMgr, SSyncNode* pNode);
int32_t syncLogReplAttempt(SSyncLogReplMgr* pMgr, SSyncNode* pNode);
int32_t syncLogReplProbe(SSyncLogReplMgr* pMgr, SSyncNode* pNode, SyncIndex index);
-
int32_t syncLogReplRetryOnNeed(SSyncLogReplMgr* pMgr, SSyncNode* pNode);
int32_t syncLogReplSendTo(SSyncLogReplMgr* pMgr, SSyncNode* pNode, SyncIndex index, SyncTerm* pTerm, SRaftId* pDestId,
bool* pBarrier);
int32_t syncLogReplProcessReply(SSyncLogReplMgr* pMgr, SSyncNode* pNode, SyncAppendEntriesReply* pMsg);
-int32_t syncLogReplProcessReplyAsRecovery(SSyncLogReplMgr* pMgr, SSyncNode* pNode, SyncAppendEntriesReply* pMsg);
-int32_t syncLogReplProcessReplyAsNormal(SSyncLogReplMgr* pMgr, SSyncNode* pNode, SyncAppendEntriesReply* pMsg);
+int32_t syncLogReplRecover(SSyncLogReplMgr* pMgr, SSyncNode* pNode, SyncAppendEntriesReply* pMsg);
+int32_t syncLogReplContinue(SSyncLogReplMgr* pMgr, SSyncNode* pNode, SyncAppendEntriesReply* pMsg);
int32_t syncLogReplProcessHeartbeatReply(SSyncLogReplMgr* pMgr, SSyncNode* pNode, SyncHeartbeatReply* pMsg);
diff --git a/source/libs/sync/src/syncPipeline.c b/source/libs/sync/src/syncPipeline.c
index 8bb72de518cd7ae4f91d0a6db631f7bc34aeeffc..794d80bbfaec7c38eca4b38f78c62022fd6cc7cc 100644
--- a/source/libs/sync/src/syncPipeline.c
+++ b/source/libs/sync/src/syncPipeline.c
@@ -717,7 +717,7 @@ _out:
return ret;
}
-int32_t syncLogReplProcessReplyAsRecovery(SSyncLogReplMgr* pMgr, SSyncNode* pNode, SyncAppendEntriesReply* pMsg) {
+int32_t syncLogReplRecover(SSyncLogReplMgr* pMgr, SSyncNode* pNode, SyncAppendEntriesReply* pMsg) {
SSyncLogBuffer* pBuf = pNode->pLogBuf;
SRaftId destId = pMsg->srcId;
ASSERT(pMgr->restored == false);
@@ -820,15 +820,15 @@ int32_t syncLogReplProcessReply(SSyncLogReplMgr* pMgr, SSyncNode* pNode, SyncApp
}
if (pMgr->restored) {
- (void)syncLogReplProcessReplyAsNormal(pMgr, pNode, pMsg);
+ (void)syncLogReplContinue(pMgr, pNode, pMsg);
} else {
- (void)syncLogReplProcessReplyAsRecovery(pMgr, pNode, pMsg);
+ (void)syncLogReplRecover(pMgr, pNode, pMsg);
}
taosThreadMutexUnlock(&pBuf->mutex);
return 0;
}
-int32_t syncLogReplDoOnce(SSyncLogReplMgr* pMgr, SSyncNode* pNode) {
+int32_t syncLogReplStart(SSyncLogReplMgr* pMgr, SSyncNode* pNode) {
if (pMgr->restored) {
(void)syncLogReplAttempt(pMgr, pNode);
} else {
@@ -931,7 +931,7 @@ int32_t syncLogReplAttempt(SSyncLogReplMgr* pMgr, SSyncNode* pNode) {
return 0;
}
-int32_t syncLogReplProcessReplyAsNormal(SSyncLogReplMgr* pMgr, SSyncNode* pNode, SyncAppendEntriesReply* pMsg) {
+int32_t syncLogReplContinue(SSyncLogReplMgr* pMgr, SSyncNode* pNode, SyncAppendEntriesReply* pMsg) {
ASSERT(pMgr->restored == true);
if (pMgr->startIndex <= pMsg->lastSendIndex && pMsg->lastSendIndex < pMgr->endIndex) {
if (pMgr->startIndex < pMgr->matchIndex && pMgr->retryBackoff > 0) {
diff --git a/source/libs/sync/src/syncReplication.c b/source/libs/sync/src/syncReplication.c
index 5a3a3bbb709e63d3fb512a6e3bead861df16a5e5..2c7fc70ae9f303b3b7d57e863aa1a453828d93b0 100644
--- a/source/libs/sync/src/syncReplication.c
+++ b/source/libs/sync/src/syncReplication.c
@@ -72,7 +72,7 @@ int32_t syncNodeReplicateWithoutLock(SSyncNode* pNode) {
continue;
}
SSyncLogReplMgr* pMgr = pNode->logReplMgrs[i];
- (void)syncLogReplDoOnce(pMgr, pNode);
+ (void)syncLogReplStart(pMgr, pNode);
}
return 0;
}
diff --git a/source/libs/wal/src/walRead.c b/source/libs/wal/src/walRead.c
index c29d82bcf394b65beb5dcd94e6d35835e0a7cc60..1223e3756cc0ffc436c9eb5d04b4b178af66f6dc 100644
--- a/source/libs/wal/src/walRead.c
+++ b/source/libs/wal/src/walRead.c
@@ -108,6 +108,14 @@ int32_t walNextValidMsg(SWalReader *pReader) {
int64_t walReaderGetCurrentVer(const SWalReader *pReader) { return pReader->curVersion; }
int64_t walReaderGetValidFirstVer(const SWalReader *pReader) { return walGetFirstVer(pReader->pWal); }
+void walReaderSetSkipToVersion(SWalReader *pReader, int64_t ver) { atomic_store_64(&pReader->skipToVersion, ver); }
+
+// this function is NOT multi-thread safe, and no need to be.
+int64_t walReaderGetSkipToVersion(SWalReader *pReader) {
+ int64_t newVersion = pReader->skipToVersion;
+ pReader->skipToVersion = 0;
+ return newVersion;
+}
void walReaderValidVersionRange(SWalReader *pReader, int64_t *sver, int64_t *ever) {
*sver = walGetFirstVer(pReader->pWal);
@@ -325,7 +333,7 @@ static int32_t walFetchBodyNew(SWalReader *pReader) {
return -1;
}
- wDebug("vgId:%d, index:%" PRId64 " is fetched, cursor advance", pReader->pWal->cfg.vgId, ver);
+ wDebug("vgId:%d, index:%" PRId64 " is fetched, type:%d, cursor advance", pReader->pWal->cfg.vgId, ver, pReader->pHead->head.msgType);
pReader->curVersion = ver + 1;
return 0;
}
diff --git a/source/libs/wal/src/walRef.c b/source/libs/wal/src/walRef.c
index 6aba66192684e262baddf4215d043a875c15f6fc..eb36389f1d22c21c828a1c79bdad5ab1deeedbfc 100644
--- a/source/libs/wal/src/walRef.c
+++ b/source/libs/wal/src/walRef.c
@@ -63,21 +63,22 @@ int32_t walSetRefVer(SWalRef *pRef, int64_t ver) {
return 0;
}
-SWalRef *walRefFirstVer(SWal *pWal, SWalRef *pRef) {
- if (pRef == NULL) {
- pRef = walOpenRef(pWal);
- if (pRef == NULL) {
- return NULL;
- }
- }
+void walRefFirstVer(SWal *pWal, SWalRef *pRef) {
taosThreadMutexLock(&pWal->mutex);
int64_t ver = walGetFirstVer(pWal);
pRef->refVer = ver;
taosThreadMutexUnlock(&pWal->mutex);
wDebug("vgId:%d, wal ref version %" PRId64 " for first", pWal->cfg.vgId, ver);
+}
- return pRef;
+void walRefLastVer(SWal *pWal, SWalRef *pRef) {
+ taosThreadMutexLock(&pWal->mutex);
+ int64_t ver = walGetLastVer(pWal);
+ pRef->refVer = ver;
+
+ taosThreadMutexUnlock(&pWal->mutex);
+ wDebug("vgId:%d, wal ref version %" PRId64 " for last", pWal->cfg.vgId, ver);
}
SWalRef *walRefCommittedVer(SWal *pWal) {
diff --git a/source/util/src/terror.c b/source/util/src/terror.c
index 31727f753579a1471ee4422fc29fee45d0d03dce..7cf95dcdea31a6ad32e766e0e616d21edeb8d5c3 100644
--- a/source/util/src/terror.c
+++ b/source/util/src/terror.c
@@ -325,6 +325,7 @@ TAOS_DEFINE_ERROR(TSDB_CODE_MNODE_NOT_CATCH_UP, "Mnode didn't catch th
TAOS_DEFINE_ERROR(TSDB_CODE_MNODE_ALREADY_IS_VOTER, "Mnode already is a leader")
TAOS_DEFINE_ERROR(TSDB_CODE_MNODE_ONLY_TWO_MNODE, "Only two mnodes exist")
TAOS_DEFINE_ERROR(TSDB_CODE_MNODE_NO_NEED_RESTORE, "No need to restore on this dnode")
+TAOS_DEFINE_ERROR(TSDB_CODE_DNODE_ONLY_USE_WHEN_OFFLINE, "Please use this command when the dnode is offline")
// vnode
TAOS_DEFINE_ERROR(TSDB_CODE_VND_INVALID_VGROUP_ID, "Vnode is closed or removed")
@@ -381,7 +382,7 @@ TAOS_DEFINE_ERROR(TSDB_CODE_QRY_EXCEED_TAGS_LIMIT, "Tag conditon too many
TAOS_DEFINE_ERROR(TSDB_CODE_QRY_NOT_READY, "Query not ready")
TAOS_DEFINE_ERROR(TSDB_CODE_QRY_HAS_RSP, "Query should response")
TAOS_DEFINE_ERROR(TSDB_CODE_QRY_IN_EXEC, "Multiple retrieval of this query")
-TAOS_DEFINE_ERROR(TSDB_CODE_QRY_TOO_MANY_TIMEWINDOW, "Too many time window in query")
+TAOS_DEFINE_ERROR(TSDB_CODE_QRY_TOO_MANY_TIMEWINDOW, "Too many groups/time window in query")
TAOS_DEFINE_ERROR(TSDB_CODE_QRY_NOT_ENOUGH_BUFFER, "Query buffer limit has reached")
TAOS_DEFINE_ERROR(TSDB_CODE_QRY_INCONSISTAN, "File inconsistance in replica")
TAOS_DEFINE_ERROR(TSDB_CODE_QRY_SYS_ERROR, "System error")
@@ -541,7 +542,7 @@ TAOS_DEFINE_ERROR(TSDB_CODE_PAR_INVALID_TIMELINE_FUNC, "Invalid timeline fu
TAOS_DEFINE_ERROR(TSDB_CODE_PAR_INVALID_PASSWD, "Invalid password")
TAOS_DEFINE_ERROR(TSDB_CODE_PAR_INVALID_ALTER_TABLE, "Invalid alter table statement")
TAOS_DEFINE_ERROR(TSDB_CODE_PAR_CANNOT_DROP_PRIMARY_KEY, "Primary timestamp column cannot be dropped")
-TAOS_DEFINE_ERROR(TSDB_CODE_PAR_INVALID_MODIFY_COL, "Only binary/nchar column length could be modified, and the length can only be increased, not decreased")
+TAOS_DEFINE_ERROR(TSDB_CODE_PAR_INVALID_MODIFY_COL, "Only binary/nchar/geometry column length could be modified, and the length can only be increased, not decreased")
TAOS_DEFINE_ERROR(TSDB_CODE_PAR_INVALID_TBNAME, "Invalid tbname pseudo column")
TAOS_DEFINE_ERROR(TSDB_CODE_PAR_INVALID_FUNCTION_NAME, "Invalid function name")
TAOS_DEFINE_ERROR(TSDB_CODE_PAR_COMMENT_TOO_LONG, "Comment too long")
diff --git a/source/util/src/tlog.c b/source/util/src/tlog.c
index 711760e5499abff27317395669b485c9fd74c9bb..70588887a00040d0b0e679846ab8256cef6bbe2a 100644
--- a/source/util/src/tlog.c
+++ b/source/util/src/tlog.c
@@ -253,15 +253,15 @@ static void taosKeepOldLog(char *oldName) {
(void)taosRenameFile(oldName, fileName);
- if (tsLogKeepDays < 0) {
- char compressFileName[LOG_FILE_NAME_LEN + 20];
- snprintf(compressFileName, LOG_FILE_NAME_LEN + 20, "%s.%" PRId64 ".gz", tsLogObj.logName, fileSec);
- if (taosCompressFile(fileName, compressFileName) == 0) {
- (void)taosRemoveFile(fileName);
- }
+ char compressFileName[LOG_FILE_NAME_LEN + 20];
+ snprintf(compressFileName, LOG_FILE_NAME_LEN + 20, "%s.%" PRId64 ".gz", tsLogObj.logName, fileSec);
+ if (taosCompressFile(fileName, compressFileName) == 0) {
+ (void)taosRemoveFile(fileName);
}
- taosRemoveOldFiles(tsLogDir, TABS(tsLogKeepDays));
+ if (tsLogKeepDays > 0) {
+ taosRemoveOldFiles(tsLogDir, tsLogKeepDays);
+ }
}
static void *taosThreadToOpenNewFile(void *param) {
diff --git a/source/util/src/tpagedbuf.c b/source/util/src/tpagedbuf.c
index fa8b5d33b7e8f45fe37d658f1ba336d21a43e5b4..8f3a80ded42894e86763631198e220bb5f14410b 100644
--- a/source/util/src/tpagedbuf.c
+++ b/source/util/src/tpagedbuf.c
@@ -166,7 +166,7 @@ static char* doFlushBufPage(SDiskbasedBuf* pBuf, SPageInfo* pg) {
char* t = NULL;
if ((!HAS_DATA_IN_DISK(pg)) || pg->dirty) {
void* payload = GET_PAYLOAD_DATA(pg);
- t = doCompressData(payload, pBuf->pageSize, &size, pBuf);
+ t = doCompressData(payload, pBuf->pageSize + sizeof(SFilePage), &size, pBuf);
if (size < 0) {
uError("failed to compress data when flushing data to disk, %s", pBuf->id);
terrno = TSDB_CODE_INVALID_PARA;
@@ -482,6 +482,7 @@ void* getBufPage(SDiskbasedBuf* pBuf, int32_t id) {
SPageInfo** pInfo = (SPageInfo**)((*pi)->pn->data);
if (*pInfo != *pi) {
+ terrno = TSDB_CODE_APP_ERROR;
uError("inconsistently data in paged buffer, pInfo:%p, pi:%p, %s", *pInfo, *pi, pBuf->id);
return NULL;
}
diff --git a/source/util/test/CMakeLists.txt b/source/util/test/CMakeLists.txt
index 2e307771b7ed6c5473c4edc0b1947de199f82f10..0bf06e6f44ece7587573f7f0131f62e1c46df62f 100644
--- a/source/util/test/CMakeLists.txt
+++ b/source/util/test/CMakeLists.txt
@@ -75,4 +75,12 @@ target_link_libraries(rbtreeTest os util gtest_main)
add_test(
NAME rbtreeTest
COMMAND rbtreeTest
-)
\ No newline at end of file
+)
+
+# pageBufferTest
+add_executable(pageBufferTest "pageBufferTest.cpp")
+target_link_libraries(pageBufferTest os util gtest_main)
+add_test(
+ NAME pageBufferTest
+ COMMAND pageBufferTest
+)
diff --git a/source/util/test/pageBufferTest.cpp b/source/util/test/pageBufferTest.cpp
index 00ed80493073c74822fed86d3cc6ef970f810bcb..50d3656ccd66f49e4875ba775ead48c4c6559c5e 100644
--- a/source/util/test/pageBufferTest.cpp
+++ b/source/util/test/pageBufferTest.cpp
@@ -157,6 +157,68 @@ void recyclePageTest() {
destroyDiskbasedBuf(pBuf);
}
+
+int saveDataToPage(SFilePage* pPg, const char* data, uint32_t len) {
+ memcpy(pPg->data + pPg->num, data, len);
+ pPg->num += len;
+ setBufPageDirty(pPg, true);
+ return 0;
+}
+
+bool checkBufVarData(SFilePage* pPg, const char* varData, uint32_t varLen) {
+ const char* start = pPg->data + sizeof(SFilePage);
+ for (uint32_t i = 0; i < (pPg->num - sizeof(SFilePage)) / varLen; ++i) {
+ if (0 != strncmp(start + 6 * i + 3, varData, varLen - 3)) {
+ using namespace std;
+ cout << "pos: " << sizeof(SFilePage) + 6 * i + 3 << " should be " << varData << " but is: " << start + 6 * i + 3
+ << endl;
+ return false;
+ }
+ }
+ return true;
+}
+
+// SPageInfo.pData: | sizeof(void*) 8 bytes | sizeof(SFilePage) 4 bytes| 4096 bytes |
+// ^
+// |
+// SFilePage: flush to disk from here
+void testFlushAndReadBackBuffer() {
+ SDiskbasedBuf* pBuf = NULL;
+ uint32_t totalLen = 4096;
+ auto code = createDiskbasedBuf(&pBuf, totalLen, totalLen * 2, "1", TD_TMP_DIR_PATH);
+ int32_t pageId = -1;
+ auto* pPg = (SFilePage*)getNewBufPage(pBuf, &pageId);
+ ASSERT_TRUE(pPg != nullptr);
+ pPg->num = sizeof(SFilePage);
+
+ // save data into page
+ uint32_t len = 6; // sizeof(SFilePage) + 6 * 682 = 4096
+ // nullbitmap(1) + len(2) + AA\0(3)
+ char* rowData = (char*)taosMemoryCalloc(1, len);
+ *(uint16_t*)(rowData + 2) = (uint16_t)2;
+ rowData[3] = 'A';
+ rowData[4] = 'A';
+
+ while (pPg->num + len <= getBufPageSize(pBuf)) {
+ saveDataToPage(pPg, rowData, len);
+ }
+ ASSERT_EQ(pPg->num, totalLen);
+ ASSERT_TRUE(checkBufVarData(pPg, rowData + 3, len));
+ releaseBufPage(pBuf, pPg);
+
+ // flush to disk
+ int32_t newPgId = -1;
+ pPg = (SFilePage*)getNewBufPage(pBuf, &newPgId);
+ releaseBufPage(pBuf, pPg);
+ pPg = (SFilePage*)getNewBufPage(pBuf, &newPgId);
+ releaseBufPage(pBuf, pPg);
+
+ // reload it from disk
+ pPg = (SFilePage*)getBufPage(pBuf, pageId);
+ ASSERT_TRUE(checkBufVarData(pPg, rowData + 3, len));
+ destroyDiskbasedBuf(pBuf);
+}
+
} // namespace
TEST(testCase, resultBufferTest) {
@@ -164,6 +226,7 @@ TEST(testCase, resultBufferTest) {
simpleTest();
writeDownTest();
recyclePageTest();
+ testFlushAndReadBackBuffer();
}
-#pragma GCC diagnostic pop
\ No newline at end of file
+#pragma GCC diagnostic pop
diff --git a/tests/parallel_test/cases.task b/tests/parallel_test/cases.task
index e0d94c0c55b082e7e82693052dd766cdd209e522..a81209b835bc344baac88104257ae9250e1321ab 100644
--- a/tests/parallel_test/cases.task
+++ b/tests/parallel_test/cases.task
@@ -497,6 +497,7 @@
,,y,system-test,./pytest.sh python3 ./test.py -f 7-tmq/db.py
,,y,system-test,./pytest.sh python3 ./test.py -f 7-tmq/tmqError.py
,,y,system-test,./pytest.sh python3 ./test.py -f 7-tmq/schema.py
+,,y,system-test,./pytest.sh python3 ./test.py -f 7-tmq/stbFilterWhere.py
,,y,system-test,./pytest.sh python3 ./test.py -f 7-tmq/stbFilter.py
,,y,system-test,./pytest.sh python3 ./test.py -f 7-tmq/tmqCheckData.py
,,y,system-test,./pytest.sh python3 ./test.py -f 7-tmq/tmqCheckData1.py
@@ -563,7 +564,7 @@
,,y,system-test,./pytest.sh python3 ./test.py -f 0-others/user_privilege.py
,,y,system-test,./pytest.sh python3 ./test.py -f 0-others/fsync.py
,,y,system-test,./pytest.sh python3 ./test.py -f 0-others/multilevel.py
-#,,n,system-test,python3 ./test.py -f 0-others/compatibility.py
+,,n,system-test,python3 ./test.py -f 0-others/compatibility.py
,,n,system-test,python3 ./test.py -f 0-others/tag_index_basic.py
,,n,system-test,python3 ./test.py -f 0-others/udfpy_main.py
,,n,system-test,python3 ./test.py -N 3 -f 0-others/walRetention.py
@@ -571,7 +572,7 @@
,,y,system-test,./pytest.sh python3 ./test.py -f 1-insert/alter_replica.py -N 3
,,y,system-test,./pytest.sh python3 ./test.py -f 1-insert/influxdb_line_taosc_insert.py
,,y,system-test,./pytest.sh python3 ./test.py -f 1-insert/opentsdb_telnet_line_taosc_insert.py
-#,,y,system-test,./pytest.sh python3 ./test.py -f 1-insert/opentsdb_json_taosc_insert.py
+,,y,system-test,./pytest.sh python3 ./test.py -f 1-insert/opentsdb_json_taosc_insert.py
,,y,system-test,./pytest.sh python3 ./test.py -f 1-insert/test_stmt_muti_insert_query.py
,,y,system-test,./pytest.sh python3 ./test.py -f 1-insert/test_stmt_set_tbname_tag.py
,,y,system-test,./pytest.sh python3 ./test.py -f 1-insert/alter_stable.py
@@ -742,6 +743,7 @@
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/sample.py -R
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/sin.py
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/sin.py -R
+,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/smaBasic.py -N 3
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/smaTest.py
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/smaTest.py -R
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/sml.py
@@ -818,6 +820,8 @@
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/function_diff.py
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/tagFilter.py
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/projectionDesc.py
+,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/ts_3398.py -N 3 -n 3
+
,,n,system-test,python3 ./test.py -f 2-query/queryQnode.py
,,y,system-test,./pytest.sh python3 ./test.py -f 6-cluster/5dnode1mnode.py
,,y,system-test,./pytest.sh python3 ./test.py -f 6-cluster/5dnode2mnode.py -N 5
@@ -1089,7 +1093,7 @@
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/timetruncate.py -Q 4
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/diff.py -Q 4
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/Timediff.py -Q 4
-#,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/json_tag.py -Q 4
+,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/json_tag.py -Q 4
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/top.py -Q 4
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/bottom.py -Q 4
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/percentile.py -Q 4
@@ -1274,6 +1278,7 @@
,,y,script,./test.sh -f tsim/parser/columnValue_tinyint.sim
,,y,script,./test.sh -f tsim/parser/columnValue_unsign.sim
,,y,script,./test.sh -f tsim/parser/condition.sim
+,,y,script,./test.sh -f tsim/parser/condition_scl.sim
,,y,script,./test.sh -f tsim/parser/constCol.sim
,,y,script,./test.sh -f tsim/parser/create_db.sim
,,y,script,./test.sh -f tsim/parser/create_mt.sim
@@ -1350,13 +1355,16 @@
,,y,script,./test.sh -f tsim/query/multi_order_by.sim
,,y,script,./test.sh -f tsim/query/sys_tbname.sim
,,y,script,./test.sh -f tsim/query/groupby.sim
+,,y,script,./test.sh -f tsim/query/groupby_distinct.sim
,,y,script,./test.sh -f tsim/query/event.sim
,,y,script,./test.sh -f tsim/query/forceFill.sim
,,y,script,./test.sh -f tsim/query/emptyTsRange.sim
+,,y,script,./test.sh -f tsim/query/emptyTsRange_scl.sim
,,y,script,./test.sh -f tsim/query/partitionby.sim
,,y,script,./test.sh -f tsim/query/tableCount.sim
,,y,script,./test.sh -f tsim/query/tag_scan.sim
,,y,script,./test.sh -f tsim/query/nullColSma.sim
+,,y,script,./test.sh -f tsim/query/bug3398.sim
,,y,script,./test.sh -f tsim/qnode/basic1.sim
,,y,script,./test.sh -f tsim/snode/basic1.sim
,,y,script,./test.sh -f tsim/mnode/basic1.sim
diff --git a/tests/pytest/crash_gen/crash_gen_main.py b/tests/pytest/crash_gen/crash_gen_main.py
index ec588659e93ffc30d21355b970e595d08f04a9f7..5024f1e2fe25b2ac26e6434da3218c304661f89c 100755
--- a/tests/pytest/crash_gen/crash_gen_main.py
+++ b/tests/pytest/crash_gen/crash_gen_main.py
@@ -1722,12 +1722,14 @@ class TaskCreateDb(StateTransitionTask):
vg_nums = random.randint(1, 8)
cache_model = Dice.choice(['none', 'last_row', 'last_value', 'both'])
buffer = random.randint(3, 128)
+ walRetentionPeriod = random.randint(1, 10000)
dbName = self._db.getName()
- self.execWtSql(wt, "create database {} {} {} vgroups {} cachemodel '{}' buffer {} ".format(dbName, repStr,
+ self.execWtSql(wt, "create database {} {} {} vgroups {} cachemodel '{}' buffer {} wal_retention_period {} ".format(dbName, repStr,
updatePostfix,
vg_nums,
cache_model,
- buffer))
+ buffer,
+ walRetentionPeriod))
if dbName == "db_0" and Config.getConfig().use_shadow_db:
self.execWtSql(wt, "create database {} {} {} ".format("db_s", repStr, updatePostfix))
@@ -2041,18 +2043,19 @@ class TdSuperTable:
for topic in current_topic_list:
topic_list.append(topic)
- consumer.subscribe(topic_list)
-
- # consumer with random work life
- time_start = time.time()
- while 1:
- res = consumer.poll(1)
- consumer.commit(res)
- if time.time() - time_start > random.randint(5, 50):
- break
try:
+ consumer.subscribe(topic_list)
+
+ # consumer with random work life
+ time_start = time.time()
+ while 1:
+ res = consumer.poll(1)
+ consumer.commit(res)
+ if time.time() - time_start > random.randint(5, 50):
+ break
consumer.unsubscribe()
- except TmqError as e:
+ consumer.close()
+ except TmqError as err: # topic deleted by other threads
pass
return
diff --git a/tests/script/api/batchprepare.c b/tests/script/api/batchprepare.c
index a8feefec1e984414a4ade8d09ef887de917a4272..604d6ade8944570d44331f9b3ecc9e722a096ee7 100644
--- a/tests/script/api/batchprepare.c
+++ b/tests/script/api/batchprepare.c
@@ -16,8 +16,8 @@
int32_t shortColList[] = {TSDB_DATA_TYPE_TIMESTAMP, TSDB_DATA_TYPE_INT};
int32_t fullColList[] = {TSDB_DATA_TYPE_TIMESTAMP, TSDB_DATA_TYPE_BOOL, TSDB_DATA_TYPE_TINYINT, TSDB_DATA_TYPE_UTINYINT, TSDB_DATA_TYPE_SMALLINT, TSDB_DATA_TYPE_USMALLINT, TSDB_DATA_TYPE_INT, TSDB_DATA_TYPE_UINT, TSDB_DATA_TYPE_BIGINT, TSDB_DATA_TYPE_UBIGINT, TSDB_DATA_TYPE_FLOAT, TSDB_DATA_TYPE_DOUBLE, TSDB_DATA_TYPE_BINARY, TSDB_DATA_TYPE_NCHAR};
-int32_t bindColTypeList[] = {TSDB_DATA_TYPE_TIMESTAMP, TSDB_DATA_TYPE_INT};
-int32_t optrIdxList[] = {0, 7};
+int32_t bindColTypeList[] = {TSDB_DATA_TYPE_TIMESTAMP, TSDB_DATA_TYPE_NCHAR};
+int32_t optrIdxList[] = {5, 11};
typedef struct {
char* oper;
@@ -123,6 +123,7 @@ int insertAUTOTest3(TAOS_STMT *stmt, TAOS *taos);
int queryColumnTest(TAOS_STMT *stmt, TAOS *taos);
int queryMiscTest(TAOS_STMT *stmt, TAOS *taos);
int insertNonExistsTb(TAOS_STMT *stmt, TAOS *taos);
+int insertVarLenErr(TAOS_STMT *stmt, TAOS *taos);
enum {
TTYPE_INSERT = 1,
@@ -190,6 +191,7 @@ CaseCfg gCase[] = {
{"query:SUBT-MISC", tListLen(fullColList), fullColList, TTYPE_QUERY, 0, false, false, queryMiscTest, 10, 10, 1, 3, 0, 0, 1, 2},
{"query:NG-TBNEXISTS",tListLen(fullColList), fullColList, TTYPE_INSERT_NG,0, false, false, insertNonExistsTb, 10, 10, 1, 3, 0, 0, 1, -1},
+ {"query:NG-VARLENERR",tListLen(fullColList), fullColList, TTYPE_INSERT_NG,0, false, true, insertVarLenErr, 10, 10, 1, 3, 0, 0, 1, -1},
// {"query:SUBT-COLUMN", tListLen(fullColList), fullColList, TTYPE_QUERY, 0, false, false, queryColumnTest, 1, 10, 1, 1, 0, 0, 1, 2},
// {"query:SUBT-MISC", tListLen(fullColList), fullColList, TTYPE_QUERY, 0, false, false, queryMiscTest, 2, 10, 1, 1, 0, 0, 1, 2},
@@ -319,7 +321,7 @@ CaseCtrl gCaseCtrl = { // query case with specified col&oper
#if 0
CaseCtrl gCaseCtrl = { // query case with specified col&oper
- .bindNullNum = 1,
+ .bindNullNum = 0,
.printCreateTblSql = true,
.printQuerySql = true,
.printStmtSql = true,
@@ -329,18 +331,19 @@ CaseCtrl gCaseCtrl = { // query case with specified col&oper
.bindTagNum = 0,
.bindRowNum = 0,
.bindColTypeNum = 0,
- .bindColTypeList = NULL,
+ .bindColTypeList = bindColTypeList,
.optrIdxListNum = 0,
- .optrIdxList = NULL,
+ .optrIdxList = optrIdxList,
.checkParamNum = false,
.printRes = true,
.runTimes = 0,
.caseRunIdx = -1,
- //.optrIdxListNum = tListLen(optrIdxList),
- //.optrIdxList = optrIdxList,
- //.bindColTypeNum = tListLen(bindColTypeList),
- //.bindColTypeList = bindColTypeList,
- .caseIdx = 8,
+ .optrIdxListNum = tListLen(optrIdxList),
+ .optrIdxList = optrIdxList,
+ .bindColTypeNum = tListLen(bindColTypeList),
+ .bindColTypeList = bindColTypeList,
+ .caseRunIdx = -1,
+ .caseIdx = 24,
.caseNum = 1,
.caseRunNum = 1,
};
@@ -1450,14 +1453,17 @@ void bpShowBindParam(TAOS_MULTI_BIND *bind, int32_t num) {
}
}
-int32_t bpBindParam(TAOS_STMT *stmt, TAOS_MULTI_BIND *bind) {
+int32_t bpBindParam(TAOS_STMT *stmt, TAOS_MULTI_BIND *bind, bool expectFail) {
static int32_t n = 0;
- bpCheckColFields(stmt, bind);
+ if (!expectFail) {
+ bpCheckColFields(stmt, bind);
+ }
if (gCurCase->bindRowNum > 1) {
if (0 == (n++%2)) {
if (taos_stmt_bind_param_batch(stmt, bind)) {
+ if (expectFail) return 0;
printf("!!!taos_stmt_bind_param_batch error:%s\n", taos_stmt_errstr(stmt));
bpShowBindParam(bind, gCurCase->bindColNum);
exit(1);
@@ -1465,6 +1471,7 @@ int32_t bpBindParam(TAOS_STMT *stmt, TAOS_MULTI_BIND *bind) {
} else {
for (int32_t i = 0; i < gCurCase->bindColNum; ++i) {
if (taos_stmt_bind_single_param_batch(stmt, bind+i, i)) {
+ if (expectFail) continue;
printf("!!!taos_stmt_bind_single_param_batch %d error:%s\n", taos_stmt_errstr(stmt), i);
bpShowBindParam(bind, gCurCase->bindColNum);
exit(1);
@@ -1474,12 +1481,14 @@ int32_t bpBindParam(TAOS_STMT *stmt, TAOS_MULTI_BIND *bind) {
} else {
if (0 == (n++%2)) {
if (taos_stmt_bind_param_batch(stmt, bind)) {
+ if (expectFail) return 0;
printf("!!!taos_stmt_bind_param_batch error:%s\n", taos_stmt_errstr(stmt));
bpShowBindParam(bind, gCurCase->bindColNum);
exit(1);
}
} else {
if (taos_stmt_bind_param(stmt, bind)) {
+ if (expectFail) return 0;
printf("!!!taos_stmt_bind_param error:%s\n", taos_stmt_errstr(stmt));
bpShowBindParam(bind, gCurCase->bindColNum);
exit(1);
@@ -1542,7 +1551,7 @@ int insertMBSETest1(TAOS_STMT *stmt, TAOS *taos) {
}
for (int32_t b = 0; b bindColNum + b*gCurCase->bindColNum)) {
+ if (bpBindParam(stmt, data.pBind + t*bindTimes*gCurCase->bindColNum + b*gCurCase->bindColNum, false)) {
exit(1);
}
@@ -1594,7 +1603,7 @@ int insertMBSETest2(TAOS_STMT *stmt, TAOS *taos) {
}
}
- if (bpBindParam(stmt, data.pBind + t*bindTimes*gCurCase->bindColNum + b*gCurCase->bindColNum)) {
+ if (bpBindParam(stmt, data.pBind + t*bindTimes*gCurCase->bindColNum + b*gCurCase->bindColNum, false)) {
exit(1);
}
@@ -1652,7 +1661,7 @@ int insertMBMETest1(TAOS_STMT *stmt, TAOS *taos) {
}
for (int32_t b = 0; b bindColNum + b*gCurCase->bindColNum)) {
+ if (bpBindParam(stmt, data.pBind + t*bindTimes*gCurCase->bindColNum + b*gCurCase->bindColNum, false)) {
exit(1);
}
@@ -1702,7 +1711,7 @@ int insertMBMETest2(TAOS_STMT *stmt, TAOS *taos) {
}
for (int32_t b = 0; b bindColNum + b*gCurCase->bindColNum)) {
+ if (bpBindParam(stmt, data.pBind + t*bindTimes*gCurCase->bindColNum + b*gCurCase->bindColNum, false)) {
exit(1);
}
@@ -1770,7 +1779,7 @@ int insertMBMETest3(TAOS_STMT *stmt, TAOS *taos) {
}
}
- if (bpBindParam(stmt, data.pBind + t*bindTimes*gCurCase->bindColNum + b*gCurCase->bindColNum)) {
+ if (bpBindParam(stmt, data.pBind + t*bindTimes*gCurCase->bindColNum + b*gCurCase->bindColNum, false)) {
exit(1);
}
@@ -1822,7 +1831,7 @@ int insertMBMETest4(TAOS_STMT *stmt, TAOS *taos) {
}
}
- if (bpBindParam(stmt, data.pBind + t*bindTimes*gCurCase->bindColNum + b*gCurCase->bindColNum)) {
+ if (bpBindParam(stmt, data.pBind + t*bindTimes*gCurCase->bindColNum + b*gCurCase->bindColNum, false)) {
exit(1);
}
@@ -1883,7 +1892,7 @@ int insertMPMETest1(TAOS_STMT *stmt, TAOS *taos) {
}
for (int32_t b = 0; b bindColNum + b*gCurCase->bindColNum)) {
+ if (bpBindParam(stmt, data.pBind + t*bindTimes*gCurCase->bindColNum + b*gCurCase->bindColNum, false)) {
exit(1);
}
@@ -1949,7 +1958,7 @@ int insertAUTOTest1(TAOS_STMT *stmt, TAOS *taos) {
}
for (int32_t b = 0; b bindColNum + b*gCurCase->bindColNum)) {
+ if (bpBindParam(stmt, data.pBind + t*bindTimes*gCurCase->bindColNum + b*gCurCase->bindColNum, false)) {
exit(1);
}
@@ -2016,7 +2025,7 @@ int insertAUTOTest2(TAOS_STMT *stmt, TAOS *taos) {
if (gCaseCtrl.checkParamNum) {
bpCheckParamNum(stmt);
}
- if (bpBindParam(stmt, data.pBind + t*bindTimes*gCurCase->bindColNum + b*gCurCase->bindColNum)) {
+ if (bpBindParam(stmt, data.pBind + t*bindTimes*gCurCase->bindColNum + b*gCurCase->bindColNum, false)) {
exit(1);
}
@@ -2076,7 +2085,7 @@ int insertAUTOTest3(TAOS_STMT *stmt, TAOS *taos) {
bpCheckParamNum(stmt);
}
- if (bpBindParam(stmt, data.pBind + t*bindTimes*gCurCase->bindColNum + b*gCurCase->bindColNum)) {
+ if (bpBindParam(stmt, data.pBind + t*bindTimes*gCurCase->bindColNum + b*gCurCase->bindColNum, false)) {
exit(1);
}
@@ -2130,7 +2139,7 @@ int queryColumnTest(TAOS_STMT *stmt, TAOS *taos) {
bpCheckParamNum(stmt);
}
- if (bpBindParam(stmt, data.pBind + n * gCurCase->bindColNum)) {
+ if (bpBindParam(stmt, data.pBind + n * gCurCase->bindColNum, false)) {
exit(1);
}
@@ -2178,7 +2187,7 @@ int queryMiscTest(TAOS_STMT *stmt, TAOS *taos) {
bpCheckParamNum(stmt);
}
- if (bpBindParam(stmt, data.pBind + n * gCurCase->bindColNum)) {
+ if (bpBindParam(stmt, data.pBind + n * gCurCase->bindColNum, false)) {
exit(1);
}
@@ -2245,6 +2254,42 @@ int insertNonExistsTb(TAOS_STMT *stmt, TAOS *taos) {
return 0;
}
+void bpAddWrongVarBuffLen(TAOS_MULTI_BIND* pBind) {
+ for (int32_t i = 0; i < gCurCase->bindColNum; ++i) {
+ if (pBind[i].buffer_type == TSDB_DATA_TYPE_BINARY || pBind[i].buffer_type == TSDB_DATA_TYPE_NCHAR) {
+ *pBind[i].length += 100;
+ }
+ }
+}
+
+int insertVarLenErr(TAOS_STMT *stmt, TAOS *taos) {
+ BindData data = {0};
+ prepareInsertData(&data);
+
+ int code = taos_stmt_prepare(stmt, data.sql, 0);
+ if (code != 0){
+ printf("!!!failed to execute taos_stmt_prepare. error:%s\n", taos_stmt_errstr(stmt));
+ exit(1);
+ }
+
+ bpCheckIsInsert(stmt, 1);
+
+ code = bpSetTableNameTags(&data, 0, "t0", stmt);
+ if (code != 0){
+ printf("!!!taos_stmt_set_tbname error:%s\n", taos_stmt_errstr(stmt));
+ exit(1);
+ }
+
+ bpAddWrongVarBuffLen(data.pBind);
+
+ if (bpBindParam(stmt, data.pBind, true)) {
+ exit(1);
+ }
+
+ destroyData(&data);
+
+ return 0;
+}
int errorSQLTest1(TAOS_STMT *stmt, TAOS *taos) {
diff --git a/tests/script/sh/pycumsum.py b/tests/script/sh/pycumsum.py
new file mode 100644
index 0000000000000000000000000000000000000000..27d575aec4f997874172d3823295cd3621375b3f
--- /dev/null
+++ b/tests/script/sh/pycumsum.py
@@ -0,0 +1,29 @@
+import pickle
+import numpy as np
+
+def init():
+ pass
+
+def destroy():
+ pass
+
+def start():
+ return pickle.dumps(0.0)
+
+def finish(buf):
+ return pickle.loads(buf)
+
+def reduce(datablock, buf):
+ (rows, cols) = datablock.shape()
+ state = pickle.loads(buf)
+ row = []
+ for i in range(rows):
+ for j in range(cols):
+ cell = datablock.data(i, j)
+ if cell is not None:
+ row.append(datablock.data(i, j))
+ if len(row) > 1:
+ new_state = np.cumsum(row)[-1]
+ else:
+ new_state = state
+ return pickle.dumps(new_state)
diff --git a/tests/script/tsim/compute/last_row.sim b/tests/script/tsim/compute/last_row.sim
index 2e060dc28526dba7f0a692046b9d987be861cebb..8e62fbffb5101fc2c12c885565a2506bcb84a350 100644
--- a/tests/script/tsim/compute/last_row.sim
+++ b/tests/script/tsim/compute/last_row.sim
@@ -213,4 +213,54 @@ if $rows != 2 then
return -1
endi
+print =======================> regress bug in last_row query
+sql drop database if exists db;
+sql create database if not exists db vgroups 1 cachemodel 'both';
+sql create table db.stb (ts timestamp, c0 bigint) tags(t1 int);
+sql insert into db.stb_0 using db.stb tags(1) values ('2023-11-23 19:06:40.000', 491173569);
+sql insert into db.stb_2 using db.stb tags(3) values ('2023-11-25 19:30:00.000', 2080726142);
+sql insert into db.stb_3 using db.stb tags(4) values ('2023-11-26 06:48:20.000', 1907405128);
+sql insert into db.stb_4 using db.stb tags(5) values ('2023-11-24 22:56:40.000', 220783803);
+
+sql create table db.stb_1 using db.stb tags(2);
+sql insert into db.stb_1 (ts) values('2023-11-26 13:11:40.000');
+sql insert into db.stb_1 (ts, c0) values('2023-11-26 13:11:39.000', 11);
+
+sql select tbname,ts,last_row(c0) from db.stb;
+if $rows != 1 then
+ return -1
+endi
+
+if $data00 != @stb_1@ then
+ return -1
+endi
+
+if $data01 != @23-11-26 13:11:40.000@ then
+ return -1
+endi
+
+if $data02 != NULL then
+ return -1
+endi
+
+sql alter database db cachemodel 'none';
+sql reset query cache;
+sql select tbname,last_row(c0, ts) from db.stb;
+
+if $rows != 1 then
+ return -1
+endi
+
+if $data00 != @stb_1@ then
+ return -1
+endi
+
+if $data02 != @23-11-26 13:11:40.000@ then
+ return -1
+endi
+
+if $data01 != NULL then
+ return -1
+endi
+
system sh/exec.sh -n dnode1 -s stop -x SIGINT
diff --git a/tests/script/tsim/dnode/drop_dnode_has_mnode.sim b/tests/script/tsim/dnode/drop_dnode_has_mnode.sim
index 054f9786072f002ff8a7cce05104e879a9f3e552..8a7436772668bc4846180ccedd34e3ad31550119 100644
--- a/tests/script/tsim/dnode/drop_dnode_has_mnode.sim
+++ b/tests/script/tsim/dnode/drop_dnode_has_mnode.sim
@@ -35,7 +35,7 @@ endi
print =============== step2 drop dnode 3
sql_error drop dnode 1
-sql drop dnode 3
+sql drop dnode 3 force
sql select * from information_schema.ins_dnodes
print ===> $data00 $data01 $data02 $data03 $data04 $data05
diff --git a/tests/script/tsim/dnode/offline_reason.sim b/tests/script/tsim/dnode/offline_reason.sim
index 8c4d8b47f765f22f07e8f5114797c94dc89c67ca..23f015392b943f56d1c93bceb57d0700ee9e945c 100644
--- a/tests/script/tsim/dnode/offline_reason.sim
+++ b/tests/script/tsim/dnode/offline_reason.sim
@@ -57,7 +57,7 @@ if $data(2)[7] != @status msg timeout@ then
endi
print ========== step4
-sql drop dnode 2
+sql drop dnode 2 force
sql select * from information_schema.ins_dnodes
if $rows != 1 then
return -1
diff --git a/tests/script/tsim/parser/condition_scl.sim b/tests/script/tsim/parser/condition_scl.sim
new file mode 100644
index 0000000000000000000000000000000000000000..f377988006d8136f95c6ea458eec3e19e9df4ba0
--- /dev/null
+++ b/tests/script/tsim/parser/condition_scl.sim
@@ -0,0 +1,136 @@
+system sh/stop_dnodes.sh
+system sh/deploy.sh -n dnode1 -i 1
+system sh/cfg.sh -n dnode1 -c filterScalarMode -v 1
+system sh/exec.sh -n dnode1 -s start
+sql connect
+
+sql drop database if exists cdb
+sql create database if not exists cdb
+sql use cdb
+sql create table stb1 (ts timestamp, c1 int, c2 float, c3 bigint, c4 smallint, c5 tinyint, c6 double, c7 bool, c8 binary(10), c9 nchar(9)) TAGS(t1 int, t2 binary(10), t3 double)
+sql create table tb1 using stb1 tags(1,'1',1.0)
+sql create table tb2 using stb1 tags(2,'2',2.0)
+sql create table tb3 using stb1 tags(3,'3',3.0)
+sql create table tb4 using stb1 tags(4,'4',4.0)
+sql create table tb5 using stb1 tags(5,'5',5.0)
+sql create table tb6 using stb1 tags(6,'6',6.0)
+
+sql insert into tb1 values ('2021-05-05 18:19:00',1,1.0,1,1,1,1.0,true ,'1','1')
+sql insert into tb1 values ('2021-05-05 18:19:01',2,2.0,2,2,2,2.0,true ,'2','2')
+sql insert into tb1 values ('2021-05-05 18:19:02',3,3.0,3,3,3,3.0,false,'3','3')
+sql insert into tb1 values ('2021-05-05 18:19:03',4,4.0,4,4,4,4.0,false,'4','4')
+sql insert into tb1 values ('2021-05-05 18:19:04',11,11.0,11,11,11,11.0,true ,'11','11')
+sql insert into tb1 values ('2021-05-05 18:19:05',12,12.0,12,12,12,12.0,true ,'12','12')
+sql insert into tb1 values ('2021-05-05 18:19:06',13,13.0,13,13,13,13.0,false,'13','13')
+sql insert into tb1 values ('2021-05-05 18:19:07',14,14.0,14,14,14,14.0,false,'14','14')
+sql insert into tb2 values ('2021-05-05 18:19:08',21,21.0,21,21,21,21.0,true ,'21','21')
+sql insert into tb2 values ('2021-05-05 18:19:09',22,22.0,22,22,22,22.0,true ,'22','22')
+sql insert into tb2 values ('2021-05-05 18:19:10',23,23.0,23,23,23,23.0,false,'23','23')
+sql insert into tb2 values ('2021-05-05 18:19:11',24,24.0,24,24,24,24.0,false,'24','24')
+sql insert into tb3 values ('2021-05-05 18:19:12',31,31.0,31,31,31,31.0,true ,'31','31')
+sql insert into tb3 values ('2021-05-05 18:19:13',32,32.0,32,32,32,32.0,true ,'32','32')
+sql insert into tb3 values ('2021-05-05 18:19:14',33,33.0,33,33,33,33.0,false,'33','33')
+sql insert into tb3 values ('2021-05-05 18:19:15',34,34.0,34,34,34,34.0,false,'34','34')
+sql insert into tb4 values ('2021-05-05 18:19:16',41,41.0,41,41,41,41.0,true ,'41','41')
+sql insert into tb4 values ('2021-05-05 18:19:17',42,42.0,42,42,42,42.0,true ,'42','42')
+sql insert into tb4 values ('2021-05-05 18:19:18',43,43.0,43,43,43,43.0,false,'43','43')
+sql insert into tb4 values ('2021-05-05 18:19:19',44,44.0,44,44,44,44.0,false,'44','44')
+sql insert into tb5 values ('2021-05-05 18:19:20',51,51.0,51,51,51,51.0,true ,'51','51')
+sql insert into tb5 values ('2021-05-05 18:19:21',52,52.0,52,52,52,52.0,true ,'52','52')
+sql insert into tb5 values ('2021-05-05 18:19:22',53,53.0,53,53,53,53.0,false,'53','53')
+sql insert into tb5 values ('2021-05-05 18:19:23',54,54.0,54,54,54,54.0,false,'54','54')
+sql insert into tb6 values ('2021-05-05 18:19:24',61,61.0,61,61,61,61.0,true ,'61','61')
+sql insert into tb6 values ('2021-05-05 18:19:25',62,62.0,62,62,62,62.0,true ,'62','62')
+sql insert into tb6 values ('2021-05-05 18:19:26',63,63.0,63,63,63,63.0,false,'63','63')
+sql insert into tb6 values ('2021-05-05 18:19:27',64,64.0,64,64,64,64.0,false,'64','64')
+sql insert into tb6 values ('2021-05-05 18:19:28',NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL)
+
+sql create table stb2 (ts timestamp, u1 int unsigned, u2 bigint unsigned, u3 smallint unsigned, u4 tinyint unsigned, ts2 timestamp) TAGS(t1 int unsigned, t2 bigint unsigned, t3 timestamp, t4 int)
+sql create table tb2_1 using stb2 tags(1,1,'2021-05-05 18:38:38',1)
+sql create table tb2_2 using stb2 tags(2,2,'2021-05-05 18:58:58',2)
+
+sql insert into tb2_1 values ('2021-05-05 18:19:00',1,2,3,4,'2021-05-05 18:28:01')
+sql insert into tb2_1 values ('2021-05-05 18:19:01',5,6,7,8,'2021-05-05 18:28:02')
+sql insert into tb2_1 values ('2021-05-05 18:19:02',2,2,3,4,'2021-05-05 18:28:03')
+sql insert into tb2_1 values ('2021-05-05 18:19:03',5,6,7,8,'2021-05-05 18:28:04')
+sql insert into tb2_1 values ('2021-05-05 18:19:04',3,2,3,4,'2021-05-05 18:28:05')
+sql insert into tb2_1 values ('2021-05-05 18:19:05',5,6,7,8,'2021-05-05 18:28:06')
+sql insert into tb2_1 values ('2021-05-05 18:19:06',4,2,3,4,'2021-05-05 18:28:07')
+sql insert into tb2_1 values ('2021-05-05 18:19:07',5,6,7,8,'2021-05-05 18:28:08')
+sql insert into tb2_1 values ('2021-05-05 18:19:08',5,2,3,4,'2021-05-05 18:28:09')
+sql insert into tb2_1 values ('2021-05-05 18:19:09',5,6,7,8,'2021-05-05 18:28:10')
+sql insert into tb2_1 values ('2021-05-05 18:19:10',6,2,3,4,'2021-05-05 18:28:11')
+sql insert into tb2_2 values ('2021-05-05 18:19:11',5,6,7,8,'2021-05-05 18:28:12')
+sql insert into tb2_2 values ('2021-05-05 18:19:12',7,2,3,4,'2021-05-05 18:28:13')
+sql insert into tb2_2 values ('2021-05-05 18:19:13',5,6,7,8,'2021-05-05 18:28:14')
+sql insert into tb2_2 values ('2021-05-05 18:19:14',8,2,3,4,'2021-05-05 18:28:15')
+sql insert into tb2_2 values ('2021-05-05 18:19:15',5,6,7,8,'2021-05-05 18:28:16')
+
+sql create table stb3 (ts timestamp, c1 int, c2 float, c3 bigint, c4 smallint, c5 tinyint, c6 double, c7 bool, c8 binary(10), c9 nchar(9)) TAGS(t1 int, t2 binary(10), t3 double)
+sql create table tb3_1 using stb3 tags(1,'1',1.0)
+sql create table tb3_2 using stb3 tags(2,'2',2.0)
+
+sql insert into tb3_1 values ('2021-01-05 18:19:00',1,1.0,1,1,1,1.0,true ,'1','1')
+sql insert into tb3_1 values ('2021-02-05 18:19:01',2,2.0,2,2,2,2.0,true ,'2','2')
+sql insert into tb3_1 values ('2021-03-05 18:19:02',3,3.0,3,3,3,3.0,false,'3','3')
+sql insert into tb3_1 values ('2021-04-05 18:19:03',4,4.0,4,4,4,4.0,false,'4','4')
+sql insert into tb3_1 values ('2021-05-05 18:19:28',5,NULL,5,NULL,5,NULL,true,NULL,'5')
+sql insert into tb3_1 values ('2021-06-05 18:19:28',NULL,6.0,NULL,6,NULL,6.0,NULL,'6',NULL)
+sql insert into tb3_1 values ('2021-07-05 18:19:28',NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL)
+sql insert into tb3_2 values ('2021-01-06 18:19:00',11,11.0,11,11,11,11.0,true ,'11','11')
+sql insert into tb3_2 values ('2021-02-06 18:19:01',12,12.0,12,12,12,12.0,true ,'12','12')
+sql insert into tb3_2 values ('2021-03-06 18:19:02',13,13.0,13,13,13,13.0,false,'13','13')
+sql insert into tb3_2 values ('2021-04-06 18:19:03',14,14.0,14,14,14,14.0,false,'14','14')
+sql insert into tb3_2 values ('2021-05-06 18:19:28',15,NULL,15,NULL,15,NULL,true,NULL,'15')
+sql insert into tb3_2 values ('2021-06-06 18:19:28',NULL,16.0,NULL,16,NULL,16.0,NULL,'16',NULL)
+sql insert into tb3_2 values ('2021-07-06 18:19:28',NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL)
+
+sql create table stb4 (ts timestamp, c1 int, c2 float, c3 bigint, c4 smallint, c5 tinyint, c6 double, c7 bool, c8 binary(10), c9 nchar(9),c10 binary(16300)) TAGS(t1 int, t2 binary(10), t3 double)
+sql create table tb4_0 using stb4 tags(0,'0',0.0)
+sql create table tb4_1 using stb4 tags(1,'1',1.0)
+sql create table tb4_2 using stb4 tags(2,'2',2.0)
+sql create table tb4_3 using stb4 tags(3,'3',3.0)
+sql create table tb4_4 using stb4 tags(4,'4',4.0)
+
+$i = 0
+$ts0 = 1625850000000
+$blockNum = 5
+$delta = 0
+$tbname0 = tb4_
+$a = 0
+$b = 200
+$c = 400
+while $i < $blockNum
+ $x = 0
+ $rowNum = 1200
+ while $x < $rowNum
+ $ts = $ts0 + $x
+ $a = $a + 1
+ $b = $b + 1
+ $c = $c + 1
+ $d = $x / 10
+ $tin = $rowNum
+ $binary = 'binary . $c
+ $binary = $binary . '
+ $nchar = 'nchar . $c
+ $nchar = $nchar . '
+ $tbname = 'tb4_ . $i
+ $tbname = $tbname . '
+ sql insert into $tbname values ( $ts , $a , $b , $c , $d , $d , $c , true, $binary , $nchar , $binary )
+ $x = $x + 1
+ endw
+
+ $i = $i + 1
+ $ts0 = $ts0 + 259200000
+endw
+
+run tsim/parser/condition_query.sim
+
+print ================== restart server to commit data to disk
+system sh/exec.sh -n dnode1 -s stop -x SIGINT
+system sh/exec.sh -n dnode1 -s start
+print ================== server restart completed
+sql connect
+
+run tsim/parser/condition_query.sim
+
diff --git a/tests/script/tsim/parser/function.sim b/tests/script/tsim/parser/function.sim
index 7f69aa2d02adeaa2cd64eafbefaa2d2df3a79184..120c4b8148d9eef6e423f0ff0a06ac5af5e88559 100644
--- a/tests/script/tsim/parser/function.sim
+++ b/tests/script/tsim/parser/function.sim
@@ -954,11 +954,14 @@ endi
print =========================>TD-5190
sql select _wstart, stddev(f1) from st1 where ts>'2021-07-01 1:1:1' and ts<'2021-07-30 00:00:00' interval(1d) fill(NULL);
if $rows != 29 then
+ print expect 29, actual: $rows
return -1
endi
+
if $data00 != @21-07-01 00:00:00.000@ then
return -1
endi
+
if $data01 != NULL then
return -1
endi
diff --git a/tests/script/tsim/parser/join_manyblocks.sim b/tests/script/tsim/parser/join_manyblocks.sim
index a40a75f50c54cb9ffdfed24edd2a7e03e0c13e5c..7fd0df21b31b598b54dbeaa3e7780a1e9c0ea010 100644
--- a/tests/script/tsim/parser/join_manyblocks.sim
+++ b/tests/script/tsim/parser/join_manyblocks.sim
@@ -6,8 +6,8 @@ sql connect
$dbPrefix = join_m_db
$tbPrefix = join_tb
$mtPrefix = join_mt
-$tbNum = 3
-$rowNum = 2000
+$tbNum = 20
+$rowNum = 200
$totalNum = $tbNum * $rowNum
print =============== join_manyBlocks.sim
@@ -78,8 +78,8 @@ print ==============> td-3313
sql select join_mt0.ts,join_mt0.ts,join_mt0.t1 from join_mt0, join_mt1 where join_mt0.ts=join_mt1.ts and join_mt0.t1=join_mt1.t1;
print $row
-if $row != 6000 then
- print expect 6000, actual: $row
+if $row != 4000 then
+ print expect 4000, actual: $row
return -1
endi
diff --git a/tests/script/tsim/parser/sliding.sim b/tests/script/tsim/parser/sliding.sim
index 1cb4cb5340f993ee25b4f73e8b076375ea28a445..7aa69ce9a9c164b8741ba40e607387e8ba184b30 100644
--- a/tests/script/tsim/parser/sliding.sim
+++ b/tests/script/tsim/parser/sliding.sim
@@ -450,10 +450,11 @@ endi
print ====================>check boundary check crash at client side
sql select count(*) from sliding_mt0 where ts>now and ts < now-1h;
+sql select sum(c1) from sliding_tb0 interval(1a) sliding(1a);
+
print ========================query on super table
print ========================error case
-sql_error select sum(c1) from sliding_tb0 interval(1a) sliding(1a);
sql_error select sum(c1) from sliding_tb0 interval(10a) sliding(12a);
sql_error select sum(c1) from sliding_tb0 sliding(1n) interval(1y);
sql_error select sum(c1) from sliding_tb0 interval(-1y) sliding(1n);
diff --git a/tests/script/tsim/query/bug3398.sim b/tests/script/tsim/query/bug3398.sim
new file mode 100644
index 0000000000000000000000000000000000000000..3ca88cf4596fcc8e732eb9ef94b3e1cdb8fc45de
--- /dev/null
+++ b/tests/script/tsim/query/bug3398.sim
@@ -0,0 +1,30 @@
+system sh/stop_dnodes.sh
+system sh/deploy.sh -n dnode1 -i 1
+system sh/exec.sh -n dnode1 -s start
+sql connect
+
+print =============== create database
+sql create database test
+
+print =============== create super table and child table
+sql use test
+
+sql CREATE STABLE st (day timestamp, c2 int) TAGS (vin binary(32))
+
+sql insert into test.g using st TAGS ("TAG1") values("2023-05-03 00:00:00.000", 1)
+sql insert into test.t using st TAGS ("TAG1") values("2023-05-03 00:00:00.000", 1)
+sql insert into test.tg using st TAGS ("TAG1") values("2023-05-03 00:00:00.000", 1)
+
+sql select sum(case when t.c2 is NULL then 0 else 1 end + case when t.c2 is NULL then 0 else 1 end), sum(case when t.c2 is NULL then 0 else 1 end + case when t.c2 is NULL then 0 else 1 end + case when t.c2 is NULL then 0 else 1 end) from test.t t, test.g g, test.tg tg where t.day = g.day and t.day = tg.day and t.day between '2021-05-03' and '2023-05-04' and t.vin = 'TAG1' and t.vin = g.vin and t.vin = tg.vin group by t.day;
+
+print $rows $data00 $data01
+if $rows != 1 then
+ return -1
+endi
+if $data00 != 2.000000000 then
+ return -1
+endi
+
+if $data01 != 3.000000000 then
+ return -1
+endi
diff --git a/tests/script/tsim/query/emptyTsRange_scl.sim b/tests/script/tsim/query/emptyTsRange_scl.sim
new file mode 100644
index 0000000000000000000000000000000000000000..43734b047dd6aaec0ac5b3f8a048da30f12d26ad
--- /dev/null
+++ b/tests/script/tsim/query/emptyTsRange_scl.sim
@@ -0,0 +1,21 @@
+system sh/stop_dnodes.sh
+system sh/deploy.sh -n dnode1 -i 1
+system sh/cfg.sh -n dnode1 -c filterScalarMode -v 1
+system sh/exec.sh -n dnode1 -s start
+sql connect
+
+sql drop database if exists db1;
+sql create database if not exists db1;
+sql use db1;
+sql create stable sta (ts timestamp, f1 double, f2 binary(200)) tags(t1 int);
+sql create table tba1 using sta tags(1);
+sql insert into tba1 values ('2022-04-26 15:15:01', 1.0, "a");
+sql insert into tba1 values ('2022-04-26 15:15:02', 2.0, "b");
+sql insert into tba1 values ('2022-04-26 15:15:04', 4.0, "b");
+sql insert into tba1 values ('2022-04-26 15:15:05', 5.0, "b");
+sql select last_row(*) from sta where ts >= 1678901803783 and ts <= 1678901803783 and _c0 <= 1678901803782 interval(10d,8d) fill(linear) order by _wstart desc;
+if $rows != 0 then
+ return -1
+endi
+
+system sh/exec.sh -n dnode1 -s stop -x SIGINT
diff --git a/tests/script/tsim/query/groupby_distinct.sim b/tests/script/tsim/query/groupby_distinct.sim
new file mode 100644
index 0000000000000000000000000000000000000000..8b16bb1910e565d3bc586a6610cef6c0d7b25e4e
--- /dev/null
+++ b/tests/script/tsim/query/groupby_distinct.sim
@@ -0,0 +1,30 @@
+system sh/stop_dnodes.sh
+system sh/deploy.sh -n dnode1 -i 1
+system sh/exec.sh -n dnode1 -s start
+sql connect
+
+
+sql drop database if exists db1;
+sql create database db1;
+sql use db1;
+
+sql create stable sta (ts timestamp, f1 int, f2 binary(200)) tags(t1 int, t2 int, t3 int);
+sql create table tba1 using sta tags(1, 1, 1);
+sql insert into tba1 values ('2022-04-26 15:15:08', 1, "a");
+sql insert into tba1 values ('2022-04-26 15:15:07', 1, "b");
+sql insert into tba1 values ('2022-04-26 15:15:06', 1, "a");
+sql insert into tba1 values ('2022-04-26 15:15:05', 1, "b");
+sql insert into tba1 values ('2022-04-26 15:15:04', 1, "c");
+sql insert into tba1 values ('2022-04-26 15:15:03', 1, "c");
+sql insert into tba1 values ('2022-04-26 15:15:02', 1, "d");
+sql insert into tba1 values ('2022-04-26 15:15:01', 1, "d");
+sql select distinct avg(f1) as avgv from sta group by f2;
+if $rows != 1 then
+ return -1
+endi
+sql select distinct avg(f1) as avgv from sta group by f2 limit 1,10;
+if $rows != 0 then
+ return -1
+endi
+
+system sh/exec.sh -n dnode1 -s stop -x SIGINT
diff --git a/tests/script/tsim/query/interval.sim b/tests/script/tsim/query/interval.sim
index 833da4a8ba2b3daf495167f06c99d222564a6bf3..e2b0d219cb0eaaace3e571d31a19fa329364c575 100644
--- a/tests/script/tsim/query/interval.sim
+++ b/tests/script/tsim/query/interval.sim
@@ -170,6 +170,42 @@ endi
# return -1
#endi
+print ================ step10
+print -------- create database and table
+sql create database if not exists test
+sql use test
+sql create stable st (ts timestamp, c2 int) tags(tg int)
+print -------- insert 300 rows data
+$i = 0
+while $i < 300
+ $t = 1577807983000
+ $cc = $i * 1000
+ $t = $t + $cc
+ sql select $i % 3
+ if $data00 != 0.000000000 then
+ goto continue_while
+ endi
+ sql select $i % 4
+ if $data00 == 0.000000000 then
+ goto continue_while
+ endi
+ sql insert into t1 using st tags(1) values ( $t , $i )
+ continue_while:
+ $i = $i + 1
+endw
+
+$ms1 = 1577808120000
+$ms2 = 1577808000000
+sql select * from (select _wstart, last(ts) as ts, avg(c2) as av from t1 where ts <= $ms1 and ts >= $ms2 interval(10s) sliding(1s) fill(NULL)) order by ts asc
+print ----> select asc rows: $rows
+$asc_rows = $rows
+sql select * from (select _wstart, last(ts) as ts, avg(c2) as av from t1 where ts <= $ms1 and ts >= $ms2 interval(10s) sliding(1s) fill(NULL)) order by ts desc
+print ----> select desc rows: $rows
+$desc_rows = $rows
+if $desc_rows != $asc_rows then
+ return -1
+endi
+
print =============== clear
#sql drop database $db
#sql select * from information_schema.ins_databases
diff --git a/tests/script/tsim/query/partitionby.sim b/tests/script/tsim/query/partitionby.sim
index 4c221e02d39098ed078e25e5aa4739764f8c0fe9..76d4f87908590839cdec1e8689404be974d4e86d 100644
--- a/tests/script/tsim/query/partitionby.sim
+++ b/tests/script/tsim/query/partitionby.sim
@@ -24,18 +24,85 @@ sql use $db
sql create table $mt1 (ts timestamp, f1 int) TAGS(tag1 int, tag2 binary(500))
sql create table tb0 using $mt1 tags(0, 'a');
sql create table tb1 using $mt1 tags(1, 'b');
-sql create table tb2 using $mt1 tags(1, 'a');
-sql create table tb3 using $mt1 tags(1, 'a');
-sql create table tb4 using $mt1 tags(3, 'b');
-sql create table tb5 using $mt1 tags(3, 'a');
-sql create table tb6 using $mt1 tags(3, 'b');
-sql create table tb7 using $mt1 tags(3, 'b');
+sql create table tb2 using $mt1 tags(2, 'a');
+sql create table tb3 using $mt1 tags(3, 'a');
+sql create table tb4 using $mt1 tags(4, 'b');
+sql create table tb5 using $mt1 tags(5, 'a');
+sql create table tb6 using $mt1 tags(6, 'b');
+sql create table tb7 using $mt1 tags(7, 'b');
sql select * from $mt1 partition by tag1,tag2 limit 1;
if $rows != 0 then
return -1
endi
+sql insert into tb0 values ('2022-04-26 15:15:08', 1);
+sql insert into tb1 values ('2022-04-26 15:15:07', 2);
+sql insert into tb2 values ('2022-04-26 15:15:06', 3);
+sql insert into tb3 values ('2022-04-26 15:15:05', 4);
+sql insert into tb4 values ('2022-04-26 15:15:04', 5);
+sql insert into tb5 values ('2022-04-26 15:15:03', 6);
+sql insert into tb6 values ('2022-04-26 15:15:02', 7);
+sql insert into tb7 values ('2022-04-26 15:15:01', 8);
+
+sql select _wstart as ts, count(*) from $mt1 partition by tag1 interval(1s) order by _wstart;
+if $rows != 8 then
+ return -1
+endi
+if $data00 != @22-04-26 15:15:01.000@ then
+ return -1
+endi
+if $data01 != 1 then
+ return -1
+endi
+if $data10 != @22-04-26 15:15:02.000@ then
+ return -1
+endi
+if $data11 != 1 then
+ return -1
+endi
+if $data20 != @22-04-26 15:15:03.000@ then
+ return -1
+endi
+if $data21 != 1 then
+ return -1
+endi
+if $data30 != @22-04-26 15:15:04.000@ then
+ return -1
+endi
+if $data31 != 1 then
+ return -1
+endi
+if $data40 != @22-04-26 15:15:05.000@ then
+ return -1
+endi
+if $data41 != 1 then
+ return -1
+endi
+if $data50 != @22-04-26 15:15:06.000@ then
+ return -1
+endi
+if $data51 != 1 then
+ return -1
+endi
+if $data60 != @22-04-26 15:15:07.000@ then
+ return -1
+endi
+if $data61 != 1 then
+ return -1
+endi
+if $data70 != @22-04-26 15:15:08.000@ then
+ return -1
+endi
+if $data71 != 1 then
+ return -1
+endi
+sql select * from (select _wstart as ts, count(*) from $mt1 partition by tag1 interval(1s) order by _wstart) order by ts;
+sql select _wstart as ts, count(*) from $mt1 interval(1s) order by _wstart;
+sql select * from (select _wstart as ts, count(*) from $mt1 interval(1s) order by _wstart) order by ts;
+sql select diff(a) from (select _wstart as ts, count(*) a from $mt1 interval(1s) order by _wstart);
+sql select diff(a) from (select _wstart as ts, count(*) a from $mt1 partition by tag1 interval(1s) order by _wstart);
+
sql insert into tb0 values (now, 0);
sql insert into tb1 values (now, 1);
sql insert into tb2 values (now, 2);
@@ -54,7 +121,7 @@ sql select count(*) from (select ts from $mt1 where ts is not null partition by
if $rows != 1 then
return -1
endi
-if $data00 != 2 then
+if $data00 != 4 then
return -1
endi
@@ -62,7 +129,7 @@ sql select count(*) from (select ts from $mt1 where ts is not null partition by
if $rows != 1 then
return -1
endi
-if $data00 != 8 then
+if $data00 != 16 then
return -1
endi
diff --git a/tests/script/tsim/query/udfpy.sim b/tests/script/tsim/query/udfpy.sim
index 9e0492ffd95e3baed20a77ddf3423a1e8872c1d0..2a4daedd5d6afba79b1aba63769ab584f7d8e644 100644
--- a/tests/script/tsim/query/udfpy.sim
+++ b/tests/script/tsim/query/udfpy.sim
@@ -15,6 +15,7 @@ system sh/prepare_pyudf.sh
system mkdir -p /tmp/pyudf
system cp sh/pybitand.py /tmp/pyudf/
system cp sh/pyl2norm.py /tmp/pyudf/
+system cp sh/pycumsum.py /tmp/pyudf/
system ls /tmp/pyudf
sql create database udf vgroups 3;
@@ -280,6 +281,18 @@ if $data20 != 8.000000000 then
return -1
endi
+#sql create aggregate function pycumsum as '/tmp/pyudf/pycumsum.py' outputtype double bufSize 128 language 'python';
+#sql select pycumsum(f2) from udf.t2
+#print ======= pycumsum
+#print $rows $data00
+#if $rows != 1 then
+# return -1
+#endi
+#if $data00 != 20.000000000 then
+# return -1
+#endi
+#sql drop function pycumsum
+
sql create or replace function bit_and as '/tmp/udf/libbitand.so' outputtype int
sql select func_version from information_schema.ins_functions where name='bit_and'
if $data00 != 1 then
diff --git a/tests/script/tsim/stream/distributeInterval0.sim b/tests/script/tsim/stream/distributeInterval0.sim
index 959b32fa5990ef72bc6d469ea6f24de59e5b1a0d..5bb03c8cbf0fcd03e6a401e60465248a91bfdfe5 100644
--- a/tests/script/tsim/stream/distributeInterval0.sim
+++ b/tests/script/tsim/stream/distributeInterval0.sim
@@ -1,36 +1,11 @@
system sh/stop_dnodes.sh
system sh/deploy.sh -n dnode1 -i 1
-system sh/deploy.sh -n dnode2 -i 2
system sh/exec.sh -n dnode1 -s start
sleep 50
sql connect
-sql create dnode $hostname2 port 7200
-
-system sh/exec.sh -n dnode2 -s start
-
-print ===== step1
-$x = 0
-step1:
- $x = $x + 1
- sleep 1000
- if $x == 10 then
- print ====> dnode not ready!
- return -1
- endi
-sql select * from information_schema.ins_dnodes
-print ===> $data00 $data01 $data02 $data03 $data04 $data05
-print ===> $data10 $data11 $data12 $data13 $data14 $data15
-if $rows != 2 then
- return -1
-endi
-if $data(1)[4] != ready then
- goto step1
-endi
-if $data(2)[4] != ready then
- goto step1
-endi
+
print ===== step2
sql drop stream if exists stream_t1;
@@ -248,10 +223,56 @@ sql insert into ts3 values(1648791223002,2,2,3,1.1);
sql insert into ts4 values(1648791233003,3,2,3,2.1);
sql insert into ts3 values(1648791243004,4,2,43,73.1);
sql insert into ts4 values(1648791213002,24,22,23,4.1);
+
+$loop_count = 0
+loop032:
+
+$loop_count = $loop_count + 1
+if $loop_count == 30 then
+ return -1
+endi
+
+sleep 1000
+print 6-0 select * from streamtST1;
+sql select * from streamtST1;
+
+if $rows != 4 then
+ print =====rows=$rows
+ goto loop032
+endi
+
+if $data01 != 8 then
+ print =6====data01=$data01
+ goto loop032
+endi
+
sql insert into ts3 values(1648791243005,4,20,3,3.1);
sql insert into ts4 values(1648791243006,4,2,3,3.1) (1648791243007,4,2,3,3.1) ;
sql insert into ts3 values(1648791243008,4,2,30,3.1) (1648791243009,4,2,3,3.1) (1648791243010,4,2,3,3.1) ;
sql insert into ts4 values(1648791243011,4,2,3,3.1) (1648791243012,34,32,33,3.1) (1648791243013,4,2,3,3.1) (1648791243014,4,2,13,3.1);
+
+$loop_count = 0
+loop033:
+
+$loop_count = $loop_count + 1
+if $loop_count == 30 then
+ return -1
+endi
+
+sleep 1000
+print 6-1 select * from streamtST1;
+sql select * from streamtST1;
+
+if $rows != 4 then
+ print =====rows=$rows
+ goto loop033
+endi
+
+if $data01 != 8 then
+ print =6====data01=$data01
+ goto loop033
+endi
+
sql insert into ts3 values(1648791243005,4,42,3,3.1) (1648791243003,4,2,33,3.1) (1648791243006,4,2,3,3.1) (1648791213001,1,52,13,1.0) (1648791223001,22,22,83,1.1) ;
$loop_count = 0
diff --git a/tests/script/tsim/stream/ignoreExpiredData.sim b/tests/script/tsim/stream/ignoreExpiredData.sim
index 27920dd539826fb13715b7daf3136156c643f8ed..884b7cbb5f6db4e6a371b7f3eef1b6a8062240cd 100644
--- a/tests/script/tsim/stream/ignoreExpiredData.sim
+++ b/tests/script/tsim/stream/ignoreExpiredData.sim
@@ -132,12 +132,12 @@ if $loop_count == 10 then
return -1
endi
-if $data01 != 1 then
+if $data01 != 2 then
print =====data01=$data01
goto loop4
endi
-if $data02 != 1 then
+if $data02 != 2 then
print =====data02=$data02
goto loop4
endi
diff --git a/tests/script/tsim/stream/sliding.sim b/tests/script/tsim/stream/sliding.sim
index 3312ccbec47738f0e3f593279dd369e2299358ed..05eb7dacba92254c947289332a5cbcce8410271a 100644
--- a/tests/script/tsim/stream/sliding.sim
+++ b/tests/script/tsim/stream/sliding.sim
@@ -576,13 +576,6 @@ $loop_count = 0
print step 7
-loop4:
-sleep 100
-
-$loop_count = $loop_count + 1
-if $loop_count == 10 then
- return -1
-endi
sql create database test3 vgroups 6;
sql use test3;
diff --git a/tests/script/tsim/tmq/topic.sim b/tests/script/tsim/tmq/topic.sim
index 0bf0873e9f0513b08b6868bed220c5e3d74072f9..78c4c561af9ae55d203635cdccc562eb8bec869d 100644
--- a/tests/script/tsim/tmq/topic.sim
+++ b/tests/script/tsim/tmq/topic.sim
@@ -108,4 +108,15 @@ if $rows != 6 then
return -1
endi
+sql create topic topic_stable_1 as stable stb where t1 > 0
+sql create topic topic_stable_2 as stable stb where t1 > 0 and t1 < 0
+sql create topic topic_stable_3 as stable stb where 1 > 0
+sql create topic topic_stable_4 as stable stb where abs(t1) > 0
+sql_error create topic topic_stable_5 as stable stb where last(t1) > 0
+sql_error create topic topic_stable_5 as stable stb where sum(t1) > 0
+sql create topic topic_stable_6 as stable stb where tbname is not null
+sql create topic topic_stable_7 as stable stb where tbname > 'a'
+sql_error create topic topic_stable_8 as stable stb where tbname > 0 and xx < 0
+sql_error create topic topic_stable_9 as stable stb where tbname > 0 and c1 < 0
+
system sh/exec.sh -n dnode1 -s stop -x SIGINT
diff --git a/tests/script/tsim/user/privilege_create_db.sim b/tests/script/tsim/user/privilege_create_db.sim
new file mode 100644
index 0000000000000000000000000000000000000000..c81bd1b2581793a6005d2278190c3e416471b816
--- /dev/null
+++ b/tests/script/tsim/user/privilege_create_db.sim
@@ -0,0 +1,97 @@
+system sh/stop_dnodes.sh
+system sh/deploy.sh -n dnode1 -i 1
+system sh/exec.sh -n dnode1 -s start
+sql connect
+
+print ========================root user create user
+sql create user u1 pass "taosdata"
+sql create user u2 pass "taosdata"
+sql create database test
+sql select * from information_schema.ins_user_privileges where user_name == "root"
+if $rows != 1 then
+ return -1
+endi
+
+print =============connect with u1
+sql connect u1
+sql create database u1_d1
+sql use u1_d1
+sql create table u1_d1.t1(ts timestamp, c2 int)
+sql use information_schema
+sql select * from ins_user_privileges where user_name == "u1" order by privilege
+if $rows != 2 then
+ return -1
+endi
+if $data01 != read then
+ return -1
+endi
+if $data11 != write then
+ return -1
+endi
+if $data02 != u1_d1 then
+ return -1
+endi
+if $data12 != u1_d1 then
+ return -1
+endi
+
+sql_error grant all on *.* to u1
+sql_error grant all on test.* to u1
+
+print =============connect with u2
+sql connect u2
+sql create database u2_d1
+sql use u2_d1
+sql create table u2_d1.t1(ts timestamp, c2 int)
+sql use information_schema
+sql select * from ins_user_privileges where user_name == "u2" order by privilege
+if $rows != 2 then
+ return -1
+endi
+if $data01 != read then
+ return -1
+endi
+if $data11 != write then
+ return -1
+endi
+if $data02 != u2_d1 then
+ return -1
+endi
+if $data12 != u2_d1 then
+ return -1
+endi
+
+sql_error select * from u1_d1.t1
+sql_error revoke read on u2_d1.* from u2
+
+print =============connect with root, revoke read from u1, all from u2
+sql connect
+sql revoke read on u1_d1.* from u1
+sql revoke all on u2_d1.* from u2
+sleep 1000
+
+print =============connect with u1
+sql connect u1
+sql insert into u1_d1.t1 values(now, 1)
+sql_error select * from u1_d1.t1;
+
+print =============connect with u2
+sql connect u2
+sql_error select * from u2_d1.t1;
+sql_error insert into u2_d1.t1 values(now, 1)
+
+print =============connect with root, grant read to u1, all to u2
+sql connect
+sql grant read on u1_d1.* to u1
+sql grant all on u2_d1.* to u2
+
+sleep 1000
+print =============connect with u1
+sql connect u1
+sql select * from u1_d1.t1;
+sql insert into u1_d1.t1 values(now, 2)
+
+print =============connect with u2
+sql connect u2
+sql select * from u2_d1.t1;
+sql insert into u2_d1.t1 values(now, 2)
diff --git a/tests/script/tsim/valgrind/checkError1.sim b/tests/script/tsim/valgrind/checkError1.sim
index 5f82d2d93569b36e8cef68accda6fbdb49756520..debe633f06078c5432d4a7af1c45cceb5878d55b 100644
--- a/tests/script/tsim/valgrind/checkError1.sim
+++ b/tests/script/tsim/valgrind/checkError1.sim
@@ -20,7 +20,7 @@ sql_error alter user u2 sysinfo 0
print =============== step2 create drop dnode
sql create dnode $hostname port 7200
sql create dnode $hostname port 7300
-sql drop dnode 3
+sql drop dnode 3 force
sql alter dnode 1 'debugflag 131'
print =============== step3: select * from information_schema.ins_dnodes
diff --git a/tests/script/win-test-file b/tests/script/win-test-file
index b7fbbed5c16e2190c78985867d80eeb57efee00b..adef71cb45a0de2b570f649eedc5f2dcebad3ca4 100644
--- a/tests/script/win-test-file
+++ b/tests/script/win-test-file
@@ -4,6 +4,7 @@
./test.sh -f tsim/user/privilege_sysinfo.sim
./test.sh -f tsim/user/privilege_topic.sim
./test.sh -f tsim/user/privilege_table.sim
+./test.sh -f tsim/user/privilege_create_db.sim
./test.sh -f tsim/db/alter_option.sim
rem ./test.sh -f tsim/db/alter_replica_13.sim
./test.sh -f tsim/db/alter_replica_31.sim
diff --git a/tests/system-test/1-insert/manyVgroups.json b/tests/system-test/1-insert/manyVgroups.json
index 3b0fa96b08f73e26e11c35c89d6673268f764ddc..8c6f39cf9665633ac501dc032293fe2d1e4d7aec 100644
--- a/tests/system-test/1-insert/manyVgroups.json
+++ b/tests/system-test/1-insert/manyVgroups.json
@@ -11,7 +11,7 @@
"confirm_parameter_prompt": "no",
"insert_interval": 0,
"interlace_rows": 0,
- "num_of_records_per_req": 100000,
+ "num_of_records_per_req": 10000,
"databases": [
{
"dbinfo": {
@@ -73,4 +73,4 @@
]
}
]
-}
\ No newline at end of file
+}
diff --git a/tests/system-test/2-query/count_null.py b/tests/system-test/2-query/count_null.py
new file mode 100644
index 0000000000000000000000000000000000000000..6d2c8db8d6b4bd33ae75213ab838279ff9e65503
--- /dev/null
+++ b/tests/system-test/2-query/count_null.py
@@ -0,0 +1,144 @@
+import taos
+import sys
+
+from util.log import *
+from util.sql import *
+from util.cases import *
+
+
+
+class TDTestCase:
+
+ def init(self, conn, logSql, replicaVar=1):
+ self.replicaVar = int(replicaVar)
+        tdLog.debug(f"start to execute {__file__}")
+ #tdSql.init(conn.cursor())
+ tdSql.init(conn.cursor(), logSql) # output sql.txt file
+
+ def check_results(self):
+ tdSql.query(f"select count(*) from tb1")
+ tdSql.checkData(0, 0, 20000)
+ tdSql.query(f"select count(c1) from tb1")
+ tdSql.checkData(0, 0, 0)
+ tdSql.query(f"select count(c2) from tb1")
+ tdSql.checkData(0, 0, 0)
+ tdSql.query(f"select count(c3) from tb1")
+ tdSql.checkData(0, 0, 0)
+ tdSql.query(f"select count(c4) from tb1")
+ tdSql.checkData(0, 0, 0)
+ tdSql.query(f"select count(c5) from tb1")
+ tdSql.checkData(0, 0, 0)
+ tdSql.query(f"select count(c6) from tb1")
+ tdSql.checkData(0, 0, 0)
+ tdSql.query(f"select count(c7) from tb1")
+ tdSql.checkData(0, 0, 0)
+ tdSql.query(f"select count(c8) from tb1")
+ tdSql.checkData(0, 0, 0)
+
+ tdSql.query(f"select count(*) from tb2")
+ tdSql.checkData(0, 0, 20000)
+ tdSql.query(f"select count(c1) from tb2")
+ tdSql.checkData(0, 0, 20000)
+ tdSql.query(f"select count(c2) from tb2")
+ tdSql.checkData(0, 0, 20000)
+ tdSql.query(f"select count(c3) from tb2")
+ tdSql.checkData(0, 0, 20000)
+ tdSql.query(f"select count(c4) from tb2")
+ tdSql.checkData(0, 0, 20000)
+ tdSql.query(f"select count(c5) from tb2")
+ tdSql.checkData(0, 0, 20000)
+ tdSql.query(f"select count(c6) from tb2")
+ tdSql.checkData(0, 0, 20000)
+ tdSql.query(f"select count(c7) from tb2")
+ tdSql.checkData(0, 0, 0)
+ tdSql.query(f"select count(c8) from tb2")
+ tdSql.checkData(0, 0, 0)
+
+ for i in range (3, 6):
+ tdSql.query(f"select count(*) from tb{i}")
+ tdSql.checkData(0, 0, 20000)
+ tdSql.query(f"select count(c1) from tb{i}")
+ tdSql.checkData(0, 0, 10000)
+ tdSql.query(f"select count(c2) from tb{i}")
+ tdSql.checkData(0, 0, 10000)
+ tdSql.query(f"select count(c3) from tb{i}")
+ tdSql.checkData(0, 0, 10000)
+ tdSql.query(f"select count(c4) from tb{i}")
+ tdSql.checkData(0, 0, 10000)
+ tdSql.query(f"select count(c5) from tb{i}")
+ tdSql.checkData(0, 0, 10000)
+ tdSql.query(f"select count(c6) from tb{i}")
+ tdSql.checkData(0, 0, 10000)
+ tdSql.query(f"select count(c7) from tb{i}")
+ tdSql.checkData(0, 0, 10000)
+ tdSql.query(f"select count(c8) from tb{i}")
+ tdSql.checkData(0, 0, 10000)
+
+
+ def run(self):
+ dbname = 'db'
+ tbnames = ['tb1', 'tb2', 'tb3', 'tb4', 'tb5', 'tb6']
+ num_rows = 20000
+ num_tables = 6
+ ts_base = 1685548800000
+
+ tdSql.prepare()
+
+ tdLog.printNoPrefix("==========step1:create table")
+
+ for i in range (num_tables):
+ tdSql.execute(
+ f'''create table if not exists {dbname}.{tbnames[i]}
+ (ts timestamp, c0 tinyint, c1 smallint, c2 int, c3 bigint, c4 double, c5 float, c6 bool, c7 varchar(10), c8 nchar(10))
+
+ '''
+ )
+
+
+ tdLog.printNoPrefix("==========step2:insert data")
+
+ for i in range(num_rows):
+ tdSql.execute(f"insert into {dbname}.{tbnames[0]} values ({ts_base + i}, null, null, null, null, null, null, null, null, null)")
+
+ for i in range(num_rows):
+ tdSql.execute(f"insert into {dbname}.{tbnames[1]} values ({ts_base + i}, 1, 1, 1, 1, 1, 1, 1, null, null)")
+
+ for i in range(num_rows):
+ if i % 2 == 0:
+ tdSql.execute(f"insert into {dbname}.{tbnames[2]} values ({ts_base + i}, null, null, null, null, null, null, null, null, null)")
+ else:
+ tdSql.execute(f"insert into {dbname}.{tbnames[2]} values ({ts_base + i}, 1, 1, 1, 1, 1, 1, 1, 'binary', 'nchar')")
+
+ for i in range(num_rows):
+ if i % 2 == 0:
+ tdSql.execute(f"insert into {dbname}.{tbnames[3]} values ({ts_base + i}, null, null, null, null, null, null, null, 'binary', 'nchar')")
+ else:
+ tdSql.execute(f"insert into {dbname}.{tbnames[3]} values ({ts_base + i}, 1, 1, 1, 1, 1, 1, 1, null, null)")
+
+ for i in range(num_rows):
+ if i < num_rows / 2:
+ tdSql.execute(f"insert into {dbname}.{tbnames[4]} values ({ts_base + i}, null, null, null, null, null, null, null, null, null)")
+ else:
+ tdSql.execute(f"insert into {dbname}.{tbnames[4]} values ({ts_base + i}, 1, 1, 1, 1, 1, 1, 1, 'binary', 'nchar')")
+
+ for i in range(num_rows):
+ if i >= num_rows / 2:
+ tdSql.execute(f"insert into {dbname}.{tbnames[5]} values ({ts_base + i}, null, null, null, null, null, null, null, null, null)")
+ else:
+ tdSql.execute(f"insert into {dbname}.{tbnames[5]} values ({ts_base + i}, 1, 1, 1, 1, 1, 1, 1, 'binary', 'nchar')")
+
+
+ tdLog.printNoPrefix("==========step3:check result in memory")
+        self.check_results()
+
+        tdLog.printNoPrefix("==========step4:check result from disk")
+ tdSql.execute(f"flush database db")
+        self.check_results()
+
+
+ def stop(self):
+ tdSql.close()
+ tdLog.success(f"{__file__} successfully executed")
+
+tdCases.addLinux(__file__, TDTestCase())
+tdCases.addWindows(__file__, TDTestCase())
diff --git a/tests/system-test/2-query/interp.py b/tests/system-test/2-query/interp.py
index 121d4dcff6e3187938b4d1c386fec6ce7f7f3b37..f733a3d008cdfa278fbd24d40b9d102f91e830a6 100644
--- a/tests/system-test/2-query/interp.py
+++ b/tests/system-test/2-query/interp.py
@@ -32,6 +32,12 @@ class TDTestCase:
ctbname3_null = "ctb3_null"
stbname_null = "stb_null"
+ tbname_single = "tb_single"
+ ctbname1_single = "ctb1_single"
+ ctbname2_single = "ctb2_single"
+ ctbname3_single = "ctb3_single"
+ stbname_single = "stb_single"
+
tdSql.prepare()
tdLog.printNoPrefix("==========step1:create table")
@@ -219,6 +225,56 @@ class TDTestCase:
tdSql.checkData(2, 0, 12)
tdSql.checkData(3, 0, 12)
+ ## test fill value with scalar expression
+ tdSql.query(f"select interp(c0) from {dbname}.{tbname} range('2020-02-01 00:00:16', '2020-02-01 00:00:19') every(1s) fill(value, 1 + 2)")
+ tdSql.checkRows(4)
+ tdSql.checkData(0, 0, 3)
+ tdSql.checkData(1, 0, 3)
+ tdSql.checkData(2, 0, 3)
+ tdSql.checkData(3, 0, 3)
+
+ tdSql.query(f"select interp(c0) from {dbname}.{tbname} range('2020-02-01 00:00:16', '2020-02-01 00:00:19') every(1s) fill(value, 1.0 + 2.0)")
+ tdSql.checkRows(4)
+ tdSql.checkData(0, 0, 3)
+ tdSql.checkData(1, 0, 3)
+ tdSql.checkData(2, 0, 3)
+ tdSql.checkData(3, 0, 3)
+
+ tdSql.query(f"select interp(c0) from {dbname}.{tbname} range('2020-02-01 00:00:16', '2020-02-01 00:00:19') every(1s) fill(value, 1 + 2.5)")
+ tdSql.checkRows(4)
+ tdSql.checkData(0, 0, 3)
+ tdSql.checkData(1, 0, 3)
+ tdSql.checkData(2, 0, 3)
+ tdSql.checkData(3, 0, 3)
+
+ tdSql.query(f"select interp(c0) from {dbname}.{tbname} range('2020-02-01 00:00:16', '2020-02-01 00:00:19') every(1s) fill(value, 1 + '2')")
+ tdSql.checkRows(4)
+ tdSql.checkData(0, 0, 3)
+ tdSql.checkData(1, 0, 3)
+ tdSql.checkData(2, 0, 3)
+ tdSql.checkData(3, 0, 3)
+
+ tdSql.query(f"select interp(c0) from {dbname}.{tbname} range('2020-02-01 00:00:16', '2020-02-01 00:00:19') every(1s) fill(value, 1 + '2.0')")
+ tdSql.checkRows(4)
+ tdSql.checkData(0, 0, 3)
+ tdSql.checkData(1, 0, 3)
+ tdSql.checkData(2, 0, 3)
+ tdSql.checkData(3, 0, 3)
+
+ tdSql.query(f"select interp(c0) from {dbname}.{tbname} range('2020-02-01 00:00:16', '2020-02-01 00:00:19') every(1s) fill(value, '3' + 'abc')")
+ tdSql.checkRows(4)
+ tdSql.checkData(0, 0, 3)
+ tdSql.checkData(1, 0, 3)
+ tdSql.checkData(2, 0, 3)
+ tdSql.checkData(3, 0, 3)
+
+ tdSql.query(f"select interp(c0) from {dbname}.{tbname} range('2020-02-01 00:00:16', '2020-02-01 00:00:19') every(1s) fill(value, '2' + '1abc')")
+ tdSql.checkRows(4)
+ tdSql.checkData(0, 0, 3)
+ tdSql.checkData(1, 0, 3)
+ tdSql.checkData(2, 0, 3)
+ tdSql.checkData(3, 0, 3)
+
tdLog.printNoPrefix("==========step5:fill prev")
## {. . .}
@@ -1759,47 +1815,10 @@ class TDTestCase:
tdSql.checkData(60, 1, 60) #
# test fill value
- tdSql.query(f"select interp(c0),interp(c1) from {dbname}.{tbname1} range('2020-02-02 00:00:00', '2020-02-02 00:01:00') every(1s) fill(value, 123)")
+ tdSql.query(f"select _irowts, interp(c0), _irowts, interp(c1), _irowts from {dbname}.{tbname1} range('2020-02-02 00:00:00', '2020-02-02 00:01:00') every(1s) fill(value, 123, 456)")
tdSql.checkRows(61)
- tdSql.checkCols(2)
- tdSql.checkData(0, 0, 0) #
- tdSql.checkData(1, 0, 123)
- tdSql.checkData(4, 0, 123)
- tdSql.checkData(5, 0, None) #
- tdSql.checkData(6, 0, 123)
- tdSql.checkData(9, 0, 123)
- tdSql.checkData(10, 0, 10) #
- tdSql.checkData(11, 0, 123)
- tdSql.checkData(14, 0, 123)
- tdSql.checkData(15, 0, None) #
- tdSql.checkData(16, 0, 123)
- tdSql.checkData(19, 0, 123)
- tdSql.checkData(20, 0, 20) #
- tdSql.checkData(21, 0, 123)
- tdSql.checkData(24, 0, 123)
- tdSql.checkData(25, 0, None) #
- tdSql.checkData(26, 0, 123)
- tdSql.checkData(29, 0, 123)
- tdSql.checkData(30, 0, 30) #
- tdSql.checkData(31, 0, 123)
- tdSql.checkData(34, 0, 123)
- tdSql.checkData(35, 0, 35) #
- tdSql.checkData(36, 0, 123)
- tdSql.checkData(39, 0, 123)
- tdSql.checkData(40, 0, 40) #
- tdSql.checkData(41, 0, 123)
- tdSql.checkData(44, 0, 123)
- tdSql.checkData(45, 0, None) #
- tdSql.checkData(46, 0, 123)
- tdSql.checkData(49, 0, 123)
- tdSql.checkData(50, 0, 50) #
- tdSql.checkData(51, 0, 123)
- tdSql.checkData(54, 0, 123)
- tdSql.checkData(55, 0, None) #
- tdSql.checkData(59, 0, 123)
- tdSql.checkData(60, 0, 55) #
-
- tdSql.checkData(0, 1, None) #
+ tdSql.checkCols(5)
+ tdSql.checkData(0, 1, 0) #
tdSql.checkData(1, 1, 123)
tdSql.checkData(4, 1, 123)
tdSql.checkData(5, 1, None) #
@@ -1811,7 +1830,7 @@ class TDTestCase:
tdSql.checkData(15, 1, None) #
tdSql.checkData(16, 1, 123)
tdSql.checkData(19, 1, 123)
- tdSql.checkData(20, 1, None) #
+ tdSql.checkData(20, 1, 20) #
tdSql.checkData(21, 1, 123)
tdSql.checkData(24, 1, 123)
tdSql.checkData(25, 1, None) #
@@ -1820,22 +1839,137 @@ class TDTestCase:
tdSql.checkData(30, 1, 30) #
tdSql.checkData(31, 1, 123)
tdSql.checkData(34, 1, 123)
- tdSql.checkData(35, 1, None) #
+ tdSql.checkData(35, 1, 35) #
tdSql.checkData(36, 1, 123)
tdSql.checkData(39, 1, 123)
tdSql.checkData(40, 1, 40) #
tdSql.checkData(41, 1, 123)
tdSql.checkData(44, 1, 123)
- tdSql.checkData(45, 1, 45) #
+ tdSql.checkData(45, 1, None) #
tdSql.checkData(46, 1, 123)
tdSql.checkData(49, 1, 123)
- tdSql.checkData(50, 1, None) #
+ tdSql.checkData(50, 1, 50) #
tdSql.checkData(51, 1, 123)
tdSql.checkData(54, 1, 123)
tdSql.checkData(55, 1, None) #
- tdSql.checkData(56, 1, 123)
tdSql.checkData(59, 1, 123)
- tdSql.checkData(60, 1, 60) #
+ tdSql.checkData(60, 1, 55) #
+
+ tdSql.checkData(0, 3, None) #
+ tdSql.checkData(1, 3, 456)
+ tdSql.checkData(4, 3, 456)
+ tdSql.checkData(5, 3, None) #
+ tdSql.checkData(6, 3, 456)
+ tdSql.checkData(9, 3, 456)
+ tdSql.checkData(10, 3, 10) #
+ tdSql.checkData(11, 3, 456)
+ tdSql.checkData(14, 3, 456)
+ tdSql.checkData(15, 3, None) #
+ tdSql.checkData(16, 3, 456)
+ tdSql.checkData(19, 3, 456)
+ tdSql.checkData(20, 3, None) #
+ tdSql.checkData(21, 3, 456)
+ tdSql.checkData(24, 3, 456)
+ tdSql.checkData(25, 3, None) #
+ tdSql.checkData(26, 3, 456)
+ tdSql.checkData(29, 3, 456)
+ tdSql.checkData(30, 3, 30) #
+ tdSql.checkData(31, 3, 456)
+ tdSql.checkData(34, 3, 456)
+ tdSql.checkData(35, 3, None) #
+ tdSql.checkData(36, 3, 456)
+ tdSql.checkData(39, 3, 456)
+ tdSql.checkData(40, 3, 40) #
+ tdSql.checkData(41, 3, 456)
+ tdSql.checkData(44, 3, 456)
+ tdSql.checkData(45, 3, 45) #
+ tdSql.checkData(46, 3, 456)
+ tdSql.checkData(49, 3, 456)
+ tdSql.checkData(50, 3, None) #
+ tdSql.checkData(51, 3, 456)
+ tdSql.checkData(54, 3, 456)
+ tdSql.checkData(55, 3, None) #
+ tdSql.checkData(56, 3, 456)
+ tdSql.checkData(59, 3, 456)
+ tdSql.checkData(60, 3, 60) #
+
+ tdSql.query(f"select _isfilled, interp(c0), _isfilled, interp(c1), _isfilled from {dbname}.{tbname1} range('2020-02-02 00:00:00', '2020-02-02 00:01:00') every(1s) fill(value, 123 + 123, 234 + 234)")
+ tdSql.checkRows(61)
+ tdSql.checkCols(5)
+ tdSql.checkData(0, 1, 0) #
+ tdSql.checkData(1, 1, 246)
+ tdSql.checkData(4, 1, 246)
+ tdSql.checkData(5, 1, None) #
+ tdSql.checkData(6, 1, 246)
+ tdSql.checkData(9, 1, 246)
+ tdSql.checkData(10, 1, 10) #
+ tdSql.checkData(11, 1, 246)
+ tdSql.checkData(14, 1, 246)
+ tdSql.checkData(15, 1, None) #
+ tdSql.checkData(16, 1, 246)
+ tdSql.checkData(19, 1, 246)
+ tdSql.checkData(20, 1, 20) #
+ tdSql.checkData(21, 1, 246)
+ tdSql.checkData(24, 1, 246)
+ tdSql.checkData(25, 1, None) #
+ tdSql.checkData(26, 1, 246)
+ tdSql.checkData(29, 1, 246)
+ tdSql.checkData(30, 1, 30) #
+ tdSql.checkData(31, 1, 246)
+ tdSql.checkData(34, 1, 246)
+ tdSql.checkData(35, 1, 35) #
+ tdSql.checkData(36, 1, 246)
+ tdSql.checkData(39, 1, 246)
+ tdSql.checkData(40, 1, 40) #
+ tdSql.checkData(41, 1, 246)
+ tdSql.checkData(44, 1, 246)
+ tdSql.checkData(45, 1, None) #
+ tdSql.checkData(46, 1, 246)
+ tdSql.checkData(49, 1, 246)
+ tdSql.checkData(50, 1, 50) #
+ tdSql.checkData(51, 1, 246)
+ tdSql.checkData(54, 1, 246)
+ tdSql.checkData(55, 1, None) #
+ tdSql.checkData(59, 1, 246)
+ tdSql.checkData(60, 1, 55) #
+
+ tdSql.checkData(0, 3, None) #
+ tdSql.checkData(1, 3, 468)
+ tdSql.checkData(4, 3, 468)
+ tdSql.checkData(5, 3, None) #
+ tdSql.checkData(6, 3, 468)
+ tdSql.checkData(9, 3, 468)
+ tdSql.checkData(10, 3, 10) #
+ tdSql.checkData(11, 3, 468)
+ tdSql.checkData(14, 3, 468)
+ tdSql.checkData(15, 3, None) #
+ tdSql.checkData(16, 3, 468)
+ tdSql.checkData(19, 3, 468)
+ tdSql.checkData(20, 3, None) #
+ tdSql.checkData(21, 3, 468)
+ tdSql.checkData(24, 3, 468)
+ tdSql.checkData(25, 3, None) #
+ tdSql.checkData(26, 3, 468)
+ tdSql.checkData(29, 3, 468)
+ tdSql.checkData(30, 3, 30) #
+ tdSql.checkData(31, 3, 468)
+ tdSql.checkData(34, 3, 468)
+ tdSql.checkData(35, 3, None) #
+ tdSql.checkData(36, 3, 468)
+ tdSql.checkData(39, 3, 468)
+ tdSql.checkData(40, 3, 40) #
+ tdSql.checkData(41, 3, 468)
+ tdSql.checkData(44, 3, 468)
+ tdSql.checkData(45, 3, 45) #
+ tdSql.checkData(46, 3, 468)
+ tdSql.checkData(49, 3, 468)
+ tdSql.checkData(50, 3, None) #
+ tdSql.checkData(51, 3, 468)
+ tdSql.checkData(54, 3, 468)
+ tdSql.checkData(55, 3, None) #
+ tdSql.checkData(56, 3, 468)
+ tdSql.checkData(59, 3, 468)
+ tdSql.checkData(60, 3, 60) #
# test fill prev
tdSql.query(f"select interp(c0),interp(c1) from {dbname}.{tbname1} range('2020-02-02 00:00:00', '2020-02-02 00:01:00') every(1s) fill(prev)")
@@ -2010,7 +2144,7 @@ class TDTestCase:
tdSql.checkData(3, i, None)
tdSql.checkData(4, i, None)
- tdSql.query(f"select interp(c0),interp(c1),interp(c2),interp(c3) from {dbname}.{tbname} range('2020-02-09 00:00:05', '2020-02-13 00:00:05') every(1d) fill(value, 1)")
+ tdSql.query(f"select interp(c0),interp(c1),interp(c2),interp(c3) from {dbname}.{tbname} range('2020-02-09 00:00:05', '2020-02-13 00:00:05') every(1d) fill(value, 1, 1, 1, 1)")
tdSql.checkRows(5)
tdSql.checkCols(4)
@@ -2436,6 +2570,10 @@ class TDTestCase:
tdSql.error(f"select interp(c0) from {dbname}.{tbname} where _isfilled = true range('2020-02-10 00:00:05', '2020-02-15 00:00:05') every(1d) fill(null)")
tdSql.error(f"select interp(c0) from {dbname}.{tbname} where _irowts > 0 range('2020-02-10 00:00:05', '2020-02-15 00:00:05') every(1d) fill(null)")
+ # fill value number mismatch
+ tdSql.error(f"select interp(c0) from {dbname}.{tbname} range('2020-02-10 00:00:05', '2020-02-15 00:00:05') every(1d) fill(value, 1, 2)")
+ tdSql.error(f"select interp(c0), interp(c1) from {dbname}.{tbname} range('2020-02-10 00:00:05', '2020-02-15 00:00:05') every(1d) fill(value, 1)")
+
@@ -4020,7 +4158,7 @@ class TDTestCase:
tdLog.printNoPrefix("======step 15: test interp pseudo columns")
tdSql.error(f"select _irowts, c6 from {dbname}.{tbname}")
- tdLog.printNoPrefix("======step 15: test interp in nested query")
+ tdLog.printNoPrefix("======step 16: test interp in nested query")
tdSql.query(f"select _irowts, _isfilled, interp(c0) from (select * from {dbname}.{stbname}) range('2020-02-01 00:00:00', '2020-02-01 00:00:14') every(1s) fill(null)")
tdSql.query(f"select _irowts, _isfilled, interp(c0) from (select * from {dbname}.{ctbname1}) range('2020-02-01 00:00:00', '2020-02-01 00:00:14') every(1s) fill(null)")
@@ -4028,16 +4166,1157 @@ class TDTestCase:
tdSql.error(f"select _irowts, _isfilled, interp(c0) from (select * from {dbname}.{ctbname1}) partition by tbname range('2020-02-01 00:00:00', '2020-02-01 00:00:14') every(1s) fill(null)")
tdSql.error(f"select _irowts, _isfilled, interp(c0) from (select * from {dbname}.{ctbname1} union select * from {dbname}.{ctbname2}) range('2020-02-01 00:00:00', '2020-02-01 00:00:14') every(1s) fill(null)")
- tdSql.error(f"select _irowts, _isfilled, interp(c0) from (select * from {dbname}.{ctbname1} union select * from {dbname}.{ctbname2} order by ts) range('2020-02-01 00:00:00', '2020-02-01 00:00:14') every(1s) fill(null)")
+ tdSql.query(f"select _irowts, _isfilled, interp(c0) from (select * from {dbname}.{ctbname1} union select * from {dbname}.{ctbname2} order by ts) range('2020-02-01 00:00:00', '2020-02-01 00:00:14') every(1s) fill(null)")
tdSql.error(f"select _irowts, _isfilled, interp(c0) from (select * from {dbname}.{ctbname1} union all select * from {dbname}.{ctbname2}) range('2020-02-01 00:00:00', '2020-02-01 00:00:14') every(1s) fill(null)")
- tdSql.error(f"select _irowts, _isfilled, interp(c0) from (select * from {dbname}.{ctbname1} union all select * from {dbname}.{ctbname2} order by ts) range('2020-02-01 00:00:00', '2020-02-01 00:00:14') every(1s) fill(null)")
+ tdSql.query(f"select _irowts, _isfilled, interp(c0) from (select * from {dbname}.{ctbname1} union all select * from {dbname}.{ctbname2} order by ts) range('2020-02-01 00:00:00', '2020-02-01 00:00:14') every(1s) fill(null)")
tdSql.error(f"select _irowts, _isfilled, interp(c0) from (select * from {dbname}.{ctbname1} union all select * from {dbname}.{ctbname2}) range('2020-02-01 00:00:00', '2020-02-01 00:00:14') every(1s) fill(null)")
- tdSql.error(f"select _irowts, _isfilled, interp(c0) from (select * from {dbname}.{ctbname1} union all select * from {dbname}.{ctbname2} order by ts) range('2020-02-01 00:00:00', '2020-02-01 00:00:14') every(1s) fill(null)")
+ tdSql.query(f"select _irowts, _isfilled, interp(c0) from (select * from {dbname}.{ctbname1} union all select * from {dbname}.{ctbname2} order by ts) range('2020-02-01 00:00:00', '2020-02-01 00:00:14') every(1s) fill(null)")
tdSql.query(f"select _irowts, _isfilled, interp(c0) from (select {ctbname1}.ts,{ctbname1}.c0 from {dbname}.{ctbname1}, {dbname}.{ctbname2} where {ctbname1}.ts = {ctbname2}.ts) range('2020-02-01 00:00:00', '2020-02-01 00:00:14') every(1s) fill(null)")
+ tdLog.printNoPrefix("======step 17: test interp single point")
+ tdSql.execute(
+ f'''create table if not exists {dbname}.{tbname_single}
+ (ts timestamp, c0 int)
+ '''
+ )
+
+ tdSql.execute(f"insert into {dbname}.{tbname_single} values ('2020-02-01 00:00:01', 1)")
+ tdSql.execute(f"insert into {dbname}.{tbname_single} values ('2020-02-01 00:00:03', 3)")
+ tdSql.execute(f"insert into {dbname}.{tbname_single} values ('2020-02-01 00:00:05', 5)")
+
+ tdSql.execute(
+ f'''create table if not exists {dbname}.{stbname_single}
+ (ts timestamp, c0 int, c1 float, c2 bool) tags (t0 int)
+ '''
+ )
+
+ tdSql.execute(
+ f'''create table if not exists {dbname}.{ctbname1_single} using {dbname}.{stbname_single} tags(1)
+ '''
+ )
+
+ tdSql.execute(
+ f'''create table if not exists {dbname}.{ctbname2_single} using {dbname}.{stbname_single} tags(2)
+ '''
+ )
+
+ tdSql.execute(
+ f'''create table if not exists {dbname}.{ctbname3_single} using {dbname}.{stbname_single} tags(3)
+ '''
+ )
+
+ tdSql.execute(f"insert into {dbname}.{ctbname1_single} values ('2020-02-01 00:00:01', 1, 1.0, true)")
+
+ tdSql.execute(f"insert into {dbname}.{ctbname2_single} values ('2020-02-01 00:00:03', 3, 3.0, false)")
+
+ tdSql.execute(f"insert into {dbname}.{ctbname3_single} values ('2020-02-01 00:00:05', 5, 5.0, true)")
+
+ # normal table
+ tdSql.query(f"select _irowts, _isfilled, interp(c0) from {dbname}.{tbname_single} range('2020-02-01 00:00:00', '2020-02-01 00:00:00') every(1s) fill(null)")
+ tdSql.checkRows(1)
+ tdSql.checkData(0, 0, '2020-02-01 00:00:00.000')
+ tdSql.checkData(0, 1, True)
+ tdSql.checkData(0, 2, None)
+ tdSql.query(f"select _irowts, _isfilled, interp(c0) from {dbname}.{tbname_single} range('2020-02-01 00:00:00') fill(null)")
+ tdSql.checkRows(1)
+ tdSql.checkData(0, 0, '2020-02-01 00:00:00.000')
+ tdSql.checkData(0, 1, True)
+ tdSql.checkData(0, 2, None)
+
+ tdSql.query(f"select _irowts, _isfilled, interp(c0) from {dbname}.{tbname_single} range('2020-02-01 00:00:01', '2020-02-01 00:00:01') every(1s) fill(null)")
+ tdSql.checkRows(1)
+ tdSql.checkData(0, 0, '2020-02-01 00:00:01.000')
+ tdSql.checkData(0, 1, False)
+ tdSql.checkData(0, 2, 1)
+ tdSql.query(f"select _irowts, _isfilled, interp(c0) from {dbname}.{tbname_single} range('2020-02-01 00:00:01') fill(null)")
+ tdSql.checkRows(1)
+ tdSql.checkData(0, 0, '2020-02-01 00:00:01.000')
+ tdSql.checkData(0, 1, False)
+ tdSql.checkData(0, 2, 1)
+
+ tdSql.query(f"select _irowts, _isfilled, interp(c0) from {dbname}.{tbname_single} range('2020-02-01 00:00:02', '2020-02-01 00:00:02') every(1s) fill(null)")
+ tdSql.checkRows(1)
+ tdSql.checkData(0, 0, '2020-02-01 00:00:02.000')
+ tdSql.checkData(0, 1, True)
+ tdSql.checkData(0, 2, None)
+ tdSql.query(f"select _irowts, _isfilled, interp(c0) from {dbname}.{tbname_single} range('2020-02-01 00:00:02') fill(null)")
+ tdSql.checkRows(1)
+ tdSql.checkData(0, 0, '2020-02-01 00:00:02.000')
+ tdSql.checkData(0, 1, True)
+ tdSql.checkData(0, 2, None)
+
+ tdSql.query(f"select _irowts, _isfilled, interp(c0) from {dbname}.{tbname_single} range('2020-02-01 00:00:03', '2020-02-01 00:00:03') every(1s) fill(null)")
+ tdSql.checkRows(1)
+ tdSql.checkData(0, 0, '2020-02-01 00:00:03.000')
+ tdSql.checkData(0, 1, False)
+ tdSql.checkData(0, 2, 3)
+ tdSql.query(f"select _irowts, _isfilled, interp(c0) from {dbname}.{tbname_single} range('2020-02-01 00:00:03') fill(null)")
+ tdSql.checkRows(1)
+ tdSql.checkData(0, 0, '2020-02-01 00:00:03.000')
+ tdSql.checkData(0, 1, False)
+ tdSql.checkData(0, 2, 3)
+
+ tdSql.query(f"select _irowts, _isfilled, interp(c0) from {dbname}.{tbname_single} range('2020-02-01 00:00:04', '2020-02-01 00:00:04') every(1s) fill(null)")
+ tdSql.checkRows(1)
+ tdSql.checkData(0, 0, '2020-02-01 00:00:04.000')
+ tdSql.checkData(0, 1, True)
+ tdSql.checkData(0, 2, None)
+ tdSql.query(f"select _irowts, _isfilled, interp(c0) from {dbname}.{tbname_single} range('2020-02-01 00:00:04') fill(null)")
+ tdSql.checkRows(1)
+ tdSql.checkData(0, 0, '2020-02-01 00:00:04.000')
+ tdSql.checkData(0, 1, True)
+ tdSql.checkData(0, 2, None)
+
+ tdSql.query(f"select _irowts, _isfilled, interp(c0) from {dbname}.{tbname_single} range('2020-02-01 00:00:05', '2020-02-01 00:00:05') every(1s) fill(null)")
+ tdSql.checkRows(1)
+ tdSql.checkData(0, 0, '2020-02-01 00:00:05.000')
+ tdSql.checkData(0, 1, False)
+ tdSql.checkData(0, 2, 5)
+ tdSql.query(f"select _irowts, _isfilled, interp(c0) from {dbname}.{tbname_single} range('2020-02-01 00:00:05') fill(null)")
+ tdSql.checkRows(1)
+ tdSql.checkData(0, 0, '2020-02-01 00:00:05.000')
+ tdSql.checkData(0, 1, False)
+ tdSql.checkData(0, 2, 5)
+
+ tdSql.query(f"select _irowts, _isfilled, interp(c0) from {dbname}.{tbname_single} range('2020-02-01 00:00:06', '2020-02-01 00:00:06') every(1s) fill(null)")
+ tdSql.checkRows(1)
+ tdSql.checkData(0, 0, '2020-02-01 00:00:06.000')
+ tdSql.checkData(0, 1, True)
+ tdSql.checkData(0, 2, None)
+ tdSql.query(f"select _irowts, _isfilled, interp(c0) from {dbname}.{tbname_single} range('2020-02-01 00:00:06') fill(null)")
+ tdSql.checkRows(1)
+ tdSql.checkData(0, 0, '2020-02-01 00:00:06.000')
+ tdSql.checkData(0, 1, True)
+ tdSql.checkData(0, 2, None)
+
+ tdSql.query(f"select _irowts, _isfilled, interp(c0) from {dbname}.{tbname_single} range('2020-02-01 00:00:00', '2020-02-01 00:00:00') every(1s) fill(value, 0)")
+ tdSql.checkRows(1)
+ tdSql.checkData(0, 0, '2020-02-01 00:00:00.000')
+ tdSql.checkData(0, 1, True)
+ tdSql.checkData(0, 2, 0)
+ tdSql.query(f"select _irowts, _isfilled, interp(c0) from {dbname}.{tbname_single} range('2020-02-01 00:00:00') fill(value, 0)")
+ tdSql.checkRows(1)
+ tdSql.checkData(0, 0, '2020-02-01 00:00:00.000')
+ tdSql.checkData(0, 1, True)
+ tdSql.checkData(0, 2, 0)
+
+ tdSql.query(f"select _irowts, _isfilled, interp(c0) from {dbname}.{tbname_single} range('2020-02-01 00:00:01', '2020-02-01 00:00:01') every(1s) fill(value, 0)")
+ tdSql.checkRows(1)
+ tdSql.checkData(0, 0, '2020-02-01 00:00:01.000')
+ tdSql.checkData(0, 1, False)
+ tdSql.checkData(0, 2, 1)
+ tdSql.query(f"select _irowts, _isfilled, interp(c0) from {dbname}.{tbname_single} range('2020-02-01 00:00:01') fill(value, 0)")
+ tdSql.checkRows(1)
+ tdSql.checkData(0, 0, '2020-02-01 00:00:01.000')
+ tdSql.checkData(0, 1, False)
+ tdSql.checkData(0, 2, 1)
+
+ tdSql.query(f"select _irowts, _isfilled, interp(c0) from {dbname}.{tbname_single} range('2020-02-01 00:00:02', '2020-02-01 00:00:02') every(1s) fill(value, 0)")
+ tdSql.checkRows(1)
+ tdSql.checkData(0, 0, '2020-02-01 00:00:02.000')
+ tdSql.checkData(0, 1, True)
+ tdSql.checkData(0, 2, 0)
+ tdSql.query(f"select _irowts, _isfilled, interp(c0) from {dbname}.{tbname_single} range('2020-02-01 00:00:02') fill(value, 0)")
+ tdSql.checkRows(1)
+ tdSql.checkData(0, 0, '2020-02-01 00:00:02.000')
+ tdSql.checkData(0, 1, True)
+ tdSql.checkData(0, 2, 0)
+
+ tdSql.query(f"select _irowts, _isfilled, interp(c0) from {dbname}.{tbname_single} range('2020-02-01 00:00:03', '2020-02-01 00:00:03') every(1s) fill(value, 0)")
+ tdSql.checkRows(1)
+ tdSql.checkData(0, 0, '2020-02-01 00:00:03.000')
+ tdSql.checkData(0, 1, False)
+ tdSql.checkData(0, 2, 3)
+ tdSql.query(f"select _irowts, _isfilled, interp(c0) from {dbname}.{tbname_single} range('2020-02-01 00:00:03') fill(value, 0)")
+ tdSql.checkRows(1)
+ tdSql.checkData(0, 0, '2020-02-01 00:00:03.000')
+ tdSql.checkData(0, 1, False)
+ tdSql.checkData(0, 2, 3)
+
+ tdSql.query(f"select _irowts, _isfilled, interp(c0) from {dbname}.{tbname_single} range('2020-02-01 00:00:04', '2020-02-01 00:00:04') every(1s) fill(value, 0)")
+ tdSql.checkRows(1)
+ tdSql.checkData(0, 0, '2020-02-01 00:00:04.000')
+ tdSql.checkData(0, 1, True)
+ tdSql.checkData(0, 2, 0)
+ tdSql.query(f"select _irowts, _isfilled, interp(c0) from {dbname}.{tbname_single} range('2020-02-01 00:00:04') fill(value, 0)")
+ tdSql.checkRows(1)
+ tdSql.checkData(0, 0, '2020-02-01 00:00:04.000')
+ tdSql.checkData(0, 1, True)
+ tdSql.checkData(0, 2, 0)
+
+ tdSql.query(f"select _irowts, _isfilled, interp(c0) from {dbname}.{tbname_single} range('2020-02-01 00:00:05', '2020-02-01 00:00:05') every(1s) fill(value, 0)")
+ tdSql.checkRows(1)
+ tdSql.checkData(0, 0, '2020-02-01 00:00:05.000')
+ tdSql.checkData(0, 1, False)
+ tdSql.checkData(0, 2, 5)
+ tdSql.query(f"select _irowts, _isfilled, interp(c0) from {dbname}.{tbname_single} range('2020-02-01 00:00:05') fill(value, 0)")
+ tdSql.checkRows(1)
+ tdSql.checkData(0, 0, '2020-02-01 00:00:05.000')
+ tdSql.checkData(0, 1, False)
+ tdSql.checkData(0, 2, 5)
+
+ tdSql.query(f"select _irowts, _isfilled, interp(c0) from {dbname}.{tbname_single} range('2020-02-01 00:00:06', '2020-02-01 00:00:06') every(1s) fill(value, 0)")
+ tdSql.checkRows(1)
+ tdSql.checkData(0, 0, '2020-02-01 00:00:06.000')
+ tdSql.checkData(0, 1, True)
+ tdSql.checkData(0, 2, 0)
+ tdSql.query(f"select _irowts, _isfilled, interp(c0) from {dbname}.{tbname_single} range('2020-02-01 00:00:06') fill(value, 0)")
+ tdSql.checkRows(1)
+ tdSql.checkData(0, 0, '2020-02-01 00:00:06.000')
+ tdSql.checkData(0, 1, True)
+ tdSql.checkData(0, 2, 0)
+
+ tdSql.query(f"select _irowts, _isfilled, interp(c0) from {dbname}.{tbname_single} range('2020-02-01 00:00:00', '2020-02-01 00:00:00') every(1s) fill(prev)")
+ tdSql.checkRows(0)
+ tdSql.query(f"select _irowts, _isfilled, interp(c0) from {dbname}.{tbname_single} range('2020-02-01 00:00:00') fill(prev)")
+ tdSql.checkRows(0)
+
+ tdSql.query(f"select _irowts, _isfilled, interp(c0) from {dbname}.{tbname_single} range('2020-02-01 00:00:01', '2020-02-01 00:00:01') every(1s) fill(prev)")
+ tdSql.checkRows(1)
+ tdSql.checkData(0, 0, '2020-02-01 00:00:01.000')
+ tdSql.checkData(0, 1, False)
+ tdSql.checkData(0, 2, 1)
+ tdSql.query(f"select _irowts, _isfilled, interp(c0) from {dbname}.{tbname_single} range('2020-02-01 00:00:01') fill(prev)")
+ tdSql.checkRows(1)
+ tdSql.checkData(0, 0, '2020-02-01 00:00:01.000')
+ tdSql.checkData(0, 1, False)
+ tdSql.checkData(0, 2, 1)
+
+ tdSql.query(f"select _irowts, _isfilled, interp(c0) from {dbname}.{tbname_single} range('2020-02-01 00:00:02', '2020-02-01 00:00:02') every(1s) fill(prev)")
+ tdSql.checkRows(1)
+ tdSql.checkData(0, 0, '2020-02-01 00:00:02.000')
+ tdSql.checkData(0, 1, True)
+ tdSql.checkData(0, 2, 1)
+ tdSql.query(f"select _irowts, _isfilled, interp(c0) from {dbname}.{tbname_single} range('2020-02-01 00:00:02') fill(prev)")
+ tdSql.checkRows(1)
+ tdSql.checkData(0, 0, '2020-02-01 00:00:02.000')
+ tdSql.checkData(0, 1, True)
+ tdSql.checkData(0, 2, 1)
+
+ tdSql.query(f"select _irowts, _isfilled, interp(c0) from {dbname}.{tbname_single} range('2020-02-01 00:00:03', '2020-02-01 00:00:03') every(1s) fill(prev)")
+ tdSql.checkRows(1)
+ tdSql.checkData(0, 0, '2020-02-01 00:00:03.000')
+ tdSql.checkData(0, 1, False)
+ tdSql.checkData(0, 2, 3)
+ tdSql.query(f"select _irowts, _isfilled, interp(c0) from {dbname}.{tbname_single} range('2020-02-01 00:00:03') fill(prev)")
+ tdSql.checkRows(1)
+ tdSql.checkData(0, 0, '2020-02-01 00:00:03.000')
+ tdSql.checkData(0, 1, False)
+ tdSql.checkData(0, 2, 3)
+
+ tdSql.query(f"select _irowts, _isfilled, interp(c0) from {dbname}.{tbname_single} range('2020-02-01 00:00:04', '2020-02-01 00:00:04') every(1s) fill(prev)")
+ tdSql.checkRows(1)
+ tdSql.checkData(0, 0, '2020-02-01 00:00:04.000')
+ tdSql.checkData(0, 1, True)
+ tdSql.checkData(0, 2, 3)
+ tdSql.query(f"select _irowts, _isfilled, interp(c0) from {dbname}.{tbname_single} range('2020-02-01 00:00:04') fill(prev)")
+ tdSql.checkRows(1)
+ tdSql.checkData(0, 0, '2020-02-01 00:00:04.000')
+ tdSql.checkData(0, 1, True)
+ tdSql.checkData(0, 2, 3)
+
+ tdSql.query(f"select _irowts, _isfilled, interp(c0) from {dbname}.{tbname_single} range('2020-02-01 00:00:05', '2020-02-01 00:00:05') every(1s) fill(prev)")
+ tdSql.checkRows(1)
+ tdSql.checkData(0, 0, '2020-02-01 00:00:05.000')
+ tdSql.checkData(0, 1, False)
+ tdSql.checkData(0, 2, 5)
+ tdSql.query(f"select _irowts, _isfilled, interp(c0) from {dbname}.{tbname_single} range('2020-02-01 00:00:05') fill(prev)")
+ tdSql.checkRows(1)
+ tdSql.checkData(0, 0, '2020-02-01 00:00:05.000')
+ tdSql.checkData(0, 1, False)
+ tdSql.checkData(0, 2, 5)
+
+ tdSql.query(f"select _irowts, _isfilled, interp(c0) from {dbname}.{tbname_single} range('2020-02-01 00:00:06', '2020-02-01 00:00:06') every(1s) fill(prev)")
+ tdSql.checkRows(1)
+ tdSql.checkData(0, 0, '2020-02-01 00:00:06.000')
+ tdSql.checkData(0, 1, True)
+ tdSql.checkData(0, 2, 5)
+ tdSql.query(f"select _irowts, _isfilled, interp(c0) from {dbname}.{tbname_single} range('2020-02-01 00:00:06') fill(prev)")
+ tdSql.checkRows(1)
+ tdSql.checkData(0, 0, '2020-02-01 00:00:06.000')
+ tdSql.checkData(0, 1, True)
+ tdSql.checkData(0, 2, 5)
+
+ tdSql.query(f"select _irowts, _isfilled, interp(c0) from {dbname}.{tbname_single} range('2020-02-01 00:00:00', '2020-02-01 00:00:00') every(1s) fill(next)")
+ tdSql.checkRows(1)
+ tdSql.checkData(0, 0, '2020-02-01 00:00:00.000')
+ tdSql.checkData(0, 1, True)
+ tdSql.checkData(0, 2, 1)
+ tdSql.query(f"select _irowts, _isfilled, interp(c0) from {dbname}.{tbname_single} range('2020-02-01 00:00:00') fill(next)")
+ tdSql.checkRows(1)
+ tdSql.checkData(0, 0, '2020-02-01 00:00:00.000')
+ tdSql.checkData(0, 1, True)
+ tdSql.checkData(0, 2, 1)
+
+ tdSql.query(f"select _irowts, _isfilled, interp(c0) from {dbname}.{tbname_single} range('2020-02-01 00:00:01', '2020-02-01 00:00:01') every(1s) fill(next)")
+ tdSql.checkRows(1)
+ tdSql.checkData(0, 0, '2020-02-01 00:00:01.000')
+ tdSql.checkData(0, 1, False)
+ tdSql.checkData(0, 2, 1)
+ tdSql.query(f"select _irowts, _isfilled, interp(c0) from {dbname}.{tbname_single} range('2020-02-01 00:00:01') fill(next)")
+ tdSql.checkRows(1)
+ tdSql.checkData(0, 0, '2020-02-01 00:00:01.000')
+ tdSql.checkData(0, 1, False)
+ tdSql.checkData(0, 2, 1)
+
+ tdSql.query(f"select _irowts, _isfilled, interp(c0) from {dbname}.{tbname_single} range('2020-02-01 00:00:02', '2020-02-01 00:00:02') every(1s) fill(next)")
+ tdSql.checkRows(1)
+ tdSql.checkData(0, 0, '2020-02-01 00:00:02.000')
+ tdSql.checkData(0, 1, True)
+ tdSql.checkData(0, 2, 3)
+ tdSql.query(f"select _irowts, _isfilled, interp(c0) from {dbname}.{tbname_single} range('2020-02-01 00:00:02') fill(next)")
+ tdSql.checkRows(1)
+ tdSql.checkData(0, 0, '2020-02-01 00:00:02.000')
+ tdSql.checkData(0, 1, True)
+ tdSql.checkData(0, 2, 3)
+
+ tdSql.query(f"select _irowts, _isfilled, interp(c0) from {dbname}.{tbname_single} range('2020-02-01 00:00:03', '2020-02-01 00:00:03') every(1s) fill(next)")
+ tdSql.checkRows(1)
+ tdSql.checkData(0, 0, '2020-02-01 00:00:03.000')
+ tdSql.checkData(0, 1, False)
+ tdSql.checkData(0, 2, 3)
+ tdSql.query(f"select _irowts, _isfilled, interp(c0) from {dbname}.{tbname_single} range('2020-02-01 00:00:03') fill(next)")
+ tdSql.checkRows(1)
+ tdSql.checkData(0, 0, '2020-02-01 00:00:03.000')
+ tdSql.checkData(0, 1, False)
+ tdSql.checkData(0, 2, 3)
+
+ tdSql.query(f"select _irowts, _isfilled, interp(c0) from {dbname}.{tbname_single} range('2020-02-01 00:00:04', '2020-02-01 00:00:04') every(1s) fill(next)")
+ tdSql.checkRows(1)
+ tdSql.checkData(0, 0, '2020-02-01 00:00:04.000')
+ tdSql.checkData(0, 1, True)
+ tdSql.checkData(0, 2, 5)
+ tdSql.query(f"select _irowts, _isfilled, interp(c0) from {dbname}.{tbname_single} range('2020-02-01 00:00:04') fill(next)")
+ tdSql.checkRows(1)
+ tdSql.checkData(0, 0, '2020-02-01 00:00:04.000')
+ tdSql.checkData(0, 1, True)
+ tdSql.checkData(0, 2, 5)
+
+ tdSql.query(f"select _irowts, _isfilled, interp(c0) from {dbname}.{tbname_single} range('2020-02-01 00:00:05', '2020-02-01 00:00:05') every(1s) fill(next)")
+ tdSql.checkRows(1)
+ tdSql.checkData(0, 0, '2020-02-01 00:00:05.000')
+ tdSql.checkData(0, 1, False)
+ tdSql.checkData(0, 2, 5)
+ tdSql.query(f"select _irowts, _isfilled, interp(c0) from {dbname}.{tbname_single} range('2020-02-01 00:00:05') fill(next)")
+ tdSql.checkRows(1)
+ tdSql.checkData(0, 0, '2020-02-01 00:00:05.000')
+ tdSql.checkData(0, 1, False)
+ tdSql.checkData(0, 2, 5)
+
+ tdSql.query(f"select _irowts, _isfilled, interp(c0) from {dbname}.{tbname_single} range('2020-02-01 00:00:06', '2020-02-01 00:00:06') every(1s) fill(next)")
+ tdSql.checkRows(0)
+ tdSql.query(f"select _irowts, _isfilled, interp(c0) from {dbname}.{tbname_single} range('2020-02-01 00:00:06') fill(next)")
+ tdSql.checkRows(0)
+
+ tdSql.query(f"select _irowts, _isfilled, interp(c0) from {dbname}.{tbname_single} range('2020-02-01 00:00:00', '2020-02-01 00:00:00') every(1s) fill(linear)")
+ tdSql.checkRows(0)
+ tdSql.query(f"select _irowts, _isfilled, interp(c0) from {dbname}.{tbname_single} range('2020-02-01 00:00:00') fill(linear)")
+ tdSql.checkRows(0)
+
+ tdSql.query(f"select _irowts, _isfilled, interp(c0) from {dbname}.{tbname_single} range('2020-02-01 00:00:01', '2020-02-01 00:00:01') every(1s) fill(linear)")
+ tdSql.checkRows(1)
+ tdSql.checkData(0, 0, '2020-02-01 00:00:01.000')
+ tdSql.checkData(0, 1, False)
+ tdSql.checkData(0, 2, 1)
+ tdSql.query(f"select _irowts, _isfilled, interp(c0) from {dbname}.{tbname_single} range('2020-02-01 00:00:01') fill(linear)")
+ tdSql.checkRows(1)
+ tdSql.checkData(0, 0, '2020-02-01 00:00:01.000')
+ tdSql.checkData(0, 1, False)
+ tdSql.checkData(0, 2, 1)
+
+ tdSql.query(f"select _irowts, _isfilled, interp(c0) from {dbname}.{tbname_single} range('2020-02-01 00:00:02', '2020-02-01 00:00:02') every(1s) fill(linear)")
+ tdSql.checkRows(1)
+ tdSql.checkData(0, 0, '2020-02-01 00:00:02.000')
+ tdSql.checkData(0, 1, True)
+ tdSql.checkData(0, 2, 2)
+ tdSql.query(f"select _irowts, _isfilled, interp(c0) from {dbname}.{tbname_single} range('2020-02-01 00:00:02') fill(linear)")
+ tdSql.checkRows(1)
+ tdSql.checkData(0, 0, '2020-02-01 00:00:02.000')
+ tdSql.checkData(0, 1, True)
+ tdSql.checkData(0, 2, 2)
+
+ tdSql.query(f"select _irowts, _isfilled, interp(c0) from {dbname}.{tbname_single} range('2020-02-01 00:00:03', '2020-02-01 00:00:03') every(1s) fill(linear)")
+ tdSql.checkRows(1)
+ tdSql.checkData(0, 0, '2020-02-01 00:00:03.000')
+ tdSql.checkData(0, 1, False)
+ tdSql.checkData(0, 2, 3)
+ tdSql.query(f"select _irowts, _isfilled, interp(c0) from {dbname}.{tbname_single} range('2020-02-01 00:00:03') fill(linear)")
+ tdSql.checkRows(1)
+ tdSql.checkData(0, 0, '2020-02-01 00:00:03.000')
+ tdSql.checkData(0, 1, False)
+ tdSql.checkData(0, 2, 3)
+
+ tdSql.query(f"select _irowts, _isfilled, interp(c0) from {dbname}.{tbname_single} range('2020-02-01 00:00:04', '2020-02-01 00:00:04') every(1s) fill(linear)")
+ tdSql.checkRows(1)
+ tdSql.checkData(0, 0, '2020-02-01 00:00:04.000')
+ tdSql.checkData(0, 1, True)
+ tdSql.checkData(0, 2, 4)
+ tdSql.query(f"select _irowts, _isfilled, interp(c0) from {dbname}.{tbname_single} range('2020-02-01 00:00:04') fill(linear)")
+ tdSql.checkRows(1)
+ tdSql.checkData(0, 0, '2020-02-01 00:00:04.000')
+ tdSql.checkData(0, 1, True)
+ tdSql.checkData(0, 2, 4)
+
+ tdSql.query(f"select _irowts, _isfilled, interp(c0) from {dbname}.{tbname_single} range('2020-02-01 00:00:05', '2020-02-01 00:00:05') every(1s) fill(linear)")
+ tdSql.checkRows(1)
+ tdSql.checkData(0, 0, '2020-02-01 00:00:05.000')
+ tdSql.checkData(0, 1, False)
+ tdSql.checkData(0, 2, 5)
+ tdSql.query(f"select _irowts, _isfilled, interp(c0) from {dbname}.{tbname_single} range('2020-02-01 00:00:05') fill(linear)")
+ tdSql.checkRows(1)
+ tdSql.checkData(0, 0, '2020-02-01 00:00:05.000')
+ tdSql.checkData(0, 1, False)
+ tdSql.checkData(0, 2, 5)
+
+ tdSql.query(f"select _irowts, _isfilled, interp(c0) from {dbname}.{tbname_single} range('2020-02-01 00:00:06', '2020-02-01 00:00:06') every(1s) fill(linear)")
+ tdSql.checkRows(0)
+ tdSql.query(f"select _irowts, _isfilled, interp(c0) from {dbname}.{tbname_single} range('2020-02-01 00:00:06') fill(linear)")
+ tdSql.checkRows(0)
+
+ #super table
+ tdSql.query(f"select _irowts, _isfilled, interp(c0) from {dbname}.{stbname_single} range('2020-02-01 00:00:00', '2020-02-01 00:00:00') every(1s) fill(null)")
+ tdSql.checkRows(1)
+ tdSql.checkData(0, 0, '2020-02-01 00:00:00.000')
+ tdSql.checkData(0, 1, True)
+ tdSql.checkData(0, 2, None)
+ tdSql.query(f"select _irowts, _isfilled, interp(c0) from {dbname}.{stbname_single} range('2020-02-01 00:00:00') fill(null)")
+ tdSql.checkRows(1)
+ tdSql.checkData(0, 0, '2020-02-01 00:00:00.000')
+ tdSql.checkData(0, 1, True)
+ tdSql.checkData(0, 2, None)
+
+ tdSql.query(f"select _irowts, _isfilled, interp(c0) from {dbname}.{stbname_single} range('2020-02-01 00:00:01', '2020-02-01 00:00:01') every(1s) fill(null)")
+ tdSql.checkRows(1)
+ tdSql.checkData(0, 0, '2020-02-01 00:00:01.000')
+ tdSql.checkData(0, 1, False)
+ tdSql.checkData(0, 2, 1)
+ tdSql.query(f"select _irowts, _isfilled, interp(c0) from {dbname}.{stbname_single} range('2020-02-01 00:00:01') fill(null)")
+ tdSql.checkRows(1)
+ tdSql.checkData(0, 0, '2020-02-01 00:00:01.000')
+ tdSql.checkData(0, 1, False)
+ tdSql.checkData(0, 2, 1)
+
+ tdSql.query(f"select _irowts, _isfilled, interp(c0) from {dbname}.{stbname_single} range('2020-02-01 00:00:02', '2020-02-01 00:00:02') every(1s) fill(null)")
+ tdSql.checkRows(1)
+ tdSql.checkData(0, 0, '2020-02-01 00:00:02.000')
+ tdSql.checkData(0, 1, True)
+ tdSql.checkData(0, 2, None)
+ tdSql.query(f"select _irowts, _isfilled, interp(c0) from {dbname}.{stbname_single} range('2020-02-01 00:00:02') fill(null)")
+ tdSql.checkRows(1)
+ tdSql.checkData(0, 0, '2020-02-01 00:00:02.000')
+ tdSql.checkData(0, 1, True)
+ tdSql.checkData(0, 2, None)
+
+ tdSql.query(f"select _irowts, _isfilled, interp(c0) from {dbname}.{stbname_single} range('2020-02-01 00:00:03', '2020-02-01 00:00:03') every(1s) fill(null)")
+ tdSql.checkRows(1)
+ tdSql.checkData(0, 0, '2020-02-01 00:00:03.000')
+ tdSql.checkData(0, 1, False)
+ tdSql.checkData(0, 2, 3)
+ tdSql.query(f"select _irowts, _isfilled, interp(c0) from {dbname}.{stbname_single} range('2020-02-01 00:00:03') fill(null)")
+ tdSql.checkRows(1)
+ tdSql.checkData(0, 0, '2020-02-01 00:00:03.000')
+ tdSql.checkData(0, 1, False)
+ tdSql.checkData(0, 2, 3)
+
+ tdSql.query(f"select _irowts, _isfilled, interp(c0) from {dbname}.{stbname_single} range('2020-02-01 00:00:04', '2020-02-01 00:00:04') every(1s) fill(null)")
+ tdSql.checkRows(1)
+ tdSql.checkData(0, 0, '2020-02-01 00:00:04.000')
+ tdSql.checkData(0, 1, True)
+ tdSql.checkData(0, 2, None)
+ tdSql.query(f"select _irowts, _isfilled, interp(c0) from {dbname}.{stbname_single} range('2020-02-01 00:00:04') fill(null)")
+ tdSql.checkRows(1)
+ tdSql.checkData(0, 0, '2020-02-01 00:00:04.000')
+ tdSql.checkData(0, 1, True)
+ tdSql.checkData(0, 2, None)
+
+ tdSql.query(f"select _irowts, _isfilled, interp(c0) from {dbname}.{stbname_single} range('2020-02-01 00:00:05', '2020-02-01 00:00:05') every(1s) fill(null)")
+ tdSql.checkRows(1)
+ tdSql.checkData(0, 0, '2020-02-01 00:00:05.000')
+ tdSql.checkData(0, 1, False)
+ tdSql.checkData(0, 2, 5)
+ tdSql.query(f"select _irowts, _isfilled, interp(c0) from {dbname}.{stbname_single} range('2020-02-01 00:00:05') fill(null)")
+ tdSql.checkRows(1)
+ tdSql.checkData(0, 0, '2020-02-01 00:00:05.000')
+ tdSql.checkData(0, 1, False)
+ tdSql.checkData(0, 2, 5)
+
+ tdSql.query(f"select _irowts, _isfilled, interp(c0) from {dbname}.{stbname_single} range('2020-02-01 00:00:06', '2020-02-01 00:00:06') every(1s) fill(null)")
+ tdSql.checkRows(1)
+ tdSql.checkData(0, 0, '2020-02-01 00:00:06.000')
+ tdSql.checkData(0, 1, True)
+ tdSql.checkData(0, 2, None)
+ tdSql.query(f"select _irowts, _isfilled, interp(c0) from {dbname}.{stbname_single} range('2020-02-01 00:00:06') fill(null)")
+ tdSql.checkRows(1)
+ tdSql.checkData(0, 0, '2020-02-01 00:00:06.000')
+ tdSql.checkData(0, 1, True)
+ tdSql.checkData(0, 2, None)
+
+ tdSql.query(f"select _irowts, _isfilled, interp(c0) from {dbname}.{stbname_single} range('2020-02-01 00:00:00', '2020-02-01 00:00:00') every(1s) fill(value, 0)")
+ tdSql.checkRows(1)
+ tdSql.checkData(0, 0, '2020-02-01 00:00:00.000')
+ tdSql.checkData(0, 1, True)
+ tdSql.checkData(0, 2, 0)
+ tdSql.query(f"select _irowts, _isfilled, interp(c0) from {dbname}.{stbname_single} range('2020-02-01 00:00:00') fill(value, 0)")
+ tdSql.checkRows(1)
+ tdSql.checkData(0, 0, '2020-02-01 00:00:00.000')
+ tdSql.checkData(0, 1, True)
+ tdSql.checkData(0, 2, 0)
+
+ tdSql.query(f"select _irowts, _isfilled, interp(c0) from {dbname}.{stbname_single} range('2020-02-01 00:00:01', '2020-02-01 00:00:01') every(1s) fill(value, 0)")
+ tdSql.checkRows(1)
+ tdSql.checkData(0, 0, '2020-02-01 00:00:01.000')
+ tdSql.checkData(0, 1, False)
+ tdSql.checkData(0, 2, 1)
+ tdSql.query(f"select _irowts, _isfilled, interp(c0) from {dbname}.{stbname_single} range('2020-02-01 00:00:01') fill(value, 0)")
+ tdSql.checkRows(1)
+ tdSql.checkData(0, 0, '2020-02-01 00:00:01.000')
+ tdSql.checkData(0, 1, False)
+ tdSql.checkData(0, 2, 1)
+
+ tdSql.query(f"select _irowts, _isfilled, interp(c0) from {dbname}.{stbname_single} range('2020-02-01 00:00:02', '2020-02-01 00:00:02') every(1s) fill(value, 0)")
+ tdSql.checkRows(1)
+ tdSql.checkData(0, 0, '2020-02-01 00:00:02.000')
+ tdSql.checkData(0, 1, True)
+ tdSql.checkData(0, 2, 0)
+ tdSql.query(f"select _irowts, _isfilled, interp(c0) from {dbname}.{stbname_single} range('2020-02-01 00:00:02') fill(value, 0)")
+ tdSql.checkRows(1)
+ tdSql.checkData(0, 0, '2020-02-01 00:00:02.000')
+ tdSql.checkData(0, 1, True)
+ tdSql.checkData(0, 2, 0)
+
+ tdSql.query(f"select _irowts, _isfilled, interp(c0) from {dbname}.{stbname_single} range('2020-02-01 00:00:03', '2020-02-01 00:00:03') every(1s) fill(value, 0)")
+ tdSql.checkRows(1)
+ tdSql.checkData(0, 0, '2020-02-01 00:00:03.000')
+ tdSql.checkData(0, 1, False)
+ tdSql.checkData(0, 2, 3)
+ tdSql.query(f"select _irowts, _isfilled, interp(c0) from {dbname}.{stbname_single} range('2020-02-01 00:00:03') fill(value, 0)")
+ tdSql.checkRows(1)
+ tdSql.checkData(0, 0, '2020-02-01 00:00:03.000')
+ tdSql.checkData(0, 1, False)
+ tdSql.checkData(0, 2, 3)
+
+ tdSql.query(f"select _irowts, _isfilled, interp(c0) from {dbname}.{stbname_single} range('2020-02-01 00:00:04', '2020-02-01 00:00:04') every(1s) fill(value, 0)")
+ tdSql.checkRows(1)
+ tdSql.checkData(0, 0, '2020-02-01 00:00:04.000')
+ tdSql.checkData(0, 1, True)
+ tdSql.checkData(0, 2, 0)
+ tdSql.query(f"select _irowts, _isfilled, interp(c0) from {dbname}.{stbname_single} range('2020-02-01 00:00:04') fill(value, 0)")
+ tdSql.checkRows(1)
+ tdSql.checkData(0, 0, '2020-02-01 00:00:04.000')
+ tdSql.checkData(0, 1, True)
+ tdSql.checkData(0, 2, 0)
+
+ tdSql.query(f"select _irowts, _isfilled, interp(c0) from {dbname}.{stbname_single} range('2020-02-01 00:00:05', '2020-02-01 00:00:05') every(1s) fill(value, 0)")
+ tdSql.checkRows(1)
+ tdSql.checkData(0, 0, '2020-02-01 00:00:05.000')
+ tdSql.checkData(0, 1, False)
+ tdSql.checkData(0, 2, 5)
+ tdSql.query(f"select _irowts, _isfilled, interp(c0) from {dbname}.{stbname_single} range('2020-02-01 00:00:05') fill(value, 0)")
+ tdSql.checkRows(1)
+ tdSql.checkData(0, 0, '2020-02-01 00:00:05.000')
+ tdSql.checkData(0, 1, False)
+ tdSql.checkData(0, 2, 5)
+
+ tdSql.query(f"select _irowts, _isfilled, interp(c0) from {dbname}.{stbname_single} range('2020-02-01 00:00:06', '2020-02-01 00:00:06') every(1s) fill(value, 0)")
+ tdSql.checkRows(1)
+ tdSql.checkData(0, 0, '2020-02-01 00:00:06.000')
+ tdSql.checkData(0, 1, True)
+ tdSql.checkData(0, 2, 0)
+ tdSql.query(f"select _irowts, _isfilled, interp(c0) from {dbname}.{stbname_single} range('2020-02-01 00:00:06') fill(value, 0)")
+ tdSql.checkRows(1)
+ tdSql.checkData(0, 0, '2020-02-01 00:00:06.000')
+ tdSql.checkData(0, 1, True)
+ tdSql.checkData(0, 2, 0)
+
+ tdSql.query(f"select _irowts, _isfilled, interp(c0) from {dbname}.{stbname_single} range('2020-02-01 00:00:00', '2020-02-01 00:00:00') every(1s) fill(prev)")
+ tdSql.checkRows(0)
+ tdSql.query(f"select _irowts, _isfilled, interp(c0) from {dbname}.{stbname_single} range('2020-02-01 00:00:00') fill(prev)")
+ tdSql.checkRows(0)
+
+ tdSql.query(f"select _irowts, _isfilled, interp(c0) from {dbname}.{stbname_single} range('2020-02-01 00:00:01', '2020-02-01 00:00:01') every(1s) fill(prev)")
+ tdSql.checkRows(1)
+ tdSql.checkData(0, 0, '2020-02-01 00:00:01.000')
+ tdSql.checkData(0, 1, False)
+ tdSql.checkData(0, 2, 1)
+ tdSql.query(f"select _irowts, _isfilled, interp(c0) from {dbname}.{stbname_single} range('2020-02-01 00:00:01') fill(prev)")
+ tdSql.checkRows(1)
+ tdSql.checkData(0, 0, '2020-02-01 00:00:01.000')
+ tdSql.checkData(0, 1, False)
+ tdSql.checkData(0, 2, 1)
+
+ tdSql.query(f"select _irowts, _isfilled, interp(c0) from {dbname}.{stbname_single} range('2020-02-01 00:00:02', '2020-02-01 00:00:02') every(1s) fill(prev)")
+ tdSql.checkRows(1)
+ tdSql.checkData(0, 0, '2020-02-01 00:00:02.000')
+ tdSql.checkData(0, 1, True)
+ tdSql.checkData(0, 2, 1)
+ tdSql.query(f"select _irowts, _isfilled, interp(c0) from {dbname}.{stbname_single} range('2020-02-01 00:00:02') fill(prev)")
+ tdSql.checkRows(1)
+ tdSql.checkData(0, 0, '2020-02-01 00:00:02.000')
+ tdSql.checkData(0, 1, True)
+ tdSql.checkData(0, 2, 1)
+
+ tdSql.query(f"select _irowts, _isfilled, interp(c0) from {dbname}.{stbname_single} range('2020-02-01 00:00:03', '2020-02-01 00:00:03') every(1s) fill(prev)")
+ tdSql.checkRows(1)
+ tdSql.checkData(0, 0, '2020-02-01 00:00:03.000')
+ tdSql.checkData(0, 1, False)
+ tdSql.checkData(0, 2, 3)
+ tdSql.query(f"select _irowts, _isfilled, interp(c0) from {dbname}.{stbname_single} range('2020-02-01 00:00:03') fill(prev)")
+ tdSql.checkRows(1)
+ tdSql.checkData(0, 0, '2020-02-01 00:00:03.000')
+ tdSql.checkData(0, 1, False)
+ tdSql.checkData(0, 2, 3)
+
+ tdSql.query(f"select _irowts, _isfilled, interp(c0) from {dbname}.{stbname_single} range('2020-02-01 00:00:04', '2020-02-01 00:00:04') every(1s) fill(prev)")
+ tdSql.checkRows(1)
+ tdSql.checkData(0, 0, '2020-02-01 00:00:04.000')
+ tdSql.checkData(0, 1, True)
+ tdSql.checkData(0, 2, 3)
+ tdSql.query(f"select _irowts, _isfilled, interp(c0) from {dbname}.{stbname_single} range('2020-02-01 00:00:04') fill(prev)")
+ tdSql.checkRows(1)
+ tdSql.checkData(0, 0, '2020-02-01 00:00:04.000')
+ tdSql.checkData(0, 1, True)
+ tdSql.checkData(0, 2, 3)
+
+ tdSql.query(f"select _irowts, _isfilled, interp(c0) from {dbname}.{stbname_single} range('2020-02-01 00:00:05', '2020-02-01 00:00:05') every(1s) fill(prev)")
+ tdSql.checkRows(1)
+ tdSql.checkData(0, 0, '2020-02-01 00:00:05.000')
+ tdSql.checkData(0, 1, False)
+ tdSql.checkData(0, 2, 5)
+ tdSql.query(f"select _irowts, _isfilled, interp(c0) from {dbname}.{stbname_single} range('2020-02-01 00:00:05') fill(prev)")
+ tdSql.checkRows(1)
+ tdSql.checkData(0, 0, '2020-02-01 00:00:05.000')
+ tdSql.checkData(0, 1, False)
+ tdSql.checkData(0, 2, 5)
+
+ tdSql.query(f"select _irowts, _isfilled, interp(c0) from {dbname}.{stbname_single} range('2020-02-01 00:00:06', '2020-02-01 00:00:06') every(1s) fill(prev)")
+ tdSql.checkRows(1)
+ tdSql.checkData(0, 0, '2020-02-01 00:00:06.000')
+ tdSql.checkData(0, 1, True)
+ tdSql.checkData(0, 2, 5)
+ tdSql.query(f"select _irowts, _isfilled, interp(c0) from {dbname}.{stbname_single} range('2020-02-01 00:00:06') fill(prev)")
+ tdSql.checkRows(1)
+ tdSql.checkData(0, 0, '2020-02-01 00:00:06.000')
+ tdSql.checkData(0, 1, True)
+ tdSql.checkData(0, 2, 5)
+
+ tdSql.query(f"select _irowts, _isfilled, interp(c0) from {dbname}.{stbname_single} range('2020-02-01 00:00:00', '2020-02-01 00:00:00') every(1s) fill(next)")
+ tdSql.checkRows(1)
+ tdSql.checkData(0, 0, '2020-02-01 00:00:00.000')
+ tdSql.checkData(0, 1, True)
+ tdSql.checkData(0, 2, 1)
+ tdSql.query(f"select _irowts, _isfilled, interp(c0) from {dbname}.{stbname_single} range('2020-02-01 00:00:00') fill(next)")
+ tdSql.checkRows(1)
+ tdSql.checkData(0, 0, '2020-02-01 00:00:00.000')
+ tdSql.checkData(0, 1, True)
+ tdSql.checkData(0, 2, 1)
+
+ tdSql.query(f"select _irowts, _isfilled, interp(c0) from {dbname}.{stbname_single} range('2020-02-01 00:00:01', '2020-02-01 00:00:01') every(1s) fill(next)")
+ tdSql.checkRows(1)
+ tdSql.checkData(0, 0, '2020-02-01 00:00:01.000')
+ tdSql.checkData(0, 1, False)
+ tdSql.checkData(0, 2, 1)
+ tdSql.query(f"select _irowts, _isfilled, interp(c0) from {dbname}.{stbname_single} range('2020-02-01 00:00:01') fill(next)")
+ tdSql.checkRows(1)
+ tdSql.checkData(0, 0, '2020-02-01 00:00:01.000')
+ tdSql.checkData(0, 1, False)
+ tdSql.checkData(0, 2, 1)
+
+ tdSql.query(f"select _irowts, _isfilled, interp(c0) from {dbname}.{stbname_single} range('2020-02-01 00:00:02', '2020-02-01 00:00:02') every(1s) fill(next)")
+ tdSql.checkRows(1)
+ tdSql.checkData(0, 0, '2020-02-01 00:00:02.000')
+ tdSql.checkData(0, 1, True)
+ tdSql.checkData(0, 2, 3)
+ tdSql.query(f"select _irowts, _isfilled, interp(c0) from {dbname}.{stbname_single} range('2020-02-01 00:00:02') fill(next)")
+ tdSql.checkRows(1)
+ tdSql.checkData(0, 0, '2020-02-01 00:00:02.000')
+ tdSql.checkData(0, 1, True)
+ tdSql.checkData(0, 2, 3)
+
+ tdSql.query(f"select _irowts, _isfilled, interp(c0) from {dbname}.{stbname_single} range('2020-02-01 00:00:03', '2020-02-01 00:00:03') every(1s) fill(next)")
+ tdSql.checkRows(1)
+ tdSql.checkData(0, 0, '2020-02-01 00:00:03.000')
+ tdSql.checkData(0, 1, False)
+ tdSql.checkData(0, 2, 3)
+ tdSql.query(f"select _irowts, _isfilled, interp(c0) from {dbname}.{stbname_single} range('2020-02-01 00:00:03') fill(next)")
+ tdSql.checkRows(1)
+ tdSql.checkData(0, 0, '2020-02-01 00:00:03.000')
+ tdSql.checkData(0, 1, False)
+ tdSql.checkData(0, 2, 3)
+
+ tdSql.query(f"select _irowts, _isfilled, interp(c0) from {dbname}.{stbname_single} range('2020-02-01 00:00:04', '2020-02-01 00:00:04') every(1s) fill(next)")
+ tdSql.checkRows(1)
+ tdSql.checkData(0, 0, '2020-02-01 00:00:04.000')
+ tdSql.checkData(0, 1, True)
+ tdSql.checkData(0, 2, 5)
+ tdSql.query(f"select _irowts, _isfilled, interp(c0) from {dbname}.{stbname_single} range('2020-02-01 00:00:04') fill(next)")
+ tdSql.checkRows(1)
+ tdSql.checkData(0, 0, '2020-02-01 00:00:04.000')
+ tdSql.checkData(0, 1, True)
+ tdSql.checkData(0, 2, 5)
+
+ tdSql.query(f"select _irowts, _isfilled, interp(c0) from {dbname}.{stbname_single} range('2020-02-01 00:00:05', '2020-02-01 00:00:05') every(1s) fill(next)")
+ tdSql.checkRows(1)
+ tdSql.checkData(0, 0, '2020-02-01 00:00:05.000')
+ tdSql.checkData(0, 1, False)
+ tdSql.checkData(0, 2, 5)
+ tdSql.query(f"select _irowts, _isfilled, interp(c0) from {dbname}.{stbname_single} range('2020-02-01 00:00:05') fill(next)")
+ tdSql.checkRows(1)
+ tdSql.checkData(0, 0, '2020-02-01 00:00:05.000')
+ tdSql.checkData(0, 1, False)
+ tdSql.checkData(0, 2, 5)
+
+ tdSql.query(f"select _irowts, _isfilled, interp(c0) from {dbname}.{stbname_single} range('2020-02-01 00:00:06', '2020-02-01 00:00:06') every(1s) fill(next)")
+ tdSql.checkRows(0)
+ tdSql.query(f"select _irowts, _isfilled, interp(c0) from {dbname}.{stbname_single} range('2020-02-01 00:00:06') fill(next)")
+ tdSql.checkRows(0)
+
+ tdSql.query(f"select _irowts, _isfilled, interp(c0) from {dbname}.{stbname_single} range('2020-02-01 00:00:00', '2020-02-01 00:00:00') every(1s) fill(linear)")
+ tdSql.checkRows(0)
+ tdSql.query(f"select _irowts, _isfilled, interp(c0) from {dbname}.{stbname_single} range('2020-02-01 00:00:00') fill(linear)")
+ tdSql.checkRows(0)
+
+ tdSql.query(f"select _irowts, _isfilled, interp(c0) from {dbname}.{stbname_single} range('2020-02-01 00:00:01', '2020-02-01 00:00:01') every(1s) fill(linear)")
+ tdSql.checkRows(1)
+ tdSql.checkData(0, 0, '2020-02-01 00:00:01.000')
+ tdSql.checkData(0, 1, False)
+ tdSql.checkData(0, 2, 1)
+ tdSql.query(f"select _irowts, _isfilled, interp(c0) from {dbname}.{stbname_single} range('2020-02-01 00:00:01') fill(linear)")
+ tdSql.checkRows(1)
+ tdSql.checkData(0, 0, '2020-02-01 00:00:01.000')
+ tdSql.checkData(0, 1, False)
+ tdSql.checkData(0, 2, 1)
+
+ tdSql.query(f"select _irowts, _isfilled, interp(c0) from {dbname}.{stbname_single} range('2020-02-01 00:00:02', '2020-02-01 00:00:02') every(1s) fill(linear)")
+ tdSql.checkRows(1)
+ tdSql.checkData(0, 0, '2020-02-01 00:00:02.000')
+ tdSql.checkData(0, 1, True)
+ tdSql.checkData(0, 2, 2)
+ tdSql.query(f"select _irowts, _isfilled, interp(c0) from {dbname}.{stbname_single} range('2020-02-01 00:00:02') fill(linear)")
+ tdSql.checkRows(1)
+ tdSql.checkData(0, 0, '2020-02-01 00:00:02.000')
+ tdSql.checkData(0, 1, True)
+ tdSql.checkData(0, 2, 2)
+
+ tdSql.query(f"select _irowts, _isfilled, interp(c0) from {dbname}.{stbname_single} range('2020-02-01 00:00:03', '2020-02-01 00:00:03') every(1s) fill(linear)")
+ tdSql.checkRows(1)
+ tdSql.checkData(0, 0, '2020-02-01 00:00:03.000')
+ tdSql.checkData(0, 1, False)
+ tdSql.checkData(0, 2, 3)
+ tdSql.query(f"select _irowts, _isfilled, interp(c0) from {dbname}.{stbname_single} range('2020-02-01 00:00:03') fill(linear)")
+ tdSql.checkRows(1)
+ tdSql.checkData(0, 0, '2020-02-01 00:00:03.000')
+ tdSql.checkData(0, 1, False)
+ tdSql.checkData(0, 2, 3)
+
+ tdSql.query(f"select _irowts, _isfilled, interp(c0) from {dbname}.{stbname_single} range('2020-02-01 00:00:04', '2020-02-01 00:00:04') every(1s) fill(linear)")
+ tdSql.checkRows(1)
+ tdSql.checkData(0, 0, '2020-02-01 00:00:04.000')
+ tdSql.checkData(0, 1, True)
+ tdSql.checkData(0, 2, 4)
+ tdSql.query(f"select _irowts, _isfilled, interp(c0) from {dbname}.{stbname_single} range('2020-02-01 00:00:04') fill(linear)")
+ tdSql.checkRows(1)
+ tdSql.checkData(0, 0, '2020-02-01 00:00:04.000')
+ tdSql.checkData(0, 1, True)
+ tdSql.checkData(0, 2, 4)
+
+ tdSql.query(f"select _irowts, _isfilled, interp(c0) from {dbname}.{stbname_single} range('2020-02-01 00:00:05', '2020-02-01 00:00:05') every(1s) fill(linear)")
+ tdSql.checkRows(1)
+ tdSql.checkData(0, 0, '2020-02-01 00:00:05.000')
+ tdSql.checkData(0, 1, False)
+ tdSql.checkData(0, 2, 5)
+ tdSql.query(f"select _irowts, _isfilled, interp(c0) from {dbname}.{stbname_single} range('2020-02-01 00:00:05') fill(linear)")
+ tdSql.checkRows(1)
+ tdSql.checkData(0, 0, '2020-02-01 00:00:05.000')
+ tdSql.checkData(0, 1, False)
+ tdSql.checkData(0, 2, 5)
+
+ tdSql.query(f"select _irowts, _isfilled, interp(c0) from {dbname}.{stbname_single} range('2020-02-01 00:00:06', '2020-02-01 00:00:06') every(1s) fill(linear)")
+ tdSql.checkRows(0)
+ tdSql.query(f"select _irowts, _isfilled, interp(c0) from {dbname}.{stbname_single} range('2020-02-01 00:00:06') fill(linear)")
+ tdSql.checkRows(0)
+
+ # partition by tbname
+ tdSql.query(f"select _irowts, _isfilled, interp(c0) from {dbname}.{stbname_single} partition by tbname range('2020-02-01 00:00:00', '2020-02-01 00:00:00') every(1s) fill(null)")
+ tdSql.checkRows(3)
+ for i in range(3):
+ tdSql.checkData(i, 0, '2020-02-01 00:00:00.000')
+ tdSql.checkData(i, 1, True)
+ tdSql.checkData(i, 2, None)
+ tdSql.query(f"select _irowts, _isfilled, interp(c0) from {dbname}.{stbname_single} partition by tbname range('2020-02-01 00:00:00') fill(null)")
+ tdSql.checkRows(3)
+ for i in range(3):
+ tdSql.checkData(i, 0, '2020-02-01 00:00:00.000')
+ tdSql.checkData(i, 1, True)
+ tdSql.checkData(i, 2, None)
+
+ tdSql.query(f"select _irowts, _isfilled, interp(c0) from {dbname}.{stbname_single} partition by tbname range('2020-02-01 00:00:01', '2020-02-01 00:00:01') every(1s) fill(null)")
+ tdSql.checkRows(3)
+ tdSql.checkData(0, 0, '2020-02-01 00:00:01.000')
+ tdSql.checkData(0, 1, False)
+ tdSql.checkData(0, 2, 1)
+ for i in range(1, 3):
+ tdSql.checkData(i, 0, '2020-02-01 00:00:01.000')
+ tdSql.checkData(i, 1, True)
+ tdSql.checkData(i, 2, None)
+ tdSql.query(f"select _irowts, _isfilled, interp(c0) from {dbname}.{stbname_single} partition by tbname range('2020-02-01 00:00:01') fill(null)")
+ tdSql.checkRows(3)
+ tdSql.checkData(0, 0, '2020-02-01 00:00:01.000')
+ tdSql.checkData(0, 1, False)
+ tdSql.checkData(0, 2, 1)
+ for i in range(1, 3):
+ tdSql.checkData(i, 0, '2020-02-01 00:00:01.000')
+ tdSql.checkData(i, 1, True)
+ tdSql.checkData(i, 2, None)
+
+ tdSql.query(f"select _irowts, _isfilled, interp(c0) from {dbname}.{stbname_single} partition by tbname range('2020-02-01 00:00:02', '2020-02-01 00:00:02') every(1s) fill(null)")
+ tdSql.checkRows(3)
+ for i in range(3):
+ tdSql.checkData(i, 0, '2020-02-01 00:00:02.000')
+ tdSql.checkData(i, 1, True)
+ tdSql.checkData(i, 2, None)
+ tdSql.query(f"select _irowts, _isfilled, interp(c0) from {dbname}.{stbname_single} partition by tbname range('2020-02-01 00:00:02') fill(null)")
+ tdSql.checkRows(3)
+ for i in range(3):
+ tdSql.checkData(i, 0, '2020-02-01 00:00:02.000')
+ tdSql.checkData(i, 1, True)
+ tdSql.checkData(i, 2, None)
+
+ tdSql.query(f"select _irowts, _isfilled, interp(c0) from {dbname}.{stbname_single} partition by tbname range('2020-02-01 00:00:06', '2020-02-01 00:00:06') every(1s) fill(null)")
+ tdSql.checkRows(3)
+ for i in range(3):
+ tdSql.checkData(i, 0, '2020-02-01 00:00:06.000')
+ tdSql.checkData(i, 1, True)
+ tdSql.checkData(i, 2, None)
+ tdSql.query(f"select _irowts, _isfilled, interp(c0) from {dbname}.{stbname_single} partition by tbname range('2020-02-01 00:00:06') fill(null)")
+ tdSql.checkRows(3)
+ for i in range(3):
+ tdSql.checkData(i, 0, '2020-02-01 00:00:06.000')
+ tdSql.checkData(i, 1, True)
+ tdSql.checkData(i, 2, None)
+
+ tdSql.query(f"select _irowts, _isfilled, interp(c0) from {dbname}.{stbname_single} partition by tbname range('2020-02-01 00:00:00', '2020-02-01 00:00:00') every(1s) fill(value,0)")
+ tdSql.checkRows(3)
+ for i in range(3):
+ tdSql.checkData(i, 0, '2020-02-01 00:00:00.000')
+ tdSql.checkData(i, 1, True)
+ tdSql.checkData(i, 2, 0)
+ tdSql.query(f"select _irowts, _isfilled, interp(c0) from {dbname}.{stbname_single} partition by tbname range('2020-02-01 00:00:00') fill(value,0)")
+ tdSql.checkRows(3)
+ for i in range(3):
+ tdSql.checkData(i, 0, '2020-02-01 00:00:00.000')
+ tdSql.checkData(i, 1, True)
+ tdSql.checkData(i, 2, 0)
+
+ tdSql.query(f"select _irowts, _isfilled, interp(c0) from {dbname}.{stbname_single} partition by tbname range('2020-02-01 00:00:01', '2020-02-01 00:00:01') every(1s) fill(value,0)")
+ tdSql.checkRows(3)
+ tdSql.checkData(0, 0, '2020-02-01 00:00:01.000')
+ tdSql.checkData(0, 1, False)
+ tdSql.checkData(0, 2, 1)
+ for i in range(1, 3):
+ tdSql.checkData(i, 0, '2020-02-01 00:00:01.000')
+ tdSql.checkData(i, 1, True)
+ tdSql.checkData(i, 2, 0)
+ tdSql.query(f"select _irowts, _isfilled, interp(c0) from {dbname}.{stbname_single} partition by tbname range('2020-02-01 00:00:01') fill(value,0)")
+ tdSql.checkRows(3)
+ tdSql.checkData(0, 0, '2020-02-01 00:00:01.000')
+ tdSql.checkData(0, 1, False)
+ tdSql.checkData(0, 2, 1)
+ for i in range(1, 3):
+ tdSql.checkData(i, 0, '2020-02-01 00:00:01.000')
+ tdSql.checkData(i, 1, True)
+ tdSql.checkData(i, 2, 0)
+
+ tdSql.query(f"select _irowts, _isfilled, interp(c0) from {dbname}.{stbname_single} partition by tbname range('2020-02-01 00:00:02', '2020-02-01 00:00:02') every(1s) fill(value,0)")
+ tdSql.checkRows(3)
+ for i in range(3):
+ tdSql.checkData(i, 0, '2020-02-01 00:00:02.000')
+ tdSql.checkData(i, 1, True)
+ tdSql.checkData(i, 2, 0)
+ tdSql.query(f"select _irowts, _isfilled, interp(c0) from {dbname}.{stbname_single} partition by tbname range('2020-02-01 00:00:02') fill(value,0)")
+ tdSql.checkRows(3)
+ for i in range(3):
+ tdSql.checkData(i, 0, '2020-02-01 00:00:02.000')
+ tdSql.checkData(i, 1, True)
+ tdSql.checkData(i, 2, 0)
+
+ tdSql.query(f"select _irowts, _isfilled, interp(c0) from {dbname}.{stbname_single} partition by tbname range('2020-02-01 00:00:06', '2020-02-01 00:00:06') every(1s) fill(value,0)")
+ tdSql.checkRows(3)
+ for i in range(3):
+ tdSql.checkData(i, 0, '2020-02-01 00:00:06.000')
+ tdSql.checkData(i, 1, True)
+ tdSql.checkData(i, 2, 0)
+ tdSql.query(f"select _irowts, _isfilled, interp(c0) from {dbname}.{stbname_single} partition by tbname range('2020-02-01 00:00:06') fill(value,0)")
+ tdSql.checkRows(3)
+ for i in range(3):
+ tdSql.checkData(i, 0, '2020-02-01 00:00:06.000')
+ tdSql.checkData(i, 1, True)
+ tdSql.checkData(i, 2, 0)
+
+ tdSql.query(f"select _irowts, _isfilled, interp(c0) from {dbname}.{stbname_single} partition by tbname range('2020-02-01 00:00:00', '2020-02-01 00:00:00') every(1s) fill(prev)")
+ tdSql.checkRows(0)
+ tdSql.query(f"select _irowts, _isfilled, interp(c0) from {dbname}.{stbname_single} partition by tbname range('2020-02-01 00:00:00') fill(prev)")
+ tdSql.checkRows(0)
+
+ tdSql.query(f"select _irowts, _isfilled, interp(c0) from {dbname}.{stbname_single} partition by tbname range('2020-02-01 00:00:01', '2020-02-01 00:00:01') every(1s) fill(prev)")
+ tdSql.checkRows(1)
+ tdSql.checkData(0, 0, '2020-02-01 00:00:01.000')
+ tdSql.checkData(0, 1, False)
+ tdSql.checkData(0, 2, 1)
+ tdSql.query(f"select _irowts, _isfilled, interp(c0) from {dbname}.{stbname_single} partition by tbname range('2020-02-01 00:00:01') fill(prev)")
+ tdSql.checkRows(1)
+ tdSql.checkData(0, 0, '2020-02-01 00:00:01.000')
+ tdSql.checkData(0, 1, False)
+ tdSql.checkData(0, 2, 1)
+
+ tdSql.query(f"select _irowts, _isfilled, interp(c0) from {dbname}.{stbname_single} partition by tbname range('2020-02-01 00:00:02', '2020-02-01 00:00:02') every(1s) fill(prev)")
+ tdSql.checkRows(1)
+ tdSql.checkData(0, 0, '2020-02-01 00:00:02.000')
+ tdSql.checkData(0, 1, True)
+ tdSql.checkData(0, 2, 1)
+ tdSql.query(f"select _irowts, _isfilled, interp(c0) from {dbname}.{stbname_single} partition by tbname range('2020-02-01 00:00:02') fill(prev)")
+ tdSql.checkRows(1)
+ tdSql.checkData(0, 0, '2020-02-01 00:00:02.000')
+ tdSql.checkData(0, 1, True)
+ tdSql.checkData(0, 2, 1)
+
+ tdSql.query(f"select _irowts, _isfilled, interp(c0) from {dbname}.{stbname_single} partition by tbname range('2020-02-01 00:00:03', '2020-02-01 00:00:03') every(1s) fill(prev)")
+ tdSql.checkRows(2)
+ tdSql.checkData(0, 0, '2020-02-01 00:00:03.000')
+ tdSql.checkData(0, 1, True)
+ tdSql.checkData(0, 2, 1)
+ tdSql.checkData(1, 0, '2020-02-01 00:00:03.000')
+ tdSql.checkData(1, 1, False)
+ tdSql.checkData(1, 2, 3)
+ tdSql.query(f"select _irowts, _isfilled, interp(c0) from {dbname}.{stbname_single} partition by tbname range('2020-02-01 00:00:03') fill(prev)")
+ tdSql.checkData(0, 0, '2020-02-01 00:00:03.000')
+ tdSql.checkData(0, 1, True)
+ tdSql.checkData(0, 2, 1)
+ tdSql.checkData(1, 0, '2020-02-01 00:00:03.000')
+ tdSql.checkData(1, 1, False)
+ tdSql.checkData(1, 2, 3)
+
+ tdSql.query(f"select _irowts, _isfilled, interp(c0) from {dbname}.{stbname_single} partition by tbname range('2020-02-01 00:00:04', '2020-02-01 00:00:04') every(1s) fill(prev)")
+ tdSql.checkRows(2)
+ tdSql.checkData(0, 0, '2020-02-01 00:00:04.000')
+ tdSql.checkData(0, 1, True)
+ tdSql.checkData(0, 2, 1)
+ tdSql.checkData(1, 0, '2020-02-01 00:00:04.000')
+ tdSql.checkData(1, 1, True)
+ tdSql.checkData(1, 2, 3)
+ tdSql.query(f"select _irowts, _isfilled, interp(c0) from {dbname}.{stbname_single} partition by tbname range('2020-02-01 00:00:04') fill(prev)")
+ tdSql.checkRows(2)
+ tdSql.checkData(0, 0, '2020-02-01 00:00:04.000')
+ tdSql.checkData(0, 1, True)
+ tdSql.checkData(0, 2, 1)
+ tdSql.checkData(1, 0, '2020-02-01 00:00:04.000')
+ tdSql.checkData(1, 1, True)
+ tdSql.checkData(1, 2, 3)
+
+ tdSql.query(f"select _irowts, _isfilled, interp(c0) from {dbname}.{stbname_single} partition by tbname range('2020-02-01 00:00:05', '2020-02-01 00:00:05') every(1s) fill(prev)")
+ tdSql.checkRows(3)
+ tdSql.checkData(0, 0, '2020-02-01 00:00:05.000')
+ tdSql.checkData(0, 1, True)
+ tdSql.checkData(0, 2, 1)
+ tdSql.checkData(1, 0, '2020-02-01 00:00:05.000')
+ tdSql.checkData(1, 1, True)
+ tdSql.checkData(1, 2, 3)
+ tdSql.checkData(2, 0, '2020-02-01 00:00:05.000')
+ tdSql.checkData(2, 1, False)
+ tdSql.checkData(2, 2, 5)
+ tdSql.query(f"select _irowts, _isfilled, interp(c0) from {dbname}.{stbname_single} partition by tbname range('2020-02-01 00:00:05') fill(prev)")
+ tdSql.checkRows(3)
+ tdSql.checkData(0, 0, '2020-02-01 00:00:05.000')
+ tdSql.checkData(0, 1, True)
+ tdSql.checkData(0, 2, 1)
+ tdSql.checkData(1, 0, '2020-02-01 00:00:05.000')
+ tdSql.checkData(1, 1, True)
+ tdSql.checkData(1, 2, 3)
+ tdSql.checkData(2, 0, '2020-02-01 00:00:05.000')
+ tdSql.checkData(2, 1, False)
+ tdSql.checkData(2, 2, 5)
+
+ tdSql.query(f"select _irowts, _isfilled, interp(c0) from {dbname}.{stbname_single} partition by tbname range('2020-02-01 00:00:06', '2020-02-01 00:00:06') every(1s) fill(prev)")
+ tdSql.checkRows(3)
+ tdSql.checkData(0, 0, '2020-02-01 00:00:06.000')
+ tdSql.checkData(0, 1, True)
+ tdSql.checkData(0, 2, 1)
+ tdSql.checkData(1, 0, '2020-02-01 00:00:06.000')
+ tdSql.checkData(1, 1, True)
+ tdSql.checkData(1, 2, 3)
+ tdSql.checkData(2, 0, '2020-02-01 00:00:06.000')
+ tdSql.checkData(2, 1, True)
+ tdSql.checkData(2, 2, 5)
+ tdSql.query(f"select _irowts, _isfilled, interp(c0) from {dbname}.{stbname_single} partition by tbname range('2020-02-01 00:00:06') fill(prev)")
+ tdSql.checkRows(3)
+ tdSql.checkData(0, 0, '2020-02-01 00:00:06.000')
+ tdSql.checkData(0, 1, True)
+ tdSql.checkData(0, 2, 1)
+ tdSql.checkData(1, 0, '2020-02-01 00:00:06.000')
+ tdSql.checkData(1, 1, True)
+ tdSql.checkData(1, 2, 3)
+ tdSql.checkData(2, 0, '2020-02-01 00:00:06.000')
+ tdSql.checkData(2, 1, True)
+ tdSql.checkData(2, 2, 5)
+
+ tdSql.query(f"select _irowts, _isfilled, interp(c0) from {dbname}.{stbname_single} partition by tbname range('2020-02-01 00:00:06', '2020-02-01 00:00:06') every(1s) fill(next)")
+ tdSql.checkRows(0)
+ tdSql.query(f"select _irowts, _isfilled, interp(c0) from {dbname}.{stbname_single} partition by tbname range('2020-02-01 00:00:06') fill(next)")
+ tdSql.checkRows(0)
+
+ tdSql.query(f"select _irowts, _isfilled, interp(c0) from {dbname}.{stbname_single} partition by tbname range('2020-02-01 00:00:05', '2020-02-01 00:00:05') every(1s) fill(next)")
+ tdSql.checkRows(1)
+ tdSql.checkData(0, 0, '2020-02-01 00:00:05.000')
+ tdSql.checkData(0, 1, False)
+ tdSql.checkData(0, 2, 5)
+ tdSql.query(f"select _irowts, _isfilled, interp(c0) from {dbname}.{stbname_single} partition by tbname range('2020-02-01 00:00:05') fill(next)")
+ tdSql.checkRows(1)
+ tdSql.checkData(0, 0, '2020-02-01 00:00:05.000')
+ tdSql.checkData(0, 1, False)
+ tdSql.checkData(0, 2, 5)
+
+ tdSql.query(f"select _irowts, _isfilled, interp(c0) from {dbname}.{stbname_single} partition by tbname range('2020-02-01 00:00:04', '2020-02-01 00:00:04') every(1s) fill(next)")
+ tdSql.checkRows(1)
+ tdSql.checkData(0, 0, '2020-02-01 00:00:04.000')
+ tdSql.checkData(0, 1, True)
+ tdSql.checkData(0, 2, 5)
+ tdSql.query(f"select _irowts, _isfilled, interp(c0) from {dbname}.{stbname_single} partition by tbname range('2020-02-01 00:00:04') fill(next)")
+ tdSql.checkRows(1)
+ tdSql.checkData(0, 0, '2020-02-01 00:00:04.000')
+ tdSql.checkData(0, 1, True)
+ tdSql.checkData(0, 2, 5)
+
+ tdSql.query(f"select _irowts, _isfilled, interp(c0) from {dbname}.{stbname_single} partition by tbname range('2020-02-01 00:00:03', '2020-02-01 00:00:03') every(1s) fill(next)")
+ tdSql.checkRows(2)
+ tdSql.checkData(0, 0, '2020-02-01 00:00:03.000')
+ tdSql.checkData(0, 1, False)
+ tdSql.checkData(0, 2, 3)
+ tdSql.checkData(1, 0, '2020-02-01 00:00:03.000')
+ tdSql.checkData(1, 1, True)
+ tdSql.checkData(1, 2, 5)
+ tdSql.query(f"select _irowts, _isfilled, interp(c0) from {dbname}.{stbname_single} partition by tbname range('2020-02-01 00:00:03') fill(next)")
+ tdSql.checkData(0, 0, '2020-02-01 00:00:03.000')
+ tdSql.checkData(0, 1, False)
+ tdSql.checkData(0, 2, 3)
+ tdSql.checkData(1, 0, '2020-02-01 00:00:03.000')
+ tdSql.checkData(1, 1, True)
+ tdSql.checkData(1, 2, 5)
+
+ tdSql.query(f"select _irowts, _isfilled, interp(c0) from {dbname}.{stbname_single} partition by tbname range('2020-02-01 00:00:02', '2020-02-01 00:00:02') every(1s) fill(next)")
+ tdSql.checkRows(2)
+ tdSql.checkData(0, 0, '2020-02-01 00:00:02.000')
+ tdSql.checkData(0, 1, True)
+ tdSql.checkData(0, 2, 3)
+ tdSql.checkData(1, 0, '2020-02-01 00:00:02.000')
+ tdSql.checkData(1, 1, True)
+ tdSql.checkData(1, 2, 5)
+ tdSql.query(f"select _irowts, _isfilled, interp(c0) from {dbname}.{stbname_single} partition by tbname range('2020-02-01 00:00:02') fill(next)")
+ tdSql.checkRows(2)
+ tdSql.checkData(0, 0, '2020-02-01 00:00:02.000')
+ tdSql.checkData(0, 1, True)
+ tdSql.checkData(0, 2, 3)
+ tdSql.checkData(1, 0, '2020-02-01 00:00:02.000')
+ tdSql.checkData(1, 1, True)
+ tdSql.checkData(1, 2, 5)
+
+ tdSql.query(f"select _irowts, _isfilled, interp(c0) from {dbname}.{stbname_single} partition by tbname range('2020-02-01 00:00:01', '2020-02-01 00:00:01') every(1s) fill(next)")
+ tdSql.checkRows(3)
+ tdSql.checkData(0, 0, '2020-02-01 00:00:01.000')
+ tdSql.checkData(0, 1, False)
+ tdSql.checkData(0, 2, 1)
+ tdSql.checkData(1, 0, '2020-02-01 00:00:01.000')
+ tdSql.checkData(1, 1, True)
+ tdSql.checkData(1, 2, 3)
+ tdSql.checkData(2, 0, '2020-02-01 00:00:01.000')
+ tdSql.checkData(2, 1, True)
+ tdSql.checkData(2, 2, 5)
+ tdSql.query(f"select _irowts, _isfilled, interp(c0) from {dbname}.{stbname_single} partition by tbname range('2020-02-01 00:00:01') fill(next)")
+ tdSql.checkRows(3)
+ tdSql.checkData(0, 0, '2020-02-01 00:00:01.000')
+ tdSql.checkData(0, 1, False)
+ tdSql.checkData(0, 2, 1)
+ tdSql.checkData(1, 0, '2020-02-01 00:00:01.000')
+ tdSql.checkData(1, 1, True)
+ tdSql.checkData(1, 2, 3)
+ tdSql.checkData(2, 0, '2020-02-01 00:00:01.000')
+ tdSql.checkData(2, 1, True)
+ tdSql.checkData(2, 2, 5)
+
+ tdSql.query(f"select _irowts, _isfilled, interp(c0) from {dbname}.{stbname_single} partition by tbname range('2020-02-01 00:00:00', '2020-02-01 00:00:00') every(1s) fill(next)")
+ tdSql.checkRows(3)
+ tdSql.checkData(0, 0, '2020-02-01 00:00:00.000')
+ tdSql.checkData(0, 1, True)
+ tdSql.checkData(0, 2, 1)
+ tdSql.checkData(1, 0, '2020-02-01 00:00:00.000')
+ tdSql.checkData(1, 1, True)
+ tdSql.checkData(1, 2, 3)
+ tdSql.checkData(2, 0, '2020-02-01 00:00:00.000')
+ tdSql.checkData(2, 1, True)
+ tdSql.checkData(2, 2, 5)
+ tdSql.query(f"select _irowts, _isfilled, interp(c0) from {dbname}.{stbname_single} partition by tbname range('2020-02-01 00:00:00') fill(next)")
+ tdSql.checkRows(3)
+ tdSql.checkData(0, 0, '2020-02-01 00:00:00.000')
+ tdSql.checkData(0, 1, True)
+ tdSql.checkData(0, 2, 1)
+ tdSql.checkData(1, 0, '2020-02-01 00:00:00.000')
+ tdSql.checkData(1, 1, True)
+ tdSql.checkData(1, 2, 3)
+ tdSql.checkData(2, 0, '2020-02-01 00:00:00.000')
+ tdSql.checkData(2, 1, True)
+ tdSql.checkData(2, 2, 5)
+
+ tdSql.query(f"select _irowts, _isfilled, interp(c0) from {dbname}.{stbname_single} partition by tbname range('2020-02-01 00:00:00') fill(linear)")
+ tdSql.checkRows(0)
+
+ tdSql.query(f"select _irowts, _isfilled, interp(c0) from {dbname}.{stbname_single} partition by tbname range('2020-02-01 00:00:01', '2020-02-01 00:00:01') every(1s) fill(linear)")
+ tdSql.checkRows(1)
+ tdSql.checkData(0, 0, '2020-02-01 00:00:01.000')
+ tdSql.checkData(0, 1, False)
+ tdSql.checkData(0, 2, 1)
+ tdSql.query(f"select _irowts, _isfilled, interp(c0) from {dbname}.{stbname_single} partition by tbname range('2020-02-01 00:00:01') fill(linear)")
+ tdSql.checkRows(1)
+ tdSql.checkData(0, 0, '2020-02-01 00:00:01.000')
+ tdSql.checkData(0, 1, False)
+ tdSql.checkData(0, 2, 1)
+
+ tdSql.query(f"select _irowts, _isfilled, interp(c0) from {dbname}.{stbname_single} partition by tbname range('2020-02-01 00:00:02', '2020-02-01 00:00:02') every(1s) fill(linear)")
+ tdSql.checkRows(0)
+ tdSql.query(f"select _irowts, _isfilled, interp(c0) from {dbname}.{stbname_single} partition by tbname range('2020-02-01 00:00:02') fill(linear)")
+ tdSql.checkRows(0)
+
+ tdSql.query(f"select _irowts, _isfilled, interp(c0) from {dbname}.{stbname_single} partition by tbname range('2020-02-01 00:00:03', '2020-02-01 00:00:03') every(1s) fill(linear)")
+ tdSql.checkRows(1)
+ tdSql.checkData(0, 0, '2020-02-01 00:00:03.000')
+ tdSql.checkData(0, 1, False)
+ tdSql.checkData(0, 2, 3)
+ tdSql.query(f"select _irowts, _isfilled, interp(c0) from {dbname}.{stbname_single} partition by tbname range('2020-02-01 00:00:03') fill(linear)")
+ tdSql.checkData(0, 0, '2020-02-01 00:00:03.000')
+ tdSql.checkData(0, 1, False)
+ tdSql.checkData(0, 2, 3)
+
+ tdSql.query(f"select _irowts, _isfilled, interp(c0) from {dbname}.{stbname_single} partition by tbname range('2020-02-01 00:00:04', '2020-02-01 00:00:04') every(1s) fill(linear)")
+ tdSql.checkRows(0)
+ tdSql.query(f"select _irowts, _isfilled, interp(c0) from {dbname}.{stbname_single} partition by tbname range('2020-02-01 00:00:04') fill(linear)")
+ tdSql.checkRows(0)
+
+ tdSql.query(f"select _irowts, _isfilled, interp(c0) from {dbname}.{stbname_single} partition by tbname range('2020-02-01 00:00:05', '2020-02-01 00:00:05') every(1s) fill(linear)")
+ tdSql.checkRows(1)
+ tdSql.checkData(0, 0, '2020-02-01 00:00:05.000')
+ tdSql.checkData(0, 1, False)
+ tdSql.checkData(0, 2, 5)
+ tdSql.query(f"select _irowts, _isfilled, interp(c0) from {dbname}.{stbname_single} partition by tbname range('2020-02-01 00:00:05') fill(linear)")
+ tdSql.checkRows(1)
+ tdSql.checkData(0, 0, '2020-02-01 00:00:05.000')
+ tdSql.checkData(0, 1, False)
+ tdSql.checkData(0, 2, 5)
+
+ tdSql.query(f"select _irowts, _isfilled, interp(c0) from {dbname}.{stbname_single} partition by tbname range('2020-02-01 00:00:06', '2020-02-01 00:00:06') every(1s) fill(linear)")
+ tdSql.checkRows(0)
+ tdSql.query(f"select _irowts, _isfilled, interp(c0) from {dbname}.{stbname_single} partition by tbname range('2020-02-01 00:00:06') fill(linear)")
+ tdSql.checkRows(0)
+
def stop(self):
tdSql.close()
tdLog.success(f"{__file__} successfully executed")
diff --git a/tests/system-test/2-query/smaBasic.py b/tests/system-test/2-query/smaBasic.py
new file mode 100644
index 0000000000000000000000000000000000000000..43c379ee53889aa0af5410332bf9d02cfb1ca291
--- /dev/null
+++ b/tests/system-test/2-query/smaBasic.py
@@ -0,0 +1,296 @@
+###################################################################
+# Copyright (c) 2016 by TAOS Technologies, Inc.
+# All rights reserved.
+#
+# This file is proprietary and confidential to TAOS Technologies.
+# No part of this file may be reproduced, stored, transmitted,
+# disclosed or used in any form or by any means other than as
+# expressly provided by the written permission from Jianhui Tao
+#
+###################################################################
+
+# -*- coding: utf-8 -*-
+
+import sys
+import random
+import time
+
+import taos
+from util.log import *
+from util.cases import *
+from util.sql import *
+
+class TDTestCase:
+
+    # generate one row's column values and accumulate count/max/min/sum statistics
+ def getColsValue(self, i, j):
+ # c1 value
+ if random.randint(1, 10) == 5:
+ c1 = None
+ else:
+ c1 = 1
+
+ # c2 value
+ if j % 3200 == 0:
+ c2 = 8764231
+ elif random.randint(1, 10) == 5:
+ c2 = None
+ else:
+ c2 = random.randint(-87654297, 98765321)
+
+
+ value = f"({self.ts}, "
+ self.ts += 1
+
+ # c1
+ if c1 is None:
+ value += "null,"
+ else:
+ self.c1Cnt += 1
+ value += f"{c1},"
+ # c2
+ if c2 is None:
+ value += "null)"
+ else:
+ value += f"{c2})"
+ # total count
+ self.c2Cnt += 1
+ # max
+ if self.c2Max is None:
+ self.c2Max = c2
+ else:
+ if c2 > self.c2Max:
+ self.c2Max = c2
+ # min
+ if self.c2Min is None:
+ self.c2Min = c2
+ else:
+ if c2 < self.c2Min:
+ self.c2Min = c2
+ # sum
+ if self.c2Sum is None:
+ self.c2Sum = c2
+ else:
+ self.c2Sum += c2
+
+ return value
+
+ # insert data
+ def insertData(self):
+ tdLog.info("insert data ....")
+ sqls = ""
+ for i in range(self.childCnt):
+ # insert child table
+ values = ""
+ pre_insert = f"insert into t{i} values "
+ for j in range(self.childRow):
+ if values == "":
+ values = self.getColsValue(i, j)
+ else:
+ values += "," + self.getColsValue(i, j)
+
+ # batch insert
+ if j % self.batchSize == 0 and values != "":
+ sql = pre_insert + values
+ tdSql.execute(sql)
+ values = ""
+ # append last
+ if values != "":
+ sql = pre_insert + values
+ tdSql.execute(sql)
+ values = ""
+
+ sql = "flush database db;"
+ tdLog.info(sql)
+ tdSql.execute(sql)
+ # insert finished
+ tdLog.info(f"insert data successfully.\n"
+ f" inserted child table = {self.childCnt}\n"
+ f" inserted child rows = {self.childRow}\n"
+ f" total inserted rows = {self.childCnt*self.childRow}\n")
+ return
+
+
+ # prepareEnv
+ def prepareEnv(self):
+ # init
+ self.ts = 1600000000000
+ self.childCnt = 5
+ self.childRow = 1000000
+ self.batchSize = 5000
+
+ # total
+ self.c1Cnt = 0
+ self.c2Cnt = 0
+ self.c2Max = None
+ self.c2Min = None
+ self.c2Sum = None
+
+ # create database db
+ sql = f"create database db vgroups 5 replica 3"
+ tdLog.info(sql)
+ tdSql.execute(sql)
+ sql = f"use db"
+ tdSql.execute(sql)
+
+        # create super table st
+ sql = f"create table st(ts timestamp, c1 int, c2 bigint) tags(area int)"
+ tdLog.info(sql)
+ tdSql.execute(sql)
+
+ # create child table
+ for i in range(self.childCnt):
+ sql = f"create table t{i} using st tags({i}) "
+ tdSql.execute(sql)
+
+ # insert data
+ self.insertData()
+
+ # query sql value
+ def queryValue(self, sql):
+ tdSql.query(sql)
+ return tdSql.getData(0, 0)
+
+ # sum
+ def checkCorrentSum(self):
+ # query count
+ sql = "select sum(c1) from st"
+ val = self.queryValue(sql)
+        # sum(c1) must equal c1Cnt, since every non-null c1 value is 1
+ if val != self.c1Cnt:
+ tdLog.exit(f"Sum Not Expect. expect={self.c1Cnt} query={val} sql:{sql}")
+ return
+
+ # not
+ sql1 = "select sum(c1) from st where c2 = 8764231"
+ val1 = self.queryValue(sql1)
+ sql2 = "select sum(c1) from st where c2 != 8764231"
+ val2 = self.queryValue(sql2)
+ sql3 = "select sum(c1) from st where c2 is null"
+ val3 = self.queryValue(sql3)
+ if val != val1 + val2 + val3:
+ tdLog.exit(f"Sum Not Equal. val != val1 + val2 + val3. val={val} val1={val1} val2={val2} val2={val3} sql1={sql1} sql2={sql2} sql2={sql3}")
+ return
+
+ # over than
+ sql1 = "select sum(c1) from st where c2 > 8000"
+ val1 = self.queryValue(sql1)
+ sql2 = "select sum(c1) from st where c2 <= 8000"
+ val2 = self.queryValue(sql2)
+ sql3 = "select sum(c1) from st where c2 is null"
+ val3 = self.queryValue(sql3)
+ if val != val1 + val2 + val3:
+ tdLog.exit(f"Sum Not Equal. val != val1 + val2 + val3. val={val} val1={val1} val2={val2} val2={val3} sql1={sql1} sql2={sql2} sql2={sql3}")
+ return
+
+ tdLog.info(f"check correct sum on c1 successfully.")
+
+ # check result
+ def checkResult(self, fun, val, val1, val2, sql1, sql2):
+ if fun == "count":
+ if val != val1 + val2:
+ tdLog.exit(f"{fun} NOT SAME. val != val1 + val2. val={val} val1={val1} val2={val2} sql1={sql1} sql2={sql2}")
+ return
+ elif fun == "max":
+ if val != max([val1, val2]):
+ tdLog.exit(f"{fun} NOT SAME . val != max(val1 ,val2) val={val} val1={val1} val2={val2} sql1={sql1} sql2={sql2}")
+ return
+ elif fun == "min":
+ if val != min([val1, val2]):
+ tdLog.exit(f"{fun} NOT SAME . val != min(val1 ,val2) val={val} val1={val1} val2={val2} sql1={sql1} sql2={sql2}")
+ return
+
+    # check count/max/min aggregate function correctness on c2
+ def checkCorrentFun(self, fun, expectVal):
+ # query
+ sql = f"select {fun}(c2) from st"
+ val = self.queryValue(sql)
+ if val != expectVal:
+ tdLog.exit(f"{fun} Not Expect. expect={expectVal} query={val} sql:{sql}")
+ return
+
+ # not
+ sql1 = f"select {fun}(c2) from st where c2 = 8764231"
+ val1 = self.queryValue(sql1)
+ sql2 = f"select {fun}(c2) from st where c2 != 8764231"
+ val2 = self.queryValue(sql2)
+ self.checkResult(fun, val, val1, val2, sql1, sql2)
+
+ # over than
+ sql1 = f"select {fun}(c2) from st where c2 > 8000"
+ val1 = self.queryValue(sql1)
+ sql2 = f"select {fun}(c2) from st where c2 <= 8000"
+ val2 = self.queryValue(sql2)
+ self.checkResult(fun, val, val1, val2, sql1, sql2)
+
+ # successful
+ tdLog.info(f"check correct {fun} on c2 successfully.")
+
+    # check query correctness
+ def checkCorrect(self):
+ # count
+ self.checkCorrentFun("count", self.c2Cnt)
+ # max
+ self.checkCorrentFun("max", self.c2Max)
+ # min
+ self.checkCorrentFun("min", self.c2Min)
+ # sum
+ self.checkCorrentSum()
+
+ # c2 sum
+ sql = "select sum(c2) from st"
+ val = self.queryValue(sql)
+        # sum(c2) must match the accumulated self.c2Sum
+ if val != self.c2Sum:
+ tdLog.exit(f"c2 Sum Not Expect. expect={self.c2Sum} query={val} sql:{sql}")
+ return
+
+ def checkPerformance(self):
+        # query that can use the SMA (block-level pre-aggregate) calculation
+ sql1 = "select count(*) from st"
+ stime = time.time()
+ tdSql.execute(sql1, 1)
+ spend1 = time.time() - stime
+
+
+        # query that cannot use the SMA calculation (forces a full scan)
+ sql2 = "select count(*) from st where c2 != 8764231 or c2 is null"
+ stime = time.time()
+ tdSql.execute(sql2, 1)
+ spend2 = time.time() - stime
+
+ time1 = "%.2f"%(spend1*1000)
+ time2 = "%.2f"%(spend2*1000)
+ if spend2 < spend1 * 8:
+ tdLog.exit(f"performance not passed! sma spend1={time1}ms no sma spend2= {time2}ms sql1={sql1} sql2= {sql2}")
+ return
+ tdLog.info(f"performance passed! sma spend1={time1}ms no sma spend2= {time2}ms sql1={sql1} sql2= {sql2}")
+
+
+ # init
+ def init(self, conn, logSql, replicaVar=1):
+ seed = time.clock_gettime(time.CLOCK_REALTIME)
+ random.seed(seed)
+ self.replicaVar = int(replicaVar)
+ tdLog.debug(f"start to excute {__file__}")
+ tdSql.init(conn.cursor(), True)
+
+ # run
+ def run(self):
+ # prepare env
+ self.prepareEnv()
+
+ # query
+ self.checkCorrect()
+
+ # performance
+ self.checkPerformance()
+
+ # stop
+ def stop(self):
+ tdSql.close()
+ tdLog.success(f"{__file__} successfully executed")
+
+
+tdCases.addLinux(__file__, TDTestCase())
+tdCases.addWindows(__file__, TDTestCase())
diff --git a/tests/system-test/2-query/ts_3398.py b/tests/system-test/2-query/ts_3398.py
new file mode 100644
index 0000000000000000000000000000000000000000..54d5c9180474d103f2dbf53869690c285d886f10
--- /dev/null
+++ b/tests/system-test/2-query/ts_3398.py
@@ -0,0 +1,56 @@
+from util.log import *
+from util.sql import *
+from util.cases import *
+from util.sqlset import *
+import datetime
+
+
+class TDTestCase:
+ """This test case is used to verify the aliasName of Node structure is not truncated
+ when sum clause is more than 65 bits.
+ """
+ def init(self, conn, logSql, replicaVar=1):
+ self.replicaVar = int(replicaVar)
+ tdLog.debug("start to execute %s" % __file__)
+ tdSql.init(conn.cursor(), False)
+
+ def run(self):
+ # test case for https://jira.taosdata.com:18080/browse/TS-3405:
+ # create db
+ ret = tdSql.execute("CREATE DATABASE IF NOT EXISTS statistics1 REPLICA {} DURATION 14400m KEEP 5256000m,5256000m,5256000m PRECISION 'ms' MINROWS 100 MAXROWS 4096 COMP 2;".format(self.replicaVar))
+ tdSql.execute("use statistics1;")
+
+ # create stable
+ ret = tdSql.execute("CREATE STABLE IF NOT EXISTS statistics1.`g`(`day` timestamp,`run_state` tinyint) TAGS(`vin` binary(32));")
+ ret = tdSql.execute("CREATE STABLE IF NOT EXISTS statistics1.`b`(`day` timestamp, `total_heart` int) TAGS(`vin` binary(32));")
+ ret = tdSql.execute("CREATE STABLE IF NOT EXISTS statistics1.`tg`(`day` timestamp,`lt_4177` int,`f30_4177` int, `f35_4177` int) TAGS(`vin` binary(32));")
+
+ # insert the data to table
+ ret = tdSql.execute("insert into d1001 using statistics1.`g` tags('NJHYNBSAS0000061') values (%s, %d)" % ("'2023-05-01'", 99))
+ ret = tdSql.execute("insert into d2001 using statistics1.`b` tags('NJHYNBSAS0000061') values (%s, %d)" % ("'2023-05-01'", 99))
+ ret = tdSql.execute("insert into d3001 using statistics1.`tg` tags('NJHYNBSAS0000061') values (%s, %d, %d, %d)" % ("'2023-05-01'", 99, 99, 99))
+
+ # execute the sql statements
+ ret = tdSql.query("SELECT b.`day` `day`,sum(CASE WHEN tg.lt_4177 IS NULL THEN 0 ELSE tg.lt_4177 END \
+ + CASE WHEN tg.f35_4177 IS NULL THEN 0 ELSE tg.f35_4177 END) / 3600 es0,sum(CASE WHEN tg.lt_4177 \
+ IS NULL THEN 0 ELSE tg.lt_4177 END + CASE WHEN tg.f35_4177 IS NULL THEN 0 ELSE tg.f35_4177 \
+ END + CASE WHEN tg.f30_4177 IS NULL THEN 0 ELSE tg.f30_4177 END) / 3600 es1 FROM \
+ statistics1.b b,statistics1.tg tg,statistics1.g g WHERE b.`day` = tg.`day` AND g.`day` = b.`day` \
+ AND b.vin = tg.vin AND b.vin = g.vin AND b.`day` BETWEEN '2023-05-01' AND '2023-05-05' \
+ AND b.vin = 'NJHYNBSAS0000061' AND g.vin IS NOT NULL AND b.vin IS NOT NULL AND tg.vin IS NOT NULL \
+ GROUP BY b.`day`;")
+ # check the result
+ if 0.055 in tdSql.queryResult[0] and 0.0825 in tdSql.queryResult[0]:
+ tdLog.info("query result is correct")
+ else:
+ tdLog.info("query result is wrong")
+
+ def stop(self):
+ # clear the db
+ tdSql.execute("drop database if exists statistics1;")
+ tdSql.close()
+ tdLog.success("%s successfully executed" % __file__)
+
+
+tdCases.addWindows(__file__, TDTestCase())
+tdCases.addLinux(__file__, TDTestCase())
diff --git a/tests/system-test/7-tmq/stbFilterWhere.py b/tests/system-test/7-tmq/stbFilterWhere.py
new file mode 100644
index 0000000000000000000000000000000000000000..8d8d046cef14dae6e4e01828b050201fcb575185
--- /dev/null
+++ b/tests/system-test/7-tmq/stbFilterWhere.py
@@ -0,0 +1,227 @@
+
+import taos
+import sys
+import time
+import socket
+import os
+import threading
+
+from util.log import *
+from util.sql import *
+from util.cases import *
+from util.dnodes import *
+from util.common import *
+sys.path.append("./7-tmq")
+from tmqCommon import *
+
+class TDTestCase:
+ def init(self, conn, logSql, replicaVar=1):
+ self.replicaVar = int(replicaVar)
+ tdLog.debug(f"start to excute {__file__}")
+ tdSql.init(conn.cursor())
+ #tdSql.init(conn.cursor(), logSql) # output sql.txt file
+
+ def prepareTestEnv(self):
+ tdLog.printNoPrefix("======== prepare test env include database, stable, ctables, and insert data: ")
+ paraDict = {'dbName': 'dbt',
+ 'dropFlag': 1,
+ 'event': '',
+ 'vgroups': 4,
+ 'replica': 1,
+ 'stbName': 'stb',
+ 'colPrefix': 'c',
+ 'tagPrefix': 't',
+ 'colSchema': [{'type': 'INT', 'count':2}, {'type': 'binary', 'len':20, 'count':1}],
+ 'tagSchema': [{'type': 'INT', 'count':1}, {'type': 'binary', 'len':20, 'count':1}],
+ 'ctbPrefix': 'ctb',
+ 'ctbStartIdx': 0,
+ 'ctbNum': 10,
+ 'rowsPerTbl': 10000,
+ 'batchNum': 100,
+ 'startTs': 1640966400000, # 2022-01-01 00:00:00.000
+ 'pollDelay': 2,
+ 'showMsg': 1,
+ 'showRow': 1}
+
+ tmqCom.initConsumerTable()
+ tmqCom.create_database(tsql=tdSql, dbName=paraDict["dbName"],dropFlag=paraDict["dropFlag"], vgroups=paraDict['vgroups'],replica=paraDict['replica'])
+ tdSql.execute("alter database %s wal_retention_period 3600"%(paraDict["dbName"]))
+ tdLog.info("create stb")
+ tmqCom.create_stable(tdSql, dbName=paraDict["dbName"],stbName=paraDict["stbName"])
+ tdLog.info("create ctb")
+ tmqCom.create_ctable(tdSql, dbName=paraDict["dbName"],stbName=paraDict["stbName"],ctbPrefix=paraDict['ctbPrefix'], ctbNum=paraDict['ctbNum'])
+ tdLog.info("insert data")
+ tmqCom.insert_data_interlaceByMultiTbl(tsql=tdSql,dbName=paraDict["dbName"],ctbPrefix=paraDict["ctbPrefix"],
+ ctbNum=paraDict["ctbNum"],rowsPerTbl=paraDict["rowsPerTbl"],batchNum=paraDict["batchNum"],
+ startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx'])
+ return
+
+ def tmqCase_columnError(self, topicName, condition):
+ tdLog.printNoPrefix("======== test case error: ")
+ paraDict = {'dbName': 'dbt',
+ 'dropFlag': 1,
+ 'event': '',
+ 'vgroups': 4,
+ 'replica': 1,
+ 'stbName': 'stb',
+ 'colPrefix': 'c',
+ 'tagPrefix': 't',
+ 'colSchema': [{'type': 'INT', 'count':2}, {'type': 'binary', 'len':20, 'count':1}],
+ 'tagSchema': [{'type': 'INT', 'count':1}, {'type': 'binary', 'len':20, 'count':1}],
+ 'ctbPrefix': 'ctb',
+ 'ctbStartIdx': 0,
+ 'ctbNum': 10,
+ 'rowsPerTbl': 10000,
+ 'batchNum': 100,
+ 'startTs': 1640966400000, # 2022-01-01 00:00:00.000
+ 'pollDelay': 2,
+ 'showMsg': 1,
+ 'showRow': 1}
+
+ tdLog.info("create topics from stb with column filter")
+ topicString = "create topic %s as stable %s.%s where %s" %(topicName, paraDict['dbName'], paraDict['stbName'], condition)
+ tdLog.info("create topic sql: %s"%topicString)
+ tdSql.error(topicString)
+
+ def tmqCase(self, topicName, condition):
+ tdLog.printNoPrefix("======== test case: ")
+ paraDict = {'dbName': 'dbt',
+ 'dropFlag': 1,
+ 'event': '',
+ 'vgroups': 4,
+ 'replica': 1,
+ 'stbName': 'stb',
+ 'colPrefix': 'c',
+ 'tagPrefix': 't',
+ 'colSchema': [{'type': 'INT', 'count':2}, {'type': 'binary', 'len':20, 'count':1}],
+ 'tagSchema': [{'type': 'INT', 'count':1}, {'type': 'binary', 'len':20, 'count':1}],
+ 'ctbPrefix': 'ctb',
+ 'ctbStartIdx': 0,
+ 'ctbNum': 10,
+ 'rowsPerTbl': 10000,
+ 'batchNum': 100,
+ 'startTs': 1640966400000, # 2022-01-01 00:00:00.000
+ 'pollDelay': 2,
+ 'showMsg': 1,
+ 'showRow': 1}
+
+ expectRowsList = []
+ tmqCom.initConsumerTable()
+
+ tdLog.info("create topics from stb with tag filter")
+ topicString = "create topic %s as stable %s.%s where %s" %(topicName, paraDict['dbName'], paraDict['stbName'], condition)
+ tdLog.info("create topic sql: %s"%topicString)
+ tdSql.execute(topicString)
+
+ queryString = "select * from %s.%s where %s" %(paraDict['dbName'], paraDict['stbName'], condition)
+ tdSql.query(queryString)
+ expectRowsList.append(tdSql.getRows())
+
+ # init consume info, and start tmq_sim, then check consume result
+ tdLog.info("insert consume info to consume processor")
+ consumerId = 0
+ expectrowcnt = paraDict["rowsPerTbl"] * paraDict["ctbNum"]
+ topicList = topicName
+ ifcheckdata = 0
+ ifManualCommit = 1
+ keyList = 'group.id:cgrp1, enable.auto.commit:false, auto.commit.interval.ms:6000, auto.offset.reset:earliest'
+ tmqCom.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit)
+
+ tdLog.info("start consume processor")
+ tmqCom.startTmqSimProcess(paraDict['pollDelay'],paraDict["dbName"],paraDict['showMsg'], paraDict['showRow'])
+
+ tdLog.info("wait the consume result")
+ expectRows = 1
+ resultList = tmqCom.selectConsumeResult(expectRows)
+
+ if expectRowsList[0] != resultList[0]:
+ tdLog.info("expect consume rows: %d, act consume rows: %d"%(expectRowsList[0], resultList[0]))
+ tdLog.exit("0 tmq consume rows error!")
+
+ tdLog.printNoPrefix("======== test case end ...... ")
+
+ def tmqCase_addNewTable_dropTag(self, topicName, condition):
+ tdLog.printNoPrefix("======== test case1: ")
+ paraDict = {'dbName': 'dbt',
+ 'dropFlag': 1,
+ 'event': '',
+ 'vgroups': 4,
+ 'replica': 1,
+ 'stbName': 'stb',
+ 'colPrefix': 'c',
+ 'tagPrefix': 't',
+ 'colSchema': [{'type': 'INT', 'count':2}, {'type': 'binary', 'len':20, 'count':1}],
+ 'tagSchema': [{'type': 'INT', 'count':1}, {'type': 'binary', 'len':20, 'count':1}],
+ 'ctbPrefix': 'ctb',
+ 'ctbStartIdx': 0,
+ 'ctbNum': 10,
+ 'rowsPerTbl': 10000,
+ 'batchNum': 100,
+ 'startTs': 1640966400000, # 2022-01-01 00:00:00.000
+ 'pollDelay': 2,
+ 'showMsg': 1,
+ 'showRow': 1}
+
+ expectRowsList = []
+ tmqCom.initConsumerTable()
+
+ tdLog.info("create topics from stb with tag filter")
+ topicString = "create topic %s as stable %s.%s where %s" %(topicName, paraDict['dbName'], paraDict['stbName'], condition)
+ tdLog.info("create topic sql: %s"%topicString)
+ tdSql.execute(topicString)
+
+ queryString = "select * from %s.%s where %s" %(paraDict['dbName'], paraDict['stbName'], condition)
+ tdSql.query(queryString)
+ expectRowsList.append(tdSql.getRows() + 1)
+
+ # init consume info, and start tmq_sim, then check consume result
+ tdLog.info("insert consume info to consume processor")
+ consumerId = 0
+ expectrowcnt = paraDict["rowsPerTbl"] * paraDict["ctbNum"]
+ topicList = topicName
+ ifcheckdata = 0
+ ifManualCommit = 1
+ keyList = 'group.id:cgrp1, enable.auto.commit:false, auto.commit.interval.ms:6000, auto.offset.reset:earliest'
+ tmqCom.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit)
+
+ tdLog.info("start consume processor")
+ tmqCom.startTmqSimProcess(paraDict['pollDelay'],paraDict["dbName"],paraDict['showMsg'], paraDict['showRow'])
+
+        # add a new child table with one row of data
+ tdLog.info("start insert data")
+ insertString = "insert into %s.tmp using %s.%s tags(1, 1, 1, 't4', 't5') values(now, 1, 1, 1, 'c4', 'c5', now)" %(paraDict['dbName'], paraDict['dbName'], paraDict['stbName'])
+ tdSql.execute(insertString)
+
+ #test drop tag
+ tdSql.error("alter stable %s.%s drop tag t1" %(paraDict['dbName'], paraDict['stbName']))
+ tdSql.execute("alter stable %s.%s drop tag t2" %(paraDict['dbName'], paraDict['stbName']))
+ tdSql.execute("alter stable %s.%s drop column c2" %(paraDict['dbName'], paraDict['stbName']))
+
+ tdLog.info("wait the consume result")
+ expectRows = 1
+ resultList = tmqCom.selectConsumeResult(expectRows)
+
+ if expectRowsList[0] != resultList[0]:
+ tdLog.info("expect consume rows: %d, act consume rows: %d"%(expectRowsList[0], resultList[0]))
+ tdLog.exit("0 tmq consume rows error!")
+
+ tdLog.printNoPrefix("======== test case1 end ...... ")
+
+ def run(self):
+ tdSql.prepare()
+ self.prepareTestEnv()
+ self.tmqCase_columnError("t1", "c1 = 4 and t1 = 3")
+ self.tmqCase("t2", "2 > 1")
+ self.tmqCase("t3", "t4 = 'beijing'")
+ self.tmqCase("t4", "t4 > t3")
+ self.tmqCase("t5", "t3 = t4")
+ self.tmqCase_addNewTable_dropTag("t6", "t1 = 1")
+
+ def stop(self):
+ tdSql.close()
+ tdLog.success(f"{__file__} successfully executed")
+
+event = threading.Event()
+
+tdCases.addLinux(__file__, TDTestCase())
+tdCases.addWindows(__file__, TDTestCase())
diff --git a/tests/system-test/eco-system/main.py b/tests/system-test/eco-system/main.py
new file mode 100644
index 0000000000000000000000000000000000000000..a1f72147a02273676283a3af18e2b7aa45c1cda1
--- /dev/null
+++ b/tests/system-test/eco-system/main.py
@@ -0,0 +1,37 @@
+###################################################################
+# Copyright (c) 2016 by TAOS Technologies, Inc.
+# All rights reserved.
+#
+# This file is proprietary and confidential to TAOS Technologies.
+# No part of this file may be reproduced, stored, transmitted,
+# disclosed or used in any form or by any means other than as
+# expressly provided by the written permission from Jianhui Tao
+#
+###################################################################
+
+# -*- coding: utf-8 -*-
+
+import re
+from util.log import *
+from util.cases import *
+from util.sql import *
+from util.common import *
+from util.sqlset import *
+
+class TDTestCase:
+ def init(self, conn, logSql, replicaVar=1):
+ self.replicaVar = int(replicaVar)
+ tdLog.debug("start to execute %s" % __file__)
+ tdSql.init(conn.cursor())
+ self.setsql = TDSetSql()
+
+ def run(self):
+ tdLog.info(" ------ eco-system main -------")
+
+
+ def stop(self):
+ tdSql.close()
+ tdLog.success("%s successfully executed" % __file__)
+
+tdCases.addWindows(__file__, TDTestCase())
+tdCases.addLinux(__file__, TDTestCase())
diff --git a/tests/system-test/eco-system/manager/drop_table.py b/tests/system-test/eco-system/manager/drop_table.py
new file mode 100644
index 0000000000000000000000000000000000000000..d4cdf4a5410fecb7dc8f339494d2a16b178ff411
--- /dev/null
+++ b/tests/system-test/eco-system/manager/drop_table.py
@@ -0,0 +1,146 @@
+###################################################################
+# Copyright (c) 2016 by TAOS Technologies, Inc.
+# All rights reserved.
+#
+# This file is proprietary and confidential to TAOS Technologies.
+# No part of this file may be reproduced, stored, transmitted,
+# disclosed or used in any form or by any means other than as
+# expressly provided by the written permission from Jianhui Tao
+#
+###################################################################
+
+# -*- coding: utf-8 -*-
+
+
+from util.log import *
+from util.cases import *
+from util.sql import *
+from util.common import *
+from util.sqlset import *
+import random
+import time
+import traceback
+
+
+class TDTestCase:
+ def init(self, conn, logSql, replicaVar=1):
+ self.replicaVar = int(replicaVar)
+ tdLog.debug("start to execute %s" % __file__)
+ tdSql.init(conn.cursor(), True)
+
+ # describe table
+ def describe_table(self, tbname):
+ columns = []
+ tags = []
+ sql = f"describe {tbname}"
+ row_cnt = tdSql.query(sql)
+ for i in range(0, row_cnt):
+ col_name = tdSql.queryResult[i][0]
+ type_name = tdSql.queryResult[i][3]
+ if type_name == "TAG":
+ tags.append(col_name)
+ else:
+ columns.append(col_name)
+
+ return columns,tags
+
+    # show tables — NOTE(review): 'tables' is populated below but never returned, so callers (e.g. drop_table) receive None
+ def show_tables(self):
+ sql = "show tables;"
+ row_cnt = tdSql.query(sql)
+ tables = []
+ for i in range(0, row_cnt):
+ tb_name = tdSql.queryResult[i][0]
+ tables.append(tb_name)
+
+
+ # execute sql
+ def execute(self, sql):
+ try:
+ tdSql.execute(sql, 3)
+ tdLog.info(f" exec ok. {sql}")
+ except:
+ tdLog.info(f" exe failed. {sql}")
+ traceback.print_exc()
+
+
+ # query
+ def query_table(self, columns, tags):
+ if len(columns) < 5 :
+ return
+ if len(tags) < 5:
+ return
+
+ sel_cols = random.sample(columns, random.randint(1,int(len(columns)-1)))
+ sel_tags = random.sample(tags, random.randint(1, int(len(tags)-1)))
+
+ field_cols = ",".join(sel_cols)
+ field_tags = ",".join(sel_tags)
+
+        # NOTE(review): the sql built above is only logged; the query below runs a fixed "select * from meters limit 1" — confirm intent
+ sql = f"select {field_cols},{field_tags} from meters"
+ try:
+ tdLog.info( " query sql:" + sql)
+ tdSql.query("select * from meters limit 1")
+ except:
+ tdLog.info( " query failed :" + sql)
+ traceback.print_exc()
+
+ # change table schema
+ def drop_table(self, change_cnt):
+        # NOTE(review): ncol, ntag, self.column_dict and self.tag_dict are never defined in this file — this loop raises NameError
+
+ tables = self.show_tables()
+
+
+ for i in range(change_cnt):
+ col_idx = random.randint(0, ncol - 1)
+ tag_idx = random.randint(0, ntag - 1)
+
+ cols = list(self.column_dict.keys())
+ tags = list(self.tag_dict.keys())
+
+ # column
+ key = cols[col_idx]
+ value = self.column_dict[key]
+ sql = f'alter table meters drop column {key}'
+ self.execute(sql)
+ sql = f'alter table meters add column {key} {value}'
+ self.execute(sql)
+
+
+            # tag — NOTE(review): indexes tags with col_idx; tag_idx computed above is unused
+ key = tags[col_idx]
+ value = self.tag_dict[key]
+ sql = f'alter table meters drop tag {key}'
+ self.execute(sql)
+ sql = f'alter table meters add tag {key} {value}'
+ self.execute(sql)
+
+            # every 5th iteration: log schema size and run a query (no drop/rename here)
+ if i % 5 == 0:
+ # update columns
+ #columns,tags = self.describe_table("meters")
+ tdLog.info(f" ======= describe table column count = {len(cols)} tags= {len(tags)}======")
+ self.query_table(cols, tags)
+
+ # run
+ def run(self):
+ # seed
+ random.seed(int(time.time()))
+ self.dbname = "schema_change"
+
+ # switch db
+ tdSql.execute(f"use {self.dbname};")
+
+ # change meters
+ self.drop_table(1000000)
+
+
+ # stop
+ def stop(self):
+ tdSql.close()
+ tdLog.success("%s successfully executed" % __file__)
+
+tdCases.addWindows(__file__, TDTestCase())
+tdCases.addLinux(__file__, TDTestCase())
\ No newline at end of file
diff --git a/tests/system-test/eco-system/manager/same_column.py b/tests/system-test/eco-system/manager/same_column.py
new file mode 100644
index 0000000000000000000000000000000000000000..beaf4e449e76686b8c7240b21d0dcaae8f4eb9e0
--- /dev/null
+++ b/tests/system-test/eco-system/manager/same_column.py
@@ -0,0 +1,181 @@
+###################################################################
+# Copyright (c) 2016 by TAOS Technologies, Inc.
+# All rights reserved.
+#
+# This file is proprietary and confidential to TAOS Technologies.
+# No part of this file may be reproduced, stored, transmitted,
+# disclosed or used in any form or by any means other than as
+# expressly provided by the written permission from Jianhui Tao
+#
+###################################################################
+
+# -*- coding: utf-8 -*-
+
+
+from util.log import *
+from util.cases import *
+from util.sql import *
+from util.common import *
+from util.sqlset import *
+import random
+import time
+import traceback
+
+
+class TDTestCase:
+ def init(self, conn, logSql, replicaVar=1):
+ self.replicaVar = int(replicaVar)
+ tdLog.debug("start to execute %s" % __file__)
+ tdSql.init(conn.cursor(), True)
+ self.setsql = TDSetSql()
+ self.column_dict = {
+ 'bc': 'bool',
+ 'fc': 'float',
+ 'dc': 'double',
+ 'ti': 'tinyint',
+ 'si': 'smallint',
+ 'ic': 'int',
+ 'bi': 'bigint',
+ 'uit': 'tinyint unsigned',
+ 'usi': 'smallint unsigned',
+ 'ui': 'int unsigned',
+ 'ubi': 'bigint unsigned',
+ 'bin': 'binary(32)',
+ 'nch': 'nchar(64)'
+ }
+ self.tag_dict = {
+ 'groupid': 'tinyint',
+ 'location': 'binary(16)',
+ 'tfc': 'float',
+ 'tdc': 'double',
+ 'tti': 'tinyint',
+ 'tsi': 'smallint',
+ 'tic': 'int',
+ 'tbi': 'bigint',
+ 'tuit': 'tinyint unsigned',
+ 'tusi': 'smallint unsigned',
+ 'tui': 'int unsigned',
+ 'tubi': 'bigint unsigned',
+ 'tbin': 'binary(32)',
+ 'tnch': 'nchar(64)'
+ }
+
+    # describe table — NOTE(review): the loop starts at row 20, skipping the first 20 describe rows; confirm the offset is intended
+ def describe_table(self, tbname):
+ columns = []
+ tags = []
+ sql = f"describe {tbname}"
+ row_cnt = tdSql.query(sql)
+ for i in range(20, row_cnt):
+ col_name = tdSql.queryResult[i][0]
+ type_name = tdSql.queryResult[i][3]
+ if type_name == "TAG":
+ tags.append(col_name)
+ else:
+ columns.append(col_name)
+
+ return columns,tags
+
+ def drop_tag(self, tags, cnt):
+ for i in range(cnt):
+ tag_cnt = len(tags)
+ sel = random.randint(1, tag_cnt-1)
+ sql = f"alter table meters drop tag `{tags[sel]}` "
+ try:
+ tdSql.execute(sql)
+ tdLog.info(sql)
+ del tags[sel]
+ except:
+ tdLog.info(f" drop tags failed. {sql}")
+ traceback.print_exc()
+
+ # execute sql
+ def execute(self, sql):
+ try:
+ tdSql.execute(sql, 3)
+ tdLog.info(f" exec ok. {sql}")
+ except:
+ tdLog.info(f" exe failed. {sql}")
+ traceback.print_exc()
+
+
+ # query
+ def query_table(self, columns, tags):
+ if len(columns) < 5 :
+ return
+ if len(tags) < 5:
+ return
+
+ sel_cols = random.sample(columns, random.randint(1,int(len(columns)-1)))
+ sel_tags = random.sample(tags, random.randint(1, int(len(tags)-1)))
+
+ field_cols = ",".join(sel_cols)
+ field_tags = ",".join(sel_tags)
+
+        # NOTE(review): the sql built above is only logged; the query below runs a fixed "select * from meters limit 1" — confirm intent
+ sql = f"select {field_cols},{field_tags} from meters"
+ try:
+ tdLog.info( " query sql:" + sql)
+ tdSql.query("select * from meters limit 1")
+ except:
+ tdLog.info( " query failed :" + sql)
+ traceback.print_exc()
+
+ # change table schema
+ def change_columns(self, change_cnt):
+ # init
+
+ ncol = len(self.column_dict)
+ ntag = len(self.tag_dict)
+
+ for i in range(change_cnt):
+ col_idx = random.randint(0, ncol - 1)
+ tag_idx = random.randint(0, ntag - 1)
+
+ cols = list(self.column_dict.keys())
+ tags = list(self.tag_dict.keys())
+
+ # column
+ key = cols[col_idx]
+ value = self.column_dict[key]
+ sql = f'alter table meters drop column {key}'
+ self.execute(sql)
+ sql = f'alter table meters add column {key} {value}'
+ self.execute(sql)
+
+
+            # tag — NOTE(review): indexes tags with col_idx; tag_idx computed above is unused
+ key = tags[col_idx]
+ value = self.tag_dict[key]
+ sql = f'alter table meters drop tag {key}'
+ self.execute(sql)
+ sql = f'alter table meters add tag {key} {value}'
+ self.execute(sql)
+
+            # every 5th iteration: log schema size and run a query (no drop/rename here)
+ if i % 5 == 0:
+ # update columns
+ #columns,tags = self.describe_table("meters")
+ tdLog.info(f" ======= describe table column count = {len(cols)} tags= {len(tags)}======")
+ self.query_table(cols, tags)
+
+ # run
+ def run(self):
+ # seed
+ random.seed(int(time.time()))
+ self.dbname = "schema_change"
+
+ # switch db
+ tdSql.execute(f"use {self.dbname};")
+
+ # change meters
+ self.change_columns(1000000)
+
+
+ # stop
+ def stop(self):
+ tdSql.close()
+ tdLog.success("%s successfully executed" % __file__)
+
+tdCases.addWindows(__file__, TDTestCase())
+tdCases.addLinux(__file__, TDTestCase())
\ No newline at end of file
diff --git a/tests/system-test/eco-system/manager/schema_change.py b/tests/system-test/eco-system/manager/schema_change.py
new file mode 100644
index 0000000000000000000000000000000000000000..400d2b100b4d99c3f0c3455e047caa7e4d45573f
--- /dev/null
+++ b/tests/system-test/eco-system/manager/schema_change.py
@@ -0,0 +1,239 @@
+###################################################################
+# Copyright (c) 2016 by TAOS Technologies, Inc.
+# All rights reserved.
+#
+# This file is proprietary and confidential to TAOS Technologies.
+# No part of this file may be reproduced, stored, transmitted,
+# disclosed or used in any form or by any means other than as
+# expressly provided by the written permission from Jianhui Tao
+#
+###################################################################
+
+# -*- coding: utf-8 -*-
+
+
+from util.log import *
+from util.cases import *
+from util.sql import *
+from util.common import *
+from util.sqlset import *
+import random
+import time
+import traceback
+
+
+class TDTestCase:
+ def init(self, conn, logSql, replicaVar=1):
+ self.replicaVar = int(replicaVar)
+ tdLog.debug("start to execute %s" % __file__)
+ tdSql.init(conn.cursor(), True)
+ self.setsql = TDSetSql()
+ self.column_dict = {
+ 'col0': 'int',
+ 'col1': 'tinyint',
+ 'col2': 'smallint',
+ 'col3': 'int',
+ 'col4': 'bigint',
+ 'col5': 'tinyint unsigned',
+ 'col6': 'smallint unsigned',
+ 'col7': 'int unsigned',
+ 'col8': 'bigint unsigned',
+ 'col9': 'float',
+ 'col10': 'double',
+ 'col11': 'bool',
+ 'col12': 'varchar(20)',
+ 'col13': 'nchar(20)'
+ }
+ self.tag_dict = {
+ 't1': 'tinyint',
+ 't2': 'smallint',
+ 't3': 'int',
+ 't4': 'bigint',
+ 't5': 'tinyint unsigned',
+ 't6': 'smallint unsigned',
+ 't7': 'int unsigned',
+ 't8': 'bigint unsigned',
+ 't9': 'float',
+ 't10': 'double',
+ 't11': 'bool',
+ 't12': 'varchar(20)',
+ 't13': 'nchar(20)',
+ 't14': 'timestamp'
+ }
+
+
+ # delete
+ def delete_col(self, columns, cnt, max_col):
+        # delete randomly chosen columns
+ for i in range(cnt):
+ col_cnt = len(columns)
+ if col_cnt == 0:
+ return
+ sel = random.randint(0, col_cnt - 1)
+ sql = f"alter table meters drop column `{columns[sel]}`"
+ try:
+ tdSql.execute(sql)
+ tdLog.info(f" drop cur col={len(columns)} max_col={max_col} {sql}")
+ del columns[sel]
+ except:
+ tdLog.info(f" drop column failed. {sql}")
+ traceback.print_exc()
+
+
+    # describe table — NOTE(review): the loop starts at row 20, skipping the first 20 describe rows; confirm the offset is intended
+ def describe_table(self, tbname):
+ columns = []
+ tags = []
+ sql = f"describe {tbname}"
+ row_cnt = tdSql.query(sql)
+ for i in range(20, row_cnt):
+ col_name = tdSql.queryResult[i][0]
+ type_name = tdSql.queryResult[i][3]
+ if type_name == "TAG":
+ tags.append(col_name)
+ else:
+ columns.append(col_name)
+
+ return columns,tags
+
+ def renames(self, tags, cnt):
+ col_cnt = len(tags)
+ if col_cnt < 10:
+ return
+ for i in range(cnt):
+ sel = random.randint(1, col_cnt-3)
+ new_name = tags[sel] + "n"
+ sql = f"alter table meters rename tag `{tags[sel]}` `{new_name}` "
+ try:
+ tdSql.execute(sql)
+ tdLog.info(sql)
+ tags[sel] = new_name
+ except:
+ tdLog.info(f" rename tag failed. {sql}")
+ traceback.print_exc()
+
+
+ def drop_tag(self, tags, cnt):
+ for i in range(cnt):
+ tag_cnt = len(tags)
+ sel = random.randint(1, tag_cnt-1)
+ sql = f"alter table meters drop tag `{tags[sel]}` "
+ try:
+ tdSql.execute(sql)
+ tdLog.info(sql)
+ del tags[sel]
+ except:
+ tdLog.info(f" drop tags failed. {sql}")
+ traceback.print_exc()
+
+ # query
+ def query_table(self, columns, tags):
+ if len(columns) < 10 :
+ return
+ if len(tags) < 10:
+ return
+
+ sel_cols = random.sample(columns, random.randint(2,int(len(columns)/10)))
+ sel_tags = random.sample(tags, random.randint(1,int(len(tags)/10)))
+
+ field_cols = ",".join(sel_cols)
+ field_tags = ",".join(sel_tags)
+
+    # NOTE(review): the sql built above is only logged; the query below runs a fixed "select * from meters limit 1" — confirm intent
+ sql = f"select {field_cols},{field_tags} from meters"
+ try:
+ tdLog.info( " query sql:" + sql)
+ tdSql.query("select * from meters limit 1")
+ except:
+ tdLog.info( " query failed :" + sql)
+ traceback.print_exc()
+
+ # change table schema
+ def change_schema(self, change_cnt):
+ # init
+ columns, tags = self.describe_table("meters")
+ max_col = random.randint(200, 2000)
+ tdLog.info(f" ----------- set max column = {max_col} -------------")
+ for i in range(change_cnt):
+ col_cnt = len(self.column_dict)
+ icol = random.randint(0, col_cnt-1)
+ key = f"col{icol}"
+ col_name = key + f"_{i}_{random.randint(1,100)}"
+ col_type = self.column_dict[key]
+ sql = f'alter table meters add column `{col_name}` {col_type}'
+ sql_tag = f'alter table meters add tag `t_{col_name}` {col_type}'
+
+ try:
+ tdSql.execute(sql)
+ tdLog.info(f" add cur col={len(columns)} max_col={max_col} {sql}")
+ columns.append(col_name)
+ if random.randint(1, 4) == 2:
+ tdSql.execute(sql_tag)
+ tdLog.info(f" add tag tag_cnt={len(tags)} {sql_tag}")
+
+ except:
+ tdLog.info(f" add column failed. {sql}")
+ traceback.print_exc()
+
+
+ col_cnt = len(columns)
+ # delete
+ if col_cnt > max_col + 100:
+ self.delete_col(columns, random.randint(1, 30), max_col)
+ elif col_cnt >= max_col + 30:
+ self.delete_col(columns, random.randint(1, 4), max_col)
+ max_col = random.randint(200, 2000)
+ tdLog.info(f" ----------- set max column = {max_col} -------------")
+ elif col_cnt > max_col:
+ self.delete_col(columns, random.randint(1, 3), max_col)
+
+
+
+ if i % 50 == 0:
+ sql = f"flush database {self.dbname};"
+ tdSql.execute(sql)
+ tdLog.info(f" ***** {sql} *****")
+
+ # query
+ if i % 70 == 0:
+ self.query_table(columns, tags)
+
+ # drop and rename
+ if i % 10 == 0:
+ # update columns
+ columns,tags = self.describe_table("meters")
+ tdLog.info(f" ======= describe table column count = {len(columns)} tags= {len(tags)}======")
+
+ if random.randint(1,3) == 2:
+ self.query_table(columns, tags)
+
+ if len(tags) > 50:
+ self.drop_tag(tags, random.randint(1, 30))
+
+ self.renames(tags, random.randint(1, 10))
+
+
+ # sleep
+ #time.sleep(0.3)
+
+
+ # run
+ def run(self):
+ # seed
+ random.seed(int(time.time()))
+ self.dbname = "schema_change"
+
+ # switch db
+ tdSql.execute(f"use {self.dbname};")
+
+ # change meters
+ self.change_schema(1000000)
+
+
+
+ def stop(self):
+ tdSql.close()
+ tdLog.success("%s successfully executed" % __file__)
+
+tdCases.addWindows(__file__, TDTestCase())
+tdCases.addLinux(__file__, TDTestCase())
\ No newline at end of file
diff --git a/tests/system-test/eco-system/schemaless/insert.py b/tests/system-test/eco-system/schemaless/insert.py
new file mode 100644
index 0000000000000000000000000000000000000000..901196ebfd45232d462612f48bd89f0bf0d312e3
--- /dev/null
+++ b/tests/system-test/eco-system/schemaless/insert.py
@@ -0,0 +1,151 @@
+###################################################################
+# Copyright (c) 2016 by TAOS Technologies, Inc.
+# All rights reserved.
+#
+# This file is proprietary and confidential to TAOS Technologies.
+# No part of this file may be reproduced, stored, transmitted,
+# disclosed or used in any form or by any means other than as
+# expressly provided by the written permission from Jianhui Tao
+#
+###################################################################
+
+# -*- coding: utf-8 -*-
+
+
+from util.log import *
+from util.cases import *
+from util.sql import *
+from util.common import *
+from util.sqlset import *
+import random
+import time
+import traceback
+import taos
+import string
+from taos import schemaless
+
+class TDTestCase:
+ def init(self, conn, logSql, replicaVar=1):
+ self.replicaVar = int(replicaVar)
+ tdLog.debug("start to execute %s" % __file__)
+ tdSql.init(conn.cursor(), True)
+ self.setsql = TDSetSql()
+ self.conn = conn
+ self.schema = {}
+
+ def random_string(self, count):
+ letters = string.ascii_letters
+ return ''.join(random.choice(letters) for i in range(count))
+
+ def genCol(self, col_name, isTag):
+ col_types = ["str","f64","f32","i8","u8","i16","u16","i32","u32","i64","u64"]
+ if self.schema.get(col_name) == None:
+ col_type = random.choice(col_types)
+ self.schema[col_name] = col_type
+ else:
+ col_type = self.schema[col_name]
+
+ is_num = True
+ val = ""
+ if col_type == "str":
+ val = self.random_string(random.randint(1, 10))
+ is_num = False
+ elif col_type == "f64":
+ val = random.randrange(-100000000000000, 1000000000000)/3*2.25678
+ elif col_type == "f32":
+ val = random.randrange(-100000000, 1000000000)/3*1.2345
+ elif col_type == "i8":
+ val = random.randint(-128, 127)
+ elif col_type == "u8":
+ val = random.randint(0, 256)
+ elif col_type == "i16":
+ val = random.randint(-32768, 32767)
+ elif col_type == "u16":
+ val = random.randint(0, 256*256)
+ elif col_type == "i32":
+ val = random.randint(-256*256*256*128, 256*256*256*128)
+ elif col_type == "u32":
+ val = random.randint(0, 256*256*256*256)
+ elif col_type == "i64":
+ val = random.randint(-256*256*256*256*256*256*256*128, 256*256*256*256*256*256*256*128)
+ elif col_type == "u64":
+ val = random.randint(0, 256*256*256*256*256*256*256*256)
+ else:
+ val = 100
+
+ if isTag:
+ col_val = val
+ elif is_num:
+ col_val = f'{val}{col_type}'
+ else:
+ col_val = '"' + val + '"'
+
+ return f'{col_name}={col_val}'
+
+
+ # cols
+ def genCols(self, pre, max, index, isTag):
+ col_cnt = random.randint(1, max)
+ cols = []
+ for i in range(col_cnt):
+ col_name = f'{pre}_{index}_{i}'
+ cols.append(self.genCol(col_name, isTag))
+
+ return ",".join(cols)
+
+
+ # execute sql
+ def insert(self,sql,i):
+ print("schema less insert")
+ try:
+ self.conn.schemaless_insert([sql], schemaless.SmlProtocol.LINE_PROTOCOL, schemaless.SmlPrecision.MILLI_SECONDS)
+ tdLog.info(f" exec ok i={i} {sql}")
+ except:
+ tdLog.info(f" exe failed. i={i} {sql}")
+ traceback.print_exc()
+
+ def genTags(self, i):
+ tags = f"t1={i},t2=abc,t3=work"
+ return tags
+
+ # change table schema
+ def schemaless_insert(self, change_cnt):
+ # init
+ ts = 1683194263000
+ for i in range(change_cnt):
+ index = int(i/10000) % 600
+ cols = self.genCols("c", 5, index, False)
+ tags = self.genTags(index)
+ sql = f'{self.stable},{tags} {cols} {ts + i}'
+ self.insert(sql, i)
+
+ # run
+
+ def run(self):
+ # seed
+ #random.seed(int(time.time()))
+ self.dbname = "eco_system"
+ self.stable = "sml_stb"
+
+ # switch db
+ tdSql.execute(f"use {self.dbname};")
+ tdSql.execute(f"drop table if exists {self.stable};")
+
+
+
+ # change meters
+ try:
+ self.schemaless_insert(1000000)
+ except:
+ traceback.print_exc()
+
+ print(self.schema)
+
+
+ # stop
+ def stop(self):
+ tdSql.close()
+ tdLog.success("%s successfully executed" % __file__)
+
+tdCases.addWindows(__file__, TDTestCase())
+tdCases.addLinux(__file__, TDTestCase())
diff --git a/utils/test/c/sml_test.c b/utils/test/c/sml_test.c
index ac5aff47273fcda49af591f55987dffaae5a7bf7..94619339e9f79920a194c60d8a8386a71c5c2c20 100644
--- a/utils/test/c/sml_test.c
+++ b/utils/test/c/sml_test.c
@@ -1132,6 +1132,155 @@ int sml_td22900_Test() {
return code;
}
+int sml_td24070_Test() {
+ TAOS *taos = taos_connect("localhost", "root", "taosdata", NULL, 0);
+
+ TAOS_RES *pRes = taos_query(taos, "CREATE user test_db pass 'test'");
+ ASSERT(taos_errno(pRes) == 0);
+ taos_free_result(pRes);
+
+ pRes = taos_query(taos, "CREATE DATABASE IF NOT EXISTS td24070_read");
+ ASSERT(taos_errno(pRes) == 0);
+ taos_free_result(pRes);
+
+ pRes = taos_query(taos, "grant read on td24070_read to test_db");
+ ASSERT(taos_errno(pRes) == 0);
+ taos_free_result(pRes);
+
+ pRes = taos_query(taos, "CREATE DATABASE IF NOT EXISTS td24070_write");
+ ASSERT(taos_errno(pRes) == 0);
+ taos_free_result(pRes);
+
+ pRes = taos_query(taos, "grant write on td24070_write to test_db");
+ ASSERT(taos_errno(pRes) == 0);
+ taos_free_result(pRes);
+
+ taos_close(taos);
+
+
+ // test db privilege
+ taos = taos_connect("localhost", "test_db", "test", NULL, 0);
+ const char* sql[] = {"stb2,t1=1,dataModelName=t0 f1=283i32 1632299372000"};
+
+ pRes = taos_query(taos, "use td24070_read");
+ taos_free_result(pRes);
+
+ pRes = taos_schemaless_insert(taos, (char **)sql, sizeof(sql) / sizeof(sql[0]), TSDB_SML_LINE_PROTOCOL,
+ TSDB_SML_TIMESTAMP_MILLI_SECONDS);
+
+ printf("%s result:%s\n", __FUNCTION__, taos_errstr(pRes));
+ int code = taos_errno(pRes);
+ ASSERT(code != 0);
+ taos_free_result(pRes);
+
+ pRes = taos_query(taos, "use td24070_write");
+ taos_free_result(pRes);
+
+ pRes = taos_schemaless_insert(taos, (char **)sql, sizeof(sql) / sizeof(sql[0]), TSDB_SML_LINE_PROTOCOL,
+ TSDB_SML_TIMESTAMP_MILLI_SECONDS);
+
+ printf("%s result:%s\n", __FUNCTION__, taos_errstr(pRes));
+ code = taos_errno(pRes);
+ ASSERT(code == 0);
+ taos_free_result(pRes);
+ taos_close(taos);
+ // test db privilege end
+
+
+ // test stable privilege
+ taos = taos_connect("localhost", "root", "taosdata", NULL, 0);
+
+ pRes = taos_query(taos, "CREATE user test_stb_read pass 'test'");
+ ASSERT(taos_errno(pRes) == 0);
+ taos_free_result(pRes);
+
+ pRes = taos_query(taos, "CREATE user test_stb_write pass 'test'");
+ ASSERT(taos_errno(pRes) == 0);
+ taos_free_result(pRes);
+
+ pRes = taos_query(taos, "grant read on td24070_write.stb2 to test_stb_read");
+ ASSERT(taos_errno(pRes) == 0);
+ taos_free_result(pRes);
+
+ pRes = taos_query(taos, "grant write on td24070_write.stb2 to test_stb_write");
+ ASSERT(taos_errno(pRes) == 0);
+ taos_free_result(pRes);
+ taos_close(taos);
+
+ taos = taos_connect("localhost", "test_stb_read", "test", "td24070_write", 0);
+ const char* sql1[] = {"stb2,t1=1,dataModelName=t0 f1=283i32 1632299373000"};
+
+ pRes = taos_schemaless_insert(taos, (char **)sql1, sizeof(sql1) / sizeof(sql1[0]), TSDB_SML_LINE_PROTOCOL,
+ TSDB_SML_TIMESTAMP_MILLI_SECONDS);
+
+ printf("%s result:%s\n", __FUNCTION__, taos_errstr(pRes));
+ code = taos_errno(pRes);
+ ASSERT(code != 0);
+ taos_free_result(pRes);
+ taos_close(taos);
+
+ taos = taos_connect("localhost", "test_stb_write", "test", "td24070_write", 0);
+ const char* sql2[] = {"stb2,t1=1,dataModelName=t0 f1=283i32 1632299373000"};
+
+ pRes = taos_schemaless_insert(taos, (char **)sql2, sizeof(sql2) / sizeof(sql2[0]), TSDB_SML_LINE_PROTOCOL,
+ TSDB_SML_TIMESTAMP_MILLI_SECONDS);
+
+ printf("%s result:%s\n", __FUNCTION__, taos_errstr(pRes));
+ code = taos_errno(pRes);
+ ASSERT(code == 0);
+ taos_free_result(pRes);
+ taos_close(taos);
+  // test stable privilege end
+
+ // test table privilege
+ taos = taos_connect("localhost", "root", "taosdata", NULL, 0);
+
+ pRes = taos_query(taos, "CREATE user test_tb_read pass 'test'");
+ ASSERT(taos_errno(pRes) == 0);
+ taos_free_result(pRes);
+
+ pRes = taos_query(taos, "CREATE user test_tb_write pass 'test'");
+ ASSERT(taos_errno(pRes) == 0);
+ taos_free_result(pRes);
+
+ pRes = taos_query(taos, "grant read on td24070_write.stb2 with t1=1 to test_tb_read");
+ ASSERT(taos_errno(pRes) == 0);
+ taos_free_result(pRes);
+
+ pRes = taos_query(taos, "grant write on td24070_write.stb2 with t1=1 to test_tb_write");
+ ASSERT(taos_errno(pRes) == 0);
+ taos_free_result(pRes);
+ taos_close(taos);
+
+ taos = taos_connect("localhost", "test_tb_read", "test", "td24070_write", 0);
+ const char* sql3[] = {"stb2,t1=1,dataModelName=t0 f1=283i32 1632299374000"};
+
+
+ pRes = taos_schemaless_insert(taos, (char **)sql3, sizeof(sql3) / sizeof(sql3[0]), TSDB_SML_LINE_PROTOCOL,
+ TSDB_SML_TIMESTAMP_MILLI_SECONDS);
+
+ printf("%s result:%s\n", __FUNCTION__, taos_errstr(pRes));
+ code = taos_errno(pRes);
+ ASSERT(code != 0);
+ taos_free_result(pRes);
+ taos_close(taos);
+
+ taos = taos_connect("localhost", "test_tb_write", "test", "td24070_write", 0);
+ const char* sql4[] = {"stb2,t1=1,dataModelName=t0 f1=283i32 1632299374000"};
+
+ pRes = taos_schemaless_insert(taos, (char **)sql4, sizeof(sql4) / sizeof(sql4[0]), TSDB_SML_LINE_PROTOCOL,
+ TSDB_SML_TIMESTAMP_MILLI_SECONDS);
+
+ printf("%s result:%s\n", __FUNCTION__, taos_errstr(pRes));
+ code = taos_errno(pRes);
+ ASSERT(code == 0);
+ taos_free_result(pRes);
+ taos_close(taos);
+  // test table privilege end
+
+ return code;
+}
+
int sml_td23881_Test() {
TAOS *taos = taos_connect("localhost", "root", "taosdata", NULL, 0);
@@ -1379,6 +1528,8 @@ int main(int argc, char *argv[]) {
}
int ret = 0;
+ ret = sml_td24070_Test();
+ ASSERT(!ret);
ret = sml_td23881_Test();
ASSERT(ret);
ret = sml_escape_Test();