Commit 76431142 authored by kailixu

Merge branch '3.0' into enh/TD-23582-3.0

......@@ -120,8 +120,14 @@ ELSE ()
SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Werror -Wno-literal-suffix -Werror=return-type -fPIC -gdwarf-2 -fsanitize=address -fsanitize=undefined -fsanitize-recover=all -fsanitize=float-divide-by-zero -fsanitize=float-cast-overflow -fno-sanitize=shift-base -fno-sanitize=alignment -g3 -Wformat=0")
MESSAGE(STATUS "Compile with Address Sanitizer!")
ELSE ()
SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Werror -Werror=return-type -fPIC -gdwarf-2 -g3 -Wformat=2 -Wno-format-nonliteral -Wno-format-truncation -Wno-format-y2k")
SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Werror -Wno-reserved-user-defined-literal -Wno-literal-suffix -Werror=return-type -fPIC -gdwarf-2 -g3 -Wformat=2 -Wno-format-nonliteral -Wno-format-truncation -Wno-format-y2k")
MESSAGE(STATUS "XXXXXXXXXXXXXX Clang/AppleClang" ${TD_DARWIN})
IF (${TD_DARWIN})
SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Werror -Werror=return-type -fPIC -gdwarf-2 -g3 -Wformat=2 -Wno-format-nonliteral -Wno-format-y2k")
SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Werror -Wno-reserved-user-defined-literal -Werror=return-type -fPIC -gdwarf-2 -g3 -Wformat=2 -Wno-format-nonliteral -Wno-format-y2k")
ELSE ()
SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Werror -Werror=return-type -fPIC -gdwarf-2 -g3 -Wformat=2 -Wno-format-nonliteral -Wno-format-truncation -Wno-format-y2k")
SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Werror -Wno-reserved-user-defined-literal -Wno-literal-suffix -Werror=return-type -fPIC -gdwarf-2 -g3 -Wformat=2 -Wno-format-nonliteral -Wno-format-truncation -Wno-format-y2k")
ENDIF ()
ENDIF ()
# disable all assert
......
......@@ -109,7 +109,7 @@ option(
option(
BUILD_WITH_ROCKSDB
"If build with rocksdb"
OFF
ON
)
option(
......
......@@ -223,17 +223,31 @@ endif(${BUILD_WITH_LEVELDB})
# rocksdb
# To support rocksdb build on ubuntu: sudo apt-get install libgflags-dev
if(${BUILD_WITH_ROCKSDB})
SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-error=maybe-uninitialized")
#SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-error=maybe-uninitialized")
option(WITH_TESTS "" OFF)
option(WITH_BENCHMARK_TOOLS "" OFF)
option(WITH_TOOLS "" OFF)
option(WITH_LIBURING "" OFF)
option(WITH_IOSTATS_CONTEXT "" OFF)
option(WITH_PERF_CONTEXT "" OFF)
option(FAIL_ON_WARNINGS "" OFF)
#option(WITH_JEMALLOC "" ON)
option(ROCKSDB_BUILD_SHARED "Build shared versions of the RocksDB libraries" OFF)
IF (${TD_WINDOWS})
option(WITH_MD_LIBRARY "build with MD" OFF)
set(SYSTEM_LIBS ${SYSTEM_LIBS} shlwapi.lib rpcrt4.lib)
endif(${TD_WINDOWS})
add_subdirectory(rocksdb EXCLUDE_FROM_ALL)
target_include_directories(
rocksdb
PUBLIC $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/rocksdb/include>
)
IF (${TD_DARWIN})
target_compile_options(
rocksdb
PRIVATE -Wno-unused-private-field
)
endif(${TD_DARWIN})
endif(${BUILD_WITH_ROCKSDB})
# lucene
......
message("contrib test/rocksdb:" ${BUILD_DEPENDENCY_TESTS})
add_executable(rocksdbTest "")
target_sources(rocksdbTest
PRIVATE
"${CMAKE_CURRENT_SOURCE_DIR}/main.c"
)
target_link_libraries(rocksdbTest rocksdb)
\ No newline at end of file
target_link_libraries(rocksdbTest rocksdb)
......@@ -25,10 +25,12 @@ int main(int argc, char const *argv[]) {
// Read
rocksdb_readoptions_t *readoptions = rocksdb_readoptions_create();
rocksdb_readoptions_set_snapshot(readoptions, rocksdb_create_snapshot(db));
//rocksdb_readoptions_set_snapshot(readoptions, rocksdb_create_snapshot(db));
char buf[256] = {0};
size_t vallen = 0;
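// note: rocksdb_get returns a heap-allocated value of vallen bytes that is not
// guaranteed to be NUL-terminated, hence the bounded snprintf below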
char * val = rocksdb_get(db, readoptions, "key", 3, &vallen, &err);
printf("val:%s\n", val);
snprintf(buf, vallen+5, "val:%s", val);
printf("%ld %ld %s\n", strlen(val), vallen, buf);
// Update
// rocksdb_put(db, writeoptions, "key", 3, "eulav", 5, &err);
......@@ -43,4 +45,4 @@ int main(int argc, char const *argv[]) {
rocksdb_close(db);
return 0;
}
\ No newline at end of file
}
......@@ -44,7 +44,7 @@ For more details on features, please read through the entire documentation.
## Competitive Advantages
By making full use of [characteristics of time series data](https://tdengine.com/tsdb/characteristics-of-time-series-data/), TDengine differentiates itself from other [time series databases](https://tdengine.com/tsdb), with the following advantages.
By making full use of [characteristics of time series data](https://tdengine.com/tsdb/characteristics-of-time-series-data/), TDengine differentiates itself from other [time series databases](https://tdengine.com/tsdb/), with the following advantages.
- **[High-Performance](https://tdengine.com/tdengine/high-performance-time-series-database/)**: TDengine is the only time-series database to solve the high cardinality issue to support billions of data collection points while out performing other time-series databases for data ingestion, querying and data compression.
......@@ -123,13 +123,12 @@ As a high-performance, scalable and SQL supported time-series database, TDengine
## Comparison with other databases
- [Writing Performance Comparison of TDengine and InfluxDB ](https://tdengine.com/performance-comparison-of-tdengine-and-influxdb/)
- [Query Performance Comparison of TDengine and InfluxDB](https://tdengine.com/query-performance-comparison-test-report-tdengine-vs-influxdb/)
- [TDengine vs OpenTSDB](https://tdengine.com/performance-tdengine-vs-opentsdb/)
- [TDengine vs Cassandra](https://tdengine.com/performance-tdengine-vs-cassandra/)
- [TDengine vs InfluxDB](https://tdengine.com/performance-tdengine-vs-influxdb/)
- [TDengine vs. InfluxDB](https://tdengine.com/tsdb-comparison-influxdb-vs-tdengine/)
- [TDengine vs. TimescaleDB](https://tdengine.com/tsdb-comparison-timescaledb-vs-tdengine/)
- [TDengine vs. OpenTSDB](https://tdengine.com/performance-tdengine-vs-opentsdb/)
- [TDengine vs. Cassandra](https://tdengine.com/performance-tdengine-vs-cassandra/)
## More readings
- [Introduction to Time-Series Database](https://tdengine.com/tsdb/)
- [Introduction to TDengine competitive advantages](https://tdengine.com/tdengine/)
......@@ -6,7 +6,7 @@ description: This document describes how to install TDengine in a Docker contain
This document describes how to install TDengine in a Docker container and perform queries and inserts.
- The easiest way to explore TDengine is through [TDengine Cloud](http://cloud.tdengine.com).
- The easiest way to explore TDengine is through [TDengine Cloud](https://cloud.tdengine.com).
- To get started with TDengine in a non-containerized environment, see [Quick Install from Package](../../get-started/package).
- If you want to view the source code, build TDengine yourself, or contribute to the project, see the [TDengine GitHub repository](https://github.com/taosdata/TDengine).
......
......@@ -10,7 +10,7 @@ import PkgListV3 from "/components/PkgListV3";
This document describes how to install TDengine on Linux/Windows/macOS and perform queries and inserts.
- The easiest way to explore TDengine is through [TDengine Cloud](http://cloud.tdengine.com).
- The easiest way to explore TDengine is through [TDengine Cloud](https://cloud.tdengine.com).
- To get started with TDengine on Docker, see [Quick Install on Docker](../../get-started/docker).
- If you want to view the source code, build TDengine yourself, or contribute to the project, see the [TDengine GitHub repository](https://github.com/taosdata/TDengine).
......@@ -208,6 +208,8 @@ The following `launchctl` commands can help you manage TDengine service:
- Check TDengine Server status: `sudo launchctl list | grep taosd`
- Check TDengine Server status details: `launchctl print system/com.tdengine.taosd`
:::info
- Please use `sudo` to run `launchctl` to manage _com.tdengine.taosd_ with administrator privileges.
- The administrator privilege is required for service management to enhance security.
......
......@@ -288,6 +288,6 @@ Prior to establishing connection, please make sure TDengine is already running a
</Tabs>
:::tip
If the connection fails, in most cases it's caused by improper configuration for FQDN or firewall. Please refer to the section "Unable to establish connection" in [FAQ](https://docs.tdengine.com/train-faq/faq).
If the connection fails, in most cases it's caused by improper configuration for FQDN or firewall. Please refer to the section "Unable to establish connection" in [FAQ](../../train-faq/faq).
:::
```rust
{{#include docs/examples/rust/nativeexample/examples/schemaless_insert_line.rs}}
```
......@@ -23,7 +23,7 @@ By subscribing to a topic, a consumer can obtain the latest data in that topic i
To implement these features, TDengine indexes its write-ahead log (WAL) file for fast random access and provides configurable methods for replacing and retaining this file. You can define a retention period and size for this file. For information, see the CREATE DATABASE statement. In this way, the WAL file is transformed into a persistent storage engine that remembers the order in which events occur. However, note that configuring an overly long retention period for your WAL files makes database compression inefficient. TDengine then uses the WAL file instead of the time-series database as its storage engine for queries in the form of topics. TDengine reads the data from the WAL file; uses a unified query engine instance to perform filtering, transformations, and other operations; and finally pushes the data to consumers.
Tips:The default data subscription is to consume data from the wal. If the wal is deleted, the consumed data will be incomplete. At this time, you can set the parameter experimental.snapshot.enable to true to obtain all data from the tsdb, but in this way, the consumption order of the data cannot be guaranteed. Therefore, it is recommended to set a reasonable retention policy for WAL based on your consumption situation to ensure that you can subscribe all data from WAL.
Tips: Data subscription consumes data from the WAL. If WAL files are deleted according to the WAL retention policy, the deleted data can no longer be consumed. So you need to set reasonable values for the parameters `WAL_RETENTION_PERIOD` or `WAL_RETENTION_SIZE` when creating the database, and make sure your application consumes data in a timely manner, so that no data is lost. This behavior is similar to Kafka and other widely used message queue products.
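For example, a database intended for subscription can pin down both limits when it is created; a minimal sketch, assuming a database named `tmq_db` (the values are illustrative and should be tuned to your ingest rate and consumption latency):
```sql
-- Keep WAL files for up to 4 days (in seconds) or 1 GB in total (in KB) for consumers.
CREATE DATABASE tmq_db WAL_RETENTION_PERIOD 345600 WAL_RETENTION_SIZE 1048576;
```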
## Data Schema and API
......@@ -294,7 +294,6 @@ You configure the following parameters when creating a consumer:
| `auto.offset.reset` | enum | Initial offset for the consumer group | Specify `earliest`, `latest`, or `none`(default) |
| `enable.auto.commit` | boolean | Commit automatically; true: user application doesn't need to explicitly commit; false: user application need to handle commit by itself | Default value is true |
| `auto.commit.interval.ms` | integer | Interval for automatic commits, in milliseconds | Default value is 5000 |
| `experimental.snapshot.enable` | boolean | Specify whether to consume data in TSDB; true: both data in WAL and in TSDB can be consumed; false: only data in WAL can be consumed | default value: false |
| `msg.with.table.name` | boolean | Specify whether to deserialize table names from messages | default value: false |
The method of specifying these parameters depends on the language used:
......@@ -312,7 +311,6 @@ tmq_conf_set(conf, "group.id", "cgrpName");
tmq_conf_set(conf, "td.connect.user", "root");
tmq_conf_set(conf, "td.connect.pass", "taosdata");
tmq_conf_set(conf, "auto.offset.reset", "earliest");
tmq_conf_set(conf, "experimental.snapshot.enable", "true");
tmq_conf_set(conf, "msg.with.table.name", "true");
tmq_conf_set_auto_commit_cb(conf, tmq_commit_cb_print, NULL);
......@@ -368,7 +366,6 @@ conf := &tmq.ConfigMap{
"td.connect.port": "6030",
"client.id": "test_tmq_c",
"enable.auto.commit": "false",
"experimental.snapshot.enable": "true",
"msg.with.table.name": "true",
}
consumer, err := NewConsumer(conf)
......@@ -416,7 +413,6 @@ Python programs use the following parameters:
| `enable.auto.commit` | string | Commit automatically | Specify `true` or `false` |
| `auto.commit.interval.ms` | string | Interval for automatic commits, in milliseconds | |
| `auto.offset.reset` | string | Initial offset for the consumer group | Specify `earliest`, `latest`, or `none`(default) |
| `experimental.snapshot.enable` | string | Specify whether it's allowed to consume messages from the WAL or from TSDB | Specify `true` or `false` |
| `enable.heartbeat.background` | string | Backend heartbeat; if enabled, the consumer does not go offline even if it has not polled for a long time | Specify `true` or `false` |
</TabItem>
......
......@@ -6,10 +6,12 @@ description: This document describes how to create user-defined functions (UDF),
The built-in functions of TDengine may not be sufficient for the use cases of every application. In this case, you can define custom functions for use in TDengine queries. These are known as user-defined functions (UDF). A user-defined function takes one column of data or the result of a subquery as its input.
TDengine supports user-defined functions written in C or C++. This document describes the usage of user-defined functions.
User-defined functions can be scalar functions or aggregate functions. Scalar functions, such as `abs`, `sin`, and `concat`, output a value for every row of data. Aggregate functions, such as `avg` and `max`, output one value for multiple rows of data.
TDengine supports user-defined functions written in C or Python. This document describes the usage of user-defined functions.
## Implement a UDF in C
When you create a user-defined function, you must implement standard interface functions:
- For scalar functions, implement the `scalarfn` interface function.
- For aggregate functions, implement the `aggfn_start`, `aggfn`, and `aggfn_finish` interface functions.
......@@ -17,7 +19,7 @@ When you create a user-defined function, you must implement standard interface f
There are strict naming conventions for these interface functions. The names of the start, finish, init, and destroy interfaces must be <udf-name\>_start, <udf-name\>_finish, <udf-name\>_init, and <udf-name\>_destroy, respectively. Replace `scalarfn`, `aggfn`, and `udf` with the name of your user-defined function.
## Implementing a Scalar Function
### Implementing a Scalar Function in C
The implementation of a scalar function is described as follows:
```c
#include "taos.h"
......@@ -49,7 +51,7 @@ int32_t scalarfn_destroy() {
```
Replace `scalarfn` with the name of your function.
## Implementing an Aggregate Function
### Implementing an Aggregate Function in C
The implementation of an aggregate function is described as follows:
```c
......@@ -100,7 +102,7 @@ int32_t aggfn_destroy() {
```
Replace `aggfn` with the name of your function.
## Interface Functions
### UDF Interface Definition in C
There are strict naming conventions for interface functions. The names of the start, finish, init, and destroy interfaces must be <udf-name\>_start, <udf-name\>_finish, <udf-name\>_init, and <udf-name\>_destroy, respectively. Replace `scalarfn`, `aggfn`, and `udf` with the name of your user-defined function.
......@@ -108,8 +110,7 @@ Interface functions return a value that indicates whether the operation was succ
For information about the parameters for interface functions, see Data Model
### Interfaces for Scalar Functions
#### Scalar Interface
`int32_t scalarfn(SUdfDataBlock* inputDataBlock, SUdfColumn *resultColumn)`
Replace `scalarfn` with the name of your function. This function performs scalar calculations on data blocks. You can configure a value through the parameters in the `resultColumn` structure.
......@@ -118,7 +119,7 @@ The parameters in the function are defined as follows:
- inputDataBlock: The data block to input.
- resultColumn: The column to output.
### Interfaces for Aggregate Functions
#### Aggregate Interface
`int32_t aggfn_start(SUdfInterBuf *interBuf)`
......@@ -126,7 +127,7 @@ The parameters in the function are defined as follows:
`int32_t aggfn_finish(SUdfInterBuf* interBuf, SUdfInterBuf *result)`
Replace `aggfn` with the name of your function. In the function, aggfn_start is called to generate a result buffer. Data is then divided between multiple blocks, and aggfn is called on each block to update the result. Finally, aggfn_finish is called to generate final results from the intermediate results. The final result contains only one or zero data points.
Replace `aggfn` with the name of your function. In the function, aggfn_start is called to generate a result buffer. Data is then divided between multiple blocks, and the `aggfn` function is called on each block to update the result. Finally, aggfn_finish is called to generate the final results from the intermediate results. The final result contains only one or zero data points.
The parameters in the function are defined as follows:
- interBuf: The intermediate result buffer.
......@@ -135,15 +136,15 @@ The parameters in the function are defined as follows:
- result: The final result.
### Initializing and Terminating User-Defined Functions
#### Initialization and Cleanup Interface
`int32_t udf_init()`
`int32_t udf_destroy()`
Replace `udf`with the name of your function. udf_init initializes the function. udf_destroy terminates the function. If it is not necessary to initialize your function, udf_init is not required. If it is not necessary to terminate your function, udf_destroy is not required.
Replace `udf` with the name of your function. udf_init initializes the function. udf_destroy terminates the function. If it is not necessary to initialize your function, udf_init is not required. If it is not necessary to terminate your function, udf_destroy is not required.
## Data Structure of User-Defined Functions
### Data Structures for UDF in C
```c
typedef struct SUdfColumnMeta {
int16_t type;
......@@ -193,7 +194,7 @@ typedef struct SUdfInterBuf {
```
The data structure is described as follows:
- The SUdfDataBlock block includes the number of rows (numOfRows) and number of columns (numCols). udfCols[i] (0 <= i <= numCols-1) indicates that each column is of type SUdfColumn.
- The SUdfDataBlock block includes the number of rows (numOfRows) and the number of columns (numCols). udfCols[i] (0 <= i <= numCols-1) indicates that each column is of type SUdfColumn.
- SUdfColumn includes the definition of the data type of the column (colMeta) and the data in the column (colData).
- The member definitions of SUdfColumnMeta are the same as the data type definitions in `taos.h`.
- The data in SUdfColumnData can become longer. varLenCol indicates variable-length data, and fixLenCol indicates fixed-length data.
......@@ -201,9 +202,9 @@ The data structure is described as follows:
Additional functions are defined in `taosudf.h` to make it easier to work with these structures.
## Compile UDF
### Compiling C UDF
To use your user-defined function in TDengine, first compile it to a dynamically linked library (DLL).
To use your user-defined function in TDengine, first compile it to a shared library.
For example, the sample UDF `bit_and.c` can be compiled into a DLL as follows:
......@@ -213,12 +214,9 @@ gcc -g -O0 -fPIC -shared bit_and.c -o libbitand.so
The generated DLL file `libbitand.so` can now be used to implement your function. Note: GCC 7.5 or later is required.
## Manage and Use User-Defined Functions
After compiling your function into a DLL, you add it to TDengine. For more information, see [User-Defined Functions](../12-taos-sql/26-udf.md).
### UDF Sample Code in C
## Sample Code
### Sample scalar function: [bit_and](https://github.com/taosdata/TDengine/blob/3.0/tests/script/sh/bit_and.c)
#### Scalar function: [bit_and](https://github.com/taosdata/TDengine/blob/3.0/tests/script/sh/bit_and.c)
The bit_and function implements the bitwise AND operation across multiple columns. If there is only one column, the column is returned. The bit_and function ignores null values.
......@@ -231,7 +229,7 @@ The bit_and function implements bitwise addition for multiple columns. If there
</details>
### Sample aggregate function: [l2norm](https://github.com/taosdata/TDengine/blob/3.0/tests/script/sh/l2norm.c)
#### Aggregate function 1: [l2norm](https://github.com/taosdata/TDengine/blob/3.0/tests/script/sh/l2norm.c)
The l2norm function finds the second-order norm for all data in the input column. This squares the values, takes a cumulative sum, and finds the square root.
......@@ -243,3 +241,151 @@ The l2norm function finds the second-order norm for all data in the input column
```
</details>
#### Aggregate function 2: [max_vol](https://github.com/taosdata/TDengine/blob/3.0/tests/script/sh/max_vol.c)
Given several voltage columns as input, the max_vol function returns a string that concatenates the deviceId column, the row and column number of the maximum voltage, and the maximum voltage value.
Create Table:
```sql
create table battery(ts timestamp, vol1 float, vol2 float, vol3 float, deviceId varchar(16));
```
Create the UDF:
```sql
create aggregate function max_vol as '/root/udf/libmaxvol.so' outputtype binary(64) bufsize 10240 language 'C';
```
Use the UDF in the query:
```sql
select max_vol(vol1,vol2,vol3,deviceid) from battery;
```
<details>
<summary>max_vol.c</summary>
```c
{{#include tests/script/sh/max_vol.c}}
```
</details>
## Implement a UDF in Python
Implement the specified interface functions when implementing a UDF in Python.
- implement the `process` function for a scalar UDF.
- implement the `start`, `reduce`, and `finish` functions for an aggregate UDF.
- implement `init` for initialization and `destroy` for termination.
### Implement a Scalar UDF in Python
The implementation of a scalar UDF is described as follows:
```Python
def init():
    # initialization
    pass

def destroy():
    # cleanup
    pass

def process(input: datablock) -> tuple[output_type]:
    # process the input datablock;
    # datablock.data(row, col) accesses the Python object at (row, col);
    # return a tuple of objects of the output type
    pass
```
### Implement an Aggregate UDF in Python
The implementation of an aggregate function is described as follows:
```Python
def init():
    # initialization
    pass

def destroy():
    # cleanup
    pass

def start() -> bytes:
    # return serialize(init_state)
    pass

def reduce(inputs: datablock, buf: bytes) -> bytes:
    # deserialize buf into state,
    # reduce the inputs and state into new_state
    # (use inputs.data(i, j) to access the Python object at (i, j)),
    # then serialize new_state into new_state_bytes
    return new_state_bytes

def finish(buf: bytes) -> output_type:
    # return an object of the output type
    pass
```
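Once saved to a file, a template like this can be registered and invoked from SQL; a sketch, assuming a hypothetical file `/root/udf/myagg.py`, function name `myagg`, and output type DOUBLE:
```sql
CREATE AGGREGATE FUNCTION myagg AS '/root/udf/myagg.py' OUTPUTTYPE DOUBLE BUFSIZE 8192 LANGUAGE 'Python';
SELECT myagg(col1) FROM some_table;
```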
### Python UDF Interface Definition
#### Scalar interface
```Python
def process(input: datablock) -> tuple[output_type]:
```
- `input` is a data block, a two-dimensional matrix-like object whose method `data(row, col)` returns the Python object located at (`row`, `col`)
- the return value is a Python tuple object, in which each item is a Python object of type `output_type`
#### Aggregate Interface
```Python
def start() -> bytes:
def reduce(input: datablock, buf: bytes) -> bytes:
def finish(buf: bytes) -> output_type:
```
- first, `start()` is called to return the initial result as `bytes`
- then the input data is divided into multiple data blocks; for each block `input`, `reduce` is called with the data block `input` and the current result `buf` to generate a new intermediate result buffer
- finally, the `finish` function is called on the intermediate result `buf` and outputs zero or one row of data of type `output_type`
#### Initialization and Cleanup Interface
```Python
def init()
def destroy()
```
Implement `init` for initialization and `destroy` for termination.
### Data Mapping between TDengine SQL and Python UDF
The following table describes the mapping between TDengine SQL data types and Python UDF data types. The `NULL` value of all TDengine SQL types is mapped to the `None` value in Python.
| **TDengine SQL Data Type** | **Python Data Type** |
| :-----------------------: | ------------ |
|TINYINT / SMALLINT / INT / BIGINT | int |
|TINYINT UNSIGNED / SMALLINT UNSIGNED / INT UNSIGNED / BIGINT UNSIGNED | int |
|FLOAT / DOUBLE | float |
|BOOL | bool |
|BINARY / VARCHAR / NCHAR | bytes|
|TIMESTAMP | int |
|JSON and other types | Not Supported |
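As an illustration of this mapping, a hypothetical Python UDF declared with a string output type receives BINARY/VARCHAR/NCHAR inputs as `bytes` in `process` and must return `bytes` items (the function name and path here are illustrative):
```sql
-- TIMESTAMP inputs would arrive in process() as int epoch values.
CREATE FUNCTION pyconcat AS '/root/udf/pyconcat.py' OUTPUTTYPE VARCHAR(64) LANGUAGE 'Python';
```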
### Installing Python UDF
1. Install the Python package `taospyudf`, which executes Python UDFs:
```bash
sudo pip install taospyudf
ldconfig
```
2. If PYTHONPATH is needed to find Python packages when the Python UDF executes, set the udfdLdLibPath variable in the taos.cfg configuration file to the contents of PYTHONPATH.
### Python UDF Sample Code
#### Scalar Function [pybitand](https://github.com/taosdata/TDengine/blob/3.0/tests/script/sh/pybitand.py)
The `pybitand` function implements the bitwise AND operation across multiple columns. If there is only one column, the column is returned. The `pybitand` function ignores null values.
<details>
<summary>pybitand.py</summary>
```Python
{{#include tests/script/sh/pybitand.py}}
```
</details>
#### Aggregate Function [pyl2norm](https://github.com/taosdata/TDengine/blob/3.0/tests/script/sh/pyl2norm.py)
The `pyl2norm` function finds the second-order norm for all data in the input column. This squares the values, takes a cumulative sum, and finds the square root.
<details>
<summary>pyl2norm.py</summary>
```Python
{{#include tests/script/sh/pyl2norm.py}}
```
</details>
## Manage and Use UDF
You need to add UDF to TDengine before using it in SQL queries. For more information about how to manage UDF and how to invoke UDF, please see [Manage and Use UDF](../12-taos-sql/26-udf.md).
......@@ -72,8 +72,8 @@ database_option: {
- 0: The database can contain multiple supertables.
- 1: The database can contain only one supertable.
- STT_TRIGGER: specifies the number of flushed files that triggers a file merge. The default is 8, ranging from 1 to 16. For high-frequency scenarios with few tables, the default or a smaller value is recommended; for low-frequency scenarios with many tables, a larger value is recommended.
- TABLE_PREFIX:The prefix length in the table name that is ignored when distributing table to vnode based on table name.
- TABLE_SUFFIX:The suffix length in the table name that is ignored when distributing table to vnode based on table name.
- TABLE_PREFIX: When positive, a prefix of this length in the table name is ignored when distributing a table to a vgroup; when negative, only a prefix of this length is used. The default value is 0. For example, for the table name v30001, "0001" is used if TABLE_PREFIX is set to 2, but "v3" is used if TABLE_PREFIX is set to -2. This helps you control the distribution of tables; see the example after this list.
- TABLE_SUFFIX: When positive, a suffix of this length in the table name is ignored when distributing a table to a vgroup; when negative, only a suffix of this length is used. The default value is 0. For example, for the table name v30001, "v300" is used if TABLE_SUFFIX is set to 2, but "01" is used if TABLE_SUFFIX is set to -2. This helps you control the distribution of tables.
- TSDB_PAGESIZE: The page size of the data storage engine in a vnode. The unit is KB. The default is 4 KB. The range is 1 to 16384, that is, 1 KB to 16 MB.
- WAL_RETENTION_PERIOD: specifies the maximum time for which WAL files are kept for consumption. This parameter is used for data subscription. Enter a time in seconds. The default value is 0, which indicates that WAL files are not kept for consumption. Set it to a proper value before creating topics.
- WAL_RETENTION_SIZE: specifies the maximum total size of WAL files kept for consumption. This parameter is used for data subscription. Enter a size in KB. The default value is 0, which indicates that the total size of WAL files kept for consumption has no upper limit.
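A sketch of the prefix option, with a hypothetical database name:
```sql
-- Use only the first 2 characters of each table name when assigning vgroups,
-- so v30001 and v30002 both hash on "v3" and land in the same vgroup.
CREATE DATABASE sensor_db TABLE_PREFIX -2;
```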
......
......@@ -55,7 +55,7 @@ window_clause: {
| INTERVAL(interval_val [, interval_offset]) [SLIDING (sliding_val)] [WATERMARK(watermark_val)] [FILL(fill_mod_and_val)]
interp_clause:
RANGE(ts_val, ts_val), EVERY(every_val), FILL(fill_mod_and_val)
RANGE(ts_val, ts_val) EVERY(every_val) FILL(fill_mod_and_val)
partition_by_clause:
PARTITION BY expr [, expr] ...
......
......@@ -886,7 +886,7 @@ INTERP(expr)
- The output time range of `INTERP` is specified by the `RANGE(timestamp1, timestamp2)` clause, with timestamp1 <= timestamp2. timestamp1 is the starting point of the output time range and must be specified. timestamp2 is the ending point of the output time range and must be specified.
- The number of rows in the result set of `INTERP` is determined by the parameter `EVERY(time_unit)`. Starting from timestamp1, one interpolation is performed for every interval specified by the `time_unit` parameter. The parameter `time_unit` must be an integer, with no quotes, with a time unit of a (millisecond), s (second), m (minute), h (hour), d (day), or w (week). For example, `EVERY(500a)` interpolates every 500 milliseconds.
- Interpolation is performed based on the `FILL` parameter. For more information about the FILL clause, see [FILL Clause](../distinguished/#fill-clause).
- `INTERP` can only be used to interpolate in single timeline. So it must be used with `partition by tbname` when it's used on a STable.
- `INTERP` can be applied to a supertable, in which case it interpolates over the data of all its child tables sorted by primary key. It can also be used with `partition by tbname` on a supertable to generate an interpolation on each single timeline; see the example after this list.
- The pseudocolumn `_irowts` can be used along with `INTERP` to return the timestamps associated with interpolation points (supported since version 3.0.2.0).
- The pseudocolumn `_isfilled` can be used along with `INTERP` to indicate whether a result is an original record or a data point generated by the interpolation algorithm (supported since version 3.0.3.0).
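A sketch against the `meters` supertable used throughout the TDengine examples (the time range is illustrative):
```sql
SELECT _irowts, _isfilled, INTERP(current)
FROM meters
PARTITION BY tbname
RANGE('2023-01-01 00:00:00', '2023-01-01 00:00:10')
EVERY(1s)
FILL(LINEAR);
```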
......
......@@ -147,7 +147,7 @@ In both of these methods, configuring the watermark is essential for obtaining a
## Supported functions
All [scalar functions](../function/#scalar-functions) are available in stream processing. All [System information functions](../function/#system-information-functions) are <b>not</b> allowed in stream processing. All [Aggregate functions](../function/#aggregate-functions) and [Selection functions](../function/#selection-functions) are available in stream processing, except the followings:
All [scalar functions](../function/#scalar-functions) are available in stream processing. All [Aggregate functions](../function/#aggregate-functions) and [Selection functions](../function/#selection-functions) are available in stream processing, except the following:
- [leastsquares](../function/#leastsquares)
- [percentile](../function/#percentile)
- [top](../function/#top)
......
......@@ -120,6 +120,9 @@ Provides information about user-defined functions.
| 5 | create_time | TIMESTAMP | Creation time |
| 6 | code_len | INT | Length of the source code |
| 7 | bufsize | INT | Buffer size |
| 8 | func_language | BINARY(31) | UDF programming language |
| 9 | func_body | BINARY(16384) | UDF function body |
| 10 | func_version | INT | UDF function version, starting from 0 and incremented by 1 each time the UDF is updated |
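For example, registered UDFs and their versions can be inspected with a query along these lines (assuming the table also exposes a `name` column, as the other INS_ tables do):
```sql
SELECT name, func_language, func_version, bufsize FROM information_schema.ins_functions;
```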
## INS_INDEXES
......
......@@ -129,6 +129,14 @@ SHOW QNODES;
Shows information about qnodes in the system.
## SHOW QUERIES
```sql
SHOW QUERIES;
```
Shows the queries in progress in the system.
## SHOW SCORES
```sql
......
......@@ -7,17 +7,18 @@ description: This document describes the SQL statements related to user-defined
You can create user-defined functions and import them into TDengine.
## Create UDF
SQL command can be executed on the host where the generated UDF DLL resides to load the UDF DLL into TDengine. This operation cannot be done through REST interface or web console. Once created, any client of the current TDengine can use these UDF functions in their SQL commands. UDF are stored in the management node of TDengine. The UDFs loaded in TDengine would be still available after TDengine is restarted.
SQL command can be executed on the host where the generated UDF DLL resides to load the UDF DLL into TDengine. This operation cannot be done through the REST interface or web console. Once created, any client of the current TDengine can use these UDFs in their SQL commands. UDFs are stored in the management node of TDengine and remain available after TDengine is restarted.
When creating a UDF, its type, i.e. scalar function or aggregate function, must be specified. If the wrong type is specified, SQL statements using the function fail with errors. The input data type and output data type must be consistent with the UDF definition.
- Create Scalar Function
```sql
CREATE FUNCTION function_name AS library_path OUTPUTTYPE output_type;
CREATE [OR REPLACE] FUNCTION function_name AS library_path OUTPUTTYPE output_type [LANGUAGE 'C|Python'];
```
- function_name: The scalar function name to be used in SQL statement which must be consistent with the UDF name and is also the name of the compiled DLL (.so file).
- library_path: The absolute path of the DLL file including the name of the shared object file (.so). The path must be quoted with single or double quotes.
- OR REPLACE: if the UDF already exists, its properties are modified
- function_name: The scalar function name to be used in the SQL statement
- LANGUAGE 'C|Python': the programming language of the UDF. Currently C and Python are supported. If this clause is omitted, C is assumed.
- library_path: For C, the absolute path of the DLL file including the name of the shared object file (.so); for Python, the absolute path of the Python UDF script. The path must be quoted with single or double quotes.
- output_type: The data type of the results of the UDF.
For example, the following SQL statement can be used to create a UDF from `libbitand.so`.
......@@ -25,14 +26,20 @@ CREATE FUNCTION function_name AS library_path OUTPUTTYPE output_type;
```sql
CREATE FUNCTION bit_and AS "/home/taos/udf_example/libbitand.so" OUTPUTTYPE INT;
```
For example, the following SQL statement can be used to modify the existing function `bit_and`. The output type is changed to BIGINT and the programming language is changed to Python.
```sql
CREATE OR REPLACE FUNCTION bit_and AS "/home/taos/udf_example/bit_and.py" OUTPUTTYPE BIGINT LANGUAGE 'Python';
```
- Create Aggregate Function
```sql
CREATE AGGREGATE FUNCTION function_name AS library_path OUTPUTTYPE output_type [ BUFSIZE buffer_size ];
```
- function_name: The aggregate function name to be used in SQL statement which must be consistent with the udfNormalFunc name and is also the name of the compiled DLL (.so file).
- library_path: The absolute path of the DLL file including the name of the shared object file (.so). The path must be quoted with single or double quotes.
- OR REPLACE: if the UDF already exists, its properties are modified
- function_name: The aggregate function name to be used in the SQL statement
- LANGUAGE 'C|Python': the programming language of the UDF. Currently C and Python are supported. If this clause is omitted, C is assumed.
- library_path: For C, the absolute path of the DLL file including the name of the shared object file (.so); for Python, the absolute path of the Python UDF script. The path must be quoted with single or double quotes.
- output_type: The output data type, the value is the literal string of the supported TDengine data type.
- buffer_size: The size of the intermediate buffer in bytes. This parameter is optional.
......@@ -41,6 +48,11 @@ CREATE AGGREGATE FUNCTION function_name AS library_path OUTPUTTYPE output_type [
```sql
CREATE AGGREGATE FUNCTION l2norm AS "/home/taos/udf_example/libl2norm.so" OUTPUTTYPE DOUBLE bufsize 8;
```
For example, the following SQL statement modifies the buffer size of the existing UDF `l2norm` to 64:
```sql
CREATE AGGREGATE FUNCTION l2norm AS "/home/taos/udf_example/libl2norm.so" OUTPUTTYPE DOUBLE bufsize 64;
```
For more information about user-defined functions, see [User-Defined Functions](/develop/udf).
## Manage UDF
......@@ -61,9 +73,9 @@ SHOW FUNCTIONS;
## Call UDF
The function name specified when creating UDF can be used directly in SQL statements, just like builtin functions. For example:
The function name specified when creating UDF can be used directly in SQL statements, just like built-in functions. For example:
```sql
SELECT bit_and(c1,c2) FROM table;
```
The above SQL statement invokes function X for column c1 and c2 on table. You can use query keywords like WHERE with user-defined functions.
The above SQL statement invokes the function bit_and for columns c1 and c2 on the table. You can use query keywords like WHERE with user-defined functions.
......@@ -11,6 +11,7 @@ import TabItem from '@theme/TabItem';
import Preparition from "./_preparation.mdx"
import RustInsert from "../../07-develop/03-insert-data/_rust_sql.mdx"
import RustBind from "../../07-develop/03-insert-data/_rust_stmt.mdx"
import RustSml from "../../07-develop/03-insert-data/_rust_schemaless.mdx"
import RustQuery from "../../07-develop/04-query-data/_rust.mdx"
[![Crates.io](https://img.shields.io/crates/v/taos)](https://crates.io/crates/taos) ![Crates.io](https://img.shields.io/crates/d/taos) [![docs.rs](https://img.shields.io/docsrs/taos)](https://docs.rs/taos)
......@@ -232,6 +233,10 @@ There are two ways to query data: Using built-in types or the [serde](https://se
<RustBind />
#### Schemaless Write
<RustSml />
### Query data
<RustQuery />
......
......@@ -76,6 +76,7 @@ Usage: taosdump [OPTION...] dbname [tbname ...]
-A, --all-databases Dump all databases.
-D, --databases=DATABASES Dump listed databases. Use comma to separate
database names.
-e, --escape-character Use escaped character for database name
-N, --without-property Dump database without its properties.
-s, --schemaonly Only dump table schemas.
-y, --answer-yes Input yes for prompt. It will skip data file
......
......@@ -12,8 +12,8 @@ After TDengine starts, it automatically writes many metrics in specific interval
To deploy TDinsight, we need
- a single-node TDengine server or a multi-node TDengine cluster and a [Grafana] server are required. This dashboard requires TDengine 3.0.1.0 and above, with the monitoring feature enabled. For detailed configuration, please refer to [TDengine monitoring configuration](../config/#monitoring-parameters).
- taosAdapter has been instaleld and running, please refer to [taosAdapter](../taosadapter).
- taosKeeper has been installed and running, please refer to [taosKeeper](../taoskeeper).
- taosAdapter has been installed and running, please refer to [taosAdapter](../taosadapter).
- taosKeeper has been installed and running, please refer to [taosKeeper](../taosKeeper).
Please record
- The endpoint of taosAdapter REST service, for example `http://tdengine.local:6041`
......
......@@ -35,7 +35,7 @@ Please refer to the [official documentation](https://grafana.com/grafana/downloa
### TDengine
Download the latest TDengine-server from the [Downloads](http://tdengine.com/en/all-downloads/) page on the TAOSData website and install it.
Download and install the [latest version of TDengine](https://docs.tdengine.com/releases/tdengine/).
## Data Connection Setup
......
......@@ -38,7 +38,7 @@ Please refer to the [official documentation](https://grafana.com/grafana/downloa
### Install TDengine
Download the latest TDengine-server from the [Downloads](http://tdengine.com/en/all-downloads/) page on the TAOSData website and install it.
Download and install the [latest version of TDengine](https://docs.tdengine.com/releases/tdengine/).
## Data Connection Setup
......
......@@ -32,7 +32,7 @@ TDengine 3.0 is not compatible with the configuration and data files from previo
2. Run `sudo rm -rf /var/log/taos/` to delete your log files.
3. Run `sudo rm -rf /var/lib/taos/` to delete your data files.
4. Install TDengine 3.0.
5. For assistance in migrating data to TDengine 3.0, contact [TDengine Support](https://tdengine.com/support).
5. For assistance in migrating data to TDengine 3.0, contact [TDengine Support](https://tdengine.com/support/).
### 2. How can I resolve the "Unable to establish connection" error?
......
......@@ -8,7 +8,7 @@
#include <sys/time.h>
#include <taos.h>
typedef int16_t VarDataLenT;
typedef uint16_t VarDataLenT;
#define TSDB_NCHAR_SIZE sizeof(int32_t)
#define VARSTR_HEADER_SIZE sizeof(VarDataLenT)
......
......@@ -6,7 +6,7 @@
#include <string.h>
#include <taos.h>
typedef int16_t VarDataLenT;
typedef uint16_t VarDataLenT;
#define TSDB_NCHAR_SIZE sizeof(int32_t)
#define VARSTR_HEADER_SIZE sizeof(VarDataLenT)
......
......@@ -22,7 +22,7 @@
<dependency>
<groupId>com.taosdata.jdbc</groupId>
<artifactId>taos-jdbcdriver</artifactId>
<version>3.1.0</version>
<version>3.2.1</version>
</dependency>
<!-- ANCHOR_END: dep-->
<dependency>
......
package com.taos.example;
import com.taosdata.jdbc.tmq.ConsumerRecord;
import com.taosdata.jdbc.tmq.ConsumerRecords;
import com.taosdata.jdbc.tmq.TMQConstants;
import com.taosdata.jdbc.tmq.TaosConsumer;
......@@ -64,7 +65,8 @@ public class SubscribeDemo {
consumer.subscribe(Collections.singletonList(TOPIC));
while (!shutdown.get()) {
ConsumerRecords<Meters> meters = consumer.poll(Duration.ofMillis(100));
for (Meters meter : meters) {
for (ConsumerRecord<Meters> record : meters) {
    Meters meter = record.value();
System.out.println(meter);
}
}
......
use taos_query::common::SchemalessPrecision;
use taos_query::common::SchemalessProtocol;
use taos_query::common::SmlDataBuilder;
use crate::AsyncQueryable;
use crate::AsyncTBuilder;
use crate::TaosBuilder;
async fn put_json() -> anyhow::Result<()> {
// std::env::set_var("RUST_LOG", "taos=trace");
std::env::set_var("RUST_LOG", "taos=debug");
pretty_env_logger::init();
let dsn =
std::env::var("TDENGINE_ClOUD_DSN").unwrap_or("http://localhost:6041".to_string());
log::debug!("dsn: {:?}", &dsn);
let client = TaosBuilder::from_dsn(dsn)?.build().await?;
let db = "demo_schemaless_ws";
client.exec(format!("drop database if exists {db}")).await?;
client
.exec(format!("create database if not exists {db}"))
.await?;
// should specify database before insert
client.exec(format!("use {db}")).await?;
// SchemalessProtocol::Json
let data = [
r#"[{"metric": "meters.current", "timestamp": 1681345954000, "value": 10.3, "tags": {"location": "California.SanFrancisco", "groupid": 2}}, {"metric": "meters.voltage", "timestamp": 1648432611249, "value": 219, "tags": {"location": "California.LosAngeles", "groupid": 1}}, {"metric": "meters.current", "timestamp": 1648432611250, "value": 12.6, "tags": {"location": "California.SanFrancisco", "groupid": 2}}, {"metric": "meters.voltage", "timestamp": 1648432611250, "value": 221, "tags": {"location": "California.LosAngeles", "groupid": 1}}]"#
]
.map(String::from)
.to_vec();
// demo with all fields
let sml_data = SmlDataBuilder::default()
.protocol(SchemalessProtocol::Json)
.precision(SchemalessPrecision::Millisecond)
.data(data.clone())
.ttl(1000)
.req_id(300u64)
.build()?;
assert_eq!(client.put(&sml_data).await?, ());
// demo with default precision
let sml_data = SmlDataBuilder::default()
.protocol(SchemalessProtocol::Json)
.data(data.clone())
.ttl(1000)
.req_id(301u64)
.build()?;
assert_eq!(client.put(&sml_data).await?, ());
// demo with default ttl
let sml_data = SmlDataBuilder::default()
.protocol(SchemalessProtocol::Json)
.data(data.clone())
.req_id(302u64)
.build()?;
assert_eq!(client.put(&sml_data).await?, ());
// demo with default req_id
let sml_data = SmlDataBuilder::default()
.protocol(SchemalessProtocol::Json)
.data(data.clone())
.build()?;
assert_eq!(client.put(&sml_data).await?, ());
client.exec(format!("drop database if exists {db}")).await?;
Ok(())
}
use taos_query::common::SchemalessPrecision;
use taos_query::common::SchemalessProtocol;
use taos_query::common::SmlDataBuilder;
use crate::AsyncQueryable;
use crate::AsyncTBuilder;
use crate::TaosBuilder;
async fn put_line() -> anyhow::Result<()> {
// std::env::set_var("RUST_LOG", "taos=trace");
std::env::set_var("RUST_LOG", "taos=debug");
pretty_env_logger::init();
let dsn =
std::env::var("TDENGINE_ClOUD_DSN").unwrap_or("http://localhost:6041".to_string());
log::debug!("dsn: {:?}", &dsn);
let client = TaosBuilder::from_dsn(dsn)?.build().await?;
let db = "demo_schemaless_ws";
client.exec(format!("drop database if exists {db}")).await?;
client
.exec(format!("create database if not exists {db}"))
.await?;
// should specify database before insert
client.exec(format!("use {db}")).await?;
let data = [
"measurement,host=host1 field1=2i,field2=2.0 1577837300000",
"measurement,host=host1 field1=2i,field2=2.0 1577837400000",
"measurement,host=host1 field1=2i,field2=2.0 1577837500000",
"measurement,host=host1 field1=2i,field2=2.0 1577837600000",
]
.map(String::from)
.to_vec();
// demo with all fields
let sml_data = SmlDataBuilder::default()
.protocol(SchemalessProtocol::Line)
.precision(SchemalessPrecision::Millisecond)
.data(data.clone())
.ttl(1000)
.req_id(100u64)
.build()?;
assert_eq!(client.put(&sml_data).await?, ());
// demo with default ttl
let sml_data = SmlDataBuilder::default()
.protocol(SchemalessProtocol::Line)
.precision(SchemalessPrecision::Millisecond)
.data(data.clone())
.req_id(101u64)
.build()?;
assert_eq!(client.put(&sml_data).await?, ());
// demo with default ttl and req_id
let sml_data = SmlDataBuilder::default()
.protocol(SchemalessProtocol::Line)
.precision(SchemalessPrecision::Millisecond)
.data(data.clone())
.build()?;
assert_eq!(client.put(&sml_data).await?, ());
// demo with default precision
let sml_data = SmlDataBuilder::default()
.protocol(SchemalessProtocol::Line)
.data(data)
.req_id(103u64)
.build()?;
assert_eq!(client.put(&sml_data).await?, ());
client.exec(format!("drop database if exists {db}")).await?;
Ok(())
}
use taos_query::common::SchemalessPrecision;
use taos_query::common::SchemalessProtocol;
use taos_query::common::SmlDataBuilder;
use crate::AsyncQueryable;
use crate::AsyncTBuilder;
use crate::TaosBuilder;
async fn put_telnet() -> anyhow::Result<()> {
// std::env::set_var("RUST_LOG", "taos=trace");
std::env::set_var("RUST_LOG", "taos=debug");
pretty_env_logger::init();
let dsn =
std::env::var("TDENGINE_ClOUD_DSN").unwrap_or("http://localhost:6041".to_string());
log::debug!("dsn: {:?}", &dsn);
let client = TaosBuilder::from_dsn(dsn)?.build().await?;
let db = "demo_schemaless_ws";
client.exec(format!("drop database if exists {db}")).await?;
client
.exec(format!("create database if not exists {db}"))
.await?;
// should specify database before insert
client.exec(format!("use {db}")).await?;
let data = [
"meters.current 1648432611249 10.3 location=California.SanFrancisco group=2",
"meters.current 1648432611250 12.6 location=California.SanFrancisco group=2",
"meters.current 1648432611249 10.8 location=California.LosAngeles group=3",
"meters.current 1648432611250 11.3 location=California.LosAngeles group=3",
"meters.voltage 1648432611249 219 location=California.SanFrancisco group=2",
"meters.voltage 1648432611250 218 location=California.SanFrancisco group=2",
"meters.voltage 1648432611249 221 location=California.LosAngeles group=3",
"meters.voltage 1648432611250 217 location=California.LosAngeles group=3",
]
.map(String::from)
.to_vec();
// demo with all fields
let sml_data = SmlDataBuilder::default()
.protocol(SchemalessProtocol::Telnet)
.precision(SchemalessPrecision::Millisecond)
.data(data.clone())
.ttl(1000)
.req_id(200u64)
.build()?;
assert_eq!(client.put(&sml_data).await?, ());
// demo with default precision
let sml_data = SmlDataBuilder::default()
.protocol(SchemalessProtocol::Telnet)
.data(data.clone())
.ttl(1000)
.req_id(201u64)
.build()?;
assert_eq!(client.put(&sml_data).await?, ());
// demo with default ttl
let sml_data = SmlDataBuilder::default()
.protocol(SchemalessProtocol::Telnet)
.data(data.clone())
.req_id(202u64)
.build()?;
assert_eq!(client.put(&sml_data).await?, ());
// demo with default req_id
let sml_data = SmlDataBuilder::default()
.protocol(SchemalessProtocol::Telnet)
.data(data.clone())
.build()?;
assert_eq!(client.put(&sml_data).await?, ());
client.exec(format!("drop database if exists {db}")).await?;
Ok(())
}
......@@ -45,7 +45,7 @@ async fn main() -> anyhow::Result<()> {
taos.exec_many([
format!("DROP TOPIC IF EXISTS tmq_meters"),
format!("DROP DATABASE IF EXISTS `{db}`"),
format!("CREATE DATABASE `{db}`"),
format!("CREATE DATABASE `{db}` WAL_RETENTION_PERIOD 3600"),
format!("USE `{db}`"),
// create super table
format!("CREATE TABLE `meters` (`ts` TIMESTAMP, `current` FLOAT, `voltage` INT, `phase` FLOAT) TAGS (`groupid` INT, `location` BINARY(24))"),
......
......@@ -207,6 +207,8 @@ Active: inactive (dead)
- Check TDengine Server status: `sudo launchctl list | grep taosd`
- Check TDengine Server status details: `launchctl print system/com.tdengine.taosd`
:::info
- Managing `com.tdengine.taosd` with the `launchctl` command requires administrator privileges; always prefix it with `sudo` to enhance security.
......
```rust
{{#include docs/examples/rust/nativeexample/examples/schemaless_insert_line.rs}}
```
......@@ -25,7 +25,8 @@ import CDemo from "./_sub_c.mdx";
This document does not cover the basics of message queues themselves; if you need background on them, please look it up separately.
Note: By default, data is consumed from the WAL. If the WAL is deleted, the consumed data will be incomplete; in that case you can set the parameter experimental.snapshot.enable to true to obtain all data from the TSDB, but then the consumption order of the data can no longer be guaranteed. It is therefore recommended to set a reasonable WAL retention policy based on your consumption behavior, so that all data can be subscribed to from the WAL.
Note: Data subscription consumes data from the WAL. If some WAL files are deleted according to the WAL retention policy, the data in those files can no longer be consumed. Set reasonable values for `WAL_RETENTION_PERIOD` or `WAL_RETENTION_SIZE` when creating the database according to your business needs, and make sure your application consumes data in a timely manner to avoid data loss. Data subscription behaves similarly to Kafka and other widely used message queue products.
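For example, both limits can be set when the database is created; a sketch with illustrative values and a hypothetical database name:
```sql
-- Retain WAL files for one day and cap their total size at 512 MB (in KB).
CREATE DATABASE db_sub WAL_RETENTION_PERIOD 86400 WAL_RETENTION_SIZE 524288;
```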
## Main Data Structures and APIs
The TMQ subscription APIs and data structures for each language are as follows:
......@@ -293,7 +294,6 @@ CREATE TOPIC topic_name AS DATABASE db_name;
| `auto.offset.reset` | enum | Initial position of the consumer group subscription | <br />`earliest`: default; subscribe from the beginning; <br/>`latest`: subscribe only from the latest data; <br/>`none`: cannot subscribe without a committed offset |
| `enable.auto.commit` | boolean | Whether to enable automatic offset commits; true: commit automatically, the client application does not need to commit; false: the client application commits on its own | Default value is true |
| `auto.commit.interval.ms` | integer | Interval at which consumed offsets are automatically committed, in milliseconds | Default value is 5000 |
| `experimental.snapshot.enable` | boolean | Whether data can be consumed from the TSDB. When disabled, only data still retained in the WAL under the WAL retention policy can be consumed; when enabled, data that has been deleted from the WAL but persisted to the TSDB can also be consumed | Experimental feature, disabled by default |
| `msg.with.table.name` | boolean | Whether table names can be parsed from messages; not applicable to column subscription (with column subscription, tbname can be written into the subquery as a column) | Disabled by default |
These parameters are set as follows in each programming language:
......@@ -311,7 +311,6 @@ tmq_conf_set(conf, "group.id", "cgrpName");
tmq_conf_set(conf, "td.connect.user", "root");
tmq_conf_set(conf, "td.connect.pass", "taosdata");
tmq_conf_set(conf, "auto.offset.reset", "earliest");
tmq_conf_set(conf, "experimental.snapshot.enable", "true");
tmq_conf_set(conf, "msg.with.table.name", "true");
tmq_conf_set_auto_commit_cb(conf, tmq_commit_cb_print, NULL);
......@@ -367,7 +366,6 @@ conf := &tmq.ConfigMap{
"td.connect.port": "6030",
"client.id": "test_tmq_c",
"enable.auto.commit": "false",
"experimental.snapshot.enable": "true",
"msg.with.table.name": "true",
}
consumer, err := NewConsumer(conf)
......@@ -417,7 +415,6 @@ consumer = Consumer({"group.id": "local", "td.connect.ip": "127.0.0.1"})
| `enable.auto.commit` | string | Enable automatic commits | Valid values: `true`, `false` |
| `auto.commit.interval.ms` | string | Interval for automatic commits, in milliseconds | Default: 5000 ms |
| `auto.offset.reset` | string | Initial position of the consumer group subscription | Options: `earliest` (default), `latest`, `none` |
| `experimental.snapshot.enable` | string | Whether data can be consumed from the TSDB | Valid values: `true`, `false` |
</TabItem>
......
......@@ -6,18 +6,20 @@ description: "支持用户编码的聚合函数和标量函数,在查询中嵌
In some use cases, the queries required by application logic cannot be expressed directly with built-in functions. With UDF (User Defined Function) support, TDengine can insert user-written processing code and use it in queries, conveniently meeting the needs of special use cases. A UDF usually takes one column of a data table as its input, and also supports using the result of a nested subquery as input.
TDengine supports defining UDFs in C/C++. The following sections explain UDF usage with examples.
Users can implement two kinds of functions with UDF: scalar functions and aggregate functions. A scalar function outputs one value for each row of data, such as the absolute value abs, the sine function sin, and the string concatenation function concat. An aggregate function outputs one value for multiple rows of data, such as the average avg and the maximum max.
When implementing a UDF, the specified interface functions must be implemented.
TDengine supports defining UDFs in C or Python. The following sections explain UDF usage with examples.
## Implementing a UDF in C
When implementing a UDF in C, the specified interface functions must be implemented:
- A scalar function must implement the scalar interface function scalarfn.
- An aggregate function must implement the aggregate interface functions aggfn_start, aggfn, and aggfn_finish.
- If initialization is needed, implement udf_init; if cleanup is needed, implement udf_destroy.
The interface function names are the UDF name, or the UDF name concatenated with a specific suffix (_start, _finish, _init, _destroy). scalarfn, aggfn, and udf in the list must be replaced with the UDF function name.
## Implementing a Scalar Function
### Implementing a Scalar Function in C
The scalar function implementation template is as follows:
```c
#include "taos.h"
......@@ -49,7 +51,7 @@ int32_t scalarfn_destroy() {
```
scalarfn is a placeholder for the function name and must be replaced with the actual function name, such as bit_and.
## Implementing an Aggregate Function
### Implementing an Aggregate Function in C
The aggregate function implementation template is as follows:
```c
......@@ -100,7 +102,7 @@ int32_t aggfn_destroy() {
```
aggfn is a placeholder for the function name and must be replaced with your own function name, such as l2norm.
## Interface Function Definitions
### C UDF Interface Function Definitions
The interface function names are the udf name, or the udf name concatenated with a specific suffix (_start, _finish, _init, _destroy). scalarfn, aggfn, and udf in the following descriptions must be replaced with the UDF function name.
......@@ -108,7 +110,7 @@ aggfn为函数名的占位符,需要修改为自己的函数名,如l2norm。
The parameter types of the interface functions are given in the data structure definitions.
### Scalar Interface Function
#### Scalar Function Interface
`int32_t scalarfn(SUdfDataBlock* inputDataBlock, SUdfColumn *resultColumn)`
......@@ -118,7 +120,7 @@ aggfn为函数名的占位符,需要修改为自己的函数名,如l2norm。
- inputDataBlock: the input data block
- resultColumn: the output column
### Aggregate Interface Functions
#### Aggregate Function Interface
`int32_t aggfn_start(SUdfInterBuf *interBuf)`
......@@ -135,7 +137,7 @@ aggfn为函数名的占位符,需要修改为自己的函数名,如l2norm。
- result: the final result.
### UDF Initialization and Destruction
#### Initialization and Cleanup Interface
`int32_t udf_init()`
`int32_t udf_destroy()`
......@@ -143,7 +145,7 @@ aggfn为函数名的占位符,需要修改为自己的函数名,如l2norm。
udf is a placeholder for the function name. udf_init performs initialization and udf_destroy performs cleanup. If there is no initialization work, there is no need to define udf_init; if there is no cleanup work, there is no need to define udf_destroy.
## UDF Data Structures
### C UDF Data Structures
```c
typedef struct SUdfColumnMeta {
int16_t type;
......@@ -201,7 +203,7 @@ typedef struct SUdfInterBuf {
To make these data structures easier to work with, some convenience functions are provided, defined in taosudf.h.
## Compiling UDF
### Compiling a C UDF
The C source code of a user-defined function cannot be used by TDengine directly; it must first be compiled into a dynamically linked library and then loaded into TDengine.
......@@ -213,12 +215,9 @@ gcc -g -O0 -fPIC -shared bit_and.c -o libbitand.so
This produces the dynamically linked library libbitand.so, which can be used when creating the UDF later. To ensure reliable operation, GCC 7.5 or later is recommended.
## Managing and Using UDF
A compiled UDF must be added to the system before it can be called from SQL. For how to manage and use UDFs, see [UDF Usage Instructions](../12-taos-sql/26-udf.md)
### C UDF Sample Code
## Sample Code
### Scalar function example [bit_and](https://github.com/taosdata/TDengine/blob/develop/tests/script/sh/bit_and.c)
#### Scalar function example [bit_and](https://github.com/taosdata/TDengine/blob/3.0/tests/script/sh/bit_and.c)
bit_and implements the bitwise AND operation across multiple columns. If there is only one column, that column is returned. bit_and ignores null values.
......@@ -231,7 +230,7 @@ bit_add 实现多列的按位与功能。如果只有一列,返回这一列。
</details>
### Aggregate function example 1, numeric return value: [l2norm](https://github.com/taosdata/TDengine/blob/develop/tests/script/sh/l2norm.c)
#### Aggregate function example 1, numeric return value: [l2norm](https://github.com/taosdata/TDengine/blob/3.0/tests/script/sh/l2norm.c)
l2norm computes the second-order norm of all data in the input column: each value is squared, the squares are summed, and the square root of the sum is taken.
......@@ -244,7 +243,7 @@ l2norm 实现了输入列的所有数据的二阶范数,即对每个数据先
</details>
### Aggregate function example 2, string return value: [max_vol](https://github.com/taosdata/TDengine/blob/develop/tests/script/sh/max_vol.c)
#### Aggregate function example 2, string return value: [max_vol](https://github.com/taosdata/TDengine/blob/3.0/tests/script/sh/max_vol.c)
max_vol finds the maximum voltage among multiple input voltage columns and returns a combined string value consisting of the device ID + the (row, column) position of the maximum voltage + the maximum voltage value.
......@@ -268,4 +267,125 @@ select max_vol(vol1,vol2,vol3,deviceid) from battery;
{{#include tests/script/sh/max_vol.c}}
```
</details>
\ No newline at end of file
</details>
## Implementing a UDF in Python
When implementing a UDF in Python, the specified interface functions must be implemented:
- A scalar function must implement the scalar interface function process.
- An aggregate function must implement the aggregate interface functions start, reduce, and finish.
- If initialization is needed, implement init; if cleanup is needed, implement destroy.
### Implementing a Scalar Function in Python
The scalar function implementation template is as follows:
```Python
def init():
    # initialization
    pass

def destroy():
    # cleanup
    pass

def process(input: datablock) -> tuple[output_type]:
    # process the input datablock;
    # datablock.data(row, col) accesses the Python object at (row, col);
    # return a tuple of objects of the output type
    pass
```
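Saved to a file, a scalar template like this can be registered from SQL; a sketch with a hypothetical path and name:
```sql
CREATE FUNCTION myfunc AS '/root/udf/myfunc.py' OUTPUTTYPE BIGINT LANGUAGE 'Python';
```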
### Implementing an Aggregate Function in Python
The aggregate function implementation template is as follows:
```Python
def init():
    # initialization
    pass

def destroy():
    # cleanup
    pass

def start() -> bytes:
    # return serialize(init_state)
    pass

def reduce(inputs: datablock, buf: bytes) -> bytes:
    # deserialize buf into state,
    # reduce the inputs and state into new_state
    # (use inputs.data(i, j) to access the Python object at (i, j)),
    # then serialize new_state into new_state_bytes
    return new_state_bytes

def finish(buf: bytes) -> output_type:
    # return an object of the output type
    pass
```
### Python UDF Interface Function Definitions
#### Scalar Function Interface
```Python
def process(input: datablock) -> tuple[output_type]:
```
- input: a datablock is like a two-dimensional matrix; its member method data(row, col) returns the Python object located at row `row`, column `col`
- the return value is a tuple of Python objects, each of the output type
#### Aggregate Function Interface
```Python
def start() -> bytes:
def reduce(inputs: datablock, buf: bytes) -> bytes:
def finish(buf: bytes) -> output_type:
```
First, start is called to generate the initial result buffer; the input data is then divided into multiple row data blocks, and reduce is called for each data block inputs together with the current intermediate result buf to obtain a new intermediate result; finally, finish is called on the intermediate result buf to produce the final output, which contains only zero or one row of data.
#### Initialization and Cleanup Interface
```Python
def init()
def destroy()
```
init performs initialization and destroy performs cleanup. If there is no initialization work, there is no need to define the init function; if there is no cleanup work, there is no need to define the destroy function.
### Data Type Mapping between TDengine and Python
The following table describes the mapping between TDengine SQL data types and Python data types. A NULL value of any TDengine SQL type is mapped to Python's None value.
| **TDengine SQL Data Type** | **Python Data Type** |
| :-----------------------: | ------------ |
|TINYINT / SMALLINT / INT / BIGINT | int |
|TINYINT UNSIGNED / SMALLINT UNSIGNED / INT UNSIGNED / BIGINT UNSIGNED | int |
|FLOAT / DOUBLE | float |
|BOOL | bool |
|BINARY / VARCHAR / NCHAR | bytes|
|TIMESTAMP | int |
|JSON and other types | Not supported |
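One consequence of this mapping: a TIMESTAMP input arrives in `process` as a plain `int` epoch value (in the database's precision), so a hypothetical timestamp-manipulating UDF can be declared as:
```sql
CREATE FUNCTION pyshift AS '/root/udf/pyshift.py' OUTPUTTYPE TIMESTAMP LANGUAGE 'Python';
```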
### Installing the Python UDF Environment
1. Install the taospyudf package, which executes Python UDF programs:
```bash
sudo pip install taospyudf
ldconfig
```
2. 如果 Python UDF 程序执行时,通过 PYTHONPATH 引用其它的包,可以设置 taos.cfg 的 UdfdLdLibPath 变量为PYTHONPATH的内容
### Python UDF Sample Code
#### Scalar Function Example [pybitand](https://github.com/taosdata/TDengine/blob/3.0/tests/script/sh/pybitand.py)
pybitand implements bitwise AND across multiple columns. If there is only one column, it returns that column. pybitand ignores NULL values.
<details>
<summary>pybitand.py</summary>
```Python
{{#include tests/script/sh/pybitand.py}}
```
</details>
#### Aggregate Function Example [pyl2norm](https://github.com/taosdata/TDengine/blob/3.0/tests/script/sh/pyl2norm.py)
pyl2norm computes the L2 norm of all data in the input columns: each value is squared, the squares are summed, and the square root of the sum is taken.
<details>
<summary>pyl2norm.py</summary>
```Python
{{#include tests/script/sh/pyl2norm.py}}
```
</details>
## Managing and Using UDFs
A UDF must first be added to the TDengine system before it can be used. For how to manage and use UDFs, see [Managing and Using UDFs](../12-taos-sql/26-udf.md).
This diff is collapsed.
......@@ -10,6 +10,7 @@ import TabItem from '@theme/TabItem';
import Preparation from "./_preparation.mdx"
import RustInsert from "../07-develop/03-insert-data/_rust_sql.mdx"
import RustBind from "../07-develop/03-insert-data/_rust_stmt.mdx"
import RustSml from "../07-develop/03-insert-data/_rust_schemaless.mdx"
import RustQuery from "../07-develop/04-query-data/_rust.mdx"
[![Crates.io](https://img.shields.io/crates/v/taos)](https://crates.io/crates/taos) ![Crates.io](https://img.shields.io/crates/d/taos) [![docs.rs](https://img.shields.io/docsrs/taos)](https://docs.rs/taos)
......@@ -230,6 +231,10 @@ async fn demo(taos: &Taos, db: &str) -> Result<(), Error> {
<RustBind />
#### Schemaless Writing
<RustSml />
### Querying Data
<RustQuery />
......
......@@ -71,8 +71,8 @@ database_option: {
- 0: multiple supertables may be created in the database.
- 1: only one supertable may be created in the database.
- STT_TRIGGER: the number of on-disk files that triggers a file merge. The default is 1; the range is 1 to 16. For scenarios with few tables and high write frequency, the default or a smaller value is recommended; for many tables with low write frequency, a larger value is recommended.
- TABLE_PREFIX: the length of the prefix in a table name that the internal storage engine ignores when assigning the table's data to a VNODE.
- TABLE_SUFFIX: the length of the suffix in a table name that the internal storage engine ignores when assigning the table's data to a VNODE.
- TABLE_PREFIX: when positive, a prefix of the given length in the table name is ignored when deciding which vgroup the table is assigned to; when negative, only a prefix of the given length in the table name is used. For example, for table name "v30001", with TSDB_PREFIX = 2 the string "0001" decides the vgroup, and with TSDB_PREFIX = -2 the string "v3" decides it (see the sketch after this list).
- TABLE_SUFFIX: when positive, a suffix of the given length in the table name is ignored when deciding which vgroup the table is assigned to; when negative, only a suffix of the given length in the table name is used. For example, for table name "v30001", with TSDB_SUFFIX = 2 the string "v300" decides the vgroup, and with TSDB_SUFFIX = -2 the string "01" decides it.
- TSDB_PAGESIZE: the page size of a VNODE's time-series storage engine, in KB. The default is 4 KB; the range is 1 to 16384, i.e. 1 KB to 16 MB.
- WAL_RETENTION_PERIOD: the maximum additional time WAL files are retained for data subscription, in seconds. WAL cleanup is not affected by the consumption state of subscribing clients. The default is 0, meaning no extra retention for subscription. Set an appropriate retention period before creating new subscriptions.
- WAL_RETENTION_SIZE: the maximum additional cumulative size of WAL files retained for data subscription, in KB. The default is 0, meaning no cap on the cumulative size.
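The following Python sketch is our own model of the TABLE_PREFIX/TABLE_SUFFIX rules above, not TDengine's actual hashing code; the function name vgroup_key is hypothetical. It derives the portion of a table name that would be used for vgroup assignment.
```Python
# sketch: derive the portion of a table name used for vgroup assignment
def vgroup_key(name: str, prefix: int = 0, suffix: int = 0) -> str:
    # positive prefix: drop that many leading chars; negative: keep only them
    if prefix > 0:
        name = name[prefix:]
    elif prefix < 0:
        return name[:-prefix]
    # positive suffix: drop that many trailing chars; negative: keep only them
    if suffix > 0:
        name = name[:-suffix] if suffix < len(name) else ""
    elif suffix < 0:
        return name[suffix:]
    return name

assert vgroup_key("v30001", prefix=2) == "0001"
assert vgroup_key("v30001", prefix=-2) == "v3"
assert vgroup_key("v30001", suffix=2) == "v300"
assert vgroup_key("v30001", suffix=-2) == "01"
```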
......
......@@ -55,7 +55,7 @@ window_clause: {
| INTERVAL(interval_val [, interval_offset]) [SLIDING (sliding_val)] [WATERMARK(watermark_val)] [FILL(fill_mod_and_val)]
interp_clause:
RANGE(ts_val, ts_val), EVERY(every_val), FILL(fill_mod_and_val)
RANGE(ts_val, ts_val) EVERY(every_val) FILL(fill_mod_and_val)
partition_by_clause:
PARTITION BY expr [, expr] ...
......
......@@ -888,7 +888,7 @@ INTERP(expr)
- The output time range of INTERP is specified with RANGE(timestamp1, timestamp2), which must satisfy timestamp1 <= timestamp2. timestamp1 (required) is the start of the output range: if the interpolation condition is met at timestamp1, it becomes the first output record. timestamp2 (required) is the end of the output range: the timestamp of the last output record cannot be greater than timestamp2.
- INTERP determines the number of results within the output range with EVERY(time_unit): starting from timestamp1, interpolation is performed once per fixed interval of length time_unit. Supported time units are 1a (millisecond), 1s (second), 1m (minute), 1h (hour), 1d (day), and 1w (week). For example, EVERY(500a) interpolates the specified data at 500 ms intervals.
- INTERP decides how to interpolate at each qualifying time point according to the FILL clause. For FILL usage see the [FILL clause](../distinguished/#fill-子句); a rough sketch of these semantics follows this list.
- INTERP interpolates within a single time series only, so when applied to a supertable it must be used together with partition by tbname.
- When applied to a supertable, INTERP sorts all subtable data under the supertable by the primary key column and interpolates across it; it can also be combined with PARTITION BY tbname to force the result onto a single timeline per table.
- INTERP can be used with the pseudocolumn _irowts to return the timestamp of each interpolated point (supported since 3.0.2.0).
- INTERP can be used with the pseudocolumn _isfilled to indicate whether a returned row is an original record or generated by the interpolation algorithm (supported since 3.0.3.0).
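As a rough model of the RANGE/EVERY/FILL(LINEAR) semantics described above, and emphatically our own simplification rather than TDengine's implementation, the sketch below generates the output timestamp grid and linearly interpolates a single series.
```Python
# sketch: INTERP-like grid generation with FILL(LINEAR); timestamps in ms
def interp_linear(points, ts1, ts2, every):
    # points: sorted [(ts, value)] pairs for one time series
    out = []
    for t in range(ts1, ts2 + 1, every):
        prev = next((p for p in reversed(points) if p[0] <= t), None)
        nxt = next((p for p in points if p[0] >= t), None)
        if prev and nxt:
            if prev[0] == nxt[0]:
                out.append((t, prev[1]))  # exact hit: original record
            else:
                w = (t - prev[0]) / (nxt[0] - prev[0])
                out.append((t, prev[1] + w * (nxt[1] - prev[1])))
        # with FILL(LINEAR), grid points outside the data range yield no row
    return out

print(interp_linear([(0, 10.0), (1000, 20.0)], 0, 1000, 500))
# [(0, 10.0), (500, 15.0), (1000, 20.0)]
```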
......
......@@ -227,7 +227,7 @@ T = latest event time - DELETE_MARK
## Functions Supported in Stream Computing
1. All [single-row functions](../function/#单行函数) can be used in stream computing.
2. The following 19 aggregate/selection functions <b>cannot</b> be used in the SQL statement that creates a stream, and [system information functions](../function/#系统信息函数) cannot be used in stream computing either. All other types of functions can be used in stream computing.
2. The following 19 aggregate/selection functions <b>cannot</b> be used in the SQL statement that creates a stream. All other types of functions can be used in stream computing.
- [leastsquares](../function/#leastsquares)
- [percentile](../function/#percentile)
......
......@@ -120,6 +120,10 @@ TDengine has a built-in database named `INFORMATION_SCHEMA`, which provides
| 5 | create_time | TIMESTAMP | creation time |
| 6 | code_len | INT | code length |
| 7 | bufsize | INT | buffer size |
| 8 | func_language | BINARY(31) | programming language of the UDF |
| 9 | func_body | BINARY(16384) | function body definition |
| 10 | func_version | INT | function version. The initial version is 0; each replacement increases it by 1.|
## INS_INDEXES
......
......@@ -129,6 +129,14 @@ SHOW QNODES;
Displays information about the QNODEs (query nodes) in the current system.
## SHOW QUERIES
```sql
SHOW QUERIES;
```
Displays the queries currently running in the system.
## SHOW SCORES
```sql
......
......@@ -13,27 +13,34 @@ description: A detailed guide to using UDFs
- Create a scalar function
```sql
CREATE FUNCTION function_name AS library_path OUTPUTTYPE output_type;
CREATE [OR REPLACE] FUNCTION function_name AS library_path OUTPUTTYPE output_type [LANGUAGE 'C|Python'];
```
- function_name: the name by which the scalar function will be invoked in SQL; it must match the actual name of the udf in the function implementation;
- library_path: the absolute path of the dynamic library file containing the UDF implementation (the path where the library file is saved on the host of the current client, usually pointing to a .so file); the path must be enclosed in English single or double quotes;
- OR REPLACE: if the function already exists, its properties are modified.
- function_name: the name by which the scalar function will be invoked in SQL;
- LANGUAGE 'C|Python': the programming language of the function, currently C or Python. If this clause is omitted, the language defaults to C.
- library_path: if the language is C, the absolute path of the dynamic library file containing the UDF implementation (the path where the library file is saved on the host of the current client, usually pointing to a .so file); if the language is Python, the path of the Python file containing the UDF implementation. The path must be enclosed in English single or double quotes;
- output_type: the name of the data type of the function's result;
For example, the following statement creates libbitand.so as a UDF available in the system:
For example, the following statement creates libbitand.so as a UDF available in the system:
```sql
CREATE FUNCTION bit_and AS "/home/taos/udf_example/libbitand.so" OUTPUTTYPE INT;
```
For example, the following statement modifies the already defined bit_and function so that its output type is BIGINT and it is implemented in Python:
```sql
CREATE OR REPLACE FUNCTION bit_and AS "/home/taos/udf_example/bit_and.py" OUTPUTTYPE BIGINT LANGUAGE 'Python';
```
- Create an aggregate function:
```sql
CREATE AGGREGATE FUNCTION function_name AS library_path OUTPUTTYPE output_type [ BUFSIZE buffer_size ];
CREATE [OR REPLACE] AGGREGATE FUNCTION function_name AS library_path OUTPUTTYPE output_type [ BUFSIZE buffer_size ] [LANGUAGE 'C|Python'];
```
- OR REPLACE: if the function already exists, its properties are modified.
- function_name: the name by which the aggregate function will be invoked in SQL; it must match the actual name of udfNormalFunc in the function implementation;
- library_path: the absolute path of the dynamic library file containing the UDF implementation (the path where the library file is saved on the host of the current client, usually pointing to a .so file); the path must be enclosed in English single or double quotes;
- output_type: the data type of the function's result; unlike the itype parameter of udfNormalFunc above, it is written as a type name rather than a numeric code;
- LANGUAGE 'C|Python': the programming language of the function, currently C or Python.
- library_path: if the language is C, the absolute path of the dynamic library file containing the UDF implementation (the path where the library file is saved on the host of the current client, usually pointing to a .so file); if the language is Python, the path of the Python file containing the UDF implementation. The path must be enclosed in English single or double quotes;
- output_type: the name of the data type of the function's result;
- buffer_size: the size of the buffer for intermediate results, in bytes. It may be left unset if not needed.
For example, the following statement creates libl2norm.so as a UDF available in the system:
......@@ -41,6 +48,11 @@ CREATE AGGREGATE FUNCTION function_name AS library_path OUTPUTTYPE output_type [
```sql
CREATE AGGREGATE FUNCTION l2norm AS "/home/taos/udf_example/libl2norm.so" OUTPUTTYPE DOUBLE bufsize 8;
```
For example, the following statement changes the buffer size of the already defined l2norm function to 64:
```sql
CREATE AGGREGATE FUNCTION l2norm AS "/home/taos/udf_example/libl2norm.so" OUTPUTTYPE DOUBLE bufsize 64;
```
For how to develop user-defined functions, see the [UDF Usage Guide](/develop/udf).
## Managing UDFs
......
......@@ -27,7 +27,7 @@ description: "Syntax changes in TDengine 3.0"
| - | :------- | :-------- | :------- |
| 1 | ALTER ACCOUNT | Removed | An enterprise-edition feature in 2.x, no longer supported in 3.0. The syntax is temporarily retained; executing it reports the error "This statement is no longer supported".
| 2 | ALTER ALL DNODES | Added | Modifies parameters on all DNODEs.
| 3 | ALTER DATABASE | Adjusted | <p>Removed</p><ul><li>QUORUM: the number of replica acknowledgements required for a write. 3.0 defaults to strong consistency and does not support switching to weak consistency.</li><li>BLOCKS: the number of memory blocks used by a VNODE. 3.0 uses BUFFER for the size of a VNODE's write memory pool.</li><li>UPDATE: the supported update mode. In 3.0 all databases support partial-column updates.</li><li>CACHELAST: the mode for caching the latest row of data. Replaced by CACHEMODEL in 3.0.</li><li>COMP: cannot be modified in 3.0 for now.</li></ul><p>Added</p><ul><li>CACHEMODEL: whether to cache recent subtable data in memory.</li><li>CACHESIZE: the memory size for caching recent subtable data.</li><li>WAL_FSYNC_PERIOD: replaces the former FSYNC parameter.</li><li>WAL_LEVEL: replaces the former WAL parameter.</li><li>WAL_RETENTION_PERIOD: added in 3.0.4.0; extra WAL file retention policy for data subscription.</li><li>WAL_RETENTION_SIZE: added in 3.0.4.0; extra WAL file retention policy for data subscription.</li></ul><p>Adjusted</p><ul><li>REPLICA: cannot be modified in 3.0.0 for now.</li><li>KEEP: 3.0 adds support for values with units.</li></ul>
| 3 | ALTER DATABASE | Adjusted | <p>Removed</p><ul><li>QUORUM: the number of replica acknowledgements required for a write. 3.0 defaults to strong consistency and does not support switching to weak consistency.</li><li>BLOCKS: the number of memory blocks used by a VNODE. 3.0 uses BUFFER for the size of a VNODE's write memory pool.</li><li>UPDATE: the supported update mode. In 3.0 all databases support partial-column updates.</li><li>CACHELAST: the mode for caching the latest row of data. Replaced by CACHEMODEL in 3.0.</li><li>COMP: cannot be modified in 3.0 for now.</li></ul><p>Added</p><ul><li>CACHEMODEL: whether to cache recent subtable data in memory.</li><li>CACHESIZE: the memory size for caching recent subtable data.</li><li>WAL_FSYNC_PERIOD: replaces the former FSYNC parameter.</li><li>WAL_LEVEL: replaces the former WAL parameter.</li><li>WAL_RETENTION_PERIOD: added in 3.0.4.0; extra WAL file retention policy for data subscription.</li><li>WAL_RETENTION_SIZE: added in 3.0.4.0; extra WAL file retention policy for data subscription.</li></ul><p>Adjusted</p><ul><li>KEEP: 3.0 adds support for values with units.</li></ul>
| 4 | ALTER STABLE | Adjusted | Removed<ul><li>CHANGE TAG: renames a tag column. Replaced by RENAME TAG in 3.0.<br/>Added</li><li>RENAME TAG: replaces the former CHANGE TAG clause.</li><li>COMMENT: modifies the supertable's comment.</li></ul>
| 5 | ALTER TABLE | Adjusted | Removed<ul><li>CHANGE TAG: renames a tag column. Replaced by RENAME TAG in 3.0.<br/>Added</li><li>RENAME TAG: replaces the former CHANGE TAG clause.</li><li>COMMENT: modifies the table's comment.</li><li>TTL: modifies the table's time to live.</li></ul>
| 6 | ALTER USER | Adjusted | Removed<ul><li>PRIVILEGE: modifies user privileges. 3.0 uses GRANT and REVOKE to grant and revoke privileges.<br/>Added</li><li>ENABLE: enables or disables the user.</li><li>SYSINFO: modifies whether the user can view system information.</li></ul>
......
......@@ -79,6 +79,7 @@ Usage: taosdump [OPTION...] dbname [tbname ...]
-A, --all-databases Dump all databases.
-D, --databases=DATABASES Dump inputted databases. Use comma to separate
databases' name.
-e, --escape-character Use escaped character for database name
-N, --without-property Dump database without its properties.
-s, --schemaonly Only dump tables' schema.
-y, --answer-yes Input yes for prompt. It will skip data file
......
......@@ -735,7 +735,6 @@ The valid value of charset is UTF-8.
| 16 | maxTmrCtrl | Yes | No | behavior in 3.0 unknown |
| 17 | monitorReplica | Yes | No | multiple replicas are managed by the RAFT protocol |
| 18 | smlTagNullName | Yes | No | behavior in 3.0 unknown |
| 19 | keepColumnName | Yes | No | behavior in 3.0 unknown |
| 20 | ratioOfQueryCores | Yes | No | determined by thread-pool related configuration parameters |
| 21 | maxStreamCompDelay | Yes | No | behavior in 3.0 unknown |
| 22 | maxFirstStreamCompDelay | Yes | No | behavior in 3.0 unknown |
......
......@@ -26,6 +26,10 @@ extern "C" {
#include "tgrantCfg.h"
#endif
#ifndef GRANTS_COL_MAX_LEN
#define GRANTS_COL_MAX_LEN 196
#endif
typedef enum {
TSDB_GRANT_ALL,
TSDB_GRANT_TIME,
......@@ -47,23 +51,49 @@ typedef enum {
int32_t grantCheck(EGrantType grant);
#ifndef GRANTS_CFG
#define GRANTS_SCHEMA \
static const SSysDbTableSchema grantsSchema[] = { \
{.name = "version", .bytes = 9 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, \
{.name = "expire_time", .bytes = 19 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, \
{.name = "expired", .bytes = 5 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, \
{.name = "storage", .bytes = 21 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, \
{.name = "timeseries", .bytes = 21 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, \
{.name = "databases", .bytes = 10 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, \
{.name = "users", .bytes = 10 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, \
{.name = "accounts", .bytes = 10 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, \
{.name = "dnodes", .bytes = 10 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, \
{.name = "connections", .bytes = 11 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, \
{.name = "streams", .bytes = 9 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, \
{.name = "cpu_cores", .bytes = 9 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, \
{.name = "speed", .bytes = 9 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, \
{.name = "querytime", .bytes = 9 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, \
#ifdef TD_ENTERPRISE
#define GRANTS_SCHEMA \
static const SSysDbTableSchema grantsSchema[] = { \
{.name = "version", .bytes = 9 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, \
{.name = "expire_time", .bytes = 19 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, \
{.name = "expired", .bytes = 5 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, \
{.name = "storage", .bytes = 21 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, \
{.name = "timeseries", .bytes = 21 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, \
{.name = "databases", .bytes = 10 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, \
{.name = "users", .bytes = 10 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, \
{.name = "accounts", .bytes = 10 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, \
{.name = "dnodes", .bytes = 10 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, \
{.name = "connections", .bytes = 11 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, \
{.name = "streams", .bytes = 9 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, \
{.name = "cpu_cores", .bytes = 9 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, \
{.name = "speed", .bytes = 9 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, \
{.name = "querytime", .bytes = 9 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, \
{.name = "opc_da", .bytes = GRANTS_COL_MAX_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, \
{.name = "opc_ua", .bytes = GRANTS_COL_MAX_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, \
{.name = "pi", .bytes = GRANTS_COL_MAX_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, \
{.name = "kafka", .bytes = GRANTS_COL_MAX_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, \
{.name = "influxdb", .bytes = GRANTS_COL_MAX_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, \
{.name = "mqtt", .bytes = GRANTS_COL_MAX_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, \
}
#else
#define GRANTS_SCHEMA \
static const SSysDbTableSchema grantsSchema[] = { \
{.name = "version", .bytes = 9 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, \
{.name = "expire_time", .bytes = 19 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, \
{.name = "expired", .bytes = 5 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, \
{.name = "storage", .bytes = 21 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, \
{.name = "timeseries", .bytes = 21 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, \
{.name = "databases", .bytes = 10 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, \
{.name = "users", .bytes = 10 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, \
{.name = "accounts", .bytes = 10 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, \
{.name = "dnodes", .bytes = 10 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, \
{.name = "connections", .bytes = 11 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, \
{.name = "streams", .bytes = 9 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, \
{.name = "cpu_cores", .bytes = 9 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, \
{.name = "speed", .bytes = 9 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, \
{.name = "querytime", .bytes = 9 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, \
}
#endif
#define GRANT_CFG_ADD
#define GRANT_CFG_SET
#define GRANT_CFG_GET
......
......@@ -1232,6 +1232,14 @@ typedef struct {
SEp ep;
} SDnodeEp;
typedef struct {
int32_t id;
int8_t isMnode;
SEp ep;
char active[TSDB_ACTIVE_KEY_LEN];
char connActive[TSDB_CONN_ACTIVE_KEY_LEN];
} SDnodeInfo;
typedef struct {
int64_t dnodeVer;
SDnodeCfg dnodeCfg;
......@@ -1625,6 +1633,21 @@ typedef struct {
int32_t tSerializeSDropDnodeReq(void* buf, int32_t bufLen, SDropDnodeReq* pReq);
int32_t tDeserializeSDropDnodeReq(void* buf, int32_t bufLen, SDropDnodeReq* pReq);
enum {
RESTORE_TYPE__ALL = 1,
RESTORE_TYPE__MNODE,
RESTORE_TYPE__VNODE,
RESTORE_TYPE__QNODE,
};
typedef struct {
int32_t dnodeId;
int8_t restoreType;
} SRestoreDnodeReq;
int32_t tSerializeSRestoreDnodeReq(void* buf, int32_t bufLen, SRestoreDnodeReq* pReq);
int32_t tDeserializeSRestoreDnodeReq(void* buf, int32_t bufLen, SRestoreDnodeReq* pReq);
typedef struct {
int32_t dnodeId;
char config[TSDB_DNODE_CONFIG_LEN];
......
......@@ -178,6 +178,7 @@ enum {
// TD_DEF_MSG_TYPE(TDMT_MND_STREAM_BEGIN_CHECKPOINT, "stream-begin-checkpoint", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_MND_MAX_MSG, "mnd-max", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_MND_BALANCE_VGROUP_LEADER, "balance-vgroup-leader", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_MND_RESTORE_DNODE, "restore-dnode", NULL, NULL)
TD_NEW_MSG_SEG(TDMT_VND_MSG)
TD_DEF_MSG_TYPE(TDMT_VND_SUBMIT, "submit", SSubmitReq, SSubmitRsp)
......@@ -225,7 +226,6 @@ enum {
TD_DEF_MSG_TYPE(TDMT_VND_COMMIT, "vnode-commit", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_VND_CREATE_INDEX, "vnode-create-index", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_VND_DROP_INDEX, "vnode-drop-index", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_VND_DISABLE_WRITE, "vnode-disable-write", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_VND_MAX_MSG, "vnd-max", NULL, NULL)
......
This diff is collapsed.
......@@ -99,11 +99,11 @@ typedef struct SSubsidiaryResInfo {
} SSubsidiaryResInfo;
typedef struct SResultDataInfo {
int16_t precision;
int16_t scale;
int16_t type;
int16_t bytes;
int32_t interBufSize;
int16_t precision;
int16_t scale;
int16_t type;
uint16_t bytes;
int32_t interBufSize;
} SResultDataInfo;
#define GET_RES_INFO(ctx) ((ctx)->resultInfo)
......
......@@ -350,6 +350,11 @@ typedef struct SDropComponentNodeStmt {
int32_t dnodeId;
} SDropComponentNodeStmt;
typedef struct SRestoreComponentNodeStmt {
ENodeType type;
int32_t dnodeId;
} SRestoreComponentNodeStmt;
typedef struct SCreateTopicStmt {
ENodeType type;
char topicName[TSDB_TABLE_NAME_LEN];
......
......@@ -211,6 +211,10 @@ typedef enum ENodeType {
QUERY_NODE_SHOW_DB_ALIVE_STMT,
QUERY_NODE_SHOW_CLUSTER_ALIVE_STMT,
QUERY_NODE_BALANCE_VGROUP_LEADER_STMT,
QUERY_NODE_RESTORE_DNODE_STMT,
QUERY_NODE_RESTORE_QNODE_STMT,
QUERY_NODE_RESTORE_MNODE_STMT,
QUERY_NODE_RESTORE_VNODE_STMT,
// logic plan node
QUERY_NODE_LOGIC_PLAN_SCAN = 1000,
......
......@@ -185,6 +185,7 @@ typedef struct SMergeLogicNode {
int32_t numOfChannels;
int32_t srcGroupId;
bool groupSort;
bool ignoreGroupId;
} SMergeLogicNode;
typedef enum EWindowType {
......@@ -444,6 +445,7 @@ typedef struct SMergePhysiNode {
int32_t numOfChannels;
int32_t srcGroupId;
bool groupSort;
bool ignoreGroupId;
} SMergePhysiNode;
typedef struct SWinodwPhysiNode {
......
......@@ -114,7 +114,7 @@ STableDataCxt* smlInitTableDataCtx(SQuery* query, STableMeta* pTableMeta);
int32_t smlBindData(SQuery* handle, bool dataFormat, SArray* tags, SArray* colsSchema, SArray* cols,
STableMeta* pTableMeta, char* tableName, const char* sTableName, int32_t sTableNameLen, int32_t ttl,
char* msgBuf, int16_t msgBufLen);
char* msgBuf, int32_t msgBufLen);
int32_t smlBuildOutput(SQuery* handle, SHashObj* pVgHash);
int rawBlockBindData(SQuery *query, STableMeta* pTableMeta, void* data, SVCreateTbReq* pCreateTb, TAOS_FIELD *fields, int numFields, bool needChangeLength);
......
......@@ -133,6 +133,16 @@ int32_t tfsMkdirAt(STfs *pTfs, const char *rname, SDiskID diskId);
*/
int32_t tfsMkdirRecurAt(STfs *pTfs, const char *rname, SDiskID diskId);
/**
* @brief check directories exist in tfs.
*
* @param pTfs The fs object.
* @param rname The rel name of directory.
* @param diskId The disk ID.
* @return true for exist, false for not exist.
*/
bool tfsDirExistAt(STfs *pTfs, const char *rname, SDiskID diskId);
/**
* @brief Remove directory at all levels in tfs.
*
......
......@@ -406,6 +406,7 @@ int32_t* taosGetErrno();
#define TSDB_CODE_SNODE_NOT_DEPLOYED TAOS_DEF_ERROR_CODE(0, 0x0411)
#define TSDB_CODE_MNODE_NOT_CATCH_UP TAOS_DEF_ERROR_CODE(0, 0x0412) // internal
#define TSDB_CODE_MNODE_ALREADY_IS_VOTER TAOS_DEF_ERROR_CODE(0, 0x0413) // internal
#define TSDB_CODE_MNODE_ONLY_TWO_MNODE TAOS_DEF_ERROR_CODE(0, 0x0414) // internal
// vnode
// #define TSDB_CODE_VND_ACTION_IN_PROGRESS TAOS_DEF_ERROR_CODE(0, 0x0500) // 2.x
......@@ -442,6 +443,7 @@ int32_t* taosGetErrno();
#define TSDB_CODE_VND_QUERY_BUSY TAOS_DEF_ERROR_CODE(0, 0x0531)
#define TSDB_CODE_VND_NOT_CATCH_UP TAOS_DEF_ERROR_CODE(0, 0x0532) // internal
#define TSDB_CODE_VND_ALREADY_IS_VOTER TAOS_DEF_ERROR_CODE(0, 0x0533) // internal
#define TSDB_CODE_VND_DIR_ALREADY_EXIST TAOS_DEF_ERROR_CODE(0, 0x0534)
// tsdb
#define TSDB_CODE_TDB_INVALID_TABLE_ID TAOS_DEF_ERROR_CODE(0, 0x0600)
......
......@@ -232,13 +232,7 @@ typedef enum ELogicConditionType {
#define TSDB_QUERY_ID_LEN 26
#define TSDB_TRANS_OPER_LEN 16
/**
* In some scenarios uint16_t (0~65535) is used to store the row len.
* - Firstly, we use 65531(65535 - 4), as the SDataRow/SKVRow contains 4 bits header.
* - Secondly, if all cols are VarDataT type except primary key, we need 4 bits to store the offset, thus
* the final value is 65531-(4096-1)*4 = 49151.
*/
#define TSDB_MAX_BYTES_PER_ROW 49151
#define TSDB_MAX_BYTES_PER_ROW 65531 // 49151:65531
#define TSDB_MAX_TAGS_LEN 16384
#define TSDB_MAX_TAGS 128
......@@ -273,6 +267,9 @@ typedef enum ELogicConditionType {
#define TSDB_DNODE_CONFIG_LEN 128
#define TSDB_DNODE_VALUE_LEN 256
#define TSDB_ACTIVE_KEY_LEN 109 // history 109:?
#define TSDB_CONN_ACTIVE_KEY_LEN 257 // history 257:?
#define TSDB_DEFAULT_PKT_SIZE 65480 // same as RPC_MAX_UDP_SIZE
#define TSDB_PAYLOAD_SIZE TSDB_DEFAULT_PKT_SIZE
......@@ -410,9 +407,9 @@ typedef enum ELogicConditionType {
#define TSDB_EXPLAIN_RESULT_ROW_SIZE (16 * 1024)
#define TSDB_EXPLAIN_RESULT_COLUMN_NAME "QUERY_PLAN"
#define TSDB_MAX_FIELD_LEN 16384
#define TSDB_MAX_BINARY_LEN (TSDB_MAX_FIELD_LEN - TSDB_KEYSIZE) // keep 16384
#define TSDB_MAX_NCHAR_LEN (TSDB_MAX_FIELD_LEN - TSDB_KEYSIZE) // keep 16384
#define TSDB_MAX_FIELD_LEN 65519 // 16384:65519
#define TSDB_MAX_BINARY_LEN TSDB_MAX_FIELD_LEN // 16384-8:65519
#define TSDB_MAX_NCHAR_LEN TSDB_MAX_FIELD_LEN // 16384-8:65519
#define PRIMARYKEY_TIMESTAMP_COL_ID 1
#define COL_REACH_END(colId, maxColId) ((colId) > (maxColId))
......
......@@ -361,7 +361,7 @@ void stopAllRequests(SHashObj* pRequests);
// conn level
int hbRegisterConn(SAppHbMgr* pAppHbMgr, int64_t tscRefId, int64_t clusterId, int8_t connType);
void hbDeregisterConn(SAppHbMgr* pAppHbMgr, SClientHbKey connKey, void* param);
void hbDeregisterConn(STscObj* pTscObj, SClientHbKey connKey);
typedef struct SSqlCallbackWrapper {
SParseContext* pParseCtx;
......
......@@ -244,7 +244,7 @@ void destroyTscObj(void *pObj) {
tscTrace("begin to destroy tscObj %" PRIx64 " p:%p", tscId, pTscObj);
SClientHbKey connKey = {.tscRid = pTscObj->id, .connType = pTscObj->connType};
hbDeregisterConn(pTscObj->pAppInfo->pAppHbMgr, connKey, pTscObj->passInfo.fp);
hbDeregisterConn(pTscObj, connKey);
destroyAllRequests(pTscObj->pRequests);
taosHashCleanup(pTscObj->pRequests);
......
......@@ -994,6 +994,7 @@ SAppHbMgr *appHbMgrInit(SAppInstInfo *pAppInstInfo, char *key) {
// init stat
pAppHbMgr->startTime = taosGetTimestampMs();
pAppHbMgr->connKeyCnt = 0;
pAppHbMgr->passKeyCnt = 0;
pAppHbMgr->reportCnt = 0;
pAppHbMgr->reportBytes = 0;
pAppHbMgr->key = taosStrdup(key);
......@@ -1154,7 +1155,8 @@ int hbRegisterConn(SAppHbMgr *pAppHbMgr, int64_t tscRefId, int64_t clusterId, in
}
}
void hbDeregisterConn(SAppHbMgr *pAppHbMgr, SClientHbKey connKey, void *param) {
void hbDeregisterConn(STscObj *pTscObj, SClientHbKey connKey) {
SAppHbMgr *pAppHbMgr = pTscObj->pAppInfo->pAppHbMgr;
SClientHbReq *pReq = taosHashAcquire(pAppHbMgr->activeInfo, &connKey, sizeof(SClientHbKey));
if (pReq) {
tFreeClientHbReq(pReq);
......@@ -1167,7 +1169,10 @@ void hbDeregisterConn(SAppHbMgr *pAppHbMgr, SClientHbKey connKey, void *param) {
}
atomic_sub_fetch_32(&pAppHbMgr->connKeyCnt, 1);
if (param) {
taosThreadMutexLock(&pTscObj->mutex);
if (pTscObj->passInfo.fp) {
atomic_sub_fetch_32(&pAppHbMgr->passKeyCnt, 1);
}
}
taosThreadMutexUnlock(&pTscObj->mutex);
}
\ No newline at end of file
......@@ -134,11 +134,15 @@ int taos_set_notify_cb(TAOS *taos, __taos_notify_fn_t fp, void *param, int type)
switch (type) {
case TAOS_NOTIFY_PASSVER: {
pObj->passInfo.fp = fp;
pObj->passInfo.param = param;
if (fp) {
taosThreadMutexLock(&pObj->mutex);
if (fp && !pObj->passInfo.fp) {
atomic_add_fetch_32(&pObj->pAppInfo->pAppHbMgr->passKeyCnt, 1);
} else if (!fp && pObj->passInfo.fp) {
atomic_sub_fetch_32(&pObj->pAppInfo->pAppHbMgr->passKeyCnt, 1);
}
pObj->passInfo.fp = fp;
pObj->passInfo.param = param;
taosThreadMutexUnlock(&pObj->mutex);
break;
}
default: {
......@@ -1338,6 +1342,8 @@ int taos_load_table_info(TAOS *taos, const char *tableNameList) {
goto _return;
}
pRequest->syncQuery = true;
STscObj *pTscObj = pRequest->pTscObj;
code = transferTableNameList(tableNameList, pTscObj->acctId, pTscObj->db, &catalogReq.pTableMeta);
if (code) {
......@@ -1364,7 +1370,7 @@ int taos_load_table_info(TAOS *taos, const char *tableNameList) {
tsem_wait(&pParam->sem);
_return:
taosArrayDestroy(catalogReq.pTableMeta);
taosArrayDestroyEx(catalogReq.pTableMeta, destoryTablesReq);
destroyRequest(pRequest);
return code;
}
......
......@@ -558,15 +558,21 @@ static int32_t smlGenerateSchemaAction(SSchema *colField, SHashObj *colHash, SSm
return 0;
}
#define BOUNDARY 1024
static int32_t smlFindNearestPowerOf2(int32_t length, uint8_t type) {
int32_t result = 1;
while (result <= length) {
result *= 2;
if (length >= BOUNDARY){
result = length;
}else{
while (result <= length) {
result <<= 1;
}
}
if (type == TSDB_DATA_TYPE_BINARY && result > TSDB_MAX_BINARY_LEN - VARSTR_HEADER_SIZE) {
result = TSDB_MAX_BINARY_LEN - VARSTR_HEADER_SIZE;
} else if (type == TSDB_DATA_TYPE_NCHAR && result > (TSDB_MAX_BINARY_LEN - VARSTR_HEADER_SIZE) / TSDB_NCHAR_SIZE) {
result = (TSDB_MAX_BINARY_LEN - VARSTR_HEADER_SIZE) / TSDB_NCHAR_SIZE;
} else if (type == TSDB_DATA_TYPE_NCHAR && result > (TSDB_MAX_NCHAR_LEN - VARSTR_HEADER_SIZE) / TSDB_NCHAR_SIZE) {
result = (TSDB_MAX_NCHAR_LEN - VARSTR_HEADER_SIZE) / TSDB_NCHAR_SIZE;
}
if (type == TSDB_DATA_TYPE_NCHAR) {
......@@ -649,6 +655,17 @@ static int32_t smlBuildFieldsList(SSmlHandle *info, SSchema *schemaField, SHashO
field->bytes = getBytes(kv->type, kv->length);
}
}
int32_t maxLen = isTag ? TSDB_MAX_TAGS_LEN : TSDB_MAX_BYTES_PER_ROW;
int32_t len = 0;
for (int j = 0; j < taosArrayGetSize(results); ++j) {
SField *field = taosArrayGet(results, j);
len += field->bytes;
}
if (len > maxLen) {
return isTag ? TSDB_CODE_PAR_INVALID_TAGS_LENGTH : TSDB_CODE_PAR_INVALID_ROW_LENGTH;
}
return TSDB_CODE_SUCCESS;
}
......@@ -781,11 +798,15 @@ static int32_t smlModifyDBSchemas(SSmlHandle *info) {
code = smlBuildFieldsList(info, NULL, NULL, sTableData->tags, pTags, 0, true);
if (code != TSDB_CODE_SUCCESS) {
uError("SML:0x%" PRIx64 " smlBuildFieldsList tag1 failed. %s", info->id, pName.tname);
taosArrayDestroy(pColumns);
taosArrayDestroy(pTags);
goto end;
}
code = smlBuildFieldsList(info, NULL, NULL, sTableData->cols, pColumns, 0, false);
if (code != TSDB_CODE_SUCCESS) {
uError("SML:0x%" PRIx64 " smlBuildFieldsList col1 failed. %s", info->id, pName.tname);
taosArrayDestroy(pColumns);
taosArrayDestroy(pTags);
goto end;
}
code = smlSendMetaMsg(info, &pName, pColumns, pTags, NULL, SCHEMA_ACTION_CREATE_STABLE);
......@@ -837,6 +858,23 @@ static int32_t smlModifyDBSchemas(SSmlHandle *info) {
pTableMeta->tableInfo.numOfColumns, true);
if (code != TSDB_CODE_SUCCESS) {
uError("SML:0x%" PRIx64 " smlBuildFieldsList tag2 failed. %s", info->id, pName.tname);
taosArrayDestroy(pColumns);
taosArrayDestroy(pTags);
goto end;
}
if (taosArrayGetSize(pTags) + pTableMeta->tableInfo.numOfColumns > TSDB_MAX_COLUMNS) {
uError("SML:0x%" PRIx64 " too many columns than 4096", info->id);
code = TSDB_CODE_PAR_TOO_MANY_COLUMNS;
taosArrayDestroy(pColumns);
taosArrayDestroy(pTags);
goto end;
}
if (taosArrayGetSize(pTags) > TSDB_MAX_TAGS) {
uError("SML:0x%" PRIx64 " too many tags than 128", info->id);
code = TSDB_CODE_PAR_INVALID_TAGS_NUM;
taosArrayDestroy(pColumns);
taosArrayDestroy(pTags);
goto end;
}
......@@ -891,6 +929,16 @@ static int32_t smlModifyDBSchemas(SSmlHandle *info) {
pTableMeta->tableInfo.numOfColumns, false);
if (code != TSDB_CODE_SUCCESS) {
uError("SML:0x%" PRIx64 " smlBuildFieldsList col2 failed. %s", info->id, pName.tname);
taosArrayDestroy(pColumns);
taosArrayDestroy(pTags);
goto end;
}
if (taosArrayGetSize(pColumns) + pTableMeta->tableInfo.numOfTags > TSDB_MAX_COLUMNS) {
uError("SML:0x%" PRIx64 " too many columns than 4096", info->id);
code = TSDB_CODE_PAR_TOO_MANY_COLUMNS;
taosArrayDestroy(pColumns);
taosArrayDestroy(pTags);
goto end;
}
......@@ -1171,6 +1219,7 @@ static int32_t smlPushCols(SArray *colsArray, SArray *cols) {
SSmlKv *kv = (SSmlKv *)taosArrayGet(cols, i);
taosHashPut(kvHash, kv->key, kv->keyLen, &kv, POINTER_BYTES);
if (terrno == TSDB_CODE_DUP_KEY) {
taosHashCleanup(kvHash);
return terrno;
}
}
......@@ -1244,12 +1293,12 @@ static int32_t smlParseLineBottom(SSmlHandle *info) {
uDebug("SML:0x%" PRIx64 " smlParseLineBottom add meta, format:%d, linenum:%d", info->id, info->dataFormat,
info->lineNum);
SSmlSTableMeta *meta = smlBuildSTableMeta(info->dataFormat);
taosHashPut(info->superTables, elements->measure, elements->measureLen, &meta, POINTER_BYTES);
smlInsertMeta(meta->tagHash, meta->tags, tinfo->tags);
if (terrno == TSDB_CODE_DUP_KEY) {
return terrno;
}
smlInsertMeta(meta->colHash, meta->cols, elements->colArray);
taosHashPut(info->superTables, elements->measure, elements->measureLen, &meta, POINTER_BYTES);
}
}
uDebug("SML:0x%" PRIx64 " smlParseLineBottom end, format:%d, linenum:%d", info->id, info->dataFormat, info->lineNum);
......@@ -1498,8 +1547,11 @@ static int smlProcess(SSmlHandle *info, char *lines[], char *rawLine, char *rawL
do {
code = smlModifyDBSchemas(info);
if (code == 0) break;
taosMsleep(500);
if (code == 0 || code == TSDB_CODE_SML_INVALID_DATA || code == TSDB_CODE_PAR_TOO_MANY_COLUMNS ||
code == TSDB_CODE_PAR_INVALID_TAGS_NUM || code == TSDB_CODE_PAR_INVALID_ROW_LENGTH ||
code == TSDB_CODE_PAR_INVALID_TAGS_LENGTH)
break;
taosMsleep(100);
uInfo("SML:0x%" PRIx64 " smlModifyDBSchemas retry code:%s, times:%d", info->id, tstrerror(code), retryNum);
} while (retryNum++ < taosHashGetSize(info->superTables) * MAX_RETRY_TIMES);
......
......@@ -575,7 +575,7 @@ static int32_t smlConvertJSONString(SSmlKv *pVal, char *typeStr, cJSON *value) {
uError("OTD:invalid type(%s) for JSON String", typeStr);
return TSDB_CODE_TSC_INVALID_JSON_TYPE;
}
pVal->length = (int16_t)strlen(value->valuestring);
pVal->length = strlen(value->valuestring);
if (pVal->type == TSDB_DATA_TYPE_BINARY && pVal->length > TSDB_MAX_BINARY_LEN - VARSTR_HEADER_SIZE) {
return TSDB_CODE_PAR_INVALID_VAR_COLUMN_LEN;
......
......@@ -236,7 +236,7 @@ static int32_t smlParseTagKv(SSmlHandle *info, char **sql, char *sqlEnd, SSmlLin
PROCESS_SLASH(value, valueLen)
}
if (unlikely(valueLen > (TSDB_MAX_NCHAR_LEN - VARSTR_HEADER_SIZE) / TSDB_NCHAR_SIZE)) {
if (unlikely(valueLen > (TSDB_MAX_TAGS_LEN - VARSTR_HEADER_SIZE) / TSDB_NCHAR_SIZE)) {
return TSDB_CODE_PAR_INVALID_VAR_COLUMN_LEN;
}
......
......@@ -158,7 +158,7 @@ static int32_t smlParseTelnetTags(SSmlHandle *info, char *data, char *sqlEnd, SS
return TSDB_CODE_TSC_INVALID_VALUE;
}
if (unlikely(valueLen > (TSDB_MAX_NCHAR_LEN - VARSTR_HEADER_SIZE) / TSDB_NCHAR_SIZE)) {
if (unlikely(valueLen > (TSDB_MAX_TAGS_LEN - VARSTR_HEADER_SIZE) / TSDB_NCHAR_SIZE)) {
return TSDB_CODE_PAR_INVALID_VAR_COLUMN_LEN;
}
......
......@@ -2517,6 +2517,31 @@ int32_t tmq_get_topic_assignment(tmq_t* tmq, const char* pTopicName, tmq_topic_a
*numOfAssignment = num;
}
for (int32_t j = 0; j < (*numOfAssignment); ++j) {
tmq_topic_assignment* p = &(*assignment)[j];
for(int32_t i = 0; i < taosArrayGetSize(pTopic->vgs); ++i) {
SMqClientVg* pClientVg = taosArrayGet(pTopic->vgs, i);
if (pClientVg->vgId != p->vgId) {
continue;
}
SVgOffsetInfo* pOffsetInfo = &pClientVg->offsetInfo;
pOffsetInfo->currentOffset.type = TMQ_OFFSET__LOG;
char offsetBuf[80] = {0};
tFormatOffset(offsetBuf, tListLen(offsetBuf), &pOffsetInfo->currentOffset);
tscDebug("vgId:%d offset is update to:%s", p->vgId, offsetBuf);
pOffsetInfo->walVerBegin = p->begin;
pOffsetInfo->walVerEnd = p->end;
pOffsetInfo->currentOffset.version = p->currentOffset;
pOffsetInfo->committedOffset.version = p->currentOffset;
}
}
destroyCommonInfo(pCommon);
return code;
} else {
......@@ -2564,7 +2589,8 @@ int32_t tmq_offset_seek(tmq_t* tmq, const char* pTopicName, int32_t vgId, int64_
}
if (offset < pOffsetInfo->walVerBegin || offset > pOffsetInfo->walVerEnd) {
tscError("consumer:0x%" PRIx64 " invalid seek params, offset:%" PRId64, tmq->consumerId, offset);
tscError("consumer:0x%" PRIx64 " invalid seek params, offset:%" PRId64 ", valid range:[%" PRId64 ", %" PRId64 "]",
tmq->consumerId, offset, pOffsetInfo->walVerBegin, pOffsetInfo->walVerEnd);
return TSDB_CODE_INVALID_PARA;
}
......
......@@ -1122,6 +1122,8 @@ TEST(clientCase, sub_tb_test) {
return;
}
tmq_offset_seek(tmq, "topic_t1", pAssign[0].vgId, 0);
while (1) {
TAOS_RES* pRes = tmq_consumer_poll(tmq, timeout);
if (pRes != NULL) {
......
......@@ -35,6 +35,10 @@ static const SSysDbTableSchema dnodesSchema[] = {
{.name = "create_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP, .sysInfo = true},
{.name = "reboot_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP, .sysInfo = true},
{.name = "note", .bytes = 256 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = true},
#ifdef TD_ENTERPRISE
{.name = "active_code", .bytes = TSDB_ACTIVE_KEY_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = true},
{.name = "c_active_code", .bytes = TSDB_CONN_ACTIVE_KEY_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = true},
#endif
};
static const SSysDbTableSchema mnodesSchema[] = {
......
......@@ -755,7 +755,7 @@ SColVal *tRowIterNext(SRowIter *pIter) {
}
if (pIter->pRow->flag == HAS_NULL) {
pIter->cv = COL_VAL_NULL(pTColumn->type, pTColumn->colId);
pIter->cv = COL_VAL_NULL(pTColumn->colId, pTColumn->type);
goto _exit;
}
......@@ -2439,7 +2439,7 @@ _exit:
int32_t tColDataAddValueByDataBlock(SColData *pColData, int8_t type, int32_t bytes, int32_t nRows, char *lengthOrbitmap,
char *data) {
int32_t code = 0;
if(data == NULL){
if (data == NULL) {
for (int32_t i = 0; i < nRows; ++i) {
code = tColDataAppendValueImpl[pColData->flag][CV_FLAG_NONE](pColData, NULL, 0);
}
......@@ -2453,8 +2453,9 @@ int32_t tColDataAddValueByDataBlock(SColData *pColData, int8_t type, int32_t byt
code = tColDataAppendValueImpl[pColData->flag][CV_FLAG_NULL](pColData, NULL, 0);
if (code) goto _exit;
} else {
if(ASSERT(varDataTLen(data + offset) <= bytes)){
uError("var data length invalid, varDataTLen(data + offset):%d <= bytes:%d", (int)varDataTLen(data + offset), bytes);
if (ASSERT(varDataTLen(data + offset) <= bytes)) {
uError("var data length invalid, varDataTLen(data + offset):%d <= bytes:%d", (int)varDataTLen(data + offset),
bytes);
code = TSDB_CODE_INVALID_PARA;
goto _exit;
}
......
......@@ -1720,6 +1720,33 @@ int32_t tDeserializeSDropDnodeReq(void *buf, int32_t bufLen, SDropDnodeReq *pReq
return 0;
}
int32_t tSerializeSRestoreDnodeReq(void *buf, int32_t bufLen, SRestoreDnodeReq *pReq) {
SEncoder encoder = {0};
tEncoderInit(&encoder, buf, bufLen);
if (tStartEncode(&encoder) < 0) return -1;
if (tEncodeI32(&encoder, pReq->dnodeId) < 0) return -1;
if (tEncodeI8(&encoder, pReq->restoreType) < 0) return -1;
tEndEncode(&encoder);
int32_t tlen = encoder.pos;
tEncoderClear(&encoder);
return tlen;
}
int32_t tDeserializeSRestoreDnodeReq(void *buf, int32_t bufLen, SRestoreDnodeReq *pReq) {
SDecoder decoder = {0};
tDecoderInit(&decoder, buf, bufLen);
if (tStartDecode(&decoder) < 0) return -1;
if (tDecodeI32(&decoder, &pReq->dnodeId) < 0) return -1;
if (tDecodeI8(&decoder, &pReq->restoreType) < 0) return -1;
tEndDecode(&decoder);
tDecoderClear(&decoder);
return 0;
}
int32_t tSerializeSMCfgDnodeReq(void *buf, int32_t bufLen, SMCfgDnodeReq *pReq) {
SEncoder encoder = {0};
tEncoderInit(&encoder, buf, bufLen);
......@@ -7691,4 +7718,4 @@ void tDeleteMqSubTopicEp(SMqSubTopicEp *pSubTopicEp) {
taosMemoryFreeClear(pSubTopicEp->schema.pSchema);
pSubTopicEp->schema.nCols = 0;
taosArrayDestroy(pSubTopicEp->vgs);
}
\ No newline at end of file
}
......@@ -174,6 +174,7 @@ SArray *mmGetMsgHandles() {
if (dmSetMgmtHandle(pArray, TDMT_MND_SERVER_VERSION, mmPutMsgToReadQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_MND_CREATE_INDEX, mmPutMsgToWriteQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_MND_DROP_INDEX, mmPutMsgToWriteQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_MND_RESTORE_DNODE, mmPutMsgToWriteQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_SCH_QUERY, mmPutMsgToQueryQueue, 1) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_SCH_MERGE_QUERY, mmPutMsgToQueryQueue, 1) == NULL) goto _OVER;
......
......@@ -255,7 +255,7 @@ int32_t vmProcessCreateVnodeReq(SVnodeMgmt *pMgmt, SRpcMsg *pMsg) {
SVnodeObj *pVnode = vmAcquireVnode(pMgmt, req.vgId);
if (pVnode != NULL) {
dInfo("vgId:%d, already exist", req.vgId);
dError("vgId:%d, already exist", req.vgId);
tFreeSCreateVnodeReq(&req);
vmReleaseVnode(pMgmt, pVnode);
terrno = TSDB_CODE_VND_ALREADY_EXIST;
......@@ -264,7 +264,22 @@ int32_t vmProcessCreateVnodeReq(SVnodeMgmt *pMgmt, SRpcMsg *pMsg) {
}
snprintf(path, TSDB_FILENAME_LEN, "vnode%svnode%d", TD_DIRSEP, vnodeCfg.vgId);
if (vnodeCreate(path, &vnodeCfg, pMgmt->pTfs) < 0) {
if (pMgmt->pTfs) {
if (tfsDirExistAt(pMgmt->pTfs, path, (SDiskID){0})) {
terrno = TSDB_CODE_VND_DIR_ALREADY_EXIST;
dError("vgId:%d, failed to restore vnode since %s", req.vgId, terrstr());
return -1;
}
} else {
if (taosDirExist(path)) {
terrno = TSDB_CODE_VND_DIR_ALREADY_EXIST;
dError("vgId:%d, failed to restore vnode since %s", req.vgId, terrstr());
return -1;
}
}
if (vnodeCreate(path, &vnodeCfg, pMgmt->pTfs) < 0) {
tFreeSCreateVnodeReq(&req);
dError("vgId:%d, failed to create vnode since %s", req.vgId, terrstr());
code = terrno;
......@@ -344,6 +359,7 @@ int32_t vmProcessAlterVnodeTypeReq(SVnodeMgmt *pMgmt, SRpcMsg *pMsg) {
ESyncRole role = vnodeGetRole(pVnode->pImpl);
dInfo("vgId:%d, checking node role:%d", req.vgId, role);
if(role == TAOS_SYNC_ROLE_VOTER){
dError("vgId:%d, failed to alter vnode type since node already is role:%d", req.vgId, role);
terrno = TSDB_CODE_VND_ALREADY_IS_VOTER;
vmReleaseVnode(pMgmt, pVnode);
return -1;
......
......@@ -60,7 +60,7 @@ static void vmProcessMgmtQueue(SQueueInfo *pInfo, SRpcMsg *pMsg) {
if (IsReq(pMsg)) {
if (code != 0) {
if (terrno != 0) code = terrno;
dGError("msg:%p, failed to process since %s, type:%s", pMsg, terrstr(code), TMSG_INFO(pMsg->msgType));
dGError("msg:%p, failed to process since %s, type:%s", pMsg, tstrerror(code), TMSG_INFO(pMsg->msgType));
}
vmSendRsp(pMsg, code);
}
......
......@@ -218,6 +218,7 @@ static int32_t dmProcessAlterNodeTypeReq(EDndNodeType ntype, SRpcMsg *pMsg) {
ESyncRole role = (*pWrapper->func.nodeRoleFp)(pWrapper->pMgmt);
dInfo("node:%s, checking node role:%d", pWrapper->name, role);
if(role == TAOS_SYNC_ROLE_VOTER){
dError("node:%s, failed to alter node type since node already is role:%d", pWrapper->name, role);
terrno = TSDB_CODE_MNODE_ALREADY_IS_VOTER;
return -1;
}
......
......@@ -6,6 +6,7 @@ IF (TD_ENTERPRISE)
LIST(APPEND MNODE_SRC ${TD_ENTERPRISE_DIR}/src/plugins/privilege/src/privilege.c)
LIST(APPEND MNODE_SRC ${TD_ENTERPRISE_DIR}/src/plugins/mnode/src/mndDb.c)
LIST(APPEND MNODE_SRC ${TD_ENTERPRISE_DIR}/src/plugins/mnode/src/mndVgroup.c)
LIST(APPEND MNODE_SRC ${TD_ENTERPRISE_DIR}/src/plugins/mnode/src/mndDnode.c)
ENDIF ()
add_library(mnode STATIC ${MNODE_SRC})
......
......@@ -118,7 +118,7 @@ typedef enum {
} ETrnPolicy;
typedef enum {
TRN_EXEC_PRARLLEL = 0,
TRN_EXEC_PARALLEL = 0,
TRN_EXEC_SERIAL = 1,
} ETrnExec;
......@@ -177,6 +177,7 @@ typedef struct {
SArray* pRpcArray;
SRWLatch lockRpcArray;
int64_t mTraceId;
TdThreadMutex mutex;
} STrans;
typedef struct {
......@@ -205,6 +206,8 @@ typedef struct {
uint16_t port;
char fqdn[TSDB_FQDN_LEN];
char ep[TSDB_EP_LEN];
char active[TSDB_ACTIVE_KEY_LEN];
char connActive[TSDB_CONN_ACTIVE_KEY_LEN];
} SDnodeObj;
typedef struct {
......
......@@ -29,7 +29,7 @@ void mndReleaseDnode(SMnode *pMnode, SDnodeObj *pDnode);
SEpSet mndGetDnodeEpset(SDnodeObj *pDnode);
int32_t mndGetDnodeSize(SMnode *pMnode);
bool mndIsDnodeOnline(SDnodeObj *pDnode, int64_t curMs);
void mndGetDnodeData(SMnode *pMnode, SArray *pDnodeEps);
void mndGetDnodeData(SMnode *pMnode, SArray *pDnodeInfo);
#ifdef __cplusplus
}
......
......@@ -29,6 +29,10 @@ void mndReleaseMnode(SMnode *pMnode, SMnodeObj *pObj);
bool mndIsMnode(SMnode *pMnode, int32_t dnodeId);
void mndGetMnodeEpSet(SMnode *pMnode, SEpSet *pEpSet);
int32_t mndSetDropMnodeInfoToTrans(SMnode *pMnode, STrans *pTrans, SMnodeObj *pObj, bool force);
int32_t mndSetRestoreCreateMnodeRedoActions(SMnode *pMnode, STrans *pTrans, SDnodeObj *pDnode, SMnodeObj *pObj);
int32_t mndSetCreateMnodeCommitLogs(SMnode *pMnode, STrans *pTrans, SMnodeObj *pObj);
int32_t mndSetRestoreAlterMnodeTypeRedoActions(SMnode *pMnode, STrans *pTrans, SDnodeObj *pDnode, SMnodeObj *pObj);
int32_t mndSetRestoreCreateMnodeRedoLogs(SMnode *pMnode, STrans *pTrans, SMnodeObj *pObj);
#ifdef __cplusplus
}
......
......@@ -30,6 +30,9 @@ SQnodeObj *mndAcquireQnode(SMnode *pMnode, int32_t qnodeId);
void mndReleaseQnode(SMnode *pMnode, SQnodeObj *pObj);
int32_t mndCreateQnodeList(SMnode *pMnode, SArray **pList, int32_t limit);
int32_t mndSetDropQnodeInfoToTrans(SMnode *pMnode, STrans *pTrans, SQnodeObj *pObj, bool force);
bool mndQnodeInDnode(SQnodeObj *pQnode, int32_t dnodeId);
int32_t mndSetCreateQnodeCommitLogs(STrans *pTrans, SQnodeObj *pObj);
int32_t mndSetCreateQnodeRedoActions(STrans *pTrans, SDnodeObj *pDnode, SQnodeObj *pObj);
#ifdef __cplusplus
}
......
......@@ -76,6 +76,7 @@ void mndTransSetRpcRsp(STrans *pTrans, void *pCont, int32_t contLen);
void mndTransSetCb(STrans *pTrans, ETrnFunc startFunc, ETrnFunc stopFunc, void *param, int32_t paramLen);
void mndTransSetDbName(STrans *pTrans, const char *dbname, const char *stbname);
void mndTransSetSerial(STrans *pTrans);
void mndTransSetParallel(STrans *pTrans);
void mndTransSetOper(STrans *pTrans, EOperType oper);
int32_t mndTrancCheckConflict(SMnode *pMnode, STrans *pTrans);
......
......@@ -49,6 +49,11 @@ int32_t mndBuildCompactVgroupAction(SMnode *pMnode, STrans *pTrans, SDbObj *pDb,
void *mndBuildCreateVnodeReq(SMnode *, SDnodeObj *pDnode, SDbObj *pDb, SVgObj *pVgroup, int32_t *pContLen);
void *mndBuildDropVnodeReq(SMnode *, SDnodeObj *pDnode, SDbObj *pDb, SVgObj *pVgroup, int32_t *pContLen);
bool mndVgroupInDb(SVgObj *pVgroup, int64_t dbUid);
bool mndVgroupInDnode(SVgObj *pVgroup, int32_t dnodeId);
int32_t mndBuildRestoreAlterVgroupAction(SMnode *pMnode, STrans *pTrans, SDbObj *db, SVgObj *pVgroup,
SDnodeObj *pDnode);
int32_t mndSplitVgroup(SMnode *pMnode, SRpcMsg *pReq, SDbObj *pDb, SVgObj *pVgroup);
#ifdef __cplusplus
}
......
......@@ -27,7 +27,7 @@
#include "tmisce.h"
#include "mndCluster.h"
#define TSDB_DNODE_VER_NUMBER 1
#define TSDB_DNODE_VER_NUMBER 2
#define TSDB_DNODE_RESERVE_SIZE 64
static const char *offlineReason[] = {
......@@ -58,6 +58,7 @@ static int32_t mndProcessDropDnodeReq(SRpcMsg *pReq);
static int32_t mndProcessConfigDnodeReq(SRpcMsg *pReq);
static int32_t mndProcessConfigDnodeRsp(SRpcMsg *pRsp);
static int32_t mndProcessStatusReq(SRpcMsg *pReq);
static int32_t mndProcessRestoreDnodeReq(SRpcMsg *pReq);
static int32_t mndRetrieveConfigs(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock *pBlock, int32_t rows);
static void mndCancelGetNextConfig(SMnode *pMnode, void *pIter);
......@@ -83,6 +84,7 @@ int32_t mndInitDnode(SMnode *pMnode) {
mndSetMsgHandle(pMnode, TDMT_MND_STATUS, mndProcessStatusReq);
mndSetMsgHandle(pMnode, TDMT_MND_DNODE_LIST, mndProcessDnodeListReq);
mndSetMsgHandle(pMnode, TDMT_MND_SHOW_VARIABLES, mndProcessShowVariablesReq);
mndSetMsgHandle(pMnode, TDMT_MND_RESTORE_DNODE, mndProcessRestoreDnodeReq);
mndAddShowRetrieveHandle(pMnode, TSDB_MGMT_TABLE_CONFIGS, mndRetrieveConfigs);
mndAddShowFreeIterHandle(pMnode, TSDB_MGMT_TABLE_CONFIGS, mndCancelGetNextConfig);
......@@ -139,6 +141,10 @@ static SSdbRaw *mndDnodeActionEncode(SDnodeObj *pDnode) {
SDB_SET_INT16(pRaw, dataPos, pDnode->port, _OVER)
SDB_SET_BINARY(pRaw, dataPos, pDnode->fqdn, TSDB_FQDN_LEN, _OVER)
SDB_SET_RESERVE(pRaw, dataPos, TSDB_DNODE_RESERVE_SIZE, _OVER)
SDB_SET_INT16(pRaw, dataPos, TSDB_ACTIVE_KEY_LEN, _OVER)
SDB_SET_BINARY(pRaw, dataPos, pDnode->active, TSDB_ACTIVE_KEY_LEN, _OVER)
SDB_SET_INT16(pRaw, dataPos, TSDB_CONN_ACTIVE_KEY_LEN, _OVER)
SDB_SET_BINARY(pRaw, dataPos, pDnode->connActive, TSDB_CONN_ACTIVE_KEY_LEN, _OVER)
SDB_SET_DATALEN(pRaw, dataPos, _OVER);
terrno = 0;
......@@ -161,7 +167,7 @@ static SSdbRow *mndDnodeActionDecode(SSdbRaw *pRaw) {
int8_t sver = 0;
if (sdbGetRawSoftVer(pRaw, &sver) != 0) goto _OVER;
if (sver != TSDB_DNODE_VER_NUMBER) {
if (sver < 1 || sver > TSDB_DNODE_VER_NUMBER) {
terrno = TSDB_CODE_SDB_INVALID_DATA_VER;
goto _OVER;
}
......@@ -179,6 +185,13 @@ static SSdbRow *mndDnodeActionDecode(SSdbRaw *pRaw) {
SDB_GET_INT16(pRaw, dataPos, &pDnode->port, _OVER)
SDB_GET_BINARY(pRaw, dataPos, pDnode->fqdn, TSDB_FQDN_LEN, _OVER)
SDB_GET_RESERVE(pRaw, dataPos, TSDB_DNODE_RESERVE_SIZE, _OVER)
if (sver > 1) {
int16_t keyLen = 0;
SDB_GET_INT16(pRaw, dataPos, &keyLen, _OVER)
SDB_GET_BINARY(pRaw, dataPos, pDnode->active, keyLen, _OVER)
SDB_GET_INT16(pRaw, dataPos, &keyLen, _OVER)
SDB_GET_BINARY(pRaw, dataPos, pDnode->connActive, keyLen, _OVER)
}
terrno = 0;
if (tmsgUpdateDnodeInfo(&pDnode->id, NULL, pDnode->fqdn, &pDnode->port)) {
......@@ -294,6 +307,11 @@ int32_t mndGetDnodeSize(SMnode *pMnode) {
return sdbGetSize(pSdb, SDB_DNODE);
}
int32_t mndGetDbSize(SMnode *pMnode) {
SSdb *pSdb = pMnode->pSdb;
return sdbGetSize(pSdb, SDB_DB);
}
bool mndIsDnodeOnline(SDnodeObj *pDnode, int64_t curMs) {
int64_t interval = TABS(pDnode->lastAccessTime - curMs);
if (interval > 5000 * (int64_t)tsStatusInterval) {
......@@ -305,7 +323,7 @@ bool mndIsDnodeOnline(SDnodeObj *pDnode, int64_t curMs) {
return true;
}
void mndGetDnodeData(SMnode *pMnode, SArray *pDnodeEps) {
static void mndGetDnodeEps(SMnode *pMnode, SArray *pDnodeEps) {
SSdb *pSdb = pMnode->pSdb;
int32_t numOfEps = 0;
......@@ -330,6 +348,34 @@ void mndGetDnodeData(SMnode *pMnode, SArray *pDnodeEps) {
}
}
void mndGetDnodeData(SMnode *pMnode, SArray *pDnodeInfo) {
SSdb *pSdb = pMnode->pSdb;
int32_t numOfEps = 0;
void *pIter = NULL;
while (1) {
SDnodeObj *pDnode = NULL;
ESdbStatus objStatus = 0;
pIter = sdbFetchAll(pSdb, SDB_DNODE, pIter, (void **)&pDnode, &objStatus, true);
if (pIter == NULL) break;
SDnodeInfo dInfo;
dInfo.id = pDnode->id;
dInfo.ep.port = pDnode->port;
tstrncpy(dInfo.ep.fqdn, pDnode->fqdn, TSDB_FQDN_LEN);
tstrncpy(dInfo.active, pDnode->active, TSDB_ACTIVE_KEY_LEN);
tstrncpy(dInfo.connActive, pDnode->connActive, TSDB_CONN_ACTIVE_KEY_LEN);
sdbRelease(pSdb, pDnode);
if (mndIsMnode(pMnode, pDnode->id)) {
dInfo.isMnode = 1;
} else {
dInfo.isMnode = 0;
}
taosArrayPush(pDnodeInfo, &dInfo);
}
}
static int32_t mndCheckClusterCfgPara(SMnode *pMnode, SDnodeObj *pDnode, const SClusterCfg *pCfg) {
if (pCfg->statusInterval != tsStatusInterval) {
mError("dnode:%d, statusInterval:%d inconsistent with cluster:%d", pDnode->id, pCfg->statusInterval,
......@@ -536,7 +582,7 @@ static int32_t mndProcessStatusReq(SRpcMsg *pReq) {
goto _OVER;
}
mndGetDnodeData(pMnode, statusRsp.pDnodeEps);
mndGetDnodeEps(pMnode, statusRsp.pDnodeEps);
int32_t contLen = tSerializeSStatusRsp(NULL, 0, &statusRsp);
void *pHead = rpcMallocCont(contLen);
......@@ -745,6 +791,18 @@ _OVER:
return code;
}
extern int32_t mndProcessRestoreDnodeReqImpl(SRpcMsg *pReq);
int32_t mndProcessRestoreDnodeReq(SRpcMsg *pReq){
return mndProcessRestoreDnodeReqImpl(pReq);
}
#ifndef TD_ENTERPRISE
int32_t mndProcessRestoreDnodeReqImpl(SRpcMsg *pReq){
return 0;
}
#endif
static int32_t mndDropDnode(SMnode *pMnode, SRpcMsg *pReq, SDnodeObj *pDnode, SMnodeObj *pMObj, SQnodeObj *pQObj,
SSnodeObj *pSObj, int32_t numOfVnodes, bool force) {
int32_t code = -1;
......@@ -1041,6 +1099,7 @@ static int32_t mndRetrieveDnodes(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock *pB
ESdbStatus objStatus = 0;
SDnodeObj *pDnode = NULL;
int64_t curMs = taosGetTimestampMs();
char buf[TSDB_CONN_ACTIVE_KEY_LEN + VARSTR_HEADER_SIZE]; // make sure TSDB_CONN_ACTIVE_KEY_LEN >= TSDB_EP_LEN
while (numOfRows < rows) {
pShow->pIter = sdbFetchAll(pSdb, SDB_DNODE, pShow->pIter, (void **)&pDnode, &objStatus, true);
......@@ -1052,7 +1111,6 @@ static int32_t mndRetrieveDnodes(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock *pB
SColumnInfoData *pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
colDataSetVal(pColInfo, numOfRows, (const char *)&pDnode->id, false);
char buf[tListLen(pDnode->ep) + VARSTR_HEADER_SIZE] = {0};
STR_WITH_MAXSIZE_TO_VARSTR(buf, pDnode->ep, pShow->pMeta->pSchemas[cols].bytes);
pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
......@@ -1077,10 +1135,9 @@ static int32_t mndRetrieveDnodes(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock *pB
status = "offline";
}
char b1[16] = {0};
STR_TO_VARSTR(b1, status);
STR_TO_VARSTR(buf, status);
pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
colDataSetVal(pColInfo, numOfRows, b1, false);
colDataSetVal(pColInfo, numOfRows, buf, false);
pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
colDataSetVal(pColInfo, numOfRows, (const char *)&pDnode->createdTime, false);
......@@ -1095,6 +1152,16 @@ static int32_t mndRetrieveDnodes(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock *pB
colDataSetVal(pColInfo, numOfRows, b, false);
taosMemoryFreeClear(b);
#ifdef TD_ENTERPRISE
STR_TO_VARSTR(buf, pDnode->active);
pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
colDataSetVal(pColInfo, numOfRows, buf, false);
STR_TO_VARSTR(buf, pDnode->connActive);
pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
colDataSetVal(pColInfo, numOfRows, buf, false);
#endif
numOfRows++;
sdbRelease(pSdb, pDnode);
}
......
......@@ -556,7 +556,7 @@ RETRIEVE_FUNC_OVER:
return code;
}
static void *mnodeGenTypeStr(char *buf, int32_t buflen, uint8_t type, int16_t len) {
static void *mnodeGenTypeStr(char *buf, int32_t buflen, uint8_t type, int32_t len) {
char *msg = "unknown";
if (type >= sizeof(tDataTypes) / sizeof(tDataTypes[0])) {
return msg;
......
......@@ -275,6 +275,14 @@ static int32_t mndSetCreateMnodeRedoLogs(SMnode *pMnode, STrans *pTrans, SMnodeO
return 0;
}
int32_t mndSetRestoreCreateMnodeRedoLogs(SMnode *pMnode, STrans *pTrans, SMnodeObj *pObj) {
SSdbRaw *pRedoRaw = mndMnodeActionEncode(pObj);
if (pRedoRaw == NULL) return -1;
if (mndTransAppendRedolog(pTrans, pRedoRaw) != 0) return -1;
if (sdbSetRawStatus(pRedoRaw, SDB_STATUS_READY) != 0) return -1;
return 0;
}
static int32_t mndSetCreateMnodeUndoLogs(SMnode *pMnode, STrans *pTrans, SMnodeObj *pObj) {
SSdbRaw *pUndoRaw = mndMnodeActionEncode(pObj);
if (pUndoRaw == NULL) return -1;
......@@ -283,7 +291,7 @@ static int32_t mndSetCreateMnodeUndoLogs(SMnode *pMnode, STrans *pTrans, SMnodeO
return 0;
}
static int32_t mndSetCreateMnodeCommitLogs(SMnode *pMnode, STrans *pTrans, SMnodeObj *pObj) {
int32_t mndSetCreateMnodeCommitLogs(SMnode *pMnode, STrans *pTrans, SMnodeObj *pObj) {
SSdbRaw *pCommitRaw = mndMnodeActionEncode(pObj);
if (pCommitRaw == NULL) return -1;
if (mndTransAppendCommitlog(pTrans, pCommitRaw) != 0) return -1;
......@@ -421,6 +429,55 @@ static int32_t mndSetCreateMnodeRedoActions(SMnode *pMnode, STrans *pTrans, SDno
return 0;
}
int32_t mndSetRestoreCreateMnodeRedoActions(SMnode *pMnode, STrans *pTrans, SDnodeObj *pDnode, SMnodeObj *pObj) {
SSdb *pSdb = pMnode->pSdb;
void *pIter = NULL;
SDCreateMnodeReq createReq = {0};
SEpSet createEpset = {0};
while (1) {
SMnodeObj *pMObj = NULL;
pIter = sdbFetch(pSdb, SDB_MNODE, pIter, (void **)&pMObj);
if (pIter == NULL) break;
if(pMObj->id == pDnode->id) {
sdbRelease(pSdb, pMObj);
continue;
}
if(pMObj->role == TAOS_SYNC_ROLE_VOTER){
createReq.replicas[createReq.replica].id = pMObj->id;
createReq.replicas[createReq.replica].port = pMObj->pDnode->port;
memcpy(createReq.replicas[createReq.replica].fqdn, pMObj->pDnode->fqdn, TSDB_FQDN_LEN);
createReq.replica++;
}
else{
createReq.learnerReplicas[createReq.learnerReplica].id = pMObj->id;
createReq.learnerReplicas[createReq.learnerReplica].port = pMObj->pDnode->port;
memcpy(createReq.learnerReplicas[createReq.learnerReplica].fqdn, pMObj->pDnode->fqdn, TSDB_FQDN_LEN);
createReq.learnerReplica++;
}
sdbRelease(pSdb, pMObj);
}
createReq.learnerReplicas[createReq.learnerReplica].id = pDnode->id;
createReq.learnerReplicas[createReq.learnerReplica].port = pDnode->port;
memcpy(createReq.learnerReplicas[createReq.learnerReplica].fqdn, pDnode->fqdn, TSDB_FQDN_LEN);
createReq.learnerReplica++;
createReq.lastIndex = pObj->lastIndex;
createEpset.inUse = 0;
createEpset.numOfEps = 1;
createEpset.eps[0].port = pDnode->port;
memcpy(createEpset.eps[0].fqdn, pDnode->fqdn, TSDB_FQDN_LEN);
if (mndBuildCreateMnodeRedoAction(pTrans, &createReq, &createEpset) != 0) return -1;
return 0;
}
static int32_t mndSetAlterMnodeTypeRedoActions(SMnode *pMnode, STrans *pTrans, SDnodeObj *pDnode, SMnodeObj *pObj) {
SSdb *pSdb = pMnode->pSdb;
void *pIter = NULL;
......@@ -465,6 +522,55 @@ static int32_t mndSetAlterMnodeTypeRedoActions(SMnode *pMnode, STrans *pTrans, S
return 0;
}
int32_t mndSetRestoreAlterMnodeTypeRedoActions(SMnode *pMnode, STrans *pTrans, SDnodeObj *pDnode, SMnodeObj *pObj) {
SSdb *pSdb = pMnode->pSdb;
void *pIter = NULL;
SDAlterMnodeTypeReq alterReq = {0};
SEpSet createEpset = {0};
while (1) {
SMnodeObj *pMObj = NULL;
pIter = sdbFetch(pSdb, SDB_MNODE, pIter, (void **)&pMObj);
if (pIter == NULL) break;
if(pMObj->id == pDnode->id) {
sdbRelease(pSdb, pMObj);
continue;
}
if(pMObj->role == TAOS_SYNC_ROLE_VOTER){
alterReq.replicas[alterReq.replica].id = pMObj->id;
alterReq.replicas[alterReq.replica].port = pMObj->pDnode->port;
memcpy(alterReq.replicas[alterReq.replica].fqdn, pMObj->pDnode->fqdn, TSDB_FQDN_LEN);
alterReq.replica++;
}
else{
alterReq.learnerReplicas[alterReq.learnerReplica].id = pMObj->id;
alterReq.learnerReplicas[alterReq.learnerReplica].port = pMObj->pDnode->port;
memcpy(alterReq.learnerReplicas[alterReq.learnerReplica].fqdn, pMObj->pDnode->fqdn, TSDB_FQDN_LEN);
alterReq.learnerReplica++;
}
sdbRelease(pSdb, pMObj);
}
alterReq.replicas[alterReq.replica].id = pDnode->id;
alterReq.replicas[alterReq.replica].port = pDnode->port;
memcpy(alterReq.replicas[alterReq.replica].fqdn, pDnode->fqdn, TSDB_FQDN_LEN);
alterReq.replica++;
alterReq.lastIndex = pObj->lastIndex;
createEpset.inUse = 0;
createEpset.numOfEps = 1;
createEpset.eps[0].port = pDnode->port;
memcpy(createEpset.eps[0].fqdn, pDnode->fqdn, TSDB_FQDN_LEN);
if (mndBuildAlterMnodeTypeRedoAction(pTrans, &alterReq, &createEpset) != 0) return -1;
return 0;
}
static int32_t mndCreateMnode(SMnode *pMnode, SRpcMsg *pReq, SDnodeObj *pDnode, SMCreateMnodeReq *pCreate) {
int32_t code = -1;
......
......@@ -180,7 +180,7 @@ static int32_t mndSetCreateQnodeUndoLogs(STrans *pTrans, SQnodeObj *pObj) {
return 0;
}
static int32_t mndSetCreateQnodeCommitLogs(STrans *pTrans, SQnodeObj *pObj) {
int32_t mndSetCreateQnodeCommitLogs(STrans *pTrans, SQnodeObj *pObj) {
SSdbRaw *pCommitRaw = mndQnodeActionEncode(pObj);
if (pCommitRaw == NULL) return -1;
if (mndTransAppendCommitlog(pTrans, pCommitRaw) != 0) return -1;
......@@ -188,7 +188,11 @@ static int32_t mndSetCreateQnodeCommitLogs(STrans *pTrans, SQnodeObj *pObj) {
return 0;
}
static int32_t mndSetCreateQnodeRedoActions(STrans *pTrans, SDnodeObj *pDnode, SQnodeObj *pObj) {
bool mndQnodeInDnode(SQnodeObj *pQnode, int32_t dnodeId) {
return pQnode->pDnode->id == dnodeId;
}
int32_t mndSetCreateQnodeRedoActions(STrans *pTrans, SDnodeObj *pDnode, SQnodeObj *pObj) {
SDCreateQnodeReq createReq = {0};
createReq.dnodeId = pDnode->id;
......
......@@ -546,6 +546,7 @@ static void mndTransDropData(STrans *pTrans) {
pTrans->param = NULL;
pTrans->paramLen = 0;
}
(void)taosThreadMutexDestroy(&pTrans->mutex);
}
static int32_t mndTransActionDelete(SSdb *pSdb, STrans *pTrans, bool callFunc) {
......@@ -643,7 +644,7 @@ STrans *mndTransCreate(SMnode *pMnode, ETrnPolicy policy, ETrnConflct conflict,
pTrans->stage = TRN_STAGE_PREPARE;
pTrans->policy = policy;
pTrans->conflict = conflict;
pTrans->exec = TRN_EXEC_PRARLLEL;
pTrans->exec = TRN_EXEC_PARALLEL;
pTrans->createdTime = taosGetTimestampMs();
pTrans->redoActions = taosArrayInit(TRANS_ARRAY_SIZE, sizeof(STransAction));
pTrans->undoActions = taosArrayInit(TRANS_ARRAY_SIZE, sizeof(STransAction));
......@@ -651,6 +652,7 @@ STrans *mndTransCreate(SMnode *pMnode, ETrnPolicy policy, ETrnConflct conflict,
pTrans->pRpcArray = taosArrayInit(1, sizeof(SRpcHandleInfo));
pTrans->mTraceId = pReq ? TRACE_GET_ROOTID(&pReq->info.traceId) : 0;
taosInitRWLatch(&pTrans->lockRpcArray);
taosThreadMutexInit(&pTrans->mutex, NULL);
if (pTrans->redoActions == NULL || pTrans->undoActions == NULL || pTrans->commitActions == NULL ||
pTrans->pRpcArray == NULL) {
......@@ -793,6 +795,8 @@ void mndTransSetDbName(STrans *pTrans, const char *dbname, const char *stbname)
void mndTransSetSerial(STrans *pTrans) { pTrans->exec = TRN_EXEC_SERIAL; }
void mndTransSetParallel(STrans *pTrans) { pTrans->exec = TRN_EXEC_PARALLEL; }
void mndTransSetOper(STrans *pTrans, EOperType oper) { pTrans->oper = oper; }
static int32_t mndTransSync(SMnode *pMnode, STrans *pTrans) {
......@@ -1307,7 +1311,13 @@ static int32_t mndTransExecuteRedoActionsSerial(SMnode *pMnode, STrans *pTrans)
int32_t code = 0;
int32_t numOfActions = taosArrayGetSize(pTrans->redoActions);
if (numOfActions == 0) return code;
if (pTrans->redoActionPos >= numOfActions) return code;
taosThreadMutexLock(&pTrans->mutex);
if (pTrans->redoActionPos >= numOfActions) {
taosThreadMutexUnlock(&pTrans->mutex);
return code;
}
mInfo("trans:%d, execute %d actions serial, current redoAction:%d", pTrans->id, numOfActions, pTrans->redoActionPos);
......@@ -1377,6 +1387,8 @@ static int32_t mndTransExecuteRedoActionsSerial(SMnode *pMnode, STrans *pTrans)
}
}
taosThreadMutexUnlock(&pTrans->mutex);
return code;
}
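/*
 * A minimal standalone sketch of the guard pattern introduced above, using
 * raw pthreads in place of the taosThread wrappers; Trans and
 * execRedoActionsSerial are illustrative names, not part of the tree.
 */
#include <pthread.h>

typedef struct {
  pthread_mutex_t mutex;
  int             redoActionPos;
  int             numOfActions;
} Trans;

static int execRedoActionsSerial(Trans *t) {
  pthread_mutex_lock(&t->mutex);            // serialize concurrent executors
  if (t->redoActionPos >= t->numOfActions) {
    pthread_mutex_unlock(&t->mutex);        // every early return releases the lock
    return 0;
  }
  /* ... execute actions t->redoActionPos .. t->numOfActions - 1 ... */
  pthread_mutex_unlock(&t->mutex);
  return 0;
}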
......
......@@ -1155,6 +1155,28 @@ int32_t mndAddCreateVnodeAction(SMnode *pMnode, STrans *pTrans, SDbObj *pDb, SVg
return 0;
}
int32_t mndRestoreAddCreateVnodeAction(SMnode *pMnode, STrans *pTrans, SDbObj *pDb, SVgObj *pVgroup, SDnodeObj *pDnode) {
STransAction action = {0};
action.epSet = mndGetDnodeEpset(pDnode);
int32_t contLen = 0;
void *pReq = mndBuildCreateVnodeReq(pMnode, pDnode, pDb, pVgroup, &contLen);
if (pReq == NULL) return -1;
action.pCont = pReq;
action.contLen = contLen;
action.msgType = TDMT_DND_CREATE_VNODE;
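// an action that fails with "vnode already exists" is treated as success,
// which keeps the restore redo action safe to replay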
action.acceptableCode = TSDB_CODE_VND_ALREADY_EXIST;
if (mndTransAppendRedoAction(pTrans, &action) != 0) {
taosMemoryFree(pReq);
return -1;
}
return 0;
}
int32_t mndAddAlterVnodeConfirmAction(SMnode *pMnode, STrans *pTrans, SDbObj *pDb, SVgObj *pVgroup) {
STransAction action = {0};
action.epSet = mndGetVgroupEpset(pMnode, pVgroup);
......@@ -1274,6 +1296,29 @@ int32_t mndAddAlterVnodeTypeAction(SMnode *pMnode, STrans *pTrans, SDbObj *pDb,
return 0;
}
int32_t mndRestoreAddAlterVnodeTypeAction(SMnode *pMnode, STrans *pTrans, SDbObj *pDb, SVgObj *pVgroup,
SDnodeObj *pDnode) {
STransAction action = {0};
action.epSet = mndGetDnodeEpset(pDnode);
int32_t contLen = 0;
void *pReq = mndBuildAlterVnodeReplicaReq(pMnode, pDb, pVgroup, pDnode->id, &contLen);
if (pReq == NULL) return -1;
action.pCont = pReq;
action.contLen = contLen;
action.msgType = TDMT_DND_ALTER_VNODE_TYPE;
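// already-a-voter is acceptable (idempotent replay); a not-caught-up reply
// asks the transaction framework to retry this action later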
action.acceptableCode = TSDB_CODE_VND_ALREADY_IS_VOTER;
action.retryCode = TSDB_CODE_VND_NOT_CATCH_UP;
if (mndTransAppendRedoAction(pTrans, &action) != 0) {
taosMemoryFree(pReq);
return -1;
}
return 0;
}
static int32_t mndAddDisableVnodeWriteAction(SMnode *pMnode, STrans *pTrans, SDbObj *pDb, SVgObj *pVgroup,
int32_t dnodeId) {
SDnodeObj *pDnode = mndAcquireDnode(pMnode, dnodeId);
......@@ -1917,7 +1962,7 @@ int32_t mndAddVgroupBalanceToTrans(SMnode *pMnode, SVgObj *pVgroup, STrans *pTra
int32_t vgid = pVgroup->vgId;
int8_t replica = pVgroup->replica;
if(pVgroup->replica <= 1) {
mInfo("trans:%d, vgid:%d no need to balance, replica:%d", pTrans->id, vgid, replica);
return -1;
}
......@@ -1951,6 +1996,19 @@ int32_t mndAddVgroupBalanceToTrans(SMnode *pMnode, SVgObj *pVgroup, STrans *pTra
return -1;
}
SDbObj *pDb = mndAcquireDb(pMnode, pVgroup->dbName);
if (pDb == NULL) {
mError("trans:%d, vgid:%d failed to be balanced to dnode:%d, because db not exist", pTrans->id, vgid, dnodeId);
return -1;
}
if (mndAddAlterVnodeConfirmAction(pMnode, pTrans, pDb, pVgroup) != 0) {
mError("trans:%d, vgid:%d failed to be balanced to dnode:%d", pTrans->id, vgid, dnodeId);
return -1;
}
mndReleaseDb(pMnode, pDb);
SSdbRaw *pRaw = mndVgroupActionEncode(pVgroup);
if (pRaw == NULL) {
mError("trans:%d, vgid:%d failed to encode action to dnode:%d", pTrans->id, vgid, dnodeId);
......@@ -1965,7 +2023,8 @@ int32_t mndAddVgroupBalanceToTrans(SMnode *pMnode, SVgObj *pVgroup, STrans *pTra
}
else
{
mInfo("trans:%d, vgid:%d cant be balanced to dnode:%d, exist:%d, online:%d", pTrans->id, vgid, dnodeId, exist, online);
mInfo("trans:%d, vgid:%d cant be balanced to dnode:%d, exist:%d, online:%d",
pTrans->id, vgid, dnodeId, exist, online);
}
return 0;
......@@ -2084,6 +2143,8 @@ int32_t mndBuildAlterVgroupAction(SMnode *pMnode, STrans *pTrans, SDbObj *pOldDb
return -1;
}
mndSortVnodeGid(&newVgroup);
{
SSdbRaw *pVgRaw = mndVgroupActionEncode(&newVgroup);
if (pVgRaw == NULL) return -1;
......@@ -2097,11 +2158,72 @@ int32_t mndBuildAlterVgroupAction(SMnode *pMnode, STrans *pTrans, SDbObj *pOldDb
return 0;
}
int32_t mndBuildRestoreAlterVgroupAction(SMnode *pMnode, STrans *pTrans, SDbObj *db, SVgObj *pVgroup,
SDnodeObj *pDnode) {
SVgObj newVgroup = {0};
memcpy(&newVgroup, pVgroup, sizeof(SVgObj));
mInfo("db:%s, vgId:%d, restore vnodes, vn:0 dnode:%d", pVgroup->dbName, pVgroup->vgId,
pVgroup->vnodeGid[0].dnodeId);
if(newVgroup.replica == 1){
int selected = 0;
for(int i = 0; i < newVgroup.replica; i++){
newVgroup.vnodeGid[i].nodeRole = TAOS_SYNC_ROLE_VOTER;
if(newVgroup.vnodeGid[i].dnodeId == pDnode->id){
selected = i;
}
}
if (mndAddCreateVnodeAction(pMnode, pTrans, db, &newVgroup, &newVgroup.vnodeGid[selected]) != 0) return -1;
}
else if(newVgroup.replica == 3){
for(int i = 0; i < newVgroup.replica; i++){
if(newVgroup.vnodeGid[i].dnodeId == pDnode->id){
newVgroup.vnodeGid[i].nodeRole = TAOS_SYNC_ROLE_LEARNER;
}
else{
newVgroup.vnodeGid[i].nodeRole = TAOS_SYNC_ROLE_VOTER;
}
}
if (mndRestoreAddCreateVnodeAction(pMnode, pTrans, db, &newVgroup, pDnode) != 0) return -1;
for(int i = 0; i < newVgroup.replica; i++){
newVgroup.vnodeGid[i].nodeRole = TAOS_SYNC_ROLE_VOTER;
if(newVgroup.vnodeGid[i].dnodeId == pDnode->id){
}
}
if (mndRestoreAddAlterVnodeTypeAction(pMnode, pTrans, db, &newVgroup, pDnode) != 0)
return -1;
}
SSdbRaw *pVgRaw = mndVgroupActionEncode(&newVgroup);
if (pVgRaw == NULL) return -1;
if (mndTransAppendCommitlog(pTrans, pVgRaw) != 0) {
sdbFreeRaw(pVgRaw);
return -1;
}
(void)sdbSetRawStatus(pVgRaw, SDB_STATUS_READY);
return 0;
}
static int32_t mndAddAdjustVnodeHashRangeAction(SMnode *pMnode, STrans *pTrans, SDbObj *pDb, SVgObj *pVgroup) {
return 0;
}
static int32_t mndSplitVgroup(SMnode *pMnode, SRpcMsg *pReq, SDbObj *pDb, SVgObj *pVgroup) {
static int32_t mndTransCommitVgStatus(STrans *pTrans, SVgObj *pVg, ESdbStatus vgStatus) {
SSdbRaw *pRaw = mndVgroupActionEncode(pVg);
if (pRaw == NULL) goto _err;
if (mndTransAppendCommitlog(pTrans, pRaw) != 0) goto _err;
(void)sdbSetRawStatus(pRaw, vgStatus);
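// the commit log now owns pRaw; clearing the local pointer keeps the
// _err path from double-freeing it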
pRaw = NULL;
return 0;
_err:
sdbFreeRaw(pRaw);
return -1;
}
int32_t mndSplitVgroup(SMnode *pMnode, SRpcMsg *pReq, SDbObj *pDb, SVgObj *pVgroup) {
int32_t code = -1;
STrans *pTrans = NULL;
SSdbRaw *pRaw = NULL;
......@@ -2162,6 +2284,7 @@ static int32_t mndSplitVgroup(SMnode *pMnode, SRpcMsg *pReq, SDbObj *pDb, SVgObj
mInfo("vgId:%d, vnode:%d dnode:%d", newVg2.vgId, i, newVg2.vnodeGid[i].dnodeId);
}
// alter vgId and hash range
int32_t maxVgId = sdbGetMaxId(pMnode->pSdb, SDB_VGROUP);
if (mndAddAlterVnodeHashRangeAction(pMnode, pTrans, &newVg1, maxVgId) != 0) goto _OVER;
newVg1.vgId = maxVgId;
......@@ -2170,31 +2293,24 @@ static int32_t mndSplitVgroup(SMnode *pMnode, SRpcMsg *pReq, SDbObj *pDb, SVgObj
if (mndAddAlterVnodeHashRangeAction(pMnode, pTrans, &newVg2, maxVgId) != 0) goto _OVER;
newVg2.vgId = maxVgId;
if (mndAddAlterVnodeConfirmAction(pMnode, pTrans, pDb, &newVg1) != 0) goto _OVER;
if (mndAddAlterVnodeConfirmAction(pMnode, pTrans, pDb, &newVg2) != 0) goto _OVER;
// adjust vgroup replica
if (pDb->cfg.replications != newVg1.replica) {
if (mndBuildAlterVgroupAction(pMnode, pTrans, pDb, pDb, &newVg1, pArray) != 0) goto _OVER;
} else {
if (mndTransCommitVgStatus(pTrans, &newVg1, SDB_STATUS_READY) < 0) goto _OVER;
}
if (pDb->cfg.replications != newVg2.replica) {
if (mndBuildAlterVgroupAction(pMnode, pTrans, pDb, pDb, &newVg2, pArray) != 0) goto _OVER;
} else {
if (mndTransCommitVgStatus(pTrans, &newVg2, SDB_STATUS_READY) < 0) goto _OVER;
}
pRaw = mndVgroupActionEncode(&newVg1);
if (pRaw == NULL) goto _OVER;
if (mndTransAppendCommitlog(pTrans, pRaw) != 0) goto _OVER;
(void)sdbSetRawStatus(pRaw, SDB_STATUS_READY);
pRaw = NULL;
pRaw = mndVgroupActionEncode(&newVg2);
if (pRaw == NULL) goto _OVER;
if (mndTransAppendCommitlog(pTrans, pRaw) != 0) goto _OVER;
(void)sdbSetRawStatus(pRaw, SDB_STATUS_READY);
pRaw = NULL;
pRaw = mndVgroupActionEncode(pVgroup);
if (pRaw == NULL) goto _OVER;
if (mndTransAppendCommitlog(pTrans, pRaw) != 0) goto _OVER;
(void)sdbSetRawStatus(pRaw, SDB_STATUS_DROPPED);
pRaw = NULL;
if (mndTransCommitVgStatus(pTrans, pVgroup, SDB_STATUS_DROPPED) < 0) goto _OVER;
memcpy(&dbObj, pDb, sizeof(SDbObj));
if (dbObj.cfg.pRetensions != NULL) {
......@@ -2221,37 +2337,13 @@ _OVER:
return code;
}
static int32_t mndProcessSplitVgroupMsg(SRpcMsg *pReq) {
SMnode *pMnode = pReq->info.node;
int32_t code = -1;
SVgObj *pVgroup = NULL;
SDbObj *pDb = NULL;
SSplitVgroupReq req = {0};
if (tDeserializeSSplitVgroupReq(pReq->pCont, pReq->contLen, &req) != 0) {
terrno = TSDB_CODE_INVALID_MSG;
goto _OVER;
}
mInfo("vgId:%d, start to split", req.vgId);
if (mndCheckOperPrivilege(pMnode, pReq->info.conn.user, MND_OPER_SPLIT_VGROUP) != 0) {
goto _OVER;
}
pVgroup = mndAcquireVgroup(pMnode, req.vgId);
if (pVgroup == NULL) goto _OVER;
pDb = mndAcquireDb(pMnode, pVgroup->dbName);
if (pDb == NULL) goto _OVER;
extern int32_t mndProcessSplitVgroupMsgImp(SRpcMsg *pReq);
code = mndSplitVgroup(pMnode, pReq, pDb, pVgroup);
if (code == 0) code = TSDB_CODE_ACTION_IN_PROGRESS;
static int32_t mndProcessSplitVgroupMsg(SRpcMsg *pReq) { return mndProcessSplitVgroupMsgImp(pReq); }
_OVER:
mndReleaseVgroup(pMnode, pVgroup);
mndReleaseDb(pMnode, pDb);
return code;
}
#ifndef TD_ENTERPRISE
int32_t mndProcessSplitVgroupMsgImp(SRpcMsg *pReq) { return 0; }
#endif
static int32_t mndSetBalanceVgroupInfoToTrans(SMnode *pMnode, STrans *pTrans, SDbObj *pDb, SVgObj *pVgroup,
SDnodeObj *pSrc, SDnodeObj *pDst) {
......@@ -2439,6 +2531,13 @@ _OVER:
bool mndVgroupInDb(SVgObj *pVgroup, int64_t dbUid) { return !pVgroup->isTsma && pVgroup->dbUid == dbUid; }
bool mndVgroupInDnode(SVgObj *pVgroup, int32_t dnodeId) {
for(int i = 0; i < pVgroup->replica; i++){
if(pVgroup->vnodeGid[i].dnodeId == dnodeId) return true;
}
return false;
}
static void *mndBuildCompactVnodeReq(SMnode *pMnode, SDbObj *pDb, SVgObj *pVgroup, int32_t *pContLen, int64_t compactTs,
STimeWindow tw) {
SCompactVnodeReq compactReq = {0};
......
......@@ -84,6 +84,7 @@ target_include_directories(
PUBLIC "inc"
PUBLIC "src/inc"
PUBLIC "${TD_SOURCE_DIR}/include/libs/scalar"
PUBLIC "${TD_SOURCE_DIR}/contrib/rocksdb/include"
)
target_link_libraries(
vnode
......@@ -100,6 +101,7 @@ target_link_libraries(
# PUBLIC bdb
# PUBLIC scalar
PUBLIC rocksdb
PUBLIC transport
PUBLIC stream
PUBLIC index
......
......@@ -182,7 +182,7 @@ int32_t tsdbSetTableList(STsdbReader *pReader, const void *pTableList, int32_t n
int32_t tsdbReaderOpen(SVnode *pVnode, SQueryTableDataCond *pCond, void *pTableList, int32_t numOfTables,
SSDataBlock *pResBlock, STsdbReader **ppReader, const char *idstr, bool countOnly);
void tsdbReaderSetId(STsdbReader* pReader, const char* idstr);
void tsdbReaderSetId(STsdbReader *pReader, const char *idstr);
void tsdbReaderClose(STsdbReader *pReader);
int32_t tsdbNextDataBlock(STsdbReader *pReader, bool *hasNext);
int32_t tsdbRetrieveDatablockSMA(STsdbReader *pReader, SSDataBlock *pDataBlock, bool *allHave);
......@@ -196,8 +196,9 @@ void *tsdbGetIvtIdx(SMeta *pMeta);
uint64_t getReaderMaxVersion(STsdbReader *pReader);
int32_t tsdbCacherowsReaderOpen(void *pVnode, int32_t type, void *pTableIdList, int32_t numOfTables, int32_t numOfCols,
uint64_t suid, void **pReader, const char *idstr);
int32_t tsdbRetrieveCacheRows(void *pReader, SSDataBlock *pResBlock, const int32_t *slotIds, SArray *pTableUids);
SArray *pCidList, int32_t *pSlotIds, uint64_t suid, void **pReader, const char *idstr);
int32_t tsdbRetrieveCacheRows(void *pReader, SSDataBlock *pResBlock, const int32_t *slotIds, const int32_t *dstSlotIds,
SArray *pTableUids);
void *tsdbCacherowsReaderClose(void *pReader);
int32_t tsdbGetTableSchema(SVnode *pVnode, int64_t uid, STSchema **pSchema, int64_t *suid);
......@@ -343,6 +344,7 @@ struct SVnodeCfg {
SVnodeStats vndStats;
uint32_t hashBegin;
uint32_t hashEnd;
bool hashChange;
int16_t sttTrigger;
int16_t hashPrefix;
int16_t hashSuffix;
......
......@@ -343,6 +343,16 @@ struct STsdbFS {
SArray *aDFileSet; // SArray<SDFileSet>
};
typedef struct {
rocksdb_t *db;
rocksdb_options_t *options;
rocksdb_flushoptions_t *flushoptions;
rocksdb_writeoptions_t *writeoptions;
rocksdb_readoptions_t *readoptions;
rocksdb_writebatch_t *writebatch;
TdThreadMutex rMutex;
} SRocksCache;
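/*
 * A minimal sketch of how the handles in SRocksCache might be initialized
 * with the RocksDB C API; rocksCacheOpen is a hypothetical helper name and
 * error handling is trimmed to the essentials.
 */
static int32_t rocksCacheOpen(SRocksCache *pCache, const char *path) {
  char *err = NULL;
  pCache->options = rocksdb_options_create();
  rocksdb_options_set_create_if_missing(pCache->options, 1);
  pCache->db = rocksdb_open(pCache->options, path, &err);
  if (err != NULL) {  // RocksDB allocates the error string on failure
    rocksdb_free(err);
    return -1;
  }
  pCache->flushoptions = rocksdb_flushoptions_create();
  pCache->writeoptions = rocksdb_writeoptions_create();
  pCache->readoptions = rocksdb_readoptions_create();
  pCache->writebatch = rocksdb_writebatch_create();
  taosThreadMutexInit(&pCache->rMutex, NULL);
  return 0;
}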
struct STsdb {
char *path;
SVnode *pVnode;
......@@ -355,6 +365,7 @@ struct STsdb {
TdThreadMutex lruMutex;
SLRUCache *biCache;
TdThreadMutex biMutex;
SRocksCache rCache;
};
struct TSDBKEY {
......@@ -777,6 +788,8 @@ typedef struct SCacheRowsReader {
uint64_t suid;
char **transferBuf; // todo remove it soon
int32_t numOfCols;
SArray *pCidList;
int32_t *pSlotIds;
int32_t type;
int32_t tableIndex; // currently returned result tables
STableKeyInfo *pTableList; // table id list
......@@ -796,6 +809,10 @@ typedef struct {
int32_t tsdbOpenCache(STsdb *pTsdb);
void tsdbCloseCache(STsdb *pTsdb);
int32_t tsdbCacheUpdate(STsdb *pTsdb, tb_uid_t suid, tb_uid_t uid, TSDBROW *row);
int32_t tsdbCacheGet(STsdb *pTsdb, tb_uid_t uid, SArray *pLastArray, SCacheRowsReader *pr, int32_t ltype);
int32_t tsdbCacheDel(STsdb *pTsdb, tb_uid_t suid, tb_uid_t uid, TSKEY sKey, TSKEY eKey);
int32_t tsdbCacheInsertLast(SLRUCache *pCache, tb_uid_t uid, TSDBROW *row, STsdb *pTsdb);
int32_t tsdbCacheInsertLastrow(SLRUCache *pCache, STsdb *pTsdb, tb_uid_t uid, TSDBROW *row, bool dup);
int32_t tsdbCacheGetLastH(SLRUCache *pCache, tb_uid_t uid, SCacheRowsReader *pr, LRUHandle **h);
......
......@@ -19,6 +19,7 @@
#include "executor.h"
#include "filter.h"
#include "qworker.h"
#include "rocksdb/c.h"
#include "sync.h"
#include "tRealloc.h"
#include "tchecksum.h"
......@@ -177,6 +178,7 @@ int tsdbClose(STsdb** pTsdb);
int32_t tsdbBegin(STsdb* pTsdb);
int32_t tsdbPrepareCommit(STsdb* pTsdb);
int32_t tsdbCommit(STsdb* pTsdb, SCommitInfo* pInfo);
int32_t tsdbCacheCommit(STsdb* pTsdb);
int32_t tsdbCompact(STsdb* pTsdb, SCompactInfo* pInfo);
int32_t tsdbFinishCommit(STsdb* pTsdb);
int32_t tsdbRollbackCommit(STsdb* pTsdb);
......@@ -193,9 +195,9 @@ STQ* tqOpen(const char* path, SVnode* pVnode);
void tqClose(STQ*);
int tqPushMsg(STQ*, void* msg, int32_t msgLen, tmsg_t msgType, int64_t ver);
int tqRegisterPushHandle(STQ* pTq, void* pHandle, const SMqPollReq* pRequest, SRpcMsg* pRpcMsg, SMqDataRsp* pDataRsp,
int32_t type);
int tqUnregisterPushHandle(STQ* pTq, const char* pKey, int32_t keyLen, uint64_t consumerId, bool rspConsumer);
int tqStartStreamTasks(STQ* pTq); // restore all stream tasks after vnode launching completed.
int tqStartStreamTasks(STQ* pTq); // restore all stream tasks after vnode launching completed.
int tqCommit(STQ*);
int32_t tqUpdateTbUidList(STQ* pTq, const SArray* tbUidList, bool isAdd);
......
......@@ -301,8 +301,12 @@ int32_t tqProcessSeekReq(STQ* pTq, int64_t sversion, char* msg, int32_t msgLen)
}
// save the new offset value
tqDebug("vgId:%d sub:%s seek to %" PRId64 " prev offset:%" PRId64, vgId, pOffset->subKey, pOffset->val.version,
pSavedOffset->val.version);
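// pSavedOffset may legitimately be NULL when no offset has been committed
// for this subKey yet, so guard the log statement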
if (pSavedOffset != NULL) {
tqDebug("vgId:%d sub:%s seek to:%" PRId64 " prev offset:%" PRId64, vgId, pOffset->subKey, pOffset->val.version,
pSavedOffset->val.version);
} else {
tqDebug("vgId:%d sub:%s seek to:%"PRId64" not saved yet", vgId, pOffset->subKey, pOffset->val.version);
}
if (tqOffsetWrite(pTq->pOffsetStore, pOffset) < 0) {
tqError("failed to save offset, vgId:%d sub:%s seek to %" PRId64, vgId, pOffset->subKey, pOffset->val.version);
......
......@@ -21,47 +21,30 @@
#define HASTYPE(_type, _t) (((_type) & (_t)) == (_t))
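/*
 * HASTYPE(x, t) is true only when every bit of t is set in x. A tiny
 * self-contained demo with illustrative flag values (the real CACHESCAN_*
 * constants are defined elsewhere in the tree):
 */
#include <assert.h>
static void hastypeDemo(void) {
  enum { RETRIEVE_ALL = 0x1, RETRIEVE_LAST = 0x8 };
  assert(HASTYPE(RETRIEVE_ALL | RETRIEVE_LAST, RETRIEVE_LAST));  // all bits of t present
  assert(!HASTYPE(RETRIEVE_ALL, RETRIEVE_LAST));                 // t's bit missing
}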
static int32_t saveOneRow(SArray* pRow, SSDataBlock* pBlock, SCacheRowsReader* pReader, const int32_t* slotIds,
void** pRes, const char* idStr) {
const int32_t* dstSlotIds, void** pRes, const char* idStr) {
int32_t numOfRows = pBlock->info.rows;
bool allNullRow = true;
if (HASTYPE(pReader->type, CACHESCAN_RETRIEVE_LAST)) {
bool allNullRow = true;
for (int32_t i = 0; i < pReader->numOfCols; ++i) {
SColumnInfoData* pColInfoData = taosArrayGet(pBlock->pDataBlock, i);
SColumnInfoData* pColInfoData = taosArrayGet(pBlock->pDataBlock, dstSlotIds[i]);
SFirstLastRes* p = (SFirstLastRes*)varDataVal(pRes[i]);
int32_t slotId = slotIds[i];
SLastCol* pColVal = (SLastCol*)taosArrayGet(pRow, i);
if (slotIds[i] == -1) { // the primary timestamp
SLastCol* pColVal = (SLastCol*)taosArrayGet(pRow, 0);
p->ts = pColVal->ts;
p->bytes = TSDB_KEYSIZE;
*(int64_t*)p->buf = pColVal->ts;
allNullRow = false;
} else {
int32_t slotId = slotIds[i];
// add check for null value, caused by the modification of table schema (new column added).
if (slotId >= taosArrayGetSize(pRow)) {
p->ts = 0;
p->isNull = true;
colDataSetNULL(pColInfoData, numOfRows);
continue;
}
SLastCol* pColVal = (SLastCol*)taosArrayGet(pRow, slotId);
p->ts = pColVal->ts;
p->isNull = !COL_VAL_IS_VALUE(&pColVal->colVal);
allNullRow = p->isNull & allNullRow;
p->ts = pColVal->ts;
p->isNull = !COL_VAL_IS_VALUE(&pColVal->colVal);
allNullRow = p->isNull & allNullRow;
if (!p->isNull) {
if (IS_VAR_DATA_TYPE(pColVal->colVal.type)) {
varDataSetLen(p->buf, pColVal->colVal.value.nData);
if (!p->isNull) {
if (IS_VAR_DATA_TYPE(pColVal->colVal.type)) {
varDataSetLen(p->buf, pColVal->colVal.value.nData);
memcpy(varDataVal(p->buf), pColVal->colVal.value.pData, pColVal->colVal.value.nData);
p->bytes = pColVal->colVal.value.nData + VARSTR_HEADER_SIZE; // binary needs to plus the header size
} else {
memcpy(p->buf, &pColVal->colVal.value, pReader->pSchema->columns[slotId].bytes);
p->bytes = pReader->pSchema->columns[slotId].bytes;
}
memcpy(varDataVal(p->buf), pColVal->colVal.value.pData, pColVal->colVal.value.nData);
p->bytes = pColVal->colVal.value.nData + VARSTR_HEADER_SIZE; // binary needs to plus the header size
} else {
memcpy(p->buf, &pColVal->colVal.value, pReader->pSchema->columns[slotId].bytes);
p->bytes = pReader->pSchema->columns[slotId].bytes;
}
}
......@@ -74,36 +57,31 @@ static int32_t saveOneRow(SArray* pRow, SSDataBlock* pBlock, SCacheRowsReader* p
pBlock->info.rows += allNullRow ? 0 : 1;
} else if (HASTYPE(pReader->type, CACHESCAN_RETRIEVE_LAST_ROW)) {
for (int32_t i = 0; i < pReader->numOfCols; ++i) {
SColumnInfoData* pColInfoData = taosArrayGet(pBlock->pDataBlock, i);
SColumnInfoData* pColInfoData = taosArrayGet(pBlock->pDataBlock, dstSlotIds[i]);
if (slotIds[i] == -1) {
SLastCol* pColVal = (SLastCol*)taosArrayGet(pRow, 0);
colDataSetVal(pColInfoData, numOfRows, (const char*)&pColVal->ts, false);
} else {
int32_t slotId = slotIds[i];
// add check for null value, caused by the modification of table schema (new column added).
if (slotId >= taosArrayGetSize(pRow)) {
colDataSetNULL(pColInfoData, numOfRows);
continue;
}
SLastCol* pColVal = (SLastCol*)taosArrayGet(pRow, slotId);
SColVal* pVal = &pColVal->colVal;
int32_t slotId = slotIds[i];
SLastCol* pColVal = (SLastCol*)taosArrayGet(pRow, i);
SColVal* pVal = &pColVal->colVal;
if (IS_VAR_DATA_TYPE(pColVal->colVal.type)) {
if (!COL_VAL_IS_VALUE(&pColVal->colVal)) {
colDataSetNULL(pColInfoData, numOfRows);
} else {
varDataSetLen(pReader->transferBuf[slotId], pVal->value.nData);
memcpy(varDataVal(pReader->transferBuf[slotId]), pVal->value.pData, pVal->value.nData);
colDataSetVal(pColInfoData, numOfRows, pReader->transferBuf[slotId], false);
}
if (COL_VAL_IS_NONE(&pColVal->colVal)) {
continue;
}
allNullRow = false;
if (IS_VAR_DATA_TYPE(pColVal->colVal.type)) {
if (!COL_VAL_IS_VALUE(&pColVal->colVal)) {
colDataSetNULL(pColInfoData, numOfRows);
} else {
colDataSetVal(pColInfoData, numOfRows, (const char*)&pVal->value.val, !COL_VAL_IS_VALUE(pVal));
varDataSetLen(pReader->transferBuf[slotId], pVal->value.nData);
memcpy(varDataVal(pReader->transferBuf[slotId]), pVal->value.pData, pVal->value.nData);
colDataSetVal(pColInfoData, numOfRows, pReader->transferBuf[slotId], false);
}
} else {
colDataSetVal(pColInfoData, numOfRows, (const char*)&pVal->value.val, !COL_VAL_IS_VALUE(pVal));
}
}
pBlock->info.rows += 1;
pBlock->info.rows += allNullRow ? 0 : 1;
} else {
tsdbError("invalid retrieve type:%d, %s", pReader->type, idStr);
return TSDB_CODE_INVALID_PARA;
......@@ -143,7 +121,7 @@ static int32_t setTableSchema(SCacheRowsReader* p, uint64_t suid, const char* id
}
int32_t tsdbCacherowsReaderOpen(void* pVnode, int32_t type, void* pTableIdList, int32_t numOfTables, int32_t numOfCols,
uint64_t suid, void** pReader, const char* idstr) {
SArray* pCidList, int32_t* pSlotIds, uint64_t suid, void** pReader, const char* idstr) {
*pReader = NULL;
SCacheRowsReader* p = taosMemoryCalloc(1, sizeof(SCacheRowsReader));
if (p == NULL) {
......@@ -155,6 +133,8 @@ int32_t tsdbCacherowsReaderOpen(void* pVnode, int32_t type, void* pTableIdList,
p->pTsdb = p->pVnode->pTsdb;
p->verRange = (SVersionRange){.minVer = 0, .maxVer = UINT64_MAX};
p->numOfCols = numOfCols;
p->pCidList = pCidList;
p->pSlotIds = pSlotIds;
p->suid = suid;
if (numOfTables == 0) {
......@@ -226,32 +206,9 @@ void* tsdbCacherowsReaderClose(void* pReader) {
return NULL;
}
static int32_t doExtractCacheRow(SCacheRowsReader* pr, SLRUCache* lruCache, uint64_t uid, SArray** pRow,
LRUHandle** h) {
int32_t code = TSDB_CODE_SUCCESS;
*pRow = NULL;
if (HASTYPE(pr->type, CACHESCAN_RETRIEVE_LAST_ROW)) {
code = tsdbCacheGetLastrowH(lruCache, uid, pr, h);
} else {
code = tsdbCacheGetLastH(lruCache, uid, pr, h);
}
if (code != TSDB_CODE_SUCCESS) {
return code;
}
// no data in the table of Uid
if (*h != NULL) {
*pRow = (SArray*)taosLRUCacheValue(lruCache, *h);
}
return code;
}
static void freeItem(void* pItem) {
SLastCol* pCol = (SLastCol*)pItem;
if (IS_VAR_DATA_TYPE(pCol->colVal.type)) {
if (IS_VAR_DATA_TYPE(pCol->colVal.type) && pCol->colVal.value.pData) {
taosMemoryFree(pCol->colVal.value.pData);
}
}
......@@ -277,19 +234,17 @@ static int32_t tsdbCacheQueryReseek(void* pQHandle) {
}
}
int32_t tsdbRetrieveCacheRows(void* pReader, SSDataBlock* pResBlock, const int32_t* slotIds, SArray* pTableUidList) {
int32_t tsdbRetrieveCacheRows(void* pReader, SSDataBlock* pResBlock, const int32_t* slotIds, const int32_t* dstSlotIds,
SArray* pTableUidList) {
if (pReader == NULL || pResBlock == NULL) {
return TSDB_CODE_INVALID_PARA;
}
SCacheRowsReader* pr = pReader;
int32_t code = TSDB_CODE_SUCCESS;
SLRUCache* lruCache = pr->pVnode->pTsdb->lruCache;
LRUHandle* h = NULL;
SArray* pRow = NULL;
bool hasRes = false;
SArray* pLastCols = NULL;
int32_t code = TSDB_CODE_SUCCESS;
SArray* pRow = taosArrayInit(TARRAY_SIZE(pr->pCidList), sizeof(SLastCol));
bool hasRes = false;
SArray* pLastCols = NULL;
void** pRes = taosMemoryCalloc(pr->numOfCols, POINTER_BYTES);
if (pRes == NULL) {
......@@ -298,20 +253,22 @@ int32_t tsdbRetrieveCacheRows(void* pReader, SSDataBlock* pResBlock, const int32
}
for (int32_t j = 0; j < pr->numOfCols; ++j) {
pRes[j] = taosMemoryCalloc(
1, sizeof(SFirstLastRes) + pr->pSchema->columns[-1 == slotIds[j] ? 0 : slotIds[j]].bytes + VARSTR_HEADER_SIZE);
pRes[j] =
taosMemoryCalloc(1, sizeof(SFirstLastRes) + pr->pSchema->columns[/*-1 == slotIds[j] ? 0 : */ slotIds[j]].bytes +
VARSTR_HEADER_SIZE);
SFirstLastRes* p = (SFirstLastRes*)varDataVal(pRes[j]);
p->ts = INT64_MIN;
}
pLastCols = taosArrayInit(pr->pSchema->numOfCols, sizeof(SLastCol));
pLastCols = taosArrayInit(pr->numOfCols, sizeof(SLastCol));
if (pLastCols == NULL) {
code = TSDB_CODE_OUT_OF_MEMORY;
goto _end;
}
for (int32_t i = 0; i < pr->pSchema->numOfCols; ++i) {
struct STColumn* pCol = &pr->pSchema->columns[i];
for (int32_t i = 0; i < pr->numOfCols; ++i) {
int32_t slotId = slotIds[i];
struct STColumn* pCol = &pr->pSchema->columns[slotId];
SLastCol p = {.ts = INT64_MIN, .colVal.type = pCol->type, .colVal.flag = CV_FLAG_NULL};
if (IS_VAR_DATA_TYPE(pCol->type)) {
......@@ -328,6 +285,8 @@ int32_t tsdbRetrieveCacheRows(void* pReader, SSDataBlock* pResBlock, const int32
pr->pDataFReader = NULL;
pr->pDataFReaderLast = NULL;
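// 1 selects last() entries, 0 selects last_row(); this assumes
// CACHESCAN_RETRIEVE_LAST occupies bit 3 of the retrieve type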
int32_t ltype = (pr->type & CACHESCAN_RETRIEVE_LAST) >> 3;
// retrieve the only one last row of all tables in the uid list.
if (HASTYPE(pr->type, CACHESCAN_RETRIEVE_TYPE_SINGLE)) {
int64_t st = taosGetTimestampUs();
......@@ -335,16 +294,14 @@ int32_t tsdbRetrieveCacheRows(void* pReader, SSDataBlock* pResBlock, const int32
for (int32_t i = 0; i < pr->numOfTables; ++i) {
STableKeyInfo* pKeyInfo = &pr->pTableList[i];
code = doExtractCacheRow(pr, lruCache, pKeyInfo->uid, &pRow, &h);
if (code != TSDB_CODE_SUCCESS) {
goto _end;
}
if (h == NULL) {
tsdbCacheGet(pr->pTsdb, pKeyInfo->uid, pRow, pr, ltype);
if (TARRAY_SIZE(pRow) <= 0) {
taosArrayClearEx(pRow, freeItem);
continue;
}
if (taosArrayGetSize(pRow) <= 0) {
tsdbCacheRelease(lruCache, h);
SLastCol* pColVal = (SLastCol*)taosArrayGet(pRow, 0);
if (COL_VAL_IS_NONE(&pColVal->colVal)) {
taosArrayClearEx(pRow, freeItem);
continue;
}
......@@ -352,47 +309,34 @@ int32_t tsdbRetrieveCacheRows(void* pReader, SSDataBlock* pResBlock, const int32
bool hasNotNullRow = true;
int64_t singleTableLastTs = INT64_MAX;
for (int32_t k = 0; k < pr->numOfCols; ++k) {
int32_t slotId = slotIds[k];
if (slotId == -1) { // the primary timestamp
SLastCol* p = taosArrayGet(pLastCols, 0);
SLastCol* pCol = (SLastCol*)taosArrayGet(pRow, 0);
if (pCol->ts > p->ts) {
hasRes = true;
p->ts = pCol->ts;
p->colVal = pCol->colVal;
singleTableLastTs = pCol->ts;
}
} else {
SLastCol* p = taosArrayGet(pLastCols, slotId);
SLastCol* pColVal = (SLastCol*)taosArrayGet(pRow, slotId);
if (pColVal->ts > p->ts) {
if (!COL_VAL_IS_VALUE(&pColVal->colVal) && HASTYPE(pr->type, CACHESCAN_RETRIEVE_LAST)) {
if (!COL_VAL_IS_VALUE(&p->colVal)) {
hasNotNullRow = false;
}
continue;
}
SLastCol* p = taosArrayGet(pLastCols, k);
SLastCol* pColVal = (SLastCol*)taosArrayGet(pRow, k);
hasRes = true;
p->ts = pColVal->ts;
if (pColVal->ts < singleTableLastTs && HASTYPE(pr->type, CACHESCAN_RETRIEVE_LAST)) {
singleTableLastTs = pColVal->ts;
if (pColVal->ts > p->ts) {
if (!COL_VAL_IS_VALUE(&pColVal->colVal) && HASTYPE(pr->type, CACHESCAN_RETRIEVE_LAST)) {
if (!COL_VAL_IS_VALUE(&p->colVal)) {
hasNotNullRow = false;
}
continue;
}
if (!IS_VAR_DATA_TYPE(pColVal->colVal.type)) {
p->colVal = pColVal->colVal;
} else {
if (COL_VAL_IS_VALUE(&pColVal->colVal)) {
memcpy(p->colVal.value.pData, pColVal->colVal.value.pData, pColVal->colVal.value.nData);
}
p->colVal.value.nData = pColVal->colVal.value.nData;
p->colVal.type = pColVal->colVal.type;
p->colVal.flag = pColVal->colVal.flag;
p->colVal.cid = pColVal->colVal.cid;
hasRes = true;
p->ts = pColVal->ts;
if (pColVal->ts < singleTableLastTs && HASTYPE(pr->type, CACHESCAN_RETRIEVE_LAST)) {
singleTableLastTs = pColVal->ts;
}
if (!IS_VAR_DATA_TYPE(pColVal->colVal.type)) {
p->colVal = pColVal->colVal;
} else {
if (COL_VAL_IS_VALUE(&pColVal->colVal)) {
memcpy(p->colVal.value.pData, pColVal->colVal.value.pData, pColVal->colVal.value.nData);
}
p->colVal.value.nData = pColVal->colVal.value.nData;
p->colVal.type = pColVal->colVal.type;
p->colVal.flag = pColVal->colVal.flag;
p->colVal.cid = pColVal->colVal.cid;
}
}
}
......@@ -414,33 +358,31 @@ int32_t tsdbRetrieveCacheRows(void* pReader, SSDataBlock* pResBlock, const int32
taosArraySet(pTableUidList, 0, &pKeyInfo->uid);
}
tsdbCacheRelease(lruCache, h);
taosArrayClearEx(pRow, freeItem);
}
if (hasRes) {
saveOneRow(pLastCols, pResBlock, pr, slotIds, pRes, pr->idstr);
saveOneRow(pLastCols, pResBlock, pr, slotIds, dstSlotIds, pRes, pr->idstr);
}
} else if (HASTYPE(pr->type, CACHESCAN_RETRIEVE_TYPE_ALL)) {
for (int32_t i = pr->tableIndex; i < pr->numOfTables; ++i) {
STableKeyInfo* pKeyInfo = &pr->pTableList[i];
code = doExtractCacheRow(pr, lruCache, pKeyInfo->uid, &pRow, &h);
if (code != TSDB_CODE_SUCCESS) {
goto _end;
}
if (h == NULL) {
tsdbCacheGet(pr->pTsdb, pKeyInfo->uid, pRow, pr, ltype);
if (TARRAY_SIZE(pRow) <= 0) {
taosArrayClearEx(pRow, freeItem);
continue;
}
if (taosArrayGetSize(pRow) <= 0) {
tsdbCacheRelease(lruCache, h);
SLastCol* pColVal = (SLastCol*)taosArrayGet(pRow, 0);
if (COL_VAL_IS_NONE(&pColVal->colVal)) {
taosArrayClearEx(pRow, freeItem);
continue;
}
saveOneRow(pRow, pResBlock, pr, slotIds, pRes, pr->idstr);
// TODO reset the pRes
saveOneRow(pRow, pResBlock, pr, slotIds, dstSlotIds, pRes, pr->idstr);
taosArrayClearEx(pRow, freeItem);
taosArrayPush(pTableUidList, &pKeyInfo->uid);
tsdbCacheRelease(lruCache, h);
pr->tableIndex += 1;
if (pResBlock->info.rows >= pResBlock->info.capacity) {
......@@ -466,6 +408,7 @@ _end:
}
taosMemoryFree(pRes);
taosArrayDestroyEx(pRow, freeItem);
taosArrayDestroyEx(pLastCols, freeItem);
return code;
}
......@@ -140,7 +140,6 @@ int32_t tsdbDeleteTableData(STsdb *pTsdb, int64_t version, tb_uid_t suid, tb_uid
SMemTable *pMemTable = pTsdb->mem;
STbData *pTbData = NULL;
SVBufPool *pPool = pTsdb->pVnode->inUse;
TSDBKEY lastKey = {.version = version, .ts = eKey};
// check if table exists
SMetaInfo info;
......@@ -181,7 +180,7 @@ int32_t tsdbDeleteTableData(STsdb *pTsdb, int64_t version, tb_uid_t suid, tb_uid
pMemTable->nDel++;
pMemTable->minVer = TMIN(pMemTable->minVer, version);
pMemTable->maxVer = TMIN(pMemTable->maxVer, version);
/*
if (TSDB_CACHE_LAST_ROW(pMemTable->pTsdb->pVnode->config) && tsdbKeyCmprFn(&lastKey, &pTbData->maxKey) >= 0) {
tsdbCacheDeleteLastrow(pTsdb->lruCache, pTbData->uid, eKey);
}
......@@ -189,6 +188,10 @@ int32_t tsdbDeleteTableData(STsdb *pTsdb, int64_t version, tb_uid_t suid, tb_uid
if (TSDB_CACHE_LAST(pMemTable->pTsdb->pVnode->config)) {
tsdbCacheDeleteLast(pTsdb->lruCache, pTbData->uid, eKey);
}
*/
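// invalidate cached last/last_row entries only when the deleted range
// [sKey, eKey] covers the table's current maxKey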
if (eKey >= pTbData->maxKey && sKey <= pTbData->maxKey) {
tsdbCacheDel(pTsdb, suid, uid, sKey, eKey);
}
tsdbTrace("vgId:%d, delete data from table suid:%" PRId64 " uid:%" PRId64 " skey:%" PRId64 " eKey:%" PRId64
" at version %" PRId64,
......@@ -284,8 +287,8 @@ bool tsdbTbDataIterNext(STbDataIter *pIter) {
int64_t tsdbCountTbDataRows(STbData *pTbData) {
SMemSkipListNode *pNode = pTbData->sl.pHead;
int64_t rowsNum = 0;
while (NULL != pNode) {
pNode = SL_GET_NODE_FORWARD(pNode, 0);
if (pNode == pTbData->sl.pTail) {
......@@ -298,17 +301,17 @@ int64_t tsdbCountTbDataRows(STbData *pTbData) {
return rowsNum;
}
void tsdbMemTableCountRows(SMemTable *pMemTable, SHashObj* pTableMap, int64_t *rowsNum) {
void tsdbMemTableCountRows(SMemTable *pMemTable, SHashObj *pTableMap, int64_t *rowsNum) {
taosRLockLatch(&pMemTable->latch);
for (int32_t i = 0; i < pMemTable->nBucket; ++i) {
STbData *pTbData = pMemTable->aBucket[i];
while (pTbData) {
void* p = taosHashGet(pTableMap, &pTbData->uid, sizeof(pTbData->uid));
void *p = taosHashGet(pTableMap, &pTbData->uid, sizeof(pTbData->uid));
if (p == NULL) {
pTbData = pTbData->next;
continue;
}
*rowsNum += tsdbCountTbDataRows(pTbData);
pTbData = pTbData->next;
}
......@@ -668,15 +671,8 @@ static int32_t tsdbInsertColDataToTable(SMemTable *pMemTable, STbData *pTbData,
if (key.ts >= pTbData->maxKey) {
pTbData->maxKey = key.ts;
if (TSDB_CACHE_LAST_ROW(pMemTable->pTsdb->pVnode->config)) {
tsdbCacheInsertLastrow(pMemTable->pTsdb->lruCache, pMemTable->pTsdb, pTbData->uid, &lRow, true);
}
}
if (TSDB_CACHE_LAST(pMemTable->pTsdb->pVnode->config)) {
tsdbCacheInsertLast(pMemTable->pTsdb->lruCache, pTbData->uid, &lRow, pMemTable->pTsdb);
}
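// a single tsdbCacheUpdate now refreshes both the last() and last_row()
// entries in the rocksdb-backed cache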
tsdbCacheUpdate(pMemTable->pTsdb, pTbData->suid, pTbData->uid, &lRow);
// SMemTable
pMemTable->minKey = TMIN(pMemTable->minKey, pTbData->minKey);
......@@ -736,15 +732,8 @@ static int32_t tsdbInsertRowDataToTable(SMemTable *pMemTable, STbData *pTbData,
if (key.ts >= pTbData->maxKey) {
pTbData->maxKey = key.ts;
if (TSDB_CACHE_LAST_ROW(pMemTable->pTsdb->pVnode->config)) {
tsdbCacheInsertLastrow(pMemTable->pTsdb->lruCache, pMemTable->pTsdb, pTbData->uid, &lRow, true);
}
}
if (TSDB_CACHE_LAST(pMemTable->pTsdb->pVnode->config)) {
tsdbCacheInsertLast(pMemTable->pTsdb->lruCache, pTbData->uid, &lRow, pMemTable->pTsdb);
}
tsdbCacheUpdate(pMemTable->pTsdb, pTbData->suid, pTbData->uid, &lRow);
// SMemTable
pMemTable->minKey = TMIN(pMemTable->minKey, pTbData->minKey);
......
(18 additional file diffs were collapsed by the review viewer and are not shown.)