Commit bad898cd authored by dapan1121

Merge remote-tracking branch 'origin/3.0' into feat/TD-22746

@@ -40,7 +40,7 @@ def check_docs() {
sh '''
cd ${WKC}
git reset --hard
- git clean -fxd
+ git clean -f
rm -rf examples/rust/
git remote prune origin
git fetch
@@ -86,7 +86,7 @@ def pre_test(){
git fetch
cd ${WKC}
git reset --hard
- git clean -fxd
+ git clean -f
rm -rf examples/rust/
git remote prune origin
git fetch
@@ -201,7 +201,7 @@ def pre_test_win(){
'''
bat '''
cd %WIN_COMMUNITY_ROOT%
- git clean -fxd
+ git clean -f
git reset --hard
git remote prune origin
git fetch
@@ -313,7 +313,8 @@ def pre_test_build_win() {
bat '''
cd %WIN_CONNECTOR_ROOT%
python.exe -m pip install --upgrade pip
- python -m pip install .
+ python -m pip uninstall taospy -y
+ python -m pip install taospy==2.7.6
xcopy /e/y/i/f %WIN_INTERNAL_ROOT%\\debug\\build\\lib\\taos.dll C:\\Windows\\System32
'''
return 1
@@ -331,8 +332,6 @@ def run_win_test() {
bat '''
echo "windows test ..."
cd %WIN_CONNECTOR_ROOT%
- python.exe -m pip install --upgrade pip
- python -m pip install .
xcopy /e/y/i/f %WIN_INTERNAL_ROOT%\\debug\\build\\lib\\taos.dll C:\\Windows\\System32
ls -l C:\\Windows\\System32\\taos.dll
time /t
@@ -387,7 +386,7 @@ pipeline {
}
steps {
catchError(buildResult: 'FAILURE', stageResult: 'FAILURE') {
- timeout(time: 75, unit: 'MINUTES'){
+ timeout(time: 126, unit: 'MINUTES'){
pre_test_win()
pre_test_build_win()
run_win_ctest()
......
@@ -365,6 +365,6 @@ Please follow the [contribution guidelines](CONTRIBUTING.md) to contribute to th

For more information about TDengine, you can follow us on social media and join our Discord server:

- [Discord](https://discord.com/invite/VZdSuUg4pS)
- - [Twitter](https://twitter.com/TaosData)
+ - [Twitter](https://twitter.com/TDengineDB)
- [LinkedIn](https://www.linkedin.com/company/tdengine/)
- - [YouTube](https://www.youtube.com/channel/UCmp-1U6GS_3V3hjir6Uq5DQ)
+ - [YouTube](https://www.youtube.com/@tdengine)
@@ -2,7 +2,7 @@
IF (DEFINED VERNUMBER)
SET(TD_VER_NUMBER ${VERNUMBER})
ELSE ()
- SET(TD_VER_NUMBER "3.0.2.6")
+ SET(TD_VER_NUMBER "3.0.3.0")
ENDIF ()
IF (DEFINED VERCOMPATIBLE)
......
@@ -2,7 +2,7 @@
# taosadapter
ExternalProject_Add(taosadapter
GIT_REPOSITORY https://github.com/taosdata/taosadapter.git
- GIT_TAG db6c843
+ GIT_TAG d8059ff
SOURCE_DIR "${TD_SOURCE_DIR}/tools/taosadapter"
BINARY_DIR ""
#BUILD_IN_SOURCE TRUE
......
@@ -2,7 +2,7 @@
# taos-tools
ExternalProject_Add(taos-tools
GIT_REPOSITORY https://github.com/taosdata/taos-tools.git
- GIT_TAG 1e15545
+ GIT_TAG d9ec91d
SOURCE_DIR "${TD_SOURCE_DIR}/tools/taos-tools"
BINARY_DIR ""
#BUILD_IN_SOURCE TRUE
......
@@ -204,7 +204,7 @@ group vnodeProcessReqs()
s -> s:
note right
save the requests in log store
- and wait for comfirmation or
+ and wait for confirmation or
other cases
end note
@@ -236,7 +236,7 @@ s -> s: syncAppendReqToLogStore()
s -> v: walWrite()
alt has meta req
- <- s: comfirmation
+ <- s: confirmation
else
s -> v: vnodeApplyReqs()
end
......
@@ -123,11 +123,11 @@ As a high-performance, scalable and SQL supported time-series database, TDengine

## Comparison with other databases

- - [Writing Performance Comparison of TDengine and InfluxDB](https://tdengine.com/2022/02/23/4975.html)
+ - [Writing Performance Comparison of TDengine and InfluxDB](https://tdengine.com/performance-comparison-of-tdengine-and-influxdb/)
- - [Query Performance Comparison of TDengine and InfluxDB](https://tdengine.com/2022/02/24/5120.html)
+ - [Query Performance Comparison of TDengine and InfluxDB](https://tdengine.com/query-performance-comparison-test-report-tdengine-vs-influxdb/)
- - [TDengine vs OpenTSDB](https://tdengine.com/2019/09/12/710.html)
+ - [TDengine vs OpenTSDB](https://tdengine.com/performance-tdengine-vs-opentsdb/)
- - [TDengine vs Cassandra](https://tdengine.com/2019/09/12/708.html)
+ - [TDengine vs Cassandra](https://tdengine.com/performance-tdengine-vs-cassandra/)
- - [TDengine vs InfluxDB](https://tdengine.com/2019/09/12/706.html)
+ - [TDengine vs InfluxDB](https://tdengine.com/performance-tdengine-vs-influxdb/)

## More readings

- [Introduction to Time-Series Database](https://tdengine.com/tsdb/)
......
@@ -202,16 +202,18 @@ After the installation is complete, double-click the /applications/TDengine to s

The following `launchctl` commands can help you manage TDengine service:

- - Start TDengine Server: `launchctl start com.tdengine.taosd`
+ - Start TDengine Server: `sudo launchctl start com.tdengine.taosd`
- - Stop TDengine Server: `launchctl stop com.tdengine.taosd`
+ - Stop TDengine Server: `sudo launchctl stop com.tdengine.taosd`
- - Check TDengine Server status: `launchctl list | grep taosd`
+ - Check TDengine Server status: `sudo launchctl list | grep taosd`

:::info

- - The `launchctl` command does not require _root_ privileges. You don't need to use the `sudo` command.
- - The first content returned by the `launchctl list | grep taosd` command is the PID of the program, if '-' indicates that the TDengine service is not running.
+ - Please use `sudo` to run `launchctl` to manage _com.tdengine.taosd_ with administrator privileges.
+ - The administrator privilege is required for service management to enhance security.
+ - Troubleshooting:
+   - The first column returned by the command `launchctl list | grep taosd` is the PID of the program. If it is `-`, the TDengine service is not running.
+   - If the service is abnormal, please check the `launchd.log` file in the system log or the `taosdlog` file in the `/var/log/taos` directory for more information.

:::
......
@@ -28,7 +28,7 @@ From the perspective of application program, you need to consider:

- Writing to known existing tables is more efficient than writing to uncertain tables in automatic creating mode because the latter needs to check whether the table exists or not before actually writing data into it.
- Writing in SQL is more efficient than writing in schemaless mode because schemaless writing creates table automatically and may alter table schema.

- Application programs need to take care of the above factors and try to take advantage of them. The application progam should write to single table in each write batch. The batch size needs to be tuned to a proper value on a specific system. The number of concurrent connections needs to be tuned to a proper value too to achieve the best writing throughput.
+ Application programs need to take care of the above factors and try to take advantage of them. The application program should write to a single table in each write batch. The batch size needs to be tuned to a proper value on a specific system. The number of concurrent connections needs to be tuned to a proper value too to achieve the best writing throughput.

### Data Source
......
@@ -7,6 +7,7 @@ title: Data Subscription

import Tabs from "@theme/Tabs";
import TabItem from "@theme/TabItem";
import Java from "./_sub_java.mdx";
+ import JavaWS from "./_sub_java_ws.mdx";
import Python from "./_sub_python.mdx";
import Go from "./_sub_go.mdx";
import Rust from "./_sub_rust.mdx";
@@ -294,7 +295,7 @@ You configure the following parameters when creating a consumer:

| `enable.auto.commit` | boolean | Commit automatically | Specify `true` or `false`. |
| `auto.commit.interval.ms` | integer | Interval for automatic commits, in milliseconds | |
| `enable.heartbeat.background` | boolean | Backend heartbeat; if enabled, the consumer does not go offline even if it has not polled for a long time | |
- | `experimental.snapshot.enable` | boolean | Specify whether to consume messages from the WAL or from TSBS | |
+ | `experimental.snapshot.enable` | boolean | Specify whether to consume messages from TSDB | |
| `msg.with.table.name` | boolean | Specify whether to deserialize table names from messages |

The method of specifying these parameters depends on the language used:
@@ -417,7 +418,7 @@ Python programs use the following parameters:

| `enable.auto.commit` | string | Commit automatically | Specify `true` or `false` |
| `auto.commit.interval.ms` | string | Interval for automatic commits, in milliseconds | |
| `auto.offset.reset` | string | Initial offset for the consumer group | Specify `earliest`, `latest`, or `none` (default) |
- | `experimental.snapshot.enable` | string | Specify whether to consume messages from the WAL or from TSDB | Specify `true` or `false` |
+ | `experimental.snapshot.enable` | string | Specify whether it's allowed to consume messages from the WAL or from TSDB | Specify `true` or `false` |
| `enable.heartbeat.background` | string | Backend heartbeat; if enabled, the consumer does not go offline even if it has not polled for a long time | Specify `true` or `false` |
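For illustration, a minimal Python sketch of passing these parameters is shown below. It assumes a recent `taospy` (2.7+) with its Kafka-style `Consumer` class; the topic name `topic_meters` and the connection settings are placeholders, not part of the original example set.

```python
from taos.tmq import Consumer

# A minimal sketch, assuming taospy >= 2.7; topic and connection values are placeholders.
consumer = Consumer(
    {
        "td.connect.ip": "localhost",
        "td.connect.user": "root",
        "td.connect.pass": "taosdata",
        "group.id": "group1",
        "client.id": "client1",
        "enable.auto.commit": "true",
        "auto.commit.interval.ms": "1000",
        "auto.offset.reset": "earliest",
    }
)
consumer.subscribe(["topic_meters"])  # the topic must have been created beforehand

try:
    while True:
        message = consumer.poll(1)  # wait up to 1 second for new messages
        if message is None:
            continue
        if message.error():
            raise Exception(message.error())
        for block in message.value():  # a message may carry several data blocks
            print(block.fetchall())
finally:
    consumer.unsubscribe()
    consumer.close()
```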
</TabItem>
@@ -807,7 +808,14 @@ The following section shows sample code in various languages.

</TabItem>
<TabItem label="Java" value="java">
- <Java />
+ <Tabs defaultValue="native">
+   <TabItem value="native" label="native connection">
+     <Java />
+   </TabItem>
+   <TabItem value="ws" label="WebSocket connection">
+     <JavaWS />
+   </TabItem>
+ </Tabs>
</TabItem>
<TabItem label="Go" value="Go">
......
@@ -65,11 +65,11 @@ int32_t aggfn_init() {
}

// aggregate start function. The intermediate value or the state(@interBuf) is initialized in this function. The function name shall be concatenation of udf name and _start suffix
- // @param interbuf intermediate value to intialize
+ // @param interbuf intermediate value to initialize
// @return error number defined in taoserror.h
int32_t aggfn_start(SUdfInterBuf* interBuf) {
  // initialize intermediate value in interBuf
- return TSDB_CODE_SUCESS;
+ return TSDB_CODE_SUCCESS;
}

// aggregate reduce function. This function aggregates old state(@interbuf) and one data block(inputBlock) and outputs a new state(@newInterBuf).
......
_sub_java.mdx:

- import Tabs from '@theme/Tabs';
- import TabItem from '@theme/TabItem';
-
- <Tabs defaultValue="native">
- <TabItem value="native" label="native connection">

```java
{{#include docs/examples/java/src/main/java/com/taos/example/SubscribeDemo.java}}
```

```java
{{#include docs/examples/java/src/main/java/com/taos/example/MetersDeserializer.java}}
```

```java
{{#include docs/examples/java/src/main/java/com/taos/example/Meters.java}}
```

- </TabItem>
- <TabItem value="ws" label="WebSocket connection">
- ```java
- {{#include docs/examples/java/src/main/java/com/taos/example/WebsocketSubscribeDemo.java}}
- ```
- ```java
- {{#include docs/examples/java/src/main/java/com/taos/example/MetersDeserializer.java}}
- ```
- ```java
- {{#include docs/examples/java/src/main/java/com/taos/example/Meters.java}}
- ```
- </TabItem>
- </Tabs>

_sub_java_ws.mdx (new file):

```java
{{#include docs/examples/java/src/main/java/com/taos/example/WebsocketSubscribeDemo.java}}
```

```java
{{#include docs/examples/java/src/main/java/com/taos/example/MetersDeserializer.java}}
```

```java
{{#include docs/examples/java/src/main/java/com/taos/example/Meters.java}}
```
@@ -75,10 +75,10 @@ database_option: {

- TABLE_PREFIX: The prefix length in the table name that is ignored when distributing a table to a vnode based on the table name.
- TABLE_SUFFIX: The suffix length in the table name that is ignored when distributing a table to a vnode based on the table name.
- TSDB_PAGESIZE: The page size of the data storage engine in a vnode. The unit is KB. The default is 4 KB. The range is 1 to 16384, that is, 1 KB to 16 MB.
- - WAL_RETENTION_PERIOD: specifies the time after which WAL files are deleted. This parameter is used for data subscription. Enter a time in seconds. The default value of single copy is 0. A value of 0 indicates that each WAL file is deleted immediately after its contents are written to disk. -1: WAL files are never deleted. The default value of multiple copy is 4 days.
+ - WAL_RETENTION_PERIOD: specifies the maximum time for which WAL files are kept after consumption. This parameter is used for data subscription. Enter a time in seconds. The default value is 0. A value of 0 indicates that WAL files are not required to be kept after consumption. -1: there is no upper limit on how long WAL files are kept.
- - WAL_RETENTION_SIZE: specifies the size at which WAL files are deleted. This parameter is used for data subscription. Enter a size in KB. The default value of single copy is 0. A value of 0 indicates that each WAL file is deleted immediately after its contents are written to disk. -1: WAL files are never deleted. The default value of multiple copy is -1.
+ - WAL_RETENTION_SIZE: specifies the maximum total size of WAL files to be kept after consumption. This parameter is used for data subscription. Enter a size in KB. The default value is 0. A value of 0 indicates that WAL files are not required to be kept after consumption. -1: there is no upper limit on the total size of WAL files kept.
- - WAL_ROLL_PERIOD: specifies the time after which WAL files are rotated. After this period elapses, a new WAL file is created. The default value of single copy is 0. A value of 0 indicates that a new WAL file is created only after the previous WAL file was written to disk. The default values of multiple copy is 1 day.
+ - WAL_ROLL_PERIOD: specifies the time after which WAL files are rotated. After this period elapses, a new WAL file is created. The default value is 0. A value of 0 indicates that a new WAL file is created only after TSDB data in memory are flushed to disk.
- - WAL_SEGMENT_SIZE: specifies the maximum size of a WAL file. After the current WAL file reaches this size, a new WAL file is created. The default value is 0. A value of 0 indicates that a new WAL file is created only after the previous WAL file was written to disk.
+ - WAL_SEGMENT_SIZE: specifies the maximum size of a WAL file. After the current WAL file reaches this size, a new WAL file is created. The default value is 0. A value of 0 indicates that a new WAL file is created only after TSDB data in memory are flushed to disk.

### Example Statement
......
@@ -34,7 +34,7 @@ column_definition:

SHOW STABLES [LIKE tb_name_wildcard];
```

- The preceding SQL statement shows all supertables in the current TDengine database, including the name, creation time, number of columns, number of tags, and number of subtabels for each supertable.
+ The preceding SQL statement shows all supertables in the current TDengine database, including the name, creation time, number of columns, number of tags, and number of subtables for each supertable.

### View the CREATE Statement for a Supertable
......
@@ -248,7 +248,7 @@ You can also use the NULLS keyword to specify the position of null values. Ascen

The LIMIT keyword controls the number of results that are displayed. You can also use the OFFSET keyword to specify the result to display first. `LIMIT` and `OFFSET` are executed after `ORDER BY` in the query execution. You can include an offset in a LIMIT clause. For example, LIMIT 5 OFFSET 2 can also be written LIMIT 2, 5. Both of these clauses display the third through the seventh results.

- In a statement that includes a PARTITON BY clause, the LIMIT keyword is performed on each partition, not on the entire set of results.
+ In a statement that includes a PARTITION BY clause, the LIMIT keyword is performed on each partition, not on the entire set of results.

## SLIMIT
......
---
sidebar_label: Tag Index
title: Tag Index
description: Use Tag Index to Improve Query Performance
---
## Introduction
Prior to TDengine 3.0.3.0 (exclusive), only one index is created by default, on the first tag of each supertable, and it is not allowed to dynamically create indexes on any other tags. From version 3.0.3.0, you can dynamically create an index on any tag of any type. The index created automatically by TDengine is still valid. Query performance can benefit from indexes if you use them properly.
## Syntax
1. The syntax of creating an index

```sql
CREATE INDEX index_name ON tbl_name (tagColName);
```

In the above statement, `index_name` is the name of the index, `tbl_name` is the name of the supertable, and `tagColName` is the name of the tag on which the index is being created. `tagColName` can be of any type supported by TDengine.
2. The syntax of dropping an index

```sql
DROP INDEX index_name
```

In the above statement, `index_name` is the name of an existing index. If the index doesn't exist, the command fails but has no impact on the system.
3. The syntax of showing indexes in the system
```sql
SELECT * FROM information_schema.INS_INDEXES
```
You can also add filter conditions to limit the results.
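For illustration, the statements above can be exercised from Python as in the minimal sketch below; it assumes `taospy` and a local server, and the database `test`, supertable `meters`, tag `location`, and index name `idx_location` are hypothetical.

```python
import taos

# A minimal sketch, assuming taospy; database/table/tag/index names are hypothetical.
conn = taos.connect(host="localhost", user="root", password="taosdata", database="test")

# Create an index on one tag column of a supertable.
conn.execute("CREATE INDEX idx_location ON meters (location)")

# List the indexes known to the system; filter conditions can be added as needed.
for row in conn.query("SELECT * FROM information_schema.ins_indexes").fetch_all():
    print(row)

# Drop the index when it is no longer needed.
conn.execute("DROP INDEX idx_location")
conn.close()
```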
## Detailed Specification
1. Indexes can improve query performance significantly if they are used properly. The operators supported by tag index include `=`, `>`, `>=`, `<`, `<=`. If you use these operators with tags, indexes can improve query performance significantly. However, for operators not in this scope, indexes don't help. More operators will be added in the future.
2. Only one index can be created on each tag; an error is reported if you try to create more than one index on the same tag.
3. An index can be created on only a single tag at a time; you are not allowed to create indexes on multiple tags together.
4. The name of each index must be unique across the whole system, regardless of the type of the index, e.g. tag index or SMA index.
5. There is no limit on the number of indexes, but each index adds some burden on the metadata subsystem. Too many indexes may decrease the efficiency of reading or writing metadata and thus degrade system performance, so it's better not to add unnecessary indexes.
6. You can't create an index on a normal table or a child table.
7. If the unique values of a tag column are too few, it's better not to create an index on such a tag column; the benefit would be very small.
\ No newline at end of file
@@ -666,13 +666,13 @@ If you input a specific column, the number of non-null values in the column is r

ELAPSED(ts_primary_key [, time_unit])
```

- **Description**: The `elapsed` function can be used to calculate the continuous time length in which there is valid data. If it's used with `INTERVAL` clause, the returned result is the calcualted time length within each time window. If it's used without `INTERVAL` caluse, the returned result is the calculated time length within the specified time range. Please be noted that the return value of `elapsed` is the number of `time_unit` in the calculated time length.
+ **Description**: The `elapsed` function can be used to calculate the continuous time length in which there is valid data. If it's used with an `INTERVAL` clause, the returned result is the calculated time length within each time window. If it's used without an `INTERVAL` clause, the returned result is the calculated time length within the specified time range. Note that the return value of `elapsed` is the number of `time_unit` in the calculated time length.

**Return value type**: Double if the input value is not NULL;

**Applicable data type**: TIMESTAMP

- **Applicable tables**: table, STable, outter in nested query
+ **Applicable tables**: table, STable, outer in nested query

**Explanations**

- `ts_primary_key` parameter can only be the first column of a table, i.e. timestamp primary key.
@@ -754,7 +754,7 @@ HYPERLOGLOG(expr)

**Description**:
The cardinal number of a specific column is returned by using hyperloglog algorithm. The benefit of using hyperloglog algorithm is that the memory usage is under control when the data volume is huge.
- However, when the data volume is very small, the result may be not accurate, it's recommented to use `select count(data) from (select unique(col) as data from table)` in this case.
+ However, when the data volume is very small, the result may not be accurate; it's recommended to use `select count(data) from (select unique(col) as data from table)` in this case.

**Return value type**: Integer
@@ -801,7 +801,7 @@ PERCENTILE(expr, p [, p1] ...)

**Description**: The value whose rank in a specific column matches the specified percentage. If such a value matching the specified percentage doesn't exist in the column, an interpolation value will be returned.

- **Return value type**: This function takes 2 minumum and 11 maximum parameters, and it can simultaneously return 10 percentiles at most. If 2 parameters are given, a single percentile is returned and the value type is DOUBLE.
+ **Return value type**: This function takes 2 minimum and 11 maximum parameters, and it can simultaneously return 10 percentiles at most. If 2 parameters are given, a single percentile is returned and the value type is DOUBLE.
If more than 2 parameters are given, the return value type is a VARCHAR string, the format of which is a JSON ARRAY containing all return values.

**Applicable column types**: Numeric

@@ -811,7 +811,7 @@ PERCENTILE(expr, p [, p1] ...)

**More explanations**:

- _p_ is in range [0,100], when _p_ is 0, the result is same as using function MIN; when _p_ is 100, the result is same as function MAX.
- - When calculating multiple percentiles of a specific column, a single PERCENTILE function with multiple parameters is adviced, as this can largely reduce the query response time.
+ - When calculating multiple percentiles of a specific column, a single PERCENTILE function with multiple parameters is advised, as this can largely reduce the query response time.
For example, using SELECT percentile(col, 90, 95, 99) FROM table will perform better than SELECT percentile(col, 90), percentile(col, 95), percentile(col, 99) from table.
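To make that recommendation concrete, a small sketch follows; it assumes `taospy`, and the database `test`, table `d1001`, and numeric column `current` are hypothetical.

```python
import taos

# A minimal sketch, assuming taospy; table and column names are hypothetical.
conn = taos.connect(host="localhost", user="root", password="taosdata", database="test")

# Preferred: one PERCENTILE call computing several percentiles in a single pass.
fast = conn.query("SELECT PERCENTILE(current, 90, 95, 99) FROM d1001").fetch_all()

# Slower alternative: three separate PERCENTILE calls over the same data.
slow = conn.query(
    "SELECT PERCENTILE(current, 90), PERCENTILE(current, 95), PERCENTILE(current, 99) FROM d1001"
).fetch_all()

print(fast, slow)
conn.close()
```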
## Selection Functions

@@ -884,6 +884,15 @@ INTERP(expr)

- Pseudocolumn `_irowts` can be used along with `INTERP` to return the timestamps associated with interpolation points (supported after version 3.0.2.0).
- Pseudocolumn `_isfilled` can be used along with `INTERP` to indicate whether the results are original records or data points generated by the interpolation algorithm (supported after version 3.0.3.0).

+ **Example**
+
+ - We use the smart meters example used in this documentation to illustrate how to use the INTERP function.
+ - We want to downsample every 1 hour and use a linear fill for missing values. Note the order in which the "partition by" clause and the "range", "every" and "fill" parameters are used.
+
+ ```sql
+ SELECT _irowts, INTERP(current) FROM test.meters PARTITION BY TBNAME RANGE('2017-07-22 00:00:00','2017-07-24 12:25:00') EVERY(1h) FILL(LINEAR)
+ ```

### LAST

```sql
......
@@ -21,7 +21,7 @@ part_list can be any scalar expression, such as a column, constant, scalar funct

A PARTITION BY clause is processed as follows:

- The PARTITION BY clause must occur after the WHERE clause
- - The PARTITION BY caluse partitions the data according to the specified dimentions, then perform computation on each partition. The performed computation is determined by the rest of the statement - a window clause, GROUP BY clause, or SELECT clause.
+ - The PARTITION BY clause partitions the data according to the specified dimensions, then performs computation on each partition. The performed computation is determined by the rest of the statement - a window clause, GROUP BY clause, or SELECT clause.
- The PARTITION BY clause can be used together with a window clause or GROUP BY clause. In this case, the window or GROUP BY clause takes effect on every partition. For example, the following statement partitions the table by the location tag, performs downsampling over a 10 minute window, and returns the maximum value:

```sql
@@ -32,15 +32,15 @@ The most common usage of PARTITION BY is partitioning the data in subtables by t

## Windowed Queries

- Aggregation by time window is supported in TDengine. For example, in the case where temperature sensors report the temperature every seconds, the average temperature for every 10 minutes can be retrieved by performing a query with a time window. Window related clauses are used to divide the data set to be queried into subsets and then aggregation is performed across the subsets. There are three kinds of windows: time window, status window, and session window. There are two kinds of time windows: sliding window and flip time/tumbling window. The query syntax is as follows:
+ Aggregation by time window is supported in TDengine. For example, in the case where temperature sensors report the temperature every second, the average temperature for every 10 minutes can be retrieved by performing a query with a time window. Window related clauses are used to divide the data set to be queried into subsets and then aggregation is performed across the subsets. There are four kinds of windows: time window, status window, session window, and event window. There are two kinds of time windows: sliding window and flip time/tumbling window. The syntax of the window clause is as follows:

```sql
- SELECT select_list FROM tb_name
-   [WHERE where_condition]
-   [SESSION(ts_col, tol_val)]
-   [STATE_WINDOW(col)]
-   [INTERVAL(interval [, offset]) [SLIDING sliding]]
-   [FILL({NONE | VALUE | PREV | NULL | LINEAR | NEXT})]
+ window_clause: {
+     SESSION(ts_col, tol_val)
+   | STATE_WINDOW(col)
+   | INTERVAL(interval [, offset]) [SLIDING sliding] [FILL({NONE | VALUE | PREV | NULL | LINEAR | NEXT})]
+   | EVENT_WINDOW START WITH start_trigger_condition END WITH end_trigger_condition
+ }
```

The following restrictions apply:
@@ -105,7 +105,7 @@ SELECT COUNT(*) FROM temp_tb_1 INTERVAL(1m) SLIDING(2m);

When using time windows, note the following:

- - The window length for aggregation depends on the value of INTERVAL. The minimum interval is 10 ms. You can configure a window as an offset from UTC 0:00. The offset cannot be smaler than the interval. You can use SLIDING to specify the length of time that the window moves forward.
+ - The window length for aggregation depends on the value of INTERVAL. The minimum interval is 10 ms. You can configure a window as an offset from UTC 0:00. The offset cannot be smaller than the interval. You can use SLIDING to specify the length of time that the window moves forward.
Please note that the `timezone` parameter should be configured to be the same value in the `taos.cfg` configuration file on client side and server side.
- The result set is in ascending order of timestamp when you aggregate by time window.
@@ -146,6 +146,26 @@ If the time interval between two continuous rows are within the time interval sp

SELECT COUNT(*), FIRST(ts) FROM temp_tb_1 SESSION(ts, tol_val);
```
+ ### Event Window
+
+ An event window is determined according to the window start condition and the window close condition. The window starts when the `start_trigger_condition` is evaluated to true and closes when the `end_trigger_condition` is evaluated to true. `start_trigger_condition` and `end_trigger_condition` can be any conditional expressions supported by TDengine and can include multiple columns.
+
+ There may be only one row of data in an event window, when a row meets both the `start_trigger_condition` and the `end_trigger_condition`.
+
+ The window is treated as invalid or non-existing if the `end_trigger_condition` can't be met. There will be no output in case a window can't be closed.
+
+ If the event window query is performed on a super table, TDengine consolidates all the data of all child tables into a single timeline and then performs the event window based query.
+
+ If you want to perform an event window based query on the result set of a sub-query, the result set of the sub-query should be arranged in the order of timestamp and include the timestamp column.
+
+ For example, the diagram below illustrates the event windows generated by the query below:
+
+ ```sql
+ select _wstart, _wend, count(*) from t start with c1 > 0 end with c2 < 10
+ ```
+
+ ![Event Window Illustration](./event_window.webp)
### Examples

A table of intelligent meters can be created by the SQL statement below:
......
@@ -55,7 +55,7 @@ description: This document describes the JSON data type in TDengine.

4. Tag Operations

- The value of a JSON tag can be altered. Please note that the full JSON will be overriden when doing this.
+ The value of a JSON tag can be altered. Please note that the full JSON will be overridden when doing this.

The name of a JSON tag can be altered.
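For illustration, a small sketch of altering a JSON tag value from Python follows; it assumes `taospy`, and the database `test`, child table `jtb1`, and JSON tag `jtag` are hypothetical. Note that the full JSON value is replaced, as described above.

```python
import taos

# A minimal sketch, assuming taospy; table and tag names are hypothetical.
conn = taos.connect(host="localhost", user="root", password="taosdata", database="test")

# Altering a JSON tag overrides the full JSON value.
conn.execute("""ALTER TABLE jtb1 SET TAG jtag='{"location": "beijing", "id": 2}'""")
conn.close()
```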
......
@@ -179,6 +179,20 @@ Provides information about standard tables and subtables.

| 5 | tag_type | BINARY(64) | Tag type |
| 6 | tag_value | BINARY(16384) | Tag value |
+ ## INS_COLUMNS
+
+ | # | **Column** | **Data Type** | **Description** |
+ | --- | :-----------: | ------------- | ---------------- |
+ | 1 | table_name | BINARY(192) | Table name |
+ | 2 | db_name | BINARY(64) | Database name |
+ | 3 | table_type | BINARY(21) | Table type |
+ | 4 | col_name | BINARY(64) | Column name |
+ | 5 | col_type | BINARY(32) | Column type |
+ | 6 | col_length | INT | Column length |
+ | 7 | col_precision | INT | Column precision |
+ | 8 | col_scale | INT | Column scale |
+ | 9 | col_nullable | INT | Whether the column is nullable |
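As an illustration, the new view can be queried like any other system table; the sketch below assumes `taospy` and TDengine 3.0.3.0+, and the database `test` and table `meters` are hypothetical.

```python
import taos

# A minimal sketch, assuming taospy and TDengine 3.0.3.0+; db/table names are hypothetical.
conn = taos.connect(host="localhost", user="root", password="taosdata")
result = conn.query(
    "SELECT col_name, col_type, col_length FROM information_schema.ins_columns "
    "WHERE db_name = 'test' AND table_name = 'meters'"
)
for col_name, col_type, col_length in result.fetch_all():
    print(col_name, col_type, col_length)
conn.close()
```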
## INS_USERS

Provides information about TDengine users.
@@ -274,9 +288,9 @@ Provides dnode configuration information.

| 1 | stream_name | BINARY(64) | Stream name |
| 2 | create_time | TIMESTAMP | Creation time |
| 3 | sql | BINARY(1024) | SQL statement used to create the stream |
- | 4 | status | BIANRY(20) | Current status |
+ | 4 | status | BINARY(20) | Current status |
| 5 | source_db | BINARY(64) | Source database |
- | 6 | target_db | BIANRY(64) | Target database |
+ | 6 | target_db | BINARY(64) | Target database |
| 7 | target_table | BINARY(192) | Target table |
| 8 | watermark | BIGINT | Watermark (see stream processing documentation). It should be noted that `watermark` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
| 9 | trigger | INT | Method of triggering the result push (see stream processing documentation). It should be noted that `trigger` is a TDengine keyword and needs to be escaped with ` when used as a column name. |
@@ -4,7 +4,7 @@ sidebar_label: SHOW Statement
description: This document describes how to use the SHOW statement in TDengine.
---

- `SHOW` command can be used to get brief system information. To get details about metatadata, information, and status in the system, please use `select` to query the tables in database `INFORMATION_SCHEMA`.
+ `SHOW` command can be used to get brief system information. To get details about metadata, information, and status in the system, please use `select` to query the tables in database `INFORMATION_SCHEMA`.

## SHOW APPS
@@ -86,10 +86,10 @@ SHOW FUNCTIONS;

Shows all user-defined functions in the system.

- ## SHOW LICENSE
+ ## SHOW LICENCES

```sql
- SHOW LICENSE;
+ SHOW LICENCES;
SHOW GRANTS;
```
@@ -359,7 +359,7 @@ Shows the working configuration of the parameters that must be the same on each

SHOW [db_name.]VGROUPS;
```

- Shows information about all vgroups in the system or about the vgroups for a specified database.
+ Shows information about all vgroups in the current database.

## SHOW VNODES
......
@@ -68,7 +68,7 @@ The following return value results indicate that the verification passed.

## HTTP request URL format

```text
- http://<fqdn>:<port>/rest/sql/[db_name][?tz=timezone]
+ http://<fqdn>:<port>/rest/sql/[db_name][?tz=timezone[&req_id=req_id]]
```

Parameter Description:

@@ -77,6 +77,7 @@ Parameter Description:

- port: httpPort configuration item in the configuration file, default is 6041.
- db_name: Optional parameter that specifies the default database name for the executed SQL command.
- tz: Optional parameter that specifies the timezone of the returned time, following the IANA Time Zone rules, e.g. `America/New_York`.
+ - req_id: Optional parameter that specifies the request id for tracing.

For example, `http://h1.taos.com:6041/rest/sql/test` is a URL to `h1.taos.com:6041` and sets the default database name to `test`.
@@ -99,13 +100,13 @@ The HTTP request's BODY is a complete SQL command, and the data table in the SQL

Use `curl` to initiate an HTTP request with a custom authentication method, with the following syntax.

```bash
- curl -L -H "Authorization: Basic <TOKEN>" -d "<SQL>" <ip>:<PORT>/rest/sql/[db_name][?tz=timezone]
+ curl -L -H "Authorization: Basic <TOKEN>" -d "<SQL>" <ip>:<PORT>/rest/sql/[db_name][?tz=timezone[&req_id=req_id]]
```

or

```bash
- curl -L -u username:password -d "<SQL>" <ip>:<PORT>/rest/sql/[db_name][?tz=timezone]
+ curl -L -u username:password -d "<SQL>" <ip>:<PORT>/rest/sql/[db_name][?tz=timezone[&req_id=req_id]]
```

where `TOKEN` is the string after Base64 encoding of `{username}:{password}`, e.g. `root:taosdata` is encoded as `cm9vdDp0YW9zZGF0YQ==`.
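For illustration, the same request can be issued from Python with the third-party `requests` package; the host, credentials, database, SQL, and `req_id` value below are placeholders.

```python
import requests

# A minimal sketch using the "requests" package; host, credentials, and req_id are placeholders.
url = "http://localhost:6041/rest/sql/test?tz=America/New_York&req_id=3"
response = requests.post(
    url,
    data="SELECT ts, current FROM meters LIMIT 2",
    auth=("root", "taosdata"),  # HTTP Basic auth, equivalent to `curl -u`
)
response.raise_for_status()
print(response.json())
```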
@@ -114,14 +115,41 @@ where `TOKEN` is the string after Base64 encoding of `{username}:{password}`, e.

### HTTP Response Code

- | **Response Code** | **Description** |
- |-------------------|----------------|
- | 200 | Success. (Also used for C interface errors.) |
- | 400 | Parameter error |
- | 401 | Authentication failure |
- | 404 | Interface not found |
- | 500 | Internal error |
- | 503 | Insufficient system resources |
+ Starting from TDengine 3.0.3.0, `taosAdapter` provides a configuration parameter `httpCodeServerError` to set whether to return a non-200 HTTP status code when the C interface returns an error.
+
+ | **Description** | **httpCodeServerError false** | **httpCodeServerError true** |
+ |-----------------|-------------------------------|------------------------------|
+ | taos_errno() returns 0 | 200 | 200 |
+ | taos_errno() returns non-0 | 200 (except authentication error) | 500 (except authentication error and 400/502 errors) |
+ | Parameter error | 400 (only handles HTTP request URL parameter errors) | 400 (handles HTTP request URL parameter errors and taosd return errors) |
+ | Authentication error | 401 | 401 |
+ | Interface does not exist | 404 | 404 |
+ | Cluster unavailable error | 502 | 502 |
+ | Insufficient system resources | 503 | 503 |
+
+ The C error codes that return HTTP status code 400 are:
+
+ - TSDB_CODE_TSC_SQL_SYNTAX_ERROR (0x0216)
+ - TSDB_CODE_TSC_LINE_SYNTAX_ERROR (0x021B)
+ - TSDB_CODE_PAR_SYNTAX_ERROR (0x2600)
+ - TSDB_CODE_TDB_TIMESTAMP_OUT_OF_RANGE (0x060B)
+ - TSDB_CODE_TSC_VALUE_OUT_OF_RANGE (0x0224)
+ - TSDB_CODE_PAR_INVALID_FILL_TIME_RANGE (0x263B)
+
+ The C error codes that return HTTP status code 401 are:
+
+ - TSDB_CODE_MND_USER_ALREADY_EXIST (0x0350)
+ - TSDB_CODE_MND_USER_NOT_EXIST (0x0351)
+ - TSDB_CODE_MND_INVALID_USER_FORMAT (0x0352)
+ - TSDB_CODE_MND_INVALID_PASS_FORMAT (0x0353)
+ - TSDB_CODE_MND_NO_USER_FROM_CONN (0x0354)
+ - TSDB_CODE_MND_TOO_MANY_USERS (0x0355)
+ - TSDB_CODE_MND_INVALID_ALTER_OPER (0x0356)
+ - TSDB_CODE_MND_AUTH_FAILURE (0x0357)
+
+ The C error codes that return HTTP status code 403 are:
+
+ - TSDB_CODE_RPC_SOMENODE_NOT_CONNECTED (0x0020)
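A rough sketch of what this means for a client follows; it assumes taosAdapter is running with `httpCodeServerError` enabled and that error bodies carry `code` and `desc` fields, and the host and credentials are placeholders.

```python
import requests

# A minimal sketch, assuming httpCodeServerError is enabled on taosAdapter;
# the malformed SQL is deliberate and should map to a 400 per the table above.
response = requests.post(
    "http://localhost:6041/rest/sql",
    data="SELEC 1",  # deliberate syntax error
    auth=("root", "taosdata"),
)
if response.status_code == 200:
    print("ok:", response.json())
elif response.status_code == 400:
    print("syntax or parameter error:", response.json().get("desc"))
else:
    print("server-side error:", response.status_code)
```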
### HTTP body structure

@@ -269,7 +297,6 @@ Response body:

```json
{
-   "status": "succ",
  "code": 0,
  "desc": "/KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04"
}
......
@@ -176,6 +176,14 @@ The base API is used to do things like create database connections and provide a

Set the current default database to `db`.

+ - `int taos_get_current_db(TAOS *taos, char *database, int len, int *required)`
+
+   - The `database` buffer and its length `len` are allocated and passed in by the caller; the current database name will be copied into `database`.
+   - If the db name cannot be fully copied into `database` (including truncation), an error is returned with a return value of -1, and the user can then call taos_errstr(NULL) to get the error message.
+   - If database==NULL or len<=0, an error is returned, and the space required to store the db name (including the terminating '\0') is written to the variable `required`.
+   - If len is less than the space required to store the db name (including the terminating '\0'), an error is returned, and the truncated data placed in `database` ends with '\0'.
+   - If len is greater than or equal to the space required to store the db name (including the terminating '\0'), 0 is returned, and the db name ending with '\0' is placed in `database`.
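There is no direct Python counterpart to this call, but as a rough functional analogue the active database can be queried with `SELECT DATABASE()`; a minimal sketch, assuming `taospy` and a hypothetical database `test`:

```python
import taos

# A minimal sketch, assuming taospy: a rough Python analogue of taos_get_current_db().
conn = taos.connect(host="localhost", user="root", password="taosdata", database="test")
current_db = conn.query("SELECT DATABASE()").fetch_all()[0][0]
print(current_db)  # expected to print "test"
conn.close()
```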
- `void taos_close(TAOS *taos)`

Closes the connection, where `taos` is the handle returned by `taos_connect()`.
@@ -404,5 +412,17 @@ In addition to writing data using the SQL method or the parameter binding API, w

Note that the timestamp resolution parameter only takes effect when the protocol type is `SML_LINE_PROTOCOL`.
For OpenTSDB's text protocol, timestamp resolution follows its official resolution rules - time precision is confirmed by the number of characters contained in the timestamp.

- **Supported Versions**
- This feature interface is supported from version 2.3.0.0.
+ Other schemaless-related APIs:
+
+ - `TAOS_RES *taos_schemaless_insert_with_reqid(TAOS *taos, char *lines[], int numLines, int protocol, int precision, int64_t reqid)`
+ - `TAOS_RES *taos_schemaless_insert_raw(TAOS *taos, char *lines, int len, int32_t *totalRows, int protocol, int precision)`
+ - `TAOS_RES *taos_schemaless_insert_raw_with_reqid(TAOS *taos, char *lines, int len, int32_t *totalRows, int protocol, int precision, int64_t reqid)`
+ - `TAOS_RES *taos_schemaless_insert_ttl(TAOS *taos, char *lines[], int numLines, int protocol, int precision, int32_t ttl)`
+ - `TAOS_RES *taos_schemaless_insert_ttl_with_reqid(TAOS *taos, char *lines[], int numLines, int protocol, int precision, int32_t ttl, int64_t reqid)`
+ - `TAOS_RES *taos_schemaless_insert_raw_ttl(TAOS *taos, char *lines, int len, int32_t *totalRows, int protocol, int precision, int32_t ttl)`
+ - `TAOS_RES *taos_schemaless_insert_raw_ttl_with_reqid(TAOS *taos, char *lines, int len, int32_t *totalRows, int protocol, int precision, int32_t ttl, int64_t reqid)`
+
+ **Description**
+
+ - The above seven interfaces are extension interfaces, mainly used to pass the `ttl` and `reqid` parameters; they can be used as needed.
+ - The `_raw` interfaces pass the data through the parameters `lines` and `len`, in order to solve the problem of data containing '\0' being truncated by the original interface. The `totalRows` pointer returns the number of parsed data rows.
+ - The `_ttl` interfaces pass the `ttl` parameter to control the expiration time of the table.
+ - The `_reqid` interfaces allow tracking the entire call chain by passing the `reqid` parameter.
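For comparison, the basic schemaless call is also exposed in Python; the sketch below assumes `taospy` (the `_ttl`/`_reqid` variants above are C-interface extensions and are not used here), and the database name and data line are placeholders.

```python
import taos

# A minimal sketch of basic schemaless ingestion, assuming taospy;
# the ttl/reqid variants above are C-interface extensions and are not used here.
conn = taos.connect(host="localhost", user="root", password="taosdata")
conn.execute("CREATE DATABASE IF NOT EXISTS test")
conn.select_db("test")

# One InfluxDB line-protocol record; the trailing timestamp is in nanoseconds.
lines = [
    "meters,location=California.LosAngeles,groupid=2 current=11.8,voltage=221 1626006833639000000"
]
conn.schemaless_insert(lines, taos.SmlProtocol.LINE_PROTOCOL, taos.SmlPrecision.NOT_CONFIGURED)
conn.close()
```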
@@ -300,7 +300,7 @@ stmt.executeUpdate("create table if not exists tb (ts timestamp, temperature int

> **Note**: If you do not use `use db` to specify the database, all subsequent operations on the table need to add the database name as a prefix, such as db.tb.

- ### 插入数据
+ ### Insert data

```java
// insert data
......
@@ -39,7 +39,7 @@ The Rust Connector is still under rapid development and is not guaranteed to be

* Install the Rust development toolchain
* If using the native connection, please install the TDengine client driver. Please refer to [install client driver](/reference/connector#install-client-driver)

- # Add taos dependency
+ ### Add taos dependency

Depending on the connection method, add the [taos][taos] dependency in your Rust project as follows:

@@ -282,7 +282,7 @@ In the application code, use `pool.get()?` to get a connection object [Taos].

let taos = pool.get()?;
```

- # Connectors
+ ### Connectors

The [Taos][struct.Taos] object provides an API to perform operations on multiple databases.
......
@@ -32,7 +32,7 @@ We recommend using the latest version of `taospy`, regardless of the version of

### Preparation

- 1. Install Python. The recent taospy package requires Python 3.6+. The earlier versions of taospy require Python 3.7+. The taos-ws-py package requires Python 3.7+. If Python is not available on your system, refer to the [Python BeginnersGuide](https://wiki.python.org/moin/BeginnersGuide/Download) to install it.
+ 1. Install Python. The recent taospy package requires Python 3.6.2+. The earlier versions of taospy require Python 3.7+. The taos-ws-py package requires Python 3.7+. If Python is not available on your system, refer to the [Python BeginnersGuide](https://wiki.python.org/moin/BeginnersGuide/Download) to install it.
2. Install [pip](https://pypi.org/project/pip/). In most cases, the Python installer comes with the pip utility. If not, please refer to [pip documentation](https://pip.pypa.io/en/stable/installation/) to install it.

If you use a native connection, you will also need to [Install Client Driver](/reference/connector#Install-Client-Driver). The client install package includes the TDengine client dynamic link library (`libtaos.so` or `taos.dll`) and the TDengine CLI.
@@ -228,6 +228,16 @@ All arguments to the `connect()` function are optional keyword arguments. The fo

- `password`: TDengine user password. The default is `taosdata`.
- `timeout`: HTTP request timeout. Enter a value in seconds. The default is `socket._GLOBAL_DEFAULT_TIMEOUT`. Usually, no configuration is needed.

+ </TabItem>
+ <TabItem value="websocket" label="WebSocket connection">
+
+ ```python
+ {{#include docs/examples/python/connect_websocket_examples.py:connect}}
+ ```
+
+ The parameter of `connect()` is the URL of TDengine, and the protocol is `taosws` or `ws`.
+
</TabItem>
</Tabs>
@@ -298,7 +308,15 @@ The `RestClient` class is a direct wrapper for the [REST API](/reference/rest-ap

For a more detailed description of the `sql()` method, please refer to [RestClient](https://docs.taosdata.com/api/taospy/taosrest/restclient.html).

+ </TabItem>
+ <TabItem value="websocket" label="WebSocket connection">
+
+ ```python
+ {{#include docs/examples/python/connect_websocket_examples.py:basic}}
+ ```
+
+ - `conn.execute`: can be used to execute arbitrary SQL statements; it returns the number of rows affected.
+ - `conn.query`: can be used to execute query SQL statements; it returns the query results.
+
</TabItem>
</Tabs>
@@ -319,6 +337,13 @@ For a more detailed description of the `sql()` method, please refer to [RestClie

```python
{{#include docs/examples/python/conn_rest_pandas.py}}
```

+ </TabItem>
+ <TabItem value="websocket" label="WebSocket connection">
+
+ ```python
+ {{#include docs/examples/python/conn_websocket_pandas.py}}
+ ```
+
</TabItem>
</Tabs>
......
@@ -94,7 +94,7 @@ In this scenario, modifying your project file is required in order to copy the W

<ItemGroup>
  <PackageReference Include="TDengine.Connector" Version="3.0.*" GeneratePathProperty="true" />
</ItemGroup>
- <Target Name="copyDLLDepency" BeforeTargets="BeforeBuild">
+ <Target Name="copyDLLDependency" BeforeTargets="BeforeBuild">
  <ItemGroup>
    <DepDLLFiles Include="$(PkgTDengine_Connector)\runtimes\**\*.*" />
  </ItemGroup>
......
...@@ -87,7 +87,7 @@ In this section a few sample programs which use TDengine PHP connector to access
> Any error would throw exception: `TDengine\Exception\TDengineException`

### Establish Connection

<details>
<summary>Establish Connection</summary>
...
...@@ -11,7 +11,7 @@ import PkgListV3 from "/components/PkgListV3";
The default installation path is C:\TDengine, including the following files (directories).
- _taos.exe_: TDengine CLI command-line program
- _taosadapter.exe_: server-side executable that provides RESTful services and accepts writing requests from a variety of other software
- _taosBenchmark.exe_: TDengine testing tool
- _cfg_: configuration file directory
- _driver_: client driver dynamic link library
...
...@@ -61,7 +61,7 @@ The different database framework specifications for various programming language
| **Connection Management** | Support | Support | Support | Support | Support | Support |
| **Regular Query** | Support | Support | Support | Support | Support | Support |
| **Parameter Binding** | Not Supported | Not Supported | Support | Support | Not Supported | Support |
| **Subscription (TMQ)** | Support | Support | Support | Not Supported | Not Supported | Support |
| **Schemaless** | Not Supported | Not Supported | Not Supported | Not Supported | Not Supported | Not Supported |
| **Bulk Pulling (based on WebSocket)** | Support | Support | Support | Support | Support | Support |
| **DataFrame** | Not Supported | Support | Not Supported | Not Supported | Not Supported | Not Supported |
...
...@@ -58,9 +58,9 @@ Usage of taosAdapter:
--collectd.enable enable collectd. Env "TAOS_ADAPTER_COLLECTD_ENABLE" (default true)
--collectd.password string collectd password. Env "TAOS_ADAPTER_COLLECTD_PASSWORD" (default "taosdata")
--collectd.port int collectd server port. Env "TAOS_ADAPTER_COLLECTD_PORT" (default 6045)
--collectd.ttl int collectd data ttl. Env "TAOS_ADAPTER_COLLECTD_TTL"
--collectd.user string collectd user. Env "TAOS_ADAPTER_COLLECTD_USER" (default "root")
--collectd.worker int collectd write worker. Env "TAOS_ADAPTER_COLLECTD_WORKER" (default 10)
-c, --config string config path default /etc/taos/taosadapter.toml
--cors.allowAllOrigins cors allow all origins. Env "TAOS_ADAPTER_CORS_ALLOW_ALL_ORIGINS" (default true)
--cors.allowCredentials cors allow credentials. Env "TAOS_ADAPTER_CORS_ALLOW_Credentials"
...@@ -68,8 +68,9 @@ Usage of taosAdapter: ...@@ -68,8 +68,9 @@ Usage of taosAdapter:
--cors.allowOrigins stringArray cors allow origins. Env "TAOS_ADAPTER_ALLOW_ORIGINS" --cors.allowOrigins stringArray cors allow origins. Env "TAOS_ADAPTER_ALLOW_ORIGINS"
--cors.allowWebSockets cors allow WebSockets. Env "TAOS_ADAPTER_CORS_ALLOW_WebSockets" --cors.allowWebSockets cors allow WebSockets. Env "TAOS_ADAPTER_CORS_ALLOW_WebSockets"
--cors.exposeHeaders stringArray cors expose headers. Env "TAOS_ADAPTER_Expose_Headers" --cors.exposeHeaders stringArray cors expose headers. Env "TAOS_ADAPTER_Expose_Headers"
--debug enable debug mode. Env "TAOS_ADAPTER_DEBUG" --debug enable debug mode. Env "TAOS_ADAPTER_DEBUG" (default true)
--help Print this help message and exit --help Print this help message and exit
--httpCodeServerError Use a non-200 http status code when taosd returns an error. Env "TAOS_ADAPTER_HTTP_CODE_SERVER_ERROR"
--influxdb.enable enable influxdb. Env "TAOS_ADAPTER_INFLUXDB_ENABLE" (default true) --influxdb.enable enable influxdb. Env "TAOS_ADAPTER_INFLUXDB_ENABLE" (default true)
--log.enableRecordHttpSql whether to record http sql. Env "TAOS_ADAPTER_LOG_ENABLE_RECORD_HTTP_SQL" --log.enableRecordHttpSql whether to record http sql. Env "TAOS_ADAPTER_LOG_ENABLE_RECORD_HTTP_SQL"
--log.path string log path. Env "TAOS_ADAPTER_LOG_PATH" (default "/var/log/taos") --log.path string log path. Env "TAOS_ADAPTER_LOG_PATH" (default "/var/log/taos")
...@@ -80,14 +81,17 @@ Usage of taosAdapter: ...@@ -80,14 +81,17 @@ Usage of taosAdapter:
--log.sqlRotationSize string record sql log rotation size(KB MB GB), must be a positive integer. Env "TAOS_ADAPTER_LOG_SQL_ROTATION_SIZE" (default "1GB") --log.sqlRotationSize string record sql log rotation size(KB MB GB), must be a positive integer. Env "TAOS_ADAPTER_LOG_SQL_ROTATION_SIZE" (default "1GB")
--log.sqlRotationTime duration record sql log rotation time. Env "TAOS_ADAPTER_LOG_SQL_ROTATION_TIME" (default 24h0m0s) --log.sqlRotationTime duration record sql log rotation time. Env "TAOS_ADAPTER_LOG_SQL_ROTATION_TIME" (default 24h0m0s)
--logLevel string log level (panic fatal error warn warning info debug trace). Env "TAOS_ADAPTER_LOG_LEVEL" (default "info") --logLevel string log level (panic fatal error warn warning info debug trace). Env "TAOS_ADAPTER_LOG_LEVEL" (default "info")
--monitor.collectDuration duration Set monitor duration. Env "TAOS_MONITOR_COLLECT_DURATION" (default 3s) --monitor.collectDuration duration Set monitor duration. Env "TAOS_ADAPTER_MONITOR_COLLECT_DURATION" (default 3s)
--monitor.identity string The identity of the current instance, or 'hostname:port' if it is empty. Env "TAOS_MONITOR_IDENTITY" --monitor.disable Whether to disable monitoring. Env "TAOS_ADAPTER_MONITOR_DISABLE"
--monitor.incgroup Whether running in cgroup. Env "TAOS_MONITOR_INCGROUP" --monitor.disableCollectClientIP Whether to disable collecting clientIP. Env "TAOS_ADAPTER_MONITOR_DISABLE_COLLECT_CLIENT_IP"
--monitor.password string TDengine password. Env "TAOS_MONITOR_PASSWORD" (default "taosdata") --monitor.pauseAllMemoryThreshold float Memory percentage threshold for pause all. Env "TAOS_MONITOR_PAUSE_ALL_MEMORY_THRESHOLD" (default 80) --monitor.identity string The identity of the current instance, or 'hostname:port' if it is empty. Env "TAOS_ADAPTER_MONITOR_IDENTITY"
--monitor.pauseQueryMemoryThreshold float Memory percentage threshold for pause query. Env "TAOS_MONITOR_PAUSE_QUERY_MEMORY_THRESHOLD" (default 70) --monitor.incgroup Whether running in cgroup. Env "TAOS_ADAPTER_MONITOR_INCGROUP"
--monitor.user string TDengine user. Env "TAOS_MONITOR_USER" (default "root") --monitor.password string TDengine password. Env "TAOS_ADAPTER_MONITOR_PASSWORD" (default "taosdata")
--monitor.writeInterval duration Set write to TDengine interval. Env "TAOS_MONITOR_WRITE_INTERVAL" (default 30s) --monitor.pauseAllMemoryThreshold float Memory percentage threshold for pause all. Env "TAOS_ADAPTER_MONITOR_PAUSE_ALL_MEMORY_THRESHOLD" (default 80)
--monitor.writeToTD Whether write metrics to TDengine. Env "TAOS_MONITOR_WRITE_TO_TD" --monitor.pauseQueryMemoryThreshold float Memory percentage threshold for pause query. Env "TAOS_ADAPTER_MONITOR_PAUSE_QUERY_MEMORY_THRESHOLD" (default 70)
--monitor.user string TDengine user. Env "TAOS_ADAPTER_MONITOR_USER" (default "root")
--monitor.writeInterval duration Set write to TDengine interval. Env "TAOS_ADAPTER_MONITOR_WRITE_INTERVAL" (default 30s)
--monitor.writeToTD Whether write metrics to TDengine. Env "TAOS_ADAPTER_MONITOR_WRITE_TO_TD"
--node_exporter.caCertFile string node_exporter ca cert file path. Env "TAOS_ADAPTER_NODE_EXPORTER_CA_CERT_FILE" --node_exporter.caCertFile string node_exporter ca cert file path. Env "TAOS_ADAPTER_NODE_EXPORTER_CA_CERT_FILE"
--node_exporter.certFile string node_exporter cert file path. Env "TAOS_ADAPTER_NODE_EXPORTER_CERT_FILE" --node_exporter.certFile string node_exporter cert file path. Env "TAOS_ADAPTER_NODE_EXPORTER_CERT_FILE"
--node_exporter.db string node_exporter db name. Env "TAOS_ADAPTER_NODE_EXPORTER_DB" (default "node_exporter") --node_exporter.db string node_exporter db name. Env "TAOS_ADAPTER_NODE_EXPORTER_DB" (default "node_exporter")
...@@ -100,9 +104,9 @@ Usage of taosAdapter:
--node_exporter.keyFile string node_exporter cert key file path. Env "TAOS_ADAPTER_NODE_EXPORTER_KEY_FILE"
--node_exporter.password string node_exporter password. Env "TAOS_ADAPTER_NODE_EXPORTER_PASSWORD" (default "taosdata")
--node_exporter.responseTimeout duration node_exporter response timeout. Env "TAOS_ADAPTER_NODE_EXPORTER_RESPONSE_TIMEOUT" (default 5s)
--node_exporter.ttl int node_exporter data ttl. Env "TAOS_ADAPTER_NODE_EXPORTER_TTL"
--node_exporter.urls strings node_exporter urls. Env "TAOS_ADAPTER_NODE_EXPORTER_URLS" (default [http://localhost:9100])
--node_exporter.user string node_exporter user. Env "TAOS_ADAPTER_NODE_EXPORTER_USER" (default "root")
--opentsdb.enable enable opentsdb. Env "TAOS_ADAPTER_OPENTSDB_ENABLE" (default true)
--opentsdb_telnet.batchSize int opentsdb_telnet batch size. Env "TAOS_ADAPTER_OPENTSDB_TELNET_BATCH_SIZE" (default 1)
--opentsdb_telnet.dbs strings opentsdb_telnet db names. Env "TAOS_ADAPTER_OPENTSDB_TELNET_DBS" (default [opentsdb_telnet,collectd_tsdb,icinga2_tsdb,tcollector_tsdb])
...@@ -112,11 +116,11 @@ Usage of taosAdapter:
--opentsdb_telnet.password string opentsdb_telnet password. Env "TAOS_ADAPTER_OPENTSDB_TELNET_PASSWORD" (default "taosdata")
--opentsdb_telnet.ports ints opentsdb telnet tcp port. Env "TAOS_ADAPTER_OPENTSDB_TELNET_PORTS" (default [6046,6047,6048,6049])
--opentsdb_telnet.tcpKeepAlive enable tcp keep alive. Env "TAOS_ADAPTER_OPENTSDB_TELNET_TCP_KEEP_ALIVE"
--opentsdb_telnet.ttl int opentsdb_telnet data ttl. Env "TAOS_ADAPTER_OPENTSDB_TELNET_TTL"
--opentsdb_telnet.user string opentsdb_telnet user. Env "TAOS_ADAPTER_OPENTSDB_TELNET_USER" (default "root")
--pool.idleTimeout duration Set idle connection timeout. Env "TAOS_ADAPTER_POOL_IDLE_TIMEOUT"
--pool.maxConnect int max connections to taosd. Env "TAOS_ADAPTER_POOL_MAX_CONNECT"
--pool.maxIdle int max idle connections to taosd. Env "TAOS_ADAPTER_POOL_MAX_IDLE"
-P, --port int http port. Env "TAOS_ADAPTER_PORT" (default 6041)
--prometheus.enable enable prometheus. Env "TAOS_ADAPTER_PROMETHEUS_ENABLE" (default true)
--restfulRowLimit int restful returns the maximum number of rows (-1 means no limit). Env "TAOS_ADAPTER_RESTFUL_ROW_LIMIT" (default -1)
...@@ -133,9 +137,9 @@ Usage of taosAdapter:
--statsd.port int statsd server port. Env "TAOS_ADAPTER_STATSD_PORT" (default 6044)
--statsd.protocol string statsd protocol [tcp or udp]. Env "TAOS_ADAPTER_STATSD_PROTOCOL" (default "udp")
--statsd.tcpKeepAlive enable tcp keep alive. Env "TAOS_ADAPTER_STATSD_TCP_KEEP_ALIVE"
--statsd.ttl int statsd data ttl. Env "TAOS_ADAPTER_STATSD_TTL"
--statsd.user string statsd user. Env "TAOS_ADAPTER_STATSD_USER" (default "root")
--statsd.worker int statsd write worker. Env "TAOS_ADAPTER_STATSD_WORKER" (default 10)
--taosConfigDir string load taos client config path. Env "TAOS_ADAPTER_TAOS_CONFIG_FILE"
--version Print the version and exit
```
...@@ -324,6 +328,10 @@ This parameter controls the number of results returned by the following interfac
- `http://<fqdn>:6041/rest/sql`
- `http://<fqdn>:6041/prometheus/v1/remote_read/:db`
## Configure HTTP Return Codes

taosAdapter uses the parameter `httpCodeServerError` to control whether a non-200 HTTP status code is returned when the C interface reports an error. When set to true, taosAdapter returns an HTTP status code that depends on the error code returned by the C interface. For details, see the HTTP Response Code chapter of the [RESTful API](https://docs.tdengine.com/reference/rest-api/).
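As an illustration, the difference can be observed with a plain HTTP client; the sketch below assumes a local taosAdapter with the default credentials and deliberately queries a non-existent table:

```python
import requests

# A sketch: with httpCodeServerError=true a server-side error maps to a
# non-200 status; with the default (false) the status stays 200 and the
# error is reported in the JSON body's "code" field.
resp = requests.post(
    "http://localhost:6041/rest/sql",
    data="select * from no_such_db.no_such_table",
    auth=("root", "taosdata"),
)
print(resp.status_code, resp.json().get("code"))
```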
## Troubleshooting

You can check the taosAdapter running status with the `systemctl status taosadapter` command.
...
...@@ -208,7 +208,10 @@ taosBenchmark -A INT,DOUBLE,NCHAR,BINARY\(16\)
Keep trying if an insert fails; default is no. Available with v3.0.9+.
- **-z/--trying-interval <NUMBER\>** :
Specify the interval between insert retries. The valid value is a positive number. Only valid when keep trying is enabled. Available with v3.0.9+.
- **-v/--vgroups <NUMBER\>** :
Specify the number of vgroups when creating a database; only valid with daemon version 3.0+.
- **-V/--version** :
Show version information only. Users should not use it with other parameters.
...@@ -239,7 +242,15 @@ The parameters listed in this section apply to all function modes.
- **keep_trying**: Keep trying if an insert fails; default is no. Available with v3.0.9+.
- **trying_interval**: Specify the interval between insert retries. The valid value is a positive number. Only valid when keep trying is enabled. Available with v3.0.9+.
- **childtable_from and childtable_to**: Specify the range of child tables to create. The range is [childtable_from, childtable_to).

- **continue_if_fail**: Allows the user to specify how taosBenchmark reacts when an insert fails.
  - "continue_if_fail": "no" // taosBenchmark exits if an insert fails (the default behavior).
  - "continue_if_fail": "yes" // taosBenchmark warns the user if an insert fails but continues to insert the next record.
  - "continue_if_fail": "smart" // taosBenchmark tries to create the non-existent child table if an insert fails.
#### Database related configuration parameters

...@@ -352,7 +363,7 @@ The configuration parameters for specifying super table tag columns and data col
- **min**: The minimum value of the column/label of the data type. The generated value will be greater than or equal to the minimum value.
- **max**: The maximum value of the column/label of the data type. The generated value will be less than the maximum value.
- **values**: The value field of the nchar/binary column/label, which will be chosen randomly from the values.
...
...@@ -1590,7 +1590,7 @@
},
{
"datasource": "${DS_TDENGINE}",
"description": "taosd max memory last 10 minutes",
"fieldConfig": {
"defaults": {
"color": {
...@@ -1919,7 +1919,7 @@
},
{
"datasource": "${DS_TDENGINE}",
"description": "taosd max memory last 10 minutes",
"fieldConfig": {
"defaults": {
"color": {
...@@ -1977,7 +1977,7 @@
},
{
"datasource": "${DS_TDENGINE}",
"description": "taosd max memory last 10 minutes",
"fieldConfig": {
"defaults": {
"color": {
...@@ -2825,7 +2825,7 @@
"timeFrom": null,
"timeRegions": [],
"timeShift": null,
"title": "Requests Count per Minutes $fqdn",
"tooltip": {
"shared": true,
"sort": 0,
...
...@@ -1566,7 +1566,7 @@
},
{
"datasource": "${ds}",
"description": "taosd max memory last 10 minutes",
"fieldConfig": {
"defaults": {
"color": {
...@@ -1933,7 +1933,7 @@
},
{
"datasource": "${ds}",
"description": "taosd max memory last 10 minutes",
"fieldConfig": {
"defaults": {
"color": {
...@@ -2000,7 +2000,7 @@
},
{
"datasource": "${ds}",
"description": "taosd max memory last 10 minutes",
"fieldConfig": {
"defaults": {
"color": {
...@@ -2961,7 +2961,7 @@
"timeFrom": null,
"timeRegions": [],
"timeShift": null,
"title": "Requests Count per Minutes $fqdn",
"tooltip": {
"shared": true,
"sort": 0,
...@@ -3355,4 +3355,4 @@
"title": "TDengine",
"uid": "tdengine",
"version": 8
}
...@@ -186,7 +186,7 @@
},
{
"datasource": "TDengine",
"description": "taosd max memory last 10 minutes",
"gridPos": {
"h": 6,
"w": 8,
...@@ -253,7 +253,7 @@
],
"timeFrom": null,
"timeShift": null,
"title": "taosd memory",
"type": "gauge"
},
{
...
...@@ -29,7 +29,7 @@ taos -C
taos --dump-config
```

## Configuration Parameters

:::note
The parameters are described in this document by the effect that they have on the system.
...@@ -83,7 +83,7 @@ The parameters described in this document by the effect that they have on the sy
| :------- | :----------- | :----------------------------------------------- | :--------------------------------------------------------------------------------------------- |
| TCP | 6030 | Communication between client and server. In a multi-node cluster, communication between nodes. | serverPort |
| TCP | 6041 | REST connection between client and server | Prior to 2.4.0.0: serverPort+11; after 2.4.0.0 refer to [taosAdapter](/reference/taosadapter/) |
| TCP | 6043 | Service Port of taosKeeper | The parameter of taosKeeper |
| TCP | 6044 | Data access port for StatsD | Configurable through taosAdapter parameters. |
| UDP | 6045 | Data access for statsd | Configurable through taosAdapter parameters. |
| TCP | 6060 | Port of Monitoring Service in Enterprise version | |
...@@ -599,7 +599,7 @@ The charset that takes effect is UTF-8.
| Applicable | Client only |
| Meaning | Whether schemaless columns are consistently ordered; deprecated, discarded since 3.0.3.0 |
| Value Range | 0: not consistent; 1: consistent. |
| Default | 0 |

## Compress Parameters

...
...@@ -24,7 +24,7 @@ All executable files of TDengine are in the _/usr/local/taos/bin_ directory by d
- _taosdump_: data import and export tool
- _taosBenchmark_: TDengine testing tool
- _remove.sh_: script to uninstall TDengine, please execute it carefully, link to the **rmtaos** command in the /usr/bin directory. Will remove the TDengine installation directory `/usr/local/taos`, but will keep `/etc/taos`, `/var/lib/taos`, `/var/log/taos`
- _taosadapter_: server-side executable that provides RESTful services and accepts writing requests from a variety of other software
- _TDinsight.sh_: script to download TDinsight and install it
- _set_core.sh_: script for setting up the system to generate core dump files for easy debugging
- _taosd-dump-cfg.gdb_: script to facilitate debugging of taosd's gdb execution.
...
...@@ -84,7 +84,7 @@ You can configure smlChildTableName in taos.cfg to specify table names, for exam
:::tip
All processing logic of schemaless will still follow TDengine's underlying restrictions on data structures; for example, the total length of each row of data cannot exceed 48KB, and the total length of a tag value cannot exceed 16KB. See [TDengine SQL Boundary Limits](/taos-sql/limit) for specific constraints in this area.
:::
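For reference, a minimal schemaless write through the Python connector might look like the sketch below; it assumes a local server, reuses the line-protocol record shown later on this page, and the enum names follow the taospy package:

```python
import taos
from taos import SmlProtocol, SmlPrecision

# A sketch of a line-protocol write; every row must respect the 48KB row
# and 16KB tag limits noted above.
conn = taos.connect()
conn.execute("create database if not exists test")
conn.select_db("test")
lines = ['st,t1=3,t2=4,t3=t3 c1=3i64,c3="passit",c2=false,c4=4 1626006833639000000']
conn.schemaless_insert(lines, SmlProtocol.LINE_PROTOCOL, SmlPrecision.NANO_SECONDS)
```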
...@@ -131,7 +131,7 @@ create stable st (_ts timestamp, c1 bigint, c2 bool, c3 binary(6), c4 bigint) ta
This section describes the impact on the schema caused by different data being written.
If you use line protocol to write to a specific tag field and then later change the field type, a schema error will occur. This triggers an error on the write API. This is shown as follows:

```json
st,t1=3,t2=4,t3=t3 c1=3i64,c3="passit",c2=false,c4=4 1626006833639000000
...
...@@ -31,7 +31,7 @@ The default database name written by taosAdapter is `statsd`. To specify a diffe
### Configuring StatsD

To use StatsD, you need to download its [source code](https://github.com/statsd/statsd). Please refer to the example file `exampleConfig.js` in the root directory of the source download to modify the configuration file. In <taosAdapter's host\>, please fill in the domain name or IP address of the server running taosAdapter, and in <port for StatsD\>, please fill in the port where taosAdapter receives StatsD data (default is 6044).

```
backends section add ". /backends/repeater"
...
...@@ -77,7 +77,7 @@ Development: false
### Install from source code

```
git clone --branch 3.0 https://github.com/taosdata/kafka-connect-tdengine.git
cd kafka-connect-tdengine
mvn clean package
unzip -d $CONFLUENT_HOME/share/java/ target/components/packages/taosdata-kafka-connect-tdengine-*.zip
...
...@@ -28,4 +28,4 @@ SHOW MNODES;
The end point and role/status (leader, follower, candidate, offline) of all mnodes can be shown by the above command. When the first dnode is started in a cluster, there must be one mnode in this dnode. Without at least one mnode, the cluster cannot work.
From TDengine 3.0.0, the RAFT protocol is used to guarantee high availability, so the number of mnodes should be 1 or 3.
...@@ -14,8 +14,8 @@ create database db0 vgroups 100;
The proper value of `vgroups` depends on available system resources. Assuming there is only one database to be created in the system, then the number of `vgroups` is determined by the available resources from all dnodes. In principle more vgroups can be created if you have more CPU and memory. Disk I/O is another important factor to consider. Once the bottleneck shows on disk I/O, more vgroups may downgrade the system performance significantly. If multiple databases are to be created in the system, then the total number of `vgroups` of all the databases depends on the available system resources. Care is needed when distributing vgroups among these databases: you need to consider the number of tables, data writing frequency, and size of each data row for all these databases. A recommended practice is to first choose a starting number for `vgroups`, for example double the number of CPU cores, then try to adjust and optimize system configurations to find the best setting for `vgroups`, then distribute these vgroups among databases.
Furthermore, TDengine distributes the vgroups of each database equally among all dnodes. In case of replica 3, the distribution is even more complex; TDengine tries its best to prevent any dnode from becoming a bottleneck.
TDengine utilizes the above ways to achieve load balance in a cluster, and finally achieve higher throughput.
Once load balance is achieved, after some operations like deleting tables or dropping databases, the load across all dnodes may become imbalanced; the method of rebalance will be provided in later versions. However, even without explicit rebalancing, TDengine will try its best to achieve a new balance without manual intervention when a new database is created.
...@@ -67,7 +67,7 @@ sudo systemctl start telegraf
Log in to the Grafana interface using a web browser at `IP:3000`, with the system's initial username and password being `admin/admin`.
Click on the gear icon on the left and select `Plugins`, you should find the TDengine data source plugin icon.
Click on the plus icon on the left and select `Import` to get the data from `https://github.com/taosdata/grafanaplugin/blob/master/examples/telegraf/grafana/dashboards/telegraf-dashboard-v3.json` (for TDengine 3.0; for TDengine 2.x, please use `telegraf-dashboard-v2.json`), download the dashboard JSON file and import it. You will then see the dashboard in the following screen.

![TDengine Database IT-DevOps-Solutions-telegraf-dashboard](./IT-DevOps-Solutions-telegraf-dashboard.webp)
...
...@@ -10,6 +10,10 @@ For TDengine 2.x installation packages by version, please visit [here](https://w
import Release from "/components/ReleaseV3";

## 3.0.3.0

<Release type="tdengine" version="3.0.3.0" />

## 3.0.2.6

<Release type="tdengine" version="3.0.2.6" />
...
...@@ -10,6 +10,14 @@ For other historical version installers, please visit [here](https://www.taosdat
import Release from "/components/ReleaseV3";

## 2.4.9

<Release type="tools" version="2.4.9" />

## 2.4.8

<Release type="tools" version="2.4.8" />

## 2.4.6

<Release type="tools" version="2.4.6" />
...
...@@ -36,28 +36,17 @@ public class DataBaseMonitor {
stmt.execute("CREATE STABLE test.meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS (location BINARY(64), groupId INT)");
}

public long count() throws SQLException {
    try (ResultSet result = stmt.executeQuery("SELECT count(*) from test.meters")) {
        result.next();
        return result.getLong(1);
    }
}

public long getTableCount() throws SQLException {
    try (ResultSet result = stmt.executeQuery("select count(*) from information_schema.ins_tables where db_name = 'test';")) {
        result.next();
        return result.getLong(1);
    }
}
}
...@@ -42,7 +42,7 @@ public class SQLWriter {
/**
 * Maximum SQL length.
 */
private int maxSQLLength = 800_000;

/**
 * Map from table name to column values. For example:
...@@ -81,14 +81,6 @@ public class SQLWriter {
conn = getConnection();
stmt = conn.createStatement();
stmt.execute("use test");
}

/**
...@@ -149,7 +141,7 @@ public class SQLWriter {
} catch (SQLException e) {
    // convert to error code defined in taoserror.h
    int errorCode = e.getErrorCode() & 0xffff;
    if (errorCode == 0x2603) {
        // Table does not exist
        createTables();
        executeSQL(sql);
...
import pandas
from sqlalchemy import create_engine, text
import taos
taos_conn = taos.connect()
taos_conn.execute('drop database if exists power')
taos_conn.execute('create database if not exists power')
taos_conn.execute("use power")
taos_conn.execute(
"CREATE STABLE power.meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS (location BINARY(64), groupId INT)")
# insert data
taos_conn.execute("""INSERT INTO power.d1001 USING power.meters TAGS('California.SanFrancisco', 2)
VALUES ('2018-10-03 14:38:05.000', 10.30000, 219, 0.31000) ('2018-10-03 14:38:15.000', 12.60000, 218, 0.33000)
('2018-10-03 14:38:16.800', 12.30000, 221, 0.31000)
power.d1002 USING power.meters TAGS('California.SanFrancisco', 3)
VALUES ('2018-10-03 14:38:16.650', 10.30000, 218, 0.25000)
power.d1003 USING power.meters TAGS('California.LosAngeles', 2)
VALUES ('2018-10-03 14:38:05.500', 11.80000, 221, 0.28000) ('2018-10-03 14:38:16.600', 13.40000, 223, 0.29000)
power.d1004 USING power.meters TAGS('California.LosAngeles', 3)
VALUES ('2018-10-03 14:38:05.000', 10.80000, 223, 0.29000) ('2018-10-03 14:38:06.500', 11.50000, 221, 0.35000)""")
engine = create_engine("taosws://root:taosdata@localhost:6041")
conn = engine.connect()
df: pandas.DataFrame = pandas.read_sql(text("SELECT * FROM power.meters"), conn)
conn.close()
# print index
print(df.index)
# print data type of element in ts column
print(type(df.ts[0]))
print(df.head(3))
# output:
# RangeIndex(start=0, stop=8, step=1)
# <class 'pandas._libs.tslibs.timestamps.Timestamp'>
# ts current ... location groupid
# 0 2018-10-03 14:38:05.000 10.3 ... California.SanFrancisco 2
# 1 2018-10-03 14:38:15.000 12.6 ... California.SanFrancisco 2
# 2 2018-10-03 14:38:16.800 12.3 ... California.SanFrancisco 2
# ANCHOR: connect
import taosws
conn = taosws.connect("taosws://root:taosdata@localhost:6041")
# ANCHOR_END: connect
# ANCHOR: basic
conn.execute("drop database if exists connwspy")
conn.execute("create database if not exists connwspy")
conn.execute("use connwspy")
conn.execute("create table if not exists stb (ts timestamp, c1 int) tags (t1 int)")
conn.execute("create table if not exists tb1 using stb tags (1)")
conn.execute("insert into tb1 values (now, 1)")
conn.execute("insert into tb1 values (now, 2)")
conn.execute("insert into tb1 values (now, 3)")
r = conn.execute("select * from stb")
result = conn.query("select * from stb")
num_of_fields = result.field_count
print(num_of_fields)
for row in result:
print(row)
# output:
# 3
# ('2023-02-28 15:56:13.329 +08:00', 1, 1)
# ('2023-02-28 15:56:13.333 +08:00', 2, 1)
# ('2023-02-28 15:56:13.337 +08:00', 3, 1)
...@@ -149,7 +149,7 @@ TDengine recommends using the name of the data collection point (such as d1001 in the table above) as the table name.
3. A subtable always belongs to exactly one supertable, while a regular table does not belong to any supertable.
4. A regular table cannot be converted into a subtable, and a subtable cannot be converted into a regular table.
The relationship between a supertable and the subtables created from it is as follows:
1. A supertable contains multiple subtables that share the same metric schema but carry different tag values.
2. The schema of data or tags cannot be adjusted through a subtable; schema changes made on a supertable take effect immediately for all of its subtables.
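As a sketch, this relationship can be seen by creating one supertable and two subtables through the Python connector (the meters schema is reused from the examples in this repository; the database name demo is hypothetical):

```python
import taos

# One supertable, two subtables: the subtables share the schema but carry
# different tag values, and schema changes on the supertable affect both.
conn = taos.connect()
conn.execute("create database if not exists demo")
conn.execute("use demo")
conn.execute("CREATE STABLE meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS (location BINARY(64), groupId INT)")
conn.execute("CREATE TABLE d1001 USING meters TAGS ('California.SanFrancisco', 2)")
conn.execute("CREATE TABLE d1002 USING meters TAGS ('California.LosAngeles', 3)")
```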
...
...@@ -196,20 +196,22 @@ Active: inactive (dead)
<TabItem label="macOS" value="macos">

After installation, double-click the TDengine icon in the Applications directory to start the program, or run `sudo launchctl start com.tdengine.taosd` to start the TDengine service process.

The following `launchctl` commands are used to manage the TDengine service:

- Start the service process: `sudo launchctl start com.tdengine.taosd`

- Stop the service process: `sudo launchctl stop com.tdengine.taosd`

- Check the service status: `sudo launchctl list | grep taosd`

:::info
- Managing `com.tdengine.taosd` with the `launchctl` command requires administrator privileges; always prefix the command with `sudo` for security.
- The first column returned by `sudo launchctl list | grep taosd` is the PID of the `taosd` program. If it is `-`, the TDengine service is not running.
- Troubleshooting:
  - If the service behaves abnormally, check the system log `launchd.log` or the `taosdlog` logs under the `/var/log/taos` directory for more information.
:::
...
...@@ -7,6 +7,7 @@ title: Data Subscription
import Tabs from "@theme/Tabs";
import TabItem from "@theme/TabItem";
import Java from "./_sub_java.mdx";
import JavaWS from "./_sub_java_ws.mdx";
import Python from "./_sub_python.mdx";
import Go from "./_sub_go.mdx";
import Rust from "./_sub_rust.mdx";
...@@ -288,9 +289,9 @@ CREATE TOPIC topic_name AS DATABASE db_name;
| `td.connect.port` | integer | Used when creating the connection; same as `taos_connect` | |
| `group.id` | string | Consumer group ID; the same consumer group shares its consumption progress | **Required**. Maximum length: 192. |
| `client.id` | string | Client ID | Maximum length: 192. |
| `auto.offset.reset` | enum | Initial position subscribed by the consumer group | <br />`earliest`: default, subscribe from the beginning; <br/>`latest`: subscribe only from the latest data; <br/>`none`: cannot subscribe without a committed offset |
| `enable.auto.commit` | boolean | Whether to enable automatic commit of consumption points | Valid values: `true`, `false`. |
| `auto.commit.interval.ms` | integer | Interval, in milliseconds, at which consumption points are automatically committed | Default: 5000 ms |
| `enable.heartbeat.background` | boolean | Enable background heartbeat; when enabled, the consumer will not go offline even if it does not poll for a long time | Enabled by default |
| `experimental.snapshot.enable` | boolean | Whether to allow consuming data from the TSDB | Experimental feature, disabled by default |
| `msg.with.table.name` | boolean | Whether to allow parsing the table name from the message; not applicable to column subscription (for column subscription, tbname can be written as a column into the subquery statement) | |
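For illustration, these settings are passed as a plain mapping when a consumer is created; the sketch below uses the Python connector's TMQ interface and assumes a topic named topic_meters already exists:

```python
from taos.tmq import Consumer

# A sketch: the configuration keys follow the parameter table above.
consumer = Consumer({
    "group.id": "group1",
    "client.id": "client1",
    "auto.offset.reset": "earliest",
    "enable.auto.commit": "true",
})
consumer.subscribe(["topic_meters"])
try:
    message = consumer.poll(1)  # wait up to 1 second for a message
    if message and not message.error():
        for block in message.value():
            print(block.fetchall())
finally:
    consumer.unsubscribe()
    consumer.close()
```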
...@@ -807,7 +808,14 @@ SHOW SUBSCRIPTIONS;
</TabItem>
<TabItem label="Java" value="java">
<Tabs defaultValue="native">
<TabItem value="native" label="Native connection">
<Java />
</TabItem>
<TabItem value="ws" label="WebSocket connection">
<JavaWS />
</TabItem>
</Tabs>
</TabItem>
<TabItem label="Go" value="Go">
...
...@@ -65,11 +65,11 @@ int32_t aggfn_init() {
}

// aggregate start function. The intermediate value or the state(@interBuf) is initialized in this function. The function name shall be concatenation of udf name and _start suffix
// @param interBuf intermediate value to initialize
// @return error number defined in taoserror.h
int32_t aggfn_start(SUdfInterBuf* interBuf) {
    // initialize intermediate value in interBuf
    return TSDB_CODE_SUCCESS;
}

// aggregate reduce function. This function aggregates the old state(@interBuf) and one data block(@inputBlock) and outputs a new state(@newInterBuf).
...
```java
{{#include docs/examples/java/src/main/java/com/taos/example/SubscribeDemo.java}}
```
...@@ -12,20 +6,3 @@ import TabItem from '@theme/TabItem';
```
```java
{{#include docs/examples/java/src/main/java/com/taos/example/Meters.java}}
```
...
```java
{{#include docs/examples/java/src/main/java/com/taos/example/WebsocketSubscribeDemo.java}}
```
```java
{{#include docs/examples/java/src/main/java/com/taos/example/MetersDeserializer.java}}
```
```java
{{#include docs/examples/java/src/main/java/com/taos/example/Meters.java}}
```
...
...@@ -69,7 +69,7 @@ curl -L -H "Authorization: Basic cm9vdDp0YW9zZGF0YQ==" \
## HTTP Request Format

```text
http://<fqdn>:<port>/rest/sql/[db_name][?tz=timezone[&req_id=req_id]]
```

Parameter description:
...@@ -78,6 +78,7 @@ http://<fqdn>:<port>/rest/sql/[db_name][?tz=timezone]
- port: the httpPort configuration item in the configuration file, 6041 by default.
- db_name: optional parameter, the default database name for the SQL statement being executed.
- tz: optional parameter, the time zone of the returned timestamps, following the IANA Time Zone rules, e.g. `America/New_York`.
- req_id: optional parameter, a request ID that can be used for tracing.

For example: `http://h1.taos.com:6041/rest/sql/test` points to the address `h1.taos.com:6041` and sets the default database name to `test`.
...@@ -100,13 +101,13 @@ The BODY of the HTTP request is a complete SQL statement, and the data in the SQL statement
Use `curl` to initiate an HTTP request with custom authentication as follows:

```bash
curl -L -H "Authorization: Basic <TOKEN>" -d "<SQL>" <ip>:<PORT>/rest/sql/[db_name][?tz=timezone[&req_id=req_id]]
```

Or,

```bash
curl -L -u username:password -d "<SQL>" <ip>:<PORT>/rest/sql/[db_name][?tz=timezone[&req_id=req_id]]
```

Where `TOKEN` is the Base64-encoded string of `{username}:{password}`; for example, `root:taosdata` encodes to `cm9vdDp0YW9zZGF0YQ==`.
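For illustration, the token can be produced as follows (any Base64 tool works the same way):

```python
import base64

# Encode {username}:{password} for the Authorization: Basic header.
token = base64.b64encode(b"root:taosdata").decode()
print(token)  # cm9vdDp0YW9zZGF0YQ==
```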
...@@ -115,14 +116,41 @@ curl -L -u username:password -d "<SQL>" <ip>:<PORT>/rest/sql/[db_name][?tz=timez

### HTTP Response Codes

Starting with `TDengine 3.0.3.0`, `taosAdapter` provides the configuration parameter `httpCodeServerError` to control whether a non-200 HTTP status code is returned when the C interface returns an error.

| **Description** | **httpCodeServerError false** | **httpCodeServerError true** |
|--------------------|-------------------------------|---------------------------------------|
| taos_errno() returns 0 | 200 | 200 |
| taos_errno() returns non-0 | 200 (except authentication errors) | 500 (except authentication errors and 400/502 errors) |
| Parameter error | 400 (only for HTTP request URL parameter errors) | 400 (for HTTP request URL parameter errors and errors returned by taosd) |
| Authentication error | 401 | 401 |
| Interface does not exist | 404 | 404 |
| Cluster unavailable error | 502 | 502 |
| Insufficient system resources | 503 | 503 |
The C error codes that return 400 are:

- TSDB_CODE_TSC_SQL_SYNTAX_ERROR (0x0216)
- TSDB_CODE_TSC_LINE_SYNTAX_ERROR (0x021B)
- TSDB_CODE_PAR_SYNTAX_ERROR (0x2600)
- TSDB_CODE_TDB_TIMESTAMP_OUT_OF_RANGE (0x060B)
- TSDB_CODE_TSC_VALUE_OUT_OF_RANGE (0x0224)
- TSDB_CODE_PAR_INVALID_FILL_TIME_RANGE (0x263B)

The error codes that return 401 are:

- TSDB_CODE_MND_USER_ALREADY_EXIST (0x0350)
- TSDB_CODE_MND_USER_NOT_EXIST (0x0351)
- TSDB_CODE_MND_INVALID_USER_FORMAT (0x0352)
- TSDB_CODE_MND_INVALID_PASS_FORMAT (0x0353)
- TSDB_CODE_MND_NO_USER_FROM_CONN (0x0354)
- TSDB_CODE_MND_TOO_MANY_USERS (0x0355)
- TSDB_CODE_MND_INVALID_ALTER_OPER (0x0356)
- TSDB_CODE_MND_AUTH_FAILURE (0x0357)

The error codes that return 403 are:

- TSDB_CODE_RPC_SOMENODE_NOT_CONNECTED (0x0020)
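As a sketch, a traced request and its status code can be checked with a plain HTTP client, assuming a local taosAdapter with the default credentials:

```python
import requests

# The resulting status code follows the mapping table above and depends on
# the httpCodeServerError setting.
resp = requests.post(
    "http://localhost:6041/rest/sql?req_id=3",
    data="select server_version()",
    auth=("root", "taosdata"),
)
print(resp.status_code, resp.json())
```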
### HTTP Body Structure

...@@ -270,7 +298,6 @@ curl http://192.168.0.1:6041/rest/login/root/taosdata

```json
{
"code": 0,
"desc": "/KfeAzX/f9na8qdtNZmtONryp201ma04bEl8LcvLUd7a8qdtNZmtONryp201ma04"
}
...
...@@ -263,6 +263,14 @@ int taos_print_row(char *str, TAOS_ROW row, TAOS_FIELD *fields, int num_fields)
- `int taos_select_db(TAOS *taos, const char *db)`

Sets the current default database to `db`.

- `int taos_get_current_db(TAOS *taos, char *database, int len, int *required)`

  - `database` and `len` are a buffer and its size allocated by the caller; the current database name is copied into `database`.
  - Whenever the database name cannot be copied into `database` correctly (including truncation), an error is returned with return value -1, and the user can call taos_errstr(NULL) to get the error message.
  - If `database == NULL` or `len <= 0`, an error is returned, and `required` holds the space needed to store the database name (including the terminating '\0').
  - If `len` is smaller than the space needed to store the database name (including the terminating '\0'), an error is returned, and `database` holds the truncated name, terminated with '\0'.
  - If `len` is greater than or equal to the space needed to store the database name (including the terminating '\0'), 0 is returned for success, and `database` holds the '\0'-terminated database name.

- `void taos_close(TAOS *taos)`
...@@ -302,7 +310,7 @@ int taos_print_row(char *str, TAOS_ROW row, TAOS_FIELD *fields, int num_fields)
- `TAOS_FIELD *taos_fetch_fields(TAOS_RES *res)`

Gets the attributes (column name, data type, length) of each column of data in the query result set. Used together with `taos_num_fields()`, it can parse the data of a tuple (one row) returned by `taos_fetch_row()`. The structure of `TAOS_FIELD` is as follows:

```c
typedef struct taosField {
...@@ -493,5 +501,17 @@ TDengine 的异步 API 均采用非阻塞调用模式。应用程序可以用多 ...@@ -493,5 +501,17 @@ TDengine 的异步 API 均采用非阻塞调用模式。应用程序可以用多
需要注意的是,时间戳分辨率参数只在协议类型为 `SML_LINE_PROTOCOL` 的时候生效。 需要注意的是,时间戳分辨率参数只在协议类型为 `SML_LINE_PROTOCOL` 的时候生效。
对于 OpenTSDB 的文本协议,时间戳的解析遵循其官方解析规则 — 按照时间戳包含的字符的数量来确认时间精度。 对于 OpenTSDB 的文本协议,时间戳的解析遵循其官方解析规则 — 按照时间戳包含的字符的数量来确认时间精度。
**支持版本** **schemaless 其他相关的接口**
该功能接口从 2.3.0.0 版本开始支持。 - `TAOS_RES *taos_schemaless_insert_with_reqid(TAOS *taos, char *lines[], int numLines, int protocol, int precision, int64_t reqid)`
- `TAOS_RES *taos_schemaless_insert_raw(TAOS *taos, char *lines, int len, int32_t *totalRows, int protocol, int precision)`
- `TAOS_RES *taos_schemaless_insert_raw_with_reqid(TAOS *taos, char *lines, int len, int32_t *totalRows, int protocol, int precision, int64_t reqid)`
- `TAOS_RES *taos_schemaless_insert_ttl(TAOS *taos, char *lines[], int numLines, int protocol, int precision, int32_t ttl)`
- `TAOS_RES *taos_schemaless_insert_ttl_with_reqid(TAOS *taos, char *lines[], int numLines, int protocol, int precision, int32_t ttl, int64_t reqid)`
- `TAOS_RES *taos_schemaless_insert_raw_ttl(TAOS *taos, char *lines, int len, int32_t *totalRows, int protocol, int precision, int32_t ttl)`
- `TAOS_RES *taos_schemaless_insert_raw_ttl_with_reqid(TAOS *taos, char *lines, int len, int32_t *totalRows, int protocol, int precision, int32_t ttl, int64_t reqid)`
**Notes**
- The seven interfaces above are extension interfaces, used mainly to pass the ttl and reqid parameters with schemaless writes; use them as needed (see the sketch after this list).
- The `_raw` variants take the data as a pointer `lines` plus a length `len`, which avoids the truncation the original interface suffers when the data contains '\0'. The number of parsed rows is returned through the `totalRows` pointer.
- The `_ttl` variants accept a ttl parameter that controls the TTL expiry of the created tables.
- The `_reqid` variants accept a reqid parameter that can be used to trace the whole call chain.
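For illustration, the sketch below combines the raw, ttl, and reqid variants to write two InfluxDB line protocol records; the connection, sample data, TTL value, and request id are assumptions made for the example:

```c
#include <stdio.h>
#include <string.h>
#include <taos.h>

// Sketch: write two line-protocol records through the raw interface,
// with a table TTL and a caller-chosen request id for tracing.
// Assumes `taos` is a valid connection; names and values are illustrative.
void sml_raw_demo(TAOS *taos) {
  const char *lines =
      "meters,location=beijing current=10.3,voltage=219i 1648432611249000000\n"
      "meters,location=shanghai current=12.6,voltage=218i 1648432611249500000";
  int32_t totalRows = 0;
  TAOS_RES *res = taos_schemaless_insert_raw_ttl_with_reqid(
      taos, (char *)lines, (int)strlen(lines), &totalRows,
      TSDB_SML_LINE_PROTOCOL, TSDB_SML_TIMESTAMP_NANO_SECONDS,
      90 /* ttl, assumed to be in days */, 123456789 /* reqid */);
  if (taos_errno(res) != 0) {
    fprintf(stderr, "schemaless insert failed: %s\n", taos_errstr(res));
  } else {
    printf("parsed %d rows\n", totalRows);
  }
  taos_free_result(res);
}
```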
...@@ -32,7 +32,7 @@ Python 连接器的源码托管在 [GitHub](https://github.com/taosdata/taos-con ...@@ -32,7 +32,7 @@ Python 连接器的源码托管在 [GitHub](https://github.com/taosdata/taos-con
### 准备 ### 准备
1. 安装 Python。新近版本 taospy 包要求 Python 3.6+。早期版本 taospy 包要求 Python 3.7+。taos-ws-py 包要求 Python 3.7+。如果系统上还没有 Python 可参考 [Python BeginnersGuide](https://wiki.python.org/moin/BeginnersGuide/Download) 安装。 1. 安装 Python。新近版本 taospy 包要求 Python 3.6.2+。早期版本 taospy 包要求 Python 3.7+。taos-ws-py 包要求 Python 3.7+。如果系统上还没有 Python 可参考 [Python BeginnersGuide](https://wiki.python.org/moin/BeginnersGuide/Download) 安装。
2. 安装 [pip](https://pypi.org/project/pip/)。大部分情况下 Python 的安装包都自带了 pip 工具, 如果没有请参考 [pip documentation](https://pip.pypa.io/en/stable/installation/) 安装。 2. 安装 [pip](https://pypi.org/project/pip/)。大部分情况下 Python 的安装包都自带了 pip 工具, 如果没有请参考 [pip documentation](https://pip.pypa.io/en/stable/installation/) 安装。
3. 如果使用原生连接,还需[安装客户端驱动](../#安装客户端驱动)。客户端软件包含了 TDengine 客户端动态链接库(libtaos.so 或 taos.dll) 和 TDengine CLI。 3. 如果使用原生连接,还需[安装客户端驱动](../#安装客户端驱动)。客户端软件包含了 TDengine 客户端动态链接库(libtaos.so 或 taos.dll) 和 TDengine CLI。
...@@ -229,6 +229,16 @@ curl -u root:taosdata http://<FQDN>:<PORT>/rest/sql -d "select server_version()" ...@@ -229,6 +229,16 @@ curl -u root:taosdata http://<FQDN>:<PORT>/rest/sql -d "select server_version()"
- `password`: TDengine 用户密码。默认是 taosdata。 - `password`: TDengine 用户密码。默认是 taosdata。
- `timeout`: HTTP 请求超时时间。单位为秒。默认为 `socket._GLOBAL_DEFAULT_TIMEOUT`。 一般无需配置。 - `timeout`: HTTP 请求超时时间。单位为秒。默认为 `socket._GLOBAL_DEFAULT_TIMEOUT`。 一般无需配置。
</TabItem>
<TabItem value="websocket" label="WebSocket 连接">
```python
{{#include docs/examples/python/connect_websocket_examples.py:connect}}
```
The argument to `connect()` is the connection URL; the supported protocols are `taosws` and `ws`.
</TabItem> </TabItem>
</Tabs> </Tabs>
...@@ -298,8 +308,15 @@ TaosCursor 类使用原生连接进行写入、查询操作。在客户端多线 ...@@ -298,8 +308,15 @@ TaosCursor 类使用原生连接进行写入、查询操作。在客户端多线
``` ```
对于 `sql()` 方法更详细的介绍, 请参考 [RestClient](https://docs.taosdata.com/api/taospy/taosrest/restclient.html)。 对于 `sql()` 方法更详细的介绍, 请参考 [RestClient](https://docs.taosdata.com/api/taospy/taosrest/restclient.html)。
</TabItem>
<TabItem value="websocket" label="WebSocket 连接">
```python
{{#include docs/examples/python/connect_websocket_examples.py:basic}}
```
- `conn.execute`: executes an arbitrary SQL statement and returns the number of affected rows
- `conn.query`: executes a query SQL statement and returns the query result
</TabItem> </TabItem>
</Tabs> </Tabs>
...@@ -320,6 +337,13 @@ TaosCursor 类使用原生连接进行写入、查询操作。在客户端多线 ...@@ -320,6 +337,13 @@ TaosCursor 类使用原生连接进行写入、查询操作。在客户端多线
{{#include docs/examples/python/conn_rest_pandas.py}} {{#include docs/examples/python/conn_rest_pandas.py}}
``` ```
</TabItem>
<TabItem value="websocket" label="WebSocket 连接">
```python
{{#include docs/examples/python/conn_websocket_pandas.py}}
```
</TabItem> </TabItem>
</Tabs> </Tabs>
...@@ -335,15 +359,17 @@ TaosCursor 类使用原生连接进行写入、查询操作。在客户端多线 ...@@ -335,15 +359,17 @@ TaosCursor 类使用原生连接进行写入、查询操作。在客户端多线
```python ```python
{{#include docs/examples/python/tmq_example.py}} {{#include docs/examples/python/tmq_example.py}}
``` ```
</TabItem> </TabItem>
<TabItem value="rest" label="websocket 连接"> <TabItem value="websocket" label="WebSocket 连接">
除了原生的连接方式,Python 连接器还支持通过 websocket 订阅 TMQ 数据。 除了原生的连接方式,Python 连接器还支持通过 websocket 订阅 TMQ 数据。
```python ```python
{{#include docs/examples/python/tmq_websocket_example.py}} {{#include docs/examples/python/tmq_websocket_example.py}}
``` ```
</TabItem> </TabItem>
</Tabs> </Tabs>
...@@ -366,7 +392,7 @@ TaosCursor 类使用原生连接进行写入、查询操作。在客户端多线 ...@@ -366,7 +392,7 @@ TaosCursor 类使用原生连接进行写入、查询操作。在客户端多线
```python ```python
{{#include docs/examples/python/handle_exception.py}} {{#include docs/examples/python/handle_exception.py}}
``` ```
### 关于纳秒 (nanosecond) ### 关于纳秒 (nanosecond)
由于目前 Python 对 nanosecond 支持的不完善(见下面的链接),目前的实现方式是在 nanosecond 精度时返回整数,而不是 ms 和 us 返回的 datetime 类型,应用开发者需要自行处理,建议使用 pandas 的 to_datetime()。未来如果 Python 正式完整支持了纳秒,Python 连接器可能会修改相关接口。 由于目前 Python 对 nanosecond 支持的不完善(见下面的链接),目前的实现方式是在 nanosecond 精度时返回整数,而不是 ms 和 us 返回的 datetime 类型,应用开发者需要自行处理,建议使用 pandas 的 to_datetime()。未来如果 Python 正式完整支持了纳秒,Python 连接器可能会修改相关接口。
......
...@@ -96,7 +96,7 @@ dotnet add package TDengine.Connector ...@@ -96,7 +96,7 @@ dotnet add package TDengine.Connector
<ItemGroup> <ItemGroup>
<PackageReference Include="TDengine.Connector" Version="3.0.*" GeneratePathProperty="true" /> <PackageReference Include="TDengine.Connector" Version="3.0.*" GeneratePathProperty="true" />
</ItemGroup> </ItemGroup>
<Target Name="copyDLLDepency" BeforeTargets="BeforeBuild"> <Target Name="copyDLLDependency" BeforeTargets="BeforeBuild">
<ItemGroup> <ItemGroup>
<DepDLLFiles Include="$(PkgTDengine_Connector)\runtimes\**\*.*" /> <DepDLLFiles Include="$(PkgTDengine_Connector)\runtimes\**\*.*" />
</ItemGroup> </ItemGroup>
......
...@@ -60,7 +60,7 @@ TDengine 版本更新往往会增加新的功能特性,列表中的连接器 ...@@ -60,7 +60,7 @@ TDengine 版本更新往往会增加新的功能特性,列表中的连接器
| **连接管理** | 支持 | 支持 | 支持 | 支持 | 支持 | 支持 | | **连接管理** | 支持 | 支持 | 支持 | 支持 | 支持 | 支持 |
| **普通查询** | 支持 | 支持 | 支持 | 支持 | 支持 | 支持 | | **普通查询** | 支持 | 支持 | 支持 | 支持 | 支持 | 支持 |
| **参数绑定** | 暂不支持 | 暂不支持 | 支持 | 支持 | 暂不支持 | 支持 | | **参数绑定** | 暂不支持 | 暂不支持 | 支持 | 支持 | 暂不支持 | 支持 |
| **数据订阅(TMQ)** | 暂不支持 | 支持 | 支持 | 暂不支持 | 暂不支持 | 支持 | | **数据订阅(TMQ)** | 支持 | 支持 | 支持 | 暂不支持 | 暂不支持 | 支持 |
| **Schemaless** | 暂不支持 | 暂不支持 | 暂不支持 | 暂不支持 | 暂不支持 | 暂不支持 | | **Schemaless** | 暂不支持 | 暂不支持 | 暂不支持 | 暂不支持 | 暂不支持 | 暂不支持 |
| **批量拉取(基于 WebSocket)** | 支持 | 支持 | 支持 | 支持 | 支持 | 支持 | | **批量拉取(基于 WebSocket)** | 支持 | 支持 | 支持 | 支持 | 支持 | 支持 |
| **DataFrame** | 不支持 | 支持 | 不支持 | 不支持 | 不支持 | 不支持 | | **DataFrame** | 不支持 | 支持 | 不支持 | 不支持 | 不支持 | 不支持 |
......
...@@ -102,7 +102,7 @@ spec: ...@@ -102,7 +102,7 @@ spec:
# Must set if you want a cluster. # Must set if you want a cluster.
- name: TAOS_FIRST_EP - name: TAOS_FIRST_EP
value: "$(STS_NAME)-0.$(SERVICE_NAME).$(STS_NAMESPACE).svc.cluster.local:$(TAOS_SERVER_PORT)" value: "$(STS_NAME)-0.$(SERVICE_NAME).$(STS_NAMESPACE).svc.cluster.local:$(TAOS_SERVER_PORT)"
# TAOS_FQND should always be setted in k8s env. # TAOS_FQND should always be set in k8s env.
- name: TAOS_FQDN - name: TAOS_FQDN
value: "$(POD_NAME).$(SERVICE_NAME).$(STS_NAMESPACE).svc.cluster.local" value: "$(POD_NAME).$(SERVICE_NAME).$(STS_NAMESPACE).svc.cluster.local"
volumeMounts: volumeMounts:
......
...@@ -75,10 +75,10 @@ database_option: { ...@@ -75,10 +75,10 @@ database_option: {
- TABLE_PREFIX:内部存储引擎根据表名分配存储该表数据的 VNODE 时要忽略的前缀的长度。 - TABLE_PREFIX:内部存储引擎根据表名分配存储该表数据的 VNODE 时要忽略的前缀的长度。
- TABLE_SUFFIX:内部存储引擎根据表名分配存储该表数据的 VNODE 时要忽略的后缀的长度。 - TABLE_SUFFIX:内部存储引擎根据表名分配存储该表数据的 VNODE 时要忽略的后缀的长度。
- TSDB_PAGESIZE:一个 VNODE 中时序数据存储引擎的页大小,单位为 KB,默认为 4 KB。范围为 1 到 16384,即 1 KB到 16 MB。 - TSDB_PAGESIZE:一个 VNODE 中时序数据存储引擎的页大小,单位为 KB,默认为 4 KB。范围为 1 到 16384,即 1 KB到 16 MB。
- WAL_RETENTION_PERIOD:wal 文件的额外保留策略,用于数据订阅。wal 的保存时长,单位为 s。单副本默认为 0,即落盘后立即删除。-1 表示不删除。多副本默认为 4 天 - WAL_RETENTION_PERIOD:数据订阅已消费WAL日志,WAL文件的最大额外保留的时长策略。单位为 s。默认为 0,表示无需额外保留。-1, 表示额外保留,时间无上限
- WAL_RETENTION_SIZE:wal 文件的额外保留策略,用于数据订阅。wal 的保存的最大上限,单位为 KB。单副本默认为 0,即落盘后立即删除。多副本默认为-1,表示不删除 - WAL_RETENTION_SIZE:数据订阅已消费WAL日志,WAL文件的最大额外保留的累计大小策略。单位为 KB。默认为 0,表示无需额外保留。-1, 表示额外保留,累计大小无上限
- WAL_ROLL_PERIOD:wal 文件切换时长,单位为 s。当 wal 文件创建并写入后,经过该时间,会自动创建一个新的 wal 文件。单副本默认为 0,即仅在落盘时创建新文件。多副本默认为 1 天 - WAL_ROLL_PERIOD:wal 文件切换时长,单位为 s。当WAL文件创建并写入后,经过该时间,会自动创建一个新的WAL文件。默认为 0,即仅在TSDB落盘时创建新文件
- WAL_SEGMENT_SIZE:wal 单个文件大小,单位为 KB。当前写入文件大小超过上限后会自动创建一个新的 wal 文件。默认为 0,即仅在落盘时创建新文件。 - WAL_SEGMENT_SIZE:wal 单个文件大小,单位为 KB。当前写入文件大小超过上限后会自动创建一个新的WAL文件。默认为 0,即仅在TSDB落盘时创建新文件。
### 创建数据库示例 ### 创建数据库示例
......
---
sidebar_label: Tag Index
title: Tag Index
description: Using tag indexes to improve query performance
---

## Introduction

Prior to version 3.0.3.0 (exclusive), TDengine creates an index on the first tag column by default but does not support dynamically adding indexes to other columns. Starting with version 3.0.3.0, indexes can be added dynamically to other tag columns. The index automatically created on the first tag column is enabled in queries by default, and users cannot intervene in it in any way. Appropriate use of indexes can effectively improve query performance.

## Syntax

The syntax for creating an index is as follows:

```sql
CREATE INDEX index_name ON tbl_name (tagColName)
```

where `index_name` is the index name, `tbl_name` is the super table name, and `tagColName` is the name of the tag column on which the index is built. The type of `tagColName` is unrestricted: an index can be built on a tag column of any type.

The syntax for dropping an index is as follows:

```sql
DROP INDEX index_name
```

where `index_name` is the name of an existing index; if the index does not exist, the command fails but has no other effect on the system.

To view the indexes that already exist in the system:

```sql
SELECT * FROM information_schema.INS_INDEXES
```

You can also add filter conditions to the query above to narrow the scope of the result.

## Usage Notes

1. A well-used index can improve data filtering efficiency. The currently supported filtering operators are `=`, `>`, `>=`, `<`, and `<=`. If one of these operators is used in the query filter condition, the index can significantly improve query efficiency; with other operators the index has no effect and query efficiency is unchanged. More operators will be added over time.
2. Only one index can be built on a given tag column; creating a duplicate index raises an error.
3. An index can only be built on one tag column at a time; an index covering multiple tag columns at once is not supported.
4. The name of every index in the system, whatever its type, must be unique.
5. There is no limit on the number of indexes, but each additional index adds metadata to the system; too many indexes reduce the efficiency of metadata access and thus degrade overall system performance. Avoid adding indexes you do not need.
6. Indexes cannot be built on regular tables or child tables.
7. If a tag column has few distinct values, building an index on it is not recommended, as the benefit is minimal.
\ No newline at end of file
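As a usage illustration of the statements above, here is a minimal C sketch that creates, lists, and drops a tag index through `taos_query`; the super table `meters`, its tag column `groupid`, and the index name are assumptions made for the example:

```c
#include <stdio.h>
#include <taos.h>

// Helper: run one SQL statement and report errors (sketch only).
static int exec_sql(TAOS *taos, const char *sql) {
  TAOS_RES *res = taos_query(taos, sql);
  int code = taos_errno(res);
  if (code != 0) fprintf(stderr, "'%s' failed: %s\n", sql, taos_errstr(res));
  taos_free_result(res);
  return code;
}

void tag_index_demo(TAOS *taos) {
  // Assumed schema: super table `meters` with tag column `groupid`.
  exec_sql(taos, "CREATE INDEX idx_groupid ON meters (groupid)");

  // List all indexes currently known to the system.
  TAOS_RES *res = taos_query(taos, "SELECT * FROM information_schema.ins_indexes");
  if (taos_errno(res) == 0) {
    int nfields = taos_num_fields(res);
    TAOS_FIELD *fields = taos_fetch_fields(res);
    TAOS_ROW row;
    char buf[1024];  // large enough for this narrow result set
    while ((row = taos_fetch_row(res)) != NULL) {
      taos_print_row(buf, row, fields, nfields);
      printf("%s\n", buf);
    }
  }
  taos_free_result(res);

  exec_sql(taos, "DROP INDEX idx_groupid");
}
```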
...@@ -31,15 +31,17 @@ select max(current) from meters partition by location interval(10m) ...@@ -31,15 +31,17 @@ select max(current) from meters partition by location interval(10m)
## 窗口切分查询 ## 窗口切分查询
TDengine 支持按时间窗口切分方式进行聚合结果查询,比如温度传感器每秒采集一次数据,但需查询每隔 10 分钟的温度平均值。这种场景下可以使用窗口子句来获得需要的查询结果。窗口子句用于针对查询的数据集合按照窗口切分成为查询子集并进行聚合,窗口包含时间窗口(time window)、状态窗口(status window)、会话窗口(session window)三种窗口。其中时间窗口又可划分为滑动时间窗口和翻转时间窗口。窗口切分查询语法如下: TDengine 支持按时间窗口切分方式进行聚合结果查询,比如温度传感器每秒采集一次数据,但需查询每隔 10 分钟的温度平均值。这种场景下可以使用窗口子句来获得需要的查询结果。窗口子句用于针对查询的数据集合按照窗口切分成为查询子集并进行聚合,窗口包含时间窗口(time window)、状态窗口(status window)、会话窗口(session window)、条件窗口(event window)四种窗口。其中时间窗口又可划分为滑动时间窗口和翻转时间窗口。
窗口子句语法如下:
```sql ```sql
SELECT select_list FROM tb_name window_clause: {
[WHERE where_condition] SESSION(ts_col, tol_val)
[SESSION(ts_col, tol_val)] | STATE_WINDOW(col)
[STATE_WINDOW(col)] | INTERVAL(interval_val [, interval_offset]) [SLIDING (sliding_val)] [FILL(fill_mod_and_val)]
[INTERVAL(interval [, offset]) [SLIDING sliding]] | EVENT_WINDOW START WITH start_trigger_condition END WITH end_trigger_condition
[FILL({NONE | VALUE | PREV | NULL | LINEAR | NEXT})] }
``` ```
在上述语法中的具体限制如下 在上述语法中的具体限制如下
...@@ -138,6 +140,24 @@ SELECT tbname, _wstart, CASE WHEN voltage >= 205 and voltage <= 235 THEN 1 ELSE ...@@ -138,6 +140,24 @@ SELECT tbname, _wstart, CASE WHEN voltage >= 205 and voltage <= 235 THEN 1 ELSE
SELECT COUNT(*), FIRST(ts) FROM temp_tb_1 SESSION(ts, tol_val); SELECT COUNT(*), FIRST(ts) FROM temp_tb_1 SESSION(ts, tol_val);
``` ```
### Event Window

An event window is delimited by a start condition and an end condition: the window opens when start_trigger_condition is satisfied and closes when end_trigger_condition is satisfied. start_trigger_condition and end_trigger_condition can be any condition expressions supported by TDengine and may involve different columns.

An event window may contain a single row: if one row satisfies both start_trigger_condition and end_trigger_condition while no window is currently open, that row forms a window by itself.

An event window that cannot be closed does not form a window and is not output. That is, if some row satisfies start_trigger_condition and a window opens, but no subsequent row satisfies end_trigger_condition, the window cannot be closed; those rows do not form a window and produce no output.

If an event window query is run directly on a super table, TDengine merges the super table's data into a single timeline before computing the event windows.

If you need to run an event window query on the result set of a subquery, the subquery's result set must be output along a timeline and must contain a valid timestamp column.

Taking the following SQL statement as an example, the event window segmentation is shown in the figure below:
```sql
select _wstart, _wend, count(*) from t start with c1 > 0 end with c2 < 10
```
![TDengine Database event window diagram](./event_window.webp)
### 时间戳伪列 ### 时间戳伪列
窗口聚合查询结果中,如果 SQL 语句中没有指定输出查询结果中的时间戳列,那么最终结果中不会自动包含窗口的时间列信息。如果需要在结果中输出聚合结果所对应的时间窗口信息,需要在 SELECT 子句中使用时间戳相关的伪列: 时间窗口起始时间 (\_WSTART), 时间窗口结束时间 (\_WEND), 时间窗口持续时间 (\_WDURATION), 以及查询整体窗口相关的伪列: 查询窗口起始时间(\_QSTART) 和查询窗口结束时间(\_QEND)。需要注意的是时间窗口起始时间和结束时间均是闭区间,时间窗口持续时间是数据当前时间分辨率下的数值。例如,如果当前数据库的时间分辨率是毫秒,那么结果中 500 就表示当前时间窗口的持续时间是 500毫秒 (500 ms)。 窗口聚合查询结果中,如果 SQL 语句中没有指定输出查询结果中的时间戳列,那么最终结果中不会自动包含窗口的时间列信息。如果需要在结果中输出聚合结果所对应的时间窗口信息,需要在 SELECT 子句中使用时间戳相关的伪列: 时间窗口起始时间 (\_WSTART), 时间窗口结束时间 (\_WEND), 时间窗口持续时间 (\_WDURATION), 以及查询整体窗口相关的伪列: 查询窗口起始时间(\_QSTART) 和查询窗口结束时间(\_QEND)。需要注意的是时间窗口起始时间和结束时间均是闭区间,时间窗口持续时间是数据当前时间分辨率下的数值。例如,如果当前数据库的时间分辨率是毫秒,那么结果中 500 就表示当前时间窗口的持续时间是 500毫秒 (500 ms)。
......
...@@ -166,7 +166,7 @@ TDengine 内置了一个名为 `INFORMATION_SCHEMA` 的数据库,提供对数 ...@@ -166,7 +166,7 @@ TDengine 内置了一个名为 `INFORMATION_SCHEMA` 的数据库,提供对数
| 7 | vgroup_id | INT | vgroup id | | 7 | vgroup_id | INT | vgroup id |
| 8 | ttl | INT | 表的生命周期。需要注意,`ttl` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 | | 8 | ttl | INT | 表的生命周期。需要注意,`ttl` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 |
| 9 | table_comment | BINARY(1024) | 表注释 | | 9 | table_comment | BINARY(1024) | 表注释 |
| 10 | type | BINARY(20) | 表类型 | | 10 | type | BINARY(21) | 表类型 |
## INS_TAGS ## INS_TAGS
...@@ -179,6 +179,20 @@ TDengine 内置了一个名为 `INFORMATION_SCHEMA` 的数据库,提供对数 ...@@ -179,6 +179,20 @@ TDengine 内置了一个名为 `INFORMATION_SCHEMA` 的数据库,提供对数
| 5 | tag_type | BINARY(64) | tag 的类型 | | 5 | tag_type | BINARY(64) | tag 的类型 |
| 6 | tag_value | BINARY(16384) | tag 的值 | | 6 | tag_value | BINARY(16384) | tag 的值 |
## INS_COLUMNS

| #   |  **Column**   | **Data Type** | **Description**                |
| --- | :-----------: | ------------- | ------------------------------ |
| 1   | table_name    | BINARY(192)   | Table name                     |
| 2   | db_name       | BINARY(64)    | Database the table belongs to  |
| 3   | table_type    | BINARY(21)    | Table type                     |
| 4   | col_name      | BINARY(64)    | Column name                    |
| 5   | col_type      | BINARY(32)    | Column type                    |
| 6   | col_length    | INT           | Column length                  |
| 7   | col_precision | INT           | Column precision               |
| 8   | col_scale     | INT           | Column scale                   |
| 9   | col_nullable  | INT           | Whether the column is nullable |
## INS_USERS ## INS_USERS
提供系统中创建的用户的相关信息。 提供系统中创建的用户的相关信息。
...@@ -274,9 +288,9 @@ TDengine 内置了一个名为 `INFORMATION_SCHEMA` 的数据库,提供对数 ...@@ -274,9 +288,9 @@ TDengine 内置了一个名为 `INFORMATION_SCHEMA` 的数据库,提供对数
| 1 | stream_name | BINARY(64) | 流计算名称 | | 1 | stream_name | BINARY(64) | 流计算名称 |
| 2 | create_time | TIMESTAMP | 创建时间 | | 2 | create_time | TIMESTAMP | 创建时间 |
| 3 | sql | BINARY(1024) | 创建流计算时提供的 SQL 语句 | | 3 | sql | BINARY(1024) | 创建流计算时提供的 SQL 语句 |
| 4 | status | BIANRY(20) | 流当前状态 | | 4 | status | BINARY(20) | 流当前状态 |
| 5 | source_db | BINARY(64) | 源数据库 | | 5 | source_db | BINARY(64) | 源数据库 |
| 6 | target_db | BIANRY(64) | 目的数据库 | | 6 | target_db | BINARY(64) | 目的数据库 |
| 7 | target_table | BINARY(192) | 流计算写入的目标表 | | 7 | target_table | BINARY(192) | 流计算写入的目标表 |
| 8 | watermark | BIGINT | watermark,详见 SQL 手册流式计算。需要注意,`watermark` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 | | 8 | watermark | BIGINT | watermark,详见 SQL 手册流式计算。需要注意,`watermark` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 |
| 9 | trigger | INT | 计算结果推送模式,详见 SQL 手册流式计算。需要注意,`trigger` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 | | 9 | trigger | INT | 计算结果推送模式,详见 SQL 手册流式计算。需要注意,`trigger` 为 TDengine 关键字,作为列名使用时需要使用 ` 进行转义。 |
...@@ -86,10 +86,10 @@ SHOW FUNCTIONS; ...@@ -86,10 +86,10 @@ SHOW FUNCTIONS;
显示用户定义的自定义函数。 显示用户定义的自定义函数。
## SHOW LICENSE ## SHOW LICENCES
```sql ```sql
SHOW LICENSE; SHOW LICENCES;
SHOW GRANTS; SHOW GRANTS;
``` ```
...@@ -204,7 +204,7 @@ Compression_Ratio: 数据压缩率 23.98% ...@@ -204,7 +204,7 @@ Compression_Ratio: 数据压缩率 23.98%
_block_dist: Total_Rows=[20000] Inmem_Rows=[0] MinRows=[3616] MaxRows=[4096] Average_Rows=[4000] _block_dist: Total_Rows=[20000] Inmem_Rows=[0] MinRows=[3616] MaxRows=[4096] Average_Rows=[4000]
Total_Rows: 统计表 d0 的所有行数 为20000 行(该数值仅供参考,不是精确的行数。获得精确的行数需要使用 count 函数) Total_Rows: 统计表 d0 的存储在磁盘上行数 20000 行(该数值仅供参考,不是精确的行数。获得精确的行数需要使用 count 函数)
Inmem_Rows: 存储在写缓存中的数据行数(没有落盘),0 行表示内存缓存中没有数据 Inmem_Rows: 存储在写缓存中的数据行数(没有落盘),0 行表示内存缓存中没有数据
...@@ -303,7 +303,7 @@ SHOW DNODE dnode_id VARIABLES; ...@@ -303,7 +303,7 @@ SHOW DNODE dnode_id VARIABLES;
SHOW [db_name.]VGROUPS; SHOW [db_name.]VGROUPS;
``` ```
显示当前系统中所有 VGROUP 或某个 db 的 VGROUPS 的信息。 显示当前数据库中所有 VGROUP 的信息。
## SHOW VNODES ## SHOW VNODES
......
...@@ -41,7 +41,7 @@ CREATE AGGREGATE FUNCTION function_name AS library_path OUTPUTTYPE output_type [ ...@@ -41,7 +41,7 @@ CREATE AGGREGATE FUNCTION function_name AS library_path OUTPUTTYPE output_type [
```sql ```sql
CREATE AGGREGATE FUNCTION l2norm AS "/home/taos/udf_example/libl2norm.so" OUTPUTTYPE DOUBLE bufsize 8; CREATE AGGREGATE FUNCTION l2norm AS "/home/taos/udf_example/libl2norm.so" OUTPUTTYPE DOUBLE bufsize 8;
``` ```
关于如何开发自定义函数,请参考 [UDF使用说明](../../develop/udf) 关于如何开发自定义函数,请参考 [UDF使用说明](/develop/udf)
## 管理 UDF ## 管理 UDF
......
...@@ -58,9 +58,9 @@ Usage of taosAdapter: ...@@ -58,9 +58,9 @@ Usage of taosAdapter:
--collectd.enable enable collectd. Env "TAOS_ADAPTER_COLLECTD_ENABLE" (default true) --collectd.enable enable collectd. Env "TAOS_ADAPTER_COLLECTD_ENABLE" (default true)
--collectd.password string collectd password. Env "TAOS_ADAPTER_COLLECTD_PASSWORD" (default "taosdata") --collectd.password string collectd password. Env "TAOS_ADAPTER_COLLECTD_PASSWORD" (default "taosdata")
--collectd.port int collectd server port. Env "TAOS_ADAPTER_COLLECTD_PORT" (default 6045) --collectd.port int collectd server port. Env "TAOS_ADAPTER_COLLECTD_PORT" (default 6045)
--collectd.ttl int collectd data ttl. Env "TAOS_ADAPTER_COLLECTD_TTL"
--collectd.user string collectd user. Env "TAOS_ADAPTER_COLLECTD_USER" (default "root") --collectd.user string collectd user. Env "TAOS_ADAPTER_COLLECTD_USER" (default "root")
--collectd.worker int collectd write worker. Env "TAOS_ADAPTER_COLLECTD_WORKER" (default 10) --collectd.worker int collectd write worker. Env "TAOS_ADAPTER_COLLECTD_WORKER" (default 10)
--collectd.ttl int collectd data ttl. Env "TAOS_ADAPTER_COLLECTD_TTL" (default 0, means no ttl)
-c, --config string config path default /etc/taos/taosadapter.toml -c, --config string config path default /etc/taos/taosadapter.toml
--cors.allowAllOrigins cors allow all origins. Env "TAOS_ADAPTER_CORS_ALLOW_ALL_ORIGINS" (default true) --cors.allowAllOrigins cors allow all origins. Env "TAOS_ADAPTER_CORS_ALLOW_ALL_ORIGINS" (default true)
--cors.allowCredentials cors allow credentials. Env "TAOS_ADAPTER_CORS_ALLOW_Credentials" --cors.allowCredentials cors allow credentials. Env "TAOS_ADAPTER_CORS_ALLOW_Credentials"
...@@ -68,8 +68,9 @@ Usage of taosAdapter: ...@@ -68,8 +68,9 @@ Usage of taosAdapter:
--cors.allowOrigins stringArray cors allow origins. Env "TAOS_ADAPTER_ALLOW_ORIGINS" --cors.allowOrigins stringArray cors allow origins. Env "TAOS_ADAPTER_ALLOW_ORIGINS"
--cors.allowWebSockets cors allow WebSockets. Env "TAOS_ADAPTER_CORS_ALLOW_WebSockets" --cors.allowWebSockets cors allow WebSockets. Env "TAOS_ADAPTER_CORS_ALLOW_WebSockets"
--cors.exposeHeaders stringArray cors expose headers. Env "TAOS_ADAPTER_Expose_Headers" --cors.exposeHeaders stringArray cors expose headers. Env "TAOS_ADAPTER_Expose_Headers"
--debug enable debug mode. Env "TAOS_ADAPTER_DEBUG" --debug enable debug mode. Env "TAOS_ADAPTER_DEBUG" (default true)
--help Print this help message and exit --help Print this help message and exit
--httpCodeServerError Use a non-200 http status code when taosd returns an error. Env "TAOS_ADAPTER_HTTP_CODE_SERVER_ERROR"
--influxdb.enable enable influxdb. Env "TAOS_ADAPTER_INFLUXDB_ENABLE" (default true) --influxdb.enable enable influxdb. Env "TAOS_ADAPTER_INFLUXDB_ENABLE" (default true)
--log.enableRecordHttpSql whether to record http sql. Env "TAOS_ADAPTER_LOG_ENABLE_RECORD_HTTP_SQL" --log.enableRecordHttpSql whether to record http sql. Env "TAOS_ADAPTER_LOG_ENABLE_RECORD_HTTP_SQL"
--log.path string log path. Env "TAOS_ADAPTER_LOG_PATH" (default "/var/log/taos") --log.path string log path. Env "TAOS_ADAPTER_LOG_PATH" (default "/var/log/taos")
...@@ -80,14 +81,17 @@ Usage of taosAdapter: ...@@ -80,14 +81,17 @@ Usage of taosAdapter:
--log.sqlRotationSize string record sql log rotation size(KB MB GB), must be a positive integer. Env "TAOS_ADAPTER_LOG_SQL_ROTATION_SIZE" (default "1GB") --log.sqlRotationSize string record sql log rotation size(KB MB GB), must be a positive integer. Env "TAOS_ADAPTER_LOG_SQL_ROTATION_SIZE" (default "1GB")
--log.sqlRotationTime duration record sql log rotation time. Env "TAOS_ADAPTER_LOG_SQL_ROTATION_TIME" (default 24h0m0s) --log.sqlRotationTime duration record sql log rotation time. Env "TAOS_ADAPTER_LOG_SQL_ROTATION_TIME" (default 24h0m0s)
--logLevel string log level (panic fatal error warn warning info debug trace). Env "TAOS_ADAPTER_LOG_LEVEL" (default "info") --logLevel string log level (panic fatal error warn warning info debug trace). Env "TAOS_ADAPTER_LOG_LEVEL" (default "info")
--monitor.collectDuration duration Set monitor duration. Env "TAOS_MONITOR_COLLECT_DURATION" (default 3s) --monitor.collectDuration duration Set monitor duration. Env "TAOS_ADAPTER_MONITOR_COLLECT_DURATION" (default 3s)
--monitor.disable Whether to disable monitoring. Env "TAOS_ADAPTER_MONITOR_DISABLE"
--monitor.disableCollectClientIP Whether to disable collecting clientIP. Env "TAOS_ADAPTER_MONITOR_DISABLE_COLLECT_CLIENT_IP"
--monitor.identity string The identity of the current instance, or 'hostname:port' if it is empty. Env "TAOS_MONITOR_IDENTITY" --monitor.identity string The identity of the current instance, or 'hostname:port' if it is empty. Env "TAOS_ADAPTER_MONITOR_IDENTITY"
--monitor.incgroup Whether running in cgroup. Env "TAOS_MONITOR_INCGROUP" --monitor.incgroup Whether running in cgroup. Env "TAOS_ADAPTER_MONITOR_INCGROUP"
--monitor.password string TDengine password. Env "TAOS_MONITOR_PASSWORD" (default "taosdata") --monitor.password string TDengine password. Env "TAOS_ADAPTER_MONITOR_PASSWORD" (default "taosdata")
--monitor.pauseAllMemoryThreshold float Memory percentage threshold for pause all. Env "TAOS_MONITOR_PAUSE_ALL_MEMORY_THRESHOLD" (default 80) --monitor.pauseAllMemoryThreshold float Memory percentage threshold for pause all. Env "TAOS_ADAPTER_MONITOR_PAUSE_ALL_MEMORY_THRESHOLD" (default 80)
--monitor.pauseQueryMemoryThreshold float Memory percentage threshold for pause query. Env "TAOS_MONITOR_PAUSE_QUERY_MEMORY_THRESHOLD" (default 70) --monitor.pauseQueryMemoryThreshold float Memory percentage threshold for pause query. Env "TAOS_ADAPTER_MONITOR_PAUSE_QUERY_MEMORY_THRESHOLD" (default 70)
--monitor.user string TDengine user. Env "TAOS_MONITOR_USER" (default "root") --monitor.user string TDengine user. Env "TAOS_ADAPTER_MONITOR_USER" (default "root")
--monitor.writeInterval duration Set write to TDengine interval. Env "TAOS_MONITOR_WRITE_INTERVAL" (default 30s) --monitor.writeInterval duration Set write to TDengine interval. Env "TAOS_ADAPTER_MONITOR_WRITE_INTERVAL" (default 30s)
--monitor.writeToTD Whether write metrics to TDengine. Env "TAOS_MONITOR_WRITE_TO_TD" --monitor.writeToTD Whether write metrics to TDengine. Env "TAOS_ADAPTER_MONITOR_WRITE_TO_TD"
--node_exporter.caCertFile string node_exporter ca cert file path. Env "TAOS_ADAPTER_NODE_EXPORTER_CA_CERT_FILE" --node_exporter.caCertFile string node_exporter ca cert file path. Env "TAOS_ADAPTER_NODE_EXPORTER_CA_CERT_FILE"
--node_exporter.certFile string node_exporter cert file path. Env "TAOS_ADAPTER_NODE_EXPORTER_CERT_FILE" --node_exporter.certFile string node_exporter cert file path. Env "TAOS_ADAPTER_NODE_EXPORTER_CERT_FILE"
--node_exporter.db string node_exporter db name. Env "TAOS_ADAPTER_NODE_EXPORTER_DB" (default "node_exporter") --node_exporter.db string node_exporter db name. Env "TAOS_ADAPTER_NODE_EXPORTER_DB" (default "node_exporter")
...@@ -100,9 +104,9 @@ Usage of taosAdapter: ...@@ -100,9 +104,9 @@ Usage of taosAdapter:
--node_exporter.keyFile string node_exporter cert key file path. Env "TAOS_ADAPTER_NODE_EXPORTER_KEY_FILE" --node_exporter.keyFile string node_exporter cert key file path. Env "TAOS_ADAPTER_NODE_EXPORTER_KEY_FILE"
--node_exporter.password string node_exporter password. Env "TAOS_ADAPTER_NODE_EXPORTER_PASSWORD" (default "taosdata") --node_exporter.password string node_exporter password. Env "TAOS_ADAPTER_NODE_EXPORTER_PASSWORD" (default "taosdata")
--node_exporter.responseTimeout duration node_exporter response timeout. Env "TAOS_ADAPTER_NODE_EXPORTER_RESPONSE_TIMEOUT" (default 5s) --node_exporter.responseTimeout duration node_exporter response timeout. Env "TAOS_ADAPTER_NODE_EXPORTER_RESPONSE_TIMEOUT" (default 5s)
--node_exporter.ttl int node_exporter data ttl. Env "TAOS_ADAPTER_NODE_EXPORTER_TTL"
--node_exporter.urls strings node_exporter urls. Env "TAOS_ADAPTER_NODE_EXPORTER_URLS" (default [http://localhost:9100]) --node_exporter.urls strings node_exporter urls. Env "TAOS_ADAPTER_NODE_EXPORTER_URLS" (default [http://localhost:9100])
--node_exporter.user string node_exporter user. Env "TAOS_ADAPTER_NODE_EXPORTER_USER" (default "root") --node_exporter.user string node_exporter user. Env "TAOS_ADAPTER_NODE_EXPORTER_USER" (default "root")
--node_exporter.ttl int node_exporter data ttl. Env "TAOS_ADAPTER_NODE_EXPORTER_TTL"(default 0, means no ttl)
--opentsdb.enable enable opentsdb. Env "TAOS_ADAPTER_OPENTSDB_ENABLE" (default true) --opentsdb.enable enable opentsdb. Env "TAOS_ADAPTER_OPENTSDB_ENABLE" (default true)
--opentsdb_telnet.batchSize int opentsdb_telnet batch size. Env "TAOS_ADAPTER_OPENTSDB_TELNET_BATCH_SIZE" (default 1) --opentsdb_telnet.batchSize int opentsdb_telnet batch size. Env "TAOS_ADAPTER_OPENTSDB_TELNET_BATCH_SIZE" (default 1)
--opentsdb_telnet.dbs strings opentsdb_telnet db names. Env "TAOS_ADAPTER_OPENTSDB_TELNET_DBS" (default [opentsdb_telnet,collectd_tsdb,icinga2_tsdb,tcollector_tsdb]) --opentsdb_telnet.dbs strings opentsdb_telnet db names. Env "TAOS_ADAPTER_OPENTSDB_TELNET_DBS" (default [opentsdb_telnet,collectd_tsdb,icinga2_tsdb,tcollector_tsdb])
...@@ -112,11 +116,11 @@ Usage of taosAdapter: ...@@ -112,11 +116,11 @@ Usage of taosAdapter:
--opentsdb_telnet.password string opentsdb_telnet password. Env "TAOS_ADAPTER_OPENTSDB_TELNET_PASSWORD" (default "taosdata") --opentsdb_telnet.password string opentsdb_telnet password. Env "TAOS_ADAPTER_OPENTSDB_TELNET_PASSWORD" (default "taosdata")
--opentsdb_telnet.ports ints opentsdb telnet tcp port. Env "TAOS_ADAPTER_OPENTSDB_TELNET_PORTS" (default [6046,6047,6048,6049]) --opentsdb_telnet.ports ints opentsdb telnet tcp port. Env "TAOS_ADAPTER_OPENTSDB_TELNET_PORTS" (default [6046,6047,6048,6049])
--opentsdb_telnet.tcpKeepAlive enable tcp keep alive. Env "TAOS_ADAPTER_OPENTSDB_TELNET_TCP_KEEP_ALIVE" --opentsdb_telnet.tcpKeepAlive enable tcp keep alive. Env "TAOS_ADAPTER_OPENTSDB_TELNET_TCP_KEEP_ALIVE"
--opentsdb_telnet.ttl int opentsdb_telnet data ttl. Env "TAOS_ADAPTER_OPENTSDB_TELNET_TTL"
--opentsdb_telnet.user string opentsdb_telnet user. Env "TAOS_ADAPTER_OPENTSDB_TELNET_USER" (default "root") --opentsdb_telnet.user string opentsdb_telnet user. Env "TAOS_ADAPTER_OPENTSDB_TELNET_USER" (default "root")
--opentsdb_telnet.ttl int opentsdb_telnet data ttl. Env "TAOS_ADAPTER_OPENTSDB_TELNET_TTL"(default 0, means no ttl) --pool.idleTimeout duration Set idle connection timeout. Env "TAOS_ADAPTER_POOL_IDLE_TIMEOUT"
--pool.idleTimeout duration Set idle connection timeout. Env "TAOS_ADAPTER_POOL_IDLE_TIMEOUT" (default 1h0m0s) --pool.maxConnect int max connections to taosd. Env "TAOS_ADAPTER_POOL_MAX_CONNECT"
--pool.maxConnect int max connections to taosd. Env "TAOS_ADAPTER_POOL_MAX_CONNECT" (default 4000) --pool.maxIdle int max idle connections to taosd. Env "TAOS_ADAPTER_POOL_MAX_IDLE"
--pool.maxIdle int max idle connections to taosd. Env "TAOS_ADAPTER_POOL_MAX_IDLE" (default 4000)
-P, --port int http port. Env "TAOS_ADAPTER_PORT" (default 6041) -P, --port int http port. Env "TAOS_ADAPTER_PORT" (default 6041)
--prometheus.enable enable prometheus. Env "TAOS_ADAPTER_PROMETHEUS_ENABLE" (default true) --prometheus.enable enable prometheus. Env "TAOS_ADAPTER_PROMETHEUS_ENABLE" (default true)
--restfulRowLimit int restful returns the maximum number of rows (-1 means no limit). Env "TAOS_ADAPTER_RESTFUL_ROW_LIMIT" (default -1) --restfulRowLimit int restful returns the maximum number of rows (-1 means no limit). Env "TAOS_ADAPTER_RESTFUL_ROW_LIMIT" (default -1)
...@@ -133,9 +137,9 @@ Usage of taosAdapter: ...@@ -133,9 +137,9 @@ Usage of taosAdapter:
--statsd.port int statsd server port. Env "TAOS_ADAPTER_STATSD_PORT" (default 6044) --statsd.port int statsd server port. Env "TAOS_ADAPTER_STATSD_PORT" (default 6044)
--statsd.protocol string statsd protocol [tcp or udp]. Env "TAOS_ADAPTER_STATSD_PROTOCOL" (default "udp") --statsd.protocol string statsd protocol [tcp or udp]. Env "TAOS_ADAPTER_STATSD_PROTOCOL" (default "udp")
--statsd.tcpKeepAlive enable tcp keep alive. Env "TAOS_ADAPTER_STATSD_TCP_KEEP_ALIVE" --statsd.tcpKeepAlive enable tcp keep alive. Env "TAOS_ADAPTER_STATSD_TCP_KEEP_ALIVE"
--statsd.ttl int statsd data ttl. Env "TAOS_ADAPTER_STATSD_TTL"
--statsd.user string statsd user. Env "TAOS_ADAPTER_STATSD_USER" (default "root") --statsd.user string statsd user. Env "TAOS_ADAPTER_STATSD_USER" (default "root")
--statsd.worker int statsd write worker. Env "TAOS_ADAPTER_STATSD_WORKER" (default 10) --statsd.worker int statsd write worker. Env "TAOS_ADAPTER_STATSD_WORKER" (default 10)
--statsd.ttl int statsd data ttl. Env "TAOS_ADAPTER_STATSD_TTL" (default 0, means no ttl)
--taosConfigDir string load taos client config path. Env "TAOS_ADAPTER_TAOS_CONFIG_FILE" --taosConfigDir string load taos client config path. Env "TAOS_ADAPTER_TAOS_CONFIG_FILE"
--version Print the version and exit --version Print the version and exit
``` ```
...@@ -323,6 +327,10 @@ taosAdapter 通过参数 `restfulRowLimit` 来控制结果的返回条数,-1 ...@@ -323,6 +327,10 @@ taosAdapter 通过参数 `restfulRowLimit` 来控制结果的返回条数,-1
- `http://<fqdn>:6041/rest/sql` - `http://<fqdn>:6041/rest/sql`
- `http://<fqdn>:6041/prometheus/v1/remote_read/:db` - `http://<fqdn>:6041/prometheus/v1/remote_read/:db`
## Configuring HTTP return codes

taosAdapter uses the parameter `httpCodeServerError` to control whether a non-200 HTTP status code is returned when the C interface reports an error. When set to true, different HTTP status codes are returned according to the error code reported by C. See [HTTP response codes](../../connector/rest-api/#http-响应码) for details.
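As an illustration (libcurl-based, not part of taosAdapter itself), the sketch below posts a statement to the documented default endpoint and prints the HTTP status code that this setting controls; host, port, and credentials are the documented defaults and may need adjusting:

```c
#include <stdio.h>
#include <curl/curl.h>

// Sketch: POST a SQL statement to taosAdapter and inspect the HTTP status
// code, which varies with `httpCodeServerError` as described above.
int main(void) {
  CURL *curl = curl_easy_init();
  if (!curl) return 1;
  curl_easy_setopt(curl, CURLOPT_URL, "http://localhost:6041/rest/sql");
  curl_easy_setopt(curl, CURLOPT_USERPWD, "root:taosdata");
  curl_easy_setopt(curl, CURLOPT_POSTFIELDS, "select server_version()");
  CURLcode rc = curl_easy_perform(curl);
  long http_code = 0;
  if (rc == CURLE_OK) {
    curl_easy_getinfo(curl, CURLINFO_RESPONSE_CODE, &http_code);
    printf("HTTP status: %ld\n", http_code);  // 200, or 4xx/5xx when enabled
  }
  curl_easy_cleanup(curl);
  return rc == CURLE_OK ? 0 : 1;
}
```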
## 故障解决 ## 故障解决
您可以通过命令 `systemctl status taosadapter` 来检查 taosAdapter 运行状态。 您可以通过命令 `systemctl status taosadapter` 来检查 taosAdapter 运行状态。
......
...@@ -208,6 +208,9 @@ taosBenchmark -A INT,DOUBLE,NCHAR,BINARY\(16\) ...@@ -208,6 +208,9 @@ taosBenchmark -A INT,DOUBLE,NCHAR,BINARY\(16\)
- ** -z/--trying-interval <NUMBER\>** : 失败重试间隔时间,单位为毫秒,仅在 -k 指定重试后有效。需使用 v3.0.9 以上版本。 - ** -z/--trying-interval <NUMBER\>** : 失败重试间隔时间,单位为毫秒,仅在 -k 指定重试后有效。需使用 v3.0.9 以上版本。
- **-v/--vgroups <NUMBER\>** :
  Specify the number of vgroups when creating the database; effective only for TDengine v3.0+.
- **-V/--version** : - **-V/--version** :
显示版本信息并退出。不能与其它参数混用。 显示版本信息并退出。不能与其它参数混用。
...@@ -238,6 +241,13 @@ taosBenchmark -A INT,DOUBLE,NCHAR,BINARY\(16\) ...@@ -238,6 +241,13 @@ taosBenchmark -A INT,DOUBLE,NCHAR,BINARY\(16\)
- ** keep_trying ** : 失败后进行重试的次数,默认不重试。需使用 v3.0.9 以上版本。 - ** keep_trying ** : 失败后进行重试的次数,默认不重试。需使用 v3.0.9 以上版本。
- ** trying_interval ** : 失败重试间隔时间,单位为毫秒,仅在 keep_trying 指定重试后有效。需使用 v3.0.9 以上版本。 - ** trying_interval ** : 失败重试间隔时间,单位为毫秒,仅在 keep_trying 指定重试后有效。需使用 v3.0.9 以上版本。
- **childtable_from and childtable_to** : specify the range of child tables to write to; the interval is half-open: [childtable_from, childtable_to).

- **continue_if_fail** : lets the user define the behavior after a write failure:
  - `"continue_if_fail": "no"`: taosBenchmark exits automatically on failure (default behavior).
  - `"continue_if_fail": "yes"`: taosBenchmark warns the user and continues writing.
  - `"continue_if_fail": "smart"`: if the failure is caused by a missing child table, taosBenchmark creates the child table and continues writing.
#### 数据库相关配置参数 #### 数据库相关配置参数
......
...@@ -1590,7 +1590,7 @@ ...@@ -1590,7 +1590,7 @@
}, },
{ {
"datasource": "${DS_TDENGINE}", "datasource": "${DS_TDENGINE}",
"description": "taosd max memery last 10 minutes", "description": "taosd max memory last 10 minutes",
"fieldConfig": { "fieldConfig": {
"defaults": { "defaults": {
"color": { "color": {
...@@ -1919,7 +1919,7 @@ ...@@ -1919,7 +1919,7 @@
}, },
{ {
"datasource": "${DS_TDENGINE}", "datasource": "${DS_TDENGINE}",
"description": "taosd max memery last 10 minutes", "description": "taosd max memory last 10 minutes",
"fieldConfig": { "fieldConfig": {
"defaults": { "defaults": {
"color": { "color": {
...@@ -1977,7 +1977,7 @@ ...@@ -1977,7 +1977,7 @@
}, },
{ {
"datasource": "${DS_TDENGINE}", "datasource": "${DS_TDENGINE}",
"description": "taosd max memery last 10 minutes", "description": "taosd max memory last 10 minutes",
"fieldConfig": { "fieldConfig": {
"defaults": { "defaults": {
"color": { "color": {
...@@ -2825,7 +2825,7 @@ ...@@ -2825,7 +2825,7 @@
"timeFrom": null, "timeFrom": null,
"timeRegions": [], "timeRegions": [],
"timeShift": null, "timeShift": null,
"title": "Requets Count per Minutes $fqdn", "title": "Requests Count per Minutes $fqdn",
"tooltip": { "tooltip": {
"shared": true, "shared": true,
"sort": 0, "sort": 0,
......
...@@ -1566,7 +1566,7 @@ ...@@ -1566,7 +1566,7 @@
}, },
{ {
"datasource": "${ds}", "datasource": "${ds}",
"description": "taosd max memery last 10 minutes", "description": "taosd max memory last 10 minutes",
"fieldConfig": { "fieldConfig": {
"defaults": { "defaults": {
"color": { "color": {
...@@ -1933,7 +1933,7 @@ ...@@ -1933,7 +1933,7 @@
}, },
{ {
"datasource": "${ds}", "datasource": "${ds}",
"description": "taosd max memery last 10 minutes", "description": "taosd max memory last 10 minutes",
"fieldConfig": { "fieldConfig": {
"defaults": { "defaults": {
"color": { "color": {
...@@ -2000,7 +2000,7 @@ ...@@ -2000,7 +2000,7 @@
}, },
{ {
"datasource": "${ds}", "datasource": "${ds}",
"description": "taosd max memery last 10 minutes", "description": "taosd max memory last 10 minutes",
"fieldConfig": { "fieldConfig": {
"defaults": { "defaults": {
"color": { "color": {
...@@ -2961,7 +2961,7 @@ ...@@ -2961,7 +2961,7 @@
"timeFrom": null, "timeFrom": null,
"timeRegions": [], "timeRegions": [],
"timeShift": null, "timeShift": null,
"title": "Requets Count per Minutes $fqdn", "title": "Requests Count per Minutes $fqdn",
"tooltip": { "tooltip": {
"shared": true, "shared": true,
"sort": 0, "sort": 0,
...@@ -3355,4 +3355,4 @@ ...@@ -3355,4 +3355,4 @@
"title": "TDengine", "title": "TDengine",
"uid": "tdengine", "uid": "tdengine",
"version": 8 "version": 8
} }
\ No newline at end of file
...@@ -186,7 +186,7 @@ ...@@ -186,7 +186,7 @@
}, },
{ {
"datasource": "TDengine", "datasource": "TDengine",
"description": "taosd max memery last 10 minutes", "description": "taosd max memory last 10 minutes",
"gridPos": { "gridPos": {
"h": 6, "h": 6,
"w": 8, "w": 8,
...@@ -253,7 +253,7 @@ ...@@ -253,7 +253,7 @@
], ],
"timeFrom": null, "timeFrom": null,
"timeShift": null, "timeShift": null,
"title": "taosd memery", "title": "taosd memory",
"type": "gauge" "type": "gauge"
}, },
{ {
......
...@@ -46,7 +46,7 @@ sudo apt-get install grafana ...@@ -46,7 +46,7 @@ sudo apt-get install grafana
### 在 CentOS / RHEL 上安装 Grafana ### 在 CentOS / RHEL 上安装 Grafana
</TabItem> </TabItem>
<TabItem label="redhat" value="基于 CentOS / RHEL 系统"> <TabItem value="redhat" label="基于 CentOS / RHEL 系统">
您可以从官方 YUM 镜像仓库安装。 您可以从官方 YUM 镜像仓库安装。
...@@ -140,10 +140,10 @@ Install and configure TDinsight dashboard in Grafana on Ubuntu 18.04/20.04 syste ...@@ -140,10 +140,10 @@ Install and configure TDinsight dashboard in Grafana on Ubuntu 18.04/20.04 syste
| -n | --tdengine-ds-name | TDENGINE_DS_NAME | TDengine 数据源名称,默认为 TDengine。 | | -n | --tdengine-ds-name | TDENGINE_DS_NAME | TDengine 数据源名称,默认为 TDengine。 |
| -a | --tdengine-api | TDENGINE_API | TDengine REST API 端点。默认为`http://127.0.0.1:6041`。 | | -a | --tdengine-api | TDENGINE_API | TDengine REST API 端点。默认为`http://127.0.0.1:6041`。 |
| -u | --tdengine-user | TDENGINE_USER | TDengine 用户名。 [默认值:root] | | -u | --tdengine-user | TDENGINE_USER | TDengine 用户名。 [默认值:root] |
| -p | --tdengine-密码 | TDENGINE_PASSWORD | TDengine 密码。 [默认:taosdata] | | -p | --tdengine-password | TDENGINE_PASSWORD | TDengine 密码。 [默认:taosdata] |
| -i | --tdinsight-uid | TDINSIGHT_DASHBOARD_UID | TDinsight 仪表盘`uid`。 [默认值:tdinsight] | | -i | --tdinsight-uid | TDINSIGHT_DASHBOARD_UID | TDinsight 仪表盘`uid`。 [默认值:tdinsight] |
| -t | --tdinsight-title | TDINSIGHT_DASHBOARD_TITLE | TDinsight 仪表盘标题。 [默认:TDinsight] | | -t | --tdinsight-title | TDINSIGHT_DASHBOARD_TITLE | TDinsight 仪表盘标题。 [默认:TDinsight] |
| -e | --tdinsight-可编辑 | TDINSIGHT_DASHBOARD_EDITABLE | 如果配置仪表盘可以编辑。 [默认值:false] | | -e | --tdinsight-editable | TDINSIGHT_DASHBOARD_EDITABLE | 如果配置仪表盘可以编辑。 [默认值:false] |
| -E | --external-notifier | EXTERNAL_NOTIFIER | 将外部通知程序 uid 应用于 TDinsight 仪表盘。 | | -E | --external-notifier | EXTERNAL_NOTIFIER | 将外部通知程序 uid 应用于 TDinsight 仪表盘。 |
假设您在主机 `tdengine` 上启动 TDengine 数据库,HTTP API 端口为 `6041`,用户为 `root1`,密码为 `pass5ord`。执行脚本: 假设您在主机 `tdengine` 上启动 TDengine 数据库,HTTP API 端口为 `6041`,用户为 `root1`,密码为 `pass5ord`。执行脚本:
...@@ -175,7 +175,7 @@ sudo ./TDengine.sh -n TDengine-Env1 -a http://another:6041 -u root -p taosdata - ...@@ -175,7 +175,7 @@ sudo ./TDengine.sh -n TDengine-Env1 -a http://another:6041 -u root -p taosdata -
特别地,当您使用 Grafana Cloud 或其他组织时,`-O` 可用于设置组织 ID。 `-G` 可指定 Grafana 插件安装目录。 `-e` 参数将仪表盘设置为可编辑。 特别地,当您使用 Grafana Cloud 或其他组织时,`-O` 可用于设置组织 ID。 `-G` 可指定 Grafana 插件安装目录。 `-e` 参数将仪表盘设置为可编辑。
</TabItem> </TabItem>
<TabItem label="manual" value="手动设置 TDinsight"> <TabItem value="manual" label="手动设置 TDinsight">
### 安装 TDengine 数据源插件 ### 安装 TDengine 数据源插件
......
...@@ -29,7 +29,7 @@ taos -C ...@@ -29,7 +29,7 @@ taos -C
taos --dump-config taos --dump-config
``` ```
# 配置参数详细列表 ## 配置参数详细列表
:::note :::note
本节内容覆盖产品的配置参数,适用于服务端的参数按其对产品行为的影响进行分类,这其中有部分参数也同时适用于客户端;但有少量参数仅适用于客户端,这部分参数进行了单独归类。 本节内容覆盖产品的配置参数,适用于服务端的参数按其对产品行为的影响进行分类,这其中有部分参数也同时适用于客户端;但有少量参数仅适用于客户端,这部分参数进行了单独归类。
...@@ -83,7 +83,7 @@ taos --dump-config ...@@ -83,7 +83,7 @@ taos --dump-config
| :--- | :-------- | :---------------------------------- | :--------------------------------------------------------------------------------------------------------------------------------- | | :--- | :-------- | :---------------------------------- | :--------------------------------------------------------------------------------------------------------------------------------- |
| TCP | 6030 | 客户端与服务端之间通讯,多节点集群的节点间通讯。 | 由配置文件设置 serverPort 决定。 | | TCP | 6030 | 客户端与服务端之间通讯,多节点集群的节点间通讯。 | 由配置文件设置 serverPort 决定。 |
| TCP | 6041 | 客户端与服务端之间的 RESTful 通讯。 | 随 serverPort 端口变化。注意 taosAdapter 配置或有不同,请参考相应[文档](/reference/taosadapter/)。 | | TCP | 6041 | 客户端与服务端之间的 RESTful 通讯。 | 随 serverPort 端口变化。注意 taosAdapter 配置或有不同,请参考相应[文档](/reference/taosadapter/)。 |
| TCP | 6043 | TaosKeeper 监控服务端口。 | 随 TaosKeeper 启动参数设置变化。 | | TCP | 6043 | taosKeeper 监控服务端口。 | 随 taosKeeper 启动参数设置变化。 |
| TCP | 6044 | 支持 StatsD 的数据接入端口。 | 随 taosAdapter 启动参数设置变化| | TCP | 6044 | 支持 StatsD 的数据接入端口。 | 随 taosAdapter 启动参数设置变化|
| UDP | 6045 | 支持 collectd 数据接入端口。 | 随 taosAdapter 启动参数设置变化 | | UDP | 6045 | 支持 collectd 数据接入端口。 | 随 taosAdapter 启动参数设置变化 |
| TCP | 6060 | 企业版内 Monitor 服务的网络端口。 | | | TCP | 6060 | 企业版内 Monitor 服务的网络端口。 | |
...@@ -104,7 +104,7 @@ taos --dump-config ...@@ -104,7 +104,7 @@ taos --dump-config
| 属性 | 说明 | | 属性 | 说明 |
| -------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | | -------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
| 适用范围 | 仅服务端适用 | | 适用范围 | 仅服务端适用 |
| 含义 | 服务器内部的系统监控开关。监控主要负责收集物理节点的负载状况,包括 CPU、内存、硬盘、网络带宽的监控记录,监控信息将通过 HTTP 协议发送给由 `monitorFqdn``monitorProt` 指定的 TaosKeeper 监控服务 | | 含义 | 服务器内部的系统监控开关。监控主要负责收集物理节点的负载状况,包括 CPU、内存、硬盘、网络带宽的监控记录,监控信息将通过 HTTP 协议发送给由 `monitorFqdn``monitorProt` 指定的 taosKeeper 监控服务 |
| 取值范围 | 0:关闭监控服务, 1:激活监控服务。 | | 取值范围 | 0:关闭监控服务, 1:激活监控服务。 |
| 缺省值 | 0 | | 缺省值 | 0 |
...@@ -113,7 +113,7 @@ taos --dump-config ...@@ -113,7 +113,7 @@ taos --dump-config
| 属性 | 说明 | | 属性 | 说明 |
| -------- | -------------------------- | | -------- | -------------------------- |
| 适用范围 | 仅服务端适用 | | 适用范围 | 仅服务端适用 |
| 含义 | TaosKeeper 监控服务的 FQDN | | 含义 | taosKeeper 监控服务的 FQDN |
| 缺省值 | 无 | | 缺省值 | 无 |
### monitorPort ### monitorPort
...@@ -121,7 +121,7 @@ taos --dump-config ...@@ -121,7 +121,7 @@ taos --dump-config
| 属性 | 说明 | | 属性 | 说明 |
| -------- | --------------------------- | | -------- | --------------------------- |
| 适用范围 | 仅服务端适用 | | 适用范围 | 仅服务端适用 |
| 含义 | TaosKeeper 监控服务的端口号 | | 含义 | taosKeeper 监控服务的端口号 |
| 缺省值 | 6043 | | 缺省值 | 6043 |
### monitorInterval ### monitorInterval
...@@ -355,8 +355,6 @@ charset 的有效值是 UTF-8。 ...@@ -355,8 +355,6 @@ charset 的有效值是 UTF-8。
| 取值范围 | 0-4096 | | 取值范围 | 0-4096 |
| 缺省值 | CPU 核数的 2 倍 | | 缺省值 | CPU 核数的 2 倍 |
## 时间相关
## 性能调优 ## 性能调优
### numOfCommitThreads ### numOfCommitThreads
...@@ -611,7 +609,7 @@ charset 的有效值是 UTF-8。 ...@@ -611,7 +609,7 @@ charset 的有效值是 UTF-8。
| 适用范围 | 仅客户端适用 | | 适用范围 | 仅客户端适用 |
| 含义 | schemaless 列数据是否顺序一致,从3.0.3.0开始,该配置废弃 | | 含义 | schemaless 列数据是否顺序一致,从3.0.3.0开始,该配置废弃 |
| 值域 | 0:不一致;1: 一致 | | 值域 | 0:不一致;1: 一致 |
| 缺省值 | 1 | | 缺省值 | 0 |
## 其他 ## 其他
......
...@@ -87,7 +87,7 @@ st,t1=3,t2=4,t3=t3 c1=3i64,c3="passit",c2=false,c4=4f64 1626006833639000000 ...@@ -87,7 +87,7 @@ st,t1=3,t2=4,t3=t3 c1=3i64,c3="passit",c2=false,c4=4f64 1626006833639000000
:::tip :::tip
无模式所有的处理逻辑,仍会遵循 TDengine 对数据结构的底层限制,例如每行数据的总长度不能超过 无模式所有的处理逻辑,仍会遵循 TDengine 对数据结构的底层限制,例如每行数据的总长度不能超过
16KB。这方面的具体限制约束请参见 [TDengine SQL 边界限制](/taos-sql/limit) 48KB,标签值的总长度不超过16KB。这方面的具体限制约束请参见 [TDengine SQL 边界限制](/taos-sql/limit)
::: :::
......
...@@ -6,7 +6,7 @@ description: TDengine 3.0 版本监控指标的导出工具 ...@@ -6,7 +6,7 @@ description: TDengine 3.0 版本监控指标的导出工具
## 简介 ## 简介
TaosKeeper 是 TDengine 3.0 版本监控指标的导出工具,通过简单的几项配置即可获取 TDengine 的运行状态。taosKeeper 使用 TDengine RESTful 接口,所以不需要安装 TDengine 客户端即可使用。 taosKeeper 是 TDengine 3.0 版本监控指标的导出工具,通过简单的几项配置即可获取 TDengine 的运行状态。taosKeeper 使用 TDengine RESTful 接口,所以不需要安装 TDengine 客户端即可使用。
## 安装 ## 安装
......
...@@ -31,7 +31,7 @@ deleteTimings = true ...@@ -31,7 +31,7 @@ deleteTimings = true
### 配置 StatsD ### 配置 StatsD
使用 StatsD 需要下载其[源代码](https://github.com/statsd/statsd)。其配置文件请参考其源代码下载到本地的根目录下的示例文件 `exampleConfig.js` 进行修改。其中 <taosAdpater's host\> 填写运行 taosAdapter 的服务器域名或 IP 地址,<port for StatsD\>请填写 taosAdapter 接收 StatsD 数据的端口(默认为 6044)。 使用 StatsD 需要下载其[源代码](https://github.com/statsd/statsd)。其配置文件请参考其源代码下载到本地的根目录下的示例文件 `exampleConfig.js` 进行修改。其中 <taosAdapter's host\> 填写运行 taosAdapter 的服务器域名或 IP 地址,<port for StatsD\>请填写 taosAdapter 接收 StatsD 数据的端口(默认为 6044)。
``` ```
backends 部分添加 "./backends/repeater" backends 部分添加 "./backends/repeater"
......
...@@ -79,7 +79,7 @@ Development: false ...@@ -79,7 +79,7 @@ Development: false
### 从源码安装 ### 从源码安装
``` ```
git clone https://github.com/taosdata/kafka-connect-tdengine.git git clone --branch 3.0 https://github.com/taosdata/kafka-connect-tdengine.git
cd kafka-connect-tdengine cd kafka-connect-tdengine
mvn clean package mvn clean package
unzip -d $CONFLUENT_HOME/share/java/ target/components/packages/taosdata-kafka-connect-tdengine-*.zip unzip -d $CONFLUENT_HOME/share/java/ target/components/packages/taosdata-kafka-connect-tdengine-*.zip
......
...@@ -67,7 +67,7 @@ sudo systemctl start telegraf ...@@ -67,7 +67,7 @@ sudo systemctl start telegraf
使用 Web 浏览器访问 `IP:3000` 登录 Grafana 界面,系统初始用户名密码为 admin/admin。 使用 Web 浏览器访问 `IP:3000` 登录 Grafana 界面,系统初始用户名密码为 admin/admin。
点击左侧齿轮图标并选择 `Plugins`,应该可以找到 TDengine data source 插件图标。 点击左侧齿轮图标并选择 `Plugins`,应该可以找到 TDengine data source 插件图标。
点击左侧加号图标并选择 `Import`,从 `https://github.com/taosdata/grafanaplugin/blob/master/examples/telegraf/grafana/dashboards/telegraf-dashboard-v0.1.0.json` 下载 dashboard JSON 文件后导入。之后可以看到如下界面的仪表盘: 点击左侧加号图标并选择 `Import`,从 `https://github.com/taosdata/grafanaplugin/blob/master/examples/telegraf/grafana/dashboards/telegraf-dashboard-v3.json` (适配 TDengine 3.0, 如果使用 TDengine 2.x, 请下载 `telegraf-dashboard-v2.json`) 下载 dashboard JSON 文件后导入。之后可以看到如下界面的仪表盘:
![TDengine Database IT-DevOps-Solutions-telegraf-dashboard](./IT-DevOps-Solutions-telegraf-dashboard.webp) ![TDengine Database IT-DevOps-Solutions-telegraf-dashboard](./IT-DevOps-Solutions-telegraf-dashboard.webp)
......
...@@ -10,6 +10,11 @@ TDengine 2.x 各版本安装包请访问[这里](https://www.taosdata.com/all-do ...@@ -10,6 +10,11 @@ TDengine 2.x 各版本安装包请访问[这里](https://www.taosdata.com/all-do
import Release from "/components/ReleaseV3"; import Release from "/components/ReleaseV3";
## 3.0.3.0
<Release type="tdengine" version="3.0.3.0" />
## 3.0.2.6 ## 3.0.2.6
<Release type="tdengine" version="3.0.2.6" /> <Release type="tdengine" version="3.0.2.6" />
......
...@@ -10,6 +10,14 @@ taosTools 各版本安装包下载链接如下: ...@@ -10,6 +10,14 @@ taosTools 各版本安装包下载链接如下:
import Release from "/components/ReleaseV3"; import Release from "/components/ReleaseV3";
## 2.4.9
<Release type="tools" version="2.4.9" />
## 2.4.8
<Release type="tools" version="2.4.8" />
## 2.4.6 ## 2.4.6
<Release type="tools" version="2.4.6" /> <Release type="tools" version="2.4.6" />
......
## TDengine SpringBoot + Mybatis Demo ## TDengine SpringBoot + Mybatis Demo
## 需要提前创建 test 数据库 ## 需要提前创建 test 数据库
```
$ taos -s 'create database if not exists test'
$ curl http://localhost:8080/weather/init
```
### 配置 application.properties ### 配置 application.properties
```properties ```properties
# datasource config # datasource config
......
...@@ -7,6 +7,7 @@ ...@@ -7,6 +7,7 @@
<id column="ts" jdbcType="TIMESTAMP" property="ts"/> <id column="ts" jdbcType="TIMESTAMP" property="ts"/>
<result column="temperature" jdbcType="FLOAT" property="temperature"/> <result column="temperature" jdbcType="FLOAT" property="temperature"/>
<result column="humidity" jdbcType="FLOAT" property="humidity"/> <result column="humidity" jdbcType="FLOAT" property="humidity"/>
<result column="bytes" jdbcType="BINARY" property="bytes" />
</resultMap> </resultMap>
<select id="lastOne" resultType="java.util.Map"> <select id="lastOne" resultType="java.util.Map">
...@@ -36,6 +37,11 @@ ...@@ -36,6 +37,11 @@
binary binary
( (
64 64
),
bytes
binary
(
64
)) tags )) tags
( (
location nchar location nchar
...@@ -63,8 +69,8 @@ ...@@ -63,8 +69,8 @@
</select> </select>
<insert id="insert" parameterType="com.taosdata.example.springbootdemo.domain.Weather"> <insert id="insert" parameterType="com.taosdata.example.springbootdemo.domain.Weather">
insert into test.t#{groupId} (ts, temperature, humidity, note) insert into test.t#{groupId} (ts, temperature, humidity, note, bytes)
values (#{ts}, ${temperature}, ${humidity}, #{note}) values (#{ts}, ${temperature}, ${humidity}, #{note}, #{bytes})
</insert> </insert>
<select id="getSubTables" resultType="String"> <select id="getSubTables" resultType="String">
......
...@@ -2,6 +2,7 @@ package com.taosdata.example.springbootdemo.domain; ...@@ -2,6 +2,7 @@ package com.taosdata.example.springbootdemo.domain;
import com.fasterxml.jackson.annotation.JsonFormat; import com.fasterxml.jackson.annotation.JsonFormat;
import java.nio.charset.StandardCharsets;
import java.sql.Timestamp; import java.sql.Timestamp;
public class Weather { public class Weather {
...@@ -12,6 +13,9 @@ public class Weather { ...@@ -12,6 +13,9 @@ public class Weather {
private Float humidity; private Float humidity;
private String location; private String location;
private String note; private String note;
// In REST mode, the byte[] type is not recommended:
// byte arrays are encoded as UTF-8 strings, and the result may affect the correctness of the generated SQL.
private byte[] bytes;
private int groupId; private int groupId;
public Weather() { public Weather() {
...@@ -70,4 +74,30 @@ public class Weather { ...@@ -70,4 +74,30 @@ public class Weather {
public void setNote(String note) { public void setNote(String note) {
this.note = note; this.note = note;
} }
public byte[] getBytes() {
return bytes;
}
public void setBytes(byte[] bytes) {
this.bytes = bytes;
}
@Override
public String toString() {
final StringBuilder sb = new StringBuilder("Weather{");
sb.append("ts=").append(ts);
sb.append(", temperature=").append(temperature);
sb.append(", humidity=").append(humidity);
sb.append(", location='").append(location).append('\'');
sb.append(", note='").append(note).append('\'');
sb.append(", bytes -> string=");
if (bytes == null) sb.append("null");
else {
sb.append(new String(bytes, StandardCharsets.UTF_8));
}
sb.append(", groupId=").append(groupId);
sb.append('}');
return sb.toString();
}
} }
...@@ -5,6 +5,7 @@ import com.taosdata.example.springbootdemo.domain.Weather; ...@@ -5,6 +5,7 @@ import com.taosdata.example.springbootdemo.domain.Weather;
import org.springframework.beans.factory.annotation.Autowired; import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Service; import org.springframework.stereotype.Service;
import java.nio.charset.StandardCharsets;
import java.sql.Timestamp; import java.sql.Timestamp;
import java.util.List; import java.util.List;
import java.util.Map; import java.util.Map;
...@@ -30,6 +31,7 @@ public class WeatherService { ...@@ -30,6 +31,7 @@ public class WeatherService {
weather.setLocation(locations[random.nextInt(locations.length)]); weather.setLocation(locations[random.nextInt(locations.length)]);
weather.setGroupId(i % locations.length); weather.setGroupId(i % locations.length);
weather.setNote("note-" + i); weather.setNote("note-" + i);
weather.setBytes(locations[random.nextInt(locations.length)].getBytes(StandardCharsets.UTF_8));
weatherMapper.createTable(weather); weatherMapper.createTable(weather);
count += weatherMapper.insert(weather); count += weatherMapper.insert(weather);
} }
......
...@@ -50,6 +50,7 @@ extern int32_t tsTagFilterResCacheSize; ...@@ -50,6 +50,7 @@ extern int32_t tsTagFilterResCacheSize;
// queue & threads // queue & threads
extern int32_t tsNumOfRpcThreads; extern int32_t tsNumOfRpcThreads;
extern int32_t tsNumOfRpcSessions; extern int32_t tsNumOfRpcSessions;
extern int32_t tsTimeToGetAvailableConn;
extern int32_t tsNumOfCommitThreads; extern int32_t tsNumOfCommitThreads;
extern int32_t tsNumOfTaskQueueThreads; extern int32_t tsNumOfTaskQueueThreads;
extern int32_t tsNumOfMnodeQueryThreads; extern int32_t tsNumOfMnodeQueryThreads;
...@@ -73,6 +74,10 @@ extern int32_t tsHeartbeatTimeout; ...@@ -73,6 +74,10 @@ extern int32_t tsHeartbeatTimeout;
// vnode // vnode
extern int64_t tsVndCommitMaxIntervalMs; extern int64_t tsVndCommitMaxIntervalMs;
// mnode
extern int64_t tsMndSdbWriteDelta;
extern int64_t tsMndLogRetention;
// monitor // monitor
extern bool tsEnableMonitor; extern bool tsEnableMonitor;
extern int32_t tsMonitorInterval; extern int32_t tsMonitorInterval;
......
...@@ -66,6 +66,15 @@ extern int32_t tMsgDict[]; ...@@ -66,6 +66,15 @@ extern int32_t tMsgDict[];
typedef uint16_t tmsg_t; typedef uint16_t tmsg_t;
static inline bool tmsgIsValid(tmsg_t type) {
if (type < TDMT_DND_MAX_MSG || type < TDMT_MND_MAX_MSG || type < TDMT_VND_MAX_MSG || type < TDMT_SCH_MAX_MSG ||
type < TDMT_STREAM_MAX_MSG || type < TDMT_MON_MAX_MSG || type < TDMT_SYNC_MAX_MSG || type < TDMT_VND_STREAM_MSG ||
type < TDMT_VND_TMQ_MSG || type < TDMT_VND_TMQ_MAX_MSG) {
return true;
} else {
return false;
}
}
static inline bool vnodeIsMsgBlock(tmsg_t type) { static inline bool vnodeIsMsgBlock(tmsg_t type) {
return (type == TDMT_VND_CREATE_TABLE) || (type == TDMT_VND_ALTER_TABLE) || (type == TDMT_VND_DROP_TABLE) || return (type == TDMT_VND_CREATE_TABLE) || (type == TDMT_VND_ALTER_TABLE) || (type == TDMT_VND_DROP_TABLE) ||
(type == TDMT_VND_UPDATE_TAG_VAL) || (type == TDMT_VND_ALTER_CONFIRM); (type == TDMT_VND_UPDATE_TAG_VAL) || (type == TDMT_VND_ALTER_CONFIRM);
...@@ -603,6 +612,7 @@ typedef struct { ...@@ -603,6 +612,7 @@ typedef struct {
char user[TSDB_USER_LEN]; char user[TSDB_USER_LEN];
char passwd[TSDB_PASSWORD_LEN]; char passwd[TSDB_PASSWORD_LEN];
int64_t startTime; int64_t startTime;
char sVer[TSDB_VERSION_LEN];
} SConnectReq; } SConnectReq;
int32_t tSerializeSConnectReq(void* buf, int32_t bufLen, SConnectReq* pReq); int32_t tSerializeSConnectReq(void* buf, int32_t bufLen, SConnectReq* pReq);
...@@ -929,12 +939,19 @@ typedef struct { ...@@ -929,12 +939,19 @@ typedef struct {
int32_t minRows; int32_t minRows;
int32_t maxRows; int32_t maxRows;
int32_t walFsyncPeriod; int32_t walFsyncPeriod;
int16_t hashPrefix;
int16_t hashSuffix;
int8_t walLevel; int8_t walLevel;
int8_t precision; int8_t precision;
int8_t compression; int8_t compression;
int8_t replications; int8_t replications;
int8_t strict; int8_t strict;
int8_t cacheLast; int8_t cacheLast;
int32_t tsdbPageSize;
int32_t walRetentionPeriod;
int32_t walRollPeriod;
int64_t walRetentionSize;
int64_t walSegmentSize;
int32_t numOfRetensions; int32_t numOfRetensions;
SArray* pRetensions; SArray* pRetensions;
int8_t schemaless; int8_t schemaless;
...@@ -1015,7 +1032,8 @@ int32_t tDeserializeSUserAuthBatchRsp(void* buf, int32_t bufLen, SUserAuthBatchR ...@@ -1015,7 +1032,8 @@ int32_t tDeserializeSUserAuthBatchRsp(void* buf, int32_t bufLen, SUserAuthBatchR
void tFreeSUserAuthBatchRsp(SUserAuthBatchRsp* pRsp); void tFreeSUserAuthBatchRsp(SUserAuthBatchRsp* pRsp);
typedef struct { typedef struct {
char db[TSDB_DB_FNAME_LEN]; char db[TSDB_DB_FNAME_LEN];
STimeWindow timeRange;
} SCompactDbReq; } SCompactDbReq;
int32_t tSerializeSCompactDbReq(void* buf, int32_t bufLen, SCompactDbReq* pReq); int32_t tSerializeSCompactDbReq(void* buf, int32_t bufLen, SCompactDbReq* pReq);
...@@ -1274,9 +1292,10 @@ int32_t tSerializeSDropIdxReq(void* buf, int32_t bufLen, SDropIndexReq* pReq); ...@@ -1274,9 +1292,10 @@ int32_t tSerializeSDropIdxReq(void* buf, int32_t bufLen, SDropIndexReq* pReq);
int32_t tDeserializeSDropIdxReq(void* buf, int32_t bufLen, SDropIndexReq* pReq); int32_t tDeserializeSDropIdxReq(void* buf, int32_t bufLen, SDropIndexReq* pReq);
typedef struct { typedef struct {
int64_t dbUid; int64_t dbUid;
char db[TSDB_DB_FNAME_LEN]; char db[TSDB_DB_FNAME_LEN];
int64_t compactStartTime; int64_t compactStartTime;
STimeWindow tw;
} SCompactVnodeReq; } SCompactVnodeReq;
int32_t tSerializeSCompactVnodeReq(void* buf, int32_t bufLen, SCompactVnodeReq* pReq); int32_t tSerializeSCompactVnodeReq(void* buf, int32_t bufLen, SCompactVnodeReq* pReq);
...@@ -1812,7 +1831,7 @@ typedef struct { ...@@ -1812,7 +1831,7 @@ typedef struct {
#define STREAM_TRIGGER_AT_ONCE 1 #define STREAM_TRIGGER_AT_ONCE 1
#define STREAM_TRIGGER_WINDOW_CLOSE 2 #define STREAM_TRIGGER_WINDOW_CLOSE 2
#define STREAM_TRIGGER_MAX_DELAY 3 #define STREAM_TRIGGER_MAX_DELAY 3
#define STREAM_DEFAULT_IGNORE_EXPIRED 0 #define STREAM_DEFAULT_IGNORE_EXPIRED 1
#define STREAM_FILL_HISTORY_ON 1 #define STREAM_FILL_HISTORY_ON 1
#define STREAM_FILL_HISTORY_OFF 0 #define STREAM_FILL_HISTORY_OFF 0
#define STREAM_DEFAULT_FILL_HISTORY STREAM_FILL_HISTORY_OFF #define STREAM_DEFAULT_FILL_HISTORY STREAM_FILL_HISTORY_OFF
...@@ -1904,10 +1923,10 @@ typedef struct { ...@@ -1904,10 +1923,10 @@ typedef struct {
} SMqConsumerLostMsg, SMqConsumerRecoverMsg, SMqConsumerClearMsg; } SMqConsumerLostMsg, SMqConsumerRecoverMsg, SMqConsumerClearMsg;
typedef struct { typedef struct {
int64_t consumerId; int64_t consumerId;
char cgroup[TSDB_CGROUP_LEN]; char cgroup[TSDB_CGROUP_LEN];
char clientId[256]; char clientId[256];
SArray* topicNames; // SArray<char**> SArray* topicNames; // SArray<char**>
} SCMSubscribeReq; } SCMSubscribeReq;
static FORCE_INLINE int32_t tSerializeSCMSubscribeReq(void** buf, const SCMSubscribeReq* pReq) { static FORCE_INLINE int32_t tSerializeSCMSubscribeReq(void** buf, const SCMSubscribeReq* pReq) {
...@@ -2684,7 +2703,7 @@ typedef struct { ...@@ -2684,7 +2703,7 @@ typedef struct {
char subKey[TSDB_SUBSCRIBE_KEY_LEN]; char subKey[TSDB_SUBSCRIBE_KEY_LEN];
int8_t subType; int8_t subType;
int8_t withMeta; int8_t withMeta;
char* qmsg; char* qmsg; // SubPlanToString
int64_t suid; int64_t suid;
} SMqRebVgReq; } SMqRebVgReq;
......
...@@ -110,239 +110,240 @@ ...@@ -110,239 +110,240 @@
#define TK_TABLE_SUFFIX 92 #define TK_TABLE_SUFFIX 92
#define TK_NK_COLON 93 #define TK_NK_COLON 93
#define TK_MAX_SPEED 94 #define TK_MAX_SPEED 94
#define TK_TABLE 95 #define TK_START 95
#define TK_NK_LP 96 #define TK_WITH 96
#define TK_NK_RP 97 #define TK_TIMESTAMP 97
#define TK_STABLE 98 #define TK_END 98
#define TK_ADD 99 #define TK_TABLE 99
#define TK_COLUMN 100 #define TK_NK_LP 100
#define TK_MODIFY 101 #define TK_NK_RP 101
#define TK_RENAME 102 #define TK_STABLE 102
#define TK_TAG 103 #define TK_ADD 103
#define TK_SET 104 #define TK_COLUMN 104
#define TK_NK_EQ 105 #define TK_MODIFY 105
#define TK_USING 106 #define TK_RENAME 106
#define TK_TAGS 107 #define TK_TAG 107
#define TK_COMMENT 108 #define TK_SET 108
#define TK_BOOL 109 #define TK_NK_EQ 109
#define TK_TINYINT 110 #define TK_USING 110
#define TK_SMALLINT 111 #define TK_TAGS 111
#define TK_INT 112 #define TK_COMMENT 112
#define TK_INTEGER 113 #define TK_BOOL 113
#define TK_BIGINT 114 #define TK_TINYINT 114
#define TK_FLOAT 115 #define TK_SMALLINT 115
#define TK_DOUBLE 116 #define TK_INT 116
#define TK_BINARY 117 #define TK_INTEGER 117
#define TK_TIMESTAMP 118 #define TK_BIGINT 118
#define TK_NCHAR 119 #define TK_FLOAT 119
#define TK_UNSIGNED 120 #define TK_DOUBLE 120
#define TK_JSON 121 #define TK_BINARY 121
#define TK_VARCHAR 122 #define TK_NCHAR 122
#define TK_MEDIUMBLOB 123 #define TK_UNSIGNED 123
#define TK_BLOB 124 #define TK_JSON 124
#define TK_VARBINARY 125 #define TK_VARCHAR 125
#define TK_DECIMAL 126 #define TK_MEDIUMBLOB 126
#define TK_MAX_DELAY 127 #define TK_BLOB 127
#define TK_WATERMARK 128 #define TK_VARBINARY 128
#define TK_ROLLUP 129 #define TK_DECIMAL 129
#define TK_TTL 130 #define TK_MAX_DELAY 130
#define TK_SMA 131 #define TK_WATERMARK 131
#define TK_DELETE_MARK 132 #define TK_ROLLUP 132
#define TK_FIRST 133 #define TK_TTL 133
#define TK_LAST 134 #define TK_SMA 134
#define TK_SHOW 135 #define TK_DELETE_MARK 135
#define TK_PRIVILEGES 136 #define TK_FIRST 136
#define TK_DATABASES 137 #define TK_LAST 137
#define TK_TABLES 138 #define TK_SHOW 138
#define TK_STABLES 139 #define TK_PRIVILEGES 139
#define TK_MNODES 140 #define TK_DATABASES 140
#define TK_QNODES 141 #define TK_TABLES 141
#define TK_FUNCTIONS 142 #define TK_STABLES 142
#define TK_INDEXES 143 #define TK_MNODES 143
#define TK_ACCOUNTS 144 #define TK_QNODES 144
#define TK_APPS 145 #define TK_FUNCTIONS 145
#define TK_CONNECTIONS 146 #define TK_INDEXES 146
#define TK_LICENCES 147 #define TK_ACCOUNTS 147
#define TK_GRANTS 148 #define TK_APPS 148
#define TK_QUERIES 149 #define TK_CONNECTIONS 149
#define TK_SCORES 150 #define TK_LICENCES 150
#define TK_TOPICS 151 #define TK_GRANTS 151
#define TK_VARIABLES 152 #define TK_QUERIES 152
#define TK_CLUSTER 153 #define TK_SCORES 153
#define TK_BNODES 154 #define TK_TOPICS 154
#define TK_SNODES 155 #define TK_VARIABLES 155
#define TK_TRANSACTIONS 156 #define TK_CLUSTER 156
#define TK_DISTRIBUTED 157 #define TK_BNODES 157
#define TK_CONSUMERS 158 #define TK_SNODES 158
#define TK_SUBSCRIPTIONS 159 #define TK_TRANSACTIONS 159
#define TK_VNODES 160 #define TK_DISTRIBUTED 160
#define TK_ALIVE 161 #define TK_CONSUMERS 161
#define TK_LIKE 162 #define TK_SUBSCRIPTIONS 162
#define TK_TBNAME 163 #define TK_VNODES 163
#define TK_QTAGS 164 #define TK_ALIVE 164
#define TK_AS 165 #define TK_LIKE 165
#define TK_INDEX 166 #define TK_TBNAME 166
#define TK_FUNCTION 167 #define TK_QTAGS 167
#define TK_INTERVAL 168 #define TK_AS 168
#define TK_COUNT 169 #define TK_INDEX 169
#define TK_LAST_ROW 170 #define TK_FUNCTION 170
#define TK_TOPIC 171 #define TK_INTERVAL 171
#define TK_WITH 172 #define TK_COUNT 172
#define TK_META 173 #define TK_LAST_ROW 173
#define TK_CONSUMER 174 #define TK_TOPIC 174
#define TK_GROUP 175 #define TK_META 175
#define TK_DESC 176 #define TK_CONSUMER 176
#define TK_DESCRIBE 177 #define TK_GROUP 177
#define TK_RESET 178 #define TK_DESC 178
#define TK_QUERY 179 #define TK_DESCRIBE 179
#define TK_CACHE 180 #define TK_RESET 180
#define TK_EXPLAIN 181 #define TK_QUERY 181
#define TK_ANALYZE 182 #define TK_CACHE 182
#define TK_VERBOSE 183 #define TK_EXPLAIN 183
#define TK_NK_BOOL 184 #define TK_ANALYZE 184
#define TK_RATIO 185 #define TK_VERBOSE 185
#define TK_NK_FLOAT 186 #define TK_NK_BOOL 186
#define TK_OUTPUTTYPE 187 #define TK_RATIO 187
#define TK_AGGREGATE 188 #define TK_NK_FLOAT 188
#define TK_BUFSIZE 189 #define TK_OUTPUTTYPE 189
#define TK_STREAM 190 #define TK_AGGREGATE 190
#define TK_INTO 191 #define TK_BUFSIZE 191
#define TK_TRIGGER 192 #define TK_LANGUAGE 192
#define TK_AT_ONCE 193 #define TK_STREAM 193
#define TK_WINDOW_CLOSE 194 #define TK_INTO 194
#define TK_IGNORE 195 #define TK_TRIGGER 195
#define TK_EXPIRED 196 #define TK_AT_ONCE 196
#define TK_FILL_HISTORY 197 #define TK_WINDOW_CLOSE 197
#define TK_UPDATE 198 #define TK_IGNORE 198
#define TK_SUBTABLE 199 #define TK_EXPIRED 199
#define TK_KILL 200 #define TK_FILL_HISTORY 200
#define TK_CONNECTION 201 #define TK_UPDATE 201
#define TK_TRANSACTION 202 #define TK_SUBTABLE 202
#define TK_BALANCE 203 #define TK_KILL 203
#define TK_VGROUP 204 #define TK_CONNECTION 204
#define TK_MERGE 205 #define TK_TRANSACTION 205
#define TK_REDISTRIBUTE 206 #define TK_BALANCE 206
#define TK_SPLIT 207 #define TK_VGROUP 207
#define TK_DELETE 208 #define TK_MERGE 208
#define TK_INSERT 209 #define TK_REDISTRIBUTE 209
#define TK_NULL 210 #define TK_SPLIT 210
#define TK_NK_QUESTION 211 #define TK_DELETE 211
#define TK_NK_ARROW 212 #define TK_INSERT 212
#define TK_ROWTS 213 #define TK_NULL 213
#define TK_QSTART 214 #define TK_NK_QUESTION 214
#define TK_QEND 215 #define TK_NK_ARROW 215
#define TK_QDURATION 216 #define TK_ROWTS 216
#define TK_WSTART 217 #define TK_QSTART 217
#define TK_WEND 218 #define TK_QEND 218
#define TK_WDURATION 219 #define TK_QDURATION 219
#define TK_IROWTS 220 #define TK_WSTART 220
#define TK_ISFILLED 221 #define TK_WEND 221
#define TK_CAST 222 #define TK_WDURATION 222
#define TK_NOW 223 #define TK_IROWTS 223
#define TK_TODAY 224 #define TK_ISFILLED 224
#define TK_TIMEZONE 225 #define TK_CAST 225
#define TK_CLIENT_VERSION 226 #define TK_NOW 226
#define TK_SERVER_VERSION 227 #define TK_TODAY 227
#define TK_SERVER_STATUS 228 #define TK_TIMEZONE 228
#define TK_CURRENT_USER 229 #define TK_CLIENT_VERSION 229
#define TK_CASE 230 #define TK_SERVER_VERSION 230
#define TK_END 231 #define TK_SERVER_STATUS 231
#define TK_WHEN 232 #define TK_CURRENT_USER 232
#define TK_THEN 233 #define TK_CASE 233
#define TK_ELSE 234 #define TK_WHEN 234
#define TK_BETWEEN 235 #define TK_THEN 235
#define TK_IS 236 #define TK_ELSE 236
#define TK_NK_LT 237 #define TK_BETWEEN 237
#define TK_NK_GT 238 #define TK_IS 238
#define TK_NK_LE 239 #define TK_NK_LT 239
#define TK_NK_GE 240 #define TK_NK_GT 240
#define TK_NK_NE 241 #define TK_NK_LE 241
#define TK_MATCH 242 #define TK_NK_GE 242
#define TK_NMATCH 243 #define TK_NK_NE 243
#define TK_CONTAINS 244 #define TK_MATCH 244
#define TK_IN 245 #define TK_NMATCH 245
#define TK_JOIN 246 #define TK_CONTAINS 246
#define TK_INNER 247 #define TK_IN 247
#define TK_SELECT 248 #define TK_JOIN 248
#define TK_DISTINCT 249 #define TK_INNER 249
#define TK_WHERE 250 #define TK_SELECT 250
#define TK_PARTITION 251 #define TK_DISTINCT 251
#define TK_BY 252 #define TK_WHERE 252
#define TK_SESSION 253 #define TK_PARTITION 253
#define TK_STATE_WINDOW 254 #define TK_BY 254
#define TK_EVENT_WINDOW 255 #define TK_SESSION 255
#define TK_START 256 #define TK_STATE_WINDOW 256
#define TK_SLIDING 257 #define TK_EVENT_WINDOW 257
#define TK_FILL 258 #define TK_SLIDING 258
#define TK_VALUE 259 #define TK_FILL 259
#define TK_VALUE_F 260 #define TK_VALUE 260
#define TK_NONE 261 #define TK_VALUE_F 261
#define TK_PREV 262 #define TK_NONE 262
#define TK_NULL_F 263 #define TK_PREV 263
#define TK_LINEAR 264 #define TK_NULL_F 264
#define TK_NEXT 265 #define TK_LINEAR 265
#define TK_HAVING 266 #define TK_NEXT 266
#define TK_RANGE 267 #define TK_HAVING 267
#define TK_EVERY 268 #define TK_RANGE 268
#define TK_ORDER 269 #define TK_EVERY 269
#define TK_SLIMIT 270 #define TK_ORDER 270
#define TK_SOFFSET 271 #define TK_SLIMIT 271
#define TK_LIMIT 272 #define TK_SOFFSET 272
#define TK_OFFSET 273 #define TK_LIMIT 273
#define TK_ASC 274 #define TK_OFFSET 274
#define TK_NULLS 275 #define TK_ASC 275
#define TK_ABORT 276 #define TK_NULLS 276
#define TK_AFTER 277 #define TK_ABORT 277
#define TK_ATTACH 278 #define TK_AFTER 278
#define TK_BEFORE 279 #define TK_ATTACH 279
#define TK_BEGIN 280 #define TK_BEFORE 280
#define TK_BITAND 281 #define TK_BEGIN 281
#define TK_BITNOT 282 #define TK_BITAND 282
#define TK_BITOR 283 #define TK_BITNOT 283
#define TK_BLOCKS 284 #define TK_BITOR 284
#define TK_CHANGE 285 #define TK_BLOCKS 285
#define TK_COMMA 286 #define TK_CHANGE 286
#define TK_CONCAT 287 #define TK_COMMA 287
#define TK_CONFLICT 288 #define TK_CONCAT 288
#define TK_COPY 289 #define TK_CONFLICT 289
#define TK_DEFERRED 290 #define TK_COPY 290
#define TK_DELIMITERS 291 #define TK_DEFERRED 291
#define TK_DETACH 292 #define TK_DELIMITERS 292
#define TK_DIVIDE 293 #define TK_DETACH 293
#define TK_DOT 294 #define TK_DIVIDE 294
#define TK_EACH 295 #define TK_DOT 295
#define TK_FAIL 296 #define TK_EACH 296
#define TK_FILE 297 #define TK_FAIL 297
#define TK_FOR 298 #define TK_FILE 298
#define TK_GLOB 299 #define TK_FOR 299
#define TK_ID 300 #define TK_GLOB 300
#define TK_IMMEDIATE 301 #define TK_ID 301
#define TK_IMPORT 302 #define TK_IMMEDIATE 302
#define TK_INITIALLY 303 #define TK_IMPORT 303
#define TK_INSTEAD 304 #define TK_INITIALLY 304
#define TK_ISNULL 305 #define TK_INSTEAD 305
#define TK_KEY 306 #define TK_ISNULL 306
#define TK_MODULES 307 #define TK_KEY 307
#define TK_NK_BITNOT 308 #define TK_MODULES 308
#define TK_NK_SEMI 309 #define TK_NK_BITNOT 309
#define TK_NOTNULL 310 #define TK_NK_SEMI 310
#define TK_OF 311 #define TK_NOTNULL 311
#define TK_PLUS 312 #define TK_OF 312
#define TK_PRIVILEGE 313 #define TK_PLUS 313
#define TK_RAISE 314 #define TK_PRIVILEGE 314
#define TK_REPLACE 315 #define TK_RAISE 315
#define TK_RESTRICT 316 #define TK_REPLACE 316
#define TK_ROW 317 #define TK_RESTRICT 317
#define TK_SEMI 318 #define TK_ROW 318
#define TK_STAR 319 #define TK_SEMI 319
#define TK_STATEMENT 320 #define TK_STAR 320
#define TK_STRICT 321 #define TK_STATEMENT 321
#define TK_STRING 322 #define TK_STRICT 322
#define TK_TIMES 323 #define TK_STRING 323
#define TK_VALUES 324 #define TK_TIMES 324
#define TK_VARIABLE 325 #define TK_VALUES 325
#define TK_VIEW 326 #define TK_VARIABLE 326
#define TK_WAL 327 #define TK_VIEW 327
#define TK_WAL 328
#define TK_NK_SPACE 600 #define TK_NK_SPACE 600
#define TK_NK_COMMENT 601 #define TK_NK_COMMENT 601
......
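A note on the wholesale renumbering above: these token ids appear to be generated from the grammar, so inserting the new keywords TK_START, TK_WITH, TK_TIMESTAMP and TK_END at 95-98 and TK_LANGUAGE at 192 shifts every later token (TK_WAL, for instance, moves from 327 to 328). Only the TK_* names are stable across builds; nothing should compare against the numeric values.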
...@@ -53,10 +53,6 @@ typedef struct { ...@@ -53,10 +53,6 @@ typedef struct {
#define varDataNetLen(v) (htons(((VarDataLenT *)(v))[0])) #define varDataNetLen(v) (htons(((VarDataLenT *)(v))[0]))
#define varDataNetTLen(v) (sizeof(VarDataLenT) + varDataNetLen(v)) #define varDataNetTLen(v) (sizeof(VarDataLenT) + varDataNetLen(v))
// this data type is internally used only in 'in' query to hold the values
#define TSDB_DATA_TYPE_POINTER_ARRAY (1000)
#define TSDB_DATA_TYPE_VALUE_ARRAY (1001)
#define GET_TYPED_DATA(_v, _finalType, _type, _data) \ #define GET_TYPED_DATA(_v, _finalType, _type, _data) \
do { \ do { \
switch (_type) { \ switch (_type) { \
......
...@@ -219,6 +219,7 @@ int32_t qStreamRecoverFinish(qTaskInfo_t tinfo); ...@@ -219,6 +219,7 @@ int32_t qStreamRecoverFinish(qTaskInfo_t tinfo);
int32_t qStreamRestoreParam(qTaskInfo_t tinfo); int32_t qStreamRestoreParam(qTaskInfo_t tinfo);
bool qStreamRecoverScanFinished(qTaskInfo_t tinfo); bool qStreamRecoverScanFinished(qTaskInfo_t tinfo);
void qStreamCloseTsdbReader(void* task); void qStreamCloseTsdbReader(void* task);
void resetTaskInfo(qTaskInfo_t tinfo);
#ifdef __cplusplus #ifdef __cplusplus
} }
......
...@@ -41,14 +41,15 @@ extern "C" { ...@@ -41,14 +41,15 @@ extern "C" {
#define SHOW_LOCAL_VARIABLES_RESULT_FIELD2_LEN (TSDB_CONFIG_VALUE_LEN + VARSTR_HEADER_SIZE) #define SHOW_LOCAL_VARIABLES_RESULT_FIELD2_LEN (TSDB_CONFIG_VALUE_LEN + VARSTR_HEADER_SIZE)
#define SHOW_ALIVE_RESULT_COLS 1 #define SHOW_ALIVE_RESULT_COLS 1
#define PRIVILEGE_TYPE_MASK(n) (1 << n)
#define PRIVILEGE_TYPE_ALL PRIVILEGE_TYPE_MASK(0) #define BIT_FLAG_MASK(n) (1 << n)
#define PRIVILEGE_TYPE_READ PRIVILEGE_TYPE_MASK(1) #define BIT_FLAG_SET_MASK(val, mask) ((val) |= (mask))
#define PRIVILEGE_TYPE_WRITE PRIVILEGE_TYPE_MASK(2) #define BIT_FLAG_TEST_MASK(val, mask) (((val) & (mask)) != 0)
#define PRIVILEGE_TYPE_SUBSCRIBE PRIVILEGE_TYPE_MASK(3)
#define PRIVILEGE_TYPE_TEST_MASK(val, mask) (((val) & (mask)) != 0) #define PRIVILEGE_TYPE_ALL BIT_FLAG_MASK(0)
#define PRIVILEGE_TYPE_READ BIT_FLAG_MASK(1)
#define PRIVILEGE_TYPE_WRITE BIT_FLAG_MASK(2)
#define PRIVILEGE_TYPE_SUBSCRIBE BIT_FLAG_MASK(3)
typedef struct SDatabaseOptions { typedef struct SDatabaseOptions {
ENodeType type; ENodeType type;
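The hunk above generalizes the old PRIVILEGE_TYPE_MASK family into reusable BIT_FLAG_* macros and adds a setter. A self-contained usage sketch; the macro definitions are copied from the diff, everything else is illustrative:

#include <stdint.h>
#include <stdio.h>

#define BIT_FLAG_MASK(n)              (1 << n)
#define BIT_FLAG_SET_MASK(val, mask)  ((val) |= (mask))
#define BIT_FLAG_TEST_MASK(val, mask) (((val) & (mask)) != 0)

#define PRIVILEGE_TYPE_READ  BIT_FLAG_MASK(1)
#define PRIVILEGE_TYPE_WRITE BIT_FLAG_MASK(2)

int main(void) {
  int64_t privileges = 0;
  BIT_FLAG_SET_MASK(privileges, PRIVILEGE_TYPE_READ);  /* grant READ */
  printf("read:  %d\n", BIT_FLAG_TEST_MASK(privileges, PRIVILEGE_TYPE_READ));   /* prints 1 */
  printf("write: %d\n", BIT_FLAG_TEST_MASK(privileges, PRIVILEGE_TYPE_WRITE));  /* prints 0 */
  return 0;
}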
...@@ -127,6 +128,8 @@ typedef struct STrimDatabaseStmt { ...@@ -127,6 +128,8 @@ typedef struct STrimDatabaseStmt {
typedef struct SCompactDatabaseStmt { typedef struct SCompactDatabaseStmt {
ENodeType type; ENodeType type;
char dbName[TSDB_DB_NAME_LEN]; char dbName[TSDB_DB_NAME_LEN];
SNode* pStart;
SNode* pEnd;
} SCompactDatabaseStmt; } SCompactDatabaseStmt;
typedef struct STableOptions { typedef struct STableOptions {
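The new pStart/pEnd fields of SCompactDatabaseStmt, together with the TK_START, TK_WITH, TK_TIMESTAMP and TK_END keywords added earlier in this commit, point to a time-bounded form of the statement, presumably `COMPACT DATABASE db START WITH <ts> END WITH <ts>`. A hypothetical sketch of consuming the fields; SNode and SCompactDatabaseStmt come from the headers above, while evalTimestampExpr() is an invented placeholder for the real expression evaluation:

#include <stdint.h>

typedef struct { int64_t start; int64_t end; } STimeRange;

/* Invented placeholder: evaluate a timestamp expression node, returning
 * the fallback when the clause was omitted. Implementation elided. */
static int64_t evalTimestampExpr(SNode* pExpr, int64_t fallback);

static STimeRange compactTimeRange(const SCompactDatabaseStmt* pStmt) {
  STimeRange range;
  range.start = evalTimestampExpr(pStmt->pStart, INT64_MIN); /* no START WITH: from the beginning */
  range.end   = evalTimestampExpr(pStmt->pEnd,   INT64_MAX); /* no END WITH: up to the latest data */
  return range;
}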
...@@ -265,6 +268,7 @@ typedef struct SShowStmt { ...@@ -265,6 +268,7 @@ typedef struct SShowStmt {
typedef struct SShowCreateDatabaseStmt { typedef struct SShowCreateDatabaseStmt {
ENodeType type; ENodeType type;
char dbName[TSDB_DB_NAME_LEN]; char dbName[TSDB_DB_NAME_LEN];
char dbFName[TSDB_DB_FNAME_LEN];
void* pCfg; // SDbCfgInfo void* pCfg; // SDbCfgInfo
} SShowCreateDatabaseStmt; } SShowCreateDatabaseStmt;
...@@ -392,6 +396,15 @@ typedef struct SKillQueryStmt { ...@@ -392,6 +396,15 @@ typedef struct SKillQueryStmt {
char queryId[TSDB_QUERY_ID_LEN]; char queryId[TSDB_QUERY_ID_LEN];
} SKillQueryStmt; } SKillQueryStmt;
typedef enum EStreamOptionsSetFlag {
SOPT_TRIGGER_TYPE_SET = BIT_FLAG_MASK(0),
SOPT_WATERMARK_SET = BIT_FLAG_MASK(1),
SOPT_DELETE_MARK_SET = BIT_FLAG_MASK(2),
SOPT_FILL_HISTORY_SET = BIT_FLAG_MASK(3),
SOPT_IGNORE_EXPIRED_SET = BIT_FLAG_MASK(4),
SOPT_IGNORE_UPDATE_SET = BIT_FLAG_MASK(5),
} EStreamOptionsSetFlag;
typedef struct SStreamOptions { typedef struct SStreamOptions {
ENodeType type; ENodeType type;
int8_t triggerType; int8_t triggerType;
...@@ -401,6 +414,7 @@ typedef struct SStreamOptions { ...@@ -401,6 +414,7 @@ typedef struct SStreamOptions {
int8_t fillHistory; int8_t fillHistory;
int8_t ignoreExpired; int8_t ignoreExpired;
int8_t ignoreUpdate; int8_t ignoreUpdate;
int64_t setFlag;
} SStreamOptions; } SStreamOptions;
typedef struct SCreateStreamStmt { typedef struct SCreateStreamStmt {
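The new setFlag field and the EStreamOptionsSetFlag bits let the translator tell "option explicitly set by the user" apart from "option still at its default". A minimal sketch assuming hypothetical helper names; the real call sites are not in this diff:

/* Illustrative setter: record that the user set IGNORE EXPIRED explicitly. */
static void setStreamIgnoreExpired(SStreamOptions* pOptions, int8_t value) {
  pOptions->ignoreExpired = value;
  BIT_FLAG_SET_MASK(pOptions->setFlag, SOPT_IGNORE_EXPIRED_SET);
}

/* Later, defaults are applied only to options the user never touched: */
static void applyStreamDefaults(SStreamOptions* pOptions) {
  if (!BIT_FLAG_TEST_MASK(pOptions->setFlag, SOPT_IGNORE_EXPIRED_SET)) {
    pOptions->ignoreExpired = STREAM_DEFAULT_IGNORE_EXPIRED; /* now 1, per the change above */
  }
}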
...@@ -430,6 +444,7 @@ typedef struct SCreateFunctionStmt { ...@@ -430,6 +444,7 @@ typedef struct SCreateFunctionStmt {
char libraryPath[PATH_MAX]; char libraryPath[PATH_MAX];
SDataType outputDt; SDataType outputDt;
int32_t bufSize; int32_t bufSize;
int8_t language;
} SCreateFunctionStmt; } SCreateFunctionStmt;
typedef struct SDropFunctionStmt { typedef struct SDropFunctionStmt {
......
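SCreateFunctionStmt gains a `language` field, which lines up with the new TK_LANGUAGE token and with TSDB_FUNC_SCRIPT_LUA being replaced by TSDB_FUNC_SCRIPT_PYTHON further down in this commit. A hypothetical mapping sketch; convertUdfLanguage() is an invented name and the accepted spellings are assumptions:

#include <strings.h>

#define TSDB_FUNC_SCRIPT_BIN_LIB 0
#define TSDB_FUNC_SCRIPT_PYTHON  1

/* Presumably CREATE FUNCTION ... LANGUAGE 'Python' ends up here. */
static int8_t convertUdfLanguage(const char* lang) {
  if (lang == NULL || lang[0] == '\0') return TSDB_FUNC_SCRIPT_BIN_LIB; /* default: native library */
  if (strcasecmp(lang, "python") == 0) return TSDB_FUNC_SCRIPT_PYTHON;
  return TSDB_FUNC_SCRIPT_BIN_LIB;
}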
...@@ -35,7 +35,6 @@ extern "C" { ...@@ -35,7 +35,6 @@ extern "C" {
#define SYNC_MAX_RECV_TIME_RANGE_MS 1200 #define SYNC_MAX_RECV_TIME_RANGE_MS 1200
#define SYNC_DEL_WAL_MS (1000 * 60) #define SYNC_DEL_WAL_MS (1000 * 60)
#define SYNC_ADD_QUORUM_COUNT 3 #define SYNC_ADD_QUORUM_COUNT 3
#define SYNC_MNODE_LOG_RETENTION 10000
#define SYNC_VNODE_LOG_RETENTION (TSDB_SYNC_LOG_BUFFER_RETENTION + 1) #define SYNC_VNODE_LOG_RETENTION (TSDB_SYNC_LOG_BUFFER_RETENTION + 1)
#define SNAPSHOT_MAX_CLOCK_SKEW_MS 1000 * 10 #define SNAPSHOT_MAX_CLOCK_SKEW_MS 1000 * 10
#define SNAPSHOT_WAIT_MS 1000 * 30 #define SNAPSHOT_WAIT_MS 1000 * 30
......
...@@ -17,6 +17,7 @@ ...@@ -17,6 +17,7 @@
#define _TD_UTIL_HTTP_H_ #define _TD_UTIL_HTTP_H_
#include "os.h" #include "os.h"
#include "tref.h"
#ifdef __cplusplus #ifdef __cplusplus
extern "C" { extern "C" {
...@@ -24,7 +25,8 @@ extern "C" { ...@@ -24,7 +25,8 @@ extern "C" {
typedef enum { HTTP_GZIP, HTTP_FLAT } EHttpCompFlag; typedef enum { HTTP_GZIP, HTTP_FLAT } EHttpCompFlag;
int32_t taosSendHttpReport(const char* server, const char* uri, uint16_t port, char* pCont, int32_t contLen, EHttpCompFlag flag); int32_t taosSendHttpReport(const char* server, const char* uri, uint16_t port, char* pCont, int32_t contLen,
EHttpCompFlag flag);
#ifdef __cplusplus #ifdef __cplusplus
} }
......
...@@ -114,7 +114,7 @@ typedef struct SRpcInit { ...@@ -114,7 +114,7 @@ typedef struct SRpcInit {
int32_t connLimitNum; int32_t connLimitNum;
int32_t connLimitLock; int32_t connLimitLock;
int32_t timeToGetConn;
int8_t supportBatch; // 0: no batch, 1: batch int8_t supportBatch; // 0: no batch, 1: batch
int32_t batchSize; int32_t batchSize;
void *parent; void *parent;
......
...@@ -29,9 +29,11 @@ extern "C" { ...@@ -29,9 +29,11 @@ extern "C" {
#define calloc CALLOC_FUNC_TAOS_FORBID #define calloc CALLOC_FUNC_TAOS_FORBID
#define realloc REALLOC_FUNC_TAOS_FORBID #define realloc REALLOC_FUNC_TAOS_FORBID
#define free FREE_FUNC_TAOS_FORBID #define free FREE_FUNC_TAOS_FORBID
#ifdef strdup
#undef strdup
#define strdup STRDUP_FUNC_TAOS_FORBID #define strdup STRDUP_FUNC_TAOS_FORBID
#endif
#endif // ifndef ALLOW_FORBID_FUNC #endif // ifndef ALLOW_FORBID_FUNC
#endif // if !defined(WINDOWS) #endif // if !defined(WINDOWS)
int32_t taosMemoryDbgInit(); int32_t taosMemoryDbgInit();
......
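The strdup change above narrows the forbid-macro: it is now installed only when strdup is itself already defined as a macro (with an #undef first, avoiding a redefinition warning), presumably because on platforms where strdup is a plain function the unconditional redefine interfered with system headers.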
...@@ -116,6 +116,7 @@ int32_t* taosGetErrno(); ...@@ -116,6 +116,7 @@ int32_t* taosGetErrno();
#define TSDB_CODE_NO_DISKSPACE TAOS_DEF_ERROR_CODE(0, 0x012B) #define TSDB_CODE_NO_DISKSPACE TAOS_DEF_ERROR_CODE(0, 0x012B)
#define TSDB_CODE_TIMEOUT_ERROR TAOS_DEF_ERROR_CODE(0, 0x012C) #define TSDB_CODE_TIMEOUT_ERROR TAOS_DEF_ERROR_CODE(0, 0x012C)
#define TSDB_CODE_MSG_ENCODE_ERROR TAOS_DEF_ERROR_CODE(0, 0x012D) #define TSDB_CODE_MSG_ENCODE_ERROR TAOS_DEF_ERROR_CODE(0, 0x012D)
#define TSDB_CODE_NO_ENOUGH_DISKSPACE TAOS_DEF_ERROR_CODE(0, 0x012E)
#define TSDB_CODE_APP_IS_STARTING TAOS_DEF_ERROR_CODE(0, 0x0130) // #define TSDB_CODE_APP_IS_STARTING TAOS_DEF_ERROR_CODE(0, 0x0130) //
#define TSDB_CODE_APP_IS_STOPPING TAOS_DEF_ERROR_CODE(0, 0x0131) // #define TSDB_CODE_APP_IS_STOPPING TAOS_DEF_ERROR_CODE(0, 0x0131) //
......
...@@ -206,7 +206,7 @@ typedef enum ELogicConditionType { ...@@ -206,7 +206,7 @@ typedef enum ELogicConditionType {
#define TSDB_FUNC_TYPE_SCALAR 1 #define TSDB_FUNC_TYPE_SCALAR 1
#define TSDB_FUNC_TYPE_AGGREGATE 2 #define TSDB_FUNC_TYPE_AGGREGATE 2
#define TSDB_FUNC_SCRIPT_BIN_LIB 0 #define TSDB_FUNC_SCRIPT_BIN_LIB 0
#define TSDB_FUNC_SCRIPT_LUA 1 #define TSDB_FUNC_SCRIPT_PYTHON 1
#define TSDB_FUNC_MAX_RETRIEVE 1024 #define TSDB_FUNC_MAX_RETRIEVE 1024
#define TSDB_INDEX_NAME_LEN 65 // 64 + 1 '\0' #define TSDB_INDEX_NAME_LEN 65 // 64 + 1 '\0'
......
(The remaining file diffs are collapsed by the viewer and are not reproduced here.)