Commit b824256e authored by Benguang Zhao

Merge branch '2.6' into FIX/TS-1438

......@@ -7,7 +7,8 @@ def sync_source() {
sh '''
hostname
date
'''
env
'''
sh '''
cd ${WKC}
[ -f src/connector/grafanaplugin/README.md ] && rm -f src/connector/grafanaplugin/README.md > /dev/null || echo "failed to remove grafanaplugin README.md"
......@@ -57,6 +58,7 @@ def sync_source() {
[ -f src/connector/grafanaplugin/README.md ] && rm -f src/connector/grafanaplugin/README.md > /dev/null || echo "failed to remove grafanaplugin README.md"
git pull >/dev/null
git clean -dfx
git log -5
'''
script {
if (env.CHANGE_TARGET == 'master') {
......@@ -90,6 +92,7 @@ def sync_source() {
cd ${WK}
git pull >/dev/null
git clean -dfx
git log -5
'''
script {
if (env.CHANGE_URL =~ /\/TDengine\//) {
......@@ -98,16 +101,13 @@ def sync_source() {
cd ${WKC}
git fetch origin +refs/pull/${CHANGE_ID}/merge
git checkout -qf FETCH_HEAD
if [ ! -d src/connector/python/.github ]; then
rm -rf src/connector/python/* || :
rm -rf src/connector/python/.* || :
git clone --depth 1 https://github.com/taosdata/taos-connector-python src/connector/python || echo "failed to clone python connector"
else
cd src/connector/python || echo "src/connector/python not exist"
git pull || :
cd ${WKC}
fi
git log -5
'''
sh '''
cd ${WKC}
rm -rf src/connector/python
mkdir -p src/connector/python
git clone --depth 1 https://github.com/taosdata/taos-connector-python src/connector/python || echo "failed to clone python connector"
'''
} else if (env.CHANGE_URL =~ /\/TDinternal\//) {
sh '''
......@@ -115,16 +115,13 @@ def sync_source() {
cd ${WK}
git fetch origin +refs/pull/${CHANGE_ID}/merge
git checkout -qf FETCH_HEAD
if [ ! -d community/src/connector/python/.github ]; then
rm -rf community/src/connector/python/* || :
rm -rf community/src/connector/python/.* || :
git clone --depth 1 https://github.com/taosdata/taos-connector-python community/src/connector/python || echo "failed to clone python connector"
else
cd community/src/connector/python || echo "community/src/connector/python not exist"
git pull || :
cd ${WK}
fi
git log -5
'''
sh '''
cd ${WKC}
rm -rf src/connector/python
mkdir -p src/connector/python
git clone --depth 1 https://github.com/taosdata/taos-connector-python src/connector/python || echo "failed to clone python connector"
'''
} else {
sh '''
......@@ -136,16 +133,6 @@ def sync_source() {
cd ${WKC}
git submodule update --init --recursive
'''
sh '''
cd ${WKC}
git branch
git log -5
'''
sh '''
cd ${WK}
git branch
git log -5
'''
}
def pre_test() {
sync_source()
......@@ -157,6 +144,7 @@ def pre_test() {
go env -w GO111MODULE=on
cmake .. -DBUILD_HTTP=false -DBUILD_TOOLS=true > /dev/null
make -j8 >/dev/null
make install
'''
return 1
}
......@@ -174,7 +162,7 @@ def pre_test_mac() {
return 1
}
pipeline {
agent {label " dispatcher "}
agent none
options { skipDefaultCheckout() }
environment{
WK = '/var/data/jenkins/workspace/TDinternal'
......@@ -182,69 +170,6 @@ pipeline {
LOGDIR = '/var/data/jenkins/workspace/log'
}
stages {
stage ('pre_build') {
steps {
sh '''
date
pwd
env
hostname
'''
}
}
stage ('Parallel build stage') {
//only build pr
options { skipDefaultCheckout() }
when {
allOf {
changeRequest()
not { expression { env.CHANGE_BRANCH =~ /docs\// }}
}
}
parallel {
stage ('dispatcher sync source') {
steps {
timeout(time: 20, unit: 'MINUTES') {
sync_source()
script {
sh '''
echo "dispatcher ready"
date
'''
}
}
}
}
stage ('build worker01') {
agent {label " worker01 "}
steps {
timeout(time: 20, unit: 'MINUTES') {
pre_test()
script {
sh '''
echo "worker01 build done"
date
'''
}
}
}
}
stage ('build worker02') {
agent {label " worker02 "}
steps {
timeout(time: 20, unit: 'MINUTES') {
pre_test()
script {
sh '''
echo "worker02 build done"
date
'''
}
}
}
}
}
}
stage('run test') {
options { skipDefaultCheckout() }
when {
......@@ -254,28 +179,28 @@ pipeline {
}
}
parallel {
stage ('build worker07_arm64') {
agent {label " worker07_arm64 "}
stage ('build arm64') {
agent {label " worker07_arm64 || worker09_arm64 "}
steps {
timeout(time: 20, unit: 'MINUTES') {
pre_test()
script {
sh '''
echo "worker07_arm64 build done"
echo "arm64 build done"
date
'''
}
}
}
}
stage ('build Mac_catalina ') {
stage ('build Mac') {
agent {label " Mac_catalina "}
steps {
timeout(time: 20, unit: 'MINUTES') {
pre_test_mac()
script {
sh '''
echo "Mac_catalina build done"
echo "Mac build done"
date
'''
}
......@@ -283,17 +208,28 @@ pipeline {
}
}
stage('run cases') {
agent {label " worker01 || worker02 "}
steps {
sh '''
date
pwd
hostname
'''
timeout(time: 15, unit: 'MINUTES') {
pre_test()
script {
sh '''
echo "Linux build done"
date
'''
}
}
catchError(buildResult: 'FAILURE', stageResult: 'FAILURE') {
timeout(time: 20, unit: 'MINUTES') {
timeout(time: 25, unit: 'MINUTES') {
sh '''
date
cd ${WKC}/tests/parallel_test
time ./run.sh -m m.json -t cases.task -l ${LOGDIR} -b ${BRANCH_NAME}
time ./run.sh -m /home/m.json -t cases.task -l ${LOGDIR} -b ${BRANCH_NAME}
date
hostname
'''
......
......@@ -54,14 +54,14 @@ Database changed.
taos> show vgroups;
vgId | tables | status | onlines | v1_dnode | v1_status | compacting |
==========================================================================================
14 | 38000 | ready | 1 | 1 | master | 0 |
15 | 38000 | ready | 1 | 1 | master | 0 |
16 | 38000 | ready | 1 | 1 | master | 0 |
17 | 38000 | ready | 1 | 1 | master | 0 |
18 | 37001 | ready | 1 | 1 | master | 0 |
19 | 37000 | ready | 1 | 1 | master | 0 |
20 | 37000 | ready | 1 | 1 | master | 0 |
21 | 37000 | ready | 1 | 1 | master | 0 |
14 | 38000 | ready | 1 | 1 | leader | 0 |
15 | 38000 | ready | 1 | 1 | leader | 0 |
16 | 38000 | ready | 1 | 1 | leader | 0 |
17 | 38000 | ready | 1 | 1 | leader | 0 |
18 | 37001 | ready | 1 | 1 | leader | 0 |
19 | 37000 | ready | 1 | 1 | leader | 0 |
20 | 37000 | ready | 1 | 1 | leader | 0 |
21 | 37000 | ready | 1 | 1 | leader | 0 |
Query OK, 8 row(s) in set (0.001154s)
```
......@@ -161,14 +161,14 @@ First `show vgroups` is executed to show the vgroup distribution.
taos> show vgroups;
vgId | tables | status | onlines | v1_dnode | v1_status | compacting |
==========================================================================================
14 | 38000 | ready | 1 | 3 | master | 0 |
15 | 38000 | ready | 1 | 3 | master | 0 |
16 | 38000 | ready | 1 | 3 | master | 0 |
17 | 38000 | ready | 1 | 3 | master | 0 |
18 | 37001 | ready | 1 | 3 | master | 0 |
19 | 37000 | ready | 1 | 1 | master | 0 |
20 | 37000 | ready | 1 | 1 | master | 0 |
21 | 37000 | ready | 1 | 1 | master | 0 |
14 | 38000 | ready | 1 | 3 | leader | 0 |
15 | 38000 | ready | 1 | 3 | leader | 0 |
16 | 38000 | ready | 1 | 3 | leader | 0 |
17 | 38000 | ready | 1 | 3 | leader | 0 |
18 | 37001 | ready | 1 | 3 | leader | 0 |
19 | 37000 | ready | 1 | 1 | leader | 0 |
20 | 37000 | ready | 1 | 1 | leader | 0 |
21 | 37000 | ready | 1 | 1 | leader | 0 |
Query OK, 8 row(s) in set (0.001314s)
```
......@@ -191,14 +191,14 @@ Query OK, 0 row(s) in set (0.000575s)
taos> show vgroups;
vgId | tables | status | onlines | v1_dnode | v1_status | v2_dnode | v2_status | compacting |
=================================================================================================================
14 | 38000 | ready | 1 | 3 | master | 0 | NULL | 0 |
15 | 38000 | ready | 1 | 3 | master | 0 | NULL | 0 |
16 | 38000 | ready | 1 | 3 | master | 0 | NULL | 0 |
17 | 38000 | ready | 1 | 3 | master | 0 | NULL | 0 |
18 | 37001 | ready | 2 | 1 | slave | 3 | master | 0 |
19 | 37000 | ready | 1 | 1 | master | 0 | NULL | 0 |
20 | 37000 | ready | 1 | 1 | master | 0 | NULL | 0 |
21 | 37000 | ready | 1 | 1 | master | 0 | NULL | 0 |
14 | 38000 | ready | 1 | 3 | leader | 0 | NULL | 0 |
15 | 38000 | ready | 1 | 3 | leader | 0 | NULL | 0 |
16 | 38000 | ready | 1 | 3 | leader | 0 | NULL | 0 |
17 | 38000 | ready | 1 | 3 | leader | 0 | NULL | 0 |
18 | 37001 | ready | 2 | 1 | follower | 3 | leader | 0 |
19 | 37000 | ready | 1 | 1 | leader | 0 | NULL | 0 |
20 | 37000 | ready | 1 | 1 | leader | 0 | NULL | 0 |
21 | 37000 | ready | 1 | 1 | leader | 0 | NULL | 0 |
Query OK, 8 row(s) in set (0.001242s)
```
......@@ -207,7 +207,7 @@ It can be seen from above output that vgId 18 has been moved from dnode 3 to dno
:::note
- Manual load balancing can only be performed when the automatic load balancing is disabled, i.e. `balance` is set to 0.
- Only a vnode in normal state, i.e. master or slave, can be moved. vnode can't be moved when its in status offline, unsynced or syncing.
- Only a vnode in normal state, i.e. leader or follower, can be moved. A vnode can't be moved when it's in offline, unsynced, or syncing status.
- Before moving a vnode, it's necessary to make sure the target dnode has enough resources: CPU, memory and disk.
:::
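
For reference, in TDengine 2.x the move itself is issued from the `taos` shell with `ALTER DNODE ... BALANCE`; the sketch below assumes vgroup 18 is being moved from dnode 3 to dnode 1, matching the example above, and that `balance` is set to 0:

```bash
# Move vgroup 18 from dnode 3 to dnode 1 (IDs are illustrative; requires balance=0 in taos.cfg)
taos -s 'ALTER DNODE 3 BALANCE "VNODE:18-DNODE:1"'
```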
......@@ -27,7 +27,7 @@ There may be multiple dnodes in a cluster, but only one mnode can be started in
SHOW MNODES;
```
The end point and role/status (master, slave, unsynced, or offline) of all mnodes can be shown by the above command. When the first dnode is started in a cluster, there must be one mnode in this dnode. Without at least one mnode, the cluster cannot work. If `numOfMNodes` is configured to 2, another mnode will be started when the second dnode is launched.
The end point and role/status (leader, follower, unsynced, or offline) of all mnodes can be shown by the above command. When the first dnode is started in a cluster, there must be one mnode in this dnode. Without at least one mnode, the cluster cannot work. If `numOfMNodes` is configured to 2, another mnode will be started when the second dnode is launched.
For the high availability of mnode, `numOfMnodes` needs to be configured to 2 or a higher value. Because the data consistency between mnodes must be guaranteed, the replica confirmation parameter `quorum` is set to 2 automatically if `numOfMNodes` is set to 2 or higher.
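
As a sketch, assuming the standard configuration location, this can be set by appending to `taos.cfg` on each dnode before startup:

```bash
# Request two mnodes; per the note above, quorum is then raised to 2 automatically
echo "numOfMnodes 2" | sudo tee -a /etc/taos/taos.cfg
sudo systemctl restart taosd
```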
......@@ -58,13 +58,13 @@ When a dnode is offline, it can be detected by the TDengine cluster. There are t
- If the dnode has been offline over the threshold configured in `offlineThreshold` in `taos.cfg`, the dnode will be removed from the cluster automatically. A system alert will be generated and automatic load balancing will be triggered if `balance` is set to 1. When the removed dnode is restarted and becomes online, it will not join the cluster automatically. The system administrator has to manually join the dnode to the cluster.
:::note
If all the vnodes in a vgroup (or mnodes in mnode group) are in offline or unsynced status, the master node can only be voted on, after all the vnodes or mnodes in the group become online and can exchange status. Following this, the vgroup (or mnode group) is able to provide service.
If all the vnodes in a vgroup (or mnodes in the mnode group) are in offline or unsynced status, a leader node can only be elected after all the vnodes or mnodes in the group become online and can exchange status. Following this, the vgroup (or mnode group) is able to provide service.
:::
## Arbitrator
The "arbitrator" component is used to address the special case when the number of replicas is set to an even number like 2,4 etc. If half of the vnodes in a vgroup don't work, it is impossible to vote and select a master node. This situation also applies to mnodes if the number of mnodes is set to an even number like 2,4 etc.
The "arbitrator" component is used to address the special case when the number of replicas is set to an even number like 2,4 etc. If half of the vnodes in a vgroup don't work, it is impossible to vote and select a leader node. This situation also applies to mnodes if the number of mnodes is set to an even number like 2,4 etc.
To resolve this problem, a new arbitrator component named `tarbitrator`, an abbreviation of TDengine Arbitrator, was introduced. The `tarbitrator` simulates a vnode or mnode but it's only responsible for network communication and doesn't handle any actual data access. As long as more than half of the vnode or mnode, including Arbitrator, are available the vnode group or mnode group can provide data insertion or query services normally.
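
A minimal deployment sketch, assuming the default tarbitrator port 6042 and an illustrative host name:

```bash
# Start tarbitrator on a spare host; it listens on port 6042 by default
nohup tarbitrator > /dev/null 2>&1 &

# On every dnode, register the arbitrator end point and restart taosd
echo "arbitrator arb.example.com:6042" | sudo tee -a /etc/taos/taos.cfg
sudo systemctl restart taosd
```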
......
......@@ -21,7 +21,7 @@ The following example is in an Ubuntu environment and uses the `curl` tool to ve
The following example lists all databases on the host h1.taosdata.com. To use it in your environment, replace `h1.taosdata.com` and `6041` (the default port) with the actual running TDengine service FQDN and port number.
```bash
curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d 'show databases;' h1.taosdata.com:6041/rest/sql
curl -L -H "Authorization: Basic cm9vdDp0YW9zZGF0YQ==" -d "show databases;" h1.taosdata.com:6041/rest/sql
```
A return value like the following indicates that the verification passed.
......@@ -106,13 +106,13 @@ The HTTP request's BODY is a complete SQL command, and the data table in the SQL
Use `curl` to initiate an HTTP request with a custom authentication method, with the following syntax.
```bash
curl -H 'Authorization: Basic <TOKEN>' -d '<SQL>' <ip>:<PORT>/rest/sql/[db_name]
curl -L -H "Authorization: Basic <TOKEN>" -d "<SQL>" <ip>:<PORT>/rest/sql/[db_name]
```
Or
```bash
curl -u username:password -d '<SQL>' <ip>:<PORT>/rest/sql/[db_name]
curl -L -u username:password -d "<SQL>" <ip>:<PORT>/rest/sql/[db_name]
```
where `TOKEN` is the string after Base64 encoding of `{username}:{password}`, e.g. `root:taosdata` is encoded as `cm9vdDp0YW9zZGF0YQ==`.
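
The token can be generated with any standard Base64 tool, for example:

```bash
# Prints cm9vdDp0YW9zZGF0YQ== for the default credentials
printf 'root:taosdata' | base64
```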
......@@ -192,7 +192,7 @@ Response body:
- query all records from table d1001 of database demo
```bash
curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d 'select * from demo.d1001' 192.168.0.1:6041/rest/sql
curl -L -H "Authorization: Basic cm9vdDp0YW9zZGF0YQ==" -d "select * from demo.d1001" 192.168.0.1:6041/rest/sql
```
Response body:
......@@ -218,7 +218,7 @@ Response body:
- Create database demo:
```bash
curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d 'create database demo' 192.168.0.1:6041/rest/sql
curl -L -H "Authorization: Basic cm9vdDp0YW9zZGF0YQ==" -d "create database demo" 192.168.0.1:6041/rest/sql
```
Response body:
......@@ -240,7 +240,7 @@ Response body:
When the HTTP request URL uses `/rest/sqlt`, the returned result set's timestamp value will be in Unix timestamp format, for example:
```bash
curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d 'select * from demo.d1001' 192.168.0.1:6041/rest/sqlt
curl -L -H "Authorization: Basic cm9vdDp0YW9zZGF0YQ==" -d "select * from demo.d1001" 192.168.0.1:6041/rest/sqlt
```
Response body:
......@@ -268,7 +268,7 @@ Response body:
When the HTTP request URL uses `/rest/sqlutc`, the timestamp of the returned result set will be expressed as a UTC format, for example:
```bash
curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d 'select * from demo.t1' 192.168.0.1:6041/rest/sqlutc
curl -L -H "Authorization: Basic cm9vdDp0YW9zZGF0YQ==" -d "select * from demo.t1" 192.168.0.1:6041/rest/sqlutc
```
Response body:
......
......@@ -250,7 +250,7 @@ The [Taos] structure is the connection manager in [libtaos] and provides two mai
Column information is stored using [ColumnMeta].
``rust
```rust
let cols = &q.column_meta;
for col in cols {
println!("name: {}, type: {:?} , bytes: {}", col.name, col.type_, col.bytes);
......
......@@ -7,7 +7,7 @@ description: "taosBenchmark (once called taosdemo ) is a tool for testing the pe
## Introduction
taosBenchmark (formerly taosdemo ) is a tool for testing the performance of TDengine products. taosBenchmark can test the performance of TDengine's insert, query, and subscription functions and simulate large amounts of data generated by many devices. taosBenchmark can flexibly control the number and type of databases, supertables, tag columns, number and type of data columns, and sub-tables, and types of databases, super tables, the number and types of data columns, the number of sub-tables, the amount of data per sub-table, the time interval for inserting data, the number of working threads, whether and how to insert disordered data, and so on. The installer provides taosdemo as a soft link to taosBenchmark for compatibility and for the convenience of past users.
taosBenchmark (formerly taosdemo) is a tool for testing the performance of TDengine products. taosBenchmark can test the performance of TDengine's insert, query, and subscription functions and simulate large amounts of data generated by many devices. taosBenchmark can be configured to generate user-defined databases, supertables, and subtables, along with the time-series data to populate them for performance benchmarking. taosBenchmark is highly configurable; some of the configurations include the time interval for inserting data, the number of working threads, and the capability to insert disordered data. The installer provides taosdemo as a soft link to taosBenchmark for compatibility with past users.
## Installation
......@@ -21,9 +21,9 @@ There are two ways to install taosBenchmark:
### Configuration and running methods
TaosBenchmark needs to be executed on the terminal of the operating system, it supports two configuration methods: [Command-line arguments](#Command-line arguments in detailed) and [JSON configuration file](#Configuration file arguments in detailed). These two methods are mutually exclusive. Users can use `-f <json file>` to specify a configuration file. When running taosBenchmark with command-line arguments to control its behavior, users should use other parameters for configuration, but not the `-f` parameter. In addition, taosBenchmark offers a special way of running without parameters.
taosBenchmark needs to be executed in a terminal of the operating system. It supports two configuration methods: [Command-line arguments](#command-line-arguments-in-detail) and [JSON configuration file](#configuration-file-parameters-in-detail). These two methods are mutually exclusive. Users can use `-f <json file>` to specify a configuration file. When running taosBenchmark with command-line arguments to control its behavior, users should use other parameters for configuration, but not the `-f` parameter. In addition, taosBenchmark offers a special way of running without parameters.
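
For example, invoked without any arguments, taosBenchmark falls back to built-in defaults (per its documentation it creates a sample database named `test` and writes generated data into it; the exact defaults may vary between versions):

```bash
taosBenchmark
```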
taosBenchmark supports complete performance testing of TDengine. taosBenchmark supports the TDengine functions in three categories: write, query, and subscribe. These three functions are mutually exclusive, and users can select only one of them each time taosBenchmark runs. It is important to note that the type of functionality to be tested is not configurable when using the command-line configuration method, which can only test writing performance. To test the query and subscription performance of the TDengine, you must use the configuration file method and specify the function type to test via the parameter `filetype` in the configuration file.
taosBenchmark supports complete performance testing of TDengine by providing functionality to write, query, and subscribe. These three functions are mutually exclusive, and users can select only one of them each time taosBenchmark runs. The query and subscribe functionality is only configurable using a JSON configuration file by specifying the parameter `filetype`, while writing can be performed through both the command line and a configuration file.
**Make sure that the TDengine cluster is running correctly before running taosBenchmark.**
......@@ -57,9 +57,8 @@ Use the following command-line to run taosBenchmark and control its behavior via
taosBenchmark -f <json file>
```
**Here are a few examples of configuration files:**
#### Example of inserting a scenario JSON configuration file
#### Configuration file examples
##### Example of inserting a scenario JSON configuration file
<details>
<summary>insert.json</summary>
......@@ -70,7 +69,7 @@ taosBenchmark -f <json file>
</details>
#### Query Scenario JSON Profile Example
##### Query Scenario JSON Profile Example
<details>
<summary>query.json</summary>
......@@ -81,7 +80,7 @@ taosBenchmark -f <json file>
</details>
#### Subscription JSON configuration example
##### Subscription JSON configuration example
<details>
<summary>subscribe.json</summary>
......@@ -172,7 +171,11 @@ taosBenchmark -A INT,DOUBLE,NCHAR,BINARY\(16\)
Switch parameter specifying whether to use escape characters in the super table and sub-table names. It is not used by default.
- **-C/--chinese** :
Switch specifying whether to use Unicode Chinese characters in nchar and binary. It is not used by default.
- **-N/--normal-table** :
This parameter indicates that taosBenchmark will create only normal tables instead of super tables. The default value is false. It can be used if the insert mode is taosc, stmt, or rest.
......@@ -373,7 +376,7 @@ The configuration parameters for querying the sub-tables or the normal tables ar
- **sqls**.
- **sql**: the SQL command to be executed.
- **result**: the file to save the query result. If it is unspecified, taosBenchark will not save the result.
- **result**: the file to save the query result. If it is unspecified, taosBenchmark will not save the result.
#### Configuration parameters of query super table
......
......@@ -36,6 +36,8 @@ There are two ways to install taosdump:
:::tip
- taosdump versions after 1.4.1 provide the `-I` argument for parsing Avro file schema and data. If users specify `-s`, taosdump will only parse the schema.
- Backups after taosdump 1.4.2 use the batch count specified by the `-B` parameter. The default value is 16384. If, in some environments, low network speed or disk performance causes "Error actual dump ... batch ...", then try changing the `-B` parameter to a smaller value.
- The export of taosdump does not support resuming from an interruption. Therefore, if the taosdump process terminates unexpectedly, delete all related files that have been exported or generated.
- The import of taosdump supports resuming from an interruption, but when the process resumes, you will receive some "table already exists" messages, which can be ignored.
:::
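
As a sketch of a typical dump-and-restore round trip under these constraints (database name and paths are illustrative):

```bash
# Export database "demo" into ./dump; if the export is interrupted, delete ./dump and start over
taosdump -D demo -o ./dump

# Import it back; "table already exists" messages from a resumed import can be ignored
taosdump -i ./dump
```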
......
......@@ -211,7 +211,7 @@
],
"timeFrom": null,
"timeShift": null,
"title": "Master MNode",
"title": "Leader MNode",
"transformations": [
{
"id": "filterByValue",
......@@ -221,7 +221,7 @@
"config": {
"id": "regex",
"options": {
"value": "master"
"value": "leader"
}
},
"fieldName": "role"
......@@ -300,7 +300,7 @@
],
"timeFrom": null,
"timeShift": null,
"title": "Master MNode Create Time",
"title": "Leader MNode Create Time",
"transformations": [
{
"id": "filterByValue",
......@@ -310,7 +310,7 @@
"config": {
"id": "regex",
"options": {
"value": "master"
"value": "leader"
}
},
"fieldName": "role"
......
......@@ -153,7 +153,7 @@
],
"timeFrom": null,
"timeShift": null,
"title": "Master MNode",
"title": "Leader MNode",
"transformations": [
{
"id": "filterByValue",
......@@ -163,7 +163,7 @@
"config": {
"id": "regex",
"options": {
"value": "master"
"value": "leader"
}
},
"fieldName": "role"
......@@ -246,7 +246,7 @@
],
"timeFrom": null,
"timeShift": null,
"title": "Master MNode Create Time",
"title": "Leader MNode Create Time",
"transformations": [
{
"id": "filterByValue",
......@@ -256,7 +256,7 @@
"config": {
"id": "regex",
"options": {
"value": "master"
"value": "leader"
}
},
"fieldName": "role"
......
......@@ -274,8 +274,8 @@ Details of the metrics are as follows.
This section contains the current information and status of the cluster; the alert information is also here (from left to right, top to bottom).
- **First EP**: the `firstEp` setting in the current TDengine cluster.
- **Version**: TDengine server version (master mnode).
- **Master Uptime**: The time elapsed since the current Master MNode was elected as Master.
- **Version**: TDengine server version (leader mnode).
- **Leader Uptime**: The time elapsed since the current Leader MNode was elected as Leader.
- **Expire Time** - Enterprise version expiration time.
- **Used Measuring Points** - The number of measuring points used by the Enterprise Edition.
- **Databases** - The number of databases.
......@@ -333,7 +333,7 @@ Data node resource usage display with repeated multiple rows for the variable `$
2. **Has MNodes?**: whether the current dnode is a mnode.
3. **CPU Cores**: the number of CPU cores.
4. **VNodes Number**: the number of VNodes in the current dnode.
5. **VNodes Masters**: the number of vnodes in the master role.
5. **VNodes Masters**: the number of vnodes in the leader role.
6. **Current CPU Usage of taosd**: CPU usage rate of taosd processes.
7. **Current Memory Usage of taosd**: memory usage of taosd processes.
8. **Disk Used**: The total disk usage percentage of the taosd data directory.
......
......@@ -26,7 +26,6 @@ All executable files of TDengine are in the _/usr/local/taos/bin_ directory by d
- _remove.sh_: script to uninstall TDengine. Please execute it carefully; it links to the **rmtaos** command in the /usr/bin directory and will remove the TDengine installation directory `/usr/local/taos`, but will keep `/etc/taos`, `/var/lib/taos`, `/var/log/taos`
- _taosadapter_: server-side executable that provides RESTful services and accepts write requests from a variety of other software
- _tarbitrator_: provides arbitration for two-node cluster deployments
- _run_taosd_and_taosadapter.sh_: script to start both taosd and taosAdapter
- _TDinsight.sh_: script to download TDinsight and install it
- _set_core.sh_: script for setting up the system to generate core dump files for easy debugging
- _taosd-dump-cfg.gdb_: gdb script to facilitate debugging of taosd.
......
......@@ -31,38 +31,41 @@ TDengine currently supports Grafana versions 7.5 and above. Users can go to the
### Install Grafana Plugin and Configure Data Source
<Tabs defaultValue="script">
<TabItem value="script" label="Using Script">
<TabItem value="gui" label="With GUI">
Set the url and authorization environment variables by `export` or a [`.env`(dotenv) file](https://hexdocs.pm/dotenvy/dotenv-file-format.html):
Under Grafana 8, the plugin catalog allows you to [browse and manage plugins within Grafana](https://grafana.com/docs/grafana/next/administration/plugin-management/#plugin-catalog) (for Grafana 7.x, use **With Script** or **Install & Configure Manually**). Find the page at **Configurations > Plugins**, search for **TDengine**, and click it to install.
```sh
export TDENGINE_API=http://tdengine.local:6041
# user + password
export TDENGINE_USER=user
export TDENGINE_PASSWORD=password
# Other useful variables
# - If to install TDengine data source, default is true
export TDENGINE_DS_ENABLED=false
# - Data source name to be created, default is TDengine
export TDENGINE_DS_NAME=TDengine
# - Data source organization id, default is 1
export GF_ORG_ID=1
# - Data source is editable in admin ui or not, default is 0 (false)
export TDENGINE_EDITABLE=1
```
![Search tdengine in grafana plugins](./grafana/grafana-plugin-search-tdengine.png)
Installation may take a few minutes, then you can **Create a TDengine data source**:
![Install and configure Grafana data source](./grafana/grafana-install-and-config.png)
Then you can add a TDengine data source by filling in the configuration options.
![TDengine Database Grafana plugin add data source](./grafana/grafana-data-source.png)
You can create dashboards with TDengine now.
</TabItem>
<TabItem value="script" label="With Script">
Run `install.sh`:
On a server with Grafana installed, running `install.sh` with the TDengine URL and username/password installs the TDengine data source plugin and adds a data source named TDengine. This is the recommended way for Grafana 7.x or [Grafana provisioning](https://grafana.com/docs/grafana/latest/administration/provisioning/) users.
```sh
bash -c "$(curl -fsSL https://raw.githubusercontent.com/taosdata/grafanaplugin/master/install.sh)"
bash -c "$(curl -fsSL \
https://raw.githubusercontent.com/taosdata/grafanaplugin/master/install.sh)" -- \
-a http://localhost:6041 \
-u root \
-p taosdata
```
With this script, TDengine data source plugin and the Grafana data source will be installed and created automatically with Grafana provisioning configurations. Save the script and type `./install.sh --help` for the full usage of the script.
Restart Grafana service and open Grafana in web-browser, usually <http://localhost:3000>.
Then restart the Grafana service and open Grafana in a web browser, usually at <http://localhost:3000>.
Save the script and type `./install.sh --help` for the full usage of the script.
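
On systemd-based systems, restarting Grafana typically looks like this:

```bash
sudo systemctl restart grafana-server
```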
</TabItem>
<TabItem value="manual" label="Install & Configure Manually">
Follow the installation steps in [Grafana](https://grafana.com/grafana/plugins/tdengine-datasource/?tab=installation) with the [``grafana-cli`` command-line tool](https://grafana.com/docs/grafana/latest/administration/cli/) for plugin installation.
......@@ -115,6 +118,73 @@ Click `Save & Test` to test. You should see a success message if the test worked
![TDengine Database TDinsight plugin add database 4](./grafana/add_datasource4.webp)
</TabItem>
<TabItem value="container" label="Container">
Please refer to [Install plugins in the Docker container](https://grafana.com/docs/grafana/next/setup-grafana/installation/docker/#install-plugins-in-the-docker-container). This will install the `tdengine-datasource` plugin when the Grafana container starts:
```bash
docker run -d \
-p 3000:3000 \
--name=grafana \
-e "GF_INSTALL_PLUGINS=tdengine-datasource" \
grafana/grafana
```
You can set up a zero-configuration stack for TDengine + Grafana with [docker-compose](https://docs.docker.com/compose/) and a [Grafana provisioning](https://grafana.com/docs/grafana/latest/administration/provisioning/) file:
1. Save the provisioning configuration file to `tdengine.yml`.
```yml
apiVersion: 1
datasources:
- name: TDengine
type: tdengine-datasource
orgId: 1
url: "$TDENGINE_API"
isDefault: true
secureJsonData:
url: "$TDENGINE_URL"
basicAuth: "$TDENGINE_BASIC_AUTH"
token: "$TDENGINE_CLOUD_TOKEN"
version: 1
editable: true
```
2. Write `docker-compose.yml` with the [TDengine](https://hub.docker.com/r/tdengine/tdengine) and [Grafana](https://hub.docker.com/r/grafana/grafana) images.
```yml
version: "3.7"
services:
tdengine:
image: tdengine/tdengine:2.6.0.2
environment:
TAOS_FQDN: tdengine
volumes:
- tdengine-data:/var/lib/taos/
grafana:
image: grafana/grafana:8.5.6
volumes:
      - ./tdengine.yml:/etc/grafana/provisioning/datasources/tdengine.yml
- grafana-data:/var/lib/grafana
environment:
# install tdengine plugin at start
GF_INSTALL_PLUGINS: "tdengine-datasource"
TDENGINE_URL: "http://tdengine:6041"
#printf "$TDENGINE_USER:$TDENGINE_PASSWORD" | base64
TDENGINE_BASIC_AUTH: "cm9vdDp0YmFzZTEyNQ=="
ports:
- 3000:3000
volumes:
grafana-data:
tdengine-data:
```
3. Start TDengine and Grafana services: `docker-compose up -d`.
Open Grafana <http://localhost:3000>, and you can add dashboard with TDengine now.
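
Once the stack is up, you can sanity-check the TDengine REST endpoint from the host, assuming the default `root:taosdata` credentials of the TDengine image:

```bash
curl -L -u root:taosdata -d "show databases" localhost:6041/rest/sql
```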
</TabItem>
</Tabs>
......
......@@ -22,9 +22,9 @@ A complete TDengine system runs on one or more physical nodes. Logically, it inc
**Virtual node (vnode)**: To better support data sharding, load balancing and prevent data from overheating or skewing, data nodes are virtualized into multiple virtual nodes (vnode, V2, V3, V4, etc. in the figure). Each vnode is a relatively independent work unit, which is the basic unit of time-series data storage and has independent running threads, memory space and persistent storage path. A vnode contains a certain number of tables (data collection points). When a new table is created, the system checks whether a new vnode needs to be created. The number of vnodes that can be created on a data node depends on the capacity of the hardware of the physical node where the data node is located. A vnode belongs to only one DB, but a DB can have multiple vnodes. In addition to the stored time-series data, a vnode also stores the schema and tag values of the included tables. A virtual node is uniquely identified in the system by the EP of the data node and the VGroup ID to which it belongs and is created and managed by the management node.
**Management node (mnode)**: A virtual logical unit responsible for monitoring and maintaining the running status of all data nodes and load balancing among nodes (M in the figure). At the same time, the management node is also responsible for the storage and management of metadata (including users, databases, tables, static tags, etc.), so it is also called Meta Node. Multiple (up to 5) mnodes can be configured in a TDengine cluster, and they are automatically constructed into a virtual management node group (M0, M1, M2 in the figure). The master/slave mechanism is adopted for the mnode group and the data synchronization is carried out in a strongly consistent way. Any data update operation can only be executed on the master. The creation of mnode cluster is completed automatically by the system without manual intervention. There is at most one mnode on each dnode, which is uniquely identified by the EP of the data node to which it belongs. Each dnode automatically obtains the EP of the dnode where all mnodes in the whole cluster are located, through internal messaging interaction.
**Management node (mnode)**: A virtual logical unit responsible for monitoring and maintaining the running status of all data nodes and load balancing among nodes (M in the figure). At the same time, the management node is also responsible for the storage and management of metadata (including users, databases, tables, static tags, etc.), so it is also called Meta Node. Multiple (up to 5) mnodes can be configured in a TDengine cluster, and they are automatically constructed into a virtual management node group (M0, M1, M2 in the figure). The leader/follower mechanism is adopted for the mnode group and the data synchronization is carried out in a strongly consistent way. Any data update operation can only be executed on the leader. The creation of mnode cluster is completed automatically by the system without manual intervention. There is at most one mnode on each dnode, which is uniquely identified by the EP of the data node to which it belongs. Each dnode automatically obtains the EP of the dnode where all mnodes in the whole cluster are located, through internal messaging interaction.
**Virtual node group (VGroup)**: Vnodes on different data nodes can form a virtual node group to ensure the high availability of the system. The virtual node group is managed in a master/slave mechanism. Write operations can only be performed on the master vnode, and then replicated to slave vnodes, thus ensuring that one single replica of data is copied on multiple physical nodes. The number of virtual nodes in a vgroup equals the number of data replicas. If the number of replicas of a DB is N, the system must have at least N data nodes. The number of replicas can be specified by the parameter `“replica”` when creating a DB, and the default is 1. Using the multi-replication feature of TDengine, the same high data reliability can be achieved without the need for expensive storage devices such as disk arrays. Virtual node groups are created and managed by the management node, and the management node assigns a system unique ID, aka VGroup ID. If two virtual nodes have the same vnode group ID, it means that they belong to the same group and the data is backed up to each other. The number of virtual nodes in a virtual node group can be dynamically changed, allowing only one, that is, no data replication. VGroup ID is never changed. Even if a virtual node group is deleted, its ID will not be reused.
**Virtual node group (VGroup)**: Vnodes on different data nodes can form a virtual node group to ensure the high availability of the system. The virtual node group is managed in a leader/follower mechanism. Write operations can only be performed on the leader vnode, and then replicated to follower vnodes, thus ensuring that one single replica of data is copied on multiple physical nodes. The number of virtual nodes in a vgroup equals the number of data replicas. If the number of replicas of a DB is N, the system must have at least N data nodes. The number of replicas can be specified by the parameter `“replica”` when creating a DB, and the default is 1. Using the multi-replication feature of TDengine, the same high data reliability can be achieved without the need for expensive storage devices such as disk arrays. Virtual node groups are created and managed by the management node, and the management node assigns a system unique ID, aka VGroup ID. If two virtual nodes have the same vnode group ID, it means that they belong to the same group and the data is backed up to each other. The number of virtual nodes in a virtual node group can be dynamically changed, allowing only one, that is, no data replication. VGroup ID is never changed. Even if a virtual node group is deleted, its ID will not be reused.
**TAOSC**: TAOSC is the driver provided by TDengine to applications. It is responsible for dealing with the interaction between application and cluster, and provides the native interface for the C/C++ language. It is also embedded in the JDBC, C #, Python, Go, Node.js language connection libraries. Applications interact with the whole cluster through TAOSC instead of directly connecting to data nodes in the cluster. This module is responsible for obtaining and caching metadata; forwarding requests for insertion, query, etc. to the correct data node; when returning the results to the application, TAOSC also needs to be responsible for the final level of aggregation, sorting, filtering and other operations. For JDBC, C/C++/C#/Python/Go/Node.js interfaces, this module runs on the physical node where the application is located. At the same time, in order to support the fully distributed RESTful interface, TAOSC has a running instance on each dnode of TDengine cluster.
......@@ -62,13 +62,13 @@ To explain the relationship between vnode, mnode, TAOSC and application and thei
1. Application initiates a request to insert data through JDBC, ODBC, or other APIs.
2. TAOSC checks the cache to see if meta data exists for the table. If it does, it goes straight to Step 4. If not, TAOSC sends a get meta-data request to mnode.
3. Mnode returns the meta-data of the table to TAOSC. Meta-data contains the schema of the table, and also the vgroup information to which the table belongs (the vnode ID and the End Point of the dnode where the table belongs. If the number of replicas is N, there will be N groups of End Points). If TAOSC does not receive a response from the mnode for a long time, and there are multiple mnodes, TAOSC will send a request to the next mnode.
4. TAOSC initiates an insert request to master vnode.
4. TAOSC initiates an insert request to leader vnode.
5. After vnode inserts the data, it gives a reply to TAOSC, indicating that the insertion is successful. If TAOSC doesn't get a response from vnode for a long time, TAOSC will treat this node as offline. In this case, if there are multiple replicas of the inserted database, TAOSC will issue an insert request to the next vnode in vgroup.
6. TAOSC notifies APP that writing is successful.
For Step 2 and 3, when TAOSC starts, it does not know the End Point of mnode, so it will directly initiate a request to the configured serving End Point of the cluster. If the dnode that receives the request does not have a mnode configured, it will reply with the mnode EP list, so that TAOSC will re-issue a request to obtain meta-data to the EP of another mnode.
For Step 4 and 5, without caching, TAOSC can't recognize the master in the virtual node group, so assumes that the first vnode is the master and sends a request to it. If this vnode is not the master, it will reply to the actual master as a new target to which TAOSC shall send a request. Once a response of successful insertion is obtained, TAOSC will cache the information of master node.
For Step 4 and 5, without caching, TAOSC can't recognize the leader in the virtual node group, so assumes that the first vnode is the leader and sends a request to it. If this vnode is not the leader, it will reply to the actual leader as a new target to which TAOSC shall send a request. Once a response of successful insertion is obtained, TAOSC will cache the information of leader node.
The above describes the process of inserting data. The processes of querying and computing are the same. TAOSC encapsulates and hides all these complicated processes, and it is transparent to applications.
......@@ -119,65 +119,65 @@ The load balancing process does not require any manual intervention, and it is t
## Data Writing and Replication Process
If a database has N replicas, a virtual node group has N virtual nodes. But only one is the Master and all others are slaves. When the application writes a new record to system, only the Master vnode can accept the writing request. If a slave vnode receives a writing request, the system will notifies TAOSC to redirect.
If a database has N replicas, a virtual node group has N virtual nodes, but only one is the leader and all others are followers. When the application writes a new record to the system, only the leader vnode can accept the write request. If a follower vnode receives a write request, the system will notify TAOSC to redirect.
### Master vnode Writing Process
### Leader vnode Writing Process
Master Vnode uses a writing process as follows:
The leader vnode uses the following writing process:
![TDengine Database Master Writing Process](write_master.webp)
<center> Figure 3: TDengine Master writing process </center>
![TDengine Database Leader Writing Process](write_master.webp)
<center> Figure 3: TDengine Leader writing process </center>
1. Master vnode receives the application data insertion request, verifies, and moves to next step;
1. Leader vnode receives the application data insertion request, verifies it, and moves to the next step;
2. If the system configuration parameter `“walLevel”` is greater than 0, vnode will write the original request packet into the database log file WAL. If walLevel is set to 2 and fsync is set to 0, TDengine will write WAL data to disk immediately to ensure that even if the system goes down, all data can be recovered from the database log file;
3. If there are multiple replicas, vnode will forward data packet to slave vnodes in the same virtual node group, and the forwarded packet has a version number with data;
3. If there are multiple replicas, vnode will forward data packet to follower vnodes in the same virtual node group, and the forwarded packet has a version number with data;
4. Write into memory and add the record to “skip list”;
5. Master vnode returns a confirmation message to the application, indicating a successful write.
5. Leader vnode returns a confirmation message to the application, indicating a successful write.
6. If any of Step 2, 3 or 4 fails, the error will directly return to the application.
### Slave vnode Writing Process
### Follower vnode Writing Process
For a slave vnode, the write process as follows:
For a follower vnode, the write process is as follows:
![TDengine Database Slave Writing Process](write_slave.webp)
<center> Figure 4: TDengine Slave Writing Process </center>
![TDengine Database Follower Writing Process](write_slave.webp)
<center> Figure 4: TDengine Follower Writing Process </center>
1. Slave vnode receives a data insertion request forwarded by Master vnode;
1. Follower vnode receives a data insertion request forwarded by Leader vnode;
2. If the system configuration parameter `“walLevel”` is greater than 0, vnode will write the original request packet into the database log file WAL. If walLevel is set to 2 and fsync is set to 0, TDengine will write WAL data to disk immediately to ensure that even if the system goes down, all data can be recovered from the database log file;
3. Write into memory and add the record to “skip list”.
Compared with Master vnode, slave vnode has no forwarding or reply confirmation step, means two steps less. But writing into memory and WAL is exactly the same.
Compared with the leader vnode, a follower vnode has no forwarding or reply confirmation step, which means two steps fewer. But writing into memory and WAL is exactly the same.
### Remote Disaster Recovery and IDC (Internet Data Center) Migration
As discussed above, TDengine writes using Master and Slave processes. TDengine adopts asynchronous replication for data synchronization. This method can greatly improve write performance, with no obvious impact from network delay. By configuring IDC and rack number for each physical node, it can be ensured that for a virtual node group, virtual nodes are composed of physical nodes from different IDC and different racks, thus implementing remote disaster recovery without other tools.
As discussed above, TDengine writes using Leader and Follower processes. TDengine adopts asynchronous replication for data synchronization. This method can greatly improve write performance, with no obvious impact from network delay. By configuring IDC and rack number for each physical node, it can be ensured that for a virtual node group, virtual nodes are composed of physical nodes from different IDC and different racks, thus implementing remote disaster recovery without other tools.
On the other hand, TDengine supports dynamic modification of the replica number. Once the number of replicas increases, the newly added virtual nodes will immediately enter the data synchronization process. After synchronization is complete, added virtual nodes can provide services. In the synchronization process, master and other synchronized virtual nodes keep serving. With this feature, TDengine can provide IDC migration without service interruption. It is only necessary to add new physical nodes to the existing IDC cluster, and then remove old physical nodes after the data synchronization is completed.
On the other hand, TDengine supports dynamic modification of the replica number. Once the number of replicas increases, the newly added virtual nodes will immediately enter the data synchronization process. After synchronization is complete, added virtual nodes can provide services. In the synchronization process, leader and other synchronized virtual nodes keep serving. With this feature, TDengine can provide IDC migration without service interruption. It is only necessary to add new physical nodes to the existing IDC cluster, and then remove old physical nodes after the data synchronization is completed.
However, the asynchronous replication has a very low probability scenario where data may be lost. The specific scenario is as follows:
1. Master vnode has finished its 5-step operations, confirmed the success of writing to APP, and then goes down;
2. Slave vnode receives the write request, then processing fails before writing to the log in Step 2;
3. Slave vnode will become the new master, thus losing one record.
1. Leader vnode has finished its 5-step operations, confirmed the success of writing to APP, and then goes down;
2. Follower vnode receives the write request, then processing fails before writing to the log in Step 2;
3. Follower vnode will become the new leader, thus losing one record.
In theory, for asynchronous replication, there is no guarantee to prevent data loss. However, this is an extremely low probability scenario as described above.
Note: Remote disaster recovery and no-downtime IDC migration are only supported by Enterprise Edition. **Hint: This function is not available yet**
### Master/slave Selection
### Leader/follower Selection
Vnode maintains a version number. When memory data is persisted, the version number will also be persisted. For each data update operation, whether it is time-series data or metadata, this version number will be increased by one.
When a vnode starts, the roles (master, slave) are uncertain, and the data is in an unsynchronized state. It’s necessary to establish TCP connections with other nodes in the virtual node group and exchange status, including version and its own roles. Through the exchange, the system implements a master-selection process. The rules are as follows:
When a vnode starts, the roles (leader, follower) are uncertain, and the data is in an unsynchronized state. It’s necessary to establish TCP connections with other nodes in the virtual node group and exchange status, including version and its own roles. Through the exchange, the system implements a leader-selection process. The rules are as follows:
1. If there’s only one replica, it’s always master
2. When all replicas are online, the one with latest version is master
3. Over half of online nodes are virtual nodes, and some virtual node is slave, it will automatically become master
4. For 2 and 3, if multiple virtual nodes meet the requirement, the first vnode in virtual node group list will be selected as master.
1. If there’s only one replica, it’s always the leader
2. When all replicas are online, the one with the latest version is the leader
3. If over half of the virtual nodes in the group are online and one of them is a follower, it will automatically become the leader
4. For rules 2 and 3, if multiple virtual nodes meet the requirement, the first vnode in the virtual node group list will be selected as the leader.
### Synchronous Replication
For scenarios with strong data consistency requirements, asynchronous data replication is not applicable, because there is a small probability of data loss. So, TDengine provides a synchronous replication mechanism for users. When creating a database, in addition to specifying the number of replicas, user also needs to specify a new parameter “quorum”. If quorum is greater than one, it means that every time the Master forwards a message to the replica, it needs to wait for “quorum-1” reply confirms before informing the application that data has been successfully written in slave. If “quorum-1” reply confirms are not received within a certain period of time, the master vnode will return an error to the application.
For scenarios with strong data consistency requirements, asynchronous data replication is not applicable, because there is a small probability of data loss. So, TDengine provides a synchronous replication mechanism for users. When creating a database, in addition to specifying the number of replicas, the user also needs to specify a new parameter “quorum”. If quorum is greater than one, every time the leader forwards a message to the replicas, it needs to wait for “quorum-1” reply confirmations before informing the application that the data has been successfully written to the followers. If “quorum-1” reply confirmations are not received within a certain period of time, the leader vnode will return an error to the application.
With synchronous replication, the performance of the system will decrease and latency will increase. Because metadata needs strong consistency, the default for data synchronization between mnodes is synchronous replication.
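
As a sketch, both settings are chosen at database creation time from the `taos` shell (names and values are illustrative):

```bash
# Three replicas; each write must be confirmed by at least two vnodes before success is reported
taos -s "CREATE DATABASE demo REPLICA 3 QUORUM 2"
```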
......
......@@ -109,7 +109,7 @@ taos>
The REST interface provided by TDengine in the container is also accessible from the host.
```
curl -u root:taosdata -d 'show databases' 127.0.0.1:6041/rest/sql
curl -L -u root:taosdata -d "show databases" 127.0.0.1:6041/rest/sql
```
The output is similar to the following:
......@@ -147,7 +147,7 @@ docker run -d --name tdengine-taosd -p 6030-6042:6030-6042 -p 6030-6042:6030-604
- Verify the REST interface:
```bash
curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d 'show databases;' 127.0.0.1:6041/rest/sql
curl -L -H "Authorization: Basic cm9vdDp0YW9zZGF0YQ==" -d "show databases;" 127.0.0.1:6041/rest/sql
```
Below is an example output:
......
......@@ -21,7 +21,7 @@ RESTful 接口不依赖于任何 TDengine 的库,因此客户端不需要安
The following example lists all databases. Replace h1.taosdata.com and 6041 (the default value) with the FQDN and port number of the actual running TDengine service:
```bash
curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d 'show databases;' h1.taosdata.com:6041/rest/sql
curl -L -H "Authorization: Basic cm9vdDp0YW9zZGF0YQ==" -d "show databases;" h1.taosdata.com:6041/rest/sql
```
A return value like the following indicates that verification passed:
......@@ -106,13 +106,13 @@ HTTP 请求的 BODY 里就是一个完整的 SQL 语句,SQL 语句中的数据
Use `curl` to initiate an HTTP request with a custom authentication method, with the following syntax:
```bash
curl -H 'Authorization: Basic <TOKEN>' -d '<SQL>' <ip>:<PORT>/rest/sql/[db_name]
curl -L -H "Authorization: Basic <TOKEN>" -d "<SQL>" <ip>:<PORT>/rest/sql/[db_name]
```
Or
```bash
curl -u username:password -d '<SQL>' <ip>:<PORT>/rest/sql/[db_name]
curl -L -u username:password -d "<SQL>" <ip>:<PORT>/rest/sql/[db_name]
```
where `TOKEN` is the string after Base64 encoding of `{username}:{password}`, e.g. `root:taosdata` is encoded as `cm9vdDp0YW9zZGF0YQ==`
......@@ -192,7 +192,7 @@ curl http://192.168.0.1:6041/rest/login/root/taosdata
- Query all records from table d1001 of database demo:
```bash
curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d 'select * from demo.d1001' 192.168.0.1:6041/rest/sql
curl -L -H "Authorization: Basic cm9vdDp0YW9zZGF0YQ==" -d "select * from demo.d1001" 192.168.0.1:6041/rest/sql
```
Response body:
......@@ -218,7 +218,7 @@ curl http://192.168.0.1:6041/rest/login/root/taosdata
- Create database demo:
```bash
curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d 'create database demo' 192.168.0.1:6041/rest/sql
curl -L -H "Authorization: Basic cm9vdDp0YW9zZGF0YQ==" -d "create database demo" 192.168.0.1:6041/rest/sql
```
Response body:
......@@ -240,7 +240,7 @@ curl http://192.168.0.1:6041/rest/login/root/taosdata
When the HTTP request URL uses `/rest/sqlt`, the timestamps in the returned result set will be expressed in Unix timestamp format, for example:
```bash
curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d 'select * from demo.d1001' 192.168.0.1:6041/rest/sqlt
curl -L -H "Authorization: Basic cm9vdDp0YW9zZGF0YQ==" -d "select * from demo.d1001" 192.168.0.1:6041/rest/sqlt
```
Response body:
......@@ -268,7 +268,7 @@ curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d 'select * from demo.d1001
When the HTTP request URL uses `/rest/sqlutc`, the timestamps in the returned result set will be expressed as UTC strings, for example:
```bash
curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d 'select * from demo.t1' 192.168.0.1:6041/rest/sqlutc
curl -L -H "Authorization: Basic cm9vdDp0YW9zZGF0YQ==" -d "select * from demo.t1" 192.168.0.1:6041/rest/sqlutc
```
Response body:
......
......@@ -39,6 +39,8 @@ taosdump 有两种安装方式:
:::tip
- taosdump versions after 1.4.1 provide the `-I` argument for parsing Avro file schema and data; if `-s` is specified, only the schema is parsed.
- Backups after taosdump 1.4.2 use the batch count specified by the `-B` parameter, which defaults to 16384. If, in some environments, low network speed or insufficient disk performance causes "Error actual dump .. batch ..", try a smaller value for the `-B` parameter.
- The export of taosdump does not support resuming from an interruption, so if the process terminates unexpectedly, the correct approach is to delete all related files that have been exported or generated.
- The import of taosdump supports resuming from an interruption, but when the process restarts, you will receive some "table already exists" messages, which can be ignored.
:::
......
......@@ -26,7 +26,6 @@ TDengine 的所有可执行文件默认存放在 _/usr/local/taos/bin_ 目录下
- _remove.sh_: script to uninstall TDengine. Please execute it carefully; it links to the **rmtaos** command in the /usr/bin directory and will remove the TDengine installation directory /usr/local/taos, but will keep /etc/taos, /var/lib/taos, /var/log/taos
- _taosadapter_: server-side executable that provides RESTful services and accepts write requests from a variety of other software
- _tarbitrator_: provides arbitration for two-node cluster deployments
- _run_taosd_and_taosadapter.sh_: script to start taosd and taosAdapter together
- _TDinsight.sh_: script to download and install TDinsight
- _set_core.sh_: script to configure the system to generate core dump files for easier debugging
- _taosd-dump-cfg.gdb_: gdb script to facilitate debugging of taosd.
......
......@@ -29,39 +29,41 @@ TDengine 能够与开源数据可视化系统 [Grafana](https://www.grafana.com/
### Install the Grafana Plugin and Configure the Data Source
<Tabs defaultValue="script">
<TabItem value="script" label="使用安装脚本">
<TabItem value="gui" label="图形化界面安装">
将集群信息设置为环境变量;也可以使用 `.env` 文件,请参考 [dotenv](https://hexdocs.pm/dotenvy/dotenv-file-format.html):
使用 Grafana 最新版本(8.5+),您可以在 Grafana 中[浏览和管理插件](https://grafana.com/docs/grafana/next/administration/plugin-management/#plugin-catalog)(对于 7.x 版本,请使用 **安装脚本** 或 **手动安装并配置** 方式)。在 Grafana 管理界面中的 **Configurations > Plugins** 页面直接搜索并按照提示安装 TDengine。
```sh
export TDENGINE_API=http://tdengine.local:6041
# user + password
export TDENGINE_USER=user
export TDENGINE_PASSWORD=password
# Other environment variables:
# - Whether to install the data source, default is true
export TDENGINE_DS_ENABLED=false
# - Data source name to be created, default is TDengine
export TDENGINE_DS_NAME=TDengine
# - Data source organization id, default is 1
export GF_ORG_ID=1
# - Whether the data source is editable in the admin panel, default is 0 (not editable)
export TDENGINE_EDITABLE=1
```
![Search tdengine in grafana plugins](grafana-plugin-search-tdengine.png)
Once installed as shown above, follow the prompt to **Create a TDengine data source**.
![Install and configure Grafana data source](grafana-install-and-config.png)
Enter the TDengine configuration to complete the data source setup.
![TDengine Database Grafana plugin add data source](./grafana-data-source.png)
With the configuration done, you can now create dashboards with TDengine.
</TabItem>
<TabItem value="script" label="使用安装脚本">
运行安装脚本:
对于使用 Grafana 7.x 版本或使用 [Grafana Provisioning](https://grafana.com/docs/grafana/latest/administration/provisioning/) 配置的用户,可以在 Grafana 服务器上使用安装脚本自动安装插件即添加数据源 Provisioning 配置文件。
```sh
bash -c "$(curl -fsSL https://raw.githubusercontent.com/taosdata/grafanaplugin/master/install.sh)"
bash -c "$(curl -fsSL \
https://raw.githubusercontent.com/taosdata/grafanaplugin/master/install.sh)" -- \
-a http://localhost:6041 \
-u root \
-p taosdata
```
This script automatically installs the Grafana plugin and configures the data source. After installation, restart the Grafana service for the changes to take effect.
After installation, restart the Grafana service for the changes to take effect.
Save the script and run `./install.sh --help` for detailed help.
</TabItem>
<TabItem value="manual" label="手动安装并配置">
<TabItem value="manual" label="手动安装">
使用 [`grafana-cli` 命令行工具](https://grafana.com/docs/grafana/latest/administration/cli/) 进行插件[安装](https://grafana.com/grafana/plugins/tdengine-datasource/?tab=installation)。
......@@ -113,6 +115,73 @@ GF_INSTALL_PLUGINS=tdengine-datasource
![TDengine Database Grafana plugin add data source](./add_datasource4.webp)
</TabItem>
<TabItem value="container" label="K8s/Docker 容器">
参考 [Grafana 容器化安装说明](https://grafana.com/docs/grafana/next/setup-grafana/installation/docker/#install-plugins-in-the-docker-container)。使用如下命令启动一个容器,并自动安装 TDengine 插件:
```bash
docker run -d \
-p 3000:3000 \
--name=grafana \
-e "GF_INSTALL_PLUGINS=tdengine-datasource" \
grafana/grafana
```
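To confirm that the plugin was installed at startup, you can list the plugins inside the container (assuming the container name `grafana` used above):

```bash
docker exec grafana grafana-cli plugins ls
```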
With docker-compose and Grafana Provisioning you can get a zero-configuration start for the TDengine + Grafana combination:
1. Save the following file as `tdengine.yml`.
```yml
apiVersion: 1
datasources:
- name: TDengine
type: tdengine-datasource
orgId: 1
url: "$TDENGINE_API"
isDefault: true
secureJsonData:
url: "$TDENGINE_URL"
basicAuth: "$TDENGINE_BASIC_AUTH"
token: "$TDENGINE_CLOUD_TOKEN"
version: 1
editable: true
```
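The `$TDENGINE_BASIC_AUTH` value consumed above is the base64 encoding of `user:password`; for example, assuming the default credentials:

```bash
printf "root:taosdata" | base64   # yields cm9vdDp0YW9zZGF0YQ==
```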
2. Save the following file as `docker-compose.yml`.
```yml
version: "3.7"
services:
tdengine:
image: tdengine/tdengine:2.6.0.2
environment:
TAOS_FQDN: tdengine
volumes:
- tdengine-data:/var/lib/taos/
grafana:
image: grafana/grafana:8.5.6
volumes:
- ./tdengine.yml:/etc/grafana/provisioning/datasources/tdengine.yml
- grafana-data:/var/lib/grafana
environment:
# install tdengine plugin at start
GF_INSTALL_PLUGINS: "tdengine-datasource"
TDENGINE_URL: "http://tdengine:6041"
# printf "$TDENGINE_USER:$TDENGINE_PASSWORD" | base64
TDENGINE_BASIC_AUTH: "cm9vdDp0YmFzZTEyNQ=="
ports:
- 3000:3000
volumes:
grafana-data:
tdengine-data:
```
3. Start TDengine + Grafana with docker-compose: `docker-compose up -d`.
Open Grafana at <http://localhost:3000>; you can now add dashboards.
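One way to verify that provisioning worked is to query the Grafana HTTP API for configured data sources; this assumes Grafana's default `admin:admin` credentials:

```bash
curl -u admin:admin http://localhost:3000/api/datasources
```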
</TabItem>
</Tabs>
......
......@@ -108,7 +108,7 @@ taos>
You can also use curl on the host to access the TDengine server inside the Docker container through the RESTful port.
```
curl -u root:taosdata -d 'show databases' 127.0.0.1:6041/rest/sql
curl -L -u root:taosdata -d "show databases" 127.0.0.1:6041/rest/sql
```
Sample output:
......@@ -148,7 +148,7 @@ docker run -d --name tdengine-taosd -p 6030-6042:6030-6042 -p 6030-6042:6030-604
Use curl to verify that the RESTful interface works:
```bash
curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d 'show databases;' 127.0.0.1:6041/rest/sql
curl -L -H "Authorization: Basic cm9vdDp0YW9zZGF0YQ==" -d "show databases;" 127.0.0.1:6041/rest/sql
```
Sample output:
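The elided output follows the standard REST response format; an illustrative (not actual) sketch, with invented column names and values:

```json
{
  "status": "succ",
  "head": ["name", "created_time", "ntables"],
  "data": [["log", "2022-06-01 08:00:00.000", 11]],
  "rows": 1
}
```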
......
......@@ -105,6 +105,9 @@ elif echo $osinfo | grep -qwi "debian"; then
elif echo $osinfo | grep -qwi "Kylin"; then
# echo "This is Kylin system"
os_type=1
elif echo $osinfo | grep -qwi "Red"; then
# echo "This is Red Hat system"
os_type=1
elif echo $osinfo | grep -qwi "centos"; then
# echo "This is centos system"
os_type=2
......@@ -196,7 +199,6 @@ function install_bin() {
${csudo}rm -f ${bin_link_dir}/${uninstallScript} || :
${csudo}rm -f ${bin_link_dir}/tarbitrator || :
${csudo}rm -f ${bin_link_dir}/set_core || :
${csudo}rm -f ${bin_link_dir}/run_${serverName}_and_${adapterName}.sh || :
${csudo}rm -f ${bin_link_dir}/TDinsight.sh || :
${csudo}cp -r ${script_dir}/bin/* ${install_main_dir}/bin && ${csudo}chmod 0555 ${install_main_dir}/bin/*
......@@ -211,7 +213,6 @@ function install_bin() {
[ -x ${install_main_dir}/bin/TDinsight.sh ] && ${csudo}ln -s ${install_main_dir}/bin/TDinsight.sh ${bin_link_dir}/TDinsight.sh || :
[ -x ${install_main_dir}/bin/remove.sh ] && ${csudo}ln -s ${install_main_dir}/bin/remove.sh ${bin_link_dir}/${uninstallScript} || :
[ -x ${install_main_dir}/bin/set_core.sh ] && ${csudo}ln -s ${install_main_dir}/bin/set_core.sh ${bin_link_dir}/set_core || :
[ -x ${install_main_dir}/bin/run_${serverName}_and_${adapterName}.sh ] && ${csudo}ln -s ${install_main_dir}/bin/run_${serverName}_and_${adapterName}.sh ${bin_link_dir}/run_${serverName}_and_${adapterName}.sh || :
[ -x ${install_main_dir}/bin/tarbitrator ] && ${csudo}ln -s ${install_main_dir}/bin/tarbitrator ${bin_link_dir}/tarbitrator || :
if [ "$verMode" == "cluster" ]; then
......
......@@ -178,7 +178,6 @@ function install_bin() {
if [ "$osType" != "Darwin" ]; then
${csudo}rm -f ${bin_link_dir}/perfMonitor || :
${csudo}rm -f ${bin_link_dir}/set_core || :
${csudo}rm -f ${bin_link_dir}/run_${serverName}_and_${adapterName}.sh || :
${csudo}rm -f ${bin_link_dir}/${uninstallScript} || :
${csudo}cp -r ${binary_dir}/build/bin/${clientName} ${install_main_dir}/bin || :
......@@ -192,7 +191,6 @@ function install_bin() {
${csudo}cp -r ${script_dir}/taosd-dump-cfg.gdb ${install_main_dir}/bin
${csudo}cp -r ${script_dir}/remove.sh ${install_main_dir}/bin
${csudo}cp -r ${script_dir}/set_core.sh ${install_main_dir}/bin
${csudo}cp -r ${script_dir}/run_${serverName}_and_${adapterName}.sh ${install_main_dir}/bin
${csudo}cp -r ${script_dir}/startPre.sh ${install_main_dir}/bin
${csudo}chmod 0555 ${install_main_dir}/bin/*
......@@ -204,7 +202,6 @@ function install_bin() {
[ -x ${install_main_dir}/bin/${demoName} ] && ${csudo}ln -s ${install_main_dir}/bin/${demoName} ${bin_link_dir}/${demoName} || :
[ -x ${install_main_dir}/bin/perfMonitor ] && ${csudo}ln -s ${install_main_dir}/bin/perfMonitor ${bin_link_dir}/perfMonitor || :
[ -x ${install_main_dir}/set_core.sh ] && ${csudo}ln -s ${install_main_dir}/bin/set_core.sh ${bin_link_dir}/set_core || :
[ -x ${install_main_dir}/run_${serverName}_and_${adapterName}.sh ] && ${csudo}ln -s ${install_main_dir}/bin/run_${serverName}_and_${adapterName}.sh ${bin_link_dir}/run_${serverName}_and_${adapterName}.sh || :
[ -x ${install_main_dir}/bin/remove.sh ] && ${csudo}ln -s ${install_main_dir}/bin/remove.sh ${bin_link_dir}/${uninstallScript} || :
else
......
......@@ -91,7 +91,6 @@ else
${build_dir}/bin/tarbitrator\
${script_dir}/remove.sh \
${script_dir}/set_core.sh \
${script_dir}/run_taosd_and_taosadapter.sh \
${script_dir}/startPre.sh \
${script_dir}/taosd-dump-cfg.gdb"
fi
......@@ -158,7 +157,6 @@ if [ $adapterName != "taosadapter" ]; then
sed -i "s/taosadapter/${adapterName}/g" ${install_dir}/cfg/$adapterName.service
# !!! do not change taosadaptor here
mv ${install_dir}/bin/taosadapter ${install_dir}/bin/${adapterName}
mv ${install_dir}/bin/run_taosd_and_taosadapter.sh ${install_dir}/bin/run_${serverName}_and_${adapterName}.sh
mv ${install_dir}/bin/taosd-dump-cfg.gdb ${install_dir}/bin/${serverName}-dump-cfg.gdb
fi
......
......@@ -81,7 +81,6 @@ function clean_bin() {
${csudo}rm -f ${bin_link_dir}/${uninstallScript} || :
${csudo}rm -f ${bin_link_dir}/tarbitrator || :
${csudo}rm -f ${bin_link_dir}/set_core || :
${csudo}rm -f ${bin_link_dir}/run_${serverName}_and_${adapterName}.sh || :
${csudo}rm -f ${bin_link_dir}/TDinsight.sh || :
}
......
#!/bin/bash
[[ -x /usr/bin/taosadapter ]] && /usr/bin/taosadapter &
taosd
......@@ -4240,7 +4240,8 @@ void executeQuery(SSqlObj* pSql, SQueryInfo* pQueryInfo) {
tscAddIntoSqlList(pSql);
}
if (taosArrayGetSize(pQueryInfo->pUpstream) > 0) { // nest query. do execute it firstly
// upstream may be freed before retry
if (pQueryInfo->pUpstream && taosArrayGetSize(pQueryInfo->pUpstream) > 0) { // nest query. do execute it firstly
code = doInitSubState(pSql, (int32_t) taosArrayGetSize(pQueryInfo->pUpstream));
if (code != TSDB_CODE_SUCCESS) {
goto _error;
......
......@@ -24,7 +24,6 @@
#define OPT_ABORT 1 /* --abort */
int indicator = 1;
int p_port = 6041;
struct termios oldtio;
extern int wcwidth(wchar_t c);
......@@ -79,7 +78,7 @@ static error_t parse_opt(int key, char *arg, struct argp_state *state) {
if (arg) {
args.cloud = false;
tsDnodeShellPort = atoi(arg);
p_port = atoi(arg);
args.port = atoi(arg);
} else {
fprintf(stderr, "Invalid port\n");
return -1;
......@@ -239,16 +238,16 @@ void shellParseArgument(int argc, char *argv[], SShellArguments *arguments) {
argp_parse(&argp, argc, argv, 0, 0, arguments);
if (args.cloudDsn == NULL) {
if (args.cloud) {
args.cloudDsn = getenv("TDENGINE_CLOUD_DSN");
if (args.cloudDsn == NULL) {
args.cloud = false;
}
}
if (args.cloud) {
args.cloudDsn = getenv("TDENGINE_CLOUD_DSN");
if (args.cloudDsn == NULL) {
args.cloud = false;
}
}
} else {
args.cloud = true;
args.cloud = true;
}
if (arguments->abort) {
#ifndef _ALPINE
error(10, 0, "ABORTED");
......
......@@ -250,6 +250,12 @@ void shellParseArgument(int argc, char *argv[], SShellArguments *arguments) {
if (args.cloudDsn == NULL) {
if (args.cloud) {
args.cloudDsn = getenv("TDENGINE_CLOUD_DSN");
if (args.cloudDsn[strlen(args.cloudDsn) - 1] == '\"') {
args.cloudDsn[strlen(args.cloudDsn) - 1] = '\0';
}
if (args.cloudDsn[0] == '\"') {
args.cloudDsn += 1;
}
if (args.cloudDsn == NULL) {
args.cloud = false;
}
......
Subproject commit 0a81480420d6601bbdb57770ee64e40f24c4ea83
Subproject commit d3c29fb492514cbaf08cb533976121bff5d94dea
......@@ -3645,7 +3645,6 @@ static void diff_function(SQLFunctionCtx *pCtx) {
SDiffFuncInfo *pDiffInfo = GET_ROWCELL_INTERBUF(pResInfo);
void *data = GET_INPUT_DATA_LIST(pCtx);
bool isFirstBlock = (pDiffInfo->valueAssigned == false);
int32_t notNullElems = 0;
......@@ -3668,7 +3667,7 @@ static void diff_function(SQLFunctionCtx *pCtx) {
if (pDiffInfo->valueAssigned) {
int32_t diff = (int32_t)(pData[i] - pDiffInfo->i64Prev);
if (diff >= 0 || !pDiffInfo->ignoreNegative) {
*pOutput = (int32_t)(pData[i] - pDiffInfo->i64Prev); // direct previous may be null
*pOutput = diff;
*pTimestamp = (tsList != NULL)? tsList[i]:0;
pOutput += 1;
pTimestamp += 1;
......@@ -3694,7 +3693,7 @@ static void diff_function(SQLFunctionCtx *pCtx) {
if (pDiffInfo->valueAssigned) {
int64_t diff = pData[i] - pDiffInfo->i64Prev;
if (diff >= 0 || !pDiffInfo->ignoreNegative) {
*pOutput = pData[i] - pDiffInfo->i64Prev; // direct previous may be null
*pOutput = diff;
*pTimestamp = (tsList != NULL)? tsList[i]:0;
pOutput += 1;
pTimestamp += 1;
......@@ -3720,7 +3719,7 @@ static void diff_function(SQLFunctionCtx *pCtx) {
if (pDiffInfo->valueAssigned) {
double diff = pData[i] - pDiffInfo->d64Prev;
if (diff >= 0 || !pDiffInfo->ignoreNegative) {
SET_DOUBLE_VAL(pOutput, pData[i] - pDiffInfo->d64Prev); // direct previous may be null
SET_DOUBLE_VAL(pOutput, diff);
*pTimestamp = (tsList != NULL)? tsList[i]:0;
pOutput += 1;
pTimestamp += 1;
......@@ -3746,7 +3745,7 @@ static void diff_function(SQLFunctionCtx *pCtx) {
if (pDiffInfo->valueAssigned) {
float diff = (float)(pData[i] - pDiffInfo->d64Prev);
if (diff >= 0 || !pDiffInfo->ignoreNegative) {
*pOutput = (float)(pData[i] - pDiffInfo->d64Prev);
*pOutput = diff;
*pTimestamp = (tsList != NULL)? tsList[i]:0;
pOutput += 1;
pTimestamp += 1;
......@@ -3772,7 +3771,7 @@ static void diff_function(SQLFunctionCtx *pCtx) {
if (pDiffInfo->valueAssigned) {
int16_t diff = (int16_t)(pData[i] - pDiffInfo->i64Prev);
if (diff >= 0 || !pDiffInfo->ignoreNegative) {
*pOutput = (int16_t)(pData[i] - pDiffInfo->i64Prev);
*pOutput = diff;
*pTimestamp = (tsList != NULL)? tsList[i]:0;
pOutput += 1;
pTimestamp += 1;
......@@ -3798,7 +3797,7 @@ static void diff_function(SQLFunctionCtx *pCtx) {
if (pDiffInfo->valueAssigned) {
int8_t diff = (int8_t)(pData[i] - pDiffInfo->i64Prev);
if (diff >= 0 || !pDiffInfo->ignoreNegative) {
*pOutput = (int8_t)(pData[i] - pDiffInfo->i64Prev);
*pOutput = diff;
*pTimestamp = (tsList != NULL)? tsList[i]:0;
pOutput += 1;
pTimestamp += 1;
......@@ -3816,23 +3815,15 @@ static void diff_function(SQLFunctionCtx *pCtx) {
qError("error input type");
}
// initial value is not set yet
if (!pDiffInfo->valueAssigned || notNullElems <= 0) {
/*
* 1. current block and blocks before are full of null
* 2. current block may be null value
*/
assert(pCtx->hasNull);
} else {
if (notNullElems > 0) {
for (int t = 0; t < pCtx->tagInfo.numOfTagCols; ++t) {
SQLFunctionCtx* tagCtx = pCtx->tagInfo.pTagCtxList[t];
if (tagCtx->functionId == TSDB_FUNC_TAG_DUMMY) {
aAggs[TSDB_FUNC_TAGPRJ].xFunction(tagCtx);
}
}
int32_t forwardStep = (isFirstBlock) ? notNullElems : notNullElems;
GET_RES_INFO(pCtx)->numOfRes += forwardStep;
GET_RES_INFO(pCtx)->numOfRes += notNullElems;
}
}
......
......@@ -1588,6 +1588,8 @@ static bool initGroupbyInfo(const SSDataBlock *pSDataBlock, const SGroupbyExpr *
return true;
}
pInfo->pGroupbyDataInfo = taosArrayInit(pGroupbyExpr->numOfGroupCols, sizeof(SGroupbyDataInfo));
// the head of the key buffer stores the key length (int32_t)
pInfo->totalBytes = sizeof(int32_t);
for (int32_t k = 0; k < pGroupbyExpr->numOfGroupCols; ++k) {
SColIndex* pColIndex = taosArrayGet(pGroupbyExpr->columnInfo, k);
......@@ -1624,7 +1626,8 @@ static void buildGroupbyKeyBuf(const SSDataBlock *pSDataBlock, SGroupbyOperatorI
*buf = NULL;
return;
}
*buf = p;
*buf = p;
p += sizeof(int32_t);
for (int32_t i = 0; i < taosArrayGetSize(pInfo->pGroupbyDataInfo); i++) {
SGroupbyDataInfo *pDataInfo = taosArrayGet(pInfo->pGroupbyDataInfo, i);
......@@ -1646,26 +1649,22 @@ static void buildGroupbyKeyBuf(const SSDataBlock *pSDataBlock, SGroupbyOperatorI
memcpy(p, MULTI_KEY_DELIM, strlen(MULTI_KEY_DELIM));
p += strlen(MULTI_KEY_DELIM);
}
// compute keyLen and store it at the head of the buffer
int32_t keyLen = (p - *buf) - sizeof(int32_t);
*(int32_t *)(*buf) = keyLen;
}
static bool isGroupbyKeyEqual(void *a, void *b, void *ext) {
SGroupbyOperatorInfo *pInfo = (SGroupbyOperatorInfo *)ext;
if (memcmp(a, b, pInfo->totalBytes) == 0) {
return true;
int32_t len1 = *(int32_t *)a;
int32_t len2 = *(int32_t *)b;
if (len1 != len2) {
return false;
}
int32_t offset = 0;
for (int32_t i = 0; i < taosArrayGetSize(pInfo->pGroupbyDataInfo); i++) {
SGroupbyDataInfo *pDataInfo = taosArrayGet(pInfo->pGroupbyDataInfo, i);
char *a1 = (char *)a + sizeof(int32_t);
char *b1 = (char *)b + sizeof(int32_t);
char *k1 = (char *)a + offset;
char *k2 = (char *)b + offset;
if (getComparFunc(pDataInfo->type, 0)(k1, k2) != 0) {
return false;
}
offset += pDataInfo->bytes;
offset += (int32_t)strlen(MULTI_KEY_DELIM);
}
return true;
return memcmp(a1, b1, len1) == 0;
}
static void doHashGroupbyAgg(SOperatorInfo* pOperator, SGroupbyOperatorInfo *pInfo, SSDataBlock *pSDataBlock) {
......@@ -1708,7 +1707,9 @@ static void doHashGroupbyAgg(SOperatorInfo* pOperator, SGroupbyOperatorInfo *pIn
setParamForStableStddevByColData(pRuntimeEnv, pInfo->binfo.pCtx, pOperator->numOfOutput, pOperator->pExpr, pInfo);
}
int32_t ret = setGroupResultOutputBuf(pRuntimeEnv, &(pInfo->binfo), pOperator->numOfOutput, pInfo->prevData, type, pInfo->totalBytes, item->groupIndex);
char *preKey = pInfo->prevData + sizeof(int32_t);
int32_t keyLen = *(int32_t *)pInfo->prevData;
int32_t ret = setGroupResultOutputBuf(pRuntimeEnv, &(pInfo->binfo), pOperator->numOfOutput, preKey, type, keyLen, item->groupIndex);
if (ret != TSDB_CODE_SUCCESS) { // null data, too many state code
longjmp(pRuntimeEnv->env, TSDB_CODE_QRY_APP_ERROR);
}
......@@ -1730,7 +1731,9 @@ static void doHashGroupbyAgg(SOperatorInfo* pOperator, SGroupbyOperatorInfo *pIn
if (pQueryAttr->stableQuery && pQueryAttr->stabledev && (pRuntimeEnv->prevResult != NULL)) {
setParamForStableStddevByColData(pRuntimeEnv, pInfo->binfo.pCtx, pOperator->numOfOutput, pOperator->pExpr, pInfo);
}
int32_t ret = setGroupResultOutputBuf(pRuntimeEnv, &(pInfo->binfo), pOperator->numOfOutput, pInfo->prevData, type, pInfo->totalBytes, item->groupIndex);
char *preKey = pInfo->prevData + sizeof(int32_t);
int32_t keyLen = *(int32_t *)pInfo->prevData;
int32_t ret = setGroupResultOutputBuf(pRuntimeEnv, &(pInfo->binfo), pOperator->numOfOutput, preKey, type, keyLen, item->groupIndex);
if (ret != TSDB_CODE_SUCCESS) { // null data, too many state code
longjmp(pRuntimeEnv->env, TSDB_CODE_QRY_APP_ERROR);
}
......@@ -4312,14 +4315,15 @@ void setParamForStableStddevByColData(SQueryRuntimeEnv* pRuntimeEnv, SQLFunction
// find colid in dataBlock
int32_t bytes, offset = 0;
char* val = NULL;
char* prevData = pInfo->prevData + sizeof(int32_t); // the buffer head holds the key length (int32_t)
for (int32_t idx = 0; idx < taosArrayGetSize(pInfo->pGroupbyDataInfo); idx++) {
SGroupbyDataInfo *pDataInfo = taosArrayGet(pInfo->pGroupbyDataInfo, idx);
if (pDataInfo->index == pExpr1->colInfo.colId) {
bytes = pDataInfo->bytes;
val = pInfo->prevData + offset;
val = prevData + offset;
break;
}
offset += pDataInfo->bytes;
offset += pDataInfo->bytes + strlen(MULTI_KEY_DELIM); // multiple values are separated by MULTI_KEY_DELIM
}
if (val == NULL) { continue; }
......
......@@ -1298,7 +1298,7 @@ static int32_t offsetSkipBlock(STsdbQueryHandle* q, SBlockInfo* pBlockInfo, int6
range.from = i;
}
}
range.to = 0;
range.to = sblock;
taosArrayPush(pArray, &range);
range.from = -1;
break;
......@@ -1314,7 +1314,7 @@ static int32_t offsetSkipBlock(STsdbQueryHandle* q, SBlockInfo* pBlockInfo, int6
if(range.from == -1) {
range.from = i;
} else {
if(range.to + 1 != i) {
if(range.to - 1 != i) {
// add the previous
taosArrayPush(pArray, &range);
range.from = i;
......@@ -1359,16 +1359,17 @@ static void shrinkBlocksByQuery(STsdbQueryHandle *pQueryHandle, STableCheckInfo
SBlockIdx *compIndex = pQueryHandle->rhelper.pBlkIdx;
bool order = ASCENDING_TRAVERSE(pQueryHandle->order);
TSKEY s = TSKEY_INITIAL_VAL, e = TSKEY_INITIAL_VAL;
if (order) {
assert(pCheckInfo->lastKey <= pQueryHandle->window.ekey && pQueryHandle->window.skey <= pQueryHandle->window.ekey);
s = pQueryHandle->window.skey;
e = pQueryHandle->window.ekey;
} else {
assert(pCheckInfo->lastKey >= pQueryHandle->window.ekey && pQueryHandle->window.skey >= pQueryHandle->window.ekey);
e = pQueryHandle->window.skey;
s = pQueryHandle->window.ekey;
}
TSKEY s = TSKEY_INITIAL_VAL, e = TSKEY_INITIAL_VAL;
s = MIN(pCheckInfo->lastKey, pQueryHandle->window.ekey);
e = MAX(pCheckInfo->lastKey, pQueryHandle->window.ekey);
// discard the unqualified data block based on the query time window
int32_t start = binarySearchForBlock(pCompInfo->blocks, compIndex->numOfBlocks, s, TSDB_ORDER_ASC);
if (s > pCompInfo->blocks[start].keyLast) {
......
......@@ -15,6 +15,7 @@ import sys
from util.log import *
from util.cases import *
from util.sql import *
import os
from datetime import timedelta
......@@ -73,6 +74,14 @@ class TDTestCase:
tdSql.checkRows(1)
tdSql.checkData(0, 0, 2)
tdSql.checkData(0, 1, "master")
cmd = "taos -h 127.0.0.1 -s 'show databases'"
r = os.popen(cmd)
text = r.read()
r.close()
if 'Unable to establish connection' in text:
tdLog.exit("%s failed: command 'taos -h 127.0.0.1' Unable to establish connection" % __file__)
def stop(self):
tdSql.close()
......
......@@ -16,6 +16,7 @@ import taos
from util.log import *
from util.cases import *
from util.sql import *
from util.dnodes import *
import numpy as np
......@@ -156,7 +157,25 @@ class TDTestCase:
tdSql.error("select diff(col) from st group by dev")
tdSql.error("select diff(col) from st group by col")
# TS-1612
os.system("tar -zxf %s/functions/data.tar.gz" % os.getcwd())
tdSql.execute("create database radb")
tdSql.execute("use radb")
tdSql.execute("CREATE TABLE `vehicle_automode` (`time` TIMESTAMP,`auto_ctl_odom` INT) TAGS (`mac_address` BINARY(30))")
tdSql.execute("CREATE TABLE `va_00545a230327` USING `vehicle_automode` TAGS ('00545a230327')")
tdSql.execute("insert into va_00545a230327 file 'data/va_00545a230327.csv' ")
tdSql.query("select * from vehicle_automode")
rows = tdSql.queryRows
tdSql.query("select diff(auto_ctl_odom,1) as aco from radb.vehicle_automode GROUP BY tbname")
tdSql.checkRows(rows - 1)
os.system("rm -rf data")
tdDnodes.stop(1)
tdDnodes.start(1)
tdSql.query("select diff(auto_ctl_odom,1) as aco from radb.vehicle_automode GROUP BY tbname")
tdSql.checkRows(rows - 1)
def stop(self):
tdSql.close()
tdLog.success("%s successfully executed" % __file__)
......
......@@ -88,6 +88,23 @@ class TDTestCase:
tdSql.query("select count(*) from tb group by c1")
tdSql.checkRows(0)
# TS-1619
tdSql.execute("create database test")
tdSql.execute("use test")
tdSql.execute("create table stb(ts timestamp, c1 int, c2 nchar(30)) tags(t1 int)")
for i in range(3):
tdSql.execute("create table t%d using stb tags(%d)" % (i, i))
sql = "insert into t%d values " % i
for j in range(16):
if j % 4 == 0:
s = '00'
else:
s = str(j % 4 * 15)
sql += "(%d, %d, '2022-06-01 0%d:%s')" % (self.ts + j, i, int( j / 4 ), s)
tdSql.execute(sql)
tdSql.query("select c2, sum(c1) from stb group by c2")
tdSql.checkRows(16)
def stop(self):
tdSql.close()
......