diff --git a/Jenkinsfile2 b/Jenkinsfile2 index e03994b975181139ea62c62ee74357ac3863592b..79954fb969a12d232096c70c0b3ce6da6714d932 100644 --- a/Jenkinsfile2 +++ b/Jenkinsfile2 @@ -7,7 +7,8 @@ def sync_source() { sh ''' hostname date - ''' + env + ''' sh ''' cd ${WKC} [ -f src/connector/grafanaplugin/README.md ] && rm -f src/connector/grafanaplugin/README.md > /dev/null || echo "failed to remove grafanaplugin README.md" @@ -57,6 +58,7 @@ def sync_source() { [ -f src/connector/grafanaplugin/README.md ] && rm -f src/connector/grafanaplugin/README.md > /dev/null || echo "failed to remove grafanaplugin README.md" git pull >/dev/null git clean -dfx + git log -5 ''' script { if (env.CHANGE_TARGET == 'master') { @@ -90,6 +92,7 @@ def sync_source() { cd ${WK} git pull >/dev/null git clean -dfx + git log -5 ''' script { if (env.CHANGE_URL =~ /\/TDengine\//) { @@ -98,16 +101,13 @@ def sync_source() { cd ${WKC} git fetch origin +refs/pull/${CHANGE_ID}/merge git checkout -qf FETCH_HEAD - - if [ ! -d src/connector/python/.github ]; then - rm -rf src/connector/python/* || : - rm -rf src/connector/python/.* || : - git clone --depth 1 https://github.com/taosdata/taos-connector-python src/connector/python || echo "failed to clone python connector" - else - cd src/connector/python || echo "src/connector/python not exist" - git pull || : - cd ${WKC} - fi + git log -5 + ''' + sh ''' + cd ${WKC} + rm -rf src/connector/python + mkdir -p src/connector/python + git clone --depth 1 https://github.com/taosdata/taos-connector-python src/connector/python || echo "failed to clone python connector" ''' } else if (env.CHANGE_URL =~ /\/TDinternal\//) { sh ''' @@ -115,16 +115,13 @@ def sync_source() { cd ${WK} git fetch origin +refs/pull/${CHANGE_ID}/merge git checkout -qf FETCH_HEAD - - if [ ! -d community/src/connector/python/.github ]; then - rm -rf community/src/connector/python/* || : - rm -rf community/src/connector/python/.* || : - git clone --depth 1 https://github.com/taosdata/taos-connector-python community/src/connector/python || echo "failed to clone python connector" - else - cd community/src/connector/python || echo "community/src/connector/python not exist" - git pull || : - cd ${WK} - fi + git log -5 + ''' + sh ''' + cd ${WKC} + rm -rf src/connector/python + mkdir -p src/connector/python + git clone --depth 1 https://github.com/taosdata/taos-connector-python src/connector/python || echo "failed to clone python connector" ''' } else { sh ''' @@ -136,16 +133,6 @@ def sync_source() { cd ${WKC} git submodule update --init --recursive ''' - sh ''' - cd ${WKC} - git branch - git log -5 - ''' - sh ''' - cd ${WK} - git branch - git log -5 - ''' } def pre_test() { sync_source() @@ -157,6 +144,7 @@ def pre_test() { go env -w GO111MODULE=on cmake .. 
-DBUILD_HTTP=false -DBUILD_TOOLS=true > /dev/null make -j8 >/dev/null + make install ''' return 1 } @@ -174,7 +162,7 @@ def pre_test_mac() { return 1 } pipeline { - agent {label " dispatcher "} + agent none options { skipDefaultCheckout() } environment{ WK = '/var/data/jenkins/workspace/TDinternal' @@ -182,69 +170,6 @@ pipeline { LOGDIR = '/var/data/jenkins/workspace/log' } stages { - stage ('pre_build') { - steps { - sh ''' - date - pwd - env - hostname - ''' - } - } - stage ('Parallel build stage') { - //only build pr - options { skipDefaultCheckout() } - when { - allOf { - changeRequest() - not { expression { env.CHANGE_BRANCH =~ /docs\// }} - } - } - parallel { - stage ('dispatcher sync source') { - steps { - timeout(time: 20, unit: 'MINUTES') { - sync_source() - script { - sh ''' - echo "dispatcher ready" - date - ''' - } - } - } - } - stage ('build worker01') { - agent {label " worker01 "} - steps { - timeout(time: 20, unit: 'MINUTES') { - pre_test() - script { - sh ''' - echo "worker01 build done" - date - ''' - } - } - } - } - stage ('build worker02') { - agent {label " worker02 "} - steps { - timeout(time: 20, unit: 'MINUTES') { - pre_test() - script { - sh ''' - echo "worker02 build done" - date - ''' - } - } - } - } - } - } stage('run test') { options { skipDefaultCheckout() } when { @@ -254,28 +179,28 @@ pipeline { } } parallel { - stage ('build worker07_arm64') { - agent {label " worker07_arm64 "} + stage ('build arm64') { + agent {label " worker07_arm64 || worker09_arm64 "} steps { timeout(time: 20, unit: 'MINUTES') { pre_test() script { sh ''' - echo "worker07_arm64 build done" + echo "arm64 build done" date ''' } } } } - stage ('build Mac_catalina ') { + stage ('build Mac') { agent {label " Mac_catalina "} steps { timeout(time: 20, unit: 'MINUTES') { pre_test_mac() script { sh ''' - echo "Mac_catalina build done" + echo "Mac build done" date ''' } @@ -283,17 +208,28 @@ pipeline { } } stage('run cases') { + agent {label " worker01 || worker02 "} steps { sh ''' date + pwd hostname ''' + timeout(time: 15, unit: 'MINUTES') { + pre_test() + script { + sh ''' + echo "Linux build done" + date + ''' + } + } catchError(buildResult: 'FAILURE', stageResult: 'FAILURE') { - timeout(time: 20, unit: 'MINUTES') { + timeout(time: 25, unit: 'MINUTES') { sh ''' date cd ${WKC}/tests/parallel_test - time ./run.sh -m m.json -t cases.task -l ${LOGDIR} -b ${BRANCH_NAME} + time ./run.sh -m /home/m.json -t cases.task -l ${LOGDIR} -b ${BRANCH_NAME} date hostname ''' diff --git a/docs/en/10-cluster/02-cluster-mgmt.md b/docs/en/10-cluster/02-cluster-mgmt.md index 674c92e2766a4eb304079140af19c8efea72d55e..bd3386c41161fc55b4bedcecd6ad3ab5c35be8b6 100644 --- a/docs/en/10-cluster/02-cluster-mgmt.md +++ b/docs/en/10-cluster/02-cluster-mgmt.md @@ -54,14 +54,14 @@ Database changed. 
taos> show vgroups; vgId | tables | status | onlines | v1_dnode | v1_status | compacting | ========================================================================================== - 14 | 38000 | ready | 1 | 1 | master | 0 | - 15 | 38000 | ready | 1 | 1 | master | 0 | - 16 | 38000 | ready | 1 | 1 | master | 0 | - 17 | 38000 | ready | 1 | 1 | master | 0 | - 18 | 37001 | ready | 1 | 1 | master | 0 | - 19 | 37000 | ready | 1 | 1 | master | 0 | - 20 | 37000 | ready | 1 | 1 | master | 0 | - 21 | 37000 | ready | 1 | 1 | master | 0 | + 14 | 38000 | ready | 1 | 1 | leader | 0 | + 15 | 38000 | ready | 1 | 1 | leader | 0 | + 16 | 38000 | ready | 1 | 1 | leader | 0 | + 17 | 38000 | ready | 1 | 1 | leader | 0 | + 18 | 37001 | ready | 1 | 1 | leader | 0 | + 19 | 37000 | ready | 1 | 1 | leader | 0 | + 20 | 37000 | ready | 1 | 1 | leader | 0 | + 21 | 37000 | ready | 1 | 1 | leader | 0 | Query OK, 8 row(s) in set (0.001154s) ``` @@ -161,14 +161,14 @@ First `show vgroups` is executed to show the vgroup distribution. taos> show vgroups; vgId | tables | status | onlines | v1_dnode | v1_status | compacting | ========================================================================================== - 14 | 38000 | ready | 1 | 3 | master | 0 | - 15 | 38000 | ready | 1 | 3 | master | 0 | - 16 | 38000 | ready | 1 | 3 | master | 0 | - 17 | 38000 | ready | 1 | 3 | master | 0 | - 18 | 37001 | ready | 1 | 3 | master | 0 | - 19 | 37000 | ready | 1 | 1 | master | 0 | - 20 | 37000 | ready | 1 | 1 | master | 0 | - 21 | 37000 | ready | 1 | 1 | master | 0 | + 14 | 38000 | ready | 1 | 3 | leader | 0 | + 15 | 38000 | ready | 1 | 3 | leader | 0 | + 16 | 38000 | ready | 1 | 3 | leader | 0 | + 17 | 38000 | ready | 1 | 3 | leader | 0 | + 18 | 37001 | ready | 1 | 3 | leader | 0 | + 19 | 37000 | ready | 1 | 1 | leader | 0 | + 20 | 37000 | ready | 1 | 1 | leader | 0 | + 21 | 37000 | ready | 1 | 1 | leader | 0 | Query OK, 8 row(s) in set (0.001314s) ``` @@ -191,14 +191,14 @@ Query OK, 0 row(s) in set (0.000575s) taos> show vgroups; vgId | tables | status | onlines | v1_dnode | v1_status | v2_dnode | v2_status | compacting | ================================================================================================================= - 14 | 38000 | ready | 1 | 3 | master | 0 | NULL | 0 | - 15 | 38000 | ready | 1 | 3 | master | 0 | NULL | 0 | - 16 | 38000 | ready | 1 | 3 | master | 0 | NULL | 0 | - 17 | 38000 | ready | 1 | 3 | master | 0 | NULL | 0 | - 18 | 37001 | ready | 2 | 1 | slave | 3 | master | 0 | - 19 | 37000 | ready | 1 | 1 | master | 0 | NULL | 0 | - 20 | 37000 | ready | 1 | 1 | master | 0 | NULL | 0 | - 21 | 37000 | ready | 1 | 1 | master | 0 | NULL | 0 | + 14 | 38000 | ready | 1 | 3 | leader | 0 | NULL | 0 | + 15 | 38000 | ready | 1 | 3 | leader | 0 | NULL | 0 | + 16 | 38000 | ready | 1 | 3 | leader | 0 | NULL | 0 | + 17 | 38000 | ready | 1 | 3 | leader | 0 | NULL | 0 | + 18 | 37001 | ready | 2 | 1 | follower | 3 | leader | 0 | + 19 | 37000 | ready | 1 | 1 | leader | 0 | NULL | 0 | + 20 | 37000 | ready | 1 | 1 | leader | 0 | NULL | 0 | + 21 | 37000 | ready | 1 | 1 | leader | 0 | NULL | 0 | Query OK, 8 row(s) in set (0.001242s) ``` @@ -207,7 +207,7 @@ It can be seen from above output that vgId 18 has been moved from dnode 3 to dno :::note - Manual load balancing can only be performed when the automatic load balancing is disabled, i.e. `balance` is set to 0. -- Only a vnode in normal state, i.e. master or slave, can be moved. vnode can't be moved when its in status offline, unsynced or syncing. 
+- Only a vnode in normal state, i.e. leader or follower, can be moved. A vnode can't be moved when it's in offline, unsynced, or syncing status. - Before moving a vnode, it's necessary to make sure the target dnode has enough resources: CPU, memory and disk. ::: diff --git a/docs/en/10-cluster/03-ha-and-lb.md b/docs/en/10-cluster/03-ha-and-lb.md index bd718eef9f8dc181628132de831dbca2af59d158..9780e8f6c68904e444d07c6a8c87b095c6b70ead 100644 --- a/docs/en/10-cluster/03-ha-and-lb.md +++ b/docs/en/10-cluster/03-ha-and-lb.md @@ -27,7 +27,7 @@ There may be multiple dnodes in a cluster, but only one mnode can be started in SHOW MNODES; ``` -The end point and role/status (master, slave, unsynced, or offline) of all mnodes can be shown by the above command. When the first dnode is started in a cluster, there must be one mnode in this dnode. Without at least one mnode, the cluster cannot work. If `numOfMNodes` is configured to 2, another mnode will be started when the second dnode is launched. +The end point and role/status (leader, follower, unsynced, or offline) of all mnodes can be shown by the above command. When the first dnode is started in a cluster, there must be one mnode in this dnode. Without at least one mnode, the cluster cannot work. If `numOfMNodes` is configured to 2, another mnode will be started when the second dnode is launched. For the high availability of mnode, `numOfMnodes` needs to be configured to 2 or a higher value. Because the data consistency between mnodes must be guaranteed, the replica confirmation parameter `quorum` is set to 2 automatically if `numOfMNodes` is set to 2 or higher. @@ -58,13 +58,13 @@ When a dnode is offline, it can be detected by the TDengine cluster. There are t - If the dnode has been offline over the threshold configured in `offlineThreshold` in `taos.cfg`, the dnode will be removed from the cluster automatically. A system alert will be generated and automatic load balancing will be triggered if `balance` is set to 1. When the removed dnode is restarted and becomes online, it will not join the cluster automatically. The system administrator has to manually join the dnode to the cluster. :::note -If all the vnodes in a vgroup (or mnodes in mnode group) are in offline or unsynced status, the master node can only be voted on, after all the vnodes or mnodes in the group become online and can exchange status. Following this, the vgroup (or mnode group) is able to provide service. +If all the vnodes in a vgroup (or mnodes in mnode group) are in offline or unsynced status, a leader node can be elected only after all the vnodes or mnodes in the group become online and can exchange status. Following this, the vgroup (or mnode group) is able to provide service. ::: ## Arbitrator -The "arbitrator" component is used to address the special case when the number of replicas is set to an even number like 2,4 etc. If half of the vnodes in a vgroup don't work, it is impossible to vote and select a master node. This situation also applies to mnodes if the number of mnodes is set to an even number like 2,4 etc. +The "arbitrator" component is used to address the special case when the number of replicas is set to an even number like 2, 4, etc. If half of the vnodes in a vgroup don't work, it is impossible to vote and select a leader node. This situation also applies to mnodes if the number of mnodes is set to an even number like 2, 4, etc. To resolve this problem, a new arbitrator component named `tarbitrator`, an abbreviation of TDengine Arbitrator, was introduced.
The `tarbitrator` simulates a vnode or mnode but it's only responsible for network communication and doesn't handle any actual data access. As long as more than half of the vnode or mnode, including Arbitrator, are available the vnode group or mnode group can provide data insertion or query services normally. diff --git a/docs/en/14-reference/02-rest-api/02-rest-api.mdx b/docs/en/14-reference/02-rest-api/02-rest-api.mdx index 990af861961e9daf4ac775462e21d6d9852d17c1..fe18349a6dae3ad44772b4a30a2c3d4ad75b0f47 100644 --- a/docs/en/14-reference/02-rest-api/02-rest-api.mdx +++ b/docs/en/14-reference/02-rest-api/02-rest-api.mdx @@ -21,7 +21,7 @@ The following example is in an Ubuntu environment and uses the `curl` tool to ve The following example lists all databases on the host h1.taosdata.com. To use it in your environment, replace `h1.taosdata.com` and `6041` (the default port) with the actual running TDengine service FQDN and port number. ```html -curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d 'show databases;' h1.taosdata.com:6041/rest/sql +curl -L -H "Authorization: Basic cm9vdDp0YW9zZGF0YQ==" -d "show databases;" h1.taosdata.com:6041/rest/sql ``` The following return value results indicate that the verification passed. @@ -106,13 +106,13 @@ The HTTP request's BODY is a complete SQL command, and the data table in the SQL Use `curl` to initiate an HTTP request with a custom authentication method, with the following syntax. ```bash -curl -H 'Authorization: Basic ' -d '' :/rest/sql/[db_name] +curl -L -H "Authorization: Basic " -d "" :/rest/sql/[db_name] ``` Or ```bash -curl -u username:password -d '' :/rest/sql/[db_name] +curl -L -u username:password -d "" :/rest/sql/[db_name] ``` where `TOKEN` is the string after Base64 encoding of `{username}:{password}`, e.g. `root:taosdata` is encoded as `cm9vdDp0YW9zZGF0YQ==`. 
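+A quick way to generate this token is with the `base64` utility (a minimal sketch, assuming standard shell tools):
+
+```bash
+# Encode {username}:{password} for the Authorization header
+printf "root:taosdata" | base64
+# Output: cm9vdDp0YW9zZGF0YQ==
+```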
@@ -192,7 +192,7 @@ Response body: - query all records from table d1001 of database demo ```bash - curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d 'select * from demo.d1001' 192.168.0.1:6041/rest/sql + curl -L -H "Authorization: Basic cm9vdDp0YW9zZGF0YQ==" -d "select * from demo.d1001" 192.168.0.1:6041/rest/sql ``` Response body: @@ -218,7 +218,7 @@ Response body: - Create database demo: ```bash - curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d 'create database demo' 192.168.0.1:6041/rest/sql + curl -L -H "Authorization: Basic cm9vdDp0YW9zZGF0YQ==" -d "create database demo" 192.168.0.1:6041/rest/sql ``` Response body: @@ -240,7 +240,7 @@ Response body: When the HTTP request URL uses `/rest/sqlt`, the returned result set's timestamp value will be in Unix timestamp format, for example: ```bash -curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d 'select * from demo.d1001' 192.168.0.1:6041/rest/sqlt +curl -L -H "Authorization: Basic cm9vdDp0YW9zZGF0YQ==" -d "select * from demo.d1001" 192.168.0.1:6041/rest/sqlt ``` Response body: @@ -268,7 +268,7 @@ Response body: When the HTTP request URL uses `/rest/sqlutc`, the timestamp of the returned result set will be expressed as a UTC format, for example: ```bash - curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d 'select * from demo.t1' 192.168.0.1:6041/rest/sqlutc + curl -L -H "Authorization: Basic cm9vdDp0YW9zZGF0YQ==" -d "select * from demo.t1" 192.168.0.1:6041/rest/sqlutc ``` Response body: diff --git a/docs/en/14-reference/03-connector/rust.mdx b/docs/en/14-reference/03-connector/rust.mdx index a5cbaeac8077cda42690d9cc232062a685a51f41..56ca586c7e8ada6e4422596906e01887d4726fd0 100644 --- a/docs/en/14-reference/03-connector/rust.mdx +++ b/docs/en/14-reference/03-connector/rust.mdx @@ -250,7 +250,7 @@ The [Taos] structure is the connection manager in [libtaos] and provides two mai Column information is stored using [ColumnMeta]. - ``rust + ```rust let cols = &q.column_meta; for col in cols { println!("name: {}, type: {:?} , bytes: {}", col.name, col.type_, col.bytes); diff --git a/docs/en/14-reference/05-taosbenchmark.md b/docs/en/14-reference/05-taosbenchmark.md index 7cf1f95eb116b5f87b3bc1e05b647b9b0da3c544..0951717f5ae5f17676bd4aaefcd24f0da829c12c 100644 --- a/docs/en/14-reference/05-taosbenchmark.md +++ b/docs/en/14-reference/05-taosbenchmark.md @@ -7,7 +7,7 @@ description: "taosBenchmark (once called taosdemo ) is a tool for testing the pe ## Introduction -taosBenchmark (formerly taosdemo ) is a tool for testing the performance of TDengine products. taosBenchmark can test the performance of TDengine's insert, query, and subscription functions and simulate large amounts of data generated by many devices. taosBenchmark can flexibly control the number and type of databases, supertables, tag columns, number and type of data columns, and sub-tables, and types of databases, super tables, the number and types of data columns, the number of sub-tables, the amount of data per sub-table, the time interval for inserting data, the number of working threads, whether and how to insert disordered data, and so on. The installer provides taosdemo as a soft link to taosBenchmark for compatibility and for the convenience of past users. +taosBenchmark (formerly taosdemo ) is a tool for testing the performance of TDengine products. taosBenchmark can test the performance of TDengine's insert, query, and subscription functions and simulate large amounts of data generated by many devices. 
taosBenchmark can be configured to generate user-defined databases, supertables, subtables, and the time-series data to populate these for performance benchmarking. taosBenchmark is highly configurable, and some of the configurations include the time interval for inserting data, the number of working threads, and the capability to insert disordered data. The installer provides taosdemo as a soft link to taosBenchmark for compatibility with past users. ## Installation @@ -21,9 +21,9 @@ There are two ways to install taosBenchmark: ### Configuration and running methods -TaosBenchmark needs to be executed on the terminal of the operating system, it supports two configuration methods: [Command-line arguments](#Command-line arguments in detailed) and [JSON configuration file](#Configuration file arguments in detailed). These two methods are mutually exclusive. Users can use `-f ` to specify a configuration file. When running taosBenchmark with command-line arguments to control its behavior, users should use other parameters for configuration, but not the `-f` parameter. In addition, taosBenchmark offers a special way of running without parameters. +taosBenchmark needs to be executed from a terminal of the operating system. It supports two configuration methods: [Command-line arguments](#command-line-arguments-in-detail) and [JSON configuration file](#configuration-file-parameters-in-detail). These two methods are mutually exclusive. Users can use `-f ` to specify a configuration file. When running taosBenchmark with command-line arguments to control its behavior, users should use other parameters for configuration, but not the `-f` parameter. In addition, taosBenchmark offers a special way of running without parameters. -taosBenchmark supports complete performance testing of TDengine. taosBenchmark supports the TDengine functions in three categories: write, query, and subscribe. These three functions are mutually exclusive, and users can select only one of them each time taosBenchmark runs. It is important to note that the type of functionality to be tested is not configurable when using the command-line configuration method, which can only test writing performance. To test the query and subscription performance of the TDengine, you must use the configuration file method and specify the function type to test via the parameter `filetype` in the configuration file. +taosBenchmark supports complete performance testing of TDengine by providing the functionality to write, query, and subscribe. These three functions are mutually exclusive, and users can select only one of them each time taosBenchmark runs. The query and subscribe functionalities are only configurable using a JSON configuration file, by specifying the parameter `filetype`, while writing can be performed through both the command line and a configuration file. **Make sure that the TDengine cluster is running correctly before running taosBenchmark. ** @@ -57,9 +57,8 @@ Use the following command-line to run taosBenchmark and control its behavior via taosBenchmark -f ``` -**Here are a few examples of configuration files:** - -#### Example of inserting a scenario JSON configuration file +#### Configuration file examples +##### Example of inserting a scenario JSON configuration file
insert.json @@ -70,7 +69,7 @@ taosBenchmark -f
-#### Query Scenario JSON Profile Example +##### Query Scenario JSON Profile Example
query.json @@ -81,7 +80,7 @@ taosBenchmark -f
-#### Subscription JSON configuration example +##### Subscription JSON configuration example
subscribe.json @@ -172,7 +171,7 @@ taosBenchmark -A INT,DOUBLE,NCHAR,BINARY\(16\) Switch parameter specifying whether to use escape characters in the super table and sub-table names. By default is not used. - **-C/--chinese** : - Switch specifying whether to use Unicode Chinese characters in nchar and binary. By default is not used. + Specify whether to use Unicode Chinese characters in nchar and binary. The default is no. - **-N/--normal-table** : This parameter indicates that taosBenchmark will create only normal tables instead of super tables. The default value is false. It can be used if the insert mode is taosc, stmt, and rest. @@ -373,7 +376,7 @@ The configuration parameters for querying the sub-tables or the normal tables ar - **sqls**. - **sql**: the SQL command to be executed. - - **result**: the file to save the query result. If it is unspecified, taosBenchark will not save the result. + - **result**: the file to save the query result. If it is unspecified, taosBenchmark will not save the result. #### Configuration parameters of query super table diff --git a/docs/en/14-reference/06-taosdump.md b/docs/en/14-reference/06-taosdump.md index 5403e40925f633ce62795cc6037fc8c8f7aad07a..96e68d0edbf9a2f6880cc557580d3dfb20def947 100644 --- a/docs/en/14-reference/06-taosdump.md +++ b/docs/en/14-reference/06-taosdump.md @@ -36,6 +36,8 @@ There are two ways to install taosdump: :::tip - taosdump versions after 1.4.1 provide the `-I` argument for parsing Avro file schema and data. If users specify `-s` then only taosdump will parse schema. - Backups after taosdump 1.4.2 use the batch count specified by the `-B` parameter. The default value is 16384. If, in some environments, low network speed or disk performance causes "Error actual dump ... batch ...", then try changing the `-B` parameter to a smaller value. +- The export of taosdump does not support resuming from an interruption. Therefore, if the taosdump process terminates unexpectedly, delete all related files that have been exported or generated. +- The import of taosdump supports resuming from an interruption, but when the process resumes, you will receive some "table already exists" messages, which can be ignored; see the sketch below.
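+A hypothetical round trip illustrating both tips (the database name `demo` and the `./dump` directory are assumptions; verify the `-D`, `-o`, and `-i` flags against `taosdump --help` for your version):
+
+```bash
+# Export database demo with a smaller batch size for slow networks.
+# If the export is interrupted, remove ./dump and start over.
+taosdump -D demo -o ./dump -B 1024
+
+# Import; on a resumed run, "table already exists" messages can be ignored.
+taosdump -i ./dump
+```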
::: diff --git a/docs/en/14-reference/07-tdinsight/assets/15146-tdengine-monitor-dashboard.json b/docs/en/14-reference/07-tdinsight/assets/15146-tdengine-monitor-dashboard.json index f651983528ca824b4e6b14586aac5a5bfb4ecab8..54dc1062d6440cc0fc7b8c69d9e4c6b53e4cd01e 100644 --- a/docs/en/14-reference/07-tdinsight/assets/15146-tdengine-monitor-dashboard.json +++ b/docs/en/14-reference/07-tdinsight/assets/15146-tdengine-monitor-dashboard.json @@ -211,7 +211,7 @@ ], "timeFrom": null, "timeShift": null, - "title": "Master MNode", + "title": "Leader MNode", "transformations": [ { "id": "filterByValue", @@ -221,7 +221,7 @@ "config": { "id": "regex", "options": { - "value": "master" + "value": "leader" } }, "fieldName": "role" @@ -300,7 +300,7 @@ ], "timeFrom": null, "timeShift": null, - "title": "Master MNode Create Time", + "title": "Leader MNode Create Time", "transformations": [ { "id": "filterByValue", @@ -310,7 +310,7 @@ "config": { "id": "regex", "options": { - "value": "master" + "value": "leader" } }, "fieldName": "role" diff --git a/docs/en/14-reference/07-tdinsight/assets/tdengine-grafana-7.x.json b/docs/en/14-reference/07-tdinsight/assets/tdengine-grafana-7.x.json index b4254c428b28a0084e54b5e3c509dd2e0ec651b9..1add8522a712aa2cfef6187e577c42d205432b66 100644 --- a/docs/en/14-reference/07-tdinsight/assets/tdengine-grafana-7.x.json +++ b/docs/en/14-reference/07-tdinsight/assets/tdengine-grafana-7.x.json @@ -153,7 +153,7 @@ ], "timeFrom": null, "timeShift": null, - "title": "Master MNode", + "title": "Leader MNode", "transformations": [ { "id": "filterByValue", @@ -163,7 +163,7 @@ "config": { "id": "regex", "options": { - "value": "master" + "value": "leader" } }, "fieldName": "role" @@ -246,7 +246,7 @@ ], "timeFrom": null, "timeShift": null, - "title": "Master MNode Create Time", + "title": "Leader MNode Create Time", "transformations": [ { "id": "filterByValue", @@ -256,7 +256,7 @@ "config": { "id": "regex", "options": { - "value": "master" + "value": "leader" } }, "fieldName": "role" diff --git a/docs/en/14-reference/07-tdinsight/index.md b/docs/en/14-reference/07-tdinsight/index.md index cebfafa225e6e8de75ff84bb51fa664784177910..e74c9de7b2aa71278a99d45f250e0dcaf86d4704 100644 --- a/docs/en/14-reference/07-tdinsight/index.md +++ b/docs/en/14-reference/07-tdinsight/index.md @@ -274,8 +274,8 @@ Details of the metrics are as follows. This section contains the current information and status of the cluster, the alert information is also here (from left to right, top to bottom). - **First EP**: the `firstEp` setting in the current TDengine cluster. -- **Version**: TDengine server version (master mnode). -- **Master Uptime**: The time elapsed since the current Master MNode was elected as Master. +- **Version**: TDengine server version (leader mnode). +- **Leader Uptime**: The time elapsed since the current Leader MNode was elected as Leader. - **Expire Time** - Enterprise version expiration time. - **Used Measuring Points** - The number of measuring points used by the Enterprise Edition. - **Databases** - The number of databases. @@ -333,7 +333,7 @@ Data node resource usage display with repeated multiple rows for the variable `$ 2. **Has MNodes?**: whether the current dnode is a mnode. 3. **CPU Cores**: the number of CPU cores. 4. **VNodes Number**: the number of VNodes in the current dnode. -5. **VNodes Masters**: the number of vnodes in the master role. +5. **VNodes Masters**: the number of vnodes in the leader role. 6. **Current CPU Usage of taosd**: CPU usage rate of taosd processes. 7. 
**Current Memory Usage of taosd**: memory usage of taosd processes. 8. **Disk Used**: The total disk usage percentage of the taosd data directory. diff --git a/docs/en/14-reference/12-directory.md b/docs/en/14-reference/12-directory.md index 304e3bcb434ee9a6ba338577a4d1ba546b548e3f..d6cffd22e054a759e67d34dd3e8fbb1a8585569c 100644 --- a/docs/en/14-reference/12-directory.md +++ b/docs/en/14-reference/12-directory.md @@ -26,7 +26,6 @@ All executable files of TDengine are in the _/usr/local/taos/bin_ directory by d - _remove.sh_: script to uninstall TDengine, please execute it carefully, link to the **rmtaos** command in the /usr/bin directory. Will remove the TDengine installation directory `/usr/local/taos`, but will keep `/etc/taos`, `/var/lib/taos`, `/var/log/taos` - _taosadapter_: server-side executable that provides RESTful services and accepts writing requests from a variety of other softwares - _tarbitrator_: provides arbitration for two-node cluster deployments -- _run_taosd_and_taosadapter.sh_: script to start both taosd and taosAdapter - _TDinsight.sh_: script to download TDinsight and install it - _set_core.sh_: script for setting up the system to generate core dump files for easy debugging - _taosd-dump-cfg.gdb_: script to facilitate debugging of taosd's gdb execution. diff --git a/docs/en/20-third-party/01-grafana.mdx b/docs/en/20-third-party/01-grafana.mdx index b51d5a8d904601802efec0db5847203b72fa2668..696be9e4d5bd3e53619a55d02ef4b8dce67fce94 100644 --- a/docs/en/20-third-party/01-grafana.mdx +++ b/docs/en/20-third-party/01-grafana.mdx @@ -31,38 +31,41 @@ TDengine currently supports Grafana versions 7.5 and above. Users can go to the ### Install Grafana Plugin and Configure Data Source - + -Set the url and authorization environment variables by `export` or a [`.env`(dotenv) file](https://hexdocs.pm/dotenvy/dotenv-file-format.html): +Under Grafana 8, the plugin catalog allows you to [browse and manage plugins within Grafana](https://grafana.com/docs/grafana/next/administration/plugin-management/#plugin-catalog) (but for Grafana 7.x, use **With Script** or **Install & Configure Manually**). Find the page at **Configurations > Plugins**, search for **TDengine**, and click it to install. -```sh -export TDENGINE_API=http://tdengine.local:6041 -# user + password -export TDENGINE_USER=user -export TDENGINE_PASSWORD=password - -# Other useful variables -# - If to install TDengine data source, default is true -export TDENGINE_DS_ENABLED=false -# - Data source name to be created, default is TDengine -export TDENGINE_DS_NAME=TDengine -# - Data source organization id, default is 1 -export GF_ORG_ID=1 -# - Data source is editable in admin ui or not, default is 0 (false) -export TDENGINE_EDITABLE=1 -``` +![Search tdengine in grafana plugins](./grafana/grafana-plugin-search-tdengine.png) + +Installation may take a few minutes; then you can **Create a TDengine data source**: + +![Install and configure Grafana data source](./grafana/grafana-install-and-config.png) + +Then you can add a TDengine data source by filling in the configuration options. + +![TDengine Database Grafana plugin add data source](./grafana/grafana-data-source.png) + +You can create dashboards with TDengine now. + + + -Run `install.sh`: +On a server with Grafana installed, running `install.sh` with the TDengine URL and username/password will install the TDengine data source plugin and add a data source named TDengine.
This is the recommended way for Grafana 7.x or [Grafana provisioning](https://grafana.com/docs/grafana/latest/administration/provisioning/) users. ```sh -bash -c "$(curl -fsSL https://raw.githubusercontent.com/taosdata/grafanaplugin/master/install.sh)" +bash -c "$(curl -fsSL \ + https://raw.githubusercontent.com/taosdata/grafanaplugin/master/install.sh)" -- \ + -a http://localhost:6041 \ + -u root \ + -p taosdata ``` -With this script, TDengine data source plugin and the Grafana data source will be installed and created automatically with Grafana provisioning configurations. Save the script and type `./install.sh --help` for the full usage of the script. +Restart the Grafana service and open Grafana in a web browser, usually at `http://localhost:3000`. -And then, restart Grafana service and open Grafana in web-browser, usually `http://localhost:3000`. +Save the script and type `./install.sh --help` for the full usage of the script. + Follow the installation steps in [Grafana](https://grafana.com/grafana/plugins/tdengine-datasource/?tab=installation) with the [``grafana-cli`` command-line tool](https://grafana.com/docs/grafana/latest/administration/cli/) for plugin installation. @@ -115,6 +118,73 @@ Click `Save & Test` to test. You should see a success message if the test worked ![TDengine Database TDinsight plugin add database 4](./grafana/add_datasource4.webp) + + + +Please refer to [Install plugins in the Docker container](https://grafana.com/docs/grafana/next/setup-grafana/installation/docker/#install-plugins-in-the-docker-container). This will install the `tdengine-datasource` plugin when the Grafana container starts: + +```bash +docker run -d \ + -p 3000:3000 \ + --name=grafana \ + -e "GF_INSTALL_PLUGINS=tdengine-datasource" \ + grafana/grafana +``` + +You can set up a zero-configuration stack for TDengine + Grafana with a [docker-compose](https://docs.docker.com/compose/) file and [Grafana provisioning](https://grafana.com/docs/grafana/latest/administration/provisioning/): + +1. Save the provisioning configuration file to `tdengine.yml`. + + ```yml + apiVersion: 1 + datasources: + - name: TDengine + type: tdengine-datasource + orgId: 1 + url: "$TDENGINE_API" + isDefault: true + secureJsonData: + url: "$TDENGINE_URL" + basicAuth: "$TDENGINE_BASIC_AUTH" + token: "$TDENGINE_CLOUD_TOKEN" + version: 1 + editable: true + ``` + +2. Write `docker-compose.yml` with [TDengine](https://hub.docker.com/r/tdengine/tdengine) and [Grafana](https://hub.docker.com/r/grafana/grafana) images. + + ```yml + version: "3.7" + + services: + tdengine: + image: tdengine/tdengine:2.6.0.2 + environment: + TAOS_FQDN: tdengine + volumes: + - tdengine-data:/var/lib/taos/ + grafana: + image: grafana/grafana:8.5.6 + volumes: + - ./tdengine.yml:/etc/grafana/provisioning/datasources/tdengine.yml + - grafana-data:/var/lib/grafana + environment: + # install tdengine plugin at start + GF_INSTALL_PLUGINS: "tdengine-datasource" + TDENGINE_URL: "http://tdengine:6041" + #printf "$TDENGINE_USER:$TDENGINE_PASSWORD" | base64 + TDENGINE_BASIC_AUTH: "cm9vdDp0YW9zZGF0YQ==" + ports: + - 3000:3000 + volumes: + grafana-data: + tdengine-data: + ``` + +3. Start TDengine and Grafana services: `docker-compose up -d`. + +Open Grafana at `http://localhost:3000`, and you can now add dashboards with TDengine.
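+To confirm that the provisioned data source was registered, one option is Grafana's HTTP API (a hedged check, assuming the default `admin:admin` credentials):
+
+```bash
+# List configured data sources; a TDengine entry should appear
+curl -u admin:admin http://localhost:3000/api/datasources
+```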
+ diff --git a/docs/en/20-third-party/grafana/grafana-data-source.png b/docs/en/20-third-party/grafana/grafana-data-source.png new file mode 100644 index 0000000000000000000000000000000000000000..989ffcca0bf5baae8798b0695e259aca35f0442a Binary files /dev/null and b/docs/en/20-third-party/grafana/grafana-data-source.png differ diff --git a/docs/en/20-third-party/grafana/grafana-install-and-config.png b/docs/en/20-third-party/grafana/grafana-install-and-config.png new file mode 100644 index 0000000000000000000000000000000000000000..b918da8b2d62e694fe1797e09cf8f23f103bc97e Binary files /dev/null and b/docs/en/20-third-party/grafana/grafana-install-and-config.png differ diff --git a/docs/en/20-third-party/grafana/grafana-plugin-search-tdengine.png b/docs/en/20-third-party/grafana/grafana-plugin-search-tdengine.png new file mode 100644 index 0000000000000000000000000000000000000000..cf3b66977b64f7dcd617f06024a66066cd62810e Binary files /dev/null and b/docs/en/20-third-party/grafana/grafana-plugin-search-tdengine.png differ diff --git a/docs/en/21-tdinternal/01-arch.md b/docs/en/21-tdinternal/01-arch.md index 4d8bed4d2d6b3a0404e10213aeab599767325cc2..d7d472eb98a22325e850f4f040dccaa34d02bbff 100644 --- a/docs/en/21-tdinternal/01-arch.md +++ b/docs/en/21-tdinternal/01-arch.md @@ -22,9 +22,9 @@ A complete TDengine system runs on one or more physical nodes. Logically, it inc **Virtual node (vnode)**: To better support data sharding, load balancing and prevent data from overheating or skewing, data nodes are virtualized into multiple virtual nodes (vnode, V2, V3, V4, etc. in the figure). Each vnode is a relatively independent work unit, which is the basic unit of time-series data storage and has independent running threads, memory space and persistent storage path. A vnode contains a certain number of tables (data collection points). When a new table is created, the system checks whether a new vnode needs to be created. The number of vnodes that can be created on a data node depends on the capacity of the hardware of the physical node where the data node is located. A vnode belongs to only one DB, but a DB can have multiple vnodes. In addition to the stored time-series data, a vnode also stores the schema and tag values of the included tables. A virtual node is uniquely identified in the system by the EP of the data node and the VGroup ID to which it belongs and is created and managed by the management node. -**Management node (mnode)**: A virtual logical unit responsible for monitoring and maintaining the running status of all data nodes and load balancing among nodes (M in the figure). At the same time, the management node is also responsible for the storage and management of metadata (including users, databases, tables, static tags, etc.), so it is also called Meta Node. Multiple (up to 5) mnodes can be configured in a TDengine cluster, and they are automatically constructed into a virtual management node group (M0, M1, M2 in the figure). The master/slave mechanism is adopted for the mnode group and the data synchronization is carried out in a strongly consistent way. Any data update operation can only be executed on the master. The creation of mnode cluster is completed automatically by the system without manual intervention. There is at most one mnode on each dnode, which is uniquely identified by the EP of the data node to which it belongs. Each dnode automatically obtains the EP of the dnode where all mnodes in the whole cluster are located, through internal messaging interaction. 
+**Management node (mnode)**: A virtual logical unit responsible for monitoring and maintaining the running status of all data nodes and load balancing among nodes (M in the figure). At the same time, the management node is also responsible for the storage and management of metadata (including users, databases, tables, static tags, etc.), so it is also called Meta Node. Multiple (up to 5) mnodes can be configured in a TDengine cluster, and they are automatically constructed into a virtual management node group (M0, M1, M2 in the figure). The leader/follower mechanism is adopted for the mnode group and the data synchronization is carried out in a strongly consistent way. Any data update operation can only be executed on the leader. The creation of mnode cluster is completed automatically by the system without manual intervention. There is at most one mnode on each dnode, which is uniquely identified by the EP of the data node to which it belongs. Each dnode automatically obtains the EP of the dnode where all mnodes in the whole cluster are located, through internal messaging interaction. -**Virtual node group (VGroup)**: Vnodes on different data nodes can form a virtual node group to ensure the high availability of the system. The virtual node group is managed in a master/slave mechanism. Write operations can only be performed on the master vnode, and then replicated to slave vnodes, thus ensuring that one single replica of data is copied on multiple physical nodes. The number of virtual nodes in a vgroup equals the number of data replicas. If the number of replicas of a DB is N, the system must have at least N data nodes. The number of replicas can be specified by the parameter `“replica”` when creating a DB, and the default is 1. Using the multi-replication feature of TDengine, the same high data reliability can be achieved without the need for expensive storage devices such as disk arrays. Virtual node groups are created and managed by the management node, and the management node assigns a system unique ID, aka VGroup ID. If two virtual nodes have the same vnode group ID, it means that they belong to the same group and the data is backed up to each other. The number of virtual nodes in a virtual node group can be dynamically changed, allowing only one, that is, no data replication. VGroup ID is never changed. Even if a virtual node group is deleted, its ID will not be reused. +**Virtual node group (VGroup)**: Vnodes on different data nodes can form a virtual node group to ensure the high availability of the system. The virtual node group is managed in a leader/follower mechanism. Write operations can only be performed on the leader vnode, and then replicated to follower vnodes, thus ensuring that one single replica of data is copied on multiple physical nodes. The number of virtual nodes in a vgroup equals the number of data replicas. If the number of replicas of a DB is N, the system must have at least N data nodes. The number of replicas can be specified by the parameter `“replica”` when creating a DB, and the default is 1. Using the multi-replication feature of TDengine, the same high data reliability can be achieved without the need for expensive storage devices such as disk arrays. Virtual node groups are created and managed by the management node, and the management node assigns a system unique ID, aka VGroup ID. If two virtual nodes have the same vnode group ID, it means that they belong to the same group and the data is backed up to each other. 
The number of virtual nodes in a virtual node group can be dynamically changed, allowing only one, that is, no data replication. VGroup ID is never changed. Even if a virtual node group is deleted, its ID will not be reused. **TAOSC**: TAOSC is the driver provided by TDengine to applications. It is responsible for dealing with the interaction between application and cluster, and provides the native interface for the C/C++ language. It is also embedded in the JDBC, C #, Python, Go, Node.js language connection libraries. Applications interact with the whole cluster through TAOSC instead of directly connecting to data nodes in the cluster. This module is responsible for obtaining and caching metadata; forwarding requests for insertion, query, etc. to the correct data node; when returning the results to the application, TAOSC also needs to be responsible for the final level of aggregation, sorting, filtering and other operations. For JDBC, C/C++/C#/Python/Go/Node.js interfaces, this module runs on the physical node where the application is located. At the same time, in order to support the fully distributed RESTful interface, TAOSC has a running instance on each dnode of TDengine cluster. @@ -62,13 +62,13 @@ To explain the relationship between vnode, mnode, TAOSC and application and thei 1. Application initiates a request to insert data through JDBC, ODBC, or other APIs. 2. TAOSC checks the cache to see if meta data exists for the table. If it does, it goes straight to Step 4. If not, TAOSC sends a get meta-data request to mnode. 3. Mnode returns the meta-data of the table to TAOSC. Meta-data contains the schema of the table, and also the vgroup information to which the table belongs (the vnode ID and the End Point of the dnode where the table belongs. If the number of replicas is N, there will be N groups of End Points). If TAOSC does not receive a response from the mnode for a long time, and there are multiple mnodes, TAOSC will send a request to the next mnode. -4. TAOSC initiates an insert request to master vnode. +4. TAOSC initiates an insert request to leader vnode. 5. After vnode inserts the data, it gives a reply to TAOSC, indicating that the insertion is successful. If TAOSC doesn't get a response from vnode for a long time, TAOSC will treat this node as offline. In this case, if there are multiple replicas of the inserted database, TAOSC will issue an insert request to the next vnode in vgroup. 6. TAOSC notifies APP that writing is successful. For Step 2 and 3, when TAOSC starts, it does not know the End Point of mnode, so it will directly initiate a request to the configured serving End Point of the cluster. If the dnode that receives the request does not have a mnode configured, it will reply with the mnode EP list, so that TAOSC will re-issue a request to obtain meta-data to the EP of another mnode. -For Step 4 and 5, without caching, TAOSC can't recognize the master in the virtual node group, so assumes that the first vnode is the master and sends a request to it. If this vnode is not the master, it will reply to the actual master as a new target to which TAOSC shall send a request. Once a response of successful insertion is obtained, TAOSC will cache the information of master node. +For Step 4 and 5, without caching, TAOSC can't recognize the leader in the virtual node group, so assumes that the first vnode is the leader and sends a request to it. If this vnode is not the leader, it will reply to the actual leader as a new target to which TAOSC shall send a request. 
Once a response of successful insertion is obtained, TAOSC will cache the information of leader node. The above describes the process of inserting data. The processes of querying and computing are the same. TAOSC encapsulates and hides all these complicated processes, and it is transparent to applications. @@ -119,65 +119,65 @@ The load balancing process does not require any manual intervention, and it is t ## Data Writing and Replication Process -If a database has N replicas, a virtual node group has N virtual nodes. But only one is the Master and all others are slaves. When the application writes a new record to system, only the Master vnode can accept the writing request. If a slave vnode receives a writing request, the system will notifies TAOSC to redirect. +If a database has N replicas, a virtual node group has N virtual nodes. But only one is the Leader and all others are followers. When the application writes a new record to the system, only the Leader vnode can accept the writing request. If a follower vnode receives a writing request, the system will notify TAOSC to redirect. -### Master vnode Writing Process +### Leader vnode Writing Process -Master Vnode uses a writing process as follows: +Leader vnode uses a writing process as follows: -![TDengine Database Master Writing Process](write_master.webp) -
Figure 3: TDengine Master writing process
+![TDengine Database Leader Writing Process](write_master.webp) +
Figure 3: TDengine Leader writing process
-1. Master vnode receives the application data insertion request, verifies, and moves to next step; +1. Leader vnode receives the application data insertion request, verifies, and moves to the next step; 2. If the system configuration parameter `“walLevel”` is greater than 0, vnode will write the original request packet into database log file WAL. If walLevel is set to 2 and fsync is set to 0, TDengine will make WAL data written immediately to ensure that even system goes down, all data can be recovered from database log file; -3. If there are multiple replicas, vnode will forward data packet to slave vnodes in the same virtual node group, and the forwarded packet has a version number with data; +3. If there are multiple replicas, vnode will forward the data packet to follower vnodes in the same virtual node group, and the forwarded packet has a version number with data; 4. Write into memory and add the record to “skip list”; -5. Master vnode returns a confirmation message to the application, indicating a successful write. +5. Leader vnode returns a confirmation message to the application, indicating a successful write. 6. If any of Step 2, 3 or 4 fails, the error will directly return to the application. -### Slave vnode Writing Process +### Follower vnode Writing Process -For a slave vnode, the write process as follows: +For a follower vnode, the write process is as follows: -![TDengine Database Slave Writing Process](write_slave.webp) -
Figure 4: TDengine Slave Writing Process
+![TDengine Database Follower Writing Process](write_slave.webp) +
Figure 4: TDengine Follower Writing Process
-1. Slave vnode receives a data insertion request forwarded by Master vnode; +1. Follower vnode receives a data insertion request forwarded by Leader vnode; 2. If the system configuration parameter `“walLevel”` is greater than 0, vnode will write the original request packet into database log file WAL. If walLevel is set to 2 and fsync is set to 0, TDengine will make WAL data written immediately to ensure that even system goes down, all data can be recovered from database log file; 3. Write into memory and add the record to “skip list”. -Compared with Master vnode, slave vnode has no forwarding or reply confirmation step, means two steps less. But writing into memory and WAL is exactly the same. +Compared with the Leader vnode, a follower vnode has no forwarding or reply confirmation step, which means two steps fewer. But writing into memory and WAL is exactly the same. ### Remote Disaster Recovery and IDC (Internet Data Center) Migration -As discussed above, TDengine writes using Master and Slave processes. TDengine adopts asynchronous replication for data synchronization. This method can greatly improve write performance, with no obvious impact from network delay. By configuring IDC and rack number for each physical node, it can be ensured that for a virtual node group, virtual nodes are composed of physical nodes from different IDC and different racks, thus implementing remote disaster recovery without other tools. +As discussed above, TDengine writes using Leader and Follower processes. TDengine adopts asynchronous replication for data synchronization. This method can greatly improve write performance, with no obvious impact from network delay. By configuring IDC and rack number for each physical node, it can be ensured that for a virtual node group, virtual nodes are composed of physical nodes from different IDC and different racks, thus implementing remote disaster recovery without other tools. -On the other hand, TDengine supports dynamic modification of the replica number. Once the number of replicas increases, the newly added virtual nodes will immediately enter the data synchronization process. After synchronization is complete, added virtual nodes can provide services. In the synchronization process, master and other synchronized virtual nodes keep serving. With this feature, TDengine can provide IDC migration without service interruption. It is only necessary to add new physical nodes to the existing IDC cluster, and then remove old physical nodes after the data synchronization is completed. +On the other hand, TDengine supports dynamic modification of the replica number. Once the number of replicas increases, the newly added virtual nodes will immediately enter the data synchronization process. After synchronization is complete, added virtual nodes can provide services. In the synchronization process, the leader and other synchronized virtual nodes keep serving. With this feature, TDengine can provide IDC migration without service interruption. It is only necessary to add new physical nodes to the existing IDC cluster, and then remove old physical nodes after the data synchronization is completed. However, the asynchronous replication has a very low probability scenario where data may be lost. The specific scenario is as follows: -1. Master vnode has finished its 5-step operations, confirmed the success of writing to APP, and then goes down; -2. Slave vnode receives the write request, then processing fails before writing to the log in Step 2; -3. 
Slave vnode will become the new master, thus losing one record. +1. Leader vnode has finished its 5-step operations, confirmed the success of writing to APP, and then goes down; +2. Follower vnode receives the write request, then processing fails before writing to the log in Step 2; +3. Follower vnode will become the new leader, thus losing one record. In theory, for asynchronous replication, there is no guarantee to prevent data loss. However, this is an extremely low probability scenario as described above. Note: Remote disaster recovery and no-downtime IDC migration are only supported by Enterprise Edition. **Hint: This function is not available yet** -### Master/slave Selection +### Leader/follower Selection Vnode maintains a version number. When memory data is persisted, the version number will also be persisted. For each data update operation, whether it is time-series data or metadata, this version number will be increased by one. -When a vnode starts, the roles (master, slave) are uncertain, and the data is in an unsynchronized state. It’s necessary to establish TCP connections with other nodes in the virtual node group and exchange status, including version and its own roles. Through the exchange, the system implements a master-selection process. The rules are as follows: +When a vnode starts, the roles (leader, follower) are uncertain, and the data is in an unsynchronized state. It’s necessary to establish TCP connections with other nodes in the virtual node group and exchange status, including version and its own roles. Through the exchange, the system implements a leader-selection process. The rules are as follows: -1. If there’s only one replica, it’s always master -2. When all replicas are online, the one with latest version is master -3. Over half of online nodes are virtual nodes, and some virtual node is slave, it will automatically become master -4. For 2 and 3, if multiple virtual nodes meet the requirement, the first vnode in virtual node group list will be selected as master. +1. If there’s only one replica, it’s always the leader. +2. When all replicas are online, the one with the latest version is the leader. +3. If more than half of the virtual nodes are online and one of them is a follower, it will automatically become the leader. +4. For 2 and 3, if multiple virtual nodes meet the requirement, the first vnode in the virtual node group list will be selected as the leader. ### Synchronous Replication -For scenarios with strong data consistency requirements, asynchronous data replication is not applicable, because there is a small probability of data loss. So, TDengine provides a synchronous replication mechanism for users. When creating a database, in addition to specifying the number of replicas, user also needs to specify a new parameter “quorum”. 
If quorum is greater than one, it means that every time the Leader forwards a message to the replica, it needs to wait for “quorum-1” reply confirmations before informing the application that data has been successfully written to the followers. If “quorum-1” reply confirmations are not received within a certain period of time, the leader vnode will return an error to the application. With synchronous replication, performance of system will decrease and latency will increase. Because metadata needs strong consistency, the default for data synchronization between mnodes is synchronous replication. diff --git a/docs/en/27-train-faq/03-docker.md b/docs/en/27-train-faq/03-docker.md index afee13c1377b0b4331d6f7ec20251d1aa2db81a1..0378fffb8bdbc4cae8d4d2176ec3d745a548c2fe 100644 --- a/docs/en/27-train-faq/03-docker.md +++ b/docs/en/27-train-faq/03-docker.md @@ -109,7 +109,7 @@ taos> It's also able to access the REST interface provided by TDengine in container from the host. ``` -curl -u root:taosdata -d 'show databases' 127.0.0.1:6041/rest/sql +curl -L -u root:taosdata -d "show databases" 127.0.0.1:6041/rest/sql ``` Output is like below: @@ -147,7 +147,7 @@ docker run -d --name tdengine-taosd -p 6030-6042:6030-6042 -p 6030-6042:6030-604 - Verify the REST interface: ```bash -curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d 'show databases;' 127.0.0.1:6041/rest/sql +curl -L -H "Authorization: Basic cm9vdDp0YW9zZGF0YQ==" -d "show databases;" 127.0.0.1:6041/rest/sql ``` Below is an example output: diff --git a/docs/zh/14-reference/02-rest-api/02-rest-api.mdx b/docs/zh/14-reference/02-rest-api/02-rest-api.mdx index 43099319b9c5bb1420c199cfa9f7def0b2c44d3d..a8a92606e4aadf7298359023e739d568788094fd 100644 --- a/docs/zh/14-reference/02-rest-api/02-rest-api.mdx +++ b/docs/zh/14-reference/02-rest-api/02-rest-api.mdx @@ -21,7 +21,7 @@ RESTful 接口不依赖于任何 TDengine 的库,因此客户端不需要安 下面示例是列出所有的数据库,请把 h1.taosdata.com 和 6041(缺省值)替换为实际运行的 TDengine 服务 FQDN 和端口号: ```html -curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d 'show databases;' h1.taosdata.com:6041/rest/sql +curl -L -H "Authorization: Basic cm9vdDp0YW9zZGF0YQ==" -d "show databases;" h1.taosdata.com:6041/rest/sql ``` 返回值结果如下表示验证通过: @@ -106,13 +106,13 @@ HTTP 请求的 BODY 里就是一个完整的 SQL 语句,SQL 语句中的数据 使用 `curl` 通过自定义身份认证方式来发起一个 HTTP Request,语法如下: ```bash -curl -H 'Authorization: Basic ' -d '' :/rest/sql/[db_name] +curl -L -H "Authorization: Basic " -d "" :/rest/sql/[db_name] ``` 或者 ```bash -curl -u username:password -d '' :/rest/sql/[db_name] +curl -L -u username:password -d "" :/rest/sql/[db_name] ``` 其中,`TOKEN` 为 `{username}:{password}` 经过 Base64 编码之后的字符串,例如 `root:taosdata` 编码后为 `cm9vdDp0YW9zZGF0YQ==` @@ -192,7 +192,7 @@ curl http://192.168.0.1:6041/rest/login/root/taosdata - 在 demo 库里查询表 d1001 的所有记录: ```bash - curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d 'select * from demo.d1001' 192.168.0.1:6041/rest/sql + curl -L -H "Authorization: Basic cm9vdDp0YW9zZGF0YQ==" -d "select * from demo.d1001" 192.168.0.1:6041/rest/sql ``` 返回值: @@ -218,7 +218,7 @@ curl http://192.168.0.1:6041/rest/login/root/taosdata - 创建库 demo: ```bash - curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d 'create database demo' 192.168.0.1:6041/rest/sql + curl -L -H "Authorization: Basic cm9vdDp0YW9zZGF0YQ==" -d "create database demo" 192.168.0.1:6041/rest/sql ``` 返回值: @@ -240,7 +240,7 @@ curl http://192.168.0.1:6041/rest/login/root/taosdata HTTP 请求 URL 采用 `/rest/sqlt` 时,返回结果集的时间戳将采用 Unix 时间戳格式表示,例如 ```bash -curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d 'select * from demo.d1001
@@ -192,7 +192,7 @@ curl http://192.168.0.1:6041/rest/login/root/taosdata
 - Query all records of table d1001 in the demo database:
 
   ```bash
-  curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d 'select * from demo.d1001' 192.168.0.1:6041/rest/sql
+  curl -L -H "Authorization: Basic cm9vdDp0YW9zZGF0YQ==" -d "select * from demo.d1001" 192.168.0.1:6041/rest/sql
   ```
 
   Response:
@@ -218,7 +218,7 @@ curl http://192.168.0.1:6041/rest/login/root/taosdata
 - Create the demo database:
 
   ```bash
-  curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d 'create database demo' 192.168.0.1:6041/rest/sql
+  curl -L -H "Authorization: Basic cm9vdDp0YW9zZGF0YQ==" -d "create database demo" 192.168.0.1:6041/rest/sql
   ```
 
   Response:
@@ -240,7 +240,7 @@ curl http://192.168.0.1:6041/rest/login/root/taosdata
 When the HTTP request URL uses `/rest/sqlt`, timestamps in the returned result set are expressed in Unix timestamp format, for example:
 
 ```bash
-curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d 'select * from demo.d1001' 192.168.0.1:6041/rest/sqlt
+curl -L -H "Authorization: Basic cm9vdDp0YW9zZGF0YQ==" -d "select * from demo.d1001" 192.168.0.1:6041/rest/sqlt
 ```
 
 Result:
@@ -268,7 +268,7 @@ curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d 'select * from demo.d1001
 When the HTTP request URL uses `/rest/sqlutc`, timestamps in the returned result set are expressed as UTC time strings, for example:
 
 ```bash
- curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d 'select * from demo.t1' 192.168.0.1:6041/rest/sqlutc
+ curl -L -H "Authorization: Basic cm9vdDp0YW9zZGF0YQ==" -d "select * from demo.t1" 192.168.0.1:6041/rest/sqlutc
 ```
 
 Response:
diff --git a/docs/zh/14-reference/06-taosdump.md b/docs/zh/14-reference/06-taosdump.md
index 3a9f2e9acd215be102991a1d91fba285ef6315bb..95ee20bfbae3f0f57c51b8fcd3bd9b35b4764f3d 100644
--- a/docs/zh/14-reference/06-taosdump.md
+++ b/docs/zh/14-reference/06-taosdump.md
@@ -39,6 +39,8 @@ taosdump 有两种安装方式:
 :::tip
 - taosdump versions after 1.4.1 provide the `-I` argument for parsing the schema and data of avro files; if the `-s` argument is specified, only the schema is parsed.
 - Backups with taosdump versions after 1.4.2 use the batch size specified by the `-B` argument (default 16384); if "Error actual dump .. batch .." occurs in some environments because of insufficient network speed or disk performance, try a smaller value via the `-B` argument.
+- A taosdump export cannot be resumed after an interruption, so if the process terminates unexpectedly, the correct way to recover is to delete all files already exported or generated.
+- A taosdump import can be resumed after an interruption, but when the process restarts you will see some "table already exists" messages, which can be ignored.
 
 :::
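A hedged sketch of the dump-and-restore round trip those notes describe (database name and paths hypothetical):

```bash
# Export database "demo" to ./dump with a smaller batch size; if this step is
# interrupted, delete ./dump entirely before retrying -- exports cannot resume.
taosdump -D demo -o ./dump -B 8192

# Import from ./dump; a restarted import may print "table already exists"
# messages, which are safe to ignore.
taosdump -i ./dump
```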
diff --git a/docs/zh/14-reference/12-directory.md b/docs/zh/14-reference/12-directory.md
index f8c8cb4a082f691cf75db9bed3b42d0d6e1bc8a3..0caf7e03c32b475e82b6f0bcf58ba2d9225aa6bc 100644
--- a/docs/zh/14-reference/12-directory.md
+++ b/docs/zh/14-reference/12-directory.md
@@ -26,7 +26,6 @@ TDengine 的所有可执行文件默认存放在 _/usr/local/taos/bin_ 目录下
 - _remove.sh_: script to uninstall TDengine; execute with caution, as it is linked to the **rmtaos** command in the /usr/bin directory. It removes the TDengine installation directory /usr/local/taos but preserves /etc/taos, /var/lib/taos, and /var/log/taos
 - _taosadapter_: server-side executable that provides RESTful services and accepts write requests from various other software
 - _tarbitrator_: provides the arbitration function for two-node cluster deployments
-- _run_taosd_and_taosadapter.sh_: script that starts taosd and taosAdapter together
 - _TDinsight.sh_: script to download and install TDinsight
 - _set_core.sh_: script that conveniently configures the system to generate core dump files for debugging
 - _taosd-dump-cfg.gdb_: gdb script for conveniently debugging taosd.
diff --git a/docs/zh/20-third-party/01-grafana.mdx b/docs/zh/20-third-party/01-grafana.mdx
index b54989f0115bc07bef81ca363b5909ffa970c6ad..d7a03ba6c878c293bf7f1786a57e9e0808fed711 100644
--- a/docs/zh/20-third-party/01-grafana.mdx
+++ b/docs/zh/20-third-party/01-grafana.mdx
@@ -29,39 +29,41 @@ TDengine 能够与开源数据可视化系统 [Grafana](https://www.grafana.com/
 ### Install the Grafana Plugin and Configure the Data Source
 
-
+
-Set the cluster information as environment variables; you can also use a `.env` file, see [dotenv](https://hexdocs.pm/dotenvy/dotenv-file-format.html):
+With recent Grafana versions (8.5+), you can [browse and manage plugins](https://grafana.com/docs/grafana/next/administration/plugin-management/#plugin-catalog) inside Grafana (for 7.x, use the **installation script** or **manual install and configure** methods). On the **Configurations > Plugins** page of the Grafana admin UI, search for TDengine and install it as prompted.
 
-```sh
-export TDENGINE_API=http://tdengine.local:6041
-# user + password
-export TDENGINE_USER=user
-export TDENGINE_PASSWORD=password
-
-# other environment variables:
-# - whether to install the data source, default true (install)
-export TDENGINE_DS_ENABLED=false
-# - data source name, default TDengine
-export TDENGINE_DS_NAME=TDengine
-# - organization ID the data source belongs to, default 1
-export GF_ORG_ID=1
-# - whether the data source is editable via the admin panel, default 0 (not editable)
-export TDENGINE_EDITABLE=1
-```
+![Search tdengine in grafana plugins](grafana-plugin-search-tdengine.png)
+
+Once it is installed as shown, follow the **Create a TDengine data source** prompt to add the data source.
+
+![Install and configure Grafana data source](grafana-install-and-config.png)
+
+Enter the TDengine settings to complete the data source configuration.
+
+![TDengine Database Grafana plugin add data source](./grafana-data-source.png)
+
+With the configuration done, you can now use TDengine to create dashboards.
+
+
+
-Run the installation script:
+For users on Grafana 7.x or those using [Grafana Provisioning](https://grafana.com/docs/grafana/latest/administration/provisioning/), you can run the installation script on the Grafana server to install the plugin automatically and add the data source provisioning configuration file.
 
 ```sh
-bash -c "$(curl -fsSL https://raw.githubusercontent.com/taosdata/grafanaplugin/master/install.sh)"
+bash -c "$(curl -fsSL \
+  https://raw.githubusercontent.com/taosdata/grafanaplugin/master/install.sh)" -- \
+  -a http://localhost:6041 \
+  -u root \
+  -p taosdata
 ```
 
-The script automatically installs the Grafana plugin and configures the data source. After installation, the Grafana service needs to be restarted to take effect.
+After installation, the Grafana service needs to be restarted before the change takes effect.
 
 Save the script and run `./install.sh --help` to view the detailed help.
 
-
+
 Use the [`grafana-cli` command-line tool](https://grafana.com/docs/grafana/latest/administration/cli/) to perform the plugin [installation](https://grafana.com/grafana/plugins/tdengine-datasource/?tab=installation).
@@ -113,6 +115,73 @@ GF_INSTALL_PLUGINS=tdengine-datasource
 ![TDengine Database Grafana plugin add data source](./add_datasource4.webp)
 
+
+
+See the [Grafana containerized installation instructions](https://grafana.com/docs/grafana/next/setup-grafana/installation/docker/#install-plugins-in-the-docker-container). Use the following command to start a container and install the TDengine plugin automatically:
+
+```bash
+docker run -d \
+  -p 3000:3000 \
+  --name=grafana \
+  -e "GF_INSTALL_PLUGINS=tdengine-datasource" \
+  grafana/grafana
+```
+
+With docker-compose and Grafana provisioning for automated configuration, you can try a zero-configuration start of the TDengine + Grafana stack:
+
+1. Save the following as `tdengine.yml`.
+
+   ```yml
+   apiVersion: 1
+   datasources:
+     - name: TDengine
+       type: tdengine-datasource
+       orgId: 1
+       url: "$TDENGINE_API"
+       isDefault: true
+       secureJsonData:
+         url: "$TDENGINE_URL"
+         basicAuth: "$TDENGINE_BASIC_AUTH"
+         token: "$TDENGINE_CLOUD_TOKEN"
+       version: 1
+       editable: true
+   ```
+
+2. Save the following as `docker-compose.yml`.
+
+   ```yml
+   version: "3.7"
+
+   services:
+     tdengine:
+       image: tdengine/tdengine:2.6.0.2
+       environment:
+         TAOS_FQDN: tdengine
+       volumes:
+         - tdengine-data:/var/lib/taos/
+     grafana:
+       image: grafana/grafana:8.5.6
+       volumes:
+         - ./tdengine.yml:/etc/grafana/provisioning/datasources/tdengine.yml
+         - grafana-data:/var/lib/grafana
+       environment:
+         # install tdengine plugin at start
+         GF_INSTALL_PLUGINS: "tdengine-datasource"
+         TDENGINE_URL: "http://tdengine:6041"
+         # printf "$TDENGINE_USER:$TDENGINE_PASSWORD" | base64
+         TDENGINE_BASIC_AUTH: "cm9vdDp0YmFzZTEyNQ=="
+       ports:
+         - 3000:3000
+   volumes:
+     grafana-data:
+     tdengine-data:
+   ```
+
+3. Start TDengine + Grafana with the docker-compose command: `docker-compose up -d`.
+
+Open Grafana, and you can now add dashboards.
+
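To sanity-check a provisioned setup, Grafana's HTTP API can list the configured data sources; a hedged sketch assuming the default admin credentials:

```bash
# After `docker-compose up -d`, confirm the TDengine data source was loaded;
# the JSON response should contain "tdengine-datasource".
curl -u admin:admin http://localhost:3000/api/datasources
```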
diff --git a/docs/zh/20-third-party/grafana-data-source.png b/docs/zh/20-third-party/grafana-data-source.png
new file mode 100644
index 0000000000000000000000000000000000000000..989ffcca0bf5baae8798b0695e259aca35f0442a
Binary files /dev/null and b/docs/zh/20-third-party/grafana-data-source.png differ
diff --git a/docs/zh/20-third-party/grafana-install-and-config.png b/docs/zh/20-third-party/grafana-install-and-config.png
new file mode 100644
index 0000000000000000000000000000000000000000..b918da8b2d62e694fe1797e09cf8f23f103bc97e
Binary files /dev/null and b/docs/zh/20-third-party/grafana-install-and-config.png differ
diff --git a/docs/zh/20-third-party/grafana-plugin-search-tdengine.png b/docs/zh/20-third-party/grafana-plugin-search-tdengine.png
new file mode 100644
index 0000000000000000000000000000000000000000..cf3b66977b64f7dcd617f06024a66066cd62810e
Binary files /dev/null and b/docs/zh/20-third-party/grafana-plugin-search-tdengine.png differ
diff --git a/docs/zh/27-train-faq/03-docker.md b/docs/zh/27-train-faq/03-docker.md
index 7791569b25e102b4634f0fb899fc0973cacc0aa1..72b4603dda2a078d04427290c560c9818f19915e 100644
--- a/docs/zh/27-train-faq/03-docker.md
+++ b/docs/zh/27-train-faq/03-docker.md
@@ -108,7 +108,7 @@ taos>
 You can also use curl on the host to access the TDengine server inside the Docker container through the RESTful port.
 
 ```
-curl -u root:taosdata -d 'show databases' 127.0.0.1:6041/rest/sql
+curl -L -u root:taosdata -d "show databases" 127.0.0.1:6041/rest/sql
 ```
 
 Sample output:
@@ -148,7 +148,7 @@ docker run -d --name tdengine-taosd -p 6030-6042:6030-6042 -p 6030-6042:6030-604
 Use the curl command to verify that the RESTful interface works properly:
 
 ```bash
-curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d 'show databases;' 127.0.0.1:6041/rest/sql
+curl -L -H "Authorization: Basic cm9vdDp0YW9zZGF0YQ==" -d "show databases;" 127.0.0.1:6041/rest/sql
 ```
 
 Sample output:
diff --git a/packaging/tools/install.sh b/packaging/tools/install.sh
index b14c14c7eeda9ffc3ac4c67d3e062554d3a7710a..d74a962210318f59f4afc9a369f95503b342bbd2 100755
--- a/packaging/tools/install.sh
+++ b/packaging/tools/install.sh
@@ -105,6 +105,9 @@ elif echo $osinfo | grep -qwi "debian"; then
 elif echo $osinfo | grep -qwi "Kylin"; then
   # echo "This is Kylin system"
   os_type=1
+elif echo $osinfo | grep -qwi "Red"; then
+  # echo "This is Red Hat system"
+  os_type=1
 elif echo $osinfo | grep -qwi "centos"; then
   # echo "This is centos system"
   os_type=2
@@ -196,7 +199,6 @@ function install_bin() {
   ${csudo}rm -f ${bin_link_dir}/${uninstallScript} || :
   ${csudo}rm -f ${bin_link_dir}/tarbitrator || :
   ${csudo}rm -f ${bin_link_dir}/set_core || :
-  ${csudo}rm -f ${bin_link_dir}/run_${serverName}_and_${adapterName}.sh || :
  ${csudo}rm -f ${bin_link_dir}/TDinsight.sh || :
 
   ${csudo}cp -r ${script_dir}/bin/* ${install_main_dir}/bin && ${csudo}chmod 0555 ${install_main_dir}/bin/*
@@ -211,7 +213,6 @@
   [ -x ${install_main_dir}/bin/TDinsight.sh ] && ${csudo}ln -s ${install_main_dir}/bin/TDinsight.sh ${bin_link_dir}/TDinsight.sh || :
   [ -x ${install_main_dir}/bin/remove.sh ] && ${csudo}ln -s ${install_main_dir}/bin/remove.sh ${bin_link_dir}/${uninstallScript} || :
   [ -x ${install_main_dir}/bin/set_core.sh ] && ${csudo}ln -s ${install_main_dir}/bin/set_core.sh ${bin_link_dir}/set_core || :
-  [ -x ${install_main_dir}/bin/run_${serverName}_and_${adapterName}.sh ] && ${csudo}ln -s ${install_main_dir}/bin/run_${serverName}_and_${adapterName}.sh ${bin_link_dir}/run_${serverName}_and_${adapterName}.sh || :
  [ -x ${install_main_dir}/bin/tarbitrator ] && ${csudo}ln -s ${install_main_dir}/bin/tarbitrator ${bin_link_dir}/tarbitrator || :
 
   if [ "$verMode" == "cluster" ]; then
diff --git a/packaging/tools/make_install.sh b/packaging/tools/make_install.sh
index bcb3664c29c33e71f849c9f43e62335fb123fe71..c40fe14e3a34354e03f2169fca7e9f0171c064ee 100755
--- a/packaging/tools/make_install.sh
+++ b/packaging/tools/make_install.sh
@@ -178,7 +178,6 @@ function install_bin() {
   if [ "$osType" != "Darwin" ]; then
     ${csudo}rm -f ${bin_link_dir}/perfMonitor || :
     ${csudo}rm -f ${bin_link_dir}/set_core || :
-    ${csudo}rm -f ${bin_link_dir}/run_${serverName}_and_${adapterName}.sh || :
     ${csudo}rm -f ${bin_link_dir}/${uninstallScript} || :
 
     ${csudo}cp -r ${binary_dir}/build/bin/${clientName} ${install_main_dir}/bin || :
@@ -192,7 +191,6 @@
     ${csudo}cp -r ${script_dir}/taosd-dump-cfg.gdb ${install_main_dir}/bin
     ${csudo}cp -r ${script_dir}/remove.sh ${install_main_dir}/bin
     ${csudo}cp -r ${script_dir}/set_core.sh ${install_main_dir}/bin
-    ${csudo}cp -r ${script_dir}/run_${serverName}_and_${adapterName}.sh ${install_main_dir}/bin
     ${csudo}cp -r ${script_dir}/startPre.sh ${install_main_dir}/bin
 
     ${csudo}chmod 0555 ${install_main_dir}/bin/*
@@ -204,7 +202,6 @@
     [ -x ${install_main_dir}/bin/${demoName} ] && ${csudo}ln -s ${install_main_dir}/bin/${demoName} ${bin_link_dir}/${demoName} || :
     [ -x ${install_main_dir}/bin/perfMonitor ] && ${csudo}ln -s ${install_main_dir}/bin/perfMonitor ${bin_link_dir}/perfMonitor || :
     [ -x ${install_main_dir}/set_core.sh ] && ${csudo}ln -s ${install_main_dir}/bin/set_core.sh ${bin_link_dir}/set_core || :
-    [ -x ${install_main_dir}/run_${serverName}_and_${adapterName}.sh ] && ${csudo}ln -s ${install_main_dir}/bin/run_${serverName}_and_${adapterName}.sh ${bin_link_dir}/run_${serverName}_and_${adapterName}.sh || :
     [ -x ${install_main_dir}/bin/remove.sh ] && ${csudo}ln -s ${install_main_dir}/bin/remove.sh ${bin_link_dir}/${uninstallScript} || :
   else
diff --git a/packaging/tools/makepkg.sh b/packaging/tools/makepkg.sh
index 139749e4e64bd2f7c4983915274c8c2b879ad775..8c45b0a7d12485d3e95554f7d2223790366f3401 100755
--- a/packaging/tools/makepkg.sh
+++ b/packaging/tools/makepkg.sh
@@ -91,7 +91,6 @@ else
   ${build_dir}/bin/tarbitrator\
   ${script_dir}/remove.sh \
   ${script_dir}/set_core.sh \
-  ${script_dir}/run_taosd_and_taosadapter.sh \
   ${script_dir}/startPre.sh \
   ${script_dir}/taosd-dump-cfg.gdb"
 fi
@@ -158,7 +157,6 @@ if [ $adapterName != "taosadapter" ]; then
   sed -i "s/taosadapter/${adapterName}/g" ${install_dir}/cfg/$adapterName.service  # !!! do not change taosadapter here
   mv ${install_dir}/bin/taosadapter ${install_dir}/bin/${adapterName}
-  mv ${install_dir}/bin/run_taosd_and_taosadapter.sh ${install_dir}/bin/run_${serverName}_and_${adapterName}.sh
   mv ${install_dir}/bin/taosd-dump-cfg.gdb ${install_dir}/bin/${serverName}-dump-cfg.gdb
 fi
diff --git a/packaging/tools/remove.sh b/packaging/tools/remove.sh
index db8a2097477fa26f269348fa7a1846f509a8f09d..cb20085125571d4037c264ab713ce4678874de0c 100755
--- a/packaging/tools/remove.sh
+++ b/packaging/tools/remove.sh
@@ -81,7 +81,6 @@ function clean_bin() {
   ${csudo}rm -f ${bin_link_dir}/${uninstallScript} || :
   ${csudo}rm -f ${bin_link_dir}/tarbitrator || :
   ${csudo}rm -f ${bin_link_dir}/set_core || :
-  ${csudo}rm -f ${bin_link_dir}/run_${serverName}_and_${adapterName}.sh || :
   ${csudo}rm -f ${bin_link_dir}/TDinsight.sh || :
 }
diff --git a/packaging/tools/run_taosd_and_taosadapter.sh b/packaging/tools/run_taosd_and_taosadapter.sh
deleted file mode 100755
index 9ab9eb484a4a5bbc4e3d3994d97b61e0f4bd328d..0000000000000000000000000000000000000000
--- a/packaging/tools/run_taosd_and_taosadapter.sh
+++ /dev/null
@@ -1,3 +0,0 @@
-#!/bin/bash
-[[ -x /usr/bin/taosadapter ]] && /usr/bin/taosadapter &
-taosd
diff --git a/src/client/src/tscUtil.c b/src/client/src/tscUtil.c
index 8bbcf9b05721867438c15e893e5424a862d5d4ed..13eacfc6715ca485bac3a3cfa65d7ccb5d1f9d43 100644
--- a/src/client/src/tscUtil.c
+++ b/src/client/src/tscUtil.c
@@ -4240,7 +4240,8 @@ void executeQuery(SSqlObj* pSql, SQueryInfo* pQueryInfo) {
     tscAddIntoSqlList(pSql);
   }
 
-  if (taosArrayGetSize(pQueryInfo->pUpstream) > 0) {  // nest query. do execute it firstly
+  // upstream may be freed before retry
+  if (pQueryInfo->pUpstream && taosArrayGetSize(pQueryInfo->pUpstream) > 0) {  // nest query. do execute it firstly
     code = doInitSubState(pSql, (int32_t) taosArrayGetSize(pQueryInfo->pUpstream));
     if (code != TSDB_CODE_SUCCESS) {
       goto _error;
diff --git a/src/kit/shell/src/shellLinux.c b/src/kit/shell/src/shellLinux.c
index aa67019628ebeed565a874e5f766632aaafae9b6..863da2e1a727f073fe5eabf2b9d8d17f4e05c4b4 100644
--- a/src/kit/shell/src/shellLinux.c
+++ b/src/kit/shell/src/shellLinux.c
@@ -24,7 +24,6 @@
 #define OPT_ABORT 1 /* --abort */
 
 int indicator = 1;
-int p_port = 6041;
 struct termios oldtio;
 
 extern int wcwidth(wchar_t c);
@@ -79,7 +78,7 @@ static error_t parse_opt(int key, char *arg, struct argp_state *state) {
       if (arg) {
         args.cloud = false;
         tsDnodeShellPort = atoi(arg);
-        p_port = atoi(arg);
+        args.port = atoi(arg);
       } else {
         fprintf(stderr, "Invalid port\n");
         return -1;
@@ -239,16 +238,16 @@ void shellParseArgument(int argc, char *argv[], SShellArguments *arguments) {
   argp_parse(&argp, argc, argv, 0, 0, arguments);
   if (args.cloudDsn == NULL) {
-    if (args.cloud) {
-      args.cloudDsn = getenv("TDENGINE_CLOUD_DSN");
-      if (args.cloudDsn == NULL) {
-        args.cloud = false;
-      }
-    }
+    if (args.cloud) {
+      args.cloudDsn = getenv("TDENGINE_CLOUD_DSN");
+      if (args.cloudDsn == NULL) {
+        args.cloud = false;
+      }
+    }
   } else {
-    args.cloud = true;
+    args.cloud = true;
   }
-
+
   if (arguments->abort) {
 #ifndef _ALPINE
     error(10, 0, "ABORTED");
diff --git a/src/kit/shell/src/shellWindows.c b/src/kit/shell/src/shellWindows.c
index f7a2e47bc83251b68e2a62d38962c7cce08c45fd..9aab9f49cd1be5051f28e3c69887bd8766e8af75 100644
--- a/src/kit/shell/src/shellWindows.c
+++ b/src/kit/shell/src/shellWindows.c
@@ -250,6 +250,12 @@ void shellParseArgument(int argc, char *argv[], SShellArguments *arguments) {
   if (args.cloudDsn == NULL) {
     if (args.cloud) {
       args.cloudDsn = getenv("TDENGINE_CLOUD_DSN");
+      if (args.cloudDsn != NULL && args.cloudDsn[0] == '\"') {
+        args.cloudDsn += 1;  // skip a leading double quote left in the environment value
+      }
+      if (args.cloudDsn != NULL && strlen(args.cloudDsn) > 0 && args.cloudDsn[strlen(args.cloudDsn) - 1] == '\"') {
+        args.cloudDsn[strlen(args.cloudDsn) - 1] = '\0';  // drop the matching trailing quote
+      }
       if (args.cloudDsn == NULL) {
         args.cloud = false;
       }
diff --git a/src/kit/taos-tools b/src/kit/taos-tools
index 0a81480420d6601bbdb57770ee64e40f24c4ea83..d3c29fb492514cbaf08cb533976121bff5d94dea 160000
--- a/src/kit/taos-tools
+++ b/src/kit/taos-tools
@@ -1 +1 @@
-Subproject commit 0a81480420d6601bbdb57770ee64e40f24c4ea83
+Subproject commit d3c29fb492514cbaf08cb533976121bff5d94dea
diff --git a/src/query/src/qAggMain.c b/src/query/src/qAggMain.c
index fc36974c9b969091564a93551117378207980c2b..8d9560f7ec56cf13ff6da0b1a41d784763a7e950 100644
--- a/src/query/src/qAggMain.c
+++ b/src/query/src/qAggMain.c
@@ -3645,7 +3645,6 @@ static void diff_function(SQLFunctionCtx *pCtx) {
   SDiffFuncInfo *pDiffInfo = GET_ROWCELL_INTERBUF(pResInfo);
 
   void *data = GET_INPUT_DATA_LIST(pCtx);
-  bool  isFirstBlock = (pDiffInfo->valueAssigned == false);
 
   int32_t notNullElems = 0;
@@ -3668,7 +3667,7 @@
         if (pDiffInfo->valueAssigned) {
           int32_t diff = (int32_t)(pData[i] - pDiffInfo->i64Prev);
           if (diff >= 0 || !pDiffInfo->ignoreNegative) {
-            *pOutput = (int32_t)(pData[i] - pDiffInfo->i64Prev);  // direct previous may be null
+            *pOutput = diff;
             *pTimestamp = (tsList != NULL)? tsList[i]:0;
             pOutput += 1;
             pTimestamp += 1;
@@ -3694,7 +3693,7 @@
         if (pDiffInfo->valueAssigned) {
           int64_t diff = pData[i] - pDiffInfo->i64Prev;
           if (diff >= 0 || !pDiffInfo->ignoreNegative) {
-            *pOutput = pData[i] - pDiffInfo->i64Prev;  // direct previous may be null
+            *pOutput = diff;
             *pTimestamp = (tsList != NULL)? tsList[i]:0;
             pOutput += 1;
             pTimestamp += 1;
@@ -3720,7 +3719,7 @@
         if (pDiffInfo->valueAssigned) {
           double diff = pData[i] - pDiffInfo->d64Prev;
           if (diff >= 0 || !pDiffInfo->ignoreNegative) {
-            SET_DOUBLE_VAL(pOutput, pData[i] - pDiffInfo->d64Prev);  // direct previous may be null
+            SET_DOUBLE_VAL(pOutput, diff);
             *pTimestamp = (tsList != NULL)? tsList[i]:0;
             pOutput += 1;
             pTimestamp += 1;
@@ -3746,7 +3745,7 @@
         if (pDiffInfo->valueAssigned) {
           float diff = (float)(pData[i] - pDiffInfo->d64Prev);
           if (diff >= 0 || !pDiffInfo->ignoreNegative) {
-            *pOutput = (float)(pData[i] - pDiffInfo->d64Prev);
+            *pOutput = diff;
             *pTimestamp = (tsList != NULL)? tsList[i]:0;
             pOutput += 1;
             pTimestamp += 1;
@@ -3772,7 +3771,7 @@
         if (pDiffInfo->valueAssigned) {
           int16_t diff = (int16_t)(pData[i] - pDiffInfo->i64Prev);
           if (diff >= 0 || !pDiffInfo->ignoreNegative) {
-            *pOutput = (int16_t)(pData[i] - pDiffInfo->i64Prev);
+            *pOutput = diff;
             *pTimestamp = (tsList != NULL)? tsList[i]:0;
             pOutput += 1;
             pTimestamp += 1;
@@ -3798,7 +3797,7 @@
         if (pDiffInfo->valueAssigned) {
           int8_t diff = (int8_t)(pData[i] - pDiffInfo->i64Prev);
           if (diff >= 0 || !pDiffInfo->ignoreNegative) {
-            *pOutput = (int8_t)(pData[i] - pDiffInfo->i64Prev);
+            *pOutput = diff;
             *pTimestamp = (tsList != NULL)? tsList[i]:0;
             pOutput += 1;
             pTimestamp += 1;
@@ -3816,23 +3815,15 @@
     qError("error input type");
   }
 
-  // initial value is not set yet
-  if (!pDiffInfo->valueAssigned || notNullElems <= 0) {
-    /*
-     * 1. current block and blocks before are full of null
-     * 2. current block may be null value
-     */
-    assert(pCtx->hasNull);
-  } else {
+  if (notNullElems > 0) {
     for (int t = 0; t < pCtx->tagInfo.numOfTagCols; ++t) {
       SQLFunctionCtx* tagCtx = pCtx->tagInfo.pTagCtxList[t];
       if (tagCtx->functionId == TSDB_FUNC_TAG_DUMMY) {
         aAggs[TSDB_FUNC_TAGPRJ].xFunction(tagCtx);
       }
     }
 
-    int32_t forwardStep = (isFirstBlock) ? notNullElems : notNullElems;
-    GET_RES_INFO(pCtx)->numOfRes += forwardStep;
+    GET_RES_INFO(pCtx)->numOfRes += notNullElems;
   }
 }
diff --git a/src/query/src/qExecutor.c b/src/query/src/qExecutor.c
index 492bf5241c4f8e4caae11b1553d5579fc00b2398..7d4b0b7edba8f7664f5c797a41241dbe7b2fe4b2 100644
--- a/src/query/src/qExecutor.c
+++ b/src/query/src/qExecutor.c
@@ -1588,6 +1588,8 @@ static bool initGroupbyInfo(const SSDataBlock *pSDataBlock, const SGroupbyExpr *
     return true;
   }
   pInfo->pGroupbyDataInfo = taosArrayInit(pGroupbyExpr->numOfGroupCols, sizeof(SGroupbyDataInfo));
+  // reserve the head of the key buffer for the key length (int32_t)
+  pInfo->totalBytes = sizeof(int32_t);
 
   for (int32_t k = 0; k < pGroupbyExpr->numOfGroupCols; ++k) {
     SColIndex* pColIndex = taosArrayGet(pGroupbyExpr->columnInfo, k);
@@ -1624,7 +1626,8 @@
     *buf = NULL;
     return;
   }
-  *buf = p;
+  *buf = p;
+  p += sizeof(int32_t);
 
   for (int32_t i = 0; i < taosArrayGetSize(pInfo->pGroupbyDataInfo); i++) {
     SGroupbyDataInfo *pDataInfo = taosArrayGet(pInfo->pGroupbyDataInfo, i);
@@ -1646,26 +1649,22 @@
     memcpy(p, MULTI_KEY_DELIM, strlen(MULTI_KEY_DELIM));
     p += strlen(MULTI_KEY_DELIM);
   }
+
+  // compute the key length and store it at the head of the buffer
+  int32_t keyLen = (p - *buf) - sizeof(int32_t);
+  *(int32_t *)(*buf) = keyLen;
 }
 
 static bool isGroupbyKeyEqual(void *a, void *b, void *ext) {
-  SGroupbyOperatorInfo *pInfo = (SGroupbyOperatorInfo *)ext;
-  if (memcmp(a, b, pInfo->totalBytes) == 0) {
-    return true;
+  int32_t len1 = *(int32_t *)a;
+  int32_t len2 = *(int32_t *)b;
+  if (len1 != len2) {
+    return false;
   }
-  int32_t offset = 0;
-  for (int32_t i = 0; i < taosArrayGetSize(pInfo->pGroupbyDataInfo); i++) {
-    SGroupbyDataInfo *pDataInfo = taosArrayGet(pInfo->pGroupbyDataInfo, i);
+  char *a1 = (char *)a + sizeof(int32_t);
+  char *b1 = (char *)b + sizeof(int32_t);
 
-    char *k1 = (char *)a + offset;
-    char *k2 = (char *)b + offset;
-    if (getComparFunc(pDataInfo->type, 0)(k1, k2) != 0) {
-      return false;
-    }
-    offset += pDataInfo->bytes;
-    offset += (int32_t)strlen(MULTI_KEY_DELIM);
-  }
-  return true;
+  return memcmp(a1, b1, len1) == 0;
 }
 
 static void doHashGroupbyAgg(SOperatorInfo* pOperator, SGroupbyOperatorInfo *pInfo, SSDataBlock *pSDataBlock) {
@@ -1708,7 +1707,9 @@
       setParamForStableStddevByColData(pRuntimeEnv, pInfo->binfo.pCtx, pOperator->numOfOutput, pOperator->pExpr, pInfo);
     }
 
-    int32_t ret = setGroupResultOutputBuf(pRuntimeEnv, &(pInfo->binfo), pOperator->numOfOutput, pInfo->prevData, type, pInfo->totalBytes, item->groupIndex);
+    char *preKey = pInfo->prevData + sizeof(int32_t);
+    int32_t keyLen = *(int32_t *)pInfo->prevData;
+    int32_t ret = setGroupResultOutputBuf(pRuntimeEnv, &(pInfo->binfo), pOperator->numOfOutput, preKey, type, keyLen, item->groupIndex);
     if (ret != TSDB_CODE_SUCCESS) {  // null data, too many state code
       longjmp(pRuntimeEnv->env, TSDB_CODE_QRY_APP_ERROR);
     }
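The rewritten comparator rejects keys of different lengths up front and then memcmps the length-prefixed payload, so variable-length values such as nchar columns group correctly. A hedged sketch of the kind of multi-column GROUP BY this code path serves (schema borrowed from the TS-1619 test case further below):

```bash
# Each group key is laid out as "<len><val1><DELIM><val2><DELIM>", so grouping
# by a tag plus an nchar column compares whole keys rather than fixed widths.
taos -s "SELECT t1, c2, SUM(c1) FROM test.stb GROUP BY t1, c2;"
```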
@@ -1730,7 +1731,9 @@
     if (pQueryAttr->stableQuery && pQueryAttr->stabledev && (pRuntimeEnv->prevResult != NULL)) {
       setParamForStableStddevByColData(pRuntimeEnv, pInfo->binfo.pCtx, pOperator->numOfOutput, pOperator->pExpr, pInfo);
     }
-    int32_t ret = setGroupResultOutputBuf(pRuntimeEnv, &(pInfo->binfo), pOperator->numOfOutput, pInfo->prevData, type, pInfo->totalBytes, item->groupIndex);
+    char *preKey = pInfo->prevData + sizeof(int32_t);
+    int32_t keyLen = *(int32_t *)pInfo->prevData;
+    int32_t ret = setGroupResultOutputBuf(pRuntimeEnv, &(pInfo->binfo), pOperator->numOfOutput, preKey, type, keyLen, item->groupIndex);
     if (ret != TSDB_CODE_SUCCESS) {  // null data, too many state code
       longjmp(pRuntimeEnv->env, TSDB_CODE_QRY_APP_ERROR);
     }
@@ -4312,14 +4315,15 @@
   // find colid in dataBlock
   int32_t bytes, offset = 0;
   char* val = NULL;
+  char* prevData = pInfo->prevData + sizeof(int32_t);  // skip the key-length head (int32_t)
   for (int32_t idx = 0; idx < taosArrayGetSize(pInfo->pGroupbyDataInfo); idx++) {
     SGroupbyDataInfo *pDataInfo = taosArrayGet(pInfo->pGroupbyDataInfo, idx);
     if (pDataInfo->index == pExpr1->colInfo.colId) {
       bytes = pDataInfo->bytes;
-      val = pInfo->prevData + offset;
+      val = prevData + offset;
       break;
     }
-    offset += pDataInfo->bytes;
+    offset += pDataInfo->bytes + strlen(MULTI_KEY_DELIM);  // values are separated by MULTI_KEY_DELIM
   }
   if (val == NULL) { continue; }
diff --git a/src/tsdb/src/tsdbRead.c b/src/tsdb/src/tsdbRead.c
index 56e215e0b28828ec18255f36e516fc39e59f0b4a..992b6285d63eb17a064b22d11bcecb7629f397a1 100644
--- a/src/tsdb/src/tsdbRead.c
+++ b/src/tsdb/src/tsdbRead.c
@@ -1298,7 +1298,7 @@ static int32_t offsetSkipBlock(STsdbQueryHandle* q, SBlockInfo* pBlockInfo, int6
         range.from = i;
       }
     }
-    range.to = 0;
+    range.to = sblock;
     taosArrayPush(pArray, &range);
     range.from = -1;
     break;
@@ -1314,7 +1314,7 @@
       if(range.from == -1) {
         range.from = i;
       } else {
-        if(range.to + 1 != i) {
+        if(range.to - 1 != i) {
           // add the previous
           taosArrayPush(pArray, &range);
           range.from = i;
@@ -1359,16 +1359,17 @@ static void shrinkBlocksByQuery(STsdbQueryHandle *pQueryHandle, STableCheckInfo
   SBlockIdx *compIndex = pQueryHandle->rhelper.pBlkIdx;
   bool order = ASCENDING_TRAVERSE(pQueryHandle->order);
 
+  TSKEY s = TSKEY_INITIAL_VAL, e = TSKEY_INITIAL_VAL;
   if (order) {
     assert(pCheckInfo->lastKey <= pQueryHandle->window.ekey && pQueryHandle->window.skey <= pQueryHandle->window.ekey);
+    s = pQueryHandle->window.skey;
+    e = pQueryHandle->window.ekey;
   } else {
     assert(pCheckInfo->lastKey >= pQueryHandle->window.ekey && pQueryHandle->window.skey >= pQueryHandle->window.ekey);
+    e = pQueryHandle->window.skey;
+    s = pQueryHandle->window.ekey;
   }
 
-  TSKEY s = TSKEY_INITIAL_VAL, e = TSKEY_INITIAL_VAL;
-  s = MIN(pCheckInfo->lastKey, pQueryHandle->window.ekey);
-  e = MAX(pCheckInfo->lastKey, pQueryHandle->window.ekey);
-
   // discard the unqualified data block based on the query time window
   int32_t start = binarySearchForBlock(pCompInfo->blocks, compIndex->numOfBlocks, s, TSDB_ORDER_ASC);
   if (s > pCompInfo->blocks[start].keyLast) {
diff --git a/tests/pytest/client/client.py b/tests/pytest/client/client.py
index 9a155a4df9ec1f4b6b1ce4860a75938c5edc7731..9192f7e6d3690deea70c224f10e1bf505b23e683 100644
--- a/tests/pytest/client/client.py
+++ b/tests/pytest/client/client.py
@@ -15,6 +15,7 @@ import sys
 from util.log import *
 from util.cases import *
 from util.sql import *
+import os
 
 from datetime import timedelta
@@ -73,6 +74,14 @@ class TDTestCase:
         tdSql.checkRows(1)
         tdSql.checkData(0, 0, 2)
         tdSql.checkData(0, 1, "master")
+
+        cmd = "taos -h 127.0.0.1 -s 'show databases'"
+        r = os.popen(cmd)
+        text = r.read()
+        r.close()
+
+        if 'Unable to establish connection' in text:
+            tdLog.exit("%s failed: command 'taos -h 127.0.0.1' was unable to establish a connection" % __file__)
 
     def stop(self):
         tdSql.close()
diff --git a/tests/pytest/functions/data.tar.gz b/tests/pytest/functions/data.tar.gz
new file mode 100644
index 0000000000000000000000000000000000000000..9b0fd32993cb2654e9b6c2a9546903436da43f27
Binary files /dev/null and b/tests/pytest/functions/data.tar.gz differ
diff --git a/tests/pytest/functions/function_diff.py b/tests/pytest/functions/function_diff.py
index 5995b821d1a42e75d7b80e0d564e4281da9b3979..9742518886b93ec644e994f211583a37e61cf4e3 100644
--- a/tests/pytest/functions/function_diff.py
+++ b/tests/pytest/functions/function_diff.py
@@ -16,6 +16,7 @@ import taos
 from util.log import *
 from util.cases import *
 from util.sql import *
+from util.dnodes import *
 import numpy as np
 
@@ -156,7 +157,25 @@ class TDTestCase:
         tdSql.error("select diff(col) from st group by dev")
         tdSql.error("select diff(col) from st group by col")
-
+
+        # TS-1612
+        os.system("tar -zxf %s/functions/data.tar.gz" % os.getcwd())
+        tdSql.execute("create database radb")
+        tdSql.execute("use radb")
+        tdSql.execute("CREATE TABLE `vehicle_automode` (`time` TIMESTAMP,`auto_ctl_odom` INT) TAGS (`mac_address` BINARY(30))")
+        tdSql.execute("CREATE TABLE `va_00545a230327` USING `vehicle_automode` TAGS ('00545a230327')")
+        tdSql.execute("insert into va_00545a230327 file 'data/va_00545a230327.csv' ")
+        tdSql.query("select * from vehicle_automode")
+        rows = tdSql.queryRows
+        tdSql.query("select diff(auto_ctl_odom,1) as aco from radb.vehicle_automode GROUP BY tbname")
+        tdSql.checkRows(rows - 1)
+        os.system("rm -rf data")
+
+        tdDnodes.stop(1)
+        tdDnodes.start(1)
+        tdSql.query("select diff(auto_ctl_odom,1) as aco from radb.vehicle_automode GROUP BY tbname")
+        tdSql.checkRows(rows - 1)
 
     def stop(self):
         tdSql.close()
         tdLog.success("%s successfully executed" % __file__)
diff --git a/tests/pytest/query/queryGroupbySort.py b/tests/pytest/query/queryGroupbySort.py
index 6439fc6560d7f74b6d27ba3847f2459918fd94dc..c5ca1efacd6b4d29ea4c4b9212873ea85aa435ed 100644
--- a/tests/pytest/query/queryGroupbySort.py
+++ b/tests/pytest/query/queryGroupbySort.py
@@ -88,6 +88,23 @@ class TDTestCase:
         tdSql.query("select count(*) from tb group by c1")
         tdSql.checkRows(0)
 
+        # TS-1619
+        tdSql.execute("create database test")
+        tdSql.execute("use test")
+        tdSql.execute("create table stb(ts timestamp, c1 int, c2 nchar(30)) tags(t1 int)")
+        for i in range(3):
+            tdSql.execute("create table t%d using stb tags(%d)" % (i, i))
+            sql = "insert into t%d values " % i
+            for j in range(16):
+                if j % 4 == 0:
+                    s = '00'
+                else:
+                    s = str(j % 4 * 15)
+                sql += "(%d, %d, '2022-06-01 0%d:%s')" % (self.ts + j, i, int(j / 4), s)
+            tdSql.execute(sql)
+
+        tdSql.query("select c2, sum(c1) from stb group by c2")
+        tdSql.checkRows(16)
 
     def stop(self):
         tdSql.close()
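A hedged sketch of how these cases are typically run from the tests/pytest directory (the test.py harness entry point is assumed from the util.* imports above):

```bash
# Run the two new regression cases against a locally started cluster; test.py
# is the driver that the util.sql/util.dnodes helpers plug into.
cd tests/pytest
python3 ./test.py -f functions/function_diff.py
python3 ./test.py -f query/queryGroupbySort.py
```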